author      Ingo Molnar <mingo@kernel.org>    2016-08-10 14:36:23 +0200
committer   Ingo Molnar <mingo@kernel.org>    2016-08-10 14:36:23 +0200
commit      fdbdfefbabefcdf3f57560163b43fdc4cf95eb2f (patch)
tree        1d0c420d4eaff48cf2486f10dded8d551241ee94
parent      6731b0d611a1274f9e785fa0189ac2aeeabd0591 (diff)
parent      a0cba2179ea4c1820fce2ee046b6ed90ecc56196 (diff)
Merge branch 'linus' into timers/urgent, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--.cocciconfig3
-rw-r--r--.gitignore2
-rw-r--r--.mailmap15
-rw-r--r--Documentation/ABI/testing/sysfs-class-pwm9
-rw-r--r--Documentation/DMA-API.txt33
-rw-r--r--Documentation/DMA-attributes.txt2
-rw-r--r--Documentation/DocBook/Makefile31
-rw-r--r--Documentation/DocBook/device-drivers.tmpl4
-rw-r--r--Documentation/DocBook/gpu.tmpl3540
-rw-r--r--Documentation/DocBook/media/.gitignore1
-rw-r--r--Documentation/DocBook/media/Makefile427
-rw-r--r--Documentation/DocBook/media/bayer.png.b64171
-rw-r--r--Documentation/DocBook/media/constraints.png.b6459
-rw-r--r--Documentation/DocBook/media/crop.gif.b64105
-rw-r--r--Documentation/DocBook/media/dvb/.gitignore1
-rw-r--r--Documentation/DocBook/media/dvb/audio.xml1314
-rw-r--r--Documentation/DocBook/media/dvb/ca.xml582
-rw-r--r--Documentation/DocBook/media/dvb/demux.xml1162
-rw-r--r--Documentation/DocBook/media/dvb/dvbapi.xml156
-rw-r--r--Documentation/DocBook/media/dvb/dvbproperty.xml1680
-rw-r--r--Documentation/DocBook/media/dvb/dvbstb.pdfbin1881 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/dvb/examples.xml367
-rw-r--r--Documentation/DocBook/media/dvb/fe-diseqc-recv-slave-reply.xml78
-rw-r--r--Documentation/DocBook/media/dvb/fe-diseqc-reset-overload.xml51
-rw-r--r--Documentation/DocBook/media/dvb/fe-diseqc-send-burst.xml89
-rw-r--r--Documentation/DocBook/media/dvb/fe-diseqc-send-master-cmd.xml72
-rw-r--r--Documentation/DocBook/media/dvb/fe-enable-high-lnb-voltage.xml61
-rw-r--r--Documentation/DocBook/media/dvb/fe-get-info.xml266
-rw-r--r--Documentation/DocBook/media/dvb/fe-get-property.xml81
-rw-r--r--Documentation/DocBook/media/dvb/fe-read-status.xml107
-rw-r--r--Documentation/DocBook/media/dvb/fe-set-frontend-tune-mode.xml64
-rw-r--r--Documentation/DocBook/media/dvb/fe-set-tone.xml91
-rw-r--r--Documentation/DocBook/media/dvb/fe-set-voltage.xml69
-rw-r--r--Documentation/DocBook/media/dvb/frontend.xml269
-rw-r--r--Documentation/DocBook/media/dvb/frontend_legacy_api.xml654
-rw-r--r--Documentation/DocBook/media/dvb/intro.xml211
-rw-r--r--Documentation/DocBook/media/dvb/net.xml238
-rw-r--r--Documentation/DocBook/media/dvb/video.xml1968
-rw-r--r--Documentation/DocBook/media/dvbstb.png.b64398
-rw-r--r--Documentation/DocBook/media/fieldseq_bt.gif.b64447
-rw-r--r--Documentation/DocBook/media/fieldseq_tb.gif.b64445
-rw-r--r--Documentation/DocBook/media/nv12mt.gif.b6437
-rw-r--r--Documentation/DocBook/media/nv12mt_example.gif.b64121
-rw-r--r--Documentation/DocBook/media/pipeline.png.b64213
-rw-r--r--Documentation/DocBook/media/selection.png.b64206
-rw-r--r--Documentation/DocBook/media/typical_media_device.svg28
-rw-r--r--Documentation/DocBook/media/v4l/.gitignore1
-rw-r--r--Documentation/DocBook/media/v4l/biblio.xml381
-rw-r--r--Documentation/DocBook/media/v4l/capture.c.xml659
-rw-r--r--Documentation/DocBook/media/v4l/cec-api.xml75
-rw-r--r--Documentation/DocBook/media/v4l/cec-func-close.xml64
-rw-r--r--Documentation/DocBook/media/v4l/cec-func-ioctl.xml78
-rw-r--r--Documentation/DocBook/media/v4l/cec-func-open.xml104
-rw-r--r--Documentation/DocBook/media/v4l/cec-func-poll.xml94
-rw-r--r--Documentation/DocBook/media/v4l/cec-ioc-adap-g-caps.xml151
-rw-r--r--Documentation/DocBook/media/v4l/cec-ioc-adap-g-log-addrs.xml329
-rw-r--r--Documentation/DocBook/media/v4l/cec-ioc-adap-g-phys-addr.xml86
-rw-r--r--Documentation/DocBook/media/v4l/cec-ioc-dqevent.xml202
-rw-r--r--Documentation/DocBook/media/v4l/cec-ioc-g-mode.xml255
-rw-r--r--Documentation/DocBook/media/v4l/cec-ioc-receive.xml274
-rw-r--r--Documentation/DocBook/media/v4l/common.xml1102
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml2723
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml5505
-rw-r--r--Documentation/DocBook/media/v4l/crop.pdfbin5846 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/dev-capture.xml110
-rw-r--r--Documentation/DocBook/media/v4l/dev-codec.xml27
-rw-r--r--Documentation/DocBook/media/v4l/dev-effect.xml17
-rw-r--r--Documentation/DocBook/media/v4l/dev-event.xml43
-rw-r--r--Documentation/DocBook/media/v4l/dev-osd.xml149
-rw-r--r--Documentation/DocBook/media/v4l/dev-output.xml106
-rw-r--r--Documentation/DocBook/media/v4l/dev-overlay.xml368
-rw-r--r--Documentation/DocBook/media/v4l/dev-radio.xml49
-rw-r--r--Documentation/DocBook/media/v4l/dev-raw-vbi.xml345
-rw-r--r--Documentation/DocBook/media/v4l/dev-rds.xml196
-rw-r--r--Documentation/DocBook/media/v4l/dev-sdr.xml126
-rw-r--r--Documentation/DocBook/media/v4l/dev-sliced-vbi.xml706
-rw-r--r--Documentation/DocBook/media/v4l/dev-subdev.xml478
-rw-r--r--Documentation/DocBook/media/v4l/dev-teletext.xml29
-rw-r--r--Documentation/DocBook/media/v4l/driver.xml200
-rw-r--r--Documentation/DocBook/media/v4l/fdl-appendix.xml671
-rw-r--r--Documentation/DocBook/media/v4l/fieldseq_bt.pdfbin9185 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/fieldseq_tb.pdfbin9173 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/func-close.xml62
-rw-r--r--Documentation/DocBook/media/v4l/func-ioctl.xml71
-rw-r--r--Documentation/DocBook/media/v4l/func-mmap.xml183
-rw-r--r--Documentation/DocBook/media/v4l/func-munmap.xml76
-rw-r--r--Documentation/DocBook/media/v4l/func-open.xml113
-rw-r--r--Documentation/DocBook/media/v4l/func-poll.xml142
-rw-r--r--Documentation/DocBook/media/v4l/func-read.xml181
-rw-r--r--Documentation/DocBook/media/v4l/func-select.xml130
-rw-r--r--Documentation/DocBook/media/v4l/func-write.xml128
-rw-r--r--Documentation/DocBook/media/v4l/gen-errors.xml77
-rw-r--r--Documentation/DocBook/media/v4l/io.xml1545
-rw-r--r--Documentation/DocBook/media/v4l/keytable.c.xml172
-rw-r--r--Documentation/DocBook/media/v4l/libv4l.xml160
-rw-r--r--Documentation/DocBook/media/v4l/lirc_device_interface.xml255
-rw-r--r--Documentation/DocBook/media/v4l/media-controller.xml105
-rw-r--r--Documentation/DocBook/media/v4l/media-func-close.xml59
-rw-r--r--Documentation/DocBook/media/v4l/media-func-ioctl.xml73
-rw-r--r--Documentation/DocBook/media/v4l/media-func-open.xml94
-rw-r--r--Documentation/DocBook/media/v4l/media-ioc-device-info.xml132
-rw-r--r--Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml180
-rw-r--r--Documentation/DocBook/media/v4l/media-ioc-enum-links.xml160
-rw-r--r--Documentation/DocBook/media/v4l/media-ioc-g-topology.xml391
-rw-r--r--Documentation/DocBook/media/v4l/media-ioc-setup-link.xml84
-rw-r--r--Documentation/DocBook/media/v4l/media-types.xml379
-rw-r--r--Documentation/DocBook/media/v4l/pipeline.pdfbin20276 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-grey.xml62
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-m420.xml139
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv12.xml143
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv12m.xml153
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv12mt.xml66
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv16.xml166
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv16m.xml170
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv24.xml121
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml937
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-packed-yuv.xml236
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sbggr16.xml83
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sbggr8.xml67
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sdr-cs08.xml44
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sdr-cs14le.xml47
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sdr-cu08.xml44
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sdr-cu16le.xml46
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sdr-ru12le.xml40
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sgbrg8.xml67
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml67
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10.xml90
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml34
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml28
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml99
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb12.xml90
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-srggb8.xml67
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-uv8.xml62
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-uyvy.xml120
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-vyuy.xml120
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y10.xml79
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y10b.xml43
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y12.xml79
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y12i.xml49
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y16-be.xml81
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y16.xml81
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y41p.xml149
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-y8i.xml80
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv410.xml133
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv411p.xml147
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv420.xml149
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml162
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml166
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv422p.xml153
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml177
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yuyv.xml120
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-yvyu.xml120
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-z16.xml81
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml2003
-rw-r--r--Documentation/DocBook/media/v4l/planar-apis.xml62
-rw-r--r--Documentation/DocBook/media/v4l/remote_controllers.xml320
-rw-r--r--Documentation/DocBook/media/v4l/selection-api.xml317
-rw-r--r--Documentation/DocBook/media/v4l/selections-common.xml180
-rw-r--r--Documentation/DocBook/media/v4l/subdev-formats.xml4040
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia614
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg63
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-full.dia1588
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-full.svg163
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia1152
-rw-r--r--Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg116
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml728
-rw-r--r--Documentation/DocBook/media/v4l/v4l2grab.c.xml164
-rw-r--r--Documentation/DocBook/media/v4l/vbi_525.pdfbin3395 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/vbi_625.pdfbin3683 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/vbi_hsync.pdfbin7405 -> 0 bytes
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-create-bufs.xml158
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-cropcap.xml166
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml207
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml227
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-decoder-cmd.xml259
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dqevent.xml471
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml210
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml197
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml128
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml159
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-frameintervals.xml260
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-framesizes.xml265
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml175
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enumaudio.xml76
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enumaudioout.xml79
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enuminput.xml316
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enumoutput.xml201
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enumstd.xml389
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-expbuf.xml205
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-audio.xml172
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-audioout.xml138
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-crop.xml129
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml133
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml343
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-edid.xml173
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-enc-index.xml189
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml456
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml459
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-fmt.xml204
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-frequency.xml148
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-input.xml83
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml175
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-modulator.xml252
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-output.xml85
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-parm.xml314
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-priority.xml135
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-selection.xml233
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml255
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-std.xml98
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-tuner.xml594
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-log-status.xml41
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-overlay.xml74
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml88
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-qbuf.xml202
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml115
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querybuf.xml106
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querycap.xml350
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-queryctrl.xml661
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querystd.xml85
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-reqbufs.xml137
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml188
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-streamon.xml136
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml151
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml153
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml118
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml158
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml177
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml135
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml159
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml130
-rw-r--r--Documentation/DocBook/media/vbi_525.gif.b6484
-rw-r--r--Documentation/DocBook/media/vbi_625.gif.b6490
-rw-r--r--Documentation/DocBook/media/vbi_hsync.gif.b6443
-rw-r--r--Documentation/DocBook/media_api.tmpl121
-rw-r--r--Documentation/Makefile.sphinx5
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt469
-rw-r--r--Documentation/arm/Atmel/README10
-rw-r--r--Documentation/binfmt_misc.txt7
-rw-r--r--Documentation/block/biodoc.txt4
-rw-r--r--Documentation/cgroup-v1/cgroups.txt4
-rw-r--r--Documentation/cgroup-v1/cpusets.txt2
-rw-r--r--Documentation/cgroup-v1/memcg_test.txt4
-rw-r--r--Documentation/coccinelle.txt148
-rw-r--r--Documentation/device-mapper/dm-flakey.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/arm,scpi.txt34
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt36
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/olimex.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/shmobile.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/tegra.txt4
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt1
-rw-r--r--Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt45
-rw-r--r--Documentation/devicetree/bindings/clock/clps711x-clock.txt4
-rw-r--r--Documentation/devicetree/bindings/display/arm,malidp.txt65
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt26
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix_dp.txt1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sii902x.txt35
-rw-r--r--Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt53
-rw-r--r--Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt4
-rw-r--r--Documentation/devicetree/bindings/display/connector/hdmi-connector.txt1
-rw-r--r--Documentation/devicetree/bindings/display/fsl,dcu.txt9
-rw-r--r--Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt148
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi.txt117
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp.txt59
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp4.txt112
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp5.txt160
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-dpi.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt7
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt9
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt13
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt4
-rw-r--r--Documentation/devicetree/bindings/firmware/qcom,scm.txt28
-rw-r--r--Documentation/devicetree/bindings/iio/adc/at91_adc.txt12
-rw-r--r--Documentation/devicetree/bindings/input/clps711x-keypad.txt4
-rw-r--r--Documentation/devicetree/bindings/input/rotary-encoder.txt4
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt36
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt33
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt1
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt2
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.txt13
-rw-r--r--Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt64
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/lp855x.txt2
-rw-r--r--Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt23
-rw-r--r--Documentation/devicetree/bindings/media/nokia,n900-ir20
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt136
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt21
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt4
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt7
-rw-r--r--Documentation/devicetree/bindings/mfd/twl6040.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.txt35
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt18
-rw-r--r--Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt36
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-quadspi.txt32
-rw-r--r--Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt1
-rw-r--r--Documentation/devicetree/bindings/mtd/cadence-quadspi.txt56
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmc-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt24
-rw-r--r--Documentation/devicetree/bindings/mtd/mtk-nand.txt160
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt6
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-enet.txt4
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/aardvark-pci.txt56
-rw-r--r--Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt46
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pci.txt4
-rw-r--r--Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt9
-rw-r--r--Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt60
-rw-r--r--Documentation/devicetree/bindings/power/renesas,apmu.txt31
-rw-r--r--Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt1
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt43
-rw-r--r--Documentation/devicetree/bindings/powerpc/opal/oppanel-opal.txt14
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt21
-rw-r--r--Documentation/devicetree/bindings/pwm/cirrus,clps711x-pwm.txt5
-rw-r--r--Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.txt23
-rw-r--r--Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt12
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.txt38
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt38
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt33
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt18
-rw-r--r--Documentation/devicetree/bindings/regulator/pwm-regulator.txt19
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/ramoops.txt (renamed from Documentation/devicetree/bindings/misc/ramoops.txt)8
-rw-r--r--Documentation/devicetree/bindings/reset/amlogic,meson-reset.txt18
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt4
-rw-r--r--Documentation/devicetree/bindings/reset/ti-syscon-reset.txt91
-rw-r--r--Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt14
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-opal.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/mtk-uart.txt1
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/brg.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/i2c.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/pic.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/usb.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt124
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt)52
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/firmware.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/serial.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt)0
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/uqe_serial.txt17
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt4
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smsm.txt4
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt116
-rw-r--r--Documentation/devicetree/bindings/sound/adi,adau17x1.txt8
-rw-r--r--Documentation/devicetree/bindings/sound/adi,adau7002.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt67
-rw-r--r--Documentation/devicetree/bindings/sound/bt-sco.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l33.txt126
-rw-r--r--Documentation/devicetree/bindings/sound/cs53l30.txt44
-rw-r--r--Documentation/devicetree/bindings/sound/designware-i2s.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-asoc-card.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max98504.txt44
-rw-r--r--Documentation/devicetree/bindings/sound/max9860.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt150
-rw-r--r--Documentation/devicetree/bindings/sound/mt2701-cs42448.txt43
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/omap-mcpdm.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-i2s.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/rt5514.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,odroidx2-max98090.txt35
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt20
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-i2s.txt34
-rw-r--r--Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/atmel-usb.txt14
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt8
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt16
-rw-r--r--Documentation/devicetree/bindings/watchdog/meson-gxbb-wdt.txt16
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas-wdt.txt6
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/driver-model/devres.txt7
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/nilfs2.txt3
-rw-r--r--Documentation/filesystems/orangefs.txt50
-rw-r--r--Documentation/filesystems/porting7
-rw-r--r--Documentation/filesystems/tmpfs.txt2
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Documentation/gcc-plugins.txt87
-rw-r--r--Documentation/gpu/drm-internals.rst381
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst260
-rw-r--r--Documentation/gpu/drm-kms.rst653
-rw-r--r--Documentation/gpu/drm-mm.rst454
-rw-r--r--Documentation/gpu/drm-uapi.rst111
-rw-r--r--Documentation/gpu/i915.rst347
-rw-r--r--Documentation/gpu/index.rst14
-rw-r--r--Documentation/gpu/introduction.rst51
-rw-r--r--Documentation/gpu/kms-properties.csv129
-rw-r--r--Documentation/gpu/vga-switcheroo.rst98
-rw-r--r--Documentation/index.rst1
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/kernel-parameters.txt38
-rw-r--r--Documentation/kernel-per-CPU-kthreads.txt2
-rw-r--r--Documentation/mmc/mmc-dev-attrs.txt2
-rw-r--r--Documentation/module-signing.txt6
-rw-r--r--Documentation/ramoops.txt38
-rw-r--r--Documentation/rapidio/mport_cdev.txt3
-rw-r--r--Documentation/rapidio/rio_cm.txt119
-rw-r--r--Documentation/rapidio/tsi721.txt26
-rw-r--r--Documentation/scheduler/sched-deadline.txt2
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt2
-rw-r--r--Documentation/scheduler/sched-rt-group.txt2
-rw-r--r--Documentation/sound/alsa/soc/machine.txt2
-rw-r--r--Documentation/sound/alsa/timestamping.txt12
-rw-r--r--Documentation/sysctl/kernel.txt14
-rw-r--r--Documentation/virtual/kvm/api.txt115
-rw-r--r--Documentation/virtual/kvm/devices/arm-vgic.txt25
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt87
-rw-r--r--Documentation/virtual/kvm/locking.txt4
-rw-r--r--Documentation/vm/numa4
-rw-r--r--Documentation/vm/numa_memory_policy.txt2
-rw-r--r--Documentation/vm/page_migration2
-rw-r--r--Documentation/vm/unevictable-lru.txt2
-rw-r--r--Documentation/watchdog/hpwdt.txt5
-rw-r--r--Documentation/watchdog/src/watchdog-test.c39
-rw-r--r--Documentation/watchdog/watchdog-kernel-api.txt9
-rw-r--r--Documentation/x86/x86_64/fake-numa-for-cpusets4
-rw-r--r--MAINTAINERS233
-rw-r--r--Makefile45
-rw-r--r--arch/Kconfig46
-rw-r--r--arch/alpha/boot/Makefile2
-rw-r--r--arch/alpha/include/asm/dma-mapping.h2
-rw-r--r--arch/alpha/include/asm/rtc.h1
-rw-r--r--arch/alpha/include/asm/thread_info.h27
-rw-r--r--arch/alpha/kernel/core_marvel.c1
-rw-r--r--arch/alpha/kernel/machvec_impl.h2
-rw-r--r--arch/alpha/kernel/pci-noop.c2
-rw-r--r--arch/alpha/kernel/pci_iommu.c12
-rw-r--r--arch/alpha/kernel/rtc.c6
-rw-r--r--arch/arc/boot/dts/nsimosci.dts14
-rw-r--r--arch/arc/boot/dts/nsimosci_hs.dts14
-rw-r--r--arch/arc/boot/dts/nsimosci_hs_idu.dts14
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi13
-rw-r--r--arch/arc/boot/dts/vdk_hs38_smp.dts2
-rw-r--r--arch/arc/configs/nsimosci_defconfig3
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig3
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig3
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig7
-rw-r--r--arch/arc/mm/dma.c12
-rw-r--r--arch/arc/mm/init.c2
-rw-r--r--arch/arm/Kconfig52
-rw-r--r--arch/arm/Kconfig.debug65
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/dts/Makefile43
-rw-r--r--arch/arm/boot/dts/aks-cdu.dts2
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi12
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts11
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts2
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts2
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts7
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi126
-rw-r--r--arch/arm/boot/dts/am3517-craneboard.dts2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi111
-rw-r--r--arch/arm/boot/dts/am437x-gp-evm.dts6
-rw-r--r--arch/arm/boot/dts/am437x-idk-evm.dts1
-rw-r--r--arch/arm/boot/dts/am437x-sbc-t43.dts2
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts2
-rw-r--r--arch/arm/boot/dts/am43xx-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/am57xx-sbc-am57x.dts2
-rw-r--r--arch/arm/boot/dts/animeo_ip.dts11
-rw-r--r--arch/arm/boot/dts/armada-388-clearfog.dts16
-rw-r--r--arch/arm/boot/dts/at91-ariag25.dts11
-rw-r--r--arch/arm/boot/dts/at91-cosino.dtsi9
-rw-r--r--arch/arm/boot/dts/at91-foxg20.dts13
-rw-r--r--arch/arm/boot/dts/at91-kizbox.dts4
-rw-r--r--arch/arm/boot/dts/at91-qil_a9260.dts13
-rw-r--r--arch/arm/boot/dts/at91-sam9_l9260.dts121
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts41
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi11
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts4
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts20
-rw-r--r--arch/arm/boot/dts/at91-vinco.dts2
-rw-r--r--arch/arm/boot/dts/at91rm9200.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi16
-rw-r--r--arch/arm/boot/dts/at91sam9260ek.dts211
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9g25ek.dts26
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi28
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi30
-rw-r--r--arch/arm/boot/dts/at91sam9rlek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi32
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi33
-rw-r--r--arch/arm/boot/dts/axp209.dtsi1
-rw-r--r--arch/arm/boot/dts/axp22x.dtsi12
-rw-r--r--arch/arm/boot/dts/axp809.dtsi53
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi11
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi55
-rw-r--r--arch/arm/boot/dts/bcm11351.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm21664.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm23550-sparrow.dts80
-rw-r--r--arch/arm/boot/dts/bcm23550.dtsi415
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-plus.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b.dts1
-rw-r--r--arch/arm/boot/dts/bcm2836-rpi-2-b.dts1
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi19
-rw-r--r--arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi19
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts4
-rw-r--r--arch/arm/boot/dts/bcm4708-netgear-r6250.dts4
-rw-r--r--arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts4
-rw-r--r--arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts40
-rw-r--r--arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts4
-rw-r--r--arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts6
-rw-r--r--arch/arm/boot/dts/bcm5301x-nand-cs0-bch1.dtsi15
-rw-r--r--arch/arm/boot/dts/bcm5301x-nand-cs0-bch8.dtsi16
-rw-r--r--arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi18
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi47
-rw-r--r--arch/arm/boot/dts/bcm953012er.dts104
-rw-r--r--arch/arm/boot/dts/bcm958525xmc.dts109
-rw-r--r--arch/arm/boot/dts/bcm958625hr.dts111
-rw-r--r--arch/arm/boot/dts/compulab-sb-som.dtsi2
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi2
-rw-r--r--arch/arm/boot/dts/dra7.dtsi242
-rw-r--r--arch/arm/boot/dts/dra72-evm-common.dtsi12
-rw-r--r--arch/arm/boot/dts/dra72x.dtsi16
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi25
-rw-r--r--arch/arm/boot/dts/emev2-kzm9d.dts24
-rw-r--r--arch/arm/boot/dts/emev2.dtsi26
-rw-r--r--arch/arm/boot/dts/ep7209.dtsi191
-rw-r--r--arch/arm/boot/dts/ep7211-edb7211.dts100
-rw-r--r--arch/arm/boot/dts/ep7211.dtsi12
-rw-r--r--arch/arm/boot/dts/ethernut5.dts4
-rw-r--r--arch/arm/boot/dts/evk-pro3.dts4
-rw-r--r--arch/arm/boot/dts/exynos-mfc-reserved-memory.dtsi35
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts4
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts7
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts7
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi29
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidu3.dts18
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidx.dts11
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidx2.dts11
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts23
-rw-r--r--arch/arm/boot/dts/exynos4412-smdk4412.dts7
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts4
-rw-r--r--arch/arm/boot/dts/exynos5.dtsi215
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts6
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts6
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-common.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5250-spring.dts6
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi1701
-rw-r--r--arch/arm/boot/dts/exynos5410-odroidxu.dts580
-rw-r--r--arch/arm/boot/dts/exynos5410-pinctrl.dtsi210
-rw-r--r--arch/arm/boot/dts/exynos5410-smdk5410.dts6
-rw-r--r--arch/arm/boot/dts/exynos5410.dtsi333
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts6
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts32
-rw-r--r--arch/arm/boot/dts/exynos5420-pinctrl.dtsi12
-rw-r--r--arch/arm/boot/dts/exynos5420-smdk5420.dts6
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi2770
-rw-r--r--arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi103
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi104
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts35
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3.dts35
-rw-r--r--arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi50
-rw-r--r--arch/arm/boot/dts/exynos54xx.dtsi199
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts32
-rw-r--r--arch/arm/boot/dts/ge863-pro3.dtsi9
-rw-r--r--arch/arm/boot/dts/hi3519-demb.dts42
-rw-r--r--arch/arm/boot/dts/hi3519.dtsi187
-rw-r--r--arch/arm/boot/dts/imx1-ads.dts4
-rw-r--r--arch/arm/boot/dts/imx1-apf9328.dts4
-rw-r--r--arch/arm/boot/dts/imx23-sansa.dts207
-rw-r--r--arch/arm/boot/dts/imx23-xfi3.dts179
-rw-r--r--arch/arm/boot/dts/imx23.dtsi48
-rw-r--r--arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts4
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts2
-rw-r--r--arch/arm/boot/dts/imx25-pinfunc.h627
-rw-r--r--arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi2
-rw-r--r--arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts6
-rw-r--r--arch/arm/boot/dts/imx27-pdk.dts2
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts6
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts4
-rw-r--r--arch/arm/boot/dts/imx28-apf28dev.dts2
-rw-r--r--arch/arm/boot/dts/imx28-cfa10049.dts2
-rw-r--r--arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi4
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts2
-rw-r--r--arch/arm/boot/dts/imx28-tx28.dts2
-rw-r--r--arch/arm/boot/dts/imx28.dtsi5
-rw-r--r--arch/arm/boot/dts/imx31-bug.dts2
-rw-r--r--arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts4
-rw-r--r--arch/arm/boot/dts/imx35-pdk.dts2
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts4
-rw-r--r--arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts4
-rw-r--r--arch/arm/boot/dts/imx51-ts4800.dts27
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts2
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53-tx53.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts1
-rw-r--r--arch/arm/boot/dts/imx6q-apalis-ixora.dts4
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts3
-rw-r--r--arch/arm/boot/dts/imx6q-ba16.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-bx50v3.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6q-cm-fx6.dts281
-rw-r--r--arch/arm/boot/dts/imx6q-h100.dts395
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-utilite-pro.dts197
-rw-r--r--arch/arm/boot/dts/imx6qdl-apalis.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-apf6dev.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-microsom.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabrelite.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi19
-rw-r--r--arch/arm/boot/dts/imx6qdl-tx6.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6sl-warp.dts2
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi13
-rw-r--r--arch/arm/boot/dts/imx6sx-nitrogen6sx.dts2
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi14
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dts63
-rw-r--r--arch/arm/boot/dts/imx6ul-pico-hobbit.dts6
-rw-r--r--arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts4
-rw-r--r--arch/arm/boot/dts/imx6ul-tx6ul.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi9
-rw-r--r--arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi148
-rw-r--r--arch/arm/boot/dts/imx7-colibri.dtsi571
-rw-r--r--arch/arm/boot/dts/imx7d-cl-som-imx7.dts1
-rw-r--r--arch/arm/boot/dts/imx7d-colibri-eval-v3.dts66
-rw-r--r--arch/arm/boot/dts/imx7d-colibri.dtsi54
-rw-r--r--arch/arm/boot/dts/imx7d-nitrogen7.dts3
-rw-r--r--arch/arm/boot/dts/imx7d-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts131
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi923
-rw-r--r--arch/arm/boot/dts/imx7s-colibri-eval-v3.dts51
-rw-r--r--arch/arm/boot/dts/imx7s-colibri.dtsi50
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi933
-rw-r--r--arch/arm/boot/dts/keystone-k2e.dtsi7
-rw-r--r--arch/arm/boot/dts/keystone-k2g-evm.dts11
-rw-r--r--arch/arm/boot/dts/keystone-k2g.dtsi8
-rw-r--r--arch/arm/boot/dts/keystone-k2l.dtsi149
-rw-r--r--arch/arm/boot/dts/keystone.dtsi15
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts4
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi1
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi7
-rw-r--r--arch/arm/boot/dts/mpa1600.dts11
-rw-r--r--arch/arm/boot/dts/omap24xx-clocks.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts10
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts10
-rw-r--r--arch/arm/boot/dts/omap3-cm-t3x.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000-common.dtsi12
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000-lcd43.dts2
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000-lcd70.dts2
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi72
-rw-r--r--arch/arm/boot/dts/omap3-ha-lcd.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0020-common.dtsi8
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts12
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-dvi.dtsi8
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-pandora-common.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-sb-t35.dtsi8
-rw-r--r--arch/arm/boot/dts/omap3-thunder.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi10
-rw-r--r--arch/arm/boot/dts/omap4-duovero-parlor.dts2
-rw-r--r--arch/arm/boot/dts/omap4-duovero.dtsi5
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi21
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts13
-rw-r--r--arch/arm/boot/dts/omap4-var-om44customboard.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4-var-som-om44.dtsi5
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi13
-rw-r--r--arch/arm/boot/dts/omap5-cm-t54.dts15
-rw-r--r--arch/arm/boot/dts/pm9g45.dts9
-rw-r--r--arch/arm/boot/dts/pxa27x.dtsi7
-rw-r--r--arch/arm/boot/dts/pxa2xx.dtsi8
-rw-r--r--arch/arm/boot/dts/pxa3xx.dtsi133
-rw-r--r--arch/arm/boot/dts/qcom-apq8060-dragonboard.dts626
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval-pins.dtsi (renamed from arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi)0
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts (renamed from arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts)10
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts6
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-pins.dtsi40
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts44
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi13
-rw-r--r--arch/arm/boot/dts/qcom-apq8074-dragonboard.dts247
-rw-r--r--arch/arm/boot/dts/qcom-apq8084.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom-msm8660-surf.dts11
-rw-r--r--arch/arm/boot/dts/qcom-msm8660.dtsi166
-rw-r--r--arch/arm/boot/dts/qcom-msm8960.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts4
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi28
-rw-r--r--arch/arm/boot/dts/qcom-pma8084.dtsi20
-rw-r--r--arch/arm/boot/dts/r7s72100-genmai.dts6
-rw-r--r--arch/arm/boot/dts/r8a73a4-ape6evm.dts12
-rw-r--r--arch/arm/boot/dts/r8a73a4.dtsi34
-rw-r--r--arch/arm/boot/dts/r8a7740-armadillo800eva.dts20
-rw-r--r--arch/arm/boot/dts/r8a7740.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7778-bockw.dts7
-rw-r--r--arch/arm/boot/dts/r8a7778.dtsi28
-rw-r--r--arch/arm/boot/dts/r8a7779-marzen.dts10
-rw-r--r--arch/arm/boot/dts/r8a7790-lager.dts30
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi237
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts20
-rw-r--r--arch/arm/boot/dts/r8a7791-porter.dts12
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi227
-rw-r--r--arch/arm/boot/dts/r8a7792-blanche.dts66
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi385
-rw-r--r--arch/arm/boot/dts/r8a7793-gose.dts18
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi213
-rw-r--r--arch/arm/boot/dts/r8a7794-alt.dts4
-rw-r--r--arch/arm/boot/dts/r8a7794-silk.dts22
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi132
-rw-r--r--arch/arm/boot/dts/rk3228-evb.dts2
-rw-r--r--arch/arm/boot/dts/rk3229-evb.dts90
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi (renamed from arch/arm/boot/dts/rk3228.dtsi)118
-rw-r--r--arch/arm/boot/dts/rk3288-firefly.dtsi31
-rw-r--r--arch/arm/boot/dts/rk3288-miqi.dts26
-rw-r--r--arch/arm/boot/dts/rk3288-popmetal.dts31
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-som.dtsi31
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi101
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi1
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi31
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi5
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi37
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi44
-rw-r--r--arch/arm/boot/dts/sama5d31ek.dts1
-rw-r--r--arch/arm/boot/dts/sama5d33ek.dts1
-rw-r--r--arch/arm/boot/dts/sama5d34ek.dts1
-rw-r--r--arch/arm/boot/dts/sama5d35ek.dts2
-rw-r--r--arch/arm/boot/dts/sama5d36ek.dts2
-rw-r--r--arch/arm/boot/dts/sama5d3xcm.dtsi34
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi12
-rw-r--r--arch/arm/boot/dts/sama5d3xmb_emac.dtsi26
-rw-r--r--arch/arm/boot/dts/sama5d3xmb_gmac.dtsi48
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi40
-rw-r--r--arch/arm/boot/dts/sh73a0-kzm9g.dts19
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi2
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi24
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk.dtsi7
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi1
-rw-r--r--arch/arm/boot/dts/ste-href-tvk1281618.dtsi62
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi1
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi120
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts84
-rw-r--r--arch/arm/boot/dts/stih410-clock.dtsi9
-rw-r--r--arch/arm/boot/dts/sun4i-a10-a1000.dts4
-rw-r--r--arch/arm/boot/dts/sun4i-a10-hackberry.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts1
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi227
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts19
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-mk802.dts32
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts7
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts10
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi14
-rw-r--r--arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts178
-rw-r--r--arch/arm/boot/dts/sun5i-a13-q8-tablet.dts40
-rw-r--r--arch/arm/boot/dts/sun5i-a13-utoo-p66.dts180
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi122
-rw-r--r--arch/arm/boot/dts/sun5i-r8.dtsi120
-rw-r--r--arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi (renamed from arch/arm/boot/dts/sun5i-q8-common.dtsi)36
-rw-r--r--arch/arm/boot/dts/sun5i.dtsi2
-rw-r--r--arch/arm/boot/dts/sun6i-a31-m9.dts96
-rw-r--r--arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts96
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts229
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts80
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi474
-rw-r--r--arch/arm/boot/dts/sun8i-a23-inet86dz.dts58
-rw-r--r--arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts50
-rw-r--r--arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts195
-rw-r--r--arch/arm/boot/dts/sun8i-a23-q8-tablet.dts15
-rw-r--r--arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts77
-rw-r--r--arch/arm/boot/dts/sun8i-a33-q8-tablet.dts15
-rw-r--r--arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts195
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi21
-rw-r--r--arch/arm/boot/dts/sun8i-q8-common.dtsi139
-rw-r--r--arch/arm/boot/dts/sun8i-r16-parrot.dts351
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi216
-rw-r--r--arch/arm/boot/dts/sun9i-a80-cubieboard4.dts164
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts195
-rw-r--r--arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi (renamed from arch/arm/boot/dts/sunxi-q8-common.dtsi)0
-rw-r--r--arch/arm/boot/dts/tegra114-dalmore.dts2
-rw-r--r--arch/arm/boot/dts/tegra114-roth.dts4
-rw-r--r--arch/arm/boot/dts/tegra114-tn7.dts2
-rw-r--r--arch/arm/boot/dts/tegra124-apalis-emc.dtsi1502
-rw-r--r--arch/arm/boot/dts/tegra124-apalis-eval.dts284
-rw-r--r--arch/arm/boot/dts/tegra124-apalis.dtsi2100
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1.dts88
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-big.dts4
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-blaze.dts2
-rw-r--r--arch/arm/boot/dts/tegra124-nyan.dtsi60
-rw-r--r--arch/arm/boot/dts/tegra124-venice2.dts70
-rw-r--r--arch/arm/boot/dts/tegra124.dtsi125
-rw-r--r--arch/arm/boot/dts/tegra20-colibri-512.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra20-harmony.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-paz00.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-tamonten.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-ventana.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-whistler.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-apalis.dtsi7
-rw-r--r--arch/arm/boot/dts/tegra30-beaver.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30-colibri-eval-v3.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-colibri.dtsi2
-rw-r--r--arch/arm/boot/dts/tny_a9260_common.dtsi9
-rw-r--r--arch/arm/boot/dts/tny_a9263.dts11
-rw-r--r--arch/arm/boot/dts/uniphier-common32.dtsi12
-rw-r--r--arch/arm/boot/dts/uniphier-ph1-ld4.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-ph1-pro4.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-ph1-pro5.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-ph1-sld8.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-pinctrl.dtsi5
-rw-r--r--arch/arm/boot/dts/uniphier-proxstream2-gentil.dts8
-rw-r--r--arch/arm/boot/dts/uniphier-proxstream2-vodka.dts8
-rw-r--r--arch/arm/boot/dts/uniphier-proxstream2.dtsi2
-rw-r--r--arch/arm/boot/dts/usb_a9260_common.dtsi13
-rw-r--r--arch/arm/boot/dts/usb_a9263.dts11
-rw-r--r--arch/arm/boot/dts/usb_a9g20_common.dtsi5
-rw-r--r--arch/arm/common/dmabounce.c4
-rw-r--r--arch/arm/configs/bcm_defconfig141
-rw-r--r--arch/arm/configs/exynos_defconfig29
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig4
-rw-r--r--arch/arm/configs/keystone_defconfig1
-rw-r--r--arch/arm/configs/lpc18xx_defconfig2
-rw-r--r--arch/arm/configs/multi_v4t_defconfig106
-rw-r--r--arch/arm/configs/multi_v5_defconfig54
-rw-r--r--arch/arm/configs/multi_v7_defconfig52
-rw-r--r--arch/arm/configs/omap2plus_defconfig5
-rw-r--r--arch/arm/configs/qcom_defconfig8
-rw-r--r--arch/arm/configs/sama5_defconfig3
-rw-r--r--arch/arm/configs/shmobile_defconfig1
-rw-r--r--arch/arm/configs/socfpga_defconfig16
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/include/asm/dma-mapping.h13
-rw-r--r--arch/arm/include/asm/kexec.h24
-rw-r--r--arch/arm/include/asm/kvm_asm.h2
-rw-r--r--arch/arm/include/asm/kvm_host.h27
-rw-r--r--arch/arm/include/asm/kvm_hyp.h3
-rw-r--r--arch/arm/include/asm/kvm_mmu.h15
-rw-r--r--arch/arm/include/asm/mach/pci.h1
-rw-r--r--arch/arm/include/asm/pgtable.h4
-rw-r--r--arch/arm/include/asm/uaccess.h11
-rw-r--r--arch/arm/include/asm/virt.h4
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h16
-rw-r--r--arch/arm/include/debug/at91.S10
-rw-r--r--arch/arm/include/debug/clps711x.S4
-rw-r--r--arch/arm/include/debug/exynos.S6
-rw-r--r--arch/arm/include/debug/samsung.S8
-rw-r--r--arch/arm/kernel/bios32.c45
-rw-r--r--arch/arm/kernel/machine_kexec.c2
-rw-r--r--arch/arm/kernel/setup.c39
-rw-r--r--arch/arm/kernel/vmlinux.lds.S2
-rw-r--r--arch/arm/kvm/Kconfig9
-rw-r--r--arch/arm/kvm/Makefile7
-rw-r--r--arch/arm/kvm/arm.c46
-rw-r--r--arch/arm/kvm/emulate.c2
-rw-r--r--arch/arm/kvm/guest.c2
-rw-r--r--arch/arm/kvm/init.S56
-rw-r--r--arch/arm/kvm/irq.h19
-rw-r--r--arch/arm/kvm/mmu.c142
-rw-r--r--arch/arm/kvm/reset.c2
-rw-r--r--arch/arm/mach-at91/Kconfig2
-rw-r--r--arch/arm/mach-at91/pm.c5
-rw-r--r--arch/arm/mach-bcm/Kconfig19
-rw-r--r--arch/arm/mach-bcm/Makefile5
-rw-r--r--arch/arm/mach-bcm/board_bcm21664.c43
-rw-r--r--arch/arm/mach-bcm/board_bcm23550.c25
-rw-r--r--arch/arm/mach-bcm/kona_l2_cache.c1
-rw-r--r--arch/arm/mach-bcm/platsmp.c174
-rw-r--r--arch/arm/mach-berlin/Kconfig2
-rw-r--r--arch/arm/mach-clps711x/Kconfig53
-rw-r--r--arch/arm/mach-clps711x/Makefile14
-rw-r--r--arch/arm/mach-clps711x/Makefile.boot5
-rw-r--r--arch/arm/mach-clps711x/board-dt.c82
-rw-r--r--arch/arm/mach-clps711x/common.c4
-rw-r--r--arch/arm/mach-clps711x/include/mach/clps711x.h204
-rw-r--r--arch/arm/mach-clps711x/include/mach/hardware.h53
-rw-r--r--arch/arm/mach-clps711x/include/mach/uncompress.h55
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c8
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c4
-rw-r--r--arch/arm/mach-davinci/da850.c16
-rw-r--r--arch/arm/mach-davinci/davinci.h8
-rw-r--r--arch/arm/mach-davinci/dm355.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c6
-rw-r--r--arch/arm/mach-davinci/dm644x.c3
-rw-r--r--arch/arm/mach-davinci/psc.h2
-rw-r--r--arch/arm/mach-digicolor/Kconfig2
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c2
-rw-r--r--arch/arm/mach-exynos/Kconfig3
-rw-r--r--arch/arm/mach-exynos/Makefile3
-rw-r--r--arch/arm/mach-exynos/common.h5
-rw-r--r--arch/arm/mach-exynos/exynos.c20
-rw-r--r--arch/arm/mach-exynos/firmware.c18
-rw-r--r--arch/arm/mach-exynos/headsmp.S3
-rw-r--r--arch/arm/mach-exynos/mfc.h16
-rw-r--r--arch/arm/mach-exynos/platsmp.c4
-rw-r--r--arch/arm/mach-exynos/pm.c6
-rw-r--r--arch/arm/mach-exynos/s5p-dev-mfc.c93
-rw-r--r--arch/arm/mach-exynos/suspend.c12
-rw-r--r--arch/arm/mach-hisi/hisilicon.c28
-rw-r--r--arch/arm/mach-hisi/platsmp.c4
-rw-r--r--arch/arm/mach-imx/Kconfig6
-rw-r--r--arch/arm/mach-imx/Makefile4
-rw-r--r--arch/arm/mach-imx/avic.c19
-rw-r--r--arch/arm/mach-imx/common.h3
-rw-r--r--arch/arm/mach-imx/cpu-imx5.c8
-rw-r--r--arch/arm/mach-imx/cpu.c2
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c20
-rw-r--r--arch/arm/mach-imx/devices/Kconfig4
-rw-r--r--arch/arm/mach-imx/devices/devices.c3
-rw-r--r--arch/arm/mach-imx/devices/platform-gpio-mxc.c1
-rw-r--r--arch/arm/mach-imx/devices/platform-mxc_rnga.c53
-rw-r--r--arch/arm/mach-imx/eukrea-baseboards.h42
-rw-r--r--arch/arm/mach-imx/imx27-dt.c1
-rw-r--r--arch/arm/mach-imx/imx31-dt.c12
-rw-r--r--arch/arm/mach-imx/imx35-dt.c10
-rw-r--r--arch/arm/mach-imx/irq-common.c6
-rw-r--r--arch/arm/mach-imx/mach-imx50.c1
-rw-r--r--arch/arm/mach-imx/mach-imx51.c3
-rw-r--r--arch/arm/mach-imx/mach-imx53.c1
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c4
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6sx.c2
-rw-r--r--arch/arm/mach-imx/mach-imx7d.c1
-rw-r--r--arch/arm/mach-imx/mm-imx1.c2
-rw-r--r--arch/arm/mach-imx/mm-imx27.c2
-rw-r--r--arch/arm/mach-imx/mm-imx3.c32
-rw-r--r--arch/arm/mach-imx/mxc.h101
-rw-r--r--arch/arm/mach-imx/pm-imx27.c8
-rw-r--r--arch/arm/mach-imx/pm-imx3.c38
-rw-r--r--arch/arm/mach-imx/system.c58
-rw-r--r--arch/arm/mach-imx/tzic.c16
-rw-r--r--arch/arm/mach-integrator/Kconfig4
-rw-r--r--arch/arm/mach-integrator/impd1.c4
-rw-r--r--arch/arm/mach-keystone/Kconfig2
-rw-r--r--arch/arm/mach-keystone/pm_domain.c2
-rw-r--r--arch/arm/mach-meson/Kconfig5
-rw-r--r--arch/arm/mach-mmp/Kconfig2
-rw-r--r--arch/arm/mach-moxart/Kconfig2
-rw-r--r--arch/arm/mach-mv78xx0/Kconfig2
-rw-r--r--arch/arm/mach-mv78xx0/common.c2
-rw-r--r--arch/arm/mach-mvebu/Kconfig4
-rw-r--r--arch/arm/mach-mvebu/coherency.h1
-rw-r--r--arch/arm/mach-mvebu/cpu-reset.c2
-rw-r--r--arch/arm/mach-mvebu/kirkwood-pm.c4
-rw-r--r--arch/arm/mach-mvebu/kirkwood.c2
-rw-r--r--arch/arm/mach-mvebu/pm.c1
-rw-r--r--arch/arm/mach-mvebu/pmsu.c3
-rw-r--r--arch/arm/mach-mvebu/system-controller.c2
-rw-r--r--arch/arm/mach-mxs/Kconfig2
-rw-r--r--arch/arm/mach-nomadik/Kconfig2
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq.c2
-rw-r--r--arch/arm/mach-omap1/include/mach/mtd-xip.h2
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/Makefile9
-rw-r--r--arch/arm/mach-omap2/board-ldp.c3
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c5
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c4
-rw-r--r--arch/arm/mach-omap2/clockdomain.c36
-rw-r--r--arch/arm/mach-omap2/clockdomain.h2
-rw-r--r--arch/arm/mach-omap2/cm33xx.c3
-rw-r--r--arch/arm/mach-omap2/cm3xxx.c2
-rw-r--r--arch/arm/mach-omap2/common.h8
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c2
-rw-r--r--arch/arm/mach-omap2/display.c2
-rw-r--r--arch/arm/mach-omap2/display.h5
-rw-r--r--arch/arm/mach-omap2/dss-common.c2
-rw-r--r--arch/arm/mach-omap2/io.c3
-rw-r--r--arch/arm/mach-omap2/mcbsp.c31
-rw-r--r--arch/arm/mach-omap2/mux34xx.c4
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S18
-rw-r--r--arch/arm/mach-omap2/omap-hotplug.c6
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c31
-rw-r--r--arch/arm/mach-omap2/omap-smp.c98
-rw-r--r--arch/arm/mach-omap2/omap4-common.c16
-rw-r--r--arch/arm/mach-omap2/omap_device.c21
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c82
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h18
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c64
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c85
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c54
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c41
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c2
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c28
-rw-r--r--arch/arm/mach-omap2/pm.c8
-rw-r--r--arch/arm/mach-omap2/powerdomain.c20
-rw-r--r--arch/arm/mach-omap2/prcm43xx.h1
-rw-r--r--arch/arm/mach-omap2/prm33xx.h2
-rw-r--r--arch/arm/mach-omap2/sdrc.h4
-rw-r--r--arch/arm/mach-omap2/timer.c3
-rw-r--r--arch/arm/mach-orion5x/Kconfig2
-rw-r--r--arch/arm/mach-orion5x/irq.c2
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c2
-rw-r--r--arch/arm/mach-oxnas/Kconfig4
-rw-r--r--arch/arm/mach-picoxcell/Kconfig2
-rw-r--r--arch/arm/mach-prima2/Kconfig2
-rw-r--r--arch/arm/mach-pxa/cm-x270.c2
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/em-x270.c2
-rw-r--r--arch/arm/mach-qcom/Kconfig4
-rw-r--r--arch/arm/mach-qcom/board.c1
-rw-r--r--arch/arm/mach-rockchip/Kconfig2
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig2
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/regs-gpio.h2
-rw-r--r--arch/arm/mach-s3c24xx/iotiming-s3c2410.c2
-rw-r--r--arch/arm/mach-s3c24xx/mach-n30.c2
-rw-r--r--arch/arm/mach-s3c24xx/mach-osiris-dvs.c2
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2410.c3
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c1
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c1
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig2
-rw-r--r--arch/arm/mach-s3c64xx/common.h1
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/map.h2
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq.c1
-rw-r--r--arch/arm/mach-s5pv210/Kconfig2
-rw-r--r--arch/arm/mach-shmobile/Kconfig6
-rw-r--r--arch/arm/mach-shmobile/Makefile1
-rw-r--r--arch/arm/mach-shmobile/common.h1
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c94
-rw-r--r--arch/arm/mach-shmobile/platsmp.c6
-rw-r--r--arch/arm/mach-shmobile/pm-r8a7779.c6
-rw-r--r--arch/arm/mach-shmobile/pm-rcar-gen2.c19
-rw-r--r--arch/arm/mach-shmobile/pm-rmobile.c2
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7790.c1
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7791.c1
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7792.c35
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c45
-rw-r--r--arch/arm/mach-spear/Kconfig2
-rw-r--r--arch/arm/mach-sti/Kconfig2
-rw-r--r--arch/arm/mach-sti/board-dt.c11
-rw-r--r--arch/arm/mach-sunxi/Kconfig2
-rw-r--r--arch/arm/mach-tango/Makefile1
-rw-r--r--arch/arm/mach-tango/platsmp.c35
-rw-r--r--arch/arm/mach-tango/pm.c32
-rw-r--r--arch/arm/mach-tango/smc.h5
-rw-r--r--arch/arm/mach-tegra/Kconfig2
-rw-r--r--arch/arm/mach-tegra/common.h22
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra114.c1
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c1
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra30.c1
-rw-r--r--arch/arm/mach-tegra/cpuidle.h2
-rw-r--r--arch/arm/mach-tegra/hotplug.c1
-rw-r--r--arch/arm/mach-tegra/irq.c1
-rw-r--r--arch/arm/mach-tegra/pm.h2
-rw-r--r--arch/arm/mach-tegra/tegra.c24
-rw-r--r--arch/arm/mach-u300/Kconfig2
-rw-r--r--arch/arm/mach-uniphier/Makefile1
-rw-r--r--arch/arm/mach-uniphier/platsmp.c18
-rw-r--r--arch/arm/mach-uniphier/uniphier.c30
-rw-r--r--arch/arm/mach-ux500/Kconfig2
-rw-r--r--arch/arm/mach-ux500/Makefile6
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.c1065
-rw-r--r--arch/arm/mach-ux500/board-mop500-regulators.h24
-rw-r--r--arch/arm/mach-ux500/cache-l2x0.c67
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c139
-rw-r--r--arch/arm/mach-ux500/cpu.c148
-rw-r--r--arch/arm/mach-ux500/id.c116
-rw-r--r--arch/arm/mach-ux500/id.h144
-rw-r--r--arch/arm/mach-ux500/platsmp.c1
-rw-r--r--arch/arm/mach-ux500/setup.h12
-rw-r--r--arch/arm/mach-vexpress/Kconfig2
-rw-r--r--arch/arm/mach-vexpress/hotplug.c2
-rw-r--r--arch/arm/mach-vexpress/spc.c6
-rw-r--r--arch/arm/mach-vt8500/Kconfig2
-rw-r--r--arch/arm/mm/dma-mapping.c129
-rw-r--r--arch/arm/plat-iop/setup.c4
-rw-r--r--arch/arm/plat-samsung/cpu.c10
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu-freq-core.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/fb-s3c2410.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/pm-check.c2
-rw-r--r--arch/arm/plat-samsung/pm-common.c8
-rw-r--r--arch/arm/plat-samsung/watchdog-reset.c2
-rw-r--r--arch/arm/plat-versatile/platsmp.c2
-rw-r--r--arch/arm/xen/mm.c8
-rw-r--r--arch/arm64/Kconfig8
-rw-r--r--arch/arm64/Kconfig.platforms28
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts20
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi9
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi170
-rw-r--r--arch/arm64/boot/dts/apm/apm-shadowcat.dtsi68
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi44
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi357
-rw-r--r--arch/arm64/boot/dts/arm/juno-r1.dts40
-rw-r--r--arch/arm64/boot/dts/arm/juno-r2.dts40
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts24
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts30
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2837.dtsi76
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-svk.dts37
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2.dtsi119
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7-espresso.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi24
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi44
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts143
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220.dtsi18
-rw-r--r--arch/arm64/boot/dts/lg/Makefile1
-rw-r--r--arch/arm64/boot/dts/lg/lg1313-ref.dts36
-rw-r--r--arch/arm64/boot/dts/lg/lg1313.dtsi351
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-db.dts5
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi60
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi18
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6755-evb.dts38
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6755.dtsi145
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi223
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi249
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts45
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi319
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-smaug.dts314
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi292
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi78
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-pins.dtsi303
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi103
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts14
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi124
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts50
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi138
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-r88.dts16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-evb.dts12
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi313
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi20
-rw-r--r--arch/arm64/configs/defconfig49
-rw-r--r--arch/arm64/include/asm/cpufeature.h3
-rw-r--r--arch/arm64/include/asm/dma-mapping.h17
-rw-r--r--arch/arm64/include/asm/elf.h1
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/kvm_host.h19
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h23
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h96
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h4
-rw-r--r--arch/arm64/include/asm/pgtable.h17
-rw-r--r--arch/arm64/include/asm/uaccess.h15
-rw-r--r--arch/arm64/include/asm/virt.h4
-rw-r--r--arch/arm64/include/uapi/asm/auxvec.h2
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c19
-rw-r--r--arch/arm64/kernel/head.S21
-rw-r--r--arch/arm64/kernel/pci.c159
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S13
-rw-r--r--arch/arm64/kvm/Kconfig10
-rw-r--r--arch/arm64/kvm/Makefile10
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/arm64/kvm/hyp-init.S61
-rw-r--r--arch/arm64/kvm/hyp/entry.S19
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S15
-rw-r--r--arch/arm64/kvm/hyp/switch.c11
-rw-r--r--arch/arm64/kvm/inject_fault.c12
-rw-r--r--arch/arm64/kvm/irq.h19
-rw-r--r--arch/arm64/kvm/reset.c38
-rw-r--r--arch/arm64/kvm/sys_regs.c4
-rw-r--r--arch/arm64/mm/dma-mapping.c66
-rw-r--r--arch/arm64/mm/mmu.c8
-rw-r--r--arch/avr32/mm/dma-coherent.c12
-rw-r--r--arch/blackfin/kernel/dma-mapping.c8
-rw-r--r--arch/blackfin/mm/init.c2
-rw-r--r--arch/c6x/include/asm/dma-mapping.h4
-rw-r--r--arch/c6x/kernel/dma.c9
-rw-r--r--arch/c6x/mm/dma-coherent.c4
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c2
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c2
-rw-r--r--arch/cris/arch-v32/drivers/pci/dma.c9
-rw-r--r--arch/frv/include/asm/mc146818rtc.h16
-rw-r--r--arch/frv/mb93090-mb00/pci-dma-nommu.c8
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c9
-rw-r--r--arch/h8300/include/asm/mc146818rtc.h9
-rw-r--r--arch/h8300/kernel/dma.c8
-rw-r--r--arch/hexagon/include/asm/dma-mapping.h1
-rw-r--r--arch/hexagon/kernel/dma.c8
-rw-r--r--arch/hexagon/mm/init.c2
-rw-r--r--arch/ia64/Kconfig1
-rw-r--r--arch/ia64/hp/common/sba_iommu.c22
-rw-r--r--arch/ia64/include/asm/machvec.h1
-rw-r--r--arch/ia64/include/asm/mc146818rtc.h10
-rw-r--r--arch/ia64/include/asm/thread_info.h28
-rw-r--r--arch/ia64/include/asm/uaccess.h18
-rw-r--r--arch/ia64/kernel/efi.c4
-rw-r--r--arch/ia64/kernel/machine_kexec.c2
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c4
-rw-r--r--arch/ia64/kernel/salinfo.c38
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/ia64/sn/pci/pci_dma.c22
-rw-r--r--arch/m68k/amiga/config.c1
-rw-r--r--arch/m68k/apollo/config.c1
-rw-r--r--arch/m68k/bvme6000/config.c1
-rw-r--r--arch/m68k/hp300/config.c2
-rw-r--r--arch/m68k/include/asm/flat.h13
-rw-r--r--arch/m68k/include/asm/processor.h15
-rw-r--r--arch/m68k/include/asm/rtc.h79
-rw-r--r--arch/m68k/kernel/dma.c12
-rw-r--r--arch/m68k/kernel/time.c48
-rw-r--r--arch/m68k/mac/config.c3
-rw-r--r--arch/m68k/mac/misc.c1
-rw-r--r--arch/m68k/mvme147/config.c1
-rw-r--r--arch/m68k/mvme16x/config.c1
-rw-r--r--arch/m68k/q40/config.c2
-rw-r--r--arch/m68k/sun3/config.c1
-rw-r--r--arch/m68k/sun3/intersil.c2
-rw-r--r--arch/m68k/sun3x/time.c2
-rw-r--r--arch/metag/include/asm/cmpxchg_lnkget.h2
-rw-r--r--arch/metag/include/asm/metag_mem.h2
-rw-r--r--arch/metag/include/asm/metag_regs.h2
-rw-r--r--arch/metag/kernel/cachepart.c2
-rw-r--r--arch/metag/kernel/dma.c16
-rw-r--r--arch/metag/lib/divsi3.S4
-rw-r--r--arch/metag/mm/fault.c2
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h1
-rw-r--r--arch/microblaze/include/asm/pci.h3
-rw-r--r--arch/microblaze/include/asm/thread_info.h27
-rw-r--r--arch/microblaze/kernel/dma.c12
-rw-r--r--arch/microblaze/mm/init.c4
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/microblaze/pci/pci-common.c73
-rw-r--r--arch/mips/Kconfig49
-rw-r--r--arch/mips/ath79/setup.c4
-rw-r--r--arch/mips/bmips/setup.c4
-rw-r--r--arch/mips/boot/compressed/decompress.c17
-rw-r--r--arch/mips/boot/compressed/head.S16
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts20
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts12
-rw-r--r--arch/mips/boot/tools/relocs_64.c19
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c22
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c14
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c125
-rw-r--r--arch/mips/cavium-octeon/setup.c20
-rw-r--r--arch/mips/cavium-octeon/smp.c1
-rw-r--r--arch/mips/cobalt/setup.c4
-rw-r--r--arch/mips/configs/ath25_defconfig119
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig2
-rw-r--r--arch/mips/include/asm/addrspace.h2
-rw-r--r--arch/mips/include/asm/bootinfo.h4
-rw-r--r--arch/mips/include/asm/dsemul.h92
-rw-r--r--arch/mips/include/asm/elf.h4
-rw-r--r--arch/mips/include/asm/fpu_emulator.h17
-rw-r--r--arch/mips/include/asm/kvm_host.h315
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/mangle-port.h42
-rw-r--r--arch/mips/include/asm/mips-cm.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h21
-rw-r--r--arch/mips/include/asm/mmu.h9
-rw-r--r--arch/mips/include/asm/mmu_context.h6
-rw-r--r--arch/mips/include/asm/msa.h2
-rw-r--r--arch/mips/include/asm/page.h44
-rw-r--r--arch/mips/include/asm/pci.h10
-rw-r--r--arch/mips/include/asm/pgtable.h16
-rw-r--r--arch/mips/include/asm/processor.h18
-rw-r--r--arch/mips/include/asm/r4kcache.h4
-rw-r--r--arch/mips/include/asm/seccomp.h4
-rw-r--r--arch/mips/include/asm/setup.h1
-rw-r--r--arch/mips/include/asm/signal.h6
-rw-r--r--arch/mips/include/asm/smp.h4
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/include/asm/uaccess.h2
-rw-r--r--arch/mips/include/asm/uasm.h7
-rw-r--r--arch/mips/include/uapi/asm/auxvec.h2
-rw-r--r--arch/mips/include/uapi/asm/inst.h114
-rw-r--r--arch/mips/jz4740/setup.c2
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/asm-offsets.c70
-rw-r--r--arch/mips/kernel/branch.c8
-rw-r--r--arch/mips/kernel/cevt-r4k.c7
-rw-r--r--arch/mips/kernel/cpu-bugs64.c6
-rw-r--r--arch/mips/kernel/csrc-r4k.c4
-rw-r--r--arch/mips/kernel/elf.c23
-rw-r--r--arch/mips/kernel/head.S21
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c42
-rw-r--r--arch/mips/kernel/pm-cps.c4
-rw-r--r--arch/mips/kernel/process.c14
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/segment.c13
-rw-r--r--arch/mips/kernel/setup.c4
-rw-r--r--arch/mips/kernel/signal.c18
-rw-r--r--arch/mips/kernel/signal32.c288
-rw-r--r--arch/mips/kernel/signal_o32.c285
-rw-r--r--arch/mips/kernel/smp-bmips.c1
-rw-r--r--arch/mips/kernel/smp-cps.c46
-rw-r--r--arch/mips/kernel/smp.c34
-rw-r--r--arch/mips/kernel/traps.c27
-rw-r--r--arch/mips/kernel/unaligned.c10
-rw-r--r--arch/mips/kernel/vdso.c10
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/Makefile3
-rw-r--r--arch/mips/kvm/commpage.c2
-rw-r--r--arch/mips/kvm/dyntrans.c182
-rw-r--r--arch/mips/kvm/emulate.c485
-rw-r--r--arch/mips/kvm/entry.c701
-rw-r--r--arch/mips/kvm/fpu.S7
-rw-r--r--arch/mips/kvm/interrupt.c12
-rw-r--r--arch/mips/kvm/interrupt.h14
-rw-r--r--arch/mips/kvm/locore.S605
-rw-r--r--arch/mips/kvm/mips.c367
-rw-r--r--arch/mips/kvm/mmu.c375
-rw-r--r--arch/mips/kvm/stats.c21
-rw-r--r--arch/mips/kvm/tlb.c498
-rw-r--r--arch/mips/kvm/trace.h236
-rw-r--r--arch/mips/kvm/trap_emul.c178
-rw-r--r--arch/mips/lantiq/irq.c31
-rw-r--r--arch/mips/lantiq/prom.c4
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c10
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c14
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c1
-rw-r--r--arch/mips/math-emu/cp1emu.c22
-rw-r--r--arch/mips/math-emu/dsemul.c333
-rw-r--r--arch/mips/mm/c-r4k.c286
-rw-r--r--arch/mips/mm/dma-default.c20
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/mm/sc-debugfs.c4
-rw-r--r--arch/mips/mm/sc-rm7k.c2
-rw-r--r--arch/mips/mm/tlbex.c12
-rw-r--r--arch/mips/mm/uasm-micromips.c13
-rw-r--r--arch/mips/mm/uasm-mips.c13
-rw-r--r--arch/mips/mm/uasm.c30
-rw-r--r--arch/mips/mti-malta/malta-dtshim.c4
-rw-r--r--arch/mips/mti-malta/malta-memory.c2
-rw-r--r--arch/mips/mti-malta/malta-setup.c2
-rw-r--r--arch/mips/net/bpf_jit.c6
-rw-r--r--arch/mips/netlogic/common/nlm-dma.c4
-rw-r--r--arch/mips/pci/pci.c19
-rw-r--r--arch/mips/pic32/pic32mzda/init.c4
-rw-r--r--arch/mips/pistachio/init.c25
-rw-r--r--arch/mips/ralink/mt7620.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c2
-rw-r--r--arch/mips/sni/time.c1
-rw-r--r--arch/mips/txx9/generic/pci.c2
-rw-r--r--arch/mn10300/Kconfig4
-rw-r--r--arch/mn10300/include/asm/rtc-regs.h4
-rw-r--r--arch/mn10300/include/asm/rtc.h2
-rw-r--r--arch/mn10300/kernel/rtc.c104
-rw-r--r--arch/mn10300/mm/dma-alloc.c8
-rw-r--r--arch/mn10300/proc-mn103e010/proc-init.c3
-rw-r--r--arch/mn10300/proc-mn2ws0050/proc-init.c1
-rw-r--r--arch/nios2/mm/dma-mapping.c12
-rw-r--r--arch/nios2/mm/init.c2
-rw-r--r--arch/openrisc/kernel/dma.c21
-rw-r--r--arch/openrisc/mm/ioremap.c4
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/hash.h146
-rw-r--r--arch/parisc/include/asm/mc146818rtc.h9
-rw-r--r--arch/parisc/include/asm/rtc.h131
-rw-r--r--arch/parisc/kernel/firmware.c6
-rw-r--r--arch/parisc/kernel/pci-dma.c18
-rw-r--r--arch/parisc/kernel/time.c36
-rw-r--r--arch/parisc/lib/iomap.c64
-rw-r--r--arch/powerpc/Kconfig72
-rw-r--r--arch/powerpc/Kconfig.debug24
-rw-r--r--arch/powerpc/Makefile7
-rw-r--r--arch/powerpc/boot/Makefile10
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/kmcoge4.dts37
-rw-r--r--arch/powerpc/boot/dts/fsl/mvme7100.dts153
-rw-r--r--arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi49
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi38
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xqds.dtsi38
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xrdb.dtsi38
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi2
-rw-r--r--arch/powerpc/boot/motload-head.S11
-rw-r--r--arch/powerpc/boot/mvme7100.c59
-rw-r--r--arch/powerpc/boot/opal-calls.S58
-rw-r--r--arch/powerpc/boot/opal.c98
-rw-r--r--arch/powerpc/boot/ops.h1
-rw-r--r--arch/powerpc/boot/ppc_asm.h4
-rw-r--r--arch/powerpc/boot/ppcboot.h2
-rw-r--r--arch/powerpc/boot/serial.c2
-rw-r--r--arch/powerpc/boot/types.h10
-rwxr-xr-xarch/powerpc/boot/wrapper5
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig1
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig1
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig1
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig3
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig1
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig1
-rw-r--r--arch/powerpc/configs/40x/virtex_defconfig1
-rw-r--r--arch/powerpc/configs/40x/walnut_defconfig1
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig8
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig1
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig1
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig2
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig1
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig8
-rw-r--r--arch/powerpc/configs/44x/ebony_defconfig1
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig2
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig4
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig8
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig1
-rw-r--r--arch/powerpc/configs/44x/rainier_defconfig1
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig6
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig1
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig1
-rw-r--r--arch/powerpc/configs/44x/virtex5_defconfig1
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig5
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig4
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/asp8347_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/kmeter1_defconfig1
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc832x_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itx_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc834x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc836x_rdk_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_mds_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig4
-rw-r--r--arch/powerpc/configs/83xx/sbc834x_defconfig5
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/kmp204x_defconfig3
-rw-r--r--arch/powerpc/configs/85xx/ksi8560_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/mpc8540_ads_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/mpc8560_ads_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/sbc8548_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/socrates_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/stx_gp3_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8540_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8541_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig1
-rw-r--r--arch/powerpc/configs/85xx/tqm8555_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/tqm8560_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig4
-rw-r--r--arch/powerpc/configs/86xx-hw.config4
-rw-r--r--arch/powerpc/configs/adder875_defconfig1
-rw-r--r--arch/powerpc/configs/amigaone_defconfig2
-rw-r--r--arch/powerpc/configs/c2k_defconfig8
-rw-r--r--arch/powerpc/configs/cell_defconfig2
-rw-r--r--arch/powerpc/configs/chrp32_defconfig1
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig5
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig1
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config4
-rw-r--r--arch/powerpc/configs/g5_defconfig7
-rw-r--r--arch/powerpc/configs/gamecube_defconfig5
-rw-r--r--arch/powerpc/configs/holly_defconfig3
-rw-r--r--arch/powerpc/configs/linkstation_defconfig4
-rw-r--r--arch/powerpc/configs/maple_defconfig5
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig1
-rw-r--r--arch/powerpc/configs/mpc512x_defconfig8
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig4
-rw-r--r--arch/powerpc/configs/mpc7448_hpc2_defconfig4
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig4
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig4
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig4
-rw-r--r--arch/powerpc/configs/mpc86xx_basic_defconfig1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig4
-rw-r--r--arch/powerpc/configs/pasemi_defconfig3
-rw-r--r--arch/powerpc/configs/pmac32_defconfig5
-rw-r--r--arch/powerpc/configs/powernv_defconfig8
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig4
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig4
-rw-r--r--arch/powerpc/configs/ppc64_defconfig6
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig6
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig9
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig5
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig7
-rw-r--r--arch/powerpc/configs/storcenter_defconfig4
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig1
-rw-r--r--arch/powerpc/configs/wii_defconfig5
-rw-r--r--arch/powerpc/include/asm/accounting.h24
-rw-r--r--arch/powerpc/include/asm/asm-compat.h2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h75
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb-radix.h15
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h60
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h11
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-4k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-64k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h99
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-hash.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h20
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h27
-rw-r--r--arch/powerpc/include/asm/cacheflush.h1
-rw-r--r--arch/powerpc/include/asm/code-patching.h10
-rw-r--r--arch/powerpc/include/asm/cpu_has_feature.h53
-rw-r--r--arch/powerpc/include/asm/cpufeature.h40
-rw-r--r--arch/powerpc/include/asm/cpuidle.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h15
-rw-r--r--arch/powerpc/include/asm/cputime.h15
-rw-r--r--arch/powerpc/include/asm/dbell.h1
-rw-r--r--arch/powerpc/include/asm/dcr-native.h1
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h7
-rw-r--r--arch/powerpc/include/asm/eeh.h4
-rw-r--r--arch/powerpc/include/asm/exception-64s.h4
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h4
-rw-r--r--arch/powerpc/include/asm/firmware.h6
-rw-r--r--arch/powerpc/include/asm/fixmap.h7
-rw-r--r--arch/powerpc/include/asm/ftrace.h8
-rw-r--r--arch/powerpc/include/asm/hmi.h45
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h11
-rw-r--r--arch/powerpc/include/asm/hw_irq.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h11
-rw-r--r--arch/powerpc/include/asm/jump_label.h5
-rw-r--r--arch/powerpc/include/asm/kprobes.h8
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h2
-rw-r--r--arch/powerpc/include/asm/linkage.h6
-rw-r--r--arch/powerpc/include/asm/machdep.h42
-rw-r--r--arch/powerpc/include/asm/mman.h9
-rw-r--r--arch/powerpc/include/asm/mmu-8xx.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h107
-rw-r--r--arch/powerpc/include/asm/mpc52xx.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-44x.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-64k.h1
-rw-r--r--arch/powerpc/include/asm/opal-api.h46
-rw-r--r--arch/powerpc/include/asm/opal.h34
-rw-r--r--arch/powerpc/include/asm/paca.h15
-rw-r--r--arch/powerpc/include/asm/page.h6
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h2
-rw-r--r--arch/powerpc/include/asm/pci.h3
-rw-r--r--arch/powerpc/include/asm/pgtable-be-types.h15
-rw-r--r--arch/powerpc/include/asm/pmac_feature.h2
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h47
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h47
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h2
-rw-r--r--arch/powerpc/include/asm/ppc4xx.h2
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h28
-rw-r--r--arch/powerpc/include/asm/processor.h7
-rw-r--r--arch/powerpc/include/asm/ps3.h2
-rw-r--r--arch/powerpc/include/asm/ps3av.h2
-rw-r--r--arch/powerpc/include/asm/pte-common.h2
-rw-r--r--arch/powerpc/include/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/asm/reg.h97
-rw-r--r--arch/powerpc/include/asm/rtas.h7
-rw-r--r--arch/powerpc/include/asm/rtc.h78
-rw-r--r--arch/powerpc/include/asm/sections.h4
-rw-r--r--arch/powerpc/include/asm/setup.h12
-rw-r--r--arch/powerpc/include/asm/smp.h9
-rw-r--r--arch/powerpc/include/asm/smu.h9
-rw-r--r--arch/powerpc/include/asm/spinlock.h38
-rw-r--r--arch/powerpc/include/asm/string.h4
-rw-r--r--arch/powerpc/include/asm/switch_to.h8
-rw-r--r--arch/powerpc/include/asm/synch.h1
-rw-r--r--arch/powerpc/include/asm/tce.h3
-rw-r--r--arch/powerpc/include/asm/thread_info.h29
-rw-r--r--arch/powerpc/include/asm/time.h9
-rw-r--r--arch/powerpc/include/asm/tlb.h13
-rw-r--r--arch/powerpc/include/asm/tlbflush.h1
-rw-r--r--arch/powerpc/include/asm/tsi108.h2
-rw-r--r--arch/powerpc/include/asm/types.h8
-rw-r--r--arch/powerpc/include/asm/uaccess.h21
-rw-r--r--arch/powerpc/include/asm/xics.h6
-rw-r--r--arch/powerpc/include/asm/xor.h1
-rw-r--r--arch/powerpc/include/uapi/asm/elf.h5
-rw-r--r--arch/powerpc/kernel/Makefile7
-rw-r--r--arch/powerpc/kernel/align.c19
-rw-r--r--arch/powerpc/kernel/asm-offsets.c80
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S2
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S20
-rw-r--r--arch/powerpc/kernel/cputable.c41
-rw-r--r--arch/powerpc/kernel/crash.c15
-rw-r--r--arch/powerpc/kernel/dma-iommu.c12
-rw-r--r--arch/powerpc/kernel/dma.c18
-rw-r--r--arch/powerpc/kernel/eeh_cache.c8
-rw-r--r--arch/powerpc/kernel/eeh_dev.c17
-rw-r--r--arch/powerpc/kernel/eeh_driver.c2
-rw-r--r--arch/powerpc/kernel/entry_32.S17
-rw-r--r--arch/powerpc/kernel/entry_64.S10
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S78
-rw-r--r--arch/powerpc/kernel/fadump.c3
-rw-r--r--arch/powerpc/kernel/ftrace.c39
-rw-r--r--arch/powerpc/kernel/head_64.S7
-rw-r--r--arch/powerpc/kernel/head_8xx.S159
-rw-r--r--arch/powerpc/kernel/hmi.c56
-rw-r--r--arch/powerpc/kernel/ibmebus.c12
-rw-r--r--arch/powerpc/kernel/idle_book3s.S (renamed from arch/powerpc/kernel/idle_power7.S)373
-rw-r--r--arch/powerpc/kernel/iommu.c12
-rw-r--r--arch/powerpc/kernel/irq.c18
-rw-r--r--arch/powerpc/kernel/kprobes.c17
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c10
-rw-r--r--arch/powerpc/kernel/misc_32.S14
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/module_64.c9
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c161
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/powerpc/kernel/pci_dn.c34
-rw-r--r--arch/powerpc/kernel/process.c69
-rw-r--r--arch/powerpc/kernel/prom.c16
-rw-r--r--arch/powerpc/kernel/ptrace.c1532
-rw-r--r--arch/powerpc/kernel/rtas-proc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c8
-rw-r--r--arch/powerpc/kernel/rtasd.c28
-rw-r--r--arch/powerpc/kernel/setup-common.c190
-rw-r--r--arch/powerpc/kernel/setup.h58
-rw-r--r--arch/powerpc/kernel/setup_32.c114
-rw-r--r--arch/powerpc/kernel/setup_64.c285
-rw-r--r--arch/powerpc/kernel/signal_64.c11
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/kernel/sysfs.c2
-rw-r--r--arch/powerpc/kernel/time.c177
-rw-r--r--arch/powerpc/kernel/tm.S3
-rw-r--r--arch/powerpc/kernel/traps.c18
-rw-r--r--arch/powerpc/kernel/vector.S9
-rw-r--r--arch/powerpc/kernel/vio.c12
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/kvm/Makefile2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c18
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c41
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c176
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S531
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c22
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/booke.c4
-rw-r--r--arch/powerpc/kvm/emulate.c1
-rw-r--r--arch/powerpc/kvm/mpic.c3
-rw-r--r--arch/powerpc/kvm/powerpc.c6
-rw-r--r--arch/powerpc/lib/alloc.c2
-rw-r--r--arch/powerpc/lib/checksum_64.S12
-rw-r--r--arch/powerpc/lib/feature-fixups.c67
-rw-r--r--arch/powerpc/lib/locks.c16
-rw-r--r--arch/powerpc/lib/ppc_ksyms.c4
-rw-r--r--arch/powerpc/lib/rheap.c2
-rw-r--r--arch/powerpc/lib/string.S44
-rw-r--r--arch/powerpc/lib/vmx-helper.c1
-rw-r--r--arch/powerpc/mm/8xx_mmu.c131
-rw-r--r--arch/powerpc/mm/hash64_4k.c18
-rw-r--r--arch/powerpc/mm/hash64_64k.c39
-rw-r--r--arch/powerpc/mm/hash_native_64.c42
-rw-r--r--arch/powerpc/mm/hash_utils_64.c206
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c17
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage-radix.c39
-rw-r--r--arch/powerpc/mm/hugetlbpage.c7
-rw-r--r--arch/powerpc/mm/init_32.c5
-rw-r--r--arch/powerpc/mm/init_64.c22
-rw-r--r--arch/powerpc/mm/mem.c18
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c5
-rw-r--r--arch/powerpc/mm/mmu_decl.h3
-rw-r--r--arch/powerpc/mm/numa.c36
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c7
-rw-r--r--arch/powerpc/mm/pgtable-radix.c20
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/mm/pgtable_32.c2
-rw-r--r--arch/powerpc/mm/tlb-radix.c155
-rw-r--r--arch/powerpc/mm/tlb_hash32.c11
-rw-r--r--arch/powerpc/mm/tlb_nohash.c6
-rw-r--r--arch/powerpc/net/Makefile4
-rw-r--r--arch/powerpc/net/bpf_jit.h235
-rw-r--r--arch/powerpc/net/bpf_jit32.h139
-rw-r--r--arch/powerpc/net/bpf_jit64.h102
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S2
-rw-r--r--arch/powerpc/net/bpf_jit_asm64.S180
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c10
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c954
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c2
-rw-r--r--arch/powerpc/perf/Makefile2
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/hv-24x7.c2
-rw-r--r--arch/powerpc/perf/hv-24x7.h2
-rw-r--r--arch/powerpc/perf/isa207-common.c263
-rw-r--r--arch/powerpc/perf/isa207-common.h236
-rw-r--r--arch/powerpc/perf/power8-pmu.c477
-rw-r--r--arch/powerpc/perf/power9-events-list.h55
-rw-r--r--arch/powerpc/perf/power9-pmu.c330
-rw-r--r--arch/powerpc/platforms/40x/ep405.c4
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c2
-rw-r--r--arch/powerpc/platforms/40x/virtex.c4
-rw-r--r--arch/powerpc/platforms/40x/walnut.c4
-rw-r--r--arch/powerpc/platforms/44x/canyonlands.c5
-rw-r--r--arch/powerpc/platforms/44x/ebony.c4
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c4
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c3
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c10
-rw-r--r--arch/powerpc/platforms/44x/sam440ep.c4
-rw-r--r--arch/powerpc/platforms/44x/virtex.c4
-rw-r--r--arch/powerpc/platforms/44x/warp.c4
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c8
-rw-r--r--arch/powerpc/platforms/512x/mpc512x.h2
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_generic.c8
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c2
-rw-r--r--arch/powerpc/platforms/512x/pdm360ng.c8
-rw-r--r--arch/powerpc/platforms/52xx/efika.c3
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c3
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c3
-rw-r--r--arch/powerpc/platforms/82xx/km82xx.c3
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c3
-rw-r--r--arch/powerpc/platforms/82xx/pq2.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2.h2
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c3
-rw-r--r--arch/powerpc/platforms/83xx/asp834x.c3
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c3
-rw-r--r--arch/powerpc/platforms/83xx/misc.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c4
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h2
-rw-r--r--arch/powerpc/platforms/83xx/sbc834x.c4
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_qds.c4
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_rdb.c4
-rw-r--r--arch/powerpc/platforms/85xx/c293pcie.c4
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c5
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c7
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c6
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c6
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c16
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c12
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c43
-rw-r--r--arch/powerpc/platforms/85xx/mvme2500.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c6
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rdb.c4
-rw-r--r--arch/powerpc/platforms/85xx/ppa8548.c4
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c4
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c4
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c4
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c4
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c2
-rw-r--r--arch/powerpc/platforms/85xx/twr_p102x.c4
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c12
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig8
-rw-r--r--arch/powerpc/platforms/86xx/Makefile1
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c6
-rw-r--r--arch/powerpc/platforms/86xx/mvme7100.c121
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c4
-rw-r--r--arch/powerpc/platforms/8xx/adder875.c3
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c3
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/mpc86xads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc8xx.h2
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c4
-rw-r--r--arch/powerpc/platforms/Kconfig11
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype1
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c6
-rw-r--r--arch/powerpc/platforms/cell/iommu.c32
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c1
-rw-r--r--arch/powerpc/platforms/cell/setup.c7
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c4
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c7
-rw-r--r--arch/powerpc/platforms/embedded6xx/c2k.c10
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c19
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c12
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c19
-rw-r--r--arch/powerpc/platforms/maple/pci.c34
-rw-r--r--arch/powerpc/platforms/maple/setup.c37
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c17
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h1
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c3
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c18
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c2
-rw-r--r--arch/powerpc/platforms/powermac/pci.c38
-rw-r--r--arch/powerpc/platforms/powermac/setup.c58
-rw-r--r--arch/powerpc/platforms/powermac/smp.c3
-rw-r--r--arch/powerpc/platforms/powernv/Makefile1
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c49
-rw-r--r--arch/powerpc/platforms/powernv/idle.c188
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c16
-rw-r--r--arch/powerpc/platforms/powernv/opal-async.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal-memory-errors.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-sysparam.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-tracepoints.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S74
-rw-r--r--arch/powerpc/platforms/powernv/opal.c30
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c385
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c909
-rw-r--r--arch/powerpc/platforms/powernv/pci.c134
-rw-r--r--arch/powerpc/platforms/powernv/pci.h45
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h1
-rw-r--r--arch/powerpc/platforms/powernv/setup.c16
-rw-r--r--arch/powerpc/platforms/powernv/smp.c4
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c2
-rw-r--r--arch/powerpc/platforms/ps3/htab.c12
-rw-r--r--arch/powerpc/platforms/ps3/repository.c2
-rw-r--r--arch/powerpc/platforms/ps3/setup.c23
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c18
-rw-r--r--arch/powerpc/platforms/ps3/time.c2
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c56
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c2
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c53
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c48
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c13
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c184
-rw-r--r--arch/powerpc/platforms/pseries/io_event_irq.c2
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c59
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c23
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c41
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--arch/powerpc/platforms/pseries/power.c2
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h21
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c8
-rw-r--r--arch/powerpc/platforms/pseries/ras.c39
-rw-r--r--arch/powerpc/platforms/pseries/setup.c241
-rw-r--r--arch/powerpc/platforms/pseries/smp.c31
-rw-r--r--arch/powerpc/sysdev/cpm_common.c22
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c186
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c8
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c25
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c8
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h6
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c2
-rw-r--r--arch/powerpc/sysdev/xics/Makefile2
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c144
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c5
-rw-r--r--arch/powerpc/xmon/ppc-dis.c1
-rw-r--r--arch/powerpc/xmon/xmon.c107
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/boot/compressed/Makefile6
-rw-r--r--arch/s390/hypfs/hypfs_diag.c377
-rw-r--r--arch/s390/include/asm/cpacf.h10
-rw-r--r--arch/s390/include/asm/diag.h149
-rw-r--r--arch/s390/include/asm/dma-mapping.h1
-rw-r--r--arch/s390/include/asm/elf.h1
-rw-r--r--arch/s390/include/asm/gmap.h82
-rw-r--r--arch/s390/include/asm/kvm_host.h33
-rw-r--r--arch/s390/include/asm/mmu.h11
-rw-r--r--arch/s390/include/asm/mmu_context.h3
-rw-r--r--arch/s390/include/asm/page.h9
-rw-r--r--arch/s390/include/asm/pgalloc.h2
-rw-r--r--arch/s390/include/asm/pgtable.h37
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/sclp.h23
-rw-r--r--arch/s390/include/uapi/asm/auxvec.h2
-rw-r--r--arch/s390/include/uapi/asm/kvm.h41
-rw-r--r--arch/s390/include/uapi/asm/sie.h1
-rw-r--r--arch/s390/kernel/Makefile11
-rw-r--r--arch/s390/kernel/als.c124
-rw-r--r--arch/s390/kernel/diag.c39
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/head.S43
-rw-r--r--arch/s390/kernel/kprobes.c12
-rw-r--r--arch/s390/kernel/sclp.c5
-rw-r--r--arch/s390/kvm/Makefile2
-rw-r--r--arch/s390/kvm/diag.c5
-rw-r--r--arch/s390/kvm/gaccess.c387
-rw-r--r--arch/s390/kvm/gaccess.h3
-rw-r--r--arch/s390/kvm/guestdbg.c19
-rw-r--r--arch/s390/kvm/intercept.c33
-rw-r--r--arch/s390/kvm/interrupt.c34
-rw-r--r--arch/s390/kvm/kvm-s390.c404
-rw-r--r--arch/s390/kvm/kvm-s390.h22
-rw-r--r--arch/s390/kvm/priv.c226
-rw-r--r--arch/s390/kvm/sigp.c10
-rw-r--r--arch/s390/kvm/sthyi.c471
-rw-r--r--arch/s390/kvm/trace.h49
-rw-r--r--arch/s390/kvm/vsie.c1091
-rw-r--r--arch/s390/lib/uaccess.c2
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/gmap.c1574
-rw-r--r--arch/s390/mm/hugetlbpage.c52
-rw-r--r--arch/s390/mm/pgalloc.c39
-rw-r--r--arch/s390/mm/pgtable.c209
-rw-r--r--arch/s390/numa/mode_emu.c4
-rw-r--r--arch/s390/numa/numa.c8
-rw-r--r--arch/s390/pci/pci_dma.c23
-rw-r--r--arch/s390/tools/gen_facilities.c1
-rw-r--r--arch/score/mm/init.c2
-rw-r--r--arch/sh/Kconfig34
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sh/boards/Kconfig1
-rw-r--r--arch/sh/boards/board-secureedge5410.c3
-rw-r--r--arch/sh/boards/of-generic.c15
-rw-r--r--arch/sh/boot/dts/Makefile3
-rwxr-xr-xarch/sh/boot/dts/j2_mimas_v2.dts96
-rw-r--r--arch/sh/configs/j2_defconfig40
-rw-r--r--arch/sh/drivers/heartbeat.c32
-rw-r--r--arch/sh/drivers/pci/pci.c4
-rw-r--r--arch/sh/include/asm/atomic.h8
-rw-r--r--arch/sh/include/asm/barrier.h5
-rw-r--r--arch/sh/include/asm/bitops-cas.h93
-rw-r--r--arch/sh/include/asm/bitops.h2
-rw-r--r--arch/sh/include/asm/cmpxchg-cas.h24
-rw-r--r--arch/sh/include/asm/cmpxchg-xchg.h2
-rw-r--r--arch/sh/include/asm/cmpxchg.h2
-rw-r--r--arch/sh/include/asm/dma-mapping.h4
-rw-r--r--arch/sh/include/asm/futex-cas.h34
-rw-r--r--arch/sh/include/asm/futex-irq.h86
-rw-r--r--arch/sh/include/asm/futex-llsc.h41
-rw-r--r--arch/sh/include/asm/futex.h97
-rw-r--r--arch/sh/include/asm/mc146818rtc.h7
-rw-r--r--arch/sh/include/asm/processor.h2
-rw-r--r--arch/sh/include/asm/rtc.h11
-rw-r--r--arch/sh/include/asm/spinlock-cas.h117
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h224
-rw-r--r--arch/sh/include/asm/spinlock.h222
-rw-r--r--arch/sh/include/asm/thread_info.h26
-rw-r--r--arch/sh/include/uapi/asm/cpu-features.h1
-rw-r--r--arch/sh/include/uapi/asm/sigcontext.h3
-rw-r--r--arch/sh/include/uapi/asm/unistd_32.h16
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h16
-rw-r--r--arch/sh/kernel/cpu/clock.c4
-rw-r--r--arch/sh/kernel/cpu/init.c6
-rw-r--r--arch/sh/kernel/cpu/proc.c1
-rw-r--r--arch/sh/kernel/cpu/sh2/Makefile4
-rw-r--r--arch/sh/kernel/cpu/sh2/entry.S55
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c39
-rw-r--r--arch/sh/kernel/cpu/sh2/smp-j2.c139
-rw-r--r--arch/sh/kernel/dma-nommu.c4
-rw-r--r--arch/sh/kernel/dwarf.c6
-rw-r--r--arch/sh/kernel/head_32.S6
-rw-r--r--arch/sh/kernel/setup.c6
-rw-r--r--arch/sh/kernel/syscalls_32.S14
-rw-r--r--arch/sh/kernel/syscalls_64.S14
-rw-r--r--arch/sh/kernel/time.c36
-rw-r--r--arch/sh/mm/Makefile3
-rw-r--r--arch/sh/mm/asids-debugfs.c5
-rw-r--r--arch/sh/mm/cache-j2.c65
-rw-r--r--arch/sh/mm/cache.c13
-rw-r--r--arch/sh/mm/consistent.c4
-rw-r--r--arch/sh/mm/ioremap.c2
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/io_32.h10
-rw-r--r--arch/sparc/include/asm/pci_64.h3
-rw-r--r--arch/sparc/include/asm/thread_info_64.h24
-rw-r--r--arch/sparc/include/asm/uaccess_32.h14
-rw-r--r--arch/sparc/include/asm/uaccess_64.h11
-rw-r--r--arch/sparc/kernel/iommu.c12
-rw-r--r--arch/sparc/kernel/ioport.c24
-rw-r--r--arch/sparc/kernel/pci.c20
-rw-r--r--arch/sparc/kernel/pci_sun4v.c12
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S7
-rw-r--r--arch/tile/include/asm/thread_info.h27
-rw-r--r--arch/tile/kernel/pci-dma.c28
-rw-r--r--arch/tile/kernel/vmlinux.lds.S12
-rw-r--r--arch/um/Kconfig.common6
-rw-r--r--arch/um/Makefile4
-rw-r--r--arch/um/include/asm/irqflags.h18
-rw-r--r--arch/um/kernel/Makefile5
-rw-r--r--arch/um/kernel/initrd.c2
-rw-r--r--arch/um/kernel/um_arch.c8
-rw-r--r--arch/um/os-Linux/Makefile3
-rw-r--r--arch/um/os-Linux/signal.c5
-rw-r--r--arch/unicore32/kernel/pci.c9
-rw-r--r--arch/unicore32/mm/dma-swiotlb.c4
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/entry/common.c6
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/entry/vdso/Makefile3
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c25
-rw-r--r--arch/x86/entry/vdso/vdso2c.h6
-rw-r--r--arch/x86/events/amd/ibs.c3
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/core.c14
-rw-r--r--arch/x86/events/intel/uncore.c2
-rw-r--r--arch/x86/events/intel/uncore.h1
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/elf.h4
-rw-r--r--arch/x86/include/asm/fpu/internal.h16
-rw-r--r--arch/x86/include/asm/kvm_host.h31
-rw-r--r--arch/x86/include/asm/livepatch.h1
-rw-r--r--arch/x86/include/asm/mc146818rtc.h1
-rw-r--r--arch/x86/include/asm/mmu_context.h2
-rw-r--r--arch/x86/include/asm/pvclock.h39
-rw-r--r--arch/x86/include/asm/rtc.h1
-rw-r--r--arch/x86/include/asm/svm.h1
-rw-r--r--arch/x86/include/asm/swiotlb.h4
-rw-r--r--arch/x86/include/asm/syscall.h5
-rw-r--r--arch/x86/include/asm/thread_info.h71
-rw-r--r--arch/x86/include/asm/topology.h1
-rw-r--r--arch/x86/include/asm/uaccess.h26
-rw-r--r--arch/x86/include/asm/uaccess_32.h2
-rw-r--r--arch/x86/include/asm/uaccess_64.h2
-rw-r--r--arch/x86/include/asm/virtext.h8
-rw-r--r--arch/x86/include/asm/xen/page-coherent.h9
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/amd_gart_64.c21
-rw-r--r--arch/x86/kernel/amd_nb.c2
-rw-r--r--arch/x86/kernel/apic/apic.c4
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/apic_noop.c1
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/ipi.c1
-rw-r--r--arch/x86/kernel/apic/probe_32.c2
-rw-r--r--arch/x86/kernel/apic/probe_64.c3
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c3
-rw-r--r--arch/x86/kernel/cpu/intel.c2
-rw-r--r--arch/x86/kernel/cpu/match.c2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c3
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c1
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c2
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c3
-rw-r--r--arch/x86/kernel/crash.c2
-rw-r--r--arch/x86/kernel/dumpstack_32.c2
-rw-r--r--arch/x86/kernel/dumpstack_64.c2
-rw-r--r--arch/x86/kernel/early-quirks.c404
-rw-r--r--arch/x86/kernel/fpu/signal.c12
-rw-r--r--arch/x86/kernel/hpet.c3
-rw-r--r--arch/x86/kernel/hw_breakpoint.c3
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c3
-rw-r--r--arch/x86/kernel/i8253.c2
-rw-r--r--arch/x86/kernel/io_delay.c2
-rw-r--r--arch/x86/kernel/irq_32.c1
-rw-r--r--arch/x86/kernel/irq_64.c1
-rw-r--r--arch/x86/kernel/kdebugfs.c2
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/mpparse.c1
-rw-r--r--arch/x86/kernel/nmi.c1
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c2
-rw-r--r--arch/x86/kernel/paravirt.c3
-rw-r--r--arch/x86/kernel/pci-calgary_64.c14
-rw-r--r--arch/x86/kernel/pci-dma.c4
-rw-r--r--arch/x86/kernel/pci-nommu.c4
-rw-r--r--arch/x86/kernel/pci-swiotlb.c6
-rw-r--r--arch/x86/kernel/pmem.c2
-rw-r--r--arch/x86/kernel/process.c3
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c2
-rw-r--r--arch/x86/kernel/ptrace.c15
-rw-r--r--arch/x86/kernel/pvclock.c17
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/rtc.c3
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/signal.c40
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/test_rodata.c5
-rw-r--r--arch/x86/kernel/traps.c2
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c3
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/emulate.c1
-rw-r--r--arch/x86/kvm/i8254.c4
-rw-r--r--arch/x86/kvm/iommu.c4
-rw-r--r--arch/x86/kvm/irq.c2
-rw-r--r--arch/x86/kvm/irq.h3
-rw-r--r--arch/x86/kvm/irq_comm.c49
-rw-r--r--arch/x86/kvm/lapic.c544
-rw-r--r--arch/x86/kvm/lapic.h19
-rw-r--r--arch/x86/kvm/mmu.c32
-rw-r--r--arch/x86/kvm/mmu.h5
-rw-r--r--arch/x86/kvm/paging_tmpl.h10
-rw-r--r--arch/x86/kvm/pmu_intel.c2
-rw-r--r--arch/x86/kvm/svm.c8
-rw-r--r--arch/x86/kvm/trace.h15
-rw-r--r--arch/x86/kvm/vmx.c401
-rw-r--r--arch/x86/kvm/x86.c178
-rw-r--r--arch/x86/lib/cache-smp.c2
-rw-r--r--arch/x86/lib/cpu.c3
-rw-r--r--arch/x86/lib/csum-partial_64.c2
-rw-r--r--arch/x86/lib/csum-wrappers_64.c2
-rw-r--r--arch/x86/lib/delay.c2
-rw-r--r--arch/x86/lib/hweight.S2
-rw-r--r--arch/x86/lib/memcpy_32.c2
-rw-r--r--arch/x86/lib/mmx_32.c2
-rw-r--r--arch/x86/lib/msr-reg-export.c2
-rw-r--r--arch/x86/lib/msr-smp.c2
-rw-r--r--arch/x86/lib/msr.c3
-rw-r--r--arch/x86/lib/string_32.c2
-rw-r--r--arch/x86/lib/usercopy.c2
-rw-r--r--arch/x86/lib/usercopy_32.c2
-rw-r--r--arch/x86/lib/usercopy_64.c2
-rw-r--r--arch/x86/mm/amdtopology.c1
-rw-r--r--arch/x86/mm/dump_pagetables.c6
-rw-r--r--arch/x86/mm/highmem_32.c2
-rw-r--r--arch/x86/mm/init.c4
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/init_64.c1
-rw-r--r--arch/x86/mm/iomap_32.c2
-rw-r--r--arch/x86/mm/ioremap.c1
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c1
-rw-r--r--arch/x86/mm/kmemcheck/shadow.c2
-rw-r--r--arch/x86/mm/kmmio.c2
-rw-r--r--arch/x86/mm/mmio-mod.c2
-rw-r--r--arch/x86/mm/numa.c1
-rw-r--r--arch/x86/mm/numa_32.c2
-rw-r--r--arch/x86/mm/pat.c1
-rw-r--r--arch/x86/mm/pat_rbtree.c1
-rw-r--r--arch/x86/mm/pf_in.c1
-rw-r--r--arch/x86/mm/pgtable_32.c1
-rw-r--r--arch/x86/mm/physaddr.c2
-rw-r--r--arch/x86/mm/srat.c2
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/pci/common.c2
-rw-r--r--arch/x86/pci/sta2x11-fixup.c2
-rw-r--r--arch/x86/pci/vmd.c57
-rw-r--r--arch/x86/pci/xen.c2
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/platform/efi/early_printk.c4
-rw-r--r--arch/x86/platform/efi/efi.c1
-rw-r--r--arch/x86/platform/efi/efi_64.c3
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c2
-rw-r--r--arch/x86/platform/intel-mid/intel_mid_vrtc.c1
-rw-r--r--arch/x86/platform/intel-mid/pwr.c4
-rw-r--r--arch/x86/platform/intel-mid/sfi.c2
-rw-r--r--arch/x86/platform/olpc/olpc.c2
-rw-r--r--arch/x86/platform/olpc/olpc_ofw.c5
-rw-r--r--arch/x86/platform/ts5500/ts5500.c6
-rw-r--r--arch/x86/platform/uv/uv_irq.c2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c2
-rw-r--r--arch/x86/power/hibernate_64.c18
-rw-r--r--arch/x86/power/hibernate_asm_64.S2
-rw-r--r--arch/x86/purgatory/Makefile2
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/um/delay.c2
-rw-r--r--arch/x86/um/vdso/Makefile3
-rw-r--r--arch/x86/xen/debugfs.c1
-rw-r--r--arch/x86/xen/enlighten.c7
-rw-r--r--arch/x86/xen/mmu.c3
-rw-r--r--arch/x86/xen/p2m.c2
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/setup.c2
-rw-r--r--arch/xtensa/kernel/pci-dma.c12
-rw-r--r--block/Kconfig13
-rw-r--r--block/bio-integrity.c2
-rw-r--r--block/bio.c21
-rw-r--r--block/blk-core.c26
-rw-r--r--block/blk-merge.c8
-rw-r--r--block/blk-mq-sysfs.c12
-rw-r--r--block/blk-mq.c25
-rw-r--r--block/blk-throttle.c13
-rw-r--r--block/cfq-iosched.c4
-rw-r--r--block/genhd.c3
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_cmos_rtc.c2
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/ec.c41
-rw-r--r--drivers/acpi/osl.c5
-rw-r--r--drivers/acpi/pci_mcfg.c92
-rw-r--r--drivers/acpi/pci_root.c35
-rw-r--r--drivers/base/firmware_class.c183
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/power/opp/core.c31
-rw-r--r--drivers/base/power/trace.c6
-rw-r--r--drivers/base/power/wakeup.c18
-rw-r--r--drivers/base/property.c28
-rw-r--r--drivers/block/brd.c19
-rw-r--r--drivers/block/drbd/drbd_actlog.c1
-rw-r--r--drivers/block/drbd/drbd_int.h1
-rw-r--r--drivers/block/drbd/drbd_main.c8
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/drbd/drbd_req.c6
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c21
-rw-r--r--drivers/block/loop.c67
-rw-r--r--drivers/block/nbd.c12
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c15
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/zram/zram_drv.c29
-rw-r--r--drivers/bus/Kconfig13
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/mvebu-mbus.c10
-rw-r--r--drivers/bus/tegra-aconnect.c112
-rw-r--r--drivers/char/Kconfig42
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/intel-gtt.c8
-rw-r--r--drivers/char/genrtc.c539
-rw-r--r--drivers/char/powernv-op-panel.c223
-rw-r--r--drivers/char/random.c5
-rw-r--r--drivers/clk/clk-clps711x.c2
-rw-r--r--drivers/clk/clkdev.c4
-rw-r--r--drivers/clk/ti/clk-33xx.c3
-rw-r--r--drivers/clk/ti/clk-43xx.c7
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c16
-rw-r--r--drivers/clk/ux500/u8540_clk.c16
-rw-r--r--drivers/clk/ux500/u9540_clk.c4
-rw-r--r--drivers/clocksource/clps711x-timer.c2
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/intel_pstate.c2
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c68
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c101
-rw-r--r--drivers/crypto/marvell/cesa.c7
-rw-r--r--drivers/crypto/marvell/cipher.c25
-rw-r--r--drivers/crypto/marvell/hash.c12
-rw-r--r--drivers/crypto/vmx/Kconfig2
-rw-r--r--drivers/crypto/vmx/vmx.c6
-rw-r--r--drivers/dma-buf/Kconfig15
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c59
-rw-r--r--drivers/dma-buf/fence-array.c144
-rw-r--r--drivers/dma-buf/fence.c8
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_scpi.c42
-rw-r--r--drivers/firmware/broadcom/bcm47xx_sprom.c2
-rw-r--r--drivers/firmware/qcom_scm-32.c327
-rw-r--r--drivers/firmware/qcom_scm-64.c307
-rw-r--r--drivers/firmware/qcom_scm.c353
-rw-r--r--drivers/firmware/qcom_scm.h47
-rw-r--r--drivers/firmware/scpi_pm_domain.c163
-rw-r--r--drivers/fpga/Kconfig1
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c287
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c264
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c230
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c170
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c334
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c217
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c480
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smum.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h)4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c4
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h14
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h11
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h108
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h19
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c153
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c404
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c74
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c464
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c590
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c303
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h165
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c272
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c27
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h10
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h4
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c190
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h60
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c81
-rw-r--r--drivers/gpu/drm/arc/Kconfig1
-rw-r--r--drivers/gpu/drm/arc/Makefile2
-rw-r--r--drivers/gpu/drm/arc/arcpgu.h2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c64
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c18
-rw-r--r--drivers/gpu/drm/arc/arcpgu_sim.c128
-rw-r--r--drivers/gpu/drm/arm/Kconfig17
-rw-r--r--drivers/gpu/drm/arm/Makefile2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c19
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c13
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c216
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c519
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h54
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c691
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h241
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c298
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h172
-rw-r--r--drivers/gpu/drm/armada/Kconfig4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c16
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c3
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c1
-rw-r--r--drivers/gpu/drm/ast/Kconfig4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c10
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c13
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c19
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c12
-rw-r--r--drivers/gpu/drm/bochs/Kconfig4
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c15
-rw-r--r--drivers/gpu/drm/bridge/Kconfig19
-rw-r--r--drivers/gpu/drm/bridge/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig15
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h (renamed from drivers/gpu/drm/i2c/adv7511.h)103
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c (renamed from drivers/gpu/drm/i2c/adv7511.c)324
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c265
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c8
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h8
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c12
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h5
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c30
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c14
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c467
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c1413
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c17
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c11
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c13
-rw-r--r--drivers/gpu/drm/drm_atomic.c109
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c573
-rw-r--r--drivers/gpu/drm/drm_auth.c285
-rw-r--r--drivers/gpu/drm/drm_blend.c238
-rw-r--r--drivers/gpu/drm/drm_bridge.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c8
-rw-r--r--drivers/gpu/drm/drm_cache.c1
-rw-r--r--drivers/gpu/drm/drm_crtc.c725
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c36
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h92
-rw-r--r--drivers/gpu/drm/drm_debugfs.c3
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c3
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c90
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c14
-rw-r--r--drivers/gpu/drm/drm_drv.c245
-rw-r--r--drivers/gpu/drm/drm_edid.c22
-rw-r--r--drivers/gpu/drm/drm_edid_load.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c43
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c56
-rw-r--r--drivers/gpu/drm/drm_fops.c149
-rw-r--r--drivers/gpu/drm/drm_fourcc.c320
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_info.c117
-rw-r--r--drivers/gpu/drm/drm_internal.h21
-rw-r--r--drivers/gpu/drm/drm_ioctl.c176
-rw-r--r--drivers/gpu/drm/drm_irq.c243
-rw-r--r--drivers/gpu/drm/drm_legacy.h8
-rw-r--r--drivers/gpu/drm/drm_lock.c240
-rw-r--r--drivers/gpu/drm/drm_memory.c2
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c38
-rw-r--r--drivers/gpu/drm/drm_mm.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c4
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c13
-rw-r--r--drivers/gpu/drm/drm_pci.c51
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c38
-rw-r--r--drivers/gpu/drm/drm_platform.c18
-rw-r--r--drivers/gpu/drm/drm_prime.c10
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c21
-rw-r--r--drivers/gpu/drm/drm_scatter.c2
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c206
-rw-r--r--drivers/gpu/drm/drm_sysfs.c71
-rw-r--r--drivers/gpu/drm/drm_vm.c58
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c16
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c54
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h2
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h7
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c77
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h91
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c67
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c44
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c49
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c15
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c16
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c87
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_tcon.c1
-rw-r--r--drivers/gpu/drm/gma500/Kconfig4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c9
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c9
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c11
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c6
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c9
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c34
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c11
-rw-r--r--drivers/gpu/drm/i2c/Kconfig6
-rw-r--r--drivers/gpu/drm/i2c/Makefile2
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c9
-rw-r--r--drivers/gpu/drm/i915/Kconfig22
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug3
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile5
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h34
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c145
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h69
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h38
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h49
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c53
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c570
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1587
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1956
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h786
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1262
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c515
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.h45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c55
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c38
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c390
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h84
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c203
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h6
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c538
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c807
-rw-r--r--drivers/gpu/drm/i915/i915_params.c23
-rw-r--r--drivers/gpu/drm/i915/i915_params.h5
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c503
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h113
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h65
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c8
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c40
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h54
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c44
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h92
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c5
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c50
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c23
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h16
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c595
-rw-r--r--drivers/gpu/drm/i915/intel_color.c23
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c78
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c25
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c274
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c388
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2675
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1087
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c172
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c25
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c470
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c63
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h355
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c135
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c179
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c90
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c42
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c41
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c116
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c141
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c30
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h50
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c222
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c104
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.h45
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c429
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c134
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c964
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h24
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c58
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c100
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c4
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c210
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c151
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c48
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1469
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c137
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1323
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h176
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c282
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c87
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c32
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c81
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c19
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c328
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h6
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c32
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c121
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h21
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c189
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c97
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c400
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c548
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h16
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c149
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig9
-rw-r--r--drivers/gpu/drm/mediatek/Makefile7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c265
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.h26
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1828
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c358
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_regs.h238
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c515
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c13
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c8
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c69
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c117
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c31
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c44
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h203
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c113
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c16
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c125
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c339
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h16
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c235
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c22
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c39
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c283
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h24
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c12
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c139
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h23
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c168
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c26
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c6
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h8
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c7
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c71
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c105
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c79
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c179
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c896
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c146
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c40
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig28
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile28
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c11
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c26
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c7
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c22
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c471
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc_coefs.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c136
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c57
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c10
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c255
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h45
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss_features.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss_features.h1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_common.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c78
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h871
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c129
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c56
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c15
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c166
-rw-r--r--drivers/gpu/drm/qxl/Kconfig5
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/cik.c17
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c14
-rw-r--r--drivers/gpu/drm/radeon/si.c45
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c117
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c69
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c29
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c14
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig5
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c189
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c9
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c1
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c210
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c81
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c90
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c7
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c4
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c26
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h3
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c71
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c39
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c148
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h1
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c43
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c46
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c43
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c350
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h13
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c40
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c21
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c112
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h8
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c43
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c12
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c9
-rw-r--r--drivers/gpu/drm/tegra/dc.c176
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c245
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c248
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c508
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h21
-rw-r--r--drivers/gpu/drm/tegra/output.c9
-rw-r--r--drivers/gpu/drm/tegra/rgb.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c717
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c168
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c121
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c6
-rw-r--r--drivers/gpu/drm/udl/Kconfig5
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c183
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c70
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h12
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_qpu_defines.h17
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h22
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c449
-rw-r--r--drivers/gpu/drm/vgem/Makefile2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c291
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h20
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c283
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c187
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c150
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h4
-rw-r--r--drivers/gpu/host1x/cdma.c42
-rw-r--r--drivers/gpu/host1x/channel.c5
-rw-r--r--drivers/gpu/host1x/debug.c38
-rw-r--r--drivers/gpu/host1x/dev.c16
-rw-r--r--drivers/gpu/host1x/dev.h38
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c23
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c5
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c36
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c30
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c10
-rw-r--r--drivers/gpu/host1x/intr.c16
-rw-r--r--drivers/gpu/host1x/intr.h4
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/gpu/host1x/syncpt.c58
-rw-r--r--drivers/gpu/host1x/syncpt.h8
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c9
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c213
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c62
-rw-r--r--drivers/hwmon/adt7411.c48
-rw-r--r--drivers/hwmon/ftsteutates.c2
-rw-r--r--drivers/hwmon/iio_hwmon.c24
-rw-r--r--drivers/hwmon/lm75.c6
-rw-r--r--drivers/hwmon/lm90.c14
-rw-r--r--drivers/hwmon/sht3x.c2
-rw-r--r--drivers/hwmon/tmp102.c4
-rw-r--r--drivers/i2c/busses/i2c-opal.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/cma.c98
-rw-r--r--drivers/infiniband/core/device.c9
-rw-r--r--drivers/infiniband/core/iwcm.c54
-rw-r--r--drivers/infiniband/core/iwcm.h2
-rw-r--r--drivers/infiniband/core/iwpm_util.c3
-rw-r--r--drivers/infiniband/core/multicast.c12
-rw-r--r--drivers/infiniband/core/netlink.c6
-rw-r--r--drivers/infiniband/core/rw.c24
-rw-r--r--drivers/infiniband/core/sa_query.c41
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucma.c18
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/core/uverbs.h14
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c535
-rw-r--r--drivers/infiniband/core/uverbs_main.c75
-rw-r--r--drivers/infiniband/core/verbs.c172
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c193
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c42
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h24
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c127
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c31
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c40
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig4
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c567
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h38
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c296
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h5
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h4
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c52
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c93
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c125
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h123
-rw-r--r--drivers/infiniband/hw/hfi1/init.c51
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c60
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h7
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c254
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h37
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c68
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c21
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c20
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c68
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h4
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c409
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h3
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c90
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c57
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c25
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h1333
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h141
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h155
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h209
-rw-r--r--drivers/infiniband/hw/hfi1/trace_misc.h81
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rc.h123
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h322
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h642
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.c489
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.h65
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c61
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c86
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c124
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c19
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c301
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h8
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c89
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h8
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c29
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c76
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c222
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h9
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c87
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c19
-rw-r--r--drivers/infiniband/hw/mlx5/main.c429
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h74
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c4
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c691
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c112
-rw-r--r--drivers/infiniband/hw/mlx5/user.h88
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c24
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c42
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c19
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c43
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c17
-rw-r--r--drivers/infiniband/sw/Makefile1
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c124
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c246
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c10
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig24
-rw-r--r--drivers/infiniband/sw/rxe/Makefile24
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c386
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h77
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c98
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c734
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c165
-rw-r--r--drivers/infiniband/sw/rxe/rxe_dma.c166
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h952
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c96
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h286
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c190
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c173
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c643
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c708
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h53
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c961
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.h129
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h172
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c502
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h163
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c851
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c217
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h178
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c420
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c726
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c1380
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c193
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c157
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c154
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h95
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c1330
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h480
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c10
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h6
-rw-r--r--drivers/input/joystick/xpad.c43
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c4
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c19
-rw-r--r--drivers/input/misc/rotary_encoder.c23
-rw-r--r--drivers/input/mouse/elan_i2c_core.c79
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/rmi4/rmi_bus.c5
-rw-r--r--drivers/input/serio/i8042.c16
-rw-r--r--drivers/input/serio/libps2.c10
-rw-r--r--drivers/input/touchscreen/Kconfig25
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/silead.c565
-rw-r--r--drivers/input/touchscreen/sis_i2c.c413
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c1044
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd_iommu_v2.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm-smmu.c28
-rw-r--r--drivers/iommu/dma-iommu.c8
-rw-r--r--drivers/iommu/dmar.c21
-rw-r--r--drivers/iommu/exynos-iommu.c107
-rw-r--r--drivers/iommu/intel-iommu.c18
-rw-r--r--drivers/iommu/io-pgtable-arm.c2
-rw-r--r--drivers/iommu/iommu.c32
-rw-r--r--drivers/iommu/msm_iommu.c870
-rw-r--r--drivers/iommu/msm_iommu.h73
-rw-r--r--drivers/iommu/msm_iommu_dev.c381
-rw-r--r--drivers/iommu/mtk_iommu.c49
-rw-r--r--drivers/iommu/mtk_iommu.h77
-rw-r--r--drivers/iommu/mtk_iommu_v1.c727
-rw-r--r--drivers/iommu/rockchip-iommu.c181
-rw-r--r--drivers/irqchip/Kconfig18
-rw-r--r--drivers/irqchip/irq-clps711x.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/leds/leds-powernv.c2
-rw-r--r--drivers/macintosh/smu.c9
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c1531
-rw-r--r--drivers/mailbox/mailbox-test.c1
-rw-r--r--drivers/mailbox/pl320-ipc.c46
-rw-r--r--drivers/md/bcache/request.c12
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/dm-cache-target.c8
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-flakey.c29
-rw-r--r--drivers/md/dm-io.c6
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-mpath.c26
-rw-r--r--drivers/md/dm-raid.c4
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-region-hash.c4
-rw-r--r--drivers/md/dm-rq.c20
-rw-r--r--drivers/md/dm-snap.c6
-rw-r--r--drivers/md/dm-stripe.c4
-rw-r--r--drivers/md/dm-thin.c8
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c24
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/md/raid5.c20
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c10
-rw-r--r--drivers/media/i2c/adv7180.c18
-rw-r--r--drivers/media/i2c/adv7511.c24
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c26
-rw-r--r--drivers/media/platform/vim2m.c15
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c10
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/ir-rx51.c229
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c30
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c21
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c2
-rw-r--r--drivers/memory/Kconfig13
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/atmel-ebi.c766
-rw-r--r--drivers/memory/atmel-sdramc.c11
-rw-r--r--drivers/memory/fsl_ifc.c4
-rw-r--r--drivers/memory/mtk-smi.c167
-rw-r--r--drivers/memory/omap-gpmc.c136
-rw-r--r--drivers/memory/samsung/exynos-srom.c41
-rw-r--r--drivers/memory/tegra/mc.c10
-rw-r--r--drivers/memory/tegra/tegra124-emc.c8
-rw-r--r--drivers/memstick/core/ms_block.c17
-rw-r--r--drivers/mfd/Kconfig17
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c4
-rw-r--r--drivers/mfd/ab8500-sysctrl.c34
-rw-r--r--drivers/mfd/altera-a10sr.c169
-rw-r--r--drivers/mfd/arizona-core.c4
-rw-r--r--drivers/mfd/arizona-irq.c16
-rw-r--r--drivers/mfd/axp20x.c17
-rw-r--r--drivers/mfd/db8500-prcmu.c10
-rw-r--r--drivers/mfd/dm355evm_msp.c7
-rw-r--r--drivers/mfd/hi655x-pmic.c59
-rw-r--r--drivers/mfd/kempld-core.c16
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max77620.c75
-rw-r--r--drivers/mfd/max77843.c24
-rw-r--r--drivers/mfd/max8925-i2c.c14
-rw-r--r--drivers/mfd/max8997.c30
-rw-r--r--drivers/mfd/max8998.c27
-rw-r--r--drivers/mfd/omap-usb-tll.c2
-rw-r--r--drivers/mfd/qcom_rpm.c57
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/smsc-ece1099.c11
-rw-r--r--drivers/mfd/stmpe.c40
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c135
-rw-r--r--drivers/mfd/tps6507x.c4
-rw-r--r--drivers/mfd/twl-core.c28
-rw-r--r--drivers/mfd/twl6040.c41
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/cxl/Kconfig17
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c168
-rw-r--r--drivers/misc/cxl/base.c104
-rw-r--r--drivers/misc/cxl/context.c6
-rw-r--r--drivers/misc/cxl/cxl.h87
-rw-r--r--drivers/misc/cxl/debugfs.c35
-rw-r--r--drivers/misc/cxl/file.c64
-rw-r--r--drivers/misc/cxl/flash.c4
-rw-r--r--drivers/misc/cxl/guest.c26
-rw-r--r--drivers/misc/cxl/irq.c32
-rw-r--r--drivers/misc/cxl/main.c5
-rw-r--r--drivers/misc/cxl/native.c207
-rw-r--r--drivers/misc/cxl/pci.c484
-rw-r--r--drivers/misc/cxl/phb.c44
-rw-r--r--drivers/misc/cxl/vphb.c78
-rw-r--r--drivers/misc/genwqe/card_base.c18
-rw-r--r--drivers/misc/lkdtm_usercopy.c2
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/mic/host/mic_boot.c20
-rw-r--r--drivers/mmc/card/block.c13
-rw-r--r--drivers/mmc/core/bus.c3
-rw-r--r--drivers/mmc/core/core.c95
-rw-r--r--drivers/mmc/core/debugfs.c3
-rw-r--r--drivers/mmc/core/host.c8
-rw-r--r--drivers/mmc/core/mmc.c253
-rw-r--r--drivers/mmc/core/mmc_ops.c27
-rw-r--r--drivers/mmc/core/quirks.c2
-rw-r--r--drivers/mmc/core/sd.c19
-rw-r--r--drivers/mmc/host/Kconfig22
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c7
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c11
-rw-r--r--drivers/mmc/host/dw_mmc.c126
-rw-r--r--drivers/mmc/host/dw_mmc.h10
-rw-r--r--drivers/mmc/host/mtk-sd.c70
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c16
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/s3cmci.h2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c8
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c8
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c204
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c143
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c2
-rw-r--r--drivers/mmc/host/sdhci-dove.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c153
-rw-r--r--drivers/mmc/host/sdhci-iproc.c15
-rw-r--r--drivers/mmc/host/sdhci-msm.c117
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c332
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c2
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c16
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c83
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c13
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h7
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c9
-rw-r--r--drivers/mmc/host/sdhci-s3c.c9
-rw-r--r--drivers/mmc/host/sdhci-sirf.c4
-rw-r--r--drivers/mmc/host/sdhci-st.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c51
-rw-r--r--drivers/mmc/host/sdhci.c745
-rw-r--r--drivers/mmc/host/sdhci.h30
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c53
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c14
-rw-r--r--drivers/mmc/host/tmio_mmc.h2
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mtd/bcm47xxpart.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c2
-rw-r--r--drivers/mtd/devices/Kconfig16
-rw-r--r--drivers/mtd/devices/m25p80.c37
-rw-r--r--drivers/mtd/devices/powernv_flash.c2
-rw-r--r--drivers/mtd/maps/physmap_of.c2
-rw-r--r--drivers/mtd/maps/pmcmsp-flash.c6
-rw-r--r--drivers/mtd/maps/sa1100-flash.c4
-rw-r--r--drivers/mtd/nand/Kconfig10
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c173
-rw-r--r--drivers/mtd/nand/jz4780_bch.c2
-rw-r--r--drivers/mtd/nand/jz4780_nand.c2
-rw-r--r--drivers/mtd/nand/mtk_ecc.c530
-rw-r--r--drivers/mtd/nand/mtk_ecc.h50
-rw-r--r--drivers/mtd/nand/mtk_nand.c1526
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/nand/nand_ids.c1
-rw-r--r--drivers/mtd/nand/omap2.c11
-rw-r--r--drivers/mtd/nand/sunxi_nand.c397
-rw-r--r--drivers/mtd/nand/xway_nand.c231
-rw-r--r--drivers/mtd/onenand/onenand_base.c4
-rw-r--r--drivers/mtd/spi-nor/Kconfig27
-rw-r--r--drivers/mtd/spi-nor/Makefile3
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c732
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c1299
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c29
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c489
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c43
-rw-r--r--drivers/mtd/spi-nor/nxp-spifi.c25
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c127
-rw-r--r--drivers/mtd/ssfdc.c3
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/ubi/attach.c141
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/fastmap.c65
-rw-r--r--drivers/mtd/ubi/gluebi.c5
-rw-r--r--drivers/mtd/ubi/io.c2
-rw-r--r--drivers/mtd/ubi/ubi.h46
-rw-r--r--drivers/mtd/ubi/vmt.c25
-rw-r--r--drivers/mtd/ubi/wl.c41
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/caif/caif_spi.c4
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c5
-rw-r--r--drivers/net/ethernet/8390/ax88796.c3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c1
-rw-r--r--drivers/net/ethernet/arc/emac_main.c15
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c12
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c6
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c9
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169.c37
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw.c19
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c1
-rw-r--r--drivers/net/macsec.c16
-rw-r--r--drivers/net/phy/mdio-xgene.c4
-rw-r--r--drivers/net/phy/micrel.c4
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c49
-rw-r--r--drivers/ntb/ntb_transport.c38
-rw-r--r--drivers/ntb/test/ntb_perf.c240
-rw-r--r--drivers/ntb/test/ntb_pingpong.c62
-rw-r--r--drivers/ntb/test/ntb_tool.c459
-rw-r--r--drivers/nvdimm/btt.c18
-rw-r--r--drivers/nvdimm/pmem.c16
-rw-r--r--drivers/nvme/host/pci.c15
-rw-r--r--drivers/of/base.c22
-rw-r--r--drivers/of/platform.c20
-rw-r--r--drivers/parisc/ccio-dma.c16
-rw-r--r--drivers/parisc/sba_iommu.c16
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/bus.c31
-rw-r--r--drivers/pci/ecam.c8
-rw-r--r--drivers/pci/host/Kconfig49
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-aardvark.c1001
-rw-r--r--drivers/pci/host/pci-dra7xx.c4
-rw-r--r--drivers/pci/host/pci-host-common.c44
-rw-r--r--drivers/pci/host/pci-host-generic.c13
-rw-r--r--drivers/pci/host/pci-hyperv.c30
-rw-r--r--drivers/pci/host/pci-keystone.c10
-rw-r--r--drivers/pci/host/pci-layerscape.c10
-rw-r--r--drivers/pci/host/pci-mvebu.c28
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c27
-rw-r--r--drivers/pci/host/pci-tegra.c99
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c11
-rw-r--r--drivers/pci/host/pci-thunder-pem.c14
-rw-r--r--drivers/pci/host/pci-versatile.c29
-rw-r--r--drivers/pci/host/pci-xgene.c24
-rw-r--r--drivers/pci/host/pcie-altera.c83
-rw-r--r--drivers/pci/host/pcie-armada8k.c14
-rw-r--r--drivers/pci/host/pcie-artpec6.c280
-rw-r--r--drivers/pci/host/pcie-designware-plat.c10
-rw-r--r--drivers/pci/host/pcie-designware.c34
-rw-r--r--drivers/pci/host/pcie-hisi.c13
-rw-r--r--drivers/pci/host/pcie-iproc.c4
-rw-r--r--drivers/pci/host/pcie-rcar.c44
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c20
-rw-r--r--drivers/pci/host/pcie-xilinx.c22
-rw-r--r--drivers/pci/hotplug/Kconfig13
-rw-r--r--drivers/pci/hotplug/Makefile3
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c6
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c4
-rw-r--r--drivers/pci/hotplug/pnv_php.c711
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c17
-rw-r--r--drivers/pci/msi.c266
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.c281
-rw-r--r--drivers/pci/pci.h11
-rw-r--r--drivers/pci/pcie/Kconfig5
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/pci/pcie/pcie-dpc.c19
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/pcie/portdrv_pci.c52
-rw-r--r--drivers/pci/probe.c22
-rw-r--r--drivers/pci/proc.c9
-rw-r--r--drivers/pci/quirks.c17
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/setup-bus.c73
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/phy/phy-rockchip-emmc.c241
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c17
-rw-r--r--drivers/platform/x86/apple-gmux.c55
-rw-r--r--drivers/platform/x86/dell-wmi.c4
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c10
-rw-r--r--drivers/pnp/pnpbios/core.c1
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/brcm-kona-reset.c73
-rw-r--r--drivers/power/reset/vexpress-poweroff.c4
-rw-r--r--drivers/pwm/Kconfig26
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/core.c27
-rw-r--r--drivers/pwm/pwm-atmel.c30
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c277
-rw-r--r--drivers/pwm/pwm-clps711x.c2
-rw-r--r--drivers/pwm/pwm-cros-ec.c260
-rw-r--r--drivers/pwm/pwm-lpc32xx.c7
-rw-r--r--drivers/pwm/pwm-lpss-pci.c1
-rw-r--r--drivers/pwm/pwm-lpss.c26
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c12
-rw-r--r--drivers/pwm/pwm-rockchip.c178
-rw-r--r--drivers/pwm/pwm-stmpe.c319
-rw-r--r--drivers/pwm/pwm-tegra.c69
-rw-r--r--drivers/pwm/pwm-tiecap.c37
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c38
-rw-r--r--drivers/pwm/pwm-tipwmss.c49
-rw-r--r--drivers/pwm/pwm-tipwmss.h39
-rw-r--r--drivers/pwm/sysfs.c17
-rw-r--r--drivers/rapidio/Kconfig9
-rw-r--r--drivers/rapidio/Makefile1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c6
-rw-r--r--drivers/rapidio/devices/tsi721.c57
-rw-r--r--drivers/rapidio/devices/tsi721.h2
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c27
-rw-r--r--drivers/rapidio/rio-scan.c74
-rw-r--r--drivers/rapidio/rio.c212
-rw-r--r--drivers/rapidio/rio.h2
-rw-r--r--drivers/rapidio/rio_cm.c2366
-rw-r--r--drivers/rapidio/switches/Kconfig6
-rw-r--r--drivers/rapidio/switches/Makefile1
-rw-r--r--drivers/rapidio/switches/idt_gen2.c7
-rw-r--r--drivers/rapidio/switches/idt_gen3.c382
-rw-r--r--drivers/rapidio/switches/tsi57x.c26
-rw-r--r--drivers/regulator/ab8500-ext.c465
-rw-r--r--drivers/regulator/pwm-regulator.c162
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c7
-rw-r--r--drivers/reset/Kconfig14
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/core.c37
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c122
-rw-r--r--drivers/reset/reset-ath79.c3
-rw-r--r--drivers/reset/reset-meson.c136
-rw-r--r--drivers/reset/reset-oxnas.c12
-rw-r--r--drivers/reset/reset-pistachio.c12
-rw-r--r--drivers/reset/reset-socfpga.c12
-rw-r--r--drivers/reset/reset-sunxi.c12
-rw-r--r--drivers/reset/reset-ti-syscon.c237
-rw-r--r--drivers/reset/reset-zynq.c12
-rw-r--r--drivers/reset/sti/Kconfig1
-rw-r--r--drivers/rtc/Kconfig26
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/interface.c32
-rw-r--r--drivers/rtc/rtc-abx80x.c12
-rw-r--r--drivers/rtc/rtc-asm9260.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c1
-rw-r--r--drivers/rtc/rtc-cmos.c17
-rw-r--r--drivers/rtc/rtc-da9052.c1
-rw-r--r--drivers/rtc/rtc-da9055.c1
-rw-r--r--drivers/rtc/rtc-davinci.c2
-rw-r--r--drivers/rtc/rtc-ds1286.c2
-rw-r--r--drivers/rtc/rtc-ds1305.c7
-rw-r--r--drivers/rtc/rtc-ds1307.c33
-rw-r--r--drivers/rtc/rtc-ds1343.c6
-rw-r--r--drivers/rtc/rtc-ds1685.c53
-rw-r--r--drivers/rtc/rtc-ds2404.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c6
-rw-r--r--drivers/rtc/rtc-efi.c6
-rw-r--r--drivers/rtc/rtc-generic.c36
-rw-r--r--drivers/rtc/rtc-hym8563.c5
-rw-r--r--drivers/rtc/rtc-isl12057.c33
-rw-r--r--drivers/rtc/rtc-m41t80.c34
-rw-r--r--drivers/rtc/rtc-m48t86.c2
-rw-r--r--drivers/rtc/rtc-max6916.c164
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c (renamed from include/asm-generic/rtc.h)69
-rw-r--r--drivers/rtc/rtc-mrst.c10
-rw-r--r--drivers/rtc/rtc-opal.c4
-rw-r--r--drivers/rtc/rtc-pcf2123.c4
-rw-r--r--drivers/rtc/rtc-pcf85063.c59
-rw-r--r--drivers/rtc/rtc-pcf8563.c5
-rw-r--r--drivers/rtc/rtc-rc5t583.c1
-rw-r--r--drivers/rtc/rtc-rs5c372.c6
-rw-r--r--drivers/rtc/rtc-rv8803.c205
-rw-r--r--drivers/rtc/rtc-rx8010.c8
-rw-r--r--drivers/rtc/rtc-rx8025.c5
-rw-r--r--drivers/rtc/rtc-s35390a.c157
-rw-r--r--drivers/rtc/rtc-s3c.c16
-rw-r--r--drivers/rtc/rtc-sh.c42
-rw-r--r--drivers/rtc/rtc-tegra.c6
-rw-r--r--drivers/rtc/rtc-v3020.c2
-rw-r--r--drivers/s390/char/sclp_early.c12
-rw-r--r--drivers/s390/char/sclp_ocf.c23
-rw-r--r--drivers/s390/cio/chp.c60
-rw-r--r--drivers/s390/crypto/ap_bus.c50
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/scsi/Kconfig17
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/cxlflash/main.c10
-rw-r--r--drivers/scsi/cxlflash/main.h4
-rw-r--r--drivers/scsi/fcoe/fcoe.c52
-rw-r--r--drivers/scsi/fcoe/fcoe.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/Makefile3
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c4087
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h346
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.c427
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.h123
-rw-r--r--drivers/scsi/ipr.c15
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c15
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/bcm/Kconfig18
-rw-r--r--drivers/soc/bcm/Makefile1
-rw-r--r--drivers/soc/bcm/brcmstb/Makefile (renamed from drivers/soc/brcmstb/Makefile)0
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c (renamed from drivers/soc/brcmstb/biuctrl.c)1
-rw-r--r--drivers/soc/bcm/brcmstb/common.c (renamed from drivers/soc/brcmstb/common.c)0
-rw-r--r--drivers/soc/brcmstb/Kconfig10
-rw-r--r--drivers/soc/qcom/smem_state.c12
-rw-r--r--drivers/soc/qcom/smp2p.c7
-rw-r--r--drivers/soc/qcom/smsm.c2
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c125
-rw-r--r--drivers/soc/renesas/Makefile2
-rw-r--r--drivers/soc/renesas/r8a7792-sysc.c34
-rw-r--r--drivers/soc/renesas/r8a7796-sysc.c48
-rw-r--r--drivers/soc/renesas/rcar-sysc.c45
-rw-r--r--drivers/soc/renesas/rcar-sysc.h2
-rw-r--r--drivers/soc/samsung/Kconfig4
-rw-r--r--drivers/soc/samsung/Makefile1
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c2
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c2
-rw-r--r--drivers/soc/samsung/pm_domains.c (renamed from arch/arm/mach-exynos/pm_domains.c)36
-rw-r--r--drivers/soc/tegra/pmc.c149
-rw-r--r--drivers/soc/ux500/Kconfig7
-rw-r--r--drivers/soc/ux500/Makefile1
-rw-r--r--drivers/soc/ux500/ux500-soc-id.c222
-rw-r--r--drivers/spi/spi-st-ssc4.c36
-rw-r--r--drivers/ssb/driver_gpio.c22
-rw-r--r--drivers/staging/android/sync_debug.h3
-rw-r--r--drivers/staging/emxx_udc/Kconfig2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c36
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/cec/cec-adap.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c5
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_file.c5
-rw-r--r--drivers/target/target_core_iblock.c5
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/target/target_core_transport.c94
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c2
-rw-r--r--drivers/tty/hvc/hvc_console.h1
-rw-r--r--drivers/tty/hvc/hvc_irq.c9
-rw-r--r--drivers/tty/hvc/hvc_opal.c11
-rw-r--r--drivers/tty/serial/ar933x_uart.c4
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/generic.c18
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c271
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h259
-rw-r--r--drivers/vhost/Kconfig18
-rw-r--r--drivers/vhost/Kconfig.vringh5
-rw-r--r--drivers/vhost/Makefile4
-rw-r--r--drivers/vhost/net.c67
-rw-r--r--drivers/vhost/vhost.c927
-rw-r--r--drivers/vhost/vhost.h64
-rw-r--r--drivers/vhost/vsock.c719
-rw-r--r--drivers/video/backlight/lp855x_bl.c29
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.c2
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.h2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c60
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c44
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c46
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c54
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c58
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c47
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c83
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c45
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c46
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/rfbi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/video-pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb.h5
-rw-r--r--drivers/video/logo/logo.c4
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_ring.c15
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/slaves/w1_ds2406.c14
-rw-r--r--drivers/w1/slaves/w1_ds2408.c14
-rw-r--r--drivers/w1/slaves/w1_ds2413.c14
-rw-r--r--drivers/w1/slaves/w1_ds2423.c14
-rw-r--r--drivers/w1/slaves/w1_ds2431.c14
-rw-r--r--drivers/w1/slaves/w1_ds2433.c14
-rw-r--r--drivers/w1/slaves/w1_ds2760.c43
-rw-r--r--drivers/w1/slaves/w1_ds2780.c39
-rw-r--r--drivers/w1/slaves/w1_ds2781.c40
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c14
-rw-r--r--drivers/w1/w1_family.h12
-rw-r--r--drivers/watchdog/Kconfig33
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/aspeed_wdt.c212
-rw-r--r--drivers/watchdog/bcm2835_wdt.c11
-rw-r--r--drivers/watchdog/da9063_wdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c28
-rw-r--r--drivers/watchdog/gpio_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c2
-rw-r--r--drivers/watchdog/max77620_wdt.c227
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c270
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/pcwd.c14
-rw-r--r--drivers/watchdog/pic32-dmt.c5
-rw-r--r--drivers/watchdog/pic32-wdt.c9
-rw-r--r--drivers/watchdog/qcom-wdt.c69
-rw-r--r--drivers/watchdog/sbsa_gwdt.c16
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c15
-rw-r--r--drivers/watchdog/softdog.c92
-rw-r--r--drivers/watchdog/tangox_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_core.c39
-rw-r--r--drivers/watchdog/watchdog_dev.c61
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/xen/swiotlb-xen.c14
-rw-r--r--fs/9p/fid.c26
-rw-r--r--fs/9p/fid.h9
-rw-r--r--fs/9p/vfs_inode.c6
-rw-r--r--fs/9p/vfs_inode_dotl.c2
-rw-r--r--fs/9p/xattr.c4
-rw-r--r--fs/Kconfig6
-rw-r--r--fs/Kconfig.binfmt3
-rw-r--r--fs/adfs/dir.c6
-rw-r--r--fs/affs/amigaffs.c4
-rw-r--r--fs/affs/namei.c12
-rw-r--r--fs/autofs4/root.c8
-rw-r--r--fs/autofs4/waitq.c4
-rw-r--r--fs/binfmt_elf.c34
-rw-r--r--fs/binfmt_elf_fdpic.c38
-rw-r--r--fs/binfmt_em86.c3
-rw-r--r--fs/binfmt_flat.c525
-rw-r--r--fs/binfmt_misc.c41
-rw-r--r--fs/block_dev.c11
-rw-r--r--fs/btrfs/acl.c3
-rw-r--r--fs/btrfs/async-thread.c31
-rw-r--r--fs/btrfs/async-thread.h6
-rw-r--r--fs/btrfs/backref.c4
-rw-r--r--fs/btrfs/check-integrity.c10
-rw-r--r--fs/btrfs/compression.c10
-rw-r--r--fs/btrfs/ctree.c91
-rw-r--r--fs/btrfs/ctree.h116
-rw-r--r--fs/btrfs/dedupe.h24
-rw-r--r--fs/btrfs/delayed-inode.c72
-rw-r--r--fs/btrfs/delayed-ref.c17
-rw-r--r--fs/btrfs/dev-replace.c4
-rw-r--r--fs/btrfs/disk-io.c103
-rw-r--r--fs/btrfs/disk-io.h3
-rw-r--r--fs/btrfs/extent-tree.c855
-rw-r--r--fs/btrfs/extent_io.c38
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/file.c28
-rw-r--r--fs/btrfs/free-space-cache.c8
-rw-r--r--fs/btrfs/free-space-tree.c16
-rw-r--r--fs/btrfs/inode-map.c16
-rw-r--r--fs/btrfs/inode.c231
-rw-r--r--fs/btrfs/ioctl.c40
-rw-r--r--fs/btrfs/ordered-data.c2
-rw-r--r--fs/btrfs/props.c6
-rw-r--r--fs/btrfs/qgroup.c25
-rw-r--r--fs/btrfs/qgroup.h9
-rw-r--r--fs/btrfs/relocation.c65
-rw-r--r--fs/btrfs/root-tree.c10
-rw-r--r--fs/btrfs/scrub.c12
-rw-r--r--fs/btrfs/super.c218
-rw-r--r--fs/btrfs/sysfs.c2
-rw-r--r--fs/btrfs/tests/btrfs-tests.c67
-rw-r--r--fs/btrfs/tests/btrfs-tests.h36
-rw-r--r--fs/btrfs/tests/extent-buffer-tests.c23
-rw-r--r--fs/btrfs/tests/free-space-tests.c14
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c18
-rw-r--r--fs/btrfs/tests/inode-tests.c46
-rw-r--r--fs/btrfs/tests/qgroup-tests.c23
-rw-r--r--fs/btrfs/transaction.c37
-rw-r--r--fs/btrfs/transaction.h1
-rw-r--r--fs/btrfs/tree-log.c22
-rw-r--r--fs/btrfs/volumes.c144
-rw-r--r--fs/cachefiles/namei.c5
-rw-r--r--fs/ceph/addr.c77
-rw-r--r--fs/ceph/cache.c2
-rw-r--r--fs/ceph/caps.c873
-rw-r--r--fs/ceph/dir.c73
-rw-r--r--fs/ceph/file.c77
-rw-r--r--fs/ceph/inode.c40
-rw-r--r--fs/ceph/ioctl.c30
-rw-r--r--fs/ceph/mds_client.c358
-rw-r--r--fs/ceph/mds_client.h19
-rw-r--r--fs/ceph/snap.c10
-rw-r--r--fs/ceph/super.c43
-rw-r--r--fs/ceph/super.h48
-rw-r--r--fs/ceph/xattr.c101
-rw-r--r--fs/cifs/dir.c4
-rw-r--r--fs/dcache.c59
-rw-r--r--fs/efivarfs/super.c3
-rw-r--r--fs/exec.c42
-rw-r--r--fs/ext2/dir.c6
-rw-r--r--fs/ext2/ext2.h4
-rw-r--r--fs/f2fs/data.c1
-rw-r--r--fs/f2fs/dir.c4
-rw-r--r--fs/f2fs/f2fs.h4
-rw-r--r--fs/fat/namei_msdos.c4
-rw-r--r--fs/fat/namei_vfat.c12
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/dir.c4
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c8
-rw-r--r--fs/gfs2/inode.c2
-rw-r--r--fs/hfs/catalog.c12
-rw-r--r--fs/hfs/hfs_fs.h16
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfs/string.c2
-rw-r--r--fs/hfs/trans.c2
-rw-r--r--fs/hfsplus/catalog.c12
-rw-r--r--fs/hfsplus/hfsplus_fs.h13
-rw-r--r--fs/hfsplus/unicode.c4
-rw-r--r--fs/hostfs/hostfs_kern.c7
-rw-r--r--fs/hpfs/dentry.c4
-rw-r--r--fs/inode.c9
-rw-r--r--fs/internal.h2
-rw-r--r--fs/isofs/inode.c15
-rw-r--r--fs/isofs/namei.c2
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/logfs/dir.c4
-rw-r--r--fs/mpage.c2
-rw-r--r--fs/namei.c2
-rw-r--r--fs/ncpfs/dir.c6
-rw-r--r--fs/nfs/Makefile2
-rw-r--r--fs/nfs/blocklayout/dev.c110
-rw-r--r--fs/nfs/blocklayout/extent_tree.c27
-rw-r--r--fs/nfs/callback_proc.c64
-rw-r--r--fs/nfs/callback_xdr.c6
-rw-r--r--fs/nfs/client.c22
-rw-r--r--fs/nfs/dir.c52
-rw-r--r--fs/nfs/direct.c93
-rw-r--r--fs/nfs/file.c100
-rw-r--r--fs/nfs/filelayout/filelayout.c18
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c23
-rw-r--r--fs/nfs/inode.c138
-rw-r--r--fs/nfs/internal.h62
-rw-r--r--fs/nfs/io.c147
-rw-r--r--fs/nfs/nfs3client.c14
-rw-r--r--fs/nfs/nfs3proc.c8
-rw-r--r--fs/nfs/nfs42proc.c24
-rw-r--r--fs/nfs/nfs42xdr.c12
-rw-r--r--fs/nfs/nfs4_fs.h6
-rw-r--r--fs/nfs/nfs4client.c26
-rw-r--r--fs/nfs/nfs4file.c16
-rw-r--r--fs/nfs/nfs4namespace.c4
-rw-r--r--fs/nfs/nfs4proc.c169
-rw-r--r--fs/nfs/nfs4xdr.c11
-rw-r--r--fs/nfs/nfstrace.h1
-rw-r--r--fs/nfs/pnfs.c191
-rw-r--r--fs/nfs/pnfs.h34
-rw-r--r--fs/nfs/pnfs_nfs.c13
-rw-r--r--fs/nfs/proc.c8
-rw-r--r--fs/nfs/super.c14
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfs/write.c44
-rw-r--r--fs/nfsd/Kconfig19
-rw-r--r--fs/nfsd/Makefile1
-rw-r--r--fs/nfsd/blocklayout.c2
-rw-r--r--fs/nfsd/blocklayoutxdr.c4
-rw-r--r--fs/nfsd/export.c14
-rw-r--r--fs/nfsd/export.h2
-rw-r--r--fs/nfsd/flexfilelayout.c133
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c115
-rw-r--r--fs/nfsd/flexfilelayoutxdr.h49
-rw-r--r--fs/nfsd/nfs4layouts.c16
-rw-r--r--fs/nfsd/nfs4proc.c48
-rw-r--r--fs/nfsd/nfs4state.c74
-rw-r--r--fs/nfsd/nfs4xdr.c81
-rw-r--r--fs/nfsd/nfsd.h5
-rw-r--r--fs/nfsd/nfsfh.c18
-rw-r--r--fs/nfsd/nfsproc.c7
-rw-r--r--fs/nfsd/nfsxdr.c2
-rw-r--r--fs/nfsd/pnfs.h4
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nfsd/vfs.c142
-rw-r--r--fs/nfsd/vfs.h3
-rw-r--r--fs/nfsd/xdr4.h5
-rw-r--r--fs/nilfs2/alloc.c45
-rw-r--r--fs/nilfs2/bmap.c4
-rw-r--r--fs/nilfs2/bmap.h2
-rw-r--r--fs/nilfs2/btnode.c4
-rw-r--r--fs/nilfs2/btree.c61
-rw-r--r--fs/nilfs2/btree.h2
-rw-r--r--fs/nilfs2/cpfile.c23
-rw-r--r--fs/nilfs2/cpfile.h3
-rw-r--r--fs/nilfs2/dat.c19
-rw-r--r--fs/nilfs2/dat.h1
-rw-r--r--fs/nilfs2/dir.c60
-rw-r--r--fs/nilfs2/direct.c10
-rw-r--r--fs/nilfs2/direct.h10
-rw-r--r--fs/nilfs2/gcinode.c9
-rw-r--r--fs/nilfs2/ifile.c7
-rw-r--r--fs/nilfs2/ifile.h1
-rw-r--r--fs/nilfs2/inode.c36
-rw-r--r--fs/nilfs2/ioctl.c48
-rw-r--r--fs/nilfs2/mdt.c6
-rw-r--r--fs/nilfs2/namei.c6
-rw-r--r--fs/nilfs2/nilfs.h48
-rw-r--r--fs/nilfs2/page.c45
-rw-r--r--fs/nilfs2/recovery.c72
-rw-r--r--fs/nilfs2/segbuf.c6
-rw-r--r--fs/nilfs2/segment.c61
-rw-r--r--fs/nilfs2/segment.h1
-rw-r--r--fs/nilfs2/sufile.c44
-rw-r--r--fs/nilfs2/sufile.h1
-rw-r--r--fs/nilfs2/super.c206
-rw-r--r--fs/nilfs2/sysfs.c74
-rw-r--r--fs/nilfs2/the_nilfs.c134
-rw-r--r--fs/nilfs2/the_nilfs.h11
-rw-r--r--fs/ocfs2/alloc.c37
-rw-r--r--fs/ocfs2/alloc.h2
-rw-r--r--fs/ocfs2/aops.c37
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c53
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c29
-rw-r--r--fs/ocfs2/dlm/dlmthread.c57
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/dlmfs/userdlm.c2
-rw-r--r--fs/ocfs2/dlmfs/userdlm.h2
-rw-r--r--fs/ocfs2/stack_user.c11
-rw-r--r--fs/ocfs2/suballoc.c20
-rw-r--r--fs/open.c20
-rw-r--r--fs/orangefs/dcache.c4
-rw-r--r--fs/orangefs/inode.c6
-rw-r--r--fs/orangefs/namei.c12
-rw-r--r--fs/orangefs/orangefs-kernel.h6
-rw-r--r--fs/orangefs/orangefs-mod.c2
-rw-r--r--fs/orangefs/orangefs-sysfs.c43
-rw-r--r--fs/orangefs/orangefs-utils.c38
-rw-r--r--fs/orangefs/protocol.h8
-rw-r--r--fs/pipe.c4
-rw-r--r--fs/proc/Makefile1
-rw-r--r--fs/proc/base.c7
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--fs/proc/stat.c10
-rw-r--r--fs/pstore/ram.c29
-rw-r--r--fs/reiserfs/ibalance.c3
-rw-r--r--fs/ubifs/gc.c4
-rw-r--r--fs/ubifs/super.c19
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/ubifs/xattr.c6
-rw-r--r--fs/utimes.c3
-rw-r--r--fs/xfs/Makefile8
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c149
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h52
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c12
-rw-r--r--fs/xfs/libxfs/xfs_attr.c71
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c19
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c241
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h54
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c32
-rw-r--r--fs/xfs/libxfs/xfs_btree.c914
-rw-r--r--fs/xfs/libxfs/xfs_btree.h88
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h4
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h1
-rw-r--r--fs/xfs/libxfs/xfs_defer.c463
-rw-r--r--fs/xfs/libxfs/xfs_defer.h97
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c15
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h8
-rw-r--r--fs/xfs/libxfs/xfs_format.h131
-rw-r--r--fs/xfs/libxfs/xfs_fs.h1
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c23
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c18
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c1
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h63
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c1399
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h209
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c511
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.h61
-rw-r--r--fs/xfs/libxfs/xfs_sb.c9
-rw-r--r--fs/xfs/libxfs/xfs_shared.h2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c62
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.h10
-rw-r--r--fs/xfs/libxfs/xfs_types.h4
-rw-r--r--fs/xfs/xfs_bmap_util.c139
-rw-r--r--fs/xfs/xfs_bmap_util.h4
-rw-r--r--fs/xfs/xfs_discard.c2
-rw-r--r--fs/xfs/xfs_dquot.c13
-rw-r--r--fs/xfs/xfs_error.h6
-rw-r--r--fs/xfs/xfs_export.c2
-rw-r--r--fs/xfs/xfs_extfree_item.c69
-rw-r--r--fs/xfs/xfs_extfree_item.h3
-rw-r--r--fs/xfs/xfs_filestream.c3
-rw-r--r--fs/xfs/xfs_fsops.c106
-rw-r--r--fs/xfs/xfs_inode.c99
-rw-r--r--fs/xfs/xfs_inode.h4
-rw-r--r--fs/xfs/xfs_ioctl.c8
-rw-r--r--fs/xfs/xfs_iomap.c31
-rw-r--r--fs/xfs/xfs_log_recover.c336
-rw-r--r--fs/xfs/xfs_mount.c7
-rw-r--r--fs/xfs/xfs_mount.h6
-rw-r--r--fs/xfs/xfs_ondisk.h3
-rw-r--r--fs/xfs/xfs_pnfs.h4
-rw-r--r--fs/xfs/xfs_rmap_item.c536
-rw-r--r--fs/xfs/xfs_rmap_item.h95
-rw-r--r--fs/xfs/xfs_rtalloc.c11
-rw-r--r--fs/xfs/xfs_stats.c1
-rw-r--r--fs/xfs/xfs_stats.h18
-rw-r--r--fs/xfs/xfs_super.c30
-rw-r--r--fs/xfs/xfs_symlink.c25
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trace.h374
-rw-r--r--fs/xfs/xfs_trans.h26
-rw-r--r--fs/xfs/xfs_trans_extfree.c215
-rw-r--r--fs/xfs/xfs_trans_rmap.c271
-rw-r--r--include/acpi/acpi_bus.h7
-rw-r--r--include/acpi/acpi_io.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/bridge/analogix_dp.h9
-rw-r--r--include/drm/drmP.h167
-rw-r--r--include/drm/drm_atomic.h82
-rw-r--r--include/drm/drm_atomic_helper.h42
-rw-r--r--include/drm/drm_auth.h59
-rw-r--r--include/drm/drm_crtc.h876
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_dp_helper.h13
-rw-r--r--include/drm/drm_dp_mst_helper.h141
-rw-r--r--include/drm/drm_fb_cma_helper.h1
-rw-r--r--include/drm/drm_fb_helper.h11
-rw-r--r--include/drm/drm_fourcc.h37
-rw-r--r--include/drm/drm_irq.h183
-rw-r--r--include/drm/drm_legacy.h2
-rw-r--r--include/drm/drm_mipi_dsi.h3
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h49
-rw-r--r--include/drm/drm_plane_helper.h1
-rw-r--r--include/drm/drm_simple_kms_helper.h94
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/drm/intel-gtt.h3
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/drm/ttm/ttm_bo_driver.h38
-rw-r--r--include/dt-bindings/clock/r8a7792-clock.h102
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h1
-rw-r--r--include/dt-bindings/clock/stih407-clks.h4
-rw-r--r--include/dt-bindings/memory/mt2701-larb-port.h85
-rw-r--r--include/dt-bindings/pinctrl/keystone.h39
-rw-r--r--include/dt-bindings/power/r8a7792-sysc.h26
-rw-r--r--include/dt-bindings/power/r8a7796-sysc.h36
-rw-r--r--include/dt-bindings/reset/amlogic,meson-gxbb-reset.h210
-rw-r--r--include/dt-bindings/reset/amlogic,meson8b-reset.h175
-rw-r--r--include/dt-bindings/reset/hisi,hi6220-resets.h8
-rw-r--r--include/dt-bindings/reset/ti-syscon.h38
-rw-r--r--include/kvm/arm_vgic.h445
-rw-r--r--include/kvm/vgic/vgic.h246
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/backing-dev-defs.h1
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bio.h7
-rw-r--r--include/linux/bitmap.h7
-rw-r--r--include/linux/blk-cgroup.h4
-rw-r--r--include/linux/blk_types.h15
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/capability.h1
-rw-r--r--include/linux/ceph/ceph_fs.h55
-rw-r--r--include/linux/ceph/decode.h55
-rw-r--r--include/linux/ceph/libceph.h4
-rw-r--r--include/linux/ceph/mon_client.h7
-rw-r--r--include/linux/ceph/msgpool.h1
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/ceph/osdmap.h15
-rw-r--r--include/linux/ceph/string_table.h62
-rw-r--r--include/linux/context_tracking.h38
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/dma-attrs.h71
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-iommu.h6
-rw-r--r--include/linux/dma-mapping.h128
-rw-r--r--include/linux/ds17287rtc.h66
-rw-r--r--include/linux/dynamic_debug.h60
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/export.h2
-rw-r--r--include/linux/extable.h32
-rw-r--r--include/linux/fence-array.h73
-rw-r--r--include/linux/fence.h15
-rw-r--r--include/linux/firmware.h8
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/i8042.h6
-rw-r--r--include/linux/init.h6
-rw-r--r--include/linux/io-mapping.h10
-rw-r--r--include/linux/iommu.h3
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h213
-rw-r--r--include/linux/jump_label.h46
-rw-r--r--include/linux/kasan.h3
-rw-r--r--include/linux/kconfig.h31
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kexec.h46
-rw-r--r--include/linux/kvm_host.h70
-rw-r--r--include/linux/lsm_hooks.h2
-rw-r--r--include/linux/mailbox/brcm-message.h56
-rw-r--r--include/linux/mbus.h2
-rw-r--r--include/linux/mc146818rtc.h5
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h6
-rw-r--r--include/linux/mfd/altera-a10sr.h85
-rw-r--r--include/linux/mfd/arizona/core.h10
-rw-r--r--include/linux/mfd/arizona/registers.h6
-rw-r--r--include/linux/mfd/cros_ec.h15
-rw-r--r--include/linux/mfd/cros_ec_commands.h31
-rw-r--r--include/linux/mfd/dbx500-prcmu.h10
-rw-r--r--include/linux/mfd/hi655x-pmic.h25
-rw-r--r--include/linux/mfd/stmpe.h22
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h2
-rw-r--r--include/linux/mfd/twl6040.h5
-rw-r--r--include/linux/mlx4/device.h7
-rw-r--r--include/linux/mlx5/cq.h2
-rw-r--r--include/linux/mlx5/driver.h6
-rw-r--r--include/linux/mlx5/qp.h4
-rw-r--r--include/linux/mlx5/srq.h25
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmc/card.h36
-rw-r--r--include/linux/mmc/dw_mmc.h9
-rw-r--r--include/linux/mmc/host.h14
-rw-r--r--include/linux/mmc/mmc.h3
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/module.h37
-rw-r--r--include/linux/mtd/nand.h1
-rw-r--r--include/linux/mtd/spi-nor.h8
-rw-r--r--include/linux/nfs4.h11
-rw-r--r--include/linux/nfs_fs.h3
-rw-r--r--include/linux/nfs_xdr.h24
-rw-r--r--include/linux/of.h16
-rw-r--r--include/linux/page_ref.h9
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--include/linux/pci-acpi.h2
-rw-r--r--include/linux/pci-ecam.h (renamed from drivers/pci/ecam.h)4
-rw-r--r--include/linux/pci.h94
-rw-r--r--include/linux/platform_data/asoc-ti-mcbsp.h4
-rw-r--r--include/linux/platform_data/clk-ux500.h17
-rw-r--r--include/linux/platform_data/media/ir-rx51.h3
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/platform_data/omapdss.h37
-rw-r--r--include/linux/platform_data/rtc-ds2404.h (renamed from include/linux/rtc-ds2404.h)0
-rw-r--r--include/linux/platform_data/rtc-m48t86.h (renamed from include/linux/m48t86.h)0
-rw-r--r--include/linux/platform_data/rtc-v3020.h (renamed from include/linux/rtc-v3020.h)0
-rw-r--r--include/linux/printk.h12
-rw-r--r--include/linux/property.h3
-rw-r--r--include/linux/pwm.h113
-rw-r--r--include/linux/qcom_scm.h8
-rw-r--r--include/linux/radix-tree.h2
-rw-r--r--include/linux/ratelimit.h38
-rw-r--r--include/linux/reset-controller.h4
-rw-r--r--include/linux/reset.h4
-rw-r--r--include/linux/rio.h13
-rw-r--r--include/linux/rio_ids.h2
-rw-r--r--include/linux/rio_regs.h167
-rw-r--r--include/linux/rtc/ds1286.h (renamed from include/linux/ds1286.h)0
-rw-r--r--include/linux/sched.h63
-rw-r--r--include/linux/scpi_protocol.h2
-rw-r--r--include/linux/security.h4
-rw-r--r--include/linux/serio.h24
-rw-r--r--include/linux/slab.h12
-rw-r--r--include/linux/soc/qcom/wcnss_ctrl.h8
-rw-r--r--include/linux/soc/renesas/rcar-sysc.h2
-rw-r--r--include/linux/sunrpc/auth.h9
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/sunrpc/gss_api.h2
-rw-r--r--include/linux/sunrpc/sched.h5
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h2
-rw-r--r--include/linux/sunrpc/xprtsock.h1
-rw-r--r--include/linux/swiotlb.h10
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/thread_info.h55
-rw-r--r--include/linux/uaccess.h4
-rw-r--r--include/linux/vga_switcheroo.h2
-rw-r--r--include/linux/virtio_config.h13
-rw-r--r--include/linux/virtio_vsock.h154
-rw-r--r--include/linux/watchdog.h6
-rw-r--r--include/linux/ww_mutex.h4
-rw-r--r--include/media/videobuf2-core.h6
-rw-r--r--include/media/videobuf2-dma-contig.h2
-rw-r--r--include/misc/cxl-base.h14
-rw-r--r--include/misc/cxl.h114
-rw-r--r--include/net/af_vsock.h6
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/sctp/constants.h2
-rw-r--r--include/rdma/ib_sa.h13
-rw-r--r--include/rdma/ib_verbs.h125
-rw-r--r--include/rdma/opa_port_info.h16
-rw-r--r--include/rdma/rdma_cm.h4
-rw-r--r--include/rdma/rdma_vt.h7
-rw-r--r--include/rdma/rdmavt_mr.h1
-rw-r--r--include/rdma/rdmavt_qp.h92
-rw-r--r--include/scsi/viosrp.h (renamed from drivers/scsi/ibmvscsi/viosrp.h)13
-rw-r--r--include/soc/imx/cpuidle.h25
-rw-r--r--include/soc/tegra/cpuidle.h2
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/cs35l33.h48
-rw-r--r--include/sound/hdmi-codec.h13
-rw-r--r--include/sound/omap-hdmi-audio.h9
-rw-r--r--include/sound/simple_card.h11
-rw-r--r--include/sound/simple_card_utils.h36
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/sound/soc.h11
-rw-r--r--include/target/target_core_backend.h2
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/trace/events/bcache.h8
-rw-r--r--include/trace/events/block.h14
-rw-r--r--include/trace/events/btrfs.h404
-rw-r--r--include/trace/events/kvm.h5
-rw-r--r--include/trace/events/sunrpc.h118
-rw-r--r--include/trace/events/timer.h14
-rw-r--r--include/trace/events/vsock_virtio_transport_common.h144
-rw-r--r--include/uapi/drm/amdgpu_drm.h32
-rw-r--r--include/uapi/drm/i915_drm.h3
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/vc4_drm.h13
-rw-r--r--include/uapi/drm/vgem_drm.h62
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/btrfs.h2
-rw-r--r--include/uapi/linux/capability.h2
-rw-r--r--include/uapi/linux/elf.h14
-rw-r--r--include/uapi/linux/kvm.h18
-rw-r--r--include/uapi/linux/nilfs2_api.h292
-rw-r--r--include/uapi/linux/nilfs2_ondisk.h (renamed from include/linux/nilfs2_fs.h)328
-rw-r--r--include/uapi/linux/rio_cm_cdev.h78
-rw-r--r--include/uapi/linux/sysctl.h2
-rw-r--r--include/uapi/linux/vhost.h33
-rw-r--r--include/uapi/linux/virtio_config.h10
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_vsock.h94
-rw-r--r--include/uapi/misc/cxl.h17
-rw-r--r--include/uapi/rdma/Kbuild1
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h2
-rw-r--r--include/uapi/rdma/ib_user_verbs.h95
-rw-r--r--include/uapi/rdma/rdma_user_cm.h9
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h144
-rw-r--r--include/video/imx-ipu-v3.h3
-rw-r--r--include/video/omap-panel-data.h157
-rw-r--r--include/video/omapfb_dss.h (renamed from include/video/omapdss.h)80
-rw-r--r--include/xen/swiotlb-xen.h12
-rw-r--r--init/Kconfig10
-rw-r--r--init/main.c8
-rw-r--r--ipc/msg.c2
-rw-r--r--ipc/msgutil.c2
-rw-r--r--ipc/namespace.c2
-rw-r--r--ipc/sem.c12
-rw-r--r--kernel/configs/android-base.config152
-rw-r--r--kernel/configs/android-recommended.config121
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/jump_label.c61
-rw-r--r--kernel/kexec.c3
-rw-r--r--kernel/kexec_core.c69
-rw-r--r--kernel/ksysfs.c6
-rw-r--r--kernel/livepatch/core.c2
-rw-r--r--kernel/module.c122
-rw-r--r--kernel/panic.c13
-rw-r--r--kernel/printk/printk.c170
-rw-r--r--kernel/ptrace.c4
-rw-r--r--kernel/relay.c34
-rw-r--r--kernel/seccomp.c6
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/task_work.c10
-rw-r--r--kernel/trace/Makefile4
-rw-r--r--kernel/trace/blktrace.c6
-rw-r--r--kernel/trace/trace_events_hist.c14
-rw-r--r--lib/Kconfig.debug13
-rw-r--r--lib/crc32.c16
-rw-r--r--lib/dma-noop.c9
-rw-r--r--lib/dynamic_debug.c7
-rw-r--r--lib/iommu-helper.c3
-rw-r--r--lib/mpi/mpicoder.c14
-rw-r--r--lib/radix-tree.c14
-rw-r--r--lib/ratelimit.c10
-rw-r--r--lib/strncpy_from_user.c8
-rw-r--r--lib/strnlen_user.c7
-rw-r--r--lib/swiotlb.c13
-rw-r--r--lib/ubsan.c2
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/Makefile4
-rw-r--r--mm/backing-dev.c19
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/gup.c1
-rw-r--r--mm/hugetlb.c16
-rw-r--r--mm/kasan/kasan.c65
-rw-r--r--mm/kasan/kasan.h15
-rw-r--r--mm/kasan/quarantine.c19
-rw-r--r--mm/kasan/report.c85
-rw-r--r--mm/memblock.c9
-rw-r--r--mm/memcontrol.c23
-rw-r--r--mm/memory.c3
-rw-r--r--mm/mmap.c8
-rw-r--r--mm/page_alloc.c28
-rw-r--r--mm/page_io.c5
-rw-r--r--mm/shmem.c3
-rw-r--r--mm/slab.c36
-rw-r--r--mm/slub.c43
-rw-r--r--mm/sparse-vmemmap.c2
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/usercopy.c268
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/ceph/Makefile2
-rw-r--r--net/ceph/ceph_common.c2
-rw-r--r--net/ceph/ceph_fs.c30
-rw-r--r--net/ceph/debugfs.c12
-rw-r--r--net/ceph/mon_client.c4
-rw-r--r--net/ceph/msgpool.c1
-rw-r--r--net/ceph/osd_client.c49
-rw-r--r--net/ceph/osdmap.c58
-rw-r--r--net/ceph/string_table.c111
-rw-r--r--net/ipv4/tcp_output.c3
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/sctp/output.c3
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/sctp/ulpqueue.c4
-rw-r--r--net/sunrpc/auth.c8
-rw-r--r--net/sunrpc/auth_generic.c9
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c3
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c12
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c5
-rw-r--r--net/sunrpc/auth_null.c1
-rw-r--r--net/sunrpc/auth_unix.c1
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/sched.c67
-rw-r--r--net/sunrpc/svc.c8
-rw-r--r--net/sunrpc/svc_xprt.c51
-rw-r--r--net/sunrpc/svcsock.c150
-rw-r--r--net/sunrpc/xprt.c14
-rw-r--r--net/sunrpc/xprtmultipath.c8
-rw-r--r--net/sunrpc/xprtrdma/Makefile2
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c378
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c369
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c122
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c274
-rw-r--r--net/sunrpc/xprtrdma/transport.c40
-rw-r--r--net/sunrpc/xprtrdma/verbs.c242
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h118
-rw-r--r--net/sunrpc/xprtsock.c125
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/vmw_vsock/Kconfig20
-rw-r--r--net/vmw_vsock/Makefile6
-rw-r--r--net/vmw_vsock/af_vsock.c25
-rw-r--r--net/vmw_vsock/virtio_transport.c624
-rw-r--r--net/vmw_vsock/virtio_transport_common.c992
-rw-r--r--net/vmw_vsock/vmci_transport.c2
-rw-r--r--net/wireless/chan.c2
-rw-r--r--samples/kprobes/jprobe_example.c6
-rw-r--r--samples/kprobes/kprobe_example.c29
-rw-r--r--samples/kprobes/kretprobe_example.c14
-rw-r--r--scripts/Kbuild.include12
-rw-r--r--scripts/Makefile2
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.clean4
-rw-r--r--scripts/Makefile.gcc-plugins60
-rw-r--r--scripts/Makefile.host55
-rw-r--r--scripts/Makefile.lib9
-rw-r--r--scripts/basic/bin2c.c3
-rwxr-xr-xscripts/checkpatch.pl29
-rwxr-xr-xscripts/coccicheck96
-rw-r--r--scripts/coccinelle/free/devm_free.cocci26
-rw-r--r--scripts/coccinelle/free/ifnullfree.cocci4
-rw-r--r--scripts/coccinelle/free/kfree.cocci18
-rw-r--r--scripts/coccinelle/free/kfreeaddr.cocci6
-rw-r--r--scripts/coccinelle/iterators/device_node_continue.cocci3
-rw-r--r--scripts/coccinelle/misc/noderef.cocci18
-rwxr-xr-xscripts/gcc-plugin.sh65
-rw-r--r--scripts/gcc-plugins/Makefile29
-rw-r--r--scripts/gcc-plugins/cyc_complexity_plugin.c73
-rw-r--r--scripts/gcc-plugins/gcc-common.h830
-rw-r--r--scripts/gcc-plugins/gcc-generate-gimple-pass.h175
-rw-r--r--scripts/gcc-plugins/gcc-generate-ipa-pass.h289
-rw-r--r--scripts/gcc-plugins/gcc-generate-rtl-pass.h175
-rw-r--r--scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h175
-rw-r--r--scripts/gcc-plugins/sancov_plugin.c144
-rwxr-xr-xscripts/get_maintainer.pl20
-rwxr-xr-xscripts/link-vmlinux.sh2
-rwxr-xr-xscripts/package/builddeb12
-rw-r--r--scripts/recordmcount.c9
-rwxr-xr-xscripts/setlocalversion2
-rw-r--r--security/Kconfig28
-rw-r--r--security/security.c2
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/Makefile1
-rw-r--r--sound/arm/Kconfig15
-rw-r--r--sound/core/compress_offload.c67
-rw-r--r--sound/core/control.c32
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c10
-rw-r--r--sound/core/seq/seq_timer.c23
-rw-r--r--sound/core/seq/seq_timer.h2
-rw-r--r--sound/hda/array.c4
-rw-r--r--sound/hda/hdmi_chmap.c28
-rw-r--r--sound/i2c/other/ak4114.c2
-rw-r--r--sound/i2c/other/ak4117.c2
-rw-r--r--sound/isa/ad1848/ad1848.c13
-rw-r--r--sound/isa/adlib.c13
-rw-r--r--sound/isa/cmi8328.c13
-rw-r--r--sound/isa/cs423x/cs4231.c13
-rw-r--r--sound/isa/galaxy/galaxy.c13
-rw-r--r--sound/isa/gus/gusclassic.c13
-rw-r--r--sound/isa/gus/gusextreme.c13
-rw-r--r--sound/isa/gus/gusmax.c13
-rw-r--r--sound/isa/sb/jazz16.c13
-rw-r--r--sound/isa/sb/sb8.c13
-rw-r--r--sound/isa/sc6000.c13
-rw-r--r--sound/oss/ad1848.c2
-rw-r--r--sound/oss/aedsp16.c14
-rw-r--r--sound/oss/sound_firmware.h29
-rw-r--r--sound/oss/sound_timer.c2
-rw-r--r--sound/oss/sys_timer.c2
-rw-r--r--sound/pci/ctxfi/cthw20k2.c34
-rw-r--r--sound/pci/echoaudio/echoaudio.c6
-rw-r--r--sound/pci/hda/hda_codec.c8
-rw-r--r--sound/pci/hda/hda_generic.c4
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c5
-rw-r--r--sound/pci/hda/patch_realtek.c75
-rw-r--r--sound/pci/mixart/mixart_mixer.c2
-rw-r--r--sound/pci/riptide/riptide.c2
-rw-r--r--sound/ppc/awacs.c1
-rw-r--r--sound/sh/aica.c16
-rw-r--r--sound/soc/atmel/Kconfig1
-rw-r--r--sound/soc/atmel/atmel-classd.c5
-rw-r--r--sound/soc/atmel/atmel-pdmic.c5
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c2
-rw-r--r--sound/soc/bcm/Kconfig9
-rw-r--r--sound/soc/bcm/Makefile5
-rw-r--r--sound/soc/bcm/cygnus-pcm.c861
-rw-r--r--sound/soc/bcm/cygnus-ssp.c1529
-rw-r--r--sound/soc/bcm/cygnus-ssp.h139
-rw-r--r--sound/soc/codecs/Kconfig38
-rw-r--r--sound/soc/codecs/Makefile14
-rw-r--r--sound/soc/codecs/ab8500-codec.c33
-rw-r--r--sound/soc/codecs/adau-utils.c61
-rw-r--r--sound/soc/codecs/adau-utils.h7
-rw-r--r--sound/soc/codecs/adau1373.c38
-rw-r--r--sound/soc/codecs/adau1761-i2c.c2
-rw-r--r--sound/soc/codecs/adau1761-spi.c2
-rw-r--r--sound/soc/codecs/adau1781-i2c.c2
-rw-r--r--sound/soc/codecs/adau1781-spi.c2
-rw-r--r--sound/soc/codecs/adau17x1.c251
-rw-r--r--sound/soc/codecs/adau17x1.h6
-rw-r--r--sound/soc/codecs/adau7002.c80
-rw-r--r--sound/soc/codecs/ak4613.c12
-rw-r--r--sound/soc/codecs/ak4642.c13
-rw-r--r--sound/soc/codecs/arizona.c91
-rw-r--r--sound/soc/codecs/arizona.h22
-rw-r--r--sound/soc/codecs/bt-sco.c52
-rw-r--r--sound/soc/codecs/cs35l33.c1303
-rw-r--r--sound/soc/codecs/cs35l33.h221
-rw-r--r--sound/soc/codecs/cs47l24.c19
-rw-r--r--sound/soc/codecs/cs53l30.c1143
-rw-r--r--sound/soc/codecs/cs53l30.h459
-rw-r--r--sound/soc/codecs/da7219-aad.c103
-rw-r--r--sound/soc/codecs/da7219.c34
-rw-r--r--sound/soc/codecs/hdac_hdmi.c7
-rw-r--r--sound/soc/codecs/hdmi-codec.c15
-rw-r--r--sound/soc/codecs/max98504.c383
-rw-r--r--sound/soc/codecs/max98504.h59
-rw-r--r--sound/soc/codecs/max9860.c753
-rw-r--r--sound/soc/codecs/max9860.h162
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/max9867.c0
-rw-r--r--[-rwxr-xr-x]sound/soc/codecs/max9867.h0
-rw-r--r--sound/soc/codecs/nau8825.c1256
-rw-r--r--sound/soc/codecs/nau8825.h127
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/pcm179x.c2
-rw-r--r--sound/soc/codecs/pcm5102a.c1
-rw-r--r--sound/soc/codecs/rt286.c7
-rw-r--r--sound/soc/codecs/rt5514-spi.c447
-rw-r--r--sound/soc/codecs/rt5514-spi.h38
-rw-r--r--sound/soc/codecs/rt5514.c168
-rw-r--r--sound/soc/codecs/rt5514.h7
-rw-r--r--sound/soc/codecs/rt5645.c24
-rw-r--r--sound/soc/codecs/rt5645.h3
-rw-r--r--sound/soc/codecs/rt5670.c3
-rw-r--r--sound/soc/codecs/sgtl5000.c431
-rw-r--r--sound/soc/codecs/sgtl5000.h2
-rw-r--r--sound/soc/codecs/tas571x.c188
-rw-r--r--sound/soc/codecs/tas571x.h40
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h134
-rw-r--r--sound/soc/codecs/tpa6130a2.c394
-rw-r--r--sound/soc/codecs/tpa6130a2.h14
-rw-r--r--sound/soc/codecs/wm5102.c1
-rw-r--r--sound/soc/codecs/wm5110.c19
-rw-r--r--sound/soc/codecs/wm8731.c5
-rw-r--r--sound/soc/codecs/wm8753.c3
-rw-r--r--sound/soc/codecs/wm8985.c143
-rw-r--r--sound/soc/codecs/wm8985.h38
-rw-r--r--sound/soc/codecs/wm8998.c1
-rw-r--r--sound/soc/codecs/wm_adsp.c34
-rw-r--r--sound/soc/codecs/wm_adsp.h4
-rw-r--r--sound/soc/davinci/davinci-mcasp.c9
-rw-r--r--sound/soc/dwc/Kconfig9
-rw-r--r--sound/soc/dwc/Makefile4
-rw-r--r--sound/soc/dwc/designware_i2s.c231
-rw-r--r--sound/soc/dwc/designware_pcm.c225
-rw-r--r--sound/soc/dwc/local.h128
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/fsl_spdif.c2
-rw-r--r--sound/soc/generic/Kconfig4
-rw-r--r--sound/soc/generic/Makefile2
-rw-r--r--sound/soc/generic/simple-card-utils.c97
-rw-r--r--sound/soc/generic/simple-card.c276
-rw-r--r--sound/soc/intel/Kconfig73
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c44
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c460
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c118
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c20
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c51
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c51
-rw-r--r--sound/soc/intel/boards/skl_rt286.c9
-rw-r--r--sound/soc/intel/common/Makefile4
-rw-r--r--sound/soc/intel/common/sst-acpi.h4
-rw-r--r--sound/soc/intel/common/sst-dsp-priv.h4
-rw-r--r--sound/soc/intel/common/sst-dsp.c69
-rw-r--r--sound/soc/intel/common/sst-dsp.h2
-rw-r--r--sound/soc/intel/common/sst-firmware.c68
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c1
-rw-r--r--sound/soc/intel/skylake/Makefile2
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c202
-rw-r--r--sound/soc/intel/skylake/skl-messages.c53
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c40
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.h22
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c93
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c260
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h102
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c4
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h18
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c256
-rw-r--r--sound/soc/intel/skylake/skl-sst.c124
-rw-r--r--sound/soc/intel/skylake/skl-topology.c230
-rw-r--r--sound/soc/intel/skylake/skl-topology.h7
-rw-r--r--sound/soc/intel/skylake/skl.c39
-rw-r--r--sound/soc/intel/skylake/skl.h10
-rw-r--r--sound/soc/mediatek/Kconfig38
-rw-r--r--sound/soc/mediatek/Makefile10
-rw-r--r--sound/soc/mediatek/common/Makefile16
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c379
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.h45
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c90
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.h23
-rw-r--r--sound/soc/mediatek/common/mtk-base-afe.h104
-rw-r--r--sound/soc/mediatek/mt2701/Makefile19
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c464
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h38
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-common.h172
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c1656
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c412
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-reg.h186
-rw-r--r--sound/soc/mediatek/mt8173/Makefile7
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-common.h73
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c (renamed from sound/soc/mediatek/mtk-afe-pcm.c)925
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c (renamed from sound/soc/mediatek/mt8173-max98090.c)2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c (renamed from sound/soc/mediatek/mt8173-rt5650-rt5514.c)2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c (renamed from sound/soc/mediatek/mt8173-rt5650-rt5676.c)4
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c (renamed from sound/soc/mediatek/mt8173-rt5650.c)73
-rw-r--r--sound/soc/mediatek/mtk-afe-common.h101
-rw-r--r--sound/soc/omap/Kconfig3
-rw-r--r--sound/soc/omap/mcbsp.c21
-rw-r--r--sound/soc/omap/mcbsp.h3
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c1
-rw-r--r--sound/soc/omap/omap-mcbsp.c5
-rw-r--r--sound/soc/omap/omap-mcpdm.c74
-rw-r--r--sound/soc/omap/rx51.c46
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c71
-rw-r--r--sound/soc/rockchip/rockchip_i2s.h11
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c65
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c17
-rw-r--r--sound/soc/samsung/Kconfig8
-rw-r--r--sound/soc/samsung/Makefile2
-rw-r--r--sound/soc/samsung/ac97.c3
-rw-r--r--sound/soc/samsung/dma.h9
-rw-r--r--sound/soc/samsung/dmaengine.c31
-rw-r--r--sound/soc/samsung/i2s.c53
-rw-r--r--sound/soc/samsung/odroidx2_max98090.c185
-rw-r--r--sound/soc/samsung/pcm.c3
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c2
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c3
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c3
-rw-r--r--sound/soc/samsung/spdif.c3
-rw-r--r--sound/soc/sh/Kconfig1
-rw-r--r--sound/soc/sh/rcar/adg.c18
-rw-r--r--sound/soc/sh/rcar/gen.c12
-rw-r--r--sound/soc/sh/rcar/rsrc-card.c99
-rw-r--r--sound/soc/soc-compress.c5
-rw-r--r--sound/soc/soc-dapm.c61
-rw-r--r--sound/soc/soc-pcm.c43
-rw-r--r--sound/soc/sti/uniperif_player.c4
-rw-r--r--sound/soc/sunxi/Kconfig9
-rw-r--r--sound/soc/sunxi/Makefile2
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c701
-rw-r--r--sound/sound_firmware.c77
-rw-r--r--sound/usb/mixer_maps.c6
-rw-r--r--tools/include/linux/bitmap.h37
-rw-r--r--tools/lib/bitmap.c44
-rw-r--r--tools/lib/traceevent/.gitignore1
-rw-r--r--tools/perf/Documentation/perf-record.txt3
-rw-r--r--tools/perf/Makefile.config (renamed from tools/perf/config/Makefile)0
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/builtin-record.c1
-rw-r--r--tools/perf/builtin-top.c6
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build4
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/bitmap.c53
-rw-r--r--tools/perf/tests/bpf-script-example.c4
-rw-r--r--tools/perf/tests/builtin-test.c4
-rw-r--r--tools/perf/tests/code-reading.c100
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/ui/browsers/annotate.c9
-rw-r--r--tools/perf/ui/gtk/annotate.c8
-rw-r--r--tools/perf/util/annotate.c116
-rw-r--r--tools/perf/util/annotate.h22
-rw-r--r--tools/perf/util/evlist.c22
-rw-r--r--tools/perf/util/evsel.c30
-rw-r--r--tools/perf/util/evsel.h2
-rw-r--r--tools/perf/util/hist.c15
-rw-r--r--tools/perf/util/hist.h4
-rw-r--r--tools/perf/util/target.c6
-rw-r--r--tools/testing/radix-tree/linux/gfp.h2
-rw-r--r--tools/testing/selftests/exec/Makefile3
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/lib/printf.sh0
-rw-r--r--tools/testing/selftests/media_tests/.gitignore2
-rw-r--r--tools/testing/selftests/media_tests/Makefile4
-rwxr-xr-xtools/testing/selftests/media_tests/bind_unbind_sample.sh12
-rw-r--r--tools/testing/selftests/media_tests/media_device_open.c81
-rw-r--r--tools/testing/selftests/media_tests/media_device_test.c19
-rwxr-xr-xtools/testing/selftests/media_tests/open_loop_test.sh10
-rw-r--r--tools/testing/selftests/media_tests/regression_test.txt43
-rw-r--r--tools/testing/selftests/media_tests/video_device_test.c100
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh422
-rw-r--r--tools/testing/selftests/powerpc/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/alignment/.gitignore5
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile10
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c41
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.c53
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.h26
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_unaligned.c41
-rw-r--r--tools/testing/selftests/powerpc/alignment/paste_last_unaligned.c43
-rw-r--r--tools/testing/selftests/powerpc/alignment/paste_unaligned.c43
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c17
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/futex_bench.c42
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/mmap_bench.c41
-rw-r--r--tools/testing/selftests/powerpc/instructions.h68
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/mm/prot_sao.c42
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c143
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h39
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c37
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.c6
-rw-r--r--tools/testing/selftests/powerpc/reg.h5
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-exec.c70
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall.c15
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h23
-rw-r--r--tools/testing/selftests/powerpc/utils.h5
-rw-r--r--tools/testing/selftests/timers/Makefile3
-rw-r--r--tools/testing/selftests/timers/rtctest.c13
-rw-r--r--tools/testing/selftests/timers/set-tz.c119
-rw-r--r--tools/testing/selftests/vm/compaction_test.c8
-rw-r--r--tools/testing/selftests/vm/on-fault-limit.c2
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c15
-rw-r--r--virt/kvm/arm/vgic-v2-emul.c856
-rw-r--r--virt/kvm/arm/vgic-v2.c274
-rw-r--r--virt/kvm/arm/vgic-v3-emul.c1074
-rw-r--r--virt/kvm/arm/vgic-v3.c279
-rw-r--r--virt/kvm/arm/vgic.c2417
-rw-r--r--virt/kvm/arm/vgic.h140
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c13
-rw-r--r--virt/kvm/arm/vgic/vgic-irqfd.c116
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c1500
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c10
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c247
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c64
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h31
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c12
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c29
-rw-r--r--virt/kvm/arm/vgic/vgic.c112
-rw-r--r--virt/kvm/arm/vgic/vgic.h38
-rw-r--r--virt/kvm/irqchip.c35
-rw-r--r--virt/kvm/kvm_main.c110
4843 files changed, 205105 insertions, 138342 deletions
diff --git a/.cocciconfig b/.cocciconfig
new file mode 100644
index 000000000000..43967c6b2015
--- /dev/null
+++ b/.cocciconfig
@@ -0,0 +1,3 @@
+[spatch]
+ options = --timeout 200
+ options = --use-gitgrep
diff --git a/.gitignore b/.gitignore
index 0c320bf02586..c2ed4ecb0acd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@ modules.builtin
Module.symvers
*.dwo
*.su
+*.c.[012]*.*

#
# Top-level generic files
@@ -66,6 +67,7 @@ Module.symvers
#
!.gitignore
!.mailmap
+!.cocciconfig

#
# Generated include files
diff --git a/.mailmap b/.mailmap
index d2acafb09e60..2a91c14c80bf 100644
--- a/.mailmap
+++ b/.mailmap
@@ -92,9 +92,17 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Mark Brown <broonie@sirena.org.uk>
Matthieu CASTET <castet.matthieu@free.fr>
-Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
+Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@redhat.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
+Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
Mayuresh Janorkar <mayur@ti.com>
@@ -130,7 +138,10 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
-Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
+Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
+Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
+Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
diff --git a/Documentation/ABI/testing/sysfs-class-pwm b/Documentation/ABI/testing/sysfs-class-pwm
index c479d77b67c5..c20e61354561 100644
--- a/Documentation/ABI/testing/sysfs-class-pwm
+++ b/Documentation/ABI/testing/sysfs-class-pwm
@@ -77,3 +77,12 @@ Description:
Enable/disable the PWM signal.
0 is disabled
1 is enabled
+
+What: /sys/class/pwm/pwmchipN/pwmX/capture
+Date: June 2016
+KernelVersion: 4.8
+Contact: Lee Jones <lee.jones@linaro.org>
+Description:
+ Capture information about a PWM signal. The output format is a
+		pair of unsigned integers (period and duty cycle), separated by a
+ single space.
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 45ef3f279c3b..1d26eeb6b5f6 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -369,35 +369,32 @@ See also dma_map_single().
dma_addr_t
dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
void
dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
int
dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
void
dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
The four functions above are just like the counterpart functions
without the _attrs suffixes, except that they pass an optional
-struct dma_attrs*.
-
-struct dma_attrs encapsulates a set of "DMA attributes". For the
-definition of struct dma_attrs see linux/dma-attrs.h.
+dma_attrs.
The interpretation of DMA attributes is architecture-specific, and
each attribute should be documented in Documentation/DMA-attributes.txt.
-If struct dma_attrs* is NULL, the semantics of each of these
-functions is identical to those of the corresponding function
+If attrs is 0, the semantics of each of these functions
+are identical to those of the corresponding function
without the _attrs suffix. As a result dma_map_single_attrs()
can generally replace dma_map_single(), etc.
@@ -405,15 +402,15 @@ As an example of the use of the *_attrs functions, here's how
you could pass an attribute DMA_ATTR_FOO when mapping memory
for DMA:
-#include <linux/dma-attrs.h>
-/* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and
+#include <linux/dma-mapping.h>
+/* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and
* documented in Documentation/DMA-attributes.txt */
...
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_FOO, &attrs);
+	unsigned long attr = 0;
+ attr |= DMA_ATTR_FOO;
....
- n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attr);
+ n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr);
....
Architectures that care about DMA_ATTR_FOO would check for its
@@ -422,12 +419,10 @@ routines, e.g.:
void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
....
- int foo = dma_get_attr(DMA_ATTR_FOO, attrs);
- ....
- if (foo)
+ if (attrs & DMA_ATTR_FOO)
/* twizzle the frobnozzle */
....
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index e8cf9cf873b3..2d455a5cf671 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -2,7 +2,7 @@
==============
This document describes the semantics of the DMA attributes that are
-defined in linux/dma-attrs.h.
+defined in linux/dma-mapping.h.
DMA_ATTR_WRITE_BARRIER
----------------------
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 01bab5014a4a..64460a897f56 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -6,8 +6,6 @@
# To add a new book the only step required is to add the book to the
# list of DOCBOOKS.
-ifeq ($(IGNORE_DOCBOOKS),)
-
DOCBOOKS := z8530book.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
writing_usb_driver.xml networking.xml \
@@ -16,10 +14,16 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
- tracepoint.xml gpu.xml media_api.xml w1.xml \
+ tracepoint.xml w1.xml \
writing_musb_glue_layer.xml crypto-API.xml iio.xml
-include Documentation/DocBook/media/Makefile
+ifeq ($(DOCBOOKS),)
+
+# Skip DocBook build if the user explicitly requested no DOCBOOKS.
+.DEFAULT:
+ @echo " SKIP DocBook $@ target (DOCBOOKS=\"\" specified)."
+
+else
###
# The build process is as follows (targets):
@@ -49,7 +53,6 @@ pdfdocs: $(PDF)
HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS)))
htmldocs: $(HTML)
$(call cmd,build_main_index)
- $(call install_media_images)
MAN := $(patsubst %.xml, %.9, $(BOOKS))
mandocs: $(MAN)
@@ -217,19 +220,7 @@ silent_gen_xml = :
-e "s/>/\\&gt;/g"; \
echo "</programlisting>") > $@
-else
-
-# Needed, due to cleanmediadocs
-include Documentation/DocBook/media/Makefile
-
-htmldocs:
-pdfdocs:
-psdocs:
-xmldocs:
-installmandocs:
-
-endif # IGNORE_DOCBOOKS
-
+endif # DOCBOOKS=""
###
# Help targets as used by the top-level makefile
@@ -246,7 +237,7 @@ dochelp:
@echo ' make DOCBOOKS="s1.xml s2.xml" [target] Generate only docs s1.xml s2.xml'
@echo ' valid values for DOCBOOKS are: $(DOCBOOKS)'
@echo
- @echo " make IGNORE_DOCBOOKS=1 [target] Don't generate docs from Docbook"
+ @echo " make DOCBOOKS=\"\" [target] Don't generate docs from Docbook"
@echo ' This is useful to generate only the ReST docs (Sphinx)'
@@ -269,7 +260,7 @@ clean-files := $(DOCBOOKS) \
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
-cleandocs: cleanmediadocs
+cleandocs:
$(Q)rm -f $(call objectify, $(clean-files))
$(Q)rm -rf $(call objectify, $(clean-dirs))
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 99cdc05bbb7a..9c10030eb2be 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -161,6 +161,10 @@ X!Edrivers/base/interface.c
!Iinclude/linux/fence.h
!Edrivers/dma-buf/seqno-fence.c
!Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/fence-array.c
+!Iinclude/linux/fence-array.h
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
!Edrivers/dma-buf/sync_file.c
!Iinclude/linux/sync_file.h
</sect2>
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
deleted file mode 100644
index 7586bf75f62e..000000000000
--- a/Documentation/DocBook/gpu.tmpl
+++ /dev/null
@@ -1,3540 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="gpuDevelopersGuide">
- <bookinfo>
- <title>Linux GPU Driver Developer's Guide</title>
-
- <authorgroup>
- <author>
- <firstname>Jesse</firstname>
- <surname>Barnes</surname>
- <contrib>Initial version</contrib>
- <affiliation>
- <orgname>Intel Corporation</orgname>
- <address>
- <email>jesse.barnes@intel.com</email>
- </address>
- </affiliation>
- </author>
- <author>
- <firstname>Laurent</firstname>
- <surname>Pinchart</surname>
- <contrib>Driver internals</contrib>
- <affiliation>
- <orgname>Ideas on board SPRL</orgname>
- <address>
- <email>laurent.pinchart@ideasonboard.com</email>
- </address>
- </affiliation>
- </author>
- <author>
- <firstname>Daniel</firstname>
- <surname>Vetter</surname>
- <contrib>Contributions all over the place</contrib>
- <affiliation>
- <orgname>Intel Corporation</orgname>
- <address>
- <email>daniel.vetter@ffwll.ch</email>
- </address>
- </affiliation>
- </author>
- <author>
- <firstname>Lukas</firstname>
- <surname>Wunner</surname>
- <contrib>vga_switcheroo documentation</contrib>
- <affiliation>
- <address>
- <email>lukas@wunner.de</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <copyright>
- <year>2008-2009</year>
- <year>2013-2014</year>
- <holder>Intel Corporation</holder>
- </copyright>
- <copyright>
- <year>2012</year>
- <holder>Laurent Pinchart</holder>
- </copyright>
- <copyright>
- <year>2015</year>
- <holder>Lukas Wunner</holder>
- </copyright>
-
- <legalnotice>
- <para>
- The contents of this file may be used under the terms of the GNU
- General Public License version 2 (the "GPL") as distributed in
- the kernel source COPYING file.
- </para>
- </legalnotice>
-
- <revhistory>
- <!-- Put document revisions here, newest first. -->
- <revision>
- <revnumber>1.0</revnumber>
- <date>2012-07-13</date>
- <authorinitials>LP</authorinitials>
- <revremark>Added extensive documentation about driver internals.
- </revremark>
- </revision>
- <revision>
- <revnumber>1.1</revnumber>
- <date>2015-10-11</date>
- <authorinitials>LW</authorinitials>
- <revremark>Added vga_switcheroo documentation.
- </revremark>
- </revision>
- </revhistory>
- </bookinfo>
-
-<toc></toc>
-
-<part id="drmCore">
- <title>DRM Core</title>
- <partintro>
- <para>
- This first part of the GPU Driver Developer's Guide documents core DRM
- code, helper libraries for writing drivers and generic userspace
- interfaces exposed by DRM drivers.
- </para>
- </partintro>
-
- <chapter id="drmIntroduction">
- <title>Introduction</title>
- <para>
- The Linux DRM layer contains code intended to support the needs
- of complex graphics devices, usually containing programmable
- pipelines well suited to 3D graphics acceleration. Graphics
- drivers in the kernel may make use of DRM functions to make
- tasks like memory management, interrupt handling and DMA easier,
- and provide a uniform interface to applications.
- </para>
- <para>
- A note on versions: this guide covers features found in the DRM
- tree, including the TTM memory manager, output configuration and
- mode setting, and the new vblank internals, in addition to all
- the regular features found in current kernels.
- </para>
- <para>
- [Insert diagram of typical DRM stack here]
- </para>
- <sect1>
- <title>Style Guidelines</title>
- <para>
- For consistency this documentation uses American English. Abbreviations
- are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
- on. To aid in reading, the documentation makes full use of the markup
- characters kerneldoc provides: @parameter for function parameters, @member
- for structure members, &amp;structure to reference structures and
- function() for functions. These all get automatically hyperlinked if
- kerneldoc for the referenced objects exists. When referencing entries in
- function vtables please use -&gt;vfunc(). Note that kerneldoc does
- not support referencing struct members directly, so please add a reference
- to the vtable struct somewhere in the same paragraph or at least section.
- </para>
- <para>
- Except in special situations (to separate locked from unlocked variants)
- locking requirements for functions aren't documented in the kerneldoc.
- Instead locking should be checked at runtime using e.g.
- <code>WARN_ON(!mutex_is_locked(...));</code>. Since it's much easier to
- ignore documentation than runtime noise this provides more value. And on
- top of that runtime checks do need to be updated when the locking rules
- change, increasing the chances that they're correct. Within the
- documentation the locking rules should be explained in the relevant
- structures: Either in the comment for the lock explaining what it
- protects, or data fields need a note about which lock protects them, or
- both.
- </para>
- <para>
- Functions which have a non-<code>void</code> return value should have a
- section called "Returns" explaining the expected return values in
- different cases and their meanings. Currently there's no consensus whether
- that section name should be all upper-case or not, and whether it should
- end in a colon or not. Go with the file-local style. Other common section
- names are "Notes" with information for dangerous or tricky corner cases,
- and "FIXME" where the interface could be cleaned up.
- </para>
- </sect1>
- </chapter>
-
- <!-- Internals -->
-
- <chapter id="drmInternals">
- <title>DRM Internals</title>
- <para>
- This chapter documents DRM internals relevant to driver authors
- and developers working to add support for the latest features to
- existing drivers.
- </para>
- <para>
- First, we go over some typical driver initialization
- requirements, like setting up command buffers, creating an
- initial output configuration, and initializing core services.
- Subsequent sections cover core internals in more detail,
- providing implementation notes and examples.
- </para>
- <para>
- The DRM layer provides several services to graphics drivers,
- many of them driven by the application interfaces it provides
- through libdrm, the library that wraps most of the DRM ioctls.
- These include vblank event handling, memory
- management, output management, framebuffer management, command
- submission &amp; fencing, suspend/resume support, and DMA
- services.
- </para>
-
- <!-- Internals: driver init -->
-
- <sect1>
- <title>Driver Initialization</title>
- <para>
- At the core of every DRM driver is a <structname>drm_driver</structname>
- structure. Drivers typically statically initialize a drm_driver structure,
- and then pass it to <function>drm_dev_alloc()</function> to allocate a
- device instance. After the device instance is fully initialized it can be
- registered (which makes it accessible from userspace) using
- <function>drm_dev_register()</function>.
- </para>
- <para>
- The <structname>drm_driver</structname> structure contains static
- information that describes the driver and features it supports, and
- pointers to methods that the DRM core will call to implement the DRM API.
- We will first go through the <structname>drm_driver</structname> static
- information fields, and will then describe individual operations in
- details as they get used in later sections.
- </para>
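As an illustrative sketch of that flow, assuming a hypothetical platform driver called "foo" (fields and error handling trimmed to the essentials):

#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.name		 = "foo",
	.desc		 = "Illustrative example driver",
	.date		 = "20160810",
	.major		 = 1,
	.minor		 = 0,
	/* file operations, GEM and mode setting hooks omitted here */
};

static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	/* allocate and initialize a device instance for this driver */
	drm = drm_dev_alloc(&foo_driver, &pdev->dev);
	if (!drm)
		return -ENOMEM;

	/* hardware and mode setting initialization would go here */

	/* make the device visible to userspace */
	ret = drm_dev_register(drm, 0);
	if (ret)
		drm_dev_unref(drm);
	return ret;
}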
- <sect2>
- <title>Driver Information</title>
- <sect3>
- <title>Driver Features</title>
- <para>
- Drivers inform the DRM core about their requirements and supported
- features by setting appropriate flags in the
- <structfield>driver_features</structfield> field. Since those flags
- influence the DRM core behaviour from registration time onwards, most of them
- must be set before registering the <structname>drm_driver</structname>
- instance.
- </para>
- <synopsis>u32 driver_features;</synopsis>
- <variablelist>
- <title>Driver Feature Flags</title>
- <varlistentry>
- <term>DRIVER_USE_AGP</term>
- <listitem><para>
- Driver uses AGP interface, the DRM core will manage AGP resources.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_REQUIRE_AGP</term>
- <listitem><para>
- Driver needs AGP interface to function. AGP initialization failure
- will become a fatal error.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_PCI_DMA</term>
- <listitem><para>
- Driver is capable of PCI DMA, mapping of PCI DMA buffers to
- userspace will be enabled. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_SG</term>
- <listitem><para>
- Driver can perform scatter/gather DMA, allocation and mapping of
- scatter/gather buffers will be enabled. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_HAVE_DMA</term>
- <listitem><para>
- Driver supports DMA, the userspace DMA API will be supported.
- Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
- <listitem><para>
- DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
- managed by the DRM Core. The core will support simple IRQ handler
- installation when the flag is set. The installation process is
- described in <xref linkend="drm-irq-registration"/>.</para>
- <para>DRIVER_IRQ_SHARED indicates whether the device &amp; handler
- support shared IRQs (note that this is required of PCI drivers).
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_GEM</term>
- <listitem><para>
- Driver uses the GEM memory manager.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_MODESET</term>
- <listitem><para>
- Driver supports mode setting interfaces (KMS).
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_PRIME</term>
- <listitem><para>
- Driver implements DRM PRIME buffer sharing.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_RENDER</term>
- <listitem><para>
- Driver supports dedicated render nodes.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_ATOMIC</term>
- <listitem><para>
- Driver supports atomic properties. In this case the driver
- must implement appropriate obj->atomic_get_property() vfuncs
- for any modeset objects with driver specific properties.
- </para></listitem>
- </varlistentry>
- </variablelist>
- </sect3>
- <sect3>
- <title>Major, Minor and Patchlevel</title>
- <synopsis>int major;
-int minor;
-int patchlevel;</synopsis>
- <para>
- The DRM core identifies driver versions by a major, minor and patch
- level triplet. The information is printed to the kernel log at
- initialization time and passed to userspace through the
- DRM_IOCTL_VERSION ioctl.
- </para>
- <para>
- The major and minor numbers are also used to verify the requested driver
- API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
- between minor versions, applications can call DRM_IOCTL_SET_VERSION to
- select a specific version of the API. If the requested major isn't equal
- to the driver major, or the requested minor is larger than the driver
- minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
- the driver's set_version() method will be called with the requested
- version.
- </para>
- </sect3>
- <sect3>
- <title>Name, Description and Date</title>
- <synopsis>char *name;
-char *desc;
-char *date;</synopsis>
- <para>
- The driver name is printed to the kernel log at initialization time,
- used for IRQ registration and passed to userspace through
- DRM_IOCTL_VERSION.
- </para>
- <para>
- The driver description is a purely informative string passed to
- userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
- the kernel.
- </para>
- <para>
- The driver date, formatted as YYYYMMDD, is meant to identify the date of
- the latest modification to the driver. However, as most drivers fail to
- update it, its value is mostly useless. The DRM core prints it to the
- kernel log at initialization time and passes it to userspace through the
- DRM_IOCTL_VERSION ioctl.
- </para>
- </sect3>
- </sect2>
- <sect2>
- <title>Device Instance and Driver Handling</title>
-!Pdrivers/gpu/drm/drm_drv.c driver instance overview
-!Edrivers/gpu/drm/drm_drv.c
- </sect2>
- <sect2>
- <title>Driver Load</title>
- <sect3 id="drm-irq-registration">
- <title>IRQ Registration</title>
- <para>
- The DRM core tries to facilitate IRQ handler registration and
- unregistration by providing <function>drm_irq_install</function> and
- <function>drm_irq_uninstall</function> functions. Those functions only
- support a single interrupt per device; devices that use more than one
- IRQ need to be handled manually.
- </para>
- <sect4>
- <title>Managed IRQ Registration</title>
- <para>
- <function>drm_irq_install</function> starts by calling the
- <methodname>irq_preinstall</methodname> driver operation. The operation
- is optional and must make sure that the interrupt will not get fired by
- clearing all pending interrupt flags or disabling the interrupt.
- </para>
- <para>
- The passed-in IRQ will then be requested by a call to
- <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
- feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
- requested.
- </para>
- <para>
- The IRQ handler function must be provided as the mandatory irq_handler
- driver operation. It will get passed directly to
- <function>request_irq</function> and thus has the same prototype as all
- IRQ handlers. It will get called with a pointer to the DRM device as the
- second argument.
- </para>
- <para>
- Finally the function calls the optional
- <methodname>irq_postinstall</methodname> driver operation. The operation
- usually enables interrupts (excluding the vblank interrupt, which is
- enabled separately), but drivers may choose to enable/disable interrupts
- at a different time.
- </para>
- <para>
- <function>drm_irq_uninstall</function> is similarly used to uninstall an
- IRQ handler. It starts by waking up all processes waiting on a vblank
- interrupt to make sure they don't hang, and then calls the optional
- <methodname>irq_uninstall</methodname> driver operation. The operation
- must disable all hardware interrupts. Finally the function frees the IRQ
- by calling <function>free_irq</function>.
- </para>
- </sect4>
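A hedged sketch of the managed path, extending the hypothetical "foo" driver used in the earlier example (the handler body is only a placeholder):

static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;

	/* read and acknowledge foo's interrupt sources, e.g. via dev->dev_private */
	return IRQ_HANDLED;
}

/* in the hypothetical foo_driver declaration: */
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_HAVE_IRQ,
	.irq_handler	 = foo_irq_handler,
	/* .irq_preinstall, .irq_postinstall and .irq_uninstall are optional */

/* in the driver load path, once interrupts may safely be enabled: */
	ret = drm_irq_install(dev, platform_get_irq(pdev, 0));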
- <sect4>
- <title>Manual IRQ Registration</title>
- <para>
- Drivers that require multiple interrupt handlers can't use the managed
- IRQ registration functions. In that case IRQs must be registered and
- unregistered manually (usually with the <function>request_irq</function>
- and <function>free_irq</function> functions, or their devm_* equivalent).
- </para>
- <para>
- When manually registering IRQs, drivers must not set the DRIVER_HAVE_IRQ
- driver feature flag, and must not provide the
- <methodname>irq_handler</methodname> driver operation. They must set the
- <structname>drm_device</structname> <structfield>irq_enabled</structfield>
- field to 1 upon registration of the IRQs, and clear it to 0 after
- unregistering the IRQs.
- </para>
- </sect4>
- </sect3>
- <sect3>
- <title>Memory Manager Initialization</title>
- <para>
- Every DRM driver requires a memory manager which must be initialized at
- load time. DRM currently contains two memory managers, the Translation
- Table Manager (TTM) and the Graphics Execution Manager (GEM).
- This document describes the use of the GEM memory manager only. See
- <xref linkend="drm-memory-management"/> for details.
- </para>
- </sect3>
- <sect3>
- <title>Miscellaneous Device Configuration</title>
- <para>
- Another task that may be necessary for PCI devices during configuration
- is mapping the video BIOS. On many devices, the VBIOS describes device
- configuration, LCD panel timings (if any), and contains flags indicating
- device state. Mapping the BIOS can be done using the pci_map_rom() call,
- a convenience function that takes care of mapping the actual ROM,
- whether it has been shadowed into memory (typically at address 0xc0000)
- or exists on the PCI device in the ROM BAR. Note that after the ROM has
- been mapped and any necessary information has been extracted, it should
- be unmapped; on many devices, the ROM address decoder is shared with
- other BARs, so leaving it mapped could cause undesired behaviour like
- hangs or memory corruption.
- <!--!Fdrivers/pci/rom.c pci_map_rom-->
- </para>
- </sect3>
- </sect2>
- <sect2>
- <title>Bus-specific Device Registration and PCI Support</title>
- <para>
- A number of functions are provided to help with device registration.
- The functions deal with PCI and platform devices respectively and are
- only provided for historical reasons. These are all deprecated and
- shouldn't be used in new drivers. Besides that there's a few
- helpers for pci drivers.
- </para>
-!Edrivers/gpu/drm/drm_pci.c
-!Edrivers/gpu/drm/drm_platform.c
- </sect2>
- </sect1>
-
- <!-- Internals: memory management -->
-
- <sect1 id="drm-memory-management">
- <title>Memory management</title>
- <para>
- Modern Linux systems require large amounts of graphics memory to store
- frame buffers, textures, vertices and other graphics-related data. Given
- the very dynamic nature of much of that data, managing graphics memory
- efficiently is thus crucial for the graphics stack and plays a central
- role in the DRM infrastructure.
- </para>
- <para>
- The DRM core includes two memory managers, namely Translation Table Maps
- (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
- manager to be developed and tried to be a one-size-fits-all
- solution. It provides a single userspace API to accommodate the need of
- all hardware, supporting both Unified Memory Architecture (UMA) devices
- and devices with dedicated video RAM (i.e. most discrete video cards).
- This resulted in a large, complex piece of code that turned out to be
- hard to use for driver development.
- </para>
- <para>
- GEM started as an Intel-sponsored project in reaction to TTM's
- complexity. Its design philosophy is completely different: instead of
- providing a solution to every graphics memory-related problems, GEM
- providing a solution to every graphics memory-related problem, GEM
- share it. GEM has simpler initialization and execution requirements than
- TTM, but has no video RAM management capabilities and is thus limited to
- UMA devices.
- </para>
- <sect2>
- <title>The Translation Table Manager (TTM)</title>
- <para>
- TTM design background and information belongs here.
- </para>
- <sect3>
- <title>TTM initialization</title>
- <warning><para>This section is outdated.</para></warning>
- <para>
- Drivers wishing to support TTM must fill out a drm_bo_driver
- structure. The structure contains several fields with function
- pointers for initializing the TTM, allocating and freeing memory,
- waiting for command completion and fence synchronization, and memory
- migration. See the radeon_ttm.c file for an example of usage.
- </para>
- <para>
- The ttm_global_reference structure is made up of several fields:
- </para>
- <programlisting>
- struct ttm_global_reference {
- enum ttm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct ttm_global_reference *);
- void (*release) (struct ttm_global_reference *);
- };
- </programlisting>
- <para>
- There should be one global reference structure for your memory
- manager as a whole, and there will be others for each object
- created by the memory manager at runtime. Your global TTM should
- have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
- object should be sizeof(struct ttm_mem_global), and the init and
- release hooks should point at your driver-specific init and
- release routines, which probably eventually call
- ttm_mem_global_init and ttm_mem_global_release, respectively.
- </para>
- <para>
- Once your global TTM accounting structure is set up and initialized
- by calling ttm_global_item_ref() on it,
- you need to create a buffer object TTM to
- provide a pool for buffer object allocation by clients and the
- kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
- and its size should be sizeof(struct ttm_bo_global). Again,
- driver-specific init and release functions may be provided,
- likely eventually calling ttm_bo_global_init() and
- ttm_bo_global_release(), respectively. Also, like the previous
- object, ttm_global_item_ref() is used to create an initial reference
- count for the TTM, which will call your initialization function.
- </para>
- </sect3>
- </sect2>
- <sect2 id="drm-gem">
- <title>The Graphics Execution Manager (GEM)</title>
- <para>
- The GEM design approach has resulted in a memory manager that doesn't
- provide full coverage of all (or even all common) use cases in its
- userspace or kernel API. GEM exposes a set of standard memory-related
- operations to userspace and a set of helper functions to drivers, and lets
- drivers implement hardware-specific operations with their own private API.
- </para>
- <para>
- The GEM userspace API is described in the
- <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics
- Execution Manager</citetitle></ulink> article on LWN. While slightly
- outdated, the document provides a good overview of the GEM API principles.
- Buffer allocation and read and write operations, described as part of the
- common GEM API, are currently implemented using driver-specific ioctls.
- </para>
- <para>
- GEM is data-agnostic. It manages abstract buffer objects without knowing
- what individual buffers contain. APIs that require knowledge of buffer
- contents or purpose, such as buffer allocation or synchronization
- primitives, are thus outside of the scope of GEM and must be implemented
- using driver-specific ioctls.
- </para>
- <para>
- On a fundamental level, GEM involves several operations:
- <itemizedlist>
- <listitem>Memory allocation and freeing</listitem>
- <listitem>Command execution</listitem>
- <listitem>Aperture management at command execution time</listitem>
- </itemizedlist>
- Buffer object allocation is relatively straightforward and largely
- provided by Linux's shmem layer, which provides memory to back each
- object.
- </para>
- <para>
- Device-specific operations, such as command execution, pinning, buffer
- read &amp; write, mapping, and domain ownership transfers are left to
- driver-specific ioctls.
- </para>
- <sect3>
- <title>GEM Initialization</title>
- <para>
- Drivers that use GEM must set the DRIVER_GEM bit in the struct
- <structname>drm_driver</structname>
- <structfield>driver_features</structfield> field. The DRM core will
- then automatically initialize the GEM core before calling the
- <methodname>load</methodname> operation. Behind the scene, this will
- create a DRM Memory Manager object which provides an address space
- pool for object allocation.
- </para>
- <para>
- In a KMS configuration, drivers need to allocate and initialize a
- command ring buffer following core GEM initialization if required by
- the hardware. UMA devices usually have what is called a "stolen"
- memory region, which provides space for the initial framebuffer and
- large, contiguous memory regions required by the device. This space is
- typically not managed by GEM, and must be initialized separately into
- its own DRM MM object.
- </para>
- </sect3>
- <sect3>
- <title>GEM Objects Creation</title>
- <para>
- GEM splits creation of GEM objects and allocation of the memory that
- backs them in two distinct operations.
- </para>
- <para>
- GEM objects are represented by an instance of struct
- <structname>drm_gem_object</structname>. Drivers usually need to extend
- GEM objects with private information and thus create a driver-specific
- GEM object structure type that embeds an instance of struct
- <structname>drm_gem_object</structname>.
- </para>
- <para>
- To create a GEM object, a driver allocates memory for an instance of its
- specific GEM object type and initializes the embedded struct
- <structname>drm_gem_object</structname> with a call to
- <function>drm_gem_object_init</function>. The function takes a pointer to
- the DRM device, a pointer to the GEM object and the buffer object size
- in bytes.
- </para>
- <para>
- GEM uses shmem to allocate anonymous pageable memory.
- <function>drm_gem_object_init</function> will create an shmfs file of
- the requested size and store it into the struct
- <structname>drm_gem_object</structname> <structfield>filp</structfield>
- field. The memory is used as either main storage for the object when the
- graphics hardware uses system memory directly or as a backing store
- otherwise.
- </para>
- <para>
- Drivers are responsible for the actual physical pages allocation by
- calling <function>shmem_read_mapping_page_gfp</function> for each page.
- Note that they can decide to allocate pages when initializing the GEM
- object, or to delay allocation until the memory is needed (for instance
- when a page fault occurs as a result of a userspace memory access or
- when the driver needs to start a DMA transfer involving the memory).
- </para>
- <para>
- Anonymous pageable memory allocation is not always desired, for instance
- when the hardware requires physically contiguous system memory as is
- often the case in embedded devices. Drivers can create GEM objects with
- no shmfs backing (called private GEM objects) by initializing them with
- a call to <function>drm_gem_private_object_init</function> instead of
- <function>drm_gem_object_init</function>. Storage for private GEM
- objects must be managed by drivers.
- </para>
- </sect3>
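A minimal sketch of this pattern, using a hypothetical driver-specific type that embeds struct drm_gem_object and relies on the shmem-backed initialization described above:

#include <linux/slab.h>
#include <linux/err.h>
#include <drm/drm_gem.h>

struct foo_gem_object {
	struct drm_gem_object base;
	/* driver-private data: page array, GPU mapping, ... */
};

static struct foo_gem_object *foo_gem_create(struct drm_device *dev, size_t size)
{
	struct foo_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* creates the shmfs backing file of the requested size */
	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}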
- <sect3>
- <title>GEM Objects Lifetime</title>
- <para>
- All GEM objects are reference-counted by the GEM core. References can be
- acquired and released by calling <function>drm_gem_object_reference</function>
- and <function>drm_gem_object_unreference</function> respectively. The
- caller must hold the <structname>drm_device</structname>
- <structfield>struct_mutex</structfield> lock when calling
- <function>drm_gem_object_reference</function>. As a convenience, GEM
- provides a <function>drm_gem_object_unreference_unlocked</function>
- function that can be called without holding the lock.
- </para>
- <para>
- When the last reference to a GEM object is released the GEM core calls
- the <structname>drm_driver</structname>
- <methodname>gem_free_object</methodname> operation. That operation is
- mandatory for GEM-enabled drivers and must free the GEM object and all
- associated resources.
- </para>
- <para>
- <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
- Drivers are responsible for freeing all GEM object resources. This includes
- the resources created by the GEM core, which need to be released with
- <function>drm_gem_object_release</function>.
- </para>
- </sect3>
- <sect3>
- <title>GEM Objects Naming</title>
- <para>
- Communication between userspace and the kernel refers to GEM objects
- using local handles, global names or, more recently, file descriptors.
- All of those are 32-bit integer values; the usual Linux kernel limits
- apply to the file descriptors.
- </para>
- <para>
- GEM handles are local to a DRM file. Applications get a handle to a GEM
- object through a driver-specific ioctl, and can use that handle to refer
- to the GEM object in other standard or driver-specific ioctls. Closing a
- DRM file handle frees all its GEM handles and dereferences the
- associated GEM objects.
- </para>
- <para>
- To create a handle for a GEM object drivers call
- <function>drm_gem_handle_create</function>. The function takes a pointer
- to the DRM file and the GEM object and returns a locally unique handle.
- When the handle is no longer needed drivers delete it with a call to
- <function>drm_gem_handle_delete</function>. Finally the GEM object
- associated with a handle can be retrieved by a call to
- <function>drm_gem_object_lookup</function>.
- </para>
- <para>
- Handles don't take ownership of GEM objects, they only take a reference
- to the object that will be dropped when the handle is destroyed. To
- avoid leaking GEM objects, drivers must make sure they drop the
- reference(s) they own (such as the initial reference taken at object
- creation time) as appropriate, without any special consideration for the
- handle. For example, in the particular case of combined GEM object and
- handle creation in the implementation of the
- <methodname>dumb_create</methodname> operation, drivers must drop the
- initial reference to the GEM object before returning the handle.
- </para>
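For instance, a combined create-and-handle helper in the spirit of dumb_create could look as follows (hypothetical names, reusing the foo_gem_create() sketch from the previous section):

static int foo_gem_create_with_handle(struct drm_file *file_priv,
				      struct drm_device *dev,
				      size_t size, u32 *handle)
{
	struct foo_gem_object *obj;
	int ret;

	obj = foo_gem_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file_priv, &obj->base, handle);
	/* drop the initial reference; on success the handle now holds one */
	drm_gem_object_unreference_unlocked(&obj->base);

	return ret;
}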
- <para>
- GEM names are similar in purpose to handles but are not local to DRM
- files. They can be passed between processes to reference a GEM object
- globally. Names can't be used directly to refer to objects in the DRM
- API, applications must convert handles to names and names to handles
- using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
- respectively. The conversion is handled by the DRM core without any
- driver-specific support.
- </para>
- <para>
- GEM also supports buffer sharing with dma-buf file descriptors through
- PRIME. GEM-based drivers must use the provided helper functions to
- implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
- Since sharing file descriptors is inherently more secure than the
- easily guessable and global GEM names it is the preferred buffer
- sharing mechanism. Sharing buffers through GEM names is only supported
- for legacy userspace. Furthermore PRIME also allows cross-device
- buffer sharing since it is based on dma-bufs.
- </para>
- </sect3>
- <sect3 id="drm-gem-objects-mapping">
- <title>GEM Objects Mapping</title>
- <para>
- Because mapping operations are fairly heavyweight GEM favours
- read/write-like access to buffers, implemented through driver-specific
- ioctls, over mapping buffers to userspace. However, when random access
- to the buffer is needed (to perform software rendering for instance),
- direct access to the object can be more efficient.
- </para>
- <para>
- The mmap system call can't be used directly to map GEM objects, as they
- don't have their own file handle. Two alternative methods currently
- co-exist to map GEM objects to userspace. The first method uses a
- driver-specific ioctl to perform the mapping operation, calling
- <function>do_mmap</function> under the hood. This is often considered
- dubious, seems to be discouraged for new GEM-enabled drivers, and will
- thus not be described here.
- </para>
- <para>
- The second method uses the mmap system call on the DRM file handle.
- <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd,
- off_t offset);</synopsis>
- DRM identifies the GEM object to be mapped by a fake offset passed
- through the mmap offset argument. Prior to being mapped, a GEM object
- must thus be associated with a fake offset. To do so, drivers must call
- <function>drm_gem_create_mmap_offset</function> on the object.
- </para>
- <para>
- Once allocated, the fake offset value
- must be passed to the application in a driver-specific way and can then
- be used as the mmap offset argument.
- </para>
- <para>
- The GEM core provides a helper method <function>drm_gem_mmap</function>
- to handle object mapping. The method can be set directly as the mmap
- file operation handler. It will look up the GEM object based on the
- offset value and set the VMA operations to the
- <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
- field. Note that <function>drm_gem_mmap</function> doesn't map memory to
- userspace, but relies on the driver-provided fault handler to map pages
- individually.
- </para>
- <para>
- To use <function>drm_gem_mmap</function>, drivers must fill the struct
- <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
- field with a pointer to VM operations.
- </para>
- <para>
- <synopsis>struct vm_operations_struct *gem_vm_ops
-
- struct vm_operations_struct {
- void (*open)(struct vm_area_struct * area);
- void (*close)(struct vm_area_struct * area);
- int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- };</synopsis>
- </para>
- <para>
- The <methodname>open</methodname> and <methodname>close</methodname>
- operations must update the GEM object reference count. Drivers can use
- the <function>drm_gem_vm_open</function> and
- <function>drm_gem_vm_close</function> helper functions directly as open
- and close handlers.
- </para>
- <para>
- The fault operation handler is responsible for mapping individual pages
- to userspace when a page fault occurs. Depending on the memory
- allocation scheme, drivers can allocate pages at fault time, or can
- decide to allocate memory for the GEM object at the time the object is
- created.
- </para>
- <para>
- Drivers that want to map the GEM object upfront instead of handling page
- faults can implement their own mmap file operation handler.
- </para>
- </sect3>
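Put together, the mapping support described above typically reduces to a few declarations, as in this hedged sketch; foo_gem_fault is a hypothetical driver fault handler using the prototype shown earlier:

static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct vm_operations_struct foo_gem_vm_ops = {
	.fault = foo_gem_fault,
	.open  = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,
};

/* in the hypothetical foo_driver declaration: */
	.gem_vm_ops = &foo_gem_vm_ops,
	.fops	    = &foo_fops,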
- <sect3>
- <title>Memory Coherency</title>
- <para>
- When mapped to the device or used in a command buffer, backing pages
- for an object are flushed to memory and marked write combined so as to
- be coherent with the GPU. Likewise, if the CPU accesses an object
- after the GPU has finished rendering to the object, then the object
- must be made coherent with the CPU's view of memory, usually involving
- GPU cache flushing of various kinds. This core CPU&lt;-&gt;GPU
- coherency management is provided by a device-specific ioctl, which
- evaluates an object's current domain and performs any necessary
- flushing or synchronization to put the object into the desired
- coherency domain (note that the object may be busy, i.e. an active
- render target; in that case, setting the domain blocks the client and
- waits for rendering to complete before performing any necessary
- flushing operations).
- </para>
- </sect3>
- <sect3>
- <title>Command Execution</title>
- <para>
- Perhaps the most important GEM function for GPU devices is providing a
- command execution interface to clients. Client programs construct
- command buffers containing references to previously allocated memory
- objects, and then submit them to GEM. At that point, GEM takes care to
- bind all the objects into the GTT, execute the buffer, and provide
- necessary synchronization between clients accessing the same buffers.
- This often involves evicting some objects from the GTT and re-binding
- others (a fairly expensive operation), and providing relocation
- support which hides fixed GTT offsets from clients. Clients must take
- care not to submit command buffers that reference more objects than
- can fit in the GTT; otherwise, GEM will reject them and no rendering
- will occur. Similarly, if several objects in the buffer require fence
- registers to be allocated for correct rendering (e.g. 2D blits on
- pre-965 chips), care must be taken not to require more fence registers
- than are available to the client. Such resource management should be
- abstracted from the client in libdrm.
- </para>
- </sect3>
- </sect2>
- <sect2>
- <title>GEM Function Reference</title>
-!Edrivers/gpu/drm/drm_gem.c
-!Iinclude/drm/drm_gem.h
- </sect2>
- <sect2>
- <title>VMA Offset Manager</title>
-!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
-!Edrivers/gpu/drm/drm_vma_manager.c
-!Iinclude/drm/drm_vma_manager.h
- </sect2>
- <sect2 id="drm-prime-support">
- <title>PRIME Buffer Sharing</title>
- <para>
- PRIME is the cross device buffer sharing framework in drm, originally
- created for the OPTIMUS range of multi-gpu platforms. To userspace
- PRIME buffers are dma-buf based file descriptors.
- </para>
- <sect3>
- <title>Overview and Driver Interface</title>
- <para>
- Similar to GEM global names, PRIME file descriptors are
- also used to share buffer objects across processes. They offer
- additional security: as file descriptors must be explicitly sent over
- UNIX domain sockets to be shared between applications, they can't be
- guessed like the globally unique GEM names.
- </para>
- <para>
- Drivers that support the PRIME
- API must set the DRIVER_PRIME bit in the struct
- <structname>drm_driver</structname>
- <structfield>driver_features</structfield> field, and implement the
- <methodname>prime_handle_to_fd</methodname> and
- <methodname>prime_fd_to_handle</methodname> operations.
- </para>
- <para>
- <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags, int *prime_fd);
-int (*prime_fd_to_handle)(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle);</synopsis>
- Those two operations convert a handle to a PRIME file descriptor and
- vice versa. Drivers must use the kernel dma-buf buffer sharing framework
- to manage the PRIME file descriptors. Similar to the mode setting
- API PRIME is agnostic to the underlying buffer object manager, as
- long as handles are 32bit unsigned integers.
- </para>
- <para>
- While non-GEM drivers must implement the operations themselves, GEM
- drivers must use the <function>drm_gem_prime_handle_to_fd</function>
- and <function>drm_gem_prime_fd_to_handle</function> helper functions.
- Those helpers rely on the driver
- <methodname>gem_prime_export</methodname> and
- <methodname>gem_prime_import</methodname> operations to create a dma-buf
- instance from a GEM object (dma-buf exporter role) and to create a GEM
- object from a dma-buf instance (dma-buf importer role).
- </para>
- <para>
- <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
-struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
- struct dma_buf *dma_buf);</synopsis>
- These two operations are mandatory for GEM drivers that support
- PRIME.
- </para>
- </sect3>
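For a GEM-based driver this usually reduces to pointing the operations at the provided helpers, as in this hedged sketch (the drm_gem_prime_* defaults in turn rely on the driver's gem_prime_pin, gem_prime_get_sg_table and related hooks):

/* in the hypothetical foo_driver declaration: */
	.driver_features    = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,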
- <sect3>
- <title>PRIME Helper Functions</title>
-!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
- </sect3>
- </sect2>
- <sect2>
- <title>PRIME Function References</title>
-!Edrivers/gpu/drm/drm_prime.c
- </sect2>
- <sect2>
- <title>DRM MM Range Allocator</title>
- <sect3>
- <title>Overview</title>
-!Pdrivers/gpu/drm/drm_mm.c Overview
- </sect3>
- <sect3>
- <title>LRU Scan/Eviction Support</title>
-!Pdrivers/gpu/drm/drm_mm.c lru scan roaster
- </sect3>
- </sect2>
- <sect2>
- <title>DRM MM Range Allocator Function References</title>
-!Edrivers/gpu/drm/drm_mm.c
-!Iinclude/drm/drm_mm.h
- </sect2>
- <sect2>
- <title>CMA Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
-!Edrivers/gpu/drm/drm_gem_cma_helper.c
-!Iinclude/drm/drm_gem_cma_helper.h
- </sect2>
- </sect1>
-
- <!-- Internals: mode setting -->
-
- <sect1 id="drm-mode-setting">
- <title>Mode Setting</title>
- <para>
- Drivers must initialize the mode setting core by calling
- <function>drm_mode_config_init</function> on the DRM device. The function
- initializes the <structname>drm_device</structname>
- <structfield>mode_config</structfield> field and never fails. Once done,
- mode configuration must be set up by initializing the following fields.
- </para>
- <itemizedlist>
- <listitem>
- <synopsis>int min_width, min_height;
-int max_width, max_height;</synopsis>
- <para>
- Minimum and maximum width and height of the frame buffers in pixel
- units.
- </para>
- </listitem>
- <listitem>
- <synopsis>struct drm_mode_config_funcs *funcs;</synopsis>
- <para>Mode setting functions.</para>
- </listitem>
- </itemizedlist>
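In code, this initialization is typically as short as the following fragment, with a hypothetical foo_mode_config_funcs supplying the mode setting functions and the limits chosen to match the hardware:

	drm_mode_config_init(dev);

	dev->mode_config.min_width  = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width  = 4096;	/* illustrative hardware limits */
	dev->mode_config.max_height = 4096;
	dev->mode_config.funcs = &foo_mode_config_funcs;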
- <sect2>
- <title>Display Modes Function Reference</title>
-!Iinclude/drm/drm_modes.h
-!Edrivers/gpu/drm/drm_modes.c
- </sect2>
- <sect2>
- <title>Atomic Mode Setting Function Reference</title>
-!Edrivers/gpu/drm/drm_atomic.c
-!Idrivers/gpu/drm/drm_atomic.c
- </sect2>
- <sect2>
- <title>Frame Buffer Abstraction</title>
- <para>
- Frame buffers are abstract memory objects that provide a source of
- pixels to scanout to a CRTC. Applications explicitly request the
- creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and
- receive an opaque handle that can be passed to the KMS CRTC control,
- plane configuration and page flip functions.
- </para>
- <para>
- Frame buffers rely on the underneath memory manager for low-level memory
- operations. When creating a frame buffer applications pass a memory
- handle (or a list of memory handles for multi-planar formats) through
- the <parameter>drm_mode_fb_cmd2</parameter> argument. For drivers using
- GEM as their userspace buffer management interface this would be a GEM
- handle. Drivers are however free to use their own backing storage object
- handles, e.g. vmwgfx directly exposes special TTM handles to userspace
- and so expects TTM handles in the create ioctl and not GEM handles.
- </para>
- <para>
- The lifetime of a drm framebuffer is controlled with a reference count,
- drivers can grab additional references with
- <function>drm_framebuffer_reference</function> and drop them
- again with <function>drm_framebuffer_unreference</function>. For
- driver-private framebuffers for which the last reference is never
- dropped (e.g. for the fbdev framebuffer when the struct
- <structname>drm_framebuffer</structname> is embedded into the fbdev
- helper struct) drivers can manually clean up a framebuffer at module
- unload time with
- <function>drm_framebuffer_unregister_private</function>.
- </para>
- </sect2>
- <sect2>
- <title>Dumb Buffer Objects</title>
- <para>
- The KMS API doesn't standardize backing storage object creation and
- leaves it to driver-specific ioctls. Furthermore actually creating a
- buffer object even for GEM-based drivers is done through a
- driver-specific ioctl - GEM only has a common userspace interface for
- sharing and destroying objects. While not an issue for full-fledged
- graphics stacks that include device-specific userspace components (in
- libdrm for instance), this limit makes DRM-based early boot graphics
- unnecessarily complex.
- </para>
- <para>
- Dumb objects partly alleviate the problem by providing a standard
- API to create dumb buffers suitable for scanout, which can then be used
- to create KMS frame buffers.
- </para>
- <para>
- To support dumb objects drivers must implement the
- <methodname>dumb_create</methodname>,
- <methodname>dumb_destroy</methodname> and
- <methodname>dumb_map_offset</methodname> operations.
- </para>
- <itemizedlist>
- <listitem>
- <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_create_dumb *args);</synopsis>
- <para>
- The <methodname>dumb_create</methodname> operation creates a driver
- object (GEM or TTM handle) suitable for scanout based on the
- width, height and depth from the struct
- <structname>drm_mode_create_dumb</structname> argument. It fills the
- argument's <structfield>handle</structfield>,
- <structfield>pitch</structfield> and <structfield>size</structfield>
- fields with a handle for the newly created object and its line
- pitch and size in bytes.
- </para>
- </listitem>
- <listitem>
- <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);</synopsis>
- <para>
- The <methodname>dumb_destroy</methodname> operation destroys a dumb
- object created by <methodname>dumb_create</methodname>.
- </para>
- </listitem>
- <listitem>
- <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);</synopsis>
- <para>
- The <methodname>dumb_map_offset</methodname> operation associates an
- mmap fake offset with the object given by the handle and returns
- it. Drivers must use the
- <function>drm_gem_create_mmap_offset</function> function to
- associate the fake offset as described in
- <xref linkend="drm-gem-objects-mapping"/>.
- </para>
- </listitem>
- </itemizedlist>
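A hedged sketch of a dumb_create implementation built on the hypothetical GEM helpers from the earlier sketches; the 64-byte pitch alignment is an illustrative choice, not a requirement of the API:

static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
	args->size  = (u64)args->pitch * args->height;

	return foo_gem_create_with_handle(file_priv, dev, args->size,
					  &args->handle);
}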
- <para>
- Note that dumb objects may not be used for gpu acceleration, as has been
- attempted on some ARM embedded platforms. Such drivers really must have
- a hardware-specific ioctl to allocate suitable buffer objects.
- </para>
- </sect2>
- <sect2>
- <title>Output Polling</title>
- <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis>
- <para>
- This operation notifies the driver that the status of one or more
- connectors has changed. Drivers that use the fb helper can just call the
- <function>drm_fb_helper_hotplug_event</function> function to handle this
- operation.
- </para>
- </sect2>
- <sect2>
- <title>Locking</title>
- <para>
- Beside some lookup structures with their own locking (which is hidden
- behind the interface functions) most of the modeset state is protected
- by the <code>dev-&gt;mode_config.lock</code> mutex and additionally
- per-crtc locks to allow cursor updates, pageflips and similar operations
- to occur concurrently with background tasks like output detection.
- Operations which cross domains like a full modeset always grab all
- locks. Drivers therefore need to protect resources shared between crtcs with
- additional locking. They also need to be careful to always grab the
- relevant crtc locks if a modeset function touches crtc state, e.g. for
- load detection (which does only grab the <code>mode_config.lock</code>
- to allow concurrent screen updates on live crtcs).
- </para>
- </sect2>
- </sect1>
-
- <!-- Internals: kms initialization and cleanup -->
-
- <sect1 id="drm-kms-init">
- <title>KMS Initialization and Cleanup</title>
- <para>
- A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
- and connectors. KMS drivers must thus create and initialize all those
- objects at load time after initializing mode setting.
- </para>
- <sect2>
- <title>CRTCs (struct <structname>drm_crtc</structname>)</title>
- <para>
- A CRTC is an abstraction representing a part of the chip that contains a
- pointer to a scanout buffer. Therefore, the number of CRTCs available
- determines how many independent scanout buffers can be active at any
- given time. The CRTC structure contains several fields to support this:
- a pointer to some video memory (abstracted as a frame buffer object), a
- display mode, and an (x, y) offset into the video memory to support
- panning or configurations where one piece of video memory spans multiple
- CRTCs.
- </para>
- <sect3>
- <title>CRTC Initialization</title>
- <para>
- A KMS device must create and register at least one struct
- <structname>drm_crtc</structname> instance. The instance is allocated
- and zeroed by the driver, possibly as part of a larger structure, and
- registered with a call to <function>drm_crtc_init</function> with a
- pointer to CRTC functions.
- </para>
- </sect3>
- </sect2>
- <sect2>
- <title>Planes (struct <structname>drm_plane</structname>)</title>
- <para>
- A plane represents an image source that can be blended with or overlayed
- on top of a CRTC during the scanout process. Planes are associated with
- a frame buffer to crop a portion of the image memory (source) and
- optionally scale it to a destination size. The result is then blended
- with or overlayed on top of a CRTC.
- </para>
- <para>
- The DRM core recognizes three types of planes:
- <itemizedlist>
- <listitem>
- DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
- planes are the planes operated upon by CRTC modesetting and flipping
- operations described in the page_flip hook in <structname>drm_crtc_funcs</structname>.
- </listitem>
- <listitem>
- DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC. Cursor
- planes are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
- DRM_IOCTL_MODE_CURSOR2 ioctls.
- </listitem>
- <listitem>
- DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor planes.
- Some drivers refer to these types of planes as "sprites" internally.
- </listitem>
- </itemizedlist>
- For compatibility with legacy userspace, only overlay planes are made
- available to userspace by default. Userspace clients may set the
- DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that
- they wish to receive a universal plane list containing all plane types.
- </para>
- <sect3>
- <title>Plane Initialization</title>
- <para>
- To create a plane, a KMS driver allocates and
- zeroes an instance of struct <structname>drm_plane</structname>
- (possibly as part of a larger structure) and registers it with a call
- to <function>drm_universal_plane_init</function>. The function takes a bitmask
- of the CRTCs that can be associated with the plane, a pointer to the
- plane functions, a list of format supported formats, and the type of
- plane (primary, cursor, or overlay) being initialized.
- </para>
- <para>
- Cursor and overlay planes are optional. All drivers should provide
- one primary plane per CRTC (although this requirement may change in
- the future); drivers that do not wish to provide special handling for
- primary planes may make use of the helper functions described in
- <xref linkend="drm-kms-planehelpers"/> to create and register a
- primary plane with standard capabilities.
- </para>
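-        <para>
-          A condensed sketch, assuming a single CRTC and the transitional
-          plane helpers; the <quote>foo</quote> names are illustrative and the
-          exact <function>drm_universal_plane_init</function> argument list
-          differs between kernel versions:
-        </para>
-        <programlisting><![CDATA[
-static const uint32_t foo_plane_formats[] = {
-	DRM_FORMAT_XRGB8888,
-	DRM_FORMAT_ARGB8888,
-};
-
-static const struct drm_plane_funcs foo_plane_funcs = {
-	.update_plane = drm_plane_helper_update,
-	.disable_plane = drm_plane_helper_disable,
-	.destroy = drm_plane_cleanup,
-};
-
-int foo_overlay_plane_create(struct drm_device *dev, struct drm_plane *plane)
-{
-	/* Bit 0 set: this plane can only be bound to the first CRTC. */
-	unsigned long possible_crtcs = BIT(0);
-
-	return drm_universal_plane_init(dev, plane, possible_crtcs,
-					&foo_plane_funcs, foo_plane_formats,
-					ARRAY_SIZE(foo_plane_formats),
-					DRM_PLANE_TYPE_OVERLAY, NULL);
-}]]></programlisting>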
- </sect3>
- </sect2>
- <sect2>
- <title>Encoders (struct <structname>drm_encoder</structname>)</title>
- <para>
- An encoder takes pixel data from a CRTC and converts it to a format
- suitable for any attached connectors. On some devices, it may be
- possible to have a CRTC send data to more than one encoder. In that
- case, both encoders would receive data from the same scanout buffer,
- resulting in a "cloned" display configuration across the connectors
- attached to each encoder.
- </para>
- <sect3>
- <title>Encoder Initialization</title>
- <para>
- As for CRTCs, a KMS driver must create, initialize and register at
- least one struct <structname>drm_encoder</structname> instance. The
- instance is allocated and zeroed by the driver, possibly as part of a
- larger structure.
- </para>
- <para>
- Drivers must initialize the struct <structname>drm_encoder</structname>
- <structfield>possible_crtcs</structfield> and
- <structfield>possible_clones</structfield> fields before registering the
-        encoder. Both fields are bitmasks of, respectively, the CRTCs that the
-        encoder can be connected to and the sibling encoders that are
-        candidates for cloning.
- </para>
- <para>
- After being initialized, the encoder must be registered with a call to
- <function>drm_encoder_init</function>. The function takes a pointer to
- the encoder functions and an encoder type. Supported types are
- <itemizedlist>
- <listitem>
- DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
- </listitem>
- <listitem>
- DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
- </listitem>
- <listitem>
- DRM_MODE_ENCODER_LVDS for display panels
- </listitem>
- <listitem>
- DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
- SCART)
- </listitem>
- <listitem>
- DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
- </listitem>
- </itemizedlist>
- </para>
- <para>
- Encoders must be attached to a CRTC to be used. DRM drivers leave
- encoders unattached at initialization time. Applications (or the fbdev
- compatibility layer when implemented) are responsible for attaching the
- encoders they want to use to a CRTC.
- </para>
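-        <para>
-          A minimal sketch, with illustrative <quote>foo</quote> names; the
-          trailing name argument of <function>drm_encoder_init</function> is
-          only present in recent kernel versions:
-        </para>
-        <programlisting><![CDATA[
-static const struct drm_encoder_funcs foo_encoder_funcs = {
-	.destroy = drm_encoder_cleanup,
-};
-
-int foo_encoder_create(struct drm_device *dev, struct drm_encoder *encoder)
-{
-	/* Can scan out from CRTC 0 or CRTC 1, no cloning with other encoders. */
-	encoder->possible_crtcs = BIT(0) | BIT(1);
-	encoder->possible_clones = 0;
-
-	return drm_encoder_init(dev, encoder, &foo_encoder_funcs,
-				DRM_MODE_ENCODER_TMDS, NULL);
-}]]></programlisting>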
- </sect3>
- </sect2>
- <sect2>
- <title>Connectors (struct <structname>drm_connector</structname>)</title>
- <para>
- A connector is the final destination for pixel data on a device, and
- usually connects directly to an external display device like a monitor
- or laptop panel. A connector can only be attached to one encoder at a
- time. The connector is also the structure where information about the
- attached display is kept, so it contains fields for display data, EDID
- data, DPMS &amp; connection status, and information about modes
- supported on the attached displays.
- </para>
- <sect3>
- <title>Connector Initialization</title>
- <para>
- Finally a KMS driver must create, initialize, register and attach at
- least one struct <structname>drm_connector</structname> instance. The
-          instance is created like other KMS objects and initialized by setting the
- following fields.
- </para>
- <variablelist>
- <varlistentry>
- <term><structfield>interlace_allowed</structfield></term>
- <listitem><para>
- Whether the connector can handle interlaced modes.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term><structfield>doublescan_allowed</structfield></term>
- <listitem><para>
- Whether the connector can handle doublescan.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term><structfield>display_info
- </structfield></term>
- <listitem><para>
- Display information is filled from EDID information when a display
-            is detected. For non-hot-pluggable displays such as flat panels in
- embedded systems, the driver should initialize the
- <structfield>display_info</structfield>.<structfield>width_mm</structfield>
- and
- <structfield>display_info</structfield>.<structfield>height_mm</structfield>
- fields with the physical size of the display.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term id="drm-kms-connector-polled"><structfield>polled</structfield></term>
- <listitem><para>
- Connector polling mode, a combination of
- <variablelist>
- <varlistentry>
- <term>DRM_CONNECTOR_POLL_HPD</term>
- <listitem><para>
- The connector generates hotplug events and doesn't need to be
- periodically polled. The CONNECT and DISCONNECT flags must not
- be set together with the HPD flag.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_CONNECTOR_POLL_CONNECT</term>
- <listitem><para>
- Periodically poll the connector for connection.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_CONNECTOR_POLL_DISCONNECT</term>
- <listitem><para>
- Periodically poll the connector for disconnection.
- </para></listitem>
- </varlistentry>
- </variablelist>
- Set to 0 for connectors that don't support connection status
- discovery.
- </para></listitem>
- </varlistentry>
- </variablelist>
- <para>
- The connector is then registered with a call to
- <function>drm_connector_init</function> with a pointer to the connector
- functions and a connector type, and exposed through sysfs with a call to
- <function>drm_connector_register</function>.
- </para>
- <para>
- Supported connector types are
- <itemizedlist>
- <listitem>DRM_MODE_CONNECTOR_VGA</listitem>
- <listitem>DRM_MODE_CONNECTOR_DVII</listitem>
- <listitem>DRM_MODE_CONNECTOR_DVID</listitem>
- <listitem>DRM_MODE_CONNECTOR_DVIA</listitem>
- <listitem>DRM_MODE_CONNECTOR_Composite</listitem>
- <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem>
- <listitem>DRM_MODE_CONNECTOR_LVDS</listitem>
- <listitem>DRM_MODE_CONNECTOR_Component</listitem>
- <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem>
- <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem>
- <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem>
- <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem>
- <listitem>DRM_MODE_CONNECTOR_TV</listitem>
- <listitem>DRM_MODE_CONNECTOR_eDP</listitem>
- <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem>
- </itemizedlist>
- </para>
- <para>
- Connectors must be attached to an encoder to be used. For devices that
- map connectors to encoders 1:1, the connector should be attached at
- initialization time with a call to
- <function>drm_mode_connector_attach_encoder</function>. The driver must
- also set the <structname>drm_connector</structname>
- <structfield>encoder</structfield> field to point to the attached
- encoder.
- </para>
- <para>
-          Finally, drivers must initialize the connector state change detection
- with a call to <function>drm_kms_helper_poll_init</function>. If at
- least one connector is pollable but can't generate hotplug interrupts
- (indicated by the DRM_CONNECTOR_POLL_CONNECT and
- DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
- automatically be queued to periodically poll for changes. Connectors
- that can generate hotplug interrupts must be marked with the
- DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
- call <function>drm_helper_hpd_irq_event</function>. The function will
- queue a delayed work to check the state of all connectors, but no
- periodic polling will be done.
- </para>
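-        <para>
-          A minimal sketch of the hotplug path for connectors marked with
-          DRM_CONNECTOR_POLL_HPD, assuming a hypothetical threaded interrupt
-          handler wired to the hardware hotplug line:
-        </para>
-        <programlisting><![CDATA[
-/* Called once at initialization time, after all connectors are created. */
-void foo_output_poll_init(struct drm_device *dev)
-{
-	/* Starts delayed-work polling for POLL_CONNECT/POLL_DISCONNECT
-	 * connectors; connectors marked POLL_HPD are not polled. */
-	drm_kms_helper_poll_init(dev);
-}
-
-/* Threaded half of the hotplug interrupt handler (may sleep). */
-static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-
-	/* Re-checks connector states and sends a hotplug event if needed. */
-	drm_helper_hpd_irq_event(dev);
-
-	return IRQ_HANDLED;
-}]]></programlisting>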
- </sect3>
- <sect3>
- <title>Connector Operations</title>
- <note><para>
-          Unless otherwise stated, all operations are mandatory.
- </para></note>
- <sect4>
- <title>DPMS</title>
- <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis>
- <para>
- The DPMS operation sets the power state of a connector. The mode
- argument is one of
- <itemizedlist>
- <listitem><para>DRM_MODE_DPMS_ON</para></listitem>
- <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem>
- <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem>
- <listitem><para>DRM_MODE_DPMS_OFF</para></listitem>
- </itemizedlist>
- </para>
- <para>
-            In all but DPMS_ON mode, the encoder to which the connector is attached
-            should put the display in low-power mode by driving its signals
-            appropriately. If more than one connector is attached to the encoder,
-            care should be taken not to change the power state of other displays as
- a side effect. Low-power mode should be propagated to the encoders and
- CRTCs when all related connectors are put in low-power mode.
- </para>
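-          <para>
-            Drivers using the legacy CRTC helpers typically do not open-code
-            this operation and instead plug in the generic helper, which
-            propagates the requested state to the attached encoder and CRTC.
-            A sketch, with the <quote>foo</quote> entries being illustrative
-            placeholders (the return type of the hook has changed between
-            kernel versions):
-          </para>
-          <programlisting><![CDATA[
-static const struct drm_connector_funcs foo_connector_funcs = {
-	/* Forward the DPMS request to the attached encoder and CRTC. */
-	.dpms = drm_helper_connector_dpms,
-	.detect = foo_connector_detect,
-	.fill_modes = drm_helper_probe_single_connector_modes,
-	.destroy = foo_connector_destroy,
-};]]></programlisting>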
- </sect4>
- <sect4>
- <title>Modes</title>
- <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
- uint32_t max_height);</synopsis>
- <para>
- Fill the mode list with all supported modes for the connector. If the
- <parameter>max_width</parameter> and <parameter>max_height</parameter>
- arguments are non-zero, the implementation must ignore all modes wider
- than <parameter>max_width</parameter> or higher than
- <parameter>max_height</parameter>.
- </para>
- <para>
-            In this operation the connector must also fill its
-            <structfield>display_info</structfield>
-            <structfield>width_mm</structfield> and
-            <structfield>height_mm</structfield> fields with the physical size of
-            the connected display in millimeters. The fields should be set to 0 if the value
- isn't known or is not applicable (for instance for projector devices).
- </para>
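-          <para>
-            Drivers usually set <methodname>fill_modes</methodname> to the
-            <function>drm_helper_probe_single_connector_modes</function>
-            helper and implement the probing itself in the connector helper
-            <methodname>get_modes</methodname> operation. A sketch, assuming a
-            hypothetical <structname>foo_connector</structname> structure with
-            an associated DDC i2c adapter:
-          </para>
-          <programlisting><![CDATA[
-static int foo_connector_get_modes(struct drm_connector *connector)
-{
-	struct foo_connector *foo = to_foo_connector(connector);
-	struct edid *edid;
-	int count;
-
-	edid = drm_get_edid(connector, foo->ddc_adapter);
-	if (!edid)
-		return 0;
-
-	/* Expose the EDID to userspace through the EDID blob property. */
-	drm_mode_connector_update_edid_property(connector, edid);
-	/* Adds the probed modes and fills display_info (width_mm etc.). */
-	count = drm_add_edid_modes(connector, edid);
-	kfree(edid);
-
-	return count;
-}
-
-static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
-	.get_modes = foo_connector_get_modes,
-	/* .mode_valid, .best_encoder, ... as needed */
-};]]></programlisting>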
- </sect4>
- <sect4>
- <title>Connection Status</title>
- <para>
- The connection status is updated through polling or hotplug events when
- supported (see <xref linkend="drm-kms-connector-polled"/>). The status
- value is reported to userspace through ioctls and must not be used
- inside the driver, as it only gets initialized by a call to
- <function>drm_mode_getconnector</function> from userspace.
- </para>
- <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector,
- bool force);</synopsis>
- <para>
- Check to see if anything is attached to the connector. The
- <parameter>force</parameter> parameter is set to false whilst polling or
- to true when checking the connector due to user request.
- <parameter>force</parameter> can be used by the driver to avoid
- expensive, destructive operations during automated probing.
- </para>
- <para>
- Return connector_status_connected if something is connected to the
- connector, connector_status_disconnected if nothing is connected and
- connector_status_unknown if the connection state isn't known.
- </para>
- <para>
- Drivers should only return connector_status_connected if the connection
- status has really been probed as connected. Connectors that can't detect
- the connection status, or failed connection status probes, should return
- connector_status_unknown.
- </para>
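-          <para>
-            A sketch of a typical implementation; the foo_hw_* helpers stand
-            in for driver-specific hardware accesses and are purely
-            illustrative:
-          </para>
-          <programlisting><![CDATA[
-static enum drm_connector_status
-foo_connector_detect(struct drm_connector *connector, bool force)
-{
-	struct foo_connector *foo = to_foo_connector(connector);
-
-	/* Cheap, non-destructive check of the hotplug status bit. */
-	if (foo_hw_output_present(foo))
-		return connector_status_connected;
-
-	/* Avoid expensive or destructive load detection while polling. */
-	if (!force)
-		return connector_status_unknown;
-
-	return foo_hw_load_detect(foo) ? connector_status_connected
-				       : connector_status_disconnected;
-}]]></programlisting>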
- </sect4>
- </sect3>
- </sect2>
- <sect2>
- <title>Cleanup</title>
- <para>
- The DRM core manages its objects' lifetime. When an object is not needed
- anymore the core calls its destroy function, which must clean up and
- free every resource allocated for the object. Every
- <function>drm_*_init</function> call must be matched with a
- corresponding <function>drm_*_cleanup</function> call to cleanup CRTCs
- (<function>drm_crtc_cleanup</function>), planes
- (<function>drm_plane_cleanup</function>), encoders
- (<function>drm_encoder_cleanup</function>) and connectors
- (<function>drm_connector_cleanup</function>). Furthermore, connectors
- that have been added to sysfs must be removed by a call to
- <function>drm_connector_unregister</function> before calling
- <function>drm_connector_cleanup</function>.
- </para>
- <para>
-      Connector state change detection must be cleaned up with a call to
- <function>drm_kms_helper_poll_fini</function>.
- </para>
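-      <para>
-        A sketch of a typical unload path; whether connectors are unregistered
-        one by one or through a driver-specific list is up to the driver, and
-        <function>drm_mode_config_cleanup</function> is only one possible way
-        to trigger the individual destroy functions:
-      </para>
-      <programlisting><![CDATA[
-void foo_modeset_cleanup(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-
-	/* Stop polling and hotplug handling before tearing objects down. */
-	drm_kms_helper_poll_fini(dev);
-
-	/* Remove connectors from sysfs first... */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		drm_connector_unregister(connector);
-
-	/* ...then release all KMS objects; this ends up calling each
-	 * object's destroy function, which in turn calls drm_*_cleanup(). */
-	drm_mode_config_cleanup(dev);
-}]]></programlisting>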
- </sect2>
- <sect2>
- <title>Output discovery and initialization example</title>
- <programlisting><![CDATA[
-void intel_crt_init(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct intel_output *intel_output;
-
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
- if (!intel_output)
- return;
-
- connector = &intel_output->base;
- drm_connector_init(dev, &intel_output->base,
- &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
-
- drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
- DRM_MODE_ENCODER_DAC);
-
- drm_mode_connector_attach_encoder(&intel_output->base,
- &intel_output->enc);
-
- /* Set up the DDC bus. */
- intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
- if (!intel_output->ddc_bus) {
- dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
- "failed.\n");
- return;
- }
-
- intel_output->type = INTEL_OUTPUT_ANALOG;
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
- drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
-
- drm_connector_register(connector);
-}]]></programlisting>
- <para>
- In the example above (taken from the i915 driver), a CRTC, connector and
- encoder combination is created. A device-specific i2c bus is also
- created for fetching EDID data and performing monitor detection. Once
- the process is complete, the new connector is registered with sysfs to
- make its properties available to applications.
- </para>
- </sect2>
- <sect2>
- <title>KMS API Functions</title>
-!Edrivers/gpu/drm/drm_crtc.c
- </sect2>
- <sect2>
- <title>KMS Data Structures</title>
-!Iinclude/drm/drm_crtc.h
- </sect2>
- <sect2>
- <title>KMS Locking</title>
-!Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
-!Iinclude/drm/drm_modeset_lock.h
-!Edrivers/gpu/drm/drm_modeset_lock.c
- </sect2>
- </sect1>
-
- <!-- Internals: kms helper functions -->
-
- <sect1>
- <title>Mode Setting Helper Functions</title>
- <para>
- The plane, CRTC, encoder and connector functions provided by the drivers
- implement the DRM API. They're called by the DRM core and ioctl handlers
-    to handle device state changes and configuration requests. As implementing
- those functions often requires logic not specific to drivers, mid-layer
- helper functions are available to avoid duplicating boilerplate code.
- </para>
- <para>
- The DRM core contains one mid-layer implementation. The mid-layer provides
- implementations of several plane, CRTC, encoder and connector functions
- (called from the top of the mid-layer) that pre-process requests and call
- lower-level functions provided by the driver (at the bottom of the
- mid-layer). For instance, the
- <function>drm_crtc_helper_set_config</function> function can be used to
- fill the struct <structname>drm_crtc_funcs</structname>
- <structfield>set_config</structfield> field. When called, it will split
-    the <methodname>set_config</methodname> operation into smaller, simpler
- operations and call the driver to handle them.
- </para>
- <para>
- To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>,
- <function>drm_encoder_helper_add</function> and
- <function>drm_connector_helper_add</function> functions to install their
- mid-layer bottom operations handlers, and fill the
- <structname>drm_crtc_funcs</structname>,
- <structname>drm_encoder_funcs</structname> and
- <structname>drm_connector_funcs</structname> structures with pointers to
- the mid-layer top API functions. Installing the mid-layer bottom operation
- handlers is best done right after registering the corresponding KMS object.
- </para>
- <para>
- The mid-layer is not split between CRTC, encoder and connector operations.
-    To use it, a driver must provide bottom functions for all three KMS
- entities.
- </para>
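-    <para>
-      A condensed sketch of the pattern for a CRTC; encoders and connectors
-      follow the same scheme with their respective helper structures, and the
-      foo_crtc_* handlers are illustrative placeholders:
-    </para>
-    <programlisting><![CDATA[
-/* Top: the DRM API entry points are filled with mid-layer functions. */
-static const struct drm_crtc_funcs foo_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
-	.destroy = drm_crtc_cleanup,	/* assuming memory is managed elsewhere */
-};
-
-/* Bottom: the mid-layer calls back into the driver through these hooks. */
-static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
-	.dpms = foo_crtc_dpms,
-	.mode_fixup = foo_crtc_mode_fixup,
-	.mode_set = foo_crtc_mode_set,
-	.prepare = foo_crtc_prepare,
-	.commit = foo_crtc_commit,
-};
-
-int foo_crtc_register(struct drm_device *dev, struct drm_crtc *crtc)
-{
-	int ret;
-
-	ret = drm_crtc_init(dev, crtc, &foo_crtc_funcs);
-	if (ret)
-		return ret;
-
-	/* Install the bottom handlers right after registering the CRTC. */
-	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
-	return 0;
-}]]></programlisting>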
- <sect2>
- <title>Atomic Modeset Helper Functions Reference</title>
- <sect3>
- <title>Overview</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c overview
- </sect3>
- <sect3>
- <title>Implementing Asynchronous Atomic Commit</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
- </sect3>
- <sect3>
- <title>Atomic State Reset and Initialization</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
- </sect3>
-!Iinclude/drm/drm_atomic_helper.h
-!Edrivers/gpu/drm/drm_atomic_helper.c
- </sect2>
- <sect2>
- <title>Modeset Helper Reference for Common Vtables</title>
-!Iinclude/drm/drm_modeset_helper_vtables.h
-!Pinclude/drm/drm_modeset_helper_vtables.h overview
- </sect2>
- <sect2>
- <title>Legacy CRTC/Modeset Helper Functions Reference</title>
-!Edrivers/gpu/drm/drm_crtc_helper.c
-!Pdrivers/gpu/drm/drm_crtc_helper.c overview
- </sect2>
- <sect2>
- <title>Output Probing Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_probe_helper.c output probing helper overview
-!Edrivers/gpu/drm/drm_probe_helper.c
- </sect2>
- <sect2>
- <title>fbdev Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
-!Edrivers/gpu/drm/drm_fb_helper.c
-!Iinclude/drm/drm_fb_helper.h
- </sect2>
- <sect2>
- <title>Framebuffer CMA Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_fb_cma_helper.c framebuffer cma helper functions
-!Edrivers/gpu/drm/drm_fb_cma_helper.c
- </sect2>
- <sect2>
- <title>Display Port Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
-!Iinclude/drm/drm_dp_helper.h
-!Edrivers/gpu/drm/drm_dp_helper.c
- </sect2>
- <sect2>
- <title>Display Port Dual Mode Adaptor Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers
-!Iinclude/drm/drm_dp_dual_mode_helper.h
-!Edrivers/gpu/drm/drm_dp_dual_mode_helper.c
- </sect2>
- <sect2>
- <title>Display Port MST Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
-!Iinclude/drm/drm_dp_mst_helper.h
-!Edrivers/gpu/drm/drm_dp_mst_topology.c
- </sect2>
- <sect2>
- <title>MIPI DSI Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
-!Iinclude/drm/drm_mipi_dsi.h
-!Edrivers/gpu/drm/drm_mipi_dsi.c
- </sect2>
- <sect2>
- <title>EDID Helper Functions Reference</title>
-!Edrivers/gpu/drm/drm_edid.c
- </sect2>
- <sect2>
- <title>Rectangle Utilities Reference</title>
-!Pinclude/drm/drm_rect.h rect utils
-!Iinclude/drm/drm_rect.h
-!Edrivers/gpu/drm/drm_rect.c
- </sect2>
- <sect2>
- <title>Flip-work Helper Reference</title>
-!Pinclude/drm/drm_flip_work.h flip utils
-!Iinclude/drm/drm_flip_work.h
-!Edrivers/gpu/drm/drm_flip_work.c
- </sect2>
- <sect2>
- <title>HDMI Infoframes Helper Reference</title>
- <para>
-     Strictly speaking this is not a DRM helper library but is generally
-     usable by any driver interfacing with HDMI outputs, such as v4l or alsa
-     drivers. It nevertheless fits nicely into the overall topic of mode
-     setting helper libraries and hence is also included here.
- </para>
-!Iinclude/linux/hdmi.h
-!Edrivers/video/hdmi.c
- </sect2>
- <sect2>
- <title id="drm-kms-planehelpers">Plane Helper Reference</title>
-!Edrivers/gpu/drm/drm_plane_helper.c
-!Pdrivers/gpu/drm/drm_plane_helper.c overview
- </sect2>
- <sect2>
- <title>Tile group</title>
-!Pdrivers/gpu/drm/drm_crtc.c Tile group
- </sect2>
- <sect2>
- <title>Bridges</title>
- <sect3>
- <title>Overview</title>
-!Pdrivers/gpu/drm/drm_bridge.c overview
- </sect3>
- <sect3>
- <title>Default bridge callback sequence</title>
-!Pdrivers/gpu/drm/drm_bridge.c bridge callbacks
- </sect3>
-!Edrivers/gpu/drm/drm_bridge.c
- </sect2>
- <sect2>
- <title>Panel Helper Reference</title>
-!Iinclude/drm/drm_panel.h
-!Edrivers/gpu/drm/drm_panel.c
-!Pdrivers/gpu/drm/drm_panel.c drm panel
- </sect2>
- </sect1>
-
- <!-- Internals: kms properties -->
-
- <sect1 id="drm-kms-properties">
- <title>KMS Properties</title>
- <para>
-    Drivers may need to expose parameters to applications beyond those
-    described in the previous sections. KMS supports attaching
- properties to CRTCs, connectors and planes and offers a userspace API to
- list, get and set the property values.
- </para>
- <para>
- Properties are identified by a name that uniquely defines the property
- purpose, and store an associated value. For all property types except blob
- properties the value is a 64-bit unsigned integer.
- </para>
- <para>
- KMS differentiates between properties and property instances. Drivers
- first create properties and then create and associate individual instances
- of those properties to objects. A property can be instantiated multiple
-    times and associated with different objects. Values are stored in property
-    instances, and all other property information is stored in the property
-    and shared between all instances of the property.
- </para>
- <para>
- Every property is created with a type that influences how the KMS core
- handles the property. Supported property types are
- <variablelist>
- <varlistentry>
- <term>DRM_MODE_PROP_RANGE</term>
- <listitem><para>Range properties report their minimum and maximum
-        admissible values. The KMS core verifies that values set by
-        applications fit in that range.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_MODE_PROP_ENUM</term>
- <listitem><para>Enumerated properties take a numerical value that
- ranges from 0 to the number of enumerated values defined by the
- property minus one, and associate a free-formed string name to each
- value. Applications can retrieve the list of defined value-name pairs
- and use the numerical value to get and set property instance values.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_MODE_PROP_BITMASK</term>
- <listitem><para>Bitmask properties are enumeration properties that
- additionally restrict all enumerated values to the 0..63 range.
- Bitmask property instance values combine one or more of the
- enumerated bits defined by the property.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_MODE_PROP_BLOB</term>
- <listitem><para>Blob properties store a binary blob without any format
- restriction. The binary blobs are created as KMS standalone objects,
- and blob property instance values store the ID of their associated
- blob object.</para>
- <para>Blob properties are only used for the connector EDID property
- and cannot be created by drivers.</para></listitem>
- </varlistentry>
- </variablelist>
- </para>
- <para>
- To create a property drivers call one of the following functions depending
- on the property type. All property creation functions take property flags
- and name, as well as type-specific arguments.
- <itemizedlist>
- <listitem>
- <synopsis>struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
- const char *name,
- uint64_t min, uint64_t max);</synopsis>
- <para>Create a range property with the given minimum and maximum
- values.</para>
- </listitem>
- <listitem>
- <synopsis>struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
- const char *name,
- const struct drm_prop_enum_list *props,
- int num_values);</synopsis>
- <para>Create an enumerated property. The <parameter>props</parameter>
- argument points to an array of <parameter>num_values</parameter>
- value-name pairs.</para>
- </listitem>
- <listitem>
- <synopsis>struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
- int flags, const char *name,
- const struct drm_prop_enum_list *props,
- int num_values);</synopsis>
- <para>Create a bitmask property. The <parameter>props</parameter>
- argument points to an array of <parameter>num_values</parameter>
- value-name pairs.</para>
- </listitem>
- </itemizedlist>
- </para>
- <para>
- Properties can additionally be created as immutable, in which case they
- will be read-only for applications but can be modified by the driver. To
- create an immutable property drivers must set the DRM_MODE_PROP_IMMUTABLE
- flag at property creation time.
- </para>
- <para>
- When no array of value-name pairs is readily available at property
- creation time for enumerated or range properties, drivers can create
- the property using the <function>drm_property_create</function> function
- and manually add enumeration value-name pairs by calling the
- <function>drm_property_add_enum</function> function. Care must be taken to
- properly specify the property type through the <parameter>flags</parameter>
- argument.
- </para>
- <para>
- After creating properties drivers can attach property instances to CRTC,
-    connector and plane objects by calling
-    <function>drm_object_attach_property</function>. The function takes a
- pointer to the target object, a pointer to the previously created property
- and an initial instance value.
- </para>
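-    <para>
-      A short sketch, assuming a hypothetical per-connector
-      <quote>foo-output-mode</quote> enumerated property; the property name,
-      its values and the foo_device structure are purely illustrative:
-    </para>
-    <programlisting><![CDATA[
-static const struct drm_prop_enum_list foo_output_mode_list[] = {
-	{ 0, "auto" },
-	{ 1, "forced" },
-};
-
-int foo_create_properties(struct foo_device *foo, struct drm_connector *connector)
-{
-	struct drm_property *prop;
-
-	/* Create the property once per device... */
-	prop = drm_property_create_enum(foo->drm_dev, 0, "foo-output-mode",
-					foo_output_mode_list,
-					ARRAY_SIZE(foo_output_mode_list));
-	if (!prop)
-		return -ENOMEM;
-	foo->output_mode_property = prop;
-
-	/* ...then attach an instance to each object with an initial value. */
-	drm_object_attach_property(&connector->base, prop, 0);
-
-	return 0;
-}]]></programlisting>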
- <sect2>
- <title>Existing KMS Properties</title>
- <para>
-    The following table gives a description of the DRM properties exposed by
-    various modules/drivers.
- </para>
- <table border="1" cellpadding="0" cellspacing="0">
- <tbody>
- <tr style="font-weight: bold;">
- <td valign="top" >Owner Module/Drivers</td>
- <td valign="top" >Group</td>
- <td valign="top" >Property Name</td>
- <td valign="top" >Type</td>
- <td valign="top" >Property Values</td>
- <td valign="top" >Object attached</td>
- <td valign="top" >Description/Restrictions</td>
- </tr>
- <tr>
- <td rowspan="42" valign="top" >DRM</td>
- <td rowspan="2" valign="top" >Generic</td>
- <td valign="top" >“rotation”</td>
- <td valign="top" >BITMASK</td>
- <td valign="top" >{ 0, "rotate-0" },
- { 1, "rotate-90" },
- { 2, "rotate-180" },
- { 3, "rotate-270" },
- { 4, "reflect-x" },
- { 5, "reflect-y" }</td>
- <td valign="top" >CRTC, Plane</td>
- <td valign="top" >rotate-(degrees) rotates the image by the specified amount in degrees
- in counter clockwise direction. reflect-x and reflect-y reflects the
- image along the specified axis prior to rotation</td>
- </tr>
- <tr>
- <td valign="top" >“scaling mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
- </tr>
- <tr>
- <td rowspan="5" valign="top" >Connector</td>
- <td valign="top" >“EDID”</td>
- <td valign="top" >BLOB | IMMUTABLE</td>
- <td valign="top" >0</td>
- <td valign="top" >Connector</td>
- <td valign="top" >Contains id of edid blob ptr object.</td>
- </tr>
- <tr>
- <td valign="top" >“DPMS”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ “On”, “Standby”, “Suspend”, “Off” }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >Contains DPMS operation mode value.</td>
- </tr>
- <tr>
- <td valign="top" >“PATH”</td>
- <td valign="top" >BLOB | IMMUTABLE</td>
- <td valign="top" >0</td>
- <td valign="top" >Connector</td>
- <td valign="top" >Contains topology path to a connector.</td>
- </tr>
- <tr>
- <td valign="top" >“TILE”</td>
- <td valign="top" >BLOB | IMMUTABLE</td>
- <td valign="top" >0</td>
- <td valign="top" >Connector</td>
- <td valign="top" >Contains tiling information for a connector.</td>
- </tr>
- <tr>
- <td valign="top" >“CRTC_ID”</td>
- <td valign="top" >OBJECT</td>
- <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
- <td valign="top" >Connector</td>
- <td valign="top" >CRTC that connector is attached to (atomic)</td>
- </tr>
- <tr>
- <td rowspan="11" valign="top" >Plane</td>
- <td valign="top" >“type”</td>
- <td valign="top" >ENUM | IMMUTABLE</td>
- <td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Plane type</td>
- </tr>
- <tr>
- <td valign="top" >“SRC_X”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“SRC_Y”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“SRC_W”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“SRC_H”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“CRTC_X”</td>
- <td valign="top" >SIGNED_RANGE</td>
- <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“CRTC_Y”</td>
- <td valign="top" >SIGNED_RANGE</td>
- <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“CRTC_W”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout CRTC (destination) width (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“CRTC_H”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout CRTC (destination) height (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“FB_ID”</td>
- <td valign="top" >OBJECT</td>
- <td valign="top" >DRM_MODE_OBJECT_FB</td>
- <td valign="top" >Plane</td>
- <td valign="top" >Scanout framebuffer (atomic)</td>
- </tr>
- <tr>
- <td valign="top" >“CRTC_ID”</td>
- <td valign="top" >OBJECT</td>
- <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
- <td valign="top" >Plane</td>
- <td valign="top" >CRTC that plane is attached to (atomic)</td>
- </tr>
- <tr>
- <td rowspan="2" valign="top" >DVI-I</td>
- <td valign="top" >“subconnector”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ “Unknown”, “DVI-D”, “DVI-A” }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“select subconnector”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ “Automatic”, “DVI-D”, “DVI-A” }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="13" valign="top" >TV</td>
- <td valign="top" >“subconnector”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "Unknown", "Composite", "SVIDEO", "Component", "SCART" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“select subconnector”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "Automatic", "Composite", "SVIDEO", "Component", "SCART" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“left margin”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“right margin”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“top margin”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“bottom margin”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“brightness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“contrast”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker reduction”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“overscan”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“saturation”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“hue”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="2" valign="top" >Virtual GPU</td>
- <td valign="top" >“suggested X”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffffff</td>
- <td valign="top" >Connector</td>
- <td valign="top" >property to suggest an X offset for a connector</td>
- </tr>
- <tr>
- <td valign="top" >“suggested Y”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffffff</td>
- <td valign="top" >Connector</td>
- <td valign="top" >property to suggest an Y offset for a connector</td>
- </tr>
- <tr>
- <td rowspan="7" valign="top" >Optional</td>
- <td valign="top" >"aspect ratio"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "None", "4:3", "16:9" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TDB</td>
- </tr>
- <tr>
- <td valign="top" >“dirty”</td>
- <td valign="top" >ENUM | IMMUTABLE</td>
- <td valign="top" >{ "Off", "On", "Annotate" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“DEGAMMA_LUT”</td>
- <td valign="top" >BLOB</td>
- <td valign="top" >0</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >DRM property to set the degamma lookup table
- (LUT) mapping pixel data from the framebuffer before it is
- given to the transformation matrix. The data is an interpreted
- as an array of struct drm_color_lut elements. Hardware might
- choose not to use the full precision of the LUT elements nor
- use all the elements of the LUT (for example the hardware
- might choose to interpolate between LUT[0] and LUT[4]). </td>
- </tr>
- <tr>
- <td valign="top" >“DEGAMMA_LUT_SIZE”</td>
- <td valign="top" >RANGE | IMMUTABLE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >DRM property to gives the size of the lookup
- table to be set on the DEGAMMA_LUT property (the size depends
- on the underlying hardware).</td>
- </tr>
- <tr>
- <td valign="top" >“CTM”</td>
- <td valign="top" >BLOB</td>
- <td valign="top" >0</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >DRM property to set the current
- transformation matrix (CTM) apply to pixel data after the
- lookup through the degamma LUT and before the lookup through
- the gamma LUT. The data is an interpreted as a struct
- drm_color_ctm.</td>
- </tr>
- <tr>
- <td valign="top" >“GAMMA_LUT”</td>
- <td valign="top" >BLOB</td>
- <td valign="top" >0</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >DRM property to set the gamma lookup table
- (LUT) mapping pixel data after to the transformation matrix to
- data sent to the connector. The data is an interpreted as an
- array of struct drm_color_lut elements. Hardware might choose
- not to use the full precision of the LUT elements nor use all
- the elements of the LUT (for example the hardware might choose
- to interpolate between LUT[0] and LUT[4]).</td>
- </tr>
- <tr>
- <td valign="top" >“GAMMA_LUT_SIZE”</td>
- <td valign="top" >RANGE | IMMUTABLE</td>
- <td valign="top" >Min=0, Max=UINT_MAX</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >DRM property to gives the size of the lookup
- table to be set on the GAMMA_LUT property (the size depends on
- the underlying hardware).</td>
- </tr>
- <tr>
- <td rowspan="20" valign="top" >i915</td>
- <td rowspan="2" valign="top" >Generic</td>
- <td valign="top" >"Broadcast RGB"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >When this property is set to Limited 16:235
- and CTM is set, the hardware will be programmed with the
- result of the multiplication of CTM by the limited range
- matrix to ensure the pixels normaly in the range 0..1.0 are
- remapped to the range 16/255..235/255.</td>
- </tr>
- <tr>
- <td valign="top" >“audio”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "force-dvi", "off", "auto", "on" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="17" valign="top" >SDVO-TV</td>
- <td valign="top" >“mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"left_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"right_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"top_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"bottom_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“hpos”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“vpos”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“contrast”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“saturation”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“hue”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“sharpness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker_filter”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker_filter_adaptive”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker_filter_2d”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“tv_chroma_filter”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“tv_luma_filter”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“dot_crawl”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >SDVO-TV/LVDS</td>
- <td valign="top" >“brightness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="2" valign="top" >CDV gma-500</td>
- <td rowspan="2" valign="top" >Generic</td>
- <td valign="top" >"Broadcast RGB"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ “Full”, “Limited 16:235” }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"Broadcast RGB"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ “off”, “auto”, “on” }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="19" valign="top" >Poulsbo</td>
- <td rowspan="1" valign="top" >Generic</td>
- <td valign="top" >“backlight”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=100</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="17" valign="top" >SDVO-TV</td>
- <td valign="top" >“mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"left_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"right_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"top_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"bottom_margin"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“hpos”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“vpos”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“contrast”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“saturation”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“hue”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“sharpness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker_filter”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker_filter_adaptive”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“flicker_filter_2d”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“tv_chroma_filter”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“tv_luma_filter”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“dot_crawl”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >SDVO-TV/LVDS</td>
- <td valign="top" >“brightness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max= SDVO dependent</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="11" valign="top" >armada</td>
- <td rowspan="2" valign="top" >CRTC</td>
- <td valign="top" >"CSC_YUV"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "Auto" , "CCIR601", "CCIR709" }</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"CSC_RGB"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "Auto", "Computer system", "Studio" }</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="9" valign="top" >Overlay</td>
- <td valign="top" >"colorkey"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"colorkey_min"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"colorkey_max"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"colorkey_val"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"colorkey_alpha"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0xffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"colorkey_mode"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "disabled", "Y component", "U component"
- , "V component", "RGB", “R component", "G component", "B component" }</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"brightness"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=256 + 255</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"contrast"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0x7fff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"saturation"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0x7fff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="2" valign="top" >exynos</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >“mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "normal", "blank" }</td>
- <td valign="top" >CRTC</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >Overlay</td>
- <td valign="top" >“zpos”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=MAX_PLANE-1</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="2" valign="top" >i2c/ch7006_drv</td>
- <td valign="top" >Generic</td>
- <td valign="top" >“scale”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=2</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="1" valign="top" >TV</td>
- <td valign="top" >“mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
- , "PAL-60", "NTSC-M", "NTSC-J" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="15" valign="top" >nouveau</td>
- <td rowspan="6" valign="top" >NV10 Overlay</td>
- <td valign="top" >"colorkey"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0x01ffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“contrast”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=8192-1</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“brightness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1024</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“hue”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=359</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“saturation”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=8192-1</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“iturbt_709”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="2" valign="top" >Nv04 Overlay</td>
- <td valign="top" >“colorkey”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0x01ffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“brightness”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1024</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="7" valign="top" >Display</td>
- <td valign="top" >“dithering mode”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "auto", "off", "on" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“dithering depth”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "auto", "off", "on", "static 2x2", "dynamic 2x2", "temporal" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“underscan”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "auto", "6 bpc", "8 bpc" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“underscan hborder”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=128</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“underscan vborder”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=128</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“vibrant hue”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=180</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >“color vibrance”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=200</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >omap</td>
- <td valign="top" >Generic</td>
- <td valign="top" >“zorder”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=3</td>
- <td valign="top" >CRTC, Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >qxl</td>
- <td valign="top" >Generic</td>
- <td valign="top" >“hotplug_mode_update"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="9" valign="top" >radeon</td>
- <td valign="top" >DVI-I</td>
- <td valign="top" >“coherent”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >DAC enable load detect</td>
- <td valign="top" >“load detection”</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=1</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >TV Standard</td>
- <td valign="top" >"tv standard"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "ntsc", "pal", "pal-m", "pal-60", "ntsc-j"
- , "scart-pal", "pal-cn", "secam" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >legacy TMDS PLL detect</td>
- <td valign="top" >"tmds_pll"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "driver", "bios" }</td>
- <td valign="top" >-</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="3" valign="top" >Underscan</td>
- <td valign="top" >"underscan"</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "off", "on", "auto" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"underscan hborder"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=128</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"underscan vborder"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=128</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >Audio</td>
- <td valign="top" >“audio”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "off", "on", "auto" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >FMT Dithering</td>
- <td valign="top" >“dither”</td>
- <td valign="top" >ENUM</td>
- <td valign="top" >{ "off", "on" }</td>
- <td valign="top" >Connector</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td rowspan="3" valign="top" >rcar-du</td>
- <td rowspan="3" valign="top" >Generic</td>
- <td valign="top" >"alpha"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=255</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"colorkey"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=0, Max=0x01ffffff</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- <tr>
- <td valign="top" >"zpos"</td>
- <td valign="top" >RANGE</td>
- <td valign="top" >Min=1, Max=7</td>
- <td valign="top" >Plane</td>
- <td valign="top" >TBD</td>
- </tr>
- </tbody>
- </table>
- </sect2>
- </sect1>
-
- <!-- Internals: vertical blanking -->
-
- <sect1 id="drm-vertical-blank">
- <title>Vertical Blanking</title>
- <para>
- Vertical blanking plays a major role in graphics rendering. To achieve
- tear-free display, users must synchronize page flips and/or rendering to
- vertical blanking. The DRM API offers ioctls to perform page flips
- synchronized to vertical blanking and wait for vertical blanking.
- </para>
- <para>
- The DRM core handles most of the vertical blanking management logic, which
- involves filtering out spurious interrupts, keeping race-free blanking
- counters, coping with counter wrap-around and resets and keeping use
- counts. It relies on the driver to generate vertical blanking interrupts
- and optionally provide a hardware vertical blanking counter. Drivers must
- implement the following operations.
- </para>
- <itemizedlist>
- <listitem>
- <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc);
-void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
- <para>
- Enable or disable vertical blanking interrupts for the given CRTC.
- </para>
- </listitem>
- <listitem>
- <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis>
- <para>
- Retrieve the value of the vertical blanking counter for the given
- CRTC. If the hardware maintains a vertical blanking counter its value
- should be returned. Otherwise drivers can use the
- <function>drm_vblank_count</function> helper function to handle this
- operation.
- </para>
- </listitem>
- </itemizedlist>
- <para>
- Drivers must initialize the vertical blanking handling core with a call to
- <function>drm_vblank_init</function> in their
- <methodname>load</methodname> operation. The function will set the struct
- <structname>drm_device</structname>
- <structfield>vblank_disable_allowed</structfield> field to 0. This will
- keep vertical blanking interrupts enabled permanently until the first mode
- set operation, where <structfield>vblank_disable_allowed</structfield> is
- set to 1. The reason behind this is not clear. Drivers can set the field
-    to 1 after calling <function>drm_vblank_init</function> to make vertical
- blanking interrupts dynamically managed from the beginning.
- </para>
- <para>
- Vertical blanking interrupts can be enabled by the DRM core or by drivers
- themselves (for instance to handle page flipping operations). The DRM core
- maintains a vertical blanking use count to ensure that the interrupts are
- not disabled while a user still needs them. To increment the use count,
- drivers call <function>drm_vblank_get</function>. Upon return vertical
- blanking interrupts are guaranteed to be enabled.
- </para>
- <para>
- To decrement the use count drivers call
- <function>drm_vblank_put</function>. Only when the use count drops to zero
- will the DRM core disable the vertical blanking interrupts after a delay
- by scheduling a timer. The delay is accessible through the vblankoffdelay
- module parameter or the <varname>drm_vblank_offdelay</varname> global
- variable and expressed in milliseconds. Its default value is 5000 ms.
- Zero means never disable, and a negative value means disable immediately.
- Drivers may override the behaviour by setting the
- <structname>drm_device</structname>
- <structfield>vblank_disable_immediate</structfield> flag, which when set
- causes vblank interrupts to be disabled immediately regardless of the
- drm_vblank_offdelay value. The flag should only be set if there's a
- properly working hardware vblank counter present.
- </para>
- <para>
- When a vertical blanking interrupt occurs drivers only need to call the
- <function>drm_handle_vblank</function> function to account for the
- interrupt.
- </para>
- <para>
- Resources allocated by <function>drm_vblank_init</function> must be freed
- with a call to <function>drm_vblank_cleanup</function> in the driver
- <methodname>unload</methodname> operation handler.
- </para>
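- <para>
- Tying the calls above together, the vertical blanking life cycle of a
- hypothetical single-CRTC "foo" driver might look roughly like the
- following sketch; the foo_* function names are placeholders and error
- handling is kept minimal.
- </para>
- <programlisting>
-#include &lt;drm/drmP.h&gt;
-#include &lt;linux/interrupt.h&gt;
-
-static int foo_load(struct drm_device *dev, unsigned long flags)
-{
-	/* One CRTC in this example; allocates the per-CRTC vblank state. */
-	return drm_vblank_init(dev, 1);
-}
-
-static irqreturn_t foo_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-
-	/* Account for the interrupt and wake up any vblank waiters. */
-	drm_handle_vblank(dev, 0);
-	return IRQ_HANDLED;
-}
-
-static int foo_queue_page_flip(struct drm_device *dev)
-{
-	/* Keep vblank interrupts enabled until the flip has completed. */
-	int ret = drm_vblank_get(dev, 0);
-
-	if (ret)
-		return ret;
-	/* ... program the flip, then call drm_vblank_put(dev, 0) from the
-	 * flip completion handler ... */
-	return 0;
-}
-
-static int foo_unload(struct drm_device *dev)
-{
-	drm_vblank_cleanup(dev);
-	return 0;
-}
-</programlisting>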
- <sect2>
- <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
-!Edrivers/gpu/drm/drm_irq.c
-!Finclude/drm/drmP.h drm_crtc_vblank_waitqueue
- </sect2>
- </sect1>
-
- <!-- Internals: open/close, file operations and ioctls -->
-
- <sect1>
- <title>Open/Close, File Operations and IOCTLs</title>
- <sect2>
- <title>Open and Close</title>
- <synopsis>int (*firstopen) (struct drm_device *);
-void (*lastclose) (struct drm_device *);
-int (*open) (struct drm_device *, struct drm_file *);
-void (*preclose) (struct drm_device *, struct drm_file *);
-void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
- <abstract>Open and close handlers. None of those methods are mandatory.
- </abstract>
- <para>
- The <methodname>firstopen</methodname> method is called by the DRM core
- for legacy UMS (User Mode Setting) drivers only when an application
- opens a device that has no other opened file handle. UMS drivers can
- implement it to acquire device resources. KMS drivers can't use the
- method and must acquire resources in the <methodname>load</methodname>
- method instead.
- </para>
- <para>
- Similarly the <methodname>lastclose</methodname> method is called when
- the last application holding a file handle opened on the device closes
- it, for both UMS and KMS drivers. Additionally, the method is also
- called at module unload time or, for hot-pluggable devices, when the
- device is unplugged. The <methodname>firstopen</methodname> and
- <methodname>lastclose</methodname> calls can thus be unbalanced.
- </para>
- <para>
- The <methodname>open</methodname> method is called every time the device
- is opened by an application. Drivers can allocate per-file private data
- in this method and store it in the struct
- <structname>drm_file</structname> <structfield>driver_priv</structfield>
- field. Note that the <methodname>open</methodname> method is called
- before <methodname>firstopen</methodname>.
- </para>
- <para>
- The close operation is split into <methodname>preclose</methodname> and
- <methodname>postclose</methodname> methods. Drivers must stop and
- clean up all per-file operations in the <methodname>preclose</methodname>
- method. For instance, pending vertical blanking and page flip events must
- be cancelled. No per-file operation is allowed on the file handle after
- returning from the <methodname>preclose</methodname> method.
- </para>
- <para>
- Finally the <methodname>postclose</methodname> method is called as the
- last step of the close operation, right before calling the
- <methodname>lastclose</methodname> method if no other open file handle
- exists for the device. Drivers that have allocated per-file private data
- in the <methodname>open</methodname> method should free it here.
- </para>
- <para>
- The <methodname>lastclose</methodname> method should restore CRTC and
- plane properties to their default values, so that a subsequent open of the
- device will not inherit state from the previous user. It can also be
- used to execute delayed power switching state changes, e.g. in
- conjunction with the vga_switcheroo infrastructure (see
- <xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
- do any further cleanup. Only legacy UMS drivers might need to clean up
- device state so that the vga console or an independent fbdev driver
- could take over.
- </para>
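- <para>
- As a rough illustration of the pattern described above, a hypothetical
- driver could manage per-file private data as follows; the
- foo_file_private structure and its contents are invented for this
- sketch.
- </para>
- <programlisting>
-#include &lt;drm/drmP.h&gt;
-#include &lt;linux/slab.h&gt;
-
-struct foo_file_private {
-	/* Per-open-file driver state goes here. */
-	unsigned int num_pending_events;
-};
-
-static int foo_open(struct drm_device *dev, struct drm_file *file)
-{
-	struct foo_file_private *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	file->driver_priv = priv;
-	return 0;
-}
-
-static void foo_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	/* Cancel pending vblank and page flip events for this file here. */
-}
-
-static void foo_postclose(struct drm_device *dev, struct drm_file *file)
-{
-	kfree(file->driver_priv);
-}
-</programlisting>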
- </sect2>
- <sect2>
- <title>File Operations</title>
-!Pdrivers/gpu/drm/drm_fops.c file operations
-!Edrivers/gpu/drm/drm_fops.c
- </sect2>
- <sect2>
- <title>IOCTLs</title>
- <synopsis>struct drm_ioctl_desc *ioctls;
-int num_ioctls;</synopsis>
- <abstract>Driver-specific ioctl descriptors table.</abstract>
- <para>
- Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
- descriptor table is indexed by the ioctl number's offset from the base
- value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
- table entries.
- </para>
- <para>
- <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting>
- <para>
- <parameter>ioctl</parameter> is the ioctl name. Drivers must define
- the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
- offset from DRM_COMMAND_BASE and the ioctl number respectively. The
- first macro is private to the device while the second must be exposed
- to userspace in a public header.
- </para>
- <para>
- <parameter>func</parameter> is a pointer to the ioctl handler function
- compatible with the <type>drm_ioctl_t</type> type.
- <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data,
- struct drm_file *file_priv);</programlisting>
- </para>
- <para>
- <parameter>flags</parameter> is a bitmask combination of the following
- values. It restricts how the ioctl is allowed to be called.
- <itemizedlist>
- <listitem><para>
- DRM_AUTH - Only authenticated callers allowed
- </para></listitem>
- <listitem><para>
- DRM_MASTER - The ioctl can only be called on the master file
- handle
- </para></listitem>
- <listitem><para>
- DRM_ROOT_ONLY - Only callers with the CAP_SYS_ADMIN capability allowed
- </para></listitem>
- <listitem><para>
- DRM_CONTROL_ALLOW - The ioctl can only be called on a control
- device
- </para></listitem>
- <listitem><para>
- DRM_UNLOCKED - The ioctl handler will be called without locking
- the DRM global mutex. This is the enforced default for KMS drivers
- (i.e. those using the DRIVER_MODESET flag) and hence shouldn't be used
- any more for new drivers.
- </para></listitem>
- </itemizedlist>
- </para>
- </para>
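- <para>
- Putting this together, a driver-specific ioctl table might be declared
- roughly as follows. The FOO_* names and handler functions are
- placeholders, and the corresponding DRM_FOO_* and DRM_IOCTL_FOO_*
- macros are assumed to be defined as described above.
- </para>
- <programlisting>
-#include &lt;drm/drmP.h&gt;
-
-/* Handlers follow the drm_ioctl_t signature shown above. */
-int foo_gem_create_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-int foo_set_scanout_ioctl(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv);
-
-static const struct drm_ioctl_desc foo_ioctls[] = {
-	/* Buffer allocation, safe to expose to render clients. */
-	DRM_IOCTL_DEF_DRV(FOO_GEM_CREATE, foo_gem_create_ioctl,
-			  DRM_AUTH | DRM_RENDER_ALLOW),
-	/* Display configuration, restricted to the master file handle. */
-	DRM_IOCTL_DEF_DRV(FOO_SET_SCANOUT, foo_set_scanout_ioctl, DRM_MASTER),
-};
-
-static struct drm_driver foo_driver = {
-	/* ... */
-	.ioctls = foo_ioctls,
-	.num_ioctls = ARRAY_SIZE(foo_ioctls),
-};
-</programlisting>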
-!Edrivers/gpu/drm/drm_ioctl.c
- </sect2>
- </sect1>
- <sect1>
- <title>Legacy Support Code</title>
- <para>
- The section very briefly covers some of the old legacy support code which
- is only used by old DRM drivers which have done a so-called shadow-attach
- to the underlying device instead of registering as a real driver. This
- also includes some of the old generic buffer management and command
- submission code. Do not use any of this in new and modern drivers.
- </para>
-
- <sect2>
- <title>Legacy Suspend/Resume</title>
- <para>
- The DRM core provides some suspend/resume code, but drivers wanting full
- suspend/resume support should provide save() and restore() functions.
- These are called at suspend, hibernate, or resume time, and should perform
- any state save or restore required by your device across suspend or
- hibernate states.
- </para>
- <synopsis>int (*suspend) (struct drm_device *, pm_message_t state);
- int (*resume) (struct drm_device *);</synopsis>
- <para>
- Those are legacy suspend and resume methods which
- <emphasis>only</emphasis> work with the legacy shadow-attach driver
- registration functions. New drivers should use the power management
- interface provided by their bus type (usually through
- the struct <structname>device_driver</structname> dev_pm_ops) and set
- these methods to NULL.
- </para>
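- <para>
- A brief sketch of the modern alternative mentioned above, assuming a
- hypothetical PCI-based "foo" driver; the suspend and resume bodies are
- stubs.
- </para>
- <programlisting>
-#include &lt;linux/pci.h&gt;
-#include &lt;linux/pm.h&gt;
-
-static int foo_pm_suspend(struct device *dev)
-{
-	/* Save device state and quiesce the hardware. */
-	return 0;
-}
-
-static int foo_pm_resume(struct device *dev)
-{
-	/* Restore device state and re-enable the hardware. */
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);
-
-static struct pci_driver foo_pci_driver = {
-	.name = "foo",
-	/* .id_table, .probe and .remove are left out of this sketch. */
-	.driver = {
-		.pm = &amp;foo_pm_ops,
-	},
-};
-</programlisting>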
- </sect2>
-
- <sect2>
- <title>Legacy DMA Services</title>
- <para>
- This should cover how DMA mapping etc. is supported by the core.
- These functions are deprecated and should not be used.
- </para>
- </sect2>
- </sect1>
- </chapter>
-
-<!-- TODO
-
-- Add a glossary
-- Document the struct_mutex catch-all lock
-- Document connector properties
-
-- Why is the load method optional?
-- What are drivers supposed to set the initial display state to, and how?
- Connector's DPMS states are not initialized and are thus equal to
- DRM_MODE_DPMS_ON. The fbcon compatibility layer calls
- drm_helper_disable_unused_functions(), which disables unused encoders and
- CRTCs, but doesn't touch the connectors' DPMS state, and
- drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers
- that don't implement (or just don't use) fbcon compatibility need to call
- those functions themselves?
-- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset()
- around mode setting. Should this be done in the DRM core?
-- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset()
- call and never set back to 0. It seems to be safe to permanently set it to 1
- in drm_vblank_init() for KMS driver, and it might be safe for UMS drivers as
- well. This should be investigated.
-- crtc and connector .save and .restore operations are only used internally in
- drivers, should they be removed from the core?
-- encoder mid-layer .save and .restore operations are only used internally in
- drivers, should they be removed from the core?
-- encoder mid-layer .detect operation is only used internally in drivers,
- should it be removed from the core?
--->
-
- <!-- External interfaces -->
-
- <chapter id="drmExternals">
- <title>Userland interfaces</title>
- <para>
- The DRM core exports several interfaces to applications,
- generally intended to be used through corresponding libdrm
- wrapper functions. In addition, drivers export device-specific
- interfaces for use by userspace drivers &amp; device-aware
- applications through ioctls and sysfs files.
- </para>
- <para>
- External interfaces include: memory mapping, context management,
- DMA operations, AGP management, vblank control, fence
- management, memory management, and output management.
- </para>
- <para>
- Cover generic ioctls and sysfs layout here. We only need high-level
- info, since man pages should cover the rest.
- </para>
-
- <!-- External: render nodes -->
-
- <sect1>
- <title>Render nodes</title>
- <para>
- DRM core provides multiple character-devices for user-space to use.
- Depending on which device is opened, user-space can perform a different
- set of operations (mainly ioctls). The primary node is always created
- and called card&lt;num&gt;. Additionally, a currently
- unused control node, called controlD&lt;num&gt;, is also
- created. The primary node provides all legacy operations and
- historically was the only interface used by userspace. With KMS, the
- control node was introduced. However, the planned KMS control interface
- has never been written and so the control node stays unused to date.
- </para>
- <para>
- With the increased use of offscreen renderers and GPGPU applications,
- clients no longer require running compositors or graphics servers to
- make use of a GPU. But the DRM API required unprivileged clients to
- authenticate to a DRM-Master prior to getting GPU access. To avoid this
- step and to grant clients GPU access without authenticating, render
- nodes were introduced. Render nodes solely serve render clients, that
- is, no modesetting or privileged ioctls can be issued on render nodes.
- Only non-global rendering commands are allowed. If a driver supports
- render nodes, it must advertise it via the DRIVER_RENDER
- DRM driver capability. If not supported, the primary node must be used
- for render clients together with the legacy drmAuth authentication
- procedure.
- </para>
- <para>
- If a driver advertises render node support, DRM core will create a
- separate render node called renderD&lt;num&gt;. There will
- be one render node per device. No ioctls except PRIME-related ioctls
- are allowed on this node; in particular, GEM_OPEN is
- explicitly prohibited. Render nodes are designed to avoid the
- buffer leaks that occur when clients guess flink names or mmap
- offsets on the legacy interface. In addition to this basic interface,
- drivers must mark their driver-dependent render-only ioctls as
- DRM_RENDER_ALLOW so render clients can use them. Driver
- authors must be careful not to allow any privileged ioctls on render
- nodes.
- </para>
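- <para>
- At the driver level this amounts to one capability flag plus the
- per-ioctl DRM_RENDER_ALLOW tagging shown in the ioctl description
- earlier; a minimal, hypothetical sketch reusing the foo_ioctls table:
- </para>
- <programlisting>
-static struct drm_driver foo_driver = {
-	/* Advertise render node support alongside the usual features. */
-	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
-	.ioctls = foo_ioctls,	/* render-safe entries marked DRM_RENDER_ALLOW */
-	.num_ioctls = ARRAY_SIZE(foo_ioctls),
-	/* ... */
-};
-</programlisting>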
- <para>
- With render nodes, user-space can now control access to the render node
- via basic file-system access-modes. A running graphics server which
- authenticates clients on the privileged primary/legacy node is no longer
- required. Instead, a client can open the render node and is immediately
- granted GPU access. Communication between clients (or servers) is done
- via PRIME. FLINK from render node to legacy node is not supported. New
- clients must not use the insecure FLINK interface.
- </para>
- <para>
- Besides dropping all modeset/global ioctls, render nodes also drop the
- DRM-Master concept. There is no reason to associate render clients with
- a DRM-Master as they are independent of any graphics server; besides,
- they must work without any running master anyway.
- Drivers must be able to run without a master object if they support
- render nodes. If, on the other hand, a driver requires shared state
- between clients which is visible to user-space and accessible beyond
- open-file boundaries, they cannot support render nodes.
- </para>
- </sect1>
-
- <!-- External: vblank handling -->
-
- <sect1>
- <title>VBlank event handling</title>
- <para>
- The DRM core exposes two vertical blank related ioctls:
- <variablelist>
- <varlistentry>
- <term>DRM_IOCTL_WAIT_VBLANK</term>
- <listitem>
- <para>
- This takes a struct drm_wait_vblank structure as its argument,
- and it is used to block or request a signal when a specified
- vblank event occurs.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>DRM_IOCTL_MODESET_CTL</term>
- <listitem>
- <para>
- This was only used for user-mode-setting drivers around
- modesetting changes to allow the kernel to update the vblank
- interrupt after mode setting, since on many devices the vertical
- blank counter is reset to 0 at some point during modeset. Modern
- drivers should not call this any more since with kernel mode
- setting it is a no-op.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </para>
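- <para>
- As a small illustration of the first ioctl, waiting for the next
- vertical blank on the first CRTC from userspace might look like the
- following, using the libdrm <function>drmWaitVBlank</function> wrapper
- around DRM_IOCTL_WAIT_VBLANK; the device path and the lack of error
- handling are simplifications.
- </para>
- <programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;xf86drm.h&gt;
-
-int main(void)
-{
-	drmVBlank vbl;
-	int fd = open("/dev/dri/card0", O_RDWR);
-
-	if (fd &lt; 0)
-		return 1;
-
-	/* Block until one more vertical blank has passed on CRTC 0. */
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	vbl.request.sequence = 1;
-	vbl.request.signal = 0;
-	if (drmWaitVBlank(fd, &amp;vbl) == 0)
-		printf("vblank sequence: %u\n", vbl.reply.sequence);
-	return 0;
-}
-</programlisting>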
- </sect1>
-
- </chapter>
-</part>
-<part id="drmDrivers">
- <title>DRM Drivers</title>
-
- <partintro>
- <para>
- This second part of the GPU Driver Developer's Guide documents driver
- code, implementation details and also all the driver-specific userspace
- interfaces. Since all hardware-acceleration interfaces to
- userspace are driver-specific (for efficiency and other reasons), these
- interfaces can be rather substantial. Hence every driver has its own
- chapter.
- </para>
- </partintro>
-
- <chapter id="drmI915">
- <title>drm/i915 Intel GFX Driver</title>
- <para>
- The drm/i915 driver supports all (with the exception of some very early
- models) integrated GFX chipsets with both Intel display and rendering
- blocks. This excludes a set of SoC platforms with an SGX rendering unit;
- those have basic support through the gma500 drm driver.
- </para>
- <sect1>
- <title>Core Driver Infrastructure</title>
- <para>
- This section covers core driver infrastructure used by both the display
- and the GEM parts of the driver.
- </para>
- <sect2>
- <title>Runtime Power Management</title>
-!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
-!Idrivers/gpu/drm/i915/intel_runtime_pm.c
-!Idrivers/gpu/drm/i915/intel_uncore.c
- </sect2>
- <sect2>
- <title>Interrupt Handling</title>
-!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
- </sect2>
- <sect2>
- <title>Intel GVT-g Guest Support (vGPU)</title>
-!Pdrivers/gpu/drm/i915/i915_vgpu.c Intel GVT-g guest support
-!Idrivers/gpu/drm/i915/i915_vgpu.c
- </sect2>
- </sect1>
- <sect1>
- <title>Display Hardware Handling</title>
- <para>
- This section covers everything related to the display hardware including
- the mode setting infrastructure, plane, sprite and cursor handling and
- display, output probing and related topics.
- </para>
- <sect2>
- <title>Mode Setting Infrastructure</title>
- <para>
- The i915 driver is thus far the only DRM driver which doesn't use the
- common DRM helper code to implement mode setting sequences. Thus it
- has its own tailor-made infrastructure for executing a display
- configuration change.
- </para>
- </sect2>
- <sect2>
- <title>Frontbuffer Tracking</title>
-!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
-!Idrivers/gpu/drm/i915/intel_frontbuffer.c
-!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
- </sect2>
- <sect2>
- <title>Display FIFO Underrun Reporting</title>
-!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
-!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
- </sect2>
- <sect2>
- <title>Plane Configuration</title>
- <para>
- This section covers plane configuration and composition with the
- primary plane, sprites, cursors and overlays. This includes the
- infrastructure to do atomic vsync'ed updates of all this state and
- also tightly coupled topics like watermark setup and computation,
- framebuffer compression and panel self refresh.
- </para>
- </sect2>
- <sect2>
- <title>Atomic Plane Helpers</title>
-!Pdrivers/gpu/drm/i915/intel_atomic_plane.c atomic plane helpers
-!Idrivers/gpu/drm/i915/intel_atomic_plane.c
- </sect2>
- <sect2>
- <title>Output Probing</title>
- <para>
- This section covers output probing and related infrastructure like the
- hotplug interrupt storm detection and mitigation code. Note that the
- i915 driver still uses most of the common DRM helper code for output
- probing, so those sections fully apply.
- </para>
- </sect2>
- <sect2>
- <title>Hotplug</title>
-!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
-!Idrivers/gpu/drm/i915/intel_hotplug.c
- </sect2>
- <sect2>
- <title>High Definition Audio</title>
-!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
-!Idrivers/gpu/drm/i915/intel_audio.c
-!Iinclude/drm/i915_component.h
- </sect2>
- <sect2>
- <title>Panel Self Refresh (PSR/SRD)</title>
-!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
-!Idrivers/gpu/drm/i915/intel_psr.c
- </sect2>
- <sect2>
- <title>Frame Buffer Compression (FBC)</title>
-!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
-!Idrivers/gpu/drm/i915/intel_fbc.c
- </sect2>
- <sect2>
- <title>Display Refresh Rate Switching (DRRS)</title>
-!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init
-
- </sect2>
- <sect2>
- <title>DPIO</title>
-!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
- </sect2>
-
- <sect2>
- <title>CSR firmware support for DMC</title>
-!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
-!Idrivers/gpu/drm/i915/intel_csr.c
- </sect2>
- <sect2>
- <title>Video BIOS Table (VBT)</title>
-!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
-!Idrivers/gpu/drm/i915/intel_bios.c
-!Idrivers/gpu/drm/i915/intel_vbt_defs.h
- </sect2>
- </sect1>
-
- <sect1>
- <title>Memory Management and Command Submission</title>
- <para>
- This section covers all things related to the GEM implementation in the
- i915 driver.
- </para>
- <sect2>
- <title>Batchbuffer Parsing</title>
-!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
-!Idrivers/gpu/drm/i915/i915_cmd_parser.c
- </sect2>
- <sect2>
- <title>Batchbuffer Pools</title>
-!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
-!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
- </sect2>
- <sect2>
- <title>Logical Rings, Logical Ring Contexts and Execlists</title>
-!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
-!Idrivers/gpu/drm/i915/intel_lrc.c
- </sect2>
- <sect2>
- <title>Global GTT views</title>
-!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
-!Idrivers/gpu/drm/i915/i915_gem_gtt.c
- </sect2>
- <sect2>
- <title>GTT Fences and Swizzling</title>
-!Idrivers/gpu/drm/i915/i915_gem_fence.c
- <sect3>
- <title>Global GTT Fence Handling</title>
-!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
- </sect3>
- <sect3>
- <title>Hardware Tiling and Swizzling Details</title>
-!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
- </sect3>
- </sect2>
- <sect2>
- <title>Object Tiling IOCTLs</title>
-!Idrivers/gpu/drm/i915/i915_gem_tiling.c
-!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
- </sect2>
- <sect2>
- <title>Buffer Object Eviction</title>
- <para>
- This section documents the interface functions for evicting buffer
- objects to make space available in the GPU virtual address spaces.
- Note that this is mostly orthogonal to shrinking buffer object
- caches, which aims to make main memory (shared with the GPU
- through the unified memory architecture) available.
- </para>
-!Idrivers/gpu/drm/i915/i915_gem_evict.c
- </sect2>
- <sect2>
- <title>Buffer Object Memory Shrinking</title>
- <para>
- This section documents the interface function for shrinking memory
- usage of buffer object caches. Shrinking is used to make main memory
- available. Note that this is mostly orthogonal to evicting buffer
- objects, which aims to make space available in GPU virtual address
- spaces.
- </para>
-!Idrivers/gpu/drm/i915/i915_gem_shrinker.c
- </sect2>
- </sect1>
- <sect1>
- <title>GuC</title>
- <sect2>
- <title>GuC-specific firmware loader</title>
-!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
-!Idrivers/gpu/drm/i915/intel_guc_loader.c
- </sect2>
- <sect2>
- <title>GuC-based command submission</title>
-!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
-!Idrivers/gpu/drm/i915/i915_guc_submission.c
- </sect2>
- <sect2>
- <title>GuC Firmware Layout</title>
-!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
- </sect2>
- </sect1>
-
- <sect1>
- <title> Tracing </title>
- <para>
- This section covers all things related to the tracepoints implemented in
- the i915 driver.
- </para>
- <sect2>
- <title> i915_ppgtt_create and i915_ppgtt_release </title>
-!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
- </sect2>
- <sect2>
- <title> i915_context_create and i915_context_free </title>
-!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
- </sect2>
- <sect2>
- <title> switch_mm </title>
-!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
- </sect2>
- </sect1>
-
- </chapter>
-!Cdrivers/gpu/drm/i915/i915_irq.c
-</part>
-
-<part id="vga_switcheroo">
- <title>vga_switcheroo</title>
- <partintro>
-!Pdrivers/gpu/vga/vga_switcheroo.c Overview
- </partintro>
-
- <chapter id="modes_of_use">
- <title>Modes of Use</title>
- <sect1>
- <title>Manual switching and manual power control</title>
-!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
- </sect1>
- <sect1>
- <title>Driver power control</title>
-!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
- </sect1>
- </chapter>
-
- <chapter id="api">
- <title>API</title>
- <sect1>
- <title>Public functions</title>
-!Edrivers/gpu/vga/vga_switcheroo.c
- </sect1>
- <sect1>
- <title>Public structures</title>
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
- </sect1>
- <sect1>
- <title>Public constants</title>
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler_flags_t
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
- </sect1>
- <sect1>
- <title>Private structures</title>
-!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
-!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
- </sect1>
- </chapter>
-
- <chapter id="handlers">
- <title>Handlers</title>
- <sect1>
- <title>apple-gmux Handler</title>
-!Pdrivers/platform/x86/apple-gmux.c Overview
-!Pdrivers/platform/x86/apple-gmux.c Interrupt
- <sect2>
- <title>Graphics mux</title>
-!Pdrivers/platform/x86/apple-gmux.c Graphics mux
- </sect2>
- <sect2>
- <title>Power control</title>
-!Pdrivers/platform/x86/apple-gmux.c Power control
- </sect2>
- <sect2>
- <title>Backlight control</title>
-!Pdrivers/platform/x86/apple-gmux.c Backlight control
- </sect2>
- <sect2>
- <title>Public functions</title>
-!Iinclude/linux/apple-gmux.h
- </sect2>
- </sect1>
- </chapter>
-
-!Cdrivers/gpu/vga/vga_switcheroo.c
-!Cinclude/linux/vga_switcheroo.h
-!Cdrivers/platform/x86/apple-gmux.c
-</part>
-
-</book>
diff --git a/Documentation/DocBook/media/.gitignore b/Documentation/DocBook/media/.gitignore
deleted file mode 100644
index e461c585fde8..000000000000
--- a/Documentation/DocBook/media/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-!*.svg
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
deleted file mode 100644
index fdc138624800..000000000000
--- a/Documentation/DocBook/media/Makefile
+++ /dev/null
@@ -1,427 +0,0 @@
-###
-# Media build rules - Auto-generates media contents/indexes and *.h xml's
-#
-
-SHELL=/bin/bash
-
-MEDIA_OBJ_DIR=$(objtree)/Documentation/DocBook/
-MEDIA_SRC_DIR=$(srctree)/Documentation/DocBook/media
-
-MEDIA_TEMP = media-entities.tmpl \
- media-indices.tmpl \
- videodev2.h.xml \
- v4l2.xml \
- audio.h.xml \
- ca.h.xml \
- dmx.h.xml \
- frontend.h.xml \
- net.h.xml \
- video.h.xml \
-
-IMGFILES := $(patsubst %.b64,%, $(notdir $(shell ls $(MEDIA_SRC_DIR)/*.b64)))
-OBJIMGFILES := $(addprefix $(MEDIA_OBJ_DIR)/, $(IMGFILES))
-GENFILES := $(addprefix $(MEDIA_OBJ_DIR)/, $(MEDIA_TEMP))
-
-PHONY += cleanmediadocs
-
-cleanmediadocs:
- -@rm -f `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null
-
-$(obj)/media_api.xml: $(GENFILES) FORCE
-
-#$(MEDIA_OBJ_DIR)/media_api.html: $(MEDIA_OBJ_DIR)/media_api.xml
-#$(MEDIA_OBJ_DIR)/media_api.pdf: $(MEDIA_OBJ_DIR)/media_api.xml
-#$(MEDIA_OBJ_DIR)/media_api.ps: $(MEDIA_OBJ_DIR)/media_api.xml
-
-V4L_SGMLS = \
- $(shell ls $(MEDIA_SRC_DIR)/v4l/*.xml|perl -ne 'print "$$1 " if (m,.*/(.*)\n,)') \
- capture.c.xml \
- keytable.c.xml \
- v4l2grab.c.xml
-
-DVB_SGMLS = \
- $(shell ls $(MEDIA_SRC_DIR)/dvb/*.xml|perl -ne 'print "$$1 " if (m,.*/(.*)\n,)')
-
-MEDIA_SGMLS = $(addprefix ./,$(V4L_SGMLS)) $(addprefix ./,$(DVB_SGMLS)) $(addprefix ./,$(MEDIA_TEMP))
-
-FUNCS = \
- close \
- ioctl \
- mmap \
- munmap \
- open \
- poll \
- read \
- select \
- write \
-
-IOCTLS = \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/videodev2.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/dvb/audio.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/dvb/ca.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/dvb/dmx.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/dvb/frontend.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([A-Z][^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/dvb/net.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/dvb/video.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/media.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/linux/cec.h) \
- $(shell perl -ne 'print "$$1 " if /\#define\s+([^\s]+)\s+_IO/' $(srctree)/include/uapi/linux/v4l2-subdev.h) \
-
-DEFINES = \
- $(shell perl -ne 'print "$$1 " if /\#define\s+(DTV_[^\s]+)\s+/' $(srctree)/include/uapi/linux/dvb/frontend.h) \
-
-TYPES = \
- $(shell perl -ne 'print "$$1 " if /^typedef\s+.*\s+(\S+)\;/' $(srctree)/include/uapi/linux/videodev2.h) \
- $(shell perl -ne 'print "$$1 " if /^typedef\s+.*\s+(\S+)\;/' $(srctree)/include/uapi/linux/dvb/frontend.h)
-
-ENUMS = \
- $(shell perl -ne 'print "$$1 " if /^enum\s+([^\s]+)\s+/' \
- $(srctree)/include/uapi/linux/videodev2.h \
- $(srctree)/include/uapi/linux/dvb/audio.h \
- $(srctree)/include/uapi/linux/dvb/ca.h \
- $(srctree)/include/uapi/linux/dvb/dmx.h \
- $(srctree)/include/uapi/linux/dvb/frontend.h \
- $(srctree)/include/uapi/linux/dvb/net.h \
- $(srctree)/include/uapi/linux/dvb/video.h \
- $(srctree)/include/uapi/linux/media.h \
- $(srctree)/include/uapi/linux/v4l2-mediabus.h \
- $(srctree)/include/uapi/linux/v4l2-subdev.h)
-
-ENUM_DEFS = \
- $(shell perl -e 'open IN,"cat @ARGV| cpp -fpreprocessed |"; while (<IN>) { if ($$enum) {print "$$1\n" if (/\s*([A-Z]\S+)\b/); } $$enum = 0 if ($$enum && /^\}/); $$enum = 1 if(/^\s*enum\s/); }; close IN;' \
- $(srctree)/include/uapi/linux/dvb/dmx.h \
- $(srctree)/include/uapi/linux/dvb/frontend.h)
-
-STRUCTS = \
- $(shell perl -ne 'print "$$1 " if /^struct\s+([^\s]+)\s+/' $(srctree)/include/uapi/linux/videodev2.h) \
- $(shell perl -ne 'print "$$1 " if (/^struct\s+([^\s\{]+)\s*/)' $(srctree)/include/uapi/linux/dvb/audio.h) \
- $(shell perl -ne 'print "$$1 " if (/^struct\s+([^\s]+)\s+/)' $(srctree)/include/uapi/linux/dvb/ca.h) \
- $(shell perl -ne 'print "$$1 " if (/^struct\s+([^\s]+)\s+/)' $(srctree)/include/uapi/linux/dvb/dmx.h) \
- $(shell perl -ne 'print "$$1 " if (!/dtv\_cmds\_h/ && /^struct\s+([^\s]+)\s+/)' $(srctree)/include/uapi/linux/dvb/frontend.h) \
- $(shell perl -ne 'print "$$1 " if (/^struct\s+([^\s]+)\s+/ && !/_old/)' $(srctree)/include/uapi/linux/dvb/net.h) \
- $(shell perl -ne 'print "$$1 " if (/^struct\s+([^\s]+)\s+/)' $(srctree)/include/uapi/linux/dvb/video.h) \
- $(shell perl -ne 'print "$$1 " if /^struct\s+([^\s]+)\s+/' $(srctree)/include/uapi/linux/media.h) \
- $(shell perl -ne 'print "$$1 " if /^struct\s+([^\s]+)\s+/' $(srctree)/include/linux/cec.h) \
- $(shell perl -ne 'print "$$1 " if /^struct\s+([^\s]+)\s+/' $(srctree)/include/uapi/linux/v4l2-subdev.h) \
- $(shell perl -ne 'print "$$1 " if /^struct\s+([^\s]+)\s+/' $(srctree)/include/uapi/linux/v4l2-mediabus.h)
-
-ERRORS = \
- E2BIG \
- EACCES \
- EAGAIN \
- EBADF \
- EBADFD \
- EBADR \
- EBADRQC \
- EBUSY \
- ECHILD \
- ECONNRESET \
- EDEADLK \
- EDOM \
- EEXIST \
- EFAULT \
- EFBIG \
- EILSEQ \
- EINIT \
- EINPROGRESS \
- EINTR \
- EINVAL \
- EIO \
- EMFILE \
- ENFILE \
- ENOBUFS \
- ENODATA \
- ENODEV \
- ENOENT \
- ENOIOCTLCMD \
- ENOMEM \
- ENOSPC \
- ENOSR \
- ENOSYS \
- ENOTSUP \
- ENOTSUPP \
- ENOTTY \
- ENXIO \
- EOPNOTSUPP \
- EOVERFLOW \
- EPERM \
- EPIPE \
- EPROTO \
- ERANGE \
- EREMOTE \
- EREMOTEIO \
- ERESTART \
- ERESTARTSYS \
- ESHUTDOWN \
- ESPIPE \
- ETIME \
- ETIMEDOUT \
- EUSERS \
- EWOULDBLOCK \
- EXDEV \
-
-ESCAPE = \
- -e "s/&/\\&amp;/g" \
- -e "s/</\\&lt;/g" \
- -e "s/>/\\&gt;/g"
-
-FILENAME = \
- -e s,"^[^\/]*/",, \
- -e s/"\\.xml"// \
- -e s/"\\.tmpl"// \
- -e s/\\\./-/g \
- -e s/"^func-"// \
- -e s/"^pixfmt-"// \
- -e s/"^vidioc-"//
-
-# Generate references to these structs in videodev2.h.xml.
-DOCUMENTED = \
- -e "s/\(enum *\)v4l2_mpeg_cx2341x_video_\([a-z]*_spatial_filter_type\)/\1<link linkend=\"\2\">v4l2_mpeg_cx2341x_video_\2<\/link>/g" \
- -e "s/\(\(enum\|struct\) *\)\(v4l2_[a-zA-Z0-9_]*\)/\1<link linkend=\"\3\">\3<\/link>/g" \
- -e "s/\(V4L2_PIX_FMT_[A-Z0-9_]\+\)\(\s\+v4l2_fourcc\)/<link linkend=\"\1\">\1<\/link>\2/g" \
- -e ":a;s/\(linkend=\".*\)_\(.*\">\)/\1-\2/;ta" \
- -e "s/v4l2\-mpeg\-vbi\-ITV0/v4l2-mpeg-vbi-itv0-1/g"
-
-DVB_DOCUMENTED = \
- -e "s,\(struct\s\+\)\([a-z0-9_]\+\)\(\s\+{\),\1\<link linkend=\"\2\">\2\<\/link\>\3,g" \
- -e "s,\(}\s\+\)\([a-z0-9_]\+_t\+\),\1\<link linkend=\"\2\">\2\<\/link\>,g" \
- -e "s,\(define\s\+\)\(DTV_[A-Z0-9_]\+\)\(\s\+[0-9]\+\),\1\<link linkend=\"\2\">\2\<\/link\>\3,g" \
- -e "s,<link\s\+linkend=\".*\">\(DTV_IOCTL_MAX_MSGS\|dtv_cmds_h\|__.*_old\)<\/link>,\1,g" \
- -e ":a;s/\(linkend=\".*\)_\(.*\">\)/\1-\2/;ta" \
- -e "s,\(audio-mixer\|audio-karaoke\|audio-status\|ca-slot-info\|ca-descr-info\|ca-caps\|ca-msg\|ca-descr\|ca-pid\|dmx-filter\|dmx-caps\|video-system\|video-highlight\|video-spu\|video-spu-palette\|video-navi-pack\)-t,\1,g" \
- -e "s,DTV-ISDBT-LAYER[A-C],DTV-ISDBT-LAYER,g" \
- -e "s,\(define\s\+\)\([A-Z0-9_]\+\)\(\s\+_IO\),\1\<link linkend=\"\2\">\2\<\/link\>\3,g" \
- -e "s,\(define\s\+\)\(DTV_[A-Z0-9_]\+\)\(\s\+\),\1\<link linkend=\"\2\">\2\<\/link\>\3,g" \
- -e "s,<link\s\+linkend=\".*\">\(__.*_OLD\)<\/link>,\1,g" \
- -e "s/\(linkend\=\"\)FE_SET_PROPERTY/\1FE_GET_PROPERTY/g" \
- -e "s,<link\s\+linkend=\".*\">\(DTV_ISDBS_TS_ID_LEGACY\|DTV_MAX_COMMAND\|DTV_IOCTL_MAX_MSGS\)<\/link>,\1,g" \
-
-#
-# Media targets and dependencies
-#
-
-install_media_images = \
- $(Q)if [ "x$(findstring media_api.xml,$(DOCBOOKS))" != "x" ]; then \
- mkdir -p $(MEDIA_OBJ_DIR)/media_api; \
- cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/*.svg $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api; \
- fi
-
-$(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
- $(Q)base64 -d $< >$@
-
-$(MEDIA_OBJ_DIR)/v4l2.xml: $(OBJIMGFILES)
- @$($(quiet)gen_xml)
- @(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/v4l/*xml $(MEDIA_OBJ_DIR)/)
- @(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/dvb/*xml $(MEDIA_OBJ_DIR)/)
-
-$(MEDIA_OBJ_DIR)/videodev2.h.xml: $(srctree)/include/uapi/linux/videodev2.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DOCUMENTED) | \
- sed 's/i\.e\./&ie;/') >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/audio.h.xml: $(srctree)/include/uapi/linux/dvb/audio.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DVB_DOCUMENTED) | \
- sed 's/i\.e\./&ie;/') >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/ca.h.xml: $(srctree)/include/uapi/linux/dvb/ca.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DVB_DOCUMENTED) | \
- sed 's/i\.e\./&ie;/') >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/dmx.h.xml: $(srctree)/include/uapi/linux/dvb/dmx.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- for ident in $(ENUM_DEFS) ; do \
- entity=`echo $$ident | tr _ -` ; \
- r="$$r s/([^\w\-])$$ident([^\w\-])/\1\&$$entity\;\2/g;";\
- done; \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DVB_DOCUMENTED) | \
- sed 's/i\.e\./&ie;/' | \
- perl -ne "$$r print $$_;") >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/frontend.h.xml: $(srctree)/include/uapi/linux/dvb/frontend.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- for ident in $(ENUM_DEFS) ; do \
- entity=`echo $$ident | tr _ -` ; \
- r="$$r s/([^\w\-])$$ident([^\w\-])/\1\&$$entity\;\2/g;";\
- done; \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DVB_DOCUMENTED) | \
- sed 's/i\.e\./&ie;/' | \
- perl -ne "$$r print $$_;") >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/net.h.xml: $(srctree)/include/uapi/linux/dvb/net.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DVB_DOCUMENTED) | \
- sed 's/i\.e\./&ie;/') >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/video.h.xml: $(srctree)/include/uapi/linux/dvb/video.h $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<programlisting>") > $@
- @( \
- expand --tabs=8 < $< | \
- sed $(ESCAPE) $(DVB_DOCUMENTED) | \
- sed 's/i\.e\./&ie;/') >> $@
- @( \
- echo "</programlisting>") >> $@
-
-$(MEDIA_OBJ_DIR)/media-entities.tmpl: $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<!-- Generated file! Do not edit. -->") >$@
- @( \
- echo -e "\n<!-- Functions -->") >>$@
- @( \
- for ident in $(FUNCS) ; do \
- entity=`echo $$ident | tr _ -` ; \
- echo "<!ENTITY func-$$entity \"<link" \
- "linkend='func-$$entity'><function>$$ident()</function></link>\">" \
- >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Ioctls -->") >>$@
- @( \
- for ident in `echo $(IOCTLS) | sed -e "s,VIDIOC_RESERVED,,"`; do\
- entity=`echo $$ident | tr _ -` ; \
- id=`grep -e "<refname>$$ident" -e "<section id=\"$$ident\"" $$(find $(MEDIA_SRC_DIR) -name *.xml -type f)| sed -r s,"^.*/(.*).xml.*","\1",` ; \
- if [ "$$id" != "" ]; then echo "<!ENTITY $$entity \"<link" \
- "linkend='$$id'><constant>$$ident</constant></link>\">" \
- >>$@ ; else \
- echo "Warning: undocumented ioctl: $$ident. Please document it at the media DocBook!" >&2; \
- fi; \
- done)
- @( \
- echo -e "\n<!-- Defines -->") >>$@
- @( \
- for ident in $(DEFINES) ; do \
- entity=`echo $$ident | tr _ -` ; \
- echo "<!ENTITY $$entity \"<link" \
- "linkend='$$entity'><constant>$$ident</constant></link>\">" \
- >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Types -->") >>$@
- @( \
- for ident in $(TYPES) ; do \
- entity=`echo $$ident | tr _ -` ; \
- echo "<!ENTITY $$entity \"<link" \
- "linkend='$$entity'>$$ident</link>\">" >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Enums -->") >>$@
- @( \
- for ident in $(ENUMS) ; do \
- entity=`echo $$ident | sed -e "s/v4l2_mpeg_cx2341x_video_\([a-z]*_spatial_filter_type\)/\1/" | tr _ -` ; \
- echo "<!ENTITY $$entity \"enum&nbsp;<link" \
- "linkend='$$entity'>$$ident</link>\">" >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Enum definitions -->") >>$@
- @( \
- for ident in $(ENUM_DEFS) ; do \
- entity=`echo $$ident | tr _ -` ; \
- echo "<!ENTITY $$entity \"<link" \
- "linkend='$$entity'><constant>$$ident</constant></link>\">" \
- >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Structures -->") >>$@
- @( \
- for ident in $(STRUCTS) ; do \
- entity=`echo $$ident | tr _ - | sed s/v4l2-mpeg-vbi-ITV0/v4l2-mpeg-vbi-itv0-1/g` ; \
- echo "<!ENTITY $$entity \"struct&nbsp;<link" \
- "linkend='$$entity'>$$ident</link>\">" >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Error Codes -->") >>$@
- @( \
- for ident in $(ERRORS) ; do \
- echo "<!ENTITY $$ident \"<errorcode>$$ident</errorcode>" \
- "error code\">" >>$@ ; \
- done)
- @( \
- echo -e "\n<!-- Subsections -->") >>$@
- @( \
- for file in $(MEDIA_SGMLS) ; do \
- entity=`echo "$$file" | sed $(FILENAME) -e s/"^([^-]*)"/sub\1/` ; \
- if ! echo "$$file" | \
- grep -q -E -e '^(func|vidioc|pixfmt)-' ; then \
- echo "<!ENTITY sub-$$entity SYSTEM \"$$file\">" >>$@ ; \
- fi ; \
- done)
- @( \
- echo -e "\n<!-- Function Reference -->") >>$@
- @( \
- for file in $(MEDIA_SGMLS) ; do \
- if echo "$$file" | \
- grep -q -E -e '(func|vidioc|pixfmt)-' ; then \
- entity=`echo "$$file" |sed $(FILENAME)` ; \
- echo "<!ENTITY $$entity SYSTEM \"$$file\">" >>$@ ; \
- fi ; \
- done)
-
-# Jade can auto-generate a list-of-tables, which includes all structs,
-# but we only want data types, all types, and sorted please.
-$(MEDIA_OBJ_DIR)/media-indices.tmpl: $(MEDIA_OBJ_DIR)/v4l2.xml
- @$($(quiet)gen_xml)
- @( \
- echo "<!-- Generated file! Do not edit. -->") >$@
- @( \
- echo -e "\n<index><title>List of Types</title>") >>$@
- @( \
- for ident in $(TYPES) ; do \
- id=`echo $$ident | tr _ -` ; \
- echo "<indexentry><primaryie><link" \
- "linkend='$$id'>$$ident</link></primaryie></indexentry>" >>$@ ; \
- done)
- @( \
- for ident in $(ENUMS) ; do \
- id=`echo $$ident | sed -e "s/v4l2_mpeg_cx2341x_video_\([a-z]*_spatial_filter_type\)/\1/" | tr _ -`; \
- echo "<indexentry><primaryie>enum&nbsp;<link" \
- "linkend='$$id'>$$ident</link></primaryie></indexentry>" >>$@ ; \
- done)
- @( \
- for ident in $(STRUCTS) ; do \
- id=`echo $$ident | tr _ - | sed s/v4l2-mpeg-vbi-ITV0/v4l2-mpeg-vbi-itv0-1/g` ; \
- echo "<indexentry><primaryie>struct&nbsp;<link" \
- "linkend='$$id'>$$ident</link></primaryie></indexentry>" >>$@ ; \
- done)
- @( \
- echo "</index>") >>$@
-
diff --git a/Documentation/DocBook/media/bayer.png.b64 b/Documentation/DocBook/media/bayer.png.b64
deleted file mode 100644
index ccdf2bcda95c..000000000000
--- a/Documentation/DocBook/media/bayer.png.b64
+++ /dev/null
@@ -1,171 +0,0 @@
-[171 lines of base64-encoded PNG image data]
diff --git a/Documentation/DocBook/media/constraints.png.b64 b/Documentation/DocBook/media/constraints.png.b64
deleted file mode 100644
index 125b4a94962c..000000000000
--- a/Documentation/DocBook/media/constraints.png.b64
+++ /dev/null
@@ -1,59 +0,0 @@
-[59 lines of base64-encoded PNG image data]
diff --git a/Documentation/DocBook/media/crop.gif.b64 b/Documentation/DocBook/media/crop.gif.b64
deleted file mode 100644
index 11d936ae72e8..000000000000
--- a/Documentation/DocBook/media/crop.gif.b64
+++ /dev/null
@@ -1,105 +0,0 @@
diff --git a/Documentation/DocBook/media/dvb/.gitignore b/Documentation/DocBook/media/dvb/.gitignore
deleted file mode 100644
index d7ec32eafac9..000000000000
--- a/Documentation/DocBook/media/dvb/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-!*.xml
diff --git a/Documentation/DocBook/media/dvb/audio.xml b/Documentation/DocBook/media/dvb/audio.xml
deleted file mode 100644
index ea56743ddbe7..000000000000
--- a/Documentation/DocBook/media/dvb/audio.xml
+++ /dev/null
@@ -1,1314 +0,0 @@
-<title>DVB Audio Device</title>
-<para>The DVB audio device controls the MPEG2 audio decoder of the DVB hardware. It
-can be accessed through <constant>/dev/dvb/adapter?/audio?</constant>. Data types and
-ioctl definitions can be accessed by including <constant>linux/dvb/audio.h</constant> in your
-application.
-</para>
-<para>Please note that some DVB cards don&#8217;t have their own MPEG decoder, which results in
-the omission of the audio and video devices.
-</para>
-<para>
-These ioctls were also used by V4L2 to control MPEG decoders implemented in V4L2. The use
-of these ioctls for that purpose has been made obsolete and proper V4L2 ioctls or controls
-have been created to replace that functionality.</para>
-
-<section id="audio_data_types">
-<title>Audio Data Types</title>
-<para>This section describes the structures, data types and defines used when talking to the
-audio device.
-</para>
-
-<section id="audio-stream-source-t">
-<title>audio_stream_source_t</title>
-<para>The audio stream source is set through the AUDIO_SELECT_SOURCE call and can take
-the following values, depending on whether we are replaying from an internal (demux) or
-external (user write) source.
-</para>
-<programlisting>
-typedef enum {
- AUDIO_SOURCE_DEMUX,
- AUDIO_SOURCE_MEMORY
-} audio_stream_source_t;
-</programlisting>
-<para>AUDIO_SOURCE_DEMUX selects the demultiplexer (fed either by the frontend or the
-DVR device) as the source of the audio stream. If AUDIO_SOURCE_MEMORY
-is selected the stream comes from the application through the <constant>write()</constant> system
-call.
-</para>
-
-</section>
-<section id="audio-play-state-t">
-<title>audio_play_state_t</title>
-<para>The following values can be returned by the AUDIO_GET_STATUS call representing the
-state of audio playback.
-</para>
-<programlisting>
-typedef enum {
- AUDIO_STOPPED,
- AUDIO_PLAYING,
- AUDIO_PAUSED
-} audio_play_state_t;
-</programlisting>
-
-</section>
-<section id="audio-channel-select-t">
-<title>audio_channel_select_t</title>
-<para>The audio channel selected via AUDIO_CHANNEL_SELECT is determined by the
-following values.
-</para>
-<programlisting>
-typedef enum {
- AUDIO_STEREO,
- AUDIO_MONO_LEFT,
- AUDIO_MONO_RIGHT,
- AUDIO_MONO,
- AUDIO_STEREO_SWAPPED
-} audio_channel_select_t;
-</programlisting>
-
-</section>
-<section id="audio-status">
-<title>struct audio_status</title>
-<para>The AUDIO_GET_STATUS call returns the following structure informing about various
-states of the playback operation.
-</para>
-<programlisting>
-typedef struct audio_status {
- boolean AV_sync_state;
- boolean mute_state;
- audio_play_state_t play_state;
- audio_stream_source_t stream_source;
- audio_channel_select_t channel_select;
- boolean bypass_mode;
- audio_mixer_t mixer_state;
-} audio_status_t;
-</programlisting>
-
-</section>
-<section id="audio-mixer">
-<title>struct audio_mixer</title>
-<para>The following structure is used by the AUDIO_SET_MIXER call to set the audio
-volume.
-</para>
-<programlisting>
-typedef struct audio_mixer {
- unsigned int volume_left;
- unsigned int volume_right;
-} audio_mixer_t;
-</programlisting>
-
-</section>
-<section id="audio_encodings">
-<title>audio encodings</title>
-<para>A call to AUDIO_GET_CAPABILITIES returns an unsigned integer with the following
-bits set according to the hardware&#8217;s capabilities.
-</para>
-<programlisting>
- #define AUDIO_CAP_DTS 1
- #define AUDIO_CAP_LPCM 2
- #define AUDIO_CAP_MP1 4
- #define AUDIO_CAP_MP2 8
- #define AUDIO_CAP_MP3 16
- #define AUDIO_CAP_AAC 32
- #define AUDIO_CAP_OGG 64
- #define AUDIO_CAP_SDDS 128
- #define AUDIO_CAP_AC3 256
-</programlisting>
-
-</section>
-<section id="audio-karaoke">
-<title>struct audio_karaoke</title>
-<para>The ioctl AUDIO_SET_KARAOKE uses the following format:
-</para>
-<programlisting>
-typedef
-struct audio_karaoke {
- int vocal1;
- int vocal2;
- int melody;
-} audio_karaoke_t;
-</programlisting>
-<para>If Vocal1 or Vocal2 are non-zero, they get mixed into left and right at 70% each. If both
-Vocal1 and Vocal2 are non-zero, Vocal1 gets mixed into the left channel and Vocal2 into the
-right channel at 100% each. If Melody is non-zero, the melody channel gets mixed into left
-and right.
-</para>
-
-</section>
-<section id="audio-attributes-t">
-<title>audio attributes</title>
-<para>The following attributes can be set by a call to AUDIO_SET_ATTRIBUTES:
-</para>
-<programlisting>
- typedef uint16_t audio_attributes_t;
- /&#x22C6; bits: descr. &#x22C6;/
- /&#x22C6; 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, &#x22C6;/
- /&#x22C6; 12 multichannel extension &#x22C6;/
- /&#x22C6; 11-10 audio type (0=not spec, 1=language included) &#x22C6;/
- /&#x22C6; 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) &#x22C6;/
- /&#x22C6; 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, &#x22C6;/
- /&#x22C6; 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) &#x22C6;/
- /&#x22C6; 2- 0 number of audio channels (n+1 channels) &#x22C6;/
-</programlisting>
- </section></section>
-<section id="audio_function_calls">
-<title>Audio Function Calls</title>
-
-
-<section id="audio_fopen">
-<title>open()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call opens a named audio device (e.g. /dev/dvb/adapter0/audio0)
- for subsequent use. When an open() call has succeeded, the device will be ready
- for use. The significance of blocking or non-blocking mode is described in the
- documentation for functions where there is a difference. It does not affect the
- semantics of the open() call itself. A device opened in blocking mode can later
- be put into non-blocking mode (and vice versa) using the F_SETFL command
- of the fcntl system call. This is a standard system call, documented in the Linux
- manual page for fcntl. Only one user can open the Audio Device in O_RDWR
- mode. All other attempts to open the device in this mode will fail, and an error
- code will be returned. If the Audio Device is opened in O_RDONLY mode, the
- only ioctl call that can be used is AUDIO_GET_STATUS. All other calls will
- return with an error code.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int open(const char &#x22C6;deviceName, int flags);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>const char
- *deviceName</para>
-</entry><entry
- align="char">
-<para>Name of specific audio device.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int flags</para>
-</entry><entry
- align="char">
-<para>A bit-wise OR of the following flags:</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDONLY read-only access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDWR read/write access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_NONBLOCK open in non-blocking mode</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>(blocking mode is the default)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>ENODEV</para>
-</entry><entry
- align="char">
-<para>Device driver not loaded/available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>Device or resource busy.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid argument.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
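-<para>As an illustration, a minimal sketch of opening an audio device; the
- adapter and device numbers (adapter0/audio0) are assumptions and error
- handling is kept to a minimum:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;fcntl.h&gt;
-#include &lt;unistd.h&gt;
-
-int main(void)
-{
-        /* assumed device node; adjust adapter and device numbers as needed */
-        const char *dev = "/dev/dvb/adapter0/audio0";
-        int fd = open(dev, O_RDWR | O_NONBLOCK);
-
-        if (fd == -1) {
-                perror("open");
-                return 1;
-        }
-        /* ... issue audio ioctls on fd here ... */
-        close(fd);
-        return 0;
-}
-</programlisting>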
-</section>
-<section id="audio_fclose">
-<title>close()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call closes a previously opened audio device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int close(int fd);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
-<section id="audio_fwrite">
-<title>write()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call can only be used if AUDIO_SOURCE_MEMORY is selected
- in the ioctl call AUDIO_SELECT_SOURCE. The data provided shall be in
- PES format. If O_NONBLOCK is not specified the function will block until
- buffer space is available. The amount of data to be transferred is implied by
- count.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>size_t write(int fd, const void &#x22C6;buf, size_t count);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>void *buf</para>
-</entry><entry
- align="char">
-<para>Pointer to the buffer containing the PES data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t count</para>
-</entry><entry
- align="char">
-<para>Size of buf.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EPERM</para>
-</entry><entry
- align="char">
-<para>Mode AUDIO_SOURCE_MEMORY not selected.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ENOMEM</para>
-</entry><entry
- align="char">
-<para>Attempted to write more data than the internal buffer can
- hold.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
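-<para>A minimal sketch of feeding PES data from application memory, assuming
- fd refers to an audio device opened in O_RDWR mode and pes_buf/pes_len are
- hypothetical names for an application-provided PES packet:</para>
-<programlisting>
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-/* Switch the audio device to memory input and feed one PES packet. */
-int feed_pes(int fd, const void *pes_buf, size_t pes_len)
-{
-        if (ioctl(fd, AUDIO_SELECT_SOURCE, AUDIO_SOURCE_MEMORY) == -1)
-                return -1;
-        /* may block until buffer space is available unless O_NONBLOCK is set */
-        if (write(fd, pes_buf, pes_len) == -1)
-                return -1;
-        return 0;
-}
-</programlisting>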
-</section><section id="AUDIO_STOP"
-role="subsection"><title>AUDIO_STOP</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to stop playing the current stream.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_STOP);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_STOP for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_PLAY"
-role="subsection"><title>AUDIO_PLAY</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to start playing an audio stream from the
- selected source.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_PLAY);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_PLAY for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
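-<para>For illustration, a sketch that selects the demultiplexer as input and
- starts playback, assuming fd refers to an already opened audio device:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-/* Start decoding the audio stream delivered by the demux. */
-int start_demux_playback(int fd)
-{
-        if (ioctl(fd, AUDIO_SELECT_SOURCE, AUDIO_SOURCE_DEMUX) == -1)
-                return -1;
-        return ioctl(fd, AUDIO_PLAY);
-}
-</programlisting>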
-</section><section id="AUDIO_PAUSE"
-role="subsection"><title>AUDIO_PAUSE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call suspends the audio stream being played. Decoding and playing
- are paused. It is then possible to restart the decoding and playing process of
- the audio stream using the AUDIO_CONTINUE command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>If AUDIO_SOURCE_MEMORY is selected in the ioctl call
- AUDIO_SELECT_SOURCE, the DVB-subsystem will not decode (consume)
- any more data until the ioctl call AUDIO_CONTINUE or AUDIO_PLAY is
- performed.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_PAUSE);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_PAUSE for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_CONTINUE"
-role="subsection"><title>AUDIO_CONTINUE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl restarts the decoding and playing process previously paused
-with the AUDIO_PAUSE command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>It only works if the stream was previously paused with AUDIO_PAUSE.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_CONTINUE);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_CONTINUE for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_SELECT_SOURCE"
-role="subsection"><title>AUDIO_SELECT_SOURCE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call informs the audio device which source shall be used
- for the input data. The possible sources are demux or memory. If
- AUDIO_SOURCE_MEMORY is selected, the data is fed to the Audio Device
- through the write command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_SELECT_SOURCE,
- audio_stream_source_t source);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SELECT_SOURCE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>audio_stream_source_t
- source</para>
-</entry><entry
- align="char">
-<para>Indicates the source that shall be used for the Audio
- stream.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_SET_MUTE"
-role="subsection"><title>AUDIO_SET_MUTE</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To control a V4L2 decoder use the V4L2
-&VIDIOC-DECODER-CMD; with the <constant>V4L2_DEC_CMD_START_MUTE_AUDIO</constant> flag instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the audio device to mute the stream that is currently being
- played.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_SET_MUTE,
- boolean state);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_MUTE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>boolean state</para>
-</entry><entry
- align="char">
-<para>Indicates if audio device shall mute or not.</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>TRUE Audio Mute</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>FALSE Audio Un-mute</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
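-<para>A short sketch of muting and un-muting playback, assuming fd refers to
- an already opened audio device:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-/* mute != 0 mutes the stream, mute == 0 un-mutes it */
-int set_mute(int fd, int mute)
-{
-        return ioctl(fd, AUDIO_SET_MUTE, mute ? 1 : 0);
-}
-</programlisting>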
-</section><section id="AUDIO_SET_AV_SYNC"
-role="subsection"><title>AUDIO_SET_AV_SYNC</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to turn ON or OFF A/V synchronization.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_SET_AV_SYNC,
- boolean state);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_AV_SYNC for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>boolean state</para>
-</entry><entry
- align="char">
-<para>Tells the DVB subsystem if A/V synchronization shall be
- ON or OFF.</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>TRUE AV-sync ON</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>FALSE AV-sync OFF</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_SET_BYPASS_MODE"
-role="subsection"><title>AUDIO_SET_BYPASS_MODE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to bypass the Audio decoder and forward
- the stream without decoding. This mode shall be used for streams that the DVB
- subsystem cannot decode itself but that still need to be decoded, e.g. by
- external hardware. Dolby Digital&#8482; streams are automatically forwarded by
- the DVB subsystem if the hardware can handle it.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- AUDIO_SET_BYPASS_MODE, boolean mode);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_BYPASS_MODE for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>boolean mode</para>
-</entry><entry
- align="char">
-<para>Enables or disables the decoding of the current Audio
- stream in the DVB subsystem.</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>TRUE Bypass is disabled</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>FALSE Bypass is enabled</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_CHANNEL_SELECT"
-role="subsection"><title>AUDIO_CHANNEL_SELECT</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To control a V4L2 decoder use the V4L2
-<constant>V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK</constant> control instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to select the requested channel if possible.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- AUDIO_CHANNEL_SELECT, audio_channel_select_t);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_CHANNEL_SELECT for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>audio_channel_select_t
- ch</para>
-</entry><entry
- align="char">
-<para>Select the output format of the audio (mono left/right,
- stereo).</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
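-<para>For example, a sketch that plays only the left channel of a stereo or
- bilingual stream on both outputs, assuming fd refers to an opened audio device:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-/* Route the left channel to both outputs. */
-int select_left_mono(int fd)
-{
-        return ioctl(fd, AUDIO_CHANNEL_SELECT, AUDIO_MONO_LEFT);
-}
-</programlisting>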
-</section><section id="AUDIO_BILINGUAL_CHANNEL_SELECT"
-role="subsection"><title>AUDIO_BILINGUAL_CHANNEL_SELECT</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is obsolete. Do not use in new drivers. It has been replaced by
-the V4L2 <constant>V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK</constant> control
-for MPEG decoders controlled through V4L2.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to select the requested channel for bilingual streams if possible.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- AUDIO_BILINGUAL_CHANNEL_SELECT, audio_channel_select_t);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_BILINGUAL_CHANNEL_SELECT for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>audio_channel_select_t
-ch</para>
-</entry><entry
- align="char">
-<para>Select the output format of the audio (mono left/right,
- stereo).</para>
-</entry>
- </row>
-</tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_GET_PTS"
-role="subsection"><title>AUDIO_GET_PTS</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is obsolete. Do not use in new drivers. If you need this functionality,
-then please contact the linux-media mailing list (&v4l-ml;).</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to return the current PTS timestamp.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- AUDIO_GET_PTS, __u64 *pts);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_GET_PTS for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u64 *pts
-</para>
-</entry><entry
- align="char">
-<para>Returns the 33-bit timestamp as defined in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
-</para>
-<para>
-The PTS should belong to the currently played
-frame if possible, but may also be a value close to it
-like the PTS of the last decoded frame or the last PTS
-extracted by the PES parser.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
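-<para>Although this ioctl is obsolete, a sketch of how a legacy application
- would read the current PTS, assuming fd refers to an opened audio device:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-int print_pts(int fd)
-{
-        __u64 pts;
-
-        if (ioctl(fd, AUDIO_GET_PTS, &amp;pts) == -1)
-                return -1;
-        printf("current audio PTS: %llu\n", (unsigned long long)pts);
-        return 0;
-}
-</programlisting>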
-</section><section id="AUDIO_GET_STATUS"
-role="subsection"><title>AUDIO_GET_STATUS</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to return the current state of the Audio
- Device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_GET_STATUS,
- struct audio_status &#x22C6;status);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_GET_STATUS for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct audio_status
- *status</para>
-</entry><entry
- align="char">
-<para>Returns the current state of Audio Device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
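-<para>A sketch that queries the device state and prints a few of the fields of
- struct audio_status described above, assuming fd refers to an opened audio device:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-int print_status(int fd)
-{
-        struct audio_status st;
-
-        if (ioctl(fd, AUDIO_GET_STATUS, &amp;st) == -1)
-                return -1;
-        printf("play state: %d, muted: %d, AV sync: %d\n",
-               (int)st.play_state, (int)st.mute_state, (int)st.AV_sync_state);
-        return 0;
-}
-</programlisting>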
-</section><section id="AUDIO_GET_CAPABILITIES"
-role="subsection"><title>AUDIO_GET_CAPABILITIES</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to tell us about the decoding capabilities
- of the audio hardware.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- AUDIO_GET_CAPABILITIES, unsigned int &#x22C6;cap);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_GET_CAPABILITIES for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>unsigned int *cap</para>
-</entry><entry
- align="char">
-<para>Returns a bit array of supported sound formats.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
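-<para>For example, a sketch that tests two of the capability bits listed in the
- audio encodings section, assuming fd refers to an opened audio device:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-int print_caps(int fd)
-{
-        unsigned int caps;
-
-        if (ioctl(fd, AUDIO_GET_CAPABILITIES, &amp;caps) == -1)
-                return -1;
-        printf("AC3: %s, MP2: %s\n",
-               (caps &amp; AUDIO_CAP_AC3) ? "yes" : "no",
-               (caps &amp; AUDIO_CAP_MP2) ? "yes" : "no");
-        return 0;
-}
-</programlisting>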
-</section><section id="AUDIO_CLEAR_BUFFER"
-role="subsection"><title>AUDIO_CLEAR_BUFFER</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Audio Device to clear all software and hardware buffers
- of the audio decoder device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_CLEAR_BUFFER);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_CLEAR_BUFFER for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_SET_ID"
-role="subsection"><title>AUDIO_SET_ID</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl selects which sub-stream is to be decoded if a program or system
- stream is sent to the video device. If no audio stream type is set the id has to be
- in [0xC0,0xDF] for MPEG sound, in [0x80,0x87] for AC3 and in [0xA0,0xA7]
- for LPCM. More specifications may follow for other stream types. If the stream
- type is set the id just specifies the substream id of the audio stream and only
- the first 5 bits are recognized.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_SET_ID, int
- id);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_ID for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int id</para>
-</entry><entry
- align="char">
-<para>audio sub-stream id</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="AUDIO_SET_MIXER"
-role="subsection"><title>AUDIO_SET_MIXER</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl lets you adjust the mixer settings of the audio decoder.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = AUDIO_SET_MIXER,
- audio_mixer_t &#x22C6;mix);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_MIXER for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>audio_mixer_t *mix</para>
-</entry><entry
- align="char">
-<para>mixer settings.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
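-<para>A sketch of setting both channels to the same volume, assuming fd refers
- to an opened audio device; the usable value range is hardware dependent and
- 0-255 is only an assumption here:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/audio.h&gt;
-
-/* vol is assumed to be in the range 0-255; check your hardware. */
-int set_volume(int fd, unsigned int vol)
-{
-        audio_mixer_t mix;
-
-        mix.volume_left  = vol;
-        mix.volume_right = vol;
-        return ioctl(fd, AUDIO_SET_MIXER, &amp;mix);
-}
-</programlisting>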
-</section><section id="AUDIO_SET_STREAMTYPE"
-role="subsection"><title>AUDIO_SET_STREAMTYPE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl tells the driver which kind of audio stream to expect. This is useful
- if the stream offers several audio sub-streams like LPCM and AC3.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = AUDIO_SET_STREAMTYPE,
- int type);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_STREAMTYPE for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int type</para>
-</entry><entry
- align="char">
-<para>stream type</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>type is not a valid or supported stream type.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="AUDIO_SET_EXT_ID"
-role="subsection"><title>AUDIO_SET_EXT_ID</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl can be used to set the extension id for MPEG streams in DVD
- playback. Only the first 3 bits are recognized.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = AUDIO_SET_EXT_ID, int
- id);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_EXT_ID for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int id</para>
-</entry><entry
- align="char">
-<para>audio sub_stream_id</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>id is not a valid id.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="AUDIO_SET_ATTRIBUTES"
-role="subsection"><title>AUDIO_SET_ATTRIBUTES</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is intended for DVD playback and allows you to set certain
- information about the audio stream.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = AUDIO_SET_ATTRIBUTES,
- audio_attributes_t attr );</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_ATTRIBUTES for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>audio_attributes_t
- attr</para>
-</entry><entry
- align="char">
-<para>audio attributes according to section ??</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>attr is not a valid or supported attribute setting.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="AUDIO_SET_KARAOKE"
-role="subsection"><title>AUDIO_SET_KARAOKE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl allows one to set the mixer settings for a karaoke DVD.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = AUDIO_SET_KARAOKE,
- audio_karaoke_t &#x22C6;karaoke);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals AUDIO_SET_KARAOKE for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>audio_karaoke_t
- *karaoke</para>
-</entry><entry
- align="char">
-<para>karaoke settings according to section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>karaoke is not a valid or supported karaoke setting.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
- </section>
-</section>
diff --git a/Documentation/DocBook/media/dvb/ca.xml b/Documentation/DocBook/media/dvb/ca.xml
deleted file mode 100644
index d0b07e763908..000000000000
--- a/Documentation/DocBook/media/dvb/ca.xml
+++ /dev/null
@@ -1,582 +0,0 @@
-<title>DVB CA Device</title>
-<para>The DVB CA device controls the conditional access hardware. It can be accessed through
-<constant>/dev/dvb/adapter?/ca?</constant>. Data types and ioctl definitions can be accessed by
-including <constant>linux/dvb/ca.h</constant> in your application.
-</para>
-
-<section id="ca_data_types">
-<title>CA Data Types</title>
-
-
-<section id="ca-slot-info">
-<title>ca_slot_info_t</title>
- <programlisting>
-typedef struct ca_slot_info {
- int num; /&#x22C6; slot number &#x22C6;/
-
- int type; /&#x22C6; CA interface this slot supports &#x22C6;/
-#define CA_CI 1 /&#x22C6; CI high level interface &#x22C6;/
-#define CA_CI_LINK 2 /&#x22C6; CI link layer level interface &#x22C6;/
-#define CA_CI_PHYS 4 /&#x22C6; CI physical layer level interface &#x22C6;/
-#define CA_DESCR 8 /&#x22C6; built-in descrambler &#x22C6;/
-#define CA_SC 128 /&#x22C6; simple smart card interface &#x22C6;/
-
- unsigned int flags;
-#define CA_CI_MODULE_PRESENT 1 /&#x22C6; module (or card) inserted &#x22C6;/
-#define CA_CI_MODULE_READY 2
-} ca_slot_info_t;
-</programlisting>
-
-</section>
-<section id="ca-descr-info">
-<title>ca_descr_info_t</title>
-<programlisting>
-typedef struct ca_descr_info {
- unsigned int num; /&#x22C6; number of available descramblers (keys) &#x22C6;/
- unsigned int type; /&#x22C6; type of supported scrambling system &#x22C6;/
-#define CA_ECD 1
-#define CA_NDS 2
-#define CA_DSS 4
-} ca_descr_info_t;
-</programlisting>
-
-</section>
-<section id="ca-caps">
-<title>ca_caps_t</title>
-<programlisting>
-typedef struct ca_caps {
- unsigned int slot_num; /&#x22C6; total number of CA card and module slots &#x22C6;/
- unsigned int slot_type; /&#x22C6; OR of all supported types &#x22C6;/
- unsigned int descr_num; /&#x22C6; total number of descrambler slots (keys) &#x22C6;/
- unsigned int descr_type;/&#x22C6; OR of all supported types &#x22C6;/
- } ca_caps_t;
-</programlisting>
-
-</section>
-<section id="ca-msg">
-<title>ca_msg_t</title>
-<programlisting>
-/&#x22C6; a message to/from a CI-CAM &#x22C6;/
-typedef struct ca_msg {
- unsigned int index;
- unsigned int type;
- unsigned int length;
- unsigned char msg[256];
-} ca_msg_t;
-</programlisting>
-
-</section>
-<section id="ca-descr">
-<title>ca_descr_t</title>
-<programlisting>
-typedef struct ca_descr {
- unsigned int index;
- unsigned int parity;
- unsigned char cw[8];
-} ca_descr_t;
-</programlisting>
-</section>
-
-<section id="ca-pid">
-<title>ca-pid</title>
-<programlisting>
-typedef struct ca_pid {
- unsigned int pid;
- int index; /&#x22C6; -1 == disable&#x22C6;/
-} ca_pid_t;
-</programlisting>
-</section></section>
-
-<section id="ca_function_calls">
-<title>CA Function Calls</title>
-
-
-<section id="ca_fopen">
-<title>open()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call opens a named CA device (e.g. /dev/dvb/adapter0/ca0) for subsequent use.</para>
-<para>When an open() call has succeeded, the device will be ready for use.
- The significance of blocking or non-blocking mode is described in the
- documentation for functions where there is a difference. It does not affect the
- semantics of the open() call itself. A device opened in blocking mode can later
- be put into non-blocking mode (and vice versa) using the F_SETFL command
- of the fcntl system call. This is a standard system call, documented in the Linux
- manual page for fcntl. Only one user can open the CA Device in O_RDWR
- mode. All other attempts to open the device in this mode will fail, and an error
- code will be returned.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int open(const char &#x22C6;deviceName, int flags);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>const char
- *deviceName</para>
-</entry><entry
- align="char">
-<para>Name of specific CA device.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int flags</para>
-</entry><entry
- align="char">
-<para>A bit-wise OR of the following flags:</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDONLY read-only access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDWR read/write access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_NONBLOCK open in non-blocking mode</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>(blocking mode is the default)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>ENODEV</para>
-</entry><entry
- align="char">
-<para>Device driver not loaded/available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EINTERNAL</para>
-</entry><entry
- align="char">
-<para>Internal error.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>Device or resource busy.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid argument.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
-<section id="ca_fclose">
-<title>close()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call closes a previously opened CA device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int close(int fd);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
- </section>
-
-<section id="CA_RESET"
-role="subsection"><title>CA_RESET</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_RESET);
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_RESET for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_GET_CAP"
-role="subsection"><title>CA_GET_CAP</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_GET_CAP,
- ca_caps_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_GET_CAP for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_caps_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_GET_SLOT_INFO"
-role="subsection"><title>CA_GET_SLOT_INFO</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_GET_SLOT_INFO,
- ca_slot_info_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_GET_SLOT_INFO for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_slot_info_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_GET_DESCR_INFO"
-role="subsection"><title>CA_GET_DESCR_INFO</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_GET_DESCR_INFO,
- ca_descr_info_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_GET_DESCR_INFO for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_descr_info_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_GET_MSG"
-role="subsection"><title>CA_GET_MSG</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_GET_MSG,
- ca_msg_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_GET_MSG for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_msg_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_SEND_MSG"
-role="subsection"><title>CA_SEND_MSG</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_SEND_MSG,
- ca_msg_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_SEND_MSG for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_msg_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_SET_DESCR"
-role="subsection"><title>CA_SET_DESCR</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_SET_DESCR,
- ca_descr_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_SET_DESCR for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_descr_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="CA_SET_PID"
-role="subsection"><title>CA_SET_PID</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = CA_SET_PID,
- ca_pid_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals CA_SET_PID for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ca_pid_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-</section>
diff --git a/Documentation/DocBook/media/dvb/demux.xml b/Documentation/DocBook/media/dvb/demux.xml
deleted file mode 100644
index 34f2fb1cd601..000000000000
--- a/Documentation/DocBook/media/dvb/demux.xml
+++ /dev/null
@@ -1,1162 +0,0 @@
-<title>DVB Demux Device</title>
-
-<para>The DVB demux device controls the filters of the DVB hardware/software. It can be
-accessed through <constant>/dev/dvb/adapter?/demux?</constant>. Data types and ioctl definitions can be
-accessed by including <constant>linux/dvb/dmx.h</constant> in your application.
-</para>
-<section id="dmx_types">
-<title>Demux Data Types</title>
-
-<section id="dmx-output-t">
-<title>Output for the demux</title>
-
-<table pgwide="1" frame="none" id="dmx-output">
- <title>enum dmx_output</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="DMX-OUT-DECODER">DMX_OUT_DECODER</entry>
- <entry>Streaming directly to decoder.</entry>
- </row><row>
- <entry align="char" id="DMX-OUT-TAP">DMX_OUT_TAP</entry>
- <entry>Output going to a memory buffer (to be retrieved via the
- read command). Delivers the stream output to the demux
- device on which the ioctl is called.</entry>
- </row><row>
- <entry align="char" id="DMX-OUT-TS-TAP">DMX_OUT_TS_TAP</entry>
- <entry>Output multiplexed into a new TS (to be retrieved by
- reading from the logical DVR device). Routes output to the
- logical DVR device <constant>/dev/dvb/adapter?/dvr?</constant>,
- which delivers a TS multiplexed from all filters for which
- <constant>DMX_OUT_TS_TAP</constant> was specified.</entry>
- </row><row>
- <entry align="char" id="DMX-OUT-TSDEMUX-TAP">DMX_OUT_TSDEMUX_TAP</entry>
- <entry>Like &DMX-OUT-TS-TAP; but retrieved from the DMX
- device.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
-</section>
-
-<section id="dmx-input-t">
-<title>dmx_input_t</title>
-<programlisting>
-typedef enum
-{
- DMX_IN_FRONTEND, /&#x22C6; Input from a front-end device. &#x22C6;/
- DMX_IN_DVR /&#x22C6; Input from the logical DVR device. &#x22C6;/
-} dmx_input_t;
-</programlisting>
-</section>
-
-<section id="dmx-pes-type-t">
-<title>dmx_pes_type_t</title>
-<programlisting>
-typedef enum
-{
- DMX_PES_AUDIO0,
- DMX_PES_VIDEO0,
- DMX_PES_TELETEXT0,
- DMX_PES_SUBTITLE0,
- DMX_PES_PCR0,
-
- DMX_PES_AUDIO1,
- DMX_PES_VIDEO1,
- DMX_PES_TELETEXT1,
- DMX_PES_SUBTITLE1,
- DMX_PES_PCR1,
-
- DMX_PES_AUDIO2,
- DMX_PES_VIDEO2,
- DMX_PES_TELETEXT2,
- DMX_PES_SUBTITLE2,
- DMX_PES_PCR2,
-
- DMX_PES_AUDIO3,
- DMX_PES_VIDEO3,
- DMX_PES_TELETEXT3,
- DMX_PES_SUBTITLE3,
- DMX_PES_PCR3,
-
- DMX_PES_OTHER
-} dmx_pes_type_t;
-</programlisting>
-</section>
-
-<section id="dmx-filter">
-<title>struct dmx_filter</title>
- <programlisting>
- typedef struct dmx_filter
-{
- __u8 filter[DMX_FILTER_SIZE];
- __u8 mask[DMX_FILTER_SIZE];
- __u8 mode[DMX_FILTER_SIZE];
-} dmx_filter_t;
-</programlisting>
-</section>
-
-<section id="dmx-sct-filter-params">
-<title>struct dmx_sct_filter_params</title>
-<programlisting>
-struct dmx_sct_filter_params
-{
- __u16 pid;
- dmx_filter_t filter;
- __u32 timeout;
- __u32 flags;
-#define DMX_CHECK_CRC 1
-#define DMX_ONESHOT 2
-#define DMX_IMMEDIATE_START 4
-#define DMX_KERNEL_CLIENT 0x8000
-};
-</programlisting>
-</section>
-
-<section id="dmx-pes-filter-params">
-<title>struct dmx_pes_filter_params</title>
-<programlisting>
-struct dmx_pes_filter_params
-{
- __u16 pid;
- dmx_input_t input;
- dmx_output_t output;
- dmx_pes_type_t pes_type;
- __u32 flags;
-};
-</programlisting>
-</section>
-
-<section id="dmx-event">
-<title>struct dmx_event</title>
- <programlisting>
- struct dmx_event
- {
- dmx_event_t event;
- time_t timeStamp;
- union
- {
- dmx_scrambling_status_t scrambling;
- } u;
- };
-</programlisting>
-</section>
-
-<section id="dmx-stc">
-<title>struct dmx_stc</title>
-<programlisting>
-struct dmx_stc {
- unsigned int num; /&#x22C6; input : which STC? 0..N &#x22C6;/
- unsigned int base; /&#x22C6; output: divisor for stc to get 90 kHz clock &#x22C6;/
- __u64 stc; /&#x22C6; output: stc in 'base'&#x22C6;90 kHz units &#x22C6;/
-};
-</programlisting>
-</section>
-
-<section id="dmx-caps">
-<title>struct dmx_caps</title>
-<programlisting>
- typedef struct dmx_caps {
- __u32 caps;
- int num_decoders;
-} dmx_caps_t;
-</programlisting>
-</section>
-
-<section id="dmx-source-t">
-<title>enum dmx_source_t</title>
-<programlisting>
-typedef enum {
- DMX_SOURCE_FRONT0 = 0,
- DMX_SOURCE_FRONT1,
- DMX_SOURCE_FRONT2,
- DMX_SOURCE_FRONT3,
- DMX_SOURCE_DVR0 = 16,
- DMX_SOURCE_DVR1,
- DMX_SOURCE_DVR2,
- DMX_SOURCE_DVR3
-} dmx_source_t;
-</programlisting>
-</section>
-
-</section>
-<section id="dmx_fcalls">
-<title>Demux Function Calls</title>
-
-<section id="dmx_fopen">
-<title>open()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call, used with a device name of /dev/dvb/adapter0/demux0,
- allocates a new filter and returns a handle which can be used for subsequent
- control of that filter. This call has to be made for each filter to be used, i.e. every
- returned file descriptor is a reference to a single filter. /dev/dvb/adapter0/dvr0
- is a logical device to be used for retrieving Transport Streams for digital
- video recording. When reading from this device, a Transport Stream containing
- the packets from all PES filters set in the corresponding demux device
- (/dev/dvb/adapter0/demux0) having the output set to DMX_OUT_TS_TAP is
- returned. A recorded Transport Stream is replayed by writing to this device. </para>
-<para>The significance of blocking or non-blocking mode is described in the
- documentation for functions where there is a difference. It does not affect the
- semantics of the open() call itself. A device opened in blocking mode can later
- be put into non-blocking mode (and vice versa) using the F_SETFL command
- of the fcntl system call.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int open(const char &#x22C6;deviceName, int flags);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>const char
- *deviceName</para>
-</entry><entry
- align="char">
-<para>Name of demux device.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int flags</para>
-</entry><entry
- align="char">
-<para>A bit-wise OR of the following flags:</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDWR read/write access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_NONBLOCK open in non-blocking mode</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>(blocking mode is the default)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>ENODEV</para>
-</entry><entry
- align="char">
-<para>Device driver not loaded/available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid argument.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EMFILE</para>
-</entry><entry
- align="char">
-<para>&#8220;Too many open files&#8221;, i.e. no more filters available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ENOMEM</para>
-</entry><entry
- align="char">
-<para>The driver failed to allocate enough memory.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
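-<para>A minimal usage sketch (not part of the original specification text;
- the adapter and demux numbers are placeholders):</para>
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-
-/* Each successful open() allocates one demux filter. */
-static int open_demux_filter(void)
-{
-        int fd = open("/dev/dvb/adapter0/demux0", O_RDWR | O_NONBLOCK);
-
-        if (fd &lt; 0)
-                perror("open demux");
-        return fd;
-}
-</programlisting>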
-</section>
-
-<section id="dmx_fclose">
-<title>close()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call deactivates and deallocates a filter that was previously
- allocated via the open() call.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int close(int fd);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-</section>
-
-<section id="dmx_fread">
-<title>read()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call returns filtered data, which might be section or PES data. The
- filtered data is transferred from the driver&#8217;s internal circular buffer to buf. The
- maximum amount of data to be transferred is implied by count.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>When returning section data the driver always tries to return a complete single
- section (even though buf would provide buffer space for more data). If the size
- of the buffer is smaller than the section, as much of it as possible will be returned,
- and the remaining data will be provided in subsequent calls.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The size of the internal buffer is 2 * 4096 bytes (the size of two maximum
- sized sections) by default. The size of this buffer may be changed by using the
- DMX_SET_BUFFER_SIZE function. If the buffer is not large enough, or if
- the read operations are not performed fast enough, this may result in a buffer
- overflow error. In this case EOVERFLOW will be returned, and the circular
- buffer will be emptied. This call is blocking if there is no data to return, i.e. the
- process will be put to sleep waiting for data, unless the O_NONBLOCK flag
- is specified.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>Note that in order to be able to read, the filtering process has to be started
- by defining either a section or a PES filter by means of the ioctl functions,
- and then starting the filtering process via the DMX_START ioctl function
- or by setting the DMX_IMMEDIATE_START flag. If the reading is done
- from a logical DVR demux device, the data will constitute a Transport Stream
- including the packets from all PES filters in the corresponding demux device
- /dev/dvb/adapter0/demux0 having the output set to DMX_OUT_TS_TAP.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>ssize_t read(int fd, void &#x22C6;buf, size_t count);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>void *buf</para>
-</entry><entry
- align="char">
-<para>Pointer to the buffer to be used for returned filtered data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t count</para>
-</entry><entry
- align="char">
-<para>Size of buf.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EWOULDBLOCK</para>
-</entry><entry
- align="char">
-<para>No data to return and O_NONBLOCK was specified.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ECRC</para>
-</entry><entry
- align="char">
-<para>Last section had a CRC error - no data returned. The
- buffer is flushed.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EOVERFLOW</para>
-</entry><entry
- align="char">
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>The filtered data was not read from the buffer in due
- time, resulting in non-read data being lost. The buffer is
- flushed.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ETIMEDOUT</para>
-</entry><entry
- align="char">
-<para>The section was not loaded within the stated timeout
- period. See ioctl DMX_SET_FILTER for how to set a
- timeout.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EFAULT</para>
-</entry><entry
- align="char">
-<para>The driver failed to write to the caller's buffer due to an
- invalid *buf pointer.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
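-<para>A hedged sketch of reading one filtered section (it assumes a section
- filter was already set up and started on the file descriptor, as described
- above):</para>
-<programlisting>
-#include &lt;errno.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;unistd.h&gt;
-
-/* Returns the number of bytes read, or -1 on error. */
-static ssize_t read_section(int fd, unsigned char *buf, size_t len)
-{
-        ssize_t n = read(fd, buf, len);
-
-        if (n &lt; 0) {
-                if (errno == EOVERFLOW)
-                        fprintf(stderr, "circular buffer overflowed, data lost\n");
-                else if (errno == ETIMEDOUT)
-                        fprintf(stderr, "section filter timed out\n");
-                else
-                        perror("read");
-        }
-        return n;
-}
-</programlisting>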
-</section>
-
-<section id="dmx_fwrite">
-<title>write()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call is only provided by the logical device /dev/dvb/adapter0/dvr0,
- associated with the physical demux device that provides the actual DVR
- functionality. It is used for replay of a digitally recorded Transport Stream.
- Matching filters have to be defined in the corresponding physical demux
- device, /dev/dvb/adapter0/demux0. The amount of data to be transferred is
- implied by count.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>ssize_t write(int fd, const void &#x22C6;buf, size_t
- count);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>void *buf</para>
-</entry><entry
- align="char">
-<para>Pointer to the buffer containing the Transport Stream.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t count</para>
-</entry><entry
- align="char">
-<para>Size of buf.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EWOULDBLOCK</para>
-</entry><entry
- align="char">
-<para>No data was written. This
- might happen if O_NONBLOCK was specified and there
- is no more buffer space available (if O_NONBLOCK is
- not specified the function will block until buffer space is
- available).</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>This error code indicates that there are conflicting
- requests. The corresponding demux device is set up to
- receive data from the front-end. Make sure that these
- filters are stopped and that the filters with input set to
- DMX_IN_DVR are started.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
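-<para>An illustrative replay sketch (not from the original text; the file and
- device names are placeholders, and matching filters with input set to
- DMX_IN_DVR are assumed to be started on the corresponding demux device):</para>
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;unistd.h&gt;
-
-static int replay_ts(const char *ts_file)
-{
-        unsigned char buf[188 * 348];   /* a few hundred TS packets at a time */
-        int in = open(ts_file, O_RDONLY);
-        int dvr = open("/dev/dvb/adapter0/dvr0", O_WRONLY);
-        ssize_t n, off, w;
-
-        if (in &lt; 0 || dvr &lt; 0) {
-                perror("open");
-                return -1;
-        }
-        while ((n = read(in, buf, sizeof(buf))) &gt; 0) {
-                /* write() may be partial, so loop until the chunk is consumed */
-                for (off = 0; off &lt; n; off += w) {
-                        w = write(dvr, buf + off, n - off);
-                        if (w &lt; 0) {
-                                perror("write dvr");
-                                goto out;
-                        }
-                }
-        }
-out:
-        close(in);
-        close(dvr);
-        return 0;
-}
-</programlisting>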
-</section>
-
-<section id="DMX_START">
-<title>DMX_START</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call is used to start the actual filtering operation defined via the ioctl
- calls DMX_SET_FILTER or DMX_SET_PES_FILTER.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request = DMX_START);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_START for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid argument, i.e. no filtering parameters provided via
- the DMX_SET_FILTER or DMX_SET_PES_FILTER
- functions.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>This error code indicates that there are conflicting
- requests. There are active filters filtering data from
- another input source. Make sure that these filters are
- stopped before starting this filter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-</section>
-
-<section id="DMX_STOP">
-<title>DMX_STOP</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call is used to stop the actual filtering operation defined via the
- ioctl calls DMX_SET_FILTER or DMX_SET_PES_FILTER and started via
- the DMX_START command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request = DMX_STOP);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_STOP for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="DMX_SET_FILTER">
-<title>DMX_SET_FILTER</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call sets up a filter according to the filter and mask parameters
- provided. A timeout may be defined stating the number of seconds to wait for a
- section to be loaded. A value of 0 means that no timeout should be applied.
- Finally there is a flag field where it is possible to state whether a section should
- be CRC-checked, whether the filter should be a &#8220;one-shot&#8221; filter, i.e. whether the
- filtering operation should be stopped after the first section is received, and
- whether the filtering operation should be started immediately (without waiting
- for a DMX_START ioctl call). If a filter was previously set up, this filter will
- be canceled, and the receive buffer will be flushed.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request = DMX_SET_FILTER,
- struct dmx_sct_filter_params &#x22C6;params);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_SET_FILTER for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct
- dmx_sct_filter_params
- *params</para>
-</entry><entry
- align="char">
-<para>Pointer to structure containing filter parameters.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
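-<para>A minimal sketch (the values are only an illustration): set up a section
- filter matching table_id 0x00, i.e. the PAT, on PID 0, with CRC checking and
- immediate start:</para>
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/dmx.h&gt;
-
-static int set_pat_filter(int fd)
-{
-        struct dmx_sct_filter_params params;
-
-        memset(&amp;params, 0, sizeof(params));
-        params.pid = 0;                      /* PAT is carried on PID 0 */
-        params.filter.filter[0] = 0x00;      /* match table_id == 0x00 */
-        params.filter.mask[0] = 0xff;        /* compare all 8 bits of it */
-        params.timeout = 0;                  /* no timeout */
-        params.flags = DMX_CHECK_CRC | DMX_IMMEDIATE_START;
-
-        return ioctl(fd, DMX_SET_FILTER, &amp;params);
-}
-</programlisting>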
-</section>
-
-<section id="DMX_SET_PES_FILTER">
-<title>DMX_SET_PES_FILTER</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call sets up a PES filter according to the parameters provided. A
- PES filter is a filter based solely on the packet identifier (PID), i.e.
- no PES header or payload filtering capability is supported.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The transport stream destination for the filtered output may be set. The
- PES type may also be specified, e.g. in order to direct a video stream directly
- to the video decoder. Finally there is a flag field where it is possible to state
- whether the filtering operation should be started immediately (without waiting
- for a DMX_START ioctl call). If a filter was previously set up, this filter will
- be cancelled, and the receive buffer will be flushed.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request = DMX_SET_PES_FILTER,
- struct dmx_pes_filter_params &#x22C6;params);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_SET_PES_FILTER for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct
- dmx_pes_filter_params
- *params</para>
-</entry><entry
- align="char">
-<para>Pointer to structure containing filter parameters.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>This error code indicates that there are conflicting
- requests. There are active filters filtering data from
- another input source. Make sure that these filters are
- stopped before starting this filter.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
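-<para>A hedged sketch (the PID value is a placeholder): route one PES of a
- service to the logical DVR device so that it can be recorded, starting the
- filter immediately:</para>
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/dmx.h&gt;
-
-static int record_pid(int fd, __u16 pid)
-{
-        struct dmx_pes_filter_params params;
-
-        memset(&amp;params, 0, sizeof(params));
-        params.pid = pid;
-        params.input = DMX_IN_FRONTEND;
-        params.output = DMX_OUT_TS_TAP;       /* read it back via dvr0 */
-        params.pes_type = DMX_PES_OTHER;      /* no decoder involved */
-        params.flags = DMX_IMMEDIATE_START;
-
-        return ioctl(fd, DMX_SET_PES_FILTER, &amp;params);
-}
-</programlisting>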
-</section>
-
-<section id="DMX_SET_BUFFER_SIZE">
-<title>DMX_SET_BUFFER_SIZE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call is used to set the size of the circular buffer used for filtered data.
- The default size is two maximum sized sections, i.e. if this function is not called
- a buffer size of 2 * 4096 bytes will be used.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request =
- DMX_SET_BUFFER_SIZE, unsigned long size);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_SET_BUFFER_SIZE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>unsigned long size</para>
-</entry><entry
- align="char">
-<para>Size of circular buffer.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
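-<para>A minimal sketch (the 256 KiB value is only an illustration; the ioctl
- takes the new size directly as its argument):</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/dmx.h&gt;
-
-static int enlarge_demux_buffer(int fd)
-{
-        /* Replace the default 2 * 4096 byte circular buffer. */
-        return ioctl(fd, DMX_SET_BUFFER_SIZE, (unsigned long)(256 * 1024));
-}
-</programlisting>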
-</section>
-
-<section id="DMX_GET_EVENT">
-<title>DMX_GET_EVENT</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns an event if available. If an event is not available,
- the behavior depends on whether the device is in blocking or non-blocking
- mode. In the latter case, the call fails immediately with errno set to
- EWOULDBLOCK. In the former case, the call blocks until an event becomes
- available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The standard Linux poll() and/or select() system calls can be used with the
- device file descriptor to watch for new events. For select(), the file descriptor
- should be included in the exceptfds argument, and for poll(), POLLPRI should
- be specified as the wake-up condition. Only the latest event for each filter is
- saved.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request = DMX_GET_EVENT,
- struct dmx_event &#x22C6;ev);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_GET_EVENT for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct dmx_event *ev</para>
-</entry><entry
- align="char">
-<para>Pointer to the location where the event is to be stored.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EWOULDBLOCK</para>
-</entry><entry
- align="char">
-<para>There is no event pending, and the device is in
- non-blocking mode.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-</section>
-
-<section id="DMX_GET_STC">
-<title>DMX_GET_STC</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns the current value of the system time counter (which is driven
- by a PES filter of type DMX_PES_PCR). Some hardware supports more than one
- STC, so you must specify which one by setting the num field of stc before the ioctl
- (range 0...n). The result is returned in the form of a ratio with a 64 bit numerator
- and a 32 bit denominator, so the real 90 kHz STC value is
- stc-&#x003E;stc / stc-&#x003E;base.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request = DMX_GET_STC, struct
- dmx_stc &#x22C6;stc);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_GET_STC for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct dmx_stc *stc</para>
-</entry><entry
- align="char">
-<para>Pointer to the location where the stc is to be stored.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid stc number.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
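-<para>A small sketch of reading the first STC and converting it to 90 kHz
- units (assuming the demux is already receiving data):</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/dmx.h&gt;
-
-static int print_stc(int fd)
-{
-        struct dmx_stc stc = { .num = 0 };   /* first STC */
-
-        if (ioctl(fd, DMX_GET_STC, &amp;stc) &lt; 0) {
-                perror("DMX_GET_STC");
-                return -1;
-        }
-        printf("STC: %llu (90 kHz units)\n",
-               (unsigned long long)(stc.stc / stc.base));
-        return 0;
-}
-</programlisting>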
- </section>
-
-<section id="DMX_GET_PES_PIDS"
-role="subsection"><title>DMX_GET_PES_PIDS</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = DMX_GET_PES_PIDS,
- __u16[5]);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_GET_PES_PIDS for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16[5]
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="DMX_GET_CAPS"
-role="subsection"><title>DMX_GET_CAPS</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = DMX_GET_CAPS,
- dmx_caps_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_GET_CAPS for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_caps_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="DMX_SET_SOURCE"
-role="subsection"><title>DMX_SET_SOURCE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is undocumented. Documentation is welcome.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = DMX_SET_SOURCE,
- dmx_source_t *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_SET_SOURCE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>dmx_source_t *
-</para>
-</entry><entry
- align="char">
-<para>Undocumented.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-<section id="DMX_ADD_PID"
-role="subsection"><title>DMX_ADD_PID</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call allows adding multiple PIDs to a transport stream filter
-previously set up with DMX_SET_PES_FILTER and output equal to DMX_OUT_TSDEMUX_TAP.
-</para></entry></row><row><entry align="char"><para>
-It is used by readers of /dev/dvb/adapterX/demuxY.
-</para></entry></row><row><entry align="char"><para>
-It may be called at any time, i.e. before or after the first filter on the
-shared file descriptor was started. It makes it possible to record multiple
-services without the need to de-multiplex or re-multiplex TS packets.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = DMX_ADD_PID,
- __u16 *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_ADD_PID for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16 *
-</para>
-</entry><entry
- align="char">
-<para>PID number to be filtered.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
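-<para>A hedged sketch (PID values are placeholders): once a TS filter with
- output set to DMX_OUT_TSDEMUX_TAP has been set up, further services can be
- added on the same file descriptor:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/dmx.h&gt;
-
-static int add_pid(int fd, __u16 pid)
-{
-        /* The ioctl takes a pointer to the PID to be added. */
-        return ioctl(fd, DMX_ADD_PID, &amp;pid);
-}
-</programlisting>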
-</section>
-
-<section id="DMX_REMOVE_PID"
-role="subsection"><title>DMX_REMOVE_PID</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call allows removing a PID when multiple PIDs are set on a
-transport stream filter, e.g. a filter previously set up with output equal to
-DMX_OUT_TSDEMUX_TAP, created via either DMX_SET_PES_FILTER or DMX_ADD_PID.
-</para></entry></row><row><entry align="char"><para>
-It is used by readers of /dev/dvb/adapterX/demuxY.
-</para></entry></row><row><entry align="char"><para>
-It may be called at any time, i.e. before or after the first filter on the
-shared file descriptor was started. It makes it possible to record multiple
-services without the need to de-multiplex or re-multiplex TS packets.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = DMX_REMOVE_PID,
- __u16 *);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals DMX_REMOVE_PID for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u16 *
-</para>
-</entry><entry
- align="char">
-<para>PID of the PES filter to be removed.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-</section>
-
-
-</section>
diff --git a/Documentation/DocBook/media/dvb/dvbapi.xml b/Documentation/DocBook/media/dvb/dvbapi.xml
deleted file mode 100644
index 8576481e20ae..000000000000
--- a/Documentation/DocBook/media/dvb/dvbapi.xml
+++ /dev/null
@@ -1,156 +0,0 @@
-<partinfo>
-<authorgroup>
-<author>
-<firstname>Ralph</firstname>
-<surname>Metzler</surname>
-<othername role="mi">J. K.</othername>
-<affiliation><address><email>rjkm@metzlerbros.de</email></address></affiliation>
-</author>
-<author>
-<firstname>Marcus</firstname>
-<surname>Metzler</surname>
-<othername role="mi">O. C.</othername>
-<affiliation><address><email>rjkm@metzlerbros.de</email></address></affiliation>
-</author>
-</authorgroup>
-<authorgroup>
-<author>
-<firstname>Mauro</firstname>
-<othername role="mi">Carvalho</othername>
-<surname>Chehab</surname>
-<affiliation><address><email>m.chehab@samsung.com</email></address></affiliation>
-<contrib>Ported document to Docbook XML.</contrib>
-</author>
-</authorgroup>
-<copyright>
- <year>2002</year>
- <year>2003</year>
- <holder>Convergence GmbH</holder>
-</copyright>
-<copyright>
- <year>2009-2015</year>
- <holder>Mauro Carvalho Chehab</holder>
-</copyright>
-
-<revhistory>
-<!-- Put document revisions here, newest first. -->
-<revision>
- <revnumber>2.1.0</revnumber>
- <date>2015-05-29</date>
- <authorinitials>mcc</authorinitials>
- <revremark>
- DocBook improvements and cleanups, in order to document the
- system calls on a more standard way and provide more description
- about the current DVB API.
- </revremark>
-</revision>
-<revision>
- <revnumber>2.0.4</revnumber>
- <date>2011-05-06</date>
- <authorinitials>mcc</authorinitials>
- <revremark>
- Add more information about DVB APIv5, better describing the frontend GET/SET props ioctl's.
- </revremark>
-</revision>
-<revision>
- <revnumber>2.0.3</revnumber>
- <date>2010-07-03</date>
- <authorinitials>mcc</authorinitials>
- <revremark>
- Add some frontend capabilities flags, present on kernel, but missing at the specs.
- </revremark>
-</revision>
-<revision>
- <revnumber>2.0.2</revnumber>
- <date>2009-10-25</date>
- <authorinitials>mcc</authorinitials>
- <revremark>
- documents FE_SET_FRONTEND_TUNE_MODE and FE_DISHETWORK_SEND_LEGACY_CMD ioctls.
- </revremark>
-</revision>
-<revision>
-<revnumber>2.0.1</revnumber>
-<date>2009-09-16</date>
-<authorinitials>mcc</authorinitials>
-<revremark>
-Added ISDB-T test originally written by Patrick Boettcher
-</revremark>
-</revision>
-<revision>
-<revnumber>2.0.0</revnumber>
-<date>2009-09-06</date>
-<authorinitials>mcc</authorinitials>
-<revremark>Conversion from LaTex to DocBook XML. The
- contents is the same as the original LaTex version.</revremark>
-</revision>
-<revision>
-<revnumber>1.0.0</revnumber>
-<date>2003-07-24</date>
-<authorinitials>rjkm</authorinitials>
-<revremark>Initial revision on LaTEX.</revremark>
-</revision>
-</revhistory>
-</partinfo>
-
-
-<title>LINUX DVB API</title>
-<subtitle>Version 5.10</subtitle>
-<!-- ADD THE CHAPTERS HERE -->
- <chapter id="dvb_introdution">
- &sub-intro;
- </chapter>
- <chapter id="dvb_frontend">
- &sub-frontend;
- </chapter>
- <chapter id="dvb_demux">
- &sub-demux;
- </chapter>
- <chapter id="dvb_ca">
- &sub-ca;
- </chapter>
- <chapter id="net">
- &sub-net;
- </chapter>
- <chapter id="legacy_dvb_apis">
- <title>DVB Deprecated APIs</title>
- <para>The APIs described here are kept only for historical reasons. There's
-    just one driver, for very legacy hardware, that uses this API. No
-    modern drivers should use it. Instead, audio and video should be using
-    the V4L2 and ALSA APIs, and the pipelines should be set using the
-    Media Controller API.</para>
- <section id="dvb_video">
- &sub-video;
- </section>
- <section id="dvb_audio">
- &sub-audio;
- </section>
- </chapter>
- <chapter id="dvb_examples">
- &sub-examples;
- </chapter>
-<!-- END OF CHAPTERS -->
- <appendix id="audio_h">
- <title>DVB Audio Header File</title>
- &sub-audio-h;
- </appendix>
- <appendix id="ca_h">
- <title>DVB Conditional Access Header File</title>
- &sub-ca-h;
- </appendix>
- <appendix id="dmx_h">
- <title>DVB Demux Header File</title>
- &sub-dmx-h;
- </appendix>
- <appendix id="frontend_h">
- <title>DVB Frontend Header File</title>
- &sub-frontend-h;
- </appendix>
- <appendix id="net_h">
- <title>DVB Network Header File</title>
- &sub-net-h;
- </appendix>
- <appendix id="video_h">
- <title>DVB Video Header File</title>
- &sub-video-h;
- </appendix>
-
diff --git a/Documentation/DocBook/media/dvb/dvbproperty.xml b/Documentation/DocBook/media/dvb/dvbproperty.xml
deleted file mode 100644
index e579ae5088ae..000000000000
--- a/Documentation/DocBook/media/dvb/dvbproperty.xml
+++ /dev/null
@@ -1,1680 +0,0 @@
-<section id="frontend-properties">
-<title>DVB Frontend properties</title>
-<para>Tuning into a Digital TV physical channel and starting decoding it
- requires changing a set of parameters, in order to control the
- tuner, the demodulator, the Linear Low-noise Amplifier (LNA) and to set the
- antenna subsystem via Satellite Equipment Control (SEC), on satellite
- systems. The actual parameters are specific to each particular digital
- TV standard, and may change as the digital TV specs evolve.</para>
-<para>In the past, the strategy used was to have a union with the parameters
- needed to tune for DVB-S, DVB-C, DVB-T and ATSC delivery systems grouped
- there. The problem is that, as the second generation standards appeared,
- those structs were not big enough to contain the additional parameters.
- Also, the union didn't have any space left to be expanded without breaking
- userspace. So, the decision was to deprecate the legacy union/struct based
- approach, in favor of a properties set approach.</para>
-
-<para>NOTE: on Linux DVB API version 3, setting a frontend was done via
- <link linkend="dvb-frontend-parameters">struct <constant>dvb_frontend_parameters</constant></link>.
- This got replaced on version 5 (also called "S2API", as this API was
- originally added to provide support for DVB-S2), because the old
- API has very limited support for new standards and new hardware. This
- section describes the new and recommended way to set the frontend, which
- supports all digital TV delivery systems.</para>
-
-<para>Example: with the properties based approach, in order to set the tuner
- to a DVB-C channel at 651 MHz, modulated with 256-QAM, FEC 3/4 and symbol
- rate of 5.217 Mbauds, those properties should be sent to
- <link linkend="FE_GET_PROPERTY"><constant>FE_SET_PROPERTY</constant></link> ioctl:</para>
- <itemizedlist>
- <listitem><para>&DTV-DELIVERY-SYSTEM; = SYS_DVBC_ANNEX_A</para></listitem>
- <listitem><para>&DTV-FREQUENCY; = 651000000</para></listitem>
- <listitem><para>&DTV-MODULATION; = QAM_256</para></listitem>
- <listitem><para>&DTV-INVERSION; = INVERSION_AUTO</para></listitem>
- <listitem><para>&DTV-SYMBOL-RATE; = 5217000</para></listitem>
- <listitem><para>&DTV-INNER-FEC; = FEC_3_4</para></listitem>
- <listitem><para>&DTV-TUNE;</para></listitem>
- </itemizedlist>
-
-<para>The code that would do the above is:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;fcntl.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static struct dtv_property props[] = {
- { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBC_ANNEX_A },
- { .cmd = DTV_FREQUENCY, .u.data = 651000000 },
- { .cmd = DTV_MODULATION, .u.data = QAM_256 },
- { .cmd = DTV_INVERSION, .u.data = INVERSION_AUTO },
- { .cmd = DTV_SYMBOL_RATE, .u.data = 5217000 },
- { .cmd = DTV_INNER_FEC, .u.data = FEC_3_4 },
- { .cmd = DTV_TUNE }
-};
-
-static struct dtv_properties dtv_prop = {
-	.num = 7, .props = props	/* all entries, including DTV_TUNE */
-};
-
-int main(void)
-{
- int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);
-
-	if (fd &lt; 0) {
- perror ("open");
- return -1;
- }
- if (ioctl(fd, FE_SET_PROPERTY, &amp;dtv_prop) == -1) {
- perror("ioctl");
- return -1;
- }
- printf("Frontend set\n");
- return 0;
-}
-</programlisting>
-
-<para>NOTE: While it is possible to directly call the Kernel code like the
- above example, it is strongly recommended to use
- <ulink url="https://linuxtv.org/docs/libdvbv5/index.html">libdvbv5</ulink>,
- as it provides abstraction to work with the supported digital TV standards
- and provides methods for usual operations like program scanning and to
- read/write channel descriptor files.</para>
-
-<section id="dtv-stats">
-<title>struct <structname>dtv_stats</structname></title>
-<programlisting>
-struct dtv_stats {
- __u8 scale; /* enum fecap_scale_params type */
- union {
- __u64 uvalue; /* for counters and relative scales */
- __s64 svalue; /* for 1/1000 dB measures */
- };
-} __packed;
-</programlisting>
-</section>
-<section id="dtv-fe-stats">
-<title>struct <structname>dtv_fe_stats</structname></title>
-<programlisting>
-#define MAX_DTV_STATS 4
-
-struct dtv_fe_stats {
- __u8 len;
- &dtv-stats; stat[MAX_DTV_STATS];
-} __packed;
-</programlisting>
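-<para>A hedged sketch of how such a statistics property can be decoded once it
- has been filled in by <constant>FE_GET_PROPERTY</constant> (the property used
- here, DTV_STAT_SIGNAL_STRENGTH, is just one example of a stats property, and
- the printed units follow the scale conventions above):</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static void print_strength(const struct dtv_fe_stats *st)
-{
-        int i;
-
-        for (i = 0; i &lt; st-&gt;len; i++) {
-                switch (st-&gt;stat[i].scale) {
-                case FE_SCALE_DECIBEL:          /* 1/1000 dB(m) steps */
-                        printf("layer %d: %.3f dBm\n", i,
-                               st-&gt;stat[i].svalue / 1000.);
-                        break;
-                case FE_SCALE_RELATIVE:         /* 0 .. 0xffff */
-                        printf("layer %d: %.1f%%\n", i,
-                               (100. * st-&gt;stat[i].uvalue) / 0xffff);
-                        break;
-                default:                        /* FE_SCALE_NOT_AVAILABLE, ... */
-                        printf("layer %d: not available\n", i);
-                }
-        }
-}
-</programlisting>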
-</section>
-
-<section id="dtv-property">
-<title>struct <structname>dtv_property</structname></title>
-<programlisting>
-/* Reserved fields should be set to 0 */
-
-struct dtv_property {
- __u32 cmd;
- __u32 reserved[3];
- union {
- __u32 data;
- &dtv-fe-stats; st;
- struct {
- __u8 data[32];
- __u32 len;
- __u32 reserved1[3];
- void *reserved2;
- } buffer;
- } u;
- int result;
-} __attribute__ ((packed));
-
-/* num of properties cannot exceed DTV_IOCTL_MAX_MSGS per ioctl */
-#define DTV_IOCTL_MAX_MSGS 64
-</programlisting>
-</section>
-<section id="dtv-properties">
-<title>struct <structname>dtv_properties</structname></title>
-<programlisting>
-struct dtv_properties {
- __u32 num;
- &dtv-property; *props;
-};
-</programlisting>
-</section>
-
-<section>
- <title>Property types</title>
-<para>
-On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY and FE_SET_PROPERTY</link>,
-the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, it is possible to
-get/set up to 64 properties. The actual meaning of each property is described on the next sections.
-</para>
-
-<para>The available frontend property types are shown on the next section.</para>
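-<para>For instance, a single property can be read back with
- <constant>FE_GET_PROPERTY</constant> (a minimal sketch; it assumes a frontend
- file descriptor opened as in the example earlier in this section):</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static int print_delivery_system(int fd)
-{
-        struct dtv_property prop = { .cmd = DTV_DELIVERY_SYSTEM };
-        struct dtv_properties props = { .num = 1, .props = &amp;prop };
-
-        if (ioctl(fd, FE_GET_PROPERTY, &amp;props) == -1) {
-                perror("FE_GET_PROPERTY");
-                return -1;
-        }
-        printf("delivery system: %u\n", prop.u.data);
-        return 0;
-}
-</programlisting>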
-</section>
-
-<section id="fe_property_parameters">
- <title>Digital TV property parameters</title>
- <section id="DTV-UNDEFINED">
- <title><constant>DTV_UNDEFINED</constant></title>
- <para>Used internally. A GET/SET operation for it won't change or return anything.</para>
- </section>
- <section id="DTV-TUNE">
- <title><constant>DTV_TUNE</constant></title>
-		<para>Interpret the cache of data, building a traditional frontend tune request so it can pass validation in the <constant>FE_SET_FRONTEND</constant> ioctl.</para>
- </section>
- <section id="DTV-CLEAR">
- <title><constant>DTV_CLEAR</constant></title>
-		<para>Reset a cache of data specific to the frontend here. This does not affect the hardware.</para>
- </section>
- <section id="DTV-FREQUENCY">
- <title><constant>DTV_FREQUENCY</constant></title>
-
- <para>Central frequency of the channel.</para>
-
- <para>Notes:</para>
-	    <para>1) For satellite delivery systems, it is measured in kHz.
-	    For the other ones, it is measured in Hz.</para>
-	    <para>2) For ISDB-T, the channels are usually transmitted with an offset of 143 kHz.
-	    E.g. a valid frequency could be 474143 kHz. The stepping is bound to the bandwidth of
-	    the channel, which is 6 MHz.</para>
-
-	    <para>3) As in ISDB-Tsb the channel consists of only one or three segments, the
-	    frequency step is 429 kHz or 3 * 429 kHz, respectively. As for ISDB-T, the
-	    central frequency of the channel is expected.</para>
- </section>
- <section id="DTV-MODULATION">
- <title><constant>DTV_MODULATION</constant></title>
-<para>Specifies the frontend modulation type for delivery systems that support
- more than one modulation type. The modulation can be one of the types
- defined by &fe-modulation;.</para>
-
-
-<section id="fe-modulation-t">
-<title>Modulation property</title>
-
-<para>Most of the digital TV standards currently offer more than one possible
-	modulation (sometimes called "constellation" in some standards). This
- enum contains the values used by the Kernel. Please note that not all
- modulations are supported by a given standard.</para>
-
-<table pgwide="1" frame="none" id="fe-modulation">
- <title>enum fe_modulation</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="QPSK"><constant>QPSK</constant></entry>
- <entry>QPSK modulation</entry>
- </row><row>
- <entry id="QAM-16"><constant>QAM_16</constant></entry>
- <entry>16-QAM modulation</entry>
- </row><row>
- <entry id="QAM-32"><constant>QAM_32</constant></entry>
- <entry>32-QAM modulation</entry>
- </row><row>
- <entry id="QAM-64"><constant>QAM_64</constant></entry>
- <entry>64-QAM modulation</entry>
- </row><row>
- <entry id="QAM-128"><constant>QAM_128</constant></entry>
- <entry>128-QAM modulation</entry>
- </row><row>
- <entry id="QAM-256"><constant>QAM_256</constant></entry>
- <entry>256-QAM modulation</entry>
- </row><row>
- <entry id="QAM-AUTO"><constant>QAM_AUTO</constant></entry>
- <entry>Autodetect QAM modulation</entry>
- </row><row>
- <entry id="VSB-8"><constant>VSB_8</constant></entry>
- <entry>8-VSB modulation</entry>
- </row><row>
- <entry id="VSB-16"><constant>VSB_16</constant></entry>
- <entry>16-VSB modulation</entry>
- </row><row>
- <entry id="PSK-8"><constant>PSK_8</constant></entry>
- <entry>8-PSK modulation</entry>
- </row><row>
- <entry id="APSK-16"><constant>APSK_16</constant></entry>
- <entry>16-APSK modulation</entry>
- </row><row>
- <entry id="APSK-32"><constant>APSK_32</constant></entry>
- <entry>32-APSK modulation</entry>
- </row><row>
- <entry id="DQPSK"><constant>DQPSK</constant></entry>
- <entry>DQPSK modulation</entry>
- </row><row>
- <entry id="QAM-4-NR"><constant>QAM_4_NR</constant></entry>
- <entry>4-QAM-NR modulation</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</section>
-
- </section>
- <section id="DTV-BANDWIDTH-HZ">
- <title><constant>DTV_BANDWIDTH_HZ</constant></title>
-
-	    <para>Bandwidth for the channel, in Hz.</para>
-
- <para>Possible values:
- <constant>1712000</constant>,
- <constant>5000000</constant>,
- <constant>6000000</constant>,
- <constant>7000000</constant>,
- <constant>8000000</constant>,
- <constant>10000000</constant>.
- </para>
-
- <para>Notes:</para>
-
-	    <para>1) For ISDB-T it should always be 6000000 Hz (6 MHz)</para>
- <para>2) For ISDB-Tsb it can vary depending on the number of connected segments</para>
- <para>3) Bandwidth doesn't apply for DVB-C transmissions, as the bandwidth
- for DVB-C depends on the symbol rate</para>
- <para>4) Bandwidth in ISDB-T is fixed (6MHz) or can be easily derived from
- other parameters (DTV_ISDBT_SB_SEGMENT_IDX,
- DTV_ISDBT_SB_SEGMENT_COUNT).</para>
-	    <para>5) DVB-T supports 6, 7 and 8 MHz.</para>
-	    <para>6) In addition, DVB-T2 supports 1.712, 5 and 10 MHz.</para>
- </section>
- <section id="DTV-INVERSION">
- <title><constant>DTV_INVERSION</constant></title>
-
- <para>Specifies if the frontend should do spectral inversion or not.</para>
-
-<section id="fe-spectral-inversion-t">
-<title>enum fe_spectral_inversion: Frontend spectral inversion</title>
-
-<para>This parameter indicates if spectral inversion should be presumed or not.
- In the automatic setting (<constant>INVERSION_AUTO</constant>) the hardware
- will try to figure out the correct setting by itself. If the hardware
-    doesn't support it, the DVB core will first try to lock to the carrier with
-    inversion off. If that fails, it will try to enable inversion.
-</para>
-
-<table pgwide="1" frame="none" id="fe-spectral-inversion">
-    <title>enum fe_spectral_inversion</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="INVERSION-OFF"><constant>INVERSION_OFF</constant></entry>
- <entry>Don't do spectral band inversion.</entry>
- </row><row>
- <entry id="INVERSION-ON"><constant>INVERSION_ON</constant></entry>
- <entry>Do spectral band inversion.</entry>
- </row><row>
- <entry id="INVERSION-AUTO"><constant>INVERSION_AUTO</constant></entry>
- <entry>Autodetect spectral band inversion.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</section>
-
- </section>
- <section id="DTV-DISEQC-MASTER">
- <title><constant>DTV_DISEQC_MASTER</constant></title>
- <para>Currently not implemented.</para>
- </section>
- <section id="DTV-SYMBOL-RATE">
- <title><constant>DTV_SYMBOL_RATE</constant></title>
-		<para>Digital TV symbol rate, in bauds (symbols/second). Used on cable and satellite delivery systems.</para>
- </section>
- <section id="DTV-INNER-FEC">
- <title><constant>DTV_INNER_FEC</constant></title>
-		<para>Used on cable/satellite transmissions. The acceptable values are:
- </para>
-<section id="fe-code-rate-t">
-<title>enum fe_code_rate: type of the Forward Error Correction.</title>
-
-<table pgwide="1" frame="none" id="fe-code-rate">
- <title>enum fe_code_rate</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="FEC-NONE"><constant>FEC_NONE</constant></entry>
- <entry>No Forward Error Correction Code</entry>
- </row><row>
- <entry id="FEC-AUTO"><constant>FEC_AUTO</constant></entry>
- <entry>Autodetect Error Correction Code</entry>
- </row><row>
- <entry id="FEC-1-2"><constant>FEC_1_2</constant></entry>
- <entry>Forward Error Correction Code 1/2</entry>
- </row><row>
- <entry id="FEC-2-3"><constant>FEC_2_3</constant></entry>
- <entry>Forward Error Correction Code 2/3</entry>
- </row><row>
- <entry id="FEC-3-4"><constant>FEC_3_4</constant></entry>
- <entry>Forward Error Correction Code 3/4</entry>
- </row><row>
- <entry id="FEC-4-5"><constant>FEC_4_5</constant></entry>
- <entry>Forward Error Correction Code 4/5</entry>
- </row><row>
- <entry id="FEC-5-6"><constant>FEC_5_6</constant></entry>
- <entry>Forward Error Correction Code 5/6</entry>
- </row><row>
- <entry id="FEC-6-7"><constant>FEC_6_7</constant></entry>
- <entry>Forward Error Correction Code 6/7</entry>
- </row><row>
- <entry id="FEC-7-8"><constant>FEC_7_8</constant></entry>
- <entry>Forward Error Correction Code 7/8</entry>
- </row><row>
- <entry id="FEC-8-9"><constant>FEC_8_9</constant></entry>
- <entry>Forward Error Correction Code 8/9</entry>
- </row><row>
- <entry id="FEC-9-10"><constant>FEC_9_10</constant></entry>
- <entry>Forward Error Correction Code 9/10</entry>
- </row><row>
- <entry id="FEC-2-5"><constant>FEC_2_5</constant></entry>
- <entry>Forward Error Correction Code 2/5</entry>
- </row><row>
- <entry id="FEC-3-5"><constant>FEC_3_5</constant></entry>
- <entry>Forward Error Correction Code 3/5</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</section>
- </section>
- <section id="DTV-VOLTAGE">
- <title><constant>DTV_VOLTAGE</constant></title>
-		<para>The voltage is usually used with non-DiSEqC capable LNBs to switch
-		the polarization (horizontal/vertical). When using DiSEqC equipment this
-		voltage has to be switched consistently with the DiSEqC commands as
-		described in the DiSEqC spec.</para>
-
-<table pgwide="1" frame="none" id="fe-sec-voltage">
- <title id="fe-sec-voltage-t">enum fe_sec_voltage</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="SEC-VOLTAGE-13"><constant>SEC_VOLTAGE_13</constant></entry>
- <entry align="char">Set DC voltage level to 13V</entry>
- </row><row>
- <entry align="char" id="SEC-VOLTAGE-18"><constant>SEC_VOLTAGE_18</constant></entry>
- <entry align="char">Set DC voltage level to 18V</entry>
- </row><row>
- <entry align="char" id="SEC-VOLTAGE-OFF"><constant>SEC_VOLTAGE_OFF</constant></entry>
- <entry align="char">Don't send any voltage to the antenna</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- <section id="DTV-TONE">
- <title><constant>DTV_TONE</constant></title>
- <para>Currently not used.</para>
- </section>
- <section id="DTV-PILOT">
- <title><constant>DTV_PILOT</constant></title>
- <para>Sets DVB-S2 pilot</para>
- <section id="fe-pilot-t">
- <title>fe_pilot type</title>
-<table pgwide="1" frame="none" id="fe-pilot">
- <title>enum fe_pilot</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="PILOT-ON"><constant>PILOT_ON</constant></entry>
- <entry align="char">Pilot tones enabled</entry>
- </row><row>
- <entry align="char" id="PILOT-OFF"><constant>PILOT_OFF</constant></entry>
- <entry align="char">Pilot tones disabled</entry>
- </row><row>
- <entry align="char" id="PILOT-AUTO"><constant>PILOT_AUTO</constant></entry>
- <entry align="char">Autodetect pilot tones</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- </section>
- <section id="DTV-ROLLOFF">
- <title><constant>DTV_ROLLOFF</constant></title>
- <para>Sets DVB-S2 rolloff</para>
-
- <section id="fe-rolloff-t">
- <title>fe_rolloff type</title>
-<table pgwide="1" frame="none" id="fe-rolloff">
- <title>enum fe_rolloff</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="ROLLOFF-35"><constant>ROLLOFF_35</constant></entry>
-	    <entry align="char">Rolloff factor: &alpha;=35%</entry>
-	</row><row>
-	    <entry align="char" id="ROLLOFF-20"><constant>ROLLOFF_20</constant></entry>
-	    <entry align="char">Rolloff factor: &alpha;=20%</entry>
-	</row><row>
-	    <entry align="char" id="ROLLOFF-25"><constant>ROLLOFF_25</constant></entry>
-	    <entry align="char">Rolloff factor: &alpha;=25%</entry>
-	</row><row>
-	    <entry align="char" id="ROLLOFF-AUTO"><constant>ROLLOFF_AUTO</constant></entry>
-	    <entry align="char">Auto-detect the rolloff factor.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- </section>
- <section id="DTV-DISEQC-SLAVE-REPLY">
- <title><constant>DTV_DISEQC_SLAVE_REPLY</constant></title>
- <para>Currently not implemented.</para>
- </section>
- <section id="DTV-FE-CAPABILITY-COUNT">
- <title><constant>DTV_FE_CAPABILITY_COUNT</constant></title>
- <para>Currently not implemented.</para>
- </section>
- <section id="DTV-FE-CAPABILITY">
- <title><constant>DTV_FE_CAPABILITY</constant></title>
- <para>Currently not implemented.</para>
- </section>
- <section id="DTV-DELIVERY-SYSTEM">
- <title><constant>DTV_DELIVERY_SYSTEM</constant></title>
- <para>Specifies the type of Delivery system</para>
- <section id="fe-delivery-system-t">
- <title>fe_delivery_system type</title>
- <para>Possible values: </para>
-
-<table pgwide="1" frame="none" id="fe-delivery-system">
- <title>enum fe_delivery_system</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="SYS-UNDEFINED"><constant>SYS_UNDEFINED</constant></entry>
- <entry>Undefined standard. Generally, indicates an error</entry>
- </row><row>
- <entry id="SYS-DVBC-ANNEX-A"><constant>SYS_DVBC_ANNEX_A</constant></entry>
- <entry>Cable TV: DVB-C following ITU-T J.83 Annex A spec</entry>
- </row><row>
- <entry id="SYS-DVBC-ANNEX-B"><constant>SYS_DVBC_ANNEX_B</constant></entry>
- <entry>Cable TV: DVB-C following ITU-T J.83 Annex B spec (ClearQAM)</entry>
- </row><row>
- <entry id="SYS-DVBC-ANNEX-C"><constant>SYS_DVBC_ANNEX_C</constant></entry>
- <entry>Cable TV: DVB-C following ITU-T J.83 Annex C spec</entry>
- </row><row>
- <entry id="SYS-ISDBC"><constant>SYS_ISDBC</constant></entry>
- <entry>Cable TV: ISDB-C (no drivers yet)</entry>
- </row><row>
- <entry id="SYS-DVBT"><constant>SYS_DVBT</constant></entry>
-	    <entry>Terrestrial TV: DVB-T</entry>
-	</row><row>
-	    <entry id="SYS-DVBT2"><constant>SYS_DVBT2</constant></entry>
-	    <entry>Terrestrial TV: DVB-T2</entry>
-	</row><row>
-	    <entry id="SYS-ISDBT"><constant>SYS_ISDBT</constant></entry>
-	    <entry>Terrestrial TV: ISDB-T</entry>
-	</row><row>
-	    <entry id="SYS-ATSC"><constant>SYS_ATSC</constant></entry>
-	    <entry>Terrestrial TV: ATSC</entry>
-	</row><row>
-	    <entry id="SYS-ATSCMH"><constant>SYS_ATSCMH</constant></entry>
-	    <entry>Terrestrial TV (mobile): ATSC-M/H</entry>
- </row><row>
- <entry id="SYS-DTMB"><constant>SYS_DTMB</constant></entry>
- <entry>Terrestrial TV: DTMB</entry>
- </row><row>
- <entry id="SYS-DVBS"><constant>SYS_DVBS</constant></entry>
- <entry>Satellite TV: DVB-S</entry>
- </row><row>
- <entry id="SYS-DVBS2"><constant>SYS_DVBS2</constant></entry>
- <entry>Satellite TV: DVB-S2</entry>
- </row><row>
- <entry id="SYS-TURBO"><constant>SYS_TURBO</constant></entry>
- <entry>Satellite TV: DVB-S Turbo</entry>
- </row><row>
- <entry id="SYS-ISDBS"><constant>SYS_ISDBS</constant></entry>
- <entry>Satellite TV: ISDB-S</entry>
- </row><row>
- <entry id="SYS-DAB"><constant>SYS_DAB</constant></entry>
- <entry>Digital audio: DAB (not fully supported)</entry>
- </row><row>
- <entry id="SYS-DSS"><constant>SYS_DSS</constant></entry>
-	    <entry>Satellite TV: DSS (not fully supported)</entry>
-	</row><row>
-	    <entry id="SYS-CMMB"><constant>SYS_CMMB</constant></entry>
-	    <entry>Terrestrial TV (mobile): CMMB (not fully supported)</entry>
-	</row><row>
-	    <entry id="SYS-DVBH"><constant>SYS_DVBH</constant></entry>
-	    <entry>Terrestrial TV (mobile): DVB-H (standard deprecated)</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
-
-</section>
- </section>
- <section id="DTV-ISDBT-PARTIAL-RECEPTION">
- <title><constant>DTV_ISDBT_PARTIAL_RECEPTION</constant></title>
-
- <para>If <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> is '0' this bit-field represents whether
- the channel is in partial reception mode or not.</para>
-
- <para>If '1' <constant>DTV_ISDBT_LAYERA_*</constant> values are assigned to the center segment and
- <constant>DTV_ISDBT_LAYERA_SEGMENT_COUNT</constant> has to be '1'.</para>
-
-	    <para>If in addition <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> is '1',
-		<constant>DTV_ISDBT_PARTIAL_RECEPTION</constant> represents whether this ISDB-Tsb channel
-		consists of one segment and layer or three segments and two layers.</para>
-
- <para>Possible values: 0, 1, -1 (AUTO)</para>
- </section>
- <section id="DTV-ISDBT-SOUND-BROADCASTING">
- <title><constant>DTV_ISDBT_SOUND_BROADCASTING</constant></title>
-
-	    <para>This field represents whether the other DTV_ISDBT_*-parameters are
-		referring to an ISDB-T or an ISDB-Tsb channel. (See also
-		<constant>DTV_ISDBT_PARTIAL_RECEPTION</constant>).</para>
-
- <para>Possible values: 0, 1, -1 (AUTO)</para>
- </section>
- <section id="DTV-ISDBT-SB-SUBCHANNEL-ID">
- <title><constant>DTV_ISDBT_SB_SUBCHANNEL_ID</constant></title>
-
- <para>This field only applies if <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> is '1'.</para>
-
- <para>(Note of the author: This might not be the correct description of the
- <constant>SUBCHANNEL-ID</constant> in all details, but it is my understanding of the technical
- background needed to program a device)</para>
-
- <para>An ISDB-Tsb channel (1 or 3 segments) can be broadcasted alone or in a
- set of connected ISDB-Tsb channels. In this set of channels every
- channel can be received independently. The number of connected
-		ISDB-Tsb segments can vary, e.g. depending on the frequency spectrum
- bandwidth available.</para>
-
- <para>Example: Assume 8 ISDB-Tsb connected segments are broadcasted. The
- broadcaster has several possibilities to put those channels in the
- air: Assuming a normal 13-segment ISDB-T spectrum he can align the 8
- segments from position 1-8 to 5-13 or anything in between.</para>
-
-	    <para>The layer below the segments is made up of subchannels: each segment
-		consists of several subchannels with predefined IDs. A sub-channel
-		is used to help the demodulator to synchronize on the channel.</para>
-
-	    <para>An ISDB-T channel is always centered over all sub-channels. As shown in
-		the example above, in ISDB-Tsb it is no longer as simple as that.</para>
-
-	    <para>The <constant>DTV_ISDBT_SB_SUBCHANNEL_ID</constant> parameter is used to give the
- sub-channel ID of the segment to be demodulated.</para>
-
- <para>Possible values: 0 .. 41, -1 (AUTO)</para>
- </section>
- <section id="DTV-ISDBT-SB-SEGMENT-IDX">
- <title><constant>DTV_ISDBT_SB_SEGMENT_IDX</constant></title>
- <para>This field only applies if <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> is '1'.</para>
- <para><constant>DTV_ISDBT_SB_SEGMENT_IDX</constant> gives the index of the segment to be
- demodulated for an ISDB-Tsb channel where several of them are
- transmitted in the connected manner.</para>
- <para>Possible values: 0 .. <constant>DTV_ISDBT_SB_SEGMENT_COUNT</constant> - 1</para>
- <para>Note: This value cannot be determined by an automatic channel search.</para>
- </section>
- <section id="DTV-ISDBT-SB-SEGMENT-COUNT">
- <title><constant>DTV_ISDBT_SB_SEGMENT_COUNT</constant></title>
- <para>This field only applies if <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> is '1'.</para>
- <para><constant>DTV_ISDBT_SB_SEGMENT_COUNT</constant> gives the total count of connected ISDB-Tsb
- channels.</para>
- <para>Possible values: 1 .. 13</para>
- <para>Note: This value cannot be determined by an automatic channel search.</para>
- </section>
- <section id="isdb-hierq-layers">
- <title><constant>DTV-ISDBT-LAYER*</constant> parameters</title>
-	    <para>ISDB-T channels can be coded hierarchically. As opposed to DVB-T, in
-		ISDB-T hierarchical layers can be decoded simultaneously. For that
-		reason an ISDB-T demodulator has 3 Viterbi and 3 Reed-Solomon decoders.</para>
-	    <para>ISDB-T has 3 hierarchical layers which each can use a part of the
-		available segments. The total number of segments over all layers has
-		to be 13 in ISDB-T.</para>
- <para>There are 3 parameter sets, for Layers A, B and C.</para>
- <section id="DTV-ISDBT-LAYER-ENABLED">
- <title><constant>DTV_ISDBT_LAYER_ENABLED</constant></title>
- <para>Hierarchical reception in ISDB-T is achieved by enabling or disabling
- layers in the decoding process. Setting all bits of
- <constant>DTV_ISDBT_LAYER_ENABLED</constant> to '1' forces all layers (if applicable) to be
- demodulated. This is the default.</para>
- <para>If the channel is in the partial reception mode
- (<constant>DTV_ISDBT_PARTIAL_RECEPTION</constant> = 1) the central segment can be decoded
- independently of the other 12 segments. In that mode layer A has to
- have a <constant>SEGMENT_COUNT</constant> of 1.</para>
-		  <para>In ISDB-Tsb only layer A is used; its segment count can be 1 or 3,
-		    according to <constant>DTV_ISDBT_PARTIAL_RECEPTION</constant>. <constant>SEGMENT_COUNT</constant> must be filled
-		    accordingly.</para>
- <para>Possible values: 0x1, 0x2, 0x4 (|-able)</para>
- <para><constant>DTV_ISDBT_LAYER_ENABLED[0:0]</constant> - layer A</para>
- <para><constant>DTV_ISDBT_LAYER_ENABLED[1:1]</constant> - layer B</para>
- <para><constant>DTV_ISDBT_LAYER_ENABLED[2:2]</constant> - layer C</para>
- <para><constant>DTV_ISDBT_LAYER_ENABLED[31:3]</constant> unused</para>
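-		  <para>A minimal sketch (the helper name and the already opened
-		    frontend descriptor <constant>fd</constant> are assumptions) of
-		    enabling only layers A and B through this bit-field:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-/* Sketch: demodulate ISDB-T layers A and B only.
- * Bit 0 selects layer A, bit 1 layer B, bit 2 layer C. */
-static int enable_layers_ab(int fd)
-{
-	struct dtv_property prop = {
-		.cmd = DTV_ISDBT_LAYER_ENABLED,
-		.u.data = 0x1 | 0x2,	/* layer A | layer B */
-	};
-	struct dtv_properties cmds = { .num = 1, .props = &amp;prop };
-
-	return ioctl(fd, FE_SET_PROPERTY, &amp;cmds);
-}
-</programlisting>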
- </section>
- <section id="DTV-ISDBT-LAYER-FEC">
- <title><constant>DTV_ISDBT_LAYER*_FEC</constant></title>
- <para>Possible values: <constant>FEC_AUTO</constant>, <constant>FEC_1_2</constant>, <constant>FEC_2_3</constant>, <constant>FEC_3_4</constant>, <constant>FEC_5_6</constant>, <constant>FEC_7_8</constant></para>
- </section>
- <section id="DTV-ISDBT-LAYER-MODULATION">
- <title><constant>DTV_ISDBT_LAYER*_MODULATION</constant></title>
-		  <para>Possible values: <constant>QAM_AUTO</constant>, <constant>QPSK</constant>, <constant>QAM_16</constant>, <constant>QAM_64</constant>, <constant>DQPSK</constant></para>
-		  <para>Note: If layer C is <constant>DQPSK</constant>, layer B has to be <constant>DQPSK</constant>. If layer B is <constant>DQPSK</constant>
-		    and <constant>DTV_ISDBT_PARTIAL_RECEPTION</constant>=0, layer A has to be <constant>DQPSK</constant>.</para>
- </section>
- <section id="DTV-ISDBT-LAYER-SEGMENT-COUNT">
- <title><constant>DTV_ISDBT_LAYER*_SEGMENT_COUNT</constant></title>
- <para>Possible values: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 (AUTO)</para>
- <para>Note: Truth table for <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> and
- <constant>DTV_ISDBT_PARTIAL_RECEPTION</constant> and <constant>LAYER</constant>*_SEGMENT_COUNT</para>
- <informaltable id="isdbt-layer_seg-cnt-table">
- <tgroup cols="6">
- <tbody>
- <row>
- <entry>PR</entry>
- <entry>SB</entry>
- <entry>Layer A width</entry>
- <entry>Layer B width</entry>
- <entry>Layer C width</entry>
- <entry>total width</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>0</entry>
- <entry>1 .. 13</entry>
- <entry>1 .. 13</entry>
- <entry>1 .. 13</entry>
- <entry>13</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>0</entry>
- <entry>1</entry>
- <entry>1 .. 13</entry>
- <entry>1 .. 13</entry>
- <entry>13</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>1</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>1</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>1</entry>
- <entry>1</entry>
- <entry>2</entry>
- <entry>0</entry>
- <entry>13</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </section>
- <section id="DTV-ISDBT-LAYER-TIME-INTERLEAVING">
- <title><constant>DTV_ISDBT_LAYER*_TIME_INTERLEAVING</constant></title>
- <para>Valid values: 0, 1, 2, 4, -1 (AUTO)</para>
-		  <para>When <constant>DTV_ISDBT_SOUND_BROADCASTING</constant> is active, the value 8 is also valid.</para>
-		  <para>Note: The real time interleaving length depends on the mode (FFT-size). The values
-		    here refer to what can be found in the TMCC-structure, as shown in the table below.</para>
- <informaltable id="isdbt-layer-interleaving-table">
- <tgroup cols="4" align="center">
- <tbody>
- <row>
- <entry>DTV_ISDBT_LAYER*_TIME_INTERLEAVING</entry>
- <entry>Mode 1 (2K FFT)</entry>
- <entry>Mode 2 (4K FFT)</entry>
- <entry>Mode 3 (8K FFT)</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>4</entry>
- <entry>2</entry>
- <entry>1</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>8</entry>
- <entry>4</entry>
- <entry>2</entry>
- </row>
- <row>
- <entry>4</entry>
- <entry>16</entry>
- <entry>8</entry>
- <entry>4</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </section>
- <section id="DTV-ATSCMH-FIC-VER">
- <title><constant>DTV_ATSCMH_FIC_VER</constant></title>
- <para>Version number of the FIC (Fast Information Channel) signaling data.</para>
- <para>FIC is used for relaying information to allow rapid service acquisition by the receiver.</para>
- <para>Possible values: 0, 1, 2, 3, ..., 30, 31</para>
- </section>
- <section id="DTV-ATSCMH-PARADE-ID">
- <title><constant>DTV_ATSCMH_PARADE_ID</constant></title>
- <para>Parade identification number</para>
- <para>A parade is a collection of up to eight MH groups, conveying one or two ensembles.</para>
- <para>Possible values: 0, 1, 2, 3, ..., 126, 127</para>
- </section>
- <section id="DTV-ATSCMH-NOG">
- <title><constant>DTV_ATSCMH_NOG</constant></title>
- <para>Number of MH groups per MH subframe for a designated parade.</para>
- <para>Possible values: 1, 2, 3, 4, 5, 6, 7, 8</para>
- </section>
- <section id="DTV-ATSCMH-TNOG">
- <title><constant>DTV_ATSCMH_TNOG</constant></title>
- <para>Total number of MH groups including all MH groups belonging to all MH parades in one MH subframe.</para>
- <para>Possible values: 0, 1, 2, 3, ..., 30, 31</para>
- </section>
- <section id="DTV-ATSCMH-SGN">
- <title><constant>DTV_ATSCMH_SGN</constant></title>
- <para>Start group number.</para>
- <para>Possible values: 0, 1, 2, 3, ..., 14, 15</para>
- </section>
- <section id="DTV-ATSCMH-PRC">
- <title><constant>DTV_ATSCMH_PRC</constant></title>
- <para>Parade repetition cycle.</para>
- <para>Possible values: 1, 2, 3, 4, 5, 6, 7, 8</para>
- </section>
- <section id="DTV-ATSCMH-RS-FRAME-MODE">
- <title><constant>DTV_ATSCMH_RS_FRAME_MODE</constant></title>
- <para>Reed Solomon (RS) frame mode.</para>
- <para>Possible values are:</para>
-<table pgwide="1" frame="none" id="atscmh-rs-frame-mode">
- <title>enum atscmh_rs_frame_mode</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="ATSCMH-RSFRAME-PRI-ONLY"><constant>ATSCMH_RSFRAME_PRI_ONLY</constant></entry>
- <entry>Single Frame: There is only a primary RS Frame for all
- Group Regions.</entry>
- </row><row>
- <entry id="ATSCMH-RSFRAME-PRI-SEC"><constant>ATSCMH_RSFRAME_PRI_SEC</constant></entry>
- <entry>Dual Frame: There are two separate RS Frames: Primary RS
- Frame for Group Region A and B and Secondary RS Frame for Group
- Region C and D.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- <section id="DTV-ATSCMH-RS-FRAME-ENSEMBLE">
- <title><constant>DTV_ATSCMH_RS_FRAME_ENSEMBLE</constant></title>
- <para>Reed Solomon(RS) frame ensemble.</para>
- <para>Possible values are:</para>
-<table pgwide="1" frame="none" id="atscmh-rs-frame-ensemble">
- <title>enum atscmh_rs_frame_ensemble</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="ATSCMH-RSFRAME-ENS-PRI"><constant>ATSCMH_RSFRAME_ENS_PRI</constant></entry>
- <entry>Primary Ensemble.</entry>
- </row><row>
-	    <entry id="ATSCMH-RSFRAME-ENS-SEC"><constant>ATSCMH_RSFRAME_ENS_SEC</constant></entry>
-	    <entry>Secondary Ensemble.</entry>
-	  </row><row>
-	    <entry id="ATSCMH-RSFRAME-RES"><constant>ATSCMH_RSFRAME_RES</constant></entry>
- <entry>Reserved. Shouldn't be used.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- <section id="DTV-ATSCMH-RS-CODE-MODE-PRI">
- <title><constant>DTV_ATSCMH_RS_CODE_MODE_PRI</constant></title>
- <para>Reed Solomon (RS) code mode (primary).</para>
- <para>Possible values are:</para>
-<table pgwide="1" frame="none" id="atscmh-rs-code-mode">
- <title>enum atscmh_rs_code_mode</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="ATSCMH-RSCODE-211-187"><constant>ATSCMH_RSCODE_211_187</constant></entry>
- <entry>Reed Solomon code (211,187).</entry>
- </row><row>
- <entry id="ATSCMH-RSCODE-223-187"><constant>ATSCMH_RSCODE_223_187</constant></entry>
- <entry>Reed Solomon code (223,187).</entry>
- </row><row>
- <entry id="ATSCMH-RSCODE-235-187"><constant>ATSCMH_RSCODE_235_187</constant></entry>
- <entry>Reed Solomon code (235,187).</entry>
- </row><row>
- <entry id="ATSCMH-RSCODE-RES"><constant>ATSCMH_RSCODE_RES</constant></entry>
- <entry>Reserved. Shouldn't be used.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- <section id="DTV-ATSCMH-RS-CODE-MODE-SEC">
- <title><constant>DTV_ATSCMH_RS_CODE_MODE_SEC</constant></title>
- <para>Reed Solomon (RS) code mode (secondary).</para>
-		<para>Possible values are the same as documented on
-			&atscmh-rs-code-mode;.</para>
- </section>
- <section id="DTV-ATSCMH-SCCC-BLOCK-MODE">
- <title><constant>DTV_ATSCMH_SCCC_BLOCK_MODE</constant></title>
- <para>Series Concatenated Convolutional Code Block Mode.</para>
- <para>Possible values are:</para>
-<table pgwide="1" frame="none" id="atscmh-sccc-block-mode">
-    <title>enum atscmh_sccc_block_mode</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="ATSCMH-SCCC-BLK-SEP"><constant>ATSCMH_SCCC_BLK_SEP</constant></entry>
- <entry>Separate SCCC: the SCCC outer code mode shall be set independently
- for each Group Region (A, B, C, D)</entry>
- </row><row>
- <entry id="ATSCMH-SCCC-BLK-COMB"><constant>ATSCMH_SCCC_BLK_COMB</constant></entry>
- <entry>Combined SCCC: all four Regions shall have the same SCCC outer
- code mode.</entry>
- </row><row>
- <entry id="ATSCMH-SCCC-BLK-RES"><constant>ATSCMH_SCCC_BLK_RES</constant></entry>
- <entry>Reserved. Shouldn't be used.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- <section id="DTV-ATSCMH-SCCC-CODE-MODE-A">
- <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_A</constant></title>
- <para>Series Concatenated Convolutional Code Rate.</para>
- <para>Possible values are:</para>
-<table pgwide="1" frame="none" id="atscmh-sccc-code-mode">
- <title>enum atscmh_sccc_code_mode</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="ATSCMH-SCCC-CODE-HLF"><constant>ATSCMH_SCCC_CODE_HLF</constant></entry>
- <entry>The outer code rate of a SCCC Block is 1/2 rate.</entry>
- </row><row>
- <entry id="ATSCMH-SCCC-CODE-QTR"><constant>ATSCMH_SCCC_CODE_QTR</constant></entry>
- <entry>The outer code rate of a SCCC Block is 1/4 rate.</entry>
- </row><row>
- <entry id="ATSCMH-SCCC-CODE-RES"><constant>ATSCMH_SCCC_CODE_RES</constant></entry>
- <entry>to be documented.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
- </section>
- <section id="DTV-ATSCMH-SCCC-CODE-MODE-B">
- <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_B</constant></title>
- <para>Series Concatenated Convolutional Code Rate.</para>
- <para>Possible values are the same as documented on
- &atscmh-sccc-code-mode;.</para>
- </section>
- <section id="DTV-ATSCMH-SCCC-CODE-MODE-C">
- <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_C</constant></title>
- <para>Series Concatenated Convolutional Code Rate.</para>
- <para>Possible values are the same as documented on
- &atscmh-sccc-code-mode;.</para>
- </section>
- <section id="DTV-ATSCMH-SCCC-CODE-MODE-D">
- <title><constant>DTV_ATSCMH_SCCC_CODE_MODE_D</constant></title>
- <para>Series Concatenated Convolutional Code Rate.</para>
- <para>Possible values are the same as documented on
- &atscmh-sccc-code-mode;.</para>
- </section>
- </section>
- <section id="DTV-API-VERSION">
- <title><constant>DTV_API_VERSION</constant></title>
- <para>Returns the major/minor version of the DVB API</para>
- </section>
- <section id="DTV-CODE-RATE-HP">
- <title><constant>DTV_CODE_RATE_HP</constant></title>
- <para>Used on terrestrial transmissions. The acceptable values are
-			the ones described at <link linkend="fe-code-rate-t">enum fe_code_rate</link>.
- </para>
- </section>
- <section id="DTV-CODE-RATE-LP">
- <title><constant>DTV_CODE_RATE_LP</constant></title>
- <para>Used on terrestrial transmissions. The acceptable values are
-			the ones described at <link linkend="fe-code-rate-t">enum fe_code_rate</link>.
- </para>
-
- </section>
-
- <section id="DTV-GUARD-INTERVAL">
- <title><constant>DTV_GUARD_INTERVAL</constant></title>
-
- <para>Possible values are:</para>
-
-<section id="fe-guard-interval-t">
-<title>Modulation guard interval</title>
-
-<table pgwide="1" frame="none" id="fe-guard-interval">
- <title>enum fe_guard_interval</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="GUARD-INTERVAL-AUTO"><constant>GUARD_INTERVAL_AUTO</constant></entry>
- <entry>Autodetect the guard interval</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-1-128"><constant>GUARD_INTERVAL_1_128</constant></entry>
- <entry>Guard interval 1/128</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-1-32"><constant>GUARD_INTERVAL_1_32</constant></entry>
- <entry>Guard interval 1/32</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-1-16"><constant>GUARD_INTERVAL_1_16</constant></entry>
- <entry>Guard interval 1/16</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-1-8"><constant>GUARD_INTERVAL_1_8</constant></entry>
- <entry>Guard interval 1/8</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-1-4"><constant>GUARD_INTERVAL_1_4</constant></entry>
- <entry>Guard interval 1/4</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-19-128"><constant>GUARD_INTERVAL_19_128</constant></entry>
- <entry>Guard interval 19/128</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-19-256"><constant>GUARD_INTERVAL_19_256</constant></entry>
- <entry>Guard interval 19/256</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-PN420"><constant>GUARD_INTERVAL_PN420</constant></entry>
- <entry>PN length 420 (1/4)</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-PN595"><constant>GUARD_INTERVAL_PN595</constant></entry>
- <entry>PN length 595 (1/6)</entry>
- </row><row>
- <entry id="GUARD-INTERVAL-PN945"><constant>GUARD_INTERVAL_PN945</constant></entry>
- <entry>PN length 945 (1/9)</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
- <para>Notes:</para>
-	<para>1) If <constant>DTV_GUARD_INTERVAL</constant> is set to <constant>GUARD_INTERVAL_AUTO</constant>, the hardware will
- try to find the correct guard interval (if capable) and will use TMCC to fill
- in the missing parameters.</para>
- <para>2) Intervals 1/128, 19/128 and 19/256 are used only for DVB-T2 at present</para>
- <para>3) DTMB specifies PN420, PN595 and PN945.</para>
-</section>
- </section>
- <section id="DTV-TRANSMISSION-MODE">
- <title><constant>DTV_TRANSMISSION_MODE</constant></title>
-
- <para>Specifies the number of carriers used by the standard.
-	This is used only on OFDM-based standards, e.g.
- DVB-T/T2, ISDB-T, DTMB</para>
-
-<section id="fe-transmit-mode-t">
-<title>enum fe_transmit_mode: Number of carriers per channel</title>
-
-<table pgwide="1" frame="none" id="fe-transmit-mode">
- <title>enum fe_transmit_mode</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="TRANSMISSION-MODE-AUTO"><constant>TRANSMISSION_MODE_AUTO</constant></entry>
- <entry>Autodetect transmission mode. The hardware will try to find
- the correct FFT-size (if capable) to fill in the missing
- parameters.</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-1K"><constant>TRANSMISSION_MODE_1K</constant></entry>
- <entry>Transmission mode 1K</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-2K"><constant>TRANSMISSION_MODE_2K</constant></entry>
- <entry>Transmission mode 2K</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-8K"><constant>TRANSMISSION_MODE_8K</constant></entry>
- <entry>Transmission mode 8K</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-4K"><constant>TRANSMISSION_MODE_4K</constant></entry>
- <entry>Transmission mode 4K</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-16K"><constant>TRANSMISSION_MODE_16K</constant></entry>
- <entry>Transmission mode 16K</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-32K"><constant>TRANSMISSION_MODE_32K</constant></entry>
- <entry>Transmission mode 32K</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-C1"><constant>TRANSMISSION_MODE_C1</constant></entry>
- <entry>Single Carrier (C=1) transmission mode (DTMB)</entry>
- </row><row>
- <entry id="TRANSMISSION-MODE-C3780"><constant>TRANSMISSION_MODE_C3780</constant></entry>
- <entry>Multi Carrier (C=3780) transmission mode (DTMB)</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
-
- <para>Notes:</para>
-	<para>1) ISDB-T supports three carrier/symbol sizes: 8K, 4K and 2K. This is called
-		'mode' in the standard: Mode 1 is 2K, mode 2 is 4K, mode 3 is 8K.</para>
-
-	<para>2) If <constant>DTV_TRANSMISSION_MODE</constant> is set to <constant>TRANSMISSION_MODE_AUTO</constant>, the
- hardware will try to find the correct FFT-size (if capable) and will
- use TMCC to fill in the missing parameters.</para>
- <para>3) DVB-T specifies 2K and 8K as valid sizes.</para>
- <para>4) DVB-T2 specifies 1K, 2K, 4K, 8K, 16K and 32K.</para>
- <para>5) DTMB specifies C1 and C3780.</para>
-</section>
- </section>
- <section id="DTV-HIERARCHY">
- <title><constant>DTV_HIERARCHY</constant></title>
- <para>Frontend hierarchy</para>
-
-
-<section id="fe-hierarchy-t">
-<title>Frontend hierarchy</title>
-
-<table pgwide="1" frame="none" id="fe-hierarchy">
- <title>enum fe_hierarchy</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="HIERARCHY-NONE"><constant>HIERARCHY_NONE</constant></entry>
- <entry>No hierarchy</entry>
- </row><row>
- <entry id="HIERARCHY-AUTO"><constant>HIERARCHY_AUTO</constant></entry>
- <entry>Autodetect hierarchy (if supported)</entry>
- </row><row>
- <entry id="HIERARCHY-1"><constant>HIERARCHY_1</constant></entry>
- <entry>Hierarchy 1</entry>
- </row><row>
- <entry id="HIERARCHY-2"><constant>HIERARCHY_2</constant></entry>
- <entry>Hierarchy 2</entry>
- </row><row>
- <entry id="HIERARCHY-4"><constant>HIERARCHY_4</constant></entry>
- <entry>Hierarchy 4</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</section>
-
- </section>
- <section id="DTV-STREAM-ID">
- <title><constant>DTV_STREAM_ID</constant></title>
- <para>DVB-S2, DVB-T2 and ISDB-S support the transmission of several
- streams on a single transport stream.
- This property enables the DVB driver to handle substream filtering,
- when supported by the hardware.
- By default, substream filtering is disabled.
- </para><para>
- For DVB-S2 and DVB-T2, the valid substream id range is from 0 to 255.
- </para><para>
- For ISDB, the valid substream id range is from 1 to 65535.
- </para><para>
- To disable it, you should use the special macro NO_STREAM_ID_FILTER.
- </para><para>
- Note: any value outside the id range also disables filtering.
- </para>
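-		<para>A minimal sketch (the helper name and the already opened
-		frontend descriptor <constant>fd</constant> are assumptions) of
-		selecting a substream, or disabling the filter again with
-		<constant>NO_STREAM_ID_FILTER</constant>:</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-/* Sketch: select one substream, or pass NO_STREAM_ID_FILTER as id
- * to disable substream filtering again. */
-static int set_stream_id(int fd, unsigned int id)
-{
-	struct dtv_property prop = {
-		.cmd = DTV_STREAM_ID,
-		.u.data = id,
-	};
-	struct dtv_properties cmds = { .num = 1, .props = &amp;prop };
-
-	return ioctl(fd, FE_SET_PROPERTY, &amp;cmds);
-}
-</programlisting>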
- </section>
- <section id="DTV-DVBT2-PLP-ID-LEGACY">
- <title><constant>DTV_DVBT2_PLP_ID_LEGACY</constant></title>
- <para>Obsolete, replaced with DTV_STREAM_ID.</para>
- </section>
- <section id="DTV-ENUM-DELSYS">
- <title><constant>DTV_ENUM_DELSYS</constant></title>
-		<para>A multi-standard frontend needs to advertise the delivery systems
-		it provides. Applications need to enumerate the provided delivery systems
-		before using any other operation with the frontend. Prior to its introduction,
-		FE_GET_INFO was used to determine a frontend type. For a frontend which
-		provides more than a single delivery system, FE_GET_INFO doesn't help much.
-		Applications which intend to use a multi-standard frontend must enumerate
-		the delivery systems associated with it, rather than trying to use
-		FE_GET_INFO. In the case of a legacy frontend, the result is just the same
-		as with FE_GET_INFO, but in a more structured format.</para>
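-		<para>A minimal sketch (the helper name and the already opened
-		frontend descriptor <constant>fd</constant> are assumptions) of such
-		an enumeration; the values come back in the property's
-		<constant>u.buffer</constant> field:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-/* Sketch: list the delivery systems advertised by a frontend.
- * The enumerated values are returned in u.buffer.data[], with
- * u.buffer.len valid entries. */
-static int print_delivery_systems(int fd)
-{
-	struct dtv_property prop = { .cmd = DTV_ENUM_DELSYS };
-	struct dtv_properties cmds = { .num = 1, .props = &amp;prop };
-	unsigned int i;
-
-	if (ioctl(fd, FE_GET_PROPERTY, &amp;cmds) == -1)
-		return -1;
-
-	for (i = 0; i &lt; prop.u.buffer.len; i++)
-		printf("supported delivery system: %d\n",
-		       prop.u.buffer.data[i]);
-	return 0;
-}
-</programlisting>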
- </section>
- <section id="DTV-INTERLEAVING">
- <title><constant>DTV_INTERLEAVING</constant></title>
-
-<para>Time interleaving to be used. Currently, used only on DTMB.</para>
-
-<table pgwide="1" frame="none" id="fe-interleaving">
- <title>enum fe_interleaving</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="INTERLEAVING-NONE"><constant>INTERLEAVING_NONE</constant></entry>
- <entry>No interleaving.</entry>
- </row><row>
- <entry id="INTERLEAVING-AUTO"><constant>INTERLEAVING_AUTO</constant></entry>
- <entry>Auto-detect interleaving.</entry>
- </row><row>
- <entry id="INTERLEAVING-240"><constant>INTERLEAVING_240</constant></entry>
- <entry>Interleaving of 240 symbols.</entry>
- </row><row>
- <entry id="INTERLEAVING-720"><constant>INTERLEAVING_720</constant></entry>
- <entry>Interleaving of 720 symbols.</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
- </section>
- <section id="DTV-LNA">
- <title><constant>DTV_LNA</constant></title>
- <para>Low-noise amplifier.</para>
-		<para>The hardware might offer a controllable LNA, which can be set manually
-		using this parameter. Usually an LNA can be found only on
-		terrestrial devices, if at all.</para>
-		<para>Possible values: 0, 1, LNA_AUTO</para>
-		<para>0, LNA off</para>
-		<para>1, LNA on</para>
-		<para>Use the special macro LNA_AUTO to set the LNA to automatic mode.</para>
- </section>
-</section>
-
- <section id="frontend-stat-properties">
- <title>Frontend statistics indicators</title>
- <para>The values are returned via <constant>dtv_property.stat</constant>.
- If the property is supported, <constant>dtv_property.stat.len</constant> is bigger than zero.</para>
-	<para>For most delivery systems, <constant>dtv_property.stat.len</constant>
-	will be 1 if the statistics are supported, and the properties will
-	return a single value for each parameter.</para>
- <para>It should be noted, however, that new OFDM delivery systems
- like ISDB can use different modulation types for each group of
- carriers. On such standards, up to 3 groups of statistics can be
- provided, and <constant>dtv_property.stat.len</constant> is updated
- to reflect the "global" metrics, plus one metric per each carrier
- group (called "layer" on ISDB).</para>
- <para>So, in order to be consistent with other delivery systems, the first
- value at <link linkend="dtv-stats"><constant>dtv_property.stat.dtv_stats</constant></link>
- array refers to the global metric. The other elements of the array
- represent each layer, starting from layer A(index 1),
- layer B (index 2) and so on.</para>
-	<para>The number of filled elements is stored at <constant>dtv_property.stat.len</constant>.</para>
-	<para>Each element of the <constant>dtv_property.stat.dtv_stats</constant> array consists of two elements:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><constant>svalue</constant> or <constant>uvalue</constant>, where
- <constant>svalue</constant> is for signed values of the measure (dB measures)
- and <constant>uvalue</constant> is for unsigned values (counters, relative scale)</para></listitem>
- <listitem><para><constant>scale</constant> - Scale for the value. It can be:</para>
- <itemizedlist mark='bullet' id="fecap-scale-params">
- <listitem id="FE-SCALE-NOT-AVAILABLE"><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - The parameter is supported by the frontend, but it was not possible to collect it (could be a transitory or permanent condition)</para></listitem>
- <listitem id="FE-SCALE-DECIBEL"><para><constant>FE_SCALE_DECIBEL</constant> - parameter is a signed value, measured in 1/1000 dB</para></listitem>
-			<listitem id="FE-SCALE-RELATIVE"><para><constant>FE_SCALE_RELATIVE</constant> - parameter is an unsigned value, where 0 means 0% and 65535 means 100%.</para></listitem>
-			<listitem id="FE-SCALE-COUNTER"><para><constant>FE_SCALE_COUNTER</constant> - parameter is an unsigned value that counts the occurrence of an event, like bit error, block error, or lapsed time.</para></listitem>
- </itemizedlist>
- </listitem>
- </itemizedlist>
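-	<para>A minimal sketch (the helper name and the already opened
-	frontend descriptor <constant>fd</constant> are assumptions) of how a
-	statistics property can be read and its per-element scale interpreted,
-	using <constant>DTV_STAT_CNR</constant> as an example:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-/* Sketch: read DTV_STAT_CNR and print each returned element
- * (global metric first, then one entry per layer, if any),
- * honouring the per-element scale. */
-static int print_cnr(int fd)
-{
-	struct dtv_property prop = { .cmd = DTV_STAT_CNR };
-	struct dtv_properties cmds = { .num = 1, .props = &amp;prop };
-	int i;
-
-	if (ioctl(fd, FE_GET_PROPERTY, &amp;cmds) == -1)
-		return -1;
-
-	for (i = 0; i &lt; prop.u.st.len; i++) {
-		switch (prop.u.st.stat[i].scale) {
-		case FE_SCALE_DECIBEL:
-			printf("CNR[%d]: %.3f dB\n", i,
-			       prop.u.st.stat[i].svalue / 1000.);
-			break;
-		case FE_SCALE_RELATIVE:
-			printf("CNR[%d]: %.1f%%\n", i,
-			       prop.u.st.stat[i].uvalue * 100. / 65535);
-			break;
-		default:	/* FE_SCALE_NOT_AVAILABLE */
-			printf("CNR[%d]: not available\n", i);
-		}
-	}
-	return 0;
-}
-</programlisting>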
- <section id="DTV-STAT-SIGNAL-STRENGTH">
- <title><constant>DTV_STAT_SIGNAL_STRENGTH</constant></title>
- <para>Indicates the signal strength level at the analog part of the tuner or of the demod.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
-			<listitem><para><constant>FE_SCALE_DECIBEL</constant> - signal strength is in 0.001 dBm units, power measured in milliwatts. This value is generally negative.</para></listitem>
- <listitem><para><constant>FE_SCALE_RELATIVE</constant> - The frontend provides a 0% to 100% measurement for power (actually, 0 to 65535).</para></listitem>
- </itemizedlist>
- </section>
- <section id="DTV-STAT-CNR">
- <title><constant>DTV_STAT_CNR</constant></title>
- <para>Indicates the Signal to Noise ratio for the main carrier.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_DECIBEL</constant> - Signal/Noise ratio is in 0.001 dB units.</para></listitem>
- <listitem><para><constant>FE_SCALE_RELATIVE</constant> - The frontend provides a 0% to 100% measurement for Signal/Noise (actually, 0 to 65535).</para></listitem>
- </itemizedlist>
- </section>
- <section id="DTV-STAT-PRE-ERROR-BIT-COUNT">
- <title><constant>DTV_STAT_PRE_ERROR_BIT_COUNT</constant></title>
- <para>Measures the number of bit errors before the forward error correction (FEC) on the inner coding block (before Viterbi, LDPC or other inner code).</para>
- <para>This measure is taken during the same interval as <constant>DTV_STAT_PRE_TOTAL_BIT_COUNT</constant>.</para>
- <para>In order to get the BER (Bit Error Rate) measurement, it should be divided by
- <link linkend="DTV-STAT-PRE-TOTAL-BIT-COUNT"><constant>DTV_STAT_PRE_TOTAL_BIT_COUNT</constant></link>.</para>
- <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
- The frontend may reset it when a channel/transponder is tuned.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_COUNTER</constant> - Number of error bits counted before the inner coding.</para></listitem>
- </itemizedlist>
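-		<para>For instance, a minimal sketch (assuming both counters were
-		fetched in the same <constant>FE_GET_PROPERTY</constant> call and
-		the elements of interest use <constant>FE_SCALE_COUNTER</constant>)
-		of deriving the pre-FEC BER:</para>
-<programlisting>
-#include &lt;linux/dvb/frontend.h&gt;
-
-/* Sketch: derive the pre-FEC bit error rate from the two counters,
- * assuming both elements report FE_SCALE_COUNTER and were fetched
- * in the same FE_GET_PROPERTY call. */
-static double pre_ber(const struct dtv_stats *err,
-		      const struct dtv_stats *total)
-{
-	if (err->scale != FE_SCALE_COUNTER ||
-	    total->scale != FE_SCALE_COUNTER || !total->uvalue)
-		return -1.;	/* not measurable */
-
-	return (double)err->uvalue / (double)total->uvalue;
-}
-</programlisting>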
- </section>
- <section id="DTV-STAT-PRE-TOTAL-BIT-COUNT">
- <title><constant>DTV_STAT_PRE_TOTAL_BIT_COUNT</constant></title>
- <para>Measures the amount of bits received before the inner code block, during the same period as
- <link linkend="DTV-STAT-PRE-ERROR-BIT-COUNT"><constant>DTV_STAT_PRE_ERROR_BIT_COUNT</constant></link> measurement was taken.</para>
- <para>It should be noted that this measurement can be smaller than the total amount of bits on the transport stream,
- as the frontend may need to manually restart the measurement, losing some data between each measurement interval.</para>
- <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
- The frontend may reset it when a channel/transponder is tuned.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_COUNTER</constant> - Number of bits counted while measuring
- <link linkend="DTV-STAT-PRE-ERROR-BIT-COUNT"><constant>DTV_STAT_PRE_ERROR_BIT_COUNT</constant></link>.</para></listitem>
- </itemizedlist>
- </section>
- <section id="DTV-STAT-POST-ERROR-BIT-COUNT">
- <title><constant>DTV_STAT_POST_ERROR_BIT_COUNT</constant></title>
- <para>Measures the number of bit errors after the forward error correction (FEC) done by inner code block (after Viterbi, LDPC or other inner code).</para>
- <para>This measure is taken during the same interval as <constant>DTV_STAT_POST_TOTAL_BIT_COUNT</constant>.</para>
- <para>In order to get the BER (Bit Error Rate) measurement, it should be divided by
- <link linkend="DTV-STAT-POST-TOTAL-BIT-COUNT"><constant>DTV_STAT_POST_TOTAL_BIT_COUNT</constant></link>.</para>
- <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
- The frontend may reset it when a channel/transponder is tuned.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_COUNTER</constant> - Number of error bits counted after the inner coding.</para></listitem>
- </itemizedlist>
- </section>
- <section id="DTV-STAT-POST-TOTAL-BIT-COUNT">
- <title><constant>DTV_STAT_POST_TOTAL_BIT_COUNT</constant></title>
- <para>Measures the amount of bits received after the inner coding, during the same period as
- <link linkend="DTV-STAT-POST-ERROR-BIT-COUNT"><constant>DTV_STAT_POST_ERROR_BIT_COUNT</constant></link> measurement was taken.</para>
- <para>It should be noted that this measurement can be smaller than the total amount of bits on the transport stream,
- as the frontend may need to manually restart the measurement, losing some data between each measurement interval.</para>
- <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
- The frontend may reset it when a channel/transponder is tuned.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_COUNTER</constant> - Number of bits counted while measuring
- <link linkend="DTV-STAT-POST-ERROR-BIT-COUNT"><constant>DTV_STAT_POST_ERROR_BIT_COUNT</constant></link>.</para></listitem>
- </itemizedlist>
- </section>
- <section id="DTV-STAT-ERROR-BLOCK-COUNT">
- <title><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></title>
- <para>Measures the number of block errors after the outer forward error correction coding (after Reed-Solomon or other outer code).</para>
- <para>This measurement is monotonically increased, as the frontend gets more bit count measurements.
- The frontend may reset it when a channel/transponder is tuned.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_COUNTER</constant> - Number of error blocks counted after the outer coding.</para></listitem>
- </itemizedlist>
- </section>
- <section id="DTV-STAT-TOTAL-BLOCK-COUNT">
-		<title><constant>DTV_STAT_TOTAL_BLOCK_COUNT</constant></title>
- <para>Measures the total number of blocks received during the same period as
- <link linkend="DTV-STAT-ERROR-BLOCK-COUNT"><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></link> measurement was taken.</para>
- <para>It can be used to calculate the PER indicator, by dividing
- <link linkend="DTV-STAT-ERROR-BLOCK-COUNT"><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></link>
-			by <link linkend="DTV-STAT-TOTAL-BLOCK-COUNT"><constant>DTV_STAT_TOTAL_BLOCK_COUNT</constant></link>.</para>
- <para>Possible scales for this metric are:</para>
- <itemizedlist mark='bullet'>
- <listitem><para><constant>FE_SCALE_NOT_AVAILABLE</constant> - it failed to measure it, or the measurement was not complete yet.</para></listitem>
- <listitem><para><constant>FE_SCALE_COUNTER</constant> - Number of blocks counted while measuring
- <link linkend="DTV-STAT-ERROR-BLOCK-COUNT"><constant>DTV_STAT_ERROR_BLOCK_COUNT</constant></link>.</para></listitem>
- </itemizedlist>
- </section>
- </section>
-
- <section id="frontend-property-terrestrial-systems">
- <title>Properties used on terrestrial delivery systems</title>
- <section id="dvbt-params">
- <title>DVB-T delivery system</title>
- <para>The following parameters are valid for DVB-T:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CODE-RATE-HP"><constant>DTV_CODE_RATE_HP</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CODE-RATE-LP"><constant>DTV_CODE_RATE_LP</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
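-		    <para>A minimal sketch tying these properties together in one
-		    <constant>FE_SET_PROPERTY</constant> call (the helper name, the
-		    already opened frontend descriptor <constant>fd</constant> and the
-		    474 MHz / 8 MHz figures are assumptions; the AUTO values let
-		    capable hardware detect the remaining parameters):</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-/* Sketch: tune a DVB-T channel using the properties listed above. */
-static int tune_dvbt(int fd)
-{
-	struct dtv_property props[] = {
-		{ .cmd = DTV_CLEAR },
-		{ .cmd = DTV_DELIVERY_SYSTEM,   .u.data = SYS_DVBT },
-		{ .cmd = DTV_FREQUENCY,         .u.data = 474000000 },
-		{ .cmd = DTV_BANDWIDTH_HZ,      .u.data = 8000000 },
-		{ .cmd = DTV_MODULATION,        .u.data = QAM_AUTO },
-		{ .cmd = DTV_INVERSION,         .u.data = INVERSION_AUTO },
-		{ .cmd = DTV_CODE_RATE_HP,      .u.data = FEC_AUTO },
-		{ .cmd = DTV_CODE_RATE_LP,      .u.data = FEC_AUTO },
-		{ .cmd = DTV_GUARD_INTERVAL,    .u.data = GUARD_INTERVAL_AUTO },
-		{ .cmd = DTV_TRANSMISSION_MODE, .u.data = TRANSMISSION_MODE_AUTO },
-		{ .cmd = DTV_HIERARCHY,         .u.data = HIERARCHY_AUTO },
-		{ .cmd = DTV_TUNE },
-	};
-	struct dtv_properties cmds = {
-		.num = sizeof(props) / sizeof(props[0]),
-		.props = props,
-	};
-
-	return ioctl(fd, FE_SET_PROPERTY, &amp;cmds);
-}
-</programlisting>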
- </section>
- <section id="dvbt2-params">
- <title>DVB-T2 delivery system</title>
- <para>DVB-T2 support is currently in the early stages
-		    of development, so expect that this section may grow and become
- more detailed with time.</para>
- <para>The following parameters are valid for DVB-T2:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CODE-RATE-HP"><constant>DTV_CODE_RATE_HP</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CODE-RATE-LP"><constant>DTV_CODE_RATE_LP</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-STREAM-ID"><constant>DTV_STREAM_ID</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- <section id="isdbt">
- <title>ISDB-T delivery system</title>
- <para>This ISDB-T/ISDB-Tsb API extension should reflect all information
- needed to tune any ISDB-T/ISDB-Tsb hardware. Of course it is possible
- that some very sophisticated devices won't need certain parameters to
- tune.</para>
- <para>The information given here should help application writers to know how
- to handle ISDB-T and ISDB-Tsb hardware using the Linux DVB-API.</para>
- <para>The details given here about ISDB-T and ISDB-Tsb are just enough to
- basically show the dependencies between the needed parameter values,
- but surely some information is left out. For more detailed information
- see the following documents:</para>
- <para>ARIB STD-B31 - "Transmission System for Digital Terrestrial
- Television Broadcasting" and</para>
- <para>ARIB TR-B14 - "Operational Guidelines for Digital Terrestrial
- Television Broadcasting".</para>
-		    <para>In order to understand the ISDB specific parameters,
-		    one has to have some knowledge of the channel structure in
-		    ISDB-T and ISDB-Tsb. I.e. it has to be known to
-		    the reader that an ISDB-T channel consists of 13 segments,
-		    that it can have up to 3 layers sharing those segments,
-		    and things like that.</para>
- <para>The following parameters are valid for ISDB-T:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-ENABLED"><constant>DTV_ISDBT_LAYER_ENABLED</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-PARTIAL-RECEPTION"><constant>DTV_ISDBT_PARTIAL_RECEPTION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-SOUND-BROADCASTING"><constant>DTV_ISDBT_SOUND_BROADCASTING</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-SB-SUBCHANNEL-ID"><constant>DTV_ISDBT_SB_SUBCHANNEL_ID</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-SB-SEGMENT-IDX"><constant>DTV_ISDBT_SB_SEGMENT_IDX</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-SB-SEGMENT-COUNT"><constant>DTV_ISDBT_SB_SEGMENT_COUNT</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-FEC"><constant>DTV_ISDBT_LAYERA_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-MODULATION"><constant>DTV_ISDBT_LAYERA_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-SEGMENT-COUNT"><constant>DTV_ISDBT_LAYERA_SEGMENT_COUNT</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-TIME-INTERLEAVING"><constant>DTV_ISDBT_LAYERA_TIME_INTERLEAVING</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-FEC"><constant>DTV_ISDBT_LAYERB_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-MODULATION"><constant>DTV_ISDBT_LAYERB_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-SEGMENT-COUNT"><constant>DTV_ISDBT_LAYERB_SEGMENT_COUNT</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-TIME-INTERLEAVING"><constant>DTV_ISDBT_LAYERB_TIME_INTERLEAVING</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-FEC"><constant>DTV_ISDBT_LAYERC_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-MODULATION"><constant>DTV_ISDBT_LAYERC_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-SEGMENT-COUNT"><constant>DTV_ISDBT_LAYERC_SEGMENT_COUNT</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ISDBT-LAYER-TIME-INTERLEAVING"><constant>DTV_ISDBT_LAYERC_TIME_INTERLEAVING</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- <section id="atsc-params">
- <title>ATSC delivery system</title>
- <para>The following parameters are valid for ATSC:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- <section id="atscmh-params">
- <title>ATSC-MH delivery system</title>
- <para>The following parameters are valid for ATSC-MH:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-FIC-VER"><constant>DTV_ATSCMH_FIC_VER</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-PARADE-ID"><constant>DTV_ATSCMH_PARADE_ID</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-NOG"><constant>DTV_ATSCMH_NOG</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-TNOG"><constant>DTV_ATSCMH_TNOG</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-SGN"><constant>DTV_ATSCMH_SGN</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-PRC"><constant>DTV_ATSCMH_PRC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-RS-FRAME-MODE"><constant>DTV_ATSCMH_RS_FRAME_MODE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-RS-FRAME-ENSEMBLE"><constant>DTV_ATSCMH_RS_FRAME_ENSEMBLE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-RS-CODE-MODE-PRI"><constant>DTV_ATSCMH_RS_CODE_MODE_PRI</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-RS-CODE-MODE-SEC"><constant>DTV_ATSCMH_RS_CODE_MODE_SEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-SCCC-BLOCK-MODE"><constant>DTV_ATSCMH_SCCC_BLOCK_MODE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-A"><constant>DTV_ATSCMH_SCCC_CODE_MODE_A</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-B"><constant>DTV_ATSCMH_SCCC_CODE_MODE_B</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-C"><constant>DTV_ATSCMH_SCCC_CODE_MODE_C</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ATSCMH-SCCC-CODE-MODE-D"><constant>DTV_ATSCMH_SCCC_CODE_MODE_D</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- <section id="dtmb-params">
- <title>DTMB delivery system</title>
- <para>The following parameters are valid for DTMB:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INNER-FEC"><constant>DTV_INNER_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INTERLEAVING"><constant>DTV_INTERLEAVING</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- </section>
- <section id="frontend-property-cable-systems">
- <title>Properties used on cable delivery systems</title>
- <section id="dvbc-params">
- <title>DVB-C delivery system</title>
-	    <para>DVB-C Annex A is the most widely used cable standard. Transmission uses QAM modulation.</para>
-	    <para>DVB-C Annex C is optimized for 6 MHz channels and is used in Japan. It supports a subset of the Annex A modulation types and a roll-off of 0.13 instead of 0.15.</para>
-	    <para>The following parameters are valid for DVB-C Annex A/C (a tuning sketch is given at the end of this section):</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-SYMBOL-RATE"><constant>DTV_SYMBOL_RATE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INNER-FEC"><constant>DTV_INNER_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
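-	    <para>The sketch below assumes that <parameter>fd</parameter> is an already
-		open frontend file descriptor and that the usual frontend headers are
-		included; the frequency, modulation and symbol rate values are
-		illustrative only.</para>
-	    <programlisting>
- struct dtv_property dvbc_props[] = {
-         { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBC_ANNEX_A },
-         { .cmd = DTV_FREQUENCY,       .u.data = 474000000 },  /* in Hz */
-         { .cmd = DTV_MODULATION,      .u.data = QAM_64 },
-         { .cmd = DTV_INVERSION,       .u.data = INVERSION_AUTO },
-         { .cmd = DTV_SYMBOL_RATE,     .u.data = 6875000 },    /* in bauds */
-         { .cmd = DTV_INNER_FEC,       .u.data = FEC_AUTO },
-         { .cmd = DTV_TUNE,            .u.data = 0 },
- };
-
- struct dtv_properties dvbc_cmdseq = {
-         .num   = sizeof(dvbc_props) / sizeof(dvbc_props[0]),
-         .props = dvbc_props,
- };
-
- if (ioctl(fd, FE_SET_PROPERTY, &amp;dvbc_cmdseq) == -1)
-         perror("FE_SET_PROPERTY");
-</programlisting>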
- </section>
- <section id="dvbc-annex-b-params">
- <title>DVB-C Annex B delivery system</title>
-	    <para>DVB-C Annex B is used only in a few countries, such as the United States.</para>
- <para>The following parameters are valid for DVB-C Annex B:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-LNA"><constant>DTV_LNA</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- </section>
- <section id="frontend-property-satellite-systems">
- <title>Properties used on satellite delivery systems</title>
- <section id="dvbs-params">
- <title>DVB-S delivery system</title>
- <para>The following parameters are valid for DVB-S:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-SYMBOL-RATE"><constant>DTV_SYMBOL_RATE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INNER-FEC"><constant>DTV_INNER_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-VOLTAGE"><constant>DTV_VOLTAGE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TONE"><constant>DTV_TONE</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- <para>Future implementations might add those two missing parameters:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-DISEQC-MASTER"><constant>DTV_DISEQC_MASTER</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DISEQC-SLAVE-REPLY"><constant>DTV_DISEQC_SLAVE_REPLY</constant></link></para></listitem>
- </itemizedlist>
- </section>
- <section id="dvbs2-params">
- <title>DVB-S2 delivery system</title>
- <para>In addition to all parameters valid for DVB-S, DVB-S2 supports the following parameters:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-PILOT"><constant>DTV_PILOT</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-ROLLOFF"><constant>DTV_ROLLOFF</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-STREAM-ID"><constant>DTV_STREAM_ID</constant></link></para></listitem>
- </itemizedlist>
- <para>In addition, the <link linkend="frontend-stat-properties">DTV QoS statistics</link> are also valid.</para>
- </section>
- <section id="turbo-params">
- <title>Turbo code delivery system</title>
- <para>In addition to all parameters valid for DVB-S, turbo code supports the following parameters:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
- </itemizedlist>
- </section>
- <section id="isdbs-params">
- <title>ISDB-S delivery system</title>
- <para>The following parameters are valid for ISDB-S:</para>
- <itemizedlist mark='opencircle'>
- <listitem><para><link linkend="DTV-API-VERSION"><constant>DTV_API_VERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-DELIVERY-SYSTEM"><constant>DTV_DELIVERY_SYSTEM</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-SYMBOL-RATE"><constant>DTV_SYMBOL_RATE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-INNER-FEC"><constant>DTV_INNER_FEC</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-VOLTAGE"><constant>DTV_VOLTAGE</constant></link></para></listitem>
- <listitem><para><link linkend="DTV-STREAM-ID"><constant>DTV_STREAM_ID</constant></link></para></listitem>
- </itemizedlist>
- </section>
- </section>
-</section>
diff --git a/Documentation/DocBook/media/dvb/dvbstb.pdf b/Documentation/DocBook/media/dvb/dvbstb.pdf
deleted file mode 100644
index 0fa75d90c3eb..000000000000
--- a/Documentation/DocBook/media/dvb/dvbstb.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/dvb/examples.xml b/Documentation/DocBook/media/dvb/examples.xml
deleted file mode 100644
index 837fb3b64b72..000000000000
--- a/Documentation/DocBook/media/dvb/examples.xml
+++ /dev/null
@@ -1,367 +0,0 @@
-<title>Examples</title>
-<para>In this section we present some examples of how to use the DVB API.
-</para>
-<para>NOTE: This section is out of date, and the code below won't even
-	compile. Please refer to the
-	<ulink url="https://linuxtv.org/docs/libdvbv5/index.html">libdvbv5</ulink>
-	documentation for updated and recommended examples.
-</para>
-
-<section id="tuning">
-<title>Tuning</title>
-<para>We will start with a generic tuning subroutine that uses the frontend and SEC, as well as
-the demux devices. The example is given for QPSK tuners, but can easily be adjusted for
-QAM.
-</para>
-<programlisting>
- #include &#x003C;sys/ioctl.h&#x003E;
- #include &#x003C;stdio.h&#x003E;
- #include &#x003C;stdint.h&#x003E;
- #include &#x003C;sys/types.h&#x003E;
- #include &#x003C;sys/stat.h&#x003E;
- #include &#x003C;fcntl.h&#x003E;
- #include &#x003C;time.h&#x003E;
- #include &#x003C;unistd.h&#x003E;
-
- #include &#x003C;linux/dvb/dmx.h&#x003E;
- #include &#x003C;linux/dvb/frontend.h&#x003E;
- #include &#x003C;linux/dvb/sec.h&#x003E;
- #include &#x003C;sys/poll.h&#x003E;
-
- #define DMX "/dev/dvb/adapter0/demux1"
- #define FRONT "/dev/dvb/adapter0/frontend1"
- #define SEC "/dev/dvb/adapter0/sec1"
-
- /&#x22C6; routine for checking if we have a signal and other status information&#x22C6;/
- int FEReadStatus(int fd, fe_status_t &#x22C6;stat)
- {
- int ans;
-
-	 if ((ans = ioctl(fd, FE_READ_STATUS, stat)) &#x003C; 0){
- perror("FE READ STATUS: ");
- return -1;
- }
-
- if (&#x22C6;stat &amp; FE_HAS_POWER)
- printf("FE HAS POWER\n");
-
- if (&#x22C6;stat &amp; FE_HAS_SIGNAL)
- printf("FE HAS SIGNAL\n");
-
- if (&#x22C6;stat &amp; FE_SPECTRUM_INV)
- printf("SPEKTRUM INV\n");
-
- return 0;
- }
-
-
- /&#x22C6; tune qpsk &#x22C6;/
- /&#x22C6; freq: frequency of transponder &#x22C6;/
- /&#x22C6; vpid, apid, tpid: PIDs of video, audio and teletext TS packets &#x22C6;/
- /&#x22C6; diseqc: DiSEqC address of the used LNB &#x22C6;/
- /&#x22C6; pol: Polarisation &#x22C6;/
- /&#x22C6; srate: Symbol Rate &#x22C6;/
-	 /&#x22C6; fec: FEC &#x22C6;/
- /&#x22C6; lnb_lof1: local frequency of lower LNB band &#x22C6;/
- /&#x22C6; lnb_lof2: local frequency of upper LNB band &#x22C6;/
- /&#x22C6; lnb_slof: switch frequency of LNB &#x22C6;/
-
- int set_qpsk_channel(int freq, int vpid, int apid, int tpid,
- int diseqc, int pol, int srate, int fec, int lnb_lof1,
- int lnb_lof2, int lnb_slof)
- {
- struct secCommand scmd;
- struct secCmdSequence scmds;
- struct dmx_pes_filter_params pesFilterParams;
- FrontendParameters frp;
- struct pollfd pfd[1];
- FrontendEvent event;
-	 int demux1 = -1, demux2 = -1, demux3 = -1, front, sec;
-	 fe_status_t stat;
-
- if((front = open(FRONT,O_RDWR)) &#x003C; 0){
- perror("FRONTEND DEVICE: ");
- return -1;
- }
-
- if((sec = open(SEC,O_RDWR)) &#x003C; 0){
- perror("SEC DEVICE: ");
- return -1;
- }
-
- if (demux1 &#x003C; 0){
- if ((demux1=open(DMX, O_RDWR|O_NONBLOCK))
- &#x003C; 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (demux2 &#x003C; 0){
- if ((demux2=open(DMX, O_RDWR|O_NONBLOCK))
- &#x003C; 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (demux3 &#x003C; 0){
- if ((demux3=open(DMX, O_RDWR|O_NONBLOCK))
- &#x003C; 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (freq &#x003C; lnb_slof) {
- frp.Frequency = (freq - lnb_lof1);
- scmds.continuousTone = SEC_TONE_OFF;
- } else {
- frp.Frequency = (freq - lnb_lof2);
- scmds.continuousTone = SEC_TONE_ON;
- }
- frp.Inversion = INVERSION_AUTO;
- if (pol) scmds.voltage = SEC_VOLTAGE_18;
- else scmds.voltage = SEC_VOLTAGE_13;
-
- scmd.type=0;
- scmd.u.diseqc.addr=0x10;
- scmd.u.diseqc.cmd=0x38;
- scmd.u.diseqc.numParams=1;
- scmd.u.diseqc.params[0] = 0xF0 | ((diseqc &#x22C6; 4) &amp; 0x0F) |
- (scmds.continuousTone == SEC_TONE_ON ? 1 : 0) |
- (scmds.voltage==SEC_VOLTAGE_18 ? 2 : 0);
-
- scmds.miniCommand=SEC_MINI_NONE;
- scmds.numCommands=1;
- scmds.commands=&amp;scmd;
- if (ioctl(sec, SEC_SEND_SEQUENCE, &amp;scmds) &#x003C; 0){
- perror("SEC SEND: ");
- return -1;
- }
-
- frp.u.qpsk.SymbolRate = srate;
- frp.u.qpsk.FEC_inner = fec;
-
- if (ioctl(front, FE_SET_FRONTEND, &amp;frp) &#x003C; 0){
- perror("QPSK TUNE: ");
- return -1;
- }
-
- pfd[0].fd = front;
- pfd[0].events = POLLIN;
-
- if (poll(pfd,1,3000)){
- if (pfd[0].revents &amp; POLLIN){
- printf("Getting QPSK event\n");
- if ( ioctl(front, FE_GET_EVENT, &amp;event)
-
- == -EOVERFLOW){
- perror("qpsk get event");
- return -1;
- }
- printf("Received ");
- switch(event.type){
- case FE_UNEXPECTED_EV:
- printf("unexpected event\n");
- return -1;
- case FE_FAILURE_EV:
- printf("failure event\n");
- return -1;
-
- case FE_COMPLETION_EV:
- printf("completion event\n");
- }
- }
- }
-
-
- pesFilterParams.pid = vpid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_DECODER;
- pesFilterParams.pes_type = DMX_PES_VIDEO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux1, DMX_SET_PES_FILTER, &amp;pesFilterParams) &#x003C; 0){
- perror("set_vpid");
- return -1;
- }
-
- pesFilterParams.pid = apid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_DECODER;
- pesFilterParams.pes_type = DMX_PES_AUDIO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux2, DMX_SET_PES_FILTER, &amp;pesFilterParams) &#x003C; 0){
- perror("set_apid");
- return -1;
- }
-
- pesFilterParams.pid = tpid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_DECODER;
- pesFilterParams.pes_type = DMX_PES_TELETEXT;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux3, DMX_SET_PES_FILTER, &amp;pesFilterParams) &#x003C; 0){
- perror("set_tpid");
- return -1;
- }
-
-	 /&#x22C6; check whether the frontend reports a signal &#x22C6;/
-	 return FEReadStatus(front, &amp;stat);
- }
-
-</programlisting>
-<para>The program assumes that you are using a universal LNB and a standard DiSEqC
-switch with up to 4 addresses. Of course, you could add more checks on whether
-tuning was successful, and perhaps retry the tuning process. Depending on the
-external hardware, i.e. LNB and DiSEqC switch, and on weather conditions, this may be
-necessary.
-</para>
-</section>
-
-<section id="the_dvr_device">
-<title>The DVR device</title>
-<para>The following program code shows how to use the DVR device for recording.
-</para>
-<programlisting>
- #include &#x003C;sys/ioctl.h&#x003E;
- #include &#x003C;stdio.h&#x003E;
- #include &#x003C;stdint.h&#x003E;
- #include &#x003C;sys/types.h&#x003E;
- #include &#x003C;sys/stat.h&#x003E;
- #include &#x003C;fcntl.h&#x003E;
- #include &#x003C;time.h&#x003E;
- #include &#x003C;unistd.h&#x003E;
-
- #include &#x003C;linux/dvb/dmx.h&#x003E;
- #include &#x003C;linux/dvb/video.h&#x003E;
- #include &#x003C;sys/poll.h&#x003E;
-	 #define DMX "/dev/dvb/adapter0/demux1"
-	 #define DVR "/dev/dvb/adapter0/dvr1"
-	 #define AUDIO "/dev/dvb/adapter0/audio1"
-	 #define VIDEO "/dev/dvb/adapter0/video1"
-	 #define DVR_FILE "dvr_test.ts" /&#x22C6; sample output file for the recording &#x22C6;/
-
- #define BUFFY (188&#x22C6;20)
- #define MAX_LENGTH (1024&#x22C6;1024&#x22C6;5) /&#x22C6; record 5MB &#x22C6;/
-
-
- /&#x22C6; switch the demuxes to recording, assuming the transponder is tuned &#x22C6;/
-
- /&#x22C6; demux1, demux2: file descriptor of video and audio filters &#x22C6;/
- /&#x22C6; vpid, apid: PIDs of video and audio channels &#x22C6;/
-
- int switch_to_record(int demux1, int demux2, uint16_t vpid, uint16_t apid)
- {
- struct dmx_pes_filter_params pesFilterParams;
-
- if (demux1 &#x003C; 0){
- if ((demux1=open(DMX, O_RDWR|O_NONBLOCK))
- &#x003C; 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (demux2 &#x003C; 0){
- if ((demux2=open(DMX, O_RDWR|O_NONBLOCK))
- &#x003C; 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- pesFilterParams.pid = vpid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_TS_TAP;
- pesFilterParams.pes_type = DMX_PES_VIDEO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux1, DMX_SET_PES_FILTER, &amp;pesFilterParams) &#x003C; 0){
- perror("DEMUX DEVICE");
- return -1;
- }
- pesFilterParams.pid = apid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_TS_TAP;
- pesFilterParams.pes_type = DMX_PES_AUDIO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux2, DMX_SET_PES_FILTER, &amp;pesFilterParams) &#x003C; 0){
- perror("DEMUX DEVICE");
- return -1;
- }
- return 0;
- }
-
- /&#x22C6; start recording MAX_LENGTH , assuming the transponder is tuned &#x22C6;/
-
- /&#x22C6; demux1, demux2: file descriptor of video and audio filters &#x22C6;/
- /&#x22C6; vpid, apid: PIDs of video and audio channels &#x22C6;/
- int record_dvr(int demux1, int demux2, uint16_t vpid, uint16_t apid)
- {
- int i;
- int len;
- int written;
- uint8_t buf[BUFFY];
- uint64_t length;
- struct pollfd pfd[1];
- int dvr, dvr_out;
-
- /&#x22C6; open dvr device &#x22C6;/
- if ((dvr = open(DVR, O_RDONLY|O_NONBLOCK)) &#x003C; 0){
- perror("DVR DEVICE");
- return -1;
- }
-
- /&#x22C6; switch video and audio demuxes to dvr &#x22C6;/
- printf ("Switching dvr on\n");
- i = switch_to_record(demux1, demux2, vpid, apid);
- printf("finished: ");
-
- printf("Recording %2.0f MB of test file in TS format\n",
- MAX_LENGTH/(1024.0&#x22C6;1024.0));
- length = 0;
-
- /&#x22C6; open output file &#x22C6;/
- if ((dvr_out = open(DVR_FILE,O_WRONLY|O_CREAT
- |O_TRUNC, S_IRUSR|S_IWUSR
- |S_IRGRP|S_IWGRP|S_IROTH|
- S_IWOTH)) &#x003C; 0){
- perror("Can't open file for dvr test");
- return -1;
- }
-
- pfd[0].fd = dvr;
- pfd[0].events = POLLIN;
-
- /&#x22C6; poll for dvr data and write to file &#x22C6;/
- while (length &#x003C; MAX_LENGTH ) {
- if (poll(pfd,1,1)){
- if (pfd[0].revents &amp; POLLIN){
- len = read(dvr, buf, BUFFY);
- if (len &#x003C; 0){
- perror("recording");
- return -1;
- }
- if (len &#x003E; 0){
- written = 0;
-	                 while (written &#x003C; len)
-	                         written +=
-	                                 write (dvr_out,
-	                                        buf + written, len - written);
- length += len;
- printf("written %2.0f MB\r",
- length/1024./1024.);
- }
- }
- }
- }
- return 0;
- }
-
-</programlisting>
-
-</section>
diff --git a/Documentation/DocBook/media/dvb/fe-diseqc-recv-slave-reply.xml b/Documentation/DocBook/media/dvb/fe-diseqc-recv-slave-reply.xml
deleted file mode 100644
index 4595dbfff208..000000000000
--- a/Documentation/DocBook/media/dvb/fe-diseqc-recv-slave-reply.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-<refentry id="FE_DISEQC_RECV_SLAVE_REPLY">
- <refmeta>
- <refentrytitle>ioctl FE_DISEQC_RECV_SLAVE_REPLY</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_DISEQC_RECV_SLAVE_REPLY</refname>
- <refpurpose>Receives reply from a DiSEqC 2.0 command</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct dvb_diseqc_slave_reply *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_DISEQC_RECV_SLAVE_REPLY</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>pointer to &dvb-diseqc-slave-reply;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Receives reply from a DiSEqC 2.0 command.</para>
-&return-value-dvb;
-
-<table pgwide="1" frame="none" id="dvb-diseqc-slave-reply">
- <title>struct <structname>dvb_diseqc_slave_reply</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>uint8_t</entry>
- <entry>msg[4]</entry>
- <entry>DiSEqC message (framing, data[3])</entry>
- </row><row>
- <entry>uint8_t</entry>
- <entry>msg_len</entry>
-	    <entry>Length of the DiSEqC message. Valid values are 0 to 4,
-		   where 0 means no message</entry>
- </row><row>
- <entry>int</entry>
- <entry>timeout</entry>
-	    <entry>Return from the ioctl after timeout ms with an error code if no
-		   message was received</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
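-  <para>A minimal usage sketch, assuming <parameter>fd</parameter> is an already
-	open frontend file descriptor, a DiSEqC 2.0 capable device is attached and
-	the usual headers are included:</para>
-  <programlisting>
- struct dvb_diseqc_slave_reply reply = {
-         .timeout = 100,        /* give up after 100 ms */
- };
-
- if (ioctl(fd, FE_DISEQC_RECV_SLAVE_REPLY, &amp;reply) == -1)
-         perror("FE_DISEQC_RECV_SLAVE_REPLY");
- else
-         printf("received %d reply byte(s)\n", reply.msg_len);
-</programlisting>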
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-diseqc-reset-overload.xml b/Documentation/DocBook/media/dvb/fe-diseqc-reset-overload.xml
deleted file mode 100644
index c104df77ecd0..000000000000
--- a/Documentation/DocBook/media/dvb/fe-diseqc-reset-overload.xml
+++ /dev/null
@@ -1,51 +0,0 @@
-<refentry id="FE_DISEQC_RESET_OVERLOAD">
- <refmeta>
- <refentrytitle>ioctl FE_DISEQC_RESET_OVERLOAD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_DISEQC_RESET_OVERLOAD</refname>
- <refpurpose>Restores the power to the antenna subsystem, if it was powered
- off due to power overload.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>NULL</paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_DISEQC_RESET_OVERLOAD</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>If the bus has been automatically powered off due to power overload, this ioctl
- call restores the power to the bus. The call requires read/write access to the
- device. This call has no effect if the device is manually powered off. Not all
- DVB adapters support this ioctl.</para>
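-  <para>A minimal sketch, assuming <parameter>fd</parameter> refers to a frontend
-	device opened in read/write mode and the usual headers are included:</para>
-  <programlisting>
- /* re-enable power to the LNB after an automatic overload shutdown */
- if (ioctl(fd, FE_DISEQC_RESET_OVERLOAD, NULL) == -1)
-         perror("FE_DISEQC_RESET_OVERLOAD");
-</programlisting>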
-&return-value-dvb;
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-diseqc-send-burst.xml b/Documentation/DocBook/media/dvb/fe-diseqc-send-burst.xml
deleted file mode 100644
index 9f6a68f32de3..000000000000
--- a/Documentation/DocBook/media/dvb/fe-diseqc-send-burst.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<refentry id="FE_DISEQC_SEND_BURST">
- <refmeta>
- <refentrytitle>ioctl FE_DISEQC_SEND_BURST</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_DISEQC_SEND_BURST</refname>
- <refpurpose>Sends a 22KHz tone burst for 2x1 mini DiSEqC satellite selection.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>enum fe_sec_mini_cmd *<parameter>tone</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_DISEQC_SEND_BURST</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>tone</parameter></term>
- <listitem>
- <para>pointer to &fe-sec-mini-cmd;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>This ioctl is used to set the generation of a 22kHz tone burst for mini
-	DiSEqC satellite selection on 2x1 switches.
-	This call requires read/write permissions.</para>
-<para>It implements what is specified in the
-	<ulink url="http://www.eutelsat.com/files/contributed/satellites/pdf/Diseqc/associated%20docs/simple_tone_burst_detec.pdf">Digital Satellite Equipment Control
-	(DiSEqC) - Simple "ToneBurst" Detection Circuit specification.</ulink>
-	</para>
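-<para>In practice, applications usually pass the &fe-sec-mini-cmd; value directly
-	as the third <function>ioctl</function> argument. A minimal sketch, assuming
-	<parameter>fd</parameter> is an open frontend file descriptor:</para>
-<programlisting>
- /* select satellite A on a 2x1 mini-DiSEqC switch */
- if (ioctl(fd, FE_DISEQC_SEND_BURST, SEC_MINI_A) == -1)
-         perror("FE_DISEQC_SEND_BURST");
-</programlisting>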
-&return-value-dvb;
-</refsect1>
-
-<refsect1 id="fe-sec-mini-cmd-t">
-<title>enum fe_sec_mini_cmd</title>
-
-<table pgwide="1" frame="none" id="fe-sec-mini-cmd">
- <title>enum fe_sec_mini_cmd</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="SEC-MINI-A"><constant>SEC_MINI_A</constant></entry>
- <entry align="char">Sends a mini-DiSEqC 22kHz '0' Tone Burst to
- select satellite-A</entry>
- </row><row>
- <entry align="char" id="SEC-MINI-B"><constant>SEC_MINI_B</constant></entry>
- <entry align="char">Sends a mini-DiSEqC 22kHz '1' Data Burst to
- select satellite-B</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</refsect1>
-
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-diseqc-send-master-cmd.xml b/Documentation/DocBook/media/dvb/fe-diseqc-send-master-cmd.xml
deleted file mode 100644
index 38cf313e121b..000000000000
--- a/Documentation/DocBook/media/dvb/fe-diseqc-send-master-cmd.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<refentry id="FE_DISEQC_SEND_MASTER_CMD">
- <refmeta>
- <refentrytitle>ioctl FE_DISEQC_SEND_MASTER_CMD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_DISEQC_SEND_MASTER_CMD</refname>
- <refpurpose>Sends a DiSEqC command</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct dvb_diseqc_master_cmd *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_DISEQC_SEND_MASTER_CMD</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>pointer to &dvb-diseqc-master-cmd;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Sends a DiSEqC command to the antenna subsystem.</para>
-&return-value-dvb;
-
-<table pgwide="1" frame="none" id="dvb-diseqc-master-cmd">
- <title>struct <structname>dvb_diseqc_master_cmd</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>uint8_t</entry>
- <entry>msg[6]</entry>
- <entry>DiSEqC message (framing, address, command, data[3])</entry>
- </row><row>
- <entry>uint8_t</entry>
- <entry>msg_len</entry>
- <entry>Length of the DiSEqC message. Valid values are 3 to 6</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
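-<para>A minimal sketch, assuming <parameter>fd</parameter> is an open frontend
-	file descriptor; the message bytes below are an illustrative DiSEqC 1.0
-	"write port group 0" command and are not mandated by the API:</para>
-<programlisting>
- /* framing 0xe0, address 0x10, command 0x38, data 0xf0 (committed switches) */
- struct dvb_diseqc_master_cmd cmd = {
-         .msg     = { 0xe0, 0x10, 0x38, 0xf0 },
-         .msg_len = 4,
- };
-
- if (ioctl(fd, FE_DISEQC_SEND_MASTER_CMD, &amp;cmd) == -1)
-         perror("FE_DISEQC_SEND_MASTER_CMD");
-</programlisting>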
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-enable-high-lnb-voltage.xml b/Documentation/DocBook/media/dvb/fe-enable-high-lnb-voltage.xml
deleted file mode 100644
index c11890b184ad..000000000000
--- a/Documentation/DocBook/media/dvb/fe-enable-high-lnb-voltage.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<refentry id="FE_ENABLE_HIGH_LNB_VOLTAGE">
- <refmeta>
- <refentrytitle>ioctl FE_ENABLE_HIGH_LNB_VOLTAGE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_ENABLE_HIGH_LNB_VOLTAGE</refname>
-	 <refpurpose>Select the output DC level between normal LNBf voltages and higher
-	     LNBf voltages.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>unsigned int <parameter>high</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_ENABLE_HIGH_LNB_VOLTAGE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>high</parameter></term>
- <listitem>
- <para>Valid flags:</para>
- <itemizedlist>
- <listitem><para>0 - normal 13V and 18V.</para></listitem>
- <listitem><para>&gt;0 - enables slightly higher voltages instead of
- 13/18V, in order to compensate for long antenna cables.</para></listitem>
- </itemizedlist>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-	 <para>Selects the output DC level between the normal LNBf voltages and
-	     slightly higher voltages: 0 selects the normal 13V/18V levels, while any
-	     value greater than 0 enables the higher voltages.</para>
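-	 <para>A minimal sketch, assuming <parameter>fd</parameter> is a frontend file
-	     descriptor opened in read/write mode:</para>
-	 <programlisting>
- /* compensate for a long antenna cable by raising the LNBf voltage slightly */
- if (ioctl(fd, FE_ENABLE_HIGH_LNB_VOLTAGE, 1) == -1)
-         perror("FE_ENABLE_HIGH_LNB_VOLTAGE");
-</programlisting>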
-&return-value-dvb;
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-get-info.xml b/Documentation/DocBook/media/dvb/fe-get-info.xml
deleted file mode 100644
index ed0eeb29dd65..000000000000
--- a/Documentation/DocBook/media/dvb/fe-get-info.xml
+++ /dev/null
@@ -1,266 +0,0 @@
-<refentry id="FE_GET_INFO">
- <refmeta>
- <refentrytitle>ioctl FE_GET_INFO</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_GET_INFO</refname>
-    <refpurpose>Queries DVB frontend capabilities and returns information about
-	the front-end. This call only requires read-only access to the device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct dvb_frontend_info *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_GET_INFO</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>pointer to struct &dvb-frontend-info;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-  <para>All DVB frontend devices support the
-<constant>FE_GET_INFO</constant> ioctl. It is used to identify
-kernel devices compatible with this specification and to obtain
-information about driver and hardware capabilities. The ioctl takes a
-pointer to struct dvb_frontend_info, which is filled by the driver. When the
-driver is not compatible with this specification, the ioctl returns an error.
-</para>
-&return-value-dvb;
-
- <table pgwide="1" frame="none" id="dvb-frontend-info">
- <title>struct <structname>dvb_frontend_info</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>char</entry>
- <entry>name[128]</entry>
- <entry>Name of the frontend</entry>
- </row><row>
- <entry>fe_type_t</entry>
- <entry>type</entry>
-	    <entry><emphasis role="bold">DEPRECATED</emphasis>. DVBv3 type. Should not be used in modern programs, as a
-		   frontend may have more than one type. The DVBv5 API should
-		   be used instead to enumerate and select the frontend type.</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>frequency_min</entry>
- <entry>Minimal frequency supported by the frontend</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>frequency_max</entry>
- <entry>Maximal frequency supported by the frontend</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>frequency_stepsize</entry>
- <entry>Frequency step - all frequencies are multiple of this value</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>frequency_tolerance</entry>
- <entry>Tolerance of the frequency</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>symbol_rate_min</entry>
- <entry>Minimal symbol rate (for Cable/Satellite systems), in bauds</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>symbol_rate_max</entry>
- <entry>Maximal symbol rate (for Cable/Satellite systems), in bauds</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>symbol_rate_tolerance</entry>
- <entry>Maximal symbol rate tolerance, in ppm</entry>
- </row><row>
- <entry>uint32_t</entry>
- <entry>notifier_delay</entry>
- <entry><emphasis role="bold">DEPRECATED</emphasis>. Not used by any driver.</entry>
- </row><row>
- <entry>&fe-caps;</entry>
- <entry>caps</entry>
- <entry>Capabilities supported by the frontend</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>NOTE: The frequencies are specified in Hz for Terrestrial and Cable
- systems. They're specified in kHz for Satellite systems</para>
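-  <para>A minimal sketch, assuming <parameter>fd</parameter> is an open frontend
-	file descriptor and the usual headers are included:</para>
-  <programlisting>
- struct dvb_frontend_info info;
-
- if (ioctl(fd, FE_GET_INFO, &amp;info) == -1)
-         perror("FE_GET_INFO");
- else
-         printf("%s: frequency range %u - %u\n",
-                info.name, info.frequency_min, info.frequency_max);
-</programlisting>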
- </refsect1>
-
-<refsect1 id="fe-caps-t">
-<title>frontend capabilities</title>
-
-<para>Capabilities describe what a frontend can do. Some capabilities are
- supported only on some specific frontend types.</para>
-
-<table pgwide="1" frame="none" id="fe-caps">
- <title>enum fe_caps</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="FE-IS-STUPID"><constant>FE_IS_STUPID</constant></entry>
- <entry>There's something wrong at the frontend, and it can't
- report its capabilities</entry>
- </row>
- <row>
- <entry id="FE-CAN-INVERSION-AUTO"><constant>FE_CAN_INVERSION_AUTO</constant></entry>
- <entry>The frontend is capable of auto-detecting inversion</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-1-2"><constant>FE_CAN_FEC_1_2</constant></entry>
- <entry>The frontend supports FEC 1/2</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-2-3"><constant>FE_CAN_FEC_2_3</constant></entry>
- <entry>The frontend supports FEC 2/3</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-3-4"><constant>FE_CAN_FEC_3_4</constant></entry>
- <entry>The frontend supports FEC 3/4</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-4-5"><constant>FE_CAN_FEC_4_5</constant></entry>
- <entry>The frontend supports FEC 4/5</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-5-6"><constant>FE_CAN_FEC_5_6</constant></entry>
- <entry>The frontend supports FEC 5/6</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-6-7"><constant>FE_CAN_FEC_6_7</constant></entry>
- <entry>The frontend supports FEC 6/7</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-7-8"><constant>FE_CAN_FEC_7_8</constant></entry>
- <entry>The frontend supports FEC 7/8</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-8-9"><constant>FE_CAN_FEC_8_9</constant></entry>
- <entry>The frontend supports FEC 8/9</entry>
- </row>
- <row>
- <entry id="FE-CAN-FEC-AUTO"><constant>FE_CAN_FEC_AUTO</constant></entry>
- <entry>The frontend can autodetect FEC.</entry>
- </row>
- <row>
- <entry id="FE-CAN-QPSK"><constant>FE_CAN_QPSK</constant></entry>
- <entry>The frontend supports QPSK modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-QAM-16"><constant>FE_CAN_QAM_16</constant></entry>
- <entry>The frontend supports 16-QAM modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-QAM-32"><constant>FE_CAN_QAM_32</constant></entry>
- <entry>The frontend supports 32-QAM modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-QAM-64"><constant>FE_CAN_QAM_64</constant></entry>
- <entry>The frontend supports 64-QAM modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-QAM-128"><constant>FE_CAN_QAM_128</constant></entry>
- <entry>The frontend supports 128-QAM modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-QAM-256"><constant>FE_CAN_QAM_256</constant></entry>
- <entry>The frontend supports 256-QAM modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-QAM-AUTO"><constant>FE_CAN_QAM_AUTO</constant></entry>
- <entry>The frontend can autodetect modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-TRANSMISSION-MODE-AUTO"><constant>FE_CAN_TRANSMISSION_MODE_AUTO</constant></entry>
- <entry>The frontend can autodetect the transmission mode</entry>
- </row>
- <row>
- <entry id="FE-CAN-BANDWIDTH-AUTO"><constant>FE_CAN_BANDWIDTH_AUTO</constant></entry>
- <entry>The frontend can autodetect the bandwidth</entry>
- </row>
- <row>
- <entry id="FE-CAN-GUARD-INTERVAL-AUTO"><constant>FE_CAN_GUARD_INTERVAL_AUTO</constant></entry>
- <entry>The frontend can autodetect the guard interval</entry>
- </row>
- <row>
- <entry id="FE-CAN-HIERARCHY-AUTO"><constant>FE_CAN_HIERARCHY_AUTO</constant></entry>
-	    <entry>The frontend can autodetect the hierarchy</entry>
- </row>
- <row>
- <entry id="FE-CAN-8VSB"><constant>FE_CAN_8VSB</constant></entry>
- <entry>The frontend supports 8-VSB modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-16VSB"><constant>FE_CAN_16VSB</constant></entry>
- <entry>The frontend supports 16-VSB modulation</entry>
- </row>
- <row>
- <entry id="FE-HAS-EXTENDED-CAPS"><constant>FE_HAS_EXTENDED_CAPS</constant></entry>
-	    <entry>Currently unused</entry>
- </row>
- <row>
- <entry id="FE-CAN-MULTISTREAM"><constant>FE_CAN_MULTISTREAM</constant></entry>
- <entry>The frontend supports multistream filtering</entry>
- </row>
- <row>
- <entry id="FE-CAN-TURBO-FEC"><constant>FE_CAN_TURBO_FEC</constant></entry>
- <entry>The frontend supports turbo FEC modulation</entry>
- </row>
- <row>
- <entry id="FE-CAN-2G-MODULATION"><constant>FE_CAN_2G_MODULATION</constant></entry>
-	    <entry>The frontend supports "2nd generation modulation" (DVB-S2/T2)</entry>
- </row>
- <row>
- <entry id="FE-NEEDS-BENDING"><constant>FE_NEEDS_BENDING</constant></entry>
- <entry>Not supported anymore, don't use it</entry>
- </row>
- <row>
- <entry id="FE-CAN-RECOVER"><constant>FE_CAN_RECOVER</constant></entry>
- <entry>The frontend can recover from a cable unplug automatically</entry>
- </row>
- <row>
- <entry id="FE-CAN-MUTE-TS"><constant>FE_CAN_MUTE_TS</constant></entry>
- <entry>The frontend can stop spurious TS data output</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-get-property.xml b/Documentation/DocBook/media/dvb/fe-get-property.xml
deleted file mode 100644
index 53a170ed3bd1..000000000000
--- a/Documentation/DocBook/media/dvb/fe-get-property.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<refentry id="FE_GET_PROPERTY">
- <refmeta>
- <refentrytitle>ioctl FE_SET_PROPERTY, FE_GET_PROPERTY</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_SET_PROPERTY</refname>
- <refname>FE_GET_PROPERTY</refname>
- <refpurpose>FE_SET_PROPERTY sets one or more frontend properties.
- FE_GET_PROPERTY returns one or more frontend properties.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct dtv_properties *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_SET_PROPERTY, FE_GET_PROPERTY</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>pointer to &dtv-properties;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>All DVB frontend devices support the
-<constant>FE_SET_PROPERTY</constant> and <constant>FE_GET_PROPERTY</constant>
-ioctls. The supported properties and statistics depend on the delivery system
-and on the device:</para>
-<itemizedlist>
-<listitem>
- <para><constant>FE_SET_PROPERTY:</constant></para>
-<itemizedlist>
-<listitem><para>This ioctl is used to set one or more
- frontend properties.</para></listitem>
-<listitem><para>This is the basic command to request the frontend to tune into some
- frequency and to start decoding the digital TV signal.</para></listitem>
-<listitem><para>This call requires read/write access to the device.</para></listitem>
-<listitem><para>At return, the values are updated to reflect the
- actual parameters used.</para></listitem>
-</itemizedlist>
-</listitem>
-<listitem>
- <para><constant>FE_GET_PROPERTY:</constant></para>
-<itemizedlist>
-<listitem><para>This ioctl is used to get properties and
-statistics from the frontend (see the example below).</para></listitem>
-<listitem><para>No properties are changed, and statistics aren't reset.</para></listitem>
-<listitem><para>This call only requires read-only access to the device.</para></listitem>
-</itemizedlist>
-</listitem>
-</itemizedlist>
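-<para>A minimal <constant>FE_GET_PROPERTY</constant> sketch, assuming
-	<parameter>fd</parameter> is an open frontend file descriptor, which reads
-	back the API version and the currently selected delivery system (a full
-	tuning sequence using <constant>FE_SET_PROPERTY</constant> is sketched in
-	the DVB-C delivery system section):</para>
-<programlisting>
- struct dtv_property p[] = {
-         { .cmd = DTV_API_VERSION },
-         { .cmd = DTV_DELIVERY_SYSTEM },
- };
- struct dtv_properties props = {
-         .num   = sizeof(p) / sizeof(p[0]),
-         .props = p,
- };
-
- if (ioctl(fd, FE_GET_PROPERTY, &amp;props) == -1)
-         perror("FE_GET_PROPERTY");
- else
-         printf("DVB API %d.%d, delivery system %u\n",
-                (int)(p[0].u.data >> 8), (int)(p[0].u.data &amp; 0xff),
-                (unsigned)p[1].u.data);
-</programlisting>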
-&return-value-dvb;
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-read-status.xml b/Documentation/DocBook/media/dvb/fe-read-status.xml
deleted file mode 100644
index bc0dc2a55f19..000000000000
--- a/Documentation/DocBook/media/dvb/fe-read-status.xml
+++ /dev/null
@@ -1,107 +0,0 @@
-<refentry id="FE_READ_STATUS">
- <refmeta>
- <refentrytitle>ioctl FE_READ_STATUS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_READ_STATUS</refname>
- <refpurpose>Returns status information about the front-end. This call only
- requires read-only access to the device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>unsigned int *<parameter>status</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_READ_STATUS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>status</parameter></term>
- <listitem>
- <para>pointer to a bitmask integer filled with the values defined by
- &fe-status;.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>All DVB frontend devices support the
-<constant>FE_READ_STATUS</constant> ioctl. It is used to check the
-locking status of the frontend after it has been tuned. The ioctl takes a
-pointer to an integer where the status will be written.
-</para>
-<para>NOTE: the size of status is actually sizeof(enum fe_status), which varies
-	according to the architecture. This needs to be fixed in the future.</para>
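-<para>A minimal sketch, assuming <parameter>fd</parameter> is an open frontend
-	file descriptor and the usual headers are included:</para>
-<programlisting>
- fe_status_t status = 0;
-
- if (ioctl(fd, FE_READ_STATUS, &amp;status) == -1)
-         perror("FE_READ_STATUS");
- else if (status &amp; FE_HAS_LOCK)
-         printf("frontend is locked\n");
-</programlisting>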
-&return-value-dvb;
-</refsect1>
-
-<refsect1 id="fe-status-t">
-<title>int fe_status</title>
-
-<para>The fe_status parameter is used to indicate the current state
-	and/or state changes of the frontend hardware. It is produced as a
-	bitmask of &fe-status; values.</para>
-
-<table pgwide="1" frame="none" id="fe-status">
- <title>enum fe_status</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="FE-HAS-SIGNAL"><constant>FE_HAS_SIGNAL</constant></entry>
- <entry align="char">The frontend has found something above the noise level</entry>
- </row><row>
- <entry align="char" id="FE-HAS-CARRIER"><constant>FE_HAS_CARRIER</constant></entry>
- <entry align="char">The frontend has found a DVB signal</entry>
- </row><row>
- <entry align="char" id="FE-HAS-VITERBI"><constant>FE_HAS_VITERBI</constant></entry>
- <entry align="char">The frontend FEC inner coding (Viterbi, LDPC or other inner code) is stable</entry>
- </row><row>
- <entry align="char" id="FE-HAS-SYNC"><constant>FE_HAS_SYNC</constant></entry>
-	    <entry align="char">Synchronization bytes were found</entry>
- </row><row>
- <entry align="char" id="FE-HAS-LOCK"><constant>FE_HAS_LOCK</constant></entry>
-	    <entry align="char">The DVB signal is locked and everything is working</entry>
- </row><row>
- <entry align="char" id="FE-TIMEDOUT"><constant>FE_TIMEDOUT</constant></entry>
-	    <entry align="char">No lock within roughly the last 2 seconds</entry>
- </row><row>
- <entry align="char" id="FE-REINIT"><constant>FE_REINIT</constant></entry>
-	    <entry align="char">The frontend was reinitialized; applications are
-			    advised to reset DiSEqC, tone and parameters</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-set-frontend-tune-mode.xml b/Documentation/DocBook/media/dvb/fe-set-frontend-tune-mode.xml
deleted file mode 100644
index 99fa8a015c7a..000000000000
--- a/Documentation/DocBook/media/dvb/fe-set-frontend-tune-mode.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<refentry id="FE_SET_FRONTEND_TUNE_MODE">
- <refmeta>
- <refentrytitle>ioctl FE_SET_FRONTEND_TUNE_MODE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_SET_FRONTEND_TUNE_MODE</refname>
- <refpurpose>Allow setting tuner mode flags to the frontend.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>unsigned int <parameter>flags</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_SET_FRONTEND_TUNE_MODE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>flags</parameter></term>
- <listitem>
- <para>Valid flags:</para>
- <itemizedlist>
- <listitem><para>0 - normal tune mode</para></listitem>
- <listitem><para>FE_TUNE_MODE_ONESHOT - When set, this flag will
- disable any zigzagging or other "normal" tuning behaviour.
- Additionally, there will be no automatic monitoring of the
- lock status, and hence no frontend events will be
- generated. If a frontend device is closed, this flag will
- be automatically turned off when the device is reopened
- read-write.</para></listitem>
- </itemizedlist>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-  <para>Allows setting the tuner mode flags of the frontend to either 0 (normal)
-	or FE_TUNE_MODE_ONESHOT</para>
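-  <para>A minimal sketch, assuming <parameter>fd</parameter> is a frontend file
-	descriptor opened in read/write mode:</para>
-  <programlisting>
- /* disable zigzag scanning and automatic lock monitoring for this descriptor */
- if (ioctl(fd, FE_SET_FRONTEND_TUNE_MODE, FE_TUNE_MODE_ONESHOT) == -1)
-         perror("FE_SET_FRONTEND_TUNE_MODE");
-</programlisting>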
-&return-value-dvb;
-</refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-set-tone.xml b/Documentation/DocBook/media/dvb/fe-set-tone.xml
deleted file mode 100644
index 62d44e4ccc39..000000000000
--- a/Documentation/DocBook/media/dvb/fe-set-tone.xml
+++ /dev/null
@@ -1,91 +0,0 @@
-<refentry id="FE_SET_TONE">
- <refmeta>
- <refentrytitle>ioctl FE_SET_TONE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_SET_TONE</refname>
- <refpurpose>Sets/resets the generation of the continuous 22kHz tone.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>enum fe_sec_tone_mode *<parameter>tone</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_SET_TONE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>tone</parameter></term>
- <listitem>
- <para>pointer to &fe-sec-tone-mode;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>This ioctl is used to set the generation of the continuous 22kHz tone.
- This call requires read/write permissions.</para>
-<para>Usually, satellite antenna subsystems require the digital TV
-	device to send a 22kHz tone in order to select between the high and low band
-	on some dual-band LNBfs. The tone is also used to send signals to DiSEqC
-	equipment, but this is done using the DiSEqC ioctls.</para>
-<para>NOTE: if more than one device is connected to the same antenna,
-	setting a tone may interfere with other devices, as they may lose
-	the capability of selecting the band. So, it is recommended that
-	applications switch to SEC_TONE_OFF when the device is not in use.</para>
-
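-<para>In practice, applications usually pass the &fe-sec-tone-mode; value directly
-	as the third <function>ioctl</function> argument. A minimal sketch, assuming
-	<parameter>fd</parameter> is a frontend file descriptor opened in read/write
-	mode:</para>
-<programlisting>
- /* switch the continuous 22kHz tone off while the device is idle */
- if (ioctl(fd, FE_SET_TONE, SEC_TONE_OFF) == -1)
-         perror("FE_SET_TONE");
-</programlisting>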
-&return-value-dvb;
-</refsect1>
-
-<refsect1 id="fe-sec-tone-mode-t">
-<title>enum fe_sec_tone_mode</title>
-
-<table pgwide="1" frame="none" id="fe-sec-tone-mode">
- <title>enum fe_sec_tone_mode</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char" id="SEC-TONE-ON"><constant>SEC_TONE_ON</constant></entry>
-	    <entry align="char">Sends a continuous 22kHz tone to the antenna</entry>
- </row><row>
- <entry align="char" id="SEC-TONE-OFF"><constant>SEC_TONE_OFF</constant></entry>
- <entry align="char">Don't send a 22kHz tone to the antenna
- (except if the FE_DISEQC_* ioctls are called)</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</refsect1>
-
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/fe-set-voltage.xml b/Documentation/DocBook/media/dvb/fe-set-voltage.xml
deleted file mode 100644
index c89a6f79b5af..000000000000
--- a/Documentation/DocBook/media/dvb/fe-set-voltage.xml
+++ /dev/null
@@ -1,69 +0,0 @@
-<refentry id="FE_SET_VOLTAGE">
- <refmeta>
- <refentrytitle>ioctl FE_SET_VOLTAGE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>FE_SET_VOLTAGE</refname>
- <refpurpose>Allow setting the DC level sent to the antenna subsystem.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>enum fe_sec_voltage *<parameter>voltage</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>FE_SET_VOLTAGE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>voltage</parameter></term>
- <listitem>
- <para>pointer to &fe-sec-voltage;</para>
- <para>Valid values are described at &fe-sec-voltage;.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>This ioctl allows setting the DC voltage level sent through the antenna
-	cable to 13V, 18V or off.</para>
-<para>Usually, a satellite antenna subsystem requires the digital TV
-	device to send a DC voltage to feed power to the LNBf. Depending on the
-	LNBf type, the polarization or the intermediate frequency (IF) of the LNBf
-	can be controlled by the voltage level. Other devices (for example, the ones
-	that implement DiSEqC and multipoint LNBfs) don't need to control the
-	voltage level, provided that either 13V or 18V is sent to power up the
-	LNBf.</para>
-<para>NOTE: if more than one device is connected to the same antenna,
-	setting a voltage level may interfere with other devices, as they may lose
-	the capability of setting polarization or IF. So, in those
-	cases, setting the voltage to SEC_VOLTAGE_OFF while the device is not in
-	use is recommended.</para>
-
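-<para>In practice, applications usually pass the &fe-sec-voltage; value directly
-	as the third <function>ioctl</function> argument. A minimal sketch, assuming
-	<parameter>fd</parameter> is a frontend file descriptor opened in read/write
-	mode and that a typical universal LNBf maps 13V to vertical polarization:</para>
-<programlisting>
- /* select 13V, i.e. vertical polarization on a typical universal LNBf */
- if (ioctl(fd, FE_SET_VOLTAGE, SEC_VOLTAGE_13) == -1)
-         perror("FE_SET_VOLTAGE");
-</programlisting>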
-&return-value-dvb;
-</refsect1>
-
-</refentry>
diff --git a/Documentation/DocBook/media/dvb/frontend.xml b/Documentation/DocBook/media/dvb/frontend.xml
deleted file mode 100644
index 01210b33c130..000000000000
--- a/Documentation/DocBook/media/dvb/frontend.xml
+++ /dev/null
@@ -1,269 +0,0 @@
-<title>DVB Frontend API</title>
-
-<para>The DVB frontend API was designed to support three types of delivery systems:</para>
-<itemizedlist>
- <listitem><para>Terrestrial systems: DVB-T, DVB-T2, ATSC, ATSC M/H, ISDB-T, DVB-H, DTMB, CMMB</para></listitem>
- <listitem><para>Cable systems: DVB-C Annex A/C, ClearQAM (DVB-C Annex B), ISDB-C</para></listitem>
- <listitem><para>Satellite systems: DVB-S, DVB-S2, DVB Turbo, ISDB-S, DSS</para></listitem>
-</itemizedlist>
-<para>The DVB frontend controls several sub-devices including:</para>
-<itemizedlist>
- <listitem><para>Tuner</para></listitem>
- <listitem><para>Digital TV demodulator</para></listitem>
- <listitem><para>Low noise amplifier (LNA)</para></listitem>
- <listitem><para>Satellite Equipment Control (SEC) hardware (only for Satellite).</para></listitem>
-</itemizedlist>
-<para>The frontend can be accessed through
- <constant>/dev/dvb/adapter?/frontend?</constant>. Data types and
- ioctl definitions can be accessed by including
- <constant>linux/dvb/frontend.h</constant> in your application.
-</para>
-
-<para>NOTE: Transmission via the internet (DVB-IP)
- is not yet handled by this API but a future extension is possible.</para>
-<para>On Satellite systems, the API support for the Satellite Equipment Control
- (SEC) allows power control and sending/receiving signals to control the
- antenna subsystem, selecting the polarization and choosing the Intermediate
- Frequency (IF) of the Low Noise Block Converter Feed Horn (LNBf). It
- supports the DiSEqC and V-SEC protocols. The DiSEqC (digital SEC)
-specification is available at
-<ulink url="http://www.eutelsat.com/satellites/4_5_5.html">Eutelsat</ulink>.</para>
-
-<section id="query-dvb-frontend-info">
-<title>Querying frontend information</title>
-
-<para>Usually, the first thing to do when the frontend is opened is to
- check the frontend capabilities. This is done using <link linkend="FE_GET_INFO">FE_GET_INFO</link>. This ioctl enumerates
- the DVB API version and other characteristics of the frontend, and it
- can be used whether the device is opened in read-only or read/write mode.</para>
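-
-<para>As a hedged illustration (not part of the API definition), such a query
- could look like the sketch below; it assumes an already opened frontend file
- descriptor and keeps error handling minimal:</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static int print_fe_info(int fd)
-{
-    struct dvb_frontend_info info;
-
-    if (ioctl(fd, FE_GET_INFO, &amp;info) == -1)
-        return -1;
-
-    /* info.name identifies the frontend driver/hardware */
-    printf("frontend: %s\n", info.name);
-    if (info.caps &amp; FE_CAN_QAM_AUTO)
-        printf("QAM auto-detection is supported\n");
-    return 0;
-}
-</programlisting>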
-</section>
-
-<section id="dvb-fe-read-status">
-<title>Querying frontend status and statistics</title>
-
-<para>Once <link linkend="FE_GET_PROPERTY"><constant>FE_SET_PROPERTY</constant></link>
- is called, the frontend will run a kernel thread that will periodically
- check for the tuner lock status and provide statistics about the quality
- of the signal.</para>
-<para>The information about the frontend tuner locking status can be queried
- using <link linkend="FE_READ_STATUS">FE_READ_STATUS</link>.</para>
-<para>Signal statistics are provided via <link linkend="FE_GET_PROPERTY"><constant>FE_GET_PROPERTY</constant></link>.
- Please note that several statistics require the demodulator to be fully
- locked (e. g. with FE_HAS_LOCK bit set). See
- <link linkend="frontend-stat-properties">Frontend statistics indicators</link>
- for more details.</para>
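-
-<para>A minimal sketch of such a lock check (fd is an already opened frontend
- file descriptor; this is an illustration, not part of the API definition):</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static int has_lock(int fd)
-{
-    fe_status_t status = 0;
-
-    if (ioctl(fd, FE_READ_STATUS, &amp;status) == -1)
-        return -1;
-
-    /* FE_HAS_LOCK is set once the demodulator is fully locked */
-    return (status &amp; FE_HAS_LOCK) ? 1 : 0;
-}
-</programlisting>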
-</section>
-
-&sub-dvbproperty;
-
-<section id="frontend_fcalls">
-<title>Frontend Function Calls</title>
-
-<refentry id="frontend_f_open">
- <refmeta>
- <refentrytitle>DVB frontend open()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>fe-open</refname>
- <refpurpose>Open a frontend device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;fcntl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>open</function></funcdef>
- <paramdef>const char *<parameter>device_name</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>device_name</parameter></term>
- <listitem>
- <para>Device to be opened.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>flags</parameter></term>
- <listitem>
- <para>Open flags. Access can either be
- <constant>O_RDWR</constant> or <constant>O_RDONLY</constant>.</para>
- <para>Multiple opens are allowed with <constant>O_RDONLY</constant>. In this mode, only query and read ioctls are allowed.</para>
- <para>Only one open is allowed in <constant>O_RDWR</constant>. In this mode, all ioctls are allowed.</para>
- <para>When the <constant>O_NONBLOCK</constant> flag is given, the system calls may return &EAGAIN; when no data is available or when the device driver is temporarily busy.</para>
- <para>Other flags have no effect.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
- <refsect1>
- <title>Description</title>
- <para>This system call opens a named frontend device (<constant>/dev/dvb/adapter?/frontend?</constant>)
- for subsequent use. Usually the first thing to do after a successful open is to
- find out the frontend type with <link linkend="FE_GET_INFO">FE_GET_INFO</link>.</para>
-<para>The device can be opened in read-only mode, which only allows monitoring of
- device status and statistics, or read/write mode, which allows any kind of use
- (e.g. performing tuning operations.)
-</para>
-<para>In a system with multiple front-ends, it is usually the case that multiple devices
- cannot be open in read/write mode simultaneously. As long as a front-end
- device is opened in read/write mode, other open() calls in read/write mode will
- either fail or block, depending on whether non-blocking or blocking mode was
- specified. A front-end device opened in blocking mode can later be put into
- non-blocking mode (and vice versa) using the F_SETFL command of the fcntl
- system call. This is a standard system call, documented in the Linux manual
- page for fcntl. When an open() call has succeeded, the device will be ready
- for use in the specified mode. This implies that the corresponding hardware is
- powered up, and that other front-ends may have been powered down to make
- that possible.</para>
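-
-<para>For illustration only, a read/write open of the first frontend of the
- first adapter could look like this (the device path is just an example):</para>
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-
-int main(void)
-{
-    int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);
-
-    if (fd == -1) {
-        perror("open");
-        return 1;
-    }
-    /* ... query and tune the frontend here ... */
-    return 0;
-}
-</programlisting>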
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success <function>open</function> returns the new file
-descriptor. On error -1 is returned, and the <varname>errno</varname>
-variable is set appropriately. Possible error codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>The caller has no permission to access the
-device.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
-<para>The device driver is already in use.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENXIO</errorcode></term>
- <listitem>
- <para>No device corresponding to this device special file
-exists.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOMEM</errorcode></term>
- <listitem>
- <para>Not enough kernel memory was available to complete the
-request.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EMFILE</errorcode></term>
- <listitem>
- <para>The process already has the maximum number of
-files open.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENFILE</errorcode></term>
- <listitem>
- <para>The limit on the total number of files open on the
-system has been reached.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENODEV</errorcode></term>
- <listitem>
- <para>The device got removed.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
-
-<refentry id="frontend_f_close">
- <refmeta>
- <refentrytitle>DVB frontend close()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>fe-close</refname>
- <refpurpose>Close a frontend device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>close</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-<para>This system call closes a previously opened front-end device. After closing
- a front-end device, its corresponding hardware might be powered down
- automatically.</para>
-</refsect1>
- <refsect1>
- <title>Return Value</title>
-
- <para>The function returns <returnvalue>0</returnvalue> on
-success, <returnvalue>-1</returnvalue> on failure and the
-<varname>errno</varname> is set appropriately. Possible error
-codes:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid open file
-descriptor.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
-
-&sub-fe-get-info;
-&sub-fe-read-status;
-&sub-fe-get-property;
-&sub-fe-diseqc-reset-overload;
-&sub-fe-diseqc-send-master-cmd;
-&sub-fe-diseqc-recv-slave-reply;
-&sub-fe-diseqc-send-burst;
-&sub-fe-set-tone;
-&sub-fe-set-voltage;
-&sub-fe-enable-high-lnb-voltage;
-&sub-fe-set-frontend-tune-mode;
-
-</section>
-
-<section id="frontend_legacy_dvbv3_api">
-<title>DVB Frontend legacy API (a. k. a. DVBv3)</title>
-<para>The usage of this API is deprecated, as it doesn't support all digital
- TV standards, doesn't provide good statistics measurements and provides
- incomplete information. This is kept only to support legacy applications.</para>
-
-&sub-frontend_legacy_api;
-</section>
diff --git a/Documentation/DocBook/media/dvb/frontend_legacy_api.xml b/Documentation/DocBook/media/dvb/frontend_legacy_api.xml
deleted file mode 100644
index 8fadf3a4ba44..000000000000
--- a/Documentation/DocBook/media/dvb/frontend_legacy_api.xml
+++ /dev/null
@@ -1,654 +0,0 @@
-<section id="frontend_legacy_types">
-<title>Frontend Legacy Data Types</title>
-
-<section id="fe-type-t">
-<title>Frontend type</title>
-
-<para>For historical reasons, frontend types are named by the type of modulation
- used in transmission. The frontend types are given by the fe_type_t type, defined as:</para>
-
-<table pgwide="1" frame="none" id="fe-type">
-<title>Frontend types</title>
-<tgroup cols="3">
- &cs-def;
- <thead>
- <row>
- <entry>fe_type</entry>
- <entry>Description</entry>
- <entry><link linkend="DTV-DELIVERY-SYSTEM">DTV_DELIVERY_SYSTEM</link> equivalent type</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="FE-QPSK"><constant>FE_QPSK</constant></entry>
- <entry>For DVB-S standard</entry>
- <entry><constant>SYS_DVBS</constant></entry>
- </row>
- <row>
- <entry id="FE-QAM"><constant>FE_QAM</constant></entry>
- <entry>For DVB-C annex A standard</entry>
- <entry><constant>SYS_DVBC_ANNEX_A</constant></entry>
- </row>
- <row>
- <entry id="FE-OFDM"><constant>FE_OFDM</constant></entry>
- <entry>For DVB-T standard</entry>
- <entry><constant>SYS_DVBT</constant></entry>
- </row>
- <row>
- <entry id="FE-ATSC"><constant>FE_ATSC</constant></entry>
- <entry>For ATSC standard (terrestrial) or for DVB-C Annex B (cable) used in US.</entry>
- <entry><constant>SYS_ATSC</constant> (terrestrial) or <constant>SYS_DVBC_ANNEX_B</constant> (cable)</entry>
- </row>
-</tbody></tgroup></table>
-
-<para>Newer formats like DVB-S2, ISDB-T, ISDB-S and DVB-T2 are not described above, as they're
-supported via the new <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY/FE_SET_PROPERTY</link> ioctls, using the <link linkend="DTV-DELIVERY-SYSTEM">DTV_DELIVERY_SYSTEM</link> parameter.
-</para>
-
-<para>In the old days, &dvb-frontend-info; used to contain
- <constant>fe_type_t</constant> field to indicate the delivery systems,
- filled with either FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC. While this is
- still filled to keep backward compatibility, the usage of this
- field is deprecated, as it can report just one delivery system, but some
- devices support multiple delivery systems. Please use
- <link linkend="DTV-ENUM-DELSYS">DTV_ENUM_DELSYS</link> instead.
-</para>
-<para>On devices that support multiple delivery systems,
- &dvb-frontend-info;::<constant>fe_type_t</constant> is filled with the
- standard currently in use, as selected by the last call to
- <link linkend="FE_GET_PROPERTY">FE_SET_PROPERTY</link>
- using the &DTV-DELIVERY-SYSTEM; property.</para>
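-
-<para>A sketch of such an enumeration, assuming a DVBv5-aware application
- (error handling trimmed; this is an illustration only):</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static void list_delivery_systems(int fd)
-{
-    struct dtv_property p = { .cmd = DTV_ENUM_DELSYS };
-    struct dtv_properties props = { .num = 1, .props = &amp;p };
-    unsigned int i;
-
-    if (ioctl(fd, FE_GET_PROPERTY, &amp;props) == -1)
-        return;
-
-    /* the supported delivery systems are returned in u.buffer */
-    for (i = 0; i &lt; p.u.buffer.len; i++)
-        printf("delivery system: %u\n", p.u.buffer.data[i]);
-}
-</programlisting>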
-</section>
-
-<section id="fe-bandwidth-t">
-<title>Frontend bandwidth</title>
-
-<table pgwide="1" frame="none" id="fe-bandwidth">
- <title>enum fe_bandwidth</title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry id="BANDWIDTH-AUTO"><constant>BANDWIDTH_AUTO</constant></entry>
- <entry>Autodetect bandwidth (if supported)</entry>
- </row><row>
- <entry id="BANDWIDTH-1-712-MHZ"><constant>BANDWIDTH_1_712_MHZ</constant></entry>
- <entry>1.712 MHz</entry>
- </row><row>
- <entry id="BANDWIDTH-5-MHZ"><constant>BANDWIDTH_5_MHZ</constant></entry>
- <entry>5 MHz</entry>
- </row><row>
- <entry id="BANDWIDTH-6-MHZ"><constant>BANDWIDTH_6_MHZ</constant></entry>
- <entry>6 MHz</entry>
- </row><row>
- <entry id="BANDWIDTH-7-MHZ"><constant>BANDWIDTH_7_MHZ</constant></entry>
- <entry>7 MHz</entry>
- </row><row>
- <entry id="BANDWIDTH-8-MHZ"><constant>BANDWIDTH_8_MHZ</constant></entry>
- <entry>8 MHz</entry>
- </row><row>
- <entry id="BANDWIDTH-10-MHZ"><constant>BANDWIDTH_10_MHZ</constant></entry>
- <entry>10 MHz</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
-</section>
-
-<section id="dvb-frontend-parameters">
-<title>frontend parameters</title>
-<para>The kind of parameters passed to the frontend device for tuning depends on
-the kind of hardware you are using.</para>
-<para>The struct <constant>dvb_frontend_parameters</constant> uses a
-union with specific per-system parameters. However, as newer delivery systems
-required more data, the structure size wasn't enough to fit them, and just
-extending its size would break the existing applications. So, those parameters
-were replaced by the usage of the <link linkend="FE_GET_PROPERTY">
-<constant>FE_GET_PROPERTY/FE_SET_PROPERTY</constant></link> ioctls. The
-new API is flexible enough to add new parameters to existing delivery systems,
-and to add newer delivery systems.</para>
-<para>So, newer applications should use <link linkend="FE_GET_PROPERTY">
-<constant>FE_GET_PROPERTY/FE_SET_PROPERTY</constant></link> instead, in
-order to be able to support newer delivery systems like DVB-S2, DVB-T2,
-DVB-C2, ISDB, etc.</para>
-<para>All kinds of parameters are combined as a union in the <constant>dvb_frontend_parameters</constant> structure:
-<programlisting>
-struct dvb_frontend_parameters {
- uint32_t frequency; /&#x22C6; (absolute) frequency in Hz for QAM/OFDM &#x22C6;/
- /&#x22C6; intermediate frequency in kHz for QPSK &#x22C6;/
- &fe-spectral-inversion-t; inversion;
- union {
- struct dvb_qpsk_parameters qpsk;
- struct dvb_qam_parameters qam;
- struct dvb_ofdm_parameters ofdm;
- struct dvb_vsb_parameters vsb;
- } u;
-};
-</programlisting></para>
-<para>In the case of QPSK frontends the <constant>frequency</constant> field specifies the intermediate
-frequency, i.e. the offset which is effectively added to the local oscillator frequency (LOF) of
-the LNB. The intermediate frequency has to be specified in units of kHz. For QAM and
-OFDM frontends the <constant>frequency</constant> specifies the absolute frequency and is given in Hz.
-</para>
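-
-<para>As a worked example of this unit convention (the values are hypothetical):
- for a DVB-S transponder at 11777 MHz received through a universal LNBf with a
- high-band LOF of 10600 MHz, the intermediate frequency is
- 11777 - 10600 = 1177 MHz, i.e. 1177000 kHz:</para>
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static int tune_legacy_qpsk(int fd)
-{
-    struct dvb_frontend_parameters p;
-
-    memset(&amp;p, 0, sizeof(p));
-    p.frequency = 1177000;            /* QPSK: IF in kHz (11777 MHz - 10600 MHz LOF) */
-    p.inversion = INVERSION_AUTO;
-    p.u.qpsk.symbol_rate = 27500000;  /* symbols per second */
-    p.u.qpsk.fec_inner = FEC_AUTO;
-
-    /* legacy tuning call, described later in this chapter */
-    return ioctl(fd, FE_SET_FRONTEND, &amp;p);
-}
-</programlisting>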
-
-<section id="dvb-qpsk-parameters">
-<title>QPSK parameters</title>
-<para>For satellite QPSK frontends you have to use the <constant>dvb_qpsk_parameters</constant> structure:</para>
-<programlisting>
- struct dvb_qpsk_parameters {
- uint32_t symbol_rate; /&#x22C6; symbol rate in Symbols per second &#x22C6;/
- &fe-code-rate-t; fec_inner; /&#x22C6; forward error correction (see above) &#x22C6;/
- };
-</programlisting>
-</section>
-
-<section id="dvb-qam-parameters">
-<title>QAM parameters</title>
-<para>for cable QAM frontend you use the <constant>dvb_qam_parameters</constant> structure:</para>
-<programlisting>
- struct dvb_qam_parameters {
- uint32_t symbol_rate; /&#x22C6; symbol rate in Symbols per second &#x22C6;/
- &fe-code-rate-t; fec_inner; /&#x22C6; forward error correction (see above) &#x22C6;/
- &fe-modulation-t; modulation; /&#x22C6; modulation type (see above) &#x22C6;/
- };
-</programlisting>
-</section>
-
-<section id="dvb-vsb-parameters">
-<title>VSB parameters</title>
-<para>ATSC frontends are supported by the <constant>dvb_vsb_parameters</constant> structure:</para>
-<programlisting>
-struct dvb_vsb_parameters {
- &fe-modulation-t; modulation; /&#x22C6; modulation type (see above) &#x22C6;/
-};
-</programlisting>
-</section>
-
-<section id="dvb-ofdm-parameters">
-<title>OFDM parameters</title>
-<para>DVB-T frontends are supported by the <constant>dvb_ofdm_parameters</constant> structure:</para>
-<programlisting>
- struct dvb_ofdm_parameters {
- &fe-bandwidth-t; bandwidth;
- &fe-code-rate-t; code_rate_HP; /&#x22C6; high priority stream code rate &#x22C6;/
- &fe-code-rate-t; code_rate_LP; /&#x22C6; low priority stream code rate &#x22C6;/
- &fe-modulation-t; constellation; /&#x22C6; modulation type (see above) &#x22C6;/
- &fe-transmit-mode-t; transmission_mode;
- &fe-guard-interval-t; guard_interval;
- &fe-hierarchy-t; hierarchy_information;
- };
-</programlisting>
-</section>
-</section>
-
-<section id="dvb-frontend-event">
-<title>frontend events</title>
- <programlisting>
- struct dvb_frontend_event {
- fe_status_t status;
- struct dvb_frontend_parameters parameters;
- };
-</programlisting>
- </section>
-</section>
-
-<section id="frontend_legacy_fcalls">
-<title>Frontend Legacy Function Calls</title>
-
-<para>Those functions were defined in DVB API version 3. Support is kept in
- the kernel only for compatibility reasons. Their usage is strongly
- discouraged.</para>
-
-<section id="FE_READ_BER">
-<title>FE_READ_BER</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns the bit error rate for the signal currently
- received/demodulated by the front-end. For this command, read-only access to
- the device is sufficient.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = <link linkend="FE_READ_BER">FE_READ_BER</link>,
- uint32_t &#x22C6;ber);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_READ_BER">FE_READ_BER</link> for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>uint32_t *ber</para>
-</entry><entry
- align="char">
-<para>The bit error rate is stored into *ber.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-</section>
-
-<section id="FE_READ_SNR">
-<title>FE_READ_SNR</title>
-
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns the signal-to-noise ratio for the signal currently received
- by the front-end. For this command, read-only access to the device is sufficient.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = <link linkend="FE_READ_SNR">FE_READ_SNR</link>, uint16_t
- &#x22C6;snr);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_READ_SNR">FE_READ_SNR</link> for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>uint16_t *snr</para>
-</entry><entry
- align="char">
-<para>The signal-to-noise ratio is stored into *snr.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-</section>
-
-<section id="FE_READ_SIGNAL_STRENGTH">
-<title>FE_READ_SIGNAL_STRENGTH</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns the signal strength value for the signal currently received
- by the front-end. For this command, read-only access to the device is sufficient.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request =
- <link linkend="FE_READ_SIGNAL_STRENGTH">FE_READ_SIGNAL_STRENGTH</link>, uint16_t &#x22C6;strength);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_READ_SIGNAL_STRENGTH">FE_READ_SIGNAL_STRENGTH</link> for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>uint16_t *strength</para>
-</entry><entry
- align="char">
-<para>The signal strength value is stored into *strength.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-</section>
-
-<section id="FE_READ_UNCORRECTED_BLOCKS">
-<title>FE_READ_UNCORRECTED_BLOCKS</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns the number of uncorrected blocks detected by the device
- driver during its lifetime. For meaningful measurements, the increment in block
- count during a specific time interval should be calculated. For this command,
- read-only access to the device is sufficient.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>Note that the counter will wrap to zero after its maximum count has been
- reached.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
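-
-<para>The sketch below illustrates the suggested measurement; the one-second
- interval is arbitrary, and the unsigned subtraction also covers a counter
- wrap during the interval:</para>
-<programlisting>
-#include &lt;stdint.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static int64_t ucb_per_second(int fd)
-{
-    uint32_t before, after;
-
-    if (ioctl(fd, FE_READ_UNCORRECTED_BLOCKS, &amp;before) == -1)
-        return -1;
-    sleep(1);
-    if (ioctl(fd, FE_READ_UNCORRECTED_BLOCKS, &amp;after) == -1)
-        return -1;
-
-    /* increment over the interval, modulo 2^32 */
-    return (int64_t)(uint32_t)(after - before);
-}
-</programlisting>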
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl( int fd, int request =
- <link linkend="FE_READ_UNCORRECTED_BLOCKS">FE_READ_UNCORRECTED_BLOCKS</link>, uint32_t &#x22C6;ublocks);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_READ_UNCORRECTED_BLOCKS">FE_READ_UNCORRECTED_BLOCKS</link> for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>uint32_t *ublocks</para>
-</entry><entry
- align="char">
-<para>The total number of uncorrected blocks seen by the driver
- so far.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-</section>
-
-<section id="FE_SET_FRONTEND">
-<title>FE_SET_FRONTEND</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call starts a tuning operation using specified parameters. The result
- of this call will be successful if the parameters were valid and the tuning could
- be initiated. The result of the tuning operation in itself, however, will arrive
- asynchronously as an event (see documentation for <link linkend="FE_GET_EVENT">FE_GET_EVENT</link> and
- FrontendEvent.) If a new <link linkend="FE_SET_FRONTEND">FE_SET_FRONTEND</link> operation is initiated before
- the previous one was completed, the previous operation will be aborted in favor
- of the new one. This command requires read/write access to the device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = <link linkend="FE_SET_FRONTEND">FE_SET_FRONTEND</link>,
- struct dvb_frontend_parameters &#x22C6;p);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_SET_FRONTEND">FE_SET_FRONTEND</link> for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct
- dvb_frontend_parameters
- *p</para>
-</entry><entry
- align="char">
-<para>Points to parameters for tuning operation.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Maximum supported symbol rate reached.</para>
-</entry>
-</row></tbody></tgroup></informaltable>
-</section>
-
-<section id="FE_GET_FRONTEND">
-<title>FE_GET_FRONTEND</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call queries the currently effective frontend parameters. For this
- command, read-only access to the device is sufficient.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = <link linkend="FE_GET_FRONTEND">FE_GET_FRONTEND</link>,
- struct dvb_frontend_parameters &#x22C6;p);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_SET_FRONTEND">FE_SET_FRONTEND</link> for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct
- dvb_frontend_parameters
- *p</para>
-</entry><entry
- align="char">
-<para>Points to the location where the current frontend parameters will be stored.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Maximum supported symbol rate reached.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
-
-<section id="FE_GET_EVENT">
-<title>FE_GET_EVENT</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns a frontend event if available. If an event is not
- available, the behavior depends on whether the device is in blocking or
- non-blocking mode. In the latter case, the call fails immediately with errno
- set to EWOULDBLOCK. In the former case, the call blocks until an event
- becomes available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>The standard Linux poll() and/or select() system calls can be used with the
- device file descriptor to watch for new events. For select(), the file descriptor
- should be included in the exceptfds argument, and for poll(), POLLPRI should
- be specified as the wake-up condition. Since the event queue allocated is
- rather small (room for 8 events), the queue must be serviced regularly to avoid
- overflow. If an overflow happens, the oldest event is discarded from the queue,
- and an error (EOVERFLOW) occurs the next time the queue is read. After
- reporting the error condition in this fashion, subsequent
- <link linkend="FE_GET_EVENT">FE_GET_EVENT</link>
- calls will return events from the queue as usual.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>For the sake of implementation simplicity, this command requires read/write
- access to the device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
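-
-<para>A minimal sketch of the poll()-based usage described above (illustration
- only; fd is an open frontend file descriptor):</para>
-<programlisting>
-#include &lt;poll.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/frontend.h&gt;
-
-static int wait_for_event(int fd, struct dvb_frontend_event *ev)
-{
-    struct pollfd pfd = { .fd = fd, .events = POLLPRI };
-
-    /* wait up to 10 seconds for a frontend event */
-    if (poll(&amp;pfd, 1, 10000) &lt;= 0)
-        return -1;
-
-    return ioctl(fd, FE_GET_EVENT, ev);
-}
-</programlisting>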
-
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = <link linkend="FE_GET_EVENT">FE_GET_EVENT</link>,
- struct dvb_frontend_event &#x22C6;ev);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals <link linkend="FE_GET_EVENT">FE_GET_EVENT</link> for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct
- dvb_frontend_event
- *ev</para>
-</entry><entry
- align="char">
-<para>Points to the location where the event, if any, is to be stored.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EWOULDBLOCK</para>
-</entry><entry
- align="char">
-<para>There is no event pending, and the device is in
- non-blocking mode.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EOVERFLOW</para>
-</entry><entry
- align="char">
-<para>Overflow in event queue - one or more events were lost.</para>
-</entry>
-</row></tbody></tgroup></informaltable>
-</section>
-
-<section id="FE_DISHNETWORK_SEND_LEGACY_CMD">
- <title>FE_DISHNETWORK_SEND_LEGACY_CMD</title>
-<para>DESCRIPTION</para>
-<informaltable><tgroup cols="1"><tbody><row>
-<entry align="char">
-<para>WARNING: This is a very obscure legacy command, used only by the stv0299 driver. It should not be used on newer drivers.</para>
-<para>It provides a non-standard method for selecting the DiSEqC voltage on the frontend, for Dish Network legacy switches.</para>
-<para>As support for this ioctl was added in 2004, such dishes were already legacy at that time.</para>
-</entry>
-</row></tbody></tgroup></informaltable>
-
-<para>SYNOPSIS</para>
-<informaltable><tgroup cols="1"><tbody><row>
-<entry align="char">
-<para>int ioctl(int fd, int request =
- <link linkend="FE_DISHNETWORK_SEND_LEGACY_CMD">FE_DISHNETWORK_SEND_LEGACY_CMD</link>, unsigned long cmd);</para>
-</entry>
-</row></tbody></tgroup></informaltable>
-
-<para>PARAMETERS</para>
-<informaltable><tgroup cols="2"><tbody><row>
-<entry align="char">
- <para>unsigned long cmd</para>
-</entry>
-<entry align="char">
-<para>
-sends the specified raw cmd to the dish via DISEqC.
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-&return-value-dvb;
-</section>
-
-</section>
diff --git a/Documentation/DocBook/media/dvb/intro.xml b/Documentation/DocBook/media/dvb/intro.xml
deleted file mode 100644
index b5b701f5d8c2..000000000000
--- a/Documentation/DocBook/media/dvb/intro.xml
+++ /dev/null
@@ -1,211 +0,0 @@
-<title>Introduction</title>
-
-<section id="requisites">
-<title>What you need to know</title>
-
-<para>The reader of this document is required to have some knowledge in
-the area of digital video broadcasting (DVB) and should be familiar with
-part I of the MPEG2 specification ISO/IEC 13818 (aka ITU-T H.222), i.e
-you should know what a program/transport stream (PS/TS) is and what is
-meant by a packetized elementary stream (PES) or an I-frame.</para>
-
-<para>Various DVB standards documents are available from
-<ulink url="http://www.dvb.org" /> and/or
-<ulink url="http://www.etsi.org" />.</para>
-
-<para>It is also necessary to know how to access unix/linux devices and
-how to use ioctl calls. This also includes the knowledge of C or C++.
-</para>
-</section>
-
-<section id="history">
-<title>History</title>
-
-<para>The first API for DVB cards we used at Convergence in late 1999
-was an extension of the Video4Linux API which was primarily developed
-for frame grabber cards. As such it was not really well suited to be
-used for DVB cards and their new features like recording MPEG streams
-and filtering several section and PES data streams at the same time.
-</para>
-
-<para>In early 2000, we were approached by Nokia with a proposal for a
-new standard Linux DVB API. As a commitment to the development of
-terminals based on open standards, Nokia and Convergence made it
-available to all Linux developers and published it on
-<ulink url="https://linuxtv.org" /> in September 2000.
-Convergence is the maintainer of the Linux DVB API. Together with the
-LinuxTV community (i.e. you, the reader of this document), the Linux DVB
-API will be constantly reviewed and improved. With the Linux driver for
-the Siemens/Hauppauge DVB PCI card Convergence provides a first
-implementation of the Linux DVB API.</para>
-</section>
-
-<section id="overview">
-<title>Overview</title>
-
-<figure id="stb_components">
-<title>Components of a DVB card/STB</title>
-<mediaobject>
-<imageobject>
-<imagedata fileref="dvbstb.pdf" format="PS" />
-</imageobject>
-<imageobject>
-<imagedata fileref="dvbstb.png" format="PNG" />
-</imageobject>
-</mediaobject>
-</figure>
-
-<para>A DVB PCI card or DVB set-top-box (STB) usually consists of the
-following main hardware components: </para>
-
-<itemizedlist>
- <listitem>
-
-<para>Frontend consisting of tuner and DVB demodulator</para>
-
-<para>Here the raw signal reaches the DVB hardware from a satellite dish
-or antenna or directly from cable. The frontend down-converts and
-demodulates this signal into an MPEG transport stream (TS). In case of a
-satellite frontend, this includes a facility for satellite equipment
-control (SEC), which allows control of LNB polarization, multi feed
-switches or dish rotors.</para>
-
-</listitem>
- <listitem>
-
-<para>Conditional Access (CA) hardware like CI adapters and smartcard slots
-</para>
-
-<para>The complete TS is passed through the CA hardware. Programs to
-which the user has access (controlled by the smart card) are decoded in
-real time and re-inserted into the TS.</para>
-
-</listitem>
- <listitem>
- <para>Demultiplexer which filters the incoming DVB stream</para>
-
-<para>The demultiplexer splits the TS into its components like audio and
-video streams. Besides the usual (often several) audio and video streams,
-it also contains data streams with information about the programs
-offered in this or other streams of the same provider.</para>
-
-</listitem>
-<listitem>
-
-<para>MPEG2 audio and video decoder</para>
-
-<para>The main targets of the demultiplexer are the MPEG2 audio and
-video decoders. After decoding they pass on the uncompressed audio and
-video to the computer screen or (through a PAL/NTSC encoder) to a TV
-set.</para>
-
-
-</listitem>
-</itemizedlist>
-
-<para><xref linkend="stb_components" /> shows a crude schematic of the control and data flow
-between those components.</para>
-
-<para>On a DVB PCI card not all of these have to be present since some
-functionality can be provided by the main CPU of the PC (e.g. MPEG
-picture and sound decoding) or is not needed (e.g. for data-only uses
-like &#8220;internet over satellite&#8221;). Also not every card or STB
-provides conditional access hardware.</para>
-
-</section>
-
-<section id="dvb_devices">
-<title>Linux DVB Devices</title>
-
-<para>The Linux DVB API lets you control these hardware components
-through currently six Unix-style character devices for video, audio,
-frontend, demux, CA and IP-over-DVB networking. The video and audio
-devices control the MPEG2 decoder hardware, the frontend device the
-tuner and the DVB demodulator. The demux device gives you control over
-the PES and section filters of the hardware. If the hardware does not
-support filtering these filters can be implemented in software. Finally,
-the CA device controls all the conditional access capabilities of the
-hardware. It can depend on the individual security requirements of the
-platform, if and how many of the CA functions are made available to the
-application through this device.</para>
-
-<para>All devices can be found in the <constant>/dev</constant>
-tree under <constant>/dev/dvb</constant>. The individual devices
-are called:</para>
-
-<itemizedlist>
-<listitem>
-
-<para><constant>/dev/dvb/adapterN/audioM</constant>,</para>
-</listitem>
-<listitem>
-<para><constant>/dev/dvb/adapterN/videoM</constant>,</para>
-</listitem>
-<listitem>
-<para><constant>/dev/dvb/adapterN/frontendM</constant>,</para>
-</listitem>
- <listitem>
-
-<para><constant>/dev/dvb/adapterN/netM</constant>,</para>
-</listitem>
- <listitem>
-
-<para><constant>/dev/dvb/adapterN/demuxM</constant>,</para>
-</listitem>
- <listitem>
-
-<para><constant>/dev/dvb/adapterN/dvrM</constant>,</para>
-</listitem>
- <listitem>
-
-<para><constant>/dev/dvb/adapterN/caM</constant>,</para></listitem></itemizedlist>
-
-<para>where N enumerates the DVB PCI cards in a system starting
-from&#x00A0;0, and M enumerates the devices of each type within each
-adapter, starting from&#x00A0;0, too. We will omit the &#8220;
-<constant>/dev/dvb/adapterN/</constant>&#8221; in the further discussion
-of these devices.</para>
-
-<para>More details about the data structures and function calls of all
-the devices are described in the following chapters.</para>
-
-</section>
-
-<section id="include_files">
-<title>API include files</title>
-
-<para>For each of the DVB devices a corresponding include file exists.
-The DVB API include files should be included in application sources with
-a partial path like:</para>
-
-<programlisting>
- #include &#x003C;linux/dvb/audio.h&#x003E;
-</programlisting>
-<programlisting>
- #include &#x003C;linux/dvb/ca.h&#x003E;
-</programlisting>
-<programlisting>
- #include &#x003C;linux/dvb/dmx.h&#x003E;
-</programlisting>
-<programlisting>
- #include &#x003C;linux/dvb/frontend.h&#x003E;
-</programlisting>
-<programlisting>
- #include &#x003C;linux/dvb/net.h&#x003E;
-</programlisting>
-<programlisting>
- #include &#x003C;linux/dvb/osd.h&#x003E;
-</programlisting>
-<programlisting>
- #include &#x003C;linux/dvb/video.h&#x003E;
-</programlisting>
-
-<para>To enable applications to support different API versions, an
-additional include file
-<constant>linux/dvb/version.h</constant> exists, which defines the
-constant <constant>DVB_API_VERSION</constant>. This document
-describes <constant>DVB_API_VERSION 5.10</constant>.
-</para>
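-
-<para>For example, an application that relies on DVBv5 features could guard
- against being built with older headers (a compile-time sketch):</para>
-<programlisting>
-#include &lt;linux/dvb/version.h&gt;
-
-#if DVB_API_VERSION &lt; 5
-#error "these examples assume DVB API version 5 or newer"
-#endif
-</programlisting>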
-
-</section>
-
diff --git a/Documentation/DocBook/media/dvb/net.xml b/Documentation/DocBook/media/dvb/net.xml
deleted file mode 100644
index da095ed0b75c..000000000000
--- a/Documentation/DocBook/media/dvb/net.xml
+++ /dev/null
@@ -1,238 +0,0 @@
-<title>DVB Network API</title>
-<para>The DVB net device controls the mapping of data packets that are
- part of a transport stream into a virtual network interface,
- visible through the standard Linux network protocol stack.</para>
-<para>Currently, two encapsulations are supported:</para>
-<itemizedlist>
- <listitem><para><ulink url="http://en.wikipedia.org/wiki/Multiprotocol_Encapsulation">
- Multi Protocol Encapsulation (MPE)</ulink></para></listitem>
- <listitem><para><ulink url="http://en.wikipedia.org/wiki/Unidirectional_Lightweight_Encapsulation">
- Ultra Lightweight Encapsulation (ULE)</ulink></para></listitem>
-</itemizedlist>
-
-<para>In order to create the Linux virtual network interfaces, an application
- needs to tell the Kernel which PIDs and encapsulation types
- are present on the transport stream. This is done through the
- <constant>/dev/dvb/adapter?/net?</constant> device node.
- The data will be available via virtual <constant>dvb?_?</constant>
- network interfaces, and will be controlled/routed via the standard
- ip tools (like ip, route, netstat, ifconfig, etc).</para>
-<para>Data types and ioctl definitions are defined in the
- <constant>linux/dvb/net.h</constant> header.</para>
-
-<section id="net_fcalls">
-<title>DVB net Function Calls</title>
-
-
-<refentry id="NET_ADD_IF">
- <refmeta>
- <refentrytitle>ioctl NET_ADD_IF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>NET_ADD_IF</refname>
- <refpurpose>Creates a new network interface for a given Packet ID.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct dvb_net_if *<parameter>net_if</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>NET_ADD_IF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>net_if</parameter></term>
- <listitem>
- <para>pointer to &dvb-net-if;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>The NET_ADD_IF ioctl system call selects the Packet ID (PID) that
- contains TCP/IP traffic, the type of encapsulation to be used (MPE or ULE)
- and the interface number for the new interface to be created. When the
- system call successfully returns, a new virtual network interface is created.</para>
-<para>The &dvb-net-if;::ifnum field will be filled with the number of the
- created interface.</para>
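-
-<para>A minimal sketch, using the struct dvb_net_if layout from
- <constant>linux/dvb/net.h</constant> (where the interface number field is
- spelled <constant>if_num</constant>) and a hypothetical PID of 0x03e8:</para>
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/net.h&gt;
-
-int main(void)
-{
-    struct dvb_net_if net_if = {
-        .pid = 0x03e8,                    /* hypothetical PID carrying IP data */
-        .feedtype = DVB_NET_FEEDTYPE_MPE, /* MPE encapsulation */
-    };
-    int fd = open("/dev/dvb/adapter0/net0", O_RDWR);
-
-    if (fd == -1 || ioctl(fd, NET_ADD_IF, &amp;net_if) == -1) {
-        perror("NET_ADD_IF");
-        return 1;
-    }
-    /* the created interface shows up as dvb0_N, with N taken from if_num */
-    printf("created interface number %u\n", net_if.if_num);
-    return 0;
-}
-</programlisting>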
-
-&return-value-dvb;
-</refsect1>
-
-<refsect1 id="dvb-net-if-t">
-<title>struct <structname>dvb_net_if</structname> description</title>
-
-<table pgwide="1" frame="none" id="dvb-net-if">
- <title>struct <structname>dvb_net_if</structname></title>
- <tgroup cols="2">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry align="char">pid</entry>
- <entry align="char">Packet ID (PID) of the MPEG-TS that contains
- data</entry>
- </row><row>
- <entry align="char">ifnum</entry>
- <entry align="char">number of the DVB interface.</entry>
- </row><row>
- <entry align="char">feedtype</entry>
- <entry align="char">Encapsulation type of the feed. It can be:
- <constant>DVB_NET_FEEDTYPE_MPE</constant> for MPE encoding
- or
- <constant>DVB_NET_FEEDTYPE_ULE</constant> for ULE encoding.
- </entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-</refsect1>
-</refentry>
-
-<refentry id="NET_REMOVE_IF">
- <refmeta>
- <refentrytitle>ioctl NET_REMOVE_IF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>NET_REMOVE_IF</refname>
- <refpurpose>Removes a network interface.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>int <parameter>ifnum</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>NET_REMOVE_IF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>ifnum</parameter></term>
- <listitem>
- <para>number of the interface to be removed</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>The NET_REMOVE_IF ioctl deletes an interface previously created
- via &NET-ADD-IF;.</para>
-
-&return-value-dvb;
-</refsect1>
-</refentry>
-
-
-<refentry id="NET_GET_IF">
- <refmeta>
- <refentrytitle>ioctl NET_GET_IF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>NET_GET_IF</refname>
- <refpurpose>Read the configuration data of an interface created via
- &NET-ADD-IF;.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct dvb_net_if *<parameter>net_if</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fe_fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>NET_GET_IF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>net_if</parameter></term>
- <listitem>
- <para>pointer to &dvb-net-if;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>The NET_GET_IF ioctl uses the interface number given by the
- &dvb-net-if;::ifnum field and fills the content of &dvb-net-if; with
- the packet ID and encapsulation type used on that interface. If the
- interface has not been created yet with &NET-ADD-IF;, it will return -1 and
- set <constant>errno</constant> to the <constant>EINVAL</constant>
- error code.</para>
-
-&return-value-dvb;
-</refsect1>
-</refentry>
-</section>
diff --git a/Documentation/DocBook/media/dvb/video.xml b/Documentation/DocBook/media/dvb/video.xml
deleted file mode 100644
index 71547fcd7ba0..000000000000
--- a/Documentation/DocBook/media/dvb/video.xml
+++ /dev/null
@@ -1,1968 +0,0 @@
-<title>DVB Video Device</title>
-<para>The DVB video device controls the MPEG2 video decoder of the DVB hardware. It
-can be accessed through <emphasis role="bold">/dev/dvb/adapter0/video0</emphasis>. Data types and
-ioctl definitions can be accessed by including <emphasis role="bold">linux/dvb/video.h</emphasis> in your
-application.
-</para>
-<para>Note that the DVB video device only controls decoding of the MPEG video stream, not
-its presentation on the TV or computer screen. On PCs this is typically handled by an
-associated video4linux device, e.g. <emphasis role="bold">/dev/video</emphasis>, which allows scaling and defining output
-windows.
-</para>
-<para>Some DVB cards don&#8217;t have their own MPEG decoder, which results in the omission of
-the audio and video device as well as the video4linux device.
-</para>
-<para>The ioctls that deal with SPUs (sub picture units) and navigation packets are only
-supported on some MPEG decoders made for DVD playback.
-</para>
-<para>
-These ioctls were also used by V4L2 to control MPEG decoders implemented in V4L2. The use
-of these ioctls for that purpose has been made obsolete and proper V4L2 ioctls or controls
-have been created to replace that functionality.</para>
-<section id="video_types">
-<title>Video Data Types</title>
-
-<section id="video-format-t">
-<title>video_format_t</title>
-<para>The <constant>video_format_t</constant> data type defined by
-</para>
-<programlisting>
-typedef enum {
- VIDEO_FORMAT_4_3, /&#x22C6; Select 4:3 format &#x22C6;/
- VIDEO_FORMAT_16_9, /&#x22C6; Select 16:9 format. &#x22C6;/
- VIDEO_FORMAT_221_1 /&#x22C6; 2.21:1 &#x22C6;/
-} video_format_t;
-</programlisting>
-<para>is used in the VIDEO_SET_FORMAT function (??) to tell the driver which aspect ratio
-the output hardware (e.g. TV) has. It is also used in the data structures video_status
-(??) returned by VIDEO_GET_STATUS (??) and video_event (??) returned by
-VIDEO_GET_EVENT (??) which report about the display format of the current video
-stream.
-</para>
-</section>
-
-<section id="video-displayformat-t">
-<title>video_displayformat_t</title>
-<para>In case the display format of the video stream and of the display hardware differ, the
-application has to specify how to handle the cropping of the picture. This can be done using
-the VIDEO_SET_DISPLAY_FORMAT call (??) which accepts
-</para>
-<programlisting>
-typedef enum {
- VIDEO_PAN_SCAN, /&#x22C6; use pan and scan format &#x22C6;/
- VIDEO_LETTER_BOX, /&#x22C6; use letterbox format &#x22C6;/
- VIDEO_CENTER_CUT_OUT /&#x22C6; use center cut out format &#x22C6;/
-} video_displayformat_t;
-</programlisting>
-<para>as argument.
-</para>
-</section>
-
-<section id="video-stream-source-t">
-<title>video_stream_source_t</title>
-<para>The video stream source is set through the VIDEO_SELECT_SOURCE call and can take
-the following values, depending on whether we are replaying from an internal (demuxer) or
-external (user write) source.
-</para>
-<programlisting>
-typedef enum {
- VIDEO_SOURCE_DEMUX, /&#x22C6; Select the demux as the main source &#x22C6;/
- VIDEO_SOURCE_MEMORY /&#x22C6; If this source is selected, the stream
- comes from the user through the write
- system call &#x22C6;/
-} video_stream_source_t;
-</programlisting>
-<para>VIDEO_SOURCE_DEMUX selects the demultiplexer (fed either by the frontend or the
-DVR device) as the source of the video stream. If VIDEO_SOURCE_MEMORY
-is selected the stream comes from the application through the <emphasis role="bold">write()</emphasis> system
-call.
-</para>
-</section>
-
-<section id="video-play-state-t">
-<title>video_play_state_t</title>
-<para>The following values can be returned by the VIDEO_GET_STATUS call representing the
-state of video playback.
-</para>
-<programlisting>
-typedef enum {
- VIDEO_STOPPED, /&#x22C6; Video is stopped &#x22C6;/
- VIDEO_PLAYING, /&#x22C6; Video is currently playing &#x22C6;/
- VIDEO_FREEZED /&#x22C6; Video is freezed &#x22C6;/
-} video_play_state_t;
-</programlisting>
-</section>
-
-<section id="video-command">
-<title>struct video_command</title>
-<para>The structure must be zeroed before use by the application.
-This ensures it can be extended safely in the future.</para>
-<programlisting>
-struct video_command {
- __u32 cmd;
- __u32 flags;
- union {
- struct {
- __u64 pts;
- } stop;
-
- struct {
- /&#x22C6; 0 or 1000 specifies normal speed,
- 1 specifies forward single stepping,
- -1 specifies backward single stepping,
- &gt;>1: playback at speed/1000 of the normal speed,
- &lt;-1: reverse playback at (-speed/1000) of the normal speed. &#x22C6;/
- __s32 speed;
- __u32 format;
- } play;
-
- struct {
- __u32 data[16];
- } raw;
- };
-};
-</programlisting>
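-
-<para>As an illustration of the zeroing rule (a sketch assuming the
- VIDEO_COMMAND ioctl and the VIDEO_CMD_PLAY command code from
- linux/dvb/video.h):</para>
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/video.h&gt;
-
-static int start_playback(int fd)
-{
-    struct video_command cmd;
-
-    memset(&amp;cmd, 0, sizeof(cmd));  /* zero first so unknown/new fields stay 0 */
-    cmd.cmd = VIDEO_CMD_PLAY;
-    cmd.play.speed = 1000;         /* normal speed */
-
-    return ioctl(fd, VIDEO_COMMAND, &amp;cmd);
-}
-</programlisting>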
-</section>
-
-<section id="video-size-t">
-<title>video_size_t</title>
-<programlisting>
-typedef struct {
- int w;
- int h;
- video_format_t aspect_ratio;
-} video_size_t;
-</programlisting>
-</section>
-
-
-<section id="video-event">
-<title>struct video_event</title>
-<para>The following is the structure of a video event as it is returned by the VIDEO_GET_EVENT
-call.
-</para>
-<programlisting>
-struct video_event {
- __s32 type;
-#define VIDEO_EVENT_SIZE_CHANGED 1
-#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
-#define VIDEO_EVENT_DECODER_STOPPED 3
-#define VIDEO_EVENT_VSYNC 4
- __kernel_time_t timestamp;
- union {
- video_size_t size;
- unsigned int frame_rate; /&#x22C6; in frames per 1000sec &#x22C6;/
- unsigned char vsync_field; /&#x22C6; unknown/odd/even/progressive &#x22C6;/
- } u;
-};
-</programlisting>
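-
-<para>A hedged sketch of reading one event (fd is an open video device; error
- handling is omitted):</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/video.h&gt;
-
-static void report_size_change(int fd)
-{
-    struct video_event ev;
-
-    if (ioctl(fd, VIDEO_GET_EVENT, &amp;ev) == -1)
-        return;
-
-    if (ev.type == VIDEO_EVENT_SIZE_CHANGED)
-        printf("new size: %dx%d\n", ev.u.size.w, ev.u.size.h);
-}
-</programlisting>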
-</section>
-
-<section id="video-status">
-<title>struct video_status</title>
-<para>The VIDEO_GET_STATUS call returns the following structure informing about various
-states of the playback operation.
-</para>
-<programlisting>
-struct video_status {
- int video_blank; /&#x22C6; blank video on freeze? &#x22C6;/
- video_play_state_t play_state; /&#x22C6; current state of playback &#x22C6;/
- video_stream_source_t stream_source; /&#x22C6; current source (demux/memory) &#x22C6;/
- video_format_t video_format; /&#x22C6; current aspect ratio of stream &#x22C6;/
- video_displayformat_t display_format;/&#x22C6; selected cropping mode &#x22C6;/
-};
-</programlisting>
-<para>If video_blank is set, the video will be blanked out if the channel is changed or if playback is
-stopped. Otherwise, the last picture will be displayed. play_state indicates if the video is
-currently frozen, stopped, or being played back. The stream_source corresponds to the selected
-source for the video stream. It can come either from the demultiplexer or from memory.
-The video_format indicates the aspect ratio (one of 4:3 or 16:9) of the currently
-played video stream. Finally, display_format corresponds to the selected cropping
-mode in case the source video format is not the same as the format of the output
-device.
-</para>
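-
-<para>For illustration, the status could be queried like this (error handling
- omitted):</para>
-<programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/dvb/video.h&gt;
-
-static int is_playing(int fd)
-{
-    struct video_status st;
-
-    if (ioctl(fd, VIDEO_GET_STATUS, &amp;st) == -1)
-        return -1;
-
-    return st.play_state == VIDEO_PLAYING;
-}
-</programlisting>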
-</section>
-
-<section id="video-still-picture">
-<title>struct video_still_picture</title>
-<para>An I-frame displayed via the VIDEO_STILLPICTURE call is passed on within the
-following structure.
-</para>
-<programlisting>
-/&#x22C6; pointer to and size of a single iframe in memory &#x22C6;/
-struct video_still_picture {
- char &#x22C6;iFrame; /&#x22C6; pointer to a single iframe in memory &#x22C6;/
- int32_t size;
-};
-</programlisting>
-</section>
-
-<section id="video_caps">
-<title>video capabilities</title>
-<para>A call to VIDEO_GET_CAPABILITIES returns an unsigned integer with the following
-bits set according to the hardware's capabilities.
-</para>
-<programlisting>
- /&#x22C6; bit definitions for capabilities: &#x22C6;/
- /&#x22C6; can the hardware decode MPEG1 and/or MPEG2? &#x22C6;/
- #define VIDEO_CAP_MPEG1 1
- #define VIDEO_CAP_MPEG2 2
- /&#x22C6; can you send a system and/or program stream to video device?
- (you still have to open the video and the audio device but only
- send the stream to the video device) &#x22C6;/
- #define VIDEO_CAP_SYS 4
- #define VIDEO_CAP_PROG 8
- /&#x22C6; can the driver also handle SPU, NAVI and CSS encoded data?
- (CSS API is not present yet) &#x22C6;/
- #define VIDEO_CAP_SPU 16
- #define VIDEO_CAP_NAVI 32
- #define VIDEO_CAP_CSS 64
-</programlisting>
-</section>
-
-<section id="video-system">
-<title>video_system_t</title>
-<para>A call to VIDEO_SET_SYSTEM sets the desired video system for TV output. The
-following system types can be set:
-</para>
-<programlisting>
-typedef enum {
- VIDEO_SYSTEM_PAL,
- VIDEO_SYSTEM_NTSC,
- VIDEO_SYSTEM_PALN,
- VIDEO_SYSTEM_PALNc,
- VIDEO_SYSTEM_PALM,
- VIDEO_SYSTEM_NTSC60,
- VIDEO_SYSTEM_PAL60,
- VIDEO_SYSTEM_PALM60
-} video_system_t;
-</programlisting>
-</section>
-
-<section id="video-highlight">
-<title>struct video_highlight</title>
-<para>Calling the ioctl VIDEO_SET_HIGHLIGHTS posts the SPU highlight information. The
-call expects the following format for that information:
-</para>
-<programlisting>
- typedef
- struct video_highlight {
- boolean active; /&#x22C6; 1=show highlight, 0=hide highlight &#x22C6;/
- uint8_t contrast1; /&#x22C6; 7- 4 Pattern pixel contrast &#x22C6;/
- /&#x22C6; 3- 0 Background pixel contrast &#x22C6;/
- uint8_t contrast2; /&#x22C6; 7- 4 Emphasis pixel-2 contrast &#x22C6;/
- /&#x22C6; 3- 0 Emphasis pixel-1 contrast &#x22C6;/
- uint8_t color1; /&#x22C6; 7- 4 Pattern pixel color &#x22C6;/
- /&#x22C6; 3- 0 Background pixel color &#x22C6;/
- uint8_t color2; /&#x22C6; 7- 4 Emphasis pixel-2 color &#x22C6;/
- /&#x22C6; 3- 0 Emphasis pixel-1 color &#x22C6;/
- uint32_t ypos; /&#x22C6; 23-22 auto action mode &#x22C6;/
- /&#x22C6; 21-12 start y &#x22C6;/
- /&#x22C6; 9- 0 end y &#x22C6;/
- uint32_t xpos; /&#x22C6; 23-22 button color number &#x22C6;/
- /&#x22C6; 21-12 start x &#x22C6;/
- /&#x22C6; 9- 0 end x &#x22C6;/
- } video_highlight_t;
-</programlisting>
-
-</section>
-<section id="video-spu">
-<title>struct video_spu</title>
-<para>Calling VIDEO_SET_SPU deactivates or activates SPU decoding, according to the
-following format:
-</para>
-<programlisting>
- typedef
- struct video_spu {
- boolean active;
- int stream_id;
- } video_spu_t;
-</programlisting>
-
-</section>
-<section id="video-spu-palette">
-<title>struct video_spu_palette</title>
-<para>The following structure is used to set the SPU palette by calling VIDEO_SPU_PALETTE:
-</para>
-<programlisting>
- typedef
- struct video_spu_palette {
- int length;
- uint8_t &#x22C6;palette;
- } video_spu_palette_t;
-</programlisting>
-
-</section>
-<section id="video-navi-pack">
-<title>struct video_navi_pack</title>
-<para>In order to get the navigational data the following structure has to be passed to the ioctl
-VIDEO_GET_NAVI:
-</para>
-<programlisting>
- typedef
- struct video_navi_pack {
- int length; /&#x22C6; 0 ... 1024 &#x22C6;/
- uint8_t data[1024];
- } video_navi_pack_t;
-</programlisting>
-</section>
-
-
-<section id="video-attributes-t">
-<title>video_attributes_t</title>
-<para>The following attributes can be set by a call to VIDEO_SET_ATTRIBUTES:
-</para>
-<programlisting>
- typedef uint16_t video_attributes_t;
- /&#x22C6; bits: descr. &#x22C6;/
- /&#x22C6; 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) &#x22C6;/
- /&#x22C6; 13-12 TV system (0=525/60, 1=625/50) &#x22C6;/
- /&#x22C6; 11-10 Aspect ratio (0=4:3, 3=16:9) &#x22C6;/
- /&#x22C6; 9- 8 permitted display mode on 4:3 monitor (0=both, 1=only pan-sca &#x22C6;/
- /&#x22C6; 7 line 21-1 data present in GOP (1=yes, 0=no) &#x22C6;/
- /&#x22C6; 6 line 21-2 data present in GOP (1=yes, 0=no) &#x22C6;/
- /&#x22C6; 5- 3 source resolution (0=720x480/576, 1=704x480/576, 2=352x480/57 &#x22C6;/
- /&#x22C6; 2 source letterboxed (1=yes, 0=no) &#x22C6;/
- /&#x22C6; 0 film/camera mode (0=camera, 1=film (625/50 only)) &#x22C6;/
-</programlisting>
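-<para>As an illustration only (not part of the original text), the attribute word can be
-decoded with ordinary shifts and masks according to the bit layout above; the value used
-here is hypothetical.
-</para>
-<programlisting>
- /* sketch only: decode a video_attributes_t word as documented above */
- video_attributes_t attr = 0x0C00;       /* hypothetical example value */
-
- int compression = (attr &gt;&gt; 14) &amp; 0x3;   /* 0=MPEG-1, 1=MPEG-2         */
- int tv_system   = (attr &gt;&gt; 12) &amp; 0x3;   /* 0=525/60, 1=625/50         */
- int aspect      = (attr &gt;&gt; 10) &amp; 0x3;   /* 0=4:3, 3=16:9              */
- int letterboxed = (attr &gt;&gt;  2) &amp; 0x1;   /* 1 = source letterboxed     */
-</programlisting>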
-</section></section>
-
-
-<section id="video_function_calls">
-<title>Video Function Calls</title>
-
-
-<section id="video_fopen">
-<title>open()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call opens a named video device (e.g. /dev/dvb/adapter0/video0)
- for subsequent use.</para>
-<para>When an open() call has succeeded, the device will be ready for use.
- The significance of blocking or non-blocking mode is described in the
- documentation for functions where there is a difference. It does not affect the
- semantics of the open() call itself. A device opened in blocking mode can later
- be put into non-blocking mode (and vice versa) using the F_SETFL command
- of the fcntl system call. This is a standard system call, documented in the Linux
- manual page for fcntl. Only one user can open the Video Device in O_RDWR
- mode. All other attempts to open the device in this mode will fail, and an
- error code will be returned. If the Video Device is opened in O_RDONLY
- mode, the only ioctl call that can be used is VIDEO_GET_STATUS. All other
- calls will return an error code.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int open(const char &#x22C6;deviceName, int flags);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>const char
- *deviceName</para>
-</entry><entry
- align="char">
-<para>Name of specific video device.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int flags</para>
-</entry><entry
- align="char">
-<para>A bit-wise OR of the following flags:</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDONLY read-only access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_RDWR read/write access</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>O_NONBLOCK open in non-blocking mode</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>(blocking mode is the default)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>ENODEV</para>
-</entry><entry
- align="char">
-<para>Device driver not loaded/available.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EINTERNAL</para>
-</entry><entry
- align="char">
-<para>Internal error.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBUSY</para>
-</entry><entry
- align="char">
-<para>Device or resource busy.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid argument.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
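-<para>A minimal usage sketch (not part of the original specification): open the first
-video decoder of the first adapter for read/write access in non-blocking mode. The
-device path is only an example.
-</para>
-<programlisting>
- #include &lt;fcntl.h&gt;              /* open(), O_RDWR, O_NONBLOCK */
- #include &lt;stdio.h&gt;              /* perror()                   */
-
- /* sketch only: open the DVB video decoder device */
- int fd = open("/dev/dvb/adapter0/video0", O_RDWR | O_NONBLOCK);
- if (fd == -1) {
-         perror("open video device");
-         /* typically ENODEV or EBUSY, see the table above */
- }
-</programlisting>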
-
-</section>
-<section id="video_fclose">
-<title>close()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call closes a previously opened video device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int close(int fd);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section>
-<section id="video_fwrite">
-<title>write()</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This system call can only be used if VIDEO_SOURCE_MEMORY is selected
- in the ioctl call VIDEO_SELECT_SOURCE. The data provided shall be in
- PES format, unless the capability allows other formats. If O_NONBLOCK is
- not specified the function will block until buffer space is available. The amount
- of data to be transferred is implied by count.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>size_t write(int fd, const void &#x22C6;buf, size_t count);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>void *buf</para>
-</entry><entry
- align="char">
-<para>Pointer to the buffer containing the PES data.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>size_t count</para>
-</entry><entry
- align="char">
-<para>Size of buf.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>RETURN VALUE</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EPERM</para>
-</entry><entry
- align="char">
-<para>Mode VIDEO_SOURCE_MEMORY not selected.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>ENOMEM</para>
-</entry><entry
- align="char">
-<para>Attempted to write more data than the internal buffer can
- hold.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EBADF</para>
-</entry><entry
- align="char">
-<para>fd is not a valid open file descriptor.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
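-<para>An illustrative sketch, not taken from the original text: it assumes fd was opened
-as shown in the open() section, that pes_buf and pes_len (hypothetical names) describe a
-buffer holding video PES data, and that the constants come from linux/dvb/video.h.
-Source selection is described under VIDEO_SELECT_SOURCE below.
-</para>
-<programlisting>
- /* sketch only: select memory input, then feed PES data with write() */
- if (ioctl(fd, VIDEO_SELECT_SOURCE, VIDEO_SOURCE_MEMORY) == -1)
-         perror("VIDEO_SELECT_SOURCE");
-
- ssize_t n = write(fd, pes_buf, pes_len);  /* blocks unless O_NONBLOCK was set */
- if (n == -1)
-         perror("write PES data");
-</programlisting>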
-
-</section><section id="VIDEO_STOP"
-role="subsection"><title>VIDEO_STOP</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To control a V4L2 decoder use the V4L2
-&VIDIOC-DECODER-CMD; instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to stop playing the current stream.
- Depending on the input parameter, the screen can either be blanked out or continue
- displaying the last decoded frame.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_STOP, boolean
- mode);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_STOP for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>Boolean mode</para>
-</entry><entry
- align="char">
-<para>Indicates how the screen shall be handled.</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>TRUE: Blank the screen on stop.</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>FALSE: Show last decoded frame.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_PLAY"
-role="subsection"><title>VIDEO_PLAY</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To control a V4L2 decoder use the V4L2
-&VIDIOC-DECODER-CMD; instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to start playing a video stream from the
- selected source.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_PLAY);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_PLAY for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
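-<para>A short usage sketch (not from the original text; it assumes an already opened fd
-and the constants from linux/dvb/video.h): start playback from the demux (source
-selection is described under VIDEO_SELECT_SOURCE below) and later stop it again,
-blanking the screen.
-</para>
-<programlisting>
- /* sketch only: play from the demux, then stop and blank the screen */
- ioctl(fd, VIDEO_SELECT_SOURCE, VIDEO_SOURCE_DEMUX);
- ioctl(fd, VIDEO_PLAY);
-
- /* ... the selected stream is decoded and displayed ... */
-
- ioctl(fd, VIDEO_STOP, 1);       /* TRUE: blank the screen on stop */
-</programlisting>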
-
-</section><section id="VIDEO_FREEZE"
-role="subsection"><title>VIDEO_FREEZE</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To control a V4L2 decoder use the V4L2
-&VIDIOC-DECODER-CMD; instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call suspends the live video stream being played. Decoding
- and playing are frozen. It is then possible to restart the decoding
- and playing process of the video stream using the VIDEO_CONTINUE
- command. If VIDEO_SOURCE_MEMORY is selected in the ioctl call
- VIDEO_SELECT_SOURCE, the DVB subsystem will not decode any more
- data until the ioctl call VIDEO_CONTINUE or VIDEO_PLAY is performed.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_FREEZE);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_FREEZE for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_CONTINUE"
-role="subsection"><title>VIDEO_CONTINUE</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To control a V4L2 decoder use the V4L2
-&VIDIOC-DECODER-CMD; instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call restarts decoding and playing processes of the video stream
- which was played before a call to VIDEO_FREEZE was made.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_CONTINUE);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_CONTINUE for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_SELECT_SOURCE"
-role="subsection"><title>VIDEO_SELECT_SOURCE</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. This ioctl was also supported by the
-V4L2 ivtv driver, but that has been replaced by the ivtv-specific
-<constant>IVTV_IOC_PASSTHROUGH_MODE</constant> ioctl.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call informs the video device which source shall be used for the input
- data. The possible sources are demux or memory. If memory is selected, the
- data is fed to the video device through the write command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_SELECT_SOURCE,
- video_stream_source_t source);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SELECT_SOURCE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_stream_source_t
- source</para>
-</entry><entry
- align="char">
-<para>Indicates which source shall be used for the Video stream.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_SET_BLANK"
-role="subsection"><title>VIDEO_SET_BLANK</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to blank out the picture.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_SET_BLANK, boolean
- mode);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_BLANK for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>boolean mode</para>
-</entry><entry
- align="char">
-<para>TRUE: Blank the screen on stop.</para>
-</entry>
- </row><row><entry
- align="char">
-</entry><entry
- align="char">
-<para>FALSE: Show last decoded frame.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_GET_STATUS"
-role="subsection"><title>VIDEO_GET_STATUS</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to return the current status of the device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_GET_STATUS, struct
- video_status &#x22C6;status);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_STATUS for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct video_status
- *status</para>
-</entry><entry
- align="char">
-<para>Returns the current status of the Video Device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
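-<para>An illustrative sketch only; it assumes an open fd and the struct video_status
-layout from linux/dvb/video.h (the field names play_state and stream_source are taken
-from that header and are assumptions here).
-</para>
-<programlisting>
- /* sketch only: query the current decoder status */
- struct video_status st;
-
- if (ioctl(fd, VIDEO_GET_STATUS, &amp;st) == -1)
-         perror("VIDEO_GET_STATUS");
- else
-         printf("play_state=%d stream_source=%d\n",
-                st.play_state, st.stream_source);
-</programlisting>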
-
-</section><section id="VIDEO_GET_FRAME_COUNT"
-role="subsection"><title>VIDEO_GET_FRAME_COUNT</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is obsolete. Do not use in new drivers. For V4L2 decoders this
-ioctl has been replaced by the <constant>V4L2_CID_MPEG_VIDEO_DEC_FRAME</constant> control.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to return the number of displayed frames
-since the decoder was started.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- VIDEO_GET_FRAME_COUNT, __u64 *pts);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_FRAME_COUNT for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u64 *pts
-</para>
-</entry><entry
- align="char">
-<para>Returns the number of frames displayed since the decoder was started.
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_GET_PTS"
-role="subsection"><title>VIDEO_GET_PTS</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is obsolete. Do not use in new drivers. For V4L2 decoders this
-ioctl has been replaced by the <constant>V4L2_CID_MPEG_VIDEO_DEC_PTS</constant> control.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to return the current PTS timestamp.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- VIDEO_GET_PTS, __u64 *pts);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_PTS for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>__u64 *pts
-</para>
-</entry><entry
- align="char">
-<para>Returns the 33-bit timestamp as defined in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
-</para>
-<para>
-The PTS should belong to the currently played
-frame if possible, but may also be a value close to it
-like the PTS of the last decoded frame or the last PTS
-extracted by the PES parser.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
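-<para>A usage sketch, not part of the original text (assuming an open fd): the returned
-33-bit PTS is in 90 kHz units as defined by ISO/IEC 13818-1, so dividing by 90000 gives
-seconds.
-</para>
-<programlisting>
- /* sketch only: read the current PTS and convert it to seconds */
- __u64 pts = 0;
-
- if (ioctl(fd, VIDEO_GET_PTS, &amp;pts) == 0)
-         printf("PTS %llu (%.2f s)\n",
-                (unsigned long long)pts, pts / 90000.0);
-</programlisting>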
-
-</section><section id="VIDEO_GET_FRAME_RATE"
-role="subsection"><title>VIDEO_GET_FRAME_RATE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to return the current framerate.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- VIDEO_GET_FRAME_RATE, unsigned int *rate);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_FRAME_RATE for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>unsigned int *rate
-</para>
-</entry><entry
- align="char">
-<para>Returns the framerate in number of frames per 1000 seconds.
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
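-<para>For illustration only (assuming an open fd): since the returned value is in frames
-per 1000 seconds, dividing by 1000 yields frames per second, e.g. 25000 corresponds to
-25 fps.
-</para>
-<programlisting>
- /* sketch only: read and convert the frame rate */
- unsigned int rate = 0;
-
- if (ioctl(fd, VIDEO_GET_FRAME_RATE, &amp;rate) == 0)
-         printf("%u frames per 1000 s = %.3f fps\n", rate, rate / 1000.0);
-</programlisting>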
-
-</section><section id="VIDEO_GET_EVENT"
-role="subsection"><title>VIDEO_GET_EVENT</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is for DVB devices only. To get events from a V4L2 decoder use the V4L2
-&VIDIOC-DQEVENT; ioctl instead.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call returns an event of type video_event if available. If an event is
- not available, the behavior depends on whether the device is in blocking or
- non-blocking mode. In the latter case, the call fails immediately with errno
- set to EWOULDBLOCK. In the former case, the call blocks until an event
- becomes available. The standard Linux poll() and/or select() system calls can
- be used with the device file descriptor to watch for new events. For select(),
- the file descriptor should be included in the exceptfds argument, and for
- poll(), POLLPRI should be specified as the wake-up condition. Read-only
- permissions are sufficient for this ioctl call.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_GET_EVENT, struct
- video_event &#x22C6;ev);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_EVENT for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct video_event
- *ev</para>
-</entry><entry
- align="char">
-<para>Points to the location where the event, if any, is to be
- stored.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EWOULDBLOCK</para>
-</entry><entry
- align="char">
-<para>There is no event pending, and the device is in
- non-blocking mode.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>EOVERFLOW</para>
-</entry><entry
- align="char">
-<para>Overflow in event queue - one or more events were lost.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
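-<para>The following sketch is illustrative only; it assumes an open fd, poll.h, and the
-struct video_event layout from linux/dvb/video.h (the field name type is taken from that
-header and is an assumption here). It waits for an event with poll() and POLLPRI, as
-described above, and then fetches it.
-</para>
-<programlisting>
- /* sketch only: wait for and read one decoder event */
- struct pollfd pfd = { .fd = fd, .events = POLLPRI };
- struct video_event ev;
-
- if (poll(&amp;pfd, 1, -1) &gt; 0 &amp;&amp; (pfd.revents &amp; POLLPRI)) {
-         if (ioctl(fd, VIDEO_GET_EVENT, &amp;ev) == 0)
-                 printf("event type %d\n", ev.type);
- }
-</programlisting>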
-
-</section><section id="VIDEO_COMMAND"
-role="subsection"><title>VIDEO_COMMAND</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is obsolete. Do not use in new drivers. For V4L2 decoders this
-ioctl has been replaced by the &VIDIOC-DECODER-CMD; ioctl.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl commands the decoder. The <constant>video_command</constant> struct
-is a subset of the <constant>v4l2_decoder_cmd</constant> struct, so refer to the
-&VIDIOC-DECODER-CMD; documentation for more information.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- VIDEO_COMMAND, struct video_command *cmd);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_COMMAND for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct video_command *cmd
-</para>
-</entry><entry
- align="char">
-<para>Commands the decoder.
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_TRY_COMMAND"
-role="subsection"><title>VIDEO_TRY_COMMAND</title>
-<para>DESCRIPTION
-</para>
-<para>This ioctl is obsolete. Do not use in new drivers. For V4L2 decoders this
-ioctl has been replaced by the &VIDIOC-TRY-DECODER-CMD; ioctl.</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl tries a decoder command. The <constant>video_command</constant> struct
-is a subset of the <constant>v4l2_decoder_cmd</constant> struct, so refer to the
-&VIDIOC-TRY-DECODER-CMD; documentation for more information.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- VIDEO_TRY_COMMAND, struct video_command *cmd);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_TRY_COMMAND for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct video_command *cmd
-</para>
-</entry><entry
- align="char">
-<para>Try a decoder command.
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_GET_SIZE"
-role="subsection"><title>VIDEO_GET_SIZE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl returns the size and aspect ratio.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request =
- VIDEO_GET_SIZE, video_size_t *size);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_SIZE for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_size_t *size
-</para>
-</entry><entry
- align="char">
-<para>Returns the size and aspect ratio.
-</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_SET_DISPLAY_FORMAT"
-role="subsection"><title>VIDEO_SET_DISPLAY_FORMAT</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to select the video format to be applied
- by the MPEG chip to the video.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request =
- VIDEO_SET_DISPLAY_FORMAT, video_display_format_t
- format);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_DISPLAY_FORMAT for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_display_format_t
- format</para>
-</entry><entry
- align="char">
-<para>Selects the video format to be used.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_STILLPICTURE"
-role="subsection"><title>VIDEO_STILLPICTURE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to display a still picture (I-frame). The
- input data shall contain an I-frame. If the pointer is NULL, then the currently
- displayed still picture is blanked.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_STILLPICTURE,
- struct video_still_picture &#x22C6;sp);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_STILLPICTURE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>struct
- video_still_picture
- *sp</para>
-</entry><entry
- align="char">
-<para>Pointer to a location where an I-frame and size is stored.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
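-<para>A usage sketch, not part of the original text: it assumes that iframe_buf and
-iframe_len (hypothetical names) already describe a complete I-frame in memory and that
-fd is open. The structure layout is the one shown in the data types section above.
-</para>
-<programlisting>
- /* sketch only: display a still picture from a buffer in memory */
- struct video_still_picture sp = {
-         .iFrame = iframe_buf,   /* pointer to the I-frame data */
-         .size   = iframe_len,   /* its length in bytes         */
- };
-
- if (ioctl(fd, VIDEO_STILLPICTURE, &amp;sp) == -1)
-         perror("VIDEO_STILLPICTURE");
-</programlisting>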
-
-</section><section id="VIDEO_FAST_FORWARD"
-role="subsection"><title>VIDEO_FAST_FORWARD</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the Video Device to skip the decoding of N I-frames.
- This call can only be used if VIDEO_SOURCE_MEMORY is selected.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_FAST_FORWARD, int
- nFrames);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_FAST_FORWARD for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int nFrames</para>
-</entry><entry
- align="char">
-<para>The number of frames to skip.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EPERM</para>
-</entry><entry
- align="char">
-<para>Mode VIDEO_SOURCE_MEMORY not selected.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_SLOWMOTION"
-role="subsection"><title>VIDEO_SLOWMOTION</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the video device to repeat decoding each frame N
- times. This call can only be used if VIDEO_SOURCE_MEMORY is selected.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_SLOWMOTION, int
- nFrames);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SLOWMOTION for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int nFrames</para>
-</entry><entry
- align="char">
-<para>The number of times to repeat each frame.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EPERM</para>
-</entry><entry
- align="char">
-<para>Mode VIDEO_SOURCE_MEMORY not selected.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_GET_CAPABILITIES"
-role="subsection"><title>VIDEO_GET_CAPABILITIES</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call asks the video device about its decoding capabilities. On success
- it returns an integer which has bits set according to the defines in section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_GET_CAPABILITIES,
- unsigned int &#x22C6;cap);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_CAPABILITIES for this
- command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>unsigned int *cap</para>
-</entry><entry
- align="char">
-<para>Pointer to a location where to store the capability
- information.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
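-<para>An illustrative sketch (assuming an open fd and the VIDEO_CAP_* defines shown in
-the data types section above): read the capability word and test individual bits.
-</para>
-<programlisting>
- /* sketch only: query and test the decoder capabilities */
- unsigned int cap = 0;
-
- if (ioctl(fd, VIDEO_GET_CAPABILITIES, &amp;cap) == 0) {
-         if (cap &amp; VIDEO_CAP_MPEG2)
-                 printf("hardware can decode MPEG2\n");
-         if (cap &amp; VIDEO_CAP_SYS)
-                 printf("system/program streams are accepted\n");
- }
-</programlisting>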
-
-</section><section id="VIDEO_SET_ID"
-role="subsection"><title>VIDEO_SET_ID</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl selects which sub-stream is to be decoded if a program or system
- stream is sent to the video device.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(int fd, int request = VIDEO_SET_ID, int
- id);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_ID for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int id</para>
-</entry><entry
- align="char">
-<para>video sub-stream id</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>Invalid sub-stream id.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_CLEAR_BUFFER"
-role="subsection"><title>VIDEO_CLEAR_BUFFER</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl call clears all video buffers in the driver and in the decoder hardware.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_CLEAR_BUFFER);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_CLEAR_BUFFER for this command.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_SET_STREAMTYPE"
-role="subsection"><title>VIDEO_SET_STREAMTYPE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl tells the driver which kind of stream to expect when data is written to it. If
- this call is not used, the default of video PES is assumed. Some drivers might not
- support this call and always expect PES.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>int ioctl(fd, int request = VIDEO_SET_STREAMTYPE,
- int type);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_STREAMTYPE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int type</para>
-</entry><entry
- align="char">
-<para>stream type</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_SET_FORMAT"
-role="subsection"><title>VIDEO_SET_FORMAT</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl sets the screen format (aspect ratio) of the connected output device
- (TV) so that the output of the decoder can be adjusted accordingly.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_SET_FORMAT,
- video_format_t format);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_FORMAT for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_format_t
- format</para>
-</entry><entry
- align="char">
-<para>video format of TV as defined in section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>format is not a valid video format.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_SET_SYSTEM"
-role="subsection"><title>VIDEO_SET_SYSTEM</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl sets the television output format. The format (see section ??) may
- vary from the color format of the displayed MPEG stream. If the hardware is
- not able to display the requested format the call will return an error.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_SET_SYSTEM ,
- video_system_t system);</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_SYSTEM for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_system_t
- system</para>
-</entry><entry
- align="char">
-<para>video system of TV output.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>system is not a valid or supported video system.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_SET_HIGHLIGHT"
-role="subsection"><title>VIDEO_SET_HIGHLIGHT</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl sets the SPU highlight information for the menu access of a DVD.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_SET_HIGHLIGHT
- ,video_highlight_t &#x22C6;vhilite)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_HIGHLIGHT for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_highlight_t
- *vhilite</para>
-</entry><entry
- align="char">
-<para>SPU Highlight information according to section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-
-</section><section id="VIDEO_SET_SPU"
-role="subsection"><title>VIDEO_SET_SPU</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl activates or deactivates SPU decoding in a DVD input stream. It can
- only be used if the driver is able to handle a DVD stream.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_SET_SPU ,
- video_spu_t &#x22C6;spu)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_SPU for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_spu_t *spu</para>
-</entry><entry
- align="char">
-<para>SPU decoding (de)activation and subid setting according
- to section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>input is not a valid spu setting or driver cannot handle
- SPU.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_SET_SPU_PALETTE"
-role="subsection"><title>VIDEO_SET_SPU_PALETTE</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl sets the SPU color palette.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_SET_SPU_PALETTE
- ,video_spu_palette_t &#x22C6;palette )</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_SPU_PALETTE for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_spu_palette_t
- *palette</para>
-</entry><entry
- align="char">
-<para>SPU palette according to section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>input is not a valid palette or driver doesn&#8217;t handle SPU.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_GET_NAVI"
-role="subsection"><title>VIDEO_GET_NAVI</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl returns navigational information from the DVD stream. This is
- especially needed if an encoded stream has to be decoded by the hardware.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_GET_NAVI ,
- video_navi_pack_t &#x22C6;navipack)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_GET_NAVI for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_navi_pack_t
- *navipack</para>
-</entry><entry
- align="char">
-<para>PCI or DSI pack (private stream 2) according to section
- ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EFAULT</para>
-</entry><entry
- align="char">
-<para>driver is not able to return navigational information.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-
-</section><section id="VIDEO_SET_ATTRIBUTES"
-role="subsection"><title>VIDEO_SET_ATTRIBUTES</title>
-<para>DESCRIPTION
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para>This ioctl is intended for DVD playback and allows you to set certain
- information about the stream. Some hardware may not need this information,
- but the call also tells the hardware to prepare for DVD playback.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>SYNOPSIS
-</para>
-<informaltable><tgroup cols="1"><tbody><row><entry
- align="char">
-<para> int ioctl(fd, int request = VIDEO_SET_ATTRIBUTES
- ,video_attributes_t vattr)</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-<para>PARAMETERS
-</para>
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>int fd</para>
-</entry><entry
- align="char">
-<para>File descriptor returned by a previous call to open().</para>
-</entry>
- </row><row><entry
- align="char">
-<para>int request</para>
-</entry><entry
- align="char">
-<para>Equals VIDEO_SET_ATTRIBUTES for this command.</para>
-</entry>
- </row><row><entry
- align="char">
-<para>video_attributes_t
- vattr</para>
-</entry><entry
- align="char">
-<para>video attributes according to section ??.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
-&return-value-dvb;
-<informaltable><tgroup cols="2"><tbody><row><entry
- align="char">
-<para>EINVAL</para>
-</entry><entry
- align="char">
-<para>input is not a valid attribute setting.</para>
-</entry>
- </row></tbody></tgroup></informaltable>
- </section></section>
diff --git a/Documentation/DocBook/media/dvbstb.png.b64 b/Documentation/DocBook/media/dvbstb.png.b64
deleted file mode 100644
index e8b52fde3d11..000000000000
--- a/Documentation/DocBook/media/dvbstb.png.b64
+++ /dev/null
@@ -1,398 +0,0 @@
-iVBORw0KGgoAAAANSUhEUgAAAzMAAAGaCAYAAAA7Jx25AAAABmJLR0QAAAAAAAD5Q7t/AAAACXBI
-WXMAAA3XAAANiQFmEOuiAAAgAElEQVR42uzdd1RU18I28GdgKFZUBE0saFA0KoqFFkEhKhbAQmxJ
-bIkNNEpMEUwsMZarJMZrw4KxRExQczUqil0jRBA1GAjGQqLYC4TemdnfH76cj3HodYDntxaLmTll
-zuw57Zmz9z4yIYQAkYZzcnJCSkoKGjZsyMIgIiIiquPS09PRoEEDyBhmqCaQyWRo06YN3nvvPRYG
-ERERUR137Ngx/Pnnn5CzKKgmMDAwwKpVqxhmiIiIiAj29vZ4//33ocWiICIiIiKimohhhoiIiIiI
-GGaIiIiIiIgYZoiIiIiIiBhmiIiIiIiIYYaIiIiIiIhhhoiIiIiIiGGGiIiIiIgYZoiIiIiIiBhm
-iIiIiIiIGGaIiIiIiIgYZoiIiIiIiGGGiIiIiIiIYYaIiIiIiIhhhoiIiIiIGGaIiIiIiIgYZoiI
-iIiIiBhmiIiIiIiIYYaIiIiIiIhhhoiIiIiIqFLIWQRElSMsLAy2trZo1KgR5HJualTxEhIS8P33
-3+PDDz+sM5+5bdu2ePDgAZo2bcoVgCplm3J0dMS5c+fqzGf++uuvsWTJEm5TVClSU1ORk5ODBw8e
-oHXr1gwzRDVJbm4uAGDRokUwMDBggVCFmzlzJrKysurUZ3727BksLCzg4eHBFYAq3IIFC5CQkFCn
-PnNGRgYAYNWqVVwBqMJFRUVh48aNUCqVlfYeDDNElWzGjBkMM1QpNm7cWOc+c8uWLTFjxgzMmDGD
-KwBVuLt37yIkJKTOfW5nZ2duU1SpYaYysc0MERERERHVSAwzRERERETEMENERERERMQwQ0RERERE
-xDBDREREREQMM0RERERERAwzREREREREDDNERERERMQwQ0RERERExDBDRERERETEMENERERERMQw
-Q0REREREDDNEREREREQMM0RERERERAwzRERERETEMENERERERMQwQ0RERERExDBDREREREQMM0RE
-RERERAwzREREREREDDNEREREREQMM0RERERExDBDRERERETEMENERERERMQwQ0REREREDDNERERE
-REQMM0RERERERAwzRERERETEMENERERERMQwQ0REREREVGnkLAKimunBgwdISkoq8/SGhoZ47bXX
-WJCV6NmzZwgMDMS5c+ewd+9eFgiVSVZWFkJCQnD16lU8evQICoUChoaG6NChA2xsbNCxY0fIZDI8
-efIEp06dwuTJk0s876CgIJiYmKBLly4saKq2Y5Wuri6aNm0KQ0NDaGnxd3ZimCGqE/78808EBgbi
-p59+QkJCgsowLS0tyGQy6blSqYQQQmWcjz/+GGvXrmVBVoKtW7di+/btuHbtGoQQMDQ0ZKFQqf37
-77/w8fHBtm3bkJCQgCZNmsDS0hLGxsZ48OABtm/fjidPnsDU1BR2dnYICwtDz549SxxmlEol5s6d
-CxsbG+zZs4cFTpV2rDpx4gQOHDiAJ0+eqAzT09ODUqlETk4OAEBfXx/dunWDvb093Nzc0LdvX5Vj
-GVFBGH+JaqihQ4di06ZNOHr0qMrrly5dgkKhQG5urvSnVCqRlZWF27dvY8mSJQCA7OxsFmIlmTFj
-Bs6ePctfu6nMTp48iTfffBOrV6+Gnp4e9uzZg+fPn+PUqVPw9/fHkSNH8PDhQxw9ehRCCOzevRu3
-bt1CWlpaqd4jJiYG+/btw+PHj1noVGnHqnXr1uHcuXMqr+/fvx8ZGRnIzs5GSkoKIiIi8M0330BH
-Rwdr166Fvb09evXqhdOnT7MQiWGGqDazsrJSeV5Y1TFdXV107NgRX331FSZPniz9ElbTnDp1SuOX
-USaToXHjxujevTtXUCq1H3/8EcOGDcPz58/RtWtXREREYMKECdDR0VE9gGtpwcXFBdeuXYONjQ0A
-ID09vcTvs2HDBgBATk4OfH19WfBUqTp16gS5/P9XCDI3N5euujRs2BAWFhb46KOP8Ntvv+HIkSNo
-3rw5rl+/DicnJ3z66adQKpUsRGKYIaqNdHR0Sl3HeNy4cTXyysyBAwdq1EkX635TaV29ehVTpkyB
-UqlEw4YNcfToUbRs2bLIaZo0aYIjR47AyMioxFdm7ty5g6CgIGhrawMAtmzZgoyMDH4BVGlkMhl0
-dXVLNJ6rqyvCwsLQqlUrAMB3332Hjz/+mIVIDDNEtfkgURqOjo5YunRpjfqMd+7cwfTp0/llU62l
-VCoxY8YM6arp/Pnz0b59+xJNa2RkBC8vrxJfmfH19YWVlRUmTJgAAIiPj2cnFaRRxypTU1McOnRI
-CtwbNmzA4cOHWYjEMENUl+Xm5iIhIQH6+vowMTEpcJz8HQUIIdQ6DijoBKy0CppnUfN59uwZnJ2d
-S9V7mxCiVMtW2mWqiPckyu/EiROIiIgAAGhra8Pd3b1U00+aNAlZWVnFjpeamoqdO3di9uzZmD17
-tvT6f//732K3d6KqZGlpiRkzZkjPvby8it3HlmY/XNh+v6jtoCTHRU1RlmNSac8BGGaIqEpduXIF
-CxYsUHs9MTERfn5+sLa2xrVr15CSkoJJkyZBX18fbdq0QWRkpMrOLTAwEMOHD4epqSnat2+Pxo0b
-o3///vDz8yu0LU5ubi7Onz8PDw8PmJubS+87d+5cGBoaQi6Xw8LCQq2x5+XLl2Fra4s7d+4AAEJD
-Q+Hi4gIXFxfMnz9fZdzs7Gz4+vrC2toa+vr60NHRQdeuXeHj41PgSV5Zl+lVx44dw8CBA/Haa6+h
-Q4cO6NmzJw4cOFBn17OgoCC1XouoeD/++KP02NbWFkZGRqWa3sjICDt37ix2PH9/f8jlcowdOxaW
-lpawtrYGAERHR+Ps2bP8IjRQaGgooqKi6mTYnDNnjvT41q1buHTpkto4pdn3CyFw7do1eHt7o127
-dkhMTIQQAv7+/rCwsIBcLkfTpk3x8ccfS9Wxc3NzsXnzZvTu3Ru6urqoX78+3n33XbWeRPfv34/x
-48dLx6jFixdLw5KSkjB37lwMHz5cGp6/hkRsbCzmz58vDcv7++KLL5Cbm4vDhw9j7Nix0utz587F
-s2fPylUWZTkH0NTURqTxDAwMxN69e2vUMgcHBwsAIjExsdLfS1tbWwAQAMTdu3cLHW/hwoVi5syZ
-0vMrV66IESNGCF1dXWn63377TfTv31/o6+tLry1YsEAIIUR6eroYPXq00NPTE7t37xY5OTlCCCFu
-374t+vbtKwCIHj16iNjYWJX3PXXqlHBycpLm16JFCxEdHS06duwoHB0dhYuLi6hfv74AIHR0dMQf
-f/whTfvXX3+J06dPC2NjYwFA2NraitOnT4vTp0+L8PBwabynT5+KPn36iOnTp4vIyEjx6NEjcejQ
-IdGiRQsBQPTt21ekpaVVyDLlUSgUYvbs2UIul4stW7aI7OxsIYQQ0dHRwsLCQjRq1EgAEIaGhpXy
-vZubmwtfX1+NW/fzyrRdu3Zi5syZIiAgQDx58qRC5t22bVuN/MwVoVWrVlLZeXp6Vsp7KJVK0bVr
-V+Hl5SW95u/vL72vs7NznT7WeHt7Czs7O41brmnTpgkAwsDAQIwYMUKsX79eREZGCqVSWSGfuaq+
-9wYNGkjr2l9//VXi6dq3by9Nt3jxYpVhpdn3X7p0SYwePVrI5XKV5Rg8eLCwsrIS7u7u4u2335aG
-ff755+LJkyfirbfeEo6OjmLWrFli1KhRQktLSwAQrq6uast67949af6DBw9WGx4dHS0ds18td6VS
-KZYtWya9f+/evVWGr1y5Uujq6oqAgIACv/vSHgdLew5QFpGRkQKA2nlBRQgMDBQGBgaCYYYYZmpZ
-mDl48KAIDQ2V/i5duiTOnj0rvv76a6Grq6sSZtLS0kR2drZ0oAQgnJycxKFDh0RqaqqYOHGiaNKk
-iTh9+rRQKpVi7NixAkCBJ5MpKSmic+fOAoDo1KmTSElJURtn6NChAoDQ19cXlpaWIiIiQhr2xx9/
-SJ9jypQpatOamJgIAGLEiBFqw7Kzs0WfPn3EqFGj1Hbw+/fvlz6bt7d3hS7TokWLBACxZs0atWGP
-Hz+WDtx1Lcw0a9ZMKnMdHR3pwF4R4aa2hpnk5GSpzApbpyrC2bNnhUwmU/nRIzMzU/qxAIC4efMm
-w4yG8fDwkE6gtbS0hJ6eXoWFm5oQZkaOHClNN3r06HLv+xcsWCANs7GxUflhTKlUSu/XoEEDYWlp
-KS5cuKAy/erVq6XpY2Ji1JbX1NS00DCT/3hWULkrlUoxZMgQ6bvOK6f09HTRsWNH8d133xU4z7KU
-RWnOARhmiBhmqizMFPeXP8zk+eGHH6ThX331VYHvcezYMQFANG3aVGRlZRU4zpEjR6T5fPHFF2rD
-P/roI2n4s2fP1Ib369dPCkOlCTNbt24VAMS5c+fUhmVmZkq/MDVt2lS6mlTeZbpx44bQ1tYWhoaG
-hZbH8OHD62SYMTIyKnT9K2+4qa1h5p9//lEpp61bt1bK+4waNarAX5PzgjkAMWvWLIYZDQwz+a8m
-5P8rb7ipCWFmxowZ0nSOjo7l3vdv375dml9YWJjatPv27ZOGb9q0SW34zZs3peG7du1SG96pU6ci
-w0xe2Cms3O/fvy9d2XdychJKpVJMmzZNODg4CIVCUeA05TkOluQcQJPDjJw1UYlql5s3b6o07hdC
-IC0tDZcuXcKHH35Y4DT5718xZMiQAsfJ6xLZysqq0O41hw0bBmNjYzx//hxbt27F0qVLVe4rkNcr
-DQAYGxurTZ/XDWd8fHypPrOfnx8AIDw8HNHR0WrDmzVrhsePHyMhIQE3btxQuf9LWZdp3bp1UCgU
-GDhwYKHl0ahRI66Qr8jfpurevXvYsWMHvv/+e+Tm5qJdu3YYPHgwHB0d0b9//2K7JK7NFApFhc8z
-NjYWhw8fxvHjx9WGzZw5EytXroRCocCuXbuwfPlyNG3alCtsDZB3U+S8dhlHjx7FiRMnkJWVBQMD
-Azg4OGDAgAFwcHBAt27dSt37pSbIv8z5j1dl3ffn3+83aNCg0P1+3jxe1aJFC+nxw4cPK/zztmnT
-Bt988w3c3d1x6tQpTJo0CYcPH0ZUVFShXf6X5zhYknMATcYwQ1TL6OnpQV9fX+W1evXqYfjw4Viw
-YIHUkL4w+Xfy+Q+WFy5cAAA0b968yGn79++PAwcOID4+HtHR0ejRo0eJlz1vJy1K0cg1OTkZ165d
-g7a2dqGNzseMGaP2HuVZJiGE1EVo586dq/Uk5v79+7h27ZpGrYO5ubllDjfff/89tm/fDoVCIYWb
-3r17w9XVtVaHm1eDQ1xcXIW/x5YtW/DGG29g0KBBBZ68ubm54cCBA0hPT8f27dvx+eef18l9aFpa
-msZtUy9evChzuDly5AiCgoKQnZ2Nxo0bw9HREf369YO9vT369OlTI76Tf//9V3r8+uuvV/q+v6Dj
-oMrJc74f6Srr/kzTp0/Hvn37cP78efj7+2Pjxo2F9kJakWVR3GdnmCGqQ4QG9jrz9ttv4+7du6We
-Lj4+XroZX3Enqp06dZIeP3z4sFRhpizu3r0rdT+5Zs2aKtkRv3jxAk+fPgVQvVdfsrOzsWrVKqxa
-tarWbDf516979+5h69atUkifOnVqpVyx0ARNmjSRrmoCQExMTIXOPyMjA35+flAoFCq/yL66nefZ
-sGED5s2bp3LSVlfcuHGjxpzkl/RYlNcrV3JyMg4fPiz9GOPo6CiFA01269Yt6XHv3r2rbd9flbS0
-tODn5wdzc3NkZGTg1KlTmDVrVoFX1mp7WTDMEFWDFy9eYPny5Rq3XD179sTGjRtLPV3+E8i8k/jC
-GBoaSo+rYoeaF7KEELh//36JbzJYHvl/Nc/MzKy271NPTw/ffvttodUHq4uZmVmZryzo6ekhKysL
-enp6sLS0hJOTE/r37w8bGxvo6uoiMDCw1u437OzscPDgQQAvu+KtSAEBAUhNTYWPj0+Rv8quWLEC
-T58+xYMHD3Do0CGVX3Prip49exZYFa86ffbZZ/jhhx9KddUzj46ODpRKJZRKJTp06IChQ4fCwcEB
-ffv2hbGxMRYsWIDExESNPp5GRUVJz11cXKpt31/V0tLSpOPvkSNHsG/fPowfP14jjoMMM0S12Llz
-5zBgwABMnTq11nym5s2bQ0dHBzk5OYiOjoYQotB61/lv0PXGG29U+rLVr19fehwSElIlO3E9PT3p
-8d9//11t34tMJkP9+vU1rm1Daerk6+vrIzMzUyW8ODo6Ftk2q7YaO3asFGbu3LmDqKgo6f5H5SGE
-wIYNGzBmzBjMnTu3yHHj4+Px1VdfAXh5E826GGby7jOiSfLvc4qjq6sLhUIBpVKJjh07YsiQIXBw
-cIC9vX2R1YQ11c6dO6WaDi4uLmjXrl217furUlZWFiZMmIClS5di48aNePToEebMmYMBAwao3YOq
-tpdFcXjTTKIKolQqsXz5cgwYMAAAMHz48Fp1cM+7sV5cXBxu3LhR6Lh59XVbtWqFjh07VvqymZqa
-SifPfn5+RVbvS01NxcyZM8v9nq1bt5YaTF64cIF3TS+FvPZcenp6sLOzwxdffIHg4GAkJycjODgY
-ixYtgp2dXZ0LMgDg5uamchLy3XffVch8L168iIiICEyfPr3YcWfOnCmt25cuXUJ4eDhXWg2nq6sL
-bW1tyGQymJmZwd3dHQcOHMCLFy9w69YtrFu3DqNGjaqRQebx48dSNVpdXV2sXr26Wvf9FaUkx4yF
-CxeicePGmD9/PjZv3iwdfz09PTXiOMgwQ1TLvHjxAoMGDcKSJUsAvLxjcUE9oFTWTjH/1ZDKOrH+
-4IMPpMcBAQGFjpd38uPu7l7qXnOKWva8eeXV/c7TqFEjKWgFBwdjz549BU6fm5uLKVOmwMnJqdzL
-pKenh/79+wN4WVc5KCioyGnrWtjJq/Lwanixt7dneCmCjo4ONm3aJD3ftWsXTp8+XeLpExMT4erq
-qnZX8NWrV8PMzAz29vbFzqNly5YYPXq09Hzt2rXcwWsApVIpVTcqaXjJX+W3Jp3E50lISMDIkSOR
-kJAAANi0aRO6dOlSZfv+8sirYp2enl5gGaSkpBQ5/a+//oqNGzfCz88PWlpacHV1xbvvvgsA+Omn
-n3D06NEqPQ4yzBDVcufPn0fXrl1x8eJFKJVKaGlp4bPPPquy909PT1c5QJSlZ5X8YaiwOtmTJk2C
-paUlAGDz5s0F1rG+ffs2goODYWZmhnnz5qkNL67xdl7PVgUd8PKqfdy+fVsanp2djcePH6v8UjVt
-2jSsX79e6s0HeFllx8XFBdnZ2XBzc6uQZcr/+Tw8PNS650xMTERISAiAl41uU1NT68w2kXcAfzW8
-XLx4keGlGEOHDsWaNWuk525ubiVqJxQaGgpLS0v069dPpdvYK1euICgoCGPGjCnxjwvvvfee9PjA
-gQPVWpWS/v8JsBCixoWXV/e1+dsYFhZshBA4deoU+vTpgytXrkBXVxfbtm3DtGnT1MYt674//zGv
-LMfE4n5AzLvCevnyZdy+fVulDFasWCHtIwu6DUFycjImT56MBQsW4M0335ReX7dunfQdu7u7qx2D
-y3McLMk5AMMMUS2kVCrx9ddfY+DAgYiPj0dubi7kcjnGjx+Ptm3bVtlynDx5UuV5YVcJipK/u+bf
-f/+9wHHkcjkOHTqETp06IT4+HhMmTFD5BT4hIQETJ05EmzZtEBgYWGDf/flPivJ6bcp/QPjzzz8B
-vOxONDk5WWW4ra2tNI/58+fj4MGDeOedd/Dvv/9i3LhxGDdunBQ+PD090bx5c/To0QOmpqYwMzND
-UlIS/P391U7oyrpMw4YNg4eHBwDg/v376NWrF1asWIHAwEBs27YNjo6OMDAwkA4O3bt3r3WX9gtz
-7do1ZGVlMbyU0SeffIJ9+/ahWbNmSE1NhaurK9zc3HDy5EmVX3pTU1MRFBSEd955By4uLli2bJlK
-d8qZmZmYNWuWtP2WVF6bhLyTr3nz5hV78keVa968eYiLi6tR4eVV58+fV1mP9u3bh5iYGPz999/4
-/fffcfjwYSxatAg9evTA4MGDce/ePbi5ueH69euFVpEs674/f2+BBQWK2NhY6XFB1aofPHggPX78
-+LHa8LyaDNnZ2bCzs4OXlxe8vb1hbm4OIQQcHBwAAGFhYXj//fdx7Ngx6bxi2rRpUCqV8PLyUpmn
-kZGRVPvj8ePHmDJlikrwKM9xsCTnAJqe9ok0noGBgdi7d6/GLM+TJ0+Eg4OD2h2ZZTKZiIyMFEII
-ERwcLACIxMTESlkGPz8/MWjQoALvCm1nZyc+/fTTYudx5swZ4erqKrS0tKRp9fT0xKRJk8T27dsL
-nCYpKUnMmzdPNGrUSLz++utixowZ4sMPPxQmJibC3d1dxMXFqU0TGhoqxo8fr7KMXbp0EV9++aUQ
-QoiTJ08KJycnleE9e/YU27Ztk+YRGxsrWrduLQ1//fXXxfnz56XhOTk5YsmSJdJdk/P+DAwMxMKF
-C0VGRkaFL5NCoRDffPONaNq0qcp4JiYm4ty5c+L9998XhoaGwsPDQwQHBxd65+ayMjc3F76+vnVq
-X9C2bds685nj4uLEkiVLRNu2bVX2MYaGhqJZs2YCgGjTpo1YtGiR2na3f/9+0blzZ2k6uVwuRo0a
-JY4fP17o+/31119i8uTJol27dmr7lN69e4tffvml1pe5t7e3sLOzq1PblLe3d6F3oq+oY5WTk5PQ
-1dVVW6/y/zVu3Fh06dJFjB07VmzatEk8fPiwRPMvzb7/119/FbNmzRJ6enoq+2tvb28RGxsr/v77
-bzFv3jzRvHlzabiurq7w8PAQFy5cEBkZGeLzzz8X7du3V9m23N3dRWBgoPQ+SqVSLF26VOX43KxZ
-M+n44ezsLNq3by+8vb1FaGioyM3NFcHBweLtt98WAISRkZHw8fFR+Zznzp0TDg4OKp/R1tZWZZsu
-7XGwLOcApRUZGSkAiNjY2ApftwIDA4WBgYGQCbZcpRqgSZMm8PX1Van+UJ2/Lo0ZMwYpKSkq7Tfk
-cjkcHR1x6tQpAC97FLG3t0diYqL0C31tkpWVhT/++ANxcXFo2rQpevToodKjSmVIS0tDWFiY1PNV
-QT38ZGRk4Pr160hISICRkRG6d+9eqp6AyloW169fR1xcHIyNjdGzZ0/I5XLExMTAxMRE5e7KFal7
-9+7w8PCQrhDVBSYmJvD29q5Tn1kIgdu3b+PPP/9EXFwclEoljI2N0bVrV3Tq1KlG3tFdUy1YsAAh
-ISEIDg6uU585Kiqqxnd7Xh37/uK8ePEC169fR7169dCnTx+pDeE///yDdu3alfomzjWxLKKiotC9
-e3fExsZWeK2VY8eO4f3332fXzEQlpVAosGzZMixbtky6HPzq8C+++KLOlIeenh6srKyq9D0bNGgg
-9RZXmHr16klV0qqyLPIaX+bXoUMHbjhUbjKZDJ06dVK5IS0RVf++vzhGRkYYNGiQ2uuVfdsCTSyL
-ysQwQ1QCT58+xZgxYxAWFlZg3XEtLS1YWFhI9WCJiIiIqPKxAwCiYpw5cwbdunVDeHh4kb18LFy4
-kIVFRERExDBDVP0UCgUWL14MJycnJCQkqN3fJI9MJoOJiQlGjBjBQiMiIiKqQqxmRlSAR48eYfz4
-8QgLC5P69y+MtrY2vvjii0pryEdEREREDDNEJXLq1CkMHjwYurq6Jbp5lIGBASZNmsSCIyIiIqpi
-/CmZKB8hBPbv3w8AhVYry09XVxdeXl68ISARERERwwxR9ZLJZNi+fTvWr18PLS2tYquO6ejoYMaM
-GSw4IiIiIoYZIs0wZ84cnDlzBo0aNSr0hoe6urr46KOPauUNMYmIiIgYZohqMEdHR4SGhkJbW7vA
-O2wrlUp4enqyoIiIiIgYZog0z6pVq9CsWTO4uLhAW1tbel1XVxeTJ0/Ga6+9xkIiIiIiYpgh0izr
-1q1DQEAA/ve//+Hw4cNYvnw5tLS0IJPJkJ2dDS8vLxYSEREREcMMkWa5cOECPvvsM/j6+sLGxgYy
-mQze3t4IDAyEEAI2Njbo2LEjC4qIiIioGvE+M0SvuH//PsaOHYtp06Zh6tSpKsOGDh2KW7duISsr
-iwVFRERExDBDpDnS09Ph5uYGMzMzrFu3rsBxzMzMWFBEREREDDNEmsXDwwNPnz7F1atXeSNMIiIi
-IoYZopohr8H/r7/+ipYtW7JAiIiIiBhmiDRfXoP/LVu2wMbGhgVCREREVAOwNzOq84pq8E9ERERE
-DDNEGqkkDf6JiIiISDOxmhnVaWzwT0RERFQLwsz333+P77//Hg0aNGCpUIVTKBTIycnB//73Pxgb
-G2vEMrHBPxEREVEtCTMxMTEIDQ2Fl5cXS4UqXFRUFM6fP4/MzEyNWB42+CciIiKqRWEGAJydnbFq
-1SqWClVKmDl+/LhGLAsb/BMRERHVDuwAgOoUNvgnIiIiqj3YAQDVKWzwT0RERMQwQ1TjsME/ERER
-EcMMUY3DBv9EREREtQ/bzFCtxwb/RERERAwzRDUOG/wTERER1V6sZka1Ghv8ExERETHMENU4bPBP
-RERExDBDVOOwwT8RERFR7cc2M1TrsME/EREREcMMUY3DBv9EREREdQermVGtwgb/RERERAwzRDWO
-pjb4X716NQwMDPgFUYWLioqqc5/54cOHWL16NZKTk7kCUKXsr83Nzevc5z527BhWr17NFaAYycnJ
-uH//Ptq1a4eGDRuyQDTkOMUwQ7WCJjb4b9iwIUxMTHDixAloabFGJ1W8Nm3awMjIqE59ZkdHR9y/
-fx8HDhzgCkAVrl27dujZs2ed+sytWrVCmzZtuE29QqlUIi0tTeUvJycHMpkM+vr66NKlCwupBLKz
-s2FqalqptWUYZqjG09QG/xYWFrh37x6/IKIKdObMGRYCUQX66KOP8NFHH9XpMlAoFIiOjkZ4eDgu
-X76My5cv48aNGxBCoHPnznBycoKVlRVsbGwQFRWFhQsX4urVq1x5NATDDNVobPBPREREpfHo0SMp
-tFy+fBnXrhOJlUQAACAASURBVF1DamoqWrZsCWtra4wfPx42Njbo06cPGjdurDLtjRs3WIAMM0QV
-hw3+iYiIqDCpqam4evUqwsPDERYWhvDwcDx69Aj169dH7969YWVlhdmzZ8Pa2hpt27ZlgTHMEFUd
-TW3wT0RERFWvuOpi1tbWWLhwIWxsbNCtWzfI5TwNZpghqiaa2OCfiIiIqk55qosRwwxRtdHUBv9E
-RERUOVhdjBhmqFZgg38iIqLajdXFiGGGai02+CciIqpdWF2MGGaoTjhx4gT27dvHBv9EREQ1FKuL
-EcMM1Um5ubn48ccfsXXrVjb4JyIiqgFYXYwYZojwssF/eno63n77bTb4JyIi0lCsLkYMM0SvyGvw
-r6WlhUmTJrFAiIiINACrixHDDFEJeHh44NmzZ2jQoAEvPxMREVUDVhcjhhmiMli3bh0CAgLw66+/
-YsiQISwQIiKiKlCS6mLW1tawtLRkdTFimCEqyIULF/DZZ59hy5YtbPBPRERUSVhdjBhmiCrY/fv3
-MXbsWEybNo0N/omIiCoIq4sRw0wVCAoKgomJCbp06cJvpw7Ka/BvZmaGdevWsUCIiIjKiNXFiGGm
-iimVSsydOxc2NjbYs2cPv506KK/B/5UrV6Crq8sCISIiKgFWFyOGGQ1w8uRJxMTEIDY2FqtXr8br
-r7/Ob6gOyd/gv2XLliwQIiKiAigUCty4cUPlqgurixHDjAbYsGEDACAnJwe+vr5Yvnx5lb7/qVOn
-4OTkxLWiGrDBPxERUcFYXYyoBoSZO3fuICgoCNra2lAoFNiyZQu+/PJL1KtXr0re/8CBA9i7dy/D
-TDVgg38iIqKXWF2MqIaGGV9fX1hZWeHNN9/E7t27ER8fj71792LatGlVEqSmT58OBwcHrhFVjA3+
-iYiormJ1Mc329OlThISEYPTo0UWOFxMTg3/++Yc/iNflMJOamoqdO3di/fr1UpgBgP/+97+YOnUq
-ZDJZieclhFAbX6lUQktLq8Dxnz17BmdnZyQlJZVqmYUQEEIUOt/yLFNFTfvqfACUqiyrAhv8ExFR
-XcHqYjXLzZs3MW7cOMTFxaFp06aFjrdnzx4cPXqUYaaaaGnCQvj7+0Mul2Ps2LGwtLSEtbU1ACA6
-Ohpnz54tdvrc3FycP38eHh4eMDc3BwAkJiZi7ty5MDQ0hFwuh4WFBU6fPq0y3eXLl2Fra4s7d+4A
-AEJDQ+Hi4gIXFxfMnz9f7X2ys7Ph6+sLa2tr6OvrQ0dHB127doWPjw+ysrIqZJnKO21+V69excSJ
-E2Fvb4/Bgwejbdu26N27N3bs2CGFm+qU1+D/wIEDbPBPRES1SmpqKi5cuAAfHx+4ubmhdevWaN26
-NSZOnIjQ0FD06dMHO3bsQGxsLJ48eYJffvkFX3zxBQYMGMAgoyFsbGygq6uLkJCQIse7cOECHB0d
-WWDVRfwfb29v4ezsLKqaUqkUXbt2FV5eXtJr/v7+AoAAUOwynTp1Sjg5OUnjt2jRQkRHR4uOHTsK
-R0dH4eLiIurXry8ACB0dHfHHH39I0/7111/i9OnTwtjYWAAQtra24vTp0+L06dMiPDxc5X2ePn0q
-+vTpI6ZPny4iIyPFo0ePxKFDh0SLFi0EANG3b1+RlpZW7mUqz7T5bdq0SchkMuHp6SkUCoUQQoi0
-tDRhZ2cnAIgVK1ZU6fccGRkpAIjY2FghhBDnz58XcrlcbN++vUTTGxgYiL179woiIiJNk5ubKyIj
-I4Wfn5+YNm2aMDc3F9ra2kJLS0t06dJFfPDBB2Lz5s0iIiJC5OTksMBqkH79+olPPvlEer53717R
-tm1b6Xl6errQ09MTR44cYWFVscDAQGFgYCCqPcycPXtWyGQycffuXem1zMxMKWAAEDdv3ix2PkOH
-DhUAhL6+vrC0tBQRERHSsD/++ENoa2sLAGLKlClq05qYmAgAYsSIEQXOOzs7W/Tp00eMGjVKKJVK
-lWH79++XltPb27vClqk80z548EAafurUKZVhAQEBAoBo1KiRyMrKqpYwExsbK4yMjIS7u3uJp2eY
-ISIiTfHw4UPxv//9T8yfP1/0799fNGzYUAAQLVu2FCNGjBArVqwQZ86cEUlJSSysGm7x4sWiZ8+e
-hYaZ8+fPC21tbZGQkMDCqqYwU+3VzDZu3AgXFxe0a9dOek1PTw8zZ86Unq9fv77Y+ZiamgIAMjMz
-ERgYCAsLC2lY9+7d0bdvX6kqWWnt3LkTV69exZw5c9TanAwfPhz6+voAgK1btyI3N7dClqk8096+
-fRsKhQIAEBcXpzLM2NgYAJCSkoK7d+9W+fedkZHBBv9ERMTqYlQjODo64o8//kBCQkKBw8+fP4/u
-3bujSZMmLKxqUq0dAMTGxuLw4cM4fvy42rCZM2di5cqVUCgU2LVrF5YvX15k4yttbW21E/b8WrVq
-BQCIj48v9XL6+fkBAMLDwxEdHa02vFmzZnj8+DESEhJw48YNdO/evdzLVJ5p+/Xrh08//RRZWVkY
-OXKkyrD8Yay0nR5UhC+//JIN/omISCOxdzF6lY2NDXR0dBASEgJXV1e14WwvU8fDzJYtW/DGG29g
-0KBBBZ6su7m54cCBA0hPT8f27dvx+eefl/m98nr/EqVs+J6cnIxr165BW1sbT548KXCcMWPGqL1P
-ZS5TcdPK5XJ8++23Kq+lp6cjICAAO3bskF5TKpVV/p0fOXIEFy9eZIN/IiKqduxdjIqjr68Pa2tr
-XLhwQS3MZGRk4PLly/jss89YUHUxzGRkZMDPzw8KhUK6kvGq/FcdNmzYgHnz5lX5ryB3796FEAJK
-pRJr1qxRuWJSE9y7dw/r16/HrVu3MHXqVCxZsqRauw5csWIFbGxsuOUREVGV4s0oqawcHBxw9OhR
-tdcvX76M3Nxc2Nvbs5DqYpgJCAhAamoqfHx8iryasWLFCjx9+hQPHjzAoUOHVK6CVIW0tDQAL6+A
-3L9/H+3bt68RX2xaWhoWLFgAf39/+Pr6Ys2aNZDJZLhw4UK1Ltft27dx//59HiiIiKjSsLoYVSRH
-R0csX74ciYmJKq+zvUwdDjNCCGzYsAFjxozB3Llzixw3Pj4eX331FYCXN9Gs6jBTv3596XFISEiN
-CDNJSUlwdHREREQEgoKCMGTIEI1Ztl69esHExASTJk2Cn58f280QEVG5sboYVaa8djPBwcEqr7O9
-jGaolt7MLl68iIiICEyfPr3YcWfOnAkdHR0AwKVLlxAeHl6ly2pqaio1mvfz8yuyfUtqaqpKL2zV
-ZeXKlYiIiICJiYlGBRkAcHZ2BgD88MMPePPNN3HixAluhUREVGLsXYyqWv52M3ny2ss4ODiwgOpi
-mFm9ejXMzMxKVMewZcuWGD16tPR87dq1ZXrPokJIXljJzs5WG9aoUSNYW1sDAIKDg7Fnz54C55Gb
-m4spU6aUqj1KWRr+l2TavN7h9PT01Ibl5ORU+0oXFRUF4GV7JGdnZ4wcORL//PMPt0YiIlKhUCgQ
-FRWF7du3Y/r06VKVngEDBmD37t1o0qQJFi5ciIiICCQlJeHixYv49ttvMWbMGFZnpgrl4OCA8+fP
-S8/DwsLYXqauhpkrV64gKCgIY8aMUbtnS2Hee+896fGBAwfw999/F7jDK0reSXxBISCvy+fbt29L
-w7Ozs/H48WMAgKenpzTutGnTsH79emRlZUmv3blzBy4uLsjOzoabm1uFLFN5ps3rpezOnTv4888/
-pdczMzOxe/dulV8VyhuqyqJbt2745JNPYGBgAF1dXcTExKBbt2746quvpGUiIiLNdP36ddy5c6dS
-5v3o0SMcPHgQXl5ecHBwQJMmTdC9e3csWrQIL168wPjx43Hy5EkkJCQgOjoaO3bsgLu7OywsLNju
-hSpV3v1m0tPTAbysYtajRw+2l6lrYSYzMxOzZs0CgFLtdPLfUFOhUGDevHlq3QrnDzjPnz9XGSaE
-kE7qk5KSkJycrDLc1tZWmsf8+fNx8OBBvPPOO/j3338BAOPGjcO4ceOkEOHp6YnmzZujR48eMDU1
-hZmZGZKSkuDv768S0MqzTOWZNu/qkBACgwYNwsKFCzF79mz06NEDXbt2lcZbtWoVlixZgh9++KHK
-V7wlS5ZAX18fPXr0wN27dzFr1ixs3rwZXbp0waFDh7hlEhFpmN9//x0jR45Er169EBAQUO75sboY
-1SR57WZu3rwphRlWMdMQ4v94e3sLZ2dnUVn2798vOnfuLAAIAEIul4tRo0aJ48ePFzrNX3/9JSZP
-nizatWsnTZf317t3b/HLL7+I0NBQMX78eJVhXbp0EV9++aUQQoiTJ08KJycnleE9e/YU27Ztk94n
-NjZWtG7dWhr++uuvi/Pnz6ssS05OjliyZIlo1KiRyrwMDAzEwoULRUZGhjRueZapIj5PUlKScHBw
-UBnH2dlZxMTECIVCIczNzaXXR40aJdLT00Vli4yMFABEbGys9NquXbuEvr6+mDx5sqhfv744dOiQ
-mDt3rpDL5WLIkCHi1q1b0rgGBgZi7969goiIqtaVK1eEq6urkMlkwsnJSYSEhJR6Hrm5uSIyMlL4
-+fmJadOmCXNzc6GtrS20tLREly5dxAcffCA2b94sIiIiRE5ODgudNFK/fv3EsGHDRJs2bYSenp44
-cuQIC6UaBQYGCgMDAyET/1fHaMGCBYiKikJgYGCdDHVpaWkICwuDnp4eLC0tC2xvArysmnX9+nUk
-JCTAyMgI3bt3L3Tcag6piIyMxJMnT/Dmm2/CxMREGpaSkoLQ0FA0b94cPXv2LHF1v/KIiopC9+7d
-ERsbK9VjFkLA3t4eTZo0QYcOHeDn54fDhw+jRYsWmDNnDkJDQ/HJJ5/gyy+/ROvWreHr66tS5ZCI
-iCpPeHg4vv76axw7dgxDhgzB4sWLpZoMxSmudzErKyv2LkY1zpIlS+Dv74/U1FTEx8cjLi6O1cyq
-0bFjx/D++++DFUz/T4MGDTBgwIBix6tXr16Jd+bVSSaToUePHujRo4fasEaNGlXrjTPzL+PGjRvR
-p08f/PLLLwCAESNG4PDhwzh//jwCAgLw6aefwt/fXyM6LiAiqgvCwsLw9ddfIygoCMOGDUNYWJjU
-EU5BeDNKqivy7jfToEEDtpfRIAwzVK0sLCwwY8YMzJs3D5GRkSqB5t1334Wrqyu+/vprfPPNN1i+
-fLlaux8iIqoYv/32G77++mucPn0azs7OCA8Ph6Wlpco4vBkl1WXW1taQy+VISUlhexmGGaL/b9my
-Zfj555/x7bffSl1v5wWagQMHwsfHB1u2bIFcLoeFhQXmzJmDxYsX8xcRIqIKEBwcjKVLl+LcuXNw
-dXXFlStX0Lt3bwBFVxezsrLizSipTqlXrx7Mzc1x7do1hhmGGaL/z9DQECtXroSnpycmT55cYKDR
-0tKCt7c39PX1MW/ePAQEBGD9+vUq9yAiIqKS+/XXX7F06VJcuHABI0eOREhICLKzs3H27FmsWLGC
-1cWICtC8eXMA4P1lGGaIVH344YfYunUrPv30Uxw4cEAt0ORxc3PD0KFDsXLlSrXuuYmIqHjnz5/H
-0qVLERwcjF69emHUqFG4c+cO+vXrx+piRMXw9vbGixcvWDuEYYZIlZaWFjZs2IC+ffvi1KlTcHJy
-Ugk0+Xtcq1evHpYtW8ZCIyIqhb1792LChAkAAG1tbSiVSsTGxsLIyAgjR47E2rVrWV2MqBjW1tYY
-PHgwC4JhhkidjY0NPvzwQ3h6eiIyMhI6OjpSoFm/fr10o9Ca4s8//8TEiRPRrFkzaGlp8QumCvf8
-+XMsX74crq6udeYzv/POO3j48CFPuEshOTkZMTEx0o2ggZcN+QHgxYsXCAoKQlBQEJYtWwaZTCZd
-hcn7r6Ojo/JcLpdDJpNBW1sbLVq0gKGhYa0pq3///RdvvfUWNmzYUGfWj++//x7r16+HsbExN5YS
-ysrKwqBBg1gQJZCdnY309HQcO3as0tYxhhnSKCtXroSZmRnWrVuHzz77DDKZDGvXrsWWLVuwZs0a
-vP322xg4cGCN+CyJiYm4fv063N3dYWBgwC+XKtzq1avx8OHDOvWZDx48iMaNG8PDw4MrQCk4OjpK
-ISYnJ0f6r1QqkZ2drfZfCIGsrCzpf94JHABkZmZK/9u3b4+OHTvWqm0qOzu7Tq0bMTExiIyMhJeX
-FzcUqnBRUVG4ePGitN9gmKFaz8jICEuXLsWXX36Jd999F61atYJMJoO+vj5sbW1VOgWoKVatWsUw
-Q5Xi+PHjde4zt23bFt7e3gwzVClkMhlCQkLq3Od2dnbGqlWruAJQpYSZyj5Wse4LaZzZs2ejQ4cO
-mD9/vsrrEyZMwPTp0zFixAicOXOGBUVERERUxzHMkMbR1tbG+vXrERAQgODgYOn1vCpnDDRERERE
-BLCaGWkoe3t7zJw5E8+ePVN5PS/QAKiRVc6IiIiIiGGG6gBfX98CX2egISIiIiKGGaqxGGiIiIiI
-iGGGGGiIiIiIiGGGiIGGiIiIiBhmiBhoiIiIiIhhhhhoiIiIiIhhhoiBhoiIiIgYZogYaIiIiIiI
-YYaIgYaIiIiIYYaIgYaIiIiIGGaIGGiIiIiIiGGGiIGGiIiIiBhmiIGGiIiIiBhmiBhoiIiIiIhh
-hoiBhoiIiIgYZogYaIiIiIgYZogYaIiIiIiIYYaIgYaIiIiIGGaIGGiIiIiIGGaIGGiIiIiIiGGG
-iIGGiIiIiBhmiBhoiIiIiIhhhoiBhoiIiIhhhoiBhoiIiIiqNcxcuHABZ86cYalQhYuKimKgISIi
-IqLKCzNpaWkYNGgQS4XqPAYaIiIiohoUZv7zn//gP//5D0uEiIGGiIiIqGaFGSJioCmMEALh4eE4
-ceIEnj17BmNjY1haWuLtt99GvXr1kJiYiJ9//hnTpk2Tpnnw4AGSkpLK/J6GhoZ47bXXih0vKysL
-ISEhuHr1Kh49egSFQgFDQ0N06NABNjY26NixI2QyGZ48eYJTp05h8uTJXLGpWgUFBcHExARdunTR
-mGV69uwZAgMDce7cOezdu1dl2L179+Dh4QEDAwNs3boVBgYG/BKpytbLFy9eFDq8adOmaNWqVYHH
-rOjo6AKnad++PRo0aFBh63ZR2w4xzBAx0GiAFy9eYNKkSThx4gSaN28OW1tb3L9/H76+vsjKyoKL
-iwsePnwIuVyuEmb+/PNPBAYG4qeffkJCQoLKPLW0tCCTyaTnSqUSQgiVcT7++GOp3Avy77//wsfH
-B9u2bUNCQgKaNGkCS0tLGBsb48GDB9i+fTuePHkCU1NT2NnZISwsDD179mSYoWqlVCoxd+5c2NjY
-YM+ePdW+PFu3bsX27dtx7do1CCFgaGioNs6aNWtw4sQJAICtrS08PT35RVKV+Oeff+Dn54ddu3ap
-HCM6deqE8ePHo1+/foWGmaCgIPz22284fPgwAMDAwACzZs3CRx99JIWZ8qzbJdl2qIoIohrAwMBA
-7N27t1qXQalUCk9PT1G/fn1x+vTpYscPDg4WAERiYmKNLfeMjAxhYWEhAIjJkyeLjIwMaVh2drbY
-vHmzqFevngAgOnfuXOA8QkJCBADpLyQkpMDxsrKyxO3bt8WSJUsEADFr1qxCl+vEiRPC2NhYABAt
-W7YUe/bsEdnZ2SrjKBQKcfToUfHGG29I7+3q6lqrtgtzc3Ph6+tbp/YFbdu2rdGf+fjx4wKA0NHR
-EY8ePar25VEqlSIpKUl07dpVABCGhoZq42zbtk0AEDKZTJw9e7ZWr1/e3t7Czs6uTm1T3t7ewtnZ
-WaOX0cfHR+U48tNPP5V42i5duggA4sSJExW6bpdk2yEhIiMjBQARGxtb4fMODAwUBgYGQotxjqh0
-V2imT5+OESNG1Ime/7Zt24br16+jSZMm2Lx5M/T19aVhOjo6cHd3x5kzZ1CvXj08evSowHlYWVmp
-PC/oVzQA0NXVRceOHfHVV19h8uTJyMnJKXC8H3/8EcOGDcPz58/RtWtXREREYMKECdDR0VG7+uPi
-4oJr167BxsYGAJCens4VmarVhg0bAAA5OTnw9fXViP1a48aN0b1790LHmT59Oi5fvoyoqCi8/fbb
-/BKpys2bNw9vvvmm9DwkJKSkP9gjLi4OdnZ2GDx4cIWu2yXZdqhqMMwQMdAU6siRIwAAU1NT1KtX
-r8Bx3nrrLXzzzTdISUlBSkqK2nAdHR1oaZVuVzNu3DhkZ2ervX716lVMmTIFSqUSDRs2xNGjR9Gy
-Zcsi59WkSRMcOXIERkZGSEtL40pM1ebOnTsICgqCtrY2AGDLli3IyMjQjJOBYrZRKysrdO3alV8i
-VQu5XI7FixdLz/39/Uu0Pw8PD8fz58/x0UcfVdq6XdrjGzHMEDHQVKHHjx9LJ2FFXdWYPn06WrRo
-IY1fUJmVhqOjI5YuXarymlKpxIwZM6QrNvPnz0f79u1LND8jIyN4eXnxygxVK19fX1hZWWHChAkA
-gPj4eDYYJiqh0aNHw8TEBACQlJSEHTt2FDvN999/j+bNm2PkyJEsQIYZIqqLgaZJkyYAgOTkZCxY
-sKDQ8XR1dTFp0iT8+++/5X7PFy9eQF9fXzpo5Tlx4gQiIiIAANra2nB3dy/VfCdNmoSsrCyuvFQt
-UlNTsXPnTsyePRuzZ8+WXv/vf/+r1vlFVRBCQKlUlmm6kirL/IkKI5fL8fHHH0vP165dC4VCUej4
-KSkp+PHHHzF58mTo6elV2Lpd1m2nPNNyW2KYIWKgKaP8N9Fdv349Pv744wKrfwGAj48PbG1ty/V+
-SqWy0J7ifvzxR+mxra0tjIyMSjVvIyMj7Ny5kysuVQt/f3/I5XKMHTsWlpaWsLa2BgBER0fj7Nmz
-RU67f/9+jB8/Hi4uLnBxcVGpbpOUlIS5c+di+PDh0vBXr2rmd+zYMQwcOBCvvfYaOnTogJ49e+LA
-gQNFvn9KSgp++OEHDB48GOvWrSvyRC0wMBDDhw+Hqakp2rdvj8aNG6N///7w8/MrtB0cUUlNnTpV
-6j757t27Uk9lhR0z0tLSMH369HKv22XddgAgOzsbvr6+sLa2hr6+PnR0dNC1a1f4+PgU+gMbt6XS
-p0Qi9mZWCb2c1YbezOLi4sRrr72m0ouMhYWFCA8PL9V8tLW1penv3r1b6HghISHCxMSkwGGtWrWS
-5uHp6cmNgr2Z1RhKpVJ07dpVeHl5Sa/5+/tL63NJepK6d++ekMvlAoAYPHiw2vDo6GhpOytofgqF
-QsyePVvI5XKxZcsWqfe/6OhoYWFhIRo1aqTWI1N0dLQYO3as0NfXl5b1m2++KXD50tPTxejRo4We
-np7YvXu3yMnJEUIIcfv2bdG3b18BQPTo0aNSejRib2a1vzez/D7//HNpfezbt2+h21zPnj1Fv379
-ChxemnW7LNtOnqdPn4o+ffqI6dOni8jISPHo0SNx6NAh0aJFC2n509LSauW2VJW9mTHMEMNMJQWa
-2hBmhBDi999/F82bN1cJNADExIkTxcOHD0sdZg4ePChCQ0NV/n799VexZcsW0a5duwLDTHJyssp7
-r1mzhhsFw0yNcfbsWSGTyVSCfGZmptS9OABx8+bNYudjampaaJgRQggTE5NCw8yiRYsK3XYeP34s
-GjRooHZClpqaKjIzM8Xu3buLPOFTKpVi7NixAkCB301KSoro3LmzACA6deokUlJSGGYYZsrswYMH
-UrAHIMLCwtTGuXLligAg/P39C5xHSdftsm47Qry8fUGfPn3EqFGjhFKpVBm2f/9+6X29vb1r5bZU
-lWGG1cyIWOWsSD179sS1a9fUuq3cs2cPOnfujG3btpWqHr2bmxtsbW1V/vr37w93d3fcu3evwGni
-4uJUnjds2JArHdUYGzduhIuLC9q1aye9pqenh5kzZ6pU4yyOXC4v0/C//voLK1euhKGhYYG9Or32
-2msYMGCA2usNGjSAnp4e7O3ti3zfoKAg7N+/H02bNsXUqVPVhjds2BA+Pj4AgFu3buE///kPVwoq
-s9atW2PcuHHS8++++05tnK1bt6Jp06Z45513CpxHSdftsm47ALBz505cvXoVc+bMUesEZ/jw4dKt
-DrZu3Yrc3FxuS+XAMENUSYHm6tWrteaztW3bFmfOnMG+ffvwxhtvSK+npqZi5syZRd4X5lU3b95E
-RkaG9Jeeno6kpCRcvXoVffv2LdE8imr0SaRJYmNjcfjwYZVG/3lmzpwpddO8a9cuJCQkVMoyrFu3
-DgqFAgMHDoSurm6B4zRq1KjQ6V+9h9Or8u6XY2VlVej8hw0bBmNjY7WTN6Ky+OSTT6THP//8M2Jj
-Y6XnycnJ+OmnnzBx4kSVe6OVZd0uz7bj5+cH4GX30Bs3blT58/PzQ7NmzQAACQkJuHHjBrclhhki
-zQs0RfX+VVM/29ixY3Hjxg0sXbpU5VfgPXv2YPLkySW6QqOnpwd9fX3pr169emjcuDF69+6NzZs3
-FzhN06ZNVZ6/eqWGSFNt2bIFb7zxhkpnGnlatWoFNzc3AC9v6Lp9+/YKf38hhNRIunPnzhU+f6VS
-iQsXLgAAmjdvXuh42tra6N+/P4CXXVJHR0dz5aAy69WrFxwcHKR1MP+VzZI0/K/sbSc5ORnXrl2D
-trY2njx5gpiYGLW/MWPGwNPTE56entDS0uK2VA5ybhJElRNoHj9+XKKeTmoaPT09LF68GEOGDMGI
-ESPw9OlTAMBPP/2EUaNGYcyYMWWed7du3dCqVSu115s0aQJjY2M8f/4cABATE8MVjTReRkYG/Pz8
-oFAoCr1LeHx8vPR4w4YNmDdvXrHVyUrjxYsX0jZa1NWXsoqPj5duXljcL8SdOnWSHj98+BA9evTg
-SkJl9umnn0on/35+fliyZAkaNWqErVu3wtbWFt26dau2befu3btSN8xr1qyRrsAW937clhhmiDQq
-0MyZM6dGh5nU1FQ0aNCg0BteWllZITg4GLa2ttKVEl9f33KFGZlMht9++63AYXZ2djh48CAAIDQ0
-lCsZDE9QJwAAGAdJREFUabyAgACkpqbCx8enyLuEr1ixAk+fPsWDBw9w6NChcm1Dr8p/FTMzM7PC
-P2P+Kp95J36FMTQ0lB6X5OSOqCjDhg1Dp06dcOvWLaSkpGD79u2wt7fH9evXK6Qb/vJsO3mhRAiB
-+/fvl+gGz9yWGGaINDLQ1GTu7u6YNWsW3nrrrULH6dChA3x8fPDhhx8CAP74448KXYaHDx/CyMgI
-enp6GDt2rBRm7ty5g6ioKJibm3NFI40khMCGDRswZswYzJ07t8hx4+Pj8dVXXwF4eRPNigwz+W8W
-+Pfff1f452zevDl0dHSQk5OD6OhoCCEK3fflv/Ff/rZ3RGWhpaWFTz75ROpIY926dYiMjETjxo0r
-ZBsqz7ZTv3596XFISEiJwgy3pXKsC9wciKiwoLJ8+fJix8t/o8yS3GW5pJKSkmBpaSldbndzc1M5
-IBTUgw2Rprh48SIiIiJKVG9/5syZUkPkS5cuITw8vMwB6lWtW7eW5n3hwoVS9TxYEnK5XLoBaFxc
-nNSQuSBPnjwB8LKtUMeOHbmSULlNnDhRal9y//597N69GxMmTECDBg3KPe/ybDumpqZSEPHz8yty
-2ryOdLgtMcwQUSWEmaCgoCLvsAy8bBeQx8bGpsQnWcVZtGgROnfuLB2UdHR0sGnTJmn4rl27cPr0
-6RLPLzExEa6urnj27Bm/XKp0q1evhpmZWbFdvwJAy5YtMXr0aOn52rVrCxwvrzpJenp6gdtYSkqK
-2ut6enpSY+G7d+8iKCioyG20oG21uO33gw8+kB4HBAQUOl5eSHN3d6/xV65JM9SrVw+zZs1Sea00
-Df+LWrfLs+00atRICibBwcHYs2dPgdPm5uZiypQpcHJy4rbEMENElRFm8nauRf1ClL9dUP7uMvNk
-ZmaqXBIv7sQor3rOhg0b1HqAGjp0KNasWSM9d3NzQ2BgYLGfJTQ0FJaWlujXrx9atGjBL5cq1ZUr
-VxAUFIQxY8aU+ETjvffeU9mmCqrWkndl8vLly7h9+7b0ukKhwIoVK6SQk79TAQCYN2+e9NjDwwMP
-Hz5UC/ohISEAXvbClJqaqjI8f7frBTVMnjRpEiwtLQEAmzdvRmJioto4t2/fRnBwMMzMzFSWh6i8
-Zs2aJdUK6NOnDywsLEo8bXHrdnm2HU9PT+nxtGnTsH79emRlZUmv3blzBy4uLsjOzpZ6NeS2xDBD
-RJUQZhISEmBnZ4effvpJpYGiUqnEjh07pBt4LV++vMBfoV8NGzt27EBYWBhiYmJw79493L17Fzdv
-3sTFixexadMm2NvbS20MBg4cqDa/Tz75BPv27UOzZs2QmpoKV1dXuLm54eTJkyq/WKempiIoKAjv
-vPMOXFxcsGzZMnz++ef8YqlSZWZmSr8Ul6ZXsvw31FQoFJg3b57KjwB5PywAQHZ2Nuzs7ODl5QVv
-b2+Ym5tDCCF1VRsWFob3338fx44dA/CyobSHhweAl1VxevXqhRUrViAwMBDbtm2Do6MjDAwMpBO6
-7t27q9zQM/+PGbdu3VJbdrlcjkOHDqFTp06Ij4/HhAkTpAbQefuQiRMnok2bNggMDKyQKkBEeVq0
-aIGJEycCAGbMmFGqaYtbt8uz7YwbN066uWdOTg48PT3RvHlz9OjRA6ampjAzM0NSUhL8/f2lHz24
-LZWRIKoBDAwMxN69e2vUMgcHBwsAIjExsUaWuVKpFAYGBmLJkiXCy8tLmJmZiRYtWoghQ4YIV1dX
-0bZtWwFAmJqaip9//lltej8/PzFgwAChra0tAJT6z8DAQOTm5ha6fHFxcWLJkiXScgAQMplMGBoa
-imbNmgkAok2bNmLRokUiLi6uVm4X5ubmwtfXt07tC9q2bauxn3n//v2ic+fO0vool8vFqFGjxPHj
-xwud5q+//hKTJ08W7dq1U9sGevfuLX755ReVbXLp0qVCLpdL4zRr1kxs27ZNCCGEs7OzaN++vfD2
-9hahoaEq249CoRDffPONaNq0qcp7mJiYiHPnzon33/9/7d1/dNV1/cDxF9vdxgZzTn4VET/q2DIC
-A9EgEaxOBxI9kOmRUA6dOqEQET+UMDrQOXKC6mQHS4qAlHM8mgdPHTMMzBN4GHFU1CUJyVkonFJ+
-jDMYsrGN7dMfftnXxa8pG9y7PR5/7W73fnbv+3Mv7LnPfX12e9KtW7dk2rRpyebNm5OGhoZk06ZN
-yYwZM5Lu3bs3e41NnDgxeeyxx055LEeOHElmz56dFBYWJr17906mTp2afOMb30j69euX3HXXXRnx
-Opw/f34ycuTIDvWamj9/fjJu3LiMfgw7duxICgsLk6qqqhZd//08tz/Ia+ek+vr6ZNGiRUlhYeEp
-/7/94Ac/SGpqak57/9rDa+mkV199NYmIZM+ePa2+7T/96U9JUVFR0ilp7WlAaAOXXnppLF++vNlb
-MdJdaWlpXHfddXH48OGm39xkmqeffjpuuOGGk7/4iF27dkVZWVkcOnQo8vPzY9CgQTF06NCznnb2
-AvxCJnbt2hX/+Mc/oqKiIhobG6Nnz54xcODAKCkpadfvJx48eHBMmzat6TeHHUG/fv1i/vz5Heox
-/6+DBw9GWVlZ5Ofnx7Bhw5r+yvnu3bujf//+Z3091tbWRllZWVRUVETPnj1jyJAhkUqlory8PPr1
-63fOv4jeErW1tfH3v/89Kioqori4OK688spmZ3dKZ/fee2+UlpbG5s2bO8zz6d57743t27e36C27
-6ezll1+OoUOHttn2z+e1U1NTE2VlZVFZWRk9evSIwYMHt+iEOZn8Wjpp+/btMXjw4NizZ0/07du3
-Vbe9bt26uP32252aGTizkyET8e6ppktKSpr9sa50kK73C9pKjx49Tpkni2jZKVrz8vKaBpPf6+Tb
-SltDXl5eXHPNNXYUF1Rbhsz5vnby8/ObnfnTa6l1mZkBAADEDAAAgJgBAAAQMwAAgJgBAAAQMwAA
-AGIGAAAQMwAAAGIGAABAzAAAAIgZAABAzAAAAIgZAAAAMQMAAIgZAAAAMQMAACBmAAAAMQMAACBm
-AAAAxAwAAICYAQAAxAwAAICYAQAAEDMAAICYAQAAEDMAAABiBgAAEDMAAABiBgAAQMwAAACIGQAA
-QMwAAACIGQAAADEDAACIGQAAgDSTsgTQttauXRtFRUUWgla3ffv2DveYKyoqYu3atdG9e/cO85gb
-Ghri+PHj0aVLF096/163iXXr1sXatWs9AVqguro68vPzo1OnThYjTf6fEjPQVi+u1Lsvr9mzZ0dO
-To4FoU3k5eV1qMdbVFQUGzdujLKysg7zmOvq6qK6ujry8vKic+fOfohqQ5WVlTF69OgO9Zjz8/Mj
-IuLOO+/0BDiH2traqKmpiYKCgsjNzbUgLXDkyJGIiMjKars3g4kZaCPDhw+PJEksBLSit956q8M9
-5rq6uli1alUsXbo0qqqqYubMmTFr1qwoLi72hOC8LVy4MBYuXGghzmL9+vUxd+7c2Lt3b9x3330x
-Z86cpgjk4jMzAwBpLDc3N6ZPnx7l5eXxox/9KB566KEYMGBALFq0KCorKy0QtJHy8vIYP358jBs3
-LoYPHx67du2KBQsWCBkxAwCIGkhPhw8fjrvvvjsGDhwYVVVVsW3btli9enV8+MMftjhiBgAQNZB+
-Tpw4EcuXL4/LL7881q5dG2vWrImNGzfGkCFDLI6YAQBEDaSn9evXx5VXXhnf+973YtasWfHPf/4z
-Jk6caGHEDAAgaiA9mYsRMwCAqIGMYi5GzAAAogYyirkYMQMAiBrIOOZixAwAIGogo5iLETMAQDuN
-moULF4oa2iVzMWIGAGjnUfPwww+LGtoVczFiBgAQNZBxzMWIGQBA1IgaMkp5eXlMmDDBXIyYAQBE
-jaghM7x3LubIkSPmYsQMACBqRA3pzVwMYgYAEDVkHHMxiBkAQNSQUczFIGYAAFFDRjEXg5gBAFo9
-apYsWSJqaDPmYhAzAECbRc20adNEDW3CXAwt1SlJksQykPZP1E6dYvDgwTFp0iSLAZCGGhoa4sUX
-X4yNGzdGbW1tXHvttTFy5EjzDLwvhw4dinXr1sXOnTvjqquuijFjxkRhYaGF4RTr1q2LzZs3ixky
-wzXXXBNVVVXRtWtXiwGQxpIkiYqKiti3b180NDREz549o1evXpGdnW1xOGsMv/3223HgwIHo2rVr
-9OnTJwoKCiwMZ1RdXR1du3YVMwBA66urq4vVq1fHkiVLoqqqKmbOnBmzZ8+O4uJii0OTEydOxG9+
-85tYtGhRFBQUxI9//GNvJ+N9ETMAgKjhglu/fn3MnTs39u7dG/Pnz485c+Z4WyJiBgAQNaSv8vLy
-uPvuu+Opp56Kr3/967F48WKnWeYDczYzAKDNOfsZ/l4MbcGRGQDggnOkpuMwF4OYAQBEDRnHXAxi
-BgAQNWQUczFcKGZmAICLzkxN+2AuhgvNkRkAIO04UpNZzMUgZgAARE3GMReDmAEAEDUZxVwM6cDM
-DACQ9szUpA9zMaQTR2YAgIzjSM2FZy4GMQMAIGoyjrkYxAwAgKjJKOZiSHdmZgCAjGempnWZiyFT
-ODIDALQ7jtR8MP87F7N06dL42te+ZmEQMwAAoiZ9mYtBzAAAiJqMYi6GTGZmBgBo98zUnMpcDO2B
-IzMAQIfTHo/UvPnmm/Hmm2/G9ddff9brmYtBzAAAdPCoOXbsWDz77LMxfvz4i/44du3aFSUlJU2P
-KScn57TXMxdDe+NtZgBAh3U+bz/75S9/GRMmTIilS5de1Mewc+fOuPbaayM7OzuysrLigQceOOU6
-5eXlMWHChBg3blwMHz48du3aFQsWLBAyZDxHZgAA/k9Lj9QcO3YsPvrRj0ZlZWVkZ2fHlClTYsWK
-FZFKpS7o/S0rK4svfOELcfTo0Thx4kRERHTp0iXeeOON6NGjRxw+fDgWL14cv/jFL+Jzn/tc3H//
-/TFkyBA7GjEDANBRo+anP/1pLFiwIOrr6yMiIicnJ0aOHBl/+MMfoqio6ILcx5deeik+//nPR3V1
-dTQ0NDR9Pjc3NyZNmhRXX321uRjEDACAqPn/qLnzzjtj0KBBp7wNLTc3NwYMGBDPPPNM9O3bt03v
-15YtW2LMmDFx/PjxZiHzXgUFBfH973/fXAxiBgBA1LwbNQcPHoz6+vrTRkROTk5ccsklsWHDhrjq
-qqva5L5s2rQpvvzlL0ddXV00Njae9jpZWVkxZMiQ2LZtm52HmAEAIKKysjL69OkT1dXVZ7xOdnZ2
-pFKpePzxx1v9TGcbNmyI8ePHR319/RlD5r1B87vf/S5uvfVWO452y9nMAABaaNWqVU1zMmfS0NAQ
-dXV1cfPNN8eyZcta7Xs/9dRTceONN571iMx7JUkS3/3ud+P48eN2HGIGAKAjO3bsWCxZsuScMXMy
-JBobG2POnDnx7W9/+4xzLS31xBNPxM033xwnTpyIlr6pJkmSePvtty/6qaNBzAAAXGTLli2Lo0eP
-vq/bNDY2xsqVK+Omm26Kd9555wN930ceeSRuu+229xVEeXl50alTp4iIeO2118JUAe2VmRkAgHOo
-qamJgoKCiHj3rGV1dXXv6/a5ubnxiU98IjZs2BC9e/du8e1Wr14dU6dObdF8TCqVirq6uhgwYEBM
-mDAhxo4dG6NGjYrOnTvbgYgZAICObN++fbF169bYunVrPPfcc1FWVhZ1dXWRl5fXooH8nJycKC4u
-jmeffTYGDRp0zu/34IMPxne+850zHlXJy8uLurq6yM3NjS9+8Ytx0003xdixY6N///52FmIGAIAz
-q6+vj5dffjmef/75+Nvf/habNm2K/fv3RyqViuzs7KitrT3lNllZWZGXlxe///3vY+zYsWfc9s9+
-9rOYN29es0By9AXEDABAm/nPf/4TL7zwQmzZsiU2bdoUr776atTX10fnzp2jtra22VGWX/3qV3HX
-XXedso377rsvFi5cGBHvnua5sbHR0RcQMwAAF1ZdXV288sorsXXr1ti8eXOUlpbGgQMHmr7+pS99
-KdavXx9ZWe+ek2natGnx61//OiIi+vfvH1/5ylccfQExAwBcKPv27Ys//vGPFuIMDh8+HP/617/i
-6aefjoaGhujbt2/ccccdsX79+vj3v/8dH/rQh2L06NHRrVs3i3UWI0eOjE996lMWQsyIGQCg9ZSW
-lsZ1110XvXr1ii5duliQczj5N2mysrKaTqfM2e3evTuWL18e06ZNsxgdXMoSAABt4fXXX4+ioiIL
-QasbPHiwRSAi/NFMAABAzAAAAIgZAAAAMQMAAIgZAAAAMQMAACBmAAAAMQMAACBmAAAAxAwAAICY
-AQAAxAwAAICYAQAAEDMAAICYAQAAEDMAAABiBgAAEDMAAABiBgAAQMwAAACIGQAAQMwAAACIGQAA
-ADEDAACIGQAAADEDAAAgZgAAADEDAAAgZgAAAMQMAACAmAEAANqBlCUAADh/VVVVsXfv3vP7wSyV
-ik9+8pOxf//+OHjw4BmvV1xcHB/5yEdO+XySJPHaa6+d9jYDBgyILl262FGIGQAAmvvLX/4St9xy
-y3lto2fPnrF///7YvXt3rFy5Mh5++OFIkqTp6yUlJTFx4sQYNWrUGWPmz3/+c2zZsiWefPLJiIgo
-KiqK6dOnx4wZM8QMYgYAgFPV1NRERETv3r1jwYIF8dnPfjYuu+yySKVSUVpaGpMmTYqIiI997GPx
-3HPPRZIkcfz48XjrrbfiySefjGXLljVtY8SIETFixIi44oorYt68eU3f44c//GFMnDjxjPchKysr
-7rnnnrjnnnti4MCBsWPHjnj88cdjzJgxdhBiBgCA06uuro6cnJz461//GiUlJc2+1qNHj6aPc3Jy
-ok+fPk2XL7/88hg9enRceumlsXjx4ma3mz17djz00EOxc+fOiIgoLS09a8yclCRJVFRUxMiRI4UM
-7ZoTAAAAtIKampoYP378KSHTUjNmzIjGxsZoaGho+lwqlYqFCxc2XX7kkUfi2LFj59zWCy+8EAcO
-HIgZM2bYMYgZAADOHTPjxo37wLe/7LLLYtiwYXH8+PFmn7/llluiX79+ERFx5MiR+O1vf3vOba1e
-vTq6d+8eEyZMsGMQMwAAnN28efNiypQp57WNLVu2REFBQbPPpVKpmDVrVtPln//8582O3vyvo0eP
-xqOPPhpTpkyJvLw8OwYxAwDAOX6oysqKTp06ndc2srOzT7uNb37zm1FUVBQREW+88UbTmcpO59FH
-H41jx47Ft771LTsFMQMAwMVVWFgYU6dObbp8//33n/Z6SZLEihUrYtSoUR94dgfEDAAArWrmzJmR
-Sr17ItotW7bE888/f8p1XnrppXjllVeahQ+IGQAALqo+ffrEbbfd1nT5dEdnVqxYEcXFxfHVr37V
-giFmAABIH3PmzGn6+Iknnog9e/Y0Xa6qqorHHnssJk+eHJ07d7ZYiBkAANLH0KFD4/rrr4+IiMbG
-xnjggQeavmbwHzEDAEBamzt3btPHK1eujKqqqqbB/xEjRsSnP/1pi4SYAQAg/dxwww1NZyo7evRo
-rFq1KrZt2xZlZWUG/xEzAACk8Q9vWVnNZmeWLVsWDz74YFxyySVx6623WiDEDAAA6Wvy5MnRvXv3
-iIjYu3dvrFmzJu64447o0qWLxUHMAADQehobG1t1e/n5+TF9+vRmnzP4j5gBAKDVvfPOO00f19TU
-tMo2p0+fHnl5eRERMWzYsPjMZz5joREzAAC0rmeeeabp471798aOHTvOe5u9evWKyZMnR0QY/EfM
-AADQeg4dOhRTpkyJIUOGxIoVK5p97eqrr44bb7wxfvKTn5zX95gzZ04UFhbGxIkTLTgdUsoSAAC0
-vm7dusWaNWva9HtcccUVsWnTpigsLLTgdEiOzAAAZLChQ4daBMQMAACAmAEAABAzAAAAYgYAABAz
-AAAAYgYAAEDMAAAAYgYAAEDMAAAAiBkAAEDMAAAAiBkAAAAxAwAAIGYAAAAxAwAAIGYAAADEDAAA
-IGYAAADEDAAAgJgBAADEjCUAAADEDAAAgJgBAAAQMwAAgJgBAAAQMwAAAGIGAAAQMwAAAGIGAABA
-zAAAAIgZAABAzAAAAIgZAAAAMQMAAIgZAACA9JCyBABAW+jVq1cUFBRYCFpdZWWlRSAiIjolSZJY
-BgCgtVRUVMTGjRstBG1q6NCh8fGPf9xCiBkxAwAAZB4zMwAAQEZKvb79xaYLJYOutiIAAEBGcGQG
-AADISP8FpxZnWS0U37cAAAAASUVORK5CYII=
diff --git a/Documentation/DocBook/media/fieldseq_bt.gif.b64 b/Documentation/DocBook/media/fieldseq_bt.gif.b64
deleted file mode 100644
index b5b557b88158..000000000000
--- a/Documentation/DocBook/media/fieldseq_bt.gif.b64
+++ /dev/null
@@ -1,447 +0,0 @@
-R0lGODlhcwKfAucAAAAAAElJDK+vr0gSElYMDC8kDV5bEBcHOwYGSEQODmEaGgoKOBkTVC0tVyAg
-aDcJC6Ojoys8DAAYGqSkxV9fFFtdEJmZmUA4EF0wMAAAcAoTHTZHJ0gYGAcMTwcSO29ISFUHB2AV
-FXd3YAcHMRUVQiIAGg4HT3t7eywOJ3d3dwcHSEEgABMuDnd3OGpkSQAAYlZGBzEEBGJlDCstCxwc
-WQcHSzkRGWBtYC0AACA3ABAKNhAQTTMwDA0VQD4AAEYVFVVVVSQMJQULOB8fQScnYBgYRD5VPmZm
-DEZRB2ZiDAoKSgAAVAwQOH5+lBwcS+7u7hoaST4+X3d3WACPADMzMyBRIDgAAGBgc0JCEHEAAEwN
-DRkwDAoKOR8kPZR7eyA1IABpABgNQBA9EABVAAsLRww/DAwMPgBNAENDCgc9B8zMzAUFQQBDAD4M
-DAwOKgAAcQA5AEtLFYqKAA0NTC8HBxEREQgfCAArAAApACIqMkkGBhoqKnwAAAsGQ6qqqkoKCg4O
-MlkcHAoZJCcrW6SkpFQAAAAAOBAOSwAVGh0ROgMPHWZmB00QEGUAAFQaGjEyC2w4OLe3n4qKioiI
-iBAVMC4uXhkZUGIAAHJYWHd3AAAAPhAQUQUGL0BAIGggIBgAGkIVFV9fEAwcJR8KJA8MU9EAAAcH
-VRoaYWhoaDcAALu7AGZmZnAAAGRkZGQVFVhqWD4KCgwOUzMzDAAAmgklBzEHBzExClhYWBMTPAYJ
-Qy8fCFpaB///////ACISRExUDUQrDAwMVhISSEYYGHd3IDhcOERERElJAAkPNTsHF1hYckgGBj05
-CFYAADg4OCAVO0hCDDAwMLu7ilpaDR8qCDg+EBxGHN3d3REGNjo9CDQ8DBwYRGZmHFMAABQ+FBE+
-ESIiIhs+BxU0FWVeBw04DYqKsxAsEB8hQAwuDAc2BwwqDAoqCgcIL1dMDQAA0Q0iDQwiDAckBxAQ
-EDwAAAAAU0JCDAkJPru7u5oAADg4bAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAALwALAAAAABzAp8C
-AAj+AHkJHEiwoMGDCBMqXMiwocOHECNKnEixosWLGDNq3Mixo8ePIEOKHEmypMmTKFOqXMmypcuX
-MGPKnEmzps2bOHPq3Mkz5z0AQIMCSNHyZ0WjE5GqNAZAqcGmT+8VZMoL6k6rEp0KfEJl489VBcEB
-GGjBwk8LBJsyFQqU15MUdQDUWfVk4JNVccER5ZXCT8+/gAN/xFp0LEWtDxEfRKo44s8n1/YeJCyQ
-8GO+1xhSbvwxRWaklBEiXoVW4886BNW0FQjkyem6laUKdLqKSuZrQIxtpbLqc51JbsHBFkw8pYUT
-w4uHDK2SM0PnCqHPNiz9uWGFoS1fb7h5u0nQshf+ar2G2isAKn4FpqByHQivn8YkY3WK9RoANXx1
-kwUncBVw5QCSdAsA8jiDXIAeEQYXAMbgp0YKKQAFYVxEPbjgXm/FBURmD1pQxz1qsDfUdAVlCMCG
-vFg4lhpMgbOKYX6IBY5fHX642FBx0cULbnKlUFdQkgS1IxA91mWMBWJRYQF7dZQ2HVBIsdhjbG4R
-WUeE4f3UFlQN6hUiUK1puV1Q93Sp24LglNYlAGmmKGJrvBxJ4YWxiUkmLzGymZ6ULnqXAlgC5Tmj
-QRE2qd+NkwDKCziTwAjcT6rhV1WW28013D11UfHfVrItieCnHw0YVIEHgnoRVutllgJ/X+F54hP+
-fjT1FWR68WXbXV3dU4cxmBoDnGpSaZUqru/NJRUVrV3DXrHBAnCNrrwmN9Cs17jIC2+QUUEUY4Zh
-qyxR52X2IlFwFWSUUU/8tmObXBrzxBNEhveeYVCd5wd5RD1Ra3eVzajGPeCoSq8x4qJ2ZXDghvlq
-rFJBBR6z82aGbLbe+TqbjT9lNtCq4npHUMblqQEOUr3Nmx+VJJIVl7aSToqQan/ZydbMNNds8804
-56zzzjzXbGpFWA0q0IdKWSWrswIhyUuTAtn3L9IE2ddsQUzveF/GKUJtwVirSAZECliLpnUdqmns
-ockml500agCkx625YxnlqUCT6NaU2lbL+zD+AKXNpTHK09Lr5MaCk+h3WrIZ3fDULnc90Nd4b12Q
-VY6zJtmiTnoc+LV+QYiUfuiyS6lB96wHAKDMAa5TZBC27vrrsMcu++y012777bjDHg3N4Dgj7c8P
-YTXzPUUnTvx1Rgl/PFlUgBMv2gMp/zaJawUFtuabT6fUudTFjfxYVk2/uVERCmX38tHrTe/iTa8C
-Th1A4MevyykCsStV9Bt1jfvwy288lQ5bX5zYcr3spU8g1ZMQ4gbCH7HxIlGLetp7/sOYOjxhPthz
-FX40VRDPqA54IISIqAhkoN+F0CFBkxf0FBe2s1XNaVG6D5W08sKrGcY+ZuPa5aB3I7OBI3L+qHkb
-EPPXPbiZzAKHSh8Om6YdAKowQ/TLnlUOhrbwbQeKinNiFaVSuYEskReSI4iNeNFFg1StKgnRH9lY
-NZYUSEopX8NgWpIDlVURJFawSd0J92iQW5DKhHzkTnhS9UALFq9Op6MVuW5VG7RF6oFt46GtrkEs
-pRgjWcvSVbCIhrwdUqtW3tLWvOqClFCCSzbiIxG61PUtlxnDXfBqosusAgQ42TGSHwPAj2TDSqbs
-kkS1rMst/zdLxjkMYza6JMWmshdNQqx0fAkYGanjyW6J0iiHUkq65DiQV2aGWMFB0T1EmbVAmjMh
-lDwnRRS0MvwcclI/WZCOTAQnpSwoPgD+mIRW6EnK6zyBKYZKmozQYk/vqIiKPEKoWNRAHtQkdC9W
-TNn4DPMlKkIllldyYy61mKK1UAE/AI3aQq2CJDbBhW2oXJFH/YeyekmlodeSYUnb5BaAIrEgk3CP
-QNlUmgi55UVNMoxPNwpJd95HMk5p1Ojsghe5lGwrTa1V0nSqzqpadSXVmckOr7rHdAEyNbDRVU+A
-MDiumvWsHclqTO6xKbSeMAVlPcgqWgMvReWkWm7Nq14tota9+vUjb2FILF/FE7P89bCITaxiF8vY
-xjr2sZCNrGQnS9nKWvaymM2sZjfL2c569rOgDa1oR1uSfxHvtKhNrWpXy9rWuva1sI3+7WkBaVrZ
-2va2uM0tbNOo29769re7FQ1wh0vc3/K2uMhNbnAN4hrlOve5p21ZQvIxi+pa97rYza52t8vd7nr3
-u+CtbgZUOBBP4OO86E2vetfL3va6973wja98z1uIhHBDFfjNr373y9/++ve/AA6wgAeMX24kxBpg
-SLCCF8zgBjv4wRCOsIQnTOEEWyMhSwivhjfM4Q6DlwaiYcV8R0ziEptYvp5gSD7cweIWu/jFMI6x
-jGdM4xrb+MYsngV5BeKJUvj4x0AOspCHTOQiG/nISE6yjy+REGL04slQjrKUp0zlKlv5yljOspaf
-TIyEVGEKYA6zmMdM5jKb+cxoTrP+mtcM5iok5AU4jrOc50znGztANPhQsp73zOc+JznFC1lxnQdN
-6ELTWMcI6bGfF83oRhuZyQhx8pYnTelKWzrLXUbIl9nM6U57+tNqdjNC4GzoUpt60HdeTJ4dzepW
-LxrQChH0qWdN60PvmBeKdrWud/3oJl/618AONpYzfZBNg/rYyE72mUV9EFLX+tnQdkeqSbdqXlv7
-2qWAdULOYYZue/vb4A63uMdN7nKb+9zo7jYXIIAQDhDg3fCOt7znTe962/ve+M63vt9di4TI4ggA
-D7jAB07wghv84AhPuMIXDnBZJOQd6Ii4xCdO8Ypb/OIYz7jGN87xiL8jIexIt8j+R07ykqPbDQiB
-wB/2zfKWu/zl+uaAiqNNc1oj+iC5xrbOWw3pg0ha2EAP+qWJbRBjK/voSP80sw3i7Jo7ndDTNle1
-d051RmsbIbJ+utbnfHOD5LzqYN9zzw3yc6Gb/exWJnpBjJ70trvdzEsvSNO3TncbR/1jUw+73pF8
-9YNkve6Al3HXC/L1vRt+yGMvSNnRznjGq50gbH+75N8ed4LMPfCYb/Hdp5X3w3v+x303yN8zn/nB
-E6Twn/d84gmy+Ma7PuiPH0jkJ0/7o1d+IJcnfeA3P5vOp/7woS/IEOZA/OIb//jIT77yl8/85jv/
-+cUnBEJ+oIXqW//62M++9rf+z/3ue//74K8+LBKChWmY//zoT7/618/+9rv//fCPv/mxkBBzkOP+
-+M+//vfP//77//8AGIACeH/mkBD2AH0ImIAKuIDPxwQIQQjhF4ESOIEUGH4/MHO6p3umV16/14Gr
-NxCt93oi+GuxJxCzV3soCGq3JxC5l4F0x3vv4XsdqHfBRxCj54J0t4E8NoOp94ECEYIjGISTVoK8
-cIIpeIRstoK80II4+HQweA8yyINVV4MDcYNN+HQ6iGtSqHq+JoReWGlEaIRIOIbL9mZXuHt4toXA
-h4FnmIO3hnpquHM+yAtA+IV2OGVhSIZ6mIRm2IYvmIZxSIMMAQUvUIiGeIj+iJiIiriIjNiIjviI
-kFiIS8BuB5EA83CJmJiJmriJnNiJnviJoBiKoniJOJAQ2ZAJqJiKqriKrNiKrviKsBiLsjiLqJgN
-CREPbJCLuriLvNiLvviLwBiMwjiMxJiL8ZAQhhCJyriMzNiMkDgCKZcKoziN1FiN1iiKCcCGfoiF
-bxiIejeHdXiH4tgLebiH5liGo7aNW/eEUeiNvEaFAmGF6lhrWQiH7shr4DiO+oiHXnaO/khmSsiE
-8zhr7HiPU6iNAwlt9WiQVJeP+/iQ5NiP/ziRUxCQCVlzBcmQOgePvEAEHvaRIBmS3pUBAoAQrsAH
-KJmSKrmSLNmSLvmSMBn+kzI5kyjZDAkRCgSWkzq5kzw5YKGQEGJQYUI5lERZlBQmBhgmkkq5lCC5
-CQghAI1Ak1I5lVRZlTPpCgzRAG+wlVzZlV75lWAZlmI5lmRZlma5lfRQkgehACfWlm75lvBVXwgR
-B3JQl3Z5l3iZl3q5l3zZl375l4BZl3GQEOJwBoZ5mIiZmIq5mIzZmI75mJAZmYYpDgmhCWd5mZiZ
-mZppliTwlCIGl6AZmm2pAAh5kTbXjRqJbQ4JkfpYjhTpjxZpmtCWkalpbRwpj7JZaAtZm9a2mqwp
-jq75muYYm7lJa7TJm7p2m8VJj6iJnLrmm79ph8EpnHpInMtpasfpnKz+xpFOUAPe+Z3gGZ7iOZ7k
-WZ7meZ7omZ7eqQKUaBADAALwGZ/yOZ/0WZ/2eZ/4mZ/6uZ/wGQMJgQa7EKACOqAEWqAGeqAImqAK
-uqAMGqBokBDrkA4SOqEUWqEWeqEYmqEauqEc2qESug4JsQbqOaIkWqImmp4LkHJ6wJ8s2qIu+qL7
-OQCleZ2EtpvayXNdGJ13OJ3UOYbWSaOFlp03anUzCqR0ZqND2mjQqaNCyKM9eoQ/aqR1JqRJ2mfK
-KaW62ZxVumhLyqQj6KRPioJRiqVyRqVbqmdXSqZ1hqRnymdd6qWvB6ZhSntjqqZ2BohtaqUM0Z0n
-2qd++qfmyZ4I8Z7+MFqohnqo+OmfCAGgDdqojvqokLqgD4oQEeqhlnqpmJqpHAqiCCGigPqpoNqn
-KXoQELCiiHqqqFqoMhpodrqmWpqnevamcNp4cjqnklenrUpjZgqrRpamuYpjbMqrSCars4p2tWqr
-boervxpjuyqsQ+ary1pjweqsRUasxWp2x4qsSaes0epizUqtQMaRWrmZ5Fqu5jqWaYkQbCma7Nqu
-cZkQdBmY8jqv9FqvfzmYCFGYkrmv/Nqv/gqZlIkQlnmuBFuw5NqZByEAn+muDNuw+ECaC+GRTDmx
-FDuSamkQJ2mVGruxHAuTNokQONmTIjuyJAtgP4kQQWmUKruyLBv+YUiJEBlWsTI7s9XllAkblR2b
-szqrsVjJqt1qY9MKrkJmrdcKexKprWLahz+rq3gqtEUGrUsLY0HrtD9GtEUrbNmKtMrGrUv7rU4L
-tVHrYlNLtaVgtVcLbFmrtcjGtT/rtULLkVHgAHI7t3Rbt3Z7t3ibt3q7t3zbt3JLA7eGAZ4wuIRb
-uIZ7uIibuIq7uIzbuI47uKCQEJ1ADJRbuZZ7uZibuZq7uZzbuZ77uZTbCQnxBVVQuqZ7uqibuqq7
-uqzbuq77urBbul+QELjgt7Z7u7ibu317DqIRCI/7u8AbvMLruBhAWsZ7vMibvMq7vMzbvM77vNAb
-vdI7vdRbvdb+e73Ym73au73c273e+73gG77iO77kW77me77om77qy1VnBEblsRXNlEGawRa6ARr0
-K0nzEhRCcxDlwxZScRdxYSnRIxRwsr4GfMAPkQJUxQtAYFdDhRhqQCSEhRCh8TBGdMHumx5ptB3k
-gSK4UQcaYxXKssAIXMImPBCqARsXpMF+EBcSxUB0wRVJNDnkZcFEdcPRJB7bcUkFUUuAEysnHMRB
-TFMt7EVpUkRTEVYZVMEChMEGZDV/cyN2IUOpoUtRBMRCnMUHrMD9IRm+kkqE0kCTUcNNjMMvHEVS
-fMYcNcJa3Mbqm8JLIylcDMZ2ARe3VhVskTIzsy0eoxV6BD3+ilEv5vNVblzI3qskMXIx/aTGhUQw
-2EHGH4S/TvFFDrQVVIzCVvzHhrzJ3ptTldO/ZIIYq3LHB3TBTEw622FH0bHDJOzDaMzJsAy+9vFD
-qHzGFyRdCXHKryzJ1+EhGlzJTQM/t2E/IUzKsXzM19s8aSwzvDIzuQzJeJzHzJy/QLG/G1wiTXU4
-kYzM3NzN3vzN4BzO4jzOZpUCr3TOr4TLH4FE6PxKcUXO8BzP8jzP9FzP9nzP+JzP+rzP/NzP/vzP
-AP1YuTPQBF3QBn3QUexXrHPQDN3QDk3QCb1XpfPQFF3RFg0hjtUzGr3RHN3RPGPMZiUzHj3SJF3S
-NwPSXAX+yia90ixN0hm9VXr1Eyh9VTKdWDWNWEOF0/K7VyOCWDd9WD/9V0HtVzl9WEWtWD0N1Joc
-0kvNVUO9V0dN1DutV0ntBZhw1Vid1Vq91Vzd1V791WAd1mKN1YEz01b10zfwCmq91mzd1m791nAd
-13I913Rd12p9A2WdWDl9DWPd137914A91l5AOC/NgWRLZD331DGNFWKotlub1zrNeYdNZLAW1YiV
-1PZItond1DTN2I5Np5Bt1MjTjlRb2VOdV5g92YgX2kLt2Z99q6wt1ZKt2kFm2oW9g7QNZJtt1lX1
-04392ioY21A92rkdroTdWEnNCKm63Mytn8sg3IsdHur+oKnUXd3WvaHqAN15ldOE0Nze/d3yyQjH
-zVipXdxLpt1u5dvATXnojVY5DYXm7WO2jdySkdlUu9s27drrva3tfVbvTdpfO96LVd7mjd8+rd/7
-bXv9Xc7EHd/zTd71Hd9lu+BOjeAJnmzMptjb3eDm/eADLhmECt4inqqKOi+8rU4/XanXveIsrqmc
-auJ6fR2lOuI0fqqryhen7VYEXtwGrtTh8dsXvmYZztlW9d8S7uFIHeHx3eOt/eNBruDaE+Oz3eEC
-nuSGXeAU3tlO/uQYnuVFzuHFjeSJldTr6rBmLppyCeMHHh76+q9u/uZw3pgBq+aiPRAKe+Z4DpoQ
-i+P+t80LjpAFgB7ogj7ohF7ohn7oiJ7oir7ogO4DXt7bWOENLTvplL6y3vDo6pTTAtANjN7pnv7p
-oL7ojlDlY67kWB7lay57XN7lqF7nvXfkpH7Zps7jmH5O6r3qxzbkJ35ORu7gsX5YO57bTO5Xt47r
-Slfr5tTrVK4eOY5WST0MbRDt0j7t1F7t1n7t2J7t2r7t3B7tdIDsgfTT8FAG5F7u5n7u6J7u6r7u
-7N7u7v7u5A4P4M5HOQ0BD9Dt+J7v+r7v3D4Mv/5XwU7bwy7RFm7sfNjqf6XsYf7vfhXwqj3w0a3q
-Bu9pui7lr+7rzN7n9u20EJ9XxT7xB0/nCQ/muS3+5rJ+5bSO8MRe8CCPZhUf2Re/7HxO3wNBfRV4
-8zif8903fipP8OFhfwMY9EI/9EQfgAXY83rF3Tq/9Eyf8xeY8TSP26cu8iu/5S0v5PO+Rwpf8gzP
-07Mu7Fl/Qh9/9S4f9iG09bRt8sD+9QJv9iA09mQPd24PPGiv2moP8Gz/8HP/M3Af9wC596ZS95N9
-9w0vGe4Gc4if+Ip/b/2G9B6PFRDXcZI/+ZRf+Rv3cY7vVvW+covf+Z6P+DIH9RCO8mCf+WjV934v
-Zi/v6jEI66L/4aTf9qZ/Vqif+m0G+KAi+IdN+F4f+3o/+0xt9bb/98Cf0iSf9l1P1ZIRAnne/G3+
-meYant5Y8Q1jUP3Wf/3Yn/3av/3c3/3e//3gX/3fgPufoukL6/zoL18hkPyoLRl/HurwH//yj+iO
-XvxaPhCSXun6v/8Sdun2/+UAwUuggG5ZDB5EmFDhQoYNHT6EGNGgI4G8UgComFHjRo4dPX4EGVLk
-yIwAUlT0VErlSpYtXb6EGVPmTJo1VV6qeA/APZI9ff4E2lMnT4FVphxFmlTpUqZNnT6FGlXq0So5
-dwbFmlUryYs58dkEG1bs2JqeKnbdmlbt2oomUZKFG1fuTJwCh7LFm3fk3aJT/f4FHFhqVbtX9R5G
-nBEtr3tf5z6GDNeswMWJLSd2KzBlZM6d6Vr+JXpZtFq+vIwKRp1a9VPCjA2Php11cWPPtW2vnGwR
-Y2zeWzPzwuBJ+HDixY0fR55c+XLmzYWDAt1butDXX6pcx55d+3bu3b1/Bx9e/PUv0aefBzk7kHP2
-7d2/b47h7G709UH+tp/fdWj99kv3r+8/AM+rbEAC6TMwP/wS7E1ABmNz8MHRIpTwsgIrHO1CDGNb
-cEPLKPRQLxBDxGtEEtfS8MS8UlTxMABWuSdGGWeksUYbb8QxRx135DHHSV5rUUQAJumxSCOPRDLJ
-GH/kL0i8LlIySimnNHIVBJ1EDAAtt+SySy+/BDNMMccks0wzm8SSNDPXZLNNN9/0Es00t7r+CE47
-78TTzTkTo7JPP/+U8Yk92XoCUEMPRVLQQdW6BlFHH8VxUUknpbRSSy/FNFNNN+W0U08/BTVUUUcl
-tVRTT0U1VVVXZbVVV1+FNVZZZ6W1VltvxTVXXXfltdfD6rAgIwvqyOiJk1zDyktjkOVFWWYz0mlL
-cPz4qEsqQrtmlToAACcFQaPt8pYvj7WACi2pCFYgbjWyUk5f34V31xSAyAiIVRTbTaeN1ABCSyAU
-5QjIwtRFU1+NDOZlWGo7MuwJbQW9po5VruFFDSqo0EjgZvlbhYqF/aDi3mYBUCOjbd2NN2WVY1UD
-AEWfAGBhXvzYdmCNwFnliSeoWJbhgnf+0xhhqxQDx6PXYObJGHoreqLbktB8zQJwAOblCWCbpeLY
-mc1FeWWvv0YVnHRprugasfO9khdjFBU6458J3qjtZ3m5BgCKAw7Nap1J1mgSjNuCOjQqJtkoBYyH
-LFqgVZgEu3HHUZ1Xca2NIRLtj1JIHO+4gX774Cs1hrvqVaiVW2iNXwsao53AKZmXOuru+nHZZ6+0
-ZUHraD1yg+UWyNg6UPbSZi9PKv1zd50t/umNUH97pxROAnlj2qen/lJ0/Uj8njq+tXyjSeow5m6f
-Nw8d2rSFrlt8tyv6UQ2YW2f/7/IBr2hw7w+/R42iF5e+ev//T9Mk7HWsOnEpRmnDXOz++me++dmM
-gWfJ3PL4cxWlFctp9FufQIZFNasF6yqse90CAThCEmKobqwj39yqxreQpK6BKnyWwowWmu8JJGIT
-q9jFMqi8inTsYyGDm+H+BroSFtGI9qFCBIfmGi4ZA1xbmmEKv8TELS3ridOqFpeoAL9sbatbVBNh
-A8t1LsLBrWVlJOIR1bhGNrbRjW+EYxzlOMdQpcAYd8SjMeCHGAvkEY/pomMgBTlIQhbSkIdEZCIV
-uUhGNtKRj4RkJCU5SUpW0pKXxGR98rRJTnZyS1pD1RM9OUpSnmlVoixlKlXJJVgBwBjPg2UsZTlL
-WtbSlrfEZS51mcsOlUonq9hlMIX+OUxiFvN57Trli4y5TGY2M5jGSJuqelmqaY7KRKC65qey6SkW
-naqao/pmqLbZqXFyqpyb6qapwhmqdWozjaI6p6bimal0UlNryshHPvW5T37205//BGhABTpQguoT
-fu30VGlc0AKGNtShD4VoRCU6UYpW1KIXZagLzJOq0lSioB8FaUhFStAozKeVWsuHO1S6Upa21KUv
-hWlMZTpTmtZUpbMIDULJ+Rpi9MKnPwVqUIU6VKIW1ahHRWpSfUqMjYbyNS+waVSlOlWq1tQBJn3V
-b1JaVa521asyxWlbQHmq0vRUqWdFa1rVilSmFkaB7gwNVL86V7py9aqUiWaqtFr+V772laZhVddY
-TVXWtRbWsIc9alv3k8y4+tWxj13pXXVz0opsFbKXrStgmyVYX/IUsZ8FrWEVO09MlUaumEWtVyVb
-T1L9BgovgG1sZTtb2tbWtrfFbW51u1vYLgECYmVsRWxxDOIW17jHRW5ylbtc5jbXuc8lri2aStbX
-GIK318VudrW72xFg1VV7TW14q6pZnZrTs6FFb3oTO93BPlW875XqavOKKvDC175gzSlnSUVY9fbX
-v0tlb2cbe18Cv1S+lBWIZQu8YHeQV7/WPO9/JRza0b5TnO5lMIMPnFWtEWEWHwZxiEU8YhKX2MQn
-RnGKVfzhDAgAuKoqTShUMWP+GtfYxjfGcY51vGMe99jHMw5FgPf7miWs2MhHRnKSVbwJ77bqNw14
-Q5SlPGUqV9nKV8ZylrW8ZS5HmR4uDmxwBRIHOZTZzGdGc5rVvGY2t9nNb4ZzmeMgZAiHRhNdxnOe
-9bxnLpOgyayqb4YJ7GAx88KsE0b0YSv81oRiWNAE3vB3UfroAhMaxhFOdKbTuuhCn5bS8I20kyf9
-aftamqOY1nSq2UpneDqa1OINNaC15oQa1NrWt8Z1rnW9a1732te/BnatVfDbMF86NGjYRbKVvWxm
-N9vZz4Z2tKU9bWonGw2svnBo1hBsbnfb298G9gL+vKpAvzq1pnZqaA6tanb+F5XTxq6Ip82N2liT
-e9TzPnd+C73udvcbqO8+9YDxTe9xS/PeA78suqmrbn83/N/YxqarEf7Yehu8shPHrMLby3CHOxzg
-6Y43xi9bcb3OGtwnR3nKfT3sFwe8IsiudsxlPnOaT/vabi30tlW+c56fXNx4RTAvFCzyvmpcwBXh
-d8fZ/fGFh5zojiU5fQ/+9MzqG94CSbrSU830jTud6nyNujen/vWvGn3IHNd6u7l+dIHIm+yqLXjJ
-KwJlPtfd7nfP8pdbDvIxx9nvfwd84OE8Z5xfnRd3xnviFV93PwOdwxXxsJIlP3nKn7jFe2+6QGT8
-Y8533vOf73GQC+9ygRT+ufKnR73kmex4SV/87XQ1e52RnvZ+r/3sXn893Fkvatfn3quxbzXaab91
-iMMV976vatjVOXbkSxX42Z798FVte9m3vfldVb49KzKEYHTf+98Hf/jFP37yl9/850d/95VA7M0W
-WhZHgH/85T9/+tff/vfHf/71v3/4y6L4jQ6NEUi/ASTAAjRA9HODuJO63ru+qXq+iBM+6Us06gu+
-42tAm8q+1mK+C5ypBzQ+rJNA4hs9vuMFt+PAmcpAcNrAE4QpDwTA6AvBCfy/nRI4FqSpFBSVcrPB
-mHJBGoTBGJwwCoQ+69vBG1RAsdu+OVDCJWTCJnTCJ4TCKJTCKaTCKlz+QkLAvK4TCCyYhi70wi8E
-wzAUwzEkwzI0wzNEwy7Eghk0r9CwByuEwziUwzmsQiY4wuVjwCJsQasjPUMDQhkcwcwrQT1EwTvU
-vgQjRPzKQrbzwz8MQjbcFNNKxJjCQXZawUTswTb8QUfsLyGEQAucRJWqRFDRwVBsMD4kwazjRAqD
-RHmSOFN0h1H8lN9oAmWwxVvExVzUxV3kxV70xV8ExmC8RfEpr0h8DRGQgmRUxmVkxmZ0xmeExmiU
-xmmkxmQUgVbMlNIQxm3kxm70RmG8AkPUQFkrNBI0x1NhLRUkR8M7Ry1MlXTMwQdTR3YUxHYsFXhk
-J3Dwo33kx370x3/+BMiAFMiB5MdidEWeIciEVMiFZEiC5JpkQsiGlMiJpEiBBIf5OpWK1MiN5Mg/
-WhU16MiQFMmF3KNTAcmRRMmU7MdMYsmWdMmXhMmYlMmZpMmatMmbxMmc1Mmd5Mme9MmfBMqgFMqh
-JMrzuJqKGJZiORbeGQnkAZousaLz4RIsAomWWZr5QSWTQCXisZaFcaUHEoh+OShncZaiNEv0iJyK
-sBd8caCK4Bd/ASMeeiAX8hykrAOZ6Yg6ARgieg3eEZoU2J6RKaNngZkOARILO8vEFA3b6Z2YqQia
-6Z6MwBmd4ZkoqsswgqG/VKKNAJajxMzy8UvjaRZjCMxnmQRwOE3+CZJLxWTN3hAbgSAbGzqbthSI
-taHN1RSezgFLurEbj/ADl1mFq+TL0AjNjKGYnaCCpUGYrKmbkjxMRmvN6ESRpQGmiqCc3cFIzLHM
-udTNJcIgjgjOmXGZFwLNaPpLNMofxzSY5uSFrNmhz5TO+MwLxsQdyqAX7OwI3wGeLhGeLuHKFPpO
-zlyYOkDP8SmMLuEJUTrKqwDMQtkNvxGIGlrN4ZHPCsWL68mewMRP7wEf9VHNy6TL3UyfjvhNLSLP
-BSrOHkocwwgZgzEXLsHL57TQGc0LAapO3UBQuUmgFuqcEPVOi9jMHhIZ8TzO4yFO80QQgzGMlumK
-ERUIIApQxKT+0SkdiRMqSWZpm/cRCbr0UQeSIY6wGrwkUPjsSyTlzvnACMPJCAmdHyml0jf9iCTi
-COxsIlTaziWaIlGKSmnBS41IyozomJFhpe9EpajMiLqhFiAxF15AzUN1zDYNHjiV1Eml1Eq11Ev1
-HzvaxyvNiz7aR0DC1FAV1VEl1VI11VNF1VRV1VVl1VZ1VQNxpliV1Vl9Hme4BVrF1VzdJVRwBlTQ
-1V8FVlniVV8N1mLV1WE11mSl1VtwBmV11llVl1WS1mml1mq11mvF1mzV1m3l1m711m8Nk2KTJmdY
-FUkAAEko13NNV3RVFWfASFFBC4PUFAAgV1UxV3ZNlXtd11X+cVdWiVd5zMF6zVd1tVeCHVh8RZV+
-XZV/dUtIcdg+2UuBVYOHpdgoqQh9tYuK1dgjuViD3diP7ZGOxVeQJdkcqQiFrZqSVVkagR+GTbDU
-g9mYTbEMyCmB9QR8wNmc1dmd5dme9dmfBdqgFdqhxdlCENmK4AbQU9qlZVoe44ajFQhrAIOppdqq
-tdqrxdqs1dqt5dqu9dqptQao5QXTk9myNdtZoIGTpY97YAWiddu3hdu4HdrccFmhg8WWIi+bvY29
-7Yy64AWMbcRV7ESxPY3VMNzDFYzWAFwTDEXJQlna4NvInQu6BZpLJMS8fQvJ1Vyy8FvAVUXBRSzF
-AtzCRdz+0jVdp1Bcg2XcSXTctXWMzYVdm6BccR06WMRczYjd3KWJzjXYzwVd0SLc0xXe4UWK1MXX
-1U3E1vUK3WVemJjd9hOIczCD6aXe6rXe68Xe7NXe7eXe7vXe6eUC9qPXiuAAAjDf80Xf9FXf9WXf
-9nXf94Xf+DXfWhDb9+O/+8Xf/NVf/fM/gQDcd0CHABbgASbgAjbgA0bgBFbgBWbgAH4HsWWH75Xg
-CabgCvbeBBQIlIWAP5DfDvbgDwbh+OUArCrFULxdXtiM5lXhmxBb3/3dtRJdgyVd4qVhwzVeUGxc
-tV3eFV7h5y3hSTzhFOZh5uVdfHXhF9604K3hJV6NGyb+wrsVRR22i9cdYt31YcvVwyCu4uYt4k1E
-YkVTYiYW48Bw4kGE4ijOYNfdYua94jy03ZrN3DWO3S4GwS8GrRjG1xke4z2GijJGXkJU3imW49xt
-Y4EYAjpE5ERWZCrEQnUR2B/QgkiW5Emm5Eq25EvG5EzW5E3m5EiGBbHlwjQU5VEm5VI+wzX0X4M1
-B3Jg5VZ25VeG5ViW5Vmm5Vq25VtmZXMQ2zdc5F72ZUS2wzSuCELo5GI25mNG5k7+ARLG4iLU4kGG
-XToOXDsG41TOYz7G5qjw4zNGY154XCqG5sgtZLvl5mcOZ8mV5iOm5lWz5orQ42yGZ6oQ2z/Ww0Bm
-DHD+PufbGOfaNUVzzue9Ted1Dt0wjueCnoJt5mZ7htx/3tt95uZTbAu9ZWiAbmGBrua/lWGD1miE
-PmOFxueJ5oxx5r4DJOmSNunyW7+IrohhaIOWdumXhumYlumZpumatumbxumWpoP63d+e9umfzr/+
-xWh8hYcyMOqjRuqkVuqlZuqmduqnhuqoNmp4EFsBPOmrxmqSxmBvpg8IeICcBuuwFuuxxulhYGY3
-7mc4xl2Qto2AtujCwmN31miD5mgo9mi2tg2HLme1RmG8rg23fmu1iuu+mOt4ruu7vWu/7gy9PmN/
-VuzHAOzARqvBNo3CNux5fujEfmzIGOfIO9vPTr3+y3PkinAFPjDt00bt1Fbt1Wbt1nbt14bt2Dbt
-ZhDbzWva28Zt0BO9oa4IMfja3wbu4BZurxUDsSVb0EbuyVs9rq4IAWgE2Ybu6Jbu6Y5tVzhrgaC7
-xdPu7c47MGsWgVUAuRXv8SbvoDXadu47wVPv9WbvNiM83hYIcTiD+abv+rbv+8bv/Nbv/ebv/vbv
-+RYHsUU87ibwAn+DxmPugWjb8mbwBhdvBbhucm5svhbizZaLyJZspaLsd7ZsJj5sWNRsC5cLxoZi
-xxZxzq3oDJ9sgu7wMf5wUwzxEycLEr9bE5fxsMBwFV8v9K7sFufjF89hYRbkG5eMCKe1nkPyJF/+
-OfEV2AEAgSeH8iiX8imn8iq38ivH8izX8iePAbGFuZoD8zAXc2i7OfjmhXVIhzRX8zVn8zZ38zeH
-8ziX8zmn8zRfB7HVOSXX8z2vgZ9LcF6AAD3Y8kEn9EI3dC0fgAjnZxOmcCKHixzXcXdjcR9fYiBn
-XSm+Z0efcUV/aBvX9M/gcXWO9F7YcErfY0tPXkxf6E+XXU7fa5Vea1a3CUgfdaEqdVMXY1QHZFX/
-aFl3XlefcFjva1+vCVqv9YfjcQ7H9dPV9XrmdWIviwgvAj6ndiVv5O+uCEY49G3n9m7H8mUQ23oY
-83En9zCvB7FVhzpX93Vn93anc3XA82qX953+83OUJQRvx/d873ZGAPYSb3RoB3UzF/VIv/Vlr+Fm
-L8IYB3iXoPE3FvYKX/iWMPZjB7BkN/hKx+yEfvaI/3XH+2FM/HeOd4mJp/iCv/jhRfgdVHiRL4Vx
-zm4Dh3nF07vRFojwdvCbx3nzFlsya++e93n1fm/Ale//JvqiN/qj7+8A5/EBj/mmtzsER1kBWPCc
-p/qqxwcI9/gOS+6tlzzRxnaBKG3qFvuxJ3vXpm0et+3cVvu117HdBlzfHu64l/u539ri5vHj5vq8
-T7HljvrnLvu/B3yxt+6sR2tGf3iWH/kUp3iiMvmTZ/aM7+iNR3zc6PcaD/nJLwWSP/bGd/z+0k15
-G1x5kW/4tD58zGfhUF98Sbf4zhfez2fB0Of4ca6E7aL92rf93FoCvg6ES+D93vf93wf+4Bf+4Sf+
-4jf+4+d9KwDlTGD+5nf+54f+6Jf+6af+6rf+62d+VDZzFmCD7vf+7wf/8Bf/8Sf/8jf/80f/7mcB
-sbWu23f/96d9XFD1QkD++rf/+8f/4w+ECNcrgUUVwAUIXgIHEixo8CDChAoXMmx4UBIASQ4nUqxo
-8aJAiBIxcuzo0aEzAB9HkiyZQiQvAClKsmxpEYAzlzJnItRI8+ZNmzh3ttTJ8+fHkECHejwpUCXR
-pBdhKm3q0KfTqA8jSq1qEKrVqkKzcjX+mnLVvbBix5Ita/Ys2rRq17JVCyBa27hy59KtG7YVgFZ2
-9/LtGxevXr+CBwsGTPgw4rnRACRu7BjtKpQAJlOubPky5syaN3Pu7Pkz6NCiR5Mubfo06tSqV7Nu
-7fo17NiyZ78W+Pg27rCSIOTufViAJAG+h/sFLpw48rrGkzOXC0FS8+hxuVKvbv069uzat3Pv7v07
-+PDix5Mvb/48+vTq17Nv7/49/Pjy59Ovb/8+/vz69/Pv7/8/gAEKOCCBBRp4IIIJKrgggw06+CCE
-EUo4IYUVWnghhhlq2N0TFvBizEIgJmSMMSlQoUaIaohI0IofbvgijDGilwKIIF5DhTH+JxbUokE1
-NsTjQC0CKSORRRqp1CQe+rHSkj5a4Acv1/BYIonXuPjhNeCkoOU1JuqYQgoe8rJll7xYQAUVHoLo
-B47gCDTkkXHKOadH1wDByyrXPOFji0vueA8v96xyJZ+8qHHnjR+iGKSLVDzBCzh78uLoE3XQeSmm
-mVrkIxBW+mgoECvtyKiIhVpQolFTYqnllvd8Cqemscp66SSrqEliHacGusqjPQIq6KSGFqrGSh1e
-SeqVkzT6RKWzOvssndcA0OubLq6CY4kFgUnio9eC6aKIXkJZarUgnknFSmvieOex0Lr7Lrzxyjsv
-vQx1CSaYVta7L7/9+vsvwAELPDD+wQUbfDDCCSu8MMMNO/wwxBFLPHF1VOhrgbK8GNuuQiSSqOaH
-HqfLIonoHlRHyYt6uZKKxqBM5ctK4kjFuIMKtAqsCIUpkJ0DiZkzySSOzMvLxqhYUNHUBukyiSiu
-/GbJYlL0hJs3L2os0AQVjWKNIrfrsckGvazjmGiudCrT28ZcdtjG2Ixn1tqK2bNAPzvkcbZde/x1
-yaIS1PLLvLCZI5Qh57jo1FXjuWjhcQu0tYhFgzxQ0ifnKOrgNJuJK5VMG822qG4PhHNFFtedsZnV
-/ugxyHh/CvXlBqGNMpMzFz424hdNknGnAtGoepQz5w58uaMyerPUx0u6s5l+Czn+0D1APPoEEK4q
-Do7jAx0qUJK2odSil8kTTyiLxgN70IrMWzB0sBYBsSgVA/3e4o2H9ziqqsfjKf6Vy4u5vv6Opb50
-XS97AtkeL7o3JnINBAho4p+q8gc8WyFERNGbXvWuRLeKvE8g8TPU9whyD+Hd73gSvNL5jBe/C2os
-g8RL3/8IOBDsVWR3AundAoFXP7KVr3wnXBEF0Wcb6bUQUCLa4EUSRTWBNAl4T4oSkCJoPB2yq4dP
-YN+xnnczfUUJZ6uAkh9IV5FITcpK1GPgzTxYQhP2UH9wWtHzVvTBigAwjEycH0GeKKU1Fq+Nx0Ki
-6q74tQDC0YdfFJwYKUJG0xH+C43EEojiAgiuKf6xisbL00CuMagjWnIidbSZGMGHIkAOkmNa5Jjq
-QIRJnr3tlHE8ZBgNGDyNVa2RToTSHik5ST+uiJSqW2UXNdhJi1jMT5JC40D8tKO9gYt1pRwf0XBl
-xDa6cplqGBRYZDkmCzzhgyk4GioltcymXaloppSkG30YtYssMQVQOiYqBec3ygmtXM48JSpfVodp
-prOf5bomnlxlkTB184biBBIp8cY1w+WNlxUcn97AMTyHtBNKk/ADMpM5z8fVM3IeW6g/tZYjcOQy
-pIUkFUCzScxrKFN6GZVnj5ipt4YS8qHnxJ1HdrcKFPUOmYfaKETPqb+ECqT+oNXUHzA1CaL4xU+b
-h8JYoIbWoiumQGlBfSbw3lhTbRrkfR/sKcd+KkQfUrKXwxSRUampVmQxNZ4LeaqyUsDPqa6CV3zc
-ZU15NsxfclGTbqWIV1tIPhGuAqil/CE6r3SopLZyrcVrqzZ16rS5/i1UY0WWQ/VqU8b+1SJ2+qDH
-cnUsQVlVkq974eggyKgB5jVQRKSeQFNQ2M4mhAq9m91pO3jZVPoRhW+bImu5WhAL1BV2uWoRaW2K
-2dYGUa2sRa3vYugi2WJxIra1Eo1cxr49cVGXp31mc8332gwKN4/FBZzRWgRA5Q42rym0YvXGCyjo
-brNuI6MubQ/yWd/hin3+yd0tXukb3h6yELb5BexGPUqia9XTuwxNF+vAZljJ+c5smWWi7VwkLU/p
-bo6qK5oAzOngaDYNbyup3G6fdmCG1KFwpBIZg7PlYBOTOL19s9zHKhw2+oKuWhtecUEm4eFyeuwE
-2CPRXWc60+zGTrmZa9xIWtxDEx9Zxpml8NZQjLSPCi7Dre2xhgHAYfclmKMLxhZQI4g3hqKtyZd9
-MjQpJuc5y+he+OouR/AFpvmqZ3344t9F/AwmQGfFzvn6iKFTgOfz6FmuIxE0865zj0aPJNGLNk+j
-+dyRTNO5057+NKhDLepRk7rUpj41qlOt6lWzutWufjWsY91q0frMUgP+EWSgULIUy4DoHpLhda4N
-4mvKgMPFB/F1rpPHmGFbZiWX6bVlNJcSTWvN1rdeRR0AUIeqHuUy3aYMDvkIhN9KK3d+YwxGmE2Z
-YKfE29MuiLqLrRBkpwS5IsEMiDDz7cnIm91dBcDwXDUZ3ap7Mr+qTL8REm1AneQy1JZ1VFJgyXET
-xCv0ruzASzsQdMNbMg+/OPR0bQEpIwTZvq4DtTh+FE2rPOQVR/m7DeKHybi4m71Tg21XfhCOPwHb
-pX1kMq2dwEh6JebXzjYPha1rlxudIC3398iN3fFuow7kLX+6zn0Hc5DfejLzVAMAlHXFrS+d6WYi
-+c7n23OYew/i1gH+e6+eAAAX+yHbbS8IOHjVTVhhnd59L7u/xxTJqfvayldn+cfLjm6s82LcFPdd
-JMFuJca33MoDmQS10b7je9g966Mznc9LDni/P7zpgd9SQvzusl5ZHfFpd/rBDzIJcMyeRfNMU+DN
-LviEqFzufOa6260CDjHVPZPDRwnwjcF6wG888Z7X/d15JmbR5xrsizo87JVekOljXe5+GPlAqIA6
-2zyK8pq2wOBRWVyNAbyotNf10x39fBGO3uOvpz9Bfnxsj+c88Ng3yNWhCPBNiqK1H/vljhpYyQCC
-nP4BIJ9VCrUMYPBFHLvM1ptgHvIxn/ykX/Npn+nlXu4xXrAh2+/+NN3/4Z/8ZAzWgZ/3zV8HOiAK
-ZtJvMZGtnYn8yBX8lZ73XBoIkt79QZ8Ikh7YQUnrZR8M+o7NAF+5EWD0TZ0HvuAR9pyxSeAENgXc
-EY3K3Am9SeDY6WC7VcbdXcZKAB/XBaFIIBvVeMgJfiFljKFl0BrWWVbjsYsIPtvzcd1FnQyUAEHG
-jJC/ieDIjV/HVUbsseFkiMjTlaEO+mAKoFwRbhywGSJSgKCQcY+tSWDBZeD26aAdQl8VSkWa+EHV
-cN6jbCHzTQLK8KAL+qATxmCUTN/+sRspruHfEcQqVM3TyZ1lPIr4DRegmF8eDd5ejQmvTF+lFOIH
-3gM4gIMX9qD+/R2h7jWgE3IcFYDF0q0hFFrA9AEfFVgGLhmgbXjIAi6dNBphAoEjFX5iUkzCuIlK
-wxEi8LUKQ9RiMtZfxXFg211cqNCi8zHd06Ff13kI6nUdlABjkMwT9SCEGuTKB2Wis1Eb5wliLE5j
-6SXi0g3kRHIc2BWdCVak63FdA1bjm8yTZY3jPfKe6zWhOkaFtEiUB3Kd3E3UD/pjP8Zg1KXeGepa
-pfDjE7Lb01keENyJzVnJjXyQQa6d0lgAoUEK7T3h0/UiQ5DhM25iK94k9Rldw5njB3Zk7pkIQaCi
-90zCo6CiAjLfxVnlD4KlJ65kU1ABB5piZRhDJqKkB+pbwUH+G7FJHeEt3STQorcx4NxJIgSAo5lM
-i8Zgm7bZlSQuXmWEm0ElxEks2sV5m8NdJQpixi9aRq7l5bxN5aRcI8tVZstpyWa2YVNmUmAGSjcC
-AMGJYWkCQMLtXGUkXTqypW3eJm7mpm7uZp65DjnhRJvhzVJSSHA6E060jOsYFnsUZ47xpnM+J3RG
-p3ROJ3VWp3VeJ3Zmp3Zu54D4pnd+J3iGp3iOJ3mWp3nKpH8gp3muJ3u2p3uyJ3r2B3O+J33Wp32S
-p4LA5n3uJ3/yZzc24374Grb0J4EWqHn+Z4GchIEuKIOCJzhooIBM4oD4GoDqB4UWyIUSCEcKyIYW
-iIQKSIb+TqgI9keIciiE9keHEsiHBkiJsuiI8keLAkiK/seMRqjfeAEm5KiO7iiP9qiP/iiQBqmQ
-DimR6mjIVWh+hOgNvAKTNqmTPimURqmUTimVVqmVXimT3sCRJqiuXUORfimYhqmYFqkXyM+JAoiE
-ekIprCmbtqmbvimcxqmczimd1qmdruklbCmBhGgVTIGf/imgBqqgDiqhFqqhHiqiJqqfVoGeDkjR
-3QM+3KmkTiqlVqqdeoKZ5qffqKmldqqnfuqc5qn3ICl+8KminiqqpqqqJiqjjiqXQk+kgqqszmqn
-YqrvnOl/pCmt7iqv1qmo5hqp3oepriqxFquxGmqrAuv+q9pGrPaqsz5rKdjqmOCqf+gqtF7rrv5q
-jP7HsB6rt37rqibrtvrHozYrtp6rp0prjQaIhNpAMrwrvMarvM4rvdarvd4rvuarvr7rAzQqiKrc
-FoSDwA4swRaswR4swiaswi4swzaswG6BvwZI0RHCvlasxV4sxu6rDWRqgkioA7gDyIasyI4syZas
-yZ4syqasyq4syL5AxAJIiBJDL8wszdaszd4szuaszu4sz/asz84sMbwsjeraPcwCyx4t0iat0q6s
-A3Asgnjs0kat1E5tyrqsq+6pysnsz24t13at1/Zs0F6toxKt0VKt2Z5t1DbtrWrqQHws2r4t3Fat
-0Pr+R8x+rd3eLd7ybNgqq4aSbdz+LeCGrNpOK9sKhNsGLuKirdXyrYjymdbmLeRG7tfu7biiqN8m
-LuZS7eCuK5r6DQqUAOiGruiOLumWrumeLuqmruquLuh+wtySqMrRAhvMLu3Wru3eLu7mru7uLu/2
-ru/OLi28Ln8UHQSwrvEeL/ImL+uigNMeiLWiK/RWqra+aICqXJ+CK/Zm76GKK/XqR7lGL/hOqrpS
-a388b/ier5xOb7DaR7dqr/u+L/eub318L/rWL5yOb+HyAqfaL//iqfBWL59d7/sOcPbG77IGirn2
-b/3ib8d6rvI+MARHcOq6rtj+K59RQw5ksAZvMAf+d7AHfzAIh7AIjzAJZzA1/K/36lrxSjALtzAE
-M+/aNnDbZi4NR+3iVi6MZq3k7jAPgy0K58ejlm0NDzHLbi758gfUErESy20Fu6jj9jAURzHNUm73
-AvHlLjEWk6wR5+/hZrEXu8MNV3Gp6rAUl/EOU7H80kcQfzEbb7EMGy4bf3EYp/F81K0Z33HeovEB
-F20ce7EbP63fhIEJDDIhF7IhHzIiJ7IiLzIjN7IjD3If/PAY81nAOqwlXzImZzLDQmwTy6iuEcIj
-h7IojzIpP3IYNK+BmK8Co6/6Yqj1EjAsg6sB9y2srjL/MjAgD8T+2vL5tjLWBnAsB7OxzvLY1jL+
-L6MvLjvvph4zK0uysL6yMEdzqhKziRozM4NvMqfyMl8z+Ppy4w6EAEuzOG+vM9sH/XIz9Gazh/rN
-NpCCO78zPMezPM8zPdezPd8zPuezO7NDOddHiD6CDAS0QA80QRe0QR80Qie0Qi80Qwf0I/SzGqsw
-GegzRVe0RV90Pm8DKq/zDPcxFs+xKz8xHo/05EL0fKyxRy/xHytzR6c0EYP0Lw/E45I0TfusHtOy
-bQixS9fwSmtzS+80DcP0NwvETNe0UefsTRdzTgP1EPc0RwvEKcyCVE81VVe1VV81Vme1Vm81V3e1
-VC+BSctHiFKAKpS1WZ81Wqe1Wq81W7e1W7/+NVyXNQWEdXwUnQBkgFfntV7vNV939SlstIr6jSNk
-AWEXtmEfNmIntmIvNmM3tmM/NmH7AF3DR4iKwxlcNmZntmZvNmd3tmd/NmiHtmhftjhM9nvYdTdA
-tmqvNmu39mM7AmAPiCqj87V6swWD8zjnNrKatnucM21jqzoHti7/NrrathPjtm4nd6BSs8QSbQIT
-t7MGt2xvM3RDq3HDLDQrt3Yztydbc3VHd2zb6ECgQBCUt3mfN3qnt3qvN3u3t3u/N3yXNwUz7m0L
-xAxEAH7nt37vN3/3t3//N4AHuIAPOH7PAG+3B/GOQnwvOIM3uIPDNwwT7hvzQhczdeIKdX3+80JR
-HzWHT/GBswdKWzjmOrVww7GIYy6GHzdRdziL12xSV/NSn3jikvh0/7SM/22KY7dItziLv3hzQ49O
-33jc0rh4m7iQ4/iHr4cd83iH+3h3x/iRD3l4s6vfPEMYXDmWZ7mWbzmXd7mXfzmYh7mYX7kOJLl6
-hGg1/IKarzmbt7mbvzmcx7mczzmd17maV4OZp8fEjjmf97mf//mYP8OUd+5wf/ezXje3Zrd2Jzd3
-D613G/quSneR6y+kOyui062iL3puNzq5Onel96qkU3mhfzqtXjrsArOmM3qez4inkzqthjqhC8Qu
-u/qnmnoOo3qqb/qqM1qr0zqownqusjP+Rg87sRf7PfNzJyc6nwF0Qze7sz87tC/0Qyd7pw8EBEy0
-sWe7tg+7RsdwLht5lL9tjiu7TDN5j+86pl1xuKMtkYs6uK+72Y47pu+4uRu1kzs6lMO72bZ7rFO4
-visuupfHktd7Td97tef7v0stvwf7QER1Xz88xEe8VoM1tZ/6QFRAXGe8xm88x8N1BQQ8edg1Xks8
-yZf8w/+1t7O0QAy2a7e8y788Y0t2xd/6QFj2aN88zue8zod2ac/8fqA2zAe90Ls8bKe8T8u6r8uq
-rQMwcuf6OHO65T560lcqsFcrdU+9pS69hWa60wsz1A9vr2M91Q86wyO92Gc9yI9H+3b+vdenvXj4
-9tlLatWXr9/4wgHcPd7nvd7vPd/3vd//PeAHvuDfPTa4fXiEaDYggeIvPuM3vuM/PuRHvuRPPuVX
-vuJng+GDB/EOPud3vud//uD7AtlbvY0n/NLKu8WvOMHbe+Z/R4ibftqOPt2XPuwjLerTvOqvPk0b
-fNQjfO0j7cKT/rv//tHePtPnvu6PNO+DPZAT/9IG/+wPv/OrrPFvPb0n/x0v/8+r+/Qzrewjsd8k
-AuiPP/mXP+AXvs9b/0Bog+W3v/u/P/xXvja0vndMrPnfP/6XfyJ8/37Mdtz7KkDwEngPwD2BBxEm
-VLiQYUOHDyFGfEjQoMAqUzBm1Lj+kWNHjx9BhhQ5EmOVgxQlplS5kmVLgSkAnMRXimZNmzdx5tS5
-k2dPnz9pejoI02VRo0ddAkhx0BNQp0+hRv156WRBpFexYkVpkWRXr1/BjjQ50GpWs2dVEh04U2pb
-t295Cn0ZE21duxCVMoW7l69bqmQr3hVsdyuvi2ERJ1YMciyvwoMhZ1XrmG1fy5fjDqUbmfPZvAId
-ZRE9mnRp06dRp1a9mnVr0T6qBu48u2VhcWdw59a9m3dv37+BBxc+HLe42LSRs5wsoJtr58+hR2/t
-SHNy6y0/8zo1i3t379/Bhxc/nnx58+e5Lzl+nX3DwhVUxZc/n359+/fx59e/n3/+/Arr2wsQoeUy
-QM/AAxFM8LxTqhPQQYWyc8CdCSms0MILMcxQww057NDDCV8A8MH2CiOmlxNRTFHFFVls0cUXYYxR
-xhOJEXHE6ya7Z5YPeezRxx89dKDBGx+MEMgjkUySwxABI5LEsngxccYpqazSyhhrbNJJHDfTUckv
-wURSyLm2dNDIMNFMc0kby+ysxCvhjFNOGLN0DMo2Z8txRzX57HPCMXmZDE/rsjvkhUMRTVTRRRlt
-1NFHIY1U0kMNYXPQwQrLJpNNOe3U009BDVXUUUkt1dRNs7H00rsmg2CJSWGNVdZZJT1kyFVpy64p
-zHjtlaa/7JQNV8EKO2yxY5H+Dauxx4ZltcvKfI2WL7kC3axZznSVVtu9gGX2WrSKTVbccUVa9s5v
-0coR2m3ZfYpaQdEVLNt26XWq23PjzSpccvnttyRV8z1K3XoJ9uldawOua96CGcbpXmETRmpffylO
-1lyIIzZq4IY5rungjO3K7pkwSC7Z5JNRTlnllVlu2eWXSdYBYJBXKqyaX3DOWeedee7Z55+BDlro
-oXGuZmaaU5qMEJiZbtrpp2F+5lakrzrTz6vDZDJYqo96c86vwYazTm+5TqvLPbFOO0lA4S27KKvV
-jttHrcl2OyKvw85b7xfHxtfuiPSUW/Ae2Ub4b5bgHlzxDOn2+3CH8N5b8sn++8b48YYCX1xzDAu/
-PKmlBJJw89EpbNxyzxWKfPLVw64cdcDPJl32zl9XKbsmlMld9915793334EPXvjhidf96NoLE0GK
-5Zlv3vnnoY9e+umpr9765UU4/vXJrine++/BD7/4Jqau/aHszF+o7vS3Zl99x9lv230y53cI/frb
-x19L/fPXX/75/6e/+9VvfekroPkOWLsAxs9w/OMFAMBhDAlOkIIVtOAFMZhBDW6Qgx3UIBXgZ0AA
-UMGDJTThCVGYQgmC8HTpg4kKYRhDGZoQHA3k3wxxmEMdTlANDuSFGnYYRCGisIcOtMAQkZjEDPqQ
-iU104hOhGEUpTpGKVbT+4hWxmEUtbpGLXfTiF8EYRjGOkYxlNOMZ0ZhGNa6RjW104xvhGEc5zpGO
-dbTjHfGYRz3ukY999OMfARlIQQ6SkIU05CERyQsgrAIh1wBAEV9ykBBCDgCVtKSdBGJJTWZSWASx
-JDj8QMlMGgMhBHmgJitJSlRWMpOaBCVZGAKERybkHsaoJBCK6ElNGkSXEAwlQ2Cyynv08pUKUQMj
-z9fChFhlkispyBOM8YRETvMufqgDQiYBjgFt5lxPWEUdRgjJ1BnOlA/E2LnKyQsL1OGX72vlJKqC
-kDv5DUopqIM004mQJ1QSdAJRAwDg+QR74rOB6VxnOxmSz3TCRJoJMYb+OIF5jZRYJQUSzYpVLIBM
-am7ULOw8CBX6eQ9wSlJYq6CCRL/Z0HGu1JwLQae1UqDNhNIFAMa4JyxJKs9zysYq+TxINrOJEGP0
-kxdUsAAmWfoSmbrHWvk8lxqWelFlJsUgT6iDRTma1aOsApn7LOITwDEJbgorBYFxnE+RSs9OWsuR
-WKUlTe9BBSDglJM6dSlPe4jWolZ0lrzwKkLUIFG95rOtooynJN0aKNAZwwI1NCoI63BUq6gBsqCj
-LD/rapUnyDKypFRDCuz5GW9WchXSHG0dYFKRVcBTq611iTUFYgEqDKWsY2VqYg9bSrje9a3LtFw5
-C/LPUDqVpzsdCjL+0erIHoKUrr1Nal2ZqlvaKsQYrB0hSvNiz5ailhfXqINBqvtDZhrEKsYAwhOe
-AELH1IGRfpilMUq7WUbG9BqjrcgkSOla/a7Eo0BgbVyR2tKFrJO179tlWlGZ35f6dqZ1HShx5Zng
-VmoSdGidxGx5MYlrBvitqGyugBtcFVQe1a4P/GU5TWkV9uK2u+M1Z2ExSZCGFsSRDf0nLyIrEEdW
-RK/79fFCUlBaAEjUqry0LS3BAY4WohW4xpXuQWDM0rJQYRUQzimEAmOBIXNYICDUZCiVW8qjDpat
-W45ubr254SuXBcUxsco1VgGOOuBSnVQAhyzJm+d0OjLABeklK6H+xGYb/pjQ/owshv+cF3TWocBn
-frJanavUM5fln4Kap5Pr6tMoU1kgQ0UIEJZC5gFF1Z255XJZBA3LOwm0Dv8soouDa2ZTppPGhgMH
-iXc8EDUXmtcISXKj6XouKgA7xI/GNF0POunABLPE0GVwTn2aAgwLRMNkmYQ0NSzYgm4m2Yalqzfn
-KlTr8pguKTYIdwMFDldnGAChdLF50ateWoM3vqsgJX3t+5L89prf1cItrT0szGIfdpXM9DAxESpl
-2aj3yhM+8J3AEWpUxrTRjjyxl+lsp4lrvJLF9DbHb6nSlyAz1Zi0ih8w3sPQDhWgLoYmBFcRk3n7
-VZYAOO9clJL+giL6t98997lDrkHqutyjoSJFilVF/nOl/7zKg4k4zTVaFNAuneorOaIFSSwYIFqQ
-qFy7egWz3pJrdB0tQCRt0lsS9aqvne1td/vb4R53uc+d7nW3+93xnne9F1KJffd72PG3db8PXogQ
-rZ/gCZ94HEIRgop3PAwHOD+CkPDxlS8hC304ectvnoM1ZDzZ6xd59yXwdaRHnek9t8DQg35+omcf
-6i8H+8fJ/nCqbz3r3ed6EU619M3sPe9RZ/vc91MSrzD+8ZGffOUvn/nNd/7zoR/942NV9wiEEiaw
-n33tb5/73ff+98EffvGPX/vaOz2UpJ9+9a+f/dK/QfkcmJ3+YlSM/scCg1lxv3u9dKxj1KL93wqD
-DepvABGjMYSPfeSPABXQK+5PkvLP+gJjV/ivYfzP984vMARwATUwJAxw0PAnATcwBDuiATPpAZEH
-SiRwAgumAoHPcwJQBGFQIzrw8w5i/mIwBknwgUzw9/ZPBVfQ/FwQSjLwBkVwBp8oO7qgB5RwCZmw
-CZ3wCaEwCqVwCqmwCpeQEBww86DkAtqhC73wC8EwDMVwDMmwDM3wDNGwCy8ACGMPSpDBCuEwDuVw
-DquQEuCPf7IjH2SHdGYB/7QwMKSEdQTxa1zHgQrjBfZwdGjHifIwETenD7PQEKEkEAexEq+kEPnn
-EB1Rcxb+sYkacRMVBxJL8A8PghIt8RRnBBP1RxNBcXA6kYk+sRXlRhR1kBQFwhRRMRddRBXxhxVl
-MW5e0YeyIw9EoRiN8RiRMRmVcRmZsRmd8RmhsRgFAQIiMROhpACAIRu1cRu5sRu98RvBMRzFcRzJ
-MRsLgA1nD0o0IBrZsR3d8R2hUQPuUID6yQaJMARzsPpOMAJ9kGFY0BZ5YQjvUQONkBHrcSDx0Q8l
-kR/7kWD+cSEPQiARkgAL0hMPciIXMB938AJ7sCHb5SGtEQMxcgErEhb7aQOIIyVVciVZcjjgQACq
-cRWhBBSkoyZt8iZXAxTQ8XAK4w5a8ieBMiiDwzjoxyD+D6ISFCQplXIpyyMDYHIUIVIgKKA/qLIq
-rfIq+YMCdhIAoWQJmPIrwVIpGWAeP7Cf9PAX44YW9ZEHb1EX3ZJvttJufBEtsSYY488s6VJt1HIj
-gxAQ3/IvV4QXCQhKEDEv65IsV+8gztIw/WQvARIXAdMtBVPyCJMxr8Yu8bCfuqADOLMzPfMzQTM0
-RXM0SbM0TfM0OTMXqBEqQ/IgeGAcYDM2ZXM2abM2bfM2cTM3dXM3YZMH4tJtCoMTUHM4ibM4jfM0
-3QAxb68GR1IBNRIgU9AjtwUkZVIkm3MAS1IYL/I66e85o5IXolM6pYU6e1EIubP+svMumfM8K8Y7
-W1P+IMJTPH2FPAfTOtnTX9IzM9fzPvvFPauzI+UzWuiTMu2TP8klP+nxIIbgOBm0QR3UNLGQNf9T
-IKCBNy30QjE0Q3cTGn6zbAojFx40REW0QZlAOYdPMS2zMRXyPaMkMl10MkevMlOUTzAzQQViMWcU
-TRzzOyHTRVERRl9PRnMUTWq0LFF0SHV0RSe0RX30L4FU/wSiMJEUTIo0MW90SsNkR1m0R5u0Ep8U
-Ag9CSrF0bUwUAftpDyQgTdV0Tdm0Td30TeE0TuV0Tuk0TRFhNWvxO3VhBfi0T/30TwE1UAV1UAm1
-UA31UPlUFzqUawpDEer0USE1UiWVTuWxKC1yPw3+dFz8szwZMkDHc1Gp5gUz9UDLNH1AcFTFZVPr
-E0A9lVcGNEYLFFWPBUGNVCDsUVbtT0k5lVVb9TJeNUhjFVcTg1atlBfQdFKRNVmVNU7vNCZ3VSD2
-FFGldVqptVoNVVH3Z0kddVm5tVuRtVKrhQavdEyVREuXlEu7VBC/dB/DlFyVpEqXc1zd9UjM9VmZ
-NF1zcV3ZkhfEdF59BF5PVF791UfqdVXbEl/zFVSRZi4HlkcA1kyPtGF7pGAJtBQRNmGz1V77VWI7
-5GFNtZ8WdERFdmQh1FkNlhcqVENVdmVZFjc5NGNPFkRJdmZpljNL1FJNElOFNTFUtWLhs1cFVGH+
-aUZUd1YxiDVeeeFWizYsehZWeRVop0VoQYZol1ZZStV8TrVqwaJpgfVpoRYufhVKA1JrC/Bqaydr
-ybYruFZs4/NrwVZqM4Zq05YkjjZgeeEcYiFv9XZv+bZv/fZvATdwBXdwCTdvyQBP15IjBYIHkqBx
-HfdxITdyJXdyKbdyLfdyMbdxfRNmfZYX2KFwQTd0RXd0CTc5cVY7I5ZjPYRinfZgL9YS9VVx+VV1
-HdZsXycWaZdDWLdrXfd1BzF2+7Jdc7djbRd1cHd4M2R3xRZdfTdvgLcNA2NjkddCPBZr+wkpwzJ7
-tbcpnzJPWXQqsTJ8xXd880MrObd1ecErt3f+fdmXO8bydNVTIFBSKOm3fn/yJU22c2kSJ/m3f2tS
-J8+Xd3nBJ+23gA14KIvXc9B2bkVibcH0Z93WV+E2YuSWgTkwgS9ngS34IxyYXSE4gvsibB94bDdY
-LDD4cTS4hDmig/e1bUFYKkTYg0lYhRnjhA9nGOExh3V4h51xGvMXfbGxHIV4iIm4iMfxHANYbNeR
-h5m4iXMYXA/wY1N3ei9EeUeYeZsXbJ43HaOXijWkes8WL70YQ6xYhrE4i+dki3lSSMe4QsD4dsW4
-jSukjPf1jNE4TtSYK7tYjt3Yhv/meOWYjmXXju/4Eic4YRhWjt/YeDWTDh35kSGZCiPUe5f+lAvT
-8JIxOZM1+QzXMIlH+A0jOZRF2ZHtEH7101ZpOCRYWHZd+IWhIob3VSJTeSPqFmJReZY9YpWD94Nd
-+S1gWXZlGZf/xZRtNGmFeQR19WRbuZeB4pd3eYaPOSNqWYpvOZozQpeh12uZuZkPOWAq+Jin2XoP
-Am9Jt5zN+ZwD93B/WIAZN3Pd+Z3hOZ4vd3P7x14/F53xOZ/L2XTD9QjjmI/dQZCfmZALuUryWC7Z
-mI8XWYH/mY8FOpt7t6Cdt5vzJZHbeKEzuKEDOZk7l6AlOhUpOl4seowxGoX7aRDaN6W11ynXWWwN
-gHxhOqbF1wBCGl0KQ31VOqeXcgf82G7+smN+DziohfoM8FdC7XV//TeplVo1ALieT5aAhzqq7Zco
-+9koq9mapwCbuVibt9lgavpbvlmYwzmMdTaatXqNO7Wro8KZIRqarXms4bisj/ms9Zir1Xon2Hqr
-BSKYxbqn3QYJPSCwBXuwCbuwDfuwETuxFXuxGVuwEZcv25oZYGCyKbuyLfuyMTuzNXuzObuzPXuy
-meGrr6UwhKCxTfu0UTu1GRsQ/LpsALmNH1qv7/WjW0e0m2Wkvbikb1ijYZuj0dejaZtObHtYcJuK
-dfuPeXuMYxutLTa4a9uTZVh6Sbq1uea1ldu3BRi4nbtFDho4E1qRqZtqskMZ8qG8zfv+vNE7vdV7
-vdm7vd37veHbvCEpcZ/ZBVrgvvE7v/V7v/m7v/37vwE8wAX8vl1guHGlMCohvhV8wRm8weE7CsIb
-aeiboQFySS0cf6JYnFH3Oy/8ZP3HA4v1lFm0wzv3wxkvgjgvxTFowtOR8lT8xVfIAoPQxWFcxT3v
-iWo8xyUI8A5Px3Pc8OYH8Xw8xfeuyI38yJE8yZV8yZm8yZ38yaE8yqV8yqm8yq38yrE8y7V8y7m8
-y738y8E8zMV8zMm8zM38zNEcInLsINZJnyoMxO1HwoBLzsnJlRIuIVgNANhrxlYJyr4JglIA7UTM
-koDgGhKNxdP8y1Mg3ARikbbpw37+qOZujiHwpcmeC9k8iiFMSqKuAQgUDGO8axUkirKmLamugYSa
-LdFVHbAAoKH2qZ2sqdxsLb5QndIhxtIjzdRiqiGgJMz8xrz0KawGzr1SfdWN/dYEArZ07NZknbqK
-btAqfbdyvbmiLCH2/CSkCV/+CptK/cmSfd9AzNiNfdEFYhX6qbrazCF23dYV7tJPzXLWaYR0ruFM
-DdLpauxETsbFHcz/SZpa7SXmKt0XgtVOp88RjMK47N0hR9oAgOTo3N3JYsSKfd8T3aj8QKZEiqDs
-XcOMgcXoHaei/bmqvSHcq4e0va9+Sui+7ZsmnuLRfBIWCXSCaZfQKqamKuTDHdL+82ndXSrf82wh
-gL3T7uHpSq259N3lu9yR1I2lfGrbk6ndpz3Aug3oO96vTMrZGmnF2O2mih7cWh7pzdzOSu3PjCHR
-eP3WaarPyR7kPG7g/1zrTwmVKgLOwKmmkN3ACN3nwX7v9UcNiI3vAT/wBf+OUuCCgPwsvo6CeLwo
-MGjwHf/xIT/yJX/yKb/yLf/yMT/zNZ8zQKvzPf/zQT/0RX/0Sb/0Tf/0Ub+ifGjsUr/1Xf/1YR/2
-Pf71Yr/2bf/2YZ/xCm73eb/3ff/3gT/4hV/utXD4jf/4kT/5g78FL2fmlf/5oT/6f19cR9xeSdx9
-Mrz54dxur1+Au9+Ftn97wp/+mqvfw60fw8c/9dKfrAViAsrh/eE//uV//um//u3//vE///Uf/g18
-VQojEgBCmsCBBAsaPIgwocKFDBsKjMQr4j0A9yJavIgxo8aNHDt6/AjSYwoAFp+UO4kypcqVLFu6
-fAkzpkyUEyyODIkzp86dPDUCSGHRwayhRIsaPYo0qdKlTJs6HbrE4sSKPatavRpyqkVuqrp6/Qo2
-rNixZMuaPYu2KzepFLG6ffv2psQMT+vavYvXqQObJOH6/Vv1Z1B3hAsbPow4seLFjBs7fkz4BVuq
-gCtbzqg1IrFenDt7/gw6tOjRpEubPs2Z2OTLrFnL5XVvFuTZtGvbfrw34uv+1rwtC47o4Lbw4cQb
-S5bYtrdyt5l5bUYNPbr06aZVI6e8PDvP17GLe/8+PDev3drL8/zNKzj49eyNrzYPH2Tz59Tr278/
-2jrs5PH7b+QuW3sCDkiYeOT5h2BG6O1gSoMOPghhhBJOSGGFFl6IYYMIvJdgh83Vs0uIIo5IYokm
-nohiiiquyGKI9XDYIYKvEZJhjTbeiGOGO/AVY48WoacegUKCd9x+2PkI33z4Lcmkffo1hyR8AA5J
-pXcG9hVlgkBWyaVwRUKZpXZKNklmmaU9yV+Y2k3ZZZuzXakmglu6Sad718VZ3phm7slnL2geiSdv
-bNZJKGJwBgrfnIUuGhn+jIi2pmefkjL556PKDcpooYdamh16wuQIaqiiWsiJo5xW1pwsi6zKaquu
-vgprrLLOSmuttq4qi6mn/vUaBAiMCmywoQrD467KKZopoV+maexfkU4KbX2VNlsZpsnSuSm1lyF7
-rZvLAqotVs9GSy5004YLl7Xddpktun9xuy6X37oL17jl3kvaufRepW68VLa7r1vonZJXwQYfzFRU
-dwZ8VXNxyAFxxBJPTHHFFl+MccYabwxxHLoynNNrAtCFcMkmF3xKsSALDFRElrwBc8wyz0xzzTbf
-jHPOOu8MsyYfrywffxSkRXTRRh+NFgU/Ay0SlgLQw3PUUk9N9c6WqMz+dGAtp+evt0tnvZG9+I79
-mb5gg9Rv1wQCfDZO8KpN4LxtZ8UffWTfXfbXc6vcHdxVsr23R2/73Z7cgXckNt5jm314RmkTvh7g
-jfu0dRFmXI555ppvznnnnn8OeuiiX86O3oE390gAqq/Oeuuuvw577LLPTnvtqj9i+t69cjF6777/
-DrzoRWA9eUeDQ05k7nMnrvi9jBevG5Z9Iz+g5ND/uHWQ1BeufNvMN0/u89A/vj1x1l/Py/HlD2c4
-+gs7B37z4hdP/vq3nX+9+vbb1r7738c/qflNrn77ow3+oIceEsxhgQxsoAMfCMEISnCCFKygBRdo
-j+6drTlYmIYHPwj+whCKcIQkLKEJT4jCFHoQCxoE24wuCMMYynCGFyQB8dx3Ef0VcDb9Q9//ANgn
-ATaOgDvEzQ1xGBEdFtExPbzeD4G4JyEejohLZMwBi6fEKi6midB7IhTLJMXAUVGLibni5LJIRsRw
-sXhe/GKTwqg76QUojbUxY+PQA4Vg6HGPfOyjH/8IyEAKcpCELKQeR9DCrKXqCIxspCMfCclISnKS
-lKykJS/JyFy9D4e9UoIhPwnKUIqykFA4IhLRSMfCrHFybXTjkuA4tzGmskCmxCEqZ7nKxrXSlfeB
-ZdtkOUs7Hu6Wqczl4XbJS2klkmnATKUwAzewk0lzmk1RmJGQ+L7+h3Fsm9zspjc15rFNuk9kJKOm
-Oc85i5RFD5sK2trLqgbPeMoTZz4Tpw+FhrR86nOfZlGaPa8nMqjNc6AEhefV1snOHGZvlrQx5unq
-lszwLRNozaTjM/dGTDo6dG/IjKh0fHm2iqbxonPLaBo3ujyIejSAE12ZSMlI0rahJxEmqKlNb4rT
-nOp0pzztqU9/CtSaluqfXeQPC8KB1KQqdalMbapTnwrVqEp1qkhlQUtB1qs+BHWrXO2qV4GaiFq6
-Dz2eKIVZz4rWtKp1rWxtq1vfCte4mvUSV2VYc6owhbzqda987atf/wrYwAp2sITNaxXqGjDu4EOu
-jG2sYx8bV0/+iBV9ZIWsZS+L2bfSlahs5A9eCwva0Ip2tIM9LGcHKL3FZna1rLWsZBGa0CRurayt
-ra1tNYvYfd2VtLztrW8Fa9prYlOxty2ucc/62vFgKbaVPa5za7tZ4SJxt7+trnV5G1wwoY+4z+1u
-ZpN7IGyihxEgKK95z4ve9Kp3vextr3vfC9/yLiO39GqOOtKB3/zqd7/87a9//wvgAAt4wPhVB33d
-NaP4KnjBDG5wfBkx2fzN1rsUtmx0tetEz153wxwu7YHRxd0Ki1iu4F1uQps74hS79cLMuidlPtvh
-GMt4CtltMUBTq+Ics7XEscWeRWir4yDP9cPhou6Mj3zdGoP+a3w4FrKQedzj9E3YyUFm8ZI7+2Ik
-a7m6SmZniKmsYij3GD0JmIeZz4zmNKt5zWxus5vfDOc4mxkHRNZWc+yAhzzrec987rOf/wzoQAt6
-0ITOsx3qTK1epULOjG60ox8d5wREGIFTBrOKrcxOI29506Lt8nCbbOkUi5m5lQ61iDGNTU1zetUe
-Pu0QQW3qCo/6xKWOtXdRPV0Ns3rXwEV0s75s6+7Omp1khrSxj43sN9PZ1cfkTzzYAO1oS3va1K62
-ta+N7Wxre9vQjoevjaXoZIt73MeWNGxJ/eNgVxjXOFQ1r9+tV08jEdjqPu6wxVvrehuX3f7TNbz/
-bdhv74r+3vq+7b1Pme+C25bfLrYIjAEOb3lzEtYKN/iksZjwirOW4RnOMsT/LfFxUlzjrT24LbdG
-XgerfOUsb+98mf1QyqAjDTSvuc1vjvOc63znPO+5z39Oc3QI/FQJbrnRj75yCJ+b1ukm+W05XlSP
-f/zdId/uyJ3+3YufMeNYt/DQOeXuqXO66jeWimq7zlqTj5XraHcs1LHscLFT/euWInjbH6t2yrL9
-7nJ9Oyv9LfdVk53JZud71pdO7K1xgACMb7zjHw/5yEt+8pSvvOUvz/ha0P1RzXkHOj4P+tCLfvSk
-L73pT4/61Kv+8+/YPKJ69QfMy372tK/95Tmg9Tvu3fD+uIU5RwEf+E0Pnn5X5z2Jcz/M3RufrX7X
-JfCDr+Xho7bwy8c78qGp/OqntfnNljr0tyz9V1Nf+4zNu4QtogB8qH/97G+/+98P//jLf/70r7/6
-C+H6QDXHG2Dov///D4ABKIADSIAFaIAHiID95w35hyciwwr2B4ERKIETWH8KcH0YtTWOkAUbyIEd
-6IEfCIIhKIIjSIIlaIIb6AMMGCfNIQ5n4IIvCIMxKIMzSIM1aIM3iIM56ILioIJqIjLdcIJBKIRD
-SIQm6AgXWFLZR35D5nsp5X3fh2ThN0XFt4RrZX6U1nRVCFfcF3NxB4Xg14NhYndamFZXiHFZSIZt
-xYX+v/eEXyhjUihGVJiGZmWGW2cRw9AGeaiHe8iHfeiHfwiIgSiIg0iIeUgHYZglzQEPZcCIjeiI
-jwiJkSiJk0iJlWiJl8iI8ICIUdIrD1CInwiKoSiKhDgMSChTSriEa+iEXuiGRwaHcTR+c2iFpng2
-KCaLaqWK3vN8rchhrxhLcjiHdah7aHiLaJWLG7SLvJhkm4gkYyiLwph8xFiMTChd7ZaMyshlzOgj
-zhiMtAg26PEBkCCO40iO5WiO54iO6aiO68iO7SiOGKCNPdIcRlAM9WiP94iP+aiP+8iP/eiP/wiQ
-9WgE8Rgjr6EG7oiQCamQC+mOH+CNWYMe1nhlfzeTkc5XkVOYUOGFcJlmY1HHkRcZhxlpYon3kSWZ
-ah1JfCIZZeljDCngki8JkzEpkzNJkzVpkzeJkzlpk6uAkn+3CjoJlEEplENJlC7JkyAZR0WplEvJ
-lEFpDCMpXgAglVNJlVVplVeJlVmplVvJlV3ZlUiZUl4plmNJlmVpllUJlr90lmvJlm1plisJl3Ep
-l3O5NwEBADs=
diff --git a/Documentation/DocBook/media/fieldseq_tb.gif.b64 b/Documentation/DocBook/media/fieldseq_tb.gif.b64
deleted file mode 100644
index 7b4c1766b407..000000000000
--- a/Documentation/DocBook/media/fieldseq_tb.gif.b64
+++ /dev/null
@@ -1,445 +0,0 @@
-R0lGODlhdQKaAucAAAAAAElJDK+vr1YMDBUVZC8kDQAAVkYQEBcHOwYGSCEJHSAgaKOjoys8DDMz
-CgAYGp+fn19fFJmZmQoKO10wMA0VIAAAcDsICCsMDAcMT1MMD2ZmAAcSO29ISFUHByIAGoiIAA4H
-T0pKDJaFhXd3d0EgABoaVGYyAC4AKXd3ODs7BwAAN1MAKQAAYlZGB2JlDBwcWWBtYCA3ABAQTQAA
-ZQ0VQD4AAFVVVUhjSCQMJQAAfBMHMkQgIEtLSzAyDD5VPmZmDEZRB2FhEWZiDFo2ETkdCwAAVEUt
-Gu7u7js7Ozc3N3d3WACPADU1NTMzMyBRIDgAAEJCEHEAAEwNDZeXAABpAEQFBSMjIxgNQDooCBA9
-EEhIbwBVAAw/DAwMPgBNAENDCgc9B8zMzABDAD4MDAwOKjwKCkQWKUscHAAAcUtLFRMTEwohCoqK
-AA0NTBEREQgfCBUqIgApADIAAA4ULzg+DEEfH3wAAAcHSaqqqlkcHDgMDKSkpFQAABUVRjEwCGZm
-B00QEDAwXSUMJGUAAJaWlhQUUnx8jVQaGgcGLggSGy8GBmw4OGNAL4qKioiIiGIAAEsHB3JYWHd3
-AAAAPlctLYQyAGggIBgAGkIVFQwcJRgYSA8MU9EAAAcHVQAALRoaYbu7AEY1H2ZmZlxdEHAAAD82
-DlhqWExGHgwOUzMzDAAAmgA5KTEHB2ZmPlpaB///////ACISRExUDTJPJUQrDAwMVhISSEhISHd3
-IC4xCjhcOA4ORERERBkVXElJAG5gYFhYcnt1ZkgGBlYAAAUFMTg4ODo3BTJrAFESEmZmMF5jBwoG
-Q1paDUkKChxGHN3d3RwYRGZmHCgoKFMAACYmJi4YLhQ+FCIiIhU0FT0AKR4eHmVeBw04DRAsEAwu
-DAc2BwoqCgAAPFdMDQAA0WAqKgwiDEgZGRkQRAckBxsTPDEwDBAQEDwAAEJGDAAAU0FBQEJCDLu7
-u2IYGJoAABgYRjg4bAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAEAALAALAAAAAB1ApoC
-AAj+AGEJHEiwoMGDCBMqXMiwocOHECNKnEixosWLGDNq3Mixo8ePIEOKHEmypMmTKFOqXMmypcuX
-MGOelAegpk0AJFrSrLhTpYQ3AHoeDFpQqMCfQQHIXEh0olGBYkZtpGkTW56B0EYBfTMKCUEJEqja
-7DpQDIAbBJsOJHF1qdu3cOOqVKtTKcWnEOnmlQALWk6Eep8C4Ou3YWC7JUlAg9VUL0K8vcRMRUwC
-gFdoXBdD6+WE4A0kQqE5kSqwsuWBepFg8yq3tevXsPPKg4n3YW2HjnHPZrp7oODehoHDui2ysfDH
-iKFi42iU6A20A5G84SsQrdE8iKdPR3181KPY4MP+NySBC4L4lHRJAI0MSwwJ0++B5nSvHqdAJPVv
-LHb/U54YJzX99RR+QOnX3ntKidELANiMYlce2DB4FX9vHMdYTfIQeNZ+8dlHkFg9QSihQEQpyKCD
-H9q0E4X+AfhGTir6ZhMskWGTU33Y8EWffFC5OB+CONX3V1BixVgfe7DgWFlB7621nmRMAjBdLwk1
-Bw0SAEBZ1mKw5PHddQ9aNgp0jB0nQWfnpanmDTVNU56aI6lFghOLkbAcTVJh9xl28uCJBDQ2Jkkn
-EqN0Js8bvWTYy3dmzfbUnFcWOhxXsznxmWhKHeooANAcmihrBhEFKaGGAtAVoH9xN1ymFa66GFH+
-lv4JYEFE7eRphi/21ephJDqRR6fY1MlqnlnCkitmfZra5VlI8Fnms89CmuRy6jkKVEGLlkbntEwG
-CwuKyK2VE3HfLhaapQKNuSxrjpkF50Sg9SnvvPTWa++9+Oar77789lsvPDcBsE0Tqb67kVpWDVSh
-UMbRxKUEy23XV5YOFwRNrckZS93FYlRs1sNKjZLqDSRUnBBREnNsMsS0bvrtyDD6x+lRGWPsqkCZ
-pSVcT4MZtCRN7M6Ws0AM70bU0UbPljDO8uQ4kAQ177a0scA5TWKVAQ8mVJECUbnqTZ/h3NYb30Hb
-ssEQFZn12my37fbbcMct99xxo62RWlknq2r+mVsrhbfeRzmBDZsu68xdT33TGHDJGYc629+JG77T
-gjfFHLmqtiKWFTZv3CAZr9CKcQOiC66q6uadd4yYcc+ynrdav5EYMHAIYx2dVljunGnZtWF3E5q5
-5Wb3QfH6a/zxyCev/LwAB3xFEoUMnxHCuu9N5Myw/KSxQCrbZRaUNhOUMsV2XQzyy0U1fvb4qj/8
-xtk7iey4+TTDn2njBJpNtF1N5Wp60QQhEABb97ikwQIbwrFa9qImEAQmRGKMsZ2MaESmdCmFBFCq
-zShIs6xXWUh40gthStgEAHZYSksitIictvWIN4BmdUnzE6oEFalSQaVYjwDAVR41KEkJpRf+l5qV
-piYFuOEUjESzGZUPTfWnQEmOVdYqIAVlxcDMKewvdtKf6aBFtr4s6IWq8p+dBog0xrRFVCx0YbWG
-EyFslU1aLfRKyaa1GHDNMUW3QwtmDOSXa3ltiwF8Q1tw9p12qS+FiCxJoTyXyOkBpz5OkAwZk1Uf
-smgobEI5UmUeMaD8eEUoSFjQlI4SIadlsnEAEsMlP4kToJDliaRkEHWIEkopSeCPSPwfYvIAoLNI
-JpUEwcz7mgIxBqmnQjDsUi89N8lniRIWwIQklKSJyyRxUJokygmhbMm/I6otbH3RijFp5J4JFkR7
-BJFU3rJXwUa6UySgemddxHPEhZBLnij+AVRDVEm097XmBtTBp0AHOrx7ukUeZXOIQQk6klFYqCBj
-QgISbsBBuOiToRjNaHgWCieOarQjhVnIRGsCzriE5aMoTalKV8rSlrr0pTCNqUxnStOa2vSmOM2p
-TnfK05769KdADapQh0rUohr1qEhNqlKXipARSOKpUI2qVKdK1apa9apYzapWocolgxwiGGANq1jH
-StaymvWsaE2rWtca1oTE4BRwjatc50rXutr1rnjNq173CtcYJEQYSwisYAdL2MIa9rCITaxiF8vY
-wAojIVuNrGQnS9mtAgMh0GCrZjfL2c6y9RAOYUQnRkva0pr2tKhNrWpXy9rWuna09Hj+6ALIQdva
-2va2uM2tbnfL29769re0bUFCnsCE4hr3uMhNrnKXy9zmOve50C3uExKSi1hY97rYza52t8vd7nr3
-u+ANr3VzkRBIvPa86E2vel17icesArjwja985/vbBYR2vfjNr35ZG1uEzJa+AA6wgHkrXIQQN7oI
-TrCCF/zc6SKkuuKNsIQnTGHwkhch5t2vhjeM3/YeRB7vHbCIRwxg+zZEtBxOsYr5K1sSu/jFvi3w
-QQ7M4Brb+MbMdfBBIFzhHvv4x9298EEyvOIiG7kTHjYIiGHM5CbX1sQMQfGRp8zh/h7kv07OMoll
-bBAa4/jLYF6wjg3CYyCb+cwUFrL+QYhM5TbnN8lFCbGW5yxgKC/EDlPIs573zOc++/nPgA60oAdN
-aD3zASF+cIOiF83oRjv60ZCOtKQnTelKK9oLCfFGNzbN6U57+tOgDrWoR03qUpt6095ISBSawepW
-u/rVsI61rGdN61rb+tasjkJCzFDoXvv618Am9B4QwgdLG/vYyE62pf1wXzc7e71WNgiW6Uxt+XK5
-IF4Os7a3vdwxF6TMaA63uLmr5oKw+dnobi2cPyTnaru7vs1Ot7xXG+2CTPvd+NbttQmSbW77m9ve
-Jgi4x03wcZebIOeet8JJu27ftDvfELetnRUi5YVbvBP1Jsi9Ix7xfQ+k3/8O+Zf+Az6QgRf85GY+
-+EASfnF5N5xoD+c4xCeekIq3fN4ZH8jGZY5vjwsE5CIPOoNJLhCTo/zoFVa5QFh+82e/fDgx5/m7
-aY4QHgzg6ljPuta3zvWue/3rYA+72K/uDAYgpB5eSLva1872trv97XCPu9znTve0JyIh5uiC3vfO
-9777/e+AD7zgB0/4wuvdHAlRBRAWz/jGO/7xkI+85CdP+cpbfvGqSEgrxs75znv+82LHAEIYMIG6
-m/70qE893esR76bLO+cC2bnUq+1zWABd6LiPLtFhYXSk+168SocF013f5qcvefYzbz3xnw17WMge
-+XOu/e1zT/0cU/f32E9zeZf+73L3Qj/fVD+Izbnf5uY///tOln7116/762f//eEN/vDJX2TjRx39
-WQ6/QcpBj/77//8AGIACOIAEWIAGeIAI2H+lIAAIYQJp8IAQGIESOIEUWIEWeIEYmIEa+ICUkBDZ
-8AUgGIIiOIIkWIImeIIomIIquIIgmA0JoQZtEIMyOIM0WIM2eIM4mIM6uIM8GINqkBB9kIBCOIRE
-WIQImAwIIQDvsIFM2IRO+IQaaAIOUQlSUIVWeIVYmIVauIVc2IVe+IVgWIXUwIAHAQqrcIZomIZq
-uIZs2IZu+IZwGIdyeIZGkBDXUAV4mId6uId82Id++IeAGIiCOIh4eA0JEQH+oZCIiriIjNiIjviI
-kBiJkjiJlJiIEZAQNhCGmriJnNiJYJgJSWgBcziKpFiKpiiHoKB89Ddl5od/7qZ+7BeL1vdg8FeL
-3iV/q+hs9ueK1aZ/BTF+uVhkrciLdAaLsniMx7V7vWeL8IeLwUhlu0iMc+aLBAGMz5hiwyiNWWaM
-yIiMysiM4IhdzniNRhaN2uhk1DgQaBAJ7NiO7viO8BiP8jiP9FiP9niP7DgMZncQt+AJ/viPABmQ
-AjmQBFmQBnmQCJmQ/lgMCREO4PCQEBmREjmRFFmRFnmRGJmRGvmQ4ZAQYPAKIBmSIjmSJFmSJnmS
-KJmSKrmSIAkGCWEF+Bj+kzI5kzR5j8N2EAyQAAq5kzzZkz6ZkLegiuRYZS12juk3XN2YlN8YjuE4
-jkOpYuZolDCWjgJhjU+pX9kolS/GjUkZi0vJlMzolFe5YVGplSRGlbBglWMJbUVpli7GlV25fl8J
-lrUolmupX2XplgOGlmp5l+iVlXo5YHAZl9Q3l3T5fnbpl+uVl4FZYg6RCTUZmZI5mfZ4aAcxAz+Z
-mZq5mQeZAAnxDWEQmqI5mqRZmqZ5mqiZmqq5mqwZmt+QECIACLI5m7RZm7Z5m7iZm7q5m7zZm7Ip
-AglxAZQ5nMQpmTdpEHzAmcq5nJs5A0KpmOkFmI0JYINJmLhnmIeJfYn+CZ3oxZjTKV98yZ1Y2Zbf
-SZ1IaZ3sh53Z6XvbKZ7s5X3lGWDh6Z5s6V/xGWDViZ4ip57reXTtSZ+r5Z336VvzCaB/SZ4DCl/5
-qZ//xp/9eXL/aaCoJaAJultoeQ6QkKEauqEc2qEe+qEgGqIiOqIkmqF9sI8GwQvisKIs2qIu+qIw
-GqMyOqM0WqM2uqKfkBDpMAY82qM++qNAGqRCOqREWqRGeqQ8mg4JoQKT0KRO+qRQGqVSOqVUWqVW
-eqVY2qQqkBBQUKJe+qVgGqYk2gqjtwI3eqZomqZqaqO88JwSmlrSWaG9taAMCnDu96C/F6FvWloU
-Kqe4VaB7ymL26af+wEWndaptDoqnBrd9gapu8EmoBOqmjUpacQqpuGWohwpmiaqo4aanjdqnlkoO
-FyqmpFqqpiqiJ4oQKrqmrNqqrjqjOYoQcCAHtFqrtnqruJqrurqrvNqrvvqrtAoHCfEHv1Csxnqs
-yJqsyrqszNqszvqs0Fqsf8Clp1qt1lqqZIqTZvqq3NqtrNqmJzapgnploTqn55mpQbepnHpmnhqo
-oGqpgCqup1Wp5Rpc54quIaeu6wpk7bqn7wqp8SqvpUWv9Yqp+Gpj+rqvPtavb/qvhBqwAgtbCFqv
-tmWwBzt0d6qwBMewEuqwfoqWHXAJIjuyJFuyJnuyKJuyKruyLNv+siOLQgRhDwswszRbszZ7szib
-szq7szzbsz47szCQELTwBERbtEZ7tEibtEq7tEzbtE77tERLCwmhDLlQtVZ7tVibtVq7tVzbtV77
-tWBbtcqQEHrgsmZ7tmibti1LAQghBj/7tnAbt3L7s/bAVHZ7t3ibt3q7t3zbt377t4AbuII7uIRb
-uIZ7uIibuIq7uIzbuI77uJAbuZI7uZRbuZb7UhCUPf50HwJySIYRMFTCM6ALSDJSFYPkM2szG4Sy
-Fa90ITZRUpcbu7KLESRQQRRFEEtCulBBQrB7NumTS78bvJp7uh+WHHukGaPDJU0hGu00u877vPt0
-GrCAJYOUB9f+orsH1BVI4ATVZDjB6xh4YRRZpBBGAUQF8RzAuyzQu77sqxBWY71YkSN2ARmsQRzg
-yz/HEb7Giz3hch/FQhBm4RVqgR3tW8AGnCTQIT9dw0nz67ncAhj5i79K1jixI0HYS0Dcgy4HvMHP
-G8DGMk3W0cAIIUAnEzD7IzuVg70V3L8XfDQ30QvxxMEybLlOIAEQ0k+sdMIG0UK90FVDEcHpa054
-hBX8W7zR8b9lcRogNMNMLLmPQFFYlDd4MUYMcb9BrMNDXBrLQb7JYb6eAR1L3MRi3LgXgw0wu0UD
-gsQKYcVapMKI8RPEO8HB1DnI+wbK+1BjnMeMKzhG/DU2kSj+WXMyQOy6N+HHNRG6NzE1Fnwf4jQ0
-bazHkBzJkjzJlFzJlnzJNEUCvbDJnIwkInFLnbzJAYXJpFzKpnzKqJzKqrzKrNzKrvzKsBzLsjzL
-tFzLtnzLGhHKurzLvNzLvvzLwBzMwuzLo8xSCjLMyJzMyrzMy3zGKnXMzBzN0jzNyXxTDELN2JzN
-2uwhLkUT3KvN4BzOyAwgePxR3izO6JzOvNxGNsXNLuXOLEUT5axR8gxT9fxSuVtT8MxS+6xS99zN
-YZxR/9xS+UxT/axSB41SAx3PAY1RC71SBT1T3CwMjFDRFn3RGJ3RGr3RHN3RHv3RIG3RXZXQ5qwW
-OLALKJ3+0iq90izd0i790jAd0zI90yiNAzIyzwKtFiG90zzd0z4d0h2wFg7cUtzcl5MabSRNz2ox
-fRd7Yzr20P6sFvMnrkkW0TJV1BGLWkhdT1HdG0zd1DX21A3NUP881ZNa1UPNz6li1I261fa81GDt
-b2KN0w4t1Vl9WmhtzWt916bl1i/1z18d1wo212/dG2b9qUKt1+pYnIzd2Paoj6jB1Sn1z2zQmpZ9
-2Zid2azJBjdd2AMBk44d2qLNjsdp1TGF1XxNqb2R1Dnt1YK9bYT913ad2qOV1+2817SNcast2QoN
-168dZrEN0Iad20iW2Lc9EGwdqH4t3B/328Dd2bI93Ln+bdv6nCpU6InYnd3a3YVjGNmeLRAfyILi
-Pd7kXd4q6IL7Q9dkrRaZuN3u/d7YDYqlkdYrxc38Z4T4nd/6XYAL6N3RPRBaQIgCPuAEXuCDqAXQ
-zdwCEYT73eAOjt9ION+KXZXEvdwtBdjOrakJfuGzTdvUbdC4TdsWztCuneE4FtwcLt0ebtzVjdwV
-vtvfbXsmfuIbTuIrR9wfLtGpgqHX2uM+jqooytp13RuzCqxGfuRInuS+KqzpHeNd+uNQHuUZmq1J
-Qt8IHeKpPeIrheEzjrA1vuUdnto5ftVYztda3tXN3eVh/eVovnQ4zuIg7uK5feaT7dtqLmZsXucq
-Lub+cK7jci7iMP7fP3fnGNvkgi58by7hxy0QeBZsjv7okC5olskYvF3SvaFpp5bpmr7pnF5qqWbo
-Cg4LvBbppF7qjl7aVp5SqA3o/h3qgU3oyIXiNu7m093nZP7nWR7org7rg53nvb3nfD3mp13md03n
-v57mvN5+oJ7iN17rit7iFD7nus7sg57syr4q6k1QZZ3oVT7hsMADZBDu4j7u5F7u5n7u6J7u6r7u
-7B7uZhDkla7UvZF3hlfv9n7v+E54iLfssw4Li9DuAB/wAj/w7C56zx7n0c7q2RTjr27tsg7mwH7X
-wg5Tq57rrU7tMm7t0PXwbY7ozt7ti56WL37x/d7+8MnO8Xre7Ct+8H4uENcN3zAf89xNhpQe4+Ft
-3jif8zp/guiN7THe3jIf9EIvBfIN8tAOC/f94Eq/9PxN80K+3r1xhwY+9VRf9YBoiPwO8QPB4Ezf
-9V5PDxFu9Agv8tJO8lpf7RrvXCh/7LS+8mLf8mSv8DV/6CbP62tv6SrP5yx/6wlv8QtP92nfYL6O
-922v92/P998Oeoq/+IwPdmVn9h1vDt8w+ZRf+ZZ/+Zif+Zq/+Zzf+Z4/+fvu84e+eY1f+qav+AZ/
-+MOO62Y+7SUf+Go/+PKe98Fu66vf960P+SmP9rCvXHc/+4Vf+3t/+3Hv93O/673fbbLf2rQv8bb+
-T/HEntXGTvgZn/zJ9fvMH/zOP/zQPxCS8NPgH/7i/9Fa8vTarhY/QNPqv/7s3/4z/QPLP+QDQQHj
-X//2L/7P/1LmT1D7L1BQvfsAAUvgQIIFDR5EmFDhQoYNEcoDIM/hRIoVLV4USAIARo4dPXYEQOLj
-SJIlQ5ZEmbIiRIkqXb48yBLmzJkyad5MqRHnTpgnef4EKRLo0JURiR51aBPp0odGmT4tqBPqVIQA
-epHAmlXrVq5dvX4FG1bs2LA+qT6FOIrsWrZt3b7FOsrpWaZp4d7Fm3dtr410/QIAHFjwYMKFDR9G
-nFjxYsZC/SKFyFjyZMqVLQ9u+fho5MudPX/+pqxZ9GjSpU2fRp1a9WrWrV2/hh1b9mzatW3fxp1b
-927evX3/Bh5c+HDixY0fR55c+XLmzZ0/hx5d+nTq1a1fx55d+3buFN9IICjhDUEkQiFyJNwL1nlY
-6df3JcgZALY8Cgc7yQxt1Jv5JJC8J+yGwoSSwAnAnABPoPkKkiuz7h6EMEKKSLiBoBtGIUgq9ggS
-Q0AAbvivKgcFYm+ugTY8ET7x6hNRICT2+w+aN0aBBhYxnHCiIBMVdHAUJ1jMwwkM2wNADIL4G1FC
-JZdUUgwAQkQCABZhyYM/EuEbCJtRkEDCCfVaLKjEJFG8MkNsEporSol6qXAgJLBxrL0k55L+AJsQ
-XfyuPSccC3JHJv8EVDtsEqxyIGgG7YtMWHoJUdGB/BTTIEVRhAaAGg9K8w0uiyzokRwfnTMzJx4x
-iIQcAXjkTIFGecTPQF+FFToKV3Wsl0fYc3QgElTFdMy+IMWyTIJcldPFUeqbFEs/5wJWTmyMhOWN
-SpOMtVprj3PyvzegnRXXYN0k4Q1qFRxMWMJESlbHcd1LF1SDmPVVTqyoPHXca+/FtzcE81BVHk3f
-E7bTN3q5FMz4fo03TCwrLVjdgVoVI0poH/6Ux3dFJbXTesU4k9Vi8wU55N0euVAojTBTdFd73VXY
-4pZfzojXi4dds00X4aR5ZoHEuxMWJPL+NOpZaT8WuWijY6v0WUkTDVbihpolOsWlB1oRzcweGQ8W
-GWm0EUeHv171R4GCHNIoUz8l9mi11x7NCZmlBjCwXuQDzOqpiSyXbvXko88+wZyYWD/+4Ow5apep
-NhCAUd11MuO02YY8csknp7xyyy/HPPNYSeilc897mRgmCT73PEHNT0c9ddVXZ71111+HPXbZZ6e9
-dttvxz133XeHXB7ffwc+eOGHJ754449HPnnlC08OCeWfhz566acXnnnkoKE+e+23n7460L4HP/zA
-ViaObvHPR38x8oc7OX333y/M+1G4p7/++ltdXziIbrW/f/+hx59zNPI/AhaweHLxXpz+lKOU5TBw
-gY8jjlSaI0HpmKWBEByOA5OjQeRQcDkehI4FH5i/4HDwOCY0DgiTo0LnmEUQC4BhDGU4QxrW0IY3
-xGEOdbhDGNYgRSQEjlJQkQsiFtGIR0RiEpW4RCY20YlPJCIqfihA+MgDBjzEYha1uMUdCkJX33qO
-WRZADjKW0YxnRGMa1bhGNrbRjW8kYwum2Byl5CIWd8RjHvW4Rz720Y9/BGQgBXnHXMyRORKUxyrg
-uEhGNtKRb1zAFxM4kDE+0pKXxCQb5XglIP6mjoMEZShFOUpBFpKTVDyRIjO5SlZaMpIZAWMLHVPJ
-VtbSlpo05AUzY0dS9tKXv/yjKd/+00nfIFKVt0RmMsnxSliwsDlmMUEapDlNalbTmtfEZja1uU1u
-dlOalMjlCAeihjaU05znRGc61blOdrbTne+EZznVEM4VwkcA7/BmPvW5T3520wSSpI5ZQLEKghbU
-oAdFaEIVulCGNtShDyWoEeiJHKVEIBQXxWhGNbpRjnbUox8FaUhFetEITPQ4EhSABSC6Upa21KUP
-BQVApyNGZdbUlpscpnM+CUye9rSXwkRhcYxpU6KukpnOZA5Ni7rUR+I0qOWbCy99OlWqBtOkKazi
-MZm6VUjKtIKz5GpY3ehUDOovqlVFa1rxCNSyBmeoYoVrGo8ay2c65haewGte9br+V7721a9/BWxg
-BTtYvBbjqsVRChhesVjGNtaxj4VsZCU7WcpW1rKLBcNhIwgfBiSAsJ8FbWhFO9hbeDU6So1rauOo
-2QyeVa2vnSpbidmbt6o2tXOdpEBoaVu4knW2vNkpbIX7S9mikkRa5W1YcRtQsCa3t6w16y6HO92f
-QtetWXUuXJc70+Zml6u+1alrqTveQRZ3gtj1Lle3+9WBzGC074VvfAObAOsGcS4iAER+9btf/vbX
-v/8FcIAFPGAC51cE9f2NBPkgXwY3OL4zMG0Iu5vepYKXjuIlb4b9aN5DopfCS13vaSf8YZtamDnB
-1XCK9cjhD3qYxDYNsYQp+eL+oppYlwORqop1HAsWK6e2NFZmjMM4YiDf0sbiFEiOd5ziHtczlUWu
-qZBlORBeiMPKV8ZylrW8ZS532ctfBnOYrfwJBPtGKSqYRJrVvGY2t9nNb4ZznOU8ZzqnWQVlpi1n
-VyBmPvfZz38OMy8iPOQZQzmZR94ghpes4SZ30MWGrqWU61poSN8Uz8BV9KLJ2+iTPrrSRh30lHX7
-aUuf8sLS1TSTL72bH5M6k5JOqmOqDGha19rWXiazqU88lz/8wte/BnawhT1sYhfb2MdGdrJ9/YdV
-60aCDNjzraU9bVoLGpa5hcVuXY1JRFM006meLqex+uRtg/razKV0uS/Z7RP+fhvcwhW3UD2t7kbC
-ejmopXdTm50bFL873PvGTavzzUh7KwffA2cku43Tb3/DG+C3ETjCu3pu7rbXwRfHuGDpq+sbCyQe
-lwV5yEU+csvG4+G2UXDGVb5yvUKY4uwdtcQbqXDEurvhaY33Zskt80UWPDkH53kbaQ5VVN98uDln
-37yDrkafIwfoS1fj0FtbdKPDFunCiTjU0dj04zxd62eUenRxXPWjn7w2Wf96GbluHLNswhZvh3vc
-5T53utfd7nfHe971/nbDchzJsFAFEAQ/eMIX3vCHR3ziFb94xjde8KowO22ejYe9V97yl8e83jcR
-6knHPO1Rj/xsGE72ql7+/bo7//zWOR/rdKfejGEvoc1J31PTAwftn197cbyeetjbl+qzL33oZXP7
-tOeeOGJ8afKVv/yGStTvic6MNEY6fepX3/oilYbwY4NIlTLf+99PvvGHYxYxGND89tM+bJRyfvZz
-L/2vkaDz2j9/6U0MqfdWIPTD+9vdPDXpz7k/g8s/b+M/3fA/sTsvAKQr1ju1/XPABDSumYKfCZzA
-AuQ3CsRA97HAgMvADhSf6tCLEBTBEcyKhlEOaCDBFFRBtzDBDVrBF4TBr+CdGaTBGrTBG8TBHNTB
-HeTBHvTBHwTCIBTCISTCIjTCI0TCJFTCJeSOPKGarHER81jAhWCXXxn+jL35Fr6ZkoRwEpuJGvMJ
-CfNBl/tgEas4GAvhFHK5QryRGyZ0Q9OYlYG4kAxhGoPoEMAAEbt5Gag5w515gy08iJO5E2KZC0dB
-kXD5D8DIGIBxEcDIPxNpqzeUxKfIlkackiqpw4LQEi7xEj3sQz6Em5hZiO9wQsNhGUaEGYuxin9B
-xVRJFZ05nEmURc0YlLGBwkORgEwkCEYJGFiEG1DsRYZJiDx4klGwGULMDEMMFkuREydokw3Zk0oJ
-naiJxFm0xp+IQ7UYCFvxFoXYFU/8xYTpw1M0CGOkkieJxZzpxVBsJseRBydBlr6QRljYE7AxxWvE
-R6SoxG3JiArpxoP+KA9xQZNyYcRzQUV2vEeB+EOFdEeDMR+JoBsnNBtNYQ9PEQissUeDzMeNHIp9
-6Zd/+UeBIRj7SBhgPEhhPAhi/Jt0PEVlJIhRUBWnEBL2SJzA2EJI3ECO1EmOIBltbCbC8J1vURmG
-gBqTPMS3eckhOccaQUaE7MVIsZGQ2AiUpEelpMac3MmspIikmUZGJBOnIcqSFMdQrBqAXMiBeANS
-acp1fEqE+aKNMBWCwEhyrEattEuOcJuYYBrBmBvCAEdz8Uu9iZv5AMTwgMKwYcO6ackqJIhKqQ8T
-MRBYwIZF1BopoUu/vMvM1MzN5MzO9Ewe5BzSAZ2bGB3RNJ3PRM3+1FTN1WTN1nTN14TN2JTN2aTN
-2jwd0cTN3NTN3eTN3vTN3/zNNZgG4CTO4jTO4yROJVgDJUDO5nTO5wRO5WRO6KTO6qRO6bTO7NTO
-4pyGNdjO7wRP3hwWbAjP8jRPAGAH81TP7awGAKiG9YTP6mzP94zP+mzO+bTP/DROdrAK/fTP38QG
-ZRlApzMG53AEAHAEA0VQBU3Q5jCGKdS5RxnQrivQ5jjQBmWOC2VQ53hQBRyWCWW7Cs3QBbVQEh1R
-DF2ODo3A9nCMEZCEF4XRGJXRGaXRGrXRG8XRHNVRGC0YABDRQwiGIBXSISXSIjXSI0XSJFXSJWVS
-IR0IDRWIGDj+hSml0iq10ivF0izV0i3l0i710imNgSc1UWFYgjI10zNF0zRV0zVl0zZ10zeF0zIV
-BjHF0B210zvF0zzdUWAYCBXVmiYF1EAV1EFt0kPwKrNghE5Q1EVl1EZ11EeF1EiV1Eml1EpVVHrI
-DB9tPddbLYGAUlh4AiYQ1VEl1VI11VNF1VRV1VVl1VYV1Seg07EDPpyLVYGABEvF1VzV1V2t1Evo
-U6VLvfVCVF4l1mI11knF1EcRUW3jVHLAqU8NVVeV1mml1mplVVj1VBNVsln1KWH61Fs91nAVV2L1
-VYHw00Rq1jMSVsdI1HF113dF1kxd1nR9vVoFVWvF13zV11X+xVZY+NRt5Vae8lYTBVd4NdiD7YRy
-hYVzRa5mXdeBaFeEldhxTVYFmVd67VR/NdFo3deO9Vhr7dd/Ddjgy1YMLdiJRdliVViGxVgyeliB
-sIMpkNmZpdmatdmbxdmc1dmd5dmenVk+UNaB8AM3INqiNdqjRdqkVdqlZdqmddqnJVovsFdv6Iaq
-tdqrxdqs1dqt5dqu9dqvBduq9QZ7jYJmMNuzRdu0Vdu1Zdu2ddu3hdu4NdsosFcz8Nm7xdu81due
-3YNfHQg+gNrAFdzBJVyo9YNDZdeUVVxirdj2uFiMfdaN/djJpdxrtVeAHVlfGliTXdzOzdWVBVbc
-Q1yI9dz+0qXUxtVUz6PXyMVQjq3c14XdkNXWzI0tez1Z08VdRgVd1EvXl4WFiM3d4O0E1H3c1bVX
-14Xd5P1Y2cVQzKXdUdrcgbhd4TXd3T2ull2m0RUI4KVe0yXeTeVU1h0I5FXe8s1X5pXV5wWm6LXV
-7g1e612PhuVU3+WBAbDf+8Xf/NXf/eXf/vXf/wXgALZfZ2CAoBWIevCCBFbgBWbgBnbgB4bgCJbg
-CabgBE4EezWHLtDgDebgDvbgDwbhEBbhESbhEtZgc7DXwHO8FWbhFnZhxoO8kh2IVhDgGrbhG8bh
-AMYAvxUIBpiACgbiIBbiIabgetDe33Xf3P1e1U1X8RX+CPI13yieVvRNMvUVWNtNYtyFX3RtWd/l
-3ixW3CXONux14nuV4jOmViqGBee14vLCYjD23C2WX9fzYjj2XDFm1vA9XjTmY1dVYzZu40BiX1iY
-XjuWWDnGXt8tB3pg5EZ25EeG5EiW5Emm5Eq25Etm5FIQAAOGhWjqp08G5VDWJnCSYYHIhi9A5VRW
-5VVm5VZ25VeG5ViW5VlG5WywV3KKp1zW5V3m5Xeap1KGhT7A5GEm5mI25ktOBh6GhXsS5WZ2ZlD+
-p5czi0qQgmq25mvG5mzW5m3m5m725m8G52qmhk222IEYKPBD53RuPnu9hipw53eG53iW53mm53q2
-53v+xud8dudrsFeLur5/BuiA/qiSAmYbCOeDRuiEVmhwzgRlTil1huiILqiYkubENeQwllfwdb0y
-huI+9uhR/eNA1tw3vmiUReQuPuIvLmmDxWMy3uOPhulSDWmRJqVBLuSVfteTxtg6xmmJbemW5eiY
-FupXvVyarmmS7mmD1Wl6pV8ycOqnhuqoluqppuqqtuqrxuqsdmozKOByFgi3y7ywFuuxvru+01gM
-NYdvUOu1Zuu2duu3huu4luu5puu6VmsUBuZ1CIC95uu+9uu/BuzAFuzBJuzCNuy9Xgd7XQStZuzG
-duzHzuodNlfOojyytuzLDuvNq2jSTeqD/WnIfen+oYbpmTbqULLpzj7Ype7dlEZtls5oJm7WoBbt
-0S7q0jZtpG5tcVVth2Xt3HbXzzZeYO7o2Y5i0rbtUsJt3zbW3Z7f3lbucAXuJg5t4uZj4z5uQDrt
-5w5X5qbjI86ESADv8Bbv8Sbv8jbv80bv9Fbv9Q5voPVqWHAvlpPvi9u4sx6IbwiD/Nbv/ebv/vbv
-/wbwABfwASfw/P4Ge8WvAlPwBWfwBh+wAwPmC2DvCafwCrfw9e7byf7b+ebwBnO5n/xQztZuY43u
-2J5u6j5j677uDUvuEddV7g5W53ZxXi1xPRZuFO9jFV9xPsruGedVGBfdzd5eH2fc1x5joD5xHDf+
-Xx3f8RVrcSKnVCAvPhmHckut8Y1OciVXXiZv8rV68iqPVCn/Ot89B0gw8zNH8zRX8zVn8zZ38zeH
-8zg38z7oaselMmrD8zwHs1yzb4FIhzEA9EAX9EEn9EI39ENH9ERX9EUH9HSwVzSrs0iX9Emn9Dm7
-M2CGAjnX9E3n9E6P81ZQZmjT81En9SuzNhCXUBEH80q9ct7Lci2P3dru8j7q8VWPcmXm4p2mcluP
-1Fb/PNmG9SWX9Vnfo1rn9TDH9TmOcSFH4mOPV07OYyy/8WAXdmAG5CY3dmd3VDHXOkU+5m8H93Cv
-ZE3mZALQgXNH93RX93Vn93Z393eH93iX93P+NwB71QIuwPd81/d95/d+9/d/B/iAF/iBx3ctsFch
-oIKEV/iFZ/iGd/iHh/iIl/iJp/iEFwJ7FWZx1/iN//Zk1nCBEAAamPeRJ/mSN3l5J4AjpuaFZvmW
-d/luHmdOPmeJpnnwc74+h4V21ued5/me93l85mdg9meBJvqivz6CxnmDfvmlZ3qWb+iPX+buq/mp
-Xz6KRnUFsWhth1RfTztgp/bk5fJZz3atX1RuhzqeJntH5fqv8/qvf92w7/KxT3uzXzq0T3tGXXut
-a3u3p1y4x/Yv13q6Dzrf7YBLMPzDR/zEV/zFZ/zGd/zHh/zIP/yJSV1YsAcuwvzM1/wcggH+e6WF
-JwD90Bf90Sf90jf900f91Ff91Qd9WrBXZYCi2Jf92af9J1IGe9UDydf93ef93o98ClBmMdj84Sd+
-zbeHI04qEV2OT11+E21+FFUOP+2wEG8h5VcO5r9+589+6E8O6W8x6n8m608O7B9/7S9/7kcO7/cx
-ZbmKGHT/F+yP95f/EewBAOiB+cd/vaj/+8///n+L/QcIEgIHEixo8CDChAoXMmyoEBsAhxInUqxo
-cWAvALA2wgLg8SPIkCJHkixp8iTKlCpXsmzp8iXMmDJn0qxp8ybOnDp38uzpUyfHoEKHEi1q9CjS
-pEqXMm3q9CnUqFKnUq1q9SrWrFq3cu3+6vUr2LBix5Ita/Ys2rRq17Jt6/Yt3Lhy59Kta/cu3rx6
-9/Lt6/cv4MCCBxMubPgw4sSKFzNu7Pgx5MiSJ1OubPky5syaN3Pu7Pkz6NCiR5Mubfo06tSqV7Nu
-7fo17NiyyyKRAKsXUtxGe/Ui4URMbjG6gw6/Pfs48uSbSeDGDc1Jr99Ciw9trpQ6x+LYlXPv7t3v
-I9t5SMAab11CHljQqPfmDc34bWjYSMyH5lu6QNuw6NuHJcGJE7bhlgd02Gy03XcJKrigWtDcAMso
-0CBhXXHjTScPLPKMAh+FsIjx4HO3AZedcU4gAQs2E8JiIhJvMPgijDGWZd0N71nn4Q3+5E1Hom4d
-StAbCRqxF9989MlzI4IyKrkkk009MoqAvL3xY4ajnFgdhhqu6GGHYpBXG3w8wvdIiUi02CSaaapZ
-FDQAXHmgcaNA15tQAvF2opwCGafbfen1CCdu/zlB3oDQPRjmmokquiijjTr6aFn2EfQepJVaeimm
-mWq6KaedevopqKGKOiqppZp6KqqpqspWHhtu5ARH+iU51Bu83Vgrb8AVh2svb2bXC67A3aejrQFC
-5QSlEpAJC5izTmergLfZSihxvA1KFK7S7QcgecIByxtzuIoHnRN+ugqhs0ORoJ+DscJ53bTw8Spc
-tdHpGJS3uG407IHWjtgUEgZuNMr+iOkhitS8Pca7q62+clTrtfty65+UQH7bi2383nbuKOnWye6h
-/r2blK0Y71kyosXey1G+vJVHrsHF/stUqxzBulHMTPEqL7SI8urwRhDrSGB06f34LXMXCzvxxhx1
-TJXAYrgqj0aI3qffjiQerN1GWhI13LobSUDtq1A9smyN+9640XPRzTwyoFmPDGV1G6kYtn/3ct31
-DSciccORAqPoMb6Hhtd11cXdACDWWmu999yNE8cR3mPD1+5TN4x4sxiJByUPuW+nDPfoVX5d9n76
-WQ432KoTKjg2hLNsuH7Mwb24sXLHPbnjdJ+eYd/MAn55yE1F7WrncIPudt2P667+m9dZwypP8H9j
-yDrlrt8Gu+xFjZJe2H//6TTqz5MOOYLDcT3czU6FGDDOtheHnnrYDTmk45jzjgTZI0MeIUeg0bHv
-ledpTknRit4jvpF5aSOCc9y7IBcm/cltfWwrHlMsVzN0wY0EwKEg9rbGu4NFMISlG10vCNiq7jnw
-RMjykO3C1EAUNU9MI5wgBrUGQLZtSDcgXAoB8WbA4nhQPTncG/789zvc7FA950Lf5FKYnhVSpWYm
-2g+9SKgiWvEmWrDY2QnTB8HmWEtyTEGWhVS0tqBY6FkuO1kXT0hCXL0BQ1B0HHt6ITUIHekp60LC
-zTw4vqD8sGS6Cpqt5ChG7Nn+ChuiUwr8SJCeR+RhkGxcGSLppLA43lEoEMOGjUbYyT3tcRR9dMof
-b9Y3S16wOomEoyZvuMj78caRULHiiSjJyvhx0WS6mVcYsRUdUCZxlM0p5SmnAsoHyYNsxeEfCYBW
-uiSGaZEorMrZCAaLtK0RR5g8nwjzd0S7wcqYTqOUE3EDq3I+5UPKypAzg4KEUViphiUcYwBzqEQb
-TkVzsPobh4SioW9O03yke9eHgonHKG6JnU5xJ5lIYEe4zbOeutuTQY1YFN00UYAkfMoyhRfQz42C
-oEjM6HZ086GOPlGUDF3nR51CT9scrVaI0txueEdN6J1LbpXrn1QcdDNbTan+OBqSpgh36jQzqi91
-YtPbCKnnN8DhhgQljSlSnJC2mq5tQui8qCX/Z8aR/RSrGaQnl76Vxafm1Hmy7FpPtQbQspqwrFW9
-KgtflbakvaF/Xt3ojpQ6sLHCR6rCmyhVZprWWq01b229Jz5N1zzrVW94ddXeXYEalc75apO9eAQY
-DQrMnf0MsJRjWl6JkqPATktO4LInLAkFLZUJM47bipgJX1Y047QplE95RPvexSv6vPKGPMtVyZJm
-r8fellhWeYPBIjit2L3WoMn9oq3EUNpe6lFiuJWjxpwDAN86BbhZ45UEqGs/1iYyubQ1rW7LddB2
-ukmn0FLvN/eGXuy6bLv+nsxufKMLXtSqZ7xmXRWCE5ymsQ2EsE5pJkHsIqmBfDUqExZIhQnD4PxY
-ZcN4o8uFSZDhp4R4xIIpyPWogmK7QHggVmmxnhQs4xnTuMY2vjGOc6zjHfO4xz7+MZCDLOQhE7nI
-Rj4yp6YUFAm4iCP8Q5xURIIbqm1Eyhmq2udAgg0BD4XKVGscAORBNZGQx8pj/oh8O5JiTzbZyaN4
-AwD6eqWRVBkkafvdRm7Q0zbN7F5hjsqZP3LlOoekymu+8ke2fBQqd8SoGhkJbujcES0bjNFDuQEA
-3nYkj+AU0SDJEqWNEiSRiDnUSMYLCYqn56AECcpC+RCnkarmLlftzwL+xTJHLM1kLmf5ym94k60N
-HZRg5xrXJPj1rIeSB49EF5BpE4NWhT0UW88T2a9eWR7aDItHPLDV0nYznLVFFEsX+9vDPrSuoWsU
-RntkWYPmCLGJDe8UH/tE5HayR1bWOTLxD9n3drV/1L1oXFs6SLI+tVs4azcARDfbVfs3NqwEyCTJ
-+90V/ze56bNujVCNTuaO96HfPe9kC0XPq97XAzv3noqT/DbffMShBb6ie8kDziMPipze82akYrzW
-IW+5yPfzwFtXGVhXIjfIiZL0f28bG9wmzsqMxfSMD53Wvb45wuuCDf1kO4BbfziuD3T0sJ/b6kAP
-OsALbGIvZzrTH1/+88XDbuCKI4HhTLaZu7t2IpYTO72whZCr6j6igD0Cy/KWKNaJrni+oxvXvS0K
-u0F3KKTD/efxBg7TB8Xnhc9MDO+ZuuMNvPGrV9nEWXdLqgemN5iD/SgaL0rczZ322bN80FSOYdIT
-n3YSLKvid6+7wWrPeNLzUNlN/g/lJGr4nyNu7WSPvNIbL5TaR75z6aF82adN76mRffMRYzraac/8
-tBecoKd/i8LfMKLUMxr8/R6/SFxN5vDfm/oc10jAbJN7Qn+EPIGOs35U3Gpt06EIX0joBrHdGyVh
-S3rcwLKAjsjVHpPl3a19msUdoOzVH/NBX71hH7xh4KSBhI78m3n+bVuTgd//tV72QR7BiYSDnR9b
-BEgeCEzN2ZsKCgVo9YLpnR30KR7xPd643V/X/Nr+8SCujYLAyFvdhYQLUaAEYMjwxcrQ6dN+WImB
-tQio6V6GYAM2jB/9+Vz0+aDasaC0OYEp4VoRYp0EGNi/OUFIpMfm5ZptgB4hiR4ZlpvdvBkM0sUj
-6JmOjNqn/ZuRJEXsGaHZodzoWVqOFGEhyt+h+d3CZcwDAd/ZtZzH2Q0VahfyeVr/AV3NUWAQml0U
-4iEi3qGtdY63vV0Yjty9AeGKuMolbpP/kV3Qvd7okWL47WFbtIktKd69CZ5SFGIjkuKuJSKWtQgj
-hhy59WB2rMz+DTyIs73Hc9xMFFabr0iAg3EhKFqavDkBKIbi4lme9ImNzIkivXnEClbi0oWdbwQF
-aCHOI+TSG3weLaYbr4mhpc0TFeriWjhB1b3b//VCChqFMMZfQHKiohmjOzKiQYZeeogEA7hdrNRX
-tcWZRY0EFNpZhQVPUQRJhnFjoZHaHV4dRoagoAVaQprimrlhOpYksc0HJ0aE09VhpbkhAHRaCs5i
-ot0jPoYER/IjUAalUA4lURYlYCiXIYXF0ZSMyXTHUpbMC16FtzCl+SHGU/aMUWalVm4lV3alV34l
-WIalWI4lWZalWZ4lWqalcvwEW/qEF2JGCralXNLEW14GIM7+JV7ORGoAAJBchF/+JWA2xCjUnmZQ
-TUkFJmImJmIOZl1aRpAoJmRGZkVkxF5WZWZQTWNWBmaKxmaGRiqCxmeWRkRwJmFeZmnC5WnaJS1y
-RmiSxmiGRmeCRmx+xmx6Rmt2xm2KxmvCQgcwgm/+JnAGp3AOJ3EWp3EeJ3Imp29SQLFlJmXE5g/s
-gnROJ3VWp3VeJ3Zmp3ZuJ3d2p3T+QHOKRiqKgXKWp3meJ3oqZwdQzmqGxm5eQifEp3zOJ33Wp33e
-J37mp37uJ3/GJySEJ2wG2xMwAYEWqIEeKIImqIIuKIM2qIM+KIE+AYCCJpbJAz30J4ZmqIZuKH9e
-AntWJkf+wCeHjiiJlmh+/ifiOOdkxOaAQqiLviiMxqiDSmiKimeFXqiJ5qiOjqiH7kt7gsZ77qiQ
-Dul+ouiVqahksKiMLimTNmmD0uiR2miu4SiRVqmVdkKP7sePfkaQXqmXCqmR1mZnKKmTlqmZLimU
-iilr3uiXtmmOZmluuue9WMMg1Kmd3ime5qme7imf9qmf/img1qkCTChtBpsPvACiJqqiLiqjNqqj
-PiqkRqqkTiqi+gCh2iaWMYACBCqndqqnfiqgWsOHosZuLgA5nCqqpqqqriqrtqqrviqsxqqsnmoL
-XOqYBlsuxIKu7iqv9qqv/iqwBquwDiuxFquu5oKtrmn+rq3CrDarsz4rtMrqAozqaZRqtF4rtmYr
-rNZqjQZoiuWqsYaruI4ruRIrsnYrhS6rtq4ru2LrtPooiG6EqbYrvdbrtibrZsQmuJYrv/arvwbr
-uUapZ1Yos9qrwR7sqb6rlsYrLPjCKjwsxEasxE4sxVasxV4sxmasxj6sEeBrYQYbKYSCyI4syZas
-yZ4syqasyq4sy7asyJKCx2ZGKgqABWyszd4szuasxvoCtZrGbiKCFASt0A4t0Rat0R4t0iat0i4t
-0watDcQsaqZYNnwB1Vat1V4t1mat1m4t13at134t1WYD1KomRwgANTQt2qat2q4t0yJCz4rmvYio
-m87+7YaGaWpaBpmeqd7uLYOm6d1WRipaKN0ObobC6ZZ6RpcSruKe6NjirYDyLeRGboH6LZJGRuBS
-6eJmLn0aLsPKreZ+bifYbeVCRt5KrumaKeVKaddgLuhmLueS6r3IAgLMLu3Wru3eLu7mru7uLu/2
-ru/O7gQ0rmYGWx0EgfEeL/Imr/IuL/M2r/M+L/RGr/HWgfBSRioywO9mr/ZuL/f+riy8rWvey7wi
-LPm2K7cKrGzi6r+uL/v2a8Cq6XIQbPnO77oqbJwCqfjSr/5e6/nC78d+a/sGsACba/VORuAW7P4m
-8Kza7+F2hrUqMATfK7oWKgAPsAVf8LEWsGQccAT+d3CrMjDDjq8HjzA59O/fPqf6YrAKB/D7nrAB
-yy8JkzAIwy5HyG733jAO5/DuBu8Ee0ZsFq/0BrEQDzERQy/19jBuZqoOLzET5/D3wisNb4Tntu7i
-ii5ppliLnq4WN2nqDuyUUvHnvm61xi0Ya64VeytHZPEWrzGMdnG6rm4Zuy74jkbixvHgnnH6YjEb
-7/GLuvFnXK4dK64Y++y9AC3bHjIiJ3LSPi0Sc0ZsTi3YRrIkTzIle63YNnL8lu3ZKjInd/Ihuy0U
-jzFHOKzOlrIpn/LFdiwm/y9HhKzLvjIsx7IssyzMrrLMYhnNorIu77Ip82woEzJHiHAMR7AJj+7+
-Y+jrCifz+rawMTsGBw+zB8+wKMsrNHtwMV8xR+yrMm/zuDKz6mYIAlezAkszMFOzOEPwNaPxRmgz
-N7czAdsyZjzzOScwOcMtR0QDJ+SzPu8zP/ezP/8zQAe0QA80QedzImhwZMTmoVIqQze0Qz+0pFoq
-PJPtRjAALxQ0Rme0Rm80QUfDHOsmGQfyHSM06T4uH5/0k5L0YwCySNPtINuzFLc03eIxBacxSt+0
-gvoxpn6xTLvpS4dviPa0m9K0D5s0Th91hKq0M7OpUH/pT9NxSDf1lRL1reoxUiO1TicxT0u1lT41
-SHOEOmCBWI81WZe1WZ81Wqe1Wq81W7e1WO/+gFI3xg/PAl3XtV3fNV7ntV7vNV/3tV//NV0fMfr+
-MZbxgVsfNmIntmK7tTp8tJwG8zwrcDrncTa7s2W/82DvdNeEc2TPbz0DtTl3Nv1Odk2v82WfNrB6
-sxdvtmjr72dDNWS39vySdlFXMGrfdgZPtGPCsGyT72t/dWj39sHSdlVXNm7jtmq/MTgLt287Nv5y
-hDXkgHRPN3VXt3VfN3Znt3ZvN3d3t3RjQlwzRmzqQgOUt3mfN3qnt3qvN3u3t3u/N3yXty6E92Jc
-r3ffN37nt357t6j+MkzDwhRzNZjSt2KU7lWjdFYrKxwLeJV69WPHNIMTKVU7slEfOIITeGL+sHSE
-C6mDPzeEb/iOTni+VriF83GCZ/KCg7iOdjiXFrInvziMLy0jZ3ZxbwQkVzKO57iOb+0l07iCw4LZ
-xriQD7nQgvLCRjEsgAIvLzmTZ6wq+/iIp1gEzDKVV7mVr2wEYDhizGzNNrmXf/nDgoJzt3hsM7fB
-EjeF2/Zxn3ZyE7a6mrnB/vaDw4Iwwzm7onmUG/eas7mWH4Y823m7yrmH0zmg1yues7Jp7zmf6zbg
-8nah1++YI+694DNHV7qlX3pAHzSjo3CKLTREfzqoh/qjSjSUa8b1XjSmp7qqV7pH+zdoA7iKh3if
-G4aBlzgbn7ipM3WsmyiLS3pQ77qJijj+osOCGtv6rc96YWg4sPNopDtwVC87hwq7aVq1se8xrt/y
-VkP7hva6s/+6ttctshNGrVf76V57POv6txdus3PGbqJDCLw7vMe7vM87vde7vd87vue7vr87M4T7
-YMSmOyyDwA88wRe8wR88wie8wi88wze8wLuDv59YYe87xVe8xV/8vqPDum/GAz/6uh76tOu5ortz
-m2v2cns8pLs6bAc3yvNvxAcGMo+8ZZe8VrN2y2eroJM5y9/8s4J81Iq8zG8zzf+4PHA2zztrzvv6
-zh/9rPr8ZcR80HPz0KP4yTP9syZ9t2/EIHwA13e913892Ie92I892Ze92Z8914P3pq/+aLDJgNu/
-PdzHvdzPPd3Xvd3fPd7nPdy//FFmKtr/PeAHvuCj/SBsvGbUcbr3p7T//EYUO7mXO9//hbInfoca
-fmYgPuXr5+I/PYk/vumaO0VnCOtmfn5yO7s/O+kz7tonaed7fuSC/m5ne+rjp+lz/L2cAQvkvu7v
-Pu/3vu//PvAHv/APP/HnPgpEvl8APDIsP/M3v/M/P/RHv/RPP/VXv/UvP8SvvuXisjYUv/d/P/iH
-P/GfgeVjxm4qOZin/5I/uf+G/EZM+ZXHv/xTeZZrP2Rwufrnvy6LucoDN6FbPUCQEziQYEGBLWAl
-lAdAXkKHDyFGlDiRYkWLFzFaXNj+MGGuWB9BhhQ5kmRJkydRplT5MZfDjRlhxpQ5k2ZCEgBcrjK4
-k2dPnz+BBhU6cIHDmzWRJlWaFAAJhwuGRpU6lSpPhAoZLtW6devLjivBhhU7VmVLrBy5plUL86hC
-nVXhxpVrsKhNnGvx5s3Y9Olcv3+nXoXlVW/hvIQ9klW8mPFJs4OzGpastu3gt4AxZ95ZF1blyZ/X
-8k2ILkRp06dRp1a9mnVr169hl2bmMjJo2zUJu1u2m3dv37+BBxc+nHhx47vd0UZ7mznbu7D4xJY+
-nXr12OiMPm++faZoWJc6hRc/nnx58+fRp1e/nn14SMq5x69I+AkT+/fx59e/n3/+f///AQzQvifg
-k89AiCqTh572GGzQwQfZuyS7AymkyDvwIMxQww3Te++sCimkT8ARSSzRxAAJ/BBEAxNckMMXYcxQ
-QrtWrBGWC2PMUcf1PIRsORuZE/HEIYks0r8UfQSSuxZ3bNJJ8WbsTDsl5cPxyStz7JEwKm0T0sgv
-wTwRyS25/IxJLNHkMErPytzOO3Wsi1POOV1LpMA2PyNMl+P47NPPP43T5U48JauMAWboTFRROdWZ
-kFA3nUoIKs0opVQwMh89rLbEGuvUU7IewzTTtRK8rNJT/eKMzVEn825SVGGN69LaWMULsU9xzTWl
-UGmtlbLn5DE11mGjUnVKXwv+c5XYZaOa9Udku9pU12mpBYnXZ6FdqlRmuQXK2Gwlc3WVcckt19xz
-0U1X3XXZbdfdcY0YFNylCJMmlHvxzVffffnt199/AQ5Y4HulkXfepBK04N2FGW7YYXe/PTgv78SQ
-x+KLMc5Y44057tjjj0EO+WKDJaaJMJFRTlnllVEmuWSZKkOC5ZlprhlkMRx9OS3vdDa5155jEhVo
-jIQe2qJVjcYI6aRl4pnpi4p+OqKopX6I6qppxPqipbWuyOmuIboaa7GrJltqrsGWMm2YAGjb7bfh
-jlvuuemu2+678c4b27UXytvvvwEPXHC49077psERT1zxwNfOiITHIY9c8sn+Ka/c8ssxz1zzzaFp
-PCJoNg9d9NFJL33yzj232vTVWW+99NRhj1322Wmv3fbbcc9d9915793334EPXvjhiS/e+OORT175
-5Zlv3vnnoY9e+umpr97667HvXQwAUE9olBseygP1hZCK26ms+n7bKfIhgvuG7iVCHwAJHmLocMLT
-d7uh/AHoBecbsbU98D0ECaN4AwCwEakbwa0XCXEf/ML2QP61TYFGgeDUjkUR8rEvWrB4X/ZAmDQn
-POIhb6BfQrbHEQ46JA9OaNsokGChZ8lPIit0IEcK+IYYTkR+OnRIbVZoQ6w4BAk3aCAAJXK4HcIC
-CU4YRefk8QYSIjEikcn+4RIj6BBoOOGIN5zIFi8COpiQT4xc2WAXQ5jGkj3CCSwEwBJ7ERkbQkOK
-sNgiGts3w/1lUIi06kUF8wiZP/4QLUHko3bY9zNYmNCERkGjBLDhxSouZ5A11E4etKPIzgCSXhns
-ZEKw8T81jhJc0OCeTQbYmUfIcUpi6KIQCTlJyFhSlg6BpAwhs73/ARGRh7RaJKkYvjd+zyEjJCBH
-FEmrW9KShWjUJDYaIobHUZAEB1zfXaqJwP9lExv02+BdHoENBOahgf4TZzcTkodz5gGFccTGKO4y
-Ck6Skp6jMiYssMFOWIghkqysCAmAGT89zhKDtRwiD/fYmSPy0mq+TEj+AZH5LGJicoeaTOZyYElQ
-O5IAi4pEwikXMgpYYPIGSMCkxXCSQlg8ooHywEbnjvLNlb50pW27US86Z8DBvGGPnXNCSbcITjzW
-k6iEYuM+31DMhvhTIjkUZRXfttA9Ek6jsaQNLsmHBHQylDZww8rbQinJEuqzjsFsH9wiRauMTvCE
-Ym3oQanIEPJt7xFYnJBM0ZkQF97ohOSTp0NuQILtoU4Cd8loURELpI+CTqSdGSBTEfSGJ3ptoLDs
-43KWKVCCRvGjhewlM5l4g8b2CpNvayMs7mlBs1o1IZnN4kN1ytq3ajQycr0LOQHgBHaK4QZv6EUc
-Z0m+2sCTiuSLo/r+OMi+wyaWuSC6QTj1edy3VTUhN3DCUxEqS8sey4/z9CIHA8tVuM7WJlKFyCga
-O9JTAlSYMbzoQypZ0KvKdr4crC1KIYJJMbwhUjIlX15hAVx//vUhpiSsYT3ZXAUfCJJJraUN8+DD
-i/yMhvKtHw4NaNcLaxQJB/QseUFMAvMS8Q36TEgdtfpBeTghrdiyYoYnwsECpnK1P8SZfSNKvnBC
-kXt1hEYcZYYT8gG0c6vEiT+juD+eBhioe13pUBccZfl8NL0bhoz67uc2XAZygi1F6wLd9kGsameV
-H+6q+qoaxc7FTQIOdsgoTptDBCpQbg58m5gteeeO7k23tM0xTgr+2DZ0QnLOb+CpkLEpThYfWYV3
-IXRekRBHEx5xFFOU8qUxbbRHeJcrYuhem5fyhgtmmtSlRlYTDUNkO7JYKRKosqlhjaffznrWkxEx
-rX+L3ZfhutZJecSouYKEbPJXwzArdqyRnWxlL5vZzXb2s6EdbWlPm9rVtva1sZ1tbfOa29329rfB
-HW5xj5vcbYWdK8mdbnWvm93s1nXj0N1uec+b3urWHQLrnW997/trjVsIF/cdcIGn24WFA9u/B55w
-hXdbnPfmdNr6zTdNps1sT6s409AGu4ivbeMHn7jHDd61iyct46nrONhOrrWRG23lQ2s50EruOe+M
-QBI1t/nNcZ7+c53vnOc99/nPgW7z7qV8bLWJwSmQnnSlL53pTXf606EedalPHekxcBnFaxN0rW+d
-610POjBydjvvMCJNZc8QPdBC9LLVpj5hcvvb/zOmj4u8NpAw+90btKYEx27sePf7etD+w4fTHS1t
-h/vhEc8EuYdc5XX/++PPo3eHO4TskLe8eALvwME3vvCJ9/zbFz87wtj98peXfO76XnrLZ/5Gmy96
-5z8feyOFXnajV73lT48774jCBb33/e+BH3zhD5/4xTf+8ZHvez4IXvS1oQMHoB996U+f+tW3/vWx
-n33tbx/6dLg6yB0ChuSPn/zlNz/yTRF223knBdVyf65CkXb+168dLa/q1v0LwpmX94wwk3j//zvl
-MWKucdgPAA1QMeKP+WqvNuwP/xxQ/+aO8xzC/w6wAsFCAPdO4xSo/SywA1EiATWv+erPAUlQICCQ
-8V5vAj1wBUsCAycvITiQBWXwI0Cw9USwL0rwAb+P8FRwBmfQBVFPgUShFoiwCI3wCJEwCZVwCZmw
-CZ3wCYuQARQwdgijAjThCrEwC7VwC7mwC73wC8EwDMXwCitgByUwIbIACtVwDdmwDZ+wANSvdlLv
-9v6O9dROarxE9vSwRGiPCh2PDv8u98ROgSoPEPHODucPD9luDxmRRPoQdmzPEPFOENePECXxEOXv
-BhPC8Br+sRP74xFTJxIvsewoUQ4VqBKkIBVVcRVZsRVd8RVhMRZlcRZpMRWpQQCmEBJrIxu+oBd9
-8ReBMRiFcRiJsRiN8RiRsRezwQxTMCFsoBahMRqlcRppMRPikHa8QxkGZhu5sRu9UWBAABdDcAHR
-whAe5hzRMR3ZxRCYkf4cYgO+MR7lcR4BJgKucXYK0AdlsAbv0OIYMAd1UEX8EC0oUB9XEAh1bwMN
-kgX5MRH9cQQB8v5OUBNhoSAX0gIRchAdIgYvsgIbkiIbMCKJZSLJsQc7sgIzshIdAhWGoCVd8iVh
-MiZlciZpsiZt8iZxsiVfQQrHcSAdogxSISiFciiJsij+jfIokTIplXIpmTIoy6AdFREtjiEnqbIq
-rfIqcXId7lF25nAU0QQRKZITPXEs8QMUPUcUvRJLShEbLTEtvzITS3ITyXIuyxIqH9IhSM8tr2Qt
-8bEt9fJJwDIuYUEs6dITzdLf/vAvnYQvudIvFXNHAtMn5bIw5/IwJQ4t8vIxdYQx+U4Ix+EzQTM0
-RXM0SbM0TfM0UTM1VRM0l68ndREt6KAGZHM2abM2bfM2cTM3dXM3ebM3ZdP7BPI1w281ibM4jfM4
-VTP9siYhN/IkLfAjBTMkRTJWSFIyK9I5UXIrO7M5sdMAodM6pXM6UaU6hTMhLLI73S8lTZE70fP9
-vrP+PGEhPMWzUsgzFGvjPNtzWtSTLdkzP6nlPe0TIudzWOrzLO/TP9NTOzXQIY6gBBz0QSE0QiV0
-Qim0Qi30QjE0Qx+UJ21QMBXhAUA0REV0REm0RE30RFE0RVV0RUFUEeySaQijCDR0Rmm0Rm00Q+Fw
-OTUyIQpRM3MkMuGTMCmTES0T6zDTR3eEMxeUR5FUR4A0QB1CSIdUD4sU/BIiM5tUTRTU5BwzSzfk
-SQ0U9qa0E6uUB6/US2FESbnUIRrBEtz0TeE0TuV0Tum0Tu30TvE0T930BMSxQ60zDlghUAV1UAm1
-UA31UBE1URV1URk1UOPgRZOGME5ATym1Ui31UvP+lAi2VOYUSBvp8VNBNR7DMRehNCHMUR1RNVXP
-kR2Ds1RhAR5DNVZltR43lQAVEkH/Ey7Bc0BHElJZ7kBxVT9rleNuNVjhT1fhUz55FTMKFDEJ0lh1
-ZT/7sj+htVMANExxcFnH01ddDlir1VOktTFXkhvItVzN9VzRNV3VdV3ZtV3d9V3JdRw4tB9htDbK
-IAPwNV/1dV/5tV/99V8BNmAFdmDx9SlbFVsT4hjgdWEZtmEd9l3/YFghrkvRFELA1FmjdEwNk1uB
-Bi0rFkLUlFMp72O/FFldVUo11vPK9AxhAUtJNu8kFuUo9mXb42IvM2NTlkg5lv8Sk2ZhVkdVkkn+
-fdZBbNZIcTZnqXRndcZjh3Y9QtZWx/VhpXZqqZZd5ZVUERYW7pVgubZrvfZrBdZgk0QwFbZqzfZs
-pTZigXY9YfBbP+VaMVZStBVWmvVmzdNtwTVmuyYf8ZYx4NZu43Nut/Vg4/Y6+5YxwnU72/ZwF+Nv
-jVZuBZc+lfZl+o9xFyNxlxQWPHVWObdzQ2FUXdNVT1VVSbd014VVx9Y6YdVzWTdU7XFt+TMh2hRT
-abd2bddO+RRrCxdQG7V3ffd3gXdRH5VwAXdSb/d4kZd2NRV2p1Vom7ZmTTZrURZp4W5lm7Fln5dB
-npZYRzZ72aNorXQwqTdpifdxsdd7nVZvtab+K9H3PMDXTMV3fGPPet3xTNs3PbZ3Yh1iCN2wf/33
-f5twXh2yXtHCCsfwgBE4gRU4DMuwfMM3DQE4giW4f3NUbYKQWi0XLBw3fJU1cuOibs0XPzO4LNQX
-a/h2hDU4egu3gz24KkA4fEUYhR2jhKvmhGU4JTYYflm4hafiheE3hm+YJDB3TRc3iHFYhQF3h3m4
-WCa3ZCrXiHeFhqVm95Cziq34ilOzNf0UPmPTN734i8E4jHkTOFMXPsEAi9E4ja1YOS2YOZ33fs3j
-fVl2euUXTOg3KvESjtEjf2W2e/W4POT4eum4jmeviSWGaf+4E/h4b2f2jwO5fuOXkBHvju/+0n4T
-eTwWeX0bWY8fGY8nU5IPj5IJOI8vGZOl+Gm8gyWxcpVZuZVrcid1F3CBsilpuZZt+ZaXUmz3b2lr
-Yypd+ZeBeZW1knnFtYih2CRymGWVeImFwodZFoiPmSVOmWlsOJpFIpmvd5mZ2VsM+WCe2JqFeJqT
-pprBmQaR2Hy1eZt9wpmvF5qjeYhFNiE2t3XpWR5Bd4tF13T1eZ/JBXV3mXJrY3XreaC78XXbeEdh
-ARWpcaEZuqFj8RZj2Xx5MRkpuqIt+qKPcRkdGH6f0aE9+qMX2hqJWXFhoUdLuZMrOZJBGfS6eV4Q
-+Y8z2YQ3GY5RepQ/eaXdTpQjtWdLOab+a3im77emd1pMcTpMdPpXj7SUoUScjWb3JvipofoJBZgi
-DXiBrfqqsdoLG7iMXRWCo/qrwboWKngAudeYyxkksBmS01mdeYKdIdmdjxmeodaszzoW0tqTA5et
-5cKt8RquoViuyxoWOLKu7fqcOViv97qlweWbCTsWAFt/6fqs7zql1xqxCYKvU9qvjfix+zghliAF
-QDu0RXu0Sbu0Tfu0UTu1VXu1Q1uU6HWoHcIeZHu2abu2bfu2cTu3dXu3ebu3aVuxs4UwlIG1ibu4
-jfu4V9sVmHpoXnucBxi2BRM+YYesIbuYrVO6XXW6M5CIrRu7s9a7G4e6Ue6PXKe8zdv+vJu7W+Xp
-vNm7vUUHnlBw7dbbvem7vicHuFBvcfR7v/mbgpqvvwE8wAEnvhVRwA38wOdG2xR8wRm8wR38wSE8
-wiV8wim8wi38wjE8wzV8wzm8wz38w0E8xEV8xEm8xE38xFE8xVV8xVm8xV38xWE8sRrJltyMifpr
-uy2EgQgqbrwMg9wmnypC2A5IsioqbrTIgObs2MAsqvgHz2L8yZmDBGhMtB6iLYSIt9qmpLZstn5G
-iDiozUwsIuCsc6DBiNxKiySrc8TACU6LsuZr1aAMyuW8MLZnhz7KxCIMwSLinZCgieK8xhLpWbxc
-O9gruxLClHBGkXqBxrTK0gy9qjD+ac4l/TPyKsK0qJv0HCJ6YYcOq8vvwtMtzI5OSSKI3CXc61k+
-CruOys3Hi5wm/dULQ8q9R4F64RGUa+8KXbNeC9Rfi75oPLcEy9cH3SLEC3SUHNaRHSnqfJG2CXxu
-fSKEvHCMfMe/bNitLMZIwIVGS8etfcvZKtnBnSucQALUSSF86Nkj4hF8C9jOfMcF/VhWyMAwQr9q
-LNUhIpyI3cyYKLbCvd+R4hFEK1KyrG3wC0GgacLevd27vTMCqor2rMaYzCF6waUGT7yoy98xPiNM
-Kaxey4bsHeHli9fJC8wp4rc6p4BOS5HoaLJWScK8/TlmLONlPiacoOHri4EmaMv+56vO+MfLfjzM
-I0LO0tzOqMqOkLx/AOzRm/zYZ77pdUcMHN3ppX7qqR7Cbw3X3k0tJIDbzE0puq3qwT7sxX7syb7s
-zf7s0T7t1X7t2f7B7fvt4R5y2B1sQCfu7Z6+517k7n7v2fveEPzv/57A/RHwCV/ABR/jCj/x+/sF
-rzu7Hd9zxBvxGR+8AZfyDQfHsSbyNZkiLT98tZt2NF+mHaJibKb0TZ9mgBtaTub0Wb/1Qyb1kSVm
-XH/2aR9jRCn0f9ohLoEeeL/3ff/3gT/4hX/4ib/4jf/4eb8PYN9XCOMZquD5oT/6pX/6qb/6rf/6
-sT/7tf/5n2H5ayVBSgH5xX/+/Mm//I/fp6dYgTBEqTtBSyJQkIuapTdaa86E/dEfldWf/d3D+1kl
-D+O/kAECFix5AOQJPIgwocKFDBs6fAgx4kMSAA7Ko9cpo8aNHDt6/AgypMiRJDNeOkhRosqVLFu6
-bAiAxMFLJWvavImTJCSLBV/6/AlUJUGDAp8wOYo0qdKlTJs6fQo1qtSjT3gSDYo1a9CUAi/m/Ao2
-rMiTArlqPYv2Z8yDBzy4fQs3rty5dOvavYs3r9tFVtP6/ctw6MFw4AobPow4seLFjBs7fgy5cLi+
-gCsDNstgmN7NnDt7znsAZUXLpEmvFUhTrOrVX3d27Vk6NlbBRafavo07t9T+qq+vyv7t0qxX1sSL
-jxUNPDnW07BSG38OvZPrgbCVW4dIG5ZR3dy7e3/Km7rv6+QVCscYPT1xsrDMln/vkLlz9fTBTs8O
-H3727d/7+9cdHn75lXdefQZ+xZ57Ay4Ii3wHPmjTfdUxeN1+/12IYVQBTkihcgVCCOJxZY3W4YDM
-BfJZiiqueNcwlJWoXHbfhEFjjTbeiGOOOu7IY48+/kjjNy/CCJxZfLCIZJIrBoIckfA5GGKUHkk4
-npOlWZhhllpSNaSVpX0oZZgmNenldVCKGSaVZcqG5ZZu/rdhlWv+BSaaUSZI4pzAnWlniGrqaVmb
-bw7aXZyAWlZnnxDieej+njIJNA89kk5KaaWWXopppppuymmnkvbRZaNnZZfNF6aeimqqqq7Kaquu
-vgprrKZmE6qoWZklQCme7sprr752Og+ZtlrG3Dx3HItsssouy2yzzj4LbbTSHktMrcMClZ0WVWzL
-bbfefgtuuOKOS2655m6rhbXX+oQrIdO+C2+88kob7IjrEvtoc4pG+ee9s1XHH6EC42aov1gluq+B
-jBrsF58JG9gvwz4JOnDFGqorsUQIP6zewhlr5TDH6kX8MUsUW4wyUwWX3NLGIkPnMctAMXdAOzbf
-jHPOOu/Mc88+/wx00DbPgbHMgVUHhxxKL810004/DXXUUk9NddVKw1H+tNHmkcgAFEJ/DXbYYgcd
-mr1aq5XvfC+nR/LZDp2cctxc9ua2xiQOt3bHwta9Ush5G9c23wrBLXfKKwvOkMt/sxYz4hH5vThr
-gTtOd22FX87E4ZQjpHjkYjW+OUxpew7d5JQTjvnAmofe3t3okV4c6KwrdKKStt+Ol4uVz747YZH9
-Dnzwwj822e68G4l78srDxaTZvDMEOez2Zc036qkTunronUuPk+zPCxQ99ziZ7rj117+Z/ebbi1+T
-99+Hz35N5CNu/vlbpk/5+vGP5P7z8O8/kvkJrn72yxL+HKc/AIKkf7xjzjkGAMEISnCCFKygBS+I
-wQxqcIMQbAX16pb+HXN0YYQkLKEJT4jCFKpwhSxsoQtHaI4Pug0zzuCgDW+Iwxxu8Bx7+95B/qdA
-kAiwegArIMoOiLgEBrEjDJwdEJfYkSGCsIhGrBgSBadEKGqkiax7ohY1IkW3EbCKcJLh2bL4RS6G
-rli/aqMb37gpUBlvdqSSlR3viMc8xopWc2QdrnQFx0AKso31ap0PF8IcREhhkYxspCMfCclISnKS
-lKykJRdpAzNqLTvXOJcnPwnKUJrrGpo0Gq6occlUqnKVrLQkInp4SC9+UTqllNkYydifK/INjVpU
-4+Zk+cUwnu2WuPSOLuvGSyj6knLA1KIwN0nFYqKvlixL5hKX6Tj+B2pgm9zspje/Cc5winOc5Cyn
-ObfpwT6GLju6aIA73wnPeMpznvSspz3vic98ulMX1CwZDc8J0IAKdKDm5KHzDvnDfC2AHAxtqEMf
-CtGISnSiFK2oRS/K0Bb082PZyUUsPgrSkIp0pCQtqUlPitKUqvSjudhoxoSzCozKdKY0relFFwBL
-HzJnoTbtqU9/SlGNqnNzHV2pUY+K1KSqtKVDzd/dYgrUqEq1pzg9KEIbpNCpanWrFhWqeHxYVKWK
-daxkPSlTv/o9mHJ1rWxtaFUNedWEHoSnba3rVL0qINaFtax87atSz5pX7T3VroSN6lsVhFDm+MEN
-jG2sYx8L2cj+SnaylK2sZS/LWC+4VGLZiUIzPgva0Ip2tKQtrWlPi9rUqvazUdgsw4yE2djKdra0
-xawfcvq+rBZ2tzbFK4f0Wh2P+nW4xEUpYH8rWItAlbfMxehh83TVnTZ3ul11rcH2Wtzsave4clLf
-YKkL3og+N65yFQhdw4tecvi2u6cLrnbfm13u+lCt6U3veMmL1bnWN73rBat74Qvgvso3rd/dL3jv
-S17m8EIcDG6wgx8M4QhLeMIUrrCFL8zgT1jXX9lRwSQ+DOIQi3jEJC6xiU+M4hSr+MMq2PC9MLMC
-DMt4xjSu8YV5gVv/6dbA1O3v97Ab4CAndcDPoy+Pp4vguEr+98jT9fHzgCzkKC/VxesyMpN5m+To
-7vjKu3Uy76As5TCXlMjHKzCXC5vlxOaLADpos5vfDOc4y3nOdK6zne+M5zYbgMrXyo4QqADoQAt6
-0IQutKEPjehEK3rRgBYCn4eFKxrkedKUrrSl8UyAHDcwX6BYhac/DepQi3rUpC61qU+N6lR72giP
-tlV2IhCKWMt61rSuta1vjetc63rXvI51BFotKlxZQNXELraxj51qUGjaiVs+s129TMf/innaYwZ2
-o6zs7LqmOZbNzjZboQ3cqwiX2uQOKZlnh21vr3XbOu22urcK7nVKu9zlPrcfzfxurrI7twe5hSf+
-DfCAC3z+4AQvuMEPjvCEK/zfxbD2obIDhldIfOIUr7jFL47xjGt84xzvuMTB4HBAYSYBCy+5yU+O
-coXfYtlddHe+pRpvos6b3tS2d3K7styXb3XfOtavzrka8/aKm+b0trl3lftzfbN8jS5Pek+DXr6Z
-Ez3MRncq0p0+VZ5v2udYjyrU6Sf1qUe56gjEd9epuvRf5svfKW+7299+8IY3NepXcQAg7o73vOt9
-73zvu9//DvjAC/7uDgi5nkYO98Qr3u0rt6qauX52n359gGEXe5DJnkSzR56mWmc25Ddf08kTceiW
-FzPmsah50Ds37cxsuuorKvopkr70Uj79LlP/+op2vuX+B+k0sn8P/OCbmtVzB/tV1NCG5Ct/+cxv
-vvOfD/3oS3/61E++Ggw/J2ELf/vcB76yHc/tg7D50uQvv/nrvOfiU/4qQui1+98P//jz2tHqv/1B
-BCDp8+t//+TPNPjb/Xm5d1GxJ0aVR3vvZXvIhHsCKFG7x3QByIBBhX1rAmYHiIATWCbpFoET5YBq
-B4EbGFEEOEwGaIHFlYAztIAg6Fasl035Ug9eAIMxKIMzSIM1aIM3iIM5qIM7CIOJgIFekh3rEABD
-SIRFaIRHiIRJqIRLyIRN6IRDuA4/aCWYMQE8aIVXiIVZuIP1wIKIs2QqKIH1J3sHMW4lCGAneEYp
-CIb+Hdh6HwiGDSWC0DR7ZghfaKg1GviGK/h//GZeeThRcWg0FUiHw2WHpqSGKsiGLeiGeQiItkSC
-g8hXhSgzeOiHieiF+bIF9qCJm8iJneiJnwiKoSiKo0iKpaiJgiCFTpIdrpACreiKrwiLsSiLs0iL
-tWiLt4iLregKqUgkZiEGpgiMwSiMw2iKW9CFgsMcX4ZcMsdedDdf0IVu0Bh+P7aMQudfzZh5h4RY
-0/hk1eiM1IiNqKeN0qhTvUAC54iO6aiO68iO7eiO7wiP8SiP7zgK3gh2ozCP+aiP+8iP/XiO9RiO
-t+ePA0mQBamPvUCO7wMAC8mQDemQDwmRESmRE0kokRVpkRYZkFN0kRvJkR3pkR/pkBmJgiBJkiVp
-kh6JXympkivJkm4TEAA7
diff --git a/Documentation/DocBook/media/nv12mt.gif.b64 b/Documentation/DocBook/media/nv12mt.gif.b64
deleted file mode 100644
index 083a7c85d107..000000000000
--- a/Documentation/DocBook/media/nv12mt.gif.b64
+++ /dev/null
@@ -1,37 +0,0 @@
-R0lGODlhFgFnAMZnAAAAAAYCAgAASAwFBQAAdEgAACQODkgASCoQEEgAdHQAADATEjUVFHQASDsX
-F3QAdE0eHVMhIABISABInEhIAIM0Mok2NI84Nk9PT5o9O5xIAHRInFlZWaxEQbhJRgB0v75LSLhQ
-TbRTUcBRTrBXVatcWsJWVKdfXW9vb6VhX0h0v8RcWZhpaJJubpBwb8ZiX8ZiYI5zc4t1dYd4eMhn
-Zb90AIN8fH9/f8pracpta8ttasxzcM51dM52dM53dc94dkic39F+fNOEgpmZmdWJh9ePjdiTkt+c
-SNuamd2gnt6lo3S//5y/nOCqqeKwr+S1tOa7uv+/dOjBwOrGxuzKye3KyuzMy5zf/7/fnO/S0fHX
-1//fnPPd3fTe3vXj4vfo6Pnu7r////v09N////35+f//v///3///////////////////////////
-/////////////////////////////////////////////////////////////////////////yH+
-FE5WMTJNVCBtZW1vcnkgbGF5b3V0ACwAAAAAFgFnAAAH/oBngoOEhYaHiImKi4yNjo+QkZKTlJWW
-l5iZmpucnZ6foKGio6SlpqeoqaqrrK2ur7CxsrO0tba3uLm6u7y9vr/AwcLDxMXGmkM3ysvMzc7P
-0NHS0CjT1tfY19XZ3N3Y297h4sxDlQDj6OLn6ezZ6+3w0u/x9M0A5r/3vvq9/Lz+kQDqEpiLIC6D
-txAyUliLIS2HsyDKkniIIiyLrzC60tiKoyCPq0CqEpmKJCqQJk+lNLWyVEtSKPPJ3Ddz0stRN0Xl
-DLUTVEyaQPvVlNTzU1FPRzsl5fRTaNB/QwNGHTi1IL6nu5Zu0qqpKVSsVME+4pqJLCazl9Ba8oqp
-jAIA/gTCIOXkVsAVo52OAABgV6kmMxr2Cgbil9LLGh/OIJ6r6QgBLAfuMm48YYzPT1siF7a5qUyD
-u1HibtaUWfLoS55NT+a0+DSklqXPxK5EJkuhm7NdV8pMYW9i3YJsT8q99Wqm2MQn/cihZRBuzasv
-RenrdgnwM8uFQzpSOfrrTcihW5KyogiYM89VF9cUWq7i3+sRTVlB5Lyj6ngNd/58pn0mMUmY4ER6
-+R2nGWCEMbUIGQE2QUYj3Fnm3VibIPgeJ178kEFz4Imn4F8aEJbcWY18EcQLViziVoITOvLSFgXA
-5R4iVvxg44045oijByAU0QMIQAYppJAwPHhIFILx/qUeieDFCIB1igjBg45U4nhBlVSaAEIQYiTi
-2IzXLbTKF1mUaeaZaJr5RAc5aKeIFStw0RErVehARZp4mrlAnmlCscILU8yp3y5iILECBI7AKaeg
-q3DxQ5cuMgLgCg5uZBwuTpiAhBgQKZqRK45CKqYiftZ30aW1WEFDEF58xIinn4L6aCMIabHDDhye
-OqgtYgSRonOLwBqrrKImglAQUjyEalg0xjlRLKEuotayFIoliLC6whKtsVVFai222Wo7KyLT7kpU
-VeCGK26xhJTLmblZGZKuuutW1C2tUc1Lb7233TuqU9c6m9At2wJrLb68JEEGP/rG4u4iBaPnr7S8
-/ohhgsRnNOxwLgU/LJVh9YR8Qwsl3HCOCyHIIDI986yMjgwkzKBMyy63g1LN8JzAgskoq4wzOzT/
-3E0MIIiQggM2CG0ztbW88MUZDAgc7y5aFlHBCDsk4ebA8NryxQsZd7DoV7oIQcQKBpyhRRM/jOCE
-VV3X8kQRcKYtCBnsNsQLFHSLPQjecL+bi9lxApCFEjuM0ASzuXwNtdSMf5yLlkQEAcIOSmzN9S5O
-A6Dxs3HPogUIOSBhRQATy1LEE/d8vq+3t5BhhH0YA6zLFELo47qlocNChhC/unowLRYzDLnevbvy
-e/DC265LEqgPsntITCsPvCEef4fV9CVVz8ry/vYOjzwh3KvkvSrgh+985AGPDbrgvl9PbuoRNev+
-6wjHzzz29L8v7/G8g18r0sct8SkrEeXTyfkigaS9sIgRBCwge/bSlzBNAjAV/BACAXgIGAmmOxo8
-lybK8AC5TGdJh4igBAuhOUaUYQOWqQEIMVGb2jVGAh6iRA0N9iYOFmJE8RFhJ/CzIPn9qxDZoYR/
-NLEcBNClAUzIISWWw6FO+XAQQOzKAocjxUKoUBEImY+pIAEY+GhCCh0wT2OAkEVJkMc8EtGYB/cC
-pRBKjjVmTKER85fCBhlpEQ2c4SYGEKC3XWILCQhDGyUBIBNE4BG7O0IGtZg8SkRIEV+k2CJM/oSi
-R0gShWs5Q4aYY4kaiKiLlvDCBUjZCNcRMYh3zMSXFuEDHGAJS1e65Y20xKVGvPJNurTSjUbQox8N
-6ZhAosEf3ZIkADxQXsHU0QWI6SNkIrNIKlJAHSkpwEvIUEKK6AKfxrmncZbJT4BSxBZUIIgTlsic
-ZyrnE1bQJkws8gxkgieaIEDPFkaiBlCapR21dxwn7UWQTErEpCqlCMBQEJRrKdQKnkAaVFKiUB2g
-aEEPCk5YEpRshygV7VwRAU3lbRaZQsIA+qc+kBLCVriKhaos0CpcqIpVNpxaNwNXCGTJoldWyJ5C
-fcVDl35Up0blKVJjyT6lNvUgW/TfUp+6jzkhJhWqBqxfJQ+4Pqd6FXZXrepUv8rHsWK1q2e1qlnF
-SlVbsKWt4wurW6O6saxKFa5gZCn+5mrXiijNZn8FWmDTEbTBuqMSHDDsODCgWHEwtrHeeCxkucGB
-Y1j2spjNrGY3y9nOevazoA2taEdL2tKa9rSoTa1qV8va1rr2tbCNrWxnS9va2va2uM3tLQIBADs=
diff --git a/Documentation/DocBook/media/nv12mt_example.gif.b64 b/Documentation/DocBook/media/nv12mt_example.gif.b64
deleted file mode 100644
index a512078c7f24..000000000000
--- a/Documentation/DocBook/media/nv12mt_example.gif.b64
+++ /dev/null
@@ -1,121 +0,0 @@
-R0lGODlhoAHkAOe1AAAAAAAASAAAdEgAAEgASEgAdBgYGHQAABoaGnQASHQAdC0eHigoKEIlJEYm
-JS4uLlssKzY2NgBISFIyMQBInEBAQEhBQUhIAFBBQVhCQkhISF5DQ2NDQmdDQ3NEQ05OToNGRHhJ
-SJxIAItHRY9HRXRInJdIRlpaWppLSaJJRqpJR7BIRa5KR2NfX7JKR2BgYGxdXWleXmZfX31ZWXJc
-XHpaWQB0v29dXLZKSHhbWolXVpVUU5JVU55SUJhUUqdQTrpLSK9OTKRRT6FSULRNS7hMSrVNSr5L
-SL9MSr1OS7xQTsBRTrtTUL5WU7lYVsJWVEh0v7deW4pqab5dW8RcWbdgXrZjYcZgXnd3d8ZiX8Zi
-YL9lY7RoZshnZb90ALJubLFwb8prabBzccpta79wbsttaqx6ecxzcKt8e4aGhr93dc10cs51dM52
-dImJib97ec53dc94dqiEhKeHhkic39F+fKeKiaWPj9OEgqSSkb6PjtWJh5qamqGamqCcnNePjZ+f
-n9iTkr6amtiUktmVk9+cSL6enduamb+jo92gnr+pqd6lo7Ozs7+wsHS//7W1tZy/nOCqqbm5ub+4
-uOKwr+S1tL+/v9+/dOa7uv+/dOjBwOrGxuzKye3KyuzMy5zf/7/fnO/S0d/fnPHX1+Dg4P/fnPPd
-3fTe3vXj4vfo6L//v+/v7/nu7r///9//v/v09N//39////35+f//v///3///////////////////
-////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////
-////////////////////////////////////////////////////////////////////////////
-/////////////////////////////////////////////////////yH+EUNyZWF0ZWQgd2l0aCBH
-SU1QACwAAAAAoAHkAAAI/gBrCRxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFixgzatzIsaPHjyBDihxJ
-sqTJkyhTqlzJsqXLlzBjypxJs6bNmzhz6tzJs6fPn0CDCh1KtKjRo0iTKl3KtKnTp1CjSp1KtapV
-h6QkWdrKtavXr2DDih0rlpFWsmjTqkVrdq3bt27bwp1L16vcunjn3s3LNy2jVRU/MKhAuLDhw4gT
-K17MeDGCwY0jS54c+THly5gvW87MufPhzZ5DcwYturRkBGkqvgAEdPVP169Z+4Q9W3ZP2hJx89St
-k3dv27uBB4+tWvhO3ziRJzeeU/lN5zWhK5ROk7pM69eZL2+t/Xl3hthj/oZ/OZ789+jn0ROnWN5l
-e5bv4aefGX9l/ZTx7+Ofn537+tr/5cZffwAGeNyA4iFoXnH+FejgcAb+xmCEzSm44IMQ3mbhQPqh
-1KFJH4K4oXwNajghhhJSuJ2K3p1oIoopvgjjihOFWJKNI+GY44j28dgji+D5qJKOIRFZpJAeIpkk
-kAvZOMsBAAjQykdOHhDAJ1QqSVAhAABwpUcf0iJCl2TS0ZGNmZDpSJYcpekllmfyaKMXNtRCZ0av
-jEJQiIUIAAoBcMbJUSEUxHJkQa+wglEpgGKUJ4da1vLkmn1OiZEsoUC6USkDrJmJlIKyF+ksCWD5
-qaUWydJFHagIhCOj/oFyFCKpsYIp3CZPRCKLRXdeumqrs5Zay6kaxTGGniESS2uoNUYKay3PZhQJ
-FYnI8mqjtm5KwAVd1pltQangkYUnFEUrLbUtRGqnAKocsOZGmlDxRwyRZvLlpMwKqC2W5mLEyh5U
-zKAutNjme5G9WOJr8ECerJqKRIQautG/RsjRkRcAFNrRK4kUYQZHGHt5wbsb5edso/1mFEoQVrS6
-L5saEWunt7J+N221DyncUQ5OsJrRk2Z6AWpHN1RxbEc6a2TyRsvKjJAncUQt9dRUT/3DEUgcofXW
-XHc9CEMpK30ylmKaaRAebFSt9to/VLG21F0cQYUmDkXMENRvs13F/hFLXNH131wvgpCy7t6dt9pt
-L3HEH20ADrgWuyrktNguXlT2zAulEsrmnHfueec6KNEFuQt5QoUpYBdc80aXh11LJ2Vw8vnss+dg
-B+2cH3IEHg8zBHRDmuNOew5mPDE6RggTXOtBwQvveQ5oUDFGpnefvtCTNJcsp7qcRomqRq8Q4nFD
-pqMuOZlvLmxR9wCQbJApcbzSbEKoGEt9Q5V+9IoPSlCyEZddch/4dsCESjikfAl5UrdgNj+eUOIJ
-h5BBehB4oZTAT34R8c0rDvEEA8bkgTvog09AKELyWa8lS9NJw3wGHQpW0ILxy6BxHngIDL5khagw
-EkdwSB0XkkhU/jpBxRhIVwvn+NA9A4PIBSGiG0z4LCZCJJ0OwVcHKU7whEis3IEScsQstmSJDpmi
-+ipUOix6sYEz6uIZWQLGIDFJPVw04xplWCKCqHGOK2njdJJ4KBbd8UdAjNAfUchHiegRIWJcnYoG
-OaTt1bEWjPwhTA5pkERq75ECieR+tBgjSMqRPoWcCCX3FMpvoUiTmwykg1AJSJmM0lWlHCONMvnJ
-BHHyJtWCDStb6coYHsSSlPsJD/xgx1raUpU6ecUTisiaXfKylzYk5Rtn8ooi2MaZS0JmTjaBB2Zi
-s5E3McUaeifNGd1kE0OQzTdPEp8TYKEv8ByLGvRgCQ1IoQmK/oinPr+igXfuE56IOMIU3iCISWyl
-n/9M6EH9qdB9qiEE7zQEPhsKTw3woSIagIxpNqoYFmCgAg5YQQY4StLEGECjJTUNDo5AAhMAIQUg
-aABKU1rSk9L0pixYAAM44IKR3pSjBkiNNm+SiixA0gh3mCZL8AAw1I0iEj9Igv9ktJOivmAOxoQj
-GnNSiT+YjgaykUU0W4QTTHj1hC/ww1jJ2JOu1oAJ5quFWNm6VZww9XQvsMMizrCESGwRJ0X1JOqA
-mRHCXoSpSK1FKPbaV7rqaydPOMIe6nCEKCzifp20SRYeVj7DXsSzFYlsD6xwhDNc9q91tckojjCG
-Q3giXUpl/skfPPjVWCpyJ6sdww7mELkMPRYnsgiEomAZ25Vwk2GJNedMgqso0N6yIc51iCzwQERm
-FlclyiTIW+OKWuBSl7jKbZJtKTLd6lo3vDDBmauwyl3H2qS8mkJvQqK7EPgWhL6/1dA6s3kT+4KX
-qnSsiX/LCeAt7ldE3jUvfgOc2pcMmMC+hdCBSbJghDz4vAV+SIUJcuH4ZpiuExbJhgvS4RFD15Ey
-6bCHI9zdEIPExAJRMYzF+9yUqHjF3Y2wi00ZkxvPeI81PsmNcZzZHAvWJiYe8o/ni+KYlekhQyay
-QBIlETd9CXzIGpiYrkyRR1WSOc5kX8YkdkmMPInLChxa/n2/C2SEYPZ6VgrUmZcX5Db/TAFTSh5D
-oizlWuBKVzkrgaG8oLGMqKoONxhYISSgOokc2mV93qXrgmmRPv0pUHfqlUL4rBz7McTS2AI1nYeq
-YT4mzcJsPnFCwjWuKqtZWkYQQ2/NnABINHoiN+utclg56cJqKVpNe7VB+Izhg2yCCnsYrkL61ev8
-MpgjzZZrqlWtkBWSkyFiyp5GYsAFKmxCI4WgQ7Qd8i9v//cgqBSzAH2tkWg9q9fELvZBZJGIXM3a
-IMy+dZ2Z/BFNo9q8bmRIrhfipkLXLBS/ukgpCtCKcT8E4axqYVYRUgguU1rh2Hq3vqUNcH4zJBV1
-aDVC/vI96gaHMYl2UwgcwnA4xLmt5XGI29zwZ/GnwVxqbYsa1hwHOMEZxAtmmjTebj41vhGB53/z
-uUNOjZEPuRtlG1850eOQ85tHtg5rVR5BHH7yfUMkfws5hfOGd7uxh0J3vGsI0w3SPLPbLhQPPB5F
-FIg+s4HL7J+Lew1MXbjbYhxOwf4eQcSO98293eyYoEIWvn0QkpfZ5BQhNJl5jJD6TW8hpYCCQPRc
-sj4Q4glTzQjXGRI+0MubV+8C++P/LpDL+fuz6eEYFQA9ctWNPuCQl4iYxyzLWmywgw0RU5dqnpHQ
-1RDaG48IDTEYIvYZfPUUcdPw+TUA70HfIIlPdkKk/v8m7hM/9zRm0fJh0jAh5AEnOISwkYs8kFGc
-4Qx6Oib4tVoQJ0LaJVFcchS/fN0XFqQOdANKXkcgHzZL6+de7Nd1/UdImISACUhW81cd43VxLOaA
-FgiBznaAGFiBBqiBSNZkBfiBDdiBDyiCpHaBJihfBBiC9PdsHLiBHtiCMSiBAyh/LwiDKJiCNxh+
-KmiDO0iDCyhJGViCMkiEQNiDMJFCSOh/MyiAQfhMTDSBFGiER/iDVdiECOFOFPVPCLWF8dSFXlhR
-DBWGeQGGZFiGY3iGdGFRGDVTP7VRNvWGJBWHcghUbliHnkGHeFgaeriHeShUEeiELFiEJJiDVxiF
-/iOIg4YoiIPIiC6IhT4IiUkohey2hOoniUxIhStohZuIiE8ITp+YSpbIgCdYiIpoijrIiZFYaomY
-ipiIR5q4igo4ikKoipPYioRIbbQIhbF4i6HIX0O4iJ3Yi5kojLKoi404jMboi7sIioGojKcYjbko
-jYfIir/ITpTYdNloEUvmccn4S9vIjeFYitToiK9Iis94jK5IjLC4jMWIjLb4juUIjfOojjxoEXM2
-ENJnd9r4M3Gmj9M3hRORj5unJtc3kP9YkOlTiQfzZJICJcImjhmxjwJBkAwZjF/nJ9gyC3g2LN+X
-jhSnkYAnaHbyfDUYkpdWkYWjerCHEaImEJzi/ikRCZIHwZF59iWZpm0SeRE26ZGf8JICOYusVxBr
-R5O1tzyTc5IJAWzCkpRKeZSbByrL0o9M4y6BF5Rz13e3d4+PWC63tpXeOJQDkW1YGRHmIjTtsm5P
-2XjYkjxF2ZUVASsaV3Jw6ZVwApZhWZdmeWuvBxFedm7rUzAFN3k7uSiqgzEmWZhTpmwQcZbDNzJd
-Fn+nxyt1MpcVgSl9Fnk0g5eIBIKBWSspNxGPBphxeWsVR5fWaJiAdwBBM5N19Wf3ljrL85b1lXB8
-ojGWWRHGkmX/Y3CcCY5riW+qw5IUkWtOd2u0mZpDSThqmVqs1nFLeWtOCRE3A1sZAXZXaRHx/jIv
-WsKSv8l/5NiYBSN5GlFuAtNuGad5PnmRn7l5X/Kdl5hJDuMQKYM9/rIHFZMR5Nl6IhA0OjkRHDM+
-GLGfAwGf8SmU0Yc+V7J7iSkRK9MyDUkmVyJ8C0mVFuF9WAJA7XOQAzFw26egn0B3/0kRPPNE5VJ9
-ZFIo3eOaE1E0RxOXKNolFHAJIMqenqgSQzd1V5M1SMc1X4OOJoE2U0c1Vfc2MheAG5GjN9c2fOM3
-Pbo1SocRStpyibM4jfOkRwA5vIigJ9F2hRc6cldtE0dh2Qg7slN4nXN4uIN215YRXup2xROmKPGm
-zgM90vNmTzOmfWSUKxE+AlpG7eWMJ/FK/lxJEJaHp33KP6G3E69AQB5UPYGKjcGJEjQkQZDKjChB
-qHk5ZRz0qC9BQiP0BCF0QHoqYp5pE+kncZEqqJnqS8opEONHfr/SjXY0q/OxY8FJq7Wwf6RZTKvK
-qq2adXlpfzLBq7rqe1XUqwWBq7k6jqXIrC35Ra6Ke/Eoj0eGqXq5jtdKjyWhqQdaj+oIrc3agOIa
-rS7hrcoKrtjqq+b4qkZWruZ6rtPamdcoqehWqvbKp7UIr/Eqr8KamdpKgvwannZGVQOrmC+hqcf6
-rTB4sPq6sATmsJPqEYQKsem6gRKLkdSqE7mkTvhai5M0rwA7jTYxTOyqrgxLTcvkGhk7/rEfMUoW
-O5k4UU3X9LHA+hBaqIZ0IQhkUE9YIFH5pLNwYYZCKxaKsAWNwE9pWLRrQbRMqxaCgAL+BLRPmxZs
-SBEZ5YehQQIeUAEGMAE9pbV5eIdiqxgbcAQqMAIdYAGE0Ydlexlu+7aTQQIQMBg85VNyKxlB5bIl
-sVm1sF3NOBKR9Qd4sARnkAg54KwaWxNZAAOA0LLjWlVG5QnJ9Y0qcVdOBVVSFbhdmgVXZbNAmq0x
-0VW1FWP/yq0uYVYUlFanW4034VZwNRBzhYqFahOYm1eM5Vfs2LlHFrMWi1hJtVh8pbsoO7IxEVmT
-VVmnVa0o4bedpbiiKxOiRVqmhaiu/guPqsVarmWdlqsSs0VLYFWvI5FbuxWbtFuw7yVcxtuuM3Fc
-mVS5zGtj6uu7p7pc00a/ybRMAwG43asS9oW/BMsSDwbAOaFeRcRe/YsS/kXAiyvA03axJIsekBvA
-DlxdDBy9NvbAEHy9LTLBDVzB9wW9N/rBCqzBG8y+K+LBI2y/AHfBK+xgJnzCqHuEKsylMFFiIuyu
-OgzCm+qOL1TDG3vDMezCO+y/Mby+M+yIQIy+MAydMlu8MjwSnJbDNoyAS0yvKXbETxywQYyQFrdl
-qElsyEFlS5eQYtagD/EoHWKRtQBAH5kQfxmfrJSPFOqQ/ZqgXWI2FLlnWvzE1nsQ/ntsZahZxF0c
-EUApEIuWfBxXxbD5aSIJk4oMEYeWaC75yIiMxnuWcAeqSYdcoJFcxRDRkwgjym8cY338xJ52PR05
-yiRJoAj7wnY5EKRiayUXb0/8nA3xdINMnbFmvrq3kcJiER76xOnGlyNKwmXsPrQpxt9xbNqndn0X
-lYJHwcApegUTbpNmy1sMSfO5bBkXo80pEdxmbkPJKNwCAMfMEOXGeEZUqiljoFGcy7aXfPEGHfRm
-b7I5liKQzrAMynt5lwznOtq8zQIxzMK5PKfpaxB3f78MJ24ZzROx0Kqaz1uCyRjsEK/Xl7J7yusL
-ciKXEL0ymGVZuxMRLUCndQYh/nVDWqSHc6RQSZQQbUc6+nI7h6VHEKWQ7NBDo2lTenNGZ9M3jXmq
-k5z9PBGh2caYrNJL+nIwd3XCetRtXMqETNIN/ZDoAwD8KBCEh6ahoKbOw6YvTRBFSae483Zx58QU
-/SyXIxBk7Tx692vDadEXvRAsSZwFsdV459XCk3iLF5LTTNTYO9UP8c6fLNiH6s1w4gWpx6JM5Hmm
-15610DqFjRCl5z/HKWes6XcV4cqubKMGIXu093O+qZ6c98qCzRAY6sm77M+/56kGgaHOR5gVYXyt
-S3A1yj7hzBCxqh+pbdembZYxmjGiENxy7c+1kH2MWRAMCgtjUqF3fNooHKsu/lF+53cT6YfE9piL
-7gd/SVzNM0KsMJF/VFwQvJqyHBywAHjeTLy7IMveW3q+KFzI8N3d2R3fXBzBPezDoXuO7a3f/R3Y
-/v3eUCzg963e+T3f9W3fBq7gDI7FnJuvCfzfA36zxo3f9L2uAU7gC+7dEa7hG47hGS6KyJzgJF7i
-1mrhJm7eH37iK96OCA7iAP7iLN7gKe7iBX7gEy7i8evhNA7j8p3jwLjjFC7jNr4QOVu1buG0SD4W
-Sr7kYtHkTq60Ua4WUD7lXHG1E5G1eZsZcbvljNHlXr4YYB7miDHmZH4YZn7mbQuII+7jKF7jEn7j
-F67iPT7jc77f7j3kPy7n/nAe5y1e5H8ez24e6H5e53Y+6DjO54hu6Hge4kFe1EAO4UKu4/zN4xw+
-6Y+e55Su6Zle4YSu54p+6Ive5zFL0Ize6JEu6e5d6kTs6KrO6a/u6adu6X0O6m8u6nR+54U+67b+
-6Zsu67pO66M+7MQO6Il+68bO679O5Ki+58je7KGe7MHe6w5+E3F8rF62ZHGM3dglmab+Etk+3s5+
-7A2xx5EtAlK9aZrcIRR5xrINZbY6kXbsxqstu5rM7R+ax3Wcx6K57koSyAaZKvGOPA4pyCO93hFB
-ynCSyPWOEMaJJAoPzwVNBSbLk6v8JVAdEcOsHwq/dZN9EA9vZhcfoivJ/tgOMS0Vj48jH5PDYvLA
-Dp4agS+z/PEJYZ7qMikSLxCs0ANMwHj+6AhTaRHrLOhw5j4aDRE2zxGTkp0XsfM9X5WOoCzB/Nwv
-n8/YTPMK8aAM/ZnqpjR2cO9cvy0LZBESzT0FA89a/zJoGdPc+PUm+pkPndvQfekXcScL13D61tNU
-WtNYitM/p20JbRB6/zY516RA7fcCcSdxzzBDKjU/bdOI//cVLduDnzdX0zeHrxCahpjvnkkr7TaG
-D/kI0SshEwCQaaFtrhB2c9Ku09bCA6Zo/RBQvXauXzt2cNYXYTcyo2m1Tztv3ZtkRvtcvTmwf50a
-8ztCM81szdW2g/uV/maSgD3u1V5pUgkldeco4vMx1xmR0b8QMvAFj039lqLW/VkRlf3tjvw9GQ+g
-2Q9uQ8OcGvH94V8Rdj2dqY/vo5+YBlqpSkKgio3IACGgVS2CBQ0eRFiQUhEwrxI+hOiFQqyCtETQ
-qVWKwCeIHWtRenLIYa0XgDyeJCiRYsFZBzCi9AjykAyTMDuqLJgpAEeNHG1CXNjw50OcB1vaGAqx
-ZNKCS5kaLDUAwFQAEwn2fErQU5c6qEjWzJpRKlUKosZaDbu1zg2wWaNSrRrrLQBHTNV6bdr26dyp
-EwsJDFvr7tfAYuH6pVo3sFq2hflWdXVgKtLCBJ0yvVxZs0FUYzzl/t0cmrNnwqIrd/58MLPpwKhB
-sw7tejVszbNt2qZdGHfurLt5P/X9e2hw4T+JF0d5/KFy5B6ZN1+uF3rS59NVS7d+G3v2k9UNeude
-Orz28cO3l08Inrt66+zZtz+P/rp8mO+h278fn75l/R3xN/+vuAAF7I++AYU7kDf3CjSQQfQSVNDB
-8iDMjULYFtyvOwnHs/DCDcPrkLUQRcMwQ/8+XA/FFE1USsXsRgytRBbTc3E6GGOsMb8Zo9sRIRl7
-fA1I/oT8LkcAjTySyCGBQxK5G2trkkAlxSPyycp+FNJK3aJEkMsIp6QSSCzF9LLCMs2cUsst03RR
-zcDc7O1M2uBksRJMOjFz8QQsLOGzTz//BDRQQQcldFAN9iw0UUUXTfRQRh+F9FFHI6W00j8ntTRT
-SjHVtFNFNeAjKzciqKBUU09FNVVVV2W1VVYfINVVWWelVVZYa8U1V1xv1bVXX1Hl9Vdhew12WGNn
-feARMJdltllnn4U2WmmnpbZaa6/FNlttt+W2W2+/BTdcccclt1xzz0U3XXXXZbddd9+FN15556W3
-XnvvxTdfffflt19//+03IAA7
diff --git a/Documentation/DocBook/media/pipeline.png.b64 b/Documentation/DocBook/media/pipeline.png.b64
deleted file mode 100644
index 97d9ac007473..000000000000
--- a/Documentation/DocBook/media/pipeline.png.b64
+++ /dev/null
@@ -1,213 +0,0 @@
-iVBORw0KGgoAAAANSUhEUgAAAlgAAAEcCAMAAAAsmToJAAAAAXNSR0IArs4c6QAAAwBQTFRFAAEA
-EAEBAwUBCgMBCAUKAgkMHQIDCwYXFQYDBgkVDwgFCAsHChASJwkDFA4NEg4TDhANHgwHDg8aExAG
-DBEjBBUZERMQCBNRDxU0ExcZFRgVFRcgCRkzHBcUDRdCNRELIxYTAx4mLBUMEBwcEhsiDxwhExsu
-Dh8XJRkQByAtDCMUHx4bACk0DSsiGScsFCRcJSUiGSZCDiwqGCg1LyQbIycpVhsPDy0wNyQVECxA
-PSIfCS84ADJCEStWRiIULSwpADdHCDkgEy1/BjorEjhADDhXCzZvITNPUysQDzs8MjIvCj04BT1O
-PDEoQjEhMDU3LzY9KTdFTTMYNzk2GD6PAEpfEUVjBExFCEpPB1ArK0FjKEVZYTscdDYXG0d5SkA5
-FEiJQUNAOURPAFhCA1RpP0ZJVEMvYUAyBFtRWkYnDlR+QEleAF1jkTojHlV1AGB4PlN5YU87cUws
-SVRXAGaBWVJKUlRRRVZlAG1PWFNSBGt5AmqVAG+NAHNpK17BWl9ifFs9KWmnSGZ0YGFfp1IuCnWj
-AHuKbWBXoVNAdGBPDniWAHujTWmLk14rAH+ch19XX2aRX2t8AIeLiWgvAIeeAIaqa21rOnehdW1m
-AImspWJYj2tLW3C0AIyvWXaNAo6xb3Z5dXZzent4WIKiQojAen+CkX1pcoSWcYDOoH9fp4E+Y4ur
-hoWCXIvGb4mut3xpqYNYnYdUO5rDyXxdjY6LYJqk0IRGb5azXZnMjJGUgJWqpJB8l5aUyJN4kKK1
-rZ6IYqzge6Xho6GdnaWpgqy6yZ9zvaR0hqzMk6rN5Jxxw6mF26lbq7C2pLTGwrCbs7Owvb+8zcGc
-8rp42L2xsMbY3L+jtMfsz8W2nM/yx8jH0MfA7MWU78h/3crIyNTo4NLD6dK1z9bc1dbT3NXOv9vr
-/OKSwur8++Gw4uTh0ef78uPU+OPL3Ojz++bGyfH36evo/+3b6vT88vPw/feu1/v+//bb//nK/PnW
-5f/+//ro+fv49/7///37//71//3//v/8sZeTnQAAAAFiS0dEAIgFHUgAAAAJcEhZcwAAFY4AABWO
-AUTUBDsAAAAHdElNRQfaCRQPAiJBEFMLAAAgAElEQVR42u2dD3wU5bX300nWws50shA4lyTGikRE
-Qy0aUChKvJZqTQGBFNurVo0FWq2VqhAq0hc0t1rAxtLLpu61cUl6gWtNe3vb7Z9IKm+Xqn2tkFih
-GmwFA9Xwpw1s/UOyCc97zvPM7L/sbjabXTLZPL/PhzA7O7M7s893zjnPmec5k8WkpNKgLPkTSEmw
-pCRYVlSjp5n/3+3xGGsONHt2vx1t04PNTbtP8aUujxAuHvN6XjHe721pbmqLsp/H021+crP5wce8
-Ta8Evi+wKMHKHKkwjf/vA+D/77SrAKDeeTRiO3/vcnzDoTzXgy/+CEKMbVdw5dh9tMGubAfuuMjf
-E7HnfgAf/X9yJn6Aupqv206fNfYdXHr/Aloc944EK7PBWg9QurVxlQ3s70Vs+FmAdQ0LAP6By1cA
-KCT2AsBqXKme7mEfajBr25MaTIjkSjPA0mHsVnz/G8jgzyBna4MGH/UzdgHYt9ZrMNovwcpksN4E
-eJpeHbLB5PDtelT4L/wP4Fr+95NirQa34d85cB1jX3eAsGVhiPRuR644WPjOafxgHXw9uD8axL/k
-3+gj3P4vLgJ0SrAyGaxzoFCsfpObJh8aJVMtzUSMA3JZTw/Ab8RKYcD+Rvu2ehsJJIBu9llFwdW3
-0l8N1B0CrDlgpz1K4OfIkRqgT4cOjNkkWBkLFiM4ejT4nXjVrcH/wb9G4BUUwOcZ+xCgtkh74Ci9
-9Iu1Jig/BjhDhmzCGUTzccaKNx9lAqwSUAVY17Kvw7U7S2wLn6fXRXCzn90P2dIVZhxYIlxSiCE0
-W+8F1l+GYDU0hG99P9/gVxiEKxiGvYPMoCMjT2a8fxyRQkQw9HpJg3EBGAmsr4PDz8Eaxz4F+UBf
-+DjtMZ0WJ/9ZBu8ZDlZwfX7fjTG0x9ibPTF93D52UEdELgD9LXYw19zteA7onMwLAHuI74SB9QGG
-b372DKAvLQF4xN/7FPlR/17gjD3H/BKszHWFYRbr6siEA7sL4Jbga4zG2Yd2sCuqaoC1H63YS3wJ
-nSW3RyFgsQ0coQLsNn4KdD/3gtcauG0HOCItVgYH78EYq4fHWOFaDLA6tMNHe3St0m0L3+Ixln+/
-BqP3ib0xGue2LRQstmu+vXjz13mMZYRbubhoFw7yXyRYmdwrHMN7hR/fgVGSdrovVzvEUpuXYPhn
-wG9+wJcOaTDZ3GcOpU/fiACLmb3CP5o0ncemCcamQY4EK+PzWD3zYayNYvcwYUj0UiCldTf+9yXQ
-2RNlKl+6Aw2YLRCtU/D+uBZgxQDr9iIkrauIeo067OOu8Cfs38Fxgi9+XoKVyWCx+wBKG5t1tDfU
-9ME81hlybtki3U59w9XNuOVL/r+IJbu/58yXwOgGdPd0aXAuwXU384eAdT7YtzXqPPO+HuybPRdA
-9hnW5YDR2zz41ntnJFiZDBbbbnfQvUI7OBaF5bF+ZdwehALCj+4n2n9qLDkK/8zxMdSNjlA/3UP4
-vRFqsf4+xhG4V8h3G0sB+/4ptJjzB5luyDDV1otUVXd9vbGmpXFr09td96mXha5kT9Yb4psfbKxv
-EgMdDjZuE2MTzPfru/9eX/8HsabWeMcY3eBtaHrL+LiDjVt3B75v224/k2CNFL3+D/kbSLCkJFhS
-EiwpKQmWlARLSoIlJSXBkpJgSUmwpKQkWFISLCkJlpSUBEtKgiUlwZKSkmBJSbCkJFhSUhIsKQmW
-leRP28f50/H5EizLSlHEDC9jCuGxJ6crtoU7TkVjxJxkuGu+XVvNp9t0bS9SFu4Ra1+/PTt/c+R+
-uFLbfMJYtNvMbRk7nq34JFiZrPB5hYdsoKqqA9Sjfbm61ZhkOAdwG1D3IVd2WhJFGp4CB+6Xw8Iq
-kD6j0QbZR3D3F/he8CPjHT1s3r0EK8PB6rHBLARmby7kRm538i4QYP2Jag4dmwkOKkU69gjbqcEb
-rOddgD1sL8DDofu8D/C4/9gCyPHTrNVZp3oXixpZjCCVYI0gsHy8Tihjf1Rt+Le7pSWw2U4bjBZg
-faUEkRJ1QETZte8ShefDT3CxtukV1tXSQm6yBXf+d7Nc0T9o2/d4SUjCyf8iTJZgjTCwng2+FVKG
-janqMn9o3UgDLOKmy1g83NrKQ6zzwc493+/YFFFKpoAqIuk03/5dmoCP1g/00xKsjAerpIWLYzMK
-oDTw9ACfqgY2w7A8rCDpHEETzZb+kPgCeIpqAz7HOD0PI2yXMfYpUcS2gGosLwf7bq8G1zNuwn7D
-JFgZD1ZAFBWdTxVBcjZHezJFKFhPAfyQwLqDu0LOGCysmi8qHS0G+Dg6wR4qJHKCVy4ajWtn0lec
-SzvfDzczCVbmg1VQxWVgs2E6oaY/HgcsP3FVKvB6pGUDCLCu53aMB+eE0E9E3y9ntxet4Gh6a/QD
-q3Qi62UYyyRYIyzdwPt/O2/Xg+X4+oJF1bNhEV/k1YgWCVdIXvGfIupCS6Xz9/8+xQGQOwddoXh8
-ABqv/8JNf9/a2goQ0jWQYGU2WH7W0cbJ6dL6lLYNsVi3AnzNWHfwyXV7WDCOZyI4p7ptRvW/XdVb
-T8/B4P1TZtnRccFCWqoEa6RYrCvUfHPt5JhgfTeQ5uxobz1DVd7HElH0rIq/cb5wAx3yCLTDbf4z
-It1gxPElMAE7mCQqFS/BGilgIRiXdeDyTkoWsO7W1ihgvYkheCuJPz7nBDumUTbrV5D3NvOdT2VI
-j2O8hRudxz/8YT97EXL9fnSFz/nZq4FnpbA+z7yQYGVyjLXcAXb+MIFFEXmsIFi5gU6kv0tFs6OK
-eGsBf1iF9g9yhLnMz0vh+u/npd3VN/j7/EEBpWcCYMngPaOlqCWhSatd84tUteDGHZF5LAKLv+xR
-TeGLQ/N1dcZm8faGEjV/4VF0hCoHaaaqnOIrtYXivuP2MlBnBOvEq6oESyq+QobC9ER7vydyq5Eq
-CZaUBEtKgiUlwZJKnyqmlXRLsKRSrhJlBI5KlmClX+USLKl0qFKCJZUO1UqwpNKhZkVtl2BJpVzt
-qtIswZJKvRSlUoIllQ6wFAmWVOqlKkq3BEsq5apWyzokWFIpV4eiVEiwpNISZHVLsKRSrlpFaZBg
-SaVeoCidEiyplKthZGYcJFhpV4FSPgIHZUmw0q8KtFkdEiypVKsbwVIafBIsqVSrhNCq7ZBgSaU8
-gie2ytq6JVhSqVWVSlZLbeiUYEmlVp4ChexWebsESyq16qzgZmtahwRLKg1mSylvlGBJpVotQLGW
-T4IllXKrpYyAjKkEaygEmU+WBGtINE3J9AmHEqwhIwskWFKpV4mi1EuwpFIvJbMHAEqwhkrNme0M
-+4Dl6+w4++rs7EnT+Q3R6SRysxm7hu3D5HQGC1Z3x7Kc8RNnX2XIds1VadXU8ebS7Ik2W+pHlfja
-Z+RMnBg4CVt6z+YqW+CHmzrRNr6x39PpUJQBPQjF10anY37HNek+nYkXm42Dp6N5O5MHy9eQs8Lt
-CqpufOirNGjtNaGv3POyWlOJVWeVbW3YCUysS+/pTHWGnc5VOYf7OcKCgSSzOm8cXxN6Au7x6T0b
-15IVoSy4LtYOJwlWs80Z/slnGSxaYUuZ1equ7XP0Zxcs1NJ+TqdNUaoSPZ2qyKM/u2DxCz/flwRY
-3fkrIz/57IPlqrumKjVcdUReJUMBlqtuam3co0y4sENn39M562DRV3oHDFZ7Tl+IhgAsl6smPxWD
-LFuiQXT2wXK5NtrinU5VGbQlcjreiX0/eQjAQhtcNUCwWi+OwtCQgOVy2gbPlWdetK8bCrBc7nh3
-broVpTyB09m6xGURsFxrywcE1oGov/nQgOVyOgadIVrisgxYSFb3IH1h41KXZcBybawcAFi+0VEJ
-GiKwXM6iwXHVNs9lIbCQrNiHWpFA9r09+q80RGC5VtYmDlZBdICGCizX2nWDSjPE+sWHCCyXszrm
-sbYqiqe/89FclgLLNa8lUbDqV7qsBZZr9mACeJvbYmC55nbEuWFY0s/pFNVYDCy3LUGwfBe7rAaW
-syR5rg6sdFkNLFdsZ+io6KesQ3uM5h06sFw1tYmB9aDTcmC5ViQ9Rao7x2U9sNYeiHW40/ob76e7
-LAeW62JfQmDFMlhDCZZzWrJgHV5pQbBim6zGfu7qdC+1IFg1iUwyyvKstCBYrquSBSvfZUWwlsS6
-xtv7id5vrLEgWHUTEwGr3G1FsNY2JTmgYbwlwaqrjToEprMdu4UFFQWxx89oLguC5Zp3IAGwprqs
-CFZNeXJgdaywJFiurOiDkwOKZdGWWBKstZ4EwFpiSbBcSSZJm93WBCvaLcP2ELBi5bnWWhIs94PD
-F6z85MCqqrMmWBdGM0mOAFexplU0bLQmWAn4k6yl1gRrYnJgLbMoWHN90buEhmL1DGstCtalIw6s
-iuEEFus3xFpnUbAulGBZGqwyg6uY4zkkWBKsZMDqNMDySLAkWKkEixcHiTdxVYIlwUoKrGYBVrcE
-S4KVUrBE+B77xqgES4KVHFj00ArFK8GSYKUYLF/cEEuCJcFKEiwevndLsCRYqQbLqyhx5r1IsCRY
-SYKF4XurBEuClXqwKuONTZZgSbCSBYvFe/yqBEuClTRYtRIsCVY6wGISLAmWBEuCJcGSYEmwMhGs
-JRIslFM3phzrOsQ54keHCViz9VFiI11fGe901kiLlW6wwOBJh/zYW10EwwUs0A2wIA5Yn4AVEixL
-gAUZBRa+KcE662A5UeZ7xrITN3I6hydYEafD/yewnBKsswqW8xOA0jfRms/RItxM9oo0HMF6lB95
-qVO4c+PM5vKVKyRYaQeLWyYOVo0uIIK7Xa5vGosPDTew6GwEWF8Vp6MjTuKK4WRJsM4SWKYQrEkA
-ZKwuIYwESxcu2TS8YqyAVrrcAJNx3RaACUZc9f1S8oEyxjrbYNUA3GEE658nyAqXOodd8B4C1hf0
-vG/Tyq/i/25yiStl8D40MVbIi0K60Emzhm+vcK7xoo5e3CA84b0SrKEBy2m8GId/vxgIroY3WFt4
-wPXo5fxs7pVgnX2wTFdYQ66Qa+MlwxgsdIWbTFco3l5TALl11gJr49xJ+Zfem+nphgIRvF9EXUWA
-q3HxWwZYzmEIVmjwjl1BKrl1OYyzFliXGB3Xh1IOVs0NULppoGD54oK1ZlL+TcmBZYby+sO8a8g1
-ziUyWYmCNQlqBgpWd1ywHr08/xpnUnksI91AOQZ3gVii1RTI5z2UIFg3wNI0gvUFgM9sqrkHD6wu
-xWBR5kindhwQWNOUKl9MsP7VyB0klXkX6Z6QBOksc22CYH0ZBg4WKNXdMcH6ajBlO/AEKcdpFh3f
-lot0I8Ry8UB+ZWJg3aOnE6y6POCPqXlUh9tSDNZFUOz8NBS6BwoWKoytIFiI6t14oNclBlYqFAoW
-IZgEWKgwtoJgoXn5TE0BfDIxsFKhULC+gDSmEyxdXLimq9ENr1in67xTXhxYC6V1Bueom/iuOl5y
-t8UC69FR8DDyqn87CbDC2AqC9W90OP8Gk4YELPScyYIVxlYQrG9SBP5lXXcOAVho9wrTChZ6F714
-ZRhXSNbdBJzQVdyaBVzHp43VpS6xSaQDDYK1ln6xLaH2ZUBgBdkKgnURXEgxd27dkIB1t2sQYAXZ
-CoJ1Azm37+uwaUjA+ow7ra6Qmp74mOcMmq8vQK4bF9H5110CY92uFaDjXl+dh32PFWi9hdm6jbbW
-H6qJ6QrX8r1CDeLEhgQU0hIGW0GwdAKrJhTmszuCNBys2fUJnI4adjrEVhCsKwks/BU3DokrdKUZ
-LIxkddMerRFm+fuj4A4836uIIKIDadJu4ZcVgja5Tti5c4m90jgxlgnW7CAFSjKq6gMWWAOsS5M6
-neogWJcSWO7MBQvbiiei5xFBhm5Caua6XIaxKuC92eJNLmMtIiiM2uwEwJo1OLCmtVvVYiUFVmXn
-yLFYwiNehE5vBeUHuOaGgeW6h/dt9W8PEKw+MVZ3vwqNsYgq68RYfcCa60vgdCCcKtbHFQ5RjJV2
-sJaCkWnCBfeaYA8lHCye7dRhXogrnFwXoCxGr1AfVK/QoCpar7DYZRGwBhS8V3b27RXqQ9Yr7B+s
-jsGB9X262y9uDlxWh/ZlFrbUo6VLnSFg1WFHG/faMgrj9jV83GUgeI8Hlisv+TxWkKrwPBbEymPp
-ISOv+tOWgn43ASMZmTKwKjti5rH08/rJY9WJzO7Fse8uzU00uz0QsMpaB515N8YifttMUgHkhoIV
-SEKQC7oE9JB0Q1ywRJbiNtfAwZrW3h098/7pWJl33XTi/YPljH/bmSs/pWBVdsTIvFOO0ribHBus
-uov4uYWl6Psm5VMPVr3SOLgYy8CmeFPwRbEz3BWKtYWbAglS/SZX/2Ald69wWjhVCd4r1EOzpmkD
-S9cHDJajsiPuvUK4qr97hV8QRIVb6tSApccDq7Vv+biBBu/OkPkewQkf5n9ha8OWnVHmtwx6dEN3
-UqMbQsCqyZ90zyTRFf3ipPzZnGy0clvmTip9iC+izSO79uiVBcaYjnxw3TPpwltMdC9cEgusCAd0
-dkY3mPcIL9dzuSOnkxK3mddcWZA/9aEgWFvmFly4xEDm0UmznP3dhA7/7gv7lo9THN0jfTxWKFim
-uzScfDF/W7y42ozGzFvSfMyMGZ/R3gVmZGAZsG7g8z8CSUdxeDRmRozug3NNsIzzXcvBmgR5mwYJ
-VjkFh+0SrFCw9E/y4VaFaylDV8xpKtxIMR+a2Ro+k/DLGDg715iDSOGmGgzfruW4Xc9XWwesOqLl
-QmO83Fd1KN1Y8zli7Vu6fq9z4yQaJcPBwrdmbdp4ER9IQxHZeXWDBKuWdzuaRzpYgU4hgkPjBWqM
-7u73aaSMDvS83G9yjJwGTNfxZbrhLpb58CyA8wSb1gHLmEEIGh6p8xJ+fM4r521y1XzxXpcxNpnA
-cn6Cj17EUP9eAuu8/mOs/sBqNe58jHSwzE5hjRhz5TSGXrnoRxYjsraEgeUU/bxP0p9NZorB2MlS
-YGEwdZHRwXZHDrZaczkYYLmN+YRz4bw6BOvewYNlPiGvRLpC0xWGdf4IKmMudBhYhgpNyAywnBYE
-ixz4F3XI+3ZY3Qa6HUJpCAOsQCpvVF3MAcoDA8t8Ql7gaQgSrOgWKxwspyFzPoVVLdaauWvNVPZa
-d8g0588BxlhiXrRpscwzShFYtYG7Bi0SrABYGGNNDomx+oJ1XUjeMwSsqy0GlrsAxvKFbyFYGGNN
-EIHUBMMrfisQY10iDt1IfaYCrNbgXc5aCZYJFvX7Jm+iXmGhKxKsux+l0fPXO7d8EfKvDgPrX83O
-oqXSDYUr+QCUUS7R9auhwArByuWnhwwFe4Wue4KT7gcNFgu5f14uwTLBMge7FrrCweKBvlGhxZyo
-E7RSYvVtVoqxPgGBCTmBPNb15tLkAozTRR7LeCt3U8rAKguC1T5SwQreJKwxK0TiVT5J55lpfJvW
-GbUjt1yuz+aZd9BmG+8Gt3GtmaSXbnLpVgret+B55M9+SBzKlhsgV9xOoCNd6/qEPoGqSNaJt/RL
-HxKZ95SAVRvuCeVM6NRpZBe3NYMsn5xiL8FKJVgs1A9KsCRYKQMLg6xpXkWpl2BJsFIKVi09H09R
-CiRYEqyUgtVBw2YqFaVj2IJVKcFyWfXJFM2K4hmuYHWoEizLguUz0qPDEawyRYJlWbBoGkL38ASr
-Q1HaJFiWBavBvAs97MAqD/ZoJVjWA6tNUaqHJVgdNONLgmXdx8opijoswaJR+yDBsi5YZsJhmIHV
-wW8bSLCsC5bHmFMxzMCqCBv+KsGyHljtxpSK4QWWMFhh04wkWNYCywyyhhdYFUbZDAmWdcEygqxh
-BVZnn0lGEizLgYVBlne4gWUYLEWVYFkXrHaRyRpOYHUGRlVLsKwLFgZZZRYGa3x0722oNdZJLbMo
-WLMzC6wZ/VQtI7CWWBOs/HgGyxyw31cPuq0J1vjkwGq0KFjl8Q66mo9PzppnTbCmxzNYsW/qtFnU
-YuUkN26xc601waqPd9Beng/Kml1nRbBqlvWtsuapClTYj3lTp3OpNcGakRxY7BpLgrXCy/qN3rNi
-O4+hBGtljKExtXjQVY7Y0Xu3zZpgtSQJlm5JsK7qZPGj9woEy7vUimCNj8GN6BF2x45YKtxWBGui
-L0mwqjZaECx3fvyDBsoHZbHxddYDa2NF9ENuDYx8jTkOcK4FwaorYMlqqgXBWtEc/5gr6erPYo0r
-rAfW1JhGVunv2tfc1gNrbmfSYE2rsRxYbls/x1xL3cIsxmLxM3RgbVwW02CV9NuRutJyYLnLWfKa
-aDmwlnr7OeRmuqmDYLXMsxpYWuwIq/9rv95pNbBs3YMAq3ajxcByl/V3yHj9NxBYrHqltcC6qj2G
-7y5XKhJoCsVtLbDmtrPBSK+xFFju/i8Tnm/I4jOqaqwE1opt0Y+3Je6g5JC5bTluK4G1opENTjZL
-gTW1td8D7qZ8AweL5TutA9bKZTGvg3gTv0J7hqPd1gFrRfUguWL+8RYC65rdifgMDIUFWKxko1XA
-WrouNleJtlFnVG84JGDNa2CDls9mFbDcF7+SyAFTIivLzMQtsQRYdeO9MQNCpXIATbHWEmC5x7ey
-FMinbbQEWHVKYuFiuaIEwGItORYAa60tRpqqakBcoZr6HvzZB2tFTgdLjWonuoccLPe8GxPs3lYq
-SndW8LJYN945pGDVrc2KYa7aoCDOUJkY7vDGqe4hBcu9IquNpUydkyJGC5xtsNxLshLu3aIZ6MwK
-Pfgm29SN7qDG/9ydVm28JuTFivHj26JfEB0l2B1UBu5TOmpzZteEfMPE9J6Ne6or5MXS8eMPs5Sq
-Y5ntGudZPJ0lK4LLdUtslw4gaVKtKB1ZEUd/eOvCS01pl6ZZ+ebC3GVN7TFSn94CGidTltxd3I7D
-62688mydTsjnL/O2d7LUq+PAA5eetdaZdKG5dOMDLe0DyvHSPZ0sZmV5y8IrpkoNC9VbGqz2anSB
-NLSvoE021fBSg6K0WROs1uoCY6xoWUOHbCgJVirUVlUgLBWG7M2SquEoC8ZYLWXGuPYSpT4t8a/U
-WVCUXuHQqkpV+FOrobHdJ5tnWIPVaSGwqvjseaVaQjXchS3pswxYbdwH1sugKgNUEXqvcIjlAap7
-JW1VRqjEOmB5MFwvl1hliGiqujXAokExHtkgmQNWpTXA6lQKlGbZHpmijsCYdwsEe9WyPTJGLYrS
-aAmwOhKcJSE1PNRIjz2xAlhV5YpXNkfmqIrK21oALF/ch5hIDTuBqN1ghS6hjLAySD5RxmjoD6Q2
-8LhXqUyQl2J3K4BVEO/hOFLDTtV8YvHQg9WtqOWyNTJIojje0IPV2W8xNanhpLYy/pSmoQfLfFyU
-VMZ4whargFUrmyPTPKEFwGqjMl1SGdQnrLYGWL7WVpltyByVG7NAs+RPIZVa/yNuo0iwpFKpShG6
-JwCWP4FPC27TM9AD6Ym+m182UUp0tn9HNFhGTftIsDyqkQZXVPEknu3zdVWbsbnvIR+7vUjVFu4j
-Jj7MUkm054YiNX/ZUbFnma6WrO6740Gb+IpD+MkzdoiVG0rU/DtPibWAn3B6xCNRFmwIUbniNfy9
-HTOWnYqGz1Pmxo9NdzhmPE1LO8t0rWQ1geXfH/JDB/puqiGyLxum68Ze2BB2beGJnqSP2jRYfcEC
-83yA+mpd04FmOQAUHo3Y8KAOqmID/Q90zhqQVNY7Exy4tUYb36/hnhpcH/ndvVOAPyH1VTtk4/tf
-o1XT+WcVcupwUaPFkR4Emw3hAA7WBgf+MPjL5L7Ul6tDYGy8gFoL4BvM/4wDf13gv/5+vuj4Qdg+
-KhhCDhabezF2K7afTbRfMmpWppkDVfoBaw7kPO9jPXtzYHLEht+FcSeYbw5chsu7wNbWimIvQu4R
-5ltM53PcBt/zs72a43fh++2fAhysro/Aom6210bv/xhmdLL9NvWHjH0JinFRgx9KsMLAegbgEew8
-Hz4f7H3syU4wwPp/GjzP2FOQd5RdADf72c+0vLcYmwn3+tkL2qi3Qvdp5VoOE/zsTQ2ew720vD/j
-Vnl7mG8B3JZkBz+kMFB8sF7TdHGBYFP/lEKzqsDQ9NunP0BZC8jhkF0mVl4BtPLvNvt77ECFfhoN
-cRasY69WVf2ZsWMPVG1lbL4KYzlYf9X0dxgxtYx15ehv0Lm+3clYNizjX75MghUG1vlGY6O9vxr/
-q68KFs6cDo5csfH2+dl+P+tVoY3RP8b//tNGdCFev2QfPFBFrbihqlLEXm/qo470sGa+F277CrsL
-rsPVv9Wy30s21RAYABUfrP+A85iJzE18ZUSS/H9hHP49Bx7Y+WATIyyqxb7/bYTk7+fA91jvR8je
-fRZGY3ygaM+3cbBahUNshQlo3EazXdVbT9DrL0EpWjObtFjhYP0JHCeE2/sOjMKlSgiOCFFK9wVf
-4m/+Tw3eRouFru9Nsli/EJgsh2sZ+wrkveV/hlsoxmF7OriXDfagg/olo0U0XkmoUVEcLDZYjuoq
-VLWDwPo4PBIgaAKdQ8SQvJOj4BbGPrBxb134FvsYt1hdWfC4scHPwP4Gt3dPvyhsXpvfQOqvoNP5
-vgpj0XgVLnAAOOgsj+fAwqrpUCxjLHBU8YYAAus7kG108Dg2vC6xqbfDOUN6sk+xF2z6sgd53PSE
-2HUVNWDXGDj3Q53bPGGZToXu9Q4bA1TGvZeM18DVGlYhry9YATWQBdpqrBc+L0IY2tuPoMO0QfHu
-XWSW/gPGYn/uZQ0M/l6wwSLGY8J8HS41D4CDhTHYN/hFo7EDALk7WpaDg8h7ir46SoQ68sAKCFur
-IkiOg8CKzB6FgLUe4EcijgcYi+A8KN6rogb0v4DRP2QbUdocvqHQUxpZL7sgKimwOsLnhvYFS21t
-QbUqkWCN7vNRJ6eA/izFbIep8uwhjNNP5kBh1e06YGBF9vUZJI5fEyfRpuW+EwYWGjO0TWXAwdJ/
-YnrB+0Hb0XKfpv9UgmU0BJbt7hUAAAvPSURBVAfr9gTB6mH3YVfQTxfs2N0tMyHnRBhYgrdfi23f
-tI06Yu613gGz/ATW7mTBosA9dJRK/BjrioAr/HEg2gro2BSw/zokjZAFm9mhMQ5wzLoCAys8WrwK
-ik+ZfUhhukLAIuwge7M2Dl2h6udeMZcds+m/YRTbTZZghcZYQVf4N97TiwFWD4ZScIuf+of2oxyj
-O4KukDfgnzQ0WGKn++BfTK7uEnsZrrCLwq2BclUWMayuv+D9XAqH9jGMnh6J5KoIcvaF5dyA0qgH
-WtrYKP13wrpeb/wax5AhnvEKBYsda209cQBmoVfkYB1AsA6I9w6AJsEKBetvGFmh/3qeneERVEyL
-tRx0Ho+vMs2UzQze74LPi3hdJKy4J/ylsTty9XjIqn/mDDh471SVaeGPkowP1n4bueHP2m/ZzoPw
-sPhqCow10J9TcgsPmvTfdXqf9NNSDv7FaP0b5hUxEwp1yPf7Q8HyNnq4Kfsa682B33CjeC6F9PQ9
-f8SQXoIVmm6YQhmdX6jFe3X4AYsJ1nrIEx2+JwR+y9FMYdeKTNwF8HsRwY4FnduDDwJ9P1z5U9OI
-kVn7Hy17gLeC2qgGY/cAwMIj1Vef6L0PIz5+U6eq2hsSIy6sRlVRj1HfwY4tQPd1yIbo49LX0Bzl
-QC69X+2hcEr/DfYMbw6zWN+F/CNsly33HT++n7+H4U/2DdaVQ51LXFwowQoD600NFu5hL4MOY6nR
-G6qrooCFG43lv3n7u7nwtVNsu07x1AIoPYHNRR7wXR1uRotAeR/yliLqfTPX2KsNwy7HDnZwSqBT
-n3DCXVGmRVSC7wcsdp8GqupA+5m/2R+ax3rfZnRZcPNetK+qCqOPUOxNS6V+sj+GqsgRLqLrwnCG
-Aiw/kqerDpVHaYtpEQr93EbS92knJFjht3RetuMPw2/E3HkkIr8QeLnA/M3bKIClrSkWOa7TIv+h
-MZo/RVc/XeL/CdmnTUcYuLuznpobxp0a0KFWRnvQUVbfJJcBliJuQr/+WJGiPdAyBfLeCM1jeRVT
-9GpXmZK/mTqxPTunKzOa6Jo6x3y/qmuMMprWzFFs7xn5DhFi3We33WkEorumK8U7uP099mQRLp4a
-6VyxacGGEMmhY9vLsm0Ln7/LQb6wPHzKXIV4+RHzN8de+sFVevaM53lO9dgG3XYnub/1ikJ5nKcU
-Bd3ig+ZHBPYib7R3fnb+5u6B3IRudSjRxgAnPGxm552Jf5c/ua3kWJlE9PpMS111vHJslEc8yIF+
-UoNQgwLoBqOVYpRgSSWtRipIDC1R35NgSSWn7lpR5zpG5VgJllQyaq0UPbOYTw+RYEkNWG314sE0
-1XHm7UmwpAYkX3MlUkUpBk/c8ukSLKmE1d5YaT7ur7q/h0hKsKT6V2erp7qiIJATr27t/0m+Eiyp
-aGrxNjd7GmurqyrLSxziOX9Clc2JPUdZgiUVTaBECFeU17Yk/nBuCZZUNBWEMFVQUe1paRvgY0kl
-WFJRXSEfFt3W3pHsY24lWFJpkQRLSoIlJcGSkmBJSUmwpCRYUhIsKak0g9Vab0zwqq3nQ057vQ1b
-m96OuuvL9XySoZ+1NG7b7RcTO7z1TebErdcbt/WtAHAwuPJg49am4LQA39Z62RgRqq8X2UmP0SYH
-Guu3RS+q8O5WsQW2RQNuwtuCWiV2U9TWG2o3WlDs5e/11jeeSAdYEfMKt9tpTqFaGuW7juXwSYb+
-Y9NVAIe2B49rfx5ubeNzvHuX22i/8FqiYmXhW+Yi2AKVMReb8+6lAnKAGEgnSs3sn0KzCtXRf4iy
-5QLRav69OpWDKj7Rw96fSa0yms95vs/GV4btEVIq0r+3iO91BFvwNWpB5fG0g/WyBouamh+zw8V9
-djw5EzhYvRdA/jbPdBj9Hk1MLW3aYHM820OgFDY9aaPqWSFaD7lbPTMh30+Tox2rmxeICfWMV0KU
-YMUF630dxm1tbiwC+9tnIjdcr4lWw20ubmoEGOdH1OybPUU0PxV/dftWXDw3bHaduAuoQd4+KpqF
-exXBuNO0iC2oOX6fbrCuAF71+GUtL6KSKNuZBwKs12xU1eGkDb7HvstrxPyWivy9puHJsf+dsSh0
-n+M2eBZJ/Bh87wz7GE2O7rIZ5R0O2SRY/YD1HbATGb1TqDJfmF6badYg/Y5qP2HU8RtFk+vf1ahg
-5Bh4iTO3L5qlQ+P0Pzaaa/+uLW8PewJoavEGXmQvrWBlGzVmmprbOOWBQpFesD+icrA6W71i82qE
-ZR1dNlRR9Me8tB93f3OU0afRrimj/X8FlVzji8TUOXyDLF40mfk+kvusBCs+WBVGISNvM9XkDJ0J
-rUDpjaLV2luahZtrYzq0MlGN9C/aqKN+ciE/YF15Ctqt9/lf0lNU25YdNveiGqQPMdbzF9uod9IM
-1q2Qu/lEyBkEAuzdq98KK0iKJucniOE6sdkP0dT98LEi28I96PePU6mAn4H+UmjZUVxRfITt0kQN
-m8Xw+AEJVhSwOoNg/QrgzmAEHlq7YeHz4aUcXgC0WAvgslOiEsh/CiKpGhYV9HuWikKKNv1wlB60
-Yi9Q1eSP8zJGH+bk/SENYBlDvHipyIPZ6KJmrDNOyNPcGuamg2D1ziS/fgUPqdArbkbzS1Xewf4c
-vy7053gditc0HlL9GHLxstgAdkXTnhMBVilrk2BFActsCsJmOjaFtszoSHubPaFbhoJ1vIjs0MkF
-oCowDsF5DLJp9SpeknEBZG/XzKJF60OK6Z2cQnvpyVf0GxhY7OAC6rw5tOei7BoEqxej8F9TKObY
-wU4uB/SK2aA/z3z3UUjPus7BjyhGJ9h7Dsw4wfbmIEPYl8TvUeFOvx+tXf5pCVa/YPVu4D05ZXWU
-EhchYCFXo45QTw93duAl7g+UiqSm/ruOH2E4wt4xwcJ9yNWofURUiwBrd7rTDeiBPbdnO0B9Lg5Y
-vpmgPm3UKcSzKcLTGWUaLyqrvV8Dx0uUJTmETClU8IhgW3SKvf4R3OzkmFw8pXYJVnxXSL+z97E8
-ozRRTLAO6ZCNsfoHOmz2U52xl9BiKUGLRUXW7EdEzvG3WraZDTpWJKp+ptVihYPFj2Hv+dH6CSZY
-vinC5VFfcboyY88V8N/oFY2C7xR1vaoB/Jp/0LFVDu2BV7QJ9PiAf/A4fhzaKsgm4wVKhWQpTh5L
-6PBiyDsRGyzkitfvNAKr5RhYhcRYIq+jG2DdBXeYNOZBDjdeIsb6IMk674mDdWDVFNE92AW2mGCh
-FdVCe7JdOXazV9ibRVVuu8ag+R0d7Ge8CnMpjqezfR00AstQpWQpNliPlf1cOLB4VZOP6/QgGhao
-wb0Kctifgr1CwxUaddXGmHYJvee4I0aamnqFf7LZ09wrxL7encYXxrZYiyHXeKbPX8vz3qFIazLF
-6WOPUoVu9Q1ebnKHqPf+lRL6Owc7kGixqMb4rTCZdXhI9eBo9kqWYoM1nUprM+pOx7FYRfBR4Sd/
-oelHeMD/eQLo14E8lgjeeWb9bwG7NF2Un2QskMc6L915rPsBCmsbn5wOKsVIEU8F52Cd2asZ4X41
-FRDN3/aY3f4Sz7xrW58cBbPQQFEW9Ble1f5F0Fc3zOePnVhMi7drjh8ZaWQZvPcD1ssOyFm3rWG5
-g9d5jKjoJ8A6Qw9fEBX9eqfg1g0XYBx/hjLv6xpE5v1n1CP8iqhua1RTZmeeMffyknOZsW2VI/2Z
-994N2fxeofZ8RB4raLG+ZDoyDKr2Z9P9Px5v9S7HqEmddYr6HsXcNo0+KlbaZ5GB66EbWJD9tPlh
-Eqz+YqxdOvUKHaJXGL0G6cxgDdJj/F5hDm+39XRfgyruoyO8mSfvP3oq8CgU8iDB24YH6bah8jRL
-PVhtnkax0OjhSavelmZPc1vYKlONnhb+1xC96PV6dpsjFl5vbqL9Xvd46Kazr8lDHY4DYiXpIG4b
-NOroECVJkRe5R/QKvUabHPB6ml45FbbKkPHSbApPB/+pPa8YHu5gcxMfvOD1eGj31zweDNNagi1t
-iM9GbfE0p2V0g5QUk2BJSbCkJFhSUoPW/wfr5tj8wgE+HwAAAABJRU5ErkJggg==
diff --git a/Documentation/DocBook/media/selection.png.b64 b/Documentation/DocBook/media/selection.png.b64
deleted file mode 100644
index 416186558cb2..000000000000
--- a/Documentation/DocBook/media/selection.png.b64
+++ /dev/null
@@ -1,206 +0,0 @@
-iVBORw0KGgoAAAANSUhEUgAABIsAAAHpCAYAAAACi7yYAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A
-/wD/oL2nkwAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB9sLCBAiCLMGMtAAACAASURBVHja
-7d3rkds4FgZQaMohTBY7ObRCV+fgyWJy4P6wJavVIgmSAIjHOVWu3bElPkBSAj5dgpdpmqYAAAAA
-ACGEvzQBAAAAAHfCIgAAAAAehEUAAAAAPAiLAAAAAHgQFgEAAADwICwCAAAA4EFYBAAAAMDDD00A
-21wul9XXTNN0aHnP749Z39o2rK0jRzssLX/pvVve9+61S69Jdey2bn/sMTx6TAAA/cIW+oVb+2tb
-3p+izwioLIJsHYe9X+a979vae89ut6Pb1+txBwD0C3vZN0ERrFNZBAct/ZJxuVx2Vdg8v+/oLyEx
-69j7xbq2/1u2e0u75Th2Mevf8ytVzDkDAOgXjtYv3LquVP0nQRHEUVkEBTsJve/r0hfu2hdz7e0W
-27HQ4QAA9Avr7BcJiiCesAhO+GKK/YIt8SV+RscoNmippUPl1jIAQL/w3PUc7Y8JimAbYRGc9KVY
-Yu6b3OsYNUTRuQAA9AvL9AtT9LsERbCdOYsAX74ZOiVbO1M6LQCAfmH7/TzohcoiqOhLK+eXV4p1
-xP4y1krF0X1bn7dXBwIA0C+ss19oagAoR1gEJ4j9osv5iPq965imKUk59eidwNc/AIB+oX7h/HpK
-tzeMzm1oQJIv7Ra/eO/7sOWxtgAAtN0v1N+DdcIiyPQFlPP1JbZpTyehl19q1joQOhgAgH7hOf3C
-Pct9tz36c7DMbWhQwPMXUYkOQ6517P3Sj/216axJEdfWoyMBAOgXpukX5uqv7Xm/W9JgnsoiSGxr
-4FHiiyvlOu7v21pu/PqLzuuvOTHtlmIZW/bz+f1r6177ewBAv1C/8FwqjCCesAgSdwK2dAh63e+5
-fX8XuBxtt1SdkZhy6djt37vNOioAoF84Sr8wV39tzzIERvCd29Agg7knQ8T+unTk15mc64j5El17
-KsbRW75inrqR6glj79rELWsAgH5hmn7hmcckpt8HI7tMRjYAAAAA/KayCAAAAIAHYREAAAAAD8Ii
-AAAAAB6ERQAAAAA8CIsAAAAAeBAWAQAAAPAgLAIAAADgQVgEAAAAwIOwCAAAAIAHYREAAAAADz80
-AQAAqVwuF40AABWbpmn1NbvDIh0BAKDGzg3n0T8EgD7sCot0BAAAmDNNUwj6iwBQlS3fzIduQ7vd
-blobAMjuer1qhKZ6o4IiAGiZOYsAAMji0w+LAHC6jx0/unkaGgAAAAAPwiIAAAAAHoRFAAAAADwI
-iwAAAAB4EBYBAAAA8OBpaAAAFDf3ZJa5J6htef3za5eeyDb3urWnxsQuM/V7jmxX7Dr3HIMUbfj6
-+qXjurZ977Zja1vuaVOAnqgsAgCgqKWB+rt/2/r6s7Z/z3aesf0x+1fjdgFQjsoiALpyfRng3J5+
-Fb7/2+3NL8Xv/m1pWa/veX7t/XXXN4OtuWXs+fe59c/t45H2erd/Mdu/9XX0b63q5zWkWHr9/d8+
-rtfFapOY9byz9L7X5e7ZzqVKmT2VP3ts2cc966+1MmfuGKkkAvhFZREA3XgON94FNnMhzlJQNLes
-1/ffX/f62ue/fw1d3r3m9d/nlhu7/rX22rv8LW20d/voT8ztYbEBzNJrS4YMubbzzNCidLs+BzX3
-datsAjiXsAiALrwLfPYGE1uXtaVK5l2YNLes2OXurdI5svwtbaSKiFdbg5Cl18f821y1UupAZu92
-1njblwobgLG5DQ0AZqSofjkSnOSuvsmxf2fsB5SUMtT5vN2+LC82xNoziXaJNthyO11MBdHS7YUA
-5CUsAmAo91u97rdGLc1jdKQi5t08QiH8uSVrTcwcSkekWv7avuTeD1hzD2TuwcOWqqIS8wa9C01G
-nD/neV9fQzQAyhMWAUAma5NVA23KEeLMhUZHJ5g+e/9jXyscAqiLOYsA6MK7+XLW5gWK/fdnsYHP
-2uvWJtveu969ti5/bxsJzNgTDOx5JP2z1yAmNsC4T7j8+ifXdj6vs7VjlGsdQiSAc6gsAqAbz7eY
-Pf9dqmVtWd7cbWivE0LPbe/rv80tL1Vb7Vl+TBvl3g/a8nx70dIj7e9/v/b6mKer1bBfc9tZ65w8
-pdt1bh1zQdFaGwNw3GWapmnzmy6XQx1wAIAt7gHTjm4LJTuWv/uI084QYC482Pv6LfMSvXtc/Nag
-pNR+xb7+yLYeXX9MG669ZunYpN7mEeeJAsZx/4y7/P7vmP6U29AAAChq6yPm9z6S3n7t34/c648J
-Z97N49TKuQDQOpVFAED1VBY10rGMrCwCAMpRWQQAAADAIcIiAAAAAB48DQ0AADqSciJsAMYkLAIA
-gI4IgwA4SlgEAADAZh9/X9/+/ed/t8Ovf37t3PKWXje3rq3LTP2eI9sVs961969t59r2LbX16zJi
-t+Xzv1vyduE4YVHpD9SZsuDnX4COlA7HLD/Ferase2lZW7Zh6/a+vn6pDda27912rK0vVbsCAEB1
-45qFwf3H39dNIcm715fY/rWQKsV7Wj5me93Dn6VlxgZKnEdYVPLiXAgTPq7X6BBh7rWpln/kPWv7
-LigBAIDGxzUrVT+vocTS6+//thYs7A1plt73utw927kUeixt3xnhWEybzO13qe0VHtVDWFTq4nwK
-cmKDni2B0NLy7/82F/4srWdPYLRneVvWUWvgNNfuAjIAALoZ10TcHhYbwNz/LiYwStpvf3PbU47t
-zL0v727/WqvqijlmEEIIf2mCAh+oK0HR0UBhbflbbuVKsT1ry4vdhhRt/nm7PdZdYr0AADCCreHC
-0utj/m0u3EkdcuzdzntQ09MxS7Gud23iFrQ2qCwqeXFmrjBZWv7n7XZ6WFLDNgAAAGNLGeq8Vilt
-ndz53fKO7sMZc0DlPjaCpfKERTVfKBsmqy617hr2de21qeduAgAAzvM6YfKWypQS8wa9q6IpVT3z
-vPyYp4pBLGERu55i1sSXytO2q2oCAAAe44MMIc5caDQ3B1KSsVzF4dC7p6KthWgqiOohLKr5A2zj
-RNW511/LurY8NQ4AAEhv661OMY9RXxwDPAUP9/+OGjtsDB+ObufzOnMFOTHLnZvoWhhDLBNcl/xA
-PRherIUka7dfLS333Z/a9j/VOoRIAACwc0wy86SzL/3tmadvLU12/Pra2vZryz6V3OZ3f44eMwhB
-ZVGZi/jpFqi5qqAj1UJry495Gltupbdhbh1zQdFauwEAAL/72i+PkU/x+hoeRb93O/fMi1R6Iuet
-xyz1emNDQRNc10NYVOoieQl0jnoNN2KWXyoo2jMH0lnbfKTdzm5nAAA4bXyzMJnyXHVLC0FA7fsV
-cxveu7mCWjoG1EFYVPKDZ2GS5diAYW0ZtQYYJZ/gtrSuexs9h201txsAAFQ7vtkYMGx5/dHXHgk/
-atmvI+9PNYF0ioqvGqrG2O4yTdO0+U2XSwghhJuBNABQwPV3qL+j20LJjuXvPuL9KPnRBWCbtVvE
-hCrsOq9+96Muv/87pj+lsggAAKDFAeBLsCBIaJ9jSC2ERQAAAB0QHgGpCIuI++JZmZRbmTkAAFTW
-h98QHn1cPzQYFPR5+6x6+4RFRJ7IN40AAAA19dGfwp+Yx6HHPr4cQFgEAADQuNfwZy08inkEOzAu
-YREAAECjYiqKdvl50bg04Ujg+Xr7Ze5bw1q63VNYlPzgXzUCAP13zNyeDJB/bJErCAKKB0WtERYB
-AACcNWA9IRBy6xnDX3eColXCoowUbgLQk0kTAMQPRguFQItPOHuzDXuCoss/jieV9Ul+Hrg2TwqK
-WnvioLAIAABgy6CvgiBoz/apKGL4a1dQFE1YBAAA8DywK3hrWOoAJ1U1EXR3XQuKNhEWAQAAYwwW
-Gw6B9u6foAgERXsIiwAAgLYHgoUnia4tgBESwcL1UUlQ9Hn7bCo8EhYBAAB1DvJOenR860GLoAh+
-f4ZUFBS1RlgEAACUH8R5ZLx9hJyfMYKiQ4RFAABAuoGSEMj+w9mfQ4Kiw4RFAADA+iBICAS08Fkl
-KEpCWAQAACMPrMwLBPTyeSYoSkZYBAAAPQ6ahEDASJ95gqKkhEUAANDaoMgtYQB/PhMFRckJiwAA
-oJYBjxAIYNvnpqAoC2ERAADkHlQIgQDyf+4JipIRFgEAwN4Bg3mBAKogKEpLWAQAAK+DASEQQDME
-RekJiwAAGIpbwgD6ISjKQ1gEAEAXhEAAZPl+GSwoCkFYBABA7Z10IRAAZ30HDRgUhSAsAgDgrA64
-eYEAqPl7atCgKARhEQAAR/17CSGEMP186WSHa9HNEAIB70zTNMy+Xi4XBzyRkYOiEIRFAAAs+ff8
-gYcQCICSRg+KQhAWAQCMSQgE0J25KioVR/EERb8IiwAAenJGCPS/6ctgZHp0sG+OB0AFXkMk4dF7
-gqI/hEUAAC04qxLof5O2B6B7gqKvhEUAAGcSAgFQ2HOlkSojQdE7wiIAgFxOvCUMAFgnKHpPWAQA
-sJUQCIBOjFxlJCiaJywCALgTAgHAEARFy4RFAED/zAsEAKvuVUa9VxgJitYJi6DmD+uf7//+8s/6
-a969ds/yU6xn636uLWttu9e2dakdX5cRuy2Xf/K2ETBDCAQAbHBWUPS63toJi6BSS8HD9DM+eJh7
-barlH3nPme2y5h7+LC0zNlACdnaq/r5+v/Zzh0NCIADotsJIUBRPWAQ1fjg/BSKxQc+WQGhp+fd/
-mwtJltaTOzCKbZe5fSoV6giPYKXD9BQCFSMEAoCx+x+Cok2ERVCZtUBk6e9TLP/5dqrY8CfmFqy1
-7Xm+/evdenO3C5CgMyQEAoC+xibT1EV1kaBoO2ERVCp38LG0/CPhT+vt8q4dlsIrARVDdBTffB58
-hGv29X7+d3v8/+v1+ui0AgDEqiUo+rx9NhUeCYug48FcCOfPI7T3faXmQOrtWECJa/eo5xAIAKi8
-v9Dw/EU1BUWtERYByQaXe8OQ5/fVXNUEvVyruQiBAIBaCIqOERZBJ7ZOVJ17/bUParfs1+utaGu3
-oKkgIqczrpfHuf+l43NzMABgpD5IQ/MXCYqOExZBxQPCI6HDWoVOzCPhlwaNJQa8c3MFCWPo9Zov
-zbUEAPRGUJSGsAgqE/M0siOBydryY546VmKw+jpwzt0ukMtZlXOuBQAgeb+m8uoiQVE6wiKo0Gsw
-kmKwOjcvUEuTMadul63rjQ3STHA9SGdJCAQAUA1BUVrCIqjU0m1ksYPFtWWcFWrEPHZ+7rH1Z243
-43BLGADATD+pwuoiQVF6wiKoWMzgce01a4HMGQPZLWFXim3J3Y4G+w11boRAAABdERTlISwCoHlC
-IACAgn2v6dczUmurMBIUpSMsAqDejoh5gQAAiCAoSktYBBQf4BuIIwQCACAVQVF6wiLAgJyk3BIG
-AEApgqI8hEUARBECAQDwpX9Y4ZPRchgtKApBWATgS14IBAAAb40YFIUgLALolnmBAADI3ufsuLpo
-1KAoBGERQHtfyEIgAADIauSgKARhEUBV3BIGAEBzfdjOqotGD4pCEBYBlPkCFQIBAED1BEW/CIsA
-DhACAQCMpbYKmmmaqtmO1quLBEV/CIsA3n3ZmRcIAACGISj6SlgEDEUIBABAT16reWqpNGqJoOg7
-YRHQDbeEAQAAWwiK3hMWAdUTAgEAQGQ/9qnSqHSVUWvzFgmK5gmLgNMIgQAAgDMIipYJi4DkzAsE
-AADnu1f5mMfoK0HROmEREE0IBAAAtOysoOh1vbUTFgEhBLeEAQBAr0pWGNU8b5GgKJ6wCDonBAIA
-AEYnKNpGWASNEgIBAACb+vODzmEkKNpOWASVMS8QAABAGrUERZ+3z6bCI2ERFCIEAgAAanC5XLJW
-F9Uyb1FNQVFrhEWQ+oOxUCgkBAIAAHaPJzIHRmcTFB0jLILaPrSFQAAAALsJio4TFkEhQiAAAKCq
-MUqH1UWCojSERZD6A1coBAAAUJygKJ2/nE4AAABASqUrlgRFaaksghQfhD+1Af1QHQcAQEsERemp
-LAIAAIBB1fCI+yMERXkIiwAAAIDmCYrScRsaJOYWHlrkVkoAgIHHMB08FU1QlJbKIgAAAKBZgqL0
-hEUAAABAkwRFeQiLAAAAAGaMFhSFICwCAAAAeGvEoCgEYREAAADAN6MGRSEIiwAAAGB4l8sl+TJb
-fsLayEFRCCH8cEkAQJkOTo5OGAAAaY0eFIUgLAJgcCV/8VpalyAJAOB8gqJfhEUADKPmUuh32yZA
-AgAoR1D0h7CIrgduBlp9DqqdM4xyHj9vv3MTACAfQdFXwiKAmcH5K4P19o9hT/vlfAQASENQ9J2w
-iO4HjQZUGKyPeXxG2V/nIQCQyuVyGa5PJSh6T1iEgR0kOIcN2H2OOA8BANoiKJonLAIwYG+6vfne
-Ls5BAIBlgqJlf2kCeh/oGVRyxvntvNO22gkAoE6ConUqiwAyDthDUOWRsi1xDgIAHHFWUPS63tqp
-LAIoMGAXdhxrP5yDAABHCYriCYsYYuBnkIQBu/ZCmwIA4xIUbSMsAjhhwI42Ort9tTEAMApB0XbC
-IoYZABoY4Vpoo120jfMQACCVWoKi1ibRFhYBGKhrD+0OANAdQdF+wiKAkwfqBusCCwAA0hIUHSMs
-YqjBoAEp1Pe54LoEACAlQdFxP5xGAOebpilcLpfh9rkVKY6NUAwAID9BURrCIoBKjBQY1Rqc5Gz/
-uWULkQAA0hAUpSMsYriB4YgVHLR1rfR+ftb0eVBDW79ug/AIAGA7QVFawiJgqIH5O7UNznsOjGpo
-69rb9nn7BEcAAOsERekJixhuIN77YJxjg3OD9D4/C1q93gVHAADLBEV5CIsAKhyk9xZonhV09NSG
-giMAgGWConSERQCRg3QD9PaOmXMSAGAMgqJkHc0Qpin85ZQip5oHMgZZ7BmglwwhejlHS+/HSLeY
-lj4nAQBqJChK2nkPIQRhEW0NisAAvbXvmslxse8AgDFcNoKiPIRFGMhCxV9+LZ+jpYMitAMAQA6j
-BUUhCItoZKB4HwAZCGFwPt71v9b+joE2AQDa6sO1ZMSgKARhEUCSwTnaXfsAAPRl1KAoBGERmbSU
-SEvPcY62t72CkPh20lYAANuNHBSFICyikcGOQSKtnaejEhQ5PwEAWjd6UBSCsAjAgFwbD9N22g8A
-YJmg6BdhEcnlmNi6pW0G134egg7tCACQk6DoD2ERBjuAa157AgAMTVD0lbCIpFqu0FFdRM2D8NrP
-z5zbJ9jQrgBAe/25lvoagqLvhEU0O5Ax0IE+OxbU8zkLANA7QdF7wiIAqiXM0MYAALkIiuYJi0im
-xYmtc+4DBt+ue+0IAECdBEXLhEUYlAMAAAxstB/NBUXrhEUAVNepEAQDAJDDWUHR63prJyyiukHj
-1kFi6kGlW9HgXIIiAAD9uRwERfGERQAAAEDXBEXbCIs4rMdKHNVFcM41oqoIAMDYJzVB0XbCIqqy
-d6BogAkAAMCrWoKi1ibRFhYBsImqIgAA/boW+nSCov2ERVTz4VLbQNGtaAAAAG0SFB0jLKIbqhLA
-9QsAQJyefxwXFB0nLIJBP0BpSy1himsCAICaCYrSEBZRxaAx1UBYdQK9XRsAANBKf/Xs8ZigKB1h
-EQCnEvICAHCUoCgtYRG79Dyxdc59Bdc9AABn9ud67NMJitITFtEdVQoAAABjEBTl8cOpBZBOjl9q
-eg5AhbsAAG32UWvs1wmK0lFZxKkfNLk+UFIv1+03AAAA9RIUpaWyCCCRnkNFgSkAgD7cnLOrigRF
-6akswoDRvlMxt2kBAMA8QVEeKovodhB8uVwEPBTjXKvvMwAAQL9Uny6F0YKiEFQWAVT7hSxMAQCA
-c40YFIWgsoiTBsSlBsGpq4umaTKAJ9t1AQAALfVHex8bjRoUhaCyCKDKL+aavngFYgAAjGbkoCgE
-lUUAmwlPjlOhBwDoC+rP1Wr0oCgElUWc8IFY+kMl9fp8OYx9HZQ4/oIUAAA4h6DoF5VFACtKBoSC
-IgAAatdrn1VQ9IewiKID5V4+VEx07bz3pQsAAP0QFH0lLGIIqZ+KRl9qODcERQAAtDK26o2g6Dth
-EVCMwG6cL1wAAGiBoOg9E1xTbHB/9oDYRNfUSFAEAEAr/dbe+q6ConnCIoATv3BrJxQFAKBHgqJl
-bkMDKGz0aiLVVAAA+m5nEhStU1nErB6fguZWNM4+/wQlAABwnrOCotf11k5lEUBmAiIAAPRjzyco
-iqeyiLd6rCrKtT2qi5g7z1QSAQBAHQRF26gsAjhIIAQAgL5tvQRF26ksAjhomqYvfwAAgDrUEhS1
-Nom2yiLeDnxTqTWVvlwuBvUUuYZUHQEAUKve+6qCov2ERQAZCY4AAGihr9pbf1VQdIzb0Fj8sDjC
-wBi+X18q2gAAIC9B0XHCIoYlzOIsQiMAAGrup7bcVxUUpSEsAjjxyxgAAEhDUJSOsIgsA9dWqnZU
-F1HDdSc0AgBAP/UYQVFawiKASr6MAQCA7QRF6QmLACohMAIAoMY+as39VEFRHj+c+qQepLZ2a9fl
-ckm6/9M0ub2t4XPj7C9C5w8AAOwjKEpHWATw5F1QUzpAEhgBAFCbe5+41n6qoCgtt6ExdFVRru12
-O1FfLpfL40+L1yUAAPRMUJSesAhgg5LBkcAIAIDa1NZHFRTlISwC2KlEaCQwAgCAc40WFIUgLBqe
-W9Dybb9B/jgERgAAjDaOHKWPOmJQFIKwCCCJ0nMaAQAAeY0aFIUgLCLhQBnIdy2oLgIAoDY991FH
-DopCEBa5sMk60NfGzqPWz6cc++K6AACgZqMHRSEIiwCyUG0HAMAIevshUFD0i7DIBW1QnHl/VFHg
-fAIAgPoJiv744XQAyONyuQh3AIDmTdOkavqlj1fzsXKO7CMo+kplEUBjnQkBFAAApCMo+k5YNCC3
-oJXfL4N7AACgxDjm+U+r48ySBEXvCYsACnxp+zIGAIC6CIrmCYsGo6rovP0zuAfXAwD47qb0mKZk
-lVFL54mgaJkJrvGFAax2MlzvAAD0QlC0TmURQAGeIAIAwNn90RJVRrX/yHhWUPS63toJiwaiMsAx
-wPkEAACjEhTFExYBcAphFwDAOXJXGNXYzxMUbSMsAgAAALolKNpOWDQIv+A7Fpyv5XmLzLkEAOjH
-6p+2eL7UEhS1Nom2sAgAAADojqBoP2HRAPwC4JjgXLL9AAC8U+IJaWcQFB0jLAIAAAC6ISg6TlgE
-QBTzFgEA6OttcUYVuaAoDWFR59zi4diAawEAgBEIitIRFgEAABDFjzx9a7m6SFCUlrAIgFM7EAAA
-cISgKD1hUcek/o4RuBYAANiitR8HBUV5CIsAAACA5gmK0hEWdcqv9I4V5JLr1ybXAgDov+Kc2UtQ
-lJawCAAAAGiWoCi9H04rYpjU9iu/puAz4ZLlOpimyecNAECnfb0cBEV5qCzqkCDDMcNxBgAA0hgt
-KApBWEQEv/IDJQnVAACMA2sxYlAUgrDIIItqPjgdO1wHrgcAMO6AeowaFIUgLAJoml98AAAgvZGD
-ohCERRiIahuK6PXXN9VFAAD01rcbPSgKQVjk4sMxBNeENgYAIIQgKLoTFjFL5Qzgs6JvgiIAfI/A
-H4KiP4RFYJCMjpT2064AAEMTFH0lLNLpx7GkUTWFlbm3xXWhPQEAchEUfScsovpBKBiU+9wYrS21
-IwBAGYKi94RFOv5UOEB2TF2baNMcbaf9AICzxzo1ERTNExYB+OJuarsEHtoMAOAoQdEyYRHNDELB
-4NxniPbVVgD4nsH5cpSgaJ2wyMWGY4tjp507bR9tBADw1VlB0et6aycsAkg8QM+theq/UtsoENEm
-AACxBEXxhEU0NwgFA3SfJ+/aH+0AADBHULSNsMigAMeYho5Ta4Fu6cBo1GtGWAkAME9QtJ2wiGYH
-oWCA7rNl7rg4BwEACKGeoKi1SbSFRQ0PEHCsOW9wfsZxEehuP072DwD0Vxm3Dyoo2u+HUx+g/g5Q
-60HR5XI5pR3v6+whaNMRBwCIJyg6RlhENwMpMCCv/3PmrPZ9Xm9rn3fOSQCAbQRFxwmLDGZpYEA8
-TZPKiMHPKddHnvOwxrZ1nQAA7CcoSkNYBFCxHqv+agiM7l6344z2Fg4B0INeftyk7XNFUJSOsAgf
-6uDaPGXfagxJ5rYpxbEQCgEA5CMoSktY1BiDjXEHwn6tGe8ccp347AUAYJ2gKL2/nFYGpIDr8sx9
-9TkEAMBegqI8hEUN8cu2Ab9zwHljv9H2AADvCYrSERYBGLTbf20OANA0QVFa5iwySABci1W1hQo6
-5xwAwBaCovRUFjXC4MmAzLngHBmpTbSLcw4AIIagKA+VRQAG7FW3kYDUOQcAcKbRgqIQVBY1IcdA
-yaDBOcF5A3bXn88r5xwAQBtGDIpCUFkERQZqwh0M1tO0n2vJOQcAUMqoQVEIwiIAA/YG21No5JwD
-AMhp5KAoBLehVc8taAZvJc8N0h1vt/6UaWO0CQB9j13gDKMHRSGoLAJINlDn3HYfsYPqvAMASEtQ
-9IuwyMACcB11dVxGCI2cgwAA6QmK/hAWVUwZZ3+Du9THdJomg0aDcRaOXS+fo85HAIC8BEVfCYsM
-DnBMnX8Mc821FB65BgFokR8zaZGg6DthEaT+gvypDaBW7zqvNQRIOtUAAOcQFL0nLAJgaEtBTcog
-SSAEAFAXQdE8YREAzBDwAAD0SVC0TFgEKQaU//z637lb0O7/DgAAwLkEReuERVBAzDxGAiUAAIC8
-zgqKXtdbO2ERVGItUBImAQDQRL/WE9G6O569EBTFExZBQnOBToonpKlOAgAA2EdQtI2wCAqICXEE
-SgAAAOkJirYTFkEl1kKcFGFS7HIESgAAHOpzuhWNStQSFH3ePpsKj4RF0IhS1UkxyxEmAQAAtasp
-KGqNsAg64nY3AACg6jFLoYozQdExwiIY7cPZ7W4AAEDHBEXHCYuAL2q63S12ewAAgPSmaWpumwVF
-aQiLgM3MnwQAANRGUJSOsAjIwvxJAABj80Q0ShIUpSUsAk5j/iQA8i3Z/QAADThJREFUAOAoQVF6
-wiKgWm53AwAAlgiK8hAWAU1zuxsAABCCoCglYRHQPYESAAD0TVCUlrAIIJg/CQAAWiUoSk9YBBDB
-/EkAADv6NZ6IxnM/NsO5ICjKQ1gEkOrLz+1uAADQndGCohCERQBFCZQAAGjBNE0aIYwZFIUgLAKo
-jvmTAADgfKMGRSEIiwCaY/4kAKAl5i1q85iNbuSgKARhEUCX3O4GAAD7jB4UhSAsAhiW290AACjW
-92ykukxQ9IuwCID3X+gV3e4Wuz0AALCXoOgPYREAu5k/CQCgL6POVyQo+kpYBEBW5k8CAKBmgqLv
-hEUAnM78SQDQN09Ea+c4jUZQ9J6wCIDqmT8JAIDUBEXzhEUAdMH8SQAAB/o3g1UVCYqWCYsAGIb5
-kwAAEBStExYBwBPzJwEAI1FR9HnKemsnLAKADdzuBgDQJkFRPGERACTmdjcAePO95YloVR6TIn2j
-Co67oGgbYREAnECgBABQhqBoO2ERAFTK/EkAQA4jzVNUS1D0eftsKjwSFgFAo86cP+kjXL92gP67
-OSAAQFVqCopaIywCgI6VCpQ+/r6uvkagBIB5i85t+1P6Iicdb0HRMcIiABhcqdvdBEoAQAmCouOE
-RQDAonuYNH3p/Ny+do4igqCoTtbMch6B1b+XEP43OSgAEOHsuYnOqCoSFKUhLAIADoupCEoVKIV/
-VzqewiQAGJKgKB1hEQBQRLFA6d+IXzEFSgB0aKSnnH3rQwiKkhIWAQDVmAuUrtfrr05wovmTBEoA
-0A9BUXrCIgCgHTEBzr+J5kcQKAGEEH7NO5OyYqX1J6KNXL2z9bwpQVCUh7AIAOhLTYGSMAkAihEU
-pSMsAgDGUypQUp0EwIDOqBwTFKUlLAIAeGctxHG7GwBUQVCUnrAIAGAPt7sBwDelq4oERXkIiwAA
-cnG7G9BRAGCSa2LOkx6NFhSFICwCADiXQAkAqjViUBSCsAgAoH7mTwKgcj1WFY0aFIUgLAIAaF8l
-8ydNP0O4/ONwANC+kYOiEIRFAABjKBQoTT+fOtrhGvWez/9ujg80wLxFLJ0bPRk9KApBWAQAwF2p
-291eO+V/X1dfI1ACoARB0S/CIgAA4qyESZfL5UtlUdLOu0AJoEo9VRUJiv4QFgEAkG7Q8E8I06OT
-fYvrnEcEQSmWI0wCYPY7RFD0hbAIAIBTxYQ4KQIl1UkA6ago6puwCACA6q2FOKWqk2K2BYB2CIre
-ExYBANC8UtVJscsRKNErT0Tjfh70QFA0T1gEAMAQagqUhEkA5xIULRMWAQDAfbBg/iSAWSqKxiEs
-AgCADcyfBNCus4Ki1/XWTlgEAAAJud0NtjFvUf1UFKVdbwuERQAAUJjb3QDKEhRtIywCAIAKCZSo
-VeonolH3se6BoGg7YREAADTK/EkAK59flQRFn7fPpsIjYREAAHTK/EnAXj1UFdUUFLVGWAQAAANz
-uxvQI0HRMcIiAABgkUCJV6nnLfJEtHqOaw8ERccJiwAAgMPMnwTUQFCUhrAIAADIzvxJUKeeKroE
-RekIiwAAgCq43S3xAPZpPwVk9E5QlJawCAAAaEYNt7u1GLx8/H0VGNHtvFCCovSERQAAQDdKVCe1
-WpkkMKJHgqI8hEUAAMBQSlQn1TBv0ud/t2/bkTIw8kS0Oo3choKidIRFAAAAzwO/CsKkmO2I3Zec
-gRFUc90KipISFgEAAGwZlJ44b9KekCdnYNRCFYtqpQGuSUFRcsIiAACAlAPXjPMm7b29TYUR3V5v
-gqIshEUAAAClB7iZAqWt74kJjKafjhdjGy0oCkFYBAAAUKV3IU6qW9y+L3PS4PDu+hgwKApBWAQA
-ANCMUvMlAeMGRSEIiwAAALqR6va2PXMZnTWwtl7r7Wm9tRAWAQAADCBn1ZEgwXqtty/Coozc9QsA
-AJwt5glqHwb01mu9p663NsIiAACAzsQERAb01mu9day3RsIiAACATpQKiUYc0Fuv9Y5EWJTY5+2m
-EQAAgHrGKAkDolEH9NZrvaMRFgEAAHQoR0g04oDeeq13RMIiAACATuQKiEYd0Fuv9Y7qL00AAACA
-Ab31Wi93wiIAAAAM6K3XenkQFgEAAGBAb73WW3C9tRMWAQAAYEBvvdZbaL0tEBYBAABgQG+91ltg
-va0QFgEAAGBAb73Wm3m9LREWAQAAMEuQYL3W2856UxEWAQAA8JYBvfVabzvrTekyTdO0+U2XSwgh
-hNvt5tMTAMjuer2GEELY0W2hZMfydx9xenSO9RWhFS3fLgMtKhkgffzuR11+/3dMf0plEQAAAAAP
-wiIAAAAAHn5oAgAAgLG1OKcKkI/KIgAAAAAehEUAAAAAPAiLAAAAAHgQFgEAAADwYIJrAAAAivq4
-frz9+7mJtre8/vm1SxN3z71ubl1bl5n6PUe2K3adW4/DWvsfPb5737PlmJrc/T2VRQAAABSzNHB/
-929bX3/W9u/ZzjO2/+gxOrrcrcve856alt8qlUUAAAAUsVb18zpoX3r9/d8+rh+L1Sdbq19itu91
-uXu28/73qapz9tiyjyWWneo9Z+xvb1QWAQAAkF3M7WGxAczSa3Pac9vbnu08M7RYu+3r8/b5eM3W
-dj/aFjmO8xnnUQuERQAAABSzNQhZen3Mv81VK6UOZPZu52i3Qe1p99zhmYqi79yGBgAAABFShjqf
-t88vy4sNsfZMon10H9fmYzozbMndHqMSFgEAANCleyBzDzS2VBWVmDfoXfVTrsqnFPv4/HevYRd9
-ERYBAABApBwhzlxodHRC59T7WGM4pIIoD2ERAAAAxWy9bWntaWdrnquL7v8dY2sIcXQ7n9d55oTd
-e7Z9yzHds2+520OF1HcmuAYAACC7mKdOzT1ZbG0enVqeHrZlO1sLKO5PQXv9s8WeY5b7ONdyHtVG
-ZREAAABFPM9zs6UqaOn1MQP8Ek/T2rOde+ZFamVC55T7lqo9SsxD1QuVRQAAABSz9RHzex9Jb7+O
-i7l1b8utc3uqkfa8p6blt+oyTdO0+U2XSwghhNvtpgUBgOyu12sIIYQd3RZKdix/9xGnRwdcXxEA
-zvbxux91+f3fMf0plUUAAAAAPJizCACA09yrxl7NVbBvef3za5cq4udeN7eurctM/Z4j2xW7ztT7
-eH/t2nGda//YZS7tz1q77DlmAL1SWQQAwCmWBvbv/m3r68/a/j3becb2x+5jDccixTLn9qXm9oc9
-Pq4fi38gRrHKopikvvQvG3vWs+fLxS8yfpEBAOb7DDH9taXX3//ter0u9pP29AvXtu91uXu2c6mP
-d6RftsWWdR89FiXsOWZ7zw+ojcmaSaFIZVGqXx5S/nqzd3v37r9fZAAA1sOGd3+/9votPz6msue2
-tz3bWWvgcsaxOLq81tof4EzZK4u2/mq05XVry1/7ZWPLLw4pvlBTbXcNHQS/yAAAOfoae19/u90W
-K5zvP3jN9V9S9lf2budaFXlpe6uacrRnquW11P4AZ8paWbT1V6PUy6/h1wO/yPjCBQD6kzNcWqrk
-fve61z9792duOTX05e7bkONHyL3tD9CzIreh5f6CWftlo9aORMntzn1Puy9XAKBmr2HDliqSEkHK
-7XYTWpx8fmh/gD9+1LhRZ06SfOQLodQEhEe+BN+VYKdc9mtbqCoCAHqVo5/zroJmy5QKqfclV9+x
-tr7snvYH6NmPkXe+9nCn1Q6T0AgAiO2LbekjrD3tLKav8lwtErvuPU/KPbKdc/2qVo5diW0+crtd
-D+0PkNtfNW7UvQz0tRz0zKdb7Nnu5+2v5YumxPbMlfECALz2tbY+DGTtCbO1PBxky3a21E86eiy2
-PiE4VT+9l/YHKKVIZdHR0s21JyDs/WWjhvmM/CIDAIzouX+3pSpo6fUxfbsSc2nu2c49fdaUUzds
-DWy27mOq45dif1K1P0DPslYWbf3VKPXya3uKQ6rt9osMANCDrQ/7qPmhJr3u17uK8b3bnGo/j94F
-0Op5BVDSZZqmafObLpdNH55rQcJrBcrWx83HLv/19ak+/Pc+Qn7rdqfc19flbA1+UuwLAGz9rt3R
-baFkx/J3H/F+lD59/wPA6T5+96Muv/87pj9VZM6iFGn93mXU8uQGv8gAAAAALShSWQQAcITKokY6
-liqLAKA6eyqLfmg2AADoj2kCANhLWAQAAB0SBgGwl7BohV9kAAAAgJEIi1YIgwAAAICRCIsAAMji
-Y6VCGwCo01+aAAAAAIA7lUUAACR10QQA0PZ3+TRN0+Y3XXQBAIDydnRbKNmx1EcEgC76UyqLAAAo
-1vkEAOq3KyzSEQAAAADokwmuAQAAAHgQFgEAAADwICwCAAAA4EFYBAAAAMCDsAgAAACAB2ERAAAA
-AA/CIgAAAAAehEUAAAAAPAiLAAAAAHgQFgEAAADwICwCAAAA4EFYBAAAAMCDsAgAAACAB2ERAAAA
-AA/CIgAAAAAehEUAAAAAPAiLAAAAAHgQFgEAAADwICwCAAAA4EFYBAAAAMCDsAgAAACAB2ERAAAA
-AA/CIgAAAAAe/g/10lQlA3JSSwAAAABJRU5ErkJggg==
diff --git a/Documentation/DocBook/media/typical_media_device.svg b/Documentation/DocBook/media/typical_media_device.svg
deleted file mode 100644
index f0c82f72c4b6..000000000000
--- a/Documentation/DocBook/media/typical_media_device.svg
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg stroke-linejoin="round" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://www.w3.org/2000/svg" clip-path="url(#a)" xml:space="preserve" fill-rule="evenodd" height="178.78mm" viewBox="0 0 24285.662 17877.829" width="251.99mm" version="1.2" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" preserveAspectRatio="xMidYMid" stroke-width="28.222"><defs><clipPath id="a" clipPathUnits="userSpaceOnUse"><rect y="0" x="0" width="28000" height="21000"/></clipPath></defs><g transform="matrix(1.004 0 0 1 -2185.6 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#fcf" d="m12231 4800c-516 0-1031 515-1031 1031v4124c0 516 515 1032 1031 1032h8538c516 0 1032-516 1032-1032v-4124c0-516-516-1031-1032-1031h-8538z"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#ffc" d="m3595 15607c-293 0-585 292-585 585v2340c0 293 292 586 585 586h3275c293 0 586-293 586-586v-2340c0-293-293-585-586-585h-3275z"/></g><g transform="translate(-2197.3 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#e6e6e6" d="m2663 2186c-461 0-922 461-922 922v11169c0 461 461 923 922 923h3692c461 0 922-462 922-923v-11169c0-461-461-922-922-922h-3692z"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4461 8602h-2260v-1086h4520v1086h-2260z"/><path fill="none" d="m4461 8602h-2260v-1086h4520v1086h-2260z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="8275" x="2579" class="TextPosition"><tspan fill="#000000">Audio decoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4461 11772h-2260v-1270h4520v1270h-2260z"/><path fill="none" d="m4461 11772h-2260v-1270h4520v1270h-2260z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="11353" x="2617" class="TextPosition"><tspan fill="#000000">Video decoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4453 10217h-2269v-1224h4537v1224h-2268z"/><path fill="none" d="m4453 10217h-2269v-1224h4537v1224h-2268z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="9821" x="2571" class="TextPosition"><tspan fill="#000000">Audio encoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.RectangleShape"><path fill="#cfc" d="m15711 12832h-3810v-1281h7620v1281h-3810z"/><path fill="none" d="m15711 12832h-3810v-1281h7620v1281h-3810z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="12407" x="12377" class="TextPosition"><tspan fill="#000000">Button Key/IR input logic</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2411.8)" class="com.sun.star.drawing.RectangleShape"><path fill="#cfe7f5" d="m14169 14572h-2268v-1412h4536v1412h-2268z"/><path fill="none" d="m14169 14572h-2268v-1412h4536v1412h-2268z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="14082" x="12882" class="TextPosition"><tspan fill="#000000">EEPROM</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#fc9" d="m5140 17662h-1563v-1715h3126v1715h-1563z"/><path fill="none" d="m5140 17662h-1563v-1715h3126v1715h-1563z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="17020" x="4276" class="TextPosition"><tspan fill="#000000">Sensor</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6719 8030 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z"/><path fill="none" d="m6719 8030 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6719 9612 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z"/><path fill="none" d="m6719 9612 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6721 11100 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m6721 11100 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2411.8)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 13854 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m9962 13854 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 12163 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m9962 12163 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 17158 670-353v176h2028v-176l671 353-671 354v-177h-2028v177l-670-354z"/><path fill="none" d="m9962 17158 670-353v176h2028v-176l671 353-671 354v-177h-2028v177l-670-354z" stroke="#3465af"/></g><g transform="matrix(0 .83339 -1.0005 0 30268 -5276.3)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m23229 12779 1009-978 1009 978h-505v2959h505l-1009 979-1009-979h504v-2959h-504z"/><path fill="none" d="m23229 12779 1009-978 1009 978h-505v2959h505l-1009 979-1009-979h504v-2959h-504z" stroke="#3465af"/></g><g transform="translate(-9973.6 -666.6)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="706px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="15832" x="24341" class="TextPosition" transform="matrix(0,-1,1,0,8509,40173)"><tspan fill="#000000">System Bus</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#cff" d="m13151 9262h-1250v-875h2499v875h-1249z"/><path fill="none" d="m13151 9262h-1250v-875h2499v875h-1249z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="9040" x="12215" class="TextPosition"><tspan fill="#000000">Demux</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9996 8765 373-357v178h1130v-178l374 357-374 358v-179h-1130v179l-373-358z"/><path fill="none" d="m9996 8765 373-357v178h1130v-178l374 357-374 358v-179h-1130v179l-373-358z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9996 7378 373-358v179h1130v-179l374 358-374 358v-179h-1130v179l-373-358z"/><path fill="none" d="m9996 7378 373-358v179h1130v-179l374 358-374 358v-179h-1130v179l-373-358z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#cff" d="m16322 7992h-4421v-1270h8841v1270h-4420z"/><path fill="none" d="m16322 7992h-4421v-1270h8841v1270h-4420z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="7573" x="12786" class="TextPosition"><tspan fill="#000000">Conditional Access Module</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4445 13287h-2269v-1224h4537v1224h-2268z"/><path fill="none" d="m4445 13287h-2269v-1224h4537v1224h-2268z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="12891" x="2601" class="TextPosition"><tspan fill="#000000">Video encoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6721 12634 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m6721 12634 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m20791 7545 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m20791 7545 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2028 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="14478" x="1990" class="TextPosition"><tspan fill="#000000">Radio / Analog TV</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="700" class="TextParagraph"><tspan y="10724" x="14956" class="TextPosition"><tspan fill="#000000">Digital TV</tspan></tspan></tspan></text>
-</g><g transform="translate(-8970.5 -1395.8)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="494px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="19167" x="14724" class="TextPosition"><tspan fill="#000000">PS.: picture is not complete: other blocks may be present</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="18561" x="4199" class="TextPosition"><tspan fill="#000000">Webcam</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.RectangleShape"><path fill="#f90" d="m14552 16372h-2650v-1412h5299v1412h-2649z"/><path fill="none" d="m14552 16372h-2650v-1412h5299v1412h-2649z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="15882" x="12265" class="TextPosition"><tspan fill="#000000">Processing blocks</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 15654 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m9962 15654 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6702 16954 397-353v176h1201v-176l398 353-398 354v-177h-1201v177l-397-354z"/><path fill="none" d="m6702 16954 397-353v176h1201v-176l398 353-398 354v-177h-1201v177l-397-354z" stroke="#3465af"/></g><g transform="translate(-2479.5 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="8792" x="22850" class="TextPosition"><tspan fill="#000000">Smartcard</tspan></tspan></tspan></text>
-</g><g transform="matrix(1.0048 0 0 1 -2207.4 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#fcf" d="m2766 2600c-333 0-666 333-666 666v2668c0 333 333 666 666 666h18368c333 0 667-333 667-666v-2668c0-333-334-666-667-666h-18368z"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m5121 5155h-1614v-1816h3227v1816h-1613z"/><path fill="none" d="m5121 5155h-1614v-1816h3227v1816h-1613z" stroke="#3465af"/><text font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextShape"><tspan class="TextParagraph"><tspan y="4111" x="4374" class="TextPosition"><tspan fill="#000000">Tuner</tspan></tspan></tspan><tspan class="TextParagraph"><tspan y="4814" x="4151" class="TextPosition"><tspan fill="#000000">FM/TV</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#ff8080" d="m2902 3702c0 111 40 202 88 202h530c48 0 89-91 89-202 0-110-41-202-89-202h-530c-48 0-88 92-88 202z"/><path fill="none" d="m2902 3702c0 111 40 202 88 202h530c48 0 89-91 89-202 0-110-41-202-89-202h-530c-48 0-88 92-88 202z" stroke="#3465af"/><path fill="#ffb3b3" d="m2902 3702c0 111 40 202 88 202s88-91 88-202c0-110-40-202-88-202s-88 92-88 202z"/><path fill="none" d="m2902 3702c0 111 40 202 88 202s88-91 88-202c0-110-40-202-88-202s-88 92-88 202z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#ff8080" d="m2903 4267c0 110 40 202 88 202h530c48 0 89-92 89-202s-41-203-89-203h-530c-48 0-88 93-88 203z"/><path fill="none" d="m2903 4267c0 110 40 202 88 202h530c48 0 89-92 89-202s-41-203-89-203h-530c-48 0-88 93-88 203z" stroke="#3465af"/><path fill="#ffb3b3" d="m2903 4267c0 110 40 202 88 202s88-92 88-202-40-203-88-203-88 93-88 203z"/><path fill="none" d="m2903 4267c0 110 40 202 88 202s88-92 88-202-40-203-88-203-88 93-88 203z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6719 4196 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z"/><path fill="none" d="m6719 4196 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9979 4150 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z"/><path fill="none" d="m9979 4150 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#cff" d="m16500 6189h-4500v-1389h9e3v1389h-4500z"/><path fill="none" d="m16500 6189h-4500v-1389h9e3v1389h-4500z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="5710" x="12051" class="TextPosition"><tspan fill="#000000">Satellite Equipment Control (SEC)</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#cff" d="m13400 4600h-1400v-1e3h2800v1e3h-1400z"/><path fill="none" d="m13400 4600h-1400v-1e3h2800v1e3h-1400z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="4316" x="12465" class="TextPosition"><tspan fill="#000000">Demod</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9979 5451 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z"/><path fill="none" d="m9979 5451 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" stroke="#3465af"/></g><path fill="#ff9" d="m7855.1 9099v7302h-1270v-14605h1270v7303z"/><path fill="none" d="m7855.1 9099v7302h-1270v-14605h1270v7303z" stroke="#3465af"/><text y="-6640.4663" x="-20770.572" transform="rotate(-90)" class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="7409.5396" x="-11193.634" class="TextPosition" transform="matrix(0,-1,1,0,-4473,23627)"><tspan fill="#000000">I2C Bus (control bus)</tspan></tspan></tspan></text>
-<g transform="translate(-2197.3 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="3278" x="9391" class="TextPosition"><tspan fill="#000000">Digital TV Frontend</tspan></tspan></tspan></text>
-</g><g transform="matrix(1.015 0 0 .99994 -2233.3 -2185.7)" class="com.sun.star.drawing.CustomShape"><g stroke="#3465af" fill="none"><path d="m3e3 2800c-18 0-35 1-53 3"/><path d="m2915 2808c-17 3-35 7-52 12"/><path d="m2832 2830c-16 6-33 12-49 20"/><path d="m2754 2864c-15 8-31 17-46 27"/><path d="m2681 2909c-14 10-28 21-42 32"/><path d="m2614 2962c-13 12-26 24-38 37"/><path d="m2554 3023c-11 13-22 27-33 41"/><path d="m2502 3091c-10 14-19 29-28 45"/><path d="m2459 3164c-8 16-15 32-22 49"/><path d="m2426 3243c-5 17-10 34-14 51"/><path d="m2406 3326c-3 18-5 35-6 53"/><path d="m2400 3411v53"/><path d="m2400 3497v53"/><path d="m2400 3582v53"/><path d="m2400 3668v53"/><path d="m2400 3753v53"/><path d="m2400 3839v53"/><path d="m2400 3924v53"/><path d="m2400 4009v54"/><path d="m2400 4095v53"/><path d="m2400 4180v53"/><path d="m2400 4266v53"/><path d="m2400 4351v53"/><path d="m2400 4437v53"/><path d="m2400 4522v53"/><path d="m2400 4607v54"/><path d="m2400 4693v53"/><path d="m2400 4778v53"/><path d="m2400 4864v53"/><path d="m2400 4949v53"/><path d="m2400 5035v53"/><path d="m2400 5120v53"/><path d="m2400 5205v54"/><path d="m2400 5291v53"/><path d="m2400 5376v53"/><path d="m2400 5462v53"/><path d="m2400 5547v53"/><path d="m2400 5633v53"/><path d="m2400 5718v53"/><path d="m2400 5803c0 18 1 36 3 53"/><path d="m2408 5888c4 18 8 35 13 52"/><path d="m2431 5971c6 16 13 33 20 49"/><path d="m2466 6049c8 15 17 31 27 46"/><path d="m2511 6122c10 14 21 28 32 42"/><path d="m2564 6188c12 13 25 26 38 38"/><path d="m2626 6248c13 11 27 23 41 33"/><path d="m2694 6300c14 10 29 19 45 27"/><path d="m2768 6343c15 7 32 15 48 21"/><path d="m2847 6375c17 5 34 10 51 14"/><path d="m2930 6395c17 2 35 4 53 5"/><path d="m3015 6400h53"/><path d="m3100 6400h53"/><path d="m3186 6400h53"/><path d="m3271 6400h53"/><path d="m3357 6400h53"/><path d="m3442 6400h53"/><path d="m3527 6400h54"/><path d="m3613 6400h53"/><path d="m3698 6400h53"/><path d="m3784 6400h53"/><path d="m3869 6400h53"/><path d="m3955 6400h53"/><path d="m4040 6400h53"/><path d="m4125 6400h54"/><path d="m4211 6400h53"/><path d="m4296 6400h53"/><path d="m4382 6400h53"/><path d="m4467 6400h53"/><path d="m4553 6400h53"/><path d="m4638 6400h53"/><path d="m4723 6400h54"/><path d="m4809 6400h53"/><path d="m4894 6400h53"/><path d="m4980 6400h53"/><path d="m5065 6400h53"/><path d="m5151 6400h53"/><path d="m5236 6400h53"/><path d="m5322 6400h53"/><path d="m5407 6400h53"/><path d="m5492 6400h53"/><path d="m5578 6400h53"/><path d="m5663 6400h53"/><path d="m5749 6400h53"/><path d="m5834 6400h53"/><path d="m5920 6400h53"/><path d="m6005 6400h53"/><path d="m6090 6400h53"/><path d="m6176 6400h53"/><path d="m6261 6400h53"/><path d="m6347 6400h53"/><path d="m6432 6400h53"/><path d="m6518 6400h53"/><path d="m6603 6400h53"/><path d="m6688 6400h54"/><path d="m6774 6400h53"/><path d="m6859 6400h53"/><path d="m6945 6400h53"/><path d="m7030 6400h53"/><path d="m7116 6400h53"/><path d="m7201 6400h53"/><path d="m7286 6400h54"/><path d="m7372 6400h53"/><path d="m7457 6400h53"/><path d="m7543 6400h53"/><path d="m7628 6400h53"/><path d="m7714 6400h53"/><path d="m7799 6400h53"/><path d="m7884 6400h54"/><path d="m7970 6400h53"/><path d="m8055 6400h53"/><path d="m8141 6400h53"/><path d="m8226 6400h53"/><path d="m8312 6400h53"/><path d="m8397 6400h53"/><path d="m8482 6400h54"/><path d="m8568 6400h53"/><path d="m8653 6400h53"/><path d="m8739 6400h53"/><path d="m8824 6400h53"/><path d="m8910 6400h53"/><path d="m8995 6400h53"/><path d="m9081 6400h53"/><path d="m9166 6400h53"/><path d="m9251 
6400h53"/><path d="m9337 6400h53"/><path d="m9422 6400h53"/><path d="m9508 6400h53"/><path d="m9593 6400h53"/><path d="m9679 6400h53"/><path d="m9764 6400h53"/><path d="m9849 6400h53"/><path d="m9935 6400h53"/><path d="m10020 6400h53"/><path d="m10106 6400h53"/><path d="m10191 6400h53"/><path d="m10277 6400h53"/><path d="m10362 6400h53"/><path d="m10447 6400h53"/><path d="m10533 6400h53"/><path d="m10618 6400h53"/><path d="m10704 6400h53"/><path d="m10789 6400h53"/><path d="m10875 6400h53"/><path d="m10960 6400h53"/><path d="m11045 6400h54"/><path d="m11131 6400h53"/><path d="m11216 6400h53"/><path d="m11302 6400h53"/><path d="m11387 6400h53"/><path d="m11473 6400h53"/><path d="m11558 6400h53"/><path d="m11643 6400h54"/><path d="m11729 6400h53"/><path d="m11814 6400h53"/><path d="m11900 6400h53"/><path d="m11985 6400h53"/><path d="m12071 6400h53"/><path d="m12156 6400h53"/><path d="m12241 6400h54"/><path d="m12327 6400h53"/><path d="m12412 6400h53"/><path d="m12498 6400h53"/><path d="m12583 6400h53"/><path d="m12669 6400h53"/><path d="m12754 6400h53"/><path d="m12839 6400h54"/><path d="m12925 6400h53"/><path d="m13010 6400h53"/><path d="m13096 6400h53"/><path d="m13181 6400h53"/><path d="m13267 6400h53"/><path d="m13352 6400h53"/><path d="m13438 6400h53"/><path d="m13523 6400h53"/><path d="m13608 6400h53"/><path d="m13694 6400h53"/><path d="m13779 6400h53"/><path d="m13865 6400h53"/><path d="m13950 6400h53"/><path d="m14036 6400h53"/><path d="m14121 6400h53"/><path d="m14206 6400h53"/><path d="m14292 6400h53"/><path d="m14377 6400h53"/><path d="m14463 6400h53"/><path d="m14548 6400h53"/><path d="m14634 6400h53"/><path d="m14719 6400h53"/><path d="m14804 6400h54"/><path d="m14890 6400h53"/><path d="m14975 6400h53"/><path d="m15061 6400h53"/><path d="m15146 6400h53"/><path d="m15232 6400h53"/><path d="m15317 6400h53"/><path d="m15402 6400h54"/><path d="m15488 6400h53"/><path d="m15573 6400h53"/><path d="m15659 6400h53"/><path d="m15744 6400h53"/><path d="m15830 6400h53"/><path d="m15915 6400h53"/><path d="m16000 6400h54"/><path d="m16086 6400h53"/><path d="m16171 6400h53"/><path d="m16257 6400h53"/><path d="m16342 6400h53"/><path d="m16428 6400h53"/><path d="m16513 6400h53"/><path d="m16598 6400h54"/><path d="m16684 6400h53"/><path d="m16769 6400h53"/><path d="m16855 6400h53"/><path d="m16940 6400h53"/><path d="m17026 6400h53"/><path d="m17111 6400h53"/><path d="m17196 6400h54"/><path d="m17282 6400h53"/><path d="m17367 6400h53"/><path d="m17453 6400h53"/><path d="m17538 6400h53"/><path d="m17624 6400h53"/><path d="m17709 6400h53"/><path d="m17795 6400h53"/><path d="m17880 6400h53"/><path d="m17965 6400h53"/><path d="m18051 6400h53"/><path d="m18136 6400h53"/><path d="m18222 6400h53"/><path d="m18307 6400h53"/><path d="m18393 6400h53"/><path d="m18478 6400h53"/><path d="m18563 6400h53"/><path d="m18649 6400h53"/><path d="m18734 6400h53"/><path d="m18820 6400h53"/><path d="m18905 6400h53"/><path d="m18991 6400h53"/><path d="m19076 6400h53"/><path d="m19161 6400h54"/><path d="m19247 6400h53"/><path d="m19332 6400h53"/><path d="m19418 6400h53"/><path d="m19503 6400h53"/><path d="m19589 6400h53"/><path d="m19674 6400h53"/><path d="m19759 6400h54"/><path d="m19845 6400h53"/><path d="m19930 6400h53"/><path d="m20016 6400h53"/><path d="m20101 6400h53"/><path d="m20187 6400h53"/><path d="m20272 6400h53"/><path d="m20357 6400h54"/><path d="m20443 6400h53"/><path d="m20528 6400h53"/><path d="m20614 6400c17-1 35-2 53-5"/><path d="m20699 6390c17-4 34-9 51-14"/><path d="m20781 6365c16-6 32-13 
48-21"/><path d="m20858 6329c15-8 31-17 45-27"/><path d="m20930 6283c14-10 28-21 42-32"/><path d="m20996 6229c13-12 25-25 37-38"/><path d="m21055 6167c11-14 22-28 33-42"/><path d="m21106 6098c10-15 19-30 27-45"/><path d="m21148 6024c7-16 14-33 20-49"/><path d="m21179 5944c5-17 9-34 13-51"/><path d="m21197 5861c2-18 4-35 4-53"/><path d="m21201 5776v-54"/><path d="m21201 5690v-53"/><path d="m21201 5605v-53"/><path d="m21201 5519v-53"/><path d="m21201 5434v-53"/><path d="m21201 5348v-53"/><path d="m21201 5263v-53"/><path d="m21201 5178v-54"/><path d="m21201 5092v-53"/><path d="m21201 5007v-53"/><path d="m21201 4921v-53"/><path d="m21201 4836v-53"/><path d="m21201 4750v-53"/><path d="m21201 4665v-53"/><path d="m21201 4579v-53"/><path d="m21201 4494v-53"/><path d="m21201 4409v-53"/><path d="m21201 4323v-53"/><path d="m21201 4238v-53"/><path d="m21201 4152v-53"/><path d="m21201 4067v-53"/><path d="m21201 3981v-53"/><path d="m21201 3896v-53"/><path d="m21201 3811v-53"/><path d="m21201 3725v-53"/><path d="m21201 3640v-53"/><path d="m21201 3554v-53"/><path d="m21201 3469v-53"/><path d="m21201 3383c-1-17-3-35-5-52"/><path d="m21190 3299c-4-17-8-35-14-51"/><path d="m21165 3217c-6-16-13-33-21-49"/><path d="m21129 3140c-9-16-18-31-28-46"/><path d="m21082 3068c-10-14-21-28-33-42"/><path d="m21027 3002c-12-13-24-25-37-37"/><path d="m20965 2944c-14-12-28-22-42-33"/><path d="m20896 2893c-15-9-30-18-46-27"/><path d="m20821 2852c-16-8-32-14-49-20"/><path d="m20741 2821c-17-5-34-9-51-12"/><path d="m20658 2804c-18-3-35-4-53-4"/><path d="m20573 2800h-53"/><path d="m20487 2800h-53"/><path d="m20402 2800h-53"/><path d="m20316 2800h-53"/><path d="m20231 2800h-53"/><path d="m20146 2800h-54"/><path d="m20060 2800h-53"/><path d="m19975 2800h-53"/><path d="m19889 2800h-53"/><path d="m19804 2800h-53"/><path d="m19718 2800h-53"/><path d="m19633 2800h-53"/><path d="m19548 2800h-54"/><path d="m19462 2800h-53"/><path d="m19377 2800h-53"/><path d="m19291 2800h-53"/><path d="m19206 2800h-53"/><path d="m19120 2800h-53"/><path d="m19035 2800h-53"/><path d="m18950 2800h-54"/><path d="m18864 2800h-53"/><path d="m18779 2800h-53"/><path d="m18693 2800h-53"/><path d="m18608 2800h-53"/><path d="m18522 2800h-53"/><path d="m18437 2800h-53"/><path d="m18352 2800h-54"/><path d="m18266 2800h-53"/><path d="m18181 2800h-53"/><path d="m18095 2800h-53"/><path d="m18010 2800h-53"/><path d="m17924 2800h-53"/><path d="m17839 2800h-53"/><path d="m17753 2800h-53"/><path d="m17668 2800h-53"/><path d="m17583 2800h-53"/><path d="m17497 2800h-53"/><path d="m17412 2800h-53"/><path d="m17326 2800h-53"/><path d="m17241 2800h-53"/><path d="m17155 2800h-53"/><path d="m17070 2800h-53"/><path d="m16985 2800h-53"/><path d="m16899 2800h-53"/><path d="m16814 2800h-53"/><path d="m16728 2800h-53"/><path d="m16643 2800h-53"/><path d="m16557 2800h-53"/><path d="m16472 2800h-53"/><path d="m16387 2800h-54"/><path d="m16301 2800h-53"/><path d="m16216 2800h-53"/><path d="m16130 2800h-53"/><path d="m16045 2800h-53"/><path d="m15959 2800h-53"/><path d="m15874 2800h-53"/><path d="m15789 2800h-54"/><path d="m15703 2800h-53"/><path d="m15618 2800h-53"/><path d="m15532 2800h-53"/><path d="m15447 2800h-53"/><path d="m15361 2800h-53"/><path d="m15276 2800h-53"/><path d="m15191 2800h-54"/><path d="m15105 2800h-53"/><path d="m15020 2800h-53"/><path d="m14934 2800h-53"/><path d="m14849 2800h-53"/><path d="m14763 2800h-53"/><path d="m14678 2800h-53"/><path d="m14593 2800h-54"/><path d="m14507 2800h-53"/><path d="m14422 2800h-53"/><path d="m14336 2800h-53"/><path d="m14251 
2800h-53"/><path d="m14165 2800h-53"/><path d="m14080 2800h-53"/><path d="m13994 2800h-53"/><path d="m13909 2800h-53"/><path d="m13824 2800h-53"/><path d="m13738 2800h-53"/><path d="m13653 2800h-53"/><path d="m13567 2800h-53"/><path d="m13482 2800h-53"/><path d="m13396 2800h-53"/><path d="m13311 2800h-53"/><path d="m13226 2800h-53"/><path d="m13140 2800h-53"/><path d="m13055 2800h-53"/><path d="m12969 2800h-53"/><path d="m12884 2800h-53"/><path d="m12798 2800h-53"/><path d="m12713 2800h-53"/><path d="m12628 2800h-53"/><path d="m12542 2800h-53"/><path d="m12457 2800h-53"/><path d="m12371 2800h-53"/><path d="m12286 2800h-53"/><path d="m12200 2800h-53"/><path d="m12115 2800h-53"/><path d="m12030 2800h-54"/><path d="m11944 2800h-53"/><path d="m11859 2800h-53"/><path d="m11773 2800h-53"/><path d="m11688 2800h-53"/><path d="m11602 2800h-53"/><path d="m11517 2800h-53"/><path d="m11432 2800h-54"/><path d="m11346 2800h-53"/><path d="m11261 2800h-53"/><path d="m11175 2800h-53"/><path d="m11090 2800h-53"/><path d="m11004 2800h-53"/><path d="m10919 2800h-53"/><path d="m10834 2800h-54"/><path d="m10748 2800h-53"/><path d="m10663 2800h-53"/><path d="m10577 2800h-53"/><path d="m10492 2800h-53"/><path d="m10406 2800h-53"/><path d="m10321 2800h-53"/><path d="m10236 2800h-54"/><path d="m10150 2800h-53"/><path d="m10065 2800h-53"/><path d="m9979 2800h-53"/><path d="m9894 2800h-53"/><path d="m9808 2800h-53"/><path d="m9723 2800h-53"/><path d="m9637 2800h-53"/><path d="m9552 2800h-53"/><path d="m9467 2800h-53"/><path d="m9381 2800h-53"/><path d="m9296 2800h-53"/><path d="m9210 2800h-53"/><path d="m9125 2800h-53"/><path d="m9039 2800h-53"/><path d="m8954 2800h-53"/><path d="m8869 2800h-53"/><path d="m8783 2800h-53"/><path d="m8698 2800h-53"/><path d="m8612 2800h-53"/><path d="m8527 2800h-53"/><path d="m8441 2800h-53"/><path d="m8356 2800h-53"/><path d="m8271 2800h-54"/><path d="m8185 2800h-53"/><path d="m8100 2800h-53"/><path d="m8014 2800h-53"/><path d="m7929 2800h-53"/><path d="m7843 2800h-53"/><path d="m7758 2800h-53"/><path d="m7673 2800h-54"/><path d="m7587 2800h-53"/><path d="m7502 2800h-53"/><path d="m7416 2800h-53"/><path d="m7331 2800h-53"/><path d="m7245 2800h-53"/><path d="m7160 2800h-53"/><path d="m7075 2800h-54"/><path d="m6989 2800h-53"/><path d="m6904 2800h-53"/><path d="m6818 2800h-53"/><path d="m6733 2800h-53"/><path d="m6647 2800h-53"/><path d="m6562 2800h-53"/><path d="m6477 2800h-54"/><path d="m6391 2800h-53"/><path d="m6306 2800h-53"/><path d="m6220 2800h-53"/><path d="m6135 2800h-53"/><path d="m6049 2800h-53"/><path d="m5964 2800h-53"/><path d="m5879 2800h-54"/><path d="m5793 2800h-53"/><path d="m5708 2800h-53"/><path d="m5622 2800h-53"/><path d="m5537 2800h-53"/><path d="m5451 2800h-53"/><path d="m5366 2800h-53"/><path d="m5280 2800h-53"/><path d="m5195 2800h-53"/><path d="m5110 2800h-53"/><path d="m5024 2800h-53"/><path d="m4939 2800h-53"/><path d="m4853 2800h-53"/><path d="m4768 2800h-53"/><path d="m4682 2800h-53"/><path d="m4597 2800h-53"/><path d="m4512 2800h-53"/><path d="m4426 2800h-53"/><path d="m4341 2800h-53"/><path d="m4255 2800h-53"/><path d="m4170 2800h-53"/><path d="m4084 2800h-53"/><path d="m3999 2800h-53"/><path d="m3914 2800h-54"/><path d="m3828 2800h-53"/><path d="m3743 2800h-53"/><path d="m3657 2800h-53"/><path d="m3572 2800h-53"/><path d="m3486 2800h-53"/><path d="m3401 2800h-53"/><path d="m3316 2800h-54"/><path d="m3230 2800h-53"/><path d="m3145 2800h-53"/><path d="m3059 2800h-53"/></g></g><g transform="translate(-2197.3 -2186)"><rect height="1100.7" width="1213.6" 
y="6917.1" x="23255" fill="#f3e777"/><path fill="#ca4677" d="m22802 7700.4v-405.46l150.7-169.16c82.886-93.039 170.53-186.62 194.77-207.96l44.069-38.798 783.23-0.086 783.23-0.086v613.5 613.5h-978-978v-405.46zm1027.7 136.98v-78.372l-169.91 4.925-169.91 4.9249-5.09 45.854c-8.249 74.303 46.711 101.04 207.69 101.04h137.21v-78.372zm235.86-262.94 4.495-341.31 207.2-8.6408 207.2-8.6408 5.144-46.443c9.596-86.615-41.863-102.05-322.02-96.607l-246.71 4.7956-4.438 419.08-4.439 419.08h74.537 74.538l4.494-341.31zm391.3 313.72c26.41-19.286 36.255-41.399 32.697-73.447l-5.09-45.854h-174.05-174.05l-5.38 48.984c-9.97 90.771 0.993 97.91 150.36 97.91 99.305 0 148.27-7.6982 175.52-27.594zm-627.16-274.84v-77.768h-174.05-174.05v66.246c0 36.436 4.973 71.431 11.051 77.768 6.078 6.3366 84.401 11.521 174.05 11.521h163v-77.768zm659.89-4.9154 5.125-74.042-179.18 4.9155-179.18 4.9155-5.38 48.984c-10.473 95.348-2.259 99.57 183.28 94.197l170.2-4.9284 5.125-74.042zm-659.89-237.63v-78.372l-169.91 4.925-169.91 4.925-5.097 73.447-5.097 73.447h175 175v-78.372zm659.86 4.925-5.097-73.447h-174.05-174.05l-5.38 48.984c-10.289 93.673-2.146 97.91 188.15 97.91h175.52l-5.097-73.447zm-659.86-228.98v-77.768h-137.21c-97.358 0-147.91 7.8138-174.05 26.902-34.952 25.523-49.645 92.242-25.79 117.11 6.078 6.3366 84.401 11.521 174.05 11.521h163v-77.768z"/></g><g transform="matrix(.84874 0 0 .76147 2408.1 3615.3)"><rect height="3076.2" width="2734.3" y="13264" x="19249" fill="#6076b3"/><g stroke-linejoin="round" fill-rule="evenodd" stroke-width="28.222" fill="#e0ee2c"><rect y="13369" width="356.65" x="18937" height="180.95"/><rect y="13708" width="356.65" x="18937" height="180.95"/><rect y="14048" width="356.65" x="18937" height="180.95"/><rect y="14387" width="356.65" x="18937" height="180.95"/><rect y="14726" width="356.65" x="18937" height="180.95"/><rect y="15066" width="356.65" x="18937" height="180.95"/><rect y="15405" width="356.65" x="18937" height="180.95"/><rect y="15744" width="356.65" x="18937" height="180.95"/><rect y="16083" width="356.65" x="18937" height="180.95"/><rect y="13324" width="356.65" x="21939" height="180.95"/><rect y="13663" width="356.65" x="21939" height="180.95"/><rect y="14002" width="356.65" x="21939" height="180.95"/><rect y="14342" width="356.65" x="21939" height="180.95"/><rect y="14681" width="356.65" x="21939" height="180.95"/><rect y="15020" width="356.65" x="21939" height="180.95"/><rect y="15360" width="356.65" x="21939" height="180.95"/><rect y="15699" width="356.65" x="21939" height="180.95"/><rect y="16038" width="356.65" x="21939" height="180.95"/></g><g stroke-linejoin="round" fill-rule="evenodd" transform="matrix(.98702 0 0 .90336 -2675 7020.8)" class="com.sun.star.drawing.TextShape" stroke-width="28.222"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"/></text>
-<text style="word-spacing:0px;letter-spacing:0px" xml:space="preserve" font-size="1128.9px" y="9042.0264" x="22439.668" font-family="Sans" line-height="125%" fill="#000000"><tspan y="9042.0264" x="22439.668">CPU</tspan></text>
-</g></g><g stroke-linejoin="round" fill-rule="evenodd" transform="translate(-11752 543.6)" class="com.sun.star.drawing.TextShape" stroke-width="28.222"><text class="TextShape"><tspan font-size="706px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="15832" x="24341" class="TextPosition" transform="matrix(0,-1,1,0,8509,40173)"><tspan fill="#000000">PCI, USB, SPI, I2C, ...</tspan></tspan></tspan></text>
-</g><g stroke-linejoin="round" fill-rule="evenodd" transform="translate(-655.31 963.83)" class="com.sun.star.drawing.RectangleShape" stroke-width="28.222"><g transform="matrix(.49166 0 0 1.0059 6045.6 -82.24)"><path fill="#cfe7f5" d="m14169 14572h-2268v-1412h4536v1412h-2268z"/><path fill="none" d="m14169 14572h-2268v-1412h4536v1412h-2268z" stroke="#3465af"/></g><text y="-395.11282" x="-790.22229" class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="13686.9" x="12091.779" class="TextPosition"><tspan fill="#000000">Bridge</tspan></tspan></tspan></text>
-<text y="338.66486" x="-846.66675" class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="14420.677" x="12035.335" class="TextPosition"><tspan fill="#000000"> DMA</tspan></tspan></tspan></text>
-</g></svg>
diff --git a/Documentation/DocBook/media/v4l/.gitignore b/Documentation/DocBook/media/v4l/.gitignore
deleted file mode 100644
index d7ec32eafac9..000000000000
--- a/Documentation/DocBook/media/v4l/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-!*.xml
diff --git a/Documentation/DocBook/media/v4l/biblio.xml b/Documentation/DocBook/media/v4l/biblio.xml
deleted file mode 100644
index 87f1d24958aa..000000000000
--- a/Documentation/DocBook/media/v4l/biblio.xml
+++ /dev/null
@@ -1,381 +0,0 @@
- <bibliography>
- <title>References</title>
-
- <biblioentry id="cea608">
- <abbrev>CEA&nbsp;608-E</abbrev>
- <authorgroup>
- <corpauthor>Consumer Electronics Association (<ulink
-url="http://www.ce.org">http://www.ce.org</ulink>)</corpauthor>
- </authorgroup>
- <title>CEA-608-E R-2014 "Line 21 Data Services"</title>
- </biblioentry>
-
- <biblioentry id="en300294">
- <abbrev>EN&nbsp;300&nbsp;294</abbrev>
- <authorgroup>
- <corpauthor>European Telecommunication Standards Institute
-(<ulink url="http://www.etsi.org">http://www.etsi.org</ulink>)</corpauthor>
- </authorgroup>
- <title>EN 300 294 "625-line television Wide Screen Signalling
-(WSS)"</title>
- </biblioentry>
-
- <biblioentry id="ets300231">
- <abbrev>ETS&nbsp;300&nbsp;231</abbrev>
- <authorgroup>
- <corpauthor>European Telecommunication Standards Institute
-(<ulink
-url="http://www.etsi.org">http://www.etsi.org</ulink>)</corpauthor>
- </authorgroup>
- <title>ETS 300 231 "Specification of the domestic video
-Programme Delivery Control system (PDC)"</title>
- </biblioentry>
-
- <biblioentry id="ets300706">
- <abbrev>ETS&nbsp;300&nbsp;706</abbrev>
- <authorgroup>
- <corpauthor>European Telecommunication Standards Institute
-(<ulink url="http://www.etsi.org">http://www.etsi.org</ulink>)</corpauthor>
- </authorgroup>
- <title>ETS 300 706 "Enhanced Teletext specification"</title>
- </biblioentry>
-
- <biblioentry id="mpeg2part1">
- <abbrev>ISO&nbsp;13818-1</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>), International
-Organisation for Standardisation (<ulink
-url="http://www.iso.ch">http://www.iso.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-T Rec. H.222.0 | ISO/IEC 13818-1 "Information
-technology &mdash; Generic coding of moving pictures and associated
-audio information: Systems"</title>
- </biblioentry>
-
- <biblioentry id="mpeg2part2">
- <abbrev>ISO&nbsp;13818-2</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>), International
-Organisation for Standardisation (<ulink
-url="http://www.iso.ch">http://www.iso.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-T Rec. H.262 | ISO/IEC 13818-2 "Information
-technology &mdash; Generic coding of moving pictures and associated
-audio information: Video"</title>
- </biblioentry>
-
- <biblioentry id="itu470">
- <abbrev>ITU&nbsp;BT.470</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-R Recommendation BT.470-6 "Conventional Television
-Systems"</title>
- </biblioentry>
-
- <biblioentry id="itu601">
- <abbrev>ITU&nbsp;BT.601</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-R Recommendation BT.601-5 "Studio Encoding Parameters
-of Digital Television for Standard 4:3 and Wide-Screen 16:9 Aspect
-Ratios"</title>
- </biblioentry>
-
- <biblioentry id="itu653">
- <abbrev>ITU&nbsp;BT.653</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-R Recommendation BT.653-3 "Teletext systems"</title>
- </biblioentry>
-
- <biblioentry id="itu709">
- <abbrev>ITU&nbsp;BT.709</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-R Recommendation BT.709-5 "Parameter values for the
-HDTV standards for production and international programme
-exchange"</title>
- </biblioentry>
-
- <biblioentry id="itu1119">
- <abbrev>ITU&nbsp;BT.1119</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-R Recommendation BT.1119 "625-line
-television Wide Screen Signalling (WSS)"</title>
- </biblioentry>
-
- <biblioentry id="jfif">
- <abbrev>JFIF</abbrev>
- <authorgroup>
- <corpauthor>Independent JPEG Group (<ulink
-url="http://www.ijg.org">http://www.ijg.org</ulink>)</corpauthor>
- </authorgroup>
- <title>JPEG File Interchange Format</title>
- <subtitle>Version 1.02</subtitle>
- </biblioentry>
-
- <biblioentry id="itu-t81">
- <abbrev>ITU-T.81</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union
-(<ulink url="http://www.itu.int">http://www.itu.int</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-T Recommendation T.81
-"Information Technology &mdash; Digital Compression and Coding of Continous-Tone
-Still Images &mdash; Requirements and Guidelines"</title>
- </biblioentry>
-
- <biblioentry id="w3c-jpeg-jfif">
- <abbrev>W3C JPEG JFIF</abbrev>
- <authorgroup>
- <corpauthor>The World Wide Web Consortium (<ulink
-url="http://www.w3.org/Graphics/JPEG">http://www.w3.org</ulink>)</corpauthor>
- </authorgroup>
- <title>JPEG JFIF</title>
- </biblioentry>
-
- <biblioentry id="smpte12m">
- <abbrev>SMPTE&nbsp;12M</abbrev>
- <authorgroup>
- <corpauthor>Society of Motion Picture and Television Engineers
-(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
- </authorgroup>
- <title>SMPTE 12M-1999 "Television, Audio and Film - Time and
-Control Code"</title>
- </biblioentry>
-
- <biblioentry id="smpte170m">
- <abbrev>SMPTE&nbsp;170M</abbrev>
- <authorgroup>
- <corpauthor>Society of Motion Picture and Television Engineers
-(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
- </authorgroup>
- <title>SMPTE 170M-1999 "Television - Composite Analog Video
-Signal - NTSC for Studio Applications"</title>
- </biblioentry>
-
- <biblioentry id="smpte240m">
- <abbrev>SMPTE&nbsp;240M</abbrev>
- <authorgroup>
- <corpauthor>Society of Motion Picture and Television Engineers
-(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
- </authorgroup>
- <title>SMPTE 240M-1999 "Television - Signal Parameters -
-1125-Line High-Definition Production"</title>
- </biblioentry>
-
- <biblioentry id="smpte431">
- <abbrev>SMPTE&nbsp;RP&nbsp;431-2</abbrev>
- <authorgroup>
- <corpauthor>Society of Motion Picture and Television Engineers
-(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
- </authorgroup>
- <title>SMPTE RP 431-2:2011 "D-Cinema Quality - Reference Projector and Environment"</title>
- </biblioentry>
-
- <biblioentry id="smpte2084">
- <abbrev>SMPTE&nbsp;ST&nbsp;2084</abbrev>
- <authorgroup>
- <corpauthor>Society of Motion Picture and Television Engineers
-(<ulink url="http://www.smpte.org">http://www.smpte.org</ulink>)</corpauthor>
- </authorgroup>
- <title>SMPTE ST 2084:2014 "High Dynamic Range Electro-Optical Transfer Function of Master Reference Displays"</title>
- </biblioentry>
-
- <biblioentry id="srgb">
- <abbrev>sRGB</abbrev>
- <authorgroup>
- <corpauthor>International Electrotechnical Commission
-(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>IEC 61966-2-1 ed1.0 "Multimedia systems and equipment - Colour measurement
-and management - Part 2-1: Colour management - Default RGB colour space - sRGB"</title>
- </biblioentry>
-
- <biblioentry id="sycc">
- <abbrev>sYCC</abbrev>
- <authorgroup>
- <corpauthor>International Electrotechnical Commission
-(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>IEC 61966-2-1-am1 ed1.0 "Amendment 1 - Multimedia systems and equipment - Colour measurement
-and management - Part 2-1: Colour management - Default RGB colour space - sRGB"</title>
- </biblioentry>
-
- <biblioentry id="xvycc">
- <abbrev>xvYCC</abbrev>
- <authorgroup>
- <corpauthor>International Electrotechnical Commission
-(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>IEC 61966-2-4 ed1.0 "Multimedia systems and equipment - Colour measurement
-and management - Part 2-4: Colour management - Extended-gamut YCC colour space for video
-applications - xvYCC"</title>
- </biblioentry>
-
- <biblioentry id="adobergb">
- <abbrev>AdobeRGB</abbrev>
- <authorgroup>
- <corpauthor>Adobe Systems Incorporated (<ulink url="http://www.adobe.com">http://www.adobe.com</ulink>)</corpauthor>
- </authorgroup>
- <title>Adobe&copy; RGB (1998) Color Image Encoding Version 2005-05</title>
- </biblioentry>
-
- <biblioentry id="oprgb">
- <abbrev>opRGB</abbrev>
- <authorgroup>
- <corpauthor>International Electrotechnical Commission
-(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>IEC 61966-2-5 "Multimedia systems and equipment - Colour measurement
-and management - Part 2-5: Colour management - Optional RGB colour space - opRGB"</title>
- </biblioentry>
-
- <biblioentry id="itu2020">
- <abbrev>ITU&nbsp;BT.2020</abbrev>
- <authorgroup>
- <corpauthor>International Telecommunication Union (<ulink
-url="http://www.itu.ch">http://www.itu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>ITU-R Recommendation BT.2020 (08/2012) "Parameter values for ultra-high
-definition television systems for production and international programme exchange"
-</title>
- </biblioentry>
-
- <biblioentry id="tech3213">
- <abbrev>EBU&nbsp;Tech&nbsp;3213</abbrev>
- <authorgroup>
- <corpauthor>European Broadcasting Union (<ulink
-url="http://www.ebu.ch">http://www.ebu.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>E.B.U. Standard for Chromaticity Tolerances for Studio Monitors</title>
- </biblioentry>
-
- <biblioentry id="iec62106">
- <abbrev>IEC&nbsp;62106</abbrev>
- <authorgroup>
- <corpauthor>International Electrotechnical Commission
-(<ulink url="http://www.iec.ch">http://www.iec.ch</ulink>)</corpauthor>
- </authorgroup>
- <title>Specification of the radio data system (RDS) for VHF/FM sound broadcasting
-in the frequency range from 87,5 to 108,0 MHz</title>
- </biblioentry>
-
- <biblioentry id="nrsc4">
- <abbrev>NRSC-4-B</abbrev>
- <authorgroup>
- <corpauthor>National Radio Systems Committee
-(<ulink url="http://www.nrscstandards.org">http://www.nrscstandards.org</ulink>)</corpauthor>
- </authorgroup>
- <title>NRSC-4-B: United States RBDS Standard</title>
- </biblioentry>
-
- <biblioentry id="iso12232">
- <abbrev>ISO&nbsp;12232:2006</abbrev>
- <authorgroup>
- <corpauthor>International Organization for Standardization
-(<ulink url="http://www.iso.org">http://www.iso.org</ulink>)</corpauthor>
- </authorgroup>
- <title>Photography &mdash; Digital still cameras &mdash; Determination
- of exposure index, ISO speed ratings, standard output sensitivity, and
- recommended exposure index</title>
- </biblioentry>
-
- <biblioentry id="cea861">
- <abbrev>CEA-861-E</abbrev>
- <authorgroup>
- <corpauthor>Consumer Electronics Association
-(<ulink url="http://www.ce.org">http://www.ce.org</ulink>)</corpauthor>
- </authorgroup>
- <title>A DTV Profile for Uncompressed High Speed Digital Interfaces</title>
- </biblioentry>
-
- <biblioentry id="vesadmt">
- <abbrev>VESA&nbsp;DMT</abbrev>
- <authorgroup>
- <corpauthor>Video Electronics Standards Association
-(<ulink url="http://www.vesa.org">http://www.vesa.org</ulink>)</corpauthor>
- </authorgroup>
- <title>VESA and Industry Standards and Guidelines for Computer Display Monitor Timing (DMT)</title>
- </biblioentry>
-
- <biblioentry id="vesaedid">
- <abbrev>EDID</abbrev>
- <authorgroup>
- <corpauthor>Video Electronics Standards Association
-(<ulink url="http://www.vesa.org">http://www.vesa.org</ulink>)</corpauthor>
- </authorgroup>
- <title>VESA Enhanced Extended Display Identification Data Standard</title>
- <subtitle>Release A, Revision 2</subtitle>
- </biblioentry>
-
- <biblioentry id="hdcp">
- <abbrev>HDCP</abbrev>
- <authorgroup>
- <corpauthor>Digital Content Protection LLC
-(<ulink url="http://www.digital-cp.com">http://www.digital-cp.com</ulink>)</corpauthor>
- </authorgroup>
- <title>High-bandwidth Digital Content Protection System</title>
- <subtitle>Revision 1.3</subtitle>
- </biblioentry>
-
- <biblioentry id="hdmi">
- <abbrev>HDMI</abbrev>
- <authorgroup>
- <corpauthor>HDMI Licensing LLC
-(<ulink url="http://www.hdmi.org">http://www.hdmi.org</ulink>)</corpauthor>
- </authorgroup>
- <title>High-Definition Multimedia Interface</title>
- <subtitle>Specification Version 1.4a</subtitle>
- </biblioentry>
-
- <biblioentry id="hdmi2">
- <abbrev>HDMI2</abbrev>
- <authorgroup>
- <corpauthor>HDMI Licensing LLC
-(<ulink url="http://www.hdmi.org">http://www.hdmi.org</ulink>)</corpauthor>
- </authorgroup>
- <title>High-Definition Multimedia Interface</title>
- <subtitle>Specification Version 2.0</subtitle>
- </biblioentry>
-
- <biblioentry id="dp">
- <abbrev>DP</abbrev>
- <authorgroup>
- <corpauthor>Video Electronics Standards Association
-(<ulink url="http://www.vesa.org">http://www.vesa.org</ulink>)</corpauthor>
- </authorgroup>
- <title>VESA DisplayPort Standard</title>
- <subtitle>Version 1, Revision 2</subtitle>
- </biblioentry>
-
- <biblioentry id="poynton">
- <abbrev>poynton</abbrev>
- <authorgroup>
- <corpauthor>Charles Poynton</corpauthor>
- </authorgroup>
- <title>Digital Video and HDTV, Algorithms and Interfaces</title>
- </biblioentry>
-
- <biblioentry id="colimg">
- <abbrev>colimg</abbrev>
- <authorgroup>
- <corpauthor>Erik Reinhard et al.</corpauthor>
- </authorgroup>
- <title>Color Imaging: Fundamentals and Applications</title>
- </biblioentry>
-
- </bibliography>
diff --git a/Documentation/DocBook/media/v4l/capture.c.xml b/Documentation/DocBook/media/v4l/capture.c.xml
deleted file mode 100644
index 22126a991b34..000000000000
--- a/Documentation/DocBook/media/v4l/capture.c.xml
+++ /dev/null
@@ -1,659 +0,0 @@
-<programlisting>
-/*
- * V4L2 video capture example
- *
- * This program can be used and distributed without restrictions.
- *
- * This program is provided with the V4L2 API
- * see https://linuxtv.org/docs.php for more information
- */
-
-#include &lt;stdio.h&gt;
-#include &lt;stdlib.h&gt;
-#include &lt;string.h&gt;
-#include &lt;assert.h&gt;
-
-#include &lt;getopt.h&gt; /* getopt_long() */
-
-#include &lt;fcntl.h&gt; /* low-level i/o */
-#include &lt;unistd.h&gt;
-#include &lt;errno.h&gt;
-#include &lt;sys/stat.h&gt;
-#include &lt;sys/types.h&gt;
-#include &lt;sys/time.h&gt;
-#include &lt;sys/mman.h&gt;
-#include &lt;sys/ioctl.h&gt;
-
-#include &lt;linux/videodev2.h&gt;
-
-#define CLEAR(x) memset(&amp;(x), 0, sizeof(x))
-
-enum io_method {
- IO_METHOD_READ,
- IO_METHOD_MMAP,
- IO_METHOD_USERPTR,
-};
-
-struct buffer {
- void *start;
- size_t length;
-};
-
-static char *dev_name;
-static enum io_method io = IO_METHOD_MMAP;
-static int fd = -1;
-struct buffer *buffers;
-static unsigned int n_buffers;
-static int out_buf;
-static int force_format;
-static int frame_count = 70;
-
-static void errno_exit(const char *s)
-{
- fprintf(stderr, "%s error %d, %s\n", s, errno, strerror(errno));
- exit(EXIT_FAILURE);
-}
-
-static int xioctl(int fh, int request, void *arg)
-{
- int r;
-
- do {
- r = ioctl(fh, request, arg);
- } while (-1 == r &amp;&amp; EINTR == errno);
-
- return r;
-}
-
-static void process_image(const void *p, int size)
-{
- if (out_buf)
- fwrite(p, size, 1, stdout);
-
- fflush(stderr);
- fprintf(stderr, ".");
- fflush(stdout);
-}
-
-static int read_frame(void)
-{
- struct <link linkend="v4l2-buffer">v4l2_buffer</link> buf;
- unsigned int i;
-
- switch (io) {
- case IO_METHOD_READ:
- if (-1 == read(fd, buffers[0].start, buffers[0].length)) {
- switch (errno) {
- case EAGAIN:
- return 0;
-
- case EIO:
- /* Could ignore EIO, see spec. */
-
- /* fall through */
-
- default:
- errno_exit("read");
- }
- }
-
- process_image(buffers[0].start, buffers[0].length);
- break;
-
- case IO_METHOD_MMAP:
- CLEAR(buf);
-
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
-
- if (-1 == xioctl(fd, VIDIOC_DQBUF, &amp;buf)) {
- switch (errno) {
- case EAGAIN:
- return 0;
-
- case EIO:
- /* Could ignore EIO, see spec. */
-
- /* fall through */
-
- default:
- errno_exit("VIDIOC_DQBUF");
- }
- }
-
- assert(buf.index &lt; n_buffers);
-
- process_image(buffers[buf.index].start, buf.bytesused);
-
- if (-1 == xioctl(fd, VIDIOC_QBUF, &amp;buf))
- errno_exit("VIDIOC_QBUF");
- break;
-
- case IO_METHOD_USERPTR:
- CLEAR(buf);
-
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_USERPTR;
-
- if (-1 == xioctl(fd, VIDIOC_DQBUF, &amp;buf)) {
- switch (errno) {
- case EAGAIN:
- return 0;
-
- case EIO:
- /* Could ignore EIO, see spec. */
-
- /* fall through */
-
- default:
- errno_exit("VIDIOC_DQBUF");
- }
- }
-
- for (i = 0; i &lt; n_buffers; ++i)
- if (buf.m.userptr == (unsigned long)buffers[i].start
- &amp;&amp; buf.length == buffers[i].length)
- break;
-
- assert(i &lt; n_buffers);
-
- process_image((void *)buf.m.userptr, buf.bytesused);
-
- if (-1 == xioctl(fd, VIDIOC_QBUF, &amp;buf))
- errno_exit("VIDIOC_QBUF");
- break;
- }
-
- return 1;
-}
-
-static void mainloop(void)
-{
- unsigned int count;
-
- count = frame_count;
-
- while (count-- &gt; 0) {
- for (;;) {
- fd_set fds;
- struct timeval tv;
- int r;
-
- FD_ZERO(&amp;fds);
- FD_SET(fd, &amp;fds);
-
- /* Timeout. */
- tv.tv_sec = 2;
- tv.tv_usec = 0;
-
- r = select(fd + 1, &amp;fds, NULL, NULL, &amp;tv);
-
- if (-1 == r) {
- if (EINTR == errno)
- continue;
- errno_exit("select");
- }
-
- if (0 == r) {
- fprintf(stderr, "select timeout\n");
- exit(EXIT_FAILURE);
- }
-
- if (read_frame())
- break;
- /* EAGAIN - continue select loop. */
- }
- }
-}
-
-static void stop_capturing(void)
-{
- enum <link linkend="v4l2-buf-type">v4l2_buf_type</link> type;
-
- switch (io) {
- case IO_METHOD_READ:
- /* Nothing to do. */
- break;
-
- case IO_METHOD_MMAP:
- case IO_METHOD_USERPTR:
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (-1 == xioctl(fd, VIDIOC_STREAMOFF, &amp;type))
- errno_exit("VIDIOC_STREAMOFF");
- break;
- }
-}
-
-static void start_capturing(void)
-{
- unsigned int i;
- enum <link linkend="v4l2-buf-type">v4l2_buf_type</link> type;
-
- switch (io) {
- case IO_METHOD_READ:
- /* Nothing to do. */
- break;
-
- case IO_METHOD_MMAP:
- for (i = 0; i &lt; n_buffers; ++i) {
- struct <link linkend="v4l2-buffer">v4l2_buffer</link> buf;
-
- CLEAR(buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = i;
-
- if (-1 == xioctl(fd, VIDIOC_QBUF, &amp;buf))
- errno_exit("VIDIOC_QBUF");
- }
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (-1 == xioctl(fd, VIDIOC_STREAMON, &amp;type))
- errno_exit("VIDIOC_STREAMON");
- break;
-
- case IO_METHOD_USERPTR:
- for (i = 0; i &lt; n_buffers; ++i) {
- struct <link linkend="v4l2-buffer">v4l2_buffer</link> buf;
-
- CLEAR(buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_USERPTR;
- buf.index = i;
- buf.m.userptr = (unsigned long)buffers[i].start;
- buf.length = buffers[i].length;
-
- if (-1 == xioctl(fd, VIDIOC_QBUF, &amp;buf))
- errno_exit("VIDIOC_QBUF");
- }
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (-1 == xioctl(fd, VIDIOC_STREAMON, &amp;type))
- errno_exit("VIDIOC_STREAMON");
- break;
- }
-}
-
-static void uninit_device(void)
-{
- unsigned int i;
-
- switch (io) {
- case IO_METHOD_READ:
- free(buffers[0].start);
- break;
-
- case IO_METHOD_MMAP:
- for (i = 0; i &lt; n_buffers; ++i)
- if (-1 == munmap(buffers[i].start, buffers[i].length))
- errno_exit("munmap");
- break;
-
- case IO_METHOD_USERPTR:
- for (i = 0; i &lt; n_buffers; ++i)
- free(buffers[i].start);
- break;
- }
-
- free(buffers);
-}
-
-static void init_read(unsigned int buffer_size)
-{
- buffers = calloc(1, sizeof(*buffers));
-
- if (!buffers) {
- fprintf(stderr, "Out of memory\n");
- exit(EXIT_FAILURE);
- }
-
- buffers[0].length = buffer_size;
- buffers[0].start = malloc(buffer_size);
-
- if (!buffers[0].start) {
- fprintf(stderr, "Out of memory\n");
- exit(EXIT_FAILURE);
- }
-}
-
-static void init_mmap(void)
-{
- struct <link linkend="v4l2-requestbuffers">v4l2_requestbuffers</link> req;
-
- CLEAR(req);
-
- req.count = 4;
- req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- req.memory = V4L2_MEMORY_MMAP;
-
- if (-1 == xioctl(fd, VIDIOC_REQBUFS, &amp;req)) {
- if (EINVAL == errno) {
- fprintf(stderr, "%s does not support "
- "memory mapping\n", dev_name);
- exit(EXIT_FAILURE);
- } else {
- errno_exit("VIDIOC_REQBUFS");
- }
- }
-
- if (req.count &lt; 2) {
- fprintf(stderr, "Insufficient buffer memory on %s\n",
- dev_name);
- exit(EXIT_FAILURE);
- }
-
- buffers = calloc(req.count, sizeof(*buffers));
-
- if (!buffers) {
- fprintf(stderr, "Out of memory\n");
- exit(EXIT_FAILURE);
- }
-
- for (n_buffers = 0; n_buffers &lt; req.count; ++n_buffers) {
- struct <link linkend="v4l2-buffer">v4l2_buffer</link> buf;
-
- CLEAR(buf);
-
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = n_buffers;
-
- if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &amp;buf))
- errno_exit("VIDIOC_QUERYBUF");
-
- buffers[n_buffers].length = buf.length;
- buffers[n_buffers].start =
- mmap(NULL /* start anywhere */,
- buf.length,
- PROT_READ | PROT_WRITE /* required */,
- MAP_SHARED /* recommended */,
- fd, buf.m.offset);
-
- if (MAP_FAILED == buffers[n_buffers].start)
- errno_exit("mmap");
- }
-}
-
-static void init_userp(unsigned int buffer_size)
-{
- struct <link linkend="v4l2-requestbuffers">v4l2_requestbuffers</link> req;
-
- CLEAR(req);
-
- req.count = 4;
- req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- req.memory = V4L2_MEMORY_USERPTR;
-
- if (-1 == xioctl(fd, VIDIOC_REQBUFS, &amp;req)) {
- if (EINVAL == errno) {
- fprintf(stderr, "%s does not support "
- "user pointer i/o\n", dev_name);
- exit(EXIT_FAILURE);
- } else {
- errno_exit("VIDIOC_REQBUFS");
- }
- }
-
- buffers = calloc(4, sizeof(*buffers));
-
- if (!buffers) {
- fprintf(stderr, "Out of memory\n");
- exit(EXIT_FAILURE);
- }
-
- for (n_buffers = 0; n_buffers &lt; 4; ++n_buffers) {
- buffers[n_buffers].length = buffer_size;
- buffers[n_buffers].start = malloc(buffer_size);
-
- if (!buffers[n_buffers].start) {
- fprintf(stderr, "Out of memory\n");
- exit(EXIT_FAILURE);
- }
- }
-}
-
-static void init_device(void)
-{
- struct <link linkend="v4l2-capability">v4l2_capability</link> cap;
- struct <link linkend="v4l2-cropcap">v4l2_cropcap</link> cropcap;
- struct <link linkend="v4l2-crop">v4l2_crop</link> crop;
- struct <link linkend="v4l2-format">v4l2_format</link> fmt;
- unsigned int min;
-
- if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &amp;cap)) {
- if (EINVAL == errno) {
- fprintf(stderr, "%s is no V4L2 device\n",
- dev_name);
- exit(EXIT_FAILURE);
- } else {
- errno_exit("VIDIOC_QUERYCAP");
- }
- }
-
- if (!(cap.capabilities &amp; V4L2_CAP_VIDEO_CAPTURE)) {
- fprintf(stderr, "%s is no video capture device\n",
- dev_name);
- exit(EXIT_FAILURE);
- }
-
- switch (io) {
- case IO_METHOD_READ:
- if (!(cap.capabilities &amp; V4L2_CAP_READWRITE)) {
- fprintf(stderr, "%s does not support read i/o\n",
- dev_name);
- exit(EXIT_FAILURE);
- }
- break;
-
- case IO_METHOD_MMAP:
- case IO_METHOD_USERPTR:
- if (!(cap.capabilities &amp; V4L2_CAP_STREAMING)) {
- fprintf(stderr, "%s does not support streaming i/o\n",
- dev_name);
- exit(EXIT_FAILURE);
- }
- break;
- }
-
-
- /* Select video input, video standard and tune here. */
-
-
- CLEAR(cropcap);
-
- cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- if (0 == xioctl(fd, VIDIOC_CROPCAP, &amp;cropcap)) {
- crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- crop.c = cropcap.defrect; /* reset to default */
-
- if (-1 == xioctl(fd, VIDIOC_S_CROP, &amp;crop)) {
- switch (errno) {
- case EINVAL:
- /* Cropping not supported. */
- break;
- default:
- /* Errors ignored. */
- break;
- }
- }
- } else {
- /* Errors ignored. */
- }
-
-
- CLEAR(fmt);
-
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (force_format) {
- fmt.fmt.pix.width = 640;
- fmt.fmt.pix.height = 480;
- fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
- fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
-
- if (-1 == xioctl(fd, VIDIOC_S_FMT, &amp;fmt))
- errno_exit("VIDIOC_S_FMT");
-
- /* Note VIDIOC_S_FMT may change width and height. */
- } else {
- /* Preserve original settings as set by v4l2-ctl for example */
- if (-1 == xioctl(fd, VIDIOC_G_FMT, &amp;fmt))
- errno_exit("VIDIOC_G_FMT");
- }
-
- /* Buggy driver paranoia. */
- min = fmt.fmt.pix.width * 2;
- if (fmt.fmt.pix.bytesperline &lt; min)
- fmt.fmt.pix.bytesperline = min;
- min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
- if (fmt.fmt.pix.sizeimage &lt; min)
- fmt.fmt.pix.sizeimage = min;
-
- switch (io) {
- case IO_METHOD_READ:
- init_read(fmt.fmt.pix.sizeimage);
- break;
-
- case IO_METHOD_MMAP:
- init_mmap();
- break;
-
- case IO_METHOD_USERPTR:
- init_userp(fmt.fmt.pix.sizeimage);
- break;
- }
-}
-
-static void close_device(void)
-{
- if (-1 == close(fd))
- errno_exit("close");
-
- fd = -1;
-}
-
-static void open_device(void)
-{
- struct stat st;
-
- if (-1 == stat(dev_name, &amp;st)) {
- fprintf(stderr, "Cannot identify '%s': %d, %s\n",
- dev_name, errno, strerror(errno));
- exit(EXIT_FAILURE);
- }
-
- if (!S_ISCHR(st.st_mode)) {
- fprintf(stderr, "%s is no device\n", dev_name);
- exit(EXIT_FAILURE);
- }
-
- fd = open(dev_name, O_RDWR /* required */ | O_NONBLOCK, 0);
-
- if (-1 == fd) {
- fprintf(stderr, "Cannot open '%s': %d, %s\n",
- dev_name, errno, strerror(errno));
- exit(EXIT_FAILURE);
- }
-}
-
-static void usage(FILE *fp, int argc, char **argv)
-{
- fprintf(fp,
- "Usage: %s [options]\n\n"
- "Version 1.3\n"
- "Options:\n"
- "-d | --device name Video device name [%s]\n"
- "-h | --help Print this message\n"
- "-m | --mmap Use memory mapped buffers [default]\n"
- "-r | --read Use read() calls\n"
- "-u | --userp Use application allocated buffers\n"
- "-o | --output Outputs stream to stdout\n"
- "-f | --format Force format to 640x480 YUYV\n"
- "-c | --count Number of frames to grab [%i]\n"
- "",
- argv[0], dev_name, frame_count);
-}
-
-static const char short_options[] = "d:hmruofc:";
-
-static const struct option
-long_options[] = {
- { "device", required_argument, NULL, 'd' },
- { "help", no_argument, NULL, 'h' },
- { "mmap", no_argument, NULL, 'm' },
- { "read", no_argument, NULL, 'r' },
- { "userp", no_argument, NULL, 'u' },
- { "output", no_argument, NULL, 'o' },
- { "format", no_argument, NULL, 'f' },
- { "count", required_argument, NULL, 'c' },
- { 0, 0, 0, 0 }
-};
-
-int main(int argc, char **argv)
-{
- dev_name = "/dev/video0";
-
- for (;;) {
- int idx;
- int c;
-
- c = getopt_long(argc, argv,
- short_options, long_options, &amp;idx);
-
- if (-1 == c)
- break;
-
- switch (c) {
- case 0: /* getopt_long() flag */
- break;
-
- case 'd':
- dev_name = optarg;
- break;
-
- case 'h':
- usage(stdout, argc, argv);
- exit(EXIT_SUCCESS);
-
- case 'm':
- io = IO_METHOD_MMAP;
- break;
-
- case 'r':
- io = IO_METHOD_READ;
- break;
-
- case 'u':
- io = IO_METHOD_USERPTR;
- break;
-
- case 'o':
- out_buf++;
- break;
-
- case 'f':
- force_format++;
- break;
-
- case 'c':
- errno = 0;
- frame_count = strtol(optarg, NULL, 0);
- if (errno)
- errno_exit(optarg);
- break;
-
- default:
- usage(stderr, argc, argv);
- exit(EXIT_FAILURE);
- }
- }
-
- open_device();
- init_device();
- start_capturing();
- mainloop();
- stop_capturing();
- uninit_device();
- close_device();
- fprintf(stderr, "\n");
- return 0;
-}
-</programlisting>
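As a usage sketch (not part of the original listing): if the contents of the <programlisting> above are extracted to capture.c, the example builds with a plain C compiler, for instance "gcc -o capture capture.c", and runs against a capture node such as /dev/video0. The -f option forces the 640x480 YUYV format set up in init_device(), -c selects the number of frames to grab, and -o writes the raw frames to stdout so they can be redirected to a file.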
diff --git a/Documentation/DocBook/media/v4l/cec-api.xml b/Documentation/DocBook/media/v4l/cec-api.xml
deleted file mode 100644
index 7062c1fa4904..000000000000
--- a/Documentation/DocBook/media/v4l/cec-api.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<partinfo>
- <authorgroup>
- <author>
- <firstname>Hans</firstname>
- <surname>Verkuil</surname>
- <affiliation><address><email>hans.verkuil@cisco.com</email></address></affiliation>
- <contrib>Initial version.</contrib>
- </author>
- </authorgroup>
- <copyright>
- <year>2016</year>
- <holder>Hans Verkuil</holder>
- </copyright>
-
- <revhistory>
- <!-- Put document revisions here, newest first. -->
- <revision>
- <revnumber>1.0.0</revnumber>
- <date>2016-03-17</date>
- <authorinitials>hv</authorinitials>
- <revremark>Initial revision</revremark>
- </revision>
- </revhistory>
-</partinfo>
-
-<title>CEC API</title>
-
-<chapter id="cec-api">
- <title>CEC: Consumer Electronics Control</title>
-
- <section id="cec-intro">
- <title>Introduction</title>
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
- <para>HDMI connectors provide a single pin for use by the Consumer Electronics
- Control protocol. This protocol allows different devices connected by an HDMI cable
- to communicate. The protocol for CEC version 1.4 is defined in supplements 1 (CEC)
- and 2 (HEAC or HDMI Ethernet and Audio Return Channel) of the HDMI 1.4a
- (<xref linkend="hdmi" />) specification and the extensions added to CEC version 2.0
- are defined in chapter 11 of the HDMI 2.0 (<xref linkend="hdmi2" />) specification.
- </para>
-
- <para>The bitrate is very low (effectively no more than 36 bytes per second) and
-is based on the ancient AV.link protocol used in old SCART connectors. The protocol
-closely resembles a crazy Rube Goldberg contraption and is an unholy mix of low and
-high level messages. Some messages, especially those that are part of the HEAC protocol
-layered on top of CEC, need to be handled by the kernel, while others can be handled
-either by the kernel or by userspace.</para>
-
- <para>In addition, CEC can be implemented in HDMI receivers, transmitters and in USB
- devices that have an HDMI input and an HDMI output and that control just the CEC pin.</para>
-
- <para>Drivers that support CEC will create a CEC device node (/dev/cecX)
- to give userspace access to the CEC adapter. The &CEC-ADAP-G-CAPS; ioctl will tell userspace
- what it is allowed to do.</para>
- </section>
-</chapter>
-
-<appendix id="cec-user-func">
- <title>Function Reference</title>
- <!-- Keep this alphabetically sorted. -->
- &sub-cec-func-open;
- &sub-cec-func-close;
- &sub-cec-func-ioctl;
- &sub-cec-func-poll;
- <!-- All ioctls go here. -->
- &sub-cec-ioc-adap-g-caps;
- &sub-cec-ioc-adap-g-log-addrs;
- &sub-cec-ioc-adap-g-phys-addr;
- &sub-cec-ioc-dqevent;
- &sub-cec-ioc-g-mode;
- &sub-cec-ioc-receive;
-</appendix>
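A minimal C sketch of the flow described in the introduction above: open the /dev/cecX node created by the driver and ask CEC_ADAP_G_CAPS what userspace is allowed to do. The device path /dev/cec0 and the printing are illustrative assumptions; the ioctl name and the struct cec_caps fields come from the CEC_ADAP_G_CAPS reference later in this patch, and linux/cec.h refers to the staging module mentioned in the note above.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cec.h>		/* staging header at the time of this API */

int main(void)
{
	struct cec_caps caps = {};
	int fd = open("/dev/cec0", O_RDWR);	/* assumed adapter node */

	if (fd < 0) {
		perror("open /dev/cec0");
		return 1;
	}
	/* Documented below as an ioctl that never fails on a CEC node. */
	if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) < 0) {
		perror("CEC_ADAP_G_CAPS");
		close(fd);
		return 1;
	}
	printf("driver %s, adapter %s, capabilities 0x%08x\n",
	       caps.driver, caps.name, caps.capabilities);
	close(fd);
	return 0;
}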
diff --git a/Documentation/DocBook/media/v4l/cec-func-close.xml b/Documentation/DocBook/media/v4l/cec-func-close.xml
deleted file mode 100644
index 0812c8cd9634..000000000000
--- a/Documentation/DocBook/media/v4l/cec-func-close.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<refentry id="cec-func-close">
- <refmeta>
- <refentrytitle>cec close()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>cec-close</refname>
- <refpurpose>Close a cec device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>close</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>Closes the cec device. Resources associated with the file descriptor
- are freed. The device configuration remains unchanged.</para>
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para><function>close</function> returns 0 on success. On error, -1 is
- returned, and <varname>errno</varname> is set appropriately. Possible error
- codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid open file descriptor.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/cec-func-ioctl.xml b/Documentation/DocBook/media/v4l/cec-func-ioctl.xml
deleted file mode 100644
index f92817a2dc80..000000000000
--- a/Documentation/DocBook/media/v4l/cec-func-ioctl.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-<refentry id="cec-func-ioctl">
- <refmeta>
- <refentrytitle>cec ioctl()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>cec-ioctl</refname>
- <refpurpose>Control a cec device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;sys/ioctl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>void *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC ioctl request code as defined in the cec.h header file,
- for example CEC_ADAP_G_CAPS.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>Pointer to a request-specific structure.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>The <function>ioctl()</function> function manipulates cec device
- parameters. The argument <parameter>fd</parameter> must be an open file
- descriptor.</para>
- <para>The ioctl <parameter>request</parameter> code specifies the cec
- function to be called. It has encoded in it whether the argument is an
- input, output or read/write parameter, and the size of the argument
- <parameter>argp</parameter> in bytes.</para>
- <para>Macros and structure definitions specifying cec ioctl requests and
- their parameters are located in the cec.h header file. All cec ioctl
- requests, their respective functions and parameters are specified in
- <xref linkend="cec-user-func" />.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <para>Request-specific error codes are listed in the
- individual requests descriptions.</para>
- <para>When an ioctl that takes an output or read/write parameter fails,
- the parameter remains unmodified.</para>
- </refsect1>
-</refentry>
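The EINTR-retry wrapper used by the V4L2 capture example earlier in this patch applies unchanged to the cec ioctl() described here; a hedged restatement follows (the name xioctl is taken from that example, not from the CEC API).

#include <errno.h>
#include <sys/ioctl.h>

/* Retry an ioctl that was interrupted by a signal, as in the capture
 * example above; usable for CEC requests such as CEC_ADAP_G_CAPS. */
static int xioctl(int fd, unsigned long request, void *arg)
{
	int r;

	do {
		r = ioctl(fd, request, arg);
	} while (r == -1 && errno == EINTR);

	return r;
}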
diff --git a/Documentation/DocBook/media/v4l/cec-func-open.xml b/Documentation/DocBook/media/v4l/cec-func-open.xml
deleted file mode 100644
index 2edc5555b81a..000000000000
--- a/Documentation/DocBook/media/v4l/cec-func-open.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<refentry id="cec-func-open">
- <refmeta>
- <refentrytitle>cec open()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>cec-open</refname>
- <refpurpose>Open a cec device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;fcntl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>open</function></funcdef>
- <paramdef>const char *<parameter>device_name</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>device_name</parameter></term>
- <listitem>
- <para>Device to be opened.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>flags</parameter></term>
- <listitem>
- <para>Open flags. Access mode must be <constant>O_RDWR</constant>.
- </para>
- <para>When the <constant>O_NONBLOCK</constant> flag is
-given, the &CEC-RECEIVE; ioctl will return &EAGAIN; when no message is
-available, and the &CEC-TRANSMIT;, &CEC-ADAP-S-PHYS-ADDR; and
-&CEC-ADAP-S-LOG-ADDRS; ioctls all act in non-blocking mode.</para>
- <para>Other flags have no effect.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
- <refsect1>
- <title>Description</title>
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>To open a cec device applications call <function>open()</function>
- with the desired device name. The function has no side effects; the device
- configuration remains unchanged.</para>
- <para>When the device is opened in read-only mode, attempts to modify its
- configuration will result in an error, and <varname>errno</varname> will be
- set to <errorcode>EBADF</errorcode>.</para>
- </refsect1>
- <refsect1>
- <title>Return Value</title>
-
- <para><function>open</function> returns the new file descriptor on success.
- On error, -1 is returned, and <varname>errno</varname> is set appropriately.
- Possible error codes include:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>The requested access to the file is not allowed.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EMFILE</errorcode></term>
- <listitem>
- <para>The process already has the maximum number of files open.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENFILE</errorcode></term>
- <listitem>
- <para>The system limit on the total number of open files has been
- reached.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOMEM</errorcode></term>
- <listitem>
- <para>Insufficient kernel memory was available.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENXIO</errorcode></term>
- <listitem>
- <para>No device corresponding to this device special file exists.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
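A short sketch of the non-blocking open described above; the helper name and the device path argument are illustrative, the flag semantics are the ones listed for O_NONBLOCK.

#include <fcntl.h>
#include <stdio.h>

/*
 * Open a CEC adapter in non-blocking mode. With O_NONBLOCK the
 * CEC_RECEIVE ioctl returns EAGAIN when no message is queued, and
 * CEC_TRANSMIT, CEC_ADAP_S_PHYS_ADDR and CEC_ADAP_S_LOG_ADDRS
 * act in non-blocking mode.
 */
int open_cec_nonblocking(const char *device_name)
{
	int fd = open(device_name, O_RDWR | O_NONBLOCK);

	if (fd < 0)
		perror(device_name);
	return fd;
}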
diff --git a/Documentation/DocBook/media/v4l/cec-func-poll.xml b/Documentation/DocBook/media/v4l/cec-func-poll.xml
deleted file mode 100644
index 1bddbde0142d..000000000000
--- a/Documentation/DocBook/media/v4l/cec-func-poll.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<refentry id="cec-func-poll">
- <refmeta>
- <refentrytitle>cec poll()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>cec-poll</refname>
- <refpurpose>Wait for some event on a file descriptor</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;sys/poll.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>poll</function></funcdef>
- <paramdef>struct pollfd *<parameter>ufds</parameter></paramdef>
- <paramdef>unsigned int <parameter>nfds</parameter></paramdef>
- <paramdef>int <parameter>timeout</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>With the <function>poll()</function> function applications
-can wait for CEC events.</para>
-
- <para>On success <function>poll()</function> returns the number of
-file descriptors that have been selected (that is, file descriptors
-for which the <structfield>revents</structfield> field of the
-respective <structname>pollfd</structname> structure is non-zero).
-CEC devices set the <constant>POLLIN</constant> and
-<constant>POLLRDNORM</constant> flags in the
-<structfield>revents</structfield> field if there are messages in the
-receive queue. If the transmit queue has room for new messages, the
-<constant>POLLOUT</constant> and <constant>POLLWRNORM</constant>
-flags are set. If there are events in the event queue, then the
-<constant>POLLPRI</constant> flag is set.
-When the function times out it returns a value of zero; on
-failure it returns <returnvalue>-1</returnvalue> and the
-<varname>errno</varname> variable is set appropriately.
-</para>
-
- <para>For more details see the
-<function>poll()</function> manual page.</para>
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success, <function>poll()</function> returns the number of
-structures which have non-zero <structfield>revents</structfield>
-fields, or zero if the call timed out. On error
-<returnvalue>-1</returnvalue> is returned, and the
-<varname>errno</varname> variable is set appropriately:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para>One or more of the <parameter>ufds</parameter> members
-specify an invalid file descriptor.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EFAULT</errorcode></term>
- <listitem>
- <para><parameter>ufds</parameter> references an inaccessible
-memory area.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINTR</errorcode></term>
- <listitem>
- <para>The call was interrupted by a signal.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <parameter>nfds</parameter> argument is greater
-than <constant>OPEN_MAX</constant>.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
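A hedged sketch of the poll() usage described above, reusing the two-second timeout from the capture example; the reporting is illustrative, and only the always-defined POLLIN/POLLOUT/POLLPRI flags are tested (POLLRDNORM and POLLWRNORM are set alongside them, as the description notes).

#include <sys/poll.h>
#include <stdio.h>

/* Wait up to two seconds for CEC activity on an open adapter fd. */
int wait_for_cec(int fd)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLOUT | POLLPRI,
	};
	int r = poll(&pfd, 1, 2000);

	if (r < 0) {
		perror("poll");
		return -1;
	}
	if (r == 0) {
		fprintf(stderr, "poll timeout\n");
		return 0;
	}
	if (pfd.revents & POLLIN)
		fprintf(stderr, "message(s) in the receive queue\n");
	if (pfd.revents & POLLOUT)
		fprintf(stderr, "room in the transmit queue\n");
	if (pfd.revents & POLLPRI)
		fprintf(stderr, "event(s) in the event queue\n");
	return r;
}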
diff --git a/Documentation/DocBook/media/v4l/cec-ioc-adap-g-caps.xml b/Documentation/DocBook/media/v4l/cec-ioc-adap-g-caps.xml
deleted file mode 100644
index 3523ef2259b1..000000000000
--- a/Documentation/DocBook/media/v4l/cec-ioc-adap-g-caps.xml
+++ /dev/null
@@ -1,151 +0,0 @@
-<refentry id="cec-ioc-adap-g-caps">
- <refmeta>
- <refentrytitle>ioctl CEC_ADAP_G_CAPS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>CEC_ADAP_G_CAPS</refname>
- <refpurpose>Query device capabilities</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct cec_caps *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='cec-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC_ADAP_G_CAPS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>All cec devices must support the <constant>CEC_ADAP_G_CAPS</constant>
- ioctl. To query device information, applications call the ioctl with a
- pointer to a &cec-caps;. The driver fills the structure and returns
- the information to the application.
- The ioctl never fails.</para>
-
- <table pgwide="1" frame="none" id="cec-caps">
- <title>struct <structname>cec_caps</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>char</entry>
- <entry><structfield>driver[32]</structfield></entry>
- <entry>The name of the cec adapter driver.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>name[32]</structfield></entry>
- <entry>The name of this CEC adapter. The combination of <structfield>driver</structfield>
- and <structfield>name</structfield> must be unique.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capabilities</structfield></entry>
- <entry>The capabilities of the CEC adapter, see <xref
- linkend="cec-capabilities" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>version</structfield></entry>
- <entry>CEC Framework API version, formatted with the
- <constant>KERNEL_VERSION()</constant> macro.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-capabilities">
- <title>CEC Capabilities Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_CAP_PHYS_ADDR</constant></entry>
- <entry>0x00000001</entry>
- <entry>Userspace has to configure the physical address by
- calling &CEC-ADAP-S-PHYS-ADDR;. If this capability isn't set,
- then setting the physical address is handled by the kernel
- whenever the EDID is set (for an HDMI receiver) or read (for
- an HDMI transmitter).</entry>
- </row>
- <row>
- <entry><constant>CEC_CAP_LOG_ADDRS</constant></entry>
- <entry>0x00000002</entry>
- <entry>Userspace has to configure the logical addresses by
- calling &CEC-ADAP-S-LOG-ADDRS;. If this capability isn't set,
- then the kernel will have configured this.</entry>
- </row>
- <row>
- <entry><constant>CEC_CAP_TRANSMIT</constant></entry>
- <entry>0x00000004</entry>
- <entry>Userspace can transmit CEC messages by calling &CEC-TRANSMIT;. This
- implies that userspace can be a follower as well, since being able to
- transmit messages is a prerequisite of becoming a follower. If this
- capability isn't set, then the kernel will handle all CEC transmits
- and process all CEC messages it receives.
- </entry>
- </row>
- <row>
- <entry><constant>CEC_CAP_PASSTHROUGH</constant></entry>
- <entry>0x00000008</entry>
- <entry>Userspace can use the passthrough mode by
- calling &CEC-S-MODE;.</entry>
- </row>
- <row>
- <entry><constant>CEC_CAP_RC</constant></entry>
- <entry>0x00000010</entry>
- <entry>This adapter supports the remote control protocol.</entry>
- </row>
- <row>
- <entry><constant>CEC_CAP_MONITOR_ALL</constant></entry>
- <entry>0x00000020</entry>
- <entry>The CEC hardware can monitor all messages, not just directed and
- broadcast messages.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
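A short follow-on sketch interpreting the capabilities bitmask according to the flags table above; the report_caps() helper name is an assumption, the flag and ioctl names are the documented ones.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

/* Decide who configures what, based on the capability flags above. */
static void report_caps(int fd)
{
	struct cec_caps caps = {};

	ioctl(fd, CEC_ADAP_G_CAPS, &caps);	/* documented above as never failing */

	if (caps.capabilities & CEC_CAP_PHYS_ADDR)
		printf("userspace configures the physical address (CEC_ADAP_S_PHYS_ADDR)\n");
	if (caps.capabilities & CEC_CAP_LOG_ADDRS)
		printf("userspace configures the logical addresses (CEC_ADAP_S_LOG_ADDRS)\n");
	if (caps.capabilities & CEC_CAP_TRANSMIT)
		printf("userspace may transmit messages (CEC_TRANSMIT)\n");
	if (caps.capabilities & CEC_CAP_PASSTHROUGH)
		printf("passthrough mode is available (CEC_S_MODE)\n");
	if (caps.capabilities & CEC_CAP_MONITOR_ALL)
		printf("the hardware can monitor all CEC messages\n");
}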
diff --git a/Documentation/DocBook/media/v4l/cec-ioc-adap-g-log-addrs.xml b/Documentation/DocBook/media/v4l/cec-ioc-adap-g-log-addrs.xml
deleted file mode 100644
index 302b8294f7fc..000000000000
--- a/Documentation/DocBook/media/v4l/cec-ioc-adap-g-log-addrs.xml
+++ /dev/null
@@ -1,329 +0,0 @@
-<refentry id="cec-ioc-adap-g-log-addrs">
- <refmeta>
- <refentrytitle>ioctl CEC_ADAP_G_LOG_ADDRS, CEC_ADAP_S_LOG_ADDRS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>CEC_ADAP_G_LOG_ADDRS</refname>
- <refname>CEC_ADAP_S_LOG_ADDRS</refname>
- <refpurpose>Get or set the logical addresses</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct cec_log_addrs *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='cec-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC_ADAP_G_LOG_ADDRS, CEC_ADAP_S_LOG_ADDRS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>To query the current CEC logical addresses, applications call the
-<constant>CEC_ADAP_G_LOG_ADDRS</constant> ioctl with a pointer to a
-<structname>cec_log_addrs</structname> structure where the driver stores the
-logical addresses.</para>
-
- <para>To set new logical addresses, applications fill in struct <structname>cec_log_addrs</structname>
-and call the <constant>CEC_ADAP_S_LOG_ADDRS</constant> ioctl with a pointer to this struct.
-The <constant>CEC_ADAP_S_LOG_ADDRS</constant> ioctl is only available if
-<constant>CEC_CAP_LOG_ADDRS</constant> is set (&ENOTTY; is returned otherwise). This ioctl will block until all
-requested logical addresses have been claimed. <constant>CEC_ADAP_S_LOG_ADDRS</constant>
-can only be called by a file handle in initiator mode (see &CEC-S-MODE;).</para>
-
- <table pgwide="1" frame="none" id="cec-log-addrs">
- <title>struct <structname>cec_log_addrs</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u8</entry>
- <entry><structfield>log_addr</structfield>[CEC_MAX_LOG_ADDRS]</entry>
- <entry>The actual logical addresses that were claimed. This is set by the
- driver. If no logical address could be claimed, then it is set to
- <constant>CEC_LOG_ADDR_INVALID</constant>. If this adapter is Unregistered,
- then <structfield>log_addr[0]</structfield> is set to 0xf and all others to
- <constant>CEC_LOG_ADDR_INVALID</constant>.</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>log_addr_mask</structfield></entry>
- <entry>The bitmask of all logical addresses this adapter has claimed.
- If this adapter is Unregistered then <structfield>log_addr_mask</structfield>
- has bit 15 set and all other bits cleared. If this adapter is not configured at all, then
- <structfield>log_addr_mask</structfield> is set to 0. Set by the driver.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>cec_version</structfield></entry>
- <entry>The CEC version that this adapter shall use. See
- <xref linkend="cec-versions" />.
- Used to implement the <constant>CEC_MSG_CEC_VERSION</constant> and
- <constant>CEC_MSG_REPORT_FEATURES</constant> messages. Note that
- <constant>CEC_OP_CEC_VERSION_1_3A</constant> is not allowed
- by the CEC framework.
- </entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>num_log_addrs</structfield></entry>
- <entry>Number of logical addresses to set up. Must be &le;
- <structfield>available_log_addrs</structfield> as returned by
- &CEC-ADAP-G-CAPS;. All arrays in this structure are only filled up to
- index <structfield>available_log_addrs</structfield>-1. The remaining
- array elements will be ignored. Note that the CEC 2.0 standard allows
- for a maximum of 2 logical addresses, although some hardware has support
- for more. <constant>CEC_MAX_LOG_ADDRS</constant> is 4. The driver will
- return the actual number of logical addresses it could claim, which may
- be less than what was requested. If this field is set to 0, then the
- CEC adapter shall clear all claimed logical addresses and all other
- fields will be ignored.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>vendor_id</structfield></entry>
- <entry>The vendor ID is a 24-bit number that identifies the specific
- vendor or entity. Based on this ID vendor specific commands may be
- defined. If you do not want a vendor ID then set it to
- <constant>CEC_VENDOR_ID_NONE</constant>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags. No flags are defined yet, so set this to 0.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>osd_name</structfield>[15]</entry>
- <entry>The On-Screen Display name as is returned by the
- <constant>CEC_MSG_SET_OSD_NAME</constant> message.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>primary_device_type</structfield>[CEC_MAX_LOG_ADDRS]</entry>
- <entry>Primary device type for each logical address. See
- <xref linkend="cec-prim-dev-types" /> for possible types.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>log_addr_type</structfield>[CEC_MAX_LOG_ADDRS]</entry>
- <entry>Logical address types. See <xref linkend="cec-log-addr-types" /> for
- possible types. The driver will update this with the actual logical address
- type that it claimed (e.g. it may have to fall back to
- <constant>CEC_LOG_ADDR_TYPE_UNREGISTERED</constant>).</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>all_device_types</structfield>[CEC_MAX_LOG_ADDRS]</entry>
- <entry>CEC 2.0 specific: all device types. See <xref linkend="cec-all-dev-types-flags" />.
- Used to implement the <constant>CEC_MSG_REPORT_FEATURES</constant> message.
- This field is ignored if <structfield>cec_version</structfield> &lt;
- <constant>CEC_OP_CEC_VERSION_2_0</constant>.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>features</structfield>[CEC_MAX_LOG_ADDRS][12]</entry>
- <entry>Features for each logical address. Used to implement the
- <constant>CEC_MSG_REPORT_FEATURES</constant> message. The 12 bytes include
- both the RC Profile and the Device Features.
- This field is ignored if <structfield>cec_version</structfield> &lt;
- <constant>CEC_OP_CEC_VERSION_2_0</constant>.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-versions">
- <title>CEC Versions</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_OP_CEC_VERSION_1_3A</constant></entry>
- <entry>4</entry>
- <entry>CEC version according to the HDMI 1.3a standard.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_CEC_VERSION_1_4B</constant></entry>
- <entry>5</entry>
- <entry>CEC version according to the HDMI 1.4b standard.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_CEC_VERSION_2_0</constant></entry>
- <entry>6</entry>
- <entry>CEC version according to the HDMI 2.0 standard.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-prim-dev-types">
- <title>CEC Primary Device Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_TV</constant></entry>
- <entry>0</entry>
- <entry>Use for a TV.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_RECORD</constant></entry>
- <entry>1</entry>
- <entry>Use for a recording device.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_TUNER</constant></entry>
- <entry>3</entry>
- <entry>Use for a device with a tuner.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_PLAYBACK</constant></entry>
- <entry>4</entry>
- <entry>Use for a playback device.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM</constant></entry>
- <entry>5</entry>
- <entry>Use for an audio system (e.g. an audio/video receiver).</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_SWITCH</constant></entry>
- <entry>6</entry>
- <entry>Use for a CEC switch.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_PRIM_DEVTYPE_VIDEOPROC</constant></entry>
- <entry>7</entry>
- <entry>Use for a video processor device.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-log-addr-types">
- <title>CEC Logical Address Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_TV</constant></entry>
- <entry>0</entry>
- <entry>Use for a TV.</entry>
- </row>
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_RECORD</constant></entry>
- <entry>1</entry>
- <entry>Use for a recording device.</entry>
- </row>
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_TUNER</constant></entry>
- <entry>2</entry>
- <entry>Use for a tuner device.</entry>
- </row>
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_PLAYBACK</constant></entry>
- <entry>3</entry>
- <entry>Use for a playback device.</entry>
- </row>
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_AUDIOSYSTEM</constant></entry>
- <entry>4</entry>
- <entry>Use for an audio system device.</entry>
- </row>
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_SPECIFIC</constant></entry>
- <entry>5</entry>
- <entry>Use for a second TV or for a video processor device.</entry>
- </row>
- <row>
- <entry><constant>CEC_LOG_ADDR_TYPE_UNREGISTERED</constant></entry>
- <entry>6</entry>
- <entry>Use this if you just want to remain unregistered.
- Used for pure CEC switches or CDC-only devices (CDC:
- Capability Discovery and Control).</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-all-dev-types-flags">
- <title>CEC All Device Types Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_OP_ALL_DEVTYPE_TV</constant></entry>
- <entry>0x80</entry>
- <entry>This supports the TV type.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_ALL_DEVTYPE_RECORD</constant></entry>
- <entry>0x40</entry>
- <entry>This supports the Recording type.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_ALL_DEVTYPE_TUNER</constant></entry>
- <entry>0x20</entry>
- <entry>This supports the Tuner type.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_ALL_DEVTYPE_PLAYBACK</constant></entry>
- <entry>0x10</entry>
- <entry>This supports the Playback type.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM</constant></entry>
- <entry>0x08</entry>
- <entry>This supports the Audio System type.</entry>
- </row>
- <row>
- <entry><constant>CEC_OP_ALL_DEVTYPE_SWITCH</constant></entry>
- <entry>0x04</entry>
- <entry>This supports the CEC Switch or Video Processing type.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
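-
- <para>As a rough sketch, a CEC playback device could claim a single logical
-address using the types listed above. This assumes the user-space CEC header
-(here assumed to be <filename>linux/cec.h</filename>) and a
-<structfield>num_log_addrs</structfield> field in the structure that requests
-how many logical addresses should be claimed:</para>
-
- <informalexample>
- <programlisting>
-struct cec_log_addrs laddrs;
-
-memset(&amp;laddrs, 0, sizeof(laddrs));
-laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
-laddrs.num_log_addrs = 1; /* assumed field: claim one logical address */
-laddrs.vendor_id = CEC_VENDOR_ID_NONE;
-strcpy(laddrs.osd_name, "Playback");
-laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
-laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
-laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
-
-if (-1 == ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &amp;laddrs)) {
-        perror("CEC_ADAP_S_LOG_ADDRS");
-        exit(EXIT_FAILURE);
-}
- </programlisting>
- </informalexample>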
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/cec-ioc-adap-g-phys-addr.xml b/Documentation/DocBook/media/v4l/cec-ioc-adap-g-phys-addr.xml
deleted file mode 100644
index d95f1785080c..000000000000
--- a/Documentation/DocBook/media/v4l/cec-ioc-adap-g-phys-addr.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-<refentry id="cec-ioc-adap-g-phys-addr">
- <refmeta>
- <refentrytitle>ioctl CEC_ADAP_G_PHYS_ADDR, CEC_ADAP_S_PHYS_ADDR</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>CEC_ADAP_G_PHYS_ADDR</refname>
- <refname>CEC_ADAP_S_PHYS_ADDR</refname>
- <refpurpose>Get or set the physical address</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>__u16 *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='cec-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC_ADAP_G_PHYS_ADDR, CEC_ADAP_S_PHYS_ADDR</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>To query the current physical address applications call the
-<constant>CEC_ADAP_G_PHYS_ADDR</constant> ioctl with a pointer to an __u16
-where the driver stores the physical address.</para>
-
- <para>To set a new physical address applications store the physical address in
-an __u16 and call the <constant>CEC_ADAP_S_PHYS_ADDR</constant> ioctl with a
-pointer to this integer. <constant>CEC_ADAP_S_PHYS_ADDR</constant> is only
-available if <constant>CEC_CAP_PHYS_ADDR</constant> is set (&ENOTTY; will be returned
-otherwise). <constant>CEC_ADAP_S_PHYS_ADDR</constant>
-can only be called by a file handle in initiator mode (see &CEC-S-MODE;), if not
-&EBUSY; will be returned.</para>
-
- <para>The physical address is a 16-bit number where each group of 4 bits
-represent a digit of the physical address a.b.c.d where the most significant
-4 bits represent 'a'. The CEC root device (usually the TV) has address 0.0.0.0.
-Every device that is hooked up to an input of the TV has address a.0.0.0 (where
-'a' is &ge; 1), devices hooked up to those in turn have addresses a.b.0.0, etc.
-So a topology of up to 5 devices deep is supported. The physical address a
-device shall use is stored in the EDID of the sink.</para>
-
-<para>For example, the EDID for each HDMI input of the TV will have a different
-physical address of the form a.0.0.0 that the sources will read out and use as
-their physical address.</para>
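-
- <para>For instance, an application can read back the current physical address
-and decode it into the a.b.c.d notation with simple shifts, or program the
-address it found in the EDID of the sink. A minimal sketch (the value 0x1000,
-i.e. 1.0.0.0, is just an assumed example):</para>
-
- <informalexample>
- <programlisting>
-__u16 pa;
-
-if (-1 == ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &amp;pa)) {
-        perror("CEC_ADAP_G_PHYS_ADDR");
-        exit(EXIT_FAILURE);
-}
-
-printf("physical address: %x.%x.%x.%x\n",
-       pa >> 12, (pa >> 8) &amp; 0xf, (pa >> 4) &amp; 0xf, pa &amp; 0xf);
-
-/* Program a new physical address, e.g. as read from the sink's EDID. */
-pa = 0x1000; /* 1.0.0.0 */
-if (-1 == ioctl(fd, CEC_ADAP_S_PHYS_ADDR, &amp;pa)) {
-        perror("CEC_ADAP_S_PHYS_ADDR");
-        exit(EXIT_FAILURE);
-}
- </programlisting>
- </informalexample>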
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/cec-ioc-dqevent.xml b/Documentation/DocBook/media/v4l/cec-ioc-dqevent.xml
deleted file mode 100644
index 697dde575cd4..000000000000
--- a/Documentation/DocBook/media/v4l/cec-ioc-dqevent.xml
+++ /dev/null
@@ -1,202 +0,0 @@
-<refentry id="cec-ioc-g-event">
- <refmeta>
- <refentrytitle>ioctl CEC_DQEVENT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>CEC_DQEVENT</refname>
- <refpurpose>Dequeue a CEC event</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct cec_event *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='cec-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC_DQEVENT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>CEC devices can send asynchronous events. These can be retrieved by calling
- the <constant>CEC_DQEVENT</constant> ioctl. If the file descriptor is in non-blocking
- mode and no event is pending, then it will return -1 and set errno to the &EAGAIN;.</para>
-
- <para>The internal event queues are per-filehandle and per-event type. If there is
- no more room in a queue then the last event is overwritten with the new one. This
- means that intermediate results can be thrown away but that the latest event is always
- available. This also means that it is possible to read two successive events that have
- the same value (e.g. two CEC_EVENT_STATE_CHANGE events with the same state). In that
- case the intermediate state changes were lost but it is guaranteed that the state
- did change in between the two events.</para>
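-
- <para>A minimal sketch of dequeuing one event, assuming the structure layout
-described in the tables below:</para>
-
- <informalexample>
- <programlisting>
-struct cec_event ev;
-
-if (-1 == ioctl(fd, CEC_DQEVENT, &amp;ev)) {
-        perror("CEC_DQEVENT");
-        exit(EXIT_FAILURE);
-}
-
-switch (ev.event) {
-case CEC_EVENT_STATE_CHANGE:
-        printf("new physical address: 0x%04x\n", ev.state_change.phys_addr);
-        break;
-case CEC_EVENT_LOST_MSGS:
-        printf("lost %u messages\n", ev.lost_msgs.lost_msgs);
-        break;
-}
- </programlisting>
- </informalexample>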
-
- <table pgwide="1" frame="none" id="cec-event-state-change">
- <title>struct <structname>cec_event_state_change</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u16</entry>
- <entry><structfield>phys_addr</structfield></entry>
- <entry>The current physical address.</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>log_addr_mask</structfield></entry>
- <entry>The current set of claimed logical addresses.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-event-lost-msgs">
- <title>struct <structname>cec_event_lost_msgs</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>lost_msgs</structfield></entry>
- <entry>Set to the number of lost messages since the filehandle
- was opened or since the last time this event was dequeued for
- this filehandle. The messages lost are the oldest messages. So
- when a new message arrives and there is no more room, then the
- oldest message is discarded to make room for the new one. The
- internal size of the message queue guarantees that all messages
- received in the last two seconds will be stored. Since messages
- should be replied to within a second according to the CEC
- specification, this is more than enough.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-event">
- <title>struct <structname>cec_event</structname></title>
- <tgroup cols="4">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u64</entry>
- <entry><structfield>ts</structfield></entry>
- <entry>Timestamp of the event in ns.</entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>event</structfield></entry>
- <entry>The CEC event type, see <xref linkend="cec-events" />.</entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Event flags, see <xref linkend="cec-event-flags" />.</entry>
- <entry></entry>
- </row>
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct cec_event_state_change</entry>
- <entry><structfield>state_change</structfield></entry>
- <entry>The new adapter state as sent by the <constant>CEC_EVENT_STATE_CHANGE</constant>
- event.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct cec_event_lost_msgs</entry>
- <entry><structfield>lost_msgs</structfield></entry>
- <entry>The number of lost messages as sent by the <constant>CEC_EVENT_LOST_MSGS</constant>
- event.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-events">
- <title>CEC Events Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_EVENT_STATE_CHANGE</constant></entry>
- <entry>1</entry>
- <entry>Generated when the CEC Adapter's state changes. When open() is
- called an initial event will be generated for that filehandle with the
- CEC Adapter's state at that time.
- </entry>
- </row>
- <row>
- <entry><constant>CEC_EVENT_LOST_MSGS</constant></entry>
- <entry>2</entry>
- <entry>Generated if one or more CEC messages were lost because the
- application didn't dequeue CEC messages fast enough.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-event-flags">
- <title>CEC Event Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_EVENT_FL_INITIAL_VALUE</constant></entry>
- <entry>1</entry>
- <entry>Set for the initial events that are generated when the device is
- opened. See the table above for which events do this. This allows
- applications to learn the initial state of the CEC adapter at open()
- time.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/cec-ioc-g-mode.xml b/Documentation/DocBook/media/v4l/cec-ioc-g-mode.xml
deleted file mode 100644
index 26b4282ad134..000000000000
--- a/Documentation/DocBook/media/v4l/cec-ioc-g-mode.xml
+++ /dev/null
@@ -1,255 +0,0 @@
-<refentry id="cec-ioc-g-mode">
- <refmeta>
- <refentrytitle>ioctl CEC_G_MODE, CEC_S_MODE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>CEC_G_MODE</refname>
- <refname>CEC_S_MODE</refname>
- <refpurpose>Get or set exclusive use of the CEC adapter</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>__u32 *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='cec-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC_G_MODE, CEC_S_MODE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>By default any filehandle can use &CEC-TRANSMIT; and &CEC-RECEIVE;, but
-in order to prevent applications from stepping on each other's toes it must be possible
-to obtain exclusive access to the CEC adapter. This ioctl sets the filehandle
-to initiator and/or follower mode which can be exclusive depending on the chosen
-mode. The initiator is the filehandle that is used
-to initiate messages, i.e. it commands other CEC devices. The follower is the filehandle
-that receives messages sent to the CEC adapter and processes them. The same filehandle
-can be both initiator and follower, or this role can be taken by two different
-filehandles.</para>
-
- <para>When a CEC message is received, then the CEC framework will decide how
-it will be processed. If the message is a reply to an earlier transmitted message,
-then the reply is sent back to the filehandle that is waiting for it. In addition
-the CEC framework will process it.</para>
-
- <para>If the message is not a reply, then the CEC framework will process it
-first. If there is no follower, then the message is just discarded and a feature
-abort is sent back to the initiator if the framework couldn't process it. If there
-is a follower, then the message is passed on to the follower who will use
-&CEC-RECEIVE; to dequeue the new message. The framework expects the follower to
-make the right decisions.</para>
-
- <para>The CEC framework will process core messages unless requested otherwise
-by the follower. The follower can enable the passthrough mode. In that case, the
-CEC framework will pass on most core messages without processing them and
-the follower will have to implement those messages. There are some messages
-that the core will always process, regardless of the passthrough mode. See
-<xref linkend="cec-core-processing" /> for details.</para>
-
- <para>If there is no initiator, then any CEC filehandle can use &CEC-TRANSMIT;.
-If there is an exclusive initiator then only that initiator can call &CEC-TRANSMIT;.
-The follower can of course always call &CEC-TRANSMIT;.</para>
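-
- <para>As a sketch, a process that wants to be the exclusive initiator and the
-exclusive follower could select that mode as follows. This assumes that an
-initiator mode and a follower mode from the tables below can be combined with a
-bitwise OR, as their numerical values suggest:</para>
-
- <informalexample>
- <programlisting>
-__u32 mode = CEC_MODE_EXCL_INITIATOR | CEC_MODE_EXCL_FOLLOWER;
-
-if (-1 == ioctl(fd, CEC_S_MODE, &amp;mode)) {
-        perror("CEC_S_MODE");
-        exit(EXIT_FAILURE);
-}
- </programlisting>
- </informalexample>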
-
- <para>Available initiator modes are:</para>
-
- <table pgwide="1" frame="none" id="cec-mode-initiator">
- <title>Initiator Modes</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_MODE_NO_INITIATOR</constant></entry>
- <entry>0x0</entry>
- <entry>This is not an initiator, i.e. it cannot transmit CEC messages
- or make any other changes to the CEC adapter.</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_INITIATOR</constant></entry>
- <entry>0x1</entry>
- <entry>This is an initiator (the default when the device is opened) and it
- can transmit CEC messages and make changes to the CEC adapter, unless there
- is an exclusive initiator.</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_EXCL_INITIATOR</constant></entry>
- <entry>0x2</entry>
- <entry>This is an exclusive initiator and this file descriptor is the only one
- that can transmit CEC messages and make changes to the CEC adapter. If someone
- else is already the exclusive initiator then an attempt to become one will return
- the &EBUSY; error.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>Available follower modes are:</para>
-
- <table pgwide="1" frame="none" id="cec-mode-follower">
- <title>Follower Modes</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_MODE_NO_FOLLOWER</constant></entry>
- <entry>0x00</entry>
- <entry>This is not a follower (the default when the device is opened).</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_FOLLOWER</constant></entry>
- <entry>0x10</entry>
- <entry>This is a follower and it will receive CEC messages unless there is
- an exclusive follower. You cannot become a follower if <constant>CEC_CAP_TRANSMIT</constant>
- is not set or if <constant>CEC_MODE_NO_INITIATOR</constant> was specified;
- &EINVAL; is returned in that case.</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_EXCL_FOLLOWER</constant></entry>
- <entry>0x20</entry>
- <entry>This is an exclusive follower and only this file descriptor will receive
- CEC messages for processing. If someone else is already the exclusive follower
- then an attempt to become one will return the &EBUSY; error. You cannot become
- a follower if <constant>CEC_CAP_TRANSMIT</constant> is not set or if
- <constant>CEC_MODE_NO_INITIATOR</constant> was specified; &EINVAL; is returned
- in that case.</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_EXCL_FOLLOWER_PASSTHRU</constant></entry>
- <entry>0x30</entry>
- <entry>This is an exclusive follower and only this file descriptor will receive
- CEC messages for processing. In addition it will put the CEC device into
- passthrough mode, allowing the exclusive follower to handle most core messages
- instead of relying on the CEC framework for that. If someone else is already the
- exclusive follower then an attempt to become one will return the &EBUSY; error.
- You cannot become a follower if <constant>CEC_CAP_TRANSMIT</constant>
- is not set or if <constant>CEC_MODE_NO_INITIATOR</constant> was specified;
- &EINVAL; is returned in that case.</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_MONITOR</constant></entry>
- <entry>0xe0</entry>
- <entry>Put the file descriptor into monitor mode. Can only be used in combination
- with <constant>CEC_MODE_NO_INITIATOR</constant>, otherwise &EINVAL; will be
- returned. In monitor mode all messages this CEC device transmits and all messages
- it receives (both broadcast messages and directed messages for one of its logical
- addresses) will be reported. This is very useful for debugging. This is only
- allowed if the process has the <constant>CAP_NET_ADMIN</constant>
- capability. If that is not set, then &EPERM; is returned.</entry>
- </row>
- <row>
- <entry><constant>CEC_MODE_MONITOR_ALL</constant></entry>
- <entry>0xf0</entry>
- <entry>Put the file descriptor into 'monitor all' mode. Can only be used in combination
- with <constant>CEC_MODE_NO_INITIATOR</constant>, otherwise &EINVAL; will be
- returned. In 'monitor all' mode all messages this CEC device transmits and all messages
- it receives, including directed messages for other CEC devices will be reported. This
- is very useful for debugging, but not all devices support this. This mode requires that
- the <constant>CEC_CAP_MONITOR_ALL</constant> capability is set, otherwise &EINVAL; is
- returned. This is only allowed if the process has the <constant>CAP_NET_ADMIN</constant>
- capability. If that is not set, then &EPERM; is returned.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>Core message processing details:</para>
-
- <table pgwide="1" frame="none" id="cec-core-processing">
- <title>Core Message Processing</title>
- <tgroup cols="2">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_MSG_GET_CEC_VERSION</constant></entry>
- <entry>When in passthrough mode this message has to be handled by userspace,
- otherwise the core will return the CEC version that was set with &CEC-ADAP-S-LOG-ADDRS;.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_GIVE_DEVICE_VENDOR_ID</constant></entry>
- <entry>When in passthrough mode this message has to be handled by userspace,
- otherwise the core will return the vendor ID that was set with &CEC-ADAP-S-LOG-ADDRS;.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_ABORT</constant></entry>
- <entry>When in passthrough mode this message has to be handled by userspace,
- otherwise the core will return a feature refused message as per the specification.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_GIVE_PHYSICAL_ADDR</constant></entry>
- <entry>When in passthrough mode this message has to be handled by userspace,
- otherwise the core will report the current physical address.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_GIVE_OSD_NAME</constant></entry>
- <entry>When in passthrough mode this message has to be handled by userspace,
- otherwise the core will report the current OSD name as was set with
- &CEC-ADAP-S-LOG-ADDRS;.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_GIVE_FEATURES</constant></entry>
- <entry>When in passthrough mode this message has to be handled by userspace,
- otherwise the core will report the current features as set with
- &CEC-ADAP-S-LOG-ADDRS;, or the message is ignored if the CEC version was
- older than 2.0.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_USER_CONTROL_PRESSED</constant></entry>
- <entry>If <constant>CEC_CAP_RC</constant> is set, then generate a remote control
- key press. This message is always passed on to userspace.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_USER_CONTROL_RELEASED</constant></entry>
- <entry>If <constant>CEC_CAP_RC</constant> is set, then generate a remote control
- key release. This message is always passed on to userspace.</entry>
- </row>
- <row>
- <entry><constant>CEC_MSG_REPORT_PHYSICAL_ADDR</constant></entry>
- <entry>The CEC framework will make note of the reported physical address
- and then just pass the message on to userspace.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/cec-ioc-receive.xml b/Documentation/DocBook/media/v4l/cec-ioc-receive.xml
deleted file mode 100644
index fde9f8678e67..000000000000
--- a/Documentation/DocBook/media/v4l/cec-ioc-receive.xml
+++ /dev/null
@@ -1,274 +0,0 @@
-<refentry id="cec-ioc-receive">
- <refmeta>
- <refentrytitle>ioctl CEC_RECEIVE, CEC_TRANSMIT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>CEC_RECEIVE</refname>
- <refname>CEC_TRANSMIT</refname>
- <refpurpose>Receive or transmit a CEC message</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct cec_msg *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='cec-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>CEC_RECEIVE, CEC_TRANSMIT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>
- Note: this documents the proposed CEC API. This API is not yet finalized and
- is currently only available as a staging kernel module.
- </para>
-
- <para>To receive a CEC message the application has to fill in the
- <structname>cec_msg</structname> structure and pass it to the
- <constant>CEC_RECEIVE</constant> ioctl. <constant>CEC_RECEIVE</constant> is
- only available if <constant>CEC_CAP_RECEIVE</constant> is set. If the
- file descriptor is in non-blocking mode and there are no received
- messages pending, then it will return -1 and set errno to the &EAGAIN;.
- If the file descriptor is in blocking mode and <structfield>timeout</structfield>
- is non-zero and no message arrived within <structfield>timeout</structfield>
- milliseconds, then it will return -1 and set errno to the &ETIMEDOUT;.</para>
-
- <para>To send a CEC message the application has to fill in the
- <structname>cec_msg</structname> structure and pass it to the
- <constant>CEC_TRANSMIT</constant> ioctl. <constant>CEC_TRANSMIT</constant> is
- only available if <constant>CEC_CAP_TRANSMIT</constant> is set.
- If there is no more room in the transmit queue, then it will return
- -1 and set errno to the &EBUSY;.</para>
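-
- <para>A rough sketch of transmitting and then receiving a message follows. The
-payload layout (initiator and destination nibbles in the first byte, the opcode
-in the second) and the Standby opcode 0x36 are taken from the CEC specification;
-the logical addresses used are just assumed example values:</para>
-
- <informalexample>
- <programlisting>
-struct cec_msg msg;
-
-memset(&amp;msg, 0, sizeof(msg));
-msg.msg[0] = 0x40; /* from logical address 4 (playback) to 0 (TV) */
-msg.msg[1] = 0x36; /* Standby opcode per the CEC specification */
-msg.len = 2;
-
-if (-1 == ioctl(fd, CEC_TRANSMIT, &amp;msg)) {
-        perror("CEC_TRANSMIT");
-        exit(EXIT_FAILURE);
-}
-
-/* Wait up to one second for the next incoming message. */
-memset(&amp;msg, 0, sizeof(msg));
-msg.timeout = 1000;
-if (0 == ioctl(fd, CEC_RECEIVE, &amp;msg))
-        printf("received a %u byte message\n", msg.len);
- </programlisting>
- </informalexample>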
-
- <table pgwide="1" frame="none" id="cec-msg">
- <title>struct <structname>cec_msg</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u64</entry>
- <entry><structfield>ts</structfield></entry>
- <entry>Timestamp of when the message was transmitted in ns in the case
- of <constant>CEC_TRANSMIT</constant> with <structfield>reply</structfield>
- set to 0, or the timestamp of the received message in all other cases.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>len</structfield></entry>
- <entry>The length of the message. For <constant>CEC_TRANSMIT</constant> this
- is filled in by the application. The driver will fill this in for
- <constant>CEC_RECEIVE</constant> and for <constant>CEC_TRANSMIT</constant>
- it will be filled in with the length of the reply message if
- <structfield>reply</structfield> was set.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>timeout</structfield></entry>
- <entry>The timeout in milliseconds. This is the time the device will wait for a message to
- be received before timing out. If it is set to 0, then it will wait indefinitely when it
- is called by <constant>CEC_RECEIVE</constant>. If it is 0 and it is called by
- <constant>CEC_TRANSMIT</constant>, then it will be replaced by 1000 if the
- <structfield>reply</structfield> is non-zero or ignored if <structfield>reply</structfield>
- is 0.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>sequence</structfield></entry>
- <entry>The sequence number is automatically assigned by the CEC
- framework for all transmitted messages. It can be later used by the
- framework to generate an event if a reply for a message was
- requested and the message was transmitted in a non-blocking mode.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags. No flags are defined yet, so set this to 0.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>rx_status</structfield></entry>
- <entry>The status bits of the received message. See <xref linkend="cec-rx-status" />
- for the possible status values. It is 0 if this message was transmitted, not
- received, unless this is the reply to a transmitted message. In that case both
- <structfield>rx_status</structfield> and <structfield>tx_status</structfield>
- are set.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>tx_status</structfield></entry>
- <entry>The status bits of the transmitted message. See <xref linkend="cec-tx-status" />
- for the possible status values. It is 0 if this message was received, not
- transmitted.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>msg</structfield>[16]</entry>
- <entry>The message payload. For <constant>CEC_TRANSMIT</constant> this
- is filled in by the application. The driver will fill this in for
- <constant>CEC_RECEIVE</constant> and for <constant>CEC_TRANSMIT</constant>
- it will be filled in with the payload of the reply message if
- <structfield>reply</structfield> was set.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>reply</structfield></entry>
- <entry>Wait until this message is replied to. If <structfield>reply</structfield>
- is 0 and the <structfield>timeout</structfield> is 0, then don't wait for a reply but
- return after transmitting the message. If there was an error as indicated by a non-zero
- <structfield>tx_status</structfield> field, then <structfield>reply</structfield> and
- <structfield>timeout</structfield> are both set to 0 by the driver. Ignored by
- <constant>CEC_RECEIVE</constant>. The case where <structfield>reply</structfield> is 0
- (this is the opcode for the Feature Abort message) and <structfield>timeout</structfield>
- is non-zero is specifically allowed: the message is transmitted and the driver waits up to <structfield>timeout</structfield>
- milliseconds for a Feature Abort reply. In this case <structfield>rx_status</structfield>
- will either be set to <constant>CEC_RX_STATUS_TIMEOUT</constant> or
- <constant>CEC_RX_STATUS_FEATURE_ABORT</constant>.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>tx_arb_lost_cnt</structfield></entry>
- <entry>A counter of the number of transmit attempts that resulted in the
- Arbitration Lost error. This is only set if the hardware supports this, otherwise
- it is always 0. This counter is only valid if the <constant>CEC_TX_STATUS_ARB_LOST</constant>
- status bit is set.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>tx_nack_cnt</structfield></entry>
- <entry>A counter of the number of transmit attempts that resulted in the
- Not Acknowledged error. This is only set if the hardware supports this, otherwise
- it is always 0. This counter is only valid if the <constant>CEC_TX_STATUS_NACK</constant>
- status bit is set.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>tx_low_drive_cnt</structfield></entry>
- <entry>A counter of the number of transmit attempts that resulted in the
- Low Drive error. This is only set if the hardware supports this, otherwise
- it is always 0. This counter is only valid if the <constant>CEC_TX_STATUS_LOW_DRIVE</constant>
- status bit is set.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>tx_error_cnt</structfield></entry>
- <entry>A counter of the number of transmit errors other than Arbitration Lost
- or Not Acknowledged. This is only set if the hardware supports this, otherwise
- it is always 0. This counter is only valid if the <constant>CEC_TX_STATUS_ERROR</constant>
- status bit is set.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-tx-status">
- <title>CEC Transmit Status</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_TX_STATUS_OK</constant></entry>
- <entry>0x01</entry>
- <entry>The message was transmitted successfully. This is mutually exclusive with
- <constant>CEC_TX_STATUS_MAX_RETRIES</constant>. Other bits can still be set if
- earlier attempts met with failure before the transmit was eventually successful.</entry>
- </row>
- <row>
- <entry><constant>CEC_TX_STATUS_ARB_LOST</constant></entry>
- <entry>0x02</entry>
- <entry>CEC line arbitration was lost.</entry>
- </row>
- <row>
- <entry><constant>CEC_TX_STATUS_NACK</constant></entry>
- <entry>0x04</entry>
- <entry>Message was not acknowledged.</entry>
- </row>
- <row>
- <entry><constant>CEC_TX_STATUS_LOW_DRIVE</constant></entry>
- <entry>0x08</entry>
- <entry>Low drive was detected on the CEC bus. This indicates that a follower
- detected an error on the bus and requests a retransmission.</entry>
- </row>
- <row>
- <entry><constant>CEC_TX_STATUS_ERROR</constant></entry>
- <entry>0x10</entry>
- <entry>Some error occurred. This is used for any errors that do not
- fit the previous two, either because the hardware could not tell
- which error occurred, or because the hardware tested for other conditions
- besides those two.</entry>
- </row>
- <row>
- <entry><constant>CEC_TX_STATUS_MAX_RETRIES</constant></entry>
- <entry>0x20</entry>
- <entry>The transmit failed after one or more retries. This status bit is mutually
- exclusive with <constant>CEC_TX_STATUS_OK</constant>. Other bits can still be set
- to explain which failures were seen.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="cec-rx-status">
- <title>CEC Receive Status</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>CEC_RX_STATUS_OK</constant></entry>
- <entry>0x01</entry>
- <entry>The message was received successfully.</entry>
- </row>
- <row>
- <entry><constant>CEC_RX_STATUS_TIMEOUT</constant></entry>
- <entry>0x02</entry>
- <entry>The reply to an earlier transmitted message timed out.</entry>
- </row>
- <row>
- <entry><constant>CEC_RX_STATUS_FEATURE_ABORT</constant></entry>
- <entry>0x04</entry>
- <entry>The message was received successfully but the reply was
- <constant>CEC_MSG_FEATURE_ABORT</constant>. This status is only
- set if this message was the reply to an earlier transmitted
- message.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/common.xml b/Documentation/DocBook/media/v4l/common.xml
deleted file mode 100644
index 8b5e014224d6..000000000000
--- a/Documentation/DocBook/media/v4l/common.xml
+++ /dev/null
@@ -1,1102 +0,0 @@
- <title>Common API Elements</title>
-
- <para>Programming a V4L2 device consists of these
-steps:</para>
-
- <itemizedlist>
- <listitem>
- <para>Opening the device</para>
- </listitem>
- <listitem>
- <para>Changing device properties, selecting a video and audio
-input, video standard, picture brightness, and so on.</para>
- </listitem>
- <listitem>
- <para>Negotiating a data format</para>
- </listitem>
- <listitem>
- <para>Negotiating an input/output method</para>
- </listitem>
- <listitem>
- <para>The actual input/output loop</para>
- </listitem>
- <listitem>
- <para>Closing the device</para>
- </listitem>
- </itemizedlist>
-
- <para>In practice most steps are optional and can be executed out of
-order. It depends on the V4L2 device type; you can read about the
-details in <xref linkend="devices" />. In this chapter we will discuss
-the basic concepts applicable to all devices.</para>
-
- <section id="open">
- <title>Opening and Closing Devices</title>
-
- <section>
- <title>Device Naming</title>
-
- <para>V4L2 drivers are implemented as kernel modules, loaded
-manually by the system administrator or automatically when a device is
-first discovered. The driver modules plug into the "videodev" kernel
-module. It provides helper functions and a common application
-interface specified in this document.</para>
-
- <para>Each driver thus loaded registers one or more device nodes
-with major number 81 and a minor number between 0 and 255. Minor numbers
-are allocated dynamically unless the kernel is compiled with the kernel
-option CONFIG_VIDEO_FIXED_MINOR_RANGES. In that case minor numbers are
-allocated in ranges depending on the device node type (video, radio, etc.).</para>
-
- <para>Many drivers support "video_nr", "radio_nr" or "vbi_nr"
-module options to select specific video/radio/vbi node numbers. This allows
-the user to request that the device node is named e.g. /dev/video5 instead
-of leaving it to chance. When the driver supports multiple devices of the same
-type more than one device node number can be assigned, separated by commas:
- <informalexample>
- <screen>
-&gt; modprobe mydriver video_nr=0,1 radio_nr=0,1</screen>
- </informalexample></para>
-
- <para>In <filename>/etc/modules.conf</filename> this may be
-written as: <informalexample>
- <screen>
-options mydriver video_nr=0,1 radio_nr=0,1
- </screen>
- </informalexample> When no device node number is given as module
-option the driver supplies a default.</para>
-
- <para>Normally udev will create the device nodes in /dev automatically
-for you. If udev is not installed, then you need to enable the
-CONFIG_VIDEO_FIXED_MINOR_RANGES kernel option in order to be able to correctly
-relate a minor number to a device node number. I.e., you need to be certain
-that minor number 5 maps to device node name video5. With this kernel option
-different device types have different minor number ranges. These ranges are
-listed in <xref linkend="devices" />.
-</para>
-
- <para>The creation of character special files (with
-<application>mknod</application>) is a privileged operation and
-devices cannot be opened by major and minor number. That means
-applications cannot <emphasis>reliably</emphasis> scan for loaded or
-installed drivers. The user must enter a device name, or the
-application can try the conventional device names.</para>
- </section>
-
- <section id="related">
- <title>Related Devices</title>
-
- <para>Devices can support several functions. For example
-video capturing, VBI capturing and radio support.</para>
-
- <para>The V4L2 API creates different nodes for each of these functions.</para>
-
- <para>The V4L2 API was designed with the idea that one device node could support
-all functions. However, in practice this never worked: this 'feature'
-was never used by applications and many drivers did not support it and if
-they did it was certainly never tested. In addition, switching a device
-node between different functions only works when using the streaming I/O
-API, not with the read()/write() API.</para>
-
- <para>Today each device node supports just one function.</para>
-
- <para>Besides video input or output the hardware may also
-support audio sampling or playback. If so, these functions are
-implemented as ALSA PCM devices with optional ALSA audio mixer
-devices.</para>
-
- <para>One problem with all these devices is that the V4L2 API
-makes no provisions to find these related devices. Some really
-complex devices use the Media Controller (see <xref linkend="media_controller" />)
-which can be used for this purpose. But most drivers do not use it,
-and while some code exists that uses sysfs to discover related devices
-(see libmedia_dev in the <ulink url="http://git.linuxtv.org/cgit.cgi/v4l-utils.git/">v4l-utils</ulink>
-git repository), there is no library yet that can provide a single API towards
-both Media Controller-based devices and devices that do not use the Media Controller.
-If you want to work on this please write to the linux-media mailing list: &v4l-ml;.</para>
- </section>
-
- <section>
- <title>Multiple Opens</title>
-
- <para>V4L2 devices can be opened more than once.<footnote><para>
-There are still some old and obscure drivers that have not been updated to
-allow for multiple opens. This implies that for such drivers &func-open; can
-return an &EBUSY; when the device is already in use.</para></footnote>
-When this is supported by the driver, users can for example start a
-"panel" application to change controls like brightness or audio
-volume, while another application captures video and audio. In other words, panel
-applications are comparable to an ALSA audio mixer application.
-Just opening a V4L2 device should not change the state of the device.<footnote>
-<para>Unfortunately, opening a radio device often switches the state of the
-device to radio mode in many drivers. This behavior should be fixed eventually
-as it violates the V4L2 specification.</para></footnote></para>
-
- <para>Once an application has allocated the memory buffers needed for
-streaming data (by calling the &VIDIOC-REQBUFS; or &VIDIOC-CREATE-BUFS; ioctls,
-or implicitly by calling the &func-read; or &func-write; functions) that
-application (filehandle) becomes the owner of the device. It is no longer
-allowed to make changes that would affect the buffer sizes (e.g. by calling
-the &VIDIOC-S-FMT; ioctl) and other applications are no longer allowed to allocate
-buffers or start or stop streaming. The &EBUSY; will be returned instead.</para>
-
- <para>Merely opening a V4L2 device does not grant exclusive
-access.<footnote>
- <para>Drivers could recognize the
-<constant>O_EXCL</constant> open flag. Presently this is not required,
-so applications cannot know if it really works.</para>
- </footnote> Initiating data exchange however assigns the right
-to read or write the requested type of data, and to change related
-properties, to this file descriptor. Applications can request
-additional access privileges using the priority mechanism described in
-<xref linkend="app-pri" />.</para>
- </section>
-
- <section>
- <title>Shared Data Streams</title>
-
- <para>V4L2 drivers should not support multiple applications
-reading or writing the same data stream on a device by copying
-buffers, time multiplexing or similar means. This is better handled by
-a proxy application in user space.</para>
- </section>
-
- <section>
- <title>Functions</title>
-
- <para>To open and close V4L2 devices applications use the
-&func-open; and &func-close; function, respectively. Devices are
-programmed using the &func-ioctl; function as explained in the
-following sections.</para>
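-
- <para>A minimal sketch, assuming the conventional
-<filename>/dev/video0</filename> device node name:</para>
-
- <informalexample>
- <programlisting>
-int fd;
-
-fd = open("/dev/video0", O_RDWR);
-if (-1 == fd) {
-        perror("open");
-        exit(EXIT_FAILURE);
-}
-
-/* ... ioctl() calls as described in the following sections ... */
-
-close(fd);
- </programlisting>
- </informalexample>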
- </section>
- </section>
-
- <section id="querycap">
- <title>Querying Capabilities</title>
-
- <para>Because V4L2 covers a wide variety of devices not all
-aspects of the API are equally applicable to all types of devices.
-Furthermore devices of the same type have different capabilities and
-this specification permits the omission of a few complicated and less
-important parts of the API.</para>
-
- <para>The &VIDIOC-QUERYCAP; ioctl is available to check if the kernel
-device is compatible with this specification, and to query the <link
-linkend="devices">functions</link> and <link linkend="io">I/O
-methods</link> supported by the device.</para>
-
- <para>Starting with kernel version 3.1, &VIDIOC-QUERYCAP; will return the
-V4L2 API version used by the driver, which generally matches the kernel version.
-There is no need to use &VIDIOC-QUERYCAP; to check whether a specific ioctl is
-supported; the V4L2 core now returns ENOTTY if a driver doesn't provide
-support for an ioctl.</para>
-
- <para>Other features can be queried
-by calling the respective ioctl, for example &VIDIOC-ENUMINPUT;
-to learn about the number, types and names of video connectors on the
-device. Although abstraction is a major objective of this API, the
-&VIDIOC-QUERYCAP; ioctl also allows driver specific applications to reliably identify
-the driver.</para>
-
- <para>All V4L2 drivers must support
-<constant>VIDIOC_QUERYCAP</constant>. Applications should always call
-this ioctl after opening the device.</para>
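-
- <para>A minimal sketch for a video capture device:</para>
-
- <informalexample>
- <programlisting>
-&v4l2-capability; cap;
-
-if (-1 == ioctl(fd, &VIDIOC-QUERYCAP;, &amp;cap)) {
-        perror("VIDIOC_QUERYCAP");
-        exit(EXIT_FAILURE);
-}
-
-printf("driver: %s, card: %s\n", cap.driver, cap.card);
-
-if (!(cap.capabilities &amp; V4L2_CAP_VIDEO_CAPTURE))
-        fprintf(stderr, "not a video capture device\n");
- </programlisting>
- </informalexample>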
- </section>
-
- <section id="app-pri">
- <title>Application Priority</title>
-
- <para>When multiple applications share a device it may be
-desirable to assign them different priorities. Contrary to the
-traditional "rm -rf /" school of thought a video recording application
-could for example block other applications from changing video
-controls or switching the current TV channel. Another objective is to
-permit low priority applications working in background, which can be
-preempted by user controlled applications and automatically regain
-control of the device at a later time.</para>
-
- <para>Since these features cannot be implemented entirely in user
-space V4L2 defines the &VIDIOC-G-PRIORITY; and &VIDIOC-S-PRIORITY;
-ioctls to request and query the access priority associated with a file
-descriptor. Opening a device assigns a medium priority, compatible
-with earlier versions of V4L2 and drivers not supporting these ioctls.
-Applications requiring a different priority will usually call
-<constant>VIDIOC_S_PRIORITY</constant> after verifying the device with
-the &VIDIOC-QUERYCAP; ioctl.</para>
-
- <para>Ioctls changing driver properties, such as &VIDIOC-S-INPUT;,
-return an &EBUSY; after another application obtained higher priority.</para>
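-
- <para>A sketch of requesting record priority, assuming the priority
-enumeration defined in the V4L2 header:</para>
-
- <informalexample>
- <programlisting>
-enum v4l2_priority prio = V4L2_PRIORITY_RECORD;
-
-if (-1 == ioctl(fd, &VIDIOC-S-PRIORITY;, &amp;prio)) {
-        perror("VIDIOC_S_PRIORITY");
-        exit(EXIT_FAILURE);
-}
- </programlisting>
- </informalexample>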
- </section>
-
- <section id="video">
- <title>Video Inputs and Outputs</title>
-
- <para>Video inputs and outputs are physical connectors of a
-device. These can be for example RF connectors (antenna/cable), CVBS
-a.k.a. Composite Video, S-Video or RGB connectors. Video and VBI
-capture devices have inputs. Video and VBI output devices have outputs,
-at least one each. Radio devices have no video inputs or outputs.</para>
-
- <para>To learn about the number and attributes of the
-available inputs and outputs applications can enumerate them with the
-&VIDIOC-ENUMINPUT; and &VIDIOC-ENUMOUTPUT; ioctl, respectively. The
-&v4l2-input; returned by the <constant>VIDIOC_ENUMINPUT</constant>
-ioctl also contains signal status information applicable when the
-current video input is queried.</para>
-
- <para>The &VIDIOC-G-INPUT; and &VIDIOC-G-OUTPUT; ioctls return the
-index of the current video input or output. To select a different
-input or output applications call the &VIDIOC-S-INPUT; and
-&VIDIOC-S-OUTPUT; ioctls. Drivers must implement all the input ioctls
-when the device has one or more inputs, all the output ioctls when the
-device has one or more outputs.</para>
-
- <example>
- <title>Information about the current video input</title>
-
- <programlisting>
-&v4l2-input; input;
-int index;
-
-if (-1 == ioctl(fd, &VIDIOC-G-INPUT;, &amp;index)) {
- perror("VIDIOC_G_INPUT");
- exit(EXIT_FAILURE);
-}
-
-memset(&amp;input, 0, sizeof(input));
-input.index = index;
-
-if (-1 == ioctl(fd, &VIDIOC-ENUMINPUT;, &amp;input)) {
- perror("VIDIOC_ENUMINPUT");
- exit(EXIT_FAILURE);
-}
-
-printf("Current input: %s\n", input.name);
- </programlisting>
- </example>
-
- <example>
- <title>Switching to the first video input</title>
-
- <programlisting>
-int index;
-
-index = 0;
-
-if (-1 == ioctl(fd, &VIDIOC-S-INPUT;, &amp;index)) {
- perror("VIDIOC_S_INPUT");
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>
- </section>
-
- <section id="audio">
- <title>Audio Inputs and Outputs</title>
-
- <para>Audio inputs and outputs are physical connectors of a
-device. Video capture devices have inputs, output devices have
-outputs, zero or more each. Radio devices have no audio inputs or
-outputs. They have exactly one tuner which in fact
-<emphasis>is</emphasis> an audio source, but this API associates
-tuners with video inputs or outputs only, and radio devices have
-none of these.<footnote>
- <para>Actually &v4l2-audio; ought to have a
-<structfield>tuner</structfield> field like &v4l2-input;, not only
-making the API more consistent but also permitting radio devices with
-multiple tuners.</para>
- </footnote> A connector on a TV card to loop back the received
-audio signal to a sound card is not considered an audio output.</para>
-
- <para>Audio and video inputs and outputs are associated. Selecting
-a video source also selects an audio source. This is most evident when
-the video and audio source is a tuner. Further audio connectors can
-combine with more than one video input or output. Assuming two
-composite video inputs and two audio inputs exist, there may be up to
-four valid combinations. The relation of video and audio connectors
-is defined in the <structfield>audioset</structfield> field of the
-respective &v4l2-input; or &v4l2-output;, where each bit represents
-the index number, starting at zero, of one audio input or output.</para>
-
- <para>To learn about the number and attributes of the
-available inputs and outputs applications can enumerate them with the
-&VIDIOC-ENUMAUDIO; and &VIDIOC-ENUMAUDOUT; ioctl, respectively. The
-&v4l2-audio; returned by the <constant>VIDIOC_ENUMAUDIO</constant> ioctl
-also contains signal status information applicable when the current
-audio input is queried.</para>
-
- <para>The &VIDIOC-G-AUDIO; and &VIDIOC-G-AUDOUT; ioctls report
-the current audio input and output, respectively. Note that, unlike
-&VIDIOC-G-INPUT; and &VIDIOC-G-OUTPUT; these ioctls return a structure
-as <constant>VIDIOC_ENUMAUDIO</constant> and
-<constant>VIDIOC_ENUMAUDOUT</constant> do, not just an index.</para>
-
- <para>To select an audio input and change its properties
-applications call the &VIDIOC-S-AUDIO; ioctl. To select an audio
-output (which presently has no changeable properties) applications
-call the &VIDIOC-S-AUDOUT; ioctl.</para>
-
- <para>Drivers must implement all audio input ioctls when the device
-has multiple selectable audio inputs, all audio output ioctls when the
-device has multiple selectable audio outputs. When the device has any
-audio inputs or outputs the driver must set the <constant>V4L2_CAP_AUDIO</constant>
-flag in the &v4l2-capability; returned by the &VIDIOC-QUERYCAP; ioctl.</para>
-
- <example>
- <title>Information about the current audio input</title>
-
- <programlisting>
-&v4l2-audio; audio;
-
-memset(&amp;audio, 0, sizeof(audio));
-
-if (-1 == ioctl(fd, &VIDIOC-G-AUDIO;, &amp;audio)) {
- perror("VIDIOC_G_AUDIO");
- exit(EXIT_FAILURE);
-}
-
-printf("Current input: %s\n", audio.name);
- </programlisting>
- </example>
-
- <example>
- <title>Switching to the first audio input</title>
-
- <programlisting>
-&v4l2-audio; audio;
-
-memset(&amp;audio, 0, sizeof(audio)); /* clear audio.mode, audio.reserved */
-
-audio.index = 0;
-
-if (-1 == ioctl(fd, &VIDIOC-S-AUDIO;, &amp;audio)) {
- perror("VIDIOC_S_AUDIO");
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>
- </section>
-
- <section id="tuner">
- <title>Tuners and Modulators</title>
-
- <section>
- <title>Tuners</title>
-
- <para>Video input devices can have one or more tuners
-demodulating a RF signal. Each tuner is associated with one or more
-video inputs, depending on the number of RF connectors on the tuner.
-The <structfield>type</structfield> field of the respective
-&v4l2-input; returned by the &VIDIOC-ENUMINPUT; ioctl is set to
-<constant>V4L2_INPUT_TYPE_TUNER</constant> and its
-<structfield>tuner</structfield> field contains the index number of
-the tuner.</para>
-
- <para>Radio input devices have exactly one tuner with index zero, no
-video inputs.</para>
-
- <para>To query and change tuner properties applications use the
-&VIDIOC-G-TUNER; and &VIDIOC-S-TUNER; ioctls, respectively. The
-&v4l2-tuner; returned by <constant>VIDIOC_G_TUNER</constant> also
-contains signal status information applicable when the tuner of the
-current video or radio input is queried. Note that
-<constant>VIDIOC_S_TUNER</constant> does not switch the current tuner
-when there is more than one. The tuner is solely determined by
-the current video input. Drivers must support both ioctls and set the
-<constant>V4L2_CAP_TUNER</constant> flag in the &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl when the device has one or
-more tuners.</para>
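-
- <para>A sketch of querying the tuner of the current input:</para>
-
- <informalexample>
- <programlisting>
-&v4l2-tuner; tuner;
-
-memset(&amp;tuner, 0, sizeof(tuner));
-tuner.index = 0;
-
-if (-1 == ioctl(fd, &VIDIOC-G-TUNER;, &amp;tuner)) {
-        perror("VIDIOC_G_TUNER");
-        exit(EXIT_FAILURE);
-}
-
-printf("tuner: %s, signal strength: %u\n", tuner.name, tuner.signal);
- </programlisting>
- </informalexample>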
- </section>
-
- <section>
- <title>Modulators</title>
-
- <para>Video output devices can have one or more modulators that
-modulate a video signal for radiation or connection to the antenna
-input of a TV set or video recorder. Each modulator is associated with
-one or more video outputs, depending on the number of RF connectors on
-the modulator. The <structfield>type</structfield> field of the
-respective &v4l2-output; returned by the &VIDIOC-ENUMOUTPUT; ioctl is
-set to <constant>V4L2_OUTPUT_TYPE_MODULATOR</constant> and its
-<structfield>modulator</structfield> field contains the index number
-of the modulator.</para>
-
- <para>Radio output devices have exactly one modulator with index
-zero, no video outputs.</para>
-
- <para>A video or radio device cannot support both a tuner and a
-modulator. Two separate device nodes will have to be used for such
-hardware, one that supports the tuner functionality and one that supports
-the modulator functionality. The reason is a limitation with the
-&VIDIOC-S-FREQUENCY; ioctl where you cannot specify whether the frequency
-is for a tuner or a modulator.</para>
-
- <para>To query and change modulator properties applications use
-the &VIDIOC-G-MODULATOR; and &VIDIOC-S-MODULATOR; ioctl. Note that
-<constant>VIDIOC_S_MODULATOR</constant> does not switch the current
-modulator when there is more than one. The modulator is solely
-determined by the current video output. Drivers must support both
-ioctls and set the <constant>V4L2_CAP_MODULATOR</constant> flag in
-the &v4l2-capability; returned by the &VIDIOC-QUERYCAP; ioctl when the
-device has one or more modulators.</para>
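-
- <para>Similarly, a sketch of querying the first modulator:</para>
-
- <informalexample>
- <programlisting>
-struct v4l2_modulator mod;
-
-memset(&amp;mod, 0, sizeof(mod));
-mod.index = 0;
-
-if (-1 == ioctl(fd, &VIDIOC-G-MODULATOR;, &amp;mod)) {
-        perror("VIDIOC_G_MODULATOR");
-        exit(EXIT_FAILURE);
-}
-
-printf("modulator: %s\n", mod.name);
- </programlisting>
- </informalexample>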
- </section>
-
- <section>
- <title>Radio Frequency</title>
-
- <para>To get and set the tuner or modulator radio frequency
-applications use the &VIDIOC-G-FREQUENCY; and &VIDIOC-S-FREQUENCY;
-ioctl which both take a pointer to a &v4l2-frequency;. These ioctls
-are used for TV and radio devices alike. Drivers must support both
-ioctls when the tuner or modulator ioctls are supported, or
-when the device is a radio device.</para>
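-
- <para>A sketch of reading the current frequency of tuner 0. The unit of the
-<structfield>frequency</structfield> field depends on the tuner capabilities;
-for TV tuners it is typically 62.5 kHz per step:</para>
-
- <informalexample>
- <programlisting>
-&v4l2-frequency; freq;
-
-memset(&amp;freq, 0, sizeof(freq));
-freq.tuner = 0;
-
-if (-1 == ioctl(fd, &VIDIOC-G-FREQUENCY;, &amp;freq)) {
-        perror("VIDIOC_G_FREQUENCY");
-        exit(EXIT_FAILURE);
-}
-
-printf("current frequency: %u\n", freq.frequency);
- </programlisting>
- </informalexample>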
- </section>
- </section>
-
- <section id="standard">
- <title>Video Standards</title>
-
- <para>Video devices typically support one or more different video
-standards or variations of standards. Each video input and output may
-support another set of standards. This set is reported by the
-<structfield>std</structfield> field of &v4l2-input; and
-&v4l2-output; returned by the &VIDIOC-ENUMINPUT; and
-&VIDIOC-ENUMOUTPUT; ioctls, respectively.</para>
-
- <para>V4L2 defines one bit for each analog video standard
-currently in use worldwide, and sets aside bits for driver defined
-standards, &eg; hybrid standards to watch NTSC video tapes on PAL TVs
-and vice versa. Applications can use the predefined bits to select a
-particular standard, although presenting the user a menu of supported
-standards is preferred. To enumerate and query the attributes of the
-supported standards applications use the &VIDIOC-ENUMSTD; ioctl.</para>
-
- <para>Many of the defined standards are actually just variations
-of a few major standards. The hardware may in fact not distinguish
-between them, or do so internally and switch automatically. Therefore
-enumerated standards also contain sets of one or more standard
-bits.</para>
-
- <para>Assume a hypothetical tuner capable of demodulating B/PAL,
-G/PAL and I/PAL signals. The first enumerated standard is a set of B
-and G/PAL, switched automatically depending on the selected radio
-frequency in UHF or VHF band. Enumeration gives a "PAL-B/G" or "PAL-I"
-choice. Similarly, a Composite input may collapse standards, enumerating
-"PAL-B/G/H/I", "NTSC-M" and "SECAM-D/K".<footnote>
- <para>Some users are already confused by technical terms PAL,
-NTSC and SECAM. There is no point asking them to distinguish between
-B, G, D, or K when the software or hardware can do that
-automatically.</para>
- </footnote></para>
-
- <para>To query and select the standard used by the current video
-input or output applications call the &VIDIOC-G-STD; and
-&VIDIOC-S-STD; ioctl, respectively. The <emphasis>received</emphasis>
-standard can be sensed with the &VIDIOC-QUERYSTD; ioctl. Note that the
-parameter of all these ioctls is a pointer to a &v4l2-std-id; type
-(a standard set), <emphasis>not</emphasis> an index into the standard
-enumeration. Drivers must implement all video standard ioctls
-when the device has one or more video inputs or outputs.</para>
-
- <para>Special rules apply to devices such as USB cameras where the notion of video
-standards makes little sense. More generally for any capture or output device
-which is: <itemizedlist>
- <listitem>
- <para>incapable of capturing fields or frames at the nominal
-rate of the video standard, or</para>
- </listitem>
- <listitem>
- <para>that does not support the video standard formats at all.</para>
- </listitem>
- </itemizedlist> Here the driver shall set the
-<structfield>std</structfield> field of &v4l2-input; and &v4l2-output;
-to zero and the <constant>VIDIOC_G_STD</constant>,
-<constant>VIDIOC_S_STD</constant>,
-<constant>VIDIOC_QUERYSTD</constant> and
-<constant>VIDIOC_ENUMSTD</constant> ioctls shall return the
-&ENOTTY; or the &EINVAL;.</para>
- <para>Applications can make use of the <xref linkend="input-capabilities" /> and
-<xref linkend="output-capabilities"/> flags to determine whether the video standard ioctls
-can be used with the given input or output.</para>
-
- <example>
- <title>Information about the current video standard</title>
-
- <programlisting>
-&v4l2-std-id; std_id;
-&v4l2-standard; standard;
-
-if (-1 == ioctl(fd, &VIDIOC-G-STD;, &amp;std_id)) {
- /* Note when VIDIOC_ENUMSTD always returns ENOTTY this
- is no video device or it falls under the USB exception,
- and VIDIOC_G_STD returning ENOTTY is no error. */
-
- perror("VIDIOC_G_STD");
- exit(EXIT_FAILURE);
-}
-
-memset(&amp;standard, 0, sizeof(standard));
-standard.index = 0;
-
-while (0 == ioctl(fd, &VIDIOC-ENUMSTD;, &amp;standard)) {
- if (standard.id &amp; std_id) {
- printf("Current video standard: %s\n", standard.name);
- exit(EXIT_SUCCESS);
- }
-
- standard.index++;
-}
-
-/* EINVAL indicates the end of the enumeration, which cannot be
- empty unless this device falls under the USB exception. */
-
-if (errno == EINVAL || standard.index == 0) {
- perror("VIDIOC_ENUMSTD");
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>
-
- <example>
- <title>Listing the video standards supported by the current
-input</title>
-
- <programlisting>
-&v4l2-input; input;
-&v4l2-standard; standard;
-
-memset(&amp;input, 0, sizeof(input));
-
-if (-1 == ioctl(fd, &VIDIOC-G-INPUT;, &amp;input.index)) {
- perror("VIDIOC_G_INPUT");
- exit(EXIT_FAILURE);
-}
-
-if (-1 == ioctl(fd, &VIDIOC-ENUMINPUT;, &amp;input)) {
-	perror("VIDIOC_ENUMINPUT");
- exit(EXIT_FAILURE);
-}
-
-printf("Current input %s supports:\n", input.name);
-
-memset(&amp;standard, 0, sizeof(standard));
-standard.index = 0;
-
-while (0 == ioctl(fd, &VIDIOC-ENUMSTD;, &amp;standard)) {
- if (standard.id &amp; input.std)
- printf("%s\n", standard.name);
-
- standard.index++;
-}
-
-/* EINVAL indicates the end of the enumeration, which cannot be
- empty unless this device falls under the USB exception. */
-
-if (errno != EINVAL || standard.index == 0) {
- perror("VIDIOC_ENUMSTD");
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>
-
- <example>
- <title>Selecting a new video standard</title>
-
- <programlisting>
-&v4l2-input; input;
-&v4l2-std-id; std_id;
-
-memset(&amp;input, 0, sizeof(input));
-
-if (-1 == ioctl(fd, &VIDIOC-G-INPUT;, &amp;input.index)) {
- perror("VIDIOC_G_INPUT");
- exit(EXIT_FAILURE);
-}
-
-if (-1 == ioctl(fd, &VIDIOC-ENUMINPUT;, &amp;input)) {
-	perror("VIDIOC_ENUMINPUT");
- exit(EXIT_FAILURE);
-}
-
-if (0 == (input.std &amp; V4L2_STD_PAL_BG)) {
- fprintf(stderr, "Oops. B/G PAL is not supported.\n");
- exit(EXIT_FAILURE);
-}
-
-/* Note this is also supposed to work when only B
- <emphasis>or</emphasis> G/PAL is supported. */
-
-std_id = V4L2_STD_PAL_BG;
-
-if (-1 == ioctl(fd, &VIDIOC-S-STD;, &amp;std_id)) {
- perror("VIDIOC_S_STD");
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>
- </section>
- <section id="dv-timings">
- <title>Digital Video (DV) Timings</title>
- <para>
- The video standards discussed so far have been dealing with Analog TV and the
-corresponding video timings. Today there are many more different hardware interfaces
-such as High Definition TV interfaces (HDMI), VGA, DVI connectors etc., that carry
-video signals and there is a need to extend the API to select the video timings
-for these interfaces. Since it is not possible to extend the &v4l2-std-id; due to
-the limited bits available, a new set of ioctls was added to set/get video timings at
-the input and output.</para>
-
- <para>These ioctls deal with the detailed digital video timings that define
-each video format. This includes parameters such as the active video width and height,
-signal polarities, frontporches, backporches, sync widths etc. The <filename>linux/v4l2-dv-timings.h</filename>
-header can be used to get the timings of the formats in the <xref linkend="cea861" /> and
-<xref linkend="vesadmt" /> standards.
- </para>
-
- <para>To enumerate and query the attributes of the DV timings supported by a device
- applications use the &VIDIOC-ENUM-DV-TIMINGS; and &VIDIOC-DV-TIMINGS-CAP; ioctls.
- To set DV timings for the device applications use the
-&VIDIOC-S-DV-TIMINGS; ioctl and to get current DV timings they use the
-&VIDIOC-G-DV-TIMINGS; ioctl. To detect the DV timings as seen by the video receiver applications
-use the &VIDIOC-QUERY-DV-TIMINGS; ioctl.</para>
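-
-	<para>A minimal sketch, assuming a video receiver such as an HDMI
-input: detect the incoming timings and make them the active timings. The
-BT.656/1120 timings union member is assumed here; real applications should
-also check the capabilities reported by &VIDIOC-DV-TIMINGS-CAP;.</para>
-
-	<programlisting>
-&v4l2-dv-timings; timings;
-
-memset(&amp;timings, 0, sizeof(timings));
-
-if (-1 == ioctl(fd, &VIDIOC-QUERY-DV-TIMINGS;, &amp;timings)) {
-	perror("VIDIOC_QUERY_DV_TIMINGS");
-	exit(EXIT_FAILURE);
-}
-
-printf("Detected %ux%u%s\n", timings.bt.width, timings.bt.height,
-       timings.bt.interlaced ? "i" : "p");
-
-if (-1 == ioctl(fd, &VIDIOC-S-DV-TIMINGS;, &amp;timings)) {
-	perror("VIDIOC_S_DV_TIMINGS");
-	exit(EXIT_FAILURE);
-}
-	</programlisting>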
- <para>Applications can make use of the <xref linkend="input-capabilities" /> and
-<xref linkend="output-capabilities"/> flags to determine whether the digital video ioctls
-can be used with the given input or output.</para>
- </section>
-
- &sub-controls;
-
- <section id="format">
- <title>Data Formats</title>
-
- <section>
- <title>Data Format Negotiation</title>
-
- <para>Different devices exchange different kinds of data with
-applications, for example video images, raw or sliced VBI data, RDS
-datagrams. Even within one kind many different formats are possible,
-in particular an abundance of image formats. Although drivers must
-provide a default and the selection persists across closing and
-reopening a device, applications should always negotiate a data format
-before engaging in data exchange. Negotiation means the application
-asks for a particular format and the driver selects and reports the
-best the hardware can do to satisfy the request. Of course
-applications can also just query the current selection.</para>
-
- <para>A single mechanism exists to negotiate all data formats
-using the aggregate &v4l2-format; and the &VIDIOC-G-FMT; and
-&VIDIOC-S-FMT; ioctls. Additionally the &VIDIOC-TRY-FMT; ioctl can be
-used to examine what the hardware <emphasis>could</emphasis> do,
-without actually selecting a new data format. The data formats
-supported by the V4L2 API are covered in the respective device section
-in <xref linkend="devices" />. For a closer look at image formats see
-<xref linkend="pixfmt" />.</para>
-
- <para>The <constant>VIDIOC_S_FMT</constant> ioctl is a major
-turning-point in the initialization sequence. Prior to this point
-multiple panel applications can access the same device concurrently to
-select the current input, change controls or modify other properties.
-The first <constant>VIDIOC_S_FMT</constant> assigns a logical stream
-(video data, VBI data etc.) exclusively to one file descriptor.</para>
-
- <para>Exclusive means no other application, more precisely no
-other file descriptor, can grab this stream or change device
-properties inconsistent with the negotiated parameters. A video
-standard change for example, when the new standard uses a different
-number of scan lines, can invalidate the selected image format.
-Therefore only the file descriptor owning the stream can make
-invalidating changes. Accordingly multiple file descriptors which
-grabbed different logical streams prevent each other from interfering
-with their settings. When for example video overlay is about to start
-or already in progress, simultaneous video capturing may be restricted
-to the same cropping and image size.</para>
-
- <para>When applications omit the
-<constant>VIDIOC_S_FMT</constant> ioctl its locking side effects are
-implied by the next step, the selection of an I/O method with the
-&VIDIOC-REQBUFS; ioctl or implicit with the first &func-read; or
-&func-write; call.</para>
-
- <para>Generally only one logical stream can be assigned to a
-file descriptor, the exception being drivers permitting simultaneous
-video capturing and overlay using the same file descriptor for
-compatibility with V4L and earlier versions of V4L2. Switching the
-logical stream or returning into "panel mode" is possible by closing
-and reopening the device. Drivers <emphasis>may</emphasis> support a
-switch using <constant>VIDIOC_S_FMT</constant>.</para>
-
- <para>All drivers exchanging data with
-applications must support the <constant>VIDIOC_G_FMT</constant> and
-<constant>VIDIOC_S_FMT</constant> ioctl. Implementation of the
-<constant>VIDIOC_TRY_FMT</constant> is highly recommended but
-optional.</para>
- </section>
-
- <section>
- <title>Image Format Enumeration</title>
-
-	<para>Apart from the generic format negotiation functions
-a special ioctl to enumerate all image formats supported by video
-capture, overlay or output devices is available.<footnote>
- <para>Enumerating formats an application has no a-priori
-knowledge of (otherwise it could explicitly ask for them and need not
-enumerate) seems useless, but there are applications serving as proxy
-between drivers and the actual video applications for which this is
-useful.</para>
- </footnote></para>
-
- <para>The &VIDIOC-ENUM-FMT; ioctl must be supported
-by all drivers exchanging image data with applications.</para>
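-
-	<para>A sketch of such an enumeration on a video capture device;
-the buffer type is an assumption, output or overlay devices would use
-their respective types:</para>
-
-	<programlisting>
-&v4l2-fmtdesc; fmtdesc;
-
-memset(&amp;fmtdesc, 0, sizeof(fmtdesc));
-fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-while (0 == ioctl(fd, &VIDIOC-ENUM-FMT;, &amp;fmtdesc)) {
-	printf("Format %u: %s\n", fmtdesc.index, fmtdesc.description);
-	fmtdesc.index++;
-}
-	</programlisting>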
-
- <important>
- <para>Drivers are not supposed to convert image formats in
-kernel space. They must enumerate only formats directly supported by
-the hardware. If necessary driver writers should publish an example
-conversion routine or library for integration into applications.</para>
- </important>
- </section>
- </section>
-
- &sub-planar-apis;
-
- <section id="crop">
- <title>Image Cropping, Insertion and Scaling</title>
-
- <para>Some video capture devices can sample a subsection of the
-picture and shrink or enlarge it to an image of arbitrary size. We
-call these abilities cropping and scaling. Some video output devices
-can scale an image up or down and insert it at an arbitrary scan line
-and horizontal offset into a video signal.</para>
-
- <para>Applications can use the following API to select an area in
-the video signal, query the default area and the hardware limits.
-<emphasis>Despite their name, the &VIDIOC-CROPCAP;, &VIDIOC-G-CROP;
-and &VIDIOC-S-CROP; ioctls apply to input as well as output
-devices.</emphasis></para>
-
- <para>Scaling requires a source and a target. On a video capture
-or overlay device the source is the video signal, and the cropping
-ioctls determine the area actually sampled. The target consists of images
-read by the application or overlaid onto the graphics screen. Their
-size (and position for an overlay) is negotiated with the
-&VIDIOC-G-FMT; and &VIDIOC-S-FMT; ioctls.</para>
-
-	<para>On a video output device the source consists of the images passed in
-by the application, and their size is again negotiated with the
-<constant>VIDIOC_G/S_FMT</constant> ioctls, or may be encoded in a
-compressed video stream. The target is the video signal, and the
-cropping ioctls determine the area where the images are
-inserted.</para>
-
- <para>Source and target rectangles are defined even if the device
-does not support scaling or the <constant>VIDIOC_G/S_CROP</constant>
-ioctls. Their size (and position where applicable) will be fixed in
-this case. <emphasis>All capture and output devices must support the
-<constant>VIDIOC_CROPCAP</constant> ioctl such that applications can
-determine if scaling takes place.</emphasis></para>
-
- <section>
- <title>Cropping Structures</title>
-
- <figure id="crop-scale">
- <title>Image Cropping, Insertion and Scaling</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="crop.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="crop.gif" format="GIF" />
- </imageobject>
- <textobject>
- <phrase>The cropping, insertion and scaling process</phrase>
- </textobject>
- </mediaobject>
- </figure>
-
- <para>For capture devices the coordinates of the top left
-corner, width and height of the area which can be sampled are given by
-the <structfield>bounds</structfield> substructure of the
-&v4l2-cropcap; returned by the <constant>VIDIOC_CROPCAP</constant>
-ioctl. To support a wide range of hardware this specification does not
-define an origin or units. However by convention drivers should
-horizontally count unscaled samples relative to 0H (the leading edge
-of the horizontal sync pulse, see <xref linkend="vbi-hsync" />).
-Vertically they should count ITU-R line
-numbers of the first field (<xref linkend="vbi-525" />, <xref
-linkend="vbi-625" />), multiplied by two if the driver can capture both
-fields.</para>
-
- <para>The top left corner, width and height of the source
-rectangle, that is the area actually sampled, are given by &v4l2-crop;
-using the same coordinate system as &v4l2-cropcap;. Applications can
-use the <constant>VIDIOC_G_CROP</constant> and
-<constant>VIDIOC_S_CROP</constant> ioctls to get and set this
-rectangle. It must lie completely within the capture boundaries and
-the driver may further adjust the requested size and/or position
-according to hardware limitations.</para>
-
- <para>Each capture device has a default source rectangle, given
-by the <structfield>defrect</structfield> substructure of
-&v4l2-cropcap;. The center of this rectangle shall align with the
-center of the active picture area of the video signal, and cover what
-the driver writer considers the complete picture. Drivers shall reset
-the source rectangle to the default when the driver is first loaded,
-but not later.</para>
-
- <para>For output devices these structures and ioctls are used
-accordingly, defining the <emphasis>target</emphasis> rectangle where
-the images will be inserted into the video signal.</para>
-
- </section>
-
- <section>
- <title>Scaling Adjustments</title>
-
- <para>Video hardware can have various cropping, insertion and
-scaling limitations. It may only scale up or down, support only
-discrete scaling factors, or have different scaling abilities in
-horizontal and vertical direction. Also it may not support scaling at
-all. At the same time the &v4l2-crop; rectangle may have to be
-aligned, and both the source and target rectangles may have arbitrary
-upper and lower size limits. In particular the maximum
-<structfield>width</structfield> and <structfield>height</structfield>
-in &v4l2-crop; may be smaller than the
-&v4l2-cropcap;.<structfield>bounds</structfield> area. Therefore, as
-usual, drivers are expected to adjust the requested parameters and
-return the actual values selected.</para>
-
- <para>Applications can change the source or the target rectangle
-first, as they may prefer a particular image size or a certain area in
-the video signal. If the driver has to adjust both to satisfy hardware
-limitations, the last requested rectangle shall take priority, and the
-driver should preferably adjust the opposite one. The &VIDIOC-TRY-FMT;
-ioctl however shall not change the driver state and therefore only
-adjust the requested rectangle.</para>
-
- <para>Suppose scaling on a video capture device is restricted to
-a factor 1:1 or 2:1 in either direction and the target image size must
-be a multiple of 16&nbsp;&times;&nbsp;16 pixels. The source cropping
-rectangle is set to defaults, which are also the upper limit in this
-example, of 640&nbsp;&times;&nbsp;400 pixels at offset 0,&nbsp;0. An
-application requests an image size of 300&nbsp;&times;&nbsp;225
-pixels, assuming video will be scaled down from the "full picture"
-accordingly. The driver sets the image size to the closest possible
-values 304&nbsp;&times;&nbsp;224, then chooses the cropping rectangle
-closest to the requested size, that is 608&nbsp;&times;&nbsp;224
-(224&nbsp;&times;&nbsp;2:1 would exceed the limit 400). The offset
-0,&nbsp;0 is still valid, thus unmodified. Given the default cropping
-rectangle reported by <constant>VIDIOC_CROPCAP</constant> the
-application can easily propose another offset to center the cropping
-rectangle.</para>
-
- <para>Now the application may insist on covering an area using a
-picture aspect ratio closer to the original request, so it asks for a
-cropping rectangle of 608&nbsp;&times;&nbsp;456 pixels. The present
-scaling factors limit cropping to 640&nbsp;&times;&nbsp;384, so the
-driver returns the cropping size 608&nbsp;&times;&nbsp;384 and adjusts
-the image size to closest possible 304&nbsp;&times;&nbsp;192.</para>
-
- </section>
-
- <section>
- <title>Examples</title>
-
- <para>Source and target rectangles shall remain unchanged across
-closing and reopening a device, such that piping data into or out of a
-device will work without special preparations. More advanced
-applications should ensure the parameters are suitable before starting
-I/O.</para>
-
- <example>
- <title>Resetting the cropping parameters</title>
-
- <para>(A video capture device is assumed; change
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> for other
-devices.)</para>
-
- <programlisting>
-&v4l2-cropcap; cropcap;
-&v4l2-crop; crop;
-
-memset (&amp;cropcap, 0, sizeof (cropcap));
-cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (-1 == ioctl (fd, &VIDIOC-CROPCAP;, &amp;cropcap)) {
- perror ("VIDIOC_CROPCAP");
- exit (EXIT_FAILURE);
-}
-
-memset (&amp;crop, 0, sizeof (crop));
-crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-crop.c = cropcap.defrect;
-
-/* Ignore if cropping is not supported (EINVAL). */
-
-if (-1 == ioctl (fd, &VIDIOC-S-CROP;, &amp;crop)
- &amp;&amp; errno != EINVAL) {
- perror ("VIDIOC_S_CROP");
- exit (EXIT_FAILURE);
-}
- </programlisting>
- </example>
-
- <example>
- <title>Simple downscaling</title>
-
- <para>(A video capture device is assumed.)</para>
-
- <programlisting>
-&v4l2-cropcap; cropcap;
-&v4l2-format; format;
-
-reset_cropping_parameters ();
-
-/* Scale down to 1/4 size of full picture. */
-
-memset (&amp;format, 0, sizeof (format)); /* defaults */
-
-format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-format.fmt.pix.width = cropcap.defrect.width &gt;&gt; 1;
-format.fmt.pix.height = cropcap.defrect.height &gt;&gt; 1;
-format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
-
-if (-1 == ioctl (fd, &VIDIOC-S-FMT;, &amp;format)) {
-	perror ("VIDIOC_S_FMT");
- exit (EXIT_FAILURE);
-}
-
-/* We could check the actual image size now, the actual scaling factor
- or if the driver can scale at all. */
- </programlisting>
- </example>
-
- <example>
- <title>Selecting an output area</title>
-
- <programlisting>
-&v4l2-cropcap; cropcap;
-&v4l2-crop; crop;
-
-memset (&amp;cropcap, 0, sizeof (cropcap));
-cropcap.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-
-if (-1 == ioctl (fd, &VIDIOC-CROPCAP;, &amp;cropcap)) {
- perror ("VIDIOC_CROPCAP");
- exit (EXIT_FAILURE);
-}
-
-memset (&amp;crop, 0, sizeof (crop));
-
-crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-crop.c = cropcap.defrect;
-
-/* Scale the width and height to 50 % of their original size
- and center the output. */
-
-crop.c.width /= 2;
-crop.c.height /= 2;
-crop.c.left += crop.c.width / 2;
-crop.c.top += crop.c.height / 2;
-
-/* Ignore if cropping is not supported (EINVAL). */
-
-if (-1 == ioctl (fd, &VIDIOC-S-CROP;, &amp;crop)
- &amp;&amp; errno != EINVAL) {
- perror ("VIDIOC_S_CROP");
- exit (EXIT_FAILURE);
-}
-</programlisting>
- </example>
-
- <example>
- <title>Current scaling factor and pixel aspect</title>
-
- <para>(A video capture device is assumed.)</para>
-
- <programlisting>
-&v4l2-cropcap; cropcap;
-&v4l2-crop; crop;
-&v4l2-format; format;
-double hscale, vscale;
-double aspect;
-int dwidth, dheight;
-
-memset (&amp;cropcap, 0, sizeof (cropcap));
-cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (-1 == ioctl (fd, &VIDIOC-CROPCAP;, &amp;cropcap)) {
- perror ("VIDIOC_CROPCAP");
- exit (EXIT_FAILURE);
-}
-
-memset (&amp;crop, 0, sizeof (crop));
-crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (-1 == ioctl (fd, &VIDIOC-G-CROP;, &amp;crop)) {
- if (errno != EINVAL) {
- perror ("VIDIOC_G_CROP");
- exit (EXIT_FAILURE);
- }
-
- /* Cropping not supported. */
- crop.c = cropcap.defrect;
-}
-
-memset (&amp;format, 0, sizeof (format));
-format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (-1 == ioctl (fd, &VIDIOC-G-FMT;, &amp;format)) {
- perror ("VIDIOC_G_FMT");
- exit (EXIT_FAILURE);
-}
-
-/* The scaling applied by the driver. */
-
-hscale = format.fmt.pix.width / (double) crop.c.width;
-vscale = format.fmt.pix.height / (double) crop.c.height;
-
-aspect = cropcap.pixelaspect.numerator /
- (double) cropcap.pixelaspect.denominator;
-aspect = aspect * hscale / vscale;
-
-/* Devices following ITU-R BT.601 do not capture
- square pixels. For playback on a computer monitor
- we should scale the images to this size. */
-
-dwidth = format.fmt.pix.width / aspect;
-dheight = format.fmt.pix.height;
- </programlisting>
- </example>
- </section>
- </section>
-
- &sub-selection-api;
-
- <section id="streaming-par">
- <title>Streaming Parameters</title>
-
- <para>Streaming parameters are intended to optimize the video
-capture process as well as I/O. Presently applications can request a
-high quality capture mode with the &VIDIOC-S-PARM; ioctl.</para>
-
- <para>The current video standard determines a nominal number of
-frames per second. If less than this number of frames is to be
-captured or output, applications can request frame skipping or
-duplicating on the driver side. This is especially useful when using
-the &func-read; or &func-write;, which are not augmented by timestamps
-or sequence counters, and to avoid unnecessary data copying.</para>
-
- <para>Finally these ioctls can be used to determine the number of
-buffers used internally by a driver in read/write mode. For
-implications see the section discussing the &func-read;
-function.</para>
-
- <para>To get and set the streaming parameters applications call
-the &VIDIOC-G-PARM; and &VIDIOC-S-PARM; ioctl, respectively. They take
-a pointer to a &v4l2-streamparm;, which contains a union holding
-separate parameters for input and output devices.</para>
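-
-	<para>A minimal sketch requesting a lower capture rate; the
-one-in-15 fraction is an example value only and drivers adjust or ignore
-it, depending on the <constant>V4L2_CAP_TIMEPERFRAME</constant> capability
-flag reported in the structure:</para>
-
-	<programlisting>
-&v4l2-streamparm; parm;
-
-memset(&amp;parm, 0, sizeof(parm));
-parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (-1 == ioctl(fd, &VIDIOC-G-PARM;, &amp;parm)) {
-	perror("VIDIOC_G_PARM");
-	exit(EXIT_FAILURE);
-}
-
-/* Ask for roughly 15 frames per second. */
-parm.parm.capture.timeperframe.numerator = 1;
-parm.parm.capture.timeperframe.denominator = 15;
-
-if (-1 == ioctl(fd, &VIDIOC-S-PARM;, &amp;parm)) {
-	perror("VIDIOC_S_PARM");
-	exit(EXIT_FAILURE);
-}
-	</programlisting>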
-
- <para>These ioctls are optional, drivers need not implement
-them. If not implemented, they return the &EINVAL;.</para>
- </section>
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
deleted file mode 100644
index 82fa328abd58..000000000000
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ /dev/null
@@ -1,2723 +0,0 @@
- <title>Changes</title>
-
- <para>The following chapters document the evolution of the V4L2 API,
-errata or extensions. They are also intended to help application and
-driver writers to port or update their code.</para>
-
- <section id="diff-v4l">
- <title>Differences between V4L and V4L2</title>
-
- <para>The Video For Linux API was first introduced in Linux 2.1 to
-unify and replace various TV and radio device related interfaces,
-developed independently by driver writers in prior years. Starting
-with Linux 2.5 the much improved V4L2 API replaces the V4L API.
-Support for the old V4L calls was removed from the kernel, but the
-<xref linkend="libv4l" /> library supports the conversion of V4L
-API calls into V4L2 ones.</para>
-
- <section>
- <title>Opening and Closing Devices</title>
-
- <para>For compatibility reasons the character device file names
-recommended for V4L2 video capture, overlay, radio and raw
-vbi capture devices did not change from those used by V4L. They are
-listed in <xref linkend="devices" /> and below in <xref
- linkend="v4l-dev" />.</para>
-
- <para>The teletext devices (minor range 192-223) have been removed in
-V4L2 and no longer exist. There is no hardware available anymore for handling
-pure teletext. Instead raw or sliced VBI is used.</para>
-
- <para>The V4L <filename>videodev</filename> module automatically
-assigns minor numbers to drivers in load order, depending on the
-registered device type. We recommend that V4L2 drivers by default
-register devices with the same numbers, but the system administrator
-can assign arbitrary minor numbers using driver module options. The
-major device number remains 81.</para>
-
- <table id="v4l-dev">
- <title>V4L Device Types, Names and Numbers</title>
- <tgroup cols="3">
- <thead>
- <row>
- <entry>Device Type</entry>
- <entry>File Name</entry>
- <entry>Minor Numbers</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Video capture and overlay</entry>
- <entry><para><filename>/dev/video</filename> and
-<filename>/dev/bttv0</filename><footnote> <para>According to
-Documentation/devices.txt these should be symbolic links to
-<filename>/dev/video0</filename>. Note the original bttv interface is
-not compatible with V4L or V4L2.</para> </footnote>,
-<filename>/dev/video0</filename> to
-<filename>/dev/video63</filename></para></entry>
- <entry>0-63</entry>
- </row>
- <row>
- <entry>Radio receiver</entry>
- <entry><para><filename>/dev/radio</filename><footnote>
- <para>According to
-<filename>Documentation/devices.txt</filename> a symbolic link to
-<filename>/dev/radio0</filename>.</para>
- </footnote>, <filename>/dev/radio0</filename> to
-<filename>/dev/radio63</filename></para></entry>
- <entry>64-127</entry>
- </row>
- <row>
- <entry>Raw VBI capture</entry>
- <entry><para><filename>/dev/vbi</filename>,
-<filename>/dev/vbi0</filename> to
-<filename>/dev/vbi31</filename></para></entry>
- <entry>224-255</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>V4L prohibits (or used to prohibit) multiple opens of a
-device file. V4L2 drivers <emphasis>may</emphasis> support multiple
-opens, see <xref linkend="open" /> for details and consequences.</para>
-
- <para>V4L drivers respond to V4L2 ioctls with an &EINVAL;.</para>
- </section>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>The V4L <constant>VIDIOCGCAP</constant> ioctl is
-equivalent to V4L2's &VIDIOC-QUERYCAP;.</para>
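-
-	<para>A short V4L2 sketch of the equivalent query; the capability
-flags tested here are just examples:</para>
-
-	<programlisting>
-&v4l2-capability; cap;
-
-if (-1 == ioctl(fd, &VIDIOC-QUERYCAP;, &amp;cap)) {
-	perror("VIDIOC_QUERYCAP");
-	exit(EXIT_FAILURE);
-}
-
-printf("Driver '%s', card '%s'\n", cap.driver, cap.card);
-
-if (cap.capabilities &amp; V4L2_CAP_VIDEO_CAPTURE)
-	printf("Video capture is supported\n");
-if (cap.capabilities &amp; V4L2_CAP_TUNER)
-	printf("The device has a tuner or modulator\n");
-	</programlisting>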
-
- <para>The <structfield>name</structfield> field in struct
-<structname>video_capability</structname> became
-<structfield>card</structfield> in &v4l2-capability;,
-<structfield>type</structfield> was replaced by
-<structfield>capabilities</structfield>. Note V4L2 does not
-distinguish between device types like this; it is better to think of basic
-video input, video output and radio devices supporting a set of
-related functions like video capturing, video overlay and VBI
-capturing. See <xref linkend="open" /> for an
-introduction.<informaltable>
- <tgroup cols="3">
- <thead>
- <row>
- <entry>struct
-<structname>video_capability</structname>
-<structfield>type</structfield></entry>
- <entry>&v4l2-capability;
-<structfield>capabilities</structfield> flags</entry>
- <entry>Purpose</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>VID_TYPE_CAPTURE</constant></entry>
- <entry><constant>V4L2_CAP_VIDEO_CAPTURE</constant></entry>
- <entry>The <link linkend="capture">video
-capture</link> interface is supported.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_TUNER</constant></entry>
- <entry><constant>V4L2_CAP_TUNER</constant></entry>
- <entry>The device has a <link linkend="tuner">tuner or
-modulator</link>.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_TELETEXT</constant></entry>
- <entry><constant>V4L2_CAP_VBI_CAPTURE</constant></entry>
- <entry>The <link linkend="raw-vbi">raw VBI
-capture</link> interface is supported.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_OVERLAY</constant></entry>
- <entry><constant>V4L2_CAP_VIDEO_OVERLAY</constant></entry>
- <entry>The <link linkend="overlay">video
-overlay</link> interface is supported.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_CHROMAKEY</constant></entry>
- <entry><constant>V4L2_FBUF_CAP_CHROMAKEY</constant> in
-field <structfield>capability</structfield> of
-&v4l2-framebuffer;</entry>
- <entry>Whether chromakey overlay is supported. For
-more information on overlay see
-<xref linkend="overlay" />.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_CLIPPING</constant></entry>
- <entry><constant>V4L2_FBUF_CAP_LIST_CLIPPING</constant>
-and <constant>V4L2_FBUF_CAP_BITMAP_CLIPPING</constant> in field
-<structfield>capability</structfield> of &v4l2-framebuffer;</entry>
- <entry>Whether clipping the overlaid image is
-supported, see <xref linkend="overlay" />.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_FRAMERAM</constant></entry>
- <entry><constant>V4L2_FBUF_CAP_EXTERNOVERLAY</constant>
-<emphasis>not set</emphasis> in field
-<structfield>capability</structfield> of &v4l2-framebuffer;</entry>
- <entry>Whether overlay overwrites frame buffer memory,
-see <xref linkend="overlay" />.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_SCALES</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>This flag indicates if the hardware can scale
-images. The V4L2 API implies the scale factor by setting the cropping
-dimensions and image size with the &VIDIOC-S-CROP; and &VIDIOC-S-FMT;
-ioctl, respectively. The driver returns the closest sizes possible.
-For more information on cropping and scaling see <xref
- linkend="crop" />.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_MONOCHROME</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>Applications can enumerate the supported image
-formats with the &VIDIOC-ENUM-FMT; ioctl to determine if the device
-supports grey scale capturing only. For more information on image
-formats see <xref linkend="pixfmt" />.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_SUBCAPTURE</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>Applications can call the &VIDIOC-G-CROP; ioctl
-to determine if the device supports capturing a subsection of the full
-picture ("cropping" in V4L2). If not, the ioctl returns the &EINVAL;.
-For more information on cropping and scaling see <xref
- linkend="crop" />.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_MPEG_DECODER</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>Applications can enumerate the supported image
-formats with the &VIDIOC-ENUM-FMT; ioctl to determine if the device
-supports MPEG streams.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_MPEG_ENCODER</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>See above.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_MJPEG_DECODER</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>See above.</entry>
- </row>
- <row>
- <entry><constant>VID_TYPE_MJPEG_ENCODER</constant></entry>
- <entry><constant>-</constant></entry>
- <entry>See above.</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>The <structfield>audios</structfield> field was replaced
-by <structfield>capabilities</structfield> flag
-<constant>V4L2_CAP_AUDIO</constant>, indicating
-<emphasis>if</emphasis> the device has any audio inputs or outputs. To
-determine their number applications can enumerate audio inputs with
-the &VIDIOC-G-AUDIO; ioctl. The audio ioctls are described in <xref
- linkend="audio" />.</para>
-
- <para>The <structfield>maxwidth</structfield>,
-<structfield>maxheight</structfield>,
-<structfield>minwidth</structfield> and
-<structfield>minheight</structfield> fields were removed. Calling the
-&VIDIOC-S-FMT; or &VIDIOC-TRY-FMT; ioctl with the desired dimensions
-returns the closest size possible, taking into account the current
-video standard, cropping and scaling limitations.</para>
- </section>
-
- <section>
- <title>Video Sources</title>
-
- <para>V4L provides the <constant>VIDIOCGCHAN</constant> and
-<constant>VIDIOCSCHAN</constant> ioctl using struct
-<structname>video_channel</structname> to enumerate
-the video inputs of a V4L device. The equivalent V4L2 ioctls
-are &VIDIOC-ENUMINPUT;, &VIDIOC-G-INPUT; and &VIDIOC-S-INPUT;
-using &v4l2-input; as discussed in <xref linkend="video" />.</para>
-
- <para>The <structfield>channel</structfield> field counting
-inputs was renamed to <structfield>index</structfield>, the video
-input types were renamed as follows: <informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>struct <structname>video_channel</structname>
-<structfield>type</structfield></entry>
- <entry>&v4l2-input;
-<structfield>type</structfield></entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>VIDEO_TYPE_TV</constant></entry>
- <entry><constant>V4L2_INPUT_TYPE_TUNER</constant></entry>
- </row>
- <row>
- <entry><constant>VIDEO_TYPE_CAMERA</constant></entry>
- <entry><constant>V4L2_INPUT_TYPE_CAMERA</constant></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>Unlike the <structfield>tuners</structfield> field
-expressing the number of tuners of this input, V4L2 assumes each video
-input is connected to at most one tuner. However a tuner can have more
-than one input, &ie; RF connectors, and a device can have multiple
-tuners. The index number of the tuner associated with the input, if
-any, is stored in field <structfield>tuner</structfield> of
-&v4l2-input;. Enumeration of tuners is discussed in <xref
- linkend="tuner" />.</para>
-
- <para>The redundant <constant>VIDEO_VC_TUNER</constant> flag was
-dropped. Video inputs associated with a tuner are of type
-<constant>V4L2_INPUT_TYPE_TUNER</constant>. The
-<constant>VIDEO_VC_AUDIO</constant> flag was replaced by the
-<structfield>audioset</structfield> field. V4L2 considers devices with
-up to 32 audio inputs. Each set bit in the
-<structfield>audioset</structfield> field represents one audio input
-this video input combines with. For information about audio inputs and
-how to switch between them see <xref linkend="audio" />.</para>
-
- <para>The <structfield>norm</structfield> field describing the
-supported video standards was replaced by
-<structfield>std</structfield>. The V4L specification mentions a flag
-<constant>VIDEO_VC_NORM</constant> indicating whether the standard can
-be changed. This flag was a later addition together with the
-<structfield>norm</structfield> field and has been removed in the
-meantime. V4L2 has a similar, albeit more comprehensive approach
-to video standards, see <xref linkend="standard" /> for more
-information.</para>
- </section>
-
- <section>
- <title>Tuning</title>
-
- <para>The V4L <constant>VIDIOCGTUNER</constant> and
-<constant>VIDIOCSTUNER</constant> ioctl and struct
-<structname>video_tuner</structname> can be used to enumerate the
-tuners of a V4L TV or radio device. The equivalent V4L2 ioctls are
-&VIDIOC-G-TUNER; and &VIDIOC-S-TUNER; using &v4l2-tuner;. Tuners are
-covered in <xref linkend="tuner" />.</para>
-
- <para>The <structfield>tuner</structfield> field counting tuners
-was renamed to <structfield>index</structfield>. The fields
-<structfield>name</structfield>, <structfield>rangelow</structfield>
-and <structfield>rangehigh</structfield> remained unchanged.</para>
-
- <para>The <constant>VIDEO_TUNER_PAL</constant>,
-<constant>VIDEO_TUNER_NTSC</constant> and
-<constant>VIDEO_TUNER_SECAM</constant> flags indicating the supported
-video standards were dropped. This information is now contained in the
-associated &v4l2-input;. No replacement exists for the
-<constant>VIDEO_TUNER_NORM</constant> flag indicating whether the
-video standard can be switched. The <structfield>mode</structfield>
-field to select a different video standard was replaced by a whole new
-set of ioctls and structures described in <xref linkend="standard" />.
-Due to its ubiquity it should be mentioned the BTTV driver supports
-several standards in addition to the regular
-<constant>VIDEO_MODE_PAL</constant> (0),
-<constant>VIDEO_MODE_NTSC</constant>,
-<constant>VIDEO_MODE_SECAM</constant> and
-<constant>VIDEO_MODE_AUTO</constant> (3). Namely N/PAL Argentina,
-M/PAL, N/PAL, and NTSC Japan with numbers 3-6 (sic).</para>
-
- <para>The <constant>VIDEO_TUNER_STEREO_ON</constant> flag
-indicating stereo reception became
-<constant>V4L2_TUNER_SUB_STEREO</constant> in field
-<structfield>rxsubchans</structfield>. This field also permits the
-detection of monaural and bilingual audio, see the definition of
-&v4l2-tuner; for details. Presently no replacement exists for the
-<constant>VIDEO_TUNER_RDS_ON</constant> and
-<constant>VIDEO_TUNER_MBS_ON</constant> flags.</para>
-
- <para> The <constant>VIDEO_TUNER_LOW</constant> flag was renamed
-to <constant>V4L2_TUNER_CAP_LOW</constant> in the &v4l2-tuner;
-<structfield>capability</structfield> field.</para>
-
- <para>The <constant>VIDIOCGFREQ</constant> and
-<constant>VIDIOCSFREQ</constant> ioctl to change the tuner frequency
-were renamed to &VIDIOC-G-FREQUENCY; and &VIDIOC-S-FREQUENCY;. They
-take a pointer to a &v4l2-frequency; instead of an unsigned long
-integer.</para>
- </section>
-
- <section id="v4l-image-properties">
- <title>Image Properties</title>
-
- <para>V4L2 has no equivalent of the
-<constant>VIDIOCGPICT</constant> and <constant>VIDIOCSPICT</constant>
-ioctl and struct <structname>video_picture</structname>. The following
-fields were replaced by V4L2 controls accessible with the
-&VIDIOC-QUERYCTRL;, &VIDIOC-G-CTRL; and &VIDIOC-S-CTRL; ioctls:<informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>struct <structname>video_picture</structname></entry>
- <entry>V4L2 Control ID</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><structfield>brightness</structfield></entry>
- <entry><constant>V4L2_CID_BRIGHTNESS</constant></entry>
- </row>
- <row>
- <entry><structfield>hue</structfield></entry>
- <entry><constant>V4L2_CID_HUE</constant></entry>
- </row>
- <row>
- <entry><structfield>colour</structfield></entry>
- <entry><constant>V4L2_CID_SATURATION</constant></entry>
- </row>
- <row>
- <entry><structfield>contrast</structfield></entry>
- <entry><constant>V4L2_CID_CONTRAST</constant></entry>
- </row>
- <row>
- <entry><structfield>whiteness</structfield></entry>
- <entry><constant>V4L2_CID_WHITENESS</constant></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>The V4L picture controls are assumed to range from 0 to
-65535 with no particular reset value. The V4L2 API permits arbitrary
-limits and defaults which can be queried with the &VIDIOC-QUERYCTRL;
-ioctl. For general information about controls see <xref
-linkend="control" />.</para>
-
- <para>The <structfield>depth</structfield> (average number of
-bits per pixel) of a video image is implied by the selected image
-format. V4L2 does not explicitly provide such information assuming
-applications recognizing the format are aware of the image depth and
-others need not know. The <structfield>palette</structfield> field
-moved into the &v4l2-pix-format;:<informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>struct <structname>video_picture</structname>
-<structfield>palette</structfield></entry>
- <entry>&v4l2-pix-format;
-<structfield>pixfmt</structfield></entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>VIDEO_PALETTE_GREY</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-GREY"><constant>V4L2_PIX_FMT_GREY</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_HI240</constant></entry>
- <entry><para><link
-linkend="pixfmt-reserved"><constant>V4L2_PIX_FMT_HI240</constant></link><footnote>
- <para>This is a custom format used by the BTTV
-driver, not one of the V4L2 standard formats.</para>
- </footnote></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_RGB565</constant></entry>
- <entry><para><link
-linkend="pixfmt-rgb"><constant>V4L2_PIX_FMT_RGB565</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_RGB555</constant></entry>
- <entry><para><link
-linkend="pixfmt-rgb"><constant>V4L2_PIX_FMT_RGB555</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_RGB24</constant></entry>
- <entry><para><link
-linkend="pixfmt-rgb"><constant>V4L2_PIX_FMT_BGR24</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_RGB32</constant></entry>
- <entry><para><link
-linkend="pixfmt-rgb"><constant>V4L2_PIX_FMT_BGR32</constant></link><footnote>
- <para>Presumably all V4L RGB formats are
-little-endian, although some drivers might interpret them according to machine endianness. V4L2 defines little-endian, big-endian and red/blue
-swapped variants. For details see <xref linkend="pixfmt-rgb" />.</para>
- </footnote></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV422</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-YUYV"><constant>V4L2_PIX_FMT_YUYV</constant></link></para></entry>
- </row>
- <row>
- <entry><para><constant>VIDEO_PALETTE_YUYV</constant><footnote>
- <para><constant>VIDEO_PALETTE_YUV422</constant>
-and <constant>VIDEO_PALETTE_YUYV</constant> are the same formats. Some
-V4L drivers respond to one, some to the other.</para>
- </footnote></para></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-YUYV"><constant>V4L2_PIX_FMT_YUYV</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_UYVY</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-UYVY"><constant>V4L2_PIX_FMT_UYVY</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV420</constant></entry>
- <entry>None</entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV411</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-Y41P"><constant>V4L2_PIX_FMT_Y41P</constant></link><footnote>
- <para>Not to be confused with
-<constant>V4L2_PIX_FMT_YUV411P</constant>, which is a planar
-format.</para> </footnote></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_RAW</constant></entry>
- <entry><para>None<footnote> <para>V4L explains this
-as: "RAW capture (BT848)"</para> </footnote></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV422P</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-YUV422P"><constant>V4L2_PIX_FMT_YUV422P</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV411P</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-YUV411P"><constant>V4L2_PIX_FMT_YUV411P</constant></link><footnote>
- <para>Not to be confused with
-<constant>V4L2_PIX_FMT_Y41P</constant>, which is a packed
-format.</para> </footnote></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV420P</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-YVU420"><constant>V4L2_PIX_FMT_YVU420</constant></link></para></entry>
- </row>
- <row>
- <entry><constant>VIDEO_PALETTE_YUV410P</constant></entry>
- <entry><para><link
-linkend="V4L2-PIX-FMT-YVU410"><constant>V4L2_PIX_FMT_YVU410</constant></link></para></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>V4L2 image formats are defined in <xref
-linkend="pixfmt" />. The image format can be selected with the
-&VIDIOC-S-FMT; ioctl.</para>
- </section>
-
- <section>
- <title>Audio</title>
-
- <para>The <constant>VIDIOCGAUDIO</constant> and
-<constant>VIDIOCSAUDIO</constant> ioctl and struct
-<structname>video_audio</structname> are used to enumerate the
-audio inputs of a V4L device. The equivalent V4L2 ioctls are
-&VIDIOC-G-AUDIO; and &VIDIOC-S-AUDIO; using &v4l2-audio; as
-discussed in <xref linkend="audio" />.</para>
-
- <para>The <structfield>audio</structfield> "channel number"
-field counting audio inputs was renamed to
-<structfield>index</structfield>.</para>
-
- <para>On <constant>VIDIOCSAUDIO</constant> the
-<structfield>mode</structfield> field selects <emphasis>one</emphasis>
-of the <constant>VIDEO_SOUND_MONO</constant>,
-<constant>VIDEO_SOUND_STEREO</constant>,
-<constant>VIDEO_SOUND_LANG1</constant> or
-<constant>VIDEO_SOUND_LANG2</constant> audio demodulation modes. When
-the current audio standard is BTSC
-<constant>VIDEO_SOUND_LANG2</constant> refers to SAP and
-<constant>VIDEO_SOUND_LANG1</constant> is meaningless. Also
-undocumented in the V4L specification, there is no way to query the
-selected mode. On <constant>VIDIOCGAUDIO</constant> the driver returns
-the <emphasis>actually received</emphasis> audio programmes in this
-field. In the V4L2 API this information is stored in the &v4l2-tuner;
-<structfield>rxsubchans</structfield> and
-<structfield>audmode</structfield> fields, respectively. See <xref
-linkend="tuner" /> for more information on tuners. Related to audio
-modes &v4l2-audio; also reports if this is a mono or stereo
-input, regardless if the source is a tuner.</para>
-
-	<para>The following fields were replaced by V4L2 controls
-accessible with the &VIDIOC-QUERYCTRL;, &VIDIOC-G-CTRL; and
-&VIDIOC-S-CTRL; ioctls:<informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>struct
-<structname>video_audio</structname></entry>
- <entry>V4L2 Control ID</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><structfield>volume</structfield></entry>
- <entry><constant>V4L2_CID_AUDIO_VOLUME</constant></entry>
- </row>
- <row>
- <entry><structfield>bass</structfield></entry>
- <entry><constant>V4L2_CID_AUDIO_BASS</constant></entry>
- </row>
- <row>
- <entry><structfield>treble</structfield></entry>
- <entry><constant>V4L2_CID_AUDIO_TREBLE</constant></entry>
- </row>
- <row>
- <entry><structfield>balance</structfield></entry>
- <entry><constant>V4L2_CID_AUDIO_BALANCE</constant></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>To determine which of these controls are supported by a
-driver V4L provides the <structfield>flags</structfield>
-<constant>VIDEO_AUDIO_VOLUME</constant>,
-<constant>VIDEO_AUDIO_BASS</constant>,
-<constant>VIDEO_AUDIO_TREBLE</constant> and
-<constant>VIDEO_AUDIO_BALANCE</constant>. In the V4L2 API the
-&VIDIOC-QUERYCTRL; ioctl reports if the respective control is
-supported. Accordingly the <constant>VIDEO_AUDIO_MUTABLE</constant>
-and <constant>VIDEO_AUDIO_MUTE</constant> flags were replaced by the
-boolean <constant>V4L2_CID_AUDIO_MUTE</constant> control.</para>
-
- <para>All V4L2 controls have a <structfield>step</structfield>
-attribute replacing the struct <structname>video_audio</structname>
-<structfield>step</structfield> field. The V4L audio controls are
-assumed to range from 0 to 65535 with no particular reset value. The
-V4L2 API permits arbitrary limits and defaults which can be queried
-with the &VIDIOC-QUERYCTRL; ioctl. For general information about
-controls see <xref linkend="control" />.</para>
- </section>
-
- <section>
- <title>Frame Buffer Overlay</title>
-
- <para>The V4L2 ioctls equivalent to
-<constant>VIDIOCGFBUF</constant> and <constant>VIDIOCSFBUF</constant>
-are &VIDIOC-G-FBUF; and &VIDIOC-S-FBUF;. The
-<structfield>base</structfield> field of struct
-<structname>video_buffer</structname> remained unchanged, except V4L2
-defines a flag to indicate non-destructive overlays instead of a
-<constant>NULL</constant> pointer. All other fields moved into the
-&v4l2-pix-format; <structfield>fmt</structfield> substructure of
-&v4l2-framebuffer;. The <structfield>depth</structfield> field was
-replaced by <structfield>pixelformat</structfield>. See <xref
- linkend="pixfmt-rgb" /> for a list of RGB formats and their
-respective color depths.</para>
-
- <para>Instead of the special ioctls
-<constant>VIDIOCGWIN</constant> and <constant>VIDIOCSWIN</constant>
-V4L2 uses the general-purpose data format negotiation ioctls
-&VIDIOC-G-FMT; and &VIDIOC-S-FMT;. They take a pointer to a
-&v4l2-format; as argument. Here the <structfield>win</structfield>
-member of the <structfield>fmt</structfield> union is used, a
-&v4l2-window;.</para>
-
- <para>The <structfield>x</structfield>,
-<structfield>y</structfield>, <structfield>width</structfield> and
-<structfield>height</structfield> fields of struct
-<structname>video_window</structname> moved into &v4l2-rect;
-substructure <structfield>w</structfield> of struct
-<structname>v4l2_window</structname>. The
-<structfield>chromakey</structfield>,
-<structfield>clips</structfield>, and
-<structfield>clipcount</structfield> fields remained unchanged. Struct
-<structname>video_clip</structname> was renamed to &v4l2-clip;, also
-containing a struct <structname>v4l2_rect</structname>, but the
-semantics are still the same.</para>
-
- <para>The <constant>VIDEO_WINDOW_INTERLACE</constant> flag was
-dropped. Instead applications must set the
-<structfield>field</structfield> field to
-<constant>V4L2_FIELD_ANY</constant> or
-<constant>V4L2_FIELD_INTERLACED</constant>. The
-<constant>VIDEO_WINDOW_CHROMAKEY</constant> flag moved into
-&v4l2-framebuffer;, under the new name
-<constant>V4L2_FBUF_FLAG_CHROMAKEY</constant>.</para>
-
- <para>In V4L, storing a bitmap pointer in
-<structfield>clips</structfield> and setting
-<structfield>clipcount</structfield> to
-<constant>VIDEO_CLIP_BITMAP</constant> (-1) requests bitmap
-clipping, using a fixed size bitmap of 1024 &times; 625 bits. Struct
-<structname>v4l2_window</structname> has a separate
-<structfield>bitmap</structfield> pointer field for this purpose and
-the bitmap size is determined by <structfield>w.width</structfield> and
-<structfield>w.height</structfield>.</para>
-
- <para>The <constant>VIDIOCCAPTURE</constant> ioctl to enable or
-disable overlay was renamed to &VIDIOC-OVERLAY;.</para>
- </section>
-
- <section>
- <title>Cropping</title>
-
- <para>To capture only a subsection of the full picture V4L
-defines the <constant>VIDIOCGCAPTURE</constant> and
-<constant>VIDIOCSCAPTURE</constant> ioctls using struct
-<structname>video_capture</structname>. The equivalent V4L2 ioctls are
-&VIDIOC-G-CROP; and &VIDIOC-S-CROP; using &v4l2-crop;, and the related
-&VIDIOC-CROPCAP; ioctl. This is a rather complex matter, see
-<xref linkend="crop" /> for details.</para>
-
- <para>The <structfield>x</structfield>,
-<structfield>y</structfield>, <structfield>width</structfield> and
-<structfield>height</structfield> fields moved into &v4l2-rect;
-substructure <structfield>c</structfield> of struct
-<structname>v4l2_crop</structname>. The
-<structfield>decimation</structfield> field was dropped. In the V4L2
-API the scaling factor is implied by the size of the cropping
-rectangle and the size of the captured or overlaid image.</para>
-
- <para>The <constant>VIDEO_CAPTURE_ODD</constant>
-and <constant>VIDEO_CAPTURE_EVEN</constant> flags to capture only the
-odd or even field, respectively, were replaced by
-<constant>V4L2_FIELD_TOP</constant> and
-<constant>V4L2_FIELD_BOTTOM</constant> in the field named
-<structfield>field</structfield> of &v4l2-pix-format; and
-&v4l2-window;. These structures are used to select a capture or
-overlay format with the &VIDIOC-S-FMT; ioctl.</para>
- </section>
-
- <section>
- <title>Reading Images, Memory Mapping</title>
-
- <section>
- <title>Capturing using the read method</title>
-
- <para>There is no essential difference between reading images
-from a V4L or V4L2 device using the &func-read; function; however, V4L2
-drivers are not required to support this I/O method. Applications can
-determine if the function is available with the &VIDIOC-QUERYCAP;
-ioctl. All V4L2 devices exchanging data with applications must support
-the &func-select; and &func-poll; functions.</para>
-
- <para>To select an image format and size, V4L provides the
-<constant>VIDIOCSPICT</constant> and <constant>VIDIOCSWIN</constant>
-ioctls. V4L2 uses the general-purpose data format negotiation ioctls
-&VIDIOC-G-FMT; and &VIDIOC-S-FMT;. They take a pointer to a
-&v4l2-format; as argument, here the &v4l2-pix-format; named
-<structfield>pix</structfield> of its <structfield>fmt</structfield>
-union is used.</para>
-
- <para>For more information about the V4L2 read interface see
-<xref linkend="rw" />.</para>
- </section>
- <section>
- <title>Capturing using memory mapping</title>
-
- <para>Applications can read from V4L devices by mapping
-buffers in device memory, or more often just buffers allocated in
-DMA-able system memory, into their address space. This avoids the data
-copying overhead of the read method. V4L2 supports memory mapping as
-well, with a few differences.</para>
-
- <informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>V4L</entry>
- <entry>V4L2</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>The image format must be selected before
-buffers are allocated, with the &VIDIOC-S-FMT; ioctl. When no format
-is selected the driver may use the format last requested, possibly
-by another application.</entry>
- </row>
- <row>
- <entry><para>Applications cannot change the number of
-buffers. It is built into the driver, unless it has a module
-option to change the number when the driver module is
-loaded.</para></entry>
- <entry><para>The &VIDIOC-REQBUFS; ioctl allocates the
-desired number of buffers, this is a required step in the initialization
-sequence.</para></entry>
- </row>
- <row>
- <entry><para>Drivers map all buffers as one contiguous
-range of memory. The <constant>VIDIOCGMBUF</constant> ioctl is
-available to query the number of buffers, the offset of each buffer
-from the start of the virtual file, and the overall amount of memory
-used, which can be used as arguments for the &func-mmap;
-function.</para></entry>
- <entry><para>Buffers are individually mapped. The
-offset and size of each buffer can be determined with the
-&VIDIOC-QUERYBUF; ioctl.</para></entry>
- </row>
- <row>
- <entry><para>The <constant>VIDIOCMCAPTURE</constant>
-ioctl prepares a buffer for capturing. It also determines the image
-format for this buffer. The ioctl returns immediately, possibly with
-an &EAGAIN; if no video signal has been detected. When the driver
-supports more than one buffer applications can call the ioctl multiple
-times and thus have multiple outstanding capture
-requests.</para><para>The <constant>VIDIOCSYNC</constant> ioctl
-suspends execution until a particular buffer has been
-filled.</para></entry>
- <entry><para>Drivers maintain an incoming and outgoing
-queue. &VIDIOC-QBUF; enqueues any empty buffer into the incoming
-queue. Filled buffers are dequeued from the outgoing queue with the
-&VIDIOC-DQBUF; ioctl. To wait until filled buffers become available this
-function, &func-select; or &func-poll; can be used. The
-&VIDIOC-STREAMON; ioctl must be called once after enqueuing one or
-more buffers to start capturing. Its counterpart
-&VIDIOC-STREAMOFF; stops capturing and dequeues all buffers from both
-queues. Applications can query the signal status, if known, with the
-&VIDIOC-ENUMINPUT; ioctl.</para></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
-
- <para>For a more in-depth discussion of memory mapping and
-examples, see <xref linkend="mmap" />.</para>
- </section>
- </section>
-
- <section>
- <title>Reading Raw VBI Data</title>
-
- <para>Originally the V4L API did not specify a raw VBI capture
-interface, only the device file <filename>/dev/vbi</filename> was
-reserved for this purpose. The only driver supporting this interface
-was the BTTV driver, de-facto defining the V4L VBI interface. Reading
-from the device yields a raw VBI image with the following
-parameters:<informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>&v4l2-vbi-format;</entry>
- <entry>V4L, BTTV driver</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>sampling_rate</entry>
- <entry>28636363&nbsp;Hz NTSC (or any other 525-line
-standard); 35468950&nbsp;Hz PAL and SECAM (625-line standards)</entry>
- </row>
- <row>
- <entry>offset</entry>
- <entry>?</entry>
- </row>
- <row>
- <entry>samples_per_line</entry>
- <entry>2048</entry>
- </row>
- <row>
- <entry>sample_format</entry>
- <entry>V4L2_PIX_FMT_GREY. The last four bytes (a
-machine endianness integer) contain a frame counter.</entry>
- </row>
- <row>
- <entry>start[]</entry>
- <entry>10, 273 NTSC; 22, 335 PAL and SECAM</entry>
- </row>
- <row>
- <entry>count[]</entry>
- <entry><para>16, 16<footnote><para>Old driver
-versions used different values, eventually the custom
-<constant>BTTV_VBISIZE</constant> ioctl was added to query the
-correct values.</para></footnote></para></entry>
- </row>
- <row>
- <entry>flags</entry>
- <entry>0</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>Undocumented in the V4L specification, in Linux 2.3 the
-<constant>VIDIOCGVBIFMT</constant> and
-<constant>VIDIOCSVBIFMT</constant> ioctls using struct
-<structname>vbi_format</structname> were added to determine the VBI
-image parameters. These ioctls are only partially compatible with the
-V4L2 VBI interface specified in <xref linkend="raw-vbi" />.</para>
-
- <para>An <structfield>offset</structfield> field does not
-exist, <structfield>sample_format</structfield> is supposed to be
-<constant>VIDEO_PALETTE_RAW</constant>, equivalent to
-<constant>V4L2_PIX_FMT_GREY</constant>. The remaining fields are
-probably equivalent to &v4l2-vbi-format;.</para>
-
- <para>Apparently only the Zoran (ZR 36120) driver implements
-these ioctls. The semantics differ from those specified for V4L2 in two
-ways. The parameters are reset on &func-open; and
-<constant>VIDIOCSVBIFMT</constant> always returns an &EINVAL; if the
-parameters are invalid.</para>
- </section>
-
- <section>
- <title>Miscellaneous</title>
-
- <para>V4L2 has no equivalent of the
-<constant>VIDIOCGUNIT</constant> ioctl. Applications can find the VBI
-device associated with a video capture device (or vice versa) by
-reopening the device and requesting VBI data. For details see
-<xref linkend="open" />.</para>
-
- <para>No replacement exists for <constant>VIDIOCKEY</constant>,
-and the V4L functions for microcode programming. A new interface for
-MPEG compression and playback devices is documented in <xref
- linkend="extended-controls" />.</para>
- </section>
-
- </section>
-
- <section id="hist-v4l2">
- <title>Changes of the V4L2 API</title>
-
- <para>Soon after the V4L API was added to the kernel it was
-criticised as too inflexible. In August 1998 Bill Dirks proposed a
-number of improvements and began to work on documentation, example
-drivers and applications. With the help of other volunteers this
-eventually became the V4L2 API, not just an extension but a
-replacement for the V4L API. However it took another four years and
-two stable kernel releases until the new API was finally accepted for
-inclusion into the kernel in its present form.</para>
-
- <section>
- <title>Early Versions</title>
- <para>1998-08-20: First version.</para>
-
- <para>1998-08-27: The &func-select; function was introduced.</para>
-
- <para>1998-09-10: New video standard interface.</para>
-
- <para>1998-09-18: The <constant>VIDIOC_NONCAP</constant> ioctl
-was replaced by the otherwise meaningless <constant>O_TRUNC</constant>
-&func-open; flag, and the aliases <constant>O_NONCAP</constant> and
-<constant>O_NOIO</constant> were defined. Applications can set this
-flag if they intend to access controls only, as opposed to capture
-applications which need exclusive access. The
-<constant>VIDEO_STD_XXX</constant> identifiers are now ordinals
-instead of flags, and the <function>video_std_construct()</function>
-helper function takes id and transmission arguments.</para>
-
- <para>1998-09-28: Revamped video standard. Made video controls
-individually enumerable.</para>
-
- <para>1998-10-02: The <structfield>id</structfield> field was
-removed from struct <structname>video_standard</structname> and the
-color subcarrier fields were renamed. The &VIDIOC-QUERYSTD; ioctl was
-renamed to &VIDIOC-ENUMSTD;, &VIDIOC-G-INPUT; to &VIDIOC-ENUMINPUT;. A
-first draft of the Codec API was released.</para>
-
- <para>1998-11-08: Many minor changes. Most symbols have been
-renamed. Some material changes to &v4l2-capability;.</para>
-
-      <para>1998-11-12: The read/write direction of some ioctls was misdefined.</para>
-
- <para>1998-11-14: <constant>V4L2_PIX_FMT_RGB24</constant>
-changed to <constant>V4L2_PIX_FMT_BGR24</constant>, and
-<constant>V4L2_PIX_FMT_RGB32</constant> changed to
-<constant>V4L2_PIX_FMT_BGR32</constant>. Audio controls are now
-accessible with the &VIDIOC-G-CTRL; and &VIDIOC-S-CTRL; ioctls under
-names starting with <constant>V4L2_CID_AUDIO</constant>. The
-<constant>V4L2_MAJOR</constant> define was removed from
-<filename>videodev.h</filename> since it was only used once in the
-<filename>videodev</filename> kernel module. The
-<constant>YUV422</constant> and <constant>YUV411</constant> planar
-image formats were added.</para>
-
- <para>1998-11-28: A few ioctl symbols changed. Interfaces for codecs and
-video output devices were added.</para>
-
- <para>1999-01-14: A raw VBI capture interface was added.</para>
-
- <para>1999-01-19: The <constant>VIDIOC_NEXTBUF</constant> ioctl
- was removed.</para>
- </section>
-
- <section>
- <title>V4L2 Version 0.16 1999-01-31</title>
- <para>1999-01-27: There is now one QBUF ioctl, VIDIOC_QWBUF and VIDIOC_QRBUF
-are gone. VIDIOC_QBUF takes a v4l2_buffer as a parameter. Added
-digital zoom (cropping) controls.</para>
- </section>
-
- <!-- Where's 0.17? mhs couldn't find that videodev.h, perhaps Bill
- forgot to bump the version number or never released it. -->
-
- <section>
- <title>V4L2 Version 0.18 1999-03-16</title>
- <para>Added a v4l to V4L2 ioctl compatibility layer to
-videodev.c. Driver writers, this changes how you implement your ioctl
-handler. See the Driver Writer's Guide. Added some more control id
-codes.</para>
- </section>
-
- <section>
- <title>V4L2 Version 0.19 1999-06-05</title>
- <para>1999-03-18: Fill in the category and catname fields of
-v4l2_queryctrl objects before passing them to the driver. Required a
-minor change to the VIDIOC_QUERYCTRL handlers in the sample
-drivers.</para>
- <para>1999-03-31: Better compatibility for v4l memory capture
-ioctls. Requires changes to drivers to fully support new compatibility
-features, see Driver Writer's Guide and v4l2cap.c. Added new control
-IDs: V4L2_CID_HFLIP, _VFLIP. Changed V4L2_PIX_FMT_YUV422P to _YUV422P,
-and _YUV411P to _YUV411P.</para>
- <para>1999-04-04: Added a few more control IDs.</para>
- <para>1999-04-07: Added the button control type.</para>
- <para>1999-05-02: Fixed a typo in videodev.h, and added the
-V4L2_CTRL_FLAG_GRAYED (later V4L2_CTRL_FLAG_GRABBED) flag.</para>
-      <para>1999-05-20: The definition of VIDIOC_G_CTRL was wrong, causing
-a malfunction of this ioctl.</para>
- <para>1999-06-05: Changed the value of
-V4L2_CID_WHITENESS.</para>
- </section>
-
- <section>
- <title>V4L2 Version 0.20 (1999-09-10)</title>
-
- <para>Version 0.20 introduced a number of changes which were
-<emphasis>not backward compatible</emphasis> with 0.19 and earlier
-versions. The purpose of these changes was to simplify the API, while
-making it more extensible and following common Linux driver API
-conventions.</para>
-
- <orderedlist>
- <listitem>
- <para>Some typos in <constant>V4L2_FMT_FLAG</constant>
-symbols were fixed. &v4l2-clip; was changed for compatibility with
-v4l. (1999-08-30)</para>
- </listitem>
-
- <listitem>
- <para><constant>V4L2_TUNER_SUB_LANG1</constant> was added.
-(1999-09-05)</para>
- </listitem>
-
- <listitem>
- <para>All ioctl() commands that used an integer argument now
-take a pointer to an integer. Where it makes sense, ioctls will return
-the actual new value in the integer pointed to by the argument, a
-common convention in the V4L2 API. The affected ioctls are:
-VIDIOC_PREVIEW, VIDIOC_STREAMON, VIDIOC_STREAMOFF, VIDIOC_S_FREQ,
-VIDIOC_S_INPUT, VIDIOC_S_OUTPUT, VIDIOC_S_EFFECT. For example
-<programlisting>
-err = ioctl (fd, VIDIOC_XXX, V4L2_XXX);
-</programlisting> becomes <programlisting>
-int a = V4L2_XXX;
-
-err = ioctl(fd, VIDIOC_XXX, &amp;a);
-</programlisting>
- </para>
- </listitem>
-
- <listitem>
- <para>All the different get- and set-format commands were
-swept into one &VIDIOC-G-FMT; and &VIDIOC-S-FMT; ioctl taking a union
-and a type field selecting the union member as parameter. The purpose is to
-simplify the API by eliminating several ioctls and to allow new and
-driver private data streams without adding new ioctls.</para>
-
- <para>This change obsoletes the following ioctls:
-<constant>VIDIOC_S_INFMT</constant>,
-<constant>VIDIOC_G_INFMT</constant>,
-<constant>VIDIOC_S_OUTFMT</constant>,
-<constant>VIDIOC_G_OUTFMT</constant>,
-<constant>VIDIOC_S_VBIFMT</constant> and
-<constant>VIDIOC_G_VBIFMT</constant>. The image format structure
-<structname>v4l2_format</structname> was renamed to &v4l2-pix-format;,
-while &v4l2-format; is now the enveloping structure for all format
-negotiations.</para>
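-
-      <para>For illustration, a sketch of this pattern in terms of the
-final V4L2 interface (names as in <filename>videodev2.h</filename>,
-the function name is illustrative only, error handling omitted): the
-<structfield>type</structfield> field selects the
-<structfield>fmt.pix</structfield> union member.
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Negotiate a capture format; the driver adjusts fields it cannot satisfy. */
-int set_capture_format(int fd, unsigned int width, unsigned int height)
-{
-        struct v4l2_format fmt;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        fmt.fmt.pix.width = width;
-        fmt.fmt.pix.height = height;
-        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
-        fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
-
-        return ioctl(fd, VIDIOC_S_FMT, &amp;fmt);
-}
-</programlisting>
-      </para>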
- </listitem>
-
- <listitem>
- <para>Similar to the changes above, the
-<constant>VIDIOC_G_PARM</constant> and
-<constant>VIDIOC_S_PARM</constant> ioctls were merged with
-<constant>VIDIOC_G_OUTPARM</constant> and
-<constant>VIDIOC_S_OUTPARM</constant>. A
-<structfield>type</structfield> field in the new &v4l2-streamparm;
-selects the respective union member.</para>
-
- <para>This change obsoletes the
-<constant>VIDIOC_G_OUTPARM</constant> and
-<constant>VIDIOC_S_OUTPARM</constant> ioctls.</para>
- </listitem>
-
- <listitem>
- <para>Control enumeration was simplified, and two new
-control flags were introduced and one dropped. The
-<structfield>catname</structfield> field was replaced by a
-<structfield>group</structfield> field.</para>
-
- <para>Drivers can now flag unsupported and temporarily
-unavailable controls with <constant>V4L2_CTRL_FLAG_DISABLED</constant>
-and <constant>V4L2_CTRL_FLAG_GRABBED</constant> respectively. The
-<structfield>group</structfield> name indicates a possibly narrower
-classification than the <structfield>category</structfield>. In other
-words, there may be multiple groups within a category. Controls within
-a group would typically be drawn within a group box. Controls in
-different categories might have a greater separation, or may even
-appear in separate windows.</para>
- </listitem>
-
- <listitem>
- <para>The &v4l2-buffer; <structfield>timestamp</structfield>
-was changed to a 64 bit integer, containing the sampling or output
-time of the frame in nanoseconds. Additionally timestamps will be in
-absolute system time, not starting from zero at the beginning of a
-stream. The data type name for timestamps is stamp_t, defined as a
-signed 64-bit integer. Output devices should not send a buffer out
-until the time in the timestamp field has arrived. I would like to
-follow SGI's lead, and adopt a multimedia timestamping system like
-their UST (Unadjusted System Time). See
-http://web.archive.org/web/*/http://reality.sgi.com/cpirazzi_engr/lg/time/intro.html.
-UST uses timestamps that are 64-bit signed integers
-(not struct timeval's) and given in nanosecond units. The UST clock
-starts at zero when the system is booted and runs continuously and
-uniformly. It takes a little over 292 years for UST to overflow. There
-is no way to set the UST clock. The regular Linux time-of-day clock
-can be changed periodically, which would cause errors if it were being
-used for timestamping a multimedia stream. A real UST style clock will
-require some support in the kernel that is not there yet. But in
-anticipation, I will change the timestamp field to a 64-bit integer,
-and I will change the v4l2_masterclock_gettime() function (used only
-by drivers) to return a 64-bit integer.</para>
- </listitem>
-
- <listitem>
- <para>A <structfield>sequence</structfield> field was added
-to &v4l2-buffer;. The <structfield>sequence</structfield> field counts
-captured frames, it is ignored by output devices. When a capture
-driver drops a frame, the sequence number of that frame is
-skipped.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 Version 0.20 incremental changes</title>
- <!-- Version number didn't change anymore, reason unknown. -->
-
- <para>1999-12-23: In &v4l2-vbi-format; the
-<structfield>reserved1</structfield> field became
-<structfield>offset</structfield>. Previously drivers were required to
-clear the <structfield>reserved1</structfield> field.</para>
-
- <para>2000-01-13: The
- <constant>V4L2_FMT_FLAG_NOT_INTERLACED</constant> flag was added.</para>
-
- <para>2000-07-31: The <filename>linux/poll.h</filename> header
-is now included by <filename>videodev.h</filename> for compatibility
-with the original <filename>videodev.h</filename> file.</para>
-
- <para>2000-11-20: <constant>V4L2_TYPE_VBI_OUTPUT</constant> and
-<constant>V4L2_PIX_FMT_Y41P</constant> were added.</para>
-
- <para>2000-11-25: <constant>V4L2_TYPE_VBI_INPUT</constant> was
-added.</para>
-
- <para>2000-12-04: A couple typos in symbol names were fixed.</para>
-
- <para>2001-01-18: To avoid namespace conflicts the
-<constant>fourcc</constant> macro defined in the
-<filename>videodev.h</filename> header file was renamed to
-<constant>v4l2_fourcc</constant>.</para>
-
- <para>2001-01-25: A possible driver-level compatibility problem
-between the <filename>videodev.h</filename> file in Linux 2.4.0 and
-the <filename>videodev.h</filename> file included in the
-<filename>videodevX</filename> patch was fixed. Users of an earlier
-version of <filename>videodevX</filename> on Linux 2.4.0 should
-recompile their V4L and V4L2 drivers.</para>
-
- <para>2001-01-26: A possible kernel-level incompatibility
-between the <filename>videodev.h</filename> file in the
-<filename>videodevX</filename> patch and the
-<filename>videodev.h</filename> file in Linux 2.2.x with devfs patches
-applied was fixed.</para>
-
- <para>2001-03-02: Certain V4L ioctls which pass data in both
-directions, although they are defined with a read-only parameter, did not
-work correctly through the backward compatibility layer.
-[Solution?]</para>
-
- <para>2001-04-13: Big endian 16-bit RGB formats were added.</para>
-
- <para>2001-09-17: New YUV formats and the &VIDIOC-G-FREQUENCY; and
-&VIDIOC-S-FREQUENCY; ioctls were added. (The old
-<constant>VIDIOC_G_FREQ</constant> and
-<constant>VIDIOC_S_FREQ</constant> ioctls did not take multiple tuners
-into account.)</para>
-
- <para>2000-09-18: <constant>V4L2_BUF_TYPE_VBI</constant> was
-added. This may <emphasis>break compatibility</emphasis> as the
-&VIDIOC-G-FMT; and &VIDIOC-S-FMT; ioctls may fail now if the struct
-<structname>v4l2_fmt</structname> <structfield>type</structfield>
-field does not contain <constant>V4L2_BUF_TYPE_VBI</constant>. In the
-documentation of the &v4l2-vbi-format;
-<structfield>offset</structfield> field the ambiguous phrase "rising
-edge" was changed to "leading edge".</para>
- </section>
-
- <section>
- <title>V4L2 Version 0.20 2000-11-23</title>
-
- <para>A number of changes were made to the raw VBI
-interface.</para>
-
- <orderedlist>
- <listitem>
- <para>Figures clarifying the line numbering scheme were
-added to the V4L2 API specification. The
-<structfield>start</structfield>[0] and
-<structfield>start</structfield>[1] fields no longer count line
-numbers beginning at zero. Rationale: a) The previous definition was
-unclear. b) The <structfield>start</structfield>[] values are ordinal
-numbers. c) There is no point in inventing a new line numbering
-scheme. We now use line numbers as defined by ITU-R, period.
-Compatibility: Add one to the start values. Applications depending on
-the previous semantics may not function correctly.</para>
- </listitem>
-
- <listitem>
- <para>The restriction "count[0] &gt; 0 and count[1] &gt; 0"
-has been relaxed to "(count[0] + count[1]) &gt; 0". Rationale:
-Drivers may allocate resources at scan line granularity and some data
-services are transmitted only on the first field. The comment that
-both <structfield>count</structfield> values will usually be equal is
-misleading and pointless and has been removed. This change
-<emphasis>breaks compatibility</emphasis> with earlier versions:
-Drivers may return EINVAL, applications may not function
-correctly.</para>
- </listitem>
-
- <listitem>
- <para>Drivers are again permitted to return negative
-(unknown) start values as proposed earlier. Why this feature was
-dropped is unclear. This change may <emphasis>break
-compatibility</emphasis> with applications depending on the start
-values being positive. The use of <constant>EBUSY</constant> and
-<constant>EINVAL</constant> error codes with the &VIDIOC-S-FMT; ioctl
-was clarified. The &EBUSY; was finally documented, as was the
-<structfield>reserved2</structfield> field, which was previously
-mentioned only in the <filename>videodev.h</filename> header
-file.</para>
- </listitem>
-
- <listitem>
- <para>New buffer types
-<constant>V4L2_TYPE_VBI_INPUT</constant> and
-<constant>V4L2_TYPE_VBI_OUTPUT</constant> were added. The former is an
-alias for the old <constant>V4L2_TYPE_VBI</constant>, the latter was
-missing in the <filename>videodev.h</filename> file.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 Version 0.20 2002-07-25</title>
- <para>Added sliced VBI interface proposal.</para>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.5.46, 2002-10</title>
-
- <para>Around October-November 2002, prior to an announced
-feature freeze of Linux 2.5, the API was revised, drawing from
-experience with V4L2 0.20. This unnamed version was finally merged
-into Linux 2.5.46.</para>
-
- <orderedlist>
- <listitem>
- <para>As specified in <xref linkend="related" />, drivers
-must make related device functions available under all minor device
-numbers.</para>
- </listitem>
-
- <listitem>
- <para>The &func-open; function requires access mode
-<constant>O_RDWR</constant> regardless of the device type. All V4L2
-drivers exchanging data with applications must support the
-<constant>O_NONBLOCK</constant> flag. The <constant>O_NOIO</constant>
-flag, a V4L2 symbol which aliased the meaningless
-<constant>O_TRUNC</constant> to indicate accesses without data
-exchange (panel applications) was dropped. Drivers must stay in "panel
-mode" until the application attempts to initiate a data exchange, see
-<xref linkend="open" />.</para>
- </listitem>
-
- <listitem>
- <para>The &v4l2-capability; changed dramatically. Note that
-the size of the structure also changed; since the size is encoded in the
-ioctl request code, older V4L2 devices will respond with an &EINVAL; to
-the new &VIDIOC-QUERYCAP; ioctl.</para>
-
- <para>There are new fields to identify the driver, a new RDS
-device function <constant>V4L2_CAP_RDS_CAPTURE</constant>, a
-<constant>V4L2_CAP_AUDIO</constant> flag indicating whether the device
-has any audio connectors, and another I/O capability,
-<constant>V4L2_CAP_ASYNCIO</constant>. In response to
-these changes the <structfield>type</structfield> field became a bit
-set and was merged into the <structfield>flags</structfield> field.
-<constant>V4L2_FLAG_TUNER</constant> was renamed to
-<constant>V4L2_CAP_TUNER</constant>,
-<constant>V4L2_CAP_VIDEO_OVERLAY</constant> replaced
-<constant>V4L2_FLAG_PREVIEW</constant> and
-<constant>V4L2_CAP_VBI_CAPTURE</constant> and
-<constant>V4L2_CAP_VBI_OUTPUT</constant> replaced
-<constant>V4L2_FLAG_DATA_SERVICE</constant>.
-<constant>V4L2_FLAG_READ</constant> and
-<constant>V4L2_FLAG_WRITE</constant> were merged into
-<constant>V4L2_CAP_READWRITE</constant>.</para>
-
- <para>The redundant fields
-<structfield>inputs</structfield>, <structfield>outputs</structfield>
-and <structfield>audios</structfield> were removed. These properties
-can be determined as described in <xref linkend="video" /> and <xref
-linkend="audio" />.</para>
-
- <para>The somewhat volatile and therefore barely useful
-fields <structfield>maxwidth</structfield>,
-<structfield>maxheight</structfield>,
-<structfield>minwidth</structfield>,
-<structfield>minheight</structfield>,
-<structfield>maxframerate</structfield> were removed. This information
-is available as described in <xref linkend="format" /> and
-<xref linkend="standard" />.</para>
-
- <para><constant>V4L2_FLAG_SELECT</constant> was removed. We
-believe the select() function is important enough to require support
-of it in all V4L2 drivers exchanging data with applications. The
-redundant <constant>V4L2_FLAG_MONOCHROME</constant> flag was removed,
-this information is available as described in <xref
- linkend="format" />.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-input; the
-<structfield>assoc_audio</structfield> field and the
-<structfield>capability</structfield> field and its only flag
-<constant>V4L2_INPUT_CAP_AUDIO</constant> were replaced by the new
-<structfield>audioset</structfield> field. Instead of linking one
-video input to one audio input this field reports all audio inputs
-this video input combines with.</para>
-
- <para>New fields are <structfield>tuner</structfield>
-(reversing the former link from tuners to video inputs),
-<structfield>std</structfield> and
-<structfield>status</structfield>.</para>
-
- <para>Accordingly &v4l2-output; lost its
-<structfield>capability</structfield> and
-<structfield>assoc_audio</structfield> fields.
-<structfield>audioset</structfield>,
-<structfield>modulator</structfield> and
-<structfield>std</structfield> were added instead.</para>
- </listitem>
-
- <listitem>
- <para>The &v4l2-audio; field
-<structfield>audio</structfield> was renamed to
-<structfield>index</structfield>, for consistency with other
-structures. A new capability flag
-<constant>V4L2_AUDCAP_STEREO</constant> was added to indicate whether the
-audio input in question supports stereo sound.
-<constant>V4L2_AUDCAP_EFFECTS</constant> and the corresponding
-<constant>V4L2_AUDMODE</constant> flags were removed. This can be
-easily implemented using controls. (However the same applies to AVL
-which is still there.)</para>
-
- <para>Again for consistency the &v4l2-audioout; field
-<structfield>audio</structfield> was renamed to
-<structfield>index</structfield>.</para>
- </listitem>
-
- <listitem>
- <para>The &v4l2-tuner;
-<structfield>input</structfield> field was replaced by an
-<structfield>index</structfield> field, permitting devices with
-multiple tuners. The link between video inputs and tuners is now
-reversed, inputs point to their tuner. The
-<structfield>std</structfield> substructure became a
-simple set (more about this below) and moved into &v4l2-input;. A
-<structfield>type</structfield> field was added.</para>
-
- <para>Accordingly in &v4l2-modulator; the
-<structfield>output</structfield> was replaced by an
-<structfield>index</structfield> field.</para>
-
- <para>In &v4l2-frequency; the
-<structfield>port</structfield> field was replaced by a
-<structfield>tuner</structfield> field containing the respective tuner
-or modulator index number. A tuner <structfield>type</structfield>
-field was added and the <structfield>reserved</structfield> field
-became larger for future extensions (satellite tuners in
-particular).</para>
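-
-	  <para>For example, tuning with the revised &v4l2-frequency;
-structure looks like this sketch (final V4L2 names, the function name
-is illustrative only, error handling left to the caller):
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Tune the first tuner; the frequency is given in units of 62.5 kHz
-   (or 62.5 Hz if the tuner reports V4L2_TUNER_CAP_LOW). */
-int tune(int fd, unsigned int units)
-{
-        struct v4l2_frequency freq;
-
-        memset(&amp;freq, 0, sizeof(freq));
-        freq.tuner = 0;
-        freq.type = V4L2_TUNER_ANALOG_TV;
-        freq.frequency = units;
-
-        return ioctl(fd, VIDIOC_S_FREQUENCY, &amp;freq);
-}
-</programlisting>
-	  </para>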
- </listitem>
-
- <listitem>
- <para>The idea of completely transparent video standards was
-dropped. Experience showed that applications must be able to work with
-video standards beyond presenting the user a menu. Instead of
-enumerating supported standards with an ioctl applications can now
-refer to standards by &v4l2-std-id; and symbols defined in the
-<filename>videodev2.h</filename> header file. For details see <xref
- linkend="standard" />. The &VIDIOC-G-STD; and
-&VIDIOC-S-STD; now take a pointer to this type as argument.
-&VIDIOC-QUERYSTD; was added to autodetect the received standard, if
-the hardware has this capability. In &v4l2-standard; an
-<structfield>index</structfield> field was added for &VIDIOC-ENUMSTD;.
-A &v4l2-std-id; field named <structfield>id</structfield> was added as
-machine readable identifier, also replacing the
-<structfield>transmission</structfield> field. The misleading
-<structfield>framerate</structfield> field was renamed
-to <structfield>frameperiod</structfield>. The now obsolete
-<structfield>colorstandard</structfield> information, originally
-needed to distinguish between variations of standards, was
-removed.</para>
-
- <para>Struct <structname>v4l2_enumstd</structname> ceased to
-be. &VIDIOC-ENUMSTD; now takes a pointer to a &v4l2-standard;
-directly. The information about which standards are supported by a
-particular video input or output moved into &v4l2-input; and
-&v4l2-output; fields named <structfield>std</structfield>,
-respectively.</para>
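-
-	  <para>A short sketch of the resulting interface (final V4L2
-names, the function name is illustrative only, error handling reduced
-to a message): the supported standards are enumerated by index, and
-the selected standard is passed by pointer.
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* List the standards supported by the current input, then select PAL. */
-void select_pal(int fd)
-{
-        struct v4l2_standard std;
-        v4l2_std_id id = V4L2_STD_PAL;
-
-        memset(&amp;std, 0, sizeof(std));
-        while (ioctl(fd, VIDIOC_ENUMSTD, &amp;std) == 0) {
-                printf("%s\n", std.name);
-                std.index++;
-        }
-
-        if (ioctl(fd, VIDIOC_S_STD, &amp;id) == -1)
-                perror("VIDIOC_S_STD");
-}
-</programlisting>
-	  </para>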
- </listitem>
-
- <listitem>
- <para>The &v4l2-queryctrl; fields
-<structfield>category</structfield> and
-<structfield>group</structfield> did not catch on and/or were not
-implemented as expected and therefore removed.</para>
- </listitem>
-
- <listitem>
- <para>The &VIDIOC-TRY-FMT; ioctl was added to negotiate data
-formats as with &VIDIOC-S-FMT;, but without the overhead of
-programming the hardware and regardless of I/O in progress.</para>
-
- <para>In &v4l2-format; the <structfield>fmt</structfield>
-union was extended to contain &v4l2-window;. All image format
-negotiations are now possible with <constant>VIDIOC_G_FMT</constant>,
-<constant>VIDIOC_S_FMT</constant> and
-<constant>VIDIOC_TRY_FMT</constant> ioctls. The
-<constant>VIDIOC_G_WIN</constant> and
-<constant>VIDIOC_S_WIN</constant> ioctls to prepare for a video
-overlay were removed. The <structfield>type</structfield> field
-changed to type &v4l2-buf-type; and the buffer type names changed as
-follows.<informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>Old defines</entry>
- <entry>&v4l2-buf-type;</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_BUF_TYPE_CAPTURE</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant></entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_CODECIN</constant></entry>
- <entry>Omitted for now</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_CODECOUT</constant></entry>
- <entry>Omitted for now</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_EFFECTSIN</constant></entry>
- <entry>Omitted for now</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_EFFECTSIN2</constant></entry>
- <entry>Omitted for now</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_EFFECTSOUT</constant></entry>
- <entry>Omitted for now</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEOOUT</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_VBI_CAPTURE</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_VBI_OUTPUT</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_SLICED_VBI_CAPTURE</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_SLICED_VBI_OUTPUT</constant></entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_PRIVATE_BASE</constant></entry>
- <entry><constant>V4L2_BUF_TYPE_PRIVATE</constant> (but this is deprecated)</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-fmtdesc; a &v4l2-buf-type; field named
-<structfield>type</structfield> was added as in &v4l2-format;. The
-<constant>VIDIOC_ENUM_FBUFFMT</constant> ioctl is no longer needed and
-was removed. These calls can be replaced by &VIDIOC-ENUM-FMT; with
-type <constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>.</para>
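-
-	  <para>For example, a sketch of such an enumeration loop (final
-V4L2 names, the function name is illustrative only, no error
-handling):
-<programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Enumerate the image formats supported for video overlay. */
-void list_overlay_formats(int fd)
-{
-        struct v4l2_fmtdesc desc;
-
-        memset(&amp;desc, 0, sizeof(desc));
-        desc.type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
-
-        while (ioctl(fd, VIDIOC_ENUM_FMT, &amp;desc) == 0) {
-                printf("%s\n", desc.description);
-                desc.index++;
-        }
-}
-</programlisting>
-	  </para>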
- </listitem>
-
- <listitem>
- <para>In &v4l2-pix-format; the
-<structfield>depth</structfield> field was removed, assuming
-applications which recognize the format by its four-character-code
-already know the color depth, and others do not care about it. The
-same rationale led to the removal of the
-<constant>V4L2_FMT_FLAG_COMPRESSED</constant> flag. The
-<constant>V4L2_FMT_FLAG_SWCONVECOMPRESSED</constant> flag was removed
-because drivers are not supposed to convert images in kernel space. A
-user library of conversion functions should be provided instead. The
-<constant>V4L2_FMT_FLAG_BYTESPERLINE</constant> flag was redundant.
-Applications can set the <structfield>bytesperline</structfield> field
-to zero to get a reasonable default. Since the remaining flags were
-replaced as well, the <structfield>flags</structfield> field itself
-was removed.</para>
- <para>The interlace flags were replaced by a &v4l2-field;
-value in a newly added <structfield>field</structfield>
-field.<informaltable>
- <tgroup cols="2">
- <thead>
- <row>
- <entry>Old flag</entry>
- <entry>&v4l2-field;</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FMT_FLAG_NOT_INTERLACED</constant></entry>
- <entry>?</entry>
- </row>
- <row>
- <entry><constant>V4L2_FMT_FLAG_INTERLACED</constant>
-= <constant>V4L2_FMT_FLAG_COMBINED</constant></entry>
- <entry><constant>V4L2_FIELD_INTERLACED</constant></entry>
- </row>
- <row>
- <entry><constant>V4L2_FMT_FLAG_TOPFIELD</constant>
-= <constant>V4L2_FMT_FLAG_ODDFIELD</constant></entry>
- <entry><constant>V4L2_FIELD_TOP</constant></entry>
- </row>
- <row>
- <entry><constant>V4L2_FMT_FLAG_BOTFIELD</constant>
-= <constant>V4L2_FMT_FLAG_EVENFIELD</constant></entry>
- <entry><constant>V4L2_FIELD_BOTTOM</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_FIELD_SEQ_TB</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_FIELD_SEQ_BT</constant></entry>
- </row>
- <row>
- <entry><constant>-</constant></entry>
- <entry><constant>V4L2_FIELD_ALTERNATE</constant></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
-
- <para>The color space flags were replaced by a
-&v4l2-colorspace; value in a newly added
-<structfield>colorspace</structfield> field, where one of
-<constant>V4L2_COLORSPACE_SMPTE170M</constant>,
-<constant>V4L2_COLORSPACE_BT878</constant>,
-<constant>V4L2_COLORSPACE_470_SYSTEM_M</constant> or
-<constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant> replaces
-<constant>V4L2_FMT_CS_601YUV</constant>.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-requestbuffers; the
-<structfield>type</structfield> field was properly defined as
-&v4l2-buf-type;. Buffer types changed as mentioned above. A new
-<structfield>memory</structfield> field of type &v4l2-memory; was
-added to distinguish between I/O methods using buffers allocated
-by the driver or the application. See <xref linkend="io" /> for
-details.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-buffer; the <structfield>type</structfield>
-field was properly defined as &v4l2-buf-type;. Buffer types changed as
-mentioned above. A <structfield>field</structfield> field of type
-&v4l2-field; was added to indicate if a buffer contains a top or
-bottom field. The old field flags were removed. Since no unadjusted
-system time clock was added to the kernel as planned, the
-<structfield>timestamp</structfield> field changed back from type
-stamp_t, a signed 64-bit integer expressing the sample time in
-nanoseconds, to struct <structname>timeval</structname>. With the
-addition of a second memory mapping method the
-<structfield>offset</structfield> field moved into union
-<structfield>m</structfield>, and a new
-<structfield>memory</structfield> field of type &v4l2-memory; was
-added to distinguish between I/O methods. See <xref linkend="io" />
-for details.</para>
-
- <para>The <constant>V4L2_BUF_REQ_CONTIG</constant>
-flag was used by the V4L compatibility layer, after changes to this
-code it was no longer needed. The
-<constant>V4L2_BUF_ATTR_DEVICEMEM</constant> flag would indicate if
-the buffer was indeed allocated in device memory rather than DMA-able
-system memory. It was barely useful and so was removed.</para>
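-
-	  <para>A minimal sketch of how the new
-<structfield>memory</structfield> field and the
-<structfield>m.offset</structfield> union member work together when
-mapping driver-allocated buffers (final V4L2 names, the function name
-is illustrative only, error handling reduced to returning
-<constant>MAP_FAILED</constant>):
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;sys/mman.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Request four driver-allocated buffers and map the first one. */
-void *map_first_buffer(int fd, size_t *length)
-{
-        struct v4l2_requestbuffers req;
-        struct v4l2_buffer buf;
-
-        memset(&amp;req, 0, sizeof(req));
-        req.count = 4;
-        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        req.memory = V4L2_MEMORY_MMAP;
-        if (ioctl(fd, VIDIOC_REQBUFS, &amp;req) == -1)
-                return MAP_FAILED;
-
-        memset(&amp;buf, 0, sizeof(buf));
-        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        buf.memory = V4L2_MEMORY_MMAP;
-        buf.index = 0;
-        if (ioctl(fd, VIDIOC_QUERYBUF, &amp;buf) == -1)
-                return MAP_FAILED;
-
-        *length = buf.length;
-        return mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
-                    MAP_SHARED, fd, buf.m.offset);
-}
-</programlisting>
-	  </para>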
- </listitem>
-
- <listitem>
- <para>In &v4l2-framebuffer; the
-<structfield>base[3]</structfield> array, which anticipated double- and
-triple-buffering in off-screen video memory but never defined a
-synchronization mechanism, was replaced by a single pointer. The
-<constant>V4L2_FBUF_CAP_SCALEUP</constant> and
-<constant>V4L2_FBUF_CAP_SCALEDOWN</constant> flags were removed.
-Applications can determine this capability more accurately using the
-new cropping and scaling interface. The
-<constant>V4L2_FBUF_CAP_CLIPPING</constant> flag was replaced by
-<constant>V4L2_FBUF_CAP_LIST_CLIPPING</constant> and
-<constant>V4L2_FBUF_CAP_BITMAP_CLIPPING</constant>.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-clip; the <structfield>x</structfield>,
-<structfield>y</structfield>, <structfield>width</structfield> and
-<structfield>height</structfield> fields moved into a
-<structfield>c</structfield> substructure of type &v4l2-rect;. The
-<structfield>x</structfield> and <structfield>y</structfield> fields
-were renamed to <structfield>left</structfield> and
-<structfield>top</structfield>, &ie; offsets to a context dependent
-origin.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-window; the <structfield>x</structfield>,
-<structfield>y</structfield>, <structfield>width</structfield> and
-<structfield>height</structfield> fields moved into a
-<structfield>w</structfield> substructure as above. A
-<structfield>field</structfield> field of type &v4l2-field; was added
-to distinguish between field and frame (interlaced) overlay.</para>
- </listitem>
-
- <listitem>
- <para>The digital zoom interface, including struct
-<structname>v4l2_zoomcap</structname>, struct
-<structname>v4l2_zoom</structname>,
-<constant>V4L2_ZOOM_NONCAP</constant> and
-<constant>V4L2_ZOOM_WHILESTREAMING</constant> was replaced by a new
-cropping and scaling interface. The previously unused struct
-<structname>v4l2_cropcap</structname> and
-<structname>v4l2_crop</structname> were redefined for this purpose.
-See <xref linkend="crop" /> for details.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-vbi-format; the
-<structfield>sample_format</structfield> field now contains a
-four-character-code as used to identify video image formats and
-<constant>V4L2_PIX_FMT_GREY</constant> replaces the
-<constant>V4L2_VBI_SF_UBYTE</constant> define. The
-<structfield>reserved</structfield> field was extended.</para>
- </listitem>
-
- <listitem>
- <para>In &v4l2-captureparm; the type of the
-<structfield>timeperframe</structfield> field changed from unsigned
-long to &v4l2-fract;. This allows the accurate expression of multiples
-of the NTSC-M frame rate 30000 / 1001. A new field
-<structfield>readbuffers</structfield> was added to control the driver
-behaviour in read I/O mode.</para>
-
- <para>Similar changes were made to &v4l2-outputparm;.</para>
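-
-	  <para>For example, a sketch requesting the NTSC-M rate with the
-new types (final V4L2 names, the function name is illustrative only,
-error handling left to the caller); note that
-<structfield>timeperframe</structfield> is the inverse of the frame
-rate, &ie; 1001/30000 seconds per frame:
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Ask for 30000/1001 frames per second and four read() buffers. */
-int set_ntsc_frame_rate(int fd)
-{
-        struct v4l2_streamparm parm;
-
-        memset(&amp;parm, 0, sizeof(parm));
-        parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        parm.parm.capture.timeperframe.numerator = 1001;
-        parm.parm.capture.timeperframe.denominator = 30000;
-        parm.parm.capture.readbuffers = 4;
-
-        return ioctl(fd, VIDIOC_S_PARM, &amp;parm);
-}
-</programlisting>
-	  </para>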
- </listitem>
-
- <listitem>
- <para>The struct <structname>v4l2_performance</structname>
-and <constant>VIDIOC_G_PERF</constant> ioctl were dropped. Except when
-using the <link linkend="rw">read/write I/O method</link>, which is
-limited anyway, this information is already available to
-applications.</para>
- </listitem>
-
- <listitem>
- <para>The example transformation from RGB to YCbCr color
-space in the old V4L2 documentation was inaccurate, this has been
-corrected in <xref linkend="pixfmt" />.<!-- 0.5670G should be
-0.587, and 127/112 != 255/224 --></para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 2003-06-19</title>
-
- <orderedlist>
- <listitem>
- <para>A new capability flag
-<constant>V4L2_CAP_RADIO</constant> was added for radio devices. Prior
-to this change radio devices were identified solely by having exactly one
-tuner whose type field reads <constant>V4L2_TUNER_RADIO</constant>.</para>
- </listitem>
-
- <listitem>
- <para>An optional driver access priority mechanism was
-added, see <xref linkend="app-pri" /> for details.</para>
- </listitem>
-
- <listitem>
- <para>The audio input and output interface was found to be
-incomplete.</para>
- <para>Previously the &VIDIOC-G-AUDIO;
-ioctl would enumerate the available audio inputs. An ioctl to
-determine the current audio input, if more than one combines with the
-current video input, did not exist. So
-<constant>VIDIOC_G_AUDIO</constant> was renamed to
-<constant>VIDIOC_G_AUDIO_OLD</constant>; this ioctl was removed on
-Kernel 2.6.39. The &VIDIOC-ENUMAUDIO; ioctl was added to enumerate
-audio inputs, while &VIDIOC-G-AUDIO; now reports the current audio
-input.</para>
- <para>The same changes were made to &VIDIOC-G-AUDOUT; and
-&VIDIOC-ENUMAUDOUT;.</para>
-      <para>Until further notice the "videodev" module will automatically
-translate between the old and new ioctls, but drivers and applications
-must be updated to successfully compile again.</para>
- </listitem>
-
- <listitem>
- <para>The &VIDIOC-OVERLAY; ioctl was incorrectly defined with
-write-read parameter. It was changed to write-only, while the write-read
-version was renamed to <constant>VIDIOC_OVERLAY_OLD</constant>. The old
-ioctl was removed on Kernel 2.6.39. Until further notice the "videodev"
-kernel module will automatically translate to the new version, so drivers
-must be recompiled, but not applications.</para>
- </listitem>
-
- <listitem>
- <para><xref linkend="overlay" /> incorrectly stated that
-clipping rectangles define regions where the video can be seen.
-Correct is that clipping rectangles define regions where
-<emphasis>no</emphasis> video shall be displayed and so the graphics
-surface can be seen.</para>
- </listitem>
-
- <listitem>
- <para>The &VIDIOC-S-PARM; and &VIDIOC-S-CTRL; ioctls were
-defined with write-only parameter, inconsistent with other ioctls
-modifying their argument. They were changed to write-read, while a
-<constant>_OLD</constant> suffix was added to the write-only versions.
-The old ioctls were removed on Kernel 2.6.39. Drivers and
-applications assuming a constant parameter need an update.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 2003-11-05</title>
- <orderedlist>
- <listitem>
- <para>In <xref linkend="pixfmt-rgb" /> the following pixel
-formats were incorrectly transferred from Bill Dirks' V4L2
-specification. Descriptions below refer to bytes in memory, in
-ascending address order.<informaltable>
- <tgroup cols="3">
- <thead>
- <row>
- <entry>Symbol</entry>
- <entry>In this document prior to revision
-0.5</entry>
- <entry>Corrected</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_PIX_FMT_RGB24</constant></entry>
- <entry>B, G, R</entry>
- <entry>R, G, B</entry>
- </row>
- <row>
- <entry><constant>V4L2_PIX_FMT_BGR24</constant></entry>
- <entry>R, G, B</entry>
- <entry>B, G, R</entry>
- </row>
- <row>
- <entry><constant>V4L2_PIX_FMT_RGB32</constant></entry>
- <entry>B, G, R, X</entry>
- <entry>R, G, B, X</entry>
- </row>
- <row>
- <entry><constant>V4L2_PIX_FMT_BGR32</constant></entry>
- <entry>R, G, B, X</entry>
- <entry>B, G, R, X</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable> The
-<constant>V4L2_PIX_FMT_BGR24</constant> example was always
-correct.</para>
- <para>In <xref linkend="v4l-image-properties" /> the mapping
-of the V4L <constant>VIDEO_PALETTE_RGB24</constant> and
-<constant>VIDEO_PALETTE_RGB32</constant> formats to V4L2 pixel formats
-was accordingly corrected.</para>
- </listitem>
-
- <listitem>
- <para>Unrelated to the fixes above, drivers may still
-interpret some V4L2 RGB pixel formats differently. These issues have
-yet to be addressed, for details see <xref
- linkend="pixfmt-rgb" />.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.6, 2004-05-09</title>
- <orderedlist>
- <listitem>
- <para>The &VIDIOC-CROPCAP; ioctl was incorrectly defined
-with read-only parameter. It is now defined as write-read ioctl, while
-the read-only version was renamed to
-<constant>VIDIOC_CROPCAP_OLD</constant>. The old ioctl was removed
-on Kernel 2.6.39.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.8</title>
- <orderedlist>
- <listitem>
- <para>A new field <structfield>input</structfield> (former
-<structfield>reserved[0]</structfield>) was added to the &v4l2-buffer;
-structure. The purpose of this field is to alternate between video inputs
-(&eg; cameras) in step with the video capturing process. This function
-must be enabled with the new <constant>V4L2_BUF_FLAG_INPUT</constant>
-flag. The <structfield>flags</structfield> field is no longer
-read-only.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2004-08-01</title>
-
- <orderedlist>
- <listitem>
- <para>The return value of the
-<xref linkend="func-open" /> function was incorrectly documented.</para>
- </listitem>
-
- <listitem>
- <para>Audio output ioctls end in -AUDOUT, not -AUDIOOUT.</para>
- </listitem>
-
- <listitem>
- <para>In the Current Audio Input example the
-<constant>VIDIOC_G_AUDIO</constant> ioctl took the wrong
-argument.</para>
- </listitem>
-
- <listitem>
- <para>The documentation of the &VIDIOC-QBUF; and
-&VIDIOC-DQBUF; ioctls did not mention the &v4l2-buffer;
-<structfield>memory</structfield> field. It was also missing from
-examples. Also on the <constant>VIDIOC_DQBUF</constant> page the &EIO;
-was not documented.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.14</title>
- <orderedlist>
- <listitem>
- <para>A new sliced VBI interface was added. It is documented
-in <xref linkend="sliced" /> and replaces the interface first
-proposed in V4L2 specification 0.8.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.15</title>
- <orderedlist>
- <listitem>
- <para>The &VIDIOC-LOG-STATUS; ioctl was added.</para>
- </listitem>
-
- <listitem>
- <para>New video standards
-<constant>V4L2_STD_NTSC_443</constant>,
-<constant>V4L2_STD_SECAM_LC</constant>,
-<constant>V4L2_STD_SECAM_DK</constant> (a set of SECAM D, K and K1),
-and <constant>V4L2_STD_ATSC</constant> (a set of
-<constant>V4L2_STD_ATSC_8_VSB</constant> and
-<constant>V4L2_STD_ATSC_16_VSB</constant>) were defined. Note the
-<constant>V4L2_STD_525_60</constant> set now includes
-<constant>V4L2_STD_NTSC_443</constant>. See also <xref
- linkend="v4l2-std-id" />.</para>
- </listitem>
-
- <listitem>
- <para>The <constant>VIDIOC_G_COMP</constant> and
-<constant>VIDIOC_S_COMP</constant> ioctl were renamed to
-<constant>VIDIOC_G_MPEGCOMP</constant> and
-<constant>VIDIOC_S_MPEGCOMP</constant> respectively. Their argument
-was replaced by a struct
-<structname>v4l2_mpeg_compression</structname> pointer. (The
-<constant>VIDIOC_G_MPEGCOMP</constant> and
-<constant>VIDIOC_S_MPEGCOMP</constant> ioctls were removed in Linux
-2.6.25.)</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2005-11-27</title>
- <para>The capture example in <xref linkend="capture-example" />
-called the &VIDIOC-S-CROP; ioctl without checking if cropping is
-supported. In the video standard selection example in
-<xref linkend="standard" /> the &VIDIOC-S-STD; call used the wrong
-argument type.</para>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2006-01-10</title>
- <orderedlist>
- <listitem>
- <para>The <constant>V4L2_IN_ST_COLOR_KILL</constant> flag in
-&v4l2-input; not only indicates if the color killer is enabled, but
-also if it is active. (The color killer disables color decoding when
-it detects no color in the video signal to improve the image
-quality.)</para>
- </listitem>
-
- <listitem>
- <para>&VIDIOC-S-PARM; is a write-read ioctl, not write-only as
-stated on its reference page. The ioctl changed in 2003 as noted above.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2006-02-03</title>
- <orderedlist>
- <listitem>
- <para>In &v4l2-captureparm; and &v4l2-outputparm; the
-<structfield>timeperframe</structfield> field gives the time in
-seconds, not microseconds.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2006-02-04</title>
- <orderedlist>
- <listitem>
- <para>The <structfield>clips</structfield> field in
-&v4l2-window; must point to an array of &v4l2-clip;, not a linked
-list, because drivers ignore the struct
-<structname>v4l2_clip</structname>.<structfield>next</structfield>
-pointer.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.17</title>
- <orderedlist>
- <listitem>
- <para>New video standard macros were added:
-<constant>V4L2_STD_NTSC_M_KR</constant> (NTSC M South Korea), and the
-sets <constant>V4L2_STD_MN</constant>,
-<constant>V4L2_STD_B</constant>, <constant>V4L2_STD_GH</constant> and
-<constant>V4L2_STD_DK</constant>. The
-<constant>V4L2_STD_NTSC</constant> and
-<constant>V4L2_STD_SECAM</constant> sets now include
-<constant>V4L2_STD_NTSC_M_KR</constant> and
-<constant>V4L2_STD_SECAM_LC</constant> respectively.</para>
- </listitem>
-
- <listitem>
- <para>A new <constant>V4L2_TUNER_MODE_LANG1_LANG2</constant>
-was defined to record both languages of a bilingual program. The
-use of <constant>V4L2_TUNER_MODE_STEREO</constant> for this purpose
-is deprecated now. See the &VIDIOC-G-TUNER; section for
-details.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2006-09-23 (Draft 0.15)</title>
- <orderedlist>
- <listitem>
- <para>In various places
-<constant>V4L2_BUF_TYPE_SLICED_VBI_CAPTURE</constant> and
-<constant>V4L2_BUF_TYPE_SLICED_VBI_OUTPUT</constant> of the sliced VBI
-interface were not mentioned along with other buffer types.</para>
- </listitem>
-
- <listitem>
- <para>In <xref linkend="vidioc-g-audio" /> it was clarified
-that the &v4l2-audio; <structfield>mode</structfield> field is a flags
-field.</para>
- </listitem>
-
- <listitem>
- <para><xref linkend="vidioc-querycap" /> did not mention the
-sliced VBI and radio capability flags.</para>
- </listitem>
-
- <listitem>
- <para>In <xref linkend="vidioc-g-frequency" /> it was
-clarified that applications must initialize the tuner
-<structfield>type</structfield> field of &v4l2-frequency; before
-calling &VIDIOC-S-FREQUENCY;.</para>
- </listitem>
-
- <listitem>
- <para>The <structfield>reserved</structfield> array
-in &v4l2-requestbuffers; has 2 elements, not 32.</para>
- </listitem>
-
- <listitem>
- <para>In <xref linkend="output" /> and <xref
- linkend="raw-vbi" /> the device file names
-<filename>/dev/vout</filename> which never caught on were replaced
-by <filename>/dev/video</filename>.</para>
- </listitem>
-
- <listitem>
- <para>With Linux 2.6.15 the possible range for VBI device minor
-numbers was extended from 224-239 to 224-255. Accordingly device file names
-<filename>/dev/vbi0</filename> to <filename>/dev/vbi31</filename> are
-possible now.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.18</title>
- <orderedlist>
- <listitem>
- <para>New ioctls &VIDIOC-G-EXT-CTRLS;, &VIDIOC-S-EXT-CTRLS;
-and &VIDIOC-TRY-EXT-CTRLS; were added, a flag to skip unsupported
-controls with &VIDIOC-QUERYCTRL;, new control types
-<constant>V4L2_CTRL_TYPE_INTEGER64</constant> and
-<constant>V4L2_CTRL_TYPE_CTRL_CLASS</constant> (<xref
- linkend="v4l2-ctrl-type" />), and new control flags
-<constant>V4L2_CTRL_FLAG_READ_ONLY</constant>,
-<constant>V4L2_CTRL_FLAG_UPDATE</constant>,
-<constant>V4L2_CTRL_FLAG_INACTIVE</constant> and
-<constant>V4L2_CTRL_FLAG_SLIDER</constant> (<xref
- linkend="control-flags" />). See <xref
- linkend="extended-controls" /> for details.</para>
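-
-	  <para>A minimal sketch of the new interface (the MPEG bitrate
-control is only one example of a control belonging to a control class,
-the function name is illustrative only, error handling left to the
-caller):
-<programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Set a single control of the MPEG class through the extended
-   control interface; several controls of one class could be set
-   atomically in the same call. */
-int set_mpeg_video_bitrate(int fd, int bitrate)
-{
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&amp;ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
-        ctrl.value = bitrate;
-
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls);
-}
-</programlisting>
-	  </para>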
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.19</title>
- <orderedlist>
- <listitem>
- <para>In &v4l2-sliced-vbi-cap; a buffer type field was added
-replacing a reserved field. Note that on architectures where the size of
-enum types differs from that of int, the size of the structure changed.
-The &VIDIOC-G-SLICED-VBI-CAP; ioctl was redefined from being read-only
-to write-read. Applications must initialize the type field and clear
-the reserved fields now. These changes may <emphasis>break the
-compatibility</emphasis> with older drivers and applications.</para>
- </listitem>
-
- <listitem>
- <para>The ioctls &VIDIOC-ENUM-FRAMESIZES; and
-&VIDIOC-ENUM-FRAMEINTERVALS; were added.</para>
- </listitem>
-
- <listitem>
- <para>A new pixel format <constant>V4L2_PIX_FMT_RGB444</constant> (<xref
-linkend="rgb-formats" />) was added.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 spec erratum 2006-10-12 (Draft 0.17)</title>
- <orderedlist>
- <listitem>
- <para><constant>V4L2_PIX_FMT_HM12</constant> (<xref
-linkend="reserved-formats" />) is a YUV 4:2:0, not 4:2:2 format.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.21</title>
- <orderedlist>
- <listitem>
- <para>The <filename>videodev2.h</filename> header file is
-now dual licensed under GNU General Public License version two or
-later, and under a 3-clause BSD-style license.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.22</title>
- <orderedlist>
- <listitem>
- <para>Two new field orders
- <constant>V4L2_FIELD_INTERLACED_TB</constant> and
- <constant>V4L2_FIELD_INTERLACED_BT</constant> were
- added. See <xref linkend="v4l2-field" /> for details.</para>
- </listitem>
-
- <listitem>
- <para>Three new clipping/blending methods with a global or
-straight or inverted local alpha value were added to the video overlay
-interface. See the description of the &VIDIOC-G-FBUF; and
-&VIDIOC-S-FBUF; ioctls for details.</para>
- <para>A new <structfield>global_alpha</structfield> field
-was added to <link
-linkend="v4l2-window"><structname>v4l2_window</structname></link>,
-extending the structure. This may <emphasis>break
-compatibility</emphasis> with applications using a struct
-<structname>v4l2_window</structname> directly. However the <link
-linkend="vidioc-g-fmt">VIDIOC_G/S/TRY_FMT</link> ioctls, which take a
-pointer to a <link linkend="v4l2-format">v4l2_format</link> parent
-structure with padding bytes at the end, are not affected.</para>
- </listitem>
-
- <listitem>
- <para>The format of the <structfield>chromakey</structfield>
-field in &v4l2-window; changed from "host order RGB32" to a pixel
-value in the same format as the framebuffer. This may <emphasis>break
-compatibility</emphasis> with existing applications. Drivers
-supporting the "host order RGB32" format are not known.</para>
- </listitem>
-
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.24</title>
- <orderedlist>
- <listitem>
- <para>The pixel formats
-<constant>V4L2_PIX_FMT_PAL8</constant>,
-<constant>V4L2_PIX_FMT_YUV444</constant>,
-<constant>V4L2_PIX_FMT_YUV555</constant>,
-<constant>V4L2_PIX_FMT_YUV565</constant> and
-<constant>V4L2_PIX_FMT_YUV32</constant> were added.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.25</title>
- <orderedlist>
- <listitem>
- <para>The pixel formats <link linkend="V4L2-PIX-FMT-Y16">
-<constant>V4L2_PIX_FMT_Y16</constant></link> and <link
-linkend="V4L2-PIX-FMT-SBGGR16">
-<constant>V4L2_PIX_FMT_SBGGR16</constant></link> were added.</para>
- </listitem>
- <listitem>
- <para>New <link linkend="control">controls</link>
-<constant>V4L2_CID_POWER_LINE_FREQUENCY</constant>,
-<constant>V4L2_CID_HUE_AUTO</constant>,
-<constant>V4L2_CID_WHITE_BALANCE_TEMPERATURE</constant>,
-<constant>V4L2_CID_SHARPNESS</constant> and
-<constant>V4L2_CID_BACKLIGHT_COMPENSATION</constant> were added. The
-controls <constant>V4L2_CID_BLACK_LEVEL</constant>,
-<constant>V4L2_CID_WHITENESS</constant>,
-<constant>V4L2_CID_HCENTER</constant> and
-<constant>V4L2_CID_VCENTER</constant> were deprecated.
-</para>
- </listitem>
- <listitem>
- <para>A <link linkend="camera-controls">Camera controls
-class</link> was added, with the new controls
-<constant>V4L2_CID_EXPOSURE_AUTO</constant>,
-<constant>V4L2_CID_EXPOSURE_ABSOLUTE</constant>,
-<constant>V4L2_CID_EXPOSURE_AUTO_PRIORITY</constant>,
-<constant>V4L2_CID_PAN_RELATIVE</constant>,
-<constant>V4L2_CID_TILT_RELATIVE</constant>,
-<constant>V4L2_CID_PAN_RESET</constant>,
-<constant>V4L2_CID_TILT_RESET</constant>,
-<constant>V4L2_CID_PAN_ABSOLUTE</constant>,
-<constant>V4L2_CID_TILT_ABSOLUTE</constant>,
-<constant>V4L2_CID_FOCUS_ABSOLUTE</constant>,
-<constant>V4L2_CID_FOCUS_RELATIVE</constant> and
-<constant>V4L2_CID_FOCUS_AUTO</constant>.</para>
- </listitem>
- <listitem>
- <para>The <constant>VIDIOC_G_MPEGCOMP</constant> and
-<constant>VIDIOC_S_MPEGCOMP</constant> ioctls, which were superseded
-by the <link linkend="extended-controls">extended controls</link>
-interface in Linux 2.6.18, were finally removed from the
-<filename>videodev2.h</filename> header file.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.26</title>
- <orderedlist>
- <listitem>
- <para>The pixel formats
-<constant>V4L2_PIX_FMT_Y16</constant> and
-<constant>V4L2_PIX_FMT_SBGGR16</constant> were added.</para>
- </listitem>
- <listitem>
- <para>Added user controls
-<constant>V4L2_CID_CHROMA_AGC</constant> and
-<constant>V4L2_CID_COLOR_KILLER</constant>.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.27</title>
- <orderedlist>
- <listitem>
- <para>The &VIDIOC-S-HW-FREQ-SEEK; ioctl and the
-<constant>V4L2_CAP_HW_FREQ_SEEK</constant> capability were added.</para>
- </listitem>
- <listitem>
- <para>The pixel formats
-<constant>V4L2_PIX_FMT_YVYU</constant>,
-<constant>V4L2_PIX_FMT_PCA501</constant>,
-<constant>V4L2_PIX_FMT_PCA505</constant>,
-<constant>V4L2_PIX_FMT_PCA508</constant>,
-<constant>V4L2_PIX_FMT_PCA561</constant>,
-<constant>V4L2_PIX_FMT_SGBRG8</constant>,
-<constant>V4L2_PIX_FMT_PAC207</constant> and
-<constant>V4L2_PIX_FMT_PJPG</constant> were added.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.28</title>
- <orderedlist>
- <listitem>
- <para>Added <constant>V4L2_MPEG_AUDIO_ENCODING_AAC</constant> and
-<constant>V4L2_MPEG_AUDIO_ENCODING_AC3</constant> MPEG audio encodings.</para>
- </listitem>
- <listitem>
- <para>Added <constant>V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC</constant> MPEG
-video encoding.</para>
- </listitem>
- <listitem>
- <para>The pixel formats
-<constant>V4L2_PIX_FMT_SGRBG10</constant> and
-<constant>V4L2_PIX_FMT_SGRBG10DPCM8</constant> were added.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 2.6.29</title>
- <orderedlist>
- <listitem>
- <para>The <constant>VIDIOC_G_CHIP_IDENT</constant> ioctl was renamed
-to <constant>VIDIOC_G_CHIP_IDENT_OLD</constant> and <constant>VIDIOC_DBG_G_CHIP_IDENT</constant>
-was introduced in its place. The old struct <structname>v4l2_chip_ident</structname>
-was renamed to <structname id="v4l2-chip-ident-old">v4l2_chip_ident_old</structname>.</para>
- </listitem>
- <listitem>
- <para>The pixel formats
-<constant>V4L2_PIX_FMT_VYUY</constant>,
-<constant>V4L2_PIX_FMT_NV16</constant> and
-<constant>V4L2_PIX_FMT_NV61</constant> were added.</para>
- </listitem>
- <listitem>
- <para>Added camera controls
-<constant>V4L2_CID_ZOOM_ABSOLUTE</constant>,
-<constant>V4L2_CID_ZOOM_RELATIVE</constant>,
-<constant>V4L2_CID_ZOOM_CONTINUOUS</constant> and
-<constant>V4L2_CID_PRIVACY</constant>.</para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 2.6.30</title>
- <orderedlist>
- <listitem>
- <para>New control flag <constant>V4L2_CTRL_FLAG_WRITE_ONLY</constant> was added.</para>
- </listitem>
- <listitem>
- <para>New control <constant>V4L2_CID_COLORFX</constant> was added.</para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 2.6.32</title>
- <orderedlist>
- <listitem>
-          <para>To make it easier to compare a V4L2 API version and a kernel
-version, the V4L2 API is now numbered using the Linux kernel version numbering.</para>
- </listitem>
- <listitem>
- <para>Finalized the RDS capture API. See <xref linkend="rds" /> for
-more information.</para>
- </listitem>
- <listitem>
- <para>Added new capabilities for modulators and RDS encoders.</para>
- </listitem>
- <listitem>
-          <para>Added a description of the libv4l API.</para>
- </listitem>
- <listitem>
- <para>Added support for string controls via new type <constant>V4L2_CTRL_TYPE_STRING</constant>.</para>
- </listitem>
- <listitem>
- <para>Added <constant>V4L2_CID_BAND_STOP_FILTER</constant> documentation.</para>
- </listitem>
- <listitem>
-          <para>Added the FM Modulator (FM TX) Extended Control Class <constant>V4L2_CTRL_CLASS_FM_TX</constant> and its Control IDs.</para>
- </listitem>
-<listitem>
-          <para>Added the FM Receiver (FM RX) Extended Control Class <constant>V4L2_CTRL_CLASS_FM_RX</constant> and its Control IDs.</para>
- </listitem>
- <listitem>
- <para>Added Remote Controller chapter, describing the default Remote Controller mapping for media devices.</para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 2.6.33</title>
- <orderedlist>
- <listitem>
- <para>Added support for Digital Video timings in order to support HDTV receivers and transmitters.</para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 2.6.34</title>
- <orderedlist>
- <listitem>
- <para>Added
-<constant>V4L2_CID_IRIS_ABSOLUTE</constant> and
-<constant>V4L2_CID_IRIS_RELATIVE</constant> controls to the
- <link linkend="camera-controls">Camera controls class</link>.
- </para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 2.6.37</title>
- <orderedlist>
- <listitem>
-          <para>Removed the vtx (videotext/teletext) API. This API was no longer
-used, no hardware exists to verify the API, and no userspace applications
-that used it were found. It was originally scheduled for removal in 2.6.35.
- </para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 2.6.39</title>
- <orderedlist>
- <listitem>
- <para>The old VIDIOC_*_OLD symbols and V4L1 support were removed.</para>
- </listitem>
- <listitem>
- <para>Multi-planar API added. Does not affect the compatibility of
- current drivers and applications. See
- <link linkend="planar-apis">multi-planar API</link>
- for details.</para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 3.1</title>
- <orderedlist>
- <listitem>
- <para>VIDIOC_QUERYCAP now returns a per-subsystem version instead of a per-driver one.</para>
-	    <para>Standardized the error code returned for invalid ioctls.</para>
- <para>Added V4L2_CTRL_TYPE_BITMASK.</para>
- </listitem>
- </orderedlist>
- </section>
- <section>
- <title>V4L2 in Linux 3.2</title>
- <orderedlist>
- <listitem>
- <para>V4L2_CTRL_FLAG_VOLATILE was added to signal volatile controls to userspace.</para>
- </listitem>
- <listitem>
-	    <para>Added the selection API for extended control over cropping
- and composing. Does not affect the compatibility of current
- drivers and applications. See <link
- linkend="selection-api"> selection API </link> for
- details.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.3</title>
- <orderedlist>
- <listitem>
- <para>Added <constant>V4L2_CID_ALPHA_COMPONENT</constant> control
- to the <link linkend="control">User controls class</link>.
- </para>
- </listitem>
- <listitem>
-	    <para>Added the device_caps field to struct v4l2_capability and added the new
-	    V4L2_CAP_DEVICE_CAPS capability.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.4</title>
- <orderedlist>
- <listitem>
- <para>Added <link linkend="jpeg-controls">JPEG compression control
- class</link>.</para>
- </listitem>
- <listitem>
- <para>Extended the DV Timings API:
- &VIDIOC-ENUM-DV-TIMINGS;, &VIDIOC-QUERY-DV-TIMINGS; and
- &VIDIOC-DV-TIMINGS-CAP;.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.5</title>
- <orderedlist>
- <listitem>
-	    <para>Added integer menus; the new type is
-	    V4L2_CTRL_TYPE_INTEGER_MENU.</para>
- </listitem>
- <listitem>
- <para>Added selection API for V4L2 subdev interface:
- &VIDIOC-SUBDEV-G-SELECTION; and
- &VIDIOC-SUBDEV-S-SELECTION;.</para>
- </listitem>
- <listitem>
- <para> Added <constant>V4L2_COLORFX_ANTIQUE</constant>,
- <constant>V4L2_COLORFX_ART_FREEZE</constant>,
- <constant>V4L2_COLORFX_AQUA</constant>,
- <constant>V4L2_COLORFX_SILHOUETTE</constant>,
- <constant>V4L2_COLORFX_SOLARIZATION</constant>,
- <constant>V4L2_COLORFX_VIVID</constant> and
- <constant>V4L2_COLORFX_ARBITRARY_CBCR</constant> menu items
- to the <constant>V4L2_CID_COLORFX</constant> control.</para>
- </listitem>
- <listitem>
- <para> Added <constant>V4L2_CID_COLORFX_CBCR</constant> control.</para>
- </listitem>
- <listitem>
- <para> Added camera controls <constant>V4L2_CID_AUTO_EXPOSURE_BIAS</constant>,
- <constant>V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE</constant>,
- <constant>V4L2_CID_IMAGE_STABILIZATION</constant>,
- <constant>V4L2_CID_ISO_SENSITIVITY</constant>,
- <constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>,
- <constant>V4L2_CID_EXPOSURE_METERING</constant>,
- <constant>V4L2_CID_SCENE_MODE</constant>,
- <constant>V4L2_CID_3A_LOCK</constant>,
- <constant>V4L2_CID_AUTO_FOCUS_START</constant>,
- <constant>V4L2_CID_AUTO_FOCUS_STOP</constant>,
- <constant>V4L2_CID_AUTO_FOCUS_STATUS</constant> and
- <constant>V4L2_CID_AUTO_FOCUS_RANGE</constant>.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.6</title>
- <orderedlist>
- <listitem>
- <para>Replaced <structfield>input</structfield> in
- <structname>v4l2_buffer</structname> by
- <structfield>reserved2</structfield> and removed
- <constant>V4L2_BUF_FLAG_INPUT</constant>.</para>
- </listitem>
- <listitem>
- <para>Added V4L2_CAP_VIDEO_M2M and V4L2_CAP_VIDEO_M2M_MPLANE capabilities.</para>
- </listitem>
- <listitem>
- <para>Added support for frequency band enumerations: &VIDIOC-ENUM-FREQ-BANDS;.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.9</title>
- <orderedlist>
- <listitem>
- <para>Added timestamp types to
- <structfield>flags</structfield> field in
- <structname>v4l2_buffer</structname>. See <xref
- linkend="buffer-flags" />.</para>
- </listitem>
- <listitem>
- <para>Added <constant>V4L2_EVENT_CTRL_CH_RANGE</constant> control event
- changes flag. See <xref linkend="ctrl-changes-flags"/>.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.10</title>
- <orderedlist>
- <listitem>
- <para>Removed obsolete and unused DV_PRESET ioctls
-	    VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET, VIDIOC_QUERY_DV_PRESET and
-	    VIDIOC_ENUM_DV_PRESETS. Removed the related v4l2_input/output capability
- flags V4L2_IN_CAP_PRESETS and V4L2_OUT_CAP_PRESETS.
- </para>
- </listitem>
- <listitem>
- <para>Added new debugging ioctl &VIDIOC-DBG-G-CHIP-INFO;.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.11</title>
- <orderedlist>
- <listitem>
-	    <para>Removed the obsolete <constant>VIDIOC_DBG_G_CHIP_IDENT</constant> ioctl.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.14</title>
- <orderedlist>
- <listitem>
- <para> In struct <structname>v4l2_rect</structname>, the type
-of <structfield>width</structfield> and <structfield>height</structfield>
-fields changed from __s32 to __u32.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.15</title>
- <orderedlist>
- <listitem>
- <para>Added Software Defined Radio (SDR) Interface.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.16</title>
- <orderedlist>
- <listitem>
- <para>Added event V4L2_EVENT_SOURCE_CHANGE.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.17</title>
- <orderedlist>
- <listitem>
- <para>Extended &v4l2-pix-format;. Added format flags.
- </para>
- </listitem>
- <listitem>
- <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.18</title>
- <orderedlist>
- <listitem>
- <para>Added <constant>V4L2_CID_PAN_SPEED</constant> and
- <constant>V4L2_CID_TILT_SPEED</constant> camera controls.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 3.19</title>
- <orderedlist>
- <listitem>
- <para>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding;
-and &v4l2-quantization; fields to &v4l2-pix-format;, &v4l2-pix-format-mplane;
-and &v4l2-mbus-framefmt;.
- </para>
- </listitem>
- </orderedlist>
- </section>
-
- <section>
- <title>V4L2 in Linux 4.4</title>
- <orderedlist>
- <listitem>
- <para>Renamed <constant>V4L2_TUNER_ADC</constant> to
-<constant>V4L2_TUNER_SDR</constant>. The use of
-<constant>V4L2_TUNER_ADC</constant> is deprecated now.
- </para>
- </listitem>
- <listitem>
- <para>Added <constant>V4L2_CID_RF_TUNER_RF_GAIN</constant>
-RF Tuner control.</para>
- </listitem>
- <listitem>
- <para>Added transmitter support for Software Defined Radio (SDR)
-Interface.</para>
- </listitem>
- </orderedlist>
- </section>
-
- <section id="other">
- <title>Relation of V4L2 to other Linux multimedia APIs</title>
-
- <section id="xvideo">
- <title>X Video Extension</title>
-
- <para>The X Video Extension (abbreviated XVideo or just Xv) is
-an extension of the X Window system, implemented for example by the
-XFree86 project. Its scope is similar to V4L2, an API to video capture
-and output devices for X clients. Xv allows applications to display
-live video in a window, send window contents to a TV output, and
-capture or output still images in XPixmaps<footnote>
- <para>This is not implemented in XFree86.</para>
-	  </footnote>. With its implementation, XFree86 makes the
-extension available across many operating systems and
-architectures.</para>
-
- <para>Because the driver is embedded into the X server Xv has a
-number of advantages over the V4L2 <link linkend="overlay">video
-overlay interface</link>. The driver can easily determine the overlay
-target, &ie; visible graphics memory or off-screen buffers for a
-destructive overlay. It can program the RAMDAC for a non-destructive
-overlay, scaling or color-keying, or the clipping functions of the
-video capture hardware, always in sync with drawing operations or
-windows moving or changing their stacking order.</para>
-
-	  <para>To combine the advantages of Xv and V4L a special Xv
-driver exists in XFree86 and XOrg, which simply programs any overlay-capable
-Video4Linux device it finds. To enable it,
-<filename>/etc/X11/XF86Config</filename> must contain these lines:</para>
- <para><screen>
-Section "Module"
- Load "v4l"
-EndSection</screen></para>
-
- <para>As of XFree86 4.2 this driver still supports only V4L
-ioctls, however it should work just fine with all V4L2 devices through
-the V4L2 backward-compatibility layer. Since V4L2 permits multiple
-opens it is possible (if supported by the V4L2 driver) to capture
-video while an X client requested video overlay. The restrictions on
-simultaneous capturing and overlay discussed in <xref
-	linkend="overlay" /> apply.</para>
-
- <para>Only marginally related to V4L2, XFree86 extended Xv to
-support hardware YUV to RGB conversion and scaling for faster video
-playback, and added an interface to MPEG-2 decoding hardware. This API
-is useful to display images captured with V4L2 devices.</para>
- </section>
-
- <section>
- <title>Digital Video</title>
-
- <para>V4L2 does not support digital terrestrial, cable or
-satellite broadcast. A separate project aiming at digital receivers
-exists. You can find its homepage at <ulink
-url="https://linuxtv.org">https://linuxtv.org</ulink>. The Linux DVB API
-has no connection to the V4L2 API except that drivers for hybrid
-hardware may support both.</para>
- </section>
-
- <section>
- <title>Audio Interfaces</title>
-
- <para>[to do - OSS/ALSA]</para>
- </section>
- </section>
-
- <section id="experimental">
- <title>Experimental API Elements</title>
-
- <para>The following V4L2 API elements are currently experimental
-and may change in the future.</para>
-
- <itemizedlist>
- <listitem>
- <para>&VIDIOC-DBG-G-REGISTER; and &VIDIOC-DBG-S-REGISTER;
-ioctls.</para>
- </listitem>
- <listitem>
- <para>&VIDIOC-DBG-G-CHIP-INFO; ioctl.</para>
- </listitem>
- </itemizedlist>
- </section>
-
- <section id="obsolete">
- <title>Obsolete API Elements</title>
-
- <para>The following V4L2 API elements were superseded by new
-interfaces and should not be implemented in new drivers.</para>
-
- <itemizedlist>
- <listitem>
- <para><constant>VIDIOC_G_MPEGCOMP</constant> and
-<constant>VIDIOC_S_MPEGCOMP</constant> ioctls. Use Extended Controls,
-<xref linkend="extended-controls" />.</para>
- </listitem>
- <listitem>
- <para>VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET, VIDIOC_ENUM_DV_PRESETS and
- VIDIOC_QUERY_DV_PRESET ioctls. Use the DV Timings API (<xref linkend="dv-timings" />).</para>
- </listitem>
- <listitem>
- <para><constant>VIDIOC_SUBDEV_G_CROP</constant> and
- <constant>VIDIOC_SUBDEV_S_CROP</constant> ioctls. Use
- <constant>VIDIOC_SUBDEV_G_SELECTION</constant> and
- <constant>VIDIOC_SUBDEV_S_SELECTION</constant>, <xref
- linkend="vidioc-subdev-g-selection" />.</para>
- </listitem>
- </itemizedlist>
- </section>
- </section>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
deleted file mode 100644
index e2e5484d2d9b..000000000000
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ /dev/null
@@ -1,5505 +0,0 @@
- <section id="control">
- <title>User Controls</title>
-
- <para>Devices typically have a number of user-settable controls
-such as brightness, saturation and so on, which would be presented to
-the user on a graphical user interface. However, different devices
-will have different controls available; furthermore, the range of
-possible values and the default value will vary from device to
-device. The control ioctls provide the information and a mechanism to
-create a nice user interface for these controls that will work
-correctly with any device.</para>
-
- <para>All controls are accessed using an ID value. V4L2 defines
-several IDs for specific purposes. Drivers can also implement their
-own custom controls using <constant>V4L2_CID_PRIVATE_BASE</constant>
-<footnote><para>The use of <constant>V4L2_CID_PRIVATE_BASE</constant>
-is problematic because different drivers may use the same
-<constant>V4L2_CID_PRIVATE_BASE</constant> ID for different controls.
-This makes it hard to programmatically set such controls since the meaning
-of the control with that ID is driver dependent. In order to resolve this
-drivers use unique IDs and the <constant>V4L2_CID_PRIVATE_BASE</constant>
-IDs are mapped to those unique IDs by the kernel. Consider these
-<constant>V4L2_CID_PRIVATE_BASE</constant> IDs as aliases to the real
-IDs.</para>
-<para>Many applications today still use the <constant>V4L2_CID_PRIVATE_BASE</constant>
-IDs instead of using &VIDIOC-QUERYCTRL; with the <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant>
-flag to enumerate all IDs, so support for <constant>V4L2_CID_PRIVATE_BASE</constant>
-is still around.</para></footnote>
-and higher values. The pre-defined control IDs have the prefix
-<constant>V4L2_CID_</constant>, and are listed in <xref
-linkend="control-id" />. The ID is used when querying the attributes of
-a control, and when getting or setting the current value.</para>
-
- <para>Generally applications should present controls to the user
-without assumptions about their purpose. Each control comes with a
-name string the user is supposed to understand. When the purpose is
-non-intuitive the driver writer should provide a user manual, a user
-interface plug-in or a driver specific panel application. Predefined
-IDs were introduced to change a few controls programmatically, for
-example to mute a device during a channel switch.</para>
-
-	  <para>Drivers may enumerate different controls after switching
-the current video input or output, tuner or modulator, or audio input
-or output; different in the sense of other bounds, another default and
-current value, step size or other menu items. A control with a certain
-<emphasis>custom</emphasis> ID can also change its name and
-type.</para>
-
- <para>If a control is not applicable to the current configuration
-of the device (for example, it doesn't apply to the current video input)
-drivers set the <constant>V4L2_CTRL_FLAG_INACTIVE</constant> flag.</para>
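-	  <para>For example, an application could check this flag with
-&VIDIOC-QUERYCTRL; and grey out the corresponding GUI element (a minimal
-sketch; the control ID chosen here is arbitrary):</para>
-	  <informalexample>
-	    <programlisting>
-&v4l2-queryctrl; qc;
-
-memset(&amp;qc, 0, sizeof(qc));
-qc.id = V4L2_CID_HUE;
-
-if (0 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;qc) &amp;&amp;
-    (qc.flags &amp; V4L2_CTRL_FLAG_INACTIVE)) {
-	/* The control exists but does not apply to the current
-	   configuration, e.g. grey it out in the GUI. */
-}
-</programlisting>
-	  </informalexample>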
-
-	  <para>Control values are stored globally; they do not
-change when switching, except to stay within the reported bounds. Nor do
-they change when, &eg;, the device is opened or closed or when the
-tuner radio frequency is changed; generally they never change without an
-application request.</para>
-
-	  <para>V4L2 specifies an event mechanism to notify applications
-when controls change value (see &VIDIOC-SUBSCRIBE-EVENT;, event
-<constant>V4L2_EVENT_CTRL</constant>); panel applications might want to make
-use of it in order to always reflect the correct control value.</para>
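-	  <para>For instance, a panel application might subscribe to change
-events of a single control roughly as follows (a minimal sketch; the control
-ID chosen here is arbitrary):</para>
-	  <informalexample>
-	    <programlisting>
-struct v4l2_event_subscription sub;
-
-memset(&amp;sub, 0, sizeof(sub));
-sub.type = V4L2_EVENT_CTRL;
-sub.id = V4L2_CID_BRIGHTNESS;
-
-if (-1 == ioctl(fd, &VIDIOC-SUBSCRIBE-EVENT;, &amp;sub)) {
-	perror("VIDIOC_SUBSCRIBE_EVENT");
-	exit(EXIT_FAILURE);
-}
-</programlisting>
-	  </informalexample>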
-
- <para>
- All controls use machine endianness.
- </para>
-
- <table pgwide="1" frame="none" id="control-id">
- <title>Control IDs</title>
- <tgroup cols="3">
- &cs-def;
- <thead>
- <row>
- <entry>ID</entry>
- <entry>Type</entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CID_BASE</constant></entry>
- <entry></entry>
- <entry>First predefined ID, equal to
-<constant>V4L2_CID_BRIGHTNESS</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_USER_BASE</constant></entry>
- <entry></entry>
- <entry>Synonym of <constant>V4L2_CID_BASE</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_BRIGHTNESS</constant></entry>
- <entry>integer</entry>
- <entry>Picture brightness, or more precisely, the black
-level.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_CONTRAST</constant></entry>
- <entry>integer</entry>
- <entry>Picture contrast or luma gain.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_SATURATION</constant></entry>
- <entry>integer</entry>
- <entry>Picture color saturation or chroma gain.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_HUE</constant></entry>
- <entry>integer</entry>
- <entry>Hue or color balance.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUDIO_VOLUME</constant></entry>
- <entry>integer</entry>
- <entry>Overall audio volume. Note some drivers also
-provide an OSS or ALSA mixer interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUDIO_BALANCE</constant></entry>
- <entry>integer</entry>
- <entry>Audio stereo balance. Minimum corresponds to all
-the way left, maximum to right.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUDIO_BASS</constant></entry>
- <entry>integer</entry>
- <entry>Audio bass adjustment.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUDIO_TREBLE</constant></entry>
- <entry>integer</entry>
- <entry>Audio treble adjustment.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUDIO_MUTE</constant></entry>
- <entry>boolean</entry>
- <entry>Mute audio, &ie; set the volume to zero, however
-without affecting <constant>V4L2_CID_AUDIO_VOLUME</constant>. Like
-ALSA drivers, V4L2 drivers must mute at load time to avoid excessive
-noise. Actually the entire device should be reset to a low power
-consumption state.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUDIO_LOUDNESS</constant></entry>
- <entry>boolean</entry>
- <entry>Loudness mode (bass boost).</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_BLACK_LEVEL</constant></entry>
- <entry>integer</entry>
- <entry>Another name for brightness (not a synonym of
-<constant>V4L2_CID_BRIGHTNESS</constant>). This control is deprecated
-and should not be used in new drivers and applications.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUTO_WHITE_BALANCE</constant></entry>
- <entry>boolean</entry>
- <entry>Automatic white balance (cameras).</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_DO_WHITE_BALANCE</constant></entry>
- <entry>button</entry>
- <entry>This is an action control. When set (the value is
-ignored), the device will do a white balance and then hold the current
-setting. Contrast this with the boolean
-<constant>V4L2_CID_AUTO_WHITE_BALANCE</constant>, which, when
-activated, keeps adjusting the white balance.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_RED_BALANCE</constant></entry>
- <entry>integer</entry>
- <entry>Red chroma balance.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_BLUE_BALANCE</constant></entry>
- <entry>integer</entry>
- <entry>Blue chroma balance.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_GAMMA</constant></entry>
- <entry>integer</entry>
- <entry>Gamma adjust.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_WHITENESS</constant></entry>
- <entry>integer</entry>
- <entry>Whiteness for grey-scale devices. This is a synonym
-for <constant>V4L2_CID_GAMMA</constant>. This control is deprecated
-and should not be used in new drivers and applications.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_EXPOSURE</constant></entry>
- <entry>integer</entry>
- <entry>Exposure (cameras). [Unit?]</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUTOGAIN</constant></entry>
- <entry>boolean</entry>
- <entry>Automatic gain/exposure control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_GAIN</constant></entry>
- <entry>integer</entry>
- <entry>Gain control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_HFLIP</constant></entry>
- <entry>boolean</entry>
- <entry>Mirror the picture horizontally.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_VFLIP</constant></entry>
- <entry>boolean</entry>
- <entry>Mirror the picture vertically.</entry>
- </row>
- <row id="v4l2-power-line-frequency">
- <entry><constant>V4L2_CID_POWER_LINE_FREQUENCY</constant></entry>
- <entry>enum</entry>
- <entry>Enables a power line frequency filter to avoid
-flicker. Possible values for <constant>enum v4l2_power_line_frequency</constant> are:
-<constant>V4L2_CID_POWER_LINE_FREQUENCY_DISABLED</constant> (0),
-<constant>V4L2_CID_POWER_LINE_FREQUENCY_50HZ</constant> (1),
-<constant>V4L2_CID_POWER_LINE_FREQUENCY_60HZ</constant> (2) and
-<constant>V4L2_CID_POWER_LINE_FREQUENCY_AUTO</constant> (3).</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_HUE_AUTO</constant></entry>
- <entry>boolean</entry>
- <entry>Enables automatic hue control by the device. The
-effect of setting <constant>V4L2_CID_HUE</constant> while automatic
-hue control is enabled is undefined; drivers should ignore such
-requests.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_WHITE_BALANCE_TEMPERATURE</constant></entry>
- <entry>integer</entry>
- <entry>This control specifies the white balance settings
-as a color temperature in Kelvin. A driver should support a range of at least
-2800 (incandescent) to 6500 (daylight). For more information about
-color temperature see <ulink
-url="http://en.wikipedia.org/wiki/Color_temperature">Wikipedia</ulink>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_SHARPNESS</constant></entry>
- <entry>integer</entry>
- <entry>Adjusts the sharpness filters in a camera. The
-minimum value disables the filters; higher values give a sharper
-picture.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_BACKLIGHT_COMPENSATION</constant></entry>
- <entry>integer</entry>
- <entry>Adjusts the backlight compensation in a camera. The
-minimum value disables backlight compensation.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_CHROMA_AGC</constant></entry>
- <entry>boolean</entry>
- <entry>Chroma automatic gain control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_CHROMA_GAIN</constant></entry>
- <entry>integer</entry>
- <entry>Adjusts the Chroma gain control (for use when chroma AGC
- is disabled).</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_COLOR_KILLER</constant></entry>
- <entry>boolean</entry>
- <entry>Enable the color killer (&ie; force a black &amp; white image in case of a weak video signal).</entry>
- </row>
- <row id="v4l2-colorfx">
- <entry><constant>V4L2_CID_COLORFX</constant></entry>
- <entry>enum</entry>
- <entry>Selects a color effect. The following values are defined:
- </entry>
- </row><row>
- <entry></entry>
- <entry></entry>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_COLORFX_NONE</constant>&nbsp;</entry>
- <entry>Color effect is disabled.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_ANTIQUE</constant>&nbsp;</entry>
- <entry>An aging (old photo) effect.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_ART_FREEZE</constant>&nbsp;</entry>
- <entry>Frost color effect.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_AQUA</constant>&nbsp;</entry>
- <entry>Water color, cool tone.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_BW</constant>&nbsp;</entry>
- <entry>Black and white.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_EMBOSS</constant>&nbsp;</entry>
-	        <entry>Emboss: highlights and shadows replace light/dark boundaries,
-	        and low contrast areas are set to a gray background.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_GRASS_GREEN</constant>&nbsp;</entry>
- <entry>Grass green.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_NEGATIVE</constant>&nbsp;</entry>
- <entry>Negative.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SEPIA</constant>&nbsp;</entry>
- <entry>Sepia tone.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SKETCH</constant>&nbsp;</entry>
- <entry>Sketch.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SKIN_WHITEN</constant>&nbsp;</entry>
- <entry>Skin whiten.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SKY_BLUE</constant>&nbsp;</entry>
- <entry>Sky blue.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SOLARIZATION</constant>&nbsp;</entry>
-	        <entry>Solarization: the image is partially reversed in tone;
-	        only color values above or below a certain threshold are inverted.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SILHOUETTE</constant>&nbsp;</entry>
- <entry>Silhouette (outline).</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_VIVID</constant>&nbsp;</entry>
- <entry>Vivid colors.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORFX_SET_CBCR</constant>&nbsp;</entry>
- <entry>The Cb and Cr chroma components are replaced by fixed
- coefficients determined by <constant>V4L2_CID_COLORFX_CBCR</constant>
- control.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry><constant>V4L2_CID_COLORFX_CBCR</constant></entry>
- <entry>integer</entry>
- <entry>Determines the Cb and Cr coefficients for <constant>V4L2_COLORFX_SET_CBCR</constant>
- color effect. Bits [7:0] of the supplied 32 bit value are interpreted as
- Cr component, bits [15:8] as Cb component and bits [31:16] must be zero.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_AUTOBRIGHTNESS</constant></entry>
- <entry>boolean</entry>
- <entry>Enable Automatic Brightness.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_ROTATE</constant></entry>
- <entry>integer</entry>
-	    <entry>Rotates the image by the specified angle. Common angles are 90,
-	    180 and 270. Rotating by 90 or 270 degrees swaps the height and
-	    width of the display window, so the new height and width of the
-	    picture must be set using the &VIDIOC-S-FMT; ioctl according to
-	    the selected rotation angle.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_BG_COLOR</constant></entry>
- <entry>integer</entry>
- <entry>Sets the background color on the current output device.
-	    The background color needs to be specified in the RGB24 format: bits
-	    0-7 of the supplied 32 bit value carry the red component, bits 8-15
-	    the green component, bits 16-23 the blue component, and bits 24-31
-	    must be zero.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_ILLUMINATORS_1</constant>
- <constant>V4L2_CID_ILLUMINATORS_2</constant></entry>
- <entry>boolean</entry>
- <entry>Switch on or off the illuminator 1 or 2 of the device
- (usually a microscope).</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MIN_BUFFERS_FOR_CAPTURE</constant></entry>
- <entry>integer</entry>
- <entry>This is a read-only control that can be read by the application
-and used as a hint to determine the number of CAPTURE buffers to pass to REQBUFS.
-The value is the minimum number of CAPTURE buffers that is necessary for hardware
-to work.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MIN_BUFFERS_FOR_OUTPUT</constant></entry>
- <entry>integer</entry>
- <entry>This is a read-only control that can be read by the application
-and used as a hint to determine the number of OUTPUT buffers to pass to REQBUFS.
-The value is the minimum number of OUTPUT buffers that is necessary for hardware
-to work.</entry>
- </row>
- <row id="v4l2-alpha-component">
- <entry><constant>V4L2_CID_ALPHA_COMPONENT</constant></entry>
- <entry>integer</entry>
- <entry>Sets the alpha color component. When a capture device (or
- capture queue of a mem-to-mem device) produces a frame format that
- includes an alpha component
- (e.g. <link linkend="rgb-formats">packed RGB image formats</link>)
- and the alpha value is not defined by the device or the mem-to-mem
- input data this control lets you select the alpha component value of
- all pixels. When an output device (or output queue of a mem-to-mem
- device) consumes a frame format that doesn't include an alpha
- component and the device supports alpha channel processing this
- control lets you set the alpha component value of all pixels for
- further processing in the device.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_LASTP1</constant></entry>
- <entry></entry>
- <entry>End of the predefined control IDs (currently
- <constant>V4L2_CID_ALPHA_COMPONENT</constant> + 1).</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_PRIVATE_BASE</constant></entry>
- <entry></entry>
- <entry>ID of the first custom (driver specific) control.
-Applications depending on particular custom controls should check the
-driver name and version, see <xref linkend="querycap" />.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>Applications can enumerate the available controls with the
-&VIDIOC-QUERYCTRL; and &VIDIOC-QUERYMENU; ioctls, get and set a
-control value with the &VIDIOC-G-CTRL; and &VIDIOC-S-CTRL; ioctls.
-Drivers must implement <constant>VIDIOC_QUERYCTRL</constant>,
-<constant>VIDIOC_G_CTRL</constant> and
-<constant>VIDIOC_S_CTRL</constant> when the device has one or more
-controls, <constant>VIDIOC_QUERYMENU</constant> when it has one or
-more menu type controls.</para>
-
- <example id="enum_all_controls">
- <title>Enumerating all user controls</title>
-
- <programlisting>
-&v4l2-queryctrl; queryctrl;
-&v4l2-querymenu; querymenu;
-
-static void enumerate_menu(void)
-{
- printf(" Menu items:\n");
-
- memset(&amp;querymenu, 0, sizeof(querymenu));
- querymenu.id = queryctrl.id;
-
- for (querymenu.index = queryctrl.minimum;
- querymenu.index &lt;= queryctrl.maximum;
- querymenu.index++) {
- if (0 == ioctl(fd, &VIDIOC-QUERYMENU;, &amp;querymenu)) {
- printf(" %s\n", querymenu.name);
- }
- }
-}
-
-memset(&amp;queryctrl, 0, sizeof(queryctrl));
-
-for (queryctrl.id = V4L2_CID_BASE;
- queryctrl.id &lt; V4L2_CID_LASTP1;
- queryctrl.id++) {
- if (0 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;queryctrl)) {
- if (queryctrl.flags &amp; V4L2_CTRL_FLAG_DISABLED)
- continue;
-
- printf("Control %s\n", queryctrl.name);
-
- if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
- enumerate_menu();
- } else {
- if (errno == EINVAL)
- continue;
-
- perror("VIDIOC_QUERYCTRL");
- exit(EXIT_FAILURE);
- }
-}
-
-for (queryctrl.id = V4L2_CID_PRIVATE_BASE;;
- queryctrl.id++) {
- if (0 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;queryctrl)) {
- if (queryctrl.flags &amp; V4L2_CTRL_FLAG_DISABLED)
- continue;
-
- printf("Control %s\n", queryctrl.name);
-
- if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
- enumerate_menu();
- } else {
- if (errno == EINVAL)
- break;
-
- perror("VIDIOC_QUERYCTRL");
- exit(EXIT_FAILURE);
- }
-}
-</programlisting>
- </example>
-
- <example>
- <title>Enumerating all user controls (alternative)</title>
- <programlisting>
-memset(&amp;queryctrl, 0, sizeof(queryctrl));
-
-queryctrl.id = V4L2_CTRL_CLASS_USER | V4L2_CTRL_FLAG_NEXT_CTRL;
-while (0 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;queryctrl)) {
- if (V4L2_CTRL_ID2CLASS(queryctrl.id) != V4L2_CTRL_CLASS_USER)
- break;
- if (queryctrl.flags &amp; V4L2_CTRL_FLAG_DISABLED)
- continue;
-
- printf("Control %s\n", queryctrl.name);
-
- if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
- enumerate_menu();
-
- queryctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
-}
-if (errno != EINVAL) {
- perror("VIDIOC_QUERYCTRL");
- exit(EXIT_FAILURE);
-}
-</programlisting>
- </example>
-
- <example>
- <title>Changing controls</title>
-
- <programlisting>
-&v4l2-queryctrl; queryctrl;
-&v4l2-control; control;
-
-memset(&amp;queryctrl, 0, sizeof(queryctrl));
-queryctrl.id = V4L2_CID_BRIGHTNESS;
-
-if (-1 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;queryctrl)) {
- if (errno != EINVAL) {
- perror("VIDIOC_QUERYCTRL");
- exit(EXIT_FAILURE);
- } else {
- printf("V4L2_CID_BRIGHTNESS is not supported\n");
- }
-} else if (queryctrl.flags &amp; V4L2_CTRL_FLAG_DISABLED) {
- printf("V4L2_CID_BRIGHTNESS is not supported\n");
-} else {
- memset(&amp;control, 0, sizeof (control));
- control.id = V4L2_CID_BRIGHTNESS;
- control.value = queryctrl.default_value;
-
- if (-1 == ioctl(fd, &VIDIOC-S-CTRL;, &amp;control)) {
- perror("VIDIOC_S_CTRL");
- exit(EXIT_FAILURE);
- }
-}
-
-memset(&amp;control, 0, sizeof(control));
-control.id = V4L2_CID_CONTRAST;
-
-if (0 == ioctl(fd, &VIDIOC-G-CTRL;, &amp;control)) {
- control.value += 1;
-
- /* The driver may clamp the value or return ERANGE, ignored here */
-
- if (-1 == ioctl(fd, &VIDIOC-S-CTRL;, &amp;control)
- &amp;&amp; errno != ERANGE) {
- perror("VIDIOC_S_CTRL");
- exit(EXIT_FAILURE);
- }
-/* Ignore if V4L2_CID_CONTRAST is unsupported */
-} else if (errno != EINVAL) {
- perror("VIDIOC_G_CTRL");
- exit(EXIT_FAILURE);
-}
-
-control.id = V4L2_CID_AUDIO_MUTE;
-control.value = 1; /* silence */
-
-/* Errors ignored */
-ioctl(fd, VIDIOC_S_CTRL, &amp;control);
-</programlisting>
- </example>
- </section>
-
- <section id="extended-controls">
- <title>Extended Controls</title>
-
- <section>
- <title>Introduction</title>
-
- <para>The control mechanism as originally designed was meant
-to be used for user settings (brightness, saturation, etc). However,
-it turned out to be a very useful model for implementing more
-complicated driver APIs where each driver implements only a subset of
-a larger API.</para>
-
- <para>The MPEG encoding API was the driving force behind
-designing and implementing this extended control mechanism: the MPEG
-standard is quite large and the currently supported hardware MPEG
-encoders each only implement a subset of this standard. Furthermore,
-many parameters relating to how the video is encoded into an MPEG
-stream are specific to the MPEG encoding chip since the MPEG standard
-only defines the format of the resulting MPEG stream, not how the
-video is actually encoded into that format.</para>
-
- <para>Unfortunately, the original control API lacked some
-features needed for these new uses and so it was extended into the
-(not terribly originally named) extended control API.</para>
-
- <para>Even though the MPEG encoding API was the first effort
-to use the Extended Control API, nowadays there are also other classes
-of Extended Controls, such as Camera Controls and FM Transmitter Controls.
-The Extended Controls API as well as all Extended Controls classes are
-described in the following text.</para>
- </section>
-
- <section>
- <title>The Extended Control API</title>
-
- <para>Three new ioctls are available: &VIDIOC-G-EXT-CTRLS;,
-&VIDIOC-S-EXT-CTRLS; and &VIDIOC-TRY-EXT-CTRLS;. These ioctls act on
-arrays of controls (as opposed to the &VIDIOC-G-CTRL; and
-&VIDIOC-S-CTRL; ioctls that act on a single control). This is needed
-since it is often required to atomically change several controls at
-once.</para>
-
- <para>Each of the new ioctls expects a pointer to a
-&v4l2-ext-controls;. This structure contains a pointer to the control
-array, a count of the number of controls in that array and a control
-class. Control classes are used to group similar controls into a
-single class. For example, control class
-<constant>V4L2_CTRL_CLASS_USER</constant> contains all user controls
-(&ie; all controls that can also be set using the old
-<constant>VIDIOC_S_CTRL</constant> ioctl). Control class
-<constant>V4L2_CTRL_CLASS_MPEG</constant> contains all controls
-relating to MPEG encoding, etc.</para>
-
- <para>All controls in the control array must belong to the
-specified control class. An error is returned if this is not the
-case.</para>
-
- <para>It is also possible to use an empty control array (count
-== 0) to check whether the specified control class is
-supported.</para>
-
- <para>The control array is a &v4l2-ext-control; array. The
-<structname>v4l2_ext_control</structname> structure is very similar to
-&v4l2-control;, except for the fact that it also allows for 64-bit
-values and pointers to be passed.</para>
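-	    <para>As a rough sketch, two of the codec controls documented below
-could be set atomically like this (the control IDs and values chosen are only
-examples):</para>
-	    <informalexample>
-	      <programlisting>
-&v4l2-ext-control; ctrl[2];
-&v4l2-ext-controls; ctrls;
-
-memset(&amp;ctrl, 0, sizeof(ctrl));
-memset(&amp;ctrls, 0, sizeof(ctrls));
-
-ctrl[0].id = V4L2_CID_MPEG_AUDIO_ENCODING;
-ctrl[0].value = V4L2_MPEG_AUDIO_ENCODING_LAYER_2;
-ctrl[1].id = V4L2_CID_MPEG_AUDIO_L2_BITRATE;
-ctrl[1].value = V4L2_MPEG_AUDIO_L2_BITRATE_256K;
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count = 2;
-ctrls.controls = ctrl;
-
-if (-1 == ioctl(fd, &VIDIOC-S-EXT-CTRLS;, &amp;ctrls)) {
-	/* On failure ctrls.error_idx indicates which control failed. */
-	perror("VIDIOC_S_EXT_CTRLS");
-	exit(EXIT_FAILURE);
-}
-</programlisting>
-	    </informalexample>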
-
- <para>Since the &v4l2-ext-control; supports pointers it is now
-also possible to have controls with compound types such as N-dimensional arrays
-and/or structures. You need to specify the <constant>V4L2_CTRL_FLAG_NEXT_COMPOUND</constant>
-flag when enumerating controls to actually be able to see such compound controls.
-In other words, these controls with compound types should only be used
-programmatically.</para>
-
- <para>Since such compound controls need to expose more information
-about themselves than is possible with &VIDIOC-QUERYCTRL; the
-&VIDIOC-QUERY-EXT-CTRL; ioctl was added. In particular, this ioctl gives
-the dimensions of the N-dimensional array if this control consists of more than
-one element.</para>
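-	    <para>A minimal enumeration sketch using this ioctl could look as
-follows (error handling omitted):</para>
-	    <informalexample>
-	      <programlisting>
-struct v4l2_query_ext_ctrl qec;
-
-memset(&amp;qec, 0, sizeof(qec));
-qec.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
-
-while (0 == ioctl(fd, &VIDIOC-QUERY-EXT-CTRL;, &amp;qec)) {
-	/* qec.type, qec.elems, qec.elem_size and qec.dims[] describe
-	   the (possibly compound) control. */
-	qec.id |= V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
-}
-</programlisting>
-	    </informalexample>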
-
- <para>It is important to realize that due to the flexibility of
-controls it is necessary to check whether the control you want to set
-is actually supported by the driver and what the valid range of values
-is. So use the &VIDIOC-QUERYCTRL; (or &VIDIOC-QUERY-EXT-CTRL;) and
-&VIDIOC-QUERYMENU; ioctls to check this. Also note that it is possible
-that some of the menu indices in a control of type
-<constant>V4L2_CTRL_TYPE_MENU</constant> may not be supported
-(<constant>VIDIOC_QUERYMENU</constant> will return an error). A good
-example is the list of supported MPEG audio bitrates. Some drivers only
-support one or two bitrates, others support a wider range.</para>
-
- <para>
- All controls use machine endianness.
- </para>
- </section>
-
- <section>
- <title>Enumerating Extended Controls</title>
-
- <para>The recommended way to enumerate over the extended
-controls is by using &VIDIOC-QUERYCTRL; in combination with the
-<constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> flag:</para>
-
- <informalexample>
- <programlisting>
-&v4l2-queryctrl; qctrl;
-
-qctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;
-while (0 == ioctl (fd, &VIDIOC-QUERYCTRL;, &amp;qctrl)) {
- /* ... */
- qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
-}
-</programlisting>
- </informalexample>
-
- <para>The initial control ID is set to 0 ORed with the
-<constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> flag. The
-<constant>VIDIOC_QUERYCTRL</constant> ioctl will return the first
-control with a higher ID than the specified one. When no such controls
-are found an error is returned.</para>
-
- <para>If you want to get all controls within a specific control
-class, then you can set the initial
-<structfield>qctrl.id</structfield> value to the control class and add
-an extra check to break out of the loop when a control of another
-control class is found:</para>
-
- <informalexample>
- <programlisting>
-qctrl.id = V4L2_CTRL_CLASS_MPEG | V4L2_CTRL_FLAG_NEXT_CTRL;
-while (0 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;qctrl)) {
- if (V4L2_CTRL_ID2CLASS(qctrl.id) != V4L2_CTRL_CLASS_MPEG)
- break;
- /* ... */
- qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
-}
-</programlisting>
- </informalexample>
-
- <para>The 32-bit <structfield>qctrl.id</structfield> value is
-subdivided into three bit ranges: the top 4 bits are reserved for
-flags (&eg; <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant>) and are not
-actually part of the ID. The remaining 28 bits form the control ID, of
-which the most significant 12 bits define the control class and the
-least significant 16 bits identify the control within the control
-class. It is guaranteed that these last 16 bits are always non-zero
-for controls. The range of 0x1000 and up is reserved for
-driver-specific controls. The macro
-<constant>V4L2_CTRL_ID2CLASS(id)</constant> returns the control class
-ID based on a control ID.</para>
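-	    <para>For instance, for one of the Codec class controls documented
-below, the class can be derived from the ID like this:</para>
-	    <informalexample>
-	      <programlisting>
-__u32 id = V4L2_CID_MPEG_AUDIO_MUTE;
-
-/* The most significant 12 bits of the 28-bit ID select the class,
-   here V4L2_CTRL_CLASS_MPEG: */
-if (V4L2_CTRL_ID2CLASS(id) == V4L2_CTRL_CLASS_MPEG)
-	printf("Codec class control\n");
-</programlisting>
-	    </informalexample>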
-
- <para>If the driver does not support extended controls, then
-<constant>VIDIOC_QUERYCTRL</constant> will fail when used in
-combination with <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant>. In
-that case the old method of enumerating controls should be used (see
-<xref linkend="enum_all_controls" />). But if it is supported, then it is guaranteed to enumerate over
-all controls, including driver-private controls.</para>
- </section>
-
- <section>
- <title>Creating Control Panels</title>
-
- <para>It is possible to create control panels for a graphical
-user interface where the user can select the various controls.
-Basically you will have to iterate over all controls using the method
-described above. Each control class starts with a control of type
-<constant>V4L2_CTRL_TYPE_CTRL_CLASS</constant>.
-<constant>VIDIOC_QUERYCTRL</constant> will return the name of this
-control class which can be used as the title of a tab page within a
-control panel.</para>
-
- <para>The flags field of &v4l2-queryctrl; also contains hints on
-the behavior of the control. See the &VIDIOC-QUERYCTRL; documentation
-for more details.</para>
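-	  <para>Sketched in outline (the GUI helper functions named here are
-hypothetical), such a panel could be built like this:</para>
-	  <informalexample>
-	    <programlisting>
-&v4l2-queryctrl; qctrl;
-
-memset(&amp;qctrl, 0, sizeof(qctrl));
-qctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;
-
-while (0 == ioctl(fd, &VIDIOC-QUERYCTRL;, &amp;qctrl)) {
-	if (qctrl.type == V4L2_CTRL_TYPE_CTRL_CLASS)
-		start_tab_page(qctrl.name);	/* hypothetical GUI helper */
-	else if (!(qctrl.flags &amp; V4L2_CTRL_FLAG_DISABLED))
-		add_control_widget(&amp;qctrl);	/* hypothetical GUI helper */
-	qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
-}
-</programlisting>
-	  </informalexample>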
- </section>
-
- <section id="mpeg-controls">
- <title>Codec Control Reference</title>
-
- <para>Below all controls within the Codec control class are
-described. First the generic controls, then controls specific to
-certain hardware.</para>
-
- <para>Note: These controls are applicable to all codecs and
-not just MPEG. The defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG
-as the controls were originally made for MPEG codecs and later
-extended to cover all encoding formats.</para>
-
- <section>
- <title>Generic Codec Controls</title>
-
- <table pgwide="1" frame="none" id="mpeg-control-id">
- <title>Codec Control IDs</title>
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The Codec class
-descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
-description of this control class. This description can be used as the
-caption of a Tab page in a GUI, for example.</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-stream-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_stream_type</entry>
- </row><row><entry spanname="descr">The MPEG-1, -2 or -4
-output stream type. One cannot assume anything here; each hardware
-MPEG encoder tends to support a different subset of the available MPEG
-stream types. This control is specific to multiplexed MPEG streams.
-The currently defined stream types are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_STREAM_TYPE_MPEG2_PS</constant>&nbsp;</entry>
- <entry>MPEG-2 program stream</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_STREAM_TYPE_MPEG2_TS</constant>&nbsp;</entry>
- <entry>MPEG-2 transport stream</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_STREAM_TYPE_MPEG1_SS</constant>&nbsp;</entry>
- <entry>MPEG-1 system stream</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_STREAM_TYPE_MPEG2_DVD</constant>&nbsp;</entry>
- <entry>MPEG-2 DVD-compatible stream</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_STREAM_TYPE_MPEG1_VCD</constant>&nbsp;</entry>
- <entry>MPEG-1 VCD-compatible stream</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD</constant>&nbsp;</entry>
- <entry>MPEG-2 SVCD-compatible stream</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_PID_PMT</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Program Map Table
-Packet ID for the MPEG transport stream (default 16)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_PID_AUDIO</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Audio Packet ID for
-the MPEG transport stream (default 256)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_PID_VIDEO</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Video Packet ID for
-the MPEG transport stream (default 260)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_PID_PCR</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Packet ID for the
-MPEG transport stream carrying PCR fields (default 259)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_PES_ID_AUDIO</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Audio ID for MPEG
-PES</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_PES_ID_VIDEO</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Video ID for MPEG
-PES</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-stream-vbi-fmt">
- <entry spanname="id"><constant>V4L2_CID_MPEG_STREAM_VBI_FMT</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_stream_vbi_fmt</entry>
- </row><row><entry spanname="descr">Some cards can embed
-VBI data (&eg; Closed Caption, Teletext) into the MPEG stream. This
-control selects whether VBI data should be embedded, and if so, what
-embedding method should be used. The list of possible VBI formats
-depends on the driver. The currently defined VBI format types
-are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_STREAM_VBI_FMT_NONE</constant>&nbsp;</entry>
- <entry>No VBI in the MPEG stream</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_STREAM_VBI_FMT_IVTV</constant>&nbsp;</entry>
- <entry>VBI in private packets, IVTV format (documented
-in the kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.vbi</filename>)</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-sampling-freq">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_sampling_freq</entry>
- </row><row><entry spanname="descr">MPEG Audio sampling
-frequency. Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100</constant>&nbsp;</entry>
- <entry>44.1 kHz</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000</constant>&nbsp;</entry>
- <entry>48 kHz</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000</constant>&nbsp;</entry>
- <entry>32 kHz</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-encoding">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_ENCODING</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_encoding</entry>
- </row><row><entry spanname="descr">MPEG Audio encoding.
-This control is specific to multiplexed MPEG streams.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_ENCODING_LAYER_1</constant>&nbsp;</entry>
- <entry>MPEG-1/2 Layer I encoding</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_ENCODING_LAYER_2</constant>&nbsp;</entry>
- <entry>MPEG-1/2 Layer II encoding</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_ENCODING_LAYER_3</constant>&nbsp;</entry>
- <entry>MPEG-1/2 Layer III encoding</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_ENCODING_AAC</constant>&nbsp;</entry>
- <entry>MPEG-2/4 AAC (Advanced Audio Coding)</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_ENCODING_AC3</constant>&nbsp;</entry>
- <entry>AC-3 aka ATSC A/52 encoding</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-l1-bitrate">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_L1_BITRATE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_l1_bitrate</entry>
- </row><row><entry spanname="descr">MPEG-1/2 Layer I bitrate.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_32K</constant>&nbsp;</entry>
- <entry>32 kbit/s</entry></row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_64K</constant>&nbsp;</entry>
- <entry>64 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_96K</constant>&nbsp;</entry>
- <entry>96 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_128K</constant>&nbsp;</entry>
- <entry>128 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_160K</constant>&nbsp;</entry>
- <entry>160 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_192K</constant>&nbsp;</entry>
- <entry>192 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_224K</constant>&nbsp;</entry>
- <entry>224 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_256K</constant>&nbsp;</entry>
- <entry>256 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_288K</constant>&nbsp;</entry>
- <entry>288 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_320K</constant>&nbsp;</entry>
- <entry>320 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_352K</constant>&nbsp;</entry>
- <entry>352 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_384K</constant>&nbsp;</entry>
- <entry>384 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_416K</constant>&nbsp;</entry>
- <entry>416 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L1_BITRATE_448K</constant>&nbsp;</entry>
- <entry>448 kbit/s</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-l2-bitrate">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_L2_BITRATE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_l2_bitrate</entry>
- </row><row><entry spanname="descr">MPEG-1/2 Layer II bitrate.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_32K</constant>&nbsp;</entry>
- <entry>32 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_48K</constant>&nbsp;</entry>
- <entry>48 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_56K</constant>&nbsp;</entry>
- <entry>56 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_64K</constant>&nbsp;</entry>
- <entry>64 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_80K</constant>&nbsp;</entry>
- <entry>80 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_96K</constant>&nbsp;</entry>
- <entry>96 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_112K</constant>&nbsp;</entry>
- <entry>112 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_128K</constant>&nbsp;</entry>
- <entry>128 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_160K</constant>&nbsp;</entry>
- <entry>160 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_192K</constant>&nbsp;</entry>
- <entry>192 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_224K</constant>&nbsp;</entry>
- <entry>224 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_256K</constant>&nbsp;</entry>
- <entry>256 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_320K</constant>&nbsp;</entry>
- <entry>320 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L2_BITRATE_384K</constant>&nbsp;</entry>
- <entry>384 kbit/s</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-l3-bitrate">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_L3_BITRATE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_l3_bitrate</entry>
- </row><row><entry spanname="descr">MPEG-1/2 Layer III bitrate.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_32K</constant>&nbsp;</entry>
- <entry>32 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_40K</constant>&nbsp;</entry>
- <entry>40 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_48K</constant>&nbsp;</entry>
- <entry>48 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_56K</constant>&nbsp;</entry>
- <entry>56 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_64K</constant>&nbsp;</entry>
- <entry>64 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_80K</constant>&nbsp;</entry>
- <entry>80 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_96K</constant>&nbsp;</entry>
- <entry>96 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_112K</constant>&nbsp;</entry>
- <entry>112 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_128K</constant>&nbsp;</entry>
- <entry>128 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_160K</constant>&nbsp;</entry>
- <entry>160 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_192K</constant>&nbsp;</entry>
- <entry>192 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_224K</constant>&nbsp;</entry>
- <entry>224 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_256K</constant>&nbsp;</entry>
- <entry>256 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_L3_BITRATE_320K</constant>&nbsp;</entry>
- <entry>320 kbit/s</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_AAC_BITRATE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">AAC bitrate in bits per second.</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-ac3-bitrate">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_AC3_BITRATE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_ac3_bitrate</entry>
- </row><row><entry spanname="descr">AC-3 bitrate.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_32K</constant>&nbsp;</entry>
- <entry>32 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_40K</constant>&nbsp;</entry>
- <entry>40 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_48K</constant>&nbsp;</entry>
- <entry>48 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_56K</constant>&nbsp;</entry>
- <entry>56 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_64K</constant>&nbsp;</entry>
- <entry>64 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_80K</constant>&nbsp;</entry>
- <entry>80 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_96K</constant>&nbsp;</entry>
- <entry>96 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_112K</constant>&nbsp;</entry>
- <entry>112 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_128K</constant>&nbsp;</entry>
- <entry>128 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_160K</constant>&nbsp;</entry>
- <entry>160 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_192K</constant>&nbsp;</entry>
- <entry>192 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_224K</constant>&nbsp;</entry>
- <entry>224 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_256K</constant>&nbsp;</entry>
- <entry>256 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_320K</constant>&nbsp;</entry>
- <entry>320 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_384K</constant>&nbsp;</entry>
- <entry>384 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_448K</constant>&nbsp;</entry>
- <entry>448 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_512K</constant>&nbsp;</entry>
- <entry>512 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_576K</constant>&nbsp;</entry>
- <entry>576 kbit/s</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_AC3_BITRATE_640K</constant>&nbsp;</entry>
- <entry>640 kbit/s</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_mode</entry>
- </row><row><entry spanname="descr">MPEG Audio mode.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_STEREO</constant>&nbsp;</entry>
- <entry>Stereo</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_JOINT_STEREO</constant>&nbsp;</entry>
- <entry>Joint Stereo</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_DUAL</constant>&nbsp;</entry>
- <entry>Bilingual</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_MONO</constant>&nbsp;</entry>
- <entry>Mono</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-mode-extension">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_MODE_EXTENSION</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_mode_extension</entry>
-	  </row><row><entry spanname="descr">Joint Stereo
-audio mode extension. In Layers I and II this indicates which subbands are in
-intensity stereo; all other subbands are coded in stereo. Layer III is not (yet)
-supported. Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4</constant>&nbsp;</entry>
- <entry>Subbands 4-31 in intensity stereo</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8</constant>&nbsp;</entry>
- <entry>Subbands 8-31 in intensity stereo</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12</constant>&nbsp;</entry>
- <entry>Subbands 12-31 in intensity stereo</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16</constant>&nbsp;</entry>
- <entry>Subbands 16-31 in intensity stereo</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-emphasis">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_EMPHASIS</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_emphasis</entry>
- </row><row><entry spanname="descr">Audio Emphasis.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_EMPHASIS_NONE</constant>&nbsp;</entry>
- <entry>None</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS</constant>&nbsp;</entry>
- <entry>50/15 microsecond emphasis</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17</constant>&nbsp;</entry>
- <entry>CCITT J.17</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-crc">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_CRC</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_crc</entry>
- </row><row><entry spanname="descr">CRC method. Possible
-values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_CRC_NONE</constant>&nbsp;</entry>
- <entry>None</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_CRC_CRC16</constant>&nbsp;</entry>
- <entry>16 bit parity check</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_MUTE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Mutes the audio when
-capturing. This is not done by muting audio hardware, which can still
-produce a slight hiss, but in the encoder itself, guaranteeing a fixed
-and reproducible audio bitstream. 0 = unmuted, 1 = muted.</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-dec-playback">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_dec_playback</entry>
- </row><row><entry spanname="descr">Determines how monolingual audio should be played back.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO</constant>&nbsp;</entry>
- <entry>Automatically determines the best playback mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO</constant>&nbsp;</entry>
- <entry>Stereo playback.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT</constant>&nbsp;</entry>
- <entry>Left channel playback.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT</constant>&nbsp;</entry>
- <entry>Right channel playback.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO</constant>&nbsp;</entry>
- <entry>Mono playback.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO</constant>&nbsp;</entry>
- <entry>Stereo playback with swapped left and right channels.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-audio-dec-multilingual-playback">
- <entry spanname="id"><constant>V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_audio_dec_playback</entry>
- </row><row><entry spanname="descr">Determines how multilingual audio should be played back.</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-encoding">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_ENCODING</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_encoding</entry>
- </row><row><entry spanname="descr">MPEG Video encoding
-method. This control is specific to multiplexed MPEG streams.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ENCODING_MPEG_1</constant>&nbsp;</entry>
- <entry>MPEG-1 Video encoding</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ENCODING_MPEG_2</constant>&nbsp;</entry>
- <entry>MPEG-2 Video encoding</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC</constant>&nbsp;</entry>
- <entry>MPEG-4 AVC (H.264) Video encoding</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-aspect">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_ASPECT</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_aspect</entry>
- </row><row><entry spanname="descr">Video aspect.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ASPECT_1x1</constant>&nbsp;</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ASPECT_4x3</constant>&nbsp;</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ASPECT_16x9</constant>&nbsp;</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_ASPECT_221x100</constant>&nbsp;</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_B_FRAMES</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Number of B-Frames
-(default 2)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_GOP_SIZE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">GOP size (default
-12)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_GOP_CLOSURE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">GOP closure (default
-1)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_PULLDOWN</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Enable 3:2 pulldown
-(default 0)</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-bitrate-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_BITRATE_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_bitrate_mode</entry>
- </row><row><entry spanname="descr">Video bitrate mode.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_BITRATE_MODE_VBR</constant>&nbsp;</entry>
- <entry>Variable bitrate</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_BITRATE_MODE_CBR</constant>&nbsp;</entry>
- <entry>Constant bitrate</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_BITRATE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Video bitrate in bits
-per second.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_BITRATE_PEAK</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Peak video bitrate in
-bits per second. Must be larger than or equal to the average video bitrate.
-It is ignored if the video bitrate mode is set to constant
-bitrate.</entry>
- </row>
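-	  <row>
-	    <entry spanname="descr">As an illustrative sketch only, the bitrate mode, average
-bitrate and peak bitrate might be configured together in a single
-<constant>VIDIOC_S_EXT_CTRLS</constant> call; the rates chosen here (4 and 6 Mbit/s) and the
-already opened encoder file descriptor fd are example assumptions:
-<programlisting>
-<![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-static void set_vbr(int fd)
-{
-	struct v4l2_ext_control ctrl[3];
-	struct v4l2_ext_controls ctrls;
-
-	memset(ctrl, 0, sizeof(ctrl));
-	memset(&ctrls, 0, sizeof(ctrls));
-
-	ctrl[0].id    = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
-	ctrl[0].value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR;
-	ctrl[1].id    = V4L2_CID_MPEG_VIDEO_BITRATE;
-	ctrl[1].value = 4000000;	/* 4 Mbit/s average */
-	ctrl[2].id    = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK;
-	ctrl[2].value = 6000000;	/* 6 Mbit/s peak, not below the average */
-
-	ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-	ctrls.count      = 3;
-	ctrls.controls   = ctrl;
-
-	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
-		perror("VIDIOC_S_EXT_CTRLS");
-}
-]]>
-</programlisting>
-The later sketches in this section reuse this extended-control pattern and omit the includes
-and error handling.</entry>
-	  </row>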
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">For every captured
-frame, skip this many subsequent frames (default 0).</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MUTE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">"Mutes" the video to a
-fixed color when capturing. This is useful for testing, to produce a
-fixed video bitstream. 0 = unmuted, 1 = muted.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MUTE_YUV</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Sets the "mute" color
-of the video. The supplied 32-bit integer is interpreted as follows (bit
-0 = least significant bit):</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry>Bit 0:7</entry>
- <entry>V chrominance information</entry>
- </row>
- <row>
- <entry>Bit 8:15</entry>
- <entry>U chrominance information</entry>
- </row>
- <row>
- <entry>Bit 16:23</entry>
- <entry>Y luminance information</entry>
- </row>
- <row>
- <entry>Bit 24:31</entry>
- <entry>Must be zero.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
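-	  <row>
-	    <entry spanname="descr">For example, muting to mid-grey (Y = U = V = 128, arbitrary
-example values) might be packed and applied as in this sketch:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl;
-struct v4l2_ext_controls ctrls;
-int y = 128, u = 128, v = 128;		/* example mute color */
-
-memset(&ctrl, 0, sizeof(ctrl));
-memset(&ctrls, 0, sizeof(ctrls));
-
-ctrl.id    = V4L2_CID_MPEG_VIDEO_MUTE_YUV;
-ctrl.value = v | (u << 8) | (y << 16);	/* bits 24:31 remain zero */
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count      = 1;
-ctrls.controls   = &ctrl;
-
-ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-]]>
-</programlisting>
-</entry>
-	  </row>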
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-dec-pts">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_DEC_PTS</constant>&nbsp;</entry>
- <entry>integer64</entry>
-	  </row><row><entry spanname="descr">This read-only control returns the
-33-bit video Presentation Time Stamp of the currently displayed frame, as defined in
-ITU T-REC-H.222.0 and ISO/IEC 13818-1. This is the same PTS as is used in &VIDIOC-DECODER-CMD;.</entry>
- </row>
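-	  <row>
-	    <entry spanname="descr">As a 64-bit (integer64) control it is accessed through the
-extended control ioctls; a sketch, assuming an open decoder file descriptor fd:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl;
-struct v4l2_ext_controls ctrls;
-
-memset(&ctrl, 0, sizeof(ctrl));
-memset(&ctrls, 0, sizeof(ctrls));
-
-ctrl.id = V4L2_CID_MPEG_VIDEO_DEC_PTS;
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count      = 1;
-ctrls.controls   = &ctrl;
-
-if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
-	printf("current PTS: %lld\n", (long long)ctrl.value64);
-]]>
-</programlisting>
-</entry>
-	  </row>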
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-dec-frame">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_DEC_FRAME</constant>&nbsp;</entry>
- <entry>integer64</entry>
- </row><row><entry spanname="descr">This read-only control returns the
-frame counter of the frame that is currently displayed (decoded). This value is reset to 0 whenever
-the decoder is started.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
-	  <row><entry spanname="descr">If enabled the decoder expects to receive a single slice per buffer, otherwise
-the decoder expects a single frame per buffer. Applicable to the decoder, all codecs.
-</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enable writing sample aspect ratio in the Video Usability Information.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-vui-sar-idc">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_vui_sar_idc</entry>
- </row>
- <row><entry spanname="descr">VUI sample aspect ratio indicator for H.264 encoding. The value
-is defined in the table E-1 in the standard. Applicable to the H264 encoder.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
-
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED</constant>&nbsp;</entry>
- <entry>Unspecified</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1</constant>&nbsp;</entry>
- <entry>1x1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11</constant>&nbsp;</entry>
- <entry>12x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11</constant>&nbsp;</entry>
- <entry>10x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11</constant>&nbsp;</entry>
- <entry>16x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33</constant>&nbsp;</entry>
- <entry>40x33</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11</constant>&nbsp;</entry>
- <entry>24x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11</constant>&nbsp;</entry>
- <entry>20x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11</constant>&nbsp;</entry>
- <entry>32x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33</constant>&nbsp;</entry>
- <entry>80x33</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11</constant>&nbsp;</entry>
- <entry>18x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11</constant>&nbsp;</entry>
- <entry>15x11</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33</constant>&nbsp;</entry>
- <entry>64x33</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99</constant>&nbsp;</entry>
- <entry>160x99</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3</constant>&nbsp;</entry>
- <entry>4x3</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2</constant>&nbsp;</entry>
- <entry>3x2</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1</constant>&nbsp;</entry>
- <entry>2x1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED</constant>&nbsp;</entry>
- <entry>Extended SAR</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Extended sample aspect ratio width for H.264 VUI encoding.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Extended sample aspect ratio height for H.264 VUI encoding.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-level">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LEVEL</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_level</entry>
- </row>
- <row><entry spanname="descr">The level information for the H264 video elementary stream.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_1_0</constant>&nbsp;</entry>
- <entry>Level 1.0</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_1B</constant>&nbsp;</entry>
- <entry>Level 1B</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_1_1</constant>&nbsp;</entry>
- <entry>Level 1.1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_1_2</constant>&nbsp;</entry>
- <entry>Level 1.2</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_1_3</constant>&nbsp;</entry>
- <entry>Level 1.3</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_2_0</constant>&nbsp;</entry>
- <entry>Level 2.0</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_2_1</constant>&nbsp;</entry>
- <entry>Level 2.1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_2_2</constant>&nbsp;</entry>
- <entry>Level 2.2</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_3_0</constant>&nbsp;</entry>
- <entry>Level 3.0</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_3_1</constant>&nbsp;</entry>
- <entry>Level 3.1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_3_2</constant>&nbsp;</entry>
- <entry>Level 3.2</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_4_0</constant>&nbsp;</entry>
- <entry>Level 4.0</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_4_1</constant>&nbsp;</entry>
- <entry>Level 4.1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_4_2</constant>&nbsp;</entry>
- <entry>Level 4.2</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_5_0</constant>&nbsp;</entry>
- <entry>Level 5.0</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LEVEL_5_1</constant>&nbsp;</entry>
- <entry>Level 5.1</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-mpeg4-level">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_level</entry>
- </row>
- <row><entry spanname="descr">The level information for the MPEG4 elementary stream.
-Applicable to the MPEG4 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_0</constant>&nbsp;</entry>
- <entry>Level 0</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_0B</constant>&nbsp;</entry>
- <entry>Level 0b</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_1</constant>&nbsp;</entry>
- <entry>Level 1</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_2</constant>&nbsp;</entry>
- <entry>Level 2</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_3</constant>&nbsp;</entry>
- <entry>Level 3</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_3B</constant>&nbsp;</entry>
- <entry>Level 3b</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_4</constant>&nbsp;</entry>
- <entry>Level 4</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_LEVEL_5</constant>&nbsp;</entry>
- <entry>Level 5</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-profile">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_PROFILE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_profile</entry>
- </row>
- <row><entry spanname="descr">The profile information for H264.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE</constant>&nbsp;</entry>
- <entry>Baseline profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE</constant>&nbsp;</entry>
- <entry>Constrained Baseline profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_MAIN</constant>&nbsp;</entry>
- <entry>Main profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED</constant>&nbsp;</entry>
- <entry>Extended profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH</constant>&nbsp;</entry>
- <entry>High profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10</constant>&nbsp;</entry>
- <entry>High 10 profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422</constant>&nbsp;</entry>
- <entry>High 422 profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE</constant>&nbsp;</entry>
- <entry>High 444 Predictive profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA</constant>&nbsp;</entry>
- <entry>High 10 Intra profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA</constant>&nbsp;</entry>
- <entry>High 422 Intra profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA</constant>&nbsp;</entry>
- <entry>High 444 Intra profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA</constant>&nbsp;</entry>
- <entry>CAVLC 444 Intra profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE</constant>&nbsp;</entry>
- <entry>Scalable Baseline profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH</constant>&nbsp;</entry>
- <entry>Scalable High profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA</constant>&nbsp;</entry>
- <entry>Scalable High Intra profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH</constant>&nbsp;</entry>
- <entry>Stereo High profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH</constant>&nbsp;</entry>
- <entry>Multiview High profile</entry>
- </row>
-
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-mpeg4-profile">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_mpeg4_profile</entry>
- </row>
- <row><entry spanname="descr">The profile information for MPEG4.
-Applicable to the MPEG4 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_PROFILE_SIMPLE</constant>&nbsp;</entry>
- <entry>Simple profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_PROFILE_ADVANCED_SIMPLE</constant>&nbsp;</entry>
- <entry>Advanced Simple profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_PROFILE_CORE</constant>&nbsp;</entry>
- <entry>Core profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_PROFILE_SIMPLE_SCALABLE</constant>&nbsp;</entry>
- <entry>Simple Scalable profile</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_PROFILE_ADVANCED_CODING_EFFICIENCY</constant>&nbsp;</entry>
-		  <entry>Advanced Coding Efficiency profile</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MAX_REF_PIC</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">The maximum number of reference pictures used for encoding.
-Applicable to the encoder.
-</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-multi-slice-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_multi_slice_mode</entry>
- </row>
-	  <row><entry spanname="descr">Determines how the encoder should handle division of a frame into slices.
-Applicable to the encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE</constant>&nbsp;</entry>
- <entry>Single slice per frame.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB</constant>&nbsp;</entry>
-		  <entry>Multiple slices with a set maximum number of macroblocks per slice.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES</constant>&nbsp;</entry>
-		  <entry>Multiple slices with a set maximum size in bytes per slice.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">The maximum number of macroblocks in a slice. Used when
-<constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant> is set to <constant>V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB</constant>.
-Applicable to the encoder.</entry>
- </row>
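-	  <row>
-	    <entry spanname="descr">A sketch of requesting macroblock-bounded slicing with an
-arbitrary example limit of 32 macroblocks per slice:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl[2];
-struct v4l2_ext_controls ctrls;
-
-memset(ctrl, 0, sizeof(ctrl));
-memset(&ctrls, 0, sizeof(ctrls));
-
-ctrl[0].id    = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE;
-ctrl[0].value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB;
-ctrl[1].id    = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
-ctrl[1].value = 32;			/* example limit */
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count      = 2;
-ctrls.controls   = ctrl;
-
-ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-]]>
-</programlisting>
-</entry>
-	  </row>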
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">The maximum size of a slice in bytes. Used when
-<constant>V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE</constant> is set to <constant>V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES</constant>.
-Applicable to the encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-loop-filter-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_loop_filter_mode</entry>
- </row>
- <row><entry spanname="descr">Loop filter mode for H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED</constant>&nbsp;</entry>
- <entry>Loop filter is enabled.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED</constant>&nbsp;</entry>
- <entry>Loop filter is disabled.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY</constant>&nbsp;</entry>
- <entry>Loop filter is disabled at the slice boundary.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Loop filter alpha coefficient, defined in the H264 standard.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Loop filter beta coefficient, defined in the H264 standard.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-entropy-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_entropy_mode</entry>
- </row>
-	  <row><entry spanname="descr">Entropy coding mode for H264 - CABAC/CAVLC.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC</constant>&nbsp;</entry>
- <entry>Use CAVLC entropy coding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC</constant>&nbsp;</entry>
- <entry>Use CABAC entropy coding.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enable 8X8 transform for H264. Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Cyclic intra macroblock refresh. This is the number of consecutive macroblocks
-refreshed every frame. Each frame a successive set of macroblocks is refreshed, until the cycle completes and starts again
-from the top of the frame. Applicable to the H264, H263 and MPEG4 encoders.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
-	  <row><entry spanname="descr">Frame level rate control enable.
-If this control is disabled then the quantization parameter for each frame type is constant and is set with the appropriate controls
-(e.g. <constant>V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP</constant>).
-If frame level rate control is enabled then the quantization parameter is adjusted to meet the chosen bitrate. The minimum and maximum
-values for the quantization parameter can be set with the appropriate controls (e.g. <constant>V4L2_CID_MPEG_VIDEO_H263_MIN_QP</constant>).
-Applicable to encoders.</entry>
- </row>
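-	  <row>
-	    <entry spanname="descr">For illustration, an encoder might be switched to fixed
-quantization by disabling this control and setting the per-frame-type QPs explicitly; the QP
-values 28/30/32 below are arbitrary examples:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl[4];
-struct v4l2_ext_controls ctrls;
-
-memset(ctrl, 0, sizeof(ctrl));
-memset(&ctrls, 0, sizeof(ctrls));
-
-ctrl[0].id    = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE;
-ctrl[0].value = 0;			/* constant QP mode */
-ctrl[1].id    = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP;
-ctrl[1].value = 28;
-ctrl[2].id    = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP;
-ctrl[2].value = 30;
-ctrl[3].id    = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP;
-ctrl[3].value = 32;
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count      = 4;
-ctrls.controls   = ctrl;
-
-ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-]]>
-</programlisting>
-</entry>
-	  </row>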
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Macroblock level rate control enable.
-Applicable to the MPEG4 and H264 encoders.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_QPEL</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Quarter pixel motion estimation for MPEG4. Applicable to the MPEG4 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Quantization parameter for an I frame for H263. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H263_MIN_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Minimum quantization parameter for H263. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H263_MAX_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Maximum quantization parameter for H263. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Quantization parameter for a P frame for H263. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Quantization parameter for a B frame for H263. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Quantization parameter for an I frame for H264. Valid range: from 0 to 51.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_MIN_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Minimum quantization parameter for H264. Valid range: from 0 to 51.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_MAX_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Maximum quantization parameter for H264. Valid range: from 0 to 51.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Quantization parameter for a P frame for H264. Valid range: from 0 to 51.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Quantization parameter for a B frame for H264. Valid range: from 0 to 51.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Quantization parameter for an I frame for MPEG4. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Minimum quantization parameter for MPEG4. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Maximum quantization parameter for MPEG4. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Quantization parameter for a P frame for MPEG4. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Quantization parameter for a B frame for MPEG4. Valid range: from 1 to 31.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VBV_SIZE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">The Video Buffer Verifier size in kilobytes; it is used as a limit on frame skipping.
-The VBV is defined in the standard as a means to verify that the produced stream will be successfully decoded.
-The standard describes it as "Part of a hypothetical decoder that is conceptually connected to the
-output of the encoder. Its purpose is to provide a constraint on the variability of the data rate that an
-encoder or editing process may produce."
-Applicable to the MPEG1, MPEG2 and MPEG4 encoders.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-vbv-delay">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VBV_DELAY</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Sets the initial delay in milliseconds for
-VBV buffer control.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-hor-search-range">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">The horizontal search range defines the maximum horizontal search area, in pixels,
-used to search for and match the current macroblock (MB) in the reference picture. This control sets the
-horizontal search range of the motion estimation module in the video encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-vert-search-range">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">The vertical search range defines the maximum vertical search area, in pixels,
-used to search for and match the current macroblock (MB) in the reference picture. This control sets the
-vertical search range of the motion estimation module in the video encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-force-key-frame">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME</constant>&nbsp;</entry>
- <entry>button</entry>
- </row><row><entry spanname="descr">Force a key frame for the next queued buffer. Applicable to encoders.
-This is a general, codec-agnostic keyframe control.</entry>
- </row>
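-	  <row>
-	    <entry spanname="descr">Since this is a button control, writing it triggers the
-action and the value written is ignored; a sketch:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl;
-struct v4l2_ext_controls ctrls;
-
-memset(&ctrl, 0, sizeof(ctrl));
-memset(&ctrls, 0, sizeof(ctrls));
-
-ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;	/* value is ignored for button controls */
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count      = 1;
-ctrls.controls   = &ctrl;
-
-ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);	/* next queued buffer becomes a key frame */
-]]>
-</programlisting>
-</entry>
-	  </row>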
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">The Coded Picture Buffer size in kilobytes; it is used as a limit on frame skipping.
-The CPB is defined in the H264 standard as a means to verify that the produced stream will be successfully decoded.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_I_PERIOD</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Period between I-frames in an open GOP for H264. In the case of an open GOP
-this is the period between two I-frames. The period between IDR (Instantaneous Decoding Refresh) frames is taken from the GOP_SIZE control.
-An IDR frame is an I-frame after which no prior frames are referenced. This means that a stream can be
-restarted from an IDR frame without the need to store or decode any previous frames.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-header-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_HEADER_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_header_mode</entry>
- </row>
-	  <row><entry spanname="descr">Determines whether the stream header is returned in the first buffer on its
-own or together with the first encoded frame. Applicable to encoders.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE</constant>&nbsp;</entry>
- <entry>The stream header is returned separately in the first buffer.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME</constant>&nbsp;</entry>
- <entry>The stream header is returned together with the first encoded frame.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER</constant>&nbsp;</entry>
- <entry>boolean</entry>
-	  </row><row><entry spanname="descr">Repeat the video sequence headers. Repeating these
-headers makes random access to the video stream easier. Applicable to the MPEG1, 2 and 4 encoders.</entry>
-	  </row>
-	  <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER</constant>&nbsp;</entry>
- <entry>boolean</entry>
-	  </row><row><entry spanname="descr">Enables the deblocking post-processing filter for the MPEG4 decoder.
-Applicable to the MPEG4 decoder.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_RES</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">vop_time_increment_resolution value for MPEG4. Applicable to the MPEG4 encoder.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_INC</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">vop_time_increment value for MPEG4. Applicable to the MPEG4 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enable generation of frame packing supplemental enhancement information in the encoded bitstream.
-The frame packing SEI message contains the arrangement of L and R planes for 3D viewing. Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Sets current frame as frame0 in frame packing SEI.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-sei-fp-arrangement-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_sei_fp_arrangement_type</entry>
- </row>
- <row><entry spanname="descr">Frame packing arrangement type for H264 SEI.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
-		  <entry><constant>V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD</constant>&nbsp;</entry>
-		  <entry>Pixels are taken alternately from L and R.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN</constant>&nbsp;</entry>
- <entry>L and R are interlaced by column.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW</constant>&nbsp;</entry>
- <entry>L and R are interlaced by row.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE</constant>&nbsp;</entry>
- <entry>L is on the left, R on the right.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM</constant>&nbsp;</entry>
- <entry>L is on top, R on bottom.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL</constant>&nbsp;</entry>
- <entry>One view per frame.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_FMO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enables flexible macroblock ordering in the encoded bitstream. It is a technique
-used for restructuring the ordering of macroblocks in pictures. Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-fmo-map-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_fmo_map_type</entry>
- </row>
-	  <row><entry spanname="descr">When using FMO, the map type divides the image into different scan patterns of macroblocks.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES</constant>&nbsp;</entry>
-		  <entry>Slices are interleaved one after another, with macroblocks in run length order.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES</constant>&nbsp;</entry>
- <entry>Scatters the macroblocks based on a mathematical function known to both encoder and decoder.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER</constant>&nbsp;</entry>
- <entry>Macroblocks arranged in rectangular areas or regions of interest.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT</constant>&nbsp;</entry>
-		  <entry>Slice groups grow cyclically from the centre outwards.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN</constant>&nbsp;</entry>
-		  <entry>Slice groups grow in a raster scan pattern from left to right.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN</constant>&nbsp;</entry>
-		  <entry>Slice groups grow in a wipe scan pattern from top to bottom.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT</constant>&nbsp;</entry>
- <entry>User defined map type.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Number of slice groups in FMO.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-fmo-change-direction">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_fmo_change_dir</entry>
- </row>
- <row><entry spanname="descr">Specifies a direction of the slice group change for raster and wipe maps.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT</constant>&nbsp;</entry>
- <entry>Raster scan or wipe right.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT</constant>&nbsp;</entry>
- <entry>Reverse raster scan or wipe left.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	  <row><entry spanname="descr">Specifies the size of the first slice group for raster and wipe maps.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Specifies the number of consecutive macroblocks for the interleaved map.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ASO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enables arbitrary slice ordering in encoded bitstream.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Specifies the slice order in ASO. Applicable to the H264 encoder.
-The supplied 32-bit integer is interpreted as follows (bit
-0 = least significant bit):</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry>Bit 0:15</entry>
- <entry>Slice ID</entry>
- </row>
- <row>
-		  <entry>Bit 16:31</entry>
- <entry>Slice position or order</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
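-	  <row>
-	    <entry spanname="descr">For example, placing slice 3 at position 0 would be packed
-as in this sketch:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl;
-
-memset(&ctrl, 0, sizeof(ctrl));
-ctrl.id    = V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER;
-ctrl.value = 3 | (0 << 16);	/* slice ID 3 in bits 0:15, position 0 in bits 16:31 */
-/* submit via VIDIOC_S_EXT_CTRLS as in the earlier sketches */
-]]>
-</programlisting>
-</entry>
-	  </row>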
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enables H264 hierarchical coding.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-mpeg-video-h264-hierarchical-coding-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_video_h264_hierarchical_coding_type</entry>
- </row>
- <row><entry spanname="descr">Specifies the hierarchical coding type.
-Applicable to the H264 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B</constant>&nbsp;</entry>
- <entry>Hierarchical B coding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P</constant>&nbsp;</entry>
- <entry>Hierarchical P coding.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Specifies the number of hierarchical coding layers.
-Applicable to the H264 encoder.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Specifies a user defined QP for each layer. Applicable to the H264 encoder.
-The supplied 32-bit integer is interpreted as follows (bit
-0 = least significant bit):</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry>Bit 0:15</entry>
- <entry>QP value</entry>
- </row>
- <row>
-		  <entry>Bit 16:31</entry>
- <entry>Layer number</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
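-	  <row>
-	    <entry spanname="descr">For example, a QP of 30 for layer 1 would be packed as in
-this sketch (assuming the control is written once per layer):
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl;
-
-memset(&ctrl, 0, sizeof(ctrl));
-ctrl.id    = V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP;
-ctrl.value = 30 | (1 << 16);	/* QP 30 in bits 0:15, layer 1 in bits 16:31 */
-/* submit via VIDIOC_S_EXT_CTRLS as in the earlier sketches */
-]]>
-</programlisting>
-</entry>
-	  </row>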
-
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section>
- <title>MFC 5.1 MPEG Controls</title>
-
- <para>The following MPEG class controls deal with MPEG
-decoding and encoding settings that are specific to the Multi Format Codec 5.1 device present
-in the S5P family of SoCs by Samsung.
-</para>
-
- <table pgwide="1" frame="none" id="mfc51-control-id">
- <title>MFC 5.1 Control IDs</title>
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">If the display delay is enabled then the decoder is forced to return a
-CAPTURE buffer (decoded frame) after processing a certain number of OUTPUT buffers. The delay can be set through
-<constant>V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY</constant>. This feature can be used, for example,
-for generating thumbnails of videos. Applicable to the H264 decoder.
- </entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY</constant>&nbsp;</entry>
- <entry>integer</entry>
-	  </row><row><entry spanname="descr">Display delay value for the H264 decoder.
-The decoder is forced to return a decoded frame after the set 'display delay' number of frames. If this number is
-low it may result in frames being returned out of display order; in addition the hardware may still be using the returned
-buffer as a reference picture for subsequent frames.
-</entry>
- </row>
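-	  <row>
-	    <entry spanname="descr">For example, a thumbnail extractor might ask for the first
-decoded frame to be returned immediately; a sketch with example values:
-<programlisting>
-<![CDATA[
-struct v4l2_ext_control ctrl[2];
-struct v4l2_ext_controls ctrls;
-
-memset(ctrl, 0, sizeof(ctrl));
-memset(&ctrls, 0, sizeof(ctrls));
-
-ctrl[0].id    = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE;
-ctrl[0].value = 1;
-ctrl[1].id    = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY;
-ctrl[1].value = 0;			/* return a decoded frame as soon as possible */
-
-ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-ctrls.count      = 2;
-ctrls.controls   = ctrl;
-
-ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
-]]>
-</programlisting>
-</entry>
-	  </row>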
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">The number of reference pictures used for encoding a P picture.
-Applicable to the H264 encoder.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_PADDING</constant>&nbsp;</entry>
- <entry>boolean</entry>
-	  </row><row><entry spanname="descr">Enable padding in the encoder - a fixed color is used instead of repeating border pixels.
-Applicable to encoders.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Padding color in the encoder. Applicable to encoders. The supplied 32-bit integer is interpreted as follows (bit
-0 = least significant bit):</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry>Bit 0:7</entry>
- <entry>V chrominance information</entry>
- </row>
- <row>
- <entry>Bit 8:15</entry>
- <entry>U chrominance information</entry>
- </row>
- <row>
- <entry>Bit 16:23</entry>
- <entry>Y luminance information</entry>
- </row>
- <row>
- <entry>Bit 24:31</entry>
- <entry>Must be zero.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Reaction coefficient for MFC rate control. Applicable to encoders.
-<para>Note 1: Valid only when frame level rate control is enabled.</para>
-<para>Note 2: For tight CBR, this field must be small (e.g. 2 to 10).
-For VBR, this field must be large (e.g. 100 to 1000).</para>
-<para>Note 3: It is not recommended to use a value greater than FRAME_RATE * (10^9 / BIT_RATE).</para>
-</entry>
- </row>
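-	  <row>
-	    <entry spanname="descr">As a worked example of Note 3: at 30 frames/s and a bitrate
-of 6000000 bit/s the suggested upper bound is 30 * (10^9 / 6000000) = 5000.</entry>
-	  </row>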
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Adaptive rate control for dark region.
-Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE</constant>).
-Applicable to the H264 encoder.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Adaptive rate control for smooth region.
-Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE</constant>).
-Applicable to the H264 encoder.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Adaptive rate control for static region.
-Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE</constant>).
-Applicable to the H264 encoder.</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Adaptive rate control for activity region.
-Valid only when H.264 and macroblock level RC is enabled (<constant>V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE</constant>).
-Applicable to the H264 encoder.</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-mfc51-video-frame-skip-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_mfc51_video_frame_skip_mode</entry>
- </row>
-	  <row><entry spanname="descr">
-Indicates under what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger than
-a chosen data limit then the frame will be skipped.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED</constant>&nbsp;</entry>
- <entry>Frame skip mode is disabled.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT</constant>&nbsp;</entry>
-		  <entry>Frame skip mode is enabled and the buffer limit is set by the chosen level, as defined by the standard.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT</constant>&nbsp;</entry>
-		  <entry>Frame skip mode is enabled and the buffer limit is set by the VBV (MPEG1/2/4) or CPB (H264) buffer size control.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT</constant>&nbsp;</entry>
- <entry>integer</entry>
-	  </row><row><entry spanname="descr">Enable rate control with a fixed target bit budget.
-If this setting is enabled, then the rate control logic of the encoder will calculate the average bitrate
-for a GOP and keep it below or equal to the set bitrate target. Otherwise the rate control logic calculates the
-overall average bitrate for the stream and keeps it below or equal to the set bitrate. In the first case
-the average bitrate for the whole stream will be smaller than the set bitrate, because the average is
-calculated over a smaller number of frames; on the other hand, enabling this setting ensures that
-the stream will meet tight bandwidth constraints. Applicable to encoders.
-</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-mfc51-video-force-frame-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_mfc51_video_force_frame_type</entry>
- </row>
- <row><entry spanname="descr">Force a frame type for the next queued buffer. Applicable to encoders.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED</constant>&nbsp;</entry>
- <entry>Forcing a specific frame type disabled.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME</constant>&nbsp;</entry>
- <entry>Force an I-frame.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED</constant>&nbsp;</entry>
- <entry>Force a non-coded frame.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- </tbody>
- </tgroup>
- </table>
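-
-      <para>As a hedged illustration of how the MFC 5.1 controls above might be
-used, the following sketch forces an I-frame for the next queued buffer and
-enables frame skipping at the VBV/CPB buffer limit through the
-<constant>VIDIOC_S_EXT_CTRLS</constant> interface. The device node name is an
-assumption and depends on the platform; error handling is kept minimal.</para>
-
-      <programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-int main(void)
-{
-        /* Assumed device node of the MFC encoder (a memory-to-memory device). */
-        int fd = open("/dev/video0", O_RDWR);
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        if (fd &lt; 0) {
-                perror("open");
-                return 1;
-        }
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        /* Force an I-frame for the next queued buffer. */
-        ctrl[0].id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE;
-        ctrl[0].value = V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME;
-
-        /* Skip frames that would overflow the VBV (MPEG-1/2/4) or CPB (H264) buffer. */
-        ctrl[1].id = V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE;
-        ctrl[1].value = V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT;
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls) &lt; 0)
-                perror("VIDIOC_S_EXT_CTRLS");
-
-        close(fd);
-        return 0;
-}
-</programlisting>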
- </section>
-
- <section>
- <title>CX2341x MPEG Controls</title>
-
- <para>The following MPEG class controls deal with MPEG
-encoding settings that are specific to the Conexant CX23415 and
-CX23416 MPEG encoding chips.</para>
-
- <table pgwide="1" frame="none" id="cx2341x-control-id">
- <title>CX2341x Control IDs</title>
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row id="v4l2-mpeg-cx2341x-video-spatial-filter-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_cx2341x_video_spatial_filter_mode</entry>
- </row><row><entry spanname="descr">Sets the Spatial
-Filter mode (default <constant>MANUAL</constant>). Possible values
-are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL</constant>&nbsp;</entry>
- <entry>Choose the filter manually</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO</constant>&nbsp;</entry>
- <entry>Choose the filter automatically</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER</constant>&nbsp;</entry>
- <entry>integer&nbsp;(0-15)</entry>
- </row><row><entry spanname="descr">The setting for the
-Spatial Filter. 0 = off, 15 = maximum. (Default is 0.)</entry>
- </row>
- <row><entry></entry></row>
- <row id="luma-spatial-filter-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_cx2341x_video_luma_spatial_filter_type</entry>
- </row><row><entry spanname="descr">Select the algorithm
-to use for the Luma Spatial Filter (default
-<constant>1D_HOR</constant>). Possible values:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF</constant>&nbsp;</entry>
- <entry>No filter</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR</constant>&nbsp;</entry>
- <entry>One-dimensional horizontal</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT</constant>&nbsp;</entry>
- <entry>One-dimensional vertical</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE</constant>&nbsp;</entry>
- <entry>Two-dimensional separable</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE</constant>&nbsp;</entry>
- <entry>Two-dimensional symmetrical
-non-separable</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="chroma-spatial-filter-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type</entry>
- </row><row><entry spanname="descr">Select the algorithm
-for the Chroma Spatial Filter (default <constant>1D_HOR</constant>).
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF</constant>&nbsp;</entry>
- <entry>No filter</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR</constant>&nbsp;</entry>
- <entry>One-dimensional horizontal</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-cx2341x-video-temporal-filter-mode">
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_cx2341x_video_temporal_filter_mode</entry>
- </row><row><entry spanname="descr">Sets the Temporal
-Filter mode (default <constant>MANUAL</constant>). Possible values
-are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL</constant>&nbsp;</entry>
- <entry>Choose the filter manually</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO</constant>&nbsp;</entry>
- <entry>Choose the filter automatically</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER</constant>&nbsp;</entry>
- <entry>integer&nbsp;(0-31)</entry>
- </row><row><entry spanname="descr">The setting for the
-Temporal Filter. 0 = off, 31 = maximum. (Default is 8 for full-scale
-capturing and 0 for scaled capturing.)</entry>
- </row>
- <row><entry></entry></row>
- <row id="v4l2-mpeg-cx2341x-video-median-filter-type">
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_mpeg_cx2341x_video_median_filter_type</entry>
- </row><row><entry spanname="descr">Median Filter Type
-(default <constant>OFF</constant>). Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF</constant>&nbsp;</entry>
- <entry>No filter</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR</constant>&nbsp;</entry>
- <entry>Horizontal filter</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT</constant>&nbsp;</entry>
- <entry>Vertical filter</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT</constant>&nbsp;</entry>
- <entry>Horizontal and vertical filter</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG</constant>&nbsp;</entry>
- <entry>Diagonal filter</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM</constant>&nbsp;</entry>
- <entry>integer&nbsp;(0-255)</entry>
- </row><row><entry spanname="descr">Threshold above which
-the luminance median filter is enabled (default 0)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP</constant>&nbsp;</entry>
- <entry>integer&nbsp;(0-255)</entry>
- </row><row><entry spanname="descr">Threshold below which
-the luminance median filter is enabled (default 255)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM</constant>&nbsp;</entry>
- <entry>integer&nbsp;(0-255)</entry>
- </row><row><entry spanname="descr">Threshold above which
-the chroma median filter is enabled (default 0)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP</constant>&nbsp;</entry>
- <entry>integer&nbsp;(0-255)</entry>
- </row><row><entry spanname="descr">Threshold below which
-the chroma median filter is enabled (default 255)</entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">The CX2341X MPEG encoder
-can insert one empty MPEG-2 PES packet into the stream between every
-four video frames. The packet size is 2048 bytes, including the
-packet_start_code_prefix and stream_id fields. The stream_id is 0xBF
-(private stream 2). The payload consists of 0x00 bytes, to be filled
-in by the application. 0 = do not insert, 1 = insert packets.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
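-
-      <para>The sketch below shows one way the CX2341x controls could be
-programmed: the spatial filter is selected manually with a moderate strength
-and NAV packet insertion is enabled. The device node and the filter strength
-are assumptions chosen purely for illustration.</para>
-
-      <programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static const struct { unsigned int id; int value; } cfg[] = {
-        /* Pick the spatial filter manually... */
-        { V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
-          V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL },
-        /* ...with a moderate strength (range 0-15, value chosen for illustration). */
-        { V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, 5 },
-        /* Insert one empty PES packet between every four video frames. */
-        { V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, 1 },
-};
-
-int main(void)
-{
-        int fd = open("/dev/video0", O_RDWR);   /* assumed CX2341x encoder node */
-        unsigned int i;
-
-        if (fd &lt; 0)
-                return 1;
-
-        for (i = 0; i &lt; sizeof(cfg) / sizeof(cfg[0]); i++) {
-                struct v4l2_ext_control ctrl;
-                struct v4l2_ext_controls ctrls;
-
-                memset(&amp;ctrl, 0, sizeof(ctrl));
-                memset(&amp;ctrls, 0, sizeof(ctrls));
-                ctrl.id = cfg[i].id;
-                ctrl.value = cfg[i].value;
-                ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-                ctrls.count = 1;
-                ctrls.controls = &amp;ctrl;
-                if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls) &lt; 0)
-                        perror("VIDIOC_S_EXT_CTRLS");
-        }
-
-        close(fd);
-        return 0;
-}
-</programlisting>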
- </section>
-
- <section>
- <title>VPX Control Reference</title>
-
- <para>The VPX controls include controls for encoding parameters
- of VPx video codec.</para>
-
- <table pgwide="1" frame="none" id="vpx-control-id">
- <title>VPX Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
-
- <row><entry></entry></row>
- <row id="v4l2-vpx-num-partitions">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS</constant></entry>
- <entry>enum v4l2_vp8_num_partitions</entry>
- </row>
-        <row><entry spanname="descr">The number of token partitions to use in the VP8 encoder.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION</constant></entry>
- <entry>1 coefficient partition</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS</constant></entry>
- <entry>2 coefficient partitions</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS</constant></entry>
- <entry>4 coefficient partitions</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS</constant></entry>
- <entry>8 coefficient partitions</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4</constant></entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Setting this prevents intra 4x4 mode in the intra mode decision.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-vpx-num-ref-frames">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES</constant></entry>
- <entry>enum v4l2_vp8_num_ref_frames</entry>
- </row>
- <row><entry spanname="descr">The number of reference pictures for encoding P frames.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME</constant></entry>
-              <entry>The last encoded frame will be searched</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME</constant></entry>
- <entry>Two frames will be searched among the last encoded frame, the golden frame
-and the alternate reference (altref) frame. The encoder implementation will decide which two are chosen.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME</constant></entry>
- <entry>The last encoded frame, the golden frame and the altref frame will be searched.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL</constant></entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Indicates the loop filter level. The adjustment of the loop
-filter level is done via a delta value against a baseline loop filter value.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS</constant></entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">This parameter affects the loop filter. Anything above
-zero weakens the deblocking effect on the loop filter.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD</constant></entry>
- <entry>integer</entry>
- </row>
-        <row><entry spanname="descr">Sets the refresh period for the golden frame. The period is defined
-in number of frames. For a value of 'n', every nth frame starting from the first key frame will be taken as a golden frame.
-For example, for an encoding sequence of frames 0, 1, 2, 3, 4, 5, 6, 7 where the golden frame refresh period is set to 4,
-frames 0, 4, 8 and so on will be taken as the golden frames, since frame 0 is always a key frame.</entry>
- </row>
-
- <row><entry></entry></row>
- <row id="v4l2-vpx-golden-frame-sel">
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL</constant></entry>
- <entry>enum v4l2_vp8_golden_frame_sel</entry>
- </row>
- <row><entry spanname="descr">Selects the golden frame for encoding.
-Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV</constant></entry>
- <entry>Use the (n-2)th frame as a golden frame, current frame index being 'n'.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD</constant></entry>
- <entry>Use the previous specific frame indicated by
-V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD as a golden frame.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_MIN_QP</constant></entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Minimum quantization parameter for VP8.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_MAX_QP</constant></entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Maximum quantization parameter for VP8.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Quantization parameter for an I frame for VP8.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Quantization parameter for a P frame for VP8.</entry>
- </row>
-
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_PROFILE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-        <row><entry spanname="descr">Select the desired profile for the VPx encoder.
-Acceptable values are 0, 1, 2 and 3 corresponding to encoder profiles 0, 1, 2 and 3.</entry>
- </row>
-
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
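-
-      <para>A minimal sketch of configuring a VP8 encoder with the controls
-above, using a small helper around the extended control interface. The device
-node and the chosen values are illustrative assumptions; drivers may clamp or
-reject them.</para>
-
-      <programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int set_mpeg_ctrl(int fd, unsigned int id, int value)
-{
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&amp;ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-        ctrl.id = id;
-        ctrl.value = value;
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl;
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls);
-}
-
-int main(void)
-{
-        int fd = open("/dev/video0", O_RDWR);   /* assumed VP8 encoder node */
-
-        if (fd &lt; 0)
-                return 1;
-
-        /* Four token partitions, search only the last encoded frame. */
-        set_mpeg_ctrl(fd, V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
-                      V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS);
-        set_mpeg_ctrl(fd, V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES,
-                      V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME);
-
-        /* Quantization bounds and profile 0; values are examples only. */
-        set_mpeg_ctrl(fd, V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 4);
-        set_mpeg_ctrl(fd, V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, 63);
-        set_mpeg_ctrl(fd, V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0);
-
-        close(fd);
-        return 0;
-}
-</programlisting>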
-
- </section>
- </section>
-
- <section id="camera-controls">
- <title>Camera Control Reference</title>
-
- <para>The Camera class includes controls for mechanical (or
-equivalent digital) features of a device such as controllable lenses
-or sensors.</para>
-
- <table pgwide="1" frame="none" id="camera-control-id">
- <title>Camera Control IDs</title>
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_CAMERA_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The Camera class
-descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
-description of this control class.</entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-exposure-auto-type">
- <entry spanname="id"><constant>V4L2_CID_EXPOSURE_AUTO</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_exposure_auto_type</entry>
- </row><row><entry spanname="descr">Enables automatic
-adjustments of the exposure time and/or iris aperture. The effect of
-manual changes of the exposure time or iris aperture while these
-features are enabled is undefined; drivers should ignore such
-requests. Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EXPOSURE_AUTO</constant>&nbsp;</entry>
- <entry>Automatic exposure time, automatic iris
-aperture.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EXPOSURE_MANUAL</constant>&nbsp;</entry>
- <entry>Manual exposure time, manual iris.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EXPOSURE_SHUTTER_PRIORITY</constant>&nbsp;</entry>
- <entry>Manual exposure time, auto iris.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EXPOSURE_APERTURE_PRIORITY</constant>&nbsp;</entry>
- <entry>Auto exposure time, manual iris.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_EXPOSURE_ABSOLUTE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Determines the exposure
-time of the camera sensor. The exposure time is limited by the frame
-interval. Drivers should interpret the values as 100 &micro;s units,
-where the value 1 stands for 1/10000th of a second, 10000 for 1 second
-and 100000 for 10 seconds.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_EXPOSURE_AUTO_PRIORITY</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">When
-<constant>V4L2_CID_EXPOSURE_AUTO</constant> is set to
-<constant>AUTO</constant> or <constant>APERTURE_PRIORITY</constant>,
-this control determines if the device may dynamically vary the frame
-rate. By default this feature is disabled (0) and the frame rate must
-remain constant.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_EXPOSURE_BIAS</constant>&nbsp;</entry>
- <entry>integer menu</entry>
-          </row><row><entry spanname="descr">Determines the automatic
-exposure compensation. It is effective only when the <constant>V4L2_CID_EXPOSURE_AUTO</constant>
-control is set to <constant>AUTO</constant>, <constant>SHUTTER_PRIORITY</constant>
-or <constant>APERTURE_PRIORITY</constant>.
-It is expressed in terms of EV; drivers should interpret the values as 0.001 EV
-units, where the value 1000 stands for +1 EV.
-<para>Increasing the exposure compensation value is equivalent to decreasing
-the exposure value (EV) and will increase the amount of light at the image
-sensor. The camera performs the exposure compensation by adjusting absolute
-exposure time and/or aperture.</para></entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-exposure-metering">
- <entry spanname="id"><constant>V4L2_CID_EXPOSURE_METERING</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_exposure_metering</entry>
- </row><row><entry spanname="descr">Determines how the camera measures
-the amount of light available for the frame exposure. Possible values are:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EXPOSURE_METERING_AVERAGE</constant>&nbsp;</entry>
- <entry>Use the light information coming from the entire frame
-and average it, giving no weighting to any particular portion of the metered area.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EXPOSURE_METERING_CENTER_WEIGHTED</constant>&nbsp;</entry>
- <entry>Average the light information coming from the entire frame
-giving priority to the center of the metered area.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EXPOSURE_METERING_SPOT</constant>&nbsp;</entry>
-		  <entry>Measure only a very small area at the center of the frame.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EXPOSURE_METERING_MATRIX</constant>&nbsp;</entry>
- <entry>A multi-zone metering. The light intensity is measured
-in several points of the frame and the results are combined. The
-algorithm of the zones selection and their significance in calculating the
-final value is device dependent.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_PAN_RELATIVE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control turns the
-camera horizontally by the specified amount. The unit is undefined. A
-positive value moves the camera to the right (clockwise when viewed
-from above), a negative value to the left. A value of zero does not
-cause motion. This is a write-only control.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_TILT_RELATIVE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control turns the
-camera vertically by the specified amount. The unit is undefined. A
-positive value moves the camera up, a negative value down. A value of
-zero does not cause motion. This is a write-only control.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_PAN_RESET</constant>&nbsp;</entry>
- <entry>button</entry>
- </row><row><entry spanname="descr">When this control is set,
-the camera moves horizontally to the default position.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_TILT_RESET</constant>&nbsp;</entry>
- <entry>button</entry>
- </row><row><entry spanname="descr">When this control is set,
-the camera moves vertically to the default position.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_PAN_ABSOLUTE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control
-turns the camera horizontally to the specified position. Positive
-values move the camera to the right (clockwise when viewed from above),
-negative values to the left. Drivers should interpret the values as arc
-seconds, with valid values between -180 * 3600 and +180 * 3600
-inclusive.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_TILT_ABSOLUTE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control
-turns the camera vertically to the specified position. Positive values
-move the camera up, negative values down. Drivers should interpret the
-values as arc seconds, with valid values between -180 * 3600 and +180
-* 3600 inclusive.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_FOCUS_ABSOLUTE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control sets the
-focal point of the camera to the specified position. The unit is
-undefined. Positive values set the focus closer to the camera,
-negative values towards infinity.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_FOCUS_RELATIVE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control moves the
-focal point of the camera by the specified amount. The unit is
-undefined. Positive values move the focus closer to the camera,
-negative values towards infinity. This is a write-only control.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_FOCUS_AUTO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Enables continuous automatic
-focus adjustments. The effect of manual focus adjustments while this feature
-is enabled is undefined; drivers should ignore such requests.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUTO_FOCUS_START</constant>&nbsp;</entry>
- <entry>button</entry>
- </row><row><entry spanname="descr">Starts single auto focus process.
-The effect of setting this control when <constant>V4L2_CID_FOCUS_AUTO</constant>
-is set to <constant>TRUE</constant> (1) is undefined; drivers should ignore
-such requests.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUTO_FOCUS_STOP</constant>&nbsp;</entry>
- <entry>button</entry>
- </row><row><entry spanname="descr">Aborts automatic focusing
-started with <constant>V4L2_CID_AUTO_FOCUS_START</constant> control. It is
-effective only when the continuous autofocus is disabled, that is when
-<constant>V4L2_CID_FOCUS_AUTO</constant> control is set to <constant>FALSE
-</constant> (0).</entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-auto-focus-status">
- <entry spanname="id">
- <constant>V4L2_CID_AUTO_FOCUS_STATUS</constant>&nbsp;</entry>
- <entry>bitmask</entry>
- </row>
- <row><entry spanname="descr">The automatic focus status. This is a read-only
- control.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_STATUS_IDLE</constant>&nbsp;</entry>
- <entry>Automatic focus is not active.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_STATUS_BUSY</constant>&nbsp;</entry>
- <entry>Automatic focusing is in progress.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_STATUS_REACHED</constant>&nbsp;</entry>
- <entry>Focus has been reached.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_STATUS_FAILED</constant>&nbsp;</entry>
-		  <entry>Automatic focus has failed; the driver will not
- transition from this state until another action is
- performed by an application.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry spanname="descr">
-Setting <constant>V4L2_LOCK_FOCUS</constant> lock bit of the <constant>V4L2_CID_3A_LOCK
-</constant> control may stop updates of the <constant>V4L2_CID_AUTO_FOCUS_STATUS</constant>
-control value.</entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-auto-focus-range">
- <entry spanname="id">
- <constant>V4L2_CID_AUTO_FOCUS_RANGE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_auto_focus_range</entry>
- </row>
- <row><entry spanname="descr">Determines auto focus distance range
-for which the lens may be adjusted.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_RANGE_AUTO</constant>&nbsp;</entry>
- <entry>The camera automatically selects the focus range.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_RANGE_NORMAL</constant>&nbsp;</entry>
- <entry>Normal distance range, limited for best automatic focus
-performance.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_RANGE_MACRO</constant>&nbsp;</entry>
- <entry>Macro (close-up) auto focus. The camera will
-use its minimum possible distance for auto focus.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUTO_FOCUS_RANGE_INFINITY</constant>&nbsp;</entry>
- <entry>The lens is set to focus on an object at infinite distance.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_ZOOM_ABSOLUTE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Specify the objective lens
-focal length as an absolute value. The zoom unit is driver-specific and its
-value should be a positive integer.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_ZOOM_RELATIVE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Specify the objective lens
-focal length relatively to the current value. Positive values move the zoom
-lens group towards the telephoto direction, negative values towards the
-wide-angle direction. The zoom unit is driver-specific. This is a write-only control.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_ZOOM_CONTINUOUS</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Move the objective lens group
-at the specified speed until it reaches physical device limits or until an
-explicit request to stop the movement. A positive value moves the zoom lens
-group towards the telephoto direction. A value of zero stops the zoom lens
-group movement. A negative value moves the zoom lens group towards the
-wide-angle direction. The zoom speed unit is driver-specific.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_IRIS_ABSOLUTE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control sets the
-camera's aperture to the specified value. The unit is undefined.
-Larger values open the iris wider, smaller values close it.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_IRIS_RELATIVE</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control modifies the
-camera's aperture by the specified amount. The unit is undefined.
-Positive values open the iris one step further, negative values close
-it one step further. This is a write-only control.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_PRIVACY</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Prevent video from being acquired
-by the camera. When this control is set to <constant>TRUE</constant> (1), no
-image can be captured by the camera. Common means to enforce privacy are
-mechanical obturation of the sensor and firmware image processing, but the
-device is not restricted to these methods. Devices that implement the privacy
-control must support read access and may support write access.</entry>
- </row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_BAND_STOP_FILTER</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">Switch the band-stop filter of a
-camera sensor on or off, or specify its strength. Such band-stop filters can
-be used, for example, to filter out the fluorescent light component.</entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-auto-n-preset-white-balance">
- <entry spanname="id"><constant>V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_auto_n_preset_white_balance</entry>
- </row><row><entry spanname="descr">Sets white balance to automatic,
-manual or a preset. The presets determine the color temperature of the light as
-a hint to the camera for white balance adjustments, resulting in the most accurate
-color representation. The following white balance presets are listed in order
-of increasing color temperature.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_MANUAL</constant>&nbsp;</entry>
- <entry>Manual white balance.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_AUTO</constant>&nbsp;</entry>
- <entry>Automatic white balance adjustments.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_INCANDESCENT</constant>&nbsp;</entry>
- <entry>White balance setting for incandescent (tungsten) lighting.
-It generally cools down the colors and corresponds approximately to 2500...3500 K
-color temperature range.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_FLUORESCENT</constant>&nbsp;</entry>
- <entry>White balance preset for fluorescent lighting.
-It corresponds approximately to 4000...5000 K color temperature.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_FLUORESCENT_H</constant>&nbsp;</entry>
- <entry>With this setting the camera will compensate for
-fluorescent H lighting.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_HORIZON</constant>&nbsp;</entry>
- <entry>White balance setting for horizon daylight.
-It corresponds approximately to 5000 K color temperature.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_DAYLIGHT</constant>&nbsp;</entry>
- <entry>White balance preset for daylight (with clear sky).
-It corresponds approximately to 5000...6500 K color temperature.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_FLASH</constant>&nbsp;</entry>
- <entry>With this setting the camera will compensate for the flash
-light. It slightly warms up the colors and corresponds roughly to 5000...5500 K
-color temperature.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_CLOUDY</constant>&nbsp;</entry>
- <entry>White balance preset for moderately overcast sky.
-This option corresponds approximately to 6500...8000 K color temperature
-range.</entry>
- </row>
- <row>
- <entry><constant>V4L2_WHITE_BALANCE_SHADE</constant>&nbsp;</entry>
- <entry>White balance preset for shade or heavily overcast
-sky. It corresponds approximately to 9000...10000 K color temperature.
-</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-wide-dynamic-range">
- <entry spanname="id"><constant>V4L2_CID_WIDE_DYNAMIC_RANGE</constant></entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Enables or disables the camera's wide dynamic
-range feature. This feature makes it possible to obtain clear images in situations where
-the intensity of the illumination varies significantly throughout the scene, i.e.
-there are simultaneously very dark and very bright areas. It is most commonly
-realized in cameras by combining two subsequent frames with different exposure
-times. <footnote id="ctypeconv"><para> This control may be changed to a menu
-control in the future, if more options are required.</para></footnote></entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-image-stabilization">
- <entry spanname="id"><constant>V4L2_CID_IMAGE_STABILIZATION</constant></entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Enables or disables image stabilization.
- <footnoteref linkend="ctypeconv"/></entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_ISO_SENSITIVITY</constant>&nbsp;</entry>
- <entry>integer menu</entry>
-	    </row><row><entry spanname="descr">Determines the ISO equivalent of an
-image sensor, indicating the sensor's sensitivity to light. The numbers are
-expressed in arithmetic scale, as per the <xref linkend="iso12232" /> standard,
-where doubling the sensor sensitivity is represented by doubling the numerical
-ISO value. Applications should interpret the values as standard ISO values
-multiplied by 1000, e.g. control value 800 stands for ISO 0.8. Drivers will
-usually support only a subset of standard ISO values. The effect of setting
-this control while the <constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>
-control is set to a value other than <constant>V4L2_CID_ISO_SENSITIVITY_MANUAL
-</constant> is undefined; drivers should ignore such requests.</entry>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-iso-sensitivity-auto-type">
- <entry spanname="id"><constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_iso_sensitivity_type</entry>
- </row><row><entry spanname="descr">Enables or disables automatic ISO
-sensitivity adjustments.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CID_ISO_SENSITIVITY_MANUAL</constant>&nbsp;</entry>
- <entry>Manual ISO sensitivity.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CID_ISO_SENSITIVITY_AUTO</constant>&nbsp;</entry>
- <entry>Automatic ISO sensitivity adjustments.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
-
- <row id="v4l2-scene-mode">
- <entry spanname="id"><constant>V4L2_CID_SCENE_MODE</constant>&nbsp;</entry>
- <entry>enum&nbsp;v4l2_scene_mode</entry>
-	    </row><row><entry spanname="descr">This control allows selecting
-scene programs, i.e. camera automatic modes optimized for common shooting
-scenes. Within these modes the camera determines the best exposure, aperture,
-focusing, light metering, white balance and equivalent sensitivity. The
-controls of those parameters are influenced by the scene mode control.
-The exact behavior in each mode is subject to the camera specification.
-
-<para>When the scene mode feature is not used, this control should be set to
-<constant>V4L2_SCENE_MODE_NONE</constant> to make sure the other possibly
-related controls are accessible. The following scene programs are defined:
-</para>
-</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SCENE_MODE_NONE</constant>&nbsp;</entry>
- <entry>The scene mode feature is disabled.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_BACKLIGHT</constant>&nbsp;</entry>
- <entry>Backlight. Compensates for dark shadows when light is
- coming from behind a subject, also by automatically turning
- on the flash.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_BEACH_SNOW</constant>&nbsp;</entry>
- <entry>Beach and snow. This mode compensates for all-white or
-bright scenes, which tend to look gray and low contrast, when camera's automatic
-exposure is based on an average scene brightness. To compensate, this mode
-automatically slightly overexposes the frames. The white balance may also be
-adjusted to compensate for the fact that reflected snow looks bluish rather
-than white.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_CANDLELIGHT</constant>&nbsp;</entry>
- <entry>Candle light. The camera generally raises the ISO
-sensitivity and lowers the shutter speed. This mode compensates for a relatively
-close subject in the scene. The flash is disabled in order to preserve the
-ambiance of the light.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_DAWN_DUSK</constant>&nbsp;</entry>
- <entry>Dawn and dusk. Preserves the colors seen in low
-natural light before dusk and after dawn. The camera may turn off the flash,
-and automatically focus at infinity. It will usually boost saturation and
-lower the shutter speed.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_FALL_COLORS</constant>&nbsp;</entry>
- <entry>Fall colors. Increases saturation and adjusts white
-balance for color enhancement. Pictures of autumn leaves get saturated reds
-and yellows.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_FIREWORKS</constant>&nbsp;</entry>
- <entry>Fireworks. Long exposure times are used to capture
-the expanding burst of light from a firework. The camera may invoke image
-stabilization.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_LANDSCAPE</constant>&nbsp;</entry>
- <entry>Landscape. The camera may choose a small aperture to
-provide deep depth of field and long exposure duration to help capture detail
-in dim light conditions. The focus is fixed at infinity. Suitable for distant
-and wide scenery.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_NIGHT</constant>&nbsp;</entry>
- <entry>Night, also known as Night Landscape. Designed for low
-light conditions, it preserves detail in the dark areas without blowing out bright
-objects. The camera generally sets itself to a medium-to-high ISO sensitivity,
-with a relatively long exposure time, and turns the flash off. As such, there will be
-increased image noise and the possibility of a blurred image.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_PARTY_INDOOR</constant>&nbsp;</entry>
- <entry>Party and indoor. Designed to capture indoor scenes
-that are lit by indoor background lighting as well as the flash. The camera
-usually increases ISO sensitivity, and adjusts exposure for the low light
-conditions.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_PORTRAIT</constant>&nbsp;</entry>
- <entry>Portrait. The camera adjusts the aperture so that the
-depth of field is reduced, which helps to isolate the subject against a smooth
-background. Most cameras recognize the presence of faces in the scene and focus
-on them. The color hue is adjusted to enhance skin tones. The intensity of the
-flash is often reduced.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_SPORTS</constant>&nbsp;</entry>
- <entry>Sports. Significantly increases ISO and uses a fast
-shutter speed to freeze motion of rapidly-moving subjects. Increased image
-noise may be seen in this mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_SUNSET</constant>&nbsp;</entry>
- <entry>Sunset. Preserves deep hues seen in sunsets and
-sunrises. It bumps up the saturation.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SCENE_MODE_TEXT</constant>&nbsp;</entry>
-		  <entry>Text. It applies extra contrast and sharpness; it is
-typically a black-and-white mode optimized for readability. Automatic focus
-may be switched to close-up mode and this setting may also involve some
-lens-distortion correction.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_3A_LOCK</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
- <entry spanname="descr">This control locks or unlocks the automatic
-focus, exposure and white balance. The automatic adjustments can be paused
-independently by setting the corresponding lock bit to 1. The camera then retains
-the settings until the lock bit is cleared. The following lock bits are defined:
-</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_LOCK_EXPOSURE</constant></entry>
- <entry>Automatic exposure adjustments lock.</entry>
- </row>
- <row>
- <entry><constant>V4L2_LOCK_WHITE_BALANCE</constant></entry>
- <entry>Automatic white balance adjustments lock.</entry>
- </row>
- <row>
- <entry><constant>V4L2_LOCK_FOCUS</constant></entry>
- <entry>Automatic focus lock.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry spanname="descr">
-When a given algorithm is not enabled, drivers should ignore requests
-to lock it and should return no error. An example might be an application
-setting bit <constant>V4L2_LOCK_WHITE_BALANCE</constant> when the
-<constant>V4L2_CID_AUTO_WHITE_BALANCE</constant> control is set to
-<constant>FALSE</constant>. The value of this control may be changed
-by exposure, white balance or focus controls.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_PAN_SPEED</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control turns the
-camera horizontally at the specified speed. The unit is undefined. A
-positive value moves the camera to the right (clockwise when viewed
-from above), a negative value to the left. A value of zero stops the motion
-if one is in progress and has no effect otherwise.</entry>
- </row>
- <row><entry></entry></row>
-
- <row>
- <entry spanname="id"><constant>V4L2_CID_TILT_SPEED</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row><row><entry spanname="descr">This control turns the
-camera vertically at the specified speed. The unit is undefined. A
-positive value moves the camera up, a negative value down. A value of zero
-stops the motion if one is in progress and has no effect otherwise.</entry>
- </row>
- <row><entry></entry></row>
-
- </tbody>
- </tgroup>
- </table>
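-
-      <para>As a brief, hedged example of the camera controls above, the
-following sketch switches the device to manual exposure, requests a 10 ms
-exposure time (100 units of 100 &micro;s) and reads back the value the driver
-actually applied. The device node is an assumption.</para>
-
-      <programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-int main(void)
-{
-        int fd = open("/dev/video0", O_RDWR);   /* assumed camera node */
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        if (fd &lt; 0)
-                return 1;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        /* Switch to fully manual exposure... */
-        ctrl[0].id = V4L2_CID_EXPOSURE_AUTO;
-        ctrl[0].value = V4L2_EXPOSURE_MANUAL;
-        /* ...and request a 10 ms exposure time (the control is in 100 us units). */
-        ctrl[1].id = V4L2_CID_EXPOSURE_ABSOLUTE;
-        ctrl[1].value = 100;
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls) &lt; 0)
-                perror("VIDIOC_S_EXT_CTRLS");
-
-        /* Read back the exposure time the driver actually applied. */
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl[1];
-        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls) == 0)
-                printf("exposure: %d x 100us\n", ctrl[1].value);
-
-        close(fd);
-        return 0;
-}
-</programlisting>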
- </section>
-
- <section id="fm-tx-controls">
- <title>FM Transmitter Control Reference</title>
-
- <para>The FM Transmitter (FM_TX) class includes controls for common features of
-devices capable of FM transmission. Currently this class includes parameters for audio
-compression, pilot tone generation, audio deviation limiter, RDS transmission and
-tuning power features.</para>
-
- <table pgwide="1" frame="none" id="fm-tx-control-id">
- <title>FM_TX Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FM_TX_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The FM_TX class
-descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
-description of this control class.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_DEVIATION</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Configures RDS signal frequency deviation level in Hz.
-The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_PI</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Sets the RDS Programme Identification field
-for transmission.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_PTY</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Sets the RDS Programme Type field for transmission.
-This encodes up to 31 pre-defined programme types.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_PS_NAME</constant>&nbsp;</entry>
- <entry>string</entry>
- </row>
-	    <row><entry spanname="descr">Sets the Programme Service name (PS_NAME) for transmission.
-It is intended for static display on a receiver and is the primary aid to listeners in programme service
-identification and selection. Annex E of <xref linkend="iec62106" />, the RDS specification,
-gives a full description of the correct character encoding for Programme Service name strings.
-According to the RDS specification, PS is usually a single eight-character text. However, receivers
-exist which can scroll strings sized as 8 x N characters. Therefore this control must be configured
-in steps of 8 characters, i.e. it must always contain a string whose length is a multiple of 8.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_RADIO_TEXT</constant>&nbsp;</entry>
- <entry>string</entry>
- </row>
-	    <row><entry spanname="descr">Sets the Radio Text info for transmission. It is a textual description of
-what is being broadcast. RDS Radio Text can be applied when the broadcaster wishes to transmit longer PS names,
-programme-related information or any other text. In these cases, RadioText should be used in addition to
-<constant>V4L2_CID_RDS_TX_PS_NAME</constant>. The encoding of Radio Text strings is also fully described
-in Annex E of <xref linkend="iec62106" />. The length of Radio Text strings depends on which RDS Block is
-used to transmit it, either 32 characters (2A block) or 64 characters (2B block). However, receivers
-exist which can scroll strings sized as 32 x N or 64 x N characters. Therefore this control must be configured
-in steps of 32 or 64 characters, i.e. it must always contain a string whose length is a multiple of 32 or 64.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_MONO_STEREO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Sets the Mono/Stereo bit of the Decoder Identification code. If set,
-then the audio was recorded as stereo.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_ARTIFICIAL_HEAD</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Sets the
-<ulink url="http://en.wikipedia.org/wiki/Artificial_head">Artificial Head</ulink> bit of the Decoder
-Identification code. If set, then the audio was recorded using an artificial head.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_COMPRESSED</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Sets the Compressed bit of the Decoder Identification code. If set,
-then the audio is compressed.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_DYNAMIC_PTY</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Sets the Dynamic PTY bit of the Decoder Identification code. If set,
-then the PTY code is dynamically switched.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then a traffic announcement is in progress.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_TRAFFIC_PROGRAM</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then the tuned programme carries traffic announcements.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_MUSIC_SPEECH</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then this channel broadcasts music. If cleared, then it
-broadcasts speech. If the transmitter doesn't make this distinction, then it should be set.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_ALT_FREQS_ENABLE</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then transmit alternate frequencies.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_TX_ALT_FREQS</constant>&nbsp;</entry>
- <entry>__u32 array</entry>
- </row>
- <row><entry spanname="descr">The alternate frequencies in kHz units. The RDS standard allows
-for up to 25 frequencies to be defined. Drivers may support fewer frequencies so check
-the array size.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_LIMITER_ENABLED</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enables or disables the audio deviation limiter feature.
-The limiter is useful when trying to maximize the audio volume, minimize receiver-generated
-distortion and prevent overmodulation.
-</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_LIMITER_RELEASE_TIME</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Sets the audio deviation limiter feature release time.
-Unit is in useconds. Step and range are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_LIMITER_DEVIATION</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Configures audio frequency deviation level in Hz.
-The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_COMPRESSION_ENABLED</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enables or disables the audio compression feature.
-This feature amplifies signals below the threshold by a fixed gain and compresses audio
-signals above the threshold by the ratio of Threshold/(Gain + Threshold).</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_COMPRESSION_GAIN</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	    <row><entry spanname="descr">Sets the gain for the audio compression feature. It is
-a dB value. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_COMPRESSION_THRESHOLD</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	    <row><entry spanname="descr">Sets the threshold level for the audio compression feature.
-It is a dB value. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	    <row><entry spanname="descr">Sets the attack time for the audio compression feature.
-It is a useconds value. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	    <row><entry spanname="descr">Sets the release time for the audio compression feature.
-It is a useconds value. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_PILOT_TONE_ENABLED</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enables or disables the pilot tone generation feature.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_PILOT_TONE_DEVIATION</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Configures pilot tone frequency deviation level. Unit is
-in Hz. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_PILOT_TONE_FREQUENCY</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Configures pilot tone frequency value. Unit is
-in Hz. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TUNE_PREEMPHASIS</constant>&nbsp;</entry>
- <entry>enum v4l2_preemphasis</entry>
- </row>
- <row id="v4l2-preemphasis"><entry spanname="descr">Configures the pre-emphasis value for broadcasting.
-A pre-emphasis filter is applied to the broadcast to accentuate the high audio frequencies.
-Depending on the region, a time constant of either 50 or 75 useconds is used. The enum&nbsp;v4l2_preemphasis
-defines the possible pre-emphasis values:</entry>
- </row><row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_PREEMPHASIS_DISABLED</constant>&nbsp;</entry>
- <entry>No pre-emphasis is applied.</entry>
- </row>
- <row>
- <entry><constant>V4L2_PREEMPHASIS_50_uS</constant>&nbsp;</entry>
- <entry>A pre-emphasis of 50 uS is used.</entry>
- </row>
- <row>
- <entry><constant>V4L2_PREEMPHASIS_75_uS</constant>&nbsp;</entry>
- <entry>A pre-emphasis of 75 uS is used.</entry>
- </row>
- </tbody>
- </entrytbl>
-
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TUNE_POWER_LEVEL</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Sets the output power level for signal transmission.
-Unit is in dBuV. Range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TUNE_ANTENNA_CAPACITOR</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	    <row><entry spanname="descr">This selects the value of the antenna tuning capacitor
-manually, or automatically if set to zero. Unit, range and step are driver-specific.</entry>
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
-
-<para>For more details about the RDS specification, refer to the
-<xref linkend="iec62106" /> document from CENELEC.</para>
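-
-<para>The following sketch sets a Programme Service name and an output power
-level on an FM transmitter through the extended control interface. The device
-node, the PS name and the power value are assumptions for illustration; note
-that the PS name is exactly 8 characters long, as required above, and that for
-string controls the <structfield>size</structfield> and
-<structfield>string</structfield> fields of the extended control are filled in.</para>
-
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-int main(void)
-{
-        int fd = open("/dev/radio0", O_RDWR);   /* assumed FM transmitter node */
-        char ps_name[] = "RADIO  1";            /* 8 characters, as required */
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        if (fd &lt; 0)
-                return 1;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        /* Programme Service name: a string control, so size and string are set. */
-        ctrl[0].id = V4L2_CID_RDS_TX_PS_NAME;
-        ctrl[0].size = strlen(ps_name) + 1;     /* payload size including the NUL */
-        ctrl[0].string = ps_name;
-
-        /* Output power in dBuV; the value, range and step are driver-specific. */
-        ctrl[1].id = V4L2_CID_TUNE_POWER_LEVEL;
-        ctrl[1].value = 118;
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls) &lt; 0)
-                perror("VIDIOC_S_EXT_CTRLS");
-
-        close(fd);
-        return 0;
-}
-</programlisting>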
- </section>
-
- <section id="flash-controls">
- <title>Flash Control Reference</title>
-
- <para>
- The V4L2 flash controls are intended to provide generic access
- to flash controller devices. Flash controller devices are
- typically used in digital cameras.
- </para>
-
- <para>
- The interface can support both LED and xenon flash devices. As
- of writing this, there is no xenon flash driver using this
- interface.
- </para>
-
- <section id="flash-controls-use-cases">
- <title>Supported use cases</title>
-
- <section>
- <title>Unsynchronised LED flash (software strobe)</title>
-
- <para>
-	    Unsynchronised LED flash is controlled directly by the
-	    host, just as the sensor is. The flash must be enabled by the host
- before the exposure of the image starts and disabled once
- it ends. The host is fully responsible for the timing of
- the flash.
- </para>
-
- <para>Example of such device: Nokia N900.</para>
- </section>
-
- <section>
- <title>Synchronised LED flash (hardware strobe)</title>
-
- <para>
- The synchronised LED flash is pre-programmed by the host
- (power and timeout) but controlled by the sensor through a
- strobe signal from the sensor to the flash.
- </para>
-
- <para>
- The sensor controls the flash duration and timing. This
- information typically must be made available to the
- sensor.
- </para>
-
- </section>
-
- <section>
- <title>LED flash as torch</title>
-
- <para>
-	    An LED flash may be used as a torch, either on its own or in
-	    conjunction with another use case involving the camera.
- </para>
-
-
- <table pgwide="1" frame="none" id="flash-control-id">
- <title>Flash Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_CLASS</constant></entry>
- <entry>class</entry>
- </row>
- <row>
- <entry spanname="descr">The FLASH class descriptor.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_LED_MODE</constant></entry>
- <entry>menu</entry>
- </row>
- <row id="v4l2-flash-led-mode">
- <entry spanname="descr">Defines the mode of the flash LED,
- the high-power white LED attached to the flash controller.
-	      Setting this control may not be possible in the presence of
- some faults. See V4L2_CID_FLASH_FAULT.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FLASH_LED_MODE_NONE</constant></entry>
- <entry>Off.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_LED_MODE_FLASH</constant></entry>
- <entry>Flash mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_LED_MODE_TORCH</constant></entry>
- <entry>Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_STROBE_SOURCE</constant></entry>
- <entry>menu</entry>
- </row>
- <row id="v4l2-flash-strobe-source"><entry
- spanname="descr">Defines the source of the flash LED
- strobe.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FLASH_STROBE_SOURCE_SOFTWARE</constant></entry>
- <entry>The flash strobe is triggered by using
- the V4L2_CID_FLASH_STROBE control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_STROBE_SOURCE_EXTERNAL</constant></entry>
- <entry>The flash strobe is triggered by an
- external source. Typically this is a sensor,
-		  which makes it possible to synchronise the
-		  flash strobe start with the exposure start.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_STROBE</constant></entry>
- <entry>button</entry>
- </row>
- <row>
- <entry spanname="descr">Strobe flash. Valid when
- V4L2_CID_FLASH_LED_MODE is set to
- V4L2_FLASH_LED_MODE_FLASH and V4L2_CID_FLASH_STROBE_SOURCE
- is set to V4L2_FLASH_STROBE_SOURCE_SOFTWARE. Setting this
-	      control may not be possible in the presence of some faults.
- See V4L2_CID_FLASH_FAULT.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_STROBE_STOP</constant></entry>
- <entry>button</entry>
- </row>
- <row><entry spanname="descr">Stop flash strobe immediately.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_STROBE_STATUS</constant></entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Strobe status: whether the flash
- is strobing at the moment or not. This is a read-only
- control.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_TIMEOUT</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Hardware timeout for flash. The
- flash strobe is stopped after this period of time has
- passed from the start of the strobe.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_INTENSITY</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Intensity of the flash strobe when
- the flash LED is in flash mode
- (V4L2_FLASH_LED_MODE_FLASH). The unit should be milliamps
- (mA) if possible.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_TORCH_INTENSITY</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Intensity of the flash LED in
- torch mode (V4L2_FLASH_LED_MODE_TORCH). The unit should be
- milliamps (mA) if possible. Setting this control may not
-	      be possible in the presence of some faults. See
- V4L2_CID_FLASH_FAULT.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_INDICATOR_INTENSITY</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Intensity of the indicator LED.
- The indicator LED may be fully independent of the flash
- LED. The unit should be microamps (uA) if possible.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_FAULT</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
- <entry spanname="descr">Faults related to the flash. The
- faults tell about specific problems in the flash chip
- itself or the LEDs attached to it. Faults may prevent
- further use of some of the flash controls. In particular,
- V4L2_CID_FLASH_LED_MODE is set to V4L2_FLASH_LED_MODE_NONE
- if the fault affects the flash LED. Exactly which faults
- have such an effect is chip dependent. Reading the faults
- resets the control and returns the chip to a usable state
- if possible.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FLASH_FAULT_OVER_VOLTAGE</constant></entry>
- <entry>Flash controller voltage to the flash LED
- has exceeded the limit specific to the flash
- controller.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_TIMEOUT</constant></entry>
- <entry>The flash strobe was still on when
- the timeout set by the user ---
- V4L2_CID_FLASH_TIMEOUT control --- has expired.
- Not all flash controllers may set this in all
- such conditions.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_OVER_TEMPERATURE</constant></entry>
- <entry>The flash controller has overheated.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_SHORT_CIRCUIT</constant></entry>
- <entry>The short circuit protection of the flash
- controller has been triggered.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_OVER_CURRENT</constant></entry>
- <entry>Current in the LED power supply has exceeded the limit
- specific to the flash controller.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_INDICATOR</constant></entry>
- <entry>The flash controller has detected a short or open
- circuit condition on the indicator LED.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_UNDER_VOLTAGE</constant></entry>
- <entry>Flash controller voltage to the flash LED
- has been below the minimum limit specific to the flash
- controller.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_INPUT_VOLTAGE</constant></entry>
- <entry>The input voltage of the flash controller is below
- the limit under which strobing the flash at full current
-		  will not be possible. The condition persists until this flag
- is no longer set.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE</constant></entry>
- <entry>The temperature of the LED has exceeded its
- allowed upper limit.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_CHARGE</constant></entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">Enable or disable charging of the xenon
- flash capacitor.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FLASH_READY</constant></entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Is the flash ready to strobe?
- Xenon flashes require their capacitors charged before
- strobing. LED flashes often require a cooldown period
- after strobe during which another strobe will not be
- possible. This is a read-only control.</entry>
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
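-
-	<para>The sketch below is an informative illustration only, not part of
-	the flash control specification: it shows one possible way to program a
-	software strobe using the controls above. The file descriptor fd and all
-	numeric values are assumptions; valid ranges and units are driver-specific,
-	and faults (see <constant>V4L2_CID_FLASH_FAULT</constant>) may prevent some
-	of these operations.</para>
-
-	<example>
-	  <title>Configuring a software strobe (informative sketch)</title>
-
-	  <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: configure and fire a software-strobed LED flash. fd refers to an
-   already opened device node exposing the flash controls; the timeout and
-   intensity values are examples only. */
-static void flash_once(int fd)
-{
-        struct v4l2_ext_control ctrl[4];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        ctrl[0].id = V4L2_CID_FLASH_LED_MODE;
-        ctrl[0].value = V4L2_FLASH_LED_MODE_FLASH;
-        ctrl[1].id = V4L2_CID_FLASH_STROBE_SOURCE;
-        ctrl[1].value = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
-        ctrl[2].id = V4L2_CID_FLASH_TIMEOUT;
-        ctrl[2].value = 100000;  /* example timeout, unit as documented by the driver */
-        ctrl[3].id = V4L2_CID_FLASH_INTENSITY;
-        ctrl[3].value = 440;     /* example intensity, preferably in mA */
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_FLASH;
-        ctrls.count = 4;
-        ctrls.controls = ctrl;
-
-        if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls)) {
-                perror("VIDIOC_S_EXT_CTRLS");
-                return;
-        }
-
-        /* Trigger the strobe; the flash stops by itself after the timeout. */
-        memset(ctrl, 0, sizeof(ctrl[0]));
-        ctrl[0].id = V4L2_CID_FLASH_STROBE;
-        ctrls.count = 1;
-        if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls))
-                perror("VIDIOC_S_EXT_CTRLS");
-}
-</programlisting>
-	</example>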
- </section>
- </section>
- </section>
-
- <section id="jpeg-controls">
- <title>JPEG Control Reference</title>
- <para>The JPEG class includes controls for common features of JPEG
- encoders and decoders. Currently it includes features for codecs
-	  implementing the progressive baseline DCT compression process with
-	  Huffman entropy coding.</para>
- <table pgwide="1" frame="none" id="jpeg-control-id">
- <title>JPEG Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_JPEG_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The JPEG class descriptor. Calling
- &VIDIOC-QUERYCTRL; for this control will return a description of this
- control class.
-
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_JPEG_CHROMA_SUBSAMPLING</constant></entry>
- <entry>menu</entry>
- </row>
- <row id="v4l2-jpeg-chroma-subsampling">
-	      <entry spanname="descr">The chroma subsampling factors describe how
-	      each component of an input image is sampled, with respect to the
-	      maximum sample rate in each spatial dimension. See <xref linkend="itu-t81"/>,
-	      clause A.1.1. for more details. The <constant>
-	      V4L2_CID_JPEG_CHROMA_SUBSAMPLING</constant> control determines how
-	      the Cb and Cr components are downsampled after converting an input image
-	      from RGB to the Y'CbCr color space.
- </entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_JPEG_CHROMA_SUBSAMPLING_444</constant>
- </entry><entry>No chroma subsampling, each pixel has
- Y, Cr and Cb values.</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_CHROMA_SUBSAMPLING_422</constant>
- </entry><entry>Horizontally subsample Cr, Cb components
- by a factor of 2.</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_CHROMA_SUBSAMPLING_420</constant>
- </entry><entry>Subsample Cr, Cb components horizontally
- and vertically by 2.</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_CHROMA_SUBSAMPLING_411</constant>
- </entry><entry>Horizontally subsample Cr, Cb components
- by a factor of 4.</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_CHROMA_SUBSAMPLING_410</constant>
- </entry><entry>Subsample Cr, Cb components horizontally
- by 4 and vertically by 2.</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY</constant>
- </entry><entry>Use only luminance component.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_JPEG_RESTART_INTERVAL</constant>
- </entry><entry>integer</entry>
- </row>
- <row><entry spanname="descr">
-	    The restart interval determines the interval at which RSTm
-	    markers (m = 0..7) are inserted. These markers periodically
-	    reinitialize the encoding process, so that blocks of
-	    an image can be processed independently.
-	    For lossy compression processes the restart interval unit is the
-	    MCU (Minimum Coded Unit) and its value is carried in the DRI
-	    (Define Restart Interval) marker. If the <constant>
-	    V4L2_CID_JPEG_RESTART_INTERVAL</constant> control is set to 0,
-	    DRI and RSTm markers will not be inserted.
- </entry>
- </row>
- <row id="jpeg-quality-control">
- <entry spanname="id"><constant>V4L2_CID_JPEG_COMPRESSION_QUALITY</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">
-	      The <constant>V4L2_CID_JPEG_COMPRESSION_QUALITY</constant> control
-	      determines the trade-off between image quality and size.
-	      It provides a simpler method for applications to control image quality,
-	      without the need to reconfigure the luminance and chrominance
-	      quantization tables directly.
-
-	      In cases where a driver uses quantization tables configured directly
-	      by the application, using interfaces defined elsewhere, the <constant>
-	      V4L2_CID_JPEG_COMPRESSION_QUALITY</constant> control should be set
-	      by the driver to 0.
-
- <para>The value range of this control is driver-specific. Only
- positive, non-zero values are meaningful. The recommended range
- is 1 - 100, where larger values correspond to better image quality.
- </para>
- </entry>
- </row>
- <row id="jpeg-active-marker-control">
- <entry spanname="id"><constant>V4L2_CID_JPEG_ACTIVE_MARKER</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
-	      <entry spanname="descr">Specifies which JPEG markers are included
-	      in the compressed stream. This control is valid only for encoders.
- </entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_JPEG_ACTIVE_MARKER_APP0</constant></entry>
- <entry>Application data segment APP<subscript>0</subscript>.</entry>
- </row><row>
- <entry><constant>V4L2_JPEG_ACTIVE_MARKER_APP1</constant></entry>
- <entry>Application data segment APP<subscript>1</subscript>.</entry>
- </row><row>
- <entry><constant>V4L2_JPEG_ACTIVE_MARKER_COM</constant></entry>
- <entry>Comment segment.</entry>
- </row><row>
- <entry><constant>V4L2_JPEG_ACTIVE_MARKER_DQT</constant></entry>
- <entry>Quantization tables segment.</entry>
- </row><row>
- <entry><constant>V4L2_JPEG_ACTIVE_MARKER_DHT</constant></entry>
- <entry>Huffman tables segment.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
- <para>For more details about JPEG specification, refer
- to <xref linkend="itu-t81"/>, <xref linkend="jfif"/>,
- <xref linkend="w3c-jpeg-jfif"/>.</para>
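-
-	<para>The following is an informative sketch, not part of the JPEG control
-	specification: it shows how an application might set a compression quality
-	and a chroma subsampling mode with the <constant>VIDIOC_S_EXT_CTRLS</constant>
-	ioctl. The file descriptor fd and the quality value 85 are assumptions for
-	illustration; the exact quality range is driver-specific.</para>
-
-	<example>
-	  <title>Setting JPEG encoder controls (informative sketch)</title>
-
-	  <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: request 4:2:2 chroma subsampling and an example quality on an
-   already opened JPEG encoder node (fd). */
-static void jpeg_setup(int fd)
-{
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        ctrl[0].id = V4L2_CID_JPEG_COMPRESSION_QUALITY;
-        ctrl[0].value = 85;    /* example, recommended range is 1 - 100 */
-        ctrl[1].id = V4L2_CID_JPEG_CHROMA_SUBSAMPLING;
-        ctrl[1].value = V4L2_JPEG_CHROMA_SUBSAMPLING_422;
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_JPEG;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls))
-                perror("VIDIOC_S_EXT_CTRLS");
-}
-</programlisting>
-	</example>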
- </section>
-
- <section id="image-source-controls">
- <title>Image Source Control Reference</title>
-
- <para>
- The Image Source control class is intended for low-level
- control of image source devices such as image sensors. The
- devices feature an analogue to digital converter and a bus
- transmitter to transmit the image data out of the device.
- </para>
-
- <table pgwide="1" frame="none" id="image-source-control-id">
- <title>Image Source Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_IMAGE_SOURCE_CLASS</constant></entry>
- <entry>class</entry>
- </row>
- <row>
- <entry spanname="descr">The IMAGE_SOURCE class descriptor.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_VBLANK</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Vertical blanking. The idle period
- after every frame during which no image data is produced.
-	    The unit of vertical blanking is a line. Every line has a
-	    length of the image width plus the horizontal blanking, at the
-	    pixel rate defined by the
-	    <constant>V4L2_CID_PIXEL_RATE</constant> control in the
-	    same sub-device.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_HBLANK</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Horizontal blanking. The idle
- period after every line of image data during which no
- image data is produced. The unit of horizontal blanking is
- pixels.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_ANALOGUE_GAIN</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Analogue gain is gain affecting
- all colour components in the pixel matrix. The gain
- operation is performed in the analogue domain before A/D
- conversion.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_RED</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Test pattern red colour component.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_GREENR</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Test pattern green (next to red)
- colour component.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_BLUE</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Test pattern blue colour component.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN_GREENB</constant></entry>
- <entry>integer</entry>
- </row>
- <row>
- <entry spanname="descr">Test pattern green (next to blue)
- colour component.
- </entry>
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
-
- </section>
-
- <section id="image-process-controls">
- <title>Image Process Control Reference</title>
-
- <para>
- The Image Process control class is intended for low-level control of
- image processing functions. Unlike
- <constant>V4L2_CID_IMAGE_SOURCE_CLASS</constant>, the controls in
-      this class affect the processing of the image, and do not control
-      the capturing of it.
- </para>
-
- <table pgwide="1" frame="none" id="image-process-control-id">
- <title>Image Process Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_IMAGE_PROC_CLASS</constant></entry>
- <entry>class</entry>
- </row>
- <row>
- <entry spanname="descr">The IMAGE_PROC class descriptor.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_LINK_FREQ</constant></entry>
- <entry>integer menu</entry>
- </row>
- <row>
-	    <entry spanname="descr">Data bus frequency. Together with the
-	    media bus pixel code and the bus type (clock cycles per sample),
-	    the data bus frequency defines the pixel rate
-	    (<constant>V4L2_CID_PIXEL_RATE</constant>) in the
-	    pixel array (or possibly elsewhere, if the device is not an
-	    image sensor). The frame rate can be calculated from the pixel
-	    clock, the image width and height, and the horizontal and vertical
-	    blanking. While the pixel rate control may be defined elsewhere
-	    than in the subdev containing the pixel array, the frame rate
-	    cannot be obtained from that information alone. This is because
-	    only in the pixel array can the vertical and horizontal blanking
-	    information be assumed to be exact: no other blanking is
-	    allowed in the pixel array. The frame rate is selected by
-	    choosing the desired horizontal and vertical
-	    blanking. The unit of this control is Hz.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_PIXEL_RATE</constant></entry>
- <entry>64-bit integer</entry>
- </row>
- <row>
- <entry spanname="descr">Pixel rate in the source pads of
- the subdev. This control is read-only and its unit is
- pixels / second.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TEST_PATTERN</constant></entry>
- <entry>menu</entry>
- </row>
- <row id="v4l2-test-pattern">
- <entry spanname="descr"> Some capture/display/sensor devices have
- the capability to generate test pattern images. These hardware
- specific test patterns can be used to test if a device is working
- properly.</entry>
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
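-
-    <para>As an informative sketch only (not part of the control definitions
-    above), the frame rate of a sensor sub-device can be estimated as
-    pixel rate / ((width + horizontal blanking) * (height + vertical blanking)).
-    In the code below fd is assumed to be an open sub-device node exposing
-    these controls, and the width and height are assumed to come from the
-    sub-device format; they are passed in as plain parameters here.</para>
-
-    <example>
-      <title>Estimating the frame rate from blanking and pixel rate (informative sketch)</title>
-
-      <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static void print_frame_rate(int fd, unsigned int width, unsigned int height)
-{
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-        long long pixel_rate;
-        int hblank, vblank;
-
-        memset(&amp;ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl;
-
-        /* V4L2_CID_PIXEL_RATE is a read-only 64-bit control. */
-        ctrl.id = V4L2_CID_PIXEL_RATE;
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_IMAGE_PROC;
-        if (-1 == ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls))
-                return;
-        pixel_rate = ctrl.value64;
-
-        ctrl.id = V4L2_CID_HBLANK;
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_IMAGE_SOURCE;
-        if (-1 == ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls))
-                return;
-        hblank = ctrl.value;
-
-        ctrl.id = V4L2_CID_VBLANK;
-        if (-1 == ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls))
-                return;
-        vblank = ctrl.value;
-
-        /* frames per second = pixel rate / (total line length * total frame height) */
-        printf("%.2f fps\n", (double)pixel_rate /
-               ((double)(width + hblank) * (height + vblank)));
-}
-</programlisting>
-    </example>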
-
- </section>
-
- <section id="dv-controls">
- <title>Digital Video Control Reference</title>
-
- <para>
- The Digital Video control class is intended to control receivers
- and transmitters for <ulink url="http://en.wikipedia.org/wiki/Vga">VGA</ulink>,
- <ulink url="http://en.wikipedia.org/wiki/Digital_Visual_Interface">DVI</ulink>
- (Digital Visual Interface), HDMI (<xref linkend="hdmi" />) and DisplayPort (<xref linkend="dp" />).
- These controls are generally expected to be private to the receiver or transmitter
- subdevice that implements them, so they are only exposed on the
- <filename>/dev/v4l-subdev*</filename> device node.
- </para>
-
- <para>Note that these devices can have multiple input or output pads which are
- hooked up to e.g. HDMI connectors. Even though the subdevice will receive or
- transmit video from/to only one of those pads, the other pads can still be
- active when it comes to EDID (Extended Display Identification Data,
- <xref linkend="vesaedid" />) and HDCP (High-bandwidth Digital Content
- Protection System, <xref linkend="hdcp" />) processing, allowing the device
- to do the fairly slow EDID/HDCP handling in advance. This allows for quick
- switching between connectors.</para>
-
- <para>These pads appear in several of the controls in this section as
- bitmasks, one bit for each pad. Bit 0 corresponds to pad 0, bit 1 to pad 1,
-    etc. The maximum value of the control is the bitmask with all valid pad bits set.</para>
-
- <table pgwide="1" frame="none" id="dv-control-id">
- <title>Digital Video Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_CLASS</constant></entry>
- <entry>class</entry>
- </row>
- <row>
- <entry spanname="descr">The Digital Video class descriptor.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_TX_HOTPLUG</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
- <entry spanname="descr">Many connectors have a hotplug pin which is high
- if EDID information is available from the source. This control shows the
- state of the hotplug pin as seen by the transmitter.
- Each bit corresponds to an output pad on the transmitter. If an output pad
- does not have an associated hotplug pin, then the bit for that pad will be 0.
- This read-only control is applicable to DVI-D, HDMI and DisplayPort connectors.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_TX_RXSENSE</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
- <entry spanname="descr">Rx Sense is the detection of pull-ups on the TMDS
- clock lines. This normally means that the sink has left/entered standby (i.e.
- the transmitter can sense that the receiver is ready to receive video).
- Each bit corresponds to an output pad on the transmitter. If an output pad
- does not have an associated Rx Sense, then the bit for that pad will be 0.
- This read-only control is applicable to DVI-D and HDMI devices.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_TX_EDID_PRESENT</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
- <entry spanname="descr">When the transmitter sees the hotplug signal from the
- receiver it will attempt to read the EDID. If set, then the transmitter has read
- at least the first block (= 128 bytes).
- Each bit corresponds to an output pad on the transmitter. If an output pad
- does not support EDIDs, then the bit for that pad will be 0.
- This read-only control is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_TX_MODE</constant></entry>
- <entry id="v4l2-dv-tx-mode">enum v4l2_dv_tx_mode</entry>
- </row>
- <row>
- <entry spanname="descr">HDMI transmitters can transmit in DVI-D mode (just video)
- or in HDMI mode (video + audio + auxiliary data). This control selects which mode
- to use: V4L2_DV_TX_MODE_DVI_D or V4L2_DV_TX_MODE_HDMI.
- This control is applicable to HDMI connectors.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_TX_RGB_RANGE</constant></entry>
- <entry id="v4l2-dv-rgb-range">enum v4l2_dv_rgb_range</entry>
- </row>
- <row>
- <entry spanname="descr">Select the quantization range for RGB output. V4L2_DV_RANGE_AUTO
- follows the RGB quantization range specified in the standard for the video interface
-	    (i.e. <xref linkend="cea861" /> for HDMI). V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the standard
- to be compatible with sinks that have not implemented the standard correctly
- (unfortunately quite common for HDMI and DVI-D). Full range allows all possible values to be
- used whereas limited range sets the range to (16 &lt;&lt; (N-8)) - (235 &lt;&lt; (N-8))
- where N is the number of bits per component.
- This control is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_TX_IT_CONTENT_TYPE</constant></entry>
- <entry id="v4l2-dv-content-type">enum v4l2_dv_it_content_type</entry>
- </row>
- <row><entry spanname="descr">Configures the IT Content Type
- of the transmitted video. This information is sent over HDMI and DisplayPort connectors
- as part of the AVI InfoFrame. The term 'IT Content' is used for content that originates
- from a computer as opposed to content from a TV broadcast or an analog source. The
- enum&nbsp;v4l2_dv_it_content_type defines the possible content types:</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_DV_IT_CONTENT_TYPE_GRAPHICS</constant>&nbsp;</entry>
- <entry>Graphics content. Pixel data should be passed unfiltered and without
- analog reconstruction.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DV_IT_CONTENT_TYPE_PHOTO</constant>&nbsp;</entry>
- <entry>Photo content. The content is derived from digital still pictures.
- The content should be passed through with minimal scaling and picture
- enhancements.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DV_IT_CONTENT_TYPE_CINEMA</constant>&nbsp;</entry>
- <entry>Cinema content.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DV_IT_CONTENT_TYPE_GAME</constant>&nbsp;</entry>
- <entry>Game content. Audio and video latency should be minimized.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DV_IT_CONTENT_TYPE_NO_ITC</constant>&nbsp;</entry>
- <entry>No IT Content information is available and the ITC bit in the AVI
- InfoFrame is set to 0.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_RX_POWER_PRESENT</constant></entry>
- <entry>bitmask</entry>
- </row>
- <row>
- <entry spanname="descr">Detects whether the receiver receives power from the source
-	    (e.g. HDMI carries 5V on one of the pins). This is often used to power an EEPROM
-	    which contains EDID information, such that the source can read the EDID even if
-	    the sink is in standby/power off.
-	    Each bit corresponds to an input pad on the receiver. If an input pad
- cannot detect whether power is present, then the bit for that pad will be 0.
- This read-only control is applicable to DVI-D, HDMI and DisplayPort connectors.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_RX_RGB_RANGE</constant></entry>
- <entry>enum v4l2_dv_rgb_range</entry>
- </row>
- <row>
- <entry spanname="descr">Select the quantization range for RGB input. V4L2_DV_RANGE_AUTO
- follows the RGB quantization range specified in the standard for the video interface
-	    (i.e. <xref linkend="cea861" /> for HDMI). V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the standard
- to be compatible with sources that have not implemented the standard correctly
- (unfortunately quite common for HDMI and DVI-D). Full range allows all possible values to be
- used whereas limited range sets the range to (16 &lt;&lt; (N-8)) - (235 &lt;&lt; (N-8))
- where N is the number of bits per component.
- This control is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors.
- </entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DV_RX_IT_CONTENT_TYPE</constant></entry>
- <entry>enum v4l2_dv_it_content_type</entry>
- </row>
- <row><entry spanname="descr">Reads the IT Content Type
- of the received video. This information is sent over HDMI and DisplayPort connectors
- as part of the AVI InfoFrame. The term 'IT Content' is used for content that originates
- from a computer as opposed to content from a TV broadcast or an analog source. See
- <constant>V4L2_CID_DV_TX_IT_CONTENT_TYPE</constant> for the available content types.</entry>
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
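-
-    <para>Purely as an informative sketch (fd and the pad number are
-    assumptions, not part of the specification above), an application could
-    check the hotplug state of one transmitter output pad like this:</para>
-
-    <example>
-      <title>Reading the transmitter hotplug state (informative sketch)</title>
-
-      <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Returns 1 if the hotplug pin of the given output pad is high,
-   0 if it is low, -1 on error. */
-static int tx_hotplug_detected(int fd, unsigned int pad)
-{
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&amp;ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-        ctrl.id = V4L2_CID_DV_TX_HOTPLUG;
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_DV;
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl;
-
-        if (-1 == ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls))
-                return -1;
-
-        /* Bit 0 corresponds to pad 0, bit 1 to pad 1, and so on. */
-        return (ctrl.value &gt;&gt; pad) &amp; 1;
-}
-</programlisting>
-    </example>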
-
- </section>
-
- <section id="fm-rx-controls">
- <title>FM Receiver Control Reference</title>
-
- <para>The FM Receiver (FM_RX) class includes controls for common features of
- FM Reception capable devices.</para>
-
- <table pgwide="1" frame="none" id="fm-rx-control-id">
- <title>FM_RX Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_FM_RX_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The FM_RX class
-descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
-description of this control class.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RECEPTION</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row><row><entry spanname="descr">Enables/disables RDS
- reception by the radio tuner</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RX_PTY</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
-	    <row><entry spanname="descr">Gets the RDS Programme Type field.
-This encodes up to 31 pre-defined programme types.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RX_PS_NAME</constant>&nbsp;</entry>
- <entry>string</entry>
- </row>
- <row><entry spanname="descr">Gets the Programme Service name (PS_NAME).
-It is intended for static display on a receiver. It is the primary aid to listeners in programme service
-identification and selection. In Annex E of <xref linkend="iec62106" />, the RDS specification,
-there is a full description of the correct character encoding for Programme Service name strings.
-According to the RDS specification, PS is usually a single eight-character text; however, receivers
-also exist which can scroll strings sized as 8 x N characters. This control must therefore be configured
-in steps of 8 characters, so it always contains a string whose size is a multiple of 8.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RX_RADIO_TEXT</constant>&nbsp;</entry>
- <entry>string</entry>
- </row>
- <row><entry spanname="descr">Gets the Radio Text info. It is a textual description of
-what is being broadcast. RDS Radio Text can be used when a broadcaster wishes to transmit longer PS names,
-programme-related information or any other text. In these cases, RadioText can be used in addition to
-<constant>V4L2_CID_RDS_RX_PS_NAME</constant>. The encoding for Radio Text strings is also fully described
-in Annex E of <xref linkend="iec62106" />. The length of Radio Text strings depends on which RDS Block is being
-used to transmit it, either 32 (2A block) or 64 (2B block). However, receivers also exist
-which can scroll strings sized as 32 x N or 64 x N characters. This control must therefore be configured
-in steps of 32 or 64 characters, so it always contains a string whose size is a multiple of 32 or 64.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then a traffic announcement is in progress.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RX_TRAFFIC_PROGRAM</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then the tuned programme carries traffic announcements.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RDS_RX_MUSIC_SPEECH</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row><entry spanname="descr">If set, then this channel broadcasts music. If cleared, then it
-broadcasts speech. If the transmitter doesn't make this distinction, then it will be set.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_TUNE_DEEMPHASIS</constant>&nbsp;</entry>
- <entry>enum v4l2_deemphasis</entry>
- </row>
- <row id="v4l2-deemphasis"><entry spanname="descr">Configures the de-emphasis value for reception.
-A de-emphasis filter is applied to the received signal to attenuate the high audio frequencies that were
-boosted by pre-emphasis at the transmitter.
-Depending on the region, a time constant of either 50 or 75 microseconds is used. The enum&nbsp;v4l2_deemphasis
-defines possible values for de-emphasis. Here they are:</entry>
- </row><row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_DEEMPHASIS_DISABLED</constant>&nbsp;</entry>
- <entry>No de-emphasis is applied.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DEEMPHASIS_50_uS</constant>&nbsp;</entry>
- <entry>A de-emphasis of 50 uS is used.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DEEMPHASIS_75_uS</constant>&nbsp;</entry>
- <entry>A de-emphasis of 75 uS is used.</entry>
- </row>
- </tbody>
- </entrytbl>
-
- </row>
- <row><entry></entry></row>
- </tbody>
- </tgroup>
- </table>
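-
-  <para>As an informative sketch only (not part of the control definitions
-above; fd and the buffer size are assumptions), the Programme Service name
-string control could be read as follows. String controls require the
-<structfield>size</structfield> and <structfield>string</structfield> fields of
-struct <structname>v4l2_ext_control</structname> to be set up by the
-application.</para>
-
-  <example>
-    <title>Reading the RDS Programme Service name (informative sketch)</title>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static void print_ps_name(int fd)
-{
-        char ps_name[32 + 1];   /* example buffer: a multiple of 8, plus a NUL byte */
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(ps_name, 0, sizeof(ps_name));
-        memset(&amp;ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        ctrl.id = V4L2_CID_RDS_RX_PS_NAME;
-        ctrl.size = sizeof(ps_name);
-        ctrl.string = ps_name;
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_RX;
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl;
-
-        if (-1 == ioctl(fd, VIDIOC_G_EXT_CTRLS, &amp;ctrls))
-                perror("VIDIOC_G_EXT_CTRLS");
-        else
-                printf("PS name: %s\n", ps_name);
-}
-</programlisting>
-  </example>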
- </section>
-
- <section id="detect-controls">
- <title>Detect Control Reference</title>
-
- <para>The Detect class includes controls for common features of
- various motion or object detection capable devices.</para>
-
- <table pgwide="1" frame="none" id="detect-control-id">
- <title>Detect Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DETECT_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The Detect class
-descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
-description of this control class.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DETECT_MD_MODE</constant>&nbsp;</entry>
- <entry>menu</entry>
- </row><row><entry spanname="descr">Sets the motion detection mode.</entry>
- </row>
- <row>
- <entrytbl spanname="descr" cols="2">
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_DETECT_MD_MODE_DISABLED</constant>
- </entry><entry>Disable motion detection.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DETECT_MD_MODE_GLOBAL</constant>
- </entry><entry>Use a single motion detection threshold.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DETECT_MD_MODE_THRESHOLD_GRID</constant>
- </entry><entry>The image is divided into a grid, each cell with its own
- motion detection threshold. These thresholds are set through the
- <constant>V4L2_CID_DETECT_MD_THRESHOLD_GRID</constant> matrix control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_DETECT_MD_MODE_REGION_GRID</constant>
- </entry><entry>The image is divided into a grid, each cell with its own
- region value that specifies which per-region motion detection thresholds
- should be used. Each region has its own thresholds. How these per-region
- thresholds are set up is driver-specific. The region values for the grid are set
- through the <constant>V4L2_CID_DETECT_MD_REGION_GRID</constant> matrix
- control.</entry>
- </row>
- </tbody>
- </entrytbl>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row><entry spanname="descr">Sets the global motion detection threshold to be
- used with the <constant>V4L2_DETECT_MD_MODE_GLOBAL</constant> motion detection mode.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DETECT_MD_THRESHOLD_GRID</constant>&nbsp;</entry>
- <entry>__u16 matrix</entry>
- </row>
- <row><entry spanname="descr">Sets the motion detection thresholds for each cell in the grid.
- To be used with the <constant>V4L2_DETECT_MD_MODE_THRESHOLD_GRID</constant>
- motion detection mode. Matrix element (0, 0) represents the cell at the top-left of the
- grid.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_DETECT_MD_REGION_GRID</constant>&nbsp;</entry>
- <entry>__u8 matrix</entry>
- </row>
- <row><entry spanname="descr">Sets the motion detection region value for each cell in the grid.
- To be used with the <constant>V4L2_DETECT_MD_MODE_REGION_GRID</constant>
- motion detection mode. Matrix element (0, 0) represents the cell at the top-left of the
- grid.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
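-
-  <para>The following informative sketch (fd and the threshold value are
-assumptions; the threshold range is driver-specific) shows how an application
-might enable global motion detection with these controls:</para>
-
-  <example>
-    <title>Enabling global motion detection (informative sketch)</title>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static void enable_global_md(int fd, int threshold)
-{
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        ctrl[0].id = V4L2_CID_DETECT_MD_MODE;
-        ctrl[0].value = V4L2_DETECT_MD_MODE_GLOBAL;
-        ctrl[1].id = V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD;
-        ctrl[1].value = threshold;      /* driver-specific range */
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_DETECT;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls))
-                perror("VIDIOC_S_EXT_CTRLS");
-}
-</programlisting>
-  </example>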
-
- </section>
-
- <section id="rf-tuner-controls">
- <title>RF Tuner Control Reference</title>
-
- <para>
-The RF Tuner (RF_TUNER) class includes controls for common features of devices
-having an RF tuner.
- </para>
- <para>
-In this context, an RF tuner is the radio receiver circuit between the antenna and the
-demodulator. It receives a radio frequency (RF) signal from the antenna and converts that
-received signal to a lower intermediate frequency (IF) or to baseband frequency (BB).
-Tuners that can produce baseband output are often called Zero-IF tuners. Older
-tuners were typically simple PLL tuners inside a metal box, whilst newer ones
-are highly integrated chips without a metal box, so-called "silicon tuners". These controls
-are mostly applicable to new, feature-rich silicon tuners, simply because older
-tuners do not have many adjustable features.
- </para>
- <para>
-For more information about RF tuners see
-<ulink url="http://en.wikipedia.org/wiki/Tuner_%28radio%29">Tuner (radio)</ulink>
-and
-<ulink url="http://en.wikipedia.org/wiki/RF_front_end">RF front end</ulink>
-from Wikipedia.
- </para>
-
- <table pgwide="1" frame="none" id="rf-tuner-control-id">
- <title>RF_TUNER Control IDs</title>
-
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="6*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="6*" />
- <spanspec namest="c1" nameend="c2" spanname="id" />
- <spanspec namest="c2" nameend="c4" spanname="descr" />
- <thead>
- <row>
- <entry spanname="id" align="left">ID</entry>
- <entry align="left">Type</entry>
- </row>
- <row rowsep="1">
- <entry spanname="descr" align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row><entry></entry></row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_CLASS</constant>&nbsp;</entry>
- <entry>class</entry>
- </row><row><entry spanname="descr">The RF_TUNER class
-descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
-description of this control class.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_BANDWIDTH_AUTO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Enables/disables tuner radio channel
-bandwidth configuration. In automatic mode bandwidth configuration is performed
-by the driver.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_BANDWIDTH</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row>
-	      <entry spanname="descr">Filters on the tuner signal path are used to
-filter the signal according to the needs of the receiving party. The driver configures the filters to
-fulfill the desired bandwidth requirement. Used when <constant>V4L2_CID_RF_TUNER_BANDWIDTH_AUTO</constant> is not
-set. The unit is Hz. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_LNA_GAIN_AUTO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Enables/disables LNA automatic gain control (AGC)</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Enables/disables mixer automatic gain control (AGC)</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_IF_GAIN_AUTO</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row>
- <entry spanname="descr">Enables/disables IF automatic gain control (AGC)</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_RF_GAIN</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row>
-	      <entry spanname="descr">The RF amplifier is the very first
-amplifier on the receiver signal path, right after the antenna input.
-The difference between the LNA gain and the RF gain in this document is that
-the LNA gain is integrated in the tuner chip while the RF gain is provided by a separate
-chip. There may be both RF and LNA gain controls in the same device.
-The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_LNA_GAIN</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row>
-	      <entry spanname="descr">LNA (low noise amplifier) gain is the first
-gain stage on the RF tuner signal path. It is located very close to the tuner
-antenna input. Used when <constant>V4L2_CID_RF_TUNER_LNA_GAIN_AUTO</constant> is not set.
-See <constant>V4L2_CID_RF_TUNER_RF_GAIN</constant> to understand how RF gain
-and LNA gain differ from each other.
-The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_MIXER_GAIN</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row>
-	      <entry spanname="descr">Mixer gain is the second gain stage on the RF
-tuner signal path. It is located inside the mixer block, where the RF signal is
-down-converted by the mixer. Used when <constant>V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO</constant>
-is not set. The range and step are driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_IF_GAIN</constant>&nbsp;</entry>
- <entry>integer</entry>
- </row>
- <row>
-	      <entry spanname="descr">IF gain is the last gain stage on the RF tuner
-signal path. It is located at the output of the RF tuner. It controls the signal level of
-the intermediate frequency output or baseband output. Used when
-<constant>V4L2_CID_RF_TUNER_IF_GAIN_AUTO</constant> is not set. The range and step are
-driver-specific.</entry>
- </row>
- <row>
- <entry spanname="id"><constant>V4L2_CID_RF_TUNER_PLL_LOCK</constant>&nbsp;</entry>
- <entry>boolean</entry>
- </row>
- <row>
-	      <entry spanname="descr">Is the synthesizer PLL locked? The RF tuner is
-receiving the given frequency when this control is set. This is a read-only control.
-</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
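-
-  <para>As an informative sketch only (fd and the bandwidth value are
-assumptions; the supported range is driver-specific), automatic bandwidth
-selection could be disabled and a manual bandwidth programmed like this:</para>
-
-  <example>
-    <title>Setting the tuner bandwidth manually (informative sketch)</title>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static void set_manual_bandwidth(int fd, int bandwidth_hz)
-{
-        struct v4l2_ext_control ctrl[2];
-        struct v4l2_ext_controls ctrls;
-
-        memset(ctrl, 0, sizeof(ctrl));
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-
-        ctrl[0].id = V4L2_CID_RF_TUNER_BANDWIDTH_AUTO;
-        ctrl[0].value = 0;              /* disable automatic mode */
-        ctrl[1].id = V4L2_CID_RF_TUNER_BANDWIDTH;
-        ctrl[1].value = bandwidth_hz;   /* e.g. 8000000 for 8 MHz */
-
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_RF_TUNER;
-        ctrls.count = 2;
-        ctrls.controls = ctrl;
-
-        if (-1 == ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls))
-                perror("VIDIOC_S_EXT_CTRLS");
-}
-</programlisting>
-  </example>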
- </section>
-</section>
diff --git a/Documentation/DocBook/media/v4l/crop.pdf b/Documentation/DocBook/media/v4l/crop.pdf
deleted file mode 100644
index c9fb81cd32f3..000000000000
--- a/Documentation/DocBook/media/v4l/crop.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/dev-capture.xml b/Documentation/DocBook/media/v4l/dev-capture.xml
deleted file mode 100644
index e1c5f9406d6a..000000000000
--- a/Documentation/DocBook/media/v4l/dev-capture.xml
+++ /dev/null
@@ -1,110 +0,0 @@
- <title>Video Capture Interface</title>
-
- <para>Video capture devices sample an analog video signal and store
-the digitized images in memory. Today nearly all devices can capture
-at full 25 or 30 frames/second. With this interface applications can
-control the capture process and move images from the driver into user
-space.</para>
-
- <para>Conventionally V4L2 video capture devices are accessed through
-character device special files named <filename>/dev/video</filename>
-and <filename>/dev/video0</filename> to
-<filename>/dev/video63</filename> with major number 81 and minor
-numbers 0 to 63. <filename>/dev/video</filename> is typically a
-symbolic link to the preferred video device. Note the same device
-files are used for video output devices.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the video capture interface set the
-<constant>V4L2_CAP_VIDEO_CAPTURE</constant> or
-<constant>V4L2_CAP_VIDEO_CAPTURE_MPLANE</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. As secondary device functions
-they may also support the <link linkend="overlay">video overlay</link>
-(<constant>V4L2_CAP_VIDEO_OVERLAY</constant>) and the <link
-linkend="raw-vbi">raw VBI capture</link>
-(<constant>V4L2_CAP_VBI_CAPTURE</constant>) interface. At least one of
-the read/write or streaming I/O methods must be supported. Tuners and
-audio inputs are optional.</para>
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>Video capture devices shall support <link
-linkend="audio">audio input</link>, <link
-linkend="tuner">tuner</link>, <link linkend="control">controls</link>,
-<link linkend="crop">cropping and scaling</link> and <link
-linkend="streaming-par">streaming parameter</link> ioctls as needed.
-The <link linkend="video">video input</link> and <link
-linkend="standard">video standard</link> ioctls must be supported by
-all video capture devices.</para>
- </section>
-
- <section>
- <title>Image Format Negotiation</title>
-
- <para>The result of a capture operation is determined by
-cropping and image format parameters. The former select an area of the
-video picture to capture, the latter how images are stored in memory,
-&ie; in RGB or YUV format, the number of bits per pixel or width and
-height. Together they also define how images are scaled in the
-process.</para>
-
- <para>As usual these parameters are <emphasis>not</emphasis> reset
-at &func-open; time, to permit Unix tool chains to program a device
-and then read from it as if it were a plain file. Well written V4L2
-applications ensure they really get what they want, including cropping
-and scaling.</para>
-
-  <para>Cropping initialization at a minimum requires resetting the
-parameters to their defaults. An example is given in <xref
-linkend="crop" />.</para>
-
- <para>To query the current image format applications set the
-<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> or
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant> and call the
-&VIDIOC-G-FMT; ioctl with a pointer to this structure. Drivers fill
-the &v4l2-pix-format; <structfield>pix</structfield> or the
-&v4l2-pix-format-mplane; <structfield>pix_mp</structfield> member of the
-<structfield>fmt</structfield> union.</para>
-
- <para>To request different parameters applications set the
-<structfield>type</structfield> field of a &v4l2-format; as above and
-initialize all fields of the &v4l2-pix-format;
-<structfield>pix</structfield> member of the
-<structfield>fmt</structfield> union, or better just modify the
-results of <constant>VIDIOC_G_FMT</constant>, and call the
-&VIDIOC-S-FMT; ioctl with a pointer to this structure. Drivers may
-adjust the parameters and finally return the actual parameters as
-<constant>VIDIOC_G_FMT</constant> does.</para>
-
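-  <para>The fragment below is an informative sketch of this get-modify-set
-pattern. It assumes an already opened file descriptor fd referring to a video
-capture device; the requested width, height and pixel format are examples
-only.</para>
-
-  <example>
-    <title>Negotiating a capture format (informative sketch)</title>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;stdlib.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: fd is an already opened video capture device; the requested
-   width, height and pixel format are examples only. */
-struct v4l2_format fmt;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (-1 == ioctl(fd, VIDIOC_G_FMT, &amp;fmt)) {
-        perror("VIDIOC_G_FMT");
-        exit(EXIT_FAILURE);
-}
-
-/* Modify only what is needed, keeping the driver defaults elsewhere. */
-fmt.fmt.pix.width = 640;
-fmt.fmt.pix.height = 480;
-fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
-
-if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt)) {
-        perror("VIDIOC_S_FMT");
-        exit(EXIT_FAILURE);
-}
-
-/* The driver may have adjusted the parameters; fmt now holds the
-   values that will actually be used. */
-</programlisting>
-  </example>
-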
- <para>Like <constant>VIDIOC_S_FMT</constant> the
-&VIDIOC-TRY-FMT; ioctl can be used to learn about hardware limitations
-without disabling I/O or possibly time consuming hardware
-preparations.</para>
-
- <para>The contents of &v4l2-pix-format; and &v4l2-pix-format-mplane;
-are discussed in <xref linkend="pixfmt" />. See also the specification of the
-<constant>VIDIOC_G_FMT</constant>, <constant>VIDIOC_S_FMT</constant>
-and <constant>VIDIOC_TRY_FMT</constant> ioctls for details. Video
-capture devices must implement both the
-<constant>VIDIOC_G_FMT</constant> and
-<constant>VIDIOC_S_FMT</constant> ioctl, even if
-<constant>VIDIOC_S_FMT</constant> ignores all requests and always
-returns default parameters as <constant>VIDIOC_G_FMT</constant> does.
-<constant>VIDIOC_TRY_FMT</constant> is optional.</para>
- </section>
-
- <section>
- <title>Reading Images</title>
-
- <para>A video capture device may support the <link
-linkend="rw">read() function</link> and/or streaming (<link
-linkend="mmap">memory mapping</link> or <link
-linkend="userp">user pointer</link>) I/O. See <xref
-linkend="io" /> for details.</para>
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-codec.xml b/Documentation/DocBook/media/v4l/dev-codec.xml
deleted file mode 100644
index ff44c16fc080..000000000000
--- a/Documentation/DocBook/media/v4l/dev-codec.xml
+++ /dev/null
@@ -1,27 +0,0 @@
- <title>Codec Interface</title>
-
- <para>A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory. Typically
-such devices are memory-to-memory devices (i.e. devices with the
-<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
-capability set).
-</para>
-
- <para>A memory-to-memory video node acts just like a normal video node, but it
-supports both output (sending frames from memory to the codec hardware) and
-capture (receiving the processed frames from the codec hardware into memory)
-stream I/O. An application will have to set up the stream
-I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
-to start the codec.</para>
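-
-  <para>As an informative sketch only (assuming an open multi-planar
-memory-to-memory device fd whose formats have already been negotiated and
-whose buffers have already been allocated and queued on both queues),
-streaming is started on both sides like this:</para>
-
-  <example>
-    <title>Starting a memory-to-memory codec (informative sketch)</title>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;stdlib.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch only: VIDIOC_REQBUFS and VIDIOC_QBUF setup is omitted here. */
-enum v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
-enum v4l2_buf_type cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
-
-if (-1 == ioctl(fd, VIDIOC_STREAMON, &amp;out_type) ||
-    -1 == ioctl(fd, VIDIOC_STREAMON, &amp;cap_type)) {
-        perror("VIDIOC_STREAMON");
-        exit(EXIT_FAILURE);
-}
-</programlisting>
-  </example>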
-
- <para>Video compression codecs use the MPEG controls to setup their codec parameters
-(note that the MPEG controls actually support many more codecs than just MPEG).
-See <xref linkend="mpeg-controls"></xref>.</para>
-
- <para>Memory-to-memory devices can often be used as a shared resource: you can
-open the video node multiple times, each application setting up their own codec properties
-that are local to the file handle, and each can use it independently from the others.
-The driver will arbitrate access to the codec and reprogram it whenever another file
-handle gets access. This is different from the usual video node behavior where the video properties
-are global to the device (i.e. changing something through one file handle is visible
-through another file handle).</para>
diff --git a/Documentation/DocBook/media/v4l/dev-effect.xml b/Documentation/DocBook/media/v4l/dev-effect.xml
deleted file mode 100644
index 2350a67c0710..000000000000
--- a/Documentation/DocBook/media/v4l/dev-effect.xml
+++ /dev/null
@@ -1,17 +0,0 @@
- <title>Effect Devices Interface</title>
-
- <note>
- <title>Suspended</title>
-
-  <para>This interface has been suspended from the V4L2 API
-implemented in Linux 2.6 until we have more experience with effect
-device interfaces.</para>
- </note>
-
-  <para>A V4L2 video effect device can apply image effects, filter, or
-combine two or more images or image streams, for example video
-transitions or wipes. Applications send data to be processed and
-receive the result data either with &func-read; and &func-write;
-functions, or through the streaming I/O mechanism.</para>
-
- <para>[to do]</para>
diff --git a/Documentation/DocBook/media/v4l/dev-event.xml b/Documentation/DocBook/media/v4l/dev-event.xml
deleted file mode 100644
index 19f4becfae34..000000000000
--- a/Documentation/DocBook/media/v4l/dev-event.xml
+++ /dev/null
@@ -1,43 +0,0 @@
- <title>Event Interface</title>
-
- <para>The V4L2 event interface provides a means for a user to get
- immediately notified on certain conditions taking place on a device.
- This might include start of frame or loss of signal events, for
- example. Changes in the value or state of a V4L2 control can also be
- reported through events.
- </para>
-
-  <para>To receive events, the events the user is interested in must first
-  be subscribed using the &VIDIOC-SUBSCRIBE-EVENT; ioctl. Once an event is
-  subscribed, the events of subscribed types are dequeueable using the
-  &VIDIOC-DQEVENT; ioctl. Events may be unsubscribed using the
-  VIDIOC_UNSUBSCRIBE_EVENT ioctl. The special event type V4L2_EVENT_ALL may
- be used to unsubscribe all the events the driver supports.</para>
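-
-  <para>The following informative sketch (the control ID passed in is an
-  example; any event type documented for the device can be used) shows the
-  basic subscribe, poll and dequeue sequence on an already opened file
-  descriptor fd:</para>
-
-  <example>
-    <title>Subscribing to control events (informative sketch)</title>
-
-    <programlisting>
-#include &lt;poll.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static void wait_for_ctrl_event(int fd, unsigned int ctrl_id)
-{
-        struct v4l2_event_subscription sub;
-        struct v4l2_event ev;
-        struct pollfd pfd = { .fd = fd, .events = POLLPRI };
-
-        memset(&amp;sub, 0, sizeof(sub));
-        sub.type = V4L2_EVENT_CTRL;
-        sub.id = ctrl_id;       /* for control events, id is the control ID */
-
-        if (-1 == ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &amp;sub)) {
-                perror("VIDIOC_SUBSCRIBE_EVENT");
-                return;
-        }
-
-        /* V4L2 events are signalled as POLLPRI. */
-        if (poll(&amp;pfd, 1, -1) &gt; 0 &amp;&amp; (pfd.revents &amp; POLLPRI)) {
-                memset(&amp;ev, 0, sizeof(ev));
-                if (0 == ioctl(fd, VIDIOC_DQEVENT, &amp;ev))
-                        printf("event type %u, sequence %u\n",
-                               ev.type, ev.sequence);
-        }
-
-        ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &amp;sub);
-}
-</programlisting>
-  </example>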
-
- <para>The event subscriptions and event queues are specific to file
- handles. Subscribing an event on one file handle does not affect
- other file handles.</para>
-
-  <para>The information on dequeueable events is obtained by using the select or
-  poll system calls on video devices. V4L2 events are signalled as POLLPRI events by the
-  poll system call and as exceptions by the select system call.</para>
-
- <para>Starting with kernel 3.1 certain guarantees can be given with
- regards to events:<orderedlist>
- <listitem>
- <para>Each subscribed event has its own internal dedicated event queue.
-This means that flooding of one event type will not interfere with other
-event types.</para>
- </listitem>
- <listitem>
- <para>If the internal event queue for a particular subscribed event
-becomes full, then the oldest event in that queue will be dropped.</para>
- </listitem>
- <listitem>
- <para>Where applicable, certain event types can ensure that the payload
-of the oldest event that is about to be dropped will be merged with the payload
-of the next oldest event. This ensures that no information is lost, only an
-intermediate step leading up to that information. See the documentation of the
-event you want to subscribe to in order to find out whether this is applicable for that event.</para>
- </listitem>
- </orderedlist></para>
diff --git a/Documentation/DocBook/media/v4l/dev-osd.xml b/Documentation/DocBook/media/v4l/dev-osd.xml
deleted file mode 100644
index 54853329140b..000000000000
--- a/Documentation/DocBook/media/v4l/dev-osd.xml
+++ /dev/null
@@ -1,149 +0,0 @@
- <title>Video Output Overlay Interface</title>
- <subtitle>Also known as On-Screen Display (OSD)</subtitle>
-
- <para>Some video output devices can overlay a framebuffer image onto
-the outgoing video signal. Applications can set up such an overlay
-using this interface, which borrows structures and ioctls of the <link
-linkend="overlay">Video Overlay</link> interface.</para>
-
- <para>The OSD function is accessible through the same character
-special file as the <link linkend="capture">Video Output</link> function.
-Note the default function of such a <filename>/dev/video</filename> device
-is video capturing or output. The OSD function is only available after
-calling the &VIDIOC-S-FMT; ioctl.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the <wordasword>Video Output
-Overlay</wordasword> interface set the
-<constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl.</para>
- </section>
-
- <section>
- <title>Framebuffer</title>
-
- <para>Contrary to the <wordasword>Video Overlay</wordasword>
-interface the framebuffer is normally implemented on the TV card and
-not the graphics card. On Linux it is accessible as a framebuffer
-device (<filename>/dev/fbN</filename>). Given a V4L2 device,
-applications can find the corresponding framebuffer device by calling
-the &VIDIOC-G-FBUF; ioctl. It returns, amongst other information, the
-physical address of the framebuffer in the
-<structfield>base</structfield> field of &v4l2-framebuffer;. The
-framebuffer device ioctl <constant>FBIOGET_FSCREENINFO</constant>
-returns the same address in the <structfield>smem_start</structfield>
-field of struct <structname>fb_fix_screeninfo</structname>. The
-<constant>FBIOGET_FSCREENINFO</constant> ioctl and struct
-<structname>fb_fix_screeninfo</structname> are defined in the
-<filename>linux/fb.h</filename> header file.</para>
-
- <para>The width and height of the framebuffer depends on the
-current video standard. A V4L2 driver may reject attempts to change
-the video standard (or any other ioctl which would imply a framebuffer
-size change) with an &EBUSY; until all applications have closed the
-framebuffer device.</para>
-
- <example>
- <title>Finding a framebuffer device for OSD</title>
-
- <programlisting>
-#include &lt;linux/fb.h&gt;
-
-&v4l2-framebuffer; fbuf;
-unsigned int i;
-int fb_fd;
-
-if (-1 == ioctl(fd, VIDIOC_G_FBUF, &amp;fbuf)) {
- perror("VIDIOC_G_FBUF");
- exit(EXIT_FAILURE);
-}
-
-for (i = 0; i &lt; 30; i++) {
- char dev_name[16];
- struct fb_fix_screeninfo si;
-
- snprintf(dev_name, sizeof(dev_name), "/dev/fb%u", i);
-
- fb_fd = open(dev_name, O_RDWR);
- if (-1 == fb_fd) {
- switch (errno) {
- case ENOENT: /* no such file */
- case ENXIO: /* no driver */
- continue;
-
- default:
- perror("open");
- exit(EXIT_FAILURE);
- }
- }
-
- if (0 == ioctl(fb_fd, FBIOGET_FSCREENINFO, &amp;si)) {
- if (si.smem_start == (unsigned long)fbuf.base)
- break;
- } else {
- /* Apparently not a framebuffer device. */
- }
-
- close(fb_fd);
- fb_fd = -1;
-}
-
-/* fb_fd is the file descriptor of the framebuffer device
- for the video output overlay, or -1 if no device was found. */
-</programlisting>
- </example>
- </section>
-
- <section>
- <title>Overlay Window and Scaling</title>
-
- <para>The overlay is controlled by source and target rectangles.
-The source rectangle selects a subsection of the framebuffer image to
-be overlaid, the target rectangle an area in the outgoing video signal
-where the image will appear. Drivers may or may not support scaling,
-and arbitrary sizes and positions of these rectangles. Further drivers
-may support any (or none) of the clipping/blending methods defined for
-the <link linkend="overlay">Video Overlay</link> interface.</para>
-
- <para>A &v4l2-window; defines the size of the source rectangle,
-its position in the framebuffer and the clipping/blending method to be
-used for the overlay. To get the current parameters applications set
-the <structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant> and call the
-&VIDIOC-G-FMT; ioctl. The driver fills the
-<structname>v4l2_window</structname> substructure named
-<structfield>win</structfield>. It is not possible to retrieve a
-previously programmed clipping list or bitmap.</para>
-
- <para>To program the source rectangle applications set the
-<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant>, initialize
-the <structfield>win</structfield> substructure and call the
-&VIDIOC-S-FMT; ioctl. The driver adjusts the parameters against
-hardware limits and returns the actual parameters as
-<constant>VIDIOC_G_FMT</constant> does. Like
-<constant>VIDIOC_S_FMT</constant>, the &VIDIOC-TRY-FMT; ioctl can be
-used to learn about driver capabilities without actually changing
-driver state. Unlike <constant>VIDIOC_S_FMT</constant> this also works
-after the overlay has been enabled.</para>
-
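-  <para>As an illustration, the source rectangle could be programmed
-as follows, assuming <varname>fd</varname> refers to an opened video
-output device; the driver may further adjust the requested values:</para>
-
-  <programlisting>
-struct v4l2_format fmt;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY;
-
-if (-1 == ioctl(fd, VIDIOC_G_FMT, &amp;fmt)) {
-        perror("VIDIOC_G_FMT");
-        exit(EXIT_FAILURE);
-}
-
-/* Halve the current source rectangle, keeping its
-   top left corner at the origin. */
-fmt.fmt.win.w.left = 0;
-fmt.fmt.win.w.top = 0;
-fmt.fmt.win.w.width /= 2;
-fmt.fmt.win.w.height /= 2;
-
-if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt))
-        perror("VIDIOC_S_FMT");
-</programlisting>
-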
- <para>A &v4l2-crop; defines the size and position of the target
-rectangle. The scaling factor of the overlay is implied by the width
-and height given in &v4l2-window; and &v4l2-crop;. The cropping API
-applies to <wordasword>Video Output</wordasword> and <wordasword>Video
-Output Overlay</wordasword> devices in the same way as to
-<wordasword>Video Capture</wordasword> and <wordasword>Video
-Overlay</wordasword> devices, merely reversing the direction of the
-data flow. For more information see <xref linkend="crop" />.</para>
- </section>
-
- <section>
- <title>Enabling Overlay</title>
-
- <para>There is no V4L2 ioctl to enable or disable the overlay,
-however the framebuffer interface of the driver may support the
-<constant>FBIOBLANK</constant> ioctl.</para>
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-output.xml b/Documentation/DocBook/media/v4l/dev-output.xml
deleted file mode 100644
index 9130a3dc7880..000000000000
--- a/Documentation/DocBook/media/v4l/dev-output.xml
+++ /dev/null
@@ -1,106 +0,0 @@
- <title>Video Output Interface</title>
-
- <para>Video output devices encode stills or image sequences as
-an analog video signal. With this interface applications can
-control the encoding process and move images from user space to
-the driver.</para>
-
- <para>Conventionally V4L2 video output devices are accessed through
-character device special files named <filename>/dev/video</filename>
-and <filename>/dev/video0</filename> to
-<filename>/dev/video63</filename> with major number 81 and minor
-numbers 0 to 63. <filename>/dev/video</filename> is typically a
-symbolic link to the preferred video device. Note the same device
-files are used for video capture devices.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the video output interface set the
-<constant>V4L2_CAP_VIDEO_OUTPUT</constant> or
-<constant>V4L2_CAP_VIDEO_OUTPUT_MPLANE</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. As secondary device functions
-they may also support the <link linkend="raw-vbi">raw VBI
-output</link> (<constant>V4L2_CAP_VBI_OUTPUT</constant>) interface. At
-least one of the read/write or streaming I/O methods must be
-supported. Modulators and audio outputs are optional.</para>
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>Video output devices shall support <link
-linkend="audio">audio output</link>, <link
-linkend="tuner">modulator</link>, <link linkend="control">controls</link>,
-<link linkend="crop">cropping and scaling</link> and <link
-linkend="streaming-par">streaming parameter</link> ioctls as needed.
-The <link linkend="video">video output</link> and <link
-linkend="standard">video standard</link> ioctls must be supported by
-all video output devices.</para>
- </section>
-
- <section>
- <title>Image Format Negotiation</title>
-
- <para>The output is determined by cropping and image format
-parameters. The former select an area of the video picture where the
-image will appear, the latter how images are stored in memory, &ie; in
-RGB or YUV format, the number of bits per pixel or width and height.
-Together they also define how images are scaled in the process.</para>
-
- <para>As usual these parameters are <emphasis>not</emphasis> reset
-at &func-open; time to permit Unix tool chains programming a device
-and then writing to it as if it were a plain file. Well written V4L2
-applications ensure they really get what they want, including cropping
-and scaling.</para>
-
-  <para>Cropping initialization at minimum requires resetting the
-parameters to defaults. An example is given in <xref
-linkend="crop" />.</para>
-
- <para>To query the current image format applications set the
-<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> or
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant> and call the
-&VIDIOC-G-FMT; ioctl with a pointer to this structure. Drivers fill
-the &v4l2-pix-format; <structfield>pix</structfield> or the
-&v4l2-pix-format-mplane; <structfield>pix_mp</structfield> member of the
-<structfield>fmt</structfield> union.</para>
-
- <para>To request different parameters applications set the
-<structfield>type</structfield> field of a &v4l2-format; as above and
-initialize all fields of the &v4l2-pix-format;
-<structfield>pix</structfield> or &v4l2-pix-format-mplane;
-<structfield>pix_mp</structfield> member of the
-<structfield>fmt</structfield> union, or better just modify the
-results of <constant>VIDIOC_G_FMT</constant>, and call the
-&VIDIOC-S-FMT; ioctl with a pointer to this structure. Drivers may
-adjust the parameters and finally return the actual parameters as
-<constant>VIDIOC_G_FMT</constant> does.</para>
-
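-  <para>A sketch of such a negotiation, assuming a single-planar
-device and using a 720 x 576 <constant>V4L2_PIX_FMT_YUYV</constant>
-image purely as an example, could look like this:</para>
-
-  <programlisting>
-struct v4l2_format fmt;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-
-if (-1 == ioctl(fd, VIDIOC_G_FMT, &amp;fmt)) {
-        perror("VIDIOC_G_FMT");
-        exit(EXIT_FAILURE);
-}
-
-fmt.fmt.pix.width = 720;
-fmt.fmt.pix.height = 576;
-fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
-fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
-
-/* The driver may adjust width, height and pixelformat
-   to the nearest supported values. */
-if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt))
-        perror("VIDIOC_S_FMT");
-</programlisting>
-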
- <para>Like <constant>VIDIOC_S_FMT</constant> the
-&VIDIOC-TRY-FMT; ioctl can be used to learn about hardware limitations
-without disabling I/O or possibly time consuming hardware
-preparations.</para>
-
- <para>The contents of &v4l2-pix-format; and &v4l2-pix-format-mplane;
-are discussed in <xref linkend="pixfmt" />. See also the specification of the
-<constant>VIDIOC_G_FMT</constant>, <constant>VIDIOC_S_FMT</constant>
-and <constant>VIDIOC_TRY_FMT</constant> ioctls for details. Video
-output devices must implement both the
-<constant>VIDIOC_G_FMT</constant> and
-<constant>VIDIOC_S_FMT</constant> ioctl, even if
-<constant>VIDIOC_S_FMT</constant> ignores all requests and always
-returns default parameters as <constant>VIDIOC_G_FMT</constant> does.
-<constant>VIDIOC_TRY_FMT</constant> is optional.</para>
- </section>
-
- <section>
- <title>Writing Images</title>
-
- <para>A video output device may support the <link
-linkend="rw">write() function</link> and/or streaming (<link
-linkend="mmap">memory mapping</link> or <link
-linkend="userp">user pointer</link>) I/O. See <xref
-linkend="io" /> for details.</para>
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-overlay.xml b/Documentation/DocBook/media/v4l/dev-overlay.xml
deleted file mode 100644
index cc6e0c5c960c..000000000000
--- a/Documentation/DocBook/media/v4l/dev-overlay.xml
+++ /dev/null
@@ -1,368 +0,0 @@
- <title>Video Overlay Interface</title>
- <subtitle>Also known as Framebuffer Overlay or Previewing</subtitle>
-
- <para>Video overlay devices have the ability to genlock (TV-)video
-into the (VGA-)video signal of a graphics card, or to store captured
-images directly in video memory of a graphics card, typically with
-clipping. This can be considerably more efficient than capturing
-images and displaying them by other means. In the old days when only
-nuclear power plants needed cooling towers this used to be the only
-way to put live video into a window.</para>
-
- <para>Video overlay devices are accessed through the same character
-special files as <link linkend="capture">video capture</link> devices.
-Note the default function of a <filename>/dev/video</filename> device
-is video capturing. The overlay function is only available after
-calling the &VIDIOC-S-FMT; ioctl.</para>
-
- <para>The driver may support simultaneous overlay and capturing
-using the read/write and streaming I/O methods. If so, operation at
-the nominal frame rate of the video standard is not guaranteed. Frames
-may be directed away from overlay to capture, or one field may be used
-for overlay and the other for capture if the capture parameters permit
-this.</para>
-
- <para>Applications should use different file descriptors for
-capturing and overlay. This must be supported by all drivers capable
-of simultaneous capturing and overlay. Optionally these drivers may
-also permit capturing and overlay with a single file descriptor for
-compatibility with V4L and earlier versions of V4L2.<footnote>
- <para>A common application of two file descriptors is the
-XFree86 <link linkend="xvideo">Xv/V4L</link> interface driver and
-a V4L2 application. While the X server controls video overlay, the
-application can take advantage of memory mapping and DMA.</para>
- <para>In the opinion of the designers of this API, no driver
-writer taking the efforts to support simultaneous capturing and
-overlay will restrict this ability by requiring a single file
-descriptor, as in V4L and earlier versions of V4L2. Making this
-optional means applications depending on two file descriptors need
-backup routines to be compatible with all drivers, which is
-considerably more work than using two fds in applications which do
-not. Also two fd's fit the general concept of one file descriptor for
-each logical stream. Hence as a complexity trade-off drivers
-<emphasis>must</emphasis> support two file descriptors and
-<emphasis>may</emphasis> support single fd operation.</para>
- </footnote></para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the video overlay interface set the
-<constant>V4L2_CAP_VIDEO_OVERLAY</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. The overlay I/O method specified
-below must be supported. Tuners and audio inputs are optional.</para>
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>Video overlay devices shall support <link
-linkend="audio">audio input</link>, <link
-linkend="tuner">tuner</link>, <link linkend="control">controls</link>,
-<link linkend="crop">cropping and scaling</link> and <link
-linkend="streaming-par">streaming parameter</link> ioctls as needed.
-The <link linkend="video">video input</link> and <link
-linkend="standard">video standard</link> ioctls must be supported by
-all video overlay devices.</para>
- </section>
-
- <section>
- <title>Setup</title>
-
- <para>Before overlay can commence applications must program the
-driver with frame buffer parameters, namely the address and size of
-the frame buffer and the image format, for example RGB 5:6:5. The
-&VIDIOC-G-FBUF; and &VIDIOC-S-FBUF; ioctls are available to get
-and set these parameters, respectively. The
-<constant>VIDIOC_S_FBUF</constant> ioctl is privileged because it
-allows setting up DMA into physical memory, bypassing the memory
-protection mechanisms of the kernel. Only the superuser can change the
-frame buffer address and size. Users are not supposed to run TV
-applications as root or with SUID bit set. A small helper application
-with suitable privileges should query the graphics system and program
-the V4L2 driver at the appropriate time.</para>
-
- <para>Some devices add the video overlay to the output signal
-of the graphics card. In this case the frame buffer is not modified by
-the video device, and the frame buffer address and pixel format are
-not needed by the driver. The <constant>VIDIOC_S_FBUF</constant> ioctl
-is not privileged. An application can check for this type of device by
-calling the <constant>VIDIOC_G_FBUF</constant> ioctl.</para>
-
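-  <para>For instance, an application could check for this
-non-destructive overlay type like this, assuming <varname>fd</varname>
-is an open video overlay device:</para>
-
-  <programlisting>
-struct v4l2_framebuffer fbuf;
-
-memset(&amp;fbuf, 0, sizeof(fbuf));
-
-if (-1 == ioctl(fd, VIDIOC_G_FBUF, &amp;fbuf)) {
-        perror("VIDIOC_G_FBUF");
-        exit(EXIT_FAILURE);
-}
-
-if (fbuf.capability &amp; V4L2_FBUF_CAP_EXTERNOVERLAY) {
-        /* The hardware overlays onto the outgoing signal; no
-           frame buffer address or format needs to be programmed. */
-}
-</programlisting>
-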
- <para>A driver may support any (or none) of five clipping/blending
-methods:<orderedlist>
- <listitem>
- <para>Chroma-keying displays the overlaid image only where
-pixels in the primary graphics surface assume a certain color.</para>
- </listitem>
- <listitem>
- <para>A bitmap can be specified where each bit corresponds
-to a pixel in the overlaid image. When the bit is set, the
-corresponding video pixel is displayed, otherwise a pixel of the
-graphics surface.</para>
- </listitem>
- <listitem>
- <para>A list of clipping rectangles can be specified. In
-these regions <emphasis>no</emphasis> video is displayed, so the
-graphics surface can be seen here.</para>
- </listitem>
- <listitem>
- <para>The framebuffer has an alpha channel that can be used
-to clip or blend the framebuffer with the video.</para>
- </listitem>
- <listitem>
- <para>A global alpha value can be specified to blend the
-framebuffer contents with video images.</para>
- </listitem>
- </orderedlist></para>
-
- <para>When simultaneous capturing and overlay is supported and
-the hardware prohibits different image and frame buffer formats, the
-format requested first takes precedence. The attempt to capture
-(&VIDIOC-S-FMT;) or overlay (&VIDIOC-S-FBUF;) may fail with an
-&EBUSY; or return accordingly modified parameters.</para>
- </section>
-
- <section>
- <title>Overlay Window</title>
-
- <para>The overlaid image is determined by cropping and overlay
-window parameters. The former select an area of the video picture to
-capture, the latter how images are overlaid and clipped. Cropping
-initialization at minimum requires resetting the parameters to
-defaults. An example is given in <xref linkend="crop" />.</para>
-
- <para>The overlay window is described by a &v4l2-window;. It
-defines the size of the image, its position over the graphics surface
-and the clipping to be applied. To get the current parameters
-applications set the <structfield>type</structfield> field of a
-&v4l2-format; to <constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant> and
-call the &VIDIOC-G-FMT; ioctl. The driver fills the
-<structname>v4l2_window</structname> substructure named
-<structfield>win</structfield>. It is not possible to retrieve a
-previously programmed clipping list or bitmap.</para>
-
- <para>To program the overlay window applications set the
-<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>, initialize the
-<structfield>win</structfield> substructure and call the
-&VIDIOC-S-FMT; ioctl. The driver adjusts the parameters against
-hardware limits and returns the actual parameters as
-<constant>VIDIOC_G_FMT</constant> does. Like
-<constant>VIDIOC_S_FMT</constant>, the &VIDIOC-TRY-FMT; ioctl can be
-used to learn about driver capabilities without actually changing
-driver state. Unlike <constant>VIDIOC_S_FMT</constant> this also works
-after the overlay has been enabled.</para>
-
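-  <para>By way of example, an overlay window with two clipping
-rectangles could be requested as follows, assuming the driver reported
-<constant>V4L2_FBUF_CAP_LIST_CLIPPING</constant> with
-<constant>VIDIOC_G_FBUF</constant>; the sizes and positions are
-arbitrary:</para>
-
-  <programlisting>
-struct v4l2_format fmt;
-struct v4l2_clip clips[2];
-
-memset(&amp;clips, 0, sizeof(clips));
-clips[0].c.left = 0;
-clips[0].c.top = 0;
-clips[0].c.width = 64;
-clips[0].c.height = 48;
-clips[1].c.left = 192;
-clips[1].c.top = 0;
-clips[1].c.width = 64;
-clips[1].c.height = 48;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
-fmt.fmt.win.w.left = 0;
-fmt.fmt.win.w.top = 0;
-fmt.fmt.win.w.width = 320;
-fmt.fmt.win.w.height = 240;
-fmt.fmt.win.field = V4L2_FIELD_ANY;
-fmt.fmt.win.clips = clips;
-fmt.fmt.win.clipcount = 2;
-
-if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt))
-        perror("VIDIOC_S_FMT");
-</programlisting>
-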
- <para>The scaling factor of the overlaid image is implied by the
-width and height given in &v4l2-window; and the size of the cropping
-rectangle. For more information see <xref linkend="crop" />.</para>
-
- <para>When simultaneous capturing and overlay is supported and
-the hardware prohibits different image and window sizes, the size
-requested first takes precedence. The attempt to capture or overlay as
-well (&VIDIOC-S-FMT;) may fail with an &EBUSY; or return accordingly
-modified parameters.</para>
-
- <table pgwide="1" frame="none" id="v4l2-window">
- <title>struct <structname>v4l2_window</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>&v4l2-rect;</entry>
- <entry><structfield>w</structfield></entry>
- <entry>Size and position of the window relative to the
-top, left corner of the frame buffer defined with &VIDIOC-S-FBUF;. The
-window can extend beyond the frame buffer width and height, the
-<structfield>x</structfield> and <structfield>y</structfield>
-coordinates can be negative, and it can lie completely outside the
-frame buffer. The driver clips the window accordingly, or if that is
-not possible, modifies its size and/or position.</entry>
- </row>
- <row>
- <entry>&v4l2-field;</entry>
- <entry><structfield>field</structfield></entry>
- <entry>Applications set this field to determine which
-video field shall be overlaid, typically one of
-<constant>V4L2_FIELD_ANY</constant> (0),
-<constant>V4L2_FIELD_TOP</constant>,
-<constant>V4L2_FIELD_BOTTOM</constant> or
-<constant>V4L2_FIELD_INTERLACED</constant>. Drivers may have to choose
-a different field order and return the actual setting here.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>chromakey</structfield></entry>
- <entry>When chroma-keying has been negotiated with
-&VIDIOC-S-FBUF; applications set this field to the desired pixel value
-for the chroma key. The format is the same as the pixel format of the
-framebuffer (&v4l2-framebuffer;
-<structfield>fmt.pixelformat</structfield> field), with bytes in host
-order. E.&nbsp;g. for <link
-linkend="V4L2-PIX-FMT-BGR32"><constant>V4L2_PIX_FMT_BGR24</constant></link>
-the value should be 0xRRGGBB on a little endian, 0xBBGGRR on a big
-endian host.</entry>
- </row>
- <row>
- <entry>&v4l2-clip; *</entry>
- <entry><structfield>clips</structfield></entry>
- <entry>When chroma-keying has <emphasis>not</emphasis>
-been negotiated and &VIDIOC-G-FBUF; indicated this capability,
-applications can set this field to point to an array of
-clipping rectangles.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Like the window coordinates
-<structfield>w</structfield>, clipping rectangles are defined relative
-to the top, left corner of the frame buffer. However clipping
-rectangles must not extend beyond the frame buffer width and height, and they
-must not overlap. If possible applications should merge adjacent
-rectangles. Whether this must create x-y or y-x bands, or the order of
-rectangles, is not defined. When clip lists are not supported the
-driver ignores this field. Its contents after calling &VIDIOC-S-FMT;
-are undefined.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>clipcount</structfield></entry>
-	  <entry>When the application sets the
-<structfield>clips</structfield> field, this field must contain the
-number of clipping rectangles in the list. When clip lists are not
-supported the driver ignores this field, its contents after calling
-<constant>VIDIOC_S_FMT</constant> are undefined. When clip lists are
-supported but no clipping is desired this field must be set to
-zero.</entry>
- </row>
- <row>
- <entry>void *</entry>
- <entry><structfield>bitmap</structfield></entry>
- <entry>When chroma-keying has
-<emphasis>not</emphasis> been negotiated and &VIDIOC-G-FBUF; indicated
-this capability, applications can set this field to point to a
-clipping bit mask.</entry>
- </row>
- <row>
- <entry spanname="hspan"><para>It must be of the same size
-as the window, <structfield>w.width</structfield> and
-<structfield>w.height</structfield>. Each bit corresponds to a pixel
-in the overlaid image, which is displayed only when the bit is
-<emphasis>set</emphasis>. Pixel coordinates translate to bits like:
-<programlisting>
-((__u8 *) <structfield>bitmap</structfield>)[<structfield>w.width</structfield> * y + x / 8] &amp; (1 &lt;&lt; (x &amp; 7))</programlisting></para><para>where <structfield>0</structfield> &le; x &lt;
-<structfield>w.width</structfield> and <structfield>0</structfield> &le;
-y &lt;<structfield>w.height</structfield>.<footnote>
- <para>Should we require
- <structfield>w.width</structfield> to be a multiple of
- eight?</para>
- </footnote></para><para>When a clipping
-bit mask is not supported the driver ignores this field, its contents
-after calling &VIDIOC-S-FMT; are undefined. When a bit mask is supported
-but no clipping is desired this field must be set to
-<constant>NULL</constant>.</para><para>Applications need not create a
-clip list or bit mask. When they pass both, or despite negotiating
-chroma-keying, the results are undefined. Regardless of the chosen
-method, the clipping abilities of the hardware may be limited in
-quantity or quality. The results when these limits are exceeded are
-undefined.<footnote>
- <para>When the image is written into frame buffer
-memory it will be undesirable if the driver clips out fewer pixels
-than expected, because the application and graphics system are not
-aware these regions need to be refreshed. The driver should clip out
-more pixels or not write the image at all.</para>
- </footnote></para></entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>global_alpha</structfield></entry>
- <entry>The global alpha value used to blend the
-framebuffer with video images, if global alpha blending has been
-negotiated (<constant>V4L2_FBUF_FLAG_GLOBAL_ALPHA</constant>, see
-&VIDIOC-S-FBUF;, <xref linkend="framebuffer-flags" />).</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Note this field was added in Linux 2.6.23, extending the structure. However
-the <link linkend="vidioc-g-fmt">VIDIOC_G/S/TRY_FMT</link> ioctls,
-which take a pointer to a <link
-linkend="v4l2-format">v4l2_format</link> parent structure with padding
-bytes at the end, are not affected.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-clip">
- <title>struct <structname>v4l2_clip</structname><footnote>
- <para>The X Window system defines "regions" which are
-vectors of struct BoxRec { short x1, y1, x2, y2; } with width = x2 -
-x1 and height = y2 - y1, so one cannot pass X11 clip lists
-directly.</para>
- </footnote></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>&v4l2-rect;</entry>
- <entry><structfield>c</structfield></entry>
- <entry>Coordinates of the clipping rectangle, relative to
-the top, left corner of the frame buffer. Only window pixels
-<emphasis>outside</emphasis> all clipping rectangles are
-displayed.</entry>
- </row>
- <row>
- <entry>&v4l2-clip; *</entry>
- <entry><structfield>next</structfield></entry>
- <entry>Pointer to the next clipping rectangle, NULL when
-this is the last rectangle. Drivers ignore this field, it cannot be
-used to pass a linked list of clipping rectangles.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- NB for easier reading this table is duplicated
- in the vidioc-cropcap chapter.-->
-
- <table pgwide="1" frame="none" id="v4l2-rect">
- <title>struct <structname>v4l2_rect</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__s32</entry>
- <entry><structfield>left</structfield></entry>
- <entry>Horizontal offset of the top, left corner of the
-rectangle, in pixels.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>top</structfield></entry>
- <entry>Vertical offset of the top, left corner of the
-rectangle, in pixels. Offsets increase to the right and down.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Width of the rectangle, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Height of the rectangle, in pixels.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section>
- <title>Enabling Overlay</title>
-
- <para>To start or stop the frame buffer overlay applications call
-the &VIDIOC-OVERLAY; ioctl.</para>
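-
-  <para>A sketch, assuming <varname>fd</varname> is an open video
-overlay device:</para>
-
-  <programlisting>
-int enable = 1;
-
-if (-1 == ioctl(fd, VIDIOC_OVERLAY, &amp;enable)) {
-        perror("VIDIOC_OVERLAY");
-        exit(EXIT_FAILURE);
-}
-
-/* ... overlay is running ... */
-
-enable = 0;
-
-if (-1 == ioctl(fd, VIDIOC_OVERLAY, &amp;enable))
-        perror("VIDIOC_OVERLAY");
-</programlisting>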
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-radio.xml b/Documentation/DocBook/media/v4l/dev-radio.xml
deleted file mode 100644
index 3e6ac73b36af..000000000000
--- a/Documentation/DocBook/media/v4l/dev-radio.xml
+++ /dev/null
@@ -1,49 +0,0 @@
- <title>Radio Interface</title>
-
- <para>This interface is intended for AM and FM (analog) radio
-receivers and transmitters.</para>
-
- <para>Conventionally V4L2 radio devices are accessed through
-character device special files named <filename>/dev/radio</filename>
-and <filename>/dev/radio0</filename> to
-<filename>/dev/radio63</filename> with major number 81 and minor
-numbers 64 to 127.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the radio interface set the
-<constant>V4L2_CAP_RADIO</constant> and
-<constant>V4L2_CAP_TUNER</constant> or
-<constant>V4L2_CAP_MODULATOR</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. Other combinations of
-capability flags are reserved for future extensions.</para>
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>Radio devices can support <link
-linkend="control">controls</link>, and must support the <link
-linkend="tuner">tuner or modulator</link> ioctls.</para>
-
- <para>They do not support the video input or output, audio input
-or output, video standard, cropping and scaling, compression and
-streaming parameter, or overlay ioctls. All other ioctls and I/O
-methods are reserved for future extensions.</para>
- </section>
-
- <section>
- <title>Programming</title>
-
-  <para>Radio devices may have a couple of audio controls (as discussed
-in <xref linkend="control" />) such as a volume control, possibly custom
-controls. Further all radio devices have one tuner or modulator (these are
-discussed in <xref linkend="tuner" />) with index number zero to select
-the radio frequency and to determine if a monaural or FM stereo
-program is received/emitted. Drivers switch automatically between AM and FM
-depending on the selected frequency. The &VIDIOC-G-TUNER; or
-&VIDIOC-G-MODULATOR; ioctl
-reports the supported frequency range.</para>
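-
-  <para>A rough sketch of tuning to a station, assuming a receiver
-with a single tuner and using 95.0 MHz purely as an example
-frequency:</para>
-
-  <programlisting>
-struct v4l2_tuner tuner;
-struct v4l2_frequency freq;
-
-memset(&amp;tuner, 0, sizeof(tuner));
-tuner.index = 0;
-
-if (-1 == ioctl(fd, VIDIOC_G_TUNER, &amp;tuner)) {
-        perror("VIDIOC_G_TUNER");
-        exit(EXIT_FAILURE);
-}
-
-memset(&amp;freq, 0, sizeof(freq));
-freq.tuner = 0;
-freq.type = tuner.type;
-
-/* Frequencies are in units of 62.5 kHz, or 62.5 Hz when the
-   V4L2_TUNER_CAP_LOW capability is set. */
-if (tuner.capability &amp; V4L2_TUNER_CAP_LOW)
-        freq.frequency = 1520000; /* 95.0 MHz in 62.5 Hz units */
-else
-        freq.frequency = 1520;    /* 95.0 MHz in 62.5 kHz units */
-
-if (-1 == ioctl(fd, VIDIOC_S_FREQUENCY, &amp;freq))
-        perror("VIDIOC_S_FREQUENCY");
-</programlisting>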
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-raw-vbi.xml b/Documentation/DocBook/media/v4l/dev-raw-vbi.xml
deleted file mode 100644
index 78599bbd58f7..000000000000
--- a/Documentation/DocBook/media/v4l/dev-raw-vbi.xml
+++ /dev/null
@@ -1,345 +0,0 @@
- <title>Raw VBI Data Interface</title>
-
- <para>VBI is an abbreviation of Vertical Blanking Interval, a gap
-in the sequence of lines of an analog video signal. During VBI
-no picture information is transmitted, allowing some time while the
-electron beam of a cathode ray tube TV returns to the top of the
-screen. Using an oscilloscope you will find here the vertical
-synchronization pulses and short data packages ASK
-modulated<footnote><para>ASK: Amplitude-Shift Keying. A high signal
-level represents a '1' bit, a low level a '0' bit.</para></footnote>
-onto the video signal. These are transmissions of services such as
-Teletext or Closed Caption.</para>
-
-  <para>The subject of this interface type is raw VBI data, as sampled off
-a video signal, or to be added to a signal for output.
-The data format is similar to uncompressed video images: a number of
-lines times a number of samples per line. We call this a VBI image.</para>
-
- <para>Conventionally V4L2 VBI devices are accessed through character
-device special files named <filename>/dev/vbi</filename> and
-<filename>/dev/vbi0</filename> to <filename>/dev/vbi31</filename> with
-major number 81 and minor numbers 224 to 255.
-<filename>/dev/vbi</filename> is typically a symbolic link to the
-preferred VBI device. This convention applies to both input and output
-devices.</para>
-
- <para>To address the problems of finding related video and VBI
-devices VBI capturing and output is also available as device function
-under <filename>/dev/video</filename>. To capture or output raw VBI
-data with these devices applications must call the &VIDIOC-S-FMT;
-ioctl. Accessed as <filename>/dev/vbi</filename>, raw VBI capturing
-or output is the default device function.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the raw VBI capturing or output API set
-the <constant>V4L2_CAP_VBI_CAPTURE</constant> or
-<constant>V4L2_CAP_VBI_OUTPUT</constant> flags, respectively, in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. At least one of the
-read/write, streaming or asynchronous I/O methods must be
-supported. VBI devices may or may not have a tuner or modulator.</para>
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>VBI devices shall support <link linkend="video">video
-input or output</link>, <link linkend="tuner">tuner or
-modulator</link>, and <link linkend="control">controls</link> ioctls
-as needed. The <link linkend="standard">video standard</link> ioctls provide
-information vital to program a VBI device and therefore must be
-supported.</para>
- </section>
-
- <section>
- <title>Raw VBI Format Negotiation</title>
-
- <para>Raw VBI sampling abilities can vary, in particular the
-sampling frequency. To properly interpret the data V4L2 specifies an
-ioctl to query the sampling parameters. Moreover, to allow for some
-flexibility applications can also suggest different parameters.</para>
-
- <para>As usual these parameters are <emphasis>not</emphasis>
-reset at &func-open; time to permit Unix tool chains programming a
-device and then reading from it as if it were a plain file. Well
-written V4L2 applications should always ensure they really get what
-they want, requesting reasonable parameters and then checking if the
-actual parameters are suitable.</para>
-
- <para>To query the current raw VBI capture parameters
-applications set the <structfield>type</structfield> field of a
-&v4l2-format; to <constant>V4L2_BUF_TYPE_VBI_CAPTURE</constant> or
-<constant>V4L2_BUF_TYPE_VBI_OUTPUT</constant>, and call the
-&VIDIOC-G-FMT; ioctl with a pointer to this structure. Drivers fill
-the &v4l2-vbi-format; <structfield>vbi</structfield> member of the
-<structfield>fmt</structfield> union.</para>
-
- <para>To request different parameters applications set the
-<structfield>type</structfield> field of a &v4l2-format; as above and
-initialize all fields of the &v4l2-vbi-format;
-<structfield>vbi</structfield> member of the
-<structfield>fmt</structfield> union, or better just modify the
-results of <constant>VIDIOC_G_FMT</constant>, and call the
-&VIDIOC-S-FMT; ioctl with a pointer to this structure. Drivers return
-an &EINVAL; only when the given parameters are ambiguous, otherwise
-they modify the parameters according to the hardware capabilities and
-return the actual parameters. When the driver allocates resources at
-this point, it may return an &EBUSY; to indicate the returned
-parameters are valid but the required resources are currently not
-available. That may happen for instance when the video and VBI areas
-to capture would overlap, or when the driver supports multiple opens
-and another process already requested VBI capturing or output. Anyway,
-applications must expect other resource allocation points which may
-return <errorcode>EBUSY</errorcode>, at the &VIDIOC-STREAMON; ioctl
-and the first read(), write() and select() call.</para>
-
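-  <para>A minimal sketch of this negotiation for VBI capturing,
-assuming <varname>fd</varname> is an open VBI device and requesting
-16 lines per field purely as an example:</para>
-
-  <programlisting>
-struct v4l2_format fmt;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.type = V4L2_BUF_TYPE_VBI_CAPTURE;
-
-if (-1 == ioctl(fd, VIDIOC_G_FMT, &amp;fmt)) {
-        perror("VIDIOC_G_FMT");
-        exit(EXIT_FAILURE);
-}
-
-/* Modify only what is needed; the driver adjusts the rest. */
-fmt.fmt.vbi.count[0] = 16;
-fmt.fmt.vbi.count[1] = 16;
-
-if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt))
-        perror("VIDIOC_S_FMT");
-</programlisting>
-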
- <para>VBI devices must implement both the
-<constant>VIDIOC_G_FMT</constant> and
-<constant>VIDIOC_S_FMT</constant> ioctl, even if
-<constant>VIDIOC_S_FMT</constant> ignores all requests and always
-returns default parameters as <constant>VIDIOC_G_FMT</constant> does.
-<constant>VIDIOC_TRY_FMT</constant> is optional.</para>
-
- <table pgwide="1" frame="none" id="v4l2-vbi-format">
- <title>struct <structname>v4l2_vbi_format</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>sampling_rate</structfield></entry>
- <entry>Samples per second, i.&nbsp;e. unit 1 Hz.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>offset</structfield></entry>
- <entry><para>Horizontal offset of the VBI image,
-relative to the leading edge of the line synchronization pulse and
-counted in samples: The first sample in the VBI image will be located
-<structfield>offset</structfield> /
-<structfield>sampling_rate</structfield> seconds following the leading
-edge. See also <xref linkend="vbi-hsync" />.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>samples_per_line</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>sample_format</structfield></entry>
- <entry><para>Defines the sample format as in <xref
-linkend="pixfmt" />, a four-character-code.<footnote>
- <para>A few devices may be unable to
-sample VBI data at all but can extend the video capture window to the
-VBI region.</para>
- </footnote> Usually this is
-<constant>V4L2_PIX_FMT_GREY</constant>, i.&nbsp;e. each sample
-consists of 8 bits with lower values oriented towards the black level.
-Do not assume any other correlation of values with the signal level.
-For example, the MSB does not necessarily indicate if the signal is
-'high' or 'low' because 128 may not be the mean value of the
-signal. Drivers shall not convert the sample format by software.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>start</structfield>[2]</entry>
- <entry>This is the scanning system line number
-associated with the first line of the VBI image, of the first and the
-second field respectively. See <xref linkend="vbi-525" /> and
-<xref linkend="vbi-625" /> for valid values.
-The <constant>V4L2_VBI_ITU_525_F1_START</constant>,
-<constant>V4L2_VBI_ITU_525_F2_START</constant>,
-<constant>V4L2_VBI_ITU_625_F1_START</constant> and
-<constant>V4L2_VBI_ITU_625_F2_START</constant> defines give the start line
-numbers for each field for each 525 or 625 line format as a convenience.
-Don't forget that ITU line numbering starts at 1, not 0.
-VBI input drivers can return start values 0 if the hardware cannot
-reliably identify scanning lines; VBI acquisition may not require this
-information.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>count</structfield>[2]</entry>
- <entry>The number of lines in the first and second
-field image, respectively.</entry>
- </row>
- <row>
- <entry spanname="hspan"><para>Drivers should be as
-flexible as possible. For example, it may be possible to extend or
-move the VBI capture window down to the picture area, implementing a
-'full field mode' to capture data service transmissions embedded in
-the picture.</para><para>An application can set the first or second
-<structfield>count</structfield> value to zero if no data is required
-from the respective field; <structfield>count</structfield>[1] must be
-zero if the scanning system is progressive, &ie; not interlaced. The
-corresponding start value shall be ignored by the application and
-driver. Anyway, drivers may not support single field capturing and
-return both count values non-zero.</para><para>Both
-<structfield>count</structfield> values set to zero, or line numbers
-outside the bounds depicted in <xref linkend="vbi-525" /> and <xref
- linkend="vbi-625" />, or a field image covering
-lines of two fields, are invalid and shall not be returned by the
-driver.</para><para>To initialize the <structfield>start</structfield>
-and <structfield>count</structfield> fields, applications must first
-determine the current video standard selection. The &v4l2-std-id; or
-the <structfield>framelines</structfield> field of &v4l2-standard; can
-be evaluated for this purpose.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>See <xref linkend="vbifmt-flags" /> below. Currently
-only drivers set flags, applications must set this field to
-zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>This array is reserved for future extensions.
-Drivers and applications must set it to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="vbifmt-flags">
- <title>Raw VBI Format Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_VBI_UNSYNC</constant></entry>
- <entry>0x0001</entry>
- <entry><para>This flag indicates hardware which does not
-properly distinguish between fields. Normally the VBI image stores the
-first field (lower scanning line numbers) first in memory. This may be
-a top or bottom field depending on the video standard. When this flag
-is set the first or second field may be stored first, however the
-fields are still in correct temporal order with the older field first
-in memory.<footnote>
- <para>Most VBI services transmit on both fields, but
-some have different semantics depending on the field number. These
-cannot be reliably decoded or encoded when
-<constant>V4L2_VBI_UNSYNC</constant> is set.</para>
- </footnote></para></entry>
- </row>
- <row>
- <entry><constant>V4L2_VBI_INTERLACED</constant></entry>
- <entry>0x0002</entry>
- <entry>By default the two field images will be passed
-sequentially; all lines of the first field followed by all lines of
-the second field (compare <xref linkend="field-order" />
-<constant>V4L2_FIELD_SEQ_TB</constant> and
-<constant>V4L2_FIELD_SEQ_BT</constant>, whether the top or bottom
-field is first in memory depends on the video standard). When this
-flag is set, the two fields are interlaced (cf.
-<constant>V4L2_FIELD_INTERLACED</constant>). The first line of the
-first field followed by the first line of the second field, then the
-two second lines, and so on. Such a layout may be necessary when the
-hardware has been programmed to capture or output interlaced video
-images and is unable to separate the fields for VBI capturing at
-the same time. For simplicity setting this flag implies that both
-<structfield>count</structfield> values are equal and non-zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <figure id="vbi-hsync">
- <title>Line synchronization</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="vbi_hsync.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="vbi_hsync.gif" format="GIF" />
- </imageobject>
- <textobject>
- <phrase>Line synchronization diagram</phrase>
- </textobject>
- </mediaobject>
- </figure>
-
- <figure id="vbi-525">
- <title>ITU-R 525 line numbering (M/NTSC and M/PAL)</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="vbi_525.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="vbi_525.gif" format="GIF" />
- </imageobject>
- <textobject>
- <phrase>NTSC field synchronization diagram</phrase>
- </textobject>
- <caption>
- <para>(1) For the purpose of this specification field 2
-starts in line 264 and not 263.5 because half line capturing is not
-supported.</para>
- </caption>
- </mediaobject>
- </figure>
-
- <figure id="vbi-625">
- <title>ITU-R 625 line numbering</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="vbi_625.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="vbi_625.gif" format="GIF" />
- </imageobject>
- <textobject>
- <phrase>PAL/SECAM field synchronization diagram</phrase>
- </textobject>
- <caption>
- <para>(1) For the purpose of this specification field 2
-starts in line 314 and not 313.5 because half line capturing is not
-supported.</para>
- </caption>
- </mediaobject>
- </figure>
-
- <para>Remember the VBI image format depends on the selected
-video standard, therefore the application must choose a new standard or
-query the current standard first. Attempts to read or write data ahead
-of format negotiation, or after switching the video standard which may
-invalidate the negotiated VBI parameters, should be refused by the
-driver. A format change during active I/O is not permitted.</para>
- </section>
-
- <section>
- <title>Reading and writing VBI images</title>
-
- <para>To assure synchronization with the field number and easier
-implementation, the smallest unit of data passed at a time is one
-frame, consisting of two fields of VBI images immediately following in
-memory.</para>
-
- <para>The total size of a frame computes as follows:</para>
-
- <programlisting>
-(<structfield>count</structfield>[0] + <structfield>count</structfield>[1]) *
-<structfield>samples_per_line</structfield> * sample size in bytes</programlisting>
-
-  <para>The sample size is most likely always one byte; nevertheless
-applications must check the <structfield>sample_format</structfield>
-field to function properly with other drivers.</para>
-
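-  <para>A sketch of the calculation, assuming the negotiated
-&v4l2-vbi-format; is still available in <varname>fmt.fmt.vbi</varname>
-and the sample size is one byte:</para>
-
-  <programlisting>
-size_t frame_size = (fmt.fmt.vbi.count[0] + fmt.fmt.vbi.count[1])
-        * fmt.fmt.vbi.samples_per_line;
-unsigned char *frame = malloc(frame_size);
-
-if (frame != NULL &amp;&amp; read(fd, frame, frame_size) != (ssize_t)frame_size)
-        perror("read");
-</programlisting>
-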
- <para>A VBI device may support <link
- linkend="rw">read/write</link> and/or streaming (<link
- linkend="mmap">memory mapping</link> or <link
- linkend="userp">user pointer</link>) I/O. The latter bears the
-possibility of synchronizing video and
-VBI data by using buffer timestamps.</para>
-
- <para>Remember the &VIDIOC-STREAMON; ioctl and the first read(),
-write() and select() call can be resource allocation points returning
-an &EBUSY; if the required hardware resources are temporarily
-unavailable, for example the device is already in use by another
-process.</para>
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-rds.xml b/Documentation/DocBook/media/v4l/dev-rds.xml
deleted file mode 100644
index be2f33737323..000000000000
--- a/Documentation/DocBook/media/v4l/dev-rds.xml
+++ /dev/null
@@ -1,196 +0,0 @@
- <title>RDS Interface</title>
-
- <para>The Radio Data System transmits supplementary
-information in binary format, for example the station name or travel
-information, on an inaudible audio subcarrier of a radio program. This
-interface is aimed at devices capable of receiving and/or transmitting RDS
-information.</para>
-
- <para>For more information see the core RDS standard <xref linkend="iec62106" />
-and the RBDS standard <xref linkend="nrsc4" />.</para>
-
- <para>Note that the RBDS standard as is used in the USA is almost identical
-to the RDS standard. Any RDS decoder/encoder can also handle RBDS. Only some of the
-fields have slightly different meanings. See the RBDS standard for more
-information.</para>
-
- <para>The RBDS standard also specifies support for MMBS (Modified Mobile Search).
-This is a proprietary format which seems to be discontinued. The RDS interface does not
-support this format. Should support for MMBS (or the so-called 'E blocks' in general)
-be needed, then please contact the linux-media mailing list: &v4l-ml;.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the RDS capturing API set
-the <constant>V4L2_CAP_RDS_CAPTURE</constant> flag in
-the <structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. Any tuner that supports RDS
-will set the <constant>V4L2_TUNER_CAP_RDS</constant> flag in
-the <structfield>capability</structfield> field of &v4l2-tuner;. If
-the driver only passes RDS blocks without interpreting the data
-the <constant>V4L2_TUNER_CAP_RDS_BLOCK_IO</constant> flag has to be
-set, see <link linkend="reading-rds-data">Reading RDS data</link>.
-For future use the
-flag <constant>V4L2_TUNER_CAP_RDS_CONTROLS</constant> has also been
-defined. However, a driver for a radio tuner with this capability does
-not yet exist, so if you are planning to write such a driver you
-should discuss this on the linux-media mailing list: &v4l-ml;.</para>
-
- <para> Whether an RDS signal is present can be detected by looking
-at the <structfield>rxsubchans</structfield> field of &v4l2-tuner;:
-the <constant>V4L2_TUNER_SUB_RDS</constant> flag will be set if RDS data
-was detected.</para>
-
- <para>Devices supporting the RDS output API
-set the <constant>V4L2_CAP_RDS_OUTPUT</constant> flag in
-the <structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl.
-Any modulator that supports RDS will set the
-<constant>V4L2_TUNER_CAP_RDS</constant> flag in the <structfield>capability</structfield>
-field of &v4l2-modulator;.
-In order to enable the RDS transmission one must set the <constant>V4L2_TUNER_SUB_RDS</constant>
-bit in the <structfield>txsubchans</structfield> field of &v4l2-modulator;.
-If the driver only passes RDS blocks without interpreting the data
-the <constant>V4L2_TUNER_CAP_RDS_BLOCK_IO</constant> flag has to be set. If the
-tuner is capable of handling RDS entities like program identification codes and radio
-text, the flag <constant>V4L2_TUNER_CAP_RDS_CONTROLS</constant> should be set,
-see <link linkend="writing-rds-data">Writing RDS data</link> and
-<link linkend="fm-tx-controls">FM Transmitter Control Reference</link>.</para>
- </section>
-
- <section id="reading-rds-data">
- <title>Reading RDS data</title>
-
- <para>RDS data can be read from the radio device
-with the &func-read; function. The data is packed in groups of three bytes.</para>
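-
-  <para>A sketch of reading and filtering a few blocks, assuming
-<varname>fd</varname> is an open radio device with RDS capture
-capability:</para>
-
-  <programlisting>
-struct v4l2_rds_data rds[16];
-ssize_t n;
-unsigned int i;
-
-n = read(fd, rds, sizeof(rds));
-
-for (i = 0; n &gt; 0 &amp;&amp; i &lt; n / sizeof(rds[0]); i++) {
-        if (rds[i].block &amp; V4L2_RDS_BLOCK_ERROR)
-                continue; /* uncorrectable block */
-        /* (rds[i].block &amp; V4L2_RDS_BLOCK_MSK) identifies
-           block A, B, C, C' or D. */
-}
-</programlisting>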
- </section>
-
- <section id="writing-rds-data">
- <title>Writing RDS data</title>
-
- <para>RDS data can be written to the radio device
-with the &func-write; function. The data is packed in groups of three bytes,
-as follows:</para>
- </section>
-
- <section>
- <title>RDS datastructures</title>
- <table frame="none" pgwide="1" id="v4l2-rds-data">
- <title>struct
-<structname>v4l2_rds_data</structname></title>
- <tgroup cols="3">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="1*" />
- <colspec colname="c3" colwidth="5*" />
- <tbody valign="top">
- <row>
- <entry>__u8</entry>
- <entry><structfield>lsb</structfield></entry>
- <entry>Least Significant Byte of RDS Block</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>msb</structfield></entry>
- <entry>Most Significant Byte of RDS Block</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>block</structfield></entry>
- <entry>Block description</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table frame="none" pgwide="1" id="v4l2-rds-block">
- <title>Block description</title>
- <tgroup cols="2">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="5*" />
- <tbody valign="top">
- <row>
- <entry>Bits 0-2</entry>
- <entry>Block (aka offset) of the received data.</entry>
- </row>
- <row>
- <entry>Bits 3-5</entry>
- <entry>Deprecated. Currently identical to bits 0-2. Do not use these bits.</entry>
- </row>
- <row>
- <entry>Bit 6</entry>
- <entry>Corrected bit. Indicates that an error was corrected for this data block.</entry>
- </row>
- <row>
- <entry>Bit 7</entry>
- <entry>Error bit. Indicates that an uncorrectable error occurred during reception of this block.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-rds-block-codes">
- <title>Block defines</title>
- <tgroup cols="4">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="1*" />
- <colspec colname="c3" colwidth="1*" />
- <colspec colname="c4" colwidth="5*" />
- <tbody valign="top">
- <row>
- <entry>V4L2_RDS_BLOCK_MSK</entry>
- <entry> </entry>
- <entry>7</entry>
- <entry>Mask for bits 0-2 to get the block ID.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_A</entry>
- <entry> </entry>
- <entry>0</entry>
- <entry>Block A.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_B</entry>
- <entry> </entry>
- <entry>1</entry>
- <entry>Block B.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_C</entry>
- <entry> </entry>
- <entry>2</entry>
- <entry>Block C.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_D</entry>
- <entry> </entry>
- <entry>3</entry>
- <entry>Block D.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_C_ALT</entry>
- <entry> </entry>
- <entry>4</entry>
- <entry>Block C'.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_INVALID</entry>
- <entry>read-only</entry>
- <entry>7</entry>
- <entry>An invalid block.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_CORRECTED</entry>
- <entry>read-only</entry>
- <entry>0x40</entry>
- <entry>A bit error was detected but corrected.</entry>
- </row>
- <row>
- <entry>V4L2_RDS_BLOCK_ERROR</entry>
- <entry>read-only</entry>
- <entry>0x80</entry>
- <entry>An uncorrectable error occurred.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-sdr.xml b/Documentation/DocBook/media/v4l/dev-sdr.xml
deleted file mode 100644
index 6da1157fb5bd..000000000000
--- a/Documentation/DocBook/media/v4l/dev-sdr.xml
+++ /dev/null
@@ -1,126 +0,0 @@
- <title>Software Defined Radio Interface (SDR)</title>
-
- <para>
-SDR is an abbreviation of Software Defined Radio, the radio device
-which uses application software for modulation or demodulation. This interface
-is intended for controlling and data streaming of such devices.
- </para>
-
- <para>
-SDR devices are accessed through character device special files named
-<filename>/dev/swradio0</filename> to <filename>/dev/swradio255</filename>
-with major number 81 and dynamically allocated minor numbers 0 to 255.
- </para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>
-Devices supporting the SDR receiver interface set the
-<constant>V4L2_CAP_SDR_CAPTURE</constant> and
-<constant>V4L2_CAP_TUNER</constant> flags in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. These flags mean the device has an
-Analog to Digital Converter (ADC), which is a mandatory element for the SDR receiver.
- </para>
- <para>
-Devices supporting the SDR transmitter interface set the
-<constant>V4L2_CAP_SDR_OUTPUT</constant> and
-<constant>V4L2_CAP_MODULATOR</constant> flags in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. These flags mean the device has a
-Digital to Analog Converter (DAC), which is a mandatory element for the SDR transmitter.
- </para>
- <para>
-At least one of the read/write, streaming or asynchronous I/O methods must
-be supported.
- </para>
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>
-SDR devices can support <link linkend="control">controls</link>, and must
-support the <link linkend="tuner">tuner</link> ioctls. Tuner ioctls are used
-for setting the ADC/DAC sampling rate (sampling frequency) and the possible
-radio frequency (RF).
- </para>
-
- <para>
-The <constant>V4L2_TUNER_SDR</constant> tuner type is used for setting SDR
-device ADC/DAC frequency, and the <constant>V4L2_TUNER_RF</constant>
-tuner type is used for setting radio frequency.
-The tuner index of the RF tuner (if any) must always follow the SDR tuner index.
-Normally the SDR tuner is #0 and the RF tuner is #1.
- </para>
-
- <para>
-The &VIDIOC-S-HW-FREQ-SEEK; ioctl is not supported.
- </para>
- </section>
-
- <section>
- <title>Data Format Negotiation</title>
-
- <para>
-The SDR device uses the <link linkend="format">format</link> ioctls to
-select the capture and output format. Both the sampling resolution and the data
-streaming format are bound to that selectable format. In addition to the basic
-<link linkend="format">format</link> ioctls, the &VIDIOC-ENUM-FMT; ioctl
-must be supported as well.
- </para>
-
- <para>
-To use the <link linkend="format">format</link> ioctls applications set the
-<structfield>type</structfield> field of a &v4l2-format; to
-<constant>V4L2_BUF_TYPE_SDR_CAPTURE</constant> or
-<constant>V4L2_BUF_TYPE_SDR_OUTPUT</constant> and use the &v4l2-sdr-format;
-<structfield>sdr</structfield> member of the <structfield>fmt</structfield>
-union as needed per the desired operation.
-Currently there are two fields of struct &v4l2-sdr-format; in use:
-<structfield>pixelformat</structfield> and
-<structfield>buffersize</structfield>. The content of
-<structfield>pixelformat</structfield> is the V4L2 FourCC code of the data
-format. The <structfield>buffersize</structfield> field is the maximum
-buffer size in bytes required for data transfer, set by the driver in
-order to inform the application.
- </para>
-
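-  <para>
-A short sketch, assuming <varname>fd</varname> is an open SDR capture
-device and that the <constant>V4L2_SDR_FMT_CU8</constant> sample format
-is supported (this is merely an example format):
-  </para>
-
-  <programlisting>
-struct v4l2_format fmt;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.type = V4L2_BUF_TYPE_SDR_CAPTURE;
-fmt.fmt.sdr.pixelformat = V4L2_SDR_FMT_CU8;
-
-if (-1 == ioctl(fd, VIDIOC_S_FMT, &amp;fmt)) {
-        perror("VIDIOC_S_FMT");
-        exit(EXIT_FAILURE);
-}
-
-/* fmt.fmt.sdr.buffersize now holds the maximum transfer
-   size chosen by the driver. */
-</programlisting>
-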
- <table pgwide="1" frame="none" id="v4l2-sdr-format">
- <title>struct <structname>v4l2_sdr_format</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>pixelformat</structfield></entry>
- <entry>
-The data format or type of compression, set by the application. This is a
-little endian <link linkend="v4l2-fourcc">four character code</link>.
-V4L2 defines SDR formats in <xref linkend="sdr-formats" />.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>buffersize</structfield></entry>
- <entry>
-Maximum size in bytes required for data. Value is set by the driver.
- </entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>reserved[24]</structfield></entry>
- <entry>This array is reserved for future extensions.
-Drivers and applications must set it to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>
-An SDR device may support <link linkend="rw">read/write</link>
-and/or streaming (<link linkend="mmap">memory mapping</link>
-or <link linkend="userp">user pointer</link>) I/O.
- </para>
-
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml b/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml
deleted file mode 100644
index 0aec62ed2bf8..000000000000
--- a/Documentation/DocBook/media/v4l/dev-sliced-vbi.xml
+++ /dev/null
@@ -1,706 +0,0 @@
- <title>Sliced VBI Data Interface</title>
-
- <para>VBI stands for Vertical Blanking Interval, a gap in the
-sequence of lines of an analog video signal. During VBI no picture
-information is transmitted, allowing some time while the electron beam
-of a cathode ray tube TV returns to the top of the screen.</para>
-
- <para>Sliced VBI devices use hardware to demodulate data transmitted
-in the VBI. V4L2 drivers shall <emphasis>not</emphasis> do this by
-software, see also the <link linkend="raw-vbi">raw VBI
-interface</link>. The data is passed as short packets of fixed size,
-covering one scan line each. The number of packets per video frame is
-variable.</para>
-
- <para>Sliced VBI capture and output devices are accessed through the
-same character special files as raw VBI devices. When a driver
-supports both interfaces, the default function of a
-<filename>/dev/vbi</filename> device is <emphasis>raw</emphasis> VBI
-capturing or output, and the sliced VBI function is only available
-after calling the &VIDIOC-S-FMT; ioctl as defined below. Likewise a
-<filename>/dev/video</filename> device may support the sliced VBI API,
-however the default function here is video capturing or output.
-Different file descriptors must be used to pass raw and sliced VBI
-data simultaneously, if this is supported by the driver.</para>
-
- <section>
- <title>Querying Capabilities</title>
-
- <para>Devices supporting the sliced VBI capturing or output API
-set the <constant>V4L2_CAP_SLICED_VBI_CAPTURE</constant> or
-<constant>V4L2_CAP_SLICED_VBI_OUTPUT</constant> flag respectively, in
-the <structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl. At least one of the
-read/write, streaming or asynchronous <link linkend="io">I/O
-methods</link> must be supported. Sliced VBI devices may have a tuner
-or modulator.</para>
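-
- <para>As a sketch (error handling omitted), an application might check
-for the sliced VBI capture capability as follows:</para>
-
- <informalexample>
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: does the device behind fd support sliced VBI capture? */
-static int supports_sliced_vbi_capture(int fd)
-{
-        struct v4l2_capability cap;
-
-        memset(&amp;cap, 0, sizeof(cap));
-        if (ioctl(fd, VIDIOC_QUERYCAP, &amp;cap) &lt; 0)
-                return 0;
-
-        return (cap.capabilities &amp; V4L2_CAP_SLICED_VBI_CAPTURE) != 0;
-}
-</programlisting>
- </informalexample>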
- </section>
-
- <section>
- <title>Supplemental Functions</title>
-
- <para>Sliced VBI devices shall support <link linkend="video">video
-input or output</link> and <link linkend="tuner">tuner or
-modulator</link> ioctls if they have these capabilities, and they may
-support <link linkend="control">control</link> ioctls. The <link
-linkend="standard">video standard</link> ioctls provide information
-vital to program a sliced VBI device and therefore must be
-supported.</para>
- </section>
-
- <section id="sliced-vbi-format-negotitation">
- <title>Sliced VBI Format Negotiation</title>
-
- <para>To find out which data services are supported by the
-hardware applications can call the &VIDIOC-G-SLICED-VBI-CAP; ioctl.
-All drivers implementing the sliced VBI interface must support this
-ioctl. The results may differ from those of the &VIDIOC-S-FMT; ioctl
-when the number of VBI lines the hardware can capture or output per
-frame, or the number of services it can identify on a given line are
-limited. For example on PAL line 16 the hardware may be able to look
-for a VPS or Teletext signal, but not both at the same time.</para>
-
- <para>To determine the currently selected services applications
-set the <structfield>type</structfield> field of &v4l2-format; to
-<constant>V4L2_BUF_TYPE_SLICED_VBI_CAPTURE</constant> or
-<constant>V4L2_BUF_TYPE_SLICED_VBI_OUTPUT</constant>, and the &VIDIOC-G-FMT;
-ioctl fills the <structfield>fmt.sliced</structfield> member, a
-&v4l2-sliced-vbi-format;.</para>
-
- <para>Applications can request different parameters by
-initializing or modifying the <structfield>fmt.sliced</structfield>
-member and calling the &VIDIOC-S-FMT; ioctl with a pointer to the
-<structname>v4l2_format</structname> structure.</para>
-
- <para>The sliced VBI API is more complicated than the raw VBI API
-because the hardware must be told which VBI service to expect on each
-scan line. Not all services may be supported by the hardware on all
-lines (this is especially true for VBI output where Teletext is often
-unsupported and other services can only be inserted in one specific
-line). In many cases, however, it is sufficient to just set the
-<structfield>service_set</structfield> field to the required services
-and let the driver fill the <structfield>service_lines</structfield>
-array according to hardware capabilities. Only if more precise control
-is needed should the programmer set the
-<structfield>service_lines</structfield> array explicitly.</para>
-
- <para>The &VIDIOC-S-FMT; ioctl modifies the parameters
-according to hardware capabilities. When the driver allocates
-resources at this point, it may return an &EBUSY; if the required
-resources are temporarily unavailable. Other resource allocation
-points which may return <errorcode>EBUSY</errorcode> can be the
-&VIDIOC-STREAMON; ioctl and the first &func-read;, &func-write; and
-&func-select; call.</para>
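-
- <para>A minimal negotiation sketch, requesting Teletext and WSS on a
-625-line device and letting the driver fill in the per-line details
-(error handling omitted):</para>
-
- <informalexample>
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: request Teletext B and WSS and let the driver fill the
-   service_lines array according to its capabilities. */
-static int negotiate_sliced_services(int fd)
-{
-        struct v4l2_format fmt;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
-        fmt.fmt.sliced.service_set = V4L2_SLICED_TELETEXT_B |
-                                     V4L2_SLICED_WSS_625;
-
-        if (ioctl(fd, VIDIOC_S_FMT, &amp;fmt) &lt; 0)
-                return -1;
-
-        /* On return service_set is the union of the granted services
-           and io_size is the buffer size needed for one frame. */
-        return fmt.fmt.sliced.io_size;
-}
-</programlisting>
- </informalexample>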
-
- <table frame="none" pgwide="1" id="v4l2-sliced-vbi-format">
- <title>struct
-<structname>v4l2_sliced_vbi_format</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" colwidth="3*" />
- <colspec colname="c2" colwidth="3*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="2*" />
- <colspec colname="c5" colwidth="2*" />
- <spanspec namest="c3" nameend="c5" spanname="hspan" />
- <tbody valign="top">
- <row>
- <entry>__u16</entry>
- <entry><structfield>service_set</structfield></entry>
- <entry spanname="hspan"><para>If
-<structfield>service_set</structfield> is non-zero when passed with
-&VIDIOC-S-FMT; or &VIDIOC-TRY-FMT;, the
-<structfield>service_lines</structfield> array will be filled by the
-driver according to the services specified in this field. For example,
-if <structfield>service_set</structfield> is initialized with
-<constant>V4L2_SLICED_TELETEXT_B | V4L2_SLICED_WSS_625</constant>, a
-driver for the cx25840 video decoder sets lines 7-22 of both
-fields<footnote><para>According to <link
-linkend="ets300706">ETS&nbsp;300&nbsp;706</link> lines 6-22 of the
-first field and lines 5-22 of the second field may carry Teletext
-data.</para></footnote> to <constant>V4L2_SLICED_TELETEXT_B</constant>
-and line 23 of the first field to
-<constant>V4L2_SLICED_WSS_625</constant>. If
-<structfield>service_set</structfield> is set to zero, then the values
-of <structfield>service_lines</structfield> will be used instead.
-</para><para>On return the driver sets this field to the union of all
-elements of the returned <structfield>service_lines</structfield>
-array. It may contain fewer services than requested, perhaps just one,
-if the hardware cannot handle more services simultaneously. It may be
-empty (zero) if none of the requested services are supported by the
-hardware.</para></entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>service_lines</structfield>[2][24]</entry>
- <entry spanname="hspan"><para>Applications initialize this
-array with sets of data services the driver shall look for or insert
-on the respective scan line. Subject to hardware capabilities drivers
-return the requested set, a subset, which may be just a single
-service, or an empty set. When the hardware cannot handle multiple
-services on the same line the driver shall choose one. No assumptions
-can be made on which service the driver chooses.</para><para>Data
-services are defined in <xref linkend="vbi-services2" />. Array indices
-map to ITU-R line numbers (see also <xref linkend="vbi-525" /> and <xref
- linkend="vbi-625" />) as follows: <!-- No nested
-tables, sigh. --></para></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Element</entry>
- <entry>525 line systems</entry>
- <entry>625 line systems</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[0][1]</entry>
- <entry align="center">1</entry>
- <entry align="center">1</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[0][23]</entry>
- <entry align="center">23</entry>
- <entry align="center">23</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[1][1]</entry>
- <entry align="center">264</entry>
- <entry align="center">314</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[1][23]</entry>
- <entry align="center">286</entry>
- <entry align="center">336</entry>
- </row>
- <!-- End of line numbers table. -->
- <row>
- <entry></entry>
- <entry></entry>
- <entry spanname="hspan">Drivers must set
-<structfield>service_lines</structfield>[0][0] and
-<structfield>service_lines</structfield>[1][0] to zero.
-The <constant>V4L2_VBI_ITU_525_F1_START</constant>,
-<constant>V4L2_VBI_ITU_525_F2_START</constant>,
-<constant>V4L2_VBI_ITU_625_F1_START</constant> and
-<constant>V4L2_VBI_ITU_625_F2_START</constant> defines give the start
-line numbers for each field for each 525 or 625 line format as a
-convenience. Don't forget that ITU line numbering starts at 1, not 0.
-</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>io_size</structfield></entry>
- <entry spanname="hspan">Maximum number of bytes passed by
-one &func-read; or &func-write; call, and the buffer size in bytes for
-the &VIDIOC-QBUF; and &VIDIOC-DQBUF; ioctl. Drivers set this field to
-the size of &v4l2-sliced-vbi-data; times the number of non-zero
-elements in the returned <structfield>service_lines</structfield>
-array (that is the number of lines potentially carrying data).</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry spanname="hspan">This array is reserved for future
-extensions. Applications and drivers must set it to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- See also vidioc-g-sliced-vbi-cap.sgml -->
- <table frame="none" pgwide="1" id="vbi-services2">
- <title>Sliced VBI services</title>
- <tgroup cols="5">
- <colspec colname="c1" colwidth="2*" />
- <colspec colname="c2" colwidth="1*" />
- <colspec colname="c3" colwidth="1*" />
- <colspec colname="c4" colwidth="2*" />
- <colspec colname="c5" colwidth="2*" />
- <spanspec namest="c3" nameend="c5" spanname="rlp" />
- <thead>
- <row>
- <entry>Symbol</entry>
- <entry>Value</entry>
- <entry>Reference</entry>
- <entry>Lines, usually</entry>
- <entry>Payload</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SLICED_TELETEXT_B</constant>
-(Teletext System B)</entry>
- <entry>0x0001</entry>
- <entry><xref linkend="ets300706" />, <xref linkend="itu653" /></entry>
- <entry>PAL/SECAM line 7-22, 320-335 (second field 7-22)</entry>
- <entry>Last 42 of the 45 byte Teletext packet, that is
-without clock run-in and framing code, lsb first transmitted.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_VPS</constant></entry>
- <entry>0x0400</entry>
- <entry><xref linkend="ets300231" /></entry>
- <entry>PAL line 16</entry>
- <entry>Byte number 3 to 15 according to Figure 9 of
-ETS&nbsp;300&nbsp;231, lsb first transmitted.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_CAPTION_525</constant></entry>
- <entry>0x1000</entry>
- <entry><xref linkend="cea608" /></entry>
- <entry>NTSC line 21, 284 (second field 21)</entry>
- <entry>Two bytes in transmission order, including parity
-bit, lsb first transmitted.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_WSS_625</constant></entry>
- <entry>0x4000</entry>
- <entry><xref linkend="itu1119" />, <xref linkend="en300294" /></entry>
- <entry>PAL/SECAM line 23</entry>
- <entry><screen>
-Byte 0 1
- msb lsb msb lsb
- Bit 7 6 5 4 3 2 1 0 x x 13 12 11 10 9
-</screen></entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_VBI_525</constant></entry>
- <entry>0x1000</entry>
- <entry spanname="rlp">Set of services applicable to 525
-line systems.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_VBI_625</constant></entry>
- <entry>0x4401</entry>
- <entry spanname="rlp">Set of services applicable to 625
-line systems.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>Drivers may return an &EINVAL; when applications attempt to
-read or write data without prior format negotiation, after switching
-the video standard (which may invalidate the negotiated VBI
-parameters) and after switching the video input (which may change the
-video standard as a side effect). The &VIDIOC-S-FMT; ioctl may return
-an &EBUSY; when applications attempt to change the format while I/O is
-in progress (between a &VIDIOC-STREAMON; and &VIDIOC-STREAMOFF; call,
-and after the first &func-read; or &func-write; call).</para>
- </section>
-
- <section>
- <title>Reading and writing sliced VBI data</title>
-
- <para>A single &func-read; or &func-write; call must pass all data
-belonging to one video frame. That is an array of
-<structname>v4l2_sliced_vbi_data</structname> structures with one or
-more elements and a total size not exceeding
-<structfield>io_size</structfield> bytes. Likewise in streaming I/O
-mode one buffer of <structfield>io_size</structfield> bytes must
-contain data of one video frame. The <structfield>id</structfield> of
-unused <structname>v4l2_sliced_vbi_data</structname> elements must be
-zero.</para>
-
- <table frame="none" pgwide="1" id="v4l2-sliced-vbi-data">
- <title>struct
-<structname>v4l2_sliced_vbi_data</structname></title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry>A flag from <xref linkend="vbi-services" />
-identifying the type of data in this packet. Only a single bit must be
-set. When the <structfield>id</structfield> of a captured packet is
-zero, the packet is empty and the contents of other fields are
-undefined. Applications shall ignore empty packets. When the
-<structfield>id</structfield> of a packet for output is zero the
-contents of the <structfield>data</structfield> field are undefined
-and the driver must no longer insert data on the requested
-<structfield>field</structfield> and
-<structfield>line</structfield>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>field</structfield></entry>
- <entry>The video field number this data has been captured
-from, or shall be inserted at. <constant>0</constant> for the first
-field, <constant>1</constant> for the second field.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>line</structfield></entry>
- <entry>The field (as opposed to frame) line number this
-data has been captured from, or shall be inserted at. See <xref
- linkend="vbi-525" /> and <xref linkend="vbi-625" /> for valid
-values. Sliced VBI capture devices can set the line number of all
-packets to <constant>0</constant> if the hardware cannot reliably
-identify scan lines. The field number must always be valid.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield></entry>
- <entry>This field is reserved for future extensions.
-Applications and drivers must set it to zero.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>data</structfield>[48]</entry>
- <entry>The packet payload. See <xref
- linkend="vbi-services" /> for the contents and number of
-bytes passed for each data type. The contents of padding bytes at the
-end of this array are undefined, drivers and applications shall ignore
-them.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
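-
- <para>For example (a sketch only, assuming &func-read; I/O and a
-previously negotiated <structfield>io_size</structfield>), one frame of
-sliced VBI data could be read and scanned like this:</para>
-
- <informalexample>
- <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;stdlib.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: read one frame worth of sliced VBI packets and print the
-   non-empty ones. io_size comes from the negotiated format. */
-static void read_sliced_frame(int fd, unsigned int io_size)
-{
-        struct v4l2_sliced_vbi_data *buf = malloc(io_size);
-        ssize_t n;
-        unsigned int i;
-
-        if (!buf)
-                return;
-
-        n = read(fd, buf, io_size);
-        for (i = 0; n &gt; 0 &amp;&amp; i &lt; n / sizeof(*buf); i++) {
-                if (buf[i].id == 0)
-                        continue;       /* empty packet, ignore it */
-                printf("service 0x%x on field %u line %u\n",
-                       buf[i].id, buf[i].field, buf[i].line);
-        }
-        free(buf);
-}
-</programlisting>
- </informalexample>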
-
- <para>Packets are always passed in ascending line number order,
-without duplicate line numbers. The &func-write; function and the
-&VIDIOC-QBUF; ioctl must return an &EINVAL; when applications violate
-this rule. They must also return an &EINVAL; when applications pass an
-incorrect field or line number, or a combination of
-<structfield>field</structfield>, <structfield>line</structfield> and
-<structfield>id</structfield> which has not been negotiated with the
-&VIDIOC-G-FMT; or &VIDIOC-S-FMT; ioctl. When the line numbers are
-unknown the driver must pass the packets in transmitted order. The
-driver can insert empty packets with <structfield>id</structfield> set
-to zero anywhere in the packet array.</para>
-
- <para>To ensure synchronization and to distinguish this case from
-frame dropping, when a captured frame does not carry any of the requested
-data services drivers must pass one or more empty packets. When an
-application fails to pass VBI data in time for output, the driver
-must output the last VPS and WSS packet again, and disable the output
-of Closed Caption and Teletext data, or output data which is ignored
-by Closed Caption and Teletext decoders.</para>
-
- <para>A sliced VBI device may support <link
-linkend="rw">read/write</link> and/or streaming (<link
-linkend="mmap">memory mapping</link> and/or <link linkend="userp">user
-pointer</link>) I/O. The latter makes it possible to synchronize
-video and VBI data by using buffer timestamps.</para>
-
- </section>
-
- <section>
- <title>Sliced VBI Data in MPEG Streams</title>
-
- <para>If a device can produce an MPEG output stream, it may be
-capable of providing <link
-linkend="sliced-vbi-format-negotitation">negotiated sliced VBI
-services</link> as data embedded in the MPEG stream. Users or
-applications control this sliced VBI data insertion with the <link
-linkend="v4l2-mpeg-stream-vbi-fmt">V4L2_CID_MPEG_STREAM_VBI_FMT</link>
-control.</para>
-
- <para>If the driver does not provide the <link
-linkend="v4l2-mpeg-stream-vbi-fmt">V4L2_CID_MPEG_STREAM_VBI_FMT</link>
-control, or only allows that control to be set to <link
-linkend="v4l2-mpeg-stream-vbi-fmt"><constant>
-V4L2_MPEG_STREAM_VBI_FMT_NONE</constant></link>, then the device
-cannot embed sliced VBI data in the MPEG stream.</para>
-
- <para>The <link linkend="v4l2-mpeg-stream-vbi-fmt">
-V4L2_CID_MPEG_STREAM_VBI_FMT</link> control does not implicitly start or
-stop the capture of sliced VBI data by the device driver. The
-control only indicates that sliced VBI data shall be embedded in the MPEG
-stream, if an application has negotiated that a sliced VBI service be
-captured.</para>
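-
- <para>As an illustrative sketch (error handling omitted), requesting the
-IVTV embedding format through the extended control interface could look
-like this:</para>
-
- <informalexample>
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: ask the driver to embed sliced VBI data in the MPEG stream
-   using the IVTV packet format, if the control is supported. */
-static int enable_ivtv_vbi_embedding(int fd)
-{
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&amp;ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_MPEG_STREAM_VBI_FMT;
-        ctrl.value = V4L2_MPEG_STREAM_VBI_FMT_IVTV;
-
-        memset(&amp;ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 1;
-        ctrls.controls = &amp;ctrl;
-
-        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls);
-}
-</programlisting>
- </informalexample>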
-
- <para>It may also be the case that a device can embed sliced VBI
-data in only certain types of MPEG streams: for example in an MPEG-2
-PS but not an MPEG-2 TS. In this situation, if sliced VBI data
-insertion is requested, the sliced VBI data will be embedded in MPEG
-stream types when supported, and silently omitted from MPEG stream
-types where sliced VBI data insertion is not supported by the device.
-</para>
-
- <para>The following subsections specify the format of the
-embedded sliced VBI data.</para>
-
- <section>
- <title>MPEG Stream Embedded, Sliced VBI Data Format: NONE</title>
- <para>The <link linkend="v4l2-mpeg-stream-vbi-fmt"><constant>
-V4L2_MPEG_STREAM_VBI_FMT_NONE</constant></link> embedded sliced VBI
-format shall be interpreted by drivers as a control to cease
-embedding sliced VBI data in MPEG streams. Neither the device nor
-driver shall insert "empty" embedded sliced VBI data packets in the
-MPEG stream when this format is set. No MPEG stream data structures
-are specified for this format.</para>
- </section>
-
- <section>
- <title>MPEG Stream Embedded, Sliced VBI Data Format: IVTV</title>
- <para>The <link linkend="v4l2-mpeg-stream-vbi-fmt"><constant>
-V4L2_MPEG_STREAM_VBI_FMT_IVTV</constant></link> embedded sliced VBI
-format, when supported, indicates to the driver to embed up to 36
-lines of sliced VBI data per frame in an MPEG-2 <emphasis>Private
-Stream 1 PES</emphasis> packet encapsulated in an MPEG-2 <emphasis>
-Program Pack</emphasis> in the MPEG stream.</para>
-
- <para><emphasis>Historical context</emphasis>: This format
-specification originates from a custom, embedded, sliced VBI data
-format used by the <filename>ivtv</filename> driver. This format
-has already been informally specified in the kernel sources in the
-file <filename>Documentation/video4linux/cx2341x/README.vbi</filename>.
-The maximum size of the payload and other aspects of this format
-are driven by the CX23415 MPEG decoder's capabilities and limitations
-with respect to extracting, decoding, and displaying sliced VBI data
-embedded within an MPEG stream.</para>
-
- <para>This format's use is <emphasis>not</emphasis> exclusive to
-the <filename>ivtv</filename> driver <emphasis>nor</emphasis>
-exclusive to CX2341x devices, as the sliced VBI data packet insertion
-into the MPEG stream is implemented in driver software. At least the
-<filename>cx18</filename> driver provides sliced VBI data insertion
-into an MPEG-2 PS in this format as well.</para>
-
- <para>The following definitions specify the payload of the
-MPEG-2 <emphasis>Private Stream 1 PES</emphasis> packets that contain
-sliced VBI data when <link linkend="v4l2-mpeg-stream-vbi-fmt">
-<constant>V4L2_MPEG_STREAM_VBI_FMT_IVTV</constant></link> is set.
-(The MPEG-2 <emphasis>Private Stream 1 PES</emphasis> packet header
-and encapsulating MPEG-2 <emphasis>Program Pack</emphasis> header are
-not detailed here. Please refer to the MPEG-2 specifications for
-details on those packet headers.)</para>
-
- <para>The payload of the MPEG-2 <emphasis>Private Stream 1 PES
-</emphasis> packets that contain sliced VBI data is specified by
-&v4l2-mpeg-vbi-fmt-ivtv;. The payload is variable
-length, depending on the actual number of lines of sliced VBI data
-present in a video frame. The payload may be padded at the end with
-unspecified fill bytes to align the end of the payload to a 4-byte
-boundary. The payload shall never exceed 1552 bytes (2 fields with
-18 lines/field with 43 bytes of data/line and a 4 byte magic number).
-</para>
-
- <table frame="none" pgwide="1" id="v4l2-mpeg-vbi-fmt-ivtv">
- <title>struct <structname>v4l2_mpeg_vbi_fmt_ivtv</structname>
- </title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u8</entry>
- <entry><structfield>magic</structfield>[4]</entry>
- <entry></entry>
- <entry>A "magic" constant from <xref
- linkend="v4l2-mpeg-vbi-fmt-ivtv-magic" /> that indicates
-this is a valid sliced VBI data payload and also indicates which
-member of the anonymous union, <structfield>itv0</structfield> or
-<structfield>ITV0</structfield>, to use for the payload data.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct <link linkend="v4l2-mpeg-vbi-itv0">
- <structname>v4l2_mpeg_vbi_itv0</structname></link>
- </entry>
- <entry><structfield>itv0</structfield></entry>
- <entry>The primary form of the sliced VBI data payload
-that contains anywhere from 1 to 35 lines of sliced VBI data.
-Line masks are provided in this form of the payload indicating
-which VBI lines are provided.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct <link linkend="v4l2-mpeg-vbi-itv0-1">
- <structname>v4l2_mpeg_vbi_ITV0</structname></link>
- </entry>
- <entry><structfield>ITV0</structfield></entry>
- <entry>An alternate form of the sliced VBI data payload
-used when 36 lines of sliced VBI data are present. No line masks are
-provided in this form of the payload; all valid line mask bits are
-implicitly set.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-mpeg-vbi-fmt-ivtv-magic">
- <title>Magic Constants for &v4l2-mpeg-vbi-fmt-ivtv;
- <structfield>magic</structfield> field</title>
- <tgroup cols="3">
- &cs-def;
- <thead>
- <row>
- <entry align="left">Defined Symbol</entry>
- <entry align="left">Value</entry>
- <entry align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VBI_IVTV_MAGIC0</constant>
- </entry>
- <entry>"itv0"</entry>
- <entry>Indicates the <structfield>itv0</structfield>
-member of the union in &v4l2-mpeg-vbi-fmt-ivtv; is valid.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VBI_IVTV_MAGIC1</constant>
- </entry>
- <entry>"ITV0"</entry>
- <entry>Indicates the <structfield>ITV0</structfield>
-member of the union in &v4l2-mpeg-vbi-fmt-ivtv; is valid and
-that 36 lines of sliced VBI data are present.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-mpeg-vbi-itv0">
- <title>struct <structname>v4l2_mpeg_vbi_itv0</structname>
- </title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__le32</entry>
- <entry><structfield>linemask</structfield>[2]</entry>
- <entry><para>Bitmasks indicating the VBI service lines
-present. These <structfield>linemask</structfield> values are stored
-in little endian byte order in the MPEG stream. Some reference
-<structfield>linemask</structfield> bit positions with their
-corresponding VBI line number and video field are given below.
-b<subscript>0</subscript> indicates the least significant bit of a
-<structfield>linemask</structfield> value:<screen>
-<structfield>linemask</structfield>[0] b<subscript>0</subscript>: line 6 first field
-<structfield>linemask</structfield>[0] b<subscript>17</subscript>: line 23 first field
-<structfield>linemask</structfield>[0] b<subscript>18</subscript>: line 6 second field
-<structfield>linemask</structfield>[0] b<subscript>31</subscript>: line 19 second field
-<structfield>linemask</structfield>[1] b<subscript>0</subscript>: line 20 second field
-<structfield>linemask</structfield>[1] b<subscript>3</subscript>: line 23 second field
-<structfield>linemask</structfield>[1] b<subscript>4</subscript>-b<subscript>31</subscript>: unused and set to 0</screen></para></entry>
- </row>
- <row>
- <entry>struct <link linkend="v4l2-mpeg-vbi-itv0-line">
- <structname>v4l2_mpeg_vbi_itv0_line</structname></link>
- </entry>
- <entry><structfield>line</structfield>[35]</entry>
- <entry>This is a variable length array that holds from 1
-to 35 lines of sliced VBI data. The sliced VBI data lines present
-correspond to the bits set in the <structfield>linemask</structfield>
-array, starting from b<subscript>0</subscript> of <structfield>
-linemask</structfield>[0] up through b<subscript>31</subscript> of
-<structfield>linemask</structfield>[0], and from b<subscript>0
-</subscript> of <structfield>linemask</structfield>[1] up through b
-<subscript>3</subscript> of <structfield>linemask</structfield>[1].
-<structfield>line</structfield>[0] corresponds to the first bit
-found set in the <structfield>linemask</structfield> array,
-<structfield>line</structfield>[1] corresponds to the second bit
-found set in the <structfield>linemask</structfield> array, etc.
-If no <structfield>linemask</structfield> array bits are set, then
-<structfield>line</structfield>[0] may contain one line of
-unspecified data that should be ignored by applications.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
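-
- <para>A sketch of how a consumer of the stream might count the lines
-announced by the <structfield>linemask</structfield> array; the
-<function>le32toh()</function> conversion and the
-<function>__builtin_popcount()</function> builtin are assumptions of this
-example, not part of the format:</para>
-
- <informalexample>
- <programlisting>
-#include &lt;endian.h&gt;
-#include &lt;string.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: given the payload of a Private Stream 1 PES packet, return
-   the number of sliced VBI lines carried in the "itv0" form. */
-static unsigned int itv0_line_count(const struct v4l2_mpeg_vbi_fmt_ivtv *pkt)
-{
-        if (memcmp(pkt-&gt;magic, V4L2_MPEG_VBI_IVTV_MAGIC0, 4) != 0)
-                return 0;       /* not the itv0 form of the payload */
-
-        /* Each set bit in linemask announces one entry in line[]. */
-        return __builtin_popcount(le32toh(pkt-&gt;itv0.linemask[0])) +
-               __builtin_popcount(le32toh(pkt-&gt;itv0.linemask[1]));
-}
-</programlisting>
- </informalexample>
-
- <para>The n-th element of the <structfield>line</structfield> array then
-corresponds to the n-th set bit of the <structfield>linemask</structfield>
-array, as described above.</para>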
-
- <table frame="none" pgwide="1" id="v4l2-mpeg-vbi-itv0-1">
- <title>struct <structname>v4l2_mpeg_vbi_ITV0</structname>
- </title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>struct <link linkend="v4l2-mpeg-vbi-itv0-line">
- <structname>v4l2_mpeg_vbi_itv0_line</structname></link>
- </entry>
- <entry><structfield>line</structfield>[36]</entry>
- <entry>A fixed length array of 36 lines of sliced VBI
-data. <structfield>line</structfield>[0] through <structfield>line
-</structfield>[17] correspond to lines 6 through 23 of the
-first field. <structfield>line</structfield>[18] through
-<structfield>line</structfield>[35] correspond to lines 6
-through 23 of the second field.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-mpeg-vbi-itv0-line">
- <title>struct <structname>v4l2_mpeg_vbi_itv0_line</structname>
- </title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u8</entry>
- <entry><structfield>id</structfield></entry>
- <entry>A line identifier value from
-<xref linkend="ITV0-Line-Identifier-Constants" /> that indicates
-the type of sliced VBI data stored on this line.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>data</structfield>[42]</entry>
- <entry>The sliced VBI data for the line.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="ITV0-Line-Identifier-Constants">
- <title>Line Identifiers for struct <link
- linkend="v4l2-mpeg-vbi-itv0-line"><structname>
-v4l2_mpeg_vbi_itv0_line</structname></link> <structfield>id
-</structfield> field</title>
- <tgroup cols="3">
- &cs-def;
- <thead>
- <row>
- <entry align="left">Defined Symbol</entry>
- <entry align="left">Value</entry>
- <entry align="left">Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MPEG_VBI_IVTV_TELETEXT_B</constant>
- </entry>
- <entry>1</entry>
- <entry>Refer to <link linkend="vbi-services2">
-Sliced VBI services</link> for a description of the line payload.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VBI_IVTV_CAPTION_525</constant>
- </entry>
- <entry>4</entry>
- <entry>Refer to <link linkend="vbi-services2">
-Sliced VBI services</link> for a description of the line payload.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VBI_IVTV_WSS_625</constant>
- </entry>
- <entry>5</entry>
- <entry>Refer to <link linkend="vbi-services2">
-Sliced VBI services</link> for a description of the line payload.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MPEG_VBI_IVTV_VPS</constant>
- </entry>
- <entry>7</entry>
- <entry>Refer to <link linkend="vbi-services2">
-Sliced VBI services</link> for a description of the line payload.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </section>
- </section>
diff --git a/Documentation/DocBook/media/v4l/dev-subdev.xml b/Documentation/DocBook/media/v4l/dev-subdev.xml
deleted file mode 100644
index f4bc27af83eb..000000000000
--- a/Documentation/DocBook/media/v4l/dev-subdev.xml
+++ /dev/null
@@ -1,478 +0,0 @@
- <title>Sub-device Interface</title>
-
- <para>The complex nature of V4L2 devices, where hardware is often made of
- several integrated circuits that need to interact with each other in a
- controlled way, leads to complex V4L2 drivers. The drivers usually reflect
- the hardware model in software, and model the different hardware components
- as software blocks called sub-devices.</para>
-
- <para>V4L2 sub-devices are usually kernel-only objects. If the V4L2 driver
- implements the media device API, they will automatically inherit from media
- entities. Applications will be able to enumerate the sub-devices and discover
- the hardware topology using the media entities, pads and links enumeration
- API.</para>
-
-  <para>In addition to making sub-devices discoverable, drivers can also choose
- to make them directly configurable by applications. When both the sub-device
- driver and the V4L2 device driver support this, sub-devices will feature a
- character device node on which ioctls can be called to
- <itemizedlist>
-    <listitem><para>query, read and write sub-device controls</para></listitem>
- <listitem><para>subscribe and unsubscribe to events and retrieve them</para></listitem>
- <listitem><para>negotiate image formats on individual pads</para></listitem>
- </itemizedlist>
- </para>
-
- <para>Sub-device character device nodes, conventionally named
- <filename>/dev/v4l-subdev*</filename>, use major number 81.</para>
-
- <section>
- <title>Controls</title>
- <para>Most V4L2 controls are implemented by sub-device hardware. Drivers
- usually merge all controls and expose them through video device nodes.
- Applications can control all sub-devices through a single interface.</para>
-
- <para>Complex devices sometimes implement the same control in different
- pieces of hardware. This situation is common in embedded platforms, where
- both sensors and image processing hardware implement identical functions,
-  such as contrast adjustment, white balance or faulty pixel correction. As
- the V4L2 controls API doesn't support several identical controls in a single
- device, all but one of the identical controls are hidden.</para>
-
- <para>Applications can access those hidden controls through the sub-device
- node with the V4L2 control API described in <xref linkend="control" />. The
- ioctls behave identically as when issued on V4L2 device nodes, with the
- exception that they deal only with controls implemented in the sub-device.
- </para>
-
- <para>Depending on the driver, those controls might also be exposed through
- one (or several) V4L2 device nodes.</para>
- </section>
-
- <section>
- <title>Events</title>
- <para>V4L2 sub-devices can notify applications of events as described in
- <xref linkend="event" />. The API behaves identically as when used on V4L2
- device nodes, with the exception that it only deals with events generated by
- the sub-device. Depending on the driver, those events might also be reported
- on one (or several) V4L2 device nodes.</para>
- </section>
-
- <section id="pad-level-formats">
- <title>Pad-level Formats</title>
-
-    <warning><para>Pad-level formats are only applicable to very complex devices that
- need to expose low-level format configuration to user space. Generic V4L2
- applications do <emphasis>not</emphasis> need to use the API described in
- this section.</para></warning>
-
- <note><para>For the purpose of this section, the term
- <wordasword>format</wordasword> means the combination of media bus data
- format, frame width and frame height.</para></note>
-
- <para>Image formats are typically negotiated on video capture and
- output devices using the format and <link
- linkend="vidioc-subdev-g-selection">selection</link> ioctls. The
- driver is responsible for configuring every block in the video
- pipeline according to the requested format at the pipeline input
- and/or output.</para>
-
- <para>For complex devices, such as often found in embedded systems,
- identical image sizes at the output of a pipeline can be achieved using
- different hardware configurations. One such example is shown on
- <xref linkend="pipeline-scaling" />, where
- image scaling can be performed on both the video sensor and the host image
- processing hardware.</para>
-
- <figure id="pipeline-scaling">
- <title>Image Format Negotiation on Pipelines</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="pipeline.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="pipeline.png" format="PNG" />
- </imageobject>
- <textobject>
- <phrase>High quality and high speed pipeline configuration</phrase>
- </textobject>
- </mediaobject>
- </figure>
-
-    <para>The sensor scaler is usually of lower quality than the host scaler, but
- scaling on the sensor is required to achieve higher frame rates. Depending
- on the use case (quality vs. speed), the pipeline must be configured
- differently. Applications need to configure the formats at every point in
- the pipeline explicitly.</para>
-
- <para>Drivers that implement the <link linkend="media-controller-intro">media
- API</link> can expose pad-level image format configuration to applications.
- When they do, applications can use the &VIDIOC-SUBDEV-G-FMT; and
-    &VIDIOC-SUBDEV-S-FMT; ioctls to negotiate formats on a per-pad basis.</para>
-
- <para>Applications are responsible for configuring coherent parameters on
- the whole pipeline and making sure that connected pads have compatible
-    formats. The pipeline is checked for format mismatches at &VIDIOC-STREAMON;
- time, and an &EPIPE; is then returned if the configuration is
- invalid.</para>
-
- <para>Pad-level image format configuration support can be tested by calling
-    the &VIDIOC-SUBDEV-G-FMT; ioctl on pad 0. If the driver returns an
-    &EINVAL;, pad-level format configuration is not supported by the
-    sub-device.</para>
-
- <section>
- <title>Format Negotiation</title>
-
- <para>Acceptable formats on pads can (and usually do) depend on a number
- of external parameters, such as formats on other pads, active links, or
- even controls. Finding a combination of formats on all pads in a video
-      pipeline, acceptable to both application and driver, can't rely on format
-      enumeration alone. A format negotiation mechanism is required.</para>
-
- <para>Central to the format negotiation mechanism are the get/set format
- operations. When called with the <structfield>which</structfield> argument
- set to <constant>V4L2_SUBDEV_FORMAT_TRY</constant>, the
- &VIDIOC-SUBDEV-G-FMT; and &VIDIOC-SUBDEV-S-FMT; ioctls operate on a set of
-      format parameters that are not connected to the hardware configuration.
- Modifying those 'try' formats leaves the device state untouched (this
- applies to both the software state stored in the driver and the hardware
- state stored in the device itself).</para>
-
- <para>While not kept as part of the device state, try formats are stored
- in the sub-device file handles. A &VIDIOC-SUBDEV-G-FMT; call will return
- the last try format set <emphasis>on the same sub-device file
- handle</emphasis>. Several applications querying the same sub-device at
- the same time will thus not interact with each other.</para>
-
- <para>To find out whether a particular format is supported by the device,
- applications use the &VIDIOC-SUBDEV-S-FMT; ioctl. Drivers verify and, if
- needed, change the requested <structfield>format</structfield> based on
- device requirements and return the possibly modified value. Applications
- can then choose to try a different format or accept the returned value and
- continue.</para>
-
- <para>Formats returned by the driver during a negotiation iteration are
- guaranteed to be supported by the device. In particular, drivers guarantee
- that a returned format will not be further changed if passed to an
- &VIDIOC-SUBDEV-S-FMT; call as-is (as long as external parameters, such as
- formats on other pads or links' configuration are not changed).</para>
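-
-      <para>A sketch of try format negotiation on pad 0 is shown below. The
-      media bus code and sizes are only examples and error handling is
-      omitted; the sketch uses the
-      <constant>MEDIA_BUS_FMT_SGRBG8_1X8</constant> spelling of the media
-      bus code referred to elsewhere in this section.</para>
-
-      <informalexample>
-        <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/media-bus-format.h&gt;
-#include &lt;linux/v4l2-subdev.h&gt;
-
-/* Sketch: propose a try format on pad 0 of a sub-device node and
-   inspect what the driver is willing to accept. */
-static int try_pad_format(int fd, unsigned int width, unsigned int height)
-{
-        struct v4l2_subdev_format fmt;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        fmt.which = V4L2_SUBDEV_FORMAT_TRY;
-        fmt.pad = 0;
-        fmt.format.width = width;
-        fmt.format.height = height;
-        fmt.format.code = MEDIA_BUS_FMT_SGRBG8_1X8;
-        fmt.format.field = V4L2_FIELD_NONE;
-
-        if (ioctl(fd, VIDIOC_SUBDEV_S_FMT, &amp;fmt) &lt; 0)
-                return -1;
-
-        /* fmt.format now holds the possibly adjusted values the driver
-           guarantees to support on this pad. */
-        return 0;
-}
-</programlisting>
-      </informalexample>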
-
- <para>Drivers automatically propagate formats inside sub-devices. When a
- try or active format is set on a pad, corresponding formats on other pads
- of the same sub-device can be modified by the driver. Drivers are free to
- modify formats as required by the device. However, they should comply with
- the following rules when possible:
- <itemizedlist>
- <listitem><para>Formats should be propagated from sink pads to source pads.
- Modifying a format on a source pad should not modify the format on any
- sink pad.</para></listitem>
- <listitem><para>Sub-devices that scale frames using variable scaling factors
-	should reset the scale factors to default values when sink pad formats
-	are modified. If the 1:1 scaling ratio is supported, this means that
-	source pad formats should be reset to the sink pad formats.</para></listitem>
- </itemizedlist>
- </para>
-
- <para>Formats are not propagated across links, as that would involve
- propagating them from one sub-device file handle to another. Applications
- must then take care to configure both ends of every link explicitly with
- compatible formats. Identical formats on the two ends of a link are
- guaranteed to be compatible. Drivers are free to accept different formats
- matching device requirements as being compatible.</para>
-
- <para><xref linkend="sample-pipeline-config" />
- shows a sample configuration sequence for the pipeline described in
- <xref linkend="pipeline-scaling" /> (table
- columns list entity names and pad numbers).</para>
-
- <table pgwide="0" frame="none" id="sample-pipeline-config">
- <title>Sample Pipeline Configuration</title>
- <tgroup cols="3">
- <colspec colname="what"/>
- <colspec colname="sensor-0 format" />
- <colspec colname="frontend-0 format" />
- <colspec colname="frontend-1 format" />
- <colspec colname="scaler-0 format" />
- <colspec colname="scaler-0 compose" />
- <colspec colname="scaler-1 format" />
- <thead>
- <row>
- <entry></entry>
- <entry>Sensor/0 format</entry>
- <entry>Frontend/0 format</entry>
- <entry>Frontend/1 format</entry>
- <entry>Scaler/0 format</entry>
- <entry>Scaler/0 compose selection rectangle</entry>
- <entry>Scaler/1 format</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Initial state</entry>
- <entry>2048x1536/SGRBG8_1X8</entry>
- <entry>(default)</entry>
- <entry>(default)</entry>
- <entry>(default)</entry>
- <entry>(default)</entry>
- <entry>(default)</entry>
- </row>
- <row>
- <entry>Configure frontend sink format</entry>
- <entry>2048x1536/SGRBG8_1X8</entry>
- <entry><emphasis>2048x1536/SGRBG8_1X8</emphasis></entry>
- <entry><emphasis>2046x1534/SGRBG8_1X8</emphasis></entry>
- <entry>(default)</entry>
- <entry>(default)</entry>
- <entry>(default)</entry>
- </row>
- <row>
- <entry>Configure scaler sink format</entry>
- <entry>2048x1536/SGRBG8_1X8</entry>
- <entry>2048x1536/SGRBG8_1X8</entry>
- <entry>2046x1534/SGRBG8_1X8</entry>
- <entry><emphasis>2046x1534/SGRBG8_1X8</emphasis></entry>
- <entry><emphasis>0,0/2046x1534</emphasis></entry>
- <entry><emphasis>2046x1534/SGRBG8_1X8</emphasis></entry>
- </row>
- <row>
- <entry>Configure scaler sink compose selection</entry>
- <entry>2048x1536/SGRBG8_1X8</entry>
- <entry>2048x1536/SGRBG8_1X8</entry>
- <entry>2046x1534/SGRBG8_1X8</entry>
- <entry>2046x1534/SGRBG8_1X8</entry>
- <entry><emphasis>0,0/1280x960</emphasis></entry>
- <entry><emphasis>1280x960/SGRBG8_1X8</emphasis></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>
- <orderedlist>
- <listitem><para>Initial state. The sensor source pad format is
- set to its native 3MP size and V4L2_MBUS_FMT_SGRBG8_1X8
- media bus code. Formats on the host frontend and scaler sink
- and source pads have the default values, as well as the
- compose rectangle on the scaler's sink pad.</para></listitem>
-
- <listitem><para>The application configures the frontend sink
- pad format's size to 2048x1536 and its media bus code to
-	    V4L2_MBUS_FMT_SGRBG8_1X8. The driver propagates the format to
- the frontend source pad.</para></listitem>
-
-	  <listitem><para>The application configures the scaler sink pad
-	    format's size to 2046x1534 and the media bus code to
-	    V4L2_MBUS_FMT_SGRBG8_1X8 to match the frontend source size and
-	    media bus code. The driver propagates the size to the
-	    compose selection rectangle on the scaler's sink pad, and the
-	    format to the scaler source pad.</para></listitem>
-
- <listitem><para>The application configures the size of the compose
-	    selection rectangle of the scaler's sink pad to 1280x960. The driver
- propagates the size to the scaler's source pad
- format.</para></listitem>
-
- </orderedlist>
- </para>
-
- <para>When satisfied with the try results, applications can set the active
- formats by setting the <structfield>which</structfield> argument to
- <constant>V4L2_SUBDEV_FORMAT_ACTIVE</constant>. Active formats are changed
- exactly as try formats by drivers. To avoid modifying the hardware state
- during format negotiation, applications should negotiate try formats first
- and then modify the active settings using the try formats returned during
- the last negotiation iteration. This guarantees that the active format
- will be applied as-is by the driver without being modified.
- </para>
- </section>
-
- <section id="v4l2-subdev-selections">
- <title>Selections: cropping, scaling and composition</title>
-
- <para>Many sub-devices support cropping frames on their input or output
-      pads (or possibly even on both). Cropping is used to select the area of
- interest in an image, typically on an image sensor or a video decoder. It can
- also be used as part of digital zoom implementations to select the area of
- the image that will be scaled up.</para>
-
- <para>Crop settings are defined by a crop rectangle and represented in a
- &v4l2-rect; by the coordinates of the top left corner and the rectangle
- size. Both the coordinates and sizes are expressed in pixels.</para>
-
- <para>As for pad formats, drivers store try and active
-      rectangles for the selection targets (see <xref
-      linkend="v4l2-selections-common" />).</para>
-
- <para>On sink pads, cropping is applied relative to the
- current pad format. The pad format represents the image size as
- received by the sub-device from the previous block in the
- pipeline, and the crop rectangle represents the sub-image that
- will be transmitted further inside the sub-device for
- processing.</para>
-
- <para>The scaling operation changes the size of the image by
- scaling it to new dimensions. The scaling ratio isn't specified
- explicitly, but is implied from the original and scaled image
- sizes. Both sizes are represented by &v4l2-rect;.</para>
-
- <para>Scaling support is optional. When supported by a subdev,
- the crop rectangle on the subdev's sink pad is scaled to the
- size configured using the &VIDIOC-SUBDEV-S-SELECTION; IOCTL
- using <constant>V4L2_SEL_TGT_COMPOSE</constant>
- selection target on the same pad. If the subdev supports scaling
- but not composing, the top and left values are not used and must
- always be set to zero.</para>
-
- <para>On source pads, cropping is similar to sink pads, with the
- exception that the source size from which the cropping is
- performed, is the COMPOSE rectangle on the sink pad. In both
- sink and source pads, the crop rectangle must be entirely
- contained inside the source image size for the crop
- operation.</para>
-
-      <para>Drivers should always use the closest possible rectangle to
-      the one the user requests on all selection targets, unless
-      specifically told otherwise. The
-      <constant>V4L2_SEL_FLAG_GE</constant> and
-      <constant>V4L2_SEL_FLAG_LE</constant> flags may be
-      used to round the image size either up or down (see <xref
-      linkend="v4l2-selection-flags" />).</para>
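-
-      <para>A sketch setting a crop rectangle on a sink pad; the pad
-      number, rectangle and flag are examples only and error handling is
-      omitted:</para>
-
-      <informalexample>
-        <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/v4l2-subdev.h&gt;
-
-/* Sketch: request a crop rectangle on sink pad 0 of a sub-device,
-   letting the driver round the size down if needed. */
-static int set_sink_crop(int fd)
-{
-        struct v4l2_subdev_selection sel;
-
-        memset(&amp;sel, 0, sizeof(sel));
-        sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-        sel.pad = 0;
-        sel.target = V4L2_SEL_TGT_CROP;
-        sel.flags = V4L2_SEL_FLAG_LE;   /* result no larger than requested */
-        sel.r.left = 0;
-        sel.r.top = 0;
-        sel.r.width = 2046;
-        sel.r.height = 1534;
-
-        if (ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel) &lt; 0)
-                return -1;
-
-        /* sel.r now holds the rectangle actually applied by the driver. */
-        return 0;
-}
-</programlisting>
-      </informalexample>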
- </section>
-
- <section>
- <title>Types of selection targets</title>
-
- <section>
- <title>Actual targets</title>
-
- <para>Actual targets (without a postfix) reflect the actual
- hardware configuration at any point of time. There is a BOUNDS
- target corresponding to every actual target.</para>
- </section>
-
- <section>
- <title>BOUNDS targets</title>
-
-      <para>A BOUNDS target is the smallest rectangle that contains all
- valid actual rectangles. It may not be possible to set the actual
- rectangle as large as the BOUNDS rectangle, however. This may be
- because e.g. a sensor's pixel array is not rectangular but
- cross-shaped or round. The maximum size may also be smaller than the
- BOUNDS rectangle.</para>
- </section>
-
- </section>
-
- <section>
- <title>Order of configuration and format propagation</title>
-
- <para>Inside subdevs, the order of image processing steps will
- always be from the sink pad towards the source pad. This is also
- reflected in the order in which the configuration must be
- performed by the user: the changes made will be propagated to
- any subsequent stages. If this behaviour is not desired, the
-      user must set the
-      <constant>V4L2_SEL_FLAG_KEEP_CONFIG</constant> flag. This
-      flag prevents the changes from being propagated under any
-      circumstances. This may also cause the accessed rectangle to be
- adjusted by the driver, depending on the properties of the
- underlying hardware.</para>
-
-      <para>The coordinates given for a step always refer to the actual
-      size of the previous step. The exception to this rule is the source
-      compose rectangle, which refers to the sink compose bounds
-      rectangle --- if it is supported by the hardware. A sketch
-      exercising this order is shown after the list below.</para>
-
- <orderedlist>
- <listitem><para>Sink pad format. The user configures the sink pad
- format. This format defines the parameters of the image the
- entity receives through the pad for further processing.</para></listitem>
-
- <listitem><para>Sink pad actual crop selection. The sink pad crop
- defines the crop performed to the sink pad format.</para></listitem>
-
- <listitem><para>Sink pad actual compose selection. The size of the
- sink pad compose rectangle defines the scaling ratio compared
- to the size of the sink pad crop rectangle. The location of
- the compose rectangle specifies the location of the actual
- sink compose rectangle in the sink compose bounds
- rectangle.</para></listitem>
-
- <listitem><para>Source pad actual crop selection. Crop on the source
- pad defines crop performed to the image in the sink compose
- bounds rectangle.</para></listitem>
-
- <listitem><para>Source pad format. The source pad format defines the
- output pixel format of the subdev, as well as the other
- parameters with the exception of the image width and height.
- Width and height are defined by the size of the source pad
- actual crop selection.</para></listitem>
- </orderedlist>
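-
-      <para>The ordering above might be exercised as in the following
-      sketch. Pad numbers, sizes and the media bus code are examples
-      only and error handling is omitted:</para>
-
-      <informalexample>
-        <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/media-bus-format.h&gt;
-#include &lt;linux/v4l2-subdev.h&gt;
-
-/* Sketch: walk through the configuration order on one subdev. */
-static void configure_in_order(int fd)
-{
-        struct v4l2_subdev_format fmt;
-        struct v4l2_subdev_selection sel;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        memset(&amp;sel, 0, sizeof(sel));
-
-        /* 1. Sink pad format */
-        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-        fmt.pad = 0;
-        fmt.format.width = 2048;
-        fmt.format.height = 1536;
-        fmt.format.code = MEDIA_BUS_FMT_SGRBG8_1X8;
-        ioctl(fd, VIDIOC_SUBDEV_S_FMT, &amp;fmt);
-
-        /* 2. Sink pad crop, relative to the sink pad format */
-        sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-        sel.pad = 0;
-        sel.target = V4L2_SEL_TGT_CROP;
-        sel.r.width = 2046;
-        sel.r.height = 1534;
-        ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel);
-
-        /* 3. Sink pad compose: its size defines the scaling ratio */
-        sel.target = V4L2_SEL_TGT_COMPOSE;
-        sel.r.width = 1280;
-        sel.r.height = 960;
-        ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel);
-
-        /* 4. Source pad crop, relative to the sink compose rectangle */
-        sel.pad = 1;
-        sel.target = V4L2_SEL_TGT_CROP;
-        ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &amp;sel);
-
-        /* 5. Read back the resulting source pad format */
-        fmt.pad = 1;
-        ioctl(fd, VIDIOC_SUBDEV_G_FMT, &amp;fmt);
-}
-</programlisting>
-      </informalexample>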
-
-      <para>Accessing any of the above rectangles not supported by the
-      subdev will return <constant>EINVAL</constant>. Any rectangle whose
-      coordinates refer to an unsupported rectangle will instead refer
-      to the previous supported rectangle. For example,
- if sink crop is not supported, the compose selection will refer
- to the sink pad format dimensions instead.</para>
-
- <figure id="subdev-image-processing-crop">
- <title>Image processing in subdevs: simple crop example</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="subdev-image-processing-crop.svg"
- format="SVG" scale="200" />
- </imageobject>
- </mediaobject>
- </figure>
-
- <para>In the above example, the subdev supports cropping on its
- sink pad. To configure it, the user sets the media bus format on
- the subdev's sink pad. Now the actual crop rectangle can be set
- on the sink pad --- the location and size of this rectangle
- reflect the location and size of a rectangle to be cropped from
- the sink format. The size of the sink crop rectangle will also
- be the size of the format of the subdev's source pad.</para>
-
- <figure id="subdev-image-processing-scaling-multi-source">
- <title>Image processing in subdevs: scaling with multiple sources</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="subdev-image-processing-scaling-multi-source.svg"
- format="SVG" scale="200" />
- </imageobject>
- </mediaobject>
- </figure>
-
- <para>In this example, the subdev is capable of first cropping,
- then scaling and finally cropping for two source pads
- individually from the resulting scaled image. The location of
-      the scaled image in the cropped image is ignored in the sink compose
-      target. The locations of both source crop rectangles refer to the
-      sink scaling rectangle; each source crop rectangle independently
-      crops an area from it at the location the rectangle specifies.</para>
-
- <figure id="subdev-image-processing-full">
- <title>Image processing in subdevs: scaling and composition
- with multiple sinks and sources</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="subdev-image-processing-full.svg"
- format="SVG" scale="200" />
- </imageobject>
- </mediaobject>
- </figure>
-
- <para>The subdev driver supports two sink pads and two source
- pads. The images from both of the sink pads are individually
- cropped, then scaled and further composed on the composition
- bounds rectangle. From that, two independent streams are cropped
- and sent out of the subdev from the source pads.</para>
-
- </section>
-
- </section>
-
- &sub-subdev-formats;
diff --git a/Documentation/DocBook/media/v4l/dev-teletext.xml b/Documentation/DocBook/media/v4l/dev-teletext.xml
deleted file mode 100644
index bd21c64d70f3..000000000000
--- a/Documentation/DocBook/media/v4l/dev-teletext.xml
+++ /dev/null
@@ -1,29 +0,0 @@
- <title>Teletext Interface</title>
-
- <para>This interface was aimed at devices receiving and demodulating
-Teletext data [<xref linkend="ets300706" />, <xref linkend="itu653" />], evaluating the
-Teletext packets and storing formatted pages in cache memory. Such
-devices are usually implemented as microcontrollers with a serial
-interface (I<superscript>2</superscript>C) and could be found on old
-TV cards, dedicated Teletext decoding cards and home-brew devices
-connected to the PC parallel port.</para>
-
- <para>The Teletext API was designed by Martin Buck. It was defined in
-the kernel header file <filename>linux/videotext.h</filename>, the
-specification is available from <ulink url="ftp://ftp.gwdg.de/pub/linux/misc/videotext/">
-ftp://ftp.gwdg.de/pub/linux/misc/videotext/</ulink>. (Videotext is the name of
-the German public television Teletext service.)</para>
-
- <para>Eventually the Teletext API was integrated into the V4L API
-with character device file names <filename>/dev/vtx0</filename> to
-<filename>/dev/vtx31</filename>, device major number 81, minor numbers
-192 to 223.</para>
-
- <para>However, Teletext decoders were quickly replaced by more
-generic VBI demodulators, and those dedicated Teletext decoders no longer exist.
-For many years the vtx devices were still around, even though nobody used
-them. So the decision was made to finally remove support for the Teletext API in
-kernel 2.6.37.</para>
-
- <para>Modern devices all use the <link linkend="raw-vbi">raw</link> or
-<link linkend="sliced">sliced</link> VBI API.</para>
diff --git a/Documentation/DocBook/media/v4l/driver.xml b/Documentation/DocBook/media/v4l/driver.xml
deleted file mode 100644
index 7c6638bacedb..000000000000
--- a/Documentation/DocBook/media/v4l/driver.xml
+++ /dev/null
@@ -1,200 +0,0 @@
- <title>V4L2 Driver Programming</title>
-
- <!-- This part defines the interface between the "videodev"
- module and individual drivers. -->
-
- <para>to do</para>
-<!--
- <para>V4L2 is a two-layer driver system. The top layer is the "videodev"
-kernel module. When videodev initializes it registers as character device
-with major number 81, and it registers a set of file operations. All V4L2
-drivers are really clients of videodev, which calls V4L2 drivers through
-driver method functions. V4L2 drivers are also written as kernel modules.
-After probing the hardware they register one or more devices with
-videodev.</para>
-
- <section id="driver-modules">
- <title>Driver Modules</title>
-
- <para>V4L2 driver modules must have an initialization function which is
-called after the module has been loaded into the kernel, and an exit function
-which is called before the module is removed. When the driver is compiled
-into the kernel these functions are called at system boot and shutdown
-time.</para>
-
- <informalexample>
- <programlisting>
-#include &lt;linux/module.h&gt;
-
-/* Export information about this module. For details and other useful
- macros see <filename>linux/module.h</filename>. */
-MODULE_DESCRIPTION("my - driver for my hardware");
-MODULE_AUTHOR("Your name here");
-MODULE_LICENSE("GPL");
-
-static void
-my_module_exit (void)
-{
- /* Free all resources allocated by my_module_init(). */
-}
-
-static int
-my_module_init (void)
-{
- /* Bind the driver to the supported hardware, see
- <link linkend="driver-pci"> and
- <link linkend="driver-usb"> for examples. */
-
- return 0; /* a negative value on error, 0 on success. */
-}
-
-/* Export module functions. */
-module_init (my_module_init);
-module_exit (my_module_exit);
-</programlisting>
- </informalexample>
-
- <para>Users can add parameters when kernel modules are inserted:</para>
-
- <informalexample>
- <programlisting>
-#include &lt;linux/moduleparam.h&gt;
-
-static int my_option = 123;
-static int my_option_array[47];
-
-/* Export the symbol, an int, with access permissions 0644.
- See <filename>linux/moduleparam.h</filename> for other types. */
-module_param (my_option, int, 0644);
-module_param_array (my_option_array, int, NULL, 0644);
-
-MODULE_PARM_DESC (my_option, "Does magic things, default 123");
-</programlisting>
- </informalexample>
-
- <para>One parameter should be supported by all V4L2 drivers: the minor
-number of the device it will register. Its purpose is to predictably link
-V4L2 drivers to device nodes if more than one video device is installed. Use the
-name of the device node followed by a "_nr" suffix, for example "video_nr"
-for <filename>/dev/video</filename>.</para>
-
- <informalexample>
- <programlisting>
-/* Minor number of the device, -1 to allocate the first unused. */
-static int video_nr = -1;
-
-module_param (video_nr, int, 0444);
-</programlisting>
- </informalexample>
- </section>
-
- <section id="driver-pci">
- <title>PCI Devices</title>
-
- <para>PCI devices are initialized like this:</para>
-
- <informalexample>
- <programlisting>
-typedef struct {
- /* State of one physical device. */
-} my_device;
-
-static int
-my_resume (struct pci_dev * pci_dev)
-{
- /* Restore the suspended device to working state. */
-}
-
-static int
-my_suspend (struct pci_dev * pci_dev,
- pm_message_t state)
-{
- /* This function is called before the system goes to sleep.
- Stop all DMAs and disable interrupts, then put the device
- into a low power state. For details see the kernel
- sources under <filename>Documentation/power</filename>. */
-
- return 0; /* a negative value on error, 0 on success. */
-}
-
-static void
-my_remove (struct pci_dev * pci_dev)
-{
- my_device *my = pci_get_drvdata (pci_dev);
-
- /* Describe me. */
-}
-
-static int
-my_probe (struct pci_dev * pci_dev,
- const struct pci_device_id * pci_id)
-{
- my_device *my;
-
- /* Describe me. */
-
- /* You can allocate per-device data here and store a pointer
- to it in the pci_dev structure. */
- my = ...;
- pci_set_drvdata (pci_dev, my);
-
- return 0; /* a negative value on error, 0 on success. */
-}
-
-/* A list of supported PCI devices. */
-static struct pci_device_id
-my_pci_device_ids [] = {
- { PCI_VENDOR_ID_FOO, PCI_DEVICE_ID_BAR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { 0 } /* end of list */
-};
-
-/* Load our module if supported PCI devices are installed. */
-MODULE_DEVICE_TABLE (pci, my_pci_device_ids);
-
-static struct pci_driver
-my_pci_driver = {
- .name = "my",
- .id_table = my_pci_device_ids,
-
- .probe = my_probe,
- .remove = my_remove,
-
- /* Power management functions. */
- .suspend = my_suspend,
- .resume = my_resume,
-};
-
-static void
-my_module_exit (void)
-{
- pci_unregister_driver (&my_pci_driver);
-}
-
-static int
-my_module_init (void)
-{
- return pci_register_driver (&my_pci_driver);
-}
-</programlisting>
- </informalexample>
- </section>
-
- <section id="driver-usb">
- <title>USB Devices</title>
- <para>to do</para>
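- <para>As a rough sketch of what this section will eventually cover: a
-USB driver binds to its hardware much like the PCI example above, using
-probe and disconnect callbacks. The vendor and product IDs and the
-<structname>my_usb_device</structname> type below are placeholders, not a
-real device.</para>
-
- <informalexample>
- <programlisting>
-#include &lt;linux/usb.h&gt;
-#include &lt;linux/slab.h&gt;
-
-typedef struct {
- /* State of one physical device. */
-} my_usb_device;
-
-static int
-my_usb_probe (struct usb_interface * intf,
- const struct usb_device_id * id)
-{
- my_usb_device *my;
-
- my = kzalloc (sizeof (*my), GFP_KERNEL);
- if (!my)
-  return -ENOMEM;
-
- /* Initialize the hardware, then attach the per-device
-  data to the interface. */
- usb_set_intfdata (intf, my);
-
- return 0; /* a negative value on error, 0 on success. */
-}
-
-static void
-my_usb_disconnect (struct usb_interface * intf)
-{
- my_usb_device *my = usb_get_intfdata (intf);
-
- /* Undo everything done in my_usb_probe(). */
- kfree (my);
-}
-
-/* A list of supported USB devices, the IDs are placeholders. */
-static struct usb_device_id
-my_usb_device_ids [] = {
- { USB_DEVICE (0x1234, 0x5678) },
- { } /* end of list */
-};
-
-MODULE_DEVICE_TABLE (usb, my_usb_device_ids);
-
-static struct usb_driver
-my_usb_driver = {
- .name = "my",
- .id_table = my_usb_device_ids,
- .probe = my_usb_probe,
- .disconnect = my_usb_disconnect,
-};
-
-static int
-my_module_init (void)
-{
- return usb_register (&my_usb_driver);
-}
-
-static void
-my_module_exit (void)
-{
- usb_deregister (&my_usb_driver);
-}
-</programlisting>
- </informalexample>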
- </section>
- <section id="driver-registering">
- <title>Registering V4L2 Drivers</title>
-
- <para>After a V4L2 driver has probed the hardware it registers one or more
-devices with the videodev module.</para>
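-
- <para>A minimal sketch of this registration step is shown below. It
-assumes <structname>my_fops</structname> is the driver's file operations
-structure (see the next section) and reuses the <varname>video_nr</varname>
-module parameter introduced above; a complete driver also sets up a
-<structname>v4l2_device</structname> and ioctl operations, which are
-omitted here.</para>
-
- <informalexample>
- <programlisting>
-#include &lt;media/v4l2-dev.h&gt;
-
-static struct video_device *my_vdev;
-
-static int
-my_register_video_device (void)
-{
- my_vdev = video_device_alloc ();
- if (!my_vdev)
-  return -ENOMEM;
-
- strlcpy (my_vdev->name, "my", sizeof (my_vdev->name));
- my_vdev->fops = &my_fops;
- my_vdev->release = video_device_release;
-
- /* video_nr is the module parameter described above,
-  -1 lets videodev pick the first unused minor. */
- return video_register_device (my_vdev, VFL_TYPE_GRABBER, video_nr);
-}
-
-static void
-my_unregister_video_device (void)
-{
- video_unregister_device (my_vdev);
-}
-</programlisting>
- </informalexample>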
- </section>
- <section id="driver-file-ops">
- <title>File Operations</title>
- <para>to do</para>
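- <para>As a rough sketch of what this section will eventually cover: a
-driver fills in a <structname>v4l2_file_operations</structname> structure
-and points its <structname>video_device</structname> at it. The helpers
-shown below (<function>v4l2_fh_open()</function>,
-<function>v4l2_fh_release()</function> and
-<function>video_ioctl2()</function>) are one common pattern, not a
-requirement.</para>
-
- <informalexample>
- <programlisting>
-#include &lt;media/v4l2-dev.h&gt;
-#include &lt;media/v4l2-fh.h&gt;
-#include &lt;media/v4l2-ioctl.h&gt;
-
-static const struct v4l2_file_operations my_fops = {
- .owner = THIS_MODULE,
- .open = v4l2_fh_open,
- .release = v4l2_fh_release,
- .unlocked_ioctl = video_ioctl2,
- /* .read, .poll and .mmap are filled in according to the
-  I/O methods the driver supports. */
-};
-</programlisting>
- </informalexample>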
- </section>
- <section id="driver-internal-api">
- <title>Internal API</title>
- <para>to do</para>
- </section>
--->
diff --git a/Documentation/DocBook/media/v4l/fdl-appendix.xml b/Documentation/DocBook/media/v4l/fdl-appendix.xml
deleted file mode 100644
index 71299a3897c4..000000000000
--- a/Documentation/DocBook/media/v4l/fdl-appendix.xml
+++ /dev/null
@@ -1,671 +0,0 @@
-<!--
- The GNU Free Documentation License 1.1 in DocBook
- Markup by Eric Baudais <baudais@okstate.edu>
- Maintained by the GNOME Documentation Project
- http://live.gnome.org/DocumentationProject
- Version: 1.0.1
- Last Modified: Nov 16, 2000
--->
-
-<appendix id="fdl">
- <appendixinfo>
- <releaseinfo>
- Version 1.1, March 2000
- </releaseinfo>
- <copyright>
- <year>2000</year><holder>Free Software Foundation, Inc.</holder>
- </copyright>
- <legalnotice id="fdl-legalnotice">
- <para>
- <address>Free Software Foundation, Inc. <street>59 Temple Place,
- Suite 330</street>, <city>Boston</city>, <state>MA</state>
- <postcode>02111-1307</postcode> <country>USA</country></address>
- Everyone is permitted to copy and distribute verbatim copies of this
- license document, but changing it is not allowed.
- </para>
- </legalnotice>
- </appendixinfo>
- <title>GNU Free Documentation License</title>
-
- <sect1 id="fdl-preamble">
- <title>0. PREAMBLE</title>
- <para>
- The purpose of this License is to make a manual, textbook, or
- other written document <quote>free</quote> in the sense of
- freedom: to assure everyone the effective freedom to copy and
- redistribute it, with or without modifying it, either
- commercially or noncommercially. Secondarily, this License
- preserves for the author and publisher a way to get credit for
- their work, while not being considered responsible for
- modifications made by others.
- </para>
-
- <para>
- This License is a kind of <quote>copyleft</quote>, which means
- that derivative works of the document must themselves be free in
- the same sense. It complements the GNU General Public License,
- which is a copyleft license designed for free software.
- </para>
-
- <para>
- We have designed this License in order to use it for manuals for
- free software, because free software needs free documentation: a
- free program should come with manuals providing the same
- freedoms that the software does. But this License is not limited
- to software manuals; it can be used for any textual work,
- regardless of subject matter or whether it is published as a
- printed book. We recommend this License principally for works
- whose purpose is instruction or reference.
- </para>
- </sect1>
- <sect1 id="fdl-section1">
- <title>1. APPLICABILITY AND DEFINITIONS</title>
- <para id="fdl-document">
- This License applies to any manual or other work that contains a
- notice placed by the copyright holder saying it can be
- distributed under the terms of this License. The
- <quote>Document</quote>, below, refers to any such manual or
- work. Any member of the public is a licensee, and is addressed
- as <quote>you</quote>.
- </para>
-
- <para id="fdl-modified">
- A <quote>Modified Version</quote> of the Document means any work
- containing the Document or a portion of it, either copied
- verbatim, or with modifications and/or translated into another
- language.
- </para>
-
- <para id="fdl-secondary">
- A <quote>Secondary Section</quote> is a named appendix or a
- front-matter section of the <link
- linkend="fdl-document">Document</link> that deals exclusively
- with the relationship of the publishers or authors of the
- Document to the Document's overall subject (or to related
- matters) and contains nothing that could fall directly within
- that overall subject. (For example, if the Document is in part a
- textbook of mathematics, a Secondary Section may not explain any
- mathematics.) The relationship could be a matter of historical
- connection with the subject or with related matters, or of
- legal, commercial, philosophical, ethical or political position
- regarding them.
- </para>
-
- <para id="fdl-invariant">
- The <quote>Invariant Sections</quote> are certain <link
- linkend="fdl-secondary"> Secondary Sections</link> whose titles
- are designated, as being those of Invariant Sections, in the
- notice that says that the <link
- linkend="fdl-document">Document</link> is released under this
- License.
- </para>
-
- <para id="fdl-cover-texts">
- The <quote>Cover Texts</quote> are certain short passages of
- text that are listed, as Front-Cover Texts or Back-Cover Texts,
- in the notice that says that the <link
- linkend="fdl-document">Document</link> is released under this
- License.
- </para>
-
- <para id="fdl-transparent">
- A <quote>Transparent</quote> copy of the <link
- linkend="fdl-document"> Document</link> means a machine-readable
- copy, represented in a format whose specification is available
- to the general public, whose contents can be viewed and edited
- directly and straightforwardly with generic text editors or (for
- images composed of pixels) generic paint programs or (for
- drawings) some widely available drawing editor, and that is
- suitable for input to text formatters or for automatic
- translation to a variety of formats suitable for input to text
- formatters. A copy made in an otherwise Transparent file format
- whose markup has been designed to thwart or discourage
- subsequent modification by readers is not Transparent. A copy
- that is not <quote>Transparent</quote> is called
- <quote>Opaque</quote>.
- </para>
-
- <para>
- Examples of suitable formats for Transparent copies include
- plain ASCII without markup, Texinfo input format, LaTeX input
- format, SGML or XML using a publicly available DTD, and
- standard-conforming simple HTML designed for human
- modification. Opaque formats include PostScript, PDF,
- proprietary formats that can be read and edited only by
- proprietary word processors, SGML or XML for which the DTD
- and/or processing tools are not generally available, and the
- machine-generated HTML produced by some word processors for
- output purposes only.
- </para>
-
- <para id="fdl-title-page">
- The <quote>Title Page</quote> means, for a printed book, the
- title page itself, plus such following pages as are needed to
- hold, legibly, the material this License requires to appear in
- the title page. For works in formats which do not have any title
- page as such, <quote>Title Page</quote> means the text near the
- most prominent appearance of the work's title, preceding the
- beginning of the body of the text.
- </para>
- </sect1>
-
- <sect1 id="fdl-section2">
- <title>2. VERBATIM COPYING</title>
- <para>
- You may copy and distribute the <link
- linkend="fdl-document">Document</link> in any medium, either
- commercially or noncommercially, provided that this License, the
- copyright notices, and the license notice saying this License
- applies to the Document are reproduced in all copies, and that
- you add no other conditions whatsoever to those of this
- License. You may not use technical measures to obstruct or
- control the reading or further copying of the copies you make or
- distribute. However, you may accept compensation in exchange for
- copies. If you distribute a large enough number of copies you
- must also follow the conditions in <link
- linkend="fdl-section3">section 3</link>.
- </para>
-
- <para>
- You may also lend copies, under the same conditions stated
- above, and you may publicly display copies.
- </para>
- </sect1>
-
- <sect1 id="fdl-section3">
- <title>3. COPYING IN QUANTITY</title>
- <para>
- If you publish printed copies of the <link
- linkend="fdl-document">Document</link> numbering more than 100,
- and the Document's license notice requires <link
- linkend="fdl-cover-texts">Cover Texts</link>, you must enclose
- the copies in covers that carry, clearly and legibly, all these
- Cover Texts: Front-Cover Texts on the front cover, and
- Back-Cover Texts on the back cover. Both covers must also
- clearly and legibly identify you as the publisher of these
- copies. The front cover must present the full title with all
- words of the title equally prominent and visible. You may add
- other material on the covers in addition. Copying with changes
- limited to the covers, as long as they preserve the title of the
- <link linkend="fdl-document">Document</link> and satisfy these
- conditions, can be treated as verbatim copying in other
- respects.
- </para>
-
- <para>
- If the required texts for either cover are too voluminous to fit
- legibly, you should put the first ones listed (as many as fit
- reasonably) on the actual cover, and continue the rest onto
- adjacent pages.
- </para>
-
- <para>
- If you publish or distribute <link
- linkend="fdl-transparent">Opaque</link> copies of the <link
- linkend="fdl-document">Document</link> numbering more than 100,
- you must either include a machine-readable <link
- linkend="fdl-transparent">Transparent</link> copy along with
- each Opaque copy, or state in or with each Opaque copy a
- publicly-accessible computer-network location containing a
- complete Transparent copy of the Document, free of added
- material, which the general network-using public has access to
- download anonymously at no charge using public-standard network
- protocols. If you use the latter option, you must take
- reasonably prudent steps, when you begin distribution of Opaque
- copies in quantity, to ensure that this Transparent copy will
- remain thus accessible at the stated location until at least one
- year after the last time you distribute an Opaque copy (directly
- or through your agents or retailers) of that edition to the
- public.
- </para>
-
- <para>
- It is requested, but not required, that you contact the authors
- of the <link linkend="fdl-document">Document</link> well before
- redistributing any large number of copies, to give them a chance
- to provide you with an updated version of the Document.
- </para>
- </sect1>
-
- <sect1 id="fdl-section4">
- <title>4. MODIFICATIONS</title>
- <para>
- You may copy and distribute a <link
- linkend="fdl-modified">Modified Version</link> of the <link
- linkend="fdl-document">Document</link> under the conditions of
- sections <link linkend="fdl-section2">2</link> and <link
- linkend="fdl-section3">3</link> above, provided that you release
- the Modified Version under precisely this License, with the
- Modified Version filling the role of the Document, thus
- licensing distribution and modification of the Modified Version
- to whoever possesses a copy of it. In addition, you must do
- these things in the Modified Version:
- </para>
-
- <itemizedlist mark="opencircle">
- <listitem>
- <formalpara>
- <title>A</title>
- <para>
- Use in the <link linkend="fdl-title-page">Title
- Page</link> (and on the covers, if any) a title distinct
- from that of the <link
- linkend="fdl-document">Document</link>, and from those of
- previous versions (which should, if there were any, be
- listed in the History section of the Document). You may
- use the same title as a previous version if the original
- publisher of that version gives permission.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>B</title>
- <para>
- List on the <link linkend="fdl-title-page">Title
- Page</link>, as authors, one or more persons or entities
- responsible for authorship of the modifications in the
- <link linkend="fdl-modified">Modified Version</link>,
- together with at least five of the principal authors of
- the <link linkend="fdl-document">Document</link> (all of
- its principal authors, if it has less than five).
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>C</title>
- <para>
- State on the <link linkend="fdl-title-page">Title
- Page</link> the name of the publisher of the <link
- linkend="fdl-modified">Modified Version</link>, as the
- publisher.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>D</title>
- <para>
- Preserve all the copyright notices of the <link
- linkend="fdl-document">Document</link>.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>E</title>
- <para>
- Add an appropriate copyright notice for your modifications
- adjacent to the other copyright notices.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>F</title>
- <para>
- Include, immediately after the copyright notices, a
- license notice giving the public permission to use the
- <link linkend="fdl-modified">Modified Version</link> under
- the terms of this License, in the form shown in the
- Addendum below.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>G</title>
- <para>
- Preserve in that license notice the full lists of <link
- linkend="fdl-invariant"> Invariant Sections</link> and
- required <link linkend="fdl-cover-texts">Cover
- Texts</link> given in the <link
- linkend="fdl-document">Document's</link> license notice.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>H</title>
- <para>
- Include an unaltered copy of this License.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>I</title>
- <para>
- Preserve the section entitled <quote>History</quote>, and
- its title, and add to it an item stating at least the
- title, year, new authors, and publisher of the <link
- linkend="fdl-modified">Modified Version </link>as given on
- the <link linkend="fdl-title-page">Title Page</link>. If
- there is no section entitled <quote>History</quote> in the
- <link linkend="fdl-document">Document</link>, create one
- stating the title, year, authors, and publisher of the
- Document as given on its Title Page, then add an item
- describing the Modified Version as stated in the previous
- sentence.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>J</title>
- <para>
- Preserve the network location, if any, given in the <link
- linkend="fdl-document">Document</link> for public access
- to a <link linkend="fdl-transparent">Transparent</link>
- copy of the Document, and likewise the network locations
- given in the Document for previous versions it was based
- on. These may be placed in the <quote>History</quote>
- section. You may omit a network location for a work that
- was published at least four years before the Document
- itself, or if the original publisher of the version it
- refers to gives permission.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>K</title>
- <para>
- In any section entitled <quote>Acknowledgements</quote> or
- <quote>Dedications</quote>, preserve the section's title,
- and preserve in the section all the substance and tone of
- each of the contributor acknowledgements and/or
- dedications given therein.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>L</title>
- <para>
- Preserve all the <link linkend="fdl-invariant">Invariant
- Sections</link> of the <link
- linkend="fdl-document">Document</link>, unaltered in their
- text and in their titles. Section numbers or the
- equivalent are not considered part of the section titles.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>M</title>
- <para>
- Delete any section entitled
- <quote>Endorsements</quote>. Such a section may not be
- included in the <link linkend="fdl-modified">Modified
- Version</link>.
- </para>
- </formalpara>
- </listitem>
-
- <listitem>
- <formalpara>
- <title>N</title>
- <para>
- Do not retitle any existing section as
- <quote>Endorsements</quote> or to conflict in title with
- any <link linkend="fdl-invariant">Invariant
- Section</link>.
- </para>
- </formalpara>
- </listitem>
- </itemizedlist>
-
- <para>
- If the <link linkend="fdl-modified">Modified Version</link>
- includes new front-matter sections or appendices that qualify as
- <link linkend="fdl-secondary">Secondary Sections</link> and
- contain no material copied from the Document, you may at your
- option designate some or all of these sections as invariant. To
- do this, add their titles to the list of <link
- linkend="fdl-invariant">Invariant Sections</link> in the
- Modified Version's license notice. These titles must be
- distinct from any other section titles.
- </para>
-
- <para>
- You may add a section entitled <quote>Endorsements</quote>,
- provided it contains nothing but endorsements of your <link
- linkend="fdl-modified">Modified Version</link> by various
- parties--for example, statements of peer review or that the text
- has been approved by an organization as the authoritative
- definition of a standard.
- </para>
-
- <para>
- You may add a passage of up to five words as a <link
- linkend="fdl-cover-texts">Front-Cover Text</link>, and a passage
- of up to 25 words as a <link
- linkend="fdl-cover-texts">Back-Cover Text</link>, to the end of
- the list of <link linkend="fdl-cover-texts">Cover Texts</link>
- in the <link linkend="fdl-modified">Modified Version</link>.
- Only one passage of Front-Cover Text and one of Back-Cover Text
- may be added by (or through arrangements made by) any one
- entity. If the <link linkend="fdl-document">Document</link>
- already includes a cover text for the same cover, previously
- added by you or by arrangement made by the same entity you are
- acting on behalf of, you may not add another; but you may
- replace the old one, on explicit permission from the previous
- publisher that added the old one.
- </para>
-
- <para>
- The author(s) and publisher(s) of the <link
- linkend="fdl-document">Document</link> do not by this License
- give permission to use their names for publicity for or to
- assert or imply endorsement of any <link
- linkend="fdl-modified">Modified Version </link>.
- </para>
- </sect1>
-
- <sect1 id="fdl-section5">
- <title>5. COMBINING DOCUMENTS</title>
- <para>
- You may combine the <link linkend="fdl-document">Document</link>
- with other documents released under this License, under the
- terms defined in <link linkend="fdl-section4">section 4</link>
- above for modified versions, provided that you include in the
- combination all of the <link linkend="fdl-invariant">Invariant
- Sections</link> of all of the original documents, unmodified,
- and list them all as Invariant Sections of your combined work in
- its license notice.
- </para>
-
- <para>
- The combined work need only contain one copy of this License,
- and multiple identical <link linkend="fdl-invariant">Invariant
- Sections</link> may be replaced with a single copy. If there are
- multiple Invariant Sections with the same name but different
- contents, make the title of each such section unique by adding
- at the end of it, in parentheses, the name of the original
- author or publisher of that section if known, or else a unique
- number. Make the same adjustment to the section titles in the
- list of Invariant Sections in the license notice of the combined
- work.
- </para>
-
- <para>
- In the combination, you must combine any sections entitled
- <quote>History</quote> in the various original documents,
- forming one section entitled <quote>History</quote>; likewise
- combine any sections entitled <quote>Acknowledgements</quote>,
- and any sections entitled <quote>Dedications</quote>. You must
- delete all sections entitled <quote>Endorsements.</quote>
- </para>
- </sect1>
-
- <sect1 id="fdl-section6">
- <title>6. COLLECTIONS OF DOCUMENTS</title>
- <para>
- You may make a collection consisting of the <link
- linkend="fdl-document">Document</link> and other documents
- released under this License, and replace the individual copies
- of this License in the various documents with a single copy that
- is included in the collection, provided that you follow the
- rules of this License for verbatim copying of each of the
- documents in all other respects.
- </para>
-
- <para>
- You may extract a single document from such a collection, and
- distribute it individually under this License, provided you
- insert a copy of this License into the extracted document, and
- follow this License in all other respects regarding verbatim
- copying of that document.
- </para>
- </sect1>
-
- <sect1 id="fdl-section7">
- <title>7. AGGREGATION WITH INDEPENDENT WORKS</title>
- <para>
- A compilation of the <link
- linkend="fdl-document">Document</link> or its derivatives with
- other separate and independent documents or works, in or on a
- volume of a storage or distribution medium, does not as a whole
- count as a <link linkend="fdl-modified">Modified Version</link>
- of the Document, provided no compilation copyright is claimed
- for the compilation. Such a compilation is called an
- <quote>aggregate</quote>, and this License does not apply to the
- other self-contained works thus compiled with the Document , on
- account of their being thus compiled, if they are not themselves
- derivative works of the Document. If the <link
- linkend="fdl-cover-texts">Cover Text</link> requirement of <link
- linkend="fdl-section3">section 3</link> is applicable to these
- copies of the Document, then if the Document is less than one
- quarter of the entire aggregate, the Document's Cover Texts may
- be placed on covers that surround only the Document within the
- aggregate. Otherwise they must appear on covers around the whole
- aggregate.
- </para>
- </sect1>
-
- <sect1 id="fdl-section8">
- <title>8. TRANSLATION</title>
- <para>
- Translation is considered a kind of modification, so you may
- distribute translations of the <link
- linkend="fdl-document">Document</link> under the terms of <link
- linkend="fdl-section4">section 4</link>. Replacing <link
- linkend="fdl-invariant"> Invariant Sections</link> with
- translations requires special permission from their copyright
- holders, but you may include translations of some or all
- Invariant Sections in addition to the original versions of these
- Invariant Sections. You may include a translation of this
- License provided that you also include the original English
- version of this License. In case of a disagreement between the
- translation and the original English version of this License,
- the original English version will prevail.
- </para>
- </sect1>
-
- <sect1 id="fdl-section9">
- <title>9. TERMINATION</title>
- <para>
- You may not copy, modify, sublicense, or distribute the <link
- linkend="fdl-document">Document</link> except as expressly
- provided for under this License. Any other attempt to copy,
- modify, sublicense or distribute the Document is void, and will
- automatically terminate your rights under this License. However,
- parties who have received copies, or rights, from you under this
- License will not have their licenses terminated so long as such
- parties remain in full compliance.
- </para>
- </sect1>
-
- <sect1 id="fdl-section10">
- <title>10. FUTURE REVISIONS OF THIS LICENSE</title>
- <para>
- The <ulink type="http"
- url="http://www.gnu.org/fsf/fsf.html">Free Software
- Foundation</ulink> may publish new, revised versions of the GNU
- Free Documentation License from time to time. Such new versions
- will be similar in spirit to the present version, but may differ
- in detail to address new problems or concerns. See <ulink
- type="http"
- url="http://www.gnu.org/copyleft">http://www.gnu.org/copyleft/</ulink>.
- </para>
-
- <para>
- Each version of the License is given a distinguishing version
- number. If the <link linkend="fdl-document">Document</link>
- specifies that a particular numbered version of this License
- <quote>or any later version</quote> applies to it, you have the
- option of following the terms and conditions either of that
- specified version or of any later version that has been
- published (not as a draft) by the Free Software Foundation. If
- the Document does not specify a version number of this License,
- you may choose any version ever published (not as a draft) by
- the Free Software Foundation.
- </para>
- </sect1>
-
- <sect1 id="fdl-using">
- <title>Addendum</title>
- <para>
- To use this License in a document you have written, include a copy of
- the License in the document and put the following copyright and
- license notices just after the title page:
- </para>
-
- <blockquote>
- <para>
- Copyright &copy; YEAR YOUR NAME.
- </para>
- <para>
- Permission is granted to copy, distribute and/or modify this
- document under the terms of the GNU Free Documentation
- License, Version 1.1 or any later version published by the
- Free Software Foundation; with the <link
- linkend="fdl-invariant">Invariant Sections</link> being LIST
- THEIR TITLES, with the <link
- linkend="fdl-cover-texts">Front-Cover Texts</link> being LIST,
- and with the <link linkend="fdl-cover-texts">Back-Cover
- Texts</link> being LIST. A copy of the license is included in
- the section entitled <quote>GNU Free Documentation
- License</quote>.
- </para>
- </blockquote>
-
- <para>
- If you have no <link linkend="fdl-invariant">Invariant
- Sections</link>, write <quote>with no Invariant Sections</quote>
- instead of saying which ones are invariant. If you have no
- <link linkend="fdl-cover-texts">Front-Cover Texts</link>, write
- <quote>no Front-Cover Texts</quote> instead of
- <quote>Front-Cover Texts being LIST</quote>; likewise for <link
- linkend="fdl-cover-texts">Back-Cover Texts</link>.
- </para>
-
- <para>
- If your document contains nontrivial examples of program code,
- we recommend releasing these examples in parallel under your
- choice of free software license, such as the <ulink type="http"
- url="http://www.gnu.org/copyleft/gpl.html"> GNU General Public
- License</ulink>, to permit their use in free software.
- </para>
- </sect1>
-</appendix>
-
-
-
-
-
-
diff --git a/Documentation/DocBook/media/v4l/fieldseq_bt.pdf b/Documentation/DocBook/media/v4l/fieldseq_bt.pdf
deleted file mode 100644
index 26598b23f80d..000000000000
--- a/Documentation/DocBook/media/v4l/fieldseq_bt.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/fieldseq_tb.pdf b/Documentation/DocBook/media/v4l/fieldseq_tb.pdf
deleted file mode 100644
index 4965b22ddb3a..000000000000
--- a/Documentation/DocBook/media/v4l/fieldseq_tb.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/func-close.xml b/Documentation/DocBook/media/v4l/func-close.xml
deleted file mode 100644
index 232920d2f3c6..000000000000
--- a/Documentation/DocBook/media/v4l/func-close.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<refentry id="func-close">
- <refmeta>
- <refentrytitle>V4L2 close()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-close</refname>
- <refpurpose>Close a V4L2 device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>close</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Closes the device. Any I/O in progress is terminated and
-resources associated with the file descriptor are freed. However, data
-format parameters, current input or output, control values or other
-properties remain unchanged.</para>
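-
- <para>For illustration, closing a device and invalidating the stored
-descriptor might look like this:</para>
-
- <informalexample>
- <programlisting>
-if (-1 == close (fd)) {
-        perror ("close");
-        exit (EXIT_FAILURE);
-}
-
-fd = -1; /* the descriptor is no longer valid */
-</programlisting>
- </informalexample>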
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>The function returns <returnvalue>0</returnvalue> on
-success, <returnvalue>-1</returnvalue> on failure and the
-<varname>errno</varname> variable is set appropriately. Possible error
-codes:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid open file
-descriptor.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-ioctl.xml b/Documentation/DocBook/media/v4l/func-ioctl.xml
deleted file mode 100644
index 4394184a1a6d..000000000000
--- a/Documentation/DocBook/media/v4l/func-ioctl.xml
+++ /dev/null
@@ -1,71 +0,0 @@
-<refentry id="func-ioctl">
- <refmeta>
- <refentrytitle>V4L2 ioctl()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-ioctl</refname>
- <refpurpose>Program a V4L2 device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;sys/ioctl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>void *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>V4L2 ioctl request code as defined in the <filename>videodev2.h</filename> header file, for example
-VIDIOC_QUERYCAP.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>Pointer to a function parameter, usually a structure.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The <function>ioctl()</function> function is used to program
-V4L2 devices. The argument <parameter>fd</parameter> must be an open
-file descriptor. An ioctl <parameter>request</parameter> has encoded
-in it whether the argument is an input, output or read/write
-parameter, and the size of the argument <parameter>argp</parameter> in
-bytes. Macros and defines specifying V4L2 ioctl requests are located
-in the <filename>videodev2.h</filename> header file.
-Applications should use their own copy of this header rather than the version
-in the kernel sources of the system they compile on. All V4L2 ioctl requests,
-their respective function and parameters are specified in <xref
- linkend="user-func" />.</para>
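-
- <para>For illustration, a minimal call might look as follows. The
-<constant>VIDIOC_QUERYCAP</constant> request is used here only as an
-arbitrary example, and the <function>perror()</function> /
-<function>exit()</function> error handling is merely a convention of the
-examples in this specification:</para>
-
- <informalexample>
- <programlisting>
-struct v4l2_capability cap;
-
-memset (&amp;cap, 0, sizeof (cap));
-
-if (-1 == ioctl (fd, VIDIOC_QUERYCAP, &amp;cap)) {
-        perror ("VIDIOC_QUERYCAP");
-        exit (EXIT_FAILURE);
-}
-
-/* cap.capabilities can now be examined, for example for
-   V4L2_CAP_VIDEO_CAPTURE. */
-</programlisting>
- </informalexample>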
- </refsect1>
-
- <refsect1>
- &return-value;
- <para>When an ioctl that takes an output or read/write parameter fails,
- the parameter remains unmodified.</para>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-mmap.xml b/Documentation/DocBook/media/v4l/func-mmap.xml
deleted file mode 100644
index f31ad71bf301..000000000000
--- a/Documentation/DocBook/media/v4l/func-mmap.xml
+++ /dev/null
@@ -1,183 +0,0 @@
-<refentry id="func-mmap">
- <refmeta>
- <refentrytitle>V4L2 mmap()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-mmap</refname>
- <refpurpose>Map device memory into application address space</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>
-#include &lt;unistd.h&gt;
-#include &lt;sys/mman.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>void *<function>mmap</function></funcdef>
- <paramdef>void *<parameter>start</parameter></paramdef>
- <paramdef>size_t <parameter>length</parameter></paramdef>
- <paramdef>int <parameter>prot</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>off_t <parameter>offset</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>start</parameter></term>
- <listitem>
- <para>Map the buffer to this address in the
-application's address space. When the <constant>MAP_FIXED</constant>
-flag is specified, <parameter>start</parameter> must be a multiple of the
-pagesize and mmap will fail when the specified address
-cannot be used. Use of this option is discouraged; applications should
-just specify a <constant>NULL</constant> pointer here.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>length</parameter></term>
- <listitem>
- <para>Length of the memory area to map. This must be the
-same value as returned by the driver in the &v4l2-buffer;
-<structfield>length</structfield> field for the
-single-planar API, and the same value as returned by the driver
-in the &v4l2-plane; <structfield>length</structfield> field for the
-multi-planar API.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>prot</parameter></term>
- <listitem>
- <para>The <parameter>prot</parameter> argument describes the
-desired memory protection. Regardless of the device type and the
-direction of data exchange it should be set to
-<constant>PROT_READ</constant> | <constant>PROT_WRITE</constant>,
-permitting read and write access to image buffers. Drivers should
-support at least this combination of flags. Note that the Linux
-<filename>video-buf</filename> kernel module, which is used by the
-bttv, saa7134, saa7146, cx88 and vivi drivers, supports only
-<constant>PROT_READ</constant> | <constant>PROT_WRITE</constant>. When
-the driver does not support the desired protection the
-<function>mmap()</function> function fails.</para>
- <para>Note device memory accesses (&eg; the memory on a
-graphics card with video capturing hardware) may incur a performance
-penalty compared to main memory accesses, or reads may be
-significantly slower than writes or vice versa. Other I/O methods may
-be more efficient in this case.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>flags</parameter></term>
- <listitem>
- <para>The <parameter>flags</parameter> parameter
-specifies the type of the mapped object, mapping options and whether
-modifications made to the mapped copy of the page are private to the
-process or are to be shared with other references.</para>
- <para><constant>MAP_FIXED</constant> requests that the
-driver selects no other address than the one specified. If the
-specified address cannot be used, <function>mmap()</function> will fail. If
-<constant>MAP_FIXED</constant> is specified,
-<parameter>start</parameter> must be a multiple of the pagesize. Use
-of this option is discouraged.</para>
- <para>One of the <constant>MAP_SHARED</constant> or
-<constant>MAP_PRIVATE</constant> flags must be set.
-<constant>MAP_SHARED</constant> allows applications to share the
-mapped memory with other (&eg; child-) processes. Note that the Linux
-<filename>video-buf</filename> module, which is used by the bttv,
-saa7134, saa7146, cx88 and vivi drivers, supports only
-<constant>MAP_SHARED</constant>. <constant>MAP_PRIVATE</constant>
-requests copy-on-write semantics. V4L2 applications should not set the
-<constant>MAP_PRIVATE</constant>, <constant>MAP_DENYWRITE</constant>,
-<constant>MAP_EXECUTABLE</constant> or <constant>MAP_ANON</constant>
-flag.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>offset</parameter></term>
- <listitem>
- <para>Offset of the buffer in device memory. This must be the
-same value as returned by the driver in the &v4l2-buffer;
-<structfield>m</structfield> union <structfield>offset</structfield> field for
-the single-planar API, and the same value as returned by the driver
-in the &v4l2-plane; <structfield>m</structfield> union
-<structfield>mem_offset</structfield> field for the multi-planar API.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The <function>mmap()</function> function asks to map
-<parameter>length</parameter> bytes starting at
-<parameter>offset</parameter> in the memory of the device specified by
-<parameter>fd</parameter> into the application address space,
-preferably at address <parameter>start</parameter>. This latter
-address is a hint only, and is usually specified as 0.</para>
-
- <para>Suitable length and offset parameters are queried with the
-&VIDIOC-QUERYBUF; ioctl. Buffers must be allocated with the
-&VIDIOC-REQBUFS; ioctl before they can be queried.</para>
-
- <para>To unmap buffers the &func-munmap; function is used.</para>
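-
- <para>The following sketch shows the typical single-planar sequence. It
-assumes that buffers have already been allocated with &VIDIOC-REQBUFS; and
-that <parameter>fd</parameter> refers to a capture device; it is an
-illustration only, not a complete program.</para>
-
- <informalexample>
- <programlisting>
-struct v4l2_buffer buf;
-void *start;
-
-memset (&amp;buf, 0, sizeof (buf));
-buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-buf.memory = V4L2_MEMORY_MMAP;
-buf.index = 0;
-
-if (-1 == ioctl (fd, VIDIOC_QUERYBUF, &amp;buf)) {
-        perror ("VIDIOC_QUERYBUF");
-        exit (EXIT_FAILURE);
-}
-
-start = mmap (NULL, buf.length,
-              PROT_READ | PROT_WRITE, /* recommended */
-              MAP_SHARED,             /* recommended */
-              fd, buf.m.offset);
-
-if (MAP_FAILED == start) {
-        perror ("mmap");
-        exit (EXIT_FAILURE);
-}
-</programlisting>
- </informalexample>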
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success <function>mmap()</function> returns a pointer to
-the mapped buffer. On error <constant>MAP_FAILED</constant> (-1) is
-returned, and the <varname>errno</varname> variable is set
-appropriately. Possible error codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid file
-descriptor.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is
-not open for reading and writing.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <parameter>start</parameter> or
-<parameter>length</parameter> or <parameter>offset</parameter> are not
-suitable. (E.&nbsp;g. they are too large, or not aligned on a
-<constant>PAGESIZE</constant> boundary.)</para>
- <para>The <parameter>flags</parameter> or
-<parameter>prot</parameter> value is not supported.</para>
- <para>No buffers have been allocated with the
-&VIDIOC-REQBUFS; ioctl.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOMEM</errorcode></term>
- <listitem>
- <para>Not enough physical or virtual memory was available to
-complete the request.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-munmap.xml b/Documentation/DocBook/media/v4l/func-munmap.xml
deleted file mode 100644
index 860d49ca54a5..000000000000
--- a/Documentation/DocBook/media/v4l/func-munmap.xml
+++ /dev/null
@@ -1,76 +0,0 @@
-<refentry id="func-munmap">
- <refmeta>
- <refentrytitle>V4L2 munmap()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-munmap</refname>
- <refpurpose>Unmap device memory</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>
-#include &lt;unistd.h&gt;
-#include &lt;sys/mman.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>munmap</function></funcdef>
- <paramdef>void *<parameter>start</parameter></paramdef>
- <paramdef>size_t <parameter>length</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
- <refsect1>
- <title>Arguments</title>
- <variablelist>
- <varlistentry>
- <term><parameter>start</parameter></term>
- <listitem>
- <para>Address of the mapped buffer as returned by the
-&func-mmap; function.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>length</parameter></term>
- <listitem>
- <para>Length of the mapped buffer. This must be the same
-value as given to <function>mmap()</function> and returned by the
-driver in the &v4l2-buffer; <structfield>length</structfield>
-field for the single-planar API and in the &v4l2-plane;
-<structfield>length</structfield> field for the multi-planar API.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Unmaps a buffer previously mapped with the &func-mmap; function
-and frees it, if possible. <!-- ? This function (not freeing)
-has no impact on I/O in progress, specifically it does not imply
-&VIDIOC-STREAMOFF; to terminate I/O. Unmapped buffers can still be
-enqueued, dequeued or queried, they are just not accessible by the
-application.--></para>
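-
- <para>Continuing the single-planar sketch from the &func-mmap; page,
-where <varname>start</varname> and <varname>buf</varname> were obtained
-from <function>mmap()</function> and &VIDIOC-QUERYBUF; respectively, the
-buffer is unmapped with the same length:</para>
-
- <informalexample>
- <programlisting>
-if (-1 == munmap (start, buf.length)) {
-        perror ("munmap");
-        exit (EXIT_FAILURE);
-}
-</programlisting>
- </informalexample>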
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success <function>munmap()</function> returns 0, on
-failure -1 and the <varname>errno</varname> variable is set
-appropriately:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <parameter>start</parameter> or
-<parameter>length</parameter> is incorrect, or no buffers have been
-mapped yet.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-open.xml b/Documentation/DocBook/media/v4l/func-open.xml
deleted file mode 100644
index cf64e207c3ee..000000000000
--- a/Documentation/DocBook/media/v4l/func-open.xml
+++ /dev/null
@@ -1,113 +0,0 @@
-<refentry id="func-open">
- <refmeta>
- <refentrytitle>V4L2 open()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-open</refname>
- <refpurpose>Open a V4L2 device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;fcntl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>open</function></funcdef>
- <paramdef>const char *<parameter>device_name</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>device_name</parameter></term>
- <listitem>
- <para>Device to be opened.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>flags</parameter></term>
- <listitem>
- <para>Open flags. Access mode must be
-<constant>O_RDWR</constant>. This is just a technicality; input devices
-still support only reading and output devices only writing.</para>
- <para>When the <constant>O_NONBLOCK</constant> flag is
-given, the read() function and the &VIDIOC-DQBUF; ioctl will return
-the &EAGAIN; when no data is available or no buffer is in the driver
-outgoing queue, otherwise these functions block until data becomes
-available. All V4L2 drivers exchanging data with applications must
-support the <constant>O_NONBLOCK</constant> flag.</para>
- <para>Other flags have no effect.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
- <refsect1>
- <title>Description</title>
-
- <para>To open a V4L2 device applications call
-<function>open()</function> with the desired device name. This
-function has no side effects; all data format parameters, current
-input or output, control values or other properties remain unchanged.
-At the first <function>open()</function> call after loading the driver
-they will be reset to default values; drivers are never in an
-undefined state.</para>
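-
- <para>For illustration, a device might be opened like this; the device
-name <filename>/dev/video0</filename> is only an example:</para>
-
- <informalexample>
- <programlisting>
-int fd;
-
-fd = open ("/dev/video0", O_RDWR /* required */ | O_NONBLOCK, 0);
-
-if (-1 == fd) {
-        perror ("open /dev/video0");
-        exit (EXIT_FAILURE);
-}
-</programlisting>
- </informalexample>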
- </refsect1>
- <refsect1>
- <title>Return Value</title>
-
- <para>On success <function>open</function> returns the new file
-descriptor. On error -1 is returned, and the <varname>errno</varname>
-variable is set appropriately. Possible error codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>The caller has no permission to access the
-device.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The driver does not support multiple opens and the
-device is already in use.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENXIO</errorcode></term>
- <listitem>
- <para>No device corresponding to this device special file
-exists.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOMEM</errorcode></term>
- <listitem>
- <para>Not enough kernel memory was available to complete the
-request.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EMFILE</errorcode></term>
- <listitem>
- <para>The process already has the maximum number of
-files open.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENFILE</errorcode></term>
- <listitem>
- <para>The limit on the total number of files open on the
-system has been reached.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-poll.xml b/Documentation/DocBook/media/v4l/func-poll.xml
deleted file mode 100644
index 4c73f115219b..000000000000
--- a/Documentation/DocBook/media/v4l/func-poll.xml
+++ /dev/null
@@ -1,142 +0,0 @@
-<refentry id="func-poll">
- <refmeta>
- <refentrytitle>V4L2 poll()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-poll</refname>
- <refpurpose>Wait for some event on a file descriptor</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;sys/poll.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>poll</function></funcdef>
- <paramdef>struct pollfd *<parameter>ufds</parameter></paramdef>
- <paramdef>unsigned int <parameter>nfds</parameter></paramdef>
- <paramdef>int <parameter>timeout</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
-
- <para>With the <function>poll()</function> function applications
-can suspend execution until the driver has captured data or is ready
-to accept data for output.</para>
-
- <para>When streaming I/O has been negotiated this function waits
-until a buffer has been filled by the capture device and can be dequeued
-with the &VIDIOC-DQBUF; ioctl. For output devices this function waits
-until the device is ready to accept a new buffer to be queued up with
-the &VIDIOC-QBUF; ioctl for display. When buffers are already in the outgoing
-queue of the driver (capture) or the incoming queue isn't full (display)
-the function returns immediately.</para>
-
- <para>On success <function>poll()</function> returns the number of
-file descriptors that have been selected (that is, file descriptors
-for which the <structfield>revents</structfield> field of the
-respective <structname>pollfd</structname> structure is non-zero).
-Capture devices set the <constant>POLLIN</constant> and
-<constant>POLLRDNORM</constant> flags in the
-<structfield>revents</structfield> field, output devices the
-<constant>POLLOUT</constant> and <constant>POLLWRNORM</constant>
-flags. When the function times out it returns a value of zero; on
-failure it returns <returnvalue>-1</returnvalue> and the
-<varname>errno</varname> variable is set appropriately. When the
-application did not call &VIDIOC-STREAMON; the
-<function>poll()</function> function succeeds, but sets the
-<constant>POLLERR</constant> flag in the
-<structfield>revents</structfield> field. When the
-application has called &VIDIOC-STREAMON; for a capture device but hasn't
-yet called &VIDIOC-QBUF;, the <function>poll()</function> function
-succeeds and sets the <constant>POLLERR</constant> flag in the
-<structfield>revents</structfield> field. For output devices this
-same situation will cause <function>poll()</function> to succeed
-as well, but it sets the <constant>POLLOUT</constant> and
-<constant>POLLWRNORM</constant> flags in the <structfield>revents</structfield>
-field.</para>
-
- <para>If an event occurred (see &VIDIOC-DQEVENT;) then
-<constant>POLLPRI</constant> will be set in the <structfield>revents</structfield>
-field and <function>poll()</function> will return.</para>
-
- <para>When use of the <function>read()</function> function has
-been negotiated and the driver does not capture yet, the
-<function>poll</function> function starts capturing. When that fails
-it returns a <constant>POLLERR</constant> as above. Otherwise it waits
-until data has been captured and can be read. When the driver captures
-continuously (as opposed to, for example, still images) the function
-may return immediately.</para>
-
- <para>When use of the <function>write()</function> function has
-been negotiated and the driver does not stream yet, the
-<function>poll</function> function starts streaming. When that fails
-it returns a <constant>POLLERR</constant> as above. Otherwise it waits
-until the driver is ready for a non-blocking
-<function>write()</function> call.</para>
-
- <para>If the caller is only interested in events (just
-<constant>POLLPRI</constant> is set in the <structfield>events</structfield>
-field), then <function>poll()</function> will <emphasis>not</emphasis>
-start streaming if the driver does not stream yet. This makes it
-possible to just poll for events and not for buffers.</para>
-
- <para>All drivers implementing the <function>read()</function> or
-<function>write()</function> function or streaming I/O must also
-support the <function>poll()</function> function.</para>
-
- <para>For more details see the
-<function>poll()</function> manual page.</para>
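-
- <para>The following sketch waits for a filled capture buffer on a single
-file descriptor. It assumes <parameter>fd</parameter> refers to a capture
-device and that streaming has already been started with the &VIDIOC-QBUF;
-and &VIDIOC-STREAMON; ioctls; the five second timeout is arbitrary.</para>
-
- <informalexample>
- <programlisting>
-struct pollfd pfd;
-int ret;
-
-pfd.fd = fd;
-pfd.events = POLLIN | POLLRDNORM; /* capture: wait for a filled buffer */
-pfd.revents = 0;
-
-ret = poll (&amp;pfd, 1, 5000 /* ms */);
-
-if (-1 == ret) {
-        perror ("poll");
-        exit (EXIT_FAILURE);
-}
-
-if (0 == ret) {
-        fprintf (stderr, "poll timeout\n");
-        exit (EXIT_FAILURE);
-}
-
-if (pfd.revents &amp; POLLERR) {
-        fprintf (stderr, "poll error, was streaming started?\n");
-        exit (EXIT_FAILURE);
-}
-
-/* A buffer can now be dequeued with VIDIOC_DQBUF. */
-</programlisting>
- </informalexample>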
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success, <function>poll()</function> returns the number of
-structures which have non-zero <structfield>revents</structfield>
-fields, or zero if the call timed out. On error
-<returnvalue>-1</returnvalue> is returned, and the
-<varname>errno</varname> variable is set appropriately:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para>One or more of the <parameter>ufds</parameter> members
-specify an invalid file descriptor.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The driver does not support multiple read or write
-streams and the device is already in use.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EFAULT</errorcode></term>
- <listitem>
- <para><parameter>ufds</parameter> references an inaccessible
-memory area.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINTR</errorcode></term>
- <listitem>
- <para>The call was interrupted by a signal.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <parameter>nfds</parameter> argument is greater
-than <constant>OPEN_MAX</constant>.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-read.xml b/Documentation/DocBook/media/v4l/func-read.xml
deleted file mode 100644
index e218bbfbd362..000000000000
--- a/Documentation/DocBook/media/v4l/func-read.xml
+++ /dev/null
@@ -1,181 +0,0 @@
-<refentry id="func-read">
- <refmeta>
- <refentrytitle>V4L2 read()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-read</refname>
- <refpurpose>Read from a V4L2 device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>ssize_t <function>read</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>void *<parameter>buf</parameter></paramdef>
- <paramdef>size_t <parameter>count</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>buf</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>count</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para><function>read()</function> attempts to read up to
-<parameter>count</parameter> bytes from file descriptor
-<parameter>fd</parameter> into the buffer starting at
-<parameter>buf</parameter>. The layout of the data in the buffer is
-discussed in the respective device interface section, see ##. If <parameter>count</parameter> is zero,
-<function>read()</function> returns zero and has no other results. If
-<parameter>count</parameter> is greater than
-<constant>SSIZE_MAX</constant>, the result is unspecified. Regardless
-of the <parameter>count</parameter> value each
-<function>read()</function> call will provide at most one frame (two
-fields) worth of data.</para>
-
- <para>By default <function>read()</function> blocks until data
-becomes available. When the <constant>O_NONBLOCK</constant> flag was
-given to the &func-open; function it
-returns immediately with an &EAGAIN; when no data is available. The
-&func-select; or &func-poll; functions
-can always be used to suspend execution until data becomes available. All
-drivers supporting the <function>read()</function> function must also
-support <function>select()</function> and
-<function>poll()</function>.</para>
-
- <para>Drivers can implement read functionality in different
-ways, using a single or multiple buffers and discarding the oldest or
-newest frames once the internal buffers are filled.</para>
-
- <para><function>read()</function> never returns a "snapshot" of a
-buffer being filled. Using a single buffer, the driver stops
-capturing from the moment the application starts reading the buffer
-until the read is finished. Thus only the period of the vertical blanking
-interval is available for reading, or the capture rate must fall below
-the nominal frame rate of the video standard.</para>
-
-<para>The behavior of
-<function>read()</function> when called during the active picture
-period or the vertical blanking separating the top and bottom field
-depends on the discarding policy. A driver discarding the oldest
-frames keeps capturing into an internal buffer, continuously
-overwriting the previous, not yet read, frame, and returns the frame
-being received at the time of the <function>read()</function> call as
-soon as it is complete.</para>
-
- <para>A driver discarding the newest frames stops capturing until
-the next <function>read()</function> call. The frame being received at
-<function>read()</function> time is discarded, returning the following
-frame instead. Again this implies a reduction of the capture rate to
-one half or less of the nominal frame rate. An example of this model
-is the video read mode of the bttv driver, initiating a DMA to user
-memory when <function>read()</function> is called and returning when
-the DMA finished.</para>
-
- <para>In the multiple buffer model drivers maintain a ring of
-internal buffers, automatically advancing to the next free buffer.
-This allows continuous capturing when the application can empty the
-buffers fast enough. Again, the behavior when the driver runs out of
-free buffers depends on the discarding policy.</para>
-
- <para>Applications can get and set the number of buffers used
-internally by the driver with the &VIDIOC-G-PARM; and &VIDIOC-S-PARM;
-ioctls. They are optional, however. The discarding policy is not
-reported and cannot be changed. For minimum requirements see <xref
- linkend="devices" />.</para>
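-
- <para>The following sketch reads a single frame. It assumes
-<varname>buffer_size</varname> holds the size of one frame, for example
-the <structfield>sizeimage</structfield> value negotiated with the
-<constant>VIDIOC_G_FMT</constant> ioctl, and that
-<parameter>fd</parameter> may have been opened with
-<constant>O_NONBLOCK</constant>:</para>
-
- <informalexample>
- <programlisting>
-void *buffer;
-ssize_t bytes;
-
-buffer = malloc (buffer_size);
-
-if (NULL == buffer) {
-        perror ("malloc");
-        exit (EXIT_FAILURE);
-}
-
-bytes = read (fd, buffer, buffer_size);
-
-if (-1 == bytes) {
-        if (EAGAIN == errno) {
-                /* O_NONBLOCK was given and no frame is ready yet. */
-        } else {
-                perror ("read");
-                exit (EXIT_FAILURE);
-        }
-}
-</programlisting>
- </informalexample>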
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success, the number of bytes read is returned. It is not
-an error if this number is smaller than the number of bytes requested,
-or the amount of data required for one frame. This may happen for
-example because <function>read()</function> was interrupted by a
-signal. On error, -1 is returned, and the <varname>errno</varname>
-variable is set appropriately. In this case the next read will start
-at the beginning of a new frame. Possible error codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EAGAIN</errorcode></term>
- <listitem>
- <para>Non-blocking I/O has been selected using
-O_NONBLOCK and no data was immediately available for reading.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid file
-descriptor or is not open for reading, or the process already has the
-maximum number of files open.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The driver does not support multiple read streams and the
-device is already in use.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EFAULT</errorcode></term>
- <listitem>
- <para><parameter>buf</parameter> references an inaccessible
-memory area.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINTR</errorcode></term>
- <listitem>
- <para>The call was interrupted by a signal before any
-data was read.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EIO</errorcode></term>
- <listitem>
- <para>I/O error. This indicates some hardware problem or a
-failure to communicate with a remote device (USB camera etc.).</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <function>read()</function> function is not
-supported by this driver, not on this device, or generally not on this
-type of device.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-select.xml b/Documentation/DocBook/media/v4l/func-select.xml
deleted file mode 100644
index e12a60d9bd85..000000000000
--- a/Documentation/DocBook/media/v4l/func-select.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<refentry id="func-select">
- <refmeta>
- <refentrytitle>V4L2 select()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-select</refname>
- <refpurpose>Synchronous I/O multiplexing</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>
-#include &lt;sys/time.h&gt;
-#include &lt;sys/types.h&gt;
-#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>select</function></funcdef>
- <paramdef>int <parameter>nfds</parameter></paramdef>
- <paramdef>fd_set *<parameter>readfds</parameter></paramdef>
- <paramdef>fd_set *<parameter>writefds</parameter></paramdef>
- <paramdef>fd_set *<parameter>exceptfds</parameter></paramdef>
- <paramdef>struct timeval *<parameter>timeout</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
-
- <para>With the <function>select()</function> function applications
-can suspend execution until the driver has captured data or is ready
-to accept data for output.</para>
-
- <para>When streaming I/O has been negotiated this function waits
-until a buffer has been filled or displayed and can be dequeued with
-the &VIDIOC-DQBUF; ioctl. When buffers are already in the outgoing
-queue of the driver the function returns immediately.</para>
-
- <para>On success <function>select()</function> returns the total
-number of bits set in the <structname>fd_set</structname>s. When the
-function times out it returns a value of zero. On failure it returns
-<returnvalue>-1</returnvalue> and the <varname>errno</varname>
-variable is set appropriately. When the application has not yet called
-&VIDIOC-QBUF; or &VIDIOC-STREAMON;, the
-<function>select()</function> function succeeds, setting the bit of
-the file descriptor in <parameter>readfds</parameter> or
-<parameter>writefds</parameter>, but subsequent &VIDIOC-DQBUF; calls
-will fail.<footnote><para>The Linux kernel implements
-<function>select()</function> like the &func-poll; function, but
-<function>select()</function> cannot return a
-<constant>POLLERR</constant>.</para>
- </footnote></para>
-
- <para>When use of the <function>read()</function> function has
-been negotiated and the driver is not capturing yet, the
-<function>select()</function> function starts capturing. When that
-fails, <function>select()</function> returns successfully and a
-subsequent <function>read()</function> call, which also attempts to
-start capturing, will return an appropriate error code. When the
-driver captures continuously (as opposed to, for example, still
-images) and data is already available the
-<function>select()</function> function returns immediately.</para>
-
- <para>When use of the <function>write()</function> function has
-been negotiated the <function>select()</function> function just waits
-until the driver is ready for a non-blocking
-<function>write()</function> call.</para>
-
- <para>All drivers implementing the <function>read()</function> or
-<function>write()</function> function or streaming I/O must also
-support the <function>select()</function> function.</para>
-
- <para>For more details see the <function>select()</function>
-manual page.</para>
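-
- <para>A minimal sketch, assuming a capture file descriptor
-<varname>fd</varname> returned by &func-open;, waiting up to two
-seconds for the driver to fill a buffer:</para>
-
- <informalexample>
- <programlisting>
-fd_set fds;
-struct timeval tv;
-int r;
-
-FD_ZERO(&amp;fds);
-FD_SET(fd, &amp;fds);
-
-/* Give the driver up to two seconds to capture data. */
-tv.tv_sec = 2;
-tv.tv_usec = 0;
-
-r = select(fd + 1, &amp;fds, NULL, NULL, &amp;tv);
-
-if (r == -1 &amp;&amp; errno != EINTR) {
- perror("select");
- exit(EXIT_FAILURE);
-}
-if (r == 0) {
- fprintf(stderr, "select timeout\n");
- exit(EXIT_FAILURE);
-}
-
-/* The descriptor is readable: a buffer can be dequeued with
- VIDIOC_DQBUF, or a read() call will not block. */
- </programlisting>
- </informalexample>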
-
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success, <function>select()</function> returns the number
-of descriptors contained in the three returned descriptor sets, which
-will be zero if the timeout expired. On error
-<returnvalue>-1</returnvalue> is returned, and the
-<varname>errno</varname> variable is set appropriately; the sets and
-<parameter>timeout</parameter> are undefined. Possible error codes
-are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para>One or more of the file descriptor sets specified a
-file descriptor that is not open.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The driver does not support multiple read or write
-streams and the device is already in use.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EFAULT</errorcode></term>
- <listitem>
- <para>The <parameter>readfds</parameter>,
-<parameter>writefds</parameter>, <parameter>exceptfds</parameter> or
-<parameter>timeout</parameter> pointer references an inaccessible memory
-area.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINTR</errorcode></term>
- <listitem>
- <para>The call was interrupted by a signal.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <parameter>nfds</parameter> argument is less than
-zero or greater than <constant>FD_SETSIZE</constant>.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/func-write.xml b/Documentation/DocBook/media/v4l/func-write.xml
deleted file mode 100644
index 575207885726..000000000000
--- a/Documentation/DocBook/media/v4l/func-write.xml
+++ /dev/null
@@ -1,128 +0,0 @@
-<refentry id="func-write">
- <refmeta>
- <refentrytitle>V4L2 write()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>v4l2-write</refname>
- <refpurpose>Write to a V4L2 device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>ssize_t <function>write</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>void *<parameter>buf</parameter></paramdef>
- <paramdef>size_t <parameter>count</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>buf</parameter></term>
- <listitem>
- <para>Buffer containing the data to be written.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>count</parameter></term>
- <listitem>
- <para>Maximum number of bytes to write.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para><function>write()</function> writes up to
-<parameter>count</parameter> bytes to the device referenced by the
-file descriptor <parameter>fd</parameter> from the buffer starting at
-<parameter>buf</parameter>. When the hardware outputs are not active
-yet, this function enables them. When <parameter>count</parameter> is
-zero, <function>write()</function> returns
-<returnvalue>0</returnvalue> without any other effect.</para>
-
- <para>When the application does not provide more data in time, the
-previous video frame, raw VBI image, sliced VPS or WSS data is
-displayed again. Sliced Teletext or Closed Caption data is not
-repeated, the driver inserts a blank line instead.</para>
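-
- <para>A minimal sketch, assuming an output file descriptor
-<varname>fd</varname> returned by &func-open; and a buffer
-<varname>data</varname> holding one frame of
-<varname>frame_size</varname> bytes in the negotiated output
-format:</para>
-
- <informalexample>
- <programlisting>
-ssize_t r;
-
-do {
- r = write(fd, data, frame_size);
-} while (r == -1 &amp;&amp; (errno == EAGAIN || errno == EINTR));
-
-if (r == -1) {
- perror("write");
- exit(EXIT_FAILURE);
-}
-
-/* r is the number of bytes actually accepted by the driver; any
- remainder of the frame can be passed to a further write() call. */
- </programlisting>
- </informalexample>
-
- <para>A real application would normally wait with &func-select; or
-&func-poll; rather than busy-waiting on
-<errorcode>EAGAIN</errorcode>.</para>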
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para>On success, the number of bytes written is returned. Zero
-indicates nothing was written. On error, <returnvalue>-1</returnvalue>
-is returned, and the <varname>errno</varname> variable is set
-appropriately. In this case the next write will start at the beginning
-of a new frame. Possible error codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EAGAIN</errorcode></term>
- <listitem>
- <para>Non-blocking I/O has been selected using the <link
-linkend="func-open"><constant>O_NONBLOCK</constant></link> flag and no
-buffer space was available to write the data immediately.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid file
-descriptor or is not open for writing.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The driver does not support multiple write streams and the
-device is already in use.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EFAULT</errorcode></term>
- <listitem>
- <para><parameter>buf</parameter> references an inaccessible
-memory area.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINTR</errorcode></term>
- <listitem>
- <para>The call was interrupted by a signal before any
-data was written.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EIO</errorcode></term>
- <listitem>
- <para>I/O error. This indicates some hardware problem.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <function>write()</function> function is not
-supported by this driver, not on this device, or generally not on this
-type of device.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/gen-errors.xml b/Documentation/DocBook/media/v4l/gen-errors.xml
deleted file mode 100644
index 7e29a4e1f696..000000000000
--- a/Documentation/DocBook/media/v4l/gen-errors.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<title>Generic Error Codes</title>
-
-<table frame="none" pgwide="1" id="gen-errors">
- <title>Generic error codes</title>
- <tgroup cols="2">
- &cs-str;
- <tbody valign="top">
- <!-- Keep it ordered alphabetically -->
- <row>
- <entry>EAGAIN (aka EWOULDBLOCK)</entry>
- <entry>The ioctl can't be handled because the device is in a state where
- it can't perform it. This could happen, for example, when the
- device is sleeping and an ioctl is performed to query statistics.
- It is also returned when the ioctl would need to wait
- for an event, but the device was opened in non-blocking mode.
- </entry>
- </row>
- <row>
- <entry>EBADF</entry>
- <entry>The file descriptor is not valid.</entry>
- </row>
- <row>
- <entry>EBUSY</entry>
- <entry>The ioctl can't be handled because the device is busy. This is
- typically returned while the device is streaming, and an ioctl tried to
- change something that would affect the stream, or would require the
- usage of a hardware resource that was already allocated. The ioctl
- must not be retried without performing another action to fix the
- problem first (typically: stop the stream before retrying).</entry>
- </row>
- <row>
- <entry>EFAULT</entry>
- <entry>There was a failure while copying data from/to userspace,
- probably caused by an invalid pointer reference.</entry>
- </row>
- <row>
- <entry>EINVAL</entry>
- <entry>One or more of the ioctl parameters are invalid or out of the
- allowed range. This is a widely used error code. See the individual
- ioctl requests for specific causes.</entry>
- </row>
- <row>
- <entry>ENODEV</entry>
- <entry>Device not found or was removed.</entry>
- </row>
- <row>
- <entry>ENOMEM</entry>
- <entry>There's not enough memory to handle the desired operation.</entry>
- </row>
- <row>
- <entry>ENOTTY</entry>
- <entry>The ioctl is not supported by the driver, actually meaning that
- the required functionality is not available, or the file
- descriptor is not for a media device.</entry>
- </row>
- <row>
- <entry>ENOSPC</entry>
- <entry>On USB devices, the stream ioctls can return this error, meaning
- that this request would overcommit the USB bandwidth reserved
- for periodic transfers (up to 80% of the USB bandwidth).</entry>
- </row>
- <row>
- <entry>EPERM</entry>
- <entry>Permission denied. Can be returned if the device needs write
- permission, or some special capabilities are needed
- (e.g. root privileges).</entry>
- </row>
- </tbody>
- </tgroup>
-</table>
-
-<para>Note 1: ioctls may return other error codes. Since errors may have side
-effects such as a driver reset, applications should abort on unexpected errors.
-</para>
-
-<para>Note 2: Request-specific error codes are listed in the individual
-requests descriptions.</para>
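-
-<para>A common pattern in application code, shown here only as a sketch, is a
-small wrapper that retries an ioctl when it is interrupted by a signal and
-leaves every other error, including the unexpected ones discussed above, to
-the caller (the name <function>xioctl()</function> is merely a convention
-used in example code):</para>
-
-<informalexample>
-<programlisting>
-static int xioctl(int fd, unsigned long request, void *arg)
-{
- int r;
-
- do {
- r = ioctl(fd, request, arg);
- } while (r == -1 &amp;&amp; errno == EINTR);
-
- return r; /* on failure errno still describes the error */
-}
-</programlisting>
-</informalexample>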
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
deleted file mode 100644
index 21a3dde8f95d..000000000000
--- a/Documentation/DocBook/media/v4l/io.xml
+++ /dev/null
@@ -1,1545 +0,0 @@
- <title>Input/Output</title>
-
- <para>The V4L2 API defines several different methods to read from or
-write to a device. All drivers exchanging data with applications must
-support at least one of them.</para>
-
- <para>The classic I/O method using the <function>read()</function>
-and <function>write()</function> functions is automatically selected
-after opening a V4L2 device. When the driver does not support this
-method, attempts to read or write will always fail.</para>
-
- <para>Other methods must be negotiated. To select the streaming I/O
-method with memory mapped or user buffers applications call the
-&VIDIOC-REQBUFS; ioctl. The asynchronous I/O method is not defined
-yet.</para>
-
- <para>Video overlay can be considered another I/O method, although
-the application does not directly receive the image data. It is
-selected by initiating video overlay with the &VIDIOC-S-FMT; ioctl.
-For more information see <xref linkend="overlay" />.</para>
-
- <para>Generally exactly one I/O method, including overlay, is
-associated with each file descriptor. The only exceptions are
-applications not exchanging data with a driver ("panel applications",
-see <xref linkend="open" />) and drivers permitting simultaneous video capturing
-and overlay using the same file descriptor, for compatibility with V4L
-and earlier versions of V4L2.</para>
-
- <para><constant>VIDIOC_S_FMT</constant> and
-<constant>VIDIOC_REQBUFS</constant> would permit this to some degree,
-but for simplicity drivers need not support switching the I/O method
-(after first switching away from read/write) other than by closing
-and reopening the device.</para>
-
- <para>The following sections describe the various I/O methods in
-more detail.</para>
-
- <section id="rw">
- <title>Read/Write</title>
-
- <para>Input and output devices support the
-<function>read()</function> and <function>write()</function> function,
-respectively, when the <constant>V4L2_CAP_READWRITE</constant> flag in
-the <structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl is set.</para>
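-
- <para>A minimal sketch of this capability check, assuming a file
-descriptor <varname>fd</varname> returned by &func-open;:</para>
-
- <informalexample>
- <programlisting>
-&v4l2-capability; cap;
-
-if (-1 == ioctl(fd, &VIDIOC-QUERYCAP;, &amp;cap)) {
- perror("VIDIOC_QUERYCAP");
- exit(EXIT_FAILURE);
-}
-
-if (!(cap.capabilities &amp; V4L2_CAP_READWRITE))
- printf("The device does not support the read/write I/O method\n");
- </programlisting>
- </informalexample>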
-
- <para>Drivers may need the CPU to copy the data, but they may also
-support DMA to or from user memory, so this I/O method is not
-necessarily less efficient than other methods merely exchanging buffer
-pointers. It is considered inferior though because no meta-information
-like frame counters or timestamps is passed. This information is
-necessary to recognize frame dropping and to synchronize with other
-data streams. However, this is also the simplest I/O method, requiring
-little or no setup to exchange data. It permits command line stunts
-like this (the <application>vidctrl</application> tool is
-fictitious):</para>
-
- <informalexample>
- <screen>
-&gt; vidctrl /dev/video --input=0 --format=YUYV --size=352x288
-&gt; dd if=/dev/video of=myimage.422 bs=202752 count=1
-</screen>
- </informalexample>
-
- <para>To read from the device applications use the
-&func-read; function, to write the &func-write; function.
-Drivers must implement at least one I/O method if they
-exchange data with applications, but it need not be this one.<footnote>
- <para>It would be desirable if applications could depend on
-drivers supporting all I/O interfaces, but as much as the complex
-memory mapping I/O can be inadequate for some devices we have no
-reason to require this interface, which is most useful for simple
-applications capturing still images.</para>
- </footnote> When reading or writing is supported, the driver
-must also support the &func-select; and &func-poll;
-functions.<footnote>
- <para>At the driver level <function>select()</function> and
-<function>poll()</function> are the same, and
-<function>select()</function> is too important to be optional.</para>
- </footnote></para>
- </section>
-
- <section id="mmap">
- <title>Streaming I/O (Memory Mapping)</title>
-
- <para>Input and output devices support this I/O method when the
-<constant>V4L2_CAP_STREAMING</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl is set. There are two
-streaming methods; to determine if the memory mapping flavor is
-supported, applications must call the &VIDIOC-REQBUFS; ioctl with the
-memory type set to <constant>V4L2_MEMORY_MMAP</constant>.</para>
-
- <para>Streaming is an I/O method where only pointers to buffers
-are exchanged between application and driver; the data itself is not
-copied. Memory mapping is primarily intended to map buffers in device
-memory into the application's address space. Device memory can be for
-example the video memory on a graphics card with a video capture
-add-on. However, being the most efficient I/O method available for a
-long time, many other drivers support streaming as well, allocating
-buffers in DMA-able main memory.</para>
-
- <para>A driver can support many sets of buffers. Each set is
-identified by a unique buffer type value. The sets are independent and
-each set can hold a different type of data. To access different sets
-at the same time different file descriptors must be used.<footnote>
- <para>One could use one file descriptor and set the buffer
-type field accordingly when calling &VIDIOC-QBUF; etc., but it makes
-the <function>select()</function> function ambiguous. We also like the
-clean approach of one file descriptor per logical stream. Video
-overlay for example is also a logical stream, although the CPU is not
-needed for continuous operation.</para>
- </footnote></para>
-
- <para>To allocate device buffers applications call the
-&VIDIOC-REQBUFS; ioctl with the desired number of buffers and buffer
-type, for example <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>.
-This ioctl can also be used to change the number of buffers or to free
-the allocated memory, provided none of the buffers are still
-mapped.</para>
-
- <para>Before applications can access the buffers they must map
-them into their address space with the &func-mmap; function. The
-location of the buffers in device memory can be determined with the
-&VIDIOC-QUERYBUF; ioctl. In the single-planar API case, the
-<structfield>m.offset</structfield> and <structfield>length</structfield>
-returned in a &v4l2-buffer; are passed as sixth and second parameter to the
-<function>mmap()</function> function. When using the multi-planar API,
-&v4l2-buffer; contains an array of &v4l2-plane; structures, each
-containing its own <structfield>m.offset</structfield> and
-<structfield>length</structfield>. When using the multi-planar API, every
-plane of every buffer has to be mapped separately, so the number of
-calls to &func-mmap; should be equal to number of buffers times number of
-planes in each buffer. The offset and length values must not be modified.
-Remember, the buffers are allocated in physical memory, as opposed to virtual
-memory, which can be swapped out to disk. Applications should free the buffers
-as soon as possible with the &func-munmap; function.</para>
-
- <example>
- <title>Mapping buffers in the single-planar API</title>
- <programlisting>
-&v4l2-requestbuffers; reqbuf;
-struct {
- void *start;
- size_t length;
-} *buffers;
-unsigned int i;
-
-memset(&amp;reqbuf, 0, sizeof(reqbuf));
-reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-reqbuf.memory = V4L2_MEMORY_MMAP;
-reqbuf.count = 20;
-
-if (-1 == ioctl (fd, &VIDIOC-REQBUFS;, &amp;reqbuf)) {
- if (errno == EINVAL)
- printf("Video capturing or mmap-streaming is not supported\n");
- else
- perror("VIDIOC_REQBUFS");
-
- exit(EXIT_FAILURE);
-}
-
-/* We want at least five buffers. */
-
-if (reqbuf.count &lt; 5) {
- /* You may need to free the buffers here. */
- printf("Not enough buffer memory\n");
- exit(EXIT_FAILURE);
-}
-
-buffers = calloc(reqbuf.count, sizeof(*buffers));
-assert(buffers != NULL);
-
-for (i = 0; i &lt; reqbuf.count; i++) {
- &v4l2-buffer; buffer;
-
- memset(&amp;buffer, 0, sizeof(buffer));
- buffer.type = reqbuf.type;
- buffer.memory = V4L2_MEMORY_MMAP;
- buffer.index = i;
-
- if (-1 == ioctl (fd, &VIDIOC-QUERYBUF;, &amp;buffer)) {
- perror("VIDIOC_QUERYBUF");
- exit(EXIT_FAILURE);
- }
-
- buffers[i].length = buffer.length; /* remember for munmap() */
-
- buffers[i].start = mmap(NULL, buffer.length,
- PROT_READ | PROT_WRITE, /* recommended */
- MAP_SHARED, /* recommended */
- fd, buffer.m.offset);
-
- if (MAP_FAILED == buffers[i].start) {
- /* If you do not exit here you should unmap() and free()
- the buffers mapped so far. */
- perror("mmap");
- exit(EXIT_FAILURE);
- }
-}
-
-/* Cleanup. */
-
-for (i = 0; i &lt; reqbuf.count; i++)
- munmap(buffers[i].start, buffers[i].length);
- </programlisting>
- </example>
-
- <example>
- <title>Mapping buffers in the multi-planar API</title>
- <programlisting>
-&v4l2-requestbuffers; reqbuf;
-/* Our current format uses 3 planes per buffer */
-#define FMT_NUM_PLANES 3
-
-struct {
- void *start[FMT_NUM_PLANES];
- size_t length[FMT_NUM_PLANES];
-} *buffers;
-unsigned int i, j;
-
-memset(&amp;reqbuf, 0, sizeof(reqbuf));
-reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
-reqbuf.memory = V4L2_MEMORY_MMAP;
-reqbuf.count = 20;
-
-if (ioctl(fd, &VIDIOC-REQBUFS;, &amp;reqbuf) &lt; 0) {
- if (errno == EINVAL)
- printf("Video capturing or mmap-streaming is not supported\n");
- else
- perror("VIDIOC_REQBUFS");
-
- exit(EXIT_FAILURE);
-}
-
-/* We want at least five buffers. */
-
-if (reqbuf.count &lt; 5) {
- /* You may need to free the buffers here. */
- printf("Not enough buffer memory\n");
- exit(EXIT_FAILURE);
-}
-
-buffers = calloc(reqbuf.count, sizeof(*buffers));
-assert(buffers != NULL);
-
-for (i = 0; i &lt; reqbuf.count; i++) {
- &v4l2-buffer; buffer;
- &v4l2-plane; planes[FMT_NUM_PLANES];
-
- memset(&amp;buffer, 0, sizeof(buffer));
- buffer.type = reqbuf.type;
- buffer.memory = V4L2_MEMORY_MMAP;
- buffer.index = i;
- /* length in struct v4l2_buffer in multi-planar API stores the size
- * of planes array. */
- buffer.length = FMT_NUM_PLANES;
- buffer.m.planes = planes;
-
- if (ioctl(fd, &VIDIOC-QUERYBUF;, &amp;buffer) &lt; 0) {
- perror("VIDIOC_QUERYBUF");
- exit(EXIT_FAILURE);
- }
-
- /* Every plane has to be mapped separately */
- for (j = 0; j &lt; FMT_NUM_PLANES; j++) {
- buffers[i].length[j] = buffer.m.planes[j].length; /* remember for munmap() */
-
- buffers[i].start[j] = mmap(NULL, buffer.m.planes[j].length,
- PROT_READ | PROT_WRITE, /* recommended */
- MAP_SHARED, /* recommended */
- fd, buffer.m.planes[j].m.offset);
-
- if (MAP_FAILED == buffers[i].start[j]) {
- /* If you do not exit here you should unmap() and free()
- the buffers and planes mapped so far. */
- perror("mmap");
- exit(EXIT_FAILURE);
- }
- }
-}
-
-/* Cleanup. */
-
-for (i = 0; i &lt; reqbuf.count; i++)
- for (j = 0; j &lt; FMT_NUM_PLANES; j++)
- munmap(buffers[i].start[j], buffers[i].length[j]);
- </programlisting>
- </example>
-
- <para>Conceptually streaming drivers maintain two buffer queues, an incoming
-and an outgoing queue. They separate the synchronous capture or output
-operation locked to a video clock from the application which is
-subject to random disk or network delays and preemption by
-other processes, thereby reducing the probability of data loss.
-The queues are organized as FIFOs, buffers will be
-output in the order enqueued in the incoming FIFO, and were
-captured in the order dequeued from the outgoing FIFO.</para>
-
- <para>The driver may require a minimum number of buffers enqueued
-at all times to function; apart from this no limit exists on the number
-of buffers applications can enqueue in advance, or dequeue and
-process. They can also enqueue in a different order than buffers have
-been dequeued, and the driver can <emphasis>fill</emphasis> enqueued
-<emphasis>empty</emphasis> buffers in any order. <footnote>
- <para>Random enqueue order permits applications processing
-images out of order (such as video codecs) to return buffers earlier,
-reducing the probability of data loss. Random fill order allows
-drivers to reuse buffers on a LIFO-basis, taking advantage of caches
-holding scatter-gather lists and the like.</para>
- </footnote> The index number of a buffer (&v4l2-buffer;
-<structfield>index</structfield>) plays no role here, it only
-identifies the buffer.</para>
-
- <para>Initially all mapped buffers are in dequeued state,
-inaccessible by the driver. For capturing applications it is customary
-to first enqueue all mapped buffers, then to start capturing and enter
-the read loop. Here the application waits until a filled buffer can be
-dequeued, and re-enqueues the buffer when the data is no longer
-needed. Output applications fill and enqueue buffers, when enough
-buffers are stacked up the output is started with
-<constant>VIDIOC_STREAMON</constant>. In the write loop, when
-the application runs out of free buffers, it must wait until an empty
-buffer can be dequeued and reused.</para>
-
- <para>To enqueue and dequeue a buffer applications use the
-&VIDIOC-QBUF; and &VIDIOC-DQBUF; ioctl. The status of a buffer being
-mapped, enqueued, full or empty can be determined at any time using the
-&VIDIOC-QUERYBUF; ioctl. Two methods exist to suspend execution of the
-application until one or more buffers can be dequeued. By default
-<constant>VIDIOC_DQBUF</constant> blocks when no buffer is in the
-outgoing queue. When the <constant>O_NONBLOCK</constant> flag was
-given to the &func-open; function, <constant>VIDIOC_DQBUF</constant>
-returns immediately with an &EAGAIN; when no buffer is available. The
-&func-select; or &func-poll; functions are always available.</para>
-
- <para>To start and stop capturing or output applications call the
-&VIDIOC-STREAMON; and &VIDIOC-STREAMOFF; ioctl. Note
-<constant>VIDIOC_STREAMOFF</constant> removes all buffers from both
-queues as a side effect. Since there is no notion of doing anything
-"now" on a multitasking system, if an application needs to synchronize
-with another event it should examine the &v4l2-buffer;
-<structfield>timestamp</structfield> of captured or outputted buffers.
-</para>
-
- <para>Drivers implementing memory mapping I/O must
-support the <constant>VIDIOC_REQBUFS</constant>,
-<constant>VIDIOC_QUERYBUF</constant>,
-<constant>VIDIOC_QBUF</constant>, <constant>VIDIOC_DQBUF</constant>,
-<constant>VIDIOC_STREAMON</constant> and
-<constant>VIDIOC_STREAMOFF</constant> ioctl, the
-<function>mmap()</function>, <function>munmap()</function>,
-<function>select()</function> and <function>poll()</function>
-functions.<footnote>
- <para>At the driver level <function>select()</function> and
-<function>poll()</function> are the same, and
-<function>select()</function> is too important to be optional. The
-rest should be evident.</para>
- </footnote></para>
-
- <para>[capture example]</para>
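-
- <para>A minimal sketch of such a capture cycle, continuing the
-single-planar mapping example above and assuming a
-<function>process_image()</function> function supplied by the
-application:</para>
-
- <example>
- <title>Capturing with memory mapped buffers</title>
- <programlisting>
-unsigned int i;
-enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-/* Enqueue all mapped buffers before starting the stream. */
-
-for (i = 0; i &lt; reqbuf.count; i++) {
- &v4l2-buffer; buf;
-
- memset(&amp;buf, 0, sizeof(buf));
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = i;
-
- if (-1 == ioctl(fd, &VIDIOC-QBUF;, &amp;buf)) {
- perror("VIDIOC_QBUF");
- exit(EXIT_FAILURE);
- }
-}
-
-if (-1 == ioctl(fd, &VIDIOC-STREAMON;, &amp;type)) {
- perror("VIDIOC_STREAMON");
- exit(EXIT_FAILURE);
-}
-
-/* Read loop: dequeue a filled buffer, use it, enqueue it again. */
-
-for (;;) {
- &v4l2-buffer; buf;
-
- memset(&amp;buf, 0, sizeof(buf));
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
-
- if (-1 == ioctl(fd, &VIDIOC-DQBUF;, &amp;buf)) {
- if (errno == EINTR)
- continue;
- perror("VIDIOC_DQBUF");
- break;
- }
-
- process_image(buffers[buf.index].start, buf.bytesused);
-
- if (-1 == ioctl(fd, &VIDIOC-QBUF;, &amp;buf)) {
- perror("VIDIOC_QBUF");
- break;
- }
-}
-
-if (-1 == ioctl(fd, &VIDIOC-STREAMOFF;, &amp;type))
- perror("VIDIOC_STREAMOFF");
- </programlisting>
- </example>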
-
- </section>
-
- <section id="userp">
- <title>Streaming I/O (User Pointers)</title>
-
- <para>Input and output devices support this I/O method when the
-<constant>V4L2_CAP_STREAMING</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability;
-returned by the &VIDIOC-QUERYCAP; ioctl is set. Whether the particular
-user pointer method (not only memory mapping) is supported must be
-determined by calling the &VIDIOC-REQBUFS; ioctl with the memory type
-set to <constant>V4L2_MEMORY_USERPTR</constant>.</para>
-
- <para>This I/O method combines advantages of the read/write and
-memory mapping methods. Buffers (planes) are allocated by the application
-itself, and can reside for example in virtual or shared memory. Only
-pointers to data are exchanged, these pointers and meta-information
-are passed in &v4l2-buffer; (or in &v4l2-plane; in the multi-planar API case).
-The driver must be switched into user pointer I/O mode by calling the
-&VIDIOC-REQBUFS; with the desired buffer type. No buffers (planes) are allocated
-beforehand, consequently they are not indexed and cannot be queried like mapped
-buffers with the <constant>VIDIOC_QUERYBUF</constant> ioctl.</para>
-
- <example>
- <title>Initiating streaming I/O with user pointers</title>
-
- <programlisting>
-&v4l2-requestbuffers; reqbuf;
-
-memset (&amp;reqbuf, 0, sizeof (reqbuf));
-reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-reqbuf.memory = V4L2_MEMORY_USERPTR;
-
-if (ioctl (fd, &VIDIOC-REQBUFS;, &amp;reqbuf) == -1) {
- if (errno == EINVAL)
- printf ("Video capturing or user pointer streaming is not supported\n");
- else
- perror ("VIDIOC_REQBUFS");
-
- exit (EXIT_FAILURE);
-}
- </programlisting>
- </example>
-
- <para>Buffer (plane) addresses and sizes are passed on the fly with the
-&VIDIOC-QBUF; ioctl. Although buffers are commonly cycled,
-applications can pass different addresses and sizes at each
-<constant>VIDIOC_QBUF</constant> call. If required by the hardware the
-driver swaps memory pages within physical memory to create a
-contiguous area of memory. This happens transparently to the
-application in the virtual memory subsystem of the kernel. When buffer
-pages have been swapped out to disk they are brought back and finally
-locked in physical memory for DMA.<footnote>
- <para>We expect that frequently used buffers are typically not
-swapped out. Anyway, the process of swapping, locking or generating
-scatter-gather lists may be time consuming. The delay can be masked by
-the depth of the incoming buffer queue, and perhaps by maintaining
-caches assuming a buffer will be soon enqueued again. On the other
-hand, to optimize memory usage drivers can limit the number of buffers
-locked in advance and recycle the most recently used buffers first. Of
-course, the pages of empty buffers in the incoming queue need not be
-saved to disk. Output buffers must be saved on the incoming and
-outgoing queue because an application may share them with other
-processes.</para>
- </footnote></para>
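-
- <para>A minimal sketch of queueing one application-allocated buffer
-with the single-planar API, assuming a file descriptor
-<varname>fd</varname> switched to user pointer I/O as shown above and a
-<varname>buffer_size</varname> large enough for an image in the
-negotiated format:</para>
-
- <example>
- <title>Queueing a user pointer buffer with the single-planar API</title>
-
- <programlisting>
-&v4l2-buffer; buf;
-void *data;
-
-data = malloc(buffer_size);
-if (data == NULL)
- exit(EXIT_FAILURE);
-
-memset(&amp;buf, 0, sizeof(buf));
-buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-buf.memory = V4L2_MEMORY_USERPTR;
-buf.index = 0;
-buf.m.userptr = (unsigned long)data;
-buf.length = buffer_size;
-
-if (ioctl(fd, &VIDIOC-QBUF;, &amp;buf) == -1) {
- perror("VIDIOC_QBUF");
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>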
-
- <para>Filled or displayed buffers are dequeued with the
-&VIDIOC-DQBUF; ioctl. The driver can unlock the memory pages at any
-time between the completion of the DMA and this ioctl. The memory is
-also unlocked when &VIDIOC-STREAMOFF; is called, &VIDIOC-REQBUFS;, or
-when the device is closed. Applications must take care not to free
-buffers without dequeuing. For one, the buffers remain locked until
-further notice, wasting physical memory. Second, the driver will not be
-notified when the memory is returned to the application's free list
-and subsequently reused for other purposes, possibly completing the
-requested DMA and overwriting valuable data.</para>
-
- <para>For capturing applications it is customary to enqueue a
-number of empty buffers, to start capturing and enter the read loop.
-Here the application waits until a filled buffer can be dequeued, and
-re-enqueues the buffer when the data is no longer needed. Output
-applications fill and enqueue buffers, when enough buffers are stacked
-up output is started. In the write loop, when the application
-runs out of free buffers it must wait until an empty buffer can be
-dequeued and reused. Two methods exist to suspend execution of the
-application until one or more buffers can be dequeued. By default
-<constant>VIDIOC_DQBUF</constant> blocks when no buffer is in the
-outgoing queue. When the <constant>O_NONBLOCK</constant> flag was
-given to the &func-open; function, <constant>VIDIOC_DQBUF</constant>
-returns immediately with an &EAGAIN; when no buffer is available. The
-&func-select; or &func-poll; functions are always available.</para>
-
- <para>To start and stop capturing or output applications call the
-&VIDIOC-STREAMON; and &VIDIOC-STREAMOFF; ioctl. Note
-<constant>VIDIOC_STREAMOFF</constant> removes all buffers from both
-queues and unlocks all buffers as a side effect. Since there is no
-notion of doing anything "now" on a multitasking system, if an
-application needs to synchronize with another event it should examine
-the &v4l2-buffer; <structfield>timestamp</structfield> of captured
-or outputted buffers.</para>
-
- <para>Drivers implementing user pointer I/O must
-support the <constant>VIDIOC_REQBUFS</constant>,
-<constant>VIDIOC_QBUF</constant>, <constant>VIDIOC_DQBUF</constant>,
-<constant>VIDIOC_STREAMON</constant> and
-<constant>VIDIOC_STREAMOFF</constant> ioctl, the
-<function>select()</function> and <function>poll()</function> functions.<footnote>
- <para>At the driver level <function>select()</function> and
-<function>poll()</function> are the same, and
-<function>select()</function> is too important to be optional. The
-rest should be evident.</para>
- </footnote></para>
- </section>
-
- <section id="dmabuf">
- <title>Streaming I/O (DMA buffer importing)</title>
-
-<para>The DMABUF framework provides a generic method for sharing buffers
-between multiple devices. Device drivers that support DMABUF can export a DMA
-buffer to userspace as a file descriptor (known as the exporter role), import a
-DMA buffer from userspace using a file descriptor previously exported for a
-different or the same device (known as the importer role), or both. This
-section describes the DMABUF importer role API in V4L2.</para>
-
- <para>Refer to <link linkend="vidioc-expbuf">DMABUF exporting</link> for
-details about exporting V4L2 buffers as DMABUF file descriptors.</para>
-
-<para>Input and output devices support the streaming I/O method when the
-<constant>V4L2_CAP_STREAMING</constant> flag in the
-<structfield>capabilities</structfield> field of &v4l2-capability; returned by
-the &VIDIOC-QUERYCAP; ioctl is set. Whether importing DMA buffers through
-DMABUF file descriptors is supported is determined by calling the
-&VIDIOC-REQBUFS; ioctl with the memory type set to
-<constant>V4L2_MEMORY_DMABUF</constant>.</para>
-
- <para>This I/O method is dedicated to sharing DMA buffers between different
-devices, which may be V4L devices or other video-related devices (e.g. DRM).
-Buffers (planes) are allocated by a driver on behalf of an application. Next,
-these buffers are exported to the application as file descriptors using an API
-which is specific to the allocator driver. Only such file descriptors are
-exchanged. The descriptors and meta-information are passed in &v4l2-buffer; (or
-in &v4l2-plane; in the multi-planar API case). The driver must be switched
-into DMABUF I/O mode by calling the &VIDIOC-REQBUFS; with the desired buffer
-type.</para>
-
- <example>
- <title>Initiating streaming I/O with DMABUF file descriptors</title>
-
- <programlisting>
-&v4l2-requestbuffers; reqbuf;
-
-memset(&amp;reqbuf, 0, sizeof (reqbuf));
-reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-reqbuf.memory = V4L2_MEMORY_DMABUF;
-reqbuf.count = 1;
-
-if (ioctl(fd, &VIDIOC-REQBUFS;, &amp;reqbuf) == -1) {
- if (errno == EINVAL)
- printf("Video capturing or DMABUF streaming is not supported\n");
- else
- perror("VIDIOC_REQBUFS");
-
- exit(EXIT_FAILURE);
-}
- </programlisting>
- </example>
-
- <para>The buffer (plane) file descriptor is passed on the fly with the
-&VIDIOC-QBUF; ioctl. In case of multiplanar buffers, every plane can be
-associated with a different DMABUF descriptor. Although buffers are commonly
-cycled, applications can pass a different DMABUF descriptor at each
-<constant>VIDIOC_QBUF</constant> call.</para>
-
- <example>
- <title>Queueing DMABUF using single plane API</title>
-
- <programlisting>
-int buffer_queue(int v4lfd, int index, int dmafd)
-{
- &v4l2-buffer; buf;
-
- memset(&amp;buf, 0, sizeof buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_DMABUF;
- buf.index = index;
- buf.m.fd = dmafd;
-
- if (ioctl(v4lfd, &VIDIOC-QBUF;, &amp;buf) == -1) {
- perror("VIDIOC_QBUF");
- return -1;
- }
-
- return 0;
-}
- </programlisting>
- </example>
-
- <example>
- <title>Queueing DMABUF using multi plane API</title>
-
- <programlisting>
-int buffer_queue_mp(int v4lfd, int index, int dmafd[], int n_planes)
-{
- &v4l2-buffer; buf;
- &v4l2-plane; planes[VIDEO_MAX_PLANES];
- int i;
-
- memset(&amp;buf, 0, sizeof buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- buf.memory = V4L2_MEMORY_DMABUF;
- buf.index = index;
- buf.m.planes = planes;
- buf.length = n_planes;
-
- memset(&amp;planes, 0, sizeof planes);
-
- for (i = 0; i &lt; n_planes; ++i)
- buf.m.planes[i].m.fd = dmafd[i];
-
- if (ioctl(v4lfd, &VIDIOC-QBUF;, &amp;buf) == -1) {
- perror("VIDIOC_QBUF");
- return -1;
- }
-
- return 0;
-}
- </programlisting>
- </example>
-
- <para>Captured or displayed buffers are dequeued with the
-&VIDIOC-DQBUF; ioctl. The driver can unlock the buffer at any
-time between the completion of the DMA and this ioctl. The memory is
-also unlocked when &VIDIOC-STREAMOFF; is called, &VIDIOC-REQBUFS;, or
-when the device is closed.</para>
-
- <para>For capturing applications it is customary to enqueue a
-number of empty buffers, to start capturing and enter the read loop.
-Here the application waits until a filled buffer can be dequeued, and
-re-enqueues the buffer when the data is no longer needed. Output
-applications fill and enqueue buffers, when enough buffers are stacked
-up output is started. In the write loop, when the application
-runs out of free buffers it must wait until an empty buffer can be
-dequeued and reused. Two methods exist to suspend execution of the
-application until one or more buffers can be dequeued. By default
-<constant>VIDIOC_DQBUF</constant> blocks when no buffer is in the
-outgoing queue. When the <constant>O_NONBLOCK</constant> flag was
-given to the &func-open; function, <constant>VIDIOC_DQBUF</constant>
-returns immediately with an &EAGAIN; when no buffer is available. The
-&func-select; and &func-poll; functions are always available.</para>
-
- <para>To start and stop capturing or displaying applications call the
-&VIDIOC-STREAMON; and &VIDIOC-STREAMOFF; ioctls. Note that
-<constant>VIDIOC_STREAMOFF</constant> removes all buffers from both queues and
-unlocks all buffers as a side effect. Since there is no notion of doing
-anything "now" on a multitasking system, if an application needs to synchronize
-with another event it should examine the &v4l2-buffer;
-<structfield>timestamp</structfield> of captured or outputted buffers.</para>
-
- <para>Drivers implementing DMABUF importing I/O must support the
-<constant>VIDIOC_REQBUFS</constant>, <constant>VIDIOC_QBUF</constant>,
-<constant>VIDIOC_DQBUF</constant>, <constant>VIDIOC_STREAMON</constant> and
-<constant>VIDIOC_STREAMOFF</constant> ioctls, and the
-<function>select()</function> and <function>poll()</function> functions.</para>
-
- </section>
-
- <section id="async">
- <title>Asynchronous I/O</title>
-
- <para>This method is not defined yet.</para>
- </section>
-
- <section id="buffer">
- <title>Buffers</title>
-
- <para>A buffer contains data exchanged by application and
-driver using one of the Streaming I/O methods. In the multi-planar API, the
-data is held in planes, while the buffer structure acts as a container
-for the planes. Only pointers to buffers (planes) are exchanged, the data
-itself is not copied. These pointers, together with meta-information like
-timestamps or field parity, are stored in a struct
-<structname>v4l2_buffer</structname>, argument to
-the &VIDIOC-QUERYBUF;, &VIDIOC-QBUF; and &VIDIOC-DQBUF; ioctl.
-In the multi-planar API, some plane-specific members of struct
-<structname>v4l2_buffer</structname>, such as pointers and sizes for each
-plane, are stored in struct <structname>v4l2_plane</structname> instead.
-In that case, struct <structname>v4l2_buffer</structname> contains an array of
-plane structures.</para>
-
- <para>Dequeued video buffers come with timestamps. The driver
- decides at which part of the frame and with which clock the
- timestamp is taken. Please see flags in the masks
- <constant>V4L2_BUF_FLAG_TIMESTAMP_MASK</constant> and
- <constant>V4L2_BUF_FLAG_TSTAMP_SRC_MASK</constant> in <xref
- linkend="buffer-flags" />. These flags are always valid and constant
- across all buffers during the whole video stream. Changes in these
- flags may take place as a side effect of &VIDIOC-S-INPUT; or
- &VIDIOC-S-OUTPUT; however. The
- <constant>V4L2_BUF_FLAG_TIMESTAMP_COPY</constant> timestamp type
- which is used e.g. by mem-to-mem devices is an exception to the
- rule: the timestamp source flags are copied from the OUTPUT video
- buffer to the CAPTURE video buffer.</para>
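-
- <para>As a sketch of how the timestamp can be used, assuming a
-dequeued &v4l2-buffer; <varname>buf</varname> and a driver reporting
-<constant>V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC</constant>, the age of a
-captured frame can be estimated by comparing the timestamp with the
-current value of the same clock:</para>
-
- <informalexample>
- <programlisting>
-struct timespec ts;
-struct timeval now, delta;
-
-clock_gettime(CLOCK_MONOTONIC, &amp;ts);
-now.tv_sec = ts.tv_sec;
-now.tv_usec = ts.tv_nsec / 1000;
-
-timersub(&amp;now, &amp;buf.timestamp, &amp;delta);
-printf("frame captured %ld.%06ld seconds ago\n",
- (long)delta.tv_sec, (long)delta.tv_usec);
- </programlisting>
- </informalexample>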
-
- <table frame="none" pgwide="1" id="v4l2-buffer">
- <title>struct <structname>v4l2_buffer</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry></entry>
- <entry>Number of the buffer, set by the application except
-when calling &VIDIOC-DQBUF;, then it is set by the driver.
-This field can range from zero to the number of buffers allocated
-with the &VIDIOC-REQBUFS; ioctl (&v4l2-requestbuffers; <structfield>count</structfield>),
-plus any buffers allocated with &VIDIOC-CREATE-BUFS; minus one.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>Type of the buffer, same as &v4l2-format;
-<structfield>type</structfield> or &v4l2-requestbuffers;
-<structfield>type</structfield>, set by the application. See <xref
-linkend="v4l2-buf-type" /></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>bytesused</structfield></entry>
- <entry></entry>
- <entry>The number of bytes occupied by the data in the
-buffer. It depends on the negotiated data format and may change with
-each buffer for compressed variable size data like JPEG images.
-Drivers must set this field when <structfield>type</structfield>
-refers to a capture stream, applications when it refers to an output stream.
-If the application sets this to 0 for an output stream, then
-<structfield>bytesused</structfield> will be set to the size of the
-buffer (see the <structfield>length</structfield> field of this struct) by
-the driver. For multiplanar formats this field is ignored and the
-<structfield>planes</structfield> pointer is used instead.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry>Flags set by the application or driver, see <xref
-linkend="buffer-flags" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>field</structfield></entry>
- <entry></entry>
- <entry>Indicates the field order of the image in the
-buffer, see <xref linkend="v4l2-field" />. This field is not used when
-the buffer contains VBI data. Drivers must set it when
-<structfield>type</structfield> refers to a capture stream,
-applications when it refers to an output stream.</entry>
- </row>
- <row>
- <entry>struct timeval</entry>
- <entry><structfield>timestamp</structfield></entry>
- <entry></entry>
- <entry><para>For capture streams this is time when the first data
- byte was captured, as returned by the
- <function>clock_gettime()</function> function for the relevant
- clock id; see <constant>V4L2_BUF_FLAG_TIMESTAMP_*</constant> in
- <xref linkend="buffer-flags" />. For output streams the driver
- stores the time at which the last data byte was actually sent out
- in the <structfield>timestamp</structfield> field. This permits
- applications to monitor the drift between the video and system
- clock. For output streams that use <constant>V4L2_BUF_FLAG_TIMESTAMP_COPY</constant>
- the application has to fill in the timestamp which will be copied
- by the driver to the capture stream.</para></entry>
- </row>
- <row>
- <entry>&v4l2-timecode;</entry>
- <entry><structfield>timecode</structfield></entry>
- <entry></entry>
- <entry>When <structfield>type</structfield> is
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> and the
-<constant>V4L2_BUF_FLAG_TIMECODE</constant> flag is set in
-<structfield>flags</structfield>, this structure contains a frame
-timecode. In <link linkend="v4l2-field">V4L2_FIELD_ALTERNATE</link>
-mode the top and bottom field contain the same timecode.
-Timecodes are intended to help video editing and are typically recorded on
-video tapes, but also embedded in compressed formats like MPEG. This
-field is independent of the <structfield>timestamp</structfield> and
-<structfield>sequence</structfield> fields.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>sequence</structfield></entry>
- <entry></entry>
- <entry>Set by the driver, counting the frames (not fields!) in
-sequence. This field is set for both input and output devices.</entry>
- </row>
- <row>
- <entry spanname="hspan"><para>In <link
-linkend="v4l2-field">V4L2_FIELD_ALTERNATE</link> mode the top and
-bottom field have the same sequence number. The count starts at zero
-and includes dropped or repeated frames. A dropped frame was received
-by an input device but could not be stored due to lack of free buffer
-space. A repeated frame was displayed again by an output device
-because the application did not pass new data in
-time.</para><para>Note this may count the frames received
-e.g. over USB, without taking into account the frames dropped by the
-remote hardware due to limited compression throughput or bus
-bandwidth. These devices identify themselves by not enumerating any video
-standards, see <xref linkend="standard" />.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>memory</structfield></entry>
- <entry></entry>
- <entry>This field must be set by applications and/or drivers
-in accordance with the selected I/O method. See <xref linkend="v4l2-memory"
- /></entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield>m</structfield></entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>offset</structfield></entry>
- <entry>For the single-planar API and when
-<structfield>memory</structfield> is <constant>V4L2_MEMORY_MMAP</constant> this
-is the offset of the buffer from the start of the device memory. The value is
-returned by the driver and, apart from serving as a parameter to the &func-mmap;
-function, is not useful for applications. See <xref linkend="mmap" /> for details.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>unsigned long</entry>
- <entry><structfield>userptr</structfield></entry>
- <entry>For the single-planar API and when
-<structfield>memory</structfield> is <constant>V4L2_MEMORY_USERPTR</constant>
-this is a pointer to the buffer (casted to unsigned long type) in virtual
-memory, set by the application. See <xref linkend="userp" /> for details.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct v4l2_plane</entry>
- <entry><structfield>*planes</structfield></entry>
- <entry>When using the multi-planar API, contains a userspace pointer
- to an array of &v4l2-plane;. The size of the array should be put
- in the <structfield>length</structfield> field of this
- <structname>v4l2_buffer</structname> structure.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>int</entry>
- <entry><structfield>fd</structfield></entry>
- <entry>For the single-plane API and when
-<structfield>memory</structfield> is <constant>V4L2_MEMORY_DMABUF</constant> this
-is the file descriptor associated with a DMABUF buffer.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>length</structfield></entry>
- <entry></entry>
- <entry>Size of the buffer (not the payload) in bytes for the
- single-planar API. This is set by the driver based on the calls to
- &VIDIOC-REQBUFS; and/or &VIDIOC-CREATE-BUFS;. For the multi-planar API the application sets
- this to the number of elements in the <structfield>planes</structfield>
- array. The driver will fill in the actual number of valid elements in
- that array.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved2</structfield></entry>
- <entry></entry>
- <entry>A place holder for future extensions. Drivers and applications
-must set this to 0.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield></entry>
- <entry></entry>
- <entry>A place holder for future extensions. Drivers and applications
-must set this to 0.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-plane">
- <title>struct <structname>v4l2_plane</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>bytesused</structfield></entry>
- <entry></entry>
- <entry>The number of bytes occupied by data in the plane
- (its payload). Drivers must set this field when <structfield>type</structfield>
- refers to a capture stream, applications when it refers to an output stream.
- If the application sets this to 0 for an output stream, then
- <structfield>bytesused</structfield> will be set to the size of the
- plane (see the <structfield>length</structfield> field of this struct)
- by the driver. Note that the actual image data starts at
- <structfield>data_offset</structfield> which may not be 0.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>length</structfield></entry>
- <entry></entry>
- <entry>Size in bytes of the plane (not its payload). This is set by the driver
- based on the calls to &VIDIOC-REQBUFS; and/or &VIDIOC-CREATE-BUFS;.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield>m</structfield></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>mem_offset</structfield></entry>
- <entry>When the memory type in the containing &v4l2-buffer; is
- <constant>V4L2_MEMORY_MMAP</constant>, this is the value that
- should be passed to &func-mmap;, similar to the
- <structfield>offset</structfield> field in &v4l2-buffer;.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>unsigned long</entry>
- <entry><structfield>userptr</structfield></entry>
- <entry>When the memory type in the containing &v4l2-buffer; is
- <constant>V4L2_MEMORY_USERPTR</constant>, this is a userspace
- pointer to the memory allocated for this plane by an application.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>int</entry>
- <entry><structfield>fd</structfield></entry>
- <entry>When the memory type in the containing &v4l2-buffer; is
- <constant>V4L2_MEMORY_DMABUF</constant>, this is a file
- descriptor associated with a DMABUF buffer, similar to the
- <structfield>fd</structfield> field in &v4l2-buffer;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>data_offset</structfield></entry>
- <entry></entry>
- <entry>Offset in bytes to video data in the plane.
- Drivers must set this field when <structfield>type</structfield>
- refers to a capture stream, applications when it refers to an output stream.
- Note that data_offset is included in <structfield>bytesused</structfield>.
- So the size of the image in the plane is
- <structfield>bytesused</structfield>-<structfield>data_offset</structfield> at
- offset <structfield>data_offset</structfield> from the start of the plane.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved[11]</structfield></entry>
- <entry></entry>
- <entry>Reserved for future use. Should be zeroed by drivers and
- applications.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-buf-type">
- <title>enum v4l2_buf_type</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant></entry>
- <entry>1</entry>
- <entry>Buffer of a single-planar video capture stream, see <xref
- linkend="capture" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>
- </entry>
- <entry>9</entry>
- <entry>Buffer of a multi-planar video capture stream, see <xref
- linkend="capture" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant></entry>
- <entry>2</entry>
- <entry>Buffer of a single-planar video output stream, see <xref
- linkend="output" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>
- </entry>
- <entry>10</entry>
- <entry>Buffer of a multi-planar video output stream, see <xref
- linkend="output" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant></entry>
- <entry>3</entry>
- <entry>Buffer for video overlay, see <xref linkend="overlay" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VBI_CAPTURE</constant></entry>
- <entry>4</entry>
- <entry>Buffer of a raw VBI capture stream, see <xref
- linkend="raw-vbi" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VBI_OUTPUT</constant></entry>
- <entry>5</entry>
- <entry>Buffer of a raw VBI output stream, see <xref
- linkend="raw-vbi" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_SLICED_VBI_CAPTURE</constant></entry>
- <entry>6</entry>
- <entry>Buffer of a sliced VBI capture stream, see <xref
- linkend="sliced" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_SLICED_VBI_OUTPUT</constant></entry>
- <entry>7</entry>
- <entry>Buffer of a sliced VBI output stream, see <xref
- linkend="sliced" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY</constant></entry>
- <entry>8</entry>
- <entry>Buffer for video output overlay (OSD), see <xref
- linkend="osd" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_SDR_CAPTURE</constant></entry>
- <entry>11</entry>
- <entry>Buffer for Software Defined Radio (SDR) capture stream, see
- <xref linkend="sdr" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_TYPE_SDR_OUTPUT</constant></entry>
- <entry>12</entry>
- <entry>Buffer for Software Defined Radio (SDR) output stream, see
- <xref linkend="sdr" />.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="buffer-flags">
- <title>Buffer Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_BUF_FLAG_MAPPED</constant></entry>
- <entry>0x00000001</entry>
- <entry>The buffer resides in device memory and has been mapped
-into the application's address space, see <xref linkend="mmap" /> for details.
-Drivers set or clear this flag when the
-<link linkend="vidioc-querybuf">VIDIOC_QUERYBUF</link>, <link
- linkend="vidioc-qbuf">VIDIOC_QBUF</link> or <link
- linkend="vidioc-qbuf">VIDIOC_DQBUF</link> ioctl is called. Set by the driver.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_QUEUED</constant></entry>
- <entry>0x00000002</entry>
- <entry>Internally drivers maintain two buffer queues, an
-incoming and outgoing queue. When this flag is set, the buffer is
-currently on the incoming queue. It automatically moves to the
-outgoing queue after the buffer has been filled (capture devices) or
-displayed (output devices). Drivers set or clear this flag when the
-<constant>VIDIOC_QUERYBUF</constant> ioctl is called. After
-successfully calling the <constant>VIDIOC_QBUF</constant> ioctl it is
-always set, and after <constant>VIDIOC_DQBUF</constant> it is always
-cleared.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_DONE</constant></entry>
- <entry>0x00000004</entry>
- <entry>When this flag is set, the buffer is currently on
-the outgoing queue, ready to be dequeued from the driver. Drivers set
-or clear this flag when the <constant>VIDIOC_QUERYBUF</constant> ioctl
-is called. After calling the <constant>VIDIOC_QBUF</constant> or
-<constant>VIDIOC_DQBUF</constant> ioctl it is always cleared. A buffer
-cannot be on both queues at the same time, so the
-<constant>V4L2_BUF_FLAG_QUEUED</constant> and
-<constant>V4L2_BUF_FLAG_DONE</constant> flags are mutually exclusive.
-Both can be cleared, however, in which case the buffer is in the
-"dequeued" state and owned by the application.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_ERROR</constant></entry>
- <entry>0x00000040</entry>
- <entry>When this flag is set, the buffer has been dequeued
- successfully, although the data might have been corrupted.
-	  This is recoverable; streaming may continue as normal and
- the buffer may be reused normally.
- Drivers set this flag when the <constant>VIDIOC_DQBUF</constant>
- ioctl is called.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_KEYFRAME</constant></entry>
- <entry>0x00000008</entry>
- <entry>Drivers set or clear this flag when calling the
-<constant>VIDIOC_DQBUF</constant> ioctl. It may be set by video
-capture devices when the buffer contains a compressed image which is a
-key frame (or field), &ie; can be decompressed on its own. Also known as
-an I-frame. Applications can set this bit when <structfield>type</structfield>
-refers to an output stream.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_PFRAME</constant></entry>
- <entry>0x00000010</entry>
-	<entry>Similar to <constant>V4L2_BUF_FLAG_KEYFRAME</constant>,
-this flag marks predicted frames or fields which contain only the differences to a
-previous key frame. Applications can set this bit when <structfield>type</structfield>
-refers to an output stream.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_BFRAME</constant></entry>
- <entry>0x00000020</entry>
-	<entry>Similar to <constant>V4L2_BUF_FLAG_KEYFRAME</constant>,
-this flag marks a bi-directional predicted frame or field which contains only
-the differences between the current frame and both the preceding and following
-key frames to specify its content. Applications can set this bit when
-<structfield>type</structfield> refers to an output stream.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TIMECODE</constant></entry>
- <entry>0x00000100</entry>
- <entry>The <structfield>timecode</structfield> field is valid.
-Drivers set or clear this flag when the <constant>VIDIOC_DQBUF</constant>
-ioctl is called. Applications can set this bit and the corresponding
-<structfield>timecode</structfield> structure when <structfield>type</structfield>
-refers to an output stream.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_PREPARED</constant></entry>
- <entry>0x00000400</entry>
- <entry>The buffer has been prepared for I/O and can be queued by the
-application. Drivers set or clear this flag when the
-<link linkend="vidioc-querybuf">VIDIOC_QUERYBUF</link>, <link
- linkend="vidioc-qbuf">VIDIOC_PREPARE_BUF</link>, <link
- linkend="vidioc-qbuf">VIDIOC_QBUF</link> or <link
- linkend="vidioc-qbuf">VIDIOC_DQBUF</link> ioctl is called.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_NO_CACHE_INVALIDATE</constant></entry>
- <entry>0x00000800</entry>
- <entry>Caches do not have to be invalidated for this buffer.
-Typically applications shall use this flag if the data captured in the buffer
-is not going to be touched by the CPU; instead the buffer will probably be
-passed on to a DMA-capable hardware unit for further processing or output.
-</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_NO_CACHE_CLEAN</constant></entry>
- <entry>0x00001000</entry>
- <entry>Caches do not have to be cleaned for this buffer.
-Typically applications shall use this flag for output buffers if the data
-in this buffer has not been created by the CPU but by some DMA-capable unit,
-in which case caches have not been used.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_LAST</constant></entry>
- <entry>0x00100000</entry>
- <entry>Last buffer produced by the hardware. mem2mem codec drivers
-set this flag on the capture queue for the last buffer when the
-<link linkend="vidioc-querybuf">VIDIOC_QUERYBUF</link> or
-<link linkend="vidioc-qbuf">VIDIOC_DQBUF</link> ioctl is called. Due to hardware
-limitations, the last buffer may be empty. In this case the driver will set the
-<structfield>bytesused</structfield> field to 0, regardless of the format.
-Any subsequent call to the <link linkend="vidioc-qbuf">VIDIOC_DQBUF</link> ioctl
-will not block anymore, but return an &EPIPE;.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_MASK</constant></entry>
- <entry>0x0000e000</entry>
-	<entry>Mask for the timestamp types below. To determine the
-	timestamp type, perform a bitwise AND of the buffer
-	<structfield>flags</structfield> field with this mask; see the
-	example below this table.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN</constant></entry>
- <entry>0x00000000</entry>
- <entry>Unknown timestamp type. This type is used by
- drivers before Linux 3.9 and may be either monotonic (see
- below) or realtime (wall clock). Monotonic clock has been
- favoured in embedded systems whereas most of the drivers
-	  use the realtime clock. Either kind of timestamp is
-	  available in user space via
- <function>clock_gettime(2)</function> using clock IDs
- <constant>CLOCK_MONOTONIC</constant> and
- <constant>CLOCK_REALTIME</constant>, respectively.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC</constant></entry>
- <entry>0x00002000</entry>
- <entry>The buffer timestamp has been taken from the
- <constant>CLOCK_MONOTONIC</constant> clock. To access the
- same clock outside V4L2, use
- <function>clock_gettime(2)</function>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TIMESTAMP_COPY</constant></entry>
- <entry>0x00004000</entry>
- <entry>The CAPTURE buffer timestamp has been taken from the
- corresponding OUTPUT buffer. This flag applies only to mem2mem devices.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TSTAMP_SRC_MASK</constant></entry>
- <entry>0x00070000</entry>
- <entry>Mask for timestamp sources below. The timestamp source
-	  defines the point in time at which the timestamp is taken in relation to
-	  the frame. A bitwise AND operation between the
-	  <structfield>flags</structfield> field and
-	  <constant>V4L2_BUF_FLAG_TSTAMP_SRC_MASK</constant> yields the
- value of the timestamp source. Applications must set the timestamp
- source when <structfield>type</structfield> refers to an output stream
- and <constant>V4L2_BUF_FLAG_TIMESTAMP_COPY</constant> is set.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TSTAMP_SRC_EOF</constant></entry>
- <entry>0x00000000</entry>
- <entry>End Of Frame. The buffer timestamp has been taken
- when the last pixel of the frame has been received or the
- last pixel of the frame has been transmitted. In practice,
- software generated timestamps will typically be read from
- the clock a small amount of time after the last pixel has
-	  been received or transmitted, depending on the system and
- other activity in it.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BUF_FLAG_TSTAMP_SRC_SOE</constant></entry>
- <entry>0x00010000</entry>
- <entry>Start Of Exposure. The buffer timestamp has been
- taken when the exposure of the frame has begun. This is
- only valid for the
- <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> buffer
- type.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
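-
-    <para>As an illustration only (assuming the standard videodev2.h
-definitions), the timestamp type and source of a dequeued buffer can be
-determined by masking its <structfield>flags</structfield> field:</para>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Illustrative sketch only: report how the timestamp of a dequeued
-   buffer was generated, using the masks described above. */
-static void print_timestamp_info(const struct v4l2_buffer *buf)
-{
-	switch (buf-&gt;flags &amp; V4L2_BUF_FLAG_TIMESTAMP_MASK) {
-	case V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC:
-		printf("timestamp taken from CLOCK_MONOTONIC\n");
-		break;
-	case V4L2_BUF_FLAG_TIMESTAMP_COPY:
-		printf("timestamp copied from the corresponding OUTPUT buffer\n");
-		break;
-	default:
-		printf("timestamp clock unknown (pre-3.9 driver)\n");
-	}
-
-	if ((buf-&gt;flags &amp; V4L2_BUF_FLAG_TSTAMP_SRC_MASK) ==
-	    V4L2_BUF_FLAG_TSTAMP_SRC_SOE)
-		printf("timestamp taken at start of exposure\n");
-	else
-		printf("timestamp taken at end of frame\n");
-}
-</programlisting>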
-
- <table pgwide="1" frame="none" id="v4l2-memory">
- <title>enum v4l2_memory</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MEMORY_MMAP</constant></entry>
- <entry>1</entry>
- <entry>The buffer is used for <link linkend="mmap">memory
-mapping</link> I/O.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MEMORY_USERPTR</constant></entry>
- <entry>2</entry>
- <entry>The buffer is used for <link linkend="userp">user
-pointer</link> I/O.</entry>
- </row>
- <row>
- <entry><constant>V4L2_MEMORY_OVERLAY</constant></entry>
- <entry>3</entry>
- <entry>[to do]</entry>
- </row>
- <row>
- <entry><constant>V4L2_MEMORY_DMABUF</constant></entry>
- <entry>4</entry>
- <entry>The buffer is used for <link linkend="dmabuf">DMA shared
-buffer</link> I/O.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
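-
-    <para>The memory type is selected when buffers are requested. A minimal
-sketch (error handling left to the caller) requesting four memory-mapped
-buffers for a single-planar capture stream:</para>
-
-    <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch only: request four V4L2_MEMORY_MMAP buffers on an already
-   open single-planar capture device referred to by fd. */
-static int request_mmap_buffers(int fd)
-{
-	struct v4l2_requestbuffers req;
-
-	memset(&amp;req, 0, sizeof(req));
-	req.count  = 4;
-	req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	req.memory = V4L2_MEMORY_MMAP;
-
-	return ioctl(fd, VIDIOC_REQBUFS, &amp;req);
-}
-</programlisting>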
-
- <section>
- <title>Timecodes</title>
-
- <para>The <structname>v4l2_timecode</structname> structure is
-designed to hold a <xref linkend="smpte12m" /> or similar timecode.
-(struct <structname>timeval</structname> timestamps are stored in
-&v4l2-buffer; field <structfield>timestamp</structfield>.)</para>
-
- <table frame="none" pgwide="1" id="v4l2-timecode">
- <title>struct <structname>v4l2_timecode</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Frame rate the timecodes are based on, see <xref
- linkend="timecode-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Timecode flags, see <xref linkend="timecode-flags" />.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>frames</structfield></entry>
- <entry>Frame count, 0 ... 23/24/29/49/59, depending on the
- type of timecode.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>seconds</structfield></entry>
- <entry>Seconds count, 0 ... 59. This is a binary, not BCD number.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>minutes</structfield></entry>
- <entry>Minutes count, 0 ... 59. This is a binary, not BCD number.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>hours</structfield></entry>
-	    <entry>Hours count, 0 ... 23. This is a binary, not BCD number.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>userbits</structfield>[4]</entry>
- <entry>The "user group" bits from the timecode.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="timecode-type">
- <title>Timecode Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TC_TYPE_24FPS</constant></entry>
- <entry>1</entry>
- <entry>24 frames per second, i.&nbsp;e. film.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_TYPE_25FPS</constant></entry>
- <entry>2</entry>
- <entry>25 frames per second, &ie; PAL or SECAM video.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_TYPE_30FPS</constant></entry>
- <entry>3</entry>
- <entry>30 frames per second, &ie; NTSC video.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_TYPE_50FPS</constant></entry>
- <entry>4</entry>
- <entry></entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_TYPE_60FPS</constant></entry>
- <entry>5</entry>
- <entry></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="timecode-flags">
- <title>Timecode Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TC_FLAG_DROPFRAME</constant></entry>
- <entry>0x0001</entry>
- <entry>Indicates "drop frame" semantics for counting frames
-in 29.97 fps material. When set, frame numbers 0 and 1 at the start of
-each minute, except minutes 0, 10, 20, 30, 40 and 50, are omitted from the
-count.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_FLAG_COLORFRAME</constant></entry>
- <entry>0x0002</entry>
- <entry>The "color frame" flag.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_USERBITS_field</constant></entry>
- <entry>0x000C</entry>
- <entry>Field mask for the "binary group flags".</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_USERBITS_USERDEFINED</constant></entry>
- <entry>0x0000</entry>
- <entry>Unspecified format.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TC_USERBITS_8BITCHARS</constant></entry>
- <entry>0x0008</entry>
- <entry>8-bit ISO characters.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
- </section>
-
- <section id="field-order">
- <title>Field Order</title>
-
- <para>We have to distinguish between progressive and interlaced
-video. Progressive video transmits all lines of a video image
-sequentially. Interlaced video divides an image into two fields,
-containing only the odd and even lines of the image, respectively.
-The so-called odd and even fields are transmitted alternately, and due
-to a small delay between fields a cathode ray TV displays the lines
-interleaved, yielding the original frame. This curious technique was
-invented because at refresh rates similar to film the image would
-fade out too quickly. Transmitting fields reduces the flicker without
-the necessity of doubling the frame rate and with it the bandwidth
-required for each channel.</para>
-
- <para>It is important to understand a video camera does not expose
-one frame at a time and merely transmit it split into
-fields. The two fields are in fact captured at two different instants in
-time. An object on screen may well move between one field and the
-next. For applications analysing motion it is of paramount importance
-to recognize which field of a frame is older, the <emphasis>temporal
-order</emphasis>.</para>
-
- <para>When the driver provides or accepts images field by field
-rather than interleaved, it is also important that applications understand
-how the fields combine into frames. We distinguish between top (aka odd) and
-bottom (aka even) fields, the <emphasis>spatial order</emphasis>: The first line
-of the top field is the first line of an interlaced frame, the first
-line of the bottom field is the second line of that frame.</para>
-
- <para>However because fields were captured one after the other,
-arguing whether a frame commences with the top or bottom field is
-pointless. Any two successive top and bottom, or bottom and top fields
-yield a valid frame. Only when the source was progressive to begin
-with, &eg; when transferring film to video, do two fields come from
-the same frame, creating a natural order.</para>
-
- <para>Counter to intuition the top field is not necessarily the
-older field. Whether the older field contains the top or bottom lines
-is a convention determined by the video standard. Hence the
-distinction between temporal and spatial order of fields. The diagrams
-below should make this clearer.</para>
-
- <para>All video capture and output devices must report the current
-field order. Some drivers may permit the selection of a different
-order; to this end applications initialize the
-<structfield>field</structfield> field of &v4l2-pix-format; before
-calling the &VIDIOC-S-FMT; ioctl. If no particular order is desired it should
-have the value <constant>V4L2_FIELD_ANY</constant> (0).</para>
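-
-    <para>A minimal sketch of this negotiation, assuming fd refers to an
-open single-planar capture device:</para>
-
-    <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch only: let the driver pick the field order and read back its choice. */
-static int negotiate_field_order(int fd)
-{
-	struct v4l2_format fmt;
-
-	memset(&amp;fmt, 0, sizeof(fmt));
-	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	if (ioctl(fd, VIDIOC_G_FMT, &amp;fmt) &lt; 0)
-		return -1;
-
-	fmt.fmt.pix.field = V4L2_FIELD_ANY;	/* no preference */
-	if (ioctl(fd, VIDIOC_S_FMT, &amp;fmt) &lt; 0)
-		return -1;
-
-	/* the driver has now filled in the field order it actually uses */
-	return fmt.fmt.pix.field;
-}
-</programlisting>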
-
- <table frame="none" pgwide="1" id="v4l2-field">
- <title>enum v4l2_field</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FIELD_ANY</constant></entry>
- <entry>0</entry>
- <entry>Applications request this field order when any
-one of the <constant>V4L2_FIELD_NONE</constant>,
-<constant>V4L2_FIELD_TOP</constant>,
-<constant>V4L2_FIELD_BOTTOM</constant>, or
-<constant>V4L2_FIELD_INTERLACED</constant> formats is acceptable.
-Drivers choose depending on hardware capabilities or e.&nbsp;g. the
-requested image size, and return the actual field order. Drivers must
-never return <constant>V4L2_FIELD_ANY</constant>. If multiple
-field orders are possible the driver must choose one of the possible
-field orders during &VIDIOC-S-FMT; or &VIDIOC-TRY-FMT;. &v4l2-buffer;
-<structfield>field</structfield> can never be
-<constant>V4L2_FIELD_ANY</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_NONE</constant></entry>
- <entry>1</entry>
- <entry>Images are in progressive format, not interlaced.
-The driver may also indicate this order when it cannot distinguish
-between <constant>V4L2_FIELD_TOP</constant> and
-<constant>V4L2_FIELD_BOTTOM</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_TOP</constant></entry>
- <entry>2</entry>
- <entry>Images consist of the top (aka odd) field only.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_BOTTOM</constant></entry>
- <entry>3</entry>
- <entry>Images consist of the bottom (aka even) field only.
-Applications may wish to prevent a device from capturing interlaced
-images because they will have "comb" or "feathering" artefacts around
-moving objects.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_INTERLACED</constant></entry>
- <entry>4</entry>
- <entry>Images contain both fields, interleaved line by
-line. The temporal order of the fields (whether the top or bottom
-field is first transmitted) depends on the current video standard.
-M/NTSC transmits the bottom field first, all other standards the top
-field first.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_SEQ_TB</constant></entry>
- <entry>5</entry>
- <entry>Images contain both fields, the top field lines
-are stored first in memory, immediately followed by the bottom field
-lines. Fields are always stored in temporal order, the older one first
-in memory. Image sizes refer to the frame, not fields.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_SEQ_BT</constant></entry>
- <entry>6</entry>
- <entry>Images contain both fields, the bottom field
-lines are stored first in memory, immediately followed by the top
-field lines. Fields are always stored in temporal order, the older one
-first in memory. Image sizes refer to the frame, not fields.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_ALTERNATE</constant></entry>
- <entry>7</entry>
- <entry>The two fields of a frame are passed in separate
-buffers, in temporal order, &ie; the older one first. To indicate the field
-parity (whether the current field is a top or bottom field) the driver
-or application, depending on data direction, must set &v4l2-buffer;
-<structfield>field</structfield> to
-<constant>V4L2_FIELD_TOP</constant> or
-<constant>V4L2_FIELD_BOTTOM</constant>. Any two successive fields pair
-to build a frame. Whether fields are successive, without any dropped fields
-between them (fields can drop individually), can be determined from
-the &v4l2-buffer; <structfield>sequence</structfield> field. This format
-cannot be selected when using the read/write I/O method since there
-is no way to communicate if a field was a top or bottom field.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_INTERLACED_TB</constant></entry>
- <entry>8</entry>
- <entry>Images contain both fields, interleaved line by
-line, top field first. The top field is transmitted first.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FIELD_INTERLACED_BT</constant></entry>
- <entry>9</entry>
- <entry>Images contain both fields, interleaved line by
-line, top field first in memory. The bottom field is transmitted first.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <figure id="fieldseq-tb">
- <title>Field Order, Top Field First Transmitted</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="fieldseq_tb.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="fieldseq_tb.gif" format="GIF" />
- </imageobject>
- </mediaobject>
- </figure>
-
- <figure id="fieldseq-bt">
- <title>Field Order, Bottom Field First Transmitted</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="fieldseq_bt.pdf" format="PS" />
- </imageobject>
- <imageobject>
- <imagedata fileref="fieldseq_bt.gif" format="GIF" />
- </imageobject>
- </mediaobject>
- </figure>
- </section>
diff --git a/Documentation/DocBook/media/v4l/keytable.c.xml b/Documentation/DocBook/media/v4l/keytable.c.xml
deleted file mode 100644
index d53254a3be15..000000000000
--- a/Documentation/DocBook/media/v4l/keytable.c.xml
+++ /dev/null
@@ -1,172 +0,0 @@
-<programlisting>
-/* keytable.c - This program allows checking/replacing keys at IR
-
- Copyright (C) 2006-2009 Mauro Carvalho Chehab &lt;mchehab@infradead.org&gt;
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
-
-#include &lt;ctype.h&gt;
-#include &lt;errno.h&gt;
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;stdlib.h&gt;
-#include &lt;string.h&gt;
-#include &lt;linux/input.h&gt;
-#include &lt;sys/ioctl.h&gt;
-
-#include "parse.h"
-
-void prtcode (int *codes)
-{
- struct parse_key *p;
-
- for (p=keynames;p-&gt;name!=NULL;p++) {
- if (p-&gt;value == (unsigned)codes[1]) {
- printf("scancode 0x%04x = %s (0x%02x)\n", codes[0], p-&gt;name, codes[1]);
- return;
- }
- }
-
- if (isprint (codes[1]))
- printf("scancode %d = '%c' (0x%02x)\n", codes[0], codes[1], codes[1]);
- else
- printf("scancode %d = 0x%02x\n", codes[0], codes[1]);
-}
-
-int parse_code(char *string)
-{
- struct parse_key *p;
-
- for (p=keynames;p-&gt;name!=NULL;p++) {
- if (!strcasecmp(p-&gt;name, string)) {
- return p-&gt;value;
- }
- }
- return -1;
-}
-
-int main (int argc, char *argv[])
-{
- int fd;
- unsigned int i, j;
- int codes[2];
-
- if (argc&lt;2 || argc&gt;4) {
- printf ("usage: %s &lt;device&gt; to get table; or\n"
- " %s &lt;device&gt; &lt;scancode&gt; &lt;keycode&gt;\n"
- " %s &lt;device&gt; &lt;keycode_file&gt;\n",*argv,*argv,*argv);
- return -1;
- }
-
- if ((fd = open(argv[1], O_RDONLY)) &lt; 0) {
- perror("Couldn't open input device");
- return(-1);
- }
-
- if (argc==4) {
- int value;
-
- value=parse_code(argv[3]);
-
-		if (value==-1) {
-			errno = 0;	/* strtol() reports range errors via errno */
-			value = strtol(argv[3], NULL, 0);
- if (errno)
- perror("value");
- }
-
- codes [0] = (unsigned) strtol(argv[2], NULL, 0);
- codes [1] = (unsigned) value;
-
- if(ioctl(fd, EVIOCSKEYCODE, codes))
- perror ("EVIOCSKEYCODE");
-
- if(ioctl(fd, EVIOCGKEYCODE, codes)==0)
- prtcode(codes);
- return 0;
- }
-
- if (argc==3) {
- FILE *fin;
- int value;
- char *scancode, *keycode, s[2048];
-
- fin=fopen(argv[2],"r");
- if (fin==NULL) {
- perror ("opening keycode file");
- return -1;
- }
-
- /* Clears old table */
- for (j = 0; j &lt; 256; j++) {
- for (i = 0; i &lt; 256; i++) {
- codes[0] = (j &lt;&lt; 8) | i;
- codes[1] = KEY_RESERVED;
- ioctl(fd, EVIOCSKEYCODE, codes);
- }
- }
-
- while (fgets(s,sizeof(s),fin)) {
- scancode=strtok(s,"\n\t =:");
- if (!scancode) {
- perror ("parsing input file scancode");
- return -1;
- }
- if (!strcasecmp(scancode, "scancode")) {
- scancode = strtok(NULL,"\n\t =:");
- if (!scancode) {
- perror ("parsing input file scancode");
- return -1;
- }
- }
-
- keycode=strtok(NULL,"\n\t =:(");
- if (!keycode) {
- perror ("parsing input file keycode");
- return -1;
- }
-
- // printf ("parsing %s=%s:", scancode, keycode);
- value=parse_code(keycode);
- // printf ("\tvalue=%d\n",value);
-
-			if (value==-1) {
-				errno = 0;	/* strtol() reports range errors via errno */
-				value = strtol(keycode, NULL, 0);
- if (errno)
- perror("value");
- }
-
- codes [0] = (unsigned) strtol(scancode, NULL, 0);
- codes [1] = (unsigned) value;
-
- // printf("\t%04x=%04x\n",codes[0], codes[1]);
- if(ioctl(fd, EVIOCSKEYCODE, codes)) {
- fprintf(stderr, "Setting scancode 0x%04x with 0x%04x via ",codes[0], codes[1]);
- perror ("EVIOCSKEYCODE");
- }
-
- if(ioctl(fd, EVIOCGKEYCODE, codes)==0)
- prtcode(codes);
- }
- return 0;
- }
-
- /* Get scancode table */
- for (j = 0; j &lt; 256; j++) {
- for (i = 0; i &lt; 256; i++) {
- codes[0] = (j &lt;&lt; 8) | i;
- if (!ioctl(fd, EVIOCGKEYCODE, codes) &amp;&amp; codes[1] != KEY_RESERVED)
- prtcode(codes);
- }
- }
- return 0;
-}
-
-</programlisting>
diff --git a/Documentation/DocBook/media/v4l/libv4l.xml b/Documentation/DocBook/media/v4l/libv4l.xml
deleted file mode 100644
index d3b71e20003c..000000000000
--- a/Documentation/DocBook/media/v4l/libv4l.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<title>Libv4l Userspace Library</title>
-<section id="libv4l-introduction">
- <title>Introduction</title>
-
- <para>libv4l is a collection of libraries which adds a thin abstraction
-layer on top of video4linux2 devices. The purpose of this (thin) layer
-is to make it easy for application writers to support a wide variety of
-devices without having to write separate code for different devices in the
-same class.</para>
-<para>An example of using libv4l is provided by
-<link linkend='v4l2grab-example'>v4l2grab</link>.
-</para>
-
- <para>libv4l consists of 3 different libraries:</para>
- <section>
- <title>libv4lconvert</title>
-
- <para>libv4lconvert is a library that converts several
-different pixelformats found in V4L2 drivers into a few common RGB and
-YUV formats.</para>
- <para>It currently accepts the following V4L2 driver formats:
-<link linkend="V4L2-PIX-FMT-BGR24"><constant>V4L2_PIX_FMT_BGR24</constant></link>,
-<link linkend="V4L2-PIX-FMT-HM12"><constant>V4L2_PIX_FMT_HM12</constant></link>,
-<link linkend="V4L2-PIX-FMT-JPEG"><constant>V4L2_PIX_FMT_JPEG</constant></link>,
-<link linkend="V4L2-PIX-FMT-MJPEG"><constant>V4L2_PIX_FMT_MJPEG</constant></link>,
-<link linkend="V4L2-PIX-FMT-MR97310A"><constant>V4L2_PIX_FMT_MR97310A</constant></link>,
-<link linkend="V4L2-PIX-FMT-OV511"><constant>V4L2_PIX_FMT_OV511</constant></link>,
-<link linkend="V4L2-PIX-FMT-OV518"><constant>V4L2_PIX_FMT_OV518</constant></link>,
-<link linkend="V4L2-PIX-FMT-PAC207"><constant>V4L2_PIX_FMT_PAC207</constant></link>,
-<link linkend="V4L2-PIX-FMT-PJPG"><constant>V4L2_PIX_FMT_PJPG</constant></link>,
-<link linkend="V4L2-PIX-FMT-RGB24"><constant>V4L2_PIX_FMT_RGB24</constant></link>,
-<link linkend="V4L2-PIX-FMT-SBGGR8"><constant>V4L2_PIX_FMT_SBGGR8</constant></link>,
-<link linkend="V4L2-PIX-FMT-SGBRG8"><constant>V4L2_PIX_FMT_SGBRG8</constant></link>,
-<link linkend="V4L2-PIX-FMT-SGRBG8"><constant>V4L2_PIX_FMT_SGRBG8</constant></link>,
-<link linkend="V4L2-PIX-FMT-SN9C10X"><constant>V4L2_PIX_FMT_SN9C10X</constant></link>,
-<link linkend="V4L2-PIX-FMT-SN9C20X-I420"><constant>V4L2_PIX_FMT_SN9C20X_I420</constant></link>,
-<link linkend="V4L2-PIX-FMT-SPCA501"><constant>V4L2_PIX_FMT_SPCA501</constant></link>,
-<link linkend="V4L2-PIX-FMT-SPCA505"><constant>V4L2_PIX_FMT_SPCA505</constant></link>,
-<link linkend="V4L2-PIX-FMT-SPCA508"><constant>V4L2_PIX_FMT_SPCA508</constant></link>,
-<link linkend="V4L2-PIX-FMT-SPCA561"><constant>V4L2_PIX_FMT_SPCA561</constant></link>,
-<link linkend="V4L2-PIX-FMT-SQ905C"><constant>V4L2_PIX_FMT_SQ905C</constant></link>,
-<constant>V4L2_PIX_FMT_SRGGB8</constant>,
-<link linkend="V4L2-PIX-FMT-UYVY"><constant>V4L2_PIX_FMT_UYVY</constant></link>,
-<link linkend="V4L2-PIX-FMT-YUV420"><constant>V4L2_PIX_FMT_YUV420</constant></link>,
-<link linkend="V4L2-PIX-FMT-YUYV"><constant>V4L2_PIX_FMT_YUYV</constant></link>,
-<link linkend="V4L2-PIX-FMT-YVU420"><constant>V4L2_PIX_FMT_YVU420</constant></link>,
-and <link linkend="V4L2-PIX-FMT-YVYU"><constant>V4L2_PIX_FMT_YVYU</constant></link>.
-</para>
- <para>Later on libv4lconvert was expanded to also be able to do
-various video processing functions to improve webcam video quality.
-The video processing is split into two parts: libv4lconvert/control and
-libv4lconvert/processing.</para>
-
- <para>The control part is used to offer video controls which can
-be used to control the video processing functions made available by
- libv4lconvert/processing. These controls are stored application wide
-(until reboot) by using a persistent shared memory object.</para>
-
- <para>libv4lconvert/processing offers the actual video
-processing functionality.</para>
- </section>
- <section>
- <title>libv4l1</title>
- <para>This library offers functions that can be used to quickly
-make v4l1 applications work with v4l2 devices. These functions work exactly
-like the normal open/close/etc, except that libv4l1 does full emulation of
-the v4l1 api on top of v4l2 drivers; in the case of v4l1 drivers it
-will just pass calls through.</para>
- <para>Since those functions are emulations of the old V4L1 API,
-they shouldn't be used for new applications.</para>
- </section>
- <section>
- <title>libv4l2</title>
- <para>This library should be used for all modern V4L2
-applications.</para>
- <para>It provides handles to call V4L2 open/ioctl/close/poll
-methods. Instead of just providing the raw output of the device, it enhances
-the calls in the sense that it will use libv4lconvert to provide more video
-formats and to enhance the image quality.</para>
- <para>In most cases, libv4l2 just passes the calls directly
-through to the v4l2 driver, intercepting the calls to
-<link linkend='vidioc-g-fmt'><constant>VIDIOC_TRY_FMT</constant></link>,
-<link linkend='vidioc-g-fmt'><constant>VIDIOC_G_FMT</constant></link>,
-<link linkend='vidioc-g-fmt'><constant>VIDIOC_S_FMT</constant></link>,
-<link linkend='vidioc-enum-framesizes'><constant>VIDIOC_ENUM_FRAMESIZES</constant></link>,
-and <link linkend='vidioc-enum-frameintervals'><constant>VIDIOC_ENUM_FRAMEINTERVALS</constant></link>
-in order to emulate the formats
-<link linkend="V4L2-PIX-FMT-BGR24"><constant>V4L2_PIX_FMT_BGR24</constant></link>,
-<link linkend="V4L2-PIX-FMT-RGB24"><constant>V4L2_PIX_FMT_RGB24</constant></link>,
-<link linkend="V4L2-PIX-FMT-YUV420"><constant>V4L2_PIX_FMT_YUV420</constant></link>,
-and <link linkend="V4L2-PIX-FMT-YVU420"><constant>V4L2_PIX_FMT_YVU420</constant></link>,
-if they aren't available in the driver.
-<link linkend='vidioc-enum-fmt'><constant>VIDIOC_ENUM_FMT</constant></link>
-keeps enumerating the hardware supported formats, plus the emulated formats
-offered by libv4l at the end.
-</para>
- <section id="libv4l-ops">
- <title>Libv4l device control functions</title>
- <para>The common file operation methods are provided by
-libv4l.</para>
- <para>Those functions operate just like glibc
-open/close/dup/ioctl/read/mmap/munmap:</para>
-<itemizedlist><listitem>
- <para>int v4l2_open(const char *file, int oflag,
-...) -
-operates like the standard <link linkend='func-open'>open()</link> function.
-</para></listitem><listitem>
- <para>int v4l2_close(int fd) -
-operates like the standard <link linkend='func-close'>close()</link> function.
-</para></listitem><listitem>
- <para>int v4l2_dup(int fd) -
-operates like the standard dup() function, duplicating a file handler.
-</para></listitem><listitem>
- <para>int v4l2_ioctl (int fd, unsigned long int request, ...) -
-operates like the standard <link linkend='func-ioctl'>ioctl()</link> function.
-</para></listitem><listitem>
- <para>int v4l2_read (int fd, void* buffer, size_t n) -
-operates like the standard <link linkend='func-read'>read()</link> function.
-</para></listitem><listitem>
-	<para>void *v4l2_mmap(void *start, size_t length, int prot, int flags, int fd, int64_t offset); -
-operates like the standard <link linkend='func-mmap'>mmap()</link> function.
-</para></listitem><listitem>
- <para>int v4l2_munmap(void *_start, size_t length); -
-operates like the standard <link linkend='func-munmap'>munmap()</link> function.
-</para></listitem>
-</itemizedlist>
- <para>Those functions provide additional control:</para>
-<itemizedlist><listitem>
- <para>int v4l2_fd_open(int fd, int v4l2_flags) -
-opens an already opened fd for further use through libv4l2 and possibly
-modifies libv4l2's default behavior through the v4l2_flags argument.
-Currently, v4l2_flags can be <constant>V4L2_DISABLE_CONVERSION</constant>,
-to disable format conversion.
-</para></listitem><listitem>
- <para>int v4l2_set_control(int fd, int cid, int value) -
-This function takes a value in the range 0 - 65535 and scales it to
-the actual range of the given v4l control id. If the cid exists
-and is not locked, it sets the cid to the scaled value.
-</para></listitem><listitem>
- <para>int v4l2_get_control(int fd, int cid) -
-This function returns a value in the range 0 - 65535, scaled from the actual
-range of the given v4l control id. When the cid does not exist, could not be
-accessed for some reason, or some error occurred, 0 is returned.
-</para></listitem>
-</itemizedlist>
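-	<para>A small sketch, using only the calls listed above and assuming a
-device node at /dev/video0, that opens the device through libv4l2 and prints
-the current capture resolution (link with -lv4l2):</para>
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;linux/videodev2.h&gt;
-#include &lt;libv4l2.h&gt;
-
-/* Sketch only: open /dev/video0 through libv4l2 and print the current
-   capture resolution. */
-int main(void)
-{
-	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
-	int fd = v4l2_open("/dev/video0", O_RDWR);
-
-	if (fd &lt; 0)
-		return 1;
-	if (v4l2_ioctl(fd, VIDIOC_G_FMT, &amp;fmt) == 0)
-		printf("%ux%u\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
-	v4l2_close(fd);
-	return 0;
-}
-</programlisting>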
- </section>
- </section>
- <section>
-
- <title>v4l1compat.so wrapper library</title>
-
- <para>This library intercepts calls to
-open/close/ioctl/mmap/munmap operations and redirects them to the libv4l
-counterparts, by using LD_PRELOAD=/usr/lib/v4l1compat.so. It also
-emulates V4L1 calls via V4L2 API.</para>
- <para>It allows usage of binary legacy applications that
-still don't use libv4l.</para>
- </section>
-
-</section>
diff --git a/Documentation/DocBook/media/v4l/lirc_device_interface.xml b/Documentation/DocBook/media/v4l/lirc_device_interface.xml
deleted file mode 100644
index f53ad58027a7..000000000000
--- a/Documentation/DocBook/media/v4l/lirc_device_interface.xml
+++ /dev/null
@@ -1,255 +0,0 @@
-<section id="lirc_dev">
-<title>LIRC Device Interface</title>
-
-
-<section id="lirc_dev_intro">
-<title>Introduction</title>
-
-<para>The LIRC device interface is a bi-directional interface for
-transporting raw IR data between userspace and kernelspace. Fundamentally,
-it is just a chardev (/dev/lircX, for X = 0, 1, 2, ...), with a number
-of standard struct file_operations defined on it. With respect to
-transporting raw IR data to and fro, the essential fops are read, write
-and ioctl.</para>
-
-<para>Example dmesg output upon a driver registering w/LIRC:</para>
- <blockquote>
- <para>$ dmesg |grep lirc_dev</para>
- <para>lirc_dev: IR Remote Control driver registered, major 248</para>
- <para>rc rc0: lirc_dev: driver ir-lirc-codec (mceusb) registered at minor = 0</para>
- </blockquote>
-
-<para>What you should see for a chardev:</para>
- <blockquote>
- <para>$ ls -l /dev/lirc*</para>
- <para>crw-rw---- 1 root root 248, 0 Jul 2 22:20 /dev/lirc0</para>
- </blockquote>
-</section>
-
-<section id="lirc_read">
-<title>LIRC read fop</title>
-
-<para>The lircd userspace daemon reads raw IR data from the LIRC chardev. The
-exact format of the data depends on what modes a driver supports, and what
-mode has been selected. lircd obtains supported modes and sets the active mode
-via the ioctl interface, detailed at <xref linkend="lirc_ioctl"/>. The generally
-preferred mode is LIRC_MODE_MODE2, in which packets containing an int value
-describing an IR signal are read from the chardev.</para>
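-
-<para>A minimal sketch of such a reader, assuming the device node is
-/dev/lirc0 and the receiver is in LIRC_MODE_MODE2 (on current kernels each
-sample carries the type in its upper 8 bits and the duration in microseconds
-in the lower 24 bits):</para>
-
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;unistd.h&gt;
-
-/* Sketch only: read LIRC_MODE_MODE2 samples from /dev/lirc0 and print them. */
-int main(void)
-{
-	unsigned int sample;
-	int fd = open("/dev/lirc0", O_RDONLY);
-
-	if (fd &lt; 0)
-		return 1;
-	while (read(fd, &amp;sample, sizeof(sample)) == sizeof(sample))
-		printf("type %u, value %u\n",
-		       sample &gt;&gt; 24, sample &amp; 0x00ffffff);
-	close(fd);
-	return 0;
-}
-</programlisting>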
-
-<para>See also <ulink url="http://www.lirc.org/html/technical.html">http://www.lirc.org/html/technical.html</ulink> for more info.</para>
-</section>
-
-<section id="lirc_write">
-<title>LIRC write fop</title>
-
-<para>The data written to the chardev is a pulse/space sequence of integer
-values. Pulses and spaces are only marked implicitly by their position. The
-data must start and end with a pulse, therefore, the data must always include
-an odd number of samples. The write function must block until the data has
-been transmitted by the hardware. If more data is provided than the hardware
-can send, the driver returns EINVAL.</para>
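-
-<para>A minimal sketch of such a transmission, assuming the device node is
-/dev/lirc0 and that the hardware accepts the chosen durations (values are in
-microseconds; the sample count is odd so the sequence starts and ends with a
-pulse):</para>
-
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;unistd.h&gt;
-
-/* Sketch only: transmit pulse 900us, space 450us, pulse 900us. */
-int main(void)
-{
-	unsigned int buf[] = { 900, 450, 900 };
-	int fd = open("/dev/lirc0", O_WRONLY);
-
-	if (fd &lt; 0)
-		return 1;
-	if (write(fd, buf, sizeof(buf)) &lt; 0)
-		return 1;
-	close(fd);
-	return 0;
-}
-</programlisting>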
-
-</section>
-
-<section id="lirc_ioctl">
-<title>LIRC ioctl fop</title>
-
-<para>The LIRC device's ioctl definition is bound by the ioctl function
-definition of struct file_operations, leaving us with an unsigned int
-for the ioctl command and an unsigned long for the arg. For the purposes
-of ioctl portability across 32-bit and 64-bit, these values are capped
-to their 32-bit sizes.</para>
-
-<para>The following ioctls can be used to change specific hardware settings.
-In general each driver should have a default set of settings. The driver
-implementation is expected to re-apply the default settings when the device
-is closed by user-space, so that every application opening the device can rely
-on working with the default settings initially.</para>
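-
-<para>For example, a driver's capabilities can be queried with a minimal
-sketch like the following, assuming the header that defines the LIRC ioctls
-(linux/lirc.h on current kernels) is available and the device node is
-/dev/lirc0:</para>
-
-<programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/lirc.h&gt;
-
-/* Sketch only: print the raw LIRC feature bitmask of /dev/lirc0. */
-int main(void)
-{
-	unsigned int features = 0;
-	int fd = open("/dev/lirc0", O_RDONLY);
-
-	if (fd &lt; 0)
-		return 1;
-	if (ioctl(fd, LIRC_GET_FEATURES, &amp;features) == 0)
-		printf("features: 0x%08x\n", features);
-	close(fd);
-	return 0;
-}
-</programlisting>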
-
-<variablelist>
- <varlistentry>
- <term>LIRC_GET_FEATURES</term>
- <listitem>
-      <para>Returns the underlying hardware device's features. If a driver
-      does not announce support of certain features, calling the corresponding
-      ioctls is undefined.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_SEND_MODE</term>
- <listitem>
- <para>Get supported transmit mode. Only LIRC_MODE_PULSE is supported by lircd.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_REC_MODE</term>
- <listitem>
- <para>Get supported receive modes. Only LIRC_MODE_MODE2 and LIRC_MODE_LIRCCODE
- are supported by lircd.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_SEND_CARRIER</term>
- <listitem>
- <para>Get carrier frequency (in Hz) currently used for transmit.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_REC_CARRIER</term>
- <listitem>
- <para>Get carrier frequency (in Hz) currently used for IR reception.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_{G,S}ET_{SEND,REC}_DUTY_CYCLE</term>
- <listitem>
- <para>Get/set the duty cycle (from 0 to 100) of the carrier signal. Currently,
- no special meaning is defined for 0 or 100, but this could be used to switch
- off carrier generation in the future, so these values should be reserved.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_REC_RESOLUTION</term>
- <listitem>
-      <para>Some receivers have a maximum resolution which is defined by internal
-      sample rate or data format limitations. E.g. it is common that signals can
- only be reported in 50 microsecond steps. This integer value is used by
- lircd to automatically adjust the steps tolerance value in the lircd
- config file.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_M{IN,AX}_TIMEOUT</term>
- <listitem>
- <para>Some devices have internal timers that can be used to detect when
- there's no IR activity for a long time. This can help lircd in detecting
-      that an IR signal is finished and can speed up the decoding process.
- Returns an integer value with the minimum/maximum timeout that can be
- set. Some devices have a fixed timeout, in that case both ioctls will
- return the same value even though the timeout cannot be changed.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_M{IN,AX}_FILTER_{PULSE,SPACE}</term>
- <listitem>
- <para>Some devices are able to filter out spikes in the incoming signal
- using given filter rules. These ioctls return the hardware capabilities
- that describe the bounds of the possible filters. Filter settings depend
- on the IR protocols that are expected. lircd derives the settings from
- all protocols definitions found in its config file.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_GET_LENGTH</term>
- <listitem>
- <para>Retrieves the code length in bits (only for LIRC_MODE_LIRCCODE).
- Reads on the device must be done in blocks matching the bit count.
-      The bit count should be rounded up so that it matches full bytes.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_{SEND,REC}_MODE</term>
- <listitem>
- <para>Set send/receive mode. Largely obsolete for send, as only
- LIRC_MODE_PULSE is supported.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_{SEND,REC}_CARRIER</term>
- <listitem>
- <para>Set send/receive carrier (in Hz). Return 0 on success.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_TRANSMITTER_MASK</term>
- <listitem>
- <para>This enables the given set of transmitters. The first transmitter
-      is encoded by the least significant bit, etc. When an invalid bit mask
-      is given, i.e. a bit is set even though the device does not have that many
-      transmitters, this ioctl returns the number of available transmitters
-      and does nothing else.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_REC_TIMEOUT</term>
- <listitem>
- <para>Sets the integer value for IR inactivity timeout (cf.
- LIRC_GET_MIN_TIMEOUT and LIRC_GET_MAX_TIMEOUT). A value of 0 (if
- supported by the hardware) disables all hardware timeouts and data should
- be reported as soon as possible. If the exact value cannot be set, then
- the next possible value _greater_ than the given value should be set.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_REC_TIMEOUT_REPORTS</term>
- <listitem>
- <para>Enable (1) or disable (0) timeout reports in LIRC_MODE_MODE2. By
- default, timeout reports should be turned off.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_REC_FILTER_{,PULSE,SPACE}</term>
- <listitem>
- <para>Pulses/spaces shorter than this are filtered out by hardware. If
- filters cannot be set independently for pulse/space, the corresponding
- ioctls must return an error and LIRC_SET_REC_FILTER shall be used instead.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_MEASURE_CARRIER_MODE</term>
- <listitem>
- <para>Enable (1)/disable (0) measure mode. If enabled, from the next key
- press on, the driver will send LIRC_MODE2_FREQUENCY packets. By default
- this should be turned off.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_REC_{DUTY_CYCLE,CARRIER}_RANGE</term>
- <listitem>
- <para>To set a range use LIRC_SET_REC_DUTY_CYCLE_RANGE/LIRC_SET_REC_CARRIER_RANGE
- with the lower bound first and later LIRC_SET_REC_DUTY_CYCLE/LIRC_SET_REC_CARRIER
- with the upper bound.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_NOTIFY_DECODE</term>
- <listitem>
- <para>This ioctl is called by lircd whenever a successful decoding of an
- incoming IR signal could be done. This can be used by supporting hardware
- to give visual feedback to the user e.g. by flashing a LED.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SETUP_{START,END}</term>
- <listitem>
- <para>Setting of several driver parameters can be optimized by encapsulating
- the according ioctl calls with LIRC_SETUP_START/LIRC_SETUP_END. When a
- driver receives a LIRC_SETUP_START ioctl it can choose to not commit
- further setting changes to the hardware until a LIRC_SETUP_END is received.
- But this is open to the driver implementation and every driver must also
- handle parameter changes which are not encapsulated by LIRC_SETUP_START
- and LIRC_SETUP_END. Drivers can also choose to ignore these ioctls.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>LIRC_SET_WIDEBAND_RECEIVER</term>
- <listitem>
-      <para>Some receivers are equipped with a special wide band receiver which is
-      intended to be used to learn the output of an existing remote.
-      Calling this ioctl with (1) will enable it, and with (0) disable it.
-      This might be useful for receivers that otherwise have a narrow band receiver
-      that prevents them from being used with some remotes.
-      A wide band receiver might also be more precise.
-      On the other hand its disadvantage is usually a reduced range of reception.
-      Note: the wide band receiver might be implicitly enabled if you enable
-      carrier reports. In that case it will be disabled as soon as you disable
-      carrier reports. Trying to disable the wide band receiver while carrier
-      reports are active will do nothing.</para>
- </listitem>
- </varlistentry>
-</variablelist>
-<section id="lirc_dev_errors">
- &return-value;
-</section>
-</section>
-</section>
diff --git a/Documentation/DocBook/media/v4l/media-controller.xml b/Documentation/DocBook/media/v4l/media-controller.xml
deleted file mode 100644
index 5f2fc07a93d7..000000000000
--- a/Documentation/DocBook/media/v4l/media-controller.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<partinfo>
- <authorgroup>
- <author>
- <firstname>Laurent</firstname>
- <surname>Pinchart</surname>
- <affiliation><address><email>laurent.pinchart@ideasonboard.com</email></address></affiliation>
- <contrib>Initial version.</contrib>
- </author>
- </authorgroup>
- <copyright>
- <year>2010</year>
- <holder>Laurent Pinchart</holder>
- </copyright>
-
- <revhistory>
- <!-- Put document revisions here, newest first. -->
- <revision>
- <revnumber>1.0.0</revnumber>
- <date>2010-11-10</date>
- <authorinitials>lp</authorinitials>
- <revremark>Initial revision</revremark>
- </revision>
- </revhistory>
-</partinfo>
-
-<title>Media Controller API</title>
-
-<chapter id="media_controller">
- <title>Media Controller</title>
-
- <section id="media-controller-intro">
- <title>Introduction</title>
- <para>Media devices increasingly handle multiple related functions. Many USB
- cameras include microphones, video capture hardware can also output video,
- or SoC camera interfaces also perform memory-to-memory operations similar to
- video codecs.</para>
- <para>Independent functions, even when implemented in the same hardware, can
- be modelled as separate devices. A USB camera with a microphone will be
- presented to userspace applications as V4L2 and ALSA capture devices. The
- devices' relationships (when using a webcam, end-users shouldn't have to
- manually select the associated USB microphone), while not made available
- directly to applications by the drivers, can usually be retrieved from
- sysfs.</para>
- <para>With more and more advanced SoC devices being introduced, the current
- approach will not scale. Device topologies are getting increasingly complex
- and can't always be represented by a tree structure. Hardware blocks are
- shared between different functions, creating dependencies between seemingly
- unrelated devices.</para>
- <para>Kernel abstraction APIs such as V4L2 and ALSA provide means for
-    applications to access hardware parameters. As newer hardware exposes an
- increasingly high number of those parameters, drivers need to guess what
- applications really require based on limited information, thereby
- implementing policies that belong to userspace.</para>
- <para>The media controller API aims at solving those problems.</para>
- </section>
-
- <section id="media-controller-model">
- <title>Media device model</title>
- <para>Discovering a device internal topology, and configuring it at runtime,
-    <para>Discovering a device's internal topology, and configuring it at runtime,
- devices and Linux Kernel interfaces are modelled as graph objects on
- an oriented graph. The object types that constitute the graph are:</para>
- <itemizedlist>
- <listitem><para>An <emphasis role="bold">entity</emphasis>
- is a basic media hardware or software building block. It can correspond to
- a large variety of logical blocks such as physical hardware devices
- (CMOS sensor for instance), logical hardware devices (a building block in
- a System-on-Chip image processing pipeline), DMA channels or physical
- connectors.</para></listitem>
- <listitem><para>An <emphasis role="bold">interface</emphasis>
- is a graph representation of a Linux Kernel userspace API interface,
- like a device node or a sysfs file that controls one or more entities
- in the graph.</para></listitem>
- <listitem><para>A <emphasis role="bold">pad</emphasis>
- is a data connection endpoint through which an entity can interact with
- other entities. Data (not restricted to video) produced by an entity
- flows from the entity's output to one or more entity inputs. Pads should
- not be confused with physical pins at chip boundaries.</para></listitem>
- <listitem><para>A <emphasis role="bold">data link</emphasis>
- is a point-to-point oriented connection between two pads, either on the
- same entity or on different entities. Data flows from a source pad to a
- sink pad.</para></listitem>
- <listitem><para>An <emphasis role="bold">interface link</emphasis>
- is a point-to-point bidirectional control connection between a Linux
-    Kernel interface and an entity.</para></listitem>
- </itemizedlist>
- </section>
-
- <!-- All non-ioctl specific data types go here. -->
- &sub-media-types;
-</chapter>
-
-<appendix id="media-user-func">
- <title>Function Reference</title>
- <!-- Keep this alphabetically sorted. -->
- &sub-media-func-open;
- &sub-media-func-close;
- &sub-media-func-ioctl;
- <!-- All ioctls go here. -->
- &sub-media-ioc-device-info;
- &sub-media-ioc-g-topology;
- &sub-media-ioc-enum-entities;
- &sub-media-ioc-enum-links;
- &sub-media-ioc-setup-link;
-</appendix>
diff --git a/Documentation/DocBook/media/v4l/media-func-close.xml b/Documentation/DocBook/media/v4l/media-func-close.xml
deleted file mode 100644
index be149c802aeb..000000000000
--- a/Documentation/DocBook/media/v4l/media-func-close.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<refentry id="media-func-close">
- <refmeta>
- <refentrytitle>media close()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>media-close</refname>
- <refpurpose>Close a media device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;unistd.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>close</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Closes the media device. Resources associated with the file descriptor
-    are freed. The device configuration remains unchanged.</para>
- </refsect1>
-
- <refsect1>
- <title>Return Value</title>
-
- <para><function>close</function> returns 0 on success. On error, -1 is
- returned, and <varname>errno</varname> is set appropriately. Possible error
- codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBADF</errorcode></term>
- <listitem>
- <para><parameter>fd</parameter> is not a valid open file descriptor.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-func-ioctl.xml b/Documentation/DocBook/media/v4l/media-func-ioctl.xml
deleted file mode 100644
index 39478d0fbcaa..000000000000
--- a/Documentation/DocBook/media/v4l/media-func-ioctl.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<refentry id="media-func-ioctl">
- <refmeta>
- <refentrytitle>media ioctl()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>media-ioctl</refname>
- <refpurpose>Control a media device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;sys/ioctl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>void *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>Media ioctl request code as defined in the media.h header file,
- for example MEDIA_IOC_SETUP_LINK.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>Pointer to a request-specific structure.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
- <para>The <function>ioctl()</function> function manipulates media device
- parameters. The argument <parameter>fd</parameter> must be an open file
- descriptor.</para>
- <para>The ioctl <parameter>request</parameter> code specifies the media
- function to be called. It has encoded in it whether the argument is an
- input, output or read/write parameter, and the size of the argument
- <parameter>argp</parameter> in bytes.</para>
- <para>Macros and structures definitions specifying media ioctl requests and
- their parameters are located in the media.h header file. All media ioctl
- requests, their respective function and parameters are specified in
- <xref linkend="media-user-func" />.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <para>Request-specific error codes are listed in the
- individual requests descriptions.</para>
- <para>When an ioctl that takes an output or read/write parameter fails,
- the parameter remains unmodified.</para>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-func-open.xml b/Documentation/DocBook/media/v4l/media-func-open.xml
deleted file mode 100644
index 122374a3e894..000000000000
--- a/Documentation/DocBook/media/v4l/media-func-open.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<refentry id="media-func-open">
- <refmeta>
- <refentrytitle>media open()</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>media-open</refname>
- <refpurpose>Open a media device</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcsynopsisinfo>#include &lt;fcntl.h&gt;</funcsynopsisinfo>
- <funcprototype>
- <funcdef>int <function>open</function></funcdef>
- <paramdef>const char *<parameter>device_name</parameter></paramdef>
- <paramdef>int <parameter>flags</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>device_name</parameter></term>
- <listitem>
- <para>Device to be opened.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>flags</parameter></term>
- <listitem>
- <para>Open flags. Access mode must be either <constant>O_RDONLY</constant>
- or <constant>O_RDWR</constant>. Other flags have no effect.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
- <refsect1>
- <title>Description</title>
- <para>To open a media device applications call <function>open()</function>
- with the desired device name. The function has no side effects; the device
-    configuration remains unchanged.</para>
- <para>When the device is opened in read-only mode, attempts to modify its
- configuration will result in an error, and <varname>errno</varname> will be
- set to <errorcode>EBADF</errorcode>.</para>
- </refsect1>
- <refsect1>
- <title>Return Value</title>
-
- <para><function>open</function> returns the new file descriptor on success.
- On error, -1 is returned, and <varname>errno</varname> is set appropriately.
- Possible error codes are:</para>
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>The requested access to the file is not allowed.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EMFILE</errorcode></term>
- <listitem>
- <para>The process already has the maximum number of files open.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENFILE</errorcode></term>
- <listitem>
- <para>The system limit on the total number of open files has been
- reached.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOMEM</errorcode></term>
- <listitem>
- <para>Insufficient kernel memory was available.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENXIO</errorcode></term>
- <listitem>
- <para>No device corresponding to this device special file exists.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-device-info.xml b/Documentation/DocBook/media/v4l/media-ioc-device-info.xml
deleted file mode 100644
index b0a21ac300b8..000000000000
--- a/Documentation/DocBook/media/v4l/media-ioc-device-info.xml
+++ /dev/null
@@ -1,132 +0,0 @@
-<refentry id="media-ioc-device-info">
- <refmeta>
- <refentrytitle>ioctl MEDIA_IOC_DEVICE_INFO</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>MEDIA_IOC_DEVICE_INFO</refname>
- <refpurpose>Query device information</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct media_device_info *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='media-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>MEDIA_IOC_DEVICE_INFO</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>All media devices must support the <constant>MEDIA_IOC_DEVICE_INFO</constant>
- ioctl. To query device information, applications call the ioctl with a
- pointer to a &media-device-info;. The driver fills the structure and returns
- the information to the application.
- The ioctl never fails.</para>
-
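A minimal C sketch of the call sequence described above (not part of the original refentry); the device path /dev/media0 and the printed fields are illustrative choices, and error handling is reduced to perror():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(void)
{
	struct media_device_info info;
	int fd = open("/dev/media0", O_RDWR);	/* assumed device path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0) {
		perror("MEDIA_IOC_DEVICE_INFO");
		close(fd);
		return 1;
	}

	printf("driver: %s, model: %s, bus_info: %s, serial: %s\n",
	       info.driver, info.model, info.bus_info, info.serial);
	/* media_version is packed with the KERNEL_VERSION() macro */
	printf("media API version: %u.%u.%u\n",
	       info.media_version >> 16,
	       (info.media_version >> 8) & 0xff,
	       info.media_version & 0xff);

	close(fd);
	return 0;
}
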
- <table pgwide="1" frame="none" id="media-device-info">
- <title>struct <structname>media_device_info</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>char</entry>
- <entry><structfield>driver</structfield>[16]</entry>
- <entry><para>Name of the driver implementing the media API as a
- NUL-terminated ASCII string. The driver version is stored in the
- <structfield>driver_version</structfield> field.</para>
-	    <para>Driver-specific applications can use this information to
- verify the driver identity. It is also useful to work around
- known bugs, or to identify drivers in error reports.</para></entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>model</structfield>[32]</entry>
- <entry>Device model name as a NUL-terminated UTF-8 string. The
- device version is stored in the <structfield>device_version</structfield>
-	    field and is not appended to the model name.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>serial</structfield>[40]</entry>
- <entry>Serial number as a NUL-terminated ASCII string.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>bus_info</structfield>[32]</entry>
- <entry>Location of the device in the system as a NUL-terminated
- ASCII string. This includes the bus type name (PCI, USB, ...) and a
- bus-specific identifier.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>media_version</structfield></entry>
- <entry>Media API version, formatted with the
- <constant>KERNEL_VERSION()</constant> macro.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>hw_revision</structfield></entry>
- <entry>Hardware device revision in a driver-specific format.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>driver_version</structfield></entry>
- <entry>Media device driver version, formatted with the
- <constant>KERNEL_VERSION()</constant> macro. Together with the
- <structfield>driver</structfield> field this identifies a particular
- driver.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[31]</entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set this array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>The <structfield>serial</structfield> and <structfield>bus_info</structfield>
- fields can be used to distinguish between multiple instances of otherwise
- identical hardware. The serial number takes precedence when provided and can
- be assumed to be unique. If the serial number is an empty string, the
- <structfield>bus_info</structfield> field can be used instead. The
- <structfield>bus_info</structfield> field is guaranteed to be unique, but
- can vary across reboots or device unplug/replug.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml b/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml
deleted file mode 100644
index 0c4f96bfc2de..000000000000
--- a/Documentation/DocBook/media/v4l/media-ioc-enum-entities.xml
+++ /dev/null
@@ -1,180 +0,0 @@
-<refentry id="media-ioc-enum-entities">
- <refmeta>
- <refentrytitle>ioctl MEDIA_IOC_ENUM_ENTITIES</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>MEDIA_IOC_ENUM_ENTITIES</refname>
- <refpurpose>Enumerate entities and their properties</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct media_entity_desc *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='media-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>MEDIA_IOC_ENUM_ENTITIES</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
- <para>To query the attributes of an entity, applications set the id field
- of a &media-entity-desc; structure and call the MEDIA_IOC_ENUM_ENTITIES
- ioctl with a pointer to this structure. The driver fills the rest of the
- structure or returns an &EINVAL; when the id is invalid.</para>
- <para>Entities can be enumerated by or'ing the id with the
- <constant>MEDIA_ENT_ID_FLAG_NEXT</constant> flag. The driver will return
- information about the entity with the smallest id strictly larger than the
- requested one ('next entity'), or the &EINVAL; if there is none.</para>
- <para>Entity IDs can be non-contiguous. Applications must
- <emphasis>not</emphasis> try to enumerate entities by calling
-    MEDIA_IOC_ENUM_ENTITIES with increasing ids until they get an error.</para>
-
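A sketch of the enumeration loop described above, assuming fd is an already-open media device file descriptor; the helper name and output format are illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/media.h>

/* Print every entity of the graph; fd is an open media device node. */
static void enum_entities(int fd)
{
	struct media_entity_desc desc;
	__u32 id = 0;

	for (;;) {
		memset(&desc, 0, sizeof(desc));
		/* ask for the entity with the smallest id larger than 'id' */
		desc.id = id | MEDIA_ENT_ID_FLAG_NEXT;
		if (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) < 0)
			break;	/* EINVAL: no entity with a larger id */
		printf("entity %u: %s, %u pad(s), %u outbound link(s)\n",
		       desc.id, desc.name, desc.pads, desc.links);
		id = desc.id;	/* continue after the entity just returned */
	}
}
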
- <table pgwide="1" frame="none" id="media-entity-desc">
- <title>struct <structname>media_entity_desc</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Entity id, set by the application. When the id is or'ed with
- <constant>MEDIA_ENT_ID_FLAG_NEXT</constant>, the driver clears the
- flag and returns the first entity with a larger id.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry></entry>
- <entry></entry>
-	    <entry>Entity name as a UTF-8 NUL-terminated string.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Entity type, see <xref linkend="media-entity-type" /> for details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>revision</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Entity revision. Always zero (obsolete)</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Entity flags, see <xref linkend="media-entity-flag" /> for details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>group_id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Entity group ID. Always zero (obsolete)</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>pads</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Number of pads</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>links</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Total number of outbound links. Inbound links are not counted
- in this field.</entry>
- </row>
- <row>
- <entry>union</entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct</entry>
- <entry><structfield>dev</structfield></entry>
- <entry></entry>
- <entry>Valid for (sub-)devices that create a single device node.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>major</structfield></entry>
- <entry>Device node major number.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>minor</structfield></entry>
- <entry>Device node minor number.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u8</entry>
- <entry><structfield>raw</structfield>[184]</entry>
- <entry></entry>
- <entry></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &media-entity-desc; <structfield>id</structfield> references
- a non-existing entity.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml b/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml
deleted file mode 100644
index 2bbeea9f3e18..000000000000
--- a/Documentation/DocBook/media/v4l/media-ioc-enum-links.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<refentry id="media-ioc-enum-links">
- <refmeta>
- <refentrytitle>ioctl MEDIA_IOC_ENUM_LINKS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>MEDIA_IOC_ENUM_LINKS</refname>
- <refpurpose>Enumerate all pads and links for a given entity</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct media_links_enum *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='media-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>MEDIA_IOC_ENUM_LINKS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To enumerate pads and/or links for a given entity, applications set
- the entity field of a &media-links-enum; structure and initialize the
-    &media-pad-desc; and &media-link-desc; structure arrays pointed to by the
- <structfield>pads</structfield> and <structfield>links</structfield> fields.
- They then call the MEDIA_IOC_ENUM_LINKS ioctl with a pointer to this
- structure.</para>
- <para>If the <structfield>pads</structfield> field is not NULL, the driver
- fills the <structfield>pads</structfield> array with information about the
- entity's pads. The array must have enough room to store all the entity's
- pads. The number of pads can be retrieved with the &MEDIA-IOC-ENUM-ENTITIES;
- ioctl.</para>
- <para>If the <structfield>links</structfield> field is not NULL, the driver
- fills the <structfield>links</structfield> array with information about the
- entity's outbound links. The array must have enough room to store all the
- entity's outbound links. The number of outbound links can be retrieved with
- the &MEDIA-IOC-ENUM-ENTITIES; ioctl.</para>
- <para>Only forward links that originate at one of the entity's source pads
- are returned during the enumeration process.</para>
-
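A sketch of this call, assuming fd is an open media device descriptor and desc is a struct media_entity_desc previously filled by MEDIA_IOC_ENUM_ENTITIES, so the pad and link counts are known; helper name and output format are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/media.h>

/* Enumerate the pads and outbound links of one entity. */
static int enum_links(int fd, const struct media_entity_desc *desc)
{
	struct media_links_enum links_enum;
	struct media_pad_desc *pads;
	struct media_link_desc *links;
	unsigned int i;
	int ret = -1;

	pads = calloc(desc->pads, sizeof(*pads));
	links = calloc(desc->links, sizeof(*links));
	if ((desc->pads && !pads) || (desc->links && !links))
		goto out;

	memset(&links_enum, 0, sizeof(links_enum));
	links_enum.entity = desc->id;
	links_enum.pads = pads;		/* NULL would make the driver skip pads */
	links_enum.links = links;	/* NULL would make the driver skip links */

	if (ioctl(fd, MEDIA_IOC_ENUM_LINKS, &links_enum) < 0) {
		perror("MEDIA_IOC_ENUM_LINKS");
		goto out;
	}

	for (i = 0; i < desc->pads; i++)
		printf("  pad %u: flags 0x%x\n", pads[i].index, pads[i].flags);
	for (i = 0; i < desc->links; i++)
		printf("  link %u:%u -> %u:%u, flags 0x%x\n",
		       links[i].source.entity, links[i].source.index,
		       links[i].sink.entity, links[i].sink.index,
		       links[i].flags);
	ret = 0;
out:
	free(pads);
	free(links);
	return ret;
}
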
- <table pgwide="1" frame="none" id="media-links-enum">
- <title>struct <structname>media_links_enum</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>entity</structfield></entry>
- <entry>Entity id, set by the application.</entry>
- </row>
- <row>
- <entry>&media-pad-desc;</entry>
- <entry>*<structfield>pads</structfield></entry>
- <entry>Pointer to a pads array allocated by the application. Ignored
- if NULL.</entry>
- </row>
- <row>
- <entry>&media-link-desc;</entry>
- <entry>*<structfield>links</structfield></entry>
- <entry>Pointer to a links array allocated by the application. Ignored
- if NULL.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-pad-desc">
- <title>struct <structname>media_pad_desc</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>entity</structfield></entry>
- <entry>ID of the entity this pad belongs to.</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>index</structfield></entry>
- <entry>0-based pad index.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Pad flags, see <xref linkend="media-pad-flag" /> for more details.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-link-desc">
- <title>struct <structname>media_link_desc</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>&media-pad-desc;</entry>
- <entry><structfield>source</structfield></entry>
- <entry>Pad at the origin of this link.</entry>
- </row>
- <row>
- <entry>&media-pad-desc;</entry>
- <entry><structfield>sink</structfield></entry>
- <entry>Pad at the target of this link.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Link flags, see <xref linkend="media-link-flag" /> for more details.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
-	  <para>The &media-links-enum; <structfield>entity</structfield> field
-	  references a non-existing entity.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml b/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml
deleted file mode 100644
index e0d49fa329f0..000000000000
--- a/Documentation/DocBook/media/v4l/media-ioc-g-topology.xml
+++ /dev/null
@@ -1,391 +0,0 @@
-<refentry id="media-g-topology">
- <refmeta>
- <refentrytitle>ioctl MEDIA_IOC_G_TOPOLOGY</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>MEDIA_IOC_G_TOPOLOGY</refname>
- <refpurpose>Enumerate the graph topology and graph element properties</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct media_v2_topology *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='media-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>MEDIA_IOC_G_TOPOLOGY</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-    <para>This ioctl is typically called twice. On the first call, the
-    structure defined at &media-v2-topology; should be zeroed. On return,
-    if no error occurred, the ioctl reports the
-    <constant>topology_version</constant> and the total number of entities,
-    interfaces, pads and links.</para>
-    <para>Before the second call, userspace should allocate arrays to store
-    the desired graph elements and store pointers to them in the
-    ptr_entities, ptr_interfaces, ptr_links and/or ptr_pads fields, keeping
-    the other values untouched.</para>
-    <para>If the <constant>topology_version</constant> remains the same, the
-    ioctl fills the requested arrays with the media graph elements.</para>
-
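A sketch of the two-call pattern, assuming fd is an open media device descriptor and a linux/media.h that declares the MEDIA_IOC_G_TOPOLOGY interface; a failed calloc() leaves a zero pointer, which simply makes the ioctl skip that array, and the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/media.h>

static int dump_topology(int fd)
{
	struct media_v2_topology topo;
	struct media_v2_entity *ents;
	struct media_v2_interface *ifaces;
	struct media_v2_pad *pads;
	struct media_v2_link *links;
	__u64 i;

	/* first call: zeroed struct, only the counts and
	 * topology_version are returned */
	memset(&topo, 0, sizeof(topo));
	if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0) {
		perror("MEDIA_IOC_G_TOPOLOGY");
		return -1;
	}

	ents = calloc(topo.num_entities, sizeof(*ents));
	ifaces = calloc(topo.num_interfaces, sizeof(*ifaces));
	pads = calloc(topo.num_pads, sizeof(*pads));
	links = calloc(topo.num_links, sizeof(*links));

	topo.ptr_entities = (uintptr_t)ents;
	topo.ptr_interfaces = (uintptr_t)ifaces;
	topo.ptr_pads = (uintptr_t)pads;
	topo.ptr_links = (uintptr_t)links;

	/* second call: the arrays are filled in; ENOSPC would mean the
	 * graph changed in between and the sequence should be restarted */
	if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0) {
		perror("MEDIA_IOC_G_TOPOLOGY");
	} else {
		printf("topology %llu: %llu entities, %llu pads, %llu links\n",
		       (unsigned long long)topo.topology_version,
		       (unsigned long long)topo.num_entities,
		       (unsigned long long)topo.num_pads,
		       (unsigned long long)topo.num_links);
		for (i = 0; ents && i < topo.num_entities; i++)
			printf("  entity %u: %s\n", ents[i].id, ents[i].name);
	}

	free(ents);
	free(ifaces);
	free(pads);
	free(links);
	return 0;
}
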
- <table pgwide="1" frame="none" id="media-v2-topology">
- <title>struct <structname>media_v2_topology</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u64</entry>
- <entry><structfield>topology_version</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Version of the media graph topology. When the graph is
- created, this field starts with zero. Every time a graph
- element is added or removed, this field is
- incremented.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>num_entities</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Number of entities in the graph</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>ptr_entities</structfield></entry>
- <entry></entry>
- <entry></entry>
-	    <entry>A pointer to a memory area where the entities array
-		   will be stored, converted to a 64-bit integer. It can be
-		   zero; if zero, the ioctl won't store the entities and will
-		   just update <constant>num_entities</constant>.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>num_interfaces</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Number of interfaces in the graph</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>ptr_interfaces</structfield></entry>
- <entry></entry>
- <entry></entry>
-	    <entry>A pointer to a memory area where the interfaces array
-		   will be stored, converted to a 64-bit integer. It can be
-		   zero; if zero, the ioctl won't store the interfaces and will
-		   just update <constant>num_interfaces</constant>.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>num_pads</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Total number of pads in the graph</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>ptr_pads</structfield></entry>
- <entry></entry>
- <entry></entry>
-	    <entry>A pointer to a memory area where the pads array
-		   will be stored, converted to a 64-bit integer. It can be
-		   zero; if zero, the ioctl won't store the pads and will
-		   just update <constant>num_pads</constant>.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>num_links</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Total number of data and interface links in the graph</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>ptr_links</structfield></entry>
- <entry></entry>
- <entry></entry>
-	    <entry>A pointer to a memory area where the links array
-		   will be stored, converted to a 64-bit integer. It can be
-		   zero; if zero, the ioctl won't store the links and will
-		   just update <constant>num_links</constant>.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-v2-entity">
- <title>struct <structname>media_v2_entity</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Unique ID for the entity.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>name</structfield>[64]</entry>
- <entry></entry>
- <entry></entry>
-	    <entry>Entity name as a UTF-8 NUL-terminated string.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>function</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Entity main function, see <xref linkend="media-entity-type" /> for details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
-	    <entry><structfield>reserved</structfield>[12]</entry>
-	    <entry></entry>
-	    <entry></entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set this array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-v2-interface">
- <title>struct <structname>media_v2_interface</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Unique ID for the interface.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>intf_type</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Interface type, see <xref linkend="media-intf-type" /> for details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Interface flags. Currently unused.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
- <entry></entry>
- <entry></entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set this array to zero.</entry>
- </row>
- <row>
- <entry>struct media_v2_intf_devnode</entry>
- <entry><structfield>devnode</structfield></entry>
- <entry></entry>
- <entry></entry>
-	    <entry>Used only for device node interfaces. See <xref linkend="media-v2-intf-devnode" /> for details.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-v2-intf-devnode">
-      <title>struct <structname>media_v2_intf_devnode</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>major</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Device node major number.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>minor</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Device node minor number.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-v2-pad">
- <title>struct <structname>media_v2_pad</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Unique ID for the pad.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>entity_id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Unique ID for the entity where this pad belongs.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Pad flags, see <xref linkend="media-pad-flag" /> for more details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
- <entry></entry>
- <entry></entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set this array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="media-v2-link">
-      <title>struct <structname>media_v2_link</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry></entry>
-	    <entry>Unique ID for the link.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>source_id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>
- <para>On pad to pad links: unique ID for the source pad.</para>
- <para>On interface to entity links: unique ID for the interface.</para>
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>sink_id</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>
- <para>On pad to pad links: unique ID for the sink pad.</para>
- <para>On interface to entity links: unique ID for the entity.</para>
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Link flags, see <xref linkend="media-link-flag" /> for more details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[5]</entry>
- <entry></entry>
- <entry></entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set this array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>ENOSPC</errorcode></term>
- <listitem>
-	  <para>This is returned when one or more of num_entities,
-	  num_interfaces, num_links or num_pads are non-zero and smaller
-	  than the actual number of elements inside the graph. This may happen
-	  if the <constant>topology_version</constant> changed since
-	  the last time this ioctl was called. Userspace should usually
-	  free the arrays the pointers refer to, zero the struct fields and
-	  call this ioctl again.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-ioc-setup-link.xml b/Documentation/DocBook/media/v4l/media-ioc-setup-link.xml
deleted file mode 100644
index fc2e522ee65a..000000000000
--- a/Documentation/DocBook/media/v4l/media-ioc-setup-link.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<refentry id="media-ioc-setup-link">
- <refmeta>
- <refentrytitle>ioctl MEDIA_IOC_SETUP_LINK</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>MEDIA_IOC_SETUP_LINK</refname>
- <refpurpose>Modify the properties of a link</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct media_link_desc *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>File descriptor returned by
- <link linkend='media-func-open'><function>open()</function></link>.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>MEDIA_IOC_SETUP_LINK</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To change link properties applications fill a &media-link-desc; with
- link identification information (source and sink pad) and the new requested
- link flags. They then call the MEDIA_IOC_SETUP_LINK ioctl with a pointer to
- that structure.</para>
- <para>The only configurable property is the <constant>ENABLED</constant>
- link flag to enable/disable a link. Links marked with the
-    <constant>IMMUTABLE</constant> link flag cannot be enabled or disabled.
- </para>
- <para>Link configuration has no side effect on other links. If an enabled
- link at the sink pad prevents the link from being enabled, the driver
- returns with an &EBUSY;.</para>
- <para>Only links marked with the <constant>DYNAMIC</constant> link flag can
- be enabled/disabled while streaming media data. Attempting to enable or
- disable a streaming non-dynamic link will return an &EBUSY;.</para>
- <para>If the specified link can't be found the driver returns with an
- &EINVAL;.</para>
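
A sketch of enabling one link, assuming fd is an open media device descriptor and the source/sink entity IDs and pad indexes were obtained from the enumeration ioctls; the helper name is illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/media.h>

/* Enable the data link between the given source and sink pads; the pads
 * are identified exactly as reported by MEDIA_IOC_ENUM_LINKS. */
static int enable_link(int fd, __u32 src_entity, __u16 src_pad,
		       __u32 sink_entity, __u16 sink_pad)
{
	struct media_link_desc link;

	memset(&link, 0, sizeof(link));
	link.source.entity = src_entity;
	link.source.index = src_pad;
	link.sink.entity = sink_entity;
	link.sink.index = sink_pad;
	link.flags = MEDIA_LNK_FL_ENABLED;	/* clear the flag to disable */

	return ioctl(fd, MEDIA_IOC_SETUP_LINK, &link);
}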
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &media-link-desc; references a non-existing link, or the
- link is immutable and an attempt to modify its configuration was made.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/media-types.xml b/Documentation/DocBook/media/v4l/media-types.xml
deleted file mode 100644
index 95aa1f9c836a..000000000000
--- a/Documentation/DocBook/media/v4l/media-types.xml
+++ /dev/null
@@ -1,379 +0,0 @@
-<section id="media-controller-types">
-<title>Types and flags used to represent the media graph elements</title>
-
- <table frame="none" pgwide="1" id="media-entity-type">
- <title>Media entity types</title>
- <tgroup cols="2">
- <colspec colname="c1"/>
- <colspec colname="c2"/>
- <tbody valign="top">
- <row>
- <entry><constant>MEDIA_ENT_F_UNKNOWN</constant> and <constant>MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN</constant></entry>
-	    <entry>Unknown entity. That generally indicates that
-	    the driver didn't properly initialize the entity, which is a Kernel bug.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_IO_V4L</constant></entry>
- <entry>Data streaming input and/or output entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_IO_VBI</constant></entry>
- <entry>V4L VBI streaming input or output entity</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_IO_SWRADIO</constant></entry>
- <entry>V4L Software Digital Radio (SDR) streaming input or output entity</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_IO_DTV</constant></entry>
- <entry>DVB Digital TV streaming input or output entity</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_DTV_DEMOD</constant></entry>
- <entry>Digital TV demodulator entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_TS_DEMUX</constant></entry>
-	    <entry>MPEG transport stream demux entity. Could be implemented in hardware or in kernelspace by the Linux DVB subsystem.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_DTV_CA</constant></entry>
- <entry>Digital TV Conditional Access module (CAM) entity</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_DTV_NET_DECAP</constant></entry>
-	    <entry>Digital TV network ULE/MLE de-encapsulation entity. Could be implemented in hardware or in kernelspace.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_CONN_RF</constant></entry>
- <entry>Connector for a Radio Frequency (RF) signal.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_CONN_SVIDEO</constant></entry>
- <entry>Connector for a S-Video signal.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_CONN_COMPOSITE</constant></entry>
-	    <entry>Connector for an RGB composite signal.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry>
- <entry>Camera video sensor entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_FLASH</constant></entry>
- <entry>Flash controller entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_LENS</constant></entry>
- <entry>Lens controller entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_ATV_DECODER</constant></entry>
-	    <entry>Analog video decoder. Its basic function is to accept
-	    analog video from a wide variety of sources such as
-	    broadcast, DVD players, cameras and video cassette recorders, in
-	    NTSC, PAL, SECAM or HD format, separate the stream
-	    into its component parts, luminance and chrominance, and output
-	    it in some digital video standard with appropriate timing
-	    signals.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_TUNER</constant></entry>
- <entry>Digital TV, analog TV, radio and/or software radio tuner,
-	    which consists of a PLL tuning stage that converts a radio
-	    frequency (RF) signal into an intermediate frequency (IF).
-	    Modern tuners have internal IF-PLL decoders for audio
-	    and video, but older models have those stages implemented
-	    in separate entities.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_IF_VID_DECODER</constant></entry>
- <entry>IF-PLL video decoder. It receives the IF from a PLL
- and decodes the analog TV video signal. This is commonly
- found on some very old analog tuners, like Philips MK3
- designs. They all contain a tda9887 (or some software
- compatible similar chip, like tda9885). Those devices
- use a different I2C address than the tuner PLL.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_IF_AUD_DECODER</constant></entry>
- <entry>IF-PLL sound decoder. It receives the IF from a PLL
- and decodes the analog TV audio signal. This is commonly
- found on some very old analog hardware, like Micronas
- msp3400, Philips tda9840, tda985x, etc. Those devices
- use a different I2C address than the tuner PLL and
- should be controlled together with the IF-PLL video
- decoder.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_AUDIO_CAPTURE</constant></entry>
- <entry>Audio Capture Function Entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_AUDIO_PLAYBACK</constant></entry>
- <entry>Audio Playback Function Entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_AUDIO_MIXER</constant></entry>
- <entry>Audio Mixer Function Entity.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_PROC_VIDEO_COMPOSER</constant></entry>
- <entry>Video composer (blender). An entity capable of video
- composing must have at least two sink pads and one source
- pad, and composes input video frames onto output video
- frames. Composition can be performed using alpha blending,
- color keying, raster operations (ROP), stitching or any other
- means.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER</constant></entry>
- <entry>Video pixel formatter. An entity capable of pixel formatting
- must have at least one sink pad and one source pad. Read
- pixel formatters read pixels from memory and perform a subset
- of unpacking, cropping, color keying, alpha multiplication
- and pixel encoding conversion. Write pixel formatters perform
- a subset of dithering, pixel encoding conversion and packing
- and write pixels to memory.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV</constant></entry>
- <entry>Video pixel encoding converter. An entity capable of pixel
-	    encoding conversion must have at least one sink pad and one
- source pad, and convert the encoding of pixels received on
- its sink pad(s) to a different encoding output on its source
- pad(s). Pixel encoding conversion includes but isn't limited
- to RGB to/from HSV, RGB to/from YUV and CFA (Bayer) to RGB
- conversions.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_PROC_VIDEO_LUT</constant></entry>
- <entry>Video look-up table. An entity capable of video lookup table
- processing must have one sink pad and one source pad. It uses
- the values of the pixels received on its sink pad to look up
- entries in internal tables and output them on its source pad.
- The lookup processing can be performed on all components
- separately or combine them for multi-dimensional table
- lookups.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_PROC_VIDEO_SCALER</constant></entry>
- <entry>Video scaler. An entity capable of video scaling must have
- at least one sink pad and one source pad, and scale the
- video frame(s) received on its sink pad(s) to a different
- resolution output on its source pad(s). The range of
- supported scaling ratios is entity-specific and can differ
- between the horizontal and vertical directions (in particular
- scaling can be supported in one direction only). Binning and
- skipping are considered as scaling.
- </entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_F_PROC_VIDEO_STATISTICS</constant></entry>
- <entry>Video statistics computation (histogram, 3A, ...). An entity
- capable of statistics computation must have one sink pad and
- one source pad. It computes statistics over the frames
- received on its sink pad and outputs the statistics data on
- its source pad.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="media-entity-flag">
- <title>Media entity flags</title>
- <tgroup cols="2">
- <colspec colname="c1"/>
- <colspec colname="c2"/>
- <tbody valign="top">
- <row>
- <entry><constant>MEDIA_ENT_FL_DEFAULT</constant></entry>
- <entry>Default entity for its type. Used to discover the default
- audio, VBI and video devices, the default camera sensor, ...</entry>
- </row>
- <row>
- <entry><constant>MEDIA_ENT_FL_CONNECTOR</constant></entry>
-	    <entry>The entity represents a data connector.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="media-intf-type">
- <title>Media interface types</title>
- <tgroup cols="3">
- <colspec colname="c1"/>
- <colspec colname="c2"/>
- <colspec colname="c3"/>
- <tbody valign="top">
- <row>
- <entry><constant>MEDIA_INTF_T_DVB_FE</constant></entry>
- <entry>Device node interface for the Digital TV frontend</entry>
- <entry>typically, /dev/dvb/adapter?/frontend?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_DVB_DEMUX</constant></entry>
- <entry>Device node interface for the Digital TV demux</entry>
- <entry>typically, /dev/dvb/adapter?/demux?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_DVB_DVR</constant></entry>
- <entry>Device node interface for the Digital TV DVR</entry>
- <entry>typically, /dev/dvb/adapter?/dvr?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_DVB_CA</constant></entry>
- <entry>Device node interface for the Digital TV Conditional Access</entry>
- <entry>typically, /dev/dvb/adapter?/ca?</entry>
- </row>
- <row>
-	    <entry><constant>MEDIA_INTF_T_DVB_NET</constant></entry>
- <entry>Device node interface for the Digital TV network control</entry>
- <entry>typically, /dev/dvb/adapter?/net?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_V4L_VIDEO</constant></entry>
- <entry>Device node interface for video (V4L)</entry>
- <entry>typically, /dev/video?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_V4L_VBI</constant></entry>
- <entry>Device node interface for VBI (V4L)</entry>
- <entry>typically, /dev/vbi?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_V4L_RADIO</constant></entry>
- <entry>Device node interface for radio (V4L)</entry>
-	    <entry>typically, /dev/radio?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_V4L_SUBDEV</constant></entry>
- <entry>Device node interface for a V4L subdevice</entry>
- <entry>typically, /dev/v4l-subdev?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_V4L_SWRADIO</constant></entry>
- <entry>Device node interface for Software Defined Radio (V4L)</entry>
- <entry>typically, /dev/swradio?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_PCM_CAPTURE</constant></entry>
- <entry>Device node interface for ALSA PCM Capture</entry>
- <entry>typically, /dev/snd/pcmC?D?c</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_PCM_PLAYBACK</constant></entry>
- <entry>Device node interface for ALSA PCM Playback</entry>
- <entry>typically, /dev/snd/pcmC?D?p</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_CONTROL</constant></entry>
- <entry>Device node interface for ALSA Control</entry>
- <entry>typically, /dev/snd/controlC?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_COMPRESS</constant></entry>
- <entry>Device node interface for ALSA Compress</entry>
- <entry>typically, /dev/snd/compr?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_RAWMIDI</constant></entry>
- <entry>Device node interface for ALSA Raw MIDI</entry>
- <entry>typically, /dev/snd/midi?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_HWDEP</constant></entry>
- <entry>Device node interface for ALSA Hardware Dependent</entry>
- <entry>typically, /dev/snd/hwC?D?</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_SEQUENCER</constant></entry>
- <entry>Device node interface for ALSA Sequencer</entry>
- <entry>typically, /dev/snd/seq</entry>
- </row>
- <row>
- <entry><constant>MEDIA_INTF_T_ALSA_TIMER</constant></entry>
- <entry>Device node interface for ALSA Timer</entry>
- <entry>typically, /dev/snd/timer</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="media-pad-flag">
- <title>Media pad flags</title>
- <tgroup cols="2">
- <colspec colname="c1"/>
- <colspec colname="c2"/>
- <tbody valign="top">
- <row>
- <entry><constant>MEDIA_PAD_FL_SINK</constant></entry>
- <entry>Input pad, relative to the entity. Input pads sink data and
- are targets of links.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_PAD_FL_SOURCE</constant></entry>
- <entry>Output pad, relative to the entity. Output pads source data
- and are origins of links.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_PAD_FL_MUST_CONNECT</constant></entry>
- <entry>If this flag is set and the pad is linked to any other
- pad, then at least one of those links must be enabled for the
- entity to be able to stream. There could be temporary reasons
- (e.g. device configuration dependent) for the pad to need
- enabled links even when this flag isn't set; the absence of the
- flag doesn't imply there is none.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>One and only one of <constant>MEDIA_PAD_FL_SINK</constant> and
- <constant>MEDIA_PAD_FL_SOURCE</constant> must be set for every pad.</para>
-
- <table frame="none" pgwide="1" id="media-link-flag">
- <title>Media link flags</title>
- <tgroup cols="2">
- <colspec colname="c1"/>
- <colspec colname="c2"/>
- <tbody valign="top">
- <row>
- <entry><constant>MEDIA_LNK_FL_ENABLED</constant></entry>
- <entry>The link is enabled and can be used to transfer media data.
- When two or more links target a sink pad, only one of them can be
- enabled at a time.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_LNK_FL_IMMUTABLE</constant></entry>
- <entry>The link enabled state can't be modified at runtime. An
- immutable link is always enabled.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_LNK_FL_DYNAMIC</constant></entry>
- <entry>The link enabled state can be modified during streaming. This
- flag is set by drivers and is read-only for applications.</entry>
- </row>
- <row>
- <entry><constant>MEDIA_LNK_FL_LINK_TYPE</constant></entry>
- <entry><para>This is a bitmask that defines the type of the link.
- Currently, two types of links are supported:</para>
- <para><constant>MEDIA_LNK_FL_DATA_LINK</constant>
- if the link is between two pads</para>
- <para><constant>MEDIA_LNK_FL_INTERFACE_LINK</constant>
- if the link is between an interface and an entity</para></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
-</section>
diff --git a/Documentation/DocBook/media/v4l/pipeline.pdf b/Documentation/DocBook/media/v4l/pipeline.pdf
deleted file mode 100644
index ee3e37f04b6a..000000000000
--- a/Documentation/DocBook/media/v4l/pipeline.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/pixfmt-grey.xml b/Documentation/DocBook/media/v4l/pixfmt-grey.xml
deleted file mode 100644
index bee970d3f76d..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-grey.xml
+++ /dev/null
@@ -1,62 +0,0 @@
- <refentry id="V4L2-PIX-FMT-GREY">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_GREY ('GREY')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_GREY</constant></refname>
- <refpurpose>Grey-scale image</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image. It is really a degenerate
-Y'CbCr format which simply contains no Cb or Cr data.</para>
-
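A small helper sketch (not part of the original format description) showing how a sample is addressed in this format, given the bytesperline value negotiated with the driver; the helper name is illustrative:

#include <stddef.h>

/* Value of the luma sample at column x, line y of a GREY buffer. */
static unsigned char grey_sample(const unsigned char *buf,
				 unsigned int bytesperline,
				 unsigned int x, unsigned int y)
{
	return buf[(size_t)y * bytesperline + x];
}
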
- <example>
- <title><constant>V4L2_PIX_FMT_GREY</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-m420.xml b/Documentation/DocBook/media/v4l/pixfmt-m420.xml
deleted file mode 100644
index aadae92c5d04..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-m420.xml
+++ /dev/null
@@ -1,139 +0,0 @@
- <refentry id="V4L2-PIX-FMT-M420">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_M420 ('M420')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_M420</constant></refname>
- <refpurpose>Format with &frac12; horizontal and vertical chroma
- resolution, also known as YUV 4:2:0. Hybrid plane line-interleaved
- layout.</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>M420 is a YUV format with &frac12; horizontal and vertical chroma
- subsampling (YUV 4:2:0). Pixels are organized as interleaved luma and
- chroma planes. Two lines of luma data are followed by one line of chroma
- data.</para>
- <para>The luma plane has one byte per pixel. The chroma plane contains
- interleaved CbCr pixels subsampled by &frac12; in the horizontal and
- vertical directions. Each CbCr pair belongs to four pixels. For example,
-Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
-Y'<subscript>00</subscript>, Y'<subscript>01</subscript>,
-Y'<subscript>10</subscript>, Y'<subscript>11</subscript>.</para>
-
-  <para>All line lengths are identical: if the Y lines include pad bytes,
- so do the CbCr lines.</para>
-
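A sketch of the resulting addressing: two luma lines are followed by one chroma line, all bytesperline bytes long; the helper names are illustrative and bytesperline is the value negotiated with the driver:

#include <stddef.h>

/* Byte offset of the luma sample at (x, y): luma line y is the
 * (y + y / 2)-th stored line. */
static size_t m420_luma_offset(unsigned int bytesperline,
			       unsigned int x, unsigned int y)
{
	return (size_t)(y + y / 2) * bytesperline + x;
}

/* Byte offset of the Cb sample shared by the 2x2 block containing (x, y):
 * the chroma line for luma lines 2n and 2n+1 is the (3n + 2)-th stored
 * line, and Cr follows at the next byte. */
static size_t m420_chroma_offset(unsigned int bytesperline,
				 unsigned int x, unsigned int y)
{
	return (size_t)(3 * (y / 2) + 2) * bytesperline + (x / 2) * 2;
}
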
- <example>
- <title><constant>V4L2_PIX_FMT_M420</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
-	    <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
-	    <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
-	    <entry>start&nbsp;+&nbsp;20:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12.xml
deleted file mode 100644
index 84dd4fd7cb80..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12.xml
+++ /dev/null
@@ -1,143 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_NV12 ('NV12'), V4L2_PIX_FMT_NV21 ('NV21')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-NV12"><constant>V4L2_PIX_FMT_NV12</constant></refname>
- <refname id="V4L2-PIX-FMT-NV21"><constant>V4L2_PIX_FMT_NV21</constant></refname>
- <refpurpose>Formats with &frac12; horizontal and vertical
-chroma resolution, also known as YUV 4:2:0. One luminance and one
-chrominance plane with alternating chroma samples as opposed to
-<constant>V4L2_PIX_FMT_YVU420</constant></refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These are two-plane versions of the YUV 4:2:0 format.
-The three components are separated into two sub-images or planes. The
-Y plane is first. The Y plane has one byte per pixel. For
-<constant>V4L2_PIX_FMT_NV12</constant>, a combined CbCr plane
-immediately follows the Y plane in memory. The CbCr plane is the same
-width, in bytes, as the Y plane (and of the image), but is half as
-tall in pixels. Each CbCr pair belongs to four pixels. For example,
-Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
-Y'<subscript>00</subscript>, Y'<subscript>01</subscript>,
-Y'<subscript>10</subscript>, Y'<subscript>11</subscript>.
-<constant>V4L2_PIX_FMT_NV21</constant> is the same except the Cb and
-Cr bytes are swapped: the CrCb plane starts with a Cr byte.</para>
-
- <para>If the Y plane has pad bytes after each row, then the
-CbCr plane has as many pad bytes after its rows.</para>
-
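A sketch of the resulting addressing for this single-buffer layout, where height is the luma height in lines and bytesperline the negotiated line stride; the helper names are illustrative:

#include <stddef.h>

/* Byte offset of the luma sample at (x, y). */
static size_t nv12_luma_offset(unsigned int bytesperline,
			       unsigned int x, unsigned int y)
{
	return (size_t)y * bytesperline + x;
}

/* Byte offset of the Cb sample shared by the 2x2 block containing (x, y):
 * the CbCr plane starts right after 'height' luma lines and Cr follows
 * at the next byte (for NV21 the same offset holds Cr instead). */
static size_t nv12_chroma_offset(unsigned int bytesperline,
				 unsigned int height,
				 unsigned int x, unsigned int y)
{
	return (size_t)(height + y / 2) * bytesperline + (x / 2) * 2;
}
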
- <example>
- <title><constant>V4L2_PIX_FMT_NV12</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;20:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
deleted file mode 100644
index f3a3d459fcdf..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
+++ /dev/null
@@ -1,153 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_NV12M ('NM12'), V4L2_PIX_FMT_NV21M ('NM21'), V4L2_PIX_FMT_NV12MT_16X16</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-NV12M"><constant>V4L2_PIX_FMT_NV12M</constant></refname>
- <refname id="V4L2-PIX-FMT-NV21M"><constant>V4L2_PIX_FMT_NV21M</constant></refname>
- <refname id="V4L2-PIX-FMT-NV12MT-16X16"><constant>V4L2_PIX_FMT_NV12MT_16X16</constant></refname>
- <refpurpose>Variation of <constant>V4L2_PIX_FMT_NV12</constant> and <constant>V4L2_PIX_FMT_NV21</constant> with planes
-    non-contiguous in memory.</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a multi-planar, two-plane version of the YUV 4:2:0 format.
-The three components are separated into two sub-images or planes.
-<constant>V4L2_PIX_FMT_NV12M</constant> differs from <constant>V4L2_PIX_FMT_NV12
-</constant> in that the two planes are non-contiguous in memory, i.e. the chroma
-plane does not necessarily immediately follow the luma plane.
-The luminance data occupies the first plane. The Y plane has one byte per pixel.
-The second plane holds the chrominance data with alternating chroma samples.
-The CbCr plane is the same width, in bytes, as the Y plane (and of the image),
-but is half as tall in pixels. Each CbCr pair belongs to four pixels. For example,
-Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
-Y'<subscript>00</subscript>, Y'<subscript>01</subscript>,
-Y'<subscript>10</subscript>, Y'<subscript>11</subscript>.
-<constant>V4L2_PIX_FMT_NV12MT_16X16</constant> is the tiled version of
-<constant>V4L2_PIX_FMT_NV12M</constant> with 16x16 macroblock tiles. Here pixels
-are arranged in 16x16 2D tiles and tiles are arranged in linear order in memory.
-<constant>V4L2_PIX_FMT_NV21M</constant> is the same as <constant>V4L2_PIX_FMT_NV12M</constant>
-except that the Cb and Cr bytes are swapped: the CrCb plane starts with a Cr byte.</para>
-
- <para><constant>V4L2_PIX_FMT_NV12M</constant> is intended to be
-used only in drivers and applications that support the multi-planar API,
-described in <xref linkend="planar-apis"/>. </para>
-
- <para>If the Y plane has pad bytes after each row, then the
-CbCr plane has as many pad bytes after its rows.</para>
-
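A sketch of the per-plane addressing, assuming the two planes were mapped separately through the multi-planar API and plane_y/plane_cbcr are the mapped plane start addresses; the helper names are illustrative:

#include <stddef.h>

/* Luma sample at (x, y), read from the first (Y) plane. */
static unsigned char nv12m_luma(const unsigned char *plane_y,
				unsigned int bytesperline,
				unsigned int x, unsigned int y)
{
	return plane_y[(size_t)y * bytesperline + x];
}

/* Cb sample shared by the 2x2 block containing (x, y), read from the
 * second (CbCr) plane; Cr is at the next byte (swapped for NV21M). */
static unsigned char nv12m_cb(const unsigned char *plane_cbcr,
			      unsigned int bytesperline,
			      unsigned int x, unsigned int y)
{
	return plane_cbcr[(size_t)(y / 2) * bytesperline + (x / 2) * 2];
}
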
- <example>
- <title><constant>V4L2_PIX_FMT_NV12M</constant> 4 &times; 4 pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start0&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;4:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
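
As a rough illustration of the layout described above (not taken from the DocBook source), the following C sketch computes the two V4L2_PIX_FMT_NV12M plane sizes, assuming no row padding (bytesperline equals the width); the helper name is hypothetical.

    #include <stddef.h>
    #include <stdio.h>

    /*
     * Hypothetical helper: plane sizes of a V4L2_PIX_FMT_NV12M image,
     * assuming bytesperline == width (no padding at the end of rows).
     */
    static void nv12m_plane_sizes(size_t width, size_t height,
                                  size_t *y_size, size_t *cbcr_size)
    {
            *y_size    = width * height;       /* plane 0: one byte per Y sample   */
            *cbcr_size = width * height / 2;   /* plane 1: same width, half height */
    }

    int main(void)
    {
            size_t y, cbcr;

            nv12m_plane_sizes(640, 480, &y, &cbcr);
            printf("Y plane: %zu bytes, CbCr plane: %zu bytes\n", y, cbcr);
            return 0;
    }
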
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12mt.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12mt.xml
deleted file mode 100644
index 8a70a1707b7a..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12mt.xml
+++ /dev/null
@@ -1,66 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_NV12MT ('TM12')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-NV12MT"><constant>V4L2_PIX_FMT_NV12MT
-</constant></refname>
- <refpurpose>Formats with &frac12; horizontal and vertical
-chroma resolution. This format has two planes - one for luminance and one for
-chrominance. Chroma samples are interleaved. The difference from
-<constant>V4L2_PIX_FMT_NV12</constant> is the memory layout: pixels are
-grouped into 64x32 macroblocks, and the order of macroblocks in memory
-follows a Z-shaped pattern rather than a linear one.
- </refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a two-plane version of the YUV 4:2:0 format where data
-is grouped into 64x32 macroblocks. The three components are separated into two
-sub-images or planes. The Y plane has one byte per pixel and pixels are grouped
-into 64x32 macroblocks. The CbCr plane has the same width, in bytes, as the Y
-plane (and the image), but is half as tall in pixels. The chroma plane is also
-grouped into 64x32 macroblocks.</para>
- <para>The width of the buffer has to be aligned to a multiple of 128, and
-the height to a multiple of 32. Every four adjacent macroblocks - two
-horizontally and two vertically - are grouped together and are located in
-memory in Z or flipped Z order.</para>
- <para>The layout of macroblocks in memory is presented in the following
-figure.</para>
- <para><figure id="nv12mt">
- <title><constant>V4L2_PIX_FMT_NV12MT</constant> macroblock Z shape
-memory layout</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="nv12mt.gif" format="GIF" />
- </imageobject>
- </mediaobject>
- </figure>
- The requirement that the width be a multiple of 128 exists because the
-Z shape cannot be cut in half horizontally. If the vertical resolution in
-macroblocks is odd, the last row of macroblocks is arranged in linear
-order.</para>
- <para>The chroma plane uses the same layout, with Cb and Cr samples
-interleaved. The height of the buffer is aligned to a multiple of 32.
- </para>
- <example>
-    <title>Memory layout of macroblocks in <constant>V4L2_PIX_FMT_NV12MT
-</constant> format pixel image - extreme case</title>
- <para>
- <figure id="nv12mt_ex">
- <title>Example <constant>V4L2_PIX_FMT_NV12MT</constant> memory
-layout of macroblocks</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="nv12mt_example.gif" format="GIF" />
- </imageobject>
- </mediaobject>
- </figure>
- Memory layout of macroblocks of <constant>V4L2_PIX_FMT_NV12MT
-</constant> format in the most extreme case.
- </para>
- </example>
- </refsect1>
- </refentry>
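
The alignment rules above translate directly into buffer geometry. The following C sketch (not from the DocBook source) performs only that arithmetic; the Z and flipped-Z ordering of the macroblocks themselves is left to the figures.

    #include <stddef.h>
    #include <stdio.h>

    #define NV12MT_MB_W 64   /* macroblock width in pixels  */
    #define NV12MT_MB_H 32   /* macroblock height in pixels */

    static size_t align_up(size_t v, size_t a)
    {
            return (v + a - 1) / a * a;
    }

    int main(void)
    {
            size_t width = 1920, height = 1080;

            /* Buffer width must be a multiple of 128, height a multiple of 32. */
            size_t buf_w = align_up(width, 128);
            size_t buf_h = align_up(height, NV12MT_MB_H);

            printf("luma buffer: %zux%zu pixels, %zux%zu macroblocks of 64x32\n",
                   buf_w, buf_h, buf_w / NV12MT_MB_W, buf_h / NV12MT_MB_H);
            return 0;
    }
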
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv16.xml b/Documentation/DocBook/media/v4l/pixfmt-nv16.xml
deleted file mode 100644
index 8ae1f8a810d0..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-nv16.xml
+++ /dev/null
@@ -1,166 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_NV16 ('NV16'), V4L2_PIX_FMT_NV61 ('NV61')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-NV16"><constant>V4L2_PIX_FMT_NV16</constant></refname>
- <refname id="V4L2-PIX-FMT-NV61"><constant>V4L2_PIX_FMT_NV61</constant></refname>
- <refpurpose>Formats with &frac12; horizontal
-chroma resolution, also known as YUV 4:2:2. One luminance and one
-chrominance plane with alternating chroma samples as opposed to
-<constant>V4L2_PIX_FMT_YVU420</constant></refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These are two-plane versions of the YUV 4:2:2 format.
-The three components are separated into two sub-images or planes. The
-Y plane is first. The Y plane has one byte per pixel. For
-<constant>V4L2_PIX_FMT_NV16</constant>, a combined CbCr plane
-immediately follows the Y plane in memory. The CbCr plane is the same
-width and height, in bytes, as the Y plane (and of the image).
-Each CbCr pair belongs to two pixels. For example,
-Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
-Y'<subscript>00</subscript>, Y'<subscript>01</subscript>.
-<constant>V4L2_PIX_FMT_NV61</constant> is the same except the Cb and
-Cr bytes are swapped; the CrCb plane starts with a Cr byte.</para>
-
- <para>If the Y plane has pad bytes after each row, then the
-CbCr plane has as many pad bytes after its rows.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_NV16</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;20:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;28:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
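
For a single contiguous NV16 buffer the description above fixes the byte offsets completely: the CbCr plane starts right after the Y plane and shares its stride and line count. A small C sketch follows, with hypothetical helper names and the assumption that both planes use the same stride.

    #include <stddef.h>
    #include <stdio.h>

    /* Offset of the Y sample at column x, row y. */
    static size_t nv16_y_offset(size_t x, size_t y, size_t stride)
    {
            return y * stride + x;
    }

    /* Offset of the Cb byte of the pair covering pixel (x, y); Cr is at +1. */
    static size_t nv16_cb_offset(size_t x, size_t y, size_t stride, size_t height)
    {
            size_t chroma_start = stride * height;   /* CbCr plane base */

            return chroma_start + y * stride + (x / 2) * 2;
    }

    int main(void)
    {
            /* 4 x 4 image with stride 4: pixel at column 2, row 3. */
            printf("Y offset %zu, Cb offset %zu (Cr at +1)\n",
                   nv16_y_offset(2, 3, 4), nv16_cb_offset(2, 3, 4, 4));
            return 0;
    }
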
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv16m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv16m.xml
deleted file mode 100644
index fb2b5e35d665..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-nv16m.xml
+++ /dev/null
@@ -1,170 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_NV16M ('NM16'), V4L2_PIX_FMT_NV61M ('NM61')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-NV16M"><constant>V4L2_PIX_FMT_NV16M</constant></refname>
- <refname id="V4L2-PIX-FMT-NV61M"><constant>V4L2_PIX_FMT_NV61M</constant></refname>
- <refpurpose>Variation of <constant>V4L2_PIX_FMT_NV16</constant> and <constant>V4L2_PIX_FMT_NV61</constant> with planes
- non-contiguous in memory.</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a multi-planar, two-plane version of the YUV 4:2:2 format.
-The three components are separated into two sub-images or planes.
-<constant>V4L2_PIX_FMT_NV16M</constant> differs from <constant>V4L2_PIX_FMT_NV16
-</constant> in that the two planes are non-contiguous in memory, i.e. the chroma
-plane does not necessarily immediately follow the luma plane.
-The luminance data occupies the first plane. The Y plane has one byte per pixel.
-In the second plane there is chrominance data with alternating chroma samples.
-The CbCr plane is the same width and height, in bytes, as the Y plane.
-Each CbCr pair belongs to two pixels. For example,
-Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
-Y'<subscript>00</subscript>, Y'<subscript>01</subscript>.
-<constant>V4L2_PIX_FMT_NV61M</constant> is the same as <constant>V4L2_PIX_FMT_NV16M</constant>
-except that the Cb and Cr bytes are swapped; the CrCb plane starts with a Cr byte.</para>
-
- <para><constant>V4L2_PIX_FMT_NV16M</constant> and
-<constant>V4L2_PIX_FMT_NV61M</constant> are intended to be used only in drivers
-and applications that support the multi-planar API, described in
-<xref linkend="planar-apis"/>. </para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_NV16M</constant> 4 &times; 4 pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start0&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>02</subscript></entry>
- <entry>Cr<subscript>02</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;4:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>12</subscript></entry>
- <entry>Cr<subscript>12</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;8:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cb<subscript>22</subscript></entry>
- <entry>Cr<subscript>22</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;12:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cb<subscript>32</subscript></entry>
- <entry>Cr<subscript>32</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
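
A minimal user-space sketch of negotiating V4L2_PIX_FMT_NV16M through the multi-planar API mentioned above; it is not part of the deleted documentation, the device node and the 640x480 size are example values only, and the driver may adjust what it is given.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_format fmt;
            int i, fd;

            fd = open("/dev/video0", O_RDWR);   /* example device node */
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            fmt.fmt.pix_mp.width = 640;
            fmt.fmt.pix_mp.height = 480;
            fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV16M;
            fmt.fmt.pix_mp.field = V4L2_FIELD_NONE;

            if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
                    perror("VIDIOC_S_FMT");
                    close(fd);
                    return 1;
            }

            /* The driver reports the per-plane layout it settled on (two planes here). */
            for (i = 0; i < fmt.fmt.pix_mp.num_planes; i++)
                    printf("plane %d: bytesperline %u, sizeimage %u\n", i,
                           fmt.fmt.pix_mp.plane_fmt[i].bytesperline,
                           fmt.fmt.pix_mp.plane_fmt[i].sizeimage);

            close(fd);
            return 0;
    }
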
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv24.xml b/Documentation/DocBook/media/v4l/pixfmt-nv24.xml
deleted file mode 100644
index fb255f2ca9dd..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-nv24.xml
+++ /dev/null
@@ -1,121 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_NV24 ('NV24'), V4L2_PIX_FMT_NV42 ('NV42')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-NV24"><constant>V4L2_PIX_FMT_NV24</constant></refname>
- <refname id="V4L2-PIX-FMT-NV42"><constant>V4L2_PIX_FMT_NV42</constant></refname>
- <refpurpose>Formats with full horizontal and vertical
-chroma resolution, also known as YUV 4:4:4. One luminance and one
-chrominance plane with alternating chroma samples as opposed to
-<constant>V4L2_PIX_FMT_YVU420</constant></refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These are two-plane versions of the YUV 4:4:4 format. The three
- components are separated into two sub-images or planes. The Y plane is
- first, with each Y sample stored in one byte per pixel. For
- <constant>V4L2_PIX_FMT_NV24</constant>, a combined CbCr plane
- immediately follows the Y plane in memory. The CbCr plane has the same
- width and height, in pixels, as the Y plane (and the image). Each line
- contains one CbCr pair per pixel, with each Cb and Cr sample stored in
- one byte. <constant>V4L2_PIX_FMT_NV42</constant> is the same except that
- the Cb and Cr samples are swapped; the CrCb plane starts with a Cr
- sample.</para>
-
- <para>If the Y plane has pad bytes after each row, then the CbCr plane
- has twice as many pad bytes after its rows.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_NV24</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- <entry>Cb<subscript>02</subscript></entry>
- <entry>Cr<subscript>02</subscript></entry>
- <entry>Cb<subscript>03</subscript></entry>
- <entry>Cr<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- <entry>Cb<subscript>12</subscript></entry>
- <entry>Cr<subscript>12</subscript></entry>
- <entry>Cb<subscript>13</subscript></entry>
- <entry>Cr<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;32:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- <entry>Cb<subscript>22</subscript></entry>
- <entry>Cr<subscript>22</subscript></entry>
- <entry>Cb<subscript>23</subscript></entry>
- <entry>Cr<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;40:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- <entry>Cb<subscript>32</subscript></entry>
- <entry>Cr<subscript>32</subscript></entry>
- <entry>Cb<subscript>33</subscript></entry>
- <entry>Cr<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
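
Because the NV24 CbCr plane carries a full Cb/Cr pair per pixel, its line stride is twice the Y stride, which is also why it picks up twice the row padding. A small C sketch of the resulting offsets, assuming one contiguous buffer and using hypothetical helper names:

    #include <stddef.h>
    #include <stdio.h>

    /* Offset of the Y sample at column x, row y. */
    static size_t nv24_y_offset(size_t x, size_t y, size_t y_stride)
    {
            return y * y_stride + x;
    }

    /* Offset of the Cb byte for pixel (x, y); Cr is at +1. */
    static size_t nv24_cb_offset(size_t x, size_t y, size_t y_stride, size_t height)
    {
            size_t chroma_start = y_stride * height;   /* CbCr plane base */

            return chroma_start + y * (2 * y_stride) + 2 * x;
    }

    int main(void)
    {
            /* 4 x 4 image, no padding: matches start+34 (Cb21) in the table above. */
            printf("Y offset %zu, Cb offset %zu\n",
                   nv24_y_offset(1, 2, 4), nv24_cb_offset(1, 2, 4, 4));
            return 0;
    }
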
diff --git a/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml b/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
deleted file mode 100644
index b60fb935b91b..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml
+++ /dev/null
@@ -1,937 +0,0 @@
-<refentry id="packed-rgb">
- <refmeta>
- <refentrytitle>Packed RGB formats</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>Packed RGB formats</refname>
- <refpurpose>Packed RGB formats</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These formats are designed to match the pixel formats of
-typical PC graphics frame buffers. They occupy 8, 16, 24 or 32 bits
-per pixel. These are all packed-pixel formats, meaning that all the data
-for a pixel is stored consecutively in memory.</para>
-
- <table pgwide="1" frame="none" id="rgb-formats">
- <title>Packed RGB Image Formats</title>
- <tgroup cols="37" align="center">
- <colspec colname="id" align="left" />
- <colspec colname="fourcc" />
- <colspec colname="bit" />
-
- <colspec colnum="4" colname="b07" align="center" />
- <colspec colnum="5" colname="b06" align="center" />
- <colspec colnum="6" colname="b05" align="center" />
- <colspec colnum="7" colname="b04" align="center" />
- <colspec colnum="8" colname="b03" align="center" />
- <colspec colnum="9" colname="b02" align="center" />
- <colspec colnum="10" colname="b01" align="center" />
- <colspec colnum="11" colname="b00" align="center" />
-
- <colspec colnum="13" colname="b17" align="center" />
- <colspec colnum="14" colname="b16" align="center" />
- <colspec colnum="15" colname="b15" align="center" />
- <colspec colnum="16" colname="b14" align="center" />
- <colspec colnum="17" colname="b13" align="center" />
- <colspec colnum="18" colname="b12" align="center" />
- <colspec colnum="19" colname="b11" align="center" />
- <colspec colnum="20" colname="b10" align="center" />
-
- <colspec colnum="22" colname="b27" align="center" />
- <colspec colnum="23" colname="b26" align="center" />
- <colspec colnum="24" colname="b25" align="center" />
- <colspec colnum="25" colname="b24" align="center" />
- <colspec colnum="26" colname="b23" align="center" />
- <colspec colnum="27" colname="b22" align="center" />
- <colspec colnum="28" colname="b21" align="center" />
- <colspec colnum="29" colname="b20" align="center" />
-
- <colspec colnum="31" colname="b37" align="center" />
- <colspec colnum="32" colname="b36" align="center" />
- <colspec colnum="33" colname="b35" align="center" />
- <colspec colnum="34" colname="b34" align="center" />
- <colspec colnum="35" colname="b33" align="center" />
- <colspec colnum="36" colname="b32" align="center" />
- <colspec colnum="37" colname="b31" align="center" />
- <colspec colnum="38" colname="b30" align="center" />
-
- <spanspec namest="b07" nameend="b00" spanname="b0" />
- <spanspec namest="b17" nameend="b10" spanname="b1" />
- <spanspec namest="b27" nameend="b20" spanname="b2" />
- <spanspec namest="b37" nameend="b30" spanname="b3" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>&nbsp;</entry>
- <entry spanname="b0">Byte&nbsp;0 in memory</entry>
- <entry spanname="b1">Byte&nbsp;1</entry>
- <entry spanname="b2">Byte&nbsp;2</entry>
- <entry spanname="b3">Byte&nbsp;3</entry>
- </row>
- <row>
- <entry>&nbsp;</entry>
- <entry>&nbsp;</entry>
- <entry>Bit</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="V4L2-PIX-FMT-RGB332">
- <entry><constant>V4L2_PIX_FMT_RGB332</constant></entry>
- <entry>'RGB1'</entry>
- <entry></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-ARGB444">
- <entry><constant>V4L2_PIX_FMT_ARGB444</constant></entry>
- <entry>'AR12'</entry>
- <entry></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-XRGB444">
- <entry><constant>V4L2_PIX_FMT_XRGB444</constant></entry>
- <entry>'XR12'</entry>
- <entry></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-ARGB555">
- <entry><constant>V4L2_PIX_FMT_ARGB555</constant></entry>
- <entry>'AR15'</entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-XRGB555">
- <entry><constant>V4L2_PIX_FMT_XRGB555</constant></entry>
- <entry>'XR15'</entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-RGB565">
- <entry><constant>V4L2_PIX_FMT_RGB565</constant></entry>
- <entry>'RGBP'</entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-ARGB555X">
- <entry><constant>V4L2_PIX_FMT_ARGB555X</constant></entry>
- <entry>'AR15' | (1 &lt;&lt; 31)</entry>
- <entry></entry>
- <entry>a</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-XRGB555X">
- <entry><constant>V4L2_PIX_FMT_XRGB555X</constant></entry>
- <entry>'XR15' | (1 &lt;&lt; 31)</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-RGB565X">
- <entry><constant>V4L2_PIX_FMT_RGB565X</constant></entry>
- <entry>'RGBR'</entry>
- <entry></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-BGR24">
- <entry><constant>V4L2_PIX_FMT_BGR24</constant></entry>
- <entry>'BGR3'</entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-RGB24">
- <entry><constant>V4L2_PIX_FMT_RGB24</constant></entry>
- <entry>'RGB3'</entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-BGR666">
- <entry><constant>V4L2_PIX_FMT_BGR666</constant></entry>
- <entry>'BGRH'</entry>
- <entry></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- </row>
- <row id="V4L2-PIX-FMT-ABGR32">
- <entry><constant>V4L2_PIX_FMT_ABGR32</constant></entry>
- <entry>'AR24'</entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-XBGR32">
- <entry><constant>V4L2_PIX_FMT_XBGR32</constant></entry>
- <entry>'XR24'</entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- </row>
- <row id="V4L2-PIX-FMT-ARGB32">
- <entry><constant>V4L2_PIX_FMT_ARGB32</constant></entry>
- <entry>'BA24'</entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-XRGB32">
- <entry><constant>V4L2_PIX_FMT_XRGB32</constant></entry>
- <entry>'BX24'</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>Bit 7 is the most significant bit.</para>
-
- <para>The usage and value of the alpha bits (a) in the ARGB and ABGR formats
- (collectively referred to as alpha formats) depend on the device type and
- hardware operation. <link linkend="capture">Capture</link> devices
- (including capture queues of mem-to-mem devices) fill the alpha component in
- memory. When the device captures an alpha channel the alpha component will
- have a meaningful value. Otherwise, when the device doesn't capture an alpha
- channel but can set the alpha bit to a user-configurable value, the <link
- linkend="v4l2-alpha-component"><constant>V4L2_CID_ALPHA_COMPONENT</constant>
- </link> control is used to specify that alpha value, and the alpha component
- of all pixels will be set to the value specified by that control. Otherwise
- a corresponding format without an alpha component (XRGB or XBGR) must be
- used instead of an alpha format.</para>
-
- <para><link linkend="output">Output</link> devices (including output queues
- of mem-to-mem devices and <link linkend="osd">video output overlay</link>
- devices) read the alpha component from memory. When the device processes the
- alpha channel the alpha component must be filled with meaningful values by
- applications. Otherwise a corresponding format without an alpha component
- (XRGB or XBGR) must be used instead of an alpha format.</para>
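
A minimal sketch, not part of the deleted documentation, of how an application might program the fixed alpha fill described above through the V4L2_CID_ALPHA_COMPONENT control; the device node is an example only.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Ask the driver to fill the alpha component of every pixel with 'alpha'. */
    static int set_global_alpha(int fd, int alpha)
    {
            struct v4l2_control ctrl;

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_ALPHA_COMPONENT;
            ctrl.value = alpha;                /* e.g. 255 for fully opaque */

            if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
                    perror("VIDIOC_S_CTRL(V4L2_CID_ALPHA_COMPONENT)");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);   /* example device node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            set_global_alpha(fd, 255);
            close(fd);
            return 0;
    }
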
-
- <para>The XRGB and XBGR formats contain undefined bits (-). Applications,
- devices and drivers must ignore those bits, for both <link
- linkend="capture">capture</link> and <link linkend="output">output</link>
- devices.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_BGR24</constant> 4 &times; 4 pixel
-image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="13" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>B<subscript>00</subscript></entry>
- <entry>G<subscript>00</subscript></entry>
- <entry>R<subscript>00</subscript></entry>
- <entry>B<subscript>01</subscript></entry>
- <entry>G<subscript>01</subscript></entry>
- <entry>R<subscript>01</subscript></entry>
- <entry>B<subscript>02</subscript></entry>
- <entry>G<subscript>02</subscript></entry>
- <entry>R<subscript>02</subscript></entry>
- <entry>B<subscript>03</subscript></entry>
- <entry>G<subscript>03</subscript></entry>
- <entry>R<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>B<subscript>10</subscript></entry>
- <entry>G<subscript>10</subscript></entry>
- <entry>R<subscript>10</subscript></entry>
- <entry>B<subscript>11</subscript></entry>
- <entry>G<subscript>11</subscript></entry>
- <entry>R<subscript>11</subscript></entry>
- <entry>B<subscript>12</subscript></entry>
- <entry>G<subscript>12</subscript></entry>
- <entry>R<subscript>12</subscript></entry>
- <entry>B<subscript>13</subscript></entry>
- <entry>G<subscript>13</subscript></entry>
- <entry>R<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>B<subscript>20</subscript></entry>
- <entry>G<subscript>20</subscript></entry>
- <entry>R<subscript>20</subscript></entry>
- <entry>B<subscript>21</subscript></entry>
- <entry>G<subscript>21</subscript></entry>
- <entry>R<subscript>21</subscript></entry>
- <entry>B<subscript>22</subscript></entry>
- <entry>G<subscript>22</subscript></entry>
- <entry>R<subscript>22</subscript></entry>
- <entry>B<subscript>23</subscript></entry>
- <entry>G<subscript>23</subscript></entry>
- <entry>R<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;36:</entry>
- <entry>B<subscript>30</subscript></entry>
- <entry>G<subscript>30</subscript></entry>
- <entry>R<subscript>30</subscript></entry>
- <entry>B<subscript>31</subscript></entry>
- <entry>G<subscript>31</subscript></entry>
- <entry>R<subscript>31</subscript></entry>
- <entry>B<subscript>32</subscript></entry>
- <entry>G<subscript>32</subscript></entry>
- <entry>R<subscript>32</subscript></entry>
- <entry>B<subscript>33</subscript></entry>
- <entry>G<subscript>33</subscript></entry>
- <entry>R<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
-
- <para>Formats defined in <xref linkend="rgb-formats-deprecated"/> are
- deprecated and must not be used by new drivers. They are documented here for
- reference. The meaning of their alpha bits (a) is ill-defined and
- interpreted as in either the corresponding ARGB or XRGB format, depending on
- the driver.</para>
-
- <table pgwide="1" frame="none" id="rgb-formats-deprecated">
- <title>Deprecated Packed RGB Image Formats</title>
- <tgroup cols="37" align="center">
- <colspec colname="id" align="left" />
- <colspec colname="fourcc" />
- <colspec colname="bit" />
-
- <colspec colnum="4" colname="b07" align="center" />
- <colspec colnum="5" colname="b06" align="center" />
- <colspec colnum="6" colname="b05" align="center" />
- <colspec colnum="7" colname="b04" align="center" />
- <colspec colnum="8" colname="b03" align="center" />
- <colspec colnum="9" colname="b02" align="center" />
- <colspec colnum="10" colname="b01" align="center" />
- <colspec colnum="11" colname="b00" align="center" />
-
- <colspec colnum="13" colname="b17" align="center" />
- <colspec colnum="14" colname="b16" align="center" />
- <colspec colnum="15" colname="b15" align="center" />
- <colspec colnum="16" colname="b14" align="center" />
- <colspec colnum="17" colname="b13" align="center" />
- <colspec colnum="18" colname="b12" align="center" />
- <colspec colnum="19" colname="b11" align="center" />
- <colspec colnum="20" colname="b10" align="center" />
-
- <colspec colnum="22" colname="b27" align="center" />
- <colspec colnum="23" colname="b26" align="center" />
- <colspec colnum="24" colname="b25" align="center" />
- <colspec colnum="25" colname="b24" align="center" />
- <colspec colnum="26" colname="b23" align="center" />
- <colspec colnum="27" colname="b22" align="center" />
- <colspec colnum="28" colname="b21" align="center" />
- <colspec colnum="29" colname="b20" align="center" />
-
- <colspec colnum="31" colname="b37" align="center" />
- <colspec colnum="32" colname="b36" align="center" />
- <colspec colnum="33" colname="b35" align="center" />
- <colspec colnum="34" colname="b34" align="center" />
- <colspec colnum="35" colname="b33" align="center" />
- <colspec colnum="36" colname="b32" align="center" />
- <colspec colnum="37" colname="b31" align="center" />
- <colspec colnum="38" colname="b30" align="center" />
-
- <spanspec namest="b07" nameend="b00" spanname="b0" />
- <spanspec namest="b17" nameend="b10" spanname="b1" />
- <spanspec namest="b27" nameend="b20" spanname="b2" />
- <spanspec namest="b37" nameend="b30" spanname="b3" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>&nbsp;</entry>
- <entry spanname="b0">Byte&nbsp;0 in memory</entry>
- <entry spanname="b1">Byte&nbsp;1</entry>
- <entry spanname="b2">Byte&nbsp;2</entry>
- <entry spanname="b3">Byte&nbsp;3</entry>
- </row>
- <row>
- <entry>&nbsp;</entry>
- <entry>&nbsp;</entry>
- <entry>Bit</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody>
- <row id="V4L2-PIX-FMT-RGB444">
- <entry><constant>V4L2_PIX_FMT_RGB444</constant></entry>
- <entry>'R444'</entry>
- <entry></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-RGB555">
- <entry><constant>V4L2_PIX_FMT_RGB555</constant></entry>
- <entry>'RGBO'</entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-RGB555X">
- <entry><constant>V4L2_PIX_FMT_RGB555X</constant></entry>
- <entry>'RGBQ'</entry>
- <entry></entry>
- <entry>a</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-BGR32">
- <entry><constant>V4L2_PIX_FMT_BGR32</constant></entry>
- <entry>'BGR4'</entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- </row>
- <row id="V4L2-PIX-FMT-RGB32">
- <entry><constant>V4L2_PIX_FMT_RGB32</constant></entry>
- <entry>'RGB4'</entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>A test utility to determine which RGB formats a driver
-actually supports is available from the LinuxTV v4l-dvb repository.
-See &v4l-dvb; for access instructions.</para>
-
- </refsect1>
- </refentry>
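
Reading the V4L2_PIX_FMT_RGB565 row of the table above as code: byte 0 carries g[2:0] and b[4:0], byte 1 carries r[4:0] and g[5:3]. The following C sketch (not from the DocBook source) packs one pixel exactly that way; r and b are 5-bit values, g is 6-bit.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack one V4L2_PIX_FMT_RGB565 pixel as the two bytes laid out in the table. */
    static void rgb565_pack(uint8_t r, uint8_t g, uint8_t b, uint8_t buf[2])
    {
            buf[0] = (uint8_t)(((g & 0x07) << 5) | (b & 0x1f));          /* g2..g0 b4..b0 */
            buf[1] = (uint8_t)(((r & 0x1f) << 3) | ((g >> 3) & 0x07));   /* r4..r0 g5..g3 */
    }

    int main(void)
    {
            uint8_t px[2];

            rgb565_pack(0x1f, 0x00, 0x00, px);   /* pure red */
            printf("bytes in memory: %02x %02x\n", px[0], px[1]);
            return 0;
    }
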
diff --git a/Documentation/DocBook/media/v4l/pixfmt-packed-yuv.xml b/Documentation/DocBook/media/v4l/pixfmt-packed-yuv.xml
deleted file mode 100644
index 33fa5a47a865..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-packed-yuv.xml
+++ /dev/null
@@ -1,236 +0,0 @@
-<refentry id="packed-yuv">
- <refmeta>
- <refentrytitle>Packed YUV formats</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>Packed YUV formats</refname>
- <refpurpose>Packed YUV formats</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
-	  <para>Similar to the packed RGB formats, these formats store
-the Y, Cb and Cr components of each pixel in one 16 or 32 bit
-word.</para>
-
- <table pgwide="1" frame="none">
- <title>Packed YUV Image Formats</title>
- <tgroup cols="37" align="center">
- <colspec colname="id" align="left" />
- <colspec colname="fourcc" />
- <colspec colname="bit" />
-
- <colspec colnum="4" colname="b07" align="center" />
- <colspec colnum="5" colname="b06" align="center" />
- <colspec colnum="6" colname="b05" align="center" />
- <colspec colnum="7" colname="b04" align="center" />
- <colspec colnum="8" colname="b03" align="center" />
- <colspec colnum="9" colname="b02" align="center" />
- <colspec colnum="10" colname="b01" align="center" />
- <colspec colnum="11" colname="b00" align="center" />
-
- <colspec colnum="13" colname="b17" align="center" />
- <colspec colnum="14" colname="b16" align="center" />
- <colspec colnum="15" colname="b15" align="center" />
- <colspec colnum="16" colname="b14" align="center" />
- <colspec colnum="17" colname="b13" align="center" />
- <colspec colnum="18" colname="b12" align="center" />
- <colspec colnum="19" colname="b11" align="center" />
- <colspec colnum="20" colname="b10" align="center" />
-
- <colspec colnum="22" colname="b27" align="center" />
- <colspec colnum="23" colname="b26" align="center" />
- <colspec colnum="24" colname="b25" align="center" />
- <colspec colnum="25" colname="b24" align="center" />
- <colspec colnum="26" colname="b23" align="center" />
- <colspec colnum="27" colname="b22" align="center" />
- <colspec colnum="28" colname="b21" align="center" />
- <colspec colnum="29" colname="b20" align="center" />
-
- <colspec colnum="31" colname="b37" align="center" />
- <colspec colnum="32" colname="b36" align="center" />
- <colspec colnum="33" colname="b35" align="center" />
- <colspec colnum="34" colname="b34" align="center" />
- <colspec colnum="35" colname="b33" align="center" />
- <colspec colnum="36" colname="b32" align="center" />
- <colspec colnum="37" colname="b31" align="center" />
- <colspec colnum="38" colname="b30" align="center" />
-
- <spanspec namest="b07" nameend="b00" spanname="b0" />
- <spanspec namest="b17" nameend="b10" spanname="b1" />
- <spanspec namest="b27" nameend="b20" spanname="b2" />
- <spanspec namest="b37" nameend="b30" spanname="b3" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>&nbsp;</entry>
- <entry spanname="b0">Byte&nbsp;0 in memory</entry>
- <entry spanname="b1">Byte&nbsp;1</entry>
- <entry spanname="b2">Byte&nbsp;2</entry>
- <entry spanname="b3">Byte&nbsp;3</entry>
- </row>
- <row>
- <entry>&nbsp;</entry>
- <entry>&nbsp;</entry>
- <entry>Bit</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- <entry>&nbsp;</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="V4L2-PIX-FMT-YUV444">
- <entry><constant>V4L2_PIX_FMT_YUV444</constant></entry>
- <entry>'Y444'</entry>
- <entry></entry>
- <entry>Cb<subscript>3</subscript></entry>
- <entry>Cb<subscript>2</subscript></entry>
- <entry>Cb<subscript>1</subscript></entry>
- <entry>Cb<subscript>0</subscript></entry>
- <entry>Cr<subscript>3</subscript></entry>
- <entry>Cr<subscript>2</subscript></entry>
- <entry>Cr<subscript>1</subscript></entry>
- <entry>Cr<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry>Y'<subscript>3</subscript></entry>
- <entry>Y'<subscript>2</subscript></entry>
- <entry>Y'<subscript>1</subscript></entry>
- <entry>Y'<subscript>0</subscript></entry>
- </row>
-
- <row id="V4L2-PIX-FMT-YUV555">
- <entry><constant>V4L2_PIX_FMT_YUV555</constant></entry>
- <entry>'YUVO'</entry>
- <entry></entry>
- <entry>Cb<subscript>2</subscript></entry>
- <entry>Cb<subscript>1</subscript></entry>
- <entry>Cb<subscript>0</subscript></entry>
- <entry>Cr<subscript>4</subscript></entry>
- <entry>Cr<subscript>3</subscript></entry>
- <entry>Cr<subscript>2</subscript></entry>
- <entry>Cr<subscript>1</subscript></entry>
- <entry>Cr<subscript>0</subscript></entry>
- <entry></entry>
- <entry>a</entry>
- <entry>Y'<subscript>4</subscript></entry>
- <entry>Y'<subscript>3</subscript></entry>
- <entry>Y'<subscript>2</subscript></entry>
- <entry>Y'<subscript>1</subscript></entry>
- <entry>Y'<subscript>0</subscript></entry>
- <entry>Cb<subscript>4</subscript></entry>
- <entry>Cb<subscript>3</subscript></entry>
- </row>
-
- <row id="V4L2-PIX-FMT-YUV565">
- <entry><constant>V4L2_PIX_FMT_YUV565</constant></entry>
- <entry>'YUVP'</entry>
- <entry></entry>
- <entry>Cb<subscript>2</subscript></entry>
- <entry>Cb<subscript>1</subscript></entry>
- <entry>Cb<subscript>0</subscript></entry>
- <entry>Cr<subscript>4</subscript></entry>
- <entry>Cr<subscript>3</subscript></entry>
- <entry>Cr<subscript>2</subscript></entry>
- <entry>Cr<subscript>1</subscript></entry>
- <entry>Cr<subscript>0</subscript></entry>
- <entry></entry>
- <entry>Y'<subscript>4</subscript></entry>
- <entry>Y'<subscript>3</subscript></entry>
- <entry>Y'<subscript>2</subscript></entry>
- <entry>Y'<subscript>1</subscript></entry>
- <entry>Y'<subscript>0</subscript></entry>
- <entry>Cb<subscript>5</subscript></entry>
- <entry>Cb<subscript>4</subscript></entry>
- <entry>Cb<subscript>3</subscript></entry>
- </row>
-
- <row id="V4L2-PIX-FMT-YUV32">
- <entry><constant>V4L2_PIX_FMT_YUV32</constant></entry>
- <entry>'YUV4'</entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry></entry>
- <entry>Y'<subscript>7</subscript></entry>
- <entry>Y'<subscript>6</subscript></entry>
- <entry>Y'<subscript>5</subscript></entry>
- <entry>Y'<subscript>4</subscript></entry>
- <entry>Y'<subscript>3</subscript></entry>
- <entry>Y'<subscript>2</subscript></entry>
- <entry>Y'<subscript>1</subscript></entry>
- <entry>Y'<subscript>0</subscript></entry>
- <entry></entry>
- <entry>Cb<subscript>7</subscript></entry>
- <entry>Cb<subscript>6</subscript></entry>
- <entry>Cb<subscript>5</subscript></entry>
- <entry>Cb<subscript>4</subscript></entry>
- <entry>Cb<subscript>3</subscript></entry>
- <entry>Cb<subscript>2</subscript></entry>
- <entry>Cb<subscript>1</subscript></entry>
- <entry>Cb<subscript>0</subscript></entry>
- <entry></entry>
- <entry>Cr<subscript>7</subscript></entry>
- <entry>Cr<subscript>6</subscript></entry>
- <entry>Cr<subscript>5</subscript></entry>
- <entry>Cr<subscript>4</subscript></entry>
- <entry>Cr<subscript>3</subscript></entry>
- <entry>Cr<subscript>2</subscript></entry>
- <entry>Cr<subscript>1</subscript></entry>
- <entry>Cr<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>Bit 7 is the most significant bit. The value of a = alpha
-bits is undefined when reading from the driver, ignored when writing
-to the driver, except when alpha blending has been negotiated for a
-<link linkend="overlay">Video Overlay</link> or <link
-linkend="osd">Video Output Overlay</link>.</para>
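As a rough illustration of the byte order in the table above, here is a minimal C sketch (the helper name is illustrative, not part of any kernel API) that assembles one V4L2_PIX_FMT_YUV32 pixel byte by byte, which keeps the layout independent of host endianness:

    #include <stdint.h>

    /* Pack one V4L2_PIX_FMT_YUV32 pixel.  Per the table above the bytes in
     * memory are: alpha, Y', Cb, Cr. */
    static void yuv32_pack(uint8_t *dst, uint8_t alpha, uint8_t y,
                           uint8_t cb, uint8_t cr)
    {
            dst[0] = alpha;
            dst[1] = y;
            dst[2] = cb;
            dst[3] = cr;
    }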
-
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sbggr16.xml b/Documentation/DocBook/media/v4l/pixfmt-sbggr16.xml
deleted file mode 100644
index 6494b05d84a1..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sbggr16.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<refentry id="V4L2-PIX-FMT-SBGGR16">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SBGGR16 ('BYR2')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_SBGGR16</constant></refname>
- <refpurpose>Bayer RGB format</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This format is similar to <link
-linkend="V4L2-PIX-FMT-SBGGR8">
-<constant>V4L2_PIX_FMT_SBGGR8</constant></link>, except each pixel has
-a depth of 16 bits. The least significant byte is stored at lower
-memory addresses (little-endian). Note the actual sampling precision
-may be lower than 16 bits, for example 10 bits per pixel with values
-in the range 0 to 1023.</para>
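A minimal sketch of how such a little-endian 16-bit Bayer sample could be read in application code; the helper name is illustrative only:

    #include <stddef.h>
    #include <stdint.h>

    /* Read Bayer sample number 'index' from a V4L2_PIX_FMT_SBGGR16 buffer.
     * The low byte is stored first (little-endian), as described above. */
    static uint16_t sbggr16_sample(const uint8_t *buf, size_t index)
    {
            return (uint16_t)(buf[2 * index] | (buf[2 * index + 1] << 8));
    }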
-
- <example>
- <title><constant>V4L2_PIX_FMT_SBGGR16</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>B<subscript>00low</subscript></entry>
- <entry>B<subscript>00high</subscript></entry>
- <entry>G<subscript>01low</subscript></entry>
- <entry>G<subscript>01high</subscript></entry>
- <entry>B<subscript>02low</subscript></entry>
- <entry>B<subscript>02high</subscript></entry>
- <entry>G<subscript>03low</subscript></entry>
- <entry>G<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>G<subscript>10low</subscript></entry>
- <entry>G<subscript>10high</subscript></entry>
- <entry>R<subscript>11low</subscript></entry>
- <entry>R<subscript>11high</subscript></entry>
- <entry>G<subscript>12low</subscript></entry>
- <entry>G<subscript>12high</subscript></entry>
- <entry>R<subscript>13low</subscript></entry>
- <entry>R<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>B<subscript>20low</subscript></entry>
- <entry>B<subscript>20high</subscript></entry>
- <entry>G<subscript>21low</subscript></entry>
- <entry>G<subscript>21high</subscript></entry>
- <entry>B<subscript>22low</subscript></entry>
- <entry>B<subscript>22high</subscript></entry>
- <entry>G<subscript>23low</subscript></entry>
- <entry>G<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>G<subscript>30low</subscript></entry>
- <entry>G<subscript>30high</subscript></entry>
- <entry>R<subscript>31low</subscript></entry>
- <entry>R<subscript>31high</subscript></entry>
- <entry>G<subscript>32low</subscript></entry>
- <entry>G<subscript>32high</subscript></entry>
- <entry>R<subscript>33low</subscript></entry>
- <entry>R<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sbggr8.xml b/Documentation/DocBook/media/v4l/pixfmt-sbggr8.xml
deleted file mode 100644
index 5eaf2b42d3f7..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sbggr8.xml
+++ /dev/null
@@ -1,67 +0,0 @@
- <refentry id="V4L2-PIX-FMT-SBGGR8">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SBGGR8 ('BA81')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_SBGGR8</constant></refname>
- <refpurpose>Bayer RGB format</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is commonly the native format of digital cameras,
-reflecting the arrangement of sensors on the CCD device. Only one red,
-green or blue value is given for each pixel. Missing components must
-be interpolated from neighbouring pixels. From left to right the first
-row consists of a blue and green value, the second row of a green and
-red value. This scheme repeats to the right and down for every two
-columns and rows.</para>
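The 2x2 repetition can be captured in a few lines of C; this is only a sketch of the sampling pattern, and the enum and helper are made up for illustration:

    /* Colour carried by pixel (row, col) in a BGGR mosaic:
     * row 0: B G B G ...,  row 1: G R G R ...,  repeating every 2x2 block. */
    enum bayer_colour { BAYER_RED, BAYER_GREEN, BAYER_BLUE };

    static enum bayer_colour sbggr8_colour(unsigned int row, unsigned int col)
    {
            if ((row & 1) == 0)
                    return (col & 1) == 0 ? BAYER_BLUE : BAYER_GREEN;
            return (col & 1) == 0 ? BAYER_GREEN : BAYER_RED;
    }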
-
- <example>
- <title><constant>V4L2_PIX_FMT_SBGGR8</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>B<subscript>00</subscript></entry>
- <entry>G<subscript>01</subscript></entry>
- <entry>B<subscript>02</subscript></entry>
- <entry>G<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>G<subscript>10</subscript></entry>
- <entry>R<subscript>11</subscript></entry>
- <entry>G<subscript>12</subscript></entry>
- <entry>R<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>B<subscript>20</subscript></entry>
- <entry>G<subscript>21</subscript></entry>
- <entry>B<subscript>22</subscript></entry>
- <entry>G<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>G<subscript>30</subscript></entry>
- <entry>R<subscript>31</subscript></entry>
- <entry>G<subscript>32</subscript></entry>
- <entry>R<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sdr-cs08.xml b/Documentation/DocBook/media/v4l/pixfmt-sdr-cs08.xml
deleted file mode 100644
index 6118d8f7a20c..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sdr-cs08.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<refentry id="V4L2-SDR-FMT-CS08">
- <refmeta>
- <refentrytitle>V4L2_SDR_FMT_CS8 ('CS08')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>
- <constant>V4L2_SDR_FMT_CS8</constant>
- </refname>
- <refpurpose>Complex signed 8-bit IQ sample</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
- <para>
-This format contains a sequence of complex number samples. Each complex
-number consists of two parts, called In-phase (I) and Quadrature (Q). Both
-I and Q are represented as 8-bit signed numbers. The I value comes first,
-followed by the Q value.
- </para>
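A minimal sketch of reading one such sample in C, assuming only the I-then-Q byte order stated above (struct and function names are illustrative):

    #include <stdint.h>

    /* One V4L2_SDR_FMT_CS8 sample: I first, then Q, both signed 8-bit. */
    struct cs8_sample {
            int8_t i;
            int8_t q;
    };

    static struct cs8_sample cs8_read(const uint8_t *buf)
    {
            struct cs8_sample s = {
                    .i = (int8_t)buf[0],
                    .q = (int8_t)buf[1],
            };

            return s;
    }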
- <example>
- <title><constant>V4L2_SDR_FMT_CS8</constant> 1 sample</title>
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="2" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>I'<subscript>0</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;1:</entry>
- <entry>Q'<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sdr-cs14le.xml b/Documentation/DocBook/media/v4l/pixfmt-sdr-cs14le.xml
deleted file mode 100644
index e4b494ce1369..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sdr-cs14le.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<refentry id="V4L2-SDR-FMT-CS14LE">
- <refmeta>
- <refentrytitle>V4L2_SDR_FMT_CS14LE ('CS14')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>
- <constant>V4L2_SDR_FMT_CS14LE</constant>
- </refname>
- <refpurpose>Complex signed 14-bit little endian IQ sample</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
- <para>
-This format contains a sequence of complex number samples. Each complex
-number consists of two parts, called In-phase (I) and Quadrature (Q). Both
-I and Q are represented as 14-bit signed little-endian numbers. The I value
-comes first, followed by the Q value. Each 14-bit value is stored in 16 bits
-of space with the unused high bits padded with 0.
- </para>
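Because the padding bits are zero rather than sign-extended, recovering the signed value needs an explicit sign extension from bit 13; a sketch (helper name illustrative):

    #include <stdint.h>

    /* Recover one signed 14-bit component from its 16-bit little-endian
     * container.  Bits 15-14 are zero padding, so the sign bit is bit 13
     * and must be extended by hand. */
    static int16_t cs14le_component(const uint8_t *buf)
    {
            uint16_t raw = (uint16_t)(buf[0] | (buf[1] << 8)) & 0x3fff;

            if (raw & 0x2000)               /* negative: extend into bits 15-14 */
                    raw |= 0xc000;
            return (int16_t)raw;
    }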
- <example>
- <title><constant>V4L2_SDR_FMT_CS14LE</constant> 1 sample</title>
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="3" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>I'<subscript>0[7:0]</subscript></entry>
- <entry>I'<subscript>0[13:8]</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;2:</entry>
- <entry>Q'<subscript>0[7:0]</subscript></entry>
- <entry>Q'<subscript>0[13:8]</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sdr-cu08.xml b/Documentation/DocBook/media/v4l/pixfmt-sdr-cu08.xml
deleted file mode 100644
index 2d80104c178b..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sdr-cu08.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<refentry id="V4L2-SDR-FMT-CU08">
- <refmeta>
- <refentrytitle>V4L2_SDR_FMT_CU8 ('CU08')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>
- <constant>V4L2_SDR_FMT_CU8</constant>
- </refname>
- <refpurpose>Complex unsigned 8-bit IQ sample</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
- <para>
-This format contains a sequence of complex number samples. Each complex
-number consists of two parts, called In-phase (I) and Quadrature (Q). Both
-I and Q are represented as 8-bit unsigned numbers. The I value comes first,
-followed by the Q value.
- </para>
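SDR applications usually convert unsigned 8-bit IQ data to floats centred on zero; a sketch using the common 127.5 mid-point convention (the offset is a convention, not something the format itself mandates):

    #include <stdint.h>

    /* Convert one V4L2_SDR_FMT_CU8 sample to a float pair in roughly [-1, 1]. */
    static void cu8_to_float(const uint8_t *buf, float *i, float *q)
    {
            *i = ((float)buf[0] - 127.5f) / 127.5f;
            *q = ((float)buf[1] - 127.5f) / 127.5f;
    }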
- <example>
- <title><constant>V4L2_SDR_FMT_CU8</constant> 1 sample</title>
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="2" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>I'<subscript>0</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;1:</entry>
- <entry>Q'<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sdr-cu16le.xml b/Documentation/DocBook/media/v4l/pixfmt-sdr-cu16le.xml
deleted file mode 100644
index 26288ffa9071..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sdr-cu16le.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<refentry id="V4L2-SDR-FMT-CU16LE">
- <refmeta>
- <refentrytitle>V4L2_SDR_FMT_CU16LE ('CU16')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>
- <constant>V4L2_SDR_FMT_CU16LE</constant>
- </refname>
- <refpurpose>Complex unsigned 16-bit little endian IQ sample</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
- <para>
-This format contains a sequence of complex number samples. Each complex
-number consists of two parts, called In-phase (I) and Quadrature (Q). Both
-I and Q are represented as 16-bit unsigned little-endian numbers. The I
-value comes first, followed by the Q value.
- </para>
- <example>
- <title><constant>V4L2_SDR_FMT_CU16LE</constant> 1 sample</title>
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="3" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>I'<subscript>0[7:0]</subscript></entry>
- <entry>I'<subscript>0[15:8]</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;2:</entry>
- <entry>Q'<subscript>0[7:0]</subscript></entry>
- <entry>Q'<subscript>0[15:8]</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sdr-ru12le.xml b/Documentation/DocBook/media/v4l/pixfmt-sdr-ru12le.xml
deleted file mode 100644
index 3df076b99f94..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sdr-ru12le.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<refentry id="V4L2-SDR-FMT-RU12LE">
- <refmeta>
- <refentrytitle>V4L2_SDR_FMT_RU12LE ('RU12')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname>
- <constant>V4L2_SDR_FMT_RU12LE</constant>
- </refname>
- <refpurpose>Real unsigned 12-bit little endian sample</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
- <para>
-This format contains a sequence of real number samples. Each sample is
-represented as a 12-bit unsigned little-endian number and is stored in
-16 bits of space with the unused high bits padded with 0.
- </para>
- <example>
- <title><constant>V4L2_SDR_FMT_RU12LE</constant> 1 sample</title>
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="3" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>I'<subscript>0[7:0]</subscript></entry>
- <entry>I'<subscript>0[11:8]</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sgbrg8.xml b/Documentation/DocBook/media/v4l/pixfmt-sgbrg8.xml
deleted file mode 100644
index fee65dca79c5..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sgbrg8.xml
+++ /dev/null
@@ -1,67 +0,0 @@
- <refentry id="V4L2-PIX-FMT-SGBRG8">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SGBRG8 ('GBRG')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_SGBRG8</constant></refname>
- <refpurpose>Bayer RGB format</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is commonly the native format of digital cameras,
-reflecting the arrangement of sensors on the CCD device. Only one red,
-green or blue value is given for each pixel. Missing components must
-be interpolated from neighbouring pixels. From left to right the first
-row consists of a green and blue value, the second row of a red and
-green value. This scheme repeats to the right and down for every two
-columns and rows.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_SGBRG8</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>G<subscript>00</subscript></entry>
- <entry>B<subscript>01</subscript></entry>
- <entry>G<subscript>02</subscript></entry>
- <entry>B<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>R<subscript>10</subscript></entry>
- <entry>G<subscript>11</subscript></entry>
- <entry>R<subscript>12</subscript></entry>
- <entry>G<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>G<subscript>20</subscript></entry>
- <entry>B<subscript>21</subscript></entry>
- <entry>G<subscript>22</subscript></entry>
- <entry>B<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>R<subscript>30</subscript></entry>
- <entry>G<subscript>31</subscript></entry>
- <entry>R<subscript>32</subscript></entry>
- <entry>G<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml b/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml
deleted file mode 100644
index 7803b8c41b45..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-sgrbg8.xml
+++ /dev/null
@@ -1,67 +0,0 @@
- <refentry id="V4L2-PIX-FMT-SGRBG8">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SGRBG8 ('GRBG')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_SGRBG8</constant></refname>
- <refpurpose>Bayer RGB format</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is commonly the native format of digital cameras,
-reflecting the arrangement of sensors on the CCD device. Only one red,
-green or blue value is given for each pixel. Missing components must
-be interpolated from neighbouring pixels. From left to right the first
-row consists of a green and red value, the second row of a blue and
-green value. This scheme repeats to the right and down for every two
-columns and rows.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_SGRBG8</constant> 4 &times;
-4 pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>G<subscript>00</subscript></entry>
- <entry>R<subscript>01</subscript></entry>
- <entry>G<subscript>02</subscript></entry>
- <entry>R<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>B<subscript>10</subscript></entry>
- <entry>G<subscript>11</subscript></entry>
- <entry>B<subscript>12</subscript></entry>
- <entry>G<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>G<subscript>20</subscript></entry>
- <entry>R<subscript>21</subscript></entry>
- <entry>G<subscript>22</subscript></entry>
- <entry>R<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>B<subscript>30</subscript></entry>
- <entry>G<subscript>31</subscript></entry>
- <entry>B<subscript>32</subscript></entry>
- <entry>G<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
deleted file mode 100644
index f34d03ebda3a..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10.xml
+++ /dev/null
@@ -1,90 +0,0 @@
- <refentry id="pixfmt-srggb10">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SRGGB10 ('RG10'),
- V4L2_PIX_FMT_SGRBG10 ('BA10'),
- V4L2_PIX_FMT_SGBRG10 ('GB10'),
- V4L2_PIX_FMT_SBGGR10 ('BG10'),
- </refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-SRGGB10"><constant>V4L2_PIX_FMT_SRGGB10</constant></refname>
- <refname id="V4L2-PIX-FMT-SGRBG10"><constant>V4L2_PIX_FMT_SGRBG10</constant></refname>
- <refname id="V4L2-PIX-FMT-SGBRG10"><constant>V4L2_PIX_FMT_SGBRG10</constant></refname>
- <refname id="V4L2-PIX-FMT-SBGGR10"><constant>V4L2_PIX_FMT_SBGGR10</constant></refname>
- <refpurpose>10-bit Bayer formats expanded to 16 bits</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These four pixel formats are raw sRGB / Bayer formats with
-10 bits per colour. Each colour component is stored in a 16-bit word, with 6
-unused high bits filled with zeros. Each n-pixel row contains n/2 green samples
-and n/2 blue or red samples, with alternating red and blue rows. Bytes are
-stored in memory in little endian order. They are conventionally described
-as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example of one of these
-formats.</para>
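A sketch of reading one such 10-in-16-bit sample; masking to 10 bits is defensive, since the six high bits are documented as zero (helper name illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Read sample 'index' from a 10-bit Bayer buffer stored in 16-bit
     * little-endian words and keep only the 10 payload bits. */
    static uint16_t bayer10_sample(const uint8_t *buf, size_t index)
    {
            uint16_t v = (uint16_t)(buf[2 * index] | (buf[2 * index + 1] << 8));

            return v & 0x03ff;
    }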
-
- <example>
- <title><constant>V4L2_PIX_FMT_SBGGR10</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte, high 6 bits in high bytes are 0.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>B<subscript>00low</subscript></entry>
- <entry>B<subscript>00high</subscript></entry>
- <entry>G<subscript>01low</subscript></entry>
- <entry>G<subscript>01high</subscript></entry>
- <entry>B<subscript>02low</subscript></entry>
- <entry>B<subscript>02high</subscript></entry>
- <entry>G<subscript>03low</subscript></entry>
- <entry>G<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>G<subscript>10low</subscript></entry>
- <entry>G<subscript>10high</subscript></entry>
- <entry>R<subscript>11low</subscript></entry>
- <entry>R<subscript>11high</subscript></entry>
- <entry>G<subscript>12low</subscript></entry>
- <entry>G<subscript>12high</subscript></entry>
- <entry>R<subscript>13low</subscript></entry>
- <entry>R<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>B<subscript>20low</subscript></entry>
- <entry>B<subscript>20high</subscript></entry>
- <entry>G<subscript>21low</subscript></entry>
- <entry>G<subscript>21high</subscript></entry>
- <entry>B<subscript>22low</subscript></entry>
- <entry>B<subscript>22high</subscript></entry>
- <entry>G<subscript>23low</subscript></entry>
- <entry>G<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>G<subscript>30low</subscript></entry>
- <entry>G<subscript>30high</subscript></entry>
- <entry>R<subscript>31low</subscript></entry>
- <entry>R<subscript>31high</subscript></entry>
- <entry>G<subscript>32low</subscript></entry>
- <entry>G<subscript>32high</subscript></entry>
- <entry>R<subscript>33low</subscript></entry>
- <entry>R<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
deleted file mode 100644
index d2e5845e57fb..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10alaw8.xml
+++ /dev/null
@@ -1,34 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>
- V4L2_PIX_FMT_SBGGR10ALAW8 ('aBA8'),
- V4L2_PIX_FMT_SGBRG10ALAW8 ('aGA8'),
- V4L2_PIX_FMT_SGRBG10ALAW8 ('agA8'),
- V4L2_PIX_FMT_SRGGB10ALAW8 ('aRA8'),
- </refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-SBGGR10ALAW8">
- <constant>V4L2_PIX_FMT_SBGGR10ALAW8</constant>
- </refname>
- <refname id="V4L2-PIX-FMT-SGBRG10ALAW8">
- <constant>V4L2_PIX_FMT_SGBRG10ALAW8</constant>
- </refname>
- <refname id="V4L2-PIX-FMT-SGRBG10ALAW8">
- <constant>V4L2_PIX_FMT_SGRBG10ALAW8</constant>
- </refname>
- <refname id="V4L2-PIX-FMT-SRGGB10ALAW8">
- <constant>V4L2_PIX_FMT_SRGGB10ALAW8</constant>
- </refname>
- <refpurpose>10-bit Bayer formats compressed to 8 bits</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
- <para>These four pixel formats are raw sRGB / Bayer
- formats with 10 bits per color compressed to 8 bits each,
- using the A-LAW algorithm. Each color component consumes 8
- bits of memory. In other respects this format is similar to
- <xref linkend="V4L2-PIX-FMT-SRGGB8"></xref>.</para>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
deleted file mode 100644
index bde89878c5c5..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10dpcm8.xml
+++ /dev/null
@@ -1,28 +0,0 @@
- <refentry id="pixfmt-srggb10dpcm8">
- <refmeta>
- <refentrytitle>
- V4L2_PIX_FMT_SBGGR10DPCM8 ('bBA8'),
- V4L2_PIX_FMT_SGBRG10DPCM8 ('bGA8'),
- V4L2_PIX_FMT_SGRBG10DPCM8 ('BD10'),
- V4L2_PIX_FMT_SRGGB10DPCM8 ('bRA8'),
- </refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-SBGGR10DPCM8"><constant>V4L2_PIX_FMT_SBGGR10DPCM8</constant></refname>
- <refname id="V4L2-PIX-FMT-SGBRG10DPCM8"><constant>V4L2_PIX_FMT_SGBRG10DPCM8</constant></refname>
- <refname id="V4L2-PIX-FMT-SGRBG10DPCM8"><constant>V4L2_PIX_FMT_SGRBG10DPCM8</constant></refname>
- <refname id="V4L2-PIX-FMT-SRGGB10DPCM8"><constant>V4L2_PIX_FMT_SRGGB10DPCM8</constant></refname>
- <refpurpose>10-bit Bayer formats compressed to 8 bits</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These four pixel formats are raw sRGB / Bayer formats
- with 10 bits per colour compressed to 8 bits each, using DPCM
- compression. DPCM, differential pulse-code modulation, is lossy.
- Each colour component consumes 8 bits of memory. In other respects
- this format is similar to <xref linkend="pixfmt-srggb10" />.</para>
-
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
deleted file mode 100644
index a8cc102cde4f..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb10p.xml
+++ /dev/null
@@ -1,99 +0,0 @@
- <refentry id="pixfmt-srggb10p">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SRGGB10P ('pRAA'),
- V4L2_PIX_FMT_SGRBG10P ('pgAA'),
- V4L2_PIX_FMT_SGBRG10P ('pGAA'),
- V4L2_PIX_FMT_SBGGR10P ('pBAA'),
- </refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-SRGGB10P"><constant>V4L2_PIX_FMT_SRGGB10P</constant></refname>
- <refname id="V4L2-PIX-FMT-SGRBG10P"><constant>V4L2_PIX_FMT_SGRBG10P</constant></refname>
- <refname id="V4L2-PIX-FMT-SGBRG10P"><constant>V4L2_PIX_FMT_SGBRG10P</constant></refname>
- <refname id="V4L2-PIX-FMT-SBGGR10P"><constant>V4L2_PIX_FMT_SBGGR10P</constant></refname>
- <refpurpose>10-bit packed Bayer formats</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These four pixel formats are packed raw sRGB /
- Bayer formats with 10 bits per colour. Every four consecutive
- colour components are packed into 5 bytes. Each of the first 4
-	  bytes contains the 8 high-order bits of one pixel, and the
-	  fifth byte contains the two least significant bits of each of
-	  the four pixels, in the same order.</para>
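A sketch of unpacking one 5-byte group into four 10-bit samples, following the high-bytes-then-shared-low-bits layout described above and shown in the table below (function name illustrative):

    #include <stdint.h>

    /* Unpack 5 packed bytes into 4 samples.  Bytes 0..3 hold the high
     * 8 bits of pixels 0..3; byte 4 holds their low 2 bits, pixel 0 in
     * bits 7-6 down to pixel 3 in bits 1-0. */
    static void bayer10p_unpack(const uint8_t *buf, uint16_t out[4])
    {
            unsigned int k;

            for (k = 0; k < 4; k++)
                    out[k] = ((uint16_t)buf[k] << 2) |
                             ((buf[4] >> (6 - 2 * k)) & 0x3);
    }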
-
- <para>Each n-pixel row contains n/2 green samples and n/2 blue
- or red samples, with alternating green-red and green-blue
- rows. They are conventionally described as GRGR... BGBG...,
- RGRG... GBGB..., etc. Below is an example of one of these
- formats:</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_SBGGR10P</constant> 4 &times; 4
- pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="topbot" colsep="1" rowsep="1">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>B<subscript>00high</subscript></entry>
- <entry>G<subscript>01high</subscript></entry>
- <entry>B<subscript>02high</subscript></entry>
- <entry>G<subscript>03high</subscript></entry>
- <entry>B<subscript>00low</subscript>(bits 7--6)
- G<subscript>01low</subscript>(bits 5--4)
- B<subscript>02low</subscript>(bits 3--2)
- G<subscript>03low</subscript>(bits 1--0)
- </entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;5:</entry>
- <entry>G<subscript>10high</subscript></entry>
- <entry>R<subscript>11high</subscript></entry>
- <entry>G<subscript>12high</subscript></entry>
- <entry>R<subscript>13high</subscript></entry>
- <entry>G<subscript>10low</subscript>(bits 7--6)
- R<subscript>11low</subscript>(bits 5--4)
- G<subscript>12low</subscript>(bits 3--2)
- R<subscript>13low</subscript>(bits 1--0)
- </entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;10:</entry>
- <entry>B<subscript>20high</subscript></entry>
- <entry>G<subscript>21high</subscript></entry>
- <entry>B<subscript>22high</subscript></entry>
- <entry>G<subscript>23high</subscript></entry>
- <entry>B<subscript>20low</subscript>(bits 7--6)
- G<subscript>21low</subscript>(bits 5--4)
- B<subscript>22low</subscript>(bits 3--2)
- G<subscript>23low</subscript>(bits 1--0)
- </entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;15:</entry>
- <entry>G<subscript>30high</subscript></entry>
- <entry>R<subscript>31high</subscript></entry>
- <entry>G<subscript>32high</subscript></entry>
- <entry>R<subscript>33high</subscript></entry>
- <entry>G<subscript>30low</subscript>(bits 7--6)
- R<subscript>31low</subscript>(bits 5--4)
- G<subscript>32low</subscript>(bits 3--2)
- R<subscript>33low</subscript>(bits 1--0)
- </entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml
deleted file mode 100644
index 0c8e4adf417f..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb12.xml
+++ /dev/null
@@ -1,90 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SRGGB12 ('RG12'),
- V4L2_PIX_FMT_SGRBG12 ('BA12'),
- V4L2_PIX_FMT_SGBRG12 ('GB12'),
- V4L2_PIX_FMT_SBGGR12 ('BG12'),
- </refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-SRGGB12"><constant>V4L2_PIX_FMT_SRGGB12</constant></refname>
- <refname id="V4L2-PIX-FMT-SGRBG12"><constant>V4L2_PIX_FMT_SGRBG12</constant></refname>
- <refname id="V4L2-PIX-FMT-SGBRG12"><constant>V4L2_PIX_FMT_SGBRG12</constant></refname>
- <refname id="V4L2-PIX-FMT-SBGGR12"><constant>V4L2_PIX_FMT_SBGGR12</constant></refname>
- <refpurpose>12-bit Bayer formats expanded to 16 bits</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These four pixel formats are raw sRGB / Bayer formats with
-12 bits per colour. Each colour component is stored in a 16-bit word, with 4
-unused high bits filled with zeros. Each n-pixel row contains n/2 green samples
-and n/2 blue or red samples, with alternating red and blue rows. Bytes are
-stored in memory in little endian order. They are conventionally described
-as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example of one of these
-formats.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_SBGGR12</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
-	<para>Each cell is one byte, high 4 bits in high bytes are 0.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>B<subscript>00low</subscript></entry>
- <entry>B<subscript>00high</subscript></entry>
- <entry>G<subscript>01low</subscript></entry>
- <entry>G<subscript>01high</subscript></entry>
- <entry>B<subscript>02low</subscript></entry>
- <entry>B<subscript>02high</subscript></entry>
- <entry>G<subscript>03low</subscript></entry>
- <entry>G<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>G<subscript>10low</subscript></entry>
- <entry>G<subscript>10high</subscript></entry>
- <entry>R<subscript>11low</subscript></entry>
- <entry>R<subscript>11high</subscript></entry>
- <entry>G<subscript>12low</subscript></entry>
- <entry>G<subscript>12high</subscript></entry>
- <entry>R<subscript>13low</subscript></entry>
- <entry>R<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>B<subscript>20low</subscript></entry>
- <entry>B<subscript>20high</subscript></entry>
- <entry>G<subscript>21low</subscript></entry>
- <entry>G<subscript>21high</subscript></entry>
- <entry>B<subscript>22low</subscript></entry>
- <entry>B<subscript>22high</subscript></entry>
- <entry>G<subscript>23low</subscript></entry>
- <entry>G<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>G<subscript>30low</subscript></entry>
- <entry>G<subscript>30high</subscript></entry>
- <entry>R<subscript>31low</subscript></entry>
- <entry>R<subscript>31high</subscript></entry>
- <entry>G<subscript>32low</subscript></entry>
- <entry>G<subscript>32high</subscript></entry>
- <entry>R<subscript>33low</subscript></entry>
- <entry>R<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-srggb8.xml b/Documentation/DocBook/media/v4l/pixfmt-srggb8.xml
deleted file mode 100644
index 2570e3be3cf1..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-srggb8.xml
+++ /dev/null
@@ -1,67 +0,0 @@
- <refentry id="V4L2-PIX-FMT-SRGGB8">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_SRGGB8 ('RGGB')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_SRGGB8</constant></refname>
- <refpurpose>Bayer RGB format</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is commonly the native format of digital cameras,
-reflecting the arrangement of sensors on the CCD device. Only one red,
-green or blue value is given for each pixel. Missing components must
-be interpolated from neighbouring pixels. From left to right the first
-row consists of a red and green value, the second row of a green and
-blue value. This scheme repeats to the right and down for every two
-columns and rows.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_SRGGB8</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>R<subscript>00</subscript></entry>
- <entry>G<subscript>01</subscript></entry>
- <entry>R<subscript>02</subscript></entry>
- <entry>G<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>G<subscript>10</subscript></entry>
- <entry>B<subscript>11</subscript></entry>
- <entry>G<subscript>12</subscript></entry>
- <entry>B<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>R<subscript>20</subscript></entry>
- <entry>G<subscript>21</subscript></entry>
- <entry>R<subscript>22</subscript></entry>
- <entry>G<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>G<subscript>30</subscript></entry>
- <entry>B<subscript>31</subscript></entry>
- <entry>G<subscript>32</subscript></entry>
- <entry>B<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-uv8.xml b/Documentation/DocBook/media/v4l/pixfmt-uv8.xml
deleted file mode 100644
index c507c1f73cd0..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-uv8.xml
+++ /dev/null
@@ -1,62 +0,0 @@
- <refentry id="V4L2-PIX-FMT-UV8">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_UV8 ('UV8')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_UV8</constant></refname>
- <refpurpose>UV plane interleaved</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-	<para>This format has no Y plane, only an interleaved CbCr (UV)
-	plane.</para>
- <example>
- <title>
- <constant>V4L2_PIX_FMT_UV8</constant>
- pixel image
- </title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-uyvy.xml b/Documentation/DocBook/media/v4l/pixfmt-uyvy.xml
deleted file mode 100644
index b1f6801a17ff..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-uyvy.xml
+++ /dev/null
@@ -1,120 +0,0 @@
- <refentry id="V4L2-PIX-FMT-UYVY">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_UYVY ('UYVY')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_UYVY</constant></refname>
- <refpurpose>Variation of
-<constant>V4L2_PIX_FMT_YUYV</constant> with different order of samples
-in memory</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
-	  <para>In this format each group of four bytes holds two pixels:
-two Y' values, one Cb and one Cr. Each Y' belongs to one of the two
-pixels, while the Cb and Cr are shared by both, so the Cb and Cr
-components have half the horizontal resolution of the Y
-component.</para>
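A sketch of fetching the components of pixel x from one UYVY line, reflecting the shared chroma of each horizontal pixel pair (helper name illustrative):

    #include <stdint.h>

    /* Components of pixel x in one UYVY line.  Two adjacent pixels share
     * one Cb/Cr pair, so chroma is indexed per pair of pixels. */
    static void uyvy_pixel(const uint8_t *line, unsigned int x,
                           uint8_t *y, uint8_t *cb, uint8_t *cr)
    {
            const uint8_t *pair = line + (x / 2) * 4;

            *y  = line[2 * x + 1];
            *cb = pair[0];
            *cr = pair[2];
    }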
-
- <example>
- <title><constant>V4L2_PIX_FMT_UYVY</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-vyuy.xml b/Documentation/DocBook/media/v4l/pixfmt-vyuy.xml
deleted file mode 100644
index 82803408b389..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-vyuy.xml
+++ /dev/null
@@ -1,120 +0,0 @@
- <refentry id="V4L2-PIX-FMT-VYUY">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_VYUY ('VYUY')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_VYUY</constant></refname>
- <refpurpose>Variation of
-<constant>V4L2_PIX_FMT_YUYV</constant> with different order of samples
-in memory</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
-	  <para>In this format each group of four bytes holds two pixels:
-two Y' values, one Cb and one Cr. Each Y' belongs to one of the two
-pixels, while the Cb and Cr are shared by both, so the Cb and Cr
-components have half the horizontal resolution of the Y
-component.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_VYUY</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y10.xml b/Documentation/DocBook/media/v4l/pixfmt-y10.xml
deleted file mode 100644
index d065043db8d8..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y10.xml
+++ /dev/null
@@ -1,79 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y10">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y10 ('Y10 ')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y10</constant></refname>
- <refpurpose>Grey-scale image</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image with a depth of 10 bits per pixel. Pixels
-are stored in 16-bit words with unused high bits padded with 0. The least
-significant byte is stored at lower memory addresses (little-endian).</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y10</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00low</subscript></entry>
- <entry>Y'<subscript>00high</subscript></entry>
- <entry>Y'<subscript>01low</subscript></entry>
- <entry>Y'<subscript>01high</subscript></entry>
- <entry>Y'<subscript>02low</subscript></entry>
- <entry>Y'<subscript>02high</subscript></entry>
- <entry>Y'<subscript>03low</subscript></entry>
- <entry>Y'<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10low</subscript></entry>
- <entry>Y'<subscript>10high</subscript></entry>
- <entry>Y'<subscript>11low</subscript></entry>
- <entry>Y'<subscript>11high</subscript></entry>
- <entry>Y'<subscript>12low</subscript></entry>
- <entry>Y'<subscript>12high</subscript></entry>
- <entry>Y'<subscript>13low</subscript></entry>
- <entry>Y'<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20low</subscript></entry>
- <entry>Y'<subscript>20high</subscript></entry>
- <entry>Y'<subscript>21low</subscript></entry>
- <entry>Y'<subscript>21high</subscript></entry>
- <entry>Y'<subscript>22low</subscript></entry>
- <entry>Y'<subscript>22high</subscript></entry>
- <entry>Y'<subscript>23low</subscript></entry>
- <entry>Y'<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30low</subscript></entry>
- <entry>Y'<subscript>30high</subscript></entry>
- <entry>Y'<subscript>31low</subscript></entry>
- <entry>Y'<subscript>31high</subscript></entry>
- <entry>Y'<subscript>32low</subscript></entry>
- <entry>Y'<subscript>32high</subscript></entry>
- <entry>Y'<subscript>33low</subscript></entry>
- <entry>Y'<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y10b.xml b/Documentation/DocBook/media/v4l/pixfmt-y10b.xml
deleted file mode 100644
index adb0ad808c93..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y10b.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y10BPACK">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y10BPACK ('Y10B')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y10BPACK</constant></refname>
- <refpurpose>Grey-scale image as a bit-packed array</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a packed grey-scale image format with a depth of 10 bits per
-  pixel. Pixels are stored in a bit-packed array of 10 bits per pixel,
-  with no padding between them and with the most significant bits coming
-  first.</para>
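A sketch of unpacking one 5-byte group (4 pixels) following the MSB-first packing shown in the table below (function name illustrative):

    #include <stdint.h>

    /* Unpack 5 bytes of Y10BPACK data into four 10-bit luma samples.
     * Bits are packed MSB first with no padding:
     *   b0 = Y0[9:2], b1 = Y0[1:0] Y1[9:4], b2 = Y1[3:0] Y2[9:6],
     *   b3 = Y2[5:0] Y3[9:8], b4 = Y3[7:0]. */
    static void y10b_unpack(const uint8_t *b, uint16_t y[4])
    {
            y[0] = ((uint16_t)b[0] << 2) | (b[1] >> 6);
            y[1] = ((uint16_t)(b[1] & 0x3f) << 4) | (b[2] >> 4);
            y[2] = ((uint16_t)(b[2] & 0x0f) << 6) | (b[3] >> 2);
            y[3] = ((uint16_t)(b[3] & 0x03) << 8) | b[4];
    }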
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y10BPACK</constant> 4 pixel data stream taking 5 bytes</title>
-
- <formalpara>
- <title>Bit-packed representation</title>
-  <para>Pixels cross the byte boundary, at a ratio of 5 bytes for each 4
- pixels.
- <informaltable frame="all">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>Y'<subscript>00[9:2]</subscript></entry>
- <entry>Y'<subscript>00[1:0]</subscript>Y'<subscript>01[9:4]</subscript></entry>
- <entry>Y'<subscript>01[3:0]</subscript>Y'<subscript>02[9:6]</subscript></entry>
- <entry>Y'<subscript>02[5:0]</subscript>Y'<subscript>03[9:8]</subscript></entry>
- <entry>Y'<subscript>03[7:0]</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
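
The bit-packed layout in the deleted pixfmt-y10b.xml above lends itself to a small unpacking helper. The following sketch is illustrative only (it is not part of this patch, and the name y10b_unpack_group is made up here); it assumes the MSB-first packing the table documents, i.e. 5 bytes per 4 pixels.

    #include <stdint.h>

    /* Unpack one V4L2_PIX_FMT_Y10BPACK group: 5 bytes -> four 10-bit samples.
     * Bits are packed MSB first, so pixel 0 starts at the top of byte 0.
     */
    static void y10b_unpack_group(const uint8_t b[5], uint16_t y[4])
    {
            y[0] = (b[0] << 2) | (b[1] >> 6);          /* Y'00[9:2] | Y'00[1:0] */
            y[1] = ((b[1] & 0x3f) << 4) | (b[2] >> 4); /* Y'01[9:4] | Y'01[3:0] */
            y[2] = ((b[2] & 0x0f) << 6) | (b[3] >> 2); /* Y'02[9:6] | Y'02[5:0] */
            y[3] = ((b[3] & 0x03) << 8) | b[4];        /* Y'03[9:8] | Y'03[7:0] */
    }
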
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y12.xml b/Documentation/DocBook/media/v4l/pixfmt-y12.xml
deleted file mode 100644
index ff417b858cc9..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y12.xml
+++ /dev/null
@@ -1,79 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y12">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y12 ('Y12 ')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y12</constant></refname>
- <refpurpose>Grey-scale image</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image with a depth of 12 bits per pixel. Pixels
-are stored in 16-bit words with unused high bits padded with 0. The least
-significant byte is stored at lower memory addresses (little-endian).</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y12</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00low</subscript></entry>
- <entry>Y'<subscript>00high</subscript></entry>
- <entry>Y'<subscript>01low</subscript></entry>
- <entry>Y'<subscript>01high</subscript></entry>
- <entry>Y'<subscript>02low</subscript></entry>
- <entry>Y'<subscript>02high</subscript></entry>
- <entry>Y'<subscript>03low</subscript></entry>
- <entry>Y'<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10low</subscript></entry>
- <entry>Y'<subscript>10high</subscript></entry>
- <entry>Y'<subscript>11low</subscript></entry>
- <entry>Y'<subscript>11high</subscript></entry>
- <entry>Y'<subscript>12low</subscript></entry>
- <entry>Y'<subscript>12high</subscript></entry>
- <entry>Y'<subscript>13low</subscript></entry>
- <entry>Y'<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20low</subscript></entry>
- <entry>Y'<subscript>20high</subscript></entry>
- <entry>Y'<subscript>21low</subscript></entry>
- <entry>Y'<subscript>21high</subscript></entry>
- <entry>Y'<subscript>22low</subscript></entry>
- <entry>Y'<subscript>22high</subscript></entry>
- <entry>Y'<subscript>23low</subscript></entry>
- <entry>Y'<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30low</subscript></entry>
- <entry>Y'<subscript>30high</subscript></entry>
- <entry>Y'<subscript>31low</subscript></entry>
- <entry>Y'<subscript>31high</subscript></entry>
- <entry>Y'<subscript>32low</subscript></entry>
- <entry>Y'<subscript>32high</subscript></entry>
- <entry>Y'<subscript>33low</subscript></entry>
- <entry>Y'<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
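
To make the "16-bit little-endian word, unused high bits zero" layout of the deleted pixfmt-y12.xml concrete, here is a hedged sketch (not part of the patch; y12_sample and the bytesperline parameter are illustrative assumptions):

    #include <stdint.h>

    /* Fetch the 12-bit luma sample at (x, y); bytesperline is the row stride. */
    static uint16_t y12_sample(const uint8_t *buf, unsigned int x, unsigned int y,
                               unsigned int bytesperline)
    {
            const uint8_t *p = buf + y * bytesperline + 2 * x;

            /* Low byte first (little-endian); only the low 12 bits carry data. */
            return (uint16_t)(p[0] | (p[1] << 8)) & 0x0fff;
    }
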
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y12i.xml b/Documentation/DocBook/media/v4l/pixfmt-y12i.xml
deleted file mode 100644
index 4a2d1e5f67e4..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y12i.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y12I">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y12I ('Y12I')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y12I</constant></refname>
- <refpurpose>Interleaved grey-scale image, e.g. from a stereo-pair</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image with a depth of 12 bits per pixel, but with
-pixels from 2 sources interleaved and bit-packed. Each pair of left and right
-pixels is stored in a 24-bit word in little-endian order. On a little-endian
-machine these pixels can be deinterlaced using</para>
-
-<para>
-<programlisting>
-__u8 *buf;
-left0 = 0xfff &amp; *(__u16 *)buf;
-right0 = *(__u16 *)(buf + 1) >> 4;
-</programlisting>
-</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y12I</constant> 2 pixel data stream taking 3 bytes</title>
-
- <formalpara>
- <title>Bit-packed representation</title>
- <para>pixels cross the byte boundary and have a ratio of 3 bytes for each
- interleaved pixel pair.
- <informaltable frame="all">
- <tgroup cols="3" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>Y'<subscript>0left[7:0]</subscript></entry>
- <entry>Y'<subscript>0right[3:0]</subscript>Y'<subscript>0left[11:8]</subscript></entry>
- <entry>Y'<subscript>0right[11:4]</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
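
The deleted pixfmt-y12i.xml already carries a two-line deinterlacing fragment; as an illustrative, hedged expansion (not part of the patch, with a made-up helper name), a whole row can be split like this, matching the 3-bytes-per-pair table above:

    #include <stdint.h>

    /* Split one row of V4L2_PIX_FMT_Y12I into left/right 12-bit samples.
     * Each pair occupies a 24-bit little-endian word: left in bits 0..11,
     * right in bits 12..23.
     */
    static void y12i_split_row(const uint8_t *buf, uint16_t *left,
                               uint16_t *right, unsigned int width)
    {
            unsigned int i;

            for (i = 0; i < width; i++) {
                    const uint8_t *p = buf + 3 * i;

                    left[i]  = p[0] | ((p[1] & 0x0f) << 8);
                    right[i] = (p[1] >> 4) | (p[2] << 4);
            }
    }
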
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y16-be.xml b/Documentation/DocBook/media/v4l/pixfmt-y16-be.xml
deleted file mode 100644
index cea53e1eaa43..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y16-be.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y16-BE">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y16_BE ('Y16 ' | (1 &lt;&lt; 31))</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y16_BE</constant></refname>
- <refpurpose>Grey-scale image</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image with a depth of 16 bits per
-pixel. The most significant byte is stored at lower memory addresses
-(big-endian). Note the actual sampling precision may be lower than
-16 bits, for example 10 bits per pixel with values in range 0 to
-1023.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y16_BE</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00high</subscript></entry>
- <entry>Y'<subscript>00low</subscript></entry>
- <entry>Y'<subscript>01high</subscript></entry>
- <entry>Y'<subscript>01low</subscript></entry>
- <entry>Y'<subscript>02high</subscript></entry>
- <entry>Y'<subscript>02low</subscript></entry>
- <entry>Y'<subscript>03high</subscript></entry>
- <entry>Y'<subscript>03low</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10high</subscript></entry>
- <entry>Y'<subscript>10low</subscript></entry>
- <entry>Y'<subscript>11high</subscript></entry>
- <entry>Y'<subscript>11low</subscript></entry>
- <entry>Y'<subscript>12high</subscript></entry>
- <entry>Y'<subscript>12low</subscript></entry>
- <entry>Y'<subscript>13high</subscript></entry>
- <entry>Y'<subscript>13low</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20high</subscript></entry>
- <entry>Y'<subscript>20low</subscript></entry>
- <entry>Y'<subscript>21high</subscript></entry>
- <entry>Y'<subscript>21low</subscript></entry>
- <entry>Y'<subscript>22high</subscript></entry>
- <entry>Y'<subscript>22low</subscript></entry>
- <entry>Y'<subscript>23high</subscript></entry>
- <entry>Y'<subscript>23low</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30high</subscript></entry>
- <entry>Y'<subscript>30low</subscript></entry>
- <entry>Y'<subscript>31high</subscript></entry>
- <entry>Y'<subscript>31low</subscript></entry>
- <entry>Y'<subscript>32high</subscript></entry>
- <entry>Y'<subscript>32low</subscript></entry>
- <entry>Y'<subscript>33high</subscript></entry>
- <entry>Y'<subscript>33low</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y16.xml b/Documentation/DocBook/media/v4l/pixfmt-y16.xml
deleted file mode 100644
index ff4f727d5624..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y16.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y16">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y16 ('Y16 ')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y16</constant></refname>
- <refpurpose>Grey-scale image</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image with a depth of 16 bits per
-pixel. The least significant byte is stored at lower memory addresses
-(little-endian). Note the actual sampling precision may be lower than
-16 bits, for example 10 bits per pixel with values in range 0 to
-1023.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y16</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00low</subscript></entry>
- <entry>Y'<subscript>00high</subscript></entry>
- <entry>Y'<subscript>01low</subscript></entry>
- <entry>Y'<subscript>01high</subscript></entry>
- <entry>Y'<subscript>02low</subscript></entry>
- <entry>Y'<subscript>02high</subscript></entry>
- <entry>Y'<subscript>03low</subscript></entry>
- <entry>Y'<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10low</subscript></entry>
- <entry>Y'<subscript>10high</subscript></entry>
- <entry>Y'<subscript>11low</subscript></entry>
- <entry>Y'<subscript>11high</subscript></entry>
- <entry>Y'<subscript>12low</subscript></entry>
- <entry>Y'<subscript>12high</subscript></entry>
- <entry>Y'<subscript>13low</subscript></entry>
- <entry>Y'<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20low</subscript></entry>
- <entry>Y'<subscript>20high</subscript></entry>
- <entry>Y'<subscript>21low</subscript></entry>
- <entry>Y'<subscript>21high</subscript></entry>
- <entry>Y'<subscript>22low</subscript></entry>
- <entry>Y'<subscript>22high</subscript></entry>
- <entry>Y'<subscript>23low</subscript></entry>
- <entry>Y'<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30low</subscript></entry>
- <entry>Y'<subscript>30high</subscript></entry>
- <entry>Y'<subscript>31low</subscript></entry>
- <entry>Y'<subscript>31high</subscript></entry>
- <entry>Y'<subscript>32low</subscript></entry>
- <entry>Y'<subscript>32high</subscript></entry>
- <entry>Y'<subscript>33low</subscript></entry>
- <entry>Y'<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
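
The two 16-bit greyscale entries removed above (pixfmt-y16-be.xml and pixfmt-y16.xml) differ only in byte order; here is a hedged sketch of an endianness-aware load (not part of the patch; y16_sample is an illustrative name):

    #include <stdbool.h>
    #include <stdint.h>

    /* Read one 16-bit luma sample; big_endian selects the Y16_BE layout. */
    static uint16_t y16_sample(const uint8_t *p, bool big_endian)
    {
            return big_endian ? (uint16_t)((p[0] << 8) | p[1])
                              : (uint16_t)(p[0] | (p[1] << 8));
    }
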
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y41p.xml b/Documentation/DocBook/media/v4l/pixfmt-y41p.xml
deleted file mode 100644
index 98dcb91d2917..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y41p.xml
+++ /dev/null
@@ -1,149 +0,0 @@
- <refentry id="V4L2-PIX-FMT-Y41P">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y41P ('Y41P')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y41P</constant></refname>
- <refpurpose>Format with &frac14; horizontal chroma
-resolution, also known as YUV 4:1:1</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>In this format each 12 bytes is eight pixels. In the
-twelve bytes are two CbCr pairs and eight Y's. The first CbCr pair
-goes with the first four Y's, and the second CbCr pair goes with the
-other four Y's. The Cb and Cr components have one fourth the
-horizontal resolution of the Y component.</para>
-
- <para>Do not confuse this format with <link
-linkend="V4L2-PIX-FMT-YUV411P"><constant>V4L2_PIX_FMT_YUV411P</constant></link>.
-Y41P is derived from "YUV 4:1:1 <emphasis>packed</emphasis>", while
-YUV411P stands for "YUV 4:1:1 <emphasis>planar</emphasis>".</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y41P</constant> 8 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="13" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- <entry>Y'<subscript>04</subscript></entry>
- <entry>Y'<subscript>05</subscript></entry>
- <entry>Y'<subscript>06</subscript></entry>
- <entry>Y'<subscript>07</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- <entry>Y'<subscript>14</subscript></entry>
- <entry>Y'<subscript>15</subscript></entry>
- <entry>Y'<subscript>16</subscript></entry>
- <entry>Y'<subscript>17</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- <entry>Y'<subscript>24</subscript></entry>
- <entry>Y'<subscript>25</subscript></entry>
- <entry>Y'<subscript>26</subscript></entry>
- <entry>Y'<subscript>27</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;36:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- <entry>Y'<subscript>34</subscript></entry>
- <entry>Y'<subscript>35</subscript></entry>
- <entry>Y'<subscript>36</subscript></entry>
- <entry>Y'<subscript>37</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable></para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="15" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry><entry></entry>
- <entry>4</entry><entry></entry><entry>5</entry><entry></entry>
- <entry>6</entry><entry></entry><entry>7</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
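
To make the 12-bytes-per-8-pixels grouping of the deleted pixfmt-y41p.xml concrete, here is an illustrative sketch (not part of the patch; y41p_pixel and its parameters are assumptions made for the example):

    #include <stdint.h>

    /* Locate the Y, Cb and Cr bytes of pixel (x, y) in a Y41P image.
     * Each 12-byte group holds 8 pixels: Cb0 Y0 Cr0 Y1 Cb1 Y2 Cr1 Y3 Y4 Y5 Y6 Y7.
     * The first Cb/Cr pair covers pixels 0..3 of the group, the second 4..7.
     */
    static void y41p_pixel(const uint8_t *buf, unsigned int bytesperline,
                           unsigned int x, unsigned int y,
                           uint8_t *luma, uint8_t *cb, uint8_t *cr)
    {
            static const uint8_t y_off[8] = { 1, 3, 5, 7, 8, 9, 10, 11 };
            const uint8_t *grp = buf + y * bytesperline + (x / 8) * 12;
            unsigned int i = x % 8;

            *luma = grp[y_off[i]];
            *cb   = grp[i < 4 ? 0 : 4];
            *cr   = grp[i < 4 ? 2 : 6];
    }
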
diff --git a/Documentation/DocBook/media/v4l/pixfmt-y8i.xml b/Documentation/DocBook/media/v4l/pixfmt-y8i.xml
deleted file mode 100644
index 99f389d4c6c8..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-y8i.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Y8I">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Y8I ('Y8I ')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Y8I</constant></refname>
- <refpurpose>Interleaved grey-scale image, e.g. from a stereo-pair</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This is a grey-scale image with a depth of 8 bits per pixel, but with
-pixels from 2 sources interleaved. Each pair of left and right pixels is stored
-in a 16-bit word. For example, the R200 RealSense camera stores the pixel from
-the left sensor in the lower 8 bits and the pixel from the right sensor in the
-higher 8 bits.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Y8I</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00left</subscript></entry>
- <entry>Y'<subscript>00right</subscript></entry>
- <entry>Y'<subscript>01left</subscript></entry>
- <entry>Y'<subscript>01right</subscript></entry>
- <entry>Y'<subscript>02left</subscript></entry>
- <entry>Y'<subscript>02right</subscript></entry>
- <entry>Y'<subscript>03left</subscript></entry>
- <entry>Y'<subscript>03right</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10left</subscript></entry>
- <entry>Y'<subscript>10right</subscript></entry>
- <entry>Y'<subscript>11left</subscript></entry>
- <entry>Y'<subscript>11right</subscript></entry>
- <entry>Y'<subscript>12left</subscript></entry>
- <entry>Y'<subscript>12right</subscript></entry>
- <entry>Y'<subscript>13left</subscript></entry>
- <entry>Y'<subscript>13right</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20left</subscript></entry>
- <entry>Y'<subscript>20right</subscript></entry>
- <entry>Y'<subscript>21left</subscript></entry>
- <entry>Y'<subscript>21right</subscript></entry>
- <entry>Y'<subscript>22left</subscript></entry>
- <entry>Y'<subscript>22right</subscript></entry>
- <entry>Y'<subscript>23left</subscript></entry>
- <entry>Y'<subscript>23right</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30left</subscript></entry>
- <entry>Y'<subscript>30right</subscript></entry>
- <entry>Y'<subscript>31left</subscript></entry>
- <entry>Y'<subscript>31right</subscript></entry>
- <entry>Y'<subscript>32left</subscript></entry>
- <entry>Y'<subscript>32right</subscript></entry>
- <entry>Y'<subscript>33left</subscript></entry>
- <entry>Y'<subscript>33right</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
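
Splitting the interleaved stereo bytes of the deleted pixfmt-y8i.xml into two ordinary greyscale buffers is a plain de-interleave; a hedged sketch (not part of the patch, helper name made up):

    #include <stdint.h>

    /* Each pixel position stores the left-sensor byte, then the right one. */
    static void y8i_split_row(const uint8_t *buf, uint8_t *left, uint8_t *right,
                              unsigned int width)
    {
            unsigned int i;

            for (i = 0; i < width; i++) {
                    left[i]  = buf[2 * i];
                    right[i] = buf[2 * i + 1];
            }
    }
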
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv410.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv410.xml
deleted file mode 100644
index 0869dce5f92c..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv410.xml
+++ /dev/null
@@ -1,133 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YVU410 ('YVU9'), V4L2_PIX_FMT_YUV410 ('YUV9')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-YVU410"><constant>V4L2_PIX_FMT_YVU410</constant></refname>
- <refname id="V4L2-PIX-FMT-YUV410"><constant>V4L2_PIX_FMT_YUV410</constant></refname>
- <refpurpose>Planar formats with &frac14; horizontal and
-vertical chroma resolution, also known as YUV 4:1:0</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These are planar formats, as opposed to a packed format.
-The three components are separated into three sub-images or planes.
-The Y plane is first. The Y plane has one byte per pixel. For
-<constant>V4L2_PIX_FMT_YVU410</constant>, the Cr plane immediately
-follows the Y plane in memory. The Cr plane is &frac14; the width and
-&frac14; the height of the Y plane (and of the image). Each Cr belongs
-to 16 pixels, a four-by-four square of the image. Following the Cr
-plane is the Cb plane, just like the Cr plane.
-<constant>V4L2_PIX_FMT_YUV410</constant> is the same, except the Cb
-plane comes first, then the Cr plane.</para>
-
- <para>If the Y plane has pad bytes after each row, then the Cr
-and Cb planes have &frac14; as many pad bytes after their rows. In
-other words, four Cx rows (including padding) are exactly as long as
-one Y row (including padding).</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YVU410</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;17:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry></entry><entry></entry><entry>C</entry>
- <entry></entry><entry></entry><entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
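
The plane arithmetic described in the deleted pixfmt-yuv410.xml reduces to a few multiplications; an illustrative sketch (not part of the patch, assuming no row padding and a width and height divisible by 4):

    #include <stddef.h>

    /* Plane start offsets for V4L2_PIX_FMT_YVU410: Y, then Cr, then Cb.
     * The chroma planes are 1/4 the width and 1/4 the height of the Y plane.
     */
    static void yvu410_plane_offsets(unsigned int width, unsigned int height,
                                     size_t *y_off, size_t *cr_off, size_t *cb_off)
    {
            size_t y_size = (size_t)width * height;
            size_t c_size = (size_t)(width / 4) * (height / 4);

            *y_off  = 0;
            *cr_off = y_size;
            *cb_off = y_size + c_size;
    }

For V4L2_PIX_FMT_YUV410 the same sizes apply with the Cb and Cr offsets swapped.
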
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv411p.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv411p.xml
deleted file mode 100644
index 086dc731bf02..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv411p.xml
+++ /dev/null
@@ -1,147 +0,0 @@
- <refentry id="V4L2-PIX-FMT-YUV411P">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YUV411P ('411P')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_YUV411P</constant></refname>
- <refpurpose>Format with &frac14; horizontal chroma resolution,
-also known as YUV 4:1:1. Planar layout as opposed to
-<constant>V4L2_PIX_FMT_Y41P</constant></refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This format is not commonly used. This is a planar
-format similar to the 4:2:2 planar format except with half as many
-chroma samples. The three components are separated into three sub-images or
-planes. The Y plane is first. The Y plane has one byte per pixel. The
-Cb plane immediately follows the Y plane in memory. The Cb plane is
-&frac14; the width of the Y plane (and of the image). Each Cb belongs
-to 4 pixels all on the same row. For example,
-Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>, Y'<subscript>02</subscript> and
-Y'<subscript>03</subscript>. Following the Cb plane is the Cr plane,
-just like the Cb plane.</para>
-
- <para>If the Y plane has pad bytes after each row, then the Cr
-and Cb planes have &frac14; as many pad bytes after their rows. In
-other words, four Cx rows (including padding) are exactly as long as
-one Y row (including padding).</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YUV411P</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;17:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;18:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;19:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;20:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;21:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;22:</entry>
- <entry>Cr<subscript>20</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;23:</entry>
- <entry>Cr<subscript>30</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry>C</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420.xml
deleted file mode 100644
index 48649fac1596..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv420.xml
+++ /dev/null
@@ -1,149 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YVU420 ('YV12'), V4L2_PIX_FMT_YUV420 ('YU12')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-YVU420"><constant>V4L2_PIX_FMT_YVU420</constant></refname>
- <refname id="V4L2-PIX-FMT-YUV420"><constant>V4L2_PIX_FMT_YUV420</constant></refname>
- <refpurpose>Planar formats with &frac12; horizontal and
-vertical chroma resolution, also known as YUV 4:2:0</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>These are planar formats, as opposed to a packed format.
-The three components are separated into three sub-images or planes.
-The Y plane is first. The Y plane has one byte per pixel. For
-<constant>V4L2_PIX_FMT_YVU420</constant>, the Cr plane immediately
-follows the Y plane in memory. The Cr plane is half the width and half
-the height of the Y plane (and of the image). Each Cr belongs to four
-pixels, a two-by-two square of the image. For example,
-Cr<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
-Y'<subscript>11</subscript>. Following the Cr plane is the Cb plane,
-just like the Cr plane. <constant>V4L2_PIX_FMT_YUV420</constant> is
-the same except the Cb plane comes first, then the Cr plane.</para>
-
- <para>If the Y plane has pad bytes after each row, then the Cr
-and Cb planes have half as many pad bytes after their rows. In other
-words, two Cx rows (including padding) are exactly as long as one Y row
-(including padding).</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YVU420</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;18:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;20:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;22:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
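
As a hedged sketch of the layout described in the deleted pixfmt-yuv420.xml (not part of the patch, assuming a contiguous buffer without row padding and even width/height), fetching the three samples of pixel (x, y) in V4L2_PIX_FMT_YVU420 looks like:

    #include <stddef.h>
    #include <stdint.h>

    /* YVU420 ("YV12"): full-size Y plane, then quarter-size Cr and Cb planes. */
    static void yvu420_pixel(const uint8_t *buf, unsigned int width,
                             unsigned int height, unsigned int x, unsigned int y,
                             uint8_t *luma, uint8_t *cb, uint8_t *cr)
    {
            const uint8_t *cr_plane = buf + (size_t)width * height;
            const uint8_t *cb_plane = cr_plane + (size_t)(width / 2) * (height / 2);
            unsigned int c = (y / 2) * (width / 2) + x / 2;

            *luma = buf[(size_t)y * width + x];
            *cr   = cr_plane[c];
            *cb   = cb_plane[c];
    }

For V4L2_PIX_FMT_YUV420 the cb_plane and cr_plane pointers trade places.
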
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
deleted file mode 100644
index 7d13fe96657d..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
+++ /dev/null
@@ -1,162 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12'), V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-YUV420M"><constant>V4L2_PIX_FMT_YUV420M</constant></refname>
- <refname id="V4L2-PIX-FMT-YVU420M"><constant>V4L2_PIX_FMT_YVU420M</constant></refname>
- <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant> and
- <constant>V4L2_PIX_FMT_YVU420</constant> with planes non contiguous
- in memory.</refpurpose>
- </refnamediv>
-
- <refsect1>
- <title>Description</title>
-
- <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub-images or planes.</para>
-
- <para>The Y plane is first. The Y plane has one byte per pixel.
-For <constant>V4L2_PIX_FMT_YUV420M</constant> the Cb data
-constitutes the second plane which is half the width and half
-the height of the Y plane (and of the image). Each Cb belongs to four
-pixels, a two-by-two square of the image. For example,
-Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
-Y'<subscript>11</subscript>. The Cr data, just like the Cb plane, is
-in the third plane.</para>
-
- <para><constant>V4L2_PIX_FMT_YVU420M</constant> is the same except
-the Cr data is stored in the second plane and the Cb data in the third plane.
-</para>
-
- <para>If the Y plane has pad bytes after each row, then the Cb
-and Cr planes have half as many pad bytes after their rows. In other
-words, two Cx rows (including padding) are exactly as long as one Y row
-(including padding).</para>
-
- <para><constant>V4L2_PIX_FMT_YUV420M</constant> and
-<constant>V4L2_PIX_FMT_YVU420M</constant> are intended to be
-used only in drivers and applications that support the multi-planar API,
-described in <xref linkend="planar-apis"/>. </para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YUV420M</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start0&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry>start1&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;2:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry>start2&nbsp;+&nbsp;0:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;2:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry><entry>C</entry><entry></entry><entry></entry>
- <entry></entry><entry>C</entry><entry></entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry></entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
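
Because the planes of the formats in the deleted pixfmt-yuv420m.xml are not contiguous, an application receives one pointer per plane through the multi-planar API; here is a hedged sketch of the minimum size it should expect for each plane (not part of the patch, assuming no row padding):

    #include <stddef.h>

    /* Minimum plane sizes for V4L2_PIX_FMT_YUV420M / YVU420M, no row padding. */
    static void yuv420m_plane_sizes(unsigned int width, unsigned int height,
                                    size_t sizes[3])
    {
            sizes[0] = (size_t)width * height;             /* Y          */
            sizes[1] = (size_t)(width / 2) * (height / 2); /* Cb (or Cr) */
            sizes[2] = sizes[1];                           /* Cr (or Cb) */
    }
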
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml
deleted file mode 100644
index dd502802cb75..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv422m.xml
+++ /dev/null
@@ -1,166 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YUV422M ('YM16'), V4L2_PIX_FMT_YVU422M ('YM61')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-YUV422M"><constant>V4L2_PIX_FMT_YUV422M</constant></refname>
- <refname id="V4L2-PIX-FMT-YVU422M"><constant>V4L2_PIX_FMT_YVU422M</constant></refname>
- <refpurpose>Planar formats with &frac12; horizontal resolution, also
- known as YUV and YVU 4:2:2</refpurpose>
- </refnamediv>
-
- <refsect1>
- <title>Description</title>
-
- <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub-images or planes.</para>
-
- <para>The Y plane is first. The Y plane has one byte per pixel.
-For <constant>V4L2_PIX_FMT_YUV422M</constant> the Cb data
-constitutes the second plane which is half the width of the Y plane (and of the
-image). Each Cb belongs to two pixels. For example,
-Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>. The Cr data, just like the Cb plane, is
-in the third plane. </para>
-
- <para><constant>V4L2_PIX_FMT_YVU422M</constant> is the same except
-the Cr data is stored in the second plane and the Cb data in the third plane.
-</para>
-
- <para>If the Y plane has pad bytes after each row, then the Cb
-and Cr planes have half as many pad bytes after their rows. In other
-words, two Cx rows (including padding) are exactly as long as one Y row
-(including padding).</para>
-
- <para><constant>V4L2_PIX_FMT_YUV422M</constant> and
-<constant>V4L2_PIX_FMT_YVU422M</constant> are intended to be
-used only in drivers and applications that support the multi-planar API,
-described in <xref linkend="planar-apis"/>. </para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YUV422M</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start0&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry>start1&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;2:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;4:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;6:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry>start2&nbsp;+&nbsp;0:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;2:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;4:</entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;6:</entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv422p.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv422p.xml
deleted file mode 100644
index 4ce6463fe0a5..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv422p.xml
+++ /dev/null
@@ -1,153 +0,0 @@
- <refentry id="V4L2-PIX-FMT-YUV422P">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YUV422P ('422P')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_YUV422P</constant></refname>
- <refpurpose>Format with &frac12; horizontal chroma resolution,
-also known as YUV 4:2:2. Planar layout as opposed to
-<constant>V4L2_PIX_FMT_YUYV</constant></refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
- <para>This format is not commonly used. This is a planar
-version of the YUYV format. The three components are separated into
-three sub-images or planes. The Y plane is first. The Y plane has one
-byte per pixel. The Cb plane immediately follows the Y plane in
-memory. The Cb plane is half the width of the Y plane (and of the
-image). Each Cb belongs to two pixels. For example,
-Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>. Following the Cb plane is the Cr plane,
-just like the Cb plane.</para>
-
- <para>If the Y plane has pad bytes after each row, then the Cr
-and Cb planes have half as many pad bytes after their rows. In other
-words, two Cx rows (including padding) are exactly as long as one Y row
-(including padding).</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YUV422P</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;18:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;20:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;22:</entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;26:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;28:</entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;30:</entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml
deleted file mode 100644
index 1b7335940bc7..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv444m.xml
+++ /dev/null
@@ -1,177 +0,0 @@
- <refentry>
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YUV444M ('YM24'), V4L2_PIX_FMT_YVU444M ('YM42')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname id="V4L2-PIX-FMT-YUV444M"><constant>V4L2_PIX_FMT_YUV444M</constant></refname>
- <refname id="V4L2-PIX-FMT-YVU444M"><constant>V4L2_PIX_FMT_YVU444M</constant></refname>
- <refpurpose>Planar formats with full horizontal resolution, also
- known as YUV and YVU 4:4:4</refpurpose>
- </refnamediv>
-
- <refsect1>
- <title>Description</title>
-
- <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub-images or planes.</para>
-
- <para>The Y plane is first. The Y plane has one byte per pixel.
-For <constant>V4L2_PIX_FMT_YUV444M</constant> the Cb data
-constitutes the second plane which is the same width and height as the Y plane
-(and as the image). The Cr data, just like the Cb plane, is in the third plane.
-</para>
-
- <para><constant>V4L2_PIX_FMT_YVU444M</constant> is the same except
-the Cr data is stored in the second plane and the Cb data in the third plane.
-</para>
- <para>If the Y plane has pad bytes after each row, then the Cb
-and Cr planes have the same number of pad bytes after their rows.</para>
-
- <para><constant>V4L2_PIX_FMT_YUV444M</constant> and
-<constant>V4L2_PIX_FMT_YVU444M</constant> are intended to be
-used only in drivers and applications that support the multi-planar API,
-described in <xref linkend="planar-apis"/>. </para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YUV444M</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="5" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start0&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;4:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start0&nbsp;+&nbsp;12:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry>start1&nbsp;+&nbsp;0:</entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Cb<subscript>02</subscript></entry>
- <entry>Cb<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;4:</entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Cb<subscript>12</subscript></entry>
- <entry>Cb<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;8:</entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Cb<subscript>22</subscript></entry>
- <entry>Cb<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start1&nbsp;+&nbsp;12:</entry>
-	    <entry>Cb<subscript>30</subscript></entry>
-	    <entry>Cb<subscript>31</subscript></entry>
- <entry>Cb<subscript>32</subscript></entry>
- <entry>Cb<subscript>33</subscript></entry>
- </row>
- <row><entry></entry></row>
- <row>
- <entry>start2&nbsp;+&nbsp;0:</entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- <entry>Cr<subscript>02</subscript></entry>
- <entry>Cr<subscript>03</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;4:</entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- <entry>Cr<subscript>12</subscript></entry>
- <entry>Cr<subscript>13</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;8:</entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- <entry>Cr<subscript>22</subscript></entry>
- <entry>Cr<subscript>23</subscript></entry>
- </row>
- <row>
- <entry>start2&nbsp;+&nbsp;12:</entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- <entry>Cr<subscript>32</subscript></entry>
- <entry>Cr<subscript>33</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry>
- <entry>YC</entry><entry></entry><entry>YC</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry>
- <entry>YC</entry><entry></entry><entry>YC</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry>
- <entry>YC</entry><entry></entry><entry>YC</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>YC</entry><entry></entry><entry>YC</entry><entry></entry>
- <entry>YC</entry><entry></entry><entry>YC</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuyv.xml b/Documentation/DocBook/media/v4l/pixfmt-yuyv.xml
deleted file mode 100644
index 58384092251a..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yuyv.xml
+++ /dev/null
@@ -1,120 +0,0 @@
- <refentry id="V4L2-PIX-FMT-YUYV">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YUYV ('YUYV')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_YUYV</constant></refname>
- <refpurpose>Packed format with &frac12; horizontal chroma
-resolution, also known as YUV 4:2:2</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
-      <para>In this format each group of four bytes encodes two pixels:
-two Y values, one Cb and one Cr. Each Y belongs to one of the two
-pixels, while the Cb and Cr are shared by both, so the Cb and Cr
-components have half the horizontal resolution of the Y component.
-<constant>V4L2_PIX_FMT_YUYV</constant> is known in the Windows
-environment as YUY2.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YUYV</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
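As a rough illustration of the byte order described above (the helper name and
calling convention are only for this sketch), the following C fragment fetches
the Y', Cb and Cr values of pixel (x, y) from a V4L2_PIX_FMT_YUYV buffer:

    #include <stdint.h>

    /* Two horizontally adjacent pixels share one Cb/Cr pair, so each
     * four-byte macropixel is laid out as Y'0 Cb Y'1 Cr. */
    static void yuyv_pixel(const uint8_t *buf, unsigned int bytesperline,
                           unsigned int x, unsigned int y,
                           uint8_t *luma, uint8_t *cb, uint8_t *cr)
    {
            const uint8_t *mp = buf + y * bytesperline + (x / 2) * 4;

            *luma = mp[(x & 1) ? 2 : 0];    /* Y'0 at offset 0, Y'1 at offset 2 */
            *cb   = mp[1];                  /* Cb shared by both pixels */
            *cr   = mp[3];                  /* Cr shared by both pixels */
    }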
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yvyu.xml b/Documentation/DocBook/media/v4l/pixfmt-yvyu.xml
deleted file mode 100644
index bfffdc76d3da..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-yvyu.xml
+++ /dev/null
@@ -1,120 +0,0 @@
- <refentry id="V4L2-PIX-FMT-YVYU">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_YVYU ('YVYU')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_YVYU</constant></refname>
- <refpurpose>Variation of
-<constant>V4L2_PIX_FMT_YUYV</constant> with different order of samples
-in memory</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
-      <para>In this format each group of four bytes encodes two pixels:
-two Y values, one Cb and one Cr. Each Y belongs to one of the two
-pixels, while the Cb and Cr are shared by both, so the Cb and Cr
-components have half the horizontal resolution of the Y
-component.</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_YVYU</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Y'<subscript>00</subscript></entry>
- <entry>Cr<subscript>00</subscript></entry>
- <entry>Y'<subscript>01</subscript></entry>
- <entry>Cb<subscript>00</subscript></entry>
- <entry>Y'<subscript>02</subscript></entry>
- <entry>Cr<subscript>01</subscript></entry>
- <entry>Y'<subscript>03</subscript></entry>
- <entry>Cb<subscript>01</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Y'<subscript>10</subscript></entry>
- <entry>Cr<subscript>10</subscript></entry>
- <entry>Y'<subscript>11</subscript></entry>
- <entry>Cb<subscript>10</subscript></entry>
- <entry>Y'<subscript>12</subscript></entry>
- <entry>Cr<subscript>11</subscript></entry>
- <entry>Y'<subscript>13</subscript></entry>
- <entry>Cb<subscript>11</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Y'<subscript>20</subscript></entry>
- <entry>Cr<subscript>20</subscript></entry>
- <entry>Y'<subscript>21</subscript></entry>
- <entry>Cb<subscript>20</subscript></entry>
- <entry>Y'<subscript>22</subscript></entry>
- <entry>Cr<subscript>21</subscript></entry>
- <entry>Y'<subscript>23</subscript></entry>
- <entry>Cb<subscript>21</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Y'<subscript>30</subscript></entry>
- <entry>Cr<subscript>30</subscript></entry>
- <entry>Y'<subscript>31</subscript></entry>
- <entry>Cb<subscript>30</subscript></entry>
- <entry>Y'<subscript>32</subscript></entry>
- <entry>Cr<subscript>31</subscript></entry>
- <entry>Y'<subscript>33</subscript></entry>
- <entry>Cb<subscript>31</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
-
- <formalpara>
- <title>Color Sample Location.</title>
- <para>
- <informaltable frame="none">
- <tgroup cols="7" align="center">
- <tbody valign="top">
- <row>
- <entry></entry>
- <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
- <entry>2</entry><entry></entry><entry>3</entry>
- </row>
- <row>
- <entry>0</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>1</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>2</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- <row>
- <entry>3</entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry><entry></entry>
- <entry>Y</entry><entry>C</entry><entry>Y</entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
- </refentry>
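The only difference from V4L2_PIX_FMT_YUYV is the position of the two chroma
bytes, so a sketch equivalent to the YUYV one above merely swaps the Cb and Cr
offsets (again, the helper name is illustrative):

    #include <stdint.h>

    /* V4L2_PIX_FMT_YVYU macropixel layout: Y'0 Cr Y'1 Cb. */
    static void yvyu_pixel(const uint8_t *buf, unsigned int bytesperline,
                           unsigned int x, unsigned int y,
                           uint8_t *luma, uint8_t *cb, uint8_t *cr)
    {
            const uint8_t *mp = buf + y * bytesperline + (x / 2) * 4;

            *luma = mp[(x & 1) ? 2 : 0];
            *cr   = mp[1];                  /* Cr comes first here ... */
            *cb   = mp[3];                  /* ... and Cb last */
    }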
diff --git a/Documentation/DocBook/media/v4l/pixfmt-z16.xml b/Documentation/DocBook/media/v4l/pixfmt-z16.xml
deleted file mode 100644
index 1d9cb1684bd3..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt-z16.xml
+++ /dev/null
@@ -1,81 +0,0 @@
-<refentry id="V4L2-PIX-FMT-Z16">
- <refmeta>
- <refentrytitle>V4L2_PIX_FMT_Z16 ('Z16 ')</refentrytitle>
- &manvol;
- </refmeta>
- <refnamediv>
- <refname><constant>V4L2_PIX_FMT_Z16</constant></refname>
- <refpurpose>16-bit depth data with distance values at each pixel</refpurpose>
- </refnamediv>
- <refsect1>
- <title>Description</title>
-
-      <para>This is a 16-bit format representing depth data. Each pixel value is
-the distance to the corresponding point in the image. The distance unit can
-vary and has to be negotiated with the device separately. Each pixel is stored
-as a 16-bit word in little-endian byte order.
-</para>
-
- <example>
- <title><constant>V4L2_PIX_FMT_Z16</constant> 4 &times; 4
-pixel image</title>
-
- <formalpara>
- <title>Byte Order.</title>
- <para>Each cell is one byte.
- <informaltable frame="none">
- <tgroup cols="9" align="center">
- <colspec align="left" colwidth="2*" />
- <tbody valign="top">
- <row>
- <entry>start&nbsp;+&nbsp;0:</entry>
- <entry>Z<subscript>00low</subscript></entry>
- <entry>Z<subscript>00high</subscript></entry>
- <entry>Z<subscript>01low</subscript></entry>
- <entry>Z<subscript>01high</subscript></entry>
- <entry>Z<subscript>02low</subscript></entry>
- <entry>Z<subscript>02high</subscript></entry>
- <entry>Z<subscript>03low</subscript></entry>
- <entry>Z<subscript>03high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;8:</entry>
- <entry>Z<subscript>10low</subscript></entry>
- <entry>Z<subscript>10high</subscript></entry>
- <entry>Z<subscript>11low</subscript></entry>
- <entry>Z<subscript>11high</subscript></entry>
- <entry>Z<subscript>12low</subscript></entry>
- <entry>Z<subscript>12high</subscript></entry>
- <entry>Z<subscript>13low</subscript></entry>
- <entry>Z<subscript>13high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;16:</entry>
- <entry>Z<subscript>20low</subscript></entry>
- <entry>Z<subscript>20high</subscript></entry>
- <entry>Z<subscript>21low</subscript></entry>
- <entry>Z<subscript>21high</subscript></entry>
- <entry>Z<subscript>22low</subscript></entry>
- <entry>Z<subscript>22high</subscript></entry>
- <entry>Z<subscript>23low</subscript></entry>
- <entry>Z<subscript>23high</subscript></entry>
- </row>
- <row>
- <entry>start&nbsp;+&nbsp;24:</entry>
- <entry>Z<subscript>30low</subscript></entry>
- <entry>Z<subscript>30high</subscript></entry>
- <entry>Z<subscript>31low</subscript></entry>
- <entry>Z<subscript>31high</subscript></entry>
- <entry>Z<subscript>32low</subscript></entry>
- <entry>Z<subscript>32high</subscript></entry>
- <entry>Z<subscript>33low</subscript></entry>
- <entry>Z<subscript>33high</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </informaltable>
- </para>
- </formalpara>
- </example>
- </refsect1>
-</refentry>
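Since the samples are stored little endian regardless of the host CPU, a
portable reader assembles each value from the two bytes explicitly. A minimal
sketch (the function name is illustrative):

    #include <stdint.h>

    /* Depth sample at (x, y) in a V4L2_PIX_FMT_Z16 buffer; each sample
     * occupies two bytes, low byte first. */
    static uint16_t z16_sample(const uint8_t *buf, unsigned int bytesperline,
                               unsigned int x, unsigned int y)
    {
            const uint8_t *p = buf + y * bytesperline + x * 2;

            return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    }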
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
deleted file mode 100644
index 5a08aeea4360..000000000000
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ /dev/null
@@ -1,2003 +0,0 @@
- <title>Image Formats</title>
-
- <para>The V4L2 API was primarily designed for devices exchanging
-image data with applications. The
-<structname>v4l2_pix_format</structname> and <structname>v4l2_pix_format_mplane
-</structname> structures define the format and layout of an image in memory.
-The former is used with the single-planar API, while the latter is used with the
-multi-planar version (see <xref linkend="planar-apis"/>). Image formats are
-negotiated with the &VIDIOC-S-FMT; ioctl. (The explanations here focus on video
-capturing and output, for overlay frame buffer formats see also
-&VIDIOC-G-FBUF;.)</para>
-
-<section>
- <title>Single-planar format structure</title>
- <table pgwide="1" frame="none" id="v4l2-pix-format">
- <title>struct <structname>v4l2_pix_format</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Image width in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Image height in pixels. If <structfield>field</structfield> is
- one of <constant>V4L2_FIELD_TOP</constant>, <constant>V4L2_FIELD_BOTTOM</constant>
- or <constant>V4L2_FIELD_ALTERNATE</constant> then height refers to the
- number of lines in the field, otherwise it refers to the number of
- lines in the frame (which is twice the field height for interlaced
- formats).</entry>
- </row>
- <row>
-	<entry spanname="hspan">Applications set these fields to
-request an image size; drivers return the closest possible values. In
-case of planar formats the <structfield>width</structfield> and
-<structfield>height</structfield> apply to the largest plane. To
-avoid ambiguities drivers must return values rounded up to a multiple
-of the scale factor of any smaller planes. For example, when the image
-format is YUV 4:2:0, <structfield>width</structfield> and
-<structfield>height</structfield> must be multiples of two.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pixelformat</structfield></entry>
- <entry>The pixel format or type of compression, set by the
-application. This is a little endian <link
-linkend="v4l2-fourcc">four character code</link>. V4L2 defines
-standard RGB formats in <xref linkend="rgb-formats" />, YUV formats in <xref
-linkend="yuv-formats" />, and reserved codes in <xref
-linkend="reserved-formats" /></entry>
- </row>
- <row>
- <entry>&v4l2-field;</entry>
- <entry><structfield>field</structfield></entry>
- <entry>Video images are typically interlaced. Applications
-can request to capture or output only the top or bottom field, or both
-fields interlaced or sequentially stored in one buffer or alternating
-in separate buffers. Drivers return the actual field order selected.
-For more details on fields see <xref linkend="field-order" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>bytesperline</structfield></entry>
- <entry>Distance in bytes between the leftmost pixels in two
-adjacent lines.</entry>
- </row>
- <row>
- <entry spanname="hspan"><para>Both applications and drivers
-can set this field to request padding bytes at the end of each line.
-Drivers however may ignore the value requested by the application,
-returning <structfield>width</structfield> times bytes per pixel or a
-larger value required by the hardware. That implies applications can
-just set this field to zero to get a reasonable
-default.</para><para>Video hardware may access padding bytes,
-therefore they must reside in accessible memory. Consider cases where
-padding bytes after the last line of an image cross a system page
-boundary. Input devices may write padding bytes; their value is
-undefined. Output devices ignore the contents of padding
-bytes.</para><para>When the image format is planar the
-<structfield>bytesperline</structfield> value applies to the first
-plane and is divided by the same factor as the
-<structfield>width</structfield> field for the other planes. For
-example the Cb and Cr planes of a YUV 4:2:0 image have half as many
-padding bytes following each line as the Y plane. To avoid ambiguities
-drivers must return a <structfield>bytesperline</structfield> value
-rounded up to a multiple of the scale factor.</para>
-<para>For compressed formats the <structfield>bytesperline</structfield>
-value makes no sense. Applications and drivers must set this to 0 in
-that case.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>sizeimage</structfield></entry>
- <entry>Size in bytes of the buffer to hold a complete image,
-set by the driver. Usually this is
-<structfield>bytesperline</structfield> times
-<structfield>height</structfield>. When the image consists of variable
-length compressed data this is the maximum number of bytes required to
-hold an image.</entry>
- </row>
- <row>
- <entry>&v4l2-colorspace;</entry>
- <entry><structfield>colorspace</structfield></entry>
- <entry>This information supplements the
-<structfield>pixelformat</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>priv</structfield></entry>
- <entry><para>This field indicates whether the remaining fields of the
-<structname>v4l2_pix_format</structname> structure, also called the extended
-fields, are valid. When set to <constant>V4L2_PIX_FMT_PRIV_MAGIC</constant>, it
-indicates that the extended fields have been correctly initialized. When set to
-any other value it indicates that the extended fields contain undefined values.
-</para>
-<para>Applications that wish to use the pixel format extended fields must first
-ensure that the feature is supported by querying the device for the
-<link linkend="querycap"><constant>V4L2_CAP_EXT_PIX_FORMAT</constant></link>
-capability. If the capability isn't set the pixel format extended fields are not
-supported and using the extended fields will lead to undefined results.</para>
-<para>To use the extended fields, applications must set the
-<structfield>priv</structfield> field to
-<constant>V4L2_PIX_FMT_PRIV_MAGIC</constant>, initialize all the extended fields
-and zero the unused bytes of the <structname>v4l2_format</structname>
-<structfield>raw_data</structfield> field.</para>
-<para>When the <structfield>priv</structfield> field isn't set to
-<constant>V4L2_PIX_FMT_PRIV_MAGIC</constant> drivers must act as if all the
-extended fields were set to zero. On return drivers must set the
-<structfield>priv</structfield> field to
-<constant>V4L2_PIX_FMT_PRIV_MAGIC</constant> and all the extended fields to
-applicable values.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags set by the application or driver, see <xref
-linkend="format-flags" />.</entry>
- </row>
- <row>
- <entry>&v4l2-ycbcr-encoding;</entry>
- <entry><structfield>ycbcr_enc</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>&v4l2-quantization;</entry>
- <entry><structfield>quantization</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>&v4l2-xfer-func;</entry>
- <entry><structfield>xfer_func</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-</section>
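A minimal sketch of single-planar format negotiation, assuming an already
opened device file descriptor and omitting error reporting; the width, height
and pixel format requested here are arbitrary:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_capture_format(int fd, unsigned int width, unsigned int height)
    {
            struct v4l2_format fmt;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            fmt.fmt.pix.width = width;
            fmt.fmt.pix.height = height;
            fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
            fmt.fmt.pix.field = V4L2_FIELD_NONE;
            /* bytesperline left at 0: the driver picks a suitable value
             * and returns it, together with sizeimage.  For a capture
             * stream the driver also fills in colorspace. */

            if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
                    return -1;

            /* The driver may have adjusted width, height and pixelformat;
             * applications must use the values returned in fmt. */
            return 0;
    }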
-
-<section>
- <title>Multi-planar format structures</title>
- <para>The <structname>v4l2_plane_pix_format</structname> structures define
- size and layout for each of the planes in a multi-planar format.
- The <structname>v4l2_pix_format_mplane</structname> structure contains
- information common to all planes (such as image width and height) and
- an array of <structname>v4l2_plane_pix_format</structname> structures,
- describing all planes of that format.</para>
- <table pgwide="1" frame="none" id="v4l2-plane-pix-format">
- <title>struct <structname>v4l2_plane_pix_format</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>sizeimage</structfield></entry>
- <entry>Maximum size in bytes required for image data in this plane.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>bytesperline</structfield></entry>
- <entry>Distance in bytes between the leftmost pixels in two adjacent
- lines. See &v4l2-pix-format;.</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>reserved[6]</structfield></entry>
- <entry>Reserved for future extensions. Should be zeroed by drivers and
- applications.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table pgwide="1" frame="none" id="v4l2-pix-format-mplane">
- <title>struct <structname>v4l2_pix_format_mplane</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Image width in pixels. See &v4l2-pix-format;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Image height in pixels. See &v4l2-pix-format;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pixelformat</structfield></entry>
- <entry>The pixel format. Both single- and multi-planar four character
-codes can be used.</entry>
- </row>
- <row>
- <entry>&v4l2-field;</entry>
- <entry><structfield>field</structfield></entry>
- <entry>See &v4l2-pix-format;.</entry>
- </row>
- <row>
- <entry>&v4l2-colorspace;</entry>
- <entry><structfield>colorspace</structfield></entry>
- <entry>See &v4l2-pix-format;.</entry>
- </row>
- <row>
- <entry>&v4l2-plane-pix-format;</entry>
- <entry><structfield>plane_fmt[VIDEO_MAX_PLANES]</structfield></entry>
-	<entry>An array of structures describing the format of each plane this
- pixel format consists of. The number of valid entries in this array
- has to be put in the <structfield>num_planes</structfield>
- field.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>num_planes</structfield></entry>
- <entry>Number of planes (i.e. separate memory buffers) for this format
- and the number of valid entries in the
- <structfield>plane_fmt</structfield> array.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags set by the application or driver, see <xref
-linkend="format-flags" />.</entry>
- </row>
- <row>
- <entry>&v4l2-ycbcr-encoding;</entry>
- <entry><structfield>ycbcr_enc</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>&v4l2-quantization;</entry>
- <entry><structfield>quantization</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>&v4l2-xfer-func;</entry>
- <entry><structfield>xfer_func</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>reserved[7]</structfield></entry>
- <entry>Reserved for future extensions. Should be zeroed by drivers
- and applications.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-</section>
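The multi-planar case differs mainly in the buffer type and in the per-plane
array; a sketch along the same lines as the single-planar example above (the
format choice and the omitted error handling are illustrative):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_capture_format_mp(int fd, unsigned int width, unsigned int height)
    {
            struct v4l2_format fmt;

            memset(&fmt, 0, sizeof(fmt));
            fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
            fmt.fmt.pix_mp.width = width;
            fmt.fmt.pix_mp.height = height;
            /* NV12M: plane 0 holds Y', plane 1 holds interleaved CbCr */
            fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
            fmt.fmt.pix_mp.field = V4L2_FIELD_NONE;
            /* plane_fmt[] bytesperline/sizeimage left at 0: the driver
             * fills in one entry per plane and sets num_planes. */

            return ioctl(fd, VIDIOC_S_FMT, &fmt);
    }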
-
- <section>
- <title>Standard Image Formats</title>
-
- <para>In order to exchange images between drivers and
-applications, it is necessary to have standard image data formats
-which both sides will interpret the same way. V4L2 includes several
-such formats, and this section is intended to be an unambiguous
-specification of the standard image data formats in V4L2.</para>
-
- <para>V4L2 drivers are not limited to these formats, however.
-Driver-specific formats are possible. In that case the application may
-depend on a codec to convert images to one of the standard formats
-when needed. But the data can still be stored and retrieved in the
-proprietary format. For example, a device may support a proprietary
-compressed format. Applications can still capture and save the data in
-the compressed format, saving much disk space, and later use a codec
-to convert the images to the X Windows screen format when the video is
-to be displayed.</para>
-
- <para>Even so, ultimately, some standard formats are needed, so
-the V4L2 specification would not be complete without well-defined
-standard formats.</para>
-
- <para>The V4L2 standard formats are mainly uncompressed formats. The
-pixels are always arranged in memory from left to right, and from top
-to bottom. The first byte of data in the image buffer is always for
-the leftmost pixel of the topmost row. Following that is the pixel
-immediately to its right, and so on until the end of the top row of
-pixels. Following the rightmost pixel of the row there may be zero or
-more bytes of padding to guarantee that each row of pixel data has a
-certain alignment. Following the pad bytes, if any, is data for the
-leftmost pixel of the second row from the top, and so on. The last row
-has just as many pad bytes after it as the other rows.</para>
-
-    <para>In V4L2 each format has an identifier which looks like
-<constant>V4L2_PIX_FMT_XXX</constant>, defined in the <link
-linkend="videodev">videodev2.h</link> header file. These identifiers
-represent <link linkend="v4l2-fourcc">four character (FourCC) codes</link>,
-which are also listed below; however, they are not the same as those
-used in the Windows world.</para>
-
- <para>For some formats, data is stored in separate, discontiguous
-memory buffers. Those formats are identified by a separate set of FourCC codes
-and are referred to as "multi-planar formats". For example, a YUV422 frame is
-normally stored in one memory buffer, but it can also be placed in two or three
-separate buffers, with Y component in one buffer and CbCr components in another
-in the 2-planar version or with each component in its own buffer in the
-3-planar case. Those sub-buffers are referred to as "planes".</para>
- </section>
-
- <section id="colorspaces">
- <title>Colorspaces</title>
-
- <para>'Color' is a very complex concept and depends on physics, chemistry and
-biology. Just because you have three numbers that describe the 'red', 'green'
-and 'blue' components of the color of a pixel does not mean that you can accurately
-display that color. A colorspace defines what it actually <emphasis>means</emphasis>
-to have an RGB value of e.g. (255,&nbsp;0,&nbsp;0). That is, which color should be
-reproduced on the screen in a perfectly calibrated environment.</para>
-
- <para>In order to do that we first need to have a good definition of
-color, i.e. some way to uniquely and unambiguously define a color so that someone
-else can reproduce it. Human color vision is trichromatic since the human eye has
-color receptors that are sensitive to three different wavelengths of light. Hence
-the need to use three numbers to describe color. Be glad you are not a mantis shrimp
-as those are sensitive to 12 different wavelengths, so instead of RGB we would be
-using the ABCDEFGHIJKL colorspace...</para>
-
- <para>Color exists only in the eye and brain and is the result of how strongly
-color receptors are stimulated. This is based on the Spectral
-Power Distribution (SPD) which is a graph showing the intensity (radiant power)
-of the light at wavelengths covering the visible spectrum as it enters the eye.
-The science of colorimetry is about the relationship between the SPD and color as
-perceived by the human brain.</para>
-
- <para>Since the human eye has only three color receptors it is perfectly
-possible that different SPDs will result in the same stimulation of those receptors
-and are perceived as the same color, even though the SPD of the light is
-different.</para>
-
- <para>In the 1920s experiments were devised to determine the relationship
-between SPDs and the perceived color and that resulted in the CIE 1931 standard
-that defines spectral weighting functions that model the perception of color.
-Specifically that standard defines functions that can take an SPD and calculate
-the stimulus for each color receptor. After some further mathematical transforms
-these stimuli are known as the <emphasis>CIE XYZ tristimulus</emphasis> values
-and these X, Y and Z values describe a color as perceived by a human unambiguously.
-These X, Y and Z values are all in the range [0&hellip;1].</para>
-
- <para>The Y value in the CIE XYZ colorspace corresponds to luminance. Often
-the CIE XYZ colorspace is transformed to the normalized CIE xyY colorspace:</para>
-
- <para>x = X / (X + Y + Z)</para>
- <para>y = Y / (X + Y + Z)</para>
-
- <para>The x and y values are the chromaticity coordinates and can be used to
-define a color without the luminance component Y. It is very confusing to
-have such similar names for these colorspaces. Just be aware that if colors
-are specified with lower case 'x' and 'y', then the CIE xyY colorspace is
-used. Upper case 'X' and 'Y' refer to the CIE XYZ colorspace. Also, y has nothing
-to do with luminance. Together x and y specify a color, and Y the luminance.
-That is really all you need to remember from a practical point of view. At
-the end of this section you will find reading resources that go into much more
-detail if you are interested.
-</para>
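A small helper makes the normalization concrete (the handling of the all-zero
input is a choice made for this sketch, since pure black has no defined
chromaticity):

    /* CIE XYZ to CIE xyY: Y, the luminance, is carried over unchanged. */
    static void xyz_to_xyy(double X, double Y, double Z, double *x, double *y)
    {
            double sum = X + Y + Z;

            if (sum == 0.0) {
                    *x = *y = 0.0;
                    return;
            }
            *x = X / sum;
            *y = Y / sum;
    }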
-
- <para>A monitor or TV will reproduce colors by emitting light at three
-different wavelengths, the combination of which will stimulate the color receptors
-in the eye and thus cause the perception of color. Historically these wavelengths
-were defined by the red, green and blue phosphors used in the displays. These
-<emphasis>color primaries</emphasis> are part of what defines a colorspace.</para>
-
- <para>Different display devices will have different primaries and some
-primaries are more suitable for some display technologies than others. This has
-resulted in a variety of colorspaces that are used for different display
-technologies or uses. To define a colorspace you need to define the three
-color primaries (these are typically defined as x,&nbsp;y chromaticity coordinates
-from the CIE xyY colorspace) but also the white reference: that is the color obtained
-when all three primaries are at maximum power. This determines the relative power
-or energy of the primaries. This is usually chosen to be close to daylight which has
-been defined as the CIE D65 Illuminant.</para>
-
- <para>To recapitulate: the CIE XYZ colorspace uniquely identifies colors.
-Other colorspaces are defined by three chromaticity coordinates defined in the
-CIE xyY colorspace. Based on those a 3x3 matrix can be constructed that
-transforms CIE XYZ colors to colors in the new colorspace.
-</para>
-
- <para>Both the CIE XYZ and the RGB colorspace that are derived from the
-specific chromaticity primaries are linear colorspaces. But neither the eye,
-nor display technology is linear. Doubling the values of all components in
-the linear colorspace will not be perceived as twice the intensity of the color.
-So each colorspace also defines a transfer function that takes a linear color
-component value and transforms it to the non-linear component value, which is a
-closer match to the non-linear performance of both the eye and displays. Linear
-component values are denoted RGB, non-linear values as R'G'B'. In general
-colors used in graphics are all R'G'B', except in OpenGL, which uses linear RGB.
-Special care should be taken when dealing with OpenGL to provide linear RGB colors
-or to use the built-in OpenGL support to apply the inverse transfer function.</para>
-
- <para>The final piece that defines a colorspace is a function that
-transforms non-linear R'G'B' to non-linear Y'CbCr. This function is determined
-by the so-called luma coefficients. There may be multiple possible Y'CbCr
-encodings allowed for the same colorspace. Many encodings of color
-prefer to use luma (Y') and chroma (CbCr) instead of R'G'B'. Since the human
-eye is more sensitive to differences in luminance than in color this encoding
-allows one to reduce the amount of color information compared to the luma
-data. Note that the luma (Y') is unrelated to the Y in the CIE XYZ colorspace.
-Also note that Y'CbCr is often called YCbCr or YUV even though these are
-strictly speaking wrong.</para>
-
- <para>Sometimes people confuse Y'CbCr as being a colorspace. This is not
-correct, it is just an encoding of an R'G'B' color into luma and chroma
-values. The underlying colorspace that is associated with the R'G'B' color
-is also associated with the Y'CbCr color.</para>
-
- <para>The final step is how the RGB, R'G'B' or Y'CbCr values are
-quantized. The CIE XYZ colorspace where X, Y and Z are in the range
-[0&hellip;1] describes all colors that humans can perceive, but the transform to
-another colorspace will produce colors that are outside the [0&hellip;1] range.
-Once clamped to the [0&hellip;1] range those colors can no longer be reproduced
-in that colorspace. This clamping is what reduces the extent or gamut of the
-colorspace. How the range of [0&hellip;1] is translated to integer values in the
-range of [0&hellip;255] (or higher, depending on the color depth) is called the
-quantization. This is <emphasis>not</emphasis> part of the colorspace
-definition. In practice RGB or R'G'B' values are full range, i.e. they
-use the full [0&hellip;255] range. Y'CbCr values on the other hand are limited
-range with Y' using [16&hellip;235] and Cb and Cr using [16&hellip;240].</para>
-
- <para>Unfortunately, in some cases limited range RGB is also used
-where the components use the range [16&hellip;235]. And full range Y'CbCr also exists
-using the [0&hellip;255] range.</para>
-
- <para>In order to correctly interpret a color you need to know the
-quantization range, whether it is R'G'B' or Y'CbCr, the used Y'CbCr encoding
-and the colorspace.
-From that information you can calculate the corresponding CIE XYZ color
-and map that again to whatever colorspace your display device uses.</para>
-
- <para>The colorspace definition itself consists of the three
-chromaticity primaries, the white reference chromaticity, a transfer
-function and the luma coefficients needed to transform R'G'B' to Y'CbCr. While
-some colorspace standards correctly define all four, quite often the colorspace
-standard only defines some, and you have to rely on other standards for
-the missing pieces. The fact that colorspaces are often a mix of different
-standards also led to very confusing naming conventions where the name of
-a standard was used to name a colorspace when in fact that standard was
-part of various other colorspaces as well.</para>
-
- <para>If you want to read more about colors and colorspaces, then the
-following resources are useful: <xref linkend="poynton" /> is a good practical
-book for video engineers, <xref linkend="colimg" /> has a much broader scope and
-describes many more aspects of color (physics, chemistry, biology, etc.).
-The <ulink url="http://www.brucelindbloom.com">http://www.brucelindbloom.com</ulink>
-website is an excellent resource, especially with respect to the mathematics behind
-colorspace conversions. The wikipedia <ulink url="http://en.wikipedia.org/wiki/CIE_1931_color_space#CIE_xy_chromaticity_diagram_and_the_CIE_xyY_color_space">CIE 1931 colorspace</ulink> article
-is also very useful.</para>
- </section>
-
- <section>
- <title>Defining Colorspaces in V4L2</title>
- <para>In V4L2 colorspaces are defined by four values. The first is the colorspace
-identifier (&v4l2-colorspace;) which defines the chromaticities, the default transfer
-function, the default Y'CbCr encoding and the default quantization method. The second
-is the transfer function identifier (&v4l2-xfer-func;) to specify non-standard
-transfer functions. The third is the Y'CbCr encoding identifier (&v4l2-ycbcr-encoding;)
-to specify non-standard Y'CbCr encodings and the fourth is the quantization identifier
-(&v4l2-quantization;) to specify non-standard quantization methods. Most of the time
-only the colorspace field of &v4l2-pix-format; or &v4l2-pix-format-mplane; needs to
-be filled in. Note that the default R'G'B' quantization is full range for all
-colorspaces except for BT.2020 which uses limited range R'G'B' quantization.</para>
-
- <table pgwide="1" frame="none" id="v4l2-colorspace">
- <title>V4L2 Colorspaces</title>
- <tgroup cols="2" align="left">
- &cs-def;
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Details</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_COLORSPACE_DEFAULT</constant></entry>
- <entry>The default colorspace. This can be used by applications to let the
- driver fill in the colorspace.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_SMPTE170M</constant></entry>
- <entry>See <xref linkend="col-smpte-170m" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_REC709</constant></entry>
- <entry>See <xref linkend="col-rec709" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_SRGB</constant></entry>
- <entry>See <xref linkend="col-srgb" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_ADOBERGB</constant></entry>
- <entry>See <xref linkend="col-adobergb" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_BT2020</constant></entry>
- <entry>See <xref linkend="col-bt2020" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_DCI_P3</constant></entry>
- <entry>See <xref linkend="col-dcip3" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_SMPTE240M</constant></entry>
- <entry>See <xref linkend="col-smpte-240m" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_470_SYSTEM_M</constant></entry>
- <entry>See <xref linkend="col-sysm" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant></entry>
- <entry>See <xref linkend="col-sysbg" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_JPEG</constant></entry>
- <entry>See <xref linkend="col-jpeg" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_COLORSPACE_RAW</constant></entry>
- <entry>The raw colorspace. This is used for raw image capture where
- the image is minimally processed and is using the internal colorspace
- of the device. The software that processes an image using this
- 'colorspace' will have to know the internals of the capture device.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-xfer-func">
- <title>V4L2 Transfer Function</title>
- <tgroup cols="2" align="left">
- &cs-def;
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Details</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_XFER_FUNC_DEFAULT</constant></entry>
- <entry>Use the default transfer function as defined by the colorspace.</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_709</constant></entry>
- <entry>Use the Rec. 709 transfer function.</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_SRGB</constant></entry>
- <entry>Use the sRGB transfer function.</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_ADOBERGB</constant></entry>
- <entry>Use the AdobeRGB transfer function.</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_SMPTE240M</constant></entry>
- <entry>Use the SMPTE 240M transfer function.</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_NONE</constant></entry>
- <entry>Do not use a transfer function (i.e. use linear RGB values).</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_DCI_P3</constant></entry>
- <entry>Use the DCI-P3 transfer function.</entry>
- </row>
- <row>
- <entry><constant>V4L2_XFER_FUNC_SMPTE2084</constant></entry>
- <entry>Use the SMPTE 2084 transfer function.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-ycbcr-encoding">
- <title>V4L2 Y'CbCr Encodings</title>
- <tgroup cols="2" align="left">
- &cs-def;
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Details</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_YCBCR_ENC_DEFAULT</constant></entry>
- <entry>Use the default Y'CbCr encoding as defined by the colorspace.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_601</constant></entry>
- <entry>Use the BT.601 Y'CbCr encoding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_709</constant></entry>
- <entry>Use the Rec. 709 Y'CbCr encoding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_XV601</constant></entry>
- <entry>Use the extended gamut xvYCC BT.601 encoding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_XV709</constant></entry>
- <entry>Use the extended gamut xvYCC Rec. 709 encoding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_SYCC</constant></entry>
- <entry>Use the extended gamut sYCC encoding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_BT2020</constant></entry>
- <entry>Use the default non-constant luminance BT.2020 Y'CbCr encoding.</entry>
- </row>
- <row>
- <entry><constant>V4L2_YCBCR_ENC_BT2020_CONST_LUM</constant></entry>
- <entry>Use the constant luminance BT.2020 Yc'CbcCrc encoding.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-quantization">
- <title>V4L2 Quantization Methods</title>
- <tgroup cols="2" align="left">
- &cs-def;
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Details</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_QUANTIZATION_DEFAULT</constant></entry>
- <entry>Use the default quantization encoding as defined by the colorspace.
-This is always full range for R'G'B' (except for the BT.2020 colorspace) and usually
-limited range for Y'CbCr.</entry>
- </row>
- <row>
- <entry><constant>V4L2_QUANTIZATION_FULL_RANGE</constant></entry>
- <entry>Use the full range quantization encoding. I.e. the range [0&hellip;1]
-is mapped to [0&hellip;255] (with possible clipping to [1&hellip;254] to avoid the
-0x00 and 0xff values). Cb and Cr are mapped from [-0.5&hellip;0.5] to [0&hellip;255]
-(with possible clipping to [1&hellip;254] to avoid the 0x00 and 0xff values).</entry>
- </row>
- <row>
- <entry><constant>V4L2_QUANTIZATION_LIM_RANGE</constant></entry>
- <entry>Use the limited range quantization encoding. I.e. the range [0&hellip;1]
-is mapped to [16&hellip;235]. Cb and Cr are mapped from [-0.5&hellip;0.5] to [16&hellip;240].
-</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
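As a worked sketch of the 8-bit mappings in the quantization table above (one
possible linearization; the clamping to [1..254] mentioned in the table is
omitted):

    #include <stdint.h>

    /* yp is Y' in [0..1], c is Cb or Cr in [-0.5..0.5]. */
    static uint8_t quant_luma(double yp, int full_range)
    {
            double v = full_range ? 255.0 * yp : 219.0 * yp + 16.0;

            return (uint8_t)(v + 0.5);
    }

    static uint8_t quant_chroma(double c, int full_range)
    {
            double v = full_range ? 255.0 * c + 127.5 : 224.0 * c + 128.0;

            return (uint8_t)(v + 0.5);
    }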
-
- <section>
- <title>Detailed Colorspace Descriptions</title>
- <section id="col-smpte-170m">
- <title>Colorspace SMPTE 170M (<constant>V4L2_COLORSPACE_SMPTE170M</constant>)</title>
- <para>The <xref linkend="smpte170m" /> standard defines the colorspace used by NTSC and PAL and by SDTV
-in general. The default transfer function is <constant>V4L2_XFER_FUNC_709</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>.
-The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and
-the white reference are:</para>
- <table frame="none">
- <title>SMPTE 170M Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.630</entry>
- <entry>0.340</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.310</entry>
- <entry>0.595</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.155</entry>
- <entry>0.070</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>The red, green and blue chromaticities are also often referred to
-as the SMPTE C set, so this colorspace is sometimes called SMPTE C as well.</para>
- <variablelist>
- <varlistentry>
- <term>The transfer function defined for SMPTE 170M is the same as the
-one defined in Rec. 709.</term>
- <listitem>
- <para>L' = -1.099(-L)<superscript>0.45</superscript>&nbsp;+&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&le;&nbsp;-0.018</para>
- <para>L' = 4.5L&nbsp;for&nbsp;-0.018&nbsp;&lt;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
- <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&ge;&nbsp;0.018</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = -((L'&nbsp;-&nbsp;0.099)&nbsp;/&nbsp;-1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&le;&nbsp;-0.081</para>
- <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;-0.081&nbsp;&lt;&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
- <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with
-the following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
- <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5]. This conversion to Y'CbCr is identical to the one
-defined in the <xref linkend="itu601" /> standard and this colorspace is sometimes called BT.601 as well, even
-though BT.601 does not mention any color primaries.</para>
- <para>The default quantization is limited range, but full range is possible although
-rarely seen.</para>
- </section>
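The V4L2_YCBCR_ENC_601 equations above translate directly into code; a sketch,
with inputs being non-linear R'G'B' in [0..1] and the clamping of the results
left out:

    static void encode_601(double r, double g, double b,
                           double *yp, double *cb, double *cr)
    {
            *yp =  0.299 * r + 0.587 * g + 0.114 * b;
            *cb = -0.169 * r - 0.331 * g + 0.5   * b;
            *cr =  0.5   * r - 0.419 * g - 0.081 * b;
    }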
-
- <section id="col-rec709">
- <title>Colorspace Rec. 709 (<constant>V4L2_COLORSPACE_REC709</constant>)</title>
- <para>The <xref linkend="itu709" /> standard defines the colorspace used by HDTV in general.
-The default transfer function is <constant>V4L2_XFER_FUNC_709</constant>. The default
-Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_709</constant>. The default Y'CbCr quantization is
-limited range. The chromaticities of the primary colors and the white reference are:</para>
- <table frame="none">
- <title>Rec. 709 Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.640</entry>
- <entry>0.330</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.300</entry>
- <entry>0.600</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.150</entry>
- <entry>0.060</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>The full name of this standard is Rec. ITU-R BT.709-5.</para>
- <variablelist>
- <varlistentry>
- <term>Transfer function. Normally L is in the range [0&hellip;1], but for the extended
-gamut xvYCC encoding values outside that range are allowed.</term>
- <listitem>
- <para>L' = -1.099(-L)<superscript>0.45</superscript>&nbsp;+&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&le;&nbsp;-0.018</para>
- <para>L' = 4.5L&nbsp;for&nbsp;-0.018&nbsp;&lt;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
- <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;L&nbsp;&ge;&nbsp;0.018</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = -((L'&nbsp;-&nbsp;0.099)&nbsp;/&nbsp;-1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&le;&nbsp;-0.081</para>
- <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;-0.081&nbsp;&lt;&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
- <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the following
-<constant>V4L2_YCBCR_ENC_709</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.2126R'&nbsp;+&nbsp;0.7152G'&nbsp;+&nbsp;0.0722B'</para>
- <para>Cb&nbsp;=&nbsp;-0.1146R'&nbsp;-&nbsp;0.3854G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4542G'&nbsp;-&nbsp;0.0458B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5].</para>
- <para>The default quantization is limited range, but full range is possible although
-rarely seen.</para>
- <para>The <constant>V4L2_YCBCR_ENC_709</constant> encoding described above is the default
-for this colorspace, but it can be overridden with <constant>V4L2_YCBCR_ENC_601</constant>, in which
-case the BT.601 Y'CbCr encoding is used.</para>
- <para>Two additional extended gamut Y'CbCr encodings are also possible with this colorspace:</para>
- <variablelist>
- <varlistentry>
- <term>The xvYCC 709 encoding (<constant>V4L2_YCBCR_ENC_XV709</constant>, <xref linkend="xvycc" />)
-is similar to the Rec. 709 encoding, but it allows for R', G' and B' values that are outside the range
-[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.2126R'&nbsp;+&nbsp;0.7152G'&nbsp;+&nbsp;0.0722B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;256)</para>
- <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(-0.1146R'&nbsp;-&nbsp;0.3854G'&nbsp;+&nbsp;0.5B')</para>
- <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.4542G'&nbsp;-&nbsp;0.0458B')</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The xvYCC 601 encoding (<constant>V4L2_YCBCR_ENC_XV601</constant>, <xref linkend="xvycc" />) is similar
-to the BT.601 encoding, but it allows for R', G' and B' values that are outside the range
-[0&hellip;1]. The resulting Y', Cb and Cr values are scaled and offset:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;(219&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B')&nbsp;+&nbsp;(16&nbsp;/&nbsp;256)</para>
- <para>Cb&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B')</para>
- <para>Cr&nbsp;=&nbsp;(224&nbsp;/&nbsp;256)&nbsp;*&nbsp;(0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B')</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
-to the range [-0.5&hellip;0.5]. The non-standard xvYCC 709 or xvYCC 601 encodings can be used by
-selecting <constant>V4L2_YCBCR_ENC_XV709</constant> or <constant>V4L2_YCBCR_ENC_XV601</constant>.
-The xvYCC encodings always use full range quantization.</para>
- </section>
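The transfer function given above, including the negative branch that only
matters for the extended gamut xvYCC encodings, can be written as a sketch (the
function name is illustrative):

    #include <math.h>

    static double xfer_709(double L)
    {
            if (L <= -0.018)
                    return -1.099 * pow(-L, 0.45) + 0.099;
            if (L < 0.018)
                    return 4.5 * L;
            return 1.099 * pow(L, 0.45) - 0.099;
    }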
-
- <section id="col-srgb">
- <title>Colorspace sRGB (<constant>V4L2_COLORSPACE_SRGB</constant>)</title>
- <para>The <xref linkend="srgb" /> standard defines the colorspace used by most webcams
-and computer graphics. The default transfer function is <constant>V4L2_XFER_FUNC_SRGB</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_SYCC</constant>. The default Y'CbCr
-quantization is full range. The chromaticities of the primary colors and the white
-reference are:</para>
- <table frame="none">
- <title>sRGB Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.640</entry>
- <entry>0.330</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.300</entry>
- <entry>0.600</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.150</entry>
- <entry>0.060</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>These chromaticities are identical to the Rec. 709 colorspace.</para>
- <variablelist>
- <varlistentry>
- <term>Transfer function. Note that negative values for L are only used by the Y'CbCr conversion.</term>
- <listitem>
- <para>L' = -1.055(-L)<superscript>1/2.4</superscript>&nbsp;+&nbsp;0.055&nbsp;for&nbsp;L&nbsp;&lt;&nbsp;-0.0031308</para>
- <para>L' = 12.92L&nbsp;for&nbsp;-0.0031308&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;0.0031308</para>
- <para>L' = 1.055L<superscript>1/2.4</superscript>&nbsp;-&nbsp;0.055&nbsp;for&nbsp;0.0031308&nbsp;&lt;&nbsp;L&nbsp;&le;&nbsp;1</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = -((-L'&nbsp;+&nbsp;0.055)&nbsp;/&nbsp;1.055)<superscript>2.4</superscript>&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;-0.04045</para>
- <para>L = L'&nbsp;/&nbsp;12.92&nbsp;for&nbsp;-0.04045&nbsp;&le;&nbsp;L'&nbsp;&le;&nbsp;0.04045</para>
- <para>L = ((L'&nbsp;+&nbsp;0.055)&nbsp;/&nbsp;1.055)<superscript>2.4</superscript>&nbsp;for&nbsp;L'&nbsp;&gt;&nbsp;0.04045</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the following
-<constant>V4L2_YCBCR_ENC_SYCC</constant> encoding as defined by <xref linkend="sycc" />:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.2990R'&nbsp;+&nbsp;0.5870G'&nbsp;+&nbsp;0.1140B'</para>
- <para>Cb&nbsp;=&nbsp;-0.1687R'&nbsp;-&nbsp;0.3313G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4187G'&nbsp;-&nbsp;0.0813B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are clamped
-to the range [-0.5&hellip;0.5]. The <constant>V4L2_YCBCR_ENC_SYCC</constant> quantization is always
-full range. Although this Y'CbCr encoding looks very similar to the <constant>V4L2_YCBCR_ENC_XV601</constant>
-encoding, it is not. The <constant>V4L2_YCBCR_ENC_XV601</constant> scales and offsets the Y'CbCr
-values before quantization, but this encoding does not do that.</para>
- </section>
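A sketch of the sRGB transfer function above (the negative branch is only
relevant for the Y'CbCr conversion):

    #include <math.h>

    static double xfer_srgb(double L)
    {
            if (L < -0.0031308)
                    return -1.055 * pow(-L, 1.0 / 2.4) + 0.055;
            if (L <= 0.0031308)
                    return 12.92 * L;
            return 1.055 * pow(L, 1.0 / 2.4) - 0.055;
    }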
-
- <section id="col-adobergb">
- <title>Colorspace Adobe RGB (<constant>V4L2_COLORSPACE_ADOBERGB</constant>)</title>
- <para>The <xref linkend="adobergb" /> standard defines the colorspace used by computer graphics
-that use the AdobeRGB colorspace. This is also known as the <xref linkend="oprgb" /> standard.
-The default transfer function is <constant>V4L2_XFER_FUNC_ADOBERGB</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>. The default Y'CbCr
-quantization is limited range. The chromaticities of the primary colors and the white reference
-are:</para>
- <table frame="none">
- <title>Adobe RGB Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.6400</entry>
- <entry>0.3300</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.2100</entry>
- <entry>0.7100</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.1500</entry>
- <entry>0.0600</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <variablelist>
- <varlistentry>
- <term>Transfer function:</term>
- <listitem>
- <para>L' = L<superscript>1/2.19921875</superscript></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = L'<superscript>2.19921875</superscript></para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
-following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
- <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5]. This transform is identical to the one defined in
-SMPTE 170M/BT.601. The Y'CbCr quantization is limited range.</para>
- </section>
-
- <section id="col-bt2020">
- <title>Colorspace BT.2020 (<constant>V4L2_COLORSPACE_BT2020</constant>)</title>
- <para>The <xref linkend="itu2020" /> standard defines the colorspace used by Ultra-high definition
-television (UHDTV). The default transfer function is <constant>V4L2_XFER_FUNC_709</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_BT2020</constant>.
-The default R'G'B' quantization is limited range (!), and so is the default Y'CbCr quantization.
-The chromaticities of the primary colors and the white reference are:</para>
- <table frame="none">
- <title>BT.2020 Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.708</entry>
- <entry>0.292</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.170</entry>
- <entry>0.797</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.131</entry>
- <entry>0.046</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <variablelist>
- <varlistentry>
- <term>Transfer function (same as Rec. 709):</term>
- <listitem>
- <para>L' = 4.5L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
- <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
- <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
-following <constant>V4L2_YCBCR_ENC_BT2020</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.2627R'&nbsp;+&nbsp;0.6780G'&nbsp;+&nbsp;0.0593B'</para>
- <para>Cb&nbsp;=&nbsp;-0.1396R'&nbsp;-&nbsp;0.3604G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4598G'&nbsp;-&nbsp;0.0402B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.</para>
- <para>There is also an alternate constant luminance R'G'B' to Yc'CbcCrc
-(<constant>V4L2_YCBCR_ENC_BT2020_CONST_LUM</constant>) encoding:</para>
- <variablelist>
- <varlistentry>
- <term>Luma:</term>
- <listitem>
- <para>Yc'&nbsp;=&nbsp;(0.2627R&nbsp;+&nbsp;0.6780G&nbsp;+&nbsp;0.0593B)'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>B'&nbsp;-&nbsp;Yc'&nbsp;&le;&nbsp;0:</term>
- <listitem>
- <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.9404</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>B'&nbsp;-&nbsp;Yc'&nbsp;&gt;&nbsp;0:</term>
- <listitem>
- <para>Cbc&nbsp;=&nbsp;(B'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.5816</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>R'&nbsp;-&nbsp;Yc'&nbsp;&le;&nbsp;0:</term>
- <listitem>
-	  <para>Crc&nbsp;=&nbsp;(R'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;1.7184</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>R'&nbsp;-&nbsp;Yc'&nbsp;&gt;&nbsp;0:</term>
- <listitem>
-	  <para>Crc&nbsp;=&nbsp;(R'&nbsp;-&nbsp;Yc')&nbsp;/&nbsp;0.9936</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Yc' is clamped to the range [0&hellip;1] and Cbc and Crc are
-clamped to the range [-0.5&hellip;0.5]. The Yc'CbcCrc quantization is limited range.</para>
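- <para>A minimal C sketch of the constant luminance encoding (illustrative only;
-it reuses the Rec. 709 transfer function defined above and all names are arbitrary):</para>
- <programlisting>
-#include &lt;math.h&gt;
-
-/* Rec. 709 transfer function, as used by BT.2020 (see above). */
-static double xfer_709(double l)
-{
-        if (l >= 0.018)
-                return 1.099 * pow(l, 0.45) - 0.099;
-        return 4.5 * l;
-}
-
-/* Constant luminance Yc'CbcCrc encoding from linear R, G, B in [0..1]. */
-static void enc_bt2020_cl(double r, double g, double b,
-                          double *yc, double *cbc, double *crc)
-{
-        double y = xfer_709(0.2627 * r + 0.6780 * g + 0.0593 * b);
-        double bp = xfer_709(b);
-        double rp = xfer_709(r);
-
-        *yc  = y;
-        *cbc = (bp - y) / (bp > y ? 1.5816 : 1.9404);
-        *crc = (rp - y) / (rp > y ? 0.9936 : 1.7184);
-}
-</programlisting>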
- </section>
-
- <section id="col-dcip3">
- <title>Colorspace DCI-P3 (<constant>V4L2_COLORSPACE_DCI_P3</constant>)</title>
- <para>The <xref linkend="smpte431" /> standard defines the colorspace used by cinema
-projectors that use the DCI-P3 colorspace.
-The default transfer function is <constant>V4L2_XFER_FUNC_DCI_P3</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_709</constant>. Note that this
-colorspace does not specify a Y'CbCr encoding since it is not meant to be encoded
-to Y'CbCr. So this default Y'CbCr encoding was picked because it is the HDTV
-encoding. The default Y'CbCr quantization is limited range. The chromaticities of
-the primary colors and the white reference are:</para>
- <table frame="none">
- <title>DCI-P3 Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.6800</entry>
- <entry>0.3200</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.2650</entry>
- <entry>0.6900</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.1500</entry>
- <entry>0.0600</entry>
- </row>
- <row>
- <entry>White Reference</entry>
- <entry>0.3140</entry>
- <entry>0.3510</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <variablelist>
- <varlistentry>
- <term>Transfer function:</term>
- <listitem>
- <para>L' = L<superscript>1/2.6</superscript></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = L'<superscript>2.6</superscript></para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y'CbCr encoding is not specified. V4L2 defaults to Rec. 709.</para>
- </section>
-
- <section id="col-smpte-240m">
- <title>Colorspace SMPTE 240M (<constant>V4L2_COLORSPACE_SMPTE240M</constant>)</title>
- <para>The <xref linkend="smpte240m" /> standard was an interim standard used during
-the early days of HDTV (1988-1998). It has been superseded by Rec. 709.
-The default transfer function is <constant>V4L2_XFER_FUNC_SMPTE240M</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_SMPTE240M</constant>.
-The default Y'CbCr quantization is limited range. The chromaticities of the primary colors and the
-white reference are:</para>
- <table frame="none">
- <title>SMPTE 240M Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.630</entry>
- <entry>0.340</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.310</entry>
- <entry>0.595</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.155</entry>
- <entry>0.070</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>These chromaticities are identical to the SMPTE 170M colorspace.</para>
- <variablelist>
- <varlistentry>
- <term>Transfer function:</term>
- <listitem>
- <para>L' = 4L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.0228</para>
- <para>L' = 1.1115L<superscript>0.45</superscript>&nbsp;-&nbsp;0.1115&nbsp;for&nbsp;0.0228&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = L'&nbsp;/&nbsp;4&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L'&nbsp;&lt;&nbsp;0.0913</para>
- <para>L = ((L'&nbsp;+&nbsp;0.1115)&nbsp;/&nbsp;1.1115)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.0913</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
-following <constant>V4L2_YCBCR_ENC_SMPTE240M</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.2122R'&nbsp;+&nbsp;0.7013G'&nbsp;+&nbsp;0.0865B'</para>
- <para>Cb&nbsp;=&nbsp;-0.1161R'&nbsp;-&nbsp;0.3839G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.4451G'&nbsp;-&nbsp;0.0549B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.</para>
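- <para>A minimal C sketch of this transfer function and its inverse
-(illustrative only; the names are arbitrary):</para>
- <programlisting>
-#include &lt;math.h&gt;
-
-/* SMPTE 240M transfer function and its inverse; L and L' are in [0..1]. */
-static double smpte240m_enc(double l)
-{
-        if (l >= 0.0228)
-                return 1.1115 * pow(l, 0.45) - 0.1115;
-        return 4.0 * l;
-}
-
-static double smpte240m_dec(double lp)
-{
-        if (lp >= 0.0913)
-                return pow((lp + 0.1115) / 1.1115, 1.0 / 0.45);
-        return lp / 4.0;
-}
-</programlisting>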
- </section>
-
- <section id="col-sysm">
- <title>Colorspace NTSC 1953 (<constant>V4L2_COLORSPACE_470_SYSTEM_M</constant>)</title>
- <para>This standard defines the colorspace used by NTSC in 1953. In practice this
-colorspace is obsolete and SMPTE 170M should be used instead.
-The default transfer function is <constant>V4L2_XFER_FUNC_709</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>.
-The default Y'CbCr quantization is limited range.
-The chromaticities of the primary colors and the white reference are:</para>
- <table frame="none">
- <title>NTSC 1953 Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.67</entry>
- <entry>0.33</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.21</entry>
- <entry>0.71</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.14</entry>
- <entry>0.08</entry>
- </row>
- <row>
- <entry>White Reference (C)</entry>
- <entry>0.310</entry>
- <entry>0.316</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <para>Note that this colorspace uses Illuminant C instead of D65 as the
-white reference. To correctly convert an image in this colorspace to another
-that uses D65 you need to apply a chromatic adaptation algorithm such as the
-Bradford method.</para>
- <variablelist>
- <varlistentry>
- <term>The transfer function was never properly defined for NTSC 1953. The
-Rec. 709 transfer function is recommended in the literature:</term>
- <listitem>
- <para>L' = 4.5L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
- <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
- <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
-following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
- <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.
-This transform is identical to one defined in SMPTE 170M/BT.601.</para>
- </section>
-
- <section id="col-sysbg">
- <title>Colorspace EBU Tech. 3213 (<constant>V4L2_COLORSPACE_470_SYSTEM_BG</constant>)</title>
- <para>The <xref linkend="tech3213" /> standard defines the colorspace used by PAL/SECAM in 1975. In practice this
-colorspace is obsolete and SMPTE 170M should be used instead.
-The default transfer function is <constant>V4L2_XFER_FUNC_709</constant>.
-The default Y'CbCr encoding is <constant>V4L2_YCBCR_ENC_601</constant>.
-The default Y'CbCr quantization is limited range.
-The chromaticities of the primary colors and the white reference are:</para>
- <table frame="none">
- <title>EBU Tech. 3213 Chromaticities</title>
- <tgroup cols="3" align="left">
- &cs-str;
- <thead>
- <row>
- <entry>Color</entry>
- <entry>x</entry>
- <entry>y</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Red</entry>
- <entry>0.64</entry>
- <entry>0.33</entry>
- </row>
- <row>
- <entry>Green</entry>
- <entry>0.29</entry>
- <entry>0.60</entry>
- </row>
- <row>
- <entry>Blue</entry>
- <entry>0.15</entry>
- <entry>0.06</entry>
- </row>
- <row>
- <entry>White Reference (D65)</entry>
- <entry>0.3127</entry>
- <entry>0.3290</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <variablelist>
- <varlistentry>
- <term>The transfer function was never properly defined for this colorspace.
-The Rec. 709 transfer function is recommended in the literature:</term>
- <listitem>
- <para>L' = 4.5L&nbsp;for&nbsp;0&nbsp;&le;&nbsp;L&nbsp;&lt;&nbsp;0.018</para>
- <para>L' = 1.099L<superscript>0.45</superscript>&nbsp;-&nbsp;0.099&nbsp;for&nbsp;0.018&nbsp;&le;&nbsp;L&nbsp;&le;&nbsp;1</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = L'&nbsp;/&nbsp;4.5&nbsp;for&nbsp;L'&nbsp;&lt;&nbsp;0.081</para>
- <para>L = ((L'&nbsp;+&nbsp;0.099)&nbsp;/&nbsp;1.099)<superscript>1/0.45</superscript>&nbsp;for&nbsp;L'&nbsp;&ge;&nbsp;0.081</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>The luminance (Y') and color difference (Cb and Cr) are obtained with the
-following <constant>V4L2_YCBCR_ENC_601</constant> encoding:</term>
- <listitem>
- <para>Y'&nbsp;=&nbsp;0.299R'&nbsp;+&nbsp;0.587G'&nbsp;+&nbsp;0.114B'</para>
- <para>Cb&nbsp;=&nbsp;-0.169R'&nbsp;-&nbsp;0.331G'&nbsp;+&nbsp;0.5B'</para>
- <para>Cr&nbsp;=&nbsp;0.5R'&nbsp;-&nbsp;0.419G'&nbsp;-&nbsp;0.081B'</para>
- </listitem>
- </varlistentry>
- </variablelist>
- <para>Y' is clamped to the range [0&hellip;1] and Cb and Cr are
-clamped to the range [-0.5&hellip;0.5]. The Y'CbCr quantization is limited range.
-This transform is identical to one defined in SMPTE 170M/BT.601.</para>
- </section>
-
- <section id="col-jpeg">
- <title>Colorspace JPEG (<constant>V4L2_COLORSPACE_JPEG</constant>)</title>
- <para>This colorspace is used by most (Motion-)JPEG formats. The chromaticities
-of the primary colors and the white reference are identical to sRGB. The transfer
-function used is <constant>V4L2_XFER_FUNC_SRGB</constant>. The Y'CbCr encoding is
-<constant>V4L2_YCBCR_ENC_601</constant> with full range quantization where
-Y' is scaled to [0&hellip;255] and Cb/Cr are scaled to [-128&hellip;128] and
-then clipped to [-128&hellip;127].</para>
- <para>Note that the JPEG standard does not actually store colorspace information.
-So if something other than sRGB is used, then the driver will have to set that information
-explicitly. Effectively <constant>V4L2_COLORSPACE_JPEG</constant> can be considered to be
-an abbreviation for <constant>V4L2_COLORSPACE_SRGB</constant>, <constant>V4L2_YCBCR_ENC_601</constant>
-and <constant>V4L2_QUANTIZATION_FULL_RANGE</constant>.</para>
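- <para>As a rough sketch (illustrative only, not kernel code), this full range
-quantization could be written as:</para>
- <programlisting>
-#include &lt;math.h&gt;
-
-/* Full range quantization for V4L2_COLORSPACE_JPEG: Y' in [0..1] and
- * Cb/Cr in [-0.5..0.5] as produced by the V4L2_YCBCR_ENC_601 encoding. */
-static void jpeg_quantize(double y, double cb, double cr,
-                          int *y8, int *cb8, int *cr8)
-{
-        *y8  = (int)round(y * 255.0);     /* scaled to [0..255] */
-        *cb8 = (int)round(cb * 256.0);    /* scaled to [-128..128] ... */
-        *cr8 = (int)round(cr * 256.0);
-        if (*cb8 > 127)                   /* ... and clipped to [-128..127] */
-                *cb8 = 127;
-        if (*cr8 > 127)
-                *cr8 = 127;
-}
-</programlisting>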
- </section>
-
- </section>
-
- <section>
- <title>Detailed Transfer Function Descriptions</title>
- <section id="xf-smpte-2084">
- <title>Transfer Function SMPTE 2084 (<constant>V4L2_XFER_FUNC_SMPTE2084</constant>)</title>
- <para>The <xref linkend="smpte2084" /> standard defines the transfer function used by
-High Dynamic Range content.</para>
- <variablelist>
- <varlistentry>
- <term>Constants:</term>
- <listitem>
- <para>m1 = (2610 / 4096) / 4</para>
- <para>m2 = (2523 / 4096) * 128</para>
- <para>c1 = 3424 / 4096</para>
- <para>c2 = (2413 / 4096) * 32</para>
- <para>c3 = (2392 / 4096) * 32</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>Transfer function:</term>
- <listitem>
- <para>L' = ((c1 + c2 * L<superscript>m1</superscript>) / (1 + c3 * L<superscript>m1</superscript>))<superscript>m2</superscript></para>
- </listitem>
- </varlistentry>
- </variablelist>
- <variablelist>
- <varlistentry>
- <term>Inverse Transfer function:</term>
- <listitem>
- <para>L = (max(L'<superscript>1/m2</superscript> - c1, 0) / (c2 - c3 * L'<superscript>1/m2</superscript>))<superscript>1/m1</superscript></para>
- </listitem>
- </varlistentry>
- </variablelist>
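- <para>For illustration only (not kernel code), the constants and transfer
-function above can be written in C as:</para>
- <programlisting>
-#include &lt;math.h&gt;
-
-/* SMPTE 2084 constants from the definitions above. */
-static const double m1 = 2610.0 / 4096.0 / 4.0;
-static const double m2 = 2523.0 / 4096.0 * 128.0;
-static const double c1 = 3424.0 / 4096.0;
-static const double c2 = 2413.0 / 4096.0 * 32.0;
-static const double c3 = 2392.0 / 4096.0 * 32.0;
-
-/* Transfer function; L and L' are normalized to [0..1]. */
-static double smpte2084_enc(double l)
-{
-        double lm = pow(l, m1);
-
-        return pow((c1 + c2 * lm) / (1.0 + c3 * lm), m2);
-}
-
-/* Inverse transfer function. */
-static double smpte2084_dec(double lp)
-{
-        double x = pow(lp, 1.0 / m2);
-
-        return pow(fmax(x - c1, 0.0) / (c2 - c3 * x), 1.0 / m1);
-}
-</programlisting>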
- </section>
- </section>
-
- <section id="pixfmt-indexed">
- <title>Indexed Format</title>
-
- <para>In this format each pixel is represented by an 8 bit index
-into a 256 entry ARGB palette. It is intended for <link
-linkend="osd">Video Output Overlays</link> only. There are no ioctls to
-access the palette, this must be done with ioctls of the Linux framebuffer API.</para>
-
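- <para>A minimal sketch of loading the palette through the framebuffer API
-(illustrative only; it assumes an already opened framebuffer file descriptor
-and omits error handling):</para>
- <programlisting>
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/fb.h&gt;
-
-/* Load a 256-entry palette for V4L2_PIX_FMT_PAL8 through the framebuffer
- * device; each component is a 16-bit value as expected by the
- * framebuffer API. */
-static int load_palette(int fb_fd, __u16 *red, __u16 *green,
-                        __u16 *blue, __u16 *alpha)
-{
-        struct fb_cmap cmap = {
-                .start  = 0,
-                .len    = 256,
-                .red    = red,
-                .green  = green,
-                .blue   = blue,
-                .transp = alpha,
-        };
-
-        return ioctl(fb_fd, FBIOPUTCMAP, &amp;cmap);
-}
-</programlisting>
-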
- <table pgwide="0" frame="none">
- <title>Indexed Image Format</title>
- <tgroup cols="37" align="center">
- <colspec colname="id" align="left" />
- <colspec colname="fourcc" />
- <colspec colname="bit" />
-
- <colspec colnum="4" colname="b07" align="center" />
- <colspec colnum="5" colname="b06" align="center" />
- <colspec colnum="6" colname="b05" align="center" />
- <colspec colnum="7" colname="b04" align="center" />
- <colspec colnum="8" colname="b03" align="center" />
- <colspec colnum="9" colname="b02" align="center" />
- <colspec colnum="10" colname="b01" align="center" />
- <colspec colnum="11" colname="b00" align="center" />
-
- <spanspec namest="b07" nameend="b00" spanname="b0" />
- <spanspec namest="b17" nameend="b10" spanname="b1" />
- <spanspec namest="b27" nameend="b20" spanname="b2" />
- <spanspec namest="b37" nameend="b30" spanname="b3" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>&nbsp;</entry>
- <entry spanname="b0">Byte&nbsp;0</entry>
- </row>
- <row>
- <entry>&nbsp;</entry>
- <entry>&nbsp;</entry>
- <entry>Bit</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="V4L2-PIX-FMT-PAL8">
- <entry><constant>V4L2_PIX_FMT_PAL8</constant></entry>
- <entry>'PAL8'</entry>
- <entry></entry>
- <entry>i<subscript>7</subscript></entry>
- <entry>i<subscript>6</subscript></entry>
- <entry>i<subscript>5</subscript></entry>
- <entry>i<subscript>4</subscript></entry>
- <entry>i<subscript>3</subscript></entry>
- <entry>i<subscript>2</subscript></entry>
- <entry>i<subscript>1</subscript></entry>
- <entry>i<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section id="pixfmt-rgb">
- <title>RGB Formats</title>
-
- &sub-packed-rgb;
- &sub-sbggr8;
- &sub-sgbrg8;
- &sub-sgrbg8;
- &sub-srggb8;
- &sub-sbggr16;
- &sub-srggb10;
- &sub-srggb10p;
- &sub-srggb10alaw8;
- &sub-srggb10dpcm8;
- &sub-srggb12;
- </section>
-
- <section id="yuv-formats">
- <title>YUV Formats</title>
-
- <para>YUV is the format native to TV broadcast and composite video
-signals. It separates the brightness information (Y) from the color
-information (U and V, or Cb and Cr). The color information consists of
-red and blue <emphasis>color difference</emphasis> signals; this way
-the green component can be reconstructed by subtracting them from the
-brightness component. See <xref linkend="colorspaces" /> for conversion
-examples. YUV was chosen because early television transmitted only
-brightness information. To add color in a way compatible with existing
-receivers, a new signal carrier was added to transmit the color
-difference signals. Secondly, in the YUV formats the U and V components
-usually have a lower resolution than the Y component. This is an analog
-video compression technique that takes advantage of the human visual
-system being more sensitive to brightness than to color
-information.</para>
-
- &sub-packed-yuv;
- &sub-grey;
- &sub-y10;
- &sub-y12;
- &sub-y10b;
- &sub-y16;
- &sub-y16-be;
- &sub-y8i;
- &sub-y12i;
- &sub-uv8;
- &sub-yuyv;
- &sub-uyvy;
- &sub-yvyu;
- &sub-vyuy;
- &sub-y41p;
- &sub-yuv420;
- &sub-yuv420m;
- &sub-yuv422m;
- &sub-yuv444m;
- &sub-yuv410;
- &sub-yuv422p;
- &sub-yuv411p;
- &sub-nv12;
- &sub-nv12m;
- &sub-nv12mt;
- &sub-nv16;
- &sub-nv16m;
- &sub-nv24;
- &sub-m420;
- </section>
-
- <section id="depth-formats">
- <title>Depth Formats</title>
- <para>Depth data provides the distance to points mapped onto the image plane.
- </para>
-
- &sub-z16;
- </section>
-
- <section>
- <title>Compressed Formats</title>
-
- <table pgwide="1" frame="none" id="compressed-formats">
- <title>Compressed Image Formats</title>
- <tgroup cols="3" align="left">
- &cs-def;
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>Details</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="V4L2-PIX-FMT-JPEG">
- <entry><constant>V4L2_PIX_FMT_JPEG</constant></entry>
- <entry>'JPEG'</entry>
- <entry>TBD. See also &VIDIOC-G-JPEGCOMP;,
- &VIDIOC-S-JPEGCOMP;.</entry>
- </row>
- <row id="V4L2-PIX-FMT-MPEG">
- <entry><constant>V4L2_PIX_FMT_MPEG</constant></entry>
- <entry>'MPEG'</entry>
- <entry>MPEG multiplexed stream. The actual format is determined by
-extended control <constant>V4L2_CID_MPEG_STREAM_TYPE</constant>, see
-<xref linkend="mpeg-control-id" />.</entry>
- </row>
- <row id="V4L2-PIX-FMT-H264">
- <entry><constant>V4L2_PIX_FMT_H264</constant></entry>
- <entry>'H264'</entry>
- <entry>H264 video elementary stream with start codes.</entry>
- </row>
- <row id="V4L2-PIX-FMT-H264-NO-SC">
- <entry><constant>V4L2_PIX_FMT_H264_NO_SC</constant></entry>
- <entry>'AVC1'</entry>
- <entry>H264 video elementary stream without start codes.</entry>
- </row>
- <row id="V4L2-PIX-FMT-H264-MVC">
- <entry><constant>V4L2_PIX_FMT_H264_MVC</constant></entry>
- <entry>'M264'</entry>
- <entry>H264 MVC video elementary stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-H263">
- <entry><constant>V4L2_PIX_FMT_H263</constant></entry>
- <entry>'H263'</entry>
- <entry>H263 video elementary stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-MPEG1">
- <entry><constant>V4L2_PIX_FMT_MPEG1</constant></entry>
- <entry>'MPG1'</entry>
- <entry>MPEG1 video elementary stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-MPEG2">
- <entry><constant>V4L2_PIX_FMT_MPEG2</constant></entry>
- <entry>'MPG2'</entry>
- <entry>MPEG2 video elementary stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-MPEG4">
- <entry><constant>V4L2_PIX_FMT_MPEG4</constant></entry>
- <entry>'MPG4'</entry>
- <entry>MPEG4 video elementary stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-XVID">
- <entry><constant>V4L2_PIX_FMT_XVID</constant></entry>
- <entry>'XVID'</entry>
- <entry>Xvid video elementary stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-VC1-ANNEX-G">
- <entry><constant>V4L2_PIX_FMT_VC1_ANNEX_G</constant></entry>
- <entry>'VC1G'</entry>
- <entry>VC1, SMPTE 421M Annex G compliant stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-VC1-ANNEX-L">
- <entry><constant>V4L2_PIX_FMT_VC1_ANNEX_L</constant></entry>
- <entry>'VC1L'</entry>
- <entry>VC1, SMPTE 421M Annex L compliant stream.</entry>
- </row>
- <row id="V4L2-PIX-FMT-VP8">
- <entry><constant>V4L2_PIX_FMT_VP8</constant></entry>
- <entry>'VP80'</entry>
- <entry>VP8 video elementary stream.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
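- <para>For illustration, a minimal sketch of how an application could request
-one of these compressed formats, here <constant>V4L2_PIX_FMT_H264</constant>,
-on a capture stream (error handling omitted; not taken from any driver):</para>
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Request an H264 elementary stream on a capture queue (sketch only). */
-static int set_h264_format(int fd, unsigned int width, unsigned int height)
-{
-        struct v4l2_format fmt;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        fmt.fmt.pix.width = width;
-        fmt.fmt.pix.height = height;
-        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
-        fmt.fmt.pix.field = V4L2_FIELD_NONE;
-
-        return ioctl(fd, VIDIOC_S_FMT, &amp;fmt);
-}
-</programlisting>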
- </section>
-
- <section id="sdr-formats">
- <title>SDR Formats</title>
-
- <para>These formats are used for <link linkend="sdr">SDR</link>
-interface only.</para>
-
- &sub-sdr-cu08;
- &sub-sdr-cu16le;
- &sub-sdr-cs08;
- &sub-sdr-cs14le;
- &sub-sdr-ru12le;
-
- </section>
-
- <section id="pixfmt-reserved">
- <title>Reserved Format Identifiers</title>
-
- <para>These formats are not defined by this specification; they
-are listed here only for reference and to avoid naming conflicts. If you
-want to register your own format, send an e-mail to the linux-media mailing
-list &v4l-ml; for inclusion in the <filename>videodev2.h</filename>
-file. If you want to share your format with other developers, add a
-link to your documentation and send a copy to the linux-media mailing list
-for inclusion in this section. If you think your format should be listed
-in a standard format section, please make a proposal on the linux-media mailing
-list.</para>
-
- <table pgwide="1" frame="none" id="reserved-formats">
- <title>Reserved Image Formats</title>
- <tgroup cols="3" align="left">
- &cs-def;
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>Details</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="V4L2-PIX-FMT-DV">
- <entry><constant>V4L2_PIX_FMT_DV</constant></entry>
- <entry>'dvsd'</entry>
- <entry>unknown</entry>
- </row>
- <row id="V4L2-PIX-FMT-ET61X251">
- <entry><constant>V4L2_PIX_FMT_ET61X251</constant></entry>
- <entry>'E625'</entry>
- <entry>Compressed format of the ET61X251 driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-HI240">
- <entry><constant>V4L2_PIX_FMT_HI240</constant></entry>
- <entry>'HI24'</entry>
- <entry><para>8 bit RGB format used by the BTTV driver.</para></entry>
- </row>
- <row id="V4L2-PIX-FMT-HM12">
- <entry><constant>V4L2_PIX_FMT_HM12</constant></entry>
- <entry>'HM12'</entry>
- <entry><para>YUV 4:2:0 format used by the
-IVTV driver, <ulink url="http://www.ivtvdriver.org/">
-http://www.ivtvdriver.org/</ulink></para><para>The format is documented in the
-kernel sources in the file <filename>Documentation/video4linux/cx2341x/README.hm12</filename>
-</para></entry>
- </row>
- <row id="V4L2-PIX-FMT-CPIA1">
- <entry><constant>V4L2_PIX_FMT_CPIA1</constant></entry>
- <entry>'CPIA'</entry>
- <entry>YUV format used by the gspca cpia1 driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-JPGL">
- <entry><constant>V4L2_PIX_FMT_JPGL</constant></entry>
- <entry>'JPGL'</entry>
- <entry>JPEG-Light format (Pegasus Lossless JPEG)
- used in Divio webcams NW 80x.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SPCA501">
- <entry><constant>V4L2_PIX_FMT_SPCA501</constant></entry>
- <entry>'S501'</entry>
- <entry>YUYV per line used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SPCA505">
- <entry><constant>V4L2_PIX_FMT_SPCA505</constant></entry>
- <entry>'S505'</entry>
- <entry>YYUV per line used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SPCA508">
- <entry><constant>V4L2_PIX_FMT_SPCA508</constant></entry>
- <entry>'S508'</entry>
- <entry>YUVY per line used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SPCA561">
- <entry><constant>V4L2_PIX_FMT_SPCA561</constant></entry>
- <entry>'S561'</entry>
- <entry>Compressed GBRG Bayer format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-PAC207">
- <entry><constant>V4L2_PIX_FMT_PAC207</constant></entry>
- <entry>'P207'</entry>
- <entry>Compressed BGGR Bayer format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-MR97310A">
- <entry><constant>V4L2_PIX_FMT_MR97310A</constant></entry>
- <entry>'M310'</entry>
- <entry>Compressed BGGR Bayer format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-JL2005BCD">
- <entry><constant>V4L2_PIX_FMT_JL2005BCD</constant></entry>
- <entry>'JL20'</entry>
- <entry>JPEG compressed RGGB Bayer format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-OV511">
- <entry><constant>V4L2_PIX_FMT_OV511</constant></entry>
- <entry>'O511'</entry>
- <entry>OV511 JPEG format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-OV518">
- <entry><constant>V4L2_PIX_FMT_OV518</constant></entry>
- <entry>'O518'</entry>
- <entry>OV518 JPEG format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-PJPG">
- <entry><constant>V4L2_PIX_FMT_PJPG</constant></entry>
- <entry>'PJPG'</entry>
- <entry>Pixart 73xx JPEG format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SE401">
- <entry><constant>V4L2_PIX_FMT_SE401</constant></entry>
- <entry>'S401'</entry>
- <entry>Compressed RGB format used by the gspca se401 driver</entry>
- </row>
- <row id="V4L2-PIX-FMT-SQ905C">
- <entry><constant>V4L2_PIX_FMT_SQ905C</constant></entry>
- <entry>'905C'</entry>
- <entry>Compressed RGGB bayer format used by the gspca driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-MJPEG">
- <entry><constant>V4L2_PIX_FMT_MJPEG</constant></entry>
- <entry>'MJPG'</entry>
- <entry>Compressed format used by the Zoran driver</entry>
- </row>
- <row id="V4L2-PIX-FMT-PWC1">
- <entry><constant>V4L2_PIX_FMT_PWC1</constant></entry>
- <entry>'PWC1'</entry>
- <entry>Compressed format of the PWC driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-PWC2">
- <entry><constant>V4L2_PIX_FMT_PWC2</constant></entry>
- <entry>'PWC2'</entry>
- <entry>Compressed format of the PWC driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SN9C10X">
- <entry><constant>V4L2_PIX_FMT_SN9C10X</constant></entry>
- <entry>'S910'</entry>
- <entry>Compressed format of the SN9C102 driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SN9C20X-I420">
- <entry><constant>V4L2_PIX_FMT_SN9C20X_I420</constant></entry>
- <entry>'S920'</entry>
- <entry>YUV 4:2:0 format of the gspca sn9c20x driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-SN9C2028">
- <entry><constant>V4L2_PIX_FMT_SN9C2028</constant></entry>
- <entry>'SONX'</entry>
- <entry>Compressed GBRG bayer format of the gspca sn9c2028 driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-STV0680">
- <entry><constant>V4L2_PIX_FMT_STV0680</constant></entry>
- <entry>'S680'</entry>
- <entry>Bayer format of the gspca stv0680 driver.</entry>
- </row>
- <row id="V4L2-PIX-FMT-WNVA">
- <entry><constant>V4L2_PIX_FMT_WNVA</constant></entry>
- <entry>'WNVA'</entry>
- <entry><para>Used by the Winnov Videum driver, <ulink
-url="http://www.thedirks.org/winnov/">
-http://www.thedirks.org/winnov/</ulink></para></entry>
- </row>
- <row id="V4L2-PIX-FMT-TM6000">
- <entry><constant>V4L2_PIX_FMT_TM6000</constant></entry>
- <entry>'TM60'</entry>
- <entry><para>Used by Trident tm6000</para></entry>
- </row>
- <row id="V4L2-PIX-FMT-CIT-YYVYUY">
- <entry><constant>V4L2_PIX_FMT_CIT_YYVYUY</constant></entry>
- <entry>'CITV'</entry>
- <entry><para>Used by xirlink CIT, found at IBM webcams.</para>
- <para>Uses one line of Y then 1 line of VYUY</para>
- </entry>
- </row>
- <row id="V4L2-PIX-FMT-KONICA420">
- <entry><constant>V4L2_PIX_FMT_KONICA420</constant></entry>
- <entry>'KONI'</entry>
- <entry><para>Used by Konica webcams.</para>
- <para>YUV420 planar in blocks of 256 pixels.</para>
- </entry>
- </row>
- <row id="V4L2-PIX-FMT-YYUV">
- <entry><constant>V4L2_PIX_FMT_YYUV</constant></entry>
- <entry>'YYUV'</entry>
- <entry>unknown</entry>
- </row>
- <row id="V4L2-PIX-FMT-Y4">
- <entry><constant>V4L2_PIX_FMT_Y4</constant></entry>
- <entry>'Y04 '</entry>
- <entry>Old 4-bit greyscale format. Only the most significant 4 bits of each byte are used,
-the other bits are set to 0.</entry>
- </row>
- <row id="V4L2-PIX-FMT-Y6">
- <entry><constant>V4L2_PIX_FMT_Y6</constant></entry>
- <entry>'Y06 '</entry>
- <entry>Old 6-bit greyscale format. Only the most significant 6 bits of each byte are used,
-the other bits are set to 0.</entry>
- </row>
- <row id="V4L2-PIX-FMT-S5C-UYVY-JPG">
- <entry><constant>V4L2_PIX_FMT_S5C_UYVY_JPG</constant></entry>
- <entry>'S5CI'</entry>
- <entry>Two-planar format used by Samsung S5C73MX cameras. The
-first plane contains interleaved JPEG and UYVY image data, followed by metadata
-in the form of an array of offsets to the UYVY data blocks. The pointer array
-immediately follows the interleaved JPEG/UYVY data; the number of entries in
-the array equals the height of the UYVY image. Each entry is a 4-byte unsigned
-integer in big endian order and is an offset to a single pixel line of the
-UYVY image. The first plane can start with either a JPEG or a UYVY data chunk. The
-size of a single UYVY block equals the UYVY image's width multiplied by 2. The
-size of a JPEG chunk depends on the image and can vary with each line.
-<para>The second plane, at an offset of 4084 bytes, contains a 4-byte offset to
-the pointer array in the first plane. This offset is followed by a 4-byte value
-indicating the size of the pointer array. All numbers in the second plane are also
-in big endian order. The remaining data in the second plane is undefined. The
-information in the second plane makes it easy to find the location of the pointer
-array, which can differ for each frame. The size of the pointer array is
-constant for a given UYVY image height.</para>
-<para>In order to extract the UYVY and JPEG frames, an application can initially set
-a data pointer to the start of the first plane and then add an offset taken from the first
-entry of the pointer array. Such a pointer indicates the start of a UYVY image
-pixel line. The whole UYVY line can be copied to a separate buffer. These steps
-should be repeated for each line, i.e. for as many lines as there are entries in the
-pointer array. Anything in between the UYVY lines is JPEG data and should be
-concatenated to form the JPEG stream (see the sketch after this table).</para>
-</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
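- <para>The following is a rough, illustrative sketch of the extraction procedure
-described in the <constant>V4L2_PIX_FMT_S5C_UYVY_JPG</constant> entry above. It
-only copies the UYVY lines; JPEG reassembly, buffer setup and bounds checking are
-omitted, and all names are arbitrary:</para>
- <programlisting>
-#include &lt;stdint.h&gt;
-#include &lt;string.h&gt;
-#include &lt;arpa/inet.h&gt;  /* ntohl() for the big endian fields */
-
-/* Copy the UYVY lines of a V4L2_PIX_FMT_S5C_UYVY_JPG frame to uyvy_out.
- * first_plane and second_plane point to the two planes of a dequeued
- * buffer; width and height describe the UYVY image. The JPEG data lies
- * between the UYVY lines and is not handled here. */
-static void s5c_copy_uyvy(const uint8_t *first_plane,
-                          const uint8_t *second_plane,
-                          unsigned int width, unsigned int height,
-                          uint8_t *uyvy_out)
-{
-        const uint8_t *offsets;
-        uint32_t tmp;
-        unsigned int line;
-
-        /* Second plane, offset 4084: offset of the pointer array in the
-         * first plane. The number of entries equals the UYVY height. */
-        memcpy(&amp;tmp, second_plane + 4084, 4);
-        offsets = first_plane + ntohl(tmp);
-
-        for (line = 0; line &lt; height; line++) {
-                memcpy(&amp;tmp, offsets + 4 * line, 4);
-                memcpy(uyvy_out + line * width * 2,
-                       first_plane + ntohl(tmp), width * 2);
-        }
-}
-</programlisting>
-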
- <table frame="none" pgwide="1" id="format-flags">
- <title>Format Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_PIX_FMT_FLAG_PREMUL_ALPHA</constant></entry>
- <entry>0x00000001</entry>
- <entry>The color values are premultiplied by the alpha channel
-value. For example, if a light blue pixel with 50% transparency was described by
-RGBA values (128, 192, 255, 128), the same pixel described with premultiplied
-colors would be described by RGBA values (64, 96, 128, 128).</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
diff --git a/Documentation/DocBook/media/v4l/planar-apis.xml b/Documentation/DocBook/media/v4l/planar-apis.xml
deleted file mode 100644
index 878ce2040488..000000000000
--- a/Documentation/DocBook/media/v4l/planar-apis.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<section id="planar-apis">
- <title>Single- and multi-planar APIs</title>
-
- <para>Some devices require data for each input or output video frame
- to be placed in discontiguous memory buffers. In such cases, one
- video frame has to be addressed using more than one memory address, i.e. one
- pointer per "plane". A plane is a sub-buffer of the current frame. For
- examples of such formats see <xref linkend="pixfmt" />.</para>
-
-  <para>Initially, the V4L2 API did not support multi-planar buffers; a set of
-  extensions was introduced to handle them. Those extensions constitute
-  what is referred to as the "multi-planar API".</para>
-
- <para>Some of the V4L2 API calls and structures are interpreted differently,
-  depending on whether the single- or the multi-planar API is being used. An application
- can choose whether to use one or the other by passing a corresponding buffer
- type to its ioctl calls. Multi-planar versions of buffer types are suffixed
- with an `_MPLANE' string. For a list of available multi-planar buffer types
- see &v4l2-buf-type;.
- </para>
-
- <section>
- <title>Multi-planar formats</title>
-    <para>The multi-planar API introduces new multi-planar formats. Those formats
- use a separate set of FourCC codes. It is important to distinguish between
- the multi-planar API and a multi-planar format. Multi-planar API calls can
- handle all single-planar formats as well (as long as they are passed in
- multi-planar API structures), while the single-planar API cannot
- handle multi-planar formats.</para>
- </section>
-
- <section>
- <title>Calls that distinguish between single and multi-planar APIs</title>
- <variablelist>
- <varlistentry>
- <term>&VIDIOC-QUERYCAP;</term>
- <listitem><para>Two additional multi-planar capabilities are added. They can
- be set together with non-multi-planar ones for devices that handle
- both single- and multi-planar formats.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>&VIDIOC-G-FMT;, &VIDIOC-S-FMT;, &VIDIOC-TRY-FMT;</term>
- <listitem><para>New structures for describing multi-planar formats are added:
- &v4l2-pix-format-mplane; and &v4l2-plane-pix-format;. Drivers may
- define new multi-planar formats, which have distinct FourCC codes from
- the existing single-planar ones.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>&VIDIOC-QBUF;, &VIDIOC-DQBUF;, &VIDIOC-QUERYBUF;</term>
- <listitem><para>A new &v4l2-plane; structure for describing planes is added.
- Arrays of this structure are passed in the new
- <structfield>m.planes</structfield> field of &v4l2-buffer;.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term>&VIDIOC-REQBUFS;</term>
- <listitem><para>Will allocate multi-planar buffers as requested.</para></listitem>
- </varlistentry>
- </variablelist>
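-    <para>For illustration, a minimal sketch of queueing a memory-mapped buffer
-    with the multi-planar API (error handling omitted; the names are arbitrary):</para>
-    <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Queue buffer 'index' on a multi-planar capture queue with
- * 'num_planes' planes. */
-static int queue_mplane_buffer(int fd, unsigned int index,
-                               unsigned int num_planes)
-{
-        struct v4l2_plane planes[VIDEO_MAX_PLANES];
-        struct v4l2_buffer buf;
-
-        memset(planes, 0, sizeof(planes));
-        memset(&amp;buf, 0, sizeof(buf));
-
-        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
-        buf.memory = V4L2_MEMORY_MMAP;
-        buf.index = index;
-        buf.m.planes = planes;
-        buf.length = num_planes;  /* number of elements in the planes array */
-
-        return ioctl(fd, VIDIOC_QBUF, &amp;buf);
-}
-</programlisting>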
- </section>
-</section>
diff --git a/Documentation/DocBook/media/v4l/remote_controllers.xml b/Documentation/DocBook/media/v4l/remote_controllers.xml
deleted file mode 100644
index b86844e80257..000000000000
--- a/Documentation/DocBook/media/v4l/remote_controllers.xml
+++ /dev/null
@@ -1,320 +0,0 @@
-<partinfo>
-<authorgroup>
-<author>
-<firstname>Mauro</firstname>
-<surname>Chehab</surname>
-<othername role="mi">Carvalho</othername>
-<affiliation><address><email>m.chehab@samsung.com</email></address></affiliation>
-<contrib>Initial version.</contrib>
-</author>
-</authorgroup>
-<copyright>
- <year>2009-2014</year>
- <holder>Mauro Carvalho Chehab</holder>
-</copyright>
-
-<revhistory>
-<!-- Put document revisions here, newest first. -->
-<revision>
-<revnumber>3.15</revnumber>
-<date>2014-02-06</date>
-<authorinitials>mcc</authorinitials>
-<revremark>Added the interface description and the RC sysfs class description.</revremark>
-</revision>
-<revision>
-<revnumber>1.0</revnumber>
-<date>2009-09-06</date>
-<authorinitials>mcc</authorinitials>
-<revremark>Initial revision</revremark>
-</revision>
-</revhistory>
-</partinfo>
-
- <title>Remote Controller API</title>
- <chapter id="remote_controllers">
-
-<title>Remote Controllers</title>
-
-<section id="Remote_controllers_Intro">
-<title>Introduction</title>
-
-<para>Currently, most analog and digital devices have an infrared input for remote controllers. Each
-manufacturer has their own type of control. It is not rare for the same manufacturer to ship different
-types of controls, depending on the device.</para>
-<para>A Remote Controller interface is mapped as a normal evdev/input interface, just like a keyboard or a mouse.
-So, it uses all the ioctls already defined for any other input device.</para>
-<para>However, remote controllers are more flexible than a normal input device, as the IR
-receiver (and/or transmitter) can be used in conjunction with a wide variety of different IR remotes.</para>
-<para>In order to allow flexibility, the Remote Controller subsystem allows controlling the
-RC-specific attributes via <link linkend="remote_controllers_sysfs_nodes">the sysfs class nodes</link>.</para>
-</section>
-
-<section id="remote_controllers_sysfs_nodes">
-<title>Remote Controller's sysfs nodes</title>
-<para>As defined in <constant>Documentation/ABI/testing/sysfs-class-rc</constant>, these are the sysfs nodes that control the Remote Controllers:</para>
-
-<section id="sys_class_rc">
-<title>/sys/class/rc/</title>
-<para>The <constant>/sys/class/rc/</constant> class sub-directory belongs to the Remote Controller
-core and provides a sysfs interface for configuring infrared remote controller receivers.
-</para>
-
-</section>
-<section id="sys_class_rc_rcN">
-<title>/sys/class/rc/rcN/</title>
-<para>A <constant>/sys/class/rc/rcN</constant> directory is created for each remote
- control receiver device where N is the number of the receiver.</para>
-
-</section>
-<section id="sys_class_rc_rcN_protocols">
-<title>/sys/class/rc/rcN/protocols</title>
-<para>Reading this file returns a list of available protocols, something like:</para>
-<para><constant>rc5 [rc6] nec jvc [sony]</constant></para>
-<para>Enabled protocols are shown in [] brackets.</para>
-<para>Writing "+proto" will add a protocol to the list of enabled protocols.</para>
-<para>Writing "-proto" will remove a protocol from the list of enabled protocols.</para>
-<para>Writing "proto" will enable only "proto".</para>
-<para>Writing "none" will disable all protocols.</para>
-<para>Write fails with EINVAL if an invalid protocol combination or unknown protocol name is used.</para>
-
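-<para>For illustration, a protocol can be enabled from an application by writing to this
-file, as in the following sketch (the receiver name and error handling are simplified):</para>
-<programlisting>
-#include &lt;stdio.h&gt;
-
-/* Enable an additional protocol, e.g. "rc5", on a receiver such as "rc0". */
-static int enable_protocol(const char *rc, const char *proto)
-{
-        char path[128];
-        FILE *f;
-        int ret;
-
-        snprintf(path, sizeof(path), "/sys/class/rc/%s/protocols", rc);
-        f = fopen(path, "w");
-        if (!f)
-                return -1;
-        ret = fprintf(f, "+%s\n", proto);  /* "+proto" adds to the enabled set */
-        fclose(f);
-        return ret > 0 ? 0 : -1;
-}
-</programlisting>
-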
-</section>
-<section id="sys_class_rc_rcN_filter">
-<title>/sys/class/rc/rcN/filter</title>
-<para>Sets the scancode filter expected value.</para>
-<para>Use in combination with <constant>/sys/class/rc/rcN/filter_mask</constant> to set the
-expected value of the bits set in the filter mask.
-If the hardware supports it then scancodes which do not match
-the filter will be ignored. Otherwise the write will fail with
-an error.</para>
-<para>This value may be reset to 0 if the current protocol is altered.</para>
-
-</section>
-<section id="sys_class_rc_rcN_filter_mask">
-<title>/sys/class/rc/rcN/filter_mask</title>
-<para>Sets the scancode filter mask of bits to compare.
-Use in combination with <constant>/sys/class/rc/rcN/filter</constant> to set the bits
-of the scancode which should be compared against the expected
-value. A value of 0 disables the filter to allow all valid
-scancodes to be processed.</para>
-<para>If the hardware supports it then scancodes which do not match
-the filter will be ignored. Otherwise the write will fail with
-an error.</para>
-<para>This value may be reset to 0 if the current protocol is altered.</para>
-
-</section>
-<section id="sys_class_rc_rcN_wakeup_protocols">
-<title>/sys/class/rc/rcN/wakeup_protocols</title>
-<para>Reading this file returns a list of available protocols to use for the
-wakeup filter, something like:</para>
-<para><constant>rc5 rc6 nec jvc [sony]</constant></para>
-<para>The enabled wakeup protocol is shown in [] brackets.</para>
-<para>Writing "+proto" will add a protocol to the list of enabled wakeup
-protocols.</para>
-<para>Writing "-proto" will remove a protocol from the list of enabled wakeup
-protocols.</para>
-<para>Writing "proto" will use "proto" for wakeup events.</para>
-<para>Writing "none" will disable wakeup.</para>
-<para>Write fails with EINVAL if an invalid protocol combination or unknown
-protocol name is used, or if wakeup is not supported by the hardware.</para>
-
-</section>
-<section id="sys_class_rc_rcN_wakeup_filter">
-<title>/sys/class/rc/rcN/wakeup_filter</title>
-<para>Sets the scancode wakeup filter expected value.
-Use in combination with <constant>/sys/class/rc/rcN/wakeup_filter_mask</constant> to
-set the expected value of the bits set in the wakeup filter mask
-to trigger a system wake event.</para>
-<para>If the hardware supports it and wakeup_filter_mask is not 0 then
-scancodes which match the filter will wake the system from e.g.
-suspend to RAM or power off.
-Otherwise the write will fail with an error.</para>
-<para>This value may be reset to 0 if the wakeup protocol is altered.</para>
-
-</section>
-<section id="sys_class_rc_rcN_wakeup_filter_mask">
-<title>/sys/class/rc/rcN/wakeup_filter_mask</title>
-<para>Sets the scancode wakeup filter mask of bits to compare.
-Use in combination with <constant>/sys/class/rc/rcN/wakeup_filter</constant> to set
-the bits of the scancode which should be compared against the
-expected value to trigger a system wake event.</para>
-<para>If the hardware supports it and wakeup_filter_mask is not 0 then
-scancodes which match the filter will wake the system from e.g.
-suspend to RAM or power off.
-Otherwise the write will fail with an error.</para>
-<para>This value may be reset to 0 if the wakeup protocol is altered.</para>
-</section>
-</section>
-
-<section id="Remote_controllers_tables">
-<title>Remote controller tables</title>
-<para>Unfortunately, for several years there was no effort to create uniform IR keycodes for
-different devices. This caused the same IR keyname to be mapped completely differently on
-different IR devices. Due to that, the V4L2 API now specifies a standard for mapping media keys on IR.</para>
-<para>This standard should be used by both V4L/DVB drivers and userspace applications.</para>
-<para>The modules register the remote as a keyboard within the Linux input layer. This means that the IR key strokes will look like normal keyboard key strokes (if CONFIG_INPUT_KEYBOARD is enabled). Using the event devices (CONFIG_INPUT_EVDEV), it is possible for applications to access the remote via the /dev/input/event devices.</para>
-
-<table pgwide="1" frame="none" id="rc_standard_keymap">
-<title>IR default keymapping</title>
-<tgroup cols="3">
-&cs-str;
-<tbody valign="top">
-<row>
-<entry>Key code</entry>
-<entry>Meaning</entry>
-<entry>Key examples on IR</entry>
-</row>
-
-<row><entry><emphasis role="bold">Numeric keys</emphasis></entry></row>
-
-<row><entry><constant>KEY_0</constant></entry><entry>Keyboard digit 0</entry><entry>0</entry></row>
-<row><entry><constant>KEY_1</constant></entry><entry>Keyboard digit 1</entry><entry>1</entry></row>
-<row><entry><constant>KEY_2</constant></entry><entry>Keyboard digit 2</entry><entry>2</entry></row>
-<row><entry><constant>KEY_3</constant></entry><entry>Keyboard digit 3</entry><entry>3</entry></row>
-<row><entry><constant>KEY_4</constant></entry><entry>Keyboard digit 4</entry><entry>4</entry></row>
-<row><entry><constant>KEY_5</constant></entry><entry>Keyboard digit 5</entry><entry>5</entry></row>
-<row><entry><constant>KEY_6</constant></entry><entry>Keyboard digit 6</entry><entry>6</entry></row>
-<row><entry><constant>KEY_7</constant></entry><entry>Keyboard digit 7</entry><entry>7</entry></row>
-<row><entry><constant>KEY_8</constant></entry><entry>Keyboard digit 8</entry><entry>8</entry></row>
-<row><entry><constant>KEY_9</constant></entry><entry>Keyboard digit 9</entry><entry>9</entry></row>
-
-<row><entry><emphasis role="bold">Movie play control</emphasis></entry></row>
-
-<row><entry><constant>KEY_FORWARD</constant></entry><entry>Instantly advance in time</entry><entry>&gt;&gt; / FORWARD</entry></row>
-<row><entry><constant>KEY_BACK</constant></entry><entry>Instantly go back in time</entry><entry>&lt;&lt;&lt; / BACK</entry></row>
-<row><entry><constant>KEY_FASTFORWARD</constant></entry><entry>Play movie faster</entry><entry>&gt;&gt;&gt; / FORWARD</entry></row>
-<row><entry><constant>KEY_REWIND</constant></entry><entry>Play movie back</entry><entry>REWIND / BACKWARD</entry></row>
-<row><entry><constant>KEY_NEXT</constant></entry><entry>Select next chapter / sub-chapter / interval</entry><entry>NEXT / SKIP</entry></row>
-<row><entry><constant>KEY_PREVIOUS</constant></entry><entry>Select previous chapter / sub-chapter / interval</entry><entry>&lt;&lt; / PREV / PREVIOUS</entry></row>
-<row><entry><constant>KEY_AGAIN</constant></entry><entry>Repeat the video or a video interval</entry><entry>REPEAT / LOOP / RECALL</entry></row>
-<row><entry><constant>KEY_PAUSE</constant></entry><entry>Pause stream</entry><entry>PAUSE / FREEZE</entry></row>
-<row><entry><constant>KEY_PLAY</constant></entry><entry>Play movie at the normal timeshift</entry><entry>NORMAL TIMESHIFT / LIVE / &gt;</entry></row>
-<row><entry><constant>KEY_PLAYPAUSE</constant></entry><entry>Alternate between play and pause</entry><entry>PLAY / PAUSE</entry></row>
-<row><entry><constant>KEY_STOP</constant></entry><entry>Stop stream</entry><entry>STOP</entry></row>
-<row><entry><constant>KEY_RECORD</constant></entry><entry>Start/stop recording stream</entry><entry>CAPTURE / REC / RECORD/PAUSE</entry></row>
-<row><entry><constant>KEY_CAMERA</constant></entry><entry>Take a picture of the image</entry><entry>CAMERA ICON / CAPTURE / SNAPSHOT</entry></row>
-<row><entry><constant>KEY_SHUFFLE</constant></entry><entry>Enable shuffle mode</entry><entry>SHUFFLE</entry></row>
-<row><entry><constant>KEY_TIME</constant></entry><entry>Activate time shift mode</entry><entry>TIME SHIFT</entry></row>
-<row><entry><constant>KEY_TITLE</constant></entry><entry>Allow changing the chapter</entry><entry>CHAPTER</entry></row>
-<row><entry><constant>KEY_SUBTITLE</constant></entry><entry>Allow changing the subtitle</entry><entry>SUBTITLE</entry></row>
-
-<row><entry><emphasis role="bold">Image control</emphasis></entry></row>
-
-<row><entry><constant>KEY_BRIGHTNESSDOWN</constant></entry><entry>Decrease Brightness</entry><entry>BRIGHTNESS DECREASE</entry></row>
-<row><entry><constant>KEY_BRIGHTNESSUP</constant></entry><entry>Increase Brightness</entry><entry>BRIGHTNESS INCREASE</entry></row>
-
-<row><entry><constant>KEY_ANGLE</constant></entry><entry>Switch video camera angle (on videos with more than one angle stored)</entry><entry>ANGLE / SWAP</entry></row>
-<row><entry><constant>KEY_EPG</constant></entry><entry>Open the Electronic Program Guide (EPG)</entry><entry>EPG / GUIDE</entry></row>
-<row><entry><constant>KEY_TEXT</constant></entry><entry>Activate/change closed caption mode</entry><entry>CLOSED CAPTION/TELETEXT / DVD TEXT / TELETEXT / TTX</entry></row>
-
-<row><entry><emphasis role="bold">Audio control</emphasis></entry></row>
-
-<row><entry><constant>KEY_AUDIO</constant></entry><entry>Change audio source</entry><entry>AUDIO SOURCE / AUDIO / MUSIC</entry></row>
-<row><entry><constant>KEY_MUTE</constant></entry><entry>Mute/unmute audio</entry><entry>MUTE / DEMUTE / UNMUTE</entry></row>
-<row><entry><constant>KEY_VOLUMEDOWN</constant></entry><entry>Decrease volume</entry><entry>VOLUME- / VOLUME DOWN</entry></row>
-<row><entry><constant>KEY_VOLUMEUP</constant></entry><entry>Increase volume</entry><entry>VOLUME+ / VOLUME UP</entry></row>
-<row><entry><constant>KEY_MODE</constant></entry><entry>Change sound mode</entry><entry>MONO/STEREO</entry></row>
-<row><entry><constant>KEY_LANGUAGE</constant></entry><entry>Select Language</entry><entry>1ST / 2ND LANGUAGE / DVD LANG / MTS/SAP / MTS SEL</entry></row>
-
-<row><entry><emphasis role="bold">Channel control</emphasis></entry></row>
-
-<row><entry><constant>KEY_CHANNEL</constant></entry><entry>Go to the next favorite channel</entry><entry>ALT / CHANNEL / CH SURFING / SURF / FAV</entry></row>
-<row><entry><constant>KEY_CHANNELDOWN</constant></entry><entry>Decrease channel sequentially</entry><entry>CHANNEL - / CHANNEL DOWN / DOWN</entry></row>
-<row><entry><constant>KEY_CHANNELUP</constant></entry><entry>Increase channel sequentially</entry><entry>CHANNEL + / CHANNEL UP / UP</entry></row>
-<row><entry><constant>KEY_DIGITS</constant></entry><entry>Use more than one digit for channel</entry><entry>PLUS / 100/ 1xx / xxx / -/-- / Single Double Triple Digit</entry></row>
-<row><entry><constant>KEY_SEARCH</constant></entry><entry>Start channel autoscan</entry><entry>SCAN / AUTOSCAN</entry></row>
-
-<row><entry><emphasis role="bold">Colored keys</emphasis></entry></row>
-
-<row><entry><constant>KEY_BLUE</constant></entry><entry>IR Blue key</entry><entry>BLUE</entry></row>
-<row><entry><constant>KEY_GREEN</constant></entry><entry>IR Green Key</entry><entry>GREEN</entry></row>
-<row><entry><constant>KEY_RED</constant></entry><entry>IR Red key</entry><entry>RED</entry></row>
-<row><entry><constant>KEY_YELLOW</constant></entry><entry>IR Yellow key</entry><entry> YELLOW</entry></row>
-
-<row><entry><emphasis role="bold">Media selection</emphasis></entry></row>
-
-<row><entry><constant>KEY_CD</constant></entry><entry>Change input source to Compact Disc</entry><entry>CD</entry></row>
-<row><entry><constant>KEY_DVD</constant></entry><entry>Change input to DVD</entry><entry>DVD / DVD MENU</entry></row>
-<row><entry><constant>KEY_EJECTCLOSECD</constant></entry><entry>Open/close the CD/DVD player</entry><entry>-&gt; ) / CLOSE / OPEN</entry></row>
-
-<row><entry><constant>KEY_MEDIA</constant></entry><entry>Turn on/off Media application</entry><entry>PC/TV / TURN ON/OFF APP</entry></row>
-<row><entry><constant>KEY_PC</constant></entry><entry>Selects from TV to PC</entry><entry>PC</entry></row>
-<row><entry><constant>KEY_RADIO</constant></entry><entry>Put into AM/FM radio mode</entry><entry>RADIO / TV/FM / TV/RADIO / FM / FM/RADIO</entry></row>
-<row><entry><constant>KEY_TV</constant></entry><entry>Select tv mode</entry><entry>TV / LIVE TV</entry></row>
-<row><entry><constant>KEY_TV2</constant></entry><entry>Select Cable mode</entry><entry>AIR/CBL</entry></row>
-<row><entry><constant>KEY_VCR</constant></entry><entry>Select VCR mode</entry><entry>VCR MODE / DTR</entry></row>
-<row><entry><constant>KEY_VIDEO</constant></entry><entry>Alternate between input modes</entry><entry>SOURCE / SELECT / DISPLAY / SWITCH INPUTS / VIDEO</entry></row>
-
-<row><entry><emphasis role="bold">Power control</emphasis></entry></row>
-
-<row><entry><constant>KEY_POWER</constant></entry><entry>Turn on/off computer</entry><entry>SYSTEM POWER / COMPUTER POWER</entry></row>
-<row><entry><constant>KEY_POWER2</constant></entry><entry>Turn on/off application</entry><entry>TV ON/OFF / POWER</entry></row>
-<row><entry><constant>KEY_SLEEP</constant></entry><entry>Activate sleep timer</entry><entry>SLEEP / SLEEP TIMER</entry></row>
-<row><entry><constant>KEY_SUSPEND</constant></entry><entry>Put computer into suspend mode</entry><entry>STANDBY / SUSPEND</entry></row>
-
-<row><entry><emphasis role="bold">Window control</emphasis></entry></row>
-
-<row><entry><constant>KEY_CLEAR</constant></entry><entry>Stop stream and return to the default input video/audio</entry><entry>CLEAR / RESET / BOSS KEY</entry></row>
-<row><entry><constant>KEY_CYCLEWINDOWS</constant></entry><entry>Minimize windows and move to the next one</entry><entry>ALT-TAB / MINIMIZE / DESKTOP</entry></row>
-<row><entry><constant>KEY_FAVORITES</constant></entry><entry>Open the favorites stream window</entry><entry>TV WALL / Favorites</entry></row>
-<row><entry><constant>KEY_MENU</constant></entry><entry>Call application menu</entry><entry>2ND CONTROLS (USA: MENU) / DVD/MENU / SHOW/HIDE CTRL</entry></row>
-<row><entry><constant>KEY_NEW</constant></entry><entry>Open/Close Picture in Picture</entry><entry>PIP</entry></row>
-<row><entry><constant>KEY_OK</constant></entry><entry>Send a confirmation code to application</entry><entry>OK / ENTER / RETURN</entry></row>
-<row><entry><constant>KEY_SCREEN</constant></entry><entry>Select screen aspect ratio</entry><entry>4:3 16:9 SELECT</entry></row>
-<row><entry><constant>KEY_ZOOM</constant></entry><entry>Put device into zoom/full screen mode</entry><entry>ZOOM / FULL SCREEN / ZOOM+ / HIDE PANNEL / SWITCH</entry></row>
-
-<row><entry><emphasis role="bold">Navigation keys</emphasis></entry></row>
-
-<row><entry><constant>KEY_ESC</constant></entry><entry>Cancel current operation</entry><entry>CANCEL / BACK</entry></row>
-<row><entry><constant>KEY_HELP</constant></entry><entry>Open a Help window</entry><entry>HELP</entry></row>
-<row><entry><constant>KEY_HOMEPAGE</constant></entry><entry>Navigate to Homepage</entry><entry>HOME</entry></row>
-<row><entry><constant>KEY_INFO</constant></entry><entry>Open On Screen Display</entry><entry>DISPLAY INFORMATION / OSD</entry></row>
-<row><entry><constant>KEY_WWW</constant></entry><entry>Open the default browser</entry><entry>WEB</entry></row>
-<row><entry><constant>KEY_UP</constant></entry><entry>Up key</entry><entry>UP</entry></row>
-<row><entry><constant>KEY_DOWN</constant></entry><entry>Down key</entry><entry>DOWN</entry></row>
-<row><entry><constant>KEY_LEFT</constant></entry><entry>Left key</entry><entry>LEFT</entry></row>
-<row><entry><constant>KEY_RIGHT</constant></entry><entry>Right key</entry><entry>RIGHT</entry></row>
-
-<row><entry><emphasis role="bold">Miscellaneous keys</emphasis></entry></row>
-
-<row><entry><constant>KEY_DOT</constant></entry><entry>Return a dot</entry><entry>.</entry></row>
-<row><entry><constant>KEY_FN</constant></entry><entry>Select a function</entry><entry>FUNCTION</entry></row>
-
-</tbody>
-</tgroup>
-</table>
-
-<para>It should be noted that some fundamental keys are sometimes missing on cheaper IR's. Due to that, it is recommended to:</para>
-
-<table pgwide="1" frame="none" id="rc_keymap_notes">
-<title>Notes</title>
-<tgroup cols="1">
-&cs-str;
-<tbody valign="top">
-<row>
-<entry>On simpler IR's, without separate channel keys, you need to map UP as <constant>KEY_CHANNELUP</constant></entry>
-</row><row>
-<entry>On simpler IR's, without separate channel keys, you need to map DOWN as <constant>KEY_CHANNELDOWN</constant></entry>
-</row><row>
-<entry>On simpler IR's, without separate volume keys, you need to map LEFT as <constant>KEY_VOLUMEDOWN</constant></entry>
-</row><row>
-<entry>On simpler IR's, without separate volume keys, you need to map RIGHT as <constant>KEY_VOLUMEUP</constant></entry>
-</row>
-</tbody>
-</tgroup>
-</table>
-
-</section>
-
-<section id="Remote_controllers_table_change">
-<title>Changing default Remote Controller mappings</title>
-<para>The event interface provides two ioctls to be used against
-the /dev/input/event device, to allow changing the default
-keymapping.</para>
-
-<para>This program demonstrates how to replace the keymap tables.</para>
-&sub-keytable-c;
-</section>
-
-&sub-lirc_device_interface;
-</chapter>
diff --git a/Documentation/DocBook/media/v4l/selection-api.xml b/Documentation/DocBook/media/v4l/selection-api.xml
deleted file mode 100644
index b764cba150d1..000000000000
--- a/Documentation/DocBook/media/v4l/selection-api.xml
+++ /dev/null
@@ -1,317 +0,0 @@
-<section id="selection-api">
-
- <title>API for cropping, composing and scaling</title>
-
- <section>
- <title>Introduction</title>
-
-<para>Some video capture devices can sample a subsection of a picture and
-shrink or enlarge it to an image of arbitrary size. The devices can then
-insert the image into a larger one. Some video output devices can crop part of an
-input image, scale it up or down and insert it at an arbitrary scan line and
-horizontal offset into a video signal. We call these abilities cropping,
-scaling and composing.</para>
-
-<para>On a video <emphasis>capture</emphasis> device the source is a video
-signal, and the cropping target determines the area actually sampled. The sink
-is an image stored in a memory buffer. The composing area specifies which part
-of the buffer is actually written to by the hardware. </para>
-
-<para>On a video <emphasis>output</emphasis> device the source is an image in a
-memory buffer, and the cropping target is a part of an image to be shown on a
-display. The sink is the display or the graphics screen. The application may
-select the part of display where the image should be displayed. The size and
-position of such a window is controlled by the compose target.</para>
-
-<para>Rectangles for all cropping and composing targets are defined even if the
-device supports neither cropping nor composing. Their size and position
-will be fixed in such a case. If the device does not support scaling then the
-cropping and composing rectangles have the same size.</para>
-
- </section>
-
- <section>
- <title>Selection targets</title>
-
- <para>
- <figure id="sel-targets-capture">
- <title>Cropping and composing targets</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="selection.png" format="PNG" />
- </imageobject>
- <textobject>
- <phrase>Targets used by a cropping, composing and scaling
- process</phrase>
- </textobject>
- </mediaobject>
- </figure>
- </para>
-
- <para>See <xref linkend="v4l2-selection-targets" /> for more
- information.</para>
- </section>
-
- <section>
-
- <title>Configuration</title>
-
-<para>Applications can use the <link linkend="vidioc-g-selection">selection
-API</link> to select an area in a video signal or a buffer, and to query for
-default settings and hardware limits.</para>
-
-<para>Video hardware can have various cropping, composing and scaling
-limitations. It may only scale up or down, support only discrete scaling
-factors, or have different scaling abilities in the horizontal and vertical
-directions. Also it may not support scaling at all. At the same time the
-cropping/composing rectangles may have to be aligned, and both the source and
-the sink may have arbitrary upper and lower size limits. Therefore, as usual,
-drivers are expected to adjust the requested parameters and return the actual
-values selected. An application can control the rounding behaviour using <link
-linkend="v4l2-selection-flags"> constraint flags </link>.</para>
-
- <section>
-
- <title>Configuration of video capture</title>
-
-<para>See figure <xref linkend="sel-targets-capture" /> for examples of the
-selection targets available for a video capture device. It is recommended to
-configure the cropping targets before the composing targets.</para>
-
-<para>The range of coordinates of the top left corner, width and height of
-areas that can be sampled is given by the <constant>V4L2_SEL_TGT_CROP_BOUNDS</constant>
-target. It is recommended for the driver developers to put the
-top/left corner at position <constant>(0,0)</constant>. The rectangle's
-coordinates are expressed in pixels.</para>
-
-<para>The top left corner, width and height of the source rectangle, that is
-the area actually sampled, is given by the <constant>V4L2_SEL_TGT_CROP</constant>
-target. It uses the same coordinate system as <constant>V4L2_SEL_TGT_CROP_BOUNDS</constant>.
-The active cropping area must lie completely inside the capture boundaries. The
-driver may further adjust the requested size and/or position according to hardware
-limitations.</para>
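-
-<para>As a minimal sketch, the crop bounds can be queried first and a centered
-crop rectangle covering a quarter of that area requested afterwards (the
-quartering is an arbitrary example choice):</para>
- <programlisting>
-
-	&v4l2-selection; sel = {
-		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
-		.target = V4L2_SEL_TGT_CROP_BOUNDS,
-	};
-	ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;sel);
-	if (ret)
-		exit(-1);
-	/* sample the central quarter of the bounds rectangle */
-	sel.r.width /= 2;
-	sel.r.height /= 2;
-	sel.r.left += sel.r.width / 2;
-	sel.r.top += sel.r.height / 2;
-	sel.target = V4L2_SEL_TGT_CROP;
-	ret = ioctl(fd, &VIDIOC-S-SELECTION;, &amp;sel);
-	if (ret)
-		exit(-1);
-
- </programlisting>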
-
-<para>Each capture device has a default source rectangle, given by the
-<constant>V4L2_SEL_TGT_CROP_DEFAULT</constant> target. This rectangle shall
-cover what the driver writer considers the complete picture. Drivers shall set
-the active crop rectangle to the default when the driver is first loaded, but
-not later.</para>
-
-<para>The composing targets refer to a memory buffer. The limits of composing
-coordinates are obtained using <constant>V4L2_SEL_TGT_COMPOSE_BOUNDS</constant>.
-All coordinates are expressed in pixels. The rectangle's top/left
-corner must be located at position <constant>(0,0)</constant>. The width and
-height are equal to the image size set by <constant>VIDIOC_S_FMT</constant>.
-</para>
-
-<para>The part of a buffer into which the image is inserted by the hardware is
-controlled by the <constant>V4L2_SEL_TGT_COMPOSE</constant> target.
-The rectangle's coordinates are also expressed in the same coordinate system as
-the bounds rectangle. The composing rectangle must lie completely inside the
-bounds rectangle. The driver must adjust the composing rectangle to fit within the
-bounding limits. Moreover, the driver can perform other adjustments according
-to hardware limitations. The application can control rounding behaviour using
-<link linkend="v4l2-selection-flags"> constraint flags</link>.</para>
-
-<para>For capture devices the default composing rectangle is queried using
-<constant>V4L2_SEL_TGT_COMPOSE_DEFAULT</constant>. It is usually equal to the
-bounding rectangle.</para>
-
-<para>The part of a buffer that is modified by the hardware is given by
-<constant>V4L2_SEL_TGT_COMPOSE_PADDED</constant>. It contains all pixels
-defined using <constant>V4L2_SEL_TGT_COMPOSE</constant> plus all
-padding data modified by hardware during insertion process. All pixels outside
-this rectangle <emphasis>must not</emphasis> be changed by the hardware. The
-content of pixels that lie inside the padded area but outside the active area is
-undefined. The application can use the padded and active rectangles to detect
-where the rubbish pixels are located and remove them if needed.</para>
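-
-<para>A minimal sketch of such a check (the padding_right and padding_bottom
-variable names are only illustrative):</para>
- <programlisting>
-
-	&v4l2-selection; compose = {
-		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
-		.target = V4L2_SEL_TGT_COMPOSE,
-	};
-	&v4l2-selection; padded = {
-		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
-		.target = V4L2_SEL_TGT_COMPOSE_PADDED,
-	};
-	unsigned int padding_right, padding_bottom;
-
-	ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;compose);
-	if (ret)
-		exit(-1);
-	ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;padded);
-	if (ret)
-		exit(-1);
-	/* columns right of and rows below the active area that may hold rubbish */
-	padding_right = (padded.r.left + padded.r.width) -
-			(compose.r.left + compose.r.width);
-	padding_bottom = (padded.r.top + padded.r.height) -
-			 (compose.r.top + compose.r.height);
-
- </programlisting>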
-
- </section>
-
- <section>
-
- <title>Configuration of video output</title>
-
-<para>For output devices, targets and ioctls are used similarly to the video
-capture case. The <emphasis>composing</emphasis> rectangle refers to the
-insertion of an image into a video signal. The cropping rectangles refer to a
-memory buffer. It is recommended to configure the composing targets before
-the cropping targets.</para>
-
-<para>The cropping targets refer to the memory buffer that contains an image to
-be inserted into a video signal or graphical screen. The limits of cropping
-coordinates are obtained using <constant>V4L2_SEL_TGT_CROP_BOUNDS</constant>.
-All coordinates are expressed in pixels. The top/left corner is always point
-<constant>(0,0)</constant>. The width and height are equal to the image size
-specified using the <constant>VIDIOC_S_FMT</constant> ioctl.</para>
-
-<para>The top left corner, width and height of the source rectangle, that is
-the area from which image data are processed by the hardware, is given by the
-<constant>V4L2_SEL_TGT_CROP</constant>. Its coordinates are expressed
-in the same coordinate system as the bounds rectangle. The active cropping
-area must lie completely inside the crop boundaries and the driver may further
-adjust the requested size and/or position according to hardware
-limitations.</para>
-
-<para>For output devices the default cropping rectangle is queried using
-<constant>V4L2_SEL_TGT_CROP_DEFAULT</constant>. It is usually equal to the
-bounding rectangle.</para>
-
-<para>The part of a video signal or graphics display where the image is
-inserted by the hardware is controlled by the <constant>V4L2_SEL_TGT_COMPOSE</constant>
-target. The rectangle's coordinates are expressed in pixels. The composing
-rectangle must lie completely inside the bounds rectangle. The driver must
-adjust the area to fit to the bounding limits. Moreover, the driver can
-perform other adjustments according to hardware limitations.</para>
-
-<para>The device has a default composing rectangle, given by the
-<constant>V4L2_SEL_TGT_COMPOSE_DEFAULT</constant> target. This rectangle shall cover what
-the driver writer considers the complete picture. It is recommended for the
-driver developers to put the top/left corner at position <constant>(0,0)</constant>.
-Drivers shall set the active composing rectangle to the default
-one when the driver is first loaded.</para>
-
-<para>The devices may introduce additional content into the video signal beyond
-the image taken from memory buffers, for example borders around the image.
-However, such a padded area is a driver-dependent feature not covered by this
-document. Driver developers are encouraged to keep the padded rectangle equal
-to the active one.
-The padded target is accessed by the <constant>V4L2_SEL_TGT_COMPOSE_PADDED</constant>
-identifier. It must contain all pixels from the <constant>V4L2_SEL_TGT_COMPOSE</constant>
-target.</para>
-
- </section>
-
- <section>
-
- <title>Scaling control</title>
-
-<para>An application can detect if scaling is performed by comparing the width
-and the height of rectangles obtained using <constant>V4L2_SEL_TGT_CROP</constant>
-and <constant>V4L2_SEL_TGT_COMPOSE</constant> targets. If
-these are not equal then scaling is applied. The application can compute
-the scaling ratios using these values.</para>
-
- </section>
-
- </section>
-
- <section>
-
- <title>Comparison with old cropping API</title>
-
-<para>The selection API was introduced to cope with deficiencies of the previous
-<link linkend="crop">API</link>, which was designed to control simple capture
-devices. Later the cropping API was adopted by video output drivers. The ioctls
-are used to select a part of the display where the video signal is inserted. It
-should be considered an API abuse because the described operation is
-actually composing. The selection API makes a clear distinction between
-composing and cropping operations by setting the appropriate targets. The V4L2
-API lacks any support for composing to and cropping from an image inside a
-memory buffer. An application could configure a capture device to fill only a
-part of an image by abusing the V4L2 API. Cropping a smaller image from a larger
-one is achieved by setting the field
-&v4l2-pix-format;<structfield>::bytesperline</structfield>. Introducing an image offset
-could be done by modifying the field &v4l2-buffer;<structfield>::m_userptr</structfield>
-before calling <constant>VIDIOC_QBUF</constant>. Those
-operations should be avoided because they are not portable (endianness), and do
-not work for macroblock and Bayer formats or for mmap buffers. The selection API
-deals with configuration of buffer cropping/composing in a clear, intuitive and
-portable way. Next, with the selection API the concepts of the padded target
-and constraint flags are introduced. Finally, &v4l2-crop; and &v4l2-cropcap;
-have no reserved fields, so there is no way to extend their functionality.
-The new &v4l2-selection; provides plenty of room for future
-extensions. Driver developers are encouraged to implement only the selection API;
-the former cropping API can then be emulated using the new one.</para>
-
- </section>
-
- <section>
- <title>Examples</title>
- <example>
- <title>Resetting the cropping parameters</title>
-
-	<para>(A video capture device is assumed; change
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> for other devices; change the target to
-one of the <constant>V4L2_SEL_TGT_COMPOSE_*</constant> family to configure the composing
-area.)</para>
-
- <programlisting>
-
- &v4l2-selection; sel = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .target = V4L2_SEL_TGT_CROP_DEFAULT,
- };
- ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;sel);
- if (ret)
- exit(-1);
- sel.target = V4L2_SEL_TGT_CROP;
- ret = ioctl(fd, &VIDIOC-S-SELECTION;, &amp;sel);
- if (ret)
- exit(-1);
-
- </programlisting>
- </example>
-
- <example>
- <title>Simple downscaling</title>
-	<para>Setting a composing area on an output, with a size of <emphasis>at most
-</emphasis> half of the limit, placed at the center of the display.</para>
- <programlisting>
-
- &v4l2-selection; sel = {
- .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
- .target = V4L2_SEL_TGT_COMPOSE_BOUNDS,
- };
- struct v4l2_rect r;
-
- ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;sel);
- if (ret)
- exit(-1);
- /* setting smaller compose rectangle */
- r.width = sel.r.width / 2;
- r.height = sel.r.height / 2;
- r.left = sel.r.width / 4;
- r.top = sel.r.height / 4;
- sel.r = r;
- sel.target = V4L2_SEL_TGT_COMPOSE;
- sel.flags = V4L2_SEL_FLAG_LE;
- ret = ioctl(fd, &VIDIOC-S-SELECTION;, &amp;sel);
- if (ret)
- exit(-1);
-
- </programlisting>
- </example>
-
- <example>
- <title>Querying for scaling factors</title>
- <para>A video output device is assumed; change
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> for other devices</para>
- <programlisting>
-
- &v4l2-selection; compose = {
- .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
- .target = V4L2_SEL_TGT_COMPOSE,
- };
- &v4l2-selection; crop = {
- .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
- .target = V4L2_SEL_TGT_CROP,
- };
- double hscale, vscale;
-
- ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;compose);
- if (ret)
- exit(-1);
- ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;crop);
- if (ret)
- exit(-1);
-
- /* computing scaling factors */
- hscale = (double)compose.r.width / crop.r.width;
- vscale = (double)compose.r.height / crop.r.height;
-
- </programlisting>
- </example>
-
- </section>
-
-</section>
diff --git a/Documentation/DocBook/media/v4l/selections-common.xml b/Documentation/DocBook/media/v4l/selections-common.xml
deleted file mode 100644
index d6d56fb6f9c0..000000000000
--- a/Documentation/DocBook/media/v4l/selections-common.xml
+++ /dev/null
@@ -1,180 +0,0 @@
-<section id="v4l2-selections-common">
-
- <title>Common selection definitions</title>
-
- <para>While the <link linkend="selection-api">V4L2 selection
- API</link> and <link linkend="v4l2-subdev-selections">V4L2 subdev
- selection APIs</link> are very similar, there's one fundamental
-  difference between the two. On the sub-device API, the selection
- rectangle refers to the media bus format, and is bound to a
- sub-device's pad. On the V4L2 interface the selection rectangles
- refer to the in-memory pixel format.</para>
-
- <para>This section defines the common definitions of the
- selection interfaces on the two APIs.</para>
-
- <section id="v4l2-selection-targets">
-
- <title>Selection targets</title>
-
-    <para>The precise meaning of the selection targets may depend
-    on which of the two interfaces they are used with.</para>
-
- <table pgwide="1" frame="none" id="v4l2-selection-targets-table">
- <title>Selection target definitions</title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- &cs-def;
- <thead>
- <row rowsep="1">
- <entry align="left">Target name</entry>
- <entry align="left">id</entry>
- <entry align="left">Definition</entry>
- <entry align="left">Valid for V4L2</entry>
- <entry align="left">Valid for V4L2 subdev</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SEL_TGT_CROP</constant></entry>
- <entry>0x0000</entry>
- <entry>Crop rectangle. Defines the cropped area.</entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_CROP_DEFAULT</constant></entry>
- <entry>0x0001</entry>
- <entry>Suggested cropping rectangle that covers the "whole picture".</entry>
- <entry>Yes</entry>
- <entry>No</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_CROP_BOUNDS</constant></entry>
- <entry>0x0002</entry>
- <entry>Bounds of the crop rectangle. All valid crop
- rectangles fit inside the crop bounds rectangle.
- </entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_NATIVE_SIZE</constant></entry>
- <entry>0x0003</entry>
- <entry>The native size of the device, e.g. a sensor's
- pixel array. <structfield>left</structfield> and
- <structfield>top</structfield> fields are zero for this
- target. Setting the native size will generally only make
- sense for memory to memory devices where the software can
- create a canvas of a given size in which for example a
- video frame can be composed. In that case
- V4L2_SEL_TGT_NATIVE_SIZE can be used to configure the size
- of that canvas.
- </entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE</constant></entry>
- <entry>0x0100</entry>
- <entry>Compose rectangle. Used to configure scaling
- and composition.</entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_DEFAULT</constant></entry>
- <entry>0x0101</entry>
- <entry>Suggested composition rectangle that covers the "whole picture".</entry>
- <entry>Yes</entry>
- <entry>No</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_BOUNDS</constant></entry>
- <entry>0x0102</entry>
- <entry>Bounds of the compose rectangle. All valid compose
- rectangles fit inside the compose bounds rectangle.</entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_PADDED</constant></entry>
- <entry>0x0103</entry>
- <entry>The active area and all padding pixels that are inserted or
- modified by hardware.</entry>
- <entry>Yes</entry>
- <entry>No</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </section>
-
- <section id="v4l2-selection-flags">
-
- <title>Selection flags</title>
-
- <table pgwide="1" frame="none" id="v4l2-selection-flags-table">
- <title>Selection flag definitions</title>
- <tgroup cols="5">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- &cs-def;
- <thead>
- <row rowsep="1">
- <entry align="left">Flag name</entry>
- <entry align="left">id</entry>
- <entry align="left">Definition</entry>
- <entry align="left">Valid for V4L2</entry>
- <entry align="left">Valid for V4L2 subdev</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SEL_FLAG_GE</constant></entry>
- <entry>(1 &lt;&lt; 0)</entry>
-	      <entry>Suggest to the driver that it should choose a
-	      rectangle greater than or equal in size to the one
-	      requested. The driver may still choose a smaller size,
-	      but only due to hardware limitations. Without this flag (and
- <constant>V4L2_SEL_FLAG_LE</constant>) the
- behaviour is to choose the closest possible
- rectangle.</entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_FLAG_LE</constant></entry>
- <entry>(1 &lt;&lt; 1)</entry>
-	      <entry>Suggest to the driver that it
-	      should choose a rectangle less than or equal in size to the one
-	      requested. The driver may still choose a greater size,
-	      but only due to hardware limitations.</entry>
- <entry>Yes</entry>
- <entry>Yes</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_FLAG_KEEP_CONFIG</constant></entry>
- <entry>(1 &lt;&lt; 2)</entry>
- <entry>The configuration must not be propagated to any
- further processing steps. If this flag is not given, the
- configuration is propagated inside the subdevice to all
- further processing steps.</entry>
- <entry>No</entry>
- <entry>Yes</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </section>
-
-</section>
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
deleted file mode 100644
index 199c84e3aede..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ /dev/null
@@ -1,4040 +0,0 @@
-<section id="v4l2-mbus-format">
- <title>Media Bus Formats</title>
-
- <table pgwide="1" frame="none" id="v4l2-mbus-framefmt">
- <title>struct <structname>v4l2_mbus_framefmt</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Image width, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Image height, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>code</structfield></entry>
- <entry>Format code, from &v4l2-mbus-pixelcode;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>field</structfield></entry>
- <entry>Field order, from &v4l2-field;. See
- <xref linkend="field-order" /> for details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>colorspace</structfield></entry>
- <entry>Image colorspace, from &v4l2-colorspace;. See
- <xref linkend="colorspaces" /> for details.</entry>
- </row>
- <row>
- <entry>&v4l2-ycbcr-encoding;</entry>
- <entry><structfield>ycbcr_enc</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>&v4l2-quantization;</entry>
- <entry><structfield>quantization</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>&v4l2-xfer-func;</entry>
- <entry><structfield>xfer_func</structfield></entry>
- <entry>This information supplements the
-<structfield>colorspace</structfield> and must be set by the driver for
-capture streams and by the application for output streams,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>reserved</structfield>[11]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
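-
-  <para>As a minimal sketch (the file descriptor refers to a sub-device node,
-  and the pad number, 1280x720 size and RGB888 bus code are arbitrary example
-  values), the structure is typically embedded in a struct
-  <structname>v4l2_subdev_format</structname> and applied with the
-  <constant>VIDIOC_SUBDEV_S_FMT</constant> ioctl:</para>
-
-  <programlisting>
-
-	struct v4l2_subdev_format fmt = {
-		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
-		.pad = 0,
-	};
-
-	fmt.format.width = 1280;
-	fmt.format.height = 720;
-	fmt.format.code = MEDIA_BUS_FMT_RGB888_1X24;
-	fmt.format.field = V4L2_FIELD_NONE;
-	fmt.format.colorspace = V4L2_COLORSPACE_SRGB;
-
-	ret = ioctl(fd, VIDIOC_SUBDEV_S_FMT, &amp;fmt);
-	if (ret)
-		exit(-1);
-	/* fmt.format now holds the values adjusted by the driver */
-
-  </programlisting>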
-
- <section id="v4l2-mbus-pixelcode">
- <title>Media Bus Pixel Codes</title>
-
- <para>The media bus pixel codes describe image formats as flowing over
- physical busses (both between separate physical components and inside SoC
- devices). This should not be confused with the V4L2 pixel formats that
- describe, using four character codes, image formats as stored in memory.
- </para>
-
- <para>While there is a relationship between image formats on busses and
- image formats in memory (a raw Bayer image won't be magically converted to
-  JPEG just by storing it to memory), there is no one-to-one correspondence
- between them.</para>
-
- <section>
- <title>Packed RGB Formats</title>
-
- <para>Those formats transfer pixel data as red, green and blue components.
- The format code is made of the following information.
- <itemizedlist>
- <listitem><para>The red, green and blue components order code, as encoded in a
- pixel sample. Possible values are RGB and BGR.</para></listitem>
- <listitem><para>The number of bits per component, for each component. The values
- can be different for all components. Common values are 555 and 565.</para>
- </listitem>
- <listitem><para>The number of bus samples per pixel. Pixels that are wider than
- the bus width must be transferred in multiple samples. Common values are
- 1 and 2.</para></listitem>
- <listitem><para>The bus width.</para></listitem>
- <listitem><para>For formats where the total number of bits per pixel is smaller
- than the number of bus samples per pixel times the bus width, a padding
-      value stating if the bytes are padded in their high order bits
-      (PADHI) or low order bits (PADLO). A "C" prefix is used for component-wise
-      padding in the high order bits (CPADHI) or low order bits (CPADLO)
- of each separate component.</para></listitem>
- <listitem><para>For formats where the number of bus samples per pixel is larger
- than 1, an endianness value stating if the pixel is transferred MSB first
- (BE) or LSB first (LE).</para></listitem>
- </itemizedlist>
- </para>
-
-  <para>For instance, a format where pixels are encoded as 5-bit red, 5-bit
- green and 5-bit blue values padded on the high bit, transferred as 2 8-bit
- samples per pixel with the most significant bits (padding, red and half of
- the green value) transferred first will be named
- <constant>MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE</constant>.
- </para>
-
- <para>The following tables list existing packed RGB formats.</para>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-rgb">
- <title>RGB formats</title>
- <tgroup cols="27">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="center"/>
- <colspec colname="bit" />
- <colspec colnum="4" colname="b31" align="center" />
- <colspec colnum="5" colname="b20" align="center" />
- <colspec colnum="6" colname="b29" align="center" />
- <colspec colnum="7" colname="b28" align="center" />
- <colspec colnum="8" colname="b27" align="center" />
- <colspec colnum="9" colname="b26" align="center" />
- <colspec colnum="10" colname="b25" align="center" />
- <colspec colnum="11" colname="b24" align="center" />
- <colspec colnum="12" colname="b23" align="center" />
- <colspec colnum="13" colname="b22" align="center" />
- <colspec colnum="14" colname="b21" align="center" />
- <colspec colnum="15" colname="b20" align="center" />
- <colspec colnum="16" colname="b19" align="center" />
- <colspec colnum="17" colname="b18" align="center" />
- <colspec colnum="18" colname="b17" align="center" />
- <colspec colnum="19" colname="b16" align="center" />
- <colspec colnum="20" colname="b15" align="center" />
- <colspec colnum="21" colname="b14" align="center" />
- <colspec colnum="22" colname="b13" align="center" />
- <colspec colnum="23" colname="b12" align="center" />
- <colspec colnum="24" colname="b11" align="center" />
- <colspec colnum="25" colname="b10" align="center" />
- <colspec colnum="26" colname="b09" align="center" />
- <colspec colnum="27" colname="b08" align="center" />
- <colspec colnum="28" colname="b07" align="center" />
- <colspec colnum="29" colname="b06" align="center" />
- <colspec colnum="30" colname="b05" align="center" />
- <colspec colnum="31" colname="b04" align="center" />
- <colspec colnum="32" colname="b03" align="center" />
- <colspec colnum="33" colname="b02" align="center" />
- <colspec colnum="34" colname="b01" align="center" />
- <colspec colnum="35" colname="b00" align="center" />
- <spanspec namest="b31" nameend="b00" spanname="b0" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry></entry>
- <entry spanname="b0">Data organization</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Bit</entry>
- <entry>31</entry>
- <entry>30</entry>
- <entry>29</entry>
- <entry>28</entry>
- <entry>27</entry>
- <entry>26</entry>
- <entry>25</entry>
- <entry>24</entry>
- <entry>23</entry>
- <entry>22</entry>
- <entry>21</entry>
- <entry>20</entry>
- <entry>19</entry>
- <entry>18</entry>
- <entry>17</entry>
- <entry>16</entry>
- <entry>15</entry>
- <entry>14</entry>
- <entry>13</entry>
- <entry>12</entry>
- <entry>11</entry>
- <entry>10</entry>
- <entry>9</entry>
- <entry>8</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-RGB444-1X12">
- <entry>MEDIA_BUS_FMT_RGB444_1X12</entry>
- <entry>0x1016</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB444-2X8-PADHI-BE">
- <entry>MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE</entry>
- <entry>0x1001</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB444-2X8-PADHI-LE">
- <entry>MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE</entry>
- <entry>0x1002</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB555-2X8-PADHI-BE">
- <entry>MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE</entry>
- <entry>0x1003</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>0</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB555-2X8-PADHI-LE">
- <entry>MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE</entry>
- <entry>0x1004</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>0</entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB565-1X16">
- <entry>MEDIA_BUS_FMT_RGB565_1X16</entry>
- <entry>0x1017</entry>
- <entry></entry>
- &dash-ent-16;
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-BGR565-2X8-BE">
- <entry>MEDIA_BUS_FMT_BGR565_2X8_BE</entry>
- <entry>0x1005</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-BGR565-2X8-LE">
- <entry>MEDIA_BUS_FMT_BGR565_2X8_LE</entry>
- <entry>0x1006</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB565-2X8-BE">
- <entry>MEDIA_BUS_FMT_RGB565_2X8_BE</entry>
- <entry>0x1007</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB565-2X8-LE">
- <entry>MEDIA_BUS_FMT_RGB565_2X8_LE</entry>
- <entry>0x1008</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB666-1X18">
- <entry>MEDIA_BUS_FMT_RGB666_1X18</entry>
- <entry>0x1009</entry>
- <entry></entry>
- &dash-ent-14;
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RBG888-1X24">
- <entry>MEDIA_BUS_FMT_RBG888_1X24</entry>
- <entry>0x100e</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB666-1X24_CPADHI">
- <entry>MEDIA_BUS_FMT_RGB666_1X24_CPADHI</entry>
- <entry>0x1015</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>0</entry>
- <entry>0</entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-BGR888-1X24">
- <entry>MEDIA_BUS_FMT_BGR888_1X24</entry>
- <entry>0x1013</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-GBR888-1X24">
- <entry>MEDIA_BUS_FMT_GBR888_1X24</entry>
- <entry>0x1014</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB888-1X24">
- <entry>MEDIA_BUS_FMT_RGB888_1X24</entry>
- <entry>0x100a</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB888-2X12-BE">
- <entry>MEDIA_BUS_FMT_RGB888_2X12_BE</entry>
- <entry>0x100b</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB888-2X12-LE">
- <entry>MEDIA_BUS_FMT_RGB888_2X12_LE</entry>
- <entry>0x100c</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-ARGB888-1X32">
- <entry>MEDIA_BUS_FMT_ARGB888_1X32</entry>
- <entry>0x100d</entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB888-1X32-PADHI">
- <entry>MEDIA_BUS_FMT_RGB888_1X32_PADHI</entry>
- <entry>0x100f</entry>
- <entry></entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para>On LVDS buses, usually each sample is transferred serialized in
- seven time slots per pixel clock, on three (18-bit) or four (24-bit)
- differential data pairs at the same time. The remaining bits are used for
- control signals as defined by SPWG/PSWG/VESA or JEIDA standards.
- The 24-bit RGB format serialized in seven time slots on four lanes using
- JEIDA defined bit mapping will be named
- <constant>MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA</constant>, for example.
- </para>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-rgb-lvds">
- <title>LVDS RGB formats</title>
- <tgroup cols="8">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="center" />
- <colspec colname="slot" align="center" />
- <colspec colname="lane" />
- <colspec colnum="5" colname="l03" align="center" />
- <colspec colnum="6" colname="l02" align="center" />
- <colspec colnum="7" colname="l01" align="center" />
- <colspec colnum="8" colname="l00" align="center" />
- <spanspec namest="l03" nameend="l00" spanname="l0" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry></entry>
- <entry></entry>
- <entry spanname="l0">Data organization</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Timeslot</entry>
- <entry>Lane</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-RGB666-1X7X3-SPWG">
- <entry>MEDIA_BUS_FMT_RGB666_1X7X3_SPWG</entry>
- <entry>0x1010</entry>
- <entry>0</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>d</entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>1</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>d</entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>2</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>d</entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>3</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>4</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>5</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>6</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB888-1X7X4-SPWG">
- <entry>MEDIA_BUS_FMT_RGB888_1X7X4_SPWG</entry>
- <entry>0x1011</entry>
- <entry>0</entry>
- <entry></entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>1</entry>
- <entry></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>d</entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>2</entry>
- <entry></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>d</entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>3</entry>
- <entry></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>4</entry>
- <entry></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>5</entry>
- <entry></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>6</entry>
- <entry></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-RGB888-1X7X4-JEIDA">
- <entry>MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA</entry>
- <entry>0x1012</entry>
- <entry>0</entry>
- <entry></entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>1</entry>
- <entry></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>d</entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>r<subscript>7</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>2</entry>
- <entry></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>d</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>3</entry>
- <entry></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>4</entry>
- <entry></entry>
- <entry>g<subscript>0</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>5</entry>
- <entry></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>6</entry>
- <entry></entry>
- <entry>r<subscript>0</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section>
- <title>Bayer Formats</title>
-
- <para>Those formats transfer pixel data as red, green and blue components.
- The format code is made of the following information.
- <itemizedlist>
- <listitem><para>The red, green and blue components order code, as encoded in a
- pixel sample. The possible values are shown in <xref
- linkend="bayer-patterns" />.</para></listitem>
- <listitem><para>The number of bits per pixel component. All components are
- transferred on the same number of bits. Common values are 8, 10 and 12.</para>
- </listitem>
- <listitem><para>The compression (optional). If the pixel components are
- ALAW- or DPCM-compressed, a mention of the compression scheme and the
- number of bits per compressed pixel component.</para></listitem>
- <listitem><para>The number of bus samples per pixel. Pixels that are wider than
- the bus width must be transferred in multiple samples. Common values are
- 1 and 2.</para></listitem>
- <listitem><para>The bus width.</para></listitem>
- <listitem><para>For formats where the total number of bits per pixel is smaller
- than the number of bus samples per pixel times the bus width, a padding
-      value stating if the bytes are padded in their high order bits
- (PADHI) or low order bits (PADLO).</para></listitem>
- <listitem><para>For formats where the number of bus samples per pixel is larger
- than 1, an endianness value stating if the pixel is transferred MSB first
- (BE) or LSB first (LE).</para></listitem>
- </itemizedlist>
- </para>
-
- <para>For instance, a format with uncompressed 10-bit Bayer components
- arranged in a red, green, green, blue pattern transferred as 2 8-bit
- samples per pixel with the least significant bits transferred first will
- be named <constant>MEDIA_BUS_FMT_SRGGB10_2X8_PADHI_LE</constant>.
- </para>
-
- <figure id="bayer-patterns">
- <title>Bayer Patterns</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="bayer.png" format="PNG" />
- </imageobject>
- <textobject>
- <phrase>Bayer filter color patterns</phrase>
- </textobject>
- </mediaobject>
- </figure>
-
- <para>The following table lists existing packed Bayer formats. The data
- organization is given as an example for the first pixel only.</para>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-bayer">
- <title>Bayer Formats</title>
- <tgroup cols="15">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="center"/>
- <colspec colname="bit" />
- <colspec colnum="4" colname="b11" align="center" />
- <colspec colnum="5" colname="b10" align="center" />
- <colspec colnum="6" colname="b09" align="center" />
- <colspec colnum="7" colname="b08" align="center" />
- <colspec colnum="8" colname="b07" align="center" />
- <colspec colnum="9" colname="b06" align="center" />
- <colspec colnum="10" colname="b05" align="center" />
- <colspec colnum="11" colname="b04" align="center" />
- <colspec colnum="12" colname="b03" align="center" />
- <colspec colnum="13" colname="b02" align="center" />
- <colspec colnum="14" colname="b01" align="center" />
- <colspec colnum="15" colname="b00" align="center" />
- <spanspec namest="b11" nameend="b00" spanname="b0" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry></entry>
- <entry spanname="b0">Data organization</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Bit</entry>
- <entry>11</entry>
- <entry>10</entry>
- <entry>9</entry>
- <entry>8</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-SBGGR8-1X8">
- <entry>MEDIA_BUS_FMT_SBGGR8_1X8</entry>
- <entry>0x3001</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGBRG8-1X8">
- <entry>MEDIA_BUS_FMT_SGBRG8_1X8</entry>
- <entry>0x3013</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGRBG8-1X8">
- <entry>MEDIA_BUS_FMT_SGRBG8_1X8</entry>
- <entry>0x3002</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SRGGB8-1X8">
- <entry>MEDIA_BUS_FMT_SRGGB8_1X8</entry>
- <entry>0x3014</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-ALAW8-1X8">
- <entry>MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8</entry>
- <entry>0x3015</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGBRG10-ALAW8-1X8">
- <entry>MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8</entry>
- <entry>0x3016</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGRBG10-ALAW8-1X8">
- <entry>MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8</entry>
- <entry>0x3017</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SRGGB10-ALAW8-1X8">
- <entry>MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8</entry>
- <entry>0x3018</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-DPCM8-1X8">
- <entry>MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8</entry>
- <entry>0x300b</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGBRG10-DPCM8-1X8">
- <entry>MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8</entry>
- <entry>0x300c</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGRBG10-DPCM8-1X8">
- <entry>MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8</entry>
- <entry>0x3009</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SRGGB10-DPCM8-1X8">
- <entry>MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8</entry>
- <entry>0x300d</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-BE">
- <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE</entry>
- <entry>0x3003</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>b<subscript>9</subscript></entry>
- <entry>b<subscript>8</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADHI-LE">
- <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE</entry>
- <entry>0x3004</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>b<subscript>9</subscript></entry>
- <entry>b<subscript>8</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-BE">
- <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE</entry>
- <entry>0x3005</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>9</subscript></entry>
- <entry>b<subscript>8</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-2X8-PADLO-LE">
- <entry>MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE</entry>
- <entry>0x3006</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>9</subscript></entry>
- <entry>b<subscript>8</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR10-1X10">
- <entry>MEDIA_BUS_FMT_SBGGR10_1X10</entry>
- <entry>0x3007</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>b<subscript>9</subscript></entry>
- <entry>b<subscript>8</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGBRG10-1X10">
- <entry>MEDIA_BUS_FMT_SGBRG10_1X10</entry>
- <entry>0x300e</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>9</subscript></entry>
- <entry>g<subscript>8</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGRBG10-1X10">
- <entry>MEDIA_BUS_FMT_SGRBG10_1X10</entry>
- <entry>0x300a</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>g<subscript>9</subscript></entry>
- <entry>g<subscript>8</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SRGGB10-1X10">
- <entry>MEDIA_BUS_FMT_SRGGB10_1X10</entry>
- <entry>0x300f</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>r<subscript>9</subscript></entry>
- <entry>r<subscript>8</subscript></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SBGGR12-1X12">
- <entry>MEDIA_BUS_FMT_SBGGR12_1X12</entry>
- <entry>0x3008</entry>
- <entry></entry>
- <entry>b<subscript>11</subscript></entry>
- <entry>b<subscript>10</subscript></entry>
- <entry>b<subscript>9</subscript></entry>
- <entry>b<subscript>8</subscript></entry>
- <entry>b<subscript>7</subscript></entry>
- <entry>b<subscript>6</subscript></entry>
- <entry>b<subscript>5</subscript></entry>
- <entry>b<subscript>4</subscript></entry>
- <entry>b<subscript>3</subscript></entry>
- <entry>b<subscript>2</subscript></entry>
- <entry>b<subscript>1</subscript></entry>
- <entry>b<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGBRG12-1X12">
- <entry>MEDIA_BUS_FMT_SGBRG12_1X12</entry>
- <entry>0x3010</entry>
- <entry></entry>
- <entry>g<subscript>11</subscript></entry>
- <entry>g<subscript>10</subscript></entry>
- <entry>g<subscript>9</subscript></entry>
- <entry>g<subscript>8</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SGRBG12-1X12">
- <entry>MEDIA_BUS_FMT_SGRBG12_1X12</entry>
- <entry>0x3011</entry>
- <entry></entry>
- <entry>g<subscript>11</subscript></entry>
- <entry>g<subscript>10</subscript></entry>
- <entry>g<subscript>9</subscript></entry>
- <entry>g<subscript>8</subscript></entry>
- <entry>g<subscript>7</subscript></entry>
- <entry>g<subscript>6</subscript></entry>
- <entry>g<subscript>5</subscript></entry>
- <entry>g<subscript>4</subscript></entry>
- <entry>g<subscript>3</subscript></entry>
- <entry>g<subscript>2</subscript></entry>
- <entry>g<subscript>1</subscript></entry>
- <entry>g<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-SRGGB12-1X12">
- <entry>MEDIA_BUS_FMT_SRGGB12_1X12</entry>
- <entry>0x3012</entry>
- <entry></entry>
- <entry>r<subscript>11</subscript></entry>
- <entry>r<subscript>10</subscript></entry>
- <entry>r<subscript>9</subscript></entry>
- <entry>r<subscript>8</subscript></entry>
- <entry>r<subscript>7</subscript></entry>
- <entry>r<subscript>6</subscript></entry>
- <entry>r<subscript>5</subscript></entry>
- <entry>r<subscript>4</subscript></entry>
- <entry>r<subscript>3</subscript></entry>
- <entry>r<subscript>2</subscript></entry>
- <entry>r<subscript>1</subscript></entry>
- <entry>r<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section>
- <title>Packed YUV Formats</title>
-
- <para>These data formats transfer pixel data as (possibly downsampled) Y, U
- and V components. Some formats include dummy bits in some of their samples
- and are collectively referred to as "YDYC" (Y-Dummy-Y-Chroma) formats.
- The values of these dummy bits are undefined and must not be relied upon.
- </para>
- <para>The format code is built from the following information.
- <itemizedlist>
- <listitem><para>The Y, U and V components order code, as transferred on the
- bus. Possible values are YUYV, UYVY, YVYU and VYUY for formats with no
- dummy bit, and YDYUYDYV, YDYVYDYU, YUYDYVYD and YVYDYUYD for YDYC formats.
- </para></listitem>
- <listitem><para>The number of bits per pixel component. All components are
- transferred on the same number of bits. Common values are 8, 10 and 12.</para>
- </listitem>
- <listitem><para>The number of bus samples per pixel. Pixels that are wider than
- the bus width must be transferred in multiple samples. Common values are
- 1, 1.5 (encoded as 1_5) and 2.</para></listitem>
- <listitem><para>The bus width. When the bus width is larger than the number of
- bits per pixel component, several components are packed in a single bus
- sample. The components are ordered as specified by the order code, with
- components on the left of the code transferred in the high order bits.
- Common values are 8 and 16.</para>
- </listitem>
- </itemizedlist>
- </para>
-
- <para>For instance, a format where pixels are encoded as 8-bit YUV values
- downsampled to 4:2:2 and transferred as 2 8-bit bus samples per pixel in the
- U, Y, V, Y order will be named <constant>MEDIA_BUS_FMT_UYVY8_2X8</constant>.
- </para>
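-
- <para>As a minimal sketch of the sample sequence this example describes,
- the following C fragment (illustrative helper name only, not a kernel
- API) emits the four 8-bit bus samples carrying one 4:2:2 pixel pair in
- the <constant>MEDIA_BUS_FMT_UYVY8_2X8</constant> order.</para>
-
- <programlisting><![CDATA[
- #include <stdint.h>
-
- /* Illustrative sketch only, not kernel code. */
- static void uyvy8_2x8_pair(uint8_t y0, uint8_t y1, uint8_t u, uint8_t v,
- 			   uint8_t samples[4])
- {
- 	samples[0] = u;   /* shared blue chroma, transferred first */
- 	samples[1] = y0;  /* luma of the first pixel */
- 	samples[2] = v;   /* shared red chroma */
- 	samples[3] = y1;  /* luma of the second pixel */
- }
- ]]></programlisting>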
-
- <para><xref linkend="v4l2-mbus-pixelcode-yuv8"/> lists existing packed YUV
- formats and describes the data organization of each pixel in each sample.
- When a format pattern is split across multiple samples, each of the samples
- in the pattern is described.</para>
-
- <para>The role of each bit transferred over the bus is identified by one
- of the following codes.</para>
-
- <itemizedlist>
- <listitem><para>y<subscript>x</subscript> for luma component bit number x</para></listitem>
- <listitem><para>u<subscript>x</subscript> for blue chroma component bit number x</para></listitem>
- <listitem><para>v<subscript>x</subscript> for red chroma component bit number x</para></listitem>
- <listitem><para>a<subscript>x</subscript> for alpha component bit number x</para></listitem>
- <listitem><para>- for non-available bits (for positions higher than the bus width)</para></listitem>
- <listitem><para>d for dummy bits</para></listitem>
- </itemizedlist>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-yuv8">
- <title>YUV Formats</title>
- <tgroup cols="35">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="center"/>
- <colspec colname="bit" />
- <colspec colnum="4" colname="b31" align="center" />
- <colspec colnum="5" colname="b30" align="center" />
- <colspec colnum="6" colname="b29" align="center" />
- <colspec colnum="7" colname="b28" align="center" />
- <colspec colnum="8" colname="b27" align="center" />
- <colspec colnum="9" colname="b26" align="center" />
- <colspec colnum="10" colname="b25" align="center" />
- <colspec colnum="11" colname="b24" align="center" />
- <colspec colnum="12" colname="b23" align="center" />
- <colspec colnum="13" colname="b22" align="center" />
- <colspec colnum="14" colname="b21" align="center" />
- <colspec colnum="15" colname="b20" align="center" />
- <colspec colnum="16" colname="b19" align="center" />
- <colspec colnum="17" colname="b18" align="center" />
- <colspec colnum="18" colname="b17" align="center" />
- <colspec colnum="19" colname="b16" align="center" />
- <colspec colnum="20" colname="b15" align="center" />
- <colspec colnum="21" colname="b14" align="center" />
- <colspec colnum="22" colname="b13" align="center" />
- <colspec colnum="23" colname="b12" align="center" />
- <colspec colnum="24" colname="b11" align="center" />
- <colspec colnum="25" colname="b10" align="center" />
- <colspec colnum="26" colname="b09" align="center" />
- <colspec colnum="27" colname="b08" align="center" />
- <colspec colnum="28" colname="b07" align="center" />
- <colspec colnum="29" colname="b06" align="center" />
- <colspec colnum="30" colname="b05" align="center" />
- <colspec colnum="31" colname="b04" align="center" />
- <colspec colnum="32" colname="b03" align="center" />
- <colspec colnum="33" colname="b02" align="center" />
- <colspec colnum="34" colname="b01" align="center" />
- <colspec colnum="35" colname="b00" align="center" />
- <spanspec namest="b31" nameend="b00" spanname="b0" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry></entry>
- <entry spanname="b0">Data organization</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Bit</entry>
- <entry>31</entry>
- <entry>30</entry>
- <entry>29</entry>
- <entry>28</entry>
- <entry>27</entry>
- <entry>26</entry>
- <entry>25</entry>
- <entry>24</entry>
- <entry>23</entry>
- <entry>22</entry>
- <entry>21</entry>
- <entry>20</entry>
- <entry>19</entry>
- <entry>18</entry>
- <entry>17</entry>
- <entry>16</entry>
- <entry>15</entry>
- <entry>14</entry>
- <entry>13</entry>
- <entry>12</entry>
- <entry>11</entry>
- <entry>10</entry>
- <entry>9</entry>
- <entry>8</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-Y8-1X8">
- <entry>MEDIA_BUS_FMT_Y8_1X8</entry>
- <entry>0x2001</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UV8-1X8">
- <entry>MEDIA_BUS_FMT_UV8_1X8</entry>
- <entry>0x2015</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY8-1_5X8">
- <entry>MEDIA_BUS_FMT_UYVY8_1_5X8</entry>
- <entry>0x2002</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY8-1_5X8">
- <entry>MEDIA_BUS_FMT_VYUY8_1_5X8</entry>
- <entry>0x2003</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV8-1_5X8">
- <entry>MEDIA_BUS_FMT_YUYV8_1_5X8</entry>
- <entry>0x2004</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU8-1_5X8">
- <entry>MEDIA_BUS_FMT_YVYU8_1_5X8</entry>
- <entry>0x2005</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY8-2X8">
- <entry>MEDIA_BUS_FMT_UYVY8_2X8</entry>
- <entry>0x2006</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY8-2X8">
- <entry>MEDIA_BUS_FMT_VYUY8_2X8</entry>
- <entry>0x2007</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV8-2X8">
- <entry>MEDIA_BUS_FMT_YUYV8_2X8</entry>
- <entry>0x2008</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU8-2X8">
- <entry>MEDIA_BUS_FMT_YVYU8_2X8</entry>
- <entry>0x2009</entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-24;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-Y10-1X10">
- <entry>MEDIA_BUS_FMT_Y10_1X10</entry>
- <entry>0x200a</entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY10-2X10">
- <entry>MEDIA_BUS_FMT_UYVY10_2X10</entry>
- <entry>0x2018</entry>
- <entry></entry>
- &dash-ent-22;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY10-2X10">
- <entry>MEDIA_BUS_FMT_VYUY10_2X10</entry>
- <entry>0x2019</entry>
- <entry></entry>
- &dash-ent-22;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV10-2X10">
- <entry>MEDIA_BUS_FMT_YUYV10_2X10</entry>
- <entry>0x200b</entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU10-2X10">
- <entry>MEDIA_BUS_FMT_YVYU10_2X10</entry>
- <entry>0x200c</entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-22;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-Y12-1X12">
- <entry>MEDIA_BUS_FMT_Y12_1X12</entry>
- <entry>0x2013</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY12-2X12">
- <entry>MEDIA_BUS_FMT_UYVY12_2X12</entry>
- <entry>0x201c</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY12-2X12">
- <entry>MEDIA_BUS_FMT_VYUY12_2X12</entry>
- <entry>0x201d</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV12-2X12">
- <entry>MEDIA_BUS_FMT_YUYV12_2X12</entry>
- <entry>0x201e</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU12-2X12">
- <entry>MEDIA_BUS_FMT_YVYU12_2X12</entry>
- <entry>0x201f</entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-20;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY8-1X16">
- <entry>MEDIA_BUS_FMT_UYVY8_1X16</entry>
- <entry>0x200f</entry>
- <entry></entry>
- &dash-ent-16;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY8-1X16">
- <entry>MEDIA_BUS_FMT_VYUY8_1X16</entry>
- <entry>0x2010</entry>
- <entry></entry>
- &dash-ent-16;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV8-1X16">
- <entry>MEDIA_BUS_FMT_YUYV8_1X16</entry>
- <entry>0x2011</entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU8-1X16">
- <entry>MEDIA_BUS_FMT_YVYU8_1X16</entry>
- <entry>0x2012</entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YDYUYDYV8-1X16">
- <entry>MEDIA_BUS_FMT_YDYUYDYV8_1X16</entry>
- <entry>0x2014</entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- <entry>d</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-16;
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY10-1X20">
- <entry>MEDIA_BUS_FMT_UYVY10_1X20</entry>
- <entry>0x201a</entry>
- <entry></entry>
- &dash-ent-12;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-12;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY10-1X20">
- <entry>MEDIA_BUS_FMT_VYUY10_1X20</entry>
- <entry>0x201b</entry>
- <entry></entry>
- &dash-ent-12;
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-12;
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV10-1X20">
- <entry>MEDIA_BUS_FMT_YUYV10_1X20</entry>
- <entry>0x200d</entry>
- <entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU10-1X20">
- <entry>MEDIA_BUS_FMT_YVYU10_1X20</entry>
- <entry>0x200e</entry>
- <entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-12;
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VUY8-1X24">
- <entry>MEDIA_BUS_FMT_VUY8_1X24</entry>
- <entry>0x2024</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUV8-1X24">
- <entry>MEDIA_BUS_FMT_YUV8_1X24</entry>
- <entry>0x2025</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-UYVY12-1X24">
- <entry>MEDIA_BUS_FMT_UYVY12_1X24</entry>
- <entry>0x2020</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-8;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-VYUY12-1X24">
- <entry>MEDIA_BUS_FMT_VYUY12_1X24</entry>
- <entry>0x2021</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-8;
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUYV12-1X24">
- <entry>MEDIA_BUS_FMT_YUYV12_1X24</entry>
- <entry>0x2022</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-8;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YVYU12-1X24">
- <entry>MEDIA_BUS_FMT_YVYU12_1X24</entry>
- <entry>0x2023</entry>
- <entry></entry>
- &dash-ent-8;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>v<subscript>11</subscript></entry>
- <entry>v<subscript>10</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- &dash-ent-8;
- <entry>y<subscript>11</subscript></entry>
- <entry>y<subscript>10</subscript></entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>11</subscript></entry>
- <entry>u<subscript>10</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-YUV10-1X30">
- <entry>MEDIA_BUS_FMT_YUV10_1X30</entry>
- <entry>0x2016</entry>
- <entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>y<subscript>9</subscript></entry>
- <entry>y<subscript>8</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>9</subscript></entry>
- <entry>u<subscript>8</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>v<subscript>9</subscript></entry>
- <entry>v<subscript>8</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- <row id="MEDIA-BUS-FMT-AYUV8-1X32">
- <entry>MEDIA_BUS_FMT_AYUV8_1X32</entry>
- <entry>0x2017</entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry>y<subscript>7</subscript></entry>
- <entry>y<subscript>6</subscript></entry>
- <entry>y<subscript>5</subscript></entry>
- <entry>y<subscript>4</subscript></entry>
- <entry>y<subscript>3</subscript></entry>
- <entry>y<subscript>2</subscript></entry>
- <entry>y<subscript>1</subscript></entry>
- <entry>y<subscript>0</subscript></entry>
- <entry>u<subscript>7</subscript></entry>
- <entry>u<subscript>6</subscript></entry>
- <entry>u<subscript>5</subscript></entry>
- <entry>u<subscript>4</subscript></entry>
- <entry>u<subscript>3</subscript></entry>
- <entry>u<subscript>2</subscript></entry>
- <entry>u<subscript>1</subscript></entry>
- <entry>u<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section>
- <title>HSV/HSL Formats</title>
-
- <para>These formats transfer pixel data as RGB values mapped onto a
- cylindrical-coordinate system, using Hue-Saturation-Value or
- Hue-Saturation-Lightness components. The format code is made of the
- following information.
- <itemizedlist>
- <listitem><para>The order code of the hue, saturation, value or lightness
- and optional alpha components, as encoded in a pixel sample. The only
- currently supported value is AHSV.
- </para></listitem>
- <listitem><para>The number of bits per component, for each component. The
- values can differ between components. The only currently supported value is
- 8888.
- </para></listitem>
- <listitem><para>The number of bus samples per pixel. Pixels that are wider than
- the bus width must be transferred in multiple samples. The only currently
- supported value is 1.</para></listitem>
- <listitem><para>The bus width.</para></listitem>
- <listitem><para>For formats where the total number of bits per pixel is smaller
- than the number of bus samples per pixel times the bus width, a padding
- value stating whether the bytes are padded in their high-order bits
- (PADHI) or in their low-order bits (PADLO).</para></listitem>
- <listitem><para>For formats where the number of bus samples per pixel is larger
- than 1, an endianness value stating if the pixel is transferred MSB first
- (BE) or LSB first (LE).</para></listitem>
- </itemizedlist>
- </para>
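- <para>For illustration only, a minimal sketch of how such a layout maps to
- code: the MEDIA_BUS_FMT_AHSV8888_1X32 entry in the table below packs the
- alpha, hue, saturation and value components into a single 32-bit bus
- sample. A helper along these lines (the name is arbitrary and not part of
- any kernel API) would build one sample:
- <programlisting>
-<![CDATA[
-#include <stdint.h>
-
-/*
- * Pack one AHSV8888 pixel into the single 32-bit bus sample used by
- * MEDIA_BUS_FMT_AHSV8888_1X32: alpha in bits 31-24, hue in bits 23-16,
- * saturation in bits 15-8 and value in bits 7-0.
- */
-static inline uint32_t ahsv8888_1x32_sample(uint8_t a, uint8_t h,
-                                            uint8_t s, uint8_t v)
-{
-        return ((uint32_t)a << 24) | ((uint32_t)h << 16) |
-               ((uint32_t)s << 8) | v;
-}
-]]>
- </programlisting>
- </para>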
-
- <para>The following table lists existing HSV/HSL formats.</para>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-hsv">
- <title>HSV/HSL formats</title>
- <tgroup cols="35">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="center"/>
- <colspec colname="bit" />
- <colspec colnum="4" colname="b31" align="center" />
- <colspec colnum="5" colname="b20" align="center" />
- <colspec colnum="6" colname="b29" align="center" />
- <colspec colnum="7" colname="b28" align="center" />
- <colspec colnum="8" colname="b27" align="center" />
- <colspec colnum="9" colname="b26" align="center" />
- <colspec colnum="10" colname="b25" align="center" />
- <colspec colnum="11" colname="b24" align="center" />
- <colspec colnum="12" colname="b23" align="center" />
- <colspec colnum="13" colname="b22" align="center" />
- <colspec colnum="14" colname="b21" align="center" />
- <colspec colnum="15" colname="b20" align="center" />
- <colspec colnum="16" colname="b19" align="center" />
- <colspec colnum="17" colname="b18" align="center" />
- <colspec colnum="18" colname="b17" align="center" />
- <colspec colnum="19" colname="b16" align="center" />
- <colspec colnum="20" colname="b15" align="center" />
- <colspec colnum="21" colname="b14" align="center" />
- <colspec colnum="22" colname="b13" align="center" />
- <colspec colnum="23" colname="b12" align="center" />
- <colspec colnum="24" colname="b11" align="center" />
- <colspec colnum="25" colname="b10" align="center" />
- <colspec colnum="26" colname="b09" align="center" />
- <colspec colnum="27" colname="b08" align="center" />
- <colspec colnum="28" colname="b07" align="center" />
- <colspec colnum="29" colname="b06" align="center" />
- <colspec colnum="30" colname="b05" align="center" />
- <colspec colnum="31" colname="b04" align="center" />
- <colspec colnum="32" colname="b03" align="center" />
- <colspec colnum="33" colname="b02" align="center" />
- <colspec colnum="34" colname="b01" align="center" />
- <colspec colnum="35" colname="b00" align="center" />
- <spanspec namest="b31" nameend="b00" spanname="b0" />
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry></entry>
- <entry spanname="b0">Data organization</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Bit</entry>
- <entry>31</entry>
- <entry>30</entry>
- <entry>29</entry>
- <entry>28</entry>
- <entry>27</entry>
- <entry>26</entry>
- <entry>25</entry>
- <entry>24</entry>
- <entry>23</entry>
- <entry>22</entry>
- <entry>21</entry>
- <entry>20</entry>
- <entry>19</entry>
- <entry>18</entry>
- <entry>17</entry>
- <entry>16</entry>
- <entry>15</entry>
- <entry>14</entry>
- <entry>13</entry>
- <entry>12</entry>
- <entry>11</entry>
- <entry>10</entry>
- <entry>9</entry>
- <entry>8</entry>
- <entry>7</entry>
- <entry>6</entry>
- <entry>5</entry>
- <entry>4</entry>
- <entry>3</entry>
- <entry>2</entry>
- <entry>1</entry>
- <entry>0</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-AHSV8888-1X32">
- <entry>MEDIA_BUS_FMT_AHSV8888_1X32</entry>
- <entry>0x6001</entry>
- <entry></entry>
- <entry>a<subscript>7</subscript></entry>
- <entry>a<subscript>6</subscript></entry>
- <entry>a<subscript>5</subscript></entry>
- <entry>a<subscript>4</subscript></entry>
- <entry>a<subscript>3</subscript></entry>
- <entry>a<subscript>2</subscript></entry>
- <entry>a<subscript>1</subscript></entry>
- <entry>a<subscript>0</subscript></entry>
- <entry>h<subscript>7</subscript></entry>
- <entry>h<subscript>6</subscript></entry>
- <entry>h<subscript>5</subscript></entry>
- <entry>h<subscript>4</subscript></entry>
- <entry>h<subscript>3</subscript></entry>
- <entry>h<subscript>2</subscript></entry>
- <entry>h<subscript>1</subscript></entry>
- <entry>h<subscript>0</subscript></entry>
- <entry>s<subscript>7</subscript></entry>
- <entry>s<subscript>6</subscript></entry>
- <entry>s<subscript>5</subscript></entry>
- <entry>s<subscript>4</subscript></entry>
- <entry>s<subscript>3</subscript></entry>
- <entry>s<subscript>2</subscript></entry>
- <entry>s<subscript>1</subscript></entry>
- <entry>s<subscript>0</subscript></entry>
- <entry>v<subscript>7</subscript></entry>
- <entry>v<subscript>6</subscript></entry>
- <entry>v<subscript>5</subscript></entry>
- <entry>v<subscript>4</subscript></entry>
- <entry>v<subscript>3</subscript></entry>
- <entry>v<subscript>2</subscript></entry>
- <entry>v<subscript>1</subscript></entry>
- <entry>v<subscript>0</subscript></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section>
- <title>JPEG Compressed Formats</title>
-
- <para>These data formats consist of an ordered sequence of 8-bit bytes
- obtained from the JPEG compression process. In addition to the
- <constant>_JPEG</constant> suffix, the format code is made of
- the following information.
- <itemizedlist>
- <listitem><para>The number of bus samples per entropy encoded byte.</para></listitem>
- <listitem><para>The bus width.</para></listitem>
- </itemizedlist>
- </para>
-
- <para>For instance, for a JPEG baseline process and an 8-bit bus width,
- the format will be named <constant>MEDIA_BUS_FMT_JPEG_1X8</constant>.
- </para>
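- <para>As a sketch only (the driver and function names here are hypothetical
- and not taken from an existing driver): a sensor subdevice that outputs JPEG
- data on an 8-bit bus could report this code through the pad-level
- enum_mbus_code operation:
- <programlisting>
-<![CDATA[
-#include <linux/media-bus-format.h>
-#include <media/v4l2-subdev.h>
-
-/* Hypothetical sensor driver: the only media bus code exposed is JPEG_1X8. */
-static int sensor_enum_mbus_code(struct v4l2_subdev *sd,
-                                 struct v4l2_subdev_pad_config *cfg,
-                                 struct v4l2_subdev_mbus_code_enum *code)
-{
-        if (code->index > 0)
-                return -EINVAL;
-
-        code->code = MEDIA_BUS_FMT_JPEG_1X8;
-        return 0;
-}
-]]>
- </programlisting>
- </para>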
-
- <para>The following table lists existing JPEG compressed formats.</para>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-jpeg">
- <title>JPEG Formats</title>
- <tgroup cols="3">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="left"/>
- <colspec colname="remarks" align="left"/>
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>Remarks</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-JPEG-1X8">
- <entry>MEDIA_BUS_FMT_JPEG_1X8</entry>
- <entry>0x4001</entry>
- <entry>Besides its use on the parallel bus, this format is
- recommended for transmission of JPEG data over the MIPI CSI bus
- using the User Defined 8-bit Data types.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- <section id="v4l2-mbus-vendor-spec-fmts">
- <title>Vendor and Device Specific Formats</title>
-
- <para>This section lists complex data formats that are either vendor or
- device specific.
- </para>
-
- <para>The following table lists the existing vendor and device specific
- formats.</para>
-
- <table pgwide="0" frame="none" id="v4l2-mbus-pixelcode-vendor-specific">
- <title>Vendor and device specific formats</title>
- <tgroup cols="3">
- <colspec colname="id" align="left" />
- <colspec colname="code" align="left"/>
- <colspec colname="remarks" align="left"/>
- <thead>
- <row>
- <entry>Identifier</entry>
- <entry>Code</entry>
- <entry>Comments</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row id="MEDIA-BUS-FMT-S5C-UYVY-JPEG-1X8">
- <entry>MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8</entry>
- <entry>0x5001</entry>
- <entry>
- Interleaved raw UYVY and JPEG image format with embedded
- metadata, used by Samsung S3C73MX camera sensors.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </section>
-
- </section>
-</section>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia b/Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia
deleted file mode 100644
index e32ba5362e1d..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-image-processing-crop.dia
+++ /dev/null
@@ -1,614 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
- <dia:diagramdata>
- <dia:attribute name="background">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="pagebreak">
- <dia:color val="#000099"/>
- </dia:attribute>
- <dia:attribute name="paper">
- <dia:composite type="paper">
- <dia:attribute name="name">
- <dia:string>#A4#</dia:string>
- </dia:attribute>
- <dia:attribute name="tmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="bmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="lmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="rmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="is_portrait">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="scaling">
- <dia:real val="0.49000000953674316"/>
- </dia:attribute>
- <dia:attribute name="fitto">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="grid">
- <dia:composite type="grid">
- <dia:attribute name="width_x">
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="width_y">
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="visible_x">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="visible_y">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:composite type="color"/>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#d8e5e5"/>
- </dia:attribute>
- <dia:attribute name="guides">
- <dia:composite type="guides">
- <dia:attribute name="hguides"/>
- <dia:attribute name="vguides"/>
- </dia:composite>
- </dia:attribute>
- </dia:diagramdata>
- <dia:layer name="Background" visible="true" active="true">
- <dia:object type="Standard - Box" version="0" id="O0">
- <dia:attribute name="obj_pos">
- <dia:point val="-0.4,6.5"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-0.45,6.45;23.1387,16.2"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-0.4,6.5"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="23.48871579904775"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="9.6500000000000004"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O1">
- <dia:attribute name="obj_pos">
- <dia:point val="0.225,9.45"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.175,9.4;8.225,14.7"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="0.225,9.45"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="7.9499999999999975"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="5.1999999999999975"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O2">
- <dia:attribute name="obj_pos">
- <dia:point val="3.175,10.55"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.125,10.5;7.925,14.45"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="3.175,10.55"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="4.6999999999999975"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.8499999999999979"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O3">
- <dia:attribute name="obj_pos">
- <dia:point val="3.725,11.3875"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.725,10.7925;6.6025,13.14"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink
-crop
-selection#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="3.725,11.3875"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O4">
- <dia:attribute name="obj_pos">
- <dia:point val="1.475,7.9"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="1.475,7.305;1.475,8.0525"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>##</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="1.475,7.9"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O5">
- <dia:attribute name="obj_pos">
- <dia:point val="0.426918,7.89569"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.426918,7.30069;3.90942,8.84819"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink media
-bus format#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="0.426918,7.89569"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O6">
- <dia:attribute name="obj_pos">
- <dia:point val="17.4887,7.75"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="17.4887,7.155;21.8112,8.7025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#source media
-bus format#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="17.4887,7.75"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O7">
- <dia:attribute name="obj_pos">
- <dia:point val="17.5244,9.5417"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="17.4744,9.4917;22.2387,13.35"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="17.5244,9.5417"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="4.6643157990477508"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.758300000000002"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O8">
- <dia:attribute name="obj_pos">
- <dia:point val="17.5244,13.3"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.12132,13.2463;17.5781,14.4537"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="17.5244,13.3"/>
- <dia:point val="3.175,14.4"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O7" connection="5"/>
- <dia:connection handle="1" to="O2" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O9">
- <dia:attribute name="obj_pos">
- <dia:point val="17.5244,9.5417"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.12162,9.48832;17.5778,10.6034"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="17.5244,9.5417"/>
- <dia:point val="3.175,10.55"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O7" connection="0"/>
- <dia:connection handle="1" to="O2" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O10">
- <dia:attribute name="obj_pos">
- <dia:point val="22.1887,13.3"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.82132,13.2463;22.2424,14.4537"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="22.1887,13.3"/>
- <dia:point val="7.875,14.4"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O7" connection="7"/>
- <dia:connection handle="1" to="O2" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O11">
- <dia:attribute name="obj_pos">
- <dia:point val="22.1887,9.5417"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.82161,9.48831;22.2421,10.6034"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="22.1887,9.5417"/>
- <dia:point val="7.875,10.55"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O7" connection="2"/>
- <dia:connection handle="1" to="O2" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O12">
- <dia:attribute name="obj_pos">
- <dia:point val="23.23,10.5742"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="23.18,10.5242;24.13,11.4742"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="23.23,10.5742"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O13">
- <dia:attribute name="obj_pos">
- <dia:point val="24.08,10.9992"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.03,10.6388;32.4953,11.3624"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="24.08,10.9992"/>
- <dia:point val="32.3835,11.0007"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O12" connection="3"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O14">
- <dia:attribute name="obj_pos">
- <dia:point val="25.3454,10.49"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.3454,9.895;29.9904,10.6425"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 1 (source)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="25.3454,10.49"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O15">
- <dia:attribute name="obj_pos">
- <dia:point val="-1.44491,11.6506"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-1.49491,11.6006;-0.54491,12.5506"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-1.44491,11.6506"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O16">
- <dia:attribute name="obj_pos">
- <dia:point val="-9.61991,12.09"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-9.67,11.7149;-1.33311,12.4385"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="-9.61991,12.09"/>
- <dia:point val="-1.44491,12.0756"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O15" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O17">
- <dia:attribute name="obj_pos">
- <dia:point val="-7.39291,11.49"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-7.39291,10.895;-3.58791,11.6425"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 0 (sink)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="-7.39291,11.49"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- </dia:layer>
-</dia:diagram>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg b/Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg
deleted file mode 100644
index 18b0f5de9ed2..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-image-processing-crop.svg
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
-<svg width="43cm" height="10cm" viewBox="-194 128 844 196" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-8" y="130" width="469.774" height="193"/>
- <g>
- <rect style="fill: #ffffff" x="4.5" y="189" width="159" height="104"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="4.5" y="189" width="159" height="104"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="63.5" y="211" width="94" height="77"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="63.5" y="211" width="94" height="77"/>
- </g>
- <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="74.5" y="227.75">
- <tspan x="74.5" y="227.75">sink</tspan>
- <tspan x="74.5" y="243.75">crop</tspan>
- <tspan x="74.5" y="259.75">selection</tspan>
- </text>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="29.5" y="158">
- <tspan x="29.5" y="158"></tspan>
- </text>
- <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="8.53836" y="157.914">
- <tspan x="8.53836" y="157.914">sink media</tspan>
- <tspan x="8.53836" y="173.914">bus format</tspan>
- </text>
- <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="349.774" y="155">
- <tspan x="349.774" y="155">source media</tspan>
- <tspan x="349.774" y="171">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="350.488" y="190.834" width="93.2863" height="75.166"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="350.488" y="190.834" width="93.2863" height="75.166"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="350.488" y1="266" x2="63.5" y2="288"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="350.488" y1="190.834" x2="63.5" y2="211"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="443.774" y1="266" x2="157.5" y2="288"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="443.774" y1="190.834" x2="157.5" y2="211"/>
- <g>
- <ellipse style="fill: #ffffff" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="473.1" cy="219.984" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="481.6" y1="219.984" x2="637.934" y2="220.012"/>
- <polygon style="fill: #000000" points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="506.908" y="209.8">
- <tspan x="506.908" y="209.8">pad 1 (source)</tspan>
- </text>
- <g>
- <ellipse style="fill: #ffffff" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-192.398" y1="241.8" x2="-38.6343" y2="241.529"/>
- <polygon style="fill: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-147.858" y="229.8">
- <tspan x="-147.858" y="229.8">pad 0 (sink)</tspan>
- </text>
-</svg>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-full.dia b/Documentation/DocBook/media/v4l/subdev-image-processing-full.dia
deleted file mode 100644
index a0d782927840..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-image-processing-full.dia
+++ /dev/null
@@ -1,1588 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
- <dia:diagramdata>
- <dia:attribute name="background">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="pagebreak">
- <dia:color val="#000099"/>
- </dia:attribute>
- <dia:attribute name="paper">
- <dia:composite type="paper">
- <dia:attribute name="name">
- <dia:string>#A4#</dia:string>
- </dia:attribute>
- <dia:attribute name="tmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="bmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="lmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="rmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="is_portrait">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="scaling">
- <dia:real val="0.49000000953674316"/>
- </dia:attribute>
- <dia:attribute name="fitto">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="grid">
- <dia:composite type="grid">
- <dia:attribute name="width_x">
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="width_y">
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="visible_x">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="visible_y">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:composite type="color"/>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#d8e5e5"/>
- </dia:attribute>
- <dia:attribute name="guides">
- <dia:composite type="guides">
- <dia:attribute name="hguides"/>
- <dia:attribute name="vguides"/>
- </dia:composite>
- </dia:attribute>
- </dia:diagramdata>
- <dia:layer name="Background" visible="true" active="true">
- <dia:object type="Standard - Box" version="0" id="O0">
- <dia:attribute name="obj_pos">
- <dia:point val="15.945,6.45"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="15.895,6.4;26.4,18.95"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="15.945,6.45"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="10.404999999254942"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="12.449999999999992"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#ff765a"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O1">
- <dia:attribute name="obj_pos">
- <dia:point val="-0.1,3.65"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-0.15,3.6;40.25,20.85"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-0.1,3.65"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="40.300000000000004"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="17.149999999999999"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O2">
- <dia:attribute name="obj_pos">
- <dia:point val="-1.05,7.9106"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-1.1,7.8606;-0.15,8.8106"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-1.05,7.9106"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O3">
- <dia:attribute name="obj_pos">
- <dia:point val="40.3366,9.8342"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="40.2866,9.7842;41.2366,10.7342"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="40.3366,9.8342"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O4">
- <dia:attribute name="obj_pos">
- <dia:point val="-9.225,8.35"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-9.27509,7.97487;-0.938197,8.69848"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="-9.225,8.35"/>
- <dia:point val="-1.05,8.3356"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O2" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O5">
- <dia:attribute name="obj_pos">
- <dia:point val="41.1866,10.2592"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="41.1366,9.89879;49.6019,10.6224"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="41.1866,10.2592"/>
- <dia:point val="49.4901,10.2607"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O3" connection="3"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O6">
- <dia:attribute name="obj_pos">
- <dia:point val="-6.998,7.75"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-6.998,7.155;-3.193,7.9025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 0 (sink)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="-6.998,7.75"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O7">
- <dia:attribute name="obj_pos">
- <dia:point val="42.452,9.75"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="42.452,9.155;47.097,9.9025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 2 (source)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="42.452,9.75"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O8">
- <dia:attribute name="obj_pos">
- <dia:point val="0.275,6"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.225,5.95;8.275,11.25"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="0.275,6"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="7.9499999999999975"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="5.1999999999999975"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O9">
- <dia:attribute name="obj_pos">
- <dia:point val="3.125,6.8"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.075,6.75;7.875,10.7"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="3.125,6.8"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="4.6999999999999975"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.8499999999999979"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O10">
- <dia:attribute name="obj_pos">
- <dia:point val="1.525,4.45"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="1.525,3.855;1.525,4.6025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>##</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="1.525,4.45"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O11">
- <dia:attribute name="obj_pos">
- <dia:point val="0.476918,4.44569"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.476918,3.85069;3.95942,5.39819"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink media
-bus format#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="0.476918,4.44569"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O12">
- <dia:attribute name="obj_pos">
- <dia:point val="16.6822,9.28251"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="16.6322,9.23251;24.9922,17.9564"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="16.6822,9.28251"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="8.2600228398861297"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="8.6238900617957164"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#00ff00"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O13">
- <dia:attribute name="obj_pos">
- <dia:point val="16.6822,17.9064"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.05732,10.5823;16.7499,17.9741"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="16.6822,17.9064"/>
- <dia:point val="3.125,10.65"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O12" connection="5"/>
- <dia:connection handle="1" to="O9" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O14">
- <dia:attribute name="obj_pos">
- <dia:point val="16.6822,9.28251"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3.06681,6.74181;16.7404,9.3407"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="16.6822,9.28251"/>
- <dia:point val="3.125,6.8"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O12" connection="0"/>
- <dia:connection handle="1" to="O9" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O15">
- <dia:attribute name="obj_pos">
- <dia:point val="24.9422,17.9064"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.75945,10.5845;25.0077,17.9719"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="24.9422,17.9064"/>
- <dia:point val="7.825,10.65"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O12" connection="7"/>
- <dia:connection handle="1" to="O9" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O16">
- <dia:attribute name="obj_pos">
- <dia:point val="24.9422,9.28251"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.76834,6.74334;24.9989,9.33917"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="24.9422,9.28251"/>
- <dia:point val="7.825,6.8"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O12" connection="2"/>
- <dia:connection handle="1" to="O9" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O17">
- <dia:attribute name="obj_pos">
- <dia:point val="16.7352,7.47209"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="16.7352,6.87709;22.5602,8.42459"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink compose
-selection (scaling)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="16.7352,7.47209"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#00ff00"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O18">
- <dia:attribute name="obj_pos">
- <dia:point val="20.4661,9.72825"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="20.4161,9.67825;25.5254,13.3509"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="20.4661,9.72825"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.009308462554376"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.5726155970598077"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O19">
- <dia:attribute name="obj_pos">
- <dia:point val="34.475,5.2564"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="34.475,4.6614;38.7975,6.2089"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#source media
-bus format#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="34.475,5.2564"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O20">
- <dia:attribute name="obj_pos">
- <dia:point val="34.4244,8.6917"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="34.3744,8.6417;39.4837,12.3143"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="34.4244,8.6917"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.009308462554376"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.5726155970598077"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O21">
- <dia:attribute name="obj_pos">
- <dia:point val="34.4244,12.2643"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="20.4125,12.2107;34.478,13.3545"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.4244,12.2643"/>
- <dia:point val="20.4661,13.3009"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O20" connection="5"/>
- <dia:connection handle="1" to="O18" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O22">
- <dia:attribute name="obj_pos">
- <dia:point val="34.4244,8.6917"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="20.4125,8.63813;34.478,9.78182"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.4244,8.6917"/>
- <dia:point val="20.4661,9.72825"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O20" connection="0"/>
- <dia:connection handle="1" to="O18" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O23">
- <dia:attribute name="obj_pos">
- <dia:point val="39.4337,12.2643"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.4218,12.2107;39.4873,13.3545"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="39.4337,12.2643"/>
- <dia:point val="25.4754,13.3009"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O20" connection="7"/>
- <dia:connection handle="1" to="O18" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O24">
- <dia:attribute name="obj_pos">
- <dia:point val="39.4337,8.6917"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.4218,8.63813;39.4873,9.78182"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="39.4337,8.6917"/>
- <dia:point val="25.4754,9.72825"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O20" connection="2"/>
- <dia:connection handle="1" to="O18" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O25">
- <dia:attribute name="obj_pos">
- <dia:point val="16.25,5.15"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="16.25,4.555;21.68,6.1025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink compose
-bounds selection#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="16.25,5.15"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#ff765a"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O26">
- <dia:attribute name="obj_pos">
- <dia:point val="-1.02991,16.6506"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-1.07991,16.6006;-0.12991,17.5506"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-1.02991,16.6506"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O27">
- <dia:attribute name="obj_pos">
- <dia:point val="-9.20491,17.09"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-9.255,16.7149;-0.918107,17.4385"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="-9.20491,17.09"/>
- <dia:point val="-1.02991,17.0756"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O26" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O28">
- <dia:attribute name="obj_pos">
- <dia:point val="-6.95,16.45"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-6.95,15.855;-3.145,16.6025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 1 (sink)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="-6.95,16.45"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O29">
- <dia:attribute name="obj_pos">
- <dia:point val="0.390412,14.64"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.340412,14.59;6.045,18.8"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="0.390412,14.64"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.604587512785236"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="4.1099999999999994"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O30">
- <dia:attribute name="obj_pos">
- <dia:point val="2.645,15.74"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="2.595,15.69;5.6,18.3"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="2.645,15.74"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="2.904999999254942"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="2.5100000000000016"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O31">
- <dia:attribute name="obj_pos">
- <dia:point val="1.595,12.99"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="1.595,12.395;1.595,13.1425"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>##</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="1.595,12.99"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O32">
- <dia:attribute name="obj_pos">
- <dia:point val="17.945,12.595"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="2.58596,12.536;18.004,15.799"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="17.945,12.595"/>
- <dia:point val="2.645,15.74"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O36" connection="0"/>
- <dia:connection handle="1" to="O30" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O33">
- <dia:attribute name="obj_pos">
- <dia:point val="17.945,15.8"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="2.58772,15.7427;18.0023,18.3073"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="17.945,15.8"/>
- <dia:point val="2.645,18.25"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O36" connection="5"/>
- <dia:connection handle="1" to="O30" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O34">
- <dia:attribute name="obj_pos">
- <dia:point val="21.7,15.8"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="5.49307,15.7431;21.7569,18.3069"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="21.7,15.8"/>
- <dia:point val="5.55,18.25"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O36" connection="7"/>
- <dia:connection handle="1" to="O30" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O35">
- <dia:attribute name="obj_pos">
- <dia:point val="21.7,12.595"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="5.49136,12.5364;21.7586,15.7986"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="21.7,12.595"/>
- <dia:point val="5.55,15.74"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O36" connection="2"/>
- <dia:connection handle="1" to="O30" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O36">
- <dia:attribute name="obj_pos">
- <dia:point val="17.945,12.595"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="17.895,12.545;21.75,15.85"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="17.945,12.595"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="3.7549999992549452"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.2049999992549427"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#00ff00"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O37">
- <dia:attribute name="obj_pos">
- <dia:point val="22.1631,14.2233"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="22.1131,14.1733;25.45,16.7"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="22.1631,14.2233"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="3.2369000000000021"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="2.4267000000000003"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O38">
- <dia:attribute name="obj_pos">
- <dia:point val="34.6714,16.2367"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="34.6214,16.1867;37.9,18.75"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="34.6714,16.2367"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="3.178600000000003"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="2.4632999999999967"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O39">
- <dia:attribute name="obj_pos">
- <dia:point val="34.6714,18.7"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="22.1057,16.5926;34.7288,18.7574"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.6714,18.7"/>
- <dia:point val="22.1631,16.65"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O38" connection="5"/>
- <dia:connection handle="1" to="O37" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O40">
- <dia:attribute name="obj_pos">
- <dia:point val="34.6714,16.2367"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="22.1058,14.166;34.7287,16.294"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.6714,16.2367"/>
- <dia:point val="22.1631,14.2233"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O38" connection="0"/>
- <dia:connection handle="1" to="O37" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O41">
- <dia:attribute name="obj_pos">
- <dia:point val="37.85,18.7"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.3425,16.5925;37.9075,18.7575"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="37.85,18.7"/>
- <dia:point val="25.4,16.65"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O38" connection="7"/>
- <dia:connection handle="1" to="O37" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O42">
- <dia:attribute name="obj_pos">
- <dia:point val="37.85,16.2367"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.3427,14.166;37.9073,16.294"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="37.85,16.2367"/>
- <dia:point val="25.4,14.2233"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O38" connection="2"/>
- <dia:connection handle="1" to="O37" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O43">
- <dia:attribute name="obj_pos">
- <dia:point val="40.347,16.7742"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="40.297,16.7242;41.247,17.6742"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="40.347,16.7742"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O44">
- <dia:attribute name="obj_pos">
- <dia:point val="41.197,17.1992"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="41.147,16.8388;49.6123,17.5624"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="41.197,17.1992"/>
- <dia:point val="49.5005,17.2007"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O43" connection="3"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O45">
- <dia:attribute name="obj_pos">
- <dia:point val="42.4624,16.69"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="42.4624,16.095;47.1074,16.8425"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 3 (source)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="42.4624,16.69"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O46">
- <dia:attribute name="obj_pos">
- <dia:point val="9.85,4.55"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="9.85,3.955;12.7275,6.3025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink
-crop
-selection#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="9.85,4.55"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O47">
- <dia:attribute name="obj_pos">
- <dia:point val="27.65,4.75"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="27.65,4.155;30.5275,6.5025"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#source
-crop
-selection#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="27.65,4.75"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O48">
- <dia:attribute name="obj_pos">
- <dia:point val="10.55,6.6"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.7135,6.39438;10.6035,7.11605"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="10.55,6.6"/>
- <dia:point val="7.825,6.8"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O9" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O49">
- <dia:attribute name="obj_pos">
- <dia:point val="10.45,6.55"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="5.48029,6.48236;10.5176,15.8387"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="10.45,6.55"/>
- <dia:point val="5.55,15.74"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O30" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O50">
- <dia:attribute name="obj_pos">
- <dia:point val="27.5246,6.66071"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.406,6.59136;27.594,9.82122"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="27.5246,6.66071"/>
- <dia:point val="25.4754,9.72825"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O18" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O51">
- <dia:attribute name="obj_pos">
- <dia:point val="27.5036,6.68935"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="25.2161,6.62775;27.5652,14.331"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="27.5036,6.68935"/>
- <dia:point val="25.4,14.2233"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O37" connection="2"/>
- </dia:connections>
- </dia:object>
- </dia:layer>
-</dia:diagram>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-full.svg b/Documentation/DocBook/media/v4l/subdev-image-processing-full.svg
deleted file mode 100644
index 3322cf4c0093..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-image-processing-full.svg
+++ /dev/null
@@ -1,163 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
-<svg width="59cm" height="18cm" viewBox="-186 71 1178 346" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
- <g>
- <rect style="fill: #ffffff" x="318.9" y="129" width="208.1" height="249"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #ff765a" x="318.9" y="129" width="208.1" height="249"/>
- </g>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-2" y="73" width="806" height="343"/>
- <g>
- <ellipse style="fill: #ffffff" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.5" cy="166.712" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <ellipse style="fill: #ffffff" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.232" cy="205.184" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-184.5" y1="167" x2="-30.7361" y2="166.729"/>
- <polygon style="fill: #000000" points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="823.732" y1="205.184" x2="980.066" y2="205.212"/>
- <polygon style="fill: #000000" points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-139.96" y="155">
- <tspan x="-139.96" y="155">pad 0 (sink)</tspan>
- </text>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="849.04" y="195">
- <tspan x="849.04" y="195">pad 2 (source)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="5.5" y="120" width="159" height="104"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="5.5" y="120" width="159" height="104"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="62.5" y="136" width="94" height="77"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="62.5" y="136" width="94" height="77"/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="30.5" y="89">
- <tspan x="30.5" y="89"></tspan>
- </text>
- <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="9.53836" y="88.9138">
- <tspan x="9.53836" y="88.9138">sink media</tspan>
- <tspan x="9.53836" y="104.914">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="333.644" y="185.65" width="165.2" height="172.478"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="333.644" y="185.65" width="165.2" height="172.478"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="358.128" x2="62.5" y2="213"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="185.65" x2="62.5" y2="136"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="358.128" x2="156.5" y2="213"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="185.65" x2="156.5" y2="136"/>
- <text style="fill: #00ff00;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="334.704" y="149.442">
- <tspan x="334.704" y="149.442">sink compose</tspan>
- <tspan x="334.704" y="165.442">selection (scaling)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="409.322" y="194.565" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="409.322" y="194.565" width="100.186" height="71.4523"/>
- </g>
- <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="689.5" y="105.128">
- <tspan x="689.5" y="105.128">source media</tspan>
- <tspan x="689.5" y="121.128">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="688.488" y="173.834" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="688.488" y="173.834" width="100.186" height="71.4523"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="688.488" y1="245.286" x2="409.322" y2="266.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="688.488" y1="173.834" x2="409.322" y2="194.565"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="788.674" y1="245.286" x2="509.508" y2="266.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="788.674" y1="173.834" x2="509.508" y2="194.565"/>
- <text style="fill: #ff765a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="325" y="103">
- <tspan x="325" y="103">sink compose</tspan>
- <tspan x="325" y="119">bounds selection</tspan>
- </text>
- <g>
- <ellipse style="fill: #ffffff" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-12.0982" cy="341.512" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-184.098" y1="341.8" x2="-30.3343" y2="341.529"/>
- <polygon style="fill: #000000" points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-139" y="329">
- <tspan x="-139" y="329">pad 1 (sink)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="7.80824" y="292.8" width="112.092" height="82.2"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="7.80824" y="292.8" width="112.092" height="82.2"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="52.9" y="314.8" width="58.1" height="50.2"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="52.9" y="314.8" width="58.1" height="50.2"/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="31.9" y="259.8">
- <tspan x="31.9" y="259.8"></tspan>
- </text>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="358.9" y1="251.9" x2="52.9" y2="314.8"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="358.9" y1="316" x2="52.9" y2="365"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="434" y1="316" x2="111" y2="365"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="434" y1="251.9" x2="111" y2="314.8"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="358.9" y="251.9" width="75.1" height="64.1"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="443.262" y="284.466" width="64.738" height="48.534"/>
- <g>
- <rect style="fill: #ffffff" x="693.428" y="324.734" width="63.572" height="49.266"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="693.428" y="324.734" width="63.572" height="49.266"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="693.428" y1="374" x2="443.262" y2="333"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="693.428" y1="324.734" x2="443.262" y2="284.466"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="757" y1="374" x2="508" y2="333"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="757" y1="324.734" x2="508" y2="284.466"/>
- <g>
- <ellipse style="fill: #ffffff" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="815.44" cy="343.984" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="823.94" y1="343.984" x2="980.274" y2="344.012"/>
- <polygon style="fill: #000000" points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="849.248" y="333.8">
- <tspan x="849.248" y="333.8">pad 3 (source)</tspan>
- </text>
- <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="197" y="91">
- <tspan x="197" y="91">sink</tspan>
- <tspan x="197" y="107">crop</tspan>
- <tspan x="197" y="123">selection</tspan>
- </text>
- <text style="fill: #a020f0;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="553" y="95">
- <tspan x="553" y="95">source</tspan>
- <tspan x="553" y="111">crop</tspan>
- <tspan x="553" y="127">selection</tspan>
- </text>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x1="211" y1="132" x2="166.21" y2="135.287"/>
- <polygon style="fill: #0000ff" points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x1="209" y1="131" x2="115.581" y2="306.209"/>
- <polygon style="fill: #0000ff" points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="550.492" y1="133.214" x2="514.916" y2="186.469"/>
- <polygon style="fill: #a020f0" points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="550.072" y1="133.787" x2="510.618" y2="275.089"/>
- <polygon style="fill: #a020f0" points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "/>
- </g>
-</svg>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia b/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia
deleted file mode 100644
index 0cd50a7bda80..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.dia
+++ /dev/null
@@ -1,1152 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<dia:diagram xmlns:dia="http://www.lysator.liu.se/~alla/dia/">
- <dia:diagramdata>
- <dia:attribute name="background">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="pagebreak">
- <dia:color val="#000099"/>
- </dia:attribute>
- <dia:attribute name="paper">
- <dia:composite type="paper">
- <dia:attribute name="name">
- <dia:string>#A4#</dia:string>
- </dia:attribute>
- <dia:attribute name="tmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="bmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="lmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="rmargin">
- <dia:real val="2.8222000598907471"/>
- </dia:attribute>
- <dia:attribute name="is_portrait">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="scaling">
- <dia:real val="0.49000000953674316"/>
- </dia:attribute>
- <dia:attribute name="fitto">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="grid">
- <dia:composite type="grid">
- <dia:attribute name="width_x">
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="width_y">
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="visible_x">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="visible_y">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:composite type="color"/>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#d8e5e5"/>
- </dia:attribute>
- <dia:attribute name="guides">
- <dia:composite type="guides">
- <dia:attribute name="hguides"/>
- <dia:attribute name="vguides"/>
- </dia:composite>
- </dia:attribute>
- </dia:diagramdata>
- <dia:layer name="Background" visible="true" active="true">
- <dia:object type="Standard - Box" version="0" id="O0">
- <dia:attribute name="obj_pos">
- <dia:point val="-0.4,6.5"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-0.45,6.45;39.95,22.9"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-0.4,6.5"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="40.299999999999997"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="16.349999999999998"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O1">
- <dia:attribute name="obj_pos">
- <dia:point val="0.225,9.45"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.175,9.4;8.225,14.7"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="0.225,9.45"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="7.9499999999999975"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="5.1999999999999975"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O2">
- <dia:attribute name="obj_pos">
- <dia:point val="2.475,10.2"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="2.425,10.15;7.225,14.1"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="2.475,10.2"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="4.6999999999999975"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.8499999999999979"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O3">
- <dia:attribute name="obj_pos">
- <dia:point val="3,11.2"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="3,10.605;5.8775,12.9525"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink
-crop
-selection#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="3,11.2"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#0000ff"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O4">
- <dia:attribute name="obj_pos">
- <dia:point val="1.475,7.9"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="1.475,7.305;1.475,8.0525"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>##</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="1.475,7.9"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O5">
- <dia:attribute name="obj_pos">
- <dia:point val="0.426918,7.89569"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="0.426918,7.30069;3.90942,8.84819"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink media
-bus format#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="0.426918,7.89569"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#a52a2a"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O6">
- <dia:attribute name="obj_pos">
- <dia:point val="16.6822,9.28251"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="16.6322,9.23251;24.9922,17.9564"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="16.6822,9.28251"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="8.2600228398861297"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="8.6238900617957164"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#00ff00"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O7">
- <dia:attribute name="obj_pos">
- <dia:point val="16.6822,17.9064"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="2.41365,13.9886;16.7436,17.9678"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="16.6822,17.9064"/>
- <dia:point val="2.475,14.05"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O6" connection="5"/>
- <dia:connection handle="1" to="O2" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O8">
- <dia:attribute name="obj_pos">
- <dia:point val="16.6822,9.28251"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="2.42188,9.22939;16.7353,10.2531"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="16.6822,9.28251"/>
- <dia:point val="2.475,10.2"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O6" connection="0"/>
- <dia:connection handle="1" to="O2" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O9">
- <dia:attribute name="obj_pos">
- <dia:point val="24.9422,17.9064"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.11553,13.9905;25.0017,17.9659"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="24.9422,17.9064"/>
- <dia:point val="7.175,14.05"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O6" connection="7"/>
- <dia:connection handle="1" to="O2" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O10">
- <dia:attribute name="obj_pos">
- <dia:point val="24.9422,9.28251"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="7.12249,9.23;24.9947,10.2525"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="24.9422,9.28251"/>
- <dia:point val="7.175,10.2"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O6" connection="2"/>
- <dia:connection handle="1" to="O2" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O11">
- <dia:attribute name="obj_pos">
- <dia:point val="16.7352,7.47209"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="16.7352,6.87709;22.5602,8.42459"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#sink compose
-selection (scaling)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="16.7352,7.47209"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#00ff00"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O12">
- <dia:attribute name="obj_pos">
- <dia:point val="19.1161,9.97825"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="19.0661,9.92825;24.1754,13.6009"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="19.1161,9.97825"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.009308462554376"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.5726155970598077"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O13">
- <dia:attribute name="obj_pos">
- <dia:point val="27.1661,7.47209"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="27.1661,6.87709;30.0436,9.22459"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#source
-crop
-selection#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="27.1661,7.47209"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O14">
- <dia:attribute name="obj_pos">
- <dia:point val="34.575,7.8564"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="34.575,7.2614;38.8975,8.8089"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#source media
-bus format#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="34.575,7.8564"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O15">
- <dia:attribute name="obj_pos">
- <dia:point val="34.5244,11.2917"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="34.4744,11.2417;39.5837,14.9143"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="34.5244,11.2917"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.009308462554376"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.5726155970598077"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O16">
- <dia:attribute name="obj_pos">
- <dia:point val="34.5244,14.8643"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="19.062,13.4968;34.5785,14.9184"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.5244,14.8643"/>
- <dia:point val="19.1161,13.5509"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O15" connection="5"/>
- <dia:connection handle="1" to="O12" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O17">
- <dia:attribute name="obj_pos">
- <dia:point val="34.5244,11.2917"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="19.062,9.92418;34.5785,11.3458"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.5244,11.2917"/>
- <dia:point val="19.1161,9.97825"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O15" connection="0"/>
- <dia:connection handle="1" to="O12" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O18">
- <dia:attribute name="obj_pos">
- <dia:point val="39.5337,14.8643"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.0713,13.4968;39.5878,14.9184"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="39.5337,14.8643"/>
- <dia:point val="24.1254,13.5509"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O15" connection="7"/>
- <dia:connection handle="1" to="O12" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O19">
- <dia:attribute name="obj_pos">
- <dia:point val="39.5337,11.2917"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.0713,9.92418;39.5878,11.3458"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="39.5337,11.2917"/>
- <dia:point val="24.1254,9.97825"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O15" connection="2"/>
- <dia:connection handle="1" to="O12" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O20">
- <dia:attribute name="obj_pos">
- <dia:point val="39.98,12.0742"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="39.93,12.0242;40.88,12.9742"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="39.98,12.0742"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O21">
- <dia:attribute name="obj_pos">
- <dia:point val="40.83,12.4992"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="40.78,12.1388;49.2453,12.8624"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="40.83,12.4992"/>
- <dia:point val="49.1335,12.5007"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O20" connection="3"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O22">
- <dia:attribute name="obj_pos">
- <dia:point val="42.0954,11.99"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="42.0954,11.395;46.7404,12.1425"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 1 (source)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="42.0954,11.99"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O23">
- <dia:attribute name="obj_pos">
- <dia:point val="-1.44491,11.6506"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-1.49491,11.6006;-0.54491,12.5506"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="-1.44491,11.6506"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O24">
- <dia:attribute name="obj_pos">
- <dia:point val="-9.61991,12.09"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-9.67,11.7149;-1.33311,12.4385"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="-9.61991,12.09"/>
- <dia:point val="-1.44491,12.0756"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O23" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O25">
- <dia:attribute name="obj_pos">
- <dia:point val="-7.39291,11.49"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="-7.39291,10.895;-3.58791,11.6425"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 0 (sink)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="-7.39291,11.49"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O26">
- <dia:attribute name="obj_pos">
- <dia:point val="19.4911,13.8333"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="19.4411,13.7833;24.5504,17.4559"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="19.4911,13.8333"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.009308462554376"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.5726155970598077"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="false"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Box" version="0" id="O27">
- <dia:attribute name="obj_pos">
- <dia:point val="34.4994,17.2967"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="34.4494,17.2467;39.5587,20.9193"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="34.4994,17.2967"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="5.009308462554376"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="3.5726155970598077"/>
- </dia:attribute>
- <dia:attribute name="border_width">
- <dia:real val="0.10000000149011612"/>
- </dia:attribute>
- <dia:attribute name="border_color">
- <dia:color val="#8b6914"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O28">
- <dia:attribute name="obj_pos">
- <dia:point val="34.4994,20.8693"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="19.4311,17.3459;34.5594,20.9293"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.4994,20.8693"/>
- <dia:point val="19.4911,17.4059"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O27" connection="5"/>
- <dia:connection handle="1" to="O26" connection="5"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O29">
- <dia:attribute name="obj_pos">
- <dia:point val="34.4994,17.2967"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="19.4311,13.7733;34.5594,17.3567"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="34.4994,17.2967"/>
- <dia:point val="19.4911,13.8333"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O27" connection="0"/>
- <dia:connection handle="1" to="O26" connection="0"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O30">
- <dia:attribute name="obj_pos">
- <dia:point val="39.5087,20.8693"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.4404,17.3459;39.5687,20.9293"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="39.5087,20.8693"/>
- <dia:point val="24.5004,17.4059"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O27" connection="7"/>
- <dia:connection handle="1" to="O26" connection="7"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O31">
- <dia:attribute name="obj_pos">
- <dia:point val="39.5087,17.2967"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.4404,13.7733;39.5687,17.3567"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="39.5087,17.2967"/>
- <dia:point val="24.5004,13.8333"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#e60505"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="4"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O27" connection="2"/>
- <dia:connection handle="1" to="O26" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Geometric - Perfect Circle" version="1" id="O32">
- <dia:attribute name="obj_pos">
- <dia:point val="39.855,18.7792"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="39.805,18.7292;40.755,19.6792"/>
- </dia:attribute>
- <dia:attribute name="meta">
- <dia:composite type="dict"/>
- </dia:attribute>
- <dia:attribute name="elem_corner">
- <dia:point val="39.855,18.7792"/>
- </dia:attribute>
- <dia:attribute name="elem_width">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="elem_height">
- <dia:real val="0.84999999999999787"/>
- </dia:attribute>
- <dia:attribute name="line_width">
- <dia:real val="0.10000000000000001"/>
- </dia:attribute>
- <dia:attribute name="line_colour">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="fill_colour">
- <dia:color val="#ffffff"/>
- </dia:attribute>
- <dia:attribute name="show_background">
- <dia:boolean val="true"/>
- </dia:attribute>
- <dia:attribute name="line_style">
- <dia:enum val="0"/>
- <dia:real val="1"/>
- </dia:attribute>
- <dia:attribute name="flip_horizontal">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="flip_vertical">
- <dia:boolean val="false"/>
- </dia:attribute>
- <dia:attribute name="subscale">
- <dia:real val="1"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O33">
- <dia:attribute name="obj_pos">
- <dia:point val="40.705,19.2042"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="40.655,18.8438;49.1203,19.5674"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="40.705,19.2042"/>
- <dia:point val="49.0085,19.2057"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="0" to="O32" connection="3"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Text" version="1" id="O34">
- <dia:attribute name="obj_pos">
- <dia:point val="41.9704,18.695"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="41.9704,18.1;46.6154,18.8475"/>
- </dia:attribute>
- <dia:attribute name="text">
- <dia:composite type="text">
- <dia:attribute name="string">
- <dia:string>#pad 2 (source)#</dia:string>
- </dia:attribute>
- <dia:attribute name="font">
- <dia:font family="sans" style="0" name="Helvetica"/>
- </dia:attribute>
- <dia:attribute name="height">
- <dia:real val="0.80000000000000004"/>
- </dia:attribute>
- <dia:attribute name="pos">
- <dia:point val="41.9704,18.695"/>
- </dia:attribute>
- <dia:attribute name="color">
- <dia:color val="#000000"/>
- </dia:attribute>
- <dia:attribute name="alignment">
- <dia:enum val="0"/>
- </dia:attribute>
- </dia:composite>
- </dia:attribute>
- <dia:attribute name="valign">
- <dia:enum val="3"/>
- </dia:attribute>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O35">
- <dia:attribute name="obj_pos">
- <dia:point val="27.3,9.55"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.0146,9.49376;27.3562,10.255"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="27.3,9.55"/>
- <dia:point val="24.1254,9.97825"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O12" connection="2"/>
- </dia:connections>
- </dia:object>
- <dia:object type="Standard - Line" version="0" id="O36">
- <dia:attribute name="obj_pos">
- <dia:point val="27.3454,9.53624"/>
- </dia:attribute>
- <dia:attribute name="obj_bb">
- <dia:rectangle val="24.4311,9.46695;27.4147,13.9265"/>
- </dia:attribute>
- <dia:attribute name="conn_endpoints">
- <dia:point val="27.3454,9.53624"/>
- <dia:point val="24.5004,13.8333"/>
- </dia:attribute>
- <dia:attribute name="numcp">
- <dia:int val="1"/>
- </dia:attribute>
- <dia:attribute name="line_color">
- <dia:color val="#a020f0"/>
- </dia:attribute>
- <dia:attribute name="end_arrow">
- <dia:enum val="22"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_length">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:attribute name="end_arrow_width">
- <dia:real val="0.5"/>
- </dia:attribute>
- <dia:connections>
- <dia:connection handle="1" to="O26" connection="2"/>
- </dia:connections>
- </dia:object>
- </dia:layer>
-</dia:diagram>
diff --git a/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg
deleted file mode 100644
index 2340c0f8bc92..000000000000
--- a/Documentation/DocBook/media/v4l/subdev-image-processing-scaling-multi-source.svg
+++ /dev/null
@@ -1,116 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/PR-SVG-20010719/DTD/svg10.dtd">
-<svg width="59cm" height="17cm" viewBox="-194 128 1179 330" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x="-8" y="130" width="806" height="327"/>
- <g>
- <rect style="fill: #ffffff" x="4.5" y="189" width="159" height="104"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a52a2a" x="4.5" y="189" width="159" height="104"/>
- </g>
- <g>
- <rect style="fill: #ffffff" x="49.5" y="204" width="94" height="77"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #0000ff" x="49.5" y="204" width="94" height="77"/>
- </g>
- <text style="fill: #0000ff;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="60" y="224">
- <tspan x="60" y="224">sink</tspan>
- <tspan x="60" y="240">crop</tspan>
- <tspan x="60" y="256">selection</tspan>
- </text>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="29.5" y="158">
- <tspan x="29.5" y="158"></tspan>
- </text>
- <text style="fill: #a52a2a;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="8.53836" y="157.914">
- <tspan x="8.53836" y="157.914">sink media</tspan>
- <tspan x="8.53836" y="173.914">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="333.644" y="185.65" width="165.2" height="172.478"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #00ff00" x="333.644" y="185.65" width="165.2" height="172.478"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="358.128" x2="49.5" y2="281"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="333.644" y1="185.65" x2="49.5" y2="204"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="358.128" x2="143.5" y2="281"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="498.844" y1="185.65" x2="143.5" y2="204"/>
- <text style="fill: #00ff00;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="334.704" y="149.442">
- <tspan x="334.704" y="149.442">sink compose</tspan>
- <tspan x="334.704" y="165.442">selection (scaling)</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="382.322" y="199.565" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="382.322" y="199.565" width="100.186" height="71.4523"/>
- </g>
- <text style="fill: #a020f0;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="543.322" y="149.442">
- <tspan x="543.322" y="149.442">source</tspan>
- <tspan x="543.322" y="165.442">crop</tspan>
- <tspan x="543.322" y="181.442">selection</tspan>
- </text>
- <text style="fill: #8b6914;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="691.5" y="157.128">
- <tspan x="691.5" y="157.128">source media</tspan>
- <tspan x="691.5" y="173.128">bus format</tspan>
- </text>
- <g>
- <rect style="fill: #ffffff" x="690.488" y="225.834" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="690.488" y="225.834" width="100.186" height="71.4523"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="690.488" y1="297.286" x2="382.322" y2="271.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="690.488" y1="225.834" x2="382.322" y2="199.565"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.674" y1="297.286" x2="482.508" y2="271.018"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.674" y1="225.834" x2="482.508" y2="199.565"/>
- <g>
- <ellipse style="fill: #ffffff" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="808.1" cy="249.984" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="816.6" y1="249.984" x2="972.934" y2="250.012"/>
- <polygon style="fill: #000000" points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="841.908" y="239.8">
- <tspan x="841.908" y="239.8">pad 1 (source)</tspan>
- </text>
- <g>
- <ellipse style="fill: #ffffff" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="-20.3982" cy="241.512" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="-192.398" y1="241.8" x2="-38.6343" y2="241.529"/>
- <polygon style="fill: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="-147.858" y="229.8">
- <tspan x="-147.858" y="229.8">pad 0 (sink)</tspan>
- </text>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x="389.822" y="276.666" width="100.186" height="71.4523"/>
- <g>
- <rect style="fill: #ffffff" x="689.988" y="345.934" width="100.186" height="71.4523"/>
- <rect style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #8b6914" x="689.988" y="345.934" width="100.186" height="71.4523"/>
- </g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="689.988" y1="417.386" x2="389.822" y2="348.118"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="689.988" y1="345.934" x2="389.822" y2="276.666"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.174" y1="417.386" x2="490.008" y2="348.118"/>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke-dasharray: 4; stroke: #e60505" x1="790.174" y1="345.934" x2="490.008" y2="276.666"/>
- <g>
- <ellipse style="fill: #ffffff" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
- <ellipse style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" cx="805.6" cy="384.084" rx="8.5" ry="8.5"/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" x1="814.1" y1="384.084" x2="970.434" y2="384.112"/>
- <polygon style="fill: #000000" points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #000000" points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "/>
- </g>
- <text style="fill: #000000;text-anchor:start;font-size:12.8;font-family:sanserif;font-style:normal;font-weight:normal" x="839.408" y="373.9">
- <tspan x="839.408" y="373.9">pad 2 (source)</tspan>
- </text>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="546" y1="191" x2="492.157" y2="198.263"/>
- <polygon style="fill: #a020f0" points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "/>
- </g>
- <g>
- <line style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" x1="546.908" y1="190.725" x2="495.383" y2="268.548"/>
- <polygon style="fill: #a020f0" points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "/>
- <polygon style="fill: none; fill-opacity:0; stroke-width: 2; stroke: #a020f0" points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "/>
- </g>
-</svg>
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
deleted file mode 100644
index 42e626d6c936..000000000000
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ /dev/null
@@ -1,728 +0,0 @@
- <partinfo>
- <authorgroup>
- <author>
- <firstname>Michael</firstname>
- <surname>Schimek</surname>
- <othername role="mi">H</othername>
- <affiliation>
- <address>
- <email>mschimek@gmx.at</email>
- </address>
- </affiliation>
- </author>
-
- <author>
- <firstname>Bill</firstname>
- <surname>Dirks</surname>
- <!-- Commented until Bill opts in to be spammed.
- <affiliation>
- <address>
- <email>bill@thedirks.org</email>
- </address>
- </affiliation> -->
- <contrib>Original author of the V4L2 API and
-documentation.</contrib>
- </author>
-
- <author>
- <firstname>Hans</firstname>
- <surname>Verkuil</surname>
- <contrib>Designed and documented the VIDIOC_LOG_STATUS ioctl,
-the extended control ioctls, major parts of the sliced VBI API, the
-MPEG encoder and decoder APIs and the DV Timings API.</contrib>
- <affiliation>
- <address>
- <email>hverkuil@xs4all.nl</email>
- </address>
- </affiliation>
- </author>
-
- <author>
- <firstname>Martin</firstname>
- <surname>Rubli</surname>
- <!--
- <affiliation>
- <address>
- <email>martin_rubli@logitech.com</email>
- </address>
- </affiliation> -->
- <contrib>Designed and documented the VIDIOC_ENUM_FRAMESIZES
-and VIDIOC_ENUM_FRAMEINTERVALS ioctls.</contrib>
- </author>
-
- <author>
- <firstname>Andy</firstname>
- <surname>Walls</surname>
- <contrib>Documented the fielded V4L2_MPEG_STREAM_VBI_FMT_IVTV
-MPEG stream embedded, sliced VBI data format in this specification.
-</contrib>
- <affiliation>
- <address>
- <email>awalls@md.metrocast.net</email>
- </address>
- </affiliation>
- </author>
-
- <author>
- <firstname>Mauro</firstname>
- <surname>Carvalho Chehab</surname>
- <contrib>Documented libv4l, designed and added v4l2grab example,
-Remote Controller chapter.</contrib>
- <affiliation>
- <address>
- <email>m.chehab@samsung.com</email>
- </address>
- </affiliation>
- </author>
-
- <author>
- <firstname>Muralidharan</firstname>
- <surname>Karicheri</surname>
- <contrib>Documented the Digital Video timings API.</contrib>
- <affiliation>
- <address>
- <email>m-karicheri2@ti.com</email>
- </address>
- </affiliation>
- </author>
-
- <author>
- <firstname>Pawel</firstname>
- <surname>Osciak</surname>
- <contrib>Designed and documented the multi-planar API.</contrib>
- <affiliation>
- <address>
- <email>pawel AT osciak.com</email>
- </address>
- </affiliation>
- </author>
-
- <author>
- <firstname>Sakari</firstname>
- <surname>Ailus</surname>
- <contrib>Subdev selections API.</contrib>
- <affiliation>
- <address>
- <email>sakari.ailus@iki.fi</email>
- </address>
- </affiliation>
- </author>
- <author>
- <firstname>Antti</firstname>
- <surname>Palosaari</surname>
- <contrib>SDR API.</contrib>
- <affiliation>
- <address>
- <email>crope@iki.fi</email>
- </address>
- </affiliation>
- </author>
- </authorgroup>
-
- <copyright>
- <year>1999</year>
- <year>2000</year>
- <year>2001</year>
- <year>2002</year>
- <year>2003</year>
- <year>2004</year>
- <year>2005</year>
- <year>2006</year>
- <year>2007</year>
- <year>2008</year>
- <year>2009</year>
- <year>2010</year>
- <year>2011</year>
- <year>2012</year>
- <year>2013</year>
- <year>2014</year>
- <year>2015</year>
- <holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
-Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab,
- Pawel Osciak</holder>
- </copyright>
- <legalnotice>
- <para>Except when explicitly stated as GPL, programming examples within
- this part can be used and distributed without restrictions.</para>
- </legalnotice>
- <revhistory>
- <!-- Put document revisions here, newest first. -->
- <!-- API revisions (changes and additions of defines, enums,
-structs, ioctls) must be noted in more detail in the history chapter
-(compat.xml), along with the possible impact on existing drivers and
-applications. -->
- <revision>
- <revnumber>4.5</revnumber>
- <date>2015-10-29</date>
- <authorinitials>rr</authorinitials>
- <revremark>Extended VIDIOC_G_EXT_CTRLS. Replaced ctrl_class with a new
-union of ctrl_class and which; the which field selects whether the current or
-the default value of the control is returned.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>4.4</revnumber>
- <date>2015-05-26</date>
- <authorinitials>ap</authorinitials>
- <revremark>Renamed V4L2_TUNER_ADC to V4L2_TUNER_SDR.
-Added V4L2_CID_RF_TUNER_RF_GAIN control.
-Added transmitter support for Software Defined Radio (SDR) Interface.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>4.1</revnumber>
- <date>2015-02-13</date>
- <authorinitials>mcc</authorinitials>
- <revremark>Fix documentation for media controller device nodes and add support for DVB device nodes.
-Add support for Tuner sub-device.
- </revremark>
- </revision>
- <revision>
- <revnumber>3.19</revnumber>
- <date>2014-12-05</date>
- <authorinitials>hv</authorinitials>
- <revremark>Rewrote Colorspace chapter, added new &v4l2-ycbcr-encoding; and &v4l2-quantization; fields
-to &v4l2-pix-format;, &v4l2-pix-format-mplane; and &v4l2-mbus-framefmt;.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.17</revnumber>
- <date>2014-08-04</date>
- <authorinitials>lp, hv</authorinitials>
- <revremark>Extended &v4l2-pix-format;. Added format flags. Added compound control types
-and VIDIOC_QUERY_EXT_CTRL.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.15</revnumber>
- <date>2014-02-03</date>
- <authorinitials>hv, ap</authorinitials>
- <revremark>Update several sections of "Common API Elements": "Opening and Closing Devices"
-"Querying Capabilities", "Application Priority", "Video Inputs and Outputs", "Audio Inputs and Outputs"
-"Tuners and Modulators", "Video Standards" and "Digital Video (DV) Timings". Added SDR API.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.14</revnumber>
- <date>2013-11-25</date>
- <authorinitials>rr</authorinitials>
- <revremark>Set width and height as unsigned on v4l2_rect.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.11</revnumber>
- <date>2013-05-26</date>
- <authorinitials>hv</authorinitials>
- <revremark>Remove obsolete VIDIOC_DBG_G_CHIP_IDENT ioctl.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.10</revnumber>
- <date>2013-03-25</date>
- <authorinitials>hv</authorinitials>
- <revremark>Remove obsolete and unused DV_PRESET ioctls:
- VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET, VIDIOC_QUERY_DV_PRESET and
- VIDIOC_ENUM_DV_PRESET. Remove the related v4l2_input/output capability
- flags V4L2_IN_CAP_PRESETS and V4L2_OUT_CAP_PRESETS. Added VIDIOC_DBG_G_CHIP_INFO.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.9</revnumber>
- <date>2012-12-03</date>
- <authorinitials>sa, sn</authorinitials>
- <revremark>Added timestamp types to v4l2_buffer.
- Added V4L2_EVENT_CTRL_CH_RANGE control event changes flag.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.6</revnumber>
- <date>2012-07-02</date>
- <authorinitials>hv</authorinitials>
- <revremark>Added VIDIOC_ENUM_FREQ_BANDS.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.5</revnumber>
- <date>2012-05-07</date>
- <authorinitials>sa, sn, hv</authorinitials>
- <revremark>Added V4L2_CTRL_TYPE_INTEGER_MENU and V4L2 subdev
- selections API. Improved the description of V4L2_CID_COLORFX
- control, added V4L2_CID_COLORFX_CBCR control.
- Added camera controls V4L2_CID_AUTO_EXPOSURE_BIAS,
- V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE, V4L2_CID_IMAGE_STABILIZATION,
- V4L2_CID_ISO_SENSITIVITY, V4L2_CID_ISO_SENSITIVITY_AUTO,
- V4L2_CID_EXPOSURE_METERING, V4L2_CID_SCENE_MODE,
- V4L2_CID_3A_LOCK, V4L2_CID_AUTO_FOCUS_START,
- V4L2_CID_AUTO_FOCUS_STOP, V4L2_CID_AUTO_FOCUS_STATUS
- and V4L2_CID_AUTO_FOCUS_RANGE.
- Added VIDIOC_ENUM_DV_TIMINGS, VIDIOC_QUERY_DV_TIMINGS and
- VIDIOC_DV_TIMINGS_CAP.
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.4</revnumber>
- <date>2012-01-25</date>
- <authorinitials>sn</authorinitials>
- <revremark>Added <link linkend="jpeg-controls">JPEG compression
- control class.</link>
- </revremark>
- </revision>
-
- <revision>
- <revnumber>3.3</revnumber>
- <date>2012-01-11</date>
- <authorinitials>hv</authorinitials>
- <revremark>Added device_caps field to struct v4l2_capabilities.</revremark>
- </revision>
-
- <revision>
- <revnumber>3.2</revnumber>
- <date>2011-08-26</date>
- <authorinitials>hv</authorinitials>
- <revremark>Added V4L2_CTRL_FLAG_VOLATILE.</revremark>
- </revision>
-
- <revision>
- <revnumber>3.1</revnumber>
- <date>2011-06-27</date>
- <authorinitials>mcc, po, hv</authorinitials>
- <revremark>Documented that VIDIOC_QUERYCAP now returns a per-subsystem version instead of a per-driver one.
- Standardized an error code for invalid ioctls.
- Added V4L2_CTRL_TYPE_BITMASK.</revremark>
- </revision>
-
- <revision>
- <revnumber>2.6.39</revnumber>
- <date>2011-03-01</date>
- <authorinitials>mcc, po</authorinitials>
- <revremark>Removed VIDIOC_*_OLD from videodev2.h header and update it to reflect latest changes. Added the <link linkend="planar-apis">multi-planar API</link>.</revremark>
- </revision>
-
- <revision>
- <revnumber>2.6.37</revnumber>
- <date>2010-08-06</date>
- <authorinitials>hv</authorinitials>
- <revremark>Removed obsolete vtx (videotext) API.</revremark>
- </revision>
-
- <revision>
- <revnumber>2.6.33</revnumber>
- <date>2009-12-03</date>
- <authorinitials>mk</authorinitials>
- <revremark>Added documentation for the Digital Video timings API.</revremark>
- </revision>
-
- <revision>
- <revnumber>2.6.32</revnumber>
- <date>2009-08-31</date>
- <authorinitials>mcc</authorinitials>
- <revremark>Now, revisions will match the kernel version where
-the V4L2 API changes will be used by the Linux Kernel.
-Also added Remote Controller chapter.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.29</revnumber>
- <date>2009-08-26</date>
- <authorinitials>ev</authorinitials>
- <revremark>Added documentation for string controls and for FM Transmitter controls.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.28</revnumber>
- <date>2009-08-26</date>
- <authorinitials>gl</authorinitials>
- <revremark>Added V4L2_CID_BAND_STOP_FILTER documentation.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.27</revnumber>
- <date>2009-08-15</date>
- <authorinitials>mcc</authorinitials>
- <revremark>Added libv4l and Remote Controller documentation;
-added v4l2grab and keytable application examples.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.26</revnumber>
- <date>2009-07-23</date>
- <authorinitials>hv</authorinitials>
- <revremark>Finalized the RDS capture API. Added modulator and RDS encoder
-capabilities. Added support for string controls.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.25</revnumber>
- <date>2009-01-18</date>
- <authorinitials>hv</authorinitials>
- <revremark>Added pixel formats VYUY, NV16 and NV61, and changed
-the debug ioctls VIDIOC_DBG_G/S_REGISTER and VIDIOC_DBG_G_CHIP_IDENT.
-Added camera controls V4L2_CID_ZOOM_ABSOLUTE, V4L2_CID_ZOOM_RELATIVE,
-V4L2_CID_ZOOM_CONTINUOUS and V4L2_CID_PRIVACY.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.24</revnumber>
- <date>2008-03-04</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Added pixel formats Y16 and SBGGR16, new controls
-and a camera controls class. Removed VIDIOC_G/S_MPEGCOMP.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.23</revnumber>
- <date>2007-08-30</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Fixed a typo in VIDIOC_DBG_G/S_REGISTER.
-Clarified the byte order of packed pixel formats.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.22</revnumber>
- <date>2007-08-29</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Added the Video Output Overlay interface, new MPEG
-controls, V4L2_FIELD_INTERLACED_TB and V4L2_FIELD_INTERLACED_BT,
-VIDIOC_DBG_G/S_REGISTER, VIDIOC_(TRY_)ENCODER_CMD,
-VIDIOC_G_CHIP_IDENT, VIDIOC_G_ENC_INDEX, new pixel formats.
-Clarifications in the cropping chapter, about RGB pixel formats, the
-mmap(), poll(), select(), read() and write() functions. Typographical
-fixes.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.21</revnumber>
- <date>2006-12-19</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Fixed a link in the VIDIOC_G_EXT_CTRLS section.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.20</revnumber>
- <date>2006-11-24</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Clarified the purpose of the audioset field in
-struct v4l2_input and v4l2_output.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.19</revnumber>
- <date>2006-10-19</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Documented V4L2_PIX_FMT_RGB444.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.18</revnumber>
- <date>2006-10-18</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Added the description of extended controls by Hans
-Verkuil. Linked V4L2_PIX_FMT_MPEG to V4L2_CID_MPEG_STREAM_TYPE.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.17</revnumber>
- <date>2006-10-12</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Corrected V4L2_PIX_FMT_HM12 description.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.16</revnumber>
- <date>2006-10-08</date>
- <authorinitials>mhs</authorinitials>
- <revremark>VIDIOC_ENUM_FRAMESIZES and
-VIDIOC_ENUM_FRAMEINTERVALS are now part of the API.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.15</revnumber>
- <date>2006-09-23</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Cleaned up the bibliography, added BT.653 and
-BT.1119. capture.c/start_capturing() for user pointer I/O did not
-initialize the buffer index. Documented the V4L MPEG and MJPEG
-VID_TYPEs and V4L2_PIX_FMT_SBGGR8. Updated the list of reserved pixel
-formats. See the history chapter for API changes.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.14</revnumber>
- <date>2006-09-14</date>
- <authorinitials>mr</authorinitials>
- <revremark>Added VIDIOC_ENUM_FRAMESIZES and
-VIDIOC_ENUM_FRAMEINTERVALS proposal for frame format enumeration of
-digital devices.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.13</revnumber>
- <date>2006-04-07</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Corrected the description of struct v4l2_window
-clips. New V4L2_STD_ and V4L2_TUNER_MODE_LANG1_LANG2
-defines.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.12</revnumber>
- <date>2006-02-03</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Corrected the description of struct
-v4l2_captureparm and v4l2_outputparm.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.11</revnumber>
- <date>2006-01-27</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Improved the description of struct
-v4l2_tuner.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.10</revnumber>
- <date>2006-01-10</date>
- <authorinitials>mhs</authorinitials>
- <revremark>VIDIOC_G_INPUT and VIDIOC_S_PARM
-clarifications.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.9</revnumber>
- <date>2005-11-27</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Improved the 525 line numbering diagram. Hans
-Verkuil and I rewrote the sliced VBI section. He also contributed a
-VIDIOC_LOG_STATUS page. Fixed VIDIOC_S_STD call in the video standard
-selection example. Various updates.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.8</revnumber>
- <date>2004-10-04</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Somehow a piece of junk slipped into the capture
-example, removed.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.7</revnumber>
- <date>2004-09-19</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Fixed video standard selection, control
-enumeration, downscaling and aspect example. Added read and user
-pointer i/o to video capture example.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.6</revnumber>
- <date>2004-08-01</date>
- <authorinitials>mhs</authorinitials>
- <revremark>v4l2_buffer changes, added video capture example,
-various corrections.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.5</revnumber>
- <date>2003-11-05</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Pixel format erratum.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.4</revnumber>
- <date>2003-09-17</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Corrected source and Makefile to generate a PDF.
-SGML fixes. Added latest API changes. Closed gaps in the history
-chapter.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.3</revnumber>
- <date>2003-02-05</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Another draft, more corrections.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.2</revnumber>
- <date>2003-01-15</date>
- <authorinitials>mhs</authorinitials>
- <revremark>Second draft, with corrections pointed out by Gerd
-Knorr.</revremark>
- </revision>
-
- <revision>
- <revnumber>0.1</revnumber>
- <date>2002-12-01</date>
- <authorinitials>mhs</authorinitials>
- <revremark>First draft, based on documentation by Bill Dirks
-and discussions on the V4L mailing list.</revremark>
- </revision>
- </revhistory>
-</partinfo>
-
-<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 4.5</subtitle>
-
- <chapter id="common">
- &sub-common;
- </chapter>
-
- <chapter id="pixfmt">
- &sub-pixfmt;
- </chapter>
-
- <chapter id="io">
- &sub-io;
- </chapter>
-
- <chapter id="devices">
- <title>Interfaces</title>
-
- <section id="capture"> &sub-dev-capture; </section>
- <section id="overlay"> &sub-dev-overlay; </section>
- <section id="output"> &sub-dev-output; </section>
- <section id="osd"> &sub-dev-osd; </section>
- <section id="codec"> &sub-dev-codec; </section>
- <section id="effect"> &sub-dev-effect; </section>
- <section id="raw-vbi"> &sub-dev-raw-vbi; </section>
- <section id="sliced"> &sub-dev-sliced-vbi; </section>
- <section id="ttx"> &sub-dev-teletext; </section>
- <section id="radio"> &sub-dev-radio; </section>
- <section id="rds"> &sub-dev-rds; </section>
- <section id="sdr"> &sub-dev-sdr; </section>
- <section id="event"> &sub-dev-event; </section>
- <section id="subdev"> &sub-dev-subdev; </section>
- </chapter>
-
- <chapter id="driver">
- &sub-driver;
- </chapter>
-
- <chapter id="libv4l">
- &sub-libv4l;
- </chapter>
-
- <chapter id="compat">
- &sub-compat;
- </chapter>
-
- <appendix id="user-func">
- <title>Function Reference</title>
-
- <!-- Keep this alphabetically sorted. -->
-
- &sub-close;
- &sub-ioctl;
- <!-- All ioctls go here. -->
- &sub-create-bufs;
- &sub-cropcap;
- &sub-dbg-g-chip-info;
- &sub-dbg-g-register;
- &sub-decoder-cmd;
- &sub-dqevent;
- &sub-dv-timings-cap;
- &sub-encoder-cmd;
- &sub-enumaudio;
- &sub-enumaudioout;
- &sub-enum-dv-timings;
- &sub-enum-fmt;
- &sub-enum-framesizes;
- &sub-enum-frameintervals;
- &sub-enum-freq-bands;
- &sub-enuminput;
- &sub-enumoutput;
- &sub-enumstd;
- &sub-expbuf;
- &sub-g-audio;
- &sub-g-audioout;
- &sub-g-crop;
- &sub-g-ctrl;
- &sub-g-dv-timings;
- &sub-g-edid;
- &sub-g-enc-index;
- &sub-g-ext-ctrls;
- &sub-g-fbuf;
- &sub-g-fmt;
- &sub-g-frequency;
- &sub-g-input;
- &sub-g-jpegcomp;
- &sub-g-modulator;
- &sub-g-output;
- &sub-g-parm;
- &sub-g-priority;
- &sub-g-selection;
- &sub-g-sliced-vbi-cap;
- &sub-g-std;
- &sub-g-tuner;
- &sub-log-status;
- &sub-overlay;
- &sub-prepare-buf;
- &sub-qbuf;
- &sub-querybuf;
- &sub-querycap;
- &sub-queryctrl;
- &sub-query-dv-timings;
- &sub-querystd;
- &sub-reqbufs;
- &sub-s-hw-freq-seek;
- &sub-streamon;
- &sub-subdev-enum-frame-interval;
- &sub-subdev-enum-frame-size;
- &sub-subdev-enum-mbus-code;
- &sub-subdev-g-crop;
- &sub-subdev-g-fmt;
- &sub-subdev-g-frame-interval;
- &sub-subdev-g-selection;
- &sub-subscribe-event;
- <!-- End of ioctls. -->
- &sub-mmap;
- &sub-munmap;
- &sub-open;
- &sub-poll;
- &sub-read;
- &sub-select;
- &sub-write;
- </appendix>
-
- <appendix>
- <title>Common definitions for V4L2 and V4L2 subdev interfaces</title>
- &sub-selections-common;
- </appendix>
-
- <appendix id="videodev">
- <title>Video For Linux Two Header File</title>
- &sub-videodev2-h;
- </appendix>
-
- <appendix id="capture-example">
- <title>Video Capture Example</title>
- &sub-capture-c;
- </appendix>
-
- <appendix id="v4l2grab-example">
- <title>Video Grabber example using libv4l</title>
- <para>This program demonstrates how to grab V4L2 images in ppm format by
-using libv4l handlers. The advantage is that this grabber can potentially work
-with any V4L2 driver.</para>
- &sub-v4l2grab-c;
- </appendix>
-
- &sub-media-indices;
-
- &sub-biblio;
-
diff --git a/Documentation/DocBook/media/v4l/v4l2grab.c.xml b/Documentation/DocBook/media/v4l/v4l2grab.c.xml
deleted file mode 100644
index bed12e40be27..000000000000
--- a/Documentation/DocBook/media/v4l/v4l2grab.c.xml
+++ /dev/null
@@ -1,164 +0,0 @@
-<programlisting>
-/* V4L2 video picture grabber
- Copyright (C) 2009 Mauro Carvalho Chehab &lt;mchehab@infradead.org&gt;
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- */
-
-#include &lt;stdio.h&gt;
-#include &lt;stdlib.h&gt;
-#include &lt;string.h&gt;
-#include &lt;fcntl.h&gt;
-#include &lt;errno.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;sys/types.h&gt;
-#include &lt;sys/time.h&gt;
-#include &lt;sys/mman.h&gt;
-#include &lt;linux/videodev2.h&gt;
-#include "../libv4l/include/libv4l2.h"
-
-#define CLEAR(x) memset(&amp;(x), 0, sizeof(x))
-
-struct buffer {
- void *start;
- size_t length;
-};
-
-static void xioctl(int fh, int request, void *arg)
-{
- int r;
-
- do {
- r = v4l2_ioctl(fh, request, arg);
- } while (r == -1 &amp;&amp; ((errno == EINTR) || (errno == EAGAIN)));
-
- if (r == -1) {
- fprintf(stderr, "error %d, %s\n", errno, strerror(errno));
- exit(EXIT_FAILURE);
- }
-}
-
-int main(int argc, char **argv)
-{
- struct <link linkend="v4l2-format">v4l2_format</link> fmt;
- struct <link linkend="v4l2-buffer">v4l2_buffer</link> buf;
- struct <link linkend="v4l2-requestbuffers">v4l2_requestbuffers</link> req;
- enum <link linkend="v4l2-buf-type">v4l2_buf_type</link> type;
- fd_set fds;
- struct timeval tv;
- int r, fd = -1;
- unsigned int i, n_buffers;
- char *dev_name = "/dev/video0";
- char out_name[256];
- FILE *fout;
- struct buffer *buffers;
-
- fd = v4l2_open(dev_name, O_RDWR | O_NONBLOCK, 0);
- if (fd &lt; 0) {
- perror("Cannot open device");
- exit(EXIT_FAILURE);
- }
-
- CLEAR(fmt);
- fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fmt.fmt.pix.width = 640;
- fmt.fmt.pix.height = 480;
- fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
- fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
- xioctl(fd, VIDIOC_S_FMT, &amp;fmt);
- if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_RGB24) {
- printf("Libv4l didn't accept RGB24 format. Can't proceed.\n");
- exit(EXIT_FAILURE);
- }
- if ((fmt.fmt.pix.width != 640) || (fmt.fmt.pix.height != 480))
- printf("Warning: driver is sending image at %dx%d\n",
- fmt.fmt.pix.width, fmt.fmt.pix.height);
-
- CLEAR(req);
- req.count = 2;
- req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- req.memory = V4L2_MEMORY_MMAP;
- xioctl(fd, VIDIOC_REQBUFS, &amp;req);
-
- buffers = calloc(req.count, sizeof(*buffers));
- for (n_buffers = 0; n_buffers &lt; req.count; ++n_buffers) {
- CLEAR(buf);
-
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = n_buffers;
-
- xioctl(fd, VIDIOC_QUERYBUF, &amp;buf);
-
- buffers[n_buffers].length = buf.length;
- buffers[n_buffers].start = v4l2_mmap(NULL, buf.length,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- fd, buf.m.offset);
-
- if (MAP_FAILED == buffers[n_buffers].start) {
- perror("mmap");
- exit(EXIT_FAILURE);
- }
- }
-
- for (i = 0; i &lt; n_buffers; ++i) {
- CLEAR(buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = i;
- xioctl(fd, VIDIOC_QBUF, &amp;buf);
- }
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- xioctl(fd, VIDIOC_STREAMON, &amp;type);
- for (i = 0; i &lt; 20; i++) {
- do {
- FD_ZERO(&amp;fds);
- FD_SET(fd, &amp;fds);
-
- /* Timeout. */
- tv.tv_sec = 2;
- tv.tv_usec = 0;
-
- r = select(fd + 1, &amp;fds, NULL, NULL, &amp;tv);
-		} while (r == -1 &amp;&amp; errno == EINTR);
- if (r == -1) {
- perror("select");
- return errno;
- }
-
- CLEAR(buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- xioctl(fd, VIDIOC_DQBUF, &amp;buf);
-
- sprintf(out_name, "out%03d.ppm", i);
- fout = fopen(out_name, "w");
- if (!fout) {
- perror("Cannot open image");
- exit(EXIT_FAILURE);
- }
- fprintf(fout, "P6\n%d %d 255\n",
- fmt.fmt.pix.width, fmt.fmt.pix.height);
- fwrite(buffers[buf.index].start, buf.bytesused, 1, fout);
- fclose(fout);
-
- xioctl(fd, VIDIOC_QBUF, &amp;buf);
- }
-
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- xioctl(fd, VIDIOC_STREAMOFF, &amp;type);
- for (i = 0; i &lt; n_buffers; ++i)
- v4l2_munmap(buffers[i].start, buffers[i].length);
- v4l2_close(fd);
-
- return 0;
-}
-</programlisting>
diff --git a/Documentation/DocBook/media/v4l/vbi_525.pdf b/Documentation/DocBook/media/v4l/vbi_525.pdf
deleted file mode 100644
index 9e72c25b208d..000000000000
--- a/Documentation/DocBook/media/v4l/vbi_525.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/vbi_625.pdf b/Documentation/DocBook/media/v4l/vbi_625.pdf
deleted file mode 100644
index 765235e33a4d..000000000000
--- a/Documentation/DocBook/media/v4l/vbi_625.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/vbi_hsync.pdf b/Documentation/DocBook/media/v4l/vbi_hsync.pdf
deleted file mode 100644
index 200b668189bf..000000000000
--- a/Documentation/DocBook/media/v4l/vbi_hsync.pdf
+++ /dev/null
Binary files differ
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
deleted file mode 100644
index 6528e97b8990..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ /dev/null
@@ -1,158 +0,0 @@
-<refentry id="vidioc-create-bufs">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_CREATE_BUFS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_CREATE_BUFS</refname>
- <refpurpose>Create buffers for Memory Mapped or User Pointer or DMA Buffer
- I/O</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_create_buffers *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_CREATE_BUFS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl is used to create buffers for <link linkend="mmap">memory
-mapped</link> or <link linkend="userp">user pointer</link> or <link
-linkend="dmabuf">DMA buffer</link> I/O. It can be used as an alternative or in
-addition to the &VIDIOC-REQBUFS; ioctl, when tighter
-control over buffers is required. This ioctl can be called multiple times to
-create buffers of different sizes.</para>
-
- <para>To allocate the device buffers applications must initialize the
-relevant fields of the <structname>v4l2_create_buffers</structname> structure.
-The <structfield>count</structfield> field must be set to the number of
-requested buffers, the <structfield>memory</structfield> field specifies the
-requested I/O method and the <structfield>reserved</structfield> array must be
-zeroed.</para>
-
- <para>The <structfield>format</structfield> field specifies the image format
-that the buffers must be able to handle. The application has to fill in this
-&v4l2-format;. Usually this will be done using the &VIDIOC-TRY-FMT; or &VIDIOC-G-FMT; ioctls
-to ensure that the requested format is supported by the driver.
-Based on the format's <structfield>type</structfield> field the requested buffer
-size (for single-planar) or plane sizes (for multi-planar formats) will be
-used for the allocated buffers. The driver may return an error if the size(s)
-are not supported by the hardware (usually because they are too small).</para>
-
- <para>The buffers created by this ioctl will be at least as large as the size
-defined by the <structfield>format.pix.sizeimage</structfield> field (or the
-corresponding fields for other format types). Usually if the
-<structfield>format.pix.sizeimage</structfield> field is less than the minimum
-required for the given format, then an error will be returned since drivers will
-typically not allow this. If it is larger, then the value will be used as-is.
-In other words, the driver may reject the requested size, but if it is accepted
-the driver will use it unchanged.</para>
-
- <para>When the ioctl is called with a pointer to this structure the driver
-will attempt to allocate up to the requested number of buffers and store the
-actual number allocated and the starting index in the
-<structfield>count</structfield> and the <structfield>index</structfield> fields
-respectively. On return <structfield>count</structfield> can be smaller than
-the number requested.</para>
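-
- <para>As a rough sketch only (the helper name below is made up for this
-example, and a capture device is assumed to be already open as
-<parameter>fd</parameter>), an application could first query the current
-format and then ask for four additional memory-mapped buffers:</para>
-
- <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Illustrative helper: create extra MMAP buffers for the current format. */
-static int create_extra_buffers(int fd)
-{
-	struct v4l2_format fmt;
-	struct v4l2_create_buffers create;
-
-	memset(&amp;fmt, 0, sizeof(fmt));
-	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	if (ioctl(fd, VIDIOC_G_FMT, &amp;fmt) == -1) {
-		perror("VIDIOC_G_FMT");
-		return -1;
-	}
-
-	memset(&amp;create, 0, sizeof(create));	/* also zeroes the reserved array */
-	create.count = 4;			/* number of buffers requested */
-	create.memory = V4L2_MEMORY_MMAP;
-	create.format = fmt;			/* buffers must handle this format */
-	if (ioctl(fd, VIDIOC_CREATE_BUFS, &amp;create) == -1) {
-		perror("VIDIOC_CREATE_BUFS");
-		return -1;
-	}
-
-	/* count may come back smaller; index is the first newly created buffer */
-	printf("created %u buffer(s) starting at index %u\n",
-	       create.count, create.index);
-	return 0;
-}
-</programlisting>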
-
- <table pgwide="1" frame="none" id="v4l2-create-buffers">
- <title>struct <structname>v4l2_create_buffers</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>The starting buffer index, returned by the driver.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>count</structfield></entry>
- <entry>The number of buffers requested or granted. If count == 0, then
- <constant>VIDIOC_CREATE_BUFS</constant> will set <structfield>index</structfield>
- to the current number of created buffers, and it will check the validity of
- <structfield>memory</structfield> and <structfield>format.type</structfield>.
- If those are invalid -1 is returned and errno is set to &EINVAL;,
- otherwise <constant>VIDIOC_CREATE_BUFS</constant> returns 0. It will
- never set errno to &EBUSY; in this particular case.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>memory</structfield></entry>
- <entry>Applications set this field to
-<constant>V4L2_MEMORY_MMAP</constant>,
-<constant>V4L2_MEMORY_DMABUF</constant> or
-<constant>V4L2_MEMORY_USERPTR</constant>. See <xref linkend="v4l2-memory"
-/></entry>
- </row>
- <row>
- <entry>&v4l2-format;</entry>
- <entry><structfield>format</structfield></entry>
- <entry>Filled in by the application, preserved by the driver.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>A place holder for future extensions. Drivers and applications
-must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>ENOMEM</errorcode></term>
- <listitem>
- <para>No memory to allocate buffers for <link linkend="mmap">memory
-mapped</link> I/O.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The buffer type (<structfield>format.type</structfield> field),
-requested I/O method (<structfield>memory</structfield>) or format
-(<structfield>format</structfield> field) is not valid.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml b/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
deleted file mode 100644
index 50cb940cbe5c..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-cropcap.xml
+++ /dev/null
@@ -1,166 +0,0 @@
-<refentry id="vidioc-cropcap">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_CROPCAP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_CROPCAP</refname>
- <refpurpose>Information about the video cropping and scaling abilities</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_cropcap
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_CROPCAP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Applications use this function to query the cropping
-limits, the pixel aspect of images and to calculate scale factors.
-They set the <structfield>type</structfield> field of a v4l2_cropcap
-structure to the respective buffer (stream) type and call the
-<constant>VIDIOC_CROPCAP</constant> ioctl with a pointer to this
-structure. Drivers fill the rest of the structure. The results are
-constant except when switching the video standard. Remember this
-switch can occur implicitly when switching the video input or
-output.</para>
-
-<para>Do not use the multiplanar buffer types. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
-instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>
-and use <constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>.</para>
-
- <para>This ioctl must be implemented for video capture or output devices that
-support cropping and/or scaling and/or have non-square pixels, and for overlay devices.</para>
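-
- <para>As an informal sketch (the helper name is made up and a capture
-device is assumed to be open as <parameter>fd</parameter>), the cropping
-limits and pixel aspect could be queried like this:</para>
-
- <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Illustrative helper: print cropping bounds, default rectangle and
-   pixel aspect for a capture device. */
-static void show_cropcap(int fd)
-{
-	struct v4l2_cropcap cap;
-
-	memset(&amp;cap, 0, sizeof(cap));
-	cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	/* single-planar type only */
-	if (ioctl(fd, VIDIOC_CROPCAP, &amp;cap) == -1) {
-		perror("VIDIOC_CROPCAP");
-		return;
-	}
-
-	printf("bounds:  %ux%u at (%d,%d)\n", cap.bounds.width,
-	       cap.bounds.height, cap.bounds.left, cap.bounds.top);
-	printf("default: %ux%u at (%d,%d)\n", cap.defrect.width,
-	       cap.defrect.height, cap.defrect.left, cap.defrect.top);
-	printf("pixel aspect (y/x): %u/%u\n",
-	       cap.pixelaspect.numerator, cap.pixelaspect.denominator);
-}
-</programlisting>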
-
- <table pgwide="1" frame="none" id="v4l2-cropcap">
- <title>struct <structname>v4l2_cropcap</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the data stream, set by the application.
-Only these types are valid here:
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> and
-<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>. See <xref linkend="v4l2-buf-type" />.</entry>
- </row>
- <row>
- <entry>struct <link linkend="v4l2-rect-crop">v4l2_rect</link></entry>
- <entry><structfield>bounds</structfield></entry>
- <entry>Defines the window within which capturing or output is
-possible; this may exclude, for example, the horizontal and vertical
-blanking areas. The cropping rectangle cannot exceed these limits.
-Width and height are defined in pixels, the driver writer is free to
-choose origin and units of the coordinate system in the analog
-domain.</entry>
- </row>
- <row>
- <entry>struct <link linkend="v4l2-rect-crop">v4l2_rect</link></entry>
- <entry><structfield>defrect</structfield></entry>
- <entry>Default cropping rectangle, it shall cover the
-"whole picture". Assuming pixel aspect 1/1 this could be for example a
-640&nbsp;&times;&nbsp;480 rectangle for NTSC, a
-768&nbsp;&times;&nbsp;576 rectangle for PAL and SECAM centered over
-the active picture area. The same co-ordinate system as for
- <structfield>bounds</structfield> is used.</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>pixelaspect</structfield></entry>
- <entry><para>This is the pixel aspect (y / x) when no
-scaling is applied, the ratio of the actual sampling
-frequency and the frequency required to get square
-pixels.</para><para>When cropping coordinates refer to square pixels,
-the driver sets <structfield>pixelaspect</structfield> to 1/1. Other
-common values are 54/59 for PAL and SECAM, 11/10 for NTSC sampled
-according to [<xref linkend="itu601" />].</para></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- NB this table is duplicated in the overlay chapter. -->
-
- <table pgwide="1" frame="none" id="v4l2-rect-crop">
- <title>struct <structname>v4l2_rect</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__s32</entry>
- <entry><structfield>left</structfield></entry>
- <entry>Horizontal offset of the top, left corner of the
-rectangle, in pixels.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>top</structfield></entry>
- <entry>Vertical offset of the top, left corner of the
-rectangle, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Width of the rectangle, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Height of the rectangle, in pixels.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-cropcap; <structfield>type</structfield> is
-invalid.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml b/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml
deleted file mode 100644
index f14a3bb1afaa..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-dbg-g-chip-info.xml
+++ /dev/null
@@ -1,207 +0,0 @@
-<refentry id="vidioc-dbg-g-chip-info">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_DBG_G_CHIP_INFO</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_DBG_G_CHIP_INFO</refname>
- <refpurpose>Identify the chips on a TV card</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_dbg_chip_info
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_DBG_G_CHIP_INFO</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <note>
- <title>Experimental</title>
-
- <para>This is an <link
-linkend="experimental">experimental</link> interface and may change in
-the future.</para>
- </note>
-
- <para>For driver debugging purposes this ioctl allows test
-applications to query the driver about the chips present on the TV
-card. Regular applications must not use it. When you find a
-chip-specific bug, please contact the linux-media mailing list (&v4l-ml;)
-so it can be fixed.</para>
-
- <para>Additionally the Linux kernel must be compiled with the
-<constant>CONFIG_VIDEO_ADV_DEBUG</constant> option to enable this ioctl.</para>
-
- <para>To query the driver applications must initialize the
-<structfield>match.type</structfield> and
-<structfield>match.addr</structfield> or <structfield>match.name</structfield>
-fields of a &v4l2-dbg-chip-info;
-and call <constant>VIDIOC_DBG_G_CHIP_INFO</constant> with a pointer to
-this structure. On success the driver stores information about the
-selected chip in the <structfield>name</structfield> and
-<structfield>flags</structfield> fields.</para>
-
- <para>When <structfield>match.type</structfield> is
-<constant>V4L2_CHIP_MATCH_BRIDGE</constant>,
-<structfield>match.addr</structfield> selects the nth bridge 'chip'
-on the TV card. You can enumerate all chips by starting at zero and
-incrementing <structfield>match.addr</structfield> by one until
-<constant>VIDIOC_DBG_G_CHIP_INFO</constant> fails with an &EINVAL;.
-The number zero always selects the bridge chip itself, &eg; the chip
-connected to the PCI or USB bus. Non-zero numbers identify specific
-parts of the bridge chip such as an AC97 register block.</para>
-
- <para>When <structfield>match.type</structfield> is
-<constant>V4L2_CHIP_MATCH_SUBDEV</constant>,
-<structfield>match.addr</structfield> selects the nth sub-device. This
-allows you to enumerate over all sub-devices.</para>
-
- <para>On success, the <structfield>name</structfield> field will
-contain a chip name and the <structfield>flags</structfield> field will
-contain <constant>V4L2_CHIP_FL_READABLE</constant> if the driver supports
-reading registers from the device or <constant>V4L2_CHIP_FL_WRITABLE</constant>
-if the driver supports writing registers to the device.</para>
-
- <para>We recommend the <application>v4l2-dbg</application>
-utility over calling this ioctl directly. It is available from the
-LinuxTV v4l-dvb repository; see <ulink
-url="https://linuxtv.org/repo/">https://linuxtv.org/repo/</ulink> for
-access instructions.</para>
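-
- <para>As an illustration only (the helper name is made up, a device is
-assumed to be open as <parameter>fd</parameter> and the kernel is assumed
-to be built with <constant>CONFIG_VIDEO_ADV_DEBUG</constant>), the bridge
-chips could be enumerated as described above:</para>
-
- <programlisting>
-#include &lt;errno.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Illustrative helper: list the bridge chips known to the driver. */
-static void list_bridge_chips(int fd)
-{
-	struct v4l2_dbg_chip_info info;
-	unsigned int n;
-
-	for (n = 0; ; n++) {
-		memset(&amp;info, 0, sizeof(info));
-		info.match.type = V4L2_CHIP_MATCH_BRIDGE;
-		info.match.addr = n;
-		if (ioctl(fd, VIDIOC_DBG_G_CHIP_INFO, &amp;info) == -1) {
-			if (errno != EINVAL)
-				perror("VIDIOC_DBG_G_CHIP_INFO");
-			break;	/* EINVAL means no more chips */
-		}
-		printf("chip %u: %s%s%s\n", n, info.name,
-		       (info.flags &amp; V4L2_CHIP_FL_READABLE) ? " readable" : "",
-		       (info.flags &amp; V4L2_CHIP_FL_WRITABLE) ? " writable" : "");
-	}
-}
-</programlisting>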
-
- <!-- Note for convenience vidioc-dbg-g-register.sgml
- contains a duplicate of this table. -->
- <table pgwide="1" frame="none" id="name-v4l2-dbg-match">
- <title>struct <structname>v4l2_dbg_match</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>See <xref linkend="name-chip-match-types" /> for a list of
-possible types.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>addr</structfield></entry>
- <entry>Match a chip by this number, interpreted according
-to the <structfield>type</structfield> field.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>char</entry>
- <entry><structfield>name[32]</structfield></entry>
- <entry>Match a chip by this name, interpreted according
-to the <structfield>type</structfield> field. Currently unused.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-dbg-chip-info">
- <title>struct <structname>v4l2_dbg_chip_info</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>struct v4l2_dbg_match</entry>
- <entry><structfield>match</structfield></entry>
- <entry>How to match the chip, see <xref linkend="name-v4l2-dbg-match" />.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>name[32]</structfield></entry>
- <entry>The name of the chip.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Set by the driver. If <constant>V4L2_CHIP_FL_READABLE</constant>
-is set, then the driver supports reading registers from the device. If
-<constant>V4L2_CHIP_FL_WRITABLE</constant> is set, then it supports writing registers.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved[8]</structfield></entry>
- <entry>Reserved fields, both application and driver must set these to 0.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- Note for convenience vidioc-dbg-g-register.sgml
- contains a duplicate of this table. -->
- <table pgwide="1" frame="none" id="name-chip-match-types">
- <title>Chip Match Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CHIP_MATCH_BRIDGE</constant></entry>
- <entry>0</entry>
- <entry>Match the nth chip on the card, zero for the
- bridge chip. Does not match sub-devices.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CHIP_MATCH_SUBDEV</constant></entry>
- <entry>4</entry>
- <entry>Match the nth sub-device.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <structfield>match.type</structfield> field is invalid or
-no device could be matched.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml b/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml
deleted file mode 100644
index 5877f68a5820..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-dbg-g-register.xml
+++ /dev/null
@@ -1,227 +0,0 @@
-<refentry id="vidioc-dbg-g-register">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_DBG_G_REGISTER, VIDIOC_DBG_S_REGISTER</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_DBG_G_REGISTER</refname>
- <refname>VIDIOC_DBG_S_REGISTER</refname>
- <refpurpose>Read or write hardware registers</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_dbg_register *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_dbg_register
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_DBG_G_REGISTER, VIDIOC_DBG_S_REGISTER</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <note>
- <title>Experimental</title>
-
- <para>This is an <link linkend="experimental">experimental</link>
-interface and may change in the future.</para>
- </note>
-
- <para>For driver debugging purposes these ioctls allow test
-applications to access hardware registers directly. Regular
-applications must not use them.</para>
-
- <para>Since writing or even reading registers can jeopardize
-system security and stability, and can damage the hardware, both ioctls
-require superuser privileges. Additionally the Linux kernel must be
-compiled with the <constant>CONFIG_VIDEO_ADV_DEBUG</constant> option
-to enable these ioctls.</para>
-
- <para>To write a register applications must initialize all fields
-of a &v4l2-dbg-register; except for <structfield>size</structfield> and call
-<constant>VIDIOC_DBG_S_REGISTER</constant> with a pointer to this
-structure. The <structfield>match.type</structfield> and
-<structfield>match.addr</structfield> or <structfield>match.name</structfield>
-fields select a chip on the TV
-card, the <structfield>reg</structfield> field specifies a register
-number and the <structfield>val</structfield> field the value to be
-written into the register.</para>
-
- <para>To read a register applications must initialize the
-<structfield>match.type</structfield>,
-<structfield>match.addr</structfield> or <structfield>match.name</structfield> and
-<structfield>reg</structfield> fields, and call
-<constant>VIDIOC_DBG_G_REGISTER</constant> with a pointer to this
-structure. On success the driver stores the register value in the
-<structfield>val</structfield> field and the size (in bytes) of the
-value in <structfield>size</structfield>.</para>
-
- <para>When <structfield>match.type</structfield> is
-<constant>V4L2_CHIP_MATCH_BRIDGE</constant>,
-<structfield>match.addr</structfield> selects the nth non-sub-device chip
-on the TV card. The number zero always selects the host chip, &eg; the
-chip connected to the PCI or USB bus. You can find out which chips are
-present with the &VIDIOC-DBG-G-CHIP-INFO; ioctl.</para>
-
- <para>When <structfield>match.type</structfield> is
-<constant>V4L2_CHIP_MATCH_SUBDEV</constant>,
-<structfield>match.addr</structfield> selects the nth sub-device.</para>
-
- <para>These ioctls are optional, not all drivers may support them.
-However when a driver supports these ioctls it must also support
-&VIDIOC-DBG-G-CHIP-INFO;. Conversely it may support
-<constant>VIDIOC_DBG_G_CHIP_INFO</constant> but not these ioctls.</para>
-
- <para><constant>VIDIOC_DBG_G_REGISTER</constant> and
-<constant>VIDIOC_DBG_S_REGISTER</constant> were introduced in Linux
-2.6.21, but their API was changed to the one described here in kernel 2.6.29.</para>
-
- <para>We recommend the <application>v4l2-dbg</application>
-utility over calling these ioctls directly. It is available from the
-LinuxTV v4l-dvb repository; see <ulink
-url="https://linuxtv.org/repo/">https://linuxtv.org/repo/</ulink> for
-access instructions.</para>
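-
- <para>As a sketch only (the helper name is made up, the register number
-passed in is illustrative, and root privileges plus a debug-enabled kernel
-are assumed), a single bridge-chip register could be read back like
-this:</para>
-
- <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Illustrative helper: read one register from the bridge chip. */
-static int read_bridge_register(int fd, __u64 reg)
-{
-	struct v4l2_dbg_register dbg;
-
-	memset(&amp;dbg, 0, sizeof(dbg));
-	dbg.match.type = V4L2_CHIP_MATCH_BRIDGE;
-	dbg.match.addr = 0;	/* zero selects the bridge chip itself */
-	dbg.reg = reg;
-	if (ioctl(fd, VIDIOC_DBG_G_REGISTER, &amp;dbg) == -1) {
-		perror("VIDIOC_DBG_G_REGISTER");	/* EPERM without root */
-		return -1;
-	}
-	printf("reg 0x%llx = 0x%llx (%u byte(s) wide)\n",
-	       (unsigned long long)reg, (unsigned long long)dbg.val, dbg.size);
-	return 0;
-}
-</programlisting>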
-
- <!-- Note for convenience vidioc-dbg-g-chip-info.sgml
- contains a duplicate of this table. -->
- <table pgwide="1" frame="none" id="v4l2-dbg-match">
- <title>struct <structname>v4l2_dbg_match</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>See <xref linkend="chip-match-types" /> for a list of
-possible types.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>addr</structfield></entry>
- <entry>Match a chip by this number, interpreted according
-to the <structfield>type</structfield> field.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>char</entry>
- <entry><structfield>name[32]</structfield></entry>
- <entry>Match a chip by this name, interpreted according
-to the <structfield>type</structfield> field. Currently unused.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
-
- <table pgwide="1" frame="none" id="v4l2-dbg-register">
- <title>struct <structname>v4l2_dbg_register</structname></title>
- <tgroup cols="4">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c4" />
- <tbody valign="top">
- <row>
- <entry>struct v4l2_dbg_match</entry>
- <entry><structfield>match</structfield></entry>
- <entry>How to match the chip, see <xref linkend="v4l2-dbg-match" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>size</structfield></entry>
- <entry>The register size in bytes.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>reg</structfield></entry>
- <entry>A register number.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>val</structfield></entry>
- <entry>The value read from, or to be written into the
-register.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- Note for convenience vidioc-dbg-g-chip-info.sgml
- contains a duplicate of this table. -->
- <table pgwide="1" frame="none" id="chip-match-types">
- <title>Chip Match Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CHIP_MATCH_BRIDGE</constant></entry>
- <entry>0</entry>
- <entry>Match the nth chip on the card, zero for the
- bridge chip. Does not match sub-devices.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CHIP_MATCH_SUBDEV</constant></entry>
- <entry>4</entry>
- <entry>Match the nth sub-device.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EPERM</errorcode></term>
- <listitem>
- <para>Insufficient permissions. Root privileges are required
-to execute these ioctls.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-decoder-cmd.xml b/Documentation/DocBook/media/v4l/vidioc-decoder-cmd.xml
deleted file mode 100644
index 73eb5cfe698a..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-decoder-cmd.xml
+++ /dev/null
@@ -1,259 +0,0 @@
-<refentry id="vidioc-decoder-cmd">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_DECODER_CMD, VIDIOC_TRY_DECODER_CMD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_DECODER_CMD</refname>
- <refname>VIDIOC_TRY_DECODER_CMD</refname>
- <refpurpose>Execute a decoder command</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_decoder_cmd *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_DECODER_CMD, VIDIOC_TRY_DECODER_CMD</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls control an audio/video (usually MPEG-) decoder.
-<constant>VIDIOC_DECODER_CMD</constant> sends a command to the
-decoder, <constant>VIDIOC_TRY_DECODER_CMD</constant> can be used to
-try a command without actually executing it. To send a command applications
-must initialize all fields of a &v4l2-decoder-cmd; and call
-<constant>VIDIOC_DECODER_CMD</constant> or <constant>VIDIOC_TRY_DECODER_CMD</constant>
-with a pointer to this structure.</para>
-
- <para>The <structfield>cmd</structfield> field must contain the
-command code. Some commands use the <structfield>flags</structfield> field for
-additional information.
-</para>
-
- <para>A <function>write</function>() or &VIDIOC-STREAMON; call sends an implicit
-START command to the decoder if it has not been started yet.
-</para>
-
- <para>A <function>close</function>() or &VIDIOC-STREAMOFF; call of a streaming
-file descriptor sends an implicit immediate STOP command to the decoder, and all
-buffered data is discarded.</para>
-
- <para>These ioctls are optional, not all drivers may support
-them. They were introduced in Linux 3.3.</para>
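-
-  <para>As an illustration only (a minimal sketch; the helper name
-<function>start_decoder</function> and the already opened decoder file
-descriptor <parameter>fd</parameter> are assumptions, not part of this
-interface), a START command at normal playback speed could be issued as
-follows:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: start the decoder at normal playback speed. */
-static int start_decoder(int fd)
-{
-        struct v4l2_decoder_cmd cmd;
-
-        memset(&amp;cmd, 0, sizeof(cmd));
-        cmd.cmd = V4L2_DEC_CMD_START;
-        cmd.start.speed = 1000; /* 1000 == normal speed */
-
-        /* Check first whether the driver would accept the command. */
-        if (ioctl(fd, VIDIOC_TRY_DECODER_CMD, &amp;cmd) &lt; 0)
-                return -1;
-
-        return ioctl(fd, VIDIOC_DECODER_CMD, &amp;cmd);
-}
-</programlisting>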
-
- <table pgwide="1" frame="none" id="v4l2-decoder-cmd">
- <title>struct <structname>v4l2_decoder_cmd</structname></title>
- <tgroup cols="5">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>cmd</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>The decoder command, see <xref linkend="decoder-cmds" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry></entry>
- <entry>Flags to go with the command. If no flags are defined for
-this command, drivers and applications must set this field to zero.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct</entry>
- <entry><structfield>start</structfield></entry>
- <entry></entry>
- <entry>Structure containing additional data for the
-<constant>V4L2_DEC_CMD_START</constant> command.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__s32</entry>
- <entry><structfield>speed</structfield></entry>
- <entry>Playback speed and direction. The playback speed is defined as
-<structfield>speed</structfield>/1000 of the normal speed. So 1000 is normal playback.
-Negative numbers denote reverse playback, so -1000 does reverse playback at normal
-speed. Speeds -1, 0 and 1 have special meanings: speed 0 is shorthand for 1000
-(normal playback). A speed of 1 steps just one frame forward, a speed of -1 steps
-just one frame back.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>format</structfield></entry>
- <entry>Format restrictions. This field is set by the driver, not the
-application. Possible values are <constant>V4L2_DEC_START_FMT_NONE</constant> if
-there are no format restrictions or <constant>V4L2_DEC_START_FMT_GOP</constant>
-if the decoder operates on full GOPs (<wordasword>Group Of Pictures</wordasword>).
-This is usually the case for reverse playback: the decoder needs full GOPs, which
-it can then play in reverse order. So to implement reverse playback the application
-must feed the decoder the last GOP in the video file, then the GOP before that, and so on.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct</entry>
- <entry><structfield>stop</structfield></entry>
- <entry></entry>
- <entry>Structure containing additional data for the
-<constant>V4L2_DEC_CMD_STOP</constant> command.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u64</entry>
- <entry><structfield>pts</structfield></entry>
- <entry>Stop playback at this <structfield>pts</structfield> or immediately
-if the playback is already past that timestamp. Leave it at 0 to stop after the
-last frame has been decoded.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>struct</entry>
- <entry><structfield>raw</structfield></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>data</structfield>[16]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="decoder-cmds">
- <title>Decoder Commands</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_DEC_CMD_START</constant></entry>
- <entry>0</entry>
- <entry>Start the decoder. When the decoder is already
-running or paused, this command will just change the playback speed.
-That means that calling <constant>V4L2_DEC_CMD_START</constant> when
-the decoder was paused will <emphasis>not</emphasis> resume the decoder.
-You have to explicitly call <constant>V4L2_DEC_CMD_RESUME</constant> for that.
-This command has one flag:
-<constant>V4L2_DEC_CMD_START_MUTE_AUDIO</constant>. If set, then audio will
-be muted when playing back at a non-standard speed.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_DEC_CMD_STOP</constant></entry>
- <entry>1</entry>
- <entry>Stop the decoder. When the decoder is already stopped,
-this command does nothing. This command has two flags:
-if <constant>V4L2_DEC_CMD_STOP_TO_BLACK</constant> is set, then the decoder will
-set the picture to black after it stopped decoding. Otherwise the last image will
-repeat. mem2mem decoders will stop producing new frames altogether. They will send
-a <constant>V4L2_EVENT_EOS</constant> event when the last frame has been decoded
-and all frames are ready to be dequeued and will set the
-<constant>V4L2_BUF_FLAG_LAST</constant> buffer flag on the last buffer of the
-capture queue to indicate there will be no new buffers produced to dequeue. This
-buffer may be empty, indicated by the driver setting the
-<structfield>bytesused</structfield> field to 0. Once the
-<constant>V4L2_BUF_FLAG_LAST</constant> flag has been set, the
-<link linkend="vidioc-qbuf">VIDIOC_DQBUF</link> ioctl will no longer block,
-but will return an &EPIPE;.
-If <constant>V4L2_DEC_CMD_STOP_IMMEDIATELY</constant> is set, then the decoder
-stops immediately (ignoring the <structfield>pts</structfield> value), otherwise it
-will keep decoding until timestamp >= pts or until the last of the pending data from
-its internal buffers has been decoded.
-</entry>
- </row>
- <row>
- <entry><constant>V4L2_DEC_CMD_PAUSE</constant></entry>
- <entry>2</entry>
- <entry>Pause the decoder. When the decoder has not been
-started yet, the driver will return an &EPERM;. When the decoder is
-already paused, this command does nothing. This command has one flag:
-if <constant>V4L2_DEC_CMD_PAUSE_TO_BLACK</constant> is set, then set the
-decoder output to black when paused.
-</entry>
- </row>
- <row>
- <entry><constant>V4L2_DEC_CMD_RESUME</constant></entry>
- <entry>3</entry>
- <entry>Resume decoding after a PAUSE command. When the
-decoder has not been started yet, the driver will return an &EPERM;.
-When the decoder is already running, this command does nothing. No
-flags are defined for this command.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <structfield>cmd</structfield> field is invalid.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EPERM</errorcode></term>
- <listitem>
- <para>The application sent a PAUSE or RESUME command when
-the decoder was not running.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml b/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
deleted file mode 100644
index c9c3c7713832..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-dqevent.xml
+++ /dev/null
@@ -1,471 +0,0 @@
-<refentry id="vidioc-dqevent">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_DQEVENT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_DQEVENT</refname>
- <refpurpose>Dequeue event</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_event
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_DQEVENT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Dequeue an event from a video device. No input is required
- for this ioctl. All the fields of the &v4l2-event; structure are
-    filled by the driver. The file handle will also receive exceptions,
-    which the application can detect, for example, by using the
-    <function>select</function>() system call.</para>
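-
-    <para>As an illustration only (a minimal sketch; the helper name
-    <function>wait_for_event</function> is an assumption, and the application
-    is assumed to have subscribed to at least one event type with
-    <constant>VIDIOC_SUBSCRIBE_EVENT</constant>), an event could be waited for
-    and dequeued as follows:</para>
-
-    <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;sys/select.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: wait for and dequeue one event on fd. Returns 0 on success. */
-static int wait_for_event(int fd, struct v4l2_event *ev)
-{
-        fd_set efds;
-
-        FD_ZERO(&amp;efds);
-        FD_SET(fd, &amp;efds);
-
-        /* V4L2 events are signalled as exceptions on the file handle. */
-        if (select(fd + 1, NULL, NULL, &amp;efds, NULL) &lt; 0)
-                return -1;
-
-        memset(ev, 0, sizeof(*ev));
-        return ioctl(fd, VIDIOC_DQEVENT, ev);
-}
-</programlisting>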
-
- <table frame="none" pgwide="1" id="v4l2-event">
- <title>struct <structname>v4l2_event</structname></title>
- <tgroup cols="4">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>Type of the event, see <xref linkend="event-type" />.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield>u</structfield></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-event-vsync;</entry>
- <entry><structfield>vsync</structfield></entry>
- <entry>Event data for event <constant>V4L2_EVENT_VSYNC</constant>.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-event-ctrl;</entry>
- <entry><structfield>ctrl</structfield></entry>
- <entry>Event data for event <constant>V4L2_EVENT_CTRL</constant>.
- </entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-event-frame-sync;</entry>
- <entry><structfield>frame_sync</structfield></entry>
- <entry>Event data for event
- <constant>V4L2_EVENT_FRAME_SYNC</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-event-motion-det;</entry>
- <entry><structfield>motion_det</structfield></entry>
- <entry>Event data for event V4L2_EVENT_MOTION_DET.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-event-src-change;</entry>
- <entry><structfield>src_change</structfield></entry>
- <entry>Event data for event V4L2_EVENT_SOURCE_CHANGE.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u8</entry>
- <entry><structfield>data</structfield>[64]</entry>
-	    <entry>Event data. Defined by the event type. The union
-	    should be used to define an easily accessible type for
-	    the event data.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pending</structfield></entry>
- <entry></entry>
- <entry>Number of pending events excluding this one.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>sequence</structfield></entry>
- <entry></entry>
- <entry>Event sequence number. The sequence number is
- incremented for every subscribed event that takes place.
- If sequence numbers are not contiguous it means that
- events have been lost.
- </entry>
- </row>
- <row>
- <entry>struct timespec</entry>
- <entry><structfield>timestamp</structfield></entry>
- <entry></entry>
- <entry>Event timestamp. The timestamp has been taken from the
- <constant>CLOCK_MONOTONIC</constant> clock. To access the
- same clock outside V4L2, use <function>clock_gettime(2)</function>.
- </entry>
- </row>
- <row>
- <entry>u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry>The ID associated with the event source. If the event does not
- have an associated ID (this depends on the event type), then this
- is 0.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry></entry>
- <entry>Reserved for future extensions. Drivers must set
- the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="event-type">
- <title>Event Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EVENT_ALL</constant></entry>
- <entry>0</entry>
- <entry>All events. V4L2_EVENT_ALL is valid only for
-	    VIDIOC_UNSUBSCRIBE_EVENT, to unsubscribe all events at once.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_VSYNC</constant></entry>
- <entry>1</entry>
- <entry>This event is triggered on the vertical sync.
- This event has a &v4l2-event-vsync; associated with it.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_EOS</constant></entry>
- <entry>2</entry>
- <entry>This event is triggered when the end of a stream is reached.
- This is typically used with MPEG decoders to report to the application
- when the last of the MPEG stream has been decoded.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_CTRL</constant></entry>
- <entry>3</entry>
- <entry><para>This event requires that the <structfield>id</structfield>
- matches the control ID from which you want to receive events.
- This event is triggered if the control's value changes, if a
- button control is pressed or if the control's flags change.
- This event has a &v4l2-event-ctrl; associated with it. This struct
- contains much of the same information as &v4l2-queryctrl; and
- &v4l2-control;.</para>
-
- <para>If the event is generated due to a call to &VIDIOC-S-CTRL; or
- &VIDIOC-S-EXT-CTRLS;, then the event will <emphasis>not</emphasis> be sent to
- the file handle that called the ioctl function. This prevents
- nasty feedback loops. If you <emphasis>do</emphasis> want to get the
- event, then set the <constant>V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK</constant>
- flag.
- </para>
-
- <para>This event type will ensure that no information is lost when
- more events are raised than there is room internally. In that
- case the &v4l2-event-ctrl; of the second-oldest event is kept,
- but the <structfield>changes</structfield> field of the
- second-oldest event is ORed with the <structfield>changes</structfield>
- field of the oldest event.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_FRAME_SYNC</constant></entry>
- <entry>4</entry>
- <entry>
- <para>Triggered immediately when the reception of a
- frame has begun. This event has a
- &v4l2-event-frame-sync; associated with it.</para>
-
- <para>If the hardware needs to be stopped in the case of a
- buffer underrun it might not be able to generate this event.
- In such cases the <structfield>frame_sequence</structfield>
- field in &v4l2-event-frame-sync; will not be incremented. This
-	  causes two consecutive frame sequence numbers to be separated
-	  by n times the frame interval.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_SOURCE_CHANGE</constant></entry>
- <entry>5</entry>
- <entry>
- <para>This event is triggered when a source parameter change is
- detected during runtime by the video device. It can be a
- runtime resolution change triggered by a video decoder or the
- format change happening on an input connector.
- This event requires that the <structfield>id</structfield>
- matches the input index (when used with a video device node)
- or the pad index (when used with a subdevice node) from which
- you want to receive events.</para>
-
- <para>This event has a &v4l2-event-src-change; associated
- with it. The <structfield>changes</structfield> bitfield denotes
- what has changed for the subscribed pad. If multiple events
-	  occurred before the application could dequeue them, then the changes
-	  field will contain the ORed value of all the events generated.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_MOTION_DET</constant></entry>
- <entry>6</entry>
- <entry>
- <para>Triggered whenever the motion detection state for one or more of the regions
- changes. This event has a &v4l2-event-motion-det; associated with it.</para>
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_PRIVATE_START</constant></entry>
- <entry>0x08000000</entry>
- <entry>Base event number for driver-private events.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-event-vsync">
- <title>struct <structname>v4l2_event_vsync</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u8</entry>
- <entry><structfield>field</structfield></entry>
- <entry>The upcoming field. See &v4l2-field;.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-event-ctrl">
- <title>struct <structname>v4l2_event_ctrl</structname></title>
- <tgroup cols="4">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>changes</structfield></entry>
- <entry></entry>
- <entry>A bitmask that tells what has changed. See <xref linkend="ctrl-changes-flags" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>The type of the control. See &v4l2-ctrl-type;.</entry>
- </row>
- <row>
- <entry>union (anonymous)</entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>__s32</entry>
- <entry><structfield>value</structfield></entry>
- <entry>The 32-bit value of the control for 32-bit control types.
- This is 0 for string controls since the value of a string
- cannot be passed using &VIDIOC-DQEVENT;.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__s64</entry>
- <entry><structfield>value64</structfield></entry>
- <entry>The 64-bit value of the control for 64-bit control types.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry>The control flags. See <xref linkend="control-flags" />.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>minimum</structfield></entry>
- <entry></entry>
- <entry>The minimum value of the control. See &v4l2-queryctrl;.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>maximum</structfield></entry>
- <entry></entry>
- <entry>The maximum value of the control. See &v4l2-queryctrl;.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>step</structfield></entry>
- <entry></entry>
- <entry>The step value of the control. See &v4l2-queryctrl;.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>default_value</structfield></entry>
- <entry></entry>
-	    <entry>The default value of the control. See &v4l2-queryctrl;.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-event-frame-sync">
- <title>struct <structname>v4l2_event_frame_sync</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>frame_sequence</structfield></entry>
- <entry>
- The sequence number of the frame being received.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-event-src-change">
- <title>struct <structname>v4l2_event_src_change</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>changes</structfield></entry>
- <entry>
- A bitmask that tells what has changed. See <xref linkend="src-changes-flags" />.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="v4l2-event-motion-det">
- <title>struct <structname>v4l2_event_motion_det</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>
- Currently only one flag is available: if <constant>V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ</constant>
- is set, then the <structfield>frame_sequence</structfield> field is valid,
- otherwise that field should be ignored.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>frame_sequence</structfield></entry>
- <entry>
- The sequence number of the frame being received. Only valid if the
- <constant>V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ</constant> flag was set.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>region_mask</structfield></entry>
- <entry>
- The bitmask of the regions that reported motion. There is at least one
- region. If this field is 0, then no motion was detected at all.
- If there is no <constant>V4L2_CID_DETECT_MD_REGION_GRID</constant> control
- (see <xref linkend="detect-controls" />) to assign a different region
-	    to each cell in the motion detection grid, then all cells
-	    are automatically assigned to the default region 0.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="ctrl-changes-flags">
- <title>Control Changes</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EVENT_CTRL_CH_VALUE</constant></entry>
- <entry>0x0001</entry>
- <entry>This control event was triggered because the value of the control
-	    changed. Special cases: volatile controls do not generate this event;
-	    if a control has the <constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant>
-	    flag set, then this event is sent as well, regardless of its value.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_CTRL_CH_FLAGS</constant></entry>
- <entry>0x0002</entry>
- <entry>This control event was triggered because the control flags
- changed.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_CTRL_CH_RANGE</constant></entry>
- <entry>0x0004</entry>
- <entry>This control event was triggered because the minimum,
- maximum, step or the default value of the control changed.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="src-changes-flags">
- <title>Source Changes</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EVENT_SRC_CH_RESOLUTION</constant></entry>
- <entry>0x0001</entry>
- <entry>This event gets triggered when a resolution change is
- detected at an input. This can come from an input connector or
- from a video decoder.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
deleted file mode 100644
index ca9ffce9b4c1..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
+++ /dev/null
@@ -1,210 +0,0 @@
-<refentry id="vidioc-dv-timings-cap">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_DV_TIMINGS_CAP, VIDIOC_SUBDEV_DV_TIMINGS_CAP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_DV_TIMINGS_CAP</refname>
- <refname>VIDIOC_SUBDEV_DV_TIMINGS_CAP</refname>
- <refpurpose>The capabilities of the Digital Video receiver/transmitter</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_dv_timings_cap *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_DV_TIMINGS_CAP, VIDIOC_SUBDEV_DV_TIMINGS_CAP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-  <para>To query the capabilities of the DV receiver/transmitter, applications initialize the
-<structfield>pad</structfield> field to 0, zero the reserved array of &v4l2-dv-timings-cap;
-and call the <constant>VIDIOC_DV_TIMINGS_CAP</constant> ioctl on a video node;
-the driver will then fill in the structure. Note that drivers may return
-different values after switching the video input or output.</para>
-
-  <para>When implemented by the driver, the DV capabilities of subdevices can be
-queried by calling the <constant>VIDIOC_SUBDEV_DV_TIMINGS_CAP</constant> ioctl
-directly on a subdevice node. The capabilities are specific to inputs (for DV
-receivers) or outputs (for DV transmitters), so applications must specify the
-desired pad number in the &v4l2-dv-timings-cap; <structfield>pad</structfield>
-field and zero the <structfield>reserved</structfield> array. Attempts to query
-capabilities on a pad that doesn't support them will return an &EINVAL;.</para>
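-
-  <para>As an illustration only (a minimal sketch; the helper name
-<function>query_dv_caps</function> is an assumption), the capabilities of a
-video node could be queried like this:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: query the DV timings capabilities of a video node. */
-static int query_dv_caps(int fd, struct v4l2_dv_timings_cap *cap)
-{
-        /* Zeroing the whole structure also clears pad and reserved. */
-        memset(cap, 0, sizeof(*cap));
-        return ioctl(fd, VIDIOC_DV_TIMINGS_CAP, cap);
-}
-</programlisting>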
-
- <table pgwide="1" frame="none" id="v4l2-bt-timings-cap">
- <title>struct <structname>v4l2_bt_timings_cap</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>min_width</structfield></entry>
- <entry>Minimum width of the active video in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>max_width</structfield></entry>
- <entry>Maximum width of the active video in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>min_height</structfield></entry>
- <entry>Minimum height of the active video in lines.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>max_height</structfield></entry>
- <entry>Maximum height of the active video in lines.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>min_pixelclock</structfield></entry>
- <entry>Minimum pixelclock frequency in Hz.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>max_pixelclock</structfield></entry>
- <entry>Maximum pixelclock frequency in Hz.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>standards</structfield></entry>
- <entry>The video standard(s) supported by the hardware.
- See <xref linkend="dv-bt-standards"/> for a list of standards.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capabilities</structfield></entry>
- <entry>Several flags giving more information about the capabilities.
- See <xref linkend="dv-bt-cap-capabilities"/> for a description of the flags.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[16]</entry>
- <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-dv-timings-cap">
- <title>struct <structname>v4l2_dv_timings_cap</structname></title>
- <tgroup cols="4">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of DV timings as listed in <xref linkend="dv-timing-types"/>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API. This field
- is only used when operating on a subdevice node. When operating on a
- video node applications must set this field to zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set the array to zero.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield></structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-bt-timings-cap;</entry>
- <entry><structfield>bt</structfield></entry>
- <entry>BT.656/1120 timings capabilities of the hardware.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>raw_data</structfield>[32]</entry>
- <entry></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="dv-bt-cap-capabilities">
- <title>DV BT Timing capabilities</title>
- <tgroup cols="2">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>Flag</entry>
- <entry>Description</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_CAP_INTERLACED</entry>
- <entry>Interlaced formats are supported.
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_CAP_PROGRESSIVE</entry>
- <entry>Progressive formats are supported.
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_CAP_REDUCED_BLANKING</entry>
- <entry>CVT/GTF specific: the timings can make use of reduced blanking (CVT)
-or the 'Secondary GTF' curve (GTF).
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_CAP_CUSTOM</entry>
- <entry>Can support non-standard timings, i.e. timings not belonging to the
-standards set in the <structfield>standards</structfield> field.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml b/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml
deleted file mode 100644
index 70a4a08e9404..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-encoder-cmd.xml
+++ /dev/null
@@ -1,197 +0,0 @@
-<refentry id="vidioc-encoder-cmd">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENCODER_CMD, VIDIOC_TRY_ENCODER_CMD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENCODER_CMD</refname>
- <refname>VIDIOC_TRY_ENCODER_CMD</refname>
- <refpurpose>Execute an encoder command</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_encoder_cmd *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENCODER_CMD, VIDIOC_TRY_ENCODER_CMD</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls control an audio/video (usually MPEG-) encoder.
-<constant>VIDIOC_ENCODER_CMD</constant> sends a command to the
-encoder, <constant>VIDIOC_TRY_ENCODER_CMD</constant> can be used to
-try a command without actually executing it.</para>
-
- <para>To send a command applications must initialize all fields of a
- &v4l2-encoder-cmd; and call
- <constant>VIDIOC_ENCODER_CMD</constant> or
- <constant>VIDIOC_TRY_ENCODER_CMD</constant> with a pointer to this
- structure.</para>
-
- <para>The <structfield>cmd</structfield> field must contain the
-command code. The <structfield>flags</structfield> field is currently
-only used by the STOP command and contains one bit: if the
-<constant>V4L2_ENC_CMD_STOP_AT_GOP_END</constant> flag is set,
-encoding will continue until the end of the current <wordasword>Group
-Of Pictures</wordasword>, otherwise it will stop immediately.</para>
-
- <para>A <function>read</function>() or &VIDIOC-STREAMON; call sends an implicit
-START command to the encoder if it has not been started yet. After a STOP command,
-<function>read</function>() calls will read the remaining data
-buffered by the driver. When the buffer is empty,
-<function>read</function>() will return zero and the next
-<function>read</function>() call will restart the encoder.</para>
-
- <para>A <function>close</function>() or &VIDIOC-STREAMOFF; call of a streaming
-file descriptor sends an implicit immediate STOP to the encoder, and all buffered
-data is discarded.</para>
-
- <para>These ioctls are optional, not all drivers may support
-them. They were introduced in Linux 2.6.21.</para>
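-
-  <para>As an illustration only (a minimal sketch; the helper name
-<function>stop_encoder_at_gop_end</function> is an assumption), a STOP command
-that honours GOP boundaries could be sent like this:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: stop the encoder at the end of the current GOP. */
-static int stop_encoder_at_gop_end(int fd)
-{
-        struct v4l2_encoder_cmd cmd;
-
-        memset(&amp;cmd, 0, sizeof(cmd));
-        cmd.cmd = V4L2_ENC_CMD_STOP;
-        cmd.flags = V4L2_ENC_CMD_STOP_AT_GOP_END;
-
-        return ioctl(fd, VIDIOC_ENCODER_CMD, &amp;cmd);
-}
-</programlisting>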
-
- <table pgwide="1" frame="none" id="v4l2-encoder-cmd">
- <title>struct <structname>v4l2_encoder_cmd</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>cmd</structfield></entry>
- <entry>The encoder command, see <xref linkend="encoder-cmds" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags to go with the command, see <xref
- linkend="encoder-flags" />. If no flags are defined for
-this command, drivers and applications must set this field to
-zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>data</structfield>[8]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="encoder-cmds">
- <title>Encoder Commands</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_ENC_CMD_START</constant></entry>
- <entry>0</entry>
- <entry>Start the encoder. When the encoder is already
-running or paused, this command does nothing. No flags are defined for
-this command.</entry>
- </row>
- <row>
- <entry><constant>V4L2_ENC_CMD_STOP</constant></entry>
- <entry>1</entry>
- <entry>Stop the encoder. When the
-<constant>V4L2_ENC_CMD_STOP_AT_GOP_END</constant> flag is set,
-encoding will continue until the end of the current <wordasword>Group
-Of Pictures</wordasword>, otherwise encoding will stop immediately.
-When the encoder is already stopped, this command does
-nothing. mem2mem encoders will send a <constant>V4L2_EVENT_EOS</constant> event
-when the last frame has been encoded and all frames are ready to be dequeued and
-will set the <constant>V4L2_BUF_FLAG_LAST</constant> buffer flag on the last
-buffer of the capture queue to indicate there will be no new buffers produced to
-dequeue. This buffer may be empty, indicated by the driver setting the
-<structfield>bytesused</structfield> field to 0. Once the
-<constant>V4L2_BUF_FLAG_LAST</constant> flag has been set, the
-<link linkend="vidioc-qbuf">VIDIOC_DQBUF</link> ioctl will no longer block,
-but will return an &EPIPE;.</entry>
- </row>
- <row>
- <entry><constant>V4L2_ENC_CMD_PAUSE</constant></entry>
- <entry>2</entry>
- <entry>Pause the encoder. When the encoder has not been
-started yet, the driver will return an &EPERM;. When the encoder is
-already paused, this command does nothing. No flags are defined for
-this command.</entry>
- </row>
- <row>
- <entry><constant>V4L2_ENC_CMD_RESUME</constant></entry>
- <entry>3</entry>
- <entry>Resume encoding after a PAUSE command. When the
-encoder has not been started yet, the driver will return an &EPERM;.
-When the encoder is already running, this command does nothing. No
-flags are defined for this command.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="encoder-flags">
- <title>Encoder Command Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_ENC_CMD_STOP_AT_GOP_END</constant></entry>
- <entry>0x0001</entry>
- <entry>Stop encoding at the end of the current <wordasword>Group Of
-Pictures</wordasword>, rather than immediately.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <structfield>cmd</structfield> field is invalid.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EPERM</errorcode></term>
- <listitem>
- <para>The application sent a PAUSE or RESUME command when
-the encoder was not running.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
deleted file mode 100644
index 9b3d42018b69..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enum-dv-timings.xml
+++ /dev/null
@@ -1,128 +0,0 @@
-<refentry id="vidioc-enum-dv-timings">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUM_DV_TIMINGS, VIDIOC_SUBDEV_ENUM_DV_TIMINGS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUM_DV_TIMINGS</refname>
- <refname>VIDIOC_SUBDEV_ENUM_DV_TIMINGS</refname>
- <refpurpose>Enumerate supported Digital Video timings</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_enum_dv_timings *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUM_DV_TIMINGS, VIDIOC_SUBDEV_ENUM_DV_TIMINGS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>While some DV receivers or transmitters support a wide range of timings, others
-support only a limited number of timings. With this ioctl applications can enumerate a list
-of known supported timings. Call &VIDIOC-DV-TIMINGS-CAP; to check if it also supports other
-standards or even custom timings that are not in this list.</para>
-
- <para>To query the available timings, applications initialize the
-<structfield>index</structfield> field, set the <structfield>pad</structfield> field to 0,
-zero the reserved array of &v4l2-enum-dv-timings; and call the
-<constant>VIDIOC_ENUM_DV_TIMINGS</constant> ioctl on a video node with a
-pointer to this structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all supported DV timings,
-applications shall begin at index zero, incrementing by one until the
-driver returns <errorcode>EINVAL</errorcode>. Note that drivers may enumerate a
-different set of DV timings after switching the video input or
-output.</para>
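-
-  <para>As an illustration only (a minimal sketch; the helper name
-<function>enum_dv_timings</function> is an assumption), the enumeration loop
-described above could look like this on a video node:</para>
-
-  <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: print all DV timings enumerated by a video node. Assumes the
-   enumerated timings use the BT.656/1120 representation. */
-static void enum_dv_timings(int fd)
-{
-        struct v4l2_enum_dv_timings enum_dv;
-
-        memset(&amp;enum_dv, 0, sizeof(enum_dv)); /* clears pad and reserved */
-        while (ioctl(fd, VIDIOC_ENUM_DV_TIMINGS, &amp;enum_dv) == 0) {
-                printf("timing %u: %ux%u\n", enum_dv.index,
-                       enum_dv.timings.bt.width, enum_dv.timings.bt.height);
-                enum_dv.index++;
-        }
-        /* The loop ends when the driver returns EINVAL. */
-}
-</programlisting>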
-
-  <para>When implemented by the driver, the DV timings of subdevices can be queried
-by calling the <constant>VIDIOC_SUBDEV_ENUM_DV_TIMINGS</constant> ioctl directly
-on a subdevice node. The DV timings are specific to inputs (for DV receivers) or
-outputs (for DV transmitters), so applications must specify the desired pad number
-in the &v4l2-enum-dv-timings; <structfield>pad</structfield> field. Attempts to
-enumerate timings on a pad that doesn't support them will return an &EINVAL;.</para>
-
- <table pgwide="1" frame="none" id="v4l2-enum-dv-timings">
- <title>struct <structname>v4l2_enum_dv_timings</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the DV timings, set by the
-application.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API. This field
- is only used when operating on a subdevice node. When operating on a
- video node applications must set this field to zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions. Drivers and applications must
- set the array to zero.</entry>
- </row>
- <row>
- <entry>&v4l2-dv-timings;</entry>
- <entry><structfield>timings</structfield></entry>
- <entry>The timings.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-enum-dv-timings; <structfield>index</structfield>
-is out of bounds or the <structfield>pad</structfield> number is invalid.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>Digital video presets are not supported for this input or output.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml
deleted file mode 100644
index f8dfeed34fca..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enum-fmt.xml
+++ /dev/null
@@ -1,159 +0,0 @@
-<refentry id="vidioc-enum-fmt">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUM_FMT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUM_FMT</refname>
- <refpurpose>Enumerate image formats</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_fmtdesc
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUM_FMT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To enumerate image formats applications initialize the
-<structfield>type</structfield> and <structfield>index</structfield>
-fields of &v4l2-fmtdesc; and call the
-<constant>VIDIOC_ENUM_FMT</constant> ioctl with a pointer to this
-structure. Drivers fill the rest of the structure or return an
-&EINVAL;. All formats are enumerable by beginning at index zero and
-incrementing by one until <errorcode>EINVAL</errorcode> is
-returned.</para>
-
- <para>Note that after switching input or output the list of enumerated image
-formats may be different.</para>
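-
-  <para>As an illustration only (a minimal sketch; the helper name
-<function>enum_capture_formats</function> and the choice of the capture buffer
-type are assumptions), the enumeration could be implemented as follows:</para>
-
-  <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: list the capture formats offered by a video node. */
-static void enum_capture_formats(int fd)
-{
-        struct v4l2_fmtdesc fmt;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-        while (ioctl(fd, VIDIOC_ENUM_FMT, &amp;fmt) == 0) {
-                printf("%u: %s\n", fmt.index, (char *)fmt.description);
-                fmt.index++;
-        }
-        /* The loop ends when the driver returns EINVAL. */
-}
-</programlisting>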
-
- <table pgwide="1" frame="none" id="v4l2-fmtdesc">
- <title>struct <structname>v4l2_fmtdesc</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the format in the enumeration, set by
-the application. This is in no way related to the <structfield>
-pixelformat</structfield> field.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the data stream, set by the application.
-Only these types are valid here:
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant> and
-<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>. See <xref linkend="v4l2-buf-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>See <xref linkend="fmtdesc-flags" /></entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>description</structfield>[32]</entry>
- <entry>Description of the format, a NUL-terminated ASCII
-string. This information is intended for the user, for example: "YUV
-4:2:2".</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pixelformat</structfield></entry>
- <entry>The image format identifier. This is a
-four character code as computed by the v4l2_fourcc()
-macro:</entry>
- </row>
- <row>
- <entry spanname="hspan"><para><programlisting id="v4l2-fourcc">
-#define v4l2_fourcc(a,b,c,d) (((__u32)(a)&lt;&lt;0)|((__u32)(b)&lt;&lt;8)|((__u32)(c)&lt;&lt;16)|((__u32)(d)&lt;&lt;24))
-</programlisting></para><para>Several image formats are already
-defined by this specification in <xref linkend="pixfmt" />. Note these
-codes are not the same as those used in the Windows world.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
- <entry>Reserved for future extensions. Drivers must set
-the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="fmtdesc-flags">
- <title>Image Format Description Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FMT_FLAG_COMPRESSED</constant></entry>
- <entry>0x0001</entry>
- <entry>This is a compressed format.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FMT_FLAG_EMULATED</constant></entry>
- <entry>0x0002</entry>
- <entry>This format is not native to the device but emulated
-through software (usually libv4l2). Where possible, try to use a native format
-instead for better performance.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-fmtdesc; <structfield>type</structfield>
-is not supported or the <structfield>index</structfield> is out of
-bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-frameintervals.xml b/Documentation/DocBook/media/v4l/vidioc-enum-frameintervals.xml
deleted file mode 100644
index 7c839ab0afbb..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enum-frameintervals.xml
+++ /dev/null
@@ -1,260 +0,0 @@
-<refentry id="vidioc-enum-frameintervals">
-
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUM_FRAMEINTERVALS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUM_FRAMEINTERVALS</refname>
- <refpurpose>Enumerate frame intervals</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_frmivalenum *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUM_FRAMEINTERVALS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>Pointer to a &v4l2-frmivalenum; structure that
-contains a pixel format and size and receives a frame interval.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl allows applications to enumerate all frame
-intervals that the device supports for the given pixel format and
-frame size.</para>
- <para>The supported pixel formats and frame sizes can be obtained
-by using the &VIDIOC-ENUM-FMT; and &VIDIOC-ENUM-FRAMESIZES;
-functions.</para>
- <para>The return value and the content of the
-<structfield>v4l2_frmivalenum.type</structfield> field depend on the
-type of frame intervals the device supports. Here are the semantics of
-the function for the different cases:</para>
- <itemizedlist>
- <listitem>
- <para><emphasis role="bold">Discrete:</emphasis> The function
-returns success if the given index value (zero-based) is valid. The
-application should increase the index by one for each call until
-<constant>EINVAL</constant> is returned. The
-<structfield>v4l2_frmivalenum.type</structfield> field is set to
-<constant>V4L2_FRMIVAL_TYPE_DISCRETE</constant> by the driver. Of the
-union only the <structfield>discrete</structfield> member is valid.</para>
- </listitem>
- <listitem>
- <para><emphasis role="bold">Step-wise:</emphasis> The function
-returns success if the given index value is zero and
-<constant>EINVAL</constant> for any other index value. The
-<structfield>v4l2_frmivalenum.type</structfield> field is set to
-<constant>V4L2_FRMIVAL_TYPE_STEPWISE</constant> by the driver. Of the
-union only the <structfield>stepwise</structfield> member is
-valid.</para>
- </listitem>
- <listitem>
- <para><emphasis role="bold">Continuous:</emphasis> This is a
-special case of the step-wise type above. The function returns success
-if the given index value is zero and <constant>EINVAL</constant> for
-any other index value. The
-<structfield>v4l2_frmivalenum.type</structfield> field is set to
-<constant>V4L2_FRMIVAL_TYPE_CONTINUOUS</constant> by the driver. Of
-the union only the <structfield>stepwise</structfield> member is valid
-and the <structfield>step</structfield> value is set to 1.</para>
- </listitem>
- </itemizedlist>
-
- <para>When the application calls the function with index zero, it
-must check the <structfield>type</structfield> field to determine the
-type of frame interval enumeration the device supports. Only for the
-<constant>V4L2_FRMIVAL_TYPE_DISCRETE</constant> type does it make
-sense to increase the index value to receive more frame
-intervals.</para>
- <para>Note that the order in which the frame intervals are
-returned has no special meaning. In particular it does not say
-anything about potential default frame intervals.</para>
- <para>Applications can assume that the enumeration data does not
-change without any interaction from the application itself. This means
-that the enumeration data is consistent if the application does not
-perform any other ioctl calls while it runs the frame interval
-enumeration.</para>
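-
-    <para>As an illustration only (a minimal sketch; the helper name
-<function>enum_intervals</function> and the choice of pixel format and frame
-size are assumptions), enumerating discrete frame intervals could look like
-this:</para>
-
-    <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Sketch: enumerate the frame intervals for 640x480 YUYV. */
-static void enum_intervals(int fd)
-{
-        struct v4l2_frmivalenum ival;
-
-        memset(&amp;ival, 0, sizeof(ival));
-        ival.pixel_format = V4L2_PIX_FMT_YUYV;
-        ival.width = 640;
-        ival.height = 480;
-
-        while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &amp;ival) == 0) {
-                if (ival.type != V4L2_FRMIVAL_TYPE_DISCRETE)
-                        break; /* stepwise/continuous: only index 0 is valid */
-                printf("%u/%u s\n", ival.discrete.numerator,
-                       ival.discrete.denominator);
-                ival.index++;
-        }
-}
-</programlisting>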
- </refsect1>
-
- <refsect1>
- <title>Notes</title>
-
- <itemizedlist>
- <listitem>
- <para><emphasis role="bold">Frame intervals and frame
-rates:</emphasis> The V4L2 API uses frame intervals instead of frame
-rates. Given the frame interval the frame rate can be computed as
-follows:<screen>frame_rate = 1 / frame_interval</screen></para>
- </listitem>
- </itemizedlist>
-
- </refsect1>
-
- <refsect1>
- <title>Structs</title>
-
- <para>In the structs below, <emphasis>IN</emphasis> denotes a
-value that has to be filled in by the application,
-<emphasis>OUT</emphasis> denotes values that the driver fills in. The
-application should zero out all members except for the
-<emphasis>IN</emphasis> fields.</para>
-
- <table pgwide="1" frame="none" id="v4l2-frmival-stepwise">
- <title>struct <structname>v4l2_frmival_stepwise</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>min</structfield></entry>
- <entry>Minimum frame interval [s].</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>max</structfield></entry>
- <entry>Maximum frame interval [s].</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>step</structfield></entry>
- <entry>Frame interval step size [s].</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-frmivalenum">
- <title>struct <structname>v4l2_frmivalenum</structname></title>
- <tgroup cols="4">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry></entry>
- <entry>IN: Index of the given frame interval in the
-enumeration.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pixel_format</structfield></entry>
- <entry></entry>
- <entry>IN: Pixel format for which the frame intervals are
-enumerated.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry></entry>
- <entry>IN: Frame width for which the frame intervals are
-enumerated.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry></entry>
- <entry>IN: Frame height for which the frame intervals are
-enumerated.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>OUT: Frame interval type the device supports.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry></entry>
- <entry></entry>
- <entry>OUT: Frame interval with the given index.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>discrete</structfield></entry>
- <entry>Frame interval [s].</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-frmival-stepwise;</entry>
- <entry><structfield>stepwise</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved[2]</structfield></entry>
- <entry></entry>
- <entry>Reserved space for future use. Must be zeroed by drivers and
- applications.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- <title>Enums</title>
-
- <table pgwide="1" frame="none" id="v4l2-frmivaltypes">
- <title>enum <structname>v4l2_frmivaltypes</structname></title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FRMIVAL_TYPE_DISCRETE</constant></entry>
- <entry>1</entry>
- <entry>Discrete frame interval.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FRMIVAL_TYPE_CONTINUOUS</constant></entry>
- <entry>2</entry>
- <entry>Continuous frame interval.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FRMIVAL_TYPE_STEPWISE</constant></entry>
- <entry>3</entry>
- <entry>Step-wise defined frame interval.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-framesizes.xml b/Documentation/DocBook/media/v4l/vidioc-enum-framesizes.xml
deleted file mode 100644
index 9ed68ac8f474..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enum-framesizes.xml
+++ /dev/null
@@ -1,265 +0,0 @@
-<refentry id="vidioc-enum-framesizes">
-
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUM_FRAMESIZES</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUM_FRAMESIZES</refname>
- <refpurpose>Enumerate frame sizes</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_frmsizeenum *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUM_FRAMESIZES</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>Pointer to a &v4l2-frmsizeenum; that contains an index
-and pixel format and receives a frame width and height.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl allows applications to enumerate all frame sizes
-(&ie; width and height in pixels) that the device supports for the
-given pixel format.</para>
- <para>The supported pixel formats can be obtained by using the
-&VIDIOC-ENUM-FMT; function.</para>
- <para>The return value and the content of the
-<structfield>v4l2_frmsizeenum.type</structfield> field depend on the
-type of frame sizes the device supports. Here are the semantics of the
-function for the different cases:</para>
-
- <itemizedlist>
- <listitem>
- <para><emphasis role="bold">Discrete:</emphasis> The function
-returns success if the given index value (zero-based) is valid. The
-application should increase the index by one for each call until
-<constant>EINVAL</constant> is returned. The
-<structfield>v4l2_frmsizeenum.type</structfield> field is set to
-<constant>V4L2_FRMSIZE_TYPE_DISCRETE</constant> by the driver. Of the
-union only the <structfield>discrete</structfield> member is
-valid.</para>
- </listitem>
- <listitem>
- <para><emphasis role="bold">Step-wise:</emphasis> The function
-returns success if the given index value is zero and
-<constant>EINVAL</constant> for any other index value. The
-<structfield>v4l2_frmsizeenum.type</structfield> field is set to
-<constant>V4L2_FRMSIZE_TYPE_STEPWISE</constant> by the driver. Of the
-union only the <structfield>stepwise</structfield> member is
-valid.</para>
- </listitem>
- <listitem>
- <para><emphasis role="bold">Continuous:</emphasis> This is a
-special case of the step-wise type above. The function returns success
-if the given index value is zero and <constant>EINVAL</constant> for
-any other index value. The
-<structfield>v4l2_frmsizeenum.type</structfield> field is set to
-<constant>V4L2_FRMSIZE_TYPE_CONTINUOUS</constant> by the driver. Of
-the union only the <structfield>stepwise</structfield> member is valid
-and the <structfield>step_width</structfield> and
-<structfield>step_height</structfield> values are set to 1.</para>
- </listitem>
- </itemizedlist>
-
- <para>When the application calls the function with index zero, it
-must check the <structfield>type</structfield> field to determine the
-type of frame size enumeration the device supports. Only for the
-<constant>V4L2_FRMSIZE_TYPE_DISCRETE</constant> type does it make
-sense to increase the index value to receive more frame sizes.</para>
- <para>Note that the order in which the frame sizes are returned
-has no special meaning. In particular, it does not say anything about
-potential default format sizes.</para>
- <para>Applications can assume that the enumeration data does not
-change without any interaction from the application itself. This means
-that the enumeration data is consistent if the application does not
-perform any other ioctl calls while it runs the frame size
-enumeration.</para>
- </refsect1>
-
- <refsect1>
- <title>Structs</title>
-
- <para>In the structs below, <emphasis>IN</emphasis> denotes a
-value that has to be filled in by the application, and
-<emphasis>OUT</emphasis> denotes values that the driver fills in. The
-application should zero out all members except for the
-<emphasis>IN</emphasis> fields.</para>
-
- <table pgwide="1" frame="none" id="v4l2-frmsize-discrete">
- <title>struct <structname>v4l2_frmsize_discrete</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Width of the frame [pixel].</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Height of the frame [pixel].</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-frmsize-stepwise">
- <title>struct <structname>v4l2_frmsize_stepwise</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>min_width</structfield></entry>
- <entry>Minimum frame width [pixel].</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>max_width</structfield></entry>
- <entry>Maximum frame width [pixel].</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>step_width</structfield></entry>
- <entry>Frame width step size [pixel].</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>min_height</structfield></entry>
- <entry>Minimum frame height [pixel].</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>max_height</structfield></entry>
- <entry>Maximum frame height [pixel].</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>step_height</structfield></entry>
- <entry>Frame height step size [pixel].</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-frmsizeenum">
- <title>struct <structname>v4l2_frmsizeenum</structname></title>
- <tgroup cols="4">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry></entry>
- <entry>IN: Index of the given frame size in the enumeration.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pixel_format</structfield></entry>
- <entry></entry>
- <entry>IN: Pixel format for which the frame sizes are enumerated.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>OUT: Frame size type the device supports.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry></entry>
- <entry></entry>
- <entry>OUT: Frame size with the given index.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-frmsize-discrete;</entry>
- <entry><structfield>discrete</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-frmsize-stepwise;</entry>
- <entry><structfield>stepwise</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved[2]</structfield></entry>
- <entry></entry>
- <entry>Reserved space for future use. Must be zeroed by drivers and
- applications.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- <title>Enums</title>
-
- <table pgwide="1" frame="none" id="v4l2-frmsizetypes">
- <title>enum <structname>v4l2_frmsizetypes</structname></title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FRMSIZE_TYPE_DISCRETE</constant></entry>
- <entry>1</entry>
- <entry>Discrete frame size.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FRMSIZE_TYPE_CONTINUOUS</constant></entry>
- <entry>2</entry>
- <entry>Continuous frame size.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FRMSIZE_TYPE_STEPWISE</constant></entry>
- <entry>3</entry>
- <entry>Step-wise defined frame size.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
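
A minimal C sketch of the enumeration loop described in the page above, assuming an already-open V4L2 file descriptor (the placeholder fd) and a pixel format obtained earlier with VIDIOC_ENUM_FMT; only the terminating EINVAL is handled:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Print all frame sizes the driver supports for one pixel format. */
    static void enum_frame_sizes(int fd, __u32 pixel_format)
    {
        struct v4l2_frmsizeenum fse;

        memset(&fse, 0, sizeof(fse));
        fse.pixel_format = pixel_format;

        /* Increase the index until the driver returns EINVAL. */
        while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fse) == 0) {
            if (fse.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
                printf("%ux%u\n", fse.discrete.width, fse.discrete.height);
            } else {
                /* STEPWISE and CONTINUOUS use the stepwise member and are
                 * fully described by the entry with index zero. */
                printf("%u..%u x %u..%u (step %ux%u)\n",
                       fse.stepwise.min_width, fse.stepwise.max_width,
                       fse.stepwise.min_height, fse.stepwise.max_height,
                       fse.stepwise.step_width, fse.stepwise.step_height);
                break;
            }
            fse.index++;
        }
    }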
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml b/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
deleted file mode 100644
index a0608abc1ab8..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
+++ /dev/null
@@ -1,175 +0,0 @@
-<refentry id="vidioc-enum-freq-bands">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUM_FREQ_BANDS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUM_FREQ_BANDS</refname>
- <refpurpose>Enumerate supported frequency bands</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_frequency_band
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUM_FREQ_BANDS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Enumerates the frequency bands that a tuner or modulator supports.
-To do this applications initialize the <structfield>tuner</structfield>,
-<structfield>type</structfield> and <structfield>index</structfield> fields,
-and zero out the <structfield>reserved</structfield> array of a &v4l2-frequency-band; and
-call the <constant>VIDIOC_ENUM_FREQ_BANDS</constant> ioctl with a pointer
-to this structure.</para>
-
- <para>This ioctl is supported if the <constant>V4L2_TUNER_CAP_FREQ_BANDS</constant> capability
- of the corresponding tuner/modulator is set.</para>
-
- <table pgwide="1" frame="none" id="v4l2-frequency-band">
- <title>struct <structname>v4l2_frequency_band</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>tuner</structfield></entry>
- <entry>The tuner or modulator index number. This is the
-same value as in the &v4l2-input; <structfield>tuner</structfield>
-field and the &v4l2-tuner; <structfield>index</structfield> field, or
-the &v4l2-output; <structfield>modulator</structfield> field and the
-&v4l2-modulator; <structfield>index</structfield> field.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The type must be set
-to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
-device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
-for all others. Set this field to <constant>V4L2_TUNER_RADIO</constant> for
-modulators (currently only radio modulators are supported).
-See <xref linkend="v4l2-tuner-type" /></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Identifies the frequency band, set by the application.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry spanname="hspan">The tuner/modulator capability flags for
-this frequency band, see <xref linkend="tuner-capability" />. The <constant>V4L2_TUNER_CAP_LOW</constant>
-or <constant>V4L2_TUNER_CAP_1HZ</constant> capability must be the same for all frequency bands of the selected tuner/modulator.
-So either all bands have that capability set, or none of them have that capability.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangelow</structfield></entry>
- <entry spanname="hspan">The lowest tunable frequency in
-units of 62.5 kHz, or if the <structfield>capability</structfield>
-flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz, for this frequency band. A 1 Hz unit is used when the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangehigh</structfield></entry>
- <entry spanname="hspan">The highest tunable frequency in
-units of 62.5 kHz, or if the <structfield>capability</structfield>
-flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz, for this frequency band. A 1 Hz unit is used when the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>modulation</structfield></entry>
- <entry spanname="hspan">The supported modulation systems of this frequency band.
- See <xref linkend="band-modulation" />. Note that currently only one
- modulation system per frequency band is supported. More work will need to
- be done if multiple modulation systems are possible. Contact the
- linux-media mailing list (&v4l-ml;) if you need that functionality.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
- <entry>Reserved for future extensions. Applications and drivers
- must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="band-modulation">
- <title>Band Modulation Systems</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_BAND_MODULATION_VSB</constant></entry>
- <entry>0x02</entry>
- <entry>Vestigial Sideband modulation, used for analog TV.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BAND_MODULATION_FM</constant></entry>
- <entry>0x04</entry>
- <entry>Frequency Modulation, commonly used for analog radio.</entry>
- </row>
- <row>
- <entry><constant>V4L2_BAND_MODULATION_AM</constant></entry>
- <entry>0x08</entry>
- <entry>Amplitude Modulation, commonly used for analog radio.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <structfield>tuner</structfield> or <structfield>index</structfield>
-is out of bounds or the <structfield>type</structfield> field is wrong.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
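
A short C sketch of the band enumeration described above, assuming an already-open radio device file descriptor fd and tuner 0; the loop stops at the first EINVAL:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* List the frequency bands of tuner 0 on a /dev/radioX node. */
    static void enum_freq_bands(int fd)
    {
        struct v4l2_frequency_band band;
        unsigned int i;

        for (i = 0; ; i++) {
            memset(&band, 0, sizeof(band));   /* also zeroes the reserved array */
            band.tuner = 0;
            band.type = V4L2_TUNER_RADIO;     /* V4L2_TUNER_ANALOG_TV for non-radio nodes */
            band.index = i;
            if (ioctl(fd, VIDIOC_ENUM_FREQ_BANDS, &band) < 0)
                break;                        /* EINVAL: no more bands */
            printf("band %u: %u..%u, capability 0x%x, modulation 0x%x\n",
                   band.index, band.rangelow, band.rangehigh,
                   band.capability, band.modulation);
        }
    }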
diff --git a/Documentation/DocBook/media/v4l/vidioc-enumaudio.xml b/Documentation/DocBook/media/v4l/vidioc-enumaudio.xml
deleted file mode 100644
index ea816ab2e49e..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enumaudio.xml
+++ /dev/null
@@ -1,76 +0,0 @@
-<refentry id="vidioc-enumaudio">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUMAUDIO</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUMAUDIO</refname>
- <refpurpose>Enumerate audio inputs</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_audio *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUMAUDIO</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of an audio input applications
-initialize the <structfield>index</structfield> field and zero out the
-<structfield>reserved</structfield> array of a &v4l2-audio;
-and call the <constant>VIDIOC_ENUMAUDIO</constant> ioctl with a pointer
-to this structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all audio
-inputs applications shall begin at index zero, incrementing by one
-until the driver returns <errorcode>EINVAL</errorcode>.</para>
-
- <para>See <xref linkend="vidioc-g-audio" /> for a description of
-&v4l2-audio;.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The number of the audio input is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
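
The enumeration loop is the usual zero-based one; a minimal C sketch follows, assuming an already-open file descriptor fd. The same loop, with struct v4l2_audioout and VIDIOC_ENUMAUDOUT, enumerates audio outputs.

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Print every audio input of the device. */
    static void enum_audio_inputs(int fd)
    {
        struct v4l2_audio audio;
        unsigned int i;

        for (i = 0; ; i++) {
            memset(&audio, 0, sizeof(audio));
            audio.index = i;
            if (ioctl(fd, VIDIOC_ENUMAUDIO, &audio) < 0)
                break;          /* EINVAL ends the enumeration */
            printf("audio input %u: %s%s\n", audio.index,
                   (const char *)audio.name,
                   (audio.capability & V4L2_AUDCAP_STEREO) ? " (stereo)" : "");
        }
    }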
diff --git a/Documentation/DocBook/media/v4l/vidioc-enumaudioout.xml b/Documentation/DocBook/media/v4l/vidioc-enumaudioout.xml
deleted file mode 100644
index 2e87cedb0d32..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enumaudioout.xml
+++ /dev/null
@@ -1,79 +0,0 @@
-<refentry id="vidioc-enumaudioout">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUMAUDOUT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUMAUDOUT</refname>
- <refpurpose>Enumerate audio outputs</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_audioout *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUMAUDOUT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of an audio output applications
-initialize the <structfield>index</structfield> field and zero out the
-<structfield>reserved</structfield> array of a &v4l2-audioout; and
-call the <constant>VIDIOC_ENUMAUDOUT</constant> ioctl with a pointer
-to this structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all audio
-outputs applications shall begin at index zero, incrementing by one
-until the driver returns <errorcode>EINVAL</errorcode>.</para>
-
- <para>Note that connectors on a TV card which loop back the received audio
-signal to a sound card are not audio outputs in this sense.</para>
-
- <para>See <xref linkend="vidioc-g-audioout" /> for a description of
-&v4l2-audioout;.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The number of the audio output is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-enuminput.xml b/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
deleted file mode 100644
index 603fecef9083..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enuminput.xml
+++ /dev/null
@@ -1,316 +0,0 @@
-<refentry id="vidioc-enuminput">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUMINPUT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUMINPUT</refname>
- <refpurpose>Enumerate video inputs</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_input
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUMINPUT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of a video input applications
-initialize the <structfield>index</structfield> field of &v4l2-input;
-and call the <constant>VIDIOC_ENUMINPUT</constant> ioctl with a
-pointer to this structure. Drivers fill the rest of the structure or
-return an &EINVAL; when the index is out of bounds. To enumerate all
-inputs applications shall begin at index zero, incrementing by one
-until the driver returns <errorcode>EINVAL</errorcode>.</para>
-
- <table frame="none" pgwide="1" id="v4l2-input">
- <title>struct <structname>v4l2_input</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Identifies the input, set by the
-application.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the video input, a NUL-terminated ASCII
-string, for example: "Vin (Composite 2)". This information is intended
-for the user, preferably the connector label on the device itself.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the input, see <xref
- linkend="input-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>audioset</structfield></entry>
- <entry><para>Drivers can enumerate up to 32 video and
-audio inputs. This field shows which audio inputs were selectable as
-audio source if this was the currently selected video input. It is a
-bit mask. The LSB corresponds to audio input 0, the MSB to input 31.
-Any number of bits can be set, or none.</para><para>When the driver
-does not enumerate audio inputs no bits must be set. Applications
-shall not interpret this as lack of audio support. Some drivers
-automatically select audio sources and do not enumerate them since
-there is no choice anyway.</para><para>For details on audio inputs and
-how to select the current input see <xref
- linkend="audio" />.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>tuner</structfield></entry>
- <entry>Capture devices can have zero or more tuners (RF
-demodulators). When the <structfield>type</structfield> is set to
-<constant>V4L2_INPUT_TYPE_TUNER</constant> this is an RF connector and
-this field identifies the tuner. It corresponds to
-&v4l2-tuner; field <structfield>index</structfield>. For details on
-tuners see <xref linkend="tuner" />.</entry>
- </row>
- <row>
- <entry>&v4l2-std-id;</entry>
- <entry><structfield>std</structfield></entry>
- <entry>Every video input supports one or more different
-video standards. This field is a set of all supported standards. For
-details on video standards and how to switch see <xref
-linkend="standard" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>status</structfield></entry>
- <entry>This field provides status information about the
-input. See <xref linkend="input-status" /> for flags.
-With the exception of the sensor orientation bits, <structfield>status</structfield> is only valid when this is the
-current input.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capabilities</structfield></entry>
- <entry>This field provides capabilities for the
-input. See <xref linkend="input-capabilities" /> for flags.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[3]</entry>
- <entry>Reserved for future extensions. Drivers must set
-the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="input-type">
- <title>Input Types</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_INPUT_TYPE_TUNER</constant></entry>
- <entry>1</entry>
- <entry>This input uses a tuner (RF demodulator).</entry>
- </row>
- <row>
- <entry><constant>V4L2_INPUT_TYPE_CAMERA</constant></entry>
- <entry>2</entry>
- <entry>Analog baseband input, for example CVBS /
-Composite Video, S-Video, RGB.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- Status flags based on proposal by Mark McClelland,
-video4linux-list@redhat.com on 18 Oct 2002, subject "Re: [V4L] Re:
-v4l2 api". "Why are some of them inverted? So that the driver doesn't
-have to lie about the status in cases where it can't tell one way or
-the other. Plus, a status of zero would generally mean that everything
-is OK." -->
-
- <table frame="none" pgwide="1" id="input-status">
- <title>Input Status Flags</title>
- <tgroup cols="3">
- <colspec colname="c1" />
- <colspec colname="c2" align="center" />
- <colspec colname="c3" />
- <spanspec namest="c1" nameend="c3" spanname="hspan"
- align="left" />
- <tbody valign="top">
- <row>
- <entry spanname="hspan">General</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_POWER</constant></entry>
- <entry>0x00000001</entry>
- <entry>Attached device is off.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_SIGNAL</constant></entry>
- <entry>0x00000002</entry>
- <entry>No signal detected.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_COLOR</constant></entry>
- <entry>0x00000004</entry>
- <entry>The hardware supports color decoding, but does not
-detect color modulation in the signal.</entry>
- </row>
- <row>
- <entry spanname="hspan">Sensor Orientation</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_HFLIP</constant></entry>
- <entry>0x00000010</entry>
- <entry>The input is connected to a device that produces a signal
-that is flipped horizontally and does not correct this before passing the
-signal to userspace.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_VFLIP</constant></entry>
- <entry>0x00000020</entry>
- <entry>The input is connected to a device that produces a signal
-that is flipped vertically and does not correct this before passing the
-signal to userspace. Note that a 180 degree rotation is the same as HFLIP | VFLIP.</entry>
- </row>
- <row>
- <entry spanname="hspan">Analog Video</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_H_LOCK</constant></entry>
- <entry>0x00000100</entry>
- <entry>No horizontal sync lock.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_COLOR_KILL</constant></entry>
- <entry>0x00000200</entry>
- <entry>A color killer circuit automatically disables color
-decoding when it detects no color modulation. When this flag is set
-the color killer is enabled <emphasis>and</emphasis> has shut off
-color decoding.</entry>
- </row>
- <row>
- <entry spanname="hspan">Digital Video</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_SYNC</constant></entry>
- <entry>0x00010000</entry>
- <entry>No synchronization lock.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_EQU</constant></entry>
- <entry>0x00020000</entry>
- <entry>No equalizer lock.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_CARRIER</constant></entry>
- <entry>0x00040000</entry>
- <entry>Carrier recovery failed.</entry>
- </row>
- <row>
- <entry spanname="hspan">VCR and Set-Top Box</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_MACROVISION</constant></entry>
- <entry>0x01000000</entry>
- <entry>Macrovision is an analog copy prevention system
-mangling the video signal to confuse video recorders. When this
-flag is set Macrovision has been detected.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_NO_ACCESS</constant></entry>
- <entry>0x02000000</entry>
- <entry>Conditional access denied.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_ST_VTR</constant></entry>
- <entry>0x04000000</entry>
- <entry>VTR time constant. [?]</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- Capability flags based on video timings RFC by Muralidharan
-Karicheri, titled RFC (v1.2): V4L - Support for video timings at the
-input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
- -->
- <table frame="none" pgwide="1" id="input-capabilities">
- <title>Input capabilities</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_IN_CAP_DV_TIMINGS</constant></entry>
- <entry>0x00000002</entry>
- <entry>This input supports setting video timings by using VIDIOC_S_DV_TIMINGS.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_CAP_STD</constant></entry>
- <entry>0x00000004</entry>
- <entry>This input supports setting the TV standard by using VIDIOC_S_STD.</entry>
- </row>
- <row>
- <entry><constant>V4L2_IN_CAP_NATIVE_SIZE</constant></entry>
- <entry>0x00000008</entry>
- <entry>This input supports setting the native size using
- the <constant>V4L2_SEL_TGT_NATIVE_SIZE</constant>
- selection target, see <xref
- linkend="v4l2-selections-common"/>.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-input; <structfield>index</structfield> is
-out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
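
A minimal C sketch of the input enumeration described above, assuming an already-open file descriptor fd; only the name and type of each input are printed:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Print every video input of the device. */
    static void enum_inputs(int fd)
    {
        struct v4l2_input input;
        unsigned int i;

        for (i = 0; ; i++) {
            memset(&input, 0, sizeof(input));
            input.index = i;
            if (ioctl(fd, VIDIOC_ENUMINPUT, &input) < 0)
                break;          /* EINVAL: index is out of bounds */
            printf("input %u: %s (%s)\n", input.index,
                   (const char *)input.name,
                   input.type == V4L2_INPUT_TYPE_TUNER ? "tuner" : "camera");
        }
    }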
diff --git a/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml b/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
deleted file mode 100644
index 773fb1258c24..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enumoutput.xml
+++ /dev/null
@@ -1,201 +0,0 @@
-<refentry id="vidioc-enumoutput">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUMOUTPUT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUMOUTPUT</refname>
- <refpurpose>Enumerate video outputs</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_output *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUMOUTPUT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of a video output applications
-initialize the <structfield>index</structfield> field of &v4l2-output;
-and call the <constant>VIDIOC_ENUMOUTPUT</constant> ioctl with a
-pointer to this structure. Drivers fill the rest of the structure or
-return an &EINVAL; when the index is out of bounds. To enumerate all
-outputs applications shall begin at index zero, incrementing by one
-until the driver returns <errorcode>EINVAL</errorcode>.</para>
-
- <table frame="none" pgwide="1" id="v4l2-output">
- <title>struct <structname>v4l2_output</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Identifies the output, set by the
-application.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the video output, a NUL-terminated ASCII
-string, for example: "Vout". This information is intended for the
-user, preferably the connector label on the device itself.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the output, see <xref
- linkend="output-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>audioset</structfield></entry>
- <entry><para>Drivers can enumerate up to 32 video and
-audio outputs. This field shows which audio outputs were
-selectable as the current output if this was the currently selected
-video output. It is a bit mask. The LSB corresponds to audio output 0,
-the MSB to output 31. Any number of bits can be set, or
-none.</para><para>When the driver does not enumerate audio outputs no
-bits must be set. Applications shall not interpret this as lack of
-audio support. Drivers may automatically select audio outputs without
-enumerating them.</para><para>For details on audio outputs and how to
-select the current output see <xref linkend="audio" />.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>modulator</structfield></entry>
- <entry>Output devices can have zero or more RF modulators.
-When the <structfield>type</structfield> is
-<constant>V4L2_OUTPUT_TYPE_MODULATOR</constant> this is an RF
-connector and this field identifies the modulator. It corresponds to
-&v4l2-modulator; field <structfield>index</structfield>. For details
-on modulators see <xref linkend="tuner" />.</entry>
- </row>
- <row>
- <entry>&v4l2-std-id;</entry>
- <entry><structfield>std</structfield></entry>
- <entry>Every video output supports one or more different
-video standards. This field is a set of all supported standards. For
-details on video standards and how to switch see <xref
- linkend="standard" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capabilities</structfield></entry>
- <entry>This field provides capabilities for the
-output. See <xref linkend="output-capabilities" /> for flags.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[3]</entry>
- <entry>Reserved for future extensions. Drivers must set
-the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table frame="none" pgwide="1" id="output-type">
- <title>Output Type</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_OUTPUT_TYPE_MODULATOR</constant></entry>
- <entry>1</entry>
- <entry>This output is an analog TV modulator.</entry>
- </row>
- <row>
- <entry><constant>V4L2_OUTPUT_TYPE_ANALOG</constant></entry>
- <entry>2</entry>
- <entry>Analog baseband output, for example Composite /
-CVBS, S-Video, RGB.</entry>
- </row>
- <row>
- <entry><constant>V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY</constant></entry>
- <entry>3</entry>
- <entry>[?]</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- Capabilities flags based on video timings RFC by Muralidharan
-Karicheri, titled RFC (v1.2): V4L - Support for video timings at the
-input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
- -->
- <table frame="none" pgwide="1" id="output-capabilities">
- <title>Output capabilities</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_OUT_CAP_DV_TIMINGS</constant></entry>
- <entry>0x00000002</entry>
- <entry>This output supports setting video timings by using VIDIOC_S_DV_TIMINGS.</entry>
- </row>
- <row>
- <entry><constant>V4L2_OUT_CAP_STD</constant></entry>
- <entry>0x00000004</entry>
- <entry>This output supports setting the TV standard by using VIDIOC_S_STD.</entry>
- </row>
- <row>
- <entry><constant>V4L2_OUT_CAP_NATIVE_SIZE</constant></entry>
- <entry>0x00000008</entry>
- <entry>This output supports setting the native size using
- the <constant>V4L2_SEL_TGT_NATIVE_SIZE</constant>
- selection target, see <xref
- linkend="v4l2-selections-common"/>.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-output; <structfield>index</structfield>
-is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
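
As a small illustration of the capabilities field, the hypothetical helper below returns the first output that allows setting a TV standard; fd is assumed to be an already-open device file descriptor:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Return the index of the first output whose capabilities include
     * V4L2_OUT_CAP_STD, or -1 if there is none. */
    static int find_std_capable_output(int fd)
    {
        struct v4l2_output output;
        unsigned int i;

        for (i = 0; ; i++) {
            memset(&output, 0, sizeof(output));
            output.index = i;
            if (ioctl(fd, VIDIOC_ENUMOUTPUT, &output) < 0)
                return -1;      /* EINVAL: no more outputs */
            if (output.capabilities & V4L2_OUT_CAP_STD)
                return (int)output.index;
        }
    }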
diff --git a/Documentation/DocBook/media/v4l/vidioc-enumstd.xml b/Documentation/DocBook/media/v4l/vidioc-enumstd.xml
deleted file mode 100644
index f18454e91752..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-enumstd.xml
+++ /dev/null
@@ -1,389 +0,0 @@
-<refentry id="vidioc-enumstd">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_ENUMSTD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_ENUMSTD</refname>
- <refpurpose>Enumerate supported video standards</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_standard *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_ENUMSTD</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of a video standard,
-especially a custom (driver defined) one, applications initialize the
-<structfield>index</structfield> field of &v4l2-standard; and call the
-<constant>VIDIOC_ENUMSTD</constant> ioctl with a pointer to this
-structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all standards
-applications shall begin at index zero, incrementing by one until the
-driver returns <errorcode>EINVAL</errorcode>. Drivers may enumerate a
-different set of standards after switching the video input or
-output.<footnote>
- <para>The supported standards may overlap and we need an
-unambiguous set to find the current standard returned by
-<constant>VIDIOC_G_STD</constant>.</para>
- </footnote></para>
-
- <table pgwide="1" frame="none" id="v4l2-standard">
- <title>struct <structname>v4l2_standard</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the video standard, set by the
-application.</entry>
- </row>
- <row>
- <entry>&v4l2-std-id;</entry>
- <entry><structfield>id</structfield></entry>
- <entry>The bits in this field identify the standard as
-one of the common standards listed in <xref linkend="v4l2-std-id" />,
-or if bits 32 to 63 are set as custom standards. Multiple bits can be
-set if the hardware does not distinguish between these standards;
-however, separate indices do not indicate the opposite. The
-<structfield>id</structfield> must be unique. No other enumerated
-<structname>v4l2_standard</structname> structure, for this input or
-output anyway, can contain the same set of bits.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[24]</entry>
- <entry>Name of the standard, a NUL-terminated ASCII
-string, for example: "PAL-B/G", "NTSC Japan". This information is
-intended for the user.</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>frameperiod</structfield></entry>
- <entry>The frame period (not field period) is numerator
-/ denominator. For example M/NTSC has a frame period of 1001 /
-30000 seconds.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>framelines</structfield></entry>
- <entry>Total lines per frame including blanking,
-e.&nbsp;g. 625 for B/PAL.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
- <entry>Reserved for future extensions. Drivers must set
-the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-fract">
- <title>struct <structname>v4l2_fract</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>numerator</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>denominator</structfield></entry>
- <entry></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-std-id">
- <title>typedef <structname>v4l2_std_id</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u64</entry>
- <entry><structfield>v4l2_std_id</structfield></entry>
- <entry>This type is a set, each bit representing another
-video standard as listed below and in <xref
-linkend="video-standards" />. The 32 most significant bits are reserved
-for custom (driver defined) video standards.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <para><programlisting>
-#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001)
-#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002)
-#define V4L2_STD_PAL_G ((v4l2_std_id)0x00000004)
-#define V4L2_STD_PAL_H ((v4l2_std_id)0x00000008)
-#define V4L2_STD_PAL_I ((v4l2_std_id)0x00000010)
-#define V4L2_STD_PAL_D ((v4l2_std_id)0x00000020)
-#define V4L2_STD_PAL_D1 ((v4l2_std_id)0x00000040)
-#define V4L2_STD_PAL_K ((v4l2_std_id)0x00000080)
-
-#define V4L2_STD_PAL_M ((v4l2_std_id)0x00000100)
-#define V4L2_STD_PAL_N ((v4l2_std_id)0x00000200)
-#define V4L2_STD_PAL_Nc ((v4l2_std_id)0x00000400)
-#define V4L2_STD_PAL_60 ((v4l2_std_id)0x00000800)
-</programlisting></para><para><constant>V4L2_STD_PAL_60</constant> is
-a hybrid standard with 525 lines, 60 Hz refresh rate, and PAL color
-modulation with a 4.43 MHz color subcarrier. Some PAL video recorders
-can play back NTSC tapes in this mode for display on a 50/60 Hz agnostic
-PAL TV.</para><para><programlisting>
-#define V4L2_STD_NTSC_M ((v4l2_std_id)0x00001000)
-#define V4L2_STD_NTSC_M_JP ((v4l2_std_id)0x00002000)
-#define V4L2_STD_NTSC_443 ((v4l2_std_id)0x00004000)
-</programlisting></para><para><constant>V4L2_STD_NTSC_443</constant>
-is a hybrid standard with 525 lines, 60 Hz refresh rate, and NTSC
-color modulation with a 4.43 MHz color
-subcarrier.</para><para><programlisting>
-#define V4L2_STD_NTSC_M_KR ((v4l2_std_id)0x00008000)
-
-#define V4L2_STD_SECAM_B ((v4l2_std_id)0x00010000)
-#define V4L2_STD_SECAM_D ((v4l2_std_id)0x00020000)
-#define V4L2_STD_SECAM_G ((v4l2_std_id)0x00040000)
-#define V4L2_STD_SECAM_H ((v4l2_std_id)0x00080000)
-#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000)
-#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000)
-#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000)
-#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000)
-
-/* ATSC/HDTV */
-#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000)
-#define V4L2_STD_ATSC_16_VSB ((v4l2_std_id)0x02000000)
-</programlisting></para><para><!-- ATSC proposal by Mark McClelland,
-video4linux-list@redhat.com on 17 Oct 2002
---><constant>V4L2_STD_ATSC_8_VSB</constant> and
-<constant>V4L2_STD_ATSC_16_VSB</constant> are U.S. terrestrial digital
-TV standards. Presently the V4L2 API does not support digital TV. See
-also the Linux DVB API at <ulink
-url="https://linuxtv.org">https://linuxtv.org</ulink>.</para>
-<para><programlisting>
-#define V4L2_STD_PAL_BG (V4L2_STD_PAL_B |\
- V4L2_STD_PAL_B1 |\
- V4L2_STD_PAL_G)
-#define V4L2_STD_B (V4L2_STD_PAL_B |\
- V4L2_STD_PAL_B1 |\
- V4L2_STD_SECAM_B)
-#define V4L2_STD_GH (V4L2_STD_PAL_G |\
- V4L2_STD_PAL_H |\
- V4L2_STD_SECAM_G |\
- V4L2_STD_SECAM_H)
-#define V4L2_STD_PAL_DK (V4L2_STD_PAL_D |\
- V4L2_STD_PAL_D1 |\
- V4L2_STD_PAL_K)
-#define V4L2_STD_PAL (V4L2_STD_PAL_BG |\
- V4L2_STD_PAL_DK |\
- V4L2_STD_PAL_H |\
- V4L2_STD_PAL_I)
-#define V4L2_STD_NTSC (V4L2_STD_NTSC_M |\
- V4L2_STD_NTSC_M_JP |\
- V4L2_STD_NTSC_M_KR)
-#define V4L2_STD_MN (V4L2_STD_PAL_M |\
- V4L2_STD_PAL_N |\
- V4L2_STD_PAL_Nc |\
- V4L2_STD_NTSC)
-#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
- V4L2_STD_SECAM_K |\
- V4L2_STD_SECAM_K1)
-#define V4L2_STD_DK (V4L2_STD_PAL_DK |\
- V4L2_STD_SECAM_DK)
-
-#define V4L2_STD_SECAM (V4L2_STD_SECAM_B |\
- V4L2_STD_SECAM_G |\
- V4L2_STD_SECAM_H |\
- V4L2_STD_SECAM_DK |\
- V4L2_STD_SECAM_L |\
- V4L2_STD_SECAM_LC)
-
-#define V4L2_STD_525_60 (V4L2_STD_PAL_M |\
- V4L2_STD_PAL_60 |\
- V4L2_STD_NTSC |\
- V4L2_STD_NTSC_443)
-#define V4L2_STD_625_50 (V4L2_STD_PAL |\
- V4L2_STD_PAL_N |\
- V4L2_STD_PAL_Nc |\
- V4L2_STD_SECAM)
-
-#define V4L2_STD_UNKNOWN 0
-#define V4L2_STD_ALL (V4L2_STD_525_60 |\
- V4L2_STD_625_50)
-</programlisting></para>
-
- <table pgwide="1" id="video-standards" orient="land">
- <title>Video Standards (based on [<xref linkend="itu470" />])</title>
- <tgroup cols="12" colsep="1" rowsep="1" align="center">
- <colspec colname="c1" align="left" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <colspec colname="c5" />
- <colspec colnum="7" colname="c7" />
- <colspec colnum="9" colname="c9" />
- <colspec colnum="12" colname="c12" />
- <spanspec namest="c2" nameend="c3" spanname="m" align="center" />
- <spanspec namest="c4" nameend="c12" spanname="x" align="center" />
- <spanspec namest="c5" nameend="c7" spanname="b" align="center" />
- <spanspec namest="c9" nameend="c12" spanname="s" align="center" />
- <thead>
- <row>
- <entry>Characteristics</entry>
- <entry><para>M/NTSC<footnote><para>Japan uses a standard
-similar to M/NTSC
-(V4L2_STD_NTSC_M_JP).</para></footnote></para></entry>
- <entry>M/PAL</entry>
- <entry><para>N/PAL<footnote><para> The values in
-brackets apply to the combination N/PAL a.k.a.
-N<subscript>C</subscript> used in Argentina
-(V4L2_STD_PAL_Nc).</para></footnote></para></entry>
- <entry align="center">B, B1, G/PAL</entry>
- <entry align="center">D, D1, K/PAL</entry>
- <entry align="center">H/PAL</entry>
- <entry align="center">I/PAL</entry>
- <entry align="center">B, G/SECAM</entry>
- <entry align="center">D, K/SECAM</entry>
- <entry align="center">K1/SECAM</entry>
- <entry align="center">L/SECAM</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry>Frame lines</entry>
- <entry spanname="m">525</entry>
- <entry spanname="x">625</entry>
- </row>
- <row>
- <entry>Frame period (s)</entry>
- <entry spanname="m">1001/30000</entry>
- <entry spanname="x">1/25</entry>
- </row>
- <row>
- <entry>Chrominance sub-carrier frequency (Hz)</entry>
- <entry>3579545 &plusmn;&nbsp;10</entry>
- <entry>3579611.49 &plusmn;&nbsp;10</entry>
- <entry>4433618.75 &plusmn;&nbsp;5 (3582056.25
-&plusmn;&nbsp;5)</entry>
- <entry spanname="b">4433618.75 &plusmn;&nbsp;5</entry>
- <entry>4433618.75 &plusmn;&nbsp;1</entry>
- <entry spanname="s">f<subscript>OR</subscript>&nbsp;=
-4406250 &plusmn;&nbsp;2000, f<subscript>OB</subscript>&nbsp;= 4250000
-&plusmn;&nbsp;2000</entry>
- </row>
- <row>
- <entry>Nominal radio-frequency channel bandwidth
-(MHz)</entry>
- <entry>6</entry>
- <entry>6</entry>
- <entry>6</entry>
- <entry>B: 7; B1, G: 8</entry>
- <entry>8</entry>
- <entry>8</entry>
- <entry>8</entry>
- <entry>8</entry>
- <entry>8</entry>
- <entry>8</entry>
- <entry>8</entry>
- </row>
- <row>
- <entry>Sound carrier relative to vision carrier
-(MHz)</entry>
- <entry>+&nbsp;4.5</entry>
- <entry>+&nbsp;4.5</entry>
- <entry>+&nbsp;4.5</entry>
- <entry><para>+&nbsp;5.5 &plusmn;&nbsp;0.001
-<footnote><para>In the Federal Republic of Germany, Austria, Italy,
-the Netherlands, Slovakia and Switzerland a system of two sound
-carriers is used, the frequency of the second carrier being
-242.1875&nbsp;kHz above the frequency of the first sound carrier. For
-stereophonic sound transmissions a similar system is used in
-Australia.</para></footnote> <footnote><para>New Zealand uses a sound
-carrier displaced 5.4996 &plusmn;&nbsp;0.0005 MHz from the vision
-carrier.</para></footnote> <footnote><para>In Denmark, Finland, New
-Zealand, Sweden and Spain a system of two sound carriers is used. In
-Iceland, Norway and Poland the same system is being introduced. The
-second carrier is 5.85&nbsp;MHz above the vision carrier and is DQPSK
-modulated with 728&nbsp;kbit/s sound and data multiplex. (NICAM
-system)</para></footnote> <footnote><para>In the United Kingdom, a
-system of two sound carriers is used. The second sound carrier is
-6.552&nbsp;MHz above the vision carrier and is DQPSK modulated with a
-728&nbsp;kbit/s sound and data multiplex able to carry two sound
-channels. (NICAM system)</para></footnote></para></entry>
- <entry>+&nbsp;6.5 &plusmn;&nbsp;0.001</entry>
- <entry>+&nbsp;5.5</entry>
- <entry>+&nbsp;5.9996 &plusmn;&nbsp;0.0005</entry>
- <entry>+&nbsp;5.5 &plusmn;&nbsp;0.001</entry>
- <entry>+&nbsp;6.5 &plusmn;&nbsp;0.001</entry>
- <entry>+&nbsp;6.5</entry>
- <entry><para>+&nbsp;6.5 <footnote><para>In France, a
-digital carrier 5.85 MHz away from the vision carrier may be used in
-addition to the main sound carrier. It is modulated in differentially
-encoded QPSK with a 728 kbit/s sound and data multiplexer capable of
-carrying two sound channels. (NICAM
-system)</para></footnote></para></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-standard; <structfield>index</structfield>
-is out of bounds.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>Standard video timings are not supported for this input or output.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
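
A minimal C sketch of the standard enumeration, assuming an already-open file descriptor fd; it prints the id bits, name and frame period of every standard the current input or output supports:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Print every video standard supported by the current input or output. */
    static void enum_standards(int fd)
    {
        struct v4l2_standard std;
        unsigned int i;

        for (i = 0; ; i++) {
            memset(&std, 0, sizeof(std));
            std.index = i;
            if (ioctl(fd, VIDIOC_ENUMSTD, &std) < 0)
                break;          /* EINVAL (or ENODATA) ends the loop */
            printf("%2u: id=0x%016llx %s, %u lines, frame period %u/%u s\n",
                   std.index, (unsigned long long)std.id,
                   (const char *)std.name, std.framelines,
                   std.frameperiod.numerator, std.frameperiod.denominator);
        }
    }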
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
deleted file mode 100644
index a6558a676ef3..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ /dev/null
@@ -1,205 +0,0 @@
-<refentry id="vidioc-expbuf">
-
- <refmeta>
- <refentrytitle>ioctl VIDIOC_EXPBUF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_EXPBUF</refname>
- <refpurpose>Export a buffer as a DMABUF file descriptor.</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_exportbuffer *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_EXPBUF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>This ioctl is an extension to the <link linkend="mmap">memory
-mapping</link> I/O method, therefore it is available only for
-<constant>V4L2_MEMORY_MMAP</constant> buffers. It can be used to export a
-buffer as a DMABUF file at any time after buffers have been allocated with the
-&VIDIOC-REQBUFS; ioctl.</para>
-
-<para> To export a buffer, applications fill &v4l2-exportbuffer;. The
-<structfield>type</structfield> field is set to the same buffer type as was
-previously used with &v4l2-requestbuffers; <structfield>type</structfield>.
-Applications must also set the <structfield>index</structfield> field. Valid
-index numbers range from zero to the number of buffers allocated with
-&VIDIOC-REQBUFS; (&v4l2-requestbuffers; <structfield>count</structfield>)
-minus one. For the multi-planar API, applications set the <structfield>plane</structfield>
-field to the index of the plane to be exported. Valid planes
-range from zero to the maximal number of valid planes for the currently active
-format. For the single-planar API, applications must set <structfield>plane</structfield>
-to zero. Additional flags may be passed in the <structfield>flags</structfield>
-field. Refer to the open() manual page for details.
-Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All
-other fields must be set to zero.
-In the case of multi-planar API, every plane is exported separately using
-multiple <constant>VIDIOC_EXPBUF</constant> calls.</para>
-
-<para>After calling <constant>VIDIOC_EXPBUF</constant> the <structfield>fd</structfield>
-field will be set by a driver. This is a DMABUF file
-descriptor. The application may pass it to other DMABUF-aware devices. Refer to
-<link linkend="dmabuf">DMABUF importing</link> for details about importing
-DMABUF files into V4L2 nodes. It is recommended to close a DMABUF file when it
-is no longer used to allow the associated memory to be reclaimed.</para>
- </refsect1>
-
- <refsect1>
- <title>Examples</title>
-
- <example>
- <title>Exporting a buffer.</title>
- <programlisting>
-int buffer_export(int v4lfd, &v4l2-buf-type; bt, int index, int *dmafd)
-{
- &v4l2-exportbuffer; expbuf;
-
- memset(&amp;expbuf, 0, sizeof(expbuf));
- expbuf.type = bt;
- expbuf.index = index;
- if (ioctl(v4lfd, &VIDIOC-EXPBUF;, &amp;expbuf) == -1) {
- perror("VIDIOC_EXPBUF");
- return -1;
- }
-
- *dmafd = expbuf.fd;
-
- return 0;
-}
- </programlisting>
- </example>
-
- <example>
- <title>Exporting a buffer using the multi-planar API.</title>
- <programlisting>
-int buffer_export_mp(int v4lfd, &v4l2-buf-type; bt, int index,
- int dmafd[], int n_planes)
-{
- int i;
-
- for (i = 0; i &lt; n_planes; ++i) {
- &v4l2-exportbuffer; expbuf;
-
- memset(&amp;expbuf, 0, sizeof(expbuf));
- expbuf.type = bt;
- expbuf.index = index;
- expbuf.plane = i;
- if (ioctl(v4lfd, &VIDIOC-EXPBUF;, &amp;expbuf) == -1) {
- perror("VIDIOC_EXPBUF");
- while (i)
- close(dmafd[--i]);
- return -1;
- }
- dmafd[i] = expbuf.fd;
- }
-
- return 0;
-}
- </programlisting>
- </example>
-
- <table pgwide="1" frame="none" id="v4l2-exportbuffer">
- <title>struct <structname>v4l2_exportbuffer</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the buffer, same as &v4l2-format;
-<structfield>type</structfield> or &v4l2-requestbuffers;
-<structfield>type</structfield>, set by the application. See <xref
-linkend="v4l2-buf-type" /></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the buffer, set by the application. This field is
-only used for <link linkend="mmap">memory mapping</link> I/O and can range from
-zero to the number of buffers allocated with the &VIDIOC-REQBUFS; and/or
-&VIDIOC-CREATE-BUFS; ioctls, minus one.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>plane</structfield></entry>
- <entry>Index of the plane to be exported when using the
-multi-planar API. Otherwise this value must be set to zero. </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags for the newly created file, currently only
-<constant>O_CLOEXEC</constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY</constant>,
-and <constant>O_RDWR</constant> are supported, refer to the manual
-of open() for more details.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>fd</structfield></entry>
- <entry>The DMABUF file descriptor associated with a buffer. Set by
- the driver.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved[11]</structfield></entry>
- <entry>Reserved field for future use. Drivers and applications must
-set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>A queue is not in MMAP mode or DMABUF exporting is not
-supported or <structfield>flags</structfield> or <structfield>type</structfield>
-or <structfield>index</structfield> or <structfield>plane</structfield> fields
-are invalid.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-audio.xml b/Documentation/DocBook/media/v4l/vidioc-g-audio.xml
deleted file mode 100644
index d7bb9b3738f6..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-audio.xml
+++ /dev/null
@@ -1,172 +0,0 @@
-<refentry id="vidioc-g-audio">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_AUDIO, VIDIOC_S_AUDIO</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_AUDIO</refname>
- <refname>VIDIOC_S_AUDIO</refname>
- <refpurpose>Query or select the current audio input and its
-attributes</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_audio *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_audio *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_AUDIO, VIDIOC_S_AUDIO</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the current audio input applications zero out the
-<structfield>reserved</structfield> array of a &v4l2-audio;
-and call the <constant>VIDIOC_G_AUDIO</constant> ioctl with a pointer
-to this structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the device has no audio inputs, or none which combine
-with the current video input.</para>
-
- <para>Audio inputs have one writable property, the audio mode. To
-select the current audio input <emphasis>and</emphasis> change the
-audio mode, applications initialize the
-<structfield>index</structfield> and <structfield>mode</structfield>
-fields, and the
-<structfield>reserved</structfield> array of a
-<structname>v4l2_audio</structname> structure and call the
-<constant>VIDIOC_S_AUDIO</constant> ioctl. Drivers may switch to a
-different audio mode if the request cannot be satisfied. However, this
-is a write-only ioctl, it does not return the actual new audio
-mode.</para>
-
- <table pgwide="1" frame="none" id="v4l2-audio">
- <title>struct <structname>v4l2_audio</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Identifies the audio input, set by the
-driver or application.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the audio input, a NUL-terminated ASCII
-string, for example: "Line In". This information is intended for the
-user, preferably the connector label on the device itself.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry>Audio capability flags, see <xref
- linkend="audio-capability" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>mode</structfield></entry>
- <entry>Audio mode flags set by drivers and applications (on
- <constant>VIDIOC_S_AUDIO</constant> ioctl), see <xref linkend="audio-mode" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="audio-capability">
- <title>Audio Capability Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_AUDCAP_STEREO</constant></entry>
- <entry>0x00001</entry>
- <entry>This is a stereo input. The flag is intended to
-automatically disable stereo recording etc. when the signal is always
-monaural. The API provides no means to detect if stereo is
-<emphasis>received</emphasis>, unless the audio input belongs to a
-tuner.</entry>
- </row>
- <row>
- <entry><constant>V4L2_AUDCAP_AVL</constant></entry>
- <entry>0x00002</entry>
- <entry>Automatic Volume Level mode is supported.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="audio-mode">
- <title>Audio Mode Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_AUDMODE_AVL</constant></entry>
- <entry>0x00001</entry>
- <entry>AVL mode is on.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>No audio inputs combine with the current video input,
-or the number of the selected audio input is out of bounds or it does
-not combine.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-audioout.xml b/Documentation/DocBook/media/v4l/vidioc-g-audioout.xml
deleted file mode 100644
index 200a2704a970..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-audioout.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<refentry id="vidioc-g-audioout">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_AUDOUT, VIDIOC_S_AUDOUT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_AUDOUT</refname>
- <refname>VIDIOC_S_AUDOUT</refname>
- <refpurpose>Query or select the current audio output</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_audioout *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_audioout *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_AUDOUT, VIDIOC_S_AUDOUT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the current audio output applications zero out the
-<structfield>reserved</structfield> array of a &v4l2-audioout; and
-call the <constant>VIDIOC_G_AUDOUT</constant> ioctl with a pointer
-to this structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the device has no audio outputs, or none which combine
-with the current video output.</para>
-
- <para>Audio outputs have no writable properties. Nevertheless, to
-select the current audio output applications can initialize the
-<structfield>index</structfield> field and
-<structfield>reserved</structfield> array (which in the future may
-contain writable properties) of a
-<structname>v4l2_audioout</structname> structure and call the
-<constant>VIDIOC_S_AUDOUT</constant> ioctl. Drivers switch to the
-requested output or return the &EINVAL; when the index is out of
-bounds. This is a write-only ioctl, it does not return the current
-audio output attributes as <constant>VIDIOC_G_AUDOUT</constant>
-does.</para>
-
- <para>Note that connectors on a TV card which loop back the received audio
-signal to a sound card are not audio outputs in this sense.</para>
-
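-  <para>A short illustrative sketch (not part of the original text), assuming
-an already opened video output device file descriptor; it selects audio
-output 0 and reads back the attributes with <constant>VIDIOC_G_AUDOUT</constant>:</para>
-  <programlisting><![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-/* fd is assumed to be an open V4L2 output device. */
-void select_audio_output(int fd)
-{
-        struct v4l2_audioout audioout;
-
-        /* Select audio output 0; only the index is writable. */
-        memset(&audioout, 0, sizeof(audioout));
-        audioout.index = 0;
-        if (ioctl(fd, VIDIOC_S_AUDOUT, &audioout) < 0)
-                perror("VIDIOC_S_AUDOUT");
-
-        /* S_AUDOUT is write-only, so query the attributes separately. */
-        memset(&audioout, 0, sizeof(audioout));
-        if (ioctl(fd, VIDIOC_G_AUDOUT, &audioout) == 0)
-                printf("audio output: %u (%s)\n",
-                       audioout.index, (const char *)audioout.name);
-}
-]]></programlisting>
-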
- <table pgwide="1" frame="none" id="v4l2-audioout">
- <title>struct <structname>v4l2_audioout</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Identifies the audio output, set by the
-driver or application.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the audio output, a NUL-terminated ASCII
-string, for example: "Line Out". This information is intended for the
-user, preferably the connector label on the device itself.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry>Audio capability flags, none defined yet. Drivers
-must set this field to zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>mode</structfield></entry>
- <entry>Audio mode, none defined yet. Drivers and
-applications (on <constant>VIDIOC_S_AUDOUT</constant>) must set this
-field to zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>No audio outputs combine with the current video
-output, or the number of the selected audio output is out of bounds or
-it does not combine.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-crop.xml b/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
deleted file mode 100644
index e6c4efb9e8b4..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-crop.xml
+++ /dev/null
@@ -1,129 +0,0 @@
-<refentry id="vidioc-g-crop">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_CROP, VIDIOC_S_CROP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_CROP</refname>
- <refname>VIDIOC_S_CROP</refname>
- <refpurpose>Get or set the current cropping rectangle</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_crop *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_crop *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_CROP, VIDIOC_S_CROP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the cropping rectangle size and position
-applications set the <structfield>type</structfield> field of a
-<structname>v4l2_crop</structname> structure to the respective buffer
-(stream) type and call the <constant>VIDIOC_G_CROP</constant> ioctl
-with a pointer to this structure. The driver fills the rest of the
-structure or returns the &EINVAL; if cropping is not supported.</para>
-
- <para>To change the cropping rectangle applications initialize the
-<structfield>type</structfield> and &v4l2-rect; substructure named
-<structfield>c</structfield> of a v4l2_crop structure and call the
-<constant>VIDIOC_S_CROP</constant> ioctl with a pointer to this
-structure.</para>
-
-<para>Do not use the multiplanar buffer types. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
-instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>
-and use <constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>.</para>
-
- <para>The driver first adjusts the requested dimensions against
-hardware limits, &ie; the bounds given by the capture/output window,
-and it rounds to the closest possible values of horizontal and
-vertical offset, width and height. In particular the driver must round
-the vertical offset of the cropping rectangle to frame lines modulo
-two, such that the field order cannot be confused.</para>
-
- <para>Second the driver adjusts the image size (the opposite
-rectangle of the scaling process, source or target depending on the
-data direction) to the closest size possible while maintaining the
-current horizontal and vertical scaling factor.</para>
-
- <para>Finally the driver programs the hardware with the actual
-cropping and image parameters. <constant>VIDIOC_S_CROP</constant> is a
-write-only ioctl, it does not return the actual parameters. To query
-them applications must call <constant>VIDIOC_G_CROP</constant> and
-&VIDIOC-G-FMT;. When the parameters are unsuitable the application may
-modify the cropping or image parameters and repeat the cycle until
-satisfactory parameters have been negotiated.</para>
-
- <para>When cropping is not supported then no parameters are
-changed and <constant>VIDIOC_S_CROP</constant> returns the
-&EINVAL;.</para>
-
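-  <para>A minimal negotiation sketch added for illustration (it is not part of
-the original document); it assumes an open capture device and simply halves
-the current cropping rectangle width, then reads back what the driver
-actually programmed:</para>
-  <programlisting><![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-void halve_crop_width(int fd)
-{
-        struct v4l2_crop crop;
-
-        memset(&crop, 0, sizeof(crop));
-        crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        if (ioctl(fd, VIDIOC_G_CROP, &crop) < 0)
-                return;         /* cropping not supported */
-
-        crop.c.width /= 2;
-        if (ioctl(fd, VIDIOC_S_CROP, &crop) < 0)
-                return;
-
-        /* S_CROP is write-only: re-read the actual rectangle. */
-        if (ioctl(fd, VIDIOC_G_CROP, &crop) == 0)
-                printf("crop: %ux%u@(%d,%d)\n",
-                       (unsigned)crop.c.width, (unsigned)crop.c.height,
-                       crop.c.left, crop.c.top);
-}
-]]></programlisting>
-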
- <table pgwide="1" frame="none" id="v4l2-crop">
- <title>struct <structname>v4l2_crop</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the data stream, set by the application.
-Only these types are valid here: <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> and
-<constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>. See <xref linkend="v4l2-buf-type" />.</entry>
- </row>
- <row>
- <entry>&v4l2-rect;</entry>
- <entry><structfield>c</structfield></entry>
- <entry>Cropping rectangle. The same co-ordinate system as
-for &v4l2-cropcap; <structfield>bounds</structfield> is used.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml b/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml
deleted file mode 100644
index ee2820d6ca66..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-ctrl.xml
+++ /dev/null
@@ -1,133 +0,0 @@
-<refentry id="vidioc-g-ctrl">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_CTRL, VIDIOC_S_CTRL</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_CTRL</refname>
- <refname>VIDIOC_S_CTRL</refname>
- <refpurpose>Get or set the value of a control</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_control
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_CTRL, VIDIOC_S_CTRL</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To get the current value of a control applications
-initialize the <structfield>id</structfield> field of a struct
-<structname>v4l2_control</structname> and call the
-<constant>VIDIOC_G_CTRL</constant> ioctl with a pointer to this
-structure. To change the value of a control applications initialize
-the <structfield>id</structfield> and <structfield>value</structfield>
-fields of a struct <structname>v4l2_control</structname> and call the
-<constant>VIDIOC_S_CTRL</constant> ioctl.</para>
-
- <para>When the <structfield>id</structfield> is invalid drivers
-return an &EINVAL;. When the <structfield>value</structfield> is out
-of bounds drivers can choose to take the closest valid value or return
-an &ERANGE;, whatever seems more appropriate. However,
-<constant>VIDIOC_S_CTRL</constant> is a write-only ioctl, it does not
-return the actual new value. If the <structfield>value</structfield>
-is inappropriate for the control (e.g. if it refers to an unsupported
-menu index of a menu control), then &EINVAL; is returned as well.</para>
-
- <para>These ioctls work only with user controls. For other
-control classes the &VIDIOC-G-EXT-CTRLS;, &VIDIOC-S-EXT-CTRLS; or
-&VIDIOC-TRY-EXT-CTRLS; must be used.</para>
-
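-  <para>For illustration only (not part of the original description), a sketch
-that reads the brightness user control and increases it by one step, assuming
-the driver exposes <constant>V4L2_CID_BRIGHTNESS</constant>:</para>
-  <programlisting><![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-void bump_brightness(int fd)
-{
-        struct v4l2_control ctrl;
-
-        memset(&ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_BRIGHTNESS;
-        if (ioctl(fd, VIDIOC_G_CTRL, &ctrl) < 0) {
-                perror("VIDIOC_G_CTRL");
-                return;
-        }
-
-        ctrl.value += 1;
-        /* The driver may clamp the value or return ERANGE. */
-        if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
-                perror("VIDIOC_S_CTRL");
-}
-]]></programlisting>
-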
- <table pgwide="1" frame="none" id="v4l2-control">
- <title>struct <structname>v4l2_control</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry>Identifies the control, set by the
-application.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>value</structfield></entry>
- <entry>New value or current value.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-control; <structfield>id</structfield> is
-invalid or the <structfield>value</structfield> is inappropriate for
-the given control (i.e. if a menu item is selected that is not supported
-by the driver according to &VIDIOC-QUERYMENU;).</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ERANGE</errorcode></term>
- <listitem>
- <para>The &v4l2-control; <structfield>value</structfield>
-is out of bounds.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The control is temporarily not changeable, possibly
-because another application took over control of the device function
-this control belongs to.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>Attempt to set a read-only control or to get a
- write-only control.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
deleted file mode 100644
index 06952d7cc770..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
+++ /dev/null
@@ -1,343 +0,0 @@
-<refentry id="vidioc-g-dv-timings">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_DV_TIMINGS</refname>
- <refname>VIDIOC_S_DV_TIMINGS</refname>
- <refname>VIDIOC_SUBDEV_G_DV_TIMINGS</refname>
- <refname>VIDIOC_SUBDEV_S_DV_TIMINGS</refname>
- <refpurpose>Get or set DV timings for input or output</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_dv_timings *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS, VIDIOC_SUBDEV_G_DV_TIMINGS, VIDIOC_SUBDEV_S_DV_TIMINGS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
- <para>To set DV timings for the input or output, applications use the
-<constant>VIDIOC_S_DV_TIMINGS</constant> ioctl and to get the current timings,
-applications use the <constant>VIDIOC_G_DV_TIMINGS</constant> ioctl. The detailed timing
-information is filled in using the structure &v4l2-dv-timings;. These ioctls take
-a pointer to the &v4l2-dv-timings; structure as argument. If the ioctl is not supported
-or the timing values are not correct, the driver returns &EINVAL;.</para>
-<para>The <filename>linux/v4l2-dv-timings.h</filename> header can be used to get the
-timings of the formats in the <xref linkend="cea861" /> and <xref linkend="vesadmt" />
-standards. If the current input or output does not support DV timings (e.g. if
-&VIDIOC-ENUMINPUT; does not set the <constant>V4L2_IN_CAP_DV_TIMINGS</constant> flag), then
-&ENODATA; is returned.</para>
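-<para>An illustrative sketch (not part of the original text): query the
-currently set DV timings and print the active resolution, assuming an open
-video node whose input supports DV timings:</para>
-<programlisting><![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-void print_dv_timings(int fd)
-{
-        struct v4l2_dv_timings timings;
-
-        memset(&timings, 0, sizeof(timings));
-        if (ioctl(fd, VIDIOC_G_DV_TIMINGS, &timings) < 0) {
-                perror("VIDIOC_G_DV_TIMINGS");
-                return;
-        }
-        if (timings.type == V4L2_DV_BT_656_1120)
-                printf("%ux%u%s\n", timings.bt.width, timings.bt.height,
-                       timings.bt.interlaced ? "i" : "p");
-}
-]]></programlisting>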
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>This ioctl is not supported, or the
-<constant>VIDIOC_S_DV_TIMINGS</constant> parameter was unsuitable.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>Digital video timings are not supported for this input or output.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The device is busy and therefore can not change the timings.</para>
- </listitem>
- </varlistentry>
- </variablelist>
-
- <table pgwide="1" frame="none" id="v4l2-bt-timings">
- <title>struct <structname>v4l2_bt_timings</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Width of the active video in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Height of the active video frame in lines. So for interlaced formats the
- height of the active video in each field is <structfield>height</structfield>/2.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>interlaced</structfield></entry>
- <entry>Progressive (0) or interlaced (1)</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>polarities</structfield></entry>
- <entry>This is a bit mask that defines polarities of sync signals.
-bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_HSYNC_POS_POL) is for horizontal sync polarity. If the bit is set
-(1) it is positive polarity and if it is cleared (0), it is negative polarity.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>pixelclock</structfield></entry>
- <entry>Pixel clock in Hz, &eg; 74.25 MHz corresponds to 74250000.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>hfrontporch</structfield></entry>
- <entry>Horizontal front porch in pixels</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>hsync</structfield></entry>
- <entry>Horizontal sync length in pixels</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>hbackporch</structfield></entry>
- <entry>Horizontal back porch in pixels</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>vfrontporch</structfield></entry>
- <entry>Vertical front porch in lines. For interlaced formats this refers to the
- odd field (aka field 1).</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>vsync</structfield></entry>
- <entry>Vertical sync length in lines. For interlaced formats this refers to the
- odd field (aka field 1).</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>vbackporch</structfield></entry>
- <entry>Vertical back porch in lines. For interlaced formats this refers to the
- odd field (aka field 1).</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>il_vfrontporch</structfield></entry>
- <entry>Vertical front porch in lines for the even field (aka field 2) of
- interlaced field formats. Must be 0 for progressive formats.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>il_vsync</structfield></entry>
- <entry>Vertical sync length in lines for the even field (aka field 2) of
- interlaced field formats. Must be 0 for progressive formats.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>il_vbackporch</structfield></entry>
- <entry>Vertical back porch in lines for the even field (aka field 2) of
- interlaced field formats. Must be 0 for progressive formats.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>standards</structfield></entry>
- <entry>The video standard(s) this format belongs to. This will be filled in by
- the driver. Applications must set this to 0. See <xref linkend="dv-bt-standards"/>
- for a list of standards.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Several flags giving more information about the format.
- See <xref linkend="dv-bt-flags"/> for a description of the flags.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-dv-timings">
- <title>struct <structname>v4l2_dv_timings</structname></title>
- <tgroup cols="4">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>Type of DV timings as listed in <xref linkend="dv-timing-types"/>.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield></structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-bt-timings;</entry>
- <entry><structfield>bt</structfield></entry>
- <entry>Timings defined by BT.656/1120 specifications</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[32]</entry>
- <entry></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="dv-timing-types">
- <title>DV Timing types</title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>Timing type</entry>
- <entry>value</entry>
- <entry>Description</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_656_1120</entry>
- <entry>0</entry>
- <entry>BT.656/1120 timings</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table pgwide="1" frame="none" id="dv-bt-standards">
- <title>DV BT Timing standards</title>
- <tgroup cols="2">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>Timing standard</entry>
- <entry>Description</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_STD_CEA861</entry>
- <entry>The timings follow the CEA-861 Digital TV Profile standard</entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_STD_DMT</entry>
- <entry>The timings follow the VESA Discrete Monitor Timings standard</entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_STD_CVT</entry>
- <entry>The timings follow the VESA Coordinated Video Timings standard</entry>
- </row>
- <row>
- <entry>V4L2_DV_BT_STD_GTF</entry>
- <entry>The timings follow the VESA Generalized Timings Formula standard</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- <table pgwide="1" frame="none" id="dv-bt-flags">
- <title>DV BT Timing flags</title>
- <tgroup cols="2">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>Flag</entry>
- <entry>Description</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry>V4L2_DV_FL_REDUCED_BLANKING</entry>
- <entry>CVT/GTF specific: the timings use reduced blanking (CVT) or the 'Secondary
-GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
-intervals are reduced, allowing a higher resolution over the same
-bandwidth. This is a read-only flag, applications must not set this.
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_FL_CAN_REDUCE_FPS</entry>
- <entry>CEA-861 specific: set for CEA-861 formats with a framerate that is a multiple
-of six. These formats can be optionally played at 1 / 1.001 speed to
-be compatible with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
-29.97 frames per second. If the transmitter can't generate such frequencies, then the
-flag will also be cleared. This is a read-only flag, applications must not set this.
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_FL_REDUCED_FPS</entry>
- <entry>CEA-861 specific: only valid for video transmitters, the flag is cleared
-by receivers. It is also only valid for formats with the V4L2_DV_FL_CAN_REDUCE_FPS flag
-set, for other formats the flag will be cleared by the driver.
-
-If the application sets this flag, then the pixelclock used to set up the transmitter is
-divided by 1.001 to make it compatible with NTSC framerates. If the transmitter
-can't generate such frequencies, then the flag will also be cleared.
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_FL_HALF_LINE</entry>
- <entry>Specific to interlaced formats: if set, then the vertical frontporch
-of field 1 (aka the odd field) is really one half-line longer and the vertical backporch
-of field 2 (aka the even field) is really one half-line shorter, so each field has exactly
-the same number of half-lines. Whether half-lines can be detected or used depends on
-the hardware.
- </entry>
- </row>
- <row>
- <entry>V4L2_DV_FL_IS_CE_VIDEO</entry>
- <entry>If set, then this is a Consumer Electronics (CE) video format.
-Such formats differ from other formats (commonly called IT formats) in that if
-R'G'B' encoding is used then by default the R'G'B' values use limited range
-(i.e. 16-235) as opposed to full range (i.e. 0-255). All formats defined in CEA-861
-except for the 640x480p59.94 format are CE formats.
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-edid.xml b/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
deleted file mode 100644
index b7602d30f596..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-edid.xml
+++ /dev/null
@@ -1,173 +0,0 @@
-<refentry id="vidioc-g-edid">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_EDID, VIDIOC_S_EDID, VIDIOC_SUBDEV_G_EDID, VIDIOC_SUBDEV_S_EDID</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_EDID</refname>
- <refname>VIDIOC_S_EDID</refname>
- <refname>VIDIOC_SUBDEV_G_EDID</refname>
- <refname>VIDIOC_SUBDEV_S_EDID</refname>
- <refpurpose>Get or set the EDID of a video receiver/transmitter</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_edid *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_edid *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_EDID, VIDIOC_S_EDID, VIDIOC_SUBDEV_G_EDID, VIDIOC_SUBDEV_S_EDID</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
- <para>These ioctls can be used to get or set an EDID associated with an input
- from a receiver or an output of a transmitter device. They can be
- used with subdevice nodes (/dev/v4l-subdevX) or with video nodes (/dev/videoX).</para>
-
- <para>When used with video nodes the <structfield>pad</structfield> field represents the
- input (for video capture devices) or output (for video output devices) index as
- is returned by &VIDIOC-ENUMINPUT; and &VIDIOC-ENUMOUTPUT; respectively. When used
- with subdevice nodes the <structfield>pad</structfield> field represents the
- input or output pad of the subdevice. If there is no EDID support for the given
- <structfield>pad</structfield> value, then the &EINVAL; will be returned.</para>
-
- <para>To get the EDID data the application has to fill in the <structfield>pad</structfield>,
- <structfield>start_block</structfield>, <structfield>blocks</structfield> and <structfield>edid</structfield>
- fields, zero the <structfield>reserved</structfield> array and call
- <constant>VIDIOC_G_EDID</constant>. The current EDID from block
- <structfield>start_block</structfield> and of size <structfield>blocks</structfield>
- will be placed in the memory <structfield>edid</structfield> points to. The <structfield>edid</structfield>
- pointer must point to memory at least <structfield>blocks</structfield>&nbsp;*&nbsp;128 bytes
- large (the size of one block is 128 bytes).</para>
-
- <para>If there are fewer blocks than specified, then the driver will set <structfield>blocks</structfield>
- to the actual number of blocks. If there are no EDID blocks available at all, then the error code
- ENODATA is set.</para>
-
- <para>If blocks have to be retrieved from the sink, then this call will block until they
- have been read.</para>
-
- <para>If <structfield>start_block</structfield> and <structfield>blocks</structfield> are
- both set to 0 when <constant>VIDIOC_G_EDID</constant> is called, then the driver will
- set <structfield>blocks</structfield> to the total number of available EDID blocks
- and it will return 0 without copying any data. This is an easy way to discover how many
- EDID blocks there are. Note that if there are no EDID blocks available at all, then
- the driver will set <structfield>blocks</structfield> to 0 and it returns 0.</para>
-
- <para>To set the EDID blocks of a receiver the application has to fill in the <structfield>pad</structfield>,
- <structfield>blocks</structfield> and <structfield>edid</structfield> fields, set
- <structfield>start_block</structfield> to 0 and zero the <structfield>reserved</structfield> array.
- It is not possible to set part of an EDID,
- it is always all or nothing. Setting the EDID data is only valid for receivers as it makes
- no sense for a transmitter.</para>
-
- <para>The driver assumes that the full EDID is passed in. If there are more EDID blocks than
- the hardware can handle then the EDID is not written, but instead the error code E2BIG is set
- and <structfield>blocks</structfield> is set to the maximum that the hardware supports.
- If <structfield>start_block</structfield> is any
- value other than 0 then the error code EINVAL is set.</para>
-
- <para>To disable an EDID you set <structfield>blocks</structfield> to 0. Depending on the
- hardware this will drive the hotplug pin low and/or block the source from reading the EDID
- data in some way. In any case, the end result is the same: the EDID is no longer available.
- </para>
-
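-  <para>A sketch of the two-step read described above, added for illustration
-only (it is not part of the original text); error handling is minimal and
-the pad value 0 is an assumption:</para>
-  <programlisting><![CDATA[
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-void read_edid(int fd)
-{
-        struct v4l2_edid edid;
-
-        /* Step 1: blocks = 0 just reports how many blocks are available. */
-        memset(&edid, 0, sizeof(edid));
-        edid.pad = 0;
-        if (ioctl(fd, VIDIOC_G_EDID, &edid) < 0 || edid.blocks == 0)
-                return;
-
-        /* Step 2: read all blocks, 128 bytes each. */
-        edid.start_block = 0;
-        edid.edid = malloc(edid.blocks * 128);
-        if (!edid.edid)
-                return;
-        if (ioctl(fd, VIDIOC_G_EDID, &edid) == 0)
-                printf("read %u EDID blocks\n", edid.blocks);
-        free(edid.edid);
-}
-]]></programlisting>
-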
- <table pgwide="1" frame="none" id="v4l2-edid">
- <title>struct <structname>v4l2_edid</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad for which to get/set the EDID blocks. When used with a video device
- node the pad represents the input or output index as returned by
- &VIDIOC-ENUMINPUT; and &VIDIOC-ENUMOUTPUT; respectively.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>start_block</structfield></entry>
- <entry>Read the EDID starting from this block. Must be 0 when setting
- the EDID.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>blocks</structfield></entry>
- <entry>The number of blocks to get or set. Must be less than or equal to 256 (the
- maximum number of blocks as defined by the standard). When you set the EDID and
- <structfield>blocks</structfield> is 0, then the EDID is disabled or erased.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[5]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- <row>
- <entry>__u8&nbsp;*</entry>
- <entry><structfield>edid</structfield></entry>
- <entry>Pointer to memory that contains the EDID. The minimum size is
- <structfield>blocks</structfield>&nbsp;*&nbsp;128.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>The EDID data is not available.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>E2BIG</errorcode></term>
- <listitem>
- <para>The EDID data you provided is more than the hardware can handle.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-enc-index.xml b/Documentation/DocBook/media/v4l/vidioc-g-enc-index.xml
deleted file mode 100644
index be25029a16f1..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-enc-index.xml
+++ /dev/null
@@ -1,189 +0,0 @@
-<refentry id="vidioc-g-enc-index">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_ENC_INDEX</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_ENC_INDEX</refname>
- <refpurpose>Get meta data about a compressed video stream</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_enc_idx *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_ENC_INDEX</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The <constant>VIDIOC_G_ENC_INDEX</constant> ioctl provides
-meta data about a compressed video stream that the same or another
-application currently reads from the driver, which is useful for
-random access into the stream without decoding it.</para>
-
- <para>To read the data applications must call
-<constant>VIDIOC_G_ENC_INDEX</constant> with a pointer to a
-&v4l2-enc-idx;. On success the driver fills the
-<structfield>entry</structfield> array, stores the number of elements
-written in the <structfield>entries</structfield> field, and
-initializes the <structfield>entries_cap</structfield> field.</para>
-
- <para>Each element of the <structfield>entry</structfield> array
-contains meta data about one picture. A
-<constant>VIDIOC_G_ENC_INDEX</constant> call reads up to
-<constant>V4L2_ENC_IDX_ENTRIES</constant> entries from a driver
-buffer, which can hold up to <structfield>entries_cap</structfield>
-entries. This number can be lower or higher than
-<constant>V4L2_ENC_IDX_ENTRIES</constant>, but not zero. When the
-application fails to read the meta data in time the oldest entries
-will be lost. When the buffer is empty or no capturing/encoding is in
-progress, <structfield>entries</structfield> will be zero.</para>
-
- <para>Currently this ioctl is only defined for MPEG-2 program
-streams and video elementary streams.</para>
-
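-  <para>An illustrative sketch (not in the original text): drain the index
-buffer once and print the coding type of each returned entry, assuming an
-open device that is currently encoding:</para>
-  <programlisting><![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-void dump_enc_index(int fd)
-{
-        struct v4l2_enc_idx idx;
-        unsigned int i;
-
-        memset(&idx, 0, sizeof(idx));
-        if (ioctl(fd, VIDIOC_G_ENC_INDEX, &idx) < 0) {
-                perror("VIDIOC_G_ENC_INDEX");
-                return;
-        }
-        for (i = 0; i < idx.entries; i++) {
-                unsigned int type = idx.entry[i].flags & V4L2_ENC_IDX_FRAME_MASK;
-
-                printf("offset %llu: frame type %u, %u bytes\n",
-                       (unsigned long long)idx.entry[i].offset,
-                       type, idx.entry[i].length);
-        }
-}
-]]></programlisting>
-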
- <table pgwide="1" frame="none" id="v4l2-enc-idx">
- <title>struct <structname>v4l2_enc_idx</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>entries</structfield></entry>
- <entry>The number of entries the driver stored in the
-<structfield>entry</structfield> array.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>entries_cap</structfield></entry>
- <entry>The number of entries the driver can
-buffer. Must be greater than zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
- <entry spanname="hspan">Reserved for future extensions.
-Drivers must set the array to zero.</entry>
- </row>
- <row>
- <entry>&v4l2-enc-idx-entry;</entry>
- <entry><structfield>entry</structfield>[<constant>V4L2_ENC_IDX_ENTRIES</constant>]</entry>
- <entry>Meta data about a compressed video stream. Each
-element of the array corresponds to one picture, sorted in ascending
-order by their <structfield>offset</structfield>.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-enc-idx-entry">
- <title>struct <structname>v4l2_enc_idx_entry</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u64</entry>
- <entry><structfield>offset</structfield></entry>
- <entry>The offset in bytes from the beginning of the
-compressed video stream to the beginning of this picture, that is a
-<wordasword>PES packet header</wordasword> as defined in <xref
- linkend="mpeg2part1" /> or a <wordasword>picture
-header</wordasword> as defined in <xref linkend="mpeg2part2" />. When
-the encoder is stopped, the driver resets the offset to zero.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>pts</structfield></entry>
- <entry>The 33 bit <wordasword>Presentation Time
-Stamp</wordasword> of this picture as defined in <xref
- linkend="mpeg2part1" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>length</structfield></entry>
- <entry>The length of this picture in bytes.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags containing the coding type of this picture, see <xref
- linkend="enc-idx-flags" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions.
-Drivers must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="enc-idx-flags">
- <title>Index Entry Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_ENC_IDX_FRAME_I</constant></entry>
- <entry>0x00</entry>
- <entry>This is an Intra-coded picture.</entry>
- </row>
- <row>
- <entry><constant>V4L2_ENC_IDX_FRAME_P</constant></entry>
- <entry>0x01</entry>
- <entry>This is a Predictive-coded picture.</entry>
- </row>
- <row>
- <entry><constant>V4L2_ENC_IDX_FRAME_B</constant></entry>
- <entry>0x02</entry>
- <entry>This is a Bidirectionally predictive-coded
-picture.</entry>
- </row>
- <row>
- <entry><constant>V4L2_ENC_IDX_FRAME_MASK</constant></entry>
- <entry>0x0F</entry>
- <entry><wordasword>AND</wordasword> the flags field with
-this mask to obtain the picture coding type.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
deleted file mode 100644
index eb82f7e7d06b..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ /dev/null
@@ -1,456 +0,0 @@
-<refentry id="vidioc-g-ext-ctrls">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_EXT_CTRLS, VIDIOC_S_EXT_CTRLS,
-VIDIOC_TRY_EXT_CTRLS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_EXT_CTRLS</refname>
- <refname>VIDIOC_S_EXT_CTRLS</refname>
- <refname>VIDIOC_TRY_EXT_CTRLS</refname>
- <refpurpose>Get or set the value of several controls, try control
-values</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_ext_controls
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_EXT_CTRLS, VIDIOC_S_EXT_CTRLS,
-VIDIOC_TRY_EXT_CTRLS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls allow the caller to get or set multiple
-controls atomically. Control IDs are grouped into control classes (see
-<xref linkend="ctrl-class" />) and all controls in the control array
-must belong to the same control class.</para>
-
- <para>Applications must always fill in the
-<structfield>count</structfield>,
-<structfield>which</structfield>,
-<structfield>controls</structfield> and
-<structfield>reserved</structfield> fields of &v4l2-ext-controls;, and
-initialize the &v4l2-ext-control; array pointed to by the
-<structfield>controls</structfield> fields.</para>
-
- <para>To get the current value of a set of controls applications
-initialize the <structfield>id</structfield>,
-<structfield>size</structfield> and <structfield>reserved2</structfield> fields
-of each &v4l2-ext-control; and call the
-<constant>VIDIOC_G_EXT_CTRLS</constant> ioctl. String controls
-must also set the <structfield>string</structfield> field. Controls
-of compound types (<constant>V4L2_CTRL_FLAG_HAS_PAYLOAD</constant> is set)
-must set the <structfield>ptr</structfield> field.</para>
-
- <para>If the <structfield>size</structfield> is too small to
-receive the control result (only relevant for pointer-type controls
-like strings), then the driver will set <structfield>size</structfield>
-to a valid value and return an &ENOSPC;. You should re-allocate the
-memory to this new size and try again. For the string type it is possible that
-the same issue occurs again if the string has grown in the meantime. It is
-recommended to call &VIDIOC-QUERYCTRL; first and use
-<structfield>maximum</structfield>+1 as the new <structfield>size</structfield>
-value. It is guaranteed that this is sufficient memory.
-</para>
-
- <para>N-dimensional arrays are set and retrieved row-by-row. You cannot set a partial
-array, all elements have to be set or retrieved. The total size is calculated
-as <structfield>elems</structfield> * <structfield>elem_size</structfield>.
-These values can be obtained by calling &VIDIOC-QUERY-EXT-CTRL;.</para>
-
- <para>To change the value of a set of controls applications
-initialize the <structfield>id</structfield>, <structfield>size</structfield>,
-<structfield>reserved2</structfield> and
-<structfield>value/value64/string/ptr</structfield> fields of each &v4l2-ext-control; and
-call the <constant>VIDIOC_S_EXT_CTRLS</constant> ioctl. The controls
-will only be set if <emphasis>all</emphasis> control values are
-valid.</para>
-
- <para>To check if a set of controls have correct values applications
-initialize the <structfield>id</structfield>, <structfield>size</structfield>,
-<structfield>reserved2</structfield> and
-<structfield>value/value64/string/ptr</structfield> fields of each &v4l2-ext-control; and
-call the <constant>VIDIOC_TRY_EXT_CTRLS</constant> ioctl. It is up to
-the driver whether wrong values are automatically adjusted to a valid
-value or if an error is returned.</para>
-
- <para>When the <structfield>id</structfield> or
-<structfield>which</structfield> is invalid drivers return an
-&EINVAL;. When the value is out of bounds drivers can choose to take
-the closest valid value or return an &ERANGE;, whatever seems more
-appropriate. In the first case the new value is set in
-&v4l2-ext-control;. If the new control value is inappropriate (e.g. the
-given menu index is not supported by the menu control), then this will
-also result in an &EINVAL; error.</para>
-
- <para>The driver will only set/get these controls if all control
-values are correct. This prevents the situation where only some of the
-controls were set/get. Only low-level errors (&eg; a failed i2c
-command) can still cause this situation.</para>
-
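-  <para>For illustration (this sketch is not part of the original text), the
-following sets a single MPEG-class control, the video bitrate, through the
-extended control interface; the bitrate value is an arbitrary example:</para>
-  <programlisting><![CDATA[
-#include <string.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/videodev2.h>
-
-void set_bitrate(int fd)
-{
-        struct v4l2_ext_control ctrl;
-        struct v4l2_ext_controls ctrls;
-
-        memset(&ctrl, 0, sizeof(ctrl));
-        ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;
-        ctrl.value = 4000000;   /* 4 Mb/s, example value */
-
-        memset(&ctrls, 0, sizeof(ctrls));
-        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
-        ctrls.count = 1;
-        ctrls.controls = &ctrl;
-
-        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
-                perror("VIDIOC_S_EXT_CTRLS");
-}
-]]></programlisting>
-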
- <table pgwide="1" frame="none" id="v4l2-ext-control">
- <title>struct <structname>v4l2_ext_control</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry></entry>
- <entry>Identifies the control, set by the
-application.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>size</structfield></entry>
- <entry></entry>
- <entry>The total size in bytes of the payload of this
-control. This is normally 0, but for pointer controls this should be
-set to the size of the memory containing the payload, or that will
-receive the payload. If <constant>VIDIOC_G_EXT_CTRLS</constant> finds
-that this value is less than is required to store
-the payload result, then it is set to a value large enough to store the
-payload result and ENOSPC is returned. Note that for string controls
-this <structfield>size</structfield> field should not be confused with the length of the string.
-This field refers to the size of the memory that contains the string.
-The actual <emphasis>length</emphasis> of the string may well be much smaller.
-</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved2</structfield>[1]</entry>
- <entry></entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__s32</entry>
- <entry><structfield>value</structfield></entry>
- <entry>New value or current value. Valid if this control is not of
-type <constant>V4L2_CTRL_TYPE_INTEGER64</constant> and
-<constant>V4L2_CTRL_FLAG_HAS_PAYLOAD</constant> is not set.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__s64</entry>
- <entry><structfield>value64</structfield></entry>
- <entry>New value or current value. Valid if this control is of
-type <constant>V4L2_CTRL_TYPE_INTEGER64</constant> and
-<constant>V4L2_CTRL_FLAG_HAS_PAYLOAD</constant> is not set.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>char *</entry>
- <entry><structfield>string</structfield></entry>
- <entry>A pointer to a string. Valid if this control is of
-type <constant>V4L2_CTRL_TYPE_STRING</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u8 *</entry>
- <entry><structfield>p_u8</structfield></entry>
- <entry>A pointer to a matrix control of unsigned 8-bit values.
-Valid if this control is of type <constant>V4L2_CTRL_TYPE_U8</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u16 *</entry>
- <entry><structfield>p_u16</structfield></entry>
- <entry>A pointer to a matrix control of unsigned 16-bit values.
-Valid if this control is of type <constant>V4L2_CTRL_TYPE_U16</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32 *</entry>
- <entry><structfield>p_u32</structfield></entry>
- <entry>A pointer to a matrix control of unsigned 32-bit values.
-Valid if this control is of type <constant>V4L2_CTRL_TYPE_U32</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>void *</entry>
- <entry><structfield>ptr</structfield></entry>
- <entry>A pointer to a compound type which can be an N-dimensional array and/or a
-compound type (the control's type is >= <constant>V4L2_CTRL_COMPOUND_TYPES</constant>).
-Valid if <constant>V4L2_CTRL_FLAG_HAS_PAYLOAD</constant> is set for this control.
-</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-ext-controls">
- <title>struct <structname>v4l2_ext_controls</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>union</entry>
- <entry>(anonymous)</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>ctrl_class</structfield></entry>
- <entry>The control class to which all controls belong, see
-<xref linkend="ctrl-class" />. Drivers that use a kernel framework for handling
-controls will also accept a value of 0 here, meaning that the controls can
-belong to any control class. Whether drivers support this can be tested by setting
-<structfield>ctrl_class</structfield> to 0 and calling <constant>VIDIOC_TRY_EXT_CTRLS</constant>
-with a <structfield>count</structfield> of 0. If that succeeds, then the driver
-supports this feature.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
- <entry><para>Which value of the control to get/set/try. <constant>V4L2_CTRL_WHICH_CUR_VAL</constant>
-will return the current value of the control and <constant>V4L2_CTRL_WHICH_DEF_VAL</constant> will
-return the default value of the control. Please note that you can only get the default value of the
-control, you cannot set or try it.</para>
-<para>For backwards compatibility you can also use a control class here (see
-<xref linkend="ctrl-class" />). In that case all controls have to belong to that
-control class. This usage is deprecated, instead just use <constant>V4L2_CTRL_WHICH_CUR_VAL</constant>.
-There are some very old drivers that do not yet support <constant>V4L2_CTRL_WHICH_CUR_VAL</constant>
-and that require a control class here. You can test for such drivers by setting ctrl_class to
-<constant>V4L2_CTRL_WHICH_CUR_VAL</constant> and calling VIDIOC_TRY_EXT_CTRLS with a count of 0.
-If that fails, then the driver does not support <constant>V4L2_CTRL_WHICH_CUR_VAL</constant>.</para>
-</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>count</structfield></entry>
- <entry>The number of controls in the controls array. May
-also be zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>error_idx</structfield></entry>
- <entry><para>Set by the driver in case of an error. If the error is
-associated with a particular control, then <structfield>error_idx</structfield>
-is set to the index of that control. If the error is not related to a specific
-control, or the validation step failed (see below), then
-<structfield>error_idx</structfield> is set to <structfield>count</structfield>.
-The value is undefined if the ioctl returned 0 (success).</para>
-
-<para>Before controls are read from/written to hardware a validation step
-takes place: this checks if all controls in the list are valid controls,
-if no attempt is made to write to a read-only control or read from a write-only
-control, and any other up-front checks that can be done without accessing the
-hardware. The exact validations done during this step are driver dependent
-since some checks might require hardware access for some devices, thus making
-it impossible to do those checks up-front. However, drivers should make a
-best-effort to do as many up-front checks as possible.</para>
-
-<para>This check is done to avoid leaving the hardware in an inconsistent state due
-to easy-to-avoid problems. But it leads to another problem: the application needs to
-know whether an error came from the validation step (meaning that the hardware
-was not touched) or from an error during the actual reading from/writing to hardware.</para>
-
-<para>The, in hindsight quite poor, solution for that is to set <structfield>error_idx</structfield>
-to <structfield>count</structfield> if the validation failed. This has the
-unfortunate side-effect that it is not possible to see which control failed the
-validation. If the validation was successful and the error happened while
-accessing the hardware, then <structfield>error_idx</structfield> is less than
-<structfield>count</structfield> and only the controls up to
-<structfield>error_idx-1</structfield> were read or written correctly, and the
-state of the remaining controls is undefined.</para>
-
-<para>Since <constant>VIDIOC_TRY_EXT_CTRLS</constant> does not access hardware
-there is also no need to handle the validation step in this special way,
-so <structfield>error_idx</structfield> will just be set to the control that
-failed the validation step instead of to <structfield>count</structfield>.
-This means that if <constant>VIDIOC_S_EXT_CTRLS</constant> fails with
-<structfield>error_idx</structfield> set to <structfield>count</structfield>,
-then you can call <constant>VIDIOC_TRY_EXT_CTRLS</constant> to try to discover
-the actual control that failed the validation step. Unfortunately, there
-is no <constant>TRY</constant> equivalent for <constant>VIDIOC_G_EXT_CTRLS</constant>.
-</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- <row>
- <entry>&v4l2-ext-control; *</entry>
- <entry><structfield>controls</structfield></entry>
- <entry>Pointer to an array of
-<structfield>count</structfield> v4l2_ext_control structures. Ignored
-if <structfield>count</structfield> equals zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="ctrl-class">
- <title>Control classes</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CTRL_CLASS_USER</constant></entry>
- <entry>0x980000</entry>
- <entry>The class containing user controls. These controls
-are described in <xref linkend="control" />. All controls that can be set
-using the &VIDIOC-S-CTRL; and &VIDIOC-G-CTRL; ioctl belong to this
-class.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_MPEG</constant></entry>
- <entry>0x990000</entry>
- <entry>The class containing MPEG compression controls.
-These controls are described in <xref
- linkend="mpeg-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_CAMERA</constant></entry>
- <entry>0x9a0000</entry>
- <entry>The class containing camera controls.
-These controls are described in <xref
- linkend="camera-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_FM_TX</constant></entry>
- <entry>0x9b0000</entry>
- <entry>The class containing FM Transmitter (FM TX) controls.
-These controls are described in <xref
- linkend="fm-tx-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_FLASH</constant></entry>
- <entry>0x9c0000</entry>
- <entry>The class containing flash device controls.
-These controls are described in <xref
- linkend="flash-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
- <entry>0x9d0000</entry>
- <entry>The class containing JPEG compression controls.
-These controls are described in <xref
- linkend="jpeg-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_IMAGE_SOURCE</constant></entry>
- <entry>0x9e0000</entry> <entry>The class containing image
- source controls. These controls are described in <xref
- linkend="image-source-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_IMAGE_PROC</constant></entry>
- <entry>0x9f0000</entry> <entry>The class containing image
- processing controls. These controls are described in <xref
- linkend="image-process-controls" />.</entry>
- </row>
-
- <row>
- <entry><constant>V4L2_CTRL_CLASS_FM_RX</constant></entry>
- <entry>0xa10000</entry>
- <entry>The class containing FM Receiver (FM RX) controls.
-These controls are described in <xref
- linkend="fm-rx-controls" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_RF_TUNER</constant></entry>
- <entry>0xa20000</entry>
- <entry>The class containing RF tuner controls.
-These controls are described in <xref linkend="rf-tuner-controls" />.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-ext-control; <structfield>id</structfield>
-is invalid, the &v4l2-ext-controls;
-<structfield>which</structfield> is invalid, or the &v4l2-ext-control;
-<structfield>value</structfield> was inappropriate (e.g. the given menu
-index is not supported by the driver). This error code is
-also returned by the <constant>VIDIOC_S_EXT_CTRLS</constant> and
-<constant>VIDIOC_TRY_EXT_CTRLS</constant> ioctls if two or more
-control values are in conflict.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ERANGE</errorcode></term>
- <listitem>
- <para>The &v4l2-ext-control; <structfield>value</structfield>
-is out of bounds.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The control is temporarily not changeable, possibly
-because another application took over control of the device function
-this control belongs to.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOSPC</errorcode></term>
- <listitem>
- <para>The space reserved for the control's payload is insufficient.
-The field <structfield>size</structfield> is set to a value that is enough
-to store the payload and this error code is returned.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>Attempt to try or set a read-only control or to get a
- write-only control.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
-
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml b/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
deleted file mode 100644
index 77607cc19688..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
+++ /dev/null
@@ -1,459 +0,0 @@
-<refentry id="vidioc-g-fbuf">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_FBUF, VIDIOC_S_FBUF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_FBUF</refname>
- <refname>VIDIOC_S_FBUF</refname>
- <refpurpose>Get or set frame buffer overlay parameters</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_framebuffer *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_framebuffer *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_FBUF, VIDIOC_S_FBUF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Applications can use the <constant>VIDIOC_G_FBUF</constant> and
-<constant>VIDIOC_S_FBUF</constant> ioctl to get and set the
-framebuffer parameters for a <link linkend="overlay">Video
-Overlay</link> or <link linkend="osd">Video Output Overlay</link>
-(OSD). The type of overlay is implied by the device type (capture or
-output device) and can be determined with the &VIDIOC-QUERYCAP; ioctl.
-One <filename>/dev/videoN</filename> device must not support both
-kinds of overlay.</para>
-
- <para>The V4L2 API distinguishes destructive and non-destructive
-overlays. A destructive overlay copies captured video images into the
-video memory of a graphics card. A non-destructive overlay blends
-video images into a VGA signal or graphics into a video signal.
-<wordasword>Video Output Overlays</wordasword> are always
-non-destructive.</para>
-
- <para>To get the current parameters applications call the
-<constant>VIDIOC_G_FBUF</constant> ioctl with a pointer to a
-<structname>v4l2_framebuffer</structname> structure. The driver fills
-all fields of the structure or returns an &EINVAL; when overlays are
-not supported.</para>
-
- <para>To set the parameters for a <wordasword>Video Output
-Overlay</wordasword>, applications must initialize the
-<structfield>flags</structfield> field of a struct
-<structname>v4l2_framebuffer</structname>. Since the framebuffer is
-implemented on the TV card all other parameters are determined by the
-driver. When an application calls <constant>VIDIOC_S_FBUF</constant>
-with a pointer to this structure, the driver prepares for the overlay
-and returns the framebuffer parameters as
-<constant>VIDIOC_G_FBUF</constant> does, or it returns an error
-code.</para>
-
- <para>To set the parameters for a <wordasword>non-destructive
-Video Overlay</wordasword>, applications must initialize the
-<structfield>flags</structfield> field, the
-<structfield>fmt</structfield> substructure, and call
-<constant>VIDIOC_S_FBUF</constant>. Again the driver prepares for the
-overlay and returns the framebuffer parameters as
-<constant>VIDIOC_G_FBUF</constant> does, or it returns an error
-code.</para>
-
- <para>For a <wordasword>destructive Video Overlay</wordasword>
-applications must additionally provide a
-<structfield>base</structfield> address. Setting up a DMA to a
-random memory location can jeopardize system security and stability,
-or even damage the hardware; therefore only the superuser can set the
-parameters for a destructive video overlay.</para>
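-
-  <para>A minimal sketch, assuming <varname>fd</varname> refers to an open
-device whose driver supports chroma-keyed non-destructive overlays, of how
-an application might query the framebuffer parameters and request
-chroma-keying (the key color itself is negotiated separately with
-&VIDIOC-S-FMT;):</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int enable_chromakey(int fd)
-{
-        struct v4l2_framebuffer fbuf;
-
-        /* The driver fills all fields, or fails when overlays
-           are not supported at all. */
-        memset(&amp;fbuf, 0, sizeof(fbuf));
-        if (ioctl(fd, VIDIOC_G_FBUF, &amp;fbuf))
-                return -1;
-
-        if (!(fbuf.capability &amp; V4L2_FBUF_CAP_CHROMAKEY))
-                return -1;
-
-        /* Only the flags are changed here; the other fields keep
-           the values returned by the driver. */
-        fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
-        return ioctl(fd, VIDIOC_S_FBUF, &amp;fbuf);
-}
-  </programlisting>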
-
- <!-- NB v4l2_pix_format is also specified in pixfmt.sgml.-->
-
- <table pgwide="1" frame="none" id="v4l2-framebuffer">
- <title>struct <structname>v4l2_framebuffer</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry></entry>
- <entry>Overlay capability flags set by the driver, see
-<xref linkend="framebuffer-cap" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry></entry>
- <entry>Overlay control flags set by application and
-driver, see <xref linkend="framebuffer-flags" /></entry>
- </row>
- <row>
- <entry>void *</entry>
- <entry><structfield>base</structfield></entry>
- <entry></entry>
- <entry>Physical base address of the framebuffer,
-that is the address of the pixel in the top left corner of the
-framebuffer.<footnote><para>A physical base address may not suit all
-platforms. GK notes in theory we should pass something like PCI device
-+ memory region + offset instead. If you encounter problems please
-discuss on the linux-media mailing list: &v4l-ml;.</para></footnote></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>This field is irrelevant to
-<wordasword>non-destructive Video Overlays</wordasword>. For
-<wordasword>destructive Video Overlays</wordasword> applications must
-provide a base address. The driver may accept only base addresses
-which are a multiple of two, four or eight bytes. For
-<wordasword>Video Output Overlays</wordasword> the driver must return
-a valid base address, so applications can find the corresponding Linux
-framebuffer device (see <xref linkend="osd" />).</entry>
- </row>
- <row>
- <entry>struct</entry>
- <entry><structfield>fmt</structfield></entry>
- <entry></entry>
- <entry>Layout of the frame buffer.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Width of the frame buffer in pixels.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Height of the frame buffer in pixels.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>pixelformat</structfield></entry>
- <entry>The pixel format of the
-framebuffer.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>For <wordasword>non-destructive Video
-Overlays</wordasword> this field only defines a format for the
-&v4l2-window; <structfield>chromakey</structfield> field.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>For <wordasword>destructive Video
-Overlays</wordasword> applications must initialize this field. For
-<wordasword>Video Output Overlays</wordasword> the driver must return
-a valid format.</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- <entry>Usually this is an RGB format (for example
-<link linkend="V4L2-PIX-FMT-RGB565"><constant>V4L2_PIX_FMT_RGB565</constant></link>)
-but YUV formats (only packed YUV formats when chroma keying is used,
-not including <constant>V4L2_PIX_FMT_YUYV</constant> and
-<constant>V4L2_PIX_FMT_UYVY</constant>) and the
-<constant>V4L2_PIX_FMT_PAL8</constant> format are also permitted. The
-behavior of the driver when an application requests a compressed
-format is undefined. See <xref linkend="pixfmt" /> for information on
-pixel formats.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-field;</entry>
- <entry><structfield>field</structfield></entry>
- <entry>Drivers and applications shall ignore this field.
-If applicable, the field order is selected with the &VIDIOC-S-FMT;
-ioctl, using the <structfield>field</structfield> field of
-&v4l2-window;.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>bytesperline</structfield></entry>
- <entry>Distance in bytes between the leftmost pixels in
-two adjacent lines.</entry>
- </row>
- <row>
- <entry spanname="hspan"><para>This field is irrelevant to
-<wordasword>non-destructive Video
-Overlays</wordasword>.</para><para>For <wordasword>destructive Video
-Overlays</wordasword> both applications and drivers can set this field
-to request padding bytes at the end of each line. Drivers however may
-ignore the requested value, returning <structfield>width</structfield>
-times bytes-per-pixel or a larger value required by the hardware. That
-implies applications can just set this field to zero to get a
-reasonable default.</para><para>For <wordasword>Video Output
-Overlays</wordasword> the driver must return a valid
-value.</para><para>Video hardware may access padding bytes, therefore
-they must reside in accessible memory. Consider for example the case
-where padding bytes after the last line of an image cross a system
-page boundary. Capture devices may write padding bytes, the value is
-undefined. Output devices ignore the contents of padding
-bytes.</para><para>When the image format is planar the
-<structfield>bytesperline</structfield> value applies to the first
-plane and is divided by the same factor as the
-<structfield>width</structfield> field for the other planes. For
-example the Cb and Cr planes of a YUV 4:2:0 image have half as many
-padding bytes following each line as the Y plane. To avoid ambiguities
-drivers must return a <structfield>bytesperline</structfield> value
-rounded up to a multiple of the scale factor.</para></entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>sizeimage</structfield></entry>
- <entry><para>This field is irrelevant to
-<wordasword>non-destructive Video Overlays</wordasword>. For
-<wordasword>destructive Video Overlays</wordasword> applications must
-initialize this field. For <wordasword>Video Output
-Overlays</wordasword> the driver must return a valid
-format.</para><para>Together with <structfield>base</structfield> it
-defines the framebuffer memory accessible by the
-driver.</para></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-colorspace;</entry>
- <entry><structfield>colorspace</structfield></entry>
- <entry>This information supplements the
-<structfield>pixelformat</structfield> and must be set by the driver,
-see <xref linkend="colorspaces" />.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u32</entry>
- <entry><structfield>priv</structfield></entry>
- <entry>Reserved. Drivers and applications must set this field to
-zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="framebuffer-cap">
- <title>Frame Buffer Capability Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FBUF_CAP_EXTERNOVERLAY</constant></entry>
- <entry>0x0001</entry>
- <entry>The device is capable of non-destructive overlays.
-When the driver clears this flag, only destructive overlays are
-supported. There are no drivers yet which support both destructive and
-non-destructive overlays. Video Output Overlays are in practice always
-non-destructive.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_CHROMAKEY</constant></entry>
- <entry>0x0002</entry>
- <entry>The device supports clipping by chroma-keying the
-images. That is, image pixels replace pixels in the VGA or video
-signal only where the latter assume a certain color. Chroma-keying
-makes no sense for destructive overlays.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_LIST_CLIPPING</constant></entry>
- <entry>0x0004</entry>
- <entry>The device supports clipping using a list of clip
-rectangles.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_BITMAP_CLIPPING</constant></entry>
- <entry>0x0008</entry>
- <entry>The device supports clipping using a bit mask.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_LOCAL_ALPHA</constant></entry>
- <entry>0x0010</entry>
- <entry>The device supports clipping/blending using the
-alpha channel of the framebuffer or VGA signal. Alpha blending makes
-no sense for destructive overlays.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_GLOBAL_ALPHA</constant></entry>
- <entry>0x0020</entry>
- <entry>The device supports alpha blending using a global
-alpha value. Alpha blending makes no sense for destructive overlays.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_LOCAL_INV_ALPHA</constant></entry>
- <entry>0x0040</entry>
- <entry>The device supports clipping/blending using the
-inverted alpha channel of the framebuffer or VGA signal. Alpha
-blending makes no sense for destructive overlays.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_CAP_SRC_CHROMAKEY</constant></entry>
- <entry>0x0080</entry>
- <entry>The device supports Source Chroma-keying. Video pixels
-with the chroma-key colors are replaced by framebuffer pixels, which is the exact opposite of
-<constant>V4L2_FBUF_CAP_CHROMAKEY</constant>.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="framebuffer-flags">
- <title>Frame Buffer Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_FBUF_FLAG_PRIMARY</constant></entry>
- <entry>0x0001</entry>
- <entry>The framebuffer is the primary graphics surface.
-In other words, the overlay is destructive. This flag is typically set by any
-driver that doesn't have the <constant>V4L2_FBUF_CAP_EXTERNOVERLAY</constant>
-capability and it is cleared otherwise.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_FLAG_OVERLAY</constant></entry>
- <entry>0x0002</entry>
- <entry>If this flag is set for a video capture device, then the
-driver will set the initial overlay size to cover the full framebuffer size,
-otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
-
-Only one video capture driver (bttv) supports this flag. The use of this flag
-for capture devices is deprecated. There is no way to detect which drivers
-support this flag, so the only reliable method of setting the overlay size is
-through &VIDIOC-S-FMT;.
-
-If this flag is set for a video output device, then the video output overlay
-window is relative to the top-left corner of the framebuffer and restricted
-to the size of the framebuffer. If it is cleared, then the video output
-overlay window is relative to the video output display.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_FLAG_CHROMAKEY</constant></entry>
- <entry>0x0004</entry>
- <entry>Use chroma-keying. The chroma-key color is
-determined by the <structfield>chromakey</structfield> field of
-&v4l2-window; and negotiated with the &VIDIOC-S-FMT; ioctl, see <xref
- linkend="overlay" />
-and
- <xref linkend="osd" />.</entry>
- </row>
- <row>
- <entry spanname="hspan">There are no flags to enable
-clipping using a list of clip rectangles or a bitmap. These methods
-are negotiated with the &VIDIOC-S-FMT; ioctl, see <xref
- linkend="overlay" /> and <xref linkend="osd" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_FLAG_LOCAL_ALPHA</constant></entry>
- <entry>0x0008</entry>
- <entry>Use the alpha channel of the framebuffer to clip or
-blend framebuffer pixels with video images. The blend
-function is: output = framebuffer pixel * alpha + video pixel * (1 -
-alpha). The actual alpha depth depends on the framebuffer pixel
-format.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_FLAG_GLOBAL_ALPHA</constant></entry>
- <entry>0x0010</entry>
- <entry>Use a global alpha value to blend the framebuffer
-with video images. The blend function is: output = (framebuffer pixel
-* alpha + video pixel * (255 - alpha)) / 255. The alpha value is
-determined by the <structfield>global_alpha</structfield> field of
-&v4l2-window; and negotiated with the &VIDIOC-S-FMT; ioctl, see <xref
- linkend="overlay" />
-and <xref linkend="osd" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_FLAG_LOCAL_INV_ALPHA</constant></entry>
- <entry>0x0020</entry>
- <entry>Like
-<constant>V4L2_FBUF_FLAG_LOCAL_ALPHA</constant>, use the alpha channel
-of the framebuffer to clip or blend framebuffer pixels with video
-images, but with an inverted alpha value. The blend function is:
-output = framebuffer pixel * (1 - alpha) + video pixel * alpha. The
-actual alpha depth depends on the framebuffer pixel format.</entry>
- </row>
- <row>
- <entry><constant>V4L2_FBUF_FLAG_SRC_CHROMAKEY</constant></entry>
- <entry>0x0040</entry>
- <entry>Use source chroma-keying. The source chroma-key color is
-determined by the <structfield>chromakey</structfield> field of
-&v4l2-window; and negotiated with the &VIDIOC-S-FMT; ioctl, see <xref
-linkend="overlay" /> and <xref linkend="osd" />.
-Both chroma-keying methods are mutually exclusive, so the same
-<structfield>chromakey</structfield> field of &v4l2-window; is used for both.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EPERM</errorcode></term>
- <listitem>
- <para><constant>VIDIOC_S_FBUF</constant> can only be called
-by a privileged user to negotiate the parameters for a destructive
-overlay.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <constant>VIDIOC_S_FBUF</constant> parameters are unsuitable.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
deleted file mode 100644
index ffcb448251f0..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-fmt.xml
+++ /dev/null
@@ -1,204 +0,0 @@
-<refentry id="vidioc-g-fmt">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_FMT, VIDIOC_S_FMT,
-VIDIOC_TRY_FMT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_FMT</refname>
- <refname>VIDIOC_S_FMT</refname>
- <refname>VIDIOC_TRY_FMT</refname>
- <refpurpose>Get or set the data format, try a format</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_format
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_FMT, VIDIOC_S_FMT, VIDIOC_TRY_FMT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls are used to negotiate the format of data
-(typically image format) exchanged between driver and
-application.</para>
-
- <para>To query the current parameters applications set the
-<structfield>type</structfield> field of a struct
-<structname>v4l2_format</structname> to the respective buffer (stream)
-type. For example video capture devices use
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant> or
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>. When the application
-calls the <constant>VIDIOC_G_FMT</constant> ioctl with a pointer to
-this structure the driver fills the respective member of the
-<structfield>fmt</structfield> union. In case of video capture devices
-that is either the &v4l2-pix-format; <structfield>pix</structfield> or
-the &v4l2-pix-format-mplane; <structfield>pix_mp</structfield> member.
-When the requested buffer type is not supported drivers return an
-&EINVAL;.</para>
-
- <para>To change the current format parameters applications
-initialize the <structfield>type</structfield> field and all
-fields of the respective <structfield>fmt</structfield>
-union member. For details see the documentation of the various device
-types in <xref linkend="devices" />. Good practice is to query the
-current parameters first, and to
-modify only those parameters not suitable for the application. When
-the application calls the <constant>VIDIOC_S_FMT</constant> ioctl
-with a pointer to a <structname>v4l2_format</structname> structure
-the driver checks
-and adjusts the parameters against hardware abilities. Drivers
-should not return an error code unless the <structfield>type</structfield> field is invalid; this is
-a mechanism to fathom device capabilities and to approach parameters
-acceptable for both the application and driver. On success the driver
-may program the hardware, allocate resources and generally prepare for
-data exchange.
-Finally the <constant>VIDIOC_S_FMT</constant> ioctl returns the
-current format parameters as <constant>VIDIOC_G_FMT</constant> does.
-Very simple, inflexible devices may even ignore all input and always
-return the default parameters. However all V4L2 devices exchanging
-data with the application must implement the
-<constant>VIDIOC_G_FMT</constant> and
-<constant>VIDIOC_S_FMT</constant> ioctl. When the requested buffer
-type is not supported drivers return an &EINVAL; on a
-<constant>VIDIOC_S_FMT</constant> attempt. When I/O is already in
-progress or the resource is not available for other reasons drivers
-return the &EBUSY;.</para>
-
- <para>The <constant>VIDIOC_TRY_FMT</constant> ioctl is equivalent
-to <constant>VIDIOC_S_FMT</constant> with one exception: it does not
-change driver state. It can also be called at any time, never
-returning <errorcode>EBUSY</errorcode>. This function is provided to
-negotiate parameters, to learn about hardware limitations, without
-disabling I/O or possibly time-consuming hardware preparations.
-Although strongly recommended, drivers are not required to implement
-this ioctl.</para>
-
- <para>The format as returned by <constant>VIDIOC_TRY_FMT</constant>
-must be identical to what <constant>VIDIOC_S_FMT</constant> returns for
-the same input or output.</para>
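-
-  <para>A minimal sketch of this query-modify-set pattern, assuming
-<varname>fd</varname> is an open video capture device and the application
-only wants to change the frame size:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int set_capture_size(int fd, unsigned int width, unsigned int height)
-{
-        struct v4l2_format fmt;
-
-        memset(&amp;fmt, 0, sizeof(fmt));
-        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-        /* Query the current parameters first ... */
-        if (ioctl(fd, VIDIOC_G_FMT, &amp;fmt))
-                return -1;
-
-        /* ... modify only what is actually needed ... */
-        fmt.fmt.pix.width = width;
-        fmt.fmt.pix.height = height;
-
-        /* ... and let the driver adjust the request to the hardware.
-           On return the structure holds the parameters that will
-           really be used. */
-        return ioctl(fd, VIDIOC_S_FMT, &amp;fmt);
-}
-  </programlisting>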
-
- <table pgwide="1" frame="none" id="v4l2-format">
- <title>struct <structname>v4l2_format</structname></title>
- <tgroup cols="4">
- <colspec colname="c1" />
- <colspec colname="c2" />
- <colspec colname="c3" />
- <colspec colname="c4" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>Type of the data stream, see <xref
- linkend="v4l2-buf-type" />.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield>fmt</structfield></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-pix-format;</entry>
- <entry><structfield>pix</structfield></entry>
- <entry>Definition of an image format, see <xref
- linkend="pixfmt" />, used by video capture and output
-devices.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-pix-format-mplane;</entry>
- <entry><structfield>pix_mp</structfield></entry>
- <entry>Definition of an image format, see <xref
- linkend="pixfmt" />, used by video capture and output
-devices that support the <link linkend="planar-apis">multi-planar
-version of the API</link>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-window;</entry>
- <entry><structfield>win</structfield></entry>
- <entry>Definition of an overlaid image, see <xref
- linkend="overlay" />, used by video overlay devices.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-vbi-format;</entry>
- <entry><structfield>vbi</structfield></entry>
- <entry>Raw VBI capture or output parameters. This is
-discussed in more detail in <xref linkend="raw-vbi" />. Used by raw VBI
-capture and output devices.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-sliced-vbi-format;</entry>
- <entry><structfield>sliced</structfield></entry>
- <entry>Sliced VBI capture or output parameters. See
-<xref linkend="sliced" /> for details. Used by sliced VBI
-capture and output devices.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-sdr-format;</entry>
- <entry><structfield>sdr</structfield></entry>
- <entry>Definition of a data format, see
-<xref linkend="pixfmt" />, used by SDR capture and output devices.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u8</entry>
- <entry><structfield>raw_data</structfield>[200]</entry>
- <entry>Place holder for future extensions.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-format; <structfield>type</structfield>
-field is invalid or the requested buffer type not supported.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
deleted file mode 100644
index d1034fb61d15..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ /dev/null
@@ -1,148 +0,0 @@
-<refentry id="vidioc-g-frequency">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_FREQUENCY, VIDIOC_S_FREQUENCY</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_FREQUENCY</refname>
- <refname>VIDIOC_S_FREQUENCY</refname>
- <refpurpose>Get or set tuner or modulator radio
-frequency</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_frequency
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_frequency
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_FREQUENCY, VIDIOC_S_FREQUENCY</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To get the current tuner or modulator radio frequency
-applications set the <structfield>tuner</structfield> field of a
-&v4l2-frequency; to the respective tuner or modulator number (only
-input devices have tuners, only output devices have modulators), zero
-out the <structfield>reserved</structfield> array and
-call the <constant>VIDIOC_G_FREQUENCY</constant> ioctl with a pointer
-to this structure. The driver stores the current frequency in the
-<structfield>frequency</structfield> field.</para>
-
- <para>To change the current tuner or modulator radio frequency
-applications initialize the <structfield>tuner</structfield>,
-<structfield>type</structfield> and
-<structfield>frequency</structfield> fields, and the
-<structfield>reserved</structfield> array of a &v4l2-frequency; and
-call the <constant>VIDIOC_S_FREQUENCY</constant> ioctl with a pointer
-to this structure. When the requested frequency is not possible the
-driver assumes the closest possible value. However
-<constant>VIDIOC_S_FREQUENCY</constant> is a write-only ioctl, it does
-not return the actual new frequency.</para>
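-
-  <para>A minimal sketch of tuning an FM receiver to 95.8 MHz, assuming
-<varname>fd</varname> is an open <filename>/dev/radioX</filename> node whose
-tuner 0 reports the <constant>V4L2_TUNER_CAP_LOW</constant> capability
-(that is, 62.5 Hz units):</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int tune_fm(int fd)
-{
-        struct v4l2_frequency freq;
-
-        memset(&amp;freq, 0, sizeof(freq));
-        freq.tuner = 0;
-        freq.type = V4L2_TUNER_RADIO;
-        freq.frequency = 1532800; /* 95.8 MHz / 62.5 Hz */
-
-        /* Write-only: the frequency actually chosen must be read
-           back with VIDIOC_G_FREQUENCY if it matters. */
-        return ioctl(fd, VIDIOC_S_FREQUENCY, &amp;freq);
-}
-  </programlisting>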
-
- <table pgwide="1" frame="none" id="v4l2-frequency">
- <title>struct <structname>v4l2_frequency</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>tuner</structfield></entry>
- <entry>The tuner or modulator index number. This is the
-same value as in the &v4l2-input; <structfield>tuner</structfield>
-field and the &v4l2-tuner; <structfield>index</structfield> field, or
-the &v4l2-output; <structfield>modulator</structfield> field and the
-&v4l2-modulator; <structfield>index</structfield> field.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The type must be set
-to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
-device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
-for all others. Set this field to <constant>V4L2_TUNER_RADIO</constant> for
-modulators (currently only radio modulators are supported).
-See <xref linkend="v4l2-tuner-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>frequency</structfield></entry>
- <entry>Tuning frequency in units of 62.5 kHz, or if the
-&v4l2-tuner; or &v4l2-modulator; <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz. A 1 Hz unit is used when the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Drivers and
- applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <structfield>tuner</structfield> index is out of
-bounds or the value in the <structfield>type</structfield> field is
-wrong.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>A hardware seek is in progress.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-input.xml b/Documentation/DocBook/media/v4l/vidioc-g-input.xml
deleted file mode 100644
index 1d43065090dd..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-input.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<refentry id="vidioc-g-input">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_INPUT, VIDIOC_S_INPUT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_INPUT</refname>
- <refname>VIDIOC_S_INPUT</refname>
- <refpurpose>Query or select the current video input</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>int *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_INPUT, VIDIOC_S_INPUT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the current video input applications call the
-<constant>VIDIOC_G_INPUT</constant> ioctl with a pointer to an integer
-where the driver stores the number of the input, as in the
-&v4l2-input; <structfield>index</structfield> field. This ioctl will
-fail only when there are no video inputs, returning
-<errorcode>EINVAL</errorcode>.</para>
-
- <para>To select a video input applications store the number of the
-desired input in an integer and call the
-<constant>VIDIOC_S_INPUT</constant> ioctl with a pointer to this
-integer. Side effects are possible. For example inputs may support
-different video standards, so the driver may implicitly switch the
-current standard. Because of these possible side effects applications
-must select an input before querying or negotiating any other parameters.</para>
-
- <para>Information about video inputs is available using the
-&VIDIOC-ENUMINPUT; ioctl.</para>
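-
-  <para>A minimal sketch, assuming <varname>fd</varname> is an open capture
-device and that the driver happens to name one of its inputs
-"Composite", of how an application might select that input before
-negotiating anything else:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int select_composite(int fd)
-{
-        struct v4l2_input input;
-        int index;
-
-        memset(&amp;input, 0, sizeof(input));
-        for (input.index = 0;
-             ioctl(fd, VIDIOC_ENUMINPUT, &amp;input) == 0;
-             input.index++) {
-                if (strcmp((char *)input.name, "Composite") == 0) {
-                        index = input.index;
-                        /* Select the input first, then query or
-                           negotiate the remaining parameters. */
-                        return ioctl(fd, VIDIOC_S_INPUT, &amp;index);
-                }
-        }
-        return -1;
-}
-  </programlisting>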
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The number of the video input is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml b/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml
deleted file mode 100644
index 098ff483802e..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml
+++ /dev/null
@@ -1,175 +0,0 @@
-<refentry id="vidioc-g-jpegcomp">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_JPEGCOMP, VIDIOC_S_JPEGCOMP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_JPEGCOMP</refname>
- <refname>VIDIOC_S_JPEGCOMP</refname>
- <refpurpose></refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>v4l2_jpegcompression *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const v4l2_jpegcompression *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_JPEGCOMP, VIDIOC_S_JPEGCOMP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls are <emphasis role="bold">deprecated</emphasis>.
- New drivers and applications should use <link linkend="jpeg-controls">
- JPEG class controls</link> for image quality and JPEG markers control.
- </para>
-
- <para>[to do]</para>
-
- <para>Ronald Bultje elaborates:</para>
-
- <!-- See video4linux-list@redhat.com on 16 Oct 2002, subject
-"Re: [V4L] Re: v4l2 api / Zoran v4l2_jpegcompression" -->
-
- <para>APP is some application-specific information. The
-application can set it itself, and it'll be stored in the JPEG-encoded
-fields (e.g. interlacing information for use in an AVI or so). COM is the
-same, but it's comments, like 'encoded by me' or so.</para>
-
- <para>jpeg_markers describes whether the Huffman tables,
-quantization tables and the restart interval information (all
-JPEG-specific stuff) should be stored in the JPEG-encoded fields.
-These define how the JPEG field is encoded. If you omit them,
-applications assume you've used standard encoding. You usually do want
-to add them.</para>
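-
-  <para>Although this interface is deprecated, a minimal sketch of the
-legacy call sequence, assuming <varname>fd</varname> is an open device of a
-driver that still implements it, could look like this:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int set_legacy_jpeg_params(int fd)
-{
-        struct v4l2_jpegcompression comp;
-
-        memset(&amp;comp, 0, sizeof(comp));
-        if (ioctl(fd, VIDIOC_G_JPEGCOMP, &amp;comp))
-                return -1;
-
-        /* Request a quality of 80 and ask for the tables needed to
-           decode the fields without out-of-band information. */
-        comp.quality = 80;
-        comp.jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT |
-                            V4L2_JPEG_MARKER_DRI;
-        return ioctl(fd, VIDIOC_S_JPEGCOMP, &amp;comp);
-}
-  </programlisting>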
-
- <!-- NB VIDIOC_S_JPEGCOMP is w/o. -->
-
- <table pgwide="1" frame="none" id="v4l2-jpegcompression">
- <title>struct <structname>v4l2_jpegcompression</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>int</entry>
- <entry><structfield>quality</structfield></entry>
- <entry>Deprecated. If <link linkend="jpeg-quality-control"><constant>
- V4L2_CID_JPEG_COMPRESSION_QUALITY</constant></link> control is exposed
- by a driver applications should use it instead and ignore this field.
- </entry>
- </row>
- <row>
- <entry>int</entry>
- <entry><structfield>APPn</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>int</entry>
- <entry><structfield>APP_len</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>APP_data</structfield>[60]</entry>
- <entry></entry>
- </row>
- <row>
- <entry>int</entry>
- <entry><structfield>COM_len</structfield></entry>
- <entry></entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>COM_data</structfield>[60]</entry>
- <entry></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>jpeg_markers</structfield></entry>
- <entry>See <xref linkend="jpeg-markers"/>. Deprecated.
- If <link linkend="jpeg-active-marker-control"><constant>
- V4L2_CID_JPEG_ACTIVE_MARKER</constant></link> control
- is exposed by a driver applications should use it instead
- and ignore this field.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="jpeg-markers">
- <title>JPEG Markers Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_JPEG_MARKER_DHT</constant></entry>
- <entry>(1&lt;&lt;3)</entry>
- <entry>Define Huffman Tables</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_MARKER_DQT</constant></entry>
- <entry>(1&lt;&lt;4)</entry>
- <entry>Define Quantization Tables</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_MARKER_DRI</constant></entry>
- <entry>(1&lt;&lt;5)</entry>
- <entry>Define Restart Interval</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_MARKER_COM</constant></entry>
- <entry>(1&lt;&lt;6)</entry>
- <entry>Comment segment</entry>
- </row>
- <row>
- <entry><constant>V4L2_JPEG_MARKER_APP</constant></entry>
- <entry>(1&lt;&lt;7)</entry>
- <entry>App segment, driver will always use APP0</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml b/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml
deleted file mode 100644
index 96e17b344c5d..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-modulator.xml
+++ /dev/null
@@ -1,252 +0,0 @@
-<refentry id="vidioc-g-modulator">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_MODULATOR, VIDIOC_S_MODULATOR</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_MODULATOR</refname>
- <refname>VIDIOC_S_MODULATOR</refname>
- <refpurpose>Get or set modulator attributes</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_modulator
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_modulator
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_MODULATOR, VIDIOC_S_MODULATOR</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of a modulator applications initialize
-the <structfield>index</structfield> field and zero out the
-<structfield>reserved</structfield> array of a &v4l2-modulator; and
-call the <constant>VIDIOC_G_MODULATOR</constant> ioctl with a pointer
-to this structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all modulators
-applications shall begin at index zero, incrementing by one until the
-driver returns <errorcode>EINVAL</errorcode>.</para>
-
- <para>Modulators have two writable properties, an audio
-modulation set and the radio frequency. To change the modulated audio
-subprograms, applications initialize the <structfield>index
-</structfield> and <structfield>txsubchans</structfield> fields and the
-<structfield>reserved</structfield> array and call the
-<constant>VIDIOC_S_MODULATOR</constant> ioctl. Drivers may choose a
-different audio modulation if the request cannot be satisfied. However,
-this is a write-only ioctl; it does not return the actual audio
-modulation selected.</para>
-
- <para><link linkend="sdr">SDR</link> specific modulator types are
-<constant>V4L2_TUNER_SDR</constant> and <constant>V4L2_TUNER_RF</constant>.
-For SDR devices the <structfield>txsubchans</structfield> field must be
-initialized to zero.
-The term 'modulator' means SDR transmitter in this context.</para>
-
- <para>To change the radio frequency the &VIDIOC-S-FREQUENCY; ioctl
-is available.</para>
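-
-  <para>A minimal sketch, assuming <varname>fd</varname> is an open FM
-transmitter device with a single modulator, of how an application might
-request stereo modulation with the RDS encoder enabled:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int enable_stereo_rds(int fd)
-{
-        struct v4l2_modulator mod;
-
-        memset(&amp;mod, 0, sizeof(mod));
-        mod.index = 0;
-
-        /* Query the current attributes first. */
-        if (ioctl(fd, VIDIOC_G_MODULATOR, &amp;mod))
-                return -1;
-
-        /* VIDIOC_S_MODULATOR is write-only: the driver may choose a
-           different modulation without reporting it here. */
-        mod.txsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_RDS;
-        return ioctl(fd, VIDIOC_S_MODULATOR, &amp;mod);
-}
-  </programlisting>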
-
- <table pgwide="1" frame="none" id="v4l2-modulator">
- <title>struct <structname>v4l2_modulator</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Identifies the modulator, set by the
-application.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the modulator, a NUL-terminated ASCII
-string. This information is intended for the user.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry>Modulator capability flags. No flags are defined
-for this field, the tuner flags in &v4l2-tuner;
-are used accordingly. The audio flags indicate the ability
-to encode audio subprograms. They will <emphasis>not</emphasis>
-change for example with the current video standard.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangelow</structfield></entry>
- <entry>The lowest tunable frequency in units of 62.5
-kHz, or if the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz, or if the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set, in units of 1 Hz.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangehigh</structfield></entry>
- <entry>The highest tunable frequency in units of 62.5
-kHz, or if the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz, or if the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set, in units of 1 Hz.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>txsubchans</structfield></entry>
- <entry>With this field applications can determine how
-audio sub-carriers shall be modulated. It contains a set of flags as
-defined in <xref linkend="modulator-txsubchans" />. Note the tuner
-<structfield>rxsubchans</structfield> flags are reused, but the
-semantics are different. Video output devices are assumed to have an
-analog or PCM audio input with 1-3 channels. The
-<structfield>txsubchans</structfield> flags select one or more
-channels for modulation, together with some audio subprogram
-indicator, for example a stereo pilot tone.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry spanname="hspan">Type of the modulator, see <xref
- linkend="v4l2-tuner-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[3]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="modulator-txsubchans">
- <title>Modulator Audio Transmission Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TUNER_SUB_MONO</constant></entry>
- <entry>0x0001</entry>
- <entry>Modulate channel 1 as mono audio or, when the input
-has more channels, a down-mix of channels 1 and 2. This flag does not
-combine with <constant>V4L2_TUNER_SUB_STEREO</constant> or
-<constant>V4L2_TUNER_SUB_LANG1</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_STEREO</constant></entry>
- <entry>0x0002</entry>
- <entry>Modulate channel 1 and 2 as left and right
-channel of a stereo audio signal. When the input has only one channel
-or two channels and <constant>V4L2_TUNER_SUB_SAP</constant> is also
-set, channel 1 is encoded as left and right channel. This flag does
-not combine with <constant>V4L2_TUNER_SUB_MONO</constant> or
-<constant>V4L2_TUNER_SUB_LANG1</constant>. When the driver does not
-support stereo audio it shall fall back to mono.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_LANG1</constant></entry>
- <entry>0x0008</entry>
- <entry>Modulate channel 1 and 2 as primary and secondary
-language of a bilingual audio signal. When the input has only one
-channel it is used for both languages. It is not possible to encode
-the primary or secondary language only. This flag does not combine
-with <constant>V4L2_TUNER_SUB_MONO</constant>,
-<constant>V4L2_TUNER_SUB_STEREO</constant> or
-<constant>V4L2_TUNER_SUB_SAP</constant>. If the hardware does not
-support the respective audio matrix, or the current video standard
-does not permit bilingual audio, the
-<constant>VIDIOC_S_MODULATOR</constant> ioctl shall return an &EINVAL;
-and the driver shall fall back to mono or stereo mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_LANG2</constant></entry>
- <entry>0x0004</entry>
- <entry>Same effect as
-<constant>V4L2_TUNER_SUB_SAP</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_SAP</constant></entry>
- <entry>0x0004</entry>
- <entry>When combined with <constant>V4L2_TUNER_SUB_MONO
-</constant> the first channel is encoded as mono audio, the last
-channel as Second Audio Program. When the input has only one channel
-it is used for both audio tracks. When the input has three channels
-the mono track is a down-mix of channel 1 and 2. When combined with
-<constant>V4L2_TUNER_SUB_STEREO</constant> channel 1 and 2 are
-encoded as left and right stereo audio, channel 3 as Second Audio
-Program. When the input has only two channels, the first is encoded as
-left and right channel and the second as SAP. When the input has only
-one channel it is used for all audio tracks. It is not possible to
-encode a Second Audio Program only. This flag must combine with
-<constant>V4L2_TUNER_SUB_MONO</constant> or
-<constant>V4L2_TUNER_SUB_STEREO</constant>. If the hardware does not
-support the respective audio matrix, or the current video standard
-does not permit SAP, the <constant>VIDIOC_S_MODULATOR</constant> ioctl
-shall return an &EINVAL; and the driver shall fall back to mono or stereo
-mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_RDS</constant></entry>
- <entry>0x0010</entry>
- <entry>Enable the RDS encoder for a radio FM transmitter.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-modulator;
-<structfield>index</structfield> is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-output.xml b/Documentation/DocBook/media/v4l/vidioc-g-output.xml
deleted file mode 100644
index 4533068ecb8a..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-output.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<refentry id="vidioc-g-output">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_OUTPUT, VIDIOC_S_OUTPUT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_OUTPUT</refname>
- <refname>VIDIOC_S_OUTPUT</refname>
- <refpurpose>Query or select the current video output</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>int *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_OUTPUT, VIDIOC_S_OUTPUT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the current video output applications call the
-<constant>VIDIOC_G_OUTPUT</constant> ioctl with a pointer to an integer
-where the driver stores the number of the output, as in the
-&v4l2-output; <structfield>index</structfield> field. This ioctl
-will fail only when there are no video outputs, returning the
-&EINVAL;.</para>
-
- <para>To select a video output applications store the number of the
-desired output in an integer and call the
-<constant>VIDIOC_S_OUTPUT</constant> ioctl with a pointer to this integer.
-Side effects are possible. For example outputs may support different
-video standards, so the driver may implicitly switch the current
-standard. Because of these possible side effects applications
-must select an output before querying or negotiating any other parameters.</para>
-
- <para>Information about video outputs is available using the
-&VIDIOC-ENUMOUTPUT; ioctl.</para>
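-
-  <para>A minimal sketch, assuming <varname>fd</varname> is an open video
-output device, of how an application might report the current output and
-then switch to output 0 before negotiating any other parameters:</para>
-
-  <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int select_first_output(int fd)
-{
-        int index;
-
-        if (ioctl(fd, VIDIOC_G_OUTPUT, &amp;index))
-                return -1;
-        printf("current output: %d\n", index);
-
-        /* Switching outputs may have side effects such as an
-           implicit standard change, so do it first. */
-        index = 0;
-        return ioctl(fd, VIDIOC_S_OUTPUT, &amp;index);
-}
-  </programlisting>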
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The number of the video output is out of bounds, or
-there are no video outputs at all.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-parm.xml b/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
deleted file mode 100644
index 721728745407..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-parm.xml
+++ /dev/null
@@ -1,314 +0,0 @@
-<refentry id="vidioc-g-parm">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_PARM, VIDIOC_S_PARM</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_PARM</refname>
- <refname>VIDIOC_S_PARM</refname>
- <refpurpose>Get or set streaming parameters</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>v4l2_streamparm *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_PARM, VIDIOC_S_PARM</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The current video standard determines a nominal number of
-frames per second. If less than this number of frames is to be
-captured or output, applications can request frame skipping or
-duplicating on the driver side. This is especially useful when using
-the <function>read()</function> or <function>write()</function>, which
-are not augmented by timestamps or sequence counters, and to avoid
-unnecessary data copying.</para>
-
- <para>Further these ioctls can be used to determine the number of
-buffers used internally by a driver in read/write mode. For
-implications see the section discussing the &func-read;
-function.</para>
-
- <para>To get and set the streaming parameters applications call
-the <constant>VIDIOC_G_PARM</constant> and
-<constant>VIDIOC_S_PARM</constant> ioctl, respectively. They take a
-pointer to a struct <structname>v4l2_streamparm</structname> which
-contains a union holding separate parameters for input and output
-devices.</para>
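-
-  <para>A minimal sketch, assuming <varname>fd</varname> is an open capture
-device that advertises <constant>V4L2_CAP_TIMEPERFRAME</constant>, of how an
-application might ask the driver to skip frames down to roughly 15 frames
-per second:</para>
-
-  <programlisting>
-#include &lt;string.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-static int request_15fps(int fd)
-{
-        struct v4l2_streamparm parm;
-
-        memset(&amp;parm, 0, sizeof(parm));
-        parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-        if (ioctl(fd, VIDIOC_G_PARM, &amp;parm))
-                return -1;
-        if (!(parm.parm.capture.capability &amp; V4L2_CAP_TIMEPERFRAME))
-                return -1;
-
-        /* timeperframe is a period: 1/15 of a second per frame.
-           The driver returns the period it will actually use. */
-        parm.parm.capture.timeperframe.numerator = 1;
-        parm.parm.capture.timeperframe.denominator = 15;
-        return ioctl(fd, VIDIOC_S_PARM, &amp;parm);
-}
-  </programlisting>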
-
- <table pgwide="1" frame="none" id="v4l2-streamparm">
- <title>struct <structname>v4l2_streamparm</structname></title>
- <tgroup cols="4">
- &cs-ustr;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry></entry>
- <entry>The buffer (stream) type, same as &v4l2-format;
-<structfield>type</structfield>, set by the application. See <xref
- linkend="v4l2-buf-type" /></entry>
- </row>
- <row>
- <entry>union</entry>
- <entry><structfield>parm</structfield></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-captureparm;</entry>
- <entry><structfield>capture</structfield></entry>
- <entry>Parameters for capture devices, used when
-<structfield>type</structfield> is
-<constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>&v4l2-outputparm;</entry>
- <entry><structfield>output</structfield></entry>
- <entry>Parameters for output devices, used when
-<structfield>type</structfield> is
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u8</entry>
- <entry><structfield>raw_data</structfield>[200]</entry>
- <entry>A place holder for future extensions.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-captureparm">
- <title>struct <structname>v4l2_captureparm</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry>See <xref linkend="parm-caps" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capturemode</structfield></entry>
- <entry>Set by drivers and applications, see <xref linkend="parm-flags" />.</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>timeperframe</structfield></entry>
- <entry><para>This is the desired period between
-successive frames captured by the driver, in seconds. The
-field is intended to skip frames on the driver side, saving I/O
-bandwidth.</para><para>Applications store here the desired frame
-period, drivers return the actual frame period, which must be greater than
-or equal to the nominal frame period determined by the current video
-standard (&v4l2-standard; <structfield>frameperiod</structfield>
-field). Changing the video standard (also implicitly by switching the
-video input) may reset this parameter to the nominal frame period. To
-reset manually applications can just set this field to
-zero.</para><para>Drivers support this function only when they set the
-<constant>V4L2_CAP_TIMEPERFRAME</constant> flag in the
-<structfield>capability</structfield> field.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>extendedmode</structfield></entry>
- <entry>Custom (driver specific) streaming parameters. When
-unused, applications and drivers must set this field to zero.
-Applications using this field should check the driver name and
-version, see <xref linkend="querycap" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>readbuffers</structfield></entry>
- <entry>Applications set this field to the desired number
-of buffers used internally by the driver in &func-read; mode. Drivers
-return the actual number of buffers. When an application requests zero
-buffers, drivers should just return the current setting rather than
-the minimum or an error code. For details see <xref
- linkend="rw" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-outputparm">
- <title>struct <structname>v4l2_outputparm</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry>See <xref linkend="parm-caps" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>outputmode</structfield></entry>
- <entry>Set by drivers and applications, see <xref
- linkend="parm-flags" />.</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>timeperframe</structfield></entry>
- <entry>This is the desired period between
-successive frames output by the driver, in seconds.</entry>
- </row>
- <row>
- <entry spanname="hspan"><para>The field is intended to
-repeat frames on the driver side in &func-write; mode (in streaming
-mode timestamps can be used to throttle the output), saving I/O
-bandwidth.</para><para>Applications store here the desired frame
-period, drivers return the actual frame period, which must be greater than
-or equal to the nominal frame period determined by the current video
-standard (&v4l2-standard; <structfield>frameperiod</structfield>
-field). Changing the video standard (also implicitly by switching the
-video output) may reset this parameter to the nominal frame period. To
-reset manually applications can just set this field to
-zero.</para><para>Drivers support this function only when they set the
-<constant>V4L2_CAP_TIMEPERFRAME</constant> flag in the
-<structfield>capability</structfield> field.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>extendedmode</structfield></entry>
- <entry>Custom (driver specific) streaming parameters. When
-unused, applications and drivers must set this field to zero.
-Applications using this field should check the driver name and
-version, see <xref linkend="querycap" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>writebuffers</structfield></entry>
- <entry>Applications set this field to the desired number
-of buffers used internally by the driver in
-<function>write()</function> mode. Drivers return the actual number of
-buffers. When an application requests zero buffers, drivers should
-just return the current setting rather than the minimum or an error
-code. For details see <xref linkend="rw" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
- <entry>Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="parm-caps">
-      <title>Streaming Parameters Capabilities</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CAP_TIMEPERFRAME</constant></entry>
- <entry>0x1000</entry>
- <entry>The frame skipping/repeating controlled by the
-<structfield>timeperframe</structfield> field is supported.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="parm-flags">
- <title>Capture Parameters Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_MODE_HIGHQUALITY</constant></entry>
- <entry>0x0001</entry>
- <entry><para>High quality imaging mode. High quality mode
-is intended for still imaging applications. The idea is to get the
-best possible image quality that the hardware can deliver. It is not
-defined how the driver writer may achieve that; it will depend on the
-hardware and the ingenuity of the driver writer. High quality mode is
-a different mode from the regular motion video capture modes. In
-high quality mode:<itemizedlist>
- <listitem>
- <para>The driver may be able to capture higher
-resolutions than for motion capture.</para>
- </listitem>
- <listitem>
- <para>The driver may support fewer pixel formats
-than motion capture (e.g. true color).</para>
- </listitem>
- <listitem>
- <para>The driver may capture and arithmetically
-combine multiple successive fields or frames to remove color edge
-artifacts and reduce the noise in the video data.
-</para>
- </listitem>
- <listitem>
- <para>The driver may capture images in slices like
-a scanner in order to handle larger format images than would otherwise
-be possible. </para>
- </listitem>
- <listitem>
- <para>An image capture operation may be
-significantly slower than motion capture. </para>
- </listitem>
- <listitem>
- <para>Moving objects in the image might have
-excessive motion blur. </para>
- </listitem>
- <listitem>
- <para>Capture might only work through the
-<function>read()</function> call.</para>
- </listitem>
- </itemizedlist></para></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
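For quick reference, a minimal sketch of requesting roughly 15 frames per second through VIDIOC_G_PARM/VIDIOC_S_PARM; it assumes fd is an open single-planar capture device whose driver advertises V4L2_CAP_TIMEPERFRAME, and set_frame_period is an illustrative helper name:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the driver to skip frames so that roughly 15 fps are delivered. */
static int set_frame_period(int fd)
{
        struct v4l2_streamparm parm;

        memset(&parm, 0, sizeof(parm));
        parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (ioctl(fd, VIDIOC_G_PARM, &parm) < 0)
                return -1;
        if (!(parm.parm.capture.capability & V4L2_CAP_TIMEPERFRAME))
                return -1;      /* driver cannot skip frames */
        parm.parm.capture.timeperframe.numerator = 1;
        parm.parm.capture.timeperframe.denominator = 15;
        if (ioctl(fd, VIDIOC_S_PARM, &parm) < 0)
                return -1;
        /* the driver may adjust the period; the actual value is returned
         * in parm.parm.capture.timeperframe */
        return 0;
}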
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-priority.xml b/Documentation/DocBook/media/v4l/vidioc-g-priority.xml
deleted file mode 100644
index 6a81b4fe9538..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-priority.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<refentry id="vidioc-g-priority">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_PRIORITY, VIDIOC_S_PRIORITY</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_PRIORITY</refname>
- <refname>VIDIOC_S_PRIORITY</refname>
- <refpurpose>Query or request the access priority associated with a
-file descriptor</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>enum v4l2_priority *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const enum v4l2_priority *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_PRIORITY, VIDIOC_S_PRIORITY</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para>Pointer to an enum v4l2_priority type.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the current access priority
-applications call the <constant>VIDIOC_G_PRIORITY</constant> ioctl
-with a pointer to an enum v4l2_priority variable where the driver stores
-the current priority.</para>
-
- <para>To request an access priority applications store the
-desired priority in an enum v4l2_priority variable and call
-<constant>VIDIOC_S_PRIORITY</constant> ioctl with a pointer to this
-variable.</para>
-
- <table frame="none" pgwide="1" id="v4l2-priority">
- <title>enum v4l2_priority</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_PRIORITY_UNSET</constant></entry>
- <entry>0</entry>
- <entry></entry>
- </row>
- <row>
- <entry><constant>V4L2_PRIORITY_BACKGROUND</constant></entry>
- <entry>1</entry>
- <entry>Lowest priority, usually applications running in
-background, for example monitoring VBI transmissions. A proxy
-application running in user space will be necessary if multiple
-applications want to read from a device at this priority.</entry>
- </row>
- <row>
- <entry><constant>V4L2_PRIORITY_INTERACTIVE</constant></entry>
- <entry>2</entry>
- <entry></entry>
- </row>
- <row>
- <entry><constant>V4L2_PRIORITY_DEFAULT</constant></entry>
- <entry>2</entry>
- <entry>Medium priority, usually applications started and
-interactively controlled by the user. For example TV viewers, Teletext
-browsers, or just "panel" applications to change the channel or video
-controls. This is the default priority unless an application requests
-another.</entry>
- </row>
- <row>
- <entry><constant>V4L2_PRIORITY_RECORD</constant></entry>
- <entry>3</entry>
- <entry>Highest priority. Only one file descriptor can have
-this priority; it blocks any other fd from changing device properties.
-It is usually requested by applications which must not be interrupted,
-like video recording.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The requested priority value is invalid.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>Another application already requested higher
-priority.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
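For quick reference, a minimal sketch of requesting record priority with VIDIOC_S_PRIORITY; fd is assumed to be an open V4L2 device and claim_record_priority is an illustrative helper name:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask for the highest priority so no other fd can change device
 * properties. Fails with EBUSY if another fd already holds it. */
static int claim_record_priority(int fd)
{
        enum v4l2_priority prio = V4L2_PRIORITY_RECORD;

        return ioctl(fd, VIDIOC_S_PRIORITY, &prio);
}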
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
deleted file mode 100644
index 997f4e96f297..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
+++ /dev/null
@@ -1,233 +0,0 @@
-<refentry id="vidioc-g-selection">
-
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_SELECTION, VIDIOC_S_SELECTION</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_SELECTION</refname>
- <refname>VIDIOC_S_SELECTION</refname>
- <refpurpose>Get or set one of the selection rectangles</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_selection *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_SELECTION, VIDIOC_S_SELECTION</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The ioctls are used to query and configure selection rectangles.</para>
-
-<para>To query the cropping (composing) rectangle set the &v4l2-selection;
-<structfield>type</structfield> field to the respective buffer type.
-Do not use the multiplanar buffer types. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
-instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant> and use
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>. The next step is
-setting the value of &v4l2-selection; <structfield>target</structfield> field
-to <constant>V4L2_SEL_TGT_CROP</constant> (<constant>V4L2_SEL_TGT_COMPOSE</constant>).
-Please refer to table <xref linkend="v4l2-selections-common" /> or <xref linkend="selection-api" />
-for additional targets. The <structfield>flags</structfield> and <structfield>reserved
-</structfield> fields of &v4l2-selection; are ignored and they must be filled
-with zeros. The driver fills the rest of the structure or
-returns &EINVAL; if incorrect buffer type or target was used. If cropping
-(composing) is not supported then the active rectangle is not mutable and it is
-always equal to the bounds rectangle. Finally, the &v4l2-rect;
-<structfield>r</structfield> rectangle is filled with the current cropping
-(composing) coordinates. The coordinates are expressed in driver-dependent
-units. The only exception are rectangles for images in raw formats, whose
-coordinates are always expressed in pixels.</para>
-
-<para>To change the cropping (composing) rectangle set the &v4l2-selection;
-<structfield>type</structfield> field to the respective buffer type. Do not
-use multiplanar buffers. Use <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE</constant>
-instead of <constant>V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE</constant>. Use
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant> instead of
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>. The next step is
-setting the value of &v4l2-selection; <structfield>target</structfield> to
-<constant>V4L2_SEL_TGT_CROP</constant> (<constant>V4L2_SEL_TGT_COMPOSE</constant>).
-Please refer to table <xref linkend="v4l2-selections-common" /> or <xref linkend="selection-api" />
-for additional targets. The &v4l2-rect; <structfield>r</structfield> rectangle needs to be
-set to the desired active area. The &v4l2-selection; <structfield>reserved</structfield>
-field is ignored and must be filled with zeros. The driver may adjust
-coordinates of the requested rectangle. An application may
-introduce constraints to control rounding behaviour. The &v4l2-selection;
-<structfield>flags</structfield> field must be set to one of the following:
-
-<itemizedlist>
- <listitem>
-<para><constant>0</constant> - The driver can adjust the rectangle size freely
-and shall choose a crop/compose rectangle as close as possible to the requested
-one.</para>
- </listitem>
- <listitem>
-<para><constant>V4L2_SEL_FLAG_GE</constant> - The driver is not allowed to
-shrink the rectangle. The original rectangle must lay inside the adjusted
-one.</para>
- </listitem>
- <listitem>
-<para><constant>V4L2_SEL_FLAG_LE</constant> - The driver is not allowed to
-enlarge the rectangle. The adjusted rectangle must lay inside the original
-one.</para>
- </listitem>
- <listitem>
-<para><constant>V4L2_SEL_FLAG_GE | V4L2_SEL_FLAG_LE</constant> - The driver
-must choose the size exactly the same as in the requested rectangle.</para>
- </listitem>
-</itemizedlist>
-
-Please refer to <xref linkend="sel-const-adjust" />.
-
-</para>
-
-<para> The driver may have to adjust the requested dimensions against hardware
-limits and other parts of the pipeline, i.e. the bounds given by the
-capture/output window or TV display. The closest possible values of horizontal
-and vertical offset and sizes are chosen according to the following priority:
-
-<orderedlist>
- <listitem>
- <para>Satisfy constraints from &v4l2-selection; <structfield>flags</structfield>.</para>
- </listitem>
- <listitem>
- <para>Adjust width, height, left, and top to hardware limits and alignments.</para>
- </listitem>
- <listitem>
- <para>Keep center of adjusted rectangle as close as possible to the original one.</para>
- </listitem>
- <listitem>
- <para>Keep width and height as close as possible to original ones.</para>
- </listitem>
- <listitem>
- <para>Keep horizontal and vertical offset as close as possible to original ones.</para>
- </listitem>
-</orderedlist>
-
-On success the &v4l2-rect; <structfield>r</structfield> field contains
-the adjusted rectangle. When the parameters are unsuitable the application may
-modify the cropping (composing) or image parameters and repeat the cycle until
-satisfactory parameters have been negotiated. If the constraint flags have to be
-violated then ERANGE is returned. The error indicates that <emphasis>no
-rectangle exists</emphasis> that satisfies the constraints.</para>
-
- <para>Selection targets and flags are documented in <xref
- linkend="v4l2-selections-common"/>.</para>
-
- <para>
- <figure id="sel-const-adjust">
- <title>Size adjustments with constraint flags.</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="constraints.png" format="PNG" />
- </imageobject>
- <textobject>
- <phrase>Behaviour of rectangle adjustment for different constraint
- flags.</phrase>
- </textobject>
- </mediaobject>
- </figure>
- </para>
-
- <para>
- <table pgwide="1" frame="none" id="v4l2-selection">
- <title>struct <structname>v4l2_selection</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the buffer (from &v4l2-buf-type;).</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>target</structfield></entry>
- <entry>Used to select between <link linkend="v4l2-selections-common"> cropping
- and composing rectangles</link>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags controlling the selection rectangle adjustments, refer to
- <link linkend="v4l2-selection-flags">selection flags</link>.</entry>
- </row>
- <row>
- <entry>&v4l2-rect;</entry>
- <entry><structfield>r</structfield></entry>
- <entry>The selection rectangle.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved[9]</structfield></entry>
- <entry>Reserved fields for future use. Drivers and applications must zero this array.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </para>
- </refsect1>
-
- <refsect1>
- &return-value;
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>Given buffer type <structfield>type</structfield> or
-the selection target <structfield>target</structfield> is not supported,
-or the <structfield>flags</structfield> argument is not valid.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ERANGE</errorcode></term>
- <listitem>
- <para>It is not possible to adjust &v4l2-rect; <structfield>
-r</structfield> rectangle to satisfy all constraints given in the
-<structfield>flags</structfield> argument.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>It is not possible to apply change of the selection rectangle
-at the moment, usually because streaming is in progress.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
-</refentry>
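For quick reference, a minimal sketch of setting a crop rectangle with VIDIOC_S_SELECTION on a single-planar capture stream; fd is assumed to be an open capture device, the 640x480 geometry is arbitrary, and set_crop is an illustrative helper name:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request a 640x480 crop at the top-left corner of the active area.
 * flags == 0 lets the driver round the rectangle freely; the adjusted
 * rectangle is returned in sel.r on success. */
static int set_crop(int fd)
{
        struct v4l2_selection sel;

        memset(&sel, 0, sizeof(sel));
        sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        sel.target = V4L2_SEL_TGT_CROP;
        sel.flags = 0;
        sel.r.left = 0;
        sel.r.top = 0;
        sel.r.width = 640;
        sel.r.height = 480;
        return ioctl(fd, VIDIOC_S_SELECTION, &sel);
}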
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml b/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
deleted file mode 100644
index d05623c55403..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-sliced-vbi-cap.xml
+++ /dev/null
@@ -1,255 +0,0 @@
-<refentry id="vidioc-g-sliced-vbi-cap">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_SLICED_VBI_CAP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_SLICED_VBI_CAP</refname>
- <refpurpose>Query sliced VBI capabilities</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_sliced_vbi_cap *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_SLICED_VBI_CAP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To find out which data services are supported by a sliced
-VBI capture or output device, applications initialize the
-<structfield>type</structfield> field of a &v4l2-sliced-vbi-cap;,
-clear the <structfield>reserved</structfield> array and
-call the <constant>VIDIOC_G_SLICED_VBI_CAP</constant> ioctl. The
-driver fills in the remaining fields or returns an &EINVAL; if the
-sliced VBI API is unsupported or <structfield>type</structfield>
-is invalid.</para>
-
- <para>Note the <structfield>type</structfield> field was added,
-and the ioctl changed from read-only to write-read, in Linux 2.6.19.</para>
-
- <table pgwide="1" frame="none" id="v4l2-sliced-vbi-cap">
- <title>struct <structname>v4l2_sliced_vbi_cap</structname></title>
- <tgroup cols="5">
- <colspec colname="c1" colwidth="3*" />
- <colspec colname="c2" colwidth="3*" />
- <colspec colname="c3" colwidth="2*" />
- <colspec colname="c4" colwidth="2*" />
- <colspec colname="c5" colwidth="2*" />
- <spanspec spanname="hspan" namest="c3" nameend="c5" />
- <tbody valign="top">
- <row>
- <entry>__u16</entry>
- <entry><structfield>service_set</structfield></entry>
- <entry spanname="hspan">A set of all data services
-supported by the driver. Equal to the union of all elements of the
-<structfield>service_lines </structfield> array.</entry>
- </row>
- <row>
- <entry>__u16</entry>
- <entry><structfield>service_lines</structfield>[2][24]</entry>
- <entry spanname="hspan">Each element of this array
-contains a set of data services the hardware can look for or insert
-into a particular scan line. Data services are defined in <xref
- linkend="vbi-services" />. Array indices map to ITU-R
-line numbers (see also <xref
- linkend="vbi-525" /> and <xref
-linkend="vbi-625" />) as follows:</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry>Element</entry>
- <entry>525 line systems</entry>
- <entry>625 line systems</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[0][1]</entry>
- <entry align="center">1</entry>
- <entry align="center">1</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[0][23]</entry>
- <entry align="center">23</entry>
- <entry align="center">23</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[1][1]</entry>
- <entry align="center">264</entry>
- <entry align="center">314</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><structfield>service_lines</structfield>[1][23]</entry>
- <entry align="center">286</entry>
- <entry align="center">336</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry spanname="hspan">The number of VBI lines the
-hardware can capture or output per frame, or the number of services it
-can identify on a given line may be limited. For example on PAL line
-16 the hardware may be able to look for a VPS or Teletext signal, but
-not both at the same time. Applications can learn about these limits
-using the &VIDIOC-S-FMT; ioctl as described in <xref
- linkend="sliced" />.</entry>
- </row>
- <row>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry spanname="hspan">Drivers must set
-<structfield>service_lines</structfield>[0][0] and
-<structfield>service_lines</structfield>[1][0] to zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the data stream, see <xref
- linkend="v4l2-buf-type" />. Should be
-<constant>V4L2_BUF_TYPE_SLICED_VBI_CAPTURE</constant> or
-<constant>V4L2_BUF_TYPE_SLICED_VBI_OUTPUT</constant>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[3]</entry>
- <entry spanname="hspan">This array is reserved for future
-extensions. Applications and drivers must set it to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <!-- See also dev-sliced-vbi.sgml -->
- <table pgwide="1" frame="none" id="vbi-services">
- <title>Sliced VBI services</title>
- <tgroup cols="5">
- <colspec colname="c1" colwidth="2*" />
- <colspec colname="c2" colwidth="1*" />
- <colspec colname="c3" colwidth="1*" />
- <colspec colname="c4" colwidth="2*" />
- <colspec colname="c5" colwidth="2*" />
- <spanspec spanname='rlp' namest='c3' nameend='c5' />
- <thead>
- <row>
- <entry>Symbol</entry>
- <entry>Value</entry>
- <entry>Reference</entry>
- <entry>Lines, usually</entry>
- <entry>Payload</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SLICED_TELETEXT_B</constant> (Teletext
-System B)</entry>
- <entry>0x0001</entry>
- <entry><xref linkend="ets300706" />, <xref linkend="itu653" /></entry>
- <entry>PAL/SECAM line 7-22, 320-335 (second field 7-22)</entry>
- <entry>Last 42 of the 45 byte Teletext packet, that is
-without clock run-in and framing code, lsb first transmitted.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_VPS</constant></entry>
- <entry>0x0400</entry>
- <entry><xref linkend="ets300231" /></entry>
- <entry>PAL line 16</entry>
- <entry>Byte number 3 to 15 according to Figure 9 of
-ETS&nbsp;300&nbsp;231, lsb first transmitted.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_CAPTION_525</constant></entry>
- <entry>0x1000</entry>
- <entry><xref linkend="cea608" /></entry>
- <entry>NTSC line 21, 284 (second field 21)</entry>
- <entry>Two bytes in transmission order, including parity
-bit, lsb first transmitted.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_WSS_625</constant></entry>
- <entry>0x4000</entry>
- <entry><xref linkend="en300294" />, <xref linkend="itu1119" /></entry>
- <entry>PAL/SECAM line 23</entry>
- <entry><screen>
-Byte 0 1
- msb lsb msb lsb
-Bit 7 6 5 4 3 2 1 0 x x 13 12 11 10 9
-</screen></entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_VBI_525</constant></entry>
- <entry>0x1000</entry>
- <entry spanname="rlp">Set of services applicable to 525
-line systems.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SLICED_VBI_625</constant></entry>
- <entry>0x4401</entry>
- <entry spanname="rlp">Set of services applicable to 625
-line systems.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The value in the <structfield>type</structfield> field is
-wrong.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
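For quick reference, a minimal sketch of checking for Teletext System B support with VIDIOC_G_SLICED_VBI_CAP; fd is assumed to be an open sliced VBI capture device and supports_teletext is an illustrative helper name:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns non-zero if the device can extract Teletext System B data. */
static int supports_teletext(int fd)
{
        struct v4l2_sliced_vbi_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
        if (ioctl(fd, VIDIOC_G_SLICED_VBI_CAP, &cap) < 0)
                return 0;       /* sliced VBI unsupported or wrong type */
        return (cap.service_set & V4L2_SLICED_TELETEXT_B) != 0;
}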
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-std.xml b/Documentation/DocBook/media/v4l/vidioc-g-std.xml
deleted file mode 100644
index 4a898417de28..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-std.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<refentry id="vidioc-g-std">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_STD, VIDIOC_S_STD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_STD</refname>
- <refname>VIDIOC_S_STD</refname>
- <refpurpose>Query or select the video standard of the current input</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>v4l2_std_id
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const v4l2_std_id
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_STD, VIDIOC_S_STD</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query and select the current video standard applications
-use the <constant>VIDIOC_G_STD</constant> and <constant>VIDIOC_S_STD</constant> ioctls which take a pointer to a
-&v4l2-std-id; type as argument. <constant>VIDIOC_G_STD</constant> can
-return a single flag or a set of flags as in &v4l2-standard; field
-<structfield>id</structfield>. The flags must be unambiguous such
-that they appear in only one enumerated <structname>v4l2_standard</structname> structure.</para>
-
- <para><constant>VIDIOC_S_STD</constant> accepts one or more
-flags; being a write-only ioctl, it does not return the actual new standard as
-<constant>VIDIOC_G_STD</constant> does. When no flags are given or
-the current input does not support the requested standard the driver
-returns an &EINVAL;. When the standard set is ambiguous drivers may
-return <errorcode>EINVAL</errorcode> or choose any of the requested
-standards. If the current input or output does not support standard video timings (e.g. if
-&VIDIOC-ENUMINPUT; does not set the <constant>V4L2_IN_CAP_STD</constant> flag), then
-&ENODATA; is returned.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <constant>VIDIOC_S_STD</constant> parameter was unsuitable.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>Standard video timings are not supported for this input or output.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
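For quick reference, a minimal sketch of switching the current input to PAL with VIDIOC_G_STD/VIDIOC_S_STD; fd is assumed to be an open device whose input supports standard video timings, and ensure_pal is an illustrative helper name:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Select PAL unless the current standard already is a PAL variant. */
static int ensure_pal(int fd)
{
        v4l2_std_id std;

        if (ioctl(fd, VIDIOC_G_STD, &std) < 0)
                return -1;      /* e.g. ENODATA: no standard video timings */
        if (std & V4L2_STD_PAL)
                return 0;
        std = V4L2_STD_PAL;
        return ioctl(fd, VIDIOC_S_STD, &std);
}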
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
deleted file mode 100644
index 459b7e561f3c..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
+++ /dev/null
@@ -1,594 +0,0 @@
-<refentry id="vidioc-g-tuner">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_G_TUNER, VIDIOC_S_TUNER</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_G_TUNER</refname>
- <refname>VIDIOC_S_TUNER</refname>
- <refpurpose>Get or set tuner attributes</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_tuner
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_tuner
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_G_TUNER, VIDIOC_S_TUNER</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of a tuner applications initialize the
-<structfield>index</structfield> field and zero out the
-<structfield>reserved</structfield> array of a &v4l2-tuner; and call the
-<constant>VIDIOC_G_TUNER</constant> ioctl with a pointer to this
-structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all tuners
-applications shall begin at index zero, incrementing by one until the
-driver returns <errorcode>EINVAL</errorcode>.</para>
-
- <para>Tuners have two writable properties, the audio mode and
-the radio frequency. To change the audio mode, applications initialize
-the <structfield>index</structfield>,
-<structfield>audmode</structfield> and
-<structfield>reserved</structfield> fields and call the
-<constant>VIDIOC_S_TUNER</constant> ioctl. This will
-<emphasis>not</emphasis> change the current tuner, which is determined
-by the current video input. Drivers may choose a different audio mode
-if the requested mode is invalid or unsupported. Since this is a
-<!-- FIXME -->write-only ioctl, it does not return the actually
-selected audio mode.</para>
-
- <para><link linkend="sdr">SDR</link> specific tuner types are
-<constant>V4L2_TUNER_SDR</constant> and <constant>V4L2_TUNER_RF</constant>.
-For SDR devices the <structfield>audmode</structfield> field must be
-initialized to zero.
-The term 'tuner' means SDR receiver in this context.</para>
-
- <para>To change the radio frequency the &VIDIOC-S-FREQUENCY; ioctl
-is available.</para>
-
- <table pgwide="1" frame="none" id="v4l2-tuner">
- <title>struct <structname>v4l2_tuner</structname></title>
- <tgroup cols="3">
- <colspec colname="c1" colwidth="1*" />
- <colspec colname="c2" colwidth="1*" />
- <colspec colname="c3" colwidth="1*" />
- <colspec colname="c4" colwidth="1*" />
- <spanspec spanname="hspan" namest="c3" nameend="c4" />
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry spanname="hspan">Identifies the tuner, set by the
-application.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry spanname="hspan"><para>Name of the tuner, a
-NUL-terminated ASCII string. This information is intended for the
-user.<!-- FIXME Video inputs already have a name, the purpose of this
-field is not quite clear.--></para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry spanname="hspan">Type of the tuner, see <xref
- linkend="v4l2-tuner-type" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capability</structfield></entry>
- <entry spanname="hspan"><para>Tuner capability flags, see
-<xref linkend="tuner-capability" />. Audio flags indicate the ability
-to decode audio subprograms. They will <emphasis>not</emphasis>
-change, for example with the current video standard.</para><para>When
-the structure refers to a radio tuner the
-<constant>V4L2_TUNER_CAP_LANG1</constant>,
-<constant>V4L2_TUNER_CAP_LANG2</constant> and
-<constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para>
-<para>If multiple frequency bands are supported, then
-<structfield>capability</structfield> is the union of all
-<structfield>capability</structfield> fields of each &v4l2-frequency-band;.
-</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangelow</structfield></entry>
- <entry spanname="hspan">The lowest tunable frequency in
-units of 62.5 kHz, or if the <structfield>capability</structfield>
-flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz, or if the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set, in units of 1 Hz.
-If multiple frequency bands are supported, then
-<structfield>rangelow</structfield> is the lowest frequency
-of all the frequency bands.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangehigh</structfield></entry>
- <entry spanname="hspan">The highest tunable frequency in
-units of 62.5 kHz, or if the <structfield>capability</structfield>
-flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz, or if the <structfield>capability</structfield> flag
-<constant>V4L2_TUNER_CAP_1HZ</constant> is set, in units of 1 Hz.
-If multiple frequency bands are supported, then
-<structfield>rangehigh</structfield> is the highest frequency
-of all the frequency bands.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rxsubchans</structfield></entry>
- <entry spanname="hspan"><para>Some tuners or audio
-decoders can determine the received audio subprograms by analyzing
-audio carriers, pilot tones or other indicators. To pass this
-information drivers set flags defined in <xref
- linkend="tuner-rxsubchans" /> in this field. For
-example:</para></entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><constant>V4L2_TUNER_SUB_MONO</constant></entry>
- <entry>receiving mono audio</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><constant>STEREO | SAP</constant></entry>
- <entry>receiving stereo audio and a secondary audio
-program</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><constant>MONO | STEREO</constant></entry>
- <entry>receiving mono or stereo audio, the hardware cannot
-distinguish</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><constant>LANG1 | LANG2</constant></entry>
- <entry>receiving bilingual audio</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry><constant>MONO | STEREO | LANG1 | LANG2</constant></entry>
- <entry>receiving mono, stereo or bilingual
-audio</entry>
- </row>
- <row>
- <entry></entry>
- <entry></entry>
- <entry spanname="hspan"><para>When the
-<constant>V4L2_TUNER_CAP_STEREO</constant>,
-<constant>_LANG1</constant>, <constant>_LANG2</constant> or
-<constant>_SAP</constant> flag is cleared in the
-<structfield>capability</structfield> field, the corresponding
-<constant>V4L2_TUNER_SUB_</constant> flag must not be set
-here.</para><para>This field is valid only if this is the tuner of the
-current video input, or when the structure refers to a radio
-tuner.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>audmode</structfield></entry>
- <entry spanname="hspan"><para>The selected audio mode, see
-<xref linkend="tuner-audmode" /> for valid values. The audio mode does
-not affect audio subprogram detection, and like a <link
-linkend="control">control</link> it does not automatically change
-unless the requested mode is invalid or unsupported. See <xref
- linkend="tuner-matrix" /> for possible results when
-the selected and received audio programs do not
-match.</para><para>Currently this is the only field of struct
-<structname>v4l2_tuner</structname> applications can
-change.</para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>signal</structfield></entry>
- <entry spanname="hspan">The signal strength if known, ranging
-from 0 to 65535. Higher values indicate a better signal.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>afc</structfield></entry>
- <entry spanname="hspan">Automatic frequency control: When the
-<structfield>afc</structfield> value is negative, the frequency is too
-low, when positive too high.<!-- FIXME need example what to do when it never
-settles at zero, &ie; range is what? --></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
- <entry spanname="hspan">Reserved for future extensions. Drivers and
-applications must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-tuner-type">
- <title>enum v4l2_tuner_type</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TUNER_RADIO</constant></entry>
- <entry>1</entry>
- <entry></entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_ANALOG_TV</constant></entry>
- <entry>2</entry>
- <entry></entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SDR</constant></entry>
- <entry>4</entry>
- <entry></entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_RF</constant></entry>
- <entry>5</entry>
- <entry></entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="tuner-capability">
- <title>Tuner and Modulator Capability Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TUNER_CAP_LOW</constant></entry>
- <entry>0x0001</entry>
- <entry>When set, tuning frequencies are expressed in units of
-62.5 Hz instead of 62.5 kHz.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_NORM</constant></entry>
- <entry>0x0002</entry>
- <entry>This is a multi-standard tuner; the video standard
-can or must be switched. (B/G PAL tuners for example are typically not
- considered multi-standard because the video standard is automatically
- determined from the frequency band.) The set of supported video
- standards is available from the &v4l2-input; pointing to this tuner,
- see the description of ioctl &VIDIOC-ENUMINPUT; for details. Only
- <constant>V4L2_TUNER_ANALOG_TV</constant> tuners can have this capability.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_HWSEEK_BOUNDED</constant></entry>
- <entry>0x0004</entry>
- <entry>If set, then this tuner supports the hardware seek functionality
- where the seek stops when it reaches the end of the frequency range.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_HWSEEK_WRAP</constant></entry>
- <entry>0x0008</entry>
- <entry>If set, then this tuner supports the hardware seek functionality
- where the seek wraps around when it reaches the end of the frequency range.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_STEREO</constant></entry>
- <entry>0x0010</entry>
- <entry>Stereo audio reception is supported.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_LANG1</constant></entry>
- <entry>0x0040</entry>
- <entry>Reception of the primary language of a bilingual
-audio program is supported. Bilingual audio is a feature of
-two-channel systems, transmitting the primary language monaural on the
-main audio carrier and a secondary language monaural on a second
-carrier. Only
- <constant>V4L2_TUNER_ANALOG_TV</constant> tuners can have this capability.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_LANG2</constant></entry>
- <entry>0x0020</entry>
- <entry>Reception of the secondary language of a bilingual
-audio program is supported. Only
- <constant>V4L2_TUNER_ANALOG_TV</constant> tuners can have this capability.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_SAP</constant></entry>
- <entry>0x0020</entry>
- <entry><para>Reception of a secondary audio program is
-supported. This is a feature of the BTSC system which accompanies the
-NTSC video standard. Two audio carriers are available for mono or
-stereo transmissions of a primary language, and an independent third
-carrier for a monaural secondary language. Only
- <constant>V4L2_TUNER_ANALOG_TV</constant> tuners can have this capability.</para><para>Note the
-<constant>V4L2_TUNER_CAP_LANG2</constant> and
-<constant>V4L2_TUNER_CAP_SAP</constant> flags are synonyms.
-<constant>V4L2_TUNER_CAP_SAP</constant> applies when the tuner
-supports the <constant>V4L2_STD_NTSC_M</constant> video
-standard.</para><!-- FIXME what if PAL+NTSC and Bi but not SAP? --></entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_RDS</constant></entry>
- <entry>0x0080</entry>
- <entry>RDS capture is supported. This capability is only valid for
-radio tuners.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_RDS_BLOCK_IO</constant></entry>
- <entry>0x0100</entry>
- <entry>The RDS data is passed as unparsed RDS blocks.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_RDS_CONTROLS</constant></entry>
- <entry>0x0200</entry>
- <entry>The RDS data is parsed by the hardware and set via controls.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_FREQ_BANDS</constant></entry>
- <entry>0x0400</entry>
- <entry>The &VIDIOC-ENUM-FREQ-BANDS; ioctl can be used to enumerate
- the available frequency bands.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_HWSEEK_PROG_LIM</constant></entry>
- <entry>0x0800</entry>
- <entry>The range to search when using the hardware seek functionality
- is programmable, see &VIDIOC-S-HW-FREQ-SEEK; for details.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_CAP_1HZ</constant></entry>
- <entry>0x1000</entry>
- <entry>When set, tuning frequencies are expressed in units of 1 Hz instead of 62.5 kHz.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="tuner-rxsubchans">
- <title>Tuner Audio Reception Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TUNER_SUB_MONO</constant></entry>
- <entry>0x0001</entry>
- <entry>The tuner receives a mono audio signal.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_STEREO</constant></entry>
- <entry>0x0002</entry>
- <entry>The tuner receives a stereo audio signal.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_LANG1</constant></entry>
- <entry>0x0008</entry>
- <entry>The tuner receives the primary language of a
-bilingual audio signal. Drivers must clear this flag when the current
-video standard is <constant>V4L2_STD_NTSC_M</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_LANG2</constant></entry>
- <entry>0x0004</entry>
- <entry>The tuner receives the secondary language of a
-bilingual audio signal (or a second audio program).</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_SAP</constant></entry>
- <entry>0x0004</entry>
- <entry>The tuner receives a Second Audio Program. Note the
-<constant>V4L2_TUNER_SUB_LANG2</constant> and
-<constant>V4L2_TUNER_SUB_SAP</constant> flags are synonyms. The
-<constant>V4L2_TUNER_SUB_SAP</constant> flag applies when the
-current video standard is <constant>V4L2_STD_NTSC_M</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_SUB_RDS</constant></entry>
- <entry>0x0010</entry>
- <entry>The tuner receives an RDS channel.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="tuner-audmode">
- <title>Tuner Audio Modes</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_TUNER_MODE_MONO</constant></entry>
- <entry>0</entry>
- <entry>Play mono audio. When the tuner receives a stereo
-signal this is a down-mix of the left and right channels. When the tuner
-receives a bilingual or SAP signal this mode selects the primary
-language.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_MODE_STEREO</constant></entry>
- <entry>1</entry>
- <entry><para>Play stereo audio. When the tuner receives
-bilingual audio it may play different languages on the left and right
-channel or the primary language is played on both channels.</para><para>Playing
-different languages in this mode is
-deprecated. New drivers should do this only in
-<constant>MODE_LANG1_LANG2</constant>.</para><para>When the tuner
-receives no stereo signal or does not support stereo reception the
-driver shall fall back to <constant>MODE_MONO</constant>.</para></entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_MODE_LANG1</constant></entry>
- <entry>3</entry>
- <entry>Play the primary language, mono or stereo. Only
-<constant>V4L2_TUNER_ANALOG_TV</constant> tuners support this
-mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_MODE_LANG2</constant></entry>
- <entry>2</entry>
- <entry>Play the secondary language, mono. When the tuner
-receives no bilingual audio or SAP, or their reception is not
-supported the driver shall fall back to mono or stereo mode. Only
-<constant>V4L2_TUNER_ANALOG_TV</constant> tuners support this
-mode.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_MODE_SAP</constant></entry>
- <entry>2</entry>
- <entry>Play the Second Audio Program. When the tuner
-receives no bilingual audio or SAP, or their reception is not
-supported the driver shall fall back to mono or stereo mode. Only
-<constant>V4L2_TUNER_ANALOG_TV</constant> tuners support this mode.
-Note the <constant>V4L2_TUNER_MODE_LANG2</constant> and
-<constant>V4L2_TUNER_MODE_SAP</constant> are synonyms.</entry>
- </row>
- <row>
- <entry><constant>V4L2_TUNER_MODE_LANG1_LANG2</constant></entry>
- <entry>4</entry>
- <entry>Play the primary language on the left channel, the
-secondary language on the right channel. When the tuner receives no
-bilingual audio or SAP, it shall fall back to
-<constant>MODE_LANG1</constant> or <constant>MODE_MONO</constant>.
-Only <constant>V4L2_TUNER_ANALOG_TV</constant> tuners support this
-mode.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="all" id="tuner-matrix">
- <title>Tuner Audio Matrix</title>
- <tgroup cols="6" align="center">
- <colspec align="left" />
- <colspec colname="c2" colwidth="1*" />
- <colspec colwidth="1*" />
- <colspec colwidth="1*" />
- <colspec colnum="6" colname="c6" colwidth="1*" />
- <spanspec namest="c2" nameend="c6" spanname="hspan" align="center" />
- <thead>
- <row>
- <entry></entry>
- <entry spanname="hspan">Selected
-<constant>V4L2_TUNER_MODE_</constant></entry>
- </row>
- <row>
- <entry>Received <constant>V4L2_TUNER_SUB_</constant></entry>
- <entry><constant>MONO</constant></entry>
- <entry><constant>STEREO</constant></entry>
- <entry><constant>LANG1</constant></entry>
- <entry><constant>LANG2 = SAP</constant></entry>
- <entry><constant>LANG1_LANG2</constant><footnote><para>This
-mode has been added in Linux 2.6.17 and may not be supported by older
-drivers.</para></footnote></entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>MONO</constant></entry>
- <entry>Mono</entry>
- <entry>Mono/Mono</entry>
- <entry>Mono</entry>
- <entry>Mono</entry>
- <entry>Mono/Mono</entry>
- </row>
- <row>
- <entry><constant>MONO | SAP</constant></entry>
- <entry>Mono</entry>
- <entry>Mono/Mono</entry>
- <entry>Mono</entry>
- <entry>SAP</entry>
- <entry>Mono/SAP (preferred) or Mono/Mono</entry>
- </row>
- <row>
- <entry><constant>STEREO</constant></entry>
- <entry>L+R</entry>
- <entry>L/R</entry>
- <entry>Stereo L/R (preferred) or Mono L+R</entry>
- <entry>Stereo L/R (preferred) or Mono L+R</entry>
- <entry>L/R (preferred) or L+R/L+R</entry>
- </row>
- <row>
- <entry><constant>STEREO | SAP</constant></entry>
- <entry>L+R</entry>
- <entry>L/R</entry>
- <entry>Stereo L/R (preferred) or Mono L+R</entry>
- <entry>SAP</entry>
- <entry>L+R/SAP (preferred) or L/R or L+R/L+R</entry>
- </row>
- <row>
- <entry><constant>LANG1 | LANG2</constant></entry>
- <entry>Language&nbsp;1</entry>
- <entry>Lang1/Lang2 (deprecated<footnote><para>Playback of
-both languages in <constant>MODE_STEREO</constant> is deprecated. In
-the future drivers should produce only the primary language in this
-mode. Applications should request
-<constant>MODE_LANG1_LANG2</constant> to record both languages or a
-stereo signal.</para></footnote>) or
-Lang1/Lang1</entry>
- <entry>Language&nbsp;1</entry>
- <entry>Language&nbsp;2</entry>
- <entry>Lang1/Lang2 (preferred) or Lang1/Lang1</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-tuner; <structfield>index</structfield> is
-out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
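For quick reference, a minimal sketch of selecting stereo audio on tuner 0 with VIDIOC_G_TUNER/VIDIOC_S_TUNER; fd is assumed to be an open device whose current input uses tuner 0, and prefer_stereo is an illustrative helper name:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Switch audmode to stereo if the tuner reports stereo reception. */
static int prefer_stereo(int fd)
{
        struct v4l2_tuner tuner;

        memset(&tuner, 0, sizeof(tuner));
        tuner.index = 0;
        if (ioctl(fd, VIDIOC_G_TUNER, &tuner) < 0)
                return -1;      /* EINVAL: tuner index out of bounds */
        if (!(tuner.rxsubchans & V4L2_TUNER_SUB_STEREO))
                return 0;       /* no stereo signal received, keep mode */
        tuner.audmode = V4L2_TUNER_MODE_STEREO;
        return ioctl(fd, VIDIOC_S_TUNER, &tuner);
}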
diff --git a/Documentation/DocBook/media/v4l/vidioc-log-status.xml b/Documentation/DocBook/media/v4l/vidioc-log-status.xml
deleted file mode 100644
index 5ded7d35e27b..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-log-status.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<refentry id="vidioc-log-status">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_LOG_STATUS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_LOG_STATUS</refname>
- <refpurpose>Log driver status information</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Description</title>
-
- <para>As the video/audio devices become more complicated it
-becomes harder to debug problems. When this ioctl is called the driver
-will output the current device status to the kernel log. This is
-particularly useful when dealing with problems like no sound, no video
-and incorrectly tuned channels. Also many modern devices autodetect
-video and audio standards and this ioctl will report what the device
-thinks the standard is. Mismatches may give an indication where
-the problem is.</para>
-
- <para>This ioctl is optional and not all drivers support it. It
-was introduced in Linux 2.6.15.</para>
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
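For quick reference, a minimal sketch of triggering a status dump with VIDIOC_LOG_STATUS; fd is assumed to be an open V4L2 device and log_device_status is an illustrative helper name:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the driver to print its current status to the kernel log; read it
 * back with dmesg. The ioctl is optional, so failure is not fatal. */
static int log_device_status(int fd)
{
        return ioctl(fd, VIDIOC_LOG_STATUS);
}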
diff --git a/Documentation/DocBook/media/v4l/vidioc-overlay.xml b/Documentation/DocBook/media/v4l/vidioc-overlay.xml
deleted file mode 100644
index 250a7de1877f..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-overlay.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<refentry id="vidioc-overlay">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_OVERLAY</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_OVERLAY</refname>
- <refpurpose>Start or stop video overlay</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const int *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_OVERLAY</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl is part of the <link linkend="overlay">video
- overlay</link> I/O method. Applications call
- <constant>VIDIOC_OVERLAY</constant> to start or stop the
- overlay. It takes a pointer to an integer which the application
- must set to zero to stop the overlay or to one to start it.</para>
-
- <para>Drivers do not support &VIDIOC-STREAMON; or
-&VIDIOC-STREAMOFF; with <constant>V4L2_BUF_TYPE_VIDEO_OVERLAY</constant>.</para>
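-
- <para>A minimal sketch of starting and stopping the overlay (the helper
-name is only an example), assuming the overlay parameters have already been
-set up as described in <xref linkend="overlay" />:</para>
-
- <programlisting>
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Start (on != 0) or stop (on == 0) the video overlay. */
-int set_overlay(int fd, int on)
-{
-        int arg = on ? 1 : 0;
-
-        if (ioctl(fd, VIDIOC_OVERLAY, &amp;arg) == -1) {
-                perror("VIDIOC_OVERLAY");
-                return -1;
-        }
-        return 0;
-}
-</programlisting>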
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The overlay parameters have not been set up. See <xref
-linkend="overlay" /> for the necessary steps.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml b/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
deleted file mode 100644
index 7bde698760e4..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-prepare-buf.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<refentry id="vidioc-prepare-buf">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_PREPARE_BUF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_PREPARE_BUF</refname>
- <refpurpose>Prepare a buffer for I/O</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_buffer *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_PREPARE_BUF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Applications can optionally call the
-<constant>VIDIOC_PREPARE_BUF</constant> ioctl to pass ownership of the buffer
-to the driver before actually enqueuing it, using the
-<constant>VIDIOC_QBUF</constant> ioctl, and to prepare it for future I/O.
-Such preparations may include cache invalidation or cleaning. Performing them
-in advance saves time during the actual I/O. In case such cache operations are
-not required, the application can use one of the
-<constant>V4L2_BUF_FLAG_NO_CACHE_INVALIDATE</constant> and
-<constant>V4L2_BUF_FLAG_NO_CACHE_CLEAN</constant> flags to skip the respective
-step.</para>
-
- <para>The <structname>v4l2_buffer</structname> structure is
-specified in <xref linkend="buffer" />.</para>
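-
- <para>A minimal sketch (the helper is illustrative only), assuming
-memory-mapped capture buffers have already been allocated with
-&VIDIOC-REQBUFS; and <parameter>index</parameter> identifies one of
-them:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Prepare one previously allocated MMAP capture buffer for future I/O. */
-int prepare_buffer(int fd, unsigned int index)
-{
-        struct v4l2_buffer buf;
-
-        memset(&amp;buf, 0, sizeof(buf));
-        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        buf.memory = V4L2_MEMORY_MMAP;
-        buf.index = index;
-
-        if (ioctl(fd, VIDIOC_PREPARE_BUF, &amp;buf) == -1) {
-                perror("VIDIOC_PREPARE_BUF");
-                return -1;
-        }
-        return 0;
-}
-</programlisting>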
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>File I/O is in progress.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The buffer <structfield>type</structfield> is not
-supported, or the <structfield>index</structfield> is out of bounds,
-or no buffers have been allocated yet, or the
-<structfield>userptr</structfield> or
-<structfield>length</structfield> are invalid.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-qbuf.xml b/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
deleted file mode 100644
index 8b98a0e421fc..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
+++ /dev/null
@@ -1,202 +0,0 @@
-<refentry id="vidioc-qbuf">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_QBUF, VIDIOC_DQBUF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_QBUF</refname>
- <refname>VIDIOC_DQBUF</refname>
- <refpurpose>Exchange a buffer with the driver</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_buffer *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_QBUF, VIDIOC_DQBUF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Applications call the <constant>VIDIOC_QBUF</constant> ioctl
-to enqueue an empty (capturing) or filled (output) buffer in the
-driver's incoming queue. The semantics depend on the selected I/O
-method.</para>
-
- <para>To enqueue a buffer applications set the <structfield>type</structfield>
-field of a &v4l2-buffer; to the same buffer type as was previously used
-with &v4l2-format; <structfield>type</structfield> and &v4l2-requestbuffers;
-<structfield>type</structfield>. Applications must also set the
-<structfield>index</structfield> field. Valid index numbers range from
-zero to the number of buffers allocated with &VIDIOC-REQBUFS;
-(&v4l2-requestbuffers; <structfield>count</structfield>) minus one. The
-contents of the struct <structname>v4l2_buffer</structname> returned
-by a &VIDIOC-QUERYBUF; ioctl will do as well. When the buffer is
-intended for output (<structfield>type</structfield> is
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT</constant>,
-<constant>V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE</constant>, or
-<constant>V4L2_BUF_TYPE_VBI_OUTPUT</constant>) applications must also
-initialize the <structfield>bytesused</structfield>,
-<structfield>field</structfield> and
-<structfield>timestamp</structfield> fields, see <xref
-linkend="buffer" /> for details.
-Applications must also set <structfield>flags</structfield> to 0.
-The <structfield>reserved2</structfield> and
-<structfield>reserved</structfield> fields must be set to 0. When using
-the <link linkend="planar-apis">multi-planar API</link>, the
-<structfield>m.planes</structfield> field must contain a userspace pointer
-to a filled-in array of &v4l2-plane; and the <structfield>length</structfield>
-field must be set to the number of elements in that array.
-</para>
-
- <para>To enqueue a <link linkend="mmap">memory mapped</link>
-buffer applications set the <structfield>memory</structfield>
-field to <constant>V4L2_MEMORY_MMAP</constant>. When
-<constant>VIDIOC_QBUF</constant> is called with a pointer to this
-structure the driver sets the
-<constant>V4L2_BUF_FLAG_MAPPED</constant> and
-<constant>V4L2_BUF_FLAG_QUEUED</constant> flags and clears the
-<constant>V4L2_BUF_FLAG_DONE</constant> flag in the
-<structfield>flags</structfield> field, or it returns an
-&EINVAL;.</para>
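-
- <para>A possible sketch of the memory-mapped case (helper name and loop
-are illustrative only), enqueuing all <structfield>count</structfield>
-buffers granted by &VIDIOC-REQBUFS;:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Enqueue all memory-mapped capture buffers after VIDIOC_REQBUFS. */
-int queue_all_buffers(int fd, unsigned int count)
-{
-        struct v4l2_buffer buf;
-        unsigned int i;
-
-        for (i = 0; i &lt; count; i++) {
-                memset(&amp;buf, 0, sizeof(buf));
-                buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-                buf.memory = V4L2_MEMORY_MMAP;
-                buf.index = i;
-                if (ioctl(fd, VIDIOC_QBUF, &amp;buf) == -1) {
-                        perror("VIDIOC_QBUF");
-                        return -1;
-                }
-        }
-        return 0;
-}
-</programlisting>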
-
- <para>To enqueue a <link linkend="userp">user pointer</link>
-buffer applications set the <structfield>memory</structfield>
-field to <constant>V4L2_MEMORY_USERPTR</constant>, the
-<structfield>m.userptr</structfield> field to the address of the
-buffer and <structfield>length</structfield> to its size. When the multi-planar
-API is used, <structfield>m.userptr</structfield> and
-<structfield>length</structfield> members of the passed array of &v4l2-plane;
-have to be used instead. When <constant>VIDIOC_QBUF</constant> is called with
-a pointer to this structure the driver sets the
-<constant>V4L2_BUF_FLAG_QUEUED</constant> flag and clears the
-<constant>V4L2_BUF_FLAG_MAPPED</constant> and
-<constant>V4L2_BUF_FLAG_DONE</constant> flags in the
-<structfield>flags</structfield> field, or it returns an error code.
-This ioctl locks the memory pages of the buffer in physical memory,
-so they cannot be swapped out to disk. Buffers remain locked until
-dequeued, until the &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; ioctl is
-called, or until the device is closed.</para>
-
- <para>To enqueue a <link linkend="dmabuf">DMABUF</link> buffer applications
-set the <structfield>memory</structfield> field to
-<constant>V4L2_MEMORY_DMABUF</constant> and the <structfield>m.fd</structfield>
-field to a file descriptor associated with a DMABUF buffer. When the
-multi-planar API is used the <structfield>m.fd</structfield> fields of the
-passed array of &v4l2-plane; have to be used instead. When
-<constant>VIDIOC_QBUF</constant> is called with a pointer to this structure the
-driver sets the <constant>V4L2_BUF_FLAG_QUEUED</constant> flag and clears the
-<constant>V4L2_BUF_FLAG_MAPPED</constant> and
-<constant>V4L2_BUF_FLAG_DONE</constant> flags in the
-<structfield>flags</structfield> field, or it returns an error code. This
-ioctl locks the buffer. Locking a buffer means passing it to a driver for a
-hardware access (usually DMA). If an application accesses (reads/writes) a
-locked buffer then the result is undefined. Buffers remain locked until
-dequeued, until the &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; ioctl is called, or
-until the device is closed.</para>
-
- <para>Applications call the <constant>VIDIOC_DQBUF</constant>
-ioctl to dequeue a filled (capturing) or displayed (output) buffer
-from the driver's outgoing queue. They just set the
-<structfield>type</structfield>, <structfield>memory</structfield>
-and <structfield>reserved</structfield>
-fields of a &v4l2-buffer; as above. When <constant>VIDIOC_DQBUF</constant>
-is called with a pointer to this structure the driver fills the
-remaining fields or returns an error code. The driver may also set
-<constant>V4L2_BUF_FLAG_ERROR</constant> in the <structfield>flags</structfield>
-field. It indicates a non-critical (recoverable) streaming error. In that case
-the application may continue as normal, but should be aware that data in the
-dequeued buffer might be corrupted. When using the multi-planar API, the
-planes array must be passed in as well.</para>
-
- <para>By default <constant>VIDIOC_DQBUF</constant> blocks when no
-buffer is in the outgoing queue. When the
-<constant>O_NONBLOCK</constant> flag was given to the &func-open;
-function, <constant>VIDIOC_DQBUF</constant> returns immediately
-with an &EAGAIN; when no buffer is available.</para>
-
- <para>The <structname>v4l2_buffer</structname> structure is
-specified in <xref linkend="buffer" />.</para>
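-
- <para>A minimal sketch of dequeuing one filled memory-mapped capture
-buffer (the helper is illustrative only and assumes streaming has been
-started); it returns the index of the dequeued buffer, or -1 on
-error:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Dequeue one filled capture buffer; returns its index or -1 on error. */
-int dequeue_buffer(int fd)
-{
-        struct v4l2_buffer buf;
-
-        memset(&amp;buf, 0, sizeof(buf));
-        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        buf.memory = V4L2_MEMORY_MMAP;
-
-        if (ioctl(fd, VIDIOC_DQBUF, &amp;buf) == -1) {
-                perror("VIDIOC_DQBUF");
-                return -1;
-        }
-        if (buf.flags &amp; V4L2_BUF_FLAG_ERROR)
-                fprintf(stderr, "recoverable error, data may be corrupt\n");
-        return buf.index;
-}
-</programlisting>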
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EAGAIN</errorcode></term>
- <listitem>
- <para>Non-blocking I/O has been selected using
-<constant>O_NONBLOCK</constant> and no buffer was in the outgoing
-queue.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The buffer <structfield>type</structfield> is not
-supported, or the <structfield>index</structfield> is out of bounds,
-or no buffers have been allocated yet, or the
-<structfield>userptr</structfield> or
-<structfield>length</structfield> are invalid.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EIO</errorcode></term>
- <listitem>
- <para><constant>VIDIOC_DQBUF</constant> failed due to an
-internal error. Can also indicate temporary problems like signal
-loss. Note that the driver might dequeue an (empty) buffer despite
-returning an error, or even stop capturing. Reusing such a buffer may be
-unsafe, though, and its details (e.g. <structfield>index</structfield>) may not be
-returned either. It is recommended that drivers indicate recoverable errors
-by setting the <constant>V4L2_BUF_FLAG_ERROR</constant> and returning 0 instead.
-In that case the application should be able to safely reuse the buffer and
-continue streaming.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EPIPE</errorcode></term>
- <listitem>
- <para><constant>VIDIOC_DQBUF</constant> returns this on an empty
-capture queue for mem2mem codecs if a buffer with the
-<constant>V4L2_BUF_FLAG_LAST</constant> flag set was already dequeued and no new buffers
-are expected to become available.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
deleted file mode 100644
index d41bf47ee5a2..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
+++ /dev/null
@@ -1,115 +0,0 @@
-<refentry id="vidioc-query-dv-timings">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_QUERY_DV_TIMINGS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_QUERY_DV_TIMINGS</refname>
- <refname>VIDIOC_SUBDEV_QUERY_DV_TIMINGS</refname>
- <refpurpose>Sense the DV timings received by the current
-input</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_dv_timings *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_QUERY_DV_TIMINGS, VIDIOC_SUBDEV_QUERY_DV_TIMINGS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The hardware may be able to detect the current DV timings
-automatically, similar to sensing the video standard. To do so, applications
-call <constant>VIDIOC_QUERY_DV_TIMINGS</constant> with a pointer to a
-&v4l2-dv-timings;. Once the hardware detects the timings, the driver fills in the
-timings structure.</para>
-
-<para>Please note that drivers shall <emphasis>not</emphasis> switch timings automatically
-if new timings are detected. Instead, drivers should send the
-<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect
-that userspace will take action by calling <constant>VIDIOC_QUERY_DV_TIMINGS</constant>.
-The reason is that new timings usually mean different buffer sizes as well, and you
-cannot change buffer sizes on the fly. In general, applications that receive the
-Source Change event will have to call <constant>VIDIOC_QUERY_DV_TIMINGS</constant>,
-and if the detected timings are valid they will have to stop streaming, set the new
-timings, allocate new buffers and start streaming again.</para>
-
-<para>If the timings could not be detected because there was no signal, then
-<errorcode>ENOLINK</errorcode> is returned. If a signal was detected, but
-it was unstable and the receiver could not lock to the signal, then
-<errorcode>ENOLCK</errorcode> is returned. If the receiver could lock to the signal,
-but the format is unsupported (e.g. because the pixelclock is out of range
-of the hardware capabilities), then the driver fills in whatever timings it
-could find and returns <errorcode>ERANGE</errorcode>. In that case the application
-can call &VIDIOC-DV-TIMINGS-CAP; to compare the found timings with the hardware's
-capabilities in order to give more precise feedback to the user.
-</para>
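-
- <para>A minimal sketch (illustrative only) that queries the detected
-timings and distinguishes the error codes described above:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;errno.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Detect the incoming DV timings and report why detection failed. */
-int query_timings(int fd, struct v4l2_dv_timings *t)
-{
-        memset(t, 0, sizeof(*t));
-        if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, t) == 0)
-                return 0;       /* timings detected and filled in */
-        if (errno == ENOLINK)
-                fprintf(stderr, "no signal\n");
-        else if (errno == ENOLCK)
-                fprintf(stderr, "signal unstable, no lock\n");
-        else if (errno == ERANGE)
-                fprintf(stderr, "signal out of hardware range\n");
-        else
-                perror("VIDIOC_QUERY_DV_TIMINGS");
-        return -1;
-}
-</programlisting>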
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>Digital video timings are not supported for this input or output.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOLINK</errorcode></term>
- <listitem>
- <para>No timings could be detected because no signal was found.
-</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOLCK</errorcode></term>
- <listitem>
- <para>The signal was unstable and the hardware could not lock on to it.
-</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ERANGE</errorcode></term>
- <listitem>
- <para>Timings were found, but they are out of range of the hardware
-capabilities.
-</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querybuf.xml b/Documentation/DocBook/media/v4l/vidioc-querybuf.xml
deleted file mode 100644
index 50bfcb5e8508..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-querybuf.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<refentry id="vidioc-querybuf">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_QUERYBUF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_QUERYBUF</refname>
- <refpurpose>Query the status of a buffer</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_buffer *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_QUERYBUF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl is part of the <link linkend="mmap">streaming
-</link> I/O method. It can be used to query the status of a
-buffer at any time after buffers have been allocated with the
-&VIDIOC-REQBUFS; ioctl.</para>
-
- <para>Applications set the <structfield>type</structfield> field
- of a &v4l2-buffer; to the same buffer type as was previously used with
-&v4l2-format; <structfield>type</structfield> and &v4l2-requestbuffers;
-<structfield>type</structfield>, and the <structfield>index</structfield>
- field. Valid index numbers range from zero
-to the number of buffers allocated with &VIDIOC-REQBUFS;
- (&v4l2-requestbuffers; <structfield>count</structfield>) minus one.
-The <structfield>reserved</structfield> and <structfield>reserved2 </structfield>
-fields must be set to 0.
-When using the <link linkend="planar-apis">multi-planar API</link>, the
-<structfield>m.planes</structfield> field must contain a userspace pointer to an
-array of &v4l2-plane; and the <structfield>length</structfield> field has
-to be set to the number of elements in that array.
-After calling <constant>VIDIOC_QUERYBUF</constant> with a pointer to
- this structure drivers return an error code or fill the rest of
-the structure.</para>
-
- <para>In the <structfield>flags</structfield> field the
-<constant>V4L2_BUF_FLAG_MAPPED</constant>,
-<constant>V4L2_BUF_FLAG_PREPARED</constant>,
-<constant>V4L2_BUF_FLAG_QUEUED</constant> and
-<constant>V4L2_BUF_FLAG_DONE</constant> flags will be valid. The
-<structfield>memory</structfield> field will be set to the current
-I/O method. For the single-planar API, the <structfield>m.offset</structfield>
-field contains the offset of the buffer from the start of the device memory
-and the <structfield>length</structfield> field its size. For the multi-planar API,
-fields <structfield>m.mem_offset</structfield> and
-<structfield>length</structfield> in the <structfield>m.planes</structfield>
-array elements will be used instead and the <structfield>length</structfield>
-field of &v4l2-buffer; is set to the number of filled-in array elements.
-The driver may or may not set the remaining fields and flags; they are
-meaningless in this context.</para>
-
- <para>The <structname>v4l2_buffer</structname> structure is
- specified in <xref linkend="buffer" />.</para>
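-
- <para>A possible sketch (illustrative only) that queries a single-planar
-memory-mapped buffer and maps it into the application's address space, as is
-typically done right after &VIDIOC-REQBUFS;:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;sys/mman.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Query buffer "index" and mmap() it; stores the size in *length. */
-void *map_buffer(int fd, unsigned int index, size_t *length)
-{
-        struct v4l2_buffer buf;
-        void *start;
-
-        memset(&amp;buf, 0, sizeof(buf));
-        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-        buf.index = index;
-
-        if (ioctl(fd, VIDIOC_QUERYBUF, &amp;buf) == -1) {
-                perror("VIDIOC_QUERYBUF");
-                return NULL;
-        }
-        start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
-                     MAP_SHARED, fd, buf.m.offset);
-        if (start == MAP_FAILED) {
-                perror("mmap");
-                return NULL;
-        }
-        *length = buf.length;
-        return start;
-}
-</programlisting>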
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The buffer <structfield>type</structfield> is not
-supported, or the <structfield>index</structfield> is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querycap.xml b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
deleted file mode 100644
index cd82148dedd7..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-querycap.xml
+++ /dev/null
@@ -1,350 +0,0 @@
-<refentry id="vidioc-querycap">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_QUERYCAP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_QUERYCAP</refname>
- <refpurpose>Query device capabilities</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_capability *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_QUERYCAP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>All V4L2 devices support the
-<constant>VIDIOC_QUERYCAP</constant> ioctl. It is used to identify
-kernel devices compatible with this specification and to obtain
-information about driver and hardware capabilities. The ioctl takes a
-pointer to a &v4l2-capability; which is filled by the driver. When the
-driver is not compatible with this specification the ioctl returns an
-&EINVAL;.</para>
-
- <table pgwide="1" frame="none" id="v4l2-capability">
- <title>struct <structname>v4l2_capability</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u8</entry>
- <entry><structfield>driver</structfield>[16]</entry>
- <entry><para>Name of the driver, a unique NUL-terminated
-ASCII string. For example: "bttv". Driver specific applications can
-use this information to verify the driver identity. It is also useful
-to work around known bugs, or to identify drivers in error reports.</para>
-<para>Storing strings in fixed-size arrays is bad
-practice but unavoidable here. Drivers and applications should take
-precautions to never read or write beyond the end of the array and to
-make sure the strings are properly NUL-terminated.</para></entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>card</structfield>[32]</entry>
- <entry>Name of the device, a NUL-terminated UTF-8 string.
-For example: "Yoyodyne TV/FM". One driver may support different brands
-or models of video hardware. This information is intended for users,
-for example in a menu of available devices. Since multiple TV cards of
-the same brand may be installed which are supported by the same
-driver, this name should be combined with the character device file
-name (&eg; <filename>/dev/video2</filename>) or the
-<structfield>bus_info</structfield> string to avoid
-ambiguities.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>bus_info</structfield>[32]</entry>
- <entry>Location of the device in the system, a
-NUL-terminated ASCII string. For example: "PCI:0000:05:06.0". This
-information is intended for users, to distinguish multiple
-identical devices. If no such information is available the field must
-simply count the devices controlled by the driver ("platform:vivi-000").
-The bus_info must start with "PCI:" for PCI boards, "PCIe:" for PCI Express boards,
-"usb-" for USB devices, "I2C:" for i2c devices, "ISA:" for ISA devices,
-"parport" for parallel port devices and "platform:" for platform devices.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>version</structfield></entry>
- <entry><para>Version number of the driver.</para>
-<para>Starting with kernel 3.1, the version reported is provided by the
-V4L2 subsystem following the kernel numbering scheme. However, it
-may not always return the same version as the kernel if, for example,
-a stable or distribution-modified kernel uses the V4L2 stack from a
-newer kernel.</para>
-<para>The version number is formatted using the
-<constant>KERNEL_VERSION()</constant> macro:</para></entry>
- </row>
- <row>
- <entry spanname="hspan"><para>
-<programlisting>
-#define KERNEL_VERSION(a,b,c) (((a) &lt;&lt; 16) + ((b) &lt;&lt; 8) + (c))
-
-__u32 version = KERNEL_VERSION(0, 8, 1);
-
-printf ("Version: %u.%u.%u\n",
- (version &gt;&gt; 16) &amp; 0xFF,
- (version &gt;&gt; 8) &amp; 0xFF,
- version &amp; 0xFF);
-</programlisting></para></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>capabilities</structfield></entry>
- <entry>Available capabilities of the physical device as a whole, see <xref
- linkend="device-capabilities" />. The same physical device can export
- multiple devices in /dev (e.g. /dev/videoX, /dev/vbiY and /dev/radioZ).
- The <structfield>capabilities</structfield> field should contain a union
- of all capabilities available across the several V4L2 devices exported
- to userspace.
- For all those devices the <structfield>capabilities</structfield> field
- returns the same set of capabilities. This allows applications to open
- just one of the devices (typically the video device) and discover whether
- video, vbi and/or radio are also supported.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>device_caps</structfield></entry>
- <entry>Device capabilities of the opened device, see <xref
- linkend="device-capabilities" />. Should contain the available capabilities
- of that specific device node. So, for example, <structfield>device_caps</structfield>
- of a radio device will only contain radio related capabilities and
- no video or vbi capabilities. This field is only set if the <structfield>capabilities</structfield>
- field contains the <constant>V4L2_CAP_DEVICE_CAPS</constant> capability.
- Only the <structfield>capabilities</structfield> field can have the
- <constant>V4L2_CAP_DEVICE_CAPS</constant> capability, <structfield>device_caps</structfield>
- will never set <constant>V4L2_CAP_DEVICE_CAPS</constant>.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[3]</entry>
- <entry>Reserved for future extensions. Drivers must set
-this array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="device-capabilities">
- <title>Device Capabilities Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CAP_VIDEO_CAPTURE</constant></entry>
- <entry>0x00000001</entry>
- <entry>The device supports the single-planar API through the <link
-linkend="capture">Video Capture</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_CAPTURE_MPLANE</constant></entry>
- <entry>0x00001000</entry>
- <entry>The device supports the
- <link linkend="planar-apis">multi-planar API</link> through the
- <link linkend="capture">Video Capture</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_OUTPUT</constant></entry>
- <entry>0x00000002</entry>
- <entry>The device supports the single-planar API through the <link
-linkend="output">Video Output</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_OUTPUT_MPLANE</constant></entry>
- <entry>0x00002000</entry>
- <entry>The device supports the
- <link linkend="planar-apis">multi-planar API</link> through the
- <link linkend="output">Video Output</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_M2M</constant></entry>
- <entry>0x00004000</entry>
- <entry>The device supports the single-planar API through the
- Video Memory-To-Memory interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant></entry>
- <entry>0x00008000</entry>
- <entry>The device supports the
- <link linkend="planar-apis">multi-planar API</link> through the
- Video Memory-To-Memory interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_OVERLAY</constant></entry>
- <entry>0x00000004</entry>
- <entry>The device supports the <link
-linkend="overlay">Video Overlay</link> interface. A video overlay device
-typically stores captured images directly in the video memory of a
-graphics card, with hardware clipping and scaling.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VBI_CAPTURE</constant></entry>
- <entry>0x00000010</entry>
- <entry>The device supports the <link linkend="raw-vbi">Raw
-VBI Capture</link> interface, providing Teletext and Closed Caption
-data.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VBI_OUTPUT</constant></entry>
- <entry>0x00000020</entry>
- <entry>The device supports the <link linkend="raw-vbi">Raw VBI Output</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_SLICED_VBI_CAPTURE</constant></entry>
- <entry>0x00000040</entry>
- <entry>The device supports the <link linkend="sliced">Sliced VBI Capture</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_SLICED_VBI_OUTPUT</constant></entry>
- <entry>0x00000080</entry>
- <entry>The device supports the <link linkend="sliced">Sliced VBI Output</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_RDS_CAPTURE</constant></entry>
- <entry>0x00000100</entry>
- <entry>The device supports the <link linkend="rds">RDS</link> capture interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_VIDEO_OUTPUT_OVERLAY</constant></entry>
- <entry>0x00000200</entry>
- <entry>The device supports the <link linkend="osd">Video
-Output Overlay</link> (OSD) interface. Unlike the <wordasword>Video
-Overlay</wordasword> interface, this is a secondary function of video
-output devices and overlays an image onto an outgoing video signal.
-When the driver sets this flag, it must clear the
-<constant>V4L2_CAP_VIDEO_OVERLAY</constant> flag and vice
-versa.<footnote><para>The &v4l2-framebuffer; lacks an
-&v4l2-buf-type; field, therefore the type of overlay is implied by the
-driver capabilities.</para></footnote></entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_HW_FREQ_SEEK</constant></entry>
- <entry>0x00000400</entry>
- <entry>The device supports the &VIDIOC-S-HW-FREQ-SEEK; ioctl for
-hardware frequency seeking.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_RDS_OUTPUT</constant></entry>
- <entry>0x00000800</entry>
- <entry>The device supports the <link linkend="rds">RDS</link> output interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_TUNER</constant></entry>
- <entry>0x00010000</entry>
- <entry>The device has some sort of tuner to
-receive RF-modulated video signals. For more information about
-tuner programming see
-<xref linkend="tuner" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_AUDIO</constant></entry>
- <entry>0x00020000</entry>
- <entry>The device has audio inputs or outputs. It may or
-may not support audio recording or playback, in PCM or compressed
-formats. PCM audio support must be implemented as ALSA or OSS
-interface. For more information on audio inputs and outputs see <xref
- linkend="audio" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_RADIO</constant></entry>
- <entry>0x00040000</entry>
- <entry>This is a radio receiver.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_MODULATOR</constant></entry>
- <entry>0x00080000</entry>
- <entry>The device has some sort of modulator to
-emit RF-modulated video/audio signals. For more information about
-modulator programming see
-<xref linkend="tuner" />.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_SDR_CAPTURE</constant></entry>
- <entry>0x00100000</entry>
- <entry>The device supports the
-<link linkend="sdr">SDR Capture</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_EXT_PIX_FORMAT</constant></entry>
- <entry>0x00200000</entry>
- <entry>The device supports the &v4l2-pix-format; extended
-fields.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_SDR_OUTPUT</constant></entry>
- <entry>0x00400000</entry>
- <entry>The device supports the
-<link linkend="sdr">SDR Output</link> interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_READWRITE</constant></entry>
- <entry>0x01000000</entry>
- <entry>The device supports the <link
-linkend="rw">read()</link> and/or <link linkend="rw">write()</link>
-I/O methods.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_ASYNCIO</constant></entry>
- <entry>0x02000000</entry>
- <entry>The device supports the <link
-linkend="async">asynchronous</link> I/O methods.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_STREAMING</constant></entry>
- <entry>0x04000000</entry>
- <entry>The device supports the <link
-linkend="mmap">streaming</link> I/O method.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CAP_DEVICE_CAPS</constant></entry>
- <entry>0x80000000</entry>
- <entry>The driver fills the <structfield>device_caps</structfield>
- field. This capability can only appear in the <structfield>capabilities</structfield>
- field and never in the <structfield>device_caps</structfield> field.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
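-
- <para>A minimal sketch (illustrative only) that prints the driver identity
-and checks whether the opened device node supports streaming video capture,
-preferring <structfield>device_caps</structfield> when the driver provides
-it:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Report the driver identity and verify streaming capture support. */
-int check_capabilities(int fd)
-{
-        struct v4l2_capability cap;
-        __u32 caps;
-
-        memset(&amp;cap, 0, sizeof(cap));
-        if (ioctl(fd, VIDIOC_QUERYCAP, &amp;cap) == -1) {
-                perror("VIDIOC_QUERYCAP");
-                return -1;
-        }
-        /* Prefer the per-node capabilities when available. */
-        caps = (cap.capabilities &amp; V4L2_CAP_DEVICE_CAPS) ?
-                cap.device_caps : cap.capabilities;
-
-        printf("driver %s, card %s, bus %s\n",
-               cap.driver, cap.card, cap.bus_info);
-        if (!(caps &amp; V4L2_CAP_VIDEO_CAPTURE) ||
-            !(caps &amp; V4L2_CAP_STREAMING)) {
-                fprintf(stderr, "no streaming video capture support\n");
-                return -1;
-        }
-        return 0;
-}
-</programlisting>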
- </refsect1>
-
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml b/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
deleted file mode 100644
index 55b7582cf314..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-queryctrl.xml
+++ /dev/null
@@ -1,661 +0,0 @@
-<refentry id="vidioc-queryctrl">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_QUERYCTRL, VIDIOC_QUERY_EXT_CTRL, VIDIOC_QUERYMENU</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_QUERYCTRL</refname>
- <refname>VIDIOC_QUERY_EXT_CTRL</refname>
- <refname>VIDIOC_QUERYMENU</refname>
- <refpurpose>Enumerate controls and menu control items</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_queryctrl *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_query_ext_ctrl *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_querymenu *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_QUERYCTRL, VIDIOC_QUERY_EXT_CTRL, VIDIOC_QUERYMENU</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To query the attributes of a control applications set the
-<structfield>id</structfield> field of a &v4l2-queryctrl; and call the
-<constant>VIDIOC_QUERYCTRL</constant> ioctl with a pointer to this
-structure. The driver fills the rest of the structure or returns an
-&EINVAL; when the <structfield>id</structfield> is invalid.</para>
-
- <para>It is possible to enumerate controls by calling
-<constant>VIDIOC_QUERYCTRL</constant> with successive
-<structfield>id</structfield> values starting from
-<constant>V4L2_CID_BASE</constant> up to, but excluding,
-<constant>V4L2_CID_LASTP1</constant>. Drivers may return
-<errorcode>EINVAL</errorcode> if a control in this range is not
-supported. Furthermore, applications can enumerate private controls, which
-are not defined in this specification, by starting at
-<constant>V4L2_CID_PRIVATE_BASE</constant> and incrementing
-<structfield>id</structfield> until the driver returns
-<errorcode>EINVAL</errorcode>.</para>
-
- <para>In both cases, when the driver sets the
-<constant>V4L2_CTRL_FLAG_DISABLED</constant> flag in the
-<structfield>flags</structfield> field this control is permanently
-disabled and should be ignored by the application.<footnote>
- <para><constant>V4L2_CTRL_FLAG_DISABLED</constant> was
-intended for two purposes: Drivers can skip predefined controls not
-supported by the hardware (although returning EINVAL would do as
-well), or disable predefined and private controls after hardware
-detection without the trouble of reordering control arrays and indices
-(EINVAL cannot be used to skip private controls because it would
-prematurely end the enumeration).</para></footnote></para>
-
- <para>When the application ORs <structfield>id</structfield> with
-<constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> the driver returns the
-next supported non-compound control, or <errorcode>EINVAL</errorcode>
-if there is none. In addition, the <constant>V4L2_CTRL_FLAG_NEXT_COMPOUND</constant>
-flag can be specified to enumerate all compound controls (i.e. controls
-with type &ge; <constant>V4L2_CTRL_COMPOUND_TYPES</constant> and/or array
-controls, in other words controls that contain more than one value).
-Specify both <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> and
-<constant>V4L2_CTRL_FLAG_NEXT_COMPOUND</constant> in order to enumerate
-all controls, compound or not. Drivers which do not support these flags yet
-always return <errorcode>EINVAL</errorcode>.</para>
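-
- <para>A minimal sketch (illustrative only) that enumerates all non-compound
-controls with <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant>; drivers that do
-not support the flag make the very first call fail with
-<errorcode>EINVAL</errorcode>:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Walk all enabled non-compound controls and print their names. */
-void enumerate_controls(int fd)
-{
-        struct v4l2_queryctrl qctrl;
-
-        memset(&amp;qctrl, 0, sizeof(qctrl));
-        qctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;
-        while (ioctl(fd, VIDIOC_QUERYCTRL, &amp;qctrl) == 0) {
-                if (!(qctrl.flags &amp; V4L2_CTRL_FLAG_DISABLED))
-                        printf("%s\n", qctrl.name);
-                qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
-        }
-}
-</programlisting>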
-
- <para>The <constant>VIDIOC_QUERY_EXT_CTRL</constant> ioctl was
-introduced in order to better support controls that can use compound
-types, and to expose additional control information that cannot be
-returned in &v4l2-queryctrl; since that structure is full.</para>
-
- <para><constant>VIDIOC_QUERY_EXT_CTRL</constant> is used in the
-same way as <constant>VIDIOC_QUERYCTRL</constant>, except that the
-<structfield>reserved</structfield> array must be zeroed as well.</para>
-
- <para>Additional information is required for menu controls: the
-names of the menu items. To query them applications set the
-<structfield>id</structfield> and <structfield>index</structfield>
-fields of &v4l2-querymenu; and call the
-<constant>VIDIOC_QUERYMENU</constant> ioctl with a pointer to this
-structure. The driver fills the rest of the structure or returns an
-&EINVAL; when the <structfield>id</structfield> or
-<structfield>index</structfield> is invalid. Menu items are enumerated
-by calling <constant>VIDIOC_QUERYMENU</constant> with successive
-<structfield>index</structfield> values from &v4l2-queryctrl;
-<structfield>minimum</structfield> to
-<structfield>maximum</structfield>, inclusive. Note that it is possible
-for <constant>VIDIOC_QUERYMENU</constant> to return an &EINVAL; for some
-indices between <structfield>minimum</structfield> and <structfield>maximum</structfield>.
-In that case that particular menu item is not supported by this driver. Also note that
-the <structfield>minimum</structfield> value is not necessarily 0.</para>
-
- <para>See also the examples in <xref linkend="control" />.</para>
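-
- <para>A minimal sketch (illustrative only) that lists the items of one menu
-control, given the &v4l2-queryctrl; information obtained with
-<constant>VIDIOC_QUERYCTRL</constant>; indices that return
-<errorcode>EINVAL</errorcode> are simply skipped:</para>
-
- <programlisting>
-#include &lt;string.h&gt;
-#include &lt;stdio.h&gt;
-#include &lt;sys/ioctl.h&gt;
-#include &lt;linux/videodev2.h&gt;
-
-/* Print the names of all supported items of a menu control. */
-void enumerate_menu(int fd, const struct v4l2_queryctrl *qctrl)
-{
-        struct v4l2_querymenu menu;
-        __s32 i;
-
-        for (i = qctrl-&gt;minimum; i &lt;= qctrl-&gt;maximum; i++) {
-                memset(&amp;menu, 0, sizeof(menu));
-                menu.id = qctrl-&gt;id;
-                menu.index = i;
-                if (ioctl(fd, VIDIOC_QUERYMENU, &amp;menu) == 0)
-                        printf("%d: %s\n", i, menu.name);
-        }
-}
-</programlisting>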
-
- <table pgwide="1" frame="none" id="v4l2-queryctrl">
- <title>struct <structname>v4l2_queryctrl</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry>Identifies the control, set by the application. See
-<xref linkend="control-id" /> for predefined IDs. When the ID is ORed
-with V4L2_CTRL_FLAG_NEXT_CTRL the driver clears the flag and returns
-the first control with a higher ID. Drivers which do not support this
-flag yet always return an &EINVAL;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of control, see <xref
- linkend="v4l2-ctrl-type" />.</entry>
- </row>
- <row>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the control, a NUL-terminated ASCII
-string. This information is intended for the user.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>minimum</structfield></entry>
- <entry>Minimum value, inclusive. This field gives a lower
-bound for the control. See &v4l2-ctrl-type; for how the minimum value is to
-be used for each possible control type. Note that this is a signed 32-bit value.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>maximum</structfield></entry>
- <entry>Maximum value, inclusive. This field gives an upper
-bound for the control. See &v4l2-ctrl-type; for how the maximum value is to
-be used for each possible control type. Note that this is a signed 32-bit value.</entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>step</structfield></entry>
- <entry><para>This field gives a step size for the control.
-See &v4l2-ctrl-type; for how the step value is to be used for each possible
-control type. Note that this is an unsigned 32-bit value.
-</para><para>Generally drivers should not scale hardware
-control values. It may be necessary for example when the
-<structfield>name</structfield> or <structfield>id</structfield> imply
-a particular unit and the hardware actually accepts only multiples of
-said unit. If so, drivers must take care values are properly rounded
-when scaling, such that errors will not accumulate on repeated
-read-write cycles.</para><para>This field gives the smallest change of
-an integer control actually affecting hardware. Often the information
-is needed when the user can change controls by keyboard or GUI
-buttons, rather than a slider. When for example a hardware register
-accepts values 0-511 and the driver reports 0-65535, step should be
-128.</para><para>Note that although signed, the step value is supposed to
-be always positive.</para></entry>
- </row>
- <row>
- <entry>__s32</entry>
- <entry><structfield>default_value</structfield></entry>
- <entry>The default value of a
-<constant>V4L2_CTRL_TYPE_INTEGER</constant>,
-<constant>_BOOLEAN</constant>, <constant>_BITMASK</constant>,
-<constant>_MENU</constant> or <constant>_INTEGER_MENU</constant> control.
-Not valid for other types of controls.
-Note that drivers reset controls to their default value only when the
-driver is first loaded, never afterwards.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Control flags, see <xref
- linkend="control-flags" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>Reserved for future extensions. Drivers must set
-the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-query-ext-ctrl">
- <title>struct <structname>v4l2_query_ext_ctrl</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry>Identifies the control, set by the application. See
-<xref linkend="control-id" /> for predefined IDs. When the ID is ORed
-with <constant>V4L2_CTRL_FLAG_NEXT_CTRL</constant> the driver clears the
-flag and returns the first non-compound control with a higher ID. When the
-ID is ORed with <constant>V4L2_CTRL_FLAG_NEXT_COMPOUND</constant> the driver
-clears the flag and returns the first compound control with a higher ID.
-Set both to get the first control (compound or not) with a higher ID.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of control, see <xref
- linkend="v4l2-ctrl-type" />.</entry>
- </row>
- <row>
- <entry>char</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the control, a NUL-terminated ASCII
-string. This information is intended for the user.</entry>
- </row>
- <row>
- <entry>__s64</entry>
- <entry><structfield>minimum</structfield></entry>
- <entry>Minimum value, inclusive. This field gives a lower
-bound for the control. See &v4l2-ctrl-type; for how the minimum value is to
-be used for each possible control type. Note that this is a signed 64-bit value.</entry>
- </row>
- <row>
- <entry>__s64</entry>
- <entry><structfield>maximum</structfield></entry>
- <entry>Maximum value, inclusive. This field gives an upper
-bound for the control. See &v4l2-ctrl-type; for how the maximum value is to
-be used for each possible control type. Note that this is a signed 64-bit value.</entry>
- </row>
- <row>
- <entry>__u64</entry>
- <entry><structfield>step</structfield></entry>
- <entry><para>This field gives a step size for the control.
-See &v4l2-ctrl-type; for how the step value is to be used for each possible
-control type. Note that this is an unsigned 64-bit value.
-</para><para>Generally drivers should not scale hardware
-control values. It may be necessary for example when the
-<structfield>name</structfield> or <structfield>id</structfield> imply
-a particular unit and the hardware actually accepts only multiples of
-said unit. If so, drivers must take care values are properly rounded
-when scaling, such that errors will not accumulate on repeated
-read-write cycles.</para><para>This field gives the smallest change of
-an integer control actually affecting hardware. Often the information
-is needed when the user can change controls by keyboard or GUI
-buttons, rather than a slider. When for example a hardware register
-accepts values 0-511 and the driver reports 0-65535, step should be
-128.</para></entry>
- </row>
- <row>
- <entry>__s64</entry>
- <entry><structfield>default_value</structfield></entry>
- <entry>The default value of a
-<constant>V4L2_CTRL_TYPE_INTEGER</constant>, <constant>_INTEGER64</constant>,
-<constant>_BOOLEAN</constant>, <constant>_BITMASK</constant>,
-<constant>_MENU</constant>, <constant>_INTEGER_MENU</constant>,
-<constant>_U8</constant> or <constant>_U16</constant> control.
-Not valid for other types of controls.
-Note that drivers reset controls to their default value only when the
-driver is first loaded, never afterwards.
-</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Control flags, see <xref
- linkend="control-flags" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>elem_size</structfield></entry>
- <entry>The size in bytes of a single element of the array.
-Given a char pointer <constant>p</constant> to a 3-dimensional array you can find the
-position of cell <constant>(z, y, x)</constant> as follows:
-<constant>p + ((z * dims[1] + y) * dims[0] + x) * elem_size</constant>. <structfield>elem_size</structfield>
-is always valid, also when the control isn't an array. For string controls
-<structfield>elem_size</structfield> is equal to <structfield>maximum + 1</structfield>.
-</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>elems</structfield></entry>
- <entry>The number of elements in the N-dimensional array. If this control
-is not an array, then <structfield>elems</structfield> is 1. The <structfield>elems</structfield>
-field can never be 0.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>nr_of_dims</structfield></entry>
- <entry>The number of dimensions in the N-dimensional array. If this control
-is not an array, then this field is 0.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>dims[V4L2_CTRL_MAX_DIMS]</structfield></entry>
- <entry>The size of each dimension. The first <structfield>nr_of_dims</structfield>
-elements of this array must be non-zero, all remaining elements must be zero.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[32]</entry>
- <entry>Reserved for future extensions. Applications and drivers
-must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
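-
- <para>A direct transcription of the indexing formula from the
-<structfield>elem_size</structfield> description above (an illustrative
-helper, not part of the API), assuming <constant>p</constant> points at the
-payload of a 3-dimensional array control:</para>
-
- <programlisting>
-#include &lt;linux/videodev2.h&gt;
-
-/* Address of cell (z, y, x) inside a 3-dimensional array control payload. */
-static inline void *cell_addr(char *p, const struct v4l2_query_ext_ctrl *q,
-                              unsigned int z, unsigned int y, unsigned int x)
-{
-        return p + ((z * q-&gt;dims[1] + y) * q-&gt;dims[0] + x) * q-&gt;elem_size;
-}
-</programlisting>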
-
- <table pgwide="1" frame="none" id="v4l2-querymenu">
- <title>struct <structname>v4l2_querymenu</structname></title>
- <tgroup cols="4">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry></entry>
- <entry><structfield>id</structfield></entry>
- <entry>Identifies the control, set by the application
-from the respective &v4l2-queryctrl;
-<structfield>id</structfield>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry></entry>
- <entry><structfield>index</structfield></entry>
- <entry>Index of the menu item, starting at zero, set by
- the application.</entry>
- </row>
- <row>
- <entry>union</entry>
- <entry></entry>
- <entry></entry>
- <entry></entry>
- </row>
- <row>
- <entry></entry>
- <entry>__u8</entry>
- <entry><structfield>name</structfield>[32]</entry>
- <entry>Name of the menu item, a NUL-terminated ASCII
-string. This information is intended for the user. This field is valid
-for <constant>V4L2_CTRL_TYPE_MENU</constant> type controls.</entry>
- </row>
- <row>
- <entry></entry>
- <entry>__s64</entry>
- <entry><structfield>value</structfield></entry>
- <entry>
- Value of the integer menu item. This field is valid for
- <constant>V4L2_CTRL_TYPE_INTEGER_MENU</constant> type
- controls.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry></entry>
- <entry><structfield>reserved</structfield></entry>
- <entry>Reserved for future extensions. Drivers must set
-the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-ctrl-type">
- <title>enum v4l2_ctrl_type</title>
- <tgroup cols="5" align="left">
- <colspec colwidth="30*" />
- <colspec colwidth="5*" align="center" />
- <colspec colwidth="5*" align="center" />
- <colspec colwidth="5*" align="center" />
- <colspec colwidth="55*" />
- <thead>
- <row>
- <entry>Type</entry>
- <entry><structfield>minimum</structfield></entry>
- <entry><structfield>step</structfield></entry>
- <entry><structfield>maximum</structfield></entry>
- <entry>Description</entry>
- </row>
- </thead>
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CTRL_TYPE_INTEGER</constant></entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>An integer-valued control ranging from minimum to
-maximum inclusive. The step value indicates the increment between
-values.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_BOOLEAN</constant></entry>
- <entry>0</entry>
- <entry>1</entry>
- <entry>1</entry>
- <entry>A boolean-valued control. Zero corresponds to
-"disabled", and one means "enabled".</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_MENU</constant></entry>
- <entry>&ge; 0</entry>
- <entry>1</entry>
- <entry>N-1</entry>
- <entry>The control has a menu of N choices. The names of
-the menu items can be enumerated with the
-<constant>VIDIOC_QUERYMENU</constant> ioctl.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_INTEGER_MENU</constant></entry>
- <entry>&ge; 0</entry>
- <entry>1</entry>
- <entry>N-1</entry>
- <entry>
- The control has a menu of N choices. The values of the
- menu items can be enumerated with the
- <constant>VIDIOC_QUERYMENU</constant> ioctl. This is
- similar to <constant>V4L2_CTRL_TYPE_MENU</constant>
- except that instead of strings, the menu items are
- signed 64-bit integers.
- </entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_BITMASK</constant></entry>
- <entry>0</entry>
- <entry>n/a</entry>
- <entry>any</entry>
- <entry>A bitmask field. The maximum value is the set of bits that can
-be used, all other bits are to be 0. The maximum value is interpreted as a __u32,
-allowing the use of bit 31 in the bitmask.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_BUTTON</constant></entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>0</entry>
- <entry>A control which performs an action when set.
-Drivers must ignore the value passed with
-<constant>VIDIOC_S_CTRL</constant> and return an &EINVAL; on a
-<constant>VIDIOC_G_CTRL</constant> attempt.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_INTEGER64</constant></entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>A 64-bit integer valued control. Minimum, maximum
-and step size cannot be queried using <constant>VIDIOC_QUERYCTRL</constant>.
-Only <constant>VIDIOC_QUERY_EXT_CTRL</constant> can retrieve the 64-bit
-min/max/step values, they should be interpreted as n/a when using
-<constant>VIDIOC_QUERYCTRL</constant>.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_STRING</constant></entry>
- <entry>&ge; 0</entry>
- <entry>&ge; 1</entry>
- <entry>&ge; 0</entry>
- <entry>The minimum and maximum string lengths. The step size
-means that the string must be (minimum + N * step) characters long for
-N &ge; 0. These lengths do not include the terminating zero, so in order to
-pass a string of length 8 to &VIDIOC-S-EXT-CTRLS; you need to set the
-<structfield>size</structfield> field of &v4l2-ext-control; to 9. For &VIDIOC-G-EXT-CTRLS; you can
-set the <structfield>size</structfield> field to <structfield>maximum</structfield> + 1.
-Which character encoding is used will depend on the string control itself and
-should be part of the control documentation.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_CTRL_CLASS</constant></entry>
- <entry>n/a</entry>
- <entry>n/a</entry>
- <entry>n/a</entry>
- <entry>This is not a control. When
-<constant>VIDIOC_QUERYCTRL</constant> is called with a control ID
-equal to a control class code (see <xref linkend="ctrl-class" />) + 1, the
-ioctl returns the name of the control class and this control type.
-Older drivers which do not support this feature return an
-&EINVAL;.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_U8</constant></entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>An unsigned 8-bit valued control ranging from minimum to
-maximum inclusive. The step value indicates the increment between
-values.
-</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_U16</constant></entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>An unsigned 16-bit valued control ranging from minimum to
-maximum inclusive. The step value indicates the increment between
-values.
-</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_TYPE_U32</constant></entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>any</entry>
- <entry>An unsigned 32-bit valued control ranging from minimum to
-maximum inclusive. The step value indicates the increment between
-values.
-</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="control-flags">
- <title>Control Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_CTRL_FLAG_DISABLED</constant></entry>
- <entry>0x0001</entry>
- <entry>This control is permanently disabled and should be
-ignored by the application. Any attempt to change the control will
-result in an &EINVAL;.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_GRABBED</constant></entry>
- <entry>0x0002</entry>
- <entry>This control is temporarily unchangeable, for
-example because another application took over control of the
-respective resource. Such controls may be displayed specially in a
-user interface. Attempts to change the control may result in an
-&EBUSY;.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_READ_ONLY</constant></entry>
- <entry>0x0004</entry>
- <entry>This control is permanently readable only. Any
-attempt to change the control will result in an &EINVAL;.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_UPDATE</constant></entry>
- <entry>0x0008</entry>
- <entry>A hint that changing this control may affect the
-value of other controls within the same control class. Applications
-should update their user interface accordingly.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_INACTIVE</constant></entry>
- <entry>0x0010</entry>
- <entry>This control is not applicable to the current
-configuration and should be displayed accordingly in a user interface.
-For example the flag may be set on a MPEG audio level 2 bitrate
-control when MPEG audio encoding level 1 was selected with another
-control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_SLIDER</constant></entry>
- <entry>0x0020</entry>
- <entry>A hint that this control is best represented as a
-slider-like element in a user interface.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_WRITE_ONLY</constant></entry>
- <entry>0x0040</entry>
- 	    <entry>This control is permanently write-only. Any
-attempt to read the control will result in an &EACCES; error code. This
-flag is typically present for relative controls or action controls where
-writing a value will cause the device to carry out a given action
-(&eg; motor control) but no meaningful value can be returned.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_VOLATILE</constant></entry>
- <entry>0x0080</entry>
- <entry>This control is volatile, which means that the value of the control
-changes continuously. A typical example would be the current gain value if the device
-is in auto-gain mode. In such a case the hardware calculates the gain value based on
-the lighting conditions which can change over time. Note that setting a new value for
-a volatile control will have no effect and no <constant>V4L2_EVENT_CTRL_CH_VALUE</constant>
-will be sent, unless the <constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant> flag
-(see below) is also set. Otherwise the new value will just be ignored.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_HAS_PAYLOAD</constant></entry>
- <entry>0x0100</entry>
- <entry>This control has a pointer type, so its value has to be accessed
-using one of the pointer fields of &v4l2-ext-control;. This flag is set for controls
-that are an array, string, or have a compound type. In all cases you have to set a
-pointer to memory containing the payload of the control.</entry>
- </row>
- <row>
- <entry><constant>V4L2_CTRL_FLAG_EXECUTE_ON_WRITE</constant></entry>
- <entry>0x0200</entry>
- <entry>The value provided to the control will be propagated to the driver
-even if it remains constant. This is required when the control represents an action
-on the hardware. For example: clearing an error flag or triggering the flash. All the
-controls of the type <constant>V4L2_CTRL_TYPE_BUTTON</constant> have this flag set.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-queryctrl; <structfield>id</structfield>
-is invalid. The &v4l2-querymenu; <structfield>id</structfield> is
-invalid or <structfield>index</structfield> is out of range (less than
-<structfield>minimum</structfield> or greater than <structfield>maximum</structfield>)
-or this particular menu item is not supported by the driver.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EACCES</errorcode></term>
- <listitem>
- <para>An attempt was made to read a write-only control.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querystd.xml b/Documentation/DocBook/media/v4l/vidioc-querystd.xml
deleted file mode 100644
index 3ceae35fab03..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-querystd.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<refentry id="vidioc-querystd">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_QUERYSTD</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_QUERYSTD</refname>
- <refpurpose>Sense the video standard received by the current
-input</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>v4l2_std_id *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_QUERYSTD</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The hardware may be able to detect the current video
-standard automatically. To do so, applications call <constant>
-VIDIOC_QUERYSTD</constant> with a pointer to a &v4l2-std-id; type. The
-driver stores a set of candidate standards there; this can be a single flag or a
-set of supported standards if for example the hardware can only
-distinguish between 50 and 60 Hz systems. If no signal was detected,
-then the driver will return V4L2_STD_UNKNOWN. When detection is not
-possible or fails, the set must contain all standards supported by the
-current video input or output.</para>
-
-<para>Please note that drivers shall <emphasis>not</emphasis> switch the video standard
-automatically if a new video standard is detected. Instead, drivers should send the
-<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect
-that userspace will take action by calling <constant>VIDIOC_QUERYSTD</constant>.
-The reason is that a new video standard can mean different buffer sizes as well, and you
-cannot change buffer sizes on the fly. In general, applications that receive the
-Source Change event will have to call <constant>VIDIOC_QUERYSTD</constant>,
-and if the detected video standard is valid they will have to stop streaming, set the new
-standard, allocate new buffers and start streaming again.</para>
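-
-<para>A minimal sketch of this sequence, assuming <parameter>fd</parameter>
-was returned by <function>open()</function> for a capture device; headers and
-most error handling are left out:</para>
-
-<programlisting>
-v4l2_std_id std;
-
-if (ioctl(fd, VIDIOC_QUERYSTD, &amp;std) == 0) {
-        if (std == V4L2_STD_UNKNOWN) {
-                /* no signal detected */
-        } else if (std &amp; V4L2_STD_PAL) {
-                /* one or more PAL variants are candidates: stop streaming,
-                   set the new standard, reallocate buffers, restart */
-        }
-}
-</programlisting>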
-
- </refsect1>
-
- <refsect1>
- &return-value;
- <variablelist>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>Standard video timings are not supported for this input or output.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml b/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
deleted file mode 100644
index 6f529e100ea4..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-reqbufs.xml
+++ /dev/null
@@ -1,137 +0,0 @@
-<refentry id="vidioc-reqbufs">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_REQBUFS</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_REQBUFS</refname>
- <refpurpose>Initiate Memory Mapping, User Pointer or DMA Buffer I/O</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_requestbuffers *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_REQBUFS</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-<para>This ioctl is used to initiate <link linkend="mmap">memory mapped</link>,
-<link linkend="userp">user pointer</link> or <link
-linkend="dmabuf">DMABUF</link> based I/O. Memory mapped buffers are located in
-device memory and must be allocated with this ioctl before they can be mapped
-into the application's address space. User buffers are allocated by
-applications themselves, and this ioctl is merely used to switch the driver
-into user pointer I/O mode and to set up some internal structures.
-Similarly, DMABUF buffers are allocated by applications through a device
-driver, and this ioctl only configures the driver into DMABUF I/O mode without
-performing any direct allocation.</para>
-
- <para>To allocate device buffers applications initialize all fields of the
-<structname>v4l2_requestbuffers</structname> structure. They set the
-<structfield>type</structfield> field to the respective stream or buffer type,
-the <structfield>count</structfield> field to the desired number of buffers and
-the <structfield>memory</structfield> field to the requested I/O method, and they
-zero the <structfield>reserved</structfield> array. When the ioctl is
-called with a pointer to this structure the driver will attempt to allocate the
-requested number of buffers and it stores the actual number allocated in the
-<structfield>count</structfield> field. It can be smaller than the number
-requested, even zero, when the driver runs out of free memory. A larger number
-is also possible when the driver requires more buffers to function correctly.
-For example video output requires at least two buffers, one displayed and one
-filled by the application.</para>
- <para>When the I/O method is not supported the ioctl
-returns an &EINVAL;.</para>
-
- <para>Applications can call <constant>VIDIOC_REQBUFS</constant>
-again to change the number of buffers; however, this cannot succeed
-when any buffers are still mapped. A <structfield>count</structfield>
-value of zero frees all buffers, after aborting or finishing any DMA
-in progress; this is an implicit &VIDIOC-STREAMOFF;. <!-- mhs: I see no
-reason why munmap()ping one or even all buffers must imply
-streamoff.--></para>
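-
-  <para>A minimal memory mapping setup might look like the following sketch,
-where <parameter>fd</parameter> was returned by <function>open()</function>
-and the buffer count of four is only an example:</para>
-
-  <programlisting>
-struct v4l2_requestbuffers req;
-
-memset(&amp;req, 0, sizeof(req));
-req.count = 4;                            /* desired number of buffers */
-req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-req.memory = V4L2_MEMORY_MMAP;
-
-if (ioctl(fd, VIDIOC_REQBUFS, &amp;req) == -1)
-        perror("VIDIOC_REQBUFS");
-else
-        printf("driver granted %u buffers\n", req.count);
-  </programlisting>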
-
- <table pgwide="1" frame="none" id="v4l2-requestbuffers">
- <title>struct <structname>v4l2_requestbuffers</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>count</structfield></entry>
- <entry>The number of buffers requested or granted.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the stream or buffers, this is the same
-as the &v4l2-format; <structfield>type</structfield> field. See <xref
- linkend="v4l2-buf-type" /> for valid values.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>memory</structfield></entry>
- <entry>Applications set this field to
-<constant>V4L2_MEMORY_MMAP</constant>,
-<constant>V4L2_MEMORY_DMABUF</constant> or
-<constant>V4L2_MEMORY_USERPTR</constant>. See <xref linkend="v4l2-memory"
-/>.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[2]</entry>
- <entry>A place holder for future extensions. Drivers and applications
-must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The buffer type (<structfield>type</structfield> field) or the
-requested I/O method (<structfield>memory</structfield>) is not
-supported.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml b/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
deleted file mode 100644
index a5fc4c4880f3..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
+++ /dev/null
@@ -1,188 +0,0 @@
-<refentry id="vidioc-s-hw-freq-seek">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_S_HW_FREQ_SEEK</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_S_HW_FREQ_SEEK</refname>
- <refpurpose>Perform a hardware frequency seek</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_hw_freq_seek
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_S_HW_FREQ_SEEK</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Start a hardware frequency seek from the current frequency.
-To do this, applications initialize the <structfield>tuner</structfield>,
-<structfield>type</structfield>, <structfield>seek_upward</structfield>,
-<structfield>wrap_around</structfield>, <structfield>spacing</structfield>,
-<structfield>rangelow</structfield> and <structfield>rangehigh</structfield>
-fields, and zero out the <structfield>reserved</structfield> array of a
-&v4l2-hw-freq-seek; and call the <constant>VIDIOC_S_HW_FREQ_SEEK</constant>
-ioctl with a pointer to this structure.</para>
-
- <para>The <structfield>rangelow</structfield> and
-<structfield>rangehigh</structfield> fields can be set to a non-zero value to
-tell the driver to search a specific band. If the &v4l2-tuner;
-<structfield>capability</structfield> field has the
-<constant>V4L2_TUNER_CAP_HWSEEK_PROG_LIM</constant> flag set, these values
-must fall within one of the bands returned by &VIDIOC-ENUM-FREQ-BANDS;. If
-the <constant>V4L2_TUNER_CAP_HWSEEK_PROG_LIM</constant> flag is not set,
-then these values must exactly match those of one of the bands returned by
-&VIDIOC-ENUM-FREQ-BANDS;. If the current frequency of the tuner does not fall
-within the selected band it will be clamped to fit in the band before the
-seek is started.</para>
-
- <para>If an error is returned, then the original frequency will
- be restored.</para>
-
- <para>This ioctl is supported if the <constant>V4L2_CAP_HW_FREQ_SEEK</constant> capability is set.</para>
-
- <para>If this ioctl is called from a non-blocking filehandle, then &EAGAIN; is
- returned and no seek takes place.</para>
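-
-  <para>A minimal radio seek could look like the following sketch; the tuner
-index and seek direction are examples and error handling is abbreviated:</para>
-
-  <programlisting>
-struct v4l2_hw_freq_seek seek;
-
-memset(&amp;seek, 0, sizeof(seek));
-seek.tuner = 0;
-seek.type = V4L2_TUNER_RADIO;
-seek.seek_upward = 1;
-seek.wrap_around = 1;
-/* spacing, rangelow and rangehigh left zero: use driver defaults */
-
-if (ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &amp;seek) == -1 &amp;&amp; errno == ENODATA)
-        fprintf(stderr, "no channel found\n");
-  </programlisting>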
-
- <table pgwide="1" frame="none" id="v4l2-hw-freq-seek">
- <title>struct <structname>v4l2_hw_freq_seek</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>tuner</structfield></entry>
- <entry>The tuner index number. This is the
-same value as in the &v4l2-input; <structfield>tuner</structfield>
-field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. See <xref
- linkend="v4l2-tuner-type" /></entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>seek_upward</structfield></entry>
- <entry>If non-zero, seek upward from the current frequency, else seek downward.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>wrap_around</structfield></entry>
- <entry>If non-zero, wrap around when at the end of the frequency range, else stop seeking.
- The &v4l2-tuner; <structfield>capability</structfield> field will tell you what the
- hardware supports.
- </entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>spacing</structfield></entry>
- <entry>If non-zero, defines the hardware seek resolution in Hz. The driver selects the nearest value that is supported by the device. If spacing is zero a reasonable default value is used.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangelow</structfield></entry>
- <entry>If non-zero, the lowest tunable frequency of the band to
-search in units of 62.5 kHz, or if the &v4l2-tuner;
-<structfield>capability</structfield> field has the
-<constant>V4L2_TUNER_CAP_LOW</constant> flag set, in units of 62.5 Hz or if the &v4l2-tuner;
-<structfield>capability</structfield> field has the
-<constant>V4L2_TUNER_CAP_1HZ</constant> flag set, in units of 1 Hz.
-If <structfield>rangelow</structfield> is zero a reasonable default value
-is used.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>rangehigh</structfield></entry>
- <entry>If non-zero, the highest tunable frequency of the band to
-search in units of 62.5 kHz, or if the &v4l2-tuner;
-<structfield>capability</structfield> field has the
-<constant>V4L2_TUNER_CAP_LOW</constant> flag set, in units of 62.5 Hz or if the &v4l2-tuner;
-<structfield>capability</structfield> field has the
-<constant>V4L2_TUNER_CAP_1HZ</constant> flag set, in units of 1 Hz.
-If <structfield>rangehigh</structfield> is zero a reasonable default value
-is used.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[5]</entry>
- <entry>Reserved for future extensions. Applications
- must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The <structfield>tuner</structfield> index is out of
-bounds, the <structfield>wrap_around</structfield> value is not supported or
-one of the values in the <structfield>type</structfield>,
-<structfield>rangelow</structfield> or <structfield>rangehigh</structfield>
-fields is wrong.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EAGAIN</errorcode></term>
- <listitem>
- <para>Attempted to call <constant>VIDIOC_S_HW_FREQ_SEEK</constant>
- with the filehandle in non-blocking mode.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENODATA</errorcode></term>
- <listitem>
- <para>The hardware seek found no channels.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>Another hardware seek is already in progress.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-streamon.xml b/Documentation/DocBook/media/v4l/vidioc-streamon.xml
deleted file mode 100644
index 89fd7ce964f9..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-streamon.xml
+++ /dev/null
@@ -1,136 +0,0 @@
-<refentry id="vidioc-streamon">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_STREAMON, VIDIOC_STREAMOFF</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_STREAMON</refname>
- <refname>VIDIOC_STREAMOFF</refname>
- <refpurpose>Start or stop streaming I/O</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const int *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_STREAMON, VIDIOC_STREAMOFF</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>The <constant>VIDIOC_STREAMON</constant> and
-<constant>VIDIOC_STREAMOFF</constant> ioctls start and stop the capture
-or output process during streaming (<link linkend="mmap">memory
-mapping</link>, <link linkend="userp">user pointer</link> or
-<link linkend="dmabuf">DMABUF</link>) I/O.</para>
-
- <para>Capture hardware is disabled and no input
-buffers are filled (if there are any empty buffers in the incoming
-queue) until <constant>VIDIOC_STREAMON</constant> has been called.
-Output hardware is disabled and no video signal is
-produced until <constant>VIDIOC_STREAMON</constant> has been called.
-The ioctl will succeed when at least one output buffer is in the
-incoming queue.</para>
-
- <para>Memory-to-memory devices will not start until
-<constant>VIDIOC_STREAMON</constant> has been called for both the capture
-and output stream types.</para>
-
- <para>If <constant>VIDIOC_STREAMON</constant> fails then any already
-queued buffers will remain queued.</para>
-
-    <para>The <constant>VIDIOC_STREAMOFF</constant> ioctl, apart from
-aborting or finishing any DMA in progress, unlocks any user pointer
-buffers locked in physical memory, and it removes all buffers from the
-incoming and outgoing queues. That means all images captured but not
-dequeued yet will be lost, likewise all images enqueued for output but
-not transmitted yet. I/O returns to the same state as after calling
-&VIDIOC-REQBUFS; and can be restarted accordingly.</para>
-
- <para>If buffers have been queued with &VIDIOC-QBUF; and
-<constant>VIDIOC_STREAMOFF</constant> is called without ever having
-called <constant>VIDIOC_STREAMON</constant>, then those queued buffers
-will also be removed from the incoming queue and all are returned to the
-same state as after calling &VIDIOC-REQBUFS; and can be restarted
-accordingly.</para>
-
- <para>Both ioctls take a pointer to an integer, the desired buffer or
-stream type. This is the same as &v4l2-requestbuffers;
-<structfield>type</structfield>.</para>
-
- <para>If <constant>VIDIOC_STREAMON</constant> is called when streaming
-is already in progress, or if <constant>VIDIOC_STREAMOFF</constant> is called
-when streaming is already stopped, then 0 is returned. Nothing happens in the
-case of <constant>VIDIOC_STREAMON</constant>, but <constant>VIDIOC_STREAMOFF</constant>
-will return queued buffers to their starting state as mentioned above.</para>
-
- <para>Note that applications can be preempted for unknown periods right
-before or after the <constant>VIDIOC_STREAMON</constant> or
-<constant>VIDIOC_STREAMOFF</constant> calls; there is no notion of
-starting or stopping "now". Buffer timestamps can be used to
-synchronize with other events.</para>
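-
-    <para>In its simplest form the start/stop sequence looks like the
-following sketch (capture example, error handling abbreviated):</para>
-
-    <programlisting>
-int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
-if (ioctl(fd, VIDIOC_STREAMON, &amp;type) == -1)
-        perror("VIDIOC_STREAMON");
-
-/* ... exchange buffers with VIDIOC_QBUF / VIDIOC_DQBUF ... */
-
-if (ioctl(fd, VIDIOC_STREAMOFF, &amp;type) == -1)
-        perror("VIDIOC_STREAMOFF");
-    </programlisting>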
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The buffer <structfield>type</structfield> is not supported,
- or no buffers have been allocated (memory mapping) or enqueued
- (output) yet.</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EPIPE</errorcode></term>
- <listitem>
- <para>The driver implements <link
- linkend="pad-level-formats">pad-level format configuration</link> and
- the pipeline configuration is invalid.
- </para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>ENOLINK</errorcode></term>
- <listitem>
- <para>The driver implements Media Controller interface and
- the pipeline link configuration is invalid.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
deleted file mode 100644
index 9d0251a27e5f..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-interval.xml
+++ /dev/null
@@ -1,151 +0,0 @@
-<refentry id="vidioc-subdev-enum-frame-interval">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL</refname>
- <refpurpose>Enumerate frame intervals</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_frame_interval_enum *
- <parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl lets applications enumerate available frame intervals on a
-    given sub-device pad. Frame intervals only make sense for sub-devices that
- can control the frame period on their own. This includes, for instance,
- image sensors and TV tuners.</para>
-
- <para>For the common use case of image sensors, the frame intervals
- available on the sub-device output pad depend on the frame format and size
- on the same pad. Applications must thus specify the desired format and size
- when enumerating frame intervals.</para>
-
- <para>To enumerate frame intervals applications initialize the
- <structfield>index</structfield>, <structfield>pad</structfield>,
- <structfield>which</structfield>, <structfield>code</structfield>,
- <structfield>width</structfield> and <structfield>height</structfield>
- fields of &v4l2-subdev-frame-interval-enum; and call the
- <constant>VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL</constant> ioctl with a pointer
- to this structure. Drivers fill the rest of the structure or return
- an &EINVAL; if one of the input fields is invalid. All frame intervals are
- enumerable by beginning at index zero and incrementing by one until
- <errorcode>EINVAL</errorcode> is returned.</para>
-
- <para>Available frame intervals may depend on the current 'try' formats
- at other pads of the sub-device, as well as on the current active links. See
- &VIDIOC-SUBDEV-G-FMT; for more information about the try formats.</para>
-
- <para>Sub-devices that support the frame interval enumeration ioctl should
-    implement it on a single pad only. Its behaviour when supported on
- multiple pads of the same sub-device is not defined.</para>
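-
-    <para>A sketch of such an enumeration loop, run on an open sub-device
-    node; the pad number, media bus code and frame size are examples
-    only:</para>
-
-    <programlisting>
-struct v4l2_subdev_frame_interval_enum fie;
-
-memset(&amp;fie, 0, sizeof(fie));
-fie.pad = 0;                              /* example pad */
-fie.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-fie.code = MEDIA_BUS_FMT_SBGGR10_1X10;    /* example media bus code */
-fie.width = 1920;
-fie.height = 1080;
-
-for (fie.index = 0;
-     ioctl(fd, VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL, &amp;fie) == 0;
-     fie.index++)
-        printf("%u/%u\n", fie.interval.numerator, fie.interval.denominator);
-    </programlisting>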
-
- <table pgwide="1" frame="none" id="v4l2-subdev-frame-interval-enum">
- <title>struct <structname>v4l2_subdev_frame_interval_enum</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the format in the enumeration, set by the
- application.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>code</structfield></entry>
- <entry>The media bus format code, as defined in
- <xref linkend="v4l2-mbus-format" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>width</structfield></entry>
- <entry>Frame width, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>height</structfield></entry>
- <entry>Frame height, in pixels.</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>interval</structfield></entry>
- <entry>Period, in seconds, between consecutive video frames.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
- <entry>Frame intervals to be enumerated, from &v4l2-subdev-format-whence;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-frame-interval-enum;
- <structfield>pad</structfield> references a non-existing pad, one of
- the <structfield>code</structfield>, <structfield>width</structfield>
- or <structfield>height</structfield> fields are invalid for the given
- pad or the <structfield>index</structfield> field is out of bounds.
- </para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
deleted file mode 100644
index 9b91b8332ba9..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-frame-size.xml
+++ /dev/null
@@ -1,153 +0,0 @@
-<refentry id="vidioc-subdev-enum-frame-size">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_ENUM_FRAME_SIZE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_ENUM_FRAME_SIZE</refname>
- <refpurpose>Enumerate media bus frame sizes</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_frame_size_enum *
- <parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_ENUM_FRAME_SIZE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>This ioctl allows applications to enumerate all frame sizes
- supported by a sub-device on the given pad for the given media bus format.
- Supported formats can be retrieved with the &VIDIOC-SUBDEV-ENUM-MBUS-CODE;
- ioctl.</para>
-
- <para>To enumerate frame sizes applications initialize the
-    <structfield>pad</structfield>, <structfield>which</structfield>,
-    <structfield>code</structfield> and <structfield>index</structfield>
-    fields of the &v4l2-subdev-frame-size-enum; and call the
- <constant>VIDIOC_SUBDEV_ENUM_FRAME_SIZE</constant> ioctl with a pointer to
- the structure. Drivers fill the minimum and maximum frame sizes or return
- an &EINVAL; if one of the input parameters is invalid.</para>
-
- <para>Sub-devices that only support discrete frame sizes (such as most
- sensors) will return one or more frame sizes with identical minimum and
- maximum values.</para>
-
-    <para>Not all possible sizes in the given [minimum, maximum] ranges need to be
- supported. For instance, a scaler that uses a fixed-point scaling ratio
- might not be able to produce every frame size between the minimum and
- maximum values. Applications must use the &VIDIOC-SUBDEV-S-FMT; ioctl to
- try the sub-device for an exact supported frame size.</para>
-
- <para>Available frame sizes may depend on the current 'try' formats at other
- pads of the sub-device, as well as on the current active links and the
- current values of V4L2 controls. See &VIDIOC-SUBDEV-G-FMT; for more
- information about try formats.</para>
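-
-    <para>A sketch of the enumeration loop on an open sub-device node, with an
-    example pad number and media bus code:</para>
-
-    <programlisting>
-struct v4l2_subdev_frame_size_enum fse;
-
-memset(&amp;fse, 0, sizeof(fse));
-fse.pad = 0;
-fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-fse.code = MEDIA_BUS_FMT_SBGGR10_1X10;    /* from VIDIOC_SUBDEV_ENUM_MBUS_CODE */
-
-for (fse.index = 0;
-     ioctl(fd, VIDIOC_SUBDEV_ENUM_FRAME_SIZE, &amp;fse) == 0;
-     fse.index++)
-        printf("%ux%u .. %ux%u\n", fse.min_width, fse.min_height,
-               fse.max_width, fse.max_height);
-    </programlisting>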
-
- <table pgwide="1" frame="none" id="v4l2-subdev-frame-size-enum">
- <title>struct <structname>v4l2_subdev_frame_size_enum</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the format in the enumeration, set by the
- application.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>code</structfield></entry>
- <entry>The media bus format code, as defined in
- <xref linkend="v4l2-mbus-format" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>min_width</structfield></entry>
- <entry>Minimum frame width, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>max_width</structfield></entry>
- <entry>Maximum frame width, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>min_height</structfield></entry>
- <entry>Minimum frame height, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>max_height</structfield></entry>
- <entry>Maximum frame height, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
- <entry>Frame sizes to be enumerated, from &v4l2-subdev-format-whence;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-frame-size-enum; <structfield>pad</structfield>
- references a non-existing pad, the <structfield>code</structfield> is
- invalid for the given pad or the <structfield>index</structfield>
- field is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
deleted file mode 100644
index c67256ada87a..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-enum-mbus-code.xml
+++ /dev/null
@@ -1,118 +0,0 @@
-<refentry id="vidioc-subdev-enum-mbus-code">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_ENUM_MBUS_CODE</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_ENUM_MBUS_CODE</refname>
- <refpurpose>Enumerate media bus formats</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_mbus_code_enum *
- <parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_ENUM_MBUS_CODE</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>To enumerate media bus formats available at a given sub-device pad
- applications initialize the <structfield>pad</structfield>, <structfield>which</structfield>
- and <structfield>index</structfield> fields of &v4l2-subdev-mbus-code-enum; and
- call the <constant>VIDIOC_SUBDEV_ENUM_MBUS_CODE</constant> ioctl with a
- pointer to this structure. Drivers fill the rest of the structure or return
- an &EINVAL; if either the <structfield>pad</structfield> or
- <structfield>index</structfield> are invalid. All media bus formats are
- enumerable by beginning at index zero and incrementing by one until
- <errorcode>EINVAL</errorcode> is returned.</para>
-
- <para>Available media bus formats may depend on the current 'try' formats
- at other pads of the sub-device, as well as on the current active links. See
- &VIDIOC-SUBDEV-G-FMT; for more information about the try formats.</para>
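-
-    <para>A sketch of the enumeration loop on an open sub-device node (pad 0
-    is only an example):</para>
-
-    <programlisting>
-struct v4l2_subdev_mbus_code_enum mbus;
-
-memset(&amp;mbus, 0, sizeof(mbus));
-mbus.pad = 0;
-mbus.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-
-for (mbus.index = 0;
-     ioctl(fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &amp;mbus) == 0;
-     mbus.index++)
-        printf("0x%08x\n", mbus.code);
-    </programlisting>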
-
- <table pgwide="1" frame="none" id="v4l2-subdev-mbus-code-enum">
- <title>struct <structname>v4l2_subdev_mbus_code_enum</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>index</structfield></entry>
- <entry>Number of the format in the enumeration, set by the
- application.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>code</structfield></entry>
- <entry>The media bus format code, as defined in
- <xref linkend="v4l2-mbus-format" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
- <entry>Media bus format codes to be enumerated, from &v4l2-subdev-format-whence;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-mbus-code-enum; <structfield>pad</structfield>
- references a non-existing pad, or the <structfield>index</structfield>
- field is out of bounds.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml
deleted file mode 100644
index 4cddd788c589..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-crop.xml
+++ /dev/null
@@ -1,158 +0,0 @@
-<refentry id="vidioc-subdev-g-crop">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_G_CROP, VIDIOC_SUBDEV_S_CROP</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_G_CROP</refname>
- <refname>VIDIOC_SUBDEV_S_CROP</refname>
- <refpurpose>Get or set the crop rectangle on a subdev pad</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_crop *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>const struct v4l2_subdev_crop *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_G_CROP, VIDIOC_SUBDEV_S_CROP</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <note>
- <title>Obsolete</title>
-
- <para>This is an <link linkend="obsolete">obsolete</link>
- interface and may be removed in the future. It is superseded by
- <link linkend="vidioc-subdev-g-selection">the selection
- API</link>.</para>
- </note>
-
- <para>To retrieve the current crop rectangle applications set the
- <structfield>pad</structfield> field of a &v4l2-subdev-crop; to the
- desired pad number as reported by the media API and the
- <structfield>which</structfield> field to
- <constant>V4L2_SUBDEV_FORMAT_ACTIVE</constant>. They then call the
- <constant>VIDIOC_SUBDEV_G_CROP</constant> ioctl with a pointer to this
- structure. The driver fills the members of the <structfield>rect</structfield>
- field or returns &EINVAL; if the input arguments are invalid, or if cropping
- is not supported on the given pad.</para>
-
- <para>To change the current crop rectangle applications set both the
- <structfield>pad</structfield> and <structfield>which</structfield> fields
- and all members of the <structfield>rect</structfield> field. They then call
- the <constant>VIDIOC_SUBDEV_S_CROP</constant> ioctl with a pointer to this
- structure. The driver verifies the requested crop rectangle, adjusts it
- based on the hardware capabilities and configures the device. Upon return
- the &v4l2-subdev-crop; contains the current format as would be returned
- by a <constant>VIDIOC_SUBDEV_G_CROP</constant> call.</para>
-
- <para>Applications can query the device capabilities by setting the
-    <structfield>which</structfield> field to
-    <constant>V4L2_SUBDEV_FORMAT_TRY</constant>. When set, 'try' crop
-    rectangles are not applied to the device by the driver, but are adjusted
-    exactly like active crop rectangles and stored in the sub-device file handle.
- Two applications querying the same sub-device would thus not interact with
- each other.</para>
-
- <para>Drivers must not return an error solely because the requested crop
- rectangle doesn't match the device capabilities. They must instead modify
-    the rectangle to match what the hardware can provide. The modified rectangle
- should be as close as possible to the original request.</para>
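-
-    <para>A sketch of the get/modify/set sequence on an open sub-device node;
-    the pad number and rectangle are examples and error handling is
-    abbreviated:</para>
-
-    <programlisting>
-struct v4l2_subdev_crop crop;
-
-memset(&amp;crop, 0, sizeof(crop));
-crop.pad = 0;
-crop.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-
-if (ioctl(fd, VIDIOC_SUBDEV_G_CROP, &amp;crop) == 0) {
-        crop.rect.left = 0;
-        crop.rect.top = 0;
-        crop.rect.width = 1280;           /* requested, may be adjusted */
-        crop.rect.height = 720;
-        ioctl(fd, VIDIOC_SUBDEV_S_CROP, &amp;crop);
-        /* crop.rect now holds the rectangle actually applied */
-}
-    </programlisting>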
-
- <table pgwide="1" frame="none" id="v4l2-subdev-crop">
- <title>struct <structname>v4l2_subdev_crop</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media framework.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
- <entry>Crop rectangle to get or set, from
- &v4l2-subdev-format-whence;.</entry>
- </row>
- <row>
- <entry>&v4l2-rect;</entry>
- <entry><structfield>rect</structfield></entry>
- <entry>Crop rectangle boundaries, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The crop rectangle can't be changed because the pad is currently
- busy. This can be caused, for instance, by an active video stream on
- the pad. The ioctl must not be retried without performing another
- action to fix the problem first. Only returned by
- <constant>VIDIOC_SUBDEV_S_CROP</constant></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-crop; <structfield>pad</structfield>
- references a non-existing pad, the <structfield>which</structfield>
- field references a non-existing format, or cropping is not supported
- on the given subdev pad.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml
deleted file mode 100644
index 781089cba453..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-fmt.xml
+++ /dev/null
@@ -1,177 +0,0 @@
-<refentry id="vidioc-subdev-g-fmt">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_G_FMT, VIDIOC_SUBDEV_S_FMT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_G_FMT</refname>
- <refname>VIDIOC_SUBDEV_S_FMT</refname>
- <refpurpose>Get or set the data format on a subdev pad</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_format *<parameter>argp</parameter>
- </paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_G_FMT, VIDIOC_SUBDEV_S_FMT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls are used to negotiate the frame format at specific
- subdev pads in the image pipeline.</para>
-
- <para>To retrieve the current format applications set the
- <structfield>pad</structfield> field of a &v4l2-subdev-format; to the
- desired pad number as reported by the media API and the
- <structfield>which</structfield> field to
- <constant>V4L2_SUBDEV_FORMAT_ACTIVE</constant>. When they call the
- <constant>VIDIOC_SUBDEV_G_FMT</constant> ioctl with a pointer to this
- structure the driver fills the members of the <structfield>format</structfield>
- field.</para>
-
- <para>To change the current format applications set both the
- <structfield>pad</structfield> and <structfield>which</structfield> fields
- and all members of the <structfield>format</structfield> field. When they
- call the <constant>VIDIOC_SUBDEV_S_FMT</constant> ioctl with a pointer to this
- structure the driver verifies the requested format, adjusts it based on the
- hardware capabilities and configures the device. Upon return the
- &v4l2-subdev-format; contains the current format as would be returned by a
- <constant>VIDIOC_SUBDEV_G_FMT</constant> call.</para>
-
- <para>Applications can query the device capabilities by setting the
-    <structfield>which</structfield> field to
- <constant>V4L2_SUBDEV_FORMAT_TRY</constant>. When set, 'try' formats are not
- applied to the device by the driver, but are changed exactly as active
- formats and stored in the sub-device file handle. Two applications querying
- the same sub-device would thus not interact with each other.</para>
-
- <para>For instance, to try a format at the output pad of a sub-device,
- applications would first set the try format at the sub-device input with the
- <constant>VIDIOC_SUBDEV_S_FMT</constant> ioctl. They would then either
- retrieve the default format at the output pad with the
- <constant>VIDIOC_SUBDEV_G_FMT</constant> ioctl, or set the desired output
- pad format with the <constant>VIDIOC_SUBDEV_S_FMT</constant> ioctl and check
- the returned value.</para>
-
- <para>Try formats do not depend on active formats, but can depend on the
-    current link configuration or sub-device control values. For instance, a
- low-pass noise filter might crop pixels at the frame boundaries, modifying
- its output frame size.</para>
-
- <para>Drivers must not return an error solely because the requested format
- doesn't match the device capabilities. They must instead modify the format
- to match what the hardware can provide. The modified format should be as
- close as possible to the original request.</para>
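-
-    <para>A sketch of negotiating a try format on an open sub-device node; the
-    pad number, size and media bus code are examples:</para>
-
-    <programlisting>
-struct v4l2_subdev_format fmt;
-
-memset(&amp;fmt, 0, sizeof(fmt));
-fmt.pad = 0;
-fmt.which = V4L2_SUBDEV_FORMAT_TRY;       /* do not touch the hardware */
-
-if (ioctl(fd, VIDIOC_SUBDEV_G_FMT, &amp;fmt) == 0) {
-        fmt.format.width = 1920;
-        fmt.format.height = 1080;
-        fmt.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
-        ioctl(fd, VIDIOC_SUBDEV_S_FMT, &amp;fmt);
-        /* fmt.format now holds what the driver can actually provide */
-}
-    </programlisting>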
-
- <table pgwide="1" frame="none" id="v4l2-subdev-format">
- <title>struct <structname>v4l2_subdev_format</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
-	    <entry>Format to be modified, from &v4l2-subdev-format-whence;.</entry>
- </row>
- <row>
- <entry>&v4l2-mbus-framefmt;</entry>
- <entry><structfield>format</structfield></entry>
- <entry>Definition of an image format, see <xref
- linkend="v4l2-mbus-framefmt" /> for details.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-subdev-format-whence">
- <title>enum <structname>v4l2_subdev_format_whence</structname></title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry>V4L2_SUBDEV_FORMAT_TRY</entry>
- <entry>0</entry>
- <entry>Try formats, used for querying device capabilities.</entry>
- </row>
- <row>
- <entry>V4L2_SUBDEV_FORMAT_ACTIVE</entry>
- <entry>1</entry>
- <entry>Active formats, applied to the hardware.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The format can't be changed because the pad is currently busy.
- This can be caused, for instance, by an active video stream on the
- pad. The ioctl must not be retried without performing another action
- to fix the problem first. Only returned by
- <constant>VIDIOC_SUBDEV_S_FMT</constant></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-format; <structfield>pad</structfield>
- references a non-existing pad, or the <structfield>which</structfield>
- field references a non-existing format.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml
deleted file mode 100644
index 848ec789ddaa..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-frame-interval.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<refentry id="vidioc-subdev-g-frame-interval">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_G_FRAME_INTERVAL, VIDIOC_SUBDEV_S_FRAME_INTERVAL</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_G_FRAME_INTERVAL</refname>
- <refname>VIDIOC_SUBDEV_S_FRAME_INTERVAL</refname>
- <refpurpose>Get or set the frame interval on a subdev pad</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_frame_interval *<parameter>argp</parameter>
- </paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_G_FRAME_INTERVAL, VIDIOC_SUBDEV_S_FRAME_INTERVAL</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>These ioctls are used to get and set the frame interval at specific
- subdev pads in the image pipeline. The frame interval only makes sense for
- sub-devices that can control the frame period on their own. This includes,
- for instance, image sensors and TV tuners. Sub-devices that don't support
- frame intervals must not implement these ioctls.</para>
-
- <para>To retrieve the current frame interval applications set the
- <structfield>pad</structfield> field of a &v4l2-subdev-frame-interval; to
- the desired pad number as reported by the media controller API. When they
- call the <constant>VIDIOC_SUBDEV_G_FRAME_INTERVAL</constant> ioctl with a
- pointer to this structure the driver fills the members of the
- <structfield>interval</structfield> field.</para>
-
- <para>To change the current frame interval applications set both the
- <structfield>pad</structfield> field and all members of the
- <structfield>interval</structfield> field. When they call the
- <constant>VIDIOC_SUBDEV_S_FRAME_INTERVAL</constant> ioctl with a pointer to
- this structure the driver verifies the requested interval, adjusts it based
- on the hardware capabilities and configures the device. Upon return the
- &v4l2-subdev-frame-interval; contains the current frame interval as would be
- returned by a <constant>VIDIOC_SUBDEV_G_FRAME_INTERVAL</constant> call.
- </para>
-
- <para>Drivers must not return an error solely because the requested interval
- doesn't match the device capabilities. They must instead modify the interval
- to match what the hardware can provide. The modified interval should be as
- close as possible to the original request.</para>
-
- <para>Sub-devices that support the frame interval ioctls should implement
- them on a single pad only. Their behaviour when supported on multiple pads
- of the same sub-device is not defined.</para>
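-
-    <para>A sketch of the get/modify/set sequence on an open sub-device node;
-    the requested 30 frames per second is only an example:</para>
-
-    <programlisting>
-struct v4l2_subdev_frame_interval fi;
-
-memset(&amp;fi, 0, sizeof(fi));
-fi.pad = 0;
-
-if (ioctl(fd, VIDIOC_SUBDEV_G_FRAME_INTERVAL, &amp;fi) == 0) {
-        fi.interval.numerator = 1;        /* request 30 frames per second */
-        fi.interval.denominator = 30;
-        ioctl(fd, VIDIOC_SUBDEV_S_FRAME_INTERVAL, &amp;fi);
-        /* fi.interval now holds the interval actually programmed */
-}
-    </programlisting>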
-
- <table pgwide="1" frame="none" id="v4l2-subdev-frame-interval">
- <title>struct <structname>v4l2_subdev_frame_interval</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media controller API.</entry>
- </row>
- <row>
- <entry>&v4l2-fract;</entry>
- <entry><structfield>interval</structfield></entry>
- <entry>Period, in seconds, between consecutive video frames.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[9]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The frame interval can't be changed because the pad is currently
- busy. This can be caused, for instance, by an active video stream on
- the pad. The ioctl must not be retried without performing another
- action to fix the problem first. Only returned by
- <constant>VIDIOC_SUBDEV_S_FRAME_INTERVAL</constant></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-frame-interval; <structfield>pad</structfield>
- references a non-existing pad, or the pad doesn't support frame
- intervals.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
deleted file mode 100644
index 8346b2e4a703..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
+++ /dev/null
@@ -1,159 +0,0 @@
-<refentry id="vidioc-subdev-g-selection">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBDEV_G_SELECTION, VIDIOC_SUBDEV_S_SELECTION</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBDEV_G_SELECTION</refname>
- <refname>VIDIOC_SUBDEV_S_SELECTION</refname>
- <refpurpose>Get or set selection rectangles on a subdev pad</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_subdev_selection *<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBDEV_G_SELECTION, VIDIOC_SUBDEV_S_SELECTION</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
-    <para>Selections are used to configure the various image
-    processing functions performed by sub-devices that affect the
-    image size. This currently includes cropping, scaling and
- composition.</para>
-
- <para>The selection API replaces <link
- linkend="vidioc-subdev-g-crop">the old subdev crop API</link>. All
- the function of the crop API, and more, are supported by the
- selections API.</para>
-
- <para>See <xref linkend="subdev"></xref> for
- more information on how each selection target affects the image
- processing pipeline inside the subdevice.</para>
-
- <refsect2>
- <title>Types of selection targets</title>
-
- <para>There are two types of selection targets: actual and bounds. The
- actual targets are the targets which configure the hardware. The BOUNDS
- target will return a rectangle that contain all possible actual
- rectangles.</para>
- </refsect2>
-
- <refsect2>
- <title>Discovering supported features</title>
-
- <para>To discover which targets are supported, the user can
- perform <constant>VIDIOC_SUBDEV_G_SELECTION</constant> on them.
- Any unsupported target will return
- <constant>EINVAL</constant>.</para>
-
- <para>Selection targets and flags are documented in <xref
- linkend="v4l2-selections-common"/>.</para>
-
- <table pgwide="1" frame="none" id="v4l2-subdev-selection">
- <title>struct <structname>v4l2_subdev_selection</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>which</structfield></entry>
- <entry>Active or try selection, from
- &v4l2-subdev-format-whence;.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>pad</structfield></entry>
- <entry>Pad number as reported by the media framework.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>target</structfield></entry>
- <entry>Target selection rectangle. See
- <xref linkend="v4l2-selections-common" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Flags. See
- <xref linkend="v4l2-selection-flags" />.</entry>
- </row>
- <row>
- <entry>&v4l2-rect;</entry>
- <entry><structfield>r</structfield></entry>
- <entry>Selection rectangle, in pixels.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[8]</entry>
- <entry>Reserved for future extensions. Applications and drivers must
- set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect2>
-
- </refsect1>
-
- <refsect1>
- &return-value;
-
- <variablelist>
- <varlistentry>
- <term><errorcode>EBUSY</errorcode></term>
- <listitem>
- <para>The selection rectangle can't be changed because the
- pad is currently busy. This can be caused, for instance, by
- an active video stream on the pad. The ioctl must not be
- retried without performing another action to fix the problem
- first. Only returned by
- <constant>VIDIOC_SUBDEV_S_SELECTION</constant></para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><errorcode>EINVAL</errorcode></term>
- <listitem>
- <para>The &v4l2-subdev-selection;
- <structfield>pad</structfield> references a non-existing
- pad, the <structfield>which</structfield> field references a
- non-existing format, or the selection target is not
- supported on the given subdev pad.</para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml b/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
deleted file mode 100644
index 5fd0ee78f880..000000000000
--- a/Documentation/DocBook/media/v4l/vidioc-subscribe-event.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<refentry id="vidioc-subscribe-event">
- <refmeta>
- <refentrytitle>ioctl VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</refentrytitle>
- &manvol;
- </refmeta>
-
- <refnamediv>
- <refname>VIDIOC_SUBSCRIBE_EVENT</refname>
- <refname>VIDIOC_UNSUBSCRIBE_EVENT</refname>
- <refpurpose>Subscribe or unsubscribe event</refpurpose>
- </refnamediv>
-
- <refsynopsisdiv>
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>ioctl</function></funcdef>
- <paramdef>int <parameter>fd</parameter></paramdef>
- <paramdef>int <parameter>request</parameter></paramdef>
- <paramdef>struct v4l2_event_subscription
-*<parameter>argp</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
- </refsynopsisdiv>
-
- <refsect1>
- <title>Arguments</title>
-
- <variablelist>
- <varlistentry>
- <term><parameter>fd</parameter></term>
- <listitem>
- <para>&fd;</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>request</parameter></term>
- <listitem>
- <para>VIDIOC_SUBSCRIBE_EVENT, VIDIOC_UNSUBSCRIBE_EVENT</para>
- </listitem>
- </varlistentry>
- <varlistentry>
- <term><parameter>argp</parameter></term>
- <listitem>
- <para></para>
- </listitem>
- </varlistentry>
- </variablelist>
- </refsect1>
-
- <refsect1>
- <title>Description</title>
-
- <para>Subscribe or unsubscribe V4L2 event. Subscribed events are
- dequeued by using the &VIDIOC-DQEVENT; ioctl.</para>
-
- <table frame="none" pgwide="1" id="v4l2-event-subscription">
- <title>struct <structname>v4l2_event_subscription</structname></title>
- <tgroup cols="3">
- &cs-str;
- <tbody valign="top">
- <row>
- <entry>__u32</entry>
- <entry><structfield>type</structfield></entry>
- <entry>Type of the event, see <xref linkend="event-type" />. Note that
-<constant>V4L2_EVENT_ALL</constant> can be used with VIDIOC_UNSUBSCRIBE_EVENT
-for unsubscribing all events at once.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>id</structfield></entry>
- <entry>ID of the event source. If there is no ID associated with
- the event source, then set this to 0. Whether or not an event
- needs an ID depends on the event type.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>flags</structfield></entry>
- <entry>Event flags, see <xref linkend="event-flags" />.</entry>
- </row>
- <row>
- <entry>__u32</entry>
- <entry><structfield>reserved</structfield>[5]</entry>
- <entry>Reserved for future extensions. Drivers and applications
- must set the array to zero.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="event-flags">
- <title>Event Flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_EVENT_SUB_FL_SEND_INITIAL</constant></entry>
- <entry>0x0001</entry>
- <entry>When this event is subscribed an initial event will be sent
- containing the current status. This only makes sense for events
- that are triggered by a status change such as <constant>V4L2_EVENT_CTRL</constant>.
- Other events will ignore this flag.</entry>
- </row>
- <row>
- <entry><constant>V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK</constant></entry>
- <entry>0x0002</entry>
- <entry><para>If set, then events directly caused by an ioctl will also be sent to
- the filehandle that called that ioctl. For example, changing a control using
- &VIDIOC-S-CTRL; will cause a V4L2_EVENT_CTRL to be sent back to that same
- filehandle. Normally such events are suppressed to prevent feedback loops
- where an application changes a control to a one value and then another, and
- then receives an event telling it that that control has changed to the first
- value.</para>
-
- <para>Since it can't tell whether that event was caused by another application
- or by the &VIDIOC-S-CTRL; call it is hard to decide whether to set the
- control to the value in the event, or ignore it.</para>
-
- <para>Think carefully when you set this flag so you won't get into situations
- like that.</para>
- </entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- </refsect1>
- <refsect1>
- &return-value;
- </refsect1>
-</refentry>
diff --git a/Documentation/DocBook/media/vbi_525.gif.b64 b/Documentation/DocBook/media/vbi_525.gif.b64
deleted file mode 100644
index d5dcf06f2aef..000000000000
--- a/Documentation/DocBook/media/vbi_525.gif.b64
+++ /dev/null
@@ -1,84 +0,0 @@
-R0lGODlhKgPIAIAAAAAAAP///yH5BAEAAAEALAAAAAAqA8gAAAL+jI+py+0Po5y02ouz3rz7D4bi
-SJbmiabqWgJs475LLCt0fdy4oeN9/QPuEEFZkXVcJZXDXNP5pC0TgGrMSrRMidhA1/uNbB9j2CZ8
-Kc+qHDXDTT2jK3BuPau13vFpdmc/p6Uh5SeYoXMHyFNomEeYiNEVKCFFx8Wz2Eh56YWp2bfnGXk1
-OEhaKnem2rYa6vp3KIqaBhULmsk4Ufc1KTbq4rfbhxkcOQx22limZ4P8STYH3PsGu8pqe439aw36
-eji9qT1rGCpraf5MkQynyJeuG0c73imvLYzuUAwF/P6WTK8vHDdj2Qia8hYL4bF2o/CpmydOXa6I
-uqQNPFepny/+d+cM0qsH8qNGCI8M3gvG7KG8iSJJVoNIp1w5h/C+gSPjgWE9hR0Lqmzp0RFPjLV+
-hoRki2XNPJyCVmy2U6KnHm6WnboRcOPFkS59xqQpEKZRpkDHfi1rdqlXgTMVKVVL7h/cnmi1rtxq
-t27Yn1n5xrySUi81iYAlvR2MN23Fm/nkyHzp9G9iSof3Ps1pE3PmyV2dhaSL1Jiee3/ZjI5Mkhlj
-xDPXGnkClgns1pxV0K6d4rbYF7pRv44CW7Dtojt6f/YxO7hxrrmVJ3/eZDnd4tCjVw+OPbv27dy7
-e/8OPrz48eTLmz+PPr369ezbu38PP778+fTr27+PP7/+/fz++/v/D2CAAg5IYIEGHohgggouSNFv
-1l2HHIRCACehgw9eOIR0001I4YVq8MJIVZItUpJiG564GG75VJaXb5aVthtljwnV1mauyXijVqtB
-FVRoK7Foxi0kNphaYdhYNRUxQMZDWZKd9IXTQTmmFluUDQln5TcqBrnlYEOhaGJXNZrUpR24sLPN
-kC6uaBGWMywERpWISeUZacIE5iZH8OApJ3FrtvhnY5AdR1iZVOw4p1BTZhljlGNG1aijfgIKl4+f
-kNZjoIL2ySOacX4kYlyyfDgooWBSWmikOH15mU5ksfqiqUVqNsySXN7FqZ5jWdoTr7sSqaOtTH6Y
-EajMNZX+kbC53qopDDMuymhprgLbGaTUbgrtm8smCqOqQRYbZrV58vijtzZgNW2TTHZEag7rHFuU
-Pp4aSq6sc9EJa7jinpVuq/Ruy+xSj9KibL0YyRXrXr7WlC+242qrDMJsEYYSVvAiUzGJwg7c7BqI
-GjyiuQ5f7PG/7j57VqkpqryyyJ0WDDBxC29ymr3+YFEzyRpLE5qG91qYYYVAR4hh0B0WTbTRR1Mn
-NBKTDs0h0lErTTXTSyddNdZabw311ET7nLDTTct2tddmn82bc2V3zbbYazMId9xyz0133XbfjXfe
-eu/Nd99+/w144IIPTnjhhh+OeOKKczcR2CYvDnnkkgf+XoTF2eUCs9uTb85554MrVUjmJGDuuMue
-n4566gKyxM+T2L37cNqqz0577QG2/ikpVxEie7LflW578MIPL1vroVdifOy3outkscD/THz00k+v
-ne46ApQT70o2ZWz1RT5Pffji2w4YWcqLkrzvMhNT/Wjuvy/6+PLPL/w/854vr+t58gP+vufySb8A
-CnB8phEBmo7nhDHwz3vQGKADH0jAT4UgVGZQILjeBsEManB6GqKgP+h0vtFtcIQk5KAJpqAa/znL
-Xc4CXv9KCMP2fMyA8fvDDCdYwzbg7IQbwZ0IqeHCGArRbj4UwgvxgDJSHXEfIUQVEpuIqiLycIhU
-jJv+FNO2RCeJQ4kPuuIHUMi+Kb4piFUso4K8yIQsYm8cIlKj9VrQQyiqUH9mrOPm0DgcN8YsXoLQ
-Ix1HAMY/ArKCdiyk5PDYHD+6qo1dlOPItIXIG0XSkJT02yR5qEg2EqyRHYyjzyrnyEqK8oyhTEgj
-7bFJo13SI2EwzCdDhDP4yXKWtKxYLWWJsVu+L5e6rFkv4bezX9pSmDd0XzdgZkwa7SJnFDMNMX35
-TFdGM5jE5GU1o4kn1WDzmXbg2TaFaSZrgvNks+ymOL9Jy3DesGUiSd5wmEhGt5SiHUipp+naCZL7
-6ZOV+WyixMJhT1MKlJ+CFCP2nmexf9plCZZbJWT+3Cm7MJIxSfGcp0WTglGC9CtL+9RERz3aT3pm
-FFeiuShBHcqNN75ToqjkaBhXqr8XJnSPIC0oHP2JU5FqdKQ2g5jyLNerfgo1qDolKTlMmsqTlrJa
-Km1OAmOGCKa+1KkstRBEUdDQpUpqoEk1KlF2ei2fftQoYyVrSFERUK9aQp4tRakmbXrTqtbUpXD9
-oVw1d9UTZLWiXO0jWnn61Y7xca5mJWxhifpXsKr1IWxV6kQPitc1GnZOTcVqFhRq0Lxmdqp6palb
-L5vYxQL0nkA9rGnVgql9FvWoiu2qX9uqVWxVtrNP/em6lsdZ2t6VbE9ap1B9y9qS9jWwwS2uzvD+
-OdmFDjWoIF0tcZ+7VqTWFLjMpS5Ri6krsaoJpt6M2hFLK7bGuha6DAPsqSi7XNSmV73NDa1xVSLe
-1xLUqlaLbViWCF7vJu27ns2pe8k72rCSq6z3XW+B22ve8rZWvuM9LW/xm13LPo2q9mUufScU3+gm
-OMCiDRtukytVEIcYsRuO44I1LNz5RrTCytXvfo/G3wnTNsOM/S98S+zED1vYwS0WsWxxGkLMbjXF
-DWbvhV185CS/GMm9ky6KOywmHM/xxz7WMY97bFbn3vjENR7ulSVM05QumcljXnGMabwnGysYylO2
-spG/TOUqo1fLa35vl4ksZ7uyeMRmrq8akav+5OI5+c5sFlRaezpgA/P5zXDGLZ05bOc0e5nRD/Zz
-mfscHWYiQdNKAK6n4wfAxSTi09wk5zipqctunvqct1T1L8P5i1GLLtTsdMRBrBvrHNoE18fEL6dH
-CexgC3vYxC62sY+N7GQre9nMbraznw3taEt72tSutrWvje1sa3vb3O62t78N7nCLe9zkLre5z43u
-dKt73exut7vfDe94y3ve9K63ve9t7SBkNdH47re/9Qq6CAP63wQvuGZ2mYneFoPWBm+4w8VUWiMB
-5IIPr7jFX2a/YCZ8zxfvuLnf1VB5QcnjJDd4YTKucN3xuuQsb7nLXw7zmMt85jSvuc1vjvP+nOt8
-5zzvuc9/DvSgC33oRC+60Y+O9KQrfelMb7rTnw71qEt96lSvutWvjvWsa33rXO+6178O9rCLfexk
-L7vZz472scG0vllD24rZzrW28bbtcl873N2uObUfqkQzJFaJPAO9Fm53W34/mcbO+7/t9j1ksfzY
-MiUO+DaXDPCLT9VpKr8yZnpQDM50JcmkyTOdNT5Enx8mxhAPaxApq/CULxjFV9S8kT9yhWts0zL4
-JVnX44uigl1481Cf8KsI3Kf+Er6biMXS18/+gy2JJfBzFw/Mc35U0NcXJxAh+4A1ENC69xdoER38
-34Mf+sZvF/5OP3yQ+QKAt8+14Z9/2dH+H3dnh4d/Als5f1MzMcdsCoj5SfwwqXVb/Mca6qd9WBaA
-R/J+1qddDHeAUZZy85c+mOcp/ndc5QMqGyMawrd5ACVx/8dYKrcsFQg7DAhEu6NAG7g9q3cU3RN4
-zBJV9jdwsXM/GQiCRuZWNWh7Msh3QmaAhoYSIyhja1ALbQJ/obM+L0iExvJry8d8LpiAuPdSN7h9
-3VOD3kdHW1AVsOOAxEclTySEIIQOHViF7XSFZQgUVFiGj8CCYpiGR+g8Axgt24c8Q9gpvTJbHjZg
-IjguFJQVZChbH2h/2rODJjgqxieDGTiFevgyFKWGAYOBj8gtVPF564IpLRKJgziAgAj+ieFniNxX
-fUo4LPcXhn2YEqMnif+TMYNHgKoWeTTYTGoifZzXeAsoivpXJ2f4PaHHik7oMZ1ni4yIi8fDib+I
-gen3g6pohE34gMa4cbO4ixJkh8m4d0HYi5Lniq1XjMqojcqgd2AmNXVnd3g3juRIYXT3dnGXjuZ4
-jl/zjboVjuvIjvB4d/NoUOiYd+qYj/Z4j+6IQXNXj/IojuAYkAK5j/yoZwV5kAa5kA2Zdg8JkREp
-kRNJkRVpkT73ZxwnjASpjwCJkIP0jv3Yke34kSAZjww5kPQ4kiSZkipZkhOkNifpkOWIkjQ5kzZJ
-NqyXi9uYeIrXho8TZtTlCjnEMfn+Z07jN3n3hIuC1ZNKeY2JiD6Zs0gC5iWzliav+Inv51vKx3wo
-WIrTV3uh2IqC9zjZN5ZL2DBgSZW+iI2GBpTT2IwmtpajqJSGIY232JRbuQ1myZZoKZZZmTt8ySV3
-ggapWEHRAJjU2JaL6YVMKYepMpe/GJlH6ZTI2Jdu6ZRcuZGQBJePqTCTmYRG2XyO6Q52Ui5QuJn7
-sA4amC2XOYeJCWukWVugeX2y+ZeiGZSO0ZrncpdGWYKwOZq2mV94SXwzEyymCULIo4u0h5rt95ZD
-uZuuyS2xSJuNeZZ3WJlhBmRQBAhCGVrLmRfGCXF1yTyg2ThkQlZ5eJ3lWYipOZ3+UKmd/uSDrwmf
-ciSY76kuacmY+Hk9lWmEwumJ8BmDSBl9/zKgpEmI6CkjGcOM/MmN3QicnRmX0OBpuvmW3GlD4jkr
-QEmUFuqfHXokUjkPGtoYDSqd+meiE+qMehmf0ZmQComTHtmScSWTMWqjHPmSMPmPMhpRGemjMYmP
-N4mjM0qjMHqjLkmkL5qjIPCjLXqhLqqkSWqSQXqkSFqTLHmlVpqlIrmkF+mlXwqmYSqmY7puiEim
-Zzogj4GEaMqmAIIQmtmmcTofbyqhcqp0GSlD1gCndvpvuqYldSU3dOqkfJpun/VFt1md5sFQjOKn
-hFpu+dObKVMXUnSMx5AfDBX+agfqqH0qQQtkCrMZf81gqBvnmemBTZtacuCyp98yFbyAD/NJSLiD
-p4dKoSuHqu62qJHqlpTYJ5AgcvKBqbfqclroUOUZBynoFP/pHrMqrI8KL2CErB1YQPHBrM06bjwJ
-lxsDJCkkqgD3WNZ6Ro16lT5gq0JCnBPGrfs5SerJcaOKm+BaH+4KC5kkZoR2nTTBrixToKCESTwK
-r2mqkatySi1lr/uJr7nFpJ6kooMWpf8KsHpErwQraed6sIAKLez6SQHrsHAjr6wQsSpGMzzIqp0U
-ZfwKR9W6sfzRsarwsXnWrYDJryurohjbWSibsvohs5MmaBI7se45qQhLq5L+YrIiZLM3ix85i2e/
-oRMHJLJesmfoArVPyWqldnivNrW1hGqvhk5Xi7VcW05ei0u9JrbKNLbS8nioyE1bC7bAtLYIt7Xo
-BLfmBLdWW7Vz20vq9E2mFrZ1u2qihrcdRHq19Vj5CoaFVqIMC2kAdq/U57KWqGh0hWBJu2WG67Q6
-y11AO6WEq6O71WjIhbRSBaubG1OVZrH7R7lAhLhyGWmLO4MHtmOUhoDqhWaJO7mru34YorlBC1mV
-Frr8RmWf61K9q7uaRaO5K1m26xKzq7qKa7CM+7qu27nadVaWC4GnCxXKS2HG+1CYm7nHG717FVnC
-Syuje7mlq0XIK7DUO2T+6Luwvhu97gu7iya7qVu97Fu5khtZ5ju+2ru94uu8v6ux1Oe/BUG8ema8
-A+y9T8Zg9suZCGqZjtu4pfm4wUu/68u8FqzAFwa8H7bBjgZVyAi+vDuo8xvAIVy/F5y++Eu7dZaI
-wym/sQvDL6xc2IvBLFy7C6zBJfxECPV9BIZe+ru/CZy96DfEWHm/DDxGFYyZ1luqcfa+EPy8MQy6
-SsyqXLbCPeti5fq74gq62JWtSMTFwavFUgyPFShlKVxkV7y8ienCkPvEEhzBEkzDS4zEBaq+ZXxp
-+RtopEs1MQYwCIzAQJzEZ1zFBPq/8evGiOzEWUbFR4zChZzG5bvHkoz+aWRmyZRsw5mMxRl8w51M
-sYcMvYrsZqFMwiq8xpp8yptcyavMynw8yXrcyqksy7d7x5D8yA46ymScyzKcyKUcySfsyWpMy5Z2
-yZjsyrGMzOBoxlYcsrXsyMHMum28yKSsyz8cub9cw8Kczc1MzK+szHl8zMX8zXVcuNh8uIT8zJ/c
-utUsvVHMyxTszA3MxOWMw8mMx+BcxOIczsY8y9s8zOZsy9DcvOv8zrvcy+zcgI0sz+RsugBdvPic
-z/Z8zxmSoqNT0aq4a1JiI92Q0bm2aqeqt3cb0qk20q1W0iYttbR4ax3N0RsNBBdNQ114QjCNQzLd
-AjRttDmt0zvN0z1u7dM/DdRBLdRDTdRFbdRHjdRJrdRLzdRN7dRPDdVRLdVTTdVVbdVXjdVfVBkx
-+APSnNU5bZaaCsVfPdQnR8TkJwlnTdZAnSwXJIidutZBHbhrqpqnuKpx/a9c3RdvndZ43dO+pCSY
-E9gqF8bNWgAAOw==
diff --git a/Documentation/DocBook/media/vbi_625.gif.b64 b/Documentation/DocBook/media/vbi_625.gif.b64
deleted file mode 100644
index 831f49a02821..000000000000
--- a/Documentation/DocBook/media/vbi_625.gif.b64
+++ /dev/null
@@ -1,90 +0,0 @@
-R0lGODlhKgPIAIAAAAAAAP///yH5BAEAAAEALAAAAAAqA8gAAAL+jI+py+0Po5y02ouz3rz7D4bi
-SJbmiabqWgJs475LLCt0fdy4oeN9/QPuEEFZkXVcJZXDXNP5pC0TgGrOCqVMidhAVdqVbLmx73Wc
-FXfNabGFzfbG3Rz0bDO/2G1hzJ7o8ceT56dB+Gb4JciD16fnh3VI97bmOCE4tyhVUSbHKOlg1xnp
-6aWFKDfaecrqQlrK2vqK2bjImPFaiLuKuxvY+2HLq1tniHcLzFmWy6mnitxMeWs5iaZo0xZhTahj
-rdzXHa3m6Eod+h1+LW7MXpx83P7962y+ju4O//5oGr8PHUvs36VjoCBsujTsxp5t0MIB1MZLYb07
-CBt+QlWRHz/+Zto62NLYD+Ouj7Q+ZlMj0J80kCr1iaSHT6WmeAXPAXOVzNs0hw8fHAwzkeLATz9E
-xVo2qCa2o7AA9Wz5cmXIgFAhKu2Yb2q1rFSrDmUZFeUgrQaLdhWriFZKGKt6LNTSlopXthevrIUB
-d9rSp6FGcbnLwCRYe2ELo+VK+CxEwF9XkoypeCtZn05dTiqlNupMxnyWxXkL17OVtHz7loMTdO+4
-pGsMsz0dKbVcyK7LXsWbyKSweTA95qatDHho4T7TqqsdWN1toaFbExNMHMkTzimgR2cSZfpgI9qt
-T8aePbz4IQebeLcsZDz56ecjv2g/9z37+fTNd6+vPb/+/fz++/v/D2CAAg5IYIEGHohgggouyGCD
-Dj4IYYQSTkhhhRZeiGGGGm7IYYcefghiiCKOSGKJJp6IYooqrsjidyrAh9yL+K2nng/31WgjjtzN
-mKOO8lFHxhlJxRjkkEY2tloWy51k2mxAVoaQQkImRiRuIyEmD5ZIomeVYMLIZhMkS6rWm4vJecZl
-cWBsRomUz+Vlymg4bWflYnGWo5FOGZ02FphPYmbkmHQmRxRSgzJXpntl/UlmcIca5ItvilJJx2OS
-TkrZo5k6CgemfBDFKJPF7ZRTIZsMgxUip4qKKFN5UropSKD54xasW9p6a65VBiYmb/dc2qZuwMaH
-laXvZEb+FbKPCKpkm68KutBoTshZWpN6MRqtm6H+8ZmTulabqplhXikuNtBhgqqnM6SLa7jE2nZd
-rGzK5CeUqMxJq6l2YavvTn6yGVG7zGn77aZgvOvuruvGexnCndXLq5YCC2Vsmg2LUzGcTSm8r7fg
-0pUKxMgwdOdY/O4JaMkFf/pqyiv/Jau9CY/asqatOlwnzuM6JvHMOsPsZaQZ/3zzV0NfdnS4HL3c
-KsBZpnIk01NCHbXP1o4MsSjgyAzp0xsddzHRHqOz2289d83wmb46e/aibauZNhXGMWuz3KjNG6Vz
-+fooHY/p8Q0ejYDL6PeO9hX+4+DVsRr4DjByPMLjE5v+ILnUJ1Qe9t+Cb855j4d/jrnVfSuOQuii
-N+5555qrbjjrrTt+Y4uyz0577bbfjnvuuu/Oe+++/w588MIPT3zxxh+PfPLKL8+87rWGYLqI0TdP
-ffWwM249oXKDgC/y02cPfvgkkPJ97t137075HKovfvvuQ1KXh9zKJ6V37A7P/vv6739Oa0BFnoRK
-QG9+2PlJMLDnu/zxb4EMxJPJ/DLA/sXvF0EogsgG5hQDkupeCOydAhkIwvcdAYJeqYdfymOMCvLK
-Swe7yKqgkLU4dZB3AaRbCG8YwhrOEGazUaHJNuKboqjQaRBMSDrqBkOu4W9uTAQbDp8IRSV2jFtm
-2Y7+thwIDyzi64VIBKIMvQip+/Gwit5Tkw2jiMbsGcVRPfyhBTdGq7gY6ovoG1UL6ximJSwtVLjT
-YRr/mMZZFctJRZSgLswiR73gMWcsqw0Jx0a8DwJyksAj4CCjRr7T2aSCiQTiIiMGsvg8UorBkyQl
-T7k7S3aNXQJEm2lWxcl9bRGFnWFM2TAIyuOZEpUpOqNHLhgMX9ahXqq02xZTQrCdRQyWdpolq+Yk
-uTdqMoG8BOEnZSSsHYLRRmukFAnFGKOA2ayVsBjhNkUgTVcab5fVNNE1F5fNk33wnY2y2iOBWbQ2
-8rFj9axLNBmZy3W2c4H0vFwXcTmUeXaxmBmUlf3+LkmSdJprn5kb50AvWruCUu6g3gKNQrtZmns+
-dJUU/WE/6bjRgAIUoyx1J0e599I0eNQ+INXVPaEH0ZTeAZzE2QI7WwrU7Hw0KzNdT00rOkqckjSm
-9jynUvMJyaBKVX5MDSJN9jHUj+UzqTCdGtWcOECJyAmf8CqSbWDTxLSiVa1MZA1b5+bWt5ImZHI1
-Dj2YZddgiSyvel1rXc3w17bSNbCiIWxhDUsGwyoWbNdYrGITO1jCJjatRXIsYs/gV7betbJkhZtM
-ndqChkaPJ6fYTdk2g9pyQUmVrJVJQDS6Qnak9pBX1RxXxyfa2o4LmoG7LW6nVdJjgfa3imzc/Ez+
-K9ubKNdiuWytSJz7XKbCliKzxapuE+fJ3k5wHVOoX3AB4tvIAYKnxEUp4Yp7Xj5Od6LLtS5tmYtQ
-8Lo2uq5Fbns5+N7Xei68T82ufl3J2/Tyt78Bxm6BS5fb9HJ0vXI57X2jcUv50pe7842uffOLX/f+
-t3UDPmAS59Xd8X63MR32sD9tO1zxfti4y0phcjEMYdV+dsISpnB9XfzgVuS4xgberk79S+Pdphid
-CRbwkEML3KpKmMH6OC6OYaxjKGtVNdDlMYn1e2ENZ3jLQdbuFxe34grL68hdRa+RyaviQo02g51F
-kpN74WApV0rGFumy0sQs3yxzOcpatjOY/eX+Zbols06wCXSbrwzWPyt5w9hdsHQfHVM0L5POMfPz
-mC09Zj3HWM6XZPToFo3nT7Nv0F7e3KhJ+WNHa5rPe04opUkN4FDf+cZwfnGfWY3pH59am2UGda51
-PZ5dj7glb+4Xp5d66yl3VNax/nVzHx3nZM9ZuCiutrV7vN9gZ3t1xW7xjqct7YoK2dlUZnasV+3q
-Y2cqwsL2tY2vLerrDfu68ea2t40dbmS32nIzfreVkYblJ+d73d8GOLxLzeFtHzzhC1e0qgW+705H
-fJrlJveyLb5sdIN74gSnNsM/DvJ6N1zk2H5dt1Vla45v8tWofjbG+01hjUt80wO/dMgRXvL+nOsc
-CHM1Qs/fw9fhkEtMmrBhovMW2Mn+Vel1Zbpcnf50r7KN6CMpOj6DjoSfZ/3o1dG6Erz+da5Pdexk
-L7vZz472tKt97Wxvu9vfDve4y33udK+73e+O97zrfe9877vf/w74wAt+8IQvvOEPj/jEK37xjG+8
-4x8P+chLfvKUr7zlL4/5zGt+85zvvOfx7sNrXfzzpC89gyQB6zqbfvWsL9Bh7xgyNbd+9rT3zxwr
-3aly1n73vAcdMw7rxt4Lf/iE4+LX2rJH4it/+bLNvSI7JXbmS3/61K++9a+P/exrf/vc7773vw/+
-8It//OQvv/nPj/70q3/97G+/+98P//j+y3/+9K+//e+P//zrf//877///w+AASiAA0iABWiAB4iA
-CaiAC8iADeiADwiBtoc4n+Y6FChvFYg6qaOBG/g6HNiBq3OBE7gua1I1FCd1JKhsXkVa4jaCPRRD
-XoOCKUg1MMeCtVQZ0RdVZQVD/+I1dzImWsMT0AKDUmeCR3I3HHOELXdSahMoP/g0n/GCUdKETvgn
-5MMnJ3MYX4VFRQgoUChIboMmybdSIHOFYqhSfQFoJlWDQGOEYjMLs2A5b7iC6kQzaCJ6ayhLX6VN
-JONAgHVUdSiHu2KFPoaHD5QykrZDsYEq3VQSUzQ5qzUyMniDOTiGNoeFGPE8/DZjQjj+XzhIiXfm
-ibymegeFLBqkiZFYM4XoMXqjiqNHiskSikqIKIX2iDA3K9mSJ9QiiZmAiq3YhrIIjCoYjOrFilQo
-dGamibzoMlxoViozBrhIg8yojDOYjM6hi9XoXZcohf/whVaBWYi4LZXQh7WYhNsiil9Gi6eIe4lY
-KsP4Um6yV+04jKVIV7U4ilVIVKkYKzXGUAZHS3QoGbEniRv0j/tYWpmojqT1h+5yTANZaY5Whc8g
-Q8QEJ/AIjlrTi+aIMkn0M7lgKAupPQTTjWiIexfpDBZhhp+4PQ/Zj2TYUNpYh81CkRsJezKYSUt4
-hi6piDBJkuOYkji5ks5nSUA4JZz+uI1KMpPHyBIjeTVqBpKvcYNRmCTRCJBNmYtPaZV22Ip5cHv8
-xpVEWJVQiZRMKZakYZRS+HNkyYRaqJYtaIRS6Y0zGI/zRmlEJoIKFoIeaIF6mYEg6Jcf+JeNlpd/
-Y0qFGTsY2JeCGZiKCZiNuZeO+ZiMCZnnZZikg2CWaVCYiWSaWV6I6XB8mZiRKZmiGYGlaZqniZqp
-qZqryZqt2WuDOZl4uZikKZux+ZmzGZq5WZu2mZmc2ZueeZm+aZfC2V+wyZupZpy0eZu4uZzHuZlE
-OYUK85UlaJA6uJTSuTXU6IvTeJbwpUw9CDluKTZAWZ3N8TZiWZdulZ7UaY9s6Z3+NqidDjmNmFiR
-ntAtKRiI9qknh+GFgoh842iTqvCR7QmWDmmI79mT6hJKCgpVBkpm5RmewQWODRqSP5mTMWmhFLow
-XyOPzdBCC/VfBVmJBqOS5BlfIPomJeqOGvqd40mX71gL53km8RQscdOi6siRCHqiOMqNDGouwCSi
-TUKCSXmUYLSfRzmHYYmeD3mK98meI+qLKgqhUbqWBEqIDpqhUOqS63mOXfqkPJp6SgpgF+RgTnNv
-6Uil8MiOKcpr9AhHzNgsUjpiSZMRXGqidzqCV7c2ERqkVLqicroXdEozb5qQZNSeikimiSiROEGk
-YMhm+FifPTo5v7dPGNkyWTr+pzJ6oQ6ahy76p16KqSy6oYLqp6DqpTB6qqU4oeeIkBjzhDv5iNMZ
-n1NapUlKq/DplOT4P1+6qTwqXbEoqp7lqakao5qKqz66klwqTFQkWJAzV0Z3V31KosT5msmpm7up
-nMH5OcCprdaKrdn6m9yqU5W5reK6meUKms05mteqruwart7aru46rncZr99qr/farelar/mqr+/K
-nPvqr//qmgNLsAVrsAeLsAl7O8ansNP3U9ZjKaHasID0sNxTsc3Dbi86sfxzaPzRsZOUse62sR9y
-Ho8BI+RUp1KhhlMVshc7sgMSG8N0pUGZi8HET2KRYUxGSS37sh60jMuCZgD+Sqgn6U+xtLLTJqIS
-5bInEkD7+LE9qyASQShBCBX3g0j66KHFZbRDS3CkhkfQtLQu9UqGKrJQmyD+s1O1MpciRrYn9opm
-xkrPMkO0VEVqe7QNdFlm2yIFpoxusap1ezO8lTWdFJVu25U3qjKpeDBhWyI1BKx6CyJJJWltyahW
-dCrRgowf9kKH26s3qXrSAkV+BLm086EvKaYNirIZpyqlK2Lsxbmiij5xG7qjKzwh9oxA8k8eCmtf
-m10+pTFXyrgkEry0GyDd5Q2ykbtmtE1DtFN2YUGY2ranyjzDq3ePi05PO3U+IEzF6rsV8byg25mT
-BpJS+0aryqnTe33mC1P+WUVv+iYE6otUMzss4utNpuu6yGlN6auxWWtUMbFGWZW8S6Gza1hiXHJg
-w4lD1Jt38EtBNOW/NMdN+ysqBYwwFDwXB1ycxCsgDGxV/du+7ssdHAyhFtwuJFy/Ioy4GuyxEjwQ
-7OtpMxfCLEwnJvwyNGxTD6qjKkwjLvy++QjBPVy2UmTD0zTETYXCWqrD9MHDMexxuMbEAdxGAZwJ
-sNoCQOGH2MtZjhVZSWdZr7d0W9x00cqseAV2Z7VXz2pZYNx0XRxXSafGXRzGUwjHbwVZcxzHscfG
-39hEWWzHalXH2/saYsWrxYqSMnxxA6xyhoRviTxpyMqkV/Zy9+iPEMf+v+q2cqaGw8BSxEsGaZyR
-jWsWZmdmyM92xLOGyD9cyfdWc7iBN5Dsb678b6ZMyaWVcqjcY6XcbKfMySAGiqO8iUFMaJncaxh8
-rpucboucyoxMXTksybP2ygZnYzIXRrXsxLfsy3Wmy5A8wGH6Wbh8admMaNesusCMS+AMw7RcawUH
-wgm5otzscs8sy+mMzNW8cSjmzeNmzrkcaUr4yYFGzhh0z738z4c80PaLcvK8yo08nu68o84cy/qM
-0Adtyay8rcRcXsY8yW56buKsptPTzwkX0C6Xzx03zy1MzcccngxdcfDcbNE8yyatziSdbSFdzgX9
-yxqdaRxdZIpm0b/+iaY+PcgeJs2UEW3KjKeQGMmPDM2cHNHJbMv1DNKAbMpYLNKJ2kH1I9W5TNWk
-nNWwTHJ9M9SKnNDL7Mgq7YpevdTa/NJuUNRPjXNvbWQKt3NwPdc8nSNhjRdtTc9wqtQOjdZ+PclN
-jc4TrdBy/dV0bdcjp62SZNYEdtdr3RF6jdKH2s6VLYqN/cuCDdOETdYX2G6f7dmGfdg3F9c7gtex
-FdOXvNCWrV6sDZF3KNGqbNT6FNqKDWyiXdqkXdeL/diazdYnDdXsfNmuXWVq7duRDdznPNqJrdvM
-vdu8XdG4DWan3bypTdFlTdzmNm4ufdzTbN2FbdvFLN3OvdzkHd7RF93bJf3b393ZKZ3dSY3Z2AzZ
-3s3ZAhzd551mNv3Ozw3U5lHGpfPfl3NGA351Rmfgj6XHd7xYUKdZCR51rGE2vVJ1E04eAU45Fl7F
-1htMGv5LHN7hXZ3EIS7iI07iJW7iJ47iKa7iK87iLe7iLw7jMS7jM07jNW7jN47jOa7jO87jPe7j
-Pw7kQV68E+EQhqrAQs6aZmirzYzkQC4aAmmIygHlTS7kP0G3gRJ8VB7kAGCRbQB8uqflTu6Ci4jl
-ehjmPs7laf58XB7Fau6DR56aBQAAOw==
diff --git a/Documentation/DocBook/media/vbi_hsync.gif.b64 b/Documentation/DocBook/media/vbi_hsync.gif.b64
deleted file mode 100644
index cdafabed5c11..000000000000
--- a/Documentation/DocBook/media/vbi_hsync.gif.b64
+++ /dev/null
@@ -1,43 +0,0 @@
-R0lGODlhBwHJAOcAAAAAAAEBAQICAgMDAwQEBAUFBQYGBgcHBwgICAkJCQoKCgsLCwwMDA0NDQ4O
-Dg8PDxAQEBERERISEhMTExQUFBUVFRYWFhcXFxgYGBkZGRoaGhsbGxwcHB0dHR4eHh8fHyAgICEh
-ISIiIiMjIyQkJCUlJSYmJicnJygoKCkpKSoqKisrKywsLC0tLS4uLi8vLzAwMDExMTIyMjMzMzQ0
-NDU1NTY2Njc3Nzg4ODk5OTo6Ojs7Ozw8PD09PT4+Pj8/P0BAQEFBQUJCQkNDQ0REREVFRUZGRkdH
-R0hISElJSUpKSktLS0xMTE1NTU5OTk9PT1BQUFFRUVJSUlNTU1RUVFVVVVZWVldXV1hYWFlZWVpa
-WltbW1xcXF1dXV5eXl9fX2BgYGFhYWJiYmNjY2RkZGVlZWZmZmdnZ2hoaGlpaWpqamtra2xsbG1t
-bW5ubm9vb3BwcHFxcXJycnNzc3R0dHV1dXZ2dnd3d3h4eHl5eXp6ent7e3x8fH19fX5+fn9/f4CA
-gIGBgYKCgoODg4SEhIWFhYaGhoeHh4iIiImJiYqKiouLi4yMjI2NjY6Ojo+Pj5CQkJGRkZKSkpOT
-k5SUlJWVlZaWlpeXl5iYmJmZmZqampubm5ycnJ2dnZ6enp+fn6CgoKGhoaKioqOjo6SkpKWlpaam
-pqenp6ioqKmpqaqqqqurq6ysrK2tra6urq+vr7CwsLGxsbKysrOzs7S0tLW1tba2tre3t7i4uLm5
-ubq6uru7u7y8vL29vb6+vr+/v8DAwMHBwcLCwsPDw8TExMXFxcbGxsfHx8jIyMnJycrKysvLy8zM
-zM3Nzc7Ozs/Pz9DQ0NHR0dLS0tPT09TU1NXV1dbW1tfX19jY2NnZ2dra2tvb29zc3N3d3d7e3t/f
-3+Dg4OHh4eLi4uPj4+Tk5OXl5ebm5ufn5+jo6Onp6erq6uvr6+zs7O3t7e7u7u/v7/Dw8PHx8fLy
-8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/P39/f7+/v///ywAAAAABwHJAAAI/gD/CRxI
-sKDBgwgTKlzIsKHDhxAjSpxIsaLFixgzatzIsaPHjyBDihxJsqTJkyhTqlzJsqXLlzBjypxJs6bN
-mzhz6tzJs6fPn0CDCh1KtKjRo0iTKl3KtKnTp1CjSp1KtarVqyQBYN1aVSvXr1C9gh2rVOxCsV4B
-mE2b0GxDt2TjtnWo9l9du2rrar2bl+BavQL3ApZLeC5du3j77g2MF/FAtIv1AoZb+Gfey5gza97M
-ua/ByJ4XI8b8+PHl0ZkrE6XsuCDr1xD5ip7d2m9pv6IZqxYK+zPC3g/T0mabGLdk4YEH7wYK3PZB
-yqyXSw/++3l139OzS4R+Hbtr7eCp/nv/bp18+PMKuZcfj7792fXm47ufz/52fd308zu/X3u/fv3N
-+Sfgf/MFaJ98BLpnIH4IJojegv0d6GB7EEI4oXYVdnfhgxoOyOCG4WXIH4jTidggiSV2KOGHKGa3
-oIUtqvaiijEuNyN8NUp344g5EqYef9H1KNePJwYpJFlEehjhkT7iuCKLTMZl4olRgjWlklV+deWT
-WWpJ45JgdrnVllCKOeaXMJrZFZpfqmkVmWG6SRWcRsoZFZl12hkWmzxemCdXeAr555lOgjnof4de
-tSOVG0KWaFl3GVponH52ZumlmGaq6aaY0pjmhJppmRqQbTaKm6gewgnio2uSOumq/jpO+qmDrE5F
-p6AtSZZeSrf2WOtEoZEmm2C/Astnn6CapKtjbClWZki95lhsbLcRtxmlHkVb47TBWcuYcGvxeiyj
-fp7kGbOJEZscStrGyK1T7bb4blPxojgvU4Hiulu+vto4Lpck3rvUoljCuq+npZp6cKGz0uovwwmX
-u3CRESc7sZINJyhwWbJW7PDFXGZM4MZI1WsvyCF7rDHKZYqMKMuSvmqwS5yOypHJAcP0K8k4z5xr
-RTz/C7DPLO2crdDPEr2S0R31rDDNQB/dMbISQ01R0FOT+/TPV0vtqtZVc21s0wjLLONFJG8XNdkQ
-y5z2UNy+TW3XbN8Ho9xBxa3z/to3lz0i3nljBPhbfG+UZMoqG5db2+KJ9O7gDDHd99dUstpscsgR
-x6CzqC0O0uN70z05xVlHdNpwgvUHGWrFef5RppGHPjawNddue3nB5nYufsKmu/vrhL/3kuRqq1Tr
-6pd/G+6HymGLdvC7Dl+46cYD7aywoSleXGOtj5RnnZALP3vx7Bb2J/iyk6++subTZanz2ZJ2te2R
-st8+9NaFHx/x1Jff5GFz0Z9/+Dc3c9EnSK4ryfLG1z89GaY6AjwQARvnQLfBr24XpFrizGSk+tlv
-aOJbXwULxj3/gTB6DBwhCD2oQLBtkIR66mAEVTe9AqqQhCzMigvNhsIbrnCG/m6ZIAB9+MPqwfCB
-IryhDI14QiQ2kIiUyqH3dqhBHtoJfSZs4gu16CYsGpCKYDyinLz4QS5W8YwcjF0WkxbCJxKRjC0M
-oxnlmCU46tA19BPiCO04xZjM8IBq/GL63hjIMloNitiS4uv+aMUxRk5/ihQXIhMJSUaiUUzgq6RM
-LEmhR5qLk2LsoieVBco5YnKUCiwlG2OIyqyoMoNpPIsm/TjJRMKya698JYZiB7kELq2W6OvlLT8H
-TF62MJfM+R3+lnnIAB5zk8zBHOZks7/BqEuXwXwmLS1DzestDnmNud5MsqlDZPKGWMkzT+9CBc33
-5PGd8IynPOfJwkilLp37/gniN8dZyDgOcienCadudnc6anavnT30p/SKokvH9fOO/+RmqxIK0YUi
-EosBNVz2tnnRR9KzUxyFYjAzqpHehZSQbdxYEBEqUhcVM0WTbGhNZBor+7xNj8SMaT7TJc1Tgcug
-Bf2LNZnlKODp1KYCbR64ujcZ0OBxe5FR3jAfqsSdNiujucMnPnl3uaxiraNI3ep3hro8161uNLbB
-G00fNk3abG+aAiXqcKqlGG8Oy6hgLang+HnUjERyiBFV4VpZitKa5rWEgKJjldgpKs5d9KOQjeym
-XkrSMdnzpYatpWY3y1l6NXGB3RlsZ9eDzp7ydKmnW1dAlTnaQ94zruEkS2tUnfra1iIUdRvlHueu
-iS7N2daic1VncEEz3N/6MbVyNU1TV0tUdL3VuF6aKnQhJdrpWve62M2udrfL3e5697vgDa94x0ve
-8lIkIAA7
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
deleted file mode 100644
index a2765d8ad05c..000000000000
--- a/Documentation/DocBook/media_api.tmpl
+++ /dev/null
@@ -1,121 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
- "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
-<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
-<!ENTITY media-indices SYSTEM "./media-indices.tmpl">
-
-<!ENTITY eg "e.&nbsp;g.">
-<!ENTITY ie "i.&nbsp;e.">
-<!ENTITY fd "File descriptor returned by <link linkend='func-open'><function>open()</function></link>.">
-<!ENTITY fe_fd "File descriptor returned by <link linkend='frontend_f_open'><function>open()</function></link>.">
-<!ENTITY i2c "I<superscript>2</superscript>C">
-<!ENTITY return-value "<title>Return Value</title><para>On success <returnvalue>0</returnvalue> is returned, on error <returnvalue>-1</returnvalue> and the <varname>errno</varname> variable is set appropriately. The generic error codes are described at the <link linkend='gen-errors'>Generic Error Codes</link> chapter.</para>">
-<!ENTITY return-value-dvb "<para>RETURN VALUE</para><para>On success <returnvalue>0</returnvalue> is returned, on error <returnvalue>-1</returnvalue> and the <varname>errno</varname> variable is set appropriately. The generic error codes are described at the <link linkend='gen-errors'>Generic Error Codes</link> chapter.</para>">
-<!ENTITY manvol "<manvolnum>2</manvolnum>">
-
-<!-- Table templates: structs, structs w/union, defines. -->
-<!ENTITY cs-str "<colspec colname='c1' colwidth='1*' /><colspec colname='c2' colwidth='1*' /><colspec colname='c3' colwidth='2*' /><spanspec spanname='hspan' namest='c1' nameend='c3' />">
-<!ENTITY cs-ustr "<colspec colname='c1' colwidth='1*' /><colspec colname='c2' colwidth='1*' /><colspec colname='c3' colwidth='1*' /><colspec colname='c4' colwidth='2*' /><spanspec spanname='hspan' namest='c1' nameend='c4' />">
-<!ENTITY cs-def "<colspec colname='c1' colwidth='3*' /><colspec colname='c2' colwidth='1*' /><colspec colname='c3' colwidth='4*' /><spanspec spanname='hspan' namest='c1' nameend='c3' />">
-
-<!-- Video for Linux mailing list address. -->
-<!ENTITY v4l-ml "<ulink url='https://linuxtv.org/lists.php'>https://linuxtv.org/lists.php</ulink>">
-
-<!-- LinuxTV v4l-dvb repository. -->
-<!ENTITY v4l-dvb "<ulink url='https://linuxtv.org/repo/'>https://linuxtv.org/repo/</ulink>">
-<!ENTITY dash-ent-8 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-10 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-12 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-14 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-16 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-20 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-22 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-<!ENTITY dash-ent-24 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
-]>
-
-<book id="media_api" lang="en">
-<bookinfo>
- <title>LINUX MEDIA INFRASTRUCTURE API</title>
-
- <copyright>
- <year>2009-2015</year>
- <holder>LinuxTV Developers</holder>
- </copyright>
-
- <legalnotice>
- <para>Permission is granted to copy, distribute and/or modify
- this document under the terms of the GNU Free Documentation License,
- Version 1.1 or any later version published by the Free Software
- Foundation. A copy of the license is included in the chapter entitled
- "GNU Free Documentation License"</para>
- </legalnotice>
-</bookinfo>
-
-<toc></toc> <!-- autogenerated -->
-
-<preface>
- <title>Introduction</title>
-
- <para>This document covers the Linux Kernel to Userspace API's used by
- video and radio streaming devices, including video cameras,
- analog and digital TV receiver cards, AM/FM receiver cards,
- streaming capture and output devices, codec devices and remote
- controllers.</para>
- <para>A typical media device hardware is shown at
- <xref linkend="typical_media_device" />.</para>
- <figure id="typical_media_device">
- <title>Typical Media Device</title>
- <mediaobject>
- <imageobject>
- <imagedata fileref="typical_media_device.svg" format="SVG" />
- </imageobject>
- <textobject>
- <phrase>Typical Media Device Block Diagram</phrase>
- </textobject>
- </mediaobject>
- </figure>
- <para>The media infrastructure API was designed to control such
- devices. It is divided into five parts.</para>
- <para>The first part covers radio, video capture and output,
- cameras, analog TV devices and codecs.</para>
- <para>The second part covers the
- API used for digital TV and Internet reception via one of the
- several digital tv standards. While it is called as DVB API,
- in fact it covers several different video standards including
- DVB-T/T2, DVB-S/S2, DVB-C, ATSC, ISDB-T, ISDB-S,etc. The complete
- list of supported standards can be found at
- <xref linkend="fe-delivery-system-t" />.</para>
- <para>The third part covers the Remote Controller API.</para>
- <para>The fourth part covers the Media Controller API.</para>
- <para>The fifth part covers the CEC (Consumer Electronics Control) API.</para>
- <para>It should also be noted that a media device may also have audio
- components, like mixers, PCM capture, PCM playback, etc, which
- are controlled via ALSA API.</para>
- <para>For additional information and for the latest development code,
- see: <ulink url="https://linuxtv.org">https://linuxtv.org</ulink>.</para>
- <para>For discussing improvements, reporting troubles, sending new drivers, etc, please mail to: <ulink url="http://vger.kernel.org/vger-lists.html#linux-media">Linux Media Mailing List (LMML).</ulink>.</para>
-</preface>
-
-<part id="v4l2spec">
-&sub-v4l2;
-</part>
-<part id="dvbapi">
-&sub-dvbapi;
-</part>
-<part id="remotes">
-&sub-remote_controllers;
-</part>
-<part id="media_common">
-&sub-media-controller;
-</part>
-<part id="cec">
-&sub-cec-api;
-</part>
-
-<chapter id="gen_errors">
-&sub-gen-errors;
-</chapter>
-
-&sub-fdl-appendix;
-
-</book>
diff --git a/Documentation/Makefile.sphinx b/Documentation/Makefile.sphinx
index fd565e1f1368..857f1e273418 100644
--- a/Documentation/Makefile.sphinx
+++ b/Documentation/Makefile.sphinx
@@ -63,11 +63,12 @@ sgmldocs:
psdocs:
mandocs:
installmandocs:
-cleanmediadocs:
cleandocs:
$(Q)rm -rf $(BUILDDIR)
+endif # HAVE_SPHINX
+
dochelp:
@echo ' Linux kernel internal documentation in different formats (Sphinx):'
@echo ' htmldocs - HTML'
@@ -75,5 +76,3 @@ dochelp:
@echo ' epubdocs - EPUB'
@echo ' xmldocs - XML'
@echo ' cleandocs - clean all generated files'
-
-endif # HAVE_SPHINX
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index 1179850f453c..c55df2911136 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -78,422 +78,111 @@ CONFIG_PCI_MSI option.
4.2 Using MSI
-Most of the hard work is done for the driver in the PCI layer. It simply
-has to request that the PCI layer set up the MSI capability for this
+Most of the hard work is done for the driver in the PCI layer. The driver
+simply has to request that the PCI layer set up the MSI capability for this
device.
-4.2.1 pci_enable_msi
+To automatically use MSI or MSI-X interrupt vectors, use the following
+function:
-int pci_enable_msi(struct pci_dev *dev)
+ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
-A successful call allocates ONE interrupt to the device, regardless
-of how many MSIs the device supports. The device is switched from
-pin-based interrupt mode to MSI mode. The dev->irq number is changed
-to a new number which represents the message signaled interrupt;
-consequently, this function should be called before the driver calls
-request_irq(), because an MSI is delivered via a vector that is
-different from the vector of a pin-based interrupt.
+which allocates up to max_vecs interrupt vectors for a PCI device. It
+returns the number of vectors allocated or a negative error. If the device
+has a requirement for a minimum number of vectors, the driver can pass a
+min_vecs argument set to this limit, and the PCI core will return -ENOSPC
+if it can't meet the minimum number of vectors.
-4.2.2 pci_enable_msi_range
+The flags argument should normally be set to 0, but can be used to pass the
+PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flags in case a device claims to support
+MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
+case the device does not support legacy interrupt lines.
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+By default this function will spread the interrupts around the available
+CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
+flag.
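+
+For example, a driver that wants to manage interrupt affinity itself could
+request its vectors like this (hypothetical snippet, mirroring the calls
+shown above):
+
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOAFFINITY);
+	if (nvec < 0)
+		goto out_err;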
-This function allows a device driver to request any number of MSI
-interrupts within specified range from 'minvec' to 'maxvec'.
+To get the Linux IRQ number for a given vector, as passed to request_irq()
+and free_irq(), use the following function:
-If this function returns a positive number it indicates the number of
-MSI interrupts that have been successfully allocated. In this case
-the device is switched from pin-based interrupt mode to MSI mode and
-updates dev->irq to be the lowest of the new interrupts assigned to it.
-The other interrupts assigned to the device are in the range dev->irq
-to dev->irq + returned value - 1. Device driver can use the returned
-number of successfully allocated MSI interrupts to further allocate
-and initialize device resources.
+ int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to request any more MSI interrupts for
-this device.
+Any allocated resources should be freed before removing the device using
+the following function:
-This function should be called before the driver calls request_irq(),
-because MSI interrupts are delivered via vectors that are different
-from the vector of a pin-based interrupt.
+ void pci_free_irq_vectors(struct pci_dev *dev);
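+
+Putting these calls together, a sketch of a driver's interrupt setup might
+look like the following (the foo_* names and FOO_MAX_VECS are hypothetical
+and serve only as an illustration):
+
+	static int foo_setup_irqs(struct pci_dev *pdev)
+	{
+		int i, ret, nvec;
+
+		/* Allocate up to FOO_MAX_VECS MSI-X, MSI or legacy vectors. */
+		nvec = pci_alloc_irq_vectors(pdev, 1, FOO_MAX_VECS, 0);
+		if (nvec < 0)
+			return nvec;
+
+		for (i = 0; i < nvec; i++) {
+			ret = request_irq(pci_irq_vector(pdev, i),
+					  foo_irq_handler, 0, "foo", pdev);
+			if (ret)
+				goto err_free;
+		}
+		return 0;
+
+	err_free:
+		/* Free the IRQs requested so far, then the vectors. */
+		while (--i >= 0)
+			free_irq(pci_irq_vector(pdev, i), pdev);
+		pci_free_irq_vectors(pdev);
+		return ret;
+	}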
-It is ideal if drivers can cope with a variable number of MSI interrupts;
-there are many reasons why the platform may not be able to provide the
-exact number that a driver asks for.
+If a device supports both MSI-X and MSI capabilities, this API will use the
+MSI-X facilities in preference to the MSI facilities. MSI-X supports any
+number of interrupts between 1 and 2048. In contrast, MSI is restricted to
+a maximum of 32 interrupts (and must be a power of two). In addition, the
+MSI interrupt vectors must be allocated consecutively, so the system might
+not be able to allocate as many vectors for MSI as it could for MSI-X. On
+some platforms, MSI interrupts must all be targeted at the same set of CPUs
+whereas MSI-X interrupts can all be targeted at different CPUs.
-There could be devices that can not operate with just any number of MSI
-interrupts within a range. See chapter 4.3.1.3 to get the idea how to
-handle such devices for MSI-X - the same logic applies to MSI.
+If a device supports neither MSI-X nor MSI, it will fall back to a single
+legacy IRQ vector.
-4.2.1.1 Maximum possible number of MSI interrupts
+The typical usage of MSI or MSI-X interrupts is to allocate as many vectors
+as possible, likely up to the limit supported by the device. If nvec is
+larger than the number supported by the device it will automatically be
+capped to the supported limit, so there is no need to query the number of
+vectors supported beforehand:
-The typical usage of MSI interrupts is to allocate as many vectors as
-possible, likely up to the limit returned by pci_msi_vec_count() function:
-
-static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
-{
- return pci_enable_msi_range(pdev, 1, nvec);
-}
-
-Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
-the value of 0 would be meaningless and could result in error.
-
-Some devices have a minimal limit on number of MSI interrupts.
-In this case the function could look like this:
-
-static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
-{
- return pci_enable_msi_range(pdev, FOO_DRIVER_MINIMUM_NVEC, nvec);
-}
-
-4.2.1.2 Exact number of MSI interrupts
+ nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+ if (nvec < 0)
+ goto out_err;
If a driver is unable or unwilling to deal with a variable number of MSI
-interrupts it could request a particular number of interrupts by passing
-that number to pci_enable_msi_range() function as both 'minvec' and 'maxvec'
-parameters:
-
-static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
-{
- return pci_enable_msi_range(pdev, nvec, nvec);
-}
-
-Note, unlike pci_enable_msi_exact() function, which could be also used to
-enable a particular number of MSI-X interrupts, pci_enable_msi_range()
-returns either a negative errno or 'nvec' (not negative errno or 0 - as
-pci_enable_msi_exact() does).
-
-4.2.1.3 Single MSI mode
-
-The most notorious example of the request type described above is
-enabling the single MSI mode for a device. It could be done by passing
-two 1s as 'minvec' and 'maxvec':
-
-static int foo_driver_enable_single_msi(struct pci_dev *pdev)
-{
- return pci_enable_msi_range(pdev, 1, 1);
-}
-
-Note, unlike pci_enable_msi() function, which could be also used to
-enable the single MSI mode, pci_enable_msi_range() returns either a
-negative errno or 1 (not negative errno or 0 - as pci_enable_msi()
-does).
-
-4.2.3 pci_enable_msi_exact
-
-int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
-
-This variation on pci_enable_msi_range() call allows a device driver to
-request exactly 'nvec' MSIs.
-
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to request any more MSI interrupts for
-this device.
-
-By contrast with pci_enable_msi_range() function, pci_enable_msi_exact()
-returns zero in case of success, which indicates MSI interrupts have been
-successfully allocated.
-
-4.2.4 pci_disable_msi
-
-void pci_disable_msi(struct pci_dev *dev)
-
-This function should be used to undo the effect of pci_enable_msi_range().
-Calling it restores dev->irq to the pin-based interrupt number and frees
-the previously allocated MSIs. The interrupts may subsequently be assigned
-to another device, so drivers should not cache the value of dev->irq.
-
-Before calling this function, a device driver must always call free_irq()
-on any interrupt for which it previously called request_irq().
-Failure to do so results in a BUG_ON(), leaving the device with
-MSI enabled and thus leaking its vector.
-
-4.2.4 pci_msi_vec_count
-
-int pci_msi_vec_count(struct pci_dev *dev)
-
-This function could be used to retrieve the number of MSI vectors the
-device requested (via the Multiple Message Capable register). The MSI
-specification only allows the returned value to be a power of two,
-up to a maximum of 2^5 (32).
-
-If this function returns a negative number, it indicates the device is
-not capable of sending MSIs.
-
-If this function returns a positive number, it indicates the maximum
-number of MSI interrupt vectors that could be allocated.
-
-4.3 Using MSI-X
-
-The MSI-X capability is much more flexible than the MSI capability.
-It supports up to 2048 interrupts, each of which can be controlled
-independently. To support this flexibility, drivers must use an array of
-`struct msix_entry':
-
-struct msix_entry {
- u16 vector; /* kernel uses to write alloc vector */
- u16 entry; /* driver uses to specify entry */
-};
-
-This allows for the device to use these interrupts in a sparse fashion;
-for example, it could use interrupts 3 and 1027 and yet allocate only a
-two-element array. The driver is expected to fill in the 'entry' value
-in each element of the array to indicate for which entries the kernel
-should assign interrupts; it is invalid to fill in two entries with the
-same number.
-
-4.3.1 pci_enable_msix_range
-
-int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
- int minvec, int maxvec)
-
-Calling this function asks the PCI subsystem to allocate any number of
-MSI-X interrupts within specified range from 'minvec' to 'maxvec'.
-The 'entries' argument is a pointer to an array of msix_entry structs
-which should be at least 'maxvec' entries in size.
-
-On success, the device is switched into MSI-X mode and the function
-returns the number of MSI-X interrupts that have been successfully
-allocated. In this case the 'vector' member in entries numbered from
-0 to the returned value - 1 is populated with the interrupt number;
-the driver should then call request_irq() for each 'vector' that it
-decides to use. The device driver is responsible for keeping track of the
-interrupts assigned to the MSI-X vectors so it can free them again later.
-Device driver can use the returned number of successfully allocated MSI-X
-interrupts to further allocate and initialize device resources.
-
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to allocate any more MSI-X interrupts for
-this device.
-
-This function, in contrast with pci_enable_msi_range(), does not adjust
-dev->irq. The device will not generate interrupts for this interrupt
-number once MSI-X is enabled.
-
-Device drivers should normally call this function once per device
-during the initialization phase.
-
-It is ideal if drivers can cope with a variable number of MSI-X interrupts;
-there are many reasons why the platform may not be able to provide the
-exact number that a driver asks for.
-
-There could be devices that can not operate with just any number of MSI-X
-interrupts within a range. E.g., an network adapter might need let's say
-four vectors per each queue it provides. Therefore, a number of MSI-X
-interrupts allocated should be a multiple of four. In this case interface
-pci_enable_msix_range() can not be used alone to request MSI-X interrupts
-(since it can allocate any number within the range, without any notion of
-the multiple of four) and the device driver should master a custom logic
-to request the required number of MSI-X interrupts.
-
-4.3.1.1 Maximum possible number of MSI-X interrupts
-
-The typical usage of MSI-X interrupts is to allocate as many vectors as
-possible, likely up to the limit returned by pci_msix_vec_count() function:
-
-static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
-{
- return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
- 1, nvec);
-}
-
-Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
-the value of 0 would be meaningless and could result in error.
-
-Some devices have a minimal limit on number of MSI-X interrupts.
-In this case the function could look like this:
-
-static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
-{
- return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
- FOO_DRIVER_MINIMUM_NVEC, nvec);
-}
-
-4.3.1.2 Exact number of MSI-X interrupts
-
-If a driver is unable or unwilling to deal with a variable number of MSI-X
-interrupts it could request a particular number of interrupts by passing
-that number to pci_enable_msix_range() function as both 'minvec' and 'maxvec'
-parameters:
-
-static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
-{
- return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
- nvec, nvec);
-}
-
-Note, unlike pci_enable_msix_exact() function, which could be also used to
-enable a particular number of MSI-X interrupts, pci_enable_msix_range()
-returns either a negative errno or 'nvec' (not negative errno or 0 - as
-pci_enable_msix_exact() does).
-
-4.3.1.3 Specific requirements to the number of MSI-X interrupts
-
-As noted above, there could be devices that can not operate with just any
-number of MSI-X interrupts within a range. E.g., let's assume a device that
-is only capable sending the number of MSI-X interrupts which is a power of
-two. A routine that enables MSI-X mode for such device might look like this:
-
-/*
- * Assume 'minvec' and 'maxvec' are non-zero
- */
-static int foo_driver_enable_msix(struct foo_adapter *adapter,
- int minvec, int maxvec)
-{
- int rc;
-
- minvec = roundup_pow_of_two(minvec);
- maxvec = rounddown_pow_of_two(maxvec);
-
- if (minvec > maxvec)
- return -ERANGE;
-
-retry:
- rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
- maxvec, maxvec);
- /*
- * -ENOSPC is the only error code allowed to be analyzed
- */
- if (rc == -ENOSPC) {
- if (maxvec == 1)
- return -ENOSPC;
-
- maxvec /= 2;
-
- if (minvec > maxvec)
- return -ENOSPC;
-
- goto retry;
- }
-
- return rc;
-}
-
-Note how pci_enable_msix_range() return value is analyzed for a fallback -
-any error code other than -ENOSPC indicates a fatal error and should not
-be retried.
-
-4.3.2 pci_enable_msix_exact
-
-int pci_enable_msix_exact(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
-
-This variation on pci_enable_msix_range() call allows a device driver to
-request exactly 'nvec' MSI-Xs.
-
-If this function returns a negative number, it indicates an error and
-the driver should not attempt to allocate any more MSI-X interrupts for
-this device.
-
-By contrast with pci_enable_msix_range() function, pci_enable_msix_exact()
-returns zero in case of success, which indicates MSI-X interrupts have been
-successfully allocated.
-
-Another version of a routine that enables MSI-X mode for a device with
-specific requirements described in chapter 4.3.1.3 might look like this:
-
-/*
- * Assume 'minvec' and 'maxvec' are non-zero
- */
-static int foo_driver_enable_msix(struct foo_adapter *adapter,
- int minvec, int maxvec)
-{
- int rc;
-
- minvec = roundup_pow_of_two(minvec);
- maxvec = rounddown_pow_of_two(maxvec);
-
- if (minvec > maxvec)
- return -ERANGE;
-
-retry:
- rc = pci_enable_msix_exact(adapter->pdev,
- adapter->msix_entries, maxvec);
-
- /*
- * -ENOSPC is the only error code allowed to be analyzed
- */
- if (rc == -ENOSPC) {
- if (maxvec == 1)
- return -ENOSPC;
-
- maxvec /= 2;
-
- if (minvec > maxvec)
- return -ENOSPC;
-
- goto retry;
- } else if (rc < 0) {
- return rc;
- }
-
- return maxvec;
-}
-
-4.3.3 pci_disable_msix
-
-void pci_disable_msix(struct pci_dev *dev)
-
-This function should be used to undo the effect of pci_enable_msix_range().
-It frees the previously allocated MSI-X interrupts. The interrupts may
-subsequently be assigned to another device, so drivers should not cache
-the value of the 'vector' elements over a call to pci_disable_msix().
-
-Before calling this function, a device driver must always call free_irq()
-on any interrupt for which it previously called request_irq().
-Failure to do so results in a BUG_ON(), leaving the device with
-MSI-X enabled and thus leaking its vector.
-
-4.3.3 The MSI-X Table
-
-The MSI-X capability specifies a BAR and offset within that BAR for the
-MSI-X Table. This address is mapped by the PCI subsystem, and should not
-be accessed directly by the device driver. If the driver wishes to
-mask or unmask an interrupt, it should call disable_irq() / enable_irq().
+interrupts, it can request a particular number of interrupts by passing that
+number to the pci_alloc_irq_vectors() function as both the 'min_vecs' and
+'max_vecs' parameters:
-4.3.4 pci_msix_vec_count
+ ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+ if (ret < 0)
+ goto out_err;
-int pci_msix_vec_count(struct pci_dev *dev)
+The most common example of this request type is enabling single MSI mode
+for a device. This can be done by passing 1 as both 'min_vecs' and
+'max_vecs':
-This function could be used to retrieve number of entries in the device
-MSI-X table.
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ if (ret < 0)
+ goto out_err;
-If this function returns a negative number, it indicates the device is
-not capable of sending MSI-Xs.
+Some devices might not support using legacy line interrupts, in which case
+the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
+can't provide MSI or MSI-X interrupts:
-If this function returns a positive number, it indicates the maximum
-number of MSI-X interrupt vectors that could be allocated.
+ nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+ if (nvec < 0)
+ goto out_err;
-4.4 Handling devices implementing both MSI and MSI-X capabilities
+4.3 Legacy APIs
-If a device implements both MSI and MSI-X capabilities, it can
-run in either MSI mode or MSI-X mode, but not both simultaneously.
-This is a requirement of the PCI spec, and it is enforced by the
-PCI layer. Calling pci_enable_msi_range() when MSI-X is already
-enabled or pci_enable_msix_range() when MSI is already enabled
-results in an error. If a device driver wishes to switch between MSI
-and MSI-X at runtime, it must first quiesce the device, then switch
-it back to pin-interrupt mode, before calling pci_enable_msi_range()
-or pci_enable_msix_range() and resuming operation. This is not expected
-to be a common operation but may be useful for debugging or testing
-during development.
+The following old APIs to enable and disable MSI or MSI-X interrupts should
+not be used in new code:
-4.5 Considerations when using MSIs
+ pci_enable_msi() /* deprecated */
+ pci_enable_msi_range() /* deprecated */
+ pci_enable_msi_exact() /* deprecated */
+ pci_disable_msi() /* deprecated */
+ pci_enable_msix_range() /* deprecated */
+ pci_enable_msix_exact() /* deprecated */
+ pci_disable_msix() /* deprecated */
-4.5.1 Choosing between MSI-X and MSI
+Additionally, there are APIs that report the number of supported MSI or
+MSI-X vectors: pci_msi_vec_count() and pci_msix_vec_count(). In general
+these should be avoided in favor of letting pci_alloc_irq_vectors() cap the
+number of vectors. If you have a legitimate special use case for the count
+of vectors, we might have to revisit that decision and add a
+pci_nr_irq_vectors() helper that handles MSI and MSI-X transparently.
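+
+As a rough illustration (hypothetical driver fields, not upstream code), a
+conversion from the deprecated MSI-X API to the new helper typically looks
+like this:
+
+	/* Before: explicit msix_entry bookkeeping with the deprecated API. */
+	for (i = 0; i < FOO_NVEC; i++)
+		adapter->msix_entries[i].entry = i;
+	rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				   1, FOO_NVEC);
+
+	/* After: let the PCI core pick MSI-X, MSI or a legacy IRQ, and look
+	 * up the Linux IRQ number with pci_irq_vector() instead of reading
+	 * adapter->msix_entries[i].vector. */
+	rc = pci_alloc_irq_vectors(adapter->pdev, 1, FOO_NVEC, 0);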
-If your device supports both MSI-X and MSI capabilities, you should use
-the MSI-X facilities in preference to the MSI facilities. As mentioned
-above, MSI-X supports any number of interrupts between 1 and 2048.
-In contrast, MSI is restricted to a maximum of 32 interrupts (and
-must be a power of two). In addition, the MSI interrupt vectors must
-be allocated consecutively, so the system might not be able to allocate
-as many vectors for MSI as it could for MSI-X. On some platforms, MSI
-interrupts must all be targeted at the same set of CPUs whereas MSI-X
-interrupts can all be targeted at different CPUs.
+4.4 Considerations when using MSIs
-4.5.2 Spinlocks
+4.4.1 Spinlocks
Most device drivers have a per-device spinlock which is taken in the
interrupt handler. With pin-based interrupts or a single MSI, it is not
@@ -505,7 +194,7 @@ acquire the spinlock. Such deadlocks can be avoided by using
spin_lock_irqsave() or spin_lock_irq() which disable local interrupts
and acquire the lock (see Documentation/DocBook/kernel-locking).
-4.6 How to tell whether MSI/MSI-X is enabled on a device
+4.5 How to tell whether MSI/MSI-X is enabled on a device
Using 'lspci -v' (as root) may show some devices with "MSI", "Message
Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities
diff --git a/Documentation/arm/Atmel/README b/Documentation/arm/Atmel/README
index 0931cf7e2e56..6ca78f818dbf 100644
--- a/Documentation/arm/Atmel/README
+++ b/Documentation/arm/Atmel/README
@@ -91,9 +91,15 @@ the Atmel website: http://www.atmel.com.
http://www.atmel.com/Images/Atmel-11238-32-bit-Cortex-A5-Microcontroller-SAMA5D4_Datasheet.pdf
- sama5d2 family
- - sama5d27
+ - sama5d21
+ - sama5d22
+ - sama5d23
+ - sama5d24
+ - sama5d26
+ - sama5d27 (device superset)
+ - sama5d28 (device superset + environmental monitors)
+ Datasheet
- Coming soon
+ http://www.atmel.com/Images/Atmel-11267-32-bit-Cortex-A5-Microcontroller-SAMA5D2_Datasheet.pdf
Linux kernel information
diff --git a/Documentation/binfmt_misc.txt b/Documentation/binfmt_misc.txt
index 6b1de7058371..ec83bbce547a 100644
--- a/Documentation/binfmt_misc.txt
+++ b/Documentation/binfmt_misc.txt
@@ -66,6 +66,13 @@ Here is what the fields mean:
This feature should be used with care as the interpreter
will run with root permissions when a setuid binary owned by root
is run with binfmt_misc.
+ 'F' - fix binary. The usual behaviour of binfmt_misc is to spawn the
+ binary lazily when the misc format file is invoked. However,
+ this doesn't work very well in the face of mount namespaces and
+ changeroots, so the F mode opens the binary as soon as the
+ emulation is installed and uses the opened image to spawn the
+ emulator, meaning it is always available once installed,
+ regardless of how the environment changes.
There are some restrictions:
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 026d13362aca..bcdb2b4c1f12 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
requests which haven't aged too much on the queue. Potentially this priority
could even be exposed to applications in some manner, providing higher level
tunability. Time based aging avoids starvation of lower priority
- requests. Some bits in the bi_rw flags field in the bio structure are
+ requests. Some bits in the bi_opf flags field in the bio structure are
intended to be used for this priority information.
@@ -432,7 +432,7 @@ struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev; /* target device */
unsigned long bi_flags; /* status, command, etc */
- unsigned long bi_rw; /* low bits: r/w, high: priority */
+ unsigned long bi_opf; /* low bits: r/w, high: priority */
unsigned int bi_vcnt; /* how may bio_vec's */
struct bvec_iter bi_iter; /* current index into bio_vec array */
diff --git a/Documentation/cgroup-v1/cgroups.txt b/Documentation/cgroup-v1/cgroups.txt
index 947e6fe31ef9..308e5ff7207a 100644
--- a/Documentation/cgroup-v1/cgroups.txt
+++ b/Documentation/cgroup-v1/cgroups.txt
@@ -2,7 +2,7 @@
-------
Written by Paul Menage <menage@google.com> based on
-Documentation/cgroups/cpusets.txt
+Documentation/cgroup-v1/cpusets.txt
Original copyright statements from cpusets.txt:
Portions Copyright (C) 2004 BULL SA.
@@ -72,7 +72,7 @@ On their own, the only use for cgroups is for simple job
tracking. The intention is that other subsystems hook into the generic
cgroup support to provide new attributes for cgroups, such as
accounting/limiting the resources which processes in a cgroup can
-access. For example, cpusets (see Documentation/cgroups/cpusets.txt) allow
+access. For example, cpusets (see Documentation/cgroup-v1/cpusets.txt) allow
you to associate a set of CPUs and a set of memory nodes with the
tasks in each cgroup.
diff --git a/Documentation/cgroup-v1/cpusets.txt b/Documentation/cgroup-v1/cpusets.txt
index e5cdcd445615..e5ac5da86682 100644
--- a/Documentation/cgroup-v1/cpusets.txt
+++ b/Documentation/cgroup-v1/cpusets.txt
@@ -48,7 +48,7 @@ hooks, beyond what is already present, required to manage dynamic
job placement on large systems.
Cpusets use the generic cgroup subsystem described in
-Documentation/cgroups/cgroups.txt.
+Documentation/cgroup-v1/cgroups.txt.
Requests by a task, using the sched_setaffinity(2) system call to
include CPUs in its CPU affinity mask, and using the mbind(2) and
diff --git a/Documentation/cgroup-v1/memcg_test.txt b/Documentation/cgroup-v1/memcg_test.txt
index 78a8c2963b38..5c7f310f32bb 100644
--- a/Documentation/cgroup-v1/memcg_test.txt
+++ b/Documentation/cgroup-v1/memcg_test.txt
@@ -6,7 +6,7 @@ Because VM is getting complex (one of reasons is memcg...), memcg's behavior
is complex. This is a document for memcg's internal behavior.
Please note that implementation details can be changed.
-(*) Topics on API should be in Documentation/cgroups/memory.txt)
+(*) Topics on API should be in Documentation/cgroup-v1/memory.txt)
0. How to record usage ?
2 objects are used.
@@ -256,7 +256,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
You can see charges have been moved by reading *.usage_in_bytes or
memory.stat of both A and B.
- See 8.2 of Documentation/cgroups/memory.txt to see what value should be
+ See 8.2 of Documentation/cgroup-v1/memory.txt to see what value should be
written to move_charge_at_immigrate.
9.10 Memory thresholds
diff --git a/Documentation/coccinelle.txt b/Documentation/coccinelle.txt
index 7f773d51fdd9..01fb1dae3163 100644
--- a/Documentation/coccinelle.txt
+++ b/Documentation/coccinelle.txt
@@ -38,6 +38,15 @@ as a regular user, and install it with
sudo make install
+ Supplemental documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For supplemental documentation refer to the wiki:
+
+https://bottest.wiki.kernel.org/coccicheck
+
+The wiki documentation always refers to the linux-next version of the script.
+
Using Coccinelle on the Linux kernel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -94,11 +103,26 @@ To enable verbose messages set the V= variable, for example:
make coccicheck MODE=report V=1
+ Coccinelle parallelization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
By default, coccicheck tries to run with as much parallelism as possible. To change
the parallelism, set the J= variable. For example, to run across 4 CPUs:
make coccicheck MODE=report J=4
+As of version 1.0.2, Coccinelle uses OCaml parmap for parallelization;
+if support for it is detected, you will benefit from parmap parallelization.
+
+When parmap is enabled, coccicheck enables dynamic load balancing by passing
+the '--chunksize 1' argument. This keeps feeding threads work one unit at a
+time, so that we avoid the situation where most of the work is done by only
+a few threads: with dynamic load balancing, a thread that finishes early is
+simply fed more work.
+
+When parmap is enabled and an error occurs in Coccinelle, the error value is
+propagated back and becomes the return value of the 'make coccicheck'
+command.
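+
+For example, a minimal sketch of acting on that return value from a shell
+script (the log file name is only illustrative):
+
+    make coccicheck MODE=report > coccicheck.log 2>&1
+    if [ $? -ne 0 ]; then
+        echo "coccicheck (or Coccinelle itself) failed" >&2
+    fi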
Using Coccinelle with a single semantic patch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -142,15 +166,118 @@ semantic patch as shown in the previous section.
The "report" mode is the default. You can select another one with the
MODE variable explained above.
+ Debugging Coccinelle SmPL patches
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using coccicheck is best, as it passes spatch the same include options that
+are used when compiling the kernel. You can learn what these options are by
+using V=1; you could then manually run Coccinelle with debug options added.
+
+Alternatively, you can debug Coccinelle runs against SmPL patches by
+capturing stderr in a file. By default stderr is redirected to /dev/null;
+if you'd like to capture stderr instead, specify the DEBUG_FILE="file.txt"
+option to coccicheck. For instance:
+
+ rm -f cocci.err
+ make coccicheck COCCI=scripts/coccinelle/free/kfree.cocci MODE=report DEBUG_FILE=cocci.err
+ cat cocci.err
+
+You can use SPFLAGS to add debugging flags; for instance, you may want to
+add both --profile and --show-trying to SPFLAGS when debugging:
+
+ rm -f err.log
+ export COCCI=scripts/coccinelle/misc/irqf_oneshot.cocci
+ make coccicheck DEBUG_FILE="err.log" MODE=report SPFLAGS="--profile --show-trying" M=./drivers/mfd/arizona-irq.c
+
+err.log will now have the profiling information, while stdout will
+provide some progress information as Coccinelle moves forward with
+work.
+
+DEBUG_FILE support is only available when using Coccinelle >= 1.2.
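+
+If you are unsure which version of Coccinelle is installed, you can check
+with, for example:
+
+    spatch --version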
+
+ .cocciconfig support
+~~~~~~~~~~~~~~~~~~~~~~
+
+Coccinelle supports reading .cocciconfig for default Coccinelle options that
+should be used every time spatch is spawned. The order of precedence for
+.cocciconfig files is as follows:
+
+ o Your current user's home directory is processed first
+ o The directory from which spatch is called is processed next
+ o The directory provided with the --dir option is processed last, if used
+
+Since coccicheck runs through make, it naturally runs from the top of the
+kernel source tree; as such, the second rule above applies for picking up a
+.cocciconfig when using 'make coccicheck'.
+
+'make coccicheck' also supports using M= targets. If you do not supply
+any M= target, it is assumed you want to target the entire kernel.
+The kernel coccicheck script has:
+
+ if [ "$KBUILD_EXTMOD" = "" ] ; then
+ OPTIONS="--dir $srctree $COCCIINCLUDE"
+ else
+ OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
+ fi
+
+KBUILD_EXTMOD is set when an explicit target with M= is used. In both cases
+the spatch --dir argument is used, so the third rule applies whether M= is
+used or not, and when M= is used the target directory can have its own
+.cocciconfig file. When M= is not passed as an argument to coccicheck, the
+target directory is the same as the directory from which spatch was called.
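+
+As a sketch of the two cases (the external module path below is purely
+hypothetical):
+
+    # whole kernel: --dir is the kernel source tree, so the kernel's
+    # .cocciconfig applies
+    make coccicheck MODE=report
+
+    # M= target: --dir is the target directory, which may carry its own
+    # .cocciconfig
+    make coccicheck MODE=report M=/path/to/my-module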
+
+If you are not using the kernel's coccicheck target, keep the above
+.cocciconfig precedence order in mind. If you are using the kernel's
+coccicheck target, override any of the kernel's .cocciconfig settings
+using SPFLAGS.
+
+We help Coccinelle when used against Linux by shipping our own Linux
+.cocciconfig with a set of sensible default options. It hints to Coccinelle
+that git can be used for 'git grep' queries instead of coccigrep, and sets a
+timeout of 200 seconds, which should suffice for now.
+
+The options picked up by Coccinelle when reading a .cocciconfig do not appear
+as arguments to the spatch processes running on your system. To confirm what
+options will be used by Coccinelle, run:
+
+ spatch --print-options-only
+
+You can override with your own preferred index option by using SPFLAGS. Take
+note that when there are conflicting options Coccinelle gives precedence to
+the last options passed. It is possible to select idutils through
+.cocciconfig; however, given the order of precedence followed by Coccinelle,
+and since the kernel now carries its own .cocciconfig, you will need to use
+SPFLAGS to use idutils if desired. See the section "Additional flags" below
+for more details on how to use idutils.
+
Additional flags
~~~~~~~~~~~~~~~~~~
Additional flags can be passed to spatch through the SPFLAGS
-variable.
+variable. This works because Coccinelle respects the last flags
+given to it when options are in conflict.
make SPFLAGS=--use-glimpse coccicheck
+
+Coccinelle supports idutils as well, but requires Coccinelle >= 1.0.6.
+When no ID file is specified, Coccinelle assumes your ID database file
+is the file .id-utils.index at the top level of the kernel tree; Coccinelle
+carries a script, scripts/idutils_index.sh, which creates the database with
+
+ mkid -i C --output .id-utils.index
+
+If your database has a different filename, you can also simply symlink it to
+this name.
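+
+For instance (the database path below is only illustrative):
+
+    # reuse an existing idutils database under the expected name
+    ln -s /path/to/your/ID .id-utils.index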
+
make SPFLAGS=--use-idutils coccicheck
+Alternatively you can specify the database filename explicitly, for
+instance:
+
+ make SPFLAGS="--use-idutils /full-path/to/ID" coccicheck
+
See spatch --help to learn more about spatch options.
Note that the '--use-glimpse' and '--use-idutils' options
@@ -159,6 +286,25 @@ thus active by default. However, by indexing the code with
one of these tools, and according to the cocci file used,
spatch could process the entire code base more quickly.
+ SmPL patch specific options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+SmPL patches can have their own requirements for options passed
+to Coccinelle. SmPL patch specific options can be provided by
+placing them at the top of the SmPL patch, for instance:
+
+// Options: --no-includes --include-headers
+
+ SmPL patch Coccinelle requirements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As Coccinelle features get added, some more advanced SmPL patches
+may require newer versions of Coccinelle. If an SmPL patch requires
+a minimum version of Coccinelle, this can be specified as follows;
+for example, to require at least Coccinelle 1.0.5:
+
+// Requires: 1.0.5
+
Proposing new semantic patches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/device-mapper/dm-flakey.txt b/Documentation/device-mapper/dm-flakey.txt
index 6ff5c2327227..c43030718cef 100644
--- a/Documentation/device-mapper/dm-flakey.txt
+++ b/Documentation/device-mapper/dm-flakey.txt
@@ -42,7 +42,7 @@ Optional feature parameters:
<direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
'w' is incompatible with drop_writes.
<value>: The value (from 0-255) to write.
- <flags>: Perform the replacement only if bio->bi_rw has all the
+ <flags>: Perform the replacement only if bio->bi_opf has all the
selected flags set.
Examples:
diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt
index 313dabdc14f9..faa4b44572e3 100644
--- a/Documentation/devicetree/bindings/arm/arm,scpi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scpi.txt
@@ -87,10 +87,33 @@ Required properties:
implementation for the IDs to use. For Juno
R0 and Juno R1 refer to [3].
+Power domain bindings for the power domains based on SCPI Message Protocol
+------------------------------------------------------------
+
+This binding uses the generic power domain binding[4].
+
+PM domain providers
+===================
+
+Required properties:
+ - #power-domain-cells : Should be 1. Contains the device or the power
+ domain ID value used by SCPI commands.
+ - num-domains: Total number of power domains provided by SCPI. This is
+ needed as the SCPI message protocol lacks a mechanism to
+ query this information at runtime.
+
+PM domain consumers
+===================
+
+Required properties:
+ - power-domains : A phandle and PM domain specifier as defined by bindings of
+ the power controller specified by phandle.
+
[0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/thermal/thermal.txt
[3] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0922b/apas03s22.html
+[4] Documentation/devicetree/bindings/power/power_domain.txt
Example:
@@ -144,6 +167,12 @@ scpi_protocol: scpi@2e000000 {
compatible = "arm,scpi-sensors";
#thermal-sensor-cells = <1>;
};
+
+ scpi_devpd: scpi-power-domains {
+ compatible = "arm,scpi-power-domains";
+ num-domains = <2>;
+ #power-domain-cells = <1>;
+ };
};
cpu@0 {
@@ -156,6 +185,7 @@ hdlcd@7ff60000 {
...
reg = <0 0x7ff60000 0 0x1000>;
clocks = <&scpi_clk 4>;
+ power-domains = <&scpi_devpd 1>;
};
thermal-zones {
@@ -186,3 +216,7 @@ The thermal-sensors property in the soc_thermal node uses the
temperature sensor provided by SCP firmware to setup a thermal
zone. The ID "3" is the sensor identifier for the temperature sensor
as used by the firmware.
+
+The num-domains property in the scpi-power-domains node specifies that
+SCPI provides 2 power domains. The hdlcd node uses the power domain with
+domain ID 1.
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
index 8240c023e202..e3f996920403 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm11351-cpu-method.txt
@@ -5,7 +5,7 @@ CPUs in the following Broadcom SoCs:
BCM11130, BCM11140, BCM11351, BCM28145, BCM28155, BCM21664
The enable method is specified by defining the following required
-properties in the "cpus" device tree node:
+properties in the "cpu" device tree node:
- enable-method = "brcm,bcm11351-cpu-method";
- secondary-boot-reg = <...>;
@@ -19,8 +19,6 @@ Example:
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "brcm,bcm11351-cpu-method";
- secondary-boot-reg = <0x3500417c>;
cpu0: cpu@0 {
device_type = "cpu";
@@ -32,5 +30,7 @@ Example:
device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
+ enable-method = "brcm,bcm11351-cpu-method";
+ secondary-boot-reg = <0x3500417c>;
};
};
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt
new file mode 100644
index 000000000000..a3af54c0e404
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550-cpu-method.txt
@@ -0,0 +1,36 @@
+Broadcom Kona Family CPU Enable Method
+--------------------------------------
+This binding defines the enable method used for starting secondary
+CPUs in the following Broadcom SoCs:
+ BCM23550
+
+The enable method is specified by defining the following required
+properties in the "cpu" device tree node:
+ - enable-method = "brcm,bcm23550";
+ - secondary-boot-reg = <...>;
+
+The secondary-boot-reg property is a u32 value that specifies the
+physical address of the register used to request the ROM holding pen
+code release a secondary CPU. The value written to the register is
+formed by encoding the target CPU id into the low bits of the
+physical start address it should jump to.
+
+Example:
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ enable-method = "brcm,bcm23550";
+ secondary-boot-reg = <0x3500417c>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt
new file mode 100644
index 000000000000..080baad923d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm23550.txt
@@ -0,0 +1,15 @@
+Broadcom BCM23550 device tree bindings
+--------------------------------------
+
+This document describes the device tree bindings for boards with the BCM23550
+SoC.
+
+Required root node property:
+ - compatible: brcm,bcm23550
+
+Example:
+ / {
+ model = "BCM23550 SoC";
+ compatible = "brcm,bcm23550";
+ [...]
+ }
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
index 11d3056dc2bd..6ffe08778465 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt
@@ -30,6 +30,10 @@ Raspberry Pi 2 Model B
Required root node properties:
compatible = "raspberrypi,2-model-b", "brcm,bcm2836";
+Raspberry Pi 3 Model B
+Required root node properties:
+compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
+
Raspberry Pi Compute Module
Required root node properties:
compatible = "raspberrypi,compute-module", "brcm,bcm2835";
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 3f0cbbb8395f..e6782d50cbcd 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -193,6 +193,8 @@ nodes to be present and contain the properties described below.
"allwinner,sun6i-a31"
"allwinner,sun8i-a23"
"arm,realview-smp"
+ "brcm,bcm11351-cpu-method"
+ "brcm,bcm23550"
"brcm,bcm-nsp-smp"
"brcm,brahma-b15"
"marvell,armada-375-smp"
@@ -204,6 +206,7 @@ nodes to be present and contain the properties described below.
"qcom,gcc-msm8660"
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
+ "renesas,apmu"
"rockchip,rk3036-smp"
"rockchip,rk3066-smp"
"ste,dbx500-smp"
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
new file mode 100644
index 000000000000..115c5be0bd0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
@@ -0,0 +1,14 @@
+* Hisilicon Hi3519 System Controller Block
+
+This binding uses the following binding:
+Documentation/devicetree/bindings/mfd/syscon.txt
+
+Required properties:
+- compatible: "hisilicon,hi3519-sysctrl".
+- reg: the register region of this block
+
+Examples:
+sysctrl: system-controller@12010000 {
+ compatible = "hisilicon,hi3519-sysctrl", "syscon";
+ reg = <0x12010000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt
index d9c2a37a4090..c860b245d8c8 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek.txt
@@ -10,6 +10,7 @@ compatible: Must contain one of
"mediatek,mt6580"
"mediatek,mt6589"
"mediatek,mt6592"
+ "mediatek,mt6755"
"mediatek,mt6795"
"mediatek,mt7623"
"mediatek,mt8127"
@@ -31,6 +32,9 @@ Supported boards:
- Evaluation board for MT6592:
Required root node properties:
- compatible = "mediatek,mt6592-evb", "mediatek,mt6592";
+- Evaluation phone for MT6755(Helio P10):
+ Required root node properties:
+ - compatible = "mediatek,mt6755-evb", "mediatek,mt6755";
- Evaluation board for MT6795(Helio X10):
Required root node properties:
- compatible = "mediatek,mt6795-evb", "mediatek,mt6795";
diff --git a/Documentation/devicetree/bindings/arm/olimex.txt b/Documentation/devicetree/bindings/arm/olimex.txt
index 007fb5c685a1..d726aeca56be 100644
--- a/Documentation/devicetree/bindings/arm/olimex.txt
+++ b/Documentation/devicetree/bindings/arm/olimex.txt
@@ -1,5 +1,9 @@
-Olimex i.MX Platforms Device Tree Bindings
-------------------------------------------
+Olimex Device Tree Bindings
+---------------------------
+
+SAM9-L9260 Board
+Required root node properties:
+ - compatible = "olimex,sam9-l9260", "atmel,at91sam9260";
i.MX23 Olinuxino Low Cost Board
Required root node properties:
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index 715d960d5eea..666864517069 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -107,6 +107,9 @@ Rockchip platforms device tree bindings
Required root node properties:
- compatible = "rockchip,rk3228-evb", "rockchip,rk3228";
+- Rockchip RK3229 Evaluation board:
+ - compatible = "rockchip,rk3229-evb", "rockchip,rk3229";
+
- Rockchip RK3399 evb:
Required root node properties:
- compatible = "rockchip,rk3399-evb", "rockchip,rk3399";
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index f5deace2b380..0ea7f14ef294 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
@@ -47,6 +47,7 @@ Required root node properties:
- "hardkernel,odroid-u3" - for Exynos4412-based Hardkernel Odroid U3.
- "hardkernel,odroid-x" - for Exynos4412-based Hardkernel Odroid X.
- "hardkernel,odroid-x2" - for Exynos4412-based Hardkernel Odroid X2.
+ - "hardkernel,odroid-xu" - for Exynos5410-based Hardkernel Odroid XU.
- "hardkernel,odroid-xu3" - for Exynos5422-based Hardkernel Odroid XU3.
- "hardkernel,odroid-xu3-lite" - for Exynos5422-based Hardkernel
Odroid XU3 Lite board.
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index 9cf67e48f222..1df32d339da5 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -29,6 +29,8 @@ SoCs:
compatible = "renesas,r8a7794"
- R-Car H3 (R8A77950)
compatible = "renesas,r8a7795"
+ - R-Car M3-W (R8A77960)
+ compatible = "renesas,r8a7796"
Boards:
@@ -39,6 +41,8 @@ Boards:
compatible = "renesas,ape6evm", "renesas,r8a73a4"
- Atmark Techno Armadillo-800 EVA
compatible = "renesas,armadillo800eva"
+ - Blanche (RTP0RC7792SEB00010S)
+ compatible = "renesas,blanche", "renesas,r8a7792"
- BOCK-W
compatible = "renesas,bockw", "renesas,r8a7778"
- Genmai (RTK772100BC00000BR)
@@ -61,5 +65,7 @@ Boards:
compatible = "renesas,porter", "renesas,r8a7791"
- Salvator-X (RTP0RC7795SIPB0010S)
compatible = "renesas,salvator-x", "renesas,r8a7795";
+ - Salvator-X
+ compatible = "renesas,salvator-x", "renesas,r8a7796";
- SILK (RTP0RC7794LCB00011S)
compatible = "renesas,silk", "renesas,r8a7794"
diff --git a/Documentation/devicetree/bindings/arm/tegra.txt b/Documentation/devicetree/bindings/arm/tegra.txt
index 73278c6d2dc3..b5a4342c1d46 100644
--- a/Documentation/devicetree/bindings/arm/tegra.txt
+++ b/Documentation/devicetree/bindings/arm/tegra.txt
@@ -32,7 +32,11 @@ board-specific compatible values:
nvidia,whistler
toradex,apalis_t30
toradex,apalis_t30-eval
+ toradex,apalis-tk1
+ toradex,apalis-tk1-eval
toradex,colibri_t20-512
+ toradex,colibri_t30
+ toradex,colibri_t30-eval-v3
toradex,iris
Trusted Foundations
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 87adfb227ca9..fedc213b5f1a 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -10,6 +10,7 @@ PHYs.
Required properties:
- compatible : compatible string, one of:
- "allwinner,sun4i-a10-ahci"
+ - "brcm,iproc-ahci"
- "hisilicon,hisi-ahci"
- "cavium,octeon-7130-ahci"
- "ibm,476gtr-ahci"
diff --git a/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt b/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt
new file mode 100644
index 000000000000..7ff13be1750b
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/nvidia,tegra210-aconnect.txt
@@ -0,0 +1,45 @@
+NVIDIA Tegra ACONNECT Bus
+
+The Tegra ACONNECT bus is an AXI switch which is used to connect various
+components inside the Audio Processing Engine (APE). All CPU accesses to
+the APE subsystem go through the ACONNECT via an APB to AXI wrapper.
+
+Required properties:
+- compatible: Must be "nvidia,tegra210-aconnect".
+- clocks: Must contain the entries for the APE clock (TEGRA210_CLK_APE),
+ and APE interface clock (TEGRA210_CLK_APB2APE).
+- clock-names: Must contain the names "ape" and "apb2ape" for the corresponding
+ 'clocks' entries.
+- power-domains: Must contain a phandle that points to the audio powergate
+ (namely 'aud') for Tegra210.
+- #address-cells: The number of cells used to represent physical base addresses
+ in the aconnect address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+ range in the aconnect address space. Should be 1.
+- ranges: Mapping of the aconnect address space to the CPU address space.
+
+All devices accessed via the ACONNECT are described by child nodes.
+
+Example:
+
+ aconnect@702c0000 {
+ compatible = "nvidia,tegra210-aconnect";
+ clocks = <&tegra_car TEGRA210_CLK_APE>,
+ <&tegra_car TEGRA210_CLK_APB2APE>;
+ clock-names = "ape", "apb2ape";
+ power-domains = <&pd_audio>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x702c0000 0x0 0x702c0000 0x00040000>;
+
+ status = "disabled";
+
+ child1 {
+ ...
+ };
+
+ child2 {
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/clock/clps711x-clock.txt b/Documentation/devicetree/bindings/clock/clps711x-clock.txt
index ce5a7476f05d..f1bd53f79d91 100644
--- a/Documentation/devicetree/bindings/clock/clps711x-clock.txt
+++ b/Documentation/devicetree/bindings/clock/clps711x-clock.txt
@@ -1,7 +1,7 @@
* Clock bindings for the Cirrus Logic CLPS711X CPUs
Required properties:
-- compatible : Shall contain "cirrus,clps711x-clk".
+- compatible : Shall contain "cirrus,ep7209-clk".
- reg : Address of the internal register set.
- startup-frequency: Factory set CPU startup frequency in HZ.
- #clock-cells : Should be <1>.
@@ -13,7 +13,7 @@ for the full list of CLPS711X clock IDs.
Example:
clks: clks@80000000 {
#clock-cells = <1>;
- compatible = "cirrus,ep7312-clk", "cirrus,clps711x-clk";
+ compatible = "cirrus,ep7312-clk", "cirrus,ep7209-clk";
reg = <0x80000000 0xc000>;
startup-frequency = <73728000>;
};
diff --git a/Documentation/devicetree/bindings/display/arm,malidp.txt b/Documentation/devicetree/bindings/display/arm,malidp.txt
new file mode 100644
index 000000000000..2f7870983ef1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/arm,malidp.txt
@@ -0,0 +1,65 @@
+ARM Mali-DP
+
+The following bindings apply to a family of Display Processors sold as
+licensable IP by ARM Ltd. The bindings describe the Mali DP500, DP550 and
+DP650 processors, which offer multiple composition layers and support for
+rotation and scaling of the output.
+
+Required properties:
+ - compatible: should be one of
+ "arm,mali-dp500"
+ "arm,mali-dp550"
+ "arm,mali-dp650"
+ depending on the particular implementation present in the hardware
+ - reg: Physical base address and size of the block of registers used by
+ the processor.
+ - interrupts: Interrupt list, as defined in ../interrupt-controller/interrupts.txt,
+ interrupt client nodes.
+ - interrupt-names: name of the engine inside the processor that will
+ use the corresponding interrupt. Should be one of "DE" or "SE".
+ - clocks: A list of phandle + clock-specifier pairs, one for each entry
+ in 'clock-names'
+ - clock-names: A list of clock names. It should contain:
+ - "pclk": for the APB interface clock
+ - "aclk": for the AXI interface clock
+ - "mclk": for the main processor clock
+ - "pxlclk": for the pixel clock feeding the output PLL of the processor.
+ - arm,malidp-output-port-lines: Array of u8 values describing the number
+ of output lines per channel (R, G and B).
+
+Required sub-nodes:
+ - port: The Mali DP connection to an encoder input port. The connection
+ is modelled using the OF graph bindings specified in
+ Documentation/devicetree/bindings/graph.txt
+
+Optional properties:
+ - memory-region: phandle to a node describing memory (see
+ Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
+ to be used for the framebuffer; if not present, the framebuffer may
+ be located anywhere in memory.
+
+
+Example:
+
+/ {
+ ...
+
+ dp0: malidp@6f200000 {
+ compatible = "arm,mali-dp650";
+ reg = <0 0x6f200000 0 0x20000>;
+ memory-region = <&display_reserved>;
+ interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 168 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "DE", "SE";
+ clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
+ clock-names = "pxlclk", "mclk", "aclk", "pclk";
+ arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
+ port {
+ dp0_output: endpoint {
+ remote-endpoint = <&tda998x_2_input>;
+ };
+ };
+ };
+
+ ...
+};
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 96c25ee01501..6532a59c9b43 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -1,13 +1,19 @@
-Analog Device ADV7511(W)/13 HDMI Encoders
+Analog Device ADV7511(W)/13/33 HDMI Encoders
-----------------------------------------
-The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP.
+S/PDIF, CEC and HDCP. The ADV7533 supports the DSI interface for input pixels,
+while the others support an RGB interface.
Required properties:
-- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- compatible: Should be one of:
+ "adi,adv7511"
+ "adi,adv7511w"
+ "adi,adv7513"
+ "adi,adv7533"
+
- reg: I2C slave address
The ADV7511 supports a large number of input data formats that differ by their
@@ -32,6 +38,11 @@ The following input format properties are required except in "rgb 1x" and
- adi,input-justification: The input bit justification ("left", "evenly",
"right").
+The following properties are required for ADV7533:
+
+- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
+ be one of 1, 2, 3 or 4.
+
Optional properties:
- interrupts: Specifier for the ADV7511 interrupt
@@ -42,13 +53,18 @@ Optional properties:
- adi,embedded-sync: The input uses synchronization signals embedded in the
data stream (similar to BT.656). Defaults to separate H/V synchronization
signals.
+- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
+ generator. The chip will rely on the sync signals in the DSI data lanes,
+ rather than generate its own timings for HDMI output.
Required nodes:
The ADV7511 has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-- Video port 0 for the RGB or YUV input
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+ remote endpoint phandle should be a reference to a valid mipi_dsi_host device
+ node.
- Video port 1 for the HDMI output
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
index 4f2ba8c13d92..4a0f4f7682ad 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+++ b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
@@ -5,6 +5,7 @@ Required properties for dp-controller:
platform specific such as:
* "samsung,exynos5-dp"
* "rockchip,rk3288-dp"
+ * "rockchip,rk3399-edp"
-reg:
physical base address of the controller and length
of memory mapped region.
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
new file mode 100644
index 000000000000..56a3e68ccb80
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
@@ -0,0 +1,35 @@
+sii902x HDMI bridge bindings
+
+Required properties:
+ - compatible: "sil,sii9022"
+ - reg: i2c address of the bridge
+
+Optional properties:
+ - interrupts-extended or interrupt-parent + interrupts: describe
+ the interrupt line used to inform the host about hotplug events.
+ - reset-gpios: OF device-tree gpio specification for RST_N pin.
+
+Optional subnodes:
+ - video input: this subnode can contain a video input port node
+ to connect the bridge to a display controller output (See this
+ documentation [1]).
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ hdmi-bridge@39 {
+ compatible = "sil,sii9022";
+ reg = <0x39>;
+ reset-gpios = <&pioA 1 0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ bridge_in: endpoint {
+ remote-endpoint = <&dc_out>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt
new file mode 100644
index 000000000000..e3f6aa6a214d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt
@@ -0,0 +1,53 @@
+Toshiba TC358767 eDP bridge bindings
+
+Required properties:
+ - compatible: "toshiba,tc358767"
+ - reg: i2c address of the bridge, 0x68 or 0x0f, depending on bootstrap pins
+ - clock-names: should be "ref"
+ - clocks: OF device-tree clock specification for refclk input. The reference
+ clock rate must be 13 MHz, 19.2 MHz, 26 MHz, or 38.4 MHz.
+
+Optional properties:
+ - shutdown-gpios: OF device-tree gpio specification for SD pin
+ (active high shutdown input)
+ - reset-gpios: OF device-tree gpio specification for RSTX pin
+ (active low system reset)
+ - ports: the ports node can contain video interface port nodes to connect
+ to a DPI/DSI source and to an eDP/DP sink according to [1][2]:
+ - port@0: DSI input port
+ - port@1: DPI input port
+ - port@2: eDP/DP output port
+
+[1]: Documentation/devicetree/bindings/graph.txt
+[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+ edp-bridge@68 {
+ compatible = "toshiba,tc358767";
+ reg = <0x68>;
+ shutdown-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ clock-names = "ref";
+ clocks = <&edp_refclk>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ bridge_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt b/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt
index d685be898d0c..e9c65746e2f1 100644
--- a/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt
+++ b/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt
@@ -1,7 +1,7 @@
* Cirrus Logic CLPS711X Framebuffer
Required properties:
-- compatible: Shall contain "cirrus,clps711x-fb".
+- compatible: Shall contain "cirrus,ep7209-fb".
- reg : Physical base address and length of the controller's registers +
location and size of the framebuffer memory.
- clocks : phandle + clock specifier pair of the FB reference clock.
@@ -18,7 +18,7 @@ Optional properties:
Example:
fb: fb@800002c0 {
- compatible = "cirrus,ep7312-fb", "cirrus,clps711x-fb";
+ compatible = "cirrus,ep7312-fb", "cirrus,ep7209-fb";
reg = <0x800002c0 0xd44>, <0x60000000 0xc000>;
clocks = <&clks 2>;
lcd-supply = <&reg5v0>;
diff --git a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
index acd5668b1ce1..508aee461e0d 100644
--- a/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
+++ b/Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
@@ -8,6 +8,7 @@ Required properties:
Optional properties:
- label: a symbolic name for the connector
- hpd-gpios: HPD GPIO number
+- ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing
Required nodes:
- Video port for HDMI input
diff --git a/Documentation/devicetree/bindings/display/fsl,dcu.txt b/Documentation/devicetree/bindings/display/fsl,dcu.txt
index ae55cde1b69e..63ec2a624aa9 100644
--- a/Documentation/devicetree/bindings/display/fsl,dcu.txt
+++ b/Documentation/devicetree/bindings/display/fsl,dcu.txt
@@ -12,7 +12,7 @@ Required properties:
- clock-names: Should be "dcu" and "pix"
See ../clocks/clock-bindings.txt for details.
- big-endian Boolean property, LS1021A DCU registers are big-endian.
-- fsl,panel: The phandle to panel node.
+- port Video port for the panel output
Optional properties:
- fsl,tcon: The phandle to the timing controller node.
@@ -24,6 +24,11 @@ dcu: dcu@2ce0000 {
clocks = <&platform_clk 0>, <&platform_clk 0>;
clock-names = "dcu", "pix";
big-endian;
- fsl,panel = <&panel>;
fsl,tcon = <&tcon>;
+
+ port {
+ dcu_out: endpoint {
+ remote-endpoint = <&panel_out>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
new file mode 100644
index 000000000000..7b124242b0c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
@@ -0,0 +1,148 @@
+Mediatek HDMI Encoder
+=====================
+
+The Mediatek HDMI encoder can generate HDMI 1.4a or MHL 2.0 signals from
+its parallel input.
+
+Required properties:
+- compatible: Should be "mediatek,<chip>-hdmi".
+- reg: Physical base address and length of the controller's registers
+- interrupts: The interrupt signal from the function block.
+- clocks: device clocks
+ See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- clock-names: must contain "pixel", "pll", "bclk", and "spdif".
+- phys: phandle link to the HDMI PHY node.
+ See Documentation/devicetree/bindings/phy/phy-bindings.txt for details.
+- phy-names: must contain "hdmi"
+- mediatek,syscon-hdmi: phandle link and register offset to the system
+ configuration registers. For mt8173 this must be offset 0x900 into the
+ MMSYS_CONFIG region: <&mmsys 0x900>.
+- ports: A node containing input and output port nodes with endpoint
+ definitions as documented in Documentation/devicetree/bindings/graph.txt.
+- port@0: The input port in the ports node should be connected to a DPI output
+ port.
+- port@1: The output port in the ports node should be connected to the input
+ port of a connector node that contains a ddc-i2c-bus property, or to the
+ input port of an attached bridge chip, such as a SlimPort transmitter.
+
+HDMI CEC
+========
+
+The HDMI CEC controller handles hotplug detection and CEC communication.
+
+Required properties:
+- compatible: Should be "mediatek,<chip>-cec"
+- reg: Physical base address and length of the controller's registers
+- interrupts: The interrupt signal from the function block.
+- clocks: device clock
+
+HDMI DDC
+========
+
+The HDMI DDC i2c controller is used to interface with the HDMI DDC pins;
+Mediatek's I2C controller is used to interface with I2C devices.
+
+Required properties:
+- compatible: Should be "mediatek,<chip>-hdmi-ddc"
+- reg: Physical base address and length of the controller's registers
+- clocks: device clock
+- clock-names: Should be "ddc-i2c".
+
+HDMI PHY
+========
+
+The HDMI PHY serializes the HDMI encoder's three channel 10-bit parallel
+output and drives the HDMI pads.
+
+Required properties:
+- compatible: "mediatek,<chip>-hdmi-phy"
+- reg: Physical base address and length of the module's registers
+- clocks: PLL reference clock
+- clock-names: must contain "pll_ref"
+- clock-output-names: must be "hdmitx_dig_cts" on mt8173
+- #phy-cells: must be <0>
+- #clock-cells: must be <0>
+
+Optional properties:
+- mediatek,ibias: TX DRV bias current for <1.65Gbps, defaults to 0xa
+- mediatek,ibias_up: TX DRV bias current for >1.65Gbps, defaults to 0x1c
+
+Example:
+
+cec: cec@10013000 {
+ compatible = "mediatek,mt8173-cec";
+ reg = <0 0x10013000 0 0xbc>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_CEC>;
+};
+
+hdmi_phy: hdmi-phy@10209100 {
+ compatible = "mediatek,mt8173-hdmi-phy";
+ reg = <0 0x10209100 0 0x24>;
+ clocks = <&apmixedsys CLK_APMIXED_HDMI_REF>;
+ clock-names = "pll_ref";
+ clock-output-names = "hdmitx_dig_cts";
+ mediatek,ibias = <0xa>;
+ mediatek,ibias_up = <0x1c>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+};
+
+hdmi_ddc0: i2c@11012000 {
+ compatible = "mediatek,mt8173-hdmi-ddc";
+ reg = <0 0x11012000 0 0x1c>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_I2C5>;
+ clock-names = "ddc-i2c";
+};
+
+hdmi0: hdmi@14025000 {
+ compatible = "mediatek,mt8173-hdmi";
+ reg = <0 0x14025000 0 0x400>;
+ interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&mmsys CLK_MM_HDMI_PIXEL>,
+ <&mmsys CLK_MM_HDMI_PLLCK>,
+ <&mmsys CLK_MM_HDMI_AUDIO>,
+ <&mmsys CLK_MM_HDMI_SPDIF>;
+ clock-names = "pixel", "pll", "bclk", "spdif";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_pin>;
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi";
+ mediatek,syscon-hdmi = <&mmsys 0x900>;
+ assigned-clocks = <&topckgen CLK_TOP_HDMI_SEL>;
+ assigned-clock-parents = <&hdmi_phy>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ hdmi0_in: endpoint {
+ remote-endpoint = <&dpi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ hdmi0_out: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+ };
+ };
+};
+
+connector {
+ compatible = "hdmi-connector";
+ type = "a";
+ ddc-i2c-bus = <&hdmiddc0>;
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index f5948c48b9a2..6b1cab17f52d 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -11,8 +11,7 @@ Required properties:
be 0 or 1, since we have 2 DSI controllers at most for now.
- interrupts: The interrupt signal from the DSI block.
- power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
- See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- clocks: Phandles to device clocks.
- clock-names: the following clocks are required:
* "mdp_core_clk"
* "iface_clk"
@@ -23,16 +22,21 @@ Required properties:
* "core_clk"
For DSIv2, we need an additional clock:
* "src_clk"
+- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
+- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
+ by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node
- vddio-supply: phandle to vdd-io regulator device node
- vdda-supply: phandle to vdda regulator device node
-- qcom,dsi-phy: phandle to DSI PHY device node
+- phys: phandle to DSI PHY device node
+- phy-names: the name of the corresponding PHY device
- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
+- ports: Contains 2 DSI controller ports as child nodes. Each port contains
+ an endpoint subnode as defined in [2] and [3].
Optional properties:
- panel@0: Node of panel connected to this DSI controller.
- See files in Documentation/devicetree/bindings/display/panel/ for each supported
- panel.
+ See files in [4] for each supported panel.
- qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
driving a panel which needs 2 DSI links.
- qcom,master-dsi: Boolean value indicating if the DSI controller is driving
@@ -44,34 +48,38 @@ Optional properties:
- pinctrl-names: the pin control state names; should contain "default"
- pinctrl-0: the default pinctrl state (active)
- pinctrl-n: the "sleep" pinctrl state
-- port: DSI controller output port, containing one endpoint subnode.
+- ports: contains DSI controller input and output ports as children, each
+ containing one endpoint subnode.
DSI Endpoint properties:
- - remote-endpoint: set to phandle of the connected panel's endpoint.
- See Documentation/devicetree/bindings/graph.txt for device graph info.
- - qcom,data-lane-map: this describes how the logical DSI lanes are mapped
- to the physical lanes on the given platform. The value contained in
- index n describes what logical data lane is mapped to the physical data
- lane n (DATAn, where n lies between 0 and 3).
+ - remote-endpoint: For port@0, set to phandle of the connected panel/bridge's
+ input endpoint. For port@1, set to the MDP interface output. See [2] for
+ device graph info.
+
+ - data-lanes: this describes how the physical DSI data lanes are mapped
+ to the logical lanes on the given platform. The value contained in
+ index n describes what physical lane is mapped to the logical lane n
+ (DATAn, where n lies between 0 and 3). The clock lane position is fixed
+ and can't be changed. Hence, they aren't a part of the DT bindings. See
+ [3] for more info on the data-lanes property.
For example:
- qcom,data-lane-map = <3 0 1 2>;
+ data-lanes = <3 0 1 2>;
- The above mapping describes that the logical data lane DATA3 is mapped to
- the physical data lane DATA0, logical DATA0 to physical DATA1, logic DATA1
- to phys DATA2 and logic DATA2 to phys DATA3.
+ The above mapping describes that the logical data lane DATA0 is mapped to
+ the physical data lane DATA3, logical DATA1 to physical DATA0, logic DATA2
+ to phys DATA1 and logic DATA3 to phys DATA2.
There are only a limited number of physical to logical mappings possible:
-
- "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3;
- "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
- "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3;
- "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3;
- "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3;
- "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3;
- "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3;
- "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3;
+ <0 1 2 3>
+ <1 2 3 0>
+ <2 3 0 1>
+ <3 0 1 2>
+ <0 3 2 1>
+ <1 0 3 2>
+ <2 1 0 3>
+ <3 2 1 0>
DSI PHY:
Required properties:
@@ -86,11 +94,12 @@ Required properties:
* "dsi_pll"
* "dsi_phy"
* "dsi_phy_regulator"
+- #clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating
+ 2 clocks: A byte clock (index 0), and a pixel clock (index 1).
- qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should
be 0 or 1, since we have 2 DSI PHYs at most for now.
- power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
- See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface_clk"
- vddio-supply: phandle to vdd-io regulator device node
@@ -99,11 +108,16 @@ Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted.
+[1] Documentation/devicetree/bindings/clocks/clock-bindings.txt
+[2] Documentation/devicetree/bindings/graph.txt
+[3] Documentation/devicetree/bindings/media/video-interfaces.txt
+[4] Documentation/devicetree/bindings/display/panel/
+
Example:
- mdss_dsi0: qcom,mdss_dsi@fd922800 {
+ dsi0: dsi@fd922800 {
compatible = "qcom,mdss-dsi-ctrl";
qcom,dsi-host-index = <0>;
- interrupt-parent = <&mdss_mdp>;
+ interrupt-parent = <&mdp>;
interrupts = <4 0>;
reg-names = "dsi_ctrl";
reg = <0xfd922800 0x200>;
@@ -124,19 +138,48 @@ Example:
<&mmcc MDSS_AHB_CLK>,
<&mmcc MDSS_MDP_CLK>,
<&mmcc MDSS_PCLK0_CLK>;
+
+ assigned-clocks =
+ <&mmcc BYTE0_CLK_SRC>,
+ <&mmcc PCLK0_CLK_SRC>;
+ assigned-clock-parents =
+ <&dsi_phy0 0>,
+ <&dsi_phy0 1>;
+
vdda-supply = <&pma8084_l2>;
vdd-supply = <&pma8084_l22>;
vddio-supply = <&pma8084_l12>;
- qcom,dsi-phy = <&mdss_dsi_phy0>;
+ phys = <&dsi_phy0>;
+ phy-names ="dsi-phy";
qcom,dual-dsi-mode;
qcom,master-dsi;
qcom,sync-dual-dsi;
pinctrl-names = "default", "sleep";
- pinctrl-0 = <&mdss_dsi_active>;
- pinctrl-1 = <&mdss_dsi_suspend>;
+ pinctrl-0 = <&dsi_active>;
+ pinctrl-1 = <&dsi_suspend>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&mdp_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
panel: panel@0 {
compatible = "sharp,lq101r1sx01";
@@ -152,16 +195,9 @@ Example:
};
};
};
-
- port {
- dsi0_out: endpoint {
- remote-endpoint = <&panel_in>;
- lanes = <0 1 2 3>;
- };
- };
};
- mdss_dsi_phy0: qcom,mdss_dsi_phy@fd922a00 {
+ dsi_phy0: dsi-phy@fd922a00 {
compatible = "qcom,dsi-phy-28nm-hpm";
qcom,dsi-phy-index = <0>;
reg-names =
@@ -173,6 +209,7 @@ Example:
<0xfd922d80 0x7b>;
clock-names = "iface_clk";
clocks = <&mmcc MDSS_AHB_CLK>;
+ #clock-cells = <1>;
vddio-supply = <&pma8084_l12>;
qcom,dsi-phy-regulator-ldo-mode;
diff --git a/Documentation/devicetree/bindings/display/msm/mdp.txt b/Documentation/devicetree/bindings/display/msm/mdp.txt
deleted file mode 100644
index a214f6cd0363..000000000000
--- a/Documentation/devicetree/bindings/display/msm/mdp.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Qualcomm adreno/snapdragon display controller
-
-Required properties:
-- compatible:
- * "qcom,mdp4" - mdp4
- * "qcom,mdp5" - mdp5
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The interrupt signal from the display controller.
-- connectors: array of phandles for output device(s)
-- clocks: device clocks
- See ../clocks/clock-bindings.txt for details.
-- clock-names: the following clocks are required.
- For MDP4:
- * "core_clk"
- * "iface_clk"
- * "lut_clk"
- * "src_clk"
- * "hdmi_clk"
- * "mdp_clk"
- For MDP5:
- * "bus_clk"
- * "iface_clk"
- * "core_clk_src"
- * "core_clk"
- * "lut_clk" (some MDP5 versions may not need this)
- * "vsync_clk"
-
-Optional properties:
-- gpus: phandle for gpu device
-- clock-names: the following clocks are optional:
- * "lut_clk"
-
-Example:
-
-/ {
- ...
-
- mdp: qcom,mdp@5100000 {
- compatible = "qcom,mdp4";
- reg = <0x05100000 0xf0000>;
- interrupts = <GIC_SPI 75 0>;
- connectors = <&hdmi>;
- gpus = <&gpu>;
- clock-names =
- "core_clk",
- "iface_clk",
- "lut_clk",
- "src_clk",
- "hdmi_clk",
- "mdp_clk";
- clocks =
- <&mmcc MDP_SRC>,
- <&mmcc MDP_AHB_CLK>,
- <&mmcc MDP_LUT_CLK>,
- <&mmcc TV_SRC>,
- <&mmcc HDMI_TV_CLK>,
- <&mmcc MDP_TV_CLK>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/msm/mdp4.txt b/Documentation/devicetree/bindings/display/msm/mdp4.txt
new file mode 100644
index 000000000000..3c341a15ccdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/mdp4.txt
@@ -0,0 +1,112 @@
+Qualcomm adreno/snapdragon MDP4 display controller
+
+Description:
+
+This is the bindings documentation for the MDP4 display controller found in
+SoCs like MSM8960, APQ8064 and MSM8660.
+
+Required properties:
+- compatible:
+ * "qcom,mdp4" - mdp4
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the display controller.
+- clocks: device clocks
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+ * "core_clk"
+ * "iface_clk"
+ * "bus_clk"
+ * "lut_clk"
+ * "hdmi_clk"
+ * "tv_clk"
+- ports: contains the list of output ports from MDP. These connect to interfaces
+ that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
+ special case since it is a part of the MDP block itself).
+
+ Each output port contains an endpoint that describes how it is connected to an
+ external interface. These are described by the standard properties documented
+ here:
+ Documentation/devicetree/bindings/graph.txt
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ The output port mappings are:
+ Port 0 -> LCDC/LVDS
+ Port 1 -> DSI1 Cmd/Video
+ Port 2 -> DSI2 Cmd/Video
+ Port 3 -> DTV
+
+Optional properties:
+- clock-names: the following clocks are optional:
+ * "lut_clk"
+
+Example:
+
+/ {
+ ...
+
+ hdmi: hdmi@4a00000 {
+ ...
+ ports {
+ ...
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp_dtv_out>;
+ };
+ };
+ ...
+ };
+ ...
+ };
+
+ ...
+
+ mdp: mdp@5100000 {
+ compatible = "qcom,mdp4";
+ reg = <0x05100000 0xf0000>;
+ interrupts = <GIC_SPI 75 0>;
+ clock-names =
+ "core_clk",
+		    "iface_clk",
+		    "bus_clk",
+ "lut_clk",
+ "hdmi_clk",
+ "tv_clk";
+ clocks =
+ <&mmcc MDP_CLK>,
+ <&mmcc MDP_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>,
+ <&mmcc MDP_LUT_CLK>,
+ <&mmcc HDMI_TV_CLK>,
+ <&mmcc MDP_TV_CLK>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp_lvds_out: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdp_dsi1_out: endpoint {
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ mdp_dsi2_out: endpoint {
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ mdp_dtv_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt
new file mode 100644
index 000000000000..30c11ea83754
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/mdp5.txt
@@ -0,0 +1,160 @@
+Qualcomm adreno/snapdragon MDP5 display controller
+
+Description:
+
+This is the bindings documentation for the Mobile Display Subsystem (MDSS) that
+encapsulates sub-blocks like MDP5, DSI, HDMI, eDP etc, and the MDP5 display
+controller found in SoCs like MSM8974, APQ8084, MSM8916, MSM8994 and MSM8996.
+
+MDSS:
+Required properties:
+- compatible:
+ * "qcom,mdss" - MDSS
+- reg: Physical base address and length of the controller's registers.
+- reg-names: The names of register regions. The following regions are required:
+ * "mdss_phys"
+ * "vbif_phys"
+- interrupts: The interrupt signal from MDSS.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+ source, should be 1.
+- power-domains: a power domain consumer specifier according to
+ Documentation/devicetree/bindings/power/power_domain.txt
+- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+ * "iface_clk"
+ * "bus_clk"
+ * "vsync_clk"
+- #address-cells: number of address cells for the MDSS children. Should be 1.
+- #size-cells: Should be 1.
+- ranges: parent bus address space is the same as the child bus address space.
+
+Optional properties:
+- clock-names: the following clocks are optional:
+ * "lut_clk"
+
+MDP5:
+Required properties:
+- compatible:
+ * "qcom,mdp5" - MDP5
+- reg: Physical base address and length of the controller's registers.
+- reg-names: The names of register regions. The following regions are required:
+ * "mdp_phys"
+- interrupts: Interrupt line from MDP5 to MDSS interrupt controller.
+- interrupt-parent: phandle to the MDSS block
+ through MDP block
+- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+  * "bus_clk"
+  * "iface_clk"
+  * "core_clk"
+  * "vsync_clk"
+- ports: contains the list of output ports from MDP. These connect to interfaces
+ that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
+ special case since it is a part of the MDP block itself).
+
+ Each output port contains an endpoint that describes how it is connected to an
+ external interface. These are described by the standard properties documented
+ here:
+ Documentation/devicetree/bindings/graph.txt
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+
+ The availability of output ports can vary across SoC revisions:
+
+ For MSM8974 and APQ8084:
+ Port 0 -> MDP_INTF0 (eDP)
+ Port 1 -> MDP_INTF1 (DSI1)
+ Port 2 -> MDP_INTF2 (DSI2)
+ Port 3 -> MDP_INTF3 (HDMI)
+
+ For MSM8916:
+ Port 0 -> MDP_INTF1 (DSI1)
+
+ For MSM8994 and MSM8996:
+ Port 0 -> MDP_INTF1 (DSI1)
+ Port 1 -> MDP_INTF2 (DSI2)
+ Port 2 -> MDP_INTF3 (HDMI)
+
+Optional properties:
+- clock-names: the following clocks are optional:
+ * "lut_clk"
+
+Example:
+
+/ {
+ ...
+
+ mdss: mdss@1a00000 {
+ compatible = "qcom,mdss";
+ reg = <0x1a00000 0x1000>,
+ <0x1ac8000 0x3000>;
+ reg-names = "mdss_phys", "vbif_phys";
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+			      "vsync_clk";
+
+ interrupts = <0 72 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@1a01000 {
+ compatible = "qcom,mdp5";
+ reg = <0x1a01000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 0>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "vsync_clk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@1a98000 {
+ ...
+ ports {
+ ...
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&mdp5_intf1_out>;
+ };
+ };
+ ...
+ };
+ ...
+ };
+
+ dsi_phy0: dsi-phy@1a98300 {
+ ...
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
new file mode 100644
index 000000000000..b9877acad012
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
@@ -0,0 +1,7 @@
+LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp079qx1-sp0v"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
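+
+Example (an illustrative sketch only; the supply and backlight phandles are
+placeholders, and the optional properties shown come from the simple-panel
+binding rather than from this file):
+
+	panel: panel {
+		compatible = "lg,lp079qx1-sp0v";
+		power-supply = <&panel_supply>;
+		backlight = <&backlight>;
+	};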
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
new file mode 100644
index 000000000000..42141516f078
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
@@ -0,0 +1,7 @@
+LG 9.7" (2048x1536 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp097qx1-spa1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
index 216c894d4f99..b52ac52757df 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
+++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
@@ -7,6 +7,8 @@ Required properties:
Optional properties:
- label: a symbolic name for the panel
- enable-gpios: panel enable gpio
+- reset-gpios: GPIO to control the RESET pin
+- vcc-supply: phandle of the regulator that supplies power to the display
Required nodes:
- "panel-timing" containing video timings
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
new file mode 100644
index 000000000000..dba298b43b24
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
@@ -0,0 +1,7 @@
+Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,lsn122dl01-c01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
new file mode 100644
index 000000000000..4aff25b8dfe6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
@@ -0,0 +1,7 @@
+Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq101k1ly04"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
new file mode 100644
index 000000000000..bcb0e8a29f71
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
@@ -0,0 +1,7 @@
+Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq123p1jx31"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
new file mode 100644
index 000000000000..1e87fe6078af
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
@@ -0,0 +1,7 @@
+Starry 12.2" (1920x1200 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "starry,kr122ea0sra"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
index e832ff98fd61..01cced1c2a18 100644
--- a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
@@ -2,7 +2,8 @@ Rockchip RK3288 specific extensions to the Analogix Display Port
================================
Required properties:
-- compatible: "rockchip,rk3288-edp";
+- compatible: "rockchip,rk3288-dp",
+ "rockchip,rk3399-edp";
- reg: physical base address of the controller and length
@@ -27,6 +28,12 @@ Required properties:
Port 0: contained 2 endpoints, connecting to the output of vop.
Port 1: contained 1 endpoint, connecting to the input of panel.
+Optional properties for specific chips:
+- clocks: from common clock binding: phandle to the grf_vio clock.
+
+- clock-names: from common clock binding:
+ Required elements: "grf"
+
For the below properties, please refer to Analogix DP binding document:
* Documentation/devicetree/bindings/drm/bridge/analogix_dp.txt
- phys (required)
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
index a3bd8c050c4e..0fad7ed2ea19 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
@@ -208,6 +208,7 @@ of the following host1x client modules:
See ../clocks/clock-bindings.txt for details.
- clock-names: Must include the following entries:
- sor: clock input for the SOR hardware
+ - source: source clock for the SOR clock
- parent: input for the pixel clock
- dp: reference clock for the SOR clock
- safe: safe reference for the SOR clock during power up
@@ -226,9 +227,9 @@ of the following host1x client modules:
- nvidia,dpaux: phandle to a DisplayPort AUX interface
- dpaux: DisplayPort AUX interface
- - compatible: For Tegra124, must contain "nvidia,tegra124-dpaux". Otherwise,
- must contain '"nvidia,<chip>-dpaux", "nvidia,tegra124-dpaux"', where
- <chip> is tegra132.
+ - compatible : Should contain one of the following:
+ - "nvidia,tegra124-dpaux": for Tegra124 and Tegra132
+ - "nvidia,tegra210-dpaux": for Tegra210
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt outputs from the controller.
- clocks: Must contain an entry for each entry in clock-names.
@@ -241,6 +242,12 @@ of the following host1x client modules:
- reset-names: Must include the following entries:
- dpaux
- vdd-supply: phandle of a supply that powers the DisplayPort link
+ - i2c-bus: Subnode where I2C slave devices are listed. This subnode
+ must always be present. If there are no I2C slave devices, an empty
+ node should be added. See ../../i2c/i2c.txt for more information.
+
+ See ../pinctrl/nvidia,tegra124-dpaux-padctl.txt for information
+ regarding the DPAUX pad controller bindings.
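+
+   A DPAUX instance with no I2C slave devices would therefore carry an empty
+   subnode, roughly as in this sketch (the unit address is illustrative):
+
+       dpaux@545c0000 {
+           ...
+           i2c-bus {
+               /* no I2C slave devices */
+           };
+       };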
Example:
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 079b42a81d7c..18090e7226b4 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -15,7 +15,7 @@ Required properties:
- reg: Memory map of eDMA CC
- reg-names: "edma3_cc"
- interrupts: Interrupt lines for CCINT, MPERR and CCERRINT.
-- interrupt-names: "edma3_ccint", "emda3_mperr" and "edma3_ccerrint"
+- interrupt-names: "edma3_ccint", "edma3_mperr" and "edma3_ccerrint"
- ti,tptcs: List of TPTCs associated with the eDMA in the following form:
<&tptc_phandle TC_priority_number>. The highest priority is 0.
@@ -48,7 +48,7 @@ edma: edma@49000000 {
reg = <0x49000000 0x10000>;
reg-names = "edma3_cc";
interrupts = <12 13 14>;
- interrupt-names = "edma3_ccint", "emda3_mperr", "edma3_ccerrint";
+ interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint";
dma-requests = <64>;
#dma-cells = <2>;
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
new file mode 100644
index 000000000000..3b4436e56865
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -0,0 +1,28 @@
+QCOM Secure Channel Manager (SCM)
+
+Qualcomm processors include an interface to communicate with the secure
+firmware. This interface allows clients to request different types of actions. These
+can include CPU power up/down, HDCP requests, loading of firmware, and other
+assorted actions.
+
+Required properties:
+- compatible: must contain one of the following:
+ * "qcom,scm-apq8064" for APQ8064 platforms
+ * "qcom,scm-msm8660" for MSM8660 platforms
+ * "qcom,scm-msm8690" for MSM8690 platforms
+ * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
+- clocks: One to three clocks may be required based on compatible.
+ * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
+ * Core, iface, and bus clocks required for "qcom,scm"
+- clock-names: Must contain "core" for the core clock, "iface" for the interface
+ clock and "bus" for the bus clock per the requirements of the compatible.
+
+Example for MSM8916:
+
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc GCC_CRYPTO_CLK>, <&gcc GCC_CRYPTO_AXI_CLK>, <&gcc GCC_CRYPTO_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ };
+ };
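+
+For SoCs that take only the core clock, such as "qcom,scm-apq8064", the node
+might look like the following sketch (the clock phandle is a placeholder):
+
+	firmware {
+		scm {
+			compatible = "qcom,scm-apq8064";
+			clocks = <&core_clk>;
+			clock-names = "core";
+		};
+	};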
diff --git a/Documentation/devicetree/bindings/iio/adc/at91_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91_adc.txt
index 0f813dec5e08..f65b04fb7962 100644
--- a/Documentation/devicetree/bindings/iio/adc/at91_adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/at91_adc.txt
@@ -59,28 +59,24 @@ adc0: adc@fffb0000 {
atmel,adc-res-names = "lowres", "highres";
atmel,adc-use-res = "lowres";
- trigger@0 {
- reg = <0>;
+ trigger0 {
trigger-name = "external-rising";
trigger-value = <0x1>;
trigger-external;
};
- trigger@1 {
- reg = <1>;
+ trigger1 {
trigger-name = "external-falling";
trigger-value = <0x2>;
trigger-external;
};
- trigger@2 {
- reg = <2>;
+ trigger2 {
trigger-name = "external-any";
trigger-value = <0x3>;
trigger-external;
};
- trigger@3 {
- reg = <3>;
+ trigger3 {
trigger-name = "continuous";
trigger-value = <0x6>;
};
diff --git a/Documentation/devicetree/bindings/input/clps711x-keypad.txt b/Documentation/devicetree/bindings/input/clps711x-keypad.txt
index e68d2bbc6c07..3eed8819d05d 100644
--- a/Documentation/devicetree/bindings/input/clps711x-keypad.txt
+++ b/Documentation/devicetree/bindings/input/clps711x-keypad.txt
@@ -1,7 +1,7 @@
* Cirrus Logic CLPS711X matrix keypad device tree bindings
Required Properties:
-- compatible: Shall contain "cirrus,clps711x-keypad".
+- compatible: Shall contain "cirrus,ep7209-keypad".
- row-gpios: List of GPIOs used as row lines.
- poll-interval: Poll interval time in milliseconds.
- linux,keymap: The definition can be found at
@@ -12,7 +12,7 @@ Optional Properties:
Example:
keypad {
- compatible = "cirrus,ep7312-keypad", "cirrus,clps711x-keypad";
+ compatible = "cirrus,ep7312-keypad", "cirrus,ep7209-keypad";
autorepeat;
poll-interval = <120>;
row-gpios = <&porta 0 0>,
diff --git a/Documentation/devicetree/bindings/input/rotary-encoder.txt b/Documentation/devicetree/bindings/input/rotary-encoder.txt
index 6c9f0c8a846c..e85ce3dea480 100644
--- a/Documentation/devicetree/bindings/input/rotary-encoder.txt
+++ b/Documentation/devicetree/bindings/input/rotary-encoder.txt
@@ -20,6 +20,8 @@ Optional properties:
2: Half-period mode
4: Quarter-period mode
- wakeup-source: Boolean, rotary encoder can wake up the system.
+- rotary-encoder,encoding: String, the method used to encode steps.
+ Supported are "gray" (the default and more common) and "binary".
Deprecated properties:
- rotary-encoder,half-period: Makes the driver work on half-period mode.
@@ -34,6 +36,7 @@ Example:
compatible = "rotary-encoder";
gpios = <&gpio 19 1>, <&gpio 20 0>; /* GPIO19 is inverted */
linux,axis = <0>; /* REL_X */
+ rotary-encoder,encoding = "gray";
rotary-encoder,relative-axis;
};
@@ -42,5 +45,6 @@ Example:
gpios = <&gpio 21 0>, <&gpio 22 0>;
linux,axis = <1>; /* ABS_Y */
rotary-encoder,steps = <24>;
+ rotary-encoder,encoding = "binary";
rotary-encoder,rollover;
};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
new file mode 100644
index 000000000000..1112e0d794e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -0,0 +1,36 @@
+* GSL 1680 touchscreen controller
+
+Required properties:
+- compatible : "silead,gsl1680"
+- reg : I2C slave address of the chip (0x40)
+- interrupt-parent : a phandle pointing to the interrupt controller
+ serving the interrupt for this chip
+- interrupts : interrupt specification for the gsl1680 interrupt
+- power-gpios : Specification for the pin connected to the gsl1680's
+ shutdown input. This needs to be driven high to take the
+ gsl1680 out of its low power state
+- touchscreen-size-x : See touchscreen.txt
+- touchscreen-size-y : See touchscreen.txt
+
+Optional properties:
+- touchscreen-inverted-x : See touchscreen.txt
+- touchscreen-inverted-y : See touchscreen.txt
+- touchscreen-swapped-x-y : See touchscreen.txt
+- silead,max-fingers : maximum number of fingers the touchscreen can detect
+
+Example:
+
+i2c@00000000 {
+ gsl1680: touchscreen@40 {
+ compatible = "silead,gsl1680";
+ reg = <0x40>;
+ interrupt-parent = <&pio>;
+ interrupts = <6 11 IRQ_TYPE_EDGE_FALLING>;
+ power-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <800>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ silead,max-fingers = <5>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
new file mode 100644
index 000000000000..d87ad14f1efe
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/sis_i2c.txt
@@ -0,0 +1,33 @@
+* SiS I2C Multiple Touch Controller
+
+Required properties:
+- compatible: must be "sis,9200-ts"
+- reg: i2c slave address
+- interrupt-parent: the phandle for the interrupt controller
+ (see interrupt binding [0])
+- interrupts: touch controller interrupt (see interrupt
+ binding [0])
+
+Optional properties:
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the
+ device (see pinctrl binding [1]).
+- attn-gpios: the gpio pin used as attention line
+- reset-gpios: the gpio pin used to reset the controller
+- wakeup-source: touchscreen can be used as a wakeup source
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+
+ sis9255@5c {
+ compatible = "sis,9200-ts";
+ reg = <0x5c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sis>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+ attn-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt
index 759339c34e4f..969b4582ec60 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/cirrus,clps711x-intc.txt
@@ -2,7 +2,7 @@ Cirrus Logic CLPS711X Interrupt Controller
Required properties:
-- compatible: Should be "cirrus,clps711x-intc".
+- compatible: Should be "cirrus,ep7209-intc".
- reg: Specifies base physical address of the registers set.
- interrupt-controller: Identifies the node as an interrupt controller.
- #interrupt-cells: Specifies the number of cells needed to encode an
@@ -34,7 +34,7 @@ ID Name Description
Example:
intc: interrupt-controller {
- compatible = "cirrus,clps711x-intc";
+ compatible = "cirrus,ep7312-intc", "cirrus,ep7209-intc";
reg = <0x80000000 0x4000>;
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index 8cf564d083d2..9d1d72c65489 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -9,6 +9,7 @@ Required properties:
"mediatek,mt8135-sysirq"
"mediatek,mt8127-sysirq"
"mediatek,mt6795-sysirq"
+ "mediatek,mt6755-sysirq"
"mediatek,mt6592-sysirq"
"mediatek,mt6589-sysirq"
"mediatek,mt6582-sysirq"
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index 947863acc2d4..7b94c88cf2ee 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -1,6 +1,6 @@
* ARM SMMUv3 Architecture Implementation
-The SMMUv3 architecture is a significant deparature from previous
+The SMMUv3 architecture is a significant departure from previous
revisions, replacing the MMIO register interface with in-memory command
and event queues and adding support for the ATS and PRI components of
the PCIe specification.
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
index cd1b1cd7b5c4..53c20cae309f 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
@@ -1,7 +1,9 @@
* Mediatek IOMMU Architecture Implementation
- Some Mediatek SOCs contain a Multimedia Memory Management Unit (M4U) which
-uses the ARM Short-Descriptor translation table format for address translation.
+ Some Mediatek SOCs contain a Multimedia Memory Management Unit (M4U), and
+this M4U has two generations of HW architecture. Generation one uses a flat
+pagetable and only supports 4K page mappings. Generation two uses the
+ARM Short-Descriptor translation table format for address translation.
About the M4U Hardware Block Diagram, please check below:
@@ -36,7 +38,9 @@ in each larb. Take a example, There are many ports like MC, PP, VLD in the
video decode local arbiter, all these ports are according to the video HW.
Required properties:
-- compatible : must be "mediatek,mt8173-m4u".
+- compatible : must be one of the following strings:
+ "mediatek,mt2701-m4u" for mt2701 which uses generation one m4u HW.
+ "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
- reg : m4u register base and size.
- interrupts : the interrupt of m4u.
- clocks : must contain one entry for each clock-names.
@@ -46,7 +50,8 @@ Required properties:
according to the local arbiter index, like larb0, larb1, larb2...
- iommu-cells : must be 1. This is the mtk_m4u_id according to the HW.
Specifies the mtk_m4u_id as defined in
- dt-binding/memory/mt8173-larb-port.h.
+ dt-binding/memory/mt2701-larb-port.h for mt2701 and
+ dt-binding/memory/mt8173-larb-port.h for mt8173
Example:
iommu: iommu@10205000 {
diff --git a/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt b/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt
new file mode 100644
index 000000000000..20236385f26e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt
@@ -0,0 +1,64 @@
+* QCOM IOMMU
+
+The MSM IOMMU is an implementation compatible with the ARM VMSA short
+descriptor page tables. It provides address translation for bus masters outside
+of the CPU, each connected to the IOMMU through a port called micro-TLB.
+
+Required Properties:
+
+ - compatible: Must contain "qcom,apq8064-iommu".
+ - reg: Base address and size of the IOMMU registers.
+ - interrupts: Specifiers for the MMU fault interrupts. For instances that
+ support secure mode two interrupts must be specified, for non-secure and
+ secure mode, in that order. For instances that don't support secure mode a
+ single interrupt must be specified.
+ - #iommu-cells: The number of cells needed to specify the stream id. This
+ is always 1.
+ - qcom,ncb: The total number of context banks in the IOMMU.
+ - clocks : List of clocks to be used during SMMU register access. See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ for information about the format. For each clock specified
+ here, there must be a corresponding entry in clock-names
+ (see below).
+
+ - clock-names : List of clock names corresponding to the clocks specified in
+ the "clocks" property (above).
+ Should be "smmu_pclk" for specifying the interface clock
+ required for iommu's register accesses.
+ Should be "smmu_clk" for specifying the functional clock
+ required by iommu for bus accesses.
+
+Each bus master connected to an IOMMU must reference the IOMMU in its device
+node with the following property:
+
+ - iommus: A reference to the IOMMU in multiple cells. The first cell is a
+ phandle to the IOMMU and the second cell is the stream id.
+ A single master device can be connected to more than one IOMMU,
+ and to multiple contexts in each IOMMU, so multiple entries are
+ required to list all the IOMMUs and stream IDs that the master is
+ connected to.
+
+Example: mdp iommu and its bus master
+
+ mdp_port0: iommu@7500000 {
+ compatible = "qcom,apq8064-iommu";
+ #iommu-cells = <1>;
+ clock-names =
+ "smmu_pclk",
+ "smmu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07500000 0x100000>;
+ interrupts =
+ <GIC_SPI 63 0>,
+ <GIC_SPI 64 0>;
+ qcom,ncb = <2>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ compatible = "qcom,mdp";
+ ...
+ iommus = <&mdp_port0 0
+ &mdp_port0 2>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/backlight/lp855x.txt b/Documentation/devicetree/bindings/leds/backlight/lp855x.txt
index 0a3ecbc3a1b9..88f56641fc28 100644
--- a/Documentation/devicetree/bindings/leds/backlight/lp855x.txt
+++ b/Documentation/devicetree/bindings/leds/backlight/lp855x.txt
@@ -13,6 +13,7 @@ Optional properties:
- rom-addr: Register address of ROM area to be updated (u8)
- rom-val: Register value to be updated (u8)
- power-supply: Regulator which controls the 3V rail
+ - enable-supply: Regulator which controls the EN/VDDIO input
Example:
@@ -57,6 +58,7 @@ Example:
backlight@2c {
compatible = "ti,lp8557";
reg = <0x2c>;
+ enable-supply = <&backlight_vddio>;
power-supply = <&backlight_vdd>;
dev-ctrl = /bits/ 8 <0x41>;
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
new file mode 100644
index 000000000000..411ccf421584
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
@@ -0,0 +1,23 @@
+The PDC driver manages data transfer to and from various offload engines
+on some Broadcom SoCs. An SoC may have multiple PDC hardware blocks. There is
+one device tree entry per block.
+
+Required properties:
+- compatible : Should be "brcm,iproc-pdc-mbox".
+- reg: Should contain PDC registers location and length.
+- interrupts: Should contain the IRQ line for the PDC.
+- #mbox-cells: 1
+- brcm,rx-status-len: Length of metadata preceding received frames, in bytes.
+
+Optional properties:
+- brcm,use-bcm-hdr: present if a BCM header precedes each frame.
+
+Example:
+ pdc0: iproc-pdc0@0x612c0000 {
+ compatible = "brcm,iproc-pdc-mbox";
+ reg = <0 0x612c0000 0 0x445>; /* PDC FS0 regs */
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>; /* one cell per mailbox channel */
+ brcm,rx-status-len = <32>;
+ brcm,use-bcm-hdr;
+ };
diff --git a/Documentation/devicetree/bindings/media/nokia,n900-ir b/Documentation/devicetree/bindings/media/nokia,n900-ir
new file mode 100644
index 000000000000..13a18ce37dd1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/nokia,n900-ir
@@ -0,0 +1,20 @@
+Device-Tree bindings for LIRC TX driver for Nokia N900 (RX51)
+
+Required properties:
+ - compatible: should be "nokia,n900-ir".
+ - pwms: specifies PWM used for IR signal transmission.
+
+Example node:
+
+ pwm9: dmtimer-pwm@9 {
+ compatible = "ti,omap-dmtimer-pwm";
+ ti,timers = <&timer9>;
+ ti,clock-source = <0x00>; /* timer_sys_ck */
+ #pwm-cells = <3>;
+ };
+
+ ir: n900-ir {
+ compatible = "nokia,n900-ir";
+
+ pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt b/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt
new file mode 100644
index 000000000000..9bb5f57e2066
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt
@@ -0,0 +1,136 @@
+* Device tree bindings for Atmel EBI
+
+The External Bus Interface (EBI) controller is a bus where you can connect
+asynchronous (NAND, NOR, SRAM, etc.) and synchronous memories (SDR/DDR SDRAMs).
+The EBI provides a glue-less interface to asynchronous memories through the SMC
+(Static Memory Controller).
+
+Required properties:
+
+- compatible: "atmel,at91sam9260-ebi"
+ "atmel,at91sam9261-ebi"
+ "atmel,at91sam9263-ebi0"
+ "atmel,at91sam9263-ebi1"
+ "atmel,at91sam9rl-ebi"
+ "atmel,at91sam9g45-ebi"
+ "atmel,at91sam9x5-ebi"
+ "atmel,sama5d3-ebi"
+
+- reg: Contains offset/length value for EBI memory mapping.
+ This property might contain several entries if the EBI
+ memory range is not contiguous
+
+- #address-cells: Must be 2.
+ The first cell encodes the CS.
+ The second cell encodes the offset into the CS memory
+ range.
+
+- #size-cells: Must be set to 1.
+
+- ranges: Encodes CS to memory region association.
+
+- clocks: Clock feeding the EBI controller.
+ See clock-bindings.txt
+
+Children device nodes are representing device connected to the EBI bus.
+
+Required device node properties:
+
+- reg: Contains the chip-select id, the offset and the length
+ of the memory region requested by the device.
+
+EBI bus configuration will be defined directly in the device subnode.
+
+Optional EBI/SMC properties:
+
+- atmel,smc-bus-width: width of the asynchronous device's data bus
+ 8, 16 or 32.
+ Defaults to 8 when undefined.
+
+- atmel,smc-byte-access-type: "write" or "select" (see Atmel datasheet).
+ Defaults to "select" when undefined.
+
+- atmel,smc-read-mode: "nrd" or "ncs".
+ Defaults to "ncs" when undefined.
+
+- atmel,smc-write-mode: "nwe" or "ncs".
+ Defaults to "ncs" when undefined.
+
+- atmel,smc-exnw-mode: "disabled", "frozen" or "ready".
+ Defaults to "disabled" when undefined.
+
+- atmel,smc-page-mode: enable page mode if present. The provided value
+ defines the page size (supported values: 4, 8,
+ 16 and 32).
+
+- atmel,smc-tdf-mode: "normal" or "optimized". When set to
+ "optimized" the data float time is optimized
+ depending on the next device being accessed
+ (next device setup time is subtracted from the
+ current device data float time).
+ Defaults to "normal" when undefined.
+
+If at least one atmel,smc- property is defined, the following SMC timing
+properties become mandatory. On the other hand, if none of the atmel,smc-
+properties are specified, the EBI bus configuration is assumed to be handled
+by the sub-device driver, and none of these properties should be defined.
+
+All the timings are expressed in nanoseconds (see Atmel datasheet for a full
+description).
+
+- atmel,smc-ncs-rd-setup-ns
+- atmel,smc-nrd-setup-ns
+- atmel,smc-ncs-wr-setup-ns
+- atmel,smc-nwe-setup-ns
+- atmel,smc-ncs-rd-pulse-ns
+- atmel,smc-nrd-pulse-ns
+- atmel,smc-ncs-wr-pulse-ns
+- atmel,smc-nwe-pulse-ns
+- atmel,smc-nwe-cycle-ns
+- atmel,smc-nrd-cycle-ns
+- atmel,smc-tdf-ns
+
+Example:
+
+ ebi: ebi@10000000 {
+ compatible = "atmel,sama5d3-ebi";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ atmel,smc = <&hsmc>;
+ atmel,matrix = <&matrix>;
+ reg = <0x10000000 0x10000000
+ 0x40000000 0x30000000>;
+ ranges = <0x0 0x0 0x10000000 0x10000000
+ 0x1 0x0 0x40000000 0x10000000
+ 0x2 0x0 0x50000000 0x10000000
+ 0x3 0x0 0x60000000 0x10000000>;
+ clocks = <&mck>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ebi_addr>;
+
+ nor: flash@0,0 {
+ compatible = "cfi-flash";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+
+ atmel,smc-read-mode = "nrd";
+ atmel,smc-write-mode = "nwe";
+ atmel,smc-bus-width = <16>;
+ atmel,smc-ncs-rd-setup-ns = <0>;
+ atmel,smc-ncs-wr-setup-ns = <0>;
+ atmel,smc-nwe-setup-ns = <8>;
+ atmel,smc-nrd-setup-ns = <16>;
+ atmel,smc-ncs-rd-pulse-ns = <84>;
+ atmel,smc-ncs-wr-pulse-ns = <84>;
+ atmel,smc-nrd-pulse-ns = <76>;
+ atmel,smc-nwe-pulse-ns = <76>;
+ atmel,smc-nrd-cycle-ns = <107>;
+ atmel,smc-nwe-cycle-ns = <84>;
+ atmel,smc-tdf-ns = <16>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
index 06a83ceebba7..aa614b2d7cab 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
@@ -2,16 +2,31 @@ SMI (Smart Multimedia Interface) Common
The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
+Mediatek SMI has two generations of HW architecture: mt8173 uses the second
+generation of SMI HW while mt2701 uses the first generation.
+
+There are slight differences between the two generations. For generation 2, the
+register which controls the iommu port is at each larb's register base, while
+for generation 1 the register is at the smi ao base (smi always-on register
+base). Besides that, the smi async clock must be prepared and enabled for
+SMI generation 1 to transform the smi clock into the emi clock domain; this is
+not needed for SMI generation 2.
+
Required properties:
-- compatible : must be "mediatek,mt8173-smi-common"
+- compatible : must be one of :
+ "mediatek,mt2701-smi-common"
+ "mediatek,mt8173-smi-common"
- reg : the register and size of the SMI block.
- power-domains : a phandle to the power domain of this local arbiter.
- clocks : Must contain an entry for each entry in clock-names.
-- clock-names : must contain 2 entries, as follows:
+- clock-names : must contain 3 entries for generation 1 smi HW and 2 entries
+ for generation 2 smi HW as follows:
- "apb" : Advanced Peripheral Bus clock, It's the clock for setting
the register.
- "smi" : It's the clock for transfer data and command.
- They may be the same if both source clocks are the same.
+ They may be the same if both source clocks are the same.
+ - "async" : asynchronous clock, it help transform the smi clock into the emi
+ clock domain, this clock is only needed by generation 1 smi HW.
Example:
smi_common: smi@14022000 {
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
index 55ff3b7e0bb9..21277a56e94c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
@@ -3,7 +3,9 @@ SMI (Smart Multimedia Interface) Local Arbiter
The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
Required properties:
-- compatible : must be "mediatek,mt8173-smi-larb"
+- compatible : must be one of :
+ "mediatek,mt8173-smi-larb"
+ "mediatek,mt2701-smi-larb"
- reg : the register and size of this local arbiter.
- mediatek,smi : a phandle to the smi_common node.
- power-domains : a phandle to the power domain of this local arbiter.
diff --git a/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
index 21055e210234..c1359f4d48d7 100644
--- a/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
@@ -46,6 +46,10 @@ Required properties:
0 maps to GPMC_WAIT0 pin.
- gpio-cells: Must be set to 2
+Required properties when using NAND prefetch dma:
+ - dmas GPMC NAND prefetch dma channel
+ - dma-names Must be set to "rxtx"
+
Timing properties for child nodes. All are optional and default to 0.
- gpmc,sync-clk-ps: Minimum clock period for synchronous mode, in picoseconds
@@ -137,7 +141,8 @@ Example for an AM33xx board:
ti,hwmods = "gpmc";
reg = <0x50000000 0x2000>;
interrupts = <100>;
-
+ dmas = <&edma 52 0>;
+ dma-names = "rxtx";
gpmc,num-cs = <8>;
gpmc,num-waitpins = <2>;
#address-cells = <2>;
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
index a41157b5d930..e6afdfa3543d 100644
--- a/Documentation/devicetree/bindings/mfd/twl6040.txt
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -19,8 +19,8 @@ Required properties:
Optional properties, nodes:
- enable-active-high: To power on the twl6040 during boot.
-- clocks: phandle to the clk32k clock provider
-- clock-names: Must be "clk32k"
+- clocks: phandle to the clk32k and/or the mclk clock provider
+- clock-names: Must be "clk32k" for the 32K clock and "mclk" for the MCLK.
Vibra functionality
Required properties:
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 31b35c3a5e47..3404afa9b938 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -9,8 +9,12 @@ Device Tree Bindings for the Arasan SDHCI Controller
[4] Documentation/devicetree/bindings/phy/phy-bindings.txt
Required Properties:
- - compatible: Compatibility string. Must be 'arasan,sdhci-8.9a' or
- 'arasan,sdhci-4.9a' or 'arasan,sdhci-5.1'
+ - compatible: Compatibility string. One of:
+ - "arasan,sdhci-8.9a": generic Arasan SDHCI 8.9a PHY
+ - "arasan,sdhci-4.9a": generic Arasan SDHCI 4.9a PHY
+ - "arasan,sdhci-5.1": generic Arasan SDHCI 5.1 PHY
+ - "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1": rk3399 eMMC PHY
+ For this device it is strongly suggested to include arasan,soc-ctl-syscon.
- reg: From mmc bindings: Register location and length.
- clocks: From clock bindings: Handles to clock inputs.
- clock-names: From clock bindings: Tuple including "clk_xin" and "clk_ahb"
@@ -22,6 +26,17 @@ Required Properties for "arasan,sdhci-5.1":
- phys: From PHY bindings: Phandle for the Generic PHY for arasan.
- phy-names: MUST be "phy_arasan".
+Optional Properties:
+ - arasan,soc-ctl-syscon: A phandle to a syscon device (see ../mfd/syscon.txt)
+ used to access core corecfg registers. Offsets of registers in this
+ syscon are determined based on the main compatible string for the device.
+ - clock-output-names: If specified, this will be the name of the card clock
+ which will be exposed by this device. Required if #clock-cells is
+ specified.
+ - #clock-cells: If specified this should be the value <0>. With this property
+ in place we will export a clock representing the Card Clock. This clock
+ is expected to be consumed by our PHY. You must also specify
+ clock-output-names if you specify #clock-cells.
+
Example:
sdhci@e0100000 {
compatible = "arasan,sdhci-8.9a";
@@ -42,3 +57,19 @@ Example:
phys = <&emmc_phy>;
phy-names = "phy_arasan";
} ;
+
+ sdhci: sdhci@fe330000 {
+ compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1";
+ reg = <0x0 0xfe330000 0x0 0x10000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_EMMC>, <&cru ACLK_EMMC>;
+ clock-names = "clk_xin", "clk_ahb";
+ arasan,soc-ctl-syscon = <&grf>;
+ assigned-clocks = <&cru SCLK_EMMC>;
+ assigned-clock-rates = <200000000>;
+ clock-output-names = "emmc_cardclock";
+ phys = <&emmc_phy>;
+ phy-names = "phy_arasan";
+ #clock-cells = <0>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt
deleted file mode 100644
index 59476fbdbfa1..000000000000
--- a/Documentation/devicetree/bindings/mmc/brcm,bcm2835-sdhci.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Broadcom BCM2835 SDHCI controller
-
-This file documents differences between the core properties described
-by mmc.txt and the properties that represent the BCM2835 controller.
-
-Required properties:
-- compatible : Should be "brcm,bcm2835-sdhci".
-- clocks : The clock feeding the SDHCI controller.
-
-Example:
-
-sdhci: sdhci {
- compatible = "brcm,bcm2835-sdhci";
- reg = <0x7e300000 0x100>;
- interrupts = <2 30>;
- clocks = <&clk_mmc>;
- bus-width = <4>;
-};
diff --git a/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt b/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt
new file mode 100644
index 000000000000..82847174c37d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/brcm,bcm7425-sdhci.txt
@@ -0,0 +1,36 @@
+* BROADCOM BRCMSTB/BMIPS SDHCI Controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci-brcmstb driver.
+
+NOTE: The driver disables all UHS speed modes by default and depends
+on Device Tree properties to enable them for SoC/Board combinations
+that support them.
+
+Required properties:
+- compatible: "brcm,bcm7425-sdhci"
+
+Refer to clocks/clock-bindings.txt for generic clock consumer properties.
+
+Example:
+
+ sdhci@f03e0100 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0xf03e0000 0x100>;
+ interrupts = <0x0 0x26 0x0>;
+ sdhci,auto-cmd12;
+ clocks = <&sw_sdio>;
+ sd-uhs-sdr50;
+ sd-uhs-ddr50;
+ };
+
+ sdhci@f03e0300 {
+ non-removable;
+ bus-width = <0x8>;
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0xf03e0200 0x100>;
+ interrupts = <0x0 0x27 0x0>;
+ sdhci,auto-cmd12;
+ clocks = <&sw_sdio>;
+ mmc-hs200-1_8v;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index dca56d6248f5..3e29050ec769 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -28,6 +28,8 @@ Optional properties:
transparent level shifters on the outputs of the controller. Two cells are
required, first cell specifies minimum slot voltage (mV), second cell
specifies maximum slot voltage (mV). Several ranges could be specified.
+- fsl,tuning-start-tap: Specify the start delay cell point when sending the first
+  CMD19 in the tuning procedure.
- fsl,tuning-step: Specify the increasing delay cell steps in tuning procedure.
The uSDHC use one delay cell as default increasing step to do tuning process.
This property allows user to change the tuning step to more than one delay
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index ed23b9bedfdc..22d1e1f3f38b 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -46,8 +46,12 @@ Optional properties:
- mmc-hs200-1_2v: eMMC HS200 mode(1.2V I/O) is supported
- mmc-hs400-1_8v: eMMC HS400 mode(1.8V I/O) is supported
- mmc-hs400-1_2v: eMMC HS400 mode(1.2V I/O) is supported
+- mmc-hs400-enhanced-strobe: eMMC HS400 enhanced strobe mode is supported
- dsr: Value the card's (optional) Driver Stage Register (DSR) should be
programmed with. Valid range: [0 .. 0xffff].
+- no-sdio: controller is limited to send sdio cmd during initialization
+- no-sd: controller is limited to send sd cmd during initialization
+- no-mmc: controller is limited to send mmc cmd during initialization
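+
+For instance, a soldered-down eMMC slot for which SD and SDIO initialization
+commands should be skipped, and which supports enhanced strobe, might carry
+the following (an illustrative sketch only):
+
+	mmc3: mmc@480ad000 {
+		...
+		non-removable;
+		no-sd;
+		no-sdio;
+		mmc-hs400-enhanced-strobe;
+	};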
*NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt
new file mode 100644
index 000000000000..489807005eda
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt
@@ -0,0 +1,32 @@
+* Atmel Quad Serial Peripheral Interface (QSPI)
+
+Required properties:
+- compatible: Should be "atmel,sama5d2-qspi".
+- reg: Should contain the locations and lengths of the base registers
+ and the mapped memory.
+- reg-names: Should contain the resource reg names:
+ - qspi_base: configuration register address space
+ - qspi_mmap: memory mapped address space
+- interrupts: Should contain the interrupt for the device.
+- clocks: The phandle of the clock needed by the QSPI controller.
+- #address-cells: Should be <1>.
+- #size-cells: Should be <0>.
+
+Example:
+
+spi@f0020000 {
+ compatible = "atmel,sama5d2-qspi";
+ reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>;
+ reg-names = "qspi_base", "qspi_mmap";
+ interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&spi0_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0_default>;
+ status = "okay";
+
+ m25p80@0 {
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 7066597c9a81..b40f3a492800 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -27,6 +27,7 @@ Required properties:
brcm,brcmnand-v6.2
brcm,brcmnand-v7.0
brcm,brcmnand-v7.1
+ brcm,brcmnand-v7.2
brcm,brcmnand
- reg : the register start and length for NAND register region.
(optional) Flash DMA register range (if present)
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
new file mode 100644
index 000000000000..f248056da24c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -0,0 +1,56 @@
+* Cadence Quad SPI controller
+
+Required properties:
+- compatible : Should be "cdns,qspi-nor".
+- reg : Contains two entries, each of which is a tuple consisting of a
+ physical address and length. The first entry is the address and
+ length of the controller register set. The second entry is the
+ address and length of the QSPI Controller data area.
+- interrupts : Unit interrupt specifier for the controller interrupt.
+- clocks : phandle to the Quad SPI clock.
+- cdns,fifo-depth : Size of the data FIFO in words.
+- cdns,fifo-width : Bus width of the data FIFO in bytes.
+- cdns,trigger-address : 32-bit indirect AHB trigger address.
+
+Optional properties:
+- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
+
+Optional subnodes:
+Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional
+custom properties:
+- cdns,read-delay : Delay for read capture logic, in clock cycles
+- cdns,tshsl-ns : Delay in nanoseconds for the length that the master
+ mode chip select outputs are de-asserted between
+ transactions.
+- cdns,tsd2d-ns : Delay in nanoseconds between one chip select being
+ de-activated and the activation of another.
+- cdns,tchsh-ns : Delay in nanoseconds between last bit of current
+ transaction and deasserting the device chip select
+ (qspi_n_ss_out).
+- cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low
+ and first bit transfer.
+
+Example:
+
+ qspi: spi@ff705000 {
+ compatible = "cdns,qspi-nor";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xff705000 0x1000>,
+ <0xffa00000 0x1000>;
+ interrupts = <0 151 4>;
+ clocks = <&qspi_clk>;
+ cdns,is-decoded-cs;
+ cdns,fifo-depth = <128>;
+ cdns,fifo-width = <4>;
+ cdns,trigger-address = <0x00000000>;
+
+ flash0: n25q00@0 {
+ ...
+ cdns,read-delay = <4>;
+ cdns,tshsl-ns = <50>;
+ cdns,tsd2d-ns = <50>;
+ cdns,tchsh-ns = <4>;
+ cdns,tslch-ns = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index 3ee7e202657c..174f68c26c1b 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -39,7 +39,7 @@ Optional properties:
"prefetch-polled" Prefetch polled mode (default)
"polled" Polled mode, without prefetch
- "prefetch-dma" Prefetch enabled sDMA mode
+ "prefetch-dma" Prefetch enabled DMA mode
"prefetch-irq" Prefetch enabled irq mode
- elm_id: <deprecated> use "ti,elm-id" instead
diff --git a/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt b/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt
new file mode 100644
index 000000000000..74981520d6dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt
@@ -0,0 +1,24 @@
+HiSilicon SPI-NOR Flash Controller
+
+Required properties:
+- compatible : Should be "hisilicon,fmc-spi-nor" and one of the following strings:
+ "hisilicon,hi3519-spi-nor"
+- #address-cells : Should be 1.
+- #size-cells : Should be 0.
+- reg : Offset and length of the register set for the controller device.
+- reg-names : Must include the following two entries: "control", "memory".
+- clocks : handle to spi-nor flash controller clock.
+
+Example:
+spi-nor-controller@10000000 {
+ compatible = "hisilicon,hi3519-spi-nor", "hisilicon,fmc-spi-nor";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10000000 0x1000>, <0x14000000 0x1000000>;
+ reg-names = "control", "memory";
+ clocks = <&clock HI3519_FMC_CLK>;
+ spi-nor@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
new file mode 100644
index 000000000000..069c192ed5c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -0,0 +1,160 @@
+MTK SoCs NAND FLASH controller (NFC) DT binding
+
+This file documents the device tree bindings for MTK SoCs NAND controllers.
+The functional split of the controller requires two drivers to operate:
+the nand controller interface driver and the ECC engine driver.
+
+The hardware description for both devices must be captured as device
+tree nodes.
+
+1) NFC NAND Controller Interface (NFI):
+=======================================
+
+The first part of NFC is NAND Controller Interface (NFI) HW.
+Required NFI properties:
+- compatible: Should be "mediatek,mtxxxx-nfc".
+- reg: Base physical address and size of NFI.
+- interrupts: Interrupts of NFI.
+- clocks: NFI required clocks.
+- clock-names: NFI clocks' internal names.
+- status: Disabled by default. Set to "okay" by the platform.
+- ecc-engine: Required ECC Engine node.
+- #address-cells: NAND chip index, should be 1.
+- #size-cells: Should be 0.
+
+Example:
+
+ nandc: nfi@1100d000 {
+ compatible = "mediatek,mt2701-nfc";
+ reg = <0 0x1100d000 0 0x1000>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_NFI>,
+ <&pericfg CLK_PERI_NFI_PAD>;
+ clock-names = "nfi_clk", "pad_clk";
+ status = "disabled";
+ ecc-engine = <&bch>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+Platform-related properties, to be set in {platform_name}.dts:
+- children nodes: NAND chips.
+
+Child node properties:
+- reg: Chip Select Signal, default 0.
+       Set as reg = <0>, <1> when 2 CS are needed.
+Optional:
+- nand-on-flash-bbt: Store BBT on NAND Flash.
+- nand-ecc-mode: the NAND ecc mode (check driver for supported modes)
+- nand-ecc-step-size: Number of data bytes covered by a single ECC step.
+ valid values: 512 and 1024.
+ 1024 is recommended for large page NANDs.
+- nand-ecc-strength: Number of bits to correct per ECC step.
+ The valid values that the controller supports are: 4, 6,
+ 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44,
+ 48, 52, 56, 60.
+ The strength should be calculated as follows:
+ E = (S - F) * 8 / 14
+ S = O / (P / Q)
+ E : nand-ecc-strength.
+ S : spare size per sector.
+ F : FDM size, should be in the range [1,8].
+ It is used to store free oob data.
+ O : oob size.
+ P : page size.
+ Q : nand-ecc-step-size.
+ If the result does not match any of the listed values, select the next
+ smaller valid value from the list (otherwise the driver will adjust it
+ at runtime); a worked example follows this property list.
+- pinctrl-names: Default NAND pin GPIO setting name.
+- pinctrl-0: GPIO setting node.
+
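+As a worked illustration of the formula above (hypothetical part, not taken
+from a real datasheet): assume a 2048-byte page (P = 2048) with 64 bytes of
+OOB (O = 64), nand-ecc-step-size Q = 1024 and an FDM size F = 8. Then:
+	S = O / (P / Q) = 64 / 2 = 32
+	E = (S - F) * 8 / 14 = 24 * 8 / 14 = 13.7
+13.7 is not one of the supported strengths, so the next smaller valid value,
+12, is used for nand-ecc-strength.
+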
+Example:
+ &pio {
+ nand_pins_default: nanddefault {
+ pins_dat {
+ pinmux = <MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7>,
+ <MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6>,
+ <MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4>,
+ <MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3>,
+ <MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0>,
+ <MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1>,
+ <MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5>,
+ <MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8>,
+ <MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up;
+ };
+
+ pins_we {
+ pinmux = <MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_ale {
+ pinmux = <MT2701_PIN_116_MSDC0_CMD__FUNC_NALE>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+ };
+
+ &nandc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins_default>;
+ nand@0 {
+ reg = <0>;
+ nand-on-flash-bbt;
+ nand-ecc-mode = "hw";
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+ };
+ };
+
+NAND chip optional subnodes:
+- Partitions, see Documentation/devicetree/bindings/mtd/partition.txt
+
+Example:
+ nand@0 {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ preloader@0 {
+ label = "pl";
+ read-only;
+ reg = <0x00000000 0x00400000>;
+ };
+ android@0x00400000 {
+ label = "android";
+ reg = <0x00400000 0x12c00000>;
+ };
+ };
+ };
+
+2) ECC Engine:
+==============
+
+Required BCH properties:
+- compatible: Should be "mediatek,mtxxxx-ecc".
+- reg: Base physical address and size of ECC.
+- interrupts: Interrupts of ECC.
+- clocks: ECC required clocks.
+- clock-names: ECC clocks' internal names.
+- status: Disabled by default. Set to "okay" by the platform.
+
+Example:
+
+ bch: ecc@1100e000 {
+ compatible = "mediatek,mt2701-ecc";
+ reg = <0 0x1100e000 0 0x1000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_NFI_ECC>;
+ clock-names = "nfiecc_clk";
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index 086d6f44c4b9..f322f56aef74 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -11,10 +11,16 @@ Required properties:
* "ahb" : AHB gating clock
* "mod" : nand controller clock
+Optional properties:
+- dmas : shall reference the DMA channel associated with the NAND controller.
+- dma-names : shall be "rxtx".
+
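+For example (an illustrative sketch; the DMA controller phandle and request
+number are placeholders):
+
+	nfc: nand@1c03000 {
+		...
+		dmas = <&dma 3>;
+		dma-names = "rxtx";
+	};
+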
Optional children nodes:
Children nodes represent the available nand chips.
Optional properties:
+- reset : phandle + reset specifier pair
+- reset-names : must contain "ahb"
- allwinner,rb : shall contain the native Ready/Busy ids.
or
- rb-gpios : shall contain the gpios used as R/B pins.
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index 05f705e32a4a..e41b2d59ca7f 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -59,8 +59,8 @@ Example:
compatible = "apm,xgene-enet";
status = "disabled";
reg = <0x0 0x17020000 0x0 0xd100>,
- <0x0 0X17030000 0x0 0X400>,
- <0x0 0X10000000 0x0 0X200>;
+ <0x0 0x17030000 0x0 0x400>,
+ <0x0 0x10000000 0x0 0x200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x3c 0x4>;
port-id = <0>;
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index b037a9d78d93..a1e3693cca16 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -27,6 +27,9 @@ Optional properties:
number to 1.
- fsl,magic-packet : If present, indicates that the hardware supports waking
up via magic packet.
+- fsl,err006687-workaround-present: If present, indicates that the system has
+ the hardware workaround for ERR006687 applied and does not need a software
+ workaround.
Optional subnodes:
- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
diff --git a/Documentation/devicetree/bindings/pci/aardvark-pci.txt b/Documentation/devicetree/bindings/pci/aardvark-pci.txt
new file mode 100644
index 000000000000..bbcd9f4c501f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/aardvark-pci.txt
@@ -0,0 +1,56 @@
+Aardvark PCIe controller
+
+This PCIe controller is used on the Marvell Armada 3700 ARM64 SoC.
+
+The Device Tree node describing an Aardvark PCIe controller must
+contain the following properties:
+
+ - compatible: Should be "marvell,armada-3700-pcie"
+ - reg: range of registers for the PCIe controller
+ - interrupts: the interrupt line of the PCIe controller
+ - #address-cells: set to <3>
+ - #size-cells: set to <2>
+ - device_type: set to "pci"
+ - ranges: ranges for the PCI memory and I/O regions
+ - #interrupt-cells: set to <1>
+ - msi-controller: indicates that the PCIe controller can itself
+ handle MSI interrupts
+ - msi-parent: pointer to the MSI controller to be used
+ - interrupt-map-mask and interrupt-map: standard PCI properties to
+ define the mapping of the PCIe interface to interrupt numbers.
+ - bus-range: PCI bus numbers covered
+
+In addition, the Device Tree describing an Aardvark PCIe controller
+must include a sub-node that describes the legacy interrupt controller
+built into the PCIe controller. This sub-node must have the following
+properties:
+
+ - interrupt-controller
+ - #interrupt-cells: set to <1>
+
+Example:
+
+ pcie0: pcie@d0070000 {
+ compatible = "marvell,armada-3700-pcie";
+ device_type = "pci";
+ status = "disabled";
+ reg = <0 0xd0070000 0 0x20000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ msi-controller;
+ msi-parent = <&pcie0>;
+ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
+ 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ pcie_intc: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
new file mode 100644
index 000000000000..330a45b5f0b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/axis,artpec6-pcie.txt
@@ -0,0 +1,46 @@
+* Axis ARTPEC-6 PCIe interface
+
+This PCIe host controller is based on the Synopsys DesignWare PCIe IP
+and thus inherits all the common properties defined in designware-pcie.txt.
+
+Required properties:
+- compatible: "axis,artpec6-pcie", "snps,dw-pcie"
+- reg: base addresses and lengths of the PCIe controller (DBI),
+ the phy controller, and configuration address space.
+- reg-names: Must include the following entries:
+ - "dbi"
+ - "phy"
+ - "config"
+- interrupts: A list of interrupt outputs of the controller. Must contain an
+ entry for each entry in the interrupt-names property.
+- interrupt-names: Must include the following entries:
+ - "msi": The interrupt that is asserted when an MSI is received
+- axis,syscon-pcie: A phandle pointing to the ARTPEC-6 system controller,
+ used to enable and control the Synopsys IP.
+
+Example:
+
+ pcie@f8050000 {
+ compatible = "axis,artpec6-pcie", "snps,dw-pcie";
+ reg = <0xf8050000 0x2000
+ 0xf8040000 0x1000
+ 0xc0000000 0x1000>;
+ reg-names = "dbi", "phy", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ /* downstream I/O */
+ ranges = <0x81000000 0 0x00010000 0xc0010000 0 0x00010000
+ /* non-prefetchable memory */
+ 0x82000000 0 0xc0020000 0xc0020000 0 0x1ffe0000>;
+ num-lanes = <2>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ axis,syscon-pcie = <&syscon>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index ef683b2fd23a..41e9f55a1467 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -24,6 +24,9 @@ Required properties:
The first entry must be a link to the SCFG device node
The second entry must be '0' or '1' based on physical PCIe controller index.
This is used to get SCFG PEXN registers
+- dma-coherent: Indicates that the hardware IP block can ensure the coherency
+ of the data transferred from/to the IP block. This avoids software cache
+ flush/invalidate operations and can improve performance significantly.
Example:
@@ -38,6 +41,7 @@ Example:
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <4>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
diff --git a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
index 0bf1ae243552..3742c152c467 100644
--- a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
+++ b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
@@ -124,7 +124,7 @@ For Tegra124 and Tegra132, the list of valid PHY nodes is given below:
- functions: "usb3-ss", "sata"
For Tegra210, the list of valid PHY nodes is given below:
-- utmi: utmi-0, utmi-1, utmi-2, utmi-3
+- usb2: usb2-0, usb2-1, usb2-2, usb2-3
- functions: "snps", "xusb", "uart"
- hsic: hsic-0, hsic-1
- functions: "snps", "xusb"
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index 555cb0f40690..e3ea55763b0a 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -7,6 +7,13 @@ Required properties:
- reg: PHY register address offset and length in "general
register files"
+Optional clocks using the clock bindings (see ../clock/clock-bindings.txt),
+specified by name:
+ - clock-names: Should contain "emmcclk". Although this is listed as optional
+ (because most boards can get basic functionality without having
+ access to it), it is strongly suggested.
+ - clocks: Should have a phandle to the card clock exported by the SDHCI driver.
+
Example:
@@ -20,6 +27,8 @@ grf: syscon@ff770000 {
emmcphy: phy@f780 {
compatible = "rockchip,rk3399-emmc-phy";
reg = <0xf780 0x20>;
+ clocks = <&sdhci>;
+ clock-names = "emmcclk";
#phy-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 32f4a2d6d0b3..fe7fe0b03cfb 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -5,6 +5,8 @@ Required properties for the root node:
"amlogic,meson8b-cbus-pinctrl"
"amlogic,meson8-aobus-pinctrl"
"amlogic,meson8b-aobus-pinctrl"
+ "amlogic,meson-gxbb-periphs-pinctrl"
+ "amlogic,meson-gxbb-aobus-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt
new file mode 100644
index 000000000000..f2abdaee9022
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt
@@ -0,0 +1,60 @@
+Device tree binding for NVIDIA Tegra DPAUX pad controller
+========================================================
+
+The Tegra Display Port Auxiliary (DPAUX) pad controller manages two pins
+which can be assigned to either the DPAUX channel or to an I2C
+controller.
+
+This document defines the device-specific binding for the DPAUX pad
+controller. Refer to pinctrl-bindings.txt in this directory for generic
+information about pin controller device tree bindings. Please refer to
+the binding document ../display/tegra/nvidia,tegra20-host1x.txt for more
+details on the DPAUX binding.
+
+Pin muxing:
+-----------
+
+Child nodes contain the pinmux configurations following the conventions
+from the pinctrl-bindings.txt document.
+
+Since only three configurations are possible, only three child nodes are
+needed to describe the pin mux'ing options for the DPAUX pads.
+Furthermore, given that the pad functions are only applicable to a
+single set of pads, the child nodes only need to describe the pad group
+the functions are being applied to rather than the individual pads.
+
+Required properties:
+- groups: Must be "dpaux-io"
+- function: Must be either "aux", "i2c" or "off".
+
+Example:
+--------
+
+ dpaux@545c0000 {
+ ...
+
+ state_dpaux_aux: pinmux-aux {
+ groups = "dpaux-io";
+ function = "aux";
+ };
+
+ state_dpaux_i2c: pinmux-i2c {
+ groups = "dpaux-io";
+ function = "i2c";
+ };
+
+ state_dpaux_off: pinmux-off {
+ groups = "dpaux-io";
+ function = "off";
+ };
+ };
+
+ ...
+
+ i2c@7000d100 {
+ ...
+ pinctrl-0 = <&state_dpaux_i2c>;
+ pinctrl-1 = <&state_dpaux_off>;
+ pinctrl-names = "default", "idle";
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.txt b/Documentation/devicetree/bindings/power/renesas,apmu.txt
new file mode 100644
index 000000000000..84404c9edff7
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/renesas,apmu.txt
@@ -0,0 +1,31 @@
+DT bindings for the Renesas Advanced Power Management Unit
+
+The Renesas R-Car line of SoCs utilizes one or more APMU hardware units
+for CPU core power domain control, including SMP boot and CPU hotplug.
+
+Required properties:
+
+- compatible: Should be "renesas,<soctype>-apmu", "renesas,apmu" as fallback.
+ Examples with soctypes are:
+ - "renesas,r8a7790-apmu" (R-Car H2)
+ - "renesas,r8a7791-apmu" (R-Car M2-W)
+ - "renesas,r8a7792-apmu" (R-Car V2H)
+ - "renesas,r8a7793-apmu" (R-Car M2-N)
+ - "renesas,r8a7794-apmu" (R-Car E2)
+
+- reg: Base address and length of the I/O registers used by the APMU.
+
+- cpus: A list of phandles to the CPU cores controlled by this APMU, which
+ should match the order of CPU cores used by the WUPCR and PSTR registers
+ in the Advanced Power Management Unit section of the device's datasheet.
+
+
+Example:
+
+This shows the r8a7791 APMU that can control CPU0 and CPU1.
+
+ apmu@e6152000 {
+ compatible = "renesas,r8a7791-apmu", "renesas,apmu";
+ reg = <0 0xe6152000 0 0x188>;
+ cpus = <&cpu0 &cpu1>;
+ };
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
index b74e4d4785ab..0725fb37a973 100644
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
@@ -14,6 +14,7 @@ Required properties:
- "renesas,r8a7793-sysc" (R-Car M2-N)
- "renesas,r8a7794-sysc" (R-Car E2)
- "renesas,r8a7795-sysc" (R-Car H3)
+ - "renesas,r8a7796-sysc" (R-Car M3-W)
- reg: Address start and address range for the device.
- #power-domain-cells: Must be 1.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
deleted file mode 100644
index 29b28b8f9a89..000000000000
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-* Network
-
-Currently defined compatibles:
-- fsl,cpm1-scc-enet
-- fsl,cpm2-scc-enet
-- fsl,cpm1-fec-enet
-- fsl,cpm2-fcc-enet (third resource is GFEMR)
-- fsl,qe-enet
-
-Example:
-
- ethernet@11300 {
- compatible = "fsl,mpc8272-fcc-enet",
- "fsl,cpm2-fcc-enet";
- reg = <11300 20 8400 100 11390 1>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <20 8>;
- interrupt-parent = <&PIC>;
- phy-handle = <&PHY0>;
- fsl,cpm-command = <12000300>;
- };
-
-* MDIO
-
-Currently defined compatibles:
-fsl,pq1-fec-mdio (reg is same as first resource of FEC device)
-fsl,cpm2-mdio-bitbang (reg is port C registers)
-
-Properties for fsl,cpm2-mdio-bitbang:
-fsl,mdio-pin : pin of port C controlling mdio data
-fsl,mdc-pin : pin of port C controlling mdio clock
-
-Example:
- mdio@10d40 {
- compatible = "fsl,mpc8272ads-mdio-bitbang",
- "fsl,mpc8272-mdio-bitbang",
- "fsl,cpm2-mdio-bitbang";
- reg = <10d40 14>;
- #address-cells = <1>;
- #size-cells = <0>;
- fsl,mdio-pin = <12>;
- fsl,mdc-pin = <13>;
- };
diff --git a/Documentation/devicetree/bindings/powerpc/opal/oppanel-opal.txt b/Documentation/devicetree/bindings/powerpc/opal/oppanel-opal.txt
new file mode 100644
index 000000000000..dffb79108b61
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/opal/oppanel-opal.txt
@@ -0,0 +1,14 @@
+IBM OPAL Operator Panel Binding
+-------------------------------
+
+Required properties:
+- compatible : Should be "ibm,opal-oppanel".
+- #lines : Number of lines on the operator panel e.g. <0x2>.
+- #length : Number of characters per line of the operator panel e.g. <0x10>.
+
+Example:
+ oppanel {
+ compatible = "ibm,opal-oppanel";
+ #lines = <0x2>;
+ #length = <0x10>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
new file mode 100644
index 000000000000..21f75bbd6dae
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
@@ -0,0 +1,21 @@
+Broadcom iProc PWM controller device tree bindings
+
+This controller has 4 channels.
+
+Required Properties :
+- compatible: must be "brcm,iproc-pwm"
+- reg: physical base address and length of the controller's registers
+- clocks: phandle + clock specifier pair for the external clock
+- #pwm-cells: Should be 3. See pwm.txt in this directory for a
+ description of the cells format.
+
+Refer to clocks/clock-bindings.txt for generic clock consumer properties.
+
+Example:
+
+pwm: pwm@18031000 {
+ compatible = "brcm,iproc-pwm";
+ reg = <0x18031000 0x28>;
+ clocks = <&osc>;
+ #pwm-cells = <3>;
+};
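+
+Since #pwm-cells is 3, a consumer node references a channel with a three-cell
+specifier (channel index, period in nanoseconds, flags), as described in
+pwm.txt. For instance (channel and period chosen arbitrarily):
+
+  pwms = <&pwm 1 10000 PWM_POLARITY_INVERTED>; /* channel 1, 10 us period */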
diff --git a/Documentation/devicetree/bindings/pwm/cirrus,clps711x-pwm.txt b/Documentation/devicetree/bindings/pwm/cirrus,clps711x-pwm.txt
index a183db48f910..c0b2028238d6 100644
--- a/Documentation/devicetree/bindings/pwm/cirrus,clps711x-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/cirrus,clps711x-pwm.txt
@@ -1,15 +1,14 @@
* Cirris Logic CLPS711X PWM controller
Required properties:
-- compatible: Shall contain "cirrus,clps711x-pwm".
+- compatible: Shall contain "cirrus,ep7209-pwm".
- reg: Physical base address and length of the controller's registers.
- clocks: phandle + clock specifier pair of the PWM reference clock.
- #pwm-cells: Should be 1. The cell specifies the index of the channel.
Example:
pwm: pwm@80000400 {
- compatible = "cirrus,ep7312-pwm",
- "cirrus,clps711x-pwm";
+ compatible = "cirrus,ep7312-pwm", "cirrus,ep7209-pwm";
reg = <0x80000400 0x4>;
clocks = <&clks 8>;
#pwm-cells = <1>;
diff --git a/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.txt b/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.txt
new file mode 100644
index 000000000000..472bd46ab5a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/google,cros-ec-pwm.txt
@@ -0,0 +1,23 @@
+* PWM controlled by ChromeOS EC
+
+Google's ChromeOS EC PWM is a simple PWM attached to the Embedded Controller
+(EC) and controlled via a host-command interface.
+
+An EC PWM node should only be found as a sub-node of the EC node (see
+Documentation/devicetree/bindings/mfd/cros-ec.txt).
+
+Required properties:
+- compatible: Must contain "google,cros-ec-pwm"
+- #pwm-cells: Should be 1. The cell specifies the PWM index.
+
+Example:
+ cros-ec@0 {
+ compatible = "google,cros-ec-spi";
+
+ ...
+
+ cros_ec_pwm: ec-pwm {
+ compatible = "google,cros-ec-pwm";
+ #pwm-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
index c52f03b5032f..b4e73778dda3 100644
--- a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
@@ -1,10 +1,14 @@
Tegra SoC PWFM controller
Required properties:
-- compatible: For Tegra20, must contain "nvidia,tegra20-pwm". For Tegra30,
- must contain "nvidia,tegra30-pwm". Otherwise, must contain
- "nvidia,<chip>-pwm", plus one of the above, where <chip> is tegra114,
- tegra124, tegra132, or tegra210.
+- compatible: Must be:
+ - "nvidia,tegra20-pwm": for Tegra20
+ - "nvidia,tegra30-pwm", "nvidia,tegra20-pwm": for Tegra30
+ - "nvidia,tegra114-pwm", "nvidia,tegra20-pwm": for Tegra114
+ - "nvidia,tegra124-pwm", "nvidia,tegra20-pwm": for Tegra124
+ - "nvidia,tegra132-pwm", "nvidia,tegra20-pwm": for Tegra132
+ - "nvidia,tegra210-pwm", "nvidia,tegra20-pwm": for Tegra210
+ - "nvidia,tegra186-pwm": for Tegra186
- reg: physical base address and length of the controller's registers
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
index 5befb538db95..2e53324fb720 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
@@ -9,6 +9,10 @@ Required properties:
Optional properties:
- ti,prescaler: Should be a value between 0 and 7, see the timers datasheet
+- ti,clock-source: Set dmtimer parent clock, values between 0 and 2:
+ - 0x00 - high-frequency system clock (timer_sys_ck)
+ - 0x01 - 32-kHz always-on clock (timer_32k_ck)
+ - 0x02 - external clock (timer_ext_ck, OMAP2 only)
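+
+For instance, a board that wants the timer to run from the 32-kHz always-on
+clock would add the following line to its PWM timer node (value taken from
+the list above):
+
+  ti,clock-source = <0x01>; /* timer_32k_ck */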
Example:
pwm9: dmtimer-pwm@9 {
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
index fb81179dce37..8007e839a716 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
@@ -2,28 +2,48 @@ TI SOC ECAP based APWM controller
Required properties:
- compatible: Must be "ti,<soc>-ecap".
- for am33xx - compatible = "ti,am33xx-ecap";
- for da850 - compatible = "ti,da850-ecap", "ti,am33xx-ecap";
+ for am33xx - compatible = "ti,am3352-ecap", "ti,am33xx-ecap";
+ for am4372 - compatible = "ti,am4372-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
+ for da850 - compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
+ for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap";
- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
the cells format. The PWM channel index ranges from 0 to 4. The only third
cell flag supported by this binding is PWM_POLARITY_INVERTED.
- reg: physical base address and size of the registers map.
Optional properties:
-- ti,hwmods: Name of the hwmod associated to the ECAP:
- "ecap<x>", <x> being the 0-based instance number from the HW spec
+- clocks: Handle to the ECAP's functional clock.
+- clock-names: Must be set to "fck".
Example:
-ecap0: ecap@0 { /* ECAP on am33xx */
- compatible = "ti,am33xx-ecap";
+ecap0: ecap@48300100 { /* ECAP on am33xx */
+ compatible = "ti,am3352-ecap", "ti,am33xx-ecap";
+ #pwm-cells = <3>;
+ reg = <0x48300100 0x80>;
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
+};
+
+ecap0: ecap@48300100 { /* ECAP on am4372 */
+ compatible = "ti,am4372-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48300100 0x80>;
ti,hwmods = "ecap0";
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
+};
+
+ecap0: ecap@1f06000 { /* ECAP on da850 */
+ compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
+ #pwm-cells = <3>;
+ reg = <0x1f06000 0x80>;
};
-ecap0: ecap@0 { /* ECAP on da850 */
- compatible = "ti,da850-ecap", "ti,am33xx-ecap";
+ecap0: ecap@4843e100 {
+ compatible = "ti,dra746-ecap", "ti,am3352-ecap";
#pwm-cells = <3>;
- reg = <0x306000 0x80>;
+ reg = <0x4843e100 0x80>;
+ clocks = <&l4_root_clk_div>;
+ clock-names = "fck";
};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
index 9c100b2c5b23..944fe356bb45 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
@@ -2,28 +2,48 @@ TI SOC EHRPWM based PWM controller
Required properties:
- compatible: Must be "ti,<soc>-ehrpwm".
- for am33xx - compatible = "ti,am33xx-ehrpwm";
- for da850 - compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
+ for am33xx - compatible = "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
+ for am4372 - compatible = "ti,am4372-ehrpwm", "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
+ for da850 - compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
+ for dra746 - compatible = "ti,dra746-ehrpwm", "ti,am3352-ehrpwm";
- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
the cells format. The only third cell flag supported by this binding is
PWM_POLARITY_INVERTED.
- reg: physical base address and size of the registers map.
Optional properties:
-- ti,hwmods: Name of the hwmod associated to the EHRPWM:
- "ehrpwm<x>", <x> being the 0-based instance number from the HW spec
+- clocks: Handle to the PWM's time-base and functional clock.
+- clock-names: Must be set to "tbclk" and "fck".
Example:
-ehrpwm0: ehrpwm@0 { /* EHRPWM on am33xx */
- compatible = "ti,am33xx-ehrpwm";
+ehrpwm0: pwm@48300200 { /* EHRPWM on am33xx */
+ compatible = "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48300200 0x100>;
+ clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
+};
+
+ehrpwm0: pwm@48300200 { /* EHRPWM on am4372 */
+ compatible = "ti,am4372-ehrpwm", "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x48300200 0x80>;
+ clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
ti,hwmods = "ehrpwm0";
};
-ehrpwm0: ehrpwm@0 { /* EHRPWM on da850 */
- compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
+ehrpwm0: pwm@1f00000 { /* EHRPWM on da850 */
+ compatible = "ti,da850-ehrpwm", "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x1f00000 0x2000>;
+};
+
+ehrpwm0: pwm@4843e200 { /* EHRPWM on dra746 */
+ compatible = "ti,dra746-ehrpwm", "ti,am3352-ehrpwm";
#pwm-cells = <3>;
- reg = <0x300000 0x2000>;
+ reg = <0x4843e200 0x80>;
+ clocks = <&ehrpwm0_tbclk>, <&l4_root_clk_div>;
+ clock-names = "tbclk", "fck";
};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
index f7eae77f8354..1a5d7b71db89 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tipwmss.txt
@@ -1,7 +1,11 @@
TI SOC based PWM Subsystem
Required properties:
-- compatible: Must be "ti,am33xx-pwmss";
+- compatible: Must be "ti,<soc>-pwmss".
+ for am33xx - compatible = "ti,am33xx-pwmss";
+ for am4372 - compatible = "ti,am4372-pwmss", "ti,am33xx-pwmss";
+ for dra746 - compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss";
+
- reg: physical base address and size of the registers map.
- address-cells: Specify the number of u32 entries needed in child nodes.
Should set to 1.
@@ -16,7 +20,7 @@ Required properties:
Also child nodes should also populated under PWMSS DT node.
Example:
-pwmss0: pwmss@48300000 {
+epwmss0: epwmss@48300000 { /* PWMSS for am33xx */
compatible = "ti,am33xx-pwmss";
reg = <0x48300000 0x10>;
ti,hwmods = "epwmss0";
@@ -29,3 +33,28 @@ pwmss0: pwmss@48300000 {
/* child nodes go here */
};
+
+epwmss0: epwmss@48300000 { /* PWMSS for am4372 */
+ compatible = "ti,am4372-pwmss", "ti,am33xx-pwmss";
+ reg = <0x48300000 0x10>;
+ ti,hwmods = "epwmss0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ ranges = <0x48300100 0x48300100 0x80 /* ECAP */
+ 0x48300180 0x48300180 0x80 /* EQEP */
+ 0x48300200 0x48300200 0x80>; /* EHRPWM */
+
+ /* child nodes go here */
+};
+
+epwmss0: epwmss@4843e000 { /* PWMSS for DRA7xx */
+ compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss";
+ reg = <0x4843e000 0x30>;
+ ti,hwmods = "epwmss0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /* child nodes go here */
+};
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
index 0822a083fc57..d6de64335022 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
@@ -7,6 +7,7 @@ Required Properties:
- "renesas,pwm-r8a7790": for R-Car H2
- "renesas,pwm-r8a7791": for R-Car M2-W
- "renesas,pwm-r8a7794": for R-Car E2
+ - "renesas,pwm-r8a7795": for R-Car H3
- reg: base address and length of the registers block for the PWM.
- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt b/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt
new file mode 100644
index 000000000000..cb209646bf13
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt
@@ -0,0 +1,18 @@
+== ST STMPE PWM controller ==
+
+This is a PWM block embedded in the ST Microelectronics STMPE
+(ST Multi-Purpose Expander) chips. The PWM is registered as a
+subdevice of the STMPE MFD device.
+
+Required properties:
+- compatible: should be:
+ - "st,stmpe-pwm"
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
+
+Example:
+
+pwm0: pwm {
+ compatible = "st,stmpe-pwm";
+ #pwm-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
index dd6f59cf1455..3aeba9f86ed8 100644
--- a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
@@ -34,6 +34,18 @@ Only required for Voltage Table Mode:
First cell is voltage in microvolts (uV)
Second cell is duty-cycle in percent (%)
+Optional properties for Continuous mode:
+- pwm-dutycycle-unit: Integer value encoding the duty cycle unit. If not
+ defined, <100> is assumed, meaning that
+ pwm-dutycycle-range contains values expressed in
+ percent.
+
+- pwm-dutycycle-range: Should contain 2 entries. The first entry encodes
+ the dutycycle for regulator-min-microvolt and the
+ second one the dutycycle for regulator-max-microvolt.
+ Duty cycle values are expressed in pwm-dutycycle-unit.
+ If not defined, <0 100> is assumed.
+
NB: To be clear, if voltage-table is provided, then the device will be used
in Voltage Table Mode. If no voltage-table is provided, then the device will
be used in Continuous Voltage Mode.
@@ -53,6 +65,13 @@ Continuous Voltage With Enable GPIO Example:
regulator-min-microvolt = <1016000>;
regulator-max-microvolt = <1114000>;
regulator-name = "vdd_logic";
+ /* unit == per-mille */
+ pwm-dutycycle-unit = <1000>;
+ /*
+ * Inverted PWM logic, and the duty cycle range is limited
+ * to 30%-70%.
+ */
+ pwm-dutycycle-range = <700 300>;
};
Voltage Table Example:
diff --git a/Documentation/devicetree/bindings/misc/ramoops.txt b/Documentation/devicetree/bindings/reserved-memory/ramoops.txt
index cd02cec67d38..e81f821a2135 100644
--- a/Documentation/devicetree/bindings/misc/ramoops.txt
+++ b/Documentation/devicetree/bindings/reserved-memory/ramoops.txt
@@ -2,8 +2,9 @@ Ramoops oops/panic logger
=========================
ramoops provides persistent RAM storage for oops and panics, so they can be
-recovered after a reboot. It is a backend to pstore, so this node is named
-"ramoops" after the backend, rather than "pstore" which is the subsystem.
+recovered after a reboot. This is a child-node of "/reserved-memory", and
+is named "ramoops" after the backend, rather than "pstore" which is the
+subsystem.
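+
+As a rough sketch (the address, size and record-size values below are
+illustrative only), such a node sits under the reserved-memory node:
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		ramoops@8f000000 {
+			compatible = "ramoops";
+			reg = <0x8f000000 0x100000>; /* region preserved across reboots */
+			record-size = <0x4000>;
+		};
+	};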
Parts of this storage may be set aside for other persistent log buffers, such
as kernel log messages, or for optional ECC error-correction data. The total
@@ -21,8 +22,7 @@ Required properties:
- compatible: must be "ramoops"
-- memory-region: phandle to a region of memory that is preserved between
- reboots
+- reg: region of memory that is preserved between reboots
Optional properties:
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.txt b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.txt
new file mode 100644
index 000000000000..e746b631793a
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.txt
@@ -0,0 +1,18 @@
+Amlogic Meson SoC Reset Controller
+=======================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: Should be "amlogic,meson8b-reset" or "amlogic,meson-gxbb-reset"
+- reg: should contain the register address base
+- #reset-cells: 1, see below
+
+example:
+
+reset: reset-controller {
+ compatible = "amlogic,meson-gxbb-reset";
+ reg = <0x0 0x04404 0x0 0x20>;
+ #reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt b/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt
index e0b185a944ba..c25da39df707 100644
--- a/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt
+++ b/Documentation/devicetree/bindings/reset/hisilicon,hi6220-reset.txt
@@ -8,7 +8,9 @@ The reset controller registers are part of the system-ctl block on
hi6220 SoC.
Required properties:
-- compatible: may be "hisilicon,hi6220-sysctrl"
+- compatible: should be one of the following:
+ - "hisilicon,hi6220-sysctrl", "syscon" : For peripheral reset controller.
+ - "hisilicon,hi6220-mediactrl", "syscon" : For media reset controller.
- reg: should be register base and length as documented in the
datasheet
- #reset-cells: 1, see below
diff --git a/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt b/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
new file mode 100644
index 000000000000..164c7f34c451
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
@@ -0,0 +1,91 @@
+TI SysCon Reset Controller
+=======================
+
+Almost all SoCs have hardware modules that require reset control in addition
+to clock and power control for their functionality. The reset control is
+typically provided by means of memory-mapped I/O registers. These registers are
+sometimes a part of a larger register space region implementing various
+functionalities. This register range is best represented as a syscon node to
+allow multiple entities to access their relevant registers in the common
+register space.
+
+A SysCon Reset Controller node defines a device that uses a syscon node
+and provides reset management functionality for various hardware modules
+present on the SoC.
+
+SysCon Reset Controller Node
+============================
+Each of the reset provider/controller nodes should be a child of a syscon
+node and have the following properties.
+
+Required properties:
+--------------------
+ - compatible : Should be,
+ "ti,k2e-pscrst"
+ "ti,k2l-pscrst"
+ "ti,k2hk-pscrst"
+ "ti,syscon-reset"
+ - #reset-cells : Should be 1. Please see the reset consumer node below
+ for usage details
+ - ti,reset-bits : Contains the reset control register information
+ Should contain 7 cells for each reset exposed to
+ consumers, defined as:
+ Cell #1 : offset of the reset assert control
+ register from the syscon register base
+ Cell #2 : bit position of the reset in the reset
+ assert control register
+ Cell #3 : offset of the reset deassert control
+ register from the syscon register base
+ Cell #4 : bit position of the reset in the reset
+ deassert control register
+ Cell #5 : offset of the reset status register
+ from the syscon register base
+ Cell #6 : bit position of the reset in the
+ reset status register
+ Cell #7 : Flags used to control reset behavior,
+ available flags defined in the DT include
+ file <dt-bindings/reset/ti-syscon.h>
+
+SysCon Reset Consumer Nodes
+===========================
+Each of the reset consumer nodes should have the following properties,
+in addition to their own properties.
+
+Required properties:
+--------------------
+ - resets : A phandle to the reset controller node and an index number
+ to a reset specifier as defined above.
+
+Please also refer to Documentation/devicetree/bindings/reset/reset.txt for
+common reset controller usage by consumers.
+
+Example:
+--------
+The following example demonstrates a syscon node, the reset controller node
+using the syscon node, and a consumer (a DSP device) on the TI Keystone 2
+Edison SoC.
+
+/ {
+ soc {
+ psc: power-sleep-controller@02350000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x02350000 0x1000>;
+
+ pscrst: psc-reset {
+ compatible = "ti,k2e-pscrst", "ti,syscon-reset";
+ #reset-cells = <1>;
+
+ ti,reset-bits = <
+ 0xa3c 8 0xa3c 8 0x83c 8 (ASSERT_SET|DEASSERT_CLEAR|STATUS_SET) /* 0: pcrst-dsp0 */
+ 0xa40 5 0xa44 3 0 0 (ASSERT_SET|DEASSERT_CLEAR|STATUS_NONE) /* 1: pcrst-example */
+ >;
+ };
+ };
+
+ dsp0: dsp0 {
+ ...
+ resets = <&pscrst 0>;
+ ...
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt
new file mode 100644
index 000000000000..202f2d09a23f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.txt
@@ -0,0 +1,14 @@
+Amlogic Meson Random number generator
+=====================================
+
+Required properties:
+
+- compatible : should be "amlogic,meson-rng"
+- reg : Specifies base physical address and size of the registers.
+
+Example:
+
+rng {
+ compatible = "amlogic,meson-rng";
+ reg = <0x0 0xc8834000 0x0 0x4>;
+};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-opal.txt b/Documentation/devicetree/bindings/rtc/rtc-opal.txt
index a1734e5cb75b..2340938cd0f5 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-opal.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-opal.txt
@@ -2,7 +2,7 @@ IBM OPAL real-time clock
------------------------
Required properties:
-- comapatible: Should be "ibm,opal-rtc"
+- compatible: Should be "ibm,opal-rtc"
Optional properties:
- wakeup-source: Decides if the wakeup is supported or not
diff --git a/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt b/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt
index caaeb2583579..07013fa60a48 100644
--- a/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt
+++ b/Documentation/devicetree/bindings/serial/cirrus,clps711x-uart.txt
@@ -1,7 +1,7 @@
* Cirrus Logic CLPS711X Universal Asynchronous Receiver/Transmitter (UART)
Required properties:
-- compatible: Should be "cirrus,clps711x-uart".
+- compatible: Should be "cirrus,ep7209-uart".
- reg: Address and length of the register set for the device.
- interrupts: Should contain UART TX and RX interrupt.
- clocks: Should contain UART core clock number.
@@ -20,7 +20,7 @@ Example:
};
uart1: uart@80000480 {
- compatible = "cirrus,clps711x-uart";
+ compatible = "cirrus,ep7312-uart","cirrus,ep7209-uart";
reg = <0x80000480 0x80>;
interrupts = <12 13>;
clocks = <&clks 11>;
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index e99e10ab9ecb..0015c722be7b 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -6,6 +6,7 @@ Required properties:
* "mediatek,mt6580-uart" for MT6580 compatible UARTS
* "mediatek,mt6582-uart" for MT6582 compatible UARTS
* "mediatek,mt6589-uart" for MT6589 compatible UARTS
+ * "mediatek,mt6755-uart" for MT6755 compatible UARTS
* "mediatek,mt6795-uart" for MT6795 compatible UARTS
* "mediatek,mt7623-uart" for MT7623 compatible UARTS
* "mediatek,mt8127-uart" for MT8127 compatible UARTS
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm.txt
index 160c752484b4..160c752484b4 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/brg.txt
index 4c7d45eaf025..4c7d45eaf025 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/brg.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/i2c.txt
index 87bc6048667e..87bc6048667e 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/i2c.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/pic.txt
index 8e3ee1681618..8e3ee1681618 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/pic.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/usb.txt
index 74bfda4bb824..74bfda4bb824 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/cpm/usb.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
index 349f79fd7076..349f79fd7076 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/gpio.txt
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
new file mode 100644
index 000000000000..03c741602c6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/network.txt
@@ -0,0 +1,124 @@
+* Network
+
+Currently defined compatibles:
+- fsl,cpm1-scc-enet
+- fsl,cpm2-scc-enet
+- fsl,cpm1-fec-enet
+- fsl,cpm2-fcc-enet (third resource is GFEMR)
+- fsl,qe-enet
+
+Example:
+
+ ethernet@11300 {
+ compatible = "fsl,mpc8272-fcc-enet",
+ "fsl,cpm2-fcc-enet";
+ reg = <11300 20 8400 100 11390 1>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <20 8>;
+ interrupt-parent = <&PIC>;
+ phy-handle = <&PHY0>;
+ fsl,cpm-command = <12000300>;
+ };
+
+* MDIO
+
+Currently defined compatibles:
+fsl,pq1-fec-mdio (reg is same as first resource of FEC device)
+fsl,cpm2-mdio-bitbang (reg is port C registers)
+
+Properties for fsl,cpm2-mdio-bitbang:
+fsl,mdio-pin : pin of port C controlling mdio data
+fsl,mdc-pin : pin of port C controlling mdio clock
+
+Example:
+ mdio@10d40 {
+ compatible = "fsl,mpc8272ads-mdio-bitbang",
+ "fsl,mpc8272-mdio-bitbang",
+ "fsl,cpm2-mdio-bitbang";
+ reg = <10d40 14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ fsl,mdio-pin = <12>;
+ fsl,mdc-pin = <13>;
+ };
+
+* HDLC
+
+Currently defined compatibles:
+- fsl,ucc-hdlc
+
+Properties for fsl,ucc-hdlc:
+- rx-clock-name
+- tx-clock-name
+ Usage: required
+ Value type: <string>
+ Definition : Must be "brg1"-"brg16" for internal clock source,
+ or "clk1"-"clk24" for external clock source.
+
+- fsl,tdm-interface
+ Usage: optional
+ Value type: <empty>
+ Definition : Specifies that the HDLC controller is based on a TDM interface
+
+The property below is dependent on fsl,tdm-interface:
+- fsl,rx-sync-clock
+ Usage: required
+ Value type: <string>
+ Definition : Must be one of "none", "rsync_pin", "brg9-11" or "brg13-15".
+
+- fsl,tx-sync-clock
+ Usage: required
+ Value type: <string>
+ Definition : Must be one of "none", "tsync_pin", "brg9-11" or "brg13-15".
+
+- fsl,tdm-framer-type
+ Usage: required for tdm interface
+ Value type: <string>
+ Definition : "e1" or "t1". Currently only the e1 and t1 framer types
+ are supported.
+
+- fsl,tdm-id
+ Usage: required for tdm interface
+ Value type: <u32>
+ Definition : the TDM ID number
+
+- fsl,tx-timeslot-mask
+- fsl,rx-timeslot-mask
+ Usage: required for tdm interface
+ Value type: <u32>
+ Definition : time slot mask for TDM operation. Indicates which time
+ slots are used for transmitting and receiving.
+
+- fsl,siram-entry-id
+ Usage: required for tdm interface
+ Value type: <u32>
+ Definition : Must be 0, 2, 4, ... 64; the number of the TDM entry.
+
+- fsl,tdm-internal-loopback
+ Usage: optional for tdm interface
+ Value type: <empty>
+ Definition : Enables internal loopback at the TDM layer.
+
+Example for tdm interface:
+
+ ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+Example for hdlc without tdm interface:
+
+ ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "brg1";
+ tx-clock-name = "brg1";
+ };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt
index 4f8930263dd9..d7afaff5faff 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe.txt
@@ -69,6 +69,58 @@ Example:
};
};
+* Interrupt Controller (IC)
+
+Required properties:
+- compatible : should be "fsl,qe-ic".
+- reg : Address range of IC register set.
+- interrupts : interrupts generated by the device.
+- interrupt-controller : this device is an interrupt controller.
+
+Example:
+
+ qeic: interrupt-controller@80 {
+ interrupt-controller;
+ compatible = "fsl,qe-ic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ interrupts = <95 2 0 0 94 2 0 0>;
+ };
+
+* Serial Interface Block (SI)
+
+The SI manages the routing of eight TDM lines to the QE block serial
+drivers, the MCC and the UCCs, for receive and transmit.
+
+Required properties:
+- compatible : must be "fsl,<chip>-qe-si". For t1040, must contain
+ "fsl,t1040-qe-si".
+- reg : Address range of SI register set.
+
+Example:
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+* Serial Interface Block RAM (SIRAM)
+
+Stores the routing entries of the SI.
+
+Required properties:
+- compatible : should be "fsl,<chip>-qe-siram". For t1040, must contain
+ "fsl,t1040-qe-siram".
+- reg : Address range of SI RAM.
+
+Example:
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
* QE Firmware Node
This node defines a firmware binary that is embedded in the device tree, for
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/firmware.txt
index 249db3a15d15..249db3a15d15 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/firmware.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt
index 60984260207b..60984260207b 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt
index ec6ee2e864a2..ec6ee2e864a2 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
index e47734bee3f0..e47734bee3f0 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/ucc.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
index 9ccd5f30405b..9ccd5f30405b 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/usb.txt
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/serial.txt
index 2ea76d9d137c..2ea76d9d137c 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/serial.txt
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/uqe_serial.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/uqe_serial.txt
new file mode 100644
index 000000000000..8823c86c8085
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/uqe_serial.txt
@@ -0,0 +1,17 @@
+* Serial
+
+Required Properties:
+compatible : must be "fsl,<chip>-ucc-uart". For t1040, must be
+"fsl,t1040-ucc-uart".
+port-number : port number of UCC-UART
+tx/rx-clock-name : should be "brg1"-"brg16" for internal clock source,
+ or "clk1"-"clk28" for external clock source.
+
+Example:
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index 5cc82b8353d8..af9ca37221ce 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -68,7 +68,7 @@ important.
Value type: <u32>
Definition: must be 2 - denoting the bit in the entry and IRQ flags
-- #qcom,state-cells:
+- #qcom,smem-state-cells:
Usage: required for outgoing entries
Value type: <u32>
Definition: must be 1 - denoting the bit in the entry
@@ -92,7 +92,7 @@ wcnss-smp2p {
wcnss_smp2p_out: master-kernel {
qcom,entry-name = "master-kernel";
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
wcnss_smp2p_in: slave-kernel {
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.txt
index a6634c70850d..2993b5a97dd6 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smsm.txt
@@ -51,7 +51,7 @@ important.
Definition: specifies the offset, in words, of the first bit for this
entry
-- #qcom,state-cells:
+- #qcom,smem-state-cells:
Usage: required for local entry
Value type: <u32>
Definition: must be 1 - denotes bit number
@@ -91,7 +91,7 @@ smsm {
apps_smsm: apps@0 {
reg = <0>;
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
wcnss_smsm: wcnss@7 {
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt
new file mode 100644
index 000000000000..4ea39e9186a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt
@@ -0,0 +1,116 @@
+Qualcomm WCNSS Binding
+
+This binding describes the Qualcomm WCNSS hardware. It consists of a control
+block and BT, WiFi and FM radio blocks, all using SMD as command channels.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be: "qcom,wcnss",
+
+- qcom,smd-channel:
+ Usage: required
+ Value type: <string>
+ Definition: standard SMD property specifying the SMD channel used for
+ communication with the WiFi firmware.
+ Should be "WCNSS_CTRL".
+
+- qcom,mmio:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to a node specifying the wcnss "ccu" and "dxe"
+ register blocks. The node must be compatible with one of
+ the following:
+ "qcom,riva",
+ "qcom,pronto"
+
+= SUBNODES
+The subnodes of the wcnss node are optional and describe the individual blocks in
+the WCNSS.
+
+== Bluetooth
+The following properties are defined for the bluetooth node:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be:
+ "qcom,wcnss-bt"
+
+== WiFi
+The following properties are defined for the WiFi node:
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,wcnss-wlan",
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the "rx" and "tx" interrupts
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must contain "rx" and "tx"
+
+- qcom,smem-state:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should reference the tx-enable and tx-rings-empty SMEM states
+
+- qcom,smem-state-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must contain "tx-enable" and "tx-rings-empty"
+
+= EXAMPLE
+The following example represents an SMD node with one edge for the "pronto"
+subsystem, containing the wcnss device and its wcn3680 BT and WiFi blocks, as
+found on the 8974 platform.
+
+smd {
+ compatible = "qcom,smd";
+
+ pronto-edge {
+ interrupts = <0 142 1>;
+
+ qcom,ipc = <&apcs 8 17>;
+ qcom,smd-edge = <6>;
+
+ wcnss {
+ compatible = "qcom,wcnss";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,mmio = <&pronto>;
+
+ bt {
+ compatible = "qcom,wcnss-bt";
+ };
+
+ wlan {
+ compatible = "qcom,wcnss-wlan";
+
+ interrupts = <0 145 0>, <0 146 0>;
+ interrupt-names = "tx", "rx";
+
+ qcom,smem-state = <&apps_smsm 10>, <&apps_smsm 9>;
+ qcom,smem-state-names = "tx-enable", "tx-rings-empty";
+ };
+ };
+ };
+};
+
+soc {
+ pronto: pronto {
+ compatible = "qcom,pronto";
+
+ reg = <0xfb204000 0x2000>, <0xfb202000 0x1000>, <0xfb21b000 0x3000>;
+ reg-names = "ccu", "dxe", "pmu";
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/adi,adau17x1.txt b/Documentation/devicetree/bindings/sound/adi,adau17x1.txt
index 8dbce0e18dda..1447dec28125 100644
--- a/Documentation/devicetree/bindings/sound/adi,adau17x1.txt
+++ b/Documentation/devicetree/bindings/sound/adi,adau17x1.txt
@@ -13,6 +13,11 @@ Required properties:
- reg: The i2c address. Value depends on the state of ADDR0
and ADDR1, as wired in hardware.
+Optional properties:
+ - clock-names: If provided must be "mclk".
+ - clocks: phandle + clock-specifiers for the clock that provides
+ the audio master clock for the device.
+
Examples:
#include <dt-bindings/sound/adau17x1.h>
@@ -20,5 +25,8 @@ Examples:
adau1361@38 {
compatible = "adi,adau1761";
reg = <0x38>;
+
+ clock-names = "mclk";
+ clocks = <&audio_clock>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/adi,adau7002.txt b/Documentation/devicetree/bindings/sound/adi,adau7002.txt
new file mode 100644
index 000000000000..f144ee1abf85
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/adi,adau7002.txt
@@ -0,0 +1,19 @@
+Analog Devices ADAU7002 Stereo PDM-to-I2S/TDM Converter
+
+Required properties:
+
+ - compatible: Must be "adi,adau7002"
+
+Optional properties:
+
+ - IOVDD-supply: Phandle and specifier for the power supply providing the IOVDD
+ supply as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+
+ If this property is not present it is assumed that the supply pin is
+ hardwired to always on.
+
+Example:
+ adau7002: pdm-to-i2s {
+ compatible = "adi,adau7002";
+ IOVDD-supply = <&supply>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt b/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt
new file mode 100644
index 000000000000..b139e66d2a11
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/brcm,cygnus-audio.txt
@@ -0,0 +1,67 @@
+BROADCOM Cygnus Audio I2S/TDM/SPDIF controller
+
+Required properties:
+ - compatible : "brcm,cygnus-audio"
+ - #address-cells: 32bit valued, 1 cell.
+ - #size-cells: 32bit valued, 0 cell.
+ - reg : Should contain audio registers location and length
+ - reg-names: names of the registers listed in "reg" property
+ Valid names are "aud" and "i2s_in". "aud" contains a
+ set of DMA, I2S_OUT and SPDIF registers. "i2s_in" contains
+ a set of I2S_IN registers.
+ - clocks: PLL and leaf clocks used by audio ports
+ - assigned-clocks: PLL and leaf clocks
+ - assigned-clock-parents: parent clocks of the assigned clocks
+ (usually the PLL)
+ - assigned-clock-rates: List of clock frequencies of the
+ assigned clocks
+ - clock-names: names of 3 leaf clocks used by audio ports
+ Valid names are "ch0_audio", "ch1_audio", "ch2_audio"
+ - interrupts: audio DMA interrupt number
+
+SSP Subnode properties:
+- reg: The index of the SSP port interface to use.
+ Valid values are 0, 1, 2, or 3 (for spdif)
+
+Example:
+ cygnus_audio: audio@180ae000 {
+ compatible = "brcm,cygnus-audio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x180ae000 0xafd>, <0x180aec00 0x1f8>;
+ reg-names = "aud", "i2s_in";
+ clocks = <&audiopll BCM_CYGNUS_AUDIOPLL_CH0>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH1>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH2>;
+ assigned-clocks = <&audiopll BCM_CYGNUS_AUDIOPLL>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH0>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH1>,
+ <&audiopll BCM_CYGNUS_AUDIOPLL_CH2>;
+ assigned-clock-parents = <&audiopll BCM_CYGNUS_AUDIOPLL>;
+ assigned-clock-rates = <1769470191>,
+ <0>,
+ <0>,
+ <0>;
+ clock-names = "ch0_audio", "ch1_audio", "ch2_audio";
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+
+ ssp0: ssp_port@0 {
+ reg = <0>;
+ status = "okay";
+ };
+
+ ssp1: ssp_port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ ssp2: ssp_port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ spdif: spdif_port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/bt-sco.txt b/Documentation/devicetree/bindings/sound/bt-sco.txt
index 29b8e5d40203..641edf75e184 100644
--- a/Documentation/devicetree/bindings/sound/bt-sco.txt
+++ b/Documentation/devicetree/bindings/sound/bt-sco.txt
@@ -4,7 +4,7 @@ This device support generic Bluetooth SCO link.
Required properties:
- - compatible : "delta,dfbmcs320"
+ - compatible : "delta,dfbmcs320" or "linux,bt-sco"
Example:
diff --git a/Documentation/devicetree/bindings/sound/cs35l33.txt b/Documentation/devicetree/bindings/sound/cs35l33.txt
new file mode 100644
index 000000000000..acfb47525b49
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l33.txt
@@ -0,0 +1,126 @@
+CS35L33 Speaker Amplifier
+
+Required properties:
+
+ - compatible : "cirrus,cs35l33"
+
+ - reg : the I2C address of the device for I2C
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : gpio used to reset the amplifier
+
+ - interrupt-parent : Specifies the phandle of the interrupt controller to
+ which the IRQs from the CS35L33 are delivered.
+ - interrupts : IRQ line info CS35L33.
+ (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+ for further information relating to interrupt properties)
+
+ - cirrus,boost-ctl : Boost voltage used to supply the amp. If the value is
+ 0, then VBST = VP. If greater than 0, the boost voltage will be 3300mV with
+ a value of 1 and will increase at a step size of 100mV until a maximum of
+ 8000mV.
+
+ - cirrus,ramp-rate : On power up, it affects the time from when the power
+ up sequence begins to the time the audio reaches a full-scale output.
+ On power down, it affects the time from when the power-down sequence
+ begins to when the amplifier disables the PWM outputs. If this property
+ is not set then soft ramping will be disabled and ramp time would be
+ 20ms. If this property is set to 0, 1, 2 or 3 then ramp times would be
+ 40ms, 60ms, 100ms or 175ms respectively for a 48kHz sample rate.
+
+ - cirrus,boost-ipk : The maximum current allowed for the boost converter.
+ The range starts at 1850000uA and goes to a maximum of 3600000uA
+ with a step size of 15625uA. The default is 2500000uA.
+
+ - cirrus,imon-adc-scale : Configures the scaling of data bits from the IMON
+ ADC data word. This property can be set as a value of 0 for bits 15 down
+ to 0, 6 for 21 down to 6, 7 for 22 down to 7, or 8 for 23 down to 8.
+
+
+Optional H/G Algorithm sub-node:
+
+The cs35l33 node can have a single "cirrus,hg-algo" sub-node that will enable
+the internal H/G Algorithm.
+
+ - cirrus,hg-algo : Sub-node for internal Class H/G algorithm that
+ controls the amplifier supplies.
+
+Optional properties for the "cirrus,hg-algo" sub-node:
+
+ - cirrus,mem-depth : Memory depth for the Class H/G algorithm measured in
+ LRCLK cycles. If this property is set to 0, 1, 2, or 3 then the memory
+ depths will be 1, 4, 8, 16 LRCLK cycles. The default is 16 LRCLK cycles.
+
+ - cirrus,release-rate : The number of consecutive LRCLK periods before
+ allowing release condition tracking updates. The number of LRCLK periods
+ start at 3 to a maximum of 255.
+
+ - cirrus,ldo-thld : Configures the signal threshold at which the PWM output
+ stage enters LDO operation. Starts as a default value of 50mV for a value
+ of 1 and increases with a step size of 50mV to a maximum of 750mV (value of
+ 0xF).
+
+ - cirrus,ldo-path-disable : This is a boolean property. If present, the H/G
+ algorithm uses the max detection path. If not present, the LDO
+ detection path is used.
+
+ - cirrus,ldo-entry-delay : The LDO entry delay in milliseconds before the H/G
+ algorithm switches to the LDO voltage. This property can be set to values
+ from 0 to 7 for delays of 5ms, 10ms, 50ms, 100ms, 200ms, 500ms, 1000ms.
+ The default is 100ms.
+
+ - cirrus,vp-hg-auto : This is a boolean property. When set, class H/G VPhg
+ automatic updating is enabled.
+
+ - cirrus,vp-hg : Class H/G algorithm VPhg. Controls the H/G algorithm's
+ reference to the VP voltage for when to start generating a boosted VBST.
+ The reference voltage starts at 3000mV with a value of 0x3 and is increased
+ by 100mV per step to a maximum of 5500mV.
+
+ - cirrus,vp-hg-rate : The rate (number of LRCLK periods) at which the VPhg is
+ allowed to increase to a higher voltage when using VPhg automatic
+ tracking. This property can be set to values from 0 to 3 with rates of 128
+ periods, 2048 periods, 32768 periods, and 524288 periods.
+ The default is 32768 periods.
+
+ - cirrus,vp-hg-va : VA calculation reference for automatic VPhg tracking
+ using VPMON. This property can be set to values from 0 to 6 starting at
+ 1800mV with a step size of 50mV up to a maximum value of 1750mV.
+ Default is 1800mV.
+
+Example:
+
+cs35l33: cs35l33@40 {
+ compatible = "cirrus,cs35l33";
+ reg = <0x40>;
+
+ VA-supply = <&ldo5_reg>;
+ VP-supply = <&ldo5_reg>;
+
+ interrupt-parent = <&gpio8>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&cs47l91 34 0>;
+
+ cirrus,ramp-rate = <0x0>;
+ cirrus,boost-ctl = <0x30>; /* VBST = 8000mV */
+ cirrus,boost-ipk = <0xE0>; /* 3600mA */
+ cirrus,imon-adc-scale = <0>; /* Bits 15 down to 0 */
+
+ cirrus,hg-algo {
+ cirrus,mem-depth = <0x3>;
+ cirrus,release-rate = <0x3>;
+ cirrus,ldo-thld = <0x1>;
+ cirrus,ldo-path-disable = <0x0>;
+ cirrus,ldo-entry-delay=<0x4>;
+ cirrus,vp-hg-auto;
+ cirrus,vp-hg=<0xF>;
+ cirrus,vp-hg-rate=<0x2>;
+ cirrus,vp-hg-va=<0x0>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/cs53l30.txt b/Documentation/devicetree/bindings/sound/cs53l30.txt
new file mode 100644
index 000000000000..4dbfb8274cd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs53l30.txt
@@ -0,0 +1,44 @@
+CS53L30 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs53l30"
+
+ - reg : the I2C address of the device
+
+ - VA-supply, VP-supply : power supplies for the device,
+ as covered in Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+ - reset-gpios : a GPIO spec for the reset pin.
+
+ - mute-gpios : a GPIO spec for the MUTE pin. The active state can be either
+ GPIO_ACTIVE_HIGH or GPIO_ACTIVE_LOW, which would be handled
+ by the driver automatically.
+
+ - cirrus,micbias-lvl : Set the output voltage level on the MICBIAS Pin.
+ 0 = Hi-Z
+ 1 = 1.80 V
+ 2 = 2.75 V
+
+ - cirrus,use-sdout2 : This is a boolean property. If present, it indicates
+ the hardware design connects both SDOUT1 and SDOUT2
+ pins to output data. Otherwise, it indicates that
+ only SDOUT1 is connected for data output.
+ * The CS53L30 supports 4-channel data output in the same
+ * frame in two different ways:
+ * 1) Normal I2S mode on two data pins -- each SDOUT
+ * carries 2-channel data at the same time.
+ * 2) TDM mode on one single data pin -- SDOUT1 carries
+ * 4-channel data per frame.
+
+Example:
+
+codec: cs53l30@48 {
+ compatible = "cirrus,cs53l30";
+ reg = <0x48>;
+ reset-gpios = <&gpio 54 0>;
+ VA-supply = <&cs53l30_va>;
+ VP-supply = <&cs53l30_vp>;
+};
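+
+A variant of the node above that also uses the optional microphone-bias level
+and dual-SDOUT properties could look like this (values are illustrative only):
+
+codec: cs53l30@48 {
+	compatible = "cirrus,cs53l30";
+	reg = <0x48>;
+	VA-supply = <&cs53l30_va>;
+	VP-supply = <&cs53l30_vp>;
+	cirrus,micbias-lvl = <2>;	/* MICBIAS pin at 2.75 V */
+	cirrus,use-sdout2;		/* both SDOUT1 and SDOUT2 carry data */
+};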
diff --git a/Documentation/devicetree/bindings/sound/designware-i2s.txt b/Documentation/devicetree/bindings/sound/designware-i2s.txt
index 7bb54247f8e8..6a536d570e29 100644
--- a/Documentation/devicetree/bindings/sound/designware-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/designware-i2s.txt
@@ -12,6 +12,10 @@ Required properties:
one for receive.
- dma-names : "tx" for the transmit channel, "rx" for the receive channel.
+Optional properties:
+ - interrupts: The interrupt line number for the I2S controller. Add this
+ parameter if the I2S controller that you are using does not support DMA.
+
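+A controller wired for interrupt-driven (PIO) operation might then be
+described roughly as follows; the compatible string, clock name, address and
+interrupt specifier below are assumptions for illustration only:
+
+	i2s@7c800000 {
+		compatible = "snps,designware-i2s";
+		reg = <0x7c800000 0x400>;
+		clocks = <&audio_clk>;
+		clock-names = "i2sclk";
+		interrupts = <0 150 4>;
+	};
+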
For more details on the 'dma', 'dma-names', 'clock' and 'clock-names'
properties please check:
* resource-names.txt
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
index ceaef5126989..f749e2744824 100644
--- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
@@ -58,7 +58,7 @@ Required properties:
* DMIC (stands for Digital Microphone Jack)
Note: The "Mic Jack" and "AMIC" are redundant while
- coexsiting in order to support the old bindings
+ coexisting in order to support the old bindings
of wm8962 and sgtl5000.
Optional properties:
diff --git a/Documentation/devicetree/bindings/sound/max98504.txt b/Documentation/devicetree/bindings/sound/max98504.txt
new file mode 100644
index 000000000000..583ed5fdfb28
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98504.txt
@@ -0,0 +1,44 @@
+Maxim MAX98504 class D mono speaker amplifier
+
+This device supports an I2C control interface and an IRQ output signal. It features
+a PCM and PDM digital audio interface (DAI) and a differential analog input.
+
+Required properties:
+
+ - compatible : "maxim,max98504"
+ - reg : should contain the I2C slave device address
+ - DVDD-supply, DIOVDD-supply, PVDD-supply: power supplies for the device,
+ as covered in ../regulator/regulator.txt
+ - interrupts : should specify the interrupt line the device is connected to,
+ as described in ../interrupt-controller/interrupts.txt
+
+Optional properties:
+
+ - maxim,brownout-threshold - the PVDD brownout threshold; the value must be
+ in the 0, 1...21 range, corresponding to the 2.6V, 2.65V...3.65V voltage range
+ - maxim,brownout-attenuation - the brownout attenuation to the speaker gain
+ applied during the "attack hold" and "timed hold" phases; the value must be
+ in the 0...6 (dB) range
+ - maxim,brownout-attack-hold-ms - the brownout attack hold phase time in ms,
+ 0...255 (VBATBROWN_ATTK_HOLD, register 0x0018)
+ - maxim,brownout-timed-hold-ms - the brownout timed hold phase time in ms,
+ 0...255 (VBATBROWN_TIME_HOLD, register 0x0019)
+ - maxim,brownout-release-rate-ms - the brownout release phase step time in ms,
+ 0...255 (VBATBROWN_RELEASE, register 0x001A)
+
+The default value when the above properties are not specified is 0;
+the maxim,brownout-threshold property must be specified to actually enable
+the PVDD brownout protection.
+
+Example:
+
+ max98504@31 {
+ compatible = "maxim,max98504";
+ reg = <0x31>;
+ interrupt-parent = <&gpio_bank_0>;
+ interrupts = <2 0>;
+
+ DVDD-supply = <&regulator>;
+ DIOVDD-supply = <&regulator>;
+ PVDD-supply = <&regulator>;
+};
diff --git a/Documentation/devicetree/bindings/sound/max9860.txt b/Documentation/devicetree/bindings/sound/max9860.txt
new file mode 100644
index 000000000000..e0d4e95e31b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max9860.txt
@@ -0,0 +1,28 @@
+MAX9860 Mono Audio Voice Codec
+
+Required properties:
+
+ - compatible : "maxim,max9860"
+
+ - reg : the I2C address of the device
+
+ - AVDD-supply, DVDD-supply and DVDDIO-supply : power supplies for
+ the device, as covered in bindings/regulator/regulator.txt
+
+ - clock-names : Required element: "mclk".
+
+ - clocks : A clock specifier for the clock connected as MCLK.
+
+Examples:
+
+ max9860: max9860@10 {
+ compatible = "maxim,max9860";
+ reg = <0x10>;
+
+ AVDD-supply = <&reg_1v8>;
+ DVDD-supply = <&reg_1v8>;
+ DVDDIO-supply = <&reg_3v0>;
+
+ clock-names = "mclk";
+ clocks = <&pck2>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
new file mode 100644
index 000000000000..3e623a724e55
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
@@ -0,0 +1,150 @@
+Mediatek AFE PCM controller for mt2701
+
+Required properties:
+- compatible = "mediatek,mt2701-audio";
+- reg: register location and size
+- interrupts: Should contain AFE interrupt
+- clock-names: should have these clock names:
+ "infra_sys_audio_clk",
+ "top_audio_mux1_sel",
+ "top_audio_mux2_sel",
+ "top_audio_mux1_div",
+ "top_audio_mux2_div",
+ "top_audio_48k_timing",
+ "top_audio_44k_timing",
+ "top_audpll_mux_sel",
+ "top_apll_sel",
+ "top_aud1_pll_98M",
+ "top_aud2_pll_90M",
+ "top_hadds2_pll_98M",
+ "top_hadds2_pll_294M",
+ "top_audpll",
+ "top_audpll_d4",
+ "top_audpll_d8",
+ "top_audpll_d16",
+ "top_audpll_d24",
+ "top_audintbus_sel",
+ "clk_26m",
+ "top_syspll1_d4",
+ "top_aud_k1_src_sel",
+ "top_aud_k2_src_sel",
+ "top_aud_k3_src_sel",
+ "top_aud_k4_src_sel",
+ "top_aud_k5_src_sel",
+ "top_aud_k6_src_sel",
+ "top_aud_k1_src_div",
+ "top_aud_k2_src_div",
+ "top_aud_k3_src_div",
+ "top_aud_k4_src_div",
+ "top_aud_k5_src_div",
+ "top_aud_k6_src_div",
+ "top_aud_i2s1_mclk",
+ "top_aud_i2s2_mclk",
+ "top_aud_i2s3_mclk",
+ "top_aud_i2s4_mclk",
+ "top_aud_i2s5_mclk",
+ "top_aud_i2s6_mclk",
+ "top_asm_m_sel",
+ "top_asm_h_sel",
+ "top_univpll2_d4",
+ "top_univpll2_d2",
+ "top_syspll_d5";
+
+Example:
+
+ afe: mt2701-afe-pcm@11220000 {
+ compatible = "mediatek,mt2701-audio";
+ reg = <0 0x11220000 0 0x2000>,
+ <0 0x112A0000 0 0x20000>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infracfg CLK_INFRA_AUDIO>,
+ <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+ <&topckgen CLK_TOP_AUD_MUX2_DIV>,
+ <&topckgen CLK_TOP_AUD_48K_TIMING>,
+ <&topckgen CLK_TOP_AUD_44K_TIMING>,
+ <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
+ <&topckgen CLK_TOP_APLL_SEL>,
+ <&topckgen CLK_TOP_AUD1PLL_98M>,
+ <&topckgen CLK_TOP_AUD2PLL_90M>,
+ <&topckgen CLK_TOP_HADDS2PLL_98M>,
+ <&topckgen CLK_TOP_HADDS2PLL_294M>,
+ <&topckgen CLK_TOP_AUDPLL>,
+ <&topckgen CLK_TOP_AUDPLL_D4>,
+ <&topckgen CLK_TOP_AUDPLL_D8>,
+ <&topckgen CLK_TOP_AUDPLL_D16>,
+ <&topckgen CLK_TOP_AUDPLL_D24>,
+ <&topckgen CLK_TOP_AUDINTBUS_SEL>,
+ <&clk26m>,
+ <&topckgen CLK_TOP_SYSPLL1_D4>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
+ <&topckgen CLK_TOP_ASM_M_SEL>,
+ <&topckgen CLK_TOP_ASM_H_SEL>,
+ <&topckgen CLK_TOP_UNIVPLL2_D4>,
+ <&topckgen CLK_TOP_UNIVPLL2_D2>,
+ <&topckgen CLK_TOP_SYSPLL_D5>;
+
+ clock-names = "infra_sys_audio_clk",
+ "top_audio_mux1_sel",
+ "top_audio_mux2_sel",
+ "top_audio_mux1_div",
+ "top_audio_mux2_div",
+ "top_audio_48k_timing",
+ "top_audio_44k_timing",
+ "top_audpll_mux_sel",
+ "top_apll_sel",
+ "top_aud1_pll_98M",
+ "top_aud2_pll_90M",
+ "top_hadds2_pll_98M",
+ "top_hadds2_pll_294M",
+ "top_audpll",
+ "top_audpll_d4",
+ "top_audpll_d8",
+ "top_audpll_d16",
+ "top_audpll_d24",
+ "top_audintbus_sel",
+ "clk_26m",
+ "top_syspll1_d4",
+ "top_aud_k1_src_sel",
+ "top_aud_k2_src_sel",
+ "top_aud_k3_src_sel",
+ "top_aud_k4_src_sel",
+ "top_aud_k5_src_sel",
+ "top_aud_k6_src_sel",
+ "top_aud_k1_src_div",
+ "top_aud_k2_src_div",
+ "top_aud_k3_src_div",
+ "top_aud_k4_src_div",
+ "top_aud_k5_src_div",
+ "top_aud_k6_src_div",
+ "top_aud_i2s1_mclk",
+ "top_aud_i2s2_mclk",
+ "top_aud_i2s3_mclk",
+ "top_aud_i2s4_mclk",
+ "top_aud_i2s5_mclk",
+ "top_aud_i2s6_mclk",
+ "top_asm_m_sel",
+ "top_asm_h_sel",
+ "top_univpll2_d4",
+ "top_univpll2_d2",
+ "top_syspll_d5";
+ };
diff --git a/Documentation/devicetree/bindings/sound/mt2701-cs42448.txt b/Documentation/devicetree/bindings/sound/mt2701-cs42448.txt
new file mode 100644
index 000000000000..05574446ceb6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt2701-cs42448.txt
@@ -0,0 +1,43 @@
+MT2701 with CS42448 CODEC
+
+Required properties:
+- compatible: "mediatek,mt2701-cs42448-machine"
+- mediatek,platform: the phandle of MT2701 ASoC platform
+- audio-routing: a list of the connections between audio components
+- mediatek,audio-codec: the phandle of the cs42448 codec
+- mediatek,audio-codec-bt-mrg: the phandle of the bt-sco dummy codec
+- pinctrl-names: Should contain only one value - "default"
+- pinctrl-0: Should specify pin control groups used for this controller.
+- i2s1-in-sel-gpio1, i2s1-in-sel-gpio2: Should specify two gpio pins to
+ control I2S1-in mux.
+
+Example:
+
+ sound:sound {
+ compatible = "mediatek,mt2701-cs42448-machine";
+ mediatek,platform = <&afe>;
+ /* CS42448 Machine name */
+ audio-routing =
+ "Line Out Jack", "AOUT1L",
+ "Line Out Jack", "AOUT1R",
+ "Line Out Jack", "AOUT2L",
+ "Line Out Jack", "AOUT2R",
+ "Line Out Jack", "AOUT3L",
+ "Line Out Jack", "AOUT3R",
+ "Line Out Jack", "AOUT4L",
+ "Line Out Jack", "AOUT4R",
+ "AIN1L", "AMIC",
+ "AIN1R", "AMIC",
+ "AIN2L", "Tuner In",
+ "AIN2R", "Tuner In",
+ "AIN3L", "Satellite Tuner In",
+ "AIN3R", "Satellite Tuner In",
+ "AIN3L", "AUX In",
+ "AIN3R", "AUX In";
+ mediatek,audio-codec = <&cs42448>;
+ mediatek,audio-codec-bt-mrg = <&bt_sco_codec>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&aud_pins_default>;
+ i2s1-in-sel-gpio1 = <&pio 53 0>;
+ i2s1-in-sel-gpio2 = <&pio 54 0>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
index 5bfa6b60530b..29dce2ac8773 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
@@ -1,8 +1,9 @@
-MT8173 with RT5650 CODECS
+MT8173 with RT5650 CODECS and HDMI via I2S
Required properties:
- compatible : "mediatek,mt8173-rt5650"
- mediatek,audio-codec: the phandles of rt5650 codecs
+ and of the hdmi encoder node
- mediatek,platform: the phandle of MT8173 ASoC platform
Optional subnodes:
@@ -12,12 +13,17 @@ Required codec-capture subnode properties:
<&rt5650 0> : Default setting. Connect rt5650 I2S1 for capture. (dai_name = rt5645-aif1)
<&rt5650 1> : Connect rt5650 I2S2 for capture. (dai_name = rt5645-aif2)
+- mediatek,mclk: the MCLK source
+ 0 : external oscillator, MCLK = 12.288MHz
+ 1 : internal source from mt8173, MCLK = sampling rate*256
+
Example:
sound {
compatible = "mediatek,mt8173-rt5650";
- mediatek,audio-codec = <&rt5650>;
+ mediatek,audio-codec = <&rt5650 &hdmi0>;
mediatek,platform = <&afe>;
+ mediatek,mclk = <0>;
codec-capture {
sound-dai = <&rt5650 1>;
};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
index 0741dff048dd..6f6c2f8e908d 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -8,6 +8,8 @@ Required properties:
- interrupts: Interrupt number for McPDM
- interrupt-parent: The parent interrupt controller
- ti,hwmods: Name of the hwmod associated to the McPDM
+- clocks: phandle for the pdmclk provider, likely <&twl6040>
+- clock-names: Must be "pdmclk"
Example:
@@ -19,3 +21,11 @@ mcpdm: mcpdm@40132000 {
interrupt-parent = <&gic>;
ti,hwmods = "mcpdm";
};
+
+In the board DTS file the pdmclk needs to be added:
+
+&mcpdm {
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index c7b29df4a963..15a7316e4c91 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -373,6 +373,8 @@ Optional properties:
- #clock-cells : it must be 0 if your system has audio_clkout
it must be 1 if your system has audio_clkout0/1/2/3
- clock-frequency : for all audio_clkout0/1/2/3
+- clkout-lr-asynchronous : boolean property. It indicates that audio_clkoutn
+ is asynchronous with the LR clock.
SSI subnode properties:
- interrupts : Should contain SSI interrupt for PIO transfer
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
index 6e86d8aa29b4..4ea29aa9af59 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
@@ -23,6 +23,11 @@ Required properties:
- rockchip,playback-channels: max playback channels, if not set, 8 channels default.
- rockchip,capture-channels: max capture channels, if not set, 2 channels default.
+Required properties for controllers which support multi-channel
+playback/capture:
+
+- rockchip,grf: the phandle of the syscon node for GRF register.
+
Example for rk3288 I2S controller:
i2s@ff890000 {
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index e24436fc5ea9..9cabfc18cb57 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -8,6 +8,11 @@ Required properties:
- reg : The I2C address of the device.
+Optional properties:
+
+- clocks: The phandle of the master clock to the CODEC
+- clock-names: Should be "mclk"
+
Pins on the device (for linking into audio routes) for RT5514:
* DMIC1L
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroidx2-max98090.txt b/Documentation/devicetree/bindings/sound/samsung,odroidx2-max98090.txt
deleted file mode 100644
index 9148f72319e1..000000000000
--- a/Documentation/devicetree/bindings/sound/samsung,odroidx2-max98090.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Samsung Exynos Odroid X2/U3 audio complex with MAX98090 codec
-
-Required properties:
- - compatible : "samsung,odroidx2-audio" - for Odroid X2 board,
- "samsung,odroidu3-audio" - for Odroid U3 board
- - samsung,model : the user-visible name of this sound complex
- - samsung,i2s-controller : the phandle of the I2S controller
- - samsung,audio-codec : the phandle of the MAX98090 audio codec
- - samsung,audio-routing : a list of the connections between audio
- components; each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's source;
- valid names for sources and sinks are the MAX98090's pins (as
- documented in its binding), and the jacks on the board
- For Odroid X2:
- * Headphone Jack
- * Mic Jack
- * DMIC
-
- For Odroid U3:
- * Headphone Jack
- * Speakers
-
-Example:
-
-sound {
- compatible = "samsung,odroidu3-audio";
- samsung,i2s-controller = <&i2s0>;
- samsung,audio-codec = <&max98090>;
- samsung,model = "Odroid-X2";
- samsung,audio-routing =
- "Headphone Jack", "HPL",
- "Headphone Jack", "HPR",
- "IN1", "Mic Jack",
- "Mic Jack", "MICBIAS";
-};
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 0e5e4eb3ef1b..5666da7b8605 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -7,6 +7,14 @@ Required properties:
- clocks : the clock provider of SYS_MCLK
+- VDDA-supply : the regulator provider of VDDA
+
+- VDDIO-supply: the regulator provider of VDDIO
+
+Optional properties:
+
+- VDDD-supply : the regulator provider of VDDD
+
- micbias-resistor-k-ohms : the bias resistor to be used in kOmhs
The resistor can take values of 2k, 4k or 8k.
If set to 0 it will be off.
@@ -15,17 +23,9 @@ Required properties:
- micbias-voltage-m-volts : the bias voltage to be used in mVolts
The voltage can take values from 1.25V to 3V by 250mV steps
- If this node is not mentionned or the value is unknown, then
+ If this node is not mentioned or the value is unknown, then
the value is set to 1.25V.
-- VDDA-supply : the regulator provider of VDDA
-
-- VDDIO-supply: the regulator provider of VDDIO
-
-Optional properties:
-
-- VDDD-supply : the regulator provider of VDDD
-
Example:
codec: sgtl5000@0a {
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
index 4d9a83d9a017..16bcdfb6760e 100644
--- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -33,11 +33,11 @@ Required properties:
"tx" for "st,sti-uni-player" compatibility
"rx" for "st,sti-uni-reader" compatibility
- - version: IP version integrated in SOC.
+ - st,version: IP version integrated in SOC.
- dai-name: DAI name that describes the IP.
- - IP mode: IP working mode depending on associated codec.
+ - st,mode: IP working mode depending on associated codec.
"HDMI" connected to HDMI codec and support IEC HDMI formats (player only).
"SPDIF" connected to SPDIF codec and support SPDIF formats (player only).
"PCM" PCM standard mode for I2S or TDM bus.
@@ -47,7 +47,7 @@ Required properties ("st,sti-uni-player" compatibility only):
- clocks: CPU_DAI IP clock source, listed in the same order than the
CPU_DAI properties.
- - uniperiph-id: internal SOC IP instance ID.
+ - st,uniperiph-id: internal SOC IP instance ID.
Optional properties:
- pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
@@ -84,9 +84,9 @@ Example:
dmas = <&fdma0 4 0 1>;
dai-name = "Uni Player #2 (DAC)";
dma-names = "tx";
- uniperiph-id = <2>;
- version = <5>;
- mode = "PCM";
+ st,uniperiph-id = <2>;
+ st,version = <5>;
+ st,mode = "PCM";
};
sti_uni_player3: sti-uni-player@3 {
@@ -100,9 +100,9 @@ Example:
dmas = <&fdma0 7 0 1>;
dma-names = "tx";
dai-name = "Uni Player #3 (SPDIF)";
- uniperiph-id = <3>;
- version = <5>;
- mode = "SPDIF";
+ st,uniperiph-id = <3>;
+ st,version = <5>;
+ st,mode = "SPDIF";
};
sti_uni_reader1: sti-uni-reader@1 {
@@ -115,7 +115,7 @@ Example:
dmas = <&fdma0 6 0 1>;
dma-names = "rx";
dai-name = "Uni Reader #1 (HDMI RX)";
- version = <3>;
+ st,version = <3>;
st,mode = "PCM";
};
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
new file mode 100644
index 000000000000..7b526ec64991
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -0,0 +1,34 @@
+* Allwinner A10 I2S controller
+
+The I2S bus (Inter-IC sound bus) is a serial link for digital
+audio data transfer between devices in the system.
+
+Required properties:
+
+- compatible: should be one of the following:
+ - "allwinner,sun4i-a10-i2s"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: should contain the I2S interrupt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: should include "tx" and "rx".
+- clocks: a list of phandle + clock-specifier pairs, one for each entry in clock-names.
+- clock-names: should contain the following:
+ - "apb" : clock for the I2S bus interface
+ - "mod" : module clock for the I2S controller
+- #sound-dai-cells : Must be equal to 0
+
+Example:
+
+i2s0: i2s@01c22400 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-i2s";
+ reg = <0x01c22400 0x400>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb0_gates 3>, <&i2s0_clk>;
+ clock-names = "apb", "mod";
+ dmas = <&dma SUN4I_DMA_NORMAL 3>,
+ <&dma SUN4I_DMA_NORMAL 3>;
+ dma-names = "rx", "tx";
+};
diff --git a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
index cd55b52548e4..d4c62e7b1714 100644
--- a/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
+++ b/Documentation/devicetree/bindings/timer/cirrus,clps711x-timer.txt
@@ -1,7 +1,7 @@
* Cirrus Logic CLPS711X Timer Counter
Required properties:
-- compatible: Shall contain "cirrus,clps711x-timer".
+- compatible: Shall contain "cirrus,ep7209-timer".
- reg : Address and length of the register set.
- interrupts: The interrupt number of the timer.
- clocks : phandle of timer reference clock.
@@ -15,14 +15,14 @@ Example:
};
timer1: timer@80000300 {
- compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer";
+ compatible = "cirrus,ep7312-timer", "cirrus,ep7209-timer";
reg = <0x80000300 0x4>;
interrupts = <8>;
clocks = <&clks 5>;
};
timer2: timer@80000340 {
- compatible = "cirrus,ep7312-timer", "cirrus,clps711x-timer";
+ compatible = "cirrus,ep7312-timer", "cirrus,ep7209-timer";
reg = <0x80000340 0x4>;
interrupts = <9>;
clocks = <&clks 6>;
diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
index 5883b73ea1b5..f4262ed60582 100644
--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
+++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
@@ -113,13 +113,13 @@ usb2: gadget@fff78000 {
clock-names = "hclk", "pclk";
atmel,vbus-gpio = <&pioB 19 0>;
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -127,7 +127,7 @@ usb2: gadget@fff78000 {
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -135,21 +135,21 @@ usb2: gadget@fff78000 {
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -157,7 +157,7 @@ usb2: gadget@fff78000 {
atmel,can-isoc;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
index d28295a3e55f..3eee9e505400 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
@@ -104,10 +104,10 @@ Example:
nvidia,xusb-padctl = <&padctl>;
- phys = <&{/padctl@0,7009f000/pads/usb2/usb2-1}>, /* mini-PCIe USB */
- <&{/padctl@0,7009f000/pads/usb2/usb2-2}>, /* USB A */
- <&{/padctl@0,7009f000/pads/pcie/pcie-0}>; /* USB A */
- phy-names = "utmi-1", "utmi-2", "usb3-0";
+ phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* mini-PCIe USB */
+ <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* USB A */
+ <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>; /* USB A */
+ phy-names = "usb2-1", "usb2-2", "usb3-0";
avddio-pex-supply = <&vdd_1v05_run>;
dvddio-pex-supply = <&vdd_1v05_run>;
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index b9361816fc32..1992aa97d45a 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -238,6 +238,7 @@ simtek
sii Seiko Instruments, Inc.
silergy Silergy Corp.
sirf SiRF Technology, Inc.
+sis Silicon Integrated Systems Corp.
sitronix Sitronix Technology Corporation
skyworks Skyworks Solutions, Inc.
smsc Standard Microsystems Corporation
@@ -249,6 +250,7 @@ sony Sony Corporation
spansion Spansion Inc.
sprd Spreadtrum Communications Inc.
st STMicroelectronics
+starry Starry Electronic Technology (ShenZhen) Co., LTD
startek Startek
ste ST-Ericsson
stericsson ST-Ericsson
diff --git a/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
new file mode 100644
index 000000000000..c5e74d7b4406
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
@@ -0,0 +1,16 @@
+Aspeed Watchdog Timer
+
+Required properties:
+ - compatible: must be one of:
+ - "aspeed,ast2400-wdt"
+ - "aspeed,ast2500-wdt"
+
+ - reg: physical base address of the controller and length of memory mapped
+ region
+
+Example:
+
+ wdt1: watchdog@1e785000 {
+ compatible = "aspeed,ast2400-wdt";
+ reg = <0x1e785000 0x1c>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/meson-gxbb-wdt.txt b/Documentation/devicetree/bindings/watchdog/meson-gxbb-wdt.txt
new file mode 100644
index 000000000000..c7fe36fa739c
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/meson-gxbb-wdt.txt
@@ -0,0 +1,16 @@
+Meson GXBB SoCs Watchdog timer
+
+Required properties:
+
+- compatible : should be "amlogic,meson-gxbb-wdt"
+- reg : Specifies base physical address and size of the registers.
+- clocks : Should be a phandle to the Watchdog clock source; for GXBB the xtal
+ is the default clock source.
+
+Example:
+
+wdt: watchdog@98d0 {
+ compatible = "amlogic,meson-gxbb-wdt";
+ reg = <0 0x98d0 0x0 0x10>;
+ clocks = <&xtal>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt
index 4726924d034e..41aeaa2ff0f8 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt
@@ -7,6 +7,10 @@ Required properties :
"qcom,kpss-wdt-msm8960"
"qcom,kpss-wdt-apq8064"
"qcom,kpss-wdt-ipq8064"
+ "qcom,kpss-wdt-ipq4019"
+ "qcom,kpss-timer"
+ "qcom,scss-timer"
+ "qcom,kpss-wdt"
- reg : shall contain base register location and length
- clocks : shall contain the input clock
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index b9512f1eb80a..da24e3133417 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -1,7 +1,11 @@
Renesas Watchdog Timer (WDT) Controller
Required properties:
-- compatible : Should be "renesas,r8a7795-wdt", or "renesas,rcar-gen3-wdt"
+- compatible : Should be "renesas,<soctype>-wdt", and
+ "renesas,rcar-gen3-wdt" as fallback.
+ Examples with soctypes are:
+ - "renesas,r8a7795-wdt" (R-Car H3)
+ - "renesas,r8a7796-wdt" (R-Car M3-W)
When compatible with the generic version, nodes must list the SoC-specific
version corresponding to the platform first, followed by the generic
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 8ea834f6b289..5385cba941d2 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,6 +3,7 @@
*.bc
*.bin
*.bz2
+*.c.[012]*.*
*.cis
*.cpio
*.csp
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index c63eea0c1c8c..b0d775d28e97 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -352,8 +352,15 @@ REGULATOR
devm_regulator_put()
devm_regulator_register()
+RESET
+ devm_reset_control_get()
+ devm_reset_controller_register()
+
SLAVE DMA ENGINE
devm_acpi_dma_controller_register()
SPI
devm_spi_register_master()
+
+WATCHDOG
+ devm_watchdog_register_device()
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 1b3c39a7de62..d30fb2cb5066 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -12,7 +12,7 @@ prototypes:
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
- int (*d_compare)(const struct dentry *, const struct dentry *,
+ int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(struct dentry *);
int (*d_init)(struct dentry *);
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 5b21ef76f751..c0727dc36271 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -267,7 +267,8 @@ among NILFS2 files can be depicted as follows:
`-- file (ino=yy)
( regular file, directory, or symlink )
-For detail on the format of each file, please see include/linux/nilfs2_fs.h.
+For details on the format of each file, please see nilfs2_ondisk.h
+located in the include/uapi/linux directory.
There are no patents or other intellectual property that we protect
with regard to the design of NILFS2. It is allowed to replicate the
diff --git a/Documentation/filesystems/orangefs.txt b/Documentation/filesystems/orangefs.txt
index e1a0056a365f..1dfdec790946 100644
--- a/Documentation/filesystems/orangefs.txt
+++ b/Documentation/filesystems/orangefs.txt
@@ -281,7 +281,7 @@ on the wait queue and one attempt is made to recycle them. Obviously,
if the client-core stays dead too long, the arbitrary userspace processes
trying to use Orangefs will be negatively affected. Waiting ops
that can't be serviced will be removed from the request list and
-have their states set to "given up". In-progress ops that can't
+have their states set to "given up". In-progress ops that can't
be serviced will be removed from the in_progress hash table and
have their states set to "given up".
@@ -338,7 +338,7 @@ particular response.
PVFS2_VFS_OP_STATFS
fill a pvfs2_statfs_response_t with useless info <g>. It is hard for
us to know, in a timely fashion, these statistics about our
- distributed network filesystem.
+ distributed network filesystem.
PVFS2_VFS_OP_FS_MOUNT
fill a pvfs2_fs_mount_response_t which is just like a PVFS_object_kref
@@ -386,7 +386,7 @@ responses:
io_array[1].iov_base = address of global variable "pdev_magic" (int32_t)
io_array[1].iov_len = sizeof(int32_t)
-
+
io_array[2].iov_base = address of parameter "tag" (PVFS_id_gen_t)
io_array[2].iov_len = sizeof(int64_t)
@@ -402,5 +402,47 @@ Readdir responses initialize the fifth element io_array like this:
io_array[4].iov_len = contents of member trailer_size (PVFS_size)
from out_downcall member of global variable
vfs_request
-
+
+Orangefs exploits the dcache in order to avoid sending redundant
+requests to userspace. We keep object inode attributes up-to-date with
+orangefs_inode_getattr. Orangefs_inode_getattr uses two arguments to
+help it decide whether or not to update an inode: "new" and "bypass".
+Orangefs keeps private data in an object's inode that includes a short
+timeout value, getattr_time, which allows any iteration of
+orangefs_inode_getattr to know how long it has been since the inode was
+updated. When the object is not new (new == 0) and the bypass flag is not
+set (bypass == 0) orangefs_inode_getattr returns without updating the inode
+if getattr_time has not timed out. Getattr_time is updated each time the
+inode is updated.
+
+Creation of a new object (file, dir, sym-link) includes the evaluation of
+its pathname, resulting in a negative directory entry for the object.
+A new inode is allocated and associated with the dentry, turning it from
+a negative dentry into a "productive full member of society". Orangefs
+obtains the new inode from Linux with new_inode() and associates
+the inode with the dentry by sending the pair back to Linux with
+d_instantiate().
+
+The evaluation of a pathname for an object resolves to its corresponding
+dentry. If there is no corresponding dentry, one is created for it in
+the dcache. Whenever a dentry is modified or verified Orangefs stores a
+short timeout value in the dentry's d_time, and the dentry will be trusted
+for that amount of time. Orangefs is a network filesystem, and objects
+can potentially change out-of-band with any particular Orangefs kernel module
+instance, so trusting a dentry is risky. The alternative to trusting
+dentries is to always obtain the needed information from userspace - at
+least a trip to the client-core, maybe to the servers. Obtaining information
+from a dentry is cheap, obtaining it from userspace is relatively expensive,
+hence the motivation to use the dentry when possible.
+
+The timeout values d_time and getattr_time are jiffy based, and the
+code is designed to avoid the jiffy-wrap problem:
+
+"In general, if the clock may have wrapped around more than once, there
+is no way to tell how much time has elapsed. However, if the times t1
+and t2 are known to be fairly close, we can reliably compute the
+difference in a way that takes into account the possibility that the
+clock may have wrapped between times."
+
+ from course notes by instructor Andy Wang
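+
+As an illustrative sketch (the field and variable names below are
+approximations of the ones described above, not necessarily the exact driver
+code), such a timeout check is built on the kernel's wrap-safe jiffies
+helpers:
+
+  /* skip the update while the cached attributes are still trusted */
+  if (!new && !bypass &&
+      time_before(jiffies, orangefs_inode->getattr_time))
+          return 0;
+
+  /* ... refresh the attributes, then re-arm the timeout ... */
+  orangefs_inode->getattr_time = jiffies +
+          orangefs_getattr_timeout_msecs * HZ / 1000;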
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index a5fb89cac615..b1bd05ea66b2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -585,3 +585,10 @@ in your dentry operations instead.
in the instances. Rationale: !@#!@# security_d_instantiate() needs to be
called before we attach dentry to inode and !@#!@##!@$!$#!@#$!@$!@$ smack
->d_instantiate() uses not just ->getxattr() but ->setxattr() as well.
+--
+[mandatory]
+ ->d_compare() doesn't get parent as a separate argument anymore. If you
+ used it for finding the struct super_block involved, dentry->d_sb will
+ work just as well; if it's something more complicated, use dentry->d_parent.
+ Just be careful not to assume that fetching it more than once will yield
+ the same value - in RCU mode it could change under you.
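+
+ A hypothetical instance with the new prototype, for illustration:
+
+        static int foo_d_compare(const struct dentry *dentry, unsigned int len,
+                                 const char *str, const struct qstr *name)
+        {
+                struct super_block *sb = dentry->d_sb;  /* was parent->d_sb */
+
+                /* compare the 'len' bytes at 'str' against 'name' using
+                 * sb-specific rules; return 0 on match, non-zero otherwise */
+                return foo_casefold_compare(sb, str, len, name); /* hypothetical helper */
+        }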
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d9c11d25bf02..a85355cf85f4 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -98,7 +98,7 @@ A memory policy with a valid NodeList will be saved, as specified, for
use at file creation time. When a task allocates a file in the file
system, the mount option memory policy will be applied with a NodeList,
if any, modified by the calling task's cpuset constraints
-[See Documentation/cgroups/cpusets.txt] and any optional flags, listed
+[See Documentation/cgroup-v1/cpusets.txt] and any optional flags, listed
below. If the resulting NodeLists is the empty set, the effective memory
policy for the file will revert to "default" policy.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 8a196851f01d..9ace359d6cc5 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -931,7 +931,7 @@ struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
- int (*d_compare)(const struct dentry *, const struct dentry *,
+ int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(const struct dentry *);
int (*d_init)(struct dentry *);
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
new file mode 100644
index 000000000000..891c69464434
--- /dev/null
+++ b/Documentation/gcc-plugins.txt
@@ -0,0 +1,87 @@
+GCC plugin infrastructure
+=========================
+
+
+1. Introduction
+===============
+
+GCC plugins are loadable modules that provide extra features to the
+compiler [1]. They are useful for runtime instrumentation and static analysis.
+We can analyse, change and add further code during compilation via
+callbacks [2], GIMPLE [3], IPA [4] and RTL passes [5].
+
+The GCC plugin infrastructure of the kernel supports all gcc versions from
+4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
+separate directory.
+Plugin source files have to be compilable by both a C and a C++ compiler,
+because gcc versions 4.5 and 4.6 are compiled by a C compiler,
+gcc-4.7 can be compiled by either a C or a C++ compiler,
+and versions 4.8+ can only be compiled by a C++ compiler.
+
+Currently the GCC plugin infrastructure supports only the x86, arm and arm64
+architectures.
+
+This infrastructure was ported from grsecurity [6] and PaX [7].
+
+--
+[1] https://gcc.gnu.org/onlinedocs/gccint/Plugins.html
+[2] https://gcc.gnu.org/onlinedocs/gccint/Plugin-API.html#Plugin-API
+[3] https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html
+[4] https://gcc.gnu.org/onlinedocs/gccint/IPA.html
+[5] https://gcc.gnu.org/onlinedocs/gccint/RTL.html
+[6] https://grsecurity.net/
+[7] https://pax.grsecurity.net/
+
+
+2. Files
+========
+
+$(src)/scripts/gcc-plugins
+ This is the directory of the GCC plugins.
+
+$(src)/scripts/gcc-plugins/gcc-common.h
+ This is a compatibility header for GCC plugins.
+ It should be always included instead of individual gcc headers.
+
+$(src)/scripts/gcc-plugin.sh
+ This script checks the availability of the included headers in
+ gcc-common.h and chooses the proper host compiler to build the plugins
+ (gcc-4.7 can be built by either gcc or g++).
+
+$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h
+$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h
+$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h
+$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h
+ These headers automatically generate the registration structures for
+ GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
+ from 4.5 to 6.0.
+ They should be preferred to creating the structures by hand.
+
+
+3. Usage
+========
+
+You must install the gcc plugin headers for your gcc version,
+e.g., on Ubuntu for gcc-4.9:
+
+ apt-get install gcc-4.9-plugin-dev
+
+Enable a GCC plugin based feature in the kernel config:
+
+ CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
+
+To compile only the plugin(s):
+
+ make gcc-plugins
+
+or just run the kernel make and compile the whole kernel with
+the cyclomatic complexity GCC plugin.
+
+
+4. How to add a new GCC plugin
+==============================
+
+The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
+here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
+$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
+See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.
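+
+As a very rough, hypothetical skeleton (not an in-tree plugin; all names are
+placeholders), a new plugin built on top of gcc-common.h looks roughly like
+this:
+
+  /* example_plugin.c - hypothetical skeleton */
+  #include "gcc-common.h"
+
+  __visible int plugin_is_GPL_compatible;
+
+  static struct plugin_info example_plugin_info = {
+          .version = "1",
+          .help    = "example plugin, does nothing useful\n",
+  };
+
+  __visible int plugin_init(struct plugin_name_args *plugin_info,
+                            struct plugin_gcc_version *version)
+  {
+          const char * const plugin_name = plugin_info->base_name;
+
+          if (!plugin_default_version_check(version, &gcc_version))
+                  return 1;
+
+          register_callback(plugin_name, PLUGIN_INFO, NULL,
+                            &example_plugin_info);
+
+          /* GIMPLE/IPA/RTL passes would be registered here, typically
+           * via the gcc-generate-*-pass.h headers listed above */
+          return 0;
+  }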
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
new file mode 100644
index 000000000000..3bb26135971f
--- /dev/null
+++ b/Documentation/gpu/drm-internals.rst
@@ -0,0 +1,381 @@
+=============
+DRM Internals
+=============
+
+This chapter documents DRM internals relevant to driver authors and
+developers working to add support for the latest features to existing
+drivers.
+
+First, we go over some typical driver initialization requirements, like
+setting up command buffers, creating an initial output configuration,
+and initializing core services. Subsequent sections cover core internals
+in more detail, providing implementation notes and examples.
+
+The DRM layer provides several services to graphics drivers, many of
+them driven by the application interfaces it provides through libdrm,
+the library that wraps most of the DRM ioctls. These include vblank
+event handling, memory management, output management, framebuffer
+management, command submission & fencing, suspend/resume support, and
+DMA services.
+
+Driver Initialization
+=====================
+
+At the core of every DRM driver is a :c:type:`struct drm_driver
+<drm_driver>` structure. Drivers typically statically initialize
+a drm_driver structure, and then pass it to
+:c:func:`drm_dev_alloc()` to allocate a device instance. After the
+device instance is fully initialized it can be registered (which makes
+it accessible from userspace) using :c:func:`drm_dev_register()`.
+
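+A minimal sketch of that pattern looks roughly as follows; the ``foo`` names,
+the platform-bus glue and the chosen feature flags are illustrative
+placeholders rather than a real driver::
+
+    #include <drm/drmP.h>
+    #include <linux/platform_device.h>
+
+    static const struct file_operations foo_fops = {
+            .owner          = THIS_MODULE,
+            .open           = drm_open,
+            .release        = drm_release,
+            .unlocked_ioctl = drm_ioctl,
+            .poll           = drm_poll,
+            .read           = drm_read,
+    };
+
+    static struct drm_driver foo_driver = {
+            .driver_features = DRIVER_GEM | DRIVER_MODESET,
+            .fops            = &foo_fops,
+            .name            = "foo",
+            .desc            = "Hypothetical example driver",
+            .date            = "20160101",
+            .major           = 1,
+            .minor           = 0,
+    };
+
+    static int foo_probe(struct platform_device *pdev)
+    {
+            struct drm_device *drm;
+            int ret;
+
+            /* allocate and initialize the device instance */
+            drm = drm_dev_alloc(&foo_driver, &pdev->dev);
+            if (!drm)
+                    return -ENOMEM;
+
+            /* hardware and mode setting initialization would go here */
+
+            /* make the device visible to userspace */
+            ret = drm_dev_register(drm, 0);
+            if (ret)
+                    drm_dev_unref(drm);
+            return ret;
+    }
+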
+The :c:type:`struct drm_driver <drm_driver>` structure
+contains static information that describes the driver and features it
+supports, and pointers to methods that the DRM core will call to
+implement the DRM API. We will first go through the :c:type:`struct
+drm_driver <drm_driver>` static information fields, and will
+then describe individual operations in detail as they get used in later
+sections.
+
+Driver Information
+------------------
+
+Driver Features
+~~~~~~~~~~~~~~~
+
+Drivers inform the DRM core about their requirements and supported
+features by setting appropriate flags in the driver_features field.
+Since those flags influence the DRM core behaviour since registration
+time, most of them must be set to registering the :c:type:`struct
+drm_driver <drm_driver>` instance.
+
+u32 driver_features;
+
+DRIVER_USE_AGP
+ Driver uses AGP interface, the DRM core will manage AGP resources.
+
+DRIVER_REQUIRE_AGP
+ Driver needs AGP interface to function. AGP initialization failure
+ will become a fatal error.
+
+DRIVER_PCI_DMA
+ Driver is capable of PCI DMA, mapping of PCI DMA buffers to
+ userspace will be enabled. Deprecated.
+
+DRIVER_SG
+ Driver can perform scatter/gather DMA, allocation and mapping of
+ scatter/gather buffers will be enabled. Deprecated.
+
+DRIVER_HAVE_DMA
+ Driver supports DMA, the userspace DMA API will be supported.
+ Deprecated.
+
+DRIVER_HAVE_IRQ; DRIVER_IRQ_SHARED
+ DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
+ managed by the DRM Core. The core will support simple IRQ handler
+ installation when the flag is set. The installation process is
+ described in ?.
+
+ DRIVER_IRQ_SHARED indicates whether the device & handler support
+ shared IRQs (note that this is required of PCI drivers).
+
+DRIVER_GEM
+ Driver uses the GEM memory manager.
+
+DRIVER_MODESET
+ Driver supports mode setting interfaces (KMS).
+
+DRIVER_PRIME
+ Driver implements DRM PRIME buffer sharing.
+
+DRIVER_RENDER
+ Driver supports dedicated render nodes.
+
+DRIVER_ATOMIC
+ Driver supports atomic properties. In this case the driver must
+ implement appropriate obj->atomic_get_property() vfuncs for any
+ modeset objects with driver specific properties.
+
+Major, Minor and Patchlevel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+int major; int minor; int patchlevel;
+The DRM core identifies driver versions by a major, minor and patch
+level triplet. The information is printed to the kernel log at
+initialization time and passed to userspace through the
+DRM_IOCTL_VERSION ioctl.
+
+The major and minor numbers are also used to verify the requested driver
+API version passed to DRM_IOCTL_SET_VERSION. When the driver API
+changes between minor versions, applications can call
+DRM_IOCTL_SET_VERSION to select a specific version of the API. If the
+requested major isn't equal to the driver major, or the requested minor
+is larger than the driver minor, the DRM_IOCTL_SET_VERSION call will
+return an error. Otherwise the driver's set_version() method will be
+called with the requested version.
+
+Name, Description and Date
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+char \*name; char \*desc; char \*date;
+The driver name is printed to the kernel log at initialization time,
+used for IRQ registration and passed to userspace through
+DRM_IOCTL_VERSION.
+
+The driver description is a purely informative string passed to
+userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
+the kernel.
+
+The driver date, formatted as YYYYMMDD, is meant to identify the date of
+the latest modification to the driver. However, as most drivers fail to
+update it, its value is mostly useless. The DRM core prints it to the
+kernel log at initialization time and passes it to userspace through the
+DRM_IOCTL_VERSION ioctl.
+
+Device Instance and Driver Handling
+-----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_drv.c
+ :doc: driver instance overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_drv.c
+ :export:
+
+Driver Load
+-----------
+
+IRQ Registration
+~~~~~~~~~~~~~~~~
+
+The DRM core tries to facilitate IRQ handler registration and
+unregistration by providing :c:func:`drm_irq_install()` and
+:c:func:`drm_irq_uninstall()` functions. Those functions only
+support a single interrupt per device; devices that use more than one
+IRQ need to be handled manually.
+
+Managed IRQ Registration
+''''''''''''''''''''''''
+
+:c:func:`drm_irq_install()` starts by calling the irq_preinstall
+driver operation. The operation is optional and must make sure that the
+interrupt will not get fired by clearing all pending interrupt flags or
+disabling the interrupt.
+
+The passed-in IRQ will then be requested by a call to
+:c:func:`request_irq()`. If the DRIVER_IRQ_SHARED driver feature
+flag is set, a shared (IRQF_SHARED) IRQ handler will be requested.
+
+The IRQ handler function must be provided as the mandatory irq_handler
+driver operation. It will get passed directly to
+:c:func:`request_irq()` and thus has the same prototype as all IRQ
+handlers. It will get called with a pointer to the DRM device as the
+second argument.
+
+Finally the function calls the optional irq_postinstall driver
+operation. The operation usually enables interrupts (excluding the
+vblank interrupt, which is enabled separately), but drivers may choose
+to enable/disable interrupts at a different time.
+
+:c:func:`drm_irq_uninstall()` is similarly used to uninstall an
+IRQ handler. It starts by waking up all processes waiting on a vblank
+interrupt to make sure they don't hang, and then calls the optional
+irq_uninstall driver operation. The operation must disable all hardware
+interrupts. Finally the function frees the IRQ by calling
+:c:func:`free_irq()`.
+
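+A rough sketch of the managed path, with ``foo`` names (including the
+``foo_device_irq_pending()`` helper) as placeholders::
+
+    static irqreturn_t foo_irq_handler(int irq, void *arg)
+    {
+            struct drm_device *drm = arg;
+
+            if (!foo_device_irq_pending(drm))       /* hypothetical helper */
+                    return IRQ_NONE;
+
+            /* acknowledge and handle the interrupt sources here */
+            return IRQ_HANDLED;
+    }
+
+    static struct drm_driver foo_driver = {
+            .driver_features = DRIVER_GEM | DRIVER_MODESET |
+                               DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+            .irq_handler     = foo_irq_handler,
+            /* .irq_preinstall, .irq_postinstall and .irq_uninstall are
+             * optional and may be left unset */
+    };
+
+    static int foo_enable_interrupts(struct drm_device *drm)
+    {
+            struct platform_device *pdev = to_platform_device(drm->dev);
+
+            return drm_irq_install(drm, platform_get_irq(pdev, 0));
+    }
+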
+Manual IRQ Registration
+'''''''''''''''''''''''
+
+Drivers that require multiple interrupt handlers can't use the managed
+IRQ registration functions. In that case IRQs must be registered and
+unregistered manually (usually with the :c:func:`request_irq()` and
+:c:func:`free_irq()` functions, or their :c:func:`devm_request_irq()` and
+:c:func:`devm_free_irq()` equivalents).
+
+When manually registering IRQs, drivers must not set the
+DRIVER_HAVE_IRQ driver feature flag, and must not provide the
+irq_handler driver operation. They must set the :c:type:`struct
+drm_device <drm_device>` irq_enabled field to 1 upon
+registration of the IRQs, and clear it to 0 after unregistering the
+IRQs.
+
+Memory Manager Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Every DRM driver requires a memory manager which must be initialized at
+load time. DRM currently contains two memory managers, the Translation
+Table Manager (TTM) and the Graphics Execution Manager (GEM). This
+document describes the use of the GEM memory manager only. See ? for
+details.
+
+Miscellaneous Device Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Another task that may be necessary for PCI devices during configuration
+is mapping the video BIOS. On many devices, the VBIOS describes device
+configuration, LCD panel timings (if any), and contains flags indicating
+device state. Mapping the BIOS can be done using the pci_map_rom()
+call, a convenience function that takes care of mapping the actual ROM,
+whether it has been shadowed into memory (typically at address 0xc0000)
+or exists on the PCI device in the ROM BAR. Note that after the ROM has
+been mapped and any necessary information has been extracted, it should
+be unmapped; on many devices, the ROM address decoder is shared with
+other BARs, so leaving it mapped could cause undesired behaviour like
+hangs or memory corruption.
+
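+For example, with ``pdev`` being the driver's ``struct pci_dev`` (error
+handling trimmed for brevity)::
+
+    size_t size;
+    void __iomem *rom = pci_map_rom(pdev, &size);
+
+    if (rom) {
+            /* parse the VBIOS tables found at 'rom' */
+            pci_unmap_rom(pdev, rom);
+    }
+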
+Bus-specific Device Registration and PCI Support
+------------------------------------------------
+
+A number of functions are provided to help with device registration. The
+functions deal with PCI and platform devices respectively and are only
+provided for historical reasons. These are all deprecated and shouldn't
+be used in new drivers. Besides that, there are a few helpers for PCI
+drivers.
+
+.. kernel-doc:: drivers/gpu/drm/drm_pci.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_platform.c
+ :export:
+
+Open/Close, File Operations and IOCTLs
+======================================
+
+Open and Close
+--------------
+
+Open and close handlers. None of those methods are mandatory::
+
+ int (*firstopen) (struct drm_device *);
+ void (*lastclose) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *);
+ void (*postclose) (struct drm_device *, struct drm_file *);
+
+The firstopen method is called by the DRM core for legacy UMS (User Mode
+Setting) drivers only when an application opens a device that has no
+other opened file handle. UMS drivers can implement it to acquire device
+resources. KMS drivers can't use the method and must acquire resources
+in the load method instead.
+
+Similarly the lastclose method is called when the last application
+holding a file handle opened on the device closes it, for both UMS and
+KMS drivers. Additionally, the method is also called at module unload
+time or, for hot-pluggable devices, when the device is unplugged. The
+firstopen and lastclose calls can thus be unbalanced.
+
+The open method is called every time the device is opened by an
+application. Drivers can allocate per-file private data in this method
+and store it in the :c:type:`struct drm_file
+<drm_file>` driver_priv field. Note that the open method is
+called before firstopen.
+
+The close operation is split into preclose and postclose methods.
+Drivers must stop and cleanup all per-file operations in the preclose
+method. For instance pending vertical blanking and page flip events must
+be cancelled. No per-file operation is allowed on the file handle after
+returning from the preclose method.
+
+Finally the postclose method is called as the last step of the close
+operation, right before calling the lastclose method if no other open
+file handle exists for the device. Drivers that have allocated per-file
+private data in the open method should free it here.
+
+The lastclose method should restore CRTC and plane properties to default
+value, so that a subsequent open of the device will not inherit state
+from the previous user. It can also be used to execute delayed power
+switching state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
+infrastructure. Beyond that KMS drivers should not do any
+further cleanup. Only legacy UMS drivers might need to clean up device
+state so that the vga console or an independent fbdev driver could take
+over.
+
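+A minimal sketch of per-file private data handling, with ``foo`` names as
+placeholders::
+
+    struct foo_file_priv {
+            /* per-file driver state */
+            unsigned int flags;
+    };
+
+    static int foo_open(struct drm_device *dev, struct drm_file *file)
+    {
+            struct foo_file_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+            if (!priv)
+                    return -ENOMEM;
+            file->driver_priv = priv;
+            return 0;
+    }
+
+    static void foo_postclose(struct drm_device *dev, struct drm_file *file)
+    {
+            kfree(file->driver_priv);
+    }
+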
+File Operations
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_fops.c
+ :doc: file operations
+
+.. kernel-doc:: drivers/gpu/drm/drm_fops.c
+ :export:
+
+IOCTLs
+------
+
+struct drm_ioctl_desc \*ioctls; int num_ioctls;
+ Driver-specific ioctls descriptors table.
+
+Driver-specific ioctls numbers start at DRM_COMMAND_BASE. The ioctls
+descriptors table is indexed by the ioctl number offset from the base
+value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize
+the table entries.
+
+::
+
+ DRM_IOCTL_DEF_DRV(ioctl, func, flags)
+
+``ioctl`` is the ioctl name. Drivers must define the DRM_##ioctl and
+DRM_IOCTL_##ioctl macros to the ioctl number offset from
+DRM_COMMAND_BASE and the ioctl number respectively. The first macro is
+private to the device while the second must be exposed to userspace in a
+public header.
+
+``func`` is a pointer to the ioctl handler function compatible with the
+``drm_ioctl_t`` type.
+
+::
+
+ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+``flags`` is a bitmask combination of the following values. It restricts
+how the ioctl is allowed to be called.
+
+- DRM_AUTH - Only authenticated callers allowed
+
+- DRM_MASTER - The ioctl can only be called on the master file handle
+
+- DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
+
+- DRM_CONTROL_ALLOW - The ioctl can only be called on a control
+ device
+
+- DRM_UNLOCKED - The ioctl handler will be called without locking the
+ DRM global mutex. This is the enforced default for kms drivers (i.e.
+ using the DRIVER_MODESET flag) and hence shouldn't be used any more
+ for new drivers.
+
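+Putting this together, a hypothetical driver-private ioctl could be wired up
+as in the sketch below; the ``FOO_SUBMIT`` ioctl, its argument structure and
+the handler are made-up examples, not an existing interface::
+
+    #define DRM_FOO_SUBMIT        0x00    /* offset, private to the driver */
+
+    /* exposed to userspace in the driver's public uapi header */
+    struct drm_foo_submit {
+            __u32 handle;
+            __u32 flags;
+    };
+    #define DRM_IOCTL_FOO_SUBMIT  DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_SUBMIT, \
+                                           struct drm_foo_submit)
+
+    /* in the driver */
+    static int foo_submit_ioctl(struct drm_device *dev, void *data,
+                                struct drm_file *file_priv)
+    {
+            struct drm_foo_submit *args = data;
+
+            if (args->flags)
+                    return -EINVAL;    /* no flags defined yet */
+
+            /* hand 'args->handle' to the hardware */
+            return 0;
+    }
+
+    static const struct drm_ioctl_desc foo_ioctls[] = {
+            DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl, DRM_AUTH),
+    };
+
+    static struct drm_driver foo_driver = {
+            /* ... */
+            .ioctls     = foo_ioctls,
+            .num_ioctls = ARRAY_SIZE(foo_ioctls),
+    };
+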
+.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
+ :export:
+
+Legacy Support Code
+===================
+
+This section very briefly covers some of the old legacy support code
+which is only used by old DRM drivers which have done a so-called
+shadow-attach to the underlying device instead of registering as a real
+driver. This also includes some of the old generic buffer management and
+command submission code. Do not use any of this in new and modern
+drivers.
+
+Legacy Suspend/Resume
+---------------------
+
+The DRM core provides some suspend/resume code, but drivers wanting full
+suspend/resume support should provide save() and restore() functions.
+These are called at suspend, hibernate, or resume time, and should
+perform any state save or restore required by your device across suspend
+or hibernate states.
+
+int (\*suspend) (struct drm_device \*, pm_message_t state); int
+(\*resume) (struct drm_device \*);
+Those are legacy suspend and resume methods which *only* work with the
+legacy shadow-attach driver registration functions. New drivers should
+use the power management interface provided by their bus type (usually
+through the :c:type:`struct device_driver <device_driver>`
+dev_pm_ops) and set these methods to NULL.
+
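+For reference, the recommended replacement on a platform device looks roughly
+like the following sketch (``foo`` names are placeholders)::
+
+    #include <linux/platform_device.h>
+    #include <linux/pm.h>
+
+    static int __maybe_unused foo_pm_suspend(struct device *dev)
+    {
+            /* save device state */
+            return 0;
+    }
+
+    static int __maybe_unused foo_pm_resume(struct device *dev)
+    {
+            /* restore device state */
+            return 0;
+    }
+
+    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);
+
+    static struct platform_driver foo_platform_driver = {
+            .driver = {
+                    .name = "foo",
+                    .pm   = &foo_pm_ops,
+            },
+            /* .probe and .remove as usual */
+    };
+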
+Legacy DMA Services
+-------------------
+
+This should cover how DMA mapping etc. is supported by the core. These
+functions are deprecated and should not be used.
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
new file mode 100644
index 000000000000..0b302fedf1af
--- /dev/null
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -0,0 +1,260 @@
+=============================
+Mode Setting Helper Functions
+=============================
+
+The plane, CRTC, encoder and connector functions provided by the drivers
+implement the DRM API. They're called by the DRM core and ioctl handlers
+to handle device state changes and configuration requests. As
+implementing those functions often requires logic not specific to
+drivers, mid-layer helper functions are available to avoid duplicating
+boilerplate code.
+
+The DRM core contains one mid-layer implementation. The mid-layer
+provides implementations of several plane, CRTC, encoder and connector
+functions (called from the top of the mid-layer) that pre-process
+requests and call lower-level functions provided by the driver (at the
+bottom of the mid-layer). For instance, the
+:c:func:`drm_crtc_helper_set_config()` function can be used to
+fill the :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`
+set_config field. When called, it will split the set_config operation
+into smaller, simpler operations and call the driver to handle them.
+
+To use the mid-layer, drivers call
+:c:func:`drm_crtc_helper_add()`,
+:c:func:`drm_encoder_helper_add()` and
+:c:func:`drm_connector_helper_add()` functions to install their
+mid-layer bottom operations handlers, and fill the :c:type:`struct
+drm_crtc_funcs <drm_crtc_funcs>`, :c:type:`struct
+drm_encoder_funcs <drm_encoder_funcs>` and :c:type:`struct
+drm_connector_funcs <drm_connector_funcs>` structures with
+pointers to the mid-layer top API functions. Installing the mid-layer
+bottom operation handlers is best done right after registering the
+corresponding KMS object.
+
+The mid-layer is not split between CRTC, encoder and connector
+operations. To use it, a driver must provide bottom functions for all of
+the three KMS entities.
+
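+For the CRTC side, the resulting wiring looks roughly like this sketch
+(``foo`` names are placeholders; the encoder and connector follow the same
+pattern)::
+
+    static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
+            /* driver-specific bottom operations, e.g. .dpms, .mode_set, ... */
+    };
+
+    static const struct drm_crtc_funcs foo_crtc_funcs = {
+            .set_config = drm_crtc_helper_set_config,  /* mid-layer top API */
+            .destroy    = drm_crtc_cleanup,
+    };
+
+    /* register the CRTC, then install the helper bottom operations */
+    drm_crtc_init(dev, crtc, &foo_crtc_funcs);
+    drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
+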
+Atomic Modeset Helper Functions Reference
+=========================================
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+ :doc: overview
+
+Implementing Asynchronous Atomic Commit
+---------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+ :doc: implementing nonblocking commit
+
+Atomic State Reset and Initialization
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+ :doc: atomic state reset and initialization
+
+.. kernel-doc:: include/drm/drm_atomic_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+ :export:
+
+Modeset Helper Reference for Common Vtables
+===========================================
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+ :internal:
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+ :doc: overview
+
+Legacy CRTC/Modeset Helper Functions Reference
+==============================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
+ :doc: overview
+
+Output Probing Helper Functions Reference
+=========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+ :doc: output probing helper overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+ :export:
+
+fbdev Helper Functions Reference
+================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
+ :doc: fbdev helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_fb_helper.h
+ :internal:
+
+Framebuffer CMA Helper Functions Reference
+==========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
+ :doc: framebuffer cma helper functions
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
+ :export:
+
+Display Port Helper Functions Reference
+=======================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
+ :doc: dp helpers
+
+.. kernel-doc:: include/drm/drm_dp_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
+ :export:
+
+Display Port Dual Mode Adaptor Helper Functions Reference
+=========================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
+ :doc: dp dual mode helpers
+
+.. kernel-doc:: include/drm/drm_dp_dual_mode_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
+ :export:
+
+Display Port MST Helper Functions Reference
+===========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+ :doc: dp mst helper
+
+.. kernel-doc:: include/drm/drm_dp_mst_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+ :export:
+
+MIPI DSI Helper Functions Reference
+===================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
+ :doc: dsi helpers
+
+.. kernel-doc:: include/drm/drm_mipi_dsi.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
+ :export:
+
+EDID Helper Functions Reference
+===============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_edid.c
+ :export:
+
+Rectangle Utilities Reference
+=============================
+
+.. kernel-doc:: include/drm/drm_rect.h
+ :doc: rect utils
+
+.. kernel-doc:: include/drm/drm_rect.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_rect.c
+ :export:
+
+Flip-work Helper Reference
+==========================
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+ :doc: flip utils
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
+ :export:
+
+HDMI Infoframes Helper Reference
+================================
+
+Strictly speaking this is not a DRM helper library; it is generally
+usable by any driver interfacing with HDMI outputs, such as V4L or ALSA
+drivers. But it fits nicely into the overall topic of mode setting
+helper libraries and hence is also included here.
+
+.. kernel-doc:: include/linux/hdmi.h
+ :internal:
+
+.. kernel-doc:: drivers/video/hdmi.c
+ :export:
+
+Plane Helper Reference
+======================
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+ :doc: overview
+
+Tile group
+----------
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+ :doc: Tile group
+
+Bridges
+=======
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+ :doc: overview
+
+Default bridge callback sequence
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+ :doc: bridge callbacks
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+ :export:
+
+Panel Helper Reference
+======================
+
+.. kernel-doc:: include/drm/drm_panel.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+ :doc: drm panel
+
+Simple KMS Helper Reference
+===========================
+
+.. kernel-doc:: include/drm/drm_simple_kms_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+ :doc: overview
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
new file mode 100644
index 000000000000..8dfa4b214b96
--- /dev/null
+++ b/Documentation/gpu/drm-kms.rst
@@ -0,0 +1,653 @@
+=========================
+Kernel Mode Setting (KMS)
+=========================
+
+Mode Setting
+============
+
+Drivers must initialize the mode setting core by calling
+:c:func:`drm_mode_config_init()` on the DRM device. The function
+initializes the :c:type:`struct drm_device <drm_device>`
+mode_config field and never fails. Once done, mode configuration must
+be set up by initializing the following fields (a minimal sketch follows
+the list).
+
+- int min_width, min_height; int max_width, max_height;
+ Minimum and maximum width and height of the frame buffers in pixel
+ units.
+
+- struct drm_mode_config_funcs \*funcs;
+ Mode setting functions.
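+
+A minimal initialization sketch, assuming a driver-chosen 4096x4096
+frame buffer size limit and a hypothetical foo_mode_config_funcs
+structure::
+
+    drm_mode_config_init(dev);
+
+    dev->mode_config.min_width = 0;
+    dev->mode_config.min_height = 0;
+    dev->mode_config.max_width = 4096;
+    dev->mode_config.max_height = 4096;
+    dev->mode_config.funcs = &foo_mode_config_funcs;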
+
+Display Modes Function Reference
+--------------------------------
+
+.. kernel-doc:: include/drm/drm_modes.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+ :export:
+
+Atomic Mode Setting Function Reference
+--------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+ :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+ :internal:
+
+Frame Buffer Abstraction
+------------------------
+
+Frame buffers are abstract memory objects that provide a source of
+pixels to scanout to a CRTC. Applications explicitly request the
+creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls
+and receive an opaque handle that can be passed to the KMS CRTC control,
+plane configuration and page flip functions.
+
+Frame buffers rely on the underlying memory manager for low-level memory
+operations. When creating a frame buffer applications pass a memory
+handle (or a list of memory handles for multi-planar formats) through
+the ``drm_mode_fb_cmd2`` argument. For drivers using GEM as their
+userspace buffer management interface this would be a GEM handle.
+Drivers are however free to use their own backing storage object
+handles, e.g. vmwgfx directly exposes special TTM handles to userspace
+and so expects TTM handles in the create ioctl and not GEM handles.
+
+The lifetime of a DRM framebuffer is controlled by a reference count.
+Drivers can grab additional references with
+:c:func:`drm_framebuffer_reference()` and drop them again with
+:c:func:`drm_framebuffer_unreference()`. For driver-private
+framebuffers for which the last reference is never dropped (e.g. for the
+fbdev framebuffer when the :c:type:`struct drm_framebuffer
+<drm_framebuffer>` instance is embedded into the fbdev helper struct)
+drivers can manually clean up a framebuffer at module unload time with
+:c:func:`drm_framebuffer_unregister_private()`.
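+
+A short sketch of the reference counting pattern; foo_plane and its
+pending_fb field are hypothetical driver state::
+
+    /* Keep the framebuffer alive while the hardware still scans it out. */
+    drm_framebuffer_reference(fb);
+    foo_plane->pending_fb = fb;
+
+    /* ... later, once the hardware has switched to the new buffer ... */
+    drm_framebuffer_unreference(foo_plane->pending_fb);
+    foo_plane->pending_fb = NULL;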
+
+DRM Format Handling
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
+ :export:
+
+Dumb Buffer Objects
+-------------------
+
+The KMS API doesn't standardize backing storage object creation and
+leaves it to driver-specific ioctls. Furthermore actually creating a
+buffer object even for GEM-based drivers is done through a
+driver-specific ioctl - GEM only has a common userspace interface for
+sharing and destroying objects. While not an issue for full-fledged
+graphics stacks that include device-specific userspace components (in
+libdrm for instance), this limit makes DRM-based early boot graphics
+unnecessarily complex.
+
+Dumb objects partly alleviate the problem by providing a standard API to
+create dumb buffers suitable for scanout, which can then be used to
+create KMS frame buffers.
+
+To support dumb objects drivers must implement the dumb_create,
+dumb_destroy and dumb_map_offset operations.
+
+- int (\*dumb_create)(struct drm_file \*file_priv, struct
+ drm_device \*dev, struct drm_mode_create_dumb \*args);
+ The dumb_create operation creates a driver object (GEM or TTM
+ handle) suitable for scanout based on the width, height and depth
+ from the :c:type:`struct drm_mode_create_dumb
+ <drm_mode_create_dumb>` argument. It fills the argument's
+ handle, pitch and size fields with a handle for the newly created
+ object and its line pitch and size in bytes.
+
+- int (\*dumb_destroy)(struct drm_file \*file_priv, struct
+ drm_device \*dev, uint32_t handle);
+ The dumb_destroy operation destroys a dumb object created by
+ dumb_create.
+
+- int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
+ drm_device \*dev, uint32_t handle, uint64_t \*offset);
+ The dumb_map_offset operation associates an mmap fake offset with
+ the object given by the handle and returns it. Drivers must use the
+ :c:func:`drm_gem_create_mmap_offset()` function to associate
+ the fake offset as described in ?.
+
+Note that dumb objects may not be used for GPU acceleration, as has been
+attempted on some ARM embedded platforms. Such drivers really must have
+a hardware-specific ioctl to allocate suitable buffer objects.
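+
+A possible wiring of the three operations in the driver structure; the
+foo_* callbacks are hypothetical, while :c:func:`drm_gem_dumb_destroy()`
+is the generic GEM helper that simply drops the handle::
+
+    static struct drm_driver foo_drm_driver = {
+            .driver_features = DRIVER_GEM | DRIVER_MODESET,
+            .dumb_create = foo_dumb_create,
+            .dumb_map_offset = foo_dumb_map_offset,
+            .dumb_destroy = drm_gem_dumb_destroy,
+            /* ... other operations ... */
+    };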
+
+Output Polling
+--------------
+
+::
+
+    void (*output_poll_changed)(struct drm_device *dev);
+
+This operation notifies the driver that the status of one or more
+connectors has changed. Drivers that use the fb helper can just call the
+:c:func:`drm_fb_helper_hotplug_event()` function to handle this
+operation.
+
+KMS Initialization and Cleanup
+==============================
+
+A KMS device is abstracted and exposed as a set of planes, CRTCs,
+encoders and connectors. KMS drivers must thus create and initialize all
+those objects at load time after initializing mode setting.
+
+CRTCs (:c:type:`struct drm_crtc <drm_crtc>`)
+--------------------------------------------
+
+A CRTC is an abstraction representing a part of the chip that contains a
+pointer to a scanout buffer. Therefore, the number of CRTCs available
+determines how many independent scanout buffers can be active at any
+given time. The CRTC structure contains several fields to support this:
+a pointer to some video memory (abstracted as a frame buffer object), a
+display mode, and an (x, y) offset into the video memory to support
+panning or configurations where one piece of video memory spans multiple
+CRTCs.
+
+CRTC Initialization
+~~~~~~~~~~~~~~~~~~~
+
+A KMS device must create and register at least one
+:c:type:`struct drm_crtc <drm_crtc>` instance. The instance is
+allocated and zeroed by the driver, possibly as part of a larger
+structure, and registered with a call to :c:func:`drm_crtc_init()`
+with a pointer to CRTC functions.
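+
+A sketch of the usual embedding pattern; the foo_crtc structure and the
+foo_crtc_register() helper are hypothetical::
+
+    struct foo_crtc {
+            struct drm_crtc base;
+            /* driver-private CRTC state */
+    };
+
+    static int foo_crtc_register(struct drm_device *dev,
+                                 const struct drm_crtc_funcs *funcs)
+    {
+            struct foo_crtc *foo_crtc;
+
+            foo_crtc = kzalloc(sizeof(*foo_crtc), GFP_KERNEL);
+            if (!foo_crtc)
+                    return -ENOMEM;
+
+            return drm_crtc_init(dev, &foo_crtc->base, funcs);
+    }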
+
+Planes (:c:type:`struct drm_plane <drm_plane>`)
+-----------------------------------------------
+
+A plane represents an image source that can be blended with or overlaid
+on top of a CRTC during the scanout process. Planes are associated with
+a frame buffer to crop a portion of the image memory (source) and
+optionally scale it to a destination size. The result is then blended
+with or overlaid on top of a CRTC.
+
+The DRM core recognizes three types of planes:
+
+- DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.
+ Primary planes are the planes operated upon by CRTC modesetting and
+ flipping operations described in the page_flip hook in
+ :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`.
+- DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.
+ Cursor planes are the planes operated upon by the
+ DRM_IOCTL_MODE_CURSOR and DRM_IOCTL_MODE_CURSOR2 ioctls.
+- DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor
+ planes. Some drivers refer to these types of planes as "sprites"
+ internally.
+
+For compatibility with legacy userspace, only overlay planes are made
+available to userspace by default. Userspace clients may set the
+DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate
+that they wish to receive a universal plane list containing all plane
+types.
+
+Plane Initialization
+~~~~~~~~~~~~~~~~~~~~
+
+To create a plane, a KMS driver allocates and zeroes an instance of
+:c:type:`struct drm_plane <drm_plane>` (possibly as part of a
+larger structure) and registers it with a call to
+:c:func:`drm_universal_plane_init()`. The function takes a
+bitmask of the CRTCs that can be associated with the plane, a pointer to
+the plane functions, a list of supported formats, and the type of
+plane (primary, cursor, or overlay) being initialized.
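+
+A registration sketch for a primary plane; foo_plane, foo_crtc and
+foo_plane_funcs are hypothetical driver objects, and the exact argument
+list of :c:func:`drm_universal_plane_init()` may differ between kernel
+versions (the trailing NULL simply lets the core pick a default object
+name)::
+
+    static const uint32_t foo_plane_formats[] = {
+            DRM_FORMAT_XRGB8888,
+            DRM_FORMAT_ARGB8888,
+    };
+
+    ret = drm_universal_plane_init(dev, &foo_plane->base,
+                                   1 << drm_crtc_index(&foo_crtc->base),
+                                   &foo_plane_funcs,
+                                   foo_plane_formats,
+                                   ARRAY_SIZE(foo_plane_formats),
+                                   DRM_PLANE_TYPE_PRIMARY, NULL);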
+
+Cursor and overlay planes are optional. All drivers should provide one
+primary plane per CRTC (although this requirement may change in the
+future); drivers that do not wish to provide special handling for
+primary planes may make use of the helper functions described in ? to
+create and register a primary plane with standard capabilities.
+
+Encoders (:c:type:`struct drm_encoder <drm_encoder>`)
+-----------------------------------------------------
+
+An encoder takes pixel data from a CRTC and converts it to a format
+suitable for any attached connectors. On some devices, it may be
+possible to have a CRTC send data to more than one encoder. In that
+case, both encoders would receive data from the same scanout buffer,
+resulting in a "cloned" display configuration across the connectors
+attached to each encoder.
+
+Encoder Initialization
+~~~~~~~~~~~~~~~~~~~~~~
+
+As for CRTCs, a KMS driver must create, initialize and register at least
+one :c:type:`struct drm_encoder <drm_encoder>` instance. The
+instance is allocated and zeroed by the driver, possibly as part of a
+larger structure.
+
+Drivers must initialize the :c:type:`struct drm_encoder
+<drm_encoder>` possible_crtcs and possible_clones fields before
+registering the encoder. Both fields are bitmasks of respectively the
+CRTCs that the encoder can be connected to, and sibling encoders
+candidate for cloning.
+
+After being initialized, the encoder must be registered with a call to
+:c:func:`drm_encoder_init()`. The function takes a pointer to the
+encoder functions and an encoder type. Supported types are
+
+- DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
+- DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
+- DRM_MODE_ENCODER_LVDS for display panels
+- DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
+ Component, SCART)
+- DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+
+Encoders must be attached to a CRTC to be used. DRM drivers leave
+encoders unattached at initialization time. Applications (or the fbdev
+compatibility layer when implemented) are responsible for attaching the
+encoders they want to use to a CRTC.
+
+Connectors (:c:type:`struct drm_connector <drm_connector>`)
+-----------------------------------------------------------
+
+A connector is the final destination for pixel data on a device, and
+usually connects directly to an external display device like a monitor
+or laptop panel. A connector can only be attached to one encoder at a
+time. The connector is also the structure where information about the
+attached display is kept, so it contains fields for display data, EDID
+data, DPMS & connection status, and information about modes supported on
+the attached displays.
+
+Connector Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally a KMS driver must create, initialize, register and attach at
+least one :c:type:`struct drm_connector <drm_connector>`
+instance. The instance is created like other KMS objects and initialized
+by setting the following fields.
+
+interlace_allowed
+ Whether the connector can handle interlaced modes.
+
+doublescan_allowed
+ Whether the connector can handle doublescan.
+
+display_info
+ Display information is filled from EDID information when a display
+ is detected. For non hot-pluggable displays such as flat panels in
+ embedded systems, the driver should initialize the
+ display_info.width_mm and display_info.height_mm fields with the
+ physical size of the display.
+
+polled
+ Connector polling mode, a combination of
+
+ DRM_CONNECTOR_POLL_HPD
+ The connector generates hotplug events and doesn't need to be
+ periodically polled. The CONNECT and DISCONNECT flags must not
+ be set together with the HPD flag.
+
+ DRM_CONNECTOR_POLL_CONNECT
+ Periodically poll the connector for connection.
+
+ DRM_CONNECTOR_POLL_DISCONNECT
+ Periodically poll the connector for disconnection.
+
+ Set to 0 for connectors that don't support connection status
+ discovery.
+
+The connector is then registered with a call to
+:c:func:`drm_connector_init()` with a pointer to the connector
+functions and a connector type, and exposed through sysfs with a call to
+:c:func:`drm_connector_register()`.
+
+Supported connector types are
+
+- DRM_MODE_CONNECTOR_VGA
+- DRM_MODE_CONNECTOR_DVII
+- DRM_MODE_CONNECTOR_DVID
+- DRM_MODE_CONNECTOR_DVIA
+- DRM_MODE_CONNECTOR_Composite
+- DRM_MODE_CONNECTOR_SVIDEO
+- DRM_MODE_CONNECTOR_LVDS
+- DRM_MODE_CONNECTOR_Component
+- DRM_MODE_CONNECTOR_9PinDIN
+- DRM_MODE_CONNECTOR_DisplayPort
+- DRM_MODE_CONNECTOR_HDMIA
+- DRM_MODE_CONNECTOR_HDMIB
+- DRM_MODE_CONNECTOR_TV
+- DRM_MODE_CONNECTOR_eDP
+- DRM_MODE_CONNECTOR_VIRTUAL
+
+Connectors must be attached to an encoder to be used. For devices that
+map connectors to encoders 1:1, the connector should be attached at
+initialization time with a call to
+:c:func:`drm_mode_connector_attach_encoder()`. The driver must
+also set the :c:type:`struct drm_connector <drm_connector>`
+encoder field to point to the attached encoder.
+
+Finally, drivers must initialize the connectors' state change detection
+with a call to :c:func:`drm_kms_helper_poll_init()`. If at least
+one connector is pollable but can't generate hotplug interrupts
+(indicated by the DRM_CONNECTOR_POLL_CONNECT and
+DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
+automatically be queued to periodically poll for changes. Connectors
+that can generate hotplug interrupts must be marked with the
+DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
+call :c:func:`drm_helper_hpd_irq_event()`. The function will
+queue a delayed work to check the state of all connectors, but no
+periodic polling will be done.
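+
+The hotplug side of this can be a very small interrupt handler;
+foo_hpd_irq_handler is hypothetical, while
+:c:func:`drm_helper_hpd_irq_event()` is the helper mentioned above::
+
+    static irqreturn_t foo_hpd_irq_handler(int irq, void *arg)
+    {
+            struct drm_device *dev = arg;
+
+            /* Let the probe helper re-detect connector state; it sends
+             * the hotplug event if something changed. */
+            drm_helper_hpd_irq_event(dev);
+
+            return IRQ_HANDLED;
+    }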
+
+Connector Operations
+~~~~~~~~~~~~~~~~~~~~
+
+ **Note**
+
+ Unless otherwise stated, all operations are mandatory.
+
+DPMS
+''''
+
+::
+
+    void (*dpms)(struct drm_connector *connector, int mode);
+
+The DPMS operation sets the power state of a connector. The mode
+argument is one of
+
+- DRM_MODE_DPMS_ON
+
+- DRM_MODE_DPMS_STANDBY
+
+- DRM_MODE_DPMS_SUSPEND
+
+- DRM_MODE_DPMS_OFF
+
+In all but DPMS_ON mode the encoder to which the connector is attached
+should put the display in low-power mode by driving its signals
+appropriately. If more than one connector is attached to the encoder
+care should be taken not to change the power state of other displays as
+a side effect. Low-power mode should be propagated to the encoders and
+CRTCs when all related connectors are put in low-power mode.
+
+Modes
+'''''
+
+::
+
+    int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
+                      uint32_t max_height);
+
+Fill the mode list with all supported modes for the connector. If the
+``max_width`` and ``max_height`` arguments are non-zero, the
+implementation must ignore all modes wider than ``max_width`` or higher
+than ``max_height``.
+
+In this operation the connector must also fill its display_info
+width_mm and height_mm fields with the physical size of the connected
+display in millimeters. The fields should be set to 0 if the value isn't
+known or is not applicable (for instance for projector devices).
+
+Connection Status
+'''''''''''''''''
+
+The connection status is updated through polling or hotplug events when
+supported (see ?). The status value is reported to userspace through
+ioctls and must not be used inside the driver, as it only gets
+initialized by a call to :c:func:`drm_mode_getconnector()` from
+userspace.
+
+::
+
+    enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                        bool force);
+
+Check to see if anything is attached to the connector. The ``force``
+parameter is set to false whilst polling or to true when checking the
+connector due to user request. ``force`` can be used by the driver to
+avoid expensive, destructive operations during automated probing.
+
+Return connector_status_connected if something is connected to the
+connector, connector_status_disconnected if nothing is connected and
+connector_status_unknown if the connection state isn't known.
+
+Drivers should only return connector_status_connected if the
+connection status has really been probed as connected. Connectors that
+can't detect the connection status, or failed connection status probes,
+should return connector_status_unknown.
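+
+A hedged sketch of such a detect implementation; to_foo_connector(),
+the ddc_adapter field and foo_connector_load_detect() are hypothetical,
+while :c:func:`drm_probe_ddc()` is the EDID helper that checks for a DDC
+acknowledge::
+
+    static enum drm_connector_status
+    foo_connector_detect(struct drm_connector *connector, bool force)
+    {
+            struct foo_connector *foo = to_foo_connector(connector);
+
+            /* Cheap, non-destructive probe first. */
+            if (drm_probe_ddc(foo->ddc_adapter))
+                    return connector_status_connected;
+
+            /* Avoid expensive load detection while merely polling. */
+            if (!force)
+                    return connector_status_unknown;
+
+            return foo_connector_load_detect(foo) ?
+                    connector_status_connected : connector_status_disconnected;
+    }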
+
+Cleanup
+-------
+
+The DRM core manages its objects' lifetime. When an object is not needed
+anymore the core calls its destroy function, which must clean up and
+free every resource allocated for the object. Every
+:c:func:`drm_\*_init()` call must be matched with a corresponding
+:c:func:`drm_\*_cleanup()` call to cleanup CRTCs
+(:c:func:`drm_crtc_cleanup()`), planes
+(:c:func:`drm_plane_cleanup()`), encoders
+(:c:func:`drm_encoder_cleanup()`) and connectors
+(:c:func:`drm_connector_cleanup()`). Furthermore, connectors that
+have been added to sysfs must be removed by a call to
+:c:func:`drm_connector_unregister()` before calling
+:c:func:`drm_connector_cleanup()`.
+
+Connector state change detection must be cleaned up with a call to
+:c:func:`drm_kms_helper_poll_fini()`.
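+
+A possible unload sequence for a driver with one static connector,
+encoder, plane and CRTC (the foo pointer is hypothetical)::
+
+    drm_kms_helper_poll_fini(dev);
+
+    drm_connector_unregister(&foo->connector);
+    drm_connector_cleanup(&foo->connector);
+    drm_encoder_cleanup(&foo->encoder);
+    drm_plane_cleanup(&foo->primary);
+    drm_crtc_cleanup(&foo->crtc);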
+
+Output discovery and initialization example
+-------------------------------------------
+
+::
+
+    void intel_crt_init(struct drm_device *dev)
+    {
+            struct drm_connector *connector;
+            struct intel_output *intel_output;
+
+            intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+            if (!intel_output)
+                    return;
+
+            connector = &intel_output->base;
+            drm_connector_init(dev, &intel_output->base,
+                               &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+            drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+                             DRM_MODE_ENCODER_DAC);
+
+            drm_mode_connector_attach_encoder(&intel_output->base,
+                                              &intel_output->enc);
+
+            /* Set up the DDC bus. */
+            intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+            if (!intel_output->ddc_bus) {
+                    dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                               "failed.\n");
+                    return;
+            }
+
+            intel_output->type = INTEL_OUTPUT_ANALOG;
+            connector->interlace_allowed = 0;
+            connector->doublescan_allowed = 0;
+
+            drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+            drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+            drm_connector_register(connector);
+    }
+
+In the example above (taken from the i915 driver), a CRTC, connector and
+encoder combination is created. A device-specific i2c bus is also
+created for fetching EDID data and performing monitor detection. Once
+the process is complete, the new connector is registered with sysfs to
+make its properties available to applications.
+
+KMS API Functions
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+ :export:
+
+KMS Data Structures
+-------------------
+
+.. kernel-doc:: include/drm/drm_crtc.h
+ :internal:
+
+KMS Locking
+-----------
+
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
+ :doc: kms locking
+
+.. kernel-doc:: include/drm/drm_modeset_lock.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
+ :export:
+
+KMS Properties
+==============
+
+Drivers may need to expose additional parameters to applications beyond
+those described in the previous sections. KMS supports attaching
+properties to CRTCs, connectors and planes and offers a userspace API to
+list, get and set the property values.
+
+Properties are identified by a name that uniquely defines the property
+purpose, and store an associated value. For all property types except
+blob properties the value is a 64-bit unsigned integer.
+
+KMS differentiates between properties and property instances. Drivers
+first create properties and then create and associate individual
+instances of those properties to objects. A property can be instantiated
+multiple times and associated with different objects. Values are stored
+in property instances, and all other property information is stored in
+the property and shared between all instances of the property.
+
+Every property is created with a type that influences how the KMS core
+handles the property. Supported property types are
+
+DRM_MODE_PROP_RANGE
+ Range properties report their minimum and maximum admissible values.
+ The KMS core verifies that values set by applications fit in that
+ range.
+
+DRM_MODE_PROP_ENUM
+ Enumerated properties take a numerical value that ranges from 0 to
+ the number of enumerated values defined by the property minus one,
+ and associate a free-formed string name to each value. Applications
+ can retrieve the list of defined value-name pairs and use the
+ numerical value to get and set property instance values.
+
+DRM_MODE_PROP_BITMASK
+ Bitmask properties are enumeration properties that additionally
+ restrict all enumerated values to the 0..63 range. Bitmask property
+ instance values combine one or more of the enumerated bits defined
+ by the property.
+
+DRM_MODE_PROP_BLOB
+ Blob properties store a binary blob without any format restriction.
+ The binary blobs are created as KMS standalone objects, and blob
+ property instance values store the ID of their associated blob
+ object.
+
+ Blob properties are only used for the connector EDID property and
+ cannot be created by drivers.
+
+To create a property drivers call one of the following functions
+depending on the property type. All property creation functions take
+property flags and name, as well as type-specific arguments.
+
+- struct drm_property \*drm_property_create_range(struct
+ drm_device \*dev, int flags, const char \*name, uint64_t min,
+ uint64_t max);
+ Create a range property with the given minimum and maximum values.
+
+- struct drm_property \*drm_property_create_enum(struct drm_device
+ \*dev, int flags, const char \*name, const struct
+ drm_prop_enum_list \*props, int num_values);
+ Create an enumerated property. The ``props`` argument points to an
+ array of ``num_values`` value-name pairs.
+
+- struct drm_property \*drm_property_create_bitmask(struct
+ drm_device \*dev, int flags, const char \*name, const struct
+ drm_prop_enum_list \*props, int num_values);
+ Create a bitmask property. The ``props`` argument points to an array
+ of ``num_values`` value-name pairs.
+
+Properties can additionally be created as immutable, in which case they
+will be read-only for applications but can be modified by the driver. To
+create an immutable property drivers must set the
+DRM_MODE_PROP_IMMUTABLE flag at property creation time.
+
+When no array of value-name pairs is readily available at property
+creation time for enumerated or range properties, drivers can create the
+property using the :c:func:`drm_property_create()` function and
+manually add enumeration value-name pairs by calling the
+:c:func:`drm_property_add_enum()` function. Care must be taken to
+properly specify the property type through the ``flags`` argument.
+
+After creating properties drivers can attach property instances to CRTC,
+connector and plane objects by calling
+:c:func:`drm_object_attach_property()`. The function takes a
+pointer to the target object, a pointer to the previously created
+property and an initial instance value.
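+
+A small sketch combining creation and attachment; the property name,
+the enumerated values and the foo_scaling_list array are made up::
+
+    static const struct drm_prop_enum_list foo_scaling_list[] = {
+            { 0, "none" },
+            { 1, "full" },
+            { 2, "aspect" },
+    };
+
+    struct drm_property *prop;
+
+    prop = drm_property_create_enum(dev, 0, "scaling mode",
+                                    foo_scaling_list,
+                                    ARRAY_SIZE(foo_scaling_list));
+    if (!prop)
+            return -ENOMEM;
+
+    /* Instantiate the property on a connector with initial value 0. */
+    drm_object_attach_property(&connector->base, prop, 0);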
+
+Existing KMS Properties
+-----------------------
+
+The following table gives a description of the DRM properties exposed
+by various modules/drivers.
+
+.. csv-table::
+ :header-rows: 1
+ :file: kms-properties.csv
+
+Vertical Blanking
+=================
+
+Vertical blanking plays a major role in graphics rendering. To achieve
+tear-free display, users must synchronize page flips and/or rendering to
+vertical blanking. The DRM API offers ioctls to perform page flips
+synchronized to vertical blanking and wait for vertical blanking.
+
+The DRM core handles most of the vertical blanking management logic,
+which involves filtering out spurious interrupts, keeping race-free
+blanking counters, coping with counter wrap-around and resets and
+keeping use counts. It relies on the driver to generate vertical
+blanking interrupts and optionally provide a hardware vertical blanking
+counter. Drivers must implement the following operations.
+
+- int (\*enable_vblank) (struct drm_device \*dev, int crtc); void
+ (\*disable_vblank) (struct drm_device \*dev, int crtc);
+ Enable or disable vertical blanking interrupts for the given CRTC.
+
+- u32 (\*get_vblank_counter) (struct drm_device \*dev, int crtc);
+ Retrieve the value of the vertical blanking counter for the given
+ CRTC. If the hardware maintains a vertical blanking counter its value
+ should be returned. Otherwise drivers can use the
+ :c:func:`drm_vblank_count()` helper function to handle this
+ operation.
+
+Drivers must initialize the vertical blanking handling core with a call
+to :c:func:`drm_vblank_init()` in their load operation.
+
+Vertical blanking interrupts can be enabled by the DRM core or by
+drivers themselves (for instance to handle page flipping operations).
+The DRM core maintains a vertical blanking use count to ensure that the
+interrupts are not disabled while a user still needs them. To increment
+the use count, drivers call :c:func:`drm_vblank_get()`. Upon
+return vertical blanking interrupts are guaranteed to be enabled.
+
+To decrement the use count drivers call
+:c:func:`drm_vblank_put()`. Only when the use count drops to zero
+will the DRM core disable the vertical blanking interrupts after a delay
+by scheduling a timer. The delay is accessible through the
+vblankoffdelay module parameter or the ``drm_vblank_offdelay`` global
+variable and expressed in milliseconds. Its default value is 5000 ms.
+Zero means never disable, and a negative value means disable
+immediately. Drivers may override the behaviour by setting the
+:c:type:`struct drm_device <drm_device>`
+vblank_disable_immediate flag, which when set causes vblank interrupts
+to be disabled immediately regardless of the drm_vblank_offdelay
+value. The flag should only be set if there's a properly working
+hardware vblank counter present.
+
+When a vertical blanking interrupt occurs drivers only need to call the
+:c:func:`drm_handle_vblank()` function to account for the
+interrupt.
+
+Resources allocated by :c:func:`drm_vblank_init()` must be freed
+with a call to :c:func:`drm_vblank_cleanup()` in the driver unload
+operation handler.
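+
+The overall flow, condensed into a sketch; num_crtcs, pipe and the
+surrounding driver code are assumptions::
+
+    /* Driver load: */
+    ret = drm_vblank_init(dev, num_crtcs);
+    if (ret)
+            return ret;
+
+    /* Before queuing an operation that must not miss a vblank: */
+    ret = drm_vblank_get(dev, pipe);
+    if (ret)
+            return ret;
+    /* ... queue the page flip, then call drm_vblank_put(dev, pipe) once
+     * the flip has completed ... */
+
+    /* Vertical blanking interrupt handler: */
+    drm_handle_vblank(dev, pipe);
+
+    /* Driver unload: */
+    drm_vblank_cleanup(dev);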
+
+Vertical Blanking and Interrupt Handling Functions Reference
+------------------------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_irq.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_irq.h
+ :internal:
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
new file mode 100644
index 000000000000..59f9822fecd0
--- /dev/null
+++ b/Documentation/gpu/drm-mm.rst
@@ -0,0 +1,454 @@
+=====================
+DRM Memory Management
+=====================
+
+Modern Linux systems require large amounts of graphics memory to store
+frame buffers, textures, vertices and other graphics-related data. Given
+the very dynamic nature of much of that data, managing graphics memory
+efficiently is thus crucial for the graphics stack and plays a central
+role in the DRM infrastructure.
+
+The DRM core includes two memory managers, namely Translation Table Maps
+(TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
+manager to be developed and tried to be a one-size-fits-all
+solution. It provides a single userspace API to accommodate the needs of
+all hardware, supporting both Unified Memory Architecture (UMA) devices
+and devices with dedicated video RAM (i.e. most discrete video cards).
+This resulted in a large, complex piece of code that turned out to be
+hard to use for driver development.
+
+GEM started as an Intel-sponsored project in reaction to TTM's
+complexity. Its design philosophy is completely different: instead of
+providing a solution to every graphics memory-related problem, GEM
+identified common code between drivers and created a support library to
+share it. GEM has simpler initialization and execution requirements than
+TTM, but has no video RAM management capabilities and is thus limited to
+UMA devices.
+
+The Translation Table Manager (TTM)
+-----------------------------------
+
+TTM design background and information belongs here.
+
+TTM initialization
+~~~~~~~~~~~~~~~~~~
+
+ **Warning**
+
+ This section is outdated.
+
+Drivers wishing to support TTM must fill out a drm_bo_driver
+structure. The structure contains several fields with function pointers
+for initializing the TTM, allocating and freeing memory, waiting for
+command completion and fence synchronization, and memory migration. See
+the radeon_ttm.c file for an example of usage.
+
+The ttm_global_reference structure is made up of several fields:
+
+::
+
+    struct ttm_global_reference {
+            enum ttm_global_types global_type;
+            size_t size;
+            void *object;
+            int (*init) (struct ttm_global_reference *);
+            void (*release) (struct ttm_global_reference *);
+    };
+
+
+There should be one global reference structure for your memory manager
+as a whole, and there will be others for each object created by the
+memory manager at runtime. Your global TTM should have a type of
+TTM_GLOBAL_TTM_MEM. The size field for the global object should be
+sizeof(struct ttm_mem_global), and the init and release hooks should
+point at your driver-specific init and release routines, which probably
+eventually call ttm_mem_global_init and ttm_mem_global_release,
+respectively.
+
+Once your global TTM accounting structure is set up and initialized by
+calling ttm_global_item_ref() on it, you need to create a buffer
+object TTM to provide a pool for buffer object allocation by clients and
+the kernel itself. The type of this object should be
+TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
+ttm_bo_global). Again, driver-specific init and release functions may
+be provided, likely eventually calling ttm_bo_global_init() and
+ttm_bo_global_release(), respectively. Also, like the previous
+object, ttm_global_item_ref() is used to create an initial reference
+count for the TTM, which will call your initialization function.
+
+The Graphics Execution Manager (GEM)
+------------------------------------
+
+The GEM design approach has resulted in a memory manager that doesn't
+provide full coverage of all (or even all common) use cases in its
+userspace or kernel API. GEM exposes a set of standard memory-related
+operations to userspace and a set of helper functions to drivers, and
+lets drivers implement hardware-specific operations with their own
+private API.
+
+The GEM userspace API is described in the `GEM - the Graphics Execution
+Manager <http://lwn.net/Articles/283798/>`__ article on LWN. While
+slightly outdated, the document provides a good overview of the GEM API
+principles. Buffer allocation and read and write operations, described
+as part of the common GEM API, are currently implemented using
+driver-specific ioctls.
+
+GEM is data-agnostic. It manages abstract buffer objects without knowing
+what individual buffers contain. APIs that require knowledge of buffer
+contents or purpose, such as buffer allocation or synchronization
+primitives, are thus outside of the scope of GEM and must be implemented
+using driver-specific ioctls.
+
+On a fundamental level, GEM involves several operations:
+
+- Memory allocation and freeing
+- Command execution
+- Aperture management at command execution time
+
+Buffer object allocation is relatively straightforward and largely
+provided by Linux's shmem layer, which provides memory to back each
+object.
+
+Device-specific operations, such as command execution, pinning, buffer
+read & write, mapping, and domain ownership transfers are left to
+driver-specific ioctls.
+
+GEM Initialization
+~~~~~~~~~~~~~~~~~~
+
+Drivers that use GEM must set the DRIVER_GEM bit in the
+:c:type:`struct drm_driver <drm_driver>` driver_features
+field. The DRM core will then automatically initialize the GEM core
+before calling the load operation. Behind the scenes, this will create a
+DRM Memory Manager object which provides an address space pool for
+object allocation.
+
+In a KMS configuration, drivers need to allocate and initialize a
+command ring buffer following core GEM initialization if required by the
+hardware. UMA devices usually have what is called a "stolen" memory
+region, which provides space for the initial framebuffer and large,
+contiguous memory regions required by the device. This space is
+typically not managed by GEM, and must be initialized separately into
+its own DRM MM object.
+
+GEM Objects Creation
+~~~~~~~~~~~~~~~~~~~~
+
+GEM splits creation of GEM objects and allocation of the memory that
+backs them in two distinct operations.
+
+GEM objects are represented by an instance of
+:c:type:`struct drm_gem_object <drm_gem_object>`. Drivers usually need to
+extend GEM objects with private information and thus create a
+driver-specific GEM object structure type that embeds an instance of
+:c:type:`struct drm_gem_object <drm_gem_object>`.
+
+To create a GEM object, a driver allocates memory for an instance of its
+specific GEM object type and initializes the embedded
+:c:type:`struct drm_gem_object <drm_gem_object>` with a call
+to :c:func:`drm_gem_object_init()`. The function takes a pointer
+to the DRM device, a pointer to the GEM object and the buffer object
+size in bytes.
+
+GEM uses shmem to allocate anonymous pageable memory.
+:c:func:`drm_gem_object_init()` will create a shmfs file of the
+requested size and store it into the
+:c:type:`struct drm_gem_object <drm_gem_object>` filp field. The memory is
+used as either main storage for the object when the graphics hardware
+uses system memory directly or as a backing store otherwise.
+
+Drivers are responsible for the actual physical page allocation by
+calling :c:func:`shmem_read_mapping_page_gfp()` for each page.
+Note that they can decide to allocate pages when initializing the GEM
+object, or to delay allocation until the memory is needed (for instance
+when a page fault occurs as a result of a userspace memory access or
+when the driver needs to start a DMA transfer involving the memory).
+
+Anonymous pageable memory allocation is not always desired, for instance
+when the hardware requires physically contiguous system memory as is
+often the case in embedded devices. Drivers can create GEM objects with
+no shmfs backing (called private GEM objects) by initializing them with
+a call to :c:func:`drm_gem_private_object_init()` instead of
+:c:func:`drm_gem_object_init()`. Storage for private GEM objects
+must be managed by drivers.
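+
+A creation sketch for a driver-specific, shmem-backed GEM object; the
+foo_gem_object type and foo_gem_create() are hypothetical::
+
+    struct foo_gem_object {
+            struct drm_gem_object base;
+            /* driver-private data (page list, GPU address, ...) */
+    };
+
+    static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
+                                                 size_t size)
+    {
+            struct foo_gem_object *obj;
+            int ret;
+
+            /* The GEM core expects a page-aligned object size. */
+            size = PAGE_ALIGN(size);
+
+            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+            if (!obj)
+                    return ERR_PTR(-ENOMEM);
+
+            /* Use drm_gem_private_object_init() here instead when the
+             * driver manages the backing storage itself. */
+            ret = drm_gem_object_init(dev, &obj->base, size);
+            if (ret) {
+                    kfree(obj);
+                    return ERR_PTR(ret);
+            }
+
+            return obj;
+    }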
+
+GEM Objects Lifetime
+~~~~~~~~~~~~~~~~~~~~
+
+All GEM objects are reference-counted by the GEM core. References can be
+acquired and released by calling
+:c:func:`drm_gem_object_reference()` and
+:c:func:`drm_gem_object_unreference()` respectively. The caller
+must hold the :c:type:`struct drm_device <drm_device>`
+struct_mutex lock when calling
+:c:func:`drm_gem_object_reference()`. As a convenience, GEM
+provides the :c:func:`drm_gem_object_unreference_unlocked()`
+function, which can be called without holding the lock.
+
+When the last reference to a GEM object is released the GEM core calls
+the :c:type:`struct drm_driver <drm_driver>` gem_free_object
+operation. That operation is mandatory for GEM-enabled drivers and must
+free the GEM object and all associated resources.
+
+::
+
+    void (*gem_free_object)(struct drm_gem_object *obj);
+
+Drivers are responsible for freeing all GEM object resources. This
+includes the resources created by the GEM core, which need to be
+released with :c:func:`drm_gem_object_release()`.
+
+GEM Objects Naming
+~~~~~~~~~~~~~~~~~~
+
+Communication between userspace and the kernel refers to GEM objects
+using local handles, global names or, more recently, file descriptors.
+All of those are 32-bit integer values; the usual Linux kernel limits
+apply to the file descriptors.
+
+GEM handles are local to a DRM file. Applications get a handle to a GEM
+object through a driver-specific ioctl, and can use that handle to refer
+to the GEM object in other standard or driver-specific ioctls. Closing a
+DRM file handle frees all its GEM handles and dereferences the
+associated GEM objects.
+
+To create a handle for a GEM object drivers call
+:c:func:`drm_gem_handle_create()`. The function takes a pointer
+to the DRM file and the GEM object and returns a locally unique handle.
+When the handle is no longer needed drivers delete it with a call to
+:c:func:`drm_gem_handle_delete()`. Finally the GEM object
+associated with a handle can be retrieved by a call to
+:c:func:`drm_gem_object_lookup()`.
+
+Handles don't take ownership of GEM objects, they only take a reference
+to the object that will be dropped when the handle is destroyed. To
+avoid leaking GEM objects, drivers must make sure they drop the
+reference(s) they own (such as the initial reference taken at object
+creation time) as appropriate, without any special consideration for the
+handle. For example, in the particular case of combined GEM object and
+handle creation in the implementation of the dumb_create operation,
+drivers must drop the initial reference to the GEM object before
+returning the handle.
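+
+A sketch of the handle-creation tail of such a dumb_create
+implementation; obj and args are assumptions matching the description
+above::
+
+    ret = drm_gem_handle_create(file_priv, &obj->base, &args->handle);
+    /* The handle (if created) now holds its own reference, so the
+     * initial creation reference can be dropped unconditionally. */
+    drm_gem_object_unreference_unlocked(&obj->base);
+    if (ret)
+            return ret;
+
+    return 0;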
+
+GEM names are similar in purpose to handles but are not local to DRM
+files. They can be passed between processes to reference a GEM object
+globally. Names can't be used directly to refer to objects in the DRM
+API, applications must convert handles to names and names to handles
+using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
+respectively. The conversion is handled by the DRM core without any
+driver-specific support.
+
+GEM also supports buffer sharing with dma-buf file descriptors through
+PRIME. GEM-based drivers must use the provided helper functions to
+implement the exporting and importing correctly. See ?. Since sharing
+file descriptors is inherently more secure than the easily guessable and
+global GEM names it is the preferred buffer sharing mechanism. Sharing
+buffers through GEM names is only supported for legacy userspace.
+Furthermore PRIME also allows cross-device buffer sharing since it is
+based on dma-bufs.
+
+GEM Objects Mapping
+~~~~~~~~~~~~~~~~~~~
+
+Because mapping operations are fairly heavyweight, GEM favours
+read/write-like access to buffers, implemented through driver-specific
+ioctls, over mapping buffers to userspace. However, when random access
+to the buffer is needed (to perform software rendering for instance),
+direct access to the object can be more efficient.
+
+The mmap system call can't be used directly to map GEM objects, as they
+don't have their own file handle. Two alternative methods currently
+co-exist to map GEM objects to userspace. The first method uses a
+driver-specific ioctl to perform the mapping operation, calling
+:c:func:`do_mmap()` under the hood. This is often considered
+dubious, seems to be discouraged for new GEM-enabled drivers, and will
+thus not be described here.
+
+The second method uses the mmap system call on the DRM file handle::
+
+    void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+               off_t offset);
+
+DRM identifies the GEM object to be mapped by a fake offset passed
+through the mmap offset argument. Prior to being mapped, a GEM object
+must thus be associated with a fake offset. To do so, drivers must call
+:c:func:`drm_gem_create_mmap_offset()` on the object.
+
+Once allocated, the fake offset value must be passed to the application
+in a driver-specific way and can then be used as the mmap offset
+argument.
+
+The GEM core provides a helper method :c:func:`drm_gem_mmap()` to
+handle object mapping. The method can be set directly as the mmap file
+operation handler. It will look up the GEM object based on the offset
+value and set the VMA operations to the :c:type:`struct drm_driver
+<drm_driver>` gem_vm_ops field. Note that
+:c:func:`drm_gem_mmap()` doesn't map memory to userspace, but
+relies on the driver-provided fault handler to map pages individually.
+
+To use :c:func:`drm_gem_mmap()`, drivers must fill the
+:c:type:`struct drm_driver <drm_driver>` gem_vm_ops field
+with a pointer to VM operations::
+
+    struct vm_operations_struct *gem_vm_ops;
+
+    struct vm_operations_struct {
+            void (*open)(struct vm_area_struct *area);
+            void (*close)(struct vm_area_struct *area);
+            int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+    };
+
+The open and close operations must update the GEM object reference
+count. Drivers can use the :c:func:`drm_gem_vm_open()` and
+:c:func:`drm_gem_vm_close()` helper functions directly as open
+and close handlers.
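+
+A wiring sketch; foo_gem_fault is a hypothetical per-page fault handler,
+while the open and close slots reuse the GEM helpers::
+
+    static const struct vm_operations_struct foo_gem_vm_ops = {
+            .fault = foo_gem_fault,
+            .open = drm_gem_vm_open,   /* grabs a GEM object reference */
+            .close = drm_gem_vm_close, /* drops the GEM object reference */
+    };
+
+The structure is then assigned to the :c:type:`struct drm_driver
+<drm_driver>` gem_vm_ops field, with :c:func:`drm_gem_mmap()` installed
+as the mmap handler in the driver's file_operations.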
+
+The fault operation handler is responsible for mapping individual pages
+to userspace when a page fault occurs. Depending on the memory
+allocation scheme, drivers can allocate pages at fault time, or can
+decide to allocate memory for the GEM object at the time the object is
+created.
+
+Drivers that want to map the GEM object upfront instead of handling page
+faults can implement their own mmap file operation handler.
+
+Memory Coherency
+~~~~~~~~~~~~~~~~
+
+When mapped to the device or used in a command buffer, backing pages for
+an object are flushed to memory and marked write combined so as to be
+coherent with the GPU. Likewise, if the CPU accesses an object after the
+GPU has finished rendering to the object, then the object must be made
+coherent with the CPU's view of memory, usually involving GPU cache
+flushing of various kinds. This core CPU<->GPU coherency management is
+provided by a device-specific ioctl, which evaluates an object's current
+domain and performs any necessary flushing or synchronization to put the
+object into the desired coherency domain (note that the object may be
+busy, i.e. an active render target; in that case, setting the domain
+blocks the client and waits for rendering to complete before performing
+any necessary flushing operations).
+
+Command Execution
+~~~~~~~~~~~~~~~~~
+
+Perhaps the most important GEM function for GPU devices is providing a
+command execution interface to clients. Client programs construct
+command buffers containing references to previously allocated memory
+objects, and then submit them to GEM. At that point, GEM takes care to
+bind all the objects into the GTT, execute the buffer, and provide
+necessary synchronization between clients accessing the same buffers.
+This often involves evicting some objects from the GTT and re-binding
+others (a fairly expensive operation), and providing relocation support
+which hides fixed GTT offsets from clients. Clients must take care not
+to submit command buffers that reference more objects than can fit in
+the GTT; otherwise, GEM will reject them and no rendering will occur.
+Similarly, if several objects in the buffer require fence registers to
+be allocated for correct rendering (e.g. 2D blits on pre-965 chips),
+care must be taken not to require more fence registers than are
+available to the client. Such resource management should be abstracted
+from the client in libdrm.
+
+GEM Function Reference
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_gem.h
+ :internal:
+
+VMA Offset Manager
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
+ :doc: vma offset manager
+
+.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_vma_manager.h
+ :internal:
+
+PRIME Buffer Sharing
+--------------------
+
+PRIME is the cross-device buffer sharing framework in DRM, originally
+created for the OPTIMUS range of multi-gpu platforms. To userspace PRIME
+buffers are dma-buf based file descriptors.
+
+Overview and Driver Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Similar to GEM global names, PRIME file descriptors are also used to
+share buffer objects across processes. They offer additional security:
+as file descriptors must be explicitly sent over UNIX domain sockets to
+be shared between applications, they can't be guessed like the globally
+unique GEM names.
+
+Drivers that support the PRIME API must set the DRIVER_PRIME bit in the
+:c:type:`struct drm_driver <drm_driver>`
+driver_features field, and implement the prime_handle_to_fd and
+prime_fd_to_handle operations.
+
+::
+
+    int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+                              uint32_t handle, uint32_t flags, int *prime_fd);
+    int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+                              int prime_fd, uint32_t *handle);
+
+Those two operations convert a handle to a PRIME file descriptor and
+vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+to manage the PRIME file descriptors. Similar to the mode setting API,
+PRIME is agnostic to the underlying buffer object manager, as long as
+handles are 32-bit unsigned integers.
+
+While non-GEM drivers must implement the operations themselves, GEM
+drivers must use the :c:func:`drm_gem_prime_handle_to_fd()` and
+:c:func:`drm_gem_prime_fd_to_handle()` helper functions. Those
+helpers rely on the driver gem_prime_export and gem_prime_import
+operations to create a dma-buf instance from a GEM object (dma-buf
+exporter role) and to create a GEM object from a dma-buf instance
+(dma-buf importer role).
+
+::
+
+    struct dma_buf *(*gem_prime_export)(struct drm_device *dev,
+                                        struct drm_gem_object *obj, int flags);
+    struct drm_gem_object *(*gem_prime_import)(struct drm_device *dev,
+                                               struct dma_buf *dma_buf);
+
+These two operations are mandatory for GEM drivers that support PRIME.
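+
+For a GEM driver the wiring therefore typically looks like the following
+sketch, reusing the generic PRIME helpers for both directions; drivers
+with more specific needs can provide their own export and import
+implementations instead::
+
+    static struct drm_driver foo_drm_driver = {
+            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
+            .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+            .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+            .gem_prime_export = drm_gem_prime_export,
+            .gem_prime_import = drm_gem_prime_import,
+            /* ... other operations ... */
+    };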
+
+PRIME Helper Functions
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_prime.c
+ :doc: PRIME Helpers
+
+PRIME Function References
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_prime.c
+ :export:
+
+DRM MM Range Allocator
+----------------------
+
+Overview
+~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_mm.c
+ :doc: Overview
+
+LRU Scan/Eviction Support
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_mm.c
+ :doc: lru scan roaster
+
+DRM MM Range Allocator Function References
+------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_mm.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_mm.h
+ :internal:
+
+CMA Helper Functions Reference
+------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+ :doc: cma helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_gem_cma_helper.h
+ :internal:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
new file mode 100644
index 000000000000..536bf3eaadd4
--- /dev/null
+++ b/Documentation/gpu/drm-uapi.rst
@@ -0,0 +1,111 @@
+===================
+Userland interfaces
+===================
+
+The DRM core exports several interfaces to applications, generally
+intended to be used through corresponding libdrm wrapper functions. In
+addition, drivers export device-specific interfaces for use by userspace
+drivers & device-aware applications through ioctls and sysfs files.
+
+External interfaces include: memory mapping, context management, DMA
+operations, AGP management, vblank control, fence management, memory
+management, and output management.
+
+Cover generic ioctls and sysfs layout here. We only need high-level
+info, since man pages should cover the rest.
+
+libdrm Device Lookup
+====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
+ :doc: getunique and setversion story
+
+
+Primary Nodes, DRM Master and Authentication
+============================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_auth.c
+ :doc: master and authentication
+
+.. kernel-doc:: drivers/gpu/drm/drm_auth.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_auth.h
+ :internal:
+
+Render nodes
+============
+
+DRM core provides multiple character-devices for user-space to use.
+Depending on which device is opened, user-space can perform a different
+set of operations (mainly ioctls). The primary node is always created
+and called card<num>. Additionally, a currently unused control node,
+called controlD<num> is also created. The primary node provides all
+legacy operations and historically was the only interface used by
+userspace. With KMS, the control node was introduced. However, the
+planned KMS control interface has never been written and so the control
+node stays unused to date.
+
+With the increased use of offscreen renderers and GPGPU applications,
+clients no longer require running compositors or graphics servers to
+make use of a GPU. But the DRM API required unprivileged clients to
+authenticate to a DRM-Master prior to getting GPU access. To avoid this
+step and to grant clients GPU access without authenticating, render
+nodes were introduced. Render nodes solely serve render clients, that
+is, no modesetting or privileged ioctls can be issued on render nodes.
+Only non-global rendering commands are allowed. If a driver supports
+render nodes, it must advertise it via the DRIVER_RENDER DRM driver
+capability. If not supported, the primary node must be used for render
+clients together with the legacy drmAuth authentication procedure.
+
+If a driver advertises render node support, DRM core will create a
+separate render node called renderD<num>. There will be one render node
+per device. No ioctls except PRIME-related ioctls will be allowed on
+this node. Especially GEM_OPEN will be explicitly prohibited. Render
+nodes are designed to avoid the buffer leaks that occur if clients
+guess the flink names or mmap offsets on the legacy interface.
+In addition to this basic interface, drivers must mark their
+driver-dependent render-only ioctls as DRM_RENDER_ALLOW so render
+clients can use them. Driver authors must be careful not to allow any
+privileged ioctls on render nodes.
+
+With render nodes, user-space can now control access to the render node
+via basic file-system access-modes. A running graphics server which
+authenticates clients on the privileged primary/legacy node is no longer
+required. Instead, a client can open the render node and is immediately
+granted GPU access. Communication between clients (or servers) is done
+via PRIME. FLINK from render node to legacy node is not supported. New
+clients must not use the insecure FLINK interface.
+
+Besides dropping all modeset/global ioctls, render nodes also drop the
+DRM-Master concept. There is no reason to associate render clients with
+a DRM-Master as they are independent of any graphics server. Besides,
+they must work without any running master, anyway. Drivers must be able
+to run without a master object if they support render nodes. If, on the
+other hand, a driver requires shared state between clients which is
+visible to user-space and accessible beyond open-file boundaries, they
+cannot support render nodes.
+
+VBlank event handling
+=====================
+
+The DRM core exposes two vertical blank related ioctls:
+
+DRM_IOCTL_WAIT_VBLANK
+ This takes a struct drm_wait_vblank structure as its argument, and
+ it is used to block or request a signal when a specified vblank
+ event occurs.
+
+DRM_IOCTL_MODESET_CTL
+ This was only used for user-mode-setting drivers around modesetting
+ changes to allow the kernel to update the vblank interrupt after
+ mode setting, since on many devices the vertical blank counter is
+ reset to 0 at some point during modeset. Modern drivers should not
+ call this any more since with kernel mode setting it is a no-op.
+
+This second part of the GPU Driver Developer's Guide documents driver
+code, implementation details and also all the driver-specific userspace
+interfaces. Especially since all hardware-acceleration interfaces to
+userspace are driver specific for efficiency and other reasons these
+interfaces can be rather substantial. Hence every driver has its own
+chapter.
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
new file mode 100644
index 000000000000..2fe5952e90f1
--- /dev/null
+++ b/Documentation/gpu/i915.rst
@@ -0,0 +1,347 @@
+===========================
+ drm/i915 Intel GFX Driver
+===========================
+
+The drm/i915 driver supports all (with the exception of some very early
+models) integrated GFX chipsets with both Intel display and rendering
+blocks. This excludes a set of SoC platforms with an SGX rendering unit;
+those have basic support through the gma500 drm driver.
+
+Core Driver Infrastructure
+==========================
+
+This section covers core driver infrastructure used by both the display
+and the GEM parts of the driver.
+
+Runtime Power Management
+------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
+ :doc: runtime pm
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
+ :internal:
+
+Interrupt Handling
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+ :doc: interrupt handling
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+ :functions: intel_irq_init intel_irq_init_hw intel_hpd_init
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+ :functions: intel_runtime_pm_disable_interrupts
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+ :functions: intel_runtime_pm_enable_interrupts
+
+Intel GVT-g Guest Support (vGPU)
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
+ :doc: Intel GVT-g guest support
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
+ :internal:
+
+Display Hardware Handling
+=========================
+
+This section covers everything related to the display hardware,
+including the mode setting infrastructure, plane, sprite and cursor
+handling, display output probing and related topics.
+
+Mode Setting Infrastructure
+---------------------------
+
+The i915 driver is thus far the only DRM driver which doesn't use the
+common DRM helper code to implement mode setting sequences. Thus it has
+its own tailor-made infrastructure for executing a display configuration
+change.
+
+Frontbuffer Tracking
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
+ :doc: frontbuffer tracking
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem.c
+ :functions: i915_gem_track_fb
+
+Display FIFO Underrun Reporting
+-------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fifo_underrun.c
+ :doc: fifo underrun handling
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fifo_underrun.c
+ :internal:
+
+Plane Configuration
+-------------------
+
+This section covers plane configuration and composition with the primary
+plane, sprites, cursors and overlays. This includes the infrastructure
+to do atomic vsync'ed updates of all this state and also tightly coupled
+topics like watermark setup and computation, framebuffer compression and
+panel self refresh.
+
+Atomic Plane Helpers
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_atomic_plane.c
+ :doc: atomic plane helpers
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_atomic_plane.c
+ :internal:
+
+Output Probing
+--------------
+
+This section covers output probing and related infrastructure like the
+hotplug interrupt storm detection and mitigation code. Note that the
+i915 driver still uses most of the common DRM helper code for output
+probing, so those sections fully apply.
+
+Hotplug
+-------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_hotplug.c
+ :doc: Hotplug
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_hotplug.c
+ :internal:
+
+High Definition Audio
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+ :doc: High Definition Audio over HDMI and Display Port
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+ :internal:
+
+.. kernel-doc:: include/drm/i915_component.h
+ :internal:
+
+Panel Self Refresh (PSR/SRD)
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_psr.c
+ :doc: Panel Self Refresh (PSR/SRD)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_psr.c
+ :internal:
+
+Frame Buffer Compression (FBC)
+------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fbc.c
+ :doc: Frame Buffer Compression (FBC)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fbc.c
+ :internal:
+
+Display Refresh Rate Switching (DRRS)
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :doc: Display Refresh Rate Switching (DRRS)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :functions: intel_dp_set_drrs_state
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :functions: intel_edp_drrs_enable
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :functions: intel_edp_drrs_disable
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :functions: intel_edp_drrs_invalidate
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :functions: intel_edp_drrs_flush
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+ :functions: intel_dp_drrs_init
+
+DPIO
+----
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+ :doc: DPIO
+
+CSR firmware support for DMC
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+ :doc: csr support for dmc
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+ :internal:
+
+Video BIOS Table (VBT)
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_bios.c
+ :doc: Video BIOS Table (VBT)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_bios.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
+ :internal:
+
+Memory Management and Command Submission
+========================================
+
+This section covers all things related to the GEM implementation in the
+i915 driver.
+
+Batchbuffer Parsing
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
+ :doc: batch buffer command parser
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
+ :internal:
+
+Batchbuffer Pools
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
+ :doc: batch pool
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
+ :internal:
+
+Logical Rings, Logical Ring Contexts and Execlists
+--------------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_lrc.c
+ :doc: Logical Rings, Logical Ring Contexts and Execlists
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_lrc.c
+ :internal:
+
+Global GTT views
+----------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+ :doc: Global GTT views
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+ :internal:
+
+GTT Fences and Swizzling
+------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+ :internal:
+
+Global GTT Fence Handling
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+ :doc: fence register handling
+
+Hardware Tiling and Swizzling Details
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+ :doc: tiling swizzling details
+
+Object Tiling IOCTLs
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
+ :doc: buffer object tiling
+
+Buffer Object Eviction
+----------------------
+
+This section documents the interface functions for evicting buffer
+objects to make space available in the virtual gpu address spaces. Note
+that this is mostly orthogonal to shrinking buffer object caches, whose
+goal is to make main memory (shared with the gpu through the unified
+memory architecture) available.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_evict.c
+ :internal:
+
+Buffer Object Memory Shrinking
+------------------------------
+
+This section documents the interface function for shrinking the memory
+usage of buffer object caches. Shrinking is used to make main memory
+available. Note that this is mostly orthogonal to evicting buffer
+objects, whose goal is to make space in the gpu virtual address spaces.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c
+ :internal:
+
+GuC
+===
+
+GuC-specific firmware loader
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+ :doc: GuC-specific firmware loader
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+ :internal:
+
+GuC-based command submission
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
+ :doc: GuC-based command submission
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
+ :internal:
+
+GuC Firmware Layout
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fwif.h
+ :doc: GuC Firmware Layout
+
+Tracing
+=======
+
+This section covers all things related to the tracepoints implemented
+in the i915 driver.
+
+i915_ppgtt_create and i915_ppgtt_release
+----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
+ :doc: i915_ppgtt_create and i915_ppgtt_release tracepoints
+
+i915_context_create and i915_context_free
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
+ :doc: i915_context_create and i915_context_free tracepoints
+
+switch_mm
+---------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
+ :doc: switch_mm tracepoint
+
+.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
new file mode 100644
index 000000000000..fcac0fa72056
--- /dev/null
+++ b/Documentation/gpu/index.rst
@@ -0,0 +1,14 @@
+==================================
+Linux GPU Driver Developer's Guide
+==================================
+
+.. toctree::
+
+ introduction
+ drm-internals
+ drm-mm
+ drm-kms
+ drm-kms-helpers
+ drm-uapi
+ i915
+ vga-switcheroo
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
new file mode 100644
index 000000000000..1903595b5310
--- /dev/null
+++ b/Documentation/gpu/introduction.rst
@@ -0,0 +1,51 @@
+============
+Introduction
+============
+
+The Linux DRM layer contains code intended to support the needs of
+complex graphics devices, usually containing programmable pipelines well
+suited to 3D graphics acceleration. Graphics drivers in the kernel may
+make use of DRM functions to make tasks like memory management,
+interrupt handling and DMA easier, and provide a uniform interface to
+applications.
+
+A note on versions: this guide covers features found in the DRM tree,
+including the TTM memory manager, output configuration and mode setting,
+and the new vblank internals, in addition to all the regular features
+found in current kernels.
+
+[Insert diagram of typical DRM stack here]
+
+Style Guidelines
+================
+
+For consistency this documentation uses American English. Abbreviations
+are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
+on. To aid in reading, the documentation makes full use of the markup
+characters kerneldoc provides: @parameter for function parameters,
+@member for structure members, &structure to reference structures and
+function() for functions. These all get automatically hyperlinked if
+kerneldoc for the referenced objects exists. When referencing entries in
+function vtables please use ->vfunc(). Note that kerneldoc does not
+support referencing struct members directly, so please add a reference
+to the vtable struct somewhere in the same paragraph or at least
+section.
+
+Except in special situations (to separate locked from unlocked variants)
+locking requirements for functions aren't documented in the kerneldoc.
+Instead, locking should be checked at runtime using e.g.
+``WARN_ON(!mutex_is_locked(...));``. Since it's much easier to ignore
+documentation than runtime noise this provides more value. And on top of
+that runtime checks do need to be updated when the locking rules change,
+increasing the chances that they're correct. Within the documentation
+the locking rules should be explained in the relevant structures: Either
+in the comment for the lock explaining what it protects, or data fields
+need a note about which lock protects them, or both.
+
+Functions which have a non-\ ``void`` return value should have a section
+called "Returns" explaining the expected return values in different
+cases and their meanings. Currently there's no consensus whether that
+section name should be all upper-case or not, and whether it should end
+in a colon or not. Go with the file-local style. Other common section
+names are "Notes" with information for dangerous or tricky corner cases,
+and "FIXME" where the interface could be cleaned up.
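+
+A short, made-up kerneldoc comment that exercises the conventions above
+(the function, structure and lock names are purely illustrative)::
+
+    /**
+     * foo_enable_output() - enable scanout on an output
+     * @output: the &struct foo_output to enable
+     * @mode: display mode to program, must have passed foo_validate_mode()
+     *
+     * Callers must hold @output->lock; per the guidelines above this is
+     * enforced with WARN_ON(!mutex_is_locked(&output->lock)) in the code
+     * rather than spelled out in more kerneldoc.
+     *
+     * Returns:
+     * 0 on success or a negative error code on failure.
+     */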
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
new file mode 100644
index 000000000000..4c5ce3edcfd9
--- /dev/null
+++ b/Documentation/gpu/kms-properties.csv
@@ -0,0 +1,129 @@
+Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,Description/Restrictions
+DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, { 2, ""rotate-180"" }, { 3, ""rotate-270"" }, { 4, ""reflect-x"" }, { 5, ""reflect-y"" }","CRTC, Plane",rotate-(degrees) rotates the image by the specified amount in degrees in counter clockwise direction. reflect-x and reflect-y reflects the image along the specified axis prior to rotation
+,,“scaling mode”,ENUM,"{ ""None"", ""Full"", ""Center"", ""Full aspect"" }",Connector,"Supported by: amdgpu, gma500, i915, nouveau and radeon."
+,Connector,“EDID”,BLOB | IMMUTABLE,0,Connector,Contains id of edid blob ptr object.
+,,“DPMS”,ENUM,"{ “On”, “Standby”, “Suspend”, “Off” }",Connector,Contains DPMS operation mode value.
+,,“PATH”,BLOB | IMMUTABLE,0,Connector,Contains topology path to a connector.
+,,“TILE”,BLOB | IMMUTABLE,0,Connector,Contains tiling information for a connector.
+,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Connector,CRTC that connector is attached to (atomic)
+,Plane,“type”,ENUM | IMMUTABLE,"{ ""Overlay"", ""Primary"", ""Cursor"" }",Plane,Plane type
+,,“SRC_X”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source x coordinate in 16.16 fixed point (atomic)
+,,“SRC_Y”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source y coordinate in 16.16 fixed point (atomic)
+,,“SRC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source width in 16.16 fixed point (atomic)
+,,“SRC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source height in 16.16 fixed point (atomic)
+,,“CRTC_X”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) x coordinate (atomic)
+,,“CRTC_Y”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) y coordinate (atomic)
+,,“CRTC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) width (atomic)
+,,“CRTC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) height (atomic)
+,,“FB_ID”,OBJECT,DRM_MODE_OBJECT_FB,Plane,Scanout framebuffer (atomic)
+,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Plane,CRTC that plane is attached to (atomic)
+,,“zpos”,RANGE,"Min=0, Max=UINT_MAX",Plane,"Z-order of the plane. Planes with higher Z-order values are displayed on top, planes with identical Z-order values are displayed in an undefined order"
+,DVI-I,“subconnector”,ENUM,"{ “Unknown”, “DVI-D”, “DVI-A” }",Connector,TBD
+,,“select subconnector”,ENUM,"{ “Automatic”, “DVI-D”, “DVI-A” }",Connector,TBD
+,TV,“subconnector”,ENUM,"{ ""Unknown"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
+,,“select subconnector”,ENUM,"{ ""Automatic"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
+,,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
+,,“left margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“right margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“top margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“bottom margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“brightness”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“contrast”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“flicker reduction”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“overscan”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“saturation”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“hue”,RANGE,"Min=0, Max=100",Connector,TBD
+,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
+,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest a Y offset for a connector
+,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
+,,“dirty”,ENUM | IMMUTABLE,"{ ""Off"", ""On"", ""Annotate"" }",Connector,TBD
+,,“DEGAMMA_LUT”,BLOB,0,CRTC,DRM property to set the degamma lookup table (LUT) mapping pixel data from the framebuffer before it is given to the transformation matrix. The data is interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
+,,“DEGAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property that gives the size of the lookup table to be set on the DEGAMMA_LUT property (the size depends on the underlying hardware).
+,,“CTM”,BLOB,0,CRTC,DRM property to set the current transformation matrix (CTM) applied to pixel data after the lookup through the degamma LUT and before the lookup through the gamma LUT. The data is interpreted as a struct drm_color_ctm.
+,,“GAMMA_LUT”,BLOB,0,CRTC,DRM property to set the gamma lookup table (LUT) mapping pixel data after the transformation matrix to data sent to the connector. The data is interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
+,,“GAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property that gives the size of the lookup table to be set on the GAMMA_LUT property (the size depends on the underlying hardware).
+i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
+,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
+,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
+,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""right_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""top_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""bottom_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“vpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“contrast”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“saturation”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hue”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“sharpness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_adaptive”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_2d”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_chroma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_luma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
+,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+CDV gma-500,Generic,"""Broadcast RGB""",ENUM,"{ “Full”, “Limited 16:235” }",Connector,TBD
+,,"""Broadcast RGB""",ENUM,"{ “off”, “auto”, “on” }",Connector,TBD
+Poulsbo,Generic,“backlight”,RANGE,"Min=0, Max=100",Connector,TBD
+,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
+,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""right_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""top_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""bottom_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“vpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“contrast”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“saturation”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hue”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“sharpness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_adaptive”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_2d”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_chroma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_luma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
+,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+armada,CRTC,"""CSC_YUV""",ENUM,"{ ""Auto"" , ""CCIR601"", ""CCIR709"" }",CRTC,TBD
+,,"""CSC_RGB""",ENUM,"{ ""Auto"", ""Computer system"", ""Studio"" }",CRTC,TBD
+,Overlay,"""colorkey""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_min""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_max""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_val""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_alpha""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_mode""",ENUM,"{ ""disabled"", ""Y component"", ""U component"", ""V component"", ""RGB"", ""R component"", ""G component"", ""B component"" }",Plane,TBD
+,,"""brightness""",RANGE,"Min=0, Max=256 + 255",Plane,TBD
+,,"""contrast""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
+,,"""saturation""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
+exynos,CRTC,“mode”,ENUM,"{ ""normal"", ""blank"" }",CRTC,TBD
+,Overlay,“zpos”,RANGE,"Min=0, Max=MAX_PLANE-1",Plane,TBD
+i2c/ch7006_drv,Generic,“scale”,RANGE,"Min=0, Max=2",Connector,TBD
+,TV,“mode”,ENUM,"{ ""PAL"", ""PAL-M"", ""PAL-N"", ""PAL-Nc"", ""PAL-60"", ""NTSC-M"", ""NTSC-J"" }",Connector,TBD
+nouveau,NV10 Overlay,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
+,,“contrast”,RANGE,"Min=0, Max=8192-1",Plane,TBD
+,,“brightness”,RANGE,"Min=0, Max=1024",Plane,TBD
+,,“hue”,RANGE,"Min=0, Max=359",Plane,TBD
+,,“saturation”,RANGE,"Min=0, Max=8192-1",Plane,TBD
+,,“iturbt_709”,RANGE,"Min=0, Max=1",Plane,TBD
+,Nv04 Overlay,“colorkey”,RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
+,,“brightness”,RANGE,"Min=0, Max=1024",Plane,TBD
+,Display,“dithering mode”,ENUM,"{ ""auto"", ""off"", ""on"" }",Connector,TBD
+,,“dithering depth”,ENUM,"{ ""auto"", ""off"", ""on"", ""static 2x2"", ""dynamic 2x2"", ""temporal"" }",Connector,TBD
+,,“underscan”,ENUM,"{ ""auto"", ""6 bpc"", ""8 bpc"" }",Connector,TBD
+,,“underscan hborder”,RANGE,"Min=0, Max=128",Connector,TBD
+,,“underscan vborder”,RANGE,"Min=0, Max=128",Connector,TBD
+,,“vibrant hue”,RANGE,"Min=0, Max=180",Connector,TBD
+,,“color vibrance”,RANGE,"Min=0, Max=200",Connector,TBD
+omap,Generic,“zorder”,RANGE,"Min=0, Max=3","CRTC, Plane",TBD
+qxl,Generic,"""hotplug_mode_update""",RANGE,"Min=0, Max=1",Connector,TBD
+radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
+,DAC enable load detect,“load detection”,RANGE,"Min=0, Max=1",Connector,TBD
+,TV Standard,"""tv standard""",ENUM,"{ ""ntsc"", ""pal"", ""pal-m"", ""pal-60"", ""ntsc-j"" , ""scart-pal"", ""pal-cn"", ""secam"" }",Connector,TBD
+,legacy TMDS PLL detect,"""tmds_pll""",ENUM,"{ ""driver"", ""bios"" }",-,TBD
+,Underscan,"""underscan""",ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
+,,"""underscan hborder""",RANGE,"Min=0, Max=128",Connector,TBD
+,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD
+,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
+,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
+rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
+,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
+,,"""zpos""",RANGE,"Min=1, Max=7",Plane,TBD
diff --git a/Documentation/gpu/vga-switcheroo.rst b/Documentation/gpu/vga-switcheroo.rst
new file mode 100644
index 000000000000..cbbdb994f1dd
--- /dev/null
+++ b/Documentation/gpu/vga-switcheroo.rst
@@ -0,0 +1,98 @@
+.. _vga_switcheroo:
+
+==============
+VGA Switcheroo
+==============
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+ :doc: Overview
+
+Modes of Use
+============
+
+Manual switching and manual power control
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+ :doc: Manual switching and manual power control
+
+Driver power control
+--------------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+ :doc: Driver power control
+
+API
+===
+
+Public functions
+----------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+ :export:
+
+Public structures
+-----------------
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+ :functions: vga_switcheroo_handler
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+ :functions: vga_switcheroo_client_ops
+
+Public constants
+----------------
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+ :functions: vga_switcheroo_handler_flags_t
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+ :functions: vga_switcheroo_client_id
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+ :functions: vga_switcheroo_state
+
+Private structures
+------------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+ :functions: vgasr_priv
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+ :functions: vga_switcheroo_client
+
+Handlers
+========
+
+apple-gmux Handler
+------------------
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+ :doc: Overview
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+ :doc: Interrupt
+
+Graphics mux
+~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+ :doc: Graphics mux
+
+Power control
+~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+ :doc: Power control
+
+Backlight control
+~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+ :doc: Backlight control
+
+Public functions
+~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/apple-gmux.h
+ :internal:
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 43c722f15292..e0fc72963e87 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -18,6 +18,7 @@ Contents:
media/media_kapi
media/dvb-drivers/index
media/v4l-drivers/index
+ gpu/index
Indices and tables
==================
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 56af5e43e9c0..81c7f2bb7daf 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -248,7 +248,7 @@ Code Seq#(hex) Include File Comments
'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
'm' 00-1F net/irda/irmod.h conflict!
'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c
-'n' 80-8F linux/nilfs2_fs.h NILFS2
+'n' 80-8F uapi/linux/nilfs2_api.h NILFS2
'n' E0-FF linux/matroxfb.h matroxfb
'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2
'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 769db8399ac8..46c030a49186 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -931,9 +931,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
dhash_entries= [KNL]
Set number of hash buckets for dentry cache.
+ disable_1tb_segments [PPC]
+ Disables the use of 1TB hash page table segments. This
+ causes the kernel to fall back to 256MB segments which
+ can be useful when debugging issues that require an SLB
+ miss to occur.
+
disable= [IPV6]
See Documentation/networking/ipv6.txt.
+ disable_radix [PPC]
+ Disable RADIX MMU mode on POWER9
+
disable_cpu_apicid= [X86,APIC,SMP]
Format: <int>
The number of initial APIC ID for the
@@ -2311,6 +2320,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Note that if CONFIG_MODULE_SIG_FORCE is set, that
is always true, so this option does nothing.
+ module_blacklist= [KNL] Do not load a comma-separated list of
+ modules. Useful for debugging problem modules.
+
mousedev.tap_time=
[MOUSE] Maximum time between finger touching and
leaving touchpad surface for touch to be considered
@@ -3012,6 +3024,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
resource_alignment=
Format:
[<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
+ [<order of align>@]pci:<vendor>:<device>\
+ [:<subvendor>:<subdevice>][; ...]
Specifies alignment and device to reassign
aligned memory resources.
If <order of align> is not specified,
@@ -3030,6 +3044,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
hpmemsize=nn[KMG] The fixed amount of bus space which is
reserved for hotplug bridge's memory window.
Default size is 2 megabytes.
+ hpbussize=nn The minimum amount of additional bus numbers
+ reserved for buses below a hotplug bridge.
+ Default is 1.
realloc= Enable/disable reallocating PCI bridge resources
if allocations done by BIOS are too small to
accommodate resources required by all child
@@ -3061,6 +3078,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
ports driver.
+ pcie_port_pm= [PCIE] PCIe port power management handling:
+ off Disable power management of all PCIe ports
+ force Forcibly enable power management of all PCIe ports
+
pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
nomsi Do not use MSI for native PCIe PME signaling (this makes
all PCIe root ports use INTx for all services).
@@ -3164,6 +3185,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
default: disabled
+ printk.devkmsg={on,off,ratelimit}
+ Control writing to /dev/kmsg.
+ on - unlimited logging to /dev/kmsg from userspace
+ off - logging to /dev/kmsg disabled
+ ratelimit - ratelimit the logging
+ Default: ratelimit
+
printk.time= Show timing data prefixed to each printk message line
Format: <bool> (1/Y/y=enable, 0/N/n=disable)
@@ -3561,7 +3589,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
relax_domain_level=
[KNL, SMP] Set scheduler's default relax_domain_level.
- See Documentation/cgroups/cpusets.txt.
+ See Documentation/cgroup-v1/cpusets.txt.
relative_sleep_states=
[SUSPEND] Use sleep state labeling where the deepest
@@ -3849,6 +3877,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
using these two parameters to set the minimum and
maximum port values.
+ sunrpc.svc_rpc_per_connection_limit=
+ [NFS,SUNRPC]
+ Limit the number of requests that the server will
+ process in parallel from a single connection.
+ The default value is 0 (no limit).
+
sunrpc.pool_mode=
[NFS]
Control how the NFS server code allocates CPUs to
@@ -3884,7 +3918,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
swapaccount=[0|1]
[KNL] Enable accounting of swap in memory resource
controller if no parameter or 1 is given or disable
- it if 0 is given (See Documentation/cgroups/memory.txt)
+ it if 0 is given (See Documentation/cgroup-v1/memory.txt)
swiotlb= [ARM,IA-64,PPC,MIPS,X86]
Format: { <int> | force }
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
index edec3a3e648d..bbc3a8b8cff4 100644
--- a/Documentation/kernel-per-CPU-kthreads.txt
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -10,7 +10,7 @@ REFERENCES
o Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs.
-o Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs.
+o Documentation/cgroup-v1: Using cgroups to bind tasks to sets of CPUs.
o man taskset: Using the taskset command to bind tasks to sets
of CPUs.
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index caa555706f89..404a0e9e92b0 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -28,6 +28,8 @@ All attributes are read-only.
preferred_erase_size Preferred erase size
raw_rpmb_size_mult RPMB partition size
rel_sectors Reliable write sector count
+ ocr Operation Conditions Register
+ dsr Driver Stage Register
Note on Erase Size and Preferred Erase Size:
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index 696d5caf4fd8..f0e3361db20c 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use
the private key to sign modules and compromise the operating system. The
private key must be either destroyed or moved to a secure location and not kept
in the root node of the kernel source tree.
+
+If you use the same private key to sign modules for multiple kernel
+configurations, you must ensure that the module version information is
+sufficient to prevent loading a module into a different kernel. Either
+set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
+kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
index 9264bcab4099..26b9f31cf65a 100644
--- a/Documentation/ramoops.txt
+++ b/Documentation/ramoops.txt
@@ -45,18 +45,34 @@ corrupt, but usually it is restorable.
2. Setting the parameters
-Setting the ramoops parameters can be done in 3 different manners:
- 1. Use the module parameters (which have the names of the variables described
- as before).
- For quick debugging, you can also reserve parts of memory during boot
- and then use the reserved memory for ramoops. For example, assuming a machine
- with > 128 MB of memory, the following kernel command line will tell the
- kernel to use only the first 128 MB of memory, and place ECC-protected ramoops
- region at 128 MB boundary:
+Setting the ramoops parameters can be done in several different ways:
+
+ A. Use the module parameters (which have the names of the variables described
+ as before). For quick debugging, you can also reserve parts of memory during
+ boot and then use the reserved memory for ramoops. For example, assuming a
+ machine with > 128 MB of memory, the following kernel command line will tell
+ the kernel to use only the first 128 MB of memory, and place ECC-protected
+ ramoops region at 128 MB boundary:
"mem=128M ramoops.mem_address=0x8000000 ramoops.ecc=1"
- 2. Use Device Tree bindings, as described in
- Documentation/device-tree/bindings/misc/ramoops.txt.
- 3. Use a platform device and set the platform data. The parameters can then
+
+ B. Use Device Tree bindings, as described in
+ Documentation/device-tree/bindings/reserved-memory/ramoops.txt.
+ For example:
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ramoops@8f000000 {
+ compatible = "ramoops";
+ reg = <0 0x8f000000 0 0x100000>;
+ record-size = <0x4000>;
+ console-size = <0x4000>;
+ };
+ };
+
+ C. Use a platform device and set the platform data. The parameters can then
be set through that platform data. An example of doing that is:
#include <linux/pstore_ram.h>
diff --git a/Documentation/rapidio/mport_cdev.txt b/Documentation/rapidio/mport_cdev.txt
index 20c120d4b3b8..6e491a662461 100644
--- a/Documentation/rapidio/mport_cdev.txt
+++ b/Documentation/rapidio/mport_cdev.txt
@@ -82,8 +82,7 @@ III. Module parameters
- 'dbg_level' - This parameter allows to control amount of debug information
generated by this device driver. This parameter is formed by set of
- This parameter can be changed bit masks that correspond to the specific
- functional block.
+ bit masks that correspond to the specific functional blocks.
For mask definitions see 'drivers/rapidio/devices/rio_mport_cdev.c'
This parameter can be changed dynamically.
Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level.
diff --git a/Documentation/rapidio/rio_cm.txt b/Documentation/rapidio/rio_cm.txt
new file mode 100644
index 000000000000..27aa401f1126
--- /dev/null
+++ b/Documentation/rapidio/rio_cm.txt
@@ -0,0 +1,119 @@
+RapidIO subsystem Channelized Messaging character device driver (rio_cm.c)
+==========================================================================
+
+Version History:
+----------------
+ 1.0.0 - Initial driver release.
+
+==========================================================================
+
+I. Overview
+
+This device driver is the result of collaboration within the RapidIO.org
+Software Task Group (STG) between Texas Instruments, Prodrive Technologies,
+Nokia Networks, BAE and IDT. Additional input was received from other members
+of RapidIO.org.
+
+The objective was to create a character mode driver interface which exposes
+messaging capabilities of RapidIO endpoint devices (mports) directly
+to applications, in a manner that allows the numerous and varied RapidIO
+implementations to interoperate.
+
+This driver (RIO_CM) provides user-space applications with shared access to
+RapidIO mailbox messaging resources.
+
+The RapidIO specification (Part 2) defines that endpoint devices may have up to
+four messaging mailboxes in the case of multi-packet messages (up to 4KB) and
+up to 64 mailboxes if single-packet messages (up to 256 B) are used. In addition
+to these protocol limitations, a particular hardware implementation can
+have a reduced number of messaging mailboxes. RapidIO-aware applications must
+therefore share the messaging resources of a RapidIO endpoint.
+
+The main purpose of this device driver is to provide RapidIO mailbox messaging
+capability to a large number of user-space processes by introducing socket-like
+operations using a single messaging mailbox. This allows applications to
+use the limited RapidIO messaging hardware resources efficiently.
+
+Most of the device driver's operations are supported through 'ioctl' system calls.
+
+When loaded, this device driver creates a single file system node named rio_cm
+in the /dev directory, common to all registered RapidIO mport devices.
+
+The following ioctl commands are available to user-space applications (a usage
+sketch follows the list):
+
+- RIO_CM_MPORT_GET_LIST : Returns to the caller a list of local mport devices
+  that support messaging operations (up to RIO_MAX_MPORTS entries). Each list
+  entry is a combination of the mport's index in the system and the RapidIO
+  destination ID assigned to the port.
+- RIO_CM_EP_GET_LIST_SIZE : Returns the number of messaging-capable remote
+  endpoints in a RapidIO network associated with the specified mport device.
+- RIO_CM_EP_GET_LIST : Returns a list of RapidIO destination IDs for
+  messaging-capable remote endpoints (peers) available in a RapidIO network
+  associated with the specified mport device.
+- RIO_CM_CHAN_CREATE : Creates a RapidIO message exchange channel data
+  structure with a channel ID assigned automatically or as requested by the
+  caller.
+- RIO_CM_CHAN_BIND : Binds the specified channel data structure to the
+  specified mport device.
+- RIO_CM_CHAN_LISTEN : Enables listening for connection requests on the
+  specified channel.
+- RIO_CM_CHAN_ACCEPT : Accepts a connection request from a peer on the
+  specified channel. If a wait timeout for this request is specified by the
+  caller, this is a blocking call. If the timeout is set to 0, this is a
+  non-blocking call - the ioctl handler checks for a pending connection
+  request and, if none is available, exits immediately with the -EAGAIN
+  error status.
+- RIO_CM_CHAN_CONNECT : Sends a connection request to a remote peer/channel.
+- RIO_CM_CHAN_SEND : Sends a data message through the specified channel.
+  The handler for this request assumes that the message buffer specified by
+  the caller includes the reserved space for a packet header required by
+  this driver.
+- RIO_CM_CHAN_RECEIVE : Receives a data message through a connected channel.
+  If the channel does not have an incoming message ready to return, this
+  ioctl handler will wait for a new message until the timeout specified by
+  the caller expires. If the timeout value is set to 0, the ioctl handler
+  uses a default value defined by MAX_SCHEDULE_TIMEOUT.
+- RIO_CM_CHAN_CLOSE : Closes the specified channel and frees associated
+  buffers. If the specified channel is in the CONNECTED state, a close
+  notification is sent to the remote peer.
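+
+A hypothetical sketch of a typical client call sequence is shown below. The
+ioctl names are the ones listed above, but the argument variables are only
+placeholders for the structures defined in 'include/uapi/linux/rio_cm_cdev.h',
+and error handling is omitted:
+
+  int fd = open("/dev/rio_cm", O_RDWR);           /* single node for all mports */
+
+  ioctl(fd, RIO_CM_CHAN_CREATE, &create_args);    /* create a channel */
+  ioctl(fd, RIO_CM_CHAN_BIND, &bind_args);        /* bind it to a local mport */
+  ioctl(fd, RIO_CM_CHAN_CONNECT, &connect_args);  /* connect to a remote peer/channel */
+  ioctl(fd, RIO_CM_CHAN_SEND, &send_args);        /* buffer reserves packet header space */
+  ioctl(fd, RIO_CM_CHAN_RECEIVE, &recv_args);     /* blocks up to the caller's timeout */
+  ioctl(fd, RIO_CM_CHAN_CLOSE, &close_args);      /* notifies the peer if CONNECTED */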
+
+The ioctl command codes and corresponding data structures intended for use by
+user-space applications are defined in 'include/uapi/linux/rio_cm_cdev.h'.
+
+II. Hardware Compatibility
+
+This device driver uses standard interfaces defined by the kernel RapidIO
+subsystem and therefore can be used with any mport device driver registered
+by the RapidIO subsystem, within the limitations set by the available mport
+hardware implementation of messaging mailboxes.
+
+III. Module parameters
+
+- 'dbg_level' - This parameter allows controlling the amount of debug information
+  generated by this device driver. This parameter is formed by a set of
+  bit masks that correspond to specific functional blocks.
+ For mask definitions see 'drivers/rapidio/devices/rio_cm.c'
+ This parameter can be changed dynamically.
+ Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level.
+
+- 'cmbox' - Number of the RapidIO mailbox to use (default value is 1).
+  This parameter allows setting the messaging mailbox number that will be
+  used within the entire RapidIO network. It can be used when the default
+  mailbox is used by other device drivers or is not supported by some nodes
+  in the RapidIO network.
+
+- 'chstart' - Start channel number for dynamic assignment. Default value - 256.
+  Allows excluding channel numbers below this value from dynamic
+  assignment to avoid conflicts with software components that use
+  reserved predefined channel numbers.
+
+IV. Known problems
+
+ None.
+
+V. User-space Applications and API Library
+
+Messaging API library and applications that use this device driver are available
+from RapidIO.org.
+
+VI. TODO List
+
+- Add support for system notification messages (reserved channel 0).
diff --git a/Documentation/rapidio/tsi721.txt b/Documentation/rapidio/tsi721.txt
index 7c1c7bf48ec0..cd2a2935d51d 100644
--- a/Documentation/rapidio/tsi721.txt
+++ b/Documentation/rapidio/tsi721.txt
@@ -25,6 +25,32 @@ fully compatible with RIONET driver (Ethernet over RapidIO messaging services).
This parameter can be changed dynamically.
Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level.
+- 'dma_desc_per_channel' - This parameter defines number of hardware buffer
+ descriptors allocated for each registered Tsi721 DMA channel.
+ Its default value is 128.
+
+- 'dma_txqueue_sz' - DMA transactions queue size. Defines number of pending
+ transaction requests that can be accepted by each DMA channel.
+ Default value is 16.
+
+- 'dma_sel' - DMA channel selection mask. Bitmask that defines which hardware
+ DMA channels (0 ... 6) will be registered with DmaEngine core.
+ If bit is set to 1, the corresponding DMA channel will be registered.
+ DMA channels not selected by this mask will not be used by this device
+ driver. Default value is 0x7f (use all channels).
+
+- 'pcie_mrrs' - override value for PCIe Maximum Read Request Size (MRRS).
+  This parameter provides the ability to override the MRRS value set during
+  the PCIe configuration process. Tsi721 supports read request sizes up to
+  4096B. The value for this parameter must be set as defined by the PCIe
+  specification: 0 = 128B, 1 = 256B, 2 = 512B, 3 = 1024B, 4 = 2048B and
+  5 = 4096B. Default value is '-1' (= keep platform setting).
+
+- 'mbox_sel' - RIO messaging MBOX selection mask. This is a bitmask that
+  defines which messaging MBOXes are managed by this device driver. Mask
+  bits 0 - 3 correspond to MBOX0 - MBOX3. An MBOX is under the driver's
+  control if the corresponding bit is set to '1'. Default value is 0x0f
+  (= all).
+
II. Known problems
None.
diff --git a/Documentation/scheduler/sched-deadline.txt b/Documentation/scheduler/sched-deadline.txt
index e114513a2731..53a2fe1ae8b8 100644
--- a/Documentation/scheduler/sched-deadline.txt
+++ b/Documentation/scheduler/sched-deadline.txt
@@ -431,7 +431,7 @@ CONTENTS
-deadline tasks cannot have an affinity mask smaller that the entire
root_domain they are created on. However, affinities can be specified
- through the cpuset facility (Documentation/cgroups/cpusets.txt).
+ through the cpuset facility (Documentation/cgroup-v1/cpusets.txt).
5.1 SCHED_DEADLINE and cpusets HOWTO
------------------------------------
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index f14f49304222..edd861c94c1b 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -215,7 +215,7 @@ SCHED_BATCH) tasks.
These options need CONFIG_CGROUPS to be defined, and let the administrator
create arbitrary groups of tasks, using the "cgroup" pseudo filesystem. See
- Documentation/cgroups/cgroups.txt for more information about this filesystem.
+ Documentation/cgroup-v1/cgroups.txt for more information about this filesystem.
When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
group created using the pseudo filesystem. See example steps below to create
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 71b54d549987..a03f0d944fe6 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -133,7 +133,7 @@ This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
to control the CPU time reserved for each control group.
For more information on working with control groups, you should read
-Documentation/cgroups/cgroups.txt as well.
+Documentation/cgroup-v1/cgroups.txt as well.
Group settings are checked against the following limits in order to keep the
configuration schedulable:
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 74056dba52be..6bf2d2063b52 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -3,7 +3,7 @@ ASoC Machine Driver
The ASoC machine (or board) driver is the code that glues together all the
component drivers (e.g. codecs, platforms and DAIs). It also describes the
-relationships between each componnent which include audio paths, GPIOs,
+relationships between each component which include audio paths, GPIOs,
interrupts, clocking, jacks and voltage regulators.
The machine driver can contain codec and platform specific code. It registers
diff --git a/Documentation/sound/alsa/timestamping.txt b/Documentation/sound/alsa/timestamping.txt
index 1b6473f393a8..9d579aefbffd 100644
--- a/Documentation/sound/alsa/timestamping.txt
+++ b/Documentation/sound/alsa/timestamping.txt
@@ -14,7 +14,7 @@ provides a refined estimate with a delay.
event or application query.
The difference (tstamp - trigger_tstamp) defines the elapsed time.
-The ALSA API provides reports two basic pieces of information, avail
+The ALSA API provides two basic pieces of information, avail
and delay, which combined with the trigger and current system
timestamps allow for applications to keep track of the 'fullness' of
the ring buffer and the amount of queued samples.
@@ -53,21 +53,21 @@ case):
The analog time is taken at the last stage of the playback, as close
as possible to the actual transducer
-The link time is taken at the output of the SOC/chipset as the samples
+The link time is taken at the output of the SoC/chipset as the samples
are pushed on a link. The link time can be directly measured if
supported in hardware by sample counters or wallclocks (e.g. with
HDAudio 24MHz or PTP clock for networked solutions) or indirectly
estimated (e.g. with the frame counter in USB).
The DMA time is measured using counters - typically the least reliable
-of all measurements due to the bursty natured of DMA transfers.
+of all measurements due to the bursty nature of DMA transfers.
The app time corresponds to the time tracked by an application after
writing in the ring buffer.
-The application can query what the hardware supports, define which
+The application can query the hardware capabilities, define which
audio time it wants reported by selecting the relevant settings in
-audio_tstamp_config fields, get an estimate of the timestamp
+audio_tstamp_config fields, and thus get an estimate of the timestamp
accuracy. It can also request the delay-to-analog be included in the
measurement. Direct access to the link time is very interesting on
platforms that provide an embedded DSP; measuring directly the link
@@ -169,7 +169,7 @@ playback: systime: 938107562 nsec, audio time 938112708 nsec, systime delta -51
Example 1 shows that the timestamp at the DMA level is close to 1ms
ahead of the actual playback time (as a side time this sort of
measurement can help define rewind safeguards). Compensating for the
-DMA-link delay in example 2 helps remove the hardware buffering abut
+DMA-link delay in example 2 helps remove the hardware buffering but
the information is still very jittery, with up to one sample of
error. In example 3 where the timestamps are measured with the link
wallclock, the timestamps show a monotonic behavior and a lower
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 33204604de6c..ffab8b5caa60 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -764,6 +764,20 @@ send before ratelimiting kicks in.
==============================================================
+printk_devkmsg:
+
+Control the logging to /dev/kmsg from userspace:
+
+ratelimit: default, ratelimited
+on: unlimited logging to /dev/kmsg from userspace
+off: logging to /dev/kmsg disabled
+
+The kernel command line parameter printk.devkmsg= overrides this and is
+a one-time setting until next reboot: once set, it cannot be changed by
+this sysctl interface anymore.
+
+==============================================================
+
randomize_va_space:
This option can be used to select the type of process address
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a4482cce4bae..739db9ab16b2 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1433,13 +1433,16 @@ KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
-Architectures: x86 s390
+Architectures: x86 s390 arm arm64
Type: vm ioctl
Parameters: struct kvm_irq_routing (in)
Returns: 0 on success, -1 on error
Sets the GSI routing table entries, overwriting any previously set entries.
+On arm/arm64, GSI routing has the following limitation:
+- GSI routing does not apply to KVM_IRQ_LINE but only to KVM_IRQFD.
+
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
@@ -1468,7 +1471,13 @@ struct kvm_irq_routing_entry {
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
-No flags are specified so far, the corresponding field must be set to zero.
+flags:
+- KVM_MSI_VALID_DEVID: used along with KVM_IRQ_ROUTING_MSI routing entry
+ type, specifies that the devid field contains a valid value. The per-VM
+ KVM_CAP_MSI_DEVID capability advertises the requirement to provide
+ the device ID. If this capability is not available, userspace should
+ never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
+- zero otherwise
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1479,9 +1488,21 @@ struct kvm_irq_routing_msi {
__u32 address_lo;
__u32 address_hi;
__u32 data;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 devid;
+ };
};
+If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
+for the device that wrote the MSI message. For PCI, this is usually a
+BDF (bus/device/function) identifier in the lower 16 bits.
+
+On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
+feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
+address_hi bits 31-8 provide bits 31-8 of the destination id. Bits 7-0 of
+address_hi must be zero.
+
struct kvm_irq_routing_s390_adapter {
__u64 ind_addr;
__u64 summary_addr;
@@ -1583,6 +1604,17 @@ struct kvm_lapic_state {
Reads the Local APIC registers and copies them into the input argument. The
data format and layout are the same as documented in the architecture manual.
+If KVM_X2APIC_API_USE_32BIT_IDS feature of KVM_CAP_X2APIC_API is
+enabled, then the format of APIC_ID register depends on the APIC mode
+(reported by MSR_IA32_APICBASE) of its VCPU. x2APIC stores APIC ID in
+the APIC_ID register (bytes 32-35). xAPIC only allows an 8-bit APIC ID
+which is stored in bits 31-24 of the APIC register, or equivalently in
+byte 35 of struct kvm_lapic_state's regs field. KVM_GET_LAPIC must then
+be called after MSR_IA32_APICBASE has been set with KVM_SET_MSR.
+
+If KVM_X2APIC_API_USE_32BIT_IDS feature is disabled, struct kvm_lapic_state
+always uses xAPIC format.
+
4.58 KVM_SET_LAPIC
@@ -1600,6 +1632,10 @@ struct kvm_lapic_state {
Copies the input argument into the Local APIC registers. The data format
and layout are the same as documented in the architecture manual.
+The format of the APIC ID register (bytes 32-35 of struct kvm_lapic_state's
+regs field) depends on the state of the KVM_CAP_X2APIC_API capability.
+See the note in KVM_GET_LAPIC.
+
4.59 KVM_IOEVENTFD
@@ -2032,6 +2068,12 @@ registers, find a list below:
MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32
MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32
MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64
+ MIPS | KVM_REG_MIPS_CP0_KSCRATCH1 | 64
+ MIPS | KVM_REG_MIPS_CP0_KSCRATCH2 | 64
+ MIPS | KVM_REG_MIPS_CP0_KSCRATCH3 | 64
+ MIPS | KVM_REG_MIPS_CP0_KSCRATCH4 | 64
+ MIPS | KVM_REG_MIPS_CP0_KSCRATCH5 | 64
+ MIPS | KVM_REG_MIPS_CP0_KSCRATCH6 | 64
MIPS | KVM_REG_MIPS_COUNT_CTL | 64
MIPS | KVM_REG_MIPS_COUNT_RESUME | 64
MIPS | KVM_REG_MIPS_COUNT_HZ | 64
@@ -2156,7 +2198,7 @@ after pausing the vcpu, but before it is resumed.
4.71 KVM_SIGNAL_MSI
Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86
+Architectures: x86 arm64
Type: vm ioctl
Parameters: struct kvm_msi (in)
Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
@@ -2169,10 +2211,23 @@ struct kvm_msi {
__u32 address_hi;
__u32 data;
__u32 flags;
- __u8 pad[16];
+ __u32 devid;
+ __u8 pad[12];
};
-No flags are defined so far. The corresponding field must be 0.
+flags: KVM_MSI_VALID_DEVID: devid contains a valid value. The per-VM
+ KVM_CAP_MSI_DEVID capability advertises the requirement to provide
+ the device ID. If this capability is not available, userspace
+ should never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
+
+If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
+for the device that wrote the MSI message. For PCI, this is usually a
+BDF (bus/device/function) identifier in the lower 16 bits.
+
+On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
+feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
+address_hi bits 31-8 provide bits 31-8 of the destination id. Bits 7-0 of
+address_hi must be zero.
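+
+A minimal sketch (illustrative values only; vm_fd is assumed to be an open VM
+file descriptor and the caller is assumed to have checked KVM_CAP_SIGNAL_MSI
+and, for the devid field, KVM_CAP_MSI_DEVID):
+
+        struct kvm_msi msi = {
+                .address_lo = 0xfee00000,               /* example MSI address */
+                .data       = 0x0021,                   /* example vector/delivery bits */
+                .flags      = KVM_MSI_VALID_DEVID,
+                .devid      = (bus << 8) | devfn,       /* PCI bus/device/function */
+        };
+        int ret = ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
+        /* ret > 0: delivered, ret == 0: guest blocked the MSI, -1: error */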
4.71 KVM_CREATE_PIT2
@@ -2345,9 +2400,13 @@ Note that closing the resamplefd is not sufficient to disable the
irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment
and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.
-On ARM/ARM64, the gsi field in the kvm_irqfd struct specifies the Shared
-Peripheral Interrupt (SPI) index, such that the GIC interrupt ID is
-given by gsi + 32.
+On arm/arm64, now that GSI routing is supported, the following can happen:
+- in case no routing entry is associated with this gsi, injection fails
+- in case the gsi is associated with an irqchip routing entry,
+  irqchip.pin + 32 corresponds to the injected SPI ID.
+- in case the gsi is associated with an MSI routing entry, the MSI
+  message and device ID are translated into an LPI (support restricted
+  to GICv3 ITS in-kernel emulation).
4.76 KVM_PPC_ALLOCATE_HTAB
@@ -2520,6 +2579,7 @@ Parameters: struct kvm_device_attr
Returns: 0 on success, -1 on error
Errors:
ENXIO: The group or attribute is unknown/unsupported for this device
+ or hardware support is missing.
EPERM: The attribute cannot (currently) be accessed this way
(e.g. read-only attribute, or attribute that only makes
sense when the device is in a different state)
@@ -2547,6 +2607,7 @@ Parameters: struct kvm_device_attr
Returns: 0 on success, -1 on error
Errors:
ENXIO: The group or attribute is unknown/unsupported for this device
+ or hardware support is missing.
Tests whether a device supports a particular attribute. A successful
return indicates the attribute is implemented. It does not necessarily
@@ -3803,6 +3864,42 @@ Allows use of runtime-instrumentation introduced with zEC12 processor.
Will return -EINVAL if the machine does not support runtime-instrumentation.
Will return -EBUSY if a VCPU has already been created.
+7.7 KVM_CAP_X2APIC_API
+
+Architectures: x86
+Parameters: args[0] - features that should be enabled
+Returns: 0 on success, -EINVAL when args[0] contains invalid features
+
+Valid feature flags in args[0] are
+
+#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+
+Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
+KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
+allowing the use of 32-bit APIC IDs. See KVM_CAP_X2APIC_API in their
+respective sections.
+
+KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK must be enabled for x2APIC to work
+in logical mode or with more than 255 VCPUs. Otherwise, KVM treats 0xff
+as a broadcast even in x2APIC mode in order to support physical x2APIC
+without interrupt remapping. This is undesirable in logical mode,
+where 0xff represents CPUs 0-7 in cluster 0.
+
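+A sketch of enabling both features on a VM file descriptor via KVM_ENABLE_CAP
+(error handling omitted; vm_fd is assumed to be an open VM fd):
+
+        struct kvm_enable_cap cap = {
+                .cap = KVM_CAP_X2APIC_API,
+                .args[0] = KVM_X2APIC_API_USE_32BIT_IDS |
+                           KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK,
+        };
+
+        ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+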
+7.8 KVM_CAP_S390_USER_INSTR0
+
+Architectures: s390
+Parameters: none
+
+With this capability enabled, all occurrences of the illegal instruction
+0x0000 (2 bytes) will be intercepted and forwarded to user space. User space
+can use this mechanism e.g. to realize 2-byte software breakpoints. The kernel
+will not inject an operation exception for these instructions, so user space
+has to take care of that.
+
+This capability can be enabled dynamically even if VCPUs were already
+created and are running.
+
8. Other capabilities.
----------------------
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 59541d49e15c..89182f80cc7f 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -4,16 +4,22 @@ ARM Virtual Generic Interrupt Controller (VGIC)
Device types supported:
KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0
KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0
+ KVM_DEV_TYPE_ARM_VGIC_ITS ARM Interrupt Translation Service Controller
-Only one VGIC instance may be instantiated through either this API or the
-legacy KVM_CREATE_IRQCHIP api. The created VGIC will act as the VM interrupt
-controller, requiring emulated user-space devices to inject interrupts to the
-VGIC instead of directly to CPUs.
+Only one VGIC instance of the V2/V3 types above may be instantiated through
+either this API or the legacy KVM_CREATE_IRQCHIP api. The created VGIC will
+act as the VM interrupt controller, requiring emulated user-space devices to
+inject interrupts to the VGIC instead of directly to CPUs.
Creating a guest GICv3 device requires a host GICv3 as well.
GICv3 implementations with hardware compatibility support allow a guest GICv2
as well.
+Creating a virtual ITS controller requires a host GICv3 (but does not depend
+on having physical ITS controllers).
+There can be multiple ITS controllers per guest; each of them has to have
+a separate, non-overlapping MMIO region.
+
Groups:
KVM_DEV_ARM_VGIC_GRP_ADDR
Attributes:
@@ -39,6 +45,13 @@ Groups:
Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
This address needs to be 64K aligned.
+ KVM_VGIC_V3_ADDR_TYPE_ITS (rw, 64-bit)
+ Base address in the guest physical address space of the GICv3 ITS
+ control register frame. The ITS allows MSI(-X) interrupts to be
+ injected into guests. This extension is optional. If the kernel
+ does not support the ITS, the call returns -ENODEV.
+ Only valid for KVM_DEV_TYPE_ARM_VGIC_ITS.
+ This address needs to be 64K aligned and the region covers 128K.
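
For illustration only (the guest address, the file descriptors and the
attribute name, taken from the text above, are assumptions of the example;
the exact macro name in the uapi header may differ), user space might program
the ITS base address like this:

    struct kvm_create_device its = { .type = KVM_DEV_TYPE_ARM_VGIC_ITS };
    __u64 its_base = 0x08080000;        /* assumed guest address, 64K aligned */
    struct kvm_device_attr addr_attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
            .attr  = KVM_VGIC_V3_ADDR_TYPE_ITS,
            .addr  = (__u64)&its_base,
    };

    if (ioctl(vm_fd, KVM_CREATE_DEVICE, &its) < 0 ||
        ioctl(its.fd, KVM_SET_DEVICE_ATTR, &addr_attr) < 0)
            perror("vITS setup");
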
KVM_DEV_ARM_VGIC_GRP_DIST_REGS
Attributes:
@@ -109,8 +122,8 @@ Groups:
KVM_DEV_ARM_VGIC_GRP_CTRL
Attributes:
KVM_DEV_ARM_VGIC_CTRL_INIT
- request the initialization of the VGIC, no additional parameter in
- kvm_device_attr.addr.
+ request the initialization of the VGIC or ITS, no additional parameter
+ in kvm_device_attr.addr.
Errors:
-ENXIO: VGIC not properly configured as required prior to calling
this attribute
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index a9ea8774a45f..b6cda49f2ba4 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -20,7 +20,8 @@ Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
Parameters: none
-Returns: 0
+Returns: -EINVAL if CMMA was not enabled
+ 0 otherwise
Clear the CMMA status for all guest pages, so any pages the guest marked
as unused are again used and may not be reclaimed by the host.
@@ -85,6 +86,90 @@ Returns: -EBUSY in case 1 or more vcpus are already activated (only in write
-ENOMEM if not enough memory is available to process the ioctl
0 in case of success
+2.3. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_FEAT (r/o)
+
+Allows user space to retrieve available cpu features. A feature is available if
+provided by the hardware and supported by kvm. In theory, cpu features could
+even be completely emulated by kvm.
+
+struct kvm_s390_vm_cpu_feat {
+ __u64 feat[16]; # Bitmap (1 = feature available), MSB 0 bit numbering
+};
+
+Parameters: address of a buffer to load the feature list from.
+Returns: -EFAULT if the given address is not accessible from kernel space.
+ 0 in case of success.
+
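For illustration (vm_fd and the group constant KVM_S390_VM_CPU_MODEL are
assumptions of the example, taken from the surrounding interface rather than
from this section), retrieving the bitmap could look like:

    struct kvm_s390_vm_cpu_feat feat;
    struct kvm_device_attr attr = {
            .group = KVM_S390_VM_CPU_MODEL,       /* assumed group constant */
            .attr  = KVM_S390_VM_CPU_MACHINE_FEAT,
            .addr  = (__u64)&feat,
    };

    if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr) == 0)
            printf("first feature word: 0x%llx\n",
                   (unsigned long long)feat.feat[0]);
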
+2.4. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_FEAT (r/w)
+
+Allows user space to retrieve or change enabled cpu features for all VCPUs of a
+VM. Features that are not available cannot be enabled.
+
+See 2.3. for a description of the parameter struct.
+
+Parameters: address of a buffer to store/load the feature list from.
+Returns: -EFAULT if the given address is not accessible from kernel space.
+ -EINVAL if a cpu feature that is not available is to be enabled.
+ -EBUSY if at least one VCPU has already been defined.
+ 0 in case of success.
+
+2.5. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_SUBFUNC (r/o)
+
+Allows user space to retrieve available cpu subfunctions without any filtering
+done by a set IBC. These subfunctions are indicated to the guest VCPU via
+query or "test bit" subfunctions and used e.g. by cpacf functions, plo and ptff.
+
+A subfunction block is only valid if KVM_S390_VM_CPU_MACHINE contains the
+STFL(E) bit introducing the affected instruction. If the affected instruction
+indicates subfunctions via a "query subfunction", the response block is
+contained in the returned struct. If the affected instruction
+indicates subfunctions via a "test bit" mechanism, the subfunction codes are
+contained in the returned struct in MSB 0 bit numbering.
+
+struct kvm_s390_vm_cpu_subfunc {
+ u8 plo[32]; # always valid (ESA/390 feature)
+ u8 ptff[16]; # valid with TOD-clock steering
+ u8 kmac[16]; # valid with Message-Security-Assist
+ u8 kmc[16]; # valid with Message-Security-Assist
+ u8 km[16]; # valid with Message-Security-Assist
+ u8 kimd[16]; # valid with Message-Security-Assist
+ u8 klmd[16]; # valid with Message-Security-Assist
+ u8 pckmo[16]; # valid with Message-Security-Assist-Extension 3
+ u8 kmctr[16]; # valid with Message-Security-Assist-Extension 4
+ u8 kmf[16]; # valid with Message-Security-Assist-Extension 4
+ u8 kmo[16]; # valid with Message-Security-Assist-Extension 4
+ u8 pcc[16]; # valid with Message-Security-Assist-Extension 4
+ u8 ppno[16]; # valid with Message-Security-Assist-Extension 5
+ u8 reserved[1824]; # reserved for future instructions
+};
+
+Parameters: address of a buffer to load the subfunction blocks from.
+Returns: -EFAULT if the given address is not accessible from kernel space.
+ 0 in case of success.
+
+2.6. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_SUBFUNC (r/w)
+
+Allows user space to retrieve or change cpu subfunctions to be indicated for
+all VCPUs of a VM. This attribute will only be available if kernel and
+hardware support are in place.
+
+The kernel uses the configured subfunction blocks for indication to
+the guest. A subfunction block will only be used if the associated STFL(E) bit
+has not been disabled by user space (so the instruction to be queried is
+actually available for the guest).
+
+As long as no data has been written, a read will fail. In this case, the IBC
+will be used to determine the available subfunctions, which guarantees
+backward compatibility.
+
+See 2.5. for a description of the parameter struct.
+
+Parameters: address of a buffer to store/load the subfunction blocks from.
+Returns: -EFAULT if the given address is not accessible from kernel space.
+ -EINVAL when reading, if there was no write yet.
+ -EBUSY if at least one VCPU has already been defined.
+ 0 in case of success.
+
3. GROUP: KVM_S390_VM_TOD
Architectures: s390
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index 19f94a6b9bb0..f2491a8c68b4 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -89,7 +89,7 @@ In mmu_spte_clear_track_bits():
old_spte = *spte;
/* 'if' condition is satisfied. */
- if (old_spte.Accssed == 1 &&
+ if (old_spte.Accessed == 1 &&
old_spte.W == 0)
spte = 0ull;
on fast page fault path:
@@ -102,7 +102,7 @@ In mmu_spte_clear_track_bits():
old_spte = xchg(spte, 0ull)
- if (old_spte.Accssed == 1)
+ if (old_spte.Accessed == 1)
kvm_set_pfn_accessed(spte.pfn);
if (old_spte.Dirty == 1)
kvm_set_pfn_dirty(spte.pfn);
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index ade01274212d..e0b58c0e6b49 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -63,7 +63,7 @@ nodes. Each emulated node will manage a fraction of the underlying cells'
physical memory. NUMA emulation is useful for testing NUMA kernel and
application features on non-NUMA platforms, and as a sort of memory resource
management mechanism when used together with cpusets.
-[see Documentation/cgroups/cpusets.txt]
+[see Documentation/cgroup-v1/cpusets.txt]
For each node with memory, Linux constructs an independent memory management
subsystem, complete with its own free page lists, in-use page lists, usage
@@ -113,7 +113,7 @@ allocation behavior using Linux NUMA memory policy.
System administrators can restrict the CPUs and nodes' memories that a non-
privileged user can specify in the scheduling or NUMA commands and functions
-using control groups and CPUsets. [see Documentation/cgroups/cpusets.txt]
+using control groups and CPUsets. [see Documentation/cgroup-v1/cpusets.txt]
On architectures that do not hide memoryless nodes, Linux will include only
zones [nodes] with memory in the zonelists. This means that for a memoryless
diff --git a/Documentation/vm/numa_memory_policy.txt b/Documentation/vm/numa_memory_policy.txt
index badb0507608f..622b927816e7 100644
--- a/Documentation/vm/numa_memory_policy.txt
+++ b/Documentation/vm/numa_memory_policy.txt
@@ -9,7 +9,7 @@ document attempts to describe the concepts and APIs of the 2.6 memory policy
support.
Memory policies should not be confused with cpusets
-(Documentation/cgroups/cpusets.txt)
+(Documentation/cgroup-v1/cpusets.txt)
which is an administrative mechanism for restricting the nodes from which
memory may be allocated by a set of processes. Memory policies are a
programming interface that a NUMA-aware application can take advantage of. When
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index 94bd9c11c4e0..0478ae2ad44a 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -38,7 +38,7 @@ locations.
Larger installations usually partition the system using cpusets into
sections of nodes. Paul Jackson has equipped cpusets with the ability to
move pages when a task is moved to another cpuset (See
-Documentation/cgroups/cpusets.txt).
+Documentation/cgroup-v1/cpusets.txt).
Cpusets allows the automation of process locality. If a task is moved to
a new cpuset then also all its pages are moved with it so that the
performance of the process does not sink dramatically. Also the pages
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
index 0026a8d33fc0..e14718572476 100644
--- a/Documentation/vm/unevictable-lru.txt
+++ b/Documentation/vm/unevictable-lru.txt
@@ -122,7 +122,7 @@ MEMORY CONTROL GROUP INTERACTION
--------------------------------
The unevictable LRU facility interacts with the memory control group [aka
-memory controller; see Documentation/cgroups/memory.txt] by extending the
+memory controller; see Documentation/cgroup-v1/memory.txt] by extending the
lru_list enum.
The memory controller data structure automatically gets a per-zone unevictable
diff --git a/Documentation/watchdog/hpwdt.txt b/Documentation/watchdog/hpwdt.txt
index a40398cce9d1..7a9f635d0258 100644
--- a/Documentation/watchdog/hpwdt.txt
+++ b/Documentation/watchdog/hpwdt.txt
@@ -1,9 +1,9 @@
-Last reviewed: 04/04/2016
+Last reviewed: 05/20/2016
HPE iLO NMI Watchdog Driver
NMI sourcing for iLO based ProLiant Servers
Documentation and Driver by
- Thomas Mingarelli <thomas.mingarelli@hpe.com>
+ Thomas Mingarelli
The HPE iLO NMI Watchdog driver is a kernel module that provides basic
watchdog functionality and the added benefit of NMI sourcing. Both the
@@ -95,4 +95,3 @@ Last reviewed: 04/04/2016
-- Tom Mingarelli
- (thomas.mingarelli@hpe.com)
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index fcdde8fc98be..6983d05097e2 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -2,6 +2,7 @@
* Watchdog Driver Test Program
*/
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -13,6 +14,7 @@
#include <linux/watchdog.h>
int fd;
+const char v = 'V';
/*
* This function simply sends an IOCTL to the driver, which in turn ticks
@@ -23,6 +25,7 @@ static void keep_alive(void)
{
int dummy;
+ printf(".");
ioctl(fd, WDIOC_KEEPALIVE, &dummy);
}
@@ -33,8 +36,13 @@ static void keep_alive(void)
static void term(int sig)
{
+ int ret = write(fd, &v, 1);
+
close(fd);
- fprintf(stderr, "Stopping watchdog ticks...\n");
+ if (ret < 0)
+ printf("\nStopping watchdog ticks failed (%d)...\n", errno);
+ else
+ printf("\nStopping watchdog ticks...\n");
exit(0);
}
@@ -42,12 +50,14 @@ int main(int argc, char *argv[])
{
int flags;
unsigned int ping_rate = 1;
+ int ret;
+
+ setbuf(stdout, NULL);
fd = open("/dev/watchdog", O_WRONLY);
if (fd == -1) {
- fprintf(stderr, "Watchdog device not enabled.\n");
- fflush(stderr);
+ printf("Watchdog device not enabled.\n");
exit(-1);
}
@@ -55,36 +65,30 @@ int main(int argc, char *argv[])
if (!strncasecmp(argv[1], "-d", 2)) {
flags = WDIOS_DISABLECARD;
ioctl(fd, WDIOC_SETOPTIONS, &flags);
- fprintf(stderr, "Watchdog card disabled.\n");
- fflush(stderr);
+ printf("Watchdog card disabled.\n");
goto end;
} else if (!strncasecmp(argv[1], "-e", 2)) {
flags = WDIOS_ENABLECARD;
ioctl(fd, WDIOC_SETOPTIONS, &flags);
- fprintf(stderr, "Watchdog card enabled.\n");
- fflush(stderr);
+ printf("Watchdog card enabled.\n");
goto end;
} else if (!strncasecmp(argv[1], "-t", 2) && argv[2]) {
flags = atoi(argv[2]);
ioctl(fd, WDIOC_SETTIMEOUT, &flags);
- fprintf(stderr, "Watchdog timeout set to %u seconds.\n", flags);
- fflush(stderr);
+ printf("Watchdog timeout set to %u seconds.\n", flags);
goto end;
} else if (!strncasecmp(argv[1], "-p", 2) && argv[2]) {
ping_rate = strtoul(argv[2], NULL, 0);
- fprintf(stderr, "Watchdog ping rate set to %u seconds.\n", ping_rate);
- fflush(stderr);
+ printf("Watchdog ping rate set to %u seconds.\n", ping_rate);
} else {
- fprintf(stderr, "-d to disable, -e to enable, -t <n> to set " \
+ printf("-d to disable, -e to enable, -t <n> to set " \
"the timeout,\n-p <n> to set the ping rate, and \n");
- fprintf(stderr, "run by itself to tick the card.\n");
- fflush(stderr);
+ printf("run by itself to tick the card.\n");
goto end;
}
}
- fprintf(stderr, "Watchdog Ticking Away!\n");
- fflush(stderr);
+ printf("Watchdog Ticking Away!\n");
signal(SIGINT, term);
@@ -93,6 +97,9 @@ int main(int argc, char *argv[])
sleep(ping_rate);
}
end:
+ ret = write(fd, &v, 1);
+ if (ret < 0)
+ printf("Stopping watchdog ticks failed (%d)...\n", errno);
close(fd);
return 0;
}
diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt
index 917eeeabfa5e..7f31125c123e 100644
--- a/Documentation/watchdog/watchdog-kernel-api.txt
+++ b/Documentation/watchdog/watchdog-kernel-api.txt
@@ -82,8 +82,9 @@ It contains following fields:
* max_timeout: the watchdog timer's maximum timeout value (in seconds),
as seen from userspace. If set, the maximum configurable value for
'timeout'. Not used if max_hw_heartbeat_ms is non-zero.
-* min_hw_heartbeat_ms: Minimum time between heartbeats sent to the chip,
- in milli-seconds.
+* min_hw_heartbeat_ms: Hardware limit for the minimum time between heartbeats,
+  in milli-seconds. This value is normally 0; it should only be provided
+  if the hardware cannot tolerate shorter intervals between heartbeats.
* max_hw_heartbeat_ms: Maximum hardware heartbeat, in milli-seconds.
If set, the infrastructure will send heartbeats to the watchdog driver
if 'timeout' is larger than max_hw_heartbeat_ms, unless WDOG_ACTIVE
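
As a hedged sketch of how a driver might use these two fields (my_wdt_info and
my_wdt_ops are placeholders, not part of this document): hardware that must be
pinged at least every two seconds but cannot cope with pings closer than
500 ms apart could be described as

    static struct watchdog_device my_wdd = {
            .info                = &my_wdt_info,  /* placeholder */
            .ops                 = &my_wdt_ops,   /* placeholder */
            .timeout             = 30,            /* user-visible, in seconds */
            .min_hw_heartbeat_ms = 500,
            .max_hw_heartbeat_ms = 2000,
    };

and registered from the driver's probe function with
watchdog_register_device(&my_wdd).
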
@@ -166,6 +167,10 @@ they are supported. These optional routines/operations are:
info structure).
* status: this routine checks the status of the watchdog timer device. The
status of the device is reported with watchdog WDIOF_* status flags/bits.
+ WDIOF_MAGICCLOSE and WDIOF_KEEPALIVEPING are reported by the watchdog core;
+ it is not necessary to report those bits from the driver. Also, if no status
+ function is provided by the driver, the watchdog core reports the status bits
+ provided in the bootstatus variable of struct watchdog_device.
* set_timeout: this routine checks and changes the timeout of the watchdog
timer device. It returns 0 on success, -EINVAL for "parameter out of range"
and -EIO for "could not write value to the watchdog". On success this
diff --git a/Documentation/x86/x86_64/fake-numa-for-cpusets b/Documentation/x86/x86_64/fake-numa-for-cpusets
index 0f11d9becb0b..4b09f18831f8 100644
--- a/Documentation/x86/x86_64/fake-numa-for-cpusets
+++ b/Documentation/x86/x86_64/fake-numa-for-cpusets
@@ -8,7 +8,7 @@ assign them to cpusets and their attached tasks. This is a way of limiting the
amount of system memory that is available to a certain class of tasks.
For more information on the features of cpusets, see
-Documentation/cgroups/cpusets.txt.
+Documentation/cgroup-v1/cpusets.txt.
There are a number of different configurations you can use for your needs. For
more information on the numa=fake command line option and its various ways of
configuring fake nodes, see Documentation/x86/x86_64/boot-options.txt.
@@ -33,7 +33,7 @@ A machine may be split as follows with "numa=fake=4*512," as reported by dmesg:
On node 3 totalpages: 131072
Now following the instructions for mounting the cpusets filesystem from
-Documentation/cgroups/cpusets.txt, you can assign fake nodes (i.e. contiguous memory
+Documentation/cgroup-v1/cpusets.txt, you can assign fake nodes (i.e. contiguous memory
address spaces) to individual cpusets:
[root@xroads /]# mkdir exampleset
diff --git a/MAINTAINERS b/MAINTAINERS
index 256f56bbb2ad..20bb1d00098c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -612,6 +612,13 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-altera.c
+ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT
+M: Thor Thayer <tthayer@opensource.altera.com>
+S: Maintained
+F: drivers/gpio/gpio-altera-a10sr.c
+F: drivers/mfd/altera-a10sr.c
+F: include/linux/mfd/altera-a10sr.h
+
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Vince Bridgers <vbridger@opensource.altera.com>
L: netdev@vger.kernel.org
@@ -771,6 +778,11 @@ W: http://ez.analog.com/community/linux-device-drivers
S: Supported
F: drivers/dma/dma-axi-dmac.c
+ANDROID CONFIG FRAGMENTS
+M: Rob Herring <robh@kernel.org>
+S: Supported
+F: kernel/configs/android*
+
ANDROID DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Arve Hjønnevåg <arve@android.com>
@@ -872,9 +884,17 @@ F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
ARM HDLCD DRM DRIVER
M: Liviu Dudau <liviu.dudau@arm.com>
S: Supported
-F: drivers/gpu/drm/arm/
+F: drivers/gpu/drm/arm/hdlcd_*
F: Documentation/devicetree/bindings/display/arm,hdlcd.txt
+ARM MALI-DP DRM DRIVER
+M: Liviu Dudau <liviu.dudau@arm.com>
+M: Brian Starkey <brian.starkey@arm.com>
+M: Mali DP Maintainers <malidp@foss.arm.com>
+S: Supported
+F: drivers/gpu/drm/arm/
+F: Documentation/devicetree/bindings/display/arm,malidp.txt
+
ARM MFM AND FLOPPY DRIVERS
M: Ian Molton <spyro@f2s.com>
S: Maintained
@@ -1529,6 +1549,7 @@ M: David Brown <david.brown@linaro.org>
L: linux-arm-msm@vger.kernel.org
L: linux-soc@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/soc/qcom/
F: arch/arm/boot/dts/qcom-*.dts
F: arch/arm/boot/dts/qcom-*.dtsi
F: arch/arm/mach-qcom/
@@ -1606,12 +1627,13 @@ F: arch/arm/mach-s3c24*/
F: arch/arm/mach-s3c64xx/
F: arch/arm/mach-s5p*/
F: arch/arm/mach-exynos*/
-F: drivers/*/*s3c2410*
-F: drivers/*/*/*s3c2410*
+F: drivers/*/*s3c24*
+F: drivers/*/*/*s3c24*
+F: drivers/*/*s3c64xx*
+F: drivers/*/*s5pv210*
F: drivers/memory/samsung/*
F: drivers/soc/samsung/*
F: drivers/spi/spi-s3c*
-F: sound/soc/samsung/*
F: Documentation/arm/Samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -1626,7 +1648,8 @@ F: arch/arm/mach-s5pv210/
ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Andrzej Hajda <a.hajda@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
@@ -1634,8 +1657,9 @@ F: drivers/media/platform/s5p-g2d/
ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
M: Jeongtae Park <jtp.park@samsung.com>
+M: Andrzej Hajda <a.hajda@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
@@ -1746,8 +1770,7 @@ ARM/TANGO ARCHITECTURE
M: Marc Gonzalez <marc_gonzalez@sigmadesigns.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
-F: arch/arm/mach-tango/
-F: arch/arm/boot/dts/tango*
+N: tango
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -1834,7 +1857,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.linaro.org/people/ulfh/clk.git
S: Maintained
F: drivers/clk/ux500/
-F: include/linux/platform_data/clk-ux500.h
ARM/VERSATILE EXPRESS PLATFORM
M: Liviu Dudau <liviu.dudau@arm.com>
@@ -2331,7 +2353,10 @@ S: Supported
F: drivers/media/platform/sti/bdisp
BEFS FILE SYSTEM
-S: Orphan
+M: Luis de Bethencourt <luisbg@osg.samsung.com>
+M: Salah Triki <salah.triki@gmail.com>
+S: Maintained
+T: git git://github.com/luisbg/linux-befs.git
F: Documentation/filesystems/befs.txt
F: fs/befs/
@@ -2501,17 +2526,14 @@ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: Ray Jui <rjui@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
-L: bcm-kernel-feedback-list@broadcom.com
+M: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/mach-bcm
S: Maintained
+N: bcm281*
+N: bcm113*
+N: bcm216*
+N: kona
F: arch/arm/mach-bcm/
-F: arch/arm/boot/dts/bcm113*
-F: arch/arm/boot/dts/bcm216*
-F: arch/arm/boot/dts/bcm281*
-F: arch/arm64/boot/dts/broadcom/
-F: arch/arm/configs/bcm_defconfig
-F: drivers/mmc/host/sdhci-bcm-kona.c
-F: drivers/clocksource/bcm_kona_timer.c
BROADCOM BCM2835 ARM ARCHITECTURE
M: Stephen Warren <swarren@wwwdotorg.org>
@@ -2534,20 +2556,21 @@ F: arch/mips/include/asm/mach-bcm47xx/*
BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
+M: Rafał Miłecki <zajec5@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: arch/arm/mach-bcm/bcm_5301x.c
-F: arch/arm/boot/dts/bcm5301x.dtsi
+F: arch/arm/boot/dts/bcm5301x*.dtsi
F: arch/arm/boot/dts/bcm470*
BROADCOM BCM63XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/stblinux.git
S: Maintained
-F: arch/arm/mach-bcm/bcm63xx.c
-F: arch/arm/include/debug/bcm63xx.S
+N: bcm63xx
BROADCOM BCM63XX/BCM33XX UDC DRIVER
M: Kevin Cernekee <cernekee@gmail.com>
@@ -2559,8 +2582,8 @@ BROADCOM BCM7XXX ARM ARCHITECTURE
M: Brian Norris <computersforpeace@gmail.com>
M: Gregory Fong <gregory.0xf0@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
+M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/stblinux.git
S: Maintained
F: arch/arm/mach-bcm/*brcmstb*
@@ -2617,13 +2640,13 @@ BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
M: Jon Mason <jonmason@broadcom.com>
+M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/cygnus-linux.git
S: Maintained
N: iproc
N: cygnus
-N: nsp
+N: bcm[-_]nsp
N: bcm9113*
N: bcm9583*
N: bcm9585*
@@ -2634,6 +2657,9 @@ N: bcm583*
N: bcm585*
N: bcm586*
N: bcm88312
+F: arch/arm64/boot/dts/broadcom/ns2*
+F: drivers/clk/bcm/clk-ns*
+F: drivers/pinctrl/bcm/pinctrl-ns*
BROADCOM BRCMSTB GPIO DRIVER
M: Gregory Fong <gregory.0xf0@gmail.com>
@@ -2678,8 +2704,8 @@ F: drivers/net/ethernet/broadcom/bcmsysport.*
BROADCOM VULCAN ARM64 SOC
M: Jayachandran C. <jchandra@broadcom.com>
+M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: arch/arm64/boot/dts/broadcom/vulcan*
@@ -3193,7 +3219,7 @@ M: Johannes Weiner <hannes@cmpxchg.org>
L: cgroups@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
-F: Documentation/cgroups/
+F: Documentation/cgroup*
F: include/linux/cgroup*
F: kernel/cgroup*
@@ -3204,7 +3230,7 @@ W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
-F: Documentation/cgroups/cpusets.txt
+F: Documentation/cgroup-v1/cpusets.txt
F: include/linux/cpuset.h
F: kernel/cpuset.c
@@ -3468,6 +3494,7 @@ F: Documentation/ABI/testing/sysfs-class-cxl
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
M: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
M: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
@@ -3712,6 +3739,8 @@ M: Support Opensource <support.opensource@diasemi.com>
W: http://www.dialog-semiconductor.com/products
S: Supported
F: Documentation/hwmon/da90??
+F: Documentation/devicetree/bindings/mfd/da90*.txt
+F: Documentation/devicetree/bindings/regulator/da92*.txt
F: Documentation/devicetree/bindings/sound/da[79]*.txt
F: drivers/gpio/gpio-da90??.c
F: drivers/hwmon/da90??-hwmon.c
@@ -3732,8 +3761,10 @@ F: drivers/watchdog/da90??_wdt.c
F: include/linux/mfd/da903x.h
F: include/linux/mfd/da9052/
F: include/linux/mfd/da9055/
+F: include/linux/mfd/da9062/
F: include/linux/mfd/da9063/
F: include/linux/mfd/da9150/
+F: include/linux/regulator/da9211.h
F: include/sound/da[79]*.h
F: sound/soc/codecs/da[79]*.[ch]
@@ -3809,6 +3840,17 @@ F: include/linux/*fence.h
F: Documentation/dma-buf-sharing.txt
T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+SYNC FILE FRAMEWORK
+M: Sumit Semwal <sumit.semwal@linaro.org>
+R: Gustavo Padovan <gustavo@padovan.org>
+S: Maintained
+L: linux-media@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+F: drivers/dma-buf/sync_file.c
+F: include/linux/sync_file.h
+F: Documentation/sync_file.txt
+T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
L: dmaengine@vger.kernel.org
@@ -3898,7 +3940,10 @@ T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: drivers/gpu/vga/
-F: Documentation/DocBook/gpu.*
+F: Documentation/devicetree/bindings/display/
+F: Documentation/devicetree/bindings/gpu/
+F: Documentation/devicetree/bindings/video/
+F: Documentation/gpu/
F: include/drm/
F: include/uapi/drm/
@@ -3950,6 +3995,7 @@ S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
F: include/uapi/drm/i915_drm.h
+F: Documentation/gpu/i915.rst
DRM DRIVERS FOR ATMEL HLCDC
M: Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -4145,6 +4191,21 @@ F: drivers/gpu/drm/vc4/
F: include/uapi/drm/vc4_drm.h
F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
+DRM DRIVERS FOR TI OMAP
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/omapdrm/
+F: Documentation/devicetree/bindings/display/ti/
+
+DRM DRIVERS FOR TI LCDC
+M: Jyri Sarha <jsarha@ti.com>
+R: Tomi Valkeinen <tomi.valkeinen@ti.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/tilcdc/
+F: Documentation/devicetree/bindings/display/tilcdc/
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -5044,6 +5105,15 @@ L: linux-scsi@vger.kernel.org
S: Odd Fixes (e.g., new signatures)
F: drivers/scsi/fdomain.*
+GCC PLUGINS
+M: Kees Cook <keescook@chromium.org>
+R: Emese Revfy <re.emese@gmail.com>
+L: kernel-hardening@lists.openwall.com
+S: Maintained
+F: scripts/gcc-plugins/
+F: scripts/gcc-plugin.sh
+F: Documentation/gcc-plugins.txt
+
GCOV BASED KERNEL PROFILING
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
S: Maintained
@@ -5354,6 +5424,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/dvb-frontends/hd29l2*
+HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
+M: Brian Boylston <brian.boylston@hpe.com>
+S: Supported
+F: Documentation/watchdog/hpwdt.txt
+F: drivers/watchdog/hpwdt.c
+
HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
M: Don Brace <don.brace@microsemi.com>
L: iss_storagedev@hp.com
@@ -5756,7 +5832,15 @@ M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvscsi*
-F: drivers/scsi/ibmvscsi/viosrp.h
+F: include/scsi/viosrp.h
+
+IBM Power Virtual SCSI Device Target Driver
+M: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+M: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+L: linux-scsi@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+F: drivers/scsi/ibmvscsi_tgt/
IBM Power Virtual FC Device Drivers
M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
@@ -6218,6 +6302,7 @@ M: Joerg Roedel <joro@8bytes.org>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
S: Maintained
+F: Documentation/devicetree/bindings/iommu/
F: drivers/iommu/
IP MASQUERADING
@@ -6948,6 +7033,7 @@ F: drivers/crypto/nx/
F: drivers/crypto/vmx/
F: drivers/net/ethernet/ibm/ibmveth.*
F: drivers/net/ethernet/ibm/ibmvnic.*
+F: drivers/pci/hotplug/pnv_php.c
F: drivers/pci/hotplug/rpa*
F: drivers/scsi/ibmvscsi/
N: opal
@@ -7354,6 +7440,13 @@ F: Documentation/devicetree/bindings/i2c/max6697.txt
F: drivers/hwmon/max6697.c
F: include/linux/platform_data/max6697.h
+MAX9860 MONO AUDIO VOICE CODEC DRIVER
+M: Peter Rosin <peda@axentia.se>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/max9860.txt
+F: sound/soc/codecs/max9860.*
+
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
L: linux-pm@vger.kernel.org
@@ -7555,6 +7648,15 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+SOFT-ROCE DRIVER (rxe)
+M: Moni Shoua <monis@mellanox.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/hw/rxe/
+F: include/uapi/rdma/rdma_user_rxe.h
+
MEMBARRIER SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
@@ -7850,6 +7952,7 @@ M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-mmc@vger.kernel.org
T: git git://git.linaro.org/people/ulf.hansson/mmc.git
S: Maintained
+F: Documentation/devicetree/bindings/mmc/
F: drivers/mmc/
F: include/linux/mmc/
F: include/uapi/linux/mmc/
@@ -8189,8 +8292,9 @@ T: git git://github.com/konis/nilfs2.git
S: Supported
F: Documentation/filesystems/nilfs2.txt
F: fs/nilfs2/
-F: include/linux/nilfs2_fs.h
F: include/trace/events/nilfs2.h
+F: include/uapi/linux/nilfs2_api.h
+F: include/uapi/linux/nilfs2_ondisk.h
NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER
M: YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
@@ -8237,6 +8341,7 @@ F: drivers/ntb/
F: drivers/net/ntb_netdev.c
F: include/linux/ntb.h
F: include/linux/ntb_transport.h
+F: tools/testing/selftests/ntb/
NTB INTEL DRIVER
M: Jon Mason <jdmason@kudzu.us>
@@ -8808,6 +8913,7 @@ L: linux-pci@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-pci/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
S: Supported
+F: Documentation/devicetree/bindings/pci/
F: Documentation/PCI/
F: drivers/pci/
F: include/linux/pci*
@@ -8871,6 +8977,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/*mvebu*
+PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/pci-aardvark.c
+
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
@@ -8953,6 +9066,15 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
F: drivers/pci/host/pci-xgene-msi.c
+PCIE DRIVER FOR AXIS ARTPEC
+M: Niklas Cassel <niklas.cassel@axis.com>
+M: Jesper Nilsson <jesper.nilsson@axis.com>
+L: linux-arm-kernel@axis.com
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/axis,artpec*
+F: drivers/pci/host/*artpec*
+
PCIE DRIVER FOR HISILICON
M: Zhou Wang <wangzhou1@hisilicon.com>
M: Gabriele Paoloni <gabriele.paoloni@huawei.com>
@@ -9130,6 +9252,16 @@ W: http://www.st.com/spear
S: Maintained
F: drivers/pinctrl/spear/
+PISTACHIO SOC SUPPORT
+M: James Hartley <james.hartley@imgtec.com>
+M: Ionela Voinescu <ionela.voinescu@imgtec.com>
+L: linux-mips@linux-mips.org
+S: Maintained
+F: arch/mips/pistachio/
+F: arch/mips/include/asm/mach-pistachio/
+F: arch/mips/boot/dts/pistachio/
+F: arch/mips/configs/pistachio*_defconfig
+
PKTCDVD DRIVER
M: Jiri Kosina <jikos@kernel.org>
S: Maintained
@@ -9214,6 +9346,12 @@ F: drivers/firmware/psci.c
F: include/linux/psci.h
F: include/uapi/linux/psci.h
+POWERNV OPERATOR PANEL LCD DISPLAY DRIVER
+M: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
+F: drivers/char/powernv-op-panel.c
+
PNP SUPPORT
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
S: Maintained
@@ -9363,7 +9501,8 @@ S: Odd Fixes
F: drivers/media/usb/pwc/*
PWM FAN DRIVER
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Lukasz Majewski <l.majewski@samsung.com>
L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@ -9701,10 +9840,14 @@ L: rtc-linux@googlegroups.com
Q: http://patchwork.ozlabs.org/project/rtc-linux/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
S: Maintained
+F: Documentation/devicetree/bindings/rtc/
F: Documentation/rtc.txt
F: drivers/rtc/
F: include/linux/rtc.h
F: include/uapi/linux/rtc.h
+F: include/linux/rtc/
+F: include/linux/platform_data/rtc-*
+F: tools/testing/selftests/timers/rtctest.c
REALTEK AUDIO CODECS
M: Bard Liao <bardliao@realtek.com>
@@ -10020,7 +10163,9 @@ S: Maintained
F: drivers/platform/x86/samsung-laptop.c
SAMSUNG AUDIO (ASoC) DRIVERS
+M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
M: Sangbeom Kim <sbkim73@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung/
@@ -10109,7 +10254,8 @@ T: git https://github.com/lmajewski/linux-samsung-thermal.git
F: drivers/thermal/samsung/
SAMSUNG USB2 PHY DRIVER
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/phy/samsung-phy.txt
@@ -10334,6 +10480,13 @@ F: tools/testing/selftests/seccomp/*
K: \bsecure_computing
K: \bTIF_SECCOMP\b
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER
+M: Al Cooper <alcooperx@gmail.com>
+L: linux-mmc@vger.kernel.org
+L: bcm-kernel-feedback-list@broadcom.com
+S: Maintained
+F: drivers/mmc/host/sdhci-brcmstb*
+
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
M: Ben Dooks <ben-linux@fluff.org>
M: Jaehoon Chung <jh80.chung@samsung.com>
@@ -10849,6 +11002,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
S: Supported
+F: Documentation/devicetree/bindings/sound/
F: Documentation/sound/alsa/soc/
F: sound/soc/
F: include/sound/soc*
@@ -12265,6 +12419,19 @@ S: Maintained
F: drivers/media/v4l2-core/videobuf2-*
F: include/media/videobuf2-*
+VIRTIO AND VHOST VSOCK DRIVER
+M: Stefan Hajnoczi <stefanha@redhat.com>
+L: kvm@vger.kernel.org
+L: virtualization@lists.linux-foundation.org
+L: netdev@vger.kernel.org
+S: Maintained
+F: include/linux/virtio_vsock.h
+F: include/uapi/linux/virtio_vsock.h
+F: net/vmw_vsock/virtio_transport_common.c
+F: net/vmw_vsock/virtio_transport.c
+F: drivers/vhost/vsock.c
+F: drivers/vhost/vsock.h
+
VIRTUAL SERIO DEVICE DRIVER
M: Stephen Chandler Paul <thatslyude@gmail.com>
S: Maintained
diff --git a/Makefile b/Makefile
index 393b6159ae92..8c504f324154 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
-PATCHLEVEL = 7
+PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
@@ -371,26 +371,27 @@ CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
-CFLAGS_KCOV = -fsanitize-coverage=trace-pc
+CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
# Use USERINCLUDE when you must reference the UAPI directories only.
USERINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include/uapi \
- -Iarch/$(hdr-arch)/include/generated/uapi \
+ -I$(objtree)/arch/$(hdr-arch)/include/generated/uapi \
-I$(srctree)/include/uapi \
- -Iinclude/generated/uapi \
+ -I$(objtree)/include/generated/uapi \
-include $(srctree)/include/linux/kconfig.h
# Use LINUXINCLUDE when you must reference the include/ directory.
# Needed to be compatible with the O= option
LINUXINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include \
- -Iarch/$(hdr-arch)/include/generated/uapi \
- -Iarch/$(hdr-arch)/include/generated \
+ -I$(objtree)/arch/$(hdr-arch)/include/generated/uapi \
+ -I$(objtree)/arch/$(hdr-arch)/include/generated \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
- -Iinclude \
- $(USERINCLUDE)
+ -I$(objtree)/include
+
+LINUXINCLUDE += $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
KBUILD_CPPFLAGS := -D__KERNEL__
@@ -554,7 +555,7 @@ ifeq ($(KBUILD_EXTMOD),)
# in parallel
PHONY += scripts
scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
- asm-generic
+ asm-generic gcc-plugins
$(Q)$(MAKE) $(build)=$(@)
# Objects we will link into vmlinux / subdirs we need to visit
@@ -620,7 +621,6 @@ include arch/$(SRCARCH)/Makefile
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
-KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
@@ -635,6 +635,8 @@ endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+include scripts/Makefile.gcc-plugins
+
ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
@@ -666,21 +668,11 @@ endif
endif
# Find arch-specific stack protector compiler sanity-checking script.
ifdef CONFIG_CC_STACKPROTECTOR
- stackp-path := $(srctree)/scripts/gcc-$(ARCH)_$(BITS)-has-stack-protector.sh
- ifneq ($(wildcard $(stackp-path)),)
- stackp-check := $(stackp-path)
- endif
+ stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh
+ stackp-check := $(wildcard $(stackp-path))
endif
KBUILD_CFLAGS += $(stackp-flag)
-ifdef CONFIG_KCOV
- ifeq ($(call cc-option, $(CFLAGS_KCOV)),)
- $(warning Cannot use CONFIG_KCOV: \
- -fsanitize-coverage=trace-pc is not supported by compiler)
- CFLAGS_KCOV =
- endif
-endif
-
ifeq ($(cc-name),clang)
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
@@ -1019,7 +1011,7 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
archprepare: archheaders archscripts prepare1 scripts_basic
-prepare0: archprepare
+prepare0: archprepare gcc-plugins
$(Q)$(MAKE) $(build)=.
# All the preparing..
@@ -1433,7 +1425,7 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
-DOC_TARGETS := xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs cleanmediadocs
+DOC_TARGETS := xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs
PHONY += $(DOC_TARGETS)
$(DOC_TARGETS): scripts_basic FORCE
$(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype
@@ -1531,6 +1523,7 @@ clean: $(clean-dirs)
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
-o -name modules.builtin -o -name '.tmp_*.o.*' \
+ -o -name '*.c.[012]*.*' \
-o -name '*.gcno' \) -type f -print | xargs rm -f
# Generate tags for editors
@@ -1641,7 +1634,7 @@ endif
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
# Make sure the latest headers are built for Documentation
-Documentation/: headers_install
+Documentation/ samples/: headers_install
%/: prepare scripts FORCE
$(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
diff --git a/arch/Kconfig b/arch/Kconfig
index 15996290fed4..e9c9334507dd 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -357,6 +357,43 @@ config SECCOMP_FILTER
See Documentation/prctl/seccomp_filter.txt for details.
+config HAVE_GCC_PLUGINS
+ bool
+ help
+ An arch should select this symbol if it supports building with
+ GCC plugins.
+
+menuconfig GCC_PLUGINS
+ bool "GCC plugins"
+ depends on HAVE_GCC_PLUGINS
+ depends on !COMPILE_TEST
+ help
+ GCC plugins are loadable modules that provide extra features to the
+ compiler. They are useful for runtime instrumentation and static analysis.
+
+ See Documentation/gcc-plugins.txt for details.
+
+config GCC_PLUGIN_CYC_COMPLEXITY
+ bool "Compute the cyclomatic complexity of a function"
+ depends on GCC_PLUGINS
+ help
+ The complexity M of a function's control flow graph is defined as:
+ M = E - N + 2P
+ where
+
+ E = the number of edges
+ N = the number of nodes
+ P = the number of connected components (exit nodes).
+
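As an illustrative data point (not part of the original text): a function
containing a single if has M = 2. Its control flow graph has roughly N = 4
nodes and E = 4 edges in P = 1 connected component, so M = 4 - 4 + 2*1 = 2.
The plugin computes this over GCC's internal CFG, so the exact node and edge
counts may differ slightly.

    /* Illustration: one decision point, so M = 2. */
    int sign(int x)
    {
            if (x < 0)
                    return -1;
            return 1;
    }
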
+config GCC_PLUGIN_SANCOV
+ bool
+ depends on GCC_PLUGINS
+ help
+ This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
+ basic blocks. It supports all gcc versions with plugin support (from
+ gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
+ by Dmitry Vyukov <dvyukov@google.com>.
+
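Conceptually (this is an illustration, not the plugin's literal output), the
instrumentation is equivalent to compiling the following, with one call at the
start of every basic block:

    void __sanitizer_cov_trace_pc(void);
    extern void bar(void);

    void foo(int x)
    {
            __sanitizer_cov_trace_pc();             /* block entered */
            if (x) {
                    __sanitizer_cov_trace_pc();     /* taken branch */
                    bar();
            }
            __sanitizer_cov_trace_pc();             /* join block */
    }
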
config HAVE_CC_STACKPROTECTOR
bool
help
@@ -424,6 +461,15 @@ config CC_STACKPROTECTOR_STRONG
endchoice
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+ bool
+ help
+ An architecture should select this if it can walk the kernel stack
+ frames to determine if an object is part of either the arguments
+ or local variables (i.e. that it excludes saved return addresses,
+ and similar) by implementing an inline arch_within_stack_frames(),
+ which is used by CONFIG_HARDENED_USERCOPY.
+
config HAVE_CONTEXT_TRACKING
bool
help
diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile
index 8399bd0e68e8..0cbe4c59d3ce 100644
--- a/arch/alpha/boot/Makefile
+++ b/arch/alpha/boot/Makefile
@@ -15,7 +15,7 @@ targets := vmlinux.gz vmlinux \
OBJSTRIP := $(obj)/tools/objstrip
HOSTCFLAGS := -Wall -I$(objtree)/usr/include
-BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
+BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
# SRM bootable image. Copy to offset 512 of a partition.
$(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 3c3451f58ff4..c63b6ac19ee5 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -1,8 +1,6 @@
#ifndef _ALPHA_DMA_MAPPING_H
#define _ALPHA_DMA_MAPPING_H
-#include <linux/dma-attrs.h>
-
extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h
deleted file mode 100644
index f71c3b0ed360..000000000000
--- a/arch/alpha/include/asm/rtc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/rtc.h>
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 32e920a83ae5..e9e90bfa2b50 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -86,33 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */
#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */
#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */
-#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif
#define SET_UNALIGN_CTL(task,value) ({ \
__u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 53dd2f1a53aa..d5f0580746a5 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -24,7 +24,6 @@
#include <asm/gct.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-#include <asm/rtc.h>
#include <asm/vga.h>
#include "proto.h"
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index f54bdf658cd0..d3398f6ab74c 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -137,7 +137,7 @@
#define __initmv __initdata
#define ALIAS_MV(x)
#else
-#define __initmv __initdata_refok
+#define __initmv __refdata
/* GCC actually has a syntax for defining aliases, but is under some
delusion that you shouldn't be able to declare it extern somewhere
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 8e735b5e56bd..bb152e21e5ae 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -109,7 +109,7 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 8969bf2dfe3a..451fc9cdd323 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -349,7 +349,7 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
int dac_allowed;
@@ -369,7 +369,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags;
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
@@ -433,7 +433,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
void *cpu_addr;
@@ -478,7 +478,7 @@ try_again:
static void alpha_pci_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
@@ -651,7 +651,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
struct scatterlist *start, *end, *out;
@@ -729,7 +729,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
unsigned long flags;
diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
index f535a3fd0f60..ceed68c7500b 100644
--- a/arch/alpha/kernel/rtc.c
+++ b/arch/alpha/kernel/rtc.c
@@ -15,8 +15,6 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <asm/rtc.h>
-
#include "proto.h"
@@ -81,7 +79,7 @@ init_rtc_epoch(void)
static int
alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- __get_rtc_time(tm);
+ mc146818_get_time(tm);
/* Adjust for non-default epochs. It's easier to depend on the
generic __get_rtc_time and adjust the epoch here than create
@@ -112,7 +110,7 @@ alpha_rtc_set_time(struct device *dev, struct rtc_time *tm)
tm = &xtm;
}
- return __set_rtc_time(tm);
+ return mc146818_set_time(tm);
}
static int
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index 763d66c883da..e659a340ca8a 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -19,7 +19,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
};
aliases {
@@ -57,9 +57,17 @@
no-loopback-test = <1>;
};
- pgu0: pgu@f9000000 {
- compatible = "snps,arcpgufb";
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+
+ pgu@f9000000 {
+ compatible = "snps,arcpgu";
reg = <0xf9000000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
};
ps2: ps2@f9001000 {
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts
index 4eb97c584b18..16ce5d65cfde 100644
--- a/arch/arc/boot/dts/nsimosci_hs.dts
+++ b/arch/arc/boot/dts/nsimosci_hs.dts
@@ -19,7 +19,7 @@
/* this is for console on PGU */
/* bootargs = "console=tty0 consoleblank=0"; */
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
};
aliases {
@@ -57,9 +57,17 @@
no-loopback-test = <1>;
};
- pgu0: pgu@f9000000 {
- compatible = "snps,arcpgufb";
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+
+ pgu@f9000000 {
+ compatible = "snps,arcpgu";
reg = <0xf9000000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
};
ps2: ps2@f9001000 {
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts
index 853f897eb2a3..ce8dfbc30c4d 100644
--- a/arch/arc/boot/dts/nsimosci_hs_idu.dts
+++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts
@@ -17,7 +17,7 @@
chosen {
/* this is for console on serial */
- bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug";
+ bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
};
aliases {
@@ -76,9 +76,17 @@
no-loopback-test = <1>;
};
- pgu0: pgu@f9000000 {
- compatible = "snps,arcpgufb";
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
+
+ pgu@f9000000 {
+ compatible = "snps,arcpgu";
reg = <0xf9000000 0x400>;
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
};
ps2: ps2@f9001000 {
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 45cd665fca23..99498a4b4216 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -23,6 +23,11 @@
#clock-cells = <0>;
};
+ pguclk: pguclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <25175000>;
+ };
};
ethernet@0x18000 {
@@ -75,11 +80,11 @@
};
/* PGU output directly sent to virtual LCD screen; hdmi controller not modelled */
- pgu@0x17000 {
- compatible = "snps,arcpgufb";
+ pgu@17000 {
+ compatible = "snps,arcpgu";
reg = <0x17000 0x400>;
- clock-frequency = <51000000>; /* PGU'clock is initated in init function */
- /* interrupts = <5>; PGU interrupts not used, this vector is used for ps2 below */
+ clocks = <&pguclk>;
+ clock-names = "pxlclk";
};
/* VDK has additional ps2 keyboard/mouse interface integrated in LCD screen model */
diff --git a/arch/arc/boot/dts/vdk_hs38_smp.dts b/arch/arc/boot/dts/vdk_hs38_smp.dts
index 031a5bc79b3e..2ba60c399d99 100644
--- a/arch/arc/boot/dts/vdk_hs38_smp.dts
+++ b/arch/arc/boot/dts/vdk_hs38_smp.dts
@@ -16,6 +16,6 @@
compatible = "snps,axs103";
chosen {
- bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+ bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=640x480-24";
};
};
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 42bafa552498..98cf20933bbb 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -58,7 +58,8 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 4bb60c1cd4a2..ddf8b96d494e 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -57,7 +57,8 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 7e88f4c720f8..ceb90745326e 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -70,7 +70,8 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 52ec315dc5c9..969b206d6c67 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -63,12 +63,9 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_ARCPGU_RGB888=y
-CONFIG_ARCPGU_DISPTYPE=0
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index ab74b5d9186c..20afc65e22dc 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -22,7 +22,7 @@
static void *arc_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long order = get_order(size);
struct page *page;
@@ -46,7 +46,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* (vs. always going to memory - thus are faster)
*/
if ((is_isa_arcv2() && ioc_exists) ||
- dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ (attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
/*
@@ -90,13 +90,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
}
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
- is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+ is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_exists);
if (PageHighMem(page) || !is_non_coh)
@@ -130,7 +130,7 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
_dma_cache_sync(paddr, size, dir);
@@ -138,7 +138,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
}
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 8be930394750..399e2f223d25 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -220,7 +220,7 @@ void __init mem_init(void)
/*
* free_initmem: Free all the __init memory.
*/
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
free_initmem_default(-1);
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c445fb9c189..a9c4e48bb7ec 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -54,6 +55,7 @@ config ARM
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+ select HAVE_GCC_PLUGINS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_IDE if PCI || ISA || PCMCIA
@@ -327,7 +329,6 @@ choice
config ARCH_MULTIPLATFORM
bool "Allow multiple platforms to be selected"
depends on MMU
- select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_HAS_SG_CHAIN
select ARM_PATCH_PHYS_VIRT
select AUTO_ZRELADDR
@@ -342,7 +343,6 @@ config ARCH_MULTIPLATFORM
config ARM_SINGLE_ARMV7M
bool "ARMv7-M based platforms (Cortex-M0/M3/M4)"
depends on !MMU
- select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_NVIC
select AUTO_ZRELADDR
select CLKSRC_OF
@@ -353,26 +353,12 @@ config ARM_SINGLE_ARMV7M
select SPARSE_IRQ
select USE_OF
-
-config ARCH_CLPS711X
- bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
- select ARCH_REQUIRE_GPIOLIB
- select AUTO_ZRELADDR
- select COMMON_CLK
- select CPU_ARM720T
- select GENERIC_CLOCKEVENTS
- select CLPS711X_TIMER
- select MFD_SYSCON
- select SOC_BUS
- help
- Support for Cirrus Logic 711x/721x/731x based boards.
-
config ARCH_GEMINI
bool "Cortina Systems Gemini"
- select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_FA526
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
help
Support for the Cortina Systems Gemini family SoCs
@@ -393,7 +379,6 @@ config ARCH_EBSA110
config ARCH_EP93XX
bool "EP93xx-based"
select ARCH_HAS_HOLES_MEMORYMODEL
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_PATCH_PHYS_VIRT
select ARM_VIC
@@ -402,6 +387,7 @@ config ARCH_EP93XX
select CLKSRC_MMIO
select CPU_ARM920T
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
help
This enables support for the Cirrus EP93xx series of CPUs.
@@ -442,9 +428,9 @@ config ARCH_IOP13XX
config ARCH_IOP32X
bool "IOP32x-based"
depends on MMU
- select ARCH_REQUIRE_GPIOLIB
select CPU_XSCALE
select GPIO_IOP
+ select GPIOLIB
select NEED_RET_TO_USER
select PCI
select PLAT_IOP
@@ -455,9 +441,9 @@ config ARCH_IOP32X
config ARCH_IOP33X
bool "IOP33x-based"
depends on MMU
- select ARCH_REQUIRE_GPIOLIB
select CPU_XSCALE
select GPIO_IOP
+ select GPIOLIB
select NEED_RET_TO_USER
select PCI
select PLAT_IOP
@@ -468,12 +454,12 @@ config ARCH_IXP4XX
bool "IXP4xx-based"
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
- select ARCH_REQUIRE_GPIOLIB
select ARCH_SUPPORTS_BIG_ENDIAN
select CLKSRC_MMIO
select CPU_XSCALE
select DMABOUNCE if PCI
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select MIGHT_HAVE_PCI
select NEED_MACH_IO_H
select USB_EHCI_BIG_ENDIAN_DESC
@@ -483,9 +469,9 @@ config ARCH_IXP4XX
config ARCH_DOVE
bool "Marvell Dove"
- select ARCH_REQUIRE_GPIOLIB
select CPU_PJ4
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select MIGHT_HAVE_PCI
select MULTI_IRQ_HANDLER
select MVEBU_MBUS
@@ -499,10 +485,10 @@ config ARCH_DOVE
config ARCH_KS8695
bool "Micrel/Kendin KS8695"
- select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select CPU_ARM922T
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select NEED_MACH_MEMORY_H
help
Support for Micrel/Kendin KS8695 "Centaur" (ARM922T) based
@@ -510,11 +496,11 @@ config ARCH_KS8695
config ARCH_W90X900
bool "Nuvoton W90X900 CPU"
- select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
help
Support for Nuvoton (Winbond logic dept.) ARM9 processor,
At present, the w90x900 has been renamed nuc900, regarding
@@ -526,13 +512,13 @@ config ARCH_W90X900
config ARCH_LPC32XX
bool "NXP LPC32XX"
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select CLKDEV_LOOKUP
select CLKSRC_LPC32XX
select COMMON_CLK
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select MULTI_IRQ_HANDLER
select SPARSE_IRQ
select USE_OF
@@ -543,7 +529,6 @@ config ARCH_PXA
bool "PXA2xx/PXA3xx-based"
depends on MMU
select ARCH_MTD_XIP
- select ARCH_REQUIRE_GPIOLIB
select ARM_CPU_SUSPEND if PM
select AUTO_ZRELADDR
select COMMON_CLK
@@ -554,6 +539,7 @@ config ARCH_PXA
select CPU_XSCALE if !CPU_XSC3
select GENERIC_CLOCKEVENTS
select GPIO_PXA
+ select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
@@ -584,7 +570,6 @@ config ARCH_RPC
config ARCH_SA1100
bool "SA1100-based"
select ARCH_MTD_XIP
- select ARCH_REQUIRE_GPIOLIB
select ARCH_SPARSEMEM_ENABLE
select CLKDEV_LOOKUP
select CLKSRC_MMIO
@@ -593,6 +578,7 @@ config ARCH_SA1100
select CPU_FREQ
select CPU_SA1100
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select ISA
@@ -604,12 +590,12 @@ config ARCH_SA1100
config ARCH_S3C24XX
bool "Samsung S3C24XX SoCs"
- select ARCH_REQUIRE_GPIOLIB
select ATAGS
select CLKDEV_LOOKUP
select CLKSRC_SAMSUNG_PWM
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
+ select GPIOLIB
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
@@ -625,12 +611,12 @@ config ARCH_S3C24XX
config ARCH_DAVINCI
bool "TI DaVinci"
select ARCH_HAS_HOLES_MEMORYMODEL
- select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CPU_ARM926T
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select HAVE_IDE
select USE_OF
select ZONE_DMA
@@ -642,11 +628,11 @@ config ARCH_OMAP1
depends on MMU
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
- select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select HAVE_IDE
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
@@ -715,7 +701,7 @@ config ARCH_VIRT
depends on ARCH_MULTI_V7
select ARM_AMBA
select ARM_GIC
- select ARM_GIC_V2M if PCI_MSI
+ select ARM_GIC_V2M if PCI
select ARM_GIC_V3
select ARM_PSCI
select HAVE_ARM_ARCH_TIMER
@@ -868,7 +854,7 @@ source "arch/arm/mach-zynq/Kconfig"
config ARCH_EFM32
bool "Energy Micro efm32"
depends on ARM_SINGLE_ARMV7M
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
Support for Energy Micro's (now Silicon Labs) efm32 Giant Gecko
processors.
@@ -901,7 +887,7 @@ config MACH_STM32F429
default y
config ARCH_MPS2
- bool "ARM MPS2 paltform"
+ bool "ARM MPS2 platform"
depends on ARM_SINGLE_ARMV7M
select ARM_AMBA
select CLKSRC_MPS2
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 19a3dcf5eb2e..a9693b6987a6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -109,23 +109,41 @@ choice
0x80020000 | 0xf0020000 | UART8
0x80024000 | 0xf0024000 | UART9
- config DEBUG_AT91_UART
- bool "Kernel low-level debugging on Atmel SoCs"
- depends on ARCH_AT91
+ config DEBUG_AT91_RM9200_DBGU
+ bool "Kernel low-level debugging on AT91RM9200, AT91SAM9 DBGU"
+ select DEBUG_AT91_UART
+ depends on SOC_AT91RM9200 || SOC_AT91SAM9
help
- Say Y here if you want the debug print routines to direct
- their output to the serial port on atmel devices.
+ Say Y here if you want kernel low-level debugging support
+ on the DBGU port of:
+ at91rm9200, at91sam9260, at91sam9g20, at91sam9261,
+ at91sam9g10, at91sam9n12, at91sam9rl64, at91sam9x5
- SOC DEBUG_UART_PHYS DEBUG_UART_VIRT PORT
- rm9200, 9260/9g20, 0xfffff200 0xfefff200 DBGU
- 9261/9g10, 9rl
- 9263, 9g45, sama5d3 0xffffee00 0xfeffee00 DBGU
- sama5d4 0xfc00c000 0xfb00c000 USART3
- sama5d4 0xfc069000 0xfb069000 DBGU
- sama5d2 0xf8020000 0xf7020000 UART1
+ config DEBUG_AT91_SAM9263_DBGU
+ bool "Kernel low-level debugging on AT91SAM{9263,9G45,A5D3} DBGU"
+ select DEBUG_AT91_UART
+ depends on SOC_AT91SAM9 || SOC_SAMA5D3
+ help
+ Say Y here if you want kernel low-level debugging support
+ on the DBGU port of:
+ at91sam9263, at91sam9g45, at91sam9m10,
+ sama5d3
- Please adjust DEBUG_UART_PHYS configuration options based on
- your needs.
+ config DEBUG_AT91_SAMA5D2_UART1
+ bool "Kernel low-level debugging on SAMA5D2 UART1"
+ select DEBUG_AT91_UART
+ depends on SOC_SAMA5D2
+ help
+ Say Y here if you want kernel low-level debugging support
+ on the UART1 port of sama5d2.
+
+ config DEBUG_AT91_SAMA5D4_USART3
+ bool "Kernel low-level debugging on SAMA5D4 USART3"
+ select DEBUG_AT91_UART
+ depends on SOC_SAMA5D4
+ help
+ Say Y here if you want kernel low-level debugging support
+ on the USART3 port of sama5d4.
config DEBUG_BCM2835
bool "Kernel low-level debugging on BCM2835 PL011 UART"
@@ -138,8 +156,8 @@ choice
select DEBUG_UART_PL01X
config DEBUG_BCM_5301X
- bool "Kernel low-level debugging on BCM5301X UART1"
- depends on ARCH_BCM_5301X
+ bool "Kernel low-level debugging on BCM5301X/NSP UART1"
+ depends on ARCH_BCM_5301X || ARCH_BCM_NSP
select DEBUG_UART_8250
config DEBUG_BCM_KONA_UART
@@ -1296,6 +1314,10 @@ choice
endchoice
+config DEBUG_AT91_UART
+ bool
+ depends on ARCH_AT91
+
config DEBUG_EXYNOS_UART
bool
@@ -1502,8 +1524,10 @@ config DEBUG_UART_PHYS
default 0xf1012000 if DEBUG_MVEBU_UART0_ALTERNATE
default 0xf1012100 if DEBUG_MVEBU_UART1_ALTERNATE
default 0xf7fc9000 if DEBUG_BERLIN_UART
+ default 0xf8020000 if DEBUG_AT91_SAMA5D2_UART1
default 0xf8b00000 if DEBUG_HIX5HD2_UART
default 0xf991e000 if DEBUG_QCOM_UARTDM
+ default 0xfc00c000 if DEBUG_AT91_SAMA5D4_USART3
default 0xfcb00000 if DEBUG_HI3620_UART
default 0xfd883000 if DEBUG_ALPINE_UART0
default 0xfe800000 if ARCH_IOP32X
@@ -1518,6 +1542,8 @@ config DEBUG_UART_PHYS
default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
default 0xfffe8600 if DEBUG_BCM63XX_UART
+ default 0xffffee00 if DEBUG_AT91_SAM9263_DBGU
+ default 0xfffff200 if DEBUG_AT91_RM9200_DBGU
default 0xfffff700 if ARCH_IOP33X
depends on ARCH_EP93XX || \
DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
@@ -1566,13 +1592,17 @@ config DEBUG_UART_VIRT
DEBUG_S3C2410_UART1)
default 0xf7008000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART2 || \
DEBUG_S3C2410_UART2)
+ default 0xf7020000 if DEBUG_AT91_SAMA5D2_UART1
default 0xf7fc9000 if DEBUG_BERLIN_UART
default 0xf8007000 if DEBUG_HIP04_UART
default 0xf8009000 if DEBUG_VEXPRESS_UART0_CA9
default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1
+ default 0xf8ffee00 if DEBUG_AT91_SAM9263_DBGU
+ default 0xf8fff200 if DEBUG_AT91_RM9200_DBGU
default 0xfa71e000 if DEBUG_QCOM_UARTDM
default 0xfb002000 if DEBUG_CNS3XXX
default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
+ default 0xfb00c000 if DEBUG_AT91_SAMA5D4_USART3
default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
default 0xfc40ab00 if DEBUG_BRCMSTB_UART
default 0xfc705000 if DEBUG_ZTE_ZX
@@ -1627,7 +1657,8 @@ config DEBUG_UART_VIRT
DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
DEBUG_S3C64XX_UART || \
DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
- DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+ DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
+ DEBUG_AT91_UART
config DEBUG_UART_8250_SHIFT
int "Register offset shift for the 8250 debug UART"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 229afaf2058b..56ea5c60b318 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -140,7 +140,6 @@ head-y := arch/arm/kernel/head$(MMUEXT).o
# Text offset. This list is sorted numerically by address in order to
# provide a means to avoid/resolve conflicts in multi-arch kernels.
textofs-y := 0x00008000
-textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
# We don't want the htc bootloader to corrupt kernel during resume
textofs-$(CONFIG_PM_H1940) := 0x00108000
# SA1111 DMA bug: we don't want the kernel to live in precious DMA-able memory
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 414b42710a36..faacd52370d2 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -7,9 +7,10 @@ dtb-$(CONFIG_MACH_ARTPEC6) += \
dtb-$(CONFIG_MACH_ASM9260) += \
alphascale-asm9260-devkit.dtb
# Keep at91 dtb files sorted alphabetically for each SoC
-dtb-$(CONFIG_SOC_SAM_V4_V5) += \
+dtb-$(CONFIG_SOC_AT91RM9200) += \
at91rm9200ek.dtb \
- mpa1600.dtb \
+ mpa1600.dtb
+dtb-$(CONFIG_SOC_AT91SAM9) += \
animeo_ip.dtb \
at91-qil_a9260.dtb \
aks-cdu.dtb \
@@ -17,8 +18,10 @@ dtb-$(CONFIG_SOC_SAM_V4_V5) += \
evk-pro3.dtb \
tny_a9260.dtb \
usb_a9260.dtb \
+ at91sam9260ek.dtb \
at91sam9261ek.dtb \
at91sam9263ek.dtb \
+ at91-sam9_l9260.dtb \
tny_a9263.dtb \
usb_a9263.dtb \
at91-foxg20.dtb \
@@ -85,6 +88,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \
bcm47094-dlink-dir-885l.dtb \
bcm94708.dtb \
bcm94709.dtb \
+ bcm953012er.dtb \
bcm953012k.dtb
dtb-$(CONFIG_ARCH_BCM_63XX) += \
bcm963138dvt.dtb
@@ -95,8 +99,11 @@ dtb-$(CONFIG_ARCH_BCM_CYGNUS) += \
bcm958305k.dtb
dtb-$(CONFIG_ARCH_BCM_MOBILE) += \
bcm28155-ap.dtb \
- bcm21664-garnet.dtb
+ bcm21664-garnet.dtb \
+ bcm23550-sparrow.dtb
dtb-$(CONFIG_ARCH_BCM_NSP) += \
+ bcm958525xmc.dtb \
+ bcm958625hr.dtb \
bcm958625k.dtb
dtb-$(CONFIG_ARCH_BERLIN) += \
berlin2-sony-nsz-gs7.dtb \
@@ -104,6 +111,8 @@ dtb-$(CONFIG_ARCH_BERLIN) += \
berlin2q-marvell-dmp.dtb
dtb-$(CONFIG_ARCH_BRCMSTB) += \
bcm7445-bcm97445svmb.dtb
+dtb-$(CONFIG_ARCH_CLPS711X) += \
+ ep7211-edb7211.dtb
dtb-$(CONFIG_ARCH_DAVINCI) += \
da850-enbw-cmc.dtb \
da850-evm.dtb
@@ -134,6 +143,7 @@ dtb-$(CONFIG_ARCH_EXYNOS5) += \
exynos5250-snow-rev5.dtb \
exynos5250-spring.dtb \
exynos5260-xyref5260.dtb \
+ exynos5410-odroidxu.dtb \
exynos5410-smdk5410.dtb \
exynos5420-arndale-octa.dtb \
exynos5420-peach-pit.dtb \
@@ -146,8 +156,6 @@ dtb-$(CONFIG_ARCH_EXYNOS5) += \
exynos5800-peach-pi.dtb
dtb-$(CONFIG_ARCH_HI3xxx) += \
hi3620-hi4511.dtb
-dtb-$(CONFIG_ARCH_HIX5HD2) += \
- hisi-x5hd2-dkb.dtb
dtb-$(CONFIG_ARCH_HIGHBANK) += \
highbank.dtb \
ecx-2000.dtb
@@ -155,6 +163,10 @@ dtb-$(CONFIG_ARCH_HIP01) += \
hip01-ca9x2.dtb
dtb-$(CONFIG_ARCH_HIP04) += \
hip04-d01.dtb
+dtb-$(CONFIG_ARCH_HISI) += \
+ hi3519-demb.dtb
+dtb-$(CONFIG_ARCH_HIX5HD2) += \
+ hisi-x5hd2-dkb.dtb
dtb-$(CONFIG_ARCH_INTEGRATOR) += \
integratorap.dtb \
integratorcp.dtb
@@ -356,6 +368,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-gw54xx.dtb \
imx6q-gw551x.dtb \
imx6q-gw552x.dtb \
+ imx6q-h100.dtb \
imx6q-hummingboard.dtb \
imx6q-icore-rqs.dtb \
imx6q-marsboard.dtb \
@@ -377,6 +390,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-tx6q-1110.dtb \
imx6q-tx6q-11x0-mb7.dtb \
imx6q-udoo.dtb \
+ imx6q-utilite-pro.dtb \
imx6q-wandboard.dtb \
imx6q-wandboard-revb1.dtb \
imx6qp-nitrogen6_max.dtb \
@@ -399,9 +413,11 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ul-tx6ul-mainboard.dtb
dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-cl-som-imx7.dtb \
+ imx7d-colibri-eval-v3.dtb \
imx7d-nitrogen7.dtb \
imx7d-sbc-imx7.dtb \
- imx7d-sdb.dtb
+ imx7d-sdb.dtb \
+ imx7s-colibri-eval-v3.dtb
dtb-$(CONFIG_SOC_LS1021A) += \
ls1021a-qds.dtb \
ls1021a-twr.dtb
@@ -416,7 +432,9 @@ dtb-$(CONFIG_SOC_VF610) += \
dtb-$(CONFIG_ARCH_MXS) += \
imx23-evk.dtb \
imx23-olinuxino.dtb \
+ imx23-sansa.dtb \
imx23-stmp378x_devb.dtb \
+ imx23-xfi3.dtb \
imx28-apf28.dtb \
imx28-apf28dev.dtb \
imx28-apx4devkit.dtb \
@@ -572,7 +590,8 @@ dtb-$(CONFIG_ARCH_PRIMA2) += \
dtb-$(CONFIG_ARCH_OXNAS) += \
wd-mbwe.dtb
dtb-$(CONFIG_ARCH_QCOM) += \
- qcom-apq8064-arrow-db600c.dtb \
+ qcom-apq8060-dragonboard.dtb \
+ qcom-apq8064-arrow-sd-600eval.dtb \
qcom-apq8064-cm-qs600.dtb \
qcom-apq8064-ifc6410.dtb \
qcom-apq8064-sony-xperia-yuga.dtb \
@@ -602,6 +621,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk3066a-rayeager.dtb \
rk3188-radxarock.dtb \
rk3228-evb.dtb \
+ rk3229-evb.dtb \
rk3288-evb-act8846.dtb \
rk3288-evb-rk808.dtb \
rk3288-firefly-beta.dtb \
@@ -638,6 +658,7 @@ dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \
r8a7790-lager.dtb \
r8a7791-koelsch.dtb \
r8a7791-porter.dtb \
+ r8a7792-blanche.dtb \
r8a7793-gose.dtb \
r8a7794-alt.dtb \
r8a7794-silk.dtb \
@@ -728,6 +749,7 @@ dtb-$(CONFIG_MACH_SUN6I) += \
sun6i-a31s-yones-toptech-bs1078-v2.dtb
dtb-$(CONFIG_MACH_SUN7I) += \
sun7i-a20-bananapi.dtb \
+ sun7i-a20-bananapi-m1-plus.dtb \
sun7i-a20-bananapro.dtb \
sun7i-a20-cubieboard2.dtb \
sun7i-a20-cubietruck.dtb \
@@ -752,8 +774,10 @@ dtb-$(CONFIG_MACH_SUN7I) += \
dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a23-evb.dtb \
sun8i-a23-gt90h-v4.dtb \
+ sun8i-a23-inet86dz.dtb \
sun8i-a23-ippo-q8h-v5.dtb \
sun8i-a23-ippo-q8h-v1.2.dtb \
+ sun8i-a23-polaroid-mid2407pxe03.dtb \
sun8i-a23-polaroid-mid2809pxe04.dtb \
sun8i-a23-q8-tablet.dtb \
sun8i-a33-et-q8-v1.6.dtb \
@@ -763,10 +787,12 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a33-sinlinx-sina33.dtb \
sun8i-a83t-allwinner-h8homlet-v2.dtb \
sun8i-a83t-cubietruck-plus.dtb \
+ sun8i-h3-bananapi-m2-plus.dtb \
sun8i-h3-orangepi-2.dtb \
sun8i-h3-orangepi-one.dtb \
sun8i-h3-orangepi-pc.dtb \
- sun8i-h3-orangepi-plus.dtb
+ sun8i-h3-orangepi-plus.dtb \
+ sun8i-r16-parrot.dtb
dtb-$(CONFIG_MACH_SUN9I) += \
sun9i-a80-optimus.dtb \
sun9i-a80-cubieboard4.dtb
@@ -794,6 +820,7 @@ dtb-$(CONFIG_ARCH_TEGRA_114_SOC) += \
tegra114-roth.dtb \
tegra114-tn7.dtb
dtb-$(CONFIG_ARCH_TEGRA_124_SOC) += \
+ tegra124-apalis-eval.dtb \
tegra124-jetson-tk1.dtb \
tegra124-nyan-big.dtb \
tegra124-nyan-blaze.dtb \
diff --git a/arch/arm/boot/dts/aks-cdu.dts b/arch/arm/boot/dts/aks-cdu.dts
index d9c50fbb49d2..5b1bf92d927c 100644
--- a/arch/arm/boot/dts/aks-cdu.dts
+++ b/arch/arm/boot/dts/aks-cdu.dts
@@ -57,7 +57,7 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 0cc150b87b86..e247c15e5176 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -18,6 +18,10 @@
reg = <0x80000000 0x10000000>; /* 256 MB */
};
+ chosen {
+ stdout-path = &uart0;
+ };
+
leds {
pinctrl-names = "default";
pinctrl-0 = <&user_leds_s0>;
@@ -318,7 +322,7 @@
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
- regulator-max-microvolt = <1325000>;
+ regulator-max-microvolt = <1351500>;
regulator-boot-on;
regulator-always-on;
};
@@ -359,12 +363,8 @@
phy-mode = "mii";
};
-&cpsw_emac1 {
- phy_id = <&davinci_mdio>, <1>;
- phy-mode = "mii";
-};
-
&mac {
+ slaves = <1>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 55c0e954b146..ca721670bd91 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -33,6 +33,17 @@
status = "okay";
};
+&cpu0_opp_table {
+ /*
+ * All PG 2.0 silicon may not support 1GHz but some of the early
+ * BeagleBone Blacks have PG 2.0 silicon which is guaranteed
+ * to support 1GHz OPP so enable it for PG 2.0 on this board.
+ */
+ oppnitro@1000000000 {
+ opp-supported-hw = <0x06 0x0100>;
+ };
+};
+
&am33xx_pinmux {
nxp_hdmi_bonelt_pins: nxp_hdmi_bonelt_pins {
pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 516673bb023d..5d28712ad253 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -640,7 +640,7 @@
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912500>;
- regulator-max-microvolt = <1312500>;
+ regulator-max-microvolt = <1351500>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 282fe1b37095..09308d66645b 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -560,7 +560,7 @@
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <912500>;
- regulator-max-microvolt = <1312500>;
+ regulator-max-microvolt = <1351500>;
regulator-boot-on;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index e271013e78a6..7d8b8fefdf08 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -206,6 +206,13 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ pca9536: gpio@41 {
+ compatible = "ti,pca9536";
+ reg = <0x41>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
};
#include "tps65910.dtsi"
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 7fa295155543..98748c61ed99 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -45,19 +45,9 @@
device_type = "cpu";
reg = <0>;
- /*
- * To consider voltage drop between PMIC and SoC,
- * tolerance value is reduced to 2% from 4% and
- * voltage value is increased as a precaution.
- */
- operating-points = <
- /* kHz uV */
- 720000 1285000
- 600000 1225000
- 500000 1125000
- 275000 1125000
- >;
- voltage-tolerance = <2>; /* 2 percentage */
+ operating-points-v2 = <&cpu0_opp_table>;
+ ti,syscon-efuse = <&scm_conf 0x7fc 0x1fff 0>;
+ ti,syscon-rev = <&scm_conf 0x600>;
clocks = <&dpll_mpu_ck>;
clock-names = "cpu";
@@ -66,6 +56,78 @@
};
};
+ cpu0_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+
+ /*
+ * The three following nodes are marked with opp-suspend
+ * because they can not be enabled simultaneously on a
+ * single SoC.
+ */
+ opp50@300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <950000 931000 969000>;
+ opp-supported-hw = <0x06 0x0010>;
+ opp-suspend;
+ };
+
+ opp100@275000000 {
+ opp-hz = /bits/ 64 <275000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x01 0x00FF>;
+ opp-suspend;
+ };
+
+ opp100@300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x06 0x0020>;
+ opp-suspend;
+ };
+
+ opp100@500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ opp100@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x06 0x0040>;
+ };
+
+ opp120@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1200000 1176000 1224000>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ opp120@720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1200000 1176000 1224000>;
+ opp-supported-hw = <0x06 0x0080>;
+ };
+
+ oppturbo@720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1260000 1234800 1285200>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ oppturbo@800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1260000 1234800 1285200>;
+ opp-supported-hw = <0x06 0x0100>;
+ };
+
+ oppnitro@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1325000 1298500 1351500>;
+ opp-supported-hw = <0x04 0x0200>;
+ };
+ };
+
pmu {
compatible = "arm,cortex-a8-pmu";
interrupts = <3>;
@@ -187,7 +249,7 @@
reg = <0x49000000 0x10000>;
reg-names = "edma3_cc";
interrupts = <12 13 14>;
- interrupt-names = "edma3_ccint", "emda3_mperr",
+ interrupt-names = "edma3_ccint", "edma3_mperr",
"edma3_ccerrint";
dma-requests = <64>;
#dma-cells = <2>;
@@ -679,20 +741,24 @@
0x48300200 0x48300200 0x80>; /* EHRPWM */
ecap0: ecap@48300100 {
- compatible = "ti,am33xx-ecap";
+ compatible = "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48300100 0x80>;
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
interrupts = <31>;
interrupt-names = "ecap0";
- ti,hwmods = "ecap0";
status = "disabled";
};
ehrpwm0: pwm@48300200 {
- compatible = "ti,am33xx-ehrpwm";
+ compatible = "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48300200 0x80>;
- ti,hwmods = "ehrpwm0";
+ clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -709,20 +775,24 @@
0x48302200 0x48302200 0x80>; /* EHRPWM */
ecap1: ecap@48302100 {
- compatible = "ti,am33xx-ecap";
+ compatible = "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48302100 0x80>;
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
interrupts = <47>;
interrupt-names = "ecap1";
- ti,hwmods = "ecap1";
status = "disabled";
};
ehrpwm1: pwm@48302200 {
- compatible = "ti,am33xx-ehrpwm";
+ compatible = "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48302200 0x80>;
- ti,hwmods = "ehrpwm1";
+ clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -739,20 +809,24 @@
0x48304200 0x48304200 0x80>; /* EHRPWM */
ecap2: ecap@48304100 {
- compatible = "ti,am33xx-ecap";
+ compatible = "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48304100 0x80>;
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
interrupts = <61>;
interrupt-names = "ecap2";
- ti,hwmods = "ecap2";
status = "disabled";
};
ehrpwm2: pwm@48304200 {
- compatible = "ti,am33xx-ehrpwm";
+ compatible = "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48304200 0x80>;
- ti,hwmods = "ehrpwm2";
+ clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/am3517-craneboard.dts b/arch/arm/boot/dts/am3517-craneboard.dts
index cb7de1d4e05f..f9d8f3948c4a 100644
--- a/arch/arm/boot/dts/am3517-craneboard.dts
+++ b/arch/arm/boot/dts/am3517-craneboard.dts
@@ -20,7 +20,7 @@
reg = <0x80000000 0x10000000>; /* 256 MB */
};
- vbat: fixedregulator@0 {
+ vbat: fixedregulator {
compatible = "regulator-fixed";
regulator-name = "vbat";
regulator-min-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index cd81ecf12731..0fadae5396e1 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -44,10 +44,49 @@
clocks = <&dpll_mpu_ck>;
clock-names = "cpu";
+ operating-points-v2 = <&cpu0_opp_table>;
+ ti,syscon-efuse = <&scm_conf 0x610 0x3f 0>;
+ ti,syscon-rev = <&scm_conf 0x600>;
+
clock-latency = <300000>; /* From omap-cpufreq driver */
};
};
+ cpu0_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+
+ opp50@300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <950000 931000 969000>;
+ opp-supported-hw = <0xFF 0x01>;
+ opp-suspend;
+ };
+
+ opp100@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0xFF 0x04>;
+ };
+
+ opp120@720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1200000 1176000 1224000>;
+ opp-supported-hw = <0xFF 0x08>;
+ };
+
+ oppturbo@800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1260000 1234800 1285200>;
+ opp-supported-hw = <0xFF 0x10>;
+ };
+
+ oppnitro@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1325000 1298500 1351500>;
+ opp-supported-hw = <0xFF 0x20>;
+ };
+ };
+
gic: interrupt-controller@48241000 {
compatible = "arm,cortex-a9-gic";
interrupt-controller;
@@ -199,7 +238,7 @@
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_ccint", "emda3_mperr",
+ interrupt-names = "edma3_ccint", "edma3_mperr",
"edma3_ccerrint";
dma-requests = <64>;
#dma-cells = <2>;
@@ -671,18 +710,24 @@
status = "disabled";
ecap0: ecap@48300100 {
- compatible = "ti,am4372-ecap","ti,am33xx-ecap";
+ compatible = "ti,am4372-ecap",
+ "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48300100 0x80>;
- ti,hwmods = "ecap0";
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
status = "disabled";
};
ehrpwm0: pwm@48300200 {
- compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ compatible = "ti,am4372-ehrpwm",
+ "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48300200 0x80>;
- ti,hwmods = "ehrpwm0";
+ clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -697,18 +742,24 @@
status = "disabled";
ecap1: ecap@48302100 {
- compatible = "ti,am4372-ecap","ti,am33xx-ecap";
+ compatible = "ti,am4372-ecap",
+ "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48302100 0x80>;
- ti,hwmods = "ecap1";
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
status = "disabled";
};
ehrpwm1: pwm@48302200 {
- compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ compatible = "ti,am4372-ehrpwm",
+ "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48302200 0x80>;
- ti,hwmods = "ehrpwm1";
+ clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -723,18 +774,24 @@
status = "disabled";
ecap2: ecap@48304100 {
- compatible = "ti,am4372-ecap","ti,am33xx-ecap";
+ compatible = "ti,am4372-ecap",
+ "ti,am3352-ecap",
+ "ti,am33xx-ecap";
#pwm-cells = <3>;
reg = <0x48304100 0x80>;
- ti,hwmods = "ecap2";
+ clocks = <&l4ls_gclk>;
+ clock-names = "fck";
status = "disabled";
};
ehrpwm2: pwm@48304200 {
- compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ compatible = "ti,am4372-ehrpwm",
+ "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48304200 0x80>;
- ti,hwmods = "ehrpwm2";
+ clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -749,10 +806,13 @@
status = "disabled";
ehrpwm3: pwm@48306200 {
- compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ compatible = "ti,am4372-ehrpwm",
+ "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48306200 0x80>;
- ti,hwmods = "ehrpwm3";
+ clocks = <&ehrpwm3_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -767,10 +827,13 @@
status = "disabled";
ehrpwm4: pwm@48308200 {
- compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ compatible = "ti,am4372-ehrpwm",
+ "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x48308200 0x80>;
- ti,hwmods = "ehrpwm4";
+ clocks = <&ehrpwm4_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -785,10 +848,13 @@
status = "disabled";
ehrpwm5: pwm@4830a200 {
- compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm";
+ compatible = "ti,am4372-ehrpwm",
+ "ti,am3352-ehrpwm",
+ "ti,am33xx-ehrpwm";
#pwm-cells = <3>;
reg = <0x4830a200 0x80>;
- ti,hwmods = "ehrpwm5";
+ clocks = <&ehrpwm5_tbclk>, <&l4ls_gclk>;
+ clock-names = "tbclk", "fck";
status = "disabled";
};
};
@@ -842,6 +908,13 @@
dma-names = "tx", "rx";
};
+ rng: rng@48310000 {
+ compatible = "ti,omap4-rng";
+ ti,hwmods = "rng";
+ reg = <0x48310000 0x2000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
mcasp0: mcasp@48038000 {
compatible = "ti,am33xx-mcasp-audio";
ti,hwmods = "mcasp0";
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 5bcd3aa025bc..14677d599595 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -897,7 +897,7 @@
pinctrl-0 = <&dss_pins>;
port {
- dpi_out: endpoint@0 {
+ dpi_out: endpoint {
remote-endpoint = <&lcd_in>;
data-lines = <24>;
};
@@ -975,3 +975,7 @@
clock-names = "ext-clk", "int-clk";
status = "okay";
};
+
+&cpu {
+ cpu0-supply = <&dcdc2>;
+};
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index 76dcfc6d5f0d..12a69518383e 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -382,6 +382,7 @@
};
&mac {
+ slaves = <1>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
diff --git a/arch/arm/boot/dts/am437x-sbc-t43.dts b/arch/arm/boot/dts/am437x-sbc-t43.dts
index 5f750c0ed6c9..d23260d3a581 100644
--- a/arch/arm/boot/dts/am437x-sbc-t43.dts
+++ b/arch/arm/boot/dts/am437x-sbc-t43.dts
@@ -145,7 +145,7 @@
pinctrl-0 = <&dss_pinctrl_default>;
port {
- dpi_lcd_out: endpoint@0 {
+ dpi_lcd_out: endpoint {
remote-endpoint = <&lcd_in>;
data-lines = <24>;
};
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 3549b8c9ac49..ad32e55532f8 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -754,7 +754,7 @@
pinctrl-0 = <&dss_pins>;
port {
- dpi_out: endpoint@0 {
+ dpi_out: endpoint {
remote-endpoint = <&lcd_in>;
data-lines = <24>;
};
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index 7630ba1d89e4..d1d73b725f47 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -104,6 +104,14 @@
clock-div = <1>;
};
+ rng_fck: rng_fck {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&sys_clkin_ck>;
+ clock-mult = <1>;
+ clock-div = <1>;
+ };
+
ehrpwm0_tbclk: ehrpwm0_tbclk@664 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
diff --git a/arch/arm/boot/dts/am57xx-sbc-am57x.dts b/arch/arm/boot/dts/am57xx-sbc-am57x.dts
index 988e99632d49..31f9be632406 100644
--- a/arch/arm/boot/dts/am57xx-sbc-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-sbc-am57x.dts
@@ -128,7 +128,7 @@
vdda_video-supply = <&ldoln_reg>;
port {
- dpi_lcd_out: endpoint@0 {
+ dpi_lcd_out: endpoint {
remote-endpoint = <&lcd_in>;
data-lines = <24>;
};
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts
index 0962f2fa3f6e..9cc372b9fb9b 100644
--- a/arch/arm/boot/dts/animeo_ip.dts
+++ b/arch/arm/boot/dts/animeo_ip.dts
@@ -32,15 +32,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <18432000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -114,7 +105,7 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
atmel,vbus-gpio = <&pioB 15 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
index c60206efb583..2e0556af6e5e 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
@@ -239,22 +239,6 @@
status = "okay";
};
- mdio@72004 {
- pinctrl-0 = <&mdio_pins>;
- pinctrl-names = "default";
-
- phy_dedicated: ethernet-phy@0 {
- /*
- * Annoyingly, the marvell phy driver
- * configures the LED register, rather
- * than preserving reset-loaded setting.
- * We undo that rubbish here.
- */
- marvell,reg-init = <3 16 0 0x101e>;
- reg = <0>;
- };
- };
-
pinctrl@18000 {
clearfog_dsa0_clk_pins: clearfog-dsa0-clk-pins {
marvell,pins = "mpp46";
diff --git a/arch/arm/boot/dts/at91-ariag25.dts b/arch/arm/boot/dts/at91-ariag25.dts
index e9ced30159a7..4da011a7a698 100644
--- a/arch/arm/boot/dts/at91-ariag25.dts
+++ b/arch/arm/boot/dts/at91-ariag25.dts
@@ -34,15 +34,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -178,7 +169,7 @@
};
- onewire@0 {
+ onewire {
compatible = "w1-gpio";
gpios = <&pioA 21 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-cosino.dtsi b/arch/arm/boot/dts/at91-cosino.dtsi
index b6ea3f4a7206..02d8ef43de3a 100644
--- a/arch/arm/boot/dts/at91-cosino.dtsi
+++ b/arch/arm/boot/dts/at91-cosino.dtsi
@@ -26,15 +26,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts
index 6bf873e7d96c..50d5e719b451 100644
--- a/arch/arm/boot/dts/at91-foxg20.dts
+++ b/arch/arm/boot/dts/at91-foxg20.dts
@@ -23,15 +23,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <18432000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -128,13 +119,13 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
};
- i2c@0 {
+ i2c-gpio-0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
i2c-gpio,delay-us = <5>; /* ~85 kHz */
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index 229e989eb60d..b4f147c193fd 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -54,7 +54,7 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <1>;
status = "okay";
};
@@ -96,7 +96,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
rtc: pcf8563@51 {
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts
index 4f2eebf4a560..8f019184fccf 100644
--- a/arch/arm/boot/dts/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/at91-qil_a9260.dts
@@ -20,15 +20,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -111,7 +102,7 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
@@ -187,7 +178,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/at91-sam9_l9260.dts b/arch/arm/boot/dts/at91-sam9_l9260.dts
new file mode 100644
index 000000000000..171243ca4f2f
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sam9_l9260.dts
@@ -0,0 +1,121 @@
+/*
+ * at91-sam9_l9260.dts - Device Tree file for Olimex SAM9-L9260 board
+ *
+ * Copyright (C) 2016 Raashid Muhammed <raashidmuhammed@zilogic.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+/dts-v1/;
+#include "at91sam9260.dtsi"
+
+/ {
+ model = "Olimex sam9-l9260";
+ compatible = "olimex,sam9-l9260", "atmel,at91sam9260", "atmel,at91sam9";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ reg = <0x20000000 0x4000000>;
+ };
+
+ clocks {
+ slow_xtal {
+ clock-frequency = <32768>;
+ };
+
+ main_xtal {
+ clock-frequency = <18432000>;
+ };
+ };
+
+ ahb {
+ apb {
+ mmc0: mmc@fffa8000 {
+ pinctrl-0 = <
+ &pinctrl_board_mmc0
+ &pinctrl_mmc0_clk
+ &pinctrl_mmc0_slot1_cmd_dat0
+ &pinctrl_mmc0_slot1_dat1_3>;
+ status = "okay";
+
+ slot@1 {
+ reg = <1>;
+ bus-width = <4>;
+ cd-gpios = <&pioC 8 GPIO_ACTIVE_HIGH>;
+ wp-gpios = <&pioC 4 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ macb0: ethernet@fffc4000 {
+ pinctrl-0 = <&pinctrl_macb_rmii &pinctrl_macb_rmii_mii_alt>;
+ phy-mode = "mii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ };
+ };
+
+ spi0: spi@fffc8000 {
+ cs-gpios = <&pioC 11 0>, <0>, <0>, <0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <15000000>;
+ reg = <0>;
+ };
+ };
+
+ dbgu: serial@fffff200 {
+ status = "okay";
+ };
+
+ pinctrl@fffff400 {
+ mmc0 {
+ pinctrl_board_mmc0: mmc0-board {
+ atmel,pins =
+ <AT91_PIOC 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH /* CD pin */
+ AT91_PIOC 4 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP>; /* WP pin */
+ };
+ };
+ };
+ };
+
+ nand0: nand@40000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "soft";
+ nand-on-flash-bbt = <1>;
+ status = "okay";
+ };
+
+ usb0: ohci@500000 {
+ status = "okay";
+ };
+
+ };
+
+ i2c-gpio-0 {
+ status = "okay";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ pwr_led {
+ label = "sam9-l9260:yellow:pwr";
+ gpios = <&pioA 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu0";
+ };
+
+ status_led {
+ label = "sam9-l9260:green:status";
+ gpios = <&pioA 6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "timer";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index eb4f1ac96271..0b9a59d5fdac 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -158,56 +158,64 @@
i2c-sda-hold-time-ns = <350>;
status = "okay";
- pmic: act8865@5b {
- compatible = "active-semi,act8865";
+ pmic@5b {
+ compatible = "active-semi,act8945a";
reg = <0x5b>;
active-semi,vsel-high;
+ active-semi,chglev-gpios = <&pioA 12 GPIO_ACTIVE_HIGH>;
+ active-semi,lbo-gpios = <&pioA 72 GPIO_ACTIVE_LOW>;
+ active-semi,irq_gpios = <&pioA 45 GPIO_ACTIVE_LOW>;
+ active-semi,input-voltage-threshold-microvolt = <6600>;
+ active-semi,precondition-timeout = <40>;
+ active-semi,total-timeout = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_charger_chglev &pinctrl_charger_lbo &pinctrl_charger_irq>;
status = "okay";
regulators {
- vdd_1v35_reg: DCDC_REG1 {
+ vdd_1v35_reg: REG_DCDC1 {
regulator-name = "VDD_1V35";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
};
- vdd_1v2_reg: DCDC_REG2 {
+ vdd_1v2_reg: REG_DCDC2 {
regulator-name = "VDD_1V2";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
regulator-always-on;
};
- vdd_3v3_reg: DCDC_REG3 {
+ vdd_3v3_reg: REG_DCDC3 {
regulator-name = "VDD_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
- vdd_fuse_reg: LDO_REG1 {
+ vdd_fuse_reg: REG_LDO1 {
regulator-name = "VDD_FUSE";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
};
- vdd_3v3_lp_reg: LDO_REG2 {
+ vdd_3v3_lp_reg: REG_LDO2 {
regulator-name = "VDD_3V3_LP";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
- vdd_led_reg: LDO_REG3 {
+ vdd_led_reg: REG_LDO3 {
regulator-name = "VDD_LED";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
- vdd_sdhc_1v8_reg: LDO_REG4 {
+ vdd_sdhc_1v8_reg: REG_LDO4 {
regulator-name = "VDD_SDHC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -309,6 +317,21 @@
bias-disable;
};
+ pinctrl_charger_chglev: charger_chglev {
+ pinmux = <PIN_PA12__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_charger_irq: charger_irq {
+ pinmux = <PIN_PB13__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_charger_lbo: charger_lbo {
+ pinmux = <PIN_PC8__GPIO>;
+ bias-pull-up;
+ };
+
pinctrl_flx0_default: flx0_default {
pinmux = <PIN_PB28__FLEXCOM0_IO0>,
<PIN_PB29__FLEXCOM0_IO1>;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index f3e2b96c06a3..c51fc652f6c7 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -297,7 +297,7 @@
};
};
- vcc_mmc0_reg: fixedregulator@0 {
+ vcc_mmc0_reg: fixedregulator_mmc0 {
compatible = "regulator-fixed";
gpio = <&pioE 2 GPIO_ACTIVE_LOW>;
regulator-name = "mmc0-card-supply";
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
index e7b2109fc85a..a92c6e0ca854 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
@@ -20,8 +20,11 @@
};
clocks {
- main_clock: main_clock {
- compatible = "atmel,osc", "fixed-clock";
+ slow_xtal {
+ clock-frequency = <32768>;
+ };
+
+ main_xtal {
clock-frequency = <12000000>;
};
@@ -106,7 +109,7 @@
};
};
- vcc_3v3_reg: fixedregulator@0 {
+ vcc_3v3_reg: fixedregulator_3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC 3V3";
regulator-min-microvolt = <3300000>;
@@ -115,7 +118,7 @@
regulator-always-on;
};
- vcc_mmc0_reg: fixedregulator@1 {
+ vcc_mmc0_reg: fixedregulator_mmc0 {
compatible = "regulator-fixed";
gpio = <&pioE 15 GPIO_ACTIVE_HIGH>;
regulator-name = "RST_n MCI0";
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts b/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
index abaaba58fbec..eac4ea2744cc 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
@@ -159,7 +159,7 @@
};
};
- vcc_mmc1_reg: fixedregulator@2 {
+ vcc_mmc1_reg: fixedregulator_mmc1 {
compatible = "regulator-fixed";
gpio = <&pioE 17 GPIO_ACTIVE_LOW>;
regulator-name = "VDD MCI1";
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index da84e65b56ef..ed7fce297738 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -252,7 +252,7 @@
};
};
- vcc_3v3_reg: fixedregulator@0 {
+ vcc_3v3_reg: fixedregulator_3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC 3V3";
regulator-min-microvolt = <3300000>;
@@ -261,7 +261,7 @@
regulator-always-on;
};
- vcc_mmc1_reg: fixedregulator@1 {
+ vcc_mmc1_reg: fixedregulator_mmc1 {
compatible = "regulator-fixed";
gpio = <&pioE 4 GPIO_ACTIVE_LOW>;
regulator-name = "VDD MCI1";
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 4e98cda97403..f8b96cef5e1a 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -69,26 +69,6 @@
ahb {
apb {
- lcd_bus@f0000000 {
- status = "okay";
-
- lcd@f0000000 {
- status = "okay";
- };
-
- lcdovl1@f0000140 {
- status = "okay";
- };
-
- lcdovl2@f0000240 {
- status = "okay";
- };
-
- lcdheo1@f0000340 {
- status = "okay";
- };
- };
-
adc0: adc@fc034000 {
pinctrl-names = "default";
pinctrl-0 = <
diff --git a/arch/arm/boot/dts/at91-vinco.dts b/arch/arm/boot/dts/at91-vinco.dts
index 6a366ee952a8..e0c0b2897a49 100644
--- a/arch/arm/boot/dts/at91-vinco.dts
+++ b/arch/arm/boot/dts/at91-vinco.dts
@@ -245,7 +245,7 @@
};
- vcc_3v3_reg: fixedregulator@0 {
+ vcc_3v3_reg: fixedregulator_3v3 {
compatible = "regulator-fixed";
regulator-name = "VCC 3V3";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index f6cb7a80a2f5..4e913c2ccb79 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -948,7 +948,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioA 25 GPIO_ACTIVE_HIGH /* sda */
&pioA 26 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index d4884dd1c243..a3e363d79122 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -938,25 +938,21 @@
atmel,adc-res-names = "lowres", "highres";
atmel,adc-use-res = "highres";
- trigger@0 {
- reg = <0>;
+ trigger0 {
trigger-name = "timer-counter-0";
trigger-value = <0x1>;
};
- trigger@1 {
- reg = <1>;
+ trigger1 {
trigger-name = "timer-counter-1";
trigger-value = <0x3>;
};
- trigger@2 {
- reg = <2>;
+ trigger2 {
trigger-name = "timer-counter-2";
trigger-value = <0x5>;
};
- trigger@3 {
- reg = <3>;
+ trigger3 {
trigger-name = "external";
trigger-value = <0xd>;
trigger-external;
@@ -1007,7 +1003,7 @@
status = "disabled";
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x100000>;
interrupts = <20 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -1017,7 +1013,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioA 23 GPIO_ACTIVE_HIGH /* sda */
&pioA 24 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
new file mode 100644
index 000000000000..2c87f58448e7
--- /dev/null
+++ b/arch/arm/boot/dts/at91sam9260ek.dts
@@ -0,0 +1,211 @@
+/*
+ * Device Tree file for Atmel at91sam9260 Evaluation Kit
+ *
+ * Copyright (C) 2016 Atmel,
+ * 2016 Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/dts-v1/;
+#include "at91sam9260.dtsi"
+
+/ {
+ model = "Atmel at91sam9260ek";
+ compatible = "atmel,at91sam9260ek", "atmel,at91sam9260", "atmel,at91sam9";
+
+ chosen {
+ stdout-path = &dbgu;
+ };
+
+ memory {
+ reg = <0x20000000 0x4000000>;
+ };
+
+ clocks {
+ slow_xtal {
+ clock-frequency = <32768>;
+ };
+
+ main_xtal {
+ clock-frequency = <18432000>;
+ };
+ };
+
+ ahb {
+ apb {
+ usb1: gadget@fffa4000 {
+ atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ mmc0: mmc@fffa8000 {
+ pinctrl-0 = <
+ &pinctrl_board_mmc0_slot1
+ &pinctrl_mmc0_clk
+ &pinctrl_mmc0_slot1_cmd_dat0
+ &pinctrl_mmc0_slot1_dat1_3>;
+ status = "okay";
+ slot@1 {
+ reg = <1>;
+ bus-width = <4>;
+ cd-gpios = <&pioC 9 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ usart0: serial@fffb0000 {
+ pinctrl-0 =
+ <&pinctrl_usart0
+ &pinctrl_usart0_rts
+ &pinctrl_usart0_cts
+ &pinctrl_usart0_dtr_dsr
+ &pinctrl_usart0_dcd
+ &pinctrl_usart0_ri>;
+ status = "okay";
+ };
+
+ usart1: serial@fffb4000 {
+ status = "okay";
+ };
+
+ ssc0: ssc@fffbc000 {
+ status = "okay";
+ pinctrl-0 = <&pinctrl_ssc0_tx>;
+ };
+
+ macb0: ethernet@fffc4000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
+ spi0: spi@fffc8000 {
+ cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
+ mtd_dataflash@0 {
+ compatible = "atmel,at45", "atmel,dataflash";
+ spi-max-frequency = <50000000>;
+ reg = <1>;
+ };
+ };
+
+ dbgu: serial@fffff200 {
+ status = "okay";
+ };
+
+ pinctrl@fffff400 {
+ board {
+ pinctrl_board_mmc0_slot1: mmc0_slot1-board {
+ atmel,pins =
+ <AT91_PIOC 9 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+ };
+ };
+
+ shdwc@fffffd10 {
+ atmel,wakeup-counter = <10>;
+ atmel,wakeup-rtt-timer;
+ };
+
+ rtc@fffffd20 {
+ atmel,rtt-rtc-time-reg = <&gpbr 0x0>;
+ status = "okay";
+ };
+
+ watchdog@fffffd40 {
+ status = "okay";
+ };
+
+ gpbr: syscon@fffffd50 {
+ status = "okay";
+ };
+ };
+
+ usb0: ohci@500000 {
+ num-ports = <2>;
+ status = "okay";
+ };
+
+ nand0: nand@40000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "soft";
+ nand-on-flash-bbt;
+ status = "okay";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ btn3 {
+ label = "Button 3";
+ gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
+ linux,code = <0x103>;
+ gpio-key,wakeup;
+ };
+
+ btn4 {
+ label = "Button 4";
+ gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
+ linux,code = <0x104>;
+ gpio-key,wakeup;
+ };
+ };
+
+ i2c-gpio-0 {
+ status = "okay";
+
+ 24c512@50 {
+ compatible = "24c512";
+ reg = <0x50>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ ds1 {
+ label = "ds1";
+ gpios = <&pioA 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ ds5 {
+ label = "ds5";
+ gpios = <&pioA 6 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index 5e09de4eb9cd..32752d7883f1 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -860,7 +860,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c_bitbang>;
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 93446420af25..aeb1a36373f4 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -1019,7 +1019,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioB 4 GPIO_ACTIVE_HIGH /* sda */
&pioB 5 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 59df9d73d276..127cc42e9e29 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -215,7 +215,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
24c512@50 {
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index e9cc99b6353a..27847a47c108 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -170,13 +170,13 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
24c512@50 {
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 707fd4ea58f5..91a71774472e 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -26,7 +26,24 @@
i2c0: i2c@f8010000 {
ov2640: camera@0x30 {
+ compatible = "ovti,ov2640";
+ reg = <0x30>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
+ resetb-gpios = <&pioA 7 GPIO_ACTIVE_LOW>;
+ pwdn-gpios = <&pioA 13 GPIO_ACTIVE_HIGH>;
+ clocks = <&pck0>;
+ clock-names = "xvclk";
+ assigned-clocks = <&pck0>;
+ assigned-clock-rates = <25000000>;
status = "okay";
+
+ port {
+ ov2640_0: endpoint {
+ remote-endpoint = <&isi_0>;
+ bus-width = <8>;
+ };
+ };
};
};
@@ -37,6 +54,15 @@
isi: isi@f8048000 {
status = "okay";
+ port {
+ isi_0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ov2640_0>;
+ bus-width = <8>;
+ vsync-active = <1>;
+ hsync-active = <1>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 8837b7e4292c..b3501ae2a3bd 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -1044,28 +1044,24 @@
atmel,adc-res-names = "lowres", "highres";
atmel,adc-use-res = "highres";
- trigger@0 {
- reg = <0>;
+ trigger0 {
trigger-name = "external-rising";
trigger-value = <0x1>;
trigger-external;
};
- trigger@1 {
- reg = <1>;
+ trigger1 {
trigger-name = "external-falling";
trigger-value = <0x2>;
trigger-external;
};
- trigger@2 {
- reg = <2>;
+ trigger2 {
trigger-name = "external-any";
trigger-value = <0x3>;
trigger-external;
};
- trigger@3 {
- reg = <3>;
+ trigger3 {
trigger-name = "continuous";
trigger-value = <0x6>;
};
@@ -1169,13 +1165,13 @@
clock-names = "pclk", "hclk";
status = "disabled";
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -1183,7 +1179,7 @@
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -1191,21 +1187,21 @@
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1213,7 +1209,7 @@
atmel,can-isoc;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1320,7 +1316,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioA 20 GPIO_ACTIVE_HIGH /* sda */
&pioA 21 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 95569a87b6c9..3b3eb3edcb47 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -1030,7 +1030,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioA 30 GPIO_ACTIVE_HIGH /* sda */
&pioA 31 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index 6d829db4e887..70adf940d98c 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -265,25 +265,21 @@
atmel,adc-res-names = "lowres", "highres";
atmel,adc-use-res = "highres";
- trigger@0 {
- reg = <0>;
+ trigger0 {
trigger-name = "timer-counter-0";
trigger-value = <0x1>;
};
- trigger@1 {
- reg = <1>;
+ trigger1 {
trigger-name = "timer-counter-1";
trigger-value = <0x3>;
};
- trigger@2 {
- reg = <2>;
+ trigger2 {
trigger-name = "timer-counter-2";
trigger-value = <0x5>;
};
- trigger@3 {
- reg = <3>;
+ trigger3 {
trigger-name = "external";
trigger-value = <0x13>;
trigger-external;
@@ -301,13 +297,13 @@
clock-names = "pclk", "hclk";
status = "disabled";
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -315,7 +311,7 @@
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -323,21 +319,21 @@
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -345,7 +341,7 @@
atmel,can-isoc;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1093,7 +1089,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioA 23 GPIO_ACTIVE_HIGH>, /* sda */
<&pioA 24 GPIO_ACTIVE_HIGH>; /* scl */
@@ -1107,7 +1103,7 @@
status = "disabled";
};
- i2c@1 {
+ i2c-gpio-1 {
compatible = "i2c-gpio";
gpios = <&pioD 10 GPIO_ACTIVE_HIGH>, /* sda */
<&pioD 11 GPIO_ACTIVE_HIGH>; /* scl */
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index f10566f759cd..2e567d90fba8 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -227,11 +227,11 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
- i2c@1 {
+ i2c-gpio-1 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index cd0cd5fd09a3..ed4e4bd8a8f1 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1048,29 +1048,25 @@
atmel,adc-res-names = "lowres", "highres";
atmel,adc-use-res = "highres";
- trigger@0 {
- reg = <0>;
+ trigger0 {
trigger-name = "external-rising";
trigger-value = <0x1>;
trigger-external;
};
- trigger@1 {
- reg = <1>;
+ trigger1 {
trigger-name = "external-falling";
trigger-value = <0x2>;
trigger-external;
};
- trigger@2 {
- reg = <2>;
+ trigger2 {
trigger-name = "external-any";
trigger-value = <0x3>;
trigger-external;
};
- trigger@3 {
- reg = <3>;
+ trigger3 {
trigger-name = "continuous";
trigger-value = <0x6>;
};
@@ -1119,13 +1115,13 @@
clock-names = "hclk", "pclk";
status = "disabled";
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -1133,7 +1129,7 @@
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -1141,21 +1137,21 @@
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
atmel,can-dma;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1163,7 +1159,7 @@
atmel,can-isoc;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1242,7 +1238,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
compatible = "i2c-gpio";
gpios = <&pioA 30 GPIO_ACTIVE_HIGH /* sda */
&pioA 31 GPIO_ACTIVE_HIGH /* scl */
@@ -1257,7 +1253,7 @@
status = "disabled";
};
- i2c@1 {
+ i2c-gpio-1 {
compatible = "i2c-gpio";
gpios = <&pioC 0 GPIO_ACTIVE_HIGH /* sda */
&pioC 1 GPIO_ACTIVE_HIGH /* scl */
@@ -1272,7 +1268,7 @@
status = "disabled";
};
- i2c@2 {
+ i2c-gpio-2 {
compatible = "i2c-gpio";
gpios = <&pioB 4 GPIO_ACTIVE_HIGH /* sda */
&pioB 5 GPIO_ACTIVE_HIGH /* scl */
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 52425a4ca97e..696b8ba064a6 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -60,18 +60,6 @@
status = "okay";
};
- isi: isi@f8048000 {
- status = "disabled";
- port {
- isi_0: endpoint@0 {
- remote-endpoint = <&ov2640_0>;
- bus-width = <8>;
- vsync-active = <1>;
- hsync-active = <1>;
- };
- };
- };
-
i2c0: i2c@f8010000 {
status = "okay";
@@ -79,27 +67,6 @@
compatible = "wm8731";
reg = <0x1a>;
};
-
- ov2640: camera@0x30 {
- compatible = "ovti,ov2640";
- reg = <0x30>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pck0_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
- resetb-gpios = <&pioA 7 GPIO_ACTIVE_LOW>;
- pwdn-gpios = <&pioA 13 GPIO_ACTIVE_HIGH>;
- clocks = <&pck0>;
- clock-names = "xvclk";
- assigned-clocks = <&pck0>;
- assigned-clock-rates = <25000000>;
- status = "disabled";
-
- port {
- ov2640_0: endpoint {
- remote-endpoint = <&isi_0>;
- bus-width = <8>;
- };
- };
- };
};
adc0: adc@f804c000 {
diff --git a/arch/arm/boot/dts/axp209.dtsi b/arch/arm/boot/dts/axp209.dtsi
index 051ab3ba9a65..afbe89c01df5 100644
--- a/arch/arm/boot/dts/axp209.dtsi
+++ b/arch/arm/boot/dts/axp209.dtsi
@@ -87,6 +87,7 @@
reg_ldo5: ldo5 {
regulator-name = "ldo5";
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/axp22x.dtsi b/arch/arm/boot/dts/axp22x.dtsi
index 76302f58c478..458b6681e3ec 100644
--- a/arch/arm/boot/dts/axp22x.dtsi
+++ b/arch/arm/boot/dts/axp22x.dtsi
@@ -126,10 +126,12 @@
reg_ldo_io0: ldo_io0 {
regulator-name = "ldo_io0";
+ status = "disabled";
};
reg_ldo_io1: ldo_io1 {
regulator-name = "ldo_io1";
+ status = "disabled";
};
reg_rtc_ldo: rtc_ldo {
@@ -139,5 +141,15 @@
regulator-max-microvolt = <3000000>;
regulator-name = "rtc_ldo";
};
+
+ reg_drivevbus: drivevbus {
+ regulator-name = "drivevbus";
+ status = "disabled";
+ };
+ };
+
+ usb_power_supply: usb_power_supply {
+ compatible = "x-powers,axp221-usb-power-supply";
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/axp809.dtsi b/arch/arm/boot/dts/axp809.dtsi
new file mode 100644
index 000000000000..ab8e5f2d9246
--- /dev/null
+++ b/arch/arm/boot/dts/axp809.dtsi
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * AXP809 Integrated Power Management Chip
+ */
+
+&axp809 {
+ compatible = "x-powers,axp809";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+};
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index b42fe5596b94..fabc9f36c408 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -366,5 +366,16 @@
interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
+
+ adc: adc@180a6000 {
+ compatible = "brcm,iproc-static-adc";
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+ adc-syscon = <&ts_adc_syscon>;
+ clocks = <&asiu_clks BCM_CYGNUS_ASIU_ADC_CLK>;
+ clock-names = "tsc_clk";
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 6a40ed7d0502..c3bf7d23f136 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -57,7 +57,7 @@
compatible = "arm,cortex-a9";
next-level-cache = <&L2>;
enable-method = "brcm,bcm-nsp-smp";
- secondary-boot-reg = <0xffff042c>;
+ secondary-boot-reg = <0xffff0fec>;
reg = <0x1>;
};
};
@@ -192,6 +192,23 @@
status = "disabled";
};
+ dma@20000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x20000 0x1000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&iprocslow>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ };
+
nand: nand@26000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1";
reg = <0x026000 0x600>,
@@ -337,6 +354,18 @@
ranges = <0x82000000 0 0x08000000 0x08000000 0 0x8000000>;
status = "disabled";
+
+ msi-parent = <&msi0>;
+ msi0: msi@18012000 {
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
+ <GIC_SPI 128 IRQ_TYPE_NONE>,
+ <GIC_SPI 129 IRQ_TYPE_NONE>,
+ <GIC_SPI 130 IRQ_TYPE_NONE>;
+ brcm,pcie-msi-inten;
+ };
};
pcie1: pcie@18013000 {
@@ -361,6 +390,18 @@
ranges = <0x82000000 0 0x40000000 0x40000000 0 0x8000000>;
status = "disabled";
+
+ msi-parent = <&msi1>;
+ msi1: msi@18013000 {
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
+ <GIC_SPI 134 IRQ_TYPE_NONE>,
+ <GIC_SPI 135 IRQ_TYPE_NONE>,
+ <GIC_SPI 136 IRQ_TYPE_NONE>;
+ brcm,pcie-msi-inten;
+ };
};
pcie2: pcie@18014000 {
@@ -385,5 +426,17 @@
ranges = <0x82000000 0 0x48000000 0x48000000 0 0x8000000>;
status = "disabled";
+
+ msi-parent = <&msi2>;
+ msi2: msi@18014000 {
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
+ <GIC_SPI 140 IRQ_TYPE_NONE>,
+ <GIC_SPI 141 IRQ_TYPE_NONE>,
+ <GIC_SPI 142 IRQ_TYPE_NONE>;
+ brcm,pcie-msi-inten;
+ };
};
};
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index 3dc7a8cc5812..18045c38bcf1 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -30,7 +30,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "brcm,bcm11351-cpu-method";
cpu0: cpu@0 {
device_type = "cpu";
@@ -41,6 +40,7 @@
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
+ enable-method = "brcm,bcm11351-cpu-method";
secondary-boot-reg = <0x3500417c>;
reg = <1>;
};
diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi
index 3f525be28fd0..6dde95f21cef 100644
--- a/arch/arm/boot/dts/bcm21664.dtsi
+++ b/arch/arm/boot/dts/bcm21664.dtsi
@@ -30,7 +30,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- enable-method = "brcm,bcm11351-cpu-method";
cpu0: cpu@0 {
device_type = "cpu";
@@ -41,6 +40,7 @@
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a9";
+ enable-method = "brcm,bcm11351-cpu-method";
secondary-boot-reg = <0x35004178>;
reg = <1>;
};
diff --git a/arch/arm/boot/dts/bcm23550-sparrow.dts b/arch/arm/boot/dts/bcm23550-sparrow.dts
new file mode 100644
index 000000000000..4d525ccb48c8
--- /dev/null
+++ b/arch/arm/boot/dts/bcm23550-sparrow.dts
@@ -0,0 +1,80 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+
+#include "bcm23550.dtsi"
+
+/ {
+ model = "BCM23550 Sparrow board";
+ compatible = "brcm,bcm23550-sparrow", "brcm,bcm23550";
+
+ chosen {
+ stdout-path = "/slaves@3e000000/serial@0:115200n8";
+ bootargs = "console=ttyS0,115200n8";
+ };
+
+ memory {
+ reg = <0x80000000 0x20000000>; /* 512 MB */
+ };
+};
+
+&uartb {
+ status = "okay";
+};
+
+&usbotg {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
+
+&sdio1 {
+ max-frequency = <48000000>;
+ status = "okay";
+};
+
+&sdio2 {
+ non-removable;
+ max-frequency = <48000000>;
+ status = "okay";
+};
+
+&sdio4 {
+ max-frequency = <48000000>;
+ cd-gpios = <&gpio 91 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm23550.dtsi b/arch/arm/boot/dts/bcm23550.dtsi
new file mode 100644
index 000000000000..a7a643f38385
--- /dev/null
+++ b/arch/arm/boot/dts/bcm23550.dtsi
@@ -0,0 +1,415 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/* BCM23550 and BCM21664 have almost identical clocks */
+#include "dt-bindings/clock/bcm21664.h"
+
+#include "skeleton.dtsi"
+
+/ {
+ model = "BCM23550 SoC";
+ compatible = "brcm,bcm23550";
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0>;
+ clock-frequency = <1000000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ enable-method = "brcm,bcm23550";
+ secondary-boot-reg = <0x35004178>;
+ reg = <1>;
+ clock-frequency = <1000000000>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ enable-method = "brcm,bcm23550";
+ secondary-boot-reg = <0x35004178>;
+ reg = <2>;
+ clock-frequency = <1000000000>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ enable-method = "brcm,bcm23550";
+ secondary-boot-reg = <0x35004178>;
+ reg = <3>;
+ clock-frequency = <1000000000>;
+ };
+ };
+
+ /* Hub bus */
+ hub@34000000 {
+ compatible = "simple-bus";
+ ranges = <0 0x34000000 0x102f83ac>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ smc@4e000 {
+ compatible = "brcm,bcm23550-smc", "brcm,kona-smc";
+ reg = <0x0004e000 0x400>; /* 1 KiB in SRAM */
+ };
+
+ resetmgr: reset-controller@1001f00 {
+ compatible = "brcm,bcm21664-resetmgr";
+ reg = <0x01001f00 0x24>;
+ };
+
+ gpio: gpio@1003000 {
+ compatible = "brcm,bcm23550-gpio", "brcm,kona-gpio";
+ reg = <0x01003000 0x524>;
+ interrupts =
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ };
+
+ timer@1006000 {
+ compatible = "brcm,kona-timer";
+ reg = <0x01006000 0x1c>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&aon_ccu BCM21664_AON_CCU_HUB_TIMER>;
+ };
+ };
+
+ /* Slaves bus */
+ slaves@3e000000 {
+ compatible = "simple-bus";
+ ranges = <0 0x3e000000 0x0001c070>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ uartb: serial@0 {
+ compatible = "snps,dw-apb-uart";
+ status = "disabled";
+ reg = <0x00000000 0x118>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uartb2: serial@1000 {
+ compatible = "snps,dw-apb-uart";
+ status = "disabled";
+ reg = <0x00001000 0x118>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB2>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uartb3: serial@2000 {
+ compatible = "snps,dw-apb-uart";
+ status = "disabled";
+ reg = <0x00002000 0x118>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_UARTB3>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ bsc1: i2c@16000 {
+ compatible = "brcm,kona-i2c";
+ reg = <0x00016000 0x70>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC1>;
+ status = "disabled";
+ };
+
+ bsc2: i2c@17000 {
+ compatible = "brcm,kona-i2c";
+ reg = <0x00017000 0x70>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC2>;
+ status = "disabled";
+ };
+
+ bsc3: i2c@18000 {
+ compatible = "brcm,kona-i2c";
+ reg = <0x00018000 0x70>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC3>;
+ status = "disabled";
+ };
+
+ bsc4: i2c@1c000 {
+ compatible = "brcm,kona-i2c";
+ reg = <0x0001c000 0x70>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&slave_ccu BCM21664_SLAVE_CCU_BSC4>;
+ status = "disabled";
+ };
+ };
+
+ /* Apps bus */
+ apps@3e300000 {
+ compatible = "simple-bus";
+ ranges = <0 0x3e300000 0x01b77000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ usbotg: usb@e20000 {
+ compatible = "snps,dwc2";
+ reg = <0x00e20000 0x10000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&usb_otg_ahb_clk>;
+ clock-names = "otg";
+ phys = <&usbphy>;
+ phy-names = "usb2-phy";
+ status = "disabled";
+ };
+
+ usbphy: usb-phy@e30000 {
+ compatible = "brcm,kona-usb2-phy";
+ reg = <0x00e30000 0x28>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ sdio1: sdio@e80000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00e80000 0x801c>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO1>;
+ status = "disabled";
+ };
+
+ sdio2: sdio@e90000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00e90000 0x801c>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO2>;
+ status = "disabled";
+ };
+
+ sdio3: sdio@ea0000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00ea0000 0x801c>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO3>;
+ status = "disabled";
+ };
+
+ sdio4: sdio@eb0000 {
+ compatible = "brcm,kona-sdhci";
+ reg = <0x00eb0000 0x801c>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&master_ccu BCM21664_MASTER_CCU_SDIO4>;
+ status = "disabled";
+ };
+
+ cdc: cdc@1b0e000 {
+ compatible = "brcm,bcm23550-cdc";
+ reg = <0x01b0e000 0x78>;
+ };
+
+ gic: interrupt-controller@1b21000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x01b21000 0x1000>,
+ <0x01b22000 0x1000>;
+ };
+ };
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ /*
+ * Fixed clocks are defined before CCUs whose
+ * clocks may depend on them.
+ */
+
+ ref_32k_clk: ref_32k {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ bbl_32k_clk: bbl_32k {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ };
+
+ ref_13m_clk: ref_13m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <13000000>;
+ };
+
+ var_13m_clk: var_13m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <13000000>;
+ };
+
+ dft_19_5m_clk: dft_19_5m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <19500000>;
+ };
+
+ ref_crystal_clk: ref_crystal {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <26000000>;
+ };
+
+ ref_52m_clk: ref_52m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <52000000>;
+ };
+
+ var_52m_clk: var_52m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <52000000>;
+ };
+
+ usb_otg_ahb_clk: usb_otg_ahb {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <52000000>;
+ };
+
+ ref_96m_clk: ref_96m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <96000000>;
+ };
+
+ var_96m_clk: var_96m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <96000000>;
+ };
+
+ ref_104m_clk: ref_104m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <104000000>;
+ };
+
+ var_104m_clk: var_104m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <104000000>;
+ };
+
+ ref_156m_clk: ref_156m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <156000000>;
+ };
+
+ var_156m_clk: var_156m {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <156000000>;
+ };
+
+ root_ccu: root_ccu {
+ compatible = BCM21664_DT_ROOT_CCU_COMPAT;
+ reg = <0x35001000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "frac_1m";
+ };
+
+ aon_ccu: aon_ccu {
+ compatible = BCM21664_DT_AON_CCU_COMPAT;
+ reg = <0x35002000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "hub_timer";
+ };
+
+ slave_ccu: slave_ccu {
+ compatible = BCM21664_DT_SLAVE_CCU_COMPAT;
+ reg = <0x3e011000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "uartb",
+ "uartb2",
+ "uartb3",
+ "bsc1",
+ "bsc2",
+ "bsc3",
+ "bsc4";
+ };
+
+ master_ccu: master_ccu {
+ compatible = BCM21664_DT_MASTER_CCU_COMPAT;
+ reg = <0x3f001000 0x0f00>;
+ #clock-cells = <1>;
+ clock-output-names = "sdio1",
+ "sdio2",
+ "sdio3",
+ "sdio4",
+ "sdio1_sleep",
+ "sdio2_sleep",
+ "sdio3_sleep",
+ "sdio4_sleep";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index 57d313b6afaf..d5fdb8e761a3 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
/ {
compatible = "raspberrypi,model-b-plus", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index cf2774ec0834..bfc4bd9b7733 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9512.dtsi"
/ {
compatible = "raspberrypi,model-b-rev2", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
index 8b15f9c35643..0371bb7374b8 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include "bcm2835.dtsi"
#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9512.dtsi"
/ {
compatible = "raspberrypi,model-b", "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
index c4743f42237b..29e1cfe8eb14 100644
--- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include "bcm2836.dtsi"
#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
/ {
compatible = "raspberrypi,2-model-b", "brcm,bcm2836";
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
new file mode 100644
index 000000000000..12c981e51134
--- /dev/null
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
@@ -0,0 +1,19 @@
+/ {
+ aliases {
+ ethernet = &ethernet;
+ };
+};
+
+&usb {
+ usb1@1 {
+ compatible = "usb424,9512";
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet: usbether@1 {
+ compatible = "usb424,ec00";
+ reg = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
new file mode 100644
index 000000000000..3f0a56ebcf1f
--- /dev/null
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
@@ -0,0 +1,19 @@
+/ {
+ aliases {
+ ethernet = &ethernet;
+ };
+};
+
+&usb {
+ usb1@1 {
+ compatible = "usb424,9514";
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet: usbether@1 {
+ compatible = "usb424,ec00";
+ reg = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 10b27b912bac..b98252232d20 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -287,6 +287,8 @@
compatible = "brcm,bcm2835-usb";
reg = <0x7e980000 0x10000>;
interrupts = <1 9>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
v3d: v3d@7ec00000 {
diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
index 5087aa81efb1..9cb186ea2e97 100644
--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
@@ -147,3 +147,7 @@
&usb3 {
vcc-gpio = <&chipcommon 10 GPIO_ACTIVE_LOW>;
};
+
+&spi_nor {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index 1049ab108b32..8ce39d58eeb8 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -90,3 +90,7 @@
&usb3 {
vcc-gpio = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
};
+
+&spi_nor {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
index 3a94606d042b..6229ef283c41 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
@@ -82,3 +82,7 @@
};
};
};
+
+&spi_nor {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
index 8b0c440b2e71..70f4bb9d864a 100644
--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
@@ -126,3 +126,43 @@
&spi_nor {
status = "okay";
};
+
+&srab {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan4";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan3";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan1";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
index 791d7225c733..0653e7ef248c 100644
--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
@@ -131,3 +131,7 @@
&usb2 {
vcc-gpio = <&chipcommon 13 GPIO_ACTIVE_HIGH>;
};
+
+&spi_nor {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index ace38efd2db3..c8c0b3616935 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -10,7 +10,7 @@
/dts-v1/;
#include "bcm4708.dtsi"
-#include "bcm5301x-nand-cs0-bch8.dtsi"
+#include "bcm5301x-nand-cs0-bch1.dtsi"
/ {
compatible = "dlink,dir-885l", "brcm,bcm47094", "brcm,bcm4708";
@@ -113,3 +113,7 @@
&usb3 {
vcc-gpio = <&chipcommon 18 GPIO_ACTIVE_HIGH>;
};
+
+&spi_nor {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm5301x-nand-cs0-bch1.dtsi b/arch/arm/boot/dts/bcm5301x-nand-cs0-bch1.dtsi
new file mode 100644
index 000000000000..24b099c00f13
--- /dev/null
+++ b/arch/arm/boot/dts/bcm5301x-nand-cs0-bch1.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Broadcom Northstar NAND.
+ *
+ * Copyright (C) 2016 Rafał Miłecki <rafal.milecki@gmail.com>
+ *
+ * Licensed under the ISC license.
+ */
+
+#include "bcm5301x-nand-cs0.dtsi"
+
+&nandcs {
+ nand-ecc-algo = "bch";
+ nand-ecc-strength = <1>;
+ nand-ecc-step-size = <512>;
+};
diff --git a/arch/arm/boot/dts/bcm5301x-nand-cs0-bch8.dtsi b/arch/arm/boot/dts/bcm5301x-nand-cs0-bch8.dtsi
index d10781e36f54..9a9630ded306 100644
--- a/arch/arm/boot/dts/bcm5301x-nand-cs0-bch8.dtsi
+++ b/arch/arm/boot/dts/bcm5301x-nand-cs0-bch8.dtsi
@@ -9,16 +9,10 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-/ {
- nand@18028000 {
- nandcs@0 {
- compatible = "brcm,nandcs";
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
+#include "bcm5301x-nand-cs0.dtsi"
- nand-ecc-strength = <8>;
- nand-ecc-step-size = <512>;
- };
- };
+&nandcs {
+ nand-ecc-algo = "bch";
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
};
diff --git a/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi b/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi
new file mode 100644
index 000000000000..168495106b82
--- /dev/null
+++ b/arch/arm/boot/dts/bcm5301x-nand-cs0.dtsi
@@ -0,0 +1,18 @@
+/*
+ * Broadcom Northstar NAND.
+ *
+ * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+/ {
+ nand@18028000 {
+ nandcs: nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 7d4d29bf0ed3..8af47913b3b4 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -153,6 +153,21 @@
/* ChipCommon */
<0x00000000 0 &gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ /* Switch Register Access Block */
+ <0x00007000 0 &gic GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 1 &gic GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 2 &gic GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 3 &gic GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 4 &gic GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 5 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 6 &gic GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 7 &gic GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 8 &gic GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 9 &gic GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 10 &gic GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 11 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <0x00007000 12 &gic GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+
/* PCIe Controller 0 */
<0x00012000 0 &gic GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
<0x00012000 1 &gic GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
@@ -239,6 +254,22 @@
status = "disabled";
};
};
+
+ gmac0: ethernet@24000 {
+ reg = <0x24000 0x800>;
+ };
+
+ gmac1: ethernet@25000 {
+ reg = <0x25000 0x800>;
+ };
+
+ gmac2: ethernet@26000 {
+ reg = <0x26000 0x800>;
+ };
+
+ gmac3: ethernet@27000 {
+ reg = <0x27000 0x800>;
+ };
};
lcpll0: lcpll0@1800c100 {
@@ -260,6 +291,22 @@
"sata2";
};
+ srab: srab@18007000 {
+ compatible = "brcm,bcm5301x-srab";
+ reg = <0x18007000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ /* ports are defined in board DTS */
+ };
+
+ rng: rng@18004000 {
+ compatible = "brcm,bcm5301x-rng";
+ reg = <0x18004000 0x14>;
+ };
+
nand: nand@18028000 {
compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1", "brcm,brcmnand";
reg = <0x18028000 0x600>, <0x1811a408 0x600>, <0x18028f00 0x20>;
diff --git a/arch/arm/boot/dts/bcm953012er.dts b/arch/arm/boot/dts/bcm953012er.dts
new file mode 100644
index 000000000000..0a9abecf9423
--- /dev/null
+++ b/arch/arm/boot/dts/bcm953012er.dts
@@ -0,0 +1,104 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm4708.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+/ {
+ model = "NorthStar Enterprise Router (BCM953012ER)";
+ compatible = "brcm,bcm953012er", "brcm,brcm53012", "brcm,bcm4708";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ reg = <0x00000000 0x8000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&chipcommon 6 GPIO_ACTIVE_LOW>;
+ };
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&chipcommon 15 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&spi_nor {
+ status = "okay";
+};
+
+&srab {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "port0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "port1";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
new file mode 100644
index 000000000000..d257e83dedfc
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -0,0 +1,109 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+
+/ {
+ model = "NorthStar Plus XMC (BCM958525xmc)";
+ compatible = "brcm,bcm58525", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+/* XHCI, SATA, MMC, and Ethernet support still needed for this board to be complete */
+
+&uart0 {
+ status = "okay";
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
new file mode 100644
index 000000000000..03b8bbeb694f
--- /dev/null
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -0,0 +1,111 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Broadcom. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "bcm-nsp.dtsi"
+
+/ {
+ model = "NorthStar Plus SVK (BCM958625HR)";
+ compatible = "brcm,bcm58625", "brcm,nsp";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ reg = <0x60000000 0x20000000>;
+ };
+};
+
+&nand {
+ nandcs@0 {
+ compatible = "brcm,nandcs";
+ reg = <0>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+
+ brcm,nand-oob-sector-size = <27>;
+
+ partition@0 {
+ label = "nboot";
+ reg = <0x00000000 0x00200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "nenv";
+ reg = <0x00200000 0x00400000>;
+ };
+ partition@600000 {
+ label = "nsystem";
+ reg = <0x00600000 0x00a00000>;
+ };
+ partition@1000000 {
+ label = "nrootfs";
+ reg = <0x01000000 0x03000000>;
+ };
+ partition@4000000 {
+ label = "ncustfs";
+ reg = <0x04000000 0x3c000000>;
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
diff --git a/arch/arm/boot/dts/compulab-sb-som.dtsi b/arch/arm/boot/dts/compulab-sb-som.dtsi
index 93d7e235bc80..4af1adfee788 100644
--- a/arch/arm/boot/dts/compulab-sb-som.dtsi
+++ b/arch/arm/boot/dts/compulab-sb-som.dtsi
@@ -40,7 +40,7 @@
};
};
- hdmi_conn: connector@0 {
+ hdmi_conn: connector {
compatible = "hdmi-connector";
label = "hdmi";
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index f23cae0c2179..68e412c9863c 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -448,7 +448,7 @@
reg = <0x49000000 0x10000>;
reg-names = "edma3_cc";
interrupts = <12 13 14>;
- interrupt-names = "edma3_ccint", "emda3_mperr",
+ interrupt-names = "edma3_ccint", "edma3_mperr",
"edma3_ccerrint";
dma-requests = <64>;
#dma-cells = <2>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index de559f6e4fee..d9bfb94a2992 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -73,6 +73,49 @@
interrupt-parent = <&gic>;
};
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+
+ operating-points-v2 = <&cpu0_opp_table>;
+ ti,syscon-efuse = <&scm_wkup 0x20c 0xf80000 19>;
+ ti,syscon-rev = <&scm_wkup 0x204>;
+
+ clocks = <&dpll_mpu_ck>;
+ clock-names = "cpu";
+
+ clock-latency = <300000>; /* From omap-cpufreq driver */
+
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <2>;
+ #cooling-cells = <2>; /* min followed by max */
+ };
+ };
+
+ cpu0_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp_nom@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1060000 850000 1150000>;
+ opp-supported-hw = <0xFF 0x01>;
+ opp-suspend;
+ };
+
+ opp_od@1176000000 {
+ opp-hz = /bits/ 64 <1176000000>;
+ opp-microvolt = <1160000 885000 1160000>;
+ opp-supported-hw = <0xFF 0x02>;
+ };
+ };
+
/*
* The soc node represents the soc top level view. It is used for IPs
* that are not memory mapped in the MPU view or for the MPU itself.
@@ -233,6 +276,11 @@
prm_clockdomains: clockdomains {
};
};
+
+ scm_wkup: scm_conf@c000 {
+ compatible = "syscon";
+ reg = <0xc000 0x1000>;
+ };
};
axi@0 {
@@ -276,7 +324,7 @@
ranges = <0x51800000 0x51800000 0x3000
0x0 0x30000000 0x10000000>;
status = "disabled";
- pcie@51000000 {
+ pcie@51800000 {
compatible = "ti,dra7-pcie";
reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
reg-names = "rc_dbics", "ti_conf", "config";
@@ -304,6 +352,53 @@
};
};
+ ocmcram1: ocmcram@40300000 {
+ compatible = "mmio-sram";
+ reg = <0x40300000 0x80000>;
+ ranges = <0x0 0x40300000 0x80000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /*
+ * This is a placeholder for an optional reserved
+ * region for use by secure software. The size
+ * of this region is not known until runtime so it
+ * is set as zero to either be updated to reserve
+ * space or left unchanged to leave all SRAM for use.
+ * On HS parts that require the reserved region
+ * either the bootloader can update the size to
+ * the required amount or the node can be overridden
+ * from the board dts file for the secure platform.
+ */
+ sram-hs@0 {
+ compatible = "ti,secure-ram";
+ reg = <0x0 0x0>;
+ };
+ };
+
+ /*
+ * NOTE: ocmcram2 and ocmcram3 are not available on all
+ * DRA7xx and AM57xx variants. Confirm availability in
+ * the data manual for the exact part number in use
+ * before enabling these nodes in the board dts file.
+ */
+ ocmcram2: ocmcram@40400000 {
+ status = "disabled";
+ compatible = "mmio-sram";
+ reg = <0x40400000 0x100000>;
+ ranges = <0x0 0x40400000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ ocmcram3: ocmcram@40500000 {
+ status = "disabled";
+ compatible = "mmio-sram";
+ reg = <0x40500000 0x100000>;
+ ranges = <0x0 0x40500000 0x100000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
bandgap: bandgap@4a0021e0 {
reg = <0x4a0021e0 0xc
0x4a00232c 0xc
@@ -341,7 +436,7 @@
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "edma3_ccint", "emda3_mperr",
+ interrupt-names = "edma3_ccint", "edma3_mperr",
"edma3_ccerrint";
dma-requests = <64>;
#dma-cells = <2>;
@@ -1744,6 +1839,149 @@
clock-names = "fck", "sys_clk";
};
};
+
+ epwmss0: epwmss@4843e000 {
+ compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss";
+ reg = <0x4843e000 0x30>;
+ ti,hwmods = "epwmss0";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ ranges;
+
+ ehrpwm0: pwm@4843e200 {
+ compatible = "ti,dra746-ehrpwm",
+ "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x4843e200 0x80>;
+ clocks = <&ehrpwm0_tbclk>, <&l4_root_clk_div>;
+ clock-names = "tbclk", "fck";
+ status = "disabled";
+ };
+
+ ecap0: ecap@4843e100 {
+ compatible = "ti,dra746-ecap",
+ "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x4843e100 0x80>;
+ clocks = <&l4_root_clk_div>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+ };
+
+ epwmss1: epwmss@48440000 {
+ compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss";
+ reg = <0x48440000 0x30>;
+ ti,hwmods = "epwmss1";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ ranges;
+
+ ehrpwm1: pwm@48440200 {
+ compatible = "ti,dra746-ehrpwm",
+ "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x48440200 0x80>;
+ clocks = <&ehrpwm1_tbclk>, <&l4_root_clk_div>;
+ clock-names = "tbclk", "fck";
+ status = "disabled";
+ };
+
+ ecap1: ecap@48440100 {
+ compatible = "ti,dra746-ecap",
+ "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x48440100 0x80>;
+ clocks = <&l4_root_clk_div>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+ };
+
+ epwmss2: epwmss@48442000 {
+ compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss";
+ reg = <0x48442000 0x30>;
+ ti,hwmods = "epwmss2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ ranges;
+
+ ehrpwm2: pwm@48442200 {
+ compatible = "ti,dra746-ehrpwm",
+ "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x48442200 0x80>;
+ clocks = <&ehrpwm2_tbclk>, <&l4_root_clk_div>;
+ clock-names = "tbclk", "fck";
+ status = "disabled";
+ };
+
+ ecap2: ecap@48442100 {
+ compatible = "ti,dra746-ecap",
+ "ti,am3352-ecap";
+ #pwm-cells = <3>;
+ reg = <0x48442100 0x80>;
+ clocks = <&l4_root_clk_div>;
+ clock-names = "fck";
+ status = "disabled";
+ };
+ };
+
+ aes1: aes@4b500000 {
+ compatible = "ti,omap4-aes";
+ ti,hwmods = "aes1";
+ reg = <0x4b500000 0xa0>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma_xbar 111 0>, <&edma_xbar 110 0>;
+ dma-names = "tx", "rx";
+ clocks = <&l3_iclk_div>;
+ clock-names = "fck";
+ };
+
+ aes2: aes@4b700000 {
+ compatible = "ti,omap4-aes";
+ ti,hwmods = "aes2";
+ reg = <0x4b700000 0xa0>;
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma_xbar 114 0>, <&edma_xbar 113 0>;
+ dma-names = "tx", "rx";
+ clocks = <&l3_iclk_div>;
+ clock-names = "fck";
+ };
+
+ des: des@480a5000 {
+ compatible = "ti,omap4-des";
+ ti,hwmods = "des";
+ reg = <0x480a5000 0xa0>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma_xbar 117>, <&sdma_xbar 116>;
+ dma-names = "tx", "rx";
+ clocks = <&l3_iclk_div>;
+ clock-names = "fck";
+ };
+
+ sham: sham@53100000 {
+ compatible = "ti,omap5-sham";
+ ti,hwmods = "sham";
+ reg = <0x4b101000 0x300>;
+ interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&edma_xbar 119 0>;
+ dma-names = "rx";
+ clocks = <&l3_iclk_div>;
+ clock-names = "fck";
+ };
+
+ rng: rng@48090000 {
+ compatible = "ti,omap4-rng";
+ ti,hwmods = "rng";
+ reg = <0x48090000 0x2000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&l3_iclk_div>;
+ clock-names = "fck";
+ };
};
thermal_zones: thermal-zones {
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index 093538ea5b5f..9d3cf50ca37e 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -18,7 +18,7 @@
display0 = &hdmi0;
};
- evm_3v3: fixedregulator-evm_3v3 {
+ evm_3v3_sw: fixedregulator-evm_3v3 {
compatible = "regulator-fixed";
regulator-name = "evm_3v3";
regulator-min-microvolt = <3300000>;
@@ -29,7 +29,7 @@
/* TPS77018DBVT */
compatible = "regulator-fixed";
regulator-name = "aic_dvdd";
- vin-supply = <&evm_3v3>;
+ vin-supply = <&evm_3v3_sw>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
@@ -414,9 +414,9 @@
status = "okay";
/* Regulators */
- AVDD-supply = <&evm_3v3>;
- IOVDD-supply = <&evm_3v3>;
- DRVDD-supply = <&evm_3v3>;
+ AVDD-supply = <&evm_3v3_sw>;
+ IOVDD-supply = <&evm_3v3_sw>;
+ DRVDD-supply = <&evm_3v3_sw>;
DVDD-supply = <&aic_dvdd>;
};
};
@@ -597,7 +597,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins_default>;
- vmmc-supply = <&evm_3v3>;
+ vmmc-supply = <&evm_3v3_sw>;
bus-width = <8>;
ti,non-removable;
max-frequency = <192000000>;
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index 70a217050a4c..67107605fb4c 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -12,22 +12,6 @@
/ {
compatible = "ti,dra722", "ti,dra72", "ti,dra7";
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
- reg = <0>;
-
- /* cooling options */
- cooling-min-level = <0>;
- cooling-max-level = <2>;
- #cooling-cells = <2>; /* min followed by max */
- };
- };
-
pmu {
compatible = "arm,cortex-a15-pmu";
interrupt-parent = <&wakeupgen>;
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 5e06020f450b..8987b3e180a1 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -13,34 +13,11 @@
compatible = "ti,dra742", "ti,dra74", "ti,dra7";
cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a15";
- reg = <0>;
-
- operating-points = <
- /* kHz uV */
- 1000000 1060000
- 1176000 1160000
- >;
-
- clocks = <&dpll_mpu_ck>;
- clock-names = "cpu";
-
- clock-latency = <300000>; /* From omap-cpufreq driver */
-
- /* cooling options */
- cooling-min-level = <0>;
- cooling-max-level = <2>;
- #cooling-cells = <2>; /* min followed by max */
- };
cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <1>;
+ operating-points-v2 = <&cpu0_opp_table>;
};
};
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index a35b851e1cd7..60d0a732833a 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -18,14 +18,18 @@
model = "EMEV2 KZM9D Board";
compatible = "renesas,kzm9d", "renesas,emev2";
- memory {
+ memory@40000000 {
device_type = "memory";
reg = <0x40000000 0x8000000>;
};
+ aliases {
+ serial1 = &uart1;
+ };
+
chosen {
- bootargs = "console=ttyS1,115200n81 ignore_loglevel root=/dev/nfs ip=dhcp";
- stdout-path = &uart1;
+ bootargs = "ignore_loglevel root=/dev/nfs ip=dhcp";
+ stdout-path = "serial1:115200n8";
};
gpio_keys {
@@ -33,28 +37,28 @@
#address-cells = <1>;
#size-cells = <0>;
- button@1 {
+ one {
debounce_interval = <50>;
wakeup-source;
label = "DSW2-1";
linux,code = <KEY_1>;
gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
};
- button@2 {
+ two {
debounce_interval = <50>;
wakeup-source;
label = "DSW2-2";
linux,code = <KEY_2>;
gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
};
- button@3 {
+ three {
debounce_interval = <50>;
wakeup-source;
label = "DSW2-3";
linux,code = <KEY_3>;
gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
};
- button@4 {
+ four {
debounce_interval = <50>;
wakeup-source;
label = "DSW2-4";
@@ -63,7 +67,7 @@
};
};
- reg_1p8v: regulator@0 {
+ reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
@@ -72,7 +76,7 @@
regulator-boot-on;
};
- reg_3p3v: regulator@1 {
+ reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -104,7 +108,7 @@
};
&pfc {
- uart1_pins: serial@e1030000 {
+ uart1_pins: uart1 {
groups = "uart1_ctrl", "uart1_data";
function = "uart1";
};
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index bcce6f50c93d..cd119400f440 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -69,25 +69,25 @@
clock-frequency = <32768>;
#clock-cells = <0>;
};
- iic0_sclkdiv: iic0_sclkdiv {
+ iic0_sclkdiv: iic0_sclkdiv@624,0 {
compatible = "renesas,emev2-smu-clkdiv";
reg = <0x624 0>;
clocks = <&pll3_fo>;
#clock-cells = <0>;
};
- iic0_sclk: iic0_sclk {
+ iic0_sclk: iic0_sclk@48c,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x48c 1>;
clocks = <&iic0_sclkdiv>;
#clock-cells = <0>;
};
- iic1_sclkdiv: iic1_sclkdiv {
+ iic1_sclkdiv: iic1_sclkdiv@624,16 {
compatible = "renesas,emev2-smu-clkdiv";
reg = <0x624 16>;
clocks = <&pll3_fo>;
#clock-cells = <0>;
};
- iic1_sclk: iic1_sclk {
+ iic1_sclk: iic1_sclk@490,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x490 1>;
clocks = <&iic1_sclkdiv>;
@@ -100,55 +100,55 @@
clock-mult = <7000>;
#clock-cells = <0>;
};
- usia_u0_sclkdiv: usia_u0_sclkdiv {
+ usia_u0_sclkdiv: usia_u0_sclkdiv@610,0 {
compatible = "renesas,emev2-smu-clkdiv";
reg = <0x610 0>;
clocks = <&pll3_fo>;
#clock-cells = <0>;
};
- usib_u1_sclkdiv: usib_u1_sclkdiv {
+ usib_u1_sclkdiv: usib_u1_sclkdiv@65c,0 {
compatible = "renesas,emev2-smu-clkdiv";
reg = <0x65c 0>;
clocks = <&pll3_fo>;
#clock-cells = <0>;
};
- usib_u2_sclkdiv: usib_u2_sclkdiv {
+ usib_u2_sclkdiv: usib_u2_sclkdiv@65c,16 {
compatible = "renesas,emev2-smu-clkdiv";
reg = <0x65c 16>;
clocks = <&pll3_fo>;
#clock-cells = <0>;
};
- usib_u3_sclkdiv: usib_u3_sclkdiv {
+ usib_u3_sclkdiv: usib_u3_sclkdiv@660,0 {
compatible = "renesas,emev2-smu-clkdiv";
reg = <0x660 0>;
clocks = <&pll3_fo>;
#clock-cells = <0>;
};
- usia_u0_sclk: usia_u0_sclk {
+ usia_u0_sclk: usia_u0_sclk@4a0,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x4a0 1>;
clocks = <&usia_u0_sclkdiv>;
#clock-cells = <0>;
};
- usib_u1_sclk: usib_u1_sclk {
+ usib_u1_sclk: usib_u1_sclk@4b8,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x4b8 1>;
clocks = <&usib_u1_sclkdiv>;
#clock-cells = <0>;
};
- usib_u2_sclk: usib_u2_sclk {
+ usib_u2_sclk: usib_u2_sclk@4bc,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x4bc 1>;
clocks = <&usib_u2_sclkdiv>;
#clock-cells = <0>;
};
- usib_u3_sclk: usib_u3_sclk {
+ usib_u3_sclk: usib_u3_sclk@4c0,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x4c0 1>;
clocks = <&usib_u3_sclkdiv>;
#clock-cells = <0>;
};
- sti_sclk: sti_sclk {
+ sti_sclk: sti_sclk@528,1 {
compatible = "renesas,emev2-smu-gclk";
reg = <0x528 1>;
clocks = <&c32ki>;
diff --git a/arch/arm/boot/dts/ep7209.dtsi b/arch/arm/boot/dts/ep7209.dtsi
new file mode 100644
index 000000000000..aaf1261d2ee4
--- /dev/null
+++ b/arch/arm/boot/dts/ep7209.dtsi
@@ -0,0 +1,191 @@
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ */
+
+/dts-v1/;
+
+#include "skeleton.dtsi"
+
+#include <dt-bindings/clock/clps711x-clock.h>
+
+/ {
+ model = "Cirrus Logic EP7209";
+ compatible = "cirrus,ep7209";
+
+ aliases {
+ gpio0 = &porta;
+ gpio1 = &portb;
+ gpio3 = &portd;
+ gpio4 = &porte;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ spi0 = &spi;
+ timer0 = &timer1;
+ timer1 = &timer2;
+ };
+
+ cpus {
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ device_type = "cpu";
+ compatible = "arm,arm720t";
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&intc>;
+ ranges;
+
+ clks: clks@80000000 {
+ #clock-cells = <1>;
+ compatible = "cirrus,ep7209-clk";
+ reg = <0x80000000 0xc000>;
+ startup-frequency = <73728000>;
+ };
+
+ intc: intc@80000000 {
+ compatible = "cirrus,ep7209-intc";
+ reg = <0x80000000 0x4000>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ porta: gpio@80000000 {
+ compatible = "cirrus,ep7209-gpio";
+ reg = <0x80000000 0x1 0x80000040 0x1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ portb: gpio@80000001 {
+ compatible = "cirrus,ep7209-gpio";
+ reg = <0x80000001 0x1 0x80000041 0x1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ portd: gpio@80000003 {
+ compatible = "cirrus,ep7209-gpio";
+ reg = <0x80000003 0x1 0x80000043 0x1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ porte: gpio@80000083 {
+ compatible = "cirrus,ep7209-gpio";
+ reg = <0x80000083 0x1 0x800000c3 0x1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ syscon1: syscon@80000100 {
+ compatible = "cirrus,ep7209-syscon1", "syscon";
+ reg = <0x80000100 0x80>;
+ };
+
+ bus: bus@80000180 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "cirrus,ep7209-bus", "simple-bus";
+ clocks = <&clks CLPS711X_CLK_BUS>;
+ reg = <0x80000180 0x80>;
+ ranges = <
+ 0 0 0x00000000 0x10000000
+ 1 0 0x10000000 0x10000000
+ 2 0 0x20000000 0x10000000
+ 3 0 0x30000000 0x10000000
+ 4 0 0x40000000 0x10000000
+ 5 0 0x50000000 0x10000000
+ 6 0 0x60000000 0x0000c000
+ 7 0 0x70000000 0x00000080
+ >;
+ };
+
+ fb: fb@800002c0 {
+ compatible = "cirrus,ep7209-fb";
+ reg = <0x800002c0 0xd44>, <0x60000000 0xc000>;
+ clocks = <&clks CLPS711X_CLK_BUS>;
+ status = "disabled";
+ };
+
+ timer1: timer@80000300 {
+ compatible = "cirrus,ep7209-timer";
+ reg = <0x80000300 0x4>;
+ clocks = <&clks CLPS711X_CLK_TIMER1>;
+ interrupts = <8>;
+ };
+
+ timer2: timer@80000340 {
+ compatible = "cirrus,ep7209-timer";
+ reg = <0x80000340 0x4>;
+ clocks = <&clks CLPS711X_CLK_TIMER2>;
+ interrupts = <9>;
+ };
+
+ pwm: pwm@80000400 {
+ compatible = "cirrus,ep7209-pwm";
+ reg = <0x80000400 0x4>;
+ clocks = <&clks CLPS711X_CLK_PWM>;
+ #pwm-cells = <1>;
+ };
+
+ uart1: uart@80000480 {
+ compatible = "cirrus,ep7209-uart";
+ reg = <0x80000480 0x80>;
+ interrupts = <12 13>;
+ clocks = <&clks CLPS711X_CLK_UART>;
+ syscon = <&syscon1>;
+ };
+
+ spi: spi@80000500 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cirrus,ep7209-spi";
+ reg = <0x80000500 0x4>;
+ interrupts = <15>;
+ clocks = <&clks CLPS711X_CLK_SPI>;
+ status = "disabled";
+ };
+
+ syscon2: syscon@80001100 {
+ compatible = "cirrus,ep7209-syscon2", "syscon";
+ reg = <0x80001100 0x80>;
+ };
+
+ uart2: uart@80001480 {
+ compatible = "cirrus,ep7209-uart";
+ reg = <0x80001480 0x80>;
+ interrupts = <28 29>;
+ clocks = <&clks CLPS711X_CLK_UART>;
+ syscon = <&syscon2>;
+ };
+
+ dai: dai@80002000 {
+ #sound-dai-cells = <0>;
+ compatible = "cirrus,ep7209-dai";
+ reg = <0x80002000 0x604>;
+ clocks = <&clks CLPS711X_CLK_PLL>;
+ clock-names = "pll";
+ interrupts = <32>;
+ status = "disabled";
+ };
+
+ syscon3: syscon@80002200 {
+ compatible = "cirrus,ep7209-syscon3", "syscon";
+ reg = <0x80002200 0x40>;
+ };
+ };
+
+ mctrl: mctrl {
+ compatible = "cirrus,ep7209-mctrl-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
diff --git a/arch/arm/boot/dts/ep7211-edb7211.dts b/arch/arm/boot/dts/ep7211-edb7211.dts
new file mode 100644
index 000000000000..9a134ed271eb
--- /dev/null
+++ b/arch/arm/boot/dts/ep7211-edb7211.dts
@@ -0,0 +1,100 @@
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ */
+
+#include "ep7211.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Cirrus Logic EP7211 Development Board";
+ compatible = "cirrus,edb7211", "cirrus,ep7211", "cirrus,ep7209";
+
+ memory {
+ reg = <0xc0000000 0x02000000>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0>;
+ brightness-levels = <
+ 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf
+ >;
+ default-brightness-level = <0x0>;
+ power-supply = <&blen>;
+ };
+
+ display: display {
+ model = "320x240x4";
+ native-mode = <&timing0>;
+ bits-per-pixel = <4>;
+ ac-prescale = <17>;
+
+ display-timings {
+ timing0: 320x240 {
+ hactive = <320>;
+ hback-porch = <0>;
+ hfront-porch = <0>;
+ hsync-len = <0>;
+ vactive = <240>;
+ vback-porch = <0>;
+ vfront-porch = <0>;
+ vsync-len = <0>;
+ clock-frequency = <6500000>;
+ };
+ };
+ };
+
+ i2c: i2c {
+ compatible = "i2c-gpio";
+ gpios = <&portd 4 GPIO_ACTIVE_HIGH>,
+ <&portd 5 GPIO_ACTIVE_HIGH>;
+ i2c-gpio,delay-us = <2>;
+ i2c-gpio,scl-output-only;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ lcddc: lcddc {
+ compatible = "regulator-fixed";
+ regulator-name = "BACKLIGHT ENABLE";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&portd 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ blen: blen {
+ compatible = "regulator-fixed";
+ regulator-name = "BACKLIGHT ENABLE";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&portd 3 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&bus {
+ flash: nor@00000000 {
+ compatible = "cfi-flash";
+ reg = <0 0x00000000 0x02000000>;
+ bank-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&fb {
+ display = <&display>;
+ lcd-supply = <&lcddc>;
+ status = "okay";
+};
+
+&portd {
+ lcden {
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "LCD ENABLE";
+ };
+};
diff --git a/arch/arm/boot/dts/ep7211.dtsi b/arch/arm/boot/dts/ep7211.dtsi
new file mode 100644
index 000000000000..e438f6db0673
--- /dev/null
+++ b/arch/arm/boot/dts/ep7211.dtsi
@@ -0,0 +1,12 @@
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ */
+
+#include "ep7209.dtsi"
+
+/ {
+ model = "Cirrus Logic EP7211";
+ compatible = "cirrus,ep7211", "cirrus,ep7209";
+};
diff --git a/arch/arm/boot/dts/ethernut5.dts b/arch/arm/boot/dts/ethernut5.dts
index 243044343ee8..4687229a3ab9 100644
--- a/arch/arm/boot/dts/ethernut5.dts
+++ b/arch/arm/boot/dts/ethernut5.dts
@@ -77,13 +77,13 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
pcf8563@50 {
diff --git a/arch/arm/boot/dts/evk-pro3.dts b/arch/arm/boot/dts/evk-pro3.dts
index f72969efe6d7..20a4481b6e12 100644
--- a/arch/arm/boot/dts/evk-pro3.dts
+++ b/arch/arm/boot/dts/evk-pro3.dts
@@ -46,13 +46,13 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/exynos-mfc-reserved-memory.dtsi b/arch/arm/boot/dts/exynos-mfc-reserved-memory.dtsi
new file mode 100644
index 000000000000..f78c14c82e17
--- /dev/null
+++ b/arch/arm/boot/dts/exynos-mfc-reserved-memory.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Samsung's Exynos SoC MFC (Video Codec) reserved memory common definition.
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mfc_left: region_mfc_left {
+ compatible = "shared-dma-pool";
+ no-map;
+ size = <0x1000000>;
+ alignment = <0x100000>;
+ };
+
+ mfc_right: region_mfc_right {
+ compatible = "shared-dma-pool";
+ no-map;
+ size = <0x800000>;
+ alignment = <0x100000>;
+ };
+ };
+};
+
+&mfc {
+ memory-region = <&mfc_left>, <&mfc_right>;
+};
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index e422819591dc..a92181368e5b 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -632,10 +632,6 @@
status = "okay";
};
-&mfc {
- status = "okay";
-};
-
&jpeg {
status = "okay";
};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 62f3dcd9e046..70e3aceab3a9 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -431,7 +431,6 @@
clocks = <&cmu CLK_MFC>, <&cmu CLK_SCLK_MFC>;
power-domains = <&pd_mfc>;
iommus = <&sysmmu_mfc>;
- status = "disabled";
};
sysmmu_mfc: sysmmu@13620000 {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index ca8f3e3cf2f3..32f22e12c70b 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -428,7 +428,6 @@
clock-names = "mfc", "sclk_mfc";
iommus = <&sysmmu_mfc_l>, <&sysmmu_mfc_r>;
iommu-names = "left", "right";
- status = "disabled";
};
serial_0: serial@13800000 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index ad7394c1d67a..be2751eebaf8 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -18,6 +18,7 @@
#include "exynos4210.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Insignal Origen evaluation board based on Exynos4210";
@@ -287,12 +288,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
- status = "okay";
-};
-
&sdhci_0 {
bus-width = <4>;
pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_cd>;
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
index 94ca7d36ab37..847fae3dd1f1 100644
--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -17,6 +17,7 @@
/dts-v1/;
#include "exynos4210.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Samsung smdkv310 evaluation board based on Exynos4210";
@@ -132,12 +133,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
- status = "okay";
-};
-
&pinctrl_1 {
keypad_rows: keypad-rows {
samsung,pins = "gpx2-0", "gpx2-1";
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index ec7619a384a2..58ad48e7b8f7 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -13,6 +13,7 @@
#include "exynos4412.dtsi"
#include "exynos4412-ppmu-common.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
chosen {
@@ -297,7 +298,6 @@
regulator-name = "VDDQ_MMC2_2.8V";
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
- regulator-always-on;
regulator-boot-on;
};
@@ -390,10 +390,18 @@
};
ldo21_reg: LDO21 {
- regulator-name = "LDO21_3.3V";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
+ regulator-name = "TFLASH_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-boot-on;
+ };
+
+ ldo22_reg: LDO22 {
+ /*
+ * Only U3 uses it, so let it define the
+ * constraints
+ */
+ regulator-name = "LDO22";
regulator-boot-on;
};
@@ -460,9 +468,11 @@
};
buck8_reg: BUCK8 {
+ /*
+ * Constraints set by specific board: X,
+ * X2 and U3.
+ */
regulator-name = "BUCK8_2.8V";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
};
};
};
@@ -506,7 +516,7 @@
&mshc_0 {
pinctrl-0 = <&sd4_clk &sd4_cmd &sd4_bus4 &sd4_bus8>;
pinctrl-names = "default";
- vmmc-supply = <&ldo20_reg &buck8_reg>;
+ vmmc-supply = <&ldo20_reg>;
mmc-pwrseq = <&emmc_pwrseq>;
status = "okay";
@@ -530,7 +540,8 @@
bus-width = <4>;
pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus4>;
pinctrl-names = "default";
- vmmc-supply = <&ldo4_reg &ldo21_reg>;
+ vmmc-supply = <&ldo21_reg>;
+ vqmmc-supply = <&ldo4_reg>;
cd-gpios = <&gpk2 2 GPIO_ACTIVE_HIGH>;
cd-inverted;
status = "okay";
diff --git a/arch/arm/boot/dts/exynos4412-odroidu3.dts b/arch/arm/boot/dts/exynos4412-odroidu3.dts
index dd89f7b37c9f..d73aa6c58fe3 100644
--- a/arch/arm/boot/dts/exynos4412-odroidu3.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidu3.dts
@@ -69,6 +69,24 @@
};
};
+/* Supply for LAN9730/SMSC95xx */
+&buck8_reg {
+ regulator-name = "BUCK8_P3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+};
+
+/* VDDQ for MSHC (eMMC card) */
+&ldo22_reg {
+ regulator-name = "LDO22_VDDQ_MMC4_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+&mshc_0 {
+ vqmmc-supply = <&ldo22_reg>;
+};
+
&pwm {
pinctrl-0 = <&pwm0_out>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts
index bf7b21b817e4..2af235151301 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx.dts
@@ -63,12 +63,23 @@
};
};
+/* VDDQ for MSHC (eMMC card) */
+&buck8_reg {
+ regulator-name = "BUCK8_VDDQ_MMC4_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
&ehci {
port@1 {
status = "okay";
};
};
+&mshc_0 {
+ vqmmc-supply = <&buck8_reg>;
+};
+
&pinctrl_1 {
gpio_home_key: home_key {
samsung,pins = "gpx2-2";
diff --git a/arch/arm/boot/dts/exynos4412-odroidx2.dts b/arch/arm/boot/dts/exynos4412-odroidx2.dts
index 6e33678562ae..3e3584270e00 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx2.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx2.dts
@@ -22,6 +22,17 @@
};
};
+/* VDDQ for MSHC (eMMC card) */
+&buck8_reg {
+ regulator-name = "BUCK8_VDDQ_MMC4_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+&mshc_0 {
+ vqmmc-supply = <&buck8_reg>;
+};
+
&sound {
simple-audio-card,name = "Odroid-X2";
simple-audio-card,widgets =
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index 8bca699b7f20..26a36fed9652 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -16,6 +16,7 @@
#include "exynos4412.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Insignal Origen evaluation board based on Exynos4412";
@@ -83,6 +84,22 @@
cpu0-supply = <&buck2_reg>;
};
+&exynos_usbphy {
+ status = "okay";
+};
+
+&ehci {
+ samsung,vbus-gpio = <&gpx3 5 1>;
+ status = "okay";
+
+ port@1{
+ status = "okay";
+ };
+ port@2 {
+ status = "okay";
+ };
+};
+
&fimd {
pinctrl-0 = <&lcd_clk &lcd_data24 &pwm1_out>;
pinctrl-names = "default";
@@ -465,12 +482,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
- status = "okay";
-};
-
&mshc_0 {
pinctrl-0 = <&sd4_clk &sd4_cmd &sd4_bus4 &sd4_bus8>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4412-smdk4412.dts b/arch/arm/boot/dts/exynos4412-smdk4412.dts
index a51069f3c03b..231ffbdbf9d0 100644
--- a/arch/arm/boot/dts/exynos4412-smdk4412.dts
+++ b/arch/arm/boot/dts/exynos4412-smdk4412.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "exynos4412.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Samsung SMDK evaluation board based on Exynos4412";
@@ -111,12 +112,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
- status = "okay";
-};
-
&pinctrl_1 {
keypad_rows: keypad-rows {
samsung,pins = "gpx2-0", "gpx2-1", "gpx2-2";
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 9336fd4824d9..129e973a06a6 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -253,7 +253,7 @@
};
thermistor-ap {
- compatible = "ntc,ncp15wb473";
+ compatible = "murata,ncp15wb473";
pullup-uv = <1800000>; /* VCC_1.8V_AP */
pullup-ohm = <100000>; /* 100K */
pulldown-ohm = <100000>; /* 100K */
@@ -261,7 +261,7 @@
};
thermistor-battery {
- compatible = "ntc,ncp15wb473";
+ compatible = "murata,ncp15wb473";
pullup-uv = <1800000>; /* VCC_1.8V_AP */
pullup-ohm = <100000>; /* 100K */
pulldown-ohm = <100000>; /* 100K */
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index d5c0f18a4223..cab91782e20c 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -20,97 +20,160 @@
interrupt-parent = <&gic>;
aliases {
+ i2c0 = &i2c_0;
+ i2c1 = &i2c_1;
+ i2c2 = &i2c_2;
+ i2c3 = &i2c_3;
serial0 = &serial_0;
serial1 = &serial_1;
serial2 = &serial_2;
serial3 = &serial_3;
};
- chipid@10000000 {
- compatible = "samsung,exynos4210-chipid";
- reg = <0x10000000 0x100>;
- };
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
- memory-controller@12250000 {
- compatible = "samsung,exynos4210-srom";
- reg = <0x12250000 0x14>;
- };
+ chipid@10000000 {
+ compatible = "samsung,exynos4210-chipid";
+ reg = <0x10000000 0x100>;
+ };
- combiner: interrupt-controller@10440000 {
- compatible = "samsung,exynos4210-combiner";
- #interrupt-cells = <2>;
- interrupt-controller;
- samsung,combiner-nr = <32>;
- reg = <0x10440000 0x1000>;
- interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
- <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
- <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
- <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
- <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
- <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
- <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
- <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
- };
+ sromc: memory-controller@12250000 {
+ compatible = "samsung,exynos4210-srom";
+ reg = <0x12250000 0x14>;
+ };
- gic: interrupt-controller@10481000 {
- compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x10481000 0x1000>,
- <0x10482000 0x1000>,
- <0x10484000 0x2000>,
- <0x10486000 0x2000>;
- interrupts = <1 9 0xf04>;
- };
+ combiner: interrupt-controller@10440000 {
+ compatible = "samsung,exynos4210-combiner";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ samsung,combiner-nr = <32>;
+ reg = <0x10440000 0x1000>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
+ <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
+ <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
+ <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
+ <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+ };
- serial_0: serial@12C00000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C00000 0x100>;
- interrupts = <0 51 0>;
- };
+ gic: interrupt-controller@10481000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x10481000 0x1000>,
+ <0x10482000 0x1000>,
+ <0x10484000 0x2000>,
+ <0x10486000 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
- serial_1: serial@12C10000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C10000 0x100>;
- interrupts = <0 52 0>;
- };
+ sysreg_system_controller: syscon@10050000 {
+ compatible = "samsung,exynos5-sysreg", "syscon";
+ reg = <0x10050000 0x5000>;
+ };
- serial_2: serial@12C20000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C20000 0x100>;
- interrupts = <0 53 0>;
- };
+ serial_0: serial@12C00000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x12C00000 0x100>;
+ interrupts = <0 51 0>;
+ };
- serial_3: serial@12C30000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C30000 0x100>;
- interrupts = <0 54 0>;
- };
+ serial_1: serial@12C10000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x12C10000 0x100>;
+ interrupts = <0 52 0>;
+ };
- rtc: rtc@101E0000 {
- compatible = "samsung,s3c6410-rtc";
- reg = <0x101E0000 0x100>;
- interrupts = <0 43 0>, <0 44 0>;
- status = "disabled";
- };
+ serial_2: serial@12C20000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x12C20000 0x100>;
+ interrupts = <0 53 0>;
+ };
- fimd: fimd@14400000 {
- compatible = "samsung,exynos5250-fimd";
- interrupt-parent = <&combiner>;
- reg = <0x14400000 0x40000>;
- interrupt-names = "fifo", "vsync", "lcd_sys";
- interrupts = <18 4>, <18 5>, <18 6>;
- samsung,sysreg = <&sysreg_system_controller>;
- status = "disabled";
- };
+ serial_3: serial@12C30000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x12C30000 0x100>;
+ interrupts = <0 54 0>;
+ };
- dp: dp-controller@145B0000 {
- compatible = "samsung,exynos5-dp";
- reg = <0x145B0000 0x1000>;
- interrupts = <10 3>;
- interrupt-parent = <&combiner>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
+ i2c_0: i2c@12C60000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C60000 0x100>;
+ interrupts = <0 56 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ status = "disabled";
+ };
+
+ i2c_1: i2c@12C70000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C70000 0x100>;
+ interrupts = <0 57 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ status = "disabled";
+ };
+
+ i2c_2: i2c@12C80000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C80000 0x100>;
+ interrupts = <0 58 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ status = "disabled";
+ };
+
+ i2c_3: i2c@12C90000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12C90000 0x100>;
+ interrupts = <0 59 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ status = "disabled";
+ };
+
+ pwm: pwm@12DD0000 {
+ compatible = "samsung,exynos4210-pwm";
+ reg = <0x12DD0000 0x100>;
+ samsung,pwm-outputs = <0>, <1>, <2>, <3>;
+ #pwm-cells = <3>;
+ };
+
+ rtc: rtc@101E0000 {
+ compatible = "samsung,s3c6410-rtc";
+ reg = <0x101E0000 0x100>;
+ interrupts = <0 43 0>, <0 44 0>;
+ status = "disabled";
+ };
+
+ fimd: fimd@14400000 {
+ compatible = "samsung,exynos5250-fimd";
+ interrupt-parent = <&combiner>;
+ reg = <0x14400000 0x40000>;
+ interrupt-names = "fifo", "vsync", "lcd_sys";
+ interrupts = <18 4>, <18 5>, <18 6>;
+ samsung,sysreg = <&sysreg_system_controller>;
+ status = "disabled";
+ };
+
+ dp: dp-controller@145B0000 {
+ compatible = "samsung,exynos5-dp";
+ reg = <0x145B0000 0x1000>;
+ interrupts = <10 3>;
+ interrupt-parent = <&combiner>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 85dad29c08dc..ea70603f660d 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -14,6 +14,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/input/input.h>
#include "exynos5250.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Insignal Arndale evaluation board based on EXYNOS5250";
@@ -515,11 +516,6 @@
status = "okay";
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
num-slots = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index b7992b13c9de..381af134c4c8 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -13,6 +13,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include "exynos5250.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "SAMSUNG SMDK5250 board based on EXYNOS5250";
@@ -343,11 +344,6 @@
status = "okay";
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
num-slots = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index fa14f77df563..fadbea744e1a 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -61,7 +61,7 @@
#address-cells = <1>;
#size-cells = <0>;
- i2c-parent = <&{/i2c@12CA0000}>;
+ i2c-parent = <&i2c_4>;
our-claim-gpio = <&gpf0 3 GPIO_ACTIVE_LOW>;
their-claim-gpios = <&gpe0 4 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index ac291f540812..44f4292bfef6 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -14,6 +14,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/input/input.h>
#include "exynos5250.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Google Spring";
@@ -424,11 +425,6 @@
status = "okay";
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
num-slots = <1>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index c7158b2fb213..f7357d99b47c 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -37,10 +37,6 @@
mshc1 = &mmc_1;
mshc2 = &mmc_2;
mshc3 = &mmc_3;
- i2c0 = &i2c_0;
- i2c1 = &i2c_1;
- i2c2 = &i2c_2;
- i2c3 = &i2c_3;
i2c4 = &i2c_4;
i2c5 = &i2c_5;
i2c6 = &i2c_6;
@@ -96,962 +92,896 @@
};
};
- sysram@02020000 {
- compatible = "mmio-sram";
- reg = <0x02020000 0x30000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x02020000 0x30000>;
+ soc: soc {
+ sysram@02020000 {
+ compatible = "mmio-sram";
+ reg = <0x02020000 0x30000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x02020000 0x30000>;
- smp-sysram@0 {
- compatible = "samsung,exynos4210-sysram";
- reg = <0x0 0x1000>;
- };
+ smp-sysram@0 {
+ compatible = "samsung,exynos4210-sysram";
+ reg = <0x0 0x1000>;
+ };
- smp-sysram@2f000 {
- compatible = "samsung,exynos4210-sysram-ns";
- reg = <0x2f000 0x1000>;
+ smp-sysram@2f000 {
+ compatible = "samsung,exynos4210-sysram-ns";
+ reg = <0x2f000 0x1000>;
+ };
};
- };
- pd_gsc: gsc-power-domain@10044000 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044000 0x20>;
- #power-domain-cells = <0>;
- };
+ pd_gsc: gsc-power-domain@10044000 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044000 0x20>;
+ #power-domain-cells = <0>;
+ };
- pd_mfc: mfc-power-domain@10044040 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044040 0x20>;
- #power-domain-cells = <0>;
- };
+ pd_mfc: mfc-power-domain@10044040 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044040 0x20>;
+ #power-domain-cells = <0>;
+ };
- pd_disp1: disp1-power-domain@100440A0 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x100440A0 0x20>;
- #power-domain-cells = <0>;
- clocks = <&clock CLK_FIN_PLL>,
- <&clock CLK_MOUT_ACLK200_DISP1_SUB>,
- <&clock CLK_MOUT_ACLK300_DISP1_SUB>;
- clock-names = "oscclk", "clk0", "clk1";
- };
+ pd_disp1: disp1-power-domain@100440A0 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x100440A0 0x20>;
+ #power-domain-cells = <0>;
+ clocks = <&clock CLK_FIN_PLL>,
+ <&clock CLK_MOUT_ACLK200_DISP1_SUB>,
+ <&clock CLK_MOUT_ACLK300_DISP1_SUB>;
+ clock-names = "oscclk", "clk0", "clk1";
+ };
- clock: clock-controller@10010000 {
- compatible = "samsung,exynos5250-clock";
- reg = <0x10010000 0x30000>;
- #clock-cells = <1>;
- };
+ clock: clock-controller@10010000 {
+ compatible = "samsung,exynos5250-clock";
+ reg = <0x10010000 0x30000>;
+ #clock-cells = <1>;
+ };
- clock_audss: audss-clock-controller@3810000 {
- compatible = "samsung,exynos5250-audss-clock";
- reg = <0x03810000 0x0C>;
- #clock-cells = <1>;
- clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>,
- <&clock CLK_SCLK_AUDIO0>, <&clock CLK_DIV_PCM0>;
- clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
- };
+ clock_audss: audss-clock-controller@3810000 {
+ compatible = "samsung,exynos5250-audss-clock";
+ reg = <0x03810000 0x0C>;
+ #clock-cells = <1>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>,
+ <&clock CLK_SCLK_AUDIO0>, <&clock CLK_DIV_PCM0>;
+ clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
+ };
- timer {
- compatible = "arm,armv7-timer";
- interrupts = <1 13 0xf08>,
- <1 14 0xf08>,
- <1 11 0xf08>,
- <1 10 0xf08>;
- /* Unfortunately we need this since some versions of U-Boot
- * on Exynos don't set the CNTFRQ register, so we need the
- * value from DT.
- */
- clock-frequency = <24000000>;
- };
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ /*
+ * Unfortunately we need this since some versions
+ * of U-Boot on Exynos don't set the CNTFRQ register,
+ * so we need the value from DT.
+ */
+ clock-frequency = <24000000>;
+ };
- mct@101C0000 {
- compatible = "samsung,exynos4210-mct";
- reg = <0x101C0000 0x800>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&mct_map>;
- interrupts = <0 0>, <1 0>, <2 0>, <3 0>,
- <4 0>, <5 0>;
- clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MCT>;
- clock-names = "fin_pll", "mct";
-
- mct_map: mct-map {
+ mct@101C0000 {
+ compatible = "samsung,exynos4210-mct";
+ reg = <0x101C0000 0x800>;
+ interrupt-controller;
#interrupt-cells = <2>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0x0 0 &combiner 23 3>,
- <0x1 0 &combiner 23 4>,
- <0x2 0 &combiner 25 2>,
- <0x3 0 &combiner 25 3>,
- <0x4 0 &gic 0 120 0>,
- <0x5 0 &gic 0 121 0>;
+ interrupt-parent = <&mct_map>;
+ interrupts = <0 0>, <1 0>, <2 0>, <3 0>,
+ <4 0>, <5 0>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MCT>;
+ clock-names = "fin_pll", "mct";
+
+ mct_map: mct-map {
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <0x0 0 &combiner 23 3>,
+ <0x1 0 &combiner 23 4>,
+ <0x2 0 &combiner 25 2>,
+ <0x3 0 &combiner 25 3>,
+ <0x4 0 &gic 0 120 0>,
+ <0x5 0 &gic 0 121 0>;
+ };
};
- };
-
- pmu {
- compatible = "arm,cortex-a15-pmu";
- interrupt-parent = <&combiner>;
- interrupts = <1 2>, <22 4>;
- };
-
- pinctrl_0: pinctrl@11400000 {
- compatible = "samsung,exynos5250-pinctrl";
- reg = <0x11400000 0x1000>;
- interrupts = <0 46 0>;
- wakup_eint: wakeup-interrupt-controller {
- compatible = "samsung,exynos4210-wakeup-eint";
- interrupt-parent = <&gic>;
- interrupts = <0 32 0>;
+ pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupt-parent = <&combiner>;
+ interrupts = <1 2>, <22 4>;
};
- };
-
- pinctrl_1: pinctrl@13400000 {
- compatible = "samsung,exynos5250-pinctrl";
- reg = <0x13400000 0x1000>;
- interrupts = <0 45 0>;
- };
- pinctrl_2: pinctrl@10d10000 {
- compatible = "samsung,exynos5250-pinctrl";
- reg = <0x10d10000 0x1000>;
- interrupts = <0 50 0>;
- };
-
- pinctrl_3: pinctrl@03860000 {
- compatible = "samsung,exynos5250-pinctrl";
- reg = <0x03860000 0x1000>;
- interrupts = <0 47 0>;
- };
-
- pmu_system_controller: system-controller@10040000 {
- compatible = "samsung,exynos5250-pmu", "syscon";
- reg = <0x10040000 0x5000>;
- clock-names = "clkout16";
- clocks = <&clock CLK_FIN_PLL>;
- #clock-cells = <1>;
- interrupt-controller;
- #interrupt-cells = <3>;
- interrupt-parent = <&gic>;
- };
+ pinctrl_0: pinctrl@11400000 {
+ compatible = "samsung,exynos5250-pinctrl";
+ reg = <0x11400000 0x1000>;
+ interrupts = <0 46 0>;
- sysreg_system_controller: syscon@10050000 {
- compatible = "samsung,exynos5-sysreg", "syscon";
- reg = <0x10050000 0x5000>;
- };
+ wakup_eint: wakeup-interrupt-controller {
+ compatible = "samsung,exynos4210-wakeup-eint";
+ interrupt-parent = <&gic>;
+ interrupts = <0 32 0>;
+ };
+ };
- watchdog@101D0000 {
- compatible = "samsung,exynos5250-wdt";
- reg = <0x101D0000 0x100>;
- interrupts = <0 42 0>;
- clocks = <&clock CLK_WDT>;
- clock-names = "watchdog";
- samsung,syscon-phandle = <&pmu_system_controller>;
- };
+ pinctrl_1: pinctrl@13400000 {
+ compatible = "samsung,exynos5250-pinctrl";
+ reg = <0x13400000 0x1000>;
+ interrupts = <0 45 0>;
+ };
- g2d@10850000 {
- compatible = "samsung,exynos5250-g2d";
- reg = <0x10850000 0x1000>;
- interrupts = <0 91 0>;
- clocks = <&clock CLK_G2D>;
- clock-names = "fimg2d";
- iommus = <&sysmmu_g2d>;
- };
+ pinctrl_2: pinctrl@10d10000 {
+ compatible = "samsung,exynos5250-pinctrl";
+ reg = <0x10d10000 0x1000>;
+ interrupts = <0 50 0>;
+ };
- mfc: codec@11000000 {
- compatible = "samsung,mfc-v6";
- reg = <0x11000000 0x10000>;
- interrupts = <0 96 0>;
- power-domains = <&pd_mfc>;
- clocks = <&clock CLK_MFC>;
- clock-names = "mfc";
- iommus = <&sysmmu_mfc_l>, <&sysmmu_mfc_r>;
- iommu-names = "left", "right";
- };
+ pinctrl_3: pinctrl@03860000 {
+ compatible = "samsung,exynos5250-pinctrl";
+ reg = <0x03860000 0x1000>;
+ interrupts = <0 47 0>;
+ };
- rotator: rotator@11C00000 {
- compatible = "samsung,exynos5250-rotator";
- reg = <0x11C00000 0x64>;
- interrupts = <0 84 0>;
- clocks = <&clock CLK_ROTATOR>;
- clock-names = "rotator";
- iommus = <&sysmmu_rotator>;
- };
+ pmu_system_controller: system-controller@10040000 {
+ compatible = "samsung,exynos5250-pmu", "syscon";
+ reg = <0x10040000 0x5000>;
+ clock-names = "clkout16";
+ clocks = <&clock CLK_FIN_PLL>;
+ #clock-cells = <1>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ };
- tmu: tmu@10060000 {
- compatible = "samsung,exynos5250-tmu";
- reg = <0x10060000 0x100>;
- interrupts = <0 65 0>;
- clocks = <&clock CLK_TMU>;
- clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
- };
+ watchdog@101D0000 {
+ compatible = "samsung,exynos5250-wdt";
+ reg = <0x101D0000 0x100>;
+ interrupts = <0 42 0>;
+ clocks = <&clock CLK_WDT>;
+ clock-names = "watchdog";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ };
- thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-sensors = <&tmu 0>;
+ g2d@10850000 {
+ compatible = "samsung,exynos5250-g2d";
+ reg = <0x10850000 0x1000>;
+ interrupts = <0 91 0>;
+ clocks = <&clock CLK_G2D>;
+ clock-names = "fimg2d";
+ iommus = <&sysmmu_g2d>;
+ };
- cooling-maps {
- map0 {
- /* Corresponds to 800MHz at freq_table */
- cooling-device = <&cpu0 9 9>;
- };
- map1 {
- /* Corresponds to 200MHz at freq_table */
- cooling-device = <&cpu0 15 15>;
- };
- };
+ mfc: codec@11000000 {
+ compatible = "samsung,mfc-v6";
+ reg = <0x11000000 0x10000>;
+ interrupts = <0 96 0>;
+ power-domains = <&pd_mfc>;
+ clocks = <&clock CLK_MFC>;
+ clock-names = "mfc";
+ iommus = <&sysmmu_mfc_l>, <&sysmmu_mfc_r>;
+ iommu-names = "left", "right";
};
- };
- sata: sata@122F0000 {
- compatible = "snps,dwc-ahci";
- samsung,sata-freq = <66>;
- reg = <0x122F0000 0x1ff>;
- interrupts = <0 115 0>;
- clocks = <&clock CLK_SATA>, <&clock CLK_SCLK_SATA>;
- clock-names = "sata", "sclk_sata";
- phys = <&sata_phy>;
- phy-names = "sata-phy";
- status = "disabled";
- };
+ rotator: rotator@11C00000 {
+ compatible = "samsung,exynos5250-rotator";
+ reg = <0x11C00000 0x64>;
+ interrupts = <0 84 0>;
+ clocks = <&clock CLK_ROTATOR>;
+ clock-names = "rotator";
+ iommus = <&sysmmu_rotator>;
+ };
- sata_phy: sata-phy@12170000 {
- compatible = "samsung,exynos5250-sata-phy";
- reg = <0x12170000 0x1ff>;
- clocks = <&clock CLK_SATA_PHYCTRL>;
- clock-names = "sata_phyctrl";
- #phy-cells = <0>;
- samsung,syscon-phandle = <&pmu_system_controller>;
- status = "disabled";
- };
+ tmu: tmu@10060000 {
+ compatible = "samsung,exynos5250-tmu";
+ reg = <0x10060000 0x100>;
+ interrupts = <0 65 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
- i2c_0: i2c@12C60000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C60000 0x100>;
- interrupts = <0 56 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C0>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
+ sata: sata@122F0000 {
+ compatible = "snps,dwc-ahci";
+ samsung,sata-freq = <66>;
+ reg = <0x122F0000 0x1ff>;
+ interrupts = <0 115 0>;
+ clocks = <&clock CLK_SATA>, <&clock CLK_SCLK_SATA>;
+ clock-names = "sata", "sclk_sata";
+ phys = <&sata_phy>;
+ phy-names = "sata-phy";
+ status = "disabled";
+ };
- i2c_1: i2c@12C70000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C70000 0x100>;
- interrupts = <0 57 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C1>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
+ sata_phy: sata-phy@12170000 {
+ compatible = "samsung,exynos5250-sata-phy";
+ reg = <0x12170000 0x1ff>;
+ clocks = <&clock CLK_SATA_PHYCTRL>;
+ clock-names = "sata_phyctrl";
+ #phy-cells = <0>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ status = "disabled";
+ };
- i2c_2: i2c@12C80000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C80000 0x100>;
- interrupts = <0 58 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C2>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
+ /* i2c_0-3 are defined in exynos5.dtsi */
+ i2c_4: i2c@12CA0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12CA0000 0x100>;
+ interrupts = <0 60 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_I2C4>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_bus>;
+ status = "disabled";
+ };
- i2c_3: i2c@12C90000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C90000 0x100>;
- interrupts = <0 59 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C3>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
+ i2c_5: i2c@12CB0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12CB0000 0x100>;
+ interrupts = <0 61 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_I2C5>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_bus>;
+ status = "disabled";
+ };
- i2c_4: i2c@12CA0000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12CA0000 0x100>;
- interrupts = <0 60 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C4>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c4_bus>;
- status = "disabled";
- };
+ i2c_6: i2c@12CC0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12CC0000 0x100>;
+ interrupts = <0 62 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_I2C6>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_bus>;
+ status = "disabled";
+ };
- i2c_5: i2c@12CB0000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12CB0000 0x100>;
- interrupts = <0 61 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C5>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c5_bus>;
- status = "disabled";
- };
+ i2c_7: i2c@12CD0000 {
+ compatible = "samsung,s3c2440-i2c";
+ reg = <0x12CD0000 0x100>;
+ interrupts = <0 63 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_I2C7>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_bus>;
+ status = "disabled";
+ };
- i2c_6: i2c@12CC0000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12CC0000 0x100>;
- interrupts = <0 62 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C6>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c6_bus>;
- status = "disabled";
- };
+ i2c_8: i2c@12CE0000 {
+ compatible = "samsung,s3c2440-hdmiphy-i2c";
+ reg = <0x12CE0000 0x1000>;
+ interrupts = <0 64 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_I2C_HDMI>;
+ clock-names = "i2c";
+ status = "disabled";
+ };
- i2c_7: i2c@12CD0000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12CD0000 0x100>;
- interrupts = <0 63 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C7>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c7_bus>;
- status = "disabled";
- };
+ i2c_9: i2c@121D0000 {
+ compatible = "samsung,exynos5-sata-phy-i2c";
+ reg = <0x121D0000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_SATA_PHYI2C>;
+ clock-names = "i2c";
+ status = "disabled";
+ };
- i2c_8: i2c@12CE0000 {
- compatible = "samsung,s3c2440-hdmiphy-i2c";
- reg = <0x12CE0000 0x1000>;
- interrupts = <0 64 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C_HDMI>;
- clock-names = "i2c";
- status = "disabled";
- };
+ spi_0: spi@12d20000 {
+ compatible = "samsung,exynos4210-spi";
+ status = "disabled";
+ reg = <0x12d20000 0x100>;
+ interrupts = <0 66 0>;
+ dmas = <&pdma0 5
+ &pdma0 4>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_SPI0>, <&clock CLK_SCLK_SPI0>;
+ clock-names = "spi", "spi_busclk0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_bus>;
+ };
- i2c_9: i2c@121D0000 {
- compatible = "samsung,exynos5-sata-phy-i2c";
- reg = <0x121D0000 0x100>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_SATA_PHYI2C>;
- clock-names = "i2c";
- status = "disabled";
- };
+ spi_1: spi@12d30000 {
+ compatible = "samsung,exynos4210-spi";
+ status = "disabled";
+ reg = <0x12d30000 0x100>;
+ interrupts = <0 67 0>;
+ dmas = <&pdma1 5
+ &pdma1 4>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_SPI1>, <&clock CLK_SCLK_SPI1>;
+ clock-names = "spi", "spi_busclk0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_bus>;
+ };
- spi_0: spi@12d20000 {
- compatible = "samsung,exynos4210-spi";
- status = "disabled";
- reg = <0x12d20000 0x100>;
- interrupts = <0 66 0>;
- dmas = <&pdma0 5
- &pdma0 4>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_SPI0>, <&clock CLK_SCLK_SPI0>;
- clock-names = "spi", "spi_busclk0";
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_bus>;
- };
+ spi_2: spi@12d40000 {
+ compatible = "samsung,exynos4210-spi";
+ status = "disabled";
+ reg = <0x12d40000 0x100>;
+ interrupts = <0 68 0>;
+ dmas = <&pdma0 7
+ &pdma0 6>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_SPI2>, <&clock CLK_SCLK_SPI2>;
+ clock-names = "spi", "spi_busclk0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_bus>;
+ };
- spi_1: spi@12d30000 {
- compatible = "samsung,exynos4210-spi";
- status = "disabled";
- reg = <0x12d30000 0x100>;
- interrupts = <0 67 0>;
- dmas = <&pdma1 5
- &pdma1 4>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_SPI1>, <&clock CLK_SCLK_SPI1>;
- clock-names = "spi", "spi_busclk0";
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_bus>;
- };
+ mmc_0: mmc@12200000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ interrupts = <0 75 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12200000 0x1000>;
+ clocks = <&clock CLK_SDMMC0>, <&clock CLK_SCLK_MMC0>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
+ };
- spi_2: spi@12d40000 {
- compatible = "samsung,exynos4210-spi";
- status = "disabled";
- reg = <0x12d40000 0x100>;
- interrupts = <0 68 0>;
- dmas = <&pdma0 7
- &pdma0 6>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_SPI2>, <&clock CLK_SCLK_SPI2>;
- clock-names = "spi", "spi_busclk0";
- pinctrl-names = "default";
- pinctrl-0 = <&spi2_bus>;
- };
+ mmc_1: mmc@12210000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ interrupts = <0 76 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12210000 0x1000>;
+ clocks = <&clock CLK_SDMMC1>, <&clock CLK_SCLK_MMC1>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
+ };
- mmc_0: mmc@12200000 {
- compatible = "samsung,exynos5250-dw-mshc";
- interrupts = <0 75 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x12200000 0x1000>;
- clocks = <&clock CLK_SDMMC0>, <&clock CLK_SCLK_MMC0>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x80>;
- status = "disabled";
- };
+ mmc_2: mmc@12220000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ interrupts = <0 77 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12220000 0x1000>;
+ clocks = <&clock CLK_SDMMC2>, <&clock CLK_SCLK_MMC2>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
+ };
- mmc_1: mmc@12210000 {
- compatible = "samsung,exynos5250-dw-mshc";
- interrupts = <0 76 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x12210000 0x1000>;
- clocks = <&clock CLK_SDMMC1>, <&clock CLK_SCLK_MMC1>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x80>;
- status = "disabled";
- };
+ mmc_3: mmc@12230000 {
+ compatible = "samsung,exynos5250-dw-mshc";
+ reg = <0x12230000 0x1000>;
+ interrupts = <0 78 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clock CLK_SDMMC3>, <&clock CLK_SCLK_MMC3>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x80>;
+ status = "disabled";
+ };
- mmc_2: mmc@12220000 {
- compatible = "samsung,exynos5250-dw-mshc";
- interrupts = <0 77 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x12220000 0x1000>;
- clocks = <&clock CLK_SDMMC2>, <&clock CLK_SCLK_MMC2>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x80>;
- status = "disabled";
- };
+ i2s0: i2s@03830000 {
+ compatible = "samsung,s5pv210-i2s";
+ status = "disabled";
+ reg = <0x03830000 0x100>;
+ dmas = <&pdma0 10
+ &pdma0 9
+ &pdma0 8>;
+ dma-names = "tx", "rx", "tx-sec";
+ clocks = <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_SCLK_I2S>;
+ clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+ samsung,idma-addr = <0x03000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_bus>;
+ };
- mmc_3: mmc@12230000 {
- compatible = "samsung,exynos5250-dw-mshc";
- reg = <0x12230000 0x1000>;
- interrupts = <0 78 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_SDMMC3>, <&clock CLK_SCLK_MMC3>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x80>;
- status = "disabled";
- };
+ i2s1: i2s@12D60000 {
+ compatible = "samsung,s3c6410-i2s";
+ status = "disabled";
+ reg = <0x12D60000 0x100>;
+ dmas = <&pdma1 12
+ &pdma1 11>;
+ dma-names = "tx", "rx";
+ clocks = <&clock CLK_I2S1>, <&clock CLK_DIV_I2S1>;
+ clock-names = "iis", "i2s_opclk0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1_bus>;
+ };
- i2s0: i2s@03830000 {
- compatible = "samsung,s5pv210-i2s";
- status = "disabled";
- reg = <0x03830000 0x100>;
- dmas = <&pdma0 10
- &pdma0 9
- &pdma0 8>;
- dma-names = "tx", "rx", "tx-sec";
- clocks = <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_SCLK_I2S>;
- clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
- samsung,idma-addr = <0x03000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_bus>;
- };
+ i2s2: i2s@12D70000 {
+ compatible = "samsung,s3c6410-i2s";
+ status = "disabled";
+ reg = <0x12D70000 0x100>;
+ dmas = <&pdma0 12
+ &pdma0 11>;
+ dma-names = "tx", "rx";
+ clocks = <&clock CLK_I2S2>, <&clock CLK_DIV_I2S2>;
+ clock-names = "iis", "i2s_opclk0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2_bus>;
+ };
- i2s1: i2s@12D60000 {
- compatible = "samsung,s3c6410-i2s";
- status = "disabled";
- reg = <0x12D60000 0x100>;
- dmas = <&pdma1 12
- &pdma1 11>;
- dma-names = "tx", "rx";
- clocks = <&clock CLK_I2S1>, <&clock CLK_DIV_I2S1>;
- clock-names = "iis", "i2s_opclk0";
- pinctrl-names = "default";
- pinctrl-0 = <&i2s1_bus>;
- };
+ usb_dwc3 {
+ compatible = "samsung,exynos5250-dwusb3";
+ clocks = <&clock CLK_USB3>;
+ clock-names = "usbdrd30";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ usbdrd_dwc3: dwc3@12000000 {
+ compatible = "synopsys,dwc3";
+ reg = <0x12000000 0x10000>;
+ interrupts = <0 72 0>;
+ phys = <&usbdrd_phy 0>, <&usbdrd_phy 1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
- i2s2: i2s@12D70000 {
- compatible = "samsung,s3c6410-i2s";
- status = "disabled";
- reg = <0x12D70000 0x100>;
- dmas = <&pdma0 12
- &pdma0 11>;
- dma-names = "tx", "rx";
- clocks = <&clock CLK_I2S2>, <&clock CLK_DIV_I2S2>;
- clock-names = "iis", "i2s_opclk0";
- pinctrl-names = "default";
- pinctrl-0 = <&i2s2_bus>;
- };
+ usbdrd_phy: phy@12100000 {
+ compatible = "samsung,exynos5250-usbdrd-phy";
+ reg = <0x12100000 0x100>;
+ clocks = <&clock CLK_USB3>, <&clock CLK_FIN_PLL>;
+ clock-names = "phy", "ref";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #phy-cells = <1>;
+ };
- usb_dwc3 {
- compatible = "samsung,exynos5250-dwusb3";
- clocks = <&clock CLK_USB3>;
- clock-names = "usbdrd30";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
+ ehci: usb@12110000 {
+ compatible = "samsung,exynos4210-ehci";
+ reg = <0x12110000 0x100>;
+ interrupts = <0 71 0>;
- usbdrd_dwc3: dwc3@12000000 {
- compatible = "synopsys,dwc3";
- reg = <0x12000000 0x10000>;
- interrupts = <0 72 0>;
- phys = <&usbdrd_phy 0>, <&usbdrd_phy 1>;
- phy-names = "usb2-phy", "usb3-phy";
+ clocks = <&clock CLK_USB2>;
+ clock-names = "usbhost";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ phys = <&usb2_phy_gen 1>;
+ };
};
- };
-
- usbdrd_phy: phy@12100000 {
- compatible = "samsung,exynos5250-usbdrd-phy";
- reg = <0x12100000 0x100>;
- clocks = <&clock CLK_USB3>, <&clock CLK_FIN_PLL>;
- clock-names = "phy", "ref";
- samsung,pmu-syscon = <&pmu_system_controller>;
- #phy-cells = <1>;
- };
- ehci: usb@12110000 {
- compatible = "samsung,exynos4210-ehci";
- reg = <0x12110000 0x100>;
- interrupts = <0 71 0>;
+ ohci: usb@12120000 {
+ compatible = "samsung,exynos4210-ohci";
+ reg = <0x12120000 0x100>;
+ interrupts = <0 71 0>;
- clocks = <&clock CLK_USB2>;
- clock-names = "usbhost";
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- phys = <&usb2_phy_gen 1>;
+ clocks = <&clock CLK_USB2>;
+ clock-names = "usbhost";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ phys = <&usb2_phy_gen 1>;
+ };
};
- };
- ohci: usb@12120000 {
- compatible = "samsung,exynos4210-ohci";
- reg = <0x12120000 0x100>;
- interrupts = <0 71 0>;
-
- clocks = <&clock CLK_USB2>;
- clock-names = "usbhost";
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- phys = <&usb2_phy_gen 1>;
+ usb2_phy_gen: phy@12130000 {
+ compatible = "samsung,exynos5250-usb2-phy";
+ reg = <0x12130000 0x100>;
+ clocks = <&clock CLK_USB2>, <&clock CLK_FIN_PLL>;
+ clock-names = "phy", "ref";
+ #phy-cells = <1>;
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ samsung,pmureg-phandle = <&pmu_system_controller>;
};
- };
- usb2_phy_gen: phy@12130000 {
- compatible = "samsung,exynos5250-usb2-phy";
- reg = <0x12130000 0x100>;
- clocks = <&clock CLK_USB2>, <&clock CLK_FIN_PLL>;
- clock-names = "phy", "ref";
- #phy-cells = <1>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- samsung,pmureg-phandle = <&pmu_system_controller>;
- };
+ amba {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ pdma0: pdma@121A0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x121A0000 0x1000>;
+ interrupts = <0 34 0>;
+ clocks = <&clock CLK_PDMA0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
+
+ pdma1: pdma@121B0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x121B0000 0x1000>;
+ interrupts = <0 35 0>;
+ clocks = <&clock CLK_PDMA1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
+
+ mdma0: mdma@10800000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x10800000 0x1000>;
+ interrupts = <0 33 0>;
+ clocks = <&clock CLK_MDMA0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
+ };
+
+ mdma1: mdma@11C10000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x11C10000 0x1000>;
+ interrupts = <0 124 0>;
+ clocks = <&clock CLK_MDMA1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
+ };
+ };
- pwm: pwm@12dd0000 {
- compatible = "samsung,exynos4210-pwm";
- reg = <0x12dd0000 0x100>;
- samsung,pwm-outputs = <0>, <1>, <2>, <3>;
- #pwm-cells = <3>;
- clocks = <&clock CLK_PWM>;
- clock-names = "timers";
- };
+ gsc_0: gsc@13e00000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e00000 0x1000>;
+ interrupts = <0 85 0>;
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL0>;
+ clock-names = "gscl";
+ iommu = <&sysmmu_gsc0>;
+ };
- amba {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- interrupt-parent = <&gic>;
- ranges;
-
- pdma0: pdma@121A0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x121A0000 0x1000>;
- interrupts = <0 34 0>;
- clocks = <&clock CLK_PDMA0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
-
- pdma1: pdma@121B0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x121B0000 0x1000>;
- interrupts = <0 35 0>;
- clocks = <&clock CLK_PDMA1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
-
- mdma0: mdma@10800000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x10800000 0x1000>;
- interrupts = <0 33 0>;
- clocks = <&clock CLK_MDMA0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <1>;
- };
-
- mdma1: mdma@11C10000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x11C10000 0x1000>;
- interrupts = <0 124 0>;
- clocks = <&clock CLK_MDMA1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <1>;
+ gsc_1: gsc@13e10000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e10000 0x1000>;
+ interrupts = <0 86 0>;
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL1>;
+ clock-names = "gscl";
+ iommu = <&sysmmu_gsc1>;
};
- };
- gsc_0: gsc@13e00000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e00000 0x1000>;
- interrupts = <0 85 0>;
- power-domains = <&pd_gsc>;
- clocks = <&clock CLK_GSCL0>;
- clock-names = "gscl";
- iommu = <&sysmmu_gsc0>;
- };
+ gsc_2: gsc@13e20000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e20000 0x1000>;
+ interrupts = <0 87 0>;
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL2>;
+ clock-names = "gscl";
+ iommu = <&sysmmu_gsc2>;
+ };
- gsc_1: gsc@13e10000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e10000 0x1000>;
- interrupts = <0 86 0>;
- power-domains = <&pd_gsc>;
- clocks = <&clock CLK_GSCL1>;
- clock-names = "gscl";
- iommu = <&sysmmu_gsc1>;
- };
+ gsc_3: gsc@13e30000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e30000 0x1000>;
+ interrupts = <0 88 0>;
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL3>;
+ clock-names = "gscl";
+ iommu = <&sysmmu_gsc3>;
+ };
- gsc_2: gsc@13e20000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e20000 0x1000>;
- interrupts = <0 87 0>;
- power-domains = <&pd_gsc>;
- clocks = <&clock CLK_GSCL2>;
- clock-names = "gscl";
- iommu = <&sysmmu_gsc2>;
- };
+ hdmi: hdmi@14530000 {
+ compatible = "samsung,exynos4212-hdmi";
+ reg = <0x14530000 0x70000>;
+ power-domains = <&pd_disp1>;
+ interrupts = <0 95 0>;
+ clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
+ <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
+ <&clock CLK_MOUT_HDMI>;
+ clock-names = "hdmi", "sclk_hdmi", "sclk_pixel",
+ "sclk_hdmiphy", "mout_hdmi";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ };
- gsc_3: gsc@13e30000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e30000 0x1000>;
- interrupts = <0 88 0>;
- power-domains = <&pd_gsc>;
- clocks = <&clock CLK_GSCL3>;
- clock-names = "gscl";
- iommu = <&sysmmu_gsc3>;
- };
+ mixer@14450000 {
+ compatible = "samsung,exynos5250-mixer";
+ reg = <0x14450000 0x10000>;
+ power-domains = <&pd_disp1>;
+ interrupts = <0 94 0>;
+ clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+ <&clock CLK_SCLK_HDMI>;
+ clock-names = "mixer", "hdmi", "sclk_hdmi";
+ iommus = <&sysmmu_tv>;
+ };
- hdmi: hdmi@14530000 {
- compatible = "samsung,exynos4212-hdmi";
- reg = <0x14530000 0x70000>;
- power-domains = <&pd_disp1>;
- interrupts = <0 95 0>;
- clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
- <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
- <&clock CLK_MOUT_HDMI>;
- clock-names = "hdmi", "sclk_hdmi", "sclk_pixel",
- "sclk_hdmiphy", "mout_hdmi";
- samsung,syscon-phandle = <&pmu_system_controller>;
- };
+ dp_phy: video-phy {
+ compatible = "samsung,exynos5250-dp-video-phy";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #phy-cells = <0>;
+ };
- mixer@14450000 {
- compatible = "samsung,exynos5250-mixer";
- reg = <0x14450000 0x10000>;
- power-domains = <&pd_disp1>;
- interrupts = <0 94 0>;
- clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
- <&clock CLK_SCLK_HDMI>;
- clock-names = "mixer", "hdmi", "sclk_hdmi";
- iommus = <&sysmmu_tv>;
- };
+ adc: adc@12D10000 {
+ compatible = "samsung,exynos-adc-v1";
+ reg = <0x12D10000 0x100>;
+ interrupts = <0 106 0>;
+ clocks = <&clock CLK_ADC>;
+ clock-names = "adc";
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ status = "disabled";
+ };
- dp_phy: video-phy {
- compatible = "samsung,exynos5250-dp-video-phy";
- samsung,pmu-syscon = <&pmu_system_controller>;
- #phy-cells = <0>;
- };
+ sss@10830000 {
+ compatible = "samsung,exynos4210-secss";
+ reg = <0x10830000 0x300>;
+ interrupts = <0 112 0>;
+ clocks = <&clock CLK_SSS>;
+ clock-names = "secss";
+ };
- adc: adc@12D10000 {
- compatible = "samsung,exynos-adc-v1";
- reg = <0x12D10000 0x100>;
- interrupts = <0 106 0>;
- clocks = <&clock CLK_ADC>;
- clock-names = "adc";
- #io-channel-cells = <1>;
- io-channel-ranges;
- samsung,syscon-phandle = <&pmu_system_controller>;
- status = "disabled";
- };
+ sysmmu_g2d: sysmmu@10A60000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x10A60000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <24 5>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_2D>, <&clock CLK_G2D>;
+ #iommu-cells = <0>;
+ };
- sss@10830000 {
- compatible = "samsung,exynos4210-secss";
- reg = <0x10830000 0x300>;
- interrupts = <0 112 0>;
- clocks = <&clock CLK_SSS>;
- clock-names = "secss";
- };
+ sysmmu_mfc_r: sysmmu@11200000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11200000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <6 2>;
+ power-domains = <&pd_mfc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MFCR>, <&clock CLK_MFC>;
+ #iommu-cells = <0>;
+ };
- sysmmu_g2d: sysmmu@10A60000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x10A60000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <24 5>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_2D>, <&clock CLK_G2D>;
- #iommu-cells = <0>;
- };
+ sysmmu_mfc_l: sysmmu@11210000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11210000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <8 5>;
+ power-domains = <&pd_mfc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MFCL>, <&clock CLK_MFC>;
+ #iommu-cells = <0>;
+ };
- sysmmu_mfc_r: sysmmu@11200000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11200000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <6 2>;
- power-domains = <&pd_mfc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MFCR>, <&clock CLK_MFC>;
- #iommu-cells = <0>;
- };
+ sysmmu_rotator: sysmmu@11D40000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11D40000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <4 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_ROTATOR>, <&clock CLK_ROTATOR>;
+ #iommu-cells = <0>;
+ };
- sysmmu_mfc_l: sysmmu@11210000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11210000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <8 5>;
- power-domains = <&pd_mfc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MFCL>, <&clock CLK_MFC>;
- #iommu-cells = <0>;
- };
+ sysmmu_jpeg: sysmmu@11F20000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11F20000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <4 2>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_JPEG>, <&clock CLK_JPEG>;
+ #iommu-cells = <0>;
+ };
- sysmmu_rotator: sysmmu@11D40000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11D40000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <4 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_ROTATOR>, <&clock CLK_ROTATOR>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_isp: sysmmu@13260000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13260000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <10 6>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_ISP>;
+ #iommu-cells = <0>;
+ };
- sysmmu_jpeg: sysmmu@11F20000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11F20000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <4 2>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_JPEG>, <&clock CLK_JPEG>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_drc: sysmmu@13270000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13270000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <11 6>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_DRC>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_isp: sysmmu@13260000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13260000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <10 6>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_ISP>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_fd: sysmmu@132A0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x132A0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <5 0>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_FD>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_drc: sysmmu@13270000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13270000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <11 6>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_DRC>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_scc: sysmmu@13280000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13280000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <5 2>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_SCC>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_fd: sysmmu@132A0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x132A0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <5 0>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_FD>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_scp: sysmmu@13290000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13290000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <3 6>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_SCP>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_scc: sysmmu@13280000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13280000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <5 2>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_SCC>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_mcuctl: sysmmu@132B0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x132B0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <5 4>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_MCU>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_scp: sysmmu@13290000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13290000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <3 6>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_SCP>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_odc: sysmmu@132C0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x132C0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <11 0>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_ODC>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_mcuctl: sysmmu@132B0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x132B0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <5 4>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_MCU>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_dis0: sysmmu@132D0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x132D0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <10 4>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_DIS0>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_odc: sysmmu@132C0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x132C0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <11 0>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_ODC>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_dis1: sysmmu@132E0000{
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x132E0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <9 4>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_DIS1>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_dis0: sysmmu@132D0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x132D0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <10 4>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_DIS0>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_3dnr: sysmmu@132F0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x132F0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <5 6>;
+ clock-names = "sysmmu";
+ clocks = <&clock CLK_SMMU_FIMC_3DNR>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_dis1: sysmmu@132E0000{
- compatible = "samsung,exynos-sysmmu";
- reg = <0x132E0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <9 4>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_DIS1>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_lite0: sysmmu@13C40000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13C40000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <3 4>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_FIMC_LITE0>, <&clock CLK_CAMIF_TOP>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_3dnr: sysmmu@132F0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x132F0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <5 6>;
- clock-names = "sysmmu";
- clocks = <&clock CLK_SMMU_FIMC_3DNR>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimc_lite1: sysmmu@13C50000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13C50000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <24 1>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_FIMC_LITE1>, <&clock CLK_CAMIF_TOP>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_lite0: sysmmu@13C40000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13C40000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <3 4>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMC_LITE0>, <&clock CLK_CAMIF_TOP>;
- #iommu-cells = <0>;
- };
+ sysmmu_gsc0: sysmmu@13E80000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13E80000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 0>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
+ #iommu-cells = <0>;
+ };
- sysmmu_fimc_lite1: sysmmu@13C50000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13C50000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <24 1>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMC_LITE1>, <&clock CLK_CAMIF_TOP>;
- #iommu-cells = <0>;
- };
+ sysmmu_gsc1: sysmmu@13E90000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13E90000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 2>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL1>, <&clock CLK_GSCL1>;
+ #iommu-cells = <0>;
+ };
- sysmmu_gsc0: sysmmu@13E80000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13E80000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 0>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
- #iommu-cells = <0>;
- };
+ sysmmu_gsc2: sysmmu@13EA0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13EA0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 4>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL2>, <&clock CLK_GSCL2>;
+ #iommu-cells = <0>;
+ };
- sysmmu_gsc1: sysmmu@13E90000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13E90000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 2>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL1>, <&clock CLK_GSCL1>;
- #iommu-cells = <0>;
- };
+ sysmmu_gsc3: sysmmu@13EB0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13EB0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 6>;
+ power-domains = <&pd_gsc>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL3>, <&clock CLK_GSCL3>;
+ #iommu-cells = <0>;
+ };
- sysmmu_gsc2: sysmmu@13EA0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13EA0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 4>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL2>, <&clock CLK_GSCL2>;
- #iommu-cells = <0>;
- };
+ sysmmu_fimd1: sysmmu@14640000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x14640000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <3 2>;
+ power-domains = <&pd_disp1>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_FIMD1>, <&clock CLK_FIMD1>;
+ #iommu-cells = <0>;
+ };
- sysmmu_gsc3: sysmmu@13EB0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13EB0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 6>;
- power-domains = <&pd_gsc>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL3>, <&clock CLK_GSCL3>;
- #iommu-cells = <0>;
+ sysmmu_tv: sysmmu@14650000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x14650000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <7 4>;
+ power-domains = <&pd_disp1>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_TV>, <&clock CLK_MIXER>;
+ #iommu-cells = <0>;
+ };
};
- sysmmu_fimd1: sysmmu@14640000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x14640000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <3 2>;
- power-domains = <&pd_disp1>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMD1>, <&clock CLK_FIMD1>;
- #iommu-cells = <0>;
- };
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&tmu 0>;
- sysmmu_tv: sysmmu@14650000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x14650000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <7 4>;
- power-domains = <&pd_disp1>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_TV>, <&clock CLK_MIXER>;
- #iommu-cells = <0>;
+ cooling-maps {
+ map0 {
+ /* Corresponds to 800MHz at freq_table */
+ cooling-device = <&cpu0 9 9>;
+ };
+ map1 {
+ /* Corresponds to 200MHz at freq_table */
+ cooling-device = <&cpu0 15 15>;
+ };
+ };
+ };
};
};
@@ -1070,6 +1000,39 @@
iommus = <&sysmmu_fimd1>;
};
+&i2c_0 {
+ clocks = <&clock CLK_I2C0>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_bus>;
+};
+
+&i2c_1 {
+ clocks = <&clock CLK_I2C1>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_bus>;
+};
+
+&i2c_2 {
+ clocks = <&clock CLK_I2C2>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_bus>;
+};
+
+&i2c_3 {
+ clocks = <&clock CLK_I2C3>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_bus>;
+};
+
+&pwm {
+ clocks = <&clock CLK_PWM>;
+ clock-names = "timers";
+};
+
&rtc {
clocks = <&clock CLK_RTC>;
clock-names = "rtc";
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
new file mode 100644
index 000000000000..d9499310a301
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -0,0 +1,580 @@
+/*
+ * Hardkernel Odroid XU board device tree source
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2016 Krzysztof Kozlowski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+#include "exynos5410.dtsi"
+#include <dt-bindings/clock/maxim,max77802.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "exynos54xx-odroidxu-leds.dtsi"
+
+/ {
+ model = "Hardkernel Odroid XU";
+ compatible = "hardkernel,odroid-xu", "samsung,exynos5410", "samsung,exynos5";
+
+ memory {
+ reg = <0x40000000 0x7ea00000>;
+ };
+
+ chosen {
+ linux,stdout-path = &serial_2;
+ };
+
+ emmc_pwrseq: pwrseq {
+ pinctrl-0 = <&emmc_nrst_pin>;
+ pinctrl-names = "default";
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpd1 0 GPIO_ACTIVE_LOW>;
+ };
+
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ pwms = <&pwm 0 20972 0>;
+ cooling-min-state = <0>;
+ cooling-max-state = <3>;
+ #cooling-cells = <2>;
+ cooling-levels = <0 130 170 230>;
+ };
+
+ fin_pll: xxti {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
+ firmware@02073000 {
+ compatible = "samsung,secure-firmware";
+ reg = <0x02073000 0x1000>;
+ };
+};
+
+&cpu0_thermal {
+ thermal-sensors = <&tmu_cpu0 0>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ trips {
+ cpu_alert0: cpu-alert-0 {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_alert1: cpu-alert-1 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_alert2: cpu-alert-2 {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&fan0 0 1>;
+ };
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&fan0 1 2>;
+ };
+ map2 {
+ trip = <&cpu_alert2>;
+ cooling-device = <&fan0 2 3>;
+ };
+ };
+};
+
+&hsi2c_4 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <400000>;
+ status = "okay";
+
+ usb3503: usb-hub@08 {
+ compatible = "smsc,usb3503";
+ reg = <0x08>;
+
+ intn-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
+ connect-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpx1 4 GPIO_ACTIVE_HIGH>;
+ initial-mode = <1>;
+
+ clock-names = "refclk";
+ clocks = <&pmu_system_controller 0>;
+ refclk-frequency = <24000000>;
+ };
+
+ max77802: pmic@09 {
+ compatible = "maxim,max77802";
+ reg = <0x9>;
+ interrupt-parent = <&gpx0>;
+ interrupts = <4 IRQ_TYPE_NONE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77802_irq>, <&pmic_dvs_1>, <&pmic_dvs_2>,
+ <&pmic_dvs_3>;
+ #clock-cells = <1>;
+
+ inl1-supply = <&buck5_reg>;
+ inl2-supply = <&buck7_reg>;
+ inl3-supply = <&buck9_reg>;
+ inl4-supply = <&buck9_reg>;
+ inl5-supply = <&buck9_reg>;
+ inl6-supply = <&buck10_reg>;
+ inl7-supply = <&buck9_reg>;
+ /* inl9 supply is BOOST, not configured here */
+ inl10-supply = <&buck7_reg>;
+
+ regulators {
+ buck1_reg: BUCK1 {
+ regulator-name = "vdd_mif";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck2_reg: BUCK2 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck3_reg: BUCK3 {
+ regulator-name = "vdd_int";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck4_reg: BUCK4 {
+ regulator-name = "vdd_g3d";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck5_reg: BUCK5 {
+ regulator-name = "vdd_mem";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck6_reg: BUCK6 {
+ regulator-name = "vdd_kfc";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck7_reg: BUCK7 {
+ regulator-name = "buck7";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck8_reg: BUCK8 {
+ /* vdd_mmc0 */
+ regulator-name = "vddf_2v85";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck9_reg: BUCK9 {
+ regulator-name = "buck9";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck10_reg: BUCK10 {
+ regulator-name = "buck10";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldo1_reg: LDO1 {
+ regulator-name = "vdd_alive";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ ldo2_reg: LDO2 {
+ regulator-name = "vddq_m1_m2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ ldo3_reg: LDO3 {
+ regulator-name = "vddq_gpio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo4_reg: LDO4 {
+ regulator-name = "vddq_mmc2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ /* Having it off prevents reboot */
+ regulator-always-on;
+ };
+
+ ldo5_reg: LDO5 {
+ regulator-name = "vdd18_hsic";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo6_reg: LDO6 {
+ regulator-name = "vdd18_bpll";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo7_reg: LDO7 {
+ regulator-name = "vddq_lcd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo8_reg: LDO8 {
+ regulator-name = "vdd10_hdmi";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ ldo9_reg: LDO9 {
+ regulator-name = "ldo9";
+ };
+
+ ldo10_reg: LDO10 {
+ regulator-name = "vdd18_mipi";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo11_reg: LDO11 {
+ regulator-name = "vddq_mmc01";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ /*
+ * Having it off prevents accessing MMC after
+ * reboot with error:
+ * MMC Device 1: Clock OFF has been failed.
+ */
+ regulator-always-on;
+ };
+
+ ldo12_reg: LDO12 {
+ regulator-name = "vdd33_usb3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo13_reg: LDO13 {
+ regulator-name = "vddq_abbg0";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo14_reg: LDO14 {
+ regulator-name = "vddq_abbg1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ldo15_reg: LDO15 {
+ regulator-name = "vdd10_usb3";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ };
+
+ ldo16_reg: LDO16 {
+ regulator-name = "ldo16";
+ };
+
+ ldo17_reg: LDO17 {
+ regulator-name = "cam_sensor_core";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ ldo18_reg: LDO18 {
+ regulator-name = "ldo18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo19_reg: LDO19 {
+ regulator-name = "ldo19";
+ };
+
+ ldo20_reg: LDO20 {
+ regulator-name = "vdd_mmc0";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo21_reg: LDO21 {
+ /* vdd_mmc2 */
+ regulator-name = "vddf_2v8";
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ ldo22_reg: LDO22 {
+ regulator-name = "ldo22";
+ };
+
+ ldo23_reg: LDO23 {
+ regulator-name = "dp_p3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo24_reg: LDO24 {
+ regulator-name = "cam_af";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ ldo25_reg: LDO25 {
+ regulator-name = "eth_p3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo26_reg: LDO26 {
+ regulator-name = "usb30_extclk";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo27_reg: LDO27 {
+ regulator-name = "ldo27";
+ };
+
+ ldo28_reg: LDO28 {
+ regulator-name = "ldo28";
+ };
+
+ ldo29_reg: LDO29 {
+ regulator-name = "ldo29";
+ };
+
+ ldo30_reg: LDO30 {
+ regulator-name = "vddq_e1_e2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ ldo31_reg: LDO31 {
+ regulator-name = "ldo31";
+ };
+
+ /* On revisions with ti,ina231 this is sensor VS */
+ ldo32_reg: LDO32 {
+ regulator-name = "vs_power_meter";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo33_reg: LDO33 {
+ regulator-name = "ldo33";
+ };
+
+ ldo34_reg: LDO34 {
+ regulator-name = "ldo34";
+ };
+
+ ldo35_reg: LDO35 {
+ regulator-name = "ldo35";
+ };
+ };
+ };
+};
+
+&mmc_0 {
+ status = "okay";
+ mmc-pwrseq = <&emmc_pwrseq>;
+ cd-gpios = <&gpc0 2 GPIO_ACTIVE_LOW>;
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ samsung,dw-mshc-hs400-timing = <0 2>;
+ samsung,read-strobe-delay = <90>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ vmmc-supply = <&ldo20_reg>;
+ vqmmc-supply = <&ldo11_reg>;
+};
+
+&mmc_2 {
+ status = "okay";
+ card-detect-delay = <200>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ vmmc-supply = <&ldo21_reg>;
+ vqmmc-supply = <&ldo4_reg>;
+};
+
+&pinctrl_0 {
+ emmc_nrst_pin: emmc-nrst {
+ samsung,pins = "gpd1-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pmic_dvs_3: pmic-dvs-3 {
+ samsung,pins = "gpx0-0";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pmic_dvs_2: pmic-dvs-2 {
+ samsung,pins = "gpx0-1";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pmic_dvs_1: pmic-dvs-1 {
+ samsung,pins = "gpx0-2";
+ samsung,pin-function = <1>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ samsung,pin-val = <1>;
+ };
+
+ max77802_irq: max77802-irq {
+ samsung,pins = "gpx0-4";
+ samsung,pin-function = <0xf>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+};
+
+&pwm {
+ /*
+ * PWM 0 -- fan
+ * PWM 1 -- Green LED
+ * PWM 2 -- Blue LED
+ * PWM 3 -- on MIPI connector for backlight
+ */
+ pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+ clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
+ clock-names = "rtc", "rtc_src";
+};
+
+&serial_0 {
+ status = "okay";
+};
+
+&serial_1 {
+ status = "okay";
+};
+
+&serial_2 {
+ status = "okay";
+};
+
+&serial_3 {
+ status = "okay";
+};
+
+&tmu_cpu0 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu1 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu2 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu3 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&usbdrd_dwc3_0 {
+ dr_mode = "host";
+};
+
+&usbdrd_dwc3_1 {
+ dr_mode = "peripheral";
+};
+
+&usbdrd3_0 {
+ vdd33-supply = <&ldo12_reg>;
+ vdd10-supply = <&ldo15_reg>;
+};
+
+&usbdrd3_1 {
+ vdd33-supply = <&ldo12_reg>;
+ vdd10-supply = <&ldo15_reg>;
+};
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
index f9aa6bb55464..b58a0f29f42c 100644
--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
@@ -277,6 +277,216 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ uart0_data: uart0-data {
+ samsung,pins = "gpa0-0", "gpa0-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart0_fctl: uart0-fctl {
+ samsung,pins = "gpa0-2", "gpa0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart1_data: uart1-data {
+ samsung,pins = "gpa0-4", "gpa0-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart1_fctl: uart1-fctl {
+ samsung,pins = "gpa0-6", "gpa0-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c2_bus: i2c2-bus {
+ samsung,pins = "gpa0-6", "gpa0-7";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart2_data: uart2-data {
+ samsung,pins = "gpa1-0", "gpa1-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart2_fctl: uart2-fctl {
+ samsung,pins = "gpa1-2", "gpa1-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c3_bus: i2c3-bus {
+ samsung,pins = "gpa1-2", "gpa1-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart3_data: uart3-data {
+ samsung,pins = "gpa1-4", "gpa1-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c4_hs_bus: i2c4-hs-bus {
+ samsung,pins = "gpa2-0", "gpa2-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c5_hs_bus: i2c5-hs-bus {
+ samsung,pins = "gpa2-2", "gpa2-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c6_hs_bus: i2c6-hs-bus {
+ samsung,pins = "gpb1-3", "gpb1-4";
+ samsung,pin-function = <4>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm0_out: pwm0-out {
+ samsung,pins = "gpb2-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm1_out: pwm1-out {
+ samsung,pins = "gpb2-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm2_out: pwm2-out {
+ samsung,pins = "gpb2-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm3_out: pwm3-out {
+ samsung,pins = "gpb2-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c7_hs_bus: i2c7-hs-bus {
+ samsung,pins = "gpb2-2", "gpb2-3";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c0_bus: i2c0-bus {
+ samsung,pins = "gpb3-0", "gpb3-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ i2c1_bus: i2c1-bus {
+ samsung,pins = "gpb3-2", "gpb3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ sd0_clk: sd0-clk {
+ samsung,pins = "gpc0-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_cmd: sd0-cmd {
+ samsung,pins = "gpc0-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_cd: sd0-cd {
+ samsung,pins = "gpc0-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus1: sd0-bus-width1 {
+ samsung,pins = "gpc0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus4: sd0-bus-width4 {
+ samsung,pins = "gpc0-4", "gpc0-5", "gpc0-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_clk: sd2-clk {
+ samsung,pins = "gpc2-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_cmd: sd2-cmd {
+ samsung,pins = "gpc2-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_cd: sd2-cd {
+ samsung,pins = "gpc2-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus1: sd2-bus-width1 {
+ samsung,pins = "gpc2-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus4: sd2-bus-width4 {
+ samsung,pins = "gpc2-4", "gpc2-5", "gpc2-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus8: sd0-bus-width8 {
+ samsung,pins = "gpc3-0", "gpc3-1", "gpc3-2", "gpc3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
};
&pinctrl_1 {
diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts
index 0f6429e1b75c..777fcf2edd79 100644
--- a/arch/arm/boot/dts/exynos5410-smdk5410.dts
+++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts
@@ -102,14 +102,14 @@
};
};
-&uart0 {
+&serial_0 {
status = "okay";
};
-&uart1 {
+&serial_1 {
status = "okay";
};
-&uart2 {
+&serial_2 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
index 7a56aec2c5ba..137f48464f8b 100644
--- a/arch/arm/boot/dts/exynos5410.dtsi
+++ b/arch/arm/boot/dts/exynos5410.dtsi
@@ -13,9 +13,10 @@
* published by the Free Software Foundation.
*/
-#include "skeleton.dtsi"
+#include "exynos54xx.dtsi"
#include "exynos-syscon-restart.dtsi"
#include <dt-bindings/clock/exynos5410.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
compatible = "samsung,exynos5410", "samsung,exynos5";
@@ -26,37 +27,34 @@
pinctrl1 = &pinctrl_1;
pinctrl2 = &pinctrl_2;
pinctrl3 = &pinctrl_3;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
- CPU0: cpu@0 {
+ cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x0>;
clock-frequency = <1600000000>;
};
- CPU1: cpu@1 {
+ cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x1>;
clock-frequency = <1600000000>;
};
- CPU2: cpu@2 {
+ cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x2>;
clock-frequency = <1600000000>;
};
- CPU3: cpu@3 {
+ cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0x3>;
@@ -70,105 +68,54 @@
#size-cells = <1>;
ranges;
- combiner: interrupt-controller@10440000 {
- compatible = "samsung,exynos4210-combiner";
- #interrupt-cells = <2>;
- interrupt-controller;
- samsung,combiner-nr = <32>;
- reg = <0x10440000 0x1000>;
- interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
- <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
- <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
- <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
- <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
- <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
- <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
- <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
- };
-
- gic: interrupt-controller@10481000 {
- compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x10481000 0x1000>,
- <0x10482000 0x1000>,
- <0x10484000 0x2000>,
- <0x10486000 0x2000>;
- interrupts = <1 9 0xf04>;
- };
-
- chipid@10000000 {
- compatible = "samsung,exynos4210-chipid";
- reg = <0x10000000 0x100>;
- };
-
- sromc: memory-controller@12250000 {
- compatible = "samsung,exynos4210-srom";
- reg = <0x12250000 0x14>;
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0x04000000 0x20000
- 1 0 0x05000000 0x20000
- 2 0 0x06000000 0x20000
- 3 0 0x07000000 0x20000>;
- };
-
pmu_system_controller: system-controller@10040000 {
compatible = "samsung,exynos5410-pmu", "syscon";
reg = <0x10040000 0x5000>;
+ clock-names = "clkout16";
+ clocks = <&fin_pll>;
+ #clock-cells = <1>;
};
- mct: mct@101C0000 {
- compatible = "samsung,exynos4210-mct";
- reg = <0x101C0000 0xB00>;
- interrupt-parent = <&interrupt_map>;
- interrupts = <0>, <1>, <2>, <3>,
- <4>, <5>, <6>, <7>,
- <8>, <9>, <10>, <11>;
- clocks = <&fin_pll>, <&clock CLK_MCT>;
- clock-names = "fin_pll", "mct";
-
- interrupt_map: interrupt-map {
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &combiner 23 3>,
- <1 &combiner 23 4>,
- <2 &combiner 25 2>,
- <3 &combiner 25 3>,
- <4 &gic 0 120 0>,
- <5 &gic 0 121 0>,
- <6 &gic 0 122 0>,
- <7 &gic 0 123 0>,
- <8 &gic 0 128 0>,
- <9 &gic 0 129 0>,
- <10 &gic 0 130 0>,
- <11 &gic 0 131 0>;
- };
+ clock: clock-controller@10010000 {
+ compatible = "samsung,exynos5410-clock";
+ reg = <0x10010000 0x30000>;
+ #clock-cells = <1>;
};
- sysram@02020000 {
- compatible = "mmio-sram";
- reg = <0x02020000 0x54000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x02020000 0x54000>;
+ tmu_cpu0: tmu@10060000 {
+ compatible = "samsung,exynos5420-tmu";
+ reg = <0x10060000 0x100>;
+ interrupts = <GIC_SPI 65 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
- smp-sysram@0 {
- compatible = "samsung,exynos4210-sysram";
- reg = <0x0 0x1000>;
- };
+ tmu_cpu1: tmu@10064000 {
+ compatible = "samsung,exynos5420-tmu";
+ reg = <0x10064000 0x100>;
+ interrupts = <GIC_SPI 183 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
- smp-sysram@53000 {
- compatible = "samsung,exynos4210-sysram-ns";
- reg = <0x53000 0x1000>;
- };
+ tmu_cpu2: tmu@10068000 {
+ compatible = "samsung,exynos5420-tmu";
+ reg = <0x10068000 0x100>;
+ interrupts = <GIC_SPI 184 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
};
- clock: clock-controller@10010000 {
- compatible = "samsung,exynos5410-clock";
- reg = <0x10010000 0x30000>;
- #clock-cells = <1>;
+ tmu_cpu3: tmu@1006c000 {
+ compatible = "samsung,exynos5420-tmu";
+ reg = <0x1006c000 0x100>;
+ interrupts = <GIC_SPI 185 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
};
mmc_0: mmc@12200000 {
@@ -236,34 +183,182 @@
reg = <0x03860000 0x1000>;
interrupts = <0 47 0>;
};
+ };
- uart0: serial@12C00000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C00000 0x100>;
- interrupts = <0 51 0>;
- clocks = <&clock CLK_UART0>, <&clock CLK_SCLK_UART0>;
- clock-names = "uart", "clk_uart_baud0";
- status = "disabled";
+ thermal-zones {
+ cpu0_thermal: cpu0-thermal {
+ thermal-sensors = <&tmu_cpu0>;
+ #include "exynos5420-trip-points.dtsi"
};
-
- uart1: serial@12C10000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C10000 0x100>;
- interrupts = <0 52 0>;
- clocks = <&clock CLK_UART1>, <&clock CLK_SCLK_UART1>;
- clock-names = "uart", "clk_uart_baud0";
- status = "disabled";
+ cpu1_thermal: cpu1-thermal {
+ thermal-sensors = <&tmu_cpu1>;
+ #include "exynos5420-trip-points.dtsi"
};
-
- uart2: serial@12C20000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C20000 0x100>;
- interrupts = <0 53 0>;
- clocks = <&clock CLK_UART2>, <&clock CLK_SCLK_UART2>;
- clock-names = "uart", "clk_uart_baud0";
- status = "disabled";
+ cpu2_thermal: cpu2-thermal {
+ thermal-sensors = <&tmu_cpu2>;
+ #include "exynos5420-trip-points.dtsi"
+ };
+ cpu3_thermal: cpu3-thermal {
+ thermal-sensors = <&tmu_cpu3>;
+ #include "exynos5420-trip-points.dtsi"
};
};
};
+&i2c_0 {
+ clocks = <&clock CLK_I2C0>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_bus>;
+};
+
+&i2c_1 {
+ clocks = <&clock CLK_I2C1>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_bus>;
+};
+
+&i2c_2 {
+ clocks = <&clock CLK_I2C2>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_bus>;
+};
+
+&i2c_3 {
+ clocks = <&clock CLK_I2C3>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_bus>;
+};
+
+&hsi2c_4 {
+ clocks = <&clock CLK_USI0>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_hs_bus>;
+};
+
+&hsi2c_5 {
+ clocks = <&clock CLK_USI1>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_hs_bus>;
+};
+
+&hsi2c_6 {
+ clocks = <&clock CLK_USI2>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_hs_bus>;
+};
+
+&hsi2c_7 {
+ clocks = <&clock CLK_USI3>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_hs_bus>;
+};
+
+&mct {
+ clocks = <&fin_pll>, <&clock CLK_MCT>;
+ clock-names = "fin_pll", "mct";
+};
+
+&pwm {
+ clocks = <&clock CLK_PWM>;
+ clock-names = "timers";
+};
+
+&rtc {
+ clocks = <&clock CLK_RTC>;
+ clock-names = "rtc";
+ interrupt-parent = <&pmu_system_controller>;
+ status = "disabled";
+};
+
+&serial_0 {
+ clocks = <&clock CLK_UART0>, <&clock CLK_SCLK_UART0>;
+ clock-names = "uart", "clk_uart_baud0";
+};
+
+&serial_1 {
+ clocks = <&clock CLK_UART1>, <&clock CLK_SCLK_UART1>;
+ clock-names = "uart", "clk_uart_baud0";
+};
+
+&serial_2 {
+ clocks = <&clock CLK_UART2>, <&clock CLK_SCLK_UART2>;
+ clock-names = "uart", "clk_uart_baud0";
+};
+
+&serial_3 {
+ clocks = <&clock CLK_UART3>, <&clock CLK_SCLK_UART3>;
+ clock-names = "uart", "clk_uart_baud0";
+};
+
+&sss {
+ clocks = <&clock CLK_SSS>;
+ clock-names = "secss";
+};
+
+&sromc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x04000000 0x20000
+ 1 0 0x05000000 0x20000
+ 2 0 0x06000000 0x20000
+ 3 0 0x07000000 0x20000>;
+};
+
+&usbdrd3_0 {
+ clocks = <&clock CLK_USBD300>;
+ clock-names = "usbdrd30";
+};
+
+&usbdrd_phy0 {
+ clocks = <&clock CLK_USBD300>, <&clock CLK_SCLK_USBPHY300>;
+ clock-names = "phy", "ref";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+};
+
+&usbdrd3_1 {
+ clocks = <&clock CLK_USBD301>;
+ clock-names = "usbdrd30";
+};
+
+&usbdrd_dwc3_1 {
+ interrupts = <GIC_SPI 200 0>;
+};
+
+&usbdrd_phy1 {
+ clocks = <&clock CLK_USBD301>, <&clock CLK_SCLK_USBPHY301>;
+ clock-names = "phy", "ref";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+};
+
+&usbhost1 {
+ clocks = <&clock CLK_USBH20>;
+ clock-names = "usbhost";
+};
+
+&usbhost2 {
+ clocks = <&clock CLK_USBH20>;
+ clock-names = "usbhost";
+};
+
+&usb2_phy {
+ clocks = <&clock CLK_USBH20>, <&clock CLK_SCLK_USBPHY300>;
+ clock-names = "phy", "ref";
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ samsung,pmureg-phandle = <&pmu_system_controller>;
+};
+
+&watchdog {
+ clocks = <&clock CLK_WDT>;
+ clock-names = "watchdog";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+};
+
#include "exynos5410-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 60bc861d0f9d..39a3b81478fd 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -16,6 +16,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/clock/samsung,s2mps11.h>
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Insignal Arndale Octa evaluation board based on EXYNOS5420";
@@ -346,11 +347,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
broken-cd;
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index 1de972d46a87..fe4e0915c0c6 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -16,6 +16,7 @@
#include <dt-bindings/regulator/maxim,max77802.h>
#include "exynos5420.dtsi"
#include "exynos5420-cpus.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Google Peach Pit Rev 6+";
@@ -278,7 +279,6 @@
regulator-name = "vdd_1v2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-off-in-suspend;
@@ -301,7 +301,6 @@
regulator-name = "vdd_1v35";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -323,7 +322,6 @@
regulator-name = "vdd_2v";
regulator-min-microvolt = <2000000>;
regulator-max-microvolt = <2000000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -334,7 +332,6 @@
regulator-name = "vdd_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -419,7 +416,6 @@
regulator-name = "vdd_ldo9";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-always-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-mode = <MAX77802_OPMODE_LP>;
@@ -430,7 +426,6 @@
regulator-name = "vdd_ldo10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
};
@@ -701,11 +696,6 @@
status = "okay";
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
num-slots = <1>;
@@ -1053,6 +1043,26 @@
status = "okay";
};
+&tmu_cpu0 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu1 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu2 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu3 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_gpu {
+ vtmu-supply = <&ldo10_reg>;
+};
+
&usbdrd_dwc3_0 {
dr_mode = "host";
};
diff --git a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
index 130563b2ca95..14beb7e07323 100644
--- a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
@@ -193,17 +193,17 @@
samsung,pin-drv = <3>;
};
- sd1_clk: sd1-clk {
- samsung,pins = "gpc1-0";
+ sd0_rclk: sd0-rclk {
+ samsung,pins = "gpc0-7";
samsung,pin-function = <2>;
- samsung,pin-pud = <0>;
+ samsung,pin-pud = <1>;
samsung,pin-drv = <3>;
};
- sd0_rclk: sd0-rclk {
- samsung,pins = "gpc0-7";
+ sd1_clk: sd1-clk {
+ samsung,pins = "gpc1-0";
samsung,pin-function = <2>;
- samsung,pin-pud = <1>;
+ samsung,pin-pud = <0>;
samsung,pin-drv = <3>;
};
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index 2e748d19322f..ed8f3426911b 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -13,6 +13,7 @@
#include "exynos5420.dtsi"
#include "exynos5420-cpus.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Samsung SMDK5420 board based on EXYNOS5420";
@@ -354,11 +355,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
broken-cd;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index c6e05eb88937..00c4cfa54839 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -13,10 +13,10 @@
* published by the Free Software Foundation.
*/
+#include "exynos54xx.dtsi"
#include <dt-bindings/clock/exynos5420.h>
-#include "exynos5.dtsi"
-
#include <dt-bindings/clock/exynos-audss-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
compatible = "samsung,exynos5420", "samsung,exynos5";
@@ -30,14 +30,6 @@
pinctrl2 = &pinctrl_2;
pinctrl3 = &pinctrl_3;
pinctrl4 = &pinctrl_4;
- i2c0 = &i2c_0;
- i2c1 = &i2c_1;
- i2c2 = &i2c_2;
- i2c3 = &i2c_3;
- i2c4 = &hsi2c_4;
- i2c5 = &hsi2c_5;
- i2c6 = &hsi2c_6;
- i2c7 = &hsi2c_7;
i2c8 = &hsi2c_8;
i2c9 = &hsi2c_9;
i2c10 = &hsi2c_10;
@@ -46,118 +38,6 @@
spi0 = &spi_0;
spi1 = &spi_1;
spi2 = &spi_2;
- usbdrdphy0 = &usbdrd_phy0;
- usbdrdphy1 = &usbdrd_phy1;
- };
-
- cluster_a15_opp_table: opp_table0 {
- compatible = "operating-points-v2";
- opp-shared;
- opp@1800000000 {
- opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <1250000>;
- clock-latency-ns = <140000>;
- };
- opp@1700000000 {
- opp-hz = /bits/ 64 <1700000000>;
- opp-microvolt = <1212500>;
- clock-latency-ns = <140000>;
- };
- opp@1600000000 {
- opp-hz = /bits/ 64 <1600000000>;
- opp-microvolt = <1175000>;
- clock-latency-ns = <140000>;
- };
- opp@1500000000 {
- opp-hz = /bits/ 64 <1500000000>;
- opp-microvolt = <1137500>;
- clock-latency-ns = <140000>;
- };
- opp@1400000000 {
- opp-hz = /bits/ 64 <1400000000>;
- opp-microvolt = <1112500>;
- clock-latency-ns = <140000>;
- };
- opp@1300000000 {
- opp-hz = /bits/ 64 <1300000000>;
- opp-microvolt = <1062500>;
- clock-latency-ns = <140000>;
- };
- opp@1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1037500>;
- clock-latency-ns = <140000>;
- };
- opp@1100000000 {
- opp-hz = /bits/ 64 <1100000000>;
- opp-microvolt = <1012500>;
- clock-latency-ns = <140000>;
- };
- opp@1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = < 987500>;
- clock-latency-ns = <140000>;
- };
- opp@900000000 {
- opp-hz = /bits/ 64 <900000000>;
- opp-microvolt = < 962500>;
- clock-latency-ns = <140000>;
- };
- opp@800000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = < 937500>;
- clock-latency-ns = <140000>;
- };
- opp@700000000 {
- opp-hz = /bits/ 64 <700000000>;
- opp-microvolt = < 912500>;
- clock-latency-ns = <140000>;
- };
- };
-
- cluster_a7_opp_table: opp_table1 {
- compatible = "operating-points-v2";
- opp-shared;
- opp@1300000000 {
- opp-hz = /bits/ 64 <1300000000>;
- opp-microvolt = <1275000>;
- clock-latency-ns = <140000>;
- };
- opp@1200000000 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1212500>;
- clock-latency-ns = <140000>;
- };
- opp@1100000000 {
- opp-hz = /bits/ 64 <1100000000>;
- opp-microvolt = <1162500>;
- clock-latency-ns = <140000>;
- };
- opp@1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <1112500>;
- clock-latency-ns = <140000>;
- };
- opp@900000000 {
- opp-hz = /bits/ 64 <900000000>;
- opp-microvolt = <1062500>;
- clock-latency-ns = <140000>;
- };
- opp@800000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <1025000>;
- clock-latency-ns = <140000>;
- };
- opp@700000000 {
- opp-hz = /bits/ 64 <700000000>;
- opp-microvolt = <975000>;
- clock-latency-ns = <140000>;
- };
- opp@600000000 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <937500>;
- clock-latency-ns = <140000>;
- };
};
/*
@@ -165,1434 +45,1270 @@
* by exynos5420-cpus.dtsi or exynos5422-cpus.dtsi.
*/
- cci: cci@10d20000 {
- compatible = "arm,cci-400";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x10d20000 0x1000>;
- ranges = <0x0 0x10d20000 0x6000>;
-
- cci_control0: slave-if@4000 {
- compatible = "arm,cci-400-ctrl-if";
- interface-type = "ace";
- reg = <0x4000 0x1000>;
- };
- cci_control1: slave-if@5000 {
- compatible = "arm,cci-400-ctrl-if";
- interface-type = "ace";
- reg = <0x5000 0x1000>;
+ soc: soc {
+ cluster_a15_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ opp@1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <1250000>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1700000000 {
+ opp-hz = /bits/ 64 <1700000000>;
+ opp-microvolt = <1212500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1600000000 {
+ opp-hz = /bits/ 64 <1600000000>;
+ opp-microvolt = <1175000>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1500000000 {
+ opp-hz = /bits/ 64 <1500000000>;
+ opp-microvolt = <1137500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1400000000 {
+ opp-hz = /bits/ 64 <1400000000>;
+ opp-microvolt = <1112500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1062500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1037500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1012500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = < 987500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = < 962500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = < 937500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = < 912500>;
+ clock-latency-ns = <140000>;
+ };
+ };
+
+ cluster_a7_opp_table: opp_table1 {
+ compatible = "operating-points-v2";
+ opp-shared;
+ opp@1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1275000>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1212500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1162500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1112500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <1062500>;
+ clock-latency-ns = <140000>;
+ };
+ opp@800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1025000>;
+ clock-latency-ns = <140000>;
+ };
+ opp@700000000 {
+ opp-hz = /bits/ 64 <700000000>;
+ opp-microvolt = <975000>;
+ clock-latency-ns = <140000>;
+ };
+ opp@600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <937500>;
+ clock-latency-ns = <140000>;
+ };
+ };
+
+ cci: cci@10d20000 {
+ compatible = "arm,cci-400";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x10d20000 0x1000>;
+ ranges = <0x0 0x10d20000 0x6000>;
+
+ cci_control0: slave-if@4000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x4000 0x1000>;
+ };
+ cci_control1: slave-if@5000 {
+ compatible = "arm,cci-400-ctrl-if";
+ interface-type = "ace";
+ reg = <0x5000 0x1000>;
+ };
+ };
+
+ clock: clock-controller@10010000 {
+ compatible = "samsung,exynos5420-clock";
+ reg = <0x10010000 0x30000>;
+ #clock-cells = <1>;
+ };
+
+ clock_audss: audss-clock-controller@3810000 {
+ compatible = "samsung,exynos5420-audss-clock";
+ reg = <0x03810000 0x0C>;
+ #clock-cells = <1>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
+ <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
+ clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
+ };
+
+ mfc: codec@11000000 {
+ compatible = "samsung,mfc-v7";
+ reg = <0x11000000 0x10000>;
+ interrupts = <0 96 0>;
+ clocks = <&clock CLK_MFC>;
+ clock-names = "mfc";
+ power-domains = <&mfc_pd>;
+ iommus = <&sysmmu_mfc_l>, <&sysmmu_mfc_r>;
+ iommu-names = "left", "right";
+ };
+
+ mmc_0: mmc@12200000 {
+ compatible = "samsung,exynos5420-dw-mshc-smu";
+ interrupts = <0 75 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12200000 0x2000>;
+ clocks = <&clock CLK_MMC0>, <&clock CLK_SCLK_MMC0>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
};
- };
-
- sysram@02020000 {
- compatible = "mmio-sram";
- reg = <0x02020000 0x54000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x02020000 0x54000>;
- smp-sysram@0 {
- compatible = "samsung,exynos4210-sysram";
- reg = <0x0 0x1000>;
+ mmc_1: mmc@12210000 {
+ compatible = "samsung,exynos5420-dw-mshc-smu";
+ interrupts = <0 76 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12210000 0x2000>;
+ clocks = <&clock CLK_MMC1>, <&clock CLK_SCLK_MMC1>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
};
- smp-sysram@53000 {
- compatible = "samsung,exynos4210-sysram-ns";
- reg = <0x53000 0x1000>;
+ mmc_2: mmc@12220000 {
+ compatible = "samsung,exynos5420-dw-mshc";
+ interrupts = <0 77 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x12220000 0x1000>;
+ clocks = <&clock CLK_MMC2>, <&clock CLK_SCLK_MMC2>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
};
- };
-
- clock: clock-controller@10010000 {
- compatible = "samsung,exynos5420-clock";
- reg = <0x10010000 0x30000>;
- #clock-cells = <1>;
- };
- clock_audss: audss-clock-controller@3810000 {
- compatible = "samsung,exynos5420-audss-clock";
- reg = <0x03810000 0x0C>;
- #clock-cells = <1>;
- clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
- <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
- clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
- };
-
- mfc: codec@11000000 {
- compatible = "samsung,mfc-v7";
- reg = <0x11000000 0x10000>;
- interrupts = <0 96 0>;
- clocks = <&clock CLK_MFC>;
- clock-names = "mfc";
- power-domains = <&mfc_pd>;
- iommus = <&sysmmu_mfc_l>, <&sysmmu_mfc_r>;
- iommu-names = "left", "right";
- };
+ nocp_mem0_0: nocp@10CA1000 {
+ compatible = "samsung,exynos5420-nocp";
+ reg = <0x10CA1000 0x200>;
+ status = "disabled";
+ };
- mmc_0: mmc@12200000 {
- compatible = "samsung,exynos5420-dw-mshc-smu";
- interrupts = <0 75 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x12200000 0x2000>;
- clocks = <&clock CLK_MMC0>, <&clock CLK_SCLK_MMC0>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x40>;
- status = "disabled";
- };
+ nocp_mem0_1: nocp@10CA1400 {
+ compatible = "samsung,exynos5420-nocp";
+ reg = <0x10CA1400 0x200>;
+ status = "disabled";
+ };
- mmc_1: mmc@12210000 {
- compatible = "samsung,exynos5420-dw-mshc-smu";
- interrupts = <0 76 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x12210000 0x2000>;
- clocks = <&clock CLK_MMC1>, <&clock CLK_SCLK_MMC1>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x40>;
- status = "disabled";
- };
+ nocp_mem1_0: nocp@10CA1800 {
+ compatible = "samsung,exynos5420-nocp";
+ reg = <0x10CA1800 0x200>;
+ status = "disabled";
+ };
- mmc_2: mmc@12220000 {
- compatible = "samsung,exynos5420-dw-mshc";
- interrupts = <0 77 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x12220000 0x1000>;
- clocks = <&clock CLK_MMC2>, <&clock CLK_SCLK_MMC2>;
- clock-names = "biu", "ciu";
- fifo-depth = <0x40>;
- status = "disabled";
- };
+ nocp_mem1_1: nocp@10CA1C00 {
+ compatible = "samsung,exynos5420-nocp";
+ reg = <0x10CA1C00 0x200>;
+ status = "disabled";
+ };
- mct: mct@101C0000 {
- compatible = "samsung,exynos4210-mct";
- reg = <0x101C0000 0x800>;
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&mct_map>;
- interrupts = <0>, <1>, <2>, <3>, <4>, <5>, <6>, <7>,
- <8>, <9>, <10>, <11>;
- clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MCT>;
- clock-names = "fin_pll", "mct";
-
- mct_map: mct-map {
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &combiner 23 3>,
- <1 &combiner 23 4>,
- <2 &combiner 25 2>,
- <3 &combiner 25 3>,
- <4 &gic 0 120 0>,
- <5 &gic 0 121 0>,
- <6 &gic 0 122 0>,
- <7 &gic 0 123 0>,
- <8 &gic 0 128 0>,
- <9 &gic 0 129 0>,
- <10 &gic 0 130 0>,
- <11 &gic 0 131 0>;
+ nocp_g3d_0: nocp@11A51000 {
+ compatible = "samsung,exynos5420-nocp";
+ reg = <0x11A51000 0x200>;
+ status = "disabled";
};
- };
- nocp_mem0_0: nocp@10CA1000 {
- compatible = "samsung,exynos5420-nocp";
- reg = <0x10CA1000 0x200>;
- status = "disabled";
- };
+ nocp_g3d_1: nocp@11A51400 {
+ compatible = "samsung,exynos5420-nocp";
+ reg = <0x11A51400 0x200>;
+ status = "disabled";
+ };
- nocp_mem0_1: nocp@10CA1400 {
- compatible = "samsung,exynos5420-nocp";
- reg = <0x10CA1400 0x200>;
- status = "disabled";
- };
+ gsc_pd: power-domain@10044000 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044000 0x20>;
+ #power-domain-cells = <0>;
+ clocks = <&clock CLK_FIN_PLL>,
+ <&clock CLK_MOUT_USER_ACLK300_GSCL>,
+ <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
+ clock-names = "oscclk", "clk0", "asb0", "asb1";
+ };
- nocp_mem1_0: nocp@10CA1800 {
- compatible = "samsung,exynos5420-nocp";
- reg = <0x10CA1800 0x200>;
- status = "disabled";
- };
+ isp_pd: power-domain@10044020 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044020 0x20>;
+ #power-domain-cells = <0>;
+ };
- nocp_mem1_1: nocp@10CA1C00 {
- compatible = "samsung,exynos5420-nocp";
- reg = <0x10CA1C00 0x200>;
- status = "disabled";
- };
+ mfc_pd: power-domain@10044060 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044060 0x20>;
+ clocks = <&clock CLK_FIN_PLL>,
+ <&clock CLK_MOUT_USER_ACLK333>,
+ <&clock CLK_ACLK333>;
+ clock-names = "oscclk", "clk0","asb0";
+ #power-domain-cells = <0>;
+ };
- nocp_g3d_0: nocp@11A51000 {
- compatible = "samsung,exynos5420-nocp";
- reg = <0x11A51000 0x200>;
- status = "disabled";
- };
+ msc_pd: power-domain@10044120 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044120 0x20>;
+ #power-domain-cells = <0>;
+ };
- nocp_g3d_1: nocp@11A51400 {
- compatible = "samsung,exynos5420-nocp";
- reg = <0x11A51400 0x200>;
- status = "disabled";
- };
+ disp_pd: power-domain@100440C0 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x100440C0 0x20>;
+ #power-domain-cells = <0>;
+ clocks = <&clock CLK_FIN_PLL>,
+ <&clock CLK_MOUT_USER_ACLK200_DISP1>,
+ <&clock CLK_MOUT_USER_ACLK300_DISP1>,
+ <&clock CLK_MOUT_USER_ACLK400_DISP1>,
+ <&clock CLK_FIMD1>, <&clock CLK_MIXER>;
+ clock-names = "oscclk", "clk0", "clk1", "clk2", "asb0", "asb1";
+ };
- gsc_pd: power-domain@10044000 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044000 0x20>;
- #power-domain-cells = <0>;
- clocks = <&clock CLK_FIN_PLL>,
- <&clock CLK_MOUT_USER_ACLK300_GSCL>,
- <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
- clock-names = "oscclk", "clk0", "asb0", "asb1";
- };
+ pinctrl_0: pinctrl@13400000 {
+ compatible = "samsung,exynos5420-pinctrl";
+ reg = <0x13400000 0x1000>;
+ interrupts = <0 45 0>;
- isp_pd: power-domain@10044020 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044020 0x20>;
- #power-domain-cells = <0>;
- };
+ wakeup-interrupt-controller {
+ compatible = "samsung,exynos4210-wakeup-eint";
+ interrupt-parent = <&gic>;
+ interrupts = <0 32 0>;
+ };
+ };
- mfc_pd: power-domain@10044060 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044060 0x20>;
- clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_USER_ACLK333>;
- clock-names = "oscclk", "clk0";
- #power-domain-cells = <0>;
- };
+ pinctrl_1: pinctrl@13410000 {
+ compatible = "samsung,exynos5420-pinctrl";
+ reg = <0x13410000 0x1000>;
+ interrupts = <0 78 0>;
+ };
- msc_pd: power-domain@10044120 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044120 0x20>;
- #power-domain-cells = <0>;
- };
+ pinctrl_2: pinctrl@14000000 {
+ compatible = "samsung,exynos5420-pinctrl";
+ reg = <0x14000000 0x1000>;
+ interrupts = <0 46 0>;
+ };
- disp_pd: power-domain@100440C0 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x100440C0 0x20>;
- #power-domain-cells = <0>;
- clocks = <&clock CLK_FIN_PLL>,
- <&clock CLK_MOUT_USER_ACLK200_DISP1>,
- <&clock CLK_MOUT_USER_ACLK300_DISP1>,
- <&clock CLK_MOUT_USER_ACLK400_DISP1>,
- <&clock CLK_FIMD1>, <&clock CLK_MIXER>;
- clock-names = "oscclk", "clk0", "clk1", "clk2", "asb0", "asb1";
- };
+ pinctrl_3: pinctrl@14010000 {
+ compatible = "samsung,exynos5420-pinctrl";
+ reg = <0x14010000 0x1000>;
+ interrupts = <0 50 0>;
+ };
- pinctrl_0: pinctrl@13400000 {
- compatible = "samsung,exynos5420-pinctrl";
- reg = <0x13400000 0x1000>;
- interrupts = <0 45 0>;
+ pinctrl_4: pinctrl@03860000 {
+ compatible = "samsung,exynos5420-pinctrl";
+ reg = <0x03860000 0x1000>;
+ interrupts = <0 47 0>;
+ };
- wakeup-interrupt-controller {
- compatible = "samsung,exynos4210-wakeup-eint";
+ amba {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
interrupt-parent = <&gic>;
- interrupts = <0 32 0>;
+ ranges;
+
+ adma: adma@03880000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x03880000 0x1000>;
+ interrupts = <0 110 0>;
+ clocks = <&clock_audss EXYNOS_ADMA>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <6>;
+ #dma-requests = <16>;
+ };
+
+ pdma0: pdma@121A0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x121A0000 0x1000>;
+ interrupts = <0 34 0>;
+ clocks = <&clock CLK_PDMA0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
+
+ pdma1: pdma@121B0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x121B0000 0x1000>;
+ interrupts = <0 35 0>;
+ clocks = <&clock CLK_PDMA1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
+
+ mdma0: mdma@10800000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x10800000 0x1000>;
+ interrupts = <0 33 0>;
+ clocks = <&clock CLK_MDMA0>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
+ };
+
+ mdma1: mdma@11C10000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x11C10000 0x1000>;
+ interrupts = <0 124 0>;
+ clocks = <&clock CLK_MDMA1>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
+ /*
+ * MDMA1 can support both secure and non-secure
+ * AXI transactions. When it is enabled in the
+ * kernel on boards that run in secure mode, we
+ * get imprecise external aborts, causing the
+ * kernel to oops.
+ */
+ status = "disabled";
+ };
+ };
+
+ i2s0: i2s@03830000 {
+ compatible = "samsung,exynos5420-i2s";
+ reg = <0x03830000 0x100>;
+ dmas = <&adma 0
+ &adma 2
+ &adma 1>;
+ dma-names = "tx", "rx", "tx-sec";
+ clocks = <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_SCLK_I2S>;
+ clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk0";
+ #sound-dai-cells = <1>;
+ samsung,idma-addr = <0x03000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_bus>;
+ status = "disabled";
};
- };
-
- pinctrl_1: pinctrl@13410000 {
- compatible = "samsung,exynos5420-pinctrl";
- reg = <0x13410000 0x1000>;
- interrupts = <0 78 0>;
- };
- pinctrl_2: pinctrl@14000000 {
- compatible = "samsung,exynos5420-pinctrl";
- reg = <0x14000000 0x1000>;
- interrupts = <0 46 0>;
- };
-
- pinctrl_3: pinctrl@14010000 {
- compatible = "samsung,exynos5420-pinctrl";
- reg = <0x14010000 0x1000>;
- interrupts = <0 50 0>;
- };
-
- pinctrl_4: pinctrl@03860000 {
- compatible = "samsung,exynos5420-pinctrl";
- reg = <0x03860000 0x1000>;
- interrupts = <0 47 0>;
- };
-
- amba {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- interrupt-parent = <&gic>;
- ranges;
-
- adma: adma@03880000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x03880000 0x1000>;
- interrupts = <0 110 0>;
- clocks = <&clock_audss EXYNOS_ADMA>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <6>;
- #dma-requests = <16>;
- };
-
- pdma0: pdma@121A0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x121A0000 0x1000>;
- interrupts = <0 34 0>;
- clocks = <&clock CLK_PDMA0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
-
- pdma1: pdma@121B0000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x121B0000 0x1000>;
- interrupts = <0 35 0>;
- clocks = <&clock CLK_PDMA1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
-
- mdma0: mdma@10800000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x10800000 0x1000>;
- interrupts = <0 33 0>;
- clocks = <&clock CLK_MDMA0>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <1>;
- };
-
- mdma1: mdma@11C10000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x11C10000 0x1000>;
- interrupts = <0 124 0>;
- clocks = <&clock CLK_MDMA1>;
- clock-names = "apb_pclk";
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <1>;
- /*
- * MDMA1 can support both secure and non-secure
- * AXI transactions. When this is enabled in the kernel
- * for boards that run in secure mode, we are getting
- * imprecise external aborts causing the kernel to oops.
- */
+ i2s1: i2s@12D60000 {
+ compatible = "samsung,exynos5420-i2s";
+ reg = <0x12D60000 0x100>;
+ dmas = <&pdma1 12
+ &pdma1 11>;
+ dma-names = "tx", "rx";
+ clocks = <&clock CLK_I2S1>, <&clock CLK_SCLK_I2S1>;
+ clock-names = "iis", "i2s_opclk0";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk1";
+ #sound-dai-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1_bus>;
status = "disabled";
};
- };
-
- i2s0: i2s@03830000 {
- compatible = "samsung,exynos5420-i2s";
- reg = <0x03830000 0x100>;
- dmas = <&adma 0
- &adma 2
- &adma 1>;
- dma-names = "tx", "rx", "tx-sec";
- clocks = <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_SCLK_I2S>;
- clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
- #clock-cells = <1>;
- clock-output-names = "i2s_cdclk0";
- #sound-dai-cells = <1>;
- samsung,idma-addr = <0x03000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_bus>;
- status = "disabled";
- };
-
- i2s1: i2s@12D60000 {
- compatible = "samsung,exynos5420-i2s";
- reg = <0x12D60000 0x100>;
- dmas = <&pdma1 12
- &pdma1 11>;
- dma-names = "tx", "rx";
- clocks = <&clock CLK_I2S1>, <&clock CLK_SCLK_I2S1>;
- clock-names = "iis", "i2s_opclk0";
- #clock-cells = <1>;
- clock-output-names = "i2s_cdclk1";
- #sound-dai-cells = <1>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s1_bus>;
- status = "disabled";
- };
-
- i2s2: i2s@12D70000 {
- compatible = "samsung,exynos5420-i2s";
- reg = <0x12D70000 0x100>;
- dmas = <&pdma0 12
- &pdma0 11>;
- dma-names = "tx", "rx";
- clocks = <&clock CLK_I2S2>, <&clock CLK_SCLK_I2S2>;
- clock-names = "iis", "i2s_opclk0";
- #clock-cells = <1>;
- clock-output-names = "i2s_cdclk2";
- #sound-dai-cells = <1>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s2_bus>;
- status = "disabled";
- };
-
- spi_0: spi@12d20000 {
- compatible = "samsung,exynos4210-spi";
- reg = <0x12d20000 0x100>;
- interrupts = <0 68 0>;
- dmas = <&pdma0 5
- &pdma0 4>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi0_bus>;
- clocks = <&clock CLK_SPI0>, <&clock CLK_SCLK_SPI0>;
- clock-names = "spi", "spi_busclk0";
- status = "disabled";
- };
-
- spi_1: spi@12d30000 {
- compatible = "samsung,exynos4210-spi";
- reg = <0x12d30000 0x100>;
- interrupts = <0 69 0>;
- dmas = <&pdma1 5
- &pdma1 4>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi1_bus>;
- clocks = <&clock CLK_SPI1>, <&clock CLK_SCLK_SPI1>;
- clock-names = "spi", "spi_busclk0";
- status = "disabled";
- };
-
- spi_2: spi@12d40000 {
- compatible = "samsung,exynos4210-spi";
- reg = <0x12d40000 0x100>;
- interrupts = <0 70 0>;
- dmas = <&pdma0 7
- &pdma0 6>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&spi2_bus>;
- clocks = <&clock CLK_SPI2>, <&clock CLK_SCLK_SPI2>;
- clock-names = "spi", "spi_busclk0";
- status = "disabled";
- };
-
- pwm: pwm@12dd0000 {
- compatible = "samsung,exynos4210-pwm";
- reg = <0x12dd0000 0x100>;
- samsung,pwm-outputs = <0>, <1>, <2>, <3>;
- #pwm-cells = <3>;
- clocks = <&clock CLK_PWM>;
- clock-names = "timers";
- };
-
- dp_phy: dp-video-phy {
- compatible = "samsung,exynos5420-dp-video-phy";
- samsung,pmu-syscon = <&pmu_system_controller>;
- #phy-cells = <0>;
- };
-
- mipi_phy: mipi-video-phy {
- compatible = "samsung,s5pv210-mipi-video-phy";
- syscon = <&pmu_system_controller>;
- #phy-cells = <1>;
- };
-
- dsi@14500000 {
- compatible = "samsung,exynos5410-mipi-dsi";
- reg = <0x14500000 0x10000>;
- interrupts = <0 82 0>;
- phys = <&mipi_phy 1>;
- phy-names = "dsim";
- clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
- clock-names = "bus_clk", "pll_clk";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
- adc: adc@12D10000 {
- compatible = "samsung,exynos-adc-v2";
- reg = <0x12D10000 0x100>;
- interrupts = <0 106 0>;
- clocks = <&clock CLK_TSADC>;
- clock-names = "adc";
- #io-channel-cells = <1>;
- io-channel-ranges;
- samsung,syscon-phandle = <&pmu_system_controller>;
- status = "disabled";
- };
-
- i2c_0: i2c@12C60000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C60000 0x100>;
- interrupts = <0 56 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C0>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
-
- i2c_1: i2c@12C70000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C70000 0x100>;
- interrupts = <0 57 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C1>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
-
- i2c_2: i2c@12C80000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C80000 0x100>;
- interrupts = <0 58 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C2>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
-
- i2c_3: i2c@12C90000 {
- compatible = "samsung,s3c2440-i2c";
- reg = <0x12C90000 0x100>;
- interrupts = <0 59 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&clock CLK_I2C3>;
- clock-names = "i2c";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_bus>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- status = "disabled";
- };
- hsi2c_4: i2c@12CA0000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12CA0000 0x1000>;
- interrupts = <0 60 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c4_hs_bus>;
- clocks = <&clock CLK_USI0>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hsi2c_5: i2c@12CB0000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12CB0000 0x1000>;
- interrupts = <0 61 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c5_hs_bus>;
- clocks = <&clock CLK_USI1>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hsi2c_6: i2c@12CC0000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12CC0000 0x1000>;
- interrupts = <0 62 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c6_hs_bus>;
- clocks = <&clock CLK_USI2>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hsi2c_7: i2c@12CD0000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12CD0000 0x1000>;
- interrupts = <0 63 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c7_hs_bus>;
- clocks = <&clock CLK_USI3>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hsi2c_8: i2c@12E00000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12E00000 0x1000>;
- interrupts = <0 87 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c8_hs_bus>;
- clocks = <&clock CLK_USI4>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hsi2c_9: i2c@12E10000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12E10000 0x1000>;
- interrupts = <0 88 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c9_hs_bus>;
- clocks = <&clock CLK_USI5>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hsi2c_10: i2c@12E20000 {
- compatible = "samsung,exynos5-hsi2c";
- reg = <0x12E20000 0x1000>;
- interrupts = <0 203 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2c10_hs_bus>;
- clocks = <&clock CLK_USI6>;
- clock-names = "hsi2c";
- status = "disabled";
- };
-
- hdmi: hdmi@14530000 {
- compatible = "samsung,exynos5420-hdmi";
- reg = <0x14530000 0x70000>;
- interrupts = <0 95 0>;
- clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
- <&clock CLK_DOUT_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
- <&clock CLK_MOUT_HDMI>;
- clock-names = "hdmi", "sclk_hdmi", "sclk_pixel",
- "sclk_hdmiphy", "mout_hdmi";
- phy = <&hdmiphy>;
- samsung,syscon-phandle = <&pmu_system_controller>;
- status = "disabled";
- power-domains = <&disp_pd>;
- };
-
- hdmiphy: hdmiphy@145D0000 {
- reg = <0x145D0000 0x20>;
- };
-
- mixer: mixer@14450000 {
- compatible = "samsung,exynos5420-mixer";
- reg = <0x14450000 0x10000>;
- interrupts = <0 94 0>;
- clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
- <&clock CLK_SCLK_HDMI>;
- clock-names = "mixer", "hdmi", "sclk_hdmi";
- power-domains = <&disp_pd>;
- iommus = <&sysmmu_tv>;
- };
-
- rotator: rotator@11C00000 {
- compatible = "samsung,exynos5250-rotator";
- reg = <0x11C00000 0x64>;
- interrupts = <0 84 0>;
- clocks = <&clock CLK_ROTATOR>;
- clock-names = "rotator";
- iommus = <&sysmmu_rotator>;
- };
-
- gsc_0: video-scaler@13e00000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e00000 0x1000>;
- interrupts = <0 85 0>;
- clocks = <&clock CLK_GSCL0>;
- clock-names = "gscl";
- power-domains = <&gsc_pd>;
- iommus = <&sysmmu_gscl0>;
- };
-
- gsc_1: video-scaler@13e10000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e10000 0x1000>;
- interrupts = <0 86 0>;
- clocks = <&clock CLK_GSCL1>;
- clock-names = "gscl";
- power-domains = <&gsc_pd>;
- iommus = <&sysmmu_gscl1>;
- };
-
- jpeg_0: jpeg@11F50000 {
- compatible = "samsung,exynos5420-jpeg";
- reg = <0x11F50000 0x1000>;
- interrupts = <0 89 0>;
- clock-names = "jpeg";
- clocks = <&clock CLK_JPEG>;
- iommus = <&sysmmu_jpeg0>;
- };
-
- jpeg_1: jpeg@11F60000 {
- compatible = "samsung,exynos5420-jpeg";
- reg = <0x11F60000 0x1000>;
- interrupts = <0 168 0>;
- clock-names = "jpeg";
- clocks = <&clock CLK_JPEG2>;
- iommus = <&sysmmu_jpeg1>;
- };
-
- pmu_system_controller: system-controller@10040000 {
- compatible = "samsung,exynos5420-pmu", "syscon";
- reg = <0x10040000 0x5000>;
- clock-names = "clkout16";
- clocks = <&clock CLK_FIN_PLL>;
- #clock-cells = <1>;
- interrupt-controller;
- #interrupt-cells = <3>;
- interrupt-parent = <&gic>;
- };
-
- sysreg_system_controller: syscon@10050000 {
- compatible = "samsung,exynos5-sysreg", "syscon";
- reg = <0x10050000 0x5000>;
- };
-
- tmu_cpu0: tmu@10060000 {
- compatible = "samsung,exynos5420-tmu";
- reg = <0x10060000 0x100>;
- interrupts = <0 65 0>;
- clocks = <&clock CLK_TMU>;
- clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
- };
-
- tmu_cpu1: tmu@10064000 {
- compatible = "samsung,exynos5420-tmu";
- reg = <0x10064000 0x100>;
- interrupts = <0 183 0>;
- clocks = <&clock CLK_TMU>;
- clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
- };
-
- tmu_cpu2: tmu@10068000 {
- compatible = "samsung,exynos5420-tmu-ext-triminfo";
- reg = <0x10068000 0x100>, <0x1006c000 0x4>;
- interrupts = <0 184 0>;
- clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
- clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
- };
-
- tmu_cpu3: tmu@1006c000 {
- compatible = "samsung,exynos5420-tmu-ext-triminfo";
- reg = <0x1006c000 0x100>, <0x100a0000 0x4>;
- interrupts = <0 185 0>;
- clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
- clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
- };
-
- tmu_gpu: tmu@100a0000 {
- compatible = "samsung,exynos5420-tmu-ext-triminfo";
- reg = <0x100a0000 0x100>, <0x10068000 0x4>;
- interrupts = <0 215 0>;
- clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
- clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
- };
-
- thermal-zones {
- cpu0_thermal: cpu0-thermal {
- thermal-sensors = <&tmu_cpu0>;
- #include "exynos5420-trip-points.dtsi"
- };
- cpu1_thermal: cpu1-thermal {
- thermal-sensors = <&tmu_cpu1>;
- #include "exynos5420-trip-points.dtsi"
- };
- cpu2_thermal: cpu2-thermal {
- thermal-sensors = <&tmu_cpu2>;
- #include "exynos5420-trip-points.dtsi"
- };
- cpu3_thermal: cpu3-thermal {
- thermal-sensors = <&tmu_cpu3>;
- #include "exynos5420-trip-points.dtsi"
- };
- gpu_thermal: gpu-thermal {
- thermal-sensors = <&tmu_gpu>;
- #include "exynos5420-trip-points.dtsi"
+ i2s2: i2s@12D70000 {
+ compatible = "samsung,exynos5420-i2s";
+ reg = <0x12D70000 0x100>;
+ dmas = <&pdma0 12
+ &pdma0 11>;
+ dma-names = "tx", "rx";
+ clocks = <&clock CLK_I2S2>, <&clock CLK_SCLK_I2S2>;
+ clock-names = "iis", "i2s_opclk0";
+ #clock-cells = <1>;
+ clock-output-names = "i2s_cdclk2";
+ #sound-dai-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s2_bus>;
+ status = "disabled";
};
- };
- watchdog: watchdog@101D0000 {
- compatible = "samsung,exynos5420-wdt";
- reg = <0x101D0000 0x100>;
- interrupts = <0 42 0>;
- clocks = <&clock CLK_WDT>;
- clock-names = "watchdog";
- samsung,syscon-phandle = <&pmu_system_controller>;
- };
-
- sss: sss@10830000 {
- compatible = "samsung,exynos4210-secss";
- reg = <0x10830000 0x300>;
- interrupts = <0 112 0>;
- clocks = <&clock CLK_SSS>;
- clock-names = "secss";
- };
-
- usbdrd3_0: usb3-0 {
- compatible = "samsung,exynos5250-dwusb3";
- clocks = <&clock CLK_USBD300>;
- clock-names = "usbdrd30";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- usbdrd_dwc3_0: dwc3@12000000 {
- compatible = "snps,dwc3";
- reg = <0x12000000 0x10000>;
- interrupts = <0 72 0>;
- phys = <&usbdrd_phy0 0>, <&usbdrd_phy0 1>;
- phy-names = "usb2-phy", "usb3-phy";
+ spi_0: spi@12d20000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d20000 0x100>;
+ interrupts = <0 68 0>;
+ dmas = <&pdma0 5
+ &pdma0 4>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_bus>;
+ clocks = <&clock CLK_SPI0>, <&clock CLK_SCLK_SPI0>;
+ clock-names = "spi", "spi_busclk0";
+ status = "disabled";
};
- };
-
- usbdrd_phy0: phy@12100000 {
- compatible = "samsung,exynos5420-usbdrd-phy";
- reg = <0x12100000 0x100>;
- clocks = <&clock CLK_USBD300>, <&clock CLK_SCLK_USBPHY300>;
- clock-names = "phy", "ref";
- samsung,pmu-syscon = <&pmu_system_controller>;
- #phy-cells = <1>;
- };
- usbdrd3_1: usb3-1 {
- compatible = "samsung,exynos5250-dwusb3";
- clocks = <&clock CLK_USBD301>;
- clock-names = "usbdrd30";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- usbdrd_dwc3_1: dwc3@12400000 {
- compatible = "snps,dwc3";
- reg = <0x12400000 0x10000>;
- interrupts = <0 73 0>;
- phys = <&usbdrd_phy1 0>, <&usbdrd_phy1 1>;
- phy-names = "usb2-phy", "usb3-phy";
+ spi_1: spi@12d30000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d30000 0x100>;
+ interrupts = <0 69 0>;
+ dmas = <&pdma1 5
+ &pdma1 4>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_bus>;
+ clocks = <&clock CLK_SPI1>, <&clock CLK_SCLK_SPI1>;
+ clock-names = "spi", "spi_busclk0";
+ status = "disabled";
};
- };
-
- usbdrd_phy1: phy@12500000 {
- compatible = "samsung,exynos5420-usbdrd-phy";
- reg = <0x12500000 0x100>;
- clocks = <&clock CLK_USBD301>, <&clock CLK_SCLK_USBPHY301>;
- clock-names = "phy", "ref";
- samsung,pmu-syscon = <&pmu_system_controller>;
- #phy-cells = <1>;
- };
-
- usbhost2: usb@12110000 {
- compatible = "samsung,exynos4210-ehci";
- reg = <0x12110000 0x100>;
- interrupts = <0 71 0>;
- clocks = <&clock CLK_USBH20>;
- clock-names = "usbhost";
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- phys = <&usb2_phy 1>;
+ spi_2: spi@12d40000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d40000 0x100>;
+ interrupts = <0 70 0>;
+ dmas = <&pdma0 7
+ &pdma0 6>;
+ dma-names = "tx", "rx";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_bus>;
+ clocks = <&clock CLK_SPI2>, <&clock CLK_SCLK_SPI2>;
+ clock-names = "spi", "spi_busclk0";
+ status = "disabled";
};
- };
- usbhost1: usb@12120000 {
- compatible = "samsung,exynos4210-ohci";
- reg = <0x12120000 0x100>;
- interrupts = <0 71 0>;
-
- clocks = <&clock CLK_USBH20>;
- clock-names = "usbhost";
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- phys = <&usb2_phy 1>;
+ dp_phy: dp-video-phy {
+ compatible = "samsung,exynos5420-dp-video-phy";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #phy-cells = <0>;
};
- };
-
- usb2_phy: phy@12130000 {
- compatible = "samsung,exynos5250-usb2-phy";
- reg = <0x12130000 0x100>;
- clocks = <&clock CLK_USBH20>, <&clock CLK_SCLK_USBPHY300>;
- clock-names = "phy", "ref";
- #phy-cells = <1>;
- samsung,sysreg-phandle = <&sysreg_system_controller>;
- samsung,pmureg-phandle = <&pmu_system_controller>;
- };
-
- sysmmu_g2dr: sysmmu@0x10A60000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x10A60000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <24 5>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_G2D>, <&clock CLK_G2D>;
- #iommu-cells = <0>;
- };
-
- sysmmu_g2dw: sysmmu@0x10A70000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x10A70000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <22 2>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_G2D>, <&clock CLK_G2D>;
- #iommu-cells = <0>;
- };
-
- sysmmu_tv: sysmmu@0x14650000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x14650000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <7 4>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MIXER>, <&clock CLK_MIXER>;
- power-domains = <&disp_pd>;
- #iommu-cells = <0>;
- };
-
- sysmmu_gscl0: sysmmu@0x13E80000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13E80000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
- power-domains = <&gsc_pd>;
- #iommu-cells = <0>;
- };
-
- sysmmu_gscl1: sysmmu@0x13E90000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13E90000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 2>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL1>, <&clock CLK_GSCL1>;
- power-domains = <&gsc_pd>;
- #iommu-cells = <0>;
- };
-
- sysmmu_scaler0r: sysmmu@0x12880000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x12880000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <22 4>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MSCL0>, <&clock CLK_MSCL0>;
- #iommu-cells = <0>;
- };
-
- sysmmu_scaler1r: sysmmu@0x12890000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x12890000 0x1000>;
- interrupts = <0 186 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MSCL1>, <&clock CLK_MSCL1>;
- #iommu-cells = <0>;
- };
- sysmmu_scaler2r: sysmmu@0x128A0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x128A0000 0x1000>;
- interrupts = <0 188 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MSCL2>, <&clock CLK_MSCL2>;
- #iommu-cells = <0>;
- };
-
- sysmmu_scaler0w: sysmmu@0x128C0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x128C0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <27 2>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MSCL0>, <&clock CLK_MSCL0>;
- #iommu-cells = <0>;
- };
-
- sysmmu_scaler1w: sysmmu@0x128D0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x128D0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <22 6>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MSCL1>, <&clock CLK_MSCL1>;
- #iommu-cells = <0>;
- };
-
- sysmmu_scaler2w: sysmmu@0x128E0000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x128E0000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <19 6>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MSCL2>, <&clock CLK_MSCL2>;
- #iommu-cells = <0>;
- };
-
- sysmmu_rotator: sysmmu@0x11D40000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11D40000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <4 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_ROTATOR>, <&clock CLK_ROTATOR>;
- #iommu-cells = <0>;
- };
-
- sysmmu_jpeg0: sysmmu@0x11F10000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11F10000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <4 2>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_JPEG>, <&clock CLK_JPEG>;
- #iommu-cells = <0>;
- };
-
- sysmmu_jpeg1: sysmmu@0x11F20000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11F20000 0x1000>;
- interrupts = <0 169 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_JPEG2>, <&clock CLK_JPEG2>;
- #iommu-cells = <0>;
- };
-
- sysmmu_mfc_l: sysmmu@0x11200000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11200000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <6 2>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MFCL>, <&clock CLK_MFC>;
- power-domains = <&mfc_pd>;
- #iommu-cells = <0>;
- };
-
- sysmmu_mfc_r: sysmmu@0x11210000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x11210000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <8 5>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_MFCR>, <&clock CLK_MFC>;
- power-domains = <&mfc_pd>;
- #iommu-cells = <0>;
- };
-
- sysmmu_fimd1_0: sysmmu@0x14640000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x14640000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <3 2>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
- power-domains = <&disp_pd>;
- #iommu-cells = <0>;
- };
-
- sysmmu_fimd1_1: sysmmu@0x14680000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x14680000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <3 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
- power-domains = <&disp_pd>;
- #iommu-cells = <0>;
- };
-
- bus_wcore: bus_wcore {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK400_WCORE>;
- clock-names = "bus";
- operating-points-v2 = <&bus_wcore_opp_table>;
- status = "disabled";
- };
-
- bus_noc: bus_noc {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK100_NOC>;
- clock-names = "bus";
- operating-points-v2 = <&bus_noc_opp_table>;
- status = "disabled";
- };
-
- bus_fsys_apb: bus_fsys_apb {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_PCLK200_FSYS>;
- clock-names = "bus";
- operating-points-v2 = <&bus_fsys_apb_opp_table>;
- status = "disabled";
- };
-
- bus_fsys: bus_fsys {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK200_FSYS>;
- clock-names = "bus";
- operating-points-v2 = <&bus_fsys_apb_opp_table>;
- status = "disabled";
- };
-
- bus_fsys2: bus_fsys2 {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK200_FSYS2>;
- clock-names = "bus";
- operating-points-v2 = <&bus_fsys2_opp_table>;
- status = "disabled";
- };
-
- bus_mfc: bus_mfc {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK333>;
- clock-names = "bus";
- operating-points-v2 = <&bus_mfc_opp_table>;
- status = "disabled";
- };
-
- bus_gen: bus_gen {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK266>;
- clock-names = "bus";
- operating-points-v2 = <&bus_gen_opp_table>;
- status = "disabled";
- };
-
- bus_peri: bus_peri {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK66>;
- clock-names = "bus";
- operating-points-v2 = <&bus_peri_opp_table>;
- status = "disabled";
- };
-
- bus_g2d: bus_g2d {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK333_G2D>;
- clock-names = "bus";
- operating-points-v2 = <&bus_g2d_opp_table>;
- status = "disabled";
- };
-
- bus_g2d_acp: bus_g2d_acp {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK266_G2D>;
- clock-names = "bus";
- operating-points-v2 = <&bus_g2d_acp_opp_table>;
- status = "disabled";
- };
-
- bus_jpeg: bus_jpeg {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK300_JPEG>;
- clock-names = "bus";
- operating-points-v2 = <&bus_jpeg_opp_table>;
- status = "disabled";
- };
-
- bus_jpeg_apb: bus_jpeg_apb {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK166>;
- clock-names = "bus";
- operating-points-v2 = <&bus_jpeg_apb_opp_table>;
- status = "disabled";
- };
-
- bus_disp1_fimd: bus_disp1_fimd {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK300_DISP1>;
- clock-names = "bus";
- operating-points-v2 = <&bus_disp1_fimd_opp_table>;
- status = "disabled";
- };
-
- bus_disp1: bus_disp1 {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK400_DISP1>;
- clock-names = "bus";
- operating-points-v2 = <&bus_disp1_opp_table>;
- status = "disabled";
- };
-
- bus_gscl_scaler: bus_gscl_scaler {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK300_GSCL>;
- clock-names = "bus";
- operating-points-v2 = <&bus_gscl_opp_table>;
- status = "disabled";
- };
-
- bus_mscl: bus_mscl {
- compatible = "samsung,exynos-bus";
- clocks = <&clock CLK_DOUT_ACLK400_MSCL>;
- clock-names = "bus";
- operating-points-v2 = <&bus_mscl_opp_table>;
- status = "disabled";
- };
-
- bus_wcore_opp_table: opp_table2 {
- compatible = "operating-points-v2";
-
- opp00 {
- opp-hz = /bits/ 64 <84000000>;
- opp-microvolt = <925000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <111000000>;
- opp-microvolt = <950000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <222000000>;
- opp-microvolt = <950000>;
+ mipi_phy: mipi-video-phy {
+ compatible = "samsung,s5pv210-mipi-video-phy";
+ syscon = <&pmu_system_controller>;
+ #phy-cells = <1>;
};
- opp03 {
- opp-hz = /bits/ 64 <333000000>;
- opp-microvolt = <950000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <400000000>;
- opp-microvolt = <987500>;
- };
- };
- bus_noc_opp_table: opp_table3 {
- compatible = "operating-points-v2";
-
- opp00 {
- opp-hz = /bits/ 64 <67000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <75000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <86000000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <100000000>;
+ dsi@14500000 {
+ compatible = "samsung,exynos5410-mipi-dsi";
+ reg = <0x14500000 0x10000>;
+ interrupts = <0 82 0>;
+ phys = <&mipi_phy 1>;
+ phy-names = "dsim";
+ clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
+ clock-names = "bus_clk", "pll_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
};
- };
-
- bus_fsys_apb_opp_table: opp_table4 {
- compatible = "operating-points-v2";
- opp-shared;
- opp00 {
- opp-hz = /bits/ 64 <100000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <200000000>;
+ adc: adc@12D10000 {
+ compatible = "samsung,exynos-adc-v2";
+ reg = <0x12D10000 0x100>;
+ interrupts = <0 106 0>;
+ clocks = <&clock CLK_TSADC>;
+ clock-names = "adc";
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ status = "disabled";
};
- };
-
- bus_fsys2_opp_table: opp_table5 {
- compatible = "operating-points-v2";
- opp00 {
- opp-hz = /bits/ 64 <75000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <100000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <150000000>;
+ hsi2c_8: i2c@12E00000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12E00000 0x1000>;
+ interrupts = <0 87 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8_hs_bus>;
+ clocks = <&clock CLK_USI4>;
+ clock-names = "hsi2c";
+ status = "disabled";
};
- };
- bus_mfc_opp_table: opp_table6 {
- compatible = "operating-points-v2";
-
- opp00 {
- opp-hz = /bits/ 64 <96000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <111000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <167000000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <222000000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <333000000>;
+ hsi2c_9: i2c@12E10000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12E10000 0x1000>;
+ interrupts = <0 88 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c9_hs_bus>;
+ clocks = <&clock CLK_USI5>;
+ clock-names = "hsi2c";
+ status = "disabled";
};
- };
- bus_gen_opp_table: opp_table7 {
- compatible = "operating-points-v2";
-
- opp00 {
- opp-hz = /bits/ 64 <89000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <133000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <178000000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <267000000>;
+ hsi2c_10: i2c@12E20000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12E20000 0x1000>;
+ interrupts = <0 203 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c10_hs_bus>;
+ clocks = <&clock CLK_USI6>;
+ clock-names = "hsi2c";
+ status = "disabled";
};
- };
-
- bus_peri_opp_table: opp_table8 {
- compatible = "operating-points-v2";
- opp00 {
- opp-hz = /bits/ 64 <67000000>;
+ hdmi: hdmi@14530000 {
+ compatible = "samsung,exynos5420-hdmi";
+ reg = <0x14530000 0x70000>;
+ interrupts = <0 95 0>;
+ clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
+ <&clock CLK_DOUT_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
+ <&clock CLK_MOUT_HDMI>;
+ clock-names = "hdmi", "sclk_hdmi", "sclk_pixel",
+ "sclk_hdmiphy", "mout_hdmi";
+ phy = <&hdmiphy>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ status = "disabled";
+ power-domains = <&disp_pd>;
+ };
+
+ hdmiphy: hdmiphy@145D0000 {
+ reg = <0x145D0000 0x20>;
+ };
+
+ mixer: mixer@14450000 {
+ compatible = "samsung,exynos5420-mixer";
+ reg = <0x14450000 0x10000>;
+ interrupts = <0 94 0>;
+ clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
+ <&clock CLK_SCLK_HDMI>;
+ clock-names = "mixer", "hdmi", "sclk_hdmi";
+ power-domains = <&disp_pd>;
+ iommus = <&sysmmu_tv>;
+ };
+
+ rotator: rotator@11C00000 {
+ compatible = "samsung,exynos5250-rotator";
+ reg = <0x11C00000 0x64>;
+ interrupts = <0 84 0>;
+ clocks = <&clock CLK_ROTATOR>;
+ clock-names = "rotator";
+ iommus = <&sysmmu_rotator>;
+ };
+
+ gsc_0: video-scaler@13e00000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e00000 0x1000>;
+ interrupts = <0 85 0>;
+ clocks = <&clock CLK_GSCL0>;
+ clock-names = "gscl";
+ power-domains = <&gsc_pd>;
+ iommus = <&sysmmu_gscl0>;
+ };
+
+ gsc_1: video-scaler@13e10000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e10000 0x1000>;
+ interrupts = <0 86 0>;
+ clocks = <&clock CLK_GSCL1>;
+ clock-names = "gscl";
+ power-domains = <&gsc_pd>;
+ iommus = <&sysmmu_gscl1>;
+ };
+
+ jpeg_0: jpeg@11F50000 {
+ compatible = "samsung,exynos5420-jpeg";
+ reg = <0x11F50000 0x1000>;
+ interrupts = <0 89 0>;
+ clock-names = "jpeg";
+ clocks = <&clock CLK_JPEG>;
+ iommus = <&sysmmu_jpeg0>;
+ };
+
+ jpeg_1: jpeg@11F60000 {
+ compatible = "samsung,exynos5420-jpeg";
+ reg = <0x11F60000 0x1000>;
+ interrupts = <0 168 0>;
+ clock-names = "jpeg";
+ clocks = <&clock CLK_JPEG2>;
+ iommus = <&sysmmu_jpeg1>;
+ };
+
+ pmu_system_controller: system-controller@10040000 {
+ compatible = "samsung,exynos5420-pmu", "syscon";
+ reg = <0x10040000 0x5000>;
+ clock-names = "clkout16";
+ clocks = <&clock CLK_FIN_PLL>;
+ #clock-cells = <1>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
};
- };
-
- bus_g2d_opp_table: opp_table9 {
- compatible = "operating-points-v2";
- opp00 {
- opp-hz = /bits/ 64 <84000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <167000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <222000000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <300000000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <333000000>;
+ tmu_cpu0: tmu@10060000 {
+ compatible = "samsung,exynos5420-tmu";
+ reg = <0x10060000 0x100>;
+ interrupts = <0 65 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ tmu_cpu1: tmu@10064000 {
+ compatible = "samsung,exynos5420-tmu";
+ reg = <0x10064000 0x100>;
+ interrupts = <0 183 0>;
+ clocks = <&clock CLK_TMU>;
+ clock-names = "tmu_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ tmu_cpu2: tmu@10068000 {
+ compatible = "samsung,exynos5420-tmu-ext-triminfo";
+ reg = <0x10068000 0x100>, <0x1006c000 0x4>;
+ interrupts = <0 184 0>;
+ clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
+ clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ tmu_cpu3: tmu@1006c000 {
+ compatible = "samsung,exynos5420-tmu-ext-triminfo";
+ reg = <0x1006c000 0x100>, <0x100a0000 0x4>;
+ interrupts = <0 185 0>;
+ clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
+ clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ tmu_gpu: tmu@100a0000 {
+ compatible = "samsung,exynos5420-tmu-ext-triminfo";
+ reg = <0x100a0000 0x100>, <0x10068000 0x4>;
+ interrupts = <0 215 0>;
+ clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
+ clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+ #include "exynos4412-tmu-sensor-conf.dtsi"
+ };
+
+ sysmmu_g2dr: sysmmu@0x10A60000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x10A60000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <24 5>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_G2D>, <&clock CLK_G2D>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_g2dw: sysmmu@0x10A70000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x10A70000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <22 2>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_G2D>, <&clock CLK_G2D>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_tv: sysmmu@0x14650000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x14650000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <7 4>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MIXER>, <&clock CLK_MIXER>;
+ power-domains = <&disp_pd>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_gscl0: sysmmu@0x13E80000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13E80000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
+ power-domains = <&gsc_pd>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_gscl1: sysmmu@0x13E90000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13E90000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 2>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL1>, <&clock CLK_GSCL1>;
+ power-domains = <&gsc_pd>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_scaler0r: sysmmu@0x12880000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x12880000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <22 4>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MSCL0>, <&clock CLK_MSCL0>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_scaler1r: sysmmu@0x12890000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x12890000 0x1000>;
+ interrupts = <0 186 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MSCL1>, <&clock CLK_MSCL1>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_scaler2r: sysmmu@0x128A0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x128A0000 0x1000>;
+ interrupts = <0 188 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MSCL2>, <&clock CLK_MSCL2>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_scaler0w: sysmmu@0x128C0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x128C0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <27 2>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MSCL0>, <&clock CLK_MSCL0>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_scaler1w: sysmmu@0x128D0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x128D0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <22 6>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MSCL1>, <&clock CLK_MSCL1>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_scaler2w: sysmmu@0x128E0000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x128E0000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <19 6>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MSCL2>, <&clock CLK_MSCL2>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_rotator: sysmmu@0x11D40000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11D40000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <4 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_ROTATOR>, <&clock CLK_ROTATOR>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_jpeg0: sysmmu@0x11F10000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11F10000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <4 2>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_JPEG>, <&clock CLK_JPEG>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_jpeg1: sysmmu@0x11F20000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11F20000 0x1000>;
+ interrupts = <0 169 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_JPEG2>, <&clock CLK_JPEG2>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_mfc_l: sysmmu@0x11200000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11200000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <6 2>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MFCL>, <&clock CLK_MFC>;
+ power-domains = <&mfc_pd>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_mfc_r: sysmmu@0x11210000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x11210000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <8 5>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_MFCR>, <&clock CLK_MFC>;
+ power-domains = <&mfc_pd>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_fimd1_0: sysmmu@0x14640000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x14640000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <3 2>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+ power-domains = <&disp_pd>;
+ #iommu-cells = <0>;
+ };
+
+ sysmmu_fimd1_1: sysmmu@0x14680000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x14680000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <3 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
+ power-domains = <&disp_pd>;
+ #iommu-cells = <0>;
+ };
+
+ bus_wcore: bus_wcore {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK400_WCORE>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_wcore_opp_table>;
+ status = "disabled";
};
- };
-
- bus_g2d_acp_opp_table: opp_table10 {
- compatible = "operating-points-v2";
- opp00 {
- opp-hz = /bits/ 64 <67000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <133000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <178000000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <267000000>;
+ bus_noc: bus_noc {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK100_NOC>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_noc_opp_table>;
+ status = "disabled";
};
- };
-
- bus_jpeg_opp_table: opp_table11 {
- compatible = "operating-points-v2";
- opp00 {
- opp-hz = /bits/ 64 <75000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <150000000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <200000000>;
- };
- opp03 {
- opp-hz = /bits/ 64 <300000000>;
+ bus_fsys_apb: bus_fsys_apb {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_PCLK200_FSYS>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_fsys_apb_opp_table>;
+ status = "disabled";
};
- };
-
- bus_jpeg_apb_opp_table: opp_table12 {
- compatible = "operating-points-v2";
- opp00 {
- opp-hz = /bits/ 64 <84000000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <111000000>;
+ bus_fsys: bus_fsys {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK200_FSYS>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_fsys_apb_opp_table>;
+ status = "disabled";
};
- opp02 {
- opp-hz = /bits/ 64 <134000000>;
+
+ bus_fsys2: bus_fsys2 {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK200_FSYS2>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_fsys2_opp_table>;
+ status = "disabled";
};
- opp03 {
- opp-hz = /bits/ 64 <167000000>;
+
+ bus_mfc: bus_mfc {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK333>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_mfc_opp_table>;
+ status = "disabled";
};
- };
- bus_disp1_fimd_opp_table: opp_table13 {
- compatible = "operating-points-v2";
+ bus_gen: bus_gen {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK266>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_gen_opp_table>;
+ status = "disabled";
+ };
- opp00 {
- opp-hz = /bits/ 64 <120000000>;
+ bus_peri: bus_peri {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK66>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_peri_opp_table>;
+ status = "disabled";
};
- opp01 {
- opp-hz = /bits/ 64 <200000000>;
+
+ bus_g2d: bus_g2d {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK333_G2D>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_g2d_opp_table>;
+ status = "disabled";
};
- };
- bus_disp1_opp_table: opp_table14 {
- compatible = "operating-points-v2";
+ bus_g2d_acp: bus_g2d_acp {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK266_G2D>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_g2d_acp_opp_table>;
+ status = "disabled";
+ };
- opp00 {
- opp-hz = /bits/ 64 <120000000>;
+ bus_jpeg: bus_jpeg {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK300_JPEG>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_jpeg_opp_table>;
+ status = "disabled";
};
- opp01 {
- opp-hz = /bits/ 64 <200000000>;
+
+ bus_jpeg_apb: bus_jpeg_apb {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK166>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_jpeg_apb_opp_table>;
+ status = "disabled";
};
- opp02 {
- opp-hz = /bits/ 64 <300000000>;
+
+ bus_disp1_fimd: bus_disp1_fimd {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK300_DISP1>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_disp1_fimd_opp_table>;
+ status = "disabled";
};
- };
- bus_gscl_opp_table: opp_table15 {
- compatible = "operating-points-v2";
+ bus_disp1: bus_disp1 {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK400_DISP1>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_disp1_opp_table>;
+ status = "disabled";
+ };
- opp00 {
- opp-hz = /bits/ 64 <150000000>;
+ bus_gscl_scaler: bus_gscl_scaler {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK300_GSCL>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_gscl_opp_table>;
+ status = "disabled";
};
- opp01 {
- opp-hz = /bits/ 64 <200000000>;
+
+ bus_mscl: bus_mscl {
+ compatible = "samsung,exynos-bus";
+ clocks = <&clock CLK_DOUT_ACLK400_MSCL>;
+ clock-names = "bus";
+ operating-points-v2 = <&bus_mscl_opp_table>;
+ status = "disabled";
};
- opp02 {
- opp-hz = /bits/ 64 <300000000>;
+
+ bus_wcore_opp_table: opp_table2 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <84000000>;
+ opp-microvolt = <925000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <111000000>;
+ opp-microvolt = <950000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <222000000>;
+ opp-microvolt = <950000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <333000000>;
+ opp-microvolt = <950000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <987500>;
+ };
+ };
+
+ bus_noc_opp_table: opp_table3 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <67000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <75000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <86000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+ };
+
+ bus_fsys_apb_opp_table: opp_table4 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp00 {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <200000000>;
+ };
+ };
+
+ bus_fsys2_opp_table: opp_table5 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <75000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <150000000>;
+ };
+ };
+
+ bus_mfc_opp_table: opp_table6 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <96000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <111000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <167000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <222000000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <333000000>;
+ };
+ };
+
+ bus_gen_opp_table: opp_table7 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <89000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <133000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <178000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <267000000>;
+ };
+ };
+
+ bus_peri_opp_table: opp_table8 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <67000000>;
+ };
+ };
+
+ bus_g2d_opp_table: opp_table9 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <84000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <167000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <222000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <300000000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <333000000>;
+ };
+ };
+
+ bus_g2d_acp_opp_table: opp_table10 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <67000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <133000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <178000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <267000000>;
+ };
+ };
+
+ bus_jpeg_opp_table: opp_table11 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <75000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <150000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <200000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <300000000>;
+ };
+ };
+
+ bus_jpeg_apb_opp_table: opp_table12 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <84000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <111000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <134000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <167000000>;
+ };
+ };
+
+ bus_disp1_fimd_opp_table: opp_table13 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <120000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <200000000>;
+ };
+ };
+
+ bus_disp1_opp_table: opp_table14 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <120000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <200000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <300000000>;
+ };
+ };
+
+ bus_gscl_opp_table: opp_table15 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <150000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <200000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <300000000>;
+ };
+ };
+
+ bus_mscl_opp_table: opp_table16 {
+ compatible = "operating-points-v2";
+
+ opp00 {
+ opp-hz = /bits/ 64 <84000000>;
+ };
+ opp01 {
+ opp-hz = /bits/ 64 <167000000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <222000000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <333000000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <400000000>;
+ };
};
};
- bus_mscl_opp_table: opp_table16 {
- compatible = "operating-points-v2";
-
- opp00 {
- opp-hz = /bits/ 64 <84000000>;
+ thermal-zones {
+ cpu0_thermal: cpu0-thermal {
+ thermal-sensors = <&tmu_cpu0>;
+ #include "exynos5420-trip-points.dtsi"
};
- opp01 {
- opp-hz = /bits/ 64 <167000000>;
+ cpu1_thermal: cpu1-thermal {
+ thermal-sensors = <&tmu_cpu1>;
+ #include "exynos5420-trip-points.dtsi"
};
- opp02 {
- opp-hz = /bits/ 64 <222000000>;
+ cpu2_thermal: cpu2-thermal {
+ thermal-sensors = <&tmu_cpu2>;
+ #include "exynos5420-trip-points.dtsi"
};
- opp03 {
- opp-hz = /bits/ 64 <333000000>;
+ cpu3_thermal: cpu3-thermal {
+ thermal-sensors = <&tmu_cpu3>;
+ #include "exynos5420-trip-points.dtsi"
};
- opp04 {
- opp-hz = /bits/ 64 <400000000>;
+ gpu_thermal: gpu-thermal {
+ thermal-sensors = <&tmu_gpu>;
+ #include "exynos5420-trip-points.dtsi"
};
};
};
@@ -1614,6 +1330,72 @@
iommu-names = "m0", "m1";
};
+&i2c_0 {
+ clocks = <&clock CLK_I2C0>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_bus>;
+};
+
+&i2c_1 {
+ clocks = <&clock CLK_I2C1>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_bus>;
+};
+
+&i2c_2 {
+ clocks = <&clock CLK_I2C2>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_bus>;
+};
+
+&i2c_3 {
+ clocks = <&clock CLK_I2C3>;
+ clock-names = "i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_bus>;
+};
+
+&hsi2c_4 {
+ clocks = <&clock CLK_USI0>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_hs_bus>;
+};
+
+&hsi2c_5 {
+ clocks = <&clock CLK_USI1>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_hs_bus>;
+};
+
+&hsi2c_6 {
+ clocks = <&clock CLK_USI2>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_hs_bus>;
+};
+
+&hsi2c_7 {
+ clocks = <&clock CLK_USI3>;
+ clock-names = "hsi2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_hs_bus>;
+};
+
+&mct {
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MCT>;
+ clock-names = "fin_pll", "mct";
+};
+
+&pwm {
+ clocks = <&clock CLK_PWM>;
+ clock-names = "timers";
+};
+
&rtc {
clocks = <&clock CLK_RTC>;
clock-names = "rtc";
@@ -1641,4 +1423,58 @@
clock-names = "uart", "clk_uart_baud0";
};
+&sss {
+ clocks = <&clock CLK_SSS>;
+ clock-names = "secss";
+};
+
+&usbdrd3_0 {
+ clocks = <&clock CLK_USBD300>;
+ clock-names = "usbdrd30";
+};
+
+&usbdrd_phy0 {
+ clocks = <&clock CLK_USBD300>, <&clock CLK_SCLK_USBPHY300>;
+ clock-names = "phy", "ref";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+};
+
+&usbdrd3_1 {
+ clocks = <&clock CLK_USBD301>;
+ clock-names = "usbdrd30";
+};
+
+&usbdrd_dwc3_1 {
+ interrupts = <GIC_SPI 73 0>;
+};
+
+&usbdrd_phy1 {
+ clocks = <&clock CLK_USBD301>, <&clock CLK_SCLK_USBPHY301>;
+ clock-names = "phy", "ref";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+};
+
+&usbhost1 {
+ clocks = <&clock CLK_USBH20>;
+ clock-names = "usbhost";
+};
+
+&usbhost2 {
+ clocks = <&clock CLK_USBH20>;
+ clock-names = "usbhost";
+};
+
+&usb2_phy {
+ clocks = <&clock CLK_USBH20>, <&clock CLK_SCLK_USBPHY300>;
+ clock-names = "phy", "ref";
+ samsung,sysreg-phandle = <&sysreg_system_controller>;
+ samsung,pmureg-phandle = <&pmu_system_controller>;
+};
+
+&watchdog {
+ clocks = <&clock CLK_WDT>;
+ clock-names = "watchdog";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+};
+
#include "exynos5420-pinctrl.dtsi"
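
The bus_* nodes added above are intentionally left disabled and carry only a clock and an operating-points-v2 table; a board .dts is expected to enable each bus and supply the voltage rail and devfreq wiring (the Odroid XU3 common dtsi below does this for &bus_wcore). A minimal sketch, assuming the NoC probe labels (nocp_mem0_0 and friends) and the buck3_reg regulator that the board actually provides, neither of which appears in this hunk:

    &bus_wcore {
            devfreq-events = <&nocp_mem0_0>, <&nocp_mem0_1>,
                             <&nocp_mem1_0>, <&nocp_mem1_1>;
            vdd-supply = <&buck3_reg>;              /* assumed board regulator */
            exynos,saturation-ratio = <100>;
            status = "okay";
    };

    &bus_noc {
            devfreq = <&bus_wcore>;                 /* passive bus follows bus_wcore */
            status = "okay";
    };
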
diff --git a/arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi
deleted file mode 100644
index 3e4c4ad96d63..000000000000
--- a/arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Device tree sources for Exynos5422 thermal zone
- *
- * Copyright (c) 2015 Lukasz Majewski <l.majewski@samsung.com>
- * Anand Moon <linux.amoon@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <dt-bindings/thermal/thermal.h>
-
-/ {
- thermal-zones {
- cpu0_thermal: cpu0-thermal {
- thermal-sensors = <&tmu_cpu0 0>;
- polling-delay-passive = <250>;
- polling-delay = <0>;
- trips {
- cpu_alert0: cpu-alert-0 {
- temperature = <50000>; /* millicelsius */
- hysteresis = <5000>; /* millicelsius */
- type = "active";
- };
- cpu_alert1: cpu-alert-1 {
- temperature = <60000>; /* millicelsius */
- hysteresis = <5000>; /* millicelsius */
- type = "active";
- };
- cpu_alert2: cpu-alert-2 {
- temperature = <70000>; /* millicelsius */
- hysteresis = <5000>; /* millicelsius */
- type = "active";
- };
- cpu_crit0: cpu-crit-0 {
- temperature = <120000>; /* millicelsius */
- hysteresis = <0>; /* millicelsius */
- type = "critical";
- };
- /*
- * Exyunos542x support only 4 trip-points
- * so for these polling mode is required.
- * Start polling at temperature level of last
- * interrupt-driven trip: cpu_alert2
- */
- cpu_alert3: cpu-alert-3 {
- temperature = <70000>; /* millicelsius */
- hysteresis = <10000>; /* millicelsius */
- type = "passive";
- };
- cpu_alert4: cpu-alert-4 {
- temperature = <85000>; /* millicelsius */
- hysteresis = <10000>; /* millicelsius */
- type = "passive";
- };
-
- };
- cooling-maps {
- map0 {
- trip = <&cpu_alert0>;
- cooling-device = <&fan0 0 1>;
- };
- map1 {
- trip = <&cpu_alert1>;
- cooling-device = <&fan0 1 2>;
- };
- map2 {
- trip = <&cpu_alert2>;
- cooling-device = <&fan0 2 3>;
- };
- /*
- * When reaching cpu_alert3, reduce CPU
- * by 2 steps. On Exynos5422/5800 that would
- * be: 1500 MHz and 1100 MHz.
- */
- map3 {
- trip = <&cpu_alert3>;
- cooling-device = <&cpu0 0 2>;
- };
- map4 {
- trip = <&cpu_alert3>;
- cooling-device = <&cpu4 0 2>;
- };
-
- /*
- * When reaching cpu_alert4, reduce CPU
- * further, down to 600 MHz (11 steps for big,
- * 7 steps for LITTLE).
- */
- map5 {
- trip = <&cpu_alert4>;
- cooling-device = <&cpu0 3 7>;
- };
- map6 {
- trip = <&cpu_alert4>;
- cooling-device = <&cpu4 3 11>;
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 2a4e10bc8801..d56253049ccb 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -1,9 +1,11 @@
/*
* Hardkernel Odroid XU3 board device tree source
*
- * Copyright (c) 2014 Collabora Ltd.
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
+ * Copyright (c) 2014 Collabora Ltd.
+ * Copyright (c) 2015 Lukasz Majewski <l.majewski@samsung.com>
+ * Anand Moon <linux.amoon@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,7 +18,7 @@
#include <dt-bindings/sound/samsung-i2s.h>
#include "exynos5800.dtsi"
#include "exynos5422-cpus.dtsi"
-#include "exynos5422-cpu-thermal.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
memory {
@@ -54,6 +56,94 @@
#cooling-cells = <2>;
cooling-levels = <0 130 170 230>;
};
+
+ thermal-zones {
+ cpu0_thermal: cpu0-thermal {
+ thermal-sensors = <&tmu_cpu0 0>;
+ polling-delay-passive = <250>;
+ polling-delay = <0>;
+ trips {
+ cpu_alert0: cpu-alert-0 {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_alert1: cpu-alert-1 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_alert2: cpu-alert-2 {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "active";
+ };
+ cpu_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ /*
+ * Exynos542x supports only 4 hardware trip points,
+ * so the remaining trips need polling mode.
+ * Start polling at the temperature level of the
+ * last interrupt-driven trip: cpu_alert2
+ */
+ cpu_alert3: cpu-alert-3 {
+ temperature = <70000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu_alert4: cpu-alert-4 {
+ temperature = <85000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+
+ };
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&fan0 0 1>;
+ };
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&fan0 1 2>;
+ };
+ map2 {
+ trip = <&cpu_alert2>;
+ cooling-device = <&fan0 2 3>;
+ };
+ /*
+ * When reaching cpu_alert3, reduce CPU
+ * by 2 steps. On Exynos5422/5800 that would
+ * be: 1600 MHz and 1100 MHz.
+ */
+ map3 {
+ trip = <&cpu_alert3>;
+ cooling-device = <&cpu0 0 2>;
+ };
+ map4 {
+ trip = <&cpu_alert3>;
+ cooling-device = <&cpu4 0 2>;
+ };
+
+ /*
+ * When reaching cpu_alert4, reduce CPU
+ * further, down to 600 MHz (11 steps for big,
+ * 7 steps for LITTLE).
+ */
+ map5 {
+ trip = <&cpu_alert4>;
+ cooling-device = <&cpu0 3 7>;
+ };
+ map6 {
+ trip = <&cpu_alert4>;
+ cooling-device = <&cpu4 3 11>;
+ };
+ };
+ };
+ };
};
&bus_wcore {
@@ -405,11 +495,6 @@
};
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
mmc-pwrseq = <&emmc_pwrseq>;
@@ -487,27 +572,22 @@
&tmu_cpu0 {
vtmu-supply = <&ldo7_reg>;
- status = "okay";
};
&tmu_cpu1 {
vtmu-supply = <&ldo7_reg>;
- status = "okay";
};
&tmu_cpu2 {
vtmu-supply = <&ldo7_reg>;
- status = "okay";
};
&tmu_cpu3 {
vtmu-supply = <&ldo7_reg>;
- status = "okay";
};
&tmu_gpu {
vtmu-supply = <&ldo7_reg>;
- status = "okay";
};
&rtc {
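
In the cooling-maps above, the two trailing cells of each cooling-device specifier are the minimum and maximum cooling state the map may request. For the PWM fan those states index its cooling-levels array (<0 130 170 230>), and for a CPU they cap how many OPP steps below the top frequency the cluster may be pushed; a commented reading of two of the maps, with no board configuration added:

    map0 {
            trip = <&cpu_alert0>;
            /* fan0 may sit at cooling state 0 or 1,
             * i.e. cooling-levels entry 0 (off) or 1 (130) */
            cooling-device = <&fan0 0 1>;
    };
    map3 {
            trip = <&cpu_alert3>;
            /* limit cpu0's cluster to between 0 and 2
             * OPP steps below its maximum frequency */
            cooling-device = <&cpu0 0 2>;
    };
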
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
index 2ae1cf41dcb6..03fa88c45426 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts
@@ -14,44 +14,11 @@
/dts-v1/;
#include "exynos5422-odroidxu3-common.dtsi"
#include "exynos5422-odroidxu3-audio.dtsi"
+#include "exynos54xx-odroidxu-leds.dtsi"
/ {
model = "Hardkernel Odroid XU3 Lite";
compatible = "hardkernel,odroid-xu3-lite", "samsung,exynos5800", "samsung,exynos5";
-
- pwmleds {
- compatible = "pwm-leds";
-
- greenled {
- label = "green:mmc0";
- pwms = <&pwm 1 2000000 0>;
- pwm-names = "pwm1";
- /*
- * Green LED is much brighter than the others
- * so limit its max brightness
- */
- max_brightness = <127>;
- linux,default-trigger = "mmc0";
- };
-
- blueled {
- label = "blue:heartbeat";
- pwms = <&pwm 2 2000000 0>;
- pwm-names = "pwm2";
- max_brightness = <255>;
- linux,default-trigger = "heartbeat";
- };
- };
-
- gpioleds {
- compatible = "gpio-leds";
- redled {
- label = "red:microSD";
- gpios = <&gpx2 3 GPIO_ACTIVE_HIGH>;
- default-state = "off";
- linux,default-trigger = "mmc1";
- };
- };
};
&pwm {
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3.dts b/arch/arm/boot/dts/exynos5422-odroidxu3.dts
index 432406db85de..9ed6564acfb0 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3.dts
@@ -13,44 +13,11 @@
/dts-v1/;
#include "exynos5422-odroidxu3-common.dtsi"
#include "exynos5422-odroidxu3-audio.dtsi"
+#include "exynos54xx-odroidxu-leds.dtsi"
/ {
model = "Hardkernel Odroid XU3";
compatible = "hardkernel,odroid-xu3", "samsung,exynos5800", "samsung,exynos5";
-
- pwmleds {
- compatible = "pwm-leds";
-
- greenled {
- label = "green:mmc0";
- pwms = <&pwm 1 2000000 0>;
- pwm-names = "pwm1";
- /*
- * Green LED is much brighter than the others
- * so limit its max brightness
- */
- max_brightness = <127>;
- linux,default-trigger = "mmc0";
- };
-
- blueled {
- label = "blue:heartbeat";
- pwms = <&pwm 2 2000000 0>;
- pwm-names = "pwm2";
- max_brightness = <255>;
- linux,default-trigger = "heartbeat";
- };
- };
-
- gpioleds {
- compatible = "gpio-leds";
- redled {
- label = "red:microSD";
- gpios = <&gpx2 3 GPIO_ACTIVE_HIGH>;
- default-state = "off";
- linux,default-trigger = "mmc1";
- };
- };
};
&i2c_0 {
diff --git a/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
new file mode 100644
index 000000000000..0ed30206625c
--- /dev/null
+++ b/arch/arm/boot/dts/exynos54xx-odroidxu-leds.dtsi
@@ -0,0 +1,50 @@
+/*
+ * Hardkernel Odroid XU/XU3 LED device tree source
+ *
+ * Copyright (c) 2015,2016 Krzysztof Kozlowski
+ * Copyright (c) 2014 Collabora Ltd.
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ pwmleds {
+ compatible = "pwm-leds";
+
+ greenled {
+ label = "green:mmc0";
+ pwms = <&pwm 1 2000000 0>;
+ pwm-names = "pwm1";
+ /*
+ * Green LED is much brighter than the others
+ * so limit its max brightness
+ */
+ max_brightness = <127>;
+ linux,default-trigger = "mmc0";
+ };
+
+ blueled {
+ label = "blue:heartbeat";
+ pwms = <&pwm 2 2000000 0>;
+ pwm-names = "pwm2";
+ max_brightness = <255>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ gpioleds {
+ compatible = "gpio-leds";
+ redled {
+ label = "red:microSD";
+ gpios = <&gpx2 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ linux,default-trigger = "mmc1";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/exynos54xx.dtsi b/arch/arm/boot/dts/exynos54xx.dtsi
new file mode 100644
index 000000000000..06a604911e87
--- /dev/null
+++ b/arch/arm/boot/dts/exynos54xx.dtsi
@@ -0,0 +1,199 @@
+/*
+ * Samsung's Exynos54xx SoC series common device tree source
+ *
+ * Copyright (c) 2012-2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2016 Krzysztof Kozlowski
+ *
+ * Device nodes common for Samsung Exynos5410/5420/5422/5800. Specific
+ * Exynos 54xx SoCs should include this file and customize it further
+ * (e.g. with clocks).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "skeleton.dtsi"
+#include "exynos5.dtsi"
+
+/ {
+ compatible = "samsung,exynos5";
+
+ aliases {
+ i2c4 = &hsi2c_4;
+ i2c5 = &hsi2c_5;
+ i2c6 = &hsi2c_6;
+ i2c7 = &hsi2c_7;
+ usbdrdphy0 = &usbdrd_phy0;
+ usbdrdphy1 = &usbdrd_phy1;
+ };
+
+ soc: soc {
+ sysram@02020000 {
+ compatible = "mmio-sram";
+ reg = <0x02020000 0x54000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x02020000 0x54000>;
+
+ smp-sysram@0 {
+ compatible = "samsung,exynos4210-sysram";
+ reg = <0x0 0x1000>;
+ };
+
+ smp-sysram@53000 {
+ compatible = "samsung,exynos4210-sysram-ns";
+ reg = <0x53000 0x1000>;
+ };
+ };
+
+ mct: mct@101c0000 {
+ compatible = "samsung,exynos4210-mct";
+ reg = <0x101c0000 0xb00>;
+ interrupt-parent = <&mct_map>;
+ interrupts = <0>, <1>, <2>, <3>, <4>, <5>, <6>, <7>,
+ <8>, <9>, <10>, <11>;
+
+ mct_map: mct-map {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <0 &combiner 23 3>,
+ <1 &combiner 23 4>,
+ <2 &combiner 25 2>,
+ <3 &combiner 25 3>,
+ <4 &gic 0 120 0>,
+ <5 &gic 0 121 0>,
+ <6 &gic 0 122 0>,
+ <7 &gic 0 123 0>,
+ <8 &gic 0 128 0>,
+ <9 &gic 0 129 0>,
+ <10 &gic 0 130 0>,
+ <11 &gic 0 131 0>;
+ };
+ };
+
+ watchdog: watchdog@101d0000 {
+ compatible = "samsung,exynos5420-wdt";
+ reg = <0x101d0000 0x100>;
+ interrupts = <0 42 0>;
+ };
+
+ sss: sss@10830000 {
+ compatible = "samsung,exynos4210-secss";
+ reg = <0x10830000 0x300>;
+ interrupts = <0 112 0>;
+ };
+
+ /* i2c_0-3 are defined in exynos5.dtsi */
+ hsi2c_4: i2c@12ca0000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12ca0000 0x1000>;
+ interrupts = <0 60 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_5: i2c@12cb0000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12cb0000 0x1000>;
+ interrupts = <0 61 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_6: i2c@12cc0000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12cc0000 0x1000>;
+ interrupts = <0 62 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_7: i2c@12cd0000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12cd0000 0x1000>;
+ interrupts = <0 63 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ usbdrd3_0: usb3-0 {
+ compatible = "samsung,exynos5250-dwusb3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ usbdrd_dwc3_0: dwc3@12000000 {
+ compatible = "snps,dwc3";
+ reg = <0x12000000 0x10000>;
+ interrupts = <0 72 0>;
+ phys = <&usbdrd_phy0 0>, <&usbdrd_phy0 1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ usbdrd_phy0: phy@12100000 {
+ compatible = "samsung,exynos5420-usbdrd-phy";
+ reg = <0x12100000 0x100>;
+ #phy-cells = <1>;
+ };
+
+ usbdrd3_1: usb3-1 {
+ compatible = "samsung,exynos5250-dwusb3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ usbdrd_dwc3_1: dwc3@12400000 {
+ compatible = "snps,dwc3";
+ reg = <0x12400000 0x10000>;
+ phys = <&usbdrd_phy1 0>, <&usbdrd_phy1 1>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ usbdrd_phy1: phy@12500000 {
+ compatible = "samsung,exynos5420-usbdrd-phy";
+ reg = <0x12500000 0x100>;
+ #phy-cells = <1>;
+ };
+
+ usbhost2: usb@12110000 {
+ compatible = "samsung,exynos4210-ehci";
+ reg = <0x12110000 0x100>;
+ interrupts = <0 71 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ phys = <&usb2_phy 1>;
+ };
+ };
+
+ usbhost1: usb@12120000 {
+ compatible = "samsung,exynos4210-ohci";
+ reg = <0x12120000 0x100>;
+ interrupts = <0 71 0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ phys = <&usb2_phy 1>;
+ };
+ };
+
+ usb2_phy: phy@12130000 {
+ compatible = "samsung,exynos5250-usb2-phy";
+ reg = <0x12130000 0x100>;
+ #phy-cells = <1>;
+ };
+ };
+};
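
As its header comment says, exynos54xx.dtsi only describes what the whole Exynos5410/5420/5422/5800 family shares, so nodes such as the HSI2C controllers and the watchdog come up without clocks and stay disabled. A per-SoC dtsi that includes it fills those in, exactly as the exynos5420.dtsi hunk earlier in this patch does; a minimal sketch for a sibling SoC (the clock indices here are placeholders, not taken from any real clock header):

    #include "exynos54xx.dtsi"

    &hsi2c_4 {
            clocks = <&clock CLK_USI0>;             /* placeholder index */
            clock-names = "hsi2c";
    };

    &watchdog {
            clocks = <&clock CLK_WDT>;              /* placeholder index */
            clock-names = "watchdog";
            samsung,syscon-phandle = <&pmu_system_controller>;
    };
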
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 62ceb89e073f..5ec71e2400fd 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -16,6 +16,7 @@
#include <dt-bindings/regulator/maxim,max77802.h>
#include "exynos5800.dtsi"
#include "exynos5420-cpus.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
/ {
model = "Google Peach Pi Rev 10+";
@@ -278,7 +279,6 @@
regulator-name = "vdd_1v2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-off-in-suspend;
@@ -301,7 +301,6 @@
regulator-name = "vdd_1v35";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -323,7 +322,6 @@
regulator-name = "vdd_2v";
regulator-min-microvolt = <2000000>;
regulator-max-microvolt = <2000000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -334,7 +332,6 @@
regulator-name = "vdd_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -419,7 +416,6 @@
regulator-name = "vdd_ldo9";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-always-on;
regulator-state-mem {
regulator-on-in-suspend;
regulator-mode = <MAX77802_OPMODE_LP>;
@@ -430,7 +426,6 @@
regulator-name = "vdd_ldo10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
};
@@ -669,11 +664,6 @@
status = "okay";
};
-&mfc {
- samsung,mfc-r = <0x43000000 0x800000>;
- samsung,mfc-l = <0x51000000 0x800000>;
-};
-
&mmc_0 {
status = "okay";
num-slots = <1>;
@@ -1022,6 +1012,26 @@
status = "okay";
};
+&tmu_cpu0 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu1 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu2 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_cpu3 {
+ vtmu-supply = <&ldo10_reg>;
+};
+
+&tmu_gpu {
+ vtmu-supply = <&ldo10_reg>;
+};
+
&usbdrd_dwc3_0 {
dr_mode = "host";
};
diff --git a/arch/arm/boot/dts/ge863-pro3.dtsi b/arch/arm/boot/dts/ge863-pro3.dtsi
index 0d0e62489d93..4aee5cc75fa4 100644
--- a/arch/arm/boot/dts/ge863-pro3.dtsi
+++ b/arch/arm/boot/dts/ge863-pro3.dtsi
@@ -11,15 +11,6 @@
/ {
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <6000000>;
- };
-
main_xtal {
clock-frequency = <6000000>;
};
diff --git a/arch/arm/boot/dts/hi3519-demb.dts b/arch/arm/boot/dts/hi3519-demb.dts
new file mode 100644
index 000000000000..6991ab694c9c
--- /dev/null
+++ b/arch/arm/boot/dts/hi3519-demb.dts
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/dts-v1/;
+#include "hi3519.dtsi"
+
+/ {
+ model = "HiSilicon HI3519 DEMO Board";
+ compatible = "hisilicon,hi3519";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&dual_timer0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/hi3519.dtsi b/arch/arm/boot/dts/hi3519.dtsi
new file mode 100644
index 000000000000..5729ecfcdc8b
--- /dev/null
+++ b/arch/arm/boot/dts/hi3519.dtsi
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2015 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <dt-bindings/clock/hi3519-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0>;
+ };
+ };
+
+ gic: interrupt-controller@10300000 {
+ compatible = "arm,cortex-a7-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x10301000 0x1000>, <0x10302000 0x1000>;
+ };
+
+ clk_3m: clk_3m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <3000000>;
+ };
+
+ crg: clock-reset-controller@12010000 {
+ compatible = "hisilicon,hi3519-crg";
+ #clock-cells = <1>;
+ #reset-cells = <2>;
+ reg = <0x12010000 0x10000>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ uart0: serial@12100000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x12100000 0x1000>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_UART0_CLK>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ uart1: serial@12101000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x12101000 0x1000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_UART1_CLK>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ uart2: serial@12102000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x12102000 0x1000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_UART2_CLK>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ uart3: serial@12103000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x12103000 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_UART3_CLK>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ uart4: serial@12104000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x12104000 0x1000>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_UART4_CLK>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ dual_timer0: timer@12000000 {
+ compatible = "arm,sp804", "arm,primecell";
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x12000000 0x1000>;
+ clocks = <&clk_3m>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ dual_timer1: timer@12001000 {
+ compatible = "arm,sp804", "arm,primecell";
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x12001000 0x1000>;
+ clocks = <&clk_3m>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ dual_timer2: timer@12002000 {
+ compatible = "arm,sp804", "arm,primecell";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x12002000 0x1000>;
+ clocks = <&clk_3m>;
+ clock-names = "apb_pclk";
+ status = "disable";
+ };
+
+ spi_bus0: spi@12120000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x12120000 0x1000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_SPI0_CLK>;
+ clock-names = "apb_pclk";
+ num-cs = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ spi_bus1: spi@12121000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x12121000 0x1000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_SPI1_CLK>;
+ clock-names = "apb_pclk";
+ num-cs = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ spi_bus2: spi@12122000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x12122000 0x1000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg HI3519_SPI2_CLK>;
+ clock-names = "apb_pclk";
+ num-cs = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ sysctrl: system-controller@12020000 {
+ compatible = "hisilicon,hi3519-sysctrl", "syscon";
+ reg = <0x12020000 0x1000>;
+ };
+
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&sysctrl>;
+ offset = <0x4>;
+ mask = <0xdeadbeef>;
+ };
+ };
+};
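
Every UART, timer and SPI controller in hi3519.dtsi is added with status = "disable", so nothing probes until a board file opts in, which is what hi3519-demb.dts does above for uart0 and dual_timer0. Enabling one of the PL022 SPI buses follows the same pattern; a sketch with a hypothetical SPI NOR flash child (the child node and its frequency are assumptions, not part of this patch):

    &spi_bus0 {
            status = "okay";

            flash@0 {                               /* hypothetical device */
                    compatible = "jedec,spi-nor";
                    reg = <0>;
                    spi-max-frequency = <25000000>;
            };
    };
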
diff --git a/arch/arm/boot/dts/imx1-ads.dts b/arch/arm/boot/dts/imx1-ads.dts
index af4eee5794aa..f50498659cc3 100644
--- a/arch/arm/boot/dts/imx1-ads.dts
+++ b/arch/arm/boot/dts/imx1-ads.dts
@@ -66,14 +66,14 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx1-apf9328.dts b/arch/arm/boot/dts/imx1-apf9328.dts
index 07d92fb40e6f..e8b4b52c2418 100644
--- a/arch/arm/boot/dts/imx1-apf9328.dts
+++ b/arch/arm/boot/dts/imx1-apf9328.dts
@@ -34,14 +34,14 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx23-sansa.dts b/arch/arm/boot/dts/imx23-sansa.dts
new file mode 100644
index 000000000000..4ec32f4c7885
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-sansa.dts
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2013-2016 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/dts-v1/;
+#include "imx23.dtsi"
+
+/ {
+ model = "SanDisk Sansa Fuze+";
+ compatible = "sandisk,sansa_fuze_plus", "fsl,imx23";
+
+ memory {
+ reg = <0x40000000 0x04000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_vddio_sd0>;
+ cd-inverted;
+ status = "okay";
+ };
+
+ ssp1: ssp@80034000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_8bit_pins_a>;
+ bus-width = <8>;
+ vmmc-supply = <&reg_vddio_sd1>;
+ non-removable;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_GPMI_D08__GPIO_0_8
+ MX23_PAD_PWM3__GPIO_1_29
+ MX23_PAD_AUART1_RTS__GPIO_0_27
+ MX23_PAD_AUART1_CTS__GPIO_0_26
+ MX23_PAD_I2C_SCL__I2C_SCL
+ MX23_PAD_I2C_SDA__I2C_SDA
+ MX23_PAD_LCD_DOTCK__GPIO_1_22
+ MX23_PAD_LCD_HSYNC__GPIO_1_24
+ MX23_PAD_PWM3__GPIO_1_29
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2_pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+
+ lradc@80050000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ dr_mode = "peripheral";
+ status = "okay";
+ };
+ };
+
+ reg_vddio_sd0: regulator-vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio0 8 0>;
+ };
+
+ reg_vddio_sd1: regulator-vddio-sd1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 0>;
+ };
+
+ reg_vdd_touchpad: regulator-vdd-touchpad0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-touchpad0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio0 26 0>;
+ regulator-always-on;
+ enable-active-low;
+ };
+
+ reg_vdd_tuner: regulator-vdd-tuner0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-tuner0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio0 29 0>;
+ regulator-always-on;
+ enable-active-low;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 2 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+
+ i2c-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "i2c-gpio";
+ gpios = <
+ &gpio1 24 0 /* SDA */
+ &gpio1 22 0 /* SCL */
+ >;
+ i2c-gpio,delay-us = <2>; /* ~100 kHz */
+ };
+
+ i2c-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "i2c-gpio";
+ gpios = <
+ &gpio0 31 0 /* SDA */
+ &gpio0 30 0 /* SCL */
+ >;
+ i2c-gpio,delay-us = <2>; /* ~100 kHz */
+
+ touch: touch@20 {
+ compatible = "synaptics,synaptics_i2c";
+ reg = <0x20>;
+ };
+
+ eeprom: eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+ };
+
+};
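
The pin groups in this board file spell out the pad configuration as raw numbers, whereas imx23.dtsi (further down in this patch) uses the MXS_* constants for the same fields. Assuming the usual macro values (4 mA drive = 0, high voltage = 1, pull-up disabled = 0), the hog group could equivalently be written as:

    hog_pins_a: hog@0 {
            reg = <0>;
            fsl,pinmux-ids = <
                    MX23_PAD_GPMI_D08__GPIO_0_8
                    /* ... remaining pads as above ... */
            >;
            fsl,drive-strength = <MXS_DRIVE_4mA>;
            fsl,voltage = <MXS_VOLTAGE_HIGH>;
            fsl,pull-up = <MXS_PULL_DISABLE>;
    };
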
diff --git a/arch/arm/boot/dts/imx23-xfi3.dts b/arch/arm/boot/dts/imx23-xfi3.dts
new file mode 100644
index 000000000000..025cf949662d
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-xfi3.dts
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2013-2016 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/dts-v1/;
+#include "imx23.dtsi"
+
+/ {
+ model = "Creative ZEN X-Fi3";
+ compatible = "creative,x-fi3", "fsl,imx23";
+
+ memory {
+ reg = <0x40000000 0x04000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_vddio_sd0>;
+ cd-inverted;
+ status = "okay";
+ };
+
+ ssp1: ssp@80034000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_4bit_pins_a>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_GPMI_D07__GPIO_0_7
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ key_pins_a: keys@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_ROTARYA__GPIO_2_7
+ MX23_PAD_ROTARYB__GPIO_2_8
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ i2c: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_a>;
+ status = "okay";
+ };
+
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2_pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ auart1: serial@8006e000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart1_2pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+
+ lradc@80050000 {
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ usb0: usb@80080000 {
+ dr_mode = "peripheral";
+ status = "okay";
+ };
+ };
+
+ reg_vddio_sd0: regulator-vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio0 7 0>;
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 2 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_pins_a>;
+
+ voldown {
+ label = "volume-down";
+ linux,code = <114>;
+ gpios = <&gpio2 7 0>;
+ debounce-interval = <20>;
+ };
+
+ volup {
+ label = "volume-up";
+ linux,code = <115>;
+ gpios = <&gpio2 8 0>;
+ debounce-interval = <20>;
+ };
+ };
+};
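
The gpio-keys children above use raw input codes: 114 and 115 are KEY_VOLUMEDOWN and KEY_VOLUMEUP in the Linux input event codes. The same nodes could be written with the dt-bindings input macros for readability; a sketch, with the surrounding properties unchanged:

    #include <dt-bindings/input/input.h>

    voldown {
            label = "volume-down";
            linux,code = <KEY_VOLUMEDOWN>;          /* 114 */
            gpios = <&gpio2 7 0>;
            debounce-interval = <20>;
    };

    volup {
            label = "volume-up";
            linux,code = <KEY_VOLUMEUP>;            /* 115 */
            gpios = <&gpio2 8 0>;
            debounce-interval = <20>;
    };
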
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 302d1168f424..440ee9a4a158 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -111,6 +111,7 @@
gpio0: gpio@0 {
compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ reg = <0>;
interrupts = <16>;
gpio-controller;
#gpio-cells = <2>;
@@ -120,6 +121,7 @@
gpio1: gpio@1 {
compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ reg = <1>;
interrupts = <17>;
gpio-controller;
#gpio-cells = <2>;
@@ -129,6 +131,7 @@
gpio2: gpio@2 {
compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ reg = <2>;
interrupts = <18>;
gpio-controller;
#gpio-cells = <2>;
@@ -171,6 +174,17 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
+ auart1_2pins_a: auart1-2pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_GPMI_D14__AUART2_RX
+ MX23_PAD_GPMI_D15__AUART2_TX
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
gpmi_pins_a: gpmi-nand@0 {
reg = <0>;
fsl,pinmux-ids = <
@@ -249,6 +263,40 @@
fsl,pull-up = <MXS_PULL_DISABLE>;
};
+ mmc1_4bit_pins_a: mmc1-4bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_GPMI_D00__SSP2_DATA0
+ MX23_PAD_GPMI_D01__SSP2_DATA1
+ MX23_PAD_GPMI_D02__SSP2_DATA2
+ MX23_PAD_GPMI_D03__SSP2_DATA3
+ MX23_PAD_GPMI_RDY1__SSP2_CMD
+ MX23_PAD_GPMI_WRN__SSP2_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
+ mmc1_8bit_pins_a: mmc1-8bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX23_PAD_GPMI_D00__SSP2_DATA0
+ MX23_PAD_GPMI_D01__SSP2_DATA1
+ MX23_PAD_GPMI_D02__SSP2_DATA2
+ MX23_PAD_GPMI_D03__SSP2_DATA3
+ MX23_PAD_GPMI_D04__SSP2_DATA4
+ MX23_PAD_GPMI_D05__SSP2_DATA5
+ MX23_PAD_GPMI_D06__SSP2_DATA6
+ MX23_PAD_GPMI_D07__SSP2_DATA7
+ MX23_PAD_GPMI_RDY1__SSP2_CMD
+ MX23_PAD_GPMI_WRN__SSP2_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
pwm2_pins_a: pwm2@0 {
reg = <0>;
fsl,pinmux-ids = <
diff --git a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
index cda6907a27b9..9300711f1ea3 100644
--- a/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
+++ b/arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
@@ -161,14 +161,14 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index 9351296356dc..70292101ba03 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -298,7 +298,7 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx25-pinfunc.h b/arch/arm/boot/dts/imx25-pinfunc.h
index f96fa2df8f11..f840f03ad171 100644
--- a/arch/arm/boot/dts/imx25-pinfunc.h
+++ b/arch/arm/boot/dts/imx25-pinfunc.h
@@ -26,77 +26,77 @@
#define MX25_PAD_A13__GPIO_4_1 0x00c 0x22C 0x000 0x05 0x000
#define MX25_PAD_A13__LCDC_CLS 0x00c 0x22C 0x000 0x07 0x000
-#define MX25_PAD_A14__A14 0x010 0x230 0x000 0x10 0x000
-#define MX25_PAD_A14__GPIO_2_0 0x010 0x230 0x000 0x15 0x000
-#define MX25_PAD_A14__SIM1_CLK1 0x010 0x230 0x000 0x16 0x000
-#define MX25_PAD_A14__LCDC_SPL 0x010 0x230 0x000 0x17 0x000
-
-#define MX25_PAD_A15__A15 0x014 0x234 0x000 0x10 0x000
-#define MX25_PAD_A15__GPIO_2_1 0x014 0x234 0x000 0x15 0x000
-#define MX25_PAD_A15__SIM1_RST1 0x014 0x234 0x000 0x16 0x000
-#define MX25_PAD_A15__LCDC_PS 0x014 0x234 0x000 0x17 0x000
-
-#define MX25_PAD_A16__A16 0x018 0x000 0x000 0x10 0x000
-#define MX25_PAD_A16__GPIO_2_2 0x018 0x000 0x000 0x15 0x000
-#define MX25_PAD_A16__SIM1_VEN1 0x018 0x000 0x000 0x16 0x000
-#define MX25_PAD_A16__LCDC_REV 0x018 0x000 0x000 0x17 0x000
-
-#define MX25_PAD_A17__A17 0x01c 0x238 0x000 0x10 0x000
-#define MX25_PAD_A17__GPIO_2_3 0x01c 0x238 0x000 0x15 0x000
-#define MX25_PAD_A17__SIM1_TX 0x01c 0x238 0x554 0x16 0x000
-#define MX25_PAD_A17__FEC_TX_ERR 0x01c 0x238 0x000 0x17 0x000
-
-#define MX25_PAD_A18__A18 0x020 0x23c 0x000 0x10 0x000
-#define MX25_PAD_A18__GPIO_2_4 0x020 0x23c 0x000 0x15 0x000
-#define MX25_PAD_A18__SIM1_PD1 0x020 0x23c 0x550 0x16 0x000
-#define MX25_PAD_A18__FEC_COL 0x020 0x23c 0x504 0x17 0x000
-
-#define MX25_PAD_A19__A19 0x024 0x240 0x000 0x10 0x000
-#define MX25_PAD_A19__GPIO_2_5 0x024 0x240 0x000 0x15 0x000
-#define MX25_PAD_A19__SIM1_RX1 0x024 0x240 0x54c 0x16 0x000
-#define MX25_PAD_A19__FEC_RX_ERR 0x024 0x240 0x518 0x17 0x000
-
-#define MX25_PAD_A20__A20 0x028 0x244 0x000 0x10 0x000
-#define MX25_PAD_A20__GPIO_2_6 0x028 0x244 0x000 0x15 0x000
-#define MX25_PAD_A20__SIM2_CLK1 0x028 0x244 0x000 0x16 0x000
-#define MX25_PAD_A20__FEC_RDATA2 0x028 0x244 0x50c 0x17 0x000
-
-#define MX25_PAD_A21__A21 0x02c 0x248 0x000 0x10 0x000
-#define MX25_PAD_A21__GPIO_2_7 0x02c 0x248 0x000 0x15 0x000
-#define MX25_PAD_A21__SIM2_RST1 0x02c 0x248 0x000 0x16 0x000
-#define MX25_PAD_A21__FEC_RDATA3 0x02c 0x248 0x510 0x17 0x000
-
-#define MX25_PAD_A22__A22 0x030 0x000 0x000 0x10 0x000
-#define MX25_PAD_A22__GPIO_2_8 0x030 0x000 0x000 0x15 0x000
-#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x17 0x000
-#define MX25_PAD_A22__SIM2_VEN1 0x030 0x000 0x000 0x16 0x000
-#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x17 0x000
-
-#define MX25_PAD_A23__A23 0x034 0x24c 0x000 0x10 0x000
-#define MX25_PAD_A23__GPIO_2_9 0x034 0x24c 0x000 0x15 0x000
-#define MX25_PAD_A23__SIM2_TX1 0x034 0x24c 0x560 0x16 0x000
-#define MX25_PAD_A23__FEC_TDATA3 0x034 0x24c 0x000 0x17 0x000
-
-#define MX25_PAD_A24__A24 0x038 0x250 0x000 0x10 0x000
-#define MX25_PAD_A24__GPIO_2_10 0x038 0x250 0x000 0x15 0x000
-#define MX25_PAD_A24__SIM2_PD1 0x038 0x250 0x55c 0x16 0x000
-#define MX25_PAD_A24__FEC_RX_CLK 0x038 0x250 0x514 0x17 0x000
-
-#define MX25_PAD_A25__A25 0x03c 0x254 0x000 0x10 0x000
-#define MX25_PAD_A25__GPIO_2_11 0x03c 0x254 0x000 0x15 0x000
-#define MX25_PAD_A25__FEC_CRS 0x03c 0x254 0x508 0x17 0x000
-
-#define MX25_PAD_EB0__EB0 0x040 0x258 0x000 0x10 0x000
-#define MX25_PAD_EB0__AUD4_TXD 0x040 0x258 0x464 0x14 0x000
-#define MX25_PAD_EB0__GPIO_2_12 0x040 0x258 0x000 0x15 0x000
-
-#define MX25_PAD_EB1__EB1 0x044 0x25c 0x000 0x10 0x000
-#define MX25_PAD_EB1__AUD4_RXD 0x044 0x25c 0x460 0x14 0x000
-#define MX25_PAD_EB1__GPIO_2_13 0x044 0x25c 0x000 0x15 0x000
-
-#define MX25_PAD_OE__OE 0x048 0x260 0x000 0x10 0x000
-#define MX25_PAD_OE__AUD4_TXC 0x048 0x260 0x000 0x14 0x000
-#define MX25_PAD_OE__GPIO_2_14 0x048 0x260 0x000 0x15 0x000
+#define MX25_PAD_A14__A14 0x010 0x230 0x000 0x00 0x000
+#define MX25_PAD_A14__GPIO_2_0 0x010 0x230 0x000 0x05 0x000
+#define MX25_PAD_A14__SIM1_CLK1 0x010 0x230 0x000 0x06 0x000
+#define MX25_PAD_A14__LCDC_SPL 0x010 0x230 0x000 0x07 0x000
+
+#define MX25_PAD_A15__A15 0x014 0x234 0x000 0x00 0x000
+#define MX25_PAD_A15__GPIO_2_1 0x014 0x234 0x000 0x05 0x000
+#define MX25_PAD_A15__SIM1_RST1 0x014 0x234 0x000 0x06 0x000
+#define MX25_PAD_A15__LCDC_PS 0x014 0x234 0x000 0x07 0x000
+
+#define MX25_PAD_A16__A16 0x018 0x000 0x000 0x00 0x000
+#define MX25_PAD_A16__GPIO_2_2 0x018 0x000 0x000 0x05 0x000
+#define MX25_PAD_A16__SIM1_VEN1 0x018 0x000 0x000 0x06 0x000
+#define MX25_PAD_A16__LCDC_REV 0x018 0x000 0x000 0x07 0x000
+
+#define MX25_PAD_A17__A17 0x01c 0x238 0x000 0x00 0x000
+#define MX25_PAD_A17__GPIO_2_3 0x01c 0x238 0x000 0x05 0x000
+#define MX25_PAD_A17__SIM1_TX 0x01c 0x238 0x554 0x06 0x000
+#define MX25_PAD_A17__FEC_TX_ERR 0x01c 0x238 0x000 0x07 0x000
+
+#define MX25_PAD_A18__A18 0x020 0x23c 0x000 0x00 0x000
+#define MX25_PAD_A18__GPIO_2_4 0x020 0x23c 0x000 0x05 0x000
+#define MX25_PAD_A18__SIM1_PD1 0x020 0x23c 0x550 0x06 0x000
+#define MX25_PAD_A18__FEC_COL 0x020 0x23c 0x504 0x07 0x000
+
+#define MX25_PAD_A19__A19 0x024 0x240 0x000 0x00 0x000
+#define MX25_PAD_A19__GPIO_2_5 0x024 0x240 0x000 0x05 0x000
+#define MX25_PAD_A19__SIM1_RX1 0x024 0x240 0x54c 0x06 0x000
+#define MX25_PAD_A19__FEC_RX_ERR 0x024 0x240 0x518 0x07 0x000
+
+#define MX25_PAD_A20__A20 0x028 0x244 0x000 0x00 0x000
+#define MX25_PAD_A20__GPIO_2_6 0x028 0x244 0x000 0x05 0x000
+#define MX25_PAD_A20__SIM2_CLK1 0x028 0x244 0x000 0x06 0x000
+#define MX25_PAD_A20__FEC_RDATA2 0x028 0x244 0x50c 0x07 0x000
+
+#define MX25_PAD_A21__A21 0x02c 0x248 0x000 0x00 0x000
+#define MX25_PAD_A21__GPIO_2_7 0x02c 0x248 0x000 0x05 0x000
+#define MX25_PAD_A21__SIM2_RST1 0x02c 0x248 0x000 0x06 0x000
+#define MX25_PAD_A21__FEC_RDATA3 0x02c 0x248 0x510 0x07 0x000
+
+#define MX25_PAD_A22__A22 0x030 0x000 0x000 0x00 0x000
+#define MX25_PAD_A22__GPIO_2_8 0x030 0x000 0x000 0x05 0x000
+#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x07 0x000
+#define MX25_PAD_A22__SIM2_VEN1 0x030 0x000 0x000 0x06 0x000
+#define MX25_PAD_A22__FEC_TDATA2 0x030 0x000 0x000 0x07 0x000
+
+#define MX25_PAD_A23__A23 0x034 0x24c 0x000 0x00 0x000
+#define MX25_PAD_A23__GPIO_2_9 0x034 0x24c 0x000 0x05 0x000
+#define MX25_PAD_A23__SIM2_TX1 0x034 0x24c 0x560 0x06 0x000
+#define MX25_PAD_A23__FEC_TDATA3 0x034 0x24c 0x000 0x07 0x000
+
+#define MX25_PAD_A24__A24 0x038 0x250 0x000 0x00 0x000
+#define MX25_PAD_A24__GPIO_2_10 0x038 0x250 0x000 0x05 0x000
+#define MX25_PAD_A24__SIM2_PD1 0x038 0x250 0x55c 0x06 0x000
+#define MX25_PAD_A24__FEC_RX_CLK 0x038 0x250 0x514 0x07 0x000
+
+#define MX25_PAD_A25__A25 0x03c 0x254 0x000 0x00 0x000
+#define MX25_PAD_A25__GPIO_2_11 0x03c 0x254 0x000 0x05 0x000
+#define MX25_PAD_A25__FEC_CRS 0x03c 0x254 0x508 0x07 0x000
+
+#define MX25_PAD_EB0__EB0 0x040 0x258 0x000 0x00 0x000
+#define MX25_PAD_EB0__AUD4_TXD 0x040 0x258 0x464 0x04 0x000
+#define MX25_PAD_EB0__GPIO_2_12 0x040 0x258 0x000 0x05 0x000
+
+#define MX25_PAD_EB1__EB1 0x044 0x25c 0x000 0x00 0x000
+#define MX25_PAD_EB1__AUD4_RXD 0x044 0x25c 0x460 0x04 0x000
+#define MX25_PAD_EB1__GPIO_2_13 0x044 0x25c 0x000 0x05 0x000
+
+#define MX25_PAD_OE__OE 0x048 0x260 0x000 0x00 0x000
+#define MX25_PAD_OE__AUD4_TXC 0x048 0x260 0x000 0x04 0x000
+#define MX25_PAD_OE__GPIO_2_14 0x048 0x260 0x000 0x05 0x000
#define MX25_PAD_CS0__CS0 0x04c 0x000 0x000 0x00 0x000
#define MX25_PAD_CS0__GPIO_4_2 0x04c 0x000 0x000 0x05 0x000
@@ -105,51 +105,51 @@
#define MX25_PAD_CS1__NF_CE3 0x050 0x000 0x000 0x01 0x000
#define MX25_PAD_CS1__GPIO_4_3 0x050 0x000 0x000 0x05 0x000
-#define MX25_PAD_CS4__CS4 0x054 0x264 0x000 0x10 0x000
+#define MX25_PAD_CS4__CS4 0x054 0x264 0x000 0x00 0x000
#define MX25_PAD_CS4__NF_CE1 0x054 0x264 0x000 0x01 0x000
-#define MX25_PAD_CS4__UART5_CTS 0x054 0x264 0x000 0x13 0x000
-#define MX25_PAD_CS4__GPIO_3_20 0x054 0x264 0x000 0x15 0x000
+#define MX25_PAD_CS4__UART5_CTS 0x054 0x264 0x000 0x03 0x000
+#define MX25_PAD_CS4__GPIO_3_20 0x054 0x264 0x000 0x05 0x000
#define MX25_PAD_CS5__CS5 0x058 0x268 0x000 0x00 0x000
#define MX25_PAD_CS5__NF_CE2 0x058 0x268 0x000 0x01 0x000
#define MX25_PAD_CS5__UART5_RTS 0x058 0x268 0x574 0x03 0x000
#define MX25_PAD_CS5__GPIO_3_21 0x058 0x268 0x000 0x05 0x000
-#define MX25_PAD_NF_CE0__NF_CE0 0x05c 0x26c 0x000 0x10 0x000
-#define MX25_PAD_NF_CE0__GPIO_3_22 0x05c 0x26c 0x000 0x15 0x000
+#define MX25_PAD_NF_CE0__NF_CE0 0x05c 0x26c 0x000 0x00 0x000
+#define MX25_PAD_NF_CE0__GPIO_3_22 0x05c 0x26c 0x000 0x05 0x000
-#define MX25_PAD_ECB__ECB 0x060 0x270 0x000 0x10 0x000
-#define MX25_PAD_ECB__UART5_TXD 0x060 0x270 0x000 0x13 0x000
-#define MX25_PAD_ECB__GPIO_3_23 0x060 0x270 0x000 0x15 0x000
+#define MX25_PAD_ECB__ECB 0x060 0x270 0x000 0x00 0x000
+#define MX25_PAD_ECB__UART5_TXD 0x060 0x270 0x000 0x03 0x000
+#define MX25_PAD_ECB__GPIO_3_23 0x060 0x270 0x000 0x05 0x000
-#define MX25_PAD_LBA__LBA 0x064 0x274 0x000 0x10 0x000
-#define MX25_PAD_LBA__UART5_RXD 0x064 0x274 0x578 0x13 0x000
-#define MX25_PAD_LBA__GPIO_3_24 0x064 0x274 0x000 0x15 0x000
+#define MX25_PAD_LBA__LBA 0x064 0x274 0x000 0x00 0x000
+#define MX25_PAD_LBA__UART5_RXD 0x064 0x274 0x578 0x03 0x000
+#define MX25_PAD_LBA__GPIO_3_24 0x064 0x274 0x000 0x05 0x000
#define MX25_PAD_BCLK__BCLK 0x068 0x000 0x000 0x00 0x000
#define MX25_PAD_BCLK__GPIO_4_4 0x068 0x000 0x000 0x05 0x000
-#define MX25_PAD_RW__RW 0x06c 0x278 0x000 0x10 0x000
-#define MX25_PAD_RW__AUD4_TXFS 0x06c 0x278 0x474 0x14 0x000
-#define MX25_PAD_RW__GPIO_3_25 0x06c 0x278 0x000 0x15 0x000
+#define MX25_PAD_RW__RW 0x06c 0x278 0x000 0x00 0x000
+#define MX25_PAD_RW__AUD4_TXFS 0x06c 0x278 0x474 0x04 0x000
+#define MX25_PAD_RW__GPIO_3_25 0x06c 0x278 0x000 0x05 0x000
-#define MX25_PAD_NFWE_B__NFWE_B 0x070 0x000 0x000 0x10 0x000
-#define MX25_PAD_NFWE_B__GPIO_3_26 0x070 0x000 0x000 0x15 0x000
+#define MX25_PAD_NFWE_B__NFWE_B 0x070 0x000 0x000 0x00 0x000
+#define MX25_PAD_NFWE_B__GPIO_3_26 0x070 0x000 0x000 0x05 0x000
-#define MX25_PAD_NFRE_B__NFRE_B 0x074 0x000 0x000 0x10 0x000
-#define MX25_PAD_NFRE_B__GPIO_3_27 0x074 0x000 0x000 0x15 0x000
+#define MX25_PAD_NFRE_B__NFRE_B 0x074 0x000 0x000 0x00 0x000
+#define MX25_PAD_NFRE_B__GPIO_3_27 0x074 0x000 0x000 0x05 0x000
-#define MX25_PAD_NFALE__NFALE 0x078 0x000 0x000 0x10 0x000
-#define MX25_PAD_NFALE__GPIO_3_28 0x078 0x000 0x000 0x15 0x000
+#define MX25_PAD_NFALE__NFALE 0x078 0x000 0x000 0x00 0x000
+#define MX25_PAD_NFALE__GPIO_3_28 0x078 0x000 0x000 0x05 0x000
-#define MX25_PAD_NFCLE__NFCLE 0x07c 0x000 0x000 0x10 0x000
-#define MX25_PAD_NFCLE__GPIO_3_29 0x07c 0x000 0x000 0x15 0x000
+#define MX25_PAD_NFCLE__NFCLE 0x07c 0x000 0x000 0x00 0x000
+#define MX25_PAD_NFCLE__GPIO_3_29 0x07c 0x000 0x000 0x05 0x000
-#define MX25_PAD_NFWP_B__NFWP_B 0x080 0x000 0x000 0x10 0x000
-#define MX25_PAD_NFWP_B__GPIO_3_30 0x080 0x000 0x000 0x15 0x000
+#define MX25_PAD_NFWP_B__NFWP_B 0x080 0x000 0x000 0x00 0x000
+#define MX25_PAD_NFWP_B__GPIO_3_30 0x080 0x000 0x000 0x05 0x000
-#define MX25_PAD_NFRB__NFRB 0x084 0x27c 0x000 0x10 0x000
-#define MX25_PAD_NFRB__GPIO_3_31 0x084 0x27c 0x000 0x15 0x000
+#define MX25_PAD_NFRB__NFRB 0x084 0x27c 0x000 0x00 0x000
+#define MX25_PAD_NFRB__GPIO_3_31 0x084 0x27c 0x000 0x05 0x000
#define MX25_PAD_D15__D15 0x088 0x280 0x000 0x00 0x000
#define MX25_PAD_D15__LD16 0x088 0x280 0x000 0x01 0x000
@@ -210,101 +210,101 @@
#define MX25_PAD_D0__D0 0x0c4 0x2bc 0x000 0x00 0x000
#define MX25_PAD_D0__GPIO_4_20 0x0c4 0x2bc 0x000 0x05 0x000
-#define MX25_PAD_LD0__LD0 0x0c8 0x2c0 0x000 0x10 0x000
-#define MX25_PAD_LD0__CSI_D0 0x0c8 0x2c0 0x488 0x12 0x000
-#define MX25_PAD_LD0__GPIO_2_15 0x0c8 0x2c0 0x000 0x15 0x000
+#define MX25_PAD_LD0__LD0 0x0c8 0x2c0 0x000 0x00 0x000
+#define MX25_PAD_LD0__CSI_D0 0x0c8 0x2c0 0x488 0x02 0x000
+#define MX25_PAD_LD0__GPIO_2_15 0x0c8 0x2c0 0x000 0x05 0x000
-#define MX25_PAD_LD1__LD1 0x0cc 0x2c4 0x000 0x10 0x000
-#define MX25_PAD_LD1__CSI_D1 0x0cc 0x2c4 0x48c 0x12 0x000
-#define MX25_PAD_LD1__GPIO_2_16 0x0cc 0x2c4 0x000 0x15 0x000
+#define MX25_PAD_LD1__LD1 0x0cc 0x2c4 0x000 0x00 0x000
+#define MX25_PAD_LD1__CSI_D1 0x0cc 0x2c4 0x48c 0x02 0x000
+#define MX25_PAD_LD1__GPIO_2_16 0x0cc 0x2c4 0x000 0x05 0x000
-#define MX25_PAD_LD2__LD2 0x0d0 0x2c8 0x000 0x10 0x000
-#define MX25_PAD_LD2__GPIO_2_17 0x0d0 0x2c8 0x000 0x15 0x000
+#define MX25_PAD_LD2__LD2 0x0d0 0x2c8 0x000 0x00 0x000
+#define MX25_PAD_LD2__GPIO_2_17 0x0d0 0x2c8 0x000 0x05 0x000
-#define MX25_PAD_LD3__LD3 0x0d4 0x2cc 0x000 0x10 0x000
-#define MX25_PAD_LD3__GPIO_2_18 0x0d4 0x2cc 0x000 0x15 0x000
+#define MX25_PAD_LD3__LD3 0x0d4 0x2cc 0x000 0x00 0x000
+#define MX25_PAD_LD3__GPIO_2_18 0x0d4 0x2cc 0x000 0x05 0x000
-#define MX25_PAD_LD4__LD4 0x0d8 0x2d0 0x000 0x10 0x000
-#define MX25_PAD_LD4__GPIO_2_19 0x0d8 0x2d0 0x000 0x15 0x000
+#define MX25_PAD_LD4__LD4 0x0d8 0x2d0 0x000 0x00 0x000
+#define MX25_PAD_LD4__GPIO_2_19 0x0d8 0x2d0 0x000 0x05 0x000
-#define MX25_PAD_LD5__LD5 0x0dc 0x2d4 0x000 0x10 0x000
-#define MX25_PAD_LD5__GPIO_1_19 0x0dc 0x2d4 0x000 0x15 0x000
+#define MX25_PAD_LD5__LD5 0x0dc 0x2d4 0x000 0x00 0x000
+#define MX25_PAD_LD5__GPIO_1_19 0x0dc 0x2d4 0x000 0x05 0x000
-#define MX25_PAD_LD6__LD6 0x0e0 0x2d8 0x000 0x10 0x000
-#define MX25_PAD_LD6__GPIO_1_20 0x0e0 0x2d8 0x000 0x15 0x000
+#define MX25_PAD_LD6__LD6 0x0e0 0x2d8 0x000 0x00 0x000
+#define MX25_PAD_LD6__GPIO_1_20 0x0e0 0x2d8 0x000 0x05 0x000
-#define MX25_PAD_LD7__LD7 0x0e4 0x2dc 0x000 0x10 0x000
-#define MX25_PAD_LD7__GPIO_1_21 0x0e4 0x2dc 0x000 0x15 0x000
+#define MX25_PAD_LD7__LD7 0x0e4 0x2dc 0x000 0x00 0x000
+#define MX25_PAD_LD7__GPIO_1_21 0x0e4 0x2dc 0x000 0x05 0x000
-#define MX25_PAD_LD8__LD8 0x0e8 0x2e0 0x000 0x10 0x000
-#define MX25_PAD_LD8__UART4_RXD 0x0e8 0x2e0 0x570 0x12 0x000
-#define MX25_PAD_LD8__FEC_TX_ERR 0x0e8 0x2e0 0x000 0x15 0x000
+#define MX25_PAD_LD8__LD8 0x0e8 0x2e0 0x000 0x00 0x000
+#define MX25_PAD_LD8__UART4_RXD 0x0e8 0x2e0 0x570 0x02 0x000
+#define MX25_PAD_LD8__FEC_TX_ERR 0x0e8 0x2e0 0x000 0x05 0x000
#define MX25_PAD_LD8__SDHC2_CMD 0x0e8 0x2e0 0x4e0 0x06 0x000
-#define MX25_PAD_LD9__LD9 0x0ec 0x2e4 0x000 0x10 0x000
-#define MX25_PAD_LD9__UART4_TXD 0x0ec 0x2e4 0x000 0x12 0x000
-#define MX25_PAD_LD9__FEC_COL 0x0ec 0x2e4 0x504 0x15 0x001
+#define MX25_PAD_LD9__LD9 0x0ec 0x2e4 0x000 0x00 0x000
+#define MX25_PAD_LD9__UART4_TXD 0x0ec 0x2e4 0x000 0x02 0x000
+#define MX25_PAD_LD9__FEC_COL 0x0ec 0x2e4 0x504 0x05 0x001
#define MX25_PAD_LD9__SDHC2_CLK 0x0ec 0x2e4 0x4dc 0x06 0x000
#define MX25_PAD_LD10__LD10 0x0f0 0x2e8 0x000 0x00 0x000
#define MX25_PAD_LD10__UART4_RTS 0x0f0 0x2e8 0x56c 0x02 0x000
#define MX25_PAD_LD10__FEC_RX_ERR 0x0f0 0x2e8 0x518 0x05 0x001
-#define MX25_PAD_LD11__LD11 0x0f4 0x2ec 0x000 0x10 0x000
-#define MX25_PAD_LD11__UART4_CTS 0x0f4 0x2ec 0x000 0x12 0x000
-#define MX25_PAD_LD11__FEC_RDATA2 0x0f4 0x2ec 0x50c 0x15 0x001
+#define MX25_PAD_LD11__LD11 0x0f4 0x2ec 0x000 0x00 0x000
+#define MX25_PAD_LD11__UART4_CTS 0x0f4 0x2ec 0x000 0x02 0x000
+#define MX25_PAD_LD11__FEC_RDATA2 0x0f4 0x2ec 0x50c 0x05 0x001
#define MX25_PAD_LD11__SDHC2_DAT1 0x0f4 0x2ec 0x4e8 0x06 0x000
-#define MX25_PAD_LD12__LD12 0x0f8 0x2f0 0x000 0x10 0x000
+#define MX25_PAD_LD12__LD12 0x0f8 0x2f0 0x000 0x00 0x000
#define MX25_PAD_LD12__CSPI2_MOSI 0x0f8 0x2f0 0x4a0 0x02 0x000
-#define MX25_PAD_LD12__FEC_RDATA3 0x0f8 0x2f0 0x510 0x15 0x001
+#define MX25_PAD_LD12__FEC_RDATA3 0x0f8 0x2f0 0x510 0x05 0x001
-#define MX25_PAD_LD13__LD13 0x0fc 0x2f4 0x000 0x10 0x000
+#define MX25_PAD_LD13__LD13 0x0fc 0x2f4 0x000 0x00 0x000
#define MX25_PAD_LD13__CSPI2_MISO 0x0fc 0x2f4 0x49c 0x02 0x000
-#define MX25_PAD_LD13__FEC_TDATA2 0x0fc 0x2f4 0x000 0x15 0x000
+#define MX25_PAD_LD13__FEC_TDATA2 0x0fc 0x2f4 0x000 0x05 0x000
-#define MX25_PAD_LD14__LD14 0x100 0x2f8 0x000 0x10 0x000
+#define MX25_PAD_LD14__LD14 0x100 0x2f8 0x000 0x00 0x000
#define MX25_PAD_LD14__CSPI2_SCLK 0x100 0x2f8 0x494 0x02 0x000
-#define MX25_PAD_LD14__FEC_TDATA3 0x100 0x2f8 0x000 0x15 0x000
+#define MX25_PAD_LD14__FEC_TDATA3 0x100 0x2f8 0x000 0x05 0x000
-#define MX25_PAD_LD15__LD15 0x104 0x2fc 0x000 0x10 0x000
+#define MX25_PAD_LD15__LD15 0x104 0x2fc 0x000 0x00 0x000
#define MX25_PAD_LD15__CSPI2_RDY 0x104 0x2fc 0x498 0x02 0x000
-#define MX25_PAD_LD15__FEC_RX_CLK 0x104 0x2fc 0x514 0x15 0x001
+#define MX25_PAD_LD15__FEC_RX_CLK 0x104 0x2fc 0x514 0x05 0x001
-#define MX25_PAD_HSYNC__HSYNC 0x108 0x300 0x000 0x10 0x000
-#define MX25_PAD_HSYNC__GPIO_1_22 0x108 0x300 0x000 0x15 0x000
+#define MX25_PAD_HSYNC__HSYNC 0x108 0x300 0x000 0x00 0x000
+#define MX25_PAD_HSYNC__GPIO_1_22 0x108 0x300 0x000 0x05 0x000
-#define MX25_PAD_VSYNC__VSYNC 0x10c 0x304 0x000 0x10 0x000
-#define MX25_PAD_VSYNC__GPIO_1_23 0x10c 0x304 0x000 0x15 0x000
+#define MX25_PAD_VSYNC__VSYNC 0x10c 0x304 0x000 0x00 0x000
+#define MX25_PAD_VSYNC__GPIO_1_23 0x10c 0x304 0x000 0x05 0x000
-#define MX25_PAD_LSCLK__LSCLK 0x110 0x308 0x000 0x10 0x000
-#define MX25_PAD_LSCLK__GPIO_1_24 0x110 0x308 0x000 0x15 0x000
+#define MX25_PAD_LSCLK__LSCLK 0x110 0x308 0x000 0x00 0x000
+#define MX25_PAD_LSCLK__GPIO_1_24 0x110 0x308 0x000 0x05 0x000
-#define MX25_PAD_OE_ACD__OE_ACD 0x114 0x30c 0x000 0x10 0x000
+#define MX25_PAD_OE_ACD__OE_ACD 0x114 0x30c 0x000 0x00 0x000
#define MX25_PAD_OE_ACD__CSPI2_SS0 0x114 0x30c 0x4a4 0x02 0x000
-#define MX25_PAD_OE_ACD__GPIO_1_25 0x114 0x30c 0x000 0x15 0x000
+#define MX25_PAD_OE_ACD__GPIO_1_25 0x114 0x30c 0x000 0x05 0x000
-#define MX25_PAD_CONTRAST__CONTRAST 0x118 0x310 0x000 0x10 0x000
-#define MX25_PAD_CONTRAST__CC4 0x118 0x310 0x000 0x11 0x000
-#define MX25_PAD_CONTRAST__PWM4_PWMO 0x118 0x310 0x000 0x14 0x000
-#define MX25_PAD_CONTRAST__FEC_CRS 0x118 0x310 0x508 0x15 0x001
-#define MX25_PAD_CONTRAST__USBH2_PWR 0x118 0x310 0x000 0x16 0x000
+#define MX25_PAD_CONTRAST__CONTRAST 0x118 0x310 0x000 0x00 0x000
+#define MX25_PAD_CONTRAST__CC4 0x118 0x310 0x000 0x01 0x000
+#define MX25_PAD_CONTRAST__PWM4_PWMO 0x118 0x310 0x000 0x04 0x000
+#define MX25_PAD_CONTRAST__FEC_CRS 0x118 0x310 0x508 0x05 0x001
+#define MX25_PAD_CONTRAST__USBH2_PWR 0x118 0x310 0x000 0x06 0x000
-#define MX25_PAD_PWM__PWM 0x11c 0x314 0x000 0x10 0x000
-#define MX25_PAD_PWM__GPIO_1_26 0x11c 0x314 0x000 0x15 0x000
-#define MX25_PAD_PWM__USBH2_OC 0x11c 0x314 0x580 0x16 0x001
+#define MX25_PAD_PWM__PWM 0x11c 0x314 0x000 0x00 0x000
+#define MX25_PAD_PWM__GPIO_1_26 0x11c 0x314 0x000 0x05 0x000
+#define MX25_PAD_PWM__USBH2_OC 0x11c 0x314 0x580 0x06 0x001
-#define MX25_PAD_CSI_D2__CSI_D2 0x120 0x318 0x000 0x10 0x000
-#define MX25_PAD_CSI_D2__UART5_RXD 0x120 0x318 0x578 0x11 0x001
+#define MX25_PAD_CSI_D2__CSI_D2 0x120 0x318 0x000 0x00 0x000
+#define MX25_PAD_CSI_D2__UART5_RXD 0x120 0x318 0x578 0x01 0x001
#define MX25_PAD_CSI_D2__SIM1_CLK0 0x120 0x318 0x000 0x04 0x000
-#define MX25_PAD_CSI_D2__GPIO_1_27 0x120 0x318 0x000 0x15 0x000
-#define MX25_PAD_CSI_D2__CSPI3_MOSI 0x120 0x318 0x000 0x17 0x000
+#define MX25_PAD_CSI_D2__GPIO_1_27 0x120 0x318 0x000 0x05 0x000
+#define MX25_PAD_CSI_D2__CSPI3_MOSI 0x120 0x318 0x000 0x07 0x000
-#define MX25_PAD_CSI_D3__CSI_D3 0x124 0x31c 0x000 0x10 0x000
-#define MX25_PAD_CSI_D3__UART5_TXD 0x124 0x31c 0x000 0x11 0x000
+#define MX25_PAD_CSI_D3__CSI_D3 0x124 0x31c 0x000 0x00 0x000
+#define MX25_PAD_CSI_D3__UART5_TXD 0x124 0x31c 0x000 0x01 0x000
#define MX25_PAD_CSI_D3__SIM1_RST0 0x124 0x31c 0x000 0x04 0x000
-#define MX25_PAD_CSI_D3__GPIO_1_28 0x124 0x31c 0x000 0x15 0x000
-#define MX25_PAD_CSI_D3__CSPI3_MISO 0x124 0x31c 0x4b4 0x17 0x001
+#define MX25_PAD_CSI_D3__GPIO_1_28 0x124 0x31c 0x000 0x05 0x000
+#define MX25_PAD_CSI_D3__CSPI3_MISO 0x124 0x31c 0x4b4 0x07 0x001
#define MX25_PAD_CSI_D4__CSI_D4 0x128 0x320 0x000 0x00 0x000
#define MX25_PAD_CSI_D4__UART5_RTS 0x128 0x320 0x574 0x01 0x001
@@ -312,80 +312,80 @@
#define MX25_PAD_CSI_D4__GPIO_1_29 0x128 0x320 0x000 0x05 0x000
#define MX25_PAD_CSI_D4__CSPI3_SCLK 0x128 0x320 0x000 0x07 0x000
-#define MX25_PAD_CSI_D5__CSI_D5 0x12c 0x324 0x000 0x10 0x000
-#define MX25_PAD_CSI_D5__UART5_CTS 0x12c 0x324 0x000 0x11 0x000
+#define MX25_PAD_CSI_D5__CSI_D5 0x12c 0x324 0x000 0x00 0x000
+#define MX25_PAD_CSI_D5__UART5_CTS 0x12c 0x324 0x000 0x01 0x000
#define MX25_PAD_CSI_D5__SIM1_TX0 0x12c 0x324 0x000 0x04 0x000
-#define MX25_PAD_CSI_D5__GPIO_1_30 0x12c 0x324 0x000 0x15 0x000
-#define MX25_PAD_CSI_D5__CSPI3_RDY 0x12c 0x324 0x000 0x17 0x000
+#define MX25_PAD_CSI_D5__GPIO_1_30 0x12c 0x324 0x000 0x05 0x000
+#define MX25_PAD_CSI_D5__CSPI3_RDY 0x12c 0x324 0x000 0x07 0x000
-#define MX25_PAD_CSI_D6__CSI_D6 0x130 0x328 0x000 0x10 0x000
-#define MX25_PAD_CSI_D6__SDHC2_CMD 0x130 0x328 0x4e0 0x12 0x001
+#define MX25_PAD_CSI_D6__CSI_D6 0x130 0x328 0x000 0x00 0x000
+#define MX25_PAD_CSI_D6__SDHC2_CMD 0x130 0x328 0x4e0 0x02 0x001
#define MX25_PAD_CSI_D6__SIM1_PD0 0x130 0x328 0x000 0x04 0x000
-#define MX25_PAD_CSI_D6__GPIO_1_31 0x130 0x328 0x000 0x15 0x000
+#define MX25_PAD_CSI_D6__GPIO_1_31 0x130 0x328 0x000 0x05 0x000
-#define MX25_PAD_CSI_D7__CSI_D7 0x134 0x32c 0x000 0x10 0x000
-#define MX25_PAD_CSI_D7__SDHC2_DAT_CLK 0x134 0x32C 0x4dc 0x12 0x001
-#define MX25_PAD_CSI_D7__GPIO_1_6 0x134 0x32c 0x000 0x15 0x000
+#define MX25_PAD_CSI_D7__CSI_D7 0x134 0x32c 0x000 0x00 0x000
+#define MX25_PAD_CSI_D7__SDHC2_DAT_CLK 0x134 0x32C 0x4dc 0x02 0x001
+#define MX25_PAD_CSI_D7__GPIO_1_6 0x134 0x32c 0x000 0x05 0x000
-#define MX25_PAD_CSI_D8__CSI_D8 0x138 0x330 0x000 0x10 0x000
-#define MX25_PAD_CSI_D8__AUD6_RXC 0x138 0x330 0x000 0x12 0x000
-#define MX25_PAD_CSI_D8__GPIO_1_7 0x138 0x330 0x000 0x15 0x000
-#define MX25_PAD_CSI_D8__CSPI3_SS2 0x138 0x330 0x4c4 0x17 0x000
+#define MX25_PAD_CSI_D8__CSI_D8 0x138 0x330 0x000 0x00 0x000
+#define MX25_PAD_CSI_D8__AUD6_RXC 0x138 0x330 0x000 0x02 0x000
+#define MX25_PAD_CSI_D8__GPIO_1_7 0x138 0x330 0x000 0x05 0x000
+#define MX25_PAD_CSI_D8__CSPI3_SS2 0x138 0x330 0x4c4 0x07 0x000
-#define MX25_PAD_CSI_D9__CSI_D9 0x13c 0x334 0x000 0x10 0x000
-#define MX25_PAD_CSI_D9__AUD6_RXFS 0x13c 0x334 0x000 0x12 0x000
-#define MX25_PAD_CSI_D9__GPIO_4_21 0x13c 0x334 0x000 0x15 0x000
-#define MX25_PAD_CSI_D9__CSPI3_SS3 0x13c 0x334 0x4c8 0x17 0x000
+#define MX25_PAD_CSI_D9__CSI_D9 0x13c 0x334 0x000 0x00 0x000
+#define MX25_PAD_CSI_D9__AUD6_RXFS 0x13c 0x334 0x000 0x02 0x000
+#define MX25_PAD_CSI_D9__GPIO_4_21 0x13c 0x334 0x000 0x05 0x000
+#define MX25_PAD_CSI_D9__CSPI3_SS3 0x13c 0x334 0x4c8 0x07 0x000
-#define MX25_PAD_CSI_MCLK__CSI_MCLK 0x140 0x338 0x000 0x10 0x000
-#define MX25_PAD_CSI_MCLK__AUD6_TXD 0x140 0x338 0x000 0x11 0x000
-#define MX25_PAD_CSI_MCLK__SDHC2_DAT0 0x140 0x338 0x4e4 0x12 0x001
-#define MX25_PAD_CSI_MCLK__GPIO_1_8 0x140 0x338 0x000 0x15 0x000
+#define MX25_PAD_CSI_MCLK__CSI_MCLK 0x140 0x338 0x000 0x00 0x000
+#define MX25_PAD_CSI_MCLK__AUD6_TXD 0x140 0x338 0x000 0x01 0x000
+#define MX25_PAD_CSI_MCLK__SDHC2_DAT0 0x140 0x338 0x4e4 0x02 0x001
+#define MX25_PAD_CSI_MCLK__GPIO_1_8 0x140 0x338 0x000 0x05 0x000
-#define MX25_PAD_CSI_VSYNC__CSI_VSYNC 0x144 0x33c 0x000 0x10 0x000
-#define MX25_PAD_CSI_VSYNC__AUD6_RXD 0x144 0x33c 0x000 0x11 0x000
-#define MX25_PAD_CSI_VSYNC__SDHC2_DAT1 0x144 0x33c 0x4e8 0x12 0x001
-#define MX25_PAD_CSI_VSYNC__GPIO_1_9 0x144 0x33c 0x000 0x15 0x000
+#define MX25_PAD_CSI_VSYNC__CSI_VSYNC 0x144 0x33c 0x000 0x00 0x000
+#define MX25_PAD_CSI_VSYNC__AUD6_RXD 0x144 0x33c 0x000 0x01 0x000
+#define MX25_PAD_CSI_VSYNC__SDHC2_DAT1 0x144 0x33c 0x4e8 0x02 0x001
+#define MX25_PAD_CSI_VSYNC__GPIO_1_9 0x144 0x33c 0x000 0x05 0x000
-#define MX25_PAD_CSI_HSYNC__CSI_HSYNC 0x148 0x340 0x000 0x10 0x000
-#define MX25_PAD_CSI_HSYNC__AUD6_TXC 0x148 0x340 0x000 0x11 0x000
-#define MX25_PAD_CSI_HSYNC__SDHC2_DAT2 0x148 0x340 0x4ec 0x12 0x001
-#define MX25_PAD_CSI_HSYNC__GPIO_1_10 0x148 0x340 0x000 0x15 0x000
+#define MX25_PAD_CSI_HSYNC__CSI_HSYNC 0x148 0x340 0x000 0x00 0x000
+#define MX25_PAD_CSI_HSYNC__AUD6_TXC 0x148 0x340 0x000 0x01 0x000
+#define MX25_PAD_CSI_HSYNC__SDHC2_DAT2 0x148 0x340 0x4ec 0x02 0x001
+#define MX25_PAD_CSI_HSYNC__GPIO_1_10 0x148 0x340 0x000 0x05 0x000
-#define MX25_PAD_CSI_PIXCLK__CSI_PIXCLK 0x14c 0x344 0x000 0x10 0x000
-#define MX25_PAD_CSI_PIXCLK__AUD6_TXFS 0x14c 0x344 0x000 0x11 0x000
-#define MX25_PAD_CSI_PIXCLK__SDHC2_DAT3 0x14c 0x344 0x4f0 0x12 0x001
-#define MX25_PAD_CSI_PIXCLK__GPIO_1_11 0x14c 0x344 0x000 0x15 0x000
+#define MX25_PAD_CSI_PIXCLK__CSI_PIXCLK 0x14c 0x344 0x000 0x00 0x000
+#define MX25_PAD_CSI_PIXCLK__AUD6_TXFS 0x14c 0x344 0x000 0x01 0x000
+#define MX25_PAD_CSI_PIXCLK__SDHC2_DAT3 0x14c 0x344 0x4f0 0x02 0x001
+#define MX25_PAD_CSI_PIXCLK__GPIO_1_11 0x14c 0x344 0x000 0x05 0x000
-#define MX25_PAD_I2C1_CLK__I2C1_CLK 0x150 0x348 0x000 0x10 0x000
-#define MX25_PAD_I2C1_CLK__GPIO_1_12 0x150 0x348 0x000 0x15 0x000
+#define MX25_PAD_I2C1_CLK__I2C1_CLK 0x150 0x348 0x000 0x00 0x000
+#define MX25_PAD_I2C1_CLK__GPIO_1_12 0x150 0x348 0x000 0x05 0x000
-#define MX25_PAD_I2C1_DAT__I2C1_DAT 0x154 0x34c 0x000 0x10 0x000
-#define MX25_PAD_I2C1_DAT__GPIO_1_13 0x154 0x34c 0x000 0x15 0x000
+#define MX25_PAD_I2C1_DAT__I2C1_DAT 0x154 0x34c 0x000 0x00 0x000
+#define MX25_PAD_I2C1_DAT__GPIO_1_13 0x154 0x34c 0x000 0x05 0x000
-#define MX25_PAD_CSPI1_MOSI__CSPI1_MOSI 0x158 0x350 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_MOSI__UART3_RXD 0x158 0x350 0x568 0x12 0x000
-#define MX25_PAD_CSPI1_MOSI__GPIO_1_14 0x158 0x350 0x000 0x15 0x000
+#define MX25_PAD_CSPI1_MOSI__CSPI1_MOSI 0x158 0x350 0x000 0x00 0x000
+#define MX25_PAD_CSPI1_MOSI__UART3_RXD 0x158 0x350 0x568 0x02 0x000
+#define MX25_PAD_CSPI1_MOSI__GPIO_1_14 0x158 0x350 0x000 0x05 0x000
-#define MX25_PAD_CSPI1_MISO__CSPI1_MISO 0x15c 0x354 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_MISO__UART3_TXD 0x15c 0x354 0x000 0x12 0x000
-#define MX25_PAD_CSPI1_MISO__GPIO_1_15 0x15c 0x354 0x000 0x15 0x000
+#define MX25_PAD_CSPI1_MISO__CSPI1_MISO 0x15c 0x354 0x000 0x00 0x000
+#define MX25_PAD_CSPI1_MISO__UART3_TXD 0x15c 0x354 0x000 0x02 0x000
+#define MX25_PAD_CSPI1_MISO__GPIO_1_15 0x15c 0x354 0x000 0x05 0x000
-#define MX25_PAD_CSPI1_SS0__CSPI1_SS0 0x160 0x358 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_SS0__PWM2_PWMO 0x160 0x358 0x000 0x12 0x000
-#define MX25_PAD_CSPI1_SS0__GPIO_1_16 0x160 0x358 0x000 0x15 0x000
+#define MX25_PAD_CSPI1_SS0__CSPI1_SS0 0x160 0x358 0x000 0x00 0x000
+#define MX25_PAD_CSPI1_SS0__PWM2_PWMO 0x160 0x358 0x000 0x02 0x000
+#define MX25_PAD_CSPI1_SS0__GPIO_1_16 0x160 0x358 0x000 0x05 0x000
#define MX25_PAD_CSPI1_SS1__CSPI1_SS1 0x164 0x35c 0x000 0x00 0x000
#define MX25_PAD_CSPI1_SS1__I2C3_DAT 0x164 0x35C 0x528 0x01 0x001
#define MX25_PAD_CSPI1_SS1__UART3_RTS 0x164 0x35c 0x000 0x02 0x000
#define MX25_PAD_CSPI1_SS1__GPIO_1_17 0x164 0x35c 0x000 0x05 0x000
-#define MX25_PAD_CSPI1_SCLK__CSPI1_SCLK 0x168 0x360 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_SCLK__UART3_CTS 0x168 0x360 0x000 0x12 0x000
-#define MX25_PAD_CSPI1_SCLK__GPIO_1_18 0x168 0x360 0x000 0x15 0x000
+#define MX25_PAD_CSPI1_SCLK__CSPI1_SCLK 0x168 0x360 0x000 0x00 0x000
+#define MX25_PAD_CSPI1_SCLK__UART3_CTS 0x168 0x360 0x000 0x02 0x000
+#define MX25_PAD_CSPI1_SCLK__GPIO_1_18 0x168 0x360 0x000 0x05 0x000
-#define MX25_PAD_CSPI1_RDY__CSPI1_RDY 0x16c 0x364 0x000 0x10 0x000
-#define MX25_PAD_CSPI1_RDY__GPIO_2_22 0x16c 0x364 0x000 0x15 0x000
+#define MX25_PAD_CSPI1_RDY__CSPI1_RDY 0x16c 0x364 0x000 0x00 0x000
+#define MX25_PAD_CSPI1_RDY__GPIO_2_22 0x16c 0x364 0x000 0x05 0x000
#define MX25_PAD_UART1_RXD__UART1_RXD 0x170 0x368 0x000 0x00 0x000
#define MX25_PAD_UART1_RXD__UART2_DTR 0x170 0x368 0x000 0x03 0x000
@@ -406,46 +406,55 @@
#define MX25_PAD_UART1_CTS__UART2_RI 0x17c 0x374 0x000 0x03 0x001
#define MX25_PAD_UART1_CTS__GPIO_4_25 0x17c 0x374 0x000 0x05 0x000
-#define MX25_PAD_UART2_RXD__UART2_RXD 0x180 0x378 0x000 0x10 0x000
-#define MX25_PAD_UART2_RXD__GPIO_4_26 0x180 0x378 0x000 0x15 0x000
+#define MX25_PAD_UART2_RXD__UART2_RXD 0x180 0x378 0x000 0x00 0x000
+#define MX25_PAD_UART2_RXD__GPIO_4_26 0x180 0x378 0x000 0x05 0x000
-#define MX25_PAD_UART2_TXD__UART2_TXD 0x184 0x37c 0x000 0x10 0x000
-#define MX25_PAD_UART2_TXD__GPIO_4_27 0x184 0x37c 0x000 0x15 0x000
+#define MX25_PAD_UART2_TXD__UART2_TXD 0x184 0x37c 0x000 0x00 0x000
+#define MX25_PAD_UART2_TXD__GPIO_4_27 0x184 0x37c 0x000 0x05 0x000
#define MX25_PAD_UART2_RTS__UART2_RTS 0x188 0x380 0x000 0x00 0x000
#define MX25_PAD_UART2_RTS__FEC_COL 0x188 0x380 0x504 0x02 0x002
#define MX25_PAD_UART2_RTS__CC1 0x188 0x380 0x000 0x03 0x000
#define MX25_PAD_UART2_RTS__GPIO_4_28 0x188 0x380 0x000 0x05 0x000
-#define MX25_PAD_UART2_CTS__UART2_CTS 0x18c 0x384 0x000 0x10 0x000
-#define MX25_PAD_UART2_CTS__FEC_RX_ERR 0x18c 0x384 0x518 0x12 0x002
-#define MX25_PAD_UART2_CTS__GPIO_4_29 0x18c 0x384 0x000 0x15 0x000
+#define MX25_PAD_UART2_CTS__UART2_CTS 0x18c 0x384 0x000 0x00 0x000
+#define MX25_PAD_UART2_CTS__FEC_RX_ERR 0x18c 0x384 0x518 0x02 0x002
+#define MX25_PAD_UART2_CTS__GPIO_4_29 0x18c 0x384 0x000 0x05 0x000
+/*
+ * Removing the SION bit from MX25_PAD_SD1_CMD__SD1_CMD breaks detecting an SD
+ * card. According to the i.MX25 reference manual (e.g. Figure 23-2 in IMX25RM
+ * Rev. 2 from 01/2011) this pin is bidirectional. So it seems to be a silicon
+ * bug that configuring the SD1_CMD function doesn't enable the input path for
+ * this pin.
+ * This might have side effects for other hardware units that are connected to
+ * that pin and use the respective function as input.
+ */
#define MX25_PAD_SD1_CMD__SD1_CMD 0x190 0x388 0x000 0x10 0x000
-#define MX25_PAD_SD1_CMD__CSPI2_MOSI 0x190 0x388 0x4a0 0x11 0x001
-#define MX25_PAD_SD1_CMD__FEC_RDATA2 0x190 0x388 0x50c 0x12 0x002
-#define MX25_PAD_SD1_CMD__GPIO_2_23 0x190 0x388 0x000 0x15 0x000
+#define MX25_PAD_SD1_CMD__CSPI2_MOSI 0x190 0x388 0x4a0 0x01 0x001
+#define MX25_PAD_SD1_CMD__FEC_RDATA2 0x190 0x388 0x50c 0x02 0x002
+#define MX25_PAD_SD1_CMD__GPIO_2_23 0x190 0x388 0x000 0x05 0x000
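For illustration only (not part of this patch): a board device tree consumes the SION-carrying define above like any other pad macro, e.g. in an esdhc pinctrl group. The group name and the 0x400000c0 pad-config values below are assumptions, shown only to sketch how the define is used.
pinctrl_esdhc1: esdhc1grp {
	fsl,pins = <
		MX25_PAD_SD1_CMD__SD1_CMD	0x400000c0	/* mux value 0x10 keeps SION set */
		MX25_PAD_SD1_CLK__SD1_CLK	0x400000c0
		MX25_PAD_SD1_DATA0__SD1_DATA0	0x400000c0
	>;
};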
-#define MX25_PAD_SD1_CLK__SD1_CLK 0x194 0x38c 0x000 0x10 0x000
-#define MX25_PAD_SD1_CLK__CSPI2_MISO 0x194 0x38c 0x49c 0x11 0x001
-#define MX25_PAD_SD1_CLK__FEC_RDATA3 0x194 0x38c 0x510 0x12 0x002
-#define MX25_PAD_SD1_CLK__GPIO_2_24 0x194 0x38c 0x000 0x15 0x000
+#define MX25_PAD_SD1_CLK__SD1_CLK 0x194 0x38c 0x000 0x00 0x000
+#define MX25_PAD_SD1_CLK__CSPI2_MISO 0x194 0x38c 0x49c 0x01 0x001
+#define MX25_PAD_SD1_CLK__FEC_RDATA3 0x194 0x38c 0x510 0x02 0x002
+#define MX25_PAD_SD1_CLK__GPIO_2_24 0x194 0x38c 0x000 0x05 0x000
-#define MX25_PAD_SD1_DATA0__SD1_DATA0 0x198 0x390 0x000 0x10 0x000
-#define MX25_PAD_SD1_DATA0__CSPI2_SCLK 0x198 0x390 0x494 0x11 0x001
-#define MX25_PAD_SD1_DATA0__GPIO_2_25 0x198 0x390 0x000 0x15 0x000
+#define MX25_PAD_SD1_DATA0__SD1_DATA0 0x198 0x390 0x000 0x00 0x000
+#define MX25_PAD_SD1_DATA0__CSPI2_SCLK 0x198 0x390 0x494 0x01 0x001
+#define MX25_PAD_SD1_DATA0__GPIO_2_25 0x198 0x390 0x000 0x05 0x000
-#define MX25_PAD_SD1_DATA1__SD1_DATA1 0x19c 0x394 0x000 0x10 0x000
-#define MX25_PAD_SD1_DATA1__AUD7_RXD 0x19c 0x394 0x478 0x13 0x000
-#define MX25_PAD_SD1_DATA1__GPIO_2_26 0x19c 0x394 0x000 0x15 0x000
+#define MX25_PAD_SD1_DATA1__SD1_DATA1 0x19c 0x394 0x000 0x00 0x000
+#define MX25_PAD_SD1_DATA1__AUD7_RXD 0x19c 0x394 0x478 0x03 0x000
+#define MX25_PAD_SD1_DATA1__GPIO_2_26 0x19c 0x394 0x000 0x05 0x000
-#define MX25_PAD_SD1_DATA2__SD1_DATA2 0x1a0 0x398 0x000 0x10 0x000
-#define MX25_PAD_SD1_DATA2__FEC_RX_CLK 0x1a0 0x398 0x514 0x12 0x002
-#define MX25_PAD_SD1_DATA2__GPIO_2_27 0x1a0 0x398 0x000 0x15 0x000
+#define MX25_PAD_SD1_DATA2__SD1_DATA2 0x1a0 0x398 0x000 0x00 0x000
+#define MX25_PAD_SD1_DATA2__FEC_RX_CLK 0x1a0 0x398 0x514 0x02 0x002
+#define MX25_PAD_SD1_DATA2__GPIO_2_27 0x1a0 0x398 0x000 0x05 0x000
-#define MX25_PAD_SD1_DATA3__SD1_DATA3 0x1a4 0x39c 0x000 0x10 0x000
-#define MX25_PAD_SD1_DATA3__FEC_CRS 0x1a4 0x39c 0x508 0x12 0x002
-#define MX25_PAD_SD1_DATA3__GPIO_2_28 0x1a4 0x39c 0x000 0x15 0x000
+#define MX25_PAD_SD1_DATA3__SD1_DATA3 0x1a4 0x39c 0x000 0x00 0x000
+#define MX25_PAD_SD1_DATA3__FEC_CRS 0x1a4 0x39c 0x508 0x02 0x002
+#define MX25_PAD_SD1_DATA3__GPIO_2_28 0x1a4 0x39c 0x000 0x05 0x000
#define MX25_PAD_KPP_ROW0__KPP_ROW0 0x1a8 0x3a0 0x000 0x00 0x000
#define MX25_PAD_KPP_ROW0__UART3_RXD 0x1a8 0x3a0 0x568 0x01 0x001
@@ -469,123 +478,123 @@
#define MX25_PAD_KPP_ROW3__UART1_RI 0x1b4 0x3ac 0x000 0x04 0x000
#define MX25_PAD_KPP_ROW3__GPIO_3_0 0x1b4 0x3ac 0x000 0x05 0x000
-#define MX25_PAD_KPP_COL0__KPP_COL0 0x1b8 0x3b0 0x000 0x10 0x000
-#define MX25_PAD_KPP_COL0__UART4_RXD 0x1b8 0x3b0 0x570 0x11 0x001
-#define MX25_PAD_KPP_COL0__AUD5_TXD 0x1b8 0x3b0 0x000 0x12 0x000
-#define MX25_PAD_KPP_COL0__GPIO_3_1 0x1b8 0x3b0 0x000 0x15 0x000
+#define MX25_PAD_KPP_COL0__KPP_COL0 0x1b8 0x3b0 0x000 0x00 0x000
+#define MX25_PAD_KPP_COL0__UART4_RXD 0x1b8 0x3b0 0x570 0x01 0x001
+#define MX25_PAD_KPP_COL0__AUD5_TXD 0x1b8 0x3b0 0x000 0x02 0x000
+#define MX25_PAD_KPP_COL0__GPIO_3_1 0x1b8 0x3b0 0x000 0x05 0x000
-#define MX25_PAD_KPP_COL1__KPP_COL1 0x1bc 0x3b4 0x000 0x10 0x000
-#define MX25_PAD_KPP_COL1__UART4_TXD 0x1bc 0x3b4 0x000 0x11 0x000
-#define MX25_PAD_KPP_COL1__AUD5_RXD 0x1bc 0x3b4 0x000 0x12 0x000
-#define MX25_PAD_KPP_COL1__GPIO_3_2 0x1bc 0x3b4 0x000 0x15 0x000
+#define MX25_PAD_KPP_COL1__KPP_COL1 0x1bc 0x3b4 0x000 0x00 0x000
+#define MX25_PAD_KPP_COL1__UART4_TXD 0x1bc 0x3b4 0x000 0x01 0x000
+#define MX25_PAD_KPP_COL1__AUD5_RXD 0x1bc 0x3b4 0x000 0x02 0x000
+#define MX25_PAD_KPP_COL1__GPIO_3_2 0x1bc 0x3b4 0x000 0x05 0x000
#define MX25_PAD_KPP_COL2__KPP_COL2 0x1c0 0x3b8 0x000 0x00 0x000
#define MX25_PAD_KPP_COL2__UART4_RTS 0x1c0 0x3b8 0x56c 0x01 0x001
#define MX25_PAD_KPP_COL2__AUD5_TXC 0x1c0 0x3b8 0x000 0x02 0x000
#define MX25_PAD_KPP_COL2__GPIO_3_3 0x1c0 0x3b8 0x000 0x05 0x000
-#define MX25_PAD_KPP_COL3__KPP_COL3 0x1c4 0x3bc 0x000 0x10 0x000
-#define MX25_PAD_KPP_COL3__UART4_CTS 0x1c4 0x3bc 0x000 0x11 0x000
-#define MX25_PAD_KPP_COL3__AUD5_TXFS 0x1c4 0x3bc 0x000 0x12 0x000
-#define MX25_PAD_KPP_COL3__GPIO_3_4 0x1c4 0x3bc 0x000 0x15 0x000
+#define MX25_PAD_KPP_COL3__KPP_COL3 0x1c4 0x3bc 0x000 0x00 0x000
+#define MX25_PAD_KPP_COL3__UART4_CTS 0x1c4 0x3bc 0x000 0x01 0x000
+#define MX25_PAD_KPP_COL3__AUD5_TXFS 0x1c4 0x3bc 0x000 0x02 0x000
+#define MX25_PAD_KPP_COL3__GPIO_3_4 0x1c4 0x3bc 0x000 0x05 0x000
-#define MX25_PAD_FEC_MDC__FEC_MDC 0x1c8 0x3c0 0x000 0x10 0x000
-#define MX25_PAD_FEC_MDC__AUD4_TXD 0x1c8 0x3c0 0x464 0x12 0x001
-#define MX25_PAD_FEC_MDC__GPIO_3_5 0x1c8 0x3c0 0x000 0x15 0x000
+#define MX25_PAD_FEC_MDC__FEC_MDC 0x1c8 0x3c0 0x000 0x00 0x000
+#define MX25_PAD_FEC_MDC__AUD4_TXD 0x1c8 0x3c0 0x464 0x02 0x001
+#define MX25_PAD_FEC_MDC__GPIO_3_5 0x1c8 0x3c0 0x000 0x05 0x000
-#define MX25_PAD_FEC_MDIO__FEC_MDIO 0x1cc 0x3c4 0x000 0x10 0x000
-#define MX25_PAD_FEC_MDIO__AUD4_RXD 0x1cc 0x3c4 0x460 0x12 0x001
-#define MX25_PAD_FEC_MDIO__GPIO_3_6 0x1cc 0x3c4 0x000 0x15 0x000
+#define MX25_PAD_FEC_MDIO__FEC_MDIO 0x1cc 0x3c4 0x000 0x00 0x000
+#define MX25_PAD_FEC_MDIO__AUD4_RXD 0x1cc 0x3c4 0x460 0x02 0x001
+#define MX25_PAD_FEC_MDIO__GPIO_3_6 0x1cc 0x3c4 0x000 0x05 0x000
-#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 0x1d0 0x3c8 0x000 0x10 0x000
-#define MX25_PAD_FEC_TDATA0__GPIO_3_7 0x1d0 0x3c8 0x000 0x15 0x000
+#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 0x1d0 0x3c8 0x000 0x00 0x000
+#define MX25_PAD_FEC_TDATA0__GPIO_3_7 0x1d0 0x3c8 0x000 0x05 0x000
-#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 0x1d4 0x3cc 0x000 0x10 0x000
-#define MX25_PAD_FEC_TDATA1__AUD4_TXFS 0x1d4 0x3cc 0x474 0x12 0x001
-#define MX25_PAD_FEC_TDATA1__GPIO_3_8 0x1d4 0x3cc 0x000 0x15 0x000
+#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 0x1d4 0x3cc 0x000 0x00 0x000
+#define MX25_PAD_FEC_TDATA1__AUD4_TXFS 0x1d4 0x3cc 0x474 0x02 0x001
+#define MX25_PAD_FEC_TDATA1__GPIO_3_8 0x1d4 0x3cc 0x000 0x05 0x000
-#define MX25_PAD_FEC_TX_EN__FEC_TX_EN 0x1d8 0x3d0 0x000 0x10 0x000
-#define MX25_PAD_FEC_TX_EN__GPIO_3_9 0x1d8 0x3d0 0x000 0x15 0x000
+#define MX25_PAD_FEC_TX_EN__FEC_TX_EN 0x1d8 0x3d0 0x000 0x00 0x000
+#define MX25_PAD_FEC_TX_EN__GPIO_3_9 0x1d8 0x3d0 0x000 0x05 0x000
-#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 0x1dc 0x3d4 0x000 0x10 0x000
-#define MX25_PAD_FEC_RDATA0__GPIO_3_10 0x1dc 0x3d4 0x000 0x15 0x000
+#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 0x1dc 0x3d4 0x000 0x00 0x000
+#define MX25_PAD_FEC_RDATA0__GPIO_3_10 0x1dc 0x3d4 0x000 0x05 0x000
-#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 0x1e0 0x3d8 0x000 0x10 0x000
+#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 0x1e0 0x3d8 0x000 0x00 0x000
/*
* According to the i.MX25 Reference manual (IMX25RM, Rev. 2,
* 01/2011) this is CAN1_TX but that's wrong.
*/
-#define MX25_PAD_FEC_RDATA1__CAN2_TX 0x1e0 0x3d8 0x000 0x14 0x000
-#define MX25_PAD_FEC_RDATA1__GPIO_3_11 0x1e0 0x3d8 0x000 0x15 0x000
+#define MX25_PAD_FEC_RDATA1__CAN2_TX 0x1e0 0x3d8 0x000 0x04 0x000
+#define MX25_PAD_FEC_RDATA1__GPIO_3_11 0x1e0 0x3d8 0x000 0x05 0x000
-#define MX25_PAD_FEC_RX_DV__FEC_RX_DV 0x1e4 0x3dc 0x000 0x10 0x000
+#define MX25_PAD_FEC_RX_DV__FEC_RX_DV 0x1e4 0x3dc 0x000 0x00 0x000
/*
* According to the i.MX25 Reference manual (IMX25RM, Rev. 2,
* 01/2011) this is CAN1_RX but that's wrong.
*/
-#define MX25_PAD_FEC_RX_DV__CAN2_RX 0x1e4 0x3dc 0x484 0x14 0x000
-#define MX25_PAD_FEC_RX_DV__GPIO_3_12 0x1e4 0x3dc 0x000 0x15 0x000
+#define MX25_PAD_FEC_RX_DV__CAN2_RX 0x1e4 0x3dc 0x484 0x04 0x000
+#define MX25_PAD_FEC_RX_DV__GPIO_3_12 0x1e4 0x3dc 0x000 0x05 0x000
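For illustration only (not part of this patch): a board routing CAN2 over these FEC pads picks up the corrected mux modes simply by referencing the defines. The group name and the 0x80000000 pad-config value (commonly used on i.MX pinctrl to leave the pad settings untouched) are assumptions here.
pinctrl_can2: can2grp {
	fsl,pins = <
		MX25_PAD_FEC_RDATA1__CAN2_TX	0x80000000
		MX25_PAD_FEC_RX_DV__CAN2_RX	0x80000000
	>;
};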
-#define MX25_PAD_FEC_TX_CLK__FEC_TX_CLK 0x1e8 0x3e0 0x000 0x10 0x000
-#define MX25_PAD_FEC_TX_CLK__GPIO_3_13 0x1e8 0x3e0 0x000 0x15 0x000
+#define MX25_PAD_FEC_TX_CLK__FEC_TX_CLK 0x1e8 0x3e0 0x000 0x00 0x000
+#define MX25_PAD_FEC_TX_CLK__GPIO_3_13 0x1e8 0x3e0 0x000 0x05 0x000
-#define MX25_PAD_RTCK__RTCK 0x1ec 0x3e4 0x000 0x10 0x000
-#define MX25_PAD_RTCK__OWIRE 0x1ec 0x3e4 0x000 0x11 0x000
-#define MX25_PAD_RTCK__GPIO_3_14 0x1ec 0x3e4 0x000 0x15 0x000
+#define MX25_PAD_RTCK__RTCK 0x1ec 0x3e4 0x000 0x00 0x000
+#define MX25_PAD_RTCK__OWIRE 0x1ec 0x3e4 0x000 0x01 0x000
+#define MX25_PAD_RTCK__GPIO_3_14 0x1ec 0x3e4 0x000 0x05 0x000
-#define MX25_PAD_DE_B__DE_B 0x1f0 0x3ec 0x000 0x10 0x000
-#define MX25_PAD_DE_B__GPIO_2_20 0x1f0 0x3ec 0x000 0x15 0x000
+#define MX25_PAD_DE_B__DE_B 0x1f0 0x3ec 0x000 0x00 0x000
+#define MX25_PAD_DE_B__GPIO_2_20 0x1f0 0x3ec 0x000 0x05 0x000
-#define MX25_PAD_GPIO_A__GPIO_A 0x1f4 0x3f0 0x000 0x10 0x000
-#define MX25_PAD_GPIO_A__CAN1_TX 0x1f4 0x3f0 0x000 0x16 0x000
-#define MX25_PAD_GPIO_A__USBOTG_PWR 0x1f4 0x3f0 0x000 0x12 0x000
+#define MX25_PAD_GPIO_A__GPIO_A 0x1f4 0x3f0 0x000 0x00 0x000
+#define MX25_PAD_GPIO_A__CAN1_TX 0x1f4 0x3f0 0x000 0x06 0x000
+#define MX25_PAD_GPIO_A__USBOTG_PWR 0x1f4 0x3f0 0x000 0x02 0x000
-#define MX25_PAD_GPIO_B__GPIO_B 0x1f8 0x3f4 0x000 0x10 0x000
-#define MX25_PAD_GPIO_B__USBOTG_OC 0x1f8 0x3f4 0x57c 0x12 0x001
-#define MX25_PAD_GPIO_B__CAN1_RX 0x1f8 0x3f4 0x480 0x16 0x001
+#define MX25_PAD_GPIO_B__GPIO_B 0x1f8 0x3f4 0x000 0x00 0x000
+#define MX25_PAD_GPIO_B__USBOTG_OC 0x1f8 0x3f4 0x57c 0x02 0x001
+#define MX25_PAD_GPIO_B__CAN1_RX 0x1f8 0x3f4 0x480 0x06 0x001
-#define MX25_PAD_GPIO_C__GPIO_C 0x1fc 0x3f8 0x000 0x10 0x000
-#define MX25_PAD_GPIO_C__PWM4_PWMO 0x1fc 0x3f8 0x000 0x11 0x000
-#define MX25_PAD_GPIO_C__I2C2_SCL 0x1fc 0x3f8 0x51c 0x12 0x001
-#define MX25_PAD_GPIO_C__KPP_COL4 0x1fc 0x3f8 0x52c 0x13 0x001
-#define MX25_PAD_GPIO_C__CAN2_TX 0x1fc 0x3f8 0x000 0x16 0x000
+#define MX25_PAD_GPIO_C__GPIO_C 0x1fc 0x3f8 0x000 0x00 0x000
+#define MX25_PAD_GPIO_C__PWM4_PWMO 0x1fc 0x3f8 0x000 0x01 0x000
+#define MX25_PAD_GPIO_C__I2C2_SCL 0x1fc 0x3f8 0x51c 0x02 0x001
+#define MX25_PAD_GPIO_C__KPP_COL4 0x1fc 0x3f8 0x52c 0x03 0x001
+#define MX25_PAD_GPIO_C__CAN2_TX 0x1fc 0x3f8 0x000 0x06 0x000
-#define MX25_PAD_GPIO_D__GPIO_D 0x200 0x3fc 0x000 0x10 0x000
-#define MX25_PAD_GPIO_D__I2C2_SDA 0x200 0x3fc 0x520 0x12 0x001
-#define MX25_PAD_GPIO_D__CAN2_RX 0x200 0x3fc 0x484 0x16 0x001
+#define MX25_PAD_GPIO_D__GPIO_D 0x200 0x3fc 0x000 0x00 0x000
+#define MX25_PAD_GPIO_D__I2C2_SDA 0x200 0x3fc 0x520 0x02 0x001
+#define MX25_PAD_GPIO_D__CAN2_RX 0x200 0x3fc 0x484 0x06 0x001
-#define MX25_PAD_GPIO_E__GPIO_E 0x204 0x400 0x000 0x10 0x000
-#define MX25_PAD_GPIO_E__I2C3_CLK 0x204 0x400 0x524 0x11 0x002
-#define MX25_PAD_GPIO_E__LD16 0x204 0x400 0x000 0x12 0x000
-#define MX25_PAD_GPIO_E__AUD7_TXD 0x204 0x400 0x000 0x14 0x000
-#define MX25_PAD_GPIO_E__UART4_RXD 0x204 0x400 0x570 0x16 0x002
+#define MX25_PAD_GPIO_E__GPIO_E 0x204 0x400 0x000 0x00 0x000
+#define MX25_PAD_GPIO_E__I2C3_CLK 0x204 0x400 0x524 0x01 0x002
+#define MX25_PAD_GPIO_E__LD16 0x204 0x400 0x000 0x02 0x000
+#define MX25_PAD_GPIO_E__AUD7_TXD 0x204 0x400 0x000 0x04 0x000
+#define MX25_PAD_GPIO_E__UART4_RXD 0x204 0x400 0x570 0x06 0x002
-#define MX25_PAD_GPIO_F__GPIO_F 0x208 0x404 0x000 0x10 0x000
-#define MX25_PAD_GPIO_F__LD17 0x208 0x404 0x000 0x12 0x000
-#define MX25_PAD_GPIO_F__AUD7_TXC 0x208 0x404 0x000 0x14 0x000
-#define MX25_PAD_GPIO_F__UART4_TXD 0x208 0x404 0x000 0x16 0x000
+#define MX25_PAD_GPIO_F__GPIO_F 0x208 0x404 0x000 0x00 0x000
+#define MX25_PAD_GPIO_F__LD17 0x208 0x404 0x000 0x02 0x000
+#define MX25_PAD_GPIO_F__AUD7_TXC 0x208 0x404 0x000 0x04 0x000
+#define MX25_PAD_GPIO_F__UART4_TXD 0x208 0x404 0x000 0x06 0x000
-#define MX25_PAD_EXT_ARMCLK__EXT_ARMCLK 0x20c 0x000 0x000 0x10 0x000
-#define MX25_PAD_EXT_ARMCLK__GPIO_3_15 0x20c 0x000 0x000 0x15 0x000
+#define MX25_PAD_EXT_ARMCLK__EXT_ARMCLK 0x20c 0x000 0x000 0x00 0x000
+#define MX25_PAD_EXT_ARMCLK__GPIO_3_15 0x20c 0x000 0x000 0x05 0x000
-#define MX25_PAD_UPLL_BYPCLK__UPLL_BYPCLK 0x210 0x000 0x000 0x10 0x000
-#define MX25_PAD_UPLL_BYPCLK__GPIO_3_16 0x210 0x000 0x000 0x15 0x000
+#define MX25_PAD_UPLL_BYPCLK__UPLL_BYPCLK 0x210 0x000 0x000 0x00 0x000
+#define MX25_PAD_UPLL_BYPCLK__GPIO_3_16 0x210 0x000 0x000 0x05 0x000
#define MX25_PAD_VSTBY_REQ__VSTBY_REQ 0x214 0x408 0x000 0x00 0x000
#define MX25_PAD_VSTBY_REQ__AUD7_TXFS 0x214 0x408 0x000 0x04 0x000
#define MX25_PAD_VSTBY_REQ__GPIO_3_17 0x214 0x408 0x000 0x05 0x000
#define MX25_PAD_VSTBY_REQ__UART4_RTS 0x214 0x408 0x56c 0x06 0x002
-#define MX25_PAD_VSTBY_ACK__VSTBY_ACK 0x218 0x40c 0x000 0x10 0x000
-#define MX25_PAD_VSTBY_ACK__GPIO_3_18 0x218 0x40c 0x000 0x15 0x000
+#define MX25_PAD_VSTBY_ACK__VSTBY_ACK 0x218 0x40c 0x000 0x00 0x000
+#define MX25_PAD_VSTBY_ACK__GPIO_3_18 0x218 0x40c 0x000 0x05 0x000
-#define MX25_PAD_POWER_FAIL__POWER_FAIL 0x21c 0x410 0x000 0x10 0x000
-#define MX25_PAD_POWER_FAIL__AUD7_RXD 0x21c 0x410 0x478 0x14 0x001
-#define MX25_PAD_POWER_FAIL__GPIO_3_19 0x21c 0x410 0x000 0x15 0x000
-#define MX25_PAD_POWER_FAIL__UART4_CTS 0x21c 0x410 0x000 0x16 0x000
+#define MX25_PAD_POWER_FAIL__POWER_FAIL 0x21c 0x410 0x000 0x00 0x000
+#define MX25_PAD_POWER_FAIL__AUD7_RXD 0x21c 0x410 0x478 0x04 0x001
+#define MX25_PAD_POWER_FAIL__GPIO_3_19 0x21c 0x410 0x000 0x05 0x000
+#define MX25_PAD_POWER_FAIL__UART4_CTS 0x21c 0x410 0x000 0x06 0x000
-#define MX25_PAD_CLKO__CLKO 0x220 0x414 0x000 0x10 0x000
-#define MX25_PAD_CLKO__GPIO_2_21 0x220 0x414 0x000 0x15 0x000
+#define MX25_PAD_CLKO__CLKO 0x220 0x414 0x000 0x00 0x000
+#define MX25_PAD_CLKO__GPIO_2_21 0x220 0x414 0x000 0x05 0x000
#define MX25_PAD_BOOT_MODE0__BOOT_MODE0 0x224 0x000 0x000 0x00 0x000
#define MX25_PAD_BOOT_MODE0__GPIO_4_30 0x224 0x000 0x000 0x05 0x000
diff --git a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
index e2242638ea0b..2cf896c505f9 100644
--- a/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
+++ b/arch/arm/boot/dts/imx27-eukrea-cpuimx27.dtsi
@@ -77,7 +77,7 @@
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts b/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
index 2ab65fc4c1e1..27846ff9bb0d 100644
--- a/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
+++ b/arch/arm/boot/dts/imx27-eukrea-mbimxsd27-baseboard.dts
@@ -140,21 +140,21 @@
};
&uart1 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
&uart2 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
status = "okay";
};
&uart3 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx27-pdk.dts b/arch/arm/boot/dts/imx27-pdk.dts
index 49450dbbcab8..d0ef496a1af8 100644
--- a/arch/arm/boot/dts/imx27-pdk.dts
+++ b/arch/arm/boot/dts/imx27-pdk.dts
@@ -106,7 +106,7 @@
};
&uart1 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
index 7c869fe3c30b..bfd4946cf9fe 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
@@ -147,21 +147,21 @@
};
&uart1 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
&uart2 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
status = "okay";
};
&uart3 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
index 538568b0de26..cf09e72aeb06 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore-rdk.dts
@@ -283,14 +283,14 @@
};
&uart1 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
&uart2 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx28-apf28dev.dts b/arch/arm/boot/dts/imx28-apf28dev.dts
index 1eaa131e2d18..c4fadbc1b400 100644
--- a/arch/arm/boot/dts/imx28-apf28dev.dts
+++ b/arch/arm/boot/dts/imx28-apf28dev.dts
@@ -140,7 +140,7 @@
auart0: serial@8006a000 {
pinctrl-names = "default";
pinctrl-0 = <&auart0_pins_a>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx28-cfa10049.dts b/arch/arm/boot/dts/imx28-cfa10049.dts
index ef944b6d4f01..a9c347e48bcf 100644
--- a/arch/arm/boot/dts/imx28-cfa10049.dts
+++ b/arch/arm/boot/dts/imx28-cfa10049.dts
@@ -426,7 +426,7 @@
};
- onewire@0 {
+ onewire {
compatible = "w1-gpio";
pinctrl-names = "default";
pinctrl-0 = <&w1_gpio_pins>;
diff --git a/arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi b/arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi
index 88594747f454..581e85f4fd4c 100644
--- a/arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi
+++ b/arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi
@@ -84,6 +84,7 @@
reg_3p3v: regulator@0 {
compatible = "regulator-fixed";
+ reg = <0>;
regulator-name = "3P3V";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -92,6 +93,7 @@
reg_lcd_3v3: regulator@1 {
compatible = "regulator-fixed";
+ reg = <1>;
pinctrl-names = "default";
pinctrl-0 = <&reg_lcd_3v3_pins_mbmx28lc>;
regulator-name = "lcd-3v3";
@@ -103,6 +105,7 @@
reg_usb0_vbus: regulator@2 {
compatible = "regulator-fixed";
+ reg = <2>;
pinctrl-names = "default";
pinctrl-0 = <&reg_usb0_vbus_pins_mbmx28lc>;
regulator-name = "usb0_vbus";
@@ -114,6 +117,7 @@
reg_usb1_vbus: regulator@3 {
compatible = "regulator-fixed";
+ reg = <3>;
pinctrl-names = "default";
pinctrl-0 = <&reg_usb1_vbus_pins_mbmx28lc>;
regulator-name = "usb1_vbus";
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index e3ef94ac159f..a5ba669b4eaa 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -224,7 +224,7 @@
auart0: serial@8006a000 {
pinctrl-names = "default";
pinctrl-0 = <&auart0_pins_a>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx28-tx28.dts b/arch/arm/boot/dts/imx28-tx28.dts
index fd20e99c777e..0ebbc83852d0 100644
--- a/arch/arm/boot/dts/imx28-tx28.dts
+++ b/arch/arm/boot/dts/imx28-tx28.dts
@@ -173,7 +173,7 @@
default-brightness-level = <50>;
};
- matrix_keypad: matrix-keypad@0 {
+ matrix_keypad: matrix-keypad {
compatible = "gpio-matrix-keypad";
col-gpios = <
&gpio5 0 GPIO_ACTIVE_HIGH
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 74aa151cdb45..0ad893bf5f43 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -165,6 +165,7 @@
gpio0: gpio@0 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ reg = <0>;
interrupts = <127>;
gpio-controller;
#gpio-cells = <2>;
@@ -174,6 +175,7 @@
gpio1: gpio@1 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ reg = <1>;
interrupts = <126>;
gpio-controller;
#gpio-cells = <2>;
@@ -183,6 +185,7 @@
gpio2: gpio@2 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ reg = <2>;
interrupts = <125>;
gpio-controller;
#gpio-cells = <2>;
@@ -192,6 +195,7 @@
gpio3: gpio@3 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ reg = <3>;
interrupts = <124>;
gpio-controller;
#gpio-cells = <2>;
@@ -201,6 +205,7 @@
gpio4: gpio@4 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ reg = <4>;
interrupts = <123>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/imx31-bug.dts b/arch/arm/boot/dts/imx31-bug.dts
index 2424abfc9c7b..ae6cebbed84b 100644
--- a/arch/arm/boot/dts/imx31-bug.dts
+++ b/arch/arm/boot/dts/imx31-bug.dts
@@ -22,6 +22,6 @@
};
&uart5 {
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts b/arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts
index 4727bbb804e1..e9357131b026 100644
--- a/arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts
+++ b/arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts
@@ -139,14 +139,14 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx35-pdk.dts b/arch/arm/boot/dts/imx35-pdk.dts
index 8d715523708f..9bb628f22502 100644
--- a/arch/arm/boot/dts/imx35-pdk.dts
+++ b/arch/arm/boot/dts/imx35-pdk.dts
@@ -63,6 +63,6 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 018d24eb9965..f097b4f29ab4 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -388,7 +388,7 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
@@ -401,7 +401,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
index d270df3e5891..728212861ece 100644
--- a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
+++ b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
@@ -261,14 +261,14 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3 &pinctrl_uart3_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx51-ts4800.dts b/arch/arm/boot/dts/imx51-ts4800.dts
index 30f44b5565b9..ca1cc5eca80f 100644
--- a/arch/arm/boot/dts/imx51-ts4800.dts
+++ b/arch/arm/boot/dts/imx51-ts4800.dts
@@ -165,6 +165,27 @@
reg = <0x12000 0x1000>;
syscon = <&syscon 0x10 6>;
};
+
+ fpga_irqc: fpga-irqc@15000 {
+ compatible = "technologic,ts4800-irqc";
+ reg = <0x15000 0x1000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_interrupt_fpga>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ can@1a000 {
+ compatible = "technologic,sja1000";
+ reg = <0x1a000 0x100>;
+ interrupt-parent = <&fpga_irqc>;
+ interrupts = <1>;
+ reg-io-width = <2>;
+ nxp,tx-output-config = <0x06>;
+ nxp,external-clock-frequency = <24000000>;
+ };
};
};
@@ -228,6 +249,12 @@
>;
};
+ pinctrl_interrupt_fpga: fpgaicgrp {
+ fsl,pins = <
+ MX51_PAD_EIM_D27__GPIO2_9 0xe5
+ >;
+ };
+
pinctrl_lcd: lcdgrp {
fsl,pins = <
MX51_PAD_DISP1_DAT0__DISP1_DAT0 0x5
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 542ab9e697fb..9f5190040555 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -56,7 +56,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index e03373a58760..91a6a9ff50d7 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -218,7 +218,7 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index bd3dfefa5778..57e75f1639e0 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -513,21 +513,21 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index bfbed52ce1bd..2becd7cd6544 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -97,6 +97,7 @@
phy-reset-gpios = <&gpio3 31 0>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
index 8e67ca27ad79..207b85b91ada 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
@@ -93,7 +93,7 @@
reg = <0>;
lcd_display_in: endpoint {
- remote-endpoint = <&ipu1_di0_disp1>;
+ remote-endpoint = <&ipu1_di1_disp1>;
};
};
@@ -210,7 +210,7 @@
};
};
-&ipu1_di0_disp1 {
+&ipu1_di1_disp1 {
remote-endpoint = <&lcd_display_in>;
};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index d6515f7a56c4..d8acf15611e4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -185,6 +185,7 @@
phy-mode = "rgmii";
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
@@ -218,7 +219,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
fsl,dte-mode;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi
index f7e17e2004ac..f2adc60723da 100644
--- a/arch/arm/boot/dts/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi
@@ -351,7 +351,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index bb66dfd5294c..cf3fd31e3406 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -52,6 +52,12 @@
};
};
+ gpio-poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ status = "okay";
+ };
+
reg_wl18xx_vmmc: regulator-wl18xx {
compatible = "regulator-fixed";
regulator-name = "vwl1807";
diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
index 99b46f8030ad..b5de7e620905 100644
--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
@@ -3,15 +3,46 @@
*
* Author: Valentin Raevsky <valentin@compulab.co.il>
*
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
*
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "imx6q.dtsi"
/ {
@@ -31,6 +62,71 @@
linux,default-trigger = "heartbeat";
};
};
+
+ reg_pcie_power_on_gpio: regulator-pcie-power-on-gpio {
+ compatible = "regulator-fixed";
+ regulator-name = "regulator-pcie-power-on-gpio";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 24 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_h1_vbus: usb_h1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio7 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&cpu0 {
+ /*
+ * Although the imx6q fuse indicates that 1.2GHz operation is possible,
+ * the module is unstable at this frequency. Hence, remove the
+ * 1.2GHz operating point here.
+ */
+ operating-points = <
+ /* kHz uV */
+ 996000 1250000
+ 852000 1250000
+ 792000 1175000
+ 396000 975000
+ >;
+ fsl,soc-operating-points = <
+ /* ARM kHz SOC-PU uV */
+ 996000 1250000
+ 852000 1250000
+ 792000 1175000
+ 396000 1175000
+ >;
+};
+
+&ecspi1 {
+ fsl,spi-num-chipselects = <2>;
+ cs-gpios = <&gpio2 30 GPIO_ACTIVE_HIGH>, <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ status = "okay";
+
+ m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,m25p", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
};
&fec {
@@ -46,58 +142,122 @@
status = "okay";
};
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+ clock-frequency = <100000>;
+
+ eeprom@50 {
+ compatible = "at24,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
&iomuxc {
- imx6q-cm-fx6 {
- pinctrl_enet: enetgrp {
- fsl,pins = <
- MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
- MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
- MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
- MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
- MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
- MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
- MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
- MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
- MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
- MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
- MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
- MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
- MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
- MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
- MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
- MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
- >;
- };
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1
+ MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1
+ MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1
+ MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x100b1
+ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x100b1
+ >;
+ };
- pinctrl_gpmi_nand: gpminandgrp {
- fsl,pins = <
- MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
- MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
- MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
- MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
- MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
- MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
- MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
- MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
- MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
- MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
- MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
- MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
- MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
- MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
- MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
- MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
- MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
- >;
- };
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b0b0
+ MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b0b0
+ MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b0b0
+ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b0b0
+ MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b0b0
+ MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+ MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b0b0
+ MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b0b0
+ MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b0b0
+ MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b0b0
+ MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b0b0
+ MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+ MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+ >;
+ };
- pinctrl_uart4: uart4grp {
- fsl,pins = <
- MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
- MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
- >;
- };
+ pinctrl_gpmi_nand: gpminandgrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
+ MX6QDL_PAD_NANDF_ALE__NAND_ALE 0xb0b1
+ MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0xb0b1
+ MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
+ MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0xb0b1
+ MX6QDL_PAD_NANDF_CS1__NAND_CE1_B 0xb0b1
+ MX6QDL_PAD_SD4_CMD__NAND_RE_B 0xb0b1
+ MX6QDL_PAD_SD4_CLK__NAND_WE_B 0xb0b1
+ MX6QDL_PAD_NANDF_D0__NAND_DATA00 0xb0b1
+ MX6QDL_PAD_NANDF_D1__NAND_DATA01 0xb0b1
+ MX6QDL_PAD_NANDF_D2__NAND_DATA02 0xb0b1
+ MX6QDL_PAD_NANDF_D3__NAND_DATA03 0xb0b1
+ MX6QDL_PAD_NANDF_D4__NAND_DATA04 0xb0b1
+ MX6QDL_PAD_NANDF_D5__NAND_DATA05 0xb0b1
+ MX6QDL_PAD_NANDF_D6__NAND_DATA06 0xb0b1
+ MX6QDL_PAD_NANDF_D7__NAND_DATA07 0xb0b1
+ MX6QDL_PAD_SD4_DAT0__NAND_DQS 0x00b1
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b1
+ MX6QDL_PAD_EIM_CS1__GPIO2_IO24 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+ MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+ >;
};
+
+ pinctrl_usbh1: usbh1grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x1b0b1
+ >;
+ };
+
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x130b0
+ >;
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio1 26 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_pcie_power_on_gpio>;
+ status = "okay";
+};
+
+&sata {
+ status = "okay";
+};
+
+&snvs_poweroff {
+ status = "okay";
};
&uart4 {
@@ -105,3 +265,18 @@
pinctrl-0 = <&pinctrl_uart4>;
status = "okay";
};
+
+&usbh1 {
+ vbus-supply = <&reg_usb_h1_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh1>;
+ status = "okay";
+};
+
+&usbotg {
+ vbus-supply = <&reg_usb_otg_vbus>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ dr_mode = "otg";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-h100.dts b/arch/arm/boot/dts/imx6q-h100.dts
new file mode 100644
index 000000000000..65e66f994f88
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-h100.dts
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2015 Lucas Stach <kernel@pengutronix.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-microsom.dtsi"
+#include "imx6qdl-microsom-ar8035.dtsi"
+
+/ {
+ model = "Auvidea H100";
+ compatible = "auvidea,h100", "fsl,imx6q";
+
+ aliases {
+ rtc0 = &rtc;
+ rtc1 = &snvs_rtc;
+ };
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ hdmi_osc: hdmi-osc {
+ compatible = "fixed-clock";
+ clock-output-names = "hdmi-osc";
+ clock-frequency = <27000000>;
+ #clock-cells = <0>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_leds>;
+
+ led0: power {
+ label = "power";
+ gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+
+ led1: stream {
+ label = "stream";
+ gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led2: rec {
+ label = "rec";
+ gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_hdmi: regulator-hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_reg_hdmi>;
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+ regulator-name = "V_HDMI";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_nvcc_sd2: regulator-nvcc-sd2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_reg_nvcc_sd2>;
+ compatible = "regulator-gpio";
+ regulator-name = "NVCC_SD2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-type = "voltage";
+ regulator-boot-on;
+ regulator-always-on;
+ gpios = <&gpio4 9 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x1
+ 3300000 0x0>;
+ };
+
+ reg_usbh1_vbus: regulator-usb-h1-vbus {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_usbh1_vbus>;
+ regulator-name = "USB_H1_VBUS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_usbotg_vbus: regulator-usb-otg-vbus {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_usbotg_vbus>;
+ regulator-name = "USB_OTG_VBUS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ sound-sgtl5000 {
+ compatible = "fsl,imx-audio-sgtl5000";
+ model = "H100 on-board codec";
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-ext-port = <5>;
+ mux-int-port = <1>;
+ ssi-controller = <&ssi1>;
+ };
+};
+
+&audmux {
+ status = "okay";
+};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_hdmi>;
+ ddc-i2c-bus = <&i2c2>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_i2c1>;
+ status = "okay";
+
+ eeprom: 24c02@51 {
+ compatible = "microchip,24c02", "at24";
+ reg = <0x51>;
+ };
+
+ rtc: pcf8523@68 {
+ compatible = "nxp,pcf8523";
+ reg = <0x68>;
+ };
+
+ sgtl5000: sgtl5000@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_sgtl5000>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+ };
+
+ tc358743: tc358743@0f {
+ compatible = "toshiba,tc358743";
+ reg = <0x0f>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_tc358743>;
+ clocks = <&hdmi_osc>;
+ clock-names = "refclk";
+ reset-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+ /* IRQ has a wrong pull resistor which renders it useless */
+
+ port@0 {
+ tc358743_out: endpoint {
+ remote-endpoint = <&mipi_csi2_in>;
+ data-lanes = <1 2 3 4>;
+ clock-lanes = <0>;
+ clock-noncontinuous;
+ link-frequencies = /bits/ 64 <297000000>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_i2c2>;
+ status = "okay";
+};
+
+&iomuxc {
+ h100 {
+ pinctrl_h100_hdmi: h100-hdmi {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0
+ >;
+ };
+
+ pinctrl_h100_i2c1: h100-i2c1 {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_h100_i2c2: h100-i2c2 {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_h100_leds: pinctrl-h100-leds {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x1b0b0
+ MX6QDL_PAD_EIM_EB1__GPIO2_IO29 0x1b0b0
+ MX6QDL_PAD_EIM_EB0__GPIO2_IO28 0x1b0b0
+ >;
+ };
+
+ pinctrl_h100_reg_hdmi: h100-reg-hdmi {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_A18__GPIO2_IO20 0x1b0b0
+ >;
+ };
+
+ pinctrl_h100_reg_nvcc_sd2: h100-reg-nvcc-sd2 {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x1b0b0
+ >;
+ };
+
+ pinctrl_h100_sgtl5000: h100-sgtl5000 {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0
+ MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0
+ MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x110b0
+ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0
+ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
+ >;
+ };
+
+ pinctrl_h100_tc358743: h100-tc358743 {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x1b0b0
+ >;
+ };
+
+ pinctrl_h100_uart2: h100-uart2 {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
+ >;
+ };
+
+ pinctrl_h100_usbh1_vbus: hummingboard-usbh1-vbus {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0
+ >;
+ };
+
+ pinctrl_h100_usbotg_id: hummingboard-usbotg-id {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059
+ >;
+ };
+
+ pinctrl_h100_usbotg_vbus: hummingboard-usbotg-vbus {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0
+ >;
+ };
+
+ pinctrl_h100_usdhc2: h100-usdhc2 {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+ >;
+ };
+
+ pinctrl_h100_usdhc2_100mhz: h100-usdhc2-100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_h100_usdhc2_200mhz: h100-usdhc2-200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1f071
+ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
+ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
+ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
+ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
+ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170f9
+ >;
+ };
+ };
+};
+
+&mipi_csi {
+ status = "okay";
+
+ port@0 {
+ mipi_csi2_in: endpoint {
+ remote-endpoint = <&tc358743_out>;
+ data-lanes = <1 2 3 4>;
+ clock-lanes = <0>;
+ clock-noncontinuous;
+ link-frequencies = /bits/ 64 <297000000>;
+ };
+ };
+};
+
+&ssi1 {
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_uart2>;
+ status = "okay";
+};
+
+&usbh1 {
+ disable-over-current;
+ vbus-supply = <&reg_usbh1_vbus>;
+ status = "okay";
+};
+
+&usbotg {
+ disable-over-current;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_usbotg_id>;
+ vbus-supply = <&reg_usbotg_vbus>;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_h100_usdhc2>;
+ pinctrl-1 = <&pinctrl_h100_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_h100_usdhc2_200mhz>;
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_nvcc_sd2>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index 1926b1348a62..d7c8ccb2da95 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -191,7 +191,7 @@
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
- reset-gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+ reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-utilite-pro.dts b/arch/arm/boot/dts/imx6q-utilite-pro.dts
new file mode 100644
index 000000000000..61990630a748
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-utilite-pro.dts
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2013 CompuLab Ltd.
+ * Copyright 2016 Christopher Spinrath
+ *
+ * Based on the devicetree distributed with the vendor kernel for the
+ * Utilite Pro:
+ * Copyright 2013 CompuLab Ltd.
+ * Author: Valentin Raevsky <valentin@compulab.co.il>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+#include "imx6q-cm-fx6.dts"
+
+/ {
+ model = "CompuLab Utilite Pro";
+ compatible = "compulab,utilite-pro", "compulab,cm-fx6", "fsl,imx6q";
+
+ aliases {
+ ethernet1 = &eth1;
+ rtc0 = &em3027;
+ rtc1 = &snvs_rtc;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_keys>;
+
+ power {
+ label = "Power Button";
+ gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ gpio-key,wakeup;
+ };
+ };
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c2>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "at24,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+
+ em3027: rtc@56 {
+ compatible = "emmicro,em3027";
+ reg = <0x56>;
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_gpio_keys: gpio_keysgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x1b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_7__UART2_TX_DATA 0x1b0b1
+ MX6QDL_PAD_GPIO_8__UART2_RX_DATA 0x1b0b1
+ MX6QDL_PAD_SD4_DAT5__UART2_RTS_B 0x1b0b1
+ MX6QDL_PAD_SD4_DAT6__UART2_CTS_B 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp-100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170B9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100B9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170B9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170B9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170B9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170B9
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp-200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x170F9
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x100F9
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170F9
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170F9
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170F9
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170F9
+ >;
+ };
+};
+
+&pcie {
+ pcie@0,0 {
+ reg = <0x000000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ /* non-removable i211 ethernet card */
+ eth1: intel,i211@pcie0,0 {
+ reg = <0x010000 0 0 0 0>;
+ };
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ no-1-8-v;
+ broken-cd;
+ keep-power-in-suspend;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 922b1dd06fda..315e033ff1d8 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -423,7 +423,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_dte &pinctrl_uart1_ctrl>;
fsl,dte-mode;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "disabled";
};
@@ -431,7 +431,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2_dte>;
fsl,dte-mode;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
index 865c9a264a43..edbce222c782 100644
--- a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
@@ -254,7 +254,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3 &pinctrl_gsm>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index ecbc6eba6a2c..54f4f0193f2b 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -143,14 +143,14 @@
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart4>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart5>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 7d81100e7d47..7fff02c406f2 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -351,7 +351,7 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
@@ -364,7 +364,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-microsom.dtsi b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
index 86460e46d055..3d62401dbd7f 100644
--- a/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
@@ -143,7 +143,7 @@
&uart4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
index e456b5cc1b03..cfd50ea1ed48 100644
--- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
@@ -250,6 +250,7 @@
txd3-skew-ps = <0>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
@@ -591,7 +592,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 657da6b6ccd2..9677bf323823 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -385,6 +385,7 @@
txd3-skew-ps = <0>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 73915db704a0..97d9c333902b 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -287,6 +287,7 @@
txd3-skew-ps = <0>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index d354d406954d..6aa193fb283f 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -155,6 +155,7 @@
phy-mode = "rgmii";
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index c47fe6c79b36..f65fdfc2536d 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -273,6 +273,7 @@
txd3-skew-ps = <0>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 5248e7bd2b06..d77ea9423bbc 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -501,6 +501,12 @@
MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17059
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_1__WDOG2_B 0x1b0b0
+ >;
+ };
};
gpio_leds {
@@ -533,7 +539,7 @@
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
- reset-gpio = <&gpio7 12 0>;
+ reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -596,3 +602,14 @@
no-1-8-v;
status = "okay";
};
+
+&wdog1 {
+ status = "disabled";
+};
+
+&wdog2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index 39b85aef93e1..ac9529f85593 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -689,21 +689,21 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1 &pinctrl_uart1_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2 &pinctrl_uart2_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3 &pinctrl_uart3_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 8e7c40e114dd..3ffe00c557f1 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -211,6 +211,7 @@
phy-reset-gpios = <&gpio3 29 0>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,err006687-workaround-present;
status = "okay";
};
@@ -233,7 +234,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index ed613ebe0812..b620ac884cfd 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -185,6 +185,7 @@
cache-level = <2>;
arm,tag-latency = <4 2 3>;
arm,data-latency = <4 2 3>;
+ arm,shared-override;
};
pcie: pcie@0x01000000 {
@@ -1100,6 +1101,7 @@
ocotp: ocotp@021bc000 {
compatible = "fsl,imx6q-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
+ clocks = <&clks IMX6QDL_CLK_IIM>;
};
tzasc@021d0000 { /* TZASC1 */
@@ -1255,7 +1257,7 @@
#size-cells = <0>;
reg = <3>;
- ipu1_di0_disp1: disp1-endpoint {
+ ipu1_di1_disp1: disp1-endpoint {
};
ipu1_di1_hdmi: hdmi-endpoint {
diff --git a/arch/arm/boot/dts/imx6sl-warp.dts b/arch/arm/boot/dts/imx6sl-warp.dts
index 058bcdceb81a..72c7745f51d3 100644
--- a/arch/arm/boot/dts/imx6sl-warp.dts
+++ b/arch/arm/boot/dts/imx6sl-warp.dts
@@ -84,7 +84,7 @@
&uart5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart5>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index d12b250342a6..542515089b1e 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -461,7 +461,7 @@
<0 54 IRQ_TYPE_LEVEL_HIGH>,
<0 127 IRQ_TYPE_LEVEL_HIGH>;
- regulator-1p1@110 {
+ regulator-1p1 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd1p1";
regulator-min-microvolt = <800000>;
@@ -475,7 +475,7 @@
anatop-max-voltage = <1375000>;
};
- regulator-3p0@120 {
+ regulator-3p0 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd3p0";
regulator-min-microvolt = <2800000>;
@@ -489,7 +489,7 @@
anatop-max-voltage = <3400000>;
};
- regulator-2p5@130 {
+ regulator-2p5 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd2p5";
regulator-min-microvolt = <2100000>;
@@ -503,7 +503,7 @@
anatop-max-voltage = <2850000>;
};
- reg_arm: regulator-vddcore@140 {
+ reg_arm: regulator-vddcore {
compatible = "fsl,anatop-regulator";
regulator-name = "vddarm";
regulator-min-microvolt = <725000>;
@@ -520,7 +520,7 @@
anatop-max-voltage = <1450000>;
};
- reg_pu: regulator-vddpu@140 {
+ reg_pu: regulator-vddpu {
compatible = "fsl,anatop-regulator";
regulator-name = "vddpu";
regulator-min-microvolt = <725000>;
@@ -537,7 +537,7 @@
anatop-max-voltage = <1450000>;
};
- reg_soc: regulator-vddsoc@140 {
+ reg_soc: regulator-vddsoc {
compatible = "fsl,anatop-regulator";
regulator-name = "vddsoc";
regulator-min-microvolt = <725000>;
@@ -853,6 +853,7 @@
ocotp: ocotp@021bc000 {
compatible = "fsl,imx6sl-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
+ clocks = <&clks IMX6SL_CLK_OCOTP>;
};
audmux: audmux@021d8000 {
diff --git a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
index ba62348d8284..9b817f3501a6 100644
--- a/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
+++ b/arch/arm/boot/dts/imx6sx-nitrogen6sx.dts
@@ -326,7 +326,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index e5eafe4d9a70..9d70cfd40aff 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -273,7 +273,7 @@
&uart5 { /* for bluetooth */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart5>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
@@ -322,6 +322,12 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
imx6x-sdb {
pinctrl_audmux: audmuxgrp {
@@ -588,5 +594,11 @@
MX6SX_PAD_SD4_DATA6__GPIO6_IO_20 0x17059 /* WP */
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6SX_PAD_GPIO1_IO13__WDOG1_WDOG_ANY 0x30b0
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 6a993bfda248..2863c52be6f5 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -547,7 +547,7 @@
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- regulator-1p1@110 {
+ regulator-1p1 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd1p1";
regulator-min-microvolt = <800000>;
@@ -561,7 +561,7 @@
anatop-max-voltage = <1375000>;
};
- regulator-3p0@120 {
+ regulator-3p0 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd3p0";
regulator-min-microvolt = <2800000>;
@@ -575,7 +575,7 @@
anatop-max-voltage = <3400000>;
};
- regulator-2p5@130 {
+ regulator-2p5 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd2p5";
regulator-min-microvolt = <2100000>;
@@ -589,7 +589,7 @@
anatop-max-voltage = <2875000>;
};
- reg_arm: regulator-vddcore@140 {
+ reg_arm: regulator-vddcore {
compatible = "fsl,anatop-regulator";
regulator-name = "vddarm";
regulator-min-microvolt = <725000>;
@@ -606,7 +606,7 @@
anatop-max-voltage = <1450000>;
};
- reg_pcie: regulator-vddpcie@140 {
+ reg_pcie: regulator-vddpcie {
compatible = "fsl,anatop-regulator";
regulator-name = "vddpcie";
regulator-min-microvolt = <725000>;
@@ -622,7 +622,7 @@
anatop-max-voltage = <1450000>;
};
- reg_soc: regulator-vddsoc@140 {
+ reg_soc: regulator-vddsoc {
compatible = "fsl,anatop-regulator";
regulator-name = "vddsoc";
regulator-min-microvolt = <725000>;
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index 668a72997590..e281d5087d4a 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -22,6 +22,14 @@
reg = <0x80000000 0x20000000>;
};
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ status = "okay";
+ };
+
regulators {
compatible = "simple-bus";
#address-cells = <1>;
@@ -125,6 +133,46 @@
};
};
+
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcdif_dat
+ &pinctrl_lcdif_ctrl>;
+ display = <&display0>;
+ status = "okay";
+
+ display0: display {
+ bits-per-pixel = <16>;
+ bus-width = <24>;
+
+ display-timings {
+ native-mode = <&timing0>;
+
+ timing0: timing0 {
+ clock-frequency = <9200000>;
+ hactive = <480>;
+ vactive = <272>;
+ hfront-porch = <8>;
+ hback-porch = <4>;
+ hsync-len = <41>;
+ vback-porch = <2>;
+ vfront-porch = <4>;
+ vsync-len = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
&qspi {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_qspi>;
@@ -146,6 +194,7 @@
<&clks IMX6UL_CLK_SAI2>;
assigned-clock-parents = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
assigned-clock-rates = <0>, <12288000>;
+ fsl,sai-mclk-direction-output;
status = "okay";
};
@@ -171,7 +220,7 @@
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
@@ -207,6 +256,12 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
pinctrl-names = "default";
@@ -435,4 +490,10 @@
MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
>;
};
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6UL_PAD_LCD_RESET__WDOG1_WDOG_ANY 0x30b0
+ >;
+ };
};
diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
index 8ce1fec36e86..86f68faded0e 100644
--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
@@ -151,8 +151,8 @@
phy-mode = "rmii";
phy-handle = <&ethphy1>;
status = "okay";
- phy-reset-gpios = <&gpio2 15 GPIO_ACTIVE_LOW>;
- phy-reset-duration = <11>;
+ phy-reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <1>;
mdio {
#address-cells = <1>;
@@ -286,7 +286,7 @@
&uart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts b/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
index d25899b71575..7c5dd1b316ca 100644
--- a/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
+++ b/arch/arm/boot/dts/imx6ul-tx6ul-mainboard.dts
@@ -146,12 +146,12 @@
&uart1 {
pinctrl-0 = <&pinctrl_uart1>;
- /delete-property/ fsl,uart-has-rtscts;
+ /delete-property/ uart-has-rtscts;
};
&uart2 {
pinctrl-0 = <&pinctrl_uart2>;
- /delete-property/ fsl,uart-has-rtscts;
+ /delete-property/ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
index 437e9aad5920..530e9ca13a74 100644
--- a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
@@ -563,21 +563,21 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1 &pinctrl_uart1_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2 &pinctrl_uart2_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
&uart5 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart5 &pinctrl_uart5_rtscts>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 4356b655ef02..33b95d78831a 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -36,6 +36,9 @@
serial5 = &uart6;
serial6 = &uart7;
serial7 = &uart8;
+ sai1 = &sai1;
+ sai2 = &sai2;
+ sai3 = &sai3;
spi0 = &ecspi1;
spi1 = &ecspi2;
spi2 = &ecspi3;
@@ -512,7 +515,7 @@
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- reg_3p0: regulator-3p0@120 {
+ reg_3p0: regulator-3p0 {
compatible = "fsl,anatop-regulator";
regulator-name = "vdd3p0";
regulator-min-microvolt = <2625000>;
@@ -526,7 +529,7 @@
anatop-enable-bit = <0>;
};
- reg_arm: regulator-vddcore@140 {
+ reg_arm: regulator-vddcore {
compatible = "fsl,anatop-regulator";
regulator-name = "cpu";
regulator-min-microvolt = <725000>;
@@ -543,7 +546,7 @@
anatop-max-voltage = <1450000>;
};
- reg_soc: regulator-vddsoc@140 {
+ reg_soc: regulator-vddsoc {
compatible = "fsl,anatop-regulator";
regulator-name = "vddsoc";
regulator-min-microvolt = <725000>;
diff --git a/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
new file mode 100644
index 000000000000..1545661df583
--- /dev/null
+++ b/arch/arm/boot/dts/imx7-colibri-eval-v3.dtsi
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&bl {
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ status = "okay";
+};
+
+&adc1 {
+ status = "okay";
+};
+
+&adc2 {
+ status = "okay";
+};
+
+&fec1 {
+ status = "okay";
+};
+
+&i2c4 {
+ status = "okay";
+
+ /* M41T0M6 real time clock on carrier board */
+ rtc: m41t0m6@68 {
+ compatible = "st,m41t00";
+ reg = <0x68>;
+ };
+};
+
+&lcdif {
+ display = <&display0>;
+ status = "okay";
+
+ display0: lcd-display {
+ bits-per-pixel = <16>;
+ bus-width = <18>;
+
+ display-timings {
+ native-mode = <&timing_vga>;
+
+ /* Standard VGA timing */
+ timing_vga: 640x480 {
+ clock-frequency = <25175000>;
+ hactive = <640>;
+ vactive = <480>;
+ hback-porch = <40>;
+ hfront-porch = <24>;
+ vback-porch = <32>;
+ vfront-porch = <11>;
+ hsync-len = <96>;
+ vsync-len = <2>;
+ de-active = <1>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&pwm3 {
+ status = "okay";
+};
+
+&pwm4 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ status = "okay";
+};
+
+&usbotg1 {
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1 &pinctrl_cd_usdhc1>;
+ no-1-8-v;
+ cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ keep-power-in-suspend;
+ wakeup-source;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
new file mode 100644
index 000000000000..0a9d3a822fc0
--- /dev/null
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+ bl: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 5000000>;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_vref_1v8: regulator-vref-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+};
+
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+};
+
+&adc2 {
+ vref-supply = <&reg_vref_1v8>;
+};
+
+&cpu0 {
+ arm-supply = <&reg_DCDC2>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ <&clks IMX7D_ENET1_TIME_ROOT_CLK>,
+ <&clks IMX7D_PLL_ENET_MAIN_50M_CLK>;
+ clock-names = "ipg", "ahb", "ptp", "enet_clk_ref";
+ assigned-clocks = <&clks IMX7D_ENET1_TIME_ROOT_SRC>,
+ <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+ phy-mode = "rmii";
+ phy-supply = <&reg_LDO1>;
+ fsl,magic-packet;
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1 &pinctrl_i2c1_int>;
+ status = "okay";
+
+ ad7879@2c {
+ compatible = "adi,ad7879-1";
+ reg = <0x2c>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+
+ pmic@33 {
+ compatible = "ricoh,rn5t567";
+ reg = <0x33>;
+
+ regulators {
+ reg_DCDC1: DCDC1 { /* V1.0_SOC */
+ regulator-min-microvolt = <975000>;
+ regulator-max-microvolt = <1125000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_DCDC2: DCDC2 { /* V1.1_ARM */
+ regulator-min-microvolt = <975000>;
+ regulator-max-microvolt = <1125000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_DCDC3: DCDC3 { /* V1.8 */
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <1825000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_DCDC4: DCDC4 { /* V1.35_DRAM */
+ regulator-min-microvolt = <1325000>;
+ regulator-max-microvolt = <1375000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_LDO1: LDO1 { /* PWR_EN_+V3.3_ETH */
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_LDO2: LDO2 { /* +V1.8_SD */
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <3325000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_LDO3: LDO3 { /* PWR_EN_+V3.3_LPSR */
+ regulator-min-microvolt = <3275000>;
+ regulator-max-microvolt = <3325000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_LDO4: LDO4 { /* V1.8_LPSR */
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <1825000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_LDO5: LDO5 { /* PWR_EN_+V3.3 */
+ regulator-min-microvolt = <1775000>;
+ regulator-max-microvolt = <1825000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+};
+
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcdif_dat
+ &pinctrl_lcdif_ctrl>;
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+};
+
+&pwm2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>;
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+};
+
+&reg_1p0d {
+ vin-supply = <&reg_DCDC3>;
+};
+
+&snvs_pwrkey {
+ status = "disabled";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1 &pinctrl_uart1_ctrl1 &pinctrl_uart1_ctrl2>;
+ assigned-clocks = <&clks IMX7D_UART1_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ uart-has-rtscts;
+ fsl,dte-mode;
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clks IMX7D_UART2_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ uart-has-rtscts;
+ fsl,dte-mode;
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ assigned-clocks = <&clks IMX7D_UART3_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ fsl,dte-mode;
+};
+
+&usbotg1 {
+ dr_mode = "host";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio1 &pinctrl_gpio2 &pinctrl_gpio3 &pinctrl_gpio4>;
+
+ pinctrl_gpio1: gpio1-grp {
+ fsl,pins = <
+ MX7D_PAD_ENET1_RGMII_RD3__GPIO7_IO3 0x14 /* SODIMM 55 */
+ MX7D_PAD_ENET1_RGMII_RD2__GPIO7_IO2 0x14 /* SODIMM 63 */
+ MX7D_PAD_SD1_RESET_B__GPIO5_IO2 0x14 /* SODIMM 73 */
+ MX7D_PAD_SAI1_RX_SYNC__GPIO6_IO16 0x14 /* SODIMM 77 */
+ MX7D_PAD_EPDC_DATA09__GPIO2_IO9 0x14 /* SODIMM 89 */
+ MX7D_PAD_EPDC_DATA08__GPIO2_IO8 0x14 /* SODIMM 91 */
+ MX7D_PAD_LCD_RESET__GPIO3_IO4 0x14 /* SODIMM 93 */
+ MX7D_PAD_EPDC_DATA13__GPIO2_IO13 0x14 /* SODIMM 95 */
+ MX7D_PAD_ENET1_RGMII_TXC__GPIO7_IO11 0x14 /* SODIMM 99 */
+ MX7D_PAD_EPDC_DATA10__GPIO2_IO10 0x14 /* SODIMM 105 */
+ MX7D_PAD_EPDC_DATA15__GPIO2_IO15 0x14 /* SODIMM 107 */
+ MX7D_PAD_EPDC_DATA00__GPIO2_IO0 0x14 /* SODIMM 111 */
+ MX7D_PAD_EPDC_DATA01__GPIO2_IO1 0x14 /* SODIMM 113 */
+ MX7D_PAD_EPDC_DATA02__GPIO2_IO2 0x14 /* SODIMM 115 */
+ MX7D_PAD_EPDC_DATA03__GPIO2_IO3 0x14 /* SODIMM 117 */
+ MX7D_PAD_EPDC_DATA04__GPIO2_IO4 0x14 /* SODIMM 119 */
+ MX7D_PAD_EPDC_DATA05__GPIO2_IO5 0x14 /* SODIMM 121 */
+ MX7D_PAD_EPDC_DATA06__GPIO2_IO6 0x14 /* SODIMM 123 */
+ MX7D_PAD_EPDC_DATA07__GPIO2_IO7 0x14 /* SODIMM 125 */
+ MX7D_PAD_EPDC_SDCE2__GPIO2_IO22 0x14 /* SODIMM 127 */
+ MX7D_PAD_UART3_RTS_B__GPIO4_IO6 0x14 /* SODIMM 131 */
+ MX7D_PAD_EPDC_GDRL__GPIO2_IO26 0x14 /* SODIMM 133 */
+ MX7D_PAD_SAI1_RX_BCLK__GPIO6_IO17 0x14 /* SODIMM 24 */
+ MX7D_PAD_SD2_DATA2__GPIO5_IO16 0x14 /* SODIMM 100 */
+ MX7D_PAD_SD2_DATA3__GPIO5_IO17 0x14 /* SODIMM 102 */
+ MX7D_PAD_EPDC_GDSP__GPIO2_IO27 0x14 /* SODIMM 104 */
+ MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x14 /* SODIMM 106 */
+ MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x14 /* SODIMM 110 */
+ MX7D_PAD_EPDC_PWR_COM__GPIO2_IO30 0x14 /* SODIMM 112 */
+ MX7D_PAD_EPDC_SDCLK__GPIO2_IO16 0x14 /* SODIMM 114 */
+ MX7D_PAD_EPDC_SDLE__GPIO2_IO17 0x14 /* SODIMM 116 */
+ MX7D_PAD_EPDC_SDOE__GPIO2_IO18 0x14 /* SODIMM 118 */
+ MX7D_PAD_EPDC_SDSHR__GPIO2_IO19 0x14 /* SODIMM 120 */
+ MX7D_PAD_EPDC_SDCE0__GPIO2_IO20 0x14 /* SODIMM 122 */
+ MX7D_PAD_EPDC_SDCE1__GPIO2_IO21 0x14 /* SODIMM 124 */
+ MX7D_PAD_EPDC_DATA14__GPIO2_IO14 0x14 /* SODIMM 126 */
+ MX7D_PAD_EPDC_PWR_STAT__GPIO2_IO31 0x14 /* SODIMM 128 */
+ MX7D_PAD_EPDC_SDCE3__GPIO2_IO23 0x14 /* SODIMM 130 */
+ MX7D_PAD_EPDC_GDCLK__GPIO2_IO24 0x14 /* SODIMM 132 */
+ MX7D_PAD_EPDC_GDOE__GPIO2_IO25 0x14 /* SODIMM 134 */
+ MX7D_PAD_EPDC_DATA12__GPIO2_IO12 0x14 /* SODIMM 150 */
+ MX7D_PAD_EPDC_DATA11__GPIO2_IO11 0x14 /* SODIMM 152 */
+ MX7D_PAD_SD2_CLK__GPIO5_IO12 0x14 /* SODIMM 184 */
+ MX7D_PAD_SD2_CMD__GPIO5_IO13 0x14 /* SODIMM 186 */
+ >;
+ };
+
+ pinctrl_gpio2: gpio2-grp { /* On X22 Camera interface */
+ fsl,pins = <
+ MX7D_PAD_ECSPI2_SS0__GPIO4_IO23 0x14 /* SODIMM 65 */
+ MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x14 /* SODIMM 69 */
+ MX7D_PAD_SD1_WP__GPIO5_IO1 0x14 /* SODIMM 71 */
+ MX7D_PAD_I2C4_SDA__GPIO4_IO15 0x14 /* SODIMM 75 */
+ MX7D_PAD_ECSPI1_MISO__GPIO4_IO18 0x14 /* SODIMM 79 */
+ MX7D_PAD_I2C3_SCL__GPIO4_IO12 0x14 /* SODIMM 81 */
+ MX7D_PAD_ECSPI2_MISO__GPIO4_IO22 0x14 /* SODIMM 85 */
+ MX7D_PAD_ECSPI1_SS0__GPIO4_IO19 0x14 /* SODIMM 97 */
+ MX7D_PAD_ECSPI1_SCLK__GPIO4_IO16 0x14 /* SODIMM 101 */
+ MX7D_PAD_ECSPI1_MOSI__GPIO4_IO17 0x14 /* SODIMM 103 */
+ MX7D_PAD_I2C3_SDA__GPIO4_IO13 0x14 /* SODIMM 94 */
+ MX7D_PAD_I2C4_SCL__GPIO4_IO14 0x14 /* SODIMM 96 */
+ MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x14 /* SODIMM 98 */
+ >;
+ };
+
+ pinctrl_gpio3: gpio3-grp { /* LCD 18-23 */
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA18__GPIO3_IO23 0x14 /* SODIMM 136 */
+ MX7D_PAD_LCD_DATA19__GPIO3_IO24 0x14 /* SODIMM 138 */
+ MX7D_PAD_LCD_DATA20__GPIO3_IO25 0x14 /* SODIMM 140 */
+ MX7D_PAD_LCD_DATA21__GPIO3_IO26 0x14 /* SODIMM 142 */
+ MX7D_PAD_LCD_DATA22__GPIO3_IO27 0x14 /* SODIMM 146 */
+ MX7D_PAD_LCD_DATA23__GPIO3_IO28 0x14 /* SODIMM 148 */
+ >;
+ };
+
+ pinctrl_gpio4: gpio4-grp { /* Alternatively CAN2 */
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO15__GPIO1_IO15 0x14 /* SODIMM 178 */
+ MX7D_PAD_GPIO1_IO14__GPIO1_IO14 0x14 /* SODIMM 188 */
+ >;
+ };
+
+ pinctrl_i2c1_int: i2c1-int-grp { /* PMIC / TOUCH */
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x79
+ >;
+ };
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x14
+ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x73
+ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x73
+ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x73
+ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RX_ER 0x73
+
+ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x73
+ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x73
+ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x73
+ MX7D_PAD_GPIO1_IO12__CCM_ENET_REF_CLK1 0x73
+ MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3
+ MX7D_PAD_SD2_WP__ENET1_MDC 0x3
+ >;
+ };
+
+ pinctrl_ecspi3_cs: ecspi3-cs-grp {
+ fsl,pins = <
+ MX7D_PAD_I2C2_SDA__GPIO4_IO11 0x14
+ >;
+ };
+
+ pinctrl_ecspi3: ecspi3-grp {
+ fsl,pins = <
+ MX7D_PAD_I2C1_SCL__ECSPI3_MISO 0x2
+ MX7D_PAD_I2C1_SDA__ECSPI3_MOSI 0x2
+ MX7D_PAD_I2C2_SCL__ECSPI3_SCLK 0x2
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59
+ MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59
+ >;
+ };
+
+ pinctrl_gpmi_nand: gpmi-nand-grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CLK__NAND_CLE 0x71
+ MX7D_PAD_SD3_CMD__NAND_ALE 0x71
+ MX7D_PAD_SAI1_TX_BCLK__NAND_CE0_B 0x71
+ MX7D_PAD_SAI1_RX_DATA__NAND_CE1_B 0x71
+ MX7D_PAD_SAI1_TX_DATA__NAND_READY_B 0x74
+ MX7D_PAD_SD3_STROBE__NAND_RE_B 0x71
+ MX7D_PAD_SD3_RESET_B__NAND_WE_B 0x71
+ MX7D_PAD_SD3_DATA0__NAND_DATA00 0x71
+ MX7D_PAD_SD3_DATA1__NAND_DATA01 0x71
+ MX7D_PAD_SD3_DATA2__NAND_DATA02 0x71
+ MX7D_PAD_SD3_DATA3__NAND_DATA03 0x71
+ MX7D_PAD_SD3_DATA4__NAND_DATA04 0x71
+ MX7D_PAD_SD3_DATA5__NAND_DATA05 0x71
+ MX7D_PAD_SD3_DATA6__NAND_DATA06 0x71
+ MX7D_PAD_SD3_DATA7__NAND_DATA07 0x71
+ >;
+ };
+
+ pinctrl_i2c4: i2c4-grp {
+ fsl,pins = <
+ MX7D_PAD_ENET1_RGMII_TD3__I2C4_SDA 0x4000007f
+ MX7D_PAD_ENET1_RGMII_TD2__I2C4_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_lcdif_dat: lcdif-dat-grp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA00__LCD_DATA0 0x79
+ MX7D_PAD_LCD_DATA01__LCD_DATA1 0x79
+ MX7D_PAD_LCD_DATA02__LCD_DATA2 0x79
+ MX7D_PAD_LCD_DATA03__LCD_DATA3 0x79
+ MX7D_PAD_LCD_DATA04__LCD_DATA4 0x79
+ MX7D_PAD_LCD_DATA05__LCD_DATA5 0x79
+ MX7D_PAD_LCD_DATA06__LCD_DATA6 0x79
+ MX7D_PAD_LCD_DATA07__LCD_DATA7 0x79
+ MX7D_PAD_LCD_DATA08__LCD_DATA8 0x79
+ MX7D_PAD_LCD_DATA09__LCD_DATA9 0x79
+ MX7D_PAD_LCD_DATA10__LCD_DATA10 0x79
+ MX7D_PAD_LCD_DATA11__LCD_DATA11 0x79
+ MX7D_PAD_LCD_DATA12__LCD_DATA12 0x79
+ MX7D_PAD_LCD_DATA13__LCD_DATA13 0x79
+ MX7D_PAD_LCD_DATA14__LCD_DATA14 0x79
+ MX7D_PAD_LCD_DATA15__LCD_DATA15 0x79
+ MX7D_PAD_LCD_DATA16__LCD_DATA16 0x79
+ MX7D_PAD_LCD_DATA17__LCD_DATA17 0x79
+ >;
+ };
+
+ pinctrl_lcdif_dat_24: lcdif-dat-24-grp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA18__LCD_DATA18 0x79
+ MX7D_PAD_LCD_DATA19__LCD_DATA19 0x79
+ MX7D_PAD_LCD_DATA20__LCD_DATA20 0x79
+ MX7D_PAD_LCD_DATA21__LCD_DATA21 0x79
+ MX7D_PAD_LCD_DATA22__LCD_DATA22 0x79
+ MX7D_PAD_LCD_DATA23__LCD_DATA23 0x79
+ >;
+ };
+
+ pinctrl_lcdif_ctrl: lcdif-ctrl-grp {
+ fsl,pins = <
+ MX7D_PAD_LCD_CLK__LCD_CLK 0x79
+ MX7D_PAD_LCD_ENABLE__LCD_ENABLE 0x79
+ MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79
+ MX7D_PAD_LCD_HSYNC__LCD_HSYNC 0x79
+ >;
+ };
+
+ pinctrl_pwm1: pwm1-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO08__PWM1_OUT 0x79
+ >;
+ };
+
+ pinctrl_pwm2: pwm2-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO09__PWM2_OUT 0x79
+ >;
+ };
+
+ pinctrl_pwm3: pwm3-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO10__PWM3_OUT 0x79
+ >;
+ };
+
+ pinctrl_pwm4: pwm4-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO11__PWM4_OUT 0x79
+ >;
+ };
+
+ pinctrl_uart1: uart1-grp {
+ fsl,pins = <
+ MX7D_PAD_UART1_TX_DATA__UART1_DTE_RX 0x79
+ MX7D_PAD_UART1_RX_DATA__UART1_DTE_TX 0x79
+ MX7D_PAD_SAI2_TX_BCLK__UART1_DTE_CTS 0x79
+ MX7D_PAD_SAI2_TX_SYNC__UART1_DTE_RTS 0x79
+ >;
+ };
+
+ pinctrl_uart1_ctrl1: uart1-ctrl1-grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_DATA1__GPIO5_IO15 0x14 /* DCD */
+ MX7D_PAD_SD2_DATA0__GPIO5_IO14 0x14 /* DTR */
+ >;
+ };
+
+ pinctrl_uart2: uart2-grp {
+ fsl,pins = <
+ MX7D_PAD_UART2_TX_DATA__UART2_DTE_RX 0x79
+ MX7D_PAD_UART2_RX_DATA__UART2_DTE_TX 0x79
+ MX7D_PAD_SAI2_RX_DATA__UART2_DTE_RTS 0x79
+ MX7D_PAD_SAI2_TX_DATA__UART2_DTE_CTS 0x79
+ >;
+ };
+
+ pinctrl_uart3: uart3-grp {
+ fsl,pins = <
+ MX7D_PAD_UART3_TX_DATA__UART3_DTE_RX 0x79
+ MX7D_PAD_UART3_RX_DATA__UART3_DTE_TX 0x79
+ >;
+ };
+
+ pinctrl_usbotg2_reg: gpio-usbotg2-vbus {
+ fsl,pins = <
+ MX7D_PAD_UART3_CTS_B__GPIO4_IO7 0x14 /* SODIMM 129 USBH PEN */
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1-grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x59
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x19
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59
+ >;
+ };
+
+ pinctrl_sai1: sai1-grp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_MCLK__SAI1_MCLK 0x1f
+ MX7D_PAD_ENET1_RX_CLK__SAI1_TX_BCLK 0x1f
+ MX7D_PAD_SAI1_TX_SYNC__SAI1_TX_SYNC 0x1f
+ MX7D_PAD_ENET1_COL__SAI1_TX_DATA0 0x30
+ MX7D_PAD_ENET1_TX_CLK__SAI1_RX_DATA0 0x1f
+ >;
+ };
+};
+
+&iomuxc_lpsr {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_lpsr>;
+
+ pinctrl_gpio_lpsr: gpio1-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO01__GPIO1_IO1 0x59
+ MX7D_PAD_GPIO1_IO02__GPIO1_IO2 0x59
+ MX7D_PAD_GPIO1_IO03__GPIO1_IO3 0x59
+ >;
+ };
+
+ pinctrl_i2c1: i2c1-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO05__I2C1_SDA 0x4000007f
+ MX7D_PAD_GPIO1_IO04__I2C1_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_cd_usdhc1: usdhc1-cd-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO00__GPIO1_IO0 0x59 /* CD */
+ >;
+ };
+
+ pinctrl_uart1_ctrl2: uart1-ctrl2-grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO07__GPIO1_IO7 0x14 /* DSR */
+ MX7D_PAD_GPIO1_IO06__GPIO1_IO6 0x14 /* RI */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 48634519d13a..58b09bf1ba2d 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -12,7 +12,6 @@
/dts-v1/;
-#include <dt-bindings/input/input.h>
#include "imx7d.dtsi"
/ {
diff --git a/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts b/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
new file mode 100644
index 000000000000..bd01d2cc642d
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-colibri-eval-v3.dts
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx7d-colibri.dtsi"
+#include "imx7-colibri-eval-v3.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX7D on Colibri Evaluation Board V3";
+ compatible = "toradex,colibri-imx7d-eval-v3", "toradex,colibri-imx7d",
+ "fsl,imx7d";
+
+ reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg2_reg>;
+ regulator-name = "VCC_USB[1-4]";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&usbotg2 {
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7d-colibri.dtsi b/arch/arm/boot/dts/imx7d-colibri.dtsi
new file mode 100644
index 000000000000..3c2cb502b388
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-colibri.dtsi
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx7d.dtsi"
+#include "imx7-colibri.dtsi"
+
+/ {
+ memory {
+ reg = <0x80000000 0x20000000>;
+ };
+};
+
+&usbotg2 {
+ dr_mode = "host";
+};
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index 1ce97800f0c5..ce08f180f213 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -42,7 +42,6 @@
/dts-v1/;
-#include <dt-bindings/input/input.h>
#include "imx7d.dtsi"
/ {
@@ -392,7 +391,7 @@
pinctrl-0 = <&pinctrl_uart6>;
assigned-clocks = <&clks IMX7D_UART6_ROOT_SRC>;
assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
- fsl,uart-has-rtscts;
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-pinfunc.h b/arch/arm/boot/dts/imx7d-pinfunc.h
index eeda78347619..3f9f0d9c8094 100644
--- a/arch/arm/boot/dts/imx7d-pinfunc.h
+++ b/arch/arm/boot/dts/imx7d-pinfunc.h
@@ -594,7 +594,7 @@
#define MX7D_PAD_UART2_RX_DATA__GPIO4_IO2 0x0130 0x03A0 0x0000 0x5 0x0
#define MX7D_PAD_UART2_RX_DATA__ENET2_MDIO 0x0130 0x03A0 0x0000 0x6 0x0
#define MX7D_PAD_UART2_TX_DATA__UART2_DCE_TX 0x0134 0x03A4 0x0000 0x0 0x0
-#define MX7D_PAD_UART2_TX_DATA__UART2_DTE_RX 0x0134 0x03A4 0x0000 0x0 0x0
+#define MX7D_PAD_UART2_TX_DATA__UART2_DTE_RX 0x0134 0x03A4 0x06FC 0x0 0x3
#define MX7D_PAD_UART2_TX_DATA__I2C2_SDA 0x0134 0x03A4 0x05E0 0x1 0x0
#define MX7D_PAD_UART2_TX_DATA__SAI3_RX_DATA0 0x0134 0x03A4 0x06C8 0x2 0x0
#define MX7D_PAD_UART2_TX_DATA__ECSPI1_RDY 0x0134 0x03A4 0x0000 0x3 0x0
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index b267f79e3059..95ee268ed510 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -42,7 +42,6 @@
/dts-v1/;
-#include <dt-bindings/input/input.h>
#include "imx7d.dtsi"
/ {
@@ -111,6 +110,32 @@
arm-supply = <&sw1a_reg>;
};
+&ecspi3 {
+ fsl,spi-num-chipselects = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi3>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ tsc2046@0 {
+ compatible = "ti,tsc2046";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tsc2046_pendown>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <29 0>;
+ pendown-gpio = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ ti,x-min = /bits/ 16 <0>;
+ ti,x-max = /bits/ 16 <0>;
+ ti,y-min = /bits/ 16 <0>;
+ ti,y-max = /bits/ 16 <0>;
+ ti,pressure-max = /bits/ 16 <0>;
+ ti,x-plat-ohms = /bits/ 16 <400>;
+ wakeup-source;
+ };
+};
+
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
@@ -272,6 +297,44 @@
};
};
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcdif>;
+ display = <&display0>;
+ status = "okay";
+
+ display0: display {
+ bits-per-pixel = <16>;
+ bus-width = <24>;
+
+ display-timings {
+ native-mode = <&timing0>;
+
+ timing0: timing0 {
+ clock-frequency = <9200000>;
+ hactive = <480>;
+ vactive = <272>;
+ hfront-porch = <8>;
+ hback-porch = <4>;
+ hsync-len = <41>;
+ vback-porch = <2>;
+ vfront-porch = <4>;
+ vsync-len = <10>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <0>;
+ };
+ };
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -314,11 +377,26 @@
status = "okay";
};
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
&iomuxc {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
imx7d-sdb {
+ pinctrl_ecspi3: ecspi3grp {
+ fsl,pins = <
+ MX7D_PAD_SAI2_TX_SYNC__ECSPI3_MISO 0x2
+ MX7D_PAD_SAI2_TX_BCLK__ECSPI3_MOSI 0x2
+ MX7D_PAD_SAI2_RX_DATA__ECSPI3_SCLK 0x2
+ MX7D_PAD_SD2_CD_B__GPIO5_IO9 0x59
+ >;
+ };
+
pinctrl_enet1: enet1grp {
fsl,pins = <
MX7D_PAD_GPIO1_IO10__ENET1_MDIO 0x3
@@ -390,6 +468,52 @@
>;
};
+ pinctrl_lcdif: lcdifgrp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA00__LCD_DATA0 0x79
+ MX7D_PAD_LCD_DATA01__LCD_DATA1 0x79
+ MX7D_PAD_LCD_DATA02__LCD_DATA2 0x79
+ MX7D_PAD_LCD_DATA03__LCD_DATA3 0x79
+ MX7D_PAD_LCD_DATA04__LCD_DATA4 0x79
+ MX7D_PAD_LCD_DATA05__LCD_DATA5 0x79
+ MX7D_PAD_LCD_DATA06__LCD_DATA6 0x79
+ MX7D_PAD_LCD_DATA07__LCD_DATA7 0x79
+ MX7D_PAD_LCD_DATA08__LCD_DATA8 0x79
+ MX7D_PAD_LCD_DATA09__LCD_DATA9 0x79
+ MX7D_PAD_LCD_DATA10__LCD_DATA10 0x79
+ MX7D_PAD_LCD_DATA11__LCD_DATA11 0x79
+ MX7D_PAD_LCD_DATA12__LCD_DATA12 0x79
+ MX7D_PAD_LCD_DATA13__LCD_DATA13 0x79
+ MX7D_PAD_LCD_DATA14__LCD_DATA14 0x79
+ MX7D_PAD_LCD_DATA15__LCD_DATA15 0x79
+ MX7D_PAD_LCD_DATA16__LCD_DATA16 0x79
+ MX7D_PAD_LCD_DATA17__LCD_DATA17 0x79
+ MX7D_PAD_LCD_DATA18__LCD_DATA18 0x79
+ MX7D_PAD_LCD_DATA19__LCD_DATA19 0x79
+ MX7D_PAD_LCD_DATA20__LCD_DATA20 0x79
+ MX7D_PAD_LCD_DATA21__LCD_DATA21 0x79
+ MX7D_PAD_LCD_DATA22__LCD_DATA22 0x79
+ MX7D_PAD_LCD_DATA23__LCD_DATA23 0x79
+ MX7D_PAD_LCD_CLK__LCD_CLK 0x79
+ MX7D_PAD_LCD_ENABLE__LCD_ENABLE 0x79
+ MX7D_PAD_LCD_VSYNC__LCD_VSYNC 0x79
+ MX7D_PAD_LCD_HSYNC__LCD_HSYNC 0x79
+ MX7D_PAD_LCD_RESET__LCD_RESET 0x79
+ >;
+ };
+
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO01__PWM1_OUT 0x110b0
+ >;
+ };
+
+ pinctrl_tsc2046_pendown: tsc2046_pendown {
+ fsl,pins = <
+ MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79
@@ -512,5 +636,10 @@
>;
};
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO00__WDOD1_WDOG_B 0x74
+ >;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 6b3faa298417..51c13cbdffb7 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -1,5 +1,6 @@
/*
* Copyright 2015 Freescale Semiconductor, Inc.
+ * Copyright 2016 Toradex AG
*
* This file is dual-licensed: you can use it either under the terms
* of the GPL or the X11 license, at your option. Note that this dual
@@ -40,54 +41,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <dt-bindings/clock/imx7d-clock.h>
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include "imx7d-pinfunc.h"
-#include "skeleton.dtsi"
+#include "imx7s.dtsi"
/ {
- aliases {
- gpio0 = &gpio1;
- gpio1 = &gpio2;
- gpio2 = &gpio3;
- gpio3 = &gpio4;
- gpio4 = &gpio5;
- gpio5 = &gpio6;
- gpio6 = &gpio7;
- i2c0 = &i2c1;
- i2c1 = &i2c2;
- i2c2 = &i2c3;
- i2c3 = &i2c4;
- mmc0 = &usdhc1;
- mmc1 = &usdhc2;
- mmc2 = &usdhc3;
- serial0 = &uart1;
- serial1 = &uart2;
- serial2 = &uart3;
- serial3 = &uart4;
- serial4 = &uart5;
- serial5 = &uart6;
- serial6 = &uart7;
- };
-
cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- compatible = "arm,cortex-a7";
- device_type = "cpu";
- reg = <0>;
- operating-points = <
- /* KHz uV */
- 996000 1075000
- 792000 975000
- >;
- clock-latency = <61036>; /* two CLK32 periods */
- clocks = <&clks IMX7D_CLK_ARM>;
- };
-
cpu1: cpu@1 {
compatible = "arm,cortex-a7";
device_type = "cpu";
@@ -95,221 +52,6 @@
};
};
- intc: interrupt-controller@31001000 {
- compatible = "arm,cortex-a7-gic";
- #interrupt-cells = <3>;
- interrupt-controller;
- reg = <0x31001000 0x1000>,
- <0x31002000 0x1000>,
- <0x31004000 0x2000>,
- <0x31006000 0x2000>;
- };
-
- ckil: clock-cki {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "ckil";
- };
-
- osc: clock-osc {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "osc";
- };
-
- timer {
- compatible = "arm,armv7-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- interrupt-parent = <&intc>;
- };
-
- etr@30086000 {
- compatible = "arm,coresight-tmc", "arm,primecell";
- reg = <0x30086000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- port {
- etr_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&replicator_out_port1>;
- };
- };
- };
-
- tpiu@30087000 {
- compatible = "arm,coresight-tpiu", "arm,primecell";
- reg = <0x30087000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- port {
- tpiu_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&replicator_out_port1>;
- };
- };
- };
-
- replicator {
- /*
- * non-configurable replicators don't show up on the
- * AMBA bus. As such no need to add "arm,primecell"
- */
- compatible = "arm,coresight-replicator";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* replicator output ports */
- port@0 {
- reg = <0>;
- replicator_out_port0: endpoint {
- remote-endpoint = <&tpiu_in_port>;
- };
- };
-
- port@1 {
- reg = <1>;
- replicator_out_port1: endpoint {
- remote-endpoint = <&etr_in_port>;
- };
- };
-
- /* replicator input port */
- port@2 {
- reg = <0>;
- replicator_in_port0: endpoint {
- slave-mode;
- remote-endpoint = <&etf_out_port>;
- };
- };
- };
- };
-
- etf@30084000 {
- compatible = "arm,coresight-tmc", "arm,primecell";
- reg = <0x30084000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- etf_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&hugo_funnel_out_port0>;
- };
- };
-
- port@1 {
- reg = <0>;
- etf_out_port: endpoint {
- remote-endpoint = <&replicator_in_port0>;
- };
- };
- };
- };
-
- funnel@30083000 {
- compatible = "arm,coresight-funnel", "arm,primecell";
- reg = <0x30083000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* funnel input ports */
- port@0 {
- reg = <0>;
- hugo_funnel_in_port0: endpoint {
- slave-mode;
- remote-endpoint = <&ca_funnel_out_port0>;
- };
- };
-
- port@1 {
- reg = <1>;
- hugo_funnel_in_port1: endpoint {
- slave-mode; /* M4 input */
- };
- };
-
- port@2 {
- reg = <0>;
- hugo_funnel_out_port0: endpoint {
- remote-endpoint = <&etf_in_port>;
- };
- };
-
- /* the other input ports are not connect to anything */
- };
- };
-
- funnel@30041000 {
- compatible = "arm,coresight-funnel", "arm,primecell";
- reg = <0x30041000 0x1000>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* funnel input ports */
- port@0 {
- reg = <0>;
- ca_funnel_in_port0: endpoint {
- slave-mode;
- remote-endpoint = <&etm0_out_port>;
- };
- };
-
- port@1 {
- reg = <1>;
- ca_funnel_in_port1: endpoint {
- slave-mode;
- remote-endpoint = <&etm1_out_port>;
- };
- };
-
- /* funnel output port */
- port@2 {
- reg = <0>;
- ca_funnel_out_port0: endpoint {
- remote-endpoint = <&hugo_funnel_in_port0>;
- };
- };
-
- /* the other input ports are not connect to anything */
- };
- };
-
- etm@3007c000 {
- compatible = "arm,coresight-etm3x", "arm,primecell";
- reg = <0x3007c000 0x1000>;
- cpu = <&cpu0>;
- clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
- clock-names = "apb_pclk";
-
- port {
- etm0_out_port: endpoint {
- remote-endpoint = <&ca_funnel_in_port0>;
- };
- };
- };
-
etm@3007d000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x3007d000 0x1000>;
@@ -330,626 +72,57 @@
};
};
};
+};
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "simple-bus";
- interrupt-parent = <&intc>;
- ranges;
-
- aips1: aips-bus@30000000 {
- compatible = "fsl,aips-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x30000000 0x400000>;
- ranges;
-
- gpio1: gpio@30200000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30200000 0x10000>;
- interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>, /* GPIO1_INT15_0 */
- <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>; /* GPIO1_INT31_16 */
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio2: gpio@30210000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30210000 0x10000>;
- interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio3: gpio@30220000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30220000 0x10000>;
- interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio4: gpio@30230000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30230000 0x10000>;
- interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio5: gpio@30240000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30240000 0x10000>;
- interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio6: gpio@30250000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30250000 0x10000>;
- interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio7: gpio@30260000 {
- compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
- reg = <0x30260000 0x10000>;
- interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- wdog1: wdog@30280000 {
- compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
- reg = <0x30280000 0x10000>;
- interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_WDOG1_ROOT_CLK>;
- };
-
- wdog2: wdog@30290000 {
- compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
- reg = <0x30290000 0x10000>;
- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_WDOG2_ROOT_CLK>;
- status = "disabled";
- };
-
- wdog3: wdog@302a0000 {
- compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
- reg = <0x302a0000 0x10000>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_WDOG3_ROOT_CLK>;
- status = "disabled";
- };
-
- wdog4: wdog@302b0000 {
- compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
- reg = <0x302b0000 0x10000>;
- interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_WDOG4_ROOT_CLK>;
- status = "disabled";
- };
-
- iomuxc_lpsr: iomuxc-lpsr@302c0000 {
- compatible = "fsl,imx7d-iomuxc-lpsr";
- reg = <0x302c0000 0x10000>;
- fsl,input-sel = <&iomuxc>;
- };
-
- gpt1: gpt@302d0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
- reg = <0x302d0000 0x10000>;
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_GPT1_ROOT_CLK>;
- clock-names = "ipg", "per";
- };
-
- gpt2: gpt@302e0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
- reg = <0x302e0000 0x10000>;
- interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_GPT2_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- gpt3: gpt@302f0000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
- reg = <0x302f0000 0x10000>;
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_GPT3_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- gpt4: gpt@30300000 {
- compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
- reg = <0x30300000 0x10000>;
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_GPT4_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- iomuxc: iomuxc@30330000 {
- compatible = "fsl,imx7d-iomuxc";
- reg = <0x30330000 0x10000>;
- };
-
- gpr: iomuxc-gpr@30340000 {
- compatible = "fsl,imx7d-iomuxc-gpr", "syscon";
- reg = <0x30340000 0x10000>;
- };
-
- ocotp: ocotp-ctrl@30350000 {
- compatible = "syscon";
- reg = <0x30350000 0x10000>;
- clocks = <&clks IMX7D_CLK_DUMMY>;
- status = "disabled";
- };
-
- anatop: anatop@30360000 {
- compatible = "fsl,imx7d-anatop", "fsl,imx6q-anatop",
- "syscon", "simple-bus";
- reg = <0x30360000 0x10000>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
-
- reg_1p0d: regulator-vdd1p0d@210 {
- compatible = "fsl,anatop-regulator";
- regulator-name = "vdd1p0d";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1200000>;
- anatop-reg-offset = <0x210>;
- anatop-vol-bit-shift = <8>;
- anatop-vol-bit-width = <5>;
- anatop-min-bit-val = <8>;
- anatop-min-voltage = <800000>;
- anatop-max-voltage = <1200000>;
- anatop-enable-bit = <31>;
- };
- };
-
- snvs: snvs@30370000 {
- compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
- reg = <0x30370000 0x10000>;
-
- snvs_rtc: snvs-rtc-lp {
- compatible = "fsl,sec-v4.0-mon-rtc-lp";
- regmap = <&snvs>;
- offset = <0x34>;
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- snvs_poweroff: snvs-poweroff {
- compatible = "syscon-poweroff";
- regmap = <&snvs>;
- offset = <0x38>;
- mask = <0x60>;
- };
-
- snvs_pwrkey: snvs-powerkey {
- compatible = "fsl,sec-v4.0-pwrkey";
- regmap = <&snvs>;
- interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- linux,keycode = <KEY_POWER>;
- wakeup-source;
- };
- };
-
- clks: ccm@30380000 {
- compatible = "fsl,imx7d-ccm";
- reg = <0x30380000 0x10000>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
- #clock-cells = <1>;
- clocks = <&ckil>, <&osc>;
- clock-names = "ckil", "osc";
- };
-
- src: src@30390000 {
- compatible = "fsl,imx7d-src", "fsl,imx51-src", "syscon";
- reg = <0x30390000 0x10000>;
- interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
- #reset-cells = <1>;
- };
- };
-
- aips2: aips-bus@30400000 {
- compatible = "fsl,aips-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x30400000 0x400000>;
- ranges;
-
- adc1: adc@30610000 {
- compatible = "fsl,imx7d-adc";
- reg = <0x30610000 0x10000>;
- interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_ADC_ROOT_CLK>;
- clock-names = "adc";
- status = "disabled";
- };
-
- adc2: adc@30620000 {
- compatible = "fsl,imx7d-adc";
- reg = <0x30620000 0x10000>;
- interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_ADC_ROOT_CLK>;
- clock-names = "adc";
- status = "disabled";
- };
-
- pwm1: pwm@30660000 {
- compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
- reg = <0x30660000 0x10000>;
- interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_PWM1_ROOT_CLK>,
- <&clks IMX7D_PWM1_ROOT_CLK>;
- clock-names = "ipg", "per";
- #pwm-cells = <2>;
- status = "disabled";
- };
-
- pwm2: pwm@30670000 {
- compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
- reg = <0x30670000 0x10000>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_PWM2_ROOT_CLK>,
- <&clks IMX7D_PWM2_ROOT_CLK>;
- clock-names = "ipg", "per";
- #pwm-cells = <2>;
- status = "disabled";
- };
-
- pwm3: pwm@30680000 {
- compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
- reg = <0x30680000 0x10000>;
- interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_PWM3_ROOT_CLK>,
- <&clks IMX7D_PWM3_ROOT_CLK>;
- clock-names = "ipg", "per";
- #pwm-cells = <2>;
- status = "disabled";
- };
-
- pwm4: pwm@30690000 {
- compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
- reg = <0x30690000 0x10000>;
- interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_PWM4_ROOT_CLK>,
- <&clks IMX7D_PWM4_ROOT_CLK>;
- clock-names = "ipg", "per";
- #pwm-cells = <2>;
- status = "disabled";
- };
-
- lcdif: lcdif@30730000 {
- compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
- reg = <0x30730000 0x10000>;
- interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
- <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CLK_DUMMY>;
- clock-names = "pix", "axi", "disp_axi";
- status = "disabled";
- };
- };
-
- aips3: aips-bus@30800000 {
- compatible = "fsl,aips-bus", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x30800000 0x400000>;
- ranges;
-
- uart1: serial@30860000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30860000 0x10000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART1_ROOT_CLK>,
- <&clks IMX7D_UART1_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart2: serial@30890000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30890000 0x10000>;
- interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART2_ROOT_CLK>,
- <&clks IMX7D_UART2_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart3: serial@30880000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30880000 0x10000>;
- interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART3_ROOT_CLK>,
- <&clks IMX7D_UART3_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- flexcan1: can@30a00000 {
- compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
- reg = <0x30a00000 0x10000>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CAN1_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- flexcan2: can@30a10000 {
- compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
- reg = <0x30a10000 0x10000>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CAN2_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- i2c1: i2c@30a20000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
- reg = <0x30a20000 0x10000>;
- interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_I2C1_ROOT_CLK>;
- status = "disabled";
- };
-
- i2c2: i2c@30a30000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
- reg = <0x30a30000 0x10000>;
- interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_I2C2_ROOT_CLK>;
- status = "disabled";
- };
-
- i2c3: i2c@30a40000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
- reg = <0x30a40000 0x10000>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_I2C3_ROOT_CLK>;
- status = "disabled";
- };
-
- i2c4: i2c@30a50000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
- reg = <0x30a50000 0x10000>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_I2C4_ROOT_CLK>;
- status = "disabled";
- };
-
- uart4: serial@30a60000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30a60000 0x10000>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART4_ROOT_CLK>,
- <&clks IMX7D_UART4_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart5: serial@30a70000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30a70000 0x10000>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART5_ROOT_CLK>,
- <&clks IMX7D_UART5_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart6: serial@30a80000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30a80000 0x10000>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART6_ROOT_CLK>,
- <&clks IMX7D_UART6_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- uart7: serial@30a90000 {
- compatible = "fsl,imx7d-uart",
- "fsl,imx6q-uart";
- reg = <0x30a90000 0x10000>;
- interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_UART7_ROOT_CLK>,
- <&clks IMX7D_UART7_ROOT_CLK>;
- clock-names = "ipg", "per";
- status = "disabled";
- };
-
- usbotg1: usb@30b10000 {
- compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
- reg = <0x30b10000 0x200>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_USB_CTRL_CLK>;
- fsl,usbphy = <&usbphynop1>;
- fsl,usbmisc = <&usbmisc1 0>;
- phy-clkgate-delay-us = <400>;
- status = "disabled";
- };
-
- usbotg2: usb@30b20000 {
- compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
- reg = <0x30b20000 0x200>;
- interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_USB_CTRL_CLK>;
- fsl,usbphy = <&usbphynop2>;
- fsl,usbmisc = <&usbmisc2 0>;
- phy-clkgate-delay-us = <400>;
- status = "disabled";
- };
-
- usbh: usb@30b30000 {
- compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
- reg = <0x30b30000 0x200>;
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_USB_CTRL_CLK>;
- fsl,usbphy = <&usbphynop3>;
- fsl,usbmisc = <&usbmisc3 0>;
- phy_type = "hsic";
- dr_mode = "host";
- phy-clkgate-delay-us = <400>;
- status = "disabled";
- };
-
- usbmisc1: usbmisc@30b10200 {
- #index-cells = <1>;
- compatible = "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc";
- reg = <0x30b10200 0x200>;
- };
-
- usbmisc2: usbmisc@30b20200 {
- #index-cells = <1>;
- compatible = "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc";
- reg = <0x30b20200 0x200>;
- };
-
- usbmisc3: usbmisc@30b30200 {
- #index-cells = <1>;
- compatible = "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc";
- reg = <0x30b30200 0x200>;
- };
-
- usbphynop1: usbphynop1 {
- compatible = "usb-nop-xceiv";
- clocks = <&clks IMX7D_USB_PHY1_CLK>;
- clock-names = "main_clk";
- };
-
- usbphynop2: usbphynop2 {
- compatible = "usb-nop-xceiv";
- clocks = <&clks IMX7D_USB_PHY2_CLK>;
- clock-names = "main_clk";
- };
-
- usbphynop3: usbphynop3 {
- compatible = "usb-nop-xceiv";
- clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>;
- clock-names = "main_clk";
- };
-
- usdhc1: usdhc@30b40000 {
- compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
- reg = <0x30b40000 0x10000>;
- interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_USDHC1_ROOT_CLK>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
+&aips3 {
+ usbotg2: usb@30b20000 {
+ compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x30b20000 0x200>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_USB_CTRL_CLK>;
+ fsl,usbphy = <&usbphynop2>;
+ fsl,usbmisc = <&usbmisc2 0>;
+ phy-clkgate-delay-us = <400>;
+ status = "disabled";
+ };
- usdhc2: usdhc@30b50000 {
- compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
- reg = <0x30b50000 0x10000>;
- interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_USDHC2_ROOT_CLK>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
+ usbmisc2: usbmisc@30b20200 {
+ #index-cells = <1>;
+ compatible = "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc";
+ reg = <0x30b20200 0x200>;
+ };
- usdhc3: usdhc@30b60000 {
- compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
- reg = <0x30b60000 0x10000>;
- interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_CLK_DUMMY>,
- <&clks IMX7D_USDHC3_ROOT_CLK>;
- clock-names = "ipg", "ahb", "per";
- bus-width = <4>;
- status = "disabled";
- };
+ usbphynop2: usbphynop2 {
+ compatible = "usb-nop-xceiv";
+ clocks = <&clks IMX7D_USB_PHY2_CLK>;
+ clock-names = "main_clk";
+ };
- fec1: ethernet@30be0000 {
- compatible = "fsl,imx7d-fec", "fsl,imx6sx-fec";
- reg = <0x30be0000 0x10000>;
- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
- <&clks IMX7D_ENET_AXI_ROOT_CLK>,
- <&clks IMX7D_ENET1_TIME_ROOT_CLK>,
- <&clks IMX7D_PLL_ENET_MAIN_125M_CLK>,
- <&clks IMX7D_ENET_PHY_REF_ROOT_CLK>;
- clock-names = "ipg", "ahb", "ptp",
- "enet_clk_ref", "enet_out";
- fsl,num-tx-queues=<3>;
- fsl,num-rx-queues=<3>;
- status = "disabled";
- };
+ fec2: ethernet@30bf0000 {
+ compatible = "fsl,imx7d-fec", "fsl,imx6sx-fec";
+ reg = <0x30bf0000 0x10000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ <&clks IMX7D_ENET2_TIME_ROOT_CLK>,
+ <&clks IMX7D_PLL_ENET_MAIN_125M_CLK>,
+ <&clks IMX7D_ENET_PHY_REF_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "ptp",
+ "enet_clk_ref", "enet_out";
+ fsl,num-tx-queues=<3>;
+ fsl,num-rx-queues=<3>;
+ status = "disabled";
+ };
+};
- fec2: ethernet@30bf0000 {
- compatible = "fsl,imx7d-fec", "fsl,imx6sx-fec";
- reg = <0x30bf0000 0x10000>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
- <&clks IMX7D_ENET_AXI_ROOT_CLK>,
- <&clks IMX7D_ENET2_TIME_ROOT_CLK>,
- <&clks IMX7D_PLL_ENET_MAIN_125M_CLK>,
- <&clks IMX7D_ENET_PHY_REF_ROOT_CLK>;
- clock-names = "ipg", "ahb", "ptp",
- "enet_clk_ref", "enet_out";
- fsl,num-tx-queues=<3>;
- fsl,num-rx-queues=<3>;
- status = "disabled";
- };
+&ca_funnel_ports {
+ port@1 {
+ reg = <1>;
+ ca_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out_port>;
};
};
};
diff --git a/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts b/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts
new file mode 100644
index 000000000000..bd2a49c1ade6
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s-colibri-eval-v3.dts
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "imx7s-colibri.dtsi"
+#include "imx7-colibri-eval-v3.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX7S on Colibri Evaluation Board V3";
+ compatible = "toradex,colibri-imx7s-eval-v3", "toradex,colibri-imx7s",
+ "fsl,imx7s";
+};
diff --git a/arch/arm/boot/dts/imx7s-colibri.dtsi b/arch/arm/boot/dts/imx7s-colibri.dtsi
new file mode 100644
index 000000000000..b81013455b21
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s-colibri.dtsi
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx7s.dtsi"
+#include "imx7-colibri.dtsi"
+
+/ {
+ memory {
+ reg = <0x80000000 0x10000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
new file mode 100644
index 000000000000..1e90bdbe3a6e
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -0,0 +1,933 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/imx7d-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "imx7d-pinfunc.h"
+#include "skeleton.dtsi"
+
+/ {
+ aliases {
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ serial5 = &uart6;
+ serial6 = &uart7;
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ spi3 = &ecspi4;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a7";
+ device_type = "cpu";
+ reg = <0>;
+ operating-points = <
+ /* KHz uV */
+ 996000 1075000
+ 792000 975000
+ >;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clks IMX7D_CLK_ARM>;
+ };
+ };
+
+ intc: interrupt-controller@31001000 {
+ compatible = "arm,cortex-a7-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x31001000 0x1000>,
+ <0x31002000 0x1000>,
+ <0x31004000 0x2000>,
+ <0x31006000 0x2000>;
+ };
+
+ ckil: clock-cki {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "ckil";
+ };
+
+ osc: clock-osc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc";
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-parent = <&intc>;
+ };
+
+ etr@30086000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x30086000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etr_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+ tpiu@30087000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0x30087000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpiu_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+ replicator {
+ /*
+ * Non-configurable replicators don't show up on the
+ * AMBA bus, so there is no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etr_in_port>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out_port>;
+ };
+ };
+ };
+ };
+
+ etf@30084000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x30084000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ etf_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&hugo_funnel_out_port0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ etf_out_port: endpoint {
+ remote-endpoint = <&replicator_in_port0>;
+ };
+ };
+ };
+ };
+
+ funnel@30083000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x30083000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel input ports */
+ port@0 {
+ reg = <0>;
+ hugo_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&ca_funnel_out_port0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ hugo_funnel_in_port1: endpoint {
+ slave-mode; /* M4 input */
+ };
+ };
+
+ port@2 {
+ reg = <0>;
+ hugo_funnel_out_port0: endpoint {
+ remote-endpoint = <&etf_in_port>;
+ };
+ };
+
+ /* the other input ports are not connected to anything */
+ };
+ };
+
+ funnel@30041000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x30041000 0x1000>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
+
+ ca_funnel_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* funnel input ports */
+ port@0 {
+ reg = <0>;
+ ca_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out_port>;
+ };
+ };
+
+ /* funnel output port */
+ port@2 {
+ reg = <0>;
+ ca_funnel_out_port0: endpoint {
+ remote-endpoint = <&hugo_funnel_in_port0>;
+ };
+ };
+
+ /* the other input ports are not connected to anything */
+ };
+ };
+
+ etm@3007c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x3007c000 0x1000>;
+ cpu = <&cpu0>;
+ clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etm0_out_port: endpoint {
+ remote-endpoint = <&ca_funnel_in_port0>;
+ };
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&intc>;
+ ranges;
+
+ aips1: aips-bus@30000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30000000 0x400000>;
+ ranges;
+
+ gpio1: gpio@30200000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30200000 0x10000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>, /* GPIO1_INT15_0 */
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>; /* GPIO1_INT31_16 */
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@30210000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30210000 0x10000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@30220000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30220000 0x10000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@30230000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30230000 0x10000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio5: gpio@30240000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30240000 0x10000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio6: gpio@30250000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30250000 0x10000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio7: gpio@30260000 {
+ compatible = "fsl,imx7d-gpio", "fsl,imx35-gpio";
+ reg = <0x30260000 0x10000>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wdog1: wdog@30280000 {
+ compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+ reg = <0x30280000 0x10000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_WDOG1_ROOT_CLK>;
+ };
+
+ wdog2: wdog@30290000 {
+ compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+ reg = <0x30290000 0x10000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_WDOG2_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ wdog3: wdog@302a0000 {
+ compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+ reg = <0x302a0000 0x10000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_WDOG3_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ wdog4: wdog@302b0000 {
+ compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+ reg = <0x302b0000 0x10000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_WDOG4_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ iomuxc_lpsr: iomuxc-lpsr@302c0000 {
+ compatible = "fsl,imx7d-iomuxc-lpsr";
+ reg = <0x302c0000 0x10000>;
+ fsl,input-sel = <&iomuxc>;
+ };
+
+ gpt1: gpt@302d0000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ reg = <0x302d0000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_GPT1_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ };
+
+ gpt2: gpt@302e0000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ reg = <0x302e0000 0x10000>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_GPT2_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ gpt3: gpt@302f0000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ reg = <0x302f0000 0x10000>;
+ interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_GPT3_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ gpt4: gpt@30300000 {
+ compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+ reg = <0x30300000 0x10000>;
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_GPT4_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ iomuxc: iomuxc@30330000 {
+ compatible = "fsl,imx7d-iomuxc";
+ reg = <0x30330000 0x10000>;
+ };
+
+ gpr: iomuxc-gpr@30340000 {
+ compatible = "fsl,imx7d-iomuxc-gpr", "syscon";
+ reg = <0x30340000 0x10000>;
+ };
+
+ ocotp: ocotp-ctrl@30350000 {
+ compatible = "syscon";
+ reg = <0x30350000 0x10000>;
+ clocks = <&clks IMX7D_CLK_DUMMY>;
+ status = "disabled";
+ };
+
+ anatop: anatop@30360000 {
+ compatible = "fsl,imx7d-anatop", "fsl,imx6q-anatop",
+ "syscon", "simple-bus";
+ reg = <0x30360000 0x10000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+
+ reg_1p0d: regulator-vdd1p0d {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd1p0d";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1200000>;
+ anatop-reg-offset = <0x210>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <8>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1200000>;
+ anatop-enable-bit = <31>;
+ };
+ };
+
+ snvs: snvs@30370000 {
+ compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+ reg = <0x30370000 0x10000>;
+
+ snvs_rtc: snvs-rtc-lp {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ regmap = <&snvs>;
+ offset = <0x34>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ snvs_poweroff: snvs-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&snvs>;
+ offset = <0x38>;
+ mask = <0x60>;
+ };
+
+ snvs_pwrkey: snvs-powerkey {
+ compatible = "fsl,sec-v4.0-pwrkey";
+ regmap = <&snvs>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ linux,keycode = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
+
+ clks: ccm@30380000 {
+ compatible = "fsl,imx7d-ccm";
+ reg = <0x30380000 0x10000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc>;
+ clock-names = "ckil", "osc";
+ };
+
+ src: src@30390000 {
+ compatible = "fsl,imx7d-src", "fsl,imx51-src", "syscon";
+ reg = <0x30390000 0x10000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+ };
+ };
+
+ aips2: aips-bus@30400000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30400000 0x400000>;
+ ranges;
+
+ adc1: adc@30610000 {
+ compatible = "fsl,imx7d-adc";
+ reg = <0x30610000 0x10000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ADC_ROOT_CLK>;
+ clock-names = "adc";
+ status = "disabled";
+ };
+
+ adc2: adc@30620000 {
+ compatible = "fsl,imx7d-adc";
+ reg = <0x30620000 0x10000>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ADC_ROOT_CLK>;
+ clock-names = "adc";
+ status = "disabled";
+ };
+
+ ecspi4: ecspi@30630000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30630000 0x10000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ECSPI4_ROOT_CLK>,
+ <&clks IMX7D_ECSPI4_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ pwm1: pwm@30660000 {
+ compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
+ reg = <0x30660000 0x10000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_PWM1_ROOT_CLK>,
+ <&clks IMX7D_PWM1_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@30670000 {
+ compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
+ reg = <0x30670000 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_PWM2_ROOT_CLK>,
+ <&clks IMX7D_PWM2_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@30680000 {
+ compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
+ reg = <0x30680000 0x10000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_PWM3_ROOT_CLK>,
+ <&clks IMX7D_PWM3_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@30690000 {
+ compatible = "fsl,imx7d-pwm", "fsl,imx27-pwm";
+ reg = <0x30690000 0x10000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_PWM4_ROOT_CLK>,
+ <&clks IMX7D_PWM4_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ lcdif: lcdif@30730000 {
+ compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
+ reg = <0x30730000 0x10000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>;
+ clock-names = "pix", "axi", "disp_axi";
+ status = "disabled";
+ };
+ };
+
+ aips3: aips-bus@30800000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x30800000 0x400000>;
+ ranges;
+
+ ecspi1: ecspi@30820000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30820000 0x10000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ECSPI1_ROOT_CLK>,
+ <&clks IMX7D_ECSPI1_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi2: ecspi@30830000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30830000 0x10000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ECSPI2_ROOT_CLK>,
+ <&clks IMX7D_ECSPI2_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ ecspi3: ecspi@30840000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-ecspi", "fsl,imx51-ecspi";
+ reg = <0x30840000 0x10000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ECSPI3_ROOT_CLK>,
+ <&clks IMX7D_ECSPI3_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart1: serial@30860000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30860000 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART1_ROOT_CLK>,
+ <&clks IMX7D_UART1_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart2: serial@30890000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30890000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART2_ROOT_CLK>,
+ <&clks IMX7D_UART2_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart3: serial@30880000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30880000 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART3_ROOT_CLK>,
+ <&clks IMX7D_UART3_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ flexcan1: can@30a00000 {
+ compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
+ reg = <0x30a00000 0x10000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CAN1_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ flexcan2: can@30a10000 {
+ compatible = "fsl,imx7d-flexcan", "fsl,imx6q-flexcan";
+ reg = <0x30a10000 0x10000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CAN2_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ i2c1: i2c@30a20000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
+ reg = <0x30a20000 0x10000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_I2C1_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@30a30000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
+ reg = <0x30a30000 0x10000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_I2C2_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@30a40000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
+ reg = <0x30a40000 0x10000>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_I2C3_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@30a50000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx7d-i2c", "fsl,imx21-i2c";
+ reg = <0x30a50000 0x10000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_I2C4_ROOT_CLK>;
+ status = "disabled";
+ };
+
+ uart4: serial@30a60000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30a60000 0x10000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART4_ROOT_CLK>,
+ <&clks IMX7D_UART4_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart5: serial@30a70000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30a70000 0x10000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART5_ROOT_CLK>,
+ <&clks IMX7D_UART5_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart6: serial@30a80000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30a80000 0x10000>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART6_ROOT_CLK>,
+ <&clks IMX7D_UART6_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ uart7: serial@30a90000 {
+ compatible = "fsl,imx7d-uart",
+ "fsl,imx6q-uart";
+ reg = <0x30a90000 0x10000>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_UART7_ROOT_CLK>,
+ <&clks IMX7D_UART7_ROOT_CLK>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ usbotg1: usb@30b10000 {
+ compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x30b10000 0x200>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_USB_CTRL_CLK>;
+ fsl,usbphy = <&usbphynop1>;
+ fsl,usbmisc = <&usbmisc1 0>;
+ phy-clkgate-delay-us = <400>;
+ status = "disabled";
+ };
+
+ usbh: usb@30b30000 {
+ compatible = "fsl,imx7d-usb", "fsl,imx27-usb";
+ reg = <0x30b30000 0x200>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_USB_CTRL_CLK>;
+ fsl,usbphy = <&usbphynop3>;
+ fsl,usbmisc = <&usbmisc3 0>;
+ phy_type = "hsic";
+ dr_mode = "host";
+ phy-clkgate-delay-us = <400>;
+ status = "disabled";
+ };
+
+ usbmisc1: usbmisc@30b10200 {
+ #index-cells = <1>;
+ compatible = "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc";
+ reg = <0x30b10200 0x200>;
+ };
+
+ usbmisc3: usbmisc@30b30200 {
+ #index-cells = <1>;
+ compatible = "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc";
+ reg = <0x30b30200 0x200>;
+ };
+
+ usbphynop1: usbphynop1 {
+ compatible = "usb-nop-xceiv";
+ clocks = <&clks IMX7D_USB_PHY1_CLK>;
+ clock-names = "main_clk";
+ };
+
+ usbphynop3: usbphynop3 {
+ compatible = "usb-nop-xceiv";
+ clocks = <&clks IMX7D_USB_HSIC_ROOT_CLK>;
+ clock-names = "main_clk";
+ };
+
+ usdhc1: usdhc@30b40000 {
+ compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
+ reg = <0x30b40000 0x10000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_USDHC1_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc2: usdhc@30b50000 {
+ compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
+ reg = <0x30b50000 0x10000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_USDHC2_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc3: usdhc@30b60000 {
+ compatible = "fsl,imx7d-usdhc", "fsl,imx6sl-usdhc";
+ reg = <0x30b60000 0x10000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_USDHC3_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ fec1: ethernet@30be0000 {
+ compatible = "fsl,imx7d-fec", "fsl,imx6sx-fec";
+ reg = <0x30be0000 0x10000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ <&clks IMX7D_ENET_AXI_ROOT_CLK>,
+ <&clks IMX7D_ENET1_TIME_ROOT_CLK>,
+ <&clks IMX7D_PLL_ENET_MAIN_125M_CLK>,
+ <&clks IMX7D_ENET_PHY_REF_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "ptp",
+ "enet_clk_ref", "enet_out";
+ fsl,num-tx-queues=<3>;
+ fsl,num-rx-queues=<3>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/keystone-k2e.dtsi b/arch/arm/boot/dts/keystone-k2e.dtsi
index 96b349fb0430..9a51b8c88581 100644
--- a/arch/arm/boot/dts/keystone-k2e.dtsi
+++ b/arch/arm/boot/dts/keystone-k2e.dtsi
@@ -96,13 +96,16 @@
#address-cells = <3>;
#size-cells = <2>;
reg = <0x21021000 0x2000>, <0x21020000 0x1000>, <0x02620128 4>;
- ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
- 0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
+ ranges = <0x82000000 0 0x60000000 0x60000000
+ 0 0x10000000>;
status = "disabled";
device_type = "pci";
num-lanes = <2>;
+ bus-range = <0x00 0xff>;
+ /* error interrupt */
+ interrupts = <GIC_SPI 385 IRQ_TYPE_EDGE_RISING>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>, /* INT A */
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index 5bfd9e7845f2..692fcbb1434a 100644
--- a/arch/arm/boot/dts/keystone-k2g-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2g-evm.dts
@@ -27,6 +27,17 @@
};
+&k2g_pinctrl {
+ uart0_pins: pinmux_uart0_pins {
+ pinctrl-single,pins = <
+ K2G_CORE_IOPAD(0x11cc) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* uart0_rxd.uart0_rxd */
+ K2G_CORE_IOPAD(0x11d0) (BUFFER_CLASS_B | PIN_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ >;
+ };
+};
+
&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi
index 7ff2796ae925..3372615b885c 100644
--- a/arch/arm/boot/dts/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/keystone-k2g.dtsi
@@ -14,6 +14,7 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/keystone.h>
#include "skeleton.dtsi"
/ {
@@ -75,6 +76,13 @@
ranges = <0x0 0x0 0x0 0xc0000000>;
dma-ranges = <0x80000000 0x8 0x00000000 0x80000000>;
+ k2g_pinctrl: pinmux@02621000 {
+ compatible = "pinctrl-single";
+ reg = <0x02621000 0x410>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x001b0007>;
+ };
+
uart0: serial@02530c00 {
compatible = "ns16550a";
current-speed = <115200>;
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index ff22ffc3dee7..2ee3d0ac2816 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -54,6 +54,155 @@
interrupts = <GIC_SPI 435 IRQ_TYPE_EDGE_RISING>;
};
+ k2l_pmx: pinmux@02620690 {
+ compatible = "pinctrl-single";
+ reg = <0x02620690 0xc>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,bit-per-mux;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x1>;
+ status = "disabled";
+
+ uart3_emifa_pins: pinmux_uart3_emifa_pins {
+ pinctrl-single,bits = <
+ /* UART3_EMIFA_SEL */
+ 0x0 0x0 0xc0
+ >;
+ };
+
+ uart2_emifa_pins: pinmux_uart2_emifa_pins {
+ pinctrl-single,bits = <
+ /* UART2_EMIFA_SEL */
+ 0x0 0x0 0x30
+ >;
+ };
+
+ uart01_spi2_pins: pinmux_uart01_spi2_pins {
+ pinctrl-single,bits = <
+ /* UART01_SPI2_SEL */
+ 0x0 0x0 0x4
+ >;
+ };
+
+ dfesync_rp1_pins: pinmux_dfesync_rp1_pins {
+ pinctrl-single,bits = <
+ /* DFESYNC_RP1_SEL */
+ 0x0 0x0 0x2
+ >;
+ };
+
+ avsif_pins: pinmux_avsif_pins {
+ pinctrl-single,bits = <
+ /* AVSIF_SEL */
+ 0x0 0x0 0x1
+ >;
+ };
+
+ gpio_emu_pins: pinmux_gpio_emu_pins {
+ pinctrl-single,bits = <
+ /*
+ * GPIO_EMU_SEL[31]: 0-GPIO31, 1-EMU33
+ * GPIO_EMU_SEL[30]: 0-GPIO30, 1-EMU32
+ * GPIO_EMU_SEL[29]: 0-GPIO29, 1-EMU31
+ * GPIO_EMU_SEL[28]: 0-GPIO28, 1-EMU30
+ * GPIO_EMU_SEL[27]: 0-GPIO27, 1-EMU29
+ * GPIO_EMU_SEL[26]: 0-GPIO26, 1-EMU28
+ * GPIO_EMU_SEL[25]: 0-GPIO25, 1-EMU27
+ * GPIO_EMU_SEL[24]: 0-GPIO24, 1-EMU26
+ * GPIO_EMU_SEL[23]: 0-GPIO23, 1-EMU25
+ * GPIO_EMU_SEL[22]: 0-GPIO22, 1-EMU24
+ * GPIO_EMU_SEL[21]: 0-GPIO21, 1-EMU23
+ * GPIO_EMU_SEL[20]: 0-GPIO20, 1-EMU22
+ * GPIO_EMU_SEL[19]: 0-GPIO19, 1-EMU21
+ * GPIO_EMU_SEL[18]: 0-GPIO18, 1-EMU20
+ * GPIO_EMU_SEL[17]: 0-GPIO17, 1-EMU19
+ */
+ 0x4 0x0000 0xFFFE0000
+ >;
+ };
+
+ gpio_timio_pins: pinmux_gpio_timio_pins {
+ pinctrl-single,bits = <
+ /*
+ * GPIO_TIMIO_SEL[15]: 0-GPIO15, 1-TIMO7
+ * GPIO_TIMIO_SEL[14]: 0-GPIO14, 1-TIMO6
+ * GPIO_TIMIO_SEL[13]: 0-GPIO13, 1-TIMO5
+ * GPIO_TIMIO_SEL[12]: 0-GPIO12, 1-TIMO4
+ * GPIO_TIMIO_SEL[11]: 0-GPIO11, 1-TIMO3
+ * GPIO_TIMIO_SEL[10]: 0-GPIO10, 1-TIMO2
+ * GPIO_TIMIO_SEL[9]: 0-GPIO9, 1-TIMI7
+ * GPIO_TIMIO_SEL[8]: 0-GPIO8, 1-TIMI6
+ * GPIO_TIMIO_SEL[7]: 0-GPIO7, 1-TIMI5
+ * GPIO_TIMIO_SEL[6]: 0-GPIO6, 1-TIMI4
+ * GPIO_TIMIO_SEL[5]: 0-GPIO5, 1-TIMI3
+ * GPIO_TIMIO_SEL[4]: 0-GPIO4, 1-TIMI2
+ */
+ 0x4 0x0 0xFFF0
+ >;
+ };
+
+ gpio_spi2cs_pins: pinmux_gpio_spi2cs_pins {
+ pinctrl-single,bits = <
+ /*
+ * GPIO_SPI2CS_SEL[3]: 0-GPIO3, 1-SPI2CS4
+ * GPIO_SPI2CS_SEL[2]: 0-GPIO2, 1-SPI2CS3
+ * GPIO_SPI2CS_SEL[1]: 0-GPIO1, 1-SPI2CS2
+ * GPIO_SPI2CS_SEL[0]: 0-GPIO0, 1-SPI2CS1
+ */
+ 0x4 0x0 0xF
+ >;
+ };
+
+ gpio_dfeio_pins: pinmux_gpio_dfeio_pins {
+ pinctrl-single,bits = <
+ /*
+ * GPIO_DFEIO_SEL[31]: 0-DFEIO17, 1-GPIO63
+ * GPIO_DFEIO_SEL[30]: 0-DFEIO16, 1-GPIO62
+ * GPIO_DFEIO_SEL[29]: 0-DFEIO15, 1-GPIO61
+ * GPIO_DFEIO_SEL[28]: 0-DFEIO14, 1-GPIO60
+ * GPIO_DFEIO_SEL[27]: 0-DFEIO13, 1-GPIO59
+ * GPIO_DFEIO_SEL[26]: 0-DFEIO12, 1-GPIO58
+ * GPIO_DFEIO_SEL[25]: 0-DFEIO11, 1-GPIO57
+ * GPIO_DFEIO_SEL[24]: 0-DFEIO10, 1-GPIO56
+ * GPIO_DFEIO_SEL[23]: 0-DFEIO9, 1-GPIO55
+ * GPIO_DFEIO_SEL[22]: 0-DFEIO8, 1-GPIO54
+ * GPIO_DFEIO_SEL[21]: 0-DFEIO7, 1-GPIO53
+ * GPIO_DFEIO_SEL[20]: 0-DFEIO6, 1-GPIO52
+ * GPIO_DFEIO_SEL[19]: 0-DFEIO5, 1-GPIO51
+ * GPIO_DFEIO_SEL[18]: 0-DFEIO4, 1-GPIO50
+ * GPIO_DFEIO_SEL[17]: 0-DFEIO3, 1-GPIO49
+ * GPIO_DFEIO_SEL[16]: 0-DFEIO2, 1-GPIO48
+ */
+ 0x8 0x0 0xFFFF0000
+ >;
+ };
+
+ gpio_emifa_pins: pinmux_gpio_emifa_pins {
+ pinctrl-single,bits = <
+ /*
+ * GPIO_EMIFA_SEL[15]: 0-EMIFA17, 1-GPIO47
+ * GPIO_EMIFA_SEL[14]: 0-EMIFA16, 1-GPIO46
+ * GPIO_EMIFA_SEL[13]: 0-EMIFA15, 1-GPIO45
+ * GPIO_EMIFA_SEL[12]: 0-EMIFA14, 1-GPIO44
+ * GPIO_EMIFA_SEL[11]: 0-EMIFA13, 1-GPIO43
+ * GPIO_EMIFA_SEL[10]: 0-EMIFA10, 1-GPIO42
+ * GPIO_EMIFA_SEL[9]: 0-EMIFA9, 1-GPIO41
+ * GPIO_EMIFA_SEL[8]: 0-EMIFA8, 1-GPIO40
+ * GPIO_EMIFA_SEL[7]: 0-EMIFA7, 1-GPIO39
+ * GPIO_EMIFA_SEL[6]: 0-EMIFA6, 1-GPIO38
+ * GPIO_EMIFA_SEL[5]: 0-EMIFA5, 1-GPIO37
+ * GPIO_EMIFA_SEL[4]: 0-EMIFA4, 1-GPIO36
+ * GPIO_EMIFA_SEL[3]: 0-EMIFA3, 1-GPIO35
+ * GPIO_EMIFA_SEL[2]: 0-EMIFA2, 1-GPIO34
+ * GPIO_EMIFA_SEL[1]: 0-EMIFA1, 1-GPIO33
+ * GPIO_EMIFA_SEL[0]: 0-EMIFA0, 1-GPIO32
+ */
+ 0x8 0x0 0xFFFF
+ >;
+ };
+ };
+
dspgpio0: keystone_dsp_gpio@02620240 {
compatible = "ti,keystone-dsp-gpio";
gpio-controller;
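For reference, each entry in the pinctrl-single,bits properties of the k2l_pmx node above is a <register-offset value mask> triple: the pinctrl-single driver reads the register at that offset, clears the bits covered by the mask and then ORs in the value. A minimal sketch of what the uart3_emifa_pins group requests (same values as above, only comments added):

	/* in the mux register at offset 0x0, clear the UART3_EMIFA_SEL
	 * field (mask 0xc0), i.e. leave those bits at 0 */
	pinctrl-single,bits = <0x0 0x0 0xc0>;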
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index e34b2265458a..00cb314d5e4d 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -70,6 +70,14 @@
cpu_on = <0x84000003>;
};
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0x84000003>;
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -294,13 +302,16 @@
#address-cells = <3>;
#size-cells = <2>;
reg = <0x21801000 0x2000>, <0x21800000 0x1000>, <0x02620128 4>;
- ranges = <0x81000000 0 0 0x23250000 0 0x4000
- 0x82000000 0 0x50000000 0x50000000 0 0x10000000>;
+ ranges = <0x82000000 0 0x50000000 0x50000000
+ 0 0x10000000>;
status = "disabled";
device_type = "pci";
num-lanes = <2>;
+ bus-range = <0x00 0xff>;
+ /* error interrupt */
+ interrupts = <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>, /* INT A */
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 015f795a8d19..08cce17a25a0 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -72,7 +72,7 @@
};
};
- pwm10: dmtimer-pwm@10 {
+ pwm10: dmtimer-pwm {
compatible = "ti,omap-dmtimer-pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm_pins>;
@@ -147,7 +147,7 @@
gpio = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
};
- lcd0: display@0 {
+ lcd0: display {
compatible = "panel-dpi";
label = "15";
status = "okay";
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 5ae8e9297e9a..368e21934285 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -626,6 +626,7 @@
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
};
pcie@3400000 {
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 2bfe401a4da9..fc4080de4b7b 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -46,6 +46,7 @@
#include <dt-bindings/clock/meson8b-clkc.h>
#include <dt-bindings/gpio/meson8b-gpio.h>
+#include <dt-bindings/reset/amlogic,meson8b-reset.h>
#include "skeleton.dtsi"
/ {
@@ -105,6 +106,12 @@
#interrupt-cells = <3>;
};
+ reset: reset-controller@c1104404 {
+ compatible = "amlogic,meson8b-reset";
+ reg = <0xc1104404 0x20>;
+ #reset-cells = <1>;
+ };
+
wdt: watchdog@c1109900 {
compatible = "amlogic,meson8b-wdt";
reg = <0xc1109900 0x8>;
diff --git a/arch/arm/boot/dts/mpa1600.dts b/arch/arm/boot/dts/mpa1600.dts
index f0f5e1098928..116ce78bea4f 100644
--- a/arch/arm/boot/dts/mpa1600.dts
+++ b/arch/arm/boot/dts/mpa1600.dts
@@ -17,15 +17,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <18432000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -61,7 +52,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap24xx-clocks.dtsi b/arch/arm/boot/dts/omap24xx-clocks.dtsi
index ca73722b5ea4..769a346de613 100644
--- a/arch/arm/boot/dts/omap24xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap24xx-clocks.dtsi
@@ -164,7 +164,7 @@
clock-div = <1>;
};
- func_96m_ck: func_96m_ck {
+ func_96m_ck: func_96m_ck@540 {
#clock-cells = <0>;
};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 01e1e2d5c735..8ffde06281ad 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -91,7 +91,7 @@
vcc-supply = <&hsusb2_power>;
};
- tfp410: encoder@0 {
+ tfp410: encoder0 {
compatible = "ti,tfp410";
powerdown-gpios = <&twl_gpio 2 GPIO_ACTIVE_LOW>;
@@ -104,7 +104,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
@@ -112,14 +112,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector0 {
compatible = "dvi-connector";
label = "dvi";
@@ -134,7 +134,7 @@
};
};
- tv0: connector@1 {
+ tv0: connector1 {
compatible = "svideo-connector";
label = "tv";
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a4deff0e2d52..a19d907d4850 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -85,7 +85,7 @@
};
- tfp410: encoder@0 {
+ tfp410: encoder0 {
compatible = "ti,tfp410";
powerdown-gpios = <&gpio6 10 GPIO_ACTIVE_LOW>; /* gpio_170 */
@@ -99,7 +99,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
@@ -107,14 +107,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector0 {
compatible = "dvi-connector";
label = "dvi";
@@ -129,7 +129,7 @@
};
};
- tv0: connector@1 {
+ tv0: connector1 {
compatible = "svideo-connector";
label = "tv";
diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
index a8127bc31fd9..6a0df13fa0f3 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
@@ -57,7 +57,7 @@
regulator-max-microvolt = <3300000>;
};
- tv0: connector@1 {
+ tv0: connector {
compatible = "svideo-connector";
label = "tv";
diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
index b1b8ebf90c1c..586010179752 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
@@ -68,7 +68,7 @@
};
};
- tfp410: encoder@0 {
+ tfp410: encoder0 {
compatible = "ti,tfp410";
powerdown-gpios = <&twl_gpio 7 GPIO_ACTIVE_LOW>;
@@ -79,7 +79,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_dvi_out>;
};
};
@@ -87,14 +87,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector0 {
compatible = "dvi-connector";
label = "dvi";
@@ -109,7 +109,7 @@
};
};
- tv0: connector@1 {
+ tv0: connector1 {
compatible = "svideo-connector";
label = "tv";
@@ -352,7 +352,7 @@
vdda_dac-supply = <&vdac>;
port {
- dpi_dvi_out: endpoint@0 {
+ dpi_dvi_out: endpoint {
remote-endpoint = <&tfp410_in>;
data-lines = <24>;
};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
index 738910db5c0c..2d64bcffaaa8 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
@@ -14,7 +14,7 @@
display2 = &tv0;
};
- lcd0: display@0 {
+ lcd0: display {
compatible = "panel-dpi";
label = "lcd";
@@ -30,7 +30,7 @@
&dss {
port {
- dpi_lcd_out: endpoint@1 {
+ dpi_lcd_out: endpoint {
remote-endpoint = <&lcd_in>;
data-lines = <24>;
};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
index d5705356d52c..d8b16398bfb3 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
@@ -16,7 +16,7 @@
model = "TimLL OMAP3 Devkit8000 with 4.3'' LCD panel";
compatible = "timll,omap3-devkit8000", "ti,omap3";
- lcd0: display@0 {
+ lcd0: display {
panel-timing {
clock-frequency = <10164705>;
hactive = <480>;
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
index 4afad4b233ec..edb37ba80498 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
@@ -16,7 +16,7 @@
model = "TimLL OMAP3 Devkit8000 with 7.0'' LCD panel";
compatible = "timll,omap3-devkit8000", "ti,omap3";
- lcd0: display@0 {
+ lcd0: display {
panel-timing {
clock-frequency = <40000000>;
hactive = <800>;
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index ab9fb8f49ff3..c09a0574af90 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -100,12 +100,28 @@
};
};
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm11 0 2000000 0>;
+ pwm-names = "backlight";
+ brightness-levels = <0 11 20 30 40 50 60 70 80 90 100>;
+ default-brightness-level = <9>; /* => 90 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&backlight_pins>;
+ };
+
+ pwm11: dmtimer-pwm {
+ compatible = "ti,omap-dmtimer-pwm";
+ ti,timers = <&timer11>;
+ #pwm-cells = <3>;
+ };
+
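In the pwms phandle above, the middle cell is the PWM period in nanoseconds, so <&pwm11 0 2000000 0> asks dmtimer 11 for a 2 ms (500 Hz) PWM, and default-brightness-level is an index into brightness-levels, here selecting the 90% entry. A hypothetical variant at 1 kHz, purely to illustrate the encoding (values are not from this board):

	backlight {
		compatible = "pwm-backlight";
		pwms = <&pwm11 0 1000000 0>;	/* 1 000 000 ns period = 1 kHz */
		brightness-levels = <0 50 100>;
		default-brightness-level = <2>;	/* index 2 => 100% */
	};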
hsusb2_phy: hsusb2_phy {
compatible = "usb-nop-xceiv";
reset-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
};
- tv0: connector@1 {
+ tv0: connector {
compatible = "svideo-connector";
label = "tv";
@@ -126,19 +142,24 @@
port@0 {
reg = <0>;
- opa_in: endpoint@0 {
+ opa_in: endpoint {
remote-endpoint = <&venc_out>;
};
};
port@1 {
reg = <1>;
- opa_out: endpoint@0 {
+ opa_out: endpoint {
remote-endpoint = <&tv_connector_in>;
};
};
};
};
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&tca6507 0 GPIO_ACTIVE_LOW>; /* W2CBW003 reset through tca6507 */
+ };
};
&omap3_pmx_core {
@@ -190,6 +211,12 @@
>;
};
+ backlight_pins: backlight_pins_pinmux {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x20ba, MUX_MODE3) /* gpt11/gpio57 */
+ >;
+ };
+
dss_dpi_pins: pinmux_dss_dpi_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x20d4, PIN_OUTPUT | MUX_MODE0) /* dss_pclk.dss_pclk */
@@ -228,6 +255,24 @@
OMAP3_CORE1_IOPAD(0x21c6, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda.hdq */
>;
};
+
+ bma180_pins: pinmux_bma180_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x213a, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio115 */
+ >;
+ };
+
+ itg3200_pins: pinmux_itg3200_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x20b8, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio56 */
+ >;
+ };
+
+ hmc5843_pins: pinmux_hmc5843_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2134, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio112 */
+ >;
+ };
};
&omap3_pmx_core2 {
@@ -298,6 +343,8 @@
bma180@41 {
compatible = "bosch,bma180";
reg = <0x41>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bma180_pins>;
interrupt-parent = <&gpio4>;
interrupts = <19 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_115 */
};
@@ -306,12 +353,14 @@
itg3200@68 {
compatible = "invensense,itg3200";
reg = <0x68>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&itg3200_pins>;
interrupt-parent = <&gpio2>;
- interrupts = <24 0>; /* GPIO_56 */
+ interrupts = <24 IRQ_TYPE_EDGE_FALLING>; /* GPIO_56 */
};
- /* leds */
- tca6507@45 {
+ /* leds + gpios */
+ tca6507: tca6507@45 {
compatible = "ti,tca6507";
#address-cells = <1>;
#size-cells = <0>;
@@ -351,6 +400,10 @@
hmc5843@1e {
compatible = "honeywell,hmc5883l";
reg = <0x1e>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hmc5843_pins>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <16 IRQ_TYPE_EDGE_FALLING>; /* gpio112 */
};
/* touchscreen */
@@ -362,6 +415,12 @@
gpios = <&gpio6 0 GPIO_ACTIVE_LOW>;
ti,x-plate-ohms = <600>;
};
+
+ /* RFID EEPROM */
+ m24lr64@50 {
+ compatible = "at,24c64";
+ reg = <0x50>;
+ };
};
&i2c3 {
@@ -398,6 +457,7 @@
bus-width = <4>;
ti,non-removable;
cap-power-off-card;
+ mmc-pwrseq = <&wifi_pwrseq>;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/omap3-ha-lcd.dts b/arch/arm/boot/dts/omap3-ha-lcd.dts
index 11aa28d73f3a..60af7c2358a3 100644
--- a/arch/arm/boot/dts/omap3-ha-lcd.dts
+++ b/arch/arm/boot/dts/omap3-ha-lcd.dts
@@ -121,7 +121,7 @@
display0 = &lcd0;
};
- lcd0: display@0 {
+ lcd0: display {
compatible = "panel-dpi";
label = "lcd";
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index b6971060648a..667f96245729 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -60,7 +60,7 @@
vcc-supply = <&hsusb1_power>;
};
- tfp410: encoder@0 {
+ tfp410: encoder {
compatible = "ti,tfp410";
powerdown-gpios = <&gpio6 10 GPIO_ACTIVE_LOW>; /* gpio_170 */
@@ -71,7 +71,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
@@ -79,14 +79,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector {
compatible = "dvi-connector";
label = "dvi";
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 2b74a81d1de2..2a6078a8422c 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -143,6 +143,18 @@
io-channels = <&twl_madc 0>, <&twl_madc 4>, <&twl_madc 12>;
io-channel-names = "temp", "bsi", "vbat";
};
+
+ pwm9: dmtimer-pwm {
+ compatible = "ti,omap-dmtimer-pwm";
+ #pwm-cells = <3>;
+ ti,timers = <&timer9>;
+ ti,clock-source = <0x00>; /* timer_sys_ck */
+ };
+
+ ir: n900-ir {
+ compatible = "nokia,n900-ir";
+ pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
+ };
};
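The 26316 in the pwms property above is again a period in nanoseconds: 1 000 000 000 / 38 000 ≈ 26 316, which is where the 38 kHz carrier noted in the comment comes from. Assuming the same three-cell encoding, a 36 kHz carrier (hypothetical, not part of this board) would be written as:

	pwms = <&pwm9 0 27778 0>;	/* 1e9 / 36000 ≈ 27778 ns */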
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-overo-common-dvi.dtsi b/arch/arm/boot/dts/omap3-overo-common-dvi.dtsi
index 802f704f67e5..ae5564abbe2f 100644
--- a/arch/arm/boot/dts/omap3-overo-common-dvi.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-dvi.dtsi
@@ -69,7 +69,7 @@
display0 = &dvi0;
};
- tfp410: encoder@0 {
+ tfp410: encoder {
compatible = "ti,tfp410";
ports {
@@ -79,7 +79,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
@@ -87,14 +87,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector {
compatible = "dvi-connector";
label = "dvi";
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index 6314da2580f5..ca86da68220c 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -119,7 +119,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mcspi1_pins>;
- lcd0: display@0 {
+ lcd0: display {
compatible = "lgphilips,lb035q02";
label = "lcd35";
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
index 7e3fe85a8ad9..b0753ef8abd4 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
@@ -96,7 +96,7 @@
display0 = &lcd0;
};
- lcd0: display@0 {
+ lcd0: display {
compatible = "samsung,lte430wq-f0c", "panel-dpi";
label = "lcd43";
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index bcf39d606b65..dbc4dc721cc2 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -27,7 +27,7 @@
display0 = &lcd;
};
- tv: connector@1 {
+ tv: connector {
compatible = "connector-analog-tv";
label = "tv";
diff --git a/arch/arm/boot/dts/omap3-sb-t35.dtsi b/arch/arm/boot/dts/omap3-sb-t35.dtsi
index 827f614261f6..73643fabde5d 100644
--- a/arch/arm/boot/dts/omap3-sb-t35.dtsi
+++ b/arch/arm/boot/dts/omap3-sb-t35.dtsi
@@ -3,7 +3,7 @@
*/
/ {
- tfp410: encoder@0 {
+ tfp410: encoder {
compatible = "ti,tfp410";
powerdown-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>; /* gpio_54 */
@@ -18,7 +18,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
@@ -26,14 +26,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector {
compatible = "dvi-connector";
label = "dvi";
diff --git a/arch/arm/boot/dts/omap3-thunder.dts b/arch/arm/boot/dts/omap3-thunder.dts
index d659515ab9b8..9736ba79bb5b 100644
--- a/arch/arm/boot/dts/omap3-thunder.dts
+++ b/arch/arm/boot/dts/omap3-thunder.dts
@@ -85,7 +85,7 @@
display0 = &lcd0;
};
- lcd0: display@0 {
+ lcd0: display {
compatible = "samsung,lte430wq-f0c", "panel-dpi";
label = "lcd";
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 9fbda38528dc..4c3c471d2a83 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -493,6 +493,8 @@
dmas = <&sdma 31>,
<&sdma 32>;
dma-names = "tx", "rx";
+ clocks = <&mcbsp1_fck>;
+ clock-names = "fck";
status = "disabled";
};
@@ -511,6 +513,8 @@
dmas = <&sdma 33>,
<&sdma 34>;
dma-names = "tx", "rx";
+ clocks = <&mcbsp2_fck>, <&mcbsp2_ick>;
+ clock-names = "fck", "ick";
status = "disabled";
};
@@ -529,6 +533,8 @@
dmas = <&sdma 17>,
<&sdma 18>;
dma-names = "tx", "rx";
+ clocks = <&mcbsp3_fck>, <&mcbsp3_ick>;
+ clock-names = "fck", "ick";
status = "disabled";
};
@@ -545,6 +551,8 @@
dmas = <&sdma 19>,
<&sdma 20>;
dma-names = "tx", "rx";
+ clocks = <&mcbsp4_fck>;
+ clock-names = "fck";
status = "disabled";
};
@@ -561,6 +569,8 @@
dmas = <&sdma 21>,
<&sdma 22>;
dma-names = "tx", "rx";
+ clocks = <&mcbsp5_fck>;
+ clock-names = "fck";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
index 06c54822ddc2..6b39808b8313 100644
--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
+++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
@@ -40,7 +40,7 @@
};
};
- hdmi0: connector@0 {
+ hdmi0: connector {
compatible = "hdmi-connector";
label = "hdmi";
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index f2a94fa62552..a90b582e4c3f 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -177,6 +177,7 @@
twl6040: twl@4b {
compatible = "ti,twl6040";
+ #clock-cells = <0>;
reg = <0x4b>;
interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>; /* IRQ_SYS_2N cascaded to gic */
ti,audpwron-gpio = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* gpio_160 */
@@ -207,6 +208,10 @@
&mcpdm {
pinctrl-names = "default";
pinctrl-0 = <&mcpdm_pins>;
+
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index df2e356ec089..f8f13952cfeb 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -103,7 +103,7 @@
enable-active-high;
};
- tfp410: encoder@0 {
+ tfp410: encoder0 {
compatible = "ti,tfp410";
powerdown-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>; /* gpio_0 */
@@ -114,7 +114,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
@@ -122,14 +122,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@0 {
+ dvi0: connector0 {
compatible = "dvi-connector";
label = "dvi";
@@ -144,7 +144,7 @@
};
};
- tpd12s015: encoder@1 {
+ tpd12s015: encoder1 {
compatible = "ti,tpd12s015";
gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>, /* 60, CT CP HPD */
@@ -158,7 +158,7 @@
port@0 {
reg = <0>;
- tpd12s015_in: endpoint@0 {
+ tpd12s015_in: endpoint {
remote-endpoint = <&hdmi_out>;
};
};
@@ -166,14 +166,14 @@
port@1 {
reg = <1>;
- tpd12s015_out: endpoint@0 {
+ tpd12s015_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
};
};
- hdmi0: connector@1 {
+ hdmi0: connector1 {
compatible = "hdmi-connector";
label = "hdmi";
@@ -376,6 +376,7 @@
twl6040: twl@4b {
compatible = "ti,twl6040";
+ #clock-cells = <0>;
reg = <0x4b>;
pinctrl-names = "default";
@@ -479,6 +480,10 @@
&mcpdm {
pinctrl-names = "default";
pinctrl-0 = <&mcpdm_pins>;
+
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index aae513265dc2..10d73a784050 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -160,7 +160,7 @@
enable-active-high;
};
- tpd12s015: encoder@0 {
+ tpd12s015: encoder {
compatible = "ti,tpd12s015";
gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>, /* 60, CT CP HPD */
@@ -174,7 +174,7 @@
port@0 {
reg = <0>;
- tpd12s015_in: endpoint@0 {
+ tpd12s015_in: endpoint {
remote-endpoint = <&hdmi_out>;
};
};
@@ -182,14 +182,14 @@
port@1 {
reg = <1>;
- tpd12s015_out: endpoint@0 {
+ tpd12s015_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
};
};
- hdmi0: connector@0 {
+ hdmi0: connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -367,6 +367,7 @@
twl6040: twl@4b {
compatible = "ti,twl6040";
+ #clock-cells = <0>;
reg = <0x4b>;
pinctrl-names = "default";
@@ -620,6 +621,10 @@
&mcpdm {
pinctrl-names = "default";
pinctrl-0 = <&mcpdm_pins>;
+
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap4-var-om44customboard.dtsi b/arch/arm/boot/dts/omap4-var-om44customboard.dtsi
index 6e278d7716a5..74940b6d7719 100644
--- a/arch/arm/boot/dts/omap4-var-om44customboard.dtsi
+++ b/arch/arm/boot/dts/omap4-var-om44customboard.dtsi
@@ -45,7 +45,7 @@
};
};
- hdmi0: connector@0 {
+ hdmi0: connector {
compatible = "hdmi-connector";
pinctrl-names = "default";
pinctrl-0 = <&hdmi_hpd_pins>;
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index a17997f4e9aa..873cfc87260c 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -189,6 +189,7 @@
twl6040: twl@4b {
compatible = "ti,twl6040";
+ #clock-cells = <0>;
reg = <0x4b>;
pinctrl-names = "default";
@@ -252,6 +253,10 @@
&mcpdm {
pinctrl-names = "default";
pinctrl-0 = <&mcpdm_pins>;
+
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 5d5b620b7d9b..5196113202a2 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -87,7 +87,7 @@
};
};
- tpd12s015: encoder@0 {
+ tpd12s015: encoder {
compatible = "ti,tpd12s015";
pinctrl-names = "default";
@@ -102,7 +102,7 @@
port@0 {
reg = <0>;
- tpd12s015_in: endpoint@0 {
+ tpd12s015_in: endpoint {
remote-endpoint = <&hdmi_out>;
};
};
@@ -110,14 +110,14 @@
port@1 {
reg = <1>;
- tpd12s015_out: endpoint@0 {
+ tpd12s015_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
};
};
- hdmi0: connector@0 {
+ hdmi0: connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -637,6 +637,7 @@
twl6040: twl@4b {
compatible = "ti,twl6040";
+ #clock-cells = <0>;
reg = <0x4b>;
pinctrl-names = "default";
@@ -658,6 +659,10 @@
&mcpdm {
pinctrl-names = "default";
pinctrl-0 = <&mcpdm_pins>;
+
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+
status = "okay";
};
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 93fdfa96776e..a9765605d53b 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -112,7 +112,7 @@
};
};
- hdmi0: connector@0 {
+ hdmi0: connector0 {
compatible = "hdmi-connector";
label = "hdmi";
@@ -130,7 +130,7 @@
};
};
- tfp410: encoder@0 {
+ tfp410: encoder0 {
compatible = "ti,tfp410";
ports {
@@ -140,7 +140,7 @@
port@0 {
reg = <0>;
- tfp410_in: endpoint@0 {
+ tfp410_in: endpoint {
remote-endpoint = <&dpi_dvi_out>;
};
};
@@ -148,14 +148,14 @@
port@1 {
reg = <1>;
- tfp410_out: endpoint@0 {
+ tfp410_out: endpoint {
remote-endpoint = <&dvi_connector_in>;
};
};
};
};
- dvi0: connector@1 {
+ dvi0: connector1 {
compatible = "dvi-connector";
label = "dvi";
@@ -646,12 +646,17 @@
pinctrl-0 = <&dss_dpi_pins>;
port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
dpi_dvi_out: endpoint@0 {
+ reg = <0>;
remote-endpoint = <&tfp410_in>;
data-lines = <24>;
};
dpi_lcd_out: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&lcd_in>;
data-lines = <24>;
};
diff --git a/arch/arm/boot/dts/pm9g45.dts b/arch/arm/boot/dts/pm9g45.dts
index 66afcff67fde..0abd7bf17568 100644
--- a/arch/arm/boot/dts/pm9g45.dts
+++ b/arch/arm/boot/dts/pm9g45.dts
@@ -21,15 +21,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
index 210192c38df3..9e73dc6b3ed3 100644
--- a/arch/arm/boot/dts/pxa27x.dtsi
+++ b/arch/arm/boot/dts/pxa27x.dtsi
@@ -22,8 +22,15 @@
marvell,intc-nr-irqs = <34>;
};
+ pinctrl: pinctrl@40e00000 {
+ reg = <0x40e00054 0x20 0x40e0000c 0xc 0x40e0010c 4
+ 0x40f00020 0x10>;
+ compatible = "marvell,pxa27x-pinctrl";
+ };
+
gpio: gpio@40e00000 {
compatible = "intel,pxa27x-gpio";
+ gpio-ranges = <&pinctrl 0 0 128>;
clocks = <&clks CLK_NONE>;
};
diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi
index 5e5af078b9b5..3ff077ca4400 100644
--- a/arch/arm/boot/dts/pxa2xx.dtsi
+++ b/arch/arm/boot/dts/pxa2xx.dtsi
@@ -140,5 +140,13 @@
reg = <0x40900000 0x3c>;
interrupts = <30 31>;
};
+
+ lcd-controller@40500000 {
+ compatible = "marvell,pxa2xx-lcdc";
+ reg = <0x44000000 0x10000>;
+ interrupts = <17>;
+ clocks = <&clks CLK_LCD>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index fec47bcd8292..9d6f3aacedb7 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -1,6 +1,96 @@
/* The pxa3xx skeleton simply augments the 2xx version */
#include "pxa2xx.dtsi"
+#define MFP_PIN_PXA300(gpio) \
+ ((gpio <= 2) ? (0x00b4 + 4 * gpio) : \
+ (gpio <= 26) ? (0x027c + 4 * (gpio - 3)) : \
+ (gpio <= 98) ? (0x0400 + 4 * (gpio - 27)) : \
+ (gpio <= 127) ? (0x0600 + 4 * (gpio - 99)) : \
+ 0)
+
+#define MFP_PIN_PXA310(gpio) \
+ ((gpio <= 2) ? (0x00b4 + 4 * gpio) : \
+ (gpio <= 26) ? (0x027c + 4 * (gpio - 3)) : \
+ (gpio <= 29) ? (0x0400 + 4 * (gpio - 27)) : \
+ (gpio <= 98) ? (0x0418 + 4 * (gpio - 30)) : \
+ (gpio <= 127) ? (0x0600 + 4 * (gpio - 99)) : \
+ (gpio <= 262) ? 0 : \
+ (gpio <= 268) ? (0x052c + 4 * (gpio - 263)) : \
+ 0)
+
+#define MFP_PIN_PXA320(gpio) \
+ ((gpio <= 4) ? (0x0124 + 4 * gpio) : \
+ (gpio <= 9) ? (0x028c + 4 * (gpio - 5)) : \
+ (gpio <= 10) ? (0x0458 + 4 * (gpio - 10)) : \
+ (gpio <= 26) ? (0x02a0 + 4 * (gpio - 11)) : \
+ (gpio <= 48) ? (0x0400 + 4 * (gpio - 27)) : \
+ (gpio <= 62) ? (0x045c + 4 * (gpio - 49)) : \
+ (gpio <= 73) ? (0x04b4 + 4 * (gpio - 63)) : \
+ (gpio <= 98) ? (0x04f0 + 4 * (gpio - 74)) : \
+ (gpio <= 127) ? (0x0600 + 4 * (gpio - 99)) : \
+ 0)
+
+/*
+ * MFP Alternate functions for pins having a gpio.
+ * Example of use: pinctrl-single,pins = < MFP_PIN_PXA310(21) MFP_AF1 >
+ */
+#define MFP_AF0 (0 << 0)
+#define MFP_AF1 (1 << 0)
+#define MFP_AF2 (2 << 0)
+#define MFP_AF3 (3 << 0)
+#define MFP_AF4 (4 << 0)
+#define MFP_AF5 (5 << 0)
+#define MFP_AF6 (6 << 0)
+
+/*
+ * MFP drive strength functions for pins.
+ * Example of use: pinctrl-single,drive-strength = MFP_DS03X;
+ */
+#define MFP_DSMSK (0x7 << 10)
+#define MFP_DS01X < (0x0 << 10) MFP_DSMSK >
+#define MFP_DS02X < (0x1 << 10) MFP_DSMSK >
+#define MFP_DS03X < (0x2 << 10) MFP_DSMSK >
+#define MFP_DS04X < (0x3 << 10) MFP_DSMSK >
+#define MFP_DS06X < (0x4 << 10) MFP_DSMSK >
+#define MFP_DS08X < (0x5 << 10) MFP_DSMSK >
+#define MFP_DS10X < (0x6 << 10) MFP_DSMSK >
+#define MFP_DS13X < (0x7 << 10) MFP_DSMSK >
+
+/*
+ * MFP low power mode for pins.
+ * Example of use:
+ * pinctrl-single,low-power-mode = MFP_LPM(MFP_LPM_PULL_LOW|MFP_LPM_EDGE_FALL);
+ *
+ * Table that determines the low power modes outputs, with actual settings
+ * used in parentheses for don't-care values. Except for the float output,
+ * the configured driven and pulled levels match, so if there is a need for
+ * non-LPM pulled output, the same configuration could probably be used.
+ *
+ * Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel
+ * (bit 7) (bit 8) (bit 14) (bit 13) (bit 15)
+ *
+ * Input 0 X(0) X(0) X(0) 0
+ * Drive 0 0 0 0 X(1) 0
+ * Drive 1 0 1 X(1) 0 0
+ * Pull hi (1) 1 X(1) 1 0 0
+ * Pull lo (0) 1 X(0) 0 1 0
+ * Z (float) 1 X(0) 0 0 0
+ */
+#define MFP_LPM(x) < (x) MFP_LPM_MSK >
+
+#define MFP_LPM_MSK 0xe1f0
+#define MFP_LPM_INPUT 0x0000
+#define MFP_LPM_DRIVE_LOW 0x2000
+#define MFP_LPM_DRIVE_HIGH 0x4100
+#define MFP_LPM_PULL_LOW 0x2080
+#define MFP_LPM_PULL_HIGH 0x4180
+#define MFP_LPM_FLOAT 0x0080
+
+#define MFP_LPM_EDGE_NONE 0x0000
+#define MFP_LPM_EDGE_RISE 0x0010
+#define MFP_LPM_EDGE_FALL 0x0020
+#define MFP_LPM_EDGE_BOTH 0x0030
+
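To make the MFP_PIN_* helpers above concrete: MFP_PIN_PXA310(21) hits the 3..26 branch, so it expands to 0x027c + 4 * (21 - 3) = 0x02c4, the MFPR offset for GPIO21. Together with the alternate-function defines it is meant to be used as a pinctrl-single pair, e.g. (a sketch, the node and label names are made up):

	uart_pins: uart-pins {
		pinctrl-single,pins = <
			MFP_PIN_PXA310(21) MFP_AF1	/* select AF1 on GPIO21's pad */
		>;
	};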
/ {
model = "Marvell PXA3xx familiy SoC";
compatible = "marvell,pxa3xx";
@@ -43,6 +133,15 @@
marvell,intc-nr-irqs = <56>;
};
+ pinctrl: pinctrl@40e10000 {
+ compatible = "pinconf-single";
+ reg = <0x40e10000 0xffff>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0x7>;
+ };
+
gpio: gpio@40e00000 {
compatible = "intel,pxa3xx-gpio";
reg = <0x40e00000 0x10000>;
@@ -92,7 +191,39 @@
compatible = "marvell,pxa-ohci";
reg = <0x4c000000 0x10000>;
interrupts = <3>;
- clocks = <&clks CLK_USBHOST>;
+ clocks = <&clks CLK_USBH>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@40b00000 {
+ compatible = "marvell,pxa270-pwm";
+ reg = <0x40b00000 0x10>;
+ #pwm-cells = <1>;
+ clocks = <&clks CLK_PWM0>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@40b00010 {
+ compatible = "marvell,pxa270-pwm";
+ reg = <0x40b00010 0x10>;
+ #pwm-cells = <1>;
+ clocks = <&clks CLK_PWM1>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@40c00000 {
+ compatible = "marvell,pxa270-pwm";
+ reg = <0x40c00000 0x10>;
+ #pwm-cells = <1>;
+ clocks = <&clks CLK_PWM0>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@40c00010 {
+ compatible = "marvell,pxa270-pwm";
+ reg = <0x40c00010 0x10>;
+ #pwm-cells = <1>;
+ clocks = <&clks CLK_PWM1>;
status = "disabled";
};
};
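The PWM controllers added above use #pwm-cells = <1>, so a consumer passes only the period in nanoseconds; each node is a single-channel PWM block. A hypothetical consumer reference would look like:

	pwms = <&pwm0 1000000>;	/* single cell = period in ns (1 ms) */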
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
new file mode 100644
index 000000000000..0abc93e5bb00
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -0,0 +1,626 @@
+/*
+ * Copyright 2016 Linaro Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include "qcom-msm8660.dtsi"
+
+/ {
+ model = "Qualcomm APQ8060 Dragonboard";
+ compatible = "qcom,apq8060-dragonboard", "qcom,msm8660";
+
+ aliases {
+ serial0 = &gsbi12_serial;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ /* Main power of the board: 3.7V */
+ vph: regulator-fixed {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-name = "VPH";
+ regulator-type = "voltage";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ /* This is a levelshifter for SDCC5 */
+ dragon_vio_txb: txb0104rgyr {
+ compatible = "regulator-fixed";
+ regulator-name = "Dragon SDCC levelshifter";
+ vin-supply = <&pm8058_l14>;
+ regulator-always-on;
+ };
+ };
+
+ soc {
+ pinctrl@800000 {
+ /* eMMC pins, all 8 data lines connected */
+ dragon_sdcc1_pins: sdcc1 {
+ mux {
+ pins = "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167",
+ "gpio168";
+ function = "sdc1";
+ };
+ clk {
+ pins = "gpio167"; /* SDC5 CLK */
+ drive-strength = <16>;
+ bias-disable;
+ };
+ cmd {
+ pins = "gpio168"; /* SDC5 CMD */
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ data {
+ /* SDC5 D0 to D7 */
+ pins = "gpio159", "gpio160", "gpio161", "gpio162",
+ "gpio163", "gpio164", "gpio165", "gpio166";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ /*
+ * The SDCC3 pins are hardcoded (non-muxable) but need some pin
+ * configuration.
+ */
+ dragon_sdcc3_pins: sdcc3 {
+ clk {
+ pins = "sdc3_clk";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ cmd {
+ pins = "sdc3_cmd";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ data {
+ pins = "sdc3_data";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
+ /* Second SD card slot pins */
+ dragon_sdcc5_pins: sdcc5 {
+ mux {
+ pins = "gpio95", "gpio96", "gpio97",
+ "gpio98", "gpio99", "gpio100";
+ function = "sdc5";
+ };
+ clk {
+ pins = "gpio97"; /* SDC5 CLK */
+ drive-strength = <16>;
+ bias-disable;
+ };
+ cmd {
+ pins = "gpio95"; /* SDC5 CMD */
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ data {
+ /* SDC5 D0 to D3 */
+ pins = "gpio96", "gpio98", "gpio99", "gpio100";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ dragon_gsbi12_i2c_pins: gsbi12_i2c {
+ mux {
+ pins = "gpio115", "gpio116";
+ function = "gsbi12";
+ };
+ pinconf {
+ pins = "gpio115", "gpio116";
+ drive-strength = <16>;
+ /* These have external pull-up 4.7kOhm to 1.8V */
+ bias-disable;
+ };
+ };
+
+ /* Primary serial port uart 0 pins */
+ dragon_gsbi12_serial_pins: gsbi12_serial {
+ mux {
+ pins = "gpio117", "gpio118";
+ function = "gsbi12";
+ };
+ tx {
+ pins = "gpio117";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ rx {
+ pins = "gpio118";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ qcom,ssbi@500000 {
+ pmic@0 {
+ keypad@148 {
+ linux,keymap = <
+ MATRIX_KEY(0, 0, KEY_MENU)
+ MATRIX_KEY(0, 2, KEY_1)
+ MATRIX_KEY(0, 3, KEY_4)
+ MATRIX_KEY(0, 4, KEY_7)
+ MATRIX_KEY(1, 0, KEY_UP)
+ MATRIX_KEY(1, 1, KEY_LEFT)
+ MATRIX_KEY(1, 2, KEY_DOWN)
+ MATRIX_KEY(1, 3, KEY_5)
+ MATRIX_KEY(1, 4, KEY_8)
+ MATRIX_KEY(2, 0, KEY_HOME)
+ MATRIX_KEY(2, 1, KEY_REPLY)
+ MATRIX_KEY(2, 2, KEY_2)
+ MATRIX_KEY(2, 3, KEY_6)
+ MATRIX_KEY(3, 0, KEY_VOLUMEUP)
+ MATRIX_KEY(3, 1, KEY_RIGHT)
+ MATRIX_KEY(3, 2, KEY_3)
+ MATRIX_KEY(3, 3, KEY_9)
+ MATRIX_KEY(3, 4, KEY_SWITCHVIDEOMODE)
+ MATRIX_KEY(4, 0, KEY_VOLUMEDOWN)
+ MATRIX_KEY(4, 1, KEY_BACK)
+ MATRIX_KEY(4, 2, KEY_CAMERA)
+ MATRIX_KEY(4, 3, KEY_KBDILLUMTOGGLE)
+ >;
+ keypad,num-rows = <6>;
+ keypad,num-columns = <5>;
+ };
+
+ gpio@150 {
+ dragon_bmp085_gpios: bmp085-gpios {
+ pinconf {
+ pins = "gpio16";
+ function = "normal";
+ input-enable;
+ bias-disable;
+ power-source = <PM8058_GPIO_S3>;
+ };
+ };
+ dragon_sdcc3_gpios: sdcc3-gpios {
+ pinconf {
+ pins = "gpio22";
+ function = "normal";
+ input-enable;
+ bias-disable;
+ power-source = <PM8058_GPIO_S3>;
+ };
+ };
+ dragon_sdcc5_gpios: sdcc5-gpios {
+ pinconf {
+ pins = "gpio26";
+ function = "normal";
+ input-enable;
+ bias-pull-up;
+ qcom,pull-up-strength = <PMIC_GPIO_PULL_UP_30>;
+ power-source = <PM8058_GPIO_S3>;
+ };
+ };
+ dragon_ak8975_gpios: ak8975-gpios {
+ pinconf {
+ pins = "gpio33";
+ function = "normal";
+ input-enable;
+ bias-disable;
+ power-source = <PM8058_GPIO_S3>;
+ };
+ };
+ };
+ };
+ };
+
+ gsbi@19c00000 {
+ status = "ok";
+ qcom,mode = <GSBI_PROT_I2C_UART>;
+
+ serial@19c40000 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_gsbi12_serial_pins>;
+ };
+
+ i2c@19c80000 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_gsbi12_i2c_pins>;
+
+ ak8975@0c {
+ compatible = "asahi-kasei,ak8975";
+ reg = <0x0c>;
+ /* GPIO33 has interrupt 224 on the PM8058 */
+ interrupt-parent = <&pm8058_gpio>;
+ interrupts = <224 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_ak8975_gpios>;
+ vid-supply = <&pm8058_lvs0>; // 1.8V
+ vdd-supply = <&pm8058_l14>; // 2.85V
+ };
+ bmp085@77 {
+ compatible = "bosch,bmp085";
+ reg = <0x77>;
+ /* GPIO16 has interrupt 207 on the PM8058 */
+ interrupt-parent = <&pm8058_gpio>;
+ interrupts = <207 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&tlmm 86 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_bmp085_gpios>;
+ vddd-supply = <&pm8058_lvs0>; // 1.8V
+ vdda-supply = <&pm8058_l14>; // 2.85V
+ };
+ };
+ };
+
+ rpm@104000 {
+ /*
+ * Set up the PMIC RPM regulators for this board.
+ * PM8901 supplies the "preliminary regulators", whatever
+ * that means.
+ */
+ pm8901-regulators {
+ vdd_l0-supply = <&pm8901_s4>;
+ vdd_l1-supply = <&vph>;
+ vdd_l2-supply = <&vph>;
+ vdd_l3-supply = <&vph>;
+ vdd_l4-supply = <&vph>;
+ vdd_l5-supply = <&vph>;
+ vdd_l6-supply = <&vph>;
+ /* vdd_s0-supply, vdd_s1-supply: SAW regulators */
+ vdd_s2-supply = <&vph>;
+ vdd_s3-supply = <&vph>;
+ vdd_s4-supply = <&vph>;
+ lvs0_in-supply = <&pm8058_s3>;
+ lvs1_in-supply = <&pm8901_s4>;
+ lvs2_in-supply = <&pm8058_l0>;
+ lvs3_in-supply = <&pm8058_s2>;
+ mvs_in-supply = <&pm8058_s3>;
+
+ l0 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+ l1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ bias-pull-down;
+ };
+ l2 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ bias-pull-down;
+ };
+ l3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ bias-pull-down;
+ };
+ l4 {
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <2600000>;
+ bias-pull-down;
+ };
+ l5 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ bias-pull-down;
+ };
+ l6 {
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ bias-pull-down;
+ };
+
+ /* s0 and s1 are SAW regulators controlled over SPM */
+ s2 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+ s3 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+ s4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+
+ /* LVS0 thru 3 and mvs0 are just switches */
+ lvs0 {
+ regulator-always-on;
+ };
+ lvs1 { };
+ lvs2 { };
+ lvs3 { };
+ mvs0 {};
+
+ };
+
+ pm8058-regulators {
+ vdd_l0_l1_lvs-supply = <&pm8058_s3>;
+ vdd_l2_l11_l12-supply = <&vph>;
+ vdd_l3_l4_l5-supply = <&vph>;
+ vdd_l6_l7-supply = <&vph>;
+ vdd_l8-supply = <&vph>;
+ vdd_l9-supply = <&vph>;
+ vdd_l10-supply = <&vph>;
+ vdd_l13_l16-supply = <&pm8058_s4>;
+ vdd_l14_l15-supply = <&vph>;
+ vdd_l17_l18-supply = <&vph>;
+ vdd_l19_l20-supply = <&vph>;
+ vdd_l21-supply = <&pm8058_s3>;
+ vdd_l22-supply = <&pm8058_s3>;
+ vdd_l23_l24_l25-supply = <&pm8058_s3>;
+ vdd_s0-supply = <&vph>;
+ vdd_s1-supply = <&vph>;
+ vdd_s2-supply = <&vph>;
+ vdd_s3-supply = <&vph>;
+ vdd_s4-supply = <&vph>;
+ vdd_ncp-supply = <&vph>;
+
+ l0 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+ l1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+ l2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2600000>;
+ bias-pull-down;
+ };
+ l3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+ l4 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ bias-pull-down;
+ };
+ l5 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ bias-pull-down;
+ };
+ l6 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3600000>;
+ bias-pull-down;
+ };
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+ l8 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3050000>;
+ bias-pull-down;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+ l10 {
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <2600000>;
+ bias-pull-down;
+ };
+ l11 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ bias-pull-down;
+ };
+ l12 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ bias-pull-down;
+ };
+ l13 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ bias-pull-down;
+ };
+ l14 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+ l15 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ bias-pull-down;
+ };
+ l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ regulator-always-on;
+ };
+ l17 {
+ // 1.5V according to schematic
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <2600000>;
+ bias-pull-down;
+ };
+ l18 {
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ bias-pull-down;
+ };
+ l19 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ bias-pull-down;
+ };
+ l20 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+ l21 {
+ // 1.1 V according to schematic
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ regulator-always-on;
+ };
+ l22 {
+ // 1.2 V according to schematic
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ bias-pull-down;
+ };
+ l23 {
+ // Unused
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+ l24 {
+ // Unused
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+
+ s0 {
+ // regulator-min-microvolt = <500000>;
+ // regulator-max-microvolt = <1325000>;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+ s1 {
+ // regulator-min-microvolt = <500000>;
+ // regulator-max-microvolt = <1250000>;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+ s2 {
+ // 1.3 V according to schematic
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ qcom,switch-mode-frequency = <1600000>;
+ bias-pull-down;
+ };
+ s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <1600000>;
+ regulator-always-on;
+ bias-pull-down;
+ };
+ s4 {
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ qcom,switch-mode-frequency = <1600000>;
+ regulator-always-on;
+ bias-pull-down;
+ };
+
+ /* LVS0 and LVS1 are just switches */
+ lvs0 {
+ bias-pull-down;
+ };
+ lvs1 {
+ bias-pull-down;
+ };
+
+ ncp {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <1600000>;
+ };
+ };
+ };
+ amba {
+ /* Internal 3.69 GiB eMMC */
+ sdcc@12400000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_sdcc1_pins>;
+ vmmc-supply = <&pm8901_l5>;
+ vqmmc-supply = <&pm8901_lvs0>;
+ };
+
+ /* External micro SD card, directly connected, pulled up to 2.85 V */
+ sdcc@12180000 {
+ status = "okay";
+ /* Enable SSBI GPIO 22 as input, use for card detect */
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_sdcc3_pins>, <&dragon_sdcc3_gpios>;
+ cd-gpios = <&pm8058_gpio 22 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&pm8058_l14>;
+ };
+
+ /*
+ * Second external micro SD card, using two TXB104RGYR levelshifters
+ * to lift from 1.8 V to 2.85 V
+ */
+ sdcc@12200000 {
+ status = "okay";
+ /* Enable SSBI GPIO 26 as input, use for card detect */
+ pinctrl-names = "default";
+ pinctrl-0 = <&dragon_sdcc5_pins>, <&dragon_sdcc5_gpios>;
+ cd-gpios = <&pm8058_gpio 26 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&tlmm 106 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&pm8058_l14>;
+ vqmmc-supply = <&dragon_vio_txb>;
+ };
+ };
+ };
+};
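In the keypad node of this new board file, MATRIX_KEY(row, col, code) is the standard matrix-keymap encoding: the row goes into bits 31..24, the column into bits 23..16 and the Linux keycode into the low 16 bits, so each mapping is a single u32 cell. Worked out for one of the entries above (arithmetic only, not an extra mapping):

	MATRIX_KEY(3, 1, KEY_RIGHT)	/* (3 << 24) | (1 << 16) | 106 = 0x0301006a */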
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval-pins.dtsi
index a3efb9704fcd..a3efb9704fcd 100644
--- a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval-pins.dtsi
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
index e01b27ea7fba..39ae2bc8cb08 100644
--- a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
@@ -1,10 +1,11 @@
#include "qcom-apq8064-v2.0.dtsi"
-#include "qcom-apq8064-arrow-db600c-pins.dtsi"
+#include "qcom-apq8064-arrow-sd-600eval-pins.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
/ {
- model = "Arrow Electronics, APQ8064 DB600c";
- compatible = "arrow,db600c", "qcom,apq8064";
+ model = "Arrow Electronics, APQ8064 SD_600eval";
+ compatible = "arrow,sd_600eval", "qcom,apq8064";
aliases {
serial0 = &gsbi7_serial;
@@ -82,7 +83,8 @@
s4 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- qcom,switch-mode-frequency = <3200000>;
+ qcom,switch-mode-frequency = <1600000>;
+ qcom,force-mode = <QCOM_RPM_FORCE_MODE_AUTO>;
bias-pull-down;
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index 32fedfa149d0..7b05f072bfc2 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -29,12 +29,6 @@
gpio-keys {
compatible = "gpio-keys";
- power {
- label = "Power";
- gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
- linux,code = <KEY_POWER>;
- gpio-key,wakeup;
- };
volume_up {
label = "Volume Up";
gpios = <&pm8921_gpio 4 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
index 4102a98f475b..6b801e7e57a2 100644
--- a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
@@ -7,6 +7,46 @@
};
};
+ sdcc1_pins: sdcc1-pin-active {
+ clk {
+ pins = "sdc1_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd {
+ pins = "sdc1_cmd";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+
+ data {
+ pins = "sdc1_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ sdcc3_pins: sdcc3-pin-active {
+ clk {
+ pins = "sdc3_clk";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ cmd {
+ pins = "sdc3_cmd";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ data {
+ pins = "sdc3_data";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
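As background for the pin groups above (and the equivalent ones removed from the Sony Xperia Yuga dts below), drive-strength on the Qualcomm TLMM pin controller is specified in milliamps, and bias-pull-up / bias-disable enable or drop the weak internal pull, so a pad configuration typically reads like this sketch:

	cmd {
		pins = "sdc1_cmd";
		drive-strength = <10>;	/* 10 mA pad drive */
		bias-pull-up;		/* weak internal pull-up */
	};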
ps_hold: ps_hold {
mux {
pins = "gpio78";
diff --git a/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts b/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
index 06b3c76c3e41..ebd675ca94b4 100644
--- a/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-sony-xperia-yuga.dts
@@ -70,45 +70,6 @@
};
};
- sdcc1_pin_a: sdcc1-pin-active {
- clk {
- pins = "sdc1_clk";
- drive-strengh = <16>;
- bias-disable;
- };
-
- cmd {
- pins = "sdc1_cmd";
- drive-strengh = <10>;
- bias-pull-up;
- };
-
- data {
- pins = "sdc1_data";
- drive-strengh = <10>;
- bias-pull-up;
- };
- };
-
- sdcc3_pin_a: sdcc3-pin-active {
- clk {
- pins = "sdc3_clk";
- drive-strengh = <8>;
- bias-disable;
- };
-
- cmd {
- pins = "sdc3_cmd";
- drive-strengh = <8>;
- bias-pull-up;
- };
-
- data {
- pins = "sdc3_data";
- drive-strengh = <8>;
- bias-pull-up;
- };
- };
sdcc3_cd_pin_a: sdcc3-cd-pin-active {
pins = "gpio26";
@@ -417,9 +378,6 @@
vmmc-supply = <&pm8921_l5>;
vqmmc-supply = <&pm8921_s4>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&sdcc1_pin_a>;
};
sdcc3: sdcc@12180000 {
@@ -429,7 +387,7 @@
cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
- pinctrl-0 = <&sdcc3_pin_a>, <&sdcc3_cd_pin_a>;
+ pinctrl-0 = <&sdcc3_pins>, <&sdcc3_cd_pin_a>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index df96ccdc9bb4..74a9b6c394f5 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -177,7 +177,7 @@
apps_smsm: apps@0 {
reg = <0>;
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
modem_smsm: modem@1 {
@@ -213,6 +213,12 @@
};
};
+ firmware {
+ scm {
+ compatible = "qcom,scm-apq8064";
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -247,7 +253,8 @@
};
timer@200a000 {
- compatible = "qcom,kpss-timer", "qcom,msm-timer";
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-apq8064", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
<1 3 0x301>;
@@ -853,6 +860,8 @@
sdcc1: sdcc@12400000 {
status = "disabled";
compatible = "arm,pl18x", "arm,primecell";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdcc1_pins>;
arm,primecell-periphid = <0x00051180>;
reg = <0x12400000 0x2000>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
index c0e205315042..ad51df27dfb7 100644
--- a/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
@@ -25,11 +25,23 @@
bus-width = <8>;
non-removable;
status = "ok";
+
+ vmmc-supply = <&pm8941_l20>;
+ vqmmc-supply = <&pm8941_s3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhc1_pin_a>;
};
sdhci@f98a4900 {
cd-gpios = <&msmgpio 62 0x1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhc2_pin_a>, <&sdhc2_cd_pin_a>;
bus-width = <4>;
+ status = "ok";
+
+ vmmc-supply = <&pm8941_l21>;
+ vqmmc-supply = <&pm8941_l13>;
};
@@ -59,6 +71,42 @@
function = "blsp_spi8";
};
};
+
+ sdhc1_pin_a: sdhc1-pin-active {
+ clk {
+ pins = "sdc1_clk";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ cmd-data {
+ pins = "sdc1_cmd", "sdc1_data";
+ drive-strength = <10>;
+ bias-pull-up;
+ };
+ };
+
+ sdhc2_cd_pin_a: sdhc2-cd-pin-active {
+ pins = "gpio62";
+ function = "gpio";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ sdhc2_pin_a: sdhc2-pin-active {
+ clk {
+ pins = "sdc2_clk";
+ drive-strength = <10>;
+ bias-disable;
+ };
+
+ cmd-data {
+ pins = "sdc2_cmd", "sdc2_data";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
};
i2c@f9967000 {
@@ -75,4 +123,203 @@
};
};
};
+
+ smd {
+ rpm {
+ rpm_requests {
+ pm8841-regulators {
+ s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ s3 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+ };
+
+ pm8941-regulators {
+ vdd_l1_l3-supply = <&pm8941_s1>;
+ vdd_l2_lvs1_2_3-supply = <&pm8941_s3>;
+ vdd_l4_l11-supply = <&pm8941_s1>;
+ vdd_l5_l7-supply = <&pm8941_s2>;
+ vdd_l6_l12_l14_l15-supply = <&pm8941_s2>;
+ vin_5vs-supply = <&pm8941_5v>;
+
+ s1 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ s2 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ regulator-boot-on;
+ };
+
+ s3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ l3 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-boot-on;
+ };
+
+ l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-boot-on;
+ };
+
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ l11 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-boot-on;
+ };
+
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ l17 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ l19 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-allow-set-load;
+ regulator-boot-on;
+ regulator-system-load = <200000>;
+ };
+
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-boot-on;
+ };
+
+ l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ l23 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+
+ regulator-boot-on;
+ };
+ };
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index a33a09f6821e..7c2df062a025 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -86,6 +86,14 @@
};
};
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc GCC_CE1_CLK>, <&gcc GCC_CE1_AXI_CLK>, <&gcc GCC_CE1_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 5c08d19066c2..b7a24af8f47b 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -84,6 +84,12 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a7-pmu";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
clocks {
sleep_clk: sleep_clk {
compatible = "fixed-clock";
@@ -252,7 +258,7 @@
};
watchdog@b017000 {
- compatible = "qcom,kpss-standalone";
+ compatible = "qcom,kpss-wdt", "qcom,kpss-wdt-ipq4019";
reg = <0xb017000 0x40>;
clocks = <&sleep_clk>;
timeout-sec = <10>;
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 2601a907947b..2e375576ffd0 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -122,7 +122,8 @@
};
timer@200a000 {
- compatible = "qcom,kpss-timer", "qcom,msm-timer";
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-ipq8064", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
<1 3 0x301>,
diff --git a/arch/arm/boot/dts/qcom-msm8660-surf.dts b/arch/arm/boot/dts/qcom-msm8660-surf.dts
index b17f379e8c2a..23de764558ab 100644
--- a/arch/arm/boot/dts/qcom-msm8660-surf.dts
+++ b/arch/arm/boot/dts/qcom-msm8660-surf.dts
@@ -23,15 +23,26 @@
};
};
+ /* Temporary fixed regulator */
+ vsdcc_fixed: vsdcc-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "SDCC Power";
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ regulator-always-on;
+ };
+
amba {
/* eMMC */
sdcc1: sdcc@12400000 {
status = "okay";
+ vmmc-supply = <&vsdcc_fixed>;
};
/* External micro SD card */
sdcc3: sdcc@12180000 {
status = "okay";
+ vmmc-supply = <&vsdcc_fixed>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index cd214030b84a..acbe71febe13 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -122,11 +122,22 @@
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x19c40000 0x1000>,
<0x19c00000 0x1000>;
- interrupts = <0 195 0x0>;
+ interrupts = <0 195 IRQ_TYPE_NONE>;
clocks = <&gcc GSBI12_UART_CLK>, <&gcc GSBI12_H_CLK>;
clock-names = "core", "iface";
status = "disabled";
};
+
+ gsbi12_i2c: i2c@19c80000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ reg = <0x19c80000 0x1000>;
+ interrupts = <0 196 IRQ_TYPE_NONE>;
+ clocks = <&gcc GSBI12_QUP_CLK>, <&gcc GSBI12_H_CLK>;
+ clock-names = "core", "iface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
qcom,ssbi@500000 {
@@ -143,6 +154,44 @@
#address-cells = <1>;
#size-cells = <0>;
+ pm8058_gpio: gpio@150 {
+ compatible = "qcom,pm8058-gpio",
+ "qcom,ssbi-gpio";
+ reg = <0x150>;
+ interrupt-parent = <&pmicintc>;
+ interrupts = <192 1>, <193 1>, <194 1>,
+ <195 1>, <196 1>, <197 1>,
+ <198 1>, <199 1>, <200 1>,
+ <201 1>, <202 1>, <203 1>,
+ <204 1>, <205 1>, <206 1>,
+ <207 1>, <208 1>, <209 1>,
+ <210 1>, <211 1>, <212 1>,
+ <213 1>, <214 1>, <215 1>,
+ <216 1>, <217 1>, <218 1>,
+ <219 1>, <220 1>, <221 1>,
+ <222 1>, <223 1>, <224 1>,
+ <225 1>, <226 1>, <227 1>,
+ <228 1>, <229 1>, <230 1>,
+ <231 1>, <232 1>, <233 1>,
+ <234 1>, <235 1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ };
+
+ pm8058_mpps: mpps@50 {
+ compatible = "qcom,pm8058-mpp",
+ "qcom,ssbi-mpp";
+ reg = <0x50>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&pmicintc>;
+ interrupts =
+ <128 1>, <129 1>, <130 1>, <131 1>,
+ <132 1>, <133 1>, <134 1>, <135 1>,
+ <136 1>, <137 1>, <138 1>, <139 1>;
+ };
+
pwrkey@1c {
compatible = "qcom,pm8058-pwrkey";
reg = <0x1c>;
@@ -162,11 +211,11 @@
row-hold = <91500>;
};
- rtc@11d {
+ rtc@1e8 {
compatible = "qcom,pm8058-rtc";
+ reg = <0x1e8>;
interrupt-parent = <&pmicintc>;
interrupts = <39 1>;
- reg = <0x11d>;
allow-set-time;
};
@@ -177,13 +226,93 @@
};
};
- /* Temporary fixed regulator */
- vsdcc_fixed: vsdcc-regulator {
- compatible = "regulator-fixed";
- regulator-name = "SDCC Power";
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <2700000>;
- regulator-always-on;
+ l2cc: clock-controller@2082000 {
+ compatible = "syscon";
+ reg = <0x02082000 0x1000>;
+ };
+
+ rpm: rpm@104000 {
+ compatible = "qcom,rpm-msm8660";
+ reg = <0x00104000 0x1000>;
+ qcom,ipc = <&l2cc 0x8 2>;
+
+ interrupts = <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ack", "err", "wakeup";
+ clocks = <&gcc RPM_MSG_RAM_H_CLK>;
+ clock-names = "ram";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-apq8660", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+
+ pm8901-regulators {
+ compatible = "qcom,rpm-pm8901-regulators";
+
+ pm8901_l0: l0 {};
+ pm8901_l1: l1 {};
+ pm8901_l2: l2 {};
+ pm8901_l3: l3 {};
+ pm8901_l4: l4 {};
+ pm8901_l5: l5 {};
+ pm8901_l6: l6 {};
+
+ /* S0 and S1 handled as SAW regulators by SPM */
+ pm8901_s2: s2 {};
+ pm8901_s3: s3 {};
+ pm8901_s4: s4 {};
+
+ pm8901_lvs0: lvs0 {};
+ pm8901_lvs1: lvs1 {};
+ pm8901_lvs2: lvs2 {};
+ pm8901_lvs3: lvs3 {};
+
+ pm8901_mvs: mvs {};
+ };
+
+ pm8058-regulators {
+ compatible = "qcom,rpm-pm8058-regulators";
+
+ pm8058_l0: l0 {};
+ pm8058_l1: l1 {};
+ pm8058_l2: l2 {};
+ pm8058_l3: l3 {};
+ pm8058_l4: l4 {};
+ pm8058_l5: l5 {};
+ pm8058_l6: l6 {};
+ pm8058_l7: l7 {};
+ pm8058_l8: l8 {};
+ pm8058_l9: l9 {};
+ pm8058_l10: l10 {};
+ pm8058_l11: l11 {};
+ pm8058_l12: l12 {};
+ pm8058_l13: l13 {};
+ pm8058_l14: l14 {};
+ pm8058_l15: l15 {};
+ pm8058_l16: l16 {};
+ pm8058_l17: l17 {};
+ pm8058_l18: l18 {};
+ pm8058_l19: l19 {};
+ pm8058_l20: l20 {};
+ pm8058_l21: l21 {};
+ pm8058_l22: l22 {};
+ pm8058_l23: l23 {};
+ pm8058_l24: l24 {};
+ pm8058_l25: l25 {};
+
+ pm8058_s0: s0 {};
+ pm8058_s1: s1 {};
+ pm8058_s2: s2 {};
+ pm8058_s3: s3 {};
+ pm8058_s4: s4 {};
+
+ pm8058_lvs0: lvs0 {};
+ pm8058_lvs1: lvs1 {};
+
+ pm8058_ncp: ncp {};
+ };
};
amba {
@@ -205,7 +334,6 @@
non-removable;
cap-sd-highspeed;
cap-mmc-highspeed;
- vmmc-supply = <&vsdcc_fixed>;
};
sdcc3: sdcc@12180000 {
@@ -222,7 +350,21 @@
cap-mmc-highspeed;
max-frequency = <48000000>;
no-1-8-v;
- vmmc-supply = <&vsdcc_fixed>;
+ };
+
+ sdcc5: sdcc@12200000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x00051180>;
+ status = "disabled";
+ reg = <0x12200000 0x8000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&gcc SDC5_CLK>, <&gcc SDC5_H_CLK>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <48000000>;
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index da05e28a81a7..288f56e0ccf5 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -87,7 +87,8 @@
};
timer@200a000 {
- compatible = "qcom,kpss-timer", "qcom,msm-timer";
+ compatible = "qcom,kpss-timer",
+ "qcom,kpss-wdt-msm8960", "qcom,msm-timer";
interrupts = <1 1 0x301>,
<1 2 0x301>,
<1 3 0x301>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index a0398b69f4f2..3fb4dada6b0d 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -367,6 +367,10 @@
};
};
+
+ dma-controller@f9944000 {
+ qcom,controlled-remotely;
+ };
};
&spmi_bus {
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 6f164266a010..561d4d136762 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1,6 +1,6 @@
/dts-v1/;
-#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
#include "skeleton.dtsi"
@@ -182,7 +182,7 @@
modem_smp2p_out: master-kernel {
qcom,entry-name = "master-kernel";
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
modem_smp2p_in: slave-kernel {
@@ -208,7 +208,7 @@
wcnss_smp2p_out: master-kernel {
qcom,entry-name = "master-kernel";
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
wcnss_smp2p_in: slave-kernel {
@@ -232,7 +232,7 @@
apps_smsm: apps@0 {
reg = <0>;
- #qcom,state-cells = <1>;
+ #qcom,smem-state-cells = <1>;
};
modem_smsm: modem@1 {
@@ -260,6 +260,14 @@
};
};
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc GCC_CE1_CLK>, <&gcc GCC_CE1_AXI_CLK>, <&gcc GCC_CE1_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -501,6 +509,8 @@
clock-names = "core", "iface";
#address-cells = <1>;
#size-cells = <0>;
+ dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
+ dma-names = "tx", "rx";
};
spmi_bus: spmi@fc4cf000 {
@@ -518,6 +528,16 @@
interrupt-controller;
#interrupt-cells = <4>;
};
+
+ blsp2_dma: dma-controller@f9944000 {
+ compatible = "qcom,bam-v1.4.0";
+ reg = <0xf9944000 0x19000>;
+ interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ };
};
smd {
diff --git a/arch/arm/boot/dts/qcom-pma8084.dtsi b/arch/arm/boot/dts/qcom-pma8084.dtsi
index 4e9bd3f88473..82d258094156 100644
--- a/arch/arm/boot/dts/qcom-pma8084.dtsi
+++ b/arch/arm/boot/dts/qcom-pma8084.dtsi
@@ -12,15 +12,23 @@
rtc@6000 {
compatible = "qcom,pm8941-rtc";
- reg = <0x6000 0x100>,
- <0x6100 0x100>;
+ reg = <0x6000>,
+ <0x6100>;
reg-names = "rtc", "alarm";
interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
};
+ pwrkey@800 {
+ compatible = "qcom,pm8941-pwrkey";
+ reg = <0x800>;
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ debounce = <15625>;
+ bias-pull-up;
+ };
+
pma8084_gpios: gpios@c000 {
compatible = "qcom,pma8084-gpio", "qcom,spmi-gpio";
- reg = <0xc000 0x1600>;
+ reg = <0xc000>;
gpio-controller;
#gpio-cells = <2>;
interrupts = <0 0xc0 0 IRQ_TYPE_NONE>,
@@ -49,7 +57,7 @@
pma8084_mpps: mpps@a000 {
compatible = "qcom,pma8084-mpp", "qcom,spmi-mpp";
- reg = <0xa000 0x800>;
+ reg = <0xa000>;
gpio-controller;
#gpio-cells = <2>;
interrupts = <0 0xa0 0 IRQ_TYPE_NONE>,
@@ -64,7 +72,7 @@
pma8084_temp: temp-alarm@2400 {
compatible = "qcom,spmi-temp-alarm";
- reg = <0x2400 0x100>;
+ reg = <0x2400>;
interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
#thermal-sensor-cells = <0>;
io-channels = <&pma8084_vadc VADC_DIE_TEMP>;
@@ -73,7 +81,7 @@
pma8084_vadc: vadc@3100 {
compatible = "qcom,spmi-vadc";
- reg = <0x3100 0x100>;
+ reg = <0x3100>;
interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/r7s72100-genmai.dts b/arch/arm/boot/dts/r7s72100-genmai.dts
index a9da7a89fc4b..118a8e2b86bd 100644
--- a/arch/arm/boot/dts/r7s72100-genmai.dts
+++ b/arch/arm/boot/dts/r7s72100-genmai.dts
@@ -17,15 +17,15 @@
compatible = "renesas,genmai", "renesas,r7s72100";
aliases {
- serial2 = &scif2;
+ serial0 = &scif2;
};
chosen {
bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
- stdout-path = &scif2;
+ stdout-path = "serial0:115200n8";
};
- memory {
+ memory@8000000 {
device_type = "memory";
reg = <0x08000000 0x08000000>;
};
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index 93ace33e3e36..ec7c86e06538 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -36,7 +36,7 @@
reg = <2 0x00000000 0 0x40000000>;
};
- vcc_mmc0: regulator@0 {
+ vcc_mmc0: regulator-mmc0 {
compatible = "regulator-fixed";
regulator-name = "MMC0 Vcc";
regulator-min-microvolt = <2800000>;
@@ -44,7 +44,7 @@
regulator-always-on;
};
- vcc_sdhi0: regulator@1 {
+ vcc_sdhi0: regulator-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -56,7 +56,7 @@
};
/* Common 1.8V and 3.3V rails, used by several devices on APE6EVM */
- ape6evm_fixed_1v8: regulator@2 {
+ ape6evm_fixed_1v8: regulator-1v8 {
compatible = "regulator-fixed";
regulator-name = "1V8";
regulator-min-microvolt = <1800000>;
@@ -64,7 +64,7 @@
regulator-always-on;
};
- ape6evm_fixed_3v3: regulator@3 {
+ ape6evm_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "3V3";
regulator-min-microvolt = <3300000>;
@@ -188,12 +188,12 @@
};
&pfc {
- scifa0_pins: serial0 {
+ scifa0_pins: scifa0 {
groups = "scifa0_data";
function = "scifa0";
};
- mmc0_pins: mmc {
+ mmc0_pins: mmc0 {
groups = "mmc0_data8", "mmc0_ctrl";
function = "mmc0";
};
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 6954912a3753..ca8672778fe0 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -31,6 +31,24 @@
power-domains = <&pd_a2sl>;
next-level-cache = <&L2_CA15>;
};
+
+ L2_CA15: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ clocks = <&cpg_clocks R8A73A4_CLK_Z>;
+ power-domains = <&pd_a3sm>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ L2_CA7: cache-controller@100 {
+ compatible = "cache";
+ reg = <0x100>;
+ clocks = <&cpg_clocks R8A73A4_CLK_Z2>;
+ power-domains = <&pd_a3km>;
+ cache-unified;
+ cache-level = <2>;
+ };
};
ptm {
@@ -46,22 +64,6 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
- L2_CA15: cache-controller@0 {
- compatible = "cache";
- clocks = <&cpg_clocks R8A73A4_CLK_Z>;
- power-domains = <&pd_a3sm>;
- cache-unified;
- cache-level = <2>;
- };
-
- L2_CA7: cache-controller@1 {
- compatible = "cache";
- clocks = <&cpg_clocks R8A73A4_CLK_Z2>;
- power-domains = <&pd_a3km>;
- cache-unified;
- cache-level = <2>;
- };
-
dbsc1: memory-controller@e6790000 {
compatible = "renesas,dbsc-r8a73a4";
reg = <0 0xe6790000 0 0x10000>;
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 2c82dab2b6f4..7885075428bb 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -20,20 +20,20 @@
compatible = "renesas,armadillo800eva", "renesas,r8a7740";
aliases {
- serial1 = &scifa1;
+ serial0 = &scifa1;
};
chosen {
- bootargs = "console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp rw";
- stdout-path = &scifa1;
+ bootargs = "earlyprintk ignore_loglevel root=/dev/nfs ip=dhcp rw";
+ stdout-path = "serial0:115200n8";
};
- memory {
+ memory@40000000 {
device_type = "memory";
reg = <0x40000000 0x20000000>;
};
- reg_3p3v: regulator@0 {
+ reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -42,7 +42,7 @@
regulator-boot-on;
};
- vcc_sdhi0: regulator@1 {
+ vcc_sdhi0: regulator-vcc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -53,7 +53,7 @@
enable-active-high;
};
- vccq_sdhi0: regulator@2 {
+ vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -69,7 +69,7 @@
enable-active-high;
};
- reg_5p0v: regulator@3 {
+ reg_5p0v: regulator-5p0v {
compatible = "regulator-fixed";
regulator-name = "fixed-5.0V";
regulator-min-microvolt = <5000000>;
@@ -127,7 +127,7 @@
};
};
- i2c2: i2c@2 {
+ i2c2: i2c-2 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "i2c-gpio";
@@ -232,7 +232,7 @@
function = "gether";
};
- scifa1_pins: serial1 {
+ scifa1_pins: scifa1 {
groups = "scifa1_data";
function = "scifa1";
};
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 39b2f88ad151..159e04eb1b9e 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -39,7 +39,7 @@
<0xc2000000 0x1000>;
};
- L2: cache-controller {
+ L2: cache-controller@f0100000 {
compatible = "arm,pl310-cache";
reg = <0xf0100000 0x1000>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7778-bockw.dts b/arch/arm/boot/dts/r8a7778-bockw.dts
index e0dab1464648..211d239d9041 100644
--- a/arch/arm/boot/dts/r8a7778-bockw.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw.dts
@@ -32,12 +32,12 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x10000000>;
};
- fixedregulator3v3: fixedregulator@0 {
+ fixedregulator3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -129,7 +129,7 @@
pinctrl-0 = <&scif_clk_pins>;
pinctrl-names = "default";
- scif0_pins: serial0 {
+ scif0_pins: scif0 {
groups = "scif0_data_a", "scif0_ctrl";
function = "scif0";
};
@@ -223,6 +223,7 @@
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index fe787b4751d2..e571d66ea0fe 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -276,23 +276,23 @@
status = "disabled";
rcar_sound,src {
- src3: src@3 { };
- src4: src@4 { };
- src5: src@5 { };
- src6: src@6 { };
- src7: src@7 { };
- src8: src@8 { };
- src9: src@9 { };
+ src3: src-3 { };
+ src4: src-4 { };
+ src5: src-5 { };
+ src6: src-6 { };
+ src7: src-7 { };
+ src8: src-8 { };
+ src9: src-9 { };
};
rcar_sound,ssi {
- ssi3: ssi@3 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
- ssi4: ssi@4 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
- ssi5: ssi@5 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
- ssi6: ssi@6 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
- ssi7: ssi@7 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
- ssi8: ssi@8 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
- ssi9: ssi@9 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi3: ssi-3 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi4: ssi-4 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi5: ssi-5 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi6: ssi-6 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi7: ssi-7 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi8: ssi-8 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+ ssi9: ssi-9 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
};
};
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index b795da6f5503..541678df90a9 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -25,15 +25,15 @@
chosen {
bootargs = "ignore_loglevel root=/dev/nfs ip=on";
- stdout-path = &scif2;
+ stdout-path = "serial0:115200n8";
};
- memory {
+ memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
};
- fixedregulator3v3: fixedregulator@0 {
+ fixedregulator3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -195,12 +195,12 @@
};
};
- scif2_pins: serial2 {
+ scif2_pins: scif2 {
groups = "scif2_data_c";
function = "scif2";
};
- scif4_pins: serial4 {
+ scif4_pins: scif4 {
groups = "scif4_data";
function = "scif4";
};
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 749ba02b6a53..52b56fcaddf2 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -76,28 +76,28 @@
keyboard {
compatible = "gpio-keys";
- button@1 {
+ one {
linux,code = <KEY_1>;
label = "SW2-1";
wakeup-source;
debounce-interval = <20>;
gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
};
- button@2 {
+ two {
linux,code = <KEY_2>;
label = "SW2-2";
wakeup-source;
debounce-interval = <20>;
gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
};
- button@3 {
+ three {
linux,code = <KEY_3>;
label = "SW2-3";
wakeup-source;
debounce-interval = <20>;
gpios = <&gpio1 26 GPIO_ACTIVE_LOW>;
};
- button@4 {
+ four {
linux,code = <KEY_4>;
label = "SW2-4";
wakeup-source;
@@ -119,7 +119,7 @@
};
};
- fixedregulator3v3: fixedregulator@0 {
+ fixedregulator3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -128,7 +128,7 @@
regulator-always-on;
};
- vcc_sdhi0: regulator@1 {
+ vcc_sdhi0: regulator-vcc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -139,7 +139,7 @@
enable-active-high;
};
- vccq_sdhi0: regulator@2 {
+ vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -152,7 +152,7 @@
1800000 0>;
};
- vcc_sdhi2: regulator@3 {
+ vcc_sdhi2: regulator-vcc-sdhi2 {
compatible = "regulator-fixed";
regulator-name = "SDHI2 Vcc";
@@ -163,7 +163,7 @@
enable-active-high;
};
- vccq_sdhi2: regulator@4 {
+ vccq_sdhi2: regulator-vccq-sdhi2 {
compatible = "regulator-gpio";
regulator-name = "SDHI2 VccQ";
@@ -263,7 +263,7 @@
* instantiate the slave device at runtime according to the documentation.
* You can then communicate with the slave via IIC3.
*/
- i2cexio: i2c@8 {
+ i2cexio: i2c-8 {
compatible = "i2c-demux-pinctrl";
i2c-parent = <&iic0>, <&i2c0>;
i2c-bus-name = "i2c-exio";
@@ -317,7 +317,7 @@
function = "du";
};
- scif0_pins: serial0 {
+ scif0_pins: scif0 {
groups = "scif0_data";
function = "scif0";
};
@@ -337,7 +337,7 @@
function = "intc";
};
- scifa1_pins: serial1 {
+ scifa1_pins: scifa1 {
groups = "scifa1_data";
function = "scifa1";
};
@@ -371,12 +371,12 @@
function = "mmc1";
};
- qspi_pins: spi0 {
+ qspi_pins: qspi {
groups = "qspi_ctrl", "qspi_data4";
function = "qspi";
};
- msiof1_pins: spi2 {
+ msiof1_pins: msiof1 {
groups = "msiof1_clk", "msiof1_sync", "msiof1_rx",
"msiof1_tx";
function = "msiof1";
@@ -427,7 +427,7 @@
function = "usb2";
};
- vin1_pins: vin {
+ vin1_pins: vin1 {
groups = "vin1_data8", "vin1_clk";
function = "vin1";
};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 83cf23cd26bb..d18558f21102 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -44,6 +44,7 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
+ enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -92,7 +93,7 @@
next-level-cache = <&L2_CA15>;
};
- cpu4: cpu@4 {
+ cpu4: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x100>;
@@ -101,7 +102,7 @@
next-level-cache = <&L2_CA7>;
};
- cpu5: cpu@5 {
+ cpu5: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x101>;
@@ -110,7 +111,7 @@
next-level-cache = <&L2_CA7>;
};
- cpu6: cpu@6 {
+ cpu6: cpu@102 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x102>;
@@ -119,7 +120,7 @@
next-level-cache = <&L2_CA7>;
};
- cpu7: cpu@7 {
+ cpu7: cpu@103 {
device_type = "cpu";
compatible = "arm,cortex-a7";
reg = <0x103>;
@@ -127,6 +128,22 @@
power-domains = <&sysc R8A7790_PD_CA7_CPU3>;
next-level-cache = <&L2_CA7>;
};
+
+ L2_CA15: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ power-domains = <&sysc R8A7790_PD_CA15_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ L2_CA7: cache-controller@100 {
+ compatible = "cache";
+ reg = <0x100>;
+ power-domains = <&sysc R8A7790_PD_CA7_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
};
thermal-zones {
@@ -148,18 +165,16 @@
};
};
- L2_CA15: cache-controller@0 {
- compatible = "cache";
- power-domains = <&sysc R8A7790_PD_CA15_SCU>;
- cache-unified;
- cache-level = <2>;
+ apmu@e6151000 {
+ compatible = "renesas,r8a7790-apmu", "renesas,apmu";
+ reg = <0 0xe6151000 0 0x188>;
+ cpus = <&cpu4 &cpu5 &cpu6 &cpu7>;
};
- L2_CA7: cache-controller@1 {
- compatible = "cache";
- power-domains = <&sysc R8A7790_PD_CA7_SCU>;
- cache-unified;
- cache-level = <2>;
+ apmu@e6152000 {
+ compatible = "renesas,r8a7790-apmu", "renesas,apmu";
+ reg = <0 0xe6152000 0 0x188>;
+ cpus = <&cpu0 &cpu1 &cpu2 &cpu3>;
};
gic: interrupt-controller@f1001000 {
@@ -517,8 +532,9 @@
reg = <0 0xe6500000 0 0x425>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
- dmas = <&dmac0 0x61>, <&dmac0 0x62>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x61>, <&dmac0 0x62>,
+ <&dmac1 0x61>, <&dmac1 0x62>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -530,8 +546,9 @@
reg = <0 0xe6510000 0 0x425>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_IIC1>;
- dmas = <&dmac0 0x65>, <&dmac0 0x66>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x65>, <&dmac0 0x66>,
+ <&dmac1 0x65>, <&dmac1 0x66>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -543,8 +560,9 @@
reg = <0 0xe6520000 0 0x425>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_IIC2>;
- dmas = <&dmac0 0x69>, <&dmac0 0x6a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x69>, <&dmac0 0x6a>,
+ <&dmac1 0x69>, <&dmac1 0x6a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -556,8 +574,9 @@
reg = <0 0xe60b0000 0 0x425>;
interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7790_CLK_IICDVFS>;
- dmas = <&dmac0 0x77>, <&dmac0 0x78>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x77>, <&dmac0 0x78>,
+ <&dmac1 0x77>, <&dmac1 0x78>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -567,8 +586,9 @@
reg = <0 0xee200000 0 0x80>;
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
- dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xd1>, <&dmac0 0xd2>,
+ <&dmac1 0xd1>, <&dmac1 0xd2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
reg-io-width = <4>;
status = "disabled";
@@ -580,8 +600,9 @@
reg = <0 0xee220000 0 0x80>;
interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_MMCIF1>;
- dmas = <&dmac0 0xe1>, <&dmac0 0xe2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xe1>, <&dmac0 0xe2>,
+ <&dmac1 0xe1>, <&dmac1 0xe2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
reg-io-width = <4>;
status = "disabled";
@@ -598,8 +619,9 @@
reg = <0 0xee100000 0 0x328>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI0>;
- dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
max-frequency = <195000000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
@@ -610,8 +632,9 @@
reg = <0 0xee120000 0 0x328>;
interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI1>;
- dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xc9>, <&dmac0 0xca>,
+ <&dmac1 0xc9>, <&dmac1 0xca>;
+ dma-names = "tx", "rx", "tx", "rx";
max-frequency = <195000000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
@@ -622,8 +645,9 @@
reg = <0 0xee140000 0 0x100>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI2>;
- dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
+ <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx", "tx", "rx";
max-frequency = <97500000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
@@ -634,8 +658,9 @@
reg = <0 0xee160000 0 0x100>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7790_CLK_SDHI3>;
- dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
+ <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx", "tx", "rx";
max-frequency = <97500000>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
@@ -648,8 +673,9 @@
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFA0>;
clock-names = "fck";
- dmas = <&dmac0 0x21>, <&dmac0 0x22>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+ <&dmac1 0x21>, <&dmac1 0x22>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -661,8 +687,9 @@
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFA1>;
clock-names = "fck";
- dmas = <&dmac0 0x25>, <&dmac0 0x26>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+ <&dmac1 0x25>, <&dmac1 0x26>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -674,8 +701,9 @@
interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFA2>;
clock-names = "fck";
- dmas = <&dmac0 0x27>, <&dmac0 0x28>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+ <&dmac1 0x27>, <&dmac1 0x28>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -687,8 +715,9 @@
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFB0>;
clock-names = "fck";
- dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -700,8 +729,9 @@
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFB1>;
clock-names = "fck";
- dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+ <&dmac1 0x19>, <&dmac1 0x1a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -713,8 +743,9 @@
interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFB2>;
clock-names = "fck";
- dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+ <&dmac1 0x1d>, <&dmac1 0x1e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -727,8 +758,9 @@
clocks = <&mstp7_clks R8A7790_CLK_SCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+ <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -741,8 +773,9 @@
clocks = <&mstp7_clks R8A7790_CLK_SCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+ <&dmac1 0x2d>, <&dmac1 0x2e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -755,8 +788,9 @@
clocks = <&mstp3_clks R8A7790_CLK_SCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+ <&dmac1 0x2b>, <&dmac1 0x2c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -769,8 +803,9 @@
clocks = <&mstp7_clks R8A7790_CLK_HSCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -783,8 +818,9 @@
clocks = <&mstp7_clks R8A7790_CLK_HSCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -1469,8 +1505,9 @@
reg = <0 0xe6b10000 0 0x2c>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7790_CLK_QSPI_MOD>;
- dmas = <&dmac0 0x17>, <&dmac0 0x18>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x17>, <&dmac0 0x18>,
+ <&dmac1 0x17>, <&dmac1 0x18>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
num-cs = <1>;
#address-cells = <1>;
@@ -1483,8 +1520,9 @@
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
- dmas = <&dmac0 0x51>, <&dmac0 0x52>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x51>, <&dmac0 0x52>,
+ <&dmac1 0x51>, <&dmac1 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1496,8 +1534,9 @@
reg = <0 0xe6e10000 0 0x0064>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
- dmas = <&dmac0 0x55>, <&dmac0 0x56>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x55>, <&dmac0 0x56>,
+ <&dmac1 0x55>, <&dmac1 0x56>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1509,8 +1548,9 @@
reg = <0 0xe6e00000 0 0x0064>;
interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
- dmas = <&dmac0 0x41>, <&dmac0 0x42>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x41>, <&dmac0 0x42>,
+ <&dmac1 0x41>, <&dmac1 0x42>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1522,8 +1562,9 @@
reg = <0 0xe6c90000 0 0x0064>;
interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
- dmas = <&dmac0 0x45>, <&dmac0 0x46>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x45>, <&dmac0 0x46>,
+ <&dmac1 0x45>, <&dmac1 0x46>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1702,79 +1743,79 @@
status = "disabled";
rcar_sound,dvc {
- dvc0: dvc@0 {
+ dvc0: dvc-0 {
dmas = <&audma0 0xbc>;
dma-names = "tx";
};
- dvc1: dvc@1 {
+ dvc1: dvc-1 {
dmas = <&audma0 0xbe>;
dma-names = "tx";
};
};
rcar_sound,mix {
- mix0: mix@0 { };
- mix1: mix@1 { };
+ mix0: mix-0 { };
+ mix1: mix-1 { };
};
rcar_sound,ctu {
- ctu00: ctu@0 { };
- ctu01: ctu@1 { };
- ctu02: ctu@2 { };
- ctu03: ctu@3 { };
- ctu10: ctu@4 { };
- ctu11: ctu@5 { };
- ctu12: ctu@6 { };
- ctu13: ctu@7 { };
+ ctu00: ctu-0 { };
+ ctu01: ctu-1 { };
+ ctu02: ctu-2 { };
+ ctu03: ctu-3 { };
+ ctu10: ctu-4 { };
+ ctu11: ctu-5 { };
+ ctu12: ctu-6 { };
+ ctu13: ctu-7 { };
};
rcar_sound,src {
- src0: src@0 {
+ src0: src-0 {
interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x85>, <&audma1 0x9a>;
dma-names = "rx", "tx";
};
- src1: src@1 {
+ src1: src-1 {
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x87>, <&audma1 0x9c>;
dma-names = "rx", "tx";
};
- src2: src@2 {
+ src2: src-2 {
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x89>, <&audma1 0x9e>;
dma-names = "rx", "tx";
};
- src3: src@3 {
+ src3: src-3 {
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8b>, <&audma1 0xa0>;
dma-names = "rx", "tx";
};
- src4: src@4 {
+ src4: src-4 {
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8d>, <&audma1 0xb0>;
dma-names = "rx", "tx";
};
- src5: src@5 {
+ src5: src-5 {
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8f>, <&audma1 0xb2>;
dma-names = "rx", "tx";
};
- src6: src@6 {
+ src6: src-6 {
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x91>, <&audma1 0xb4>;
dma-names = "rx", "tx";
};
- src7: src@7 {
+ src7: src-7 {
interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x93>, <&audma1 0xb6>;
dma-names = "rx", "tx";
};
- src8: src@8 {
+ src8: src-8 {
interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x95>, <&audma1 0xb8>;
dma-names = "rx", "tx";
};
- src9: src@9 {
+ src9: src-9 {
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x97>, <&audma1 0xba>;
dma-names = "rx", "tx";
@@ -1782,52 +1823,52 @@
};
rcar_sound,ssi {
- ssi0: ssi@0 {
+ ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi1: ssi@1 {
+ ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi2: ssi@2 {
+ ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi3: ssi@3 {
+ ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi4: ssi@4 {
+ ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi5: ssi@5 {
+ ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi6: ssi@6 {
+ ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi7: ssi@7 {
+ ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi8: ssi@8 {
+ ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi9: ssi@9 {
+ ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
dma-names = "rx", "tx", "rxu", "txu";
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index da59c2844b8a..f8a7d090fd01 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -170,7 +170,7 @@
};
};
- vcc_sdhi0: regulator@0 {
+ vcc_sdhi0: regulator-vcc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -181,7 +181,7 @@
enable-active-high;
};
- vccq_sdhi0: regulator@1 {
+ vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -194,7 +194,7 @@
1800000 0>;
};
- vcc_sdhi1: regulator@2 {
+ vcc_sdhi1: regulator-vcc-sdhi1 {
compatible = "regulator-fixed";
regulator-name = "SDHI1 Vcc";
@@ -205,7 +205,7 @@
enable-active-high;
};
- vccq_sdhi1: regulator@3 {
+ vccq_sdhi1: regulator-vccq-sdhi1 {
compatible = "regulator-gpio";
regulator-name = "SDHI1 VccQ";
@@ -218,7 +218,7 @@
1800000 0>;
};
- vcc_sdhi2: regulator@4 {
+ vcc_sdhi2: regulator-vcc-sdhi2 {
compatible = "regulator-fixed";
regulator-name = "SDHI2 Vcc";
@@ -229,7 +229,7 @@
enable-active-high;
};
- vccq_sdhi2: regulator@5 {
+ vccq_sdhi2: regulator-vccq-sdhi2 {
compatible = "regulator-gpio";
regulator-name = "SDHI2 VccQ";
@@ -332,12 +332,12 @@
function = "du";
};
- scif0_pins: serial0 {
+ scif0_pins: scif0 {
groups = "scif0_data_d";
function = "scif0";
};
- scif1_pins: serial1 {
+ scif1_pins: scif1 {
groups = "scif1_data_d";
function = "scif1";
};
@@ -372,12 +372,12 @@
function = "sdhi2";
};
- qspi_pins: spi0 {
+ qspi_pins: qspi {
groups = "qspi_ctrl", "qspi_data4";
function = "qspi";
};
- msiof0_pins: spi1 {
+ msiof0_pins: msiof0 {
groups = "msiof0_clk", "msiof0_sync", "msiof0_rx",
"msiof0_tx";
function = "msiof0";
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6a1bb1a8209b..6761d11d3f9e 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -46,7 +46,7 @@
reg = <2 0x00000000 0 0x40000000>;
};
- vcc_sdhi0: regulator@0 {
+ vcc_sdhi0: regulator-vcc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -55,7 +55,7 @@
regulator-always-on;
};
- vccq_sdhi0: regulator@1 {
+ vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -68,7 +68,7 @@
1800000 0>;
};
- vcc_sdhi2: regulator@2 {
+ vcc_sdhi2: regulator-vcc-sdhi2 {
compatible = "regulator-fixed";
regulator-name = "SDHI2 Vcc";
@@ -77,7 +77,7 @@
regulator-always-on;
};
- vccq_sdhi2: regulator@3 {
+ vccq_sdhi2: regulator-vccq-sdhi2 {
compatible = "regulator-gpio";
regulator-name = "SDHI2 VccQ";
@@ -142,7 +142,7 @@
};
&pfc {
- scif0_pins: serial0 {
+ scif0_pins: scif0 {
groups = "scif0_data_d";
function = "scif0";
};
@@ -167,7 +167,7 @@
function = "sdhi2";
};
- qspi_pins: spi0 {
+ qspi_pins: qspi {
groups = "qspi_ctrl", "qspi_data4";
function = "qspi";
};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index db67e342c585..8f0086bbd96b 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -43,6 +43,7 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
+ enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -72,6 +73,14 @@
power-domains = <&sysc R8A7791_PD_CA15_CPU1>;
next-level-cache = <&L2_CA15>;
};
+
+ L2_CA15: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ power-domains = <&sysc R8A7791_PD_CA15_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
};
thermal-zones {
@@ -93,11 +102,10 @@
};
};
- L2_CA15: cache-controller@0 {
- compatible = "cache";
- power-domains = <&sysc R8A7791_PD_CA15_SCU>;
- cache-unified;
- cache-level = <2>;
+ apmu@e6152000 {
+ compatible = "renesas,r8a7791-apmu", "renesas,apmu";
+ reg = <0 0xe6152000 0 0x188>;
+ cpus = <&cpu0 &cpu1>;
};
gic: interrupt-controller@f1001000 {
@@ -514,8 +522,9 @@
reg = <0 0xe60b0000 0 0x425>;
interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7791_CLK_IICDVFS>;
- dmas = <&dmac0 0x77>, <&dmac0 0x78>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x77>, <&dmac0 0x78>,
+ <&dmac1 0x77>, <&dmac1 0x78>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -527,8 +536,9 @@
reg = <0 0xe6500000 0 0x425>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_IIC0>;
- dmas = <&dmac0 0x61>, <&dmac0 0x62>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x61>, <&dmac0 0x62>,
+ <&dmac1 0x61>, <&dmac1 0x62>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -540,8 +550,9 @@
reg = <0 0xe6510000 0 0x425>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_IIC1>;
- dmas = <&dmac0 0x65>, <&dmac0 0x66>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x65>, <&dmac0 0x66>,
+ <&dmac1 0x65>, <&dmac1 0x66>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -556,8 +567,9 @@
reg = <0 0xee200000 0 0x80>;
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_MMCIF0>;
- dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xd1>, <&dmac0 0xd2>,
+ <&dmac1 0xd1>, <&dmac1 0xd2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
reg-io-width = <4>;
status = "disabled";
@@ -569,8 +581,9 @@
reg = <0 0xee100000 0 0x328>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_SDHI0>;
- dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -580,8 +593,9 @@
reg = <0 0xee140000 0 0x100>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_SDHI1>;
- dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
+ <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -591,8 +605,9 @@
reg = <0 0xee160000 0 0x100>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7791_CLK_SDHI2>;
- dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
+ <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -604,8 +619,9 @@
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_SCIFA0>;
clock-names = "fck";
- dmas = <&dmac0 0x21>, <&dmac0 0x22>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+ <&dmac1 0x21>, <&dmac1 0x22>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -617,8 +633,9 @@
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_SCIFA1>;
clock-names = "fck";
- dmas = <&dmac0 0x25>, <&dmac0 0x26>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+ <&dmac1 0x25>, <&dmac1 0x26>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -630,8 +647,9 @@
interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_SCIFA2>;
clock-names = "fck";
- dmas = <&dmac0 0x27>, <&dmac0 0x28>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+ <&dmac1 0x27>, <&dmac1 0x28>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -643,8 +661,9 @@
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7791_CLK_SCIFA3>;
clock-names = "fck";
- dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1b>, <&dmac0 0x1c>,
+ <&dmac1 0x1b>, <&dmac1 0x1c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -656,8 +675,9 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7791_CLK_SCIFA4>;
clock-names = "fck";
- dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1f>, <&dmac0 0x20>,
+ <&dmac1 0x1f>, <&dmac1 0x20>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -669,8 +689,9 @@
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7791_CLK_SCIFA5>;
clock-names = "fck";
- dmas = <&dmac0 0x23>, <&dmac0 0x24>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x23>, <&dmac0 0x24>,
+ <&dmac1 0x23>, <&dmac1 0x24>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -682,8 +703,9 @@
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_SCIFB0>;
clock-names = "fck";
- dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -695,8 +717,9 @@
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_SCIFB1>;
clock-names = "fck";
- dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+ <&dmac1 0x19>, <&dmac1 0x1a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -708,8 +731,9 @@
interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_SCIFB2>;
clock-names = "fck";
- dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+ <&dmac1 0x1d>, <&dmac1 0x1e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -722,8 +746,9 @@
clocks = <&mstp7_clks R8A7791_CLK_SCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+ <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -736,8 +761,9 @@
clocks = <&mstp7_clks R8A7791_CLK_SCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+ <&dmac1 0x2d>, <&dmac1 0x2e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -750,8 +776,9 @@
clocks = <&mstp7_clks R8A7791_CLK_SCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+ <&dmac1 0x2b>, <&dmac1 0x2c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -764,8 +791,9 @@
clocks = <&mstp7_clks R8A7791_CLK_SCIF3>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2f>, <&dmac0 0x30>,
+ <&dmac1 0x2f>, <&dmac1 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -778,8 +806,9 @@
clocks = <&mstp7_clks R8A7791_CLK_SCIF4>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xfb>, <&dmac0 0xfc>,
+ <&dmac1 0xfb>, <&dmac1 0xfc>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -792,8 +821,9 @@
clocks = <&mstp7_clks R8A7791_CLK_SCIF5>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xfd>, <&dmac0 0xfe>,
+ <&dmac1 0xfd>, <&dmac1 0xfe>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -806,8 +836,9 @@
clocks = <&mstp7_clks R8A7791_CLK_HSCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -820,8 +851,9 @@
clocks = <&mstp7_clks R8A7791_CLK_HSCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -834,8 +866,9 @@
clocks = <&mstp7_clks R8A7791_CLK_HSCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3b>, <&dmac0 0x3c>,
+ <&dmac1 0x3b>, <&dmac1 0x3c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -1478,8 +1511,9 @@
reg = <0 0xe6b10000 0 0x2c>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7791_CLK_QSPI_MOD>;
- dmas = <&dmac0 0x17>, <&dmac0 0x18>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x17>, <&dmac0 0x18>,
+ <&dmac1 0x17>, <&dmac1 0x18>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
num-cs = <1>;
#address-cells = <1>;
@@ -1492,8 +1526,9 @@
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
- dmas = <&dmac0 0x51>, <&dmac0 0x52>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x51>, <&dmac0 0x52>,
+ <&dmac1 0x51>, <&dmac1 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1505,8 +1540,9 @@
reg = <0 0xe6e10000 0 0x0064>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_MSIOF1>;
- dmas = <&dmac0 0x55>, <&dmac0 0x56>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x55>, <&dmac0 0x56>,
+ <&dmac1 0x55>, <&dmac1 0x56>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1518,8 +1554,9 @@
reg = <0 0xe6e00000 0 0x0064>;
interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7791_CLK_MSIOF2>;
- dmas = <&dmac0 0x41>, <&dmac0 0x42>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x41>, <&dmac0 0x42>,
+ <&dmac1 0x41>, <&dmac1 0x42>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1737,79 +1774,79 @@
status = "disabled";
rcar_sound,dvc {
- dvc0: dvc@0 {
+ dvc0: dvc-0 {
dmas = <&audma0 0xbc>;
dma-names = "tx";
};
- dvc1: dvc@1 {
+ dvc1: dvc-1 {
dmas = <&audma0 0xbe>;
dma-names = "tx";
};
};
rcar_sound,mix {
- mix0: mix@0 { };
- mix1: mix@1 { };
+ mix0: mix-0 { };
+ mix1: mix-1 { };
};
rcar_sound,ctu {
- ctu00: ctu@0 { };
- ctu01: ctu@1 { };
- ctu02: ctu@2 { };
- ctu03: ctu@3 { };
- ctu10: ctu@4 { };
- ctu11: ctu@5 { };
- ctu12: ctu@6 { };
- ctu13: ctu@7 { };
+ ctu00: ctu-0 { };
+ ctu01: ctu-1 { };
+ ctu02: ctu-2 { };
+ ctu03: ctu-3 { };
+ ctu10: ctu-4 { };
+ ctu11: ctu-5 { };
+ ctu12: ctu-6 { };
+ ctu13: ctu-7 { };
};
rcar_sound,src {
- src0: src@0 {
+ src0: src-0 {
interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x85>, <&audma1 0x9a>;
dma-names = "rx", "tx";
};
- src1: src@1 {
+ src1: src-1 {
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x87>, <&audma1 0x9c>;
dma-names = "rx", "tx";
};
- src2: src@2 {
+ src2: src-2 {
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x89>, <&audma1 0x9e>;
dma-names = "rx", "tx";
};
- src3: src@3 {
+ src3: src-3 {
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8b>, <&audma1 0xa0>;
dma-names = "rx", "tx";
};
- src4: src@4 {
+ src4: src-4 {
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8d>, <&audma1 0xb0>;
dma-names = "rx", "tx";
};
- src5: src@5 {
+ src5: src-5 {
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8f>, <&audma1 0xb2>;
dma-names = "rx", "tx";
};
- src6: src@6 {
+ src6: src-6 {
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x91>, <&audma1 0xb4>;
dma-names = "rx", "tx";
};
- src7: src@7 {
+ src7: src-7 {
interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x93>, <&audma1 0xb6>;
dma-names = "rx", "tx";
};
- src8: src@8 {
+ src8: src-8 {
interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x95>, <&audma1 0xb8>;
dma-names = "rx", "tx";
};
- src9: src@9 {
+ src9: src-9 {
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x97>, <&audma1 0xba>;
dma-names = "rx", "tx";
@@ -1817,52 +1854,52 @@
};
rcar_sound,ssi {
- ssi0: ssi@0 {
+ ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi1: ssi@1 {
+ ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi2: ssi@2 {
+ ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi3: ssi@3 {
+ ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi4: ssi@4 {
+ ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi5: ssi@5 {
+ ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi6: ssi@6 {
+ ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi7: ssi@7 {
+ ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi8: ssi@8 {
+ ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi9: ssi@9 {
+ ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
dma-names = "rx", "tx", "rxu", "txu";
diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
new file mode 100644
index 000000000000..e7b40f0e7da6
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7792-blanche.dts
@@ -0,0 +1,66 @@
+/*
+ * Device Tree Source for the Blanche board
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7792.dtsi"
+
+/ {
+ model = "Blanche";
+ compatible = "renesas,blanche", "renesas,r8a7792";
+
+ aliases {
+ serial0 = &scif0;
+ serial1 = &scif3;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x40000000>;
+ };
+
+ d3_3v: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "D3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ethernet@18000000 {
+ compatible = "smsc,lan89218", "smsc,lan9115";
+ reg = <0 0x18000000 0 0x100>;
+ phy-mode = "mii";
+ interrupt-parent = <&irqc>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ smsc,irq-push-pull;
+ reg-io-width = <4>;
+ vddvario-supply = <&d3_3v>;
+ vdd33a-supply = <&d3_3v>;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <20000000>;
+};
+
+&scif0 {
+ status = "okay";
+};
+
+&scif3 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
new file mode 100644
index 000000000000..3fd61d7ab906
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -0,0 +1,385 @@
+/*
+ * Device Tree Source for the r8a7792 SoC
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/clock/r8a7792-clock.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/r8a7792-sysc.h>
+
+/ {
+ compatible = "renesas,r8a7792";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "renesas,apmu";
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ clock-frequency = <1000000000>;
+ clocks = <&cpg_clocks R8A7792_CLK_Z>;
+ power-domains = <&sysc R8A7792_PD_CA15_CPU0>;
+ next-level-cache = <&L2_CA15>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ clock-frequency = <1000000000>;
+ power-domains = <&sysc R8A7792_PD_CA15_CPU1>;
+ next-level-cache = <&L2_CA15>;
+ };
+
+ L2_CA15: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ cache-unified;
+ cache-level = <2>;
+ power-domains = <&sysc R8A7792_PD_CA15_SCU>;
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ apmu@e6152000 {
+ compatible = "renesas,r8a7792-apmu", "renesas,apmu";
+ reg = <0 0xe6152000 0 0x188>;
+ cpus = <&cpu0 &cpu1>;
+ };
+
+ gic: interrupt-controller@f1001000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0 0xf1001000 0 0x1000>,
+ <0 0xf1002000 0 0x1000>,
+ <0 0xf1004000 0 0x2000>,
+ <0 0xf1006000 0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ irqc: interrupt-controller@e61c0000 {
+ compatible = "renesas,irqc-r8a7792", "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp4_clks R8A7792_CLK_IRQC>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,r8a7792-sysc";
+ reg = <0 0xe6180000 0 0x0200>;
+ #power-domain-cells = <1>;
+ };
+
+ dmac0: dma-controller@e6700000 {
+ compatible = "renesas,dmac-r8a7792",
+ "renesas,rcar-dmac";
+ reg = <0 0xe6700000 0 0x20000>;
+ interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&mstp2_clks R8A7792_CLK_SYS_DMAC0>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
+ dmac1: dma-controller@e6720000 {
+ compatible = "renesas,dmac-r8a7792",
+ "renesas,rcar-dmac";
+ reg = <0 0xe6720000 0 0x20000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+ GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&mstp2_clks R8A7792_CLK_SYS_DMAC1>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a7792",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6e60000 0 64>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_SCIF0>, <&zs_clk>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+ <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a7792",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6e68000 0 64>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_SCIF1>, <&zs_clk>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+ <&dmac1 0x2d>, <&dmac1 0x2e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ scif2: serial@e6e58000 {
+ compatible = "renesas,scif-r8a7792",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6e58000 0 64>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_SCIF2>, <&zs_clk>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+ <&dmac1 0x2b>, <&dmac1 0x2c>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ scif3: serial@e6ea8000 {
+ compatible = "renesas,scif-r8a7792",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6ea8000 0 64>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_SCIF3>, <&zs_clk>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x2f>, <&dmac0 0x30>,
+ <&dmac1 0x2f>, <&dmac1 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ hscif0: serial@e62c0000 {
+ compatible = "renesas,hscif-r8a7792",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62c0000 0 96>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_HSCIF0>, <&zs_clk>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ hscif1: serial@e62c8000 {
+ compatible = "renesas,hscif-r8a7792",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62c8000 0 96>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp7_clks R8A7792_CLK_HSCIF1>, <&zs_clk>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ jpu: jpeg-codec@fe980000 {
+ compatible = "renesas,jpu-r8a7792",
+ "renesas,rcar-gen2-jpu";
+ reg = <0 0xfe980000 0 0x10300>;
+ interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp1_clks R8A7792_CLK_JPU>;
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ };
+
+ /* Special CPG clocks */
+ cpg_clocks: cpg_clocks@e6150000 {
+ compatible = "renesas,r8a7792-cpg-clocks",
+ "renesas,rcar-gen2-cpg-clocks";
+ reg = <0 0xe6150000 0 0x1000>;
+ clocks = <&extal_clk>;
+ #clock-cells = <1>;
+ clock-output-names = "main", "pll0", "pll1", "pll3",
+ "lb", "qspi", "z";
+ #power-domain-cells = <0>;
+ };
+
+ /* Fixed factor clocks */
+ pll1_div2_clk: pll1_div2 {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <2>;
+ clock-mult = <1>;
+ };
+ zs_clk: zs {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <6>;
+ clock-mult = <1>;
+ };
+ p_clk: p {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <24>;
+ clock-mult = <1>;
+ };
+ cp_clk: cp {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <48>;
+ clock-mult = <1>;
+ };
+ m2_clk: m2 {
+ compatible = "fixed-factor-clock";
+ clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
+ #clock-cells = <0>;
+ clock-div = <8>;
+ clock-mult = <1>;
+ };
+
+ /* Gate clocks */
+ mstp1_clks: mstp1_clks@e6150134 {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150134 0 4>, <0 0xe6150038 0 4>;
+ clocks = <&m2_clk>;
+ #clock-cells = <1>;
+ clock-indices = <R8A7792_CLK_JPU>;
+ clock-output-names = "jpu";
+ };
+ mstp2_clks: mstp2_clks@e6150138 {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150138 0 4>, <0 0xe6150040 0 4>;
+ clocks = <&zs_clk>, <&zs_clk>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7792_CLK_SYS_DMAC1 R8A7792_CLK_SYS_DMAC0
+ >;
+ clock-output-names = "sys-dmac1", "sys-dmac0";
+ };
+ mstp4_clks: mstp4_clks@e6150140 {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
+ clocks = <&cp_clk>;
+ #clock-cells = <1>;
+ clock-indices = <R8A7792_CLK_IRQC>;
+ clock-output-names = "irqc";
+ };
+ mstp7_clks: mstp7_clks@e615014c {
+ compatible = "renesas,r8a7792-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
+ clocks = <&zs_clk>, <&zs_clk>, <&p_clk>, <&p_clk>,
+ <&p_clk>, <&p_clk>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A7792_CLK_HSCIF1 R8A7792_CLK_HSCIF0
+ R8A7792_CLK_SCIF3 R8A7792_CLK_SCIF2
+ R8A7792_CLK_SCIF1 R8A7792_CLK_SCIF0
+ >;
+ clock-output-names = "hscif1", "hscif0", "scif3",
+ "scif2", "scif1", "scif0";
+ };
+ };
+
+ /* External root clock */
+ extal_clk: extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ };
+
+ /* External SCIF clock */
+ scif_clk: scif {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 0ebc3ee34923..90af18600124 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -158,7 +158,7 @@
};
};
- vcc_sdhi0: regulator@0 {
+ vcc_sdhi0: regulator-vcc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -169,7 +169,7 @@
enable-active-high;
};
- vccq_sdhi0: regulator@1 {
+ vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -182,7 +182,7 @@
1800000 0>;
};
- vcc_sdhi1: regulator@2 {
+ vcc_sdhi1: regulator-vcc-sdhi1 {
compatible = "regulator-fixed";
regulator-name = "SDHI1 Vcc";
@@ -193,7 +193,7 @@
enable-active-high;
};
- vccq_sdhi1: regulator@3 {
+ vccq_sdhi1: regulator-vccq-sdhi1 {
compatible = "regulator-gpio";
regulator-name = "SDHI1 VccQ";
@@ -206,7 +206,7 @@
1800000 0>;
};
- vcc_sdhi2: regulator@4 {
+ vcc_sdhi2: regulator-vcc-sdhi2 {
compatible = "regulator-fixed";
regulator-name = "SDHI2 Vcc";
@@ -217,7 +217,7 @@
enable-active-high;
};
- vccq_sdhi2: regulator@5 {
+ vccq_sdhi2: regulator-vccq-sdhi2 {
compatible = "regulator-gpio";
regulator-name = "SDHI2 VccQ";
@@ -320,12 +320,12 @@
function = "du";
};
- scif0_pins: serial0 {
+ scif0_pins: scif0 {
groups = "scif0_data_d";
function = "scif0";
};
- scif1_pins: serial1 {
+ scif1_pins: scif1 {
groups = "scif1_data_d";
function = "scif1";
};
@@ -360,7 +360,7 @@
renesas,function = "sdhi2";
};
- qspi_pins: spi0 {
+ qspi_pins: qspi {
groups = "qspi_ctrl", "qspi_data4";
function = "qspi";
};
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 1dd6d202cd4c..8d02aacf2892 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -35,6 +35,7 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
+ enable-method = "renesas,apmu";
cpu0: cpu@0 {
device_type = "cpu";
@@ -55,6 +56,28 @@
< 375000 1000000>;
next-level-cache = <&L2_CA15>;
};
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ clock-frequency = <1500000000>;
+ power-domains = <&sysc R8A7793_PD_CA15_CPU1>;
+ };
+
+ L2_CA15: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ power-domains = <&sysc R8A7793_PD_CA15_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
+ };
+
+ apmu@e6152000 {
+ compatible = "renesas,r8a7793-apmu", "renesas,apmu";
+ reg = <0 0xe6152000 0 0x188>;
+ cpus = <&cpu0 &cpu1>;
};
thermal-zones {
@@ -76,13 +99,6 @@
};
};
- L2_CA15: cache-controller@0 {
- compatible = "cache";
- power-domains = <&sysc R8A7793_PD_CA15_SCU>;
- cache-unified;
- cache-level = <2>;
- };
-
gic: interrupt-controller@f1001000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
@@ -473,8 +489,9 @@
reg = <0 0xe60b0000 0 0x425>;
interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7793_CLK_IICDVFS>;
- dmas = <&dmac0 0x77>, <&dmac0 0x78>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x77>, <&dmac0 0x78>,
+ <&dmac1 0x77>, <&dmac1 0x78>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -486,8 +503,9 @@
reg = <0 0xe6500000 0 0x425>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7793_CLK_IIC0>;
- dmas = <&dmac0 0x61>, <&dmac0 0x62>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x61>, <&dmac0 0x62>,
+ <&dmac1 0x61>, <&dmac1 0x62>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -499,8 +517,9 @@
reg = <0 0xe6510000 0 0x425>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7793_CLK_IIC1>;
- dmas = <&dmac0 0x65>, <&dmac0 0x66>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x65>, <&dmac0 0x66>,
+ <&dmac1 0x65>, <&dmac1 0x66>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -515,8 +534,9 @@
reg = <0 0xee100000 0 0x328>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7793_CLK_SDHI0>;
- dmas = <&dmac0 0xcd>, <&dmac0 0xce>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -526,8 +546,9 @@
reg = <0 0xee140000 0 0x100>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7793_CLK_SDHI1>;
- dmas = <&dmac0 0xc1>, <&dmac0 0xc2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
+ <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -537,10 +558,25 @@
reg = <0 0xee160000 0 0x100>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7793_CLK_SDHI2>;
- dmas = <&dmac0 0xd3>, <&dmac0 0xd4>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
+ <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ mmcif0: mmc@ee200000 {
+ compatible = "renesas,mmcif-r8a7793", "renesas,sh-mmcif";
+ reg = <0 0xee200000 0 0x80>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7793_CLK_MMCIF0>;
+ dmas = <&dmac0 0xd1>, <&dmac0 0xd2>,
+ <&dmac1 0xd1>, <&dmac1 0xd2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+ reg-io-width = <4>;
status = "disabled";
+ max-frequency = <97500000>;
};
scifa0: serial@e6c40000 {
@@ -550,8 +586,9 @@
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7793_CLK_SCIFA0>;
clock-names = "fck";
- dmas = <&dmac0 0x21>, <&dmac0 0x22>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+ <&dmac1 0x21>, <&dmac1 0x22>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -563,8 +600,9 @@
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7793_CLK_SCIFA1>;
clock-names = "fck";
- dmas = <&dmac0 0x25>, <&dmac0 0x26>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+ <&dmac1 0x25>, <&dmac1 0x26>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -576,8 +614,9 @@
interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7793_CLK_SCIFA2>;
clock-names = "fck";
- dmas = <&dmac0 0x27>, <&dmac0 0x28>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+ <&dmac1 0x27>, <&dmac1 0x28>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -589,8 +628,9 @@
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7793_CLK_SCIFA3>;
clock-names = "fck";
- dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1b>, <&dmac0 0x1c>,
+ <&dmac1 0x1b>, <&dmac1 0x1c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -602,8 +642,9 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7793_CLK_SCIFA4>;
clock-names = "fck";
- dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1f>, <&dmac0 0x20>,
+ <&dmac1 0x1f>, <&dmac1 0x20>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -615,8 +656,9 @@
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7793_CLK_SCIFA5>;
clock-names = "fck";
- dmas = <&dmac0 0x23>, <&dmac0 0x24>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x23>, <&dmac0 0x24>,
+ <&dmac1 0x23>, <&dmac1 0x24>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -628,8 +670,9 @@
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7793_CLK_SCIFB0>;
clock-names = "fck";
- dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -641,8 +684,9 @@
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7793_CLK_SCIFB1>;
clock-names = "fck";
- dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+ <&dmac1 0x19>, <&dmac1 0x1a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -654,8 +698,9 @@
interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7793_CLK_SCIFB2>;
clock-names = "fck";
- dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+ <&dmac1 0x1d>, <&dmac1 0x1e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -668,8 +713,9 @@
clocks = <&mstp7_clks R8A7793_CLK_SCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+ <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -682,8 +728,9 @@
clocks = <&mstp7_clks R8A7793_CLK_SCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+ <&dmac1 0x2d>, <&dmac1 0x2e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -696,8 +743,9 @@
clocks = <&mstp7_clks R8A7793_CLK_SCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+ <&dmac1 0x2b>, <&dmac1 0x2c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -710,8 +758,9 @@
clocks = <&mstp7_clks R8A7793_CLK_SCIF3>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2f>, <&dmac0 0x30>,
+ <&dmac1 0x2f>, <&dmac1 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -724,8 +773,9 @@
clocks = <&mstp7_clks R8A7793_CLK_SCIF4>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xfb>, <&dmac0 0xfc>,
+ <&dmac1 0xfb>, <&dmac1 0xfc>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -738,8 +788,9 @@
clocks = <&mstp7_clks R8A7793_CLK_SCIF5>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xfd>, <&dmac0 0xfe>,
+ <&dmac1 0xfd>, <&dmac1 0xfe>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -752,8 +803,9 @@
clocks = <&mstp7_clks R8A7793_CLK_HSCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -766,8 +818,9 @@
clocks = <&mstp7_clks R8A7793_CLK_HSCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -780,8 +833,9 @@
clocks = <&mstp7_clks R8A7793_CLK_HSCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3b>, <&dmac0 0x3c>,
+ <&dmac1 0x3b>, <&dmac1 0x3c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -803,8 +857,9 @@
reg = <0 0xe6b10000 0 0x2c>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7793_CLK_QSPI_MOD>;
- dmas = <&dmac0 0x17>, <&dmac0 0x18>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x17>, <&dmac0 0x18>,
+ <&dmac1 0x17>, <&dmac1 0x18>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
num-cs = <1>;
#address-cells = <1>;
@@ -1330,63 +1385,63 @@
status = "disabled";
rcar_sound,dvc {
- dvc0: dvc@0 {
+ dvc0: dvc-0 {
dmas = <&audma0 0xbc>;
dma-names = "tx";
};
- dvc1: dvc@1 {
+ dvc1: dvc-1 {
dmas = <&audma0 0xbe>;
dma-names = "tx";
};
};
rcar_sound,src {
- src0: src@0 {
+ src0: src-0 {
interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x85>, <&audma1 0x9a>;
dma-names = "rx", "tx";
};
- src1: src@1 {
+ src1: src-1 {
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x87>, <&audma1 0x9c>;
dma-names = "rx", "tx";
};
- src2: src@2 {
+ src2: src-2 {
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x89>, <&audma1 0x9e>;
dma-names = "rx", "tx";
};
- src3: src@3 {
+ src3: src-3 {
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8b>, <&audma1 0xa0>;
dma-names = "rx", "tx";
};
- src4: src@4 {
+ src4: src-4 {
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8d>, <&audma1 0xb0>;
dma-names = "rx", "tx";
};
- src5: src@5 {
+ src5: src-5 {
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8f>, <&audma1 0xb2>;
dma-names = "rx", "tx";
};
- src6: src@6 {
+ src6: src-6 {
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x91>, <&audma1 0xb4>;
dma-names = "rx", "tx";
};
- src7: src@7 {
+ src7: src-7 {
interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x93>, <&audma1 0xb6>;
dma-names = "rx", "tx";
};
- src8: src@8 {
+ src8: src-8 {
interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x95>, <&audma1 0xb8>;
dma-names = "rx", "tx";
};
- src9: src@9 {
+ src9: src-9 {
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x97>, <&audma1 0xba>;
dma-names = "rx", "tx";
@@ -1394,52 +1449,52 @@
};
rcar_sound,ssi {
- ssi0: ssi@0 {
+ ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi1: ssi@1 {
+ ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi2: ssi@2 {
+ ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi3: ssi@3 {
+ ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi4: ssi@4 {
+ ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi5: ssi@5 {
+ ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi6: ssi@6 {
+ ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi7: ssi@7 {
+ ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi8: ssi@8 {
+ ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi9: ssi@9 {
+ ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
dma-names = "rx", "tx", "rxu", "txu";
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 383ad791f1db..1ad37d431a2a 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -111,7 +111,7 @@
function = "du";
};
- scif2_pins: serial2 {
+ scif2_pins: scif2 {
groups = "scif2_data";
function = "scif2";
};
@@ -147,7 +147,7 @@
};
&pfc {
- qspi_pins: spi0 {
+ qspi_pins: qspi {
groups = "qspi_ctrl", "qspi_data4";
function = "qspi";
};
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index 56d98d5b2185..cf24f45fff22 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -32,7 +32,7 @@
reg = <0 0x40000000 0 0x40000000>;
};
- d3_3v: regulator@0 {
+ d3_3v: regulator-d3-3v {
compatible = "regulator-fixed";
regulator-name = "D3.3V";
regulator-min-microvolt = <3300000>;
@@ -41,7 +41,7 @@
regulator-always-on;
};
- vcc_sdhi1: regulator@3 {
+ vcc_sdhi1: regulator-vcc-sdhi1 {
compatible = "regulator-fixed";
regulator-name = "SDHI1 Vcc";
@@ -52,7 +52,7 @@
enable-active-high;
};
- vccq_sdhi1: regulator@4 {
+ vccq_sdhi1: regulator-vccq-sdhi1 {
compatible = "regulator-gpio";
regulator-name = "SDHI1 VccQ";
@@ -129,7 +129,7 @@
pinctrl-0 = <&scif_clk_pins>;
pinctrl-names = "default";
- scif2_pins: serial2 {
+ scif2_pins: scif2 {
groups = "scif2_data";
function = "scif2";
};
@@ -164,7 +164,7 @@
function = "sdhi1";
};
- qspi_pins: spi0 {
+ qspi_pins: qspi {
groups = "qspi_ctrl", "qspi_data4";
function = "qspi";
};
@@ -183,6 +183,16 @@
groups = "usb1";
function = "usb1";
};
+
+ du0_pins: du0 {
+ groups = "du0_rgb888", "du0_sync", "du0_disp", "du0_clk0_out";
+ function = "du0";
+ };
+
+ du1_pins: du1 {
+ groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_clk0_out";
+ function = "du1";
+ };
};
&scif2 {
@@ -360,6 +370,8 @@
};
&du {
+ pinctrl-0 = <&du0_pins &du1_pins>;
+ pinctrl-names = "default";
status = "okay";
clocks = <&mstp7_clks R8A7794_CLK_DU0>,
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index f334a3a715f2..685f986cf962 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -55,13 +55,14 @@
power-domains = <&sysc R8A7794_PD_CA7_CPU1>;
next-level-cache = <&L2_CA7>;
};
- };
- L2_CA7: cache-controller@1 {
- compatible = "cache";
- power-domains = <&sysc R8A7794_PD_CA7_SCU>;
- cache-unified;
- cache-level = <2>;
+ L2_CA7: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ power-domains = <&sysc R8A7794_PD_CA7_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
};
gic: interrupt-controller@f1001000 {
@@ -302,8 +303,9 @@
interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7794_CLK_SCIFA0>;
clock-names = "fck";
- dmas = <&dmac0 0x21>, <&dmac0 0x22>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+ <&dmac1 0x21>, <&dmac1 0x22>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -315,8 +317,9 @@
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7794_CLK_SCIFA1>;
clock-names = "fck";
- dmas = <&dmac0 0x25>, <&dmac0 0x26>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+ <&dmac1 0x25>, <&dmac1 0x26>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -328,8 +331,9 @@
interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7794_CLK_SCIFA2>;
clock-names = "fck";
- dmas = <&dmac0 0x27>, <&dmac0 0x28>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+ <&dmac1 0x27>, <&dmac1 0x28>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -341,8 +345,9 @@
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7794_CLK_SCIFA3>;
clock-names = "fck";
- dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1b>, <&dmac0 0x1c>,
+ <&dmac1 0x1b>, <&dmac1 0x1c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -354,8 +359,9 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7794_CLK_SCIFA4>;
clock-names = "fck";
- dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1f>, <&dmac0 0x20>,
+ <&dmac1 0x1f>, <&dmac1 0x20>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -367,8 +373,9 @@
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp11_clks R8A7794_CLK_SCIFA5>;
clock-names = "fck";
- dmas = <&dmac0 0x23>, <&dmac0 0x24>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x23>, <&dmac0 0x24>,
+ <&dmac1 0x23>, <&dmac1 0x24>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -380,8 +387,9 @@
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7794_CLK_SCIFB0>;
clock-names = "fck";
- dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -393,8 +401,9 @@
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7794_CLK_SCIFB1>;
clock-names = "fck";
- dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+ <&dmac1 0x19>, <&dmac1 0x1a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -406,8 +415,9 @@
interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7794_CLK_SCIFB2>;
clock-names = "fck";
- dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+ <&dmac1 0x1d>, <&dmac1 0x1e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -420,8 +430,9 @@
clocks = <&mstp7_clks R8A7794_CLK_SCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+ <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -434,8 +445,9 @@
clocks = <&mstp7_clks R8A7794_CLK_SCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+ <&dmac1 0x2d>, <&dmac1 0x2e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -448,8 +460,9 @@
clocks = <&mstp7_clks R8A7794_CLK_SCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+ <&dmac1 0x2b>, <&dmac1 0x2c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -462,8 +475,9 @@
clocks = <&mstp7_clks R8A7794_CLK_SCIF3>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x2f>, <&dmac0 0x30>,
+ <&dmac1 0x2f>, <&dmac1 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -476,8 +490,9 @@
clocks = <&mstp7_clks R8A7794_CLK_SCIF4>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xfb>, <&dmac0 0xfc>,
+ <&dmac1 0xfb>, <&dmac1 0xfc>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -490,8 +505,9 @@
clocks = <&mstp7_clks R8A7794_CLK_SCIF5>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xfd>, <&dmac0 0xfe>,
+ <&dmac1 0xfd>, <&dmac1 0xfe>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -504,8 +520,9 @@
clocks = <&mstp7_clks R8A7794_CLK_HSCIF0>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -518,8 +535,9 @@
clocks = <&mstp7_clks R8A7794_CLK_HSCIF1>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -532,8 +550,9 @@
clocks = <&mstp7_clks R8A7794_CLK_HSCIF2>, <&zs_clk>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
- dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x3b>, <&dmac0 0x3c>,
+ <&dmac1 0x3b>, <&dmac1 0x3c>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -640,8 +659,9 @@
reg = <0 0xe6500000 0 0x425>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_IIC0>;
- dmas = <&dmac0 0x61>, <&dmac0 0x62>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x61>, <&dmac0 0x62>,
+ <&dmac1 0x61>, <&dmac1 0x62>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -653,8 +673,9 @@
reg = <0 0xe6510000 0 0x425>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_IIC1>;
- dmas = <&dmac0 0x65>, <&dmac0 0x66>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x65>, <&dmac0 0x66>,
+ <&dmac1 0x65>, <&dmac1 0x66>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
#address-cells = <1>;
#size-cells = <0>;
@@ -666,8 +687,9 @@
reg = <0 0xee200000 0 0x80>;
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_MMCIF0>;
- dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0xd1>, <&dmac0 0xd2>,
+ <&dmac1 0xd1>, <&dmac1 0xd2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
reg-io-width = <4>;
status = "disabled";
@@ -678,6 +700,9 @@
reg = <0 0xee100000 0 0x200>;
interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -687,6 +712,9 @@
reg = <0 0xee140000 0 0x100>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_SDHI1>;
+ dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
+ <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -696,6 +724,9 @@
reg = <0 0xee160000 0 0x100>;
interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7794_CLK_SDHI2>;
+ dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
+ <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
status = "disabled";
};
@@ -705,8 +736,9 @@
reg = <0 0xe6b10000 0 0x2c>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp9_clks R8A7794_CLK_QSPI_MOD>;
- dmas = <&dmac0 0x17>, <&dmac0 0x18>;
- dma-names = "tx", "rx";
+ dmas = <&dmac0 0x17>, <&dmac0 0x18>,
+ <&dmac1 0x17>, <&dmac1 0x18>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
num-cs = <1>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
index 5956e8246abe..904668e2e666 100644
--- a/arch/arm/boot/dts/rk3228-evb.dts
+++ b/arch/arm/boot/dts/rk3228-evb.dts
@@ -40,7 +40,7 @@
/dts-v1/;
-#include "rk3228.dtsi"
+#include "rk322x.dtsi"
/ {
model = "Rockchip RK3228 Evaluation board";
diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts
new file mode 100644
index 000000000000..b6a12035a6bb
--- /dev/null
+++ b/arch/arm/boot/dts/rk3229-evb.dts
@@ -0,0 +1,90 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "rk322x.dtsi"
+
+/ {
+ model = "Rockchip RK3229 Evaluation board";
+ compatible = "rockchip,rk3229-evb", "rockchip,rk3229";
+
+ memory {
+ device_type = "memory";
+ reg = <0x60000000 0x40000000>;
+ };
+
+ ext_gmac: ext_gmac {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "ext_gmac";
+ #clock-cells = <0>;
+ };
+
+ vcc_phy: vcc-phy-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ regulator-name = "vcc_phy";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC_EXTCLK>, <&cru SCLK_MAC>;
+ assigned-clock-parents = <&ext_gmac>, <&cru SCLK_MAC_EXTCLK>;
+ clock_in_out = "input";
+ phy-supply = <&vcc_phy>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-gpio = <&gpio2 24 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3228.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index e23a22e29155..9e6bf0e311bb 100644
--- a/arch/arm/boot/dts/rk3228.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -47,8 +47,6 @@
#include "skeleton.dtsi"
/ {
- compatible = "rockchip,rk3228";
-
interrupt-parent = <&gic>;
aliases {
@@ -140,6 +138,47 @@
#clock-cells = <0>;
};
+ i2s1: i2s1@100b0000 {
+ compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s";
+ reg = <0x100b0000 0x4000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S1>, <&cru HCLK_I2S1_8CH>;
+ dmas = <&pdma 14>, <&pdma 15>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s1_bus>;
+ status = "disabled";
+ };
+
+ i2s0: i2s0@100c0000 {
+ compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s";
+ reg = <0x100c0000 0x4000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S0>, <&cru HCLK_I2S0_8CH>;
+ dmas = <&pdma 11>, <&pdma 12>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ i2s2: i2s2@100e0000 {
+ compatible = "rockchip,rk3228-i2s", "rockchip,rk3066-i2s";
+ reg = <0x100e0000 0x4000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2s_clk", "i2s_hclk";
+ clocks = <&cru SCLK_I2S2>, <&cru HCLK_I2S2_2CH>;
+ dmas = <&pdma 0>, <&pdma 1>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
grf: syscon@11000000 {
compatible = "syscon";
reg = <0x11000000 0x1000>;
@@ -376,6 +415,25 @@
status = "disabled";
};
+ gmac: ethernet@30200000 {
+ compatible = "rockchip,rk3228-gmac";
+ reg = <0x30200000 0x10000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>,
+ <&cru SCLK_MAC_TX>, <&cru SCLK_MAC_REF>,
+ <&cru SCLK_MAC_REFOUT>, <&cru ACLK_GMAC>,
+ <&cru PCLK_GMAC>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac";
+ resets = <&cru SRST_GMAC>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@32010000 {
compatible = "arm,gic-400";
interrupt-controller;
@@ -460,6 +518,10 @@
bias-disable;
};
+ pcfg_pull_none_drv_12ma: pcfg-pull-none-drv-12ma {
+ drive-strength = <12>;
+ };
+
emmc {
emmc_clk: emmc-clk {
rockchip,pins = <2 7 RK_FUNC_2 &pcfg_pull_none>;
@@ -481,6 +543,44 @@
};
};
+ gmac {
+ rgmii_pins: rgmii-pins {
+ rockchip,pins = <2 14 RK_FUNC_1 &pcfg_pull_none>,
+ <2 12 RK_FUNC_1 &pcfg_pull_none>,
+ <2 25 RK_FUNC_1 &pcfg_pull_none>,
+ <2 19 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 18 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 22 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 23 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 9 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 13 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 17 RK_FUNC_1 &pcfg_pull_none>,
+ <2 16 RK_FUNC_1 &pcfg_pull_none>,
+ <2 21 RK_FUNC_2 &pcfg_pull_none>,
+ <2 20 RK_FUNC_2 &pcfg_pull_none>,
+ <2 11 RK_FUNC_1 &pcfg_pull_none>,
+ <2 8 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ rmii_pins: rmii-pins {
+ rockchip,pins = <2 14 RK_FUNC_1 &pcfg_pull_none>,
+ <2 12 RK_FUNC_1 &pcfg_pull_none>,
+ <2 25 RK_FUNC_1 &pcfg_pull_none>,
+ <2 19 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 18 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 13 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
+ <2 17 RK_FUNC_1 &pcfg_pull_none>,
+ <2 16 RK_FUNC_1 &pcfg_pull_none>,
+ <2 8 RK_FUNC_1 &pcfg_pull_none>,
+ <2 15 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ phy_pins: phy-pins {
+ rockchip,pins = <2 14 RK_FUNC_2 &pcfg_pull_none>,
+ <2 8 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ };
+
i2c0 {
i2c0_xfer: i2c0-xfer {
rockchip,pins = <0 0 RK_FUNC_1 &pcfg_pull_none>,
@@ -509,6 +609,20 @@
};
};
+ i2s1 {
+ i2s1_bus: i2s1-bus {
+ rockchip,pins = <0 8 RK_FUNC_1 &pcfg_pull_none>,
+ <0 9 RK_FUNC_1 &pcfg_pull_none>,
+ <0 11 RK_FUNC_1 &pcfg_pull_none>,
+ <0 12 RK_FUNC_1 &pcfg_pull_none>,
+ <0 13 RK_FUNC_1 &pcfg_pull_none>,
+ <0 14 RK_FUNC_1 &pcfg_pull_none>,
+ <1 2 RK_FUNC_1 &pcfg_pull_none>,
+ <1 4 RK_FUNC_1 &pcfg_pull_none>,
+ <1 5 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
pwm0 {
pwm0_pin: pwm0-pin {
rockchip,pins = <3 21 RK_FUNC_1 &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index d6cf9ada13c9..114c90fb65e2 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -64,22 +64,6 @@
clock-output-names = "ext_gmac";
};
- io_domains: io-domains {
- compatible = "rockchip,rk3288-io-voltage-domain";
- rockchip,grf = <&grf>;
-
- audio-supply = <&vcca_33>;
- bb-supply = <&vcc_io>;
- dvp-supply = <&dovdd_1v8>;
- flash0-supply = <&vcc_flash>;
- flash1-supply = <&vcc_lan>;
- gpio30-supply = <&vcc_io>;
- gpio1830-supply = <&vcc_io>;
- lcdc-supply = <&vcc_io>;
- sdcard-supply = <&vccio_sd>;
- wifi-supply = <&vccio_wl>;
- };
-
ir: ir-receiver {
compatible = "gpio-ir-receiver";
pinctrl-names = "default";
@@ -397,6 +381,21 @@
status = "okay";
};
+&io_domains {
+ status = "okay";
+
+ audio-supply = <&vcca_33>;
+ bb-supply = <&vcc_io>;
+ dvp-supply = <&dovdd_1v8>;
+ flash0-supply = <&vcc_flash>;
+ flash1-supply = <&vcc_lan>;
+ gpio30-supply = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
+ lcdc-supply = <&vcc_io>;
+ sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vccio_wl>;
+};
+
&pinctrl {
pcfg_output_high: pcfg-output-high {
output-high;
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index 8643103d8cd8..24488421f0f0 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -64,19 +64,6 @@
clock-output-names = "ext_gmac";
};
- io_domains: io-domains {
- compatible = "rockchip,rk3288-io-voltage-domain";
-
- audio-supply = <&vcca_33>;
- flash0-supply = <&vcc_flash>;
- flash1-supply = <&vcc_lan>;
- gpio30-supply = <&vcc_io>;
- gpio1830-supply = <&vcc_io>;
- lcdc-supply = <&vcc_io>;
- sdcard-supply = <&vccio_sd>;
- wifi-supply = <&vcc_18>;
- };
-
leds {
compatible = "gpio-leds";
@@ -321,6 +308,19 @@
status = "okay";
};
+&io_domains {
+ status = "okay";
+
+ audio-supply = <&vcca_33>;
+ flash0-supply = <&vcc_flash>;
+ flash1-supply = <&vcc_lan>;
+ gpio30-supply = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
+ lcdc-supply = <&vcc_io>;
+ sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vcc_18>;
+};
+
&pinctrl {
pcfg_output_high: pcfg-output-high {
output-high;
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
index 720717bb3614..dda8d259bb6d 100644
--- a/arch/arm/boot/dts/rk3288-popmetal.dts
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -77,22 +77,6 @@
};
};
- io_domains: io-domains {
- compatible = "rockchip,rk3288-io-voltage-domain";
- rockchip,grf = <&grf>;
-
- audio-supply = <&vcca_33>;
- bb-supply = <&vcc_io>;
- dvp-supply = <&vcc18_dvp>;
- flash0-supply = <&vcc_flash>;
- flash1-supply = <&vcc_lan>;
- gpio30-supply = <&vcc_io>;
- gpio1830-supply = <&vcc_io>;
- lcdc-supply = <&vcc_io>;
- sdcard-supply = <&vccio_sd>;
- wifi-supply = <&vccio_wl>;
- };
-
ir: ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
@@ -437,6 +421,21 @@
status = "okay";
};
+&io_domains {
+ status = "okay";
+
+ audio-supply = <&vcca_33>;
+ bb-supply = <&vcc_io>;
+ dvp-supply = <&vcc18_dvp>;
+ flash0-supply = <&vcc_flash>;
+ flash1-supply = <&vcc_lan>;
+ gpio30-supply = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
+ lcdc-supply = <&vcc_io>;
+ sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vccio_wl>;
+};
+
&pinctrl {
ak8963 {
comp_int: comp-int {
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index e1ee9f949035..bb1f01e037ba 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -61,22 +61,6 @@
clock-output-names = "ext_gmac";
};
- io_domains: io-domains {
- compatible = "rockchip,rk3288-io-voltage-domain";
- rockchip,grf = <&grf>;
-
- audio-supply = <&vcc_io>;
- bb-supply = <&vcc_io>;
- dvp-supply = <&vcc_18>;
- flash0-supply = <&vcc_flash>;
- flash1-supply = <&vccio_pmu>;
- gpio30-supply = <&vccio_pmu>;
- gpio1830 = <&vcc_io>;
- lcdc-supply = <&vcc_io>;
- sdcard-supply = <&vccio_sd>;
- wifi-supply = <&vcc_18>;
- };
-
vcc_flash: flash-regulator {
compatible = "regulator-fixed";
regulator-name = "vcc_sys";
@@ -259,6 +243,21 @@
};
};
+&io_domains {
+ status = "okay";
+
+ audio-supply = <&vcc_io>;
+ bb-supply = <&vcc_io>;
+ dvp-supply = <&vcc_18>;
+ flash0-supply = <&vcc_flash>;
+ flash1-supply = <&vccio_pmu>;
+ gpio30-supply = <&vccio_pmu>;
+ gpio1830 = <&vcc_io>;
+ lcdc-supply = <&vcc_io>;
+ sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vcc_18>;
+};
+
&pinctrl {
pcfg_output_high: pcfg-output-high {
output-high;
diff --git a/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi b/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi
new file mode 100644
index 000000000000..6d105914a4f3
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi
@@ -0,0 +1,101 @@
+/*
+ * Google Veyron (and derivatives) fragment for the max98090 audio
+ * codec and analog headphone jack.
+ *
+ * Copyright 2016 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+ sound {
+ compatible = "rockchip,rockchip-audio-max98090";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mic_det>, <&hp_det>;
+ rockchip,model = "VEYRON-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,audio-codec = <&max98090>;
+ rockchip,hp-det-gpios = <&gpio6 5 GPIO_ACTIVE_HIGH>;
+ rockchip,mic-det-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ rockchip,headset-codec = <&headsetcodec>;
+ };
+};
+
+&i2c2 {
+ max98090: max98090@10 {
+ compatible = "maxim,max98090";
+ reg = <0x10>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ clock-names = "mclk";
+ clocks = <&cru SCLK_I2S0_OUT>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&int_codec>;
+ };
+};
+
+&i2c4 {
+ headsetcodec: ts3a227e@3b {
+ compatible = "ti,ts3a227e";
+ reg = <0x3b>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts3a227e_int_l>;
+ ti,micbias = <7>; /* MICBIAS = 2.8V */
+ };
+};
+
+&i2s {
+ status = "okay";
+};
+
+&io_domains {
+ audio-supply = <&vcc18_codec>;
+};
+
+&rk808 {
+ vcc10-supply = <&vcc33_sys>;
+
+ regulators {
+ vcc18_codec: LDO_REG6 {
+ regulator-name = "vcc18_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+};
+
+&pinctrl {
+ codec {
+ hp_det: hp-det {
+ rockchip,pins = <6 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ /*
+ * HACK: We're going to _pull down_ this _active low_ interrupt
+ * so that it never fires. We don't need this interrupt because
+ * we've got a ts3a227e chip but the driver requires it.
+ */
+ int_codec: int-codec {
+ rockchip,pins = <6 7 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ mic_det: mic-det {
+ rockchip,pins = <6 11 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ headset {
+ ts3a227e_int_l: ts3a227e-int-l {
+ rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
index 2958c36d12a0..ce1f87980bcb 100644
--- a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
@@ -46,6 +46,7 @@
#include <dt-bindings/clock/rockchip,rk808.h>
#include <dt-bindings/input/input.h>
#include "rk3288-veyron.dtsi"
+#include "rk3288-veyron-analog-audio.dtsi"
#include "rk3288-veyron-sdmmc.dtsi"
/ {
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index b2557bf5a58f..3dd2cca48c11 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -83,19 +83,6 @@
reset-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>;
};
- io_domains: io-domains {
- compatible = "rockchip,rk3288-io-voltage-domain";
- rockchip,grf = <&grf>;
-
- bb-supply = <&vcc33_io>;
- dvp-supply = <&vcc_18>;
- flash0-supply = <&vcc18_flashio>;
- gpio1830-supply = <&vcc33_io>;
- gpio30-supply = <&vcc33_io>;
- lcdc-supply = <&vcc33_lcd>;
- wifi-supply = <&vcc18_wl>;
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rk808 RK808_CLKOUT1>;
@@ -355,6 +342,18 @@
i2c-scl-rising-time-ns = <1000>;
};
+&io_domains {
+ status = "okay";
+
+ bb-supply = <&vcc33_io>;
+ dvp-supply = <&vcc_18>;
+ flash0-supply = <&vcc18_flashio>;
+ gpio1830-supply = <&vcc33_io>;
+ gpio30-supply = <&vcc33_io>;
+ lcdc-supply = <&vcc33_lcd>;
+ wifi-supply = <&vcc18_wl>;
+};
+
&pwm1 {
status = "okay";
};
@@ -383,6 +382,12 @@
status = "okay";
rx-sample-delay-ns = <12>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ };
};
&tsadc {
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 3ebee530f2b0..cd33f0170890 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -827,6 +827,11 @@
#phy-cells = <0>;
status = "disabled";
};
+
+ io_domains: io-domains {
+ compatible = "rockchip,rk3288-io-voltage-domain";
+ status = "disabled";
+ };
};
wdt: watchdog@ff800000 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 5dd2734e67ba..353d0e5ec83b 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -72,6 +72,11 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a5-pmu";
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH 0>;
+ };
+
memory {
reg = <0x20000000 0x20000000>;
};
@@ -112,13 +117,13 @@
clock-names = "pclk", "hclk";
status = "disabled";
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -126,7 +131,7 @@
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -134,7 +139,7 @@
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -142,7 +147,7 @@
atmel,can-isoc;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -150,7 +155,7 @@
atmel,can-isoc;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -158,7 +163,7 @@
atmel,can-isoc;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -166,7 +171,7 @@
atmel,can-isoc;
};
- ep7 {
+ ep@7 {
reg = <7>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -174,56 +179,56 @@
atmel,can-isoc;
};
- ep8 {
+ ep@8 {
reg = <8>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep9 {
+ ep@9 {
reg = <9>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep10 {
+ ep@10 {
reg = <10>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep11 {
+ ep@11 {
reg = <11>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep12 {
+ ep@12 {
reg = <12>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep13 {
+ ep@13 {
reg = <13>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep14 {
+ ep@14 {
reg = <14>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep15 {
+ ep@15 {
reg = <15>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 36301bd9a14a..4c84d333fc7e 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -326,26 +326,22 @@
atmel,adc-res-names = "lowres", "highres";
status = "disabled";
- trigger@0 {
- reg = <0>;
+ trigger0 {
trigger-name = "external-rising";
trigger-value = <0x1>;
trigger-external;
};
- trigger@1 {
- reg = <1>;
+ trigger1 {
trigger-name = "external-falling";
trigger-value = <0x2>;
trigger-external;
};
- trigger@2 {
- reg = <2>;
+ trigger2 {
trigger-name = "external-any";
trigger-value = <0x3>;
trigger-external;
};
- trigger@3 {
- reg = <3>;
+ trigger3 {
trigger-name = "continuous";
trigger-value = <0x6>;
};
@@ -1341,13 +1337,13 @@
clock-names = "pclk", "hclk";
status = "disabled";
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1355,7 +1351,7 @@
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -1363,84 +1359,84 @@
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-dma;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-dma;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-dma;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-dma;
};
- ep7 {
+ ep@7 {
reg = <7>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-dma;
};
- ep8 {
+ ep@8 {
reg = <8>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep9 {
+ ep@9 {
reg = <9>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep10 {
+ ep@10 {
reg = <10>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep11 {
+ ep@11 {
reg = <11>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep12 {
+ ep@12 {
reg = <12>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep13 {
+ ep@13 {
reg = <13>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep14 {
+ ep@14 {
reg = <14>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
};
- ep15 {
+ ep@15 {
reg = <15>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
diff --git a/arch/arm/boot/dts/sama5d31ek.dts b/arch/arm/boot/dts/sama5d31ek.dts
index 04eec0dfcf7d..25e4c0b2d786 100644
--- a/arch/arm/boot/dts/sama5d31ek.dts
+++ b/arch/arm/boot/dts/sama5d31ek.dts
@@ -9,6 +9,7 @@
/dts-v1/;
#include "sama5d31.dtsi"
#include "sama5d3xmb.dtsi"
+#include "sama5d3xmb_emac.dtsi"
#include "sama5d3xdm.dtsi"
/ {
diff --git a/arch/arm/boot/dts/sama5d33ek.dts b/arch/arm/boot/dts/sama5d33ek.dts
index cbd6a3ff1545..c517b87a1de2 100644
--- a/arch/arm/boot/dts/sama5d33ek.dts
+++ b/arch/arm/boot/dts/sama5d33ek.dts
@@ -9,6 +9,7 @@
/dts-v1/;
#include "sama5d33.dtsi"
#include "sama5d3xmb.dtsi"
+#include "sama5d3xmb_gmac.dtsi"
#include "sama5d3xdm.dtsi"
/ {
diff --git a/arch/arm/boot/dts/sama5d34ek.dts b/arch/arm/boot/dts/sama5d34ek.dts
index 878aa164275a..c8b8449fdc3e 100644
--- a/arch/arm/boot/dts/sama5d34ek.dts
+++ b/arch/arm/boot/dts/sama5d34ek.dts
@@ -9,6 +9,7 @@
/dts-v1/;
#include "sama5d34.dtsi"
#include "sama5d3xmb.dtsi"
+#include "sama5d3xmb_gmac.dtsi"
#include "sama5d3xdm.dtsi"
/ {
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index e812f5c1bf70..6e261fcf576c 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -9,6 +9,8 @@
/dts-v1/;
#include "sama5d35.dtsi"
#include "sama5d3xmb.dtsi"
+#include "sama5d3xmb_emac.dtsi"
+#include "sama5d3xmb_gmac.dtsi"
/ {
model = "Atmel SAMA5D35-EK";
diff --git a/arch/arm/boot/dts/sama5d36ek.dts b/arch/arm/boot/dts/sama5d36ek.dts
index 59576c6f9826..cd458b85a205 100644
--- a/arch/arm/boot/dts/sama5d36ek.dts
+++ b/arch/arm/boot/dts/sama5d36ek.dts
@@ -10,6 +10,8 @@
#include "sama5d36.dtsi"
#include "sama5d3xmb.dtsi"
#include "sama5d3xdm.dtsi"
+#include "sama5d3xmb_emac.dtsi"
+#include "sama5d3xmb_gmac.dtsi"
/ {
model = "Atmel SAMA5D36-EK";
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 2cf9c3611db6..b5e111b29da1 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -34,40 +34,6 @@
spi0: spi@f0004000 {
cs-gpios = <&pioD 13 0>, <0>, <0>, <0>;
};
-
- macb0: ethernet@f0028000 {
- phy-mode = "rgmii";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ethernet-phy@1 {
- reg = <0x1>;
- interrupt-parent = <&pioB>;
- interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
- txen-skew-ps = <800>;
- txc-skew-ps = <3000>;
- rxdv-skew-ps = <400>;
- rxc-skew-ps = <3000>;
- rxd0-skew-ps = <400>;
- rxd1-skew-ps = <400>;
- rxd2-skew-ps = <400>;
- rxd3-skew-ps = <400>;
- };
-
- ethernet-phy@7 {
- reg = <0x7>;
- interrupt-parent = <&pioB>;
- interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
- txen-skew-ps = <800>;
- txc-skew-ps = <3000>;
- rxdv-skew-ps = <400>;
- rxc-skew-ps = <3000>;
- rxd0-skew-ps = <400>;
- rxd1-skew-ps = <400>;
- rxd2-skew-ps = <400>;
- rxd3-skew-ps = <400>;
- };
- };
};
nand0: nand@60000000 {
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 89010422812d..6d252ad050f6 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -117,18 +117,6 @@
status = "okay";
};
- macb1: ethernet@f802c000 {
- phy-mode = "rmii";
-
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: ethernet-phy@1 {
- interrupt-parent = <&pioE>;
- interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
- reg = <1>;
- };
- };
-
pinctrl@fffff200 {
board {
pinctrl_mmc0_cd: mmc0_cd {
diff --git a/arch/arm/boot/dts/sama5d3xmb_emac.dtsi b/arch/arm/boot/dts/sama5d3xmb_emac.dtsi
new file mode 100644
index 000000000000..2fd14f371a04
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3xmb_emac.dtsi
@@ -0,0 +1,26 @@
+/*
+ * sama5d3xmb_emac.dtsi - Device Tree Include file for SAMA5D3x motherboard
+ * Ethernet
+ *
+ * Copyright (C) 2016 Atmel,
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/ {
+ ahb {
+ apb {
+ macb1: ethernet@f802c000 {
+ phy-mode = "rmii";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@1 {
+ interrupt-parent = <&pioE>;
+ interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+ reg = <1>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d3xmb_gmac.dtsi b/arch/arm/boot/dts/sama5d3xmb_gmac.dtsi
new file mode 100644
index 000000000000..65aea7a75b1d
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d3xmb_gmac.dtsi
@@ -0,0 +1,48 @@
+/*
+ * sama5d3xmb_gmac.dtsi - Device Tree Include file for SAMA5D3x motherboard
+ * Gigabit Ethernet
+ *
+ * Copyright (C) 2016 Atmel,
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/ {
+ ahb {
+ apb {
+ macb0: ethernet@f0028000 {
+ phy-mode = "rgmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ interrupt-parent = <&pioB>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ txen-skew-ps = <800>;
+ txc-skew-ps = <3000>;
+ rxdv-skew-ps = <400>;
+ rxc-skew-ps = <3000>;
+ rxd0-skew-ps = <400>;
+ rxd1-skew-ps = <400>;
+ rxd2-skew-ps = <400>;
+ rxd3-skew-ps = <400>;
+ };
+
+ ethernet-phy@7 {
+ reg = <0x7>;
+ interrupt-parent = <&pioB>;
+ interrupts = <25 IRQ_TYPE_EDGE_FALLING>;
+ txen-skew-ps = <800>;
+ txc-skew-ps = <3000>;
+ rxdv-skew-ps = <400>;
+ rxc-skew-ps = <3000>;
+ rxd0-skew-ps = <400>;
+ rxd1-skew-ps = <400>;
+ rxd2-skew-ps = <400>;
+ rxd3-skew-ps = <400>;
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 4e2cc30d6615..65e725fb5679 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -135,13 +135,13 @@
clock-names = "pclk", "hclk";
status = "disabled";
- ep0 {
+ ep@0 {
reg = <0>;
atmel,fifo-size = <64>;
atmel,nb-banks = <1>;
};
- ep1 {
+ ep@1 {
reg = <1>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -149,7 +149,7 @@
atmel,can-isoc;
};
- ep2 {
+ ep@2 {
reg = <2>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <3>;
@@ -157,7 +157,7 @@
atmel,can-isoc;
};
- ep3 {
+ ep@3 {
reg = <3>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -165,7 +165,7 @@
atmel,can-isoc;
};
- ep4 {
+ ep@4 {
reg = <4>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -173,7 +173,7 @@
atmel,can-isoc;
};
- ep5 {
+ ep@5 {
reg = <5>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -181,7 +181,7 @@
atmel,can-isoc;
};
- ep6 {
+ ep@6 {
reg = <6>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -189,7 +189,7 @@
atmel,can-isoc;
};
- ep7 {
+ ep@7 {
reg = <7>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -197,56 +197,56 @@
atmel,can-isoc;
};
- ep8 {
+ ep@8 {
reg = <8>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep9 {
+ ep@9 {
reg = <9>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep10 {
+ ep@10 {
reg = <10>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep11 {
+ ep@11 {
reg = <11>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep12 {
+ ep@12 {
reg = <12>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep13 {
+ ep@13 {
reg = <13>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep14 {
+ ep@14 {
reg = <14>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
atmel,can-isoc;
};
- ep15 {
+ ep@15 {
reg = <15>;
atmel,fifo-size = <1024>;
atmel,nb-banks = <2>;
@@ -1226,22 +1226,22 @@
atmel,adc-ts-pressure-threshold = <10000>;
status = "disabled";
- trigger@0 {
+ trigger0 {
trigger-name = "external-rising";
trigger-value = <0x1>;
trigger-external;
};
- trigger@1 {
+ trigger1 {
trigger-name = "external-falling";
trigger-value = <0x2>;
trigger-external;
};
- trigger@2 {
+ trigger2 {
trigger-name = "external-any";
trigger-value = <0x3>;
trigger-external;
};
- trigger@3 {
+ trigger3 {
trigger-name = "continuous";
trigger-value = <0x6>;
};
diff --git a/arch/arm/boot/dts/sh73a0-kzm9g.dts b/arch/arm/boot/dts/sh73a0-kzm9g.dts
index c2d8a080e392..3d65f1f6d78b 100644
--- a/arch/arm/boot/dts/sh73a0-kzm9g.dts
+++ b/arch/arm/boot/dts/sh73a0-kzm9g.dts
@@ -22,7 +22,7 @@
compatible = "renesas,kzm9g", "renesas,sh73a0";
aliases {
- serial4 = &scifa4;
+ serial0 = &scifa4;
};
cpus {
@@ -39,16 +39,16 @@
};
chosen {
- bootargs = "console=tty0 console=ttySC4,115200 root=/dev/nfs ip=dhcp ignore_loglevel rw";
- stdout-path = &scifa4;
+ bootargs = "root=/dev/nfs ip=dhcp ignore_loglevel rw";
+ stdout-path = "serial0:115200n8";
};
- memory {
+ memory@40000000 {
device_type = "memory";
reg = <0x40000000 0x20000000>;
};
- reg_1p8v: regulator@0 {
+ reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
@@ -57,7 +57,7 @@
regulator-boot-on;
};
- reg_3p3v: regulator@1 {
+ reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
@@ -66,7 +66,7 @@
regulator-boot-on;
};
- vmmc_sdhi0: regulator@2 {
+ vmmc_sdhi0: regulator-vmmc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
regulator-min-microvolt = <3300000>;
@@ -75,7 +75,7 @@
enable-active-high;
};
- vmmc_sdhi2: regulator@3 {
+ vmmc_sdhi2: regulator-vmmc-sdhi2 {
compatible = "regulator-fixed";
regulator-name = "SDHI2 Vcc";
regulator-min-microvolt = <3300000>;
@@ -352,7 +352,7 @@
};
};
- scifa4_pins: serial4 {
+ scifa4_pins: scifa4 {
groups = "scifa4_data", "scifa4_ctrl";
function = "scifa4";
};
@@ -378,6 +378,7 @@
pinctrl-0 = <&scifa4_pins>;
pinctrl-names = "default";
+ uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index c4f434cdec60..032fe2f14b16 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -55,7 +55,7 @@
<0xf0000100 0x100>;
};
- L2: cache-controller {
+ L2: cache-controller@f0100000 {
compatible = "arm,pl310-cache";
reg = <0xf0100000 0x1000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 5820b70c95b3..94000cbe576b 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -22,11 +22,6 @@
#address-cells = <1>;
#size-cells = <1>;
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -572,12 +567,6 @@
reg = <0xffcfb100 0x80>;
};
- sdramedac {
- compatible = "altr,sdram-edac-a10";
- altr,sdr-syscon = <&sdr>;
- interrupts = <0 2 4>, <0 0 4>;
- };
-
L2: l2-cache@fffff000 {
compatible = "arm,pl310-cache";
reg = <0xfffff000 0x1000>;
@@ -610,16 +599,29 @@
#size-cells = <1>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>,
<0 0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
ranges;
+ sdramedac {
+ compatible = "altr,sdram-edac-a10";
+ altr,sdr-syscon = <&sdr>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
+ <49 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
l2-ecc@ffd06010 {
compatible = "altr,socfpga-a10-l2-ecc";
reg = <0xffd06010 0x4>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>,
+ <32 IRQ_TYPE_LEVEL_HIGH>;
};
ocram-ecc@ff8c3000 {
compatible = "altr,socfpga-a10-ocram-ecc";
reg = <0xff8c3000 0x400>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>,
+ <33 IRQ_TYPE_LEVEL_HIGH>;
};
emac0-rx-ecc@ff8c0800 {
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
index 567df98f1bb5..8e3a4adc389f 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
@@ -20,9 +20,14 @@
model = "Altera SOCFPGA Arria 10";
compatible = "altr,socfpga-arria10", "altr,socfpga";
+ aliases {
+ ethernet0 = &gmac0;
+ serial0 = &uart1;
+ };
+
chosen {
bootargs = "earlyprintk";
- stdout-path = "serial1:115200n8";
+ stdout-path = "serial0:115200n8";
};
memory {
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 6ae56838bd3a..d309314f3a36 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -604,6 +604,7 @@
#interrupt-cells = <2>;
ab8500_gpio: ab8500-gpio {
+ compatible = "stericsson,ab8500-gpio";
gpio-controller;
#gpio-cells = <2>;
};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
index fc5e8ce700c3..3c9f2f068c2f 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
@@ -99,46 +99,69 @@
vddio-supply = <&db8500_vsmps2_reg>;
pinctrl-names = "default";
pinctrl-0 = <&accel_tvk_mode>;
- interrupt-parent = <&gpio2>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>,
- <19 IRQ_TYPE_EDGE_FALLING>;
- };
- lsm303dlh@1e {
/*
- * This magnetometer is packaged with
- * the accelerometer, and has a DRDY line,
- * however it is not connected on this
- * board so it can not generate interrupts.
+ * These interrupts cannot be used: the other component
+ * ST-Micro L3G4200D gyro that is connected to the same lines
+ * cannot set its DRDY line to open drain, so it cannot be
+ * shared with other peripherals. They should be defined for
+ * the falling edge if they could be wired together.
+ *
+ * interrupts-extended =
+ * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
+ * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
*/
+ };
+ lsm303dlh@1e {
+ /* Magnetometer */
compatible = "st,lsm303dlh-magn";
reg = <0x1e>;
vdd-supply = <&ab8500_ldo_aux1_reg>;
vddio-supply = <&db8500_vsmps2_reg>;
+ /*
+ * These interrupts cannot be used: the other component
+ * ST-Micro L3G4200D gyro that is connected to the same lines
+ * cannot set its DRDY line to open drain, so it cannot be
+ * shared with other peripherals. They should be defined for
+ * the falling edge if they could be wired together.
+ *
+ * interrupts-extended =
+ * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
+ * <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
+ */
};
lis331dl@1c {
/* Accelerometer */
compatible = "st,lis331dl-accel";
st,drdy-int-pin = <1>;
- drive-open-drain;
reg = <0x1c>;
vdd-supply = <&ab8500_ldo_aux1_reg>;
vddio-supply = <&db8500_vsmps2_reg>;
pinctrl-names = "default";
pinctrl-0 = <&accel_tvk_mode>;
interrupt-parent = <&gpio2>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>,
- <19 IRQ_TYPE_EDGE_FALLING>;
+ /* INT2 would need to be open drain */
+ interrupts = <18 IRQ_TYPE_EDGE_RISING>,
+ <19 IRQ_TYPE_EDGE_RISING>;
};
ak8974@0f {
/* Magnetometer */
compatible = "asahi-kasei,ak8974";
reg = <0x0f>;
- vdd-supply = <&ab8500_ldo_aux1_reg>;
- vddio-supply = <&db8500_vsmps2_reg>;
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+ dvdd-supply = <&db8500_vsmps2_reg>;
pinctrl-names = "default";
pinctrl-0 = <&gyro_magn_tvk_mode>;
- interrupt-parent = <&gpio1>;
- interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ /*
+ * These interrupts cannot be used: the other component
+ * ST-Micro L3G4200D gyro that is connected to the same lines
+ * cannot set its DRDY line to open drain, so it cannot be
+ * shared with other peripherals. They should be defined for
+ * the falling edge if they could be wired together.
+ *
+ * interrupts-extended =
+ * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
+ * <&gpio0 31 IRQ_TYPE_EDGE_FALLING>;
+ */
};
l3g4200d@68 {
/* Gyroscope */
@@ -149,8 +172,9 @@
vddio-supply = <&db8500_vsmps2_reg>;
pinctrl-names = "default";
pinctrl-0 = <&gyro_magn_tvk_mode>;
- interrupt-parent = <&gpio1>;
- interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ interrupts-extended =
+ <&gpio1 0 IRQ_TYPE_EDGE_RISING>,
+ <&gpio0 31 IRQ_TYPE_EDGE_RISING>;
};
lsp001wm@5c {
/* Barometer/pressure sensor */
@@ -218,7 +242,7 @@
/* Accelerometer interrupt lines 1 & 2 */
tvk_cfg {
pins = "GPIO82_C1", "GPIO83_D3";
- ste,config = <&gpio_in_pu>;
+ ste,config = <&gpio_in_pd>;
};
};
};
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 6d8ce154347e..48dc38482633 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -223,7 +223,6 @@
prcmu@80157000 {
ab8500 {
ab8500-gpio {
- compatible = "stericsson,ab8500-gpio";
};
ab8500-regulators {
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index 45d7af326718..7187676836be 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -18,6 +18,126 @@
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
soc {
+ /* Name the GPIO muxed rails on the HREF boards */
+ gpio@8012e000 {
+ /* GPIOs 0 - 31 */
+ gpio-line-names =
+ /* GPIO0,1 used for UART0 BT RX/TX */
+ "", "",
+ "UART_WAKE",
+ "BT_WAKE",
+ "",
+ "SDMMC_1V8_3V_SEL",
+ "FLASH_LED_SYNC (FLASH_CTRL_0)",
+ "XENON_READY (FLASH_CTRL_1)",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "",
+ "",
+ "FLASH_LED_EN (FLASH_CTRL_3)",
+ "", "",
+ "", "", "", "", "",
+ /* Used by UART2 (console) */
+ "", "",
+ "MAGNETOMETER_INT";
+ };
+
+ gpio@8012e080 {
+ /* GPIOs 32 - 63 */
+ gpio-line-names =
+ "MAGNETOMETER_DRDY",
+ "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8000e000 {
+ /* GPIOs 64 - 95 */
+ gpio-line-names = "XENON_EN2 (FLASH_CTRL_4)",
+ "DISP1_RST",
+ "DISP2_RST",
+ "TOUCH_INT2",
+ "LCD_VSI0_A",
+ "LCD_VSI1_A",
+ /* GPIO 70-77 used for ETM */
+ "", "", "", "", "", "", "", "",
+ /* GPIO 78-81 used for YCBCR */
+ "", "", "", "",
+ "ACCELEROMETER_INT1_RDY",
+ "ACCELEROMETER_INT2",
+ "TOUCH_INT",
+ "WLAN_ENA",
+ "", "", "", "", "",
+ "FORCE_SENSING_INT",
+ "FORCE_SENSING_RESET",
+ "", "",
+ "SDMMC_CD";
+ };
+
+ gpio@8000e080 {
+ /* GPIOs 96 - 127 */
+ gpio-line-names = "",
+ "FORCE_SENSING_WU",
+ "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8000e100 {
+ /* GPIOs 128 - 159 */
+ gpio-line-names = "", "", "", "", "", "", "", "",
+ "", "", "",
+ "DIPRO_INT", /* GPIO139 */
+ "XSHUTDOWN_SECONDARY_SENSOR",
+ "XSHUTDOWN_PRIMARY_SENSOR",
+ "NFC_RST (NFC_CTRL_",
+ "TOUCH_RST",
+ "NFC_IRQ (NFC_CTRL_1)",
+ "HAL_SW",
+ "TOUCH_RST2",
+ "", "",
+ "VAUDIO_HF_EN", /* GPIO149 */
+ "", "", "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8000e180 {
+ /* GPIOs 160 - 191 */
+ gpio-line-names = "", "", "", "", "", "", "", "",
+ "",
+ "SDMMC_EN",
+ "XENON_CHARGE (FLASH_CONTROL_5)",
+ "GBF_ENA_RESET",
+ "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8011e000 {
+ /* GPIOs 192 - 223 */
+ gpio-line-names = "HDTV_INTN",
+ "", "", "",
+ "HDTV_RSTN",
+ "", "", "",
+ "", /* GPIO200 */
+ "", "", "", "", "", "", "",
+ /* GPIO208-216 used for WGBF_MC1 */
+ "", "", "", "", "", "", "", "", "",
+ "SW_FRONT_PROXIMITY", /* GPIO217 */
+ "KPD_CTRL_INT", /* Keypad controller */
+ "", "", "", "", "";
+ };
+
+ gpio@8011e080 {
+ /* GPIOs 224 - 255 */
+ gpio-line-names = "", "",
+ "HSIT_ACWAKE0",
+ "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
// External Micro SD slot
sdi0_per1@80126000 {
cd-gpios = <&gpio2 31 GPIO_ACTIVE_HIGH>; // 95
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 36e84efc401c..b3df1c60d465 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -95,6 +95,70 @@
};
soc {
+ /* Name the GPIO muxed rails on the Snowball board */
+ gpio@8012e000 {
+ /* GPIOs 0 - 31 */
+ gpio-line-names = "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "",
+ "AP_GPIO31";
+ };
+
+ gpio@8012e080 {
+ /* GPIOs 32 - 63 */
+ gpio-line-names = "USR PB", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8000e000 {
+ /* GPIOs 64 - 95 */
+ gpio-line-names = "", "", "", "", "AP_GPIO68", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8000e100 {
+ /* GPIOs 128 - 159 */
+ gpio-line-names = "", "", "", "", "", "", "", "",
+ "", "", "", "", "IRQ_LAN", "RSTn_LAN",
+ "USR_LED", "", "", "", "", "", "",
+ "", "", "AP_GPIO151", "AP_GPIO152",
+ "", "", "", "", "", "", "";
+ };
+
+ gpio@8000e180 {
+ /* GPIOs 160 - 191 */
+ gpio-line-names = "", "AP_GPIO161", "AP_GPIO162",
+ "ACCELEROMETER_INT1_RDY",
+ "ACCELEROMETER_INT2", "MAG_DRDY",
+ "GYRO_DRDY", "RSTn_MLC", "RSTn_SLC",
+ "GYRO_INT", "UART_WAKE", "GBF_RESET",
+ "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
+ gpio@8011e000 {
+ /* GPIOs 192 - 223 */
+ gpio-line-names = "HDTV_INTn", "", "", "", "HDTV_RST",
+ "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "",
+ "WLAN_RESETN", "WLAN_IRQ", "MMC_EN",
+ "MMC_CD", "", "", "", "", "";
+ };
+
+ gpio@8011e080 {
+ /* GPIOs 224 - 255 */
+ gpio-line-names = "", "", "", "", "SD_SEL", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+ };
+
usb_per5@a03e0000 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&musb_default_mode>;
@@ -352,7 +416,25 @@
ab8500 {
ab8500-gpio {
- compatible = "stericsson,ab8500-gpio";
+ /*
+ * AB8500 GPIOs are numbered starting from 1, so the first
+ * index 0 is what in the datasheet is called "GPIO1", and
+ * the second is "GPIO2" and so forth. Confusingly, the
+ * Snowball schematic then names the "GPIO2" line "PM_GPIO1",
+ * while later naming "GPIO4" as "PM_GPIO4".
+ */
+ gpio-line-names = "", /* AB8500 GPIO1 */
+ "PM_GPIO1", /* AB8500 GPIO2 */
+ "WLAN_CLK_REQ", /* AB8500 GPIO3 */
+ "PM_GPIO4", /* AB8500 GPIO4 */
+ "", "", "", "", "", "", "", "", "", "", "",
+ "EN_3V6", /* AB8500 GPIO16 */
+ "", "", "", "" ,"", "", "", "", "",
+ "EN_3V3", /* AB8500 GPIO26 */
+ "", "", "", "", "", "", "", "", "", "", "", "", "",
+ "PM_GPIO40", /* AB8500 GPIO40 */
+ "PM_GPIO41", /* AB8500 GPIO41 */
+ "PM_GPIO42"; /* AB8500 GPIO42 */
};
ext_regulators: ab8500-ext-regulators {
diff --git a/arch/arm/boot/dts/stih410-clock.dtsi b/arch/arm/boot/dts/stih410-clock.dtsi
index d1f2acafc9b6..fd5049682181 100644
--- a/arch/arm/boot/dts/stih410-clock.dtsi
+++ b/arch/arm/boot/dts/stih410-clock.dtsi
@@ -103,6 +103,7 @@
clocks = <&clk_sysin>;
clock-output-names = "clk-s-a0-pll-ofd-0";
+ clock-critical = <0>; /* clk-s-a0-pll-ofd-0 */
};
clk_s_a0_flexgen: clk-s-a0-flexgen {
@@ -115,6 +116,7 @@
clock-output-names = "clk-ic-lmi0",
"clk-ic-lmi1";
+ clock-critical = <CLK_IC_LMI0>;
};
};
@@ -129,6 +131,7 @@
"clk-s-c0-fs0-ch1",
"clk-s-c0-fs0-ch2",
"clk-s-c0-fs0-ch3";
+ clock-critical = <0>; /* clk-s-c0-fs0-ch0 */
};
clk_s_c0: clockgen-c@09103000 {
@@ -142,6 +145,7 @@
clocks = <&clk_sysin>;
clock-output-names = "clk-s-c0-pll0-odf-0";
+ clock-critical = <0>; /* clk-s-c0-pll0-odf-0 */
};
clk_s_c0_pll1: clk-s-c0-pll1 {
@@ -204,6 +208,11 @@
"clk-clust-hades",
"clk-hwpe-hades",
"clk-fc-hades";
+ clock-critical = <CLK_ICN_CPU>,
+ <CLK_TX_ICN_DMU>,
+ <CLK_EXT2F_A9>,
+ <CLK_ICN_LMI>,
+ <CLK_ICN_SBC>;
};
};
diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
index c92a1ae33a1e..39e368ec3428 100644
--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
+++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
@@ -72,8 +72,9 @@
};
blue {
- label = "a1000:blue:usr";
+ label = "a1000:blue:pwr";
gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
};
};
@@ -84,6 +85,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
index 2b17c5199151..6de83a6187d0 100644
--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
@@ -66,6 +66,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
index 7afc7a64eef1..e28f080b1fd5 100644
--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
@@ -80,6 +80,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index ca58eb279d55..7e7dfc2b43db 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -65,9 +65,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
- <&ahb_gates 43>, <&ahb_gates 44>,
- <&dram_gates 26>;
+ clocks = <&ahb_gates 36>, <&ahb_gates 43>,
+ <&ahb_gates 44>, <&de_be0_clk>,
+ <&tcon0_ch1_clk>, <&dram_gates 26>;
status = "disabled";
};
@@ -75,9 +75,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
- clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
- <&ahb_gates 43>, <&ahb_gates 44>,
- <&ahb_gates 46>,
+ clocks = <&ahb_gates 36>, <&ahb_gates 43>,
+ <&ahb_gates 44>, <&ahb_gates 46>,
+ <&de_be0_clk>, <&de_fe0_clk>, <&tcon0_ch1_clk>,
<&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
@@ -86,8 +86,8 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0";
- clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
- <&ahb_gates 44>, <&ahb_gates 46>,
+ clocks = <&ahb_gates 36>, <&ahb_gates 44>, <&ahb_gates 46>,
+ <&de_be0_clk>, <&de_fe0_clk>, <&tcon0_ch0_clk>,
<&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
@@ -96,10 +96,11 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
- clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
- <&ahb_gates 36>, <&ahb_gates 44>,
- <&ahb_gates 46>,
- <&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
+ clocks = <&ahb_gates 34>, <&ahb_gates 36>,
+ <&ahb_gates 44>, <&ahb_gates 46>,
+ <&de_be0_clk>, <&de_fe0_clk>,
+ <&tcon0_ch1_clk>, <&dram_gates 5>,
+ <&dram_gates 25>, <&dram_gates 26>;
status = "disabled";
};
};
@@ -577,6 +578,81 @@
"dram_de_mp", "dram_ace";
};
+ de_be0_clk: clk@01c20104 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20104 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-be0";
+ };
+
+ de_be1_clk: clk@01c20108 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20108 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-be1";
+ };
+
+ de_fe0_clk: clk@01c2010c {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c2010c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-fe0";
+ };
+
+ de_fe1_clk: clk@01c20110 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20110 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-fe1";
+ };
+
+
+ tcon0_ch0_clk: clk@01c20118 {
+ #clock-cells = <0>;
+ #reset-cells = <1>;
+ compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+ reg = <0x01c20118 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon0-ch0-sclk";
+
+ };
+
+ tcon1_ch0_clk: clk@01c2011c {
+ #clock-cells = <0>;
+ #reset-cells = <1>;
+ compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+ reg = <0x01c2011c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon1-ch0-sclk";
+
+ };
+
+ tcon0_ch1_clk: clk@01c2012c {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+ reg = <0x01c2012c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon0-ch1-sclk";
+
+ };
+
+ tcon1_ch1_clk: clk@01c20130 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+ reg = <0x01c20130 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon1-ch1-sclk";
+
+ };
+
ve_clk: clk@01c2013c {
#clock-cells = <0>;
#reset-cells = <0>;
@@ -645,6 +721,19 @@
#dma-cells = <2>;
};
+ nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <37>;
+ clocks = <&ahb_gates 13>, <&nand_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 3>;
+ dma-names = "rxtx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
spi0: spi@01c05000 {
compatible = "allwinner,sun4i-a10-spi";
reg = <0x01c05000 0x1000>;
@@ -884,69 +973,62 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
- pwm0_pins_a: pwm0@0 {
- allwinner,pins = "PB2";
- allwinner,function = "pwm";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
- pwm1_pins_a: pwm1@0 {
- allwinner,pins = "PI3";
- allwinner,function = "pwm";
+ emac_pins_a: emac0@0 {
+ allwinner,pins = "PA0", "PA1", "PA2",
+ "PA3", "PA4", "PA5", "PA6",
+ "PA7", "PA8", "PA9", "PA10",
+ "PA11", "PA12", "PA13", "PA14",
+ "PA15", "PA16";
+ allwinner,function = "emac";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart0_pins_a: uart0@0 {
- allwinner,pins = "PB22", "PB23";
- allwinner,function = "uart0";
+ i2c0_pins_a: i2c0@0 {
+ allwinner,pins = "PB0", "PB1";
+ allwinner,function = "i2c0";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart0_pins_b: uart0@1 {
- allwinner,pins = "PF2", "PF4";
- allwinner,function = "uart0";
+ i2c1_pins_a: i2c1@0 {
+ allwinner,pins = "PB18", "PB19";
+ allwinner,function = "i2c1";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart1_pins_a: uart1@0 {
- allwinner,pins = "PA10", "PA11";
- allwinner,function = "uart1";
+ i2c2_pins_a: i2c2@0 {
+ allwinner,pins = "PB20", "PB21";
+ allwinner,function = "i2c2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c0_pins_a: i2c0@0 {
- allwinner,pins = "PB0", "PB1";
- allwinner,function = "i2c0";
+ ir0_rx_pins_a: ir0@0 {
+ allwinner,pins = "PB4";
+ allwinner,function = "ir0";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c1_pins_a: i2c1@0 {
- allwinner,pins = "PB18", "PB19";
- allwinner,function = "i2c1";
+ ir0_tx_pins_a: ir0@1 {
+ allwinner,pins = "PB3";
+ allwinner,function = "ir0";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c2_pins_a: i2c2@0 {
- allwinner,pins = "PB20", "PB21";
- allwinner,function = "i2c2";
+ ir1_rx_pins_a: ir1@0 {
+ allwinner,pins = "PB23";
+ allwinner,function = "ir1";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- emac_pins_a: emac0@0 {
- allwinner,pins = "PA0", "PA1", "PA2",
- "PA3", "PA4", "PA5", "PA6",
- "PA7", "PA8", "PA9", "PA10",
- "PA11", "PA12", "PA13", "PA14",
- "PA15", "PA16";
- allwinner,function = "emac";
+ ir1_tx_pins_a: ir1@1 {
+ allwinner,pins = "PB22";
+ allwinner,function = "ir1";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
@@ -966,34 +1048,41 @@
allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
};
- ir0_rx_pins_a: ir0@0 {
- allwinner,pins = "PB4";
- allwinner,function = "ir0";
+ ps20_pins_a: ps20@0 {
+ allwinner,pins = "PI20", "PI21";
+ allwinner,function = "ps2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir0_tx_pins_a: ir0@1 {
- allwinner,pins = "PB3";
- allwinner,function = "ir0";
+ ps21_pins_a: ps21@0 {
+ allwinner,pins = "PH12", "PH13";
+ allwinner,function = "ps2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir1_rx_pins_a: ir1@0 {
- allwinner,pins = "PB23";
- allwinner,function = "ir1";
+ pwm0_pins_a: pwm0@0 {
+ allwinner,pins = "PB2";
+ allwinner,function = "pwm";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir1_tx_pins_a: ir1@1 {
- allwinner,pins = "PB22";
- allwinner,function = "ir1";
+ pwm1_pins_a: pwm1@0 {
+ allwinner,pins = "PI3";
+ allwinner,function = "pwm";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
+ spdif_tx_pins_a: spdif@0 {
+ allwinner,pins = "PB13";
+ allwinner,function = "spdif";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
spi0_pins_a: spi0@0 {
allwinner,pins = "PI11", "PI12", "PI13";
allwinner,function = "spi0";
@@ -1050,25 +1139,25 @@
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ps20_pins_a: ps20@0 {
- allwinner,pins = "PI20", "PI21";
- allwinner,function = "ps2";
+ uart0_pins_a: uart0@0 {
+ allwinner,pins = "PB22", "PB23";
+ allwinner,function = "uart0";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ps21_pins_a: ps21@0 {
- allwinner,pins = "PH12", "PH13";
- allwinner,function = "ps2";
+ uart0_pins_b: uart0@1 {
+ allwinner,pins = "PF2", "PF4";
+ allwinner,function = "uart0";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- spdif_tx_pins_a: spdif@0 {
- allwinner,pins = "PB13";
- allwinner,function = "spdif";
+ uart1_pins_a: uart1@0 {
+ allwinner,pins = "PA10", "PA11";
+ allwinner,function = "uart1";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
index a790ec8adb75..2150e15e115a 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
@@ -124,7 +124,18 @@
status = "okay";
};
+&otg_sram {
+ status = "okay";
+};
+
&pio {
+ usb0_id_detect_pin: usb0_id_detect_pin@0 {
+ allwinner,pins = "PG12";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
mmc0_cd_pin_t004: mmc0_cd_pin@0 {
allwinner,pins = "PG1";
allwinner,function = "gpio_in";
@@ -158,11 +169,19 @@
status = "okay";
};
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
&usb1_vbus_pin_a {
allwinner,pins = "PG13";
};
&usbphy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_detect_pin>;
+ usb0_id_det-gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-mk802.dts b/arch/arm/boot/dts/sun5i-a10s-mk802.dts
index 46ff9407826d..c84ac005342e 100644
--- a/arch/arm/boot/dts/sun5i-a10s-mk802.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-mk802.dts
@@ -73,6 +73,20 @@
status = "okay";
};
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ axp152: pmic@30 {
+ compatible = "x-powers,axp152";
+ reg = <0x30>;
+ interrupts = <0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+};
+
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_mk802>;
@@ -83,10 +97,23 @@
status = "okay";
};
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
+
&ohci0 {
status = "okay";
};
+&otg_sram {
+ status = "okay";
+};
+
&pio {
led_pins_mk802: led_pins@0 {
allwinner,pins = "PB2";
@@ -122,6 +149,11 @@
status = "okay";
};
+&usb_otg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
&usbphy {
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index 86d046a502e6..aef91476f9ae 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -248,6 +248,13 @@
status = "okay";
};
+&spi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>,
+ <&spi2_cs0_pins_a>;
+ status = "okay";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
index 9fea918f949e..b5de75f4c710 100644
--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
@@ -79,6 +79,7 @@
regulator-name = "emac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ startup-delay-us = <20000>;
enable-active-high;
gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>;
};
@@ -195,7 +196,14 @@
regulator-always-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
+ regulator-name = "vcc-wifi1";
+};
+
+&reg_ldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi2";
};
&reg_usb1_vbus {
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 367f33012493..c41a2ba34dde 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -242,6 +242,20 @@
allwinner,drive = <SUN4I_PINCTRL_30_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
+
+ spi2_pins_a: spi2@0 {
+ allwinner,pins = "PB12", "PB13", "PB14";
+ allwinner,function = "spi2";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ spi2_cs0_pins_a: spi2_cs0@0 {
+ allwinner,pins = "PB11";
+ allwinner,function = "spi2";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
};
&sram_a {
diff --git a/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts b/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts
index 6546fa02901d..894c4c4f9a1f 100644
--- a/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts
+++ b/arch/arm/boot/dts/sun5i-a13-difrnce-dit4350.dts
@@ -42,185 +42,9 @@
/dts-v1/;
#include "sun5i-a13.dtsi"
-#include "sunxi-common-regulators.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-#include <dt-bindings/pwm/pwm.h>
+#include "sun5i-reference-design-tablet.dtsi"
/ {
model = "Difrnce DIT4350";
compatible = "difrnce,dit4350", "allwinner,sun5i-a13";
-
- aliases {
- serial0 = &uart1;
- };
-
- backlight: backlight {
- compatible = "pwm-backlight";
- pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
- default-brightness-level = <8>;
- /* TODO: backlight uses axp gpio1 as enable pin */
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
-
-&cpu0 {
- cpu-supply = <&reg_dcdc2>;
-};
-
-&ehci0 {
- status = "okay";
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
-
- axp209: pmic@34 {
- reg = <0x34>;
- interrupts = <0>;
- };
-};
-
-#include "axp209.dtsi"
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_a>;
- status = "okay";
-
- pcf8563: rtc@51 {
- compatible = "nxp,pcf8563";
- reg = <0x51>;
- };
-};
-
-&lradc {
- vref-supply = <&reg_ldo2>;
- status = "okay";
-
- button@200 {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- channel = <0>;
- voltage = <200000>;
- };
-
- button@400 {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- channel = <0>;
- voltage = <400000>;
- };
-};
-
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_d709>;
- vmmc-supply = <&reg_vcc3v3>;
- bus-width = <4>;
- cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
- cd-inverted;
- status = "okay";
-};
-
-&otg_sram {
- status = "okay";
-};
-
-&pio {
- mmc0_cd_pin_d709: mmc0_cd_pin@0 {
- allwinner,pins = "PG0";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
- usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
- allwinner,pins = "PG1";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
- };
-
- usb0_id_detect_pin: usb0_id_detect_pin@0 {
- allwinner,pins = "PG2";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins>;
- status = "okay";
-};
-
-&reg_dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <1250000>;
- regulator-name = "vdd-int-pll";
-};
-
-&reg_ldo1 {
- regulator-name = "vdd-rtc";
-};
-
-&reg_ldo2 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "avcc";
-};
-
-&reg_ldo3 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
-};
-
-&reg_usb0_vbus {
- gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
- status = "okay";
-};
-
-&uart1 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart1_pins_b>;
- status = "okay";
-};
-
-&usb_otg {
- dr_mode = "otg";
- status = "okay";
-};
-
-&usb0_vbus_pin_a {
- allwinner,pins = "PG12";
-};
-
-&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
- usb0_vbus-supply = <&reg_usb0_vbus>;
- usb1_vbus-supply = <&reg_ldo3>;
- status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
index 72e93acb5a9e..a89f29fa3e40 100644
--- a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
@@ -42,19 +42,45 @@
/dts-v1/;
#include "sun5i-a13.dtsi"
-#include "sun5i-q8-common.dtsi"
+#include "sun5i-reference-design-tablet.dtsi"
/ {
model = "Q8 A13 Tablet";
compatible = "allwinner,q8-a13", "allwinner,sun5i-a13";
+
+ panel: panel {
+ compatible = "urt,umsh-8596md-t", "simple-panel";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ /* TODO: lcd panel uses axp gpio0 as enable pin */
+ backlight = <&backlight>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel_input: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_out_lcd>;
+ };
+ };
+ };
+};
+
+&be0 {
+ status = "okay";
};
-&reg_ldo3 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
+&tcon0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_rgb666_pins>;
+ status = "okay";
};
-&usbphy {
- usb1_vbus-supply = <&reg_ldo3>;
+&tcon0_out {
+ tcon0_out_lcd: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_input>;
+ };
};
diff --git a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
index fa9ddfdcfe96..a8b0bcc04514 100644
--- a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
+++ b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
@@ -42,24 +42,20 @@
/dts-v1/;
#include "sun5i-a13.dtsi"
-#include "sunxi-common-regulators.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
+#include "sun5i-reference-design-tablet.dtsi"
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-#include <dt-bindings/pwm/pwm.h>
/ {
model = "Utoo P66";
compatible = "utoo,p66", "allwinner,sun5i-a13";
- backlight: backlight {
- compatible = "pwm-backlight";
- pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
- /* Note levels of 10 / 20% result in backlight off */
- brightness-levels = <0 30 40 50 60 70 80 90 100>;
- default-brightness-level = <6>;
- /* TODO: backlight uses axp gpio1 as enable pin */
+ /* The P66 uses the uart pins as gpios */
+ aliases {
+ /delete-property/serial0;
+ };
+
+ chosen {
+ /delete-property/stdout-path;
};
i2c_lcd: i2c@0 {
@@ -73,39 +69,21 @@
};
};
-&codec {
- pinctrl-names = "default";
- pinctrl-0 = <&codec_pa_pin>;
- allwinner,pa-gpios = <&pio 6 3 GPIO_ACTIVE_HIGH>; /* PG3 */
- status = "okay";
-};
-
-&cpu0 {
- cpu-supply = <&reg_dcdc2>;
+&backlight {
+ /* Note levels of 10 / 20% result in backlight off */
+ brightness-levels = <0 30 40 50 60 70 80 90 100>;
+ default-brightness-level = <6>;
};
-&ehci0 {
- status = "okay";
+&codec {
+ allwinner,pa-gpios = <&pio 6 3 GPIO_ACTIVE_HIGH>; /* PG3 */
};
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
-
- axp209: pmic@34 {
- reg = <0x34>;
- interrupts = <0>;
- };
+&codec_pa_pin {
+ allwinner,pins = "PG3";
};
-#include "axp209.dtsi"
-
&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_a>;
- status = "okay";
-
icn8318: touchscreen@40 {
compatible = "chipone,icn8318";
reg = <0x40>;
@@ -119,40 +97,6 @@
touchscreen-inverted-x;
touchscreen-swapped-x-y;
};
-
- pcf8563: rtc@51 {
- compatible = "nxp,pcf8563";
- reg = <0x51>;
- };
-};
-
-&lradc {
- vref-supply = <&reg_ldo2>;
- status = "okay";
-
- button@200 {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- channel = <0>;
- voltage = <200000>;
- };
-
- button@400 {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- channel = <0>;
- voltage = <400000>;
- };
-};
-
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_p66>;
- vmmc-supply = <&reg_vcc3v3>;
- bus-width = <4>;
- cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
- cd-inverted;
- status = "okay";
};
&mmc2 {
@@ -170,39 +114,7 @@
};
};
-&otg_sram {
- status = "okay";
-};
-
&pio {
- codec_pa_pin: codec_pa_pin@0 {
- allwinner,pins = "PG3";
- allwinner,function = "gpio_out";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
- mmc0_cd_pin_p66: mmc0_cd_pin@0 {
- allwinner,pins = "PG0";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
- usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
- allwinner,pins = "PG1";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
- };
-
- usb0_id_detect_pin: usb0_id_detect_pin@0 {
- allwinner,pins = "PG2";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
i2c_lcd_pins: i2c_lcd_pin@0 {
allwinner,pins = "PG10", "PG12";
allwinner,function = "gpio_out";
@@ -217,67 +129,17 @@
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- usb0_vbus_pin_a: usb0_vbus_pin@0 {
- allwinner,pins = "PB4";
- allwinner,function = "gpio_out";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-};
-
-&pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins>;
- status = "okay";
-};
-
-&reg_dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1500000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-int-pll";
-};
-
-&reg_ldo1 {
- regulator-name = "vdd-rtc";
-};
-
-&reg_ldo2 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "avcc";
-};
-
-&reg_ldo3 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-wifi";
};
&reg_usb0_vbus {
gpio = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
- status = "okay";
};
-&usb_otg {
- dr_mode = "otg";
- status = "okay";
+&uart1 {
+ /* The P66 uses the uart pins as gpios */
+ status = "disabled";
};
-&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
- usb0_vbus-supply = <&reg_usb0_vbus>;
- usb1_vbus-supply = <&reg_ldo3>;
- status = "okay";
+&usb0_vbus_pin_a {
+ allwinner,pins = "PB4";
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 263d46dbc7e6..e012890e0cf2 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -207,7 +207,50 @@
};
};
+ display-engine {
+ compatible = "allwinner,sun5i-a13-display-engine";
+ allwinner,pipelines = <&fe0>;
+ };
+
soc@01c00000 {
+ tcon0: lcd-controller@01c0c000 {
+ compatible = "allwinner,sun5i-a13-tcon";
+ reg = <0x01c0c000 0x1000>;
+ interrupts = <44>;
+ resets = <&tcon_ch0_clk 1>;
+ reset-names = "lcd";
+ clocks = <&ahb_gates 36>,
+ <&tcon_ch0_clk>,
+ <&tcon_ch1_clk>;
+ clock-names = "ahb",
+ "tcon-ch0",
+ "tcon-ch1";
+ clock-output-names = "tcon-pixel-clock";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tcon0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ tcon0_in_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_out_tcon0>;
+ };
+ };
+
+ tcon0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ };
+ };
+
pwm: pwm@01c20e00 {
compatible = "allwinner,sun5i-a13-pwm";
reg = <0x01c20e00 0xc>;
@@ -215,6 +258,75 @@
#pwm-cells = <3>;
status = "disabled";
};
+
+ fe0: display-frontend@01e00000 {
+ compatible = "allwinner,sun5i-a13-display-frontend";
+ reg = <0x01e00000 0x20000>;
+ interrupts = <47>;
+ clocks = <&ahb_gates 46>, <&de_fe_clk>,
+ <&dram_gates 25>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&de_fe_clk>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fe0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ fe0_out_be0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&be0_in_fe0>;
+ };
+ };
+ };
+ };
+
+ be0: display-backend@01e60000 {
+ compatible = "allwinner,sun5i-a13-display-backend";
+ reg = <0x01e60000 0x10000>;
+ clocks = <&ahb_gates 44>, <&de_be_clk>,
+ <&dram_gates 26>;
+ clock-names = "ahb", "mod",
+ "ram";
+ resets = <&de_be_clk>;
+ status = "disabled";
+
+ assigned-clocks = <&de_be_clk>;
+ assigned-clock-rates = <300000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ be0_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ be0_in_fe0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&fe0_out_be0>;
+ };
+ };
+
+ be0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ be0_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_be0>;
+ };
+ };
+ };
+ };
};
};
@@ -237,6 +349,16 @@
&pio {
compatible = "allwinner,sun5i-a13-pinctrl";
+ lcd_rgb666_pins: lcd_rgb666@0 {
+ allwinner,pins = "PD2", "PD3", "PD4", "PD5", "PD6", "PD7",
+ "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
+ "PD18", "PD19", "PD20", "PD21", "PD22", "PD23",
+ "PD24", "PD25", "PD26", "PD27";
+ allwinner,function = "lcd0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
uart1_pins_a: uart1@0 {
allwinner,pins = "PE10", "PE11";
allwinner,function = "uart1";
diff --git a/arch/arm/boot/dts/sun5i-r8.dtsi b/arch/arm/boot/dts/sun5i-r8.dtsi
index c04cf690b858..8b058f53b7dc 100644
--- a/arch/arm/boot/dts/sun5i-r8.dtsi
+++ b/arch/arm/boot/dts/sun5i-r8.dtsi
@@ -76,122 +76,12 @@
};
};
};
-
- tcon0: lcd-controller@01c0c000 {
- compatible = "allwinner,sun5i-a13-tcon";
- reg = <0x01c0c000 0x1000>;
- interrupts = <44>;
- resets = <&tcon_ch0_clk 1>;
- reset-names = "lcd";
- clocks = <&ahb_gates 36>,
- <&tcon_ch0_clk>,
- <&tcon_ch1_clk>;
- clock-names = "ahb",
- "tcon-ch0",
- "tcon-ch1";
- clock-output-names = "tcon-pixel-clock";
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- tcon0_in_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_out_tcon0>;
- };
- };
-
- tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- tcon0_out_tve0: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&tve0_in_tcon0>;
- };
- };
- };
- };
-
- fe0: display-frontend@01e00000 {
- compatible = "allwinner,sun5i-a13-display-frontend";
- reg = <0x01e00000 0x20000>;
- interrupts = <47>;
- clocks = <&ahb_gates 46>, <&de_fe_clk>,
- <&dram_gates 25>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&de_fe_clk>;
- status = "disabled";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- fe0_out_be0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&be0_in_fe0>;
- };
- };
- };
- };
-
- be0: display-backend@01e60000 {
- compatible = "allwinner,sun5i-a13-display-backend";
- reg = <0x01e60000 0x10000>;
- clocks = <&ahb_gates 44>, <&de_be_clk>,
- <&dram_gates 26>;
- clock-names = "ahb", "mod",
- "ram";
- resets = <&de_be_clk>;
- status = "disabled";
-
- assigned-clocks = <&de_be_clk>;
- assigned-clock-rates = <300000000>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- be0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- be0_in_fe0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&fe0_out_be0>;
- };
- };
-
- be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>;
-
- be0_out_tcon0: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&tcon0_in_be0>;
- };
- };
- };
- };
};
+};
- display-engine {
- compatible = "allwinner,sun5i-a13-display-engine";
- allwinner,pipelines = <&fe0>;
+&tcon0_out {
+ tcon0_out_tve0: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&tve0_in_tcon0>;
};
};
diff --git a/arch/arm/boot/dts/sun5i-q8-common.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
index a78e189f6653..20cc940f5f91 100644
--- a/arch/arm/boot/dts/sun5i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
@@ -39,7 +39,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "sunxi-q8-common.dtsi"
+#include "sunxi-reference-design-tablet.dtsi"
#include <dt-bindings/pwm/pwm.h>
@@ -61,6 +61,13 @@
};
};
+&codec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&codec_pa_pin>;
+ allwinner,pa-gpios = <&pio 6 10 GPIO_ACTIVE_HIGH>; /* PG10 */
+ status = "okay";
+};
+
&cpu0 {
cpu-supply = <&reg_dcdc2>;
};
@@ -85,9 +92,13 @@
#include "axp209.dtsi"
+&lradc {
+ vref-supply = <&reg_ldo2>;
+};
+
&mmc0 {
pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_q8>;
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
vmmc-supply = <&reg_vcc3v0>;
bus-width = <4>;
cd-gpios = <&pio 6 0 GPIO_ACTIVE_HIGH>; /* PG0 */
@@ -100,7 +111,14 @@
};
&pio {
- mmc0_cd_pin_q8: mmc0_cd_pin@0 {
+ codec_pa_pin: codec_pa_pin@0 {
+ allwinner,pins = "PG10";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ mmc0_cd_pin: mmc0_cd_pin@0 {
allwinner,pins = "PG0";
allwinner,function = "gpio_in";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
@@ -154,6 +172,12 @@
regulator-name = "avcc";
};
+&reg_ldo3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
&reg_usb0_vbus {
gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
status = "okay";
@@ -170,11 +194,17 @@
status = "okay";
};
+&usb_power_supply {
+ status = "okay";
+};
+
&usbphy {
pinctrl-names = "default";
pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
+ usb1_vbus-supply = <&reg_ldo3>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 0840612b5ed6..e374f4fc8073 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -130,7 +130,7 @@
};
pll3x2: pll3x2_clk {
- compatible = "fixed-factor-clock";
+ compatible = "allwinner,sun4i-a10-pll3-2x-clk", "fixed-factor-clock";
#clock-cells = <0>;
clock-div = <1>;
clock-mult = <2>;
diff --git a/arch/arm/boot/dts/sun6i-a31-m9.dts b/arch/arm/boot/dts/sun6i-a31-m9.dts
index 6e0e5687a09c..29016a13a2c1 100644
--- a/arch/arm/boot/dts/sun6i-a31-m9.dts
+++ b/arch/arm/boot/dts/sun6i-a31-m9.dts
@@ -65,12 +65,17 @@
pinctrl-0 = <&led_pins_m9>;
blue {
- label = "m9:blue:usr";
+ label = "m9:blue:pwr";
gpios = <&pio 7 13 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
};
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc3>;
+};
+
&ehci0 {
status = "okay";
};
@@ -84,6 +89,7 @@
pinctrl-0 = <&gmac_pins_mii_a>;
phy = <&phy1>;
phy-mode = "mii";
+ phy-supply = <&reg_dldo1>;
status = "okay";
phy1: ethernet-phy@1 {
@@ -100,13 +106,26 @@
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_m9>;
- vmmc-supply = <&reg_vcc3v3>;
+ vmmc-supply = <&reg_dcdc1>;
bus-width = <4>;
cd-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
cd-inverted;
status = "okay";
};
+&p2wi {
+ status = "okay";
+
+ axp22x: pmic@68 {
+ compatible = "x-powers,axp221";
+ reg = <0x68>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+#include "axp22x.dtsi"
+
&pio {
led_pins_m9: led_pins@0 {
allwinner,pins = "PH13";
@@ -130,6 +149,78 @@
};
};
+&reg_aldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "avcc";
+};
+
+&reg_dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpus"; /* This is an educated guess */
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+};
+
+&reg_dcdc2 {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-gpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-sys-dll";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-ethernet-phy";
+};
+
+/*
+ * Both reg_usb1_vbus and reg_dldo4 need to be on for the hub attached
+ * to usb1 to work, and we can list only one usb1_vbus-supply, so dldo4 is
+ * marked as regulator-always-on.
+ */
+&reg_dldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-usb-hub";
+};
+
&reg_usb1_vbus {
pinctrl-names = "default";
pinctrl-0 = <&usb1_vbus_pin_m9>;
@@ -145,5 +236,6 @@
&usbphy {
usb1_vbus-supply = <&reg_usb1_vbus>;
+ usb2_vbus-supply = <&reg_aldo1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
index 4dd70cce2127..5faeae429e2a 100644
--- a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
+++ b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
@@ -65,12 +65,17 @@
pinctrl-0 = <&led_pins_m9>;
blue {
- label = "m9:blue:usr";
+ label = "a1000g:blue:pwr";
gpios = <&pio 7 13 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
};
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc3>;
+};
+
&ehci0 {
status = "okay";
};
@@ -84,6 +89,7 @@
pinctrl-0 = <&gmac_pins_mii_a>;
phy = <&phy1>;
phy-mode = "mii";
+ phy-supply = <&reg_dldo1>;
status = "okay";
phy1: ethernet-phy@1 {
@@ -100,13 +106,26 @@
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_m9>;
- vmmc-supply = <&reg_vcc3v3>;
+ vmmc-supply = <&reg_dcdc1>;
bus-width = <4>;
cd-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
cd-inverted;
status = "okay";
};
+&p2wi {
+ status = "okay";
+
+ axp22x: pmic@68 {
+ compatible = "x-powers,axp221";
+ reg = <0x68>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+#include "axp22x.dtsi"
+
&pio {
led_pins_m9: led_pins@0 {
allwinner,pins = "PH13";
@@ -130,6 +149,78 @@
};
};
+&reg_aldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "avcc";
+};
+
+&reg_dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpus"; /* This is an educated guess */
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+};
+
+&reg_dcdc2 {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-gpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1320000>;
+ regulator-name = "vdd-sys-dll";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-ethernet-phy";
+};
+
+/*
+ * Both reg_usb1_vbus and reg_dldo4 need to be on for the hub attached
+ * to usb1 to work, and we can list only one usb1_vbus-supply, so dldo4 is
+ * marked as regulator-always-on.
+ */
+&reg_dldo4 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-usb-hub";
+};
+
&reg_usb1_vbus {
pinctrl-names = "default";
pinctrl-0 = <&usb1_vbus_pin_m9>;
@@ -150,5 +241,6 @@
&usbphy {
usb1_vbus-supply = <&reg_usb1_vbus>;
+ usb2_vbus-supply = <&reg_aldo1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
new file mode 100644
index 000000000000..ba5bca0fe997
--- /dev/null
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2016 Luo Yi <luoyi.ly@gmail.com>
+ *
+ * Thanks to the original work by Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun7i-a20.dtsi"
+#include "sunxi-common-regulators.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "Banana Pi BPI-M1-Plus";
+ compatible = "sinovoip,bpi-m1-plus", "allwinner,sun7i-a20";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_bpi_m1p>;
+
+ green {
+ label = "bananapi-m1-plus:green:usr";
+ gpios = <&pio 7 24 GPIO_ACTIVE_HIGH>;
+ };
+
+ pwr {
+ label = "bananapi-m1-plus:pwr:usr";
+ gpios = <&pio 7 25 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ mmc3_pwrseq: mmc3_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pwrseq_pin_bpi_m1p>;
+ reset-gpios = <&pio 7 22 GPIO_ACTIVE_LOW>; /* PH22 WL-PMU-EN */
+ };
+
+ reg_gmac_3v3: gmac-3v3 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_power_pin_bpi_m1p>;
+ regulator-name = "gmac-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <100000>;
+ enable-active-high;
+ gpio = <&pio 7 23 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&ahci {
+ status = "okay";
+};
+
+&codec {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&gmac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_pins_rgmii_a>;
+ phy = <&phy1>;
+ phy-mode = "rgmii";
+ phy-supply = <&reg_gmac_3v3>;
+ status = "okay";
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ axp209: pmic@34 {
+ compatible = "x-powers,axp209";
+ reg = <0x34>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+};
+
+&ir0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir0_rx_pins_a>;
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_bpi_m1p>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 7 10 GPIO_ACTIVE_HIGH>; /* PH10 */
+ cd-inverted;
+ status = "okay";
+};
+
+&mmc3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins_a>;
+ vmmc-supply = <&reg_vcc3v3>;
+ mmc-pwrseq = <&mmc3_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ enable-sdio-wakeup;
+ status = "okay";
+
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ interrupt-parent = <&pio>;
+ interrupts = <7 15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "host-wake";
+ };
+};
+
+&mmc3_pins_a {
+ /* AP6210 requires pull-up */
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&pio {
+ gmac_power_pin_bpi_m1p: gmac_power_pin@0 {
+ allwinner,pins = "PH23";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ led_pins_bpi_m1p: led_pins@0 {
+ allwinner,pins = "PH24", "PH25";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ mmc0_cd_pin_bpi_m1p: mmc0_cd_pin@0 {
+ allwinner,pins = "PH10";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ mmc3_pwrseq_pin_bpi_m1p: mmc3_pwrseq_pin@0 {
+ allwinner,pins = "PH22";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 5ee43d8bf174..73c05dab0a69 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -95,6 +95,10 @@
status = "okay";
};
+&codec {
+ status = "okay";
+};
+
&cpu0 {
cpu-supply = <&reg_dcdc2>;
};
@@ -110,13 +114,67 @@
&gmac {
pinctrl-names = "default";
pinctrl-0 = <&gmac_pins_rgmii_a>;
- phy = <&phy1>;
phy-mode = "rgmii";
phy-supply = <&reg_gmac_3v3>;
status = "okay";
- phy1: ethernet-phy@1 {
- reg = <1>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch: ethernet-switch@1e {
+ compatible = "brcm,bcm53125";
+ reg = <30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port0: port@0 {
+ reg = <0>;
+ label = "lan2";
+ };
+
+ port1: port@1 {
+ reg = <1>;
+ label = "lan3";
+ };
+
+ port2: port@2 {
+ reg = <2>;
+ label = "lan4";
+ };
+
+ port3: port@3 {
+ reg = <3>;
+ label = "wan";
+ };
+
+ port4: port@4 {
+ reg = <4>;
+ label = "lan1";
+ };
+
+ port8: port@8 {
+ reg = <8>;
+ label = "cpu";
+ ethernet = <&gmac>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
};
@@ -158,10 +216,6 @@
status = "okay";
};
-&ohci1 {
- status = "okay";
-};
-
&otg_sram {
status = "okay";
};
@@ -199,7 +253,7 @@
#include "axp209.dtsi"
&reg_ahci_5v {
- gpio = <&pio 1 3 0>; /* PB3 */
+ gpio = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
status = "okay";
};
@@ -232,11 +286,8 @@
status = "okay";
};
-&reg_usb1_vbus {
- status = "okay";
-};
-
&reg_usb2_vbus {
+ gpio = <&pio 7 12 GPIO_ACTIVE_HIGH>; /* PH12 */
status = "okay";
};
@@ -275,13 +326,16 @@
status = "okay";
};
+&usb2_vbus_pin_a {
+ allwinner,pins = "PH12";
+};
+
&usbphy {
pinctrl-names = "default";
pinctrl-0 = <&usb0_id_detect_pin>;
usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
- usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 2c34bbbb9570..bd0c47660243 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -67,9 +67,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
- <&ahb_gates 43>, <&ahb_gates 44>,
- <&dram_gates 26>;
+ clocks = <&ahb_gates 36>, <&ahb_gates 43>,
+ <&ahb_gates 44>, <&de_be0_clk>,
+ <&tcon0_ch1_clk>, <&dram_gates 26>;
status = "disabled";
};
@@ -77,8 +77,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0";
- clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
- <&ahb_gates 44>, <&dram_gates 26>;
+ clocks = <&ahb_gates 36>, <&ahb_gates 44>,
+ <&de_be0_clk>, <&tcon0_ch0_clk>,
+ <&dram_gates 26>;
status = "disabled";
};
@@ -86,8 +87,9 @@
compatible = "allwinner,simple-framebuffer",
"simple-framebuffer";
allwinner,pipeline = "de_be0-lcd0-tve0";
- clocks = <&pll3>, <&pll5 1>,
- <&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
+ clocks = <&ahb_gates 34>, <&ahb_gates 36>,
+ <&ahb_gates 44>,
+ <&de_be0_clk>, <&tcon0_ch1_clk>,
<&dram_gates 5>, <&dram_gates 26>;
status = "disabled";
};
@@ -369,9 +371,9 @@
<5>, <6>, <7>,
<8>, <10>;
clock-output-names = "apb0_codec", "apb0_spdif",
- "apb0_ac97", "apb0_iis0", "apb0_iis1",
+ "apb0_ac97", "apb0_i2s0", "apb0_i2s1",
"apb0_pio", "apb0_ir0", "apb0_ir1",
- "apb0_iis2", "apb0_keypad";
+ "apb0_i2s2", "apb0_keypad";
};
apb1: clk@01c20058 {
@@ -521,6 +523,28 @@
clock-output-names = "ir1";
};
+ i2s0_clk: clk@01c200b8 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod1-clk";
+ reg = <0x01c200b8 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+ <&pll2 SUN4I_A10_PLL2_4X>,
+ <&pll2 SUN4I_A10_PLL2_2X>,
+ <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "i2s0";
+ };
+
+ ac97_clk: clk@01c200bc {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod1-clk";
+ reg = <0x01c200bc 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+ <&pll2 SUN4I_A10_PLL2_4X>,
+ <&pll2 SUN4I_A10_PLL2_2X>,
+ <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "ac97";
+ };
+
spdif_clk: clk@01c200c0 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-mod1-clk";
@@ -558,6 +582,28 @@
clock-output-names = "spi3";
};
+ i2s1_clk: clk@01c200d8 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod1-clk";
+ reg = <0x01c200d8 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+ <&pll2 SUN4I_A10_PLL2_4X>,
+ <&pll2 SUN4I_A10_PLL2_2X>,
+ <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "i2s1";
+ };
+
+ i2s2_clk: clk@01c200dc {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-mod1-clk";
+ reg = <0x01c200dc 0x4>;
+ clocks = <&pll2 SUN4I_A10_PLL2_8X>,
+ <&pll2 SUN4I_A10_PLL2_4X>,
+ <&pll2 SUN4I_A10_PLL2_2X>,
+ <&pll2 SUN4I_A10_PLL2_1X>;
+ clock-output-names = "i2s2";
+ };
+
dram_gates: clk@01c20100 {
#clock-cells = <1>;
compatible = "allwinner,sun4i-a10-dram-gates-clk";
@@ -583,6 +629,80 @@
"dram_de_mp", "dram_ace";
};
+ de_be0_clk: clk@01c20104 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20104 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-be0";
+ };
+
+ de_be1_clk: clk@01c20108 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20108 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-be1";
+ };
+
+ de_fe0_clk: clk@01c2010c {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c2010c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-fe0";
+ };
+
+ de_fe1_clk: clk@01c20110 {
+ #clock-cells = <0>;
+ #reset-cells = <0>;
+ compatible = "allwinner,sun4i-a10-display-clk";
+ reg = <0x01c20110 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll5 1>;
+ clock-output-names = "de-fe1";
+ };
+
+ tcon0_ch0_clk: clk@01c20118 {
+ #clock-cells = <0>;
+ #reset-cells = <1>;
+ compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+ reg = <0x01c20118 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon0-ch0-sclk";
+
+ };
+
+ tcon1_ch0_clk: clk@01c2011c {
+ #clock-cells = <0>;
+ #reset-cells = <1>;
+ compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+ reg = <0x01c2011c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon1-ch0-sclk";
+
+ };
+
+ tcon0_ch1_clk: clk@01c2012c {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-tcon-ch0-clk";
+ reg = <0x01c2012c 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon0-ch1-sclk";
+
+ };
+
+ tcon1_ch1_clk: clk@01c20130 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun4i-a10-tcon-ch1-clk";
+ reg = <0x01c20130 0x4>;
+ clocks = <&pll3>, <&pll7>, <&pll3x2>, <&pll7x2>;
+ clock-output-names = "tcon1-ch1-sclk";
+
+ };
+
ve_clk: clk@01c2013c {
#clock-cells = <0>;
#reset-cells = <0>;
@@ -726,6 +846,19 @@
#dma-cells = <2>;
};
+ nfc: nand@01c03000 {
+ compatible = "allwinner,sun4i-a10-nand";
+ reg = <0x01c03000 0x1000>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ahb_gates 13>, <&nand_clk>;
+ clock-names = "ahb", "mod";
+ dmas = <&dma SUN4I_DMA_DEDICATED 3>;
+ dma-names = "rxtx";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
spi0: spi@01c05000 {
compatible = "allwinner,sun4i-a10-spi";
reg = <0x01c05000 0x1000>;
@@ -958,160 +1091,177 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
- pwm0_pins_a: pwm0@0 {
- allwinner,pins = "PB2";
- allwinner,function = "pwm";
+ clk_out_a_pins_a: clk_out_a@0 {
+ allwinner,pins = "PI12";
+ allwinner,function = "clk_out_a";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- pwm1_pins_a: pwm1@0 {
- allwinner,pins = "PI3";
- allwinner,function = "pwm";
+ clk_out_b_pins_a: clk_out_b@0 {
+ allwinner,pins = "PI13";
+ allwinner,function = "clk_out_b";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart0_pins_a: uart0@0 {
- allwinner,pins = "PB22", "PB23";
- allwinner,function = "uart0";
+ emac_pins_a: emac0@0 {
+ allwinner,pins = "PA0", "PA1", "PA2",
+ "PA3", "PA4", "PA5", "PA6",
+ "PA7", "PA8", "PA9", "PA10",
+ "PA11", "PA12", "PA13", "PA14",
+ "PA15", "PA16";
+ allwinner,function = "emac";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart2_pins_a: uart2@0 {
- allwinner,pins = "PI16", "PI17", "PI18", "PI19";
- allwinner,function = "uart2";
+ gmac_pins_mii_a: gmac_mii@0 {
+ allwinner,pins = "PA0", "PA1", "PA2",
+ "PA3", "PA4", "PA5", "PA6",
+ "PA7", "PA8", "PA9", "PA10",
+ "PA11", "PA12", "PA13", "PA14",
+ "PA15", "PA16";
+ allwinner,function = "gmac";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart3_pins_a: uart3@0 {
- allwinner,pins = "PG6", "PG7", "PG8", "PG9";
- allwinner,function = "uart3";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ gmac_pins_rgmii_a: gmac_rgmii@0 {
+ allwinner,pins = "PA0", "PA1", "PA2",
+ "PA3", "PA4", "PA5", "PA6",
+ "PA7", "PA8", "PA10",
+ "PA11", "PA12", "PA13",
+ "PA15", "PA16";
+ allwinner,function = "gmac";
+ /*
+ * data lines in RGMII mode use DDR mode
+ * and need a higher signal drive strength
+ */
+ allwinner,drive = <SUN4I_PINCTRL_40_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart3_pins_b: uart3@1 {
- allwinner,pins = "PH0", "PH1";
- allwinner,function = "uart3";
+ i2c0_pins_a: i2c0@0 {
+ allwinner,pins = "PB0", "PB1";
+ allwinner,function = "i2c0";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart4_pins_a: uart4@0 {
- allwinner,pins = "PG10", "PG11";
- allwinner,function = "uart4";
+ i2c1_pins_a: i2c1@0 {
+ allwinner,pins = "PB18", "PB19";
+ allwinner,function = "i2c1";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart4_pins_b: uart4@1 {
- allwinner,pins = "PH4", "PH5";
- allwinner,function = "uart4";
+ i2c2_pins_a: i2c2@0 {
+ allwinner,pins = "PB20", "PB21";
+ allwinner,function = "i2c2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart5_pins_a: uart5@0 {
- allwinner,pins = "PI10", "PI11";
- allwinner,function = "uart5";
+ i2c3_pins_a: i2c3@0 {
+ allwinner,pins = "PI0", "PI1";
+ allwinner,function = "i2c3";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart6_pins_a: uart6@0 {
- allwinner,pins = "PI12", "PI13";
- allwinner,function = "uart6";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ ir0_rx_pins_a: ir0@0 {
+ allwinner,pins = "PB4";
+ allwinner,function = "ir0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- uart7_pins_a: uart7@0 {
- allwinner,pins = "PI20", "PI21";
- allwinner,function = "uart7";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ ir0_tx_pins_a: ir0@1 {
+ allwinner,pins = "PB3";
+ allwinner,function = "ir0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c0_pins_a: i2c0@0 {
- allwinner,pins = "PB0", "PB1";
- allwinner,function = "i2c0";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ ir1_rx_pins_a: ir1@0 {
+ allwinner,pins = "PB23";
+ allwinner,function = "ir1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c1_pins_a: i2c1@0 {
- allwinner,pins = "PB18", "PB19";
- allwinner,function = "i2c1";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ ir1_tx_pins_a: ir1@1 {
+ allwinner,pins = "PB22";
+ allwinner,function = "ir1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c2_pins_a: i2c2@0 {
- allwinner,pins = "PB20", "PB21";
- allwinner,function = "i2c2";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ mmc0_pins_a: mmc0@0 {
+ allwinner,pins = "PF0", "PF1", "PF2",
+ "PF3", "PF4", "PF5";
+ allwinner,function = "mmc0";
+ allwinner,drive = <SUN4I_PINCTRL_30_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- i2c3_pins_a: i2c3@0 {
- allwinner,pins = "PI0", "PI1";
- allwinner,function = "i2c3";
+ mmc0_cd_pin_reference_design: mmc0_cd_pin@0 {
+ allwinner,pins = "PH1";
+ allwinner,function = "gpio_in";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ mmc2_pins_a: mmc2@0 {
+ allwinner,pins = "PC6", "PC7", "PC8",
+ "PC9", "PC10", "PC11";
+ allwinner,function = "mmc2";
+ allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ mmc3_pins_a: mmc3@0 {
+ allwinner,pins = "PI4", "PI5", "PI6",
+ "PI7", "PI8", "PI9";
+ allwinner,function = "mmc3";
+ allwinner,drive = <SUN4I_PINCTRL_30_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- emac_pins_a: emac0@0 {
- allwinner,pins = "PA0", "PA1", "PA2",
- "PA3", "PA4", "PA5", "PA6",
- "PA7", "PA8", "PA9", "PA10",
- "PA11", "PA12", "PA13", "PA14",
- "PA15", "PA16";
- allwinner,function = "emac";
+ ps20_pins_a: ps20@0 {
+ allwinner,pins = "PI20", "PI21";
+ allwinner,function = "ps2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- clk_out_a_pins_a: clk_out_a@0 {
- allwinner,pins = "PI12";
- allwinner,function = "clk_out_a";
+ ps21_pins_a: ps21@0 {
+ allwinner,pins = "PH12", "PH13";
+ allwinner,function = "ps2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- clk_out_b_pins_a: clk_out_b@0 {
- allwinner,pins = "PI13";
- allwinner,function = "clk_out_b";
+ pwm0_pins_a: pwm0@0 {
+ allwinner,pins = "PB2";
+ allwinner,function = "pwm";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- gmac_pins_mii_a: gmac_mii@0 {
- allwinner,pins = "PA0", "PA1", "PA2",
- "PA3", "PA4", "PA5", "PA6",
- "PA7", "PA8", "PA9", "PA10",
- "PA11", "PA12", "PA13", "PA14",
- "PA15", "PA16";
- allwinner,function = "gmac";
+ pwm1_pins_a: pwm1@0 {
+ allwinner,pins = "PI3";
+ allwinner,function = "pwm";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- gmac_pins_rgmii_a: gmac_rgmii@0 {
- allwinner,pins = "PA0", "PA1", "PA2",
- "PA3", "PA4", "PA5", "PA6",
- "PA7", "PA8", "PA10",
- "PA11", "PA12", "PA13",
- "PA15", "PA16";
- allwinner,function = "gmac";
- /*
- * data lines in RGMII mode use DDR mode
- * and need a higher signal drive strength
- */
- allwinner,drive = <SUN4I_PINCTRL_40_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ spdif_tx_pins_a: spdif@0 {
+ allwinner,pins = "PB13";
+ allwinner,function = "spdif";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
};
spi0_pins_a: spi0@0 {
@@ -1177,84 +1327,67 @@
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- mmc0_pins_a: mmc0@0 {
- allwinner,pins = "PF0", "PF1", "PF2",
- "PF3", "PF4", "PF5";
- allwinner,function = "mmc0";
- allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+ uart0_pins_a: uart0@0 {
+ allwinner,pins = "PB22", "PB23";
+ allwinner,function = "uart0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- mmc0_cd_pin_reference_design: mmc0_cd_pin@0 {
- allwinner,pins = "PH1";
- allwinner,function = "gpio_in";
+ uart2_pins_a: uart2@0 {
+ allwinner,pins = "PI16", "PI17", "PI18", "PI19";
+ allwinner,function = "uart2";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
- mmc2_pins_a: mmc2@0 {
- allwinner,pins = "PC6", "PC7", "PC8",
- "PC9", "PC10", "PC11";
- allwinner,function = "mmc2";
- allwinner,drive = <SUN4I_PINCTRL_30_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-
- mmc3_pins_a: mmc3@0 {
- allwinner,pins = "PI4", "PI5", "PI6",
- "PI7", "PI8", "PI9";
- allwinner,function = "mmc3";
- allwinner,drive = <SUN4I_PINCTRL_30_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir0_rx_pins_a: ir0@0 {
- allwinner,pins = "PB4";
- allwinner,function = "ir0";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ uart3_pins_a: uart3@0 {
+ allwinner,pins = "PG6", "PG7", "PG8", "PG9";
+ allwinner,function = "uart3";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir0_tx_pins_a: ir0@1 {
- allwinner,pins = "PB3";
- allwinner,function = "ir0";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ uart3_pins_b: uart3@1 {
+ allwinner,pins = "PH0", "PH1";
+ allwinner,function = "uart3";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir1_rx_pins_a: ir1@0 {
- allwinner,pins = "PB23";
- allwinner,function = "ir1";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ uart4_pins_a: uart4@0 {
+ allwinner,pins = "PG10", "PG11";
+ allwinner,function = "uart4";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ir1_tx_pins_a: ir1@1 {
- allwinner,pins = "PB22";
- allwinner,function = "ir1";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ uart4_pins_b: uart4@1 {
+ allwinner,pins = "PH4", "PH5";
+ allwinner,function = "uart4";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ps20_pins_a: ps20@0 {
- allwinner,pins = "PI20", "PI21";
- allwinner,function = "ps2";
+ uart5_pins_a: uart5@0 {
+ allwinner,pins = "PI10", "PI11";
+ allwinner,function = "uart5";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- ps21_pins_a: ps21@0 {
- allwinner,pins = "PH12", "PH13";
- allwinner,function = "ps2";
+ uart6_pins_a: uart6@0 {
+ allwinner,pins = "PI12", "PI13";
+ allwinner,function = "uart6";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
- spdif_tx_pins_a: spdif@0 {
- allwinner,pins = "PB13";
- allwinner,function = "spdif";
+ uart7_pins_a: uart7@0 {
+ allwinner,pins = "PI20", "PI21";
+ allwinner,function = "uart7";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
};
@@ -1320,6 +1453,32 @@
status = "disabled";
};
+ i2s1: i2s@01c22000 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-i2s";
+ reg = <0x01c22000 0x400>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb0_gates 4>, <&i2s1_clk>;
+ clock-names = "apb", "mod";
+ dmas = <&dma SUN4I_DMA_NORMAL 4>,
+ <&dma SUN4I_DMA_NORMAL 4>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ i2s0: i2s@01c22400 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-i2s";
+ reg = <0x01c22400 0x400>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb0_gates 3>, <&i2s0_clk>;
+ clock-names = "apb", "mod";
+ dmas = <&dma SUN4I_DMA_NORMAL 3>,
+ <&dma SUN4I_DMA_NORMAL 3>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
lradc: lradc@01c22800 {
compatible = "allwinner,sun4i-a10-lradc-keys";
reg = <0x01c22800 0x100>;
@@ -1345,6 +1504,19 @@
reg = <0x01c23800 0x200>;
};
+ i2s2: i2s@01c24400 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun4i-a10-i2s";
+ reg = <0x01c24400 0x400>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&apb0_gates 8>, <&i2s2_clk>;
+ clock-names = "apb", "mod";
+ dmas = <&dma SUN4I_DMA_NORMAL 6>,
+ <&dma SUN4I_DMA_NORMAL 6>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
rtp: rtp@01c25000 {
compatible = "allwinner,sun5i-a13-ts";
reg = <0x01c25000 0x100>;
diff --git a/arch/arm/boot/dts/sun8i-a23-inet86dz.dts b/arch/arm/boot/dts/sun8i-a23-inet86dz.dts
new file mode 100644
index 000000000000..0f9f71b14047
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-a23-inet86dz.dts
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a23.dtsi"
+#include "sun8i-reference-design-tablet.dtsi"
+
+/ {
+ model = "INet-86DZ Rev 01";
+ compatible = "primux,inet86dz", "allwinner,sun8i-a23";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&usbphy {
+ usb1_vbus-supply = <&reg_dldo1>;
+};
diff --git a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
new file mode 100644
index 000000000000..e3004428e7a7
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a23.dtsi"
+#include "sun8i-reference-design-tablet.dtsi"
+
+/ {
+ model = "Polaroid MID2407PXE03 tablet";
+ compatible = "polaroid,mid2407pxe03", "allwinner,sun8i-a23";
+};
diff --git a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
index cb5daafcb7c2..6d06e24d446b 100644
--- a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
+++ b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2809pxe04.dts
@@ -42,202 +42,9 @@
/dts-v1/;
#include "sun8i-a23.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
-#include <dt-bindings/pwm/pwm.h>
+#include "sun8i-reference-design-tablet.dtsi"
/ {
model = "Polaroid MID2809PXE04 tablet";
compatible = "polaroid,mid2809pxe04", "allwinner,sun8i-a23";
-
- aliases {
- serial0 = &r_uart;
- };
-
- backlight: backlight {
- compatible = "pwm-backlight";
- pinctrl-names = "default";
- pinctrl-0 = <&bl_en_pin_mid2809>;
- pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
- default-brightness-level = <8>;
- enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
-
-&ehci0 {
- status = "okay";
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
-};
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_a>;
- status = "okay";
-};
-
-&lradc {
- vref-supply = <&reg_vcc3v0>;
- status = "okay";
-
- button@200 {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- channel = <0>;
- voltage = <200000>;
- };
-
- button@400 {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- channel = <0>;
- voltage = <400000>;
- };
-};
-
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_mid2809>;
- vmmc-supply = <&reg_dcdc1>;
- bus-width = <4>;
- cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
- cd-inverted;
- status = "okay";
-};
-
-&pio {
- bl_en_pin_mid2809: bl_en_pin@0 {
- allwinner,pins = "PH6";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
- mmc0_cd_pin_mid2809: mmc0_cd_pin@0 {
- allwinner,pins = "PB4";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&pwm0_pins>;
- status = "okay";
-};
-
-&r_rsb {
- status = "okay";
-
- axp22x: pmic@3a3 {
- compatible = "x-powers,axp223";
- reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- eldoin-supply = <&reg_dcdc1>;
- };
-};
-
-&r_uart {
- pinctrl-names = "default";
- pinctrl-0 = <&r_uart_pins_a>;
- status = "okay";
-};
-
-#include "axp22x.dtsi"
-
-&reg_aldo1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-io";
-};
-
-&reg_aldo2 {
- regulator-always-on;
- regulator-min-microvolt = <2350000>;
- regulator-max-microvolt = <2650000>;
- regulator-name = "vdd-dll";
-};
-
-&reg_aldo3 {
- regulator-always-on;
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-pll-avcc";
-};
-
-&reg_dc1sw {
- regulator-name = "vcc-lcd";
-};
-
-&reg_dc5ldo {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpus";
-};
-
-&reg_dcdc1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
-};
-
-&reg_dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-sys";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc5 {
- regulator-always-on;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-name = "vcc-dram";
-};
-
-&reg_rtc_ldo {
- regulator-name = "vcc-rtc";
-};
-
-&simplefb_lcd {
- vcc-lcd-supply = <&reg_dc1sw>;
-};
-
-/*
- * FIXME for now we only support host mode and rely on u-boot to have
- * turned on Vbus which is controlled by the axp223 pmic on the board.
- *
- * Once we have axp223 support we should switch to fully supporting otg.
- */
-&usb_otg {
- dr_mode = "host";
- status = "okay";
-};
-
-&usbphy {
- status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
index 6062ea7a9903..956320a6cc78 100644
--- a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
@@ -48,18 +48,3 @@
model = "Q8 A23 Tablet";
compatible = "allwinner,q8-a23", "allwinner,sun8i-a23";
};
-
-/*
- * FIXME for now we only support host mode and rely on u-boot to have
- * turned on Vbus which is controlled by the axp223 pmic on the board.
- *
- * Once we have axp223 support we should switch to fully supporting otg.
- */
-&usb_otg {
- dr_mode = "host";
- status = "okay";
-};
-
-&usbphy {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts b/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
index 1aefc6793e25..65660324005c 100644
--- a/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
+++ b/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
@@ -42,59 +42,18 @@
/dts-v1/;
#include "sun8i-a33.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include "sun8i-reference-design-tablet.dtsi"
/ {
model = "Allwinner GA10H Quad Core Tablet (v1.1)";
compatible = "allwinner,ga10h-v1.1", "allwinner,sun8i-a33";
-
- aliases {
- serial0 = &r_uart;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
};
&ehci0 {
status = "okay";
};
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins_a>;
- status = "okay";
-};
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_a>;
- status = "okay";
-};
-
&lradc {
- vref-supply = <&reg_vcc3v0>;
- status = "okay";
-
- button@200 {
- label = "Volume Up";
- linux,code = <KEY_VOLUMEUP>;
- channel = <0>;
- voltage = <200000>;
- };
-
- button@400 {
- label = "Volume Down";
- linux,code = <KEY_VOLUMEDOWN>;
- channel = <0>;
- voltage = <400000>;
- };
-
button@600 {
label = "Back";
linux,code = <KEY_BACK>;
@@ -103,40 +62,6 @@
};
};
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_q8h>;
- vmmc-supply = <&reg_vcc3v0>;
- bus-width = <4>;
- cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
- cd-inverted;
- status = "okay";
-};
-
&ohci0 {
status = "okay";
};
-
-&pio {
- mmc0_cd_pin_q8h: mmc0_cd_pin@0 {
- allwinner,pins = "PB4";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&r_uart {
- pinctrl-names = "default";
- pinctrl-0 = <&r_uart_pins_a>;
- status = "okay";
-};
-
-&usb_otg {
- dr_mode = "host";
- status = "okay";
-};
-
-&usbphy {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts
index 44b32296a025..b0bc2360f8c4 100644
--- a/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts
@@ -48,18 +48,3 @@
model = "Q8 A33 Tablet";
compatible = "allwinner,q8-a33", "allwinner,sun8i-a33";
};
-
-/*
- * FIXME for now we only support host mode and rely on u-boot to have
- * turned on Vbus which is controlled by the axp223 pmic on the board.
- *
- * Once we have axp223 support we should switch to fully supporting otg.
- */
-&usb_otg {
- dr_mode = "host";
- status = "okay";
-};
-
-&usbphy {
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
new file mode 100644
index 000000000000..f3b1d5f6dbd2
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+ model = "Banana Pi BPI-M2-Plus";
+ compatible = "sinovoip,bpi-m2-plus", "allwinner,sun8i-h3";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_led_bpi_m2p>;
+
+ pwr_led {
+ label = "bananapi-m2-plus:red:pwr";
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
+ default-state = "on";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&sw_r_bpi_m2p>;
+
+ sw4 {
+ label = "power";
+ linux,code = <BTN_0>;
+ gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_en_bpi_m2p>;
+ reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
+ };
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ir {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_pins_a>;
+ status = "okay";
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+ cd-inverted;
+ status = "okay";
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>;
+ vmmc-supply = <&reg_vcc3v3>;
+ vqmmc-supply = <&reg_vcc3v3>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ interrupt-parent = <&pio>;
+ interrupts = <6 10 IRQ_TYPE_LEVEL_LOW>; /* PG10 / EINT10 */
+ interrupt-names = "host-wake";
+ };
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
+ vmmc-supply = <&reg_vcc3v3>;
+ vqmmc-supply = <&reg_vcc3v3>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&r_pio {
+ pwr_led_bpi_m2p: led_pins@0 {
+ allwinner,pins = "PL10";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ sw_r_bpi_m2p: key_pins@0 {
+ allwinner,pins = "PL3";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ wifi_en_bpi_m2p: wifi_en_pin {
+ allwinner,pins = "PL7";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_a>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins_a>;
+ status = "okay";
+};
+
+&usbphy {
+ /* USB VBUS is on as long as VCC-IO is on */
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index 9871bad34742..fdf9fdbda267 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -327,13 +327,6 @@
interrupt-controller;
#interrupt-cells = <3>;
- uart0_pins_a: uart0@0 {
- allwinner,pins = "PA4", "PA5";
- allwinner,function = "uart0";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
mmc0_pins_a: mmc0@0 {
allwinner,pins = "PF0", "PF1", "PF2", "PF3",
"PF4", "PF5";
@@ -366,6 +359,20 @@
allwinner,drive = <SUN4I_PINCTRL_30_MA>;
allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
};
+
+ uart0_pins_a: uart0@0 {
+ allwinner,pins = "PA4", "PA5";
+ allwinner,function = "uart0";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ uart1_pins_a: uart1@0 {
+ allwinner,pins = "PG6", "PG7", "PG8", "PG9";
+ allwinner,function = "uart1";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
};
timer@01c20c00 {
diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi
index 346a49d805a7..60fa9585022b 100644
--- a/arch/arm/boot/dts/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi
@@ -39,140 +39,13 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "sunxi-q8-common.dtsi"
+#include "sunxi-reference-design-tablet.dtsi"
+#include "sun8i-reference-design-tablet.dtsi"
-#include <dt-bindings/pwm/pwm.h>
-
-/ {
- aliases {
- serial0 = &r_uart;
- };
-
- backlight: backlight {
- compatible = "pwm-backlight";
- pinctrl-names = "default";
- pinctrl-0 = <&bl_en_pin_q8>;
- pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
- default-brightness-level = <8>;
- enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-};
-
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_q8>;
- vmmc-supply = <&reg_dcdc1>;
- bus-width = <4>;
- cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
- cd-inverted;
- status = "okay";
-};
-
-&pio {
- bl_en_pin_q8: bl_en_pin@0 {
- allwinner,pins = "PH6";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
- mmc0_cd_pin_q8: mmc0_cd_pin@0 {
- allwinner,pins = "PB4";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
-&r_rsb {
- status = "okay";
-
- axp22x: pmic@3a3 {
- compatible = "x-powers,axp223";
- reg = <0x3a3>;
- interrupt-parent = <&nmi_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- eldoin-supply = <&reg_dcdc1>;
- };
-};
-
-#include "axp22x.dtsi"
-
-&reg_aldo1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-io";
-};
-
-&reg_aldo2 {
- regulator-always-on;
- regulator-min-microvolt = <2350000>;
- regulator-max-microvolt = <2650000>;
- regulator-name = "vdd-dll";
-};
-
-&reg_aldo3 {
- regulator-always-on;
- regulator-min-microvolt = <2700000>;
- regulator-max-microvolt = <3300000>;
- regulator-name = "vcc-pll-avcc";
-};
-
-&reg_dc1sw {
- regulator-name = "vcc-lcd";
-};
-
-&reg_dc5ldo {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpus";
-};
-
-&reg_dcdc1 {
- regulator-always-on;
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-name = "vcc-3v0";
-};
-
-&reg_dcdc2 {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-sys";
-};
-
-&reg_dcdc3 {
- regulator-always-on;
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <1400000>;
- regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc5 {
- regulator-always-on;
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-name = "vcc-dram";
-};
-
-&reg_rtc_ldo {
- regulator-name = "vcc-rtc";
-};
-
-&r_uart {
- pinctrl-names = "default";
- pinctrl-0 = <&r_uart_pins_a>;
- status = "okay";
+&ehci0 {
+ status = "okay";
};
-&simplefb_lcd {
- vcc-lcd-supply = <&reg_dc1sw>;
+&usbphy {
+ usb1_vbus-supply = <&reg_dldo1>;
};
diff --git a/arch/arm/boot/dts/sun8i-r16-parrot.dts b/arch/arm/boot/dts/sun8i-r16-parrot.dts
new file mode 100644
index 000000000000..47553e522982
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-r16-parrot.dts
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2016 Quentin Schulz
+ *
+ * Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a33.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Allwinner R16 EVB (Parrot)";
+ compatible = "allwinner,parrot", "allwinner,sun8i-a33";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pins_parrot>;
+
+ led1 {
+ label = "parrot:led1:usr";
+ gpio = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
+ };
+
+ led2 {
+ label = "parrot:led2:usr";
+ gpio = <&pio 4 16 GPIO_ACTIVE_HIGH>; /* PE16 */
+ };
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL06 */
+ };
+
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ status = "okay";
+
+ /*
+ * FIXME: An as-yet-unknown accelerometer is connected to this
+ * i2c bus.
+ */
+};
+
+&lradc {
+ vref-supply = <&reg_aldo3>;
+ status = "okay";
+
+ button@0 {
+ label = "V+";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <190000>;
+ };
+
+ button@1 {
+ label = "V-";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <390000>;
+ };
+
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_parrot>;
+ vmmc-supply = <&reg_dcdc1>;
+ cd-gpios = <&pio 3 14 GPIO_ACTIVE_LOW>; /* PD14 */
+ bus-width = <4>;
+ status = "okay";
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_a>, <&wifi_reset_pin_parrot>;
+ vmmc-supply = <&reg_aldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
+&mmc2_8bit_pins {
+ allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&pio {
+ mmc0_cd_pin_parrot: mmc0_cd_pin@0 {
+ allwinner,pins = "PD14";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ led_pins_parrot: led_pins@0 {
+ allwinner,pins = "PE16", "PE17";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ usb0_id_det: usb0_id_detect_pin@0 {
+ allwinner,pins = "PD10";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ usb1_vbus_pin_parrot: usb1_vbus_pin@0 {
+ allwinner,pins = "PD12";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&r_pio {
+ wifi_reset_pin_parrot: wifi_reset_pin@0 {
+ allwinner,pins = "PL6";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+};
+
+&r_rsb {
+ status = "okay";
+
+ axp22x: pmic@3a3 {
+ compatible = "x-powers,axp223";
+ reg = <0x3a3>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ drivevbus-supply = <&reg_vcc5v0>;
+ x-powers,drive-vbus-en;
+ };
+};
+
+#include "axp22x.dtsi"
+
+&reg_aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-io";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <2350000>;
+ regulator-max-microvolt = <2650000>;
+ regulator-name = "vdd-dll";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pll-avcc";
+};
+
+&reg_dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpus";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-3v0";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+ /*
+ * TODO: WiFi chip needs dldo1 AND dldo2 to be on to be powered.
+ * Remove next line once it is possible to sync two regulators.
+ */
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi0";
+};
+
+&reg_dldo2 {
+ /*
+ * TODO: WiFi chip needs dldo1 AND dldo2 to be on to be powered.
+ * Remove next line once it is possible to sync two regulators.
+ */
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi1";
+};
+
+&reg_dldo3 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-3v0-csi";
+};
+
+&reg_drivevbus {
+ regulator-name = "usb0-vbus";
+ status = "okay";
+};
+
+&reg_eldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-1v2-hsic";
+};
+
+&reg_eldo2 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-dsp";
+};
+
+&reg_eldo3 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "eldo3";
+};
+
+&reg_usb1_vbus {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_vbus_pin_parrot>;
+ gpio = <&pio 3 12 GPIO_ACTIVE_HIGH>; /* PD12 */
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins_b>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_det>;
+ usb0_vbus-supply = <&reg_drivevbus>;
+ usb0_id_det-gpios = <&pio 3 10 GPIO_ACTIVE_HIGH>; /* PD10 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb1_vbus-supply = <&reg_usb1_vbus>;
+};
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
new file mode 100644
index 000000000000..9d9036140511
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "sunxi-reference-design-tablet.dtsi"
+
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ aliases {
+ serial0 = &r_uart;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bl_en_pin>;
+ pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
+ default-brightness-level = <8>;
+ enable-gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
+ cd-inverted;
+ status = "okay";
+};
+
+&pio {
+ bl_en_pin: bl_en_pin@0 {
+ allwinner,pins = "PH6";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ mmc0_cd_pin: mmc0_cd_pin@0 {
+ allwinner,pins = "PB4";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+
+ usb0_id_detect_pin: usb0_id_detect_pin@0 {
+ allwinner,pins = "PH8";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+};
+
+&r_rsb {
+ status = "okay";
+
+ axp22x: pmic@3a3 {
+ compatible = "x-powers,axp223";
+ reg = <0x3a3>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ eldoin-supply = <&reg_dcdc1>;
+ drivevbus-supply = <&reg_vcc5v0>;
+ x-powers,drive-vbus-en;
+ };
+};
+
+#include "axp22x.dtsi"
+
+&reg_aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-io";
+};
+
+&reg_aldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <2350000>;
+ regulator-max-microvolt = <2650000>;
+ regulator-name = "vdd-dll";
+};
+
+&reg_aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pll-avcc";
+};
+
+&reg_dc1sw {
+ regulator-name = "vcc-lcd";
+};
+
+&reg_dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpus";
+};
+
+&reg_dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-3v0";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-sys";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-name = "vcc-dram";
+};
+
+&reg_dldo1 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+};
+
+&reg_drivevbus {
+ regulator-name = "usb0-vbus";
+ status = "okay";
+};
+
+&reg_rtc_ldo {
+ regulator-name = "vcc-rtc";
+};
+
+&r_uart {
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_uart_pins_a>;
+ status = "okay";
+};
+
+&simplefb_lcd {
+ vcc-lcd-supply = <&reg_dc1sw>;
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_id_detect_pin>;
+ usb0_id_det-gpio = <&pio 7 8 GPIO_ACTIVE_HIGH>; /* PH8 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb0_vbus-supply = <&reg_drivevbus>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index eb2ccd0a3bd5..1526b41c70f1 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -45,7 +45,6 @@
/dts-v1/;
#include "sun9i-a80.dtsi"
-#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/sun4i-a10.h>
@@ -79,26 +78,10 @@
};
};
-&pio {
- led_pins_cubieboard4: led-pins@0 {
- allwinner,pins = "PH6", "PH17";
- allwinner,function = "gpio_out";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
- };
-
- mmc0_cd_pin_cubieboard4: mmc0_cd_pin@0 {
- allwinner,pins = "PH18";
- allwinner,function = "gpio_in";
- allwinner,drive = <SUN4I_PINCTRL_10_MA>;
- allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>, <&mmc0_cd_pin_cubieboard4>;
- vmmc-supply = <&reg_vcc3v0>;
+ vmmc-supply = <&reg_dcdc1>;
bus-width = <4>;
cd-gpios = <&pio 7 18 GPIO_ACTIVE_HIGH>; /* PH18 */
cd-inverted;
@@ -108,7 +91,7 @@
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_8bit_pins>;
- vmmc-supply = <&reg_vcc3v0>;
+ vmmc-supply = <&reg_dcdc1>;
bus-width = <8>;
non-removable;
cap-mmc-hw-reset;
@@ -120,14 +103,157 @@
allwinner,drive = <SUN4I_PINCTRL_40_MA>;
};
+&pio {
+ led_pins_cubieboard4: led-pins@0 {
+ allwinner,pins = "PH6", "PH17";
+ allwinner,function = "gpio_out";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+ };
+
+ mmc0_cd_pin_cubieboard4: mmc0_cd_pin@0 {
+ allwinner,pins = "PH18";
+ allwinner,function = "gpio_in";
+ allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+ allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+ };
+};
+
&r_ir {
status = "okay";
};
&r_rsb {
status = "okay";
+
+ axp809: pmic@3a3 {
+ reg = <0x3a3>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ reg_aldo1: aldo1 {
+ /*
+ * TODO: This should be handled by the
+ * USB PHY driver.
+ */
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc33-usbh";
+ };
+
+ reg_aldo2: aldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pb-io-cam";
+ };
+
+ aldo3 {
+ /* unused */
+ };
+
+ reg_dc5ldo: dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpus-09-usbh";
+ };
+
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-3v";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-gpu";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpua";
+ };
+
+ reg_dcdc4: dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-sys-usb0-hdmi";
+ };
+
+ reg_dcdc5: dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1425000>;
+ regulator-max-microvolt = <1575000>;
+ regulator-name = "vcc-dram";
+ };
+
+ reg_dldo1: dldo1 {
+ /*
+ * The WiFi chip supports a wide range
+ * (3.0 ~ 4.8V) of voltages, and so does
+ * this regulator (3.0 ~ 4.2V), but
+ * Allwinner SDK always sets it to 3.3V.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+ };
+
+ reg_dldo2: dldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-pl";
+ };
+
+ reg_eldo1: eldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-dvdd-cam";
+ };
+
+ reg_eldo2: eldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pe";
+ };
+
+ reg_eldo3: eldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-pm-codec-io1";
+ };
+
+ reg_ldo_io0: ldo_io0 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-pg";
+ };
+
+ reg_ldo_io1: ldo_io1 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-name = "vcc-pa-gmac-2v5";
+ };
+
+ reg_rtc_ldo: rtc_ldo {
+ regulator-name = "vcc-rtc-vdd1v8-io";
+ };
+ };
+ };
};
+#include "axp809.dtsi"
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index d7a20d92b114..7fd22e888602 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -44,7 +44,6 @@
/dts-v1/;
#include "sun9i-a80.dtsi"
-#include "sunxi-common-regulators.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/sun4i-a10.h>
@@ -85,6 +84,17 @@
};
};
+ reg_usb1_vbus: usb1-vbus {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_vbus_pin_optimus>;
+ regulator-name = "usb1-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ };
+
reg_usb3_vbus: usb3-vbus {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -109,6 +119,31 @@
status = "okay";
};
+&mmc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>, <&mmc0_cd_pin_optimus>;
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 7 18 GPIO_ACTIVE_HIGH>; /* PH8 */
+ cd-inverted;
+ status = "okay";
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
+&mmc2_8bit_pins {
+ /* Increase drive strength for DDR modes */
+ allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+};
+
&ohci0 {
status = "okay";
};
@@ -147,37 +182,6 @@
};
};
-&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins>, <&mmc0_cd_pin_optimus>;
- vmmc-supply = <&reg_vcc3v0>;
- bus-width = <4>;
- cd-gpios = <&pio 7 18 GPIO_ACTIVE_HIGH>; /* PH8 */
- cd-inverted;
- status = "okay";
-};
-
-&mmc2 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc2_8bit_pins>;
- vmmc-supply = <&reg_vcc3v0>;
- bus-width = <8>;
- non-removable;
- cap-mmc-hw-reset;
- status = "okay";
-};
-
-&mmc2_8bit_pins {
- /* Increase drive strength for DDR modes */
- allwinner,drive = <SUN4I_PINCTRL_40_MA>;
-};
-
-&reg_usb1_vbus {
- pinctrl-0 = <&usb1_vbus_pin_optimus>;
- gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- status = "okay";
-};
-
&r_ir {
status = "okay";
};
@@ -193,8 +197,135 @@
&r_rsb {
status = "okay";
+
+ axp809: pmic@3a3 {
+ reg = <0x3a3>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ reg_aldo1: aldo1 {
+ /*
+ * TODO: This should be handled by the
+ * USB PHY driver.
+ */
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc33-usbh";
+ };
+
+ reg_aldo2: aldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pb-io-cam";
+ };
+
+ aldo3 {
+ /* unused */
+ };
+
+ reg_dc5ldo: dc5ldo {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpus-09-usbh";
+ };
+
+ reg_dcdc1: dcdc1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-3v";
+ };
+
+ reg_dcdc2: dcdc2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-gpu";
+ };
+
+ reg_dcdc3: dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-cpua";
+ };
+
+ reg_dcdc4: dcdc4 {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vdd-sys-usb0-hdmi";
+ };
+
+ reg_dcdc5: dcdc5 {
+ regulator-always-on;
+ regulator-min-microvolt = <1425000>;
+ regulator-max-microvolt = <1575000>;
+ regulator-name = "vcc-dram";
+ };
+
+ reg_dldo1: dldo1 {
+ /*
+ * The WiFi chip supports a wide range
+ * (3.0 ~ 4.8V) of voltages, and so does
+ * this regulator (3.0 ~ 4.2V), but
+ * Allwinner SDK always sets it to 3.3V.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi";
+ };
+
+ reg_dldo2: dldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-pl";
+ };
+
+ reg_eldo1: eldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-dvdd-cam";
+ };
+
+ reg_eldo2: eldo2 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-pe";
+ };
+
+ reg_eldo3: eldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-pm-codec-io1";
+ };
+
+ reg_ldo_io0: ldo_io0 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "vcc-pg";
+ };
+
+ reg_ldo_io1: ldo_io1 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-name = "vcc-pa-gmac-2v5";
+ };
+
+ reg_rtc_ldo: rtc_ldo {
+ regulator-name = "vcc-rtc-vdd1v8-io";
+ };
+ };
+ };
};
+#include "axp809.dtsi"
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sunxi-q8-common.dtsi b/arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi
index b8241462fcea..b8241462fcea 100644
--- a/arch/arm/boot/dts/sunxi-q8-common.dtsi
+++ b/arch/arm/boot/dts/sunxi-reference-design-tablet.dtsi
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index c970bf65c74c..1dfc492cc004 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -1149,7 +1149,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index 9d868af97b8e..70cf40996c3f 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -1020,9 +1020,9 @@
#address-cells = <1>;
#size-cells = <0>;
- clk32k_in: clock {
+ clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index 89047edb5c5f..17dd14545862 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -277,7 +277,7 @@
#address-cells = <1>;
#size-cells = <0>;
- clk32k_in: clock {
+ clk32k_in: clock@0 {
compatible = "fixed-clock";
reg = <0>;
#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/tegra124-apalis-emc.dtsi b/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
new file mode 100644
index 000000000000..ca2c3a557895
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
@@ -0,0 +1,1502 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+ clock@60006000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-20400000 {
+ clock-frequency = <20400000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-40800000 {
+ clock-frequency = <40800000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-68000000 {
+ clock-frequency = <68000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-102000000 {
+ clock-frequency = <102000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-204000000 {
+ clock-frequency = <204000000>;
+ nvidia,parent-clock-frequency = <408000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
+ clock-names = "emc-parent";
+ };
+ timing-300000000 {
+ clock-frequency = <300000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C>;
+ clock-names = "emc-parent";
+ };
+ timing-396000000 {
+ clock-frequency = <396000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M>;
+ clock-names = "emc-parent";
+ };
+ timing-528000000 {
+ clock-frequency = <528000000>;
+ nvidia,parent-clock-frequency = <528000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-600000000 {
+ clock-frequency = <600000000>;
+ nvidia,parent-clock-frequency = <600000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_C_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-792000000 {
+ clock-frequency = <792000000>;
+ nvidia,parent-clock-frequency = <792000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ timing-924000000 {
+ clock-frequency = <924000000>;
+ nvidia,parent-clock-frequency = <924000000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_M_UD>;
+ clock-names = "emc-parent";
+ };
+ };
+ };
+
+ emc@7001b000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000 0x00000003
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000a 0x00000005
+ 0x0000000b 0x00000000
+ 0x00000000 0x00000003
+ 0x00000003 0x00000000
+ 0x00000006 0x00000006
+ 0x00000006 0x00000002
+ 0x00000000 0x00000005
+ 0x00000005 0x00010000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000c 0x0000000d
+ 0x0000000f 0x00000060
+ 0x00000000 0x00000018
+ 0x00000002 0x00000002
+ 0x00000001 0x00000000
+ 0x00000007 0x0000000f
+ 0x00000005 0x00000005
+ 0x00000004 0x00000005
+ 0x00000004 0x00000000
+ 0x00000000 0x00000005
+ 0x00000005 0x00000064
+ 0x00000000 0x00000000
+ 0x00000000 0x106aa298
+ 0x002c00a0 0x00008000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000
+ 0x0000fc00 0x0000fc00
+ 0x0000fc00 0x0000fc00
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000e0e 0x81f1f108
+ 0x07070004 0x0000003f
+ 0x016eeeee 0x51451400
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x00000007 0x00000000
+ 0x00000042 0x000e000e
+ 0x00000000 0x00000003
+ 0x0000f2f3 0x800001c5
+ 0x0000000a
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000000 0x00000005
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000a 0x00000005
+ 0x0000000b 0x00000000
+ 0x00000000 0x00000003
+ 0x00000003 0x00000000
+ 0x00000006 0x00000006
+ 0x00000006 0x00000002
+ 0x00000000 0x00000005
+ 0x00000005 0x00010000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000c 0x0000000d
+ 0x0000000f 0x0000009a
+ 0x00000000 0x00000026
+ 0x00000002 0x00000002
+ 0x00000001 0x00000000
+ 0x00000007 0x0000000f
+ 0x00000006 0x00000006
+ 0x00000004 0x00000005
+ 0x00000004 0x00000000
+ 0x00000000 0x00000005
+ 0x00000005 0x000000a0
+ 0x00000000 0x00000000
+ 0x00000000 0x106aa298
+ 0x002c00a0 0x00008000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000
+ 0x0000fc00 0x0000fc00
+ 0x0000fc00 0x0000fc00
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000e0e 0x81f1f108
+ 0x07070004 0x0000003f
+ 0x016eeeee 0x51451400
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x0000000b 0x00000000
+ 0x00000042 0x000e000e
+ 0x00000000 0x00000003
+ 0x0000f2f3 0x8000023a
+ 0x0000000a
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000001 0x0000000a
+ 0x00000000 0x00000001
+ 0x00000000 0x00000004
+ 0x0000000a 0x00000005
+ 0x0000000b 0x00000000
+ 0x00000000 0x00000003
+ 0x00000003 0x00000000
+ 0x00000006 0x00000006
+ 0x00000006 0x00000002
+ 0x00000000 0x00000005
+ 0x00000005 0x00010000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000c 0x0000000d
+ 0x0000000f 0x00000134
+ 0x00000000 0x0000004d
+ 0x00000002 0x00000002
+ 0x00000001 0x00000000
+ 0x00000008 0x0000000f
+ 0x0000000c 0x0000000c
+ 0x00000004 0x00000005
+ 0x00000004 0x00000000
+ 0x00000000 0x00000005
+ 0x00000005 0x0000013f
+ 0x00000000 0x00000000
+ 0x00000000 0x106aa298
+ 0x002c00a0 0x00008000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000
+ 0x0000fc00 0x0000fc00
+ 0x0000fc00 0x0000fc00
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000e0e 0x81f1f108
+ 0x07070004 0x0000003f
+ 0x016eeeee 0x51451400
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x00000015 0x00000000
+ 0x00000042 0x000e000e
+ 0x00000000 0x00000003
+ 0x0000f2f3 0x80000370
+ 0x0000000a
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000003 0x00000011
+ 0x00000000 0x00000002
+ 0x00000000 0x00000004
+ 0x0000000a 0x00000005
+ 0x0000000b 0x00000000
+ 0x00000000 0x00000003
+ 0x00000003 0x00000000
+ 0x00000006 0x00000006
+ 0x00000006 0x00000002
+ 0x00000000 0x00000005
+ 0x00000005 0x00010000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000c 0x0000000d
+ 0x0000000f 0x00000202
+ 0x00000000 0x00000080
+ 0x00000002 0x00000002
+ 0x00000001 0x00000000
+ 0x0000000f 0x0000000f
+ 0x00000013 0x00000013
+ 0x00000004 0x00000005
+ 0x00000004 0x00000001
+ 0x00000000 0x00000005
+ 0x00000005 0x00000213
+ 0x00000000 0x00000000
+ 0x00000000 0x106aa298
+ 0x002c00a0 0x00008000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000
+ 0x0000fc00 0x0000fc00
+ 0x0000fc00 0x0000fc00
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000e0e 0x81f1f108
+ 0x07070004 0x0000003f
+ 0x016eeeee 0x51451400
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x00000022 0x00000000
+ 0x00000042 0x000e000e
+ 0x00000000 0x00000003
+ 0x0000f2f3 0x8000050e
+ 0x0000000a
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
+
+ nvidia,emc-configuration = <
+ 0x00000004 0x0000001a
+ 0x00000000 0x00000003
+ 0x00000001 0x00000004
+ 0x0000000a 0x00000005
+ 0x0000000b 0x00000001
+ 0x00000001 0x00000003
+ 0x00000003 0x00000000
+ 0x00000006 0x00000006
+ 0x00000006 0x00000002
+ 0x00000000 0x00000005
+ 0x00000005 0x00010000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000004
+ 0x0000000c 0x0000000d
+ 0x0000000f 0x00000304
+ 0x00000000 0x000000c1
+ 0x00000002 0x00000002
+ 0x00000001 0x00000000
+ 0x00000018 0x0000000f
+ 0x0000001c 0x0000001c
+ 0x00000004 0x00000005
+ 0x00000004 0x00000002
+ 0x00000000 0x00000005
+ 0x00000005 0x0000031c
+ 0x00000000 0x00000000
+ 0x00000000 0x106aa298
+ 0x002c00a0 0x00008000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x000fc000 0x000fc000
+ 0x000fc000 0x000fc000
+ 0x0000fc00 0x0000fc00
+ 0x0000fc00 0x0000fc00
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000e0e 0x81f1f108
+ 0x07070004 0x0000003f
+ 0x016eeeee 0x51451400
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x00000033 0x00000000
+ 0x00000042 0x000e000e
+ 0x00000000 0x00000003
+ 0x0000f2f3 0x80000713
+ 0x0000000a
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008cd>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100003>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000009 0x00000035
+ 0x00000000 0x00000006
+ 0x00000002 0x00000005
+ 0x0000000a 0x00000005
+ 0x0000000b 0x00000002
+ 0x00000002 0x00000003
+ 0x00000003 0x00000000
+ 0x00000005 0x00000005
+ 0x00000006 0x00000002
+ 0x00000000 0x00000004
+ 0x00000006 0x00010000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000003
+ 0x0000000d 0x0000000f
+ 0x00000011 0x00000607
+ 0x00000000 0x00000181
+ 0x00000002 0x00000002
+ 0x00000001 0x00000000
+ 0x00000032 0x0000000f
+ 0x00000038 0x00000038
+ 0x00000004 0x00000005
+ 0x00000004 0x00000006
+ 0x00000000 0x00000005
+ 0x00000005 0x00000638
+ 0x00000000 0x00000000
+ 0x00000000 0x106aa298
+ 0x002c00a0 0x00008000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00080000 0x00080000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00008000 0x00000000
+ 0x00000000 0x00008000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00090000 0x00090000
+ 0x00090000 0x00090000
+ 0x00009000 0x00009000
+ 0x00009000 0x00009000
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000707 0x81f1f108
+ 0x07070004 0x0000003f
+ 0x016eeeee 0x51451400
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x00000066 0x00000000
+ 0x00000100 0x000e000e
+ 0x00000000 0x00000003
+ 0x0000d2b3 0x80000d22
+ 0x0000000a
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x000008d5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000321>;
+ nvidia,emc-mrs-wait-cnt = <0x0173000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000000d 0x0000004d
+ 0x00000000 0x00000009
+ 0x00000003 0x00000004
+ 0x00000008 0x00000002
+ 0x00000009 0x00000003
+ 0x00000003 0x00000002
+ 0x00000002 0x00000000
+ 0x00000003 0x00000003
+ 0x00000005 0x00000002
+ 0x00000000 0x00000002
+ 0x00000007 0x00020000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000001
+ 0x0000000e 0x00000010
+ 0x00000012 0x000008e4
+ 0x00000000 0x00000239
+ 0x00000001 0x00000008
+ 0x00000001 0x00000000
+ 0x0000004b 0x0000000e
+ 0x00000052 0x00000200
+ 0x00000004 0x00000005
+ 0x00000004 0x00000008
+ 0x00000000 0x00000005
+ 0x00000005 0x00000924
+ 0x00000000 0x00000000
+ 0x00000000 0x104ab098
+ 0x002c00a0 0x00008000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00098000 0x00098000
+ 0x00000000 0x00098000
+ 0x00098000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00050000 0x00050000
+ 0x00050000 0x00050000
+ 0x00005000 0x00005000
+ 0x00005000 0x00005000
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000505 0x81f1f108
+ 0x07070004 0x00000000
+ 0x016eeeee 0x51451420
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x00000096 0x00000000
+ 0x00000100 0x0173000e
+ 0x00000000 0x00000003
+ 0x000052a3 0x800012d7
+ 0x00000009
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73340000>;
+ nvidia,emc-cfg-2 = <0x00000895>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200000>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000521>;
+ nvidia,emc-mrs-wait-cnt = <0x015b000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x01231339>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000011 0x00000066
+ 0x00000000 0x0000000c
+ 0x00000004 0x00000004
+ 0x00000008 0x00000002
+ 0x0000000a 0x00000004
+ 0x00000004 0x00000002
+ 0x00000002 0x00000000
+ 0x00000003 0x00000003
+ 0x00000005 0x00000002
+ 0x00000000 0x00000001
+ 0x00000008 0x00020000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x0000000f 0x00000010
+ 0x00000012 0x00000bd1
+ 0x00000000 0x000002f4
+ 0x00000001 0x00000008
+ 0x00000001 0x00000000
+ 0x00000063 0x0000000f
+ 0x0000006c 0x00000200
+ 0x00000004 0x00000005
+ 0x00000004 0x0000000b
+ 0x00000000 0x00000005
+ 0x00000005 0x00000c11
+ 0x00000000 0x00000000
+ 0x00000000 0x104ab098
+ 0x002c00a0 0x00008000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00030000 0x00030000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00070000 0x00070000
+ 0x00000000 0x00070000
+ 0x00070000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00038000 0x00038000
+ 0x00038000 0x00038000
+ 0x00003800 0x00003800
+ 0x00003800 0x00003800
+ 0x10000280 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc081
+ 0x00000505 0x81f1f108
+ 0x07070004 0x00000000
+ 0x016eeeee 0x51451420
+ 0x00514514 0x00514514
+ 0x51451400 0x0000003f
+ 0x000000c6 0x00000000
+ 0x00000100 0x015b000e
+ 0x00000000 0x00000003
+ 0x000052a3 0x8000188b
+ 0x00000009
+ >;
+ };
+
+ timing-528000000 {
+ clock-frequency = <528000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200008>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000941>;
+ nvidia,emc-mrs-wait-cnt = <0x0139000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0123133d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000018 0x00000088
+ 0x00000000 0x00000010
+ 0x00000006 0x00000006
+ 0x00000009 0x00000002
+ 0x0000000d 0x00000006
+ 0x00000006 0x00000002
+ 0x00000002 0x00000000
+ 0x00000003 0x00000003
+ 0x00000006 0x00000002
+ 0x00000000 0x00000001
+ 0x00000009 0x00030000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000010 0x00000012
+ 0x00000014 0x00000fd6
+ 0x00000000 0x000003f5
+ 0x00000002 0x0000000b
+ 0x00000001 0x00000000
+ 0x00000085 0x00000012
+ 0x00000090 0x00000200
+ 0x00000004 0x00000005
+ 0x00000004 0x00000010
+ 0x00000000 0x00000006
+ 0x00000006 0x00001017
+ 0x00000000 0x00000000
+ 0x00000000 0x104ab098
+ 0xe01200b1 0x00008000
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00054000 0x00054000
+ 0x00000000 0x00054000
+ 0x00054000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x0000000c 0x0000000c
+ 0x0000000c 0x0000000c
+ 0x0000000c 0x0000000c
+ 0x0000000c 0x0000000c
+ 0x100002a0 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc085
+ 0x00000505 0x81f1f108
+ 0x07070004 0x00000000
+ 0x016eeeee 0x51451420
+ 0x00514514 0x00514514
+ 0x51451400 0x0606003f
+ 0x00000000 0x00000000
+ 0x00000100 0x0139000e
+ 0x00000000 0x00000003
+ 0x000042a0 0x80002062
+ 0x0000000a
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200010>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000b61>;
+ nvidia,emc-mrs-wait-cnt = <0x0127000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040008>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0121113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000001b 0x0000009b
+ 0x00000000 0x00000013
+ 0x00000007 0x00000007
+ 0x0000000b 0x00000003
+ 0x00000010 0x00000007
+ 0x00000007 0x00000002
+ 0x00000002 0x00000000
+ 0x00000005 0x00000005
+ 0x0000000a 0x00000002
+ 0x00000000 0x00000003
+ 0x0000000b 0x00070000
+ 0x00000003 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000002
+ 0x00000012 0x00000016
+ 0x00000018 0x00001208
+ 0x00000000 0x00000482
+ 0x00000002 0x0000000d
+ 0x00000001 0x00000000
+ 0x00000097 0x00000015
+ 0x000000a3 0x00000200
+ 0x00000004 0x00000005
+ 0x00000004 0x00000013
+ 0x00000000 0x00000006
+ 0x00000006 0x00001248
+ 0x00000000 0x00000000
+ 0x00000000 0x104ab098
+ 0xe00e00b1 0x00008000
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00048000 0x00048000
+ 0x00000000 0x00048000
+ 0x00048000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x0000000d 0x0000000d
+ 0x0000000d 0x0000000d
+ 0x0000000d 0x0000000d
+ 0x0000000d 0x0000000d
+ 0x100002a0 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc085
+ 0x00000505 0x81f1f108
+ 0x07070004 0x00000000
+ 0x016eeeee 0x51451420
+ 0x00514514 0x00514514
+ 0x51451400 0x0606003f
+ 0x00000000 0x00000000
+ 0x00000100 0x0127000e
+ 0x00000000 0x00000003
+ 0x000040a0 0x800024aa
+ 0x0000000e
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430000>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200018>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000d71>;
+ nvidia,emc-mrs-wait-cnt = <0x00f7000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040000>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0120113d>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x00000024 0x000000cd
+ 0x00000000 0x00000019
+ 0x0000000a 0x00000008
+ 0x0000000d 0x00000004
+ 0x00000013 0x0000000a
+ 0x0000000a 0x00000004
+ 0x00000002 0x00000000
+ 0x00000006 0x00000006
+ 0x0000000b 0x00000002
+ 0x00000000 0x00000002
+ 0x0000000d 0x00080000
+ 0x00000004 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000001
+ 0x00000014 0x00000018
+ 0x0000001a 0x000017e2
+ 0x00000000 0x000005f8
+ 0x00000003 0x00000011
+ 0x00000001 0x00000000
+ 0x000000c7 0x00000018
+ 0x000000d7 0x00000200
+ 0x00000005 0x00000006
+ 0x00000005 0x00000019
+ 0x00000000 0x00000008
+ 0x00000008 0x00001822
+ 0x00000000 0x00000000
+ 0x00000000 0x104ab098
+ 0xe00700b1 0x00008000
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x007fc008 0x007fc008
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00034000 0x00034000
+ 0x00000000 0x00034000
+ 0x00034000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x00000005 0x00000005
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x0000000a 0x0000000a
+ 0x100002a0 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc085
+ 0x00000000 0x81f1f108
+ 0x07070004 0x00000000
+ 0x016eeeee 0x61861820
+ 0x00514514 0x00514514
+ 0x61861800 0x0606003f
+ 0x00000000 0x00000000
+ 0x00000100 0x00f7000e
+ 0x00000000 0x00000004
+ 0x00004080 0x80003012
+ 0x0000000f
+ >;
+ };
+
+ timing-924000000 {
+ clock-frequency = <924000000>;
+
+ nvidia,emc-auto-cal-config = <0xa1430303>;
+ nvidia,emc-auto-cal-config2 = <0x00000000>;
+ nvidia,emc-auto-cal-config3 = <0x00000000>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000000>;
+ nvidia,emc-cfg = <0x73300000>;
+ nvidia,emc-cfg-2 = <0x0000089d>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
+ nvidia,emc-mode-1 = <0x80100002>;
+ nvidia,emc-mode-2 = <0x80200020>;
+ nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80000f15>;
+ nvidia,emc-mrs-wait-cnt = <0x00cd000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040000>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0120113d>;
+ nvidia,emc-zcal-cnt-long = <0x0000004c>;
+ nvidia,emc-zcal-interval = <0x00020000>;
+
+ nvidia,emc-configuration = <
+ 0x0000002b 0x000000f0
+ 0x00000000 0x0000001e
+ 0x0000000b 0x00000009
+ 0x0000000f 0x00000005
+ 0x00000016 0x0000000b
+ 0x0000000b 0x00000004
+ 0x00000002 0x00000000
+ 0x00000007 0x00000007
+ 0x0000000d 0x00000002
+ 0x00000000 0x00000002
+ 0x0000000f 0x000a0000
+ 0x00000004 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000001
+ 0x00000016 0x0000001a
+ 0x0000001c 0x00001be7
+ 0x00000000 0x000006f9
+ 0x00000004 0x00000015
+ 0x00000001 0x00000000
+ 0x000000e7 0x0000001b
+ 0x000000fb 0x00000200
+ 0x00000006 0x00000007
+ 0x00000006 0x0000001e
+ 0x00000000 0x0000000a
+ 0x0000000a 0x00001c28
+ 0x00000000 0x00000000
+ 0x00000000 0x104ab898
+ 0xe00400b1 0x00008000
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x007f800a 0x007f800a
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x0002c000 0x0002c000
+ 0x00000000 0x0002c000
+ 0x0002c000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000000 0x00000000
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000004 0x00000004
+ 0x00000008 0x00000008
+ 0x00000008 0x00000008
+ 0x00000008 0x00000008
+ 0x00000008 0x00000008
+ 0x100002a0 0x00000000
+ 0x00111111 0x00000000
+ 0x00000000 0x77ffc085
+ 0x00000000 0x81f1f108
+ 0x07070004 0x00000000
+ 0x016eeeee 0x5d75d720
+ 0x00514514 0x00514514
+ 0x5d75d700 0x0606003f
+ 0x00000000 0x00000000
+ 0x00000128 0x00cd000e
+ 0x00000000 0x00000004
+ 0x00004080 0x800037ea
+ 0x00000011
+ >;
+ };
+
+ };
+ };
+
+ memory-controller@70019000 {
+ emc-timings-1 {
+ nvidia,ram-code = <1>;
+
+ timing-12750000 {
+ clock-frequency = <12750000>;
+
+ nvidia,emem-configuration = <
+ 0x40040001 0x8000000a
+ 0x00000001 0x00000001
+ 0x00000002 0x00000000
+ 0x00000002 0x00000001
+ 0x00000003 0x00000008
+ 0x00000003 0x00000002
+ 0x00000003 0x00000006
+ 0x06030203 0x000a0502
+ 0x77e30303 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-20400000 {
+ clock-frequency = <20400000>;
+
+ nvidia,emem-configuration = <
+ 0x40020001 0x80000012
+ 0x00000001 0x00000001
+ 0x00000002 0x00000000
+ 0x00000002 0x00000001
+ 0x00000003 0x00000008
+ 0x00000003 0x00000002
+ 0x00000003 0x00000006
+ 0x06030203 0x000a0502
+ 0x76230303 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-40800000 {
+ clock-frequency = <40800000>;
+
+ nvidia,emem-configuration = <
+ 0xa0000001 0x80000017
+ 0x00000001 0x00000001
+ 0x00000002 0x00000000
+ 0x00000002 0x00000001
+ 0x00000003 0x00000008
+ 0x00000003 0x00000002
+ 0x00000003 0x00000006
+ 0x06030203 0x000a0502
+ 0x74a30303 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-68000000 {
+ clock-frequency = <68000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000001 0x8000001e
+ 0x00000001 0x00000001
+ 0x00000002 0x00000000
+ 0x00000002 0x00000001
+ 0x00000003 0x00000008
+ 0x00000003 0x00000002
+ 0x00000003 0x00000006
+ 0x06030203 0x000a0502
+ 0x74230403 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-102000000 {
+ clock-frequency = <102000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000001 0x80000026
+ 0x00000001 0x00000001
+ 0x00000003 0x00000000
+ 0x00000002 0x00000001
+ 0x00000003 0x00000008
+ 0x00000003 0x00000002
+ 0x00000003 0x00000006
+ 0x06030203 0x000a0503
+ 0x73c30504 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-204000000 {
+ clock-frequency = <204000000>;
+
+ nvidia,emem-configuration = <
+ 0x01000003 0x80000040
+ 0x00000001 0x00000001
+ 0x00000004 0x00000002
+ 0x00000003 0x00000001
+ 0x00000003 0x00000008
+ 0x00000003 0x00000002
+ 0x00000004 0x00000006
+ 0x06040203 0x000a0504
+ 0x73840a05 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-300000000 {
+ clock-frequency = <300000000>;
+
+ nvidia,emem-configuration = <
+ 0x08000004 0x80000040
+ 0x00000001 0x00000002
+ 0x00000007 0x00000004
+ 0x00000004 0x00000001
+ 0x00000002 0x00000007
+ 0x00000002 0x00000002
+ 0x00000004 0x00000006
+ 0x06040202 0x000b0607
+ 0x77450e08 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-396000000 {
+ clock-frequency = <396000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000005 0x80000040
+ 0x00000001 0x00000002
+ 0x00000009 0x00000005
+ 0x00000006 0x00000001
+ 0x00000002 0x00000008
+ 0x00000002 0x00000002
+ 0x00000004 0x00000006
+ 0x06040202 0x000d0709
+ 0x7586120a 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-528000000 {
+ clock-frequency = <528000000>;
+
+ nvidia,emem-configuration = <
+ 0x0f000007 0x80000040
+ 0x00000002 0x00000003
+ 0x0000000c 0x00000007
+ 0x00000008 0x00000001
+ 0x00000002 0x00000009
+ 0x00000002 0x00000002
+ 0x00000005 0x00000006
+ 0x06050202 0x0010090c
+ 0x7428180d 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-600000000 {
+ clock-frequency = <600000000>;
+
+ nvidia,emem-configuration = <
+ 0x00000009 0x80000040
+ 0x00000003 0x00000004
+ 0x0000000e 0x00000009
+ 0x0000000a 0x00000001
+ 0x00000003 0x0000000b
+ 0x00000002 0x00000002
+ 0x00000005 0x00000007
+ 0x07050202 0x00130b0e
+ 0x73a91b0f 0x70000f03
+ 0x001f0000
+ >;
+ };
+
+ timing-792000000 {
+ clock-frequency = <792000000>;
+
+ nvidia,emem-configuration = <
+ 0x0e00000b 0x80000040
+ 0x00000004 0x00000005
+ 0x00000013 0x0000000c
+ 0x0000000d 0x00000002
+ 0x00000003 0x0000000c
+ 0x00000002 0x00000002
+ 0x00000006 0x00000008
+ 0x08060202 0x00170e13
+ 0x736c2414 0x70000f02
+ 0x001f0000
+ >;
+ };
+
+ timing-924000000 {
+ clock-frequency = <924000000>;
+
+ nvidia,emem-configuration = <
+ 0x0e00000d 0x80000040
+ 0x00000005 0x00000006
+ 0x00000016 0x0000000e
+ 0x0000000f 0x00000002
+ 0x00000004 0x0000000e
+ 0x00000002 0x00000002
+ 0x00000006 0x00000009
+ 0x09060202 0x001a1016
+ 0x734e2a17 0x70000f02
+ 0x001f0000
+ >;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-apalis-eval.dts b/arch/arm/boot/dts/tegra124-apalis-eval.dts
new file mode 100644
index 000000000000..653044a44f0d
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-apalis-eval.dts
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include "tegra124-apalis.dtsi"
+
+/ {
+ model = "Toradex Apalis TK1 on Apalis Evaluation Board";
+ compatible = "toradex,apalis-tk1-eval", "toradex,apalis-tk1",
+ "nvidia,tegra124";
+
+ aliases {
+ rtc0 = "/i2c@7000c000/rtc@68";
+ rtc1 = "/i2c@7000d000/pmic@40";
+ rtc2 = "/rtc@7000e000";
+ serial0 = &uarta;
+ serial1 = &uartb;
+ serial2 = &uartc;
+ serial3 = &uartd;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ pcie-controller@01003000 {
+ pci@1,0 {
+ status = "okay";
+ };
+ };
+
+ host1x@50000000 {
+ hdmi@54280000 {
+ status = "okay";
+ };
+ };
+
+ /* Apalis UART1 */
+ serial@70006000 {
+ status = "okay";
+ };
+
+ /* Apalis UART2 */
+ serial@70006040 {
+ status = "okay";
+ };
+
+ /* Apalis UART3 */
+ serial@70006200 {
+ status = "okay";
+ };
+
+ /* Apalis UART4 */
+ serial@70006300 {
+ status = "okay";
+ };
+
+ pwm@7000a000 {
+ status = "okay";
+ };
+
+ /*
+ * GEN1_I2C: I2C1_SDA/SCL on MXM3 pin 209/211 (e.g. RTC on carrier
+ * board)
+ */
+ i2c@7000c000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ pcie-switch@58 {
+ compatible = "plx,pex8605";
+ reg = <0x58>;
+ };
+
+ /* M41T0M6 real time clock on carrier board */
+ rtc@68 {
+ compatible = "st,m41t00";
+ reg = <0x68>;
+ };
+ };
+
+ /*
+ * GEN2_I2C: I2C2_SDA/SCL (DDC) on MXM3 pin 205/207 (e.g. display EDID)
+ */
+ hdmi_ddc: i2c@7000c400 {
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
+ /*
+ * CAM_I2C: I2C3_SDA/SCL (CAM) on MXM3 pin 201/203 (e.g. camera sensor
+ * on carrier board)
+ */
+ i2c@7000c500 {
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
+ /* I2C4 (DDC): unused */
+
+ /* SPI1: Apalis SPI1 */
+ spi@7000d400 {
+ status = "okay";
+ spi-max-frequency = <50000000>;
+
+ spidev0: spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ };
+ };
+
+ /* SPI4: Apalis SPI2 */
+ spi@7000da00 {
+ status = "okay";
+ spi-max-frequency = <50000000>;
+
+ spidev1: spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ };
+ };
+
+ /* Apalis Serial ATA */
+ sata@70020000 {
+ status = "okay";
+ };
+
+ hda@70030000 {
+ status = "okay";
+ };
+
+ usb@70090000 {
+ status = "okay";
+ };
+
+ /* Apalis MMC1 */
+ sdhci@700b0000 {
+ status = "okay";
+ /* MMC1_CD# */
+ cd-gpios = <&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vqmmc-supply = <&vddio_sdmmc1>;
+ };
+
+ /* Apalis SD1 */
+ sdhci@700b0400 {
+ status = "okay";
+ /*
+ * Don't use SD1_CD# aka SDMMC3_CLK_LB_OUT for now as it
+ * features some magic properties even though the external
+ * loopback is disabled and the internal loopback used as per
+ * SDMMC_VENDOR_MISC_CNTRL_0 register's SDMMC_SPARE1 bits being
+ * set to 0xfffd according to the TRM!
+ * cd-gpios = <&gpio TEGRA_GPIO(EE, 4) GPIO_ACTIVE_LOW>;
+ */
+ bus-width = <4>;
+ vqmmc-supply = <&vddio_sdmmc3>;
+ };
+
+ /* EHCI instance 0: USB1_DP/N -> USBO1_DP/N */
+ usb@7d000000 {
+ status = "okay";
+ dr_mode = "otg";
+ };
+
+ usb-phy@7d000000 {
+ status = "okay";
+ vbus-supply = <&reg_usbo1_vbus>;
+ };
+
+ /* EHCI instance 1: USB2_DP/N -> USBH2_DP/N */
+ usb@7d004000 {
+ status = "okay";
+ };
+
+ usb-phy@7d004000 {
+ status = "okay";
+ vbus-supply = <&reg_usbh_vbus>;
+ };
+
+ /* EHCI instance 2: USB3_DP/N -> USBH4_DP/N */
+ usb@7d008000 {
+ status = "okay";
+ };
+
+ usb-phy@7d008000 {
+ status = "okay";
+ vbus-supply = <&reg_usbh_vbus>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+
+ /* BKL1_PWM */
+ pwms = <&pwm 3 5000000>;
+ brightness-levels = <255 231 223 207 191 159 127 0>;
+ default-brightness-level = <6>;
+ /* BKL1_ON */
+ enable-gpios = <&gpio TEGRA_GPIO(BB, 5) GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ wakeup {
+ label = "WAKE1_MICO";
+ gpios = <&gpio TEGRA_GPIO(DD, 3) GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+ };
+
+ reg_5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_SW";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ /* USBO1_EN */
+ reg_usbo1_vbus: regulator-usbo1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_USBO1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(N, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_5v0>;
+ };
+
+ /* USBH_EN */
+ reg_usbh_vbus: regulator-usbh-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_USBH(2A|2C|2D|3|4)";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(N, 5) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_5v0>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-apalis.dtsi b/arch/arm/boot/dts/tegra124-apalis.dtsi
new file mode 100644
index 000000000000..e7a73db17613
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-apalis.dtsi
@@ -0,0 +1,2100 @@
+/*
+ * Copyright 2016 Toradex AG
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "tegra124.dtsi"
+#include "tegra124-apalis-emc.dtsi"
+
+/*
+ * Toradex Apalis TK1 Module Device Tree
+ * Compatible for Revisions 2GB: V1.0A
+ */
+/ {
+ model = "Toradex Apalis TK1";
+ compatible = "toradex,apalis-tk1", "nvidia,tegra124";
+
+ memory {
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ };
+
+ pcie-controller@01003000 {
+ status = "okay";
+
+ avddio-pex-supply = <&vdd_1v05>;
+ avdd-pex-pll-supply = <&vdd_1v05>;
+ avdd-pll-erefe-supply = <&avdd_1v05>;
+ dvddio-pex-supply = <&vdd_1v05>;
+ hvdd-pex-pll-e-supply = <&reg_3v3>;
+ hvdd-pex-supply = <&reg_3v3>;
+ vddio-pex-ctl-supply = <&reg_3v3>;
+
+ /* Apalis PCIe (additional lane Apalis type specific) */
+ pci@1,0 {
+ /* PCIE1_RX/TX and TS_DIFF1/2 */
+ phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-4}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-3}>;
+ phy-names = "pcie-0", "pcie-1";
+ };
+
+ /* I210 Gigabit Ethernet Controller (On-module) */
+ pci@2,0 {
+ phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-2}>;
+ phy-names = "pcie-0";
+ status = "okay";
+ };
+ };
+
+ host1x@50000000 {
+ hdmi@54280000 {
+ pll-supply = <&reg_1v05_avdd_hdmi_pll>;
+ vdd-supply = <&reg_3v3_avdd_hdmi>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio =
+ <&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpu@0,57000000 {
+ /*
+ * Node left disabled on purpose - the bootloader will enable
+ * it after having set the VPR up
+ */
+ vdd-supply = <&vdd_gpu>;
+ };
+
+ pinmux: pinmux@70000868 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ /* Analogue Audio (On-module) */
+ dap3_fs_pp0 {
+ nvidia,pins = "dap3_fs_pp0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_dout_pp2 {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_sclk_pp3 {
+ nvidia,pins = "dap3_sclk_pp3";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_pw4 {
+ nvidia,pins = "dap_mclk1_pw4";
+ nvidia,function = "extperiph1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis BKL1_ON */
+ pbb5 {
+ nvidia,pins = "pbb5";
+ nvidia,function = "vgp5";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis BKL1_PWM */
+ pu6 {
+ nvidia,pins = "pu6";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis CAM1_MCLK */
+ cam_mclk_pcc0 {
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi_alt3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis Digital Audio */
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_sclk_pa3 {
+ nvidia,pins = "dap2_sclk_pa3";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_din_pa4 {
+ nvidia,pins = "dap2_din_pa4";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_dout_pa5 {
+ nvidia,pins = "dap2_dout_pa5";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb3 { /* DAP1_RESET */
+ nvidia,pins = "pbb3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "extperiph3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis GPIO */
+ ddc_scl_pv4 {
+ nvidia,pins = "ddc_scl_pv4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ddc_sda_pv5 {
+ nvidia,pins = "ddc_sda_pv5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_rst_n_pdd1 {
+ nvidia,pins = "pex_l0_rst_n_pdd1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_clkreq_n_pdd2 {
+ nvidia,pins = "pex_l0_clkreq_n_pdd2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_rst_n_pdd5 {
+ nvidia,pins = "pex_l1_rst_n_pdd5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_clkreq_n_pdd6 {
+ nvidia,pins = "pex_l1_clkreq_n_pdd6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dp_hpd_pff0 {
+ nvidia,pins = "dp_hpd_pff0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pff2 {
+ nvidia,pins = "pff2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ owr { /* PEX_L1_CLKREQ_N multiplexed GPIO6 */
+ nvidia,pins = "owr";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis HDMI1_CEC */
+ hdmi_cec_pee3 {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis HDMI1_HPD */
+ hdmi_int_pn7 {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis I2C1 */
+ gen1_i2c_scl_pc4 {
+ nvidia,pins = "gen1_i2c_scl_pc4";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen1_i2c_sda_pc5 {
+ nvidia,pins = "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis I2C2 (DDC) */
+ gen2_i2c_scl_pt5 {
+ nvidia,pins = "gen2_i2c_scl_pt5";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_sda_pt6 {
+ nvidia,pins = "gen2_i2c_sda_pt6";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis I2C3 (CAM) */
+ cam_i2c_scl_pbb1 {
+ nvidia,pins = "cam_i2c_scl_pbb1";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ cam_i2c_sda_pbb2 {
+ nvidia,pins = "cam_i2c_sda_pbb2";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis MMC1 */
+ sdmmc1_cd_n_pv3 { /* CD# GPIO */
+ nvidia,pins = "sdmmc1_wp_n_pv3";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_out_pw5 { /* D5 GPIO */
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat3_py4 {
+ nvidia,pins = "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat2_py5 {
+ nvidia,pins = "sdmmc1_dat2_py5";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat1_py6 {
+ nvidia,pins = "sdmmc1_dat1_py6";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat0_py7 {
+ nvidia,pins = "sdmmc1_dat0_py7";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_clk_pz0 {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_cmd_pz1 {
+ nvidia,pins = "sdmmc1_cmd_pz1";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_req_pcc5 { /* D4 GPIO */
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ /*
+ * Don't use MMC1_D6 aka SDMMC3_CLK_LB_IN for now as it
+ * features some magic properties even though the
+ * external loopback is disabled and the internal
+ * loopback used as per SDMMC_VENDOR_MISC_CNTRL_0
+ * register's SDMMC_SPARE1 bits being set to 0xfffd
+ * according to the TRM!
+ */
+ sdmmc3_clk_lb_in_pee5 { /* D6 GPIO */
+ nvidia,pins = "sdmmc3_clk_lb_in_pee5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ usb_vbus_en2_pff1 { /* D7 GPIO */
+ nvidia,pins = "usb_vbus_en2_pff1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis PWM */
+ ph0 {
+ nvidia,pins = "ph0";
+ nvidia,function = "pwm0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph1 {
+ nvidia,pins = "ph1";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph2 {
+ nvidia,pins = "ph2";
+ nvidia,function = "pwm2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ /* PWM3 active on pu6 being Apalis BKL1_PWM */
+ ph3 {
+ nvidia,pins = "ph3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SATA1_ACT# */
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SD1 */
+ sdmmc3_clk_pa6 {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_cmd_pa7 {
+ nvidia,pins = "sdmmc3_cmd_pa7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat3_pb4 {
+ nvidia,pins = "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat2_pb5 {
+ nvidia,pins = "sdmmc3_dat2_pb5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat1_pb6 {
+ nvidia,pins = "sdmmc3_dat1_pb6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat0_pb7 {
+ nvidia,pins = "sdmmc3_dat0_pb7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ /*
+ * Don't use SD1_CD# aka SDMMC3_CLK_LB_OUT for now as it
+ * features some magic properties even though the
+ * external loopback is disabled and the internal
+ * loopback used as per SDMMC_VENDOR_MISC_CNTRL_0
+ * register's SDMMC_SPARE1 bits being set to 0xfffd
+ * according to the TRM!
+ */
+ sdmmc3_clk_lb_out_pee4 { /* CD# GPIO */
+ nvidia,pins = "sdmmc3_clk_lb_out_pee4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SPDIF */
+ spdif_out_pk5 {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pk6 {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis SPI1 */
+ ulpi_clk_py0 {
+ nvidia,pins = "ulpi_clk_py0";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_dir_py1 {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_nxt_py2 {
+ nvidia,pins = "ulpi_nxt_py2";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_stp_py3 {
+ nvidia,pins = "ulpi_stp_py3";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SPI2 */
+ pg5 {
+ nvidia,pins = "pg5";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg6 {
+ nvidia,pins = "pg6";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg7 {
+ nvidia,pins = "pg7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi3 {
+ nvidia,pins = "pi3";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART1 */
+ pb1 { /* DCD GPIO */
+ nvidia,pins = "pb1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk7 { /* RI GPIO */
+ nvidia,pins = "pk7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_txd_pu0 {
+ nvidia,pins = "pu0";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_rxd_pu1 {
+ nvidia,pins = "pu1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_cts_n_pu2 {
+ nvidia,pins = "pu2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_rts_n_pu3 {
+ nvidia,pins = "pu3";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_cts_n_pa1 { /* DSR GPIO */
+ nvidia,pins = "uart3_cts_n_pa1";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_rts_n_pc0 { /* DTR GPIO */
+ nvidia,pins = "uart3_rts_n_pc0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART2 */
+ uart2_txd_pc2 {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rxd_pc3 {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_cts_n_pj5 {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_rts_n_pj6 {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART3 */
+ uart3_txd_pw6 {
+ nvidia,pins = "uart3_txd_pw6";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rxd_pw7 {
+ nvidia,pins = "uart3_rxd_pw7";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis UART4 */
+ uart4_rxd_pb0 {
+ nvidia,pins = "pb0";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart4_txd_pj7 {
+ nvidia,pins = "pj7";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBH_EN */
+ usb_vbus_en1_pn5 {
+ nvidia,pins = "usb_vbus_en1_pn5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBH_OC# */
+ pbb0 {
+ nvidia,pins = "pbb0";
+ nvidia,function = "vgp6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis USBO1_EN */
+ usb_vbus_en0_pn4 {
+ nvidia,pins = "usb_vbus_en0_pn4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBO1_OC# */
+ pbb4 {
+ nvidia,pins = "pbb4";
+ nvidia,function = "vgp4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis WAKE1_MICO */
+ pex_wake_n_pdd3 {
+ nvidia,pins = "pex_wake_n_pdd3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* CORE_PWR_REQ */
+ core_pwr_req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "pwron";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* CPU_PWR_REQ */
+ cpu_pwr_req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "cpu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* DVFS */
+ dvfs_pwm_px0 {
+ nvidia,pins = "dvfs_pwm_px0";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_clk_px2 {
+ nvidia,pins = "dvfs_clk_px2";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* eMMC */
+ sdmmc4_dat0_paa0 {
+ nvidia,pins = "sdmmc4_dat0_paa0";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat1_paa1 {
+ nvidia,pins = "sdmmc4_dat1_paa1";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat2_paa2 {
+ nvidia,pins = "sdmmc4_dat2_paa2";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat3_paa3 {
+ nvidia,pins = "sdmmc4_dat3_paa3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat4_paa4 {
+ nvidia,pins = "sdmmc4_dat4_paa4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat5_paa5 {
+ nvidia,pins = "sdmmc4_dat5_paa5";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat6_paa6 {
+ nvidia,pins = "sdmmc4_dat6_paa6";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat7_paa7 {
+ nvidia,pins = "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_clk_pcc4 {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_cmd_pt7 {
+ nvidia,pins = "sdmmc4_cmd_pt7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* JTAG_RTCK */
+ jtag_rtck {
+ nvidia,pins = "jtag_rtck";
+ nvidia,function = "rtck";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_DEV_OFF# */
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_RESET# */
+ kb_row10_ps2 {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_WAKE# */
+ ulpi_data4_po5 {
+ nvidia,pins = "ulpi_data4_po5";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT1# */
+ pk2 {
+ nvidia,pins = "pk2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT2# */
+ pj2 {
+ nvidia,pins = "pj2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT3# */
+ pi5 {
+ nvidia,pins = "pi5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT4# */
+ pj0 {
+ nvidia,pins = "pj0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_RESET */
+ pbb6 {
+ nvidia,pins = "pbb6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* MCU SPI */
+ gpio_x4_aud_px4 {
+ nvidia,pins = "gpio_x4_aud_px4";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x5_aud_px5 {
+ nvidia,pins = "gpio_x5_aud_px5";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x6_aud_px6 { /* MCU_CS */
+ nvidia,pins = "gpio_x6_aud_px6";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x7_aud_px7 {
+ nvidia,pins = "gpio_x7_aud_px7";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gpio_w2_aud_pw2 { /* MCU_CSEZP */
+ nvidia,pins = "gpio_w2_aud_pw2";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* PMIC_CLK_32K */
+ clk_32k_in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PMIC_CPU_OC_INT */
+ clk_32k_out_pa0 {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,function = "soc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWR_I2C */
+ pwr_i2c_scl_pz6 {
+ nvidia,pins = "pwr_i2c_scl_pz6";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pwr_i2c_sda_pz7 {
+ nvidia,pins = "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWR_INT_N */
+ pwr_int_n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* RESET_MOCI_CTRL */
+ pu4 {
+ nvidia,pins = "pu4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* RESET_OUT_N */
+ reset_out_n {
+ nvidia,pins = "reset_out_n";
+ nvidia,function = "reset_out_n";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* SHIFT_CTRL_DIR_IN */
+ kb_row0_pr0 {
+ nvidia,pins = "kb_row0_pr0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row1_pr1 {
+ nvidia,pins = "kb_row1_pr1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Configure level-shifter as output for HDA */
+ kb_row11_ps3 {
+ nvidia,pins = "kb_row11_ps3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SHIFT_CTRL_DIR_OUT */
+ kb_col5_pq5 {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col6_pq6 {
+ nvidia,pins = "kb_col6_pq6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col7_pq7 {
+ nvidia,pins = "kb_col7_pq7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SHIFT_CTRL_OE */
+ kb_col0_pq0 {
+ nvidia,pins = "kb_col0_pq0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col1_pq1 {
+ nvidia,pins = "kb_col1_pq1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col2_pq2 {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col4_pq4 {
+ nvidia,pins = "kb_col4_pq4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row2_pr2 {
+ nvidia,pins = "kb_row2_pr2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* GPIO_PI6 aka TEMP_ALERT_L */
+ pi6 {
+ nvidia,pins = "pi6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* TOUCH_INT */
+ gpio_w3_aud_pw3 {
+ nvidia,pins = "gpio_w3_aud_pw3";
+ nvidia,function = "spi6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ pc7 { /* NC */
+ nvidia,pins = "pc7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg0 { /* NC */
+ nvidia,pins = "pg0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg1 { /* NC */
+ nvidia,pins = "pg1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg2 { /* NC */
+ nvidia,pins = "pg2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg3 { /* NC */
+ nvidia,pins = "pg3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg4 { /* NC */
+ nvidia,pins = "pg4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph4 { /* NC */
+ nvidia,pins = "ph4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph5 { /* NC */
+ nvidia,pins = "ph5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph6 { /* NC */
+ nvidia,pins = "ph6";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph7 { /* NC */
+ nvidia,pins = "ph7";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi0 { /* NC */
+ nvidia,pins = "pi0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi1 { /* NC */
+ nvidia,pins = "pi1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi2 { /* NC */
+ nvidia,pins = "pi2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi4 { /* NC */
+ nvidia,pins = "pi4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi7 { /* NC */
+ nvidia,pins = "pi7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk0 { /* NC */
+ nvidia,pins = "pk0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk1 { /* NC */
+ nvidia,pins = "pk1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk3 { /* NC */
+ nvidia,pins = "pk3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk4 { /* NC */
+ nvidia,pins = "pk4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_fs_pn0 { /* NC */
+ nvidia,pins = "dap1_fs_pn0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_din_pn1 { /* NC */
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_sclk_pn3 { /* NC */
+ nvidia,pins = "dap1_sclk_pn3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data7_po0 { /* NC */
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data0_po1 { /* NC */
+ nvidia,pins = "ulpi_data0_po1";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data1_po2 { /* NC */
+ nvidia,pins = "ulpi_data1_po2";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data2_po3 { /* NC */
+ nvidia,pins = "ulpi_data2_po3";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data3_po4 { /* NC */
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data6_po7 { /* NC */
+ nvidia,pins = "ulpi_data6_po7";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_fs_pp4 { /* NC */
+ nvidia,pins = "dap4_fs_pp4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_din_pp5 { /* NC */
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_dout_pp6 { /* NC */
+ nvidia,pins = "dap4_dout_pp6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_sclk_pp7 { /* NC */
+ nvidia,pins = "dap4_sclk_pp7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col3_pq3 { /* NC */
+ nvidia,pins = "kb_col3_pq3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row3_pr3 { /* NC */
+ nvidia,pins = "kb_row3_pr3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row4_pr4 { /* NC */
+ nvidia,pins = "kb_row4_pr4";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row5_pr5 { /* NC */
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row6_pr6 { /* NC */
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row7_pr7 { /* NC */
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row8_ps0 { /* NC */
+ nvidia,pins = "kb_row8_ps0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row9_ps1 { /* NC */
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row12_ps4 { /* NC */
+ nvidia,pins = "kb_row12_ps4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row13_ps5 { /* NC */
+ nvidia,pins = "kb_row13_ps5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row14_ps6 { /* NC */
+ nvidia,pins = "kb_row14_ps6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row15_ps7 { /* NC */
+ nvidia,pins = "kb_row15_ps7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row16_pt0 { /* NC */
+ nvidia,pins = "kb_row16_pt0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row17_pt1 { /* NC */
+ nvidia,pins = "kb_row17_pt1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu5 { /* NC */
+ nvidia,pins = "pu5";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv0 { /* NC */
+ nvidia,pins = "pv0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv1 { /* NC */
+ nvidia,pins = "pv1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ sdmmc3_cd_n_pv2 { /* NC */
+ nvidia,pins = "sdmmc3_cd_n_pv2";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x1_aud_px1 { /* NC */
+ nvidia,pins = "gpio_x1_aud_px1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x3_aud_px3 { /* NC */
+ nvidia,pins = "gpio_x3_aud_px3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb7 { /* NC */
+ nvidia,pins = "pbb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc1 { /* NC */
+ nvidia,pins = "pcc1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc2 { /* NC */
+ nvidia,pins = "pcc2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_req_pee1 { /* NC */
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_req_pee2 { /* NC */
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ };
+ };
+
+ serial@70006040 {
+ compatible = "nvidia,tegra124-hsuart";
+ };
+
+ serial@70006200 {
+ compatible = "nvidia,tegra124-hsuart";
+ };
+
+ serial@70006300 {
+ compatible = "nvidia,tegra124-hsuart";
+ };
+
+ hdmi_ddc: i2c@7000c400 {
+ clock-frequency = <100000>;
+ };
+
+ /* PWR_I2C: power I2C to audio codec, PMIC and temperature sensor */
+ i2c@7000d000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ /* SGTL5000 audio codec */
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3v3>;
+ VDDIO-supply = <&vddio_1v8>;
+ clocks = <&tegra_car TEGRA124_CLK_EXTERN1>;
+ };
+
+ pmic: pmic@40 {
+ compatible = "ams,as3722";
+ reg = <0x40>;
+ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+
+ ams,system-power-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&as3722_default>;
+
+ as3722_default: pinmux {
+ gpio2_7 {
+ pins = "gpio2", /* PWR_EN_+V3.3 */
+ "gpio7"; /* +V1.6_LPO */
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ gpio1_3_4_5_6 {
+ pins = "gpio1", "gpio3", "gpio4",
+ "gpio5", "gpio6";
+ bias-high-impedance;
+ };
+ };
+
+ regulators {
+ vsup-sd2-supply = <&reg_3v3>;
+ vsup-sd3-supply = <&reg_3v3>;
+ vsup-sd4-supply = <&reg_3v3>;
+ vsup-sd5-supply = <&reg_3v3>;
+ vin-ldo0-supply = <&vddio_ddr_1v35>;
+ vin-ldo1-6-supply = <&reg_3v3>;
+ vin-ldo2-5-7-supply = <&vddio_1v8>;
+ vin-ldo3-4-supply = <&reg_3v3>;
+ vin-ldo9-10-supply = <&reg_3v3>;
+ vin-ldo11-supply = <&reg_3v3>;
+
+ vdd_cpu: sd0 {
+ regulator-name = "+VDD_CPU_AP";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ams,ext-control = <2>;
+ };
+
+ sd1 {
+ regulator-name = "+VDD_CORE";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microamp = <2500000>;
+ regulator-max-microamp = <4000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ams,ext-control = <1>;
+ };
+
+ vddio_ddr_1v35: sd2 {
+ regulator-name =
+ "+V1.35_VDDIO_DDR(sd2)";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ sd3 {
+ regulator-name =
+ "+V1.35_VDDIO_DDR(sd3)";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v05: sd4 {
+ regulator-name = "+V1.05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ vddio_1v8: sd5 {
+ regulator-name = "+V1.8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_gpu: sd6 {
+ regulator-name = "+VDD_GPU_AP";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ avdd_1v05: ldo0 {
+ regulator-name = "+V1.05_AVDD";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ams,ext-control = <1>;
+ };
+
+ vddio_sdmmc1: ldo1 {
+ regulator-name = "VDDIO_SDMMC1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo2 {
+ regulator-name = "+V1.2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3 {
+ regulator-name = "+V1.05_RTC";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ams,enable-tracking;
+ };
+
+ /* 1.8V for LVDS, 3.3V for eDP */
+ ldo4 {
+ regulator-name = "AVDD_LVDS0_PLL";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ /* LDO5 not used */
+
+ vddio_sdmmc3: ldo6 {
+ regulator-name = "VDDIO_SDMMC3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ /* LDO7 not used */
+
+ ldo9 {
+ regulator-name = "+V3.3_ETH(ldo9)";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo10 {
+ regulator-name = "+V3.3_ETH(ldo10)";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ldo11 {
+ regulator-name = "+V1.8_VPP_FUSE";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+
+ /*
+ * TMP451 temperature sensor
+ * Note: THERM_N directly connected to AS3722 PMIC THERM
+ */
+ temperature-sensor@4c {
+ compatible = "ti,tmp451";
+ reg = <0x4c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(I, 6) IRQ_TYPE_LEVEL_LOW>;
+
+ #thermal-sensor-cells = <1>;
+ };
+ };
+
+ /* SPI2: MCU SPI */
+ spi@7000d600 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+ };
+
+ pmc@7000e400 {
+ nvidia,invert-interrupt;
+ nvidia,suspend-mode = <1>;
+ nvidia,cpu-pwr-good-time = <500>;
+ nvidia,cpu-pwr-off-time = <300>;
+ nvidia,core-pwr-good-time = <641 3845>;
+ nvidia,core-pwr-off-time = <61036>;
+ nvidia,core-power-req-active-high;
+ nvidia,sys-clock-req-active-high;
+
+ /* Set power_off bit in ResetControl register of AS3722 PMIC */
+ i2c-thermtrip {
+ nvidia,i2c-controller-id = <4>;
+ nvidia,bus-addr = <0x40>;
+ nvidia,reg-addr = <0x36>;
+ nvidia,reg-data = <0x2>;
+ };
+ };
+
+ sata@70020000 {
+ phys = <&{/padctl@7009f000/pads/sata/lanes/sata-0}>;
+ phy-names = "sata-0";
+
+ avdd-supply = <&vdd_1v05>;
+ hvdd-supply = <&reg_3v3>;
+ vddio-supply = <&vdd_1v05>;
+ };
+
+ usb@70090000 {
+ /* USBO1, USBO1 (SS), USBH2, USBH4 and USBH4 (SS) */
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-1}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>;
+ phy-names = "usb2-0", "usb3-1", "usb2-1", "usb2-2", "usb3-0";
+
+ avddio-pex-supply = <&vdd_1v05>;
+ avdd-pll-erefe-supply = <&avdd_1v05>;
+ avdd-pll-utmip-supply = <&vddio_1v8>;
+ avdd-usb-ss-pll-supply = <&vdd_1v05>;
+ avdd-usb-supply = <&reg_3v3>;
+ dvddio-pex-supply = <&vdd_1v05>;
+ hvdd-usb-ss-pll-e-supply = <&reg_3v3>;
+ hvdd-usb-ss-supply = <&reg_3v3>;
+ };
+
+ padctl@7009f000 {
+ pads {
+ usb2 {
+ status = "okay";
+
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+
+ pcie {
+ status = "okay";
+
+ lanes {
+ pcie-0 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+
+ pcie-1 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+
+ pcie-2 {
+ nvidia,function = "pcie";
+ status = "okay";
+ };
+
+ pcie-3 {
+ nvidia,function = "pcie";
+ status = "okay";
+ };
+
+ pcie-4 {
+ nvidia,function = "pcie";
+ status = "okay";
+ };
+ };
+ };
+
+ sata {
+ status = "okay";
+
+ lanes {
+ sata-0 {
+ nvidia,function = "sata";
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ /* USBO1 */
+ usb2-0 {
+ status = "okay";
+ mode = "otg";
+
+ vbus-supply = <&reg_usbo1_vbus>;
+ };
+
+ /* USBH2 */
+ usb2-1 {
+ status = "okay";
+ mode = "host";
+
+ vbus-supply = <&reg_usbh_vbus>;
+ };
+
+ /* USBH4 */
+ usb2-2 {
+ status = "okay";
+ mode = "host";
+
+ vbus-supply = <&reg_usbh_vbus>;
+ };
+
+ usb3-0 {
+ nvidia,usb2-companion = <2>;
+ status = "okay";
+ };
+
+ usb3-1 {
+ nvidia,usb2-companion = <0>;
+ status = "okay";
+ };
+ };
+ };
+
+ /* eMMC */
+ sdhci@700b0600 {
+ status = "okay";
+ bus-width = <8>;
+ non-removable;
+ };
+
+ /* CPU DFLL clock */
+ clock@70110000 {
+ status = "okay";
+ vdd-cpu-supply = <&vdd_cpu>;
+ nvidia,i2c-fs-rate = <400000>;
+ };
+
+ ahub@70300000 {
+ i2s@70301200 {
+ status = "okay";
+ };
+ };
+
+ clocks {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clk32k_in: clock@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+
+ cpus {
+ cpu@0 {
+ vdd-cpu-supply = <&vdd_cpu>;
+ };
+ };
+
+ reg_1v05_avdd_hdmi_pll: regulator-1v05-avdd-hdmi-pll {
+ compatible = "regulator-fixed";
+ regulator-name = "+V1.05_AVDD_HDMI_PLL";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ gpio = <&gpio TEGRA_GPIO(H, 7) GPIO_ACTIVE_LOW>;
+ vin-supply = <&vdd_1v05>;
+ };
+
+ reg_3v3_mxm: regulator-3v3-mxm {
+ compatible = "regulator-fixed";
+ regulator-name = "+V3.3_MXM";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "+V3.3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ /* PWR_EN_+V3.3 */
+ gpio = <&pmic 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_3v3_mxm>;
+ };
+
+ reg_3v3_avdd_hdmi: regulator-3v3-avdd-hdmi {
+ compatible = "regulator-fixed";
+ regulator-name = "+V3.3_AVDD_HDMI";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vdd_1v05>;
+ };
+
+ sound {
+ compatible = "toradex,tegra-audio-sgtl5000-apalis_tk1",
+ "nvidia,tegra-audio-sgtl5000";
+ nvidia,model = "Toradex Apalis TK1";
+ nvidia,audio-routing =
+ "Headphone Jack", "HP_OUT",
+ "LINE_IN", "Line In Jack",
+ "MIC_IN", "Mic Jack";
+ nvidia,i2s-controller = <&tegra_i2s2>;
+ nvidia,audio-codec = <&sgtl5000>;
+ clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
+ <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
+
+ thermal-zones {
+ cpu {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /*
+ * There are currently no cooling maps because
+ * there are no cooling devices
+ */
+ };
+ };
+
+ mem {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /*
+ * There are currently no cooling maps because
+ * there are no cooling devices
+ */
+ };
+ };
+
+ gpu {
+ trips {
+ trip@0 {
+ temperature = <101000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /*
+ * There are currently no cooling maps because
+ * there are no cooling devices
+ */
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi b/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
index 2c5cede686dc..accb7055165a 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1-emc.dtsi
@@ -1,5 +1,5 @@
/ {
- clock@0,60006000 {
+ clock@60006000 {
emc-timings-3 {
nvidia,ram-code = <3>;
@@ -78,7 +78,7 @@
};
};
- emc@0,7001b000 {
+ emc@7001b000 {
emc-timings-3 {
nvidia,ram-code = <3>;
@@ -2101,7 +2101,7 @@
};
};
- memory-controller@0,70019000 {
+ memory-controller@70019000 {
emc-timings-3 {
nvidia,ram-code = <3>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 941f36263c8f..e52b82449a79 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -10,8 +10,8 @@
compatible = "nvidia,jetson-tk1", "nvidia,tegra124";
aliases {
- rtc0 = "/i2c@0,7000d000/pmic@40";
- rtc1 = "/rtc@0,7000e000";
+ rtc0 = "/i2c@7000d000/pmic@40";
+ rtc1 = "/rtc@7000e000";
/* This order keeps the mapping DB9 connector <-> ttyS0 */
serial0 = &uartd;
@@ -27,7 +27,7 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
- pcie-controller@0,01003000 {
+ pcie-controller@01003000 {
status = "okay";
avddio-pex-supply = <&vdd_1v05_run>;
@@ -40,21 +40,21 @@
/* Mini PCIe */
pci@1,0 {
- phys = <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-4}>;
+ phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-4}>;
phy-names = "pcie-0";
status = "okay";
};
/* Gigabit Ethernet */
pci@2,0 {
- phys = <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-2}>;
+ phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-2}>;
phy-names = "pcie-0";
status = "okay";
};
};
- host1x@0,50000000 {
- hdmi@0,54280000 {
+ host1x@50000000 {
+ hdmi@54280000 {
status = "okay";
hdmi-supply = <&vdd_5v0_hdmi>;
@@ -75,7 +75,7 @@
vdd-supply = <&vdd_gpu>;
};
- pinmux: pinmux@0,70000868 {
+ pinmux: pinmux@70000868 {
pinctrl-names = "boot";
pinctrl-0 = <&state_boot>;
@@ -1356,14 +1356,6 @@
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_ENABLE>;
};
- owr {
- nvidia,pins = "owr";
- nvidia,function = "rsvd2";
- nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
- nvidia,tristate = <TEGRA_PIN_ENABLE>;
- nvidia,enable-input = <TEGRA_PIN_DISABLE>;
- nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
- };
clk_32k_in {
nvidia,pins = "clk_32k_in";
nvidia,function = "clk";
@@ -1378,6 +1370,10 @@
nvidia,tristate = <TEGRA_PIN_DISABLE>;
nvidia,enable-input = <TEGRA_PIN_DISABLE>;
};
+ dsi_b {
+ nvidia,pins = "mipi_pad_ctrl_dsi_b";
+ nvidia,function = "dsi_b";
+ };
};
};
@@ -1404,12 +1400,12 @@
};
/* DB9 serial port */
- serial@0,70006300 {
+ serial@70006300 {
status = "okay";
};
/* Expansion GEN1_I2C_*, mini-PCIe I2C, on-board components */
- i2c@0,7000c000 {
+ i2c@7000c000 {
status = "okay";
clock-frequency = <100000>;
@@ -1437,25 +1433,25 @@
};
/* Expansion GEN2_I2C_* */
- i2c@0,7000c400 {
+ i2c@7000c400 {
status = "okay";
clock-frequency = <100000>;
};
/* Expansion CAM_I2C_* */
- i2c@0,7000c500 {
+ i2c@7000c500 {
status = "okay";
clock-frequency = <100000>;
};
/* HDMI DDC */
- hdmi_ddc: i2c@0,7000c700 {
+ hdmi_ddc: i2c@7000c700 {
status = "okay";
clock-frequency = <100000>;
};
/* Expansion PWR_I2C_*, on-board components */
- i2c@0,7000d000 {
+ i2c@7000d000 {
status = "okay";
clock-frequency = <400000>;
@@ -1646,12 +1642,12 @@
};
/* Expansion TS_SPI_* */
- spi@0,7000d400 {
+ spi@7000d400 {
status = "okay";
};
/* Internal SPI */
- spi@0,7000da00 {
+ spi@7000da00 {
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@0 {
@@ -1661,7 +1657,7 @@
};
};
- pmc@0,7000e400 {
+ pmc@7000e400 {
nvidia,invert-interrupt;
nvidia,suspend-mode = <1>;
nvidia,cpu-pwr-good-time = <500>;
@@ -1680,10 +1676,10 @@
};
/* Serial ATA */
- sata@0,70020000 {
+ sata@70020000 {
status = "okay";
- phys = <&{/padctl@0,7009f000/pads/sata/lanes/sata-0}>;
+ phys = <&{/padctl@7009f000/pads/sata/lanes/sata-0}>;
phy-names = "sata-0";
hvdd-supply = <&vdd_3v3_lp0>;
@@ -1694,15 +1690,15 @@
target-12v-supply = <&vdd_12v0_sata>;
};
- hda@0,70030000 {
+ hda@70030000 {
status = "okay";
};
- usb@0,70090000 {
- phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-0}>, /* Micro A/B */
- <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* Mini PCIe */
- <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* USB3 */
- <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>; /* USB3 */
+ usb@70090000 {
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>, /* Micro A/B */
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>, /* Mini PCIe */
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>, /* USB3 */
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>; /* USB3 */
phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0";
avddio-pex-supply = <&vdd_1v05_run>;
@@ -1717,7 +1713,7 @@
status = "okay";
};
- padctl@0,7009f000 {
+ padctl@7009f000 {
status = "okay";
pads {
@@ -1804,7 +1800,7 @@
};
/* SD card */
- sdhci@0,700b0400 {
+ sdhci@700b0400 {
status = "okay";
cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
@@ -1814,40 +1810,40 @@
};
/* eMMC */
- sdhci@0,700b0600 {
+ sdhci@700b0600 {
status = "okay";
bus-width = <8>;
non-removable;
};
/* CPU DFLL clock */
- clock@0,70110000 {
+ clock@70110000 {
status = "okay";
vdd-cpu-supply = <&vdd_cpu>;
nvidia,i2c-fs-rate = <400000>;
};
- ahub@0,70300000 {
- i2s@0,70301100 {
+ ahub@70300000 {
+ i2s@70301100 {
status = "okay";
};
};
/* mini-PCIe USB */
- usb@0,7d004000 {
+ usb@7d004000 {
status = "okay";
};
- usb-phy@0,7d004000 {
+ usb-phy@7d004000 {
status = "okay";
};
/* USB A connector */
- usb@0,7d008000 {
+ usb@7d008000 {
status = "okay";
};
- usb-phy@0,7d008000 {
+ usb-phy@7d008000 {
status = "okay";
vbus-supply = <&vdd_usb3_vbus>;
};
@@ -2049,7 +2045,7 @@
thermal-zones {
cpu {
trips {
- trip@0 {
+ trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
@@ -2063,7 +2059,7 @@
mem {
trips {
- trip@0 {
+ trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
@@ -2077,7 +2073,7 @@
gpu {
trips {
- trip@0 {
+ trip {
temperature = <101000>;
hysteresis = <0>;
type = "critical";
diff --git a/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi b/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
index 1a5748d05dda..4458e86b2769 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan-big-emc.dtsi
@@ -1,5 +1,5 @@
/ {
- clock@0,60006000 {
+ clock@60006000 {
emc-timings-1 {
nvidia,ram-code = <1>;
@@ -67,7 +67,7 @@
};
};
- emc@0,7001b000 {
+ emc@7001b000 {
emc-timings-1 {
nvidia,ram-code = <1>;
@@ -1754,7 +1754,7 @@
};
};
- memory-controller@0,70019000 {
+ memory-controller@70019000 {
emc-timings-1 {
nvidia,ram-code = <1>;
diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts
index 2d21253ea4e3..67d7cfb32541 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts
@@ -15,7 +15,7 @@
ddc-i2c-bus = <&dpaux>;
};
- sdhci@0,700b0400 { /* SD Card on this bus */
+ sdhci@700b0400 { /* SD Card on this bus */
wp-gpios = <&gpio TEGRA_GPIO(Q, 4) GPIO_ACTIVE_LOW>;
};
@@ -26,7 +26,7 @@
nvidia,model = "GoogleNyanBig";
};
- pinmux@0,70000868 {
+ pinmux@70000868 {
pinctrl-names = "default";
pinctrl-0 = <&pinmux_default>;
diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi b/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
index 9ecd108f56cf..4e7b59e25728 100644
--- a/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan-blaze-emc.dtsi
@@ -1,5 +1,5 @@
/ {
- clock@0,60006000 {
+ clock@60006000 {
emc-timings-1 {
nvidia,ram-code = <1>;
@@ -67,7 +67,7 @@
};
};
- emc@0,7001b000 {
+ emc@7001b000 {
emc-timings-1 {
nvidia,ram-code = <1>;
@@ -1754,7 +1754,7 @@
};
};
- memory-controller@0,70019000 {
+ memory-controller@70019000 {
emc-timings-1 {
nvidia,ram-code = <1>;
diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze.dts b/arch/arm/boot/dts/tegra124-nyan-blaze.dts
index 0d30c514ffad..c9582361c26e 100644
--- a/arch/arm/boot/dts/tegra124-nyan-blaze.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-blaze.dts
@@ -22,7 +22,7 @@
nvidia,model = "GoogleNyanBlaze";
};
- pinmux@0,70000868 {
+ pinmux@70000868 {
pinctrl-names = "default";
pinctrl-0 = <&pinmux_default>;
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index 0710a600cc69..271505e0715f 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -3,8 +3,8 @@
/ {
aliases {
- rtc0 = "/i2c@0,7000d000/pmic@40";
- rtc1 = "/rtc@0,7000e000";
+ rtc0 = "/i2c@7000d000/pmic@40";
+ rtc1 = "/rtc@7000e000";
serial0 = &uarta;
};
@@ -16,8 +16,8 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
- host1x@0,50000000 {
- hdmi@0,54280000 {
+ host1x@50000000 {
+ hdmi@54280000 {
status = "okay";
vdd-supply = <&vdd_3v3_hdmi>;
@@ -29,29 +29,29 @@
<&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
};
- sor@0,54540000 {
+ sor@54540000 {
status = "okay";
nvidia,dpaux = <&dpaux>;
nvidia,panel = <&panel>;
};
- dpaux@0,545c0000 {
+ dpaux@545c0000 {
vdd-supply = <&vdd_3v3_panel>;
status = "okay";
};
};
- serial@0,70006000 {
+ serial@70006000 {
/* Debug connector on the bottom of the board near SD card. */
status = "okay";
};
- pwm@0,7000a000 {
+ pwm@7000a000 {
status = "okay";
};
- i2c@0,7000c000 {
+ i2c@7000c000 {
status = "okay";
clock-frequency = <100000>;
@@ -72,7 +72,7 @@
};
};
- i2c@0,7000c400 {
+ i2c@7000c400 {
status = "okay";
clock-frequency = <100000>;
@@ -85,7 +85,7 @@
};
};
- i2c@0,7000c500 {
+ i2c@7000c500 {
status = "okay";
clock-frequency = <400000>;
@@ -95,12 +95,12 @@
};
};
- hdmi_ddc: i2c@0,7000c700 {
+ hdmi_ddc: i2c@7000c700 {
status = "okay";
clock-frequency = <100000>;
};
- i2c@0,7000d000 {
+ i2c@7000d000 {
status = "okay";
clock-frequency = <400000>;
@@ -301,7 +301,7 @@
};
};
- spi@0,7000d400 {
+ spi@7000d400 {
status = "okay";
cros_ec: cros-ec@0 {
@@ -342,7 +342,7 @@
};
};
- spi@0,7000da00 {
+ spi@7000da00 {
status = "okay";
spi-max-frequency = <25000000>;
@@ -353,7 +353,7 @@
};
};
- pmc@0,7000e400 {
+ pmc@7000e400 {
nvidia,invert-interrupt;
nvidia,suspend-mode = <0>;
nvidia,cpu-pwr-good-time = <500>;
@@ -364,16 +364,16 @@
nvidia,sys-clock-req-active-high;
};
- hda@0,70030000 {
+ hda@70030000 {
status = "okay";
};
- usb@0,70090000 {
- phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-0}>, /* 1st USB A */
- <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* Internal USB */
- <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* 2nd USB A */
- <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>, /* 1st USB A */
- <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-1}>; /* 2nd USB A */
+ usb@70090000 {
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>, /* 1st USB A */
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>, /* Internal USB */
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>, /* 2nd USB A */
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>, /* 1st USB A */
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-1}>; /* 2nd USB A */
phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0", "usb3-1";
avddio-pex-supply = <&vdd_1v05_run>;
@@ -388,7 +388,7 @@
status = "okay";
};
- padctl@0,7009f000 {
+ padctl@7009f000 {
status = "okay";
pads {
@@ -467,7 +467,7 @@
reset-gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>;
};
- sdhci@0,700b0000 { /* WiFi/BT on this bus */
+ sdhci@700b0000 { /* WiFi/BT on this bus */
status = "okay";
bus-width = <4>;
no-1-8-v;
@@ -478,7 +478,7 @@
keep-power-in-suspend;
};
- sdhci@0,700b0400 { /* SD Card on this bus */
+ sdhci@700b0400 { /* SD Card on this bus */
status = "okay";
cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
@@ -487,7 +487,7 @@
vqmmc-supply = <&vddio_sdmmc3>;
};
- sdhci@0,700b0600 { /* eMMC on this bus */
+ sdhci@700b0600 { /* eMMC on this bus */
status = "okay";
bus-width = <8>;
no-1-8-v;
@@ -495,14 +495,14 @@
};
/* CPU DFLL clock */
- clock@0,70110000 {
+ clock@70110000 {
status = "disabled";
vdd-cpu-supply = <&vdd_cpu>;
nvidia,i2c-fs-rate = <400000>;
};
- ahub@0,70300000 {
- i2s@0,70301100 {
+ ahub@70300000 {
+ i2s@70301100 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 973446d07182..6e59cec0962b 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -8,8 +8,8 @@
compatible = "nvidia,venice2", "nvidia,tegra124";
aliases {
- rtc0 = "/i2c@0,7000d000/pmic@40";
- rtc1 = "/rtc@0,7000e000";
+ rtc0 = "/i2c@7000d000/pmic@40";
+ rtc1 = "/rtc@7000e000";
serial0 = &uarta;
};
@@ -21,8 +21,8 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
- host1x@0,50000000 {
- hdmi@0,54280000 {
+ host1x@50000000 {
+ hdmi@54280000 {
status = "okay";
vdd-supply = <&vdd_3v3_hdmi>;
@@ -34,14 +34,14 @@
<&gpio TEGRA_GPIO(N, 7) GPIO_ACTIVE_HIGH>;
};
- sor@0,54540000 {
+ sor@54540000 {
status = "okay";
nvidia,dpaux = <&dpaux>;
nvidia,panel = <&panel>;
};
- dpaux@0,545c0000 {
+ dpaux@545c0000 {
vdd-supply = <&vdd_3v3_panel>;
status = "okay";
};
@@ -55,7 +55,7 @@
vdd-supply = <&vdd_gpu>;
};
- pinmux: pinmux@0,70000868 {
+ pinmux: pinmux@70000868 {
pinctrl-names = "boot";
pinctrl-0 = <&pinmux_boot>;
@@ -596,15 +596,15 @@
};
};
- serial@0,70006000 {
+ serial@70006000 {
status = "okay";
};
- pwm@0,7000a000 {
+ pwm@7000a000 {
status = "okay";
};
- i2c@0,7000c000 {
+ i2c@7000c000 {
status = "okay";
clock-frequency = <100000>;
@@ -616,7 +616,7 @@
};
};
- i2c@0,7000c400 {
+ i2c@7000c400 {
status = "okay";
clock-frequency = <100000>;
@@ -629,17 +629,17 @@
};
};
- i2c@0,7000c500 {
+ i2c@7000c500 {
status = "okay";
clock-frequency = <100000>;
};
- hdmi_ddc: i2c@0,7000c700 {
+ hdmi_ddc: i2c@7000c700 {
status = "okay";
clock-frequency = <100000>;
};
- i2c@0,7000d000 {
+ i2c@7000d000 {
status = "okay";
clock-frequency = <400000>;
@@ -834,7 +834,7 @@
};
};
- spi@0,7000d400 {
+ spi@7000d400 {
status = "okay";
cros_ec: cros-ec@0 {
@@ -874,7 +874,7 @@
};
};
- spi@0,7000da00 {
+ spi@7000da00 {
status = "okay";
spi-max-frequency = <25000000>;
spi-flash@0 {
@@ -884,7 +884,7 @@
};
};
- pmc@0,7000e400 {
+ pmc@7000e400 {
nvidia,invert-interrupt;
nvidia,suspend-mode = <1>;
nvidia,cpu-pwr-good-time = <500>;
@@ -895,16 +895,16 @@
nvidia,sys-clock-req-active-high;
};
- hda@0,70030000 {
+ hda@70030000 {
status = "okay";
};
- usb@0,70090000 {
- phys = <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-0}>, /* 1st USB A */
- <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-1}>, /* Internal USB */
- <&{/padctl@0,7009f000/pads/usb2/lanes/usb2-2}>, /* 2nd USB A */
- <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-0}>, /* 1st USB A */
- <&{/padctl@0,7009f000/pads/pcie/lanes/pcie-1}>; /* 2nd USB A */
+ usb@70090000 {
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>, /* 1st USB A */
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>, /* Internal USB */
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>, /* 2nd USB A */
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>, /* 1st USB A */
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-1}>; /* 2nd USB A */
phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0", "usb3-1";
avddio-pex-supply = <&vdd_1v05_run>;
@@ -919,7 +919,7 @@
status = "okay";
};
- padctl@0,7009f000 {
+ padctl@7009f000 {
pads {
usb2 {
status = "okay";
@@ -998,7 +998,7 @@
};
};
- sdhci@0,700b0400 {
+ sdhci@700b0400 {
cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
power-gpios = <&gpio TEGRA_GPIO(R, 0) GPIO_ACTIVE_HIGH>;
wp-gpios = <&gpio TEGRA_GPIO(Q, 4) GPIO_ACTIVE_LOW>;
@@ -1007,41 +1007,41 @@
vqmmc-supply = <&vddio_sdmmc3>;
};
- sdhci@0,700b0600 {
+ sdhci@700b0600 {
status = "okay";
bus-width = <8>;
non-removable;
};
- ahub@0,70300000 {
- i2s@0,70301100 {
+ ahub@70300000 {
+ i2s@70301100 {
status = "okay";
};
};
- usb@0,7d000000 {
+ usb@7d000000 {
status = "okay";
};
- usb-phy@0,7d000000 {
+ usb-phy@7d000000 {
status = "okay";
vbus-supply = <&vdd_usb1_vbus>;
};
- usb@0,7d004000 {
+ usb@7d004000 {
status = "okay";
};
- usb-phy@0,7d004000 {
+ usb-phy@7d004000 {
status = "okay";
vbus-supply = <&vdd_run_cam>;
};
- usb@0,7d008000 {
+ usb@7d008000 {
status = "okay";
};
- usb-phy@0,7d008000 {
+ usb-phy@7d008000 {
status = "okay";
vbus-supply = <&vdd_usb3_vbus>;
};
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index ea4811870de2..ea340f9de448 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -14,7 +14,7 @@
#address-cells = <2>;
#size-cells = <2>;
- pcie-controller@0,01003000 {
+ pcie-controller@01003000 {
compatible = "nvidia,tegra124-pcie";
device_type = "pci";
reg = <0x0 0x01003000 0x0 0x00000800 /* PADS registers */
@@ -77,7 +77,7 @@
};
};
- host1x@0,50000000 {
+ host1x@50000000 {
compatible = "nvidia,tegra124-host1x", "simple-bus";
reg = <0x0 0x50000000 0x0 0x00034000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>, /* syncpt */
@@ -91,7 +91,7 @@
ranges = <0 0x54000000 0 0x54000000 0 0x01000000>;
- dc@0,54200000 {
+ dc@54200000 {
compatible = "nvidia,tegra124-dc";
reg = <0x0 0x54200000 0x0 0x00040000>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
@@ -106,7 +106,7 @@
nvidia,head = <0>;
};
- dc@0,54240000 {
+ dc@54240000 {
compatible = "nvidia,tegra124-dc";
reg = <0x0 0x54240000 0x0 0x00040000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
@@ -121,7 +121,7 @@
nvidia,head = <1>;
};
- hdmi@0,54280000 {
+ hdmi@54280000 {
compatible = "nvidia,tegra124-hdmi";
reg = <0x0 0x54280000 0x0 0x00040000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
@@ -133,7 +133,7 @@
status = "disabled";
};
- sor@0,54540000 {
+ sor@54540000 {
compatible = "nvidia,tegra124-sor";
reg = <0x0 0x54540000 0x0 0x00040000>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
@@ -147,7 +147,7 @@
status = "disabled";
};
- dpaux: dpaux@0,545c0000 {
+ dpaux: dpaux@545c0000 {
compatible = "nvidia,tegra124-dpaux";
reg = <0x0 0x545c0000 0x0 0x00040000>;
interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
@@ -160,7 +160,7 @@
};
};
- gic: interrupt-controller@0,50041000 {
+ gic: interrupt-controller@50041000 {
compatible = "arm,cortex-a15-gic";
#interrupt-cells = <3>;
interrupt-controller;
@@ -173,6 +173,11 @@
interrupt-parent = <&gic>;
};
+ /*
+ * Please keep the following 0, notation in place as a former mainline
+ * U-Boot version was looking for that particular notation in order to
+ * perform required fix-ups on that GPU node.
+ */
gpu@0,57000000 {
compatible = "nvidia,gk20a";
reg = <0x0 0x57000000 0x0 0x01000000>,
@@ -203,7 +208,7 @@
interrupt-parent = <&gic>;
};
- timer@0,60005000 {
+ timer@60005000 {
compatible = "nvidia,tegra124-timer", "nvidia,tegra30-timer", "nvidia,tegra20-timer";
reg = <0x0 0x60005000 0x0 0x400>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
@@ -215,7 +220,7 @@
clocks = <&tegra_car TEGRA124_CLK_TIMER>;
};
- tegra_car: clock@0,60006000 {
+ tegra_car: clock@60006000 {
compatible = "nvidia,tegra124-car";
reg = <0x0 0x60006000 0x0 0x1000>;
#clock-cells = <1>;
@@ -223,12 +228,12 @@
nvidia,external-memory-controller = <&emc>;
};
- flow-controller@0,60007000 {
+ flow-controller@60007000 {
compatible = "nvidia,tegra124-flowctrl";
reg = <0x0 0x60007000 0x0 0x1000>;
};
- actmon@0,6000c800 {
+ actmon@6000c800 {
compatible = "nvidia,tegra124-actmon";
reg = <0x0 0x6000c800 0x0 0x400>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
@@ -239,7 +244,7 @@
reset-names = "actmon";
};
- gpio: gpio@0,6000d000 {
+ gpio: gpio@6000d000 {
compatible = "nvidia,tegra124-gpio", "nvidia,tegra30-gpio";
reg = <0x0 0x6000d000 0x0 0x1000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
@@ -259,7 +264,7 @@
*/
};
- apbdma: dma@0,60020000 {
+ apbdma: dma@60020000 {
compatible = "nvidia,tegra124-apbdma", "nvidia,tegra148-apbdma";
reg = <0x0 0x60020000 0x0 0x1400>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
@@ -300,13 +305,13 @@
#dma-cells = <1>;
};
- apbmisc@0,70000800 {
+ apbmisc@70000800 {
compatible = "nvidia,tegra124-apbmisc", "nvidia,tegra20-apbmisc";
reg = <0x0 0x70000800 0x0 0x64>, /* Chip revision */
<0x0 0x7000e864 0x0 0x04>; /* Strapping options */
};
- pinmux: pinmux@0,70000868 {
+ pinmux: pinmux@70000868 {
compatible = "nvidia,tegra124-pinmux";
reg = <0x0 0x70000868 0x0 0x164>, /* Pad control registers */
<0x0 0x70003000 0x0 0x434>, /* Mux registers */
@@ -321,7 +326,7 @@
* the APB DMA based serial driver, the compatible is
* "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
*/
- uarta: serial@0,70006000 {
+ uarta: serial@70006000 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006000 0x0 0x40>;
reg-shift = <2>;
@@ -334,7 +339,7 @@
status = "disabled";
};
- uartb: serial@0,70006040 {
+ uartb: serial@70006040 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006040 0x0 0x40>;
reg-shift = <2>;
@@ -347,7 +352,7 @@
status = "disabled";
};
- uartc: serial@0,70006200 {
+ uartc: serial@70006200 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006200 0x0 0x40>;
reg-shift = <2>;
@@ -360,7 +365,7 @@
status = "disabled";
};
- uartd: serial@0,70006300 {
+ uartd: serial@70006300 {
compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
reg = <0x0 0x70006300 0x0 0x40>;
reg-shift = <2>;
@@ -373,7 +378,7 @@
status = "disabled";
};
- pwm: pwm@0,7000a000 {
+ pwm: pwm@7000a000 {
compatible = "nvidia,tegra124-pwm", "nvidia,tegra20-pwm";
reg = <0x0 0x7000a000 0x0 0x100>;
#pwm-cells = <2>;
@@ -383,7 +388,7 @@
status = "disabled";
};
- i2c@0,7000c000 {
+ i2c@7000c000 {
compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
reg = <0x0 0x7000c000 0x0 0x100>;
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
@@ -398,7 +403,7 @@
status = "disabled";
};
- i2c@0,7000c400 {
+ i2c@7000c400 {
compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
reg = <0x0 0x7000c400 0x0 0x100>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
@@ -413,7 +418,7 @@
status = "disabled";
};
- i2c@0,7000c500 {
+ i2c@7000c500 {
compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
reg = <0x0 0x7000c500 0x0 0x100>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
@@ -428,7 +433,7 @@
status = "disabled";
};
- i2c@0,7000c700 {
+ i2c@7000c700 {
compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
reg = <0x0 0x7000c700 0x0 0x100>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
@@ -443,7 +448,7 @@
status = "disabled";
};
- i2c@0,7000d000 {
+ i2c@7000d000 {
compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
reg = <0x0 0x7000d000 0x0 0x100>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
@@ -458,7 +463,7 @@
status = "disabled";
};
- i2c@0,7000d100 {
+ i2c@7000d100 {
compatible = "nvidia,tegra124-i2c", "nvidia,tegra114-i2c";
reg = <0x0 0x7000d100 0x0 0x100>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
@@ -473,7 +478,7 @@
status = "disabled";
};
- spi@0,7000d400 {
+ spi@7000d400 {
compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
reg = <0x0 0x7000d400 0x0 0x200>;
interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
@@ -488,7 +493,7 @@
status = "disabled";
};
- spi@0,7000d600 {
+ spi@7000d600 {
compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
reg = <0x0 0x7000d600 0x0 0x200>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
@@ -503,7 +508,7 @@
status = "disabled";
};
- spi@0,7000d800 {
+ spi@7000d800 {
compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
reg = <0x0 0x7000d800 0x0 0x200>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
@@ -518,7 +523,7 @@
status = "disabled";
};
- spi@0,7000da00 {
+ spi@7000da00 {
compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
reg = <0x0 0x7000da00 0x0 0x200>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
@@ -533,7 +538,7 @@
status = "disabled";
};
- spi@0,7000dc00 {
+ spi@7000dc00 {
compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
reg = <0x0 0x7000dc00 0x0 0x200>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
@@ -548,7 +553,7 @@
status = "disabled";
};
- spi@0,7000de00 {
+ spi@7000de00 {
compatible = "nvidia,tegra124-spi", "nvidia,tegra114-spi";
reg = <0x0 0x7000de00 0x0 0x200>;
interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
@@ -563,21 +568,21 @@
status = "disabled";
};
- rtc@0,7000e000 {
+ rtc@7000e000 {
compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
reg = <0x0 0x7000e000 0x0 0x100>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA124_CLK_RTC>;
};
- pmc@0,7000e400 {
+ pmc@7000e400 {
compatible = "nvidia,tegra124-pmc";
reg = <0x0 0x7000e400 0x0 0x400>;
clocks = <&tegra_car TEGRA124_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
};
- fuse@0,7000f800 {
+ fuse@7000f800 {
compatible = "nvidia,tegra124-efuse";
reg = <0x0 0x7000f800 0x0 0x400>;
clocks = <&tegra_car TEGRA124_CLK_FUSE>;
@@ -586,7 +591,7 @@
reset-names = "fuse";
};
- mc: memory-controller@0,70019000 {
+ mc: memory-controller@70019000 {
compatible = "nvidia,tegra124-mc";
reg = <0x0 0x70019000 0x0 0x1000>;
clocks = <&tegra_car TEGRA124_CLK_MC>;
@@ -597,14 +602,14 @@
#iommu-cells = <1>;
};
- emc: emc@0,7001b000 {
+ emc: emc@7001b000 {
compatible = "nvidia,tegra124-emc";
reg = <0x0 0x7001b000 0x0 0x1000>;
nvidia,memory-controller = <&mc>;
};
- sata@0,70020000 {
+ sata@70020000 {
compatible = "nvidia,tegra124-ahci";
reg = <0x0 0x70027000 0x0 0x2000>, /* AHCI */
<0x0 0x70020000 0x0 0x7000>; /* SATA */
@@ -621,7 +626,7 @@
status = "disabled";
};
- hda@0,70030000 {
+ hda@70030000 {
compatible = "nvidia,tegra124-hda", "nvidia,tegra30-hda";
reg = <0x0 0x70030000 0x0 0x10000>;
interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
@@ -636,7 +641,7 @@
status = "disabled";
};
- usb@0,70090000 {
+ usb@70090000 {
compatible = "nvidia,tegra124-xusb";
reg = <0x0 0x70090000 0x0 0x8000>,
<0x0 0x70098000 0x0 0x1000>,
@@ -671,7 +676,7 @@
status = "disabled";
};
- padctl: padctl@0,7009f000 {
+ padctl: padctl@7009f000 {
compatible = "nvidia,tegra124-xusb-padctl";
reg = <0x0 0x7009f000 0x0 0x1000>;
resets = <&tegra_car 142>;
@@ -804,7 +809,7 @@
};
};
- sdhci@0,700b0000 {
+ sdhci@700b0000 {
compatible = "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -814,7 +819,7 @@
status = "disabled";
};
- sdhci@0,700b0200 {
+ sdhci@700b0200 {
compatible = "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0200 0x0 0x200>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
@@ -824,7 +829,7 @@
status = "disabled";
};
- sdhci@0,700b0400 {
+ sdhci@700b0400 {
compatible = "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0400 0x0 0x200>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -834,7 +839,7 @@
status = "disabled";
};
- sdhci@0,700b0600 {
+ sdhci@700b0600 {
compatible = "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0600 0x0 0x200>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
@@ -844,7 +849,7 @@
status = "disabled";
};
- soctherm: thermal-sensor@0,700e2000 {
+ soctherm: thermal-sensor@700e2000 {
compatible = "nvidia,tegra124-soctherm";
reg = <0x0 0x700e2000 0x0 0x1000>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
@@ -856,7 +861,7 @@
#thermal-sensor-cells = <1>;
};
- dfll: clock@0,70110000 {
+ dfll: clock@70110000 {
compatible = "nvidia,tegra124-dfll";
reg = <0 0x70110000 0 0x100>, /* DFLL control */
<0 0x70110000 0 0x100>, /* I2C output control */
@@ -880,7 +885,7 @@
status = "disabled";
};
- ahub@0,70300000 {
+ ahub@70300000 {
compatible = "nvidia,tegra124-ahub";
reg = <0x0 0x70300000 0x0 0x200>,
<0x0 0x70300800 0x0 0x800>,
@@ -932,7 +937,7 @@
#address-cells = <2>;
#size-cells = <2>;
- tegra_i2s0: i2s@0,70301000 {
+ tegra_i2s0: i2s@70301000 {
compatible = "nvidia,tegra124-i2s";
reg = <0x0 0x70301000 0x0 0x100>;
nvidia,ahub-cif-ids = <4 4>;
@@ -942,7 +947,7 @@
status = "disabled";
};
- tegra_i2s1: i2s@0,70301100 {
+ tegra_i2s1: i2s@70301100 {
compatible = "nvidia,tegra124-i2s";
reg = <0x0 0x70301100 0x0 0x100>;
nvidia,ahub-cif-ids = <5 5>;
@@ -952,7 +957,7 @@
status = "disabled";
};
- tegra_i2s2: i2s@0,70301200 {
+ tegra_i2s2: i2s@70301200 {
compatible = "nvidia,tegra124-i2s";
reg = <0x0 0x70301200 0x0 0x100>;
nvidia,ahub-cif-ids = <6 6>;
@@ -962,7 +967,7 @@
status = "disabled";
};
- tegra_i2s3: i2s@0,70301300 {
+ tegra_i2s3: i2s@70301300 {
compatible = "nvidia,tegra124-i2s";
reg = <0x0 0x70301300 0x0 0x100>;
nvidia,ahub-cif-ids = <7 7>;
@@ -972,7 +977,7 @@
status = "disabled";
};
- tegra_i2s4: i2s@0,70301400 {
+ tegra_i2s4: i2s@70301400 {
compatible = "nvidia,tegra124-i2s";
reg = <0x0 0x70301400 0x0 0x100>;
nvidia,ahub-cif-ids = <8 8>;
@@ -983,7 +988,7 @@
};
};
- usb@0,7d000000 {
+ usb@7d000000 {
compatible = "nvidia,tegra124-ehci", "nvidia,tegra30-ehci", "usb-ehci";
reg = <0x0 0x7d000000 0x0 0x4000>;
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
@@ -995,7 +1000,7 @@
status = "disabled";
};
- phy1: usb-phy@0,7d000000 {
+ phy1: usb-phy@7d000000 {
compatible = "nvidia,tegra124-usb-phy", "nvidia,tegra30-usb-phy";
reg = <0x0 0x7d000000 0x0 0x4000>,
<0x0 0x7d000000 0x0 0x4000>;
@@ -1020,7 +1025,7 @@
status = "disabled";
};
- usb@0,7d004000 {
+ usb@7d004000 {
compatible = "nvidia,tegra124-ehci", "nvidia,tegra30-ehci", "usb-ehci";
reg = <0x0 0x7d004000 0x0 0x4000>;
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
@@ -1032,7 +1037,7 @@
status = "disabled";
};
- phy2: usb-phy@0,7d004000 {
+ phy2: usb-phy@7d004000 {
compatible = "nvidia,tegra124-usb-phy", "nvidia,tegra30-usb-phy";
reg = <0x0 0x7d004000 0x0 0x4000>,
<0x0 0x7d000000 0x0 0x4000>;
@@ -1056,7 +1061,7 @@
status = "disabled";
};
- usb@0,7d008000 {
+ usb@7d008000 {
compatible = "nvidia,tegra124-ehci", "nvidia,tegra30-ehci", "usb-ehci";
reg = <0x0 0x7d008000 0x0 0x4000>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
@@ -1068,7 +1073,7 @@
status = "disabled";
};
- phy3: usb-phy@0,7d008000 {
+ phy3: usb-phy@7d008000 {
compatible = "nvidia,tegra124-usb-phy", "nvidia,tegra30-usb-phy";
reg = <0x0 0x7d008000 0x0 0x4000>,
<0x0 0x7d000000 0x0 0x4000>;
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 8e0066ad9628..1242b841f147 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -478,7 +478,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index d2e960cbc001..d4fb4d39ede7 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -646,7 +646,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 33ed2b23026b..4e361a8c167e 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -512,7 +512,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 94b60a710dd8..2017acacc00c 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -798,7 +798,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 025e9e8037da..27d2bbbf1eae 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -508,7 +508,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 4a035f74043a..381747f114a9 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -383,7 +383,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index a28c060a839b..8f0aaabf7e28 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -592,7 +592,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index 073806d07b2b..1e06f854c8b4 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -569,7 +569,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index bf361277fe10..192b95177aac 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -567,7 +567,7 @@
blocks = <0x5>;
irq-trigger = <0x1>;
- stmpe_touchscreen {
+ stmpe_touchscreen@0 {
compatible = "st,stmpe-ts";
reg = <0>;
/* 3.25 MHz ADC clock speed */
@@ -674,13 +674,14 @@
clk32k_in: clk@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
+
clk16m: clk@1 {
compatible = "fixed-clock";
- reg=<1>;
+ reg = <1>;
#clock-cells = <0>;
clock-frequency = <16000000>;
clock-output-names = "clk16m";
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index b6da15d823a6..0350002849d5 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -1952,7 +1952,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 4721c1c9c780..f11012bb58cc 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -423,7 +423,7 @@
clk32k_in: clock@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
index 76875c3160fe..a8c0318743b6 100644
--- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
@@ -131,7 +131,7 @@
clocks {
clk16m: clk@1 {
compatible = "fixed-clock";
- reg=<1>;
+ reg = <1>;
#clock-cells = <0>;
clock-frequency = <16000000>;
clock-output-names = "clk16m";
diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi
index 2d8c58fd9357..a265534cd314 100644
--- a/arch/arm/boot/dts/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/tegra30-colibri.dtsi
@@ -420,7 +420,7 @@
clk32k_in: clk@0 {
compatible = "fixed-clock";
- reg=<0>;
+ reg = <0>;
#clock-cells = <0>;
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tny_a9260_common.dtsi b/arch/arm/boot/dts/tny_a9260_common.dtsi
index ce7138c3af1b..f9dc463b9e48 100644
--- a/arch/arm/boot/dts/tny_a9260_common.dtsi
+++ b/arch/arm/boot/dts/tny_a9260_common.dtsi
@@ -16,15 +16,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/tny_a9263.dts b/arch/arm/boot/dts/tny_a9263.dts
index 3043296345b7..9161cd9889b4 100644
--- a/arch/arm/boot/dts/tny_a9263.dts
+++ b/arch/arm/boot/dts/tny_a9263.dts
@@ -21,15 +21,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -99,7 +90,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/uniphier-common32.dtsi b/arch/arm/boot/dts/uniphier-common32.dtsi
index 61a095598206..03f60ec340b5 100644
--- a/arch/arm/boot/dts/uniphier-common32.dtsi
+++ b/arch/arm/boot/dts/uniphier-common32.dtsi
@@ -105,6 +105,8 @@
reg = <0x58c00000 0x400>;
#address-cells = <2>;
#size-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_system_bus>;
};
smpctrl@59800000 {
@@ -134,9 +136,13 @@
interrupt-controller;
};
- pinctrl: pinctrl@5f801000 {
- /* specify compatible in each SoC DTSI */
- reg = <0x5f801000 0xe00>;
+ soc-glue@5f800000 {
+ compatible = "simple-mfd", "syscon";
+ reg = <0x5f800000 0x2000>;
+
+ pinctrl: pinctrl {
+ /* specify compatible in each SoC DTSI */
+ };
};
};
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi b/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
index dadd86070c98..debad7ffef05 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
@@ -182,5 +182,5 @@
};
&pinctrl {
- compatible = "socionext,ph1-ld4-pinctrl", "syscon";
+ compatible = "socionext,uniphier-ld4-pinctrl";
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi b/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi
index 532115234025..19c107c66bae 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi
@@ -63,5 +63,5 @@
* which makes the pinctrl driver unshareable.
*/
&pinctrl {
- compatible = "socionext,ph1-ld6b-pinctrl", "syscon";
+ compatible = "socionext,uniphier-ld6b-pinctrl";
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi b/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
index 20f3f2ae7fa4..7b9da0852005 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
@@ -200,5 +200,5 @@
};
&pinctrl {
- compatible = "socionext,ph1-pro4-pinctrl", "syscon";
+ compatible = "socionext,uniphier-pro4-pinctrl";
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi b/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi
index 24f6f664b269..7e4aa2fde719 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi
@@ -194,5 +194,5 @@
};
&pinctrl {
- compatible = "socionext,ph1-pro5-pinctrl", "syscon";
+ compatible = "socionext,uniphier-pro5-pinctrl";
};
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi b/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
index 6bfd29a05575..467f9d8e9873 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
@@ -181,5 +181,5 @@
};
&pinctrl {
- compatible = "socionext,ph1-sld8-pinctrl", "syscon";
+ compatible = "socionext,uniphier-sld8-pinctrl";
};
diff --git a/arch/arm/boot/dts/uniphier-pinctrl.dtsi b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
index f2f3fbe2d517..10a711041b4a 100644
--- a/arch/arm/boot/dts/uniphier-pinctrl.dtsi
+++ b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
@@ -78,6 +78,11 @@
function = "nand";
};
+ pinctrl_system_bus: system_bus_grp {
+ groups = "system_bus", "system_bus_cs1";
+ function = "system_bus";
+ };
+
pinctrl_uart0: uart0_grp {
groups = "uart0";
function = "uart0";
diff --git a/arch/arm/boot/dts/uniphier-proxstream2-gentil.dts b/arch/arm/boot/dts/uniphier-proxstream2-gentil.dts
index bf2619e4d489..98d895b7af1d 100644
--- a/arch/arm/boot/dts/uniphier-proxstream2-gentil.dts
+++ b/arch/arm/boot/dts/uniphier-proxstream2-gentil.dts
@@ -55,13 +55,13 @@
};
chosen {
- stdout-path = "serial2:115200n8";
+ stdout-path = "serial0:115200n8";
};
aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
+ serial0 = &serial2;
+ serial1 = &serial0;
+ serial2 = &serial1;
i2c0 = &i2c0;
i2c2 = &i2c2;
i2c4 = &i2c4;
diff --git a/arch/arm/boot/dts/uniphier-proxstream2-vodka.dts b/arch/arm/boot/dts/uniphier-proxstream2-vodka.dts
index 498acac3d95d..1fb8bd7bb686 100644
--- a/arch/arm/boot/dts/uniphier-proxstream2-vodka.dts
+++ b/arch/arm/boot/dts/uniphier-proxstream2-vodka.dts
@@ -55,13 +55,13 @@
};
chosen {
- stdout-path = "serial2:115200n8";
+ stdout-path = "serial0:115200n8";
};
aliases {
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
+ serial0 = &serial2;
+ serial1 = &serial0;
+ serial2 = &serial1;
i2c0 = &i2c0;
i2c4 = &i2c4;
i2c5 = &i2c5;
diff --git a/arch/arm/boot/dts/uniphier-proxstream2.dtsi b/arch/arm/boot/dts/uniphier-proxstream2.dtsi
index 4ac484c6ce4e..d00d6f5c2668 100644
--- a/arch/arm/boot/dts/uniphier-proxstream2.dtsi
+++ b/arch/arm/boot/dts/uniphier-proxstream2.dtsi
@@ -205,5 +205,5 @@
};
&pinctrl {
- compatible = "socionext,proxstream2-pinctrl", "syscon";
+ compatible = "socionext,uniphier-pxs2-pinctrl";
};
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi
index 9beea8976584..7514b347cdd2 100644
--- a/arch/arm/boot/dts/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9260_common.dtsi
@@ -8,15 +8,6 @@
/ {
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -90,7 +81,7 @@
};
};
- usb0: ohci@00500000 {
+ usb0: ohci@500000 {
num-ports = <2>;
status = "okay";
};
@@ -119,7 +110,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index 8cc6edb29694..bfc48a272417 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -21,15 +21,6 @@
};
clocks {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- main_clock: clock@0 {
- compatible = "atmel,osc", "fixed-clock";
- clock-frequency = <12000000>;
- };
-
slow_xtal {
clock-frequency = <32768>;
};
@@ -147,7 +138,7 @@
};
};
- i2c@0 {
+ i2c-gpio-0 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/usb_a9g20_common.dtsi b/arch/arm/boot/dts/usb_a9g20_common.dtsi
index 0b3b36182fe5..088c2c3685ab 100644
--- a/arch/arm/boot/dts/usb_a9g20_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9g20_common.dtsi
@@ -11,14 +11,15 @@
/ {
chosen {
- bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock5 rw rootfstype=ubifs";
+ bootargs = "mem=64M root=/dev/mtdblock5 rw rootfstype=ubifs";
+ stdout-path = "serial0:115200n8";
};
memory {
reg = <0x20000000 0x4000000>;
};
- i2c@0 {
+ i2c-gpio-0 {
rv3029c2@56 {
compatible = "rv3029c2";
reg = <0x56>;
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 1143c4d5c567..301281645d08 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -310,7 +310,7 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
*/
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_addr;
int ret;
@@ -344,7 +344,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
* should be)
*/
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct safe_buffer *buf;
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
deleted file mode 100644
index 909049a280ec..000000000000
--- a/arch/arm/configs/bcm_defconfig
+++ /dev/null
@@ -1,141 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=19
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
-CONFIG_NAMESPACES=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_ARCH_BCM=y
-CONFIG_ARCH_BCM_21664=y
-CONFIG_ARCH_BCM_281XX=y
-CONFIG_ARM_THUMBEE=y
-CONFIG_SMP=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-# CONFIG_COMPACTION is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200n8 mem=128M"
-CONFIG_CPU_IDLE=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_PACKET_DIAG=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_SYN_COOKIES=y
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_PROC_DEVICETREE=y
-# CONFIG_BLK_DEV is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_INPUT_FF_MEMLESS=y
-CONFIG_INPUT_JOYDEV=y
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_8250_DW=y
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-# CONFIG_HWMON is not set
-CONFIG_MFD_BCM590XX=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_REGULATOR_USERSPACE_CONSUMER=y
-CONFIG_REGULATOR_BCM590XX=y
-
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_FB=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_PWM=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=32
-CONFIG_MMC_TEST=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_BCM_KONA=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_PWM=y
-CONFIG_PWM_BCM_KONA=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_CONFIGFS_FS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=110
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
-# CONFIG_FTRACE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
-CONFIG_XZ_DEC=y
-CONFIG_AVERAGE=y
-CONFIG_PINCTRL_BCM281XX=y
-CONFIG_WATCHDOG=y
-CONFIG_BCM_KONA_WDT=y
-CONFIG_BCM_KONA_WDT_DEBUG=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 47195e8690b4..01986deef7c5 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -1,5 +1,4 @@
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CGROUPS=y
@@ -13,8 +12,6 @@ CONFIG_ARCH_EXYNOS3=y
CONFIG_EXYNOS5420_MCPM=y
CONFIG_SMP=y
CONFIG_BIG_LITTLE=y
-CONFIG_BL_SWITCHER=y
-CONFIG_BL_SWITCHER_DUMMY_IF=y
CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
CONFIG_AEABI=y
@@ -84,9 +81,9 @@ CONFIG_INPUT_MAX77693_HAPTIC=y
CONFIG_INPUT_MAX8997_HAPTIC=y
CONFIG_KEYBOARD_SAMSUNG=y
CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_HW_RANDOM=y
CONFIG_TCG_TPM=y
CONFIG_TCG_TIS_I2C_INFINEON=y
@@ -112,10 +109,8 @@ CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_NTC_THERMISTOR=y
CONFIG_SENSORS_PWM_FAN=y
CONFIG_SENSORS_INA2XX=y
-CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
-CONFIG_EXYNOS_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MFD_CROS_EC=y
@@ -144,20 +139,32 @@ CONFIG_REGULATOR_TPS65090=y
CONFIG_REGULATOR_WM8994=y
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
+CONFIG_VIDEO_S5P_FIMC=m
+CONFIG_VIDEO_S5P_MIPI_CSIS=m
+CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
+CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_V4L_TEST_DRIVERS=y
CONFIG_DRM=y
-CONFIG_DRM_NXP_PTN3460=y
-CONFIG_DRM_PARADE_PS8622=y
CONFIG_DRM_EXYNOS=y
CONFIG_DRM_EXYNOS_FIMD=y
-CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_MIXER=y
CONFIG_DRM_EXYNOS_DPI=y
+CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_SAMSUNG_LD9040=y
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=y
+CONFIG_DRM_NXP_PTN3460=y
+CONFIG_DRM_PARADE_PS8622=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
CONFIG_LCD_CLASS_DEVICE=y
@@ -171,7 +178,6 @@ CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994=y
CONFIG_SND_SOC_SMDK_WM8994_PCM=y
CONFIG_SND_SOC_SNOW=y
-CONFIG_SND_SOC_ODROIDX2=y
CONFIG_SND_SIMPLE_CARD=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
@@ -209,7 +215,6 @@ CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_S3C=y
CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
-CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC_CHARDEV=y
CONFIG_COMMON_CLK_MAX77686=y
CONFIG_COMMON_CLK_MAX77802=y
@@ -227,7 +232,6 @@ CONFIG_PWM_SAMSUNG=y
CONFIG_PHY_EXYNOS5250_SATA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -260,3 +264,4 @@ CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRC_CCITT=y
CONFIG_FONTS=y
CONFIG_FONT_7x14=y
+CONFIG_VIDEO_VIVID=m
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 21339ce29654..3219480b9dbf 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -146,7 +146,7 @@ CONFIG_MICREL_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
-CONFIG_USB_USBNET=m
+CONFIG_USB_USBNET=y
CONFIG_USB_NET_CDC_EEM=m
CONFIG_BRCMFMAC=m
CONFIG_WL12XX=m
@@ -161,6 +161,7 @@ CONFIG_KEYBOARD_IMX=y
CONFIG_MOUSE_PS2=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
CONFIG_TOUCHSCREEN_EGALAX=y
CONFIG_TOUCHSCREEN_IMX6UL_TSC=y
CONFIG_TOUCHSCREEN_EDT_FT5X06=y
@@ -296,6 +297,7 @@ CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
CONFIG_USB_G_NCM=m
CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index faba04d93ad5..71b42e66488a 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -144,6 +144,7 @@ CONFIG_I2C_DAVINCI=y
CONFIG_SPI=y
CONFIG_SPI_DAVINCI=y
CONFIG_SPI_SPIDEV=y
+CONFIG_PINCTRL_SINGLE=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DAVINCI=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index 2ae00b09cfc2..2de1bf0e497e 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -149,6 +149,8 @@ CONFIG_PWM=y
CONFIG_PWM_LPC18XX_SCT=y
CONFIG_IIO=y
CONFIG_MMA7455_I2C=y
+CONFIG_LPC18XX_ADC=y
+CONFIG_LPC18XX_DAC=y
CONFIG_IIO_SYSFS_TRIGGER=y
CONFIG_PHY_LPC18XX_USB_OTG=y
CONFIG_NVMEM=y
diff --git a/arch/arm/configs/multi_v4t_defconfig b/arch/arm/configs/multi_v4t_defconfig
new file mode 100644
index 000000000000..433eebb4103f
--- /dev/null
+++ b/arch/arm/configs/multi_v4t_defconfig
@@ -0,0 +1,106 @@
+CONFIG_KERNEL_LZMA=y
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_SLOB=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_LBDAF is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_MULTI_V4T=y
+# CONFIG_ARCH_MULTI_V7 is not set
+CONFIG_ARCH_AT91=y
+CONFIG_SOC_AT91RM9200=y
+CONFIG_ARCH_CLPS711X=y
+CONFIG_ARCH_INTEGRATOR=y
+CONFIG_ARCH_INTEGRATOR_AP=y
+CONFIG_INTEGRATOR_IMPD1=y
+CONFIG_INTEGRATOR_CM720T=y
+CONFIG_INTEGRATOR_CM920T=y
+CONFIG_INTEGRATOR_CM922T_XA10=y
+CONFIG_ARCH_MXC=y
+CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
+CONFIG_MACH_IMX1_DT=y
+CONFIG_ARCH_NSPIRE=y
+CONFIG_AEABI=y
+# CONFIG_ATAGS is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_CLPS711X_CPUIDLE=y
+# CONFIG_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_GPIO=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_CLPS711X=y
+CONFIG_SERIAL_CLPS711X_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_AT91=y
+CONFIG_I2C_GPIO=y
+CONFIG_I2C_IMX=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
+CONFIG_SPI_CLPS711X=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_IMX=y
+CONFIG_PINCTRL_AT91PIO4=y
+CONFIG_GPIO_74XX_MMIO=y
+CONFIG_GPIO_CLPS711X=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_GPIO_SYSCON=y
+CONFIG_W1=y
+CONFIG_W1_MASTER_MXC=y
+CONFIG_W1_MASTER_GPIO=y
+CONFIG_POWER_RESET_AT91_POWEROFF=y
+CONFIG_POWER_RESET_AT91_RESET=y
+CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_GPIO_WATCHDOG=y
+CONFIG_AT91RM9200_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_FB=y
+CONFIG_FB_CLPS711X=y
+CONFIG_FB_IMX=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_PWM=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_PWM=y
+CONFIG_PWM_ATMEL=y
+CONFIG_PWM_CLPS711X=y
+CONFIG_PWM_IMX=y
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_CRAMFS=y
+CONFIG_MINIX_FS=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_USER=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 4f82656f7ea2..2658b80fa263 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -13,6 +13,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_MVEBU=y
CONFIG_MACH_KIRKWOOD=y
+CONFIG_ARCH_AT91=y
+CONFIG_SOC_AT91SAM9=y
CONFIG_ARCH_ASPEED=y
CONFIG_MACH_ASPEED_G4=y
CONFIG_ARCH_MXC=y
@@ -89,8 +91,12 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_NAND_ORION=y
+CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_SSC=m
CONFIG_EEPROM_AT24=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
@@ -102,15 +108,22 @@ CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_NET_DSA_MV88E6060=y
CONFIG_NET_DSA_MV88E6XXX=y
+CONFIG_MACB=y
+CONFIG_DM9000=y
CONFIG_MV643XX_ETH=y
CONFIG_R8169=y
CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_MICREL_PHY=y
CONFIG_LIBERTAS=y
CONFIG_LIBERTAS_SDIO=y
CONFIG_MWL8K=m
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_QT1070=m
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=m
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
@@ -120,16 +133,21 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_TTYAT=y
CONFIG_SERIAL_IMX=y
CONFIG_SERIAL_IMX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=y
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_AT91=y
CONFIG_I2C_IMX=y
CONFIG_I2C_MV64XXX=y
CONFIG_I2C_NOMADIK=y
CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
CONFIG_SPI_IMX=y
CONFIG_SPI_ORION=y
CONFIG_GPIO_SYSFS=y
@@ -143,18 +161,36 @@ CONFIG_SENSORS_LM75=y
CONFIG_SENSORS_LM85=y
CONFIG_THERMAL=y
CONFIG_KIRKWOOD_THERMAL=y
+CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_ORION_WATCHDOG=y
CONFIG_IMX2_WDT=y
+CONFIG_MFD_ATMEL_HLCDC=y
# CONFIG_ABX500_CORE is not set
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_VIDEO_ATMEL_ISI=m
+CONFIG_SOC_CAMERA_OV2640=m
+CONFIG_DRM=y
+CONFIG_DRM_ATMEL_HLCDC=m
+CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_FB=y
CONFIG_FB_IMX=y
+CONFIG_FB_ATMEL=y
+CONFIG_BACKLIGHT_ATMEL_LCDC=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
+CONFIG_SND_ATMEL_SOC=y
+CONFIG_SND_AT91_SOC_SAM9G20_WM8731=m
+CONFIG_SND_ATMEL_SOC_WM8904=m
+CONFIG_SND_AT91_SOC_SAM9X5_WM8731=m
CONFIG_SND_KIRKWOOD_SOC=y
CONFIG_SND_SOC_ALC5623=y
+CONFIG_SND_SOC_WM8731=y
CONFIG_SND_SIMPLE_CARD=y
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_GYRATION=y
@@ -173,6 +209,7 @@ CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
CONFIG_USB_PRINTER=m
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_DATAFAB=y
@@ -182,8 +219,12 @@ CONFIG_USB_STORAGE_SDDR55=y
CONFIG_USB_STORAGE_JUMPSHOT=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_AT91=m
+CONFIG_USB_ATMEL_USBA=m
CONFIG_MMC=y
CONFIG_SDIO_UART=y
+CONFIG_MMC_ATMELMCI=y
CONFIG_MMC_MVSDIO=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -196,11 +237,21 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RS5C372=y
CONFIG_RTC_DRV_PCF8563=y
CONFIG_RTC_DRV_S35390A=y
+CONFIG_RTC_DRV_RV3029C2=m
+CONFIG_RTC_DRV_AT91RM9200=m
+CONFIG_RTC_DRV_AT91SAM9=m
CONFIG_RTC_DRV_MV=y
CONFIG_DMADEVICES=y
+CONFIG_AT_HDMAC=y
CONFIG_MV_XOR=y
CONFIG_STAGING=y
CONFIG_FB_XGI=y
+CONFIG_IIO=m
+CONFIG_AT91_ADC=m
+CONFIG_PWM=y
+CONFIG_PWM_ATMEL=m
+CONFIG_PWM_ATMEL_HLCDC_PWM=m
+CONFIG_PWM_ATMEL_TCB=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_ISO9660_FS=m
@@ -210,6 +261,7 @@ CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 8a5fff1b7f6f..2c8665cd9dc5 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -36,6 +36,7 @@ CONFIG_ARCH_BCM_21664=y
CONFIG_ARCH_BCM_281XX=y
CONFIG_ARCH_BCM_5301X=y
CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM_63XX=y
CONFIG_ARCH_BRCMSTB=y
CONFIG_ARCH_BERLIN=y
CONFIG_MACH_BERLIN_BG2=y
@@ -90,6 +91,7 @@ CONFIG_ARCH_R8A7778=y
CONFIG_ARCH_R8A7779=y
CONFIG_ARCH_R8A7790=y
CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7792=y
CONFIG_ARCH_R8A7793=y
CONFIG_ARCH_R8A7794=y
CONFIG_ARCH_SH73A0=y
@@ -157,6 +159,8 @@ CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NET_DSA=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_CAN=y
CONFIG_CAN_RAW=y
CONFIG_CAN_BCM=y
@@ -165,6 +169,7 @@ CONFIG_CAN_AT91=m
CONFIG_CAN_RCAR=m
CONFIG_CAN_XILINXCAN=y
CONFIG_CAN_MCP251X=y
+CONFIG_NET_DSA_BCM_SF2=m
CONFIG_CAN_SUN4I=y
CONFIG_BT=m
CONFIG_BT_MRVL=m
@@ -213,6 +218,7 @@ CONFIG_SCSI_MULTI_LUN=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_BRCM=y
CONFIG_AHCI_ST=y
CONFIG_AHCI_SUNXI=y
CONFIG_AHCI_TEGRA=y
@@ -224,6 +230,8 @@ CONFIG_VIRTIO_NET=y
CONFIG_HIX5HD2_GMAC=y
CONFIG_SUN4I_EMAC=y
CONFIG_MACB=y
+CONFIG_BCMGENET=m
+CONFIG_SYSTEMPORT=m
CONFIG_NET_CALXEDA_XGMAC=y
CONFIG_GIANFAR=y
CONFIG_IGB=y
@@ -270,6 +278,7 @@ CONFIG_MOUSE_CYAPA=m
CONFIG_MOUSE_ELAN_I2C=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_TOUCHSCREEN_MMS114=m
CONFIG_TOUCHSCREEN_ST1232=m
CONFIG_TOUCHSCREEN_STMPE=y
CONFIG_TOUCHSCREEN_SUN4I=y
@@ -292,6 +301,8 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_TTYAT=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
CONFIG_SERIAL_MESON=y
CONFIG_SERIAL_MESON_CONSOLE=y
CONFIG_SERIAL_SAMSUNG=y
@@ -358,6 +369,7 @@ CONFIG_SPI_BCM2835=y
CONFIG_SPI_BCM2835AUX=y
CONFIG_SPI_CADENCE=y
CONFIG_SPI_DAVINCI=y
+CONFIG_SPI_GPIO=m
CONFIG_SPI_FSL_DSPI=m
CONFIG_SPI_OMAP24XX=y
CONFIG_SPI_ORION=y
@@ -404,12 +416,14 @@ CONFIG_GPIO_PALMAS=y
CONFIG_GPIO_SYSCON=y
CONFIG_GPIO_TPS6586X=y
CONFIG_GPIO_TPS65910=y
+CONFIG_BATTERY_ACT8945A=y
CONFIG_BATTERY_SBS=y
CONFIG_BATTERY_MAX17040=m
CONFIG_BATTERY_MAX17042=m
CONFIG_CHARGER_MAX14577=m
CONFIG_CHARGER_MAX77693=m
CONFIG_CHARGER_MAX8997=m
+CONFIG_CHARGER_MAX8998=m
CONFIG_CHARGER_TPS65090=y
CONFIG_AXP20X_POWER=m
CONFIG_POWER_RESET_AS3722=y
@@ -449,6 +463,9 @@ CONFIG_MESON_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_DIGICOLOR_WATCHDOG=y
CONFIG_BCM2835_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCM_KONA_WDT=y
+CONFIG_MFD_ACT8945A=y
CONFIG_MFD_AS3711=y
CONFIG_MFD_AS3722=y
CONFIG_MFD_ATMEL_FLEXCOM=y
@@ -463,9 +480,10 @@ CONFIG_MFD_CROS_EC_SPI=m
CONFIG_MFD_DA9063=m
CONFIG_MFD_MAX14577=y
CONFIG_MFD_MAX77686=y
-CONFIG_MFD_MAX77693=y
+CONFIG_MFD_MAX77693=m
CONFIG_MFD_MAX8907=y
CONFIG_MFD_MAX8997=y
+CONFIG_MFD_MAX8998=y
CONFIG_MFD_RK808=y
CONFIG_MFD_PM8921_CORE=y
CONFIG_MFD_QCOM_RPM=y
@@ -478,6 +496,7 @@ CONFIG_MFD_TPS65217=y
CONFIG_MFD_TPS65218=y
CONFIG_MFD_TPS6586X=y
CONFIG_MFD_TPS65910=y
+CONFIG_REGULATOR_ACT8945A=y
CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_ACT8865=y
CONFIG_REGULATOR_AS3711=y
@@ -495,6 +514,7 @@ CONFIG_REGULATOR_MAX14577=m
CONFIG_REGULATOR_MAX8907=y
CONFIG_REGULATOR_MAX8973=y
CONFIG_REGULATOR_MAX8997=m
+CONFIG_REGULATOR_MAX8998=m
CONFIG_REGULATOR_MAX77686=y
CONFIG_REGULATOR_MAX77693=m
CONFIG_REGULATOR_MAX77802=m
@@ -515,6 +535,7 @@ CONFIG_REGULATOR_TPS6586X=y
CONFIG_REGULATOR_TPS65910=y
CONFIG_REGULATOR_TWL4030=y
CONFIG_REGULATOR_VEXPRESS=y
+CONFIG_REGULATOR_WM8994=m
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
@@ -527,9 +548,18 @@ CONFIG_SOC_CAMERA=m
CONFIG_SOC_CAMERA_PLATFORM=m
CONFIG_VIDEO_RCAR_VIN=m
CONFIG_VIDEO_ATMEL_ISI=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
+CONFIG_VIDEO_S5P_FIMC=m
+CONFIG_VIDEO_S5P_MIPI_CSIS=m
+CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
+CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_STI_BDISP=m
CONFIG_VIDEO_RENESAS_JPU=m
CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_V4L_TEST_DRIVERS=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7180=m
CONFIG_VIDEO_ML86V7667=m
@@ -541,8 +571,10 @@ CONFIG_DRM_NXP_PTN3460=m
CONFIG_DRM_PARADE_PS8622=m
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_EXYNOS=m
-CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_MIXER=y
+CONFIG_DRM_EXYNOS_DPI=y
+CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_ROCKCHIP=m
CONFIG_ROCKCHIP_ANALOGIX_DP=m
@@ -553,9 +585,12 @@ CONFIG_DRM_ATMEL_HLCDC=m
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_RCAR_HDMI=y
CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_SUN4I=m
CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_STI=m
CONFIG_DRM_VC4=y
CONFIG_FB_ARMCLCD=y
CONFIG_FB_WM8505=y
@@ -588,13 +623,14 @@ CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_MAX98090=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
+CONFIG_SND_SOC_SAMSUNG=m
+CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994=m
+CONFIG_SND_SOC_SMDK_WM8994_PCM=m
+CONFIG_SND_SOC_SNOW=m
CONFIG_SND_SOC_SH4_FSI=m
CONFIG_SND_SOC_RCAR=m
CONFIG_SND_SOC_RSRC_CARD=m
CONFIG_SND_SUN4I_CODEC=m
-CONFIG_SND_SOC_SAMSUNG=m
-CONFIG_SND_SOC_SNOW=m
-CONFIG_SND_SOC_ODROIDX2=m
CONFIG_SND_SOC_TEGRA=m
CONFIG_SND_SOC_TEGRA_RT5640=m
CONFIG_SND_SOC_TEGRA_WM8753=m
@@ -700,6 +736,7 @@ CONFIG_RTC_DRV_AS3722=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_MAX8907=y
+CONFIG_RTC_DRV_MAX8998=m
CONFIG_RTC_DRV_MAX8997=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
@@ -791,6 +828,7 @@ CONFIG_BERLIN2_ADC=m
CONFIG_EXYNOS_ADC=m
CONFIG_VF610_ADC=m
CONFIG_XILINX_XADC=y
+CONFIG_CM36651=m
CONFIG_AK8975=y
CONFIG_RASPBERRYPI_POWER=y
CONFIG_PWM=y
@@ -807,6 +845,7 @@ CONFIG_PWM_VT8500=y
CONFIG_PHY_HIX5HD2_SATA=y
CONFIG_PWM_STI=y
CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
CONFIG_OMAP_USB2=y
CONFIG_TI_PIPE3=y
CONFIG_PHY_BERLIN_USB=y
@@ -823,6 +862,7 @@ CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_SUN9I_USB=y
CONFIG_PHY_SAMSUNG_USB2=m
CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_PHY_BRCM_SATA=y
CONFIG_NVMEM=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_BCM2835_MBOX=y
@@ -855,6 +895,7 @@ CONFIG_KEYSTONE_IRQ=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_ST=y
CONFIG_CRYPTO_DEV_MARVELL_CESA=m
+CONFIG_CRYPTO_DEV_S5P=m
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_CRYPTO_DEV_ROCKCHIP=m
CONFIG_ARM_CRYPTO=y
@@ -871,6 +912,7 @@ CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_DEV_ATMEL_AES=m
CONFIG_CRYPTO_DEV_ATMEL_TDES=m
CONFIG_CRYPTO_DEV_ATMEL_SHA=m
+CONFIG_VIDEO_VIVID=m
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index ac717cccd2b5..487c6c3b13fd 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -355,11 +355,13 @@ CONFIG_USB_OHCI_HCD=m
CONFIG_USB_WDM=m
CONFIG_USB_STORAGE=m
CONFIG_USB_MUSB_HDRC=m
+CONFIG_USB_MUSB_TUSB6010=m
CONFIG_USB_MUSB_OMAP2PLUS=m
CONFIG_USB_MUSB_AM35X=m
CONFIG_USB_MUSB_DSPS=m
CONFIG_USB_INVENTRA_DMA=y
CONFIG_USB_TI_CPPI41_DMA=y
+CONFIG_USB_TUSB_OMAP_DMA=y
CONFIG_USB_DWC3=m
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
@@ -367,7 +369,8 @@ CONFIG_USB_SERIAL_SIMPLE=m
CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_TEST=m
-CONFIG_AM335X_PHY_USB=y
+CONFIG_NOP_USB_XCEIV=m
+CONFIG_AM335X_PHY_USB=m
CONFIG_TWL6030_USB=m
CONFIG_USB_GADGET=m
CONFIG_USB_GADGET_DEBUG=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 7bff7bf24a85..c2dff4fd5fc4 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -23,6 +23,7 @@ CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8X60=y
CONFIG_ARCH_MSM8960=y
CONFIG_ARCH_MSM8974=y
+CONFIG_ARCH_MDM9615=y
CONFIG_SMP=y
CONFIG_HAVE_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
@@ -94,6 +95,7 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
@@ -103,7 +105,9 @@ CONFIG_SPMI=y
CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ8064=y
+CONFIG_PINCTRL_MSM8660=y
CONFIG_PINCTRL_MSM8960=y
+CONFIG_PINCTRL_MDM9615=y
CONFIG_PINCTRL_MSM8X74=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
@@ -114,6 +118,7 @@ CONFIG_CHARGER_QCOM_SMBB=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_MSM=y
CONFIG_THERMAL=y
+CONFIG_MFD_PM8XXX=y
CONFIG_MFD_PM8921_CORE=y
CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
@@ -143,6 +148,7 @@ CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_QCOM_DML=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
@@ -156,6 +162,8 @@ CONFIG_APQ_MMCC_8084=y
CONFIG_IPQ_LCC_806X=y
CONFIG_MSM_GCC_8660=y
CONFIG_MSM_LCC_8960=y
+CONFIG_MSM_GCC_9615=y
+CONFIG_MSM_LCC_9615=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
CONFIG_HWSPINLOCK_QCOM=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 9cb1a85bb166..aca8625b6fc9 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -129,16 +129,19 @@ CONFIG_SPI_ATMEL=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_ACT8945A=y
CONFIG_POWER_RESET=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_MFD_ACT8945A=y
CONFIG_MFD_ATMEL_FLEXCOM=y
CONFIG_MFD_ATMEL_HLCDC=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ACT8945A=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_V4L_PLATFORM_DRIVERS=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index f2d635566a13..baa07a46a88b 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -17,6 +17,7 @@ CONFIG_ARCH_R8A7778=y
CONFIG_ARCH_R8A7779=y
CONFIG_ARCH_R8A7790=y
CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7792=y
CONFIG_ARCH_R8A7793=y
CONFIG_ARCH_R8A7794=y
CONFIG_ARCH_SH73A0=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 753f1a5defc7..9f84be5b3ac5 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -1,5 +1,4 @@
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -19,6 +18,10 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SOCFPGA=y
CONFIG_ARM_THUMBEE=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_ALTERA=y
+CONFIG_PCIE_ALTERA_MSI=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_AEABI=y
@@ -50,15 +53,21 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_NVME=m
CONFIG_SRAM=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IXGBE=m
CONFIG_STMMAC_ETH=y
CONFIG_MICREL_PHY=y
CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_STMPE=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
@@ -78,14 +87,19 @@ CONFIG_SENSORS_LTC2978=y
CONFIG_SENSORS_LTC2978_REGULATOR=y
CONFIG_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
+CONFIG_MFD_STMPE=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_USB=y
+CONFIG_USB_STORAGE=y
CONFIG_USB_DWC2=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_MMC=y
CONFIG_MMC_DW=y
+CONFIG_DMADEVICES=y
+CONFIG_PL330_DMA=y
+CONFIG_DMATEST=m
CONFIG_FPGA=y
CONFIG_FPGA_MGR_SOCFPGA=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 81a1b92fd4e6..714da336ec86 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -97,6 +97,8 @@ CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_RC_SUPPORT=y
CONFIG_RC_DEVICES=y
CONFIG_IR_SUNXI=y
+CONFIG_DRM=y
+CONFIG_DRM_SUN4I=y
CONFIG_FB=y
CONFIG_FB_SIMPLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a83570f10124..d009f7911ffc 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/memory.h>
@@ -174,7 +173,7 @@ static inline void dma_mark_clean(void *addr, size_t size) { }
* to be the device-viewed address.
*/
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs);
+ gfp_t gfp, unsigned long attrs);
/**
* arm_dma_free - free memory allocated by arm_dma_alloc
@@ -191,7 +190,7 @@ extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
* during and after this call executing are illegal.
*/
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs);
+ dma_addr_t handle, unsigned long attrs);
/**
* arm_dma_mmap - map a coherent DMA allocation into user space
@@ -208,7 +207,7 @@ extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
*/
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/*
* This can be called during early boot to increase the size of the atomic
@@ -262,16 +261,16 @@ extern void dmabounce_unregister_dev(struct device *);
* The scatter list versions of the above methods.
*/
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
+ enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
+ enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#endif /* __KERNEL__ */
#endif
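
[Editor's note — illustration, not part of the patch] The dma-mapping hunks above track the tree-wide change that replaces the struct dma_attrs pointer with a plain unsigned long bitmask in every DMA operation. For readers following only the ARM side, here is a minimal caller-side sketch of the post-conversion API; the device and function names are invented for illustration, and it assumes the generic dma_alloc_attrs()/dma_free_attrs() wrappers and the DMA_ATTR_WRITE_COMBINE flag from linux/dma-mapping.h.

	/* Illustrative only: allocating a write-combined DMA buffer with the
	 * attrs-as-bitmask convention that the ARM dma ops above now use. */
	#include <linux/dma-mapping.h>
	#include <linux/device.h>
	#include <linux/gfp.h>

	static void *example_alloc_wc(struct device *dev, size_t size,
				      dma_addr_t *handle)
	{
		/* attrs is now a plain bitmask, not a struct dma_attrs pointer */
		unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
	}

	static void example_free_wc(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t handle)
	{
		/* the same bitmask must be passed back on free */
		dma_free_attrs(dev, size, cpu_addr, handle, DMA_ATTR_WRITE_COMBINE);
	}
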
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index c2b9b4bdec00..1869af6bac5c 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -53,6 +53,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
/* Function pointer to optional machine-specific reinitialization */
extern void (*kexec_reinit)(void);
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+ return phys_to_idmap(phys);
+}
+#define phys_to_boot_phys phys_to_boot_phys
+
+static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
+{
+ return idmap_to_phys(entry);
+}
+#define boot_phys_to_phys boot_phys_to_phys
+
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+ return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
+}
+#define page_to_boot_pfn page_to_boot_pfn
+
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+ return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT));
+}
+#define boot_pfn_to_page boot_pfn_to_page
+
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
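
[Editor's note — illustration, not part of the patch] The new kexec.h helpers above feed the generic kexec core: the #define following each static inline signals that the architecture overrides the default boot-address hooks, so the ARM "boot view" (idmap alias) conversion is applied when the kexec core builds its destination page lists. As a rough sketch of the pattern being overridden — assuming the generic header guards its identity fallbacks with #ifndef, as is usual for such arch hooks — the defaults look something like this:

	/* Sketch of the generic fallbacks the ARM helpers above replace; the
	 * real definitions live in include/linux/kexec.h and simply treat the
	 * boot view as identical to the kernel's physical view. */
	#ifndef page_to_boot_pfn
	static inline unsigned long page_to_boot_pfn(struct page *page)
	{
		return page_to_pfn(page);
	}
	#endif

	#ifndef phys_to_boot_phys
	static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
	{
		return (unsigned long)phys;
	}
	#endif
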
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 3d5a5cd071bd..58faff5f1eb2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,8 @@ extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __init_stage2_translation(void);
+
+extern void __kvm_hyp_reset(unsigned long);
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 96387d477e91..de338d93d11b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -241,8 +241,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
-static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
- phys_addr_t pgd_ptr,
+static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
@@ -251,18 +250,13 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
* code. The init code doesn't need to preserve these
* registers as r0-r3 are already callee saved according to
* the AAPCS.
- * Note that we slightly misuse the prototype by casing the
+ * Note that we slightly misuse the prototype by casting the
* stack pointer to a void *.
- *
- * We don't have enough registers to perform the full init in
- * one go. Install the boot PGD first, and then install the
- * runtime PGD, stack pointer and vectors. The PGDs are always
- * passed as the third argument, in order to be passed into
- * r2-r3 to the init code (yes, this is compliant with the
- * PCS!).
- */
- kvm_call_hyp(NULL, 0, boot_pgd_ptr);
+ * The PGDs are always passed as the third argument, in order
+ * to be passed into r2-r3 to the init code (yes, this is
+ * compliant with the PCS!).
+ */
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
@@ -272,16 +266,13 @@ static inline void __cpu_init_stage2(void)
kvm_call_hyp(__init_stage2_translation);
}
-static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
phys_addr_t phys_idmap_start)
{
- /*
- * TODO
- * kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
- */
+ kvm_call_hyp((void *)virt_to_idmap(__kvm_hyp_reset), vector_ptr);
}
-static inline int kvm_arch_dev_ioctl_check_extension(long ext)
+static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
{
return 0;
}
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index f0e860761380..6eaff28f2ff3 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -25,9 +25,6 @@
#define __hyp_text __section(.hyp.text) notrace
-#define kern_hyp_va(v) (v)
-#define hyp_kern_va(v) (v)
-
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
#define __ACCESS_CP15_64(Op1, CRm) \
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index f9a65061130b..3bb803d6814b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -26,16 +26,7 @@
* We directly use the kernel VA for the HYP, as we can directly share
* the mapping (HTTBR "covers" TTBR1).
*/
-#define HYP_PAGE_OFFSET_MASK UL(~0)
-#define HYP_PAGE_OFFSET PAGE_OFFSET
-#define KERN_TO_HYP(kva) (kva)
-
-/*
- * Our virtual mapping for the boot-time MMU-enable code. Must be
- * shared across all the page-tables. Conveniently, we use the vectors
- * page, where no kernel data will ever be shared with HYP.
- */
-#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
+#define kern_hyp_va(kva) (kva)
/*
* KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
@@ -49,9 +40,8 @@
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>
-int create_hyp_mappings(void *from, void *to);
+int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
@@ -65,7 +55,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 0070e8520cd4..2d88af5be45f 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -22,6 +22,7 @@ struct hw_pci {
struct msi_controller *msi_ctrl;
struct pci_ops *ops;
int nr_controllers;
+ unsigned int io_optional:1;
void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index d62204060cbe..a8d656d9aec7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -97,7 +97,9 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
-#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
+#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
+#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 62a6f65029e6..a93c0f99acf7 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(to, n, false);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(from, n, true);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
+ check_object_size(from, n, true);
return arm_copy_to_user(to, from, n);
#endif
}
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index d4ceaf5f299b..a2e75b84e2ae 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,10 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
+/* The section containing the hypervisor idmap text */
+extern char __hyp_idmap_text_start[];
+extern char __hyp_idmap_text_end[];
+
/* The section containing the hypervisor text */
extern char __hyp_text_start[];
extern char __hyp_text_end[];
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 9408a994cc91..95ce6ac3a971 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -2,15 +2,14 @@
#define _ASM_ARM_XEN_PAGE_COHERENT_H
#include <asm/page.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir);
@@ -18,22 +17,20 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir);
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
unsigned long page_pfn = page_to_xen_pfn(page);
unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
@@ -58,8 +55,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
unsigned long pfn = PFN_DOWN(handle);
/*
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S
index d4ae3b8e2426..0098401e5aeb 100644
--- a/arch/arm/include/debug/at91.S
+++ b/arch/arm/include/debug/at91.S
@@ -9,14 +9,6 @@
*
*/
-#ifdef CONFIG_MMU
-#define AT91_IO_P2V(x) ((x) - 0x01000000)
-#else
-#define AT91_IO_P2V(x) (x)
-#endif
-
-#define AT91_DEBUG_UART_VIRT AT91_IO_P2V(CONFIG_DEBUG_UART_PHYS)
-
#define AT91_DBGU_SR (0x14) /* Status Register */
#define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */
#define AT91_DBGU_TXRDY (1 << 1) /* Transmitter Ready */
@@ -24,7 +16,7 @@
.macro addruart, rp, rv, tmp
ldr \rp, =CONFIG_DEBUG_UART_PHYS @ System peripherals (phys address)
- ldr \rv, =AT91_DEBUG_UART_VIRT @ System peripherals (virt address)
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT @ System peripherals (virt address)
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/include/debug/clps711x.S b/arch/arm/include/debug/clps711x.S
index abe225436686..c17ac5c9e5f3 100644
--- a/arch/arm/include/debug/clps711x.S
+++ b/arch/arm/include/debug/clps711x.S
@@ -9,10 +9,10 @@
#ifndef CONFIG_DEBUG_CLPS711X_UART2
#define CLPS711X_UART_PADDR (0x80000000 + 0x0000)
-#define CLPS711X_UART_VADDR (0xfeff0000 + 0x0000)
+#define CLPS711X_UART_VADDR (0xfeff4000 + 0x0000)
#else
#define CLPS711X_UART_PADDR (0x80000000 + 0x1000)
-#define CLPS711X_UART_VADDR (0xfeff0000 + 0x1000)
+#define CLPS711X_UART_VADDR (0xfeff4000 + 0x1000)
#endif
#define SYSFLG (0x0140)
diff --git a/arch/arm/include/debug/exynos.S b/arch/arm/include/debug/exynos.S
index b17fdb7fbd34..60bf3c23200d 100644
--- a/arch/arm/include/debug/exynos.S
+++ b/arch/arm/include/debug/exynos.S
@@ -24,7 +24,11 @@
mrc p15, 0, \tmp, c0, c0, 0
and \tmp, \tmp, #0xf0
teq \tmp, #0xf0 @@ A15
- ldreq \rp, =EXYNOS5_PA_UART
+ beq 100f
+ mrc p15, 0, \tmp, c0, c0, 5
+ and \tmp, \tmp, #0xf00
+ teq \tmp, #0x100 @@ A15 + A7 but boot to A7
+100: ldreq \rp, =EXYNOS5_PA_UART
movne \rp, #EXYNOS4_PA_UART @@ EXYNOS4
ldr \rv, =S3C_VA_UART
#if CONFIG_DEBUG_S3C_UART != 0
diff --git a/arch/arm/include/debug/samsung.S b/arch/arm/include/debug/samsung.S
index 8d8d922e5e44..f4eeed2a1981 100644
--- a/arch/arm/include/debug/samsung.S
+++ b/arch/arm/include/debug/samsung.S
@@ -15,11 +15,13 @@
.macro fifo_level_s5pv210 rd, rx
ldr \rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
and \rd, \rd, #S5PV210_UFSTAT_TXMASK
.endm
.macro fifo_full_s5pv210 rd, rx
ldr \rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
tst \rd, #S5PV210_UFSTAT_TXFULL
.endm
@@ -28,6 +30,7 @@
.macro fifo_level_s3c2440 rd, rx
ldr \rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
and \rd, \rd, #S3C2440_UFSTAT_TXMASK
.endm
@@ -37,6 +40,7 @@
.macro fifo_full_s3c2440 rd, rx
ldr \rd, [\rx, # S3C2410_UFSTAT]
+ARM_BE8(rev \rd, \rd)
tst \rd, #S3C2440_UFSTAT_TXFULL
.endm
@@ -50,6 +54,7 @@
.macro busyuart, rd, rx
ldr \rd, [\rx, # S3C2410_UFCON]
+ARM_BE8(rev \rd, \rd)
tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
beq 1001f @
@ FIFO enabled...
@@ -61,6 +66,7 @@
1001:
@ busy waiting for non fifo
ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ARM_BE8(rev \rd, \rd)
tst \rd, #S3C2410_UTRSTAT_TXFE
beq 1001b
@@ -69,6 +75,7 @@
.macro waituart,rd,rx
ldr \rd, [\rx, # S3C2410_UFCON]
+ARM_BE8(rev \rd, \rd)
tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
beq 1001f @
@ FIFO enabled...
@@ -80,6 +87,7 @@
1001:
@ idle waiting for non fifo
ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ARM_BE8(rev \rd, \rd)
tst \rd, #S3C2410_UTRSTAT_TXFE
beq 1001b
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 05e61a2eeabe..2f0e07735d1d 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -410,7 +410,8 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
-static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
+ int io_optional)
{
int ret;
struct resource_entry *window;
@@ -420,6 +421,14 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
&iomem_resource, sys->mem_offset);
}
+ /*
+ * If a platform says I/O port support is optional, we don't add
+ * the default I/O space. The platform is responsible for adding
+ * any I/O space it needs.
+ */
+ if (io_optional)
+ return 0;
+
resource_list_for_each_entry(window, &sys->resources)
if (resource_type(window->res) == IORESOURCE_IO)
return 0;
@@ -466,7 +475,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
if (ret > 0) {
struct pci_host_bridge *host_bridge;
- ret = pcibios_init_resources(nr, sys);
+ ret = pcibios_init_resource(nr, sys, hw->io_optional);
if (ret) {
kfree(sys);
break;
@@ -515,25 +524,23 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
list_for_each_entry(sys, &head, node) {
struct pci_bus *bus = sys->bus;
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
struct pci_bus *child;
- /*
- * Size the bridge windows.
- */
pci_bus_size_bridges(bus);
-
- /*
- * Assign resources.
- */
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
- /*
- * Tell drivers about devices found.
- */
+
pci_bus_add_devices(bus);
}
}
@@ -590,18 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
-/**
- * pcibios_enable_device - Enable I/O and memory.
- * @dev: PCI device to be enabled
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- if (pci_has_flag(PCI_PROBE_ONLY))
- return 0;
-
- return pci_enable_resources(dev, mask);
-}
-
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
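
[Editor's note — illustration, not part of the patch] The io_optional bit added to struct hw_pci above is consumed in pcibios_init_resource(): when a platform sets it, the default I/O port resource is not registered and the platform is responsible for adding any I/O space it needs. A hypothetical platform descriptor using the flag might look like the sketch below; every my_pcie_* symbol is a placeholder, and only struct hw_pci, its fields and pci_common_init_dev() come from the code above.

	/* Hypothetical host-bridge glue opting out of the default I/O space. */
	#include <linux/pci.h>
	#include <linux/init.h>
	#include <asm/mach/pci.h>

	static struct pci_ops my_pcie_ops;	/* placeholder config accessors */

	static int __init my_pcie_setup(int nr, struct pci_sys_data *sys)
	{
		/* With io_optional set, any I/O window the platform wants is
		 * added here (e.g. via pci_add_resource_offset()); returning
		 * a positive value tells the core the controller is present. */
		return 1;
	}

	static int my_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
	{
		return 0;	/* placeholder; real code maps legacy INTx here */
	}

	static struct hw_pci my_pcie_hw __initdata = {
		.nr_controllers	= 1,
		.ops		= &my_pcie_ops,
		.setup		= my_pcie_setup,
		.map_irq	= my_pcie_map_irq,
		.io_optional	= 1,	/* skip the default I/O port resource */
	};

	static int __init my_pcie_init(void)
	{
		pci_common_init_dev(NULL, &my_pcie_hw);
		return 0;
	}
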
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 59fd0e24c56b..b18c1ea56bed 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -57,7 +57,7 @@ int machine_kexec_prepare(struct kimage *image)
for (i = 0; i < image->nr_segments; i++) {
current_segment = &image->segment[i];
- if (!memblock_is_region_memory(current_segment->mem,
+ if (!memblock_is_region_memory(idmap_to_phys(current_segment->mem),
current_segment->memsz))
return -EINVAL;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index da2f6c360f6b..df7f2a75e769 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -848,10 +848,29 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
+ phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ unsigned long boot_alias_start;
+
+ /*
+ * Some systems have a special memory alias which is only
+ * used for booting. We need to advertise this region to
+ * kexec-tools so they know where bootable RAM is located.
+ */
+ boot_alias_start = phys_to_idmap(start);
+ if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
+ res = memblock_virt_alloc(sizeof(*res), 0);
+ res->name = "System RAM (boot alias)";
+ res->start = boot_alias_start;
+ res->end = phys_to_idmap(end);
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ }
+
res = memblock_virt_alloc(sizeof(*res), 0);
res->name = "System RAM";
- res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
- res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ res->start = start;
+ res->end = end;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
@@ -1000,9 +1019,25 @@ static void __init reserve_crashkernel(void)
(unsigned long)(crash_base >> 20),
(unsigned long)(total_mem >> 20));
+ /* The crashk resource must always be located in normal mem */
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res);
+
+ if (arm_has_idmap_alias()) {
+ /*
+ * If we have a special RAM alias for use at boot, we
+ * need to advertise to kexec tools where the alias is.
+ */
+ static struct resource crashk_boot_res = {
+ .name = "Crash kernel (boot alias)",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+ };
+
+ crashk_boot_res.start = phys_to_idmap(crash_base);
+ crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_boot_res);
+ }
}
#else
static inline void reserve_crashkernel(void) {}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 99420fc1f066..d24e5dd2aa7a 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -44,7 +44,7 @@
#endif
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG)
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 02abfff68ee5..3e1cd0452d67 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -32,6 +32,8 @@ config KVM
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQ_ROUTING
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
@@ -46,13 +48,6 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
-config KVM_NEW_VGIC
- bool "New VGIC implementation"
- depends on KVM
- default y
- ---help---
- uses the new VGIC implementation
-
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index a596b58f6d37..10d77a66cad5 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -22,7 +22,6 @@ obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
-ifeq ($(CONFIG_KVM_NEW_VGIC),y)
obj-y += $(KVM)/arm/vgic/vgic.o
obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
@@ -30,9 +29,5 @@ obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
-else
-obj-y += $(KVM)/arm/vgic.o
-obj-y += $(KVM)/arm/vgic-v2.o
-obj-y += $(KVM)/arm/vgic-v2-emul.o
-endif
+obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index f1bde7c4e736..d94bb9093ead 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
@@ -122,7 +123,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out_fail_alloc;
- ret = create_hyp_mappings(kvm, kvm + 1);
+ ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
if (ret)
goto out_free_stage2_pgd;
@@ -201,7 +202,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_MAX_VCPUS;
break;
default:
- r = kvm_arch_dev_ioctl_check_extension(ext);
+ r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
break;
}
return r;
@@ -239,7 +240,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
- err = create_hyp_mappings(vcpu, vcpu + 1);
+ err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
if (err)
goto vcpu_uninit;
@@ -377,7 +378,7 @@ void force_vm_exit(const cpumask_t *mask)
/**
* need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to checkt
+ * @kvm: The VM's VMID to check
*
* return true if there is a new generation of VMIDs being used
*
@@ -616,7 +617,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* Enter the guest
*/
trace_kvm_entry(*vcpu_pc(vcpu));
- __kvm_guest_enter();
+ guest_enter_irqoff();
vcpu->mode = IN_GUEST_MODE;
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
@@ -642,14 +643,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
local_irq_enable();
/*
- * We do local_irq_enable() before calling kvm_guest_exit() so
+ * We do local_irq_enable() before calling guest_exit() so
* that if a timer interrupt hits while running the guest we
* account that tick as being spent in the guest. We enable
- * preemption after calling kvm_guest_exit() so that if we get
+ * preemption after calling guest_exit() so that if we get
* preempted we make sure ticks after that is not counted as
* guest time.
*/
- kvm_guest_exit();
+ guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/*
@@ -1039,7 +1040,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
static void cpu_init_hyp_mode(void *dummy)
{
- phys_addr_t boot_pgd_ptr;
phys_addr_t pgd_ptr;
unsigned long hyp_stack_ptr;
unsigned long stack_page;
@@ -1048,13 +1048,12 @@ static void cpu_init_hyp_mode(void *dummy)
/* Switch from the HYP stub to our own HYP init vector */
__hyp_set_vectors(kvm_get_idmap_vector());
- boot_pgd_ptr = kvm_mmu_get_boot_httbr();
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
- __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+ __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
kvm_arm_init_debug();
@@ -1076,15 +1075,9 @@ static void cpu_hyp_reinit(void)
static void cpu_hyp_reset(void)
{
- phys_addr_t boot_pgd_ptr;
- phys_addr_t phys_idmap_start;
-
- if (!is_kernel_in_hyp_mode()) {
- boot_pgd_ptr = kvm_mmu_get_boot_httbr();
- phys_idmap_start = kvm_get_idmap_start();
-
- __cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start);
- }
+ if (!is_kernel_in_hyp_mode())
+ __cpu_reset_hyp_mode(hyp_default_vectors,
+ kvm_get_idmap_start());
}
static void _kvm_arch_hardware_enable(void *discard)
@@ -1294,14 +1287,14 @@ static int init_hyp_mode(void)
* Map the Hyp-code called directly from the host
*/
err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
- kvm_ksym_ref(__hyp_text_end));
+ kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
if (err) {
kvm_err("Cannot map world-switch code\n");
goto out_err;
}
err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
- kvm_ksym_ref(__end_rodata));
+ kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
if (err) {
kvm_err("Cannot map rodata section\n");
goto out_err;
@@ -1312,7 +1305,8 @@ static int init_hyp_mode(void)
*/
for_each_possible_cpu(cpu) {
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
- err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
+ err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
+ PAGE_HYP);
if (err) {
kvm_err("Cannot map hyp stack\n");
@@ -1324,7 +1318,7 @@ static int init_hyp_mode(void)
kvm_cpu_context_t *cpu_ctxt;
cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
- err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
+ err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {
kvm_err("Cannot map host CPU state: %d\n", err);
@@ -1332,10 +1326,6 @@ static int init_hyp_mode(void)
}
}
-#ifndef CONFIG_HOTPLUG_CPU
- free_boot_hyp_pgd();
-#endif
-
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index a494def3f195..af93e3ffc9f3 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -210,7 +210,7 @@ bool kvm_condition_valid(struct kvm_vcpu *vcpu)
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
- * blocks, the ITSTATE field of the CPSR is not advanved (updated), so we have
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
* to do this little bit of work manually. The fields map like this:
*
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 9093ed0f8b2a..9aca92074f85 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -182,7 +182,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
*
- * We do core registers right here, then we apppend coproc regs.
+ * We do core registers right here, then we append coproc regs.
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1f9ae17476f9..bf89c919efc1 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -32,23 +32,13 @@
* r2,r3 = Hypervisor pgd pointer
*
* The init scenario is:
- * - We jump in HYP with four parameters: boot HYP pgd, runtime HYP pgd,
- * runtime stack, runtime vectors
- * - Enable the MMU with the boot pgd
- * - Jump to a target into the trampoline page (remember, this is the same
- * physical page!)
- * - Now switch to the runtime pgd (same VA, and still the same physical
- * page!)
+ * - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
+ * runtime vectors
* - Invalidate TLBs
* - Set stack and vectors
+ * - Setup the page tables
+ * - Enable the MMU
* - Profit! (or eret, if you only care about the code).
- *
- * As we only have four registers available to pass parameters (and we
- * need six), we split the init in two phases:
- * - Phase 1: r0 = 0, r1 = 0, r2,r3 contain the boot PGD.
- * Provides the basic HYP init, and enable the MMU.
- * - Phase 2: r0 = ToS, r1 = vectors, r2,r3 contain the runtime PGD.
- * Switches to the runtime PGD, set stack and vectors.
*/
.text
@@ -68,8 +58,11 @@ __kvm_hyp_init:
W(b) .
__do_hyp_init:
- cmp r0, #0 @ We have a SP?
- bne phase2 @ Yes, second stage init
+ @ Set stack pointer
+ mov sp, r0
+
+ @ Set HVBAR to point to the HYP vectors
+ mcr p15, 4, r1, c12, c0, 0 @ HVBAR
@ Set the HTTBR to point to the hypervisor PGD pointer passed
mcrr p15, 4, rr_lo_hi(r2, r3), c2
@@ -114,34 +107,25 @@ __do_hyp_init:
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
- isb
mcr p15, 4, r0, c1, c0, 0 @ HSCR
+ isb
- @ End of init phase-1
eret
-phase2:
- @ Set stack pointer
- mov sp, r0
-
- @ Set HVBAR to point to the HYP vectors
- mcr p15, 4, r1, c12, c0, 0 @ HVBAR
-
- @ Jump to the trampoline page
- ldr r0, =TRAMPOLINE_VA
- adr r1, target
- bfi r0, r1, #0, #PAGE_SHIFT
- ret r0
+ @ r0 : stub vectors address
+ENTRY(__kvm_hyp_reset)
+ /* We're now in idmap, disable MMU */
+ mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
+ ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
+ bic r1, r1, r2
+ mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
-target: @ We're now in the trampoline code, switch page tables
- mcrr p15, 4, rr_lo_hi(r2, r3), c2
+ /* Install stub vectors */
+ mcr p15, 4, r0, c12, c0, 0 @ HVBAR
isb
- @ Invalidate the old TLBs
- mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
- dsb ish
-
eret
+ENDPROC(__kvm_hyp_reset)
.ltorg
diff --git a/arch/arm/kvm/irq.h b/arch/arm/kvm/irq.h
new file mode 100644
index 000000000000..b74099b905fd
--- /dev/null
+++ b/arch/arm/kvm/irq.h
@@ -0,0 +1,19 @@
+/*
+ * irq.h: in kernel interrupt controller related definitions
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This header is included by irqchip.c. However, on ARM, interrupt
+ * controller declarations are located in include/kvm/arm_vgic.h since
+ * they are mostly shared between arm and arm64.
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#include <kvm/arm_vgic.h>
+
+#endif
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 45c43aecb8f2..bda27b6b1aa2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -32,8 +32,6 @@
#include "trace.h"
-extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
-
static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static pgd_t *merged_hyp_pgd;
@@ -484,28 +482,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
}
/**
- * free_boot_hyp_pgd - free HYP boot page tables
- *
- * Free the HYP boot page tables. The bounce page is also freed.
- */
-void free_boot_hyp_pgd(void)
-{
- mutex_lock(&kvm_hyp_pgd_mutex);
-
- if (boot_hyp_pgd) {
- unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- unmap_hyp_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
- boot_hyp_pgd = NULL;
- }
-
- if (hyp_pgd)
- unmap_hyp_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-
- mutex_unlock(&kvm_hyp_pgd_mutex);
-}
-
-/**
* free_hyp_pgds - free Hyp-mode page tables
*
* Assumes hyp_pgd is a page table used strictly in Hyp-mode and
@@ -519,15 +495,20 @@ void free_hyp_pgds(void)
{
unsigned long addr;
- free_boot_hyp_pgd();
-
mutex_lock(&kvm_hyp_pgd_mutex);
+ if (boot_hyp_pgd) {
+ unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
+ free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
+ boot_hyp_pgd = NULL;
+ }
+
if (hyp_pgd) {
+ unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+ unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
+ unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL;
@@ -679,17 +660,18 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
* @to: The virtual kernel end address of the range (exclusive)
+ * @prot: The protection to be applied to this range
*
* The same virtual address as the kernel virtual address is also used
* in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
* physical pages.
*/
-int create_hyp_mappings(void *from, void *to)
+int create_hyp_mappings(void *from, void *to, pgprot_t prot)
{
phys_addr_t phys_addr;
unsigned long virt_addr;
- unsigned long start = KERN_TO_HYP((unsigned long)from);
- unsigned long end = KERN_TO_HYP((unsigned long)to);
+ unsigned long start = kern_hyp_va((unsigned long)from);
+ unsigned long end = kern_hyp_va((unsigned long)to);
if (is_kernel_in_hyp_mode())
return 0;
@@ -704,7 +686,7 @@ int create_hyp_mappings(void *from, void *to)
err = __create_hyp_mappings(hyp_pgd, virt_addr,
virt_addr + PAGE_SIZE,
__phys_to_pfn(phys_addr),
- PAGE_HYP);
+ prot);
if (err)
return err;
}
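
With the new @prot argument, the caller rather than create_hyp_mappings() decides the HYP permissions. A hedged usage sketch follows (PAGE_HYP and PAGE_HYP_EXEC are the values used elsewhere in this patch; the __hyp_text_* section symbols and the wrapper function are assumptions for illustration only):

/* Illustration only, not part of the patch. */
extern char __hyp_text_start[], __hyp_text_end[];      /* assumed section symbols */

static int __init hyp_map_example(void *state, size_t size)
{
        int err;

        /* Ordinary data HYP must read and write: non-executable mapping. */
        err = create_hyp_mappings(state, state + size, PAGE_HYP);
        if (err)
                return err;

        /* The HYP text itself: executable mapping. */
        return create_hyp_mappings(__hyp_text_start, __hyp_text_end,
                                   PAGE_HYP_EXEC);
}
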
@@ -723,8 +705,8 @@ int create_hyp_mappings(void *from, void *to)
*/
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
- unsigned long start = KERN_TO_HYP((unsigned long)from);
- unsigned long end = KERN_TO_HYP((unsigned long)to);
+ unsigned long start = kern_hyp_va((unsigned long)from);
+ unsigned long end = kern_hyp_va((unsigned long)to);
if (is_kernel_in_hyp_mode())
return 0;
@@ -1687,14 +1669,6 @@ phys_addr_t kvm_mmu_get_httbr(void)
return virt_to_phys(hyp_pgd);
}
-phys_addr_t kvm_mmu_get_boot_httbr(void)
-{
- if (__kvm_cpu_uses_extended_idmap())
- return virt_to_phys(merged_hyp_pgd);
- else
- return virt_to_phys(boot_hyp_pgd);
-}
-
phys_addr_t kvm_get_idmap_vector(void)
{
return hyp_idmap_vector;
@@ -1705,6 +1679,22 @@ phys_addr_t kvm_get_idmap_start(void)
return hyp_idmap_start;
}
+static int kvm_map_idmap_text(pgd_t *pgd)
+{
+ int err;
+
+ /* Create the idmap in the boot page tables */
+ err = __create_hyp_mappings(pgd,
+ hyp_idmap_start, hyp_idmap_end,
+ __phys_to_pfn(hyp_idmap_start),
+ PAGE_HYP_EXEC);
+ if (err)
+ kvm_err("Failed to idmap %lx-%lx\n",
+ hyp_idmap_start, hyp_idmap_end);
+
+ return err;
+}
+
int kvm_mmu_init(void)
{
int err;
@@ -1719,28 +1709,41 @@ int kvm_mmu_init(void)
*/
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
- hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
- boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
+ kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
+ kvm_info("HYP VA range: %lx:%lx\n",
+ kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
- if (!hyp_pgd || !boot_hyp_pgd) {
- kvm_err("Hyp mode PGD not allocated\n");
- err = -ENOMEM;
+ if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
+ hyp_idmap_start < kern_hyp_va(~0UL)) {
+ /*
+ * The idmap page is intersecting with the VA space,
+ * it is not safe to continue further.
+ */
+ kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
+ err = -EINVAL;
goto out;
}
- /* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(boot_hyp_pgd,
- hyp_idmap_start, hyp_idmap_end,
- __phys_to_pfn(hyp_idmap_start),
- PAGE_HYP);
-
- if (err) {
- kvm_err("Failed to idmap %lx-%lx\n",
- hyp_idmap_start, hyp_idmap_end);
+ hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
+ if (!hyp_pgd) {
+ kvm_err("Hyp mode PGD not allocated\n");
+ err = -ENOMEM;
goto out;
}
if (__kvm_cpu_uses_extended_idmap()) {
+ boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ hyp_pgd_order);
+ if (!boot_hyp_pgd) {
+ kvm_err("Hyp boot PGD not allocated\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_map_idmap_text(boot_hyp_pgd);
+ if (err)
+ goto out;
+
merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!merged_hyp_pgd) {
kvm_err("Failed to allocate extra HYP pgd\n");
@@ -1748,29 +1751,10 @@ int kvm_mmu_init(void)
}
__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
hyp_idmap_start);
- return 0;
- }
-
- /* Map the very same page at the trampoline VA */
- err = __create_hyp_mappings(boot_hyp_pgd,
- TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
- __phys_to_pfn(hyp_idmap_start),
- PAGE_HYP);
- if (err) {
- kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
- TRAMPOLINE_VA);
- goto out;
- }
-
- /* Map the same page again into the runtime page tables */
- err = __create_hyp_mappings(hyp_pgd,
- TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
- __phys_to_pfn(hyp_idmap_start),
- PAGE_HYP);
- if (err) {
- kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
- TRAMPOLINE_VA);
- goto out;
+ } else {
+ err = kvm_map_idmap_text(hyp_pgd);
+ if (err)
+ goto out;
}
return 0;
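
The new sanity check in kvm_mmu_init() relies on the earlier BUG_ON guaranteeing that the idmap fits in a single page, so comparing only its start address against the HYP VA window is sufficient. The logic, as a tiny standalone sketch (not the kernel code):

#include <stdbool.h>
#include <stdint.h>

/* The idmap fits in one page, so its start address alone decides the clash. */
static bool idmap_intersects_hyp_va(uintptr_t idmap_start,
                                    uintptr_t hyp_va_start, uintptr_t hyp_va_end)
{
        return idmap_start >= hyp_va_start && idmap_start < hyp_va_end;
}
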
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 0048b5a62a50..4b5e802e57d1 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -52,7 +52,7 @@ static const struct kvm_irq_level cortexa_vtimer_irq = {
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on the
- * virtual CPU struct to their architectually defined reset values.
+ * virtual CPU struct to their architecturally defined reset values.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 08047afdf38e..5204395efda8 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,8 +1,8 @@
menuconfig ARCH_AT91
bool "Atmel SoCs"
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_AT91
+ select GPIOLIB
select PINCTRL
select SOC_BUS
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index f06270198bf1..b4332b727e9c 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/atmel.h>
#include <linux/io.h>
#include <linux/clk/at91_pmc.h>
@@ -355,7 +356,7 @@ static __init void at91_dt_ramc(void)
at91_pm_set_standby(standby);
}
-void at91rm9200_idle(void)
+static void at91rm9200_idle(void)
{
/*
* Disable the processor clock. The processor will be automatically
@@ -364,7 +365,7 @@ void at91rm9200_idle(void)
writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
}
-void at91sam9_idle(void)
+static void at91sam9_idle(void)
{
writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR);
cpu_do_idle();
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 4f1709b31822..34f0fca0b847 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -17,7 +17,7 @@ config ARCH_BCM_IPROC
select ARM_GLOBAL_TIMER
select COMMON_CLK_IPROC
select CLKSRC_MMIO
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select ARM_AMBA
select PINCTRL
help
@@ -80,7 +80,7 @@ comment "KONA architected SoCs"
config ARCH_BCM_MOBILE
bool
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select ARM_ERRATA_754322
select ARM_ERRATA_775420
select ARM_GIC
@@ -112,9 +112,17 @@ config ARCH_BCM_21664
Enable support for the BCM21664 family, which includes
BCM21663 and BCM21664 variants.
+config ARCH_BCM_23550
+ bool "Broadcom BCM23550 SoC"
+ depends on ARCH_MULTI_V7
+ select ARCH_BCM_MOBILE
+ select HAVE_SMP
+ help
+ Enable support for the BCM23550.
+
config ARCH_BCM_MOBILE_L2_CACHE
bool "Broadcom mobile SoC level 2 cache support"
- depends on ARCH_BCM_MOBILE
+ depends on ARCH_BCM_281XX || ARCH_BCM_21664
default y
select CACHE_L2X0
select ARCH_BCM_MOBILE_SMC
@@ -129,7 +137,7 @@ config ARCH_BCM_MOBILE_SMP
select HAVE_ARM_SCU
select ARM_ERRATA_764369
help
- SMP support for the BCM281XX and BCM21664 SoC families.
+ SMP support for the BCM281XX, BCM21664 and BCM23550 SoC families.
Provided as an option so SMP support for SoCs of this type
can be disabled for an SMP-enabled kernel.
@@ -138,7 +146,7 @@ comment "Other Architectures"
config ARCH_BCM2835
bool "Broadcom BCM2835 family"
depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select ARM_AMBA
select ARM_ERRATA_411920 if ARCH_MULTI_V6
select ARM_TIMER_SP804
@@ -178,7 +186,6 @@ config ARCH_BRCMSTB
select BRCMSTB_L2_IRQ
select BCM7120_L2_IRQ
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
- select ARCH_WANT_OPTIONAL_GPIOLIB
select SOC_BRCMSTB
select SOC_BUS
help
diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
index 7d665151c772..980f5850097c 100644
--- a/arch/arm/mach-bcm/Makefile
+++ b/arch/arm/mach-bcm/Makefile
@@ -26,7 +26,10 @@ obj-$(CONFIG_ARCH_BCM_281XX) += board_bcm281xx.o
# BCM21664
obj-$(CONFIG_ARCH_BCM_21664) += board_bcm21664.o
-# BCM281XX and BCM21664 SMP support
+# BCM23550
+obj-$(CONFIG_ARCH_BCM_23550) += board_bcm23550.o
+
+# BCM281XX, BCM21664 and BCM23550 SMP support
obj-$(CONFIG_ARCH_BCM_MOBILE_SMP) += platsmp.o
# BCM281XX and BCM21664 L2 cache control
diff --git a/arch/arm/mach-bcm/board_bcm21664.c b/arch/arm/mach-bcm/board_bcm21664.c
index 0d7034c57334..c5bf01641172 100644
--- a/arch/arm/mach-bcm/board_bcm21664.c
+++ b/arch/arm/mach-bcm/board_bcm21664.c
@@ -11,52 +11,10 @@
* GNU General Public License for more details.
*/
-#include <linux/of_address.h>
-#include <linux/io.h>
-
#include <asm/mach/arch.h>
#include "kona_l2_cache.h"
-#define RSTMGR_DT_STRING "brcm,bcm21664-resetmgr"
-
-#define RSTMGR_REG_WR_ACCESS_OFFSET 0
-#define RSTMGR_REG_CHIP_SOFT_RST_OFFSET 4
-
-#define RSTMGR_WR_PASSWORD 0xa5a5
-#define RSTMGR_WR_PASSWORD_SHIFT 8
-#define RSTMGR_WR_ACCESS_ENABLE 1
-
-static void bcm21664_restart(enum reboot_mode mode, const char *cmd)
-{
- void __iomem *base;
- struct device_node *resetmgr;
-
- resetmgr = of_find_compatible_node(NULL, NULL, RSTMGR_DT_STRING);
- if (!resetmgr) {
- pr_emerg("Couldn't find " RSTMGR_DT_STRING "\n");
- return;
- }
- base = of_iomap(resetmgr, 0);
- if (!base) {
- pr_emerg("Couldn't map " RSTMGR_DT_STRING "\n");
- return;
- }
-
- /*
- * A soft reset is triggered by writing a 0 to bit 0 of the soft reset
- * register. To write to that register we must first write the password
- * and the enable bit in the write access enable register.
- */
- writel((RSTMGR_WR_PASSWORD << RSTMGR_WR_PASSWORD_SHIFT) |
- RSTMGR_WR_ACCESS_ENABLE,
- base + RSTMGR_REG_WR_ACCESS_OFFSET);
- writel(0, base + RSTMGR_REG_CHIP_SOFT_RST_OFFSET);
-
- /* Wait for reset */
- while (1);
-}
-
static void __init bcm21664_init(void)
{
kona_l2_cache_init();
@@ -69,6 +27,5 @@ static const char * const bcm21664_dt_compat[] = {
DT_MACHINE_START(BCM21664_DT, "BCM21664 Broadcom Application Processor")
.init_machine = bcm21664_init,
- .restart = bcm21664_restart,
.dt_compat = bcm21664_dt_compat,
MACHINE_END
diff --git a/arch/arm/mach-bcm/board_bcm23550.c b/arch/arm/mach-bcm/board_bcm23550.c
new file mode 100644
index 000000000000..0ac01debd077
--- /dev/null
+++ b/arch/arm/mach-bcm/board_bcm23550.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+
+static const char * const bcm23550_dt_compat[] = {
+ "brcm,bcm23550",
+ NULL,
+};
+
+DT_MACHINE_START(BCM23550_DT, "BCM23550 Broadcom Application Processor")
+ .dt_compat = bcm23550_dt_compat,
+MACHINE_END
diff --git a/arch/arm/mach-bcm/kona_l2_cache.c b/arch/arm/mach-bcm/kona_l2_cache.c
index b31970377c20..59ad86304092 100644
--- a/arch/arm/mach-bcm/kona_l2_cache.c
+++ b/arch/arm/mach-bcm/kona_l2_cache.c
@@ -17,6 +17,7 @@
#include <asm/hardware/cache-l2x0.h>
#include "bcm_kona_smc.h"
+#include "kona_l2_cache.h"
void __init kona_l2_cache_init(void)
{
diff --git a/arch/arm/mach-bcm/platsmp.c b/arch/arm/mach-bcm/platsmp.c
index cfae9c71fb74..3ac3a9bc663c 100644
--- a/arch/arm/mach-bcm/platsmp.c
+++ b/arch/arm/mach-bcm/platsmp.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -37,9 +38,6 @@
#define OF_SECONDARY_BOOT "secondary-boot-reg"
#define MPIDR_CPUID_BITMASK 0x3
-/* I/O address of register used to coordinate secondary core startup */
-static u32 secondary_boot_addr;
-
/*
* Enable the Cortex A9 Snoop Control Unit
*
@@ -81,20 +79,40 @@ static int __init scu_a9_enable(void)
return 0;
}
-static int nsp_write_lut(void)
+static u32 secondary_boot_addr_for(unsigned int cpu)
+{
+ u32 secondary_boot_addr = 0;
+ struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);
+
+ if (!cpu_node) {
+ pr_err("Failed to find device tree node for CPU%u\n", cpu);
+ return 0;
+ }
+
+ if (of_property_read_u32(cpu_node,
+ OF_SECONDARY_BOOT,
+ &secondary_boot_addr))
+ pr_err("required secondary boot register not specified for CPU%u\n",
+ cpu);
+
+ of_node_put(cpu_node);
+
+ return secondary_boot_addr;
+}
+
+static int nsp_write_lut(unsigned int cpu)
{
void __iomem *sku_rom_lut;
phys_addr_t secondary_startup_phy;
+ const u32 secondary_boot_addr = secondary_boot_addr_for(cpu);
- if (!secondary_boot_addr) {
- pr_warn("required secondary boot register not specified\n");
+ if (!secondary_boot_addr)
return -EINVAL;
- }
sku_rom_lut = ioremap_nocache((phys_addr_t)secondary_boot_addr,
- sizeof(secondary_boot_addr));
+ sizeof(phys_addr_t));
if (!sku_rom_lut) {
- pr_warn("unable to ioremap SKU-ROM LUT register\n");
+ pr_warn("unable to ioremap SKU-ROM LUT register for cpu %u\n", cpu);
return -ENOMEM;
}
@@ -113,70 +131,12 @@ static int nsp_write_lut(void)
static void __init bcm_smp_prepare_cpus(unsigned int max_cpus)
{
- static cpumask_t only_cpu_0 = { CPU_BITS_CPU0 };
- struct device_node *cpus_node = NULL;
- struct device_node *cpu_node = NULL;
- int ret;
-
- /*
- * This function is only called via smp_ops->smp_prepare_cpu().
- * That only happens if a "/cpus" device tree node exists
- * and has an "enable-method" property that selects the SMP
- * operations defined herein.
- */
- cpus_node = of_find_node_by_path("/cpus");
- if (!cpus_node)
- return;
-
- for_each_child_of_node(cpus_node, cpu_node) {
- u32 cpuid;
-
- if (of_node_cmp(cpu_node->type, "cpu"))
- continue;
-
- if (of_property_read_u32(cpu_node, "reg", &cpuid)) {
- pr_debug("%s: missing reg property\n",
- cpu_node->full_name);
- ret = -ENOENT;
- goto out;
- }
-
- /*
- * "secondary-boot-reg" property should be defined only
- * for secondary cpu
- */
- if ((cpuid & MPIDR_CPUID_BITMASK) == 1) {
- /*
- * Our secondary enable method requires a
- * "secondary-boot-reg" property to specify a register
- * address used to request the ROM code boot a secondary
- * core. If we have any trouble getting this we fall
- * back to uniprocessor mode.
- */
- if (of_property_read_u32(cpu_node,
- OF_SECONDARY_BOOT,
- &secondary_boot_addr)) {
- pr_warn("%s: no" OF_SECONDARY_BOOT "property\n",
- cpu_node->name);
- ret = -ENOENT;
- goto out;
- }
- }
- }
-
- /*
- * Enable the SCU on Cortex A9 based SoCs. If -ENOENT is
- * returned, the SoC reported a uniprocessor configuration.
- * We bail on any other error.
- */
- ret = scu_a9_enable();
-out:
- of_node_put(cpu_node);
- of_node_put(cpus_node);
+ const cpumask_t only_cpu_0 = { CPU_BITS_CPU0 };
- if (ret) {
+ /* Enable the SCU on Cortex A9 based SoCs */
+ if (scu_a9_enable()) {
/* Update the CPU present map to reflect uniprocessor mode */
- pr_warn("disabling SMP\n");
+ pr_warn("failed to enable A9 SCU - disabling SMP\n");
init_cpu_present(&only_cpu_0);
}
}
@@ -207,6 +167,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
u32 cpu_id;
u32 boot_val;
bool timeout = false;
+ const u32 secondary_boot_addr = secondary_boot_addr_for(cpu);
cpu_id = cpu_logical_map(cpu);
if (cpu_id & ~BOOT_ADDR_CPUID_MASK) {
@@ -214,13 +175,11 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
return -EINVAL;
}
- if (!secondary_boot_addr) {
- pr_err("required secondary boot register not specified\n");
+ if (!secondary_boot_addr)
return -EINVAL;
- }
- boot_reg = ioremap_nocache(
- (phys_addr_t)secondary_boot_addr, sizeof(u32));
+ boot_reg = ioremap_nocache((phys_addr_t)secondary_boot_addr,
+ sizeof(phys_addr_t));
if (!boot_reg) {
pr_err("unable to map boot register for cpu %u\n", cpu_id);
return -ENOMEM;
@@ -255,6 +214,57 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
return -ENXIO;
}
+/* Cluster Dormant Control command to bring CPU into a running state */
+#define CDC_CMD 6
+#define CDC_CMD_OFFSET 0
+#define CDC_CMD_REG(cpu) (CDC_CMD_OFFSET + 4*(cpu))
+
+/*
+ * BCM23550 has a Cluster Dormant Control block that keeps the core in
+ * idle state. A command needs to be sent to the block to bring the CPU
+ * into running state.
+ */
+static int bcm23550_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ void __iomem *cdc_base;
+ struct device_node *dn;
+ char *name;
+ int ret;
+
+ /* Make sure a CDC node exists before booting the
+ * secondary core.
+ */
+ name = "brcm,bcm23550-cdc";
+ dn = of_find_compatible_node(NULL, NULL, name);
+ if (!dn) {
+ pr_err("unable to find cdc node\n");
+ return -ENODEV;
+ }
+
+ cdc_base = of_iomap(dn, 0);
+ of_node_put(dn);
+
+ if (!cdc_base) {
+ pr_err("unable to remap cdc base register\n");
+ return -ENOMEM;
+ }
+
+ /* Boot the secondary core */
+ ret = kona_boot_secondary(cpu, idle);
+ if (ret)
+ goto out;
+
+ /* Bring this CPU to RUN state so that nIRQ nFIQ
+ * signals are unblocked.
+ */
+ writel_relaxed(CDC_CMD, cdc_base + CDC_CMD_REG(cpu));
+
+out:
+ iounmap(cdc_base);
+
+ return ret;
+}
+
static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -263,7 +273,7 @@ static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle)
* After wake up, secondary core branches to the startup
* address programmed at SKU ROM LUT location.
*/
- ret = nsp_write_lut();
+ ret = nsp_write_lut(cpu);
if (ret) {
pr_err("unable to write startup addr to SKU ROM LUT\n");
goto out;
@@ -276,12 +286,18 @@ out:
return ret;
}
-static const struct smp_operations bcm_smp_ops __initconst = {
+static const struct smp_operations kona_smp_ops __initconst = {
.smp_prepare_cpus = bcm_smp_prepare_cpus,
.smp_boot_secondary = kona_boot_secondary,
};
CPU_METHOD_OF_DECLARE(bcm_smp_bcm281xx, "brcm,bcm11351-cpu-method",
- &bcm_smp_ops);
+ &kona_smp_ops);
+
+static const struct smp_operations bcm23550_smp_ops __initconst = {
+ .smp_boot_secondary = bcm23550_boot_secondary,
+};
+CPU_METHOD_OF_DECLARE(bcm_smp_bcm23550, "brcm,bcm23550",
+ &bcm23550_smp_ops);
static const struct smp_operations nsp_smp_ops __initconst = {
.smp_prepare_cpus = bcm_smp_prepare_cpus,
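
The BCM23550 comment above describes the Cluster Dormant Control block: each CPU has a 32-bit command slot at a 4-byte stride, and writing command 6 requests the RUN state so nIRQ/nFIQ reach the core. A sketch of that poke (the command value and stride come from the patch; the helper name and anything else is illustrative):

#include <linux/io.h>

#define CDC_RUN_CMD             6               /* "enter RUN state" command */
#define CDC_CMD_REG(cpu)        (0x0 + 4 * (cpu))       /* one 32-bit slot per CPU */

static void cdc_request_run(void __iomem *cdc_base, unsigned int cpu)
{
        /* Relaxed write is enough; ordering against the boot call is the caller's job. */
        writel_relaxed(CDC_RUN_CMD, cdc_base + CDC_CMD_REG(cpu));
}
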
diff --git a/arch/arm/mach-berlin/Kconfig b/arch/arm/mach-berlin/Kconfig
index ffbfa0bd091b..63ab1d368625 100644
--- a/arch/arm/mach-berlin/Kconfig
+++ b/arch/arm/mach-berlin/Kconfig
@@ -2,11 +2,11 @@ menuconfig ARCH_BERLIN
bool "Marvell Berlin SoCs"
depends on ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
select ARM_GIC
select DW_APB_ICTL
select DW_APB_TIMER_OF
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select MFD_SYSCON
select PINCTRL
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig
index f711498c180c..dc7c6edeab39 100644
--- a/arch/arm/mach-clps711x/Kconfig
+++ b/arch/arm/mach-clps711x/Kconfig
@@ -1,38 +1,15 @@
-if ARCH_CLPS711X
-
-menu "CLPS711X/EP721X/EP731X Implementations"
-
-config ARCH_AUTCPU12
- bool "AUTCPU12"
- help
- Say Y if you intend to run the kernel on the autronix autcpu12
- board. This board is based on a Cirrus Logic CS89712.
-
-config ARCH_CDB89712
- bool "CDB89712"
- help
- This is an evaluation board from Cirrus for the CS89712 processor.
- The board includes 2 serial ports, Ethernet, IRDA, and expansion
- headers. It comes with 16 MB SDRAM and 8 MB flash ROM.
-
-config ARCH_CLEP7312
- bool "CLEP7312"
- help
- Boards based on the Cirrus Logic 7212/7312 chips.
-
-config ARCH_EDB7211
- bool "EDB7211"
- select ARCH_HAS_HOLES_MEMORYMODEL
- help
- Say Y here if you intend to run this kernel on a Cirrus Logic EDB-7211
- evaluation board.
-
-config ARCH_P720T
- bool "P720T"
- help
- Say Y here if you intend to run this kernel on the ARM Prospector
- 720T.
-
-endmenu
-
-endif
+menuconfig ARCH_CLPS711X
+ bool "Cirrus Logic EP721x/EP731x-based"
+ depends on ARCH_MULTI_V4T
+ select ARCH_REQUIRE_GPIOLIB
+ select AUTO_ZRELADDR
+ select CLKSRC_OF
+ select CLPS711X_TIMER
+ select COMMON_CLK
+ select CPU_ARM720T
+ select GENERIC_CLOCKEVENTS
+ select MFD_SYSCON
+ select OF_IRQ
+ select USE_OF
+ help
+ Select this if you use ARMv4T Cirrus Logic chips.
diff --git a/arch/arm/mach-clps711x/Makefile b/arch/arm/mach-clps711x/Makefile
index f04151efd96a..bd0b7b5d6e9d 100644
--- a/arch/arm/mach-clps711x/Makefile
+++ b/arch/arm/mach-clps711x/Makefile
@@ -1,13 +1 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y := common.o devices.o
-
-obj-$(CONFIG_ARCH_AUTCPU12) += board-autcpu12.o
-obj-$(CONFIG_ARCH_CDB89712) += board-cdb89712.o
-obj-$(CONFIG_ARCH_CLEP7312) += board-clep7312.o
-obj-$(CONFIG_ARCH_EDB7211) += board-edb7211.o
-obj-$(CONFIG_ARCH_P720T) += board-p720t.o
+obj-y += board-dt.o
diff --git a/arch/arm/mach-clps711x/Makefile.boot b/arch/arm/mach-clps711x/Makefile.boot
index eba77d35a615..e69de29bb2d1 100644
--- a/arch/arm/mach-clps711x/Makefile.boot
+++ b/arch/arm/mach-clps711x/Makefile.boot
@@ -1,5 +0,0 @@
-# The standard locations for stuff on CLPS711x type processors
-params_phys-y := 0xc0000100
-# Should probably have some agreement on these...
-initrd_phys-$(CONFIG_ARCH_P720T) := 0xc0400000
-initrd_phys-$(CONFIG_ARCH_CDB89712) := 0x00700000
diff --git a/arch/arm/mach-clps711x/board-dt.c b/arch/arm/mach-clps711x/board-dt.c
new file mode 100644
index 000000000000..ee1f83b1a332
--- /dev/null
+++ b/arch/arm/mach-clps711x/board-dt.c
@@ -0,0 +1,82 @@
+/*
+ * Author: Alexander Shiyan <shc_work@mail.ru>, 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/sizes.h>
+
+#include <linux/mfd/syscon/clps711x.h>
+
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#define CLPS711X_VIRT_BASE IOMEM(0xfeff4000)
+#define CLPS711X_PHYS_BASE (0x80000000)
+# define SYSFLG1 (0x0140)
+# define HALT (0x0800)
+# define UNIQID (0x2440)
+# define RANDID0 (0x2700)
+# define RANDID1 (0x2704)
+# define RANDID2 (0x2708)
+# define RANDID3 (0x270c)
+
+static struct map_desc clps711x_io_desc __initdata = {
+ .virtual = (unsigned long)CLPS711X_VIRT_BASE,
+ .pfn = __phys_to_pfn(CLPS711X_PHYS_BASE),
+ .length = 48 * SZ_1K,
+ .type = MT_DEVICE,
+};
+
+static void __init clps711x_map_io(void)
+{
+ iotable_init(&clps711x_io_desc, 1);
+}
+
+static const struct resource clps711x_cpuidle_res =
+ DEFINE_RES_MEM(CLPS711X_PHYS_BASE + HALT, SZ_128);
+
+static void __init clps711x_init(void)
+{
+ u32 id[5];
+
+ id[0] = readl(CLPS711X_VIRT_BASE + UNIQID);
+ id[1] = readl(CLPS711X_VIRT_BASE + RANDID0);
+ id[2] = readl(CLPS711X_VIRT_BASE + RANDID1);
+ id[3] = readl(CLPS711X_VIRT_BASE + RANDID2);
+ id[4] = readl(CLPS711X_VIRT_BASE + RANDID3);
+ system_rev = SYSFLG1_VERID(readl(CLPS711X_VIRT_BASE + SYSFLG1));
+
+ add_device_randomness(id, sizeof(id));
+
+ system_serial_low = id[0];
+
+ platform_device_register_simple("clps711x-cpuidle", PLATFORM_DEVID_NONE,
+ &clps711x_cpuidle_res, 1);
+}
+
+static void clps711x_restart(enum reboot_mode mode, const char *cmd)
+{
+ soft_restart(0);
+}
+
+static const char *clps711x_compat[] __initconst = {
+ "cirrus,ep7209",
+ NULL
+};
+
+DT_MACHINE_START(CLPS711X_DT, "Cirrus Logic CLPS711X (Device Tree Support)")
+ .dt_compat = clps711x_compat,
+ .map_io = clps711x_map_io,
+ .init_late = clps711x_init,
+ .restart = clps711x_restart,
+MACHINE_END
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
index 671acc5a3282..6466da8f3c11 100644
--- a/arch/arm/mach-clps711x/common.c
+++ b/arch/arm/mach-clps711x/common.c
@@ -37,8 +37,8 @@ static struct map_desc clps711x_io_desc[] __initdata = {
{
.virtual = (unsigned long)CLPS711X_VIRT_BASE,
.pfn = __phys_to_pfn(CLPS711X_PHYS_BASE),
- .length = SZ_64K,
- .type = MT_DEVICE
+ .length = 48 * SZ_1K,
+ .type = MT_DEVICE,
}
};
diff --git a/arch/arm/mach-clps711x/include/mach/clps711x.h b/arch/arm/mach-clps711x/include/mach/clps711x.h
deleted file mode 100644
index eb052a11aa9d..000000000000
--- a/arch/arm/mach-clps711x/include/mach/clps711x.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * This file contains the hardware definitions of the Cirrus Logic
- * ARM7 CLPS711X internal registers.
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __MACH_CLPS711X_H
-#define __MACH_CLPS711X_H
-
-#include <linux/mfd/syscon/clps711x.h>
-
-#define CLPS711X_PHYS_BASE (0x80000000)
-
-#define PADR (0x0000)
-#define PBDR (0x0001)
-#define PCDR (0x0002)
-#define PDDR (0x0003)
-#define PADDR (0x0040)
-#define PBDDR (0x0041)
-#define PCDDR (0x0042)
-#define PDDDR (0x0043)
-#define PEDR (0x0083)
-#define PEDDR (0x00c3)
-#define SYSCON1 (0x0100)
-#define SYSFLG1 (0x0140)
-#define MEMCFG1 (0x0180)
-#define MEMCFG2 (0x01c0)
-#define DRFPR (0x0200)
-#define LCDCON (0x02c0)
-#define TC1D (0x0300)
-#define TC2D (0x0340)
-#define RTCDR (0x0380)
-#define RTCMR (0x03c0)
-#define PMPCON (0x0400)
-#define CODR (0x0440)
-#define UARTDR1 (0x0480)
-#define UBRLCR1 (0x04c0)
-#define SYNCIO (0x0500)
-#define PALLSW (0x0540)
-#define PALMSW (0x0580)
-#define STFCLR (0x05c0)
-#define HALT (0x0800)
-#define STDBY (0x0840)
-
-#define FBADDR (0x1000)
-#define SYSCON2 (0x1100)
-#define SYSFLG2 (0x1140)
-#define UARTDR2 (0x1480)
-#define UBRLCR2 (0x14c0)
-#define SS2DR (0x1500)
-#define SS2POP (0x16c0)
-
-#define DAIR (0x2000)
-#define DAIDR0 (0x2040)
-#define DAIDR1 (0x2080)
-#define DAIDR2 (0x20c0)
-#define DAISR (0x2100)
-#define SYSCON3 (0x2200)
-#define LEDFLSH (0x22c0)
-#define SDCONF (0x2300)
-#define SDRFPR (0x2340)
-#define UNIQID (0x2440)
-#define DAI64FS (0x2600)
-#define PLLW (0x2610)
-#define PLLR (0xa5a8)
-#define RANDID0 (0x2700)
-#define RANDID1 (0x2704)
-#define RANDID2 (0x2708)
-#define RANDID3 (0x270c)
-
-#define LCDCON_GSEN (1 << 30)
-#define LCDCON_GSMD (1 << 31)
-
-/* common bits: UARTDR1 / UARTDR2 */
-#define UARTDR_FRMERR (1 << 8)
-#define UARTDR_PARERR (1 << 9)
-#define UARTDR_OVERR (1 << 10)
-
-/* common bits: UBRLCR1 / UBRLCR2 */
-#define UBRLCR_BAUD_MASK ((1 << 12) - 1)
-#define UBRLCR_BREAK (1 << 12)
-#define UBRLCR_PRTEN (1 << 13)
-#define UBRLCR_EVENPRT (1 << 14)
-#define UBRLCR_XSTOP (1 << 15)
-#define UBRLCR_FIFOEN (1 << 16)
-#define UBRLCR_WRDLEN5 (0 << 17)
-#define UBRLCR_WRDLEN6 (1 << 17)
-#define UBRLCR_WRDLEN7 (2 << 17)
-#define UBRLCR_WRDLEN8 (3 << 17)
-#define UBRLCR_WRDLEN_MASK (3 << 17)
-
-#define SYNCIO_FRMLEN(x) (((x) & 0x1f) << 8)
-#define SYNCIO_SMCKEN (1 << 13)
-#define SYNCIO_TXFRMEN (1 << 14)
-
-#define DAIR_RESERVED (0x0404)
-#define DAIR_DAIEN (1 << 16)
-#define DAIR_ECS (1 << 17)
-#define DAIR_LCTM (1 << 19)
-#define DAIR_LCRM (1 << 20)
-#define DAIR_RCTM (1 << 21)
-#define DAIR_RCRM (1 << 22)
-#define DAIR_LBM (1 << 23)
-
-#define DAIDR2_FIFOEN (1 << 15)
-#define DAIDR2_FIFOLEFT (0x0d << 16)
-#define DAIDR2_FIFORIGHT (0x11 << 16)
-
-#define DAISR_RCTS (1 << 0)
-#define DAISR_RCRS (1 << 1)
-#define DAISR_LCTS (1 << 2)
-#define DAISR_LCRS (1 << 3)
-#define DAISR_RCTU (1 << 4)
-#define DAISR_RCRO (1 << 5)
-#define DAISR_LCTU (1 << 6)
-#define DAISR_LCRO (1 << 7)
-#define DAISR_RCNF (1 << 8)
-#define DAISR_RCNE (1 << 9)
-#define DAISR_LCNF (1 << 10)
-#define DAISR_LCNE (1 << 11)
-#define DAISR_FIFO (1 << 12)
-
-#define DAI64FS_I2SF64 (1 << 0)
-#define DAI64FS_AUDIOCLKEN (1 << 1)
-#define DAI64FS_AUDIOCLKSRC (1 << 2)
-#define DAI64FS_MCLK256EN (1 << 3)
-#define DAI64FS_LOOPBACK (1 << 5)
-
-#define SDCONF_ACTIVE (1 << 10)
-#define SDCONF_CLKCTL (1 << 9)
-#define SDCONF_WIDTH_4 (0 << 7)
-#define SDCONF_WIDTH_8 (1 << 7)
-#define SDCONF_WIDTH_16 (2 << 7)
-#define SDCONF_WIDTH_32 (3 << 7)
-#define SDCONF_SIZE_16 (0 << 5)
-#define SDCONF_SIZE_64 (1 << 5)
-#define SDCONF_SIZE_128 (2 << 5)
-#define SDCONF_SIZE_256 (3 << 5)
-#define SDCONF_CASLAT_2 (2)
-#define SDCONF_CASLAT_3 (3)
-
-#define MEMCFG_BUS_WIDTH_32 (1)
-#define MEMCFG_BUS_WIDTH_16 (0)
-#define MEMCFG_BUS_WIDTH_8 (3)
-
-#define MEMCFG_SQAEN (1 << 6)
-#define MEMCFG_CLKENB (1 << 7)
-
-#define MEMCFG_WAITSTATE_8_3 (0 << 2)
-#define MEMCFG_WAITSTATE_7_3 (1 << 2)
-#define MEMCFG_WAITSTATE_6_3 (2 << 2)
-#define MEMCFG_WAITSTATE_5_3 (3 << 2)
-#define MEMCFG_WAITSTATE_4_2 (4 << 2)
-#define MEMCFG_WAITSTATE_3_2 (5 << 2)
-#define MEMCFG_WAITSTATE_2_2 (6 << 2)
-#define MEMCFG_WAITSTATE_1_2 (7 << 2)
-#define MEMCFG_WAITSTATE_8_1 (8 << 2)
-#define MEMCFG_WAITSTATE_7_1 (9 << 2)
-#define MEMCFG_WAITSTATE_6_1 (10 << 2)
-#define MEMCFG_WAITSTATE_5_1 (11 << 2)
-#define MEMCFG_WAITSTATE_4_0 (12 << 2)
-#define MEMCFG_WAITSTATE_3_0 (13 << 2)
-#define MEMCFG_WAITSTATE_2_0 (14 << 2)
-#define MEMCFG_WAITSTATE_1_0 (15 << 2)
-
-/* INTSR1 Interrupts */
-#define IRQ_CSINT (4)
-#define IRQ_EINT1 (5)
-#define IRQ_EINT2 (6)
-#define IRQ_EINT3 (7)
-#define IRQ_TC1OI (8)
-#define IRQ_TC2OI (9)
-#define IRQ_RTCMI (10)
-#define IRQ_TINT (11)
-#define IRQ_UTXINT1 (12)
-#define IRQ_URXINT1 (13)
-#define IRQ_UMSINT (14)
-#define IRQ_SSEOTI (15)
-
-/* INTSR2 Interrupts */
-#define IRQ_KBDINT (16 + 0)
-#define IRQ_SS2RX (16 + 1)
-#define IRQ_SS2TX (16 + 2)
-#define IRQ_UTXINT2 (16 + 12)
-#define IRQ_URXINT2 (16 + 13)
-
-/* INTSR3 Interrupts */
-#define IRQ_DAIINT (32 + 0)
-
-#endif /* __MACH_CLPS711X_H */
diff --git a/arch/arm/mach-clps711x/include/mach/hardware.h b/arch/arm/mach-clps711x/include/mach/hardware.h
deleted file mode 100644
index 833129c9f798..000000000000
--- a/arch/arm/mach-clps711x/include/mach/hardware.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * arch/arm/mach-clps711x/include/mach/hardware.h
- *
- * This file contains the hardware definitions of the Prospector P720T.
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __MACH_HARDWARE_H
-#define __MACH_HARDWARE_H
-
-#include <mach/clps711x.h>
-
-#define CLPS711X_VIRT_BASE IOMEM(0xfeff0000)
-
-#ifndef __ASSEMBLY__
-#define clps_readb(off) readb(CLPS711X_VIRT_BASE + (off))
-#define clps_readw(off) readw(CLPS711X_VIRT_BASE + (off))
-#define clps_readl(off) readl(CLPS711X_VIRT_BASE + (off))
-#define clps_writeb(val,off) writeb(val, CLPS711X_VIRT_BASE + (off))
-#define clps_writew(val,off) writew(val, CLPS711X_VIRT_BASE + (off))
-#define clps_writel(val,off) writel(val, CLPS711X_VIRT_BASE + (off))
-#endif
-
-#define CS0_PHYS_BASE (0x00000000)
-#define CS1_PHYS_BASE (0x10000000)
-#define CS2_PHYS_BASE (0x20000000)
-#define CS3_PHYS_BASE (0x30000000)
-#define CS4_PHYS_BASE (0x40000000)
-#define CS5_PHYS_BASE (0x50000000)
-#define CS6_PHYS_BASE (0x60000000)
-#define CS7_PHYS_BASE (0x70000000)
-
-#define CLPS711X_SRAM_BASE CS6_PHYS_BASE
-#define CLPS711X_SRAM_SIZE (48 * 1024)
-
-#define CLPS711X_SDRAM0_BASE (0xc0000000)
-#define CLPS711X_SDRAM1_BASE (0xd0000000)
-
-#endif
diff --git a/arch/arm/mach-clps711x/include/mach/uncompress.h b/arch/arm/mach-clps711x/include/mach/uncompress.h
deleted file mode 100644
index 5f02d06dc655..000000000000
--- a/arch/arm/mach-clps711x/include/mach/uncompress.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-clps711x/include/mach/uncompress.h
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <mach/clps711x.h>
-
-#ifdef CONFIG_DEBUG_CLPS711X_UART2
-#define SYSFLGx SYSFLG2
-#define UARTDRx UARTDR2
-#else
-#define SYSFLGx SYSFLG1
-#define UARTDRx UARTDR1
-#endif
-
-#define phys_reg(x) (*(volatile u32 *)(CLPS711X_PHYS_BASE + (x)))
-
-/*
- * The following code assumes the serial port has already been
- * initialized by the bootloader. If you didn't setup a port in
- * your bootloader then nothing will appear (which might be desired).
- *
- * This does not append a newline
- */
-static inline void putc(int c)
-{
- while (phys_reg(SYSFLGx) & SYSFLG_UTXFF)
- barrier();
- phys_reg(UARTDRx) = c;
-}
-
-static inline void flush(void)
-{
- while (phys_reg(SYSFLGx) & SYSFLG_UBUSY)
- barrier();
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 1844076f6403..18296a99c4d2 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -115,8 +115,6 @@ static struct davinci_i2c_platform_data i2c_pdata = {
.scl_pin = 14,
};
-static struct snd_platform_data dm355_evm_snd_data;
-
static int dm355evm_mmc_gpios = -EINVAL;
static void dm355evm_mmcsd_gpios(unsigned gpio)
@@ -411,7 +409,7 @@ static __init void dm355_evm_init(void)
ARRAY_SIZE(dm355_evm_spi_info));
/* DM335 EVM uses ASP1; line-out is a stereo mini-jack */
- dm355_init_asp1(ASP1_TX_EVT_EN | ASP1_RX_EVT_EN, &dm355_evm_snd_data);
+ dm355_init_asp1(ASP1_TX_EVT_EN | ASP1_RX_EVT_EN);
}
MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index f073518f621a..0464999b7137 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -176,10 +176,6 @@ static struct at24_platform_data eeprom_info = {
.context = (void *)0x7f00,
};
-static struct snd_platform_data dm365_evm_snd_data __maybe_unused = {
- .asp_chan_q = EVENTQ_3,
-};
-
static struct i2c_board_info i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
@@ -763,9 +759,9 @@ static __init void dm365_evm_init(void)
evm_init_cpld();
#ifdef CONFIG_SND_DM365_AIC3X_CODEC
- dm365_init_asp(&dm365_evm_snd_data);
+ dm365_init_asp();
#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
- dm365_init_vc(&dm365_evm_snd_data);
+ dm365_init_vc();
#endif
dm365_init_rtc();
dm365_init_ks(&dm365evm_ks_data);
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index ab47b8eb1b15..521e40977265 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -264,8 +264,6 @@ static struct platform_device rtc_dev = {
.id = -1,
};
-static struct snd_platform_data dm644x_evm_snd_data;
-
/*----------------------------------------------------------------------*/
#ifdef CONFIG_I2C
/*
@@ -799,7 +797,7 @@ static __init void davinci_evm_init(void)
dm644x_init_video(&dm644xevm_capture_cfg, &dm644xevm_display_cfg);
davinci_serial_init(dm644x_serial_device);
- dm644x_init_asp(&dm644x_evm_snd_data);
+ dm644x_init_asp();
/* irlml6401 switches over 1A, in under 8 msec */
davinci_setup_usb(1000, 8);
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 8fcdcf87c47c..ad10017203c1 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -127,8 +127,6 @@ static struct platform_device davinci_fb_device = {
.num_resources = 0,
};
-static struct snd_platform_data dm644x_ntosd2_snd_data;
-
static struct gpio_led ntosd2_leds[] = {
{ .name = "led1_green", .gpio = GPIO(10), },
{ .name = "led1_red", .gpio = GPIO(11), },
@@ -200,7 +198,7 @@ static __init void davinci_ntosd2_init(void)
ARRAY_SIZE(davinci_ntosd2_devices));
davinci_serial_init(dm644x_serial_device);
- dm644x_init_asp(&dm644x_ntosd2_snd_data);
+ dm644x_init_asp();
soc_info->emac_pdata->phy_id = NEUROS_OSD2_PHY_ID;
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 239886299968..0d046accb35b 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -326,6 +326,20 @@ static struct clk mcasp_clk = {
.gpsc = 1,
};
+static struct clk mcbsp0_clk = {
+ .name = "mcbsp0",
+ .parent = &async3_clk,
+ .lpsc = DA850_LPSC1_McBSP0,
+ .gpsc = 1,
+};
+
+static struct clk mcbsp1_clk = {
+ .name = "mcbsp1",
+ .parent = &async3_clk,
+ .lpsc = DA850_LPSC1_McBSP1,
+ .gpsc = 1,
+};
+
static struct clk lcdc_clk = {
.name = "lcdc",
.parent = &pll0_sysclk2,
@@ -482,6 +496,8 @@ static struct clk_lookup da850_clks[] = {
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci_mdio.0", "fck", &emac_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
+ CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk),
+ CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk),
CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
CLK("da830-mmc.0", NULL, &mmcsd0_clk),
CLK("da830-mmc.1", NULL, &mmcsd1_clk),
diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h
index 4ffc37accce0..c62b90c6118a 100644
--- a/arch/arm/mach-davinci/davinci.h
+++ b/arch/arm/mach-davinci/davinci.h
@@ -85,14 +85,14 @@ int davinci_init_wdt(void);
void dm355_init(void);
void dm355_init_spi0(unsigned chipselect_mask,
const struct spi_board_info *info, unsigned len);
-void dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata);
+void dm355_init_asp1(u32 evt_enable);
int dm355_init_video(struct vpfe_config *, struct vpbe_config *);
int dm355_gpio_register(void);
/* DM365 function declarations */
void dm365_init(void);
-void dm365_init_asp(struct snd_platform_data *pdata);
-void dm365_init_vc(struct snd_platform_data *pdata);
+void dm365_init_asp(void);
+void dm365_init_vc(void);
void dm365_init_ks(struct davinci_ks_platform_data *pdata);
void dm365_init_rtc(void);
void dm365_init_spi0(unsigned chipselect_mask,
@@ -102,7 +102,7 @@ int dm365_gpio_register(void);
/* DM644x function declarations */
void dm644x_init(void);
-void dm644x_init_asp(struct snd_platform_data *pdata);
+void dm644x_init_asp(void);
int dm644x_init_video(struct vpfe_config *, struct vpbe_config *);
int dm644x_gpio_register(void);
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 5a19cca7ed6a..d33322ddedab 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -1035,7 +1035,7 @@ static struct davinci_soc_info davinci_soc_info_dm355 = {
.sram_len = SZ_32K,
};
-void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata)
+void __init dm355_init_asp1(u32 evt_enable)
{
/* we don't use ASP1 IRQs, or we'd need to mux them ... */
if (evt_enable & ASP1_TX_EVT_EN)
@@ -1044,7 +1044,6 @@ void __init dm355_init_asp1(u32 evt_enable, struct snd_platform_data *pdata)
if (evt_enable & ASP1_RX_EVT_EN)
davinci_cfg_reg(DM355_EVT9_ASP1_RX);
- dm355_asp1_device.dev.platform_data = pdata;
platform_device_register(&dm355_asp1_device);
}
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 8aa004b02fe9..ef3add999263 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1138,7 +1138,7 @@ static struct davinci_soc_info davinci_soc_info_dm365 = {
.sram_len = SZ_32K,
};
-void __init dm365_init_asp(struct snd_platform_data *pdata)
+void __init dm365_init_asp(void)
{
davinci_cfg_reg(DM365_MCBSP0_BDX);
davinci_cfg_reg(DM365_MCBSP0_X);
@@ -1148,15 +1148,13 @@ void __init dm365_init_asp(struct snd_platform_data *pdata)
davinci_cfg_reg(DM365_MCBSP0_BFSR);
davinci_cfg_reg(DM365_EVT2_ASP_TX);
davinci_cfg_reg(DM365_EVT3_ASP_RX);
- dm365_asp_device.dev.platform_data = pdata;
platform_device_register(&dm365_asp_device);
}
-void __init dm365_init_vc(struct snd_platform_data *pdata)
+void __init dm365_init_vc(void)
{
davinci_cfg_reg(DM365_EVT2_VC_TX);
davinci_cfg_reg(DM365_EVT3_VC_RX);
- dm365_vc_device.dev.platform_data = pdata;
platform_device_register(&dm365_vc_device);
}
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0afa279ec460..b437c3730f65 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -921,10 +921,9 @@ static struct davinci_soc_info davinci_soc_info_dm644x = {
.sram_len = SZ_16K,
};
-void __init dm644x_init_asp(struct snd_platform_data *pdata)
+void __init dm644x_init_asp(void)
{
davinci_cfg_reg(DM644X_MCBSP);
- dm644x_asp_device.dev.platform_data = pdata;
platform_device_register(&dm644x_asp_device);
}
diff --git a/arch/arm/mach-davinci/psc.h b/arch/arm/mach-davinci/psc.h
index 99d47cfa301f..8af9f09fc10c 100644
--- a/arch/arm/mach-davinci/psc.h
+++ b/arch/arm/mach-davinci/psc.h
@@ -171,6 +171,8 @@
#define DA8XX_LPSC1_I2C 11
#define DA8XX_LPSC1_UART1 12
#define DA8XX_LPSC1_UART2 13
+#define DA850_LPSC1_McBSP0 14
+#define DA850_LPSC1_McBSP1 15
#define DA8XX_LPSC1_LCDC 16
#define DA8XX_LPSC1_PWM 17
#define DA850_LPSC1_MMC_SD1 18
diff --git a/arch/arm/mach-digicolor/Kconfig b/arch/arm/mach-digicolor/Kconfig
index fc65b0f1db48..9d05c6c4181d 100644
--- a/arch/arm/mach-digicolor/Kconfig
+++ b/arch/arm/mach-digicolor/Kconfig
@@ -1,10 +1,10 @@
config ARCH_DIGICOLOR
bool "Conexant Digicolor SoC Support"
depends on ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select DIGICOLOR_TIMER
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select MFD_SYSCON
select PINCTRL
select PINCTRL_DIGICOLOR
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 45b81a2bcd4b..3b39ea353d30 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/m48t86.h>
+#include <linux/platform_data/rtc-m48t86.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 20dcf6e904b2..8f820de890b4 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -12,13 +12,14 @@ menuconfig ARCH_EXYNOS
depends on ARCH_MULTI_V7
select ARCH_HAS_BANDGAP
select ARCH_HAS_HOLES_MEMORYMODEL
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_GIC
select COMMON_CLK_SAMSUNG
select EXYNOS_THERMAL
select EXYNOS_PMU
select EXYNOS_SROM
+ select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
+ select GPIOLIB
select HAVE_ARM_SCU if SMP
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 34d29df3e006..9ea6c54645ad 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_ARCH_EXYNOS) += exynos.o exynos-smc.o firmware.o
obj-$(CONFIG_EXYNOS_CPU_SUSPEND) += pm.o sleep.o
obj-$(CONFIG_PM_SLEEP) += suspend.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
@@ -23,5 +22,3 @@ AFLAGS_sleep.o :=-Wa,-march=armv7-a$(plus_sec)
obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o
CFLAGS_mcpm-exynos.o += -march=armv7-a
-
-obj-$(CONFIG_S5P_DEV_MFC) += s5p-dev-mfc.o
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 5365bf1f586a..9424a8a9f308 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -166,7 +166,6 @@ extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
extern void exynos_set_delayed_reset_assertion(bool enable);
-extern void s5p_init_cpu(void __iomem *cpuid_addr);
extern unsigned int samsung_rev(void);
extern void exynos_core_restart(u32 core_id);
extern int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr);
@@ -174,12 +173,12 @@ extern int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr);
static inline void pmu_raw_writel(u32 val, u32 offset)
{
- __raw_writel(val, pmu_base_addr + offset);
+ writel_relaxed(val, pmu_base_addr + offset);
}
static inline u32 pmu_raw_readl(u32 offset)
{
- return __raw_readl(pmu_base_addr + offset);
+ return readl_relaxed(pmu_base_addr + offset);
}
#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
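
The pmu_raw_*() wrappers switch from __raw_writel()/__raw_readl() to the relaxed accessors; unlike the __raw_* forms, these are endianness-aware, which is what lets a big-endian (BE8) kernel drive the PMU correctly (see also the ARM_BE8(setend be) added to the secondary entry point further down). The same wrapper pattern as a generic, hedged sketch (names and the ioremap'ed base are assumptions):

#include <linux/io.h>

static void __iomem *regs;      /* assumed to be set up by ioremap() elsewhere */

static inline void chip_writel(u32 val, u32 offset)
{
        /* writel_relaxed() byte-swaps on big-endian kernels; __raw_writel() does not. */
        writel_relaxed(val, regs + offset);
}

static inline u32 chip_readl(u32 offset)
{
        return readl_relaxed(regs + offset);
}
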
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index dea410adee7e..acabf0bffc5d 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -24,9 +24,9 @@
#include <asm/mach/map.h>
#include <mach/map.h>
+#include <plat/cpu.h>
#include "common.h"
-#include "mfc.h"
static struct map_desc exynos4_iodesc[] __initdata = {
{
@@ -234,23 +234,6 @@ static char const *const exynos_dt_compat[] __initconst = {
NULL
};
-static void __init exynos_reserve(void)
-{
-#ifdef CONFIG_S5P_DEV_MFC
- int i;
- char *mfc_mem[] = {
- "samsung,mfc-v5",
- "samsung,mfc-v6",
- "samsung,mfc-v7",
- "samsung,mfc-v8",
- };
-
- for (i = 0; i < ARRAY_SIZE(mfc_mem); i++)
- if (of_scan_flat_dt(s5p_fdt_alloc_mfc_mem, mfc_mem[i]))
- break;
-#endif
-}
-
static void __init exynos_dt_fixup(void)
{
/*
@@ -272,6 +255,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
.init_machine = exynos_dt_machine_init,
.init_late = exynos_init_late,
.dt_compat = exynos_dt_compat,
- .reserve = exynos_reserve,
.dt_fixup = exynos_dt_fixup,
MACHINE_END
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 1bfd1b0bd9dc..fd6da5419b51 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -41,9 +41,9 @@ static int exynos_do_idle(unsigned long mode)
case FW_DO_IDLE_AFTR:
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
exynos_save_cp15();
- __raw_writel(virt_to_phys(exynos_cpu_resume_ns),
- sysram_ns_base_addr + 0x24);
- __raw_writel(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
+ writel_relaxed(virt_to_phys(exynos_cpu_resume_ns),
+ sysram_ns_base_addr + 0x24);
+ writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
if (soc_is_exynos3250()) {
flush_cache_all();
exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE,
@@ -97,7 +97,7 @@ static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
if (soc_is_exynos4412())
boot_reg += 4 * cpu;
- __raw_writel(boot_addr, boot_reg);
+ writel_relaxed(boot_addr, boot_reg);
return 0;
}
@@ -113,7 +113,7 @@ static int exynos_get_cpu_boot_addr(int cpu, unsigned long *boot_addr)
if (soc_is_exynos4412())
boot_reg += 4 * cpu;
- *boot_addr = __raw_readl(boot_reg);
+ *boot_addr = readl_relaxed(boot_reg);
return 0;
}
@@ -234,20 +234,20 @@ void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
{
unsigned int tmp;
- tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+ tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4);
if (mode & BOOT_MODE_MASK)
tmp &= ~BOOT_MODE_MASK;
tmp |= mode;
- __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+ writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4);
}
void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
{
unsigned int tmp;
- tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+ tmp = readl_relaxed(REG_CPU_STATE_ADDR + cpu * 4);
tmp &= ~mode;
- __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+ writel_relaxed(tmp, REG_CPU_STATE_ADDR + cpu * 4);
}
diff --git a/arch/arm/mach-exynos/headsmp.S b/arch/arm/mach-exynos/headsmp.S
index b54f9701e421..d3d24ab351ae 100644
--- a/arch/arm/mach-exynos/headsmp.S
+++ b/arch/arm/mach-exynos/headsmp.S
@@ -12,12 +12,15 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
+
/*
* exynos4 specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
* ready for them to initialise.
*/
ENTRY(exynos4_secondary_startup)
+ARM_BE8(setend be)
mrc p15, 0, r0, c0, c0, 5
and r0, r0, #15
adr r4, 1f
diff --git a/arch/arm/mach-exynos/mfc.h b/arch/arm/mach-exynos/mfc.h
deleted file mode 100644
index dec93cd5b3c6..000000000000
--- a/arch/arm/mach-exynos/mfc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (C) 2013 Samsung Electronics Co.Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __MACH_EXYNOS_MFC_H
-#define __MACH_EXYNOS_MFC_H __FILE__
-
-int __init s5p_fdt_alloc_mfc_mem(unsigned long node, const char *uname,
- int depth, void *data);
-
-#endif /* __MACH_EXYNOS_MFC_H */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 85c3be63d644..98ffe1e62ad5 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -264,7 +264,7 @@ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
ret = PTR_ERR(boot_reg);
goto fail;
}
- __raw_writel(boot_addr, boot_reg);
+ writel_relaxed(boot_addr, boot_reg);
ret = 0;
}
fail:
@@ -289,7 +289,7 @@ int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr)
ret = PTR_ERR(boot_reg);
goto fail;
}
- *boot_addr = __raw_readl(boot_reg);
+ *boot_addr = readl_relaxed(boot_reg);
ret = 0;
}
fail:
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index c43b776a51a3..487295f4a56b 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -132,9 +132,9 @@ static void exynos_set_wakeupmask(long mask)
static void exynos_cpu_set_boot_vector(long flags)
{
- __raw_writel(virt_to_phys(exynos_cpu_resume),
- exynos_boot_vector_addr());
- __raw_writel(flags, exynos_boot_vector_flag());
+ writel_relaxed(virt_to_phys(exynos_cpu_resume),
+ exynos_boot_vector_addr());
+ writel_relaxed(flags, exynos_boot_vector_flag());
}
static int exynos_aftr_finisher(unsigned long flags)
diff --git a/arch/arm/mach-exynos/s5p-dev-mfc.c b/arch/arm/mach-exynos/s5p-dev-mfc.c
deleted file mode 100644
index 8ef1f3ee4e98..000000000000
--- a/arch/arm/mach-exynos/s5p-dev-mfc.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
- *
- * Base S5P MFC resource and device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/memblock.h>
-#include <linux/ioport.h>
-#include <linux/of_fdt.h>
-#include <linux/of.h>
-
-static struct platform_device s5p_device_mfc_l;
-static struct platform_device s5p_device_mfc_r;
-
-struct s5p_mfc_dt_meminfo {
- unsigned long loff;
- unsigned long lsize;
- unsigned long roff;
- unsigned long rsize;
- char *compatible;
-};
-
-struct s5p_mfc_reserved_mem {
- phys_addr_t base;
- unsigned long size;
- struct device *dev;
-};
-
-static struct s5p_mfc_reserved_mem s5p_mfc_mem[2] __initdata;
-
-
-static void __init s5p_mfc_reserve_mem(phys_addr_t rbase, unsigned int rsize,
- phys_addr_t lbase, unsigned int lsize)
-{
- int i;
-
- s5p_mfc_mem[0].dev = &s5p_device_mfc_r.dev;
- s5p_mfc_mem[0].base = rbase;
- s5p_mfc_mem[0].size = rsize;
-
- s5p_mfc_mem[1].dev = &s5p_device_mfc_l.dev;
- s5p_mfc_mem[1].base = lbase;
- s5p_mfc_mem[1].size = lsize;
-
- for (i = 0; i < ARRAY_SIZE(s5p_mfc_mem); i++) {
- struct s5p_mfc_reserved_mem *area = &s5p_mfc_mem[i];
- if (memblock_remove(area->base, area->size)) {
- printk(KERN_ERR "Failed to reserve memory for MFC device (%ld bytes at 0x%08lx)\n",
- area->size, (unsigned long) area->base);
- area->base = 0;
- }
- }
-}
-
-int __init s5p_fdt_alloc_mfc_mem(unsigned long node, const char *uname,
- int depth, void *data)
-{
- const __be32 *prop;
- int len;
- struct s5p_mfc_dt_meminfo mfc_mem;
-
- if (!data)
- return 0;
-
- if (!of_flat_dt_is_compatible(node, data))
- return 0;
-
- prop = of_get_flat_dt_prop(node, "samsung,mfc-l", &len);
- if (!prop || (len != 2 * sizeof(unsigned long)))
- return 0;
-
- mfc_mem.loff = be32_to_cpu(prop[0]);
- mfc_mem.lsize = be32_to_cpu(prop[1]);
-
- prop = of_get_flat_dt_prop(node, "samsung,mfc-r", &len);
- if (!prop || (len != 2 * sizeof(unsigned long)))
- return 0;
-
- mfc_mem.roff = be32_to_cpu(prop[0]);
- mfc_mem.rsize = be32_to_cpu(prop[1]);
-
- s5p_mfc_reserve_mem(mfc_mem.roff, mfc_mem.rsize,
- mfc_mem.loff, mfc_mem.lsize);
-
- return 1;
-}
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index f21690937b7d..3750575c73c5 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -301,7 +301,7 @@ static int exynos5420_cpu_suspend(unsigned long arg)
unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- __raw_writel(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);
+ writel_relaxed(0x0, sysram_base_addr + EXYNOS5420_CPU_STATE);
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
@@ -373,8 +373,8 @@ static void exynos5420_pm_prepare(void)
* needs to restore it back in case, the primary cpu fails to
* suspend for any reason.
*/
- exynos5420_cpu_state = __raw_readl(sysram_base_addr +
- EXYNOS5420_CPU_STATE);
+ exynos5420_cpu_state = readl_relaxed(sysram_base_addr +
+ EXYNOS5420_CPU_STATE);
exynos_pm_enter_sleep_mode();
@@ -504,11 +504,11 @@ static void exynos5420_pm_resume(void)
/* Restore the CPU0 low power state register */
tmp = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);
pmu_raw_writel(tmp | S5P_CORE_LOCAL_PWR_EN,
- EXYNOS5_ARM_CORE0_SYS_PWR_REG);
+ EXYNOS5_ARM_CORE0_SYS_PWR_REG);
/* Restore the sysram cpu state register */
- __raw_writel(exynos5420_cpu_state,
- sysram_base_addr + EXYNOS5420_CPU_STATE);
+ writel_relaxed(exynos5420_cpu_state,
+ sysram_base_addr + EXYNOS5420_CPU_STATE);
pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL,
S5P_CENTRAL_SEQ_OPTION);
diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c
index 8cc62150116a..c08c44ec5175 100644
--- a/arch/arm/mach-hisi/hisilicon.c
+++ b/arch/arm/mach-hisi/hisilicon.c
@@ -53,31 +53,3 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)")
.map_io = hi3620_map_io,
.dt_compat = hi3xxx_compat,
MACHINE_END
-
-static const char *const hix5hd2_compat[] __initconst = {
- "hisilicon,hix5hd2",
- NULL,
-};
-
-DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)")
- .dt_compat = hix5hd2_compat,
-MACHINE_END
-
-static const char *const hip04_compat[] __initconst = {
- "hisilicon,hip04-d01",
- NULL,
-};
-
-DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)")
- .dt_compat = hip04_compat,
-MACHINE_END
-
-static const char *const hip01_compat[] __initconst = {
- "hisilicon,hip01",
- "hisilicon,hip01-ca9x2",
- NULL,
-};
-
-DT_MACHINE_START(HIP01, "Hisilicon HIP01 (Flattened Device Tree)")
- .dt_compat = hip01_compat,
-MACHINE_END
diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c
index 47ed32cf57cc..e1d67648d5d0 100644
--- a/arch/arm/mach-hisi/platsmp.c
+++ b/arch/arm/mach-hisi/platsmp.c
@@ -103,7 +103,7 @@ static void __init hisi_common_smp_prepare_cpus(unsigned int max_cpus)
hisi_enable_scu_a9();
}
-void hix5hd2_set_scu_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr)
+static void hix5hd2_set_scu_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr)
{
void __iomem *virt;
@@ -139,7 +139,7 @@ static const struct smp_operations hix5hd2_smp_ops __initconst = {
#define HIP01_BOOT_ADDRESS 0x80000000
#define REG_SC_CTRL 0x000
-void hip01_set_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr)
+static void hip01_set_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr)
{
void __iomem *virt;
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index dd905b9602a0..ee9a318cab31 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,10 +1,10 @@
menuconfig ARCH_MXC
bool "Freescale i.MX family"
depends on ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7 || ARM_SINGLE_ARMV7M
- select ARCH_REQUIRE_GPIOLIB
select ARCH_SUPPORTS_BIG_ENDIAN
select CLKSRC_IMX_GPT
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select PINCTRL
select PM_OPP if PM
select SOC_BUS
@@ -44,9 +44,6 @@ config MXC_USE_EPIT
uses the same clocks as the GPT. Anyway, on some systems the GPT
may be in use for other purposes.
-config ARCH_HAS_RNGA
- bool
-
config HAVE_IMX_ANATOP
bool
@@ -90,7 +87,6 @@ config SOC_IMX27
config SOC_IMX31
bool
select CPU_V6
- select IMX_HAVE_PLATFORM_MXC_RNGA
select MXC_AVIC
select SMP_ON_UP if SMP
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 9fbe624a5ef9..9f5fffd62702 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -8,8 +8,8 @@ obj-$(CONFIG_SOC_IMX25) += cpu-imx25.o mach-imx25.o pm-imx25.o
obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
obj-$(CONFIG_SOC_IMX27) += mm-imx27.o ehci-imx27.o
-obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o ehci-imx35.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o iomux-imx31.o ehci-imx31.o
+obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o ehci-imx35.o
imx5-pm-$(CONFIG_PM) += pm-imx5.o
obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o $(imx5-pm-y)
diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c
index 7fa176e792bd..1afccae0420c 100644
--- a/arch/arm/mach-imx/avic.c
+++ b/arch/arm/mach-imx/avic.c
@@ -55,23 +55,20 @@ static void __iomem *avic_base;
static struct irq_domain *domain;
#ifdef CONFIG_FIQ
-static int avic_set_irq_fiq(unsigned int irq, unsigned int type)
+static int avic_set_irq_fiq(unsigned int hwirq, unsigned int type)
{
- struct irq_data *d = irq_get_irq_data(irq);
unsigned int irqt;
- irq = d->hwirq;
-
- if (irq >= AVIC_NUM_IRQS)
+ if (hwirq >= AVIC_NUM_IRQS)
return -EINVAL;
- if (irq < AVIC_NUM_IRQS / 2) {
- irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq);
- imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL);
+ if (hwirq < AVIC_NUM_IRQS / 2) {
+ irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << hwirq);
+ imx_writel(irqt | (!!type << hwirq), avic_base + AVIC_INTTYPEL);
} else {
- irq -= AVIC_NUM_IRQS / 2;
- irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq);
- imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH);
+ hwirq -= AVIC_NUM_IRQS / 2;
+ irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << hwirq);
+ imx_writel(irqt | (!!type << hwirq), avic_base + AVIC_INTTYPEH);
}
return 0;
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 58a38464480d..a8f469333027 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -32,7 +32,6 @@ void imx27_init_early(void);
void imx31_init_early(void);
void imx35_init_early(void);
void mxc_init_irq(void __iomem *);
-void tzic_init_irq(void);
void mx1_init_irq(void);
void mx21_init_irq(void);
void mx27_init_irq(void);
@@ -55,6 +54,7 @@ struct platform_device *mxc_register_gpio(char *name, int id,
void mxc_set_cpu_type(unsigned int type);
void mxc_restart(enum reboot_mode, const char *);
void mxc_arch_reset_init(void __iomem *);
+void imx1_reset_init(void __iomem *);
void imx_set_aips(void __iomem *);
void imx_aips_allow_unprivileged_access(const char *compat);
int mxc_device_init(void);
@@ -67,6 +67,7 @@ void imx_gpc_set_arm_power_in_lpm(bool power_off);
void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw);
void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw);
void imx25_pm_init(void);
+void imx27_pm_init(void);
enum mxc_cpu_pwr_mode {
WAIT_CLOCKED, /* wfi only */
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index 3403bac94a31..4f2d1c772f85 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -60,13 +60,9 @@ static int get_mx51_srev(void)
/*
* Returns:
* the silicon revision of the cpu
- * -EINVAL - not a mx51
*/
int mx51_revision(void)
{
- if (!cpu_is_mx51())
- return -EINVAL;
-
if (mx5_cpu_rev == -1)
mx5_cpu_rev = get_mx51_srev();
@@ -112,13 +108,9 @@ static int get_mx53_srev(void)
/*
* Returns:
* the silicon revision of the cpu
- * -EINVAL - not a mx53
*/
int mx53_revision(void)
{
- if (!cpu_is_mx53())
- return -EINVAL;
-
if (mx5_cpu_rev == -1)
mx5_cpu_rev = get_mx53_srev();
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index 6a96b7cf468f..b3347d32349f 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -10,8 +10,6 @@
#include "common.h"
unsigned int __mxc_cpu_type;
-EXPORT_SYMBOL(__mxc_cpu_type);
-
static unsigned int imx_soc_revision;
void mxc_set_cpu_type(unsigned int type)
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 353bb8774112..db0f48c4b17e 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -10,6 +10,8 @@
#include <linux/module.h>
#include <asm/cpuidle.h>
+#include <soc/imx/cpuidle.h>
+
#include "common.h"
#include "cpuidle.h"
#include "hardware.h"
@@ -62,6 +64,24 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
.safe_state_index = 0,
};
+/*
+ * i.MX6 Q/DL has an erratum (ERR006687) that prevents the FEC from waking the
+ * CPUs when they are in wait(unclocked) state. As the hardware workaround isn't
+ * applicable to all boards, disable the deeper idle state when the workaround
+ * isn't present and the FEC is in use.
+ */
+void imx6q_cpuidle_fec_irqs_used(void)
+{
+ imx6q_cpuidle_driver.states[1].disabled = true;
+}
+EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_used);
+
+void imx6q_cpuidle_fec_irqs_unused(void)
+{
+ imx6q_cpuidle_driver.states[1].disabled = false;
+}
+EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_unused);
+
int __init imx6q_cpuidle_init(void)
{
/* Set INT_MEM_CLK_LPM bit to get a reliable WAIT mode support */
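The two exported helpers above are meant to be called by the driver that owns the FEC interrupts, around the period in which those interrupts must be able to wake the CPUs. A sketch of a hypothetical consumer (the function names below are illustrative, not the real FEC driver code):

#include <soc/imx/cpuidle.h>

static int example_fec_open(void)
{
	/* ERR006687: FEC IRQs cannot wake a CPU out of WAIT, keep it disabled */
	imx6q_cpuidle_fec_irqs_used();
	return 0;
}

static void example_fec_stop(void)
{
	/* IRQs no longer relied upon, allow the deeper idle state again */
	imx6q_cpuidle_fec_irqs_unused();
}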
diff --git a/arch/arm/mach-imx/devices/Kconfig b/arch/arm/mach-imx/devices/Kconfig
index 3a552989248e..6ffe57267233 100644
--- a/arch/arm/mach-imx/devices/Kconfig
+++ b/arch/arm/mach-imx/devices/Kconfig
@@ -57,10 +57,6 @@ config IMX_HAVE_PLATFORM_MXC_MMC
config IMX_HAVE_PLATFORM_MXC_NAND
bool
-config IMX_HAVE_PLATFORM_MXC_RNGA
- bool
- select ARCH_HAS_RNGA
-
config IMX_HAVE_PLATFORM_MXC_RTC
bool
diff --git a/arch/arm/mach-imx/devices/devices.c b/arch/arm/mach-imx/devices/devices.c
index 8eab5440da28..300451727362 100644
--- a/arch/arm/mach-imx/devices/devices.c
+++ b/arch/arm/mach-imx/devices/devices.c
@@ -22,6 +22,9 @@
#include <linux/err.h>
#include <linux/platform_device.h>
+#include "../common.h"
+#include "devices-common.h"
+
struct device mxc_aips_bus = {
.init_name = "mxc_aips",
};
diff --git a/arch/arm/mach-imx/devices/platform-gpio-mxc.c b/arch/arm/mach-imx/devices/platform-gpio-mxc.c
index 26483fa94b75..cd1fe69d8807 100644
--- a/arch/arm/mach-imx/devices/platform-gpio-mxc.c
+++ b/arch/arm/mach-imx/devices/platform-gpio-mxc.c
@@ -7,6 +7,7 @@
* Free Software Foundation.
*/
#include "devices-common.h"
+#include "../common.h"
struct platform_device *__init mxc_register_gpio(char *name, int id,
resource_size_t iobase, resource_size_t iosize, int irq, int irq_high)
diff --git a/arch/arm/mach-imx/devices/platform-mxc_rnga.c b/arch/arm/mach-imx/devices/platform-mxc_rnga.c
deleted file mode 100644
index 851fbc8af7a9..000000000000
--- a/arch/arm/mach-imx/devices/platform-mxc_rnga.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2010 Pengutronix
- * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- */
-#include "../hardware.h"
-#include "devices-common.h"
-
-struct imx_mxc_rnga_data {
- resource_size_t iobase;
-};
-
-#define imx_mxc_rnga_data_entry_single(soc) \
- { \
- .iobase = soc ## _RNGA_BASE_ADDR, \
- }
-
-#ifdef CONFIG_SOC_IMX31
-static const struct imx_mxc_rnga_data imx31_mxc_rnga_data __initconst =
- imx_mxc_rnga_data_entry_single(MX31);
-#endif /* ifdef CONFIG_SOC_IMX31 */
-
-static struct platform_device *__init imx_add_mxc_rnga(
- const struct imx_mxc_rnga_data *data)
-{
- struct resource res[] = {
- {
- .start = data->iobase,
- .end = data->iobase + SZ_16K - 1,
- .flags = IORESOURCE_MEM,
- },
- };
- return imx_add_platform_device("mxc_rnga", -1,
- res, ARRAY_SIZE(res), NULL, 0);
-}
-
-static int __init imxXX_add_mxc_rnga(void)
-{
- struct platform_device *ret;
-
-#if defined(CONFIG_SOC_IMX31)
- if (cpu_is_mx31())
- ret = imx_add_mxc_rnga(&imx31_mxc_rnga_data);
- else
-#endif /* if defined(CONFIG_SOC_IMX31) */
- ret = ERR_PTR(-ENODEV);
-
- return PTR_ERR_OR_ZERO(ret);
-}
-arch_initcall(imxXX_add_mxc_rnga);
diff --git a/arch/arm/mach-imx/eukrea-baseboards.h b/arch/arm/mach-imx/eukrea-baseboards.h
deleted file mode 100644
index bb2c90d65914..000000000000
--- a/arch/arm/mach-imx/eukrea-baseboards.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2010 Eric Benard - eric@eukrea.com
- *
- * Based on board-pcm038.h which is :
- * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_EUKREA_BASEBOARDS_H__
-#define __MACH_EUKREA_BASEBOARDS_H__
-
-#ifndef __ASSEMBLY__
-/*
- * This CPU module needs a baseboard to work. After basic initializing
- * its own devices, it calls baseboard's init function.
- * TODO: Add your own baseboard init function and call it from
- * inside eukrea_cpuimx25_init() or eukrea_cpuimx35_init()
- *
- * This example here is for the development board. Refer
- * mach-mx25/eukrea_mbimxsd-baseboard.c for cpuimx25
- * mach-mx3/eukrea_mbimxsd-baseboard.c for cpuimx35
- */
-
-extern void eukrea_mbimxsd25_baseboard_init(void);
-extern void eukrea_mbimxsd35_baseboard_init(void);
-
-#endif
-
-#endif /* __MACH_EUKREA_BASEBOARDS_H__ */
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index 530a728c2acc..a754b8a48f38 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -27,5 +27,6 @@ DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
.map_io = mx27_map_io,
.init_early = imx27_init_early,
.init_irq = mx27_init_irq,
+ .init_late = imx27_pm_init,
.dt_compat = imx27_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/imx31-dt.c b/arch/arm/mach-imx/imx31-dt.c
index 32100222a017..62e6b4fb5370 100644
--- a/arch/arm/mach-imx/imx31-dt.c
+++ b/arch/arm/mach-imx/imx31-dt.c
@@ -28,10 +28,22 @@ static void __init imx31_dt_timer_init(void)
mx31_clocks_init_dt();
}
+/* FIXME: replace with DT binding */
+static const struct resource imx31_rnga_res[] __initconst = {
+ DEFINE_RES_MEM(MX31_RNGA_BASE_ADDR, SZ_16K),
+};
+
+static void __init imx31_dt_mach_init(void)
+{
+ platform_device_register_simple("mxc_rnga", -1, imx31_rnga_res,
+ ARRAY_SIZE(imx31_rnga_res));
+}
+
DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
.map_io = mx31_map_io,
.init_early = imx31_init_early,
.init_irq = mx31_init_irq,
.init_time = imx31_dt_timer_init,
+ .init_machine = imx31_dt_mach_init,
.dt_compat = imx31_dt_board_compat,
MACHINE_END
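The board code above only registers a bare "mxc_rnga" platform device carrying a MEM resource; the driver that binds to it is expected to pick that region up in probe. A sketch of the driver side, assuming it uses the usual resource helpers (this is not the actual hw_random driver code):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_rnga_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* base now maps the MX31_RNGA_BASE_ADDR region supplied above */
	return 0;
}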
diff --git a/arch/arm/mach-imx/imx35-dt.c b/arch/arm/mach-imx/imx35-dt.c
index e9396037235d..99bb63dedb87 100644
--- a/arch/arm/mach-imx/imx35-dt.c
+++ b/arch/arm/mach-imx/imx35-dt.c
@@ -20,20 +20,16 @@
#include "common.h"
#include "mx35.h"
-static void __init imx35_irq_init(void)
-{
- imx_init_l2cache();
- mx35_init_irq();
-}
-
static const char * const imx35_dt_board_compat[] __initconst = {
"fsl,imx35",
NULL
};
DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
.map_io = mx35_map_io,
.init_early = imx35_init_early,
- .init_irq = imx35_irq_init,
+ .init_irq = mx35_init_irq,
.dt_compat = imx35_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/irq-common.c b/arch/arm/mach-imx/irq-common.c
index 0a920d184867..210d36eba8f2 100644
--- a/arch/arm/mach-imx/irq-common.c
+++ b/arch/arm/mach-imx/irq-common.c
@@ -33,8 +33,10 @@ int mxc_set_irq_fiq(unsigned int irq, unsigned int type)
gc = irq_get_chip_data(irq);
if (gc && gc->private) {
exirq = gc->private;
- if (exirq->set_irq_fiq)
- ret = exirq->set_irq_fiq(irq, type);
+ if (exirq->set_irq_fiq) {
+ struct irq_data *d = irq_get_irq_data(irq);
+ ret = exirq->set_irq_fiq(irqd_to_hwirq(d), type);
+ }
}
return ret;
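Both the avic.c hunk and this one move the virq-to-hwirq translation up into mxc_set_irq_fiq(), so the controller-level set_irq_fiq() callbacks now receive a hardware irq number directly. A minimal sketch of that translation step in isolation (the helper name is made up):

#include <linux/errno.h>
#include <linux/irq.h>

static int example_virq_to_hwirq(unsigned int virq, irq_hw_number_t *hwirq)
{
	struct irq_data *d = irq_get_irq_data(virq);

	if (!d)
		return -EINVAL;

	*hwirq = irqd_to_hwirq(d);	/* controller-local number, as AVIC/TZIC expect */
	return 0;
}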
diff --git a/arch/arm/mach-imx/mach-imx50.c b/arch/arm/mach-imx/mach-imx50.c
index ecf58b9e974b..4cab5f61260f 100644
--- a/arch/arm/mach-imx/mach-imx50.c
+++ b/arch/arm/mach-imx/mach-imx50.c
@@ -22,6 +22,5 @@ static const char * const imx50_dt_board_compat[] __initconst = {
};
DT_MACHINE_START(IMX50_DT, "Freescale i.MX50 (Device Tree Support)")
- .init_irq = tzic_init_irq,
.dt_compat = imx50_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index ec64de611d90..3835b6a3423c 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -52,6 +52,8 @@ static void __init imx51_dt_init(void)
{
imx51_ipu_mipi_setup();
imx_src_init();
+
+ imx_aips_allow_unprivileged_access("fsl,imx51-aipstz");
}
static void __init imx51_init_late(void)
@@ -67,7 +69,6 @@ static const char * const imx51_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
.init_early = imx51_init_early,
- .init_irq = tzic_init_irq,
.init_machine = imx51_dt_init,
.init_late = imx51_init_late,
.dt_compat = imx51_dt_board_compat,
diff --git a/arch/arm/mach-imx/mach-imx53.c b/arch/arm/mach-imx/mach-imx53.c
index 68aec23be016..07c2e8dca494 100644
--- a/arch/arm/mach-imx/mach-imx53.c
+++ b/arch/arm/mach-imx/mach-imx53.c
@@ -47,7 +47,6 @@ static const char * const imx53_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX53_DT, "Freescale i.MX53 (Device Tree Support)")
.init_early = imx53_init_early,
- .init_irq = tzic_init_irq,
.init_machine = imx53_dt_init,
.init_late = imx53_init_late,
.dt_compat = imx53_dt_board_compat,
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index e3940707eeb8..97fd25105e2c 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -220,7 +220,7 @@ static void __init imx6q_1588_init(void)
IMX6Q_GPR1_ENET_CLK_SEL_MASK,
clksel);
else
- pr_err("failed to find fsl,imx6q-iomux-gpr regmap\n");
+ pr_err("failed to find fsl,imx6q-iomuxc-gpr regmap\n");
clk_put(enet_ref);
put_ptp_clk:
@@ -407,6 +407,8 @@ static const char * const imx6q_dt_compat[] __initconst = {
};
DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad/DualLite (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
.smp = smp_ops(imx_smp_ops),
.map_io = imx6q_map_io,
.init_irq = imx6q_init_irq,
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index 37ae87d6e0e9..04084900d810 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -75,6 +75,8 @@ static const char * const imx6sl_dt_compat[] __initconst = {
};
DT_MACHINE_START(IMX6SL, "Freescale i.MX6 SoloLite (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
.init_irq = imx6sl_init_irq,
.init_machine = imx6sl_init_machine,
.init_late = imx6sl_init_late,
diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c
index 107cfc15282b..7f52d9b1e8a4 100644
--- a/arch/arm/mach-imx/mach-imx6sx.c
+++ b/arch/arm/mach-imx/mach-imx6sx.c
@@ -103,6 +103,8 @@ static const char * const imx6sx_dt_compat[] __initconst = {
};
DT_MACHINE_START(IMX6SX, "Freescale i.MX6 SoloX (Device Tree)")
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
.init_irq = imx6sx_init_irq,
.init_machine = imx6sx_init_machine,
.dt_compat = imx6sx_dt_compat,
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index f388e6bd46ec..26ca744d3e2b 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -106,6 +106,7 @@ static void __init imx7d_init_irq(void)
static const char *const imx7d_dt_compat[] __initconst = {
"fsl,imx7d",
+ "fsl,imx7s",
NULL,
};
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
index e065fedb3ad4..9a42f19be81e 100644
--- a/arch/arm/mach-imx/mm-imx1.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -50,7 +50,7 @@ void __init mx1_init_irq(void)
void __init imx1_soc_init(void)
{
- mxc_arch_reset_init(MX1_IO_ADDRESS(MX1_WDT_BASE_ADDR));
+ imx1_reset_init(MX1_IO_ADDRESS(MX1_WDT_BASE_ADDR));
mxc_device_init();
mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256,
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index 7d82a5a5b16b..862b9b7762c7 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -98,4 +98,6 @@ void __init imx27_soc_init(void)
/* imx27 has the imx21 type audmux */
platform_device_register_simple("imx21-audmux", 0, imx27_audmux_res,
ARRAY_SIZE(imx27_audmux_res));
+
+ imx27_pm_init();
}
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 0884ca90d15a..7638a35b3b36 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/pinctrl/machine.h>
#include <asm/pgtable.h>
@@ -38,8 +39,6 @@ static void imx3_idle(void)
{
unsigned long reg = 0;
- mx3_cpu_lp_set(MX3_WAIT);
-
__asm__ __volatile__(
/* disable I and D cache */
"mrc p15, 0, %0, c1, c0, 0\n"
@@ -135,11 +134,20 @@ void __init mx31_map_io(void)
iotable_init(mx31_io_desc, ARRAY_SIZE(mx31_io_desc));
}
+static void imx31_idle(void)
+{
+ int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
+ reg &= ~MXC_CCM_CCMR_LPM_MASK;
+ imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
+
+ imx3_idle();
+}
+
void __init imx31_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX31);
arch_ioremap_caller = imx3_ioremap_caller;
- arm_pm_idle = imx3_idle;
+ arm_pm_idle = imx31_idle;
mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
}
@@ -167,6 +175,10 @@ static const struct resource imx31_audmux_res[] __initconst = {
DEFINE_RES_MEM(MX31_AUDMUX_BASE_ADDR, SZ_16K),
};
+static const struct resource imx31_rnga_res[] __initconst = {
+ DEFINE_RES_MEM(MX31_RNGA_BASE_ADDR, SZ_16K),
+};
+
void __init imx31_soc_init(void)
{
int to_version = mx31_revision() >> 4;
@@ -195,6 +207,8 @@ void __init imx31_soc_init(void)
platform_device_register_simple("imx31-audmux", 0, imx31_audmux_res,
ARRAY_SIZE(imx31_audmux_res));
+ platform_device_register_simple("mxc_rnga", -1, imx31_rnga_res,
+ ARRAY_SIZE(imx31_rnga_res));
}
#endif /* ifdef CONFIG_SOC_IMX31 */
@@ -212,11 +226,21 @@ void __init mx35_map_io(void)
iotable_init(mx35_io_desc, ARRAY_SIZE(mx35_io_desc));
}
+static void imx35_idle(void)
+{
+ int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
+ reg &= ~MXC_CCM_CCMR_LPM_MASK;
+ reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
+ imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
+
+ imx3_idle();
+}
+
void __init imx35_init_early(void)
{
mxc_set_cpu_type(MXC_CPU_MX35);
mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
- arm_pm_idle = imx3_idle;
+ arm_pm_idle = imx35_idle;
arch_ioremap_caller = imx3_ioremap_caller;
mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
}
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index d32704256781..34f2ff62583c 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -45,105 +45,7 @@
#ifndef __ASSEMBLY__
extern unsigned int __mxc_cpu_type;
-#endif
-
-#ifdef CONFIG_SOC_IMX1
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX1
-# endif
-# define cpu_is_mx1() (mxc_cpu_type == MXC_CPU_MX1)
-#else
-# define cpu_is_mx1() (0)
-#endif
-
-#ifdef CONFIG_SOC_IMX21
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX21
-# endif
-# define cpu_is_mx21() (mxc_cpu_type == MXC_CPU_MX21)
-#else
-# define cpu_is_mx21() (0)
-#endif
-#ifdef CONFIG_SOC_IMX25
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX25
-# endif
-# define cpu_is_mx25() (mxc_cpu_type == MXC_CPU_MX25)
-#else
-# define cpu_is_mx25() (0)
-#endif
-
-#ifdef CONFIG_SOC_IMX27
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX27
-# endif
-# define cpu_is_mx27() (mxc_cpu_type == MXC_CPU_MX27)
-#else
-# define cpu_is_mx27() (0)
-#endif
-
-#ifdef CONFIG_SOC_IMX31
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX31
-# endif
-# define cpu_is_mx31() (mxc_cpu_type == MXC_CPU_MX31)
-#else
-# define cpu_is_mx31() (0)
-#endif
-
-#ifdef CONFIG_SOC_IMX35
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX35
-# endif
-# define cpu_is_mx35() (mxc_cpu_type == MXC_CPU_MX35)
-#else
-# define cpu_is_mx35() (0)
-#endif
-
-#ifdef CONFIG_SOC_IMX51
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX51
-# endif
-# define cpu_is_mx51() (mxc_cpu_type == MXC_CPU_MX51)
-#else
-# define cpu_is_mx51() (0)
-#endif
-
-#ifdef CONFIG_SOC_IMX53
-# ifdef mxc_cpu_type
-# undef mxc_cpu_type
-# define mxc_cpu_type __mxc_cpu_type
-# else
-# define mxc_cpu_type MXC_CPU_MX53
-# endif
-# define cpu_is_mx53() (mxc_cpu_type == MXC_CPU_MX53)
-#else
-# define cpu_is_mx53() (0)
-#endif
-
-#ifndef __ASSEMBLY__
#ifdef CONFIG_SOC_IMX6SL
static inline bool cpu_is_imx6sl(void)
{
@@ -190,9 +92,6 @@ int tzic_enable_wake(void);
extern struct cpu_op *(*get_cpu_op)(int *op);
#endif
-#define cpu_is_mx3() (cpu_is_mx31() || cpu_is_mx35())
-#define cpu_is_mx2() (cpu_is_mx21() || cpu_is_mx27())
-
#define imx_readl readl_relaxed
#define imx_readw readw_relaxed
#define imx_writel writel_relaxed
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 43096c8990d4..d943535566c8 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -37,13 +37,7 @@ static const struct platform_suspend_ops mx27_suspend_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init mx27_pm_init(void)
+void __init imx27_pm_init(void)
{
- if (!cpu_is_mx27())
- return 0;
-
suspend_set_ops(&mx27_suspend_ops);
- return 0;
}
-
-device_initcall(mx27_pm_init);
diff --git a/arch/arm/mach-imx/pm-imx3.c b/arch/arm/mach-imx/pm-imx3.c
deleted file mode 100644
index 94c0898751d8..000000000000
--- a/arch/arm/mach-imx/pm-imx3.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2012 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/io.h>
-
-#include "common.h"
-#include "crmregs-imx3.h"
-#include "devices/devices-common.h"
-#include "hardware.h"
-
-/*
- * Set cpu low power mode before WFI instruction. This function is called
- * mx3 because it can be used for mx31 and mx35.
- * Currently only WAIT_MODE is supported.
- */
-void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
-{
- int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
- reg &= ~MXC_CCM_CCMR_LPM_MASK;
-
- switch (mode) {
- case MX3_WAIT:
- if (cpu_is_mx35())
- reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
- imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
- break;
- default:
- pr_err("Unknown cpu power mode: %d\n", mode);
- return;
- }
-}
diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c
index 105d1ce4ed9d..c06af650e6b1 100644
--- a/arch/arm/mach-imx/system.c
+++ b/arch/arm/mach-imx/system.c
@@ -34,25 +34,19 @@
static void __iomem *wdog_base;
static struct clk *wdog_clk;
+static int wcr_enable = (1 << 2);
/*
* Reset the system. It is called by machine_restart().
*/
void mxc_restart(enum reboot_mode mode, const char *cmd)
{
- unsigned int wcr_enable;
-
if (!wdog_base)
goto reset_fallback;
if (!IS_ERR(wdog_clk))
clk_enable(wdog_clk);
- if (cpu_is_mx1())
- wcr_enable = (1 << 0);
- else
- wcr_enable = (1 << 2);
-
/* Assert SRS signal */
imx_writew(wcr_enable, wdog_base);
/*
@@ -89,6 +83,14 @@ void __init mxc_arch_reset_init(void __iomem *base)
clk_prepare(wdog_clk);
}
+#ifdef CONFIG_SOC_IMX1
+void __init imx1_reset_init(void __iomem *base)
+{
+ wcr_enable = (1 << 0);
+ mxc_arch_reset_init(base);
+}
+#endif
+
#ifdef CONFIG_CACHE_L2X0
void __init imx_init_l2cache(void)
{
@@ -98,38 +100,28 @@ void __init imx_init_l2cache(void)
np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
if (!np)
- goto out;
+ return;
l2x0_base = of_iomap(np, 0);
- if (!l2x0_base) {
- of_node_put(np);
- goto out;
- }
+ if (!l2x0_base)
+ goto put_node;
- if (readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)
- goto skip_if_enabled;
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+ /* Configure the L2 PREFETCH and POWER registers */
+ val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
+ val |= L310_PREFETCH_CTRL_DBL_LINEFILL |
+ L310_PREFETCH_CTRL_INSTR_PREFETCH |
+ L310_PREFETCH_CTRL_DATA_PREFETCH;
- /* Configure the L2 PREFETCH and POWER registers */
- val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
- val |= 0x70800000;
- /*
- * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
- * The L2 cache controller(PL310) version on the i.MX6DL/SOLO/SL is r3p2
- * But according to ARM PL310 errata: 752271
- * ID: 752271: Double linefill feature can cause data corruption
- * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2
- * Workaround: The only workaround to this erratum is to disable the
- * double linefill feature. This is the default behavior.
- */
- if (cpu_is_imx6q())
- val &= ~(1 << 30 | 1 << 23);
- writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
+ /* Set prefetch offset to improve performance */
+ val &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
+ val |= 15;
+
+ writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
+ }
-skip_if_enabled:
iounmap(l2x0_base);
+put_node:
of_node_put(np);
-
-out:
- l2x0_of_init(0, ~0);
}
#endif
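With the explicit l2x0_of_init(0, ~0) call gone from imx_init_l2cache(), the series relies on the ARM core code performing L2C-310 setup from the machine descriptor, as the imx6q/imx6sl/imx6sx and imx35 hunks above do. The pattern, shown with a placeholder board name and compatible string:

#include <linux/init.h>
#include <asm/mach/arch.h>

static const char *const example_dt_compat[] __initconst = {
	"vendor,example-board",		/* placeholder */
	NULL,
};

DT_MACHINE_START(EXAMPLE_DT, "Example i.MX board (Device Tree)")
	/* core code now calls l2x0_of_init() using these values */
	.l2c_aux_val	= 0,
	.l2c_aux_mask	= ~0,
	.dt_compat	= example_dt_compat,
MACHINE_END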
diff --git a/arch/arm/mach-imx/tzic.c b/arch/arm/mach-imx/tzic.c
index ae23d50f7861..4ca37c3f1da4 100644
--- a/arch/arm/mach-imx/tzic.c
+++ b/arch/arm/mach-imx/tzic.c
@@ -9,12 +9,11 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -56,14 +55,14 @@ static struct irq_domain *domain;
#define TZIC_NUM_IRQS 128
#ifdef CONFIG_FIQ
-static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
+static int tzic_set_irq_fiq(unsigned int hwirq, unsigned int type)
{
unsigned int index, mask, value;
- index = irq >> 5;
+ index = hwirq >> 5;
if (unlikely(index >= 4))
return -EINVAL;
- mask = 1U << (irq & 0x1F);
+ mask = 1U << (hwirq & 0x1F);
value = imx_readl(tzic_base + TZIC_INTSEC0(index)) | mask;
if (type)
@@ -153,13 +152,11 @@ static void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs)
* interrupts. It registers the interrupt enable and disable functions
* to the kernel for each interrupt source.
*/
-void __init tzic_init_irq(void)
+static int __init tzic_init_dt(struct device_node *np, struct device_node *p)
{
- struct device_node *np;
int irq_base;
int i;
- np = of_find_compatible_node(NULL, NULL, "fsl,tzic");
tzic_base = of_iomap(np, 0);
WARN_ON(!tzic_base);
@@ -199,7 +196,10 @@ void __init tzic_init_irq(void)
#endif
pr_info("TrustZone Interrupt Controller (TZIC) initialized\n");
+
+ return 0;
}
+IRQCHIP_DECLARE(tzic, "fsl,tzic", tzic_init_dt);
/**
* tzic_enable_wake() - enable wakeup interrupt
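The tzic conversion above replaces the hand-rolled tzic_init_irq() lookup with the generic irqchip infrastructure: IRQCHIP_DECLARE() ties the init function to a device-tree compatible so that irqchip_init() probes it automatically, which is why the i.MX50/51/53 machine descriptors above can drop their .init_irq hooks. The pattern in isolation, with a made-up compatible string:

#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/of.h>

static int __init example_intc_init(struct device_node *np,
				    struct device_node *parent)
{
	/* map registers from np, create the irq_domain, register the chip */
	return 0;
}
IRQCHIP_DECLARE(example_intc, "vendor,example-intc", example_intc_init);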
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index 291262e5aeaf..599f973e10d8 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -32,9 +32,9 @@ config ARCH_INTEGRATOR_AP
config INTEGRATOR_IMPD1
bool "Include support for Integrator/IM-PD1"
depends on ARCH_INTEGRATOR_AP
- select ARCH_REQUIRE_GPIOLIB
select ARM_VIC
- select GPIO_PL061 if GPIOLIB
+ select GPIO_PL061
+ select GPIOLIB
help
The IM-PD1 is an add-on logic module for the Integrator which
allows ARM(R) Ltd PrimeCells to be developed and evaluated.
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 38b0da300dd5..ed9a01484030 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -320,11 +320,11 @@ static struct impd1_device impd1_devs[] = {
#define IMPD1_VALID_IRQS 0x00000bffU
/*
- * As this module is bool, it is OK to have this as __init_refok() - no
+ * As this module is bool, it is OK to have this as __ref() - no
* probe calls will be done after the initial system bootup, as devices
* are discovered as part of the machine startup.
*/
-static int __init_refok impd1_probe(struct lm_device *dev)
+static int __ref impd1_probe(struct lm_device *dev)
{
struct impd1_module *impd1;
int irq_base;
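__init_refok was renamed to __ref; both tell modpost that a reference from non-__init code into __init code or data is intentional, so no section-mismatch warning is emitted. A minimal sketch with placeholder names:

#include <linux/init.h>

static int __init example_setup_once(void)	/* discarded after boot */
{
	return 0;
}

/* Only reached during initial bootup, so the __init reference is safe. */
static int __ref example_probe(void)
{
	return example_setup_once();
}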
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index bac577badc7e..8ff61be1a29f 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -1,7 +1,6 @@
config ARCH_KEYSTONE
bool "Texas Instruments Keystone Devices"
depends on ARCH_MULTI_V7
- depends on ARM_PATCH_PHYS_VIRT
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
select KEYSTONE_TIMER
@@ -11,6 +10,7 @@ config ARCH_KEYSTONE
select ZONE_DMA if ARM_LPAE
select MIGHT_HAVE_PCI
select PCI_DOMAINS if PCI
+ select PINCTRL
help
Support for boards based on the Texas Instruments Keystone family of
SoCs.
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index e283939a216f..8cbb35765a19 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -18,6 +18,8 @@
#include <linux/platform_device.h>
#include <linux/of.h>
+#include "keystone.h"
+
static struct dev_pm_domain keystone_pm_domain = {
.ops = {
USE_PM_CLK_RUNTIME_OPS
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index 31bdd91098b6..b6e3acc63e14 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -1,12 +1,14 @@
menuconfig ARCH_MESON
bool "Amlogic Meson SoCs"
depends on ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select GENERIC_IRQ_CHIP
select ARM_GIC
select CACHE_L2X0
select PINCTRL
select PINCTRL_MESON
+ select COMMON_CLK
+ select COMMON_CLK_AMLOGIC
if ARCH_MESON
@@ -24,5 +26,6 @@ config MACH_MESON8B
bool "Amlogic Meson8b SoCs support"
default ARCH_MESON
select MESON6_TIMER
+ select COMMON_CLK_MESON8B
endif
diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig
index 01c57d369462..94500bed56ab 100644
--- a/arch/arm/mach-mmp/Kconfig
+++ b/arch/arm/mach-mmp/Kconfig
@@ -1,8 +1,8 @@
menuconfig ARCH_MMP
bool "Marvell PXA168/910/MMP2"
depends on ARCH_MULTI_V5 || ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
select GPIO_PXA
+ select GPIOLIB
select PINCTRL
select PLAT_PXA
help
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index ddc79cea32d3..f69e28b85e88 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -5,7 +5,7 @@ menuconfig ARCH_MOXART
select ARM_DMA_MEM_BUFFERABLE
select MOXART_TIMER
select GENERIC_IRQ_CHIP
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select PHYLIB if NETDEVICES
help
Say Y here if you want to run your kernel on hardware with a
diff --git a/arch/arm/mach-mv78xx0/Kconfig b/arch/arm/mach-mv78xx0/Kconfig
index c32f85559c65..81c0f08a2684 100644
--- a/arch/arm/mach-mv78xx0/Kconfig
+++ b/arch/arm/mach-mv78xx0/Kconfig
@@ -1,8 +1,8 @@
menuconfig ARCH_MV78XX0
bool "Marvell MV78xx0"
depends on ARCH_MULTI_V5
- select ARCH_REQUIRE_GPIOLIB
select CPU_FEROCEON
+ select GPIOLIB
select MVEBU_MBUS
select PCI
select PLAT_ORION_LEGACY
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 45a05207b418..6af5430d0d97 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -343,7 +343,7 @@ void __init mv78xx0_init_early(void)
DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ);
}
-void __init_refok mv78xx0_timer_init(void)
+void __ref mv78xx0_timer_init(void)
{
orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
IRQ_MV78XX0_TIMER_1, get_tclk());
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 348044ea650c..f9b6bd306cfe 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -8,7 +8,7 @@ menuconfig ARCH_MVEBU
select SOC_BUS
select MVEBU_MBUS
select ZONE_DMA if ARM_LPAE
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select PCI_QUIRKS if PCI
select OF_ADDRESS_PCI
@@ -119,8 +119,8 @@ config MACH_DOVE
config MACH_KIRKWOOD
bool "Marvell Kirkwood boards"
depends on ARCH_MULTI_V5
- select ARCH_REQUIRE_GPIOLIB
select CPU_FEROCEON
+ select GPIOLIB
select KIRKWOOD_CLK
select MACH_MVEBU_ANY
select ORION_IRQCHIP
diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
index 54cb7607b526..6067f14263f7 100644
--- a/arch/arm/mach-mvebu/coherency.h
+++ b/arch/arm/mach-mvebu/coherency.h
@@ -14,6 +14,7 @@
#ifndef __MACH_370_XP_COHERENCY_H
#define __MACH_370_XP_COHERENCY_H
+extern void __iomem *coherency_base; /* for coherency_ll.S */
extern unsigned long coherency_phys_base;
int set_cpu_coherent(void);
diff --git a/arch/arm/mach-mvebu/cpu-reset.c b/arch/arm/mach-mvebu/cpu-reset.c
index 4a2cadd6b48e..f33a31c6aff8 100644
--- a/arch/arm/mach-mvebu/cpu-reset.c
+++ b/arch/arm/mach-mvebu/cpu-reset.c
@@ -16,6 +16,8 @@
#include <linux/io.h>
#include <linux/resource.h>
+#include "common.h"
+
static void __iomem *cpu_reset_base;
static size_t cpu_reset_size;
diff --git a/arch/arm/mach-mvebu/kirkwood-pm.c b/arch/arm/mach-mvebu/kirkwood-pm.c
index cbb816f2120c..1e1f879f16ce 100644
--- a/arch/arm/mach-mvebu/kirkwood-pm.c
+++ b/arch/arm/mach-mvebu/kirkwood-pm.c
@@ -18,6 +18,7 @@
#include <linux/suspend.h>
#include <linux/io.h>
#include "kirkwood.h"
+#include "kirkwood-pm.h"
static void __iomem *ddr_operation_base;
static void __iomem *memory_pm_ctrl;
@@ -66,11 +67,10 @@ static const struct platform_suspend_ops kirkwood_suspend_ops = {
.valid = kirkwood_pm_valid_standby,
};
-int __init kirkwood_pm_init(void)
+void __init kirkwood_pm_init(void)
{
ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
memory_pm_ctrl = ioremap(MEMORY_PM_CTRL_PHYS, 4);
suspend_set_ops(&kirkwood_suspend_ops);
- return 0;
}
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
index 8f459ee34e6f..7d9f2fd9e450 100644
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -150,7 +150,7 @@ eth_fixup_skip:
* causes mbus errors (which can occur for example for PCI aborts) to
* throw CPU aborts, which we're not set up to deal with.
*/
-void kirkwood_disable_mbus_error_propagation(void)
+static void kirkwood_disable_mbus_error_propagation(void)
{
void __iomem *cpu_config;
diff --git a/arch/arm/mach-mvebu/pm.c b/arch/arm/mach-mvebu/pm.c
index 8d32bf762b86..2990c5269b18 100644
--- a/arch/arm/mach-mvebu/pm.c
+++ b/arch/arm/mach-mvebu/pm.c
@@ -23,6 +23,7 @@
#include <asm/suspend.h>
#include "coherency.h"
+#include "common.h"
#include "pmsu.h"
#define SDRAM_CONFIG_OFFS 0x0
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index b44442338e4e..f39bd51bce18 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
+#include <linux/mvebu-pmsu.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -38,7 +39,7 @@
#include <asm/suspend.h>
#include <asm/tlbflush.h>
#include "common.h"
-
+#include "pmsu.h"
#define PMSU_BASE_OFFSET 0x100
#define PMSU_REG_SIZE 0x1000
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index c6c132acd7a6..76cbc82a7407 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -127,7 +127,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
}
#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
-void mvebu_armada375_smp_wa_init(void)
+static void mvebu_armada375_smp_wa_init(void)
{
u32 dev, rev;
phys_addr_t resume_addr_reg;
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 68a3a9ec605d..cb429bc6dc0d 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -15,7 +15,7 @@ config SOC_IMX28
config ARCH_MXS
bool "Freescale MXS (i.MX23, i.MX28) support"
depends on ARCH_MULTI_V5
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select MXS_TIMER
select PINCTRL
select SOC_BUS
diff --git a/arch/arm/mach-nomadik/Kconfig b/arch/arm/mach-nomadik/Kconfig
index 3c61096c8627..b7e9801fdaa4 100644
--- a/arch/arm/mach-nomadik/Kconfig
+++ b/arch/arm/mach-nomadik/Kconfig
@@ -1,12 +1,12 @@
menuconfig ARCH_NOMADIK
bool "ST-Ericsson Nomadik"
depends on ARCH_MULTI_V5
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_VIC
select CLKSRC_NOMADIK_MTU
select CLKSRC_NOMADIK_MTU_SCHED_CLOCK
select CPU_ARM926T
+ select GPIOLIB
select MIGHT_HAVE_CACHE_L2X0
select PINCTRL
select PINCTRL_NOMADIK
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index ec760ae2f917..793a24a53c52 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -137,7 +137,7 @@ void __init ams_delta_init_fiq(void)
fiq_buffer[i] = 0;
/*
- * FIQ mode r9 always points to the fiq_buffer, becauses the FIQ isr
+ * FIQ mode r9 always points to the fiq_buffer, because the FIQ isr
* will run in an unpredictable context. The fiq_buffer is the FIQ isr's
* only means of communication with the IRQ level and other kernel
* context code.
diff --git a/arch/arm/mach-omap1/include/mach/mtd-xip.h b/arch/arm/mach-omap1/include/mach/mtd-xip.h
index f82a8dcaad94..d09b2bc4920f 100644
--- a/arch/arm/mach-omap1/include/mach/mtd-xip.h
+++ b/arch/arm/mach-omap1/include/mach/mtd-xip.h
@@ -39,7 +39,7 @@ static inline unsigned long xip_omap_mpu_timer_read(int nr)
#define xip_currtime() (~xip_omap_mpu_timer_read(0))
/*
- * It's permitted to do approxmation for xip_elapsed_since macro
+ * It's permitted to do approximation for xip_elapsed_since macro
* (see linux/mtd/xip.h)
*/
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 1a648e9dfaa0..5a0b380a8166 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -92,9 +92,9 @@ config ARCH_OMAP2PLUS
select ARCH_HAS_BANDGAP
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
- select ARCH_REQUIRE_GPIOLIB
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select MACH_OMAP_GENERIC
select MEMORY
select MFD_SYSCON
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 04e276ce8413..a7f2d051f524 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -8,7 +8,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
# Common support
obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o timer.o pm.o \
common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
- omap_device.o sram.o drm.o
+ omap_device.o omap-headsmp.o sram.o drm.o
hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
omap_hwmod_common_data.o
@@ -32,7 +32,7 @@ obj-$(CONFIG_SOC_HAS_OMAP2_SDRC) += sdrc.o
# SMP support ONLY available for OMAP4
-smp-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
+smp-$(CONFIG_SMP) += omap-smp.o
smp-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
omap-4-5-common = omap4-common.o omap-wakeupgen.o
obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) $(smp-y) sleep44xx.o
@@ -78,13 +78,16 @@ obj-$(CONFIG_ARCH_OMAP4) += opp4xxx_data.o
endif
# Power Management
+omap-4-5-pm-common = omap-mpuss-lowpower.o
+obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-pm-common)
+obj-$(CONFIG_ARCH_OMAP5) += $(omap-4-5-pm-common)
obj-$(CONFIG_OMAP_PM_NOOP) += omap-pm-noop.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o
-omap-4-5-pm-common = pm44xx.o omap-mpuss-lowpower.o
+omap-4-5-pm-common += pm44xx.o
obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-pm-common)
obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-pm-common)
obj-$(CONFIG_SOC_DRA7XX) += $(omap-4-5-pm-common)
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index d9c3ffc39329..390795b334c3 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -39,7 +39,7 @@
#include "gpmc.h"
#include "gpmc-smsc911x.h"
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
#include <video/omap-panel-data.h>
#include "board-flash.h"
@@ -47,6 +47,7 @@
#include "hsmmc.h"
#include "control.h"
#include "common-board-devices.h"
+#include "display.h"
#define LDP_SMSC911X_CS 1
#define LDP_SMSC911X_GPIO 152
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 9a7073949d1d..a5ab712c1a59 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -1242,11 +1242,6 @@ static struct pwm_omap_dmtimer_pdata __maybe_unused pwm_dmtimer_pdata = {
#if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
static struct lirc_rx51_platform_data rx51_lirc_data = {
.set_max_mpu_wakeup_lat = omap_pm_set_max_mpu_wakeup_lat,
- .pwm_timer = 9, /* Use GPT 9 for CIR */
-#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
- .dmtimer = &pwm_dmtimer_pdata,
-#endif
-
};
static struct platform_device rx51_lirc_device = {
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 9cfebc5c7455..180c6aa633bd 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -15,13 +15,14 @@
#include <linux/spi/spi.h>
#include <linux/mm.h>
#include <asm/mach-types.h>
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
#include <video/omap-panel-data.h>
#include <linux/platform_data/spi-omap2-mcspi.h>
#include "soc.h"
#include "board-rx51.h"
+#include "display.h"
#include "mux.h"
@@ -32,7 +33,6 @@
static struct connector_atv_platform_data rx51_tv_pdata = {
.name = "tv",
.source = "venc.0",
- .connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
.invert_polarity = false,
};
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 2da3b5ec010c..b79b1ca9aee9 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -465,10 +465,7 @@ int clkdm_complete_init(void)
return -EACCES;
list_for_each_entry(clkdm, &clkdm_list, node) {
- if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
- clkdm_wakeup(clkdm);
- else if (clkdm->flags & CLKDM_CAN_DISABLE_AUTO)
- clkdm_deny_idle(clkdm);
+ clkdm_deny_idle(clkdm);
_resolve_clkdm_deps(clkdm, clkdm->wkdep_srcs);
clkdm_clear_all_wkdeps(clkdm);
@@ -925,11 +922,20 @@ void clkdm_allow_idle_nolock(struct clockdomain *clkdm)
if (!clkdm)
return;
- if (!(clkdm->flags & CLKDM_CAN_ENABLE_AUTO)) {
- pr_debug("clock: %s: automatic idle transitions cannot be enabled\n",
- clkdm->name);
+ if (!WARN_ON(!clkdm->forcewake_count))
+ clkdm->forcewake_count--;
+
+ if (clkdm->forcewake_count)
+ return;
+
+ if (!clkdm->usecount && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
+ clkdm_sleep_nolock(clkdm);
+
+ if (!(clkdm->flags & CLKDM_CAN_ENABLE_AUTO))
+ return;
+
+ if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING)
return;
- }
if (!arch_clkdm || !arch_clkdm->clkdm_allow_idle)
return;
@@ -974,11 +980,17 @@ void clkdm_deny_idle_nolock(struct clockdomain *clkdm)
if (!clkdm)
return;
- if (!(clkdm->flags & CLKDM_CAN_DISABLE_AUTO)) {
- pr_debug("clockdomain: %s: automatic idle transitions cannot be disabled\n",
- clkdm->name);
+ if (clkdm->forcewake_count++)
+ return;
+
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ clkdm_wakeup_nolock(clkdm);
+
+ if (!(clkdm->flags & CLKDM_CAN_DISABLE_AUTO))
+ return;
+
+ if (clkdm->flags & CLKDM_MISSING_IDLE_REPORTING)
return;
- }
if (!arch_clkdm || !arch_clkdm->clkdm_deny_idle)
return;
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index 2c398ce1a0f2..24667a5a9dc0 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -114,6 +114,7 @@ struct omap_hwmod;
* @wkdep_srcs: Clockdomains that can be told to wake this powerdomain up
* @sleepdep_srcs: Clockdomains that can be told to keep this clkdm from inact
* @usecount: Usecount tracking
+ * @forcewake_count: Usecount for forcing the domain active
* @node: list_head to link all clockdomains together
*
* @prcm_partition should be a macro from mach-omap2/prcm44xx.h (OMAP4 only)
@@ -138,6 +139,7 @@ struct clockdomain {
struct clkdm_dep *wkdep_srcs;
struct clkdm_dep *sleepdep_srcs;
int usecount;
+ int forcewake_count;
struct list_head node;
};
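With the new forcewake_count, clkdm_deny_idle() and clkdm_allow_idle() become a reference-counted pair that also covers domains which only support forced wakeup or forced sleep, which is what lets clkdm_complete_init() and the cpuidle44xx path call them unconditionally. A usage sketch, assuming the clockdomain pointer was obtained via clkdm_lookup():

#include "clockdomain.h"

static void example_poke_domain(struct clockdomain *clkdm)
{
	clkdm_deny_idle(clkdm);		/* 0 -> 1: force the domain active */

	/* ... access registers inside the domain ... */

	clkdm_allow_idle(clkdm);	/* 1 -> 0: re-enable auto idle or force sleep */
}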
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index 7b181f929525..c073fb57dd13 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -220,6 +220,9 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
{
int i = 0;
+ if (!clkctrl_offs)
+ return 0;
+
omap_test_timeout(_is_module_ready(inst, clkctrl_offs),
MAX_MODULE_READY_TIME, i);
diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c
index 187fa4386718..d91ae8206d1e 100644
--- a/arch/arm/mach-omap2/cm3xxx.c
+++ b/arch/arm/mach-omap2/cm3xxx.c
@@ -649,7 +649,7 @@ void omap3_cm_save_scratchpad_contents(u32 *ptr)
/*
* As per erratum i671, ROM code does not respect the PER DPLL
* programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1.
- * Then, in anycase, clear these bits to avoid extra latencies.
+ * Then, in any case, clear these bits to avoid extra latencies.
*/
*ptr++ = omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) &
~OMAP3430_AUTO_PERIPH_DPLL_MASK;
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index f7666b9f3b21..deed42e1dd9c 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -257,18 +257,22 @@ extern void gic_dist_enable(void);
extern bool gic_dist_disabled(void);
extern void gic_timer_retrigger(void);
extern void omap_smc1(u32 fn, u32 arg);
+extern void omap4_sar_ram_init(void);
extern void __iomem *omap4_get_sar_ram_base(void);
+extern void omap4_mpuss_early_init(void);
extern void omap_do_wfi(void);
-#ifdef CONFIG_SMP
-/* Needed for secondary core boot */
extern void omap4_secondary_startup(void);
extern void omap4460_secondary_startup(void);
+
+#ifdef CONFIG_SMP
+/* Needed for secondary core boot */
extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
extern void omap_auxcoreboot_addr(u32 cpu_addr);
extern u32 omap_read_auxcoreboot0(void);
extern void omap4_cpu_die(unsigned int cpu);
+extern int omap4_cpu_kill(unsigned int cpu);
extern const struct smp_operations omap4_smp_ops;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4b8e9f4d59ea..fa138d4032b6 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -140,7 +140,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context)
gic_dist_disable();
- clkdm_wakeup(cpu_clkdm[1]);
+ clkdm_deny_idle(cpu_clkdm[1]);
omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
clkdm_allow_idle(cpu_clkdm[1]);
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6ab13d18c636..70b3eaf085e4 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -29,7 +29,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
#include "omap_hwmod.h"
#include "omap_device.h"
#include "omap-pm.h"
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
index 7375854b16c7..78f253005279 100644
--- a/arch/arm/mach-omap2/display.h
+++ b/arch/arm/mach-omap2/display.h
@@ -33,4 +33,9 @@ int omap_init_vout(void);
struct device_node * __init omapdss_find_dss_of_node(void);
+struct omap_dss_board_info;
+
+/* Init with the board info */
+int omap_display_init(struct omap_dss_board_info *board_data);
+
#endif
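With <video/omapdss.h> dropped from board files, the board side now pairs <linux/platform_data/omapdss.h> with this omap_display_init() declaration. A board-file sketch, leaving the omap_dss_board_info contents out since its fields are defined by the platform-data header:

#include <linux/init.h>
#include <linux/platform_data/omapdss.h>

#include "display.h"

static struct omap_dss_board_info example_dss_board_info;	/* devices omitted */

static int __init example_display_init(void)
{
	return omap_display_init(&example_dss_board_info);
}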
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index ea2be0f5953b..1d583bc0b1a9 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -27,7 +27,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
#include <video/omap-panel-data.h>
#include "soc.h"
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 49de4dd227be..0e9acdd95d70 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -690,6 +690,8 @@ void __init omap4430_init_early(void)
omap4xxx_check_revision();
omap4xxx_check_features();
omap2_prcm_base_init();
+ omap4_sar_ram_init();
+ omap4_mpuss_early_init();
omap4_pm_init_early();
omap44xx_voltagedomains_init();
omap44xx_powerdomains_init();
@@ -718,6 +720,7 @@ void __init omap5_init_early(void)
omap4_pm_init_early();
omap2_prcm_base_init();
omap5xxx_check_revision();
+ omap4_sar_ram_init();
omap54xx_voltagedomains_init();
omap54xx_powerdomains_init();
omap54xx_clockdomains_init();
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index b4ac3af1160c..fc04be74e064 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -34,18 +34,24 @@
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
-static struct clk *mcbsp_iclks[5];
-
-static int omap3_enable_st_clock(unsigned int id, bool enable)
+static int omap3_mcbsp_force_ick_on(struct clk *clk, bool force_on)
{
- /*
- * Sidetone uses McBSP ICLK - which must not idle when sidetones
- * are enabled or sidetones start sounding ugly.
- */
- if (enable)
- return omap2_clk_deny_idle(mcbsp_iclks[id]);
+ if (!clk)
+ return 0;
+
+ if (force_on)
+ return omap2_clk_deny_idle(clk);
else
- return omap2_clk_allow_idle(mcbsp_iclks[id]);
+ return omap2_clk_allow_idle(clk);
+}
+
+void __init omap3_mcbsp_init_pdata_callback(
+ struct omap_mcbsp_platform_data *pdata)
+{
+ if (!pdata)
+ return;
+
+ pdata->force_ick_on = omap3_mcbsp_force_ick_on;
}
static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
@@ -55,7 +61,6 @@ static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
struct omap_hwmod *oh_device[2];
struct omap_mcbsp_platform_data *pdata = NULL;
struct platform_device *pdev;
- char clk_name[11];
sscanf(oh->name, "mcbsp%d", &id);
@@ -96,9 +101,7 @@ static int __init omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
if (oh->dev_attr) {
oh_device[1] = omap_hwmod_lookup((
(struct omap_mcbsp_dev_attr *)(oh->dev_attr))->sidetone);
- pdata->enable_st_clock = omap3_enable_st_clock;
- sprintf(clk_name, "mcbsp%d_ick", id);
- mcbsp_iclks[id] = clk_get(NULL, clk_name);
+ pdata->force_ick_on = omap3_mcbsp_force_ick_on;
count++;
}
pdev = omap_device_build_ss(name, id, oh_device, count, pdata,
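The McBSP platform data now exposes force_ick_on() instead of an id-indexed enable_st_clock() plus a clock table in mach code, so the driver passes the sidetone interface clock it already owns. A driver-side sketch (variable names are illustrative, and the platform-data header path is assumed to be the one the ASoC McBSP driver uses):

#include <linux/clk.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>

static void example_sidetone_set(struct omap_mcbsp_platform_data *pdata,
				 struct clk *mcbsp_iclk, bool on)
{
	/* keep the McBSP ICLK from idling while sidetone is active */
	if (pdata->force_ick_on)
		pdata->force_ick_on(mcbsp_iclk, on);
}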
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index be271f1d585b..393e687f99e2 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -1266,7 +1266,7 @@ static struct omap_ball __initdata omap3_cus_ball[] = {
#endif
/*
- * Signals different on CBB package comapared to superset
+ * Signals different on CBB package compared to superset
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBB)
static struct omap_mux __initdata omap3_cbb_subset[] = {
@@ -1597,7 +1597,7 @@ static struct omap_ball __initdata omap3_cbb_ball[] = {
#endif
/*
- * Signals different on 36XX CBP package comapared to 34XX CBC package
+ * Signals different on 36XX CBP package compared to 34XX CBC package
*/
#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBP)
static struct omap_mux __initdata omap36xx_cbp_subset[] = {
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 6d1dffca6c7b..fe36ce2734d4 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -24,6 +24,16 @@
#define AUX_CORE_BOOT0_PA 0x48281800
#define API_HYP_ENTRY 0x102
+ENTRY(omap_secondary_startup)
+#ifdef CONFIG_SMP
+ b secondary_startup
+#else
+/* Should never get here */
+again: wfi
+ b again
+#endif
+ENDPROC(omap_secondary_startup)
+
/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
@@ -39,7 +49,7 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
and r4, r4, #0x0f
cmp r0, r4
bne wait
- b secondary_startup
+ b omap_secondary_startup
ENDPROC(omap5_secondary_startup)
/*
* Same as omap5_secondary_startup except we call into the ROM to
@@ -59,7 +69,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
adr r0, hyp_boot
smc #0
hyp_boot:
- b secondary_startup
+ b omap_secondary_startup
ENDPROC(omap5_secondary_hyp_startup)
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
@@ -82,7 +92,7 @@ hold: ldr r12,=0x103
* we've been released from the wait loop,secondary_stack
* should now contain the SVC stack for this core
*/
- b secondary_startup
+ b omap_secondary_startup
ENDPROC(omap4_secondary_startup)
ENTRY(omap4460_secondary_startup)
@@ -119,5 +129,5 @@ hold_2: ldr r12,=0x103
* we've been released from the wait loop,secondary_stack
* should now contain the SVC stack for this core
*/
- b secondary_startup
+ b omap_secondary_startup
ENDPROC(omap4460_secondary_startup)
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 593fec753b28..d3fb5661bb5d 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -64,3 +64,9 @@ void omap4_cpu_die(unsigned int cpu)
pr_debug("CPU%u: spurious wakeup call\n", cpu);
}
}
+
+/* Needed by kexec and platform_can_cpu_hotplug() */
+int omap4_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 65024af169d3..ad982465efd0 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -62,7 +62,9 @@
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
-#ifdef CONFIG_SMP
+static void __iomem *sar_base;
+
+#if defined(CONFIG_PM) && defined(CONFIG_SMP)
struct omap4_cpu_pm_info {
struct powerdomain *pwrdm;
@@ -90,7 +92,6 @@ struct cpu_pm_ops {
static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
-static void __iomem *sar_base;
static u32 cpu_context_offset;
static int default_finish_suspend(unsigned long cpu_state)
@@ -366,9 +367,6 @@ int __init omap4_mpuss_init(void)
return -ENODEV;
}
- if (cpu_is_omap44xx())
- sar_base = omap4_get_sar_ram_base();
-
/* Initilaise per CPU PM information */
pm_info = &per_cpu(omap4_pm_info, 0x0);
if (sar_base) {
@@ -444,3 +442,26 @@ int __init omap4_mpuss_init(void)
}
#endif
+
+/*
+ * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
+ * current kernel's secondary_startup() early before
+ * clockdomains_init(). Otherwise clockdomain_init() can
+ * wake CPU1 and cause a hang.
+ */
+void __init omap4_mpuss_early_init(void)
+{
+ unsigned long startup_pa;
+
+ if (!cpu_is_omap44xx())
+ return;
+
+ sar_base = omap4_get_sar_ram_base();
+
+ if (cpu_is_omap443x())
+ startup_pa = virt_to_phys(omap4_secondary_startup);
+ else
+ startup_pa = virt_to_phys(omap4460_secondary_startup);
+
+ writel_relaxed(startup_pa, sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
+}
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 8cd1de914ee4..b4de3da6dffa 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -40,14 +40,35 @@
#define OMAP5_CORE_COUNT 0x2
-/* SCU base address */
-static void __iomem *scu_base;
+struct omap_smp_config {
+ unsigned long cpu1_rstctrl_pa;
+ void __iomem *cpu1_rstctrl_va;
+ void __iomem *scu_base;
+ void *startup_addr;
+};
+
+static struct omap_smp_config cfg;
+
+static const struct omap_smp_config omap443x_cfg __initconst = {
+ .cpu1_rstctrl_pa = 0x4824380c,
+ .startup_addr = omap4_secondary_startup,
+};
+
+static const struct omap_smp_config omap446x_cfg __initconst = {
+ .cpu1_rstctrl_pa = 0x4824380c,
+ .startup_addr = omap4460_secondary_startup,
+};
+
+static const struct omap_smp_config omap5_cfg __initconst = {
+ .cpu1_rstctrl_pa = 0x48243810,
+ .startup_addr = omap5_secondary_startup,
+};
static DEFINE_SPINLOCK(boot_lock);
void __iomem *omap4_get_scu_base(void)
{
- return scu_base;
+ return cfg.scu_base;
}
#ifdef CONFIG_OMAP5_ERRATA_801819
@@ -93,7 +114,7 @@ static void omap4_secondary_init(unsigned int cpu)
* OMAP443X GP devices- SMP bit isn't accessible.
* OMAP446X GP devices - SMP bit access is enabled on both CPUs.
*/
- if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+ if (soc_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
4, 0, 0, 0, 0, 0);
@@ -179,7 +200,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Ensure that CPU power state is set to ON to avoid CPU
* powerdomain transition on wfi
*/
- clkdm_wakeup_nolock(cpu1_clkdm);
+ clkdm_deny_idle_nolock(cpu1_clkdm);
pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
clkdm_allow_idle_nolock(cpu1_clkdm);
@@ -222,9 +243,9 @@ static void __init omap4_smp_init_cpus(void)
* Currently we can't call ioremap here because
* SoC detection won't work until after init_early.
*/
- scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
- BUG_ON(!scu_base);
- ncores = scu_get_core_count(scu_base);
+ cfg.scu_base = OMAP2_L4_IO_ADDRESS(scu_a9_get_base());
+ BUG_ON(!cfg.scu_base);
+ ncores = scu_get_core_count(cfg.scu_base);
} else if (cpu_id == CPU_CORTEX_A15) {
ncores = OMAP5_CORE_COUNT;
}
@@ -242,20 +263,51 @@ static void __init omap4_smp_init_cpus(void)
static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
{
- void *startup_addr = omap4_secondary_startup;
void __iomem *base = omap_get_wakeupgen_base();
+ const struct omap_smp_config *c = NULL;
+
+ if (soc_is_omap443x())
+ c = &omap443x_cfg;
+ else if (soc_is_omap446x())
+ c = &omap446x_cfg;
+ else if (soc_is_dra74x() || soc_is_omap54xx())
+ c = &omap5_cfg;
+
+ if (!c) {
+ pr_err("%s Unknown SMP SoC?\n", __func__);
+ return;
+ }
+
+ /* Must preserve cfg.scu_base set earlier */
+ cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
+ cfg.startup_addr = c->startup_addr;
+
+ if (soc_is_dra74x() || soc_is_omap54xx()) {
+ if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+ cfg.startup_addr = omap5_secondary_hyp_startup;
+ omap5_erratum_workaround_801819();
+ }
+
+ cfg.cpu1_rstctrl_va = ioremap(cfg.cpu1_rstctrl_pa, 4);
+ if (!cfg.cpu1_rstctrl_va)
+ return;
/*
* Initialise the SCU and wake up the secondary core using
* wakeup_secondary().
*/
- if (scu_base)
- scu_enable(scu_base);
+ if (cfg.scu_base)
+ scu_enable(cfg.scu_base);
- if (cpu_is_omap446x())
- startup_addr = omap4460_secondary_startup;
- if (soc_is_dra74x() || soc_is_omap54xx())
- omap5_erratum_workaround_801819();
+ /*
+ * Reset CPU1 before configuring, otherwise kexec will
+ * end up trying to use old kernel startup address.
+ */
+ if (cfg.cpu1_rstctrl_va) {
+ writel_relaxed(1, cfg.cpu1_rstctrl_va);
+ readl_relaxed(cfg.cpu1_rstctrl_va);
+ writel_relaxed(0, cfg.cpu1_rstctrl_va);
+ }
/*
* Write the address of secondary startup routine into the
@@ -264,19 +316,10 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
- omap_auxcoreboot_addr(virt_to_phys(startup_addr));
+ omap_auxcoreboot_addr(virt_to_phys(cfg.startup_addr));
else
- /*
- * If the boot CPU is in HYP mode then start secondary
- * CPU in HYP mode as well.
- */
- if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
- writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
- base + OMAP_AUX_CORE_BOOT_1);
- else
- writel_relaxed(virt_to_phys(omap5_secondary_startup),
- base + OMAP_AUX_CORE_BOOT_1);
-
+ writel_relaxed(virt_to_phys(cfg.startup_addr),
+ base + OMAP_AUX_CORE_BOOT_1);
}
const struct smp_operations omap4_smp_ops __initconst = {
@@ -286,5 +329,6 @@ const struct smp_operations omap4_smp_ops __initconst = {
.smp_boot_secondary = omap4_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_die = omap4_cpu_die,
+ .cpu_kill = omap4_cpu_kill,
#endif
};
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 949696b6f17b..cf65ab8bb004 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -61,7 +61,7 @@ static phys_addr_t dram_sync_paddr;
static u32 dram_sync_size;
/*
- * The OMAP4 bus structure contains asynchrnous bridges which can buffer
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
* data writes from the MPU. These asynchronous bridges can be found on
* paths between the MPU to EMIF, and the MPU to L3 interconnects.
*
@@ -266,10 +266,11 @@ void __iomem *omap4_get_sar_ram_base(void)
}
/*
- * SAR RAM used to save and restore the HW
- * context in low power modes
+ * SAR RAM used to save and restore the HW context in low power modes.
+ * Note that we need to initialize this very early for kexec. See
+ * omap4_mpuss_early_init().
*/
-static int __init omap4_sar_ram_init(void)
+void __init omap4_sar_ram_init(void)
{
unsigned long sar_base;
@@ -282,16 +283,13 @@ static int __init omap4_sar_ram_init(void)
else if (soc_is_omap54xx())
sar_base = OMAP54XX_SAR_RAM_BASE;
else
- return -ENOMEM;
+ return;
/* Static mapping, never released */
sar_ram_base = ioremap(sar_base, SZ_16K);
if (WARN_ON(!sar_ram_base))
- return -ENOMEM;
-
- return 0;
+ return;
}
-omap_early_initcall(omap4_sar_ram_init);
static const struct of_device_id intc_match[] = {
{ .compatible = "ti,omap4-wugen-mpu", },
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index f7ff3b9dad87..e920dd83e443 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -63,7 +63,22 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias,
return;
}
- rc = clk_add_alias(clk_alias, dev_name(&od->pdev->dev), clk_name, NULL);
+ r = clk_get_sys(NULL, clk_name);
+
+ if (IS_ERR(r) && of_have_populated_dt()) {
+ struct of_phandle_args clkspec;
+
+ clkspec.np = of_find_node_by_name(NULL, clk_name);
+
+ r = of_clk_get_from_provider(&clkspec);
+
+ rc = clk_register_clkdev(r, clk_alias,
+ dev_name(&od->pdev->dev));
+ } else {
+ rc = clk_add_alias(clk_alias, dev_name(&od->pdev->dev),
+ clk_name, NULL);
+ }
+
if (rc) {
if (rc == -ENODEV || rc == -ENOMEM)
dev_err(&od->pdev->dev,
@@ -194,7 +209,7 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
int err;
switch (event) {
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
if (pdev->archdata.od)
omap_device_delete(pdev->archdata.od);
break;
@@ -268,7 +283,7 @@ static int _omap_device_idle_hwmods(struct omap_device *od)
* function returns a value different than the value the caller got
* the last time it called this function.
*
- * If any hwmods exist for the omap_device assoiated with @pdev,
+ * If any hwmods exist for the omap_device associated with @pdev,
* return the context loss counter for that hwmod, otherwise return
* zero.
*/
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 83cb527755a9..5b709383381c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -178,6 +178,11 @@
*/
#define OMAP4_RST_CTRL_ST_OFFSET 4
+/*
+ * Maximum length for module clock handle names
+ */
+#define MOD_CLK_MAX_NAME_LEN 32
+
/**
* struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
* @enable_module: function to enable a module (via MODULEMODE)
@@ -200,6 +205,7 @@ struct omap_hwmod_soc_ops {
int (*init_clkdm)(struct omap_hwmod *oh);
void (*update_context_lost)(struct omap_hwmod *oh);
int (*get_context_lost)(struct omap_hwmod *oh);
+ int (*disable_direct_prcm)(struct omap_hwmod *oh);
};
/* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
@@ -776,17 +782,35 @@ static int _del_initiator_dep(struct omap_hwmod *oh, struct omap_hwmod *init_oh)
* @oh: struct omap_hwmod *
*
* Called from _init_clocks(). Populates the @oh _clk (main
- * functional clock pointer) if a main_clk is present. Returns 0 on
- * success or -EINVAL on error.
+ * functional clock pointer) if a clock matching the hwmod name is found,
+ * or a main_clk is present. Returns 0 on success or -EINVAL on error.
*/
static int _init_main_clk(struct omap_hwmod *oh)
{
int ret = 0;
+ char name[MOD_CLK_MAX_NAME_LEN];
+ struct clk *clk;
- if (!oh->main_clk)
- return 0;
+ /* +7 magic comes from '_mod_ck' suffix */
+ if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
+ pr_warn("%s: warning: cropping name for %s\n", __func__,
+ oh->name);
+
+ strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7);
+ strcat(name, "_mod_ck");
+
+ clk = clk_get(NULL, name);
+ if (!IS_ERR(clk)) {
+ oh->_clk = clk;
+ soc_ops.disable_direct_prcm(oh);
+ oh->main_clk = kstrdup(name, GFP_KERNEL);
+ } else {
+ if (!oh->main_clk)
+ return 0;
+
+ oh->_clk = clk_get(NULL, oh->main_clk);
+ }
- oh->_clk = clk_get(NULL, oh->main_clk);
if (IS_ERR(oh->_clk)) {
pr_warn("omap_hwmod: %s: cannot clk_get main_clk %s\n",
oh->name, oh->main_clk);
@@ -1678,7 +1702,6 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
{
struct omap_hwmod_rst_info ohri;
int ret = -EINVAL;
- int hwsup = 0;
if (!oh)
return -EINVAL;
@@ -1696,7 +1719,7 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
* might not be completed. The clockdomain can be set
* in HW_AUTO only when the module become ready.
*/
- hwsup = clkdm_in_hwsup(oh->clkdm);
+ clkdm_deny_idle(oh->clkdm);
ret = clkdm_hwmod_enable(oh->clkdm, oh);
if (ret) {
WARN(1, "omap_hwmod: %s: could not enable clockdomain %s: %d\n",
@@ -1723,8 +1746,7 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
* Set the clockdomain to HW_AUTO, assuming that the
* previous state was HW_AUTO.
*/
- if (hwsup)
- clkdm_allow_idle(oh->clkdm);
+ clkdm_allow_idle(oh->clkdm);
clkdm_hwmod_disable(oh->clkdm, oh);
}
@@ -2078,7 +2100,6 @@ static int _enable_preprogram(struct omap_hwmod *oh)
static int _enable(struct omap_hwmod *oh)
{
int r;
- int hwsup = 0;
pr_debug("omap_hwmod: %s: enabling\n", oh->name);
@@ -2138,8 +2159,7 @@ static int _enable(struct omap_hwmod *oh)
* completely the module. The clockdomain can be set
* in HW_AUTO only when the module become ready.
*/
- hwsup = clkdm_in_hwsup(oh->clkdm) &&
- !clkdm_missing_idle_reporting(oh->clkdm);
+ clkdm_deny_idle(oh->clkdm);
r = clkdm_hwmod_enable(oh->clkdm, oh);
if (r) {
WARN(1, "omap_hwmod: %s: could not enable clockdomain %s: %d\n",
@@ -2159,14 +2179,10 @@ static int _enable(struct omap_hwmod *oh)
r = (soc_ops.wait_target_ready) ? soc_ops.wait_target_ready(oh) :
-EINVAL;
- if (!r) {
- /*
- * Set the clockdomain to HW_AUTO only if the target is ready,
- * assuming that the previous state was HW_AUTO
- */
- if (oh->clkdm && hwsup)
- clkdm_allow_idle(oh->clkdm);
+ if (oh->clkdm)
+ clkdm_allow_idle(oh->clkdm);
+ if (!r) {
oh->_state = _HWMOD_STATE_ENABLED;
/* Access the sysconfig only if the target is ready */
@@ -2220,6 +2236,9 @@ static int _idle(struct omap_hwmod *oh)
_idle_sysc(oh);
_del_initiator_dep(oh, mpu_oh);
+ if (oh->clkdm)
+ clkdm_deny_idle(oh->clkdm);
+
if (oh->flags & HWMOD_BLOCK_WFI)
cpu_idle_poll_ctrl(false);
if (soc_ops.disable_module)
@@ -2232,8 +2251,10 @@ static int _idle(struct omap_hwmod *oh)
* transition to complete properly.
*/
_disable_clocks(oh);
- if (oh->clkdm)
+ if (oh->clkdm) {
+ clkdm_allow_idle(oh->clkdm);
clkdm_hwmod_disable(oh->clkdm, oh);
+ }
/* Mux pins for device idle if populated */
if (oh->mux && oh->mux->pads_dynamic) {
@@ -3091,6 +3112,25 @@ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
}
/**
+ * _omap4_disable_direct_prcm - disable direct PRCM control for hwmod
+ * @oh: struct omap_hwmod * to disable control for
+ *
+ * Disables direct PRCM clkctrl done by hwmod core. Instead, the hwmod
+ * will be using its main_clk to enable/disable the module. Returns
+ * 0 if successful.
+ */
+static int _omap4_disable_direct_prcm(struct omap_hwmod *oh)
+{
+ if (!oh)
+ return -EINVAL;
+
+ oh->prcm.omap4.clkctrl_offs = 0;
+ oh->prcm.omap4.modulemode = 0;
+
+ return 0;
+}
+
+/**
* _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args
* @oh: struct omap_hwmod * to deassert hardreset
* @ohri: hardreset line data
@@ -3913,6 +3953,7 @@ void __init omap_hwmod_init(void)
soc_ops.init_clkdm = _init_clkdm;
soc_ops.update_context_lost = _omap4_update_context_lost;
soc_ops.get_context_lost = _omap4_get_context_lost;
+ soc_ops.disable_direct_prcm = _omap4_disable_direct_prcm;
} else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() ||
soc_is_am43xx()) {
soc_ops.enable_module = _omap4_enable_module;
@@ -3922,6 +3963,7 @@ void __init omap_hwmod_init(void)
soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
soc_ops.init_clkdm = _init_clkdm;
+ soc_ops.disable_direct_prcm = _omap4_disable_direct_prcm;
} else {
WARN(1, "omap_hwmod: unknown SoC type\n");
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
index 7f737965f543..d3e61d1a02d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -36,17 +36,8 @@ extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio3;
extern struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__elm;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0;
-extern struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0;
-extern struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0;
-extern struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1;
-extern struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1;
-extern struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1;
-extern struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2;
-extern struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2;
-extern struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2;
-extern struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2;
extern struct omap_hwmod_ocp_if am33xx_l3_s__gpmc;
extern struct omap_hwmod_ocp_if am33xx_l4_per__i2c2;
extern struct omap_hwmod_ocp_if am33xx_l4_per__i2c3;
@@ -98,17 +89,8 @@ extern struct omap_hwmod am33xx_dcan0_hwmod;
extern struct omap_hwmod am33xx_dcan1_hwmod;
extern struct omap_hwmod am33xx_elm_hwmod;
extern struct omap_hwmod am33xx_epwmss0_hwmod;
-extern struct omap_hwmod am33xx_ecap0_hwmod;
-extern struct omap_hwmod am33xx_eqep0_hwmod;
-extern struct omap_hwmod am33xx_ehrpwm0_hwmod;
extern struct omap_hwmod am33xx_epwmss1_hwmod;
-extern struct omap_hwmod am33xx_ecap1_hwmod;
-extern struct omap_hwmod am33xx_eqep1_hwmod;
-extern struct omap_hwmod am33xx_ehrpwm1_hwmod;
extern struct omap_hwmod am33xx_epwmss2_hwmod;
-extern struct omap_hwmod am33xx_ecap2_hwmod;
-extern struct omap_hwmod am33xx_eqep2_hwmod;
-extern struct omap_hwmod am33xx_ehrpwm2_hwmod;
extern struct omap_hwmod am33xx_gpio1_hwmod;
extern struct omap_hwmod am33xx_gpio2_hwmod;
extern struct omap_hwmod am33xx_gpio3_hwmod;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
index 1c210cb2b8c1..10dff2f0086a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -176,28 +176,6 @@ struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0 = {
.user = OCP_USER_MPU,
};
-struct omap_hwmod_ocp_if am33xx_epwmss0__ecap0 = {
- .master = &am33xx_epwmss0_hwmod,
- .slave = &am33xx_ecap0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_epwmss0__eqep0 = {
- .master = &am33xx_epwmss0_hwmod,
- .slave = &am33xx_eqep0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_epwmss0__ehrpwm0 = {
- .master = &am33xx_epwmss0_hwmod,
- .slave = &am33xx_ehrpwm0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-
static struct omap_hwmod_addr_space am33xx_epwmss1_addr_space[] = {
{
.pa_start = 0x48302000,
@@ -215,27 +193,6 @@ struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1 = {
.user = OCP_USER_MPU,
};
-struct omap_hwmod_ocp_if am33xx_epwmss1__ecap1 = {
- .master = &am33xx_epwmss1_hwmod,
- .slave = &am33xx_ecap1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_epwmss1__eqep1 = {
- .master = &am33xx_epwmss1_hwmod,
- .slave = &am33xx_eqep1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_epwmss1__ehrpwm1 = {
- .master = &am33xx_epwmss1_hwmod,
- .slave = &am33xx_ehrpwm1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_addr_space am33xx_epwmss2_addr_space[] = {
{
.pa_start = 0x48304000,
@@ -253,27 +210,6 @@ struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2 = {
.user = OCP_USER_MPU,
};
-struct omap_hwmod_ocp_if am33xx_epwmss2__ecap2 = {
- .master = &am33xx_epwmss2_hwmod,
- .slave = &am33xx_ecap2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_epwmss2__eqep2 = {
- .master = &am33xx_epwmss2_hwmod,
- .slave = &am33xx_eqep2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_epwmss2__ehrpwm2 = {
- .master = &am33xx_epwmss2_hwmod,
- .slave = &am33xx_ehrpwm2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l3s cfg -> gpmc */
struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
.master = &am33xx_l3_s_hwmod,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index aed33621deeb..55c5878577f4 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -449,18 +449,6 @@ struct omap_hwmod_class am33xx_epwmss_hwmod_class = {
.sysc = &am33xx_epwmss_sysc,
};
-static struct omap_hwmod_class am33xx_ecap_hwmod_class = {
- .name = "ecap",
-};
-
-static struct omap_hwmod_class am33xx_eqep_hwmod_class = {
- .name = "eqep",
-};
-
-struct omap_hwmod_class am33xx_ehrpwm_hwmod_class = {
- .name = "ehrpwm",
-};
-
/* epwmss0 */
struct omap_hwmod am33xx_epwmss0_hwmod = {
.name = "epwmss0",
@@ -474,30 +462,6 @@ struct omap_hwmod am33xx_epwmss0_hwmod = {
},
};
-/* ecap0 */
-struct omap_hwmod am33xx_ecap0_hwmod = {
- .name = "ecap0",
- .class = &am33xx_ecap_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* eqep0 */
-struct omap_hwmod am33xx_eqep0_hwmod = {
- .name = "eqep0",
- .class = &am33xx_eqep_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* ehrpwm0 */
-struct omap_hwmod am33xx_ehrpwm0_hwmod = {
- .name = "ehrpwm0",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
/* epwmss1 */
struct omap_hwmod am33xx_epwmss1_hwmod = {
.name = "epwmss1",
@@ -511,30 +475,6 @@ struct omap_hwmod am33xx_epwmss1_hwmod = {
},
};
-/* ecap1 */
-struct omap_hwmod am33xx_ecap1_hwmod = {
- .name = "ecap1",
- .class = &am33xx_ecap_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* eqep1 */
-struct omap_hwmod am33xx_eqep1_hwmod = {
- .name = "eqep1",
- .class = &am33xx_eqep_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* ehrpwm1 */
-struct omap_hwmod am33xx_ehrpwm1_hwmod = {
- .name = "ehrpwm1",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
/* epwmss2 */
struct omap_hwmod am33xx_epwmss2_hwmod = {
.name = "epwmss2",
@@ -548,30 +488,6 @@ struct omap_hwmod am33xx_epwmss2_hwmod = {
},
};
-/* ecap2 */
-struct omap_hwmod am33xx_ecap2_hwmod = {
- .name = "ecap2",
- .class = &am33xx_ecap_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* eqep2 */
-struct omap_hwmod am33xx_eqep2_hwmod = {
- .name = "eqep2",
- .class = &am33xx_eqep_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
-/* ehrpwm2 */
-struct omap_hwmod am33xx_ehrpwm2_hwmod = {
- .name = "ehrpwm2",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
/*
* 'gpio' class: for gpio 0,1,2,3
*/
@@ -1476,6 +1392,7 @@ static void omap_hwmod_am43xx_rst(void)
{
RSTCTRL(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTCTRL_OFFSET);
RSTCTRL(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTCTRL_OFFSET);
+ RSTST(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTST_OFFSET);
RSTST(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTST_OFFSET);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index cc0791d9125b..e1c2025d6d3e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -593,17 +593,8 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_ls__spinlock,
&am33xx_l4_ls__elm,
&am33xx_l4_ls__epwmss0,
- &am33xx_epwmss0__ecap0,
- &am33xx_epwmss0__eqep0,
- &am33xx_epwmss0__ehrpwm0,
&am33xx_l4_ls__epwmss1,
- &am33xx_epwmss1__ecap1,
- &am33xx_epwmss1__eqep1,
- &am33xx_epwmss1__ehrpwm1,
&am33xx_l4_ls__epwmss2,
- &am33xx_epwmss2__ecap2,
- &am33xx_epwmss2__eqep2,
- &am33xx_epwmss2__ehrpwm2,
&am33xx_l3_s__gpmc,
&am33xx_l3_main__lcdc,
&am33xx_l4_ls__mcspi0,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 9869a75c5d96..d72ee6185d5e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1322,16 +1322,8 @@ static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
.name = "mcbsp2_sidetone",
.class = &omap3xxx_mcbsp_sidetone_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp2_sidetone_irqs,
- .main_clk = "mcbsp2_fck",
- .prcm = {
- .omap2 = {
- .prcm_reg_id = 1,
- .module_bit = OMAP3430_EN_MCBSP2_SHIFT,
- .module_offs = OMAP3430_PER_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
- },
- },
+ .main_clk = "mcbsp2_ick",
+ .flags = HWMOD_NO_IDLEST,
};
/* mcbsp3_sidetone */
@@ -1344,16 +1336,8 @@ static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
.name = "mcbsp3_sidetone",
.class = &omap3xxx_mcbsp_sidetone_hwmod_class,
.mpu_irqs = omap3xxx_mcbsp3_sidetone_irqs,
- .main_clk = "mcbsp3_fck",
- .prcm = {
- .omap2 = {
- .prcm_reg_id = 1,
- .module_bit = OMAP3430_EN_MCBSP3_SHIFT,
- .module_offs = OMAP3430_PER_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
- },
- },
+ .main_clk = "mcbsp3_ick",
+ .flags = HWMOD_NO_IDLEST,
};
/* SR common */
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index 97fd399202dc..61f2f301d739 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -202,13 +202,6 @@ static struct omap_hwmod am43xx_epwmss3_hwmod = {
},
};
-static struct omap_hwmod am43xx_ehrpwm3_hwmod = {
- .name = "ehrpwm3",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
static struct omap_hwmod am43xx_epwmss4_hwmod = {
.name = "epwmss4",
.class = &am33xx_epwmss_hwmod_class,
@@ -222,13 +215,6 @@ static struct omap_hwmod am43xx_epwmss4_hwmod = {
},
};
-static struct omap_hwmod am43xx_ehrpwm4_hwmod = {
- .name = "ehrpwm4",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
static struct omap_hwmod am43xx_epwmss5_hwmod = {
.name = "epwmss5",
.class = &am33xx_epwmss_hwmod_class,
@@ -242,13 +228,6 @@ static struct omap_hwmod am43xx_epwmss5_hwmod = {
},
};
-static struct omap_hwmod am43xx_ehrpwm5_hwmod = {
- .name = "ehrpwm5",
- .class = &am33xx_ehrpwm_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "l4ls_gclk",
-};
-
static struct omap_hwmod am43xx_spi2_hwmod = {
.name = "spi2",
.class = &am33xx_spi_hwmod_class,
@@ -744,13 +723,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss3 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_epwmss3__ehrpwm3 = {
- .master = &am43xx_epwmss3_hwmod,
- .slave = &am43xx_ehrpwm3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss4 = {
.master = &am33xx_l4_ls_hwmod,
.slave = &am43xx_epwmss4_hwmod,
@@ -758,13 +730,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss4 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_epwmss4__ehrpwm4 = {
- .master = &am43xx_epwmss4_hwmod,
- .slave = &am43xx_ehrpwm4_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss5 = {
.master = &am33xx_l4_ls_hwmod,
.slave = &am43xx_epwmss5_hwmod,
@@ -772,13 +737,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__epwmss5 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_epwmss5__ehrpwm5 = {
- .master = &am43xx_epwmss5_hwmod,
- .slave = &am43xx_ehrpwm5_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi2 = {
.master = &am33xx_l4_ls_hwmod,
.slave = &am43xx_spi2_hwmod,
@@ -919,11 +877,8 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_ls__timer10,
&am43xx_l4_ls__timer11,
&am43xx_l4_ls__epwmss3,
- &am43xx_epwmss3__ehrpwm3,
&am43xx_l4_ls__epwmss4,
- &am43xx_epwmss4__ehrpwm4,
&am43xx_l4_ls__epwmss5,
- &am43xx_epwmss5__ehrpwm5,
&am43xx_l4_ls__mcspi2,
&am43xx_l4_ls__mcspi3,
&am43xx_l4_ls__mcspi4,
@@ -982,17 +937,8 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_ls__spinlock,
&am33xx_l4_ls__elm,
&am33xx_l4_ls__epwmss0,
- &am33xx_epwmss0__ecap0,
- &am33xx_epwmss0__eqep0,
- &am33xx_epwmss0__ehrpwm0,
&am33xx_l4_ls__epwmss1,
- &am33xx_epwmss1__ecap1,
- &am33xx_epwmss1__eqep1,
- &am33xx_epwmss1__ehrpwm1,
&am33xx_l4_ls__epwmss2,
- &am33xx_epwmss2__ecap2,
- &am33xx_epwmss2__eqep2,
- &am33xx_epwmss2__ehrpwm2,
&am33xx_l3_s__gpmc,
&am33xx_l4_ls__mcspi0,
&am33xx_l4_ls__mcspi1,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index d0e7e5259ec3..1ab7096af8e2 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -2905,58 +2905,27 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__tptc1 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_addr_space dra7xx_dss_addrs[] = {
- {
- .name = "family",
- .pa_start = 0x58000000,
- .pa_end = 0x5800007f,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l3_main_1 -> dss */
static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dss = {
.master = &dra7xx_l3_main_1_hwmod,
.slave = &dra7xx_dss_hwmod,
.clk = "l3_iclk_div",
- .addr = dra7xx_dss_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-static struct omap_hwmod_addr_space dra7xx_dss_dispc_addrs[] = {
- {
- .name = "dispc",
- .pa_start = 0x58001000,
- .pa_end = 0x58001fff,
- .flags = ADDR_TYPE_RT
- },
-};
-
/* l3_main_1 -> dispc */
static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dispc = {
.master = &dra7xx_l3_main_1_hwmod,
.slave = &dra7xx_dss_dispc_hwmod,
.clk = "l3_iclk_div",
- .addr = dra7xx_dss_dispc_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-static struct omap_hwmod_addr_space dra7xx_dss_hdmi_addrs[] = {
- {
- .name = "hdmi_wp",
- .pa_start = 0x58040000,
- .pa_end = 0x580400ff,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
/* l3_main_1 -> dispc */
static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
.master = &dra7xx_l3_main_1_hwmod,
.slave = &dra7xx_dss_hdmi_hwmod,
.clk = "l3_iclk_div",
- .addr = dra7xx_dss_hdmi_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
@@ -3410,21 +3379,11 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-static struct omap_hwmod_addr_space dra7xx_qspi_addrs[] = {
- {
- .pa_start = 0x4b300000,
- .pa_end = 0x4b30007f,
- .flags = ADDR_TYPE_RT
- },
- { }
-};
-
/* l3_main_1 -> qspi */
static struct omap_hwmod_ocp_if dra7xx_l3_main_1__qspi = {
.master = &dra7xx_l3_main_1_hwmod,
.slave = &dra7xx_qspi_hwmod,
.clk = "l3_iclk_div",
- .addr = dra7xx_qspi_addrs,
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index df8327713d06..b82b77cff24c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -243,7 +243,7 @@ static struct omap_hwmod_class ti81xx_rtc_hwmod_class = {
.sysc = &ti81xx_rtc_sysc,
};
-struct omap_hwmod ti81xx_rtc_hwmod = {
+static struct omap_hwmod ti81xx_rtc_hwmod = {
.name = "rtc",
.class = &ti81xx_rtc_hwmod_class,
.clkdm_name = "alwon_l3s_clkdm",
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 6571ad959908..05e20aaf68dd 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -26,6 +26,7 @@
#include <linux/platform_data/wkup_m3.h>
#include <linux/platform_data/pwm_omap_dmtimer.h>
#include <linux/platform_data/media/ir-rx51.h>
+#include <linux/platform_data/asoc-ti-mcbsp.h>
#include <plat/dmtimer.h>
#include "common.h"
@@ -273,8 +274,6 @@ static struct platform_device omap3_rom_rng_device = {
},
};
-static struct platform_device rx51_lirc_device;
-
static void __init nokia_n900_legacy_init(void)
{
hsmmc2_internal_input_clk();
@@ -293,10 +292,7 @@ static void __init nokia_n900_legacy_init(void)
pr_info("RX-51: Registering OMAP3 HWRNG device\n");
platform_device_register(&omap3_rom_rng_device);
-
}
-
- platform_device_register(&rx51_lirc_device);
}
static void __init omap3_tao3530_legacy_init(void)
@@ -491,10 +487,6 @@ static struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = {
static struct lirc_rx51_platform_data __maybe_unused rx51_lirc_data = {
.set_max_mpu_wakeup_lat = omap_pm_set_max_mpu_wakeup_lat,
- .pwm_timer = 9, /* Use GPT 9 for CIR */
-#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
- .dmtimer = &pwm_dmtimer_pdata,
-#endif
};
static struct platform_device __maybe_unused rx51_lirc_device = {
@@ -505,6 +497,16 @@ static struct platform_device __maybe_unused rx51_lirc_device = {
},
};
+#if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP)
+static struct omap_mcbsp_platform_data mcbsp_pdata;
+static void __init omap3_mcbsp_init(void)
+{
+ omap3_mcbsp_init_pdata_callback(&mcbsp_pdata);
+}
+#else
+static void __init omap3_mcbsp_init(void) {}
+#endif
+
/*
* Few boards still need auxdata populated before we populate
* the dev entries in of_platform_populate().
@@ -532,10 +534,16 @@ static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
&omap3_iommu_pdata),
OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x4809c000, "4809c000.mmc", &mmc_pdata[0]),
OF_DEV_AUXDATA("ti,omap3-hsmmc", 0x480b4000, "480b4000.mmc", &mmc_pdata[1]),
+ OF_DEV_AUXDATA("nokia,n900-ir", 0, "n900-ir", &rx51_lirc_data),
/* Only on am3517 */
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
&am35xx_emac_pdata),
+ /* McBSP modules with sidetone core */
+#if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP)
+ OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),
+ OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49024000, "49024000.mcbsp", &mcbsp_pdata),
+#endif
#endif
#ifdef CONFIG_SOC_AM33XX
OF_DEV_AUXDATA("ti,am3352-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
@@ -608,6 +616,8 @@ void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
of_machine_is_compatible("ti,omap3"))
omap_sdrc_init(NULL, NULL);
+ if (of_machine_is_compatible("ti,omap3"))
+ omap3_mcbsp_init();
pdata_quirks_check(auxdata_quirks);
of_platform_populate(NULL, omap_dt_match_table,
omap_auxdata_lookup, NULL);
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 2f7b11da7d5d..678d2a31dcb8 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -110,13 +110,7 @@ static void __init omap2_init_processor_devices(void)
int __init omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
{
- /* XXX The usecount test is racy */
- if ((clkdm->flags & CLKDM_CAN_ENABLE_AUTO) &&
- !(clkdm->flags & CLKDM_MISSING_IDLE_REPORTING))
- clkdm_allow_idle(clkdm);
- else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
- clkdm->usecount == 0)
- clkdm_sleep(clkdm);
+ clkdm_allow_idle(clkdm);
return 0;
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index daf2753de7aa..76eb6ec5f157 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -223,7 +223,6 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
* @pwrdm: struct powerdomain * to operate on
* @curr_pwrst: current power state of @pwrdm
* @pwrst: power state to switch to
- * @hwsup: ptr to a bool to return whether the clkdm is hardware-supervised
*
* Determine whether the powerdomain needs to be turned on before
* attempting to switch power states. Called by
@@ -234,8 +233,7 @@ static int _pwrdm_post_transition_cb(struct powerdomain *pwrdm, void *unused)
* "Types of sleep_switch" comment above).
*/
static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm,
- u8 curr_pwrst, u8 pwrst,
- bool *hwsup)
+ u8 curr_pwrst, u8 pwrst)
{
u8 sleep_switch;
@@ -245,8 +243,7 @@ static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm,
arch_pwrdm->pwrdm_set_lowpwrstchange) {
sleep_switch = LOWPOWERSTATE_SWITCH;
} else {
- *hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
- clkdm_wakeup_nolock(pwrdm->pwrdm_clkdms[0]);
+ clkdm_deny_idle_nolock(pwrdm->pwrdm_clkdms[0]);
sleep_switch = FORCEWAKEUP_SWITCH;
}
} else {
@@ -260,7 +257,6 @@ static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm,
* _pwrdm_restore_clkdm_state - restore the clkdm hwsup state after pwrst change
* @pwrdm: struct powerdomain * to operate on
* @sleep_switch: return value from _pwrdm_save_clkdm_state_and_activate()
- * @hwsup: should @pwrdm's first clockdomain be set to hardware-supervised mode?
*
* Restore the clockdomain state perturbed by
* _pwrdm_save_clkdm_state_and_activate(), and call the power state
@@ -271,14 +267,11 @@ static u8 _pwrdm_save_clkdm_state_and_activate(struct powerdomain *pwrdm,
* software-supervised sleep. No return value.
*/
static void _pwrdm_restore_clkdm_state(struct powerdomain *pwrdm,
- u8 sleep_switch, bool hwsup)
+ u8 sleep_switch)
{
switch (sleep_switch) {
case FORCEWAKEUP_SWITCH:
- if (hwsup)
- clkdm_allow_idle_nolock(pwrdm->pwrdm_clkdms[0]);
- else
- clkdm_sleep_nolock(pwrdm->pwrdm_clkdms[0]);
+ clkdm_allow_idle_nolock(pwrdm->pwrdm_clkdms[0]);
break;
case LOWPOWERSTATE_SWITCH:
if (pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE &&
@@ -1093,7 +1086,6 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 pwrst)
u8 next_pwrst, sleep_switch;
int curr_pwrst;
int ret = 0;
- bool hwsup = false;
if (!pwrdm || IS_ERR(pwrdm))
return -EINVAL;
@@ -1117,14 +1109,14 @@ int omap_set_pwrdm_state(struct powerdomain *pwrdm, u8 pwrst)
goto osps_out;
sleep_switch = _pwrdm_save_clkdm_state_and_activate(pwrdm, curr_pwrst,
- pwrst, &hwsup);
+ pwrst);
ret = pwrdm_set_next_pwrst(pwrdm, pwrst);
if (ret)
pr_err("%s: unable to set power state of powerdomain: %s\n",
__func__, pwrdm->name);
- _pwrdm_restore_clkdm_state(pwrdm, sleep_switch, hwsup);
+ _pwrdm_restore_clkdm_state(pwrdm, sleep_switch);
osps_out:
pwrdm_unlock(pwrdm);
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
index 7c34c44eb0ae..babb5db5a3a4 100644
--- a/arch/arm/mach-omap2/prcm43xx.h
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -39,6 +39,7 @@
/* RM RSTST offsets */
#define AM43XX_RM_GFX_RSTST_OFFSET 0x0014
+#define AM43XX_RM_PER_RSTST_OFFSET 0x0014
#define AM43XX_RM_WKUP_RSTST_OFFSET 0x0014
/* CM instances */
diff --git a/arch/arm/mach-omap2/prm33xx.h b/arch/arm/mach-omap2/prm33xx.h
index 2bc4ec52ba78..66302c6aba61 100644
--- a/arch/arm/mach-omap2/prm33xx.h
+++ b/arch/arm/mach-omap2/prm33xx.h
@@ -52,8 +52,6 @@
/* PRM.PER_PRM register offsets */
#define AM33XX_RM_PER_RSTCTRL_OFFSET 0x0000
#define AM33XX_RM_PER_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0000)
-#define AM33XX_RM_PER_RSTST_OFFSET 0x0004
-#define AM33XX_RM_PER_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0004)
#define AM33XX_PM_PER_PWRSTST_OFFSET 0x0008
#define AM33XX_PM_PER_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0008)
#define AM33XX_PM_PER_PWRSTCTRL_OFFSET 0x000c
diff --git a/arch/arm/mach-omap2/sdrc.h b/arch/arm/mach-omap2/sdrc.h
index 645a2a46b213..f11500612983 100644
--- a/arch/arm/mach-omap2/sdrc.h
+++ b/arch/arm/mach-omap2/sdrc.h
@@ -175,8 +175,8 @@ u32 omap2xxx_sdrc_reprogram(u32 level, u32 force);
* don't adjust it down as your clock period increases the refresh interval
* will not be met. Setting all parameters for complete worst case may work,
* but may cut memory performance by 2x. Due to errata the DLLs need to be
- * unlocked and their value needs run time calibration. A dynamic call is
- * need for that as no single right value exists acorss production samples.
+ * unlocked and their value needs run time calibration. A dynamic call is
+ * need for that as no single right value exists across production samples.
*
* Only the FULL speed values are given. Current code is such that rate
* changes must be made at DPLLoutx2. The actual value adjustment for low
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index cb9497a20fb3..5e2e2218a402 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -289,6 +289,8 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
if (!timer->io_base)
return -ENXIO;
+ omap_hwmod_setup_one(oh_name);
+
/* After the dmtimer is using hwmod these clocks won't be needed */
timer->fclk = clk_get(NULL, omap_hwmod_get_main_clk(oh));
if (IS_ERR(timer->fclk))
@@ -303,7 +305,6 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
clk_put(src);
- omap_hwmod_setup_one(oh_name);
omap_hwmod_enable(oh);
__omap_dm_timer_init_regs(timer);
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index a2af15822fcb..89bb0fc796bd 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -1,9 +1,9 @@
menuconfig ARCH_ORION5X
bool "Marvell Orion"
depends on MMU && ARCH_MULTI_V5
- select ARCH_REQUIRE_GPIOLIB
select CPU_FEROCEON
select GENERIC_CLOCKEVENTS
+ select GPIOLIB
select MVEBU_MBUS
select PCI
select PLAT_ORION_LEGACY
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
index de980ef9cda1..ac4af2283bef 100644
--- a/arch/arm/mach-orion5x/irq.c
+++ b/arch/arm/mach-orion5x/irq.c
@@ -26,7 +26,7 @@ static int __initdata gpio0_irqs[4] = {
IRQ_ORION5X_GPIO_24_31,
};
-asmlinkage void
+static asmlinkage void
__exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
{
u32 stat;
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 3a58a5d4a28a..8d597267d0c4 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -16,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
-#include <linux/m48t86.h>
+#include <linux/platform_data/rtc-m48t86.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/timeriomem-rng.h>
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig
index 4fff3c7666df..567496bd250a 100644
--- a/arch/arm/mach-oxnas/Kconfig
+++ b/arch/arm/mach-oxnas/Kconfig
@@ -1,7 +1,7 @@
menuconfig ARCH_OXNAS
bool "Oxford Semiconductor OXNAS Family SoCs"
- select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_RESET_CONTROLLER
+ select GPIOLIB
select PINCTRL
depends on ARCH_MULTI_V5
help
@@ -11,10 +11,10 @@ if ARCH_OXNAS
config MACH_OX810SE
bool "Support OX810SE Based Products"
- select ARM_TIMER_SP804
select COMMON_CLK_OXNAS
select CPU_ARM926T
select MFD_SYSCON
+ select OXNAS_RPS_TIMER
select PINCTRL_OXNAS
select RESET_OXNAS
select VERSATILE_FPGA_IRQ
diff --git a/arch/arm/mach-picoxcell/Kconfig b/arch/arm/mach-picoxcell/Kconfig
index aef92ba0eacd..1c8f701526c9 100644
--- a/arch/arm/mach-picoxcell/Kconfig
+++ b/arch/arm/mach-picoxcell/Kconfig
@@ -1,8 +1,8 @@
config ARCH_PICOXCELL
bool "Picochip PicoXcell"
depends on ARCH_MULTI_V6
- select ARCH_REQUIRE_GPIOLIB
select ARM_VIC
select DW_APB_TIMER_OF
+ select GPIOLIB
select HAVE_TCM
select NO_IOPORT_MAP
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 9e938f2961cf..85e874a97337 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -3,8 +3,8 @@ menuconfig ARCH_SIRF
depends on ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
select RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select NO_IOPORT_MAP
select REGMAP
select PINCTRL
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index fa5f51d633a3..be4a66166d61 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -14,7 +14,7 @@
#include <linux/gpio.h>
#include <linux/delay.h>
-#include <linux/rtc-v3020.h>
+#include <linux/platform_data/rtc-v3020.h>
#include <video/mbxfb.h>
#include <linux/spi/spi.h>
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 5f5ac7c8faf0..868448d2cd82 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -25,7 +25,7 @@
#include <linux/gpio.h>
#include <linux/dm9000.h>
#include <linux/leds.h>
-#include <linux/rtc-v3020.h>
+#include <linux/platform_data/rtc-v3020.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 6e0268deec43..03354c21e1f2 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/dm9000.h>
-#include <linux/rtc-v3020.h>
+#include <linux/platform_data/rtc-v3020.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 73494500b51c..46ed10a807f0 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -23,4 +23,8 @@ config ARCH_MSM8974
bool "Enable support for MSM8974"
select HAVE_ARM_ARCH_TIMER
+config ARCH_MDM9615
+ bool "Enable support for MDM9615"
+ select CLKSRC_QCOM
+
endif
diff --git a/arch/arm/mach-qcom/board.c b/arch/arm/mach-qcom/board.c
index 6d8bbf7d39d8..d8060dfd1a21 100644
--- a/arch/arm/mach-qcom/board.c
+++ b/arch/arm/mach-qcom/board.c
@@ -22,6 +22,7 @@ static const char * const qcom_dt_match[] __initconst = {
"qcom,ipq8064",
"qcom,msm8660-surf",
"qcom,msm8960-cdp",
+ "qcom,mdm9615",
NULL
};
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index cef42fd886d1..9ad84cd01ba0 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -4,10 +4,10 @@ config ARCH_ROCKCHIP
select PINCTRL
select PINCTRL_ROCKCHIP
select ARCH_HAS_RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_GIC
select CACHE_L2X0
+ select GPIOLIB
select HAVE_ARM_ARCH_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index b91aee406c74..4b1690acb6a5 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -11,7 +11,7 @@ if ARCH_S3C24XX
config PLAT_S3C24XX
def_bool y
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select NO_IOPORT_MAP
select S3C_DEV_NAND
select IRQ_DOMAIN
diff --git a/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h b/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h
index c6583cfa5835..0d622f3b57a5 100644
--- a/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h
@@ -520,7 +520,7 @@
#define S3C24XX_EXTINT1 S3C24XX_GPIOREG2(0x8C)
#define S3C24XX_EXTINT2 S3C24XX_GPIOREG2(0x90)
-/* interrupt filtering conrrol for EINT16..EINT23 */
+/* interrupt filtering control for EINT16..EINT23 */
#define S3C2410_EINFLT0 S3C2410_GPIOREG(0x94)
#define S3C2410_EINFLT1 S3C2410_GPIOREG(0x98)
#define S3C2410_EINFLT2 S3C2410_GPIOREG(0x9C)
diff --git a/arch/arm/mach-s3c24xx/iotiming-s3c2410.c b/arch/arm/mach-s3c24xx/iotiming-s3c2410.c
index 4cd13ab6496b..65e5f9cb650f 100644
--- a/arch/arm/mach-s3c24xx/iotiming-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/iotiming-s3c2410.c
@@ -423,7 +423,7 @@ void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
* @timings: The IO timing information to fill out.
*
* Calculate the @timings timing information from the current frequency
- * information in @cfg, and the new frequency configur
+ * information in @cfg, and the new frequency configuration
* through all the IO banks, reading the state and then updating @iot
* as necessary.
*
diff --git a/arch/arm/mach-s3c24xx/mach-n30.c b/arch/arm/mach-s3c24xx/mach-n30.c
index 171c1f11fd22..070a0d0714a5 100644
--- a/arch/arm/mach-s3c24xx/mach-n30.c
+++ b/arch/arm/mach-s3c24xx/mach-n30.c
@@ -522,7 +522,7 @@ static void __init n30_hwinit(void)
*
* The pull ups for H6/H7 are enabled on N30 but not on the
* N35/PiN. I suppose is useful for a budget model of the N30
- * with no bluetooh. It doesn't hurt to have the pull ups
+ * with no bluetooth. It doesn't hurt to have the pull ups
* enabled on the N35, so leave them enabled for all models.
*/
__raw_writel(0x0028aaaa, S3C2410_GPHCON);
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index ce2db235dbaf..262ab0744748 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -143,7 +143,7 @@ static int osiris_dvs_remove(struct platform_device *pdev)
return 0;
}
-/* the CONFIG_PM block is so small, it isn't worth actaully compiling it
+/* the CONFIG_PM block is so small, it isn't worth actually compiling it
* out if the configuration isn't set. */
static int osiris_dvs_suspend(struct device *dev)
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2410.c b/arch/arm/mach-s3c24xx/pll-s3c2410.c
index 5e37d368594b..7ee4924a543d 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2410.c
@@ -32,11 +32,12 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
+/* This array should be sorted in ascending order of the frequencies */
static struct cpufreq_frequency_table pll_vals_12MHz[] = {
{ .frequency = 34000000, .driver_data = PLLVAL(82, 2, 3), },
{ .frequency = 45000000, .driver_data = PLLVAL(82, 1, 3), },
- { .frequency = 51000000, .driver_data = PLLVAL(161, 3, 3), },
{ .frequency = 48000000, .driver_data = PLLVAL(120, 2, 3), },
+ { .frequency = 51000000, .driver_data = PLLVAL(161, 3, 3), },
{ .frequency = 56000000, .driver_data = PLLVAL(142, 2, 3), },
{ .frequency = 68000000, .driver_data = PLLVAL(82, 2, 2), },
{ .frequency = 79000000, .driver_data = PLLVAL(71, 1, 2), },
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
index b355fca6cc2e..a3fbfed75e28 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
@@ -20,6 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
+/* This array should be sorted in ascending order of the frequencies */
static struct cpufreq_frequency_table s3c2440_plls_12[] = {
{ .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
{ .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
index be9a248b5ce9..bcff89fd9871 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
@@ -20,6 +20,7 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
+/* This array should be sorted in ascending order of the frequencies */
static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
{ .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
{ .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index e5c1888fc67b..459214fa20b4 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -5,12 +5,12 @@
menuconfig ARCH_S3C64XX
bool "Samsung S3C64XX"
depends on ARCH_MULTI_V6
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_VIC
select CLKSRC_SAMSUNG_PWM
select COMMON_CLK_SAMSUNG
select GPIO_SAMSUNG if ATAGS
+ select GPIOLIB
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_TCM
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index 9eb864412911..4f204668f00e 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -24,6 +24,7 @@ void s3c64xx_init_io(struct map_desc *mach_desc, int size);
void s3c64xx_restart(enum reboot_mode mode, const char *cmd);
+struct device_node;
void s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
unsigned long xusbxti_f, bool is_s3c6400, void __iomem *reg_base);
void s3c64xx_set_xtal_freq(unsigned long freq);
diff --git a/arch/arm/mach-s3c64xx/include/mach/map.h b/arch/arm/mach-s3c64xx/include/mach/map.h
index f55ccb1ce893..d51873e8f63f 100644
--- a/arch/arm/mach-s3c64xx/include/mach/map.h
+++ b/arch/arm/mach-s3c64xx/include/mach/map.h
@@ -99,7 +99,7 @@
#define S3C64XX_PA_USB_HSPHY (0x7C100000)
-/* compatibiltiy defines. */
+/* compatibility defines. */
#define S3C_PA_TIMER S3C64XX_PA_TIMER
#define S3C_PA_HSMMC0 S3C64XX_PA_HSMMC0
#define S3C_PA_HSMMC1 S3C64XX_PA_HSMMC1
diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c
index 936a63fc68d5..e0e1a729ef98 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq.c
@@ -43,6 +43,7 @@
#include <plat/samsung-time.h>
#include "common.h"
+#include "mach-smartq.h"
#include "regs-modem.h"
#define UCON S3C2410_UCON_DEFAULT
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 13bc9820ff22..4cec11cf5e6f 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -11,10 +11,10 @@ config ARCH_S5PV210
bool "Samsung S5PV210/S5PC110"
depends on ARCH_MULTI_V7
select ARCH_HAS_HOLES_MEMORYMODEL
- select ARCH_REQUIRE_GPIOLIB
select ARM_VIC
select CLKSRC_SAMSUNG_PWM
select COMMON_CLK_SAMSUNG
+ select GPIOLIB
select HAVE_S3C2410_I2C if I2C
select HAVE_S3C2410_WATCHDOG if WATCHDOG
select HAVE_S3C_RTC if RTC_CLASS
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index fe4ccb52f921..4a48c9f5f725 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -41,7 +41,7 @@ menuconfig ARCH_RENESAS
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
select NO_IOPORT_MAP
select PINCTRL
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select ZONE_DMA if ARM_LPAE
if ARCH_RENESAS
@@ -86,6 +86,10 @@ config ARCH_R8A7791
select ARCH_RCAR_GEN2
select I2C
+config ARCH_R8A7792
+ bool "R-Car V2H (R8A77920)"
+ select ARCH_RCAR_GEN2
+
config ARCH_R8A7793
bool "R-Car M2-N (R8A7793)"
select ARCH_RCAR_GEN2
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index fc95f7bd2dd9..3fc48b02eb4f 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ARCH_R8A7778) += setup-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += setup-r8a7779.o pm-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += setup-r8a7790.o
obj-$(CONFIG_ARCH_R8A7791) += setup-r8a7791.o
+obj-$(CONFIG_ARCH_R8A7792) += setup-r8a7792.o
obj-$(CONFIG_ARCH_R8A7793) += setup-r8a7793.o
obj-$(CONFIG_ARCH_R8A7794) += setup-r8a7794.o
obj-$(CONFIG_ARCH_EMEV2) += setup-emev2.o
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 3b562d87826d..1a8f7b3ab449 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -10,6 +10,7 @@ extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg);
extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
+extern bool shmobile_smp_init_fallback_ops(void);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
unsigned int max_cpus);
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index aba75c89f9c1..0c6bb458b7a4 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -24,6 +24,7 @@
#include <asm/suspend.h>
#include "common.h"
#include "platsmp-apmu.h"
+#include "rcar-gen2.h"
static struct {
void __iomem *iomem;
@@ -74,6 +75,7 @@ static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu)
return p ? fn(p, apmu_cpus[cpu].bit) : -EINVAL;
}
+#ifdef CONFIG_SMP
static void apmu_init_cpu(struct resource *res, int cpu, int bit)
{
if ((cpu >= ARRAY_SIZE(apmu_cpus)) || apmu_cpus[cpu].iomem)
@@ -117,18 +119,69 @@ static void apmu_parse_cfg(void (*fn)(struct resource *res, int cpu, int bit),
}
}
-void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
- struct rcar_apmu_config *apmu_config,
- int num)
+static const struct of_device_id apmu_ids[] = {
+ { .compatible = "renesas,apmu" },
+ { /*sentinel*/ }
+};
+
+static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
+{
+ struct device_node *np_apmu, *np_cpu;
+ struct resource res;
+ int bit, index;
+ u32 id;
+
+ for_each_matching_node(np_apmu, apmu_ids) {
+ /* only enable the cluster that includes the boot CPU */
+ bool is_allowed = false;
+
+ for (bit = 0; bit < CONFIG_NR_CPUS; bit++) {
+ np_cpu = of_parse_phandle(np_apmu, "cpus", bit);
+ if (np_cpu) {
+ if (!of_property_read_u32(np_cpu, "reg", &id)) {
+ if (id == cpu_logical_map(0)) {
+ is_allowed = true;
+ of_node_put(np_cpu);
+ break;
+ }
+
+ }
+ of_node_put(np_cpu);
+ }
+ }
+ if (!is_allowed)
+ continue;
+
+ for (bit = 0; bit < CONFIG_NR_CPUS; bit++) {
+ np_cpu = of_parse_phandle(np_apmu, "cpus", bit);
+ if (np_cpu) {
+ if (!of_property_read_u32(np_cpu, "reg", &id)) {
+ index = get_logical_index(id);
+ if ((index >= 0) &&
+ !of_address_to_resource(np_apmu,
+ 0, &res))
+ fn(&res, index, bit);
+ }
+ of_node_put(np_cpu);
+ }
+ }
+ }
+}
+
+static void __init shmobile_smp_apmu_setup_boot(void)
{
/* install boot code shared by all CPUs */
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
+}
- /* perform per-cpu setup */
+void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
+ struct rcar_apmu_config *apmu_config,
+ int num)
+{
+ shmobile_smp_apmu_setup_boot();
apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
}
-#ifdef CONFIG_SMP
int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
/* For this particular CPU register boot vector */
@@ -136,7 +189,38 @@ int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
return apmu_wrap(cpu, apmu_power_on);
}
+
+static void __init shmobile_smp_apmu_prepare_cpus_dt(unsigned int max_cpus)
+{
+ shmobile_smp_apmu_setup_boot();
+ apmu_parse_dt(apmu_init_cpu);
+ rcar_gen2_pm_init();
+}
+
+static int shmobile_smp_apmu_boot_secondary_md21(unsigned int cpu,
+ struct task_struct *idle)
+{
+ /* Error out when hardware debug mode is enabled */
+ if (rcar_gen2_read_mode_pins() & BIT(21)) {
+ pr_warn("Unable to boot CPU%u when MD21 is set\n", cpu);
+ return -ENOTSUPP;
+ }
+
+ return shmobile_smp_apmu_boot_secondary(cpu, idle);
+}
+
+static struct smp_operations apmu_smp_ops __initdata = {
+ .smp_prepare_cpus = shmobile_smp_apmu_prepare_cpus_dt,
+ .smp_boot_secondary = shmobile_smp_apmu_boot_secondary_md21,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
+ .cpu_die = shmobile_smp_apmu_cpu_die,
+ .cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif
+};
+
+CPU_METHOD_OF_DECLARE(shmobile_smp_apmu, "renesas,apmu", &apmu_smp_ops);
+#endif /* CONFIG_SMP */
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_SUSPEND)
/* nicked from arch/arm/mach-exynos/hotplug.c */
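For reference, the "renesas,apmu" method added in the hunk above is hooked into the kernel through CPU_METHOD_OF_DECLARE(), which ties an smp_operations table to the "enable-method" string that the ARM boot code looks up in the device tree. A minimal sketch of that registration pattern follows; the names and the compatible string are placeholders for illustration, not code from this series.

#include <linux/init.h>
#include <linux/smp.h>

/*
 * Kick a secondary core; how that is done is platform specific
 * (mailbox write, SMC call, power-up register write, ...).
 */
static int example_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	return 0;
}

static const struct smp_operations example_smp_ops __initconst = {
	.smp_boot_secondary	= example_boot_secondary,
};

/*
 * Matched against the "enable-method" property of the cpu/cpus nodes,
 * e.g.  enable-method = "vendor,example-smp";
 */
CPU_METHOD_OF_DECLARE(example_smp, "vendor,example-smp", &example_smp_ops);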
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index b23378f3d7e1..f3dba6f356e2 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -36,3 +36,9 @@ bool shmobile_smp_cpu_can_disable(unsigned int cpu)
return true; /* Hotplug of any CPU is supported */
}
#endif
+
+bool __init shmobile_smp_init_fallback_ops(void)
+{
+ /* fallback on PSCI/smp_ops if no other DT based method is detected */
+ return platform_can_secondary_boot() ? true : false;
+}
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c
index 4174cbcbc467..5c9a93f5e650 100644
--- a/arch/arm/mach-shmobile/pm-r8a7779.c
+++ b/arch/arm/mach-shmobile/pm-r8a7779.c
@@ -23,11 +23,7 @@
static void __init r8a7779_sysc_init(void)
{
- void __iomem *base = rcar_sysc_init(0xffd85000);
-
- /* enable all interrupt sources, but do not use interrupt handler */
- iowrite32(0x0131000e, base + SYSCIER);
- iowrite32(0, base + SYSCIMR);
+ rcar_sysc_init(0xffd85000, 0x0131000e);
}
#else /* CONFIG_PM || CONFIG_SMP */
diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c
index 691ac166a277..dd9ac366868f 100644
--- a/arch/arm/mach-shmobile/pm-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c
@@ -26,8 +26,7 @@
#define CA7RESCNT 0x0044
/* On-chip RAM */
-#define MERAM 0xe8080000
-#define RAM 0xe6300000
+#define ICRAM1 0xe63c0000 /* Inter Connect RAM1 (4 KiB) */
/* SYSC */
#define SYSCIER 0x0c
@@ -37,11 +36,7 @@
static void __init rcar_gen2_sysc_init(u32 syscier)
{
- void __iomem *base = rcar_sysc_init(0xe6180000);
-
- /* enable all interrupt sources, but do not use interrupt handler */
- iowrite32(syscier, base + SYSCIER);
- iowrite32(0, base + SYSCIMR);
+ rcar_sysc_init(0xe6180000, syscier);
}
#else /* CONFIG_SMP */
@@ -58,7 +53,7 @@ void __init rcar_gen2_pm_init(void)
struct device_node *np, *cpus;
bool has_a7 = false;
bool has_a15 = false;
- phys_addr_t boot_vector_addr = 0;
+ phys_addr_t boot_vector_addr = ICRAM1;
u32 syscier = 0;
if (once++)
@@ -75,14 +70,10 @@ void __init rcar_gen2_pm_init(void)
has_a7 = true;
}
- if (of_machine_is_compatible("renesas,r8a7790")) {
- boot_vector_addr = MERAM;
+ if (of_machine_is_compatible("renesas,r8a7790"))
syscier = 0x013111ef;
-
- } else if (of_machine_is_compatible("renesas,r8a7791")) {
- boot_vector_addr = RAM;
+ else if (of_machine_is_compatible("renesas,r8a7791"))
syscier = 0x00111003;
- }
/* RAM for jump stub, because BAR requires 256KB aligned address */
p = ioremap_nocache(boot_vector_addr, shmobile_boot_size);
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index c0b05e9e6442..45a195501b78 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -131,13 +131,13 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
struct dev_power_governor *gov = rmobile_pd->gov;
genpd->flags = GENPD_FLAG_PM_CLK;
- pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup;
genpd->power_off = rmobile_pd_power_down;
genpd->power_on = rmobile_pd_power_up;
genpd->attach_dev = cpg_mstp_attach_dev;
genpd->detach_dev = cpg_mstp_detach_dev;
__rmobile_pd_power_up(rmobile_pd, false);
+ pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
}
static int rmobile_pd_suspend_busy(void)
diff --git a/arch/arm/mach-shmobile/setup-r8a7790.c b/arch/arm/mach-shmobile/setup-r8a7790.c
index 3a18af4922b4..3506327e0bed 100644
--- a/arch/arm/mach-shmobile/setup-r8a7790.c
+++ b/arch/arm/mach-shmobile/setup-r8a7790.c
@@ -28,6 +28,7 @@ static const char * const r8a7790_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(R8A7790_DT, "Generic R8A7790 (Flattened Device Tree)")
+ .smp_init = shmobile_smp_init_fallback_ops,
.smp = smp_ops(r8a7790_smp_ops),
.init_early = shmobile_init_delay,
.init_time = rcar_gen2_timer_init,
diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c
index 3b8dbaf07777..110e8b588e56 100644
--- a/arch/arm/mach-shmobile/setup-r8a7791.c
+++ b/arch/arm/mach-shmobile/setup-r8a7791.c
@@ -29,6 +29,7 @@ static const char *const r8a7791_boards_compat_dt[] __initconst = {
};
DT_MACHINE_START(R8A7791_DT, "Generic R8A7791 (Flattened Device Tree)")
+ .smp_init = shmobile_smp_init_fallback_ops,
.smp = smp_ops(r8a7791_smp_ops),
.init_early = shmobile_init_delay,
.init_time = rcar_gen2_timer_init,
diff --git a/arch/arm/mach-shmobile/setup-r8a7792.c b/arch/arm/mach-shmobile/setup-r8a7792.c
new file mode 100644
index 000000000000..a0910395da09
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-r8a7792.c
@@ -0,0 +1,35 @@
+/*
+ * r8a7792 processor support
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_platform.h>
+
+#include <asm/mach/arch.h>
+
+#include "common.h"
+#include "rcar-gen2.h"
+
+static const char * const r8a7792_boards_compat_dt[] __initconst = {
+ "renesas,r8a7792",
+ NULL,
+};
+
+DT_MACHINE_START(R8A7792_DT, "Generic R8A7792 (Flattened Device Tree)")
+ .init_early = shmobile_init_delay,
+ .init_late = shmobile_init_late,
+ .init_time = rcar_gen2_timer_init,
+ .reserve = rcar_gen2_reserve,
+ .dt_compat = r8a7792_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 1c6fd11c2f82..afb9fdcd3d90 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -46,6 +46,26 @@ u32 rcar_gen2_read_mode_pins(void)
return mode;
}
+static unsigned int __init get_extal_freq(void)
+{
+ struct device_node *cpg, *extal;
+ u32 freq = 20000000;
+
+ cpg = of_find_compatible_node(NULL, NULL,
+ "renesas,rcar-gen2-cpg-clocks");
+ if (!cpg)
+ return freq;
+
+ extal = of_parse_phandle(cpg, "clocks", 0);
+ of_node_put(cpg);
+ if (!extal)
+ return freq;
+
+ of_property_read_u32(extal, "clock-frequency", &freq);
+ of_node_put(extal);
+ return freq;
+}
+
#define CNTCR 0
#define CNTFID0 0x20
@@ -54,10 +74,10 @@ void __init rcar_gen2_timer_init(void)
u32 mode = rcar_gen2_read_mode_pins();
#ifdef CONFIG_ARM_ARCH_TIMER
void __iomem *base;
- int extal_mhz = 0;
u32 freq;
- if (of_machine_is_compatible("renesas,r8a7794")) {
+ if (of_machine_is_compatible("renesas,r8a7792") ||
+ of_machine_is_compatible("renesas,r8a7794")) {
freq = 260000000 / 8; /* ZS / 8 */
/* CNTVOFF has to be initialized either from non-secure
* Hypervisor mode or secure Monitor mode with SCR.NS==1.
@@ -82,26 +102,9 @@ void __init rcar_gen2_timer_init(void)
* with the counter disabled. Moreover, it may also report
* a potentially incorrect fixed 13 MHz frequency. To be
* correct these registers need to be updated to use the
- * frequency EXTAL / 2 which can be determined by the MD pins.
+ * frequency EXTAL / 2.
*/
-
- switch (mode & (MD(14) | MD(13))) {
- case 0:
- extal_mhz = 15;
- break;
- case MD(13):
- extal_mhz = 20;
- break;
- case MD(14):
- extal_mhz = 26;
- break;
- case MD(13) | MD(14):
- extal_mhz = 30;
- break;
- }
-
- /* The arch timer frequency equals EXTAL / 2 */
- freq = extal_mhz * (1000000 / 2);
+ freq = get_extal_freq() / 2;
}
/* Remap "armgcnt address map" space */
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig
index ea9ea95630bd..b7260c2b510c 100644
--- a/arch/arm/mach-spear/Kconfig
+++ b/arch/arm/mach-spear/Kconfig
@@ -5,9 +5,9 @@
menuconfig PLAT_SPEAR
bool "ST SPEAr Family"
depends on ARCH_MULTI_V7 || ARCH_MULTI_V5
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select CLKSRC_MMIO
+ select GPIOLIB
if PLAT_SPEAR
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 6f1af29f935d..119e1108b1f8 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -10,7 +10,7 @@ menuconfig ARCH_STI
select MFD_SYSCON
select ARCH_HAS_RESET_CONTROLLER
select HAVE_ARM_SCU if SMP
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select ARM_ERRATA_754322
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_775420
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index ae10fb280a78..e04cd1b201bb 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -23,7 +23,15 @@ static const char *const stih41x_dt_match[] __initconst = {
NULL
};
-DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree")
+static void sti_l2_write_sec(unsigned long val, unsigned reg)
+{
+ /*
+ * We can't write to secure registers as we are in non-secure
+ * mode, until we have some SMI service available.
+ */
+}
+
+DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree")
.dt_compat = stih41x_dt_match,
.l2c_aux_val = L2C_AUX_CTRL_SHARED_OVERRIDE |
L310_AUX_CTRL_DATA_PREFETCH |
@@ -31,4 +39,5 @@ DT_MACHINE_START(STM, "STiH415/416 SoC with Flattened Device Tree")
L2C_AUX_CTRL_WAY_SIZE(4),
.l2c_aux_mask = 0xc0000fff,
.smp = smp_ops(sti_smp_ops),
+ .l2c_write_sec = sti_l2_write_sec,
MACHINE_END
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index c124d658b350..096ed216c6d5 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,10 +1,10 @@
menuconfig ARCH_SUNXI
bool "Allwinner SoCs"
depends on ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select GPIOLIB
select PINCTRL
select SUN4I_TIMER
select RESET_CONTROLLER
diff --git a/arch/arm/mach-tango/Makefile b/arch/arm/mach-tango/Makefile
index f33935e42e77..204fcd9fe180 100644
--- a/arch/arm/mach-tango/Makefile
+++ b/arch/arm/mach-tango/Makefile
@@ -3,3 +3,4 @@ AFLAGS_smc.o := -Wa,-march=armv7-a$(plus_sec)
obj-y += setup.o smc.o
obj-$(CONFIG_SMP) += platsmp.o
+obj-$(CONFIG_SUSPEND) += pm.o
diff --git a/arch/arm/mach-tango/platsmp.c b/arch/arm/mach-tango/platsmp.c
index a21f55e000d2..98c62a4a8623 100644
--- a/arch/arm/mach-tango/platsmp.c
+++ b/arch/arm/mach-tango/platsmp.c
@@ -1,3 +1,4 @@
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include "smc.h"
@@ -9,8 +10,42 @@ static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * cpu_kill() and cpu_die() run concurrently on different cores.
+ * Firmware will only "kill" a core once it has properly "died".
+ * Try a few times to kill a core before giving up, and sleep
+ * between tries to give that core enough time to die.
+ */
+static int tango_cpu_kill(unsigned int cpu)
+{
+ int i, err;
+
+ for (i = 0; i < 10; ++i) {
+ msleep(10);
+ err = tango_aux_core_kill(cpu);
+ if (!err)
+ return true;
+ }
+
+ return false;
+}
+
+static void tango_cpu_die(unsigned int cpu)
+{
+ while (tango_aux_core_die(cpu) < 0)
+ cpu_relax();
+
+ panic("cpu %d failed to die\n", cpu);
+}
+#endif
+
static const struct smp_operations tango_smp_ops __initconst = {
.smp_boot_secondary = tango_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_kill = tango_cpu_kill,
+ .cpu_die = tango_cpu_die,
+#endif
};
CPU_METHOD_OF_DECLARE(tango4_smp, "sigma,tango4-smp", &tango_smp_ops);
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
new file mode 100644
index 000000000000..b05c6d6f99d0
--- /dev/null
+++ b/arch/arm/mach-tango/pm.c
@@ -0,0 +1,32 @@
+#include <linux/init.h>
+#include <linux/suspend.h>
+#include <asm/suspend.h>
+#include "smc.h"
+
+static int tango_pm_powerdown(unsigned long arg)
+{
+ tango_suspend(virt_to_phys(cpu_resume));
+
+ return -EIO; /* tango_suspend has failed */
+}
+
+static int tango_pm_enter(suspend_state_t state)
+{
+ if (state == PM_SUSPEND_MEM)
+ return cpu_suspend(0, tango_pm_powerdown);
+
+ return -EINVAL;
+}
+
+static const struct platform_suspend_ops tango_pm_ops = {
+ .enter = tango_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static int __init tango_pm_init(void)
+{
+ suspend_set_ops(&tango_pm_ops);
+ return 0;
+}
+
+late_initcall(tango_pm_init);
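Once tango_pm_init() has registered tango_pm_ops, suspend-to-RAM can be triggered through the standard /sys/power/state interface; the suspend core consults .valid (suspend_valid_only_mem) and eventually calls .enter (tango_pm_enter) as defined above. A trivial user-space sketch of that trigger — an illustration of the generic sysfs interface, not something shipped with this patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/state", "w");

	if (!f)
		return 1;

	/* "mem" selects PM_SUSPEND_MEM, handled by tango_pm_enter() */
	fputs("mem", f);
	return fclose(f) ? 1 : 0;
}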
diff --git a/arch/arm/mach-tango/smc.h b/arch/arm/mach-tango/smc.h
index 7a4af35cc390..57919539da1b 100644
--- a/arch/arm/mach-tango/smc.h
+++ b/arch/arm/mach-tango/smc.h
@@ -2,4 +2,7 @@ extern int tango_smc(unsigned int val, unsigned int service);
#define tango_set_l2_control(val) tango_smc(val, 0x102)
#define tango_start_aux_core(val) tango_smc(val, 0x104)
-#define tango_set_aux_boot_addr(val) tango_smc((unsigned int)val, 0x105)
+#define tango_set_aux_boot_addr(val) tango_smc(val, 0x105)
+#define tango_suspend(val) tango_smc(val, 0x120)
+#define tango_aux_core_die(val) tango_smc(val, 0x121)
+#define tango_aux_core_kill(val) tango_smc(val, 0x122)
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 0fa8b84ed657..329f01c5b6f8 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -1,11 +1,11 @@
menuconfig ARCH_TEGRA
bool "NVIDIA Tegra"
depends on ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
select ARM_AMBA
select ARM_GIC
select CLKSRC_MMIO
+ select GPIOLIB
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select PINCTRL
diff --git a/arch/arm/mach-tegra/common.h b/arch/arm/mach-tegra/common.h
index 1f6fb808e236..4cc00e993b00 100644
--- a/arch/arm/mach-tegra/common.h
+++ b/arch/arm/mach-tegra/common.h
@@ -1,4 +1,26 @@
+/*
+ * Copyright (c) 2011, ARM Ltd.
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MACH_TEGRA_COMMON_H
+#define __MACH_TEGRA_COMMON_H
+
extern const struct smp_operations tegra_smp_ops;
extern int tegra_cpu_kill(unsigned int cpu);
extern void tegra_cpu_die(unsigned int cpu);
+
+#endif
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index 9157546fe68c..d3aa9be16621 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -26,6 +26,7 @@
#include <asm/suspend.h>
#include <asm/psci.h>
+#include "cpuidle.h"
#include "pm.h"
#include "sleep.h"
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 7469347b1749..afcee04f2616 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -30,6 +30,7 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
+#include "cpuidle.h"
#include "flowctrl.h"
#include "iomap.h"
#include "irq.h"
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index 4dbe1dae937c..c1417361e10e 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -30,6 +30,7 @@
#include <asm/smp_plat.h>
#include <asm/suspend.h>
+#include "cpuidle.h"
#include "pm.h"
#include "sleep.h"
diff --git a/arch/arm/mach-tegra/cpuidle.h b/arch/arm/mach-tegra/cpuidle.h
index c017dab60ffa..dd1624d4b294 100644
--- a/arch/arm/mach-tegra/cpuidle.h
+++ b/arch/arm/mach-tegra/cpuidle.h
@@ -23,8 +23,10 @@ void tegra20_cpuidle_pcie_irqs_in_use(void);
int tegra30_cpuidle_init(void);
int tegra114_cpuidle_init(void);
void tegra_cpuidle_init(void);
+void tegra_cpuidle_pcie_irqs_in_use(void);
#else
static inline void tegra_cpuidle_init(void) {}
+static inline void tegra_cpuidle_pcie_irqs_in_use(void) {}
#endif
#endif
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c
index 1b129899a277..8ec707826072 100644
--- a/arch/arm/mach-tegra/hotplug.c
+++ b/arch/arm/mach-tegra/hotplug.c
@@ -17,6 +17,7 @@
#include <asm/smp_plat.h>
+#include "common.h"
#include "sleep.h"
static void (*tegra_hotplug_shutdown)(void);
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index 3b9098d27ea5..a69b22d37eed 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -29,6 +29,7 @@
#include "board.h"
#include "iomap.h"
+#include "irq.h"
#define SGI_MASK 0xFFFF
diff --git a/arch/arm/mach-tegra/pm.h b/arch/arm/mach-tegra/pm.h
index 83bc87583446..717b48f22912 100644
--- a/arch/arm/mach-tegra/pm.h
+++ b/arch/arm/mach-tegra/pm.h
@@ -36,7 +36,7 @@ void tegra30_sleep_core_init(void);
void tegra_clear_cpu_in_lp2(void);
bool tegra_set_cpu_in_lp2(void);
-
+int tegra_cpu_do_idle(void);
void tegra_idle_lp2_last(void);
extern void (*tegra_tear_down_cpu)(void);
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 6745a657d261..e01cbca196b5 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -118,32 +118,14 @@ out:
of_platform_default_populate(NULL, NULL, parent);
}
-static void __init paz00_init(void)
-{
- if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
- tegra_paz00_wifikill_init();
-}
-
-static struct {
- char *machine;
- void (*init)(void);
-} board_init_funcs[] = {
- { "compal,paz00", paz00_init },
-};
-
static void __init tegra_dt_init_late(void)
{
- int i;
-
tegra_init_suspend();
tegra_cpuidle_init();
- for (i = 0; i < ARRAY_SIZE(board_init_funcs); i++) {
- if (of_machine_is_compatible(board_init_funcs[i].machine)) {
- board_init_funcs[i].init();
- break;
- }
- }
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) &&
+ of_machine_is_compatible("compal,paz00"))
+ tegra_paz00_wifikill_init();
}
static const char * const tegra_dt_board_compat[] = {
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 4fdc3425ffbd..22dcbf5b76b2 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -1,11 +1,11 @@
menuconfig ARCH_U300
bool "ST-Ericsson U300 Series"
depends on ARCH_MULTI_V5 && MMU
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_VIC
select U300_TIMER
select CPU_ARM926T
+ select GPIOLIB
select HAVE_TCM
select PINCTRL
select PINCTRL_COH901
diff --git a/arch/arm/mach-uniphier/Makefile b/arch/arm/mach-uniphier/Makefile
index 1233f9b610bc..396afe109e67 100644
--- a/arch/arm/mach-uniphier/Makefile
+++ b/arch/arm/mach-uniphier/Makefile
@@ -1,2 +1 @@
-obj-y := uniphier.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c
index e802ca836ec7..9978c41128f6 100644
--- a/arch/arm/mach-uniphier/platsmp.c
+++ b/arch/arm/mach-uniphier/platsmp.c
@@ -101,21 +101,13 @@ static int __init uniphier_smp_prepare_trampoline(unsigned int max_cpus)
np = of_find_compatible_node(NULL, NULL, "socionext,uniphier-smpctrl");
ret = of_address_to_resource(np, 0, &res);
of_node_put(np);
- if (!ret) {
- rom_rsv2_phys = res.start + UNIPHIER_SMPCTRL_ROM_RSV2;
- } else {
- /* try old binding too */
- np = of_find_compatible_node(NULL, NULL,
- "socionext,uniphier-system-bus-controller");
- ret = of_address_to_resource(np, 1, &res);
- of_node_put(np);
- if (ret) {
- pr_err("failed to get resource of SMP control\n");
- return ret;
- }
- rom_rsv2_phys = res.start + 0x1000 + UNIPHIER_SMPCTRL_ROM_RSV2;
+ if (ret) {
+ pr_err("failed to get resource of SMP control\n");
+ return ret;
}
+ rom_rsv2_phys = res.start + UNIPHIER_SMPCTRL_ROM_RSV2;
+
ret = uniphier_smp_copy_trampoline(rom_rsv2_phys);
if (ret)
return ret;
diff --git a/arch/arm/mach-uniphier/uniphier.c b/arch/arm/mach-uniphier/uniphier.c
deleted file mode 100644
index 9be10efacb7d..000000000000
--- a/arch/arm/mach-uniphier/uniphier.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <asm/mach/arch.h>
-
-static const char * const uniphier_dt_compat[] __initconst = {
- "socionext,ph1-sld3",
- "socionext,ph1-ld4",
- "socionext,ph1-pro4",
- "socionext,ph1-sld8",
- "socionext,ph1-pro5",
- "socionext,proxstream2",
- "socionext,ph1-ld6b",
- NULL,
-};
-
-DT_MACHINE_START(UNIPHIER, "Socionext UniPhier")
- .dt_compat = uniphier_dt_compat,
-MACHINE_END
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 3185081bdb2c..4740ac393297 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -3,13 +3,13 @@ menuconfig ARCH_U8500
depends on ARCH_MULTI_V7 && MMU
select AB8500_CORE
select ABX500_CORE
- select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_ERRATA_754322
select ARM_ERRATA_764369 if SMP
select ARM_GIC
select CACHE_L2X0
select CLKSRC_NOMADIK_MTU
+ select GPIOLIB
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select PINCTRL
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index edfff1ae1f8d..56d0eb6e254e 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -2,11 +2,9 @@
# Makefile for the linux kernel, U8500 machine.
#
-obj-y := cpu.o id.o pm.o
-obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
+obj-y := pm.o
obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o
-obj-$(CONFIG_MACH_MOP500) += board-mop500-regulators.o \
- board-mop500-audio.o
+obj-$(CONFIG_MACH_MOP500) += board-mop500-audio.o
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
deleted file mode 100644
index 32d744e91ec2..000000000000
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ /dev/null
@@ -1,1065 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- *
- * Authors: Sundar Iyer <sundar.iyer@stericsson.com>
- * Bengt Jonsson <bengt.g.jonsson@stericsson.com>
- * Daniel Willerud <daniel.willerud@stericsson.com>
- *
- * MOP500 board specific initialization for regulators
- */
-#include <linux/kernel.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/ab8500.h>
-#include "board-mop500-regulators.h"
-#include "id.h"
-
-static struct regulator_consumer_supply gpio_en_3v3_consumers[] = {
- REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-struct regulator_init_data gpio_en_3v3_regulator = {
- .constraints = {
- .name = "EN-3V3",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(gpio_en_3v3_consumers),
- .consumer_supplies = gpio_en_3v3_consumers,
-};
-
-/*
- * TPS61052 regulator
- */
-static struct regulator_consumer_supply tps61052_vaudio_consumers[] = {
- /*
- * Boost converter supply to raise voltage on audio speaker, this
- * is actually connected to three pins, VInVhfL (left amplifier)
- * VInVhfR (right amplifier) and VIntDClassInt - all three must
- * be connected to the same voltage.
- */
- REGULATOR_SUPPLY("vintdclassint", "ab8500-codec.0"),
-};
-
-struct regulator_init_data tps61052_regulator = {
- .constraints = {
- .name = "vaudio-hf",
- .min_uV = 4500000,
- .max_uV = 4500000,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(tps61052_vaudio_consumers),
- .consumer_supplies = tps61052_vaudio_consumers,
-};
-
-static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
- /* Main display, u8500 R3 uib */
- REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
- /* Main display, u8500 uib and ST uib */
- REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
- /* Secondary display, ST uib */
- REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
- /* SFH7741 proximity sensor */
- REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
- /* BH1780GLS ambient light sensor */
- REGULATOR_SUPPLY("vcc", "2-0029"),
- /* lsm303dlh accelerometer */
- REGULATOR_SUPPLY("vdd", "2-0018"),
- /* lsm303dlhc accelerometer */
- REGULATOR_SUPPLY("vdd", "2-0019"),
- /* lsm303dlh magnetometer */
- REGULATOR_SUPPLY("vdd", "2-001e"),
- /* Rohm BU21013 Touchscreen devices */
- REGULATOR_SUPPLY("avdd", "3-005c"),
- REGULATOR_SUPPLY("avdd", "3-005d"),
- /* Synaptics RMI4 Touchscreen device */
- REGULATOR_SUPPLY("vdd", "3-004b"),
- /* L3G4200D Gyroscope device */
- REGULATOR_SUPPLY("vdd", "2-0068"),
- /* Ambient light sensor device */
- REGULATOR_SUPPLY("vdd", "3-0029"),
- /* Pressure sensor device */
- REGULATOR_SUPPLY("vdd", "2-005c"),
- /* Cypress TrueTouch Touchscreen device */
- REGULATOR_SUPPLY("vcpin", "spi8.0"),
- /* Camera device */
- REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
-};
-
-static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
- /* On-board eMMC power */
- REGULATOR_SUPPLY("vmmc", "sdi4"),
- /* AB8500 audio codec */
- REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
- /* AB8500 accessory detect 1 */
- REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
- /* AB8500 Tv-out device */
- REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
- /* AV8100 HDMI device */
- REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
-};
-
-static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
- REGULATOR_SUPPLY("v-SD-STM", "stm"),
- /* External MMC slot power */
- REGULATOR_SUPPLY("vmmc", "sdi0"),
-};
-
-static struct regulator_consumer_supply ab8505_vaux4_consumers[] = {
-};
-
-static struct regulator_consumer_supply ab8505_vaux5_consumers[] = {
-};
-
-static struct regulator_consumer_supply ab8505_vaux6_consumers[] = {
-};
-
-static struct regulator_consumer_supply ab8505_vaux8_consumers[] = {
- /* AB8500 audio codec device */
- REGULATOR_SUPPLY("v-aux8", NULL),
-};
-
-static struct regulator_consumer_supply ab8505_vadc_consumers[] = {
- /* Internal general-purpose ADC */
- REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
- /* ADC for charger */
- REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
- /* TV-out DENC supply */
- REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
- /* Internal general-purpose ADC */
- REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
- /* ADC for charger */
- REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
- /* AB8500 Tv-out device */
- REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
-};
-
-static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
- /* AB8500 audio-codec main supply */
- REGULATOR_SUPPLY("vaud", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vamic1_consumers[] = {
- /* AB8500 audio-codec Mic1 supply */
- REGULATOR_SUPPLY("vamic1", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vamic2_consumers[] = {
- /* AB8500 audio-codec Mic2 supply */
- REGULATOR_SUPPLY("vamic2", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vdmic_consumers[] = {
- /* AB8500 audio-codec DMic supply */
- REGULATOR_SUPPLY("vdmic", "ab8500-codec.0"),
-};
-
-static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
- /* SoC core supply, no device */
- REGULATOR_SUPPLY("v-intcore", NULL),
- /* USB Transceiver */
- REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
- /* Handled by abx500 clk driver */
- REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
-};
-
-static struct regulator_consumer_supply ab8505_usb_consumers[] = {
- /* HS USB OTG physical interface */
- REGULATOR_SUPPLY("v-ape", NULL),
-};
-
-static struct regulator_consumer_supply ab8500_vana_consumers[] = {
- /* DB8500 DSI */
- REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
- REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
- REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
- REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
- REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
- REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
- /* DB8500 CSI */
- REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
-};
-
-/* ab8500 regulator register initialization */
-static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
- /*
- * VanaRequestCtrl = HP/LP depending on VxRequest
- * VextSupply1RequestCtrl = HP/LP depending on VxRequest
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
- /*
- * VextSupply2RequestCtrl = HP/LP depending on VxRequest
- * VextSupply3RequestCtrl = HP/LP depending on VxRequest
- * Vaux1RequestCtrl = HP/LP depending on VxRequest
- * Vaux2RequestCtrl = HP/LP depending on VxRequest
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
- /*
- * Vaux3RequestCtrl = HP/LP depending on VxRequest
- * SwHPReq = Control through SWValid disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
- /*
- * VanaSysClkReq1HPValid = disabled
- * Vaux1SysClkReq1HPValid = disabled
- * Vaux2SysClkReq1HPValid = disabled
- * Vaux3SysClkReq1HPValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
- /*
- * VextSupply1SysClkReq1HPValid = disabled
- * VextSupply2SysClkReq1HPValid = disabled
- * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
- /*
- * VanaHwHPReq1Valid = disabled
- * Vaux1HwHPreq1Valid = disabled
- * Vaux2HwHPReq1Valid = disabled
- * Vaux3HwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
- /*
- * VextSupply1HwHPReq1Valid = disabled
- * VextSupply2HwHPReq1Valid = disabled
- * VextSupply3HwHPReq1Valid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
- /*
- * VanaHwHPReq2Valid = disabled
- * Vaux1HwHPReq2Valid = disabled
- * Vaux2HwHPReq2Valid = disabled
- * Vaux3HwHPReq2Valid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
- /*
- * VextSupply1HwHPReq2Valid = disabled
- * VextSupply2HwHPReq2Valid = disabled
- * VextSupply3HwHPReq2Valid = HWReq2 controlled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
- /*
- * VanaSwHPReqValid = disabled
- * Vaux1SwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
- /*
- * Vaux2SwHPReqValid = disabled
- * Vaux3SwHPReqValid = disabled
- * VextSupply1SwHPReqValid = disabled
- * VextSupply2SwHPReqValid = disabled
- * VextSupply3SwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
- /*
- * SysClkReq2Valid1 = SysClkReq2 controlled
- * SysClkReq3Valid1 = disabled
- * SysClkReq4Valid1 = SysClkReq4 controlled
- * SysClkReq5Valid1 = disabled
- * SysClkReq6Valid1 = SysClkReq6 controlled
- * SysClkReq7Valid1 = disabled
- * SysClkReq8Valid1 = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
- /*
- * SysClkReq2Valid2 = disabled
- * SysClkReq3Valid2 = disabled
- * SysClkReq4Valid2 = disabled
- * SysClkReq5Valid2 = disabled
- * SysClkReq6Valid2 = SysClkReq6 controlled
- * SysClkReq7Valid2 = disabled
- * SysClkReq8Valid2 = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
- /*
- * VTVoutEna = disabled
- * Vintcore12Ena = disabled
- * Vintcore12Sel = 1.25 V
- * Vintcore12LP = inactive (HP)
- * VTVoutLP = inactive (HP)
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
- /*
- * VaudioEna = disabled
- * VdmicEna = disabled
- * Vamic1Ena = disabled
- * Vamic2Ena = disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
- /*
- * Vamic1_dzout = high-Z when Vamic1 is disabled
- * Vamic2_dzout = high-Z when Vamic2 is disabled
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
- /*
- * VPll = Hw controlled (NOTE! PRCMU bits)
- * VanaRegu = force off
- */
- INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
- /*
- * VrefDDREna = disabled
- * VrefDDRSleepMode = inactive (no pulldown)
- */
- INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
- /*
- * VextSupply1Regu = force LP
- * VextSupply2Regu = force OFF
- * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
- * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
- * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
- */
- INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
- /*
- * Vaux1Regu = force HP
- * Vaux2Regu = force off
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
- /*
- * Vaux3Regu = force off
- */
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
- /*
- * Vaux1Sel = 2.8 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
- /*
- * Vaux2Sel = 2.9 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
- /*
- * Vaux3Sel = 2.91 V
- */
- INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
- /*
- * VextSupply12LP = disabled (no LP)
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
- /*
- * Vaux1Disch = short discharge time
- * Vaux2Disch = short discharge time
- * Vaux3Disch = short discharge time
- * Vintcore12Disch = short discharge time
- * VTVoutDisch = short discharge time
- * VaudioDisch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
- /*
- * VanaDisch = short discharge time
- * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
- * VdmicDisch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
-};
-
-/* AB8500 regulators */
-static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
- /* supplies to the display/camera */
- [AB8500_LDO_AUX1] = {
- .supply_regulator = "ab8500-ext-supply3",
- .constraints = {
- .name = "V-DISPLAY",
- .min_uV = 2800000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
- .boot_on = 1, /* display is on at boot */
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
- .consumer_supplies = ab8500_vaux1_consumers,
- },
- /* supplies to the on-board eMMC */
- [AB8500_LDO_AUX2] = {
- .supply_regulator = "ab8500-ext-supply3",
- .constraints = {
- .name = "V-eMMC1",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
- .consumer_supplies = ab8500_vaux2_consumers,
- },
- /* supply for VAUX3, supplies to SDcard slots */
- [AB8500_LDO_AUX3] = {
- .supply_regulator = "ab8500-ext-supply3",
- .constraints = {
- .name = "V-MMC-SD",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
- .consumer_supplies = ab8500_vaux3_consumers,
- },
- /* supply for tvout, gpadc, TVOUT LDO */
- [AB8500_LDO_TVOUT] = {
- .constraints = {
- .name = "V-TVOUT",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vtvout_consumers),
- .consumer_supplies = ab8500_vtvout_consumers,
- },
- /* supply for ab8500-vaudio, VAUDIO LDO */
- [AB8500_LDO_AUDIO] = {
- .constraints = {
- .name = "V-AUD",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
- .consumer_supplies = ab8500_vaud_consumers,
- },
- /* supply for v-anamic1 VAMic1-LDO */
- [AB8500_LDO_ANAMIC1] = {
- .constraints = {
- .name = "V-AMIC1",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
- .consumer_supplies = ab8500_vamic1_consumers,
- },
- /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
- [AB8500_LDO_ANAMIC2] = {
- .constraints = {
- .name = "V-AMIC2",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
- .consumer_supplies = ab8500_vamic2_consumers,
- },
- /* supply for v-dmic, VDMIC LDO */
- [AB8500_LDO_DMIC] = {
- .constraints = {
- .name = "V-DMIC",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers),
- .consumer_supplies = ab8500_vdmic_consumers,
- },
- /* supply for v-intcore12, VINTCORE12 LDO */
- [AB8500_LDO_INTCORE] = {
- .constraints = {
- .name = "V-INTCORE",
- .min_uV = 1250000,
- .max_uV = 1350000,
- .input_uV = 1800000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE |
- REGULATOR_CHANGE_DRMS,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
- .consumer_supplies = ab8500_vintcore_consumers,
- },
- /* supply for U8500 CSI-DSI, VANA LDO */
- [AB8500_LDO_ANA] = {
- .constraints = {
- .name = "V-CSI-DSI",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
- .consumer_supplies = ab8500_vana_consumers,
- },
-};
-
-/* supply for VextSupply3 */
-static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
- /* SIM supply for 3 V SIM cards */
- REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
-};
-
-/* extended configuration for VextSupply2, only used for HREFP_V20 boards */
-static struct ab8500_ext_regulator_cfg ab8500_ext_supply2 = {
- .hwreq = true,
-};
-
-/*
- * AB8500 external regulators
- */
-static struct regulator_init_data ab8500_ext_regulators[] = {
- /* fixed Vbat supplies VSMPS1_EXT_1V8 */
- [AB8500_EXT_SUPPLY1] = {
- .constraints = {
- .name = "ab8500-ext-supply1",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .initial_mode = REGULATOR_MODE_IDLE,
- .boot_on = 1,
- .always_on = 1,
- },
- },
- /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
- [AB8500_EXT_SUPPLY2] = {
- .constraints = {
- .name = "ab8500-ext-supply2",
- .min_uV = 1360000,
- .max_uV = 1360000,
- },
- },
- /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
- [AB8500_EXT_SUPPLY3] = {
- .constraints = {
- .name = "ab8500-ext-supply3",
- .min_uV = 3400000,
- .max_uV = 3400000,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- .boot_on = 1,
- },
- .num_consumer_supplies =
- ARRAY_SIZE(ab8500_ext_supply3_consumers),
- .consumer_supplies = ab8500_ext_supply3_consumers,
- },
-};
-
-/* ab8505 regulator register initialization */
-static struct ab8500_regulator_reg_init ab8505_reg_init[] = {
- /*
- * VarmRequestCtrl
- * VsmpsCRequestCtrl
- * VsmpsARequestCtrl
- * VsmpsBRequestCtrl
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL1, 0x00, 0x00),
- /*
- * VsafeRequestCtrl
- * VpllRequestCtrl
- * VanaRequestCtrl = HP/LP depending on VxRequest
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL2, 0x30, 0x00),
- /*
- * Vaux1RequestCtrl = HP/LP depending on VxRequest
- * Vaux2RequestCtrl = HP/LP depending on VxRequest
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL3, 0xf0, 0x00),
- /*
- * Vaux3RequestCtrl = HP/LP depending on VxRequest
- * SwHPReq = Control through SWValid disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUREQUESTCTRL4, 0x07, 0x00),
- /*
- * VsmpsASysClkReq1HPValid
- * VsmpsBSysClkReq1HPValid
- * VsafeSysClkReq1HPValid
- * VanaSysClkReq1HPValid = disabled
- * VpllSysClkReq1HPValid
- * Vaux1SysClkReq1HPValid = disabled
- * Vaux2SysClkReq1HPValid = disabled
- * Vaux3SysClkReq1HPValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
- /*
- * VsmpsCSysClkReq1HPValid
- * VarmSysClkReq1HPValid
- * VbbSysClkReq1HPValid
- * VsmpsMSysClkReq1HPValid
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQ1HPVALID2, 0x00, 0x00),
- /*
- * VsmpsAHwHPReq1Valid
- * VsmpsBHwHPReq1Valid
- * VsafeHwHPReq1Valid
- * VanaHwHPReq1Valid = disabled
- * VpllHwHPReq1Valid
- * Vaux1HwHPreq1Valid = disabled
- * Vaux2HwHPReq1Valid = disabled
- * Vaux3HwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID1, 0xe8, 0x00),
- /*
- * VsmpsMHwHPReq1Valid
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ1VALID2, 0x00, 0x00),
- /*
- * VsmpsAHwHPReq2Valid
- * VsmpsBHwHPReq2Valid
- * VsafeHwHPReq2Valid
- * VanaHwHPReq2Valid = disabled
- * VpllHwHPReq2Valid
- * Vaux1HwHPReq2Valid = disabled
- * Vaux2HwHPReq2Valid = disabled
- * Vaux3HwHPReq2Valid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID1, 0xe8, 0x00),
- /*
- * VsmpsMHwHPReq2Valid
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUHWHPREQ2VALID2, 0x00, 0x00),
- /**
- * VsmpsCSwHPReqValid
- * VarmSwHPReqValid
- * VsmpsASwHPReqValid
- * VsmpsBSwHPReqValid
- * VsafeSwHPReqValid
- * VanaSwHPReqValid
- * VanaSwHPReqValid = disabled
- * VpllSwHPReqValid
- * Vaux1SwHPReqValid = disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID1, 0xa0, 0x00),
- /*
- * Vaux2SwHPReqValid = disabled
- * Vaux3SwHPReqValid = disabled
- * VsmpsMSwHPReqValid
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUSWHPREQVALID2, 0x03, 0x00),
- /*
- * SysClkReq2Valid1 = SysClkReq2 controlled
- * SysClkReq3Valid1 = disabled
- * SysClkReq4Valid1 = SysClkReq4 controlled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID1, 0x0e, 0x0a),
- /*
- * SysClkReq2Valid2 = disabled
- * SysClkReq3Valid2 = disabled
- * SysClkReq4Valid2 = disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUSYSCLKREQVALID2, 0x0e, 0x00),
- /*
- * Vaux4SwHPReqValid
- * Vaux4HwHPReq2Valid
- * Vaux4HwHPReq1Valid
- * Vaux4SysClkReq1HPValid
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUVAUX4REQVALID, 0x00, 0x00),
- /*
- * VadcEna = disabled
- * VintCore12Ena = disabled
- * VintCore12Sel = 1.25 V
- * VintCore12LP = inactive (HP)
- * VadcLP = inactive (HP)
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUMISC1, 0xfe, 0x10),
- /*
- * VaudioEna = disabled
- * Vaux8Ena = disabled
- * Vamic1Ena = disabled
- * Vamic2Ena = disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUDIOSUPPLY, 0x1e, 0x00),
- /*
- * Vamic1_dzout = high-Z when Vamic1 is disabled
- * Vamic2_dzout = high-Z when Vamic2 is disabled
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUCTRL1VAMIC, 0x03, 0x00),
- /*
- * VsmpsARegu
- * VsmpsASelCtrl
- * VsmpsAAutoMode
- * VsmpsAPWMMode
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSAREGU, 0x00, 0x00),
- /*
- * VsmpsBRegu
- * VsmpsBSelCtrl
- * VsmpsBAutoMode
- * VsmpsBPWMMode
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSBREGU, 0x00, 0x00),
- /*
- * VsafeRegu
- * VsafeSelCtrl
- * VsafeAutoMode
- * VsafePWMMode
- */
- INIT_REGULATOR_REGISTER(AB8505_VSAFEREGU, 0x00, 0x00),
- /*
- * VPll = Hw controlled (NOTE! PRCMU bits)
- * VanaRegu = force off
- */
- INIT_REGULATOR_REGISTER(AB8505_VPLLVANAREGU, 0x0f, 0x02),
- /*
- * VextSupply1Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
- * VextSupply2Regu = force OFF (OTP_ExtSupply12LPnPolarity 1)
- * VextSupply3Regu = force OFF (OTP_ExtSupply3LPnPolarity 0)
- * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
- * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
- */
- INIT_REGULATOR_REGISTER(AB8505_EXTSUPPLYREGU, 0xff, 0x30),
- /*
- * Vaux1Regu = force HP
- * Vaux2Regu = force off
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUX12REGU, 0x0f, 0x01),
- /*
- * Vaux3Regu = force off
- */
- INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3REGU, 0x03, 0x00),
- /*
- * VsmpsASel1
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL1, 0x00, 0x00),
- /*
- * VsmpsASel2
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL2, 0x00, 0x00),
- /*
- * VsmpsASel3
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSASEL3, 0x00, 0x00),
- /*
- * VsmpsBSel1
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL1, 0x00, 0x00),
- /*
- * VsmpsBSel2
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL2, 0x00, 0x00),
- /*
- * VsmpsBSel3
- */
- INIT_REGULATOR_REGISTER(AB8505_VSMPSBSEL3, 0x00, 0x00),
- /*
- * VsafeSel1
- */
- INIT_REGULATOR_REGISTER(AB8505_VSAFESEL1, 0x00, 0x00),
- /*
- * VsafeSel2
- */
- INIT_REGULATOR_REGISTER(AB8505_VSAFESEL2, 0x00, 0x00),
- /*
- * VsafeSel3
- */
- INIT_REGULATOR_REGISTER(AB8505_VSAFESEL3, 0x00, 0x00),
- /*
- * Vaux1Sel = 2.8 V
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUX1SEL, 0x0f, 0x0C),
- /*
- * Vaux2Sel = 2.9 V
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUX2SEL, 0x0f, 0x0d),
- /*
- * Vaux3Sel = 2.91 V
- */
- INIT_REGULATOR_REGISTER(AB8505_VRF1VAUX3SEL, 0x07, 0x07),
- /*
- * Vaux4RequestCtrl
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUX4REQCTRL, 0x00, 0x00),
- /*
- * Vaux4Regu
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUX4REGU, 0x00, 0x00),
- /*
- * Vaux4Sel
- */
- INIT_REGULATOR_REGISTER(AB8505_VAUX4SEL, 0x00, 0x00),
- /*
- * Vaux1Disch = short discharge time
- * Vaux2Disch = short discharge time
- * Vaux3Disch = short discharge time
- * Vintcore12Disch = short discharge time
- * VTVoutDisch = short discharge time
- * VaudioDisch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH, 0xfc, 0x00),
- /*
- * VanaDisch = short discharge time
- * Vaux8PullDownEna = pulldown disabled when Vaux8 is disabled
- * Vaux8Disch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH2, 0x16, 0x00),
- /*
- * Vaux4Disch = short discharge time
- */
- INIT_REGULATOR_REGISTER(AB8505_REGUCTRLDISCH3, 0x01, 0x00),
- /*
- * Vaux5Sel
- * Vaux5LP
- * Vaux5Ena
- * Vaux5Disch
- * Vaux5DisSfst
- * Vaux5DisPulld
- */
- INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX5, 0x00, 0x00),
- /*
- * Vaux6Sel
- * Vaux6LP
- * Vaux6Ena
- * Vaux6DisPulld
- */
- INIT_REGULATOR_REGISTER(AB8505_CTRLVAUX6, 0x00, 0x00),
-};
-
-static struct regulator_init_data ab8505_regulators[AB8505_NUM_REGULATORS] = {
- /* supplies to the display/camera */
- [AB8505_LDO_AUX1] = {
- .constraints = {
- .name = "V-DISPLAY",
- .min_uV = 2800000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS,
- .boot_on = 1, /* display is on at boot */
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
- .consumer_supplies = ab8500_vaux1_consumers,
- },
- /* supplies to the on-board eMMC */
- [AB8505_LDO_AUX2] = {
- .constraints = {
- .name = "V-eMMC1",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
- .consumer_supplies = ab8500_vaux2_consumers,
- },
- /* supply for VAUX3, supplies to SDcard slots */
- [AB8505_LDO_AUX3] = {
- .constraints = {
- .name = "V-MMC-SD",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
- .consumer_supplies = ab8500_vaux3_consumers,
- },
- /* supply for VAUX4, supplies to NFC and standalone secure element */
- [AB8505_LDO_AUX4] = {
- .constraints = {
- .name = "V-NFC-SE",
- .min_uV = 1100000,
- .max_uV = 3300000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux4_consumers),
- .consumer_supplies = ab8505_vaux4_consumers,
- },
- /* supply for VAUX5, supplies to TBD */
- [AB8505_LDO_AUX5] = {
- .constraints = {
- .name = "V-AUX5",
- .min_uV = 1050000,
- .max_uV = 2790000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux5_consumers),
- .consumer_supplies = ab8505_vaux5_consumers,
- },
- /* supply for VAUX6, supplies to TBD */
- [AB8505_LDO_AUX6] = {
- .constraints = {
- .name = "V-AUX6",
- .min_uV = 1050000,
- .max_uV = 2790000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux6_consumers),
- .consumer_supplies = ab8505_vaux6_consumers,
- },
- /* supply for gpadc, ADC LDO */
- [AB8505_LDO_ADC] = {
- .constraints = {
- .name = "V-ADC",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8505_vadc_consumers),
- .consumer_supplies = ab8505_vadc_consumers,
- },
- /* supply for ab8500-vaudio, VAUDIO LDO */
- [AB8505_LDO_AUDIO] = {
- .constraints = {
- .name = "V-AUD",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
- .consumer_supplies = ab8500_vaud_consumers,
- },
- /* supply for v-anamic1 VAMic1-LDO */
- [AB8505_LDO_ANAMIC1] = {
- .constraints = {
- .name = "V-AMIC1",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
- .consumer_supplies = ab8500_vamic1_consumers,
- },
- /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
- [AB8505_LDO_ANAMIC2] = {
- .constraints = {
- .name = "V-AMIC2",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
- .consumer_supplies = ab8500_vamic2_consumers,
- },
- /* supply for v-aux8, VAUX8 LDO */
- [AB8505_LDO_AUX8] = {
- .constraints = {
- .name = "V-AUX8",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8505_vaux8_consumers),
- .consumer_supplies = ab8505_vaux8_consumers,
- },
- /* supply for v-intcore12, VINTCORE12 LDO */
- [AB8505_LDO_INTCORE] = {
- .constraints = {
- .name = "V-INTCORE",
- .min_uV = 1250000,
- .max_uV = 1350000,
- .input_uV = 1800000,
- .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
- REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE |
- REGULATOR_CHANGE_DRMS,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
- .consumer_supplies = ab8500_vintcore_consumers,
- },
- /* supply for LDO USB */
- [AB8505_LDO_USB] = {
- .constraints = {
- .name = "V-USB",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS |
- REGULATOR_CHANGE_MODE,
- .valid_modes_mask = REGULATOR_MODE_NORMAL |
- REGULATOR_MODE_IDLE,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8505_usb_consumers),
- .consumer_supplies = ab8505_usb_consumers,
- },
- /* supply for U8500 CSI-DSI, VANA LDO */
- [AB8505_LDO_ANA] = {
- .constraints = {
- .name = "V-CSI-DSI",
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
- .consumer_supplies = ab8500_vana_consumers,
- },
-};
-
-struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
- .reg_init = ab8500_reg_init,
- .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
- .regulator = ab8500_regulators,
- .num_regulator = ARRAY_SIZE(ab8500_regulators),
- .ext_regulator = ab8500_ext_regulators,
- .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
-};
-
-struct ab8500_regulator_platform_data ab8505_regulator_plat_data = {
- .reg_init = ab8505_reg_init,
- .num_reg_init = ARRAY_SIZE(ab8505_reg_init),
- .regulator = ab8505_regulators,
- .num_regulator = ARRAY_SIZE(ab8505_regulators),
-};
-
-static void ab8500_modify_reg_init(int id, u8 mask, u8 value)
-{
- int i;
-
- if (cpu_is_u8520()) {
- for (i = ARRAY_SIZE(ab8505_reg_init) - 1; i >= 0; i--) {
- if (ab8505_reg_init[i].id == id) {
- u8 initval = ab8505_reg_init[i].value;
- initval = (initval & ~mask) | (value & mask);
- ab8505_reg_init[i].value = initval;
-
- BUG_ON(mask & ~ab8505_reg_init[i].mask);
- return;
- }
- }
- } else {
- for (i = ARRAY_SIZE(ab8500_reg_init) - 1; i >= 0; i--) {
- if (ab8500_reg_init[i].id == id) {
- u8 initval = ab8500_reg_init[i].value;
- initval = (initval & ~mask) | (value & mask);
- ab8500_reg_init[i].value = initval;
-
- BUG_ON(mask & ~ab8500_reg_init[i].mask);
- return;
- }
- }
- }
-
- BUG_ON(1);
-}
-
-void mop500_regulator_init(void)
-{
- struct regulator_init_data *regulator;
-
- /*
- * Temporarily turn on Vaux2 on 8520 machine
- */
- if (cpu_is_u8520()) {
- /* Vaux2 initialized to be on */
- ab8500_modify_reg_init(AB8505_VAUX12REGU, 0x0f, 0x05);
- }
-
- /*
- * Handle AB8500_EXT_SUPPLY2 on HREFP_V20_V50 boards (do it for
- * all HREFP_V20 boards)
- */
- if (cpu_is_u8500v20()) {
- /* VextSupply2RequestCtrl = HP/OFF depending on VxRequest */
- ab8500_modify_reg_init(AB8500_REGUREQUESTCTRL3, 0x01, 0x01);
-
- /* VextSupply2SysClkReq1HPValid = SysClkReq1 controlled */
- ab8500_modify_reg_init(AB8500_REGUSYSCLKREQ1HPVALID2,
- 0x20, 0x20);
-
- /* VextSupply2 = force HP at initialization */
- ab8500_modify_reg_init(AB8500_EXTSUPPLYREGU, 0x0c, 0x04);
-
- /* enable VextSupply2 during platform active */
- regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
- regulator->constraints.always_on = 1;
-
- /* disable VextSupply2 in suspend */
- regulator = &ab8500_ext_regulators[AB8500_EXT_SUPPLY2];
- regulator->constraints.state_mem.disabled = 1;
- regulator->constraints.state_standby.disabled = 1;
-
- /* enable VextSupply2 HW control (used in suspend) */
- regulator->driver_data = (void *)&ab8500_ext_supply2;
- }
-}
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
deleted file mode 100644
index 9bece38fe933..000000000000
--- a/arch/arm/mach-ux500/board-mop500-regulators.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- *
- * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
- *
- * MOP500 board specific initialization for regulators
- */
-
-#ifndef __BOARD_MOP500_REGULATORS_H
-#define __BOARD_MOP500_REGULATORS_H
-
-#include <linux/regulator/machine.h>
-#include <linux/regulator/ab8500.h>
-
-extern struct ab8500_regulator_platform_data ab8500_regulator_plat_data;
-extern struct ab8500_regulator_platform_data ab8505_regulator_plat_data;
-extern struct regulator_init_data tps61052_regulator;
-extern struct regulator_init_data gpio_en_3v3_regulator;
-
-void mop500_regulator_init(void);
-
-#endif
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
deleted file mode 100644
index 780bd13cd7e3..000000000000
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <asm/outercache.h>
-#include <asm/hardware/cache-l2x0.h>
-
-#include "db8500-regs.h"
-#include "id.h"
-
-static int __init ux500_l2x0_unlock(void)
-{
- int i;
- struct device_node *np;
- void __iomem *l2x0_base;
-
- np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
- l2x0_base = of_iomap(np, 0);
- of_node_put(np);
- if (!l2x0_base)
- return -ENODEV;
-
- /*
- * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions
- * apparently locks both caches before jumping to the kernel. The
- * l2x0 core will not touch the unlock registers if the l2x0 is
- * already enabled, so we do it right here instead. The PL310 has
- * 8 sets of registers, one per possible CPU.
- */
- for (i = 0; i < 8; i++) {
- writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
- i * L2X0_LOCKDOWN_STRIDE);
- writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
- i * L2X0_LOCKDOWN_STRIDE);
- }
- iounmap(l2x0_base);
- return 0;
-}
-
-static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
-{
- /*
- * We can't write to secure registers as we are in non-secure
- * mode, until we have some SMI service available.
- */
-}
-
-static int __init ux500_l2x0_init(void)
-{
- /* Multiplatform guard */
- if (!((cpu_is_u8500_family() || cpu_is_ux540_family())))
- return -ENODEV;
-
- /* Unlock before init */
- ux500_l2x0_unlock();
- outer_cache.write_sec = ux500_l2c310_write_sec;
- l2x0_of_init(0, ~0);
-
- return 0;
-}
-early_initcall(ux500_l2x0_init);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index a557955472ea..46b1da1bf5d2 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -12,41 +12,107 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/platform_data/arm-ux500-pm.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/dbx500-prcmu.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/perf/arm_pmu.h>
#include <linux/regulator/machine.h>
-#include <linux/random.h>
+#include <asm/outercache.h>
+#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
+#include <asm/mach/arch.h>
#include "setup.h"
-#include "board-mop500-regulators.h"
#include "board-mop500.h"
#include "db8500-regs.h"
-#include "id.h"
-static struct ab8500_platform_data ab8500_platdata = {
- .regulator = &ab8500_regulator_plat_data,
-};
+static int __init ux500_l2x0_unlock(void)
+{
+ int i;
+ struct device_node *np;
+ void __iomem *l2x0_base;
-static struct prcmu_pdata db8500_prcmu_pdata = {
- .ab_platdata = &ab8500_platdata,
- .version_offset = DB8500_PRCMU_FW_VERSION_OFFSET,
- .legacy_offset = DB8500_PRCMU_LEGACY_OFFSET,
-};
+ np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
+ l2x0_base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!l2x0_base)
+ return -ENODEV;
+
+ /*
+ * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions
+	 * apparently lock both caches before jumping to the kernel. The
+ * l2x0 core will not touch the unlock registers if the l2x0 is
+ * already enabled, so we do it right here instead. The PL310 has
+ * 8 sets of registers, one per possible CPU.
+ */
+ for (i = 0; i < 8; i++) {
+ writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+ i * L2X0_LOCKDOWN_STRIDE);
+ writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+ i * L2X0_LOCKDOWN_STRIDE);
+ }
+ iounmap(l2x0_base);
+ return 0;
+}
+
+static void ux500_l2c310_write_sec(unsigned long val, unsigned reg)
+{
+ /*
+ * We can't write to secure registers as we are in non-secure
+ * mode, until we have some SMI service available.
+ */
+}
-static void __init u8500_map_io(void)
+/*
+ * FIXME: Should we set up the GPIO domain here?
+ *
+ * The problem is that we cannot put the interrupt resources into the platform
+ * device until the irqdomain has been added. Right now, we set the GIC interrupt
+ * domain from init_irq(), then load the gpio driver from
+ * core_initcall(nmk_gpio_init) and add the platform devices from
+ * arch_initcall(customize_machine).
+ *
+ * This feels fragile because it depends on the gpio device getting probed
+ * _before_ any device uses the gpio interrupts.
+*/
+static void __init ux500_init_irq(void)
+{
+ struct device_node *np;
+ struct resource r;
+
+ irqchip_init();
+ np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
+ of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (!r.start) {
+ pr_err("could not find PRCMU base resource\n");
+ return;
+ }
+ prcmu_early_init(r.start, r.end-r.start);
+ ux500_pm_init(r.start, r.end-r.start);
+
+ /* Unlock before init */
+ ux500_l2x0_unlock();
+ outer_cache.write_sec = ux500_l2c310_write_sec;
+}
+
+static void ux500_restart(enum reboot_mode mode, const char *cmd)
{
- debug_ll_io_init();
- ux500_setup_id();
+ local_irq_disable();
+ local_fiq_disable();
+
+ prcmu_system_reset(0);
}
/*
@@ -73,31 +139,6 @@ static struct arm_pmu_platdata db8500_pmu_platdata = {
.handle_irq = db8500_pmu_handler,
};
-static const char *db8500_read_soc_id(void)
-{
- void __iomem *uid;
- const char *retstr;
-
- uid = ioremap(U8500_BB_UID_BASE, 0x20);
- if (!uid)
- return NULL;
- /* Throw these device-specific numbers into the entropy pool */
- add_device_randomness(uid, 0x14);
- retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
- readl((u32 *)uid+0),
- readl((u32 *)uid+1), readl((u32 *)uid+2),
- readl((u32 *)uid+3), readl((u32 *)uid+4));
- iounmap(uid);
- return retstr;
-}
-
-static struct device * __init db8500_soc_device_init(void)
-{
- const char *soc_id = db8500_read_soc_id();
-
- return ux500_soc_device_init(soc_id);
-}
-
static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires call-back bindings. */
OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
@@ -111,8 +152,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000,
"ux500-msp-i2s.3", &msp3_platform_data),
/* Requires non-DT:able platform data. */
- OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
- &db8500_prcmu_pdata),
+ OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
OF_DEV_AUXDATA("stericsson,ux500-cryp", 0xa03cb000, "cryp1", NULL),
OF_DEV_AUXDATA("stericsson,ux500-hash", 0xa03c2000, "hash1", NULL),
OF_DEV_AUXDATA("stericsson,snd-soc-mop500", 0, "snd-soc-mop500.0",
@@ -121,8 +161,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
};
static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
- &db8500_prcmu_pdata),
+ OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
{},
};
@@ -136,15 +175,13 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
static void __init u8500_init_machine(void)
{
- struct device *parent = db8500_soc_device_init();
-
/* automatically probe child nodes of dbx5x0 devices */
if (of_machine_is_compatible("st-ericsson,u8540"))
of_platform_populate(NULL, u8500_local_bus_nodes,
- u8540_auxdata_lookup, parent);
+ u8540_auxdata_lookup, NULL);
else
of_platform_populate(NULL, u8500_local_bus_nodes,
- u8500_auxdata_lookup, parent);
+ u8500_auxdata_lookup, NULL);
}
static const char * stericsson_dt_platform_compat[] = {
@@ -156,10 +193,10 @@ static const char * stericsson_dt_platform_compat[] = {
};
DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)")
- .map_io = u8500_map_io,
+ .l2c_aux_val = 0,
+ .l2c_aux_mask = ~0,
.init_irq = ux500_init_irq,
.init_machine = u8500_init_machine,
- .init_late = NULL,
.dt_compat = stericsson_dt_platform_compat,
.restart = ux500_restart,
MACHINE_END
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
deleted file mode 100644
index 82156cbc22ce..000000000000
--- a/arch/arm/mach-ux500/cpu.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/sys_soc.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/clk-ux500.h>
-#include <linux/platform_data/arm-ux500-pm.h>
-
-#include <asm/mach/map.h>
-
-#include "setup.h"
-
-#include "board-mop500.h"
-#include "db8500-regs.h"
-#include "id.h"
-
-void ux500_restart(enum reboot_mode mode, const char *cmd)
-{
- local_irq_disable();
- local_fiq_disable();
-
- prcmu_system_reset(0);
-}
-
-/*
- * FIXME: Should we set up the GPIO domain here?
- *
- * The problem is that we cannot put the interrupt resources into the platform
- * device until the irqdomain has been added. Right now, we set the GIC interrupt
- * domain from init_irq(), then load the gpio driver from
- * core_initcall(nmk_gpio_init) and add the platform devices from
- * arch_initcall(customize_machine).
- *
- * This feels fragile because it depends on the gpio device getting probed
- * _before_ any device uses the gpio interrupts.
-*/
-void __init ux500_init_irq(void)
-{
- struct device_node *np;
- struct resource r;
-
- irqchip_init();
- np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
- of_address_to_resource(np, 0, &r);
- of_node_put(np);
- if (!r.start) {
- pr_err("could not find PRCMU base resource\n");
- return;
- }
- prcmu_early_init(r.start, r.end-r.start);
- ux500_pm_init(r.start, r.end-r.start);
-
- /*
- * Init clocks here so that they are available for system timer
- * initialization.
- */
- if (cpu_is_u8500_family())
- u8500_clk_init();
- else if (cpu_is_u9540())
- u9540_clk_init();
- else if (cpu_is_u8540())
- u8540_clk_init();
-}
-
-static const char * __init ux500_get_machine(void)
-{
- return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
-}
-
-static const char * __init ux500_get_family(void)
-{
- return kasprintf(GFP_KERNEL, "ux500");
-}
-
-static const char * __init ux500_get_revision(void)
-{
- unsigned int rev = dbx500_revision();
-
- if (rev == 0x01)
- return kasprintf(GFP_KERNEL, "%s", "ED");
- else if (rev >= 0xA0)
- return kasprintf(GFP_KERNEL, "%d.%d",
- (rev >> 4) - 0xA + 1, rev & 0xf);
-
- return kasprintf(GFP_KERNEL, "%s", "Unknown");
-}
-
-static ssize_t ux500_get_process(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- if (dbx500_id.process == 0x00)
- return sprintf(buf, "Standard\n");
-
- return sprintf(buf, "%02xnm\n", dbx500_id.process);
-}
-
-static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
- const char *soc_id)
-{
- soc_dev_attr->soc_id = soc_id;
- soc_dev_attr->machine = ux500_get_machine();
- soc_dev_attr->family = ux500_get_family();
- soc_dev_attr->revision = ux500_get_revision();
-}
-
-static const struct device_attribute ux500_soc_attr =
- __ATTR(process, S_IRUGO, ux500_get_process, NULL);
-
-struct device * __init ux500_soc_device_init(const char *soc_id)
-{
- struct device *parent;
- struct soc_device *soc_dev;
- struct soc_device_attribute *soc_dev_attr;
-
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- return ERR_PTR(-ENOMEM);
-
- soc_info_populate(soc_dev_attr, soc_id);
-
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev)) {
- kfree(soc_dev_attr);
- return NULL;
- }
-
- parent = soc_device_to_device(soc_dev);
- device_create_file(parent, &ux500_soc_attr);
-
- return parent;
-}
diff --git a/arch/arm/mach-ux500/id.c b/arch/arm/mach-ux500/id.c
deleted file mode 100644
index 1e81e990044b..000000000000
--- a/arch/arm/mach-ux500/id.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <asm/cputype.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/mach/map.h>
-
-#include "setup.h"
-
-#include "db8500-regs.h"
-#include "id.h"
-
-struct dbx500_asic_id dbx500_id;
-
-static unsigned int __init ux500_read_asicid(phys_addr_t addr)
-{
- phys_addr_t base = addr & ~0xfff;
- struct map_desc desc = {
- .virtual = (unsigned long)UX500_VIRT_ROM,
- .pfn = __phys_to_pfn(base),
- .length = SZ_16K,
- .type = MT_DEVICE,
- };
-
- iotable_init(&desc, 1);
-
- /* As in devicemaps_init() */
- local_flush_tlb_all();
- flush_cache_all();
-
- return readl(UX500_VIRT_ROM + (addr & 0xfff));
-}
-
-static void ux500_print_soc_info(unsigned int asicid)
-{
- unsigned int rev = dbx500_revision();
-
- pr_info("DB%4x ", dbx500_partnumber());
-
- if (rev == 0x01)
- pr_cont("Early Drop");
- else if (rev >= 0xA0)
- pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf);
- else
- pr_cont("Unknown");
-
- pr_cont(" [%#010x]\n", asicid);
-}
-
-static unsigned int partnumber(unsigned int asicid)
-{
- return (asicid >> 8) & 0xffff;
-}
-
-/*
- * SOC MIDR ASICID ADDRESS ASICID VALUE
- * DB8500ed 0x410fc090 0x9001FFF4 0x00850001
- * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
- * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
- * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
- * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
- * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
- * DB9540 0x413fc090 0xFFFFDBF4 0x009540xx
- */
-
-void __init ux500_setup_id(void)
-{
- unsigned int cpuid = read_cpuid_id();
- unsigned int asicid = 0;
- phys_addr_t addr = 0;
-
- switch (cpuid) {
- case 0x410fc090: /* DB8500ed */
- case 0x411fc091: /* DB8500v1 */
- addr = 0x9001FFF4;
- break;
-
- case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
- asicid = ux500_read_asicid(0x9001DBF4);
- if (partnumber(asicid) == 0x8500 ||
- partnumber(asicid) == 0x8520)
- /* DB8500v2 */
- break;
-
- /* DB5500v1 */
- addr = 0x9001FFF4;
- break;
-
- case 0x413fc090: /* DB9540 */
- addr = 0xFFFFDBF4;
- break;
- }
-
- if (addr)
- asicid = ux500_read_asicid(addr);
-
- if (!asicid) {
- pr_err("Unable to identify SoC\n");
- ux500_unknown_soc();
- }
-
- dbx500_id.process = asicid >> 24;
- dbx500_id.partnumber = partnumber(asicid);
- dbx500_id.revision = asicid & 0xff;
-
- ux500_print_soc_info(asicid);
-}
diff --git a/arch/arm/mach-ux500/id.h b/arch/arm/mach-ux500/id.h
deleted file mode 100644
index bcc58a8cccbc..000000000000
--- a/arch/arm/mach-ux500/id.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#ifndef __MACH_UX500_ID
-#define __MACH_UX500_ID
-
-/**
- * struct dbx500_asic_id - fields of the ASIC ID
- * @process: the manufacturing process, 0x40 is 40 nm 0x00 is "standard"
- * @partnumber: hithereto 0x8500 for DB8500
- * @revision: version code in the series
- */
-struct dbx500_asic_id {
- u16 partnumber;
- u8 revision;
- u8 process;
-};
-
-extern struct dbx500_asic_id dbx500_id;
-
-static inline unsigned int __attribute_const__ dbx500_partnumber(void)
-{
- return dbx500_id.partnumber;
-}
-
-static inline unsigned int __attribute_const__ dbx500_revision(void)
-{
- return dbx500_id.revision;
-}
-
-/*
- * SOCs
- */
-
-static inline bool __attribute_const__ cpu_is_u8500(void)
-{
- return dbx500_partnumber() == 0x8500;
-}
-
-static inline bool __attribute_const__ cpu_is_u8520(void)
-{
- return dbx500_partnumber() == 0x8520;
-}
-
-static inline bool cpu_is_u8500_family(void)
-{
- return cpu_is_u8500() || cpu_is_u8520();
-}
-
-static inline bool __attribute_const__ cpu_is_u9540(void)
-{
- return dbx500_partnumber() == 0x9540;
-}
-
-static inline bool __attribute_const__ cpu_is_u8540(void)
-{
- return dbx500_partnumber() == 0x8540;
-}
-
-static inline bool __attribute_const__ cpu_is_u8580(void)
-{
- return dbx500_partnumber() == 0x8580;
-}
-
-static inline bool cpu_is_ux540_family(void)
-{
- return cpu_is_u9540() || cpu_is_u8540() || cpu_is_u8580();
-}
-
-/*
- * 8500 revisions
- */
-
-static inline bool __attribute_const__ cpu_is_u8500ed(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0x00;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v1(void)
-{
- return cpu_is_u8500() && (dbx500_revision() & 0xf0) == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v10(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v11(void)
-{
- return cpu_is_u8500() && dbx500_revision() == 0xA1;
-}
-
-static inline bool __attribute_const__ cpu_is_u8500v2(void)
-{
- return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0);
-}
-
-static inline bool cpu_is_u8500v20(void)
-{
- return cpu_is_u8500() && (dbx500_revision() == 0xB0);
-}
-
-static inline bool cpu_is_u8500v21(void)
-{
- return cpu_is_u8500() && (dbx500_revision() == 0xB1);
-}
-
-static inline bool cpu_is_u8500v22(void)
-{
- return cpu_is_u8500() && (dbx500_revision() == 0xB2);
-}
-
-static inline bool cpu_is_u8500v20_or_later(void)
-{
- return (cpu_is_u8500() && !cpu_is_u8500v10() && !cpu_is_u8500v11());
-}
-
-/*
- * 8540 revisions
- */
-
-static inline bool __attribute_const__ cpu_is_u8540v10(void)
-{
- return cpu_is_u8540() && dbx500_revision() == 0xA0;
-}
-
-static inline bool __attribute_const__ cpu_is_u8580v10(void)
-{
- return cpu_is_u8580() && dbx500_revision() == 0xA0;
-}
-
-static inline bool ux500_is_svp(void)
-{
- return false;
-}
-
-#define ux500_unknown_soc() BUG()
-
-#endif
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 88b8ab4f300c..8f2f615ff958 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -26,7 +26,6 @@
#include "setup.h"
#include "db8500-regs.h"
-#include "id.h"
/* Magic triggers in backup RAM */
#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
index c704254ab67c..988e7c77068d 100644
--- a/arch/arm/mach-ux500/setup.h
+++ b/arch/arm/mach-ux500/setup.h
@@ -11,18 +11,6 @@
#ifndef __ASM_ARCH_SETUP_H
#define __ASM_ARCH_SETUP_H
-#include <asm/mach/arch.h>
-#include <linux/init.h>
-#include <linux/mfd/abx500/ab8500.h>
-
-void ux500_restart(enum reboot_mode mode, const char *cmd);
-
-void __init ux500_setup_id(void);
-
-extern void __init ux500_init_irq(void);
-
-extern struct device *ux500_soc_device_init(const char *soc_id);
-
extern void ux500_cpu_die(unsigned int cpu);
#endif /* __ASM_ARCH_SETUP_H */
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 398a297b7e09..7c728ebc0b33 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,13 +1,13 @@
menuconfig ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family"
depends on ARCH_MULTI_V7
- select ARCH_REQUIRE_GPIOLIB
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select ARM_GLOBAL_TIMER
select ARM_TIMER_SP804
select COMMON_CLK_VERSATILE
+ select GPIOLIB
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select HAVE_PATA_PLATFORM
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c
index f2fafc10a91d..d8f1a05f5e87 100644
--- a/arch/arm/mach-vexpress/hotplug.c
+++ b/arch/arm/mach-vexpress/hotplug.c
@@ -15,6 +15,8 @@
#include <asm/smp_plat.h>
#include <asm/cp15.h>
+#include "core.h"
+
static inline void cpu_enter_lowpower(void)
{
unsigned int v;
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 8409cab3f760..fe488523694c 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -31,6 +31,8 @@
#include <asm/cacheflush.h>
+#include "spc.h"
+
#define SPCLOG "vexpress-spc: "
#define PERF_LVL_A15 0x00
@@ -319,17 +321,15 @@ static int ve_spc_waitforcompletion(int req_type)
static int ve_spc_set_performance(int cluster, u32 freq)
{
- u32 perf_cfg_reg, perf_stat_reg;
+ u32 perf_cfg_reg;
int ret, perf, req_type;
if (cluster_is_a15(cluster)) {
req_type = CA15_DVFS;
perf_cfg_reg = PERF_LVL_A15;
- perf_stat_reg = PERF_REQ_A15;
} else {
req_type = CA7_DVFS;
perf_cfg_reg = PERF_LVL_A7;
- perf_stat_reg = PERF_REQ_A7;
}
perf = ve_spc_find_performance_index(cluster, freq);
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
index aaaa24fe4d71..c4f1dba7bd8a 100644
--- a/arch/arm/mach-vt8500/Kconfig
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -1,6 +1,6 @@
config ARCH_VT8500
bool
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select CLKDEV_LOOKUP
select VT8500_TIMER
select PINCTRL
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b7eed75960fe..c6834c0cfd1c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -128,16 +128,16 @@ static void __dma_page_dev_to_cpu(struct page *, unsigned long,
*/
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(page, offset, size, dir);
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
@@ -157,10 +157,9 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag
* whatever the device wrote there.
*/
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
handle & ~PAGE_MASK, size, dir);
}
@@ -198,12 +197,12 @@ struct dma_map_ops arm_dma_ops = {
EXPORT_SYMBOL(arm_dma_ops);
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs);
+ dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
+ unsigned long attrs);
struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
@@ -639,11 +638,11 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
- prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
- pgprot_writecombine(prot) :
- pgprot_dmacoherent(prot);
+ prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
return prot;
}
@@ -751,7 +750,7 @@ static struct arm_dma_allocator remap_allocator = {
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
- struct dma_attrs *attrs, const void *caller)
+ unsigned long attrs, const void *caller)
{
u64 mask = get_coherent_dma_mask(dev);
struct page *page = NULL;
@@ -764,7 +763,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
.gfp = gfp,
.prot = prot,
.caller = caller,
- .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
.coherent_flag = is_coherent ? COHERENT : NORMAL,
};
@@ -834,7 +833,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
* virtual and bus address for that space.
*/
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
@@ -843,7 +842,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
}
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
@@ -851,7 +850,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
@@ -879,14 +878,14 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
*/
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_MMU
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
@@ -898,7 +897,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
* Free a buffer as defined by the above mapping.
*/
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs,
+ dma_addr_t handle, unsigned long attrs,
bool is_coherent)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
@@ -908,7 +907,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
.size = PAGE_ALIGN(size),
.cpu_addr = cpu_addr,
.page = page,
- .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
};
buf = arm_dma_buffer_find(cpu_addr);
@@ -920,20 +919,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
}
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
int ret;
@@ -1066,7 +1065,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
* here.
*/
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
@@ -1100,7 +1099,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* rules concerning calls here are the same as for dma_unmap_single().
*/
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
@@ -1273,7 +1272,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
static const int iommu_order_array[] = { 9, 8, 4, 0 };
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
- gfp_t gfp, struct dma_attrs *attrs,
+ gfp_t gfp, unsigned long attrs,
int coherent_flag)
{
struct page **pages;
@@ -1289,7 +1288,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
if (!pages)
return NULL;
- if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
{
unsigned long order = get_order(size);
struct page *page;
@@ -1307,7 +1306,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
}
/* Go straight to 4K chunks if caller says it's OK. */
- if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
order_idx = ARRAY_SIZE(iommu_order_array) - 1;
/*
@@ -1363,12 +1362,12 @@ error:
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
int count = size >> PAGE_SHIFT;
int i;
- if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
dma_release_from_contiguous(dev, pages[0], count);
} else {
for (i = 0; i < count; i++)
@@ -1460,14 +1459,14 @@ static struct page **__atomic_get_pages(void *addr)
return (struct page **)page;
}
-static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
struct vm_struct *area;
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return cpu_addr;
area = find_vm_area(cpu_addr);
@@ -1511,7 +1510,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
}
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
int coherent_flag)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
@@ -1542,7 +1541,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return pages;
addr = __iommu_alloc_remap(pages, size, gfp, prot,
@@ -1560,20 +1559,20 @@ err_buffer:
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}
static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
@@ -1603,7 +1602,7 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
}
static int arm_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
@@ -1612,7 +1611,7 @@ static int arm_iommu_mmap_attrs(struct device *dev,
static int arm_coherent_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}
@@ -1622,7 +1621,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev,
* Must not be called with IRQs disabled.
*/
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
+ dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
struct page **pages;
size = PAGE_ALIGN(size);
@@ -1638,7 +1637,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
- if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
dma_common_free_remap(cpu_addr, size,
VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
@@ -1648,20 +1647,20 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
void arm_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}
void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
@@ -1699,7 +1698,7 @@ static int __dma_direction_to_prot(enum dma_data_direction dir)
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
- enum dma_data_direction dir, struct dma_attrs *attrs,
+ enum dma_data_direction dir, unsigned long attrs,
bool is_coherent)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
@@ -1720,8 +1719,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!is_coherent &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_direction_to_prot(dir);
@@ -1742,7 +1740,7 @@ fail:
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs,
+ enum dma_data_direction dir, unsigned long attrs,
bool is_coherent)
{
struct scatterlist *s = sg, *dma = sg, *start = sg;
@@ -1800,7 +1798,7 @@ bad_mapping:
* obtained via sg_dma_{address,length}.
*/
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}
@@ -1818,14 +1816,14 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
* sg_dma_{address,length}.
*/
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
- bool is_coherent)
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs, bool is_coherent)
{
struct scatterlist *s;
int i;
@@ -1834,8 +1832,7 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
- if (!is_coherent &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
@@ -1852,7 +1849,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
* rules concerning calls here are the same as for dma_unmap_single().
*/
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}
@@ -1868,7 +1866,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
* rules concerning calls here are the same as for dma_unmap_single().
*/
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
@@ -1921,7 +1920,7 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
@@ -1955,9 +1954,9 @@ fail:
*/
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(page, offset, size, dir);
return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -1973,8 +1972,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
* Coherent IOMMU aware version of arm_dma_unmap_page()
*/
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
@@ -1998,8 +1996,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
* IOMMU aware version of arm_dma_unmap_page()
*/
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
@@ -2010,7 +2007,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
if (!iova)
return;
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_dev_to_cpu(page, offset, size, dir);
iommu_unmap(mapping->domain, iova, len);
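
The dma-mapping.c hunks above all apply the same tree-wide conversion: DMA attributes are now passed as a plain unsigned long bitmask instead of a struct dma_attrs pointer, so every dma_get_attr() call collapses into a bit test. A minimal sketch of the resulting convention (illustrative only, not part of the patch; it assumes nothing beyond the standard DMA_ATTR_* flags and dma_alloc_attrs() from <linux/dma-mapping.h>, and the helper name below is hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical helper, not part of the patch: with attrs carried as an
 * unsigned long bitmask, an attribute query is a plain bit test.
 */
static inline bool skip_cpu_sync(unsigned long attrs)
{
	return attrs & DMA_ATTR_SKIP_CPU_SYNC;
}

/* Callers OR the flags together the same way, e.g.:
 *
 *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING);
 */
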
diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
index 5b217f460f18..8151bde990e6 100644
--- a/arch/arm/plat-iop/setup.c
+++ b/arch/arm/plat-iop/setup.c
@@ -20,12 +20,12 @@
* the IOP3xx OCCDR must be mapped uncached and unbuffered.
*/
static struct map_desc iop3xx_std_desc[] __initdata = {
- { /* mem mapped registers */
+ { /* mem mapped registers */
.virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
.pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
.length = IOP3XX_PERIPHERAL_SIZE,
.type = MT_UNCACHED,
- },
+ },
};
void __init iop3xx_map_io(void)
diff --git a/arch/arm/plat-samsung/cpu.c b/arch/arm/plat-samsung/cpu.c
index 71333bb61013..a107b3a0b095 100644
--- a/arch/arm/plat-samsung/cpu.c
+++ b/arch/arm/plat-samsung/cpu.c
@@ -29,14 +29,14 @@ EXPORT_SYMBOL(samsung_rev);
void __init s3c64xx_init_cpu(void)
{
- samsung_cpu_id = __raw_readl(S3C_VA_SYS + 0x118);
+ samsung_cpu_id = readl_relaxed(S3C_VA_SYS + 0x118);
if (!samsung_cpu_id) {
/*
* S3C6400 has the ID register in a different place,
* and needs a write before it can be read.
*/
- __raw_writel(0x0, S3C_VA_SYS + 0xA1C);
- samsung_cpu_id = __raw_readl(S3C_VA_SYS + 0xA1C);
+ writel_relaxed(0x0, S3C_VA_SYS + 0xA1C);
+ samsung_cpu_id = readl_relaxed(S3C_VA_SYS + 0xA1C);
}
samsung_cpu_rev = 0;
@@ -44,9 +44,9 @@ void __init s3c64xx_init_cpu(void)
pr_info("Samsung CPU ID: 0x%08lx\n", samsung_cpu_id);
}
-void __init s5p_init_cpu(void __iomem *cpuid_addr)
+void __init s5p_init_cpu(const void __iomem *cpuid_addr)
{
- samsung_cpu_id = __raw_readl(cpuid_addr);
+ samsung_cpu_id = readl_relaxed(cpuid_addr);
samsung_cpu_rev = samsung_cpu_id & 0xFF;
pr_info("Samsung CPU ID: 0x%08lx\n", samsung_cpu_id);
diff --git a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
index 317c52303288..37cf20e04aff 100644
--- a/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
+++ b/arch/arm/plat-samsung/include/plat/cpu-freq-core.h
@@ -39,7 +39,7 @@ struct s3c2410_iobank_timing {
unsigned int tacs;
unsigned int tcos;
unsigned int tacc;
- unsigned int tcoh; /* nCS hold afrer nOE/nWE */
+ unsigned int tcoh; /* nCS hold after nOE/nWE */
unsigned int tcah; /* Address hold after nCS */
unsigned char nwait_en; /* nWait enabled for bank. */
};
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 61d14f3a0426..b7b702a72cac 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -113,6 +113,7 @@ extern void s3c_init_cpu(unsigned long idcode,
extern void s3c24xx_init_io(struct map_desc *mach_desc, int size);
extern void s3c64xx_init_cpu(void);
+extern void s5p_init_cpu(const void __iomem *cpuid_addr);
extern unsigned int samsung_rev(void);
diff --git a/arch/arm/plat-samsung/include/plat/fb-s3c2410.h b/arch/arm/plat-samsung/include/plat/fb-s3c2410.h
index 4e5d9588b5ba..1f2972a74e9f 100644
--- a/arch/arm/plat-samsung/include/plat/fb-s3c2410.h
+++ b/arch/arm/plat-samsung/include/plat/fb-s3c2410.h
@@ -48,7 +48,7 @@ struct s3c2410fb_display {
struct s3c2410fb_mach_info {
- struct s3c2410fb_display *displays; /* attached diplays info */
+ struct s3c2410fb_display *displays; /* attached displays info */
unsigned num_displays; /* number of defined displays */
unsigned default_display;
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index b5294eff18b5..21391faab068 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -35,7 +35,7 @@ struct samsung_gpio_chip;
* struct samsung_gpio_cfg GPIO configuration
* @cfg_eint: Configuration setting when used for external interrupt source
* @get_pull: Read the current pull configuration for the GPIO
- * @set_pull: Set the current pull configuraiton for the GPIO
+ * @set_pull: Set the current pull configuration for the GPIO
* @set_config: Set the current configuration for the GPIO
* @get_config: Read the current configuration for the GPIO
*
diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c
index 70f2f699bed3..d63516374f7c 100644
--- a/arch/arm/plat-samsung/pm-check.c
+++ b/arch/arm/plat-samsung/pm-check.c
@@ -5,7 +5,7 @@
* http://armlinux.simtec.co.uk
* Ben Dooks <ben@simtec.co.uk>
*
- * S3C Power Mangament - suspend/resume memory corruptiuon check.
+ * S3C Power Management - suspend/resume memory corruption check.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/plat-samsung/pm-common.c b/arch/arm/plat-samsung/pm-common.c
index 515cd53372bd..6534c3ff9fe2 100644
--- a/arch/arm/plat-samsung/pm-common.c
+++ b/arch/arm/plat-samsung/pm-common.c
@@ -31,7 +31,7 @@
void s3c_pm_do_save(struct sleep_save *ptr, int count)
{
for (; count > 0; count--, ptr++) {
- ptr->val = __raw_readl(ptr->reg);
+ ptr->val = readl_relaxed(ptr->reg);
S3C_PMDBG("saved %p value %08lx\n", ptr->reg, ptr->val);
}
}
@@ -51,9 +51,9 @@ void s3c_pm_do_restore(const struct sleep_save *ptr, int count)
{
for (; count > 0; count--, ptr++) {
pr_debug("restore %p (restore %08lx, was %08x)\n",
- ptr->reg, ptr->val, __raw_readl(ptr->reg));
+ ptr->reg, ptr->val, readl_relaxed(ptr->reg));
- __raw_writel(ptr->val, ptr->reg);
+ writel_relaxed(ptr->val, ptr->reg);
}
}
@@ -71,5 +71,5 @@ void s3c_pm_do_restore(const struct sleep_save *ptr, int count)
void s3c_pm_do_restore_core(const struct sleep_save *ptr, int count)
{
for (; count > 0; count--, ptr++)
- __raw_writel(ptr->val, ptr->reg);
+ writel_relaxed(ptr->val, ptr->reg);
}
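
The plat-samsung hunks above swap the __raw_readl()/__raw_writel() accessors for readl_relaxed()/writel_relaxed(). A brief sketch of what that buys on 32-bit ARM (illustrative only, not part of the patch; the helper name is hypothetical): the relaxed accessors perform the little-endian byte swap that the __raw_* forms omit, while still avoiding the heavyweight barriers of readl()/writel().

#include <linux/io.h>

/* Hypothetical helper, not part of the patch, just to show the accessor
 * semantics: readl_relaxed() byte-swaps on big-endian kernels (unlike
 * __raw_readl()) but, like the __raw_* form, adds no read barrier.
 */
static inline u32 s3c_pm_read_reg(const void __iomem *reg)
{
	return readl_relaxed(reg);
}
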
diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c
index 2ecb50bea044..307d8ad96a78 100644
--- a/arch/arm/plat-samsung/watchdog-reset.c
+++ b/arch/arm/plat-samsung/watchdog-reset.c
@@ -3,7 +3,7 @@
* Copyright (c) 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
- * Coyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
+ * Copyright (c) 2013 Tomasz Figa <tomasz.figa@gmail.com>
*
* Watchdog reset support for Samsung SoCs.
*
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 53feb90c840c..c2366510187a 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -18,6 +18,8 @@
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
+#include <plat/platsmp.h>
+
/*
* Write pen_release in a way that is guaranteed to be visible to all
* observers, irrespective of whether they're taking part in coherency
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index c5f9a9e3d1f3..d062f08f5020 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -98,11 +98,11 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
if (is_device_dma_coherent(hwdev))
return;
- if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
@@ -110,12 +110,12 @@ void __xen_dma_map_page(struct device *hwdev, struct page *page,
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (is_device_dma_coherent(hwdev))
return;
- if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9f8b99e20557..bc3f00f586f1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -3,6 +3,7 @@ config ARM64
select ACPI_CCA_REQUIRED if ACPI
select ACPI_GENERIC_GSI if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
+ select ACPI_MCFG if ACPI
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -22,9 +23,9 @@ config ARM64
select ARM_ARCH_TIMER
select ARM_GIC
select AUDIT_ARCH_COMPAT_GENERIC
- select ARM_GIC_V2M if PCI_MSI
+ select ARM_GIC_V2M if PCI
select ARM_GIC_V3
- select ARM_GIC_V3_ITS if PCI_MSI
+ select ARM_GIC_V3_ITS if PCI
select ARM_PSCI_FW
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
@@ -53,6 +54,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
@@ -78,6 +80,7 @@ config ARM64
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_GCC_PLUGINS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
@@ -101,6 +104,7 @@ config ARM64
select OF_EARLY_FLATTREE
select OF_NUMA if NUMA && OF
select OF_RESERVED_MEM
+ select PCI_ECAM if ACPI
select PERF_USE_VMALLOC
select POWER_RESET
select POWER_SUPPLY
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 7ef1d05859ae..bb2616b16157 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -13,6 +13,19 @@ config ARCH_ALPINE
This enables support for the Annapurna Labs Alpine
Soc family.
+config ARCH_BCM2835
+ bool "Broadcom BCM2835 family"
+ select ARCH_REQUIRE_GPIOLIB
+ select CLKSRC_OF
+ select PINCTRL
+ select PINCTRL_BCM2835
+ select ARM_AMBA
+ select ARM_TIMER_SP804
+ select HAVE_ARM_ARCH_TIMER
+ help
+ This enables support for the Broadcom BCM2837 SoC.
+ This SoC is used in the Raspberry Pi 3 device.
+
config ARCH_BCM_IPROC
bool "Broadcom iProc SoC Family"
select COMMON_CLK_IPROC
@@ -36,6 +49,7 @@ config ARCH_EXYNOS
select HAVE_S3C_RTC if RTC_CLASS
select PINCTRL
select PINCTRL_EXYNOS
+ select SOC_SAMSUNG
help
This enables support for ARMv8 based Samsung Exynos SoC family.
@@ -66,6 +80,10 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
+ select PINCTRL
+ select PINCTRL_MESON
+ select COMMON_CLK_AMLOGIC
+ select COMMON_CLK_GXBB
help
This enables support for the Amlogic S905 SoCs.
@@ -73,6 +91,7 @@ config ARCH_MVEBU
bool "Marvell EBU SoC Family"
select ARMADA_AP806_SYSCON
select ARMADA_CP110_SYSCON
+ select ARMADA_37XX_CLK
select MVEBU_ODMI
help
This enables support for Marvell EBU familly, including:
@@ -121,6 +140,12 @@ config ARCH_R8A7795
help
This enables support for the Renesas R-Car H3 SoC.
+config ARCH_R8A7796
+ bool "Renesas R-Car M3-W SoC Platform"
+ depends on ARCH_RENESAS
+ help
+ This enables support for the Renesas R-Car M3-W SoC.
+
config ARCH_STRATIX10
bool "Altera's Stratix 10 SoCFPGA Family"
help
@@ -160,6 +185,8 @@ config ARCH_VEXPRESS
bool "ARMv8 software model (Versatile Express)"
select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK_VERSATILE
+ select PM
+ select PM_GENERIC_DOMAINS
select POWER_RESET_VEXPRESS
select VEXPRESS_CONFIG
help
@@ -168,6 +195,7 @@ config ARCH_VEXPRESS
config ARCH_VULCAN
bool "Broadcom Vulcan SOC Family"
+ select GPIOLIB
help
This enables support for Broadcom Vulcan SoC Family
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index d59b6908a21a..5b54f8c021d8 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie
+LDFLAGS_vmlinux += -pie -Bsymbolic
endif
KBUILD_DEFCONFIG := defconfig
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 7f2c6747a71e..90a84c514d3d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -45,6 +45,7 @@
/dts-v1/;
#include "meson-gxbb.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb";
@@ -62,8 +63,27 @@
device_type = "memory";
reg = <0x0 0x0 0x0 0x80000000>;
};
+
+ leds {
+ compatible = "gpio-leds";
+ blue {
+ label = "c2:blue:alive";
+ gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+ };
};
&uart_AO {
status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
};
+
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index bf7ff1d41851..f4f30f674b4c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -62,4 +62,13 @@
/* This UART is brought out to the DB9 connector */
&uart_AO {
status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
};
+
+&ethmac {
+ status = "okay";
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
+};
+
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 012cdccc8a35..54bb7c739089 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -56,4 +56,7 @@
&uart_AO {
status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 832815d80462..e502c24b0ac7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -43,6 +43,8 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/meson-gxbb-gpio.h>
+#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
/ {
compatible = "amlogic,meson-gxbb";
@@ -129,13 +131,35 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xc1100000 0x0 0x100000>;
+ reset: reset-controller@4404 {
+ compatible = "amlogic,meson-gxbb-reset";
+ reg = <0x0 0x04404 0x0 0x20>;
+ #reset-cells = <1>;
+ };
+
uart_A: serial@84c0 {
compatible = "amlogic,meson-uart";
- reg = <0x0 0x084c0 0x0 0x14>;
+ reg = <0x0 0x84c0 0x0 0x14>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
clocks = <&xtal>;
status = "disabled";
};
+
+ uart_B: serial@84dc {
+ compatible = "amlogic,meson-uart";
+ reg = <0x0 0x84dc 0x0 0x14>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>;
+ status = "disabled";
+ };
+
+ uart_C: serial@8700 {
+ compatible = "amlogic,meson-uart";
+ reg = <0x0 0x8700 0x0 0x14>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>;
+ status = "disabled";
+ };
};
gic: interrupt-controller@c4301000 {
@@ -158,6 +182,29 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xc8100000 0x0 0x100000>;
+ pinctrl_aobus: pinctrl@14 {
+ compatible = "amlogic,meson-gxbb-aobus-pinctrl";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio_ao: bank@14 {
+ reg = <0x0 0x00014 0x0 0x8>,
+ <0x0 0x0002c 0x0 0x4>,
+ <0x0 0x00024 0x0 0x8>;
+ reg-names = "mux", "pull", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ uart_ao_a_pins: uart_ao_a {
+ mux {
+ groups = "uart_tx_ao_a", "uart_rx_ao_a";
+ function = "uart_ao";
+ };
+ };
+ };
+
uart_AO: serial@4c0 {
compatible = "amlogic,meson-uart";
reg = <0x0 0x004c0 0x0 0x14>;
@@ -167,6 +214,115 @@
};
};
+ periphs: periphs@c8834000 {
+ compatible = "simple-bus";
+ reg = <0x0 0xc8834000 0x0 0x2000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
+
+ rng {
+ compatible = "amlogic,meson-rng";
+ reg = <0x0 0x0 0x0 0x4>;
+ };
+
+ pinctrl_periphs: pinctrl@4b0 {
+ compatible = "amlogic,meson-gxbb-periphs-pinctrl";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio: bank@4b0 {
+ reg = <0x0 0x004b0 0x0 0x28>,
+ <0x0 0x004e8 0x0 0x14>,
+ <0x0 0x00120 0x0 0x14>,
+ <0x0 0x00430 0x0 0x40>;
+ reg-names = "mux", "pull", "pull-enable", "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ emmc_pins: emmc {
+ mux {
+ groups = "emmc_nand_d07",
+ "emmc_cmd",
+ "emmc_clk";
+ function = "emmc";
+ };
+ };
+
+ sdcard_pins: sdcard {
+ mux {
+ groups = "sdcard_d0",
+ "sdcard_d1",
+ "sdcard_d2",
+ "sdcard_d3",
+ "sdcard_cmd",
+ "sdcard_clk";
+ function = "sdcard";
+ };
+ };
+
+ uart_a_pins: uart_a {
+ mux {
+ groups = "uart_tx_a",
+ "uart_rx_a";
+ function = "uart_a";
+ };
+ };
+
+ uart_b_pins: uart_b {
+ mux {
+ groups = "uart_tx_b",
+ "uart_rx_b";
+ function = "uart_b";
+ };
+ };
+
+ uart_c_pins: uart_c {
+ mux {
+ groups = "uart_tx_c",
+ "uart_rx_c";
+ function = "uart_c";
+ };
+ };
+
+ eth_pins: eth_c {
+ mux {
+ groups = "eth_mdio",
+ "eth_mdc",
+ "eth_clk_rx_clk",
+ "eth_rx_dv",
+ "eth_rxd0",
+ "eth_rxd1",
+ "eth_rxd2",
+ "eth_rxd3",
+ "eth_rgmii_tx_clk",
+ "eth_tx_en",
+ "eth_txd0",
+ "eth_txd1",
+ "eth_txd2",
+ "eth_txd3";
+ function = "eth";
+ };
+ };
+ };
+ };
+
+ hiubus: hiubus@c883c000 {
+ compatible = "simple-bus";
+ reg = <0x0 0xc883c000 0x0 0x2000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0xc883c000 0x0 0x2000>;
+
+ clkc: clock-controller@0 {
+ compatible = "amlogic,gxbb-clkc";
+ #clock-cells = <1>;
+ reg = <0x0 0x0 0x0 0x3db>;
+ };
+ };
+
apb: apb@d0000000 {
compatible = "simple-bus";
reg = <0x0 0xd0000000 0x0 0x200000>;
@@ -174,5 +330,17 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xd0000000 0x0 0x200000>;
};
+
+ ethmac: ethernet@c9410000 {
+ compatible = "amlogic,meson6-dwmac", "snps,dwmac";
+ reg = <0x0 0xc9410000 0x0 0x10000
+ 0x0 0xc8834540 0x0 0x4>;
+ interrupts = <0 8 1>;
+ interrupt-names = "macirq";
+ clocks = <&xtal>;
+ clock-names = "stmmaceth";
+ phy-mode = "rgmii";
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
index 2e1e5daa1dc7..1425ed41620c 100644
--- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi
@@ -106,88 +106,88 @@
interrupts = <1 9 0xf04>; /* GIC Maintenance IRQ */
ranges = <0 0 0 0x79000000 0x0 0x800000>; /* MSI Range */
reg = <0x0 0x78090000 0x0 0x10000>, /* GIC Dist */
- <0x0 0x780A0000 0x0 0x20000>, /* GIC CPU */
- <0x0 0x780C0000 0x0 0x10000>, /* GIC VCPU Control */
- <0x0 0x780E0000 0x0 0x20000>; /* GIC VCPU */
- v2m0: v2m@0x00000 {
+ <0x0 0x780a0000 0x0 0x20000>, /* GIC CPU */
+ <0x0 0x780c0000 0x0 0x10000>, /* GIC VCPU Control */
+ <0x0 0x780e0000 0x0 0x20000>; /* GIC VCPU */
+ v2m0: v2m@00000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x0 0x0 0x1000>;
};
- v2m1: v2m@0x10000 {
+ v2m1: v2m@10000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x10000 0x0 0x1000>;
};
- v2m2: v2m@0x20000 {
+ v2m2: v2m@20000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x20000 0x0 0x1000>;
};
- v2m3: v2m@0x30000 {
+ v2m3: v2m@30000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x30000 0x0 0x1000>;
};
- v2m4: v2m@0x40000 {
+ v2m4: v2m@40000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x40000 0x0 0x1000>;
};
- v2m5: v2m@0x50000 {
+ v2m5: v2m@50000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x50000 0x0 0x1000>;
};
- v2m6: v2m@0x60000 {
+ v2m6: v2m@60000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x60000 0x0 0x1000>;
};
- v2m7: v2m@0x70000 {
+ v2m7: v2m@70000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x70000 0x0 0x1000>;
};
- v2m8: v2m@0x80000 {
+ v2m8: v2m@80000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x80000 0x0 0x1000>;
};
- v2m9: v2m@0x90000 {
+ v2m9: v2m@90000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
reg = <0x0 0x90000 0x0 0x1000>;
};
- v2m10: v2m@0xA0000 {
+ v2m10: v2m@a0000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0x0 0xA0000 0x0 0x1000>;
+ reg = <0x0 0xa0000 0x0 0x1000>;
};
- v2m11: v2m@0xB0000 {
+ v2m11: v2m@b0000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0x0 0xB0000 0x0 0x1000>;
+ reg = <0x0 0xb0000 0x0 0x1000>;
};
- v2m12: v2m@0xC0000 {
+ v2m12: v2m@c0000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0x0 0xC0000 0x0 0x1000>;
+ reg = <0x0 0xc0000 0x0 0x1000>;
};
- v2m13: v2m@0xD0000 {
+ v2m13: v2m@d0000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0x0 0xD0000 0x0 0x1000>;
+ reg = <0x0 0xd0000 0x0 0x1000>;
};
- v2m14: v2m@0xE0000 {
+ v2m14: v2m@e0000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0x0 0xE0000 0x0 0x1000>;
+ reg = <0x0 0xe0000 0x0 0x1000>;
};
- v2m15: v2m@0xF0000 {
+ v2m15: v2m@f0000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0x0 0xF0000 0x0 0x1000>;
+ reg = <0x0 0xf0000 0x0 0x1000>;
};
};
@@ -198,10 +198,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 0 0xff04>, /* Secure Phys IRQ */
- <1 13 0xff04>, /* Non-secure Phys IRQ */
- <1 14 0xff04>, /* Virt IRQ */
- <1 15 0xff04>; /* Hyp IRQ */
+ interrupts = <1 0 0xff08>, /* Secure Phys IRQ */
+ <1 13 0xff08>, /* Non-secure Phys IRQ */
+ <1 14 0xff08>, /* Virt IRQ */
+ <1 15 0xff08>; /* Hyp IRQ */
clock-frequency = <50000000>;
};
@@ -637,8 +637,8 @@
compatible = "apm,xgene2-sgenet";
status = "disabled";
reg = <0x0 0x1f610000 0x0 0xd100>,
- <0x0 0x1f600000 0x0 0Xd100>,
- <0x0 0x20000000 0x0 0X20000>;
+ <0x0 0x1f600000 0x0 0xd100>,
+ <0x0 0x20000000 0x0 0x20000>;
interrupts = <0 96 4>,
<0 97 4>;
dma-coherent;
@@ -652,8 +652,8 @@
compatible = "apm,xgene2-xgenet";
status = "disabled";
reg = <0x0 0x1f620000 0x0 0x10000>,
- <0x0 0x1f600000 0x0 0Xd100>,
- <0x0 0x20000000 0x0 0X220000>;
+ <0x0 0x1f600000 0x0 0xd100>,
+ <0x0 0x20000000 0x0 0x220000>;
interrupts = <0 108 4>,
<0 109 4>,
<0 110 4>,
@@ -693,7 +693,7 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0x0 0x10640000 0x0 0x1000>;
- interrupts = <0 0x3A 0x4>;
+ interrupts = <0 0x3a 0x4>;
clocks = <&i2c4clk 0>;
bus_num = <4>;
};
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 6bf7cbe2e72d..f1c2c713f9b0 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -199,16 +199,6 @@
clock-output-names = "sdioclk";
};
- qmlclk: qmlclk {
- compatible = "apm,xgene-device-clock";
- #clock-cells = <1>;
- clocks = <&socplldiv2 0>;
- clock-names = "qmlclk";
- reg = <0x0 0x1703C000 0x0 0x1000>;
- reg-names = "csr-reg";
- clock-output-names = "qmlclk";
- };
-
ethclk: ethclk {
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
@@ -226,7 +216,7 @@
compatible = "apm,xgene-device-clock";
#clock-cells = <1>;
clocks = <&ethclk 0>;
- reg = <0x0 0x1702C000 0x0 0x1000>;
+ reg = <0x0 0x1702c000 0x0 0x1000>;
reg-names = "csr-reg";
clock-output-names = "menetclk";
};
@@ -924,8 +914,8 @@
compatible = "apm,xgene-enet";
status = "disabled";
reg = <0x0 0x17020000 0x0 0xd100>,
- <0x0 0X17030000 0x0 0Xc300>,
- <0x0 0X10000000 0x0 0X200>;
+ <0x0 0x17030000 0x0 0xc300>,
+ <0x0 0x10000000 0x0 0x200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x3c 0x4>;
dma-coherent;
@@ -950,11 +940,11 @@
compatible = "apm,xgene1-sgenet";
status = "disabled";
reg = <0x0 0x1f210000 0x0 0xd100>,
- <0x0 0x1f200000 0x0 0Xc300>,
- <0x0 0x1B000000 0x0 0X200>;
+ <0x0 0x1f200000 0x0 0xc300>,
+ <0x0 0x1b000000 0x0 0x200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
- interrupts = <0x0 0xA0 0x4>,
- <0x0 0xA1 0x4>;
+ interrupts = <0x0 0xa0 0x4>,
+ <0x0 0xa1 0x4>;
dma-coherent;
clocks = <&sge0clk 0>;
local-mac-address = [00 00 00 00 00 00];
@@ -966,11 +956,11 @@
compatible = "apm,xgene1-sgenet";
status = "disabled";
reg = <0x0 0x1f210030 0x0 0xd100>,
- <0x0 0x1f200000 0x0 0Xc300>,
- <0x0 0x1B000000 0x0 0X8000>;
+ <0x0 0x1f200000 0x0 0xc300>,
+ <0x0 0x1b000000 0x0 0x8000>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
- interrupts = <0x0 0xAC 0x4>,
- <0x0 0xAD 0x4>;
+ interrupts = <0x0 0xac 0x4>,
+ <0x0 0xad 0x4>;
port-id = <1>;
dma-coherent;
local-mac-address = [00 00 00 00 00 00];
@@ -982,8 +972,8 @@
compatible = "apm,xgene1-xgenet";
status = "disabled";
reg = <0x0 0x1f610000 0x0 0xd100>,
- <0x0 0x1f600000 0x0 0Xc300>,
- <0x0 0x18000000 0x0 0X200>;
+ <0x0 0x1f600000 0x0 0xc300>,
+ <0x0 0x18000000 0x0 0x200>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
interrupts = <0x0 0x60 0x4>,
<0x0 0x61 0x4>,
@@ -1005,11 +995,11 @@
compatible = "apm,xgene1-xgenet";
status = "disabled";
reg = <0x0 0x1f620000 0x0 0xd100>,
- <0x0 0x1f600000 0x0 0Xc300>,
- <0x0 0x18000000 0x0 0X8000>;
+ <0x0 0x1f600000 0x0 0xc300>,
+ <0x0 0x18000000 0x0 0x8000>;
reg-names = "enet_csr", "ring_csr", "ring_cmd";
- interrupts = <0x0 0x6C 0x4>,
- <0x0 0x6D 0x4>;
+ interrupts = <0x0 0x6c 0x4>,
+ <0x0 0x6d 0x4>;
port-id = <1>;
dma-coherent;
clocks = <&xge1clk 0>;
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index dee2386d3b9b..334271a25f70 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -56,6 +56,315 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>;
};
+ /*
+ * Juno TRMs specify the size for these coresight components as 64K.
+ * The actual size is just 4K though 64K is reserved. Access to the
+ * unmapped reserved region results in a DECERR response.
+ */
+ etf@20010000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x20010000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* input port */
+ port@0 {
+ reg = <0>;
+ etf_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&main_funnel_out_port>;
+ };
+ };
+
+ /* output port */
+ port@1 {
+ reg = <0>;
+ etf_out_port: endpoint {
+ remote-endpoint = <&replicator_in_port0>;
+ };
+ };
+ };
+ };
+
+ tpiu@20030000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0 0x20030000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ tpiu_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port0>;
+ };
+ };
+ };
+
+ main-funnel@20040000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x20040000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ main_funnel_out_port: endpoint {
+ remote-endpoint = <&etf_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ main_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster0_funnel_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ main_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster1_funnel_out_port>;
+ };
+ };
+
+ };
+ };
+
+ etr@20070000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x20070000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ etr_in_port: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out_port1>;
+ };
+ };
+ };
+
+ etm0: etm@22040000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x22040000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ cluster0_etm0_out_port: endpoint {
+ remote-endpoint = <&cluster0_funnel_in_port0>;
+ };
+ };
+ };
+
+ cluster0-funnel@220c0000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x220c0000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ cluster0_funnel_out_port: endpoint {
+ remote-endpoint = <&main_funnel_in_port0>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ cluster0_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster0_etm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ cluster0_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster0_etm1_out_port>;
+ };
+ };
+ };
+ };
+
+ etm1: etm@22140000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x22140000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ cluster0_etm1_out_port: endpoint {
+ remote-endpoint = <&cluster0_funnel_in_port1>;
+ };
+ };
+ };
+
+ etm2: etm@23040000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23040000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ cluster1_etm0_out_port: endpoint {
+ remote-endpoint = <&cluster1_funnel_in_port0>;
+ };
+ };
+ };
+
+ cluster1-funnel@230c0000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0 0x230c0000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ cluster1_funnel_out_port: endpoint {
+ remote-endpoint = <&main_funnel_in_port1>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ cluster1_funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster1_etm0_out_port>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ cluster1_funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster1_etm1_out_port>;
+ };
+ };
+ port@3 {
+ reg = <2>;
+ cluster1_funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster1_etm2_out_port>;
+ };
+ };
+ port@4 {
+ reg = <3>;
+ cluster1_funnel_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&cluster1_etm3_out_port>;
+ };
+ };
+ };
+ };
+
+ etm3: etm@23140000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23140000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ cluster1_etm1_out_port: endpoint {
+ remote-endpoint = <&cluster1_funnel_in_port1>;
+ };
+ };
+ };
+
+ etm4: etm@23240000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23240000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ cluster1_etm2_out_port: endpoint {
+ remote-endpoint = <&cluster1_funnel_in_port2>;
+ };
+ };
+ };
+
+ etm5: etm@23340000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x23340000 0 0x1000>;
+
+ clocks = <&soc_smc50mhz>;
+ clock-names = "apb_pclk";
+ power-domains = <&scpi_devpd 0>;
+ port {
+ cluster1_etm3_out_port: endpoint {
+ remote-endpoint = <&cluster1_funnel_in_port3>;
+ };
+ };
+ };
+
+ coresight-replicator {
+ /*
+ * Non-configurable replicators don't show up on the
+ * AMBA bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-replicator";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* replicator output ports */
+ port@0 {
+ reg = <0>;
+ replicator_out_port0: endpoint {
+ remote-endpoint = <&tpiu_in_port>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ replicator_out_port1: endpoint {
+ remote-endpoint = <&etr_in_port>;
+ };
+ };
+
+ /* replicator input port */
+ port@2 {
+ reg = <0>;
+ replicator_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out_port>;
+ };
+ };
+ };
+ };
+
sram: sram@2e000000 {
compatible = "arm,juno-sram-ns", "mmio-sram";
reg = <0x0 0x2e000000 0x0 0x8000>;
@@ -119,12 +428,60 @@
};
};
+ scpi_devpd: scpi-power-domains {
+ compatible = "arm,scpi-power-domains";
+ num-domains = <2>;
+ #power-domain-cells = <1>;
+ };
+
scpi_sensors0: sensors {
compatible = "arm,scpi-sensors";
#thermal-sensor-cells = <1>;
};
};
+ thermal-zones {
+ pmic {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 0>;
+ };
+
+ soc {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 3>;
+ };
+
+ big_cluster_thermal_zone: big_cluster {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 21>;
+ status = "disabled";
+ };
+
+ little_cluster_thermal_zone: little_cluster {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 22>;
+ status = "disabled";
+ };
+
+ gpu0_thermal_zone: gpu0 {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 23>;
+ status = "disabled";
+ };
+
+ gpu1_thermal_zone: gpu1 {
+ polling-delay = <1000>;
+ polling-delay-passive = <100>;
+ thermal-sensors = <&scpi_sensors0 24>;
+ status = "disabled";
+ };
+ };
+
/include/ "juno-clocks.dtsi"
dma@7ff00000 {
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index d95d9e7e2dc0..123a58b29cbd 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -181,3 +181,43 @@
&pcie_ctlr {
status = "okay";
};
+
+&etm0 {
+ cpu = <&A57_0>;
+};
+
+&etm1 {
+ cpu = <&A57_1>;
+};
+
+&etm2 {
+ cpu = <&A53_0>;
+};
+
+&etm3 {
+ cpu = <&A53_1>;
+};
+
+&etm4 {
+ cpu = <&A53_2>;
+};
+
+&etm5 {
+ cpu = <&A53_3>;
+};
+
+&big_cluster_thermal_zone {
+ status = "okay";
+};
+
+&little_cluster_thermal_zone {
+ status = "okay";
+};
+
+&gpu0_thermal_zone {
+ status = "okay";
+};
+
+&gpu1_thermal_zone {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 88ecd6182b67..007be826efce 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -181,3 +181,43 @@
&pcie_ctlr {
status = "okay";
};
+
+&etm0 {
+ cpu = <&A72_0>;
+};
+
+&etm1 {
+ cpu = <&A72_1>;
+};
+
+&etm2 {
+ cpu = <&A53_0>;
+};
+
+&etm3 {
+ cpu = <&A53_1>;
+};
+
+&etm4 {
+ cpu = <&A53_2>;
+};
+
+&etm5 {
+ cpu = <&A53_3>;
+};
+
+&big_cluster_thermal_zone {
+ status = "okay";
+};
+
+&little_cluster_thermal_zone {
+ status = "okay";
+};
+
+&gpu0_thermal_zone {
+ status = "okay";
+};
+
+&gpu1_thermal_zone {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index dcfcf15a17f5..a7270eff6939 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -173,3 +173,27 @@
#include "juno-base.dtsi"
};
+
+&etm0 {
+ cpu = <&A57_0>;
+};
+
+&etm1 {
+ cpu = <&A57_1>;
+};
+
+&etm2 {
+ cpu = <&A53_0>;
+};
+
+&etm3 {
+ cpu = <&A53_1>;
+};
+
+&etm4 {
+ cpu = <&A53_2>;
+};
+
+&etm5 {
+ cpu = <&A53_3>;
+};
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index bec1f8b36f60..05faf2a8a35c 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb
dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb
dtb-$(CONFIG_ARCH_VULCAN) += vulcan-eval.dtb
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
new file mode 100644
index 000000000000..6f47dd2bb1db
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
@@ -0,0 +1,30 @@
+/dts-v1/;
+#include "bcm2837.dtsi"
+#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
+#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+
+/ {
+ compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
+ model = "Raspberry Pi 3 Model B";
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ leds {
+ act {
+ gpios = <&gpio 47 0>;
+ };
+
+ pwr {
+ label = "PWR";
+ gpios = <&gpio 35 0>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
+ };
+ };
+};
+
+&uart1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
new file mode 100644
index 000000000000..f2a31d06845d
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
@@ -0,0 +1,76 @@
+#include "../../../../arm/boot/dts/bcm283x.dtsi"
+
+/ {
+ compatible = "brcm,bcm2836";
+
+ soc {
+ ranges = <0x7e000000 0x3f000000 0x1000000>,
+ <0x40000000 0x40000000 0x00001000>;
+ dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
+
+ local_intc: local_intc {
+ compatible = "brcm,bcm2836-l1-intc";
+ reg = <0x40000000 0x100>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&local_intc>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupt-parent = <&local_intc>;
+ interrupts = <0>, // PHYS_SECURE_PPI
+ <1>, // PHYS_NONSECURE_PPI
+ <3>, // VIRT_PPI
+ <2>; // HYP_PPI
+ always-on;
+ };
+
+ cpus: cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x0 0x000000d8>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <1>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x0 0x000000e0>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <2>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x0 0x000000e8>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <3>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x0 0x000000f0>;
+ };
+ };
+};
+
+/* Make the BCM2835-style global interrupt controller be a child of the
+ * CPU-local interrupt controller.
+ */
+&intc {
+ compatible = "brcm,bcm2836-armctrl-ic";
+ reg = <0x7e00b200 0x200>;
+ interrupt-parent = <&local_intc>;
+ interrupts = <8>;
+};
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index ea5603fd106a..2d7872a36b91 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -40,10 +40,14 @@
aliases {
serial0 = &uart3;
+ serial1 = &uart0;
+ serial2 = &uart1;
+ serial3 = &uart2;
};
chosen {
stdout-path = "serial0:115200n8";
+ bootargs = "earlycon=uart8250,mmio32,0x66130000";
};
memory {
@@ -76,6 +80,18 @@
status = "ok";
};
+&uart0 {
+ status = "ok";
+};
+
+&uart1 {
+ status = "ok";
+};
+
+&uart2 {
+ status = "ok";
+};
+
&uart3 {
status = "ok";
};
@@ -125,6 +141,18 @@
};
};
+&sata_phy0 {
+ status = "ok";
+};
+
+&sata_phy1 {
+ status = "ok";
+};
+
+&sata {
+ status = "ok";
+};
+
&sdio0 {
status = "ok";
};
@@ -148,3 +176,12 @@
};
};
};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_sel>;
+ nand_sel: nand_sel {
+ function = "nand";
+ groups = "nand_grp";
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 46b78fa89f4c..f53b0955bfd3 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -251,6 +251,22 @@
mmu-masters;
};
+ pinctrl: pinctrl@6501d130 {
+ compatible = "brcm,ns2-pinmux";
+ reg = <0x6501d130 0x08>,
+ <0x660a0028 0x04>,
+ <0x660009b0 0x40>;
+ };
+
+ gpio_aon: gpio@65024800 {
+ compatible = "brcm,iproc-gpio";
+ reg = <0x65024800 0x50>,
+ <0x65024008 0x18>;
+ ngpios = <6>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
gic: interrupt-controller@65210000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
@@ -263,6 +279,26 @@
IRQ_TYPE_LEVEL_HIGH)>;
};
+ cci@65590000 {
+ compatible = "arm,cci-400";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x65590000 0x1000>;
+ ranges = <0 0x65590000 0x10000>;
+
+ pmu@9000 {
+ compatible = "arm,cci-400-pmu,r1",
+ "arm,cci-400-pmu";
+ reg = <0x9000 0x4000>;
+ interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
mdio_mux_iproc: mdio-mux@6602023c {
compatible = "brcm,mdio-mux-iproc";
reg = <0x6602023c 0x14>;
@@ -360,6 +396,16 @@
clock-names = "wdogclk", "apb_pclk";
};
+ gpio_g: gpio@660a0000 {
+ compatible = "brcm,iproc-gpio";
+ reg = <0x660a0000 0x50>;
+ ngpios = <32>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
i2c1: i2c@660b0000 {
compatible = "brcm,iproc-i2c";
reg = <0x660b0000 0x100>;
@@ -370,6 +416,36 @@
status = "disabled";
};
+ uart0: serial@66100000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x66100000 0x100>;
+ interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&iprocslow>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart1: serial@66110000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x66110000 0x100>;
+ interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&iprocslow>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart2: serial@66120000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x66120000 0x100>;
+ interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&iprocslow>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
uart3: serial@66130000 {
compatible = "snps,dw-apb-uart";
reg = <0x66130000 0x100>;
@@ -407,6 +483,49 @@
reg = <0x66220000 0x28>;
};
+ sata_phy: sata_phy@663f0100 {
+ compatible = "brcm,iproc-ns2-sata-phy";
+ reg = <0x663f0100 0x1f00>,
+ <0x663f004c 0x10>;
+ reg-names = "phy", "phy-ctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ sata: ahci@663f2000 {
+ compatible = "brcm,iproc-ahci", "generic-ahci";
+ reg = <0x663f2000 0x1000>;
+ reg-names = "ahci";
+ interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ phy-names = "sata-phy";
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ phy-names = "sata-phy";
+ };
+ };
+
sdio0: sdhci@66420000 {
compatible = "brcm,sdhci-iproc-cygnus";
reg = <0x66420000 0x100>;
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
index d8767b00862e..299f3ce969ab 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -249,7 +249,7 @@
buck2_reg: BUCK2 {
regulator-name = "vdd_atlas";
- regulator-min-microvolt = <1200000>;
+ regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
regulator-boot-on;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 6bd46c133010..e669fbd7f9c3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -51,7 +51,7 @@
#size-cells = <2>;
cpus {
- #address-cells = <2>;
+ #address-cells = <1>;
#size-cells = <0>;
/*
@@ -63,29 +63,37 @@
cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53";
- reg = <0x0 0x0>;
+ reg = <0x0>;
clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
- reg = <0x0 0x1>;
+ reg = <0x1>;
clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
};
cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53";
- reg = <0x0 0x2>;
+ reg = <0x2>;
clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
};
cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a53";
- reg = <0x0 0x3>;
+ reg = <0x3>;
clocks = <&clockgen 1 0>;
+ next-level-cache = <&l2>;
+ };
+
+ l2: l2-cache {
+ compatible = "cache";
};
};
@@ -465,6 +473,7 @@
interrupts = <0 60 0x4>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
};
usb1: usb3@3000000 {
@@ -473,6 +482,7 @@
interrupts = <0 61 0x4>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
};
usb2: usb3@3100000 {
@@ -481,6 +491,7 @@
interrupts = <0 63 0x4>;
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
};
sata: sata@3200000 {
@@ -522,6 +533,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <4>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -546,6 +558,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <2>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
@@ -570,6 +583,7 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
+ dma-coherent;
num-lanes = <2>;
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 3187c822afa3..21023a388c29 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -51,7 +51,7 @@
#size-cells = <2>;
cpus {
- #address-cells = <2>;
+ #address-cells = <1>;
#size-cells = <0>;
/*
@@ -65,57 +65,81 @@
cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x0>;
+ reg = <0x0>;
clocks = <&clockgen 1 0>;
+ next-level-cache = <&cluster0_l2>;
};
cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x1>;
+ reg = <0x1>;
clocks = <&clockgen 1 0>;
+ next-level-cache = <&cluster0_l2>;
};
cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x100>;
+ reg = <0x100>;
clocks = <&clockgen 1 1>;
+ next-level-cache = <&cluster1_l2>;
};
cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x101>;
+ reg = <0x101>;
clocks = <&clockgen 1 1>;
+ next-level-cache = <&cluster1_l2>;
};
cpu@200 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x200>;
+ reg = <0x200>;
clocks = <&clockgen 1 2>;
+ next-level-cache = <&cluster2_l2>;
};
cpu@201 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x201>;
+ reg = <0x201>;
clocks = <&clockgen 1 2>;
+ next-level-cache = <&cluster2_l2>;
};
cpu@300 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x300>;
+ reg = <0x300>;
clocks = <&clockgen 1 3>;
+ next-level-cache = <&cluster3_l2>;
};
cpu@301 {
device_type = "cpu";
compatible = "arm,cortex-a57";
- reg = <0x0 0x301>;
+ reg = <0x301>;
clocks = <&clockgen 1 3>;
+ next-level-cache = <&cluster3_l2>;
+ };
+
+ cluster0_l2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ cluster1_l2: l2-cache1 {
+ compatible = "cache";
+ };
+
+ cluster2_l2: l2-cache2 {
+ compatible = "cache";
+ };
+
+ cluster3_l2: l2-cache3 {
+ compatible = "cache";
};
};
@@ -672,6 +696,7 @@
interrupts = <0 80 0x4>; /* Level high type */
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
};
usb1: usb3@3110000 {
@@ -681,6 +706,7 @@
interrupts = <0 81 0x4>; /* Level high type */
dr_mode = "host";
snps,quirk-frame-length-adjustment = <0x20>;
+ snps,dis_rxdet_inp3_quirk;
};
ccn@4000000 {
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index e92a30c87a82..593c7e43de79 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -66,6 +66,149 @@
status = "ok";
};
+ /*
+ * Legend: proper name = the GPIO line is used as GPIO
+ * NC = not connected (not routed from the SoC)
+ * "[PER]" = pin is muxed for peripheral (not GPIO)
+ * "" = no idea, schematic doesn't say, could be
+ * unrouted (not connected to any external pin)
+ * LSEC = Low Speed External Connector
+ * HSEC = High Speed External Connector
+ *
+ * Pin assignments taken from LeMaker and CircuitCo Schematics
+ * Rev A1.
+ *
+ * For the lines routed to the external connectors the
+ * lines are named after the 96Boards CE Specification 1.0,
+ * Appendix "Expansion Connector Signal Description".
+ *
+ * When the 96Board naming of a line and the schematic name of
+ * the same line are in conflict, the 96Board specification
+ * takes precedence, which means that the external UART on the
+ * LSEC is named UART0 while the schematic and SoC names this
+ * UART2. This is only for the informational lines i.e. "[FOO]",
+ * the GPIO named lines "GPIO-A" thru "GPIO-L" are the only
+ * ones actually used for GPIO.
+ */
+ gpio0: gpio@f8011000 {
+ gpio-line-names = "PWR_HOLD", "DSI_SEL",
+ "USB_HUB_RESET_N", "USB_SEL", "HDMI_PD", "WL_REG_ON",
+ "PWRON_DET", "5V_HUB_EN";
+ };
+
+ gpio1: gpio@f8012000 {
+ gpio-line-names = "SD_DET", "HDMI_INT", "PMU_IRQ_N",
+ "WL_HOST_WAKE", "NC", "NC", "NC", "BT_REG_ON";
+ };
+
+ gpio2: gpio@f8013000 {
+ gpio-line-names =
+ "GPIO-A", /* LSEC Pin 23: GPIO2_0 */
+ "GPIO-B", /* LSEC Pin 24: GPIO2_1 */
+ "GPIO-C", /* LSEC Pin 25: GPIO2_2 */
+ "GPIO-D", /* LSEC Pin 26: GPIO2_3 */
+ "GPIO-E", /* LSEC Pin 27: GPIO2_4 */
+ "USB_ID_DET", "USB_VBUS_DET",
+ "GPIO-H"; /* LSEC Pin 30: GPIO2_7 */
+ };
+
+ gpio3: gpio@f8014000 {
+ gpio-line-names = "GPIO3_0", "NC", "NC", "", "NC", "",
+ "WLAN_ACTIVE", "NC", "NC";
+ };
+
+ gpio4: gpio@f7020000 {
+ gpio-line-names = "USER_LED1", "USER_LED2", "USER_LED3",
+ "USER_LED4", "SD_SEL", "NC", "NC", "BT_ACTIVE";
+ };
+
+ gpio5: gpio@f7021000 {
+ gpio-line-names = "NC", "NC",
+ "[UART1_RxD]", /* LSEC Pin 11: UART3_RX */
+ "[UART1_TxD]", /* LSEC Pin 13: UART3_TX */
+ "[AUX_SSI1]", "NC",
+ "[PCM_CLK]", /* LSEC Pin 18: MODEM_PCM_XCLK */
+ "[PCM_FS]"; /* LSEC Pin 16: MODEM_PCM_XFS */
+ };
+
+ gpio6: gpio@f7022000 {
+ gpio-line-names =
+ "[SPI0_DIN]", /* Pin 10: SPI0_DI */
+ "[SPI0_DOUT]", /* Pin 14: SPI0_DO */
+ "[SPI0_CS]", /* Pin 12: SPI0_CS_N */
+ "[SPI0_SCLK]", /* Pin 8: SPI0_SCLK */
+ "NC", "NC", "NC",
+ "GPIO-G"; /* Pin 29: GPIO6_7_DSI_TE0 */
+ };
+
+ gpio7: gpio@f7023000 {
+ gpio-line-names = "NC", "NC", "NC", "NC",
+ "[PCM_DI]", /* Pin 22: MODEM_PCM_DI */
+ "[PCM_DO]", /* Pin 20: MODEM_PCM_DO */
+ "NC", "NC";
+ };
+
+ gpio8: gpio@f7024000 {
+ gpio-line-names = "NC", "[CEC_CLK_19_2MHZ]", "NC",
+ "", "", "", "", "", "";
+ };
+
+ gpio9: gpio@f7025000 {
+ gpio-line-names = "",
+ "GPIO-J", /* LSEC Pin 32: ISP_PWDN0_GPIO9_1 */
+ "GPIO-L", /* LSEC Pin 34: ISP_PWDN1_GPIO9_2 */
+ "NC", "NC", "NC", "NC", "[ISP_CCLK0]";
+ };
+
+ gpio10: gpio@f7026000 {
+ gpio-line-names = "BOOT_SEL",
+ "[ISP_CCLK1]",
+ "GPIO-I", /* LSEC Pin 31: ISP_RSTB0_GPIO10_2 */
+ "GPIO-K", /* LSEC Pin 33: ISP_RSTB1_GPIO10_3 */
+ "NC", "NC",
+ "[I2C2_SDA]", /* HSEC Pin 34: ISP0_SDA */
+ "[I2C2_SCL]"; /* HSEC Pin 32: ISP0_SCL */
+ };
+
+ gpio11: gpio@f7027000 {
+ gpio-line-names =
+ "[I2C3_SDA]", /* HSEC Pin 38: ISP1_SDA */
+ "[I2C3_SCL]", /* HSEC Pin 36: ISP1_SCL */
+ "", "NC", "NC", "NC", "", "";
+ };
+
+ gpio12: gpio@f7028000 {
+ gpio-line-names = "[BT_PCM_XFS]", "[BT_PCM_DI]",
+ "[BT_PCM_DO]",
+ "NC", "NC", "NC", "NC",
+ "GPIO-F"; /* LSEC Pin 28: BL_PWM_GPIO12_7 */
+ };
+
+ gpio13: gpio@f7029000 {
+ gpio-line-names = "[UART0_RX]", "[UART0_TX]",
+ "[BT_UART1_CTS]", "[BT_UART1_RTS]",
+ "[BT_UART1_RX]", "[BT_UART1_TX]",
+ "[UART0_CTS]", /* LSEC Pin 3: UART2_CTS_N */
+ "[UART0_RTS]"; /* LSEC Pin 9: UART2_RTS_N */
+ };
+
+ gpio14: gpio@f702a000 {
+ gpio-line-names =
+ "[UART0_RxD]", /* LSEC Pin 7: UART2_RX */
+ "[UART0_TxD]", /* LSEC Pin 5: UART2_TX */
+ "[I2C0_SCL]", /* LSEC Pin 15: I2C0_SCL */
+ "[I2C0_SDA]", /* LSEC Pin 17: I2C0_SDA */
+ "[I2C1_SCL]", /* LSEC Pin 19: I2C1_SCL */
+ "[I2C1_SDA]", /* LSEC Pin 21: I2C1_SDA */
+ "[I2C2_SCL]", "[I2C2_SDA]";
+ };
+
+ gpio15: gpio@f702b000 {
+ gpio-line-names = "", "", "", "", "", "", "NC", "";
+ };
+
+ /* GPIO blocks 16 thru 19 do not appear to be routed to pins */
+
dwmmc_2: dwmmc2@f723f000 {
ti,non-removable;
non-removable;
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 189d21541f9c..4f270410a5cb 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -5,6 +5,7 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/hisi,hi6220-resets.h>
#include <dt-bindings/clock/hi6220-clock.h>
#include <dt-bindings/pinctrl/hisi.h>
#include <dt-bindings/thermal/thermal.h>
@@ -252,6 +253,7 @@
compatible = "hisilicon,hi6220-mediactrl", "syscon";
reg = <0x0 0xf4410000 0x0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
pm_ctrl: pm_ctrl@f7032000 {
@@ -336,6 +338,22 @@
clock-names = "timer1", "timer2", "apb_pclk";
};
+ rtc0: rtc@f8003000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x0 0xf8003000 0x0 0x1000>;
+ interrupts = <0 12 4>;
+ clocks = <&ao_ctrl HI6220_RTC0_PCLK>;
+ clock-names = "apb_pclk";
+ };
+
+ rtc1: rtc@f8004000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x0 0xf8004000 0x0 0x1000>;
+ interrupts = <0 8 4>;
+ clocks = <&ao_ctrl HI6220_RTC1_PCLK>;
+ clock-names = "apb_pclk";
+ };
+
pmx0: pinmux@f7010000 {
compatible = "pinctrl-single";
reg = <0x0 0xf7010000 0x0 0x27c>;
diff --git a/arch/arm64/boot/dts/lg/Makefile b/arch/arm64/boot/dts/lg/Makefile
index b0cc64964171..5c7b54c12adc 100644
--- a/arch/arm64/boot/dts/lg/Makefile
+++ b/arch/arm64/boot/dts/lg/Makefile
@@ -1,4 +1,5 @@
dtb-$(CONFIG_ARCH_LG1K) += lg1312-ref.dtb
+dtb-$(CONFIG_ARCH_LG1K) += lg1313-ref.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/lg/lg1313-ref.dts b/arch/arm64/boot/dts/lg/lg1313-ref.dts
new file mode 100644
index 000000000000..df0ece43cfbf
--- /dev/null
+++ b/arch/arm64/boot/dts/lg/lg1313-ref.dts
@@ -0,0 +1,36 @@
+/*
+ * dts file for lg1313 Reference Board.
+ *
+ * Copyright (C) 2016, LG Electronics
+ */
+
+/dts-v1/;
+
+#include "lg1313.dtsi"
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ model = "LG Electronics, DTV SoC LG1313 Reference Board";
+ compatible = "lge,lg1313-ref", "lge,lg1313";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x00000000 0x20000000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
new file mode 100644
index 000000000000..e703e1149c75
--- /dev/null
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
@@ -0,0 +1,351 @@
+/*
+ * dts file for lg1313 SoC
+ *
+ * Copyright (C) 2016, LG Electronics
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ compatible = "lge,lg1313";
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x0>;
+ next-level-cache = <&L2_0>;
+ };
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+ L2_0: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-0.2", "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0x84000003>;
+ };
+
+ gic: interrupt-controller@c0001000 {
+ #interrupt-cells = <3>;
+ compatible = "arm,gic-400";
+ interrupt-controller;
+ reg = <0x0 0xc0001000 0x1000>,
+ <0x0 0xc0002000 0x2000>,
+ <0x0 0xc0004000 0x2000>,
+ <0x0 0xc0006000 0x2000>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0x0f) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_RAW(0x0f) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_RAW(0x0f) |
+ IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_RAW(0x0f) |
+ IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ clk_bus: clk_bus {
+ #clock-cells = <0>;
+
+ compatible = "fixed-clock";
+ clock-frequency = <198000000>;
+ clock-output-names = "BUSCLK";
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ eth0: ethernet@c3700000 {
+ compatible = "cdns,gem";
+ reg = <0x0 0xc3700000 0x1000>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>, <&clk_bus>;
+ clock-names = "hclk", "pclk";
+ phy-mode = "rmii";
+ /* Filled in by boot */
+ mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ amba {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ #interrupts-cells = <3>;
+
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ ranges;
+
+ timers: timer@fd100000 {
+ compatible = "arm,sp804";
+ reg = <0x0 0xfd100000 0x1000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ wdog: watchdog@fd200000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xfd200000 0x1000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ uart0: serial@fe000000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfe000000 0x1000>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ uart1: serial@fe100000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfe100000 0x1000>;
+ interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ uart2: serial@fe200000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0xfe200000 0x1000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ spi0: ssp@fe800000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x0 0xfe800000 0x1000>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ spi1: ssp@fe900000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x0 0xfe900000 0x1000>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ dmac0: dma@c1128000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x0 0xc1128000 0x1000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ gpio0: gpio@fd400000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd400000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio1: gpio@fd410000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd410000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio2: gpio@fd420000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd420000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio3: gpio@fd430000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd430000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ gpio4: gpio@fd440000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd440000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio5: gpio@fd450000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd450000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio6: gpio@fd460000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd460000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio7: gpio@fd470000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd470000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio8: gpio@fd480000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd480000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio9: gpio@fd490000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd490000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio10: gpio@fd4a0000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd4a0000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio11: gpio@fd4b0000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd4b0000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ gpio12: gpio@fd4c0000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd4c0000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio13: gpio@fd4d0000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd4d0000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio14: gpio@fd4e0000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd4e0000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio15: gpio@fd4f0000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd4f0000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio16: gpio@fd500000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd500000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ status="disabled";
+ };
+ gpio17: gpio@fd510000 {
+ #gpio-cells = <2>;
+ compatible = "arm,pl061", "arm,primecell";
+ gpio-controller;
+ reg = <0x0 0xfd510000 0x1000>;
+ clocks = <&clk_bus>;
+ clock-names = "apb_pclk";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index 86110a6ae330..1372e9a6aaa4 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -76,3 +76,8 @@
&usb3 {
status = "okay";
};
+
+/* CON17 (PCIe) / CON12 (mini-PCIe) */
+&pcie0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 9e2efb882983..c4762538ec01 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -105,6 +105,41 @@
status = "disabled";
};
+ nb_perih_clk: nb-periph-clk@13000{
+ compatible = "marvell,armada-3700-periph-clock-nb";
+ reg = <0x13000 0x100>;
+ clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+ <&tbg 3>, <&xtalclk>;
+ #clock-cells = <1>;
+ };
+
+ sb_perih_clk: sb-periph-clk@18000{
+ compatible = "marvell,armada-3700-periph-clock-sb";
+ reg = <0x18000 0x100>;
+ clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
+ <&tbg 3>, <&xtalclk>;
+ #clock-cells = <1>;
+ };
+
+ tbg: tbg@13200 {
+ compatible = "marvell,armada-3700-tbg-clock";
+ reg = <0x13200 0x100>;
+ clocks = <&xtalclk>;
+ #clock-cells = <1>;
+ };
+
+ gpio1: gpio@13800 {
+ compatible = "marvell,mvebu-gpio-3700",
+ "syscon", "simple-mfd";
+ reg = <0x13800 0x500>;
+
+ xtalclk: xtal-clk {
+ compatible = "marvell,armada-3700-xtal-clock";
+ clock-output-names = "xtal";
+ #clock-cells = <0>;
+ };
+ };
+
usb3: usb@58000 {
compatible = "marvell,armada3700-xhci",
"generic-xhci";
@@ -141,5 +176,30 @@
<0x1d40000 0x40000>; /* GICR */
};
};
+
+ pcie0: pcie@d0070000 {
+ compatible = "marvell,armada-3700-pcie";
+ device_type = "pci";
+ status = "disabled";
+ reg = <0 0xd0070000 0 0x20000>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ msi-parent = <&pcie0>;
+ msi-controller;
+ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
+ 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+ <0 0 0 3 &pcie_intc 2>,
+ <0 0 0 4 &pcie_intc 3>;
+ pcie_intc: interrupt-controller {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 20d256b32670..eab1a42fb934 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -141,7 +141,7 @@
};
xor@400000 {
- compatible = "marvell,mv-xor-v2";
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
reg = <0x400000 0x1000>,
<0x410000 0x1000>;
msi-parent = <&gic_v2m0>;
@@ -149,7 +149,7 @@
};
xor@420000 {
- compatible = "marvell,mv-xor-v2";
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
reg = <0x420000 0x1000>,
<0x430000 0x1000>;
msi-parent = <&gic_v2m0>;
@@ -157,7 +157,7 @@
};
xor@440000 {
- compatible = "marvell,mv-xor-v2";
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
reg = <0x440000 0x1000>,
<0x450000 0x1000>;
msi-parent = <&gic_v2m0>;
@@ -165,7 +165,7 @@
};
xor@460000 {
- compatible = "marvell,mv-xor-v2";
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
reg = <0x460000 0x1000>,
<0x470000 0x1000>;
msi-parent = <&gic_v2m0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 367138bae3e0..da31bbbbb59e 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -107,6 +107,24 @@
status = "disabled";
};
+ cpm_xor0: xor@6a0000 {
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
+ reg = <0x6a0000 0x1000>,
+ <0x6b0000 0x1000>;
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+ clocks = <&cpm_syscon0 1 8>;
+ };
+
+ cpm_xor1: xor@6c0000 {
+ compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
+ reg = <0x6c0000 0x1000>,
+ <0x6d0000 0x1000>;
+ dma-coherent;
+ msi-parent = <&gic_v2m0>;
+ clocks = <&cpm_syscon0 1 7>;
+ };
+
cpm_spi0: spi@700600 {
compatible = "marvell,armada-380-spi";
reg = <0x700600 0x50>;
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index e0a4bff2fc17..9fbfd3238469 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt6755-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt6755-evb.dts b/arch/arm64/boot/dts/mediatek/mt6755-evb.dts
new file mode 100644
index 000000000000..c568d49235af
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6755-evb.dts
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt6755.dtsi"
+
+/ {
+ model = "MediaTek MT6755 EVB";
+ compatible = "mediatek,mt6755-evb", "mediatek,mt6755";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x1e800000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt6755.dtsi b/arch/arm64/boot/dts/mediatek/mt6755.dtsi
new file mode 100644
index 000000000000..01ba77669717
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6755.dtsi
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "mediatek,mt6755";
+ interrupt-parent = <&sysirq>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x001>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x002>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x003>;
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x100>;
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x101>;
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x102>;
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x103>;
+ };
+ };
+
+ uart_clk: dummy26m {
+ compatible = "fixed-clock";
+ clock-frequency = <26000000>;
+ #clock-cells = <0>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ sysirq: intpol-controller@10200620 {
+ compatible = "mediatek,mt6755-sysirq",
+ "mediatek,mt6577-sysirq";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10200620 0 0x20>;
+ };
+
+ gic: interrupt-controller@10231000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ interrupt-controller;
+ reg = <0 0x10231000 0 0x1000>,
+ <0 0x10232000 0 0x2000>,
+ <0 0x10234000 0 0x2000>,
+ <0 0x10236000 0 0x2000>;
+ };
+
+ uart0: serial@11002000 {
+ compatible = "mediatek,mt6755-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11002000 0 0x400>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@11003000 {
+ compatible = "mediatek,mt6755-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11003000 0 0x400>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 77b8c4e388ca..10f638f4e7d8 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -26,6 +26,23 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ ovl0 = &ovl0;
+ ovl1 = &ovl1;
+ rdma0 = &rdma0;
+ rdma1 = &rdma1;
+ rdma2 = &rdma2;
+ wdma0 = &wdma0;
+ wdma1 = &wdma1;
+ color0 = &color0;
+ color1 = &color1;
+ split0 = &split0;
+ split1 = &split1;
+ dpi0 = &dpi0;
+ dsi0 = &dsi0;
+ dsi1 = &dsi1;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -366,6 +383,26 @@
#clock-cells = <1>;
};
+ mipi_tx0: mipi-dphy@10215000 {
+ compatible = "mediatek,mt8173-mipi-tx";
+ reg = <0 0x10215000 0 0x1000>;
+ clocks = <&clk26m>;
+ clock-output-names = "mipi_tx0_pll";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ mipi_tx1: mipi-dphy@10216000 {
+ compatible = "mediatek,mt8173-mipi-tx";
+ reg = <0 0x10216000 0 0x1000>;
+ clocks = <&clk26m>;
+ clock-output-names = "mipi_tx1_pll";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@10220000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
@@ -675,9 +712,181 @@
mmsys: clock-controller@14000000 {
compatible = "mediatek,mt8173-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
#clock-cells = <1>;
};
+ ovl0: ovl@1400c000 {
+ compatible = "mediatek,mt8173-disp-ovl";
+ reg = <0 0x1400c000 0 0x1000>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_OVL0>;
+ iommus = <&iommu M4U_PORT_DISP_OVL0>;
+ mediatek,larb = <&larb0>;
+ };
+
+ ovl1: ovl@1400d000 {
+ compatible = "mediatek,mt8173-disp-ovl";
+ reg = <0 0x1400d000 0 0x1000>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_OVL1>;
+ iommus = <&iommu M4U_PORT_DISP_OVL1>;
+ mediatek,larb = <&larb4>;
+ };
+
+ rdma0: rdma@1400e000 {
+ compatible = "mediatek,mt8173-disp-rdma";
+ reg = <0 0x1400e000 0 0x1000>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_RDMA0>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+ mediatek,larb = <&larb0>;
+ };
+
+ rdma1: rdma@1400f000 {
+ compatible = "mediatek,mt8173-disp-rdma";
+ reg = <0 0x1400f000 0 0x1000>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_RDMA1>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+ mediatek,larb = <&larb4>;
+ };
+
+ rdma2: rdma@14010000 {
+ compatible = "mediatek,mt8173-disp-rdma";
+ reg = <0 0x14010000 0 0x1000>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_RDMA2>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA2>;
+ mediatek,larb = <&larb4>;
+ };
+
+ wdma0: wdma@14011000 {
+ compatible = "mediatek,mt8173-disp-wdma";
+ reg = <0 0x14011000 0 0x1000>;
+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_WDMA0>;
+ iommus = <&iommu M4U_PORT_DISP_WDMA0>;
+ mediatek,larb = <&larb0>;
+ };
+
+ wdma1: wdma@14012000 {
+ compatible = "mediatek,mt8173-disp-wdma";
+ reg = <0 0x14012000 0 0x1000>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_WDMA1>;
+ iommus = <&iommu M4U_PORT_DISP_WDMA1>;
+ mediatek,larb = <&larb4>;
+ };
+
+ color0: color@14013000 {
+ compatible = "mediatek,mt8173-disp-color";
+ reg = <0 0x14013000 0 0x1000>;
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_COLOR0>;
+ };
+
+ color1: color@14014000 {
+ compatible = "mediatek,mt8173-disp-color";
+ reg = <0 0x14014000 0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_COLOR1>;
+ };
+
+ aal@14015000 {
+ compatible = "mediatek,mt8173-disp-aal";
+ reg = <0 0x14015000 0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_AAL>;
+ };
+
+ gamma@14016000 {
+ compatible = "mediatek,mt8173-disp-gamma";
+ reg = <0 0x14016000 0 0x1000>;
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_GAMMA>;
+ };
+
+ merge@14017000 {
+ compatible = "mediatek,mt8173-disp-merge";
+ reg = <0 0x14017000 0 0x1000>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_MERGE>;
+ };
+
+ split0: split@14018000 {
+ compatible = "mediatek,mt8173-disp-split";
+ reg = <0 0x14018000 0 0x1000>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_SPLIT0>;
+ };
+
+ split1: split@14019000 {
+ compatible = "mediatek,mt8173-disp-split";
+ reg = <0 0x14019000 0 0x1000>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_SPLIT1>;
+ };
+
+ ufoe@1401a000 {
+ compatible = "mediatek,mt8173-disp-ufoe";
+ reg = <0 0x1401a000 0 0x1000>;
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DISP_UFOE>;
+ };
+
+ dsi0: dsi@1401b000 {
+ compatible = "mediatek,mt8173-dsi";
+ reg = <0 0x1401b000 0 0x1000>;
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DSI0_ENGINE>,
+ <&mmsys CLK_MM_DSI0_DIGITAL>,
+ <&mipi_tx0>;
+ clock-names = "engine", "digital", "hs";
+ phys = <&mipi_tx0>;
+ phy-names = "dphy";
+ status = "disabled";
+ };
+
+ dsi1: dsi@1401c000 {
+ compatible = "mediatek,mt8173-dsi";
+ reg = <0 0x1401c000 0 0x1000>;
+ interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DSI1_ENGINE>,
+ <&mmsys CLK_MM_DSI1_DIGITAL>,
+ <&mipi_tx1>;
+ clock-names = "engine", "digital", "hs";
+ phys = <&mipi_tx1>;
+ phy-names = "dphy";
+ status = "disabled";
+ };
+
+ dpi0: dpi@1401d000 {
+ compatible = "mediatek,mt8173-dpi";
+ reg = <0 0x1401d000 0 0x1000>;
+ interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_DPI_PIXEL>,
+ <&mmsys CLK_MM_DPI_ENGINE>,
+ <&apmixedsys CLK_APMIXED_TVDPLL>;
+ clock-names = "pixel", "engine", "pll";
+ status = "disabled";
+ };
+
pwm0: pwm@1401e000 {
compatible = "mediatek,mt8173-disp-pwm",
"mediatek,mt6595-disp-pwm";
@@ -700,6 +909,14 @@
status = "disabled";
};
+ mutex: mutex@14020000 {
+ compatible = "mediatek,mt8173-disp-mutex";
+ reg = <0 0x14020000 0 0x1000>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+ clocks = <&mmsys CLK_MM_MUTEX_32K>;
+ };
+
larb0: larb@14021000 {
compatible = "mediatek,mt8173-smi-larb";
reg = <0 0x14021000 0 0x1000>;
@@ -719,6 +936,12 @@
clock-names = "apb", "smi";
};
+ od@14023000 {
+ compatible = "mediatek,mt8173-disp-od";
+ reg = <0 0x14023000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_DISP_OD>;
+ };
+
larb4: larb@14027000 {
compatible = "mediatek,mt8173-smi-larb";
reg = <0 0x14027000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 316c92c03821..5fda583351d7 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/mfd/max77620.h>
+
#include "tegra210.dtsi"
/ {
@@ -5,10 +7,15 @@
compatible = "nvidia,p2180", "nvidia,tegra210";
aliases {
+ rtc0 = "/i2c@7000d000/pmic@3c";
rtc1 = "/rtc@7000e000";
serial0 = &uarta;
};
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
memory {
device_type = "memory";
reg = <0x0 0x80000000 0x1 0x0>;
@@ -19,6 +26,248 @@
status = "okay";
};
+ i2c@7000d000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ pmic: pmic@3c {
+ compatible = "maxim,max77620";
+ reg = <0x3c>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77620_default>;
+
+ max77620_default: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ };
+
+ gpio1 {
+ pins = "gpio1";
+ function = "fps-out";
+ drive-push-pull = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <7>;
+ maxim,active-fps-power-down-slot = <0>;
+ };
+
+ gpio2_3 {
+ pins = "gpio2", "gpio3";
+ function = "fps-out";
+ drive-open-drain = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ gpio4 {
+ pins = "gpio4";
+ function = "32k-out1";
+ };
+
+ gpio5_6_7 {
+ pins = "gpio5", "gpio6", "gpio7";
+ function = "gpio";
+ drive-push-pull = <1>;
+ };
+ };
+
+ fps {
+ fps0 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ maxim,suspend-fps-time-period-us = <1280>;
+ };
+
+ fps1 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
+ maxim,suspend-fps-time-period-us = <1280>;
+ };
+
+ fps2 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ };
+ };
+
+ regulators {
+ in-ldo0-1-supply = <&vdd_pre>;
+ in-ldo7-8-supply = <&vdd_pre>;
+ in-sd3-supply = <&vdd_5v0_sys>;
+
+ vdd_soc: sd0 {
+ regulator-name = "VDD_SOC";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <146>;
+ regulator-ramp-delay = <27500>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ vdd_ddr: sd1 {
+ regulator-name = "VDD_DDR_1V1_PMIC";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <130>;
+ regulator-ramp-delay = <27500>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ vdd_pre: sd2 {
+ regulator-name = "VDD_PRE_REG_1V35";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+
+ regulator-enable-ramp-delay = <176>;
+ regulator-ramp-delay = <27500>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ vdd_1v8: sd3 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <242>;
+ regulator-ramp-delay = <27500>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ vdd_sys_1v2: ldo0 {
+ regulator-name = "AVDD_SYS_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <26>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ vdd_pex_1v05: ldo1 {
+ regulator-name = "VDD_PEX_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+
+ regulator-enable-ramp-delay = <22>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ vddio_sdmmc: ldo2 {
+ regulator-name = "VDDIO_SDMMC";
+ /*
+ * Technically this supply should have a
+ * supported range of 1.8 - 3.3 V.
+ * However, that would cause the SDHCI
+ * driver to request 2.7 V upon access,
+ * which in turn would break traffic.
+ * Leave it at 3.3 V for
+ * now.
+ */
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <62>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ vdd_cam_hv: ldo3 {
+ regulator-name = "VDD_CAM_HV";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-enable-ramp-delay = <50>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ vdd_rtc: ldo4 {
+ regulator-name = "VDD_RTC";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <22>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ };
+
+ vdd_ts_hv: ldo5 {
+ regulator-name = "VDD_TS_HV";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-enable-ramp-delay = <62>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ };
+
+ vdd_ts: ldo6 {
+ regulator-name = "VDD_TS_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-enable-ramp-delay = <36>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <7>;
+ maxim,active-fps-power-down-slot = <0>;
+ };
+
+ avdd_1v05_pll: ldo7 {
+ regulator-name = "AVDD_1V05_PLL";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-enable-ramp-delay = <24>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+
+ avdd_1v05: ldo8 {
+ regulator-name = "AVDD_SATA_HDMI_DP_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+
+ regulator-enable-ramp-delay = <22>;
+ regulator-ramp-delay = <100000>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ };
+ };
+ };
+ };
+
pmc@7000e400 {
nvidia,invert-interrupt;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 683b339a980c..983775e637a4 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -6,4 +6,49 @@
/ {
model = "NVIDIA Jetson TX1 Developer Kit";
compatible = "nvidia,p2371-2180", "nvidia,tegra210";
+
+ host1x@50000000 {
+ dsi@54300000 {
+ status = "okay";
+
+ avdd-dsi-csi-supply = <&vdd_dsi_csi>;
+
+ panel@0 {
+ compatible = "auo,b080uan01";
+ reg = <0>;
+
+ enable-gpios = <&gpio TEGRA_GPIO(V, 2)
+ GPIO_ACTIVE_HIGH>;
+ power-supply = <&vdd_5v0_io>;
+ backlight = <&backlight>;
+ };
+ };
+ };
+
+ i2c@7000c400 {
+ backlight: backlight@2c {
+ compatible = "ti,lp8557";
+ reg = <0x2c>;
+
+ dev-ctrl = /bits/ 8 <0x80>;
+ init-brt = /bits/ 8 <0xff>;
+
+ pwm-period = <29334>;
+
+ pwms = <&pwm 0 29334>;
+ pwm-names = "lp8557";
+
+ /* 3 LED string */
+ rom_14h {
+ rom-addr = /bits/ 8 <0x14>;
+ rom-val = /bits/ 8 <0x87>;
+ };
+
+ /* boost frequency 1 MHz */
+ rom_13h {
+ rom-addr = /bits/ 8 <0x13>;
+ rom-val = /bits/ 8 <0x01>;
+ };
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index a2480c0c7e72..e5fc67bf46c2 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -4,6 +4,24 @@
model = "NVIDIA Tegra210 P2597 I/O board";
compatible = "nvidia,p2597", "nvidia,tegra210";
+ host1x@50000000 {
+ dpaux@54040000 {
+ status = "okay";
+ };
+
+ sor@54580000 {
+ status = "okay";
+
+ avdd-io-supply = <&avdd_1v05>;
+ vdd-pll-supply = <&vdd_1v8>;
+ hdmi-supply = <&vdd_hdmi>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio = <&gpio TEGRA_GPIO(CC, 1)
+ GPIO_ACTIVE_LOW>;
+ };
+ };
+
pinmux: pinmux@700008d4 {
pinctrl-names = "boot";
pinctrl-0 = <&state_boot>;
@@ -1261,6 +1279,169 @@
};
};
+ pwm@7000a000 {
+ status = "okay";
+ };
+
+ i2c@7000c400 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ exp1: gpio@74 {
+ compatible = "ti,tca9539";
+ reg = <0x74>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+
+ /* HDMI DDC */
+ hdmi_ddc: i2c@7000c700 {
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
+ usb@70090000 {
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-3}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-6}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-5}>;
+ phy-names = "usb2-0", "usb2-1", "usb2-2", "usb2-3", "usb3-0",
+ "usb3-1";
+
+ dvddio-pex-supply = <&vdd_pex_1v05>;
+ hvddio-pex-supply = <&vdd_1v8>;
+ avdd-usb-supply = <&vdd_3v3_sys>;
+ /* XXX what are these? */
+ avdd-pll-utmip-supply = <&vdd_1v8>;
+ avdd-pll-uerefe-supply = <&vdd_pex_1v05>;
+ dvdd-usb-ss-pll-supply = <&vdd_pex_1v05>;
+ hvdd-usb-ss-pll-e-supply = <&vdd_1v8>;
+
+ status = "okay";
+ };
+
+ padctl@7009f000 {
+ status = "okay";
+
+ pads {
+ usb2 {
+ status = "okay";
+
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-3 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+
+ pcie {
+ status = "okay";
+
+ lanes {
+ pcie-0 {
+ nvidia,function = "pcie-x1";
+ status = "okay";
+ };
+
+ pcie-1 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-2 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-3 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-4 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-5 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+
+ pcie-6 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+ };
+ };
+
+ sata {
+ status = "okay";
+
+ lanes {
+ sata-0 {
+ nvidia,function = "sata";
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "okay";
+ mode = "otg";
+ };
+
+ usb2-1 {
+ status = "okay";
+ vbus-supply = <&vdd_5v0_rtl>;
+ mode = "host";
+ };
+
+ usb2-2 {
+ status = "okay";
+ vbus-supply = <&vdd_usb_vbus>;
+ mode = "host";
+ };
+
+ usb2-3 {
+ status = "okay";
+ mode = "host";
+ };
+
+ usb3-0 {
+ nvidia,usb2-companion = <1>;
+ status = "okay";
+ };
+
+ usb3-1 {
+ nvidia,usb2-companion = <2>;
+ status = "okay";
+ };
+ };
+ };
+
/* MMC/SD */
sdhci@700b0000 {
status = "okay";
@@ -1268,6 +1449,144 @@
no-1-8-v;
cd-gpios = <&gpio TEGRA_GPIO(Z, 1) GPIO_ACTIVE_LOW>;
+
+ vqmmc-supply = <&vddio_sdmmc>;
+ vmmc-supply = <&vdd_3v3_sd>;
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd_sys_mux: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "VDD_SYS_MUX";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_5v0_sys: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "VDD_5V0_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&pmic 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_sys_mux>;
+ };
+
+ vdd_3v3_sys: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "VDD_3V3_SYS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ gpio = <&pmic 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_sys_mux>;
+
+ regulator-enable-ramp-delay = <160>;
+ regulator-disable-ramp-delay = <10000>;
+ };
+
+ vdd_5v0_io: regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <3>;
+ regulator-name = "VDD_5V0_IO_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3_sd: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+ regulator-name = "VDD_3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
+
+ regulator-enable-ramp-delay = <472>;
+ regulator-disable-ramp-delay = <4880>;
+ };
+
+ vdd_dsi_csi: regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <5>;
+ regulator-name = "AVDD_DSI_CSI_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vdd_sys_1v2>;
+ };
+
+ vdd_3v3_dis: regulator@6 {
+ compatible = "regulator-fixed";
+ reg = <6>;
+ regulator-name = "VDD_DIS_3V3_LCD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ gpio = <&exp1 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_3v3_sys>;
+ };
+
+ vdd_1v8_dis: regulator@7 {
+ compatible = "regulator-fixed";
+ reg = <7>;
+ regulator-name = "VDD_LCD_1V8_DIS";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ gpio = <&exp1 14 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_1v8>;
+ };
+
+ vdd_5v0_rtl: regulator@8 {
+ compatible = "regulator-fixed";
+ reg = <8>;
+ regulator-name = "RTL_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(H, 1) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_usb_vbus: regulator@9 {
+ compatible = "regulator-fixed";
+ reg = <9>;
+ regulator-name = "USB_VBUS_EN1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(CC, 5) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_hdmi: regulator@10 {
+ compatible = "regulator-fixed";
+ reg = <10>;
+ regulator-name = "VDD_HDMI_5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&exp1 12 GPIO_ACTIVE_LOW>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index 4d89f4e02d98..431266a48e9c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -1,6 +1,7 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
+#include <dt-bindings/mfd/max77620.h>
#include <dt-bindings/pinctrl/pinctrl-tegra.h>
#include "tegra210.dtsi"
@@ -1327,6 +1328,234 @@
};
};
+ i2c@7000d000 {
+ status = "okay";
+ clock-frequency = <1000000>;
+
+ max77620: max77620@3c {
+ compatible = "maxim,max77620";
+ reg = <0x3c>;
+ interrupts = <0 86 IRQ_TYPE_NONE>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77620_default>;
+
+ max77620_default: pinmux@0 {
+ pin_gpio {
+ pins = "gpio0", "gpio1", "gpio2", "gpio7";
+ function = "gpio";
+ };
+
+ /*
+ * GPIO3 is used to drive EN_PP3300 and is part of the
+ * power sequence, so it must be sequenced up (automatically
+ * set by OTP) and down properly.
+ */
+ pin_gpio3 {
+ pins = "gpio3";
+ function = "fps-out";
+ drive-open-drain = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <4>;
+ maxim,active-fps-power-down-slot = <2>;
+ };
+
+ pin_gpio5_6 {
+ pins = "gpio5", "gpio6";
+ function = "gpio";
+ drive-push-pull = <1>;
+ };
+
+ pin_32k {
+ pins = "gpio4";
+ function = "32k-out1";
+ };
+ };
+
+ fps {
+ fps0 {
+ maxim,shutdown-fps-time-period-us = <5120>;
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ };
+
+ fps1 {
+ maxim,shutdown-fps-time-period-us = <5120>;
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
+ maxim,device-state-on-disabled-event = <MAX77620_FPS_INACTIVE_STATE_SLEEP>;
+ };
+
+ fps2 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ };
+ };
+
+ regulators {
+ in-ldo0-1-supply = <&pp1350>;
+ in-ldo2-supply = <&pp3300>;
+ in-ldo3-5-supply = <&pp3300>;
+ in-ldo7-8-supply = <&pp1350>;
+
+ ppvar_soc: sd0 {
+ regulator-name = "PPVAR_SOC";
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1125000>;
+ regulator-always-on;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,active-fps-power-up-slot = <1>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp1100_sd1: sd1 {
+ regulator-name = "PP1100";
+ regulator-min-microvolt = <1125000>;
+ regulator-max-microvolt = <1125000>;
+ regulator-always-on;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <5>;
+ maxim,active-fps-power-down-slot = <1>;
+ };
+
+ pp1350: sd2 {
+ regulator-name = "PP1350";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <2>;
+ maxim,active-fps-power-down-slot = <5>;
+ };
+
+ pp1800: sd3 {
+ regulator-name = "PP1800";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <3>;
+ maxim,active-fps-power-down-slot = <3>;
+ };
+
+ pp1200_avdd: ldo0 {
+ regulator-name = "PP1200_AVDD";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <26>;
+ regulator-ramp-delay = <100000>;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp1200_rcam: ldo1 {
+ regulator-name = "PP1200_RCAM";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <22>;
+ regulator-ramp-delay = <100000>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp_ldo2: ldo2 {
+ regulator-name = "PP_LDO2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <62>;
+ regulator-ramp-delay = <11000>;
+ regulator-always-on;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp2800l_rcam: ldo3 {
+ regulator-name = "PP2800L_RCAM";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <50>;
+ regulator-ramp-delay = <100000>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp100_soc_rtc: ldo4 {
+ regulator-name = "PP1100_SOC_RTC";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-enable-ramp-delay = <22>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on; /* Check this */
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <1>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp2800l_fcam: ldo5 {
+ regulator-name = "PP2800L_FCAM";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <62>;
+ regulator-ramp-delay = <100000>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ ldo6 {
+ /* Unused. */
+ regulator-name = "PP_LDO6";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <36>;
+ regulator-ramp-delay = <100000>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ pp1050_avdd: ldo7 {
+ regulator-name = "PP1050_AVDD";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <24>;
+ regulator-ramp-delay = <100000>;
+ regulator-always-on;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,active-fps-power-up-slot = <3>;
+ maxim,active-fps-power-down-slot = <4>;
+ };
+
+ avddio_1v05: ldo8 {
+ regulator-name = "AVDDIO_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <22>;
+ regulator-ramp-delay = <100000>;
+ regulator-boot-on;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+ };
+ };
+ };
+
pmc@7000e400 {
nvidia,invert-interrupt;
nvidia,suspend-mode = <0>;
@@ -1421,4 +1650,89 @@
compatible = "arm,psci-1.0";
method = "smc";
};
+
+ regulators {
+ compatible = "simple-bus";
+ device_type = "fixed-regulators";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ppvar_sys: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+ regulator-name = "PPVAR_SYS";
+ regulator-min-microvolt = <4400000>;
+ regulator-max-microvolt = <4400000>;
+ regulator-always-on;
+ };
+
+ pplcd_vdd: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "PPLCD_VDD";
+ regulator-min-microvolt = <4400000>;
+ regulator-max-microvolt = <4400000>;
+ gpio = <&gpio TEGRA_GPIO(V, 4) 0>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ pp3000_always: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+ regulator-name = "PP3000_ALWAYS";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ };
+
+ pp3300: regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <3>;
+ regulator-name = "PP3300";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ enable-active-high;
+ };
+
+ pp5000: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+ regulator-name = "PP5000";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ pp1800_lcdio: regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <5>;
+ regulator-name = "PP1800_LCDIO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio TEGRA_GPIO(V, 3) 0>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ pp1800_cam: regulator@6 {
+ compatible = "regulator-fixed";
+ reg = <6>;
+ regulator-name = "PP1800_CAM";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ gpio = <&gpio TEGRA_GPIO(K, 3) 0>;
+ enable-active-high;
+ };
+
+ usbc_vbus: regulator@7 {
+ compatible = "regulator-fixed";
+ reg = <7>;
+ regulator-name = "USBC_VBUS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 76fe31faa1a5..c4cfdcf60d26 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -35,6 +35,26 @@
resets = <&tegra_car 207>;
reset-names = "dpaux";
status = "disabled";
+
+ state_dpaux1_aux: pinmux-aux {
+ groups = "dpaux-io";
+ function = "aux";
+ };
+
+ state_dpaux1_i2c: pinmux-i2c {
+ groups = "dpaux-io";
+ function = "i2c";
+ };
+
+ state_dpaux1_off: pinmux-off {
+ groups = "dpaux-io";
+ function = "off";
+ };
+
+ i2c-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
vi@54080000 {
@@ -154,6 +174,10 @@
clock-names = "sor", "parent", "dp", "safe";
resets = <&tegra_car 182>;
reset-names = "sor";
+ pinctrl-0 = <&state_dpaux_aux>;
+ pinctrl-1 = <&state_dpaux_i2c>;
+ pinctrl-2 = <&state_dpaux_off>;
+ pinctrl-names = "aux", "i2c", "off";
status = "disabled";
};
@@ -162,12 +186,17 @@
reg = <0x0 0x54580000 0x0 0x00040000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA210_CLK_SOR1>,
+ <&tegra_car TEGRA210_CLK_SOR1_SRC>,
<&tegra_car TEGRA210_CLK_PLL_D2_OUT0>,
<&tegra_car TEGRA210_CLK_PLL_DP>,
<&tegra_car TEGRA210_CLK_SOR_SAFE>;
- clock-names = "sor", "parent", "dp", "safe";
+ clock-names = "sor", "source", "parent", "dp", "safe";
resets = <&tegra_car 183>;
reset-names = "sor";
+ pinctrl-0 = <&state_dpaux1_aux>;
+ pinctrl-1 = <&state_dpaux1_i2c>;
+ pinctrl-2 = <&state_dpaux1_off>;
+ pinctrl-names = "aux", "i2c", "off";
status = "disabled";
};
@@ -181,6 +210,26 @@
resets = <&tegra_car 181>;
reset-names = "dpaux";
status = "disabled";
+
+ state_dpaux_aux: pinmux-aux {
+ groups = "dpaux-io";
+ function = "aux";
+ };
+
+ state_dpaux_i2c: pinmux-i2c {
+ groups = "dpaux-io";
+ function = "i2c";
+ };
+
+ state_dpaux_off: pinmux-off {
+ groups = "dpaux-io";
+ function = "off";
+ };
+
+ i2c-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
isp@54600000 {
@@ -478,6 +527,9 @@
reset-names = "i2c";
dmas = <&apbdma 26>, <&apbdma 26>;
dma-names = "rx", "tx";
+ pinctrl-0 = <&state_dpaux1_i2c>;
+ pinctrl-1 = <&state_dpaux1_off>;
+ pinctrl-names = "default", "idle";
status = "disabled";
};
@@ -508,6 +560,9 @@
reset-names = "i2c";
dmas = <&apbdma 30>, <&apbdma 30>;
dma-names = "rx", "tx";
+ pinctrl-0 = <&state_dpaux_i2c>;
+ pinctrl-1 = <&state_dpaux_off>;
+ pinctrl-names = "default", "idle";
status = "disabled";
};
@@ -584,6 +639,39 @@
reg = <0x0 0x7000e400 0x0 0x400>;
clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
+
+ powergates {
+ pd_audio: aud {
+ clocks = <&tegra_car TEGRA210_CLK_APE>,
+ <&tegra_car TEGRA210_CLK_APB2APE>;
+ resets = <&tegra_car 198>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_xusbss: xusba {
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_SS>;
+ clock-names = "xusb-ss";
+ resets = <&tegra_car TEGRA210_CLK_XUSB_SS>;
+ reset-names = "xusb-ss";
+ #power-domain-cells = <0>;
+ };
+
+ pd_xusbdev: xusbb {
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_DEV>;
+ clock-names = "xusb-dev";
+ resets = <&tegra_car 95>;
+ reset-names = "xusb-dev";
+ #power-domain-cells = <0>;
+ };
+
+ pd_xusbhost: xusbc {
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_HOST>;
+ clock-names = "xusb-host";
+ resets = <&tegra_car TEGRA210_CLK_XUSB_HOST>;
+ reset-names = "xusb-host";
+ #power-domain-cells = <0>;
+ };
+ };
};
fuse@7000f800 {
@@ -621,6 +709,196 @@
status = "disabled";
};
+ usb@70090000 {
+ compatible = "nvidia,tegra210-xusb";
+ reg = <0x0 0x70090000 0x0 0x8000>,
+ <0x0 0x70098000 0x0 0x1000>,
+ <0x0 0x70099000 0x0 0x1000>;
+ reg-names = "hcd", "fpci", "ipfs";
+
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&tegra_car TEGRA210_CLK_XUSB_HOST>,
+ <&tegra_car TEGRA210_CLK_XUSB_HOST_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_FALCON_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_SS>,
+ <&tegra_car TEGRA210_CLK_XUSB_SS_DIV2>,
+ <&tegra_car TEGRA210_CLK_XUSB_SS_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_HS_SRC>,
+ <&tegra_car TEGRA210_CLK_XUSB_FS_SRC>,
+ <&tegra_car TEGRA210_CLK_PLL_U_480M>,
+ <&tegra_car TEGRA210_CLK_CLK_M>,
+ <&tegra_car TEGRA210_CLK_PLL_E>;
+ clock-names = "xusb_host", "xusb_host_src",
+ "xusb_falcon_src", "xusb_ss",
+ "xusb_ss_div2", "xusb_ss_src",
+ "xusb_hs_src", "xusb_fs_src",
+ "pll_u_480m", "clk_m", "pll_e";
+ resets = <&tegra_car 89>, <&tegra_car 156>,
+ <&tegra_car 143>;
+ reset-names = "xusb_host", "xusb_ss", "xusb_src";
+
+ nvidia,xusb-padctl = <&padctl>;
+
+ status = "disabled";
+ };
+
+ padctl: padctl@7009f000 {
+ compatible = "nvidia,tegra210-xusb-padctl";
+ reg = <0x0 0x7009f000 0x0 0x1000>;
+ resets = <&tegra_car 142>;
+ reset-names = "padctl";
+
+ status = "disabled";
+
+ pads {
+ usb2 {
+ clocks = <&tegra_car TEGRA210_CLK_USB2_TRK>;
+ clock-names = "trk";
+ status = "disabled";
+
+ lanes {
+ usb2-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-1 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-2 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-3 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ hsic {
+ clocks = <&tegra_car TEGRA210_CLK_HSIC_TRK>;
+ clock-names = "trk";
+ status = "disabled";
+
+ lanes {
+ hsic-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ hsic-1 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ pcie {
+ clocks = <&tegra_car TEGRA210_CLK_PLL_E>;
+ clock-names = "pll";
+ resets = <&tegra_car 205>;
+ reset-names = "phy";
+ status = "disabled";
+
+ lanes {
+ pcie-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ pcie-1 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ pcie-2 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ pcie-3 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ pcie-4 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ pcie-5 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ pcie-6 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ sata {
+ clocks = <&tegra_car TEGRA210_CLK_PLL_E>;
+ clock-names = "pll";
+ resets = <&tegra_car 204>;
+ reset-names = "phy";
+ status = "disabled";
+
+ lanes {
+ sata-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "disabled";
+ };
+
+ usb2-1 {
+ status = "disabled";
+ };
+
+ usb2-2 {
+ status = "disabled";
+ };
+
+ usb2-3 {
+ status = "disabled";
+ };
+
+ hsic-0 {
+ status = "disabled";
+ };
+
+ usb3-0 {
+ status = "disabled";
+ };
+
+ usb3-1 {
+ status = "disabled";
+ };
+
+ usb3-2 {
+ status = "disabled";
+ };
+
+ usb3-3 {
+ status = "disabled";
+ };
+ };
+ };
+
sdhci@700b0000 {
compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
@@ -673,6 +951,18 @@
#nvidia,mipi-calibrate-cells = <1>;
};
+ aconnect@702c0000 {
+ compatible = "nvidia,tegra210-aconnect";
+ clocks = <&tegra_car TEGRA210_CLK_APE>,
+ <&tegra_car TEGRA210_CLK_APB2APE>;
+ clock-names = "ape", "apb2ape";
+ power-domains = <&pd_audio>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x702c0000 0x0 0x702c0000 0x00040000>;
+ status = "disabled";
+ };
+
spi@70410000 {
compatible = "nvidia,tegra210-qspi";
reg = <0x0 0x70410000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 205ef89b8ca0..18639bc0a506 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -33,6 +33,10 @@
};
soc {
+ dma@7884000 {
+ status = "okay";
+ };
+
serial@78af000 {
label = "LS-UART0";
status = "okay";
@@ -140,6 +144,18 @@
status = "okay";
};
+ sdhci@07864000 {
+ vmmc-supply = <&pm8916_l11>;
+ vqmmc-supply = <&pm8916_l12>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ cd-gpios = <&msmgpio 38 0x1>;
+ status = "okay";
+ };
+
usb@78d9000 {
extcon = <&usb_id>, <&usb_id>;
status = "okay";
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 96812007850e..11bdc24cfc74 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -42,13 +42,48 @@
#size-cells = <2>;
ranges;
- reserve_aligned@86000000 {
- reg = <0x0 0x86000000 0x0 0x0300000>;
+ tz-apps@86000000 {
+ reg = <0x0 0x86000000 0x0 0x300000>;
no-map;
};
smem_mem: smem_region@86300000 {
- reg = <0x0 0x86300000 0x0 0x0100000>;
+ reg = <0x0 0x86300000 0x0 0x100000>;
+ no-map;
+ };
+
+ hypervisor@86400000 {
+ reg = <0x0 0x86400000 0x0 0x100000>;
+ no-map;
+ };
+
+ tz@86500000 {
+ reg = <0x0 0x86500000 0x0 0x180000>;
+ no-map;
+ };
+
+ reserved@86680000 {
+ reg = <0x0 0x86680000 0x0 0x80000>;
+ no-map;
+ };
+
+ rmtfs@86700000 {
+ reg = <0x0 0x86700000 0x0 0xe0000>;
+ no-map;
+ };
+
+ rfsa@867e0000 {
+ reg = <0x0 0x867e0000 0x0 0x20000>;
+ no-map;
+ };
+
+ mpss@86800000 {
+ reg = <0x0 0x86800000 0x0 0x2b00000>;
+ no-map;
+ };
+
+ wcnss@89300000 {
+ reg = <0x0 0x89300000 0x0 0x600000>;
no-map;
};
};
@@ -62,6 +97,8 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0>;
next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
};
CPU1: cpu@1 {
@@ -69,6 +106,8 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x1>;
next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
};
CPU2: cpu@2 {
@@ -76,6 +115,8 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x2>;
next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
};
CPU3: cpu@3 {
@@ -83,12 +124,35 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x3>;
next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
};
L2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
};
+
+ idle-states {
+ CPU_SPC: spc {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x40000002>;
+ entry-latency-us = <130>;
+ exit-latency-us = <150>;
+ min-residency-us = <2000>;
+ local-timer-stop;
+ };
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 GIC_CPU_MASK_SIMPLE(4)>;
};
timer {
@@ -122,6 +186,14 @@
hwlocks = <&tcsr_mutex 3>;
};
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc GCC_CRYPTO_CLK>, <&gcc GCC_CRYPTO_AXI_CLK>, <&gcc GCC_CRYPTO_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
new file mode 100644
index 000000000000..659940434842
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&msmgpio {
+
+ blsp1_spi0_default: blsp1_spi0_default {
+ pinmux {
+ function = "blsp_spi1";
+ pins = "gpio0", "gpio1", "gpio3";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio2";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1", "gpio3";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio2";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ blsp1_spi0_sleep: blsp1_spi0_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ blsp1_i2c2_default: blsp1_i2c2_default {
+ pinmux {
+ function = "blsp_i2c3";
+ pins = "gpio47", "gpio48";
+ };
+ pinconf {
+ pins = "gpio47", "gpio48";
+ drive-strength = <16>;
+ bias-disable = <0>;
+ };
+ };
+
+ blsp1_i2c2_sleep: blsp1_i2c2_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio47", "gpio48";
+ };
+ pinconf {
+ pins = "gpio47", "gpio48";
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
+ blsp2_i2c0_default: blsp2_i2c0 {
+ pinmux {
+ function = "blsp_i2c7";
+ pins = "gpio55", "gpio56";
+ };
+ pinconf {
+ pins = "gpio55", "gpio56";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_i2c0_sleep: blsp2_i2c0_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio55", "gpio56";
+ };
+ pinconf {
+ pins = "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart1_2pins_default: blsp2_uart1_2pins {
+ pinmux {
+ function = "blsp_uart8";
+ pins = "gpio4", "gpio5";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart1_2pins_sleep: blsp2_uart1_2pins_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio4", "gpio5";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart1_4pins_default: blsp2_uart1_4pins {
+ pinmux {
+ function = "blsp_uart8";
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ };
+
+ pinconf {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart1_4pins_sleep: blsp2_uart1_4pins_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ };
+
+ pinconf {
+ pins = "gpio4", "gpiio5", "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_i2c1_default: blsp2_i2c1 {
+ pinmux {
+ function = "blsp_i2c8";
+ pins = "gpio6", "gpio7";
+ };
+ pinconf {
+ pins = "gpio6", "gpio7";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_i2c1_sleep: blsp2_i2c1_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio6", "gpio7";
+ };
+ pinconf {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart2_2pins_default: blsp2_uart2_2pins {
+ pinmux {
+ function = "blsp_uart9";
+ pins = "gpio49", "gpio50";
+ };
+ pinconf {
+ pins = "gpio49", "gpio50";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart2_2pins_sleep: blsp2_uart2_2pins_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio49", "gpio50";
+ };
+ pinconf {
+ pins = "gpio49", "gpio50";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart2_4pins_default: blsp2_uart2_4pins {
+ pinmux {
+ function = "blsp_uart9";
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ };
+
+ pinconf {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp2_uart2_4pins_sleep: blsp2_uart2_4pins_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ };
+
+ pinconf {
+ pins = "gpio49", "gpio50", "gpio51", "gpio52";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp2_spi5_default: blsp2_spi5_default {
+ pinmux {
+ function = "blsp_spi12";
+ pins = "gpio85", "gpio86", "gpio88";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio87";
+ };
+ pinconf {
+ pins = "gpio85", "gpio86", "gpio88";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio87";
+ drive-strength = <16>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ blsp2_spi5_sleep: blsp2_spi5_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio85", "gpio86", "gpio87", "gpio88";
+ };
+ pinconf {
+ pins = "gpio85", "gpio86", "gpio87", "gpio88";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ sdc2_clk_on: sdc2_clk_on {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc2_clk_off: sdc2_clk_off {
+ config {
+ pins = "sdc2_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_cmd_on: sdc2_cmd_on {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_cmd_off: sdc2_cmd_off {
+ config {
+ pins = "sdc2_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc2_data_on: sdc2_data_on {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc2_data_off: sdc2_data_off {
+ config {
+ pins = "sdc2_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 0506fb808c56..55ec3e8326b7 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -151,6 +151,36 @@
reg = <0x300000 0x90000>;
};
+ blsp1_spi0: spi@07575000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x07575000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_spi0_default>;
+ pinctrl-1 = <&blsp1_spi0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_i2c0: i2c@075b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b5000 0x1000>;
+ interrupts = <GIC_SPI 101 0>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c0_default>;
+ pinctrl-1 = <&blsp2_i2c0_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
blsp2_uart1: serial@75b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x75b0000 0x1000>;
@@ -161,7 +191,77 @@
status = "disabled";
};
- pinctrl@1010000 {
+ blsp2_i2c1: i2c@075b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x075b6000 0x1000>;
+ interrupts = <GIC_SPI 102 0>;
+ clocks = <&gcc GCC_BLSP2_AHB_CLK>,
+ <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_i2c1_default>;
+ pinctrl-1 = <&blsp2_i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_uart2: serial@75b1000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x075b1000 0x1000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART3_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ blsp1_i2c2: i2c@07577000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x07577000 0x1000>;
+ interrupts = <GIC_SPI 97 0>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_i2c2_default>;
+ pinctrl-1 = <&blsp1_i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp2_spi5: spi@075ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x075ba000 0x600>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_spi5_default>;
+ pinctrl-1 = <&blsp2_spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc2: sdhci@74a4900 {
+ status = "disabled";
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x74a4900 0x314>, <0x74a4000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clock-names = "iface", "core";
+ clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ <&gcc GCC_SDCC2_APPS_CLK>;
+ bus-width = <4>;
+ };
+
+ msmgpio: pinctrl@1010000 {
compatible = "qcom,msm8996-pinctrl";
reg = <0x01010000 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
@@ -267,3 +367,4 @@
};
};
};
+#include "msm8996-pins.dtsi"
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 9ce1890a650e..17139f7003a6 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,4 +1,5 @@
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb
+dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb
always := $(dtb-y)
clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
index 9f561c943f6f..98f02631a0f0 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
@@ -62,7 +62,7 @@
clock-frequency = <24576000>;
};
- vcc_sdhi0: regulator@1 {
+ vcc_sdhi0: regulator-vcc-sdhi0 {
compatible = "regulator-fixed";
regulator-name = "SDHI0 Vcc";
@@ -73,7 +73,7 @@
enable-active-high;
};
- vccq_sdhi0: regulator@2 {
+ vccq_sdhi0: regulator-vccq-sdhi0 {
compatible = "regulator-gpio";
regulator-name = "SDHI0 VccQ";
@@ -86,7 +86,7 @@
1800000 0>;
};
- vcc_sdhi3: regulator@3 {
+ vcc_sdhi3: regulator-vcc-sdhi3 {
compatible = "regulator-fixed";
regulator-name = "SDHI3 Vcc";
@@ -97,7 +97,7 @@
enable-active-high;
};
- vccq_sdhi3: regulator@4 {
+ vccq_sdhi3: regulator-vccq-sdhi3 {
compatible = "regulator-gpio";
regulator-name = "SDHI3 VccQ";
@@ -208,6 +208,7 @@
pinctrl-0 = <&scif1_pins>;
pinctrl-names = "default";
+ uart-has-rtscts;
status = "okay";
};
@@ -329,6 +330,11 @@
shared-pin;
};
+&wdt0 {
+ timeout-sec = <60>;
+ status = "okay";
+};
+
&audio_clk_a {
clock-frequency = <22579200>;
};
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index 3285a9286786..b902356873c2 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -53,6 +53,7 @@
next-level-cache = <&L2_CA57>;
enable-method = "psci";
};
+
a57_2: cpu@2 {
compatible = "arm,cortex-a57","arm,armv8";
reg = <0x2>;
@@ -61,6 +62,7 @@
next-level-cache = <&L2_CA57>;
enable-method = "psci";
};
+
a57_3: cpu@3 {
compatible = "arm,cortex-a57","arm,armv8";
reg = <0x3>;
@@ -69,20 +71,22 @@
next-level-cache = <&L2_CA57>;
enable-method = "psci";
};
- };
- L2_CA57: cache-controller@0 {
- compatible = "cache";
- power-domains = <&sysc R8A7795_PD_CA57_SCU>;
- cache-unified;
- cache-level = <2>;
- };
+ L2_CA57: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ power-domains = <&sysc R8A7795_PD_CA57_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
- L2_CA53: cache-controller@1 {
- compatible = "cache";
- power-domains = <&sysc R8A7795_PD_CA53_SCU>;
- cache-unified;
- cache-level = <2>;
+ L2_CA53: cache-controller@100 {
+ compatible = "cache";
+ reg = <0x100>;
+ power-domains = <&sysc R8A7795_PD_CA53_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
};
extal_clk: extal {
@@ -151,19 +155,27 @@
#size-cells = <2>;
ranges;
- gic: interrupt-controller@0xf1010000 {
+ gic: interrupt-controller@f1010000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
reg = <0x0 0xf1010000 0 0x1000>,
- <0x0 0xf1020000 0 0x2000>,
+ <0x0 0xf1020000 0 0x20000>,
<0x0 0xf1040000 0 0x20000>,
- <0x0 0xf1060000 0 0x2000>;
+ <0x0 0xf1060000 0 0x20000>;
interrupts = <GIC_PPI 9
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
+ wdt0: watchdog@e6020000 {
+ compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt";
+ reg = <0 0xe6020000 0 0x0c>;
+ clocks = <&cpg CPG_MOD 402>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
gpio0: gpio@e6050000 {
compatible = "renesas,gpio-r8a7795",
"renesas,gpio-rcar";
@@ -571,6 +583,30 @@
status = "disabled";
};
+ canfd: can@e66c0000 {
+ compatible = "renesas,r8a7795-canfd",
+ "renesas,rcar-gen3-canfd";
+ reg = <0 0xe66c0000 0 0x8000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 914>,
+ <&cpg CPG_CORE R8A7795_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "fck", "canfd", "can_clk";
+ assigned-clocks = <&cpg CPG_CORE R8A7795_CLK_CANFD>;
+ assigned-clock-rates = <40000000>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ status = "disabled";
+
+ channel0 {
+ status = "disabled";
+ };
+
+ channel1 {
+ status = "disabled";
+ };
+ };
+
hscif0: serial@e6540000 {
compatible = "renesas,hscif-r8a7795",
"renesas,rcar-gen3-hscif",
@@ -749,6 +785,8 @@
interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 931>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac1 0x91>, <&dmac1 0x90>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
status = "disabled";
};
@@ -761,6 +799,8 @@
interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 930>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac1 0x93>, <&dmac1 0x92>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
status = "disabled";
};
@@ -773,6 +813,8 @@
interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 929>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac1 0x95>, <&dmac1 0x94>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
status = "disabled";
};
@@ -785,6 +827,8 @@
interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 928>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac0 0x97>, <&dmac0 0x96>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
status = "disabled";
};
@@ -797,6 +841,8 @@
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 927>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac0 0x99>, <&dmac0 0x98>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
status = "disabled";
};
@@ -809,6 +855,8 @@
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 919>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <110>;
status = "disabled";
};
@@ -821,6 +869,8 @@
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 918>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
+ dma-names = "tx", "rx";
i2c-scl-internal-delay-ns = <6>;
status = "disabled";
};
@@ -874,63 +924,63 @@
status = "disabled";
rcar_sound,dvc {
- dvc0: dvc@0 {
+ dvc0: dvc-0 {
dmas = <&audma0 0xbc>;
dma-names = "tx";
};
- dvc1: dvc@1 {
+ dvc1: dvc-1 {
dmas = <&audma0 0xbe>;
dma-names = "tx";
};
};
rcar_sound,src {
- src0: src@0 {
+ src0: src-0 {
interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x85>, <&audma1 0x9a>;
dma-names = "rx", "tx";
};
- src1: src@1 {
+ src1: src-1 {
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x87>, <&audma1 0x9c>;
dma-names = "rx", "tx";
};
- src2: src@2 {
+ src2: src-2 {
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x89>, <&audma1 0x9e>;
dma-names = "rx", "tx";
};
- src3: src@3 {
+ src3: src-3 {
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8b>, <&audma1 0xa0>;
dma-names = "rx", "tx";
};
- src4: src@4 {
+ src4: src-4 {
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8d>, <&audma1 0xb0>;
dma-names = "rx", "tx";
};
- src5: src@5 {
+ src5: src-5 {
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8f>, <&audma1 0xb2>;
dma-names = "rx", "tx";
};
- src6: src@6 {
+ src6: src-6 {
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x91>, <&audma1 0xb4>;
dma-names = "rx", "tx";
};
- src7: src@7 {
+ src7: src-7 {
interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x93>, <&audma1 0xb6>;
dma-names = "rx", "tx";
};
- src8: src@8 {
+ src8: src-8 {
interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x95>, <&audma1 0xb8>;
dma-names = "rx", "tx";
};
- src9: src@9 {
+ src9: src-9 {
interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x97>, <&audma1 0xba>;
dma-names = "rx", "tx";
@@ -938,52 +988,52 @@
};
rcar_sound,ssi {
- ssi0: ssi@0 {
+ ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi1: ssi@1 {
+ ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi2: ssi@2 {
+ ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi3: ssi@3 {
+ ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi4: ssi@4 {
+ ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi5: ssi@5 {
+ ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi6: ssi@6 {
+ ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi7: ssi@7 {
+ ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi8: ssi@8 {
+ ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi9: ssi@9 {
+ ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
dma-names = "rx", "tx", "rxu", "txu";
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
new file mode 100644
index 000000000000..e72be3856d79
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
@@ -0,0 +1,50 @@
+/*
+ * Device Tree Source for the Salvator-X board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7796.dtsi"
+
+/ {
+ model = "Renesas Salvator-X board based on r8a7796";
+ compatible = "renesas,salvator-x", "renesas,r8a7796";
+
+ aliases {
+ serial0 = &scif2;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel";
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x0 0x78000000>;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <16666666>;
+};
+
+&scif2 {
+ status = "okay";
+};
+
+&scif_clk {
+ clock-frequency = <14745600>;
+ status = "okay";
+};
+
+&wdt0 {
+ timeout-sec = <60>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
new file mode 100644
index 000000000000..1edf82440d78
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -0,0 +1,138 @@
+/*
+ * Device Tree Source for the r8a7796 SoC
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/r8a7796-sysc.h>
+
+/ {
+ compatible = "renesas,r8a7796";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 1 core only at this point */
+ a57_0: cpu@0 {
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0>;
+ device_type = "cpu";
+ power-domains = <&sysc R8A7796_PD_CA57_CPU0>;
+ next-level-cache = <&L2_CA57>;
+ enable-method = "psci";
+ };
+
+ L2_CA57: cache-controller@0 {
+ compatible = "cache";
+ reg = <0>;
+ power-domains = <&sysc R8A7796_PD_CA57_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
+ };
+
+ extal_clk: extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ extalr_clk: extalr {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board */
+ clock-frequency = <0>;
+ };
+
+ /* External SCIF clock - to be overridden by boards that provide it */
+ scif_clk: scif {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gic: interrupt-controller@f1010000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x0 0xf1010000 0 0x1000>,
+ <0x0 0xf1020000 0 0x20000>,
+ <0x0 0xf1040000 0 0x20000>,
+ <0x0 0xf1060000 0 0x20000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ wdt0: watchdog@e6020000 {
+ compatible = "renesas,r8a7796-wdt",
+ "renesas,rcar-gen3-wdt";
+ reg = <0 0xe6020000 0 0x0c>;
+ clocks = <&cpg CPG_MOD 402>;
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ cpg: clock-controller@e6150000 {
+ compatible = "renesas,r8a7796-cpg-mssr";
+ reg = <0 0xe6150000 0 0x1000>;
+ clocks = <&extal_clk>, <&extalr_clk>;
+ clock-names = "extal", "extalr";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,r8a7796-sysc";
+ reg = <0 0xe6180000 0 0x0400>;
+ #power-domain-cells = <1>;
+ };
+
+ scif2: serial@e6e88000 {
+ compatible = "renesas,scif-r8a7796",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e88000 0 64>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 310>,
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index b56b7205e39b..82a32e5591e2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -236,6 +236,15 @@
};
};
+&io_domains {
+ status = "ok";
+
+ audio-supply = <&vcc_io>;
+ gpio30-supply = <&vcc_io>;
+ gpio1830-supply = <&vcc_io>;
+ wifi-supply = <&vccio_wl>;
+};
+
&sdio0 {
assigned-clocks = <&cru SCLK_SDIO0>;
assigned-clock-parents = <&cru PLL_CPLL>;
@@ -329,6 +338,13 @@
};
};
+&pmu_io_domains {
+ status = "okay";
+
+ pmu-supply = <&vcc_io>;
+ vop-supply = <&vcc_io>;
+};
+
&saradc {
vref-supply = <&vcc_18>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 8b4a7c9154e9..d02a900378e1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -632,8 +632,13 @@
};
pmugrf: syscon@ff738000 {
- compatible = "rockchip,rk3368-pmugrf", "syscon";
+ compatible = "rockchip,rk3368-pmugrf", "syscon", "simple-mfd";
reg = <0x0 0xff738000 0x0 0x1000>;
+
+ pmu_io_domains: io-domains {
+ compatible = "rockchip,rk3368-pmu-io-voltage-domain";
+ status = "disabled";
+ };
};
cru: clock-controller@ff760000 {
@@ -645,8 +650,13 @@
};
grf: syscon@ff770000 {
- compatible = "rockchip,rk3368-grf", "syscon";
+ compatible = "rockchip,rk3368-grf", "syscon", "simple-mfd";
reg = <0x0 0xff770000 0x0 0x1000>;
+
+ io_domains: io-domains {
+ compatible = "rockchip,rk3368-io-voltage-domain";
+ status = "disabled";
+ };
};
wdt: watchdog@ff800000 {
@@ -670,7 +680,7 @@
#address-cells = <0>;
reg = <0x0 0xffb71000 0x0 0x1000>,
- <0x0 0xffb72000 0x0 0x1000>,
+ <0x0 0xffb72000 0x0 0x2000>,
<0x0 0xffb74000 0x0 0x2000>,
<0x0 0xffb76000 0x0 0x2000>;
interrupts = <GIC_PPI 9
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index 1a3eb1482050..d33aa06d46f5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -77,6 +77,10 @@
};
};
+&emmc_phy {
+ status = "okay";
+};
+
&pwm0 {
status = "okay";
};
@@ -89,6 +93,14 @@
status = "okay";
};
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ non-removable;
+ status = "okay";
+};
+
&uart2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 188bbeab92b9..a6dd623a8845 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -45,6 +45,7 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
compatible = "rockchip,rk3399";
@@ -54,6 +55,15 @@
#size-cells = <2>;
aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -215,6 +225,22 @@
status = "disabled";
};
+ sdhci: sdhci@fe330000 {
+ compatible = "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1";
+ reg = <0x0 0xfe330000 0x0 0x10000>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ arasan,soc-ctl-syscon = <&grf>;
+ assigned-clocks = <&cru SCLK_EMMC>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_EMMC>, <&cru ACLK_EMMC>;
+ clock-names = "clk_xin", "clk_ahb";
+ clock-output-names = "emmc_cardclock";
+ #clock-cells = <0>;
+ phys = <&emmc_phy>;
+ phy-names = "phy_arasan";
+ status = "disabled";
+ };
+
usb_host0_ehci: usb@fe380000 {
compatible = "generic-ehci";
reg = <0x0 0xfe380000 0x0 0x20000>;
@@ -272,6 +298,96 @@
};
};
+ i2c1: i2c@ff110000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff110000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C1>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C1>, <&cru PCLK_I2C1>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ff120000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff120000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C2>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C2>, <&cru PCLK_I2C2>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ff130000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff130000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C3>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C3>, <&cru PCLK_I2C3>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@ff140000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff140000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C5>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C5>, <&cru PCLK_I2C5>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@ff150000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff150000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C6>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C6>, <&cru PCLK_I2C6>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@ff160000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff160000 0x0 0x1000>;
+ assigned-clocks = <&cru SCLK_I2C7>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&cru SCLK_I2C7>, <&cru PCLK_I2C7>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
uart0: serial@ff180000 {
compatible = "rockchip,rk3399-uart", "snps,dw-apb-uart";
reg = <0x0 0xff180000 0x0 0x100>;
@@ -389,9 +505,105 @@
status = "disabled";
};
+ thermal-zones {
+ cpu_thermal: cpu {
+ polling-delay-passive = <100>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsadc 0>;
+
+ trips {
+ cpu_alert0: cpu_alert0 {
+ temperature = <70000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_alert1: cpu_alert1 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit: cpu_crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu_alert1>;
+ cooling-device =
+ <&cpu_l0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ gpu_thermal: gpu {
+ polling-delay-passive = <100>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsadc 1>;
+
+ trips {
+ gpu_alert0: gpu_alert0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ gpu_crit: gpu_crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&gpu_alert0>;
+ cooling-device =
+ <&cpu_b0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ tsadc: tsadc@ff260000 {
+ compatible = "rockchip,rk3399-tsadc";
+ reg = <0x0 0xff260000 0x0 0x100>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ assigned-clocks = <&cru SCLK_TSADC>;
+ assigned-clock-rates = <750000>;
+ clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+ clock-names = "tsadc", "apb_pclk";
+ resets = <&cru SRST_TSADC>;
+ reset-names = "tsadc-apb";
+ rockchip,grf = <&grf>;
+ rockchip,hw-tshut-temp = <95000>;
+ pinctrl-names = "init", "default", "sleep";
+ pinctrl-0 = <&otp_gpio>;
+ pinctrl-1 = <&otp_out>;
+ pinctrl-2 = <&otp_gpio>;
+ #thermal-sensor-cells = <1>;
+ status = "disabled";
+ };
+
pmugrf: syscon@ff320000 {
- compatible = "rockchip,rk3399-pmugrf", "syscon";
+ compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd";
reg = <0x0 0xff320000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pmu_io_domains: io-domains {
+ compatible = "rockchip,rk3399-pmu-io-voltage-domain";
+ status = "disabled";
+ };
};
spi3: spi@ff350000 {
@@ -420,6 +632,51 @@
status = "disabled";
};
+ i2c0: i2c@ff3c0000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff3c0000 0x0 0x1000>;
+ assigned-clocks = <&pmucru SCLK_I2C0_PMU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&pmucru SCLK_I2C0_PMU>, <&pmucru PCLK_I2C0_PMU>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@ff3d0000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff3d0000 0x0 0x1000>;
+ assigned-clocks = <&pmucru SCLK_I2C4_PMU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&pmucru SCLK_I2C4_PMU>, <&pmucru PCLK_I2C4_PMU>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@ff3e0000 {
+ compatible = "rockchip,rk3399-i2c";
+ reg = <0x0 0xff3e0000 0x0 0x1000>;
+ assigned-clocks = <&pmucru SCLK_I2C8_PMU>;
+ assigned-clock-rates = <200000000>;
+ clocks = <&pmucru SCLK_I2C8_PMU>, <&pmucru PCLK_I2C8_PMU>;
+ clock-names = "i2c", "pclk";
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c8_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pwm0: pwm@ff420000 {
compatible = "rockchip,rk3399-pwm", "rockchip,rk3288-pwm";
reg = <0x0 0xff420000 0x0 0x10>;
@@ -478,11 +735,43 @@
reg = <0x0 0xff760000 0x0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
+ assigned-clocks =
+ <&cru PLL_GPLL>, <&cru PLL_CPLL>,
+ <&cru PLL_NPLL>,
+ <&cru ACLK_PERIHP>, <&cru HCLK_PERIHP>,
+ <&cru PCLK_PERIHP>,
+ <&cru ACLK_PERILP0>, <&cru HCLK_PERILP0>,
+ <&cru PCLK_PERILP0>,
+ <&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>;
+ assigned-clock-rates =
+ <594000000>, <800000000>,
+ <1000000000>,
+ <150000000>, <75000000>,
+ <37500000>,
+ <100000000>, <100000000>,
+ <50000000>,
+ <100000000>, <50000000>;
};
grf: syscon@ff770000 {
- compatible = "rockchip,rk3399-grf", "syscon";
+ compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
reg = <0x0 0xff770000 0x0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ io_domains: io-domains {
+ compatible = "rockchip,rk3399-io-voltage-domain";
+ status = "disabled";
+ };
+
+ emmc_phy: phy@f780 {
+ compatible = "rockchip,rk3399-emmc-phy";
+ reg = <0xf780 0x24>;
+ clocks = <&sdhci>;
+ clock-names = "emmcclk";
+ #phy-cells = <0>;
+ status = "disabled";
+ };
};
watchdog@ff840000 {
@@ -764,6 +1053,16 @@
};
};
+ sleep {
+ ap_pwroff: ap-pwroff {
+ rockchip,pins = <1 5 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ ddrio_pwroff: ddrio-pwroff {
+ rockchip,pins = <0 1 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
spdif {
spdif_bus: spdif-bus {
rockchip,pins =
@@ -889,6 +1188,16 @@
};
};
+ tsadc {
+ otp_gpio: otp-gpio {
+ rockchip,pins = <1 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ otp_out: otp-out {
+ rockchip,pins = <1 6 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
uart0 {
uart0_xfer: uart0-xfer {
rockchip,pins =
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index 95328808e3c1..c223915f0907 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -42,6 +42,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+/memreserve/ 0x80000000 0x00000008; /* cpu-release-addr */
+
/ {
compatible = "socionext,ph1-ld20";
#address-cells = <2>;
@@ -77,7 +79,7 @@
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0 0x000>;
enable-method = "spin-table";
- cpu-release-addr = <0 0x80000100>;
+ cpu-release-addr = <0 0x80000000>;
};
cpu1: cpu@1 {
@@ -85,7 +87,7 @@
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0 0x001>;
enable-method = "spin-table";
- cpu-release-addr = <0 0x80000100>;
+ cpu-release-addr = <0 0x80000000>;
};
cpu2: cpu@100 {
@@ -93,7 +95,7 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0 0x100>;
enable-method = "spin-table";
- cpu-release-addr = <0 0x80000100>;
+ cpu-release-addr = <0 0x80000000>;
};
cpu3: cpu@101 {
@@ -101,7 +103,7 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0 0x101>;
enable-method = "spin-table";
- cpu-release-addr = <0 0x80000100>;
+ cpu-release-addr = <0 0x80000000>;
};
};
@@ -264,9 +266,13 @@
reg = <0x59801000 0x400>;
};
- pinctrl: pinctrl@5f801000 {
- compatible = "socionext,ph1-ld20-pinctrl", "syscon";
- reg = <0x5f801000 0xe00>;
+ soc-glue@5f800000 {
+ compatible = "simple-mfd", "syscon";
+ reg = <0x5f800000 0x2000>;
+
+ pinctrl: pinctrl {
+ compatible = "socionext,uniphier-ld20-pinctrl";
+ };
};
gic: interrupt-controller@5fe00000 {
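
The /memreserve/ and the cpu-release-addr change above go together: with "enable-method = "spin-table"", each secondary CPU polls the 8-byte slot named by cpu-release-addr until the boot CPU writes its entry point there, so the slot now reserved at 0x80000000 must stay out of the kernel's usable memory. A rough user-space model of that handshake, assuming the usual spin-table protocol and a made-up entry point (this is an illustration, not the firmware or kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stands in for the reserved 8-byte slot at cpu-release-addr (0x80000000). */
static _Atomic uint64_t release_addr;

static void *secondary_cpu(void *arg)
{
	uint64_t entry;

	/* A real secondary would WFE between polls; here we just spin. */
	while ((entry = atomic_load(&release_addr)) == 0)
		;
	printf("secondary jumps to 0x%llx\n", (unsigned long long)entry);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary_cpu, NULL);
	/* Boot CPU releases the secondary by publishing its entry point. */
	atomic_store(&release_addr, 0xffff000008081000ULL);  /* made-up address */
	pthread_join(t, NULL);
	return 0;
}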
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 4ed4756dfa97..0555b7caaf2c 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -45,6 +45,7 @@ CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SEATTLE=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
CONFIG_ARCH_STRATIX10=y
CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_SPRD=y
@@ -63,6 +64,7 @@ CONFIG_PCI_XGENE=y
CONFIG_PCI_LAYERSCAPE=y
CONFIG_PCI_HISI=y
CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
CONFIG_ARM64_VA_BITS_48=y
CONFIG_SCHED_MC=y
CONFIG_PREEMPT=y
@@ -102,6 +104,7 @@ CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
+CONFIG_SRAM=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
@@ -121,6 +124,7 @@ CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
CONFIG_AMD_XGBE=y
CONFIG_NET_XGENE=y
+CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IGBVF=y
@@ -128,6 +132,8 @@ CONFIG_SKY2=y
CONFIG_RAVB=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
+CONFIG_STMMAC_ETH=m
+CONFIG_REALTEK_PHY=m
CONFIG_MICREL_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
@@ -143,6 +149,8 @@ CONFIG_WL18XX=m
CONFIG_WLCORE_SDIO=m
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
@@ -179,41 +187,62 @@ CONFIG_I2C_QUP=y
CONFIG_I2C_TEGRA=y
CONFIG_I2C_UNIPHIER_F=y
CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
CONFIG_SPI=y
CONFIG_SPI_ORION=y
CONFIG_SPI_PL022=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=m
+CONFIG_SPI_S3C64XX=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8996=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_RCAR=y
CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_MAX77620=y
CONFIG_POWER_RESET_MSM=y
+CONFIG_BATTERY_BQ27XXX=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_INA2XX=m
+CONFIG_SENSORS_ARM_SCPI=y
CONFIG_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_EXYNOS_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_RENESAS_WDT=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_MFD_MAX77620=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_HI655X_PMIC=y
CONFIG_REGULATOR=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_PWM=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_REGULATOR_QCOM_SPMI=y
CONFIG_REGULATOR_S2MPS11=y
+CONFIG_DRM=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_LP855X=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -222,18 +251,23 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_RCAR=y
+CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_AK4613=y
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_RCAR=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_XHCI_TEGRA=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_EXYNOS=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC2=y
+CONFIG_USB_DWC3=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
@@ -263,12 +297,15 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_PL031=y
CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_TEGRA=y
CONFIG_RTC_DRV_XGENE=y
+CONFIG_RTC_DRV_S3C=y
CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
CONFIG_TEGRA20_APB_DMA=y
@@ -283,9 +320,11 @@ CONFIG_XEN_GNTDEV=y
CONFIG_XEN_GRANT_DEV_ALLOC=y
CONFIG_COMMON_CLK_SCPI=y
CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_S2MPS11=y
CONFIG_CLK_QORIQ=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_MMCC_8996=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
CONFIG_ARM_MHU=y
@@ -297,12 +336,18 @@ CONFIG_QCOM_SMD_RPM=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
CONFIG_EXTCON_USB_GPIO=y
+CONFIG_PWM=y
+CONFIG_PWM_TEGRA=m
CONFIG_COMMON_RESET_HI6220=y
CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_HI6220_USB=y
CONFIG_PHY_XGENE=y
+CONFIG_PHY_TEGRA_XUSB=y
CONFIG_ARM_SCPI_PROTOCOL=y
CONFIG_ACPI=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_PWM_SAMSUNG=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_FANOTIFY=y
@@ -319,6 +364,8 @@ CONFIG_EFIVAR_FS=y
CONFIG_SQUASHFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
@@ -344,5 +391,5 @@ CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set
CONFIG_CRYPTO_CRC32_ARM64=y
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 49dd1bd3ea50..7099f26e3702 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -36,8 +36,9 @@
#define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
+#define ARM64_HYP_OFFSET_LOW 14
-#define ARM64_NCAPS 14
+#define ARM64_NCAPS 15
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 7dbea6c070ec..ccea82c2b089 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -66,12 +66,16 @@ static inline bool is_device_dma_coherent(struct device *dev)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return (dma_addr_t)paddr;
+ dma_addr_t dev_addr = (dma_addr_t)paddr;
+
+ return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
- return (phys_addr_t)dev_addr;
+ phys_addr_t paddr = (phys_addr_t)dev_addr;
+
+ return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -86,5 +90,14 @@ static inline void dma_mark_clean(void *addr, size_t size)
{
}
+/* Override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask;
+
+ return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT;
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
#endif /* __KERNEL__ */
#endif /* __ASM_DMA_MAPPING_H */
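
The reworked phys_to_dma()/dma_to_phys() helpers above fold in dev->dma_pfn_offset, which is a page-frame delta and therefore has to be shifted by PAGE_SHIFT before it can be applied to an address; dma_max_pfn() then reuses dma_to_phys() on the device's mask. A minimal sketch of that arithmetic as a standalone program, assuming 4 KiB pages and a made-up offset (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12ULL                       /* assume 4 KiB pages */

/* Model of a device whose bus view of memory is offset from the CPU view. */
struct dev_model {
	uint64_t dma_pfn_offset;               /* hypothetical value */
};

static uint64_t model_phys_to_dma(const struct dev_model *dev, uint64_t paddr)
{
	/* bus address = CPU physical address - (pfn offset << PAGE_SHIFT) */
	return paddr - (dev->dma_pfn_offset << PAGE_SHIFT);
}

static uint64_t model_dma_to_phys(const struct dev_model *dev, uint64_t dev_addr)
{
	/* inverse mapping back to a CPU physical address */
	return dev_addr + (dev->dma_pfn_offset << PAGE_SHIFT);
}

int main(void)
{
	struct dev_model dev = { .dma_pfn_offset = 0x80000 };  /* 2 GiB of pages */
	uint64_t paddr = 0x880000000ULL;
	uint64_t bus = model_phys_to_dma(&dev, paddr);

	printf("phys 0x%llx -> bus 0x%llx -> phys 0x%llx\n",
	       (unsigned long long)paddr, (unsigned long long)bus,
	       (unsigned long long)model_dma_to_phys(&dev, bus));
	return 0;
}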
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 579b6e654f2d..a55384f4a5d7 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -140,6 +140,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 2cdb6b551ac6..4b5c977af465 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -178,7 +178,7 @@
/* Hyp System Trap Register */
#define HSTR_EL2_T(x) (1 << x)
-/* Hyp Coproccessor Trap Register Shifts */
+/* Hyp Coprocessor Trap Register Shifts */
#define CPTR_EL2_TFP_SHIFT 10
/* Hyp Coprocessor Trap Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 49095fc4b482..3eda975837d0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -47,8 +47,7 @@
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-int kvm_arch_dev_ioctl_check_extension(long ext);
-unsigned long kvm_hyp_reset_entry(void);
+int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
struct kvm_arch {
@@ -348,8 +347,7 @@ int kvm_perf_teardown(void);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
- phys_addr_t pgd_ptr,
+static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
@@ -357,19 +355,14 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
* Call initialization code, and switch to the full blown
* HYP code.
*/
- __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
- hyp_stack_ptr, vector_ptr);
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}
-static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+void __kvm_hyp_teardown(void);
+static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
phys_addr_t phys_idmap_start)
{
- /*
- * Call reset code, and switch back to stub hyp vectors.
- * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
- */
- __kvm_call_hyp((void *)kvm_hyp_reset_entry(),
- boot_pgd_ptr, phys_idmap_start);
+ kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start);
}
static inline void kvm_arch_hardware_unsetup(void) {}
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 44eaff70da6a..cff510574fae 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -25,29 +25,6 @@
#define __hyp_text __section(.hyp.text) notrace
-static inline unsigned long __kern_hyp_va(unsigned long v)
-{
- asm volatile(ALTERNATIVE("and %0, %0, %1",
- "nop",
- ARM64_HAS_VIRT_HOST_EXTN)
- : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
- return v;
-}
-
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
-
-static inline unsigned long __hyp_kern_va(unsigned long v)
-{
- u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
- asm volatile(ALTERNATIVE("add %0, %0, %1",
- "nop",
- ARM64_HAS_VIRT_HOST_EXTN)
- : "+r" (v) : "r" (offset));
- return v;
-}
-
-#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
-
#define read_sysreg_elx(r,nvh,vh) \
({ \
u64 reg; \
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index f05ac27d033e..b6bb83400cd8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -29,21 +29,48 @@
*
* Instead, give the HYP mode its own VA region at a fixed offset from
* the kernel by just masking the top bits (which are all ones for a
- * kernel address).
+ * kernel address). We need to find out how many bits to mask.
*
- * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
- * macros (the entire kernel runs at EL2).
+ * We want to build a set of page tables that cover both parts of the
+ * idmap (the trampoline page used to initialize EL2), and our normal
+ * runtime VA space, at the same time.
+ *
+ * Given that the kernel uses VA_BITS for its entire address space,
+ * and that half of that space (VA_BITS - 1) is used for the linear
+ * mapping, we can also limit the EL2 space to (VA_BITS - 1).
+ *
+ * The main question is "Within the VA_BITS space, does EL2 use the
+ * top or the bottom half of that space to shadow the kernel's linear
+ * mapping?". As we need to idmap the trampoline page, this is
+ * determined by the range in which this page lives.
+ *
+ * If the page is in the bottom half, we have to use the top half. If
+ * the page is in the top half, we have to use the bottom half:
+ *
+ * T = __virt_to_phys(__hyp_idmap_text_start)
+ * if (T & BIT(VA_BITS - 1))
+ * HYP_VA_MIN = 0 //idmap in upper half
+ * else
+ * HYP_VA_MIN = 1 << (VA_BITS - 1)
+ * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
+ *
+ * This of course assumes that the trampoline page exists within the
+ * VA_BITS range. If it doesn't, then it means we're in the odd case
+ * where the kernel idmap (as well as HYP) uses more levels than the
+ * kernel runtime page tables (as seen when the kernel is configured
+ * for 4k pages, 39bits VA, and yet memory lives just above that
+ * limit, forcing the idmap to use 4 levels of page tables while the
+ * kernel itself only uses 3). In this particular case, it doesn't
+ * matter which side of VA_BITS we use, as we're guaranteed not to
+ * conflict with anything.
+ *
+ * When using VHE, there are no separate hyp mappings and all KVM
+ * functionality is already mapped as part of the main kernel
+ * mappings, and none of this applies in that case.
*/
-#define HYP_PAGE_OFFSET_SHIFT VA_BITS
-#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
-#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
-/*
- * Our virtual mapping for the idmap-ed MMU-enable code. Must be
- * shared across all the page-tables. Conveniently, we use the last
- * possible page, where no kernel mapping will ever exist.
- */
-#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
+#define HYP_PAGE_OFFSET_HIGH_MASK ((UL(1) << VA_BITS) - 1)
+#define HYP_PAGE_OFFSET_LOW_MASK ((UL(1) << (VA_BITS - 1)) - 1)
#ifdef __ASSEMBLY__
@@ -53,13 +80,33 @@
/*
* Convert a kernel VA into a HYP VA.
* reg: VA to be converted.
+ *
+ * This generates the following sequences:
+ * - High mask:
+ * and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
+ * nop
+ * - Low mask:
+ * and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
+ * and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
+ * - VHE:
+ * nop
+ * nop
+ *
+ * The "low mask" version works because the mask is a strict subset of
+ * the "high mask", hence performing the first mask for nothing.
+ * Should be completely invisible on any viable CPU.
*/
.macro kern_hyp_va reg
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- and \reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ and \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else
nop
alternative_endif
+alternative_if_not ARM64_HYP_OFFSET_LOW
+ nop
+alternative_else
+ and \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
+alternative_endif
.endm
#else
@@ -70,7 +117,22 @@ alternative_endif
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
-#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+ asm volatile(ALTERNATIVE("and %0, %0, %1",
+ "nop",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "+r" (v)
+ : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
+ asm volatile(ALTERNATIVE("nop",
+ "and %0, %0, %1",
+ ARM64_HYP_OFFSET_LOW)
+ : "+r" (v)
+ : "i" (HYP_PAGE_OFFSET_LOW_MASK));
+ return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
/*
* We currently only support a 40bit IPA.
@@ -81,9 +143,8 @@ alternative_endif
#include <asm/stage2_pgtable.h>
-int create_hyp_mappings(void *from, void *to);
+int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
-void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
@@ -97,7 +158,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
-phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
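
The long comment added to kvm_mmu.h reduces to a small decision plus two masks: pick the half of the VA_BITS space that does not contain the idmap page, then have kern_hyp_va() always apply the high mask and additionally the low mask when the ARM64_HYP_OFFSET_LOW capability is set. A standalone sketch of that logic, assuming VA_BITS = 39, a non-VHE system, and made-up addresses (an illustration of the masking only, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

#define VA_BITS 39ULL                               /* assumed kernel VA configuration */
#define HIGH_MASK ((1ULL << VA_BITS) - 1)           /* HYP_PAGE_OFFSET_HIGH_MASK */
#define LOW_MASK  ((1ULL << (VA_BITS - 1)) - 1)     /* HYP_PAGE_OFFSET_LOW_MASK */

/* Mirrors the hyp_offset_low() test (non-VHE assumed): the reduced offset is
 * chosen when the idmap page sits above the lower half of the VA_BITS range,
 * so HYP shadows the kernel's linear map in the bottom half instead. */
static int use_low_offset(uint64_t idmap_phys)
{
	return idmap_phys > LOW_MASK;
}

/* Model of kern_hyp_va: the high mask is always applied; the low mask is
 * applied on top of it only when the low-offset capability is active. */
static uint64_t kern_to_hyp(uint64_t kva, int low_offset)
{
	uint64_t v = kva & HIGH_MASK;

	if (low_offset)
		v &= LOW_MASK;
	return v;
}

int main(void)
{
	uint64_t kva = 0xffffffc000123456ULL;          /* made-up kernel VA */

	/* idmap below the half-way mark: HYP keeps the top half */
	printf("0x%llx\n", (unsigned long long)kern_to_hyp(kva, use_low_offset(0x0880000000ULL)));
	/* idmap above it: the low-offset alternative kicks in */
	printf("0x%llx\n", (unsigned long long)kern_to_hyp(kva, use_low_offset(0x4080000000ULL)));
	return 0;
}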
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 2813748e2f24..c3ae239db3ee 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -164,6 +164,7 @@
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
+#define PTE_HYP_XN (_AT(pteval_t, 1) << 54) /* HYP XN */
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 29fcb33ab401..39f5252673f7 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -55,7 +55,9 @@
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
-#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP)
+#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 46472a91b6df..e20bd431184a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -224,6 +224,23 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte(ptep, pte);
}
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ pteval_t lhs, rhs;
+
+ lhs = pte_val(pte_a);
+ rhs = pte_val(pte_b);
+
+ if (pte_present(pte_a))
+ lhs &= ~PTE_RDONLY;
+
+ if (pte_present(pte_b))
+ rhs &= ~PTE_RDONLY;
+
+ return (lhs == rhs);
+}
+
/*
* Huge pte definitions.
*/
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5e834d10b291..c47257c91b77 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
- return __arch_copy_from_user(to, from, n);
+ check_object_size(to, n, false);
+ return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
- return __arch_copy_to_user(to, from, n);
+ check_object_size(from, n, true);
+ return __arch_copy_to_user(to, from, n);
}
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(VERIFY_READ, from, n)) {
+ check_object_size(to, n, false);
n = __arch_copy_from_user(to, from, n);
- else /* security hole - plug it */
+ } else /* security hole - plug it */
memset(to, 0, n);
return n;
}
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
{
kasan_check_read(from, n);
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ check_object_size(from, n, true);
n = __arch_copy_to_user(to, from, n);
+ }
return n;
}
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index bbc6a8cf83f1..1788545f25bc 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -87,6 +87,10 @@ extern void verify_cpu_run_el(void);
static inline void verify_cpu_run_el(void) {}
#endif
+/* The section containing the hypervisor idmap text */
+extern char __hyp_idmap_text_start[];
+extern char __hyp_idmap_text_end[];
+
/* The section containing the hypervisor text */
extern char __hyp_text_start[];
extern char __hyp_text_end[];
diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
index 22d6d8885854..4cf0c17787a8 100644
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -19,4 +19,6 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index f209ea151dca..3051f86a9b5f 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -87,9 +87,11 @@ struct kvm_regs {
/* Supported VGICv3 address types */
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+#define KVM_VGIC_ITS_ADDR_TYPE 4
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 916d27ad79c1..62272eac1352 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -726,6 +726,19 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
return is_kernel_in_hyp_mode();
}
+static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
+
+ /*
+ * Activate the lower HYP offset only if:
+ * - the idmap doesn't clash with it,
+ * - the kernel is not running at EL2.
+ */
+ return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -803,6 +816,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
},
+ {
+ .desc = "Reduced HYP mapping offset",
+ .capability = ARM64_HYP_OFFSET_LOW,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = hyp_offset_low,
+ },
{},
};
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2c6e598a94dc..b77f58355da1 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -781,40 +781,25 @@ __primary_switch:
* Iterate over each entry in the relocation table, and apply the
* relocations in place.
*/
- ldr w8, =__dynsym_offset // offset to symbol table
ldr w9, =__rela_offset // offset to reloc table
ldr w10, =__rela_size // size of reloc table
mov_q x11, KIMAGE_VADDR // default virtual offset
add x11, x11, x23 // actual virtual offset
- add x8, x8, x11 // __va(.dynsym)
add x9, x9, x11 // __va(.rela)
add x10, x9, x10 // __va(.rela) + sizeof(.rela)
0: cmp x9, x10
- b.hs 2f
+ b.hs 1f
ldp x11, x12, [x9], #24
ldr x13, [x9, #-8]
cmp w12, #R_AARCH64_RELATIVE
- b.ne 1f
+ b.ne 0b
add x13, x13, x23 // relocate
str x13, [x11, x23]
b 0b
-1: cmp w12, #R_AARCH64_ABS64
- b.ne 0b
- add x12, x12, x12, lsl #1 // symtab offset: 24x top word
- add x12, x8, x12, lsr #(32 - 3) // ... shifted into bottom word
- ldrsh w14, [x12, #6] // Elf64_Sym::st_shndx
- ldr x15, [x12, #8] // Elf64_Sym::st_value
- cmp w14, #-0xf // SHN_ABS (0xfff1) ?
- add x14, x15, x23 // relocate
- csel x15, x14, x15, ne
- add x15, x13, x15
- str x15, [x11, x23]
- b 0b
-
-2:
+1:
#endif
ldr x8, =__primary_switched
br x8
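
With the ABS64 handling gone, the __primary_switch loop above only processes R_AARCH64_RELATIVE entries: for each one it stores the link-time addend plus the runtime displacement into the slot named by r_offset (itself shifted by the same displacement in the running image). A standalone sketch of one pass over such a table, with a made-up base address, displacement, and buffer standing in for the mapped image (not the actual boot code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define R_AARCH64_RELATIVE 1027

/* ELF64 RELA entry layout as consumed by the loop. */
struct elf64_rela {
	uint64_t r_offset;   /* link-time address of the 64-bit slot to patch */
	uint64_t r_info;     /* relocation type in the low 32 bits */
	int64_t  r_addend;   /* link-time value to store */
};

static void apply_relative(uint8_t *image, uint64_t link_base, uint64_t disp,
			   const struct elf64_rela *rela, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if ((uint32_t)rela[i].r_info != R_AARCH64_RELATIVE)
			continue;                    /* everything else is skipped now */

		uint64_t value = (uint64_t)rela[i].r_addend + disp;
		/* 'image' models the relocated kernel, so the slot index is
		 * simply r_offset relative to the link-time base. */
		memcpy(image + (rela[i].r_offset - link_base), &value, sizeof(value));
	}
}

int main(void)
{
	uint8_t image[64] = { 0 };
	const uint64_t link_base = 0xffff000008080000ULL;   /* made-up link address */
	const uint64_t disp = 0x200000ULL;                   /* hypothetical KASLR shift */
	struct elf64_rela rela[] = {
		{ link_base + 8, R_AARCH64_RELATIVE, (int64_t)(link_base + 0x1000) },
	};
	uint64_t patched;

	apply_relative(image, link_base, disp, rela, 1);
	memcpy(&patched, image + 8, sizeof(patched));
	printf("slot now holds 0x%llx\n", (unsigned long long)patched);
	return 0;
}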
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 3c4e308b40a0..acf38722457b 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -17,6 +17,9 @@
#include <linux/mm.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/slab.h>
/*
@@ -36,25 +39,17 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return res->start;
}
-/**
- * pcibios_enable_device - Enable I/O and memory.
- * @dev: PCI device to be enabled
- * @mask: bitmask of BARs to enable
- */
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- if (pci_has_flag(PCI_PROBE_ONLY))
- return 0;
-
- return pci_enable_resources(dev, mask);
-}
-
/*
- * Try to assign the IRQ number from DT when adding a new device
+ * Try to assign the IRQ number when probing a new device
*/
-int pcibios_add_device(struct pci_dev *dev)
+int pcibios_alloc_irq(struct pci_dev *dev)
{
- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+ if (acpi_disabled)
+ dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
+#ifdef CONFIG_ACPI
+ else
+ return acpi_pci_irq_enable(dev);
+#endif
return 0;
}
@@ -65,13 +60,21 @@ int pcibios_add_device(struct pci_dev *dev)
int raw_pci_read(unsigned int domain, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *val)
{
- return -ENXIO;
+ struct pci_bus *b = pci_find_bus(domain, bus);
+
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->read(b, devfn, reg, len, val);
}
int raw_pci_write(unsigned int domain, unsigned int bus,
unsigned int devfn, int reg, int len, u32 val)
{
- return -ENXIO;
+ struct pci_bus *b = pci_find_bus(domain, bus);
+
+ if (!b)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return b->ops->write(b, devfn, reg, len, val);
}
#ifdef CONFIG_NUMA
@@ -85,10 +88,124 @@ EXPORT_SYMBOL(pcibus_to_node);
#endif
#ifdef CONFIG_ACPI
-/* Root bridge scanning */
+
+struct acpi_pci_generic_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg; /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ if (!acpi_disabled) {
+ struct pci_config_window *cfg = bridge->bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ }
+
+ return 0;
+}
+
+/*
+ * Lookup the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
+ struct pci_config_window *cfg;
+ struct resource cfgres;
+ unsigned int bsz;
+
+ /* Use address from _CBA if present, otherwise lookup MCFG */
+ if (!root->mcfg_addr)
+ root->mcfg_addr = pci_mcfg_lookup(seg, bus_res);
+
+ if (!root->mcfg_addr) {
+ dev_err(&root->device->dev, "%04x:%pR ECAM region not found\n",
+ seg, bus_res);
+ return NULL;
+ }
+
+ bsz = 1 << pci_generic_ecam_ops.bus_shift;
+ cfgres.start = root->mcfg_addr + bus_res->start * bsz;
+ cfgres.end = cfgres.start + resource_size(bus_res) * bsz - 1;
+ cfgres.flags = IORESOURCE_MEM;
+ cfg = pci_ecam_create(&root->device->dev, &cfgres, bus_res,
+ &pci_generic_ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(&root->device->dev, "%04x:%pR error %ld mapping ECAM\n",
+ seg, bus_res, PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+ struct acpi_pci_generic_root_info *ri;
+
+ ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+ pci_ecam_free(ri->cfg);
+ kfree(ri);
+}
+
+static struct acpi_pci_root_ops acpi_pci_root_ops = {
+ .release_info = pci_acpi_generic_release_info,
+};
+
+/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
- /* TODO: Should be revisited when implementing PCI on ACPI */
- return NULL;
+ int node = acpi_get_node(root->device->handle);
+ struct acpi_pci_generic_root_info *ri;
+ struct pci_bus *bus, *child;
+
+ ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
+ if (!ri)
+ return NULL;
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
+ kfree(ri);
+ return NULL;
+ }
+
+ acpi_pci_root_ops.pci_ops = &ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, &acpi_pci_root_ops, &ri->common,
+ ri->cfg);
+ if (!bus)
+ return NULL;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ return bus;
}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
#endif
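
pci_acpi_setup_ecam_mapping() sizes the config window purely from the root's bus range: each bus gets 1 << bus_shift bytes of config space (1 MiB per bus for the generic ECAM ops, i.e. 256 devfn slots of 4 KiB), starting bus_res->start buses into the MCFG/_CBA base. A rough model of that window arithmetic with a hypothetical base address and bus range (illustration only; the real code builds a struct resource and hands it to pci_ecam_create()):

#include <stdint.h>
#include <stdio.h>

#define ECAM_BUS_SHIFT 20U   /* 1 MiB of config space per bus: 256 devfn * 4 KiB */

/* The base is treated as corresponding to bus 0, as in the code above. */
static void ecam_window(uint64_t mcfg_addr, unsigned int bus_start,
			unsigned int bus_end, uint64_t *start, uint64_t *end)
{
	uint64_t bsz = 1ULL << ECAM_BUS_SHIFT;
	uint64_t nbuses = (uint64_t)bus_end - bus_start + 1;

	*start = mcfg_addr + (uint64_t)bus_start * bsz;
	*end = *start + nbuses * bsz - 1;
}

int main(void)
{
	uint64_t start, end;

	/* hypothetical segment: ECAM base 0x40000000, buses 0x00-0x1f */
	ecam_window(0x40000000ULL, 0x00, 0x1f, &start, &end);
	printf("ECAM window: [0x%llx - 0x%llx]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}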
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 89d6e177ecbd..659963d40bb4 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -103,6 +103,7 @@ SECTIONS
*(.discard)
*(.discard.*)
*(.interp .dynamic)
+ *(.dynsym .dynstr .hash)
}
. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -174,19 +175,9 @@ SECTIONS
.rela : ALIGN(8) {
*(.rela .rela*)
}
- .dynsym : ALIGN(8) {
- *(.dynsym)
- }
- .dynstr : {
- *(.dynstr)
- }
- .hash : {
- *(.hash)
- }
- __rela_offset = ADDR(.rela) - KIMAGE_VADDR;
+ __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela);
- __dynsym_offset = ADDR(.dynsym) - KIMAGE_VADDR;
. = ALIGN(SEGMENT_ALIGN);
__init_end = .;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index c4f26ef91e77..9c9edc98d271 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -36,6 +36,9 @@ config KVM
select HAVE_KVM_IRQFD
select KVM_ARM_VGIC_V3
select KVM_ARM_PMU if HW_PERF_EVENTS
+ select HAVE_KVM_MSI
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQ_ROUTING
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
@@ -54,13 +57,6 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.
-config KVM_NEW_VGIC
- bool "New VGIC implementation"
- depends on KVM
- default y
- ---help---
- uses the new VGIC implementation
-
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index a7a958ca29d5..695eb3c7ef41 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,7 +20,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
-ifeq ($(CONFIG_KVM_NEW_VGIC),y)
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
@@ -30,12 +29,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
-else
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-endif
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/irqchip.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 32fad75bb9ff..3f9e15722473 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -211,7 +211,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
*
- * We do core registers right here, then we apppend system regs.
+ * We do core registers right here, then we append system regs.
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index a873a6d8be90..6b29d3d9e1f2 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -53,10 +53,9 @@ __invalid:
b .
/*
- * x0: HYP boot pgd
- * x1: HYP pgd
- * x2: HYP stack
- * x3: HYP vectors
+ * x0: HYP pgd
+ * x1: HYP stack
+ * x2: HYP vectors
*/
__do_hyp_init:
@@ -110,71 +109,27 @@ __do_hyp_init:
msr sctlr_el2, x4
isb
- /* Skip the trampoline dance if we merged the boot and runtime PGDs */
- cmp x0, x1
- b.eq merged
-
- /* MMU is now enabled. Get ready for the trampoline dance */
- ldr x4, =TRAMPOLINE_VA
- adr x5, target
- bfi x4, x5, #0, #PAGE_SHIFT
- br x4
-
-target: /* We're now in the trampoline code, switch page tables */
- msr ttbr0_el2, x1
- isb
-
- /* Invalidate the old TLBs */
- tlbi alle2
- dsb sy
-
-merged:
/* Set the stack and new vectors */
+ kern_hyp_va x1
+ mov sp, x1
kern_hyp_va x2
- mov sp, x2
- kern_hyp_va x3
- msr vbar_el2, x3
+ msr vbar_el2, x2
/* Hello, World! */
eret
ENDPROC(__kvm_hyp_init)
/*
- * Reset kvm back to the hyp stub. This is the trampoline dance in
- * reverse. If kvm used an extended idmap, __extended_idmap_trampoline
- * calls this code directly in the idmap. In this case switching to the
- * boot tables is a no-op.
- *
- * x0: HYP boot pgd
- * x1: HYP phys_idmap_start
+ * Reset kvm back to the hyp stub.
*/
ENTRY(__kvm_hyp_reset)
- /* We're in trampoline code in VA, switch back to boot page tables */
- msr ttbr0_el2, x0
- isb
-
- /* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
- ic iallu
- tlbi alle2
- dsb sy
- isb
-
- /* Branch into PA space */
- adr x0, 1f
- bfi x1, x0, #0, #PAGE_SHIFT
- br x1
-
/* We're now in idmap, disable MMU */
-1: mrs x0, sctlr_el2
+ mrs x0, sctlr_el2
ldr x1, =SCTLR_ELx_FLAGS
bic x0, x0, x1 // Clear SCTL_M and etc
msr sctlr_el2, x0
isb
- /* Invalidate the old TLBs */
- tlbi alle2
- dsb sy
-
/* Install stub vectors */
adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 70254a65bd5b..ce9e5e5f28cf 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -164,22 +164,3 @@ alternative_endif
eret
ENDPROC(__fpsimd_guest_restore)
-
-/*
- * When using the extended idmap, we don't have a trampoline page we can use
- * while we switch pages tables during __kvm_hyp_reset. Accessing the idmap
- * directly would be ideal, but if we're using the extended idmap then the
- * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
- * kvm_call_hyp using kern_hyp_va.
- *
- * x0: HYP boot pgd
- * x1: HYP phys_idmap_start
- */
-ENTRY(__extended_idmap_trampoline)
- mov x4, x1
- adr_l x3, __kvm_hyp_reset
-
- /* insert __kvm_hyp_reset()s offset into phys_idmap_start */
- bfi x4, x3, #0, #PAGE_SHIFT
- br x4
-ENDPROC(__extended_idmap_trampoline)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 2d87f36d5cb4..f6d9694ae3b1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -62,6 +62,21 @@ ENTRY(__vhe_hyp_call)
isb
ret
ENDPROC(__vhe_hyp_call)
+
+/*
+ * Compute the idmap address of __kvm_hyp_reset based on the idmap
+ * start passed as a parameter, and jump there.
+ *
+ * x0: HYP phys_idmap_start
+ */
+ENTRY(__kvm_hyp_teardown)
+ mov x4, x0
+ adr_l x3, __kvm_hyp_reset
+
+ /* insert __kvm_hyp_reset()s offset into phys_idmap_start */
+ bfi x4, x3, #0, #PAGE_SHIFT
+ br x4
+ENDPROC(__kvm_hyp_teardown)
el1_sync: // Guest trapped into EL2
save_x0_to_x3
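
The new __kvm_hyp_teardown builds its branch target with a single bfi: it keeps the page frame from phys_idmap_start and splices in the low PAGE_SHIFT bits of __kvm_hyp_reset, relying on the reset code living inside the idmap page. The equivalent arithmetic as a standalone sketch, assuming 4 KiB pages and made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12ULL
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

/* Model of "bfi x4, x3, #0, #PAGE_SHIFT": combine the idmap page frame with
 * the page offset of __kvm_hyp_reset to get its address inside the idmap. */
static uint64_t idmap_address_of(uint64_t phys_idmap_start, uint64_t reset_va)
{
	return (phys_idmap_start & PAGE_MASK) | (reset_va & ~PAGE_MASK);
}

int main(void)
{
	/* made-up numbers: idmap page at 0x880200000, reset code 0x5c8 into its page */
	uint64_t target = idmap_address_of(0x880200000ULL, 0xffff0000081205c8ULL);

	printf("branch target: 0x%llx\n", (unsigned long long)target);
	return 0;
}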
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 4373997d1a70..ae7855f16ec2 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -299,9 +299,16 @@ static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
- unsigned long str_va = (unsigned long)__hyp_panic_string;
+ unsigned long str_va;
- __hyp_do_panic(hyp_kern_va(str_va),
+ /*
+ * Force the panic string to be loaded from the literal pool,
+ * making sure it is a kernel address and not a PC-relative
+ * reference.
+ */
+ asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
+
+ __hyp_do_panic(str_va,
spsr, elr,
read_sysreg(esr_el2), read_sysreg_el2(far),
read_sysreg(hpfar_el2), par,
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index e9e0e6db73f6..898c0e6aedd4 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -132,16 +132,14 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
- bool is_aarch32;
+ bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
u32 esr = 0;
- is_aarch32 = vcpu_mode_is_32bit(vcpu);
-
- *vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
-
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ *vcpu_spsr(vcpu) = cpsr;
vcpu_sys_reg(vcpu, FAR_EL1) = addr;
@@ -172,11 +170,11 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
- *vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
-
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
+
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+ *vcpu_spsr(vcpu) = cpsr;
/*
* Build an unknown exception, depending on the instruction
diff --git a/arch/arm64/kvm/irq.h b/arch/arm64/kvm/irq.h
new file mode 100644
index 000000000000..b74099b905fd
--- /dev/null
+++ b/arch/arm64/kvm/irq.h
@@ -0,0 +1,19 @@
+/*
+ * irq.h: in kernel interrupt controller related definitions
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This header is included by irqchip.c. However, on ARM, interrupt
+ * controller declarations are located in include/kvm/arm_vgic.h since
+ * they are mostly shared between arm and arm64.
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#include <kvm/arm_vgic.h>
+
+#endif
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index b1ad730e1567..5bc460884639 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -65,7 +65,7 @@ static bool cpu_has_32bit_el1(void)
* We currently assume that the number of HW registers is uniform
* across all CPUs (see cpuinfo_sanity_check).
*/
-int kvm_arch_dev_ioctl_check_extension(long ext)
+int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
@@ -86,6 +86,12 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
case KVM_CAP_VCPU_ATTRIBUTES:
r = 1;
break;
+ case KVM_CAP_MSI_DEVID:
+ if (!kvm)
+ r = -EINVAL;
+ else
+ r = kvm->arch.vgic.msis_require_devid;
+ break;
default:
r = 0;
}
@@ -98,7 +104,7 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on
- * the virtual CPU struct to their architectually defined reset
+ * the virtual CPU struct to their architecturally defined reset
* values.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
@@ -132,31 +138,3 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
-
-extern char __hyp_idmap_text_start[];
-
-unsigned long kvm_hyp_reset_entry(void)
-{
- if (!__kvm_cpu_uses_extended_idmap()) {
- unsigned long offset;
-
- /*
- * Find the address of __kvm_hyp_reset() in the trampoline page.
- * This is present in the running page tables, and the boot page
- * tables, so we call the code here to start the trampoline
- * dance in reverse.
- */
- offset = (unsigned long)__kvm_hyp_reset
- - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
-
- return TRAMPOLINE_VA + offset;
- } else {
- /*
- * KVM is running with merged page tables, which don't have the
- * trampoline page mapped. We know the idmap is still mapped,
- * but can't be called into directly. Use
- * __extended_idmap_trampoline to do the call.
- */
- return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline);
- }
-}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a57d650f552c..b0b225ceca18 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1546,7 +1546,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
struct sys_reg_params *params)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
- int cp;
+ int cp = -1;
switch(hsr_ec) {
case ESR_ELx_EC_CP15_32:
@@ -1558,7 +1558,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
cp = 14;
break;
default:
- WARN_ON((cp = -1));
+ WARN_ON(1);
}
kvm_err("Unsupported guest CP%d access at: %08lx\n",
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index f6c55afab3e2..c4284c432ae8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -32,10 +32,10 @@
static int swiotlb __read_mostly;
-static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
bool coherent)
{
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
return pgprot_writecombine(prot);
return prot;
}
@@ -91,7 +91,7 @@ static int __free_from_pool(void *start, size_t size)
static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
@@ -121,7 +121,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
static void __dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool freed;
phys_addr_t paddr = dma_to_phys(dev, dma_handle);
@@ -140,7 +140,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
static void *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page;
void *ptr, *coherent_ptr;
@@ -188,7 +188,7 @@ no_mem:
static void __dma_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
@@ -205,7 +205,7 @@ static void __dma_free(struct device *dev, size_t size,
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dev_addr;
@@ -219,7 +219,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!is_device_dma_coherent(dev))
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
@@ -228,7 +228,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i, ret;
@@ -245,7 +245,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -306,7 +306,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
static int __swiotlb_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -333,7 +333,7 @@ static int __swiotlb_mmap(struct device *dev,
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -435,21 +435,21 @@ out:
static void *__dummy_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return NULL;
}
static void __dummy_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static int __dummy_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return -ENXIO;
}
@@ -457,20 +457,20 @@ static int __dummy_mmap(struct device *dev,
static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return DMA_ERROR_CODE;
}
static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return 0;
}
@@ -478,7 +478,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
static void __dummy_unmap_sg(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
@@ -553,7 +553,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
@@ -613,7 +613,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
size_t iosize = size;
@@ -629,7 +629,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* Hence how dodgy the below logic looks...
*/
if (__in_atomic_pool(cpu_addr, size)) {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_from_pool(cpu_addr, size);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
@@ -639,14 +639,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
iommu_dma_free(dev, area->pages, iosize, &handle);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_pages(virt_to_page(cpu_addr), get_order(size));
}
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vm_struct *area;
int ret;
@@ -666,7 +666,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct vm_struct *area = find_vm_area(cpu_addr);
@@ -707,14 +707,14 @@ static void __iommu_sync_single_for_device(struct device *dev,
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int prot = dma_direction_to_prot(dir, coherent);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_single_for_device(dev, dev_addr, size, dir);
return dev_addr;
@@ -722,9 +722,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
@@ -760,11 +760,11 @@ static void __iommu_sync_sg_for_device(struct device *dev,
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
@@ -774,9 +774,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
static void __iommu_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
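
From here on, the repeated mechanical change in this series is that DMA attributes are passed as a plain unsigned long bitmask instead of a struct dma_attrs pointer: callers OR DMA_ATTR_* flags together and implementations test them with &. A minimal caller-side sketch under that convention (the function name is illustrative):

#include <linux/dma-mapping.h>

/* Illustrative only: passing attributes as a bitmask after this conversion. */
static void *alloc_wc_buffer(struct device *dev, size_t size, dma_addr_t *handle)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	/* Implementations now check (attrs & DMA_ATTR_WRITE_COMBINE) directly. */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
}
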
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 51a558195bb9..4989948d1feb 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -686,9 +686,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
/*
* Check whether the physical FDT address is set and meets the minimum
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
- * at least 8 bytes so that we can always access the size field of the
- * FDT header after mapping the first chunk, double check here if that
- * is indeed the case.
+ * at least 8 bytes so that we can always access the magic and size
+ * fields of the FDT header after mapping the first chunk, double check
+ * here if that is indeed the case.
*/
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
@@ -716,7 +716,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
- if (fdt_check_header(dt_virt) != 0)
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
return NULL;
*size = fdt_totalsize(dt_virt);
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 92cf1fb2b3e6..58610d0df7ed 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -99,7 +99,7 @@ static void __dma_free(struct device *dev, size_t size,
}
static void *avr32_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
dma_addr_t phys;
@@ -109,7 +109,7 @@ static void *avr32_dma_alloc(struct device *dev, size_t size,
return NULL;
phys = page_to_phys(page);
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ if (attrs & DMA_ATTR_WRITE_COMBINE) {
/* Now, map the page into P3 with write-combining turned on */
*handle = phys;
return __ioremap(phys, size, _PAGE_BUFFER);
@@ -119,11 +119,11 @@ static void *avr32_dma_alloc(struct device *dev, size_t size,
}
static void avr32_dma_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
struct page *page;
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ if (attrs & DMA_ATTR_WRITE_COMBINE) {
iounmap(cpu_addr);
page = phys_to_page(handle);
@@ -142,7 +142,7 @@ static void avr32_dma_free(struct device *dev, size_t size,
static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
void *cpu_addr = page_address(page) + offset;
@@ -152,7 +152,7 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 771afe6e4264..53fbbb61aa86 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -79,7 +79,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
}
static void *bfin_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -94,7 +94,7 @@ static void *bfin_dma_alloc(struct device *dev, size_t size,
}
static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(__dma_sync);
static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -139,7 +139,7 @@ static void bfin_dma_sync_sg_for_device(struct device *dev,
static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 166842de3dc7..b59cd7c3261a 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -112,7 +112,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
free_initmem_default(-1);
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index 6b5cd7b0cf32..5717b1e52d96 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -26,8 +26,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
extern void coherent_mem_init(u32 start, u32 size);
void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs);
+ gfp_t gfp, unsigned long attrs);
void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs);
+ dma_addr_t dma_handle, unsigned long attrs);
#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index 8a80f3a250c0..db4a6a301f5e 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -38,7 +38,7 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size,
static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t handle = virt_to_phys(page_address(page) + offset);
@@ -47,13 +47,13 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
}
static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
c6x_dma_sync(handle, size, dir);
}
static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -67,8 +67,7 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
}
static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index f7ee63af2541..95e38ad27c69 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -74,7 +74,7 @@ static void __free_dma_pages(u32 addr, int order)
* virtual and DMA address for that space.
*/
void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
u32 paddr;
int order;
@@ -99,7 +99,7 @@ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
* Free DMA coherent memory as defined by the above mapping.
*/
void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index 60d57c590032..bdc25aa43468 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -397,7 +397,7 @@ static int __init init_axis_flash(void)
if (!romfs_in_flash) {
/* Create an RAM device for the root partition (romfs). */
-#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
+#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0)
/* No use trying to boot this kernel from RAM. Panic! */
printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
"device due to kernel (mis)configuration!\n");
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index bd10d3ba0949..87656c41fec7 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -320,7 +320,7 @@ static int __init init_axis_flash(void)
* but its size must be configured as 0 so as not to conflict
* with our usage.
*/
-#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
+#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0)
if (!romfs_in_flash && !nand_boot) {
printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
"device; configure CONFIG_MTD_MTDRAM with size = 0!\n");
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
index 8d5efa58cce1..1f0636793f0c 100644
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -17,7 +17,7 @@
#include <asm/io.h>
static void *v32_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -37,22 +37,21 @@ static void *v32_dma_alloc(struct device *dev, size_t size,
}
static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static inline dma_addr_t v32_dma_map_page(struct device *dev,
struct page *page, unsigned long offset, size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
return page_to_phys(page) + offset;
}
static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
printk("Map sg\n");
return nents;
diff --git a/arch/frv/include/asm/mc146818rtc.h b/arch/frv/include/asm/mc146818rtc.h
deleted file mode 100644
index 90dfb7a633d1..000000000000
--- a/arch/frv/include/asm/mc146818rtc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* mc146818rtc.h: RTC defs
- *
- * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_MC146818RTC_H
-#define _ASM_MC146818RTC_H
-
-
-#endif /* _ASM_MC146818RTC_H */
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 082be49b5df0..90f2e4cb33d6 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);
static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
@@ -86,7 +86,7 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_ha
}
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct dma_alloc_record *rec;
unsigned long flags;
@@ -107,7 +107,7 @@ static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
@@ -124,7 +124,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 316b7b65348d..f585745b1abc 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -19,8 +19,7 @@
#include <asm/io.h>
static void *frv_dma_alloc(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -32,14 +31,14 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size,
}
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
consistent_free(vaddr);
}
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long dampr2;
void *vaddr;
@@ -69,7 +68,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
diff --git a/arch/h8300/include/asm/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
deleted file mode 100644
index ab9d9646d241..000000000000
--- a/arch/h8300/include/asm/mc146818rtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _H8300_MC146818RTC_H
-#define _H8300_MC146818RTC_H
-
-/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */
-
-#endif /* _H8300_MC146818RTC_H */
diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c
index eeb13d3f2424..3651da045806 100644
--- a/arch/h8300/kernel/dma.c
+++ b/arch/h8300/kernel/dma.c
@@ -12,7 +12,7 @@
static void *dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
@@ -32,7 +32,7 @@ static void *dma_alloc(struct device *dev, size_t size,
static void dma_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
@@ -41,14 +41,14 @@ static void dma_free(struct device *dev, size_t size,
static dma_addr_t map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return page_to_phys(page) + offset;
}
static int map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index aa6203464520..7ef58df909fc 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
#include <asm/io.h>
struct device;
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 9e3ddf792bd3..b9017785fb71 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -51,7 +51,7 @@ static struct gen_pool *coherent_pool;
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
@@ -84,7 +84,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, unsigned long attrs)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
@@ -105,7 +105,7 @@ static int check_addr(const char *name, struct device *hwdev,
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
@@ -172,7 +172,7 @@ static inline void dma_sync(void *addr, size_t size,
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 88977e42af0a..192584d5ac2f 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -93,7 +93,7 @@ void __init mem_init(void)
* Todo: free pages between __init_begin and __init_end; possibly
* some devtree related stuff as well.
*/
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6a15083cc366..18ca6a9ce566 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -52,6 +52,7 @@ config IA64
select MODULES_USE_ELF_RELA
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index a6d6190c9d24..630ee8073899 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -919,7 +919,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
unsigned long poff, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ioc *ioc;
void *addr = page_address(page) + poff;
@@ -1005,7 +1005,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return sba_map_page(dev, virt_to_page(addr),
(unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
@@ -1046,7 +1046,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
@@ -1115,7 +1115,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
}
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
sba_unmap_page(dev, iova, size, dir, attrs);
}
@@ -1130,7 +1130,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
*/
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, struct dma_attrs *attrs)
+ gfp_t flags, unsigned long attrs)
{
struct ioc *ioc;
void *addr;
@@ -1175,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
* device to map single to get an iova mapping.
*/
*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
- size, 0, NULL);
+ size, 0, 0);
return addr;
}
@@ -1191,9 +1191,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
+ sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -1442,7 +1442,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
@@ -1455,7 +1455,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
*/
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -1551,7 +1551,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
*/
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef ASSERT_PDIR_SANITY
struct ioc *ioc;
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 9c39bdfc2da8..ed7f09089f12 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -22,7 +22,6 @@ struct pci_bus;
struct task_struct;
struct pci_dev;
struct msi_desc;
-struct dma_attrs;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
diff --git a/arch/ia64/include/asm/mc146818rtc.h b/arch/ia64/include/asm/mc146818rtc.h
deleted file mode 100644
index 407787a237ba..000000000000
--- a/arch/ia64/include/asm/mc146818rtc.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_IA64_MC146818RTC_H
-#define _ASM_IA64_MC146818RTC_H
-
-/*
- * Machine dependent access functions for RTC registers.
- */
-
-/* empty include file to satisfy the include in genrtc.c */
-
-#endif /* _ASM_IA64_MC146818RTC_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index d1212b84fb83..29bd59790d6c 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -121,32 +121,4 @@ struct thread_info {
/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
-#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif /* !__ASSEMBLY__ */
-
#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 2189d5ddc1ee..465c70982f40 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
+ if (!__builtin_constant_p(count))
+ check_object_size(from, count, true);
+
return __copy_user(to, (__force void __user *) from, count);
}
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
+ if (!__builtin_constant_p(count))
+ check_object_size(to, count, false);
+
return __copy_user((__force void __user *) to, from, count);
}
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
const void *__cu_from = (from); \
long __cu_len = (n); \
\
- if (__access_ok(__cu_to, __cu_len, get_fs())) \
- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ if (__access_ok(__cu_to, __cu_len, get_fs())) { \
+ if (!__builtin_constant_p(n)) \
+ check_object_size(__cu_from, __cu_len, true); \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+ } \
__cu_len; \
})
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
long __cu_len = (n); \
\
__chk_user_ptr(__cu_from); \
- if (__access_ok(__cu_from, __cu_len, get_fs())) \
+ if (__access_ok(__cu_from, __cu_len, get_fs())) { \
+ if (!__builtin_constant_p(n)) \
+ check_object_size(__cu_to, __cu_len, false); \
__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+ } \
__cu_len; \
})
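
The check_object_size() calls added above tie the ia64 copy routines into hardened usercopy: when the length is not a compile-time constant, the kernel verifies that the kernel-side object actually spans that many bytes before copying. An illustrative (not ia64-specific) sketch of the kind of copy this vets; the function and buffer names are made up:

#include <linux/uaccess.h>

/* Illustrative only: a non-constant-length copy now covered by the check. */
static long sample_read(char __user *ubuf, size_t len)
{
	char kbuf[64] = "example";

	/*
	 * len is not a compile-time constant, so copy_to_user() now calls
	 * check_object_size(kbuf, len, true); with CONFIG_HARDENED_USERCOPY
	 * an oversized len would be caught even without the clamp below.
	 */
	if (len > sizeof(kbuf))
		len = sizeof(kbuf);
	return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
}
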
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 3b7a60e40e8a..121295637d0d 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -236,7 +236,7 @@ STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)
void
-efi_gettimeofday (struct timespec *ts)
+efi_gettimeofday (struct timespec64 *ts)
{
efi_time_t tm;
@@ -245,7 +245,7 @@ efi_gettimeofday (struct timespec *ts)
return;
}
- ts->tv_sec = mktime(tm.year, tm.month, tm.day,
+ ts->tv_sec = mktime64(tm.year, tm.month, tm.day,
tm.hour, tm.minute, tm.second);
ts->tv_nsec = tm.nanosecond;
}
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index b72cd7a07222..599507bcec91 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -163,7 +163,7 @@ void arch_crash_save_vmcoreinfo(void)
#endif
}
-unsigned long paddr_vmcoreinfo_note(void)
+phys_addr_t paddr_vmcoreinfo_note(void)
{
return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 07a4e32ae96a..eb9220cde76c 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1831,7 +1831,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
}
/* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
+static void * __ref mca_bootmem(void)
{
return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
KERNEL_STACK_SIZE, 0);
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 939260aeac98..2933208c0285 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
gfp |= GFP_DMA;
@@ -25,7 +25,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 1eeffb7fbb16..5313007d5423 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -141,7 +141,7 @@ enum salinfo_state {
struct salinfo_data {
cpumask_t cpu_event; /* which cpus have outstanding events */
- struct semaphore mutex;
+ wait_queue_head_t read_wait;
u8 *log_buffer;
u64 log_size;
u8 *oemdata; /* decoded oem data */
@@ -182,21 +182,6 @@ struct salinfo_platform_oemdata_parms {
int ret;
};
-/* Kick the mutex that tells user space that there is work to do. Instead of
- * trying to track the state of the mutex across multiple cpus, in user
- * context, interrupt context, non-maskable interrupt context and hotplug cpu,
- * it is far easier just to grab the mutex if it is free then release it.
- *
- * This routine must be called with data_saved_lock held, to make the down/up
- * operation atomic.
- */
-static void
-salinfo_work_to_do(struct salinfo_data *data)
-{
- (void)(down_trylock(&data->mutex) ?: 0);
- up(&data->mutex);
-}
-
static void
salinfo_platform_oemdata_cpu(void *context)
{
@@ -258,7 +243,7 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
}
cpumask_set_cpu(smp_processor_id(), &data->cpu_event);
if (irqsafe) {
- salinfo_work_to_do(data);
+ wake_up_interruptible(&data->read_wait);
spin_unlock_irqrestore(&data_saved_lock, flags);
}
}
@@ -271,14 +256,10 @@ extern void ia64_mlogbuf_dump(void);
static void
salinfo_timeout_check(struct salinfo_data *data)
{
- unsigned long flags;
if (!data->open)
return;
- if (!cpumask_empty(&data->cpu_event)) {
- spin_lock_irqsave(&data_saved_lock, flags);
- salinfo_work_to_do(data);
- spin_unlock_irqrestore(&data_saved_lock, flags);
- }
+ if (!cpumask_empty(&data->cpu_event))
+ wake_up_interruptible(&data->read_wait);
}
static void
@@ -308,10 +289,11 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
int i, n, cpu = -1;
retry:
- if (cpumask_empty(&data->cpu_event) && down_trylock(&data->mutex)) {
+ if (cpumask_empty(&data->cpu_event)) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (down_interruptible(&data->mutex))
+ if (wait_event_interruptible(data->read_wait,
+ !cpumask_empty(&data->cpu_event)))
return -EINTR;
}
@@ -510,7 +492,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
if (data->state == STATE_LOG_RECORD) {
spin_lock_irqsave(&data_saved_lock, flags);
cpumask_set_cpu(cpu, &data->cpu_event);
- salinfo_work_to_do(data);
+ wake_up_interruptible(&data->read_wait);
spin_unlock_irqrestore(&data_saved_lock, flags);
}
return 0;
@@ -582,7 +564,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
i < ARRAY_SIZE(salinfo_data);
++i, ++data) {
cpumask_set_cpu(cpu, &data->cpu_event);
- salinfo_work_to_do(data);
+ wake_up_interruptible(&data->read_wait);
}
spin_unlock_irqrestore(&data_saved_lock, flags);
break;
@@ -640,7 +622,7 @@ salinfo_init(void)
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
data = salinfo_data + i;
data->type = i;
- sema_init(&data->mutex, 1);
+ init_waitqueue_head(&data->read_wait);
dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
if (!dir)
continue;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index c8dbe2acd735..6f892b94e906 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -355,7 +355,7 @@ static struct irqaction timer_irqaction = {
.name = "timer"
};
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
{
efi_gettimeofday(ts);
}
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 8f59907007cb..74c934a997bb 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(sn_dma_set_mask);
*/
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *cpuaddr;
unsigned long phys_addr;
@@ -138,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* any associated IOMMU mappings.
*/
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -176,21 +176,18 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- int dmabarr;
-
- dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
BUG_ON(!dev_is_pci(dev));
phys_addr = __pa(cpu_addr);
- if (dmabarr)
+ if (attrs & DMA_ATTR_WRITE_BARRIER)
dma_addr = provider->dma_map_consistent(pdev, phys_addr,
size, SN_DMA_ADDR_PHYS);
else
@@ -218,7 +215,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
*/
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -240,7 +237,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
*/
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -273,16 +270,13 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
*/
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
- int dmabarr;
-
- dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
BUG_ON(!dev_is_pci(dev));
@@ -292,7 +286,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
for_each_sg(sgl, sg, nhwentries, i) {
dma_addr_t dma_addr;
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
- if (dmabarr)
+ if (attrs & DMA_ATTR_WRITE_BARRIER)
dma_addr = provider->dma_map_consistent(pdev,
phys_addr,
sg->length,
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 01693df7f2f6..ec9cc1fdd237 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -35,7 +35,6 @@
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 6e62d66c396e..432bc8bacfc2 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -15,7 +15,6 @@
#include <asm/pgtable.h>
#include <asm/apollohw.h>
#include <asm/irq.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
u_long sio01_physaddr;
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 478623dbb209..611d4d9ea2bd 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -34,7 +34,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/bvme6000hw.h>
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index a9befe65adc4..7cfab158fb61 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/rtc.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-hp300.h>
@@ -20,7 +21,6 @@
#include <asm/blinken.h>
#include <asm/io.h> /* readb() and writeb() */
#include <asm/hp300hw.h>
-#include <asm/rtc.h>
#include "time.h"
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index f9454b89a51b..00c392b0cabd 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -1,5 +1,5 @@
/*
- * include/asm-m68knommu/flat.h -- uClinux flat-format executables
+ * flat.h -- uClinux flat-format executables
*/
#ifndef __M68KNOMMU_FLAT_H__
@@ -8,8 +8,9 @@
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p) get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval) put_unaligned(val,rp)
+#define flat_get_addr_from_rp(rp, relval, flags, p) \
+ ({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
+#define flat_put_addr_at_rp(rp, val, relval) __put_user_unaligned(val, rp)
#define flat_get_relocate_addr(rel) (rel)
static inline int flat_set_persistent(unsigned long relval,
@@ -18,4 +19,10 @@ static inline int flat_set_persistent(unsigned long relval,
return 0;
}
+#define FLAT_PLAT_INIT(regs) \
+ do { \
+ if (current->mm) \
+ (regs)->d5 = current->mm->start_data; \
+ } while (0)
+
#endif /* __M68KNOMMU_FLAT_H__ */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index a6ce2ec8d693..c84a2183b3f0 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -110,7 +110,6 @@ struct thread_struct {
#define setframeformat(_regs) do { } while (0)
#endif
-#ifdef CONFIG_MMU
/*
* Do necessary setup to start up a newly executed thread.
*/
@@ -123,26 +122,14 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
wrusp(usp);
}
+#ifdef CONFIG_MMU
extern int handle_kernel_fault(struct pt_regs *regs);
-
#else
-
-#define start_thread(_regs, _pc, _usp) \
-do { \
- (_regs)->pc = (_pc); \
- setframeformat(_regs); \
- if (current->mm) \
- (_regs)->d5 = current->mm->start_data; \
- (_regs)->sr &= ~0x2000; \
- wrusp(_usp); \
-} while(0)
-
static inline int handle_kernel_fault(struct pt_regs *regs)
{
/* Any fault in kernel is fatal on non-mmu */
return 0;
}
-
#endif
/* Forward declaration, a strange C thing */
diff --git a/arch/m68k/include/asm/rtc.h b/arch/m68k/include/asm/rtc.h
deleted file mode 100644
index a4d08ea122ee..000000000000
--- a/arch/m68k/include/asm/rtc.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* include/asm-m68k/rtc.h
- *
- * Copyright Richard Zidlicky
- * implementation details for genrtc/q40rtc driver
- */
-/* permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
- */
-
-#ifndef _ASM_RTC_H
-#define _ASM_RTC_H
-
-#ifdef __KERNEL__
-
-#include <linux/rtc.h>
-#include <asm/errno.h>
-#include <asm/machdep.h>
-
-#define RTC_PIE 0x40 /* periodic interrupt enable */
-#define RTC_AIE 0x20 /* alarm interrupt enable */
-#define RTC_UIE 0x10 /* update-finished interrupt enable */
-
-/* some dummy definitions */
-#define RTC_BATT_BAD 0x100 /* battery bad */
-#define RTC_SQWE 0x08 /* enable square-wave output */
-#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
-#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
-#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
-
-static inline unsigned int get_rtc_time(struct rtc_time *time)
-{
- /*
- * Only the values that we read from the RTC are set. We leave
- * tm_wday, tm_yday and tm_isdst untouched. Even though the
- * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
- * by the RTC when initially set to a non-zero value.
- */
- if (mach_hwclk)
- mach_hwclk(0, time);
- return RTC_24H;
-}
-
-static inline int set_rtc_time(struct rtc_time *time)
-{
- if (mach_hwclk)
- return mach_hwclk(1, time);
- return -EINVAL;
-}
-
-static inline unsigned int get_rtc_ss(void)
-{
- if (mach_get_ss)
- return mach_get_ss();
- else{
- struct rtc_time h;
-
- get_rtc_time(&h);
- return h.tm_sec;
- }
-}
-
-static inline int get_rtc_pll(struct rtc_pll_info *pll)
-{
- if (mach_get_rtc_pll)
- return mach_get_rtc_pll(pll);
- else
- return -EINVAL;
-}
-static inline int set_rtc_pll(struct rtc_pll_info *pll)
-{
- if (mach_set_rtc_pll)
- return mach_set_rtc_pll(pll);
- else
- return -EINVAL;
-}
-#endif /* __KERNEL__ */
-
-#endif /* _ASM__RTC_H */
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index cbc78b4117b5..8cf97cbadc91 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -19,7 +19,7 @@
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t flag, struct dma_attrs *attrs)
+ gfp_t flag, unsigned long attrs)
{
struct page *page, **map;
pgprot_t pgprot;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
}
static void m68k_dma_free(struct device *dev, size_t size, void *addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
vfree(addr);
@@ -73,7 +73,7 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
#include <asm/cacheflush.h>
static void *m68k_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
/* ignore region specifiers */
@@ -91,7 +91,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
}
static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
@@ -130,7 +130,7 @@ static void m68k_dma_sync_sg_for_device(struct device *dev,
static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t handle = page_to_phys(page) + offset;
@@ -139,7 +139,7 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
}
static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct scatterlist *sg;
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 3857737e3958..4e5aa2f4f522 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -86,7 +86,49 @@ void read_persistent_clock(struct timespec *ts)
}
}
-#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+#if defined(CONFIG_ARCH_USES_GETTIMEOFFSET) && IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
+static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
+{
+ mach_hwclk(0, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
+{
+ if (mach_hwclk(1, tm) < 0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct rtc_pll_info pll;
+ struct rtc_pll_info __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case RTC_PLL_GET:
+ if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
+ return -EINVAL;
+ return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
+
+ case RTC_PLL_SET:
+ if (!mach_set_rtc_pll)
+ return -EINVAL;
+ if (!capable(CAP_SYS_TIME))
+ return -EACCES;
+ if (copy_from_user(&pll, argp, sizeof(pll)))
+ return -EFAULT;
+ return mach_set_rtc_pll(&pll);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static const struct rtc_class_ops generic_rtc_ops = {
+ .ioctl = rtc_ioctl,
+ .read_time = rtc_generic_get_time,
+ .set_time = rtc_generic_set_time,
+};
static int __init rtc_init(void)
{
@@ -95,7 +137,9 @@ static int __init rtc_init(void)
if (!mach_hwclk)
return -ENODEV;
- pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+ &generic_rtc_ops,
+ sizeof(generic_rtc_ops));
return PTR_ERR_OR_ZERO(pdev);
}
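
With asm/rtc.h gone, the m68k hwclk and PLL accessors reach the generic RTC framework through the rtc_class_ops table handed to the "rtc-generic" platform device above. On the driver side this would typically be consumed from platform data, roughly as in the sketch below (an assumption about the consuming driver, not the actual rtc-generic source):

/* Illustrative probe: pick up the ops passed via platform_device_register_data(). */
static int generic_rtc_probe(struct platform_device *pdev)
{
	const struct rtc_class_ops *ops = dev_get_platdata(&pdev->dev);
	struct rtc_device *rtc;

	rtc = devm_rtc_device_register(&pdev->dev, "rtc-generic", ops, THIS_MODULE);
	return PTR_ERR_OR_ZERO(rtc);
}
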
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 689b47d292ac..2f33a33001e5 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -10,6 +10,7 @@
* Miscellaneous linux stuff
*/
+#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
@@ -25,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/adb.h>
#include <linux/cuda.h>
+#include <linux/rtc.h>
#include <asm/setup.h>
#include <asm/bootinfo.h>
@@ -34,7 +36,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/macintosh.h>
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 707b61aea203..0fb54a90eac2 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -18,7 +18,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/rtc.h>
#include <asm/segment.h>
#include <asm/setup.h>
#include <asm/macintosh.h>
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index e6a3b56c6481..c11d38dfad08 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -32,7 +32,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/mvme147hw.h>
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index a53803cc66cd..58e240939d26 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -35,7 +35,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/mvme16xhw.h>
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index e90fe903613e..fcb7f05b60b6 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -12,6 +12,7 @@
* for more details.
*/
+#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -27,7 +28,6 @@
#include <linux/platform_device.h>
#include <asm/io.h>
-#include <asm/rtc.h>
#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 71884bf01d72..3af34fa3a344 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -26,7 +26,6 @@
#include <asm/pgalloc.h>
#include <asm/sun3-head.h>
#include <asm/sun3mmu.h>
-#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/machines.h>
#include <asm/idprom.h>
diff --git a/arch/m68k/sun3/intersil.c b/arch/m68k/sun3/intersil.c
index 889829e11f1d..2cd0bcbe6f30 100644
--- a/arch/m68k/sun3/intersil.c
+++ b/arch/m68k/sun3/intersil.c
@@ -14,8 +14,8 @@
#include <linux/rtc.h>
#include <asm/errno.h>
-#include <asm/rtc.h>
#include <asm/intersil.h>
+#include <asm/machdep.h>
/* bits to set for start/run of the intersil */
diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
index c8eb08add6b0..431d3c4306dd 100644
--- a/arch/m68k/sun3x/time.c
+++ b/arch/m68k/sun3x/time.c
@@ -15,10 +15,10 @@
#include <asm/irq.h>
#include <asm/io.h>
+#include <asm/machdep.h>
#include <asm/traps.h>
#include <asm/sun3x.h>
#include <asm/sun3ints.h>
-#include <asm/rtc.h>
#include "time.h"
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
index 0154e2807ebb..2369ad394876 100644
--- a/arch/metag/include/asm/cmpxchg_lnkget.h
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" DCACHE [%2], %0\n"
#endif
"2:\n"
- : "=&d" (temp), "=&da" (retval)
+ : "=&d" (temp), "=&d" (retval)
: "da" (m), "bd" (old), "da" (new)
: "cc"
);
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h
index aa5a076df439..7848bc6d3b61 100644
--- a/arch/metag/include/asm/metag_mem.h
+++ b/arch/metag/include/asm/metag_mem.h
@@ -881,7 +881,7 @@
#define PERFCTRL_DCSTALL 11 /* Dcache+TLB o/p delayed (per-thread) */
#define PERFCTRL_ICSTALL 12 /* Icache+TLB o/p delayed (per-thread) */
-#define PERFCTRL_INT 13 /* Internal core delailed events (see next) */
+#define PERFCTRL_INT 13 /* Internal core detailed events (see next) */
#define PERFCTRL_EXT 15 /* External source in core periphery */
#endif /* METAC_2_1 */
diff --git a/arch/metag/include/asm/metag_regs.h b/arch/metag/include/asm/metag_regs.h
index 40c3f679c5b8..60b750971d8a 100644
--- a/arch/metag/include/asm/metag_regs.h
+++ b/arch/metag/include/asm/metag_regs.h
@@ -179,7 +179,7 @@
; is best to dump these registers immediately at the start of a routine
; using a MSETL or SETL instruction-
;
-; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2; Only dump argments expected
+; MSETL [A0StP],D0Ar6,D0Ar4,D0Ar2; Only dump arguments expected
;or SETL [A0StP+#8++],D0Ar2 ; Up to two 32-bit args expected
;
; For non-leaf routines it is always necessary to save and restore at least
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
index 04b7d4f8429a..db944c2e7d88 100644
--- a/arch/metag/kernel/cachepart.c
+++ b/arch/metag/kernel/cachepart.c
@@ -15,7 +15,7 @@
#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
-#define CACHE_ASSOCIATIVITY 4 /* 4 way set-assosiative */
+#define CACHE_ASSOCIATIVITY 4 /* 4 way set-associative */
#define ICACHE 0
#define DCACHE 1
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index e12368d02155..0db31e24c541 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -172,7 +172,7 @@ out:
* virtual and bus address for that space.
*/
static void *metag_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
struct metag_vm_region *c;
@@ -268,7 +268,7 @@ no_page:
* free a page as defined by the above mapping.
*/
static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct metag_vm_region *c;
unsigned long flags, addr;
@@ -331,13 +331,13 @@ no_area:
static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, user_size, kern_size;
struct metag_vm_region *c;
int ret = -ENXIO;
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -482,7 +482,7 @@ static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
direction);
@@ -491,14 +491,14 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -516,7 +516,7 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
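
With the conversion from struct dma_attrs * to a plain unsigned long, attribute queries collapse from dma_get_attr() calls into bitwise tests, as the mmap hunk above shows. A minimal stand-alone sketch of the pattern; the flag values and helper name are illustrative, not from this patch:

    #include <stdbool.h>

    /* Illustrative attribute bits modelled on the DMA_ATTR_* flags. */
    #define EX_ATTR_WRITE_COMBINE   (1UL << 0)
    #define EX_ATTR_NON_CONSISTENT  (1UL << 1)

    static bool wants_write_combine(unsigned long attrs)
    {
            /* old: dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)
             * new: attrs & DMA_ATTR_WRITE_COMBINE */
            return attrs & EX_ATTR_WRITE_COMBINE;
    }
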
diff --git a/arch/metag/lib/divsi3.S b/arch/metag/lib/divsi3.S
index 7c8a8ae9a0a1..11124cc93dee 100644
--- a/arch/metag/lib/divsi3.S
+++ b/arch/metag/lib/divsi3.S
@@ -50,7 +50,7 @@ $LIDMCQuick:
ADDCC D0Re0,D0Re0,#1 ! If yes result += 1
SUBCC D1Ar1,D1Ar1,D1Re0 ! and A -= Bu
ORS D0Ar4,D0Ar4,D0Ar4 ! Return neg result?
- NEG D0Ar2,D0Re0 ! Calulate neg result
+ NEG D0Ar2,D0Re0 ! Calculate neg result
MOVMI D0Re0,D0Ar2 ! Yes: Take neg result
$LIDMCRet:
MOV PC,D1RtP
@@ -94,7 +94,7 @@ $LIDMCLoop:
LSR D1Re0, D1Re0, #1 ! Shift down B
BNZ $LIDMCLoop ! Was single bit in curbit lost?
ORS D0Ar4,D0Ar4,D0Ar4 ! Return neg result?
- NEG D0Ar2,D0Re0 ! Calulate neg result
+ NEG D0Ar2,D0Re0 ! Calculate neg result
MOVMI D0Re0,D0Ar2 ! Yes: Take neg result
MOV PC,D1RtP
.size ___divsi3,.-___divsi3
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 372783a67dda..c765b3621b9b 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -187,7 +187,7 @@ bad_area_nosemaphore:
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
- pr_info("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
+ printk("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, task_pid_nr(tsk), address,
regs->ctx.CurrPC, regs->ctx.AX[0].U0,
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 1884783d15c0..1768d4bdc8d3 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -25,7 +25,6 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index fc3ecb55f1b2..2a120bb70e54 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -82,9 +82,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
pgprot_t prot);
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc,
- resource_size_t *start, resource_size_t *end);
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 383f387b4eee..e7e8954e9815 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -148,33 +148,6 @@ static inline struct thread_info *current_thread_info(void)
*/
/* FPU was used by this task this quantum (SMP) */
#define TS_USEDFPU 0x0001
-#define TS_RESTORE_SIGMASK 0x0002
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec229437..ec04dc1e2527 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -17,7 +17,7 @@
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
@@ -42,7 +42,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
@@ -53,7 +53,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -78,7 +78,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
unsigned long offset,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
__dma_sync(page_to_phys(page) + offset, size, direction);
return page_to_phys(page) + offset;
@@ -88,7 +88,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* There is not necessary to do cache cleanup
*
@@ -157,7 +157,7 @@ dma_direct_sync_sg_for_device(struct device *dev,
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_MMU
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 77bc7c7e6522..434639f9a3a6 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -414,7 +414,7 @@ void __init *early_get_page(void)
#endif /* CONFIG_MMU */
-void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
{
if (mem_init_done)
return kmalloc(size, mask);
@@ -422,7 +422,7 @@ void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
return alloc_bootmem(size);
}
-void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index eb99fcc76088..cc732fe357ad 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -234,7 +234,7 @@ unsigned long iopa(unsigned long addr)
return pa;
}
-__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 14cba600da7a..81556b843a8e 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -219,33 +219,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
}
/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
- pgprot_t protection,
- enum pci_mmap_state mmap_state,
- int write_combine)
-{
- pgprot_t prot = protection;
-
- /* Write combine is always 0 on non-memory space mappings. On
- * memory space, if the user didn't pass 1, we check for a
- * "prefetchable" resource. This is a bit hackish, but we use
- * this to workaround the inability of /sysfs to provide a write
- * combine bit
- */
- if (mmap_state != pci_mmap_mem)
- write_combine = 0;
- else if (write_combine == 0) {
- if (rp->flags & IORESOURCE_PREFETCH)
- write_combine = 1;
- }
-
- return pgprot_noncached(prot);
-}
-
-/*
* This one is used by /dev/mem and fbdev who have no clue about the
* PCI device, it tries to find the PCI device first and calls the
* above routine
@@ -317,9 +290,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
- vma->vm_page_prot,
- mmap_state, write_combine);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
@@ -473,39 +444,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- resource_size_t offset = 0;
+ struct pci_bus_region region;
- if (hose == NULL)
+ if (rsrc->flags & IORESOURCE_IO) {
+ pcibios_resource_to_bus(dev->bus, &region,
+ (struct resource *) rsrc);
+ *start = region.start;
+ *end = region.end;
return;
+ }
- if (rsrc->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
- /* We pass a fully fixed up address to userland for MMIO instead of
- * a BAR value because X is lame and expects to be able to use that
- * to pass to /dev/mem !
+ /* We pass a CPU physical address to userland for MMIO instead of a
+ * BAR value because X is lame and expects to be able to use that
+ * to pass to /dev/mem!
*
- * That means that we'll have potentially 64 bits values where some
- * userland apps only expect 32 (like X itself since it thinks only
- * Sparc has 64 bits MMIO) but if we don't do that, we break it on
- * 32 bits CHRPs :-(
- *
- * Hopefully, the sysfs insterface is immune to that gunk. Once X
- * has been fixed (and the fix spread enough), we can re-enable the
- * 2 lines below and pass down a BAR value to userland. In that case
- * we'll also have to re-enable the matching code in
- * __pci_mmap_make_offset().
- *
- * BenH.
+ * That means we may have 64-bit values where some apps only expect
+ * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
*/
-#if 0
- else if (rsrc->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-#endif
-
- *start = rsrc->start - offset;
- *end = rsrc->end - offset;
+ *start = rsrc->start;
+ *end = rsrc->end;
}
/**
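
The values computed by pci_resource_to_user() are what user space reads back from the per-device sysfs resource file. A small sketch, with an example device path, that parses the first BAR line as exported here; on microblaze the MMIO entries are now CPU physical addresses:

    #include <stdio.h>

    int main(void)
    {
            /* Example path; substitute the device of interest. */
            FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/resource", "r");
            unsigned long long start, end, flags;

            if (!f || fscanf(f, "%llx %llx %llx", &start, &end, &flags) != 3) {
                    perror("resource");
                    return 1;
            }
            printf("BAR0: %#llx-%#llx flags %#llx\n", start, end, flags);
            fclose(f);
            return 0;
    }
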
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ac91939b9b75..26388562e300 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -64,6 +64,7 @@ config MIPS
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
select HANDLE_DOMAIN_IRQ
+ select HAVE_EXIT_THREAD
menu "Machine selection"
@@ -384,7 +385,7 @@ config MACH_PISTACHIO
select CLKSRC_MIPS_GIC
select COMMON_CLK
select CSRC_R4K
- select DMA_MAYBE_COHERENT
+ select DMA_NONCOHERENT
select GPIOLIB
select IRQ_MIPS_CPU
select LIBFDT
@@ -880,7 +881,6 @@ config CAVIUM_OCTEON_SOC
select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_CPU_CAVIUM_OCTEON
- select SWAP_IO_SPACE
select HW_HAS_PCI
select ZONE_DMA32
select HOLES_IN_ZONE
@@ -1111,16 +1111,6 @@ config NEED_DMA_MAP_STATE
config SYS_HAS_EARLY_PRINTK
bool
-config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
- depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
- help
- Say Y here to allow turning CPUs off and on. CPUs can be
- controlled through /sys/devices/system/cpu.
- (Note: power management support will enable this option
- automatically on SMP systems. )
- Say N if you want to disable CPU hotplug.
-
config SYS_SUPPORTS_HOTPLUG_CPU
bool
@@ -1406,7 +1396,6 @@ config CPU_LOONGSON1B
bool "Loongson 1B"
depends on SYS_HAS_CPU_LOONGSON1B
select CPU_LOONGSON1
- select ARCH_WANT_OPTIONAL_GPIOLIB
select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
@@ -1488,6 +1477,7 @@ config CPU_MIPS64_R2
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select CPU_SUPPORTS_MSA
+ select HAVE_KVM
help
Choose this option to build a kernel for release 2 or later of the
MIPS64 architecture. Many modern embedded systems with a 64-bit
@@ -1505,6 +1495,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the
MIPS64 architecture. New MIPS processors, starting with the Warrior
@@ -2634,6 +2625,16 @@ config SMP
If you don't know what to do here, say N.
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+ (Note: power management support will enable this option
+ automatically on SMP systems. )
+ Say N if you want to disable CPU hotplug.
+
config SMP_UP
bool
@@ -2885,10 +2886,10 @@ choice
the documented boot protocol using a device tree.
config MIPS_RAW_APPENDED_DTB
- bool "vmlinux.bin"
+ bool "vmlinux.bin or vmlinuz.bin"
help
With this option, the boot code will look for a device tree binary
- DTB) appended to raw vmlinux.bin (without decompressor).
+ (DTB) appended to raw vmlinux.bin or vmlinuz.bin.
(e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb).
This is meant as a backward compatibility convenience for those
@@ -2900,24 +2901,6 @@ choice
look like a DTB header after a reboot if no actual DTB is appended
to vmlinux.bin. Do not leave this option active in a production kernel
if you don't intend to always append a DTB.
-
- config MIPS_ZBOOT_APPENDED_DTB
- bool "vmlinuz.bin"
- depends on SYS_SUPPORTS_ZBOOT
- help
- With this option, the boot code will look for a device tree binary
- DTB) appended to raw vmlinuz.bin (with decompressor).
- (e.g. cat vmlinuz.bin <filename>.dtb > vmlinuz_w_dtb).
-
- This is meant as a backward compatibility convenience for those
- systems with a bootloader that can't be upgraded to accommodate
- the documented boot protocol using a device tree.
-
- Beware that there is very little in terms of protection against
- this option being confused by leftover garbage in memory that might
- look like a DTB header after a reboot if no actual DTB is appended
- to vmlinuz.bin. Do not leave this option active in a production kernel
- if you don't intend to always append a DTB.
endchoice
choice
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 3a0019deb7f7..f206dafbb0a3 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -203,8 +203,8 @@ void __init plat_mem_setup(void)
fdt_start = fw_getenvl("fdt_start");
if (fdt_start)
__dt_setup_arch((void *)KSEG0ADDR(fdt_start));
- else if (fw_arg0 == -2)
- __dt_setup_arch((void *)KSEG0ADDR(fw_arg1));
+ else if (fw_passed_dtb)
+ __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
if (mips_machtype != ATH79_MACH_GENERIC_OF) {
ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index f146d1219bde..6776042679dd 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -162,8 +162,8 @@ void __init plat_mem_setup(void)
/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
- else if (fw_arg0 == -2) /* UHI interface */
- dtb = (void *)fw_arg1;
+ else if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
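
Both setup paths above now rely on fw_passed_dtb rather than decoding the raw firmware arguments themselves. A hypothetical stand-alone helper, with an illustrative name, showing the UHI convention that the new variable abstracts away:

    /* UHI boot protocol: firmware passes -2 in the first argument register
     * and the address of a device tree blob in the second. */
    unsigned long uhi_dtb_from_fw_args(unsigned long a0, unsigned long a1)
    {
            if ((long)a0 == -2)
                    return a1;      /* DTB handed over by the bootloader */
            return 0;               /* no UHI-style DTB was passed */
    }
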
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 080cd53bac36..fdf99e9dd4c3 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/libfdt.h>
#include <asm/addrspace.h>
@@ -36,6 +37,8 @@ extern void puthex(unsigned long long val);
#define puthex(val) do {} while (0)
#endif
+extern char __appended_dtb[];
+
void error(char *x)
{
puts("\n\n");
@@ -114,6 +117,20 @@ void decompress_kernel(unsigned long boot_heap_start)
__decompress((char *)zimage_start, zimage_size, 0, 0,
(void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+ if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) &&
+ fdt_magic((void *)&__appended_dtb) == FDT_MAGIC) {
+ unsigned int image_size, dtb_size;
+
+ dtb_size = fdt_totalsize((void *)&__appended_dtb);
+
+ /* last four bytes are always the image size in little endian */
+ image_size = le32_to_cpup((void *)&__image_end - 4);
+
+ /* copy dtb to where the booted kernel will expect it */
+ memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
+ __appended_dtb, dtb_size);
+ }
+
/* FIXME: should we flush cache here? */
puts("Now, booting the kernel...\n");
}
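
The new hunk probes for a flattened device tree appended immediately after the kernel image before copying it to the address the booted kernel expects. A stand-alone sketch, not the decompressor's code, of the magic check involved; FDT headers are big-endian regardless of CPU endianness:

    #include <stddef.h>
    #include <stdint.h>

    #define FDT_MAGIC 0xd00dfeedU

    /* Return the appended blob if it starts with a valid FDT header magic. */
    const void *find_appended_dtb(const uint8_t *appended, size_t avail)
    {
            uint32_t magic;

            if (avail < sizeof(magic))
                    return NULL;

            magic = (uint32_t)appended[0] << 24 | (uint32_t)appended[1] << 16 |
                    (uint32_t)appended[2] << 8  | (uint32_t)appended[3];

            return magic == FDT_MAGIC ? appended : NULL;
    }
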
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
index c580e853b9fb..409cb483a9ff 100644
--- a/arch/mips/boot/compressed/head.S
+++ b/arch/mips/boot/compressed/head.S
@@ -25,22 +25,6 @@ start:
move s2, a2
move s3, a3
-#ifdef CONFIG_MIPS_ZBOOT_APPENDED_DTB
- PTR_LA t0, __appended_dtb
-#ifdef CONFIG_CPU_BIG_ENDIAN
- li t1, 0xd00dfeed
-#else
- li t1, 0xedfe0dd0
-#endif
- lw t2, (t0)
- bne t1, t2, not_found
- nop
-
- move s1, t0
- PTR_LI s0, -2
-not_found:
-#endif
-
/* Clear BSS */
PTR_LA a0, _edata
PTR_LA a2, _end
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
index d6bc994f736f..b134798a0fd7 100644
--- a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -9,6 +9,7 @@
*/
/include/ "octeon_3xxx.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/ {
model = "dlink,dsr-1000n";
@@ -63,12 +64,27 @@
usb1 {
label = "usb1";
- gpios = <&gpio 9 1>; /* Active low */
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
};
usb2 {
label = "usb2";
- gpios = <&gpio 10 1>; /* Active low */
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "wps";
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless1 {
+ label = "5g";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless2 {
+ label = "2.4g";
+ gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
index de61f02d3ef6..ca6b4467bcd3 100644
--- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -388,16 +388,4 @@
usbn = &usbn;
led0 = &led0;
};
-
- dsr1000n-leds {
- compatible = "gpio-leds";
- usb1 {
- label = "usb1";
- gpios = <&gpio 9 1>; /* Active low */
- };
- usb2 {
- label = "usb2";
- gpios = <&gpio 10 1>; /* Active low */
- };
- };
};
diff --git a/arch/mips/boot/tools/relocs_64.c b/arch/mips/boot/tools/relocs_64.c
index b671b5e2dcd8..06066e6ac2f9 100644
--- a/arch/mips/boot/tools/relocs_64.c
+++ b/arch/mips/boot/tools/relocs_64.c
@@ -9,17 +9,20 @@
typedef uint8_t Elf64_Byte;
-typedef struct {
- Elf64_Word r_sym; /* Symbol index. */
- Elf64_Byte r_ssym; /* Special symbol. */
- Elf64_Byte r_type3; /* Third relocation. */
- Elf64_Byte r_type2; /* Second relocation. */
- Elf64_Byte r_type; /* First relocation. */
+typedef union {
+ struct {
+ Elf64_Word r_sym; /* Symbol index. */
+ Elf64_Byte r_ssym; /* Special symbol. */
+ Elf64_Byte r_type3; /* Third relocation. */
+ Elf64_Byte r_type2; /* Second relocation. */
+ Elf64_Byte r_type; /* First relocation. */
+ } fields;
+ Elf64_Xword unused;
} Elf64_Mips_Rela;
#define ELF_CLASS ELFCLASS64
-#define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->r_sym)
-#define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->r_type)
+#define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->fields.r_sym)
+#define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->fields.r_type)
#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
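
Wrapping the relocation fields in a union keeps the decoded r_info at its natural 64-bit size and alignment and lets the updated ELF_R_SYM/ELF_R_TYPE macros index into it directly. A small stand-alone sketch of the same layout and accessor style; the typedefs are local stand-ins for the ELF ones:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t Word;   /* stand-in for Elf64_Word  */
    typedef uint8_t  Byte;   /* stand-in for Elf64_Byte  */
    typedef uint64_t Xword;  /* stand-in for Elf64_Xword */

    typedef union {
            struct {
                    Word r_sym;     /* Symbol index.      */
                    Byte r_ssym;    /* Special symbol.    */
                    Byte r_type3;   /* Third relocation.  */
                    Byte r_type2;   /* Second relocation. */
                    Byte r_type;    /* First relocation.  */
            } fields;
            Xword unused;           /* forces 64-bit size/alignment */
    } mips64_rela_info;

    int main(void)
    {
            mips64_rela_info info = { .fields = { .r_sym = 42, .r_type = 3 } };

            printf("sym=%u type=%u size=%zu\n",
                   (unsigned)info.fields.r_sym, (unsigned)info.fields.r_type,
                   sizeof(info));
            return 0;
    }
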
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 2cd45f5f9481..fd69528b24fb 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -125,7 +125,7 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
direction, attrs);
@@ -135,7 +135,7 @@ static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
}
static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
mb();
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
}
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -189,7 +189,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 504ed61a47cd..b65a6c1ac016 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -668,7 +668,7 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
/*
* Round size up to mult of minimum alignment bytes We need
* the actual size allocated to allow for blocks to be
- * coallesced when they are freed. The alloc routine does the
+ * coalesced when they are freed. The alloc routine does the
* same rounding up on all allocations.
*/
size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 36e30d65ba05..ff49fc04500c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -186,15 +186,6 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
return 7 - ipd_port;
else
return -1;
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
- /*
- * Port 2 connects to Broadcom PHY (B5081). Other ports (0-1)
- * connect to a switch (BCM53115).
- */
- if (ipd_port == 2)
- return 8;
- else
- return -1;
case CVMX_BOARD_TYPE_KONTRON_S1901:
if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
return 1;
@@ -289,18 +280,6 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
return result;
}
break;
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
- if (ipd_port == 0 || ipd_port == 1) {
- /* Ports 0 and 1 connect to a switch (BCM53115). */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- } else {
- /* Port 2 uses a Broadcom PHY (B5081). */
- is_broadcom_phy = 1;
- }
- break;
}
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
@@ -765,7 +744,6 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
case CVMX_BOARD_TYPE_LANAI2_G:
case CVMX_BOARD_TYPE_NIC10E_66:
case CVMX_BOARD_TYPE_UBNT_E100:
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
return USB_CLOCK_TYPE_CRYSTAL_12;
case CVMX_BOARD_TYPE_NIC10E:
return USB_CLOCK_TYPE_REF_12;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 368eb490354c..5a9b87b7993e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1260,7 +1260,7 @@ static int octeon_irq_gpio_map(struct irq_domain *d,
line = (hw + gpiod->base_hwirq) >> 6;
bit = (hw + gpiod->base_hwirq) & 63;
- if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+ if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1542,10 +1542,6 @@ static int __init octeon_irq_init_ciu(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
- if (r)
- goto err;
-
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
if (r)
goto err;
@@ -1559,10 +1555,6 @@ static int __init octeon_irq_init_ciu(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
- if (r)
- goto err;
-
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
@@ -2077,10 +2069,6 @@ static int __init octeon_irq_init_ciu2(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
- if (r)
- goto err;
-
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
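
The GPIO mapping fix above is an off-by-one correction: an index equal to ARRAY_SIZE() is already past the end of the array, so the guard must use >= rather than >. A tiny stand-alone illustration of the same bound:

    #include <stdio.h>

    #define N 4
    static int table[N];

    /* Reject idx == N as well as anything larger. */
    static int lookup(unsigned int idx)
    {
            if (idx >= N)
                    return -1;
            return table[idx];
    }

    int main(void)
    {
            printf("%d %d\n", lookup(N - 1), lookup(N));   /* prints: 0 -1 */
            return 0;
    }
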
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 7aeafedff94e..b31fbc9d6eae 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,33 +3,27 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2011 Cavium Networks
+ * Copyright (C) 2004-2016 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
-#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/i2c.h>
-#include <linux/usb.h>
-#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <linux/usb/ehci_def.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-rnm-defs.h>
-#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
+#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
+#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
+
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
@@ -78,12 +72,36 @@ static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
+static int __init octeon2_usb_reset(void)
+{
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ u32 ucmd;
+
+ if (!OCTEON_IS_OCTEON2())
+ return 0;
+
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ if (clk_rst_ctl.s.hrst) {
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
+ ucmd &= ~CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ mdelay(2);
+ ucmd |= CMD_RESET;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
+ ucmd |= CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
+ }
+
+ return 0;
+}
+arch_initcall(octeon2_usb_reset);
+
static void octeon2_usb_clocks_start(struct device *dev)
{
u64 div;
union cvmx_uctlx_if_ena if_ena;
union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
- union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
int i;
unsigned long io_clk_64_to_ns;
@@ -131,6 +149,17 @@ static void octeon2_usb_clocks_start(struct device *dev)
if_ena.s.en = 1;
cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+
/* Step 3: Configure the reference clock, PHY, and HCLK */
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
@@ -218,29 +247,10 @@ static void octeon2_usb_clocks_start(struct device *dev)
clk_rst_ctl.s.p_por = 0;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
- /* Step 5: Wait 1 ms for the PHY clock to start. */
- mdelay(1);
+ /* Step 5: Wait 3 ms for the PHY clock to start. */
+ mdelay(3);
- /*
- * Step 6: Program the reset input from automatic test
- * equipment field in the UPHY CSR
- */
- uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
- uphy_ctl_status.s.ate_reset = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /* Step 7: Wait for at least 10ns. */
- ndelay(10);
-
- /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
- uphy_ctl_status.s.ate_reset = 0;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /*
- * Step 9: Wait for at least 20ns for UPHY to output PHY clock
- * signals and OHCI_CLK48
- */
- ndelay(20);
+ /* Steps 6..9 are for ATE only and are skipped. */
/* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
/* 10a */
@@ -261,6 +271,20 @@ static void octeon2_usb_clocks_start(struct device *dev)
clk_rst_ctl.s.p_prst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* Step 11b */
+ udelay(1);
+
+ /* Step 11c */
+ clk_rst_ctl.s.p_prst = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 11d */
+ mdelay(1);
+
+ /* Step 11e */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
/* Step 12: Wait 1 uS. */
udelay(1);
@@ -269,21 +293,9 @@ static void octeon2_usb_clocks_start(struct device *dev)
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
end_clock:
- /* Now we can set some other registers. */
-
- for (i = 0; i <= 1; i++) {
- port_ctl_status.u64 =
- cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
- /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
- port_ctl_status.s.txvreftune = 15;
- port_ctl_status.s.txrisetune = 1;
- port_ctl_status.s.txpreemphasistune = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
- port_ctl_status.u64);
- }
-
/* Set uSOF cycle period to 60,000 bits. */
cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+
exit:
mutex_unlock(&octeon2_usb_clocks_mutex);
}
@@ -311,7 +323,11 @@ static struct usb_ehci_pdata octeon_ehci_pdata = {
#ifdef __BIG_ENDIAN
.big_endian_mmio = 1,
#endif
- .dma_mask_64 = 1,
+ /*
+ * We can DMA from anywhere. But the descriptors must be in
+ * the lower 4GB.
+ */
+ .dma_mask_64 = 0,
.power_on = octeon_ehci_power_on,
.power_off = octeon_ehci_power_off,
};
@@ -689,6 +705,10 @@ int __init octeon_prune_device_tree(void)
if (fdt_check_header(initial_boot_params))
panic("Corrupt Device Tree.");
+ WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N,
+ "Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type));
+
aliases = fdt_path_offset(initial_boot_params, "/aliases");
if (aliases < 0) {
pr_err("Error: No /aliases node in device tree.");
@@ -1032,13 +1052,6 @@ end_led:
}
}
- if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) {
- int dsr1000n_leds = fdt_path_offset(initial_boot_params,
- "/dsr1000n-leds");
- if (dsr1000n_leds >= 0)
- fdt_nop_node(initial_boot_params, dsr1000n_leds);
- }
-
return 0;
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 64f852b063a8..cb16fcc5f8f0 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -40,9 +40,27 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
-#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
+/*
+ * TRUE for devices having registers with little-endian byte
+ * order, FALSE for registers with native-endian byte order.
+ * PCI mandates little-endian, USB and SATA are configurable,
+ * but we chose little-endian for these.
+ */
+const bool octeon_should_swizzle_table[256] = {
+ [0x00] = true, /* bootbus/CF */
+ [0x1b] = true, /* PCI mmio window */
+ [0x1c] = true, /* PCI mmio window */
+ [0x1d] = true, /* PCI mmio window */
+ [0x1e] = true, /* PCI mmio window */
+ [0x68] = true, /* OCTEON III USB */
+ [0x69] = true, /* OCTEON III USB */
+ [0x6c] = true, /* OCTEON III SATA */
+ [0x6f] = true, /* OCTEON II USB */
+};
+EXPORT_SYMBOL(octeon_should_swizzle_table);
+
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 33aab89259f3..4d457d602d3b 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -271,6 +271,7 @@ static int octeon_cpu_disable(void)
return -ENOTSUPP;
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
octeon_fixup_irqs();
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index 9a8c2fe8d334..c136a18c7221 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -42,8 +42,8 @@ const char *get_system_type(void)
/*
* Cobalt doesn't have PS/2 keyboard/mouse interfaces,
- * keyboard conntroller is never used.
- * Also PCI-ISA bridge DMA contoroller is never used.
+ * keyboard controller is never used.
+ * Also PCI-ISA bridge DMA controller is never used.
*/
static struct resource cobalt_reserved_resources[] = {
{ /* dma1 */
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
new file mode 100644
index 000000000000..2c829950be17
--- /dev/null
+++ b/arch/mips/configs/ath25_defconfig
@@ -0,0 +1,119 @@
+CONFIG_ATH25=y
+# CONFIG_COMPACTION is not set
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_FHANDLE is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_CFI_I2 is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH5K=m
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_LEDS_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index dcac308cec39..d470d08362c0 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -59,6 +59,8 @@ CONFIG_EEPROM_AT25=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_OCTEON=y
CONFIG_PATA_OCTEON_CF=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 3b0e51d5a613..c5b04e752e97 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -45,7 +45,7 @@
/*
* Returns the kernel segment base of a given address
*/
-#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
+#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
/*
* Returns the physical address of a CKSEGx / XKPHYS address
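
The KSEGX() fix matters on 64-bit builds, where _ACAST32_ sign-extends its operand: masking a sign-extended address with a zero-extended constant silently clears the upper 32 bits. A stand-alone illustration of the pitfall using plain casts instead of the kernel macros:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t  addr  = (int32_t)0x80001000;                 /* sign-extended KSEG0 address */
            uint64_t wrong = addr & 0xe0000000;                   /* constant zero-extends       */
            uint64_t right = addr & (int64_t)(int32_t)0xe0000000; /* constant sign-extends       */

            printf("wrong=%#llx right=%#llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }
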
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 9f67033961a6..ee9f5f2d18fc 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -127,6 +127,10 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE];
*/
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+#ifdef CONFIG_USE_OF
+extern unsigned long fw_passed_dtb;
+#endif
+
/*
* Platform memory detection hook called by setup_arch
*/
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
new file mode 100644
index 000000000000..a6e067801f23
--- /dev/null
+++ b/arch/mips/include/asm/dsemul.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_DSEMUL_H__
+#define __MIPS_ASM_DSEMUL_H__
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/* Break instruction with special math emu break code set */
+#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
+
+/* When used as a frame index, indicates the lack of a frame */
+#define BD_EMUFRAME_NONE ((int)BIT(31))
+
+struct mm_struct;
+struct pt_regs;
+struct task_struct;
+
+/**
+ * mips_dsemul() - 'Emulate' an instruction from a branch delay slot
+ * @regs: User thread register context.
+ * @ir: The instruction to be 'emulated'.
+ * @branch_pc: The PC of the branch instruction.
+ * @cont_pc: The PC to continue at following 'emulation'.
+ *
+ * Emulate or execute an arbitrary MIPS instruction within the context of
+ * the current user thread. This is used primarily to handle instructions
+ * in the delay slots of emulated branch instructions, for example FP
+ * branch instructions on systems without an FPU.
+ *
+ * Return: Zero on success, negative if ir is a NOP, signal number on failure.
+ */
+extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc);
+
+/**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp: User thread register context.
+ *
+ * Call in response to the BRK_MEMU break instruction used to return to
+ * the kernel from branch delay slot 'emulation' frames following a call
+ * to mips_dsemul(). Restores the user thread PC to the value that was
+ * passed as the cpc parameter to mips_dsemul().
+ *
+ * Return: True if an emulation frame was returned from, else false.
+ */
+extern bool do_dsemulret(struct pt_regs *xcp);
+
+/**
+ * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame
+ * @tsk: The task structure associated with the thread
+ *
+ * If the thread @tsk has a branch delay slot 'emulation' frame
+ * allocated to it then free that frame.
+ *
+ * Return: True if a frame was freed, else false.
+ */
+extern bool dsemul_thread_cleanup(struct task_struct *tsk);
+
+/**
+ * dsemul_thread_rollback() - Rollback from an 'emulation' frame
+ * @regs: User thread register context.
+ *
+ * If the current thread, whose register context is represented by @regs,
+ * is executing within a delay slot 'emulation' frame then exit that
+ * frame. The PC will be rolled back to the branch if the instruction
+ * that was being 'emulated' has not yet executed, or advanced to the
+ * continuation PC if it has.
+ *
+ * Return: True if a frame was exited, else false.
+ */
+extern bool dsemul_thread_rollback(struct pt_regs *regs);
+
+/**
+ * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
+ * @mm: The struct mm_struct to cleanup state for.
+ *
+ * Cleanup state for the given @mm, ensuring that any memory allocated
+ * for delay slot 'emulation' book-keeping is freed. This is to be called
+ * before @mm is freed in order to avoid memory leaks.
+ */
+extern void dsemul_mm_cleanup(struct mm_struct *mm);
+
+#endif /* __MIPS_ASM_DSEMUL_H__ */
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index f5f45717968e..2b3dc2973670 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -458,6 +458,7 @@ extern const char *__elf_platform;
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
@@ -498,4 +499,7 @@ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
extern void mips_set_personality_nan(struct arch_elf_state *state);
extern void mips_set_personality_fp(struct arch_elf_state *state);
+#define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk)
+extern int mips_elf_read_implies_exec(void *elf_ex, int exstack);
+
#endif /* _ASM_ELF_H */
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3225c3c0724b..355dc25172e7 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -24,7 +24,7 @@
#define _ASM_FPU_EMULATOR_H
#include <linux/sched.h>
-#include <asm/break.h>
+#include <asm/dsemul.h>
#include <asm/thread_info.h>
#include <asm/inst.h>
#include <asm/local.h>
@@ -60,27 +60,16 @@ do { \
#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
#endif /* CONFIG_DEBUG_FS */
-extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
- unsigned long cpc);
-extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
-/*
- * Instruction inserted following the badinst to further tag the sequence
- */
-#define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */
-
-/*
- * Break instruction with special math emu break code set
- */
-#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
-
#define SIGNALLING_NAN 0x7ff800007ff80000LL
static inline void fpu_emulator_init_fpu(void)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 36a391d289aa..b54bcadd8aec 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -19,6 +19,9 @@
#include <linux/threads.h>
#include <linux/spinlock.h>
+#include <asm/inst.h>
+#include <asm/mipsregs.h>
+
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -53,6 +56,12 @@
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
#define KVM_MAX_VCPUS 1
@@ -65,8 +74,14 @@
-/* Special address that contains the comm page, used for reducing # of traps */
-#define KVM_GUEST_COMMPAGE_ADDR 0x0
+/*
+ * Special address that contains the comm page, used for reducing # of traps
+ * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
+ * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
+ * caught.
+ */
+#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
+ (0x8000 - PAGE_SIZE))
#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
@@ -93,9 +108,6 @@
#define KVM_INVALID_ADDR 0xdeadbeef
extern atomic_t kvm_mips_instance;
-extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -126,28 +138,6 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-enum kvm_mips_exit_types {
- WAIT_EXITS,
- CACHE_EXITS,
- SIGNAL_EXITS,
- INT_EXITS,
- COP_UNUSABLE_EXITS,
- TLBMOD_EXITS,
- TLBMISS_LD_EXITS,
- TLBMISS_ST_EXITS,
- ADDRERR_ST_EXITS,
- ADDRERR_LD_EXITS,
- SYSCALL_EXITS,
- RESVD_INST_EXITS,
- BREAK_INST_EXITS,
- TRAP_INST_EXITS,
- MSA_FPE_EXITS,
- FPE_EXITS,
- MSA_DISABLED_EXITS,
- FLUSH_DCACHE_EXITS,
- MAX_KVM_MIPS_EXIT_TYPES
-};
-
struct kvm_arch_memory_slot {
};
@@ -215,73 +205,6 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5
-/* Config0 register bits */
-#define CP0C0_M 31
-#define CP0C0_K23 28
-#define CP0C0_KU 25
-#define CP0C0_MDU 20
-#define CP0C0_MM 17
-#define CP0C0_BM 16
-#define CP0C0_BE 15
-#define CP0C0_AT 13
-#define CP0C0_AR 10
-#define CP0C0_MT 7
-#define CP0C0_VI 3
-#define CP0C0_K0 0
-
-/* Config1 register bits */
-#define CP0C1_M 31
-#define CP0C1_MMU 25
-#define CP0C1_IS 22
-#define CP0C1_IL 19
-#define CP0C1_IA 16
-#define CP0C1_DS 13
-#define CP0C1_DL 10
-#define CP0C1_DA 7
-#define CP0C1_C2 6
-#define CP0C1_MD 5
-#define CP0C1_PC 4
-#define CP0C1_WR 3
-#define CP0C1_CA 2
-#define CP0C1_EP 1
-#define CP0C1_FP 0
-
-/* Config2 Register bits */
-#define CP0C2_M 31
-#define CP0C2_TU 28
-#define CP0C2_TS 24
-#define CP0C2_TL 20
-#define CP0C2_TA 16
-#define CP0C2_SU 12
-#define CP0C2_SS 8
-#define CP0C2_SL 4
-#define CP0C2_SA 0
-
-/* Config3 Register bits */
-#define CP0C3_M 31
-#define CP0C3_ISA_ON_EXC 16
-#define CP0C3_ULRI 13
-#define CP0C3_DSPP 10
-#define CP0C3_LPA 7
-#define CP0C3_VEIC 6
-#define CP0C3_VInt 5
-#define CP0C3_SP 4
-#define CP0C3_MT 2
-#define CP0C3_SM 1
-#define CP0C3_TL 0
-
-/* MMU types, the first four entries have the same layout as the
- CP0C0_MT field. */
-enum mips_mmu_types {
- MMU_TYPE_NONE,
- MMU_TYPE_R4000,
- MMU_TYPE_RESERVED,
- MMU_TYPE_FMT,
- MMU_TYPE_R3000,
- MMU_TYPE_R6000,
- MMU_TYPE_R8000
-};
-
/* Resume Flags */
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -298,11 +221,6 @@ enum emulation_result {
EMULATE_PRIV_FAIL,
};
-#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
-#define MIPS3_PG_V 0x00000002 /* Valid */
-#define MIPS3_PG_NV 0x00000000
-#define MIPS3_PG_D 0x00000004 /* Dirty */
-
#define mips3_paddr_to_tlbpfn(x) \
(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
@@ -313,13 +231,11 @@ enum emulation_result {
#define VPN2_MASK 0xffffe000
#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
-#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
- ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
-#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
- ? ((x).tlb_lo1 & MIPS3_PG_V) \
- : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
+#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
@@ -328,26 +244,23 @@ enum emulation_result {
struct kvm_mips_tlb {
long tlb_mask;
long tlb_hi;
- long tlb_lo0;
- long tlb_lo1;
+ long tlb_lo[2];
};
-#define KVM_MIPS_FPU_FPU 0x1
-#define KVM_MIPS_FPU_MSA 0x2
+#define KVM_MIPS_AUX_FPU 0x1
+#define KVM_MIPS_AUX_MSA 0x2
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
- void *host_ebase, *guest_ebase;
+ void *guest_ebase;
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;
/* Host CP0 registers used when handling exits from guest */
unsigned long host_cp0_badvaddr;
- unsigned long host_cp0_cause;
unsigned long host_cp0_epc;
- unsigned long host_cp0_entryhi;
- uint32_t guest_inst;
+ u32 host_cp0_cause;
/* GPRS */
unsigned long gprs[32];
@@ -357,8 +270,8 @@ struct kvm_vcpu_arch {
/* FPU State */
struct mips_fpu_struct fpu;
- /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
- unsigned int fpu_inuse;
+ /* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
+ unsigned int aux_inuse;
/* COP0 State */
struct mips_coproc *cop0;
@@ -370,11 +283,11 @@ struct kvm_vcpu_arch {
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
- uint32_t count_ctl;
+ u32 count_ctl;
/* Count bias from the raw time */
- uint32_t count_bias;
+ u32 count_bias;
/* Frequency of timer in Hz */
- uint32_t count_hz;
+ u32 count_hz;
/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
s64 count_dyn_bias;
/* Resume time */
@@ -388,7 +301,7 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- unsigned long pending_load_cause;
+ u32 pending_load_cause;
	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
unsigned long preempt_entryhi;
@@ -397,8 +310,8 @@ struct kvm_vcpu_arch {
struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
/* Cached guest kernel/user ASIDs */
- uint32_t guest_user_asid[NR_CPUS];
- uint32_t guest_kernel_asid[NR_CPUS];
+ u32 guest_user_asid[NR_CPUS];
+ u32 guest_kernel_asid[NR_CPUS];
struct mm_struct guest_kernel_mm, guest_user_mm;
int last_sched_cpu;
@@ -408,6 +321,7 @@ struct kvm_vcpu_arch {
u8 fpu_enabled;
u8 msa_enabled;
+ u8 kscratch_enabled;
};
@@ -461,6 +375,18 @@ struct kvm_vcpu_arch {
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
+#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
+#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
+#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
+#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
+#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
+#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
+#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
+#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
+#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
+#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
+#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
/*
* Some of the guest registers may be modified asynchronously (e.g. from a
@@ -474,7 +400,7 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -490,7 +416,7 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -507,7 +433,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" or %0, %3 \n"
@@ -542,7 +468,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
- return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+ return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
vcpu->fpu_enabled;
}
@@ -589,9 +515,11 @@ struct kvm_mips_callbacks {
void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
+ unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
+ int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
int (*get_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
@@ -605,8 +533,13 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
-/* Trampoline ASM routine to start running in "Guest" context */
-extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* Building of entry/exception code */
+int kvm_mips_entry_setup(void);
+void *kvm_mips_build_vcpu_run(void *addr);
+void *kvm_mips_build_exception(void *addr, void *handler);
+void *kvm_mips_build_exit(void *addr);
/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
@@ -622,11 +555,11 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */
-uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
-uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
-uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
struct kvm_vcpu *vcpu);
@@ -635,22 +568,24 @@ extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long *hpa0,
- unsigned long *hpa1);
+ struct kvm_mips_tlb *tlb);
-extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+ unsigned long entrylo0,
+ unsigned long entrylo1,
+ int flush_dcache_mask);
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
@@ -667,90 +602,90 @@ extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
/* Emulation */
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
-extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_ri(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
@@ -759,27 +694,27 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
- uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
- uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_store(uint32_t inst,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_load(uint32_t inst,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
@@ -789,13 +724,13 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
/* Dynamic binary translation */
-extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
- struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_cache_index(union mips_instruction inst,
+ u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
/* Misc */
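
/*
 * Reading aid only -- not part of the patch. A minimal sketch of how the
 * reworked tlb_lo[2] array is meant to be used: bit PAGE_SHIFT of the
 * faulting address selects the even or odd half of the TLB pair via
 * TLB_LO_IDX(). The helper name below is invented for illustration.
 */
static bool demo_guest_tlb_hit(struct kvm_mips_tlb *tlb,
			       unsigned long entryhi, unsigned long badvaddr)
{
	/* VPN2 must match; ASID must match unless both halves are global */
	if (!TLB_HI_VPN2_HIT(*tlb, badvaddr) ||
	    !TLB_HI_ASID_HIT(*tlb, entryhi))
		return false;

	/* TLB_LO_IDX() picks tlb_lo[0] or tlb_lo[1] from bit PAGE_SHIFT */
	return TLB_IS_VALID(*tlb, badvaddr);
}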
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index d68e685cde60..bd8b9bbe1771 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -55,7 +55,7 @@
#define cpu_has_mipsmt 0
#define cpu_has_vint 0
#define cpu_has_veic 0
-#define cpu_hwrena_impl_bits 0xc0000000
+#define cpu_hwrena_impl_bits (MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2)
#define cpu_has_wsbh 1
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index cceae32a0732..64b86b9d30fe 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -42,8 +42,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER1,
OCTEON_IRQ_TIMER2,
OCTEON_IRQ_TIMER3,
- OCTEON_IRQ_USB0,
- OCTEON_IRQ_USB1,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
index 374eefafb320..0cf5ac1f7245 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -12,6 +12,14 @@
#ifdef __BIG_ENDIAN
+static inline bool __should_swizzle_bits(volatile void *a)
+{
+ extern const bool octeon_should_swizzle_table[];
+
+ unsigned long did = ((unsigned long)a >> 40) & 0xff;
+ return octeon_should_swizzle_table[did];
+}
+
# define __swizzle_addr_b(port) (port)
# define __swizzle_addr_w(port) (port)
# define __swizzle_addr_l(port) (port)
@@ -19,6 +27,8 @@
#else /* __LITTLE_ENDIAN */
+#define __should_swizzle_bits(a) false
+
static inline bool __should_swizzle_addr(unsigned long p)
{
/* boot bus? */
@@ -35,40 +45,14 @@ static inline bool __should_swizzle_addr(unsigned long p)
#endif /* __BIG_ENDIAN */
-/*
- * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
- * less sane hardware forces software to fiddle with this...
- *
- * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
- * you can't have the numerical value of data and byte addresses within
- * multibyte quantities both preserved at the same time. Hence two
- * variations of functions: non-prefixed ones that preserve the value
- * and prefixed ones that preserve byte addresses. The latters are
- * typically used for moving raw data between a peripheral and memory (cf.
- * string I/O functions), hence the "__mem_" prefix.
- */
-#if defined(CONFIG_SWAP_IO_SPACE)
# define ioswabb(a, x) (x)
# define __mem_ioswabb(a, x) (x)
-# define ioswabw(a, x) le16_to_cpu(x)
+# define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x)
# define __mem_ioswabw(a, x) (x)
-# define ioswabl(a, x) le32_to_cpu(x)
+# define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x)
# define __mem_ioswabl(a, x) (x)
-# define ioswabq(a, x) le64_to_cpu(x)
+# define ioswabq(a, x) (__should_swizzle_bits(a) ? le64_to_cpu(x) : x)
# define __mem_ioswabq(a, x) (x)
-#else
-
-# define ioswabb(a, x) (x)
-# define __mem_ioswabb(a, x) (x)
-# define ioswabw(a, x) (x)
-# define __mem_ioswabw(a, x) cpu_to_le16(x)
-# define ioswabl(a, x) (x)
-# define __mem_ioswabl(a, x) cpu_to_le32(x)
-# define ioswabq(a, x) (x)
-# define __mem_ioswabq(a, x) cpu_to_le32(x)
-
-#endif
-
#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 9411a4c0bdad..58e7874e9347 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -462,7 +462,7 @@ static inline unsigned int mips_cm_max_vp_width(void)
if (mips_cm_revision() >= CM_REV_CM3)
return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
- if (config_enabled(CONFIG_SMP))
+ if (IS_ENABLED(CONFIG_SMP))
return smp_num_siblings;
return 1;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index e1ca65c62f6a..def9d8d13f6e 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -53,7 +53,7 @@
#define CP0_SEGCTL2 $5, 4
#define CP0_WIRED $6
#define CP0_INFO $7
-#define CP0_HWRENA $7, 0
+#define CP0_HWRENA $7
#define CP0_BADVADDR $8
#define CP0_BADINSTR $8, 1
#define CP0_COUNT $9
@@ -533,6 +533,7 @@
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_VI (_ULCAST_(1) << 3)
#define MIPS_CONF_MT (_ULCAST_(7) << 7)
#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
@@ -853,6 +854,24 @@
#define MIPS_CDMMBASE_ADDR_SHIFT 11
#define MIPS_CDMMBASE_ADDR_START 15
+/* RDHWR register numbers */
+#define MIPS_HWR_CPUNUM 0 /* CPU number */
+#define MIPS_HWR_SYNCISTEP 1 /* SYNCI step size */
+#define MIPS_HWR_CC 2 /* Cycle counter */
+#define MIPS_HWR_CCRES 3 /* Cycle counter resolution */
+#define MIPS_HWR_ULR 29 /* UserLocal */
+#define MIPS_HWR_IMPL1 30 /* Implementation dependent */
+#define MIPS_HWR_IMPL2 31 /* Implementation dependent */
+
+/* Bits in HWREna register */
+#define MIPS_HWRENA_CPUNUM (_ULCAST_(1) << MIPS_HWR_CPUNUM)
+#define MIPS_HWRENA_SYNCISTEP (_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
+#define MIPS_HWRENA_CC (_ULCAST_(1) << MIPS_HWR_CC)
+#define MIPS_HWRENA_CCRES (_ULCAST_(1) << MIPS_HWR_CCRES)
+#define MIPS_HWRENA_ULR (_ULCAST_(1) << MIPS_HWR_ULR)
+#define MIPS_HWRENA_IMPL1 (_ULCAST_(1) << MIPS_HWR_IMPL1)
+#define MIPS_HWRENA_IMPL2 (_ULCAST_(1) << MIPS_HWR_IMPL2)
+
/*
* Bitfields in the TX39 family CP0 Configuration Register 3
*/
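
/*
 * Illustration only, assuming nothing beyond the MIPS_HWRENA_* definitions
 * added above: a HWREna value can now be composed symbolically instead of
 * with magic constants such as the old 0xc0000000 in the Octeon override.
 * The helper name and the chosen bit set are made up for this sketch.
 */
static inline unsigned int demo_hwrena_value(void)
{
	/* expose CPU number, cycle counter/resolution and UserLocal to RDHWR */
	return MIPS_HWRENA_CPUNUM | MIPS_HWRENA_CC |
	       MIPS_HWRENA_CCRES | MIPS_HWRENA_ULR;
}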
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 1afa1f986df8..f6ba08d77931 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -2,11 +2,20 @@
#define __ASM_MMU_H
#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
typedef struct {
unsigned long asid[NR_CPUS];
void *vdso;
atomic_t fp_mode_switching;
+
+ /* lock to be held whilst modifying fp_bd_emupage_allocmap */
+ spinlock_t bd_emupage_lock;
+ /* bitmap tracking allocation of fp_bd_emupage */
+ unsigned long *bd_emupage_allocmap;
+ /* wait queue for threads requiring an emuframe */
+ wait_queue_head_t bd_emupage_queue;
} mm_context_t;
#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index fc57e135cb0a..ddd57ade1aa8 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -16,6 +16,7 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
+#include <asm/dsemul.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -128,6 +129,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
atomic_set(&mm->context.fp_mode_switching, 0);
+ mm->context.bd_emupage_allocmap = NULL;
+ spin_lock_init(&mm->context.bd_emupage_lock);
+ init_waitqueue_head(&mm->context.bd_emupage_queue);
+
return 0;
}
@@ -162,6 +167,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void destroy_context(struct mm_struct *mm)
{
+ dsemul_mm_cleanup(mm);
}
#define deactivate_mm(tsk, mm) do { } while (0)
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index ddf496cb2a2a..8967b475ab10 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -168,6 +168,7 @@ static inline unsigned int read_msa_##name(void) \
unsigned int reg; \
__asm__ __volatile__( \
" .set push\n" \
+ " .set fp=64\n" \
" .set msa\n" \
" cfcmsa %0, $" #cs "\n" \
" .set pop\n" \
@@ -179,6 +180,7 @@ static inline void write_msa_##name(unsigned int val) \
{ \
__asm__ __volatile__( \
" .set push\n" \
+ " .set fp=64\n" \
" .set msa\n" \
" ctcmsa $" #cs ", %0\n" \
" .set pop\n" \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 21ed7150fec3..ea0cd9773914 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -162,16 +162,34 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
* __pa()/__va() should be used only during mem init.
*/
-#ifdef CONFIG_64BIT
-#define __pa(x) \
-({ \
- unsigned long __x = (unsigned long)(x); \
- __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \
-})
-#else
-#define __pa(x) \
- ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
-#endif
+static inline unsigned long ___pa(unsigned long x)
+{
+ if (config_enabled(CONFIG_64BIT)) {
+ /*
+ * For MIPS64 the virtual address may either be in one of
+ * the compatibility segments ckseg0 or ckseg1, or it may
+ * be in xkphys.
+ */
+ return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+ }
+
+ if (!config_enabled(CONFIG_EVA)) {
+ /*
+ * We're using the standard MIPS32 legacy memory map, ie.
+ * the address x is going to be in kseg0 or kseg1. We can
+ * handle either case by masking out the desired bits using
+ * CPHYSADDR.
+ */
+ return CPHYSADDR(x);
+ }
+
+ /*
+ * EVA is in use so the memory map could be anything, making it not
+ * safe to just mask out bits.
+ */
+ return x - PAGE_OFFSET + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>
@@ -229,8 +247,10 @@ extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
__virt_addr_valid((const volatile void *) (kaddr))
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
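
/*
 * Minimal sketch, not part of the patch: what the new ___pa() path computes
 * on a 32-bit non-EVA kernel, assuming PHYS_OFFSET is 0 and the usual
 * 0x80000000 kseg0 base. The helper name and the address are made up.
 */
static inline int demo_pa_roundtrip(void)
{
	void *virt = (void *)0x80001000UL;	/* some kseg0 address */
	unsigned long phys = __pa(virt);	/* -> 0x00001000 via CPHYSADDR */

	return __va(phys) == virt;		/* 1: translation is invertible */
}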
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 86b239d9d75d..9b63cd41213d 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -80,16 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, resource_size_t *start,
- resource_size_t *end)
-{
- phys_addr_t size = resource_size(rsrc);
-
- *start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
-}
-
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 7d44e888134f..70128d3f770a 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -159,7 +159,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* it better already be global)
*/
if (pte_none(*buddy)) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
@@ -172,7 +172,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
htw_stop();
/* Preserve global status for the pair */
- if (config_enabled(CONFIG_XPA)) {
+ if (IS_ENABLED(CONFIG_XPA)) {
if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
null.pte_high = _PAGE_GLOBAL;
} else {
@@ -319,7 +319,7 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
pte.pte_low &= ~_PAGE_WRITE;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
@@ -328,7 +328,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
pte.pte_low &= ~_PAGE_MODIFIED;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
@@ -337,7 +337,7 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkold(pte_t pte)
{
pte.pte_low &= ~_PAGE_ACCESSED;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_READ;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
@@ -347,7 +347,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
{
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
@@ -358,7 +358,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
{
pte.pte_low |= _PAGE_MODIFIED;
if (pte.pte_low & _PAGE_WRITE) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
@@ -369,7 +369,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
if (!(pte.pte_low & _PAGE_NO_READ)) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
}
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 7e78b6208d7d..0d36c87acbe2 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -11,12 +11,14 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
+#include <asm/dsemul.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
@@ -78,7 +80,11 @@ extern unsigned int vced_count, vcei_count;
#endif
-#define STACK_TOP (TASK_SIZE & PAGE_MASK)
+/*
+ * One page above the stack is used for branch delay slot "emulation".
+ * See dsemul.c for details.
+ */
+#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
/*
* This decides where the kernel will search for a free chunk of vm
@@ -256,6 +262,12 @@ struct thread_struct {
/* Saved fpu/fpu emulator stuff. */
struct mips_fpu_struct fpu FPU_ALIGN;
+ /* Assigned branch delay slot 'emulation' frame */
+ atomic_t bd_emu_frame;
+ /* PC of the branch from a branch delay slot 'emulation' */
+ unsigned long bd_emu_branch_pc;
+ /* PC to continue from following a branch delay slot 'emulation' */
+ unsigned long bd_emu_cont_pc;
#ifdef CONFIG_MIPS_MT_FPAFF
/* Emulated instruction count */
unsigned long emulated_fp;
@@ -323,6 +335,10 @@ struct thread_struct {
* FPU affinity state (null if not FPAFF) \
*/ \
FPAFF_INIT \
+ /* Delay slot emulation */ \
+ .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \
+ .bd_emu_branch_pc = 0, \
+ .bd_emu_cont_pc = 0, \
/* \
* Saved DSP stuff \
*/ \
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 38902bf97adc..667ca3c467b7 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -210,7 +210,11 @@ static inline void protected_writeback_dcache_line(unsigned long addr)
static inline void protected_writeback_scache_line(unsigned long addr)
{
+#ifdef CONFIG_EVA
+ protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+#else
protected_cache_op(Hit_Writeback_Inv_SD, addr);
+#endif
}
/*
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 684fb3a12ed3..d886d6f7687a 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -16,10 +16,10 @@ static inline const int *get_compat_mode1_syscalls(void)
0, /* null terminated */
};
- if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+ if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
return syscalls_O32;
- if (config_enabled(CONFIG_MIPS32_N32))
+ if (IS_ENABLED(CONFIG_MIPS32_N32))
return syscalls_N32;
BUG();
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d7bfdeba9e84..4f5279a8308d 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -21,6 +21,7 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
+extern unsigned int hwrena;
extern void per_cpu_trap_init(bool);
extern void cpu_cache_init(void);
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 2292373ff11a..23d6b8015c79 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -11,7 +11,7 @@
#include <uapi/asm/signal.h>
-#ifdef CONFIG_MIPS32_COMPAT
+#ifdef CONFIG_MIPS32_O32
extern struct mips_abi mips_abi_32;
#define sig_uses_siginfo(ka, abi) \
@@ -19,8 +19,8 @@ extern struct mips_abi mips_abi_32;
((ka)->sa.sa_flags & SA_SIGINFO))
#else
#define sig_uses_siginfo(ka, abi) \
- (config_enabled(CONFIG_64BIT) ? 1 : \
- (config_enabled(CONFIG_TRAD_SIGNALS) ? \
+ (IS_ENABLED(CONFIG_64BIT) ? 1 : \
+ (IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \
((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
#endif
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 03722d4326a1..8bc6c70a4030 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,7 +23,7 @@
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
-extern cpumask_t cpu_foreign_map;
+extern cpumask_t cpu_foreign_map[];
#define raw_smp_processor_id() (current_thread_info()->cpu)
@@ -53,6 +53,8 @@ extern cpumask_t cpu_coherent_mask;
extern void asmlinkage smp_bootstrap(void);
+extern void calculate_cpu_foreign_map(void);
+
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 47bc45a67e9b..d87882513ee3 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -99,7 +99,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
int ret;
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((config_enabled(CONFIG_32BIT) ||
+ if ((IS_ENABLED(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
(regs->regs[2] == __NR_syscall))
i++;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 7f109d4f64a4..11b965f98d95 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -88,7 +88,7 @@ extern u64 __ua_limit;
*/
static inline bool eva_kernel_access(void)
{
- if (!config_enabled(CONFIG_EVA))
+ if (!IS_ENABLED(CONFIG_EVA))
return false;
return segment_eq(get_fs(), get_ds());
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b6ecfeee4dbe..f7929f65f7ca 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -104,8 +104,13 @@ Ip_u1s2(_bltz);
Ip_u1s2(_bltzl);
Ip_u1u2s3(_bne);
Ip_u2s3u1(_cache);
+Ip_u1u2(_cfc1);
+Ip_u2u1(_cfcmsa);
+Ip_u1u2(_ctc1);
+Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
+Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
Ip_u1u2(_divu);
@@ -141,6 +146,8 @@ Ip_u1(_mfhi);
Ip_u1(_mflo);
Ip_u1u2u3(_mtc0);
Ip_u1u2u3(_mthc0);
+Ip_u1(_mthi);
+Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
index c9c7195272c4..45ba259a3618 100644
--- a/arch/mips/include/uapi/asm/auxvec.h
+++ b/arch/mips/include/uapi/asm/auxvec.h
@@ -14,4 +14,6 @@
/* Location of VDSO image. */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif /* __ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 8051f9aa1379..77429d1622b3 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
enum major_op {
spec_op, bcond_op, j_op, jal_op,
beq_op, bne_op, blez_op, bgtz_op,
- addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
+ addi_op, pop10_op = addi_op, addiu_op, slti_op, sltiu_op,
andi_op, ori_op, xori_op, lui_op,
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
- daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
+ daddi_op, pop30_op = daddi_op, daddiu_op, ldl_op, ldr_op,
spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
- lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+ lld_op, ldc1_op, ldc2_op, pop66_op = ldc2_op, ld_op,
sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
- scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
+ scd_op, sdc1_op, sdc2_op, pop76_op = sdc2_op, sd_op
};
/*
@@ -93,6 +93,50 @@ enum spec3_op {
};
/*
+ * Bits 10-6 minor opcode for r6 spec mult/div encodings
+ */
+enum mult_op {
+ mult_mult_op = 0x0,
+ mult_mul_op = 0x2,
+ mult_muh_op = 0x3,
+};
+enum multu_op {
+ multu_multu_op = 0x0,
+ multu_mulu_op = 0x2,
+ multu_muhu_op = 0x3,
+};
+enum div_op {
+ div_div_op = 0x0,
+ div_div6_op = 0x2,
+ div_mod_op = 0x3,
+};
+enum divu_op {
+ divu_divu_op = 0x0,
+ divu_divu6_op = 0x2,
+ divu_modu_op = 0x3,
+};
+enum dmult_op {
+ dmult_dmult_op = 0x0,
+ dmult_dmul_op = 0x2,
+ dmult_dmuh_op = 0x3,
+};
+enum dmultu_op {
+ dmultu_dmultu_op = 0x0,
+ dmultu_dmulu_op = 0x2,
+ dmultu_dmuhu_op = 0x3,
+};
+enum ddiv_op {
+ ddiv_ddiv_op = 0x0,
+ ddiv_ddiv6_op = 0x2,
+ ddiv_dmod_op = 0x3,
+};
+enum ddivu_op {
+ ddivu_ddivu_op = 0x0,
+ ddivu_ddivu6_op = 0x2,
+ ddivu_dmodu_op = 0x3,
+};
+
+/*
* rt field of bcond opcodes.
*/
enum rt_op {
@@ -103,7 +147,7 @@ enum rt_op {
bltzal_op, bgezal_op, bltzall_op, bgezall_op,
rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
- bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
};
/*
@@ -238,6 +282,21 @@ enum bshfl_func {
};
/*
+ * MSA minor opcodes.
+ */
+enum msa_func {
+ msa_elm_op = 0x19,
+};
+
+/*
+ * MSA ELM opcodes.
+ */
+enum msa_elm {
+ msa_ctc_op = 0x3e,
+ msa_cfc_op = 0x7e,
+};
+
+/*
* func field for MSA MI10 format.
*/
enum msa_mi10_func {
@@ -264,7 +323,7 @@ enum mm_major_op {
mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
- mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+ mm_ori32_op, mm_pool32f_op, mm_pool32s_op, mm_reserved2_op,
mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
@@ -360,7 +419,10 @@ enum mm_32axf_minor_op {
mm_mflo32_op = 0x075,
mm_jalrhb_op = 0x07c,
mm_tlbwi_op = 0x08d,
+ mm_mthi32_op = 0x0b5,
mm_tlbwr_op = 0x0cd,
+ mm_mtlo32_op = 0x0f5,
+ mm_di_op = 0x11d,
mm_jalrs_op = 0x13c,
mm_jalrshb_op = 0x17c,
mm_sync_op = 0x1ad,
@@ -479,6 +541,13 @@ enum mm_32f_73_minor_op {
};
/*
+ * (microMIPS) POOL32S minor opcodes.
+ */
+enum mm_32s_minor_op {
+ mm_32s_elm_op = 0x16,
+};
+
+/*
* (microMIPS) POOL16C minor opcodes.
*/
enum mm_16c_minor_op {
@@ -586,6 +655,36 @@ struct r_format { /* Register format */
;))))))
};
+struct c0r_format { /* C0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int z: 8,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))
+};
+
+struct mfmc0_format { /* MFMC0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int sc : 1,
+ __BITFIELD_FIELD(unsigned int : 2,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))))
+};
+
+struct co_format { /* C0 CO format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int co : 1,
+ __BITFIELD_FIELD(unsigned int code : 19,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))
+};
+
struct p_format { /* Performance counter format (R10000) */
__BITFIELD_FIELD(unsigned int opcode : 6,
__BITFIELD_FIELD(unsigned int rs : 5,
@@ -937,6 +1036,9 @@ union mips_instruction {
struct u_format u_format;
struct c_format c_format;
struct r_format r_format;
+ struct c0r_format c0r_format;
+ struct mfmc0_format mfmc0_format;
+ struct co_format co_format;
struct p_format p_format;
struct f_format f_format;
struct ma_format ma_format;
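
/*
 * Purely illustrative, using only the formats added above: a decoder can
 * distinguish a CO-class coprocessor-0 instruction (TLBWI, TLBWR, ...) from
 * the register forms by testing the co bit. The helper name is invented.
 */
static bool demo_is_cop0_co(union mips_instruction inst)
{
	/* co == 1 marks the "coprocessor operation" encodings of COP0 */
	return inst.co_format.opcode == cop0_op && inst.co_format.co;
}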
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 0914ef775b5f..6d0152321819 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -75,7 +75,7 @@ void __init device_tree_init(void)
const char *get_system_type(void)
{
- if (config_enabled(CONFIG_MACH_JZ4780))
+ if (IS_ENABLED(CONFIG_MACH_JZ4780))
return "JZ4780";
return "JZ4740";
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index e6053d07072f..4a603a3ea657 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -71,7 +71,7 @@ obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
-obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
+obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 1ea973b2abb1..fae2f9447792 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -339,71 +339,9 @@ void output_pm_defines(void)
}
#endif
-void output_cpuinfo_defines(void)
-{
- COMMENT(" MIPS cpuinfo offsets. ");
- DEFINE(CPUINFO_SIZE, sizeof(struct cpuinfo_mips));
-#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
- OFFSET(CPUINFO_ASID_MASK, cpuinfo_mips, asid_mask);
-#endif
-}
-
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specfic offsets. ");
- DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
- OFFSET(VCPU_RUN, kvm_vcpu, run);
- OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
-
- OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
- OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
-
- OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
- OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
-
- OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
- OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
- OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
- OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
-
- OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
-
- OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
- OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
- OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
- OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
- OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
- OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
- OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
- OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
- OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
- OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
- OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
- OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
- OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
- OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
- OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
- OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
- OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
- OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
- OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
- OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
- OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
- OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
- OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
- OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
- OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
- OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
- OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
- OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
- OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
- OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
- OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
- OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
- OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
- OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
- OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
- BLANK();
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
@@ -441,14 +379,6 @@ void output_kvm_defines(void)
OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();
-
- OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
- OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
- OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
-
- OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
- OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
- BLANK();
}
#ifdef CONFIG_MIPS_CPS
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 6dc3f1fdaccc..46c227fc98f5 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -790,7 +790,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
- case beqzcjic_op:
+ case pop66_op:
if (!cpu_has_mips_r6) {
ret = -SIGILL;
break;
@@ -798,7 +798,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
- case bnezcjialc_op:
+ case pop76_op:
if (!cpu_has_mips_r6) {
ret = -SIGILL;
break;
@@ -809,8 +809,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
regs->cp0_epc += 8;
break;
#endif
- case cbcond0_op:
- case cbcond1_op:
+ case pop10_op:
+ case pop30_op:
/* Only valid for MIPS R6 */
if (!cpu_has_mips_r6) {
ret = -SIGILL;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index e4c21bbf9422..804d2a2a19fe 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -276,12 +276,7 @@ int r4k_clockevent_init(void)
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
- clockevent_set_clock(cd, mips_hpt_frequency);
-
- /* Calculate the min / max delta */
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
min_delta = calculate_min_delta();
- cd->min_delta_ns = clockevent_delta2ns(min_delta, cd);
cd->rating = 300;
cd->irq = irq;
@@ -289,7 +284,7 @@ int r4k_clockevent_init(void)
cd->set_next_event = mips_next_event;
cd->event_handler = mips_event_handler;
- clockevents_register_device(cd);
+ clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
if (cp0_timer_irq_installed)
return 0;
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 6392dbe504fb..a378e44688f5 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
+int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
static inline void check_daddiu(void)
{
@@ -314,7 +314,7 @@ static inline void check_daddiu(void)
void __init check_bugs64_early(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
check_mult_sh();
check_daddiu();
}
@@ -322,6 +322,6 @@ void __init check_bugs64_early(void)
void __init check_bugs64(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6))
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
check_daddi();
}
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 1f910563fdf6..d76275da54cb 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace r4k_read_sched_clock(void)
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
return read_c0_count();
}
@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+#ifndef CONFIG_CPU_FREQ
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
return 0;
}
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 891f5ee63983..6430bff21fff 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -8,9 +8,12 @@
* option) any later version.
*/
+#include <linux/binfmts.h>
#include <linux/elf.h>
+#include <linux/export.h>
#include <linux/sched.h>
+#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
@@ -179,7 +182,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
return -ELIBBAD;
}
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
fp_abi = state->fp_abi;
@@ -285,7 +288,7 @@ void mips_set_personality_fp(struct arch_elf_state *state)
* not be worried about N32/N64 binaries.
*/
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return;
switch (state->overall_fp_mode) {
@@ -326,3 +329,19 @@ void mips_set_personality_nan(struct arch_elf_state *state)
BUG();
}
}
+
+int mips_elf_read_implies_exec(void *elf_ex, int exstack)
+{
+ if (exstack != EXSTACK_DISABLE_X) {
+ /* The binary doesn't request a non-executable stack */
+ return 1;
+ }
+
+ if (!cpu_has_rixi) {
+ /* The CPU doesn't support non-executable memory */
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mips_elf_read_implies_exec);
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 56e8fede3fd8..cf052204eb0a 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -93,21 +93,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
+#ifdef CONFIG_USE_OF
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
- PTR_LA t0, __appended_dtb
+ PTR_LA t2, __appended_dtb
#ifdef CONFIG_CPU_BIG_ENDIAN
li t1, 0xd00dfeed
#else
li t1, 0xedfe0dd0
#endif
- lw t2, (t0)
- bne t1, t2, not_found
- nop
+ lw t0, (t2)
+ beq t0, t1, dtb_found
+#endif
+ li t1, -2
+ beq a0, t1, dtb_found
+ move t2, a1
- move a1, t0
- PTR_LI a0, -2
-not_found:
+ li t2, 0
+dtb_found:
#endif
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
@@ -122,6 +125,10 @@ not_found:
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
+#ifdef CONFIG_USE_OF
+ LONG_S t2, fw_passed_dtb
+#endif
+
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
/* Set the SP after an empty pt_regs. */
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 760217bbb2fa..659e6d3ae335 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -251,7 +251,7 @@ int mips_cm_probe(void)
mips_cm_probe_l2sync();
/* determine register width for this CM */
- mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+ mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cm_core_lock, cpu));
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 7ff2a557f4aa..c3372cac6db2 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -84,7 +84,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(s32)MIPSInst_SIMM(ir);
return 0;
case daddiu_op:
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
break;
if (MIPSInst_RT(ir))
@@ -143,7 +143,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case dsll_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -152,7 +152,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case dsrl_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -161,7 +161,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case daddu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -170,7 +170,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u64)regs->regs[MIPSInst_RT(ir)];
return 0;
case dsubu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -283,7 +283,7 @@ static int jr_func(struct pt_regs *regs, u32 ir)
err = mipsr6_emul(regs, nir);
if (err > 0) {
regs->cp0_epc = nepc;
- err = mips_dsemul(regs, nir, cepc);
+ err = mips_dsemul(regs, nir, epc, cepc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -498,7 +498,7 @@ static int dmult_func(struct pt_regs *regs, u32 ir)
s64 res;
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -530,7 +530,7 @@ static int dmultu_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -561,7 +561,7 @@ static int ddiv_func(struct pt_regs *regs, u32 ir)
{
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -586,7 +586,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir)
{
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -825,7 +825,7 @@ static int dclz_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -852,7 +852,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -1033,7 +1033,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1082,7 +1082,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1149,7 +1149,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1484,7 +1484,7 @@ fpu_emul:
break;
case ldl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1603,7 +1603,7 @@ fpu_emul:
break;
case ldr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1722,7 +1722,7 @@ fpu_emul:
break;
case sdl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1840,7 +1840,7 @@ fpu_emul:
break;
case sdr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2072,7 +2072,7 @@ fpu_emul:
break;
case lld_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2133,7 +2133,7 @@ fpu_emul:
break;
case scd_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index adda3ffb9b78..5b31a9405ebc 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -148,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
@@ -387,7 +387,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 813ed7829c61..7429ad09fbe3 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
@@ -68,11 +69,22 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
+void exit_thread(struct task_struct *tsk)
+{
+ /*
+ * User threads may have allocated a delay slot emulation frame.
+ * If so, clean up that allocation.
+ */
+ if (!(current->flags & PF_KTHREAD))
+ dsemul_thread_cleanup(tsk);
+}
+
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
@@ -159,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
if (clone_flags & CLONE_SETTLS)
ti->tp_value = regs->regs[7];
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 9c0b387d6427..51d3988933f8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -348,7 +348,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
- PTR sys_keyctl /* 6245 */
+ PTR compat_sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f4f28b1580de..6efa7136748f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -504,7 +504,7 @@ EXPORT(sys32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
- PTR sys_keyctl
+ PTR compat_sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
index 87bc74a5a518..2703f218202e 100644
--- a/arch/mips/kernel/segment.c
+++ b/arch/mips/kernel/segment.c
@@ -26,17 +26,20 @@ static void build_segment_config(char *str, unsigned int cfg)
/*
* Access modes MK, MSK and MUSK are mapped segments. Therefore
- * there is no direct physical address mapping.
+ * there is no direct physical address mapping unless it becomes
+ * unmapped uncached at error level due to EU.
*/
- if ((am == 0) || (am > 3)) {
+ if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU))
str += sprintf(str, " %03lx",
((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ else
+ str += sprintf(str, " UND");
+
+ if ((am == 0) || (am > 3))
str += sprintf(str, " %01ld",
((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
- } else {
- str += sprintf(str, " UND");
+ else
str += sprintf(str, " U");
- }
/* Exception configuration. */
str += sprintf(str, " %01ld\n",
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index ef408a03e818..36cf8d65c47d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -875,6 +875,10 @@ void __init setup_arch(char **cmdline_p)
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+#ifdef CONFIG_USE_OF
+unsigned long fw_passed_dtb;
+#endif
+
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index ae4231452115..9e224469c788 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -165,7 +165,7 @@ static int save_msa_extcontext(void __user *buf)
* should already have been done when handling scalar FP
* context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
err = __put_user(read_msa_csr(), &msa->csr);
err |= _save_msa_all_upper(&msa->wr);
@@ -195,7 +195,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
unsigned int csr;
int i, err;
- if (!config_enabled(CONFIG_CPU_HAS_MSA))
+ if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
return SIGSYS;
if (size != sizeof(*msa))
@@ -215,7 +215,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
* scalar FP context, so FPU & MSA should have already been
* disabled whilst handling scalar FP context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
write_msa_csr(csr);
err |= _restore_msa_all_upper(&msa->wr);
@@ -315,7 +315,7 @@ int protected_save_fp_context(void __user *sc)
* EVA does not have userland equivalents of ldc1 or sdc1, so
* save to the kernel FP context & copy that to userland below.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(1);
while (1) {
@@ -378,7 +378,7 @@ int protected_restore_fp_context(void __user *sc)
* disable the FPU here such that the code below simply copies to
* the kernel FP context.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(0);
while (1) {
@@ -772,6 +772,14 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
+ /*
+ * If we were emulating a delay slot instruction, exit that frame such
+ * that addresses in the sigframe are as expected for userland and we
+ * don't have a problem if we reuse the thread's frame for an
+ * instruction within the signal handler.
+ */
+ dsemul_thread_rollback(regs);
+
if (regs->regs[0]) {
switch(regs->regs[2]) {
case ERESTART_RESTARTBLOCK:
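Note: two independent changes appear in signal.c above: config_enabled() is replaced by IS_ENABLED(), and handle_signal() now calls dsemul_thread_rollback() so the sigframe never captures a PC inside the delay slot emulation page. For boolean symbols such as CONFIG_EVA the two config macros behave the same; IS_ENABLED() additionally yields 1 for =m symbols and works in ordinary C expressions. A simplified, self-contained model of the macro follows (the real version in include/linux/kconfig.h also checks CONFIG_FOO_MODULE).

/* Simplified model of IS_ENABLED(); the real macro in include/linux/kconfig.h
 * additionally checks CONFIG_FOO_MODULE so =m also counts as enabled. */
#include <stdio.h>

#define CONFIG_EVA 1                            /* pretend CONFIG_EVA=y */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
        if (IS_ENABLED(CONFIG_EVA))             /* plain C "if", not #ifdef */
                printf("EVA path\n");
        if (!IS_ENABLED(CONFIG_CPU_MIPSR6))     /* undefined symbol evaluates to 0 */
                printf("pre-R6 path\n");
        return 0;
}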
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 78c8349d151c..97b7c51b8251 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -6,129 +6,26 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
*/
-#include <linux/cache.h>
-#include <linux/compat.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/suspend.h>
-#include <linux/compiler.h>
-#include <linux/uaccess.h>
-#include <asm/abi.h>
-#include <asm/asm.h>
+#include <asm/compat.h>
#include <asm/compat-signal.h>
-#include <linux/bitops.h>
-#include <asm/cacheflush.h>
-#include <asm/sim.h>
-#include <asm/ucontext.h>
-#include <asm/fpu.h>
-#include <asm/war.h>
-#include <asm/dsp.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
#include "signal-common.h"
-/*
- * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
- */
-#define __NR_O32_restart_syscall 4253
-
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
-struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
- compat_stack_t uc_stack;
- struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-struct sigframe32 {
- u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_pad[2]; /* Was: signal trampoline */
- struct sigcontext32 sf_sc;
- compat_sigset_t sf_mask;
-};
-
-struct rt_sigframe32 {
- u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_pad[2]; /* Was: signal trampoline */
- compat_siginfo_t rs_info;
- struct ucontext32 rs_uc;
-};
-
-static int setup_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- int i;
-
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
-
- err |= __put_user(0, &sc->sc_regs[0]);
- for (i = 1; i < 32; i++)
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
-
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- }
-
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context(sc);
-
- return err;
-}
-
-static int restore_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- s32 treg;
- int i;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
-
- return err ?: protected_restore_fp_context(sc);
-}
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
@@ -247,176 +144,3 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return 0;
}
-
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
-{
- struct sigframe32 __user *frame;
- sigset_t blocked;
- int sig;
-
- frame = (struct sigframe32 __user *) regs.regs[29];
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
- goto badframe;
-
- set_current_blocked(&blocked);
-
- sig = restore_sigcontext32(&regs, &frame->sf_sc);
- if (sig < 0)
- goto badframe;
- else if (sig)
- force_sig(sig, current);
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29, %0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
-{
- struct rt_sigframe32 __user *frame;
- sigset_t set;
- int sig;
-
- frame = (struct rt_sigframe32 __user *) regs.regs[29];
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
- goto badframe;
-
- set_current_blocked(&set);
-
- sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
- if (sig < 0)
- goto badframe;
- else if (sig)
- force_sig(sig, current);
-
- if (compat_restore_altstack(&frame->rs_uc.uc_stack))
- goto badframe;
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29, %0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-static int setup_frame_32(void *sig_return, struct ksignal *ksig,
- struct pt_regs *regs, sigset_t *set)
-{
- struct sigframe32 __user *frame;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
- if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
- return -EFAULT;
-
- err |= setup_sigcontext32(regs, &frame->sf_sc);
- err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
-
- if (err)
- return -EFAULT;
-
- /*
- * Arguments to signal handler:
- *
- * a0 = signal number
- * a1 = 0 (should be cause)
- * a2 = pointer to struct sigcontext
- *
- * $25 and c0_epc point to the signal handler, $29 points to the
- * struct sigframe.
- */
- regs->regs[ 4] = ksig->sig;
- regs->regs[ 5] = 0;
- regs->regs[ 6] = (unsigned long) &frame->sf_sc;
- regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) sig_return;
- regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
-
- DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
- current->comm, current->pid,
- frame, regs->cp0_epc, regs->regs[31]);
-
- return 0;
-}
-
-static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
- struct pt_regs *regs, sigset_t *set)
-{
- struct rt_sigframe32 __user *frame;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
- if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
- return -EFAULT;
-
- /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
- err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->rs_uc.uc_flags);
- err |= __put_user(0, &frame->rs_uc.uc_link);
- err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
- err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
- err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
-
- if (err)
- return -EFAULT;
-
- /*
- * Arguments to signal handler:
- *
- * a0 = signal number
- * a1 = 0 (should be cause)
- * a2 = pointer to ucontext
- *
- * $25 and c0_epc point to the signal handler, $29 points to
- * the struct rt_sigframe32.
- */
- regs->regs[ 4] = ksig->sig;
- regs->regs[ 5] = (unsigned long) &frame->rs_info;
- regs->regs[ 6] = (unsigned long) &frame->rs_uc;
- regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) sig_return;
- regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
-
- DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
- current->comm, current->pid,
- frame, regs->cp0_epc, regs->regs[31]);
-
- return 0;
-}
-
-/*
- * o32 compatibility on 64-bit kernels, without DSP ASE
- */
-struct mips_abi mips_abi_32 = {
- .setup_frame = setup_frame_32,
- .setup_rt_frame = setup_rt_frame_32,
- .restart = __NR_O32_restart_syscall,
-
- .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
- .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
- .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
-
- .vdso = &vdso_image_o32,
-};
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
new file mode 100644
index 000000000000..5e169fc5ca5c
--- /dev/null
+++ b/arch/mips/kernel/signal_o32.c
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/abi.h>
+#include <asm/compat-signal.h>
+#include <asm/dsp.h>
+#include <asm/sim.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
+ */
+#define __NR_O32_restart_syscall 4253
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+ struct sigcontext32 sf_sc;
+ compat_sigset_t sf_mask;
+};
+
+struct ucontext32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext32 uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+};
+
+static int setup_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static int restore_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ s32 treg;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+static int setup_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext32(regs, &frame->sf_sc);
+ err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+{
+ struct rt_sigframe32 __user *frame;
+ sigset_t set;
+ int sig;
+
+ frame = (struct rt_sigframe32 __user *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig, current);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
+
+static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe32.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+struct mips_abi mips_abi_32 = {
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
+ .restart = __NR_O32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
+
+ .vdso = &vdso_image_o32,
+};
+
+
+asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+{
+ struct sigframe32 __user *frame;
+ sigset_t blocked;
+ int sig;
+
+ frame = (struct sigframe32 __user *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig, current);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
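Note: the new signal_o32.c carries only the o32 sigframe layouts and setup/restore paths moved out of signal32.c, leaving signal32.c with the compat siginfo helpers shared by the 32-bit ABIs. From userland's point of view the register setup in setup_frame_32()/setup_rt_frame_32() amounts to entering the handler with the prototypes sketched below; the typedef names are illustrative, not kernel types.

/* Illustrative only, derived from the a0/a1/a2 comments above; neither
 * typedef exists in the kernel. */
struct sigcontext;              /* layout fixed by the o32 ABI */
struct siginfo;
struct ucontext;

/* non-rt frame: a0 = signo, a1 = 0 (reserved for cause), a2 = &sf_sc */
typedef void (*o32_handler_t)(int sig, int code, struct sigcontext *sc);

/* rt frame: a0 = signo, a1 = &rs_info, a2 = &rs_uc */
typedef void (*o32_rt_handler_t)(int sig, struct siginfo *si,
                                 struct ucontext *uc);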
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index e02addc0307f..6d0f1321e084 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -363,6 +363,7 @@ static int bmips_cpu_disable(void)
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
clear_c0_status(IE_IRQ5);
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4ed36f288d64..e9d9fc6c754c 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -46,8 +46,8 @@ static unsigned core_vpe_count(unsigned core)
if (threads_disabled)
return 1;
- if ((!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
- && (!config_enabled(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
+ if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
return 1;
mips_cm_lock_other(core, 0);
@@ -206,7 +206,7 @@ err_out:
}
}
-static void boot_core(unsigned core)
+static void boot_core(unsigned int core, unsigned int vpe_id)
{
u32 access, stat, seq_state;
unsigned timeout;
@@ -233,8 +233,9 @@ static void boot_core(unsigned core)
mips_cpc_lock_other(core);
if (mips_cm_revision() >= CM_REV_CM3) {
- /* Run VP0 following the reset */
- write_cpc_co_vp_run(0x1);
+ /* Run only the requested VP following the reset */
+ write_cpc_co_vp_stop(0xf);
+ write_cpc_co_vp_run(1 << vpe_id);
/*
* Ensure that the VP_RUN register is written before the
@@ -306,7 +307,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(core);
+ boot_core(core, vpe_id);
goto out;
}
@@ -397,6 +398,7 @@ static int cps_cpu_disable(void)
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
return 0;
@@ -411,14 +413,16 @@ static enum {
void play_dead(void)
{
- unsigned cpu, core;
+ unsigned int cpu, core, vpe_id;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
- if (cpu_has_mipsmt) {
+ pr_debug("CPU%d going offline\n", cpu);
+
+ if (cpu_has_mipsmt || cpu_has_vp) {
core = cpu_data[cpu].core;
/* Look for another online VPE within the core */
@@ -439,10 +443,21 @@ void play_dead(void)
complete(&cpu_death_chosen);
if (cpu_death == CPU_DEATH_HALT) {
- /* Halt this TC */
- write_c0_tchalt(TCHALT_H);
- instruction_hazard();
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
} else {
+ pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
@@ -469,6 +484,7 @@ static void wait_for_sibling_halt(void *ptr_cpu)
static void cps_cpu_die(unsigned int cpu)
{
unsigned core = cpu_data[cpu].core;
+ unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned stat;
int err;
@@ -497,10 +513,12 @@ static void cps_cpu_die(unsigned int cpu)
* in which case the CPC will refuse to power down the core.
*/
do {
+ mips_cm_lock_other(core, vpe_id);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
mips_cpc_unlock_other();
+ mips_cm_unlock_other();
} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
@@ -517,6 +535,12 @@ static void cps_cpu_die(unsigned int cpu)
(void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
+ } else if (cpu_has_vp) {
+ do {
+ mips_cm_lock_other(core, vpe_id);
+ stat = read_cpc_co_vp_running();
+ mips_cm_unlock_other();
+ } while (stat & (1 << vpe_id));
}
}
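Note: with CM3 the CPC run/stop/running registers take one-hot VP masks, so boot_core() above first stops VP0-VP3 and then sets only the requested VP running, while cps_cpu_die() polls the running mask until the dying VP's bit clears. The toy below only models that mask handling; vp_running stands in for read_cpc_co_vp_running() and nothing here is kernel code.

/* Toy model only: vp_running stands in for read_cpc_co_vp_running(). */
#include <stdio.h>

static unsigned int vp_running;

int main(void)
{
        unsigned int vpe_id = 2;

        vp_running = 0xf;                       /* VP0..VP3 running */
        vp_running &= ~(1u << vpe_id);          /* write_cpc_cl_vp_stop(1 << vpe_id) */

        while (vp_running & (1u << vpe_id))     /* cps_cpu_die(): wait for the halt */
                ;

        printf("VP%u halted, running mask now 0x%x\n", vpe_id, vp_running);
        return 0;
}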
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index f9d01e953acb..f95f094f36e4 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(cpu_core_map);
* A logcal cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
-cpumask_t cpu_foreign_map __read_mostly;
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
@@ -124,7 +124,7 @@ static inline void set_cpu_core_map(int cpu)
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
-static inline void calculate_cpu_foreign_map(void)
+void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
@@ -141,7 +141,9 @@ static inline void calculate_cpu_foreign_map(void)
cpumask_set_cpu(i, &temp_foreign_map);
}
- cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
}
struct plat_smp_ops *mp_ops;
@@ -344,16 +346,9 @@ asmlinkage void start_secondary(void)
static void stop_this_cpu(void *dummy)
{
/*
- * Remove this CPU. Be a bit slow here and
- * set the bits for every online CPU so we don't miss
- * any IPI whilst taking this VPE down.
+ * Remove this CPU:
*/
- cpumask_copy(&cpu_foreign_map, cpu_online_mask);
-
- /* Make it visible to every other CPU */
- smp_mb();
-
set_cpu_online(smp_processor_id(), false);
calculate_cpu_foreign_map();
local_irq_disable();
@@ -512,10 +507,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
unsigned int cpu;
+ int exec = vma->vm_flags & VM_EXEC;
for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_range() will only fully flush icache if
+ * the VMA is executable, otherwise we must invalidate
+ * ASID without it appearing to has_valid_asid() as if
+ * mm has been completely unused by that CPU.
+ */
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = 0;
+ cpu_context(cpu, mm) = !exec;
}
}
local_flush_tlb_range(vma, start, end);
@@ -560,8 +562,14 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
unsigned int cpu;
for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_page() only does partial flushes, so
+ * invalidate ASID without it appearing to
+ * has_valid_asid() as if mm has been completely unused
+ * by that CPU.
+ */
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
- cpu_context(cpu, vma->vm_mm) = 0;
+ cpu_context(cpu, vma->vm_mm) = 1;
}
}
local_flush_tlb_page(vma, page);
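Note: cpu_foreign_map becomes a per-CPU array so each CPU's "foreign" set can exclude its own siblings, and it is recomputed on every hotplug transition (hence the calculate_cpu_foreign_map() calls added to the bmips and CPS paths above). A runnable toy of the computation, using plain bit masks in place of cpumask_t:

/* Toy model of calculate_cpu_foreign_map(): pick one representative VPE per
 * core, then give each CPU that set minus its own siblings. Plain unsigned
 * masks stand in for cpumask_t. */
#include <stdio.h>

#define NCPUS 4

int main(void)
{
        /* two cores with two sibling VPEs each: {0,1} and {2,3} */
        unsigned int sibling[NCPUS] = { 0x3, 0x3, 0xc, 0xc };
        unsigned int online = 0xf, temp = 0, seen = 0;
        int i;

        for (i = 0; i < NCPUS; i++) {           /* first online VPE of each core */
                if (!(online & (1u << i)) || (seen & (1u << i)))
                        continue;
                temp |= 1u << i;
                seen |= sibling[i];
        }

        for (i = 0; i < NCPUS; i++)             /* per-CPU map excludes own core */
                printf("cpu%d foreign=0x%x\n", i, temp & ~sibling[i]);

        return 0;
}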
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 4a1712b5abdf..3de85be2486a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -619,17 +619,17 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
- case 0: /* CPU number */
+ case MIPS_HWR_CPUNUM: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
- case 1: /* SYNCI length */
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
- case 2: /* Read count register */
+ case MIPS_HWR_CC: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
- case 3: /* Count register resolution */
+ case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
@@ -639,7 +639,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
regs->regs[rt] = 2;
}
return 0;
- case 29:
+ case MIPS_HWR_ULR: /* Read UserLocal register */
regs->regs[rt] = ti->tp_value;
return 0;
default:
@@ -704,6 +704,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
+ struct vm_area_struct *vma;
switch (sig) {
case 0:
@@ -744,7 +745,8 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
si.si_addr = fault_addr;
si.si_signo = sig;
down_read(&current->mm->mmap_sem);
- if (find_vma(current->mm, (unsigned long)fault_addr))
+ vma = find_vma(current->mm, (unsigned long)fault_addr);
+ if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
@@ -1859,6 +1861,7 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
+EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
@@ -2063,16 +2066,22 @@ static void configure_status(void)
status_set);
}
+unsigned int hwrena;
+EXPORT_SYMBOL_GPL(hwrena);
+
/* configure HWRENA register */
static void configure_hwrena(void)
{
- unsigned int hwrena = cpu_hwrena_impl_bits;
+ hwrena = cpu_hwrena_impl_bits;
if (cpu_has_mips_r2_r6)
- hwrena |= 0x0000000f;
+ hwrena |= MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
if (!noulri && cpu_has_userlocal)
- hwrena |= (1 << 29);
+ hwrena |= MIPS_HWRENA_ULR;
if (hwrena)
write_c0_hwrena(hwrena);
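Note: besides switching to the symbolic MIPS_HWR_*/MIPS_HWRENA_* names and exporting ebase and hwrena (EXPORT_SYMBOL_GPL, usable by modules such as KVM), traps.c gains a correctness fix in process_fpemu_return(): find_vma() returns the first VMA whose end lies above the address, which may also start above it, so SEGV_ACCERR must only be reported when vma->vm_start <= fault_addr. A toy model of that pitfall (this find_vma() is a simplified reimplementation, not the kernel's):

/* Toy model of the pitfall: this find_vma() is a simplified
 * reimplementation of the semantics, not the kernel's. */
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

static const struct vma *find_vma(const struct vma *v, int n, unsigned long addr)
{
        int i;

        for (i = 0; i < n; i++)
                if (addr < v[i].vm_end)         /* first VMA ending above addr */
                        return &v[i];
        return NULL;
}

int main(void)
{
        const struct vma maps[] = { { 0x1000, 0x2000 }, { 0x8000, 0x9000 } };
        unsigned long fault_addr = 0x5000;      /* in the hole between the VMAs */
        const struct vma *vma = find_vma(maps, 2, fault_addr);

        if (vma && vma->vm_start <= fault_addr)
                printf("mapped but inaccessible: SEGV_ACCERR\n");
        else
                printf("not mapped at all: SEGV_MAPERR\n");
        return 0;
}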
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 28b3af73a17b..f1c308dbbc4a 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -1025,7 +1025,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHW(addr, value, res);
else
@@ -1044,7 +1044,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadW(addr, value, res);
else
@@ -1063,7 +1063,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHWU(addr, value, res);
else
@@ -1131,7 +1131,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreHW(addr, value, res);
else
@@ -1151,7 +1151,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreW(addr, value, res);
else
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 54e1663ce639..9abe447a4b48 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -107,6 +107,16 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
+ /* Map delay slot emulation page */
+ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
+ VM_READ|VM_WRITE|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+
/*
* Determine total area size. This includes the VDSO data itself, the
* data page, and the GIC user page if present. Always create a mapping
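Note: arch_setup_additional_pages() now also maps one read/write/execute page at STACK_TOP to hold delay slot emulation frames. mmap_region() folds a negative errno into its unsigned long return value on failure, which is what the IS_ERR_VALUE() test above catches; the snippet below roughly mirrors that convention (MAX_ERRNO as in include/linux/err.h) and is not the kernel's code.

/* Roughly mirrors the kernel convention (MAX_ERRNO as in include/linux/err.h):
 * the top 4095 unsigned long values are treated as negative errnos. */
#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
        unsigned long ok  = 0x7fff8000UL;       /* a plausible mapping address */
        unsigned long bad = (unsigned long)-12; /* -ENOMEM folded into the value */

        printf("ok:  %d\n", (int)IS_ERR_VALUE(ok));     /* 0 */
        printf("bad: %d\n", (int)IS_ERR_VALUE(bad));    /* 1 */
        return 0;
}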
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2ae12825529f..7c56d6b124d1 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -17,6 +17,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
+ select EXPORT_UASM
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_MMIO
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 637ebbebd549..847429de780d 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -7,9 +7,10 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
-kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
+kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o fpu.o
+kvm-objs += mmu.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index 2d6e976d1add..a36b77e1705c 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -4,7 +4,7 @@
* for more details.
*
* commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ 0x0.
+ * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index f1527a465c1b..d280894915ed 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -20,125 +21,114 @@
#include "commpage.h"
-#define SYNCI_TEMPLATE 0x041f0000
-#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
-#define SYNCI_OFFSET ((x) & 0xffff)
+/**
+ * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
+ * @vcpu: Virtual CPU.
+ * @opc: PC of instruction to replace.
+ * @replace: Instruction to write
+ */
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
+ union mips_instruction replace)
+{
+ unsigned long paddr, flags;
+ void *vaddr;
+
+ if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+ (unsigned long)opc);
+ vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
+ vaddr += paddr & ~PAGE_MASK;
+ memcpy(vaddr, (void *)&replace, sizeof(u32));
+ local_flush_icache_range((unsigned long)vaddr,
+ (unsigned long)vaddr + 32);
+ kunmap_atomic(vaddr);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&replace, sizeof(u32));
+ local_flush_icache_range((unsigned long)opc,
+ (unsigned long)opc + 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
-#define LW_TEMPLATE 0x8c000000
-#define CLEAR_TEMPLATE 0x00000020
-#define SW_TEMPLATE 0xac000000
+ return 0;
+}
-int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
- int result = 0;
- unsigned long kseg0_opc;
- uint32_t synci_inst = 0x0;
+ union mips_instruction nop_inst = { 0 };
/* Replace the CACHE instruction, with a NOP */
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
- return result;
+ return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}
/*
* Address based CACHE instructions are transformed into synci(s). A little
* heavy for just D-cache invalidates, but avoids an expensive trap
*/
-int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
- int result = 0;
- unsigned long kseg0_opc;
- uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
-
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- synci_inst |= (base << 21);
- synci_inst |= offset;
-
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
- return result;
+ union mips_instruction synci_inst = { 0 };
+
+ synci_inst.i_format.opcode = bcond_op;
+ synci_inst.i_format.rs = inst.i_format.rs;
+ synci_inst.i_format.rt = synci_op;
+ if (cpu_has_mips_r6)
+ synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
+ else
+ synci_inst.i_format.simmediate = inst.i_format.simmediate;
+
+ return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}
-int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
{
- int32_t rt, rd, sel;
- uint32_t mfc0_inst;
- unsigned long kseg0_opc, flags;
-
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
+ union mips_instruction mfc0_inst = { 0 };
+ u32 rd, sel;
- if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
- mfc0_inst = CLEAR_TEMPLATE;
- mfc0_inst |= ((rt & 0x1f) << 16);
- } else {
- mfc0_inst = LW_TEMPLATE;
- mfc0_inst |= ((rt & 0x1f) << 16);
- mfc0_inst |= offsetof(struct kvm_mips_commpage,
- cop0.reg[rd][sel]);
- }
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
- if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
- } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
- local_irq_restore(flags);
+ if (rd == MIPS_CP0_ERRCTL && sel == 0) {
+ mfc0_inst.r_format.opcode = spec_op;
+ mfc0_inst.r_format.rd = inst.c0r_format.rt;
+ mfc0_inst.r_format.func = add_op;
} else {
- kvm_err("%s: Invalid address: %p\n", __func__, opc);
- return -EFAULT;
+ mfc0_inst.i_format.opcode = lw_op;
+ mfc0_inst.i_format.rt = inst.c0r_format.rt;
+ mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mfc0_inst.i_format.simmediate |= 4;
+#endif
}
- return 0;
+ return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}
-int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
{
- int32_t rt, rd, sel;
- uint32_t mtc0_inst = SW_TEMPLATE;
- unsigned long kseg0_opc, flags;
-
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
-
- mtc0_inst |= ((rt & 0x1f) << 16);
- mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-
- if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
- } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
- local_irq_restore(flags);
- } else {
- kvm_err("%s: Invalid address: %p\n", __func__, opc);
- return -EFAULT;
- }
-
- return 0;
+ union mips_instruction mtc0_inst = { 0 };
+ u32 rd, sel;
+
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ mtc0_inst.i_format.opcode = sw_op;
+ mtc0_inst.i_format.rt = inst.c0r_format.rt;
+ mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mtc0_inst.i_format.simmediate |= 4;
+#endif
+
+ return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}
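Note: dyntrans.c now builds its replacement instructions through the union mips_instruction field accessors instead of hand-rolled shift/mask templates, and funnels every guest-memory patch plus icache flush through the single kvm_mips_trans_replace() helper. The runnable sketch below shows what the i_format fields used above correspond to in raw bit positions; decode_i_format() is a hand-written illustration, the kernel instead uses the endian-aware bit-fields from asm/inst.h.

/* Illustration of the i_format layout (opcode 31:26, rs 25:21, rt 20:16,
 * simmediate 15:0); decode_i_format() is not a kernel helper. */
#include <stdio.h>
#include <stdint.h>

struct i_format_fields { unsigned opcode, rs, rt; int simmediate; };

static struct i_format_fields decode_i_format(uint32_t word)
{
        struct i_format_fields f = {
                .opcode     = word >> 26,
                .rs         = (word >> 21) & 0x1f,
                .rt         = (word >> 16) & 0x1f,
                .simmediate = (int16_t)(word & 0xffff), /* sign-extended */
        };
        return f;
}

int main(void)
{
        /* lw $2, 16($29): opcode=0x23 (lw_op), rs=29, rt=2, imm=16 */
        struct i_format_fields f = decode_i_format(0x8fa20010);

        printf("opcode=%#x rs=%u rt=%u imm=%d\n",
               f.opcode, f.rs, f.rt, f.simmediate);
        return 0;
}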
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 645c8a1982a7..6eb52b9c9818 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -52,7 +52,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
goto unaligned;
/* Read the instruction */
- insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+ insn.word = kvm_get_inst((u32 *) epc, vcpu);
if (insn.word == KVM_INVALID_INST)
return KVM_INVALID_INST;
@@ -161,9 +161,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
nextpc = epc;
break;
- case blez_op: /* not really i_format */
- case blezl_op:
- /* rt field assumed to be zero */
+ case blez_op: /* POP06 */
+#ifndef CONFIG_CPU_MIPSR6
+ case blezl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] <= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
@@ -171,9 +174,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
nextpc = epc;
break;
- case bgtz_op:
- case bgtzl_op:
- /* rt field assumed to be zero */
+ case bgtz_op: /* POP07 */
+#ifndef CONFIG_CPU_MIPSR6
+ case bgtzl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] > 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
@@ -185,6 +191,40 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
case cop1_op:
kvm_err("%s: unsupported cop1_op\n", __func__);
break;
+
+#ifdef CONFIG_CPU_MIPSR6
+ /* R6 added the following compact branches with forbidden slots */
+ case blezl_op: /* POP26 */
+ case bgtzl_op: /* POP27 */
+ /* only rt == 0 isn't compact branch */
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
+ break;
+ case pop10_op:
+ case pop30_op:
+ /* only rs == rt == 0 is reserved, rest are compact branches */
+ if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
+ goto compact_branch;
+ break;
+ case pop66_op:
+ case pop76_op:
+ /* only rs == 0 isn't compact branch */
+ if (insn.i_format.rs != 0)
+ goto compact_branch;
+ break;
+compact_branch:
+ /*
+ * If we've hit an exception on the forbidden slot, then
+ * the branch must not have been taken.
+ */
+ epc += 8;
+ nextpc = epc;
+ break;
+#else
+compact_branch:
+ /* Compact branches not supported before R6 */
+ break;
+#endif
}
return nextpc;
@@ -198,7 +238,7 @@ sigill:
return nextpc;
}
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long branch_pc;
enum emulation_result er = EMULATE_DONE;
@@ -243,7 +283,7 @@ static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
-static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
s64 now_ns, periods;
u64 delta;
@@ -300,11 +340,11 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*
* Returns: The current value of the guest CP0_Count register.
*/
-static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t expires, threshold;
- uint32_t count, compare;
+ u32 count, compare;
int running;
/* Calculate the biased and scaled guest CP0_Count */
@@ -315,7 +355,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
* Find whether CP0_Count has reached the closest timer interrupt. If
* not, we shouldn't inject it.
*/
- if ((int32_t)(count - compare) < 0)
+ if ((s32)(count - compare) < 0)
return count;
/*
@@ -360,7 +400,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
*
* Returns: The current guest CP0_Count value.
*/
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -387,8 +427,7 @@ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
*
* Returns: The ktime at the point of freeze.
*/
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
- uint32_t *count)
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
ktime_t now;
@@ -419,16 +458,16 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
- ktime_t now, uint32_t count)
+ ktime_t now, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t compare;
+ u32 compare;
u64 delta;
ktime_t expire;
/* Calculate timeout (wrap 0 to 2^32) */
compare = kvm_read_c0_guest_compare(cop0);
- delta = (u64)(uint32_t)(compare - count - 1) + 1;
+ delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
expire = ktime_add_ns(now, delta);
@@ -444,7 +483,7 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
*
* Sets the CP0_Count value and updates the timer accordingly.
*/
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t now;
@@ -538,13 +577,13 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
* If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
* any pending timer interrupt is preserved.
*/
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
ktime_t now;
- uint32_t count;
+ u32 count;
/* if unchanged, must just be an ack */
if (old_compare == compare) {
@@ -585,7 +624,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t count;
+ u32 count;
ktime_t now;
/* Stop hrtimer */
@@ -632,7 +671,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t count;
+ u32 count;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -661,7 +700,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;
- uint32_t count, compare;
+ u32 count, compare;
/* Only allow defined bits to be changed */
if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
@@ -687,7 +726,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
*/
count = kvm_read_c0_guest_count(cop0);
compare = kvm_read_c0_guest_compare(cop0);
- delta = (u64)(uint32_t)(compare - count - 1) + 1;
+ delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC,
vcpu->arch.count_hz);
expire = ktime_add_ns(vcpu->arch.count_resume, delta);
@@ -776,7 +815,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
vcpu->arch.pending_exceptions);
++vcpu->stat.wait_exits;
- trace_kvm_exit(vcpu, WAIT_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) {
vcpu->arch.wait = 1;
kvm_vcpu_block(vcpu);
@@ -801,9 +840,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
- kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+ kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
return EMULATE_FAIL;
}
@@ -813,11 +852,11 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
int index = kvm_read_c0_guest_index(cop0);
struct kvm_mips_tlb *tlb = NULL;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
kvm_debug("%s: illegal index: %d\n", __func__, index);
- kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
@@ -834,10 +873,10 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
- kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
@@ -851,7 +890,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb *tlb = NULL;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
int index;
get_random_bytes(&index, sizeof(index));
@@ -867,10 +906,10 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
- kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0));
@@ -882,14 +921,14 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
long entryhi = kvm_read_c0_guest_entryhi(cop0);
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
int index = -1;
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
kvm_write_c0_guest_index(cop0, index);
- kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+ kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
index);
return EMULATE_DONE;
@@ -922,8 +961,8 @@ unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
*/
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
- /* Config4 is optional */
- unsigned int mask = MIPS_CONF_M;
+ /* Config4 and ULRI are optional */
+ unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -942,7 +981,12 @@ unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
/* Config5 is optional */
- return MIPS_CONF_M;
+ unsigned int mask = MIPS_CONF_M;
+
+ /* KScrExist */
+ mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
+
+ return mask;
}
/**
@@ -973,14 +1017,14 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
- uint32_t cause, struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- int32_t rt, rd, copz, sel, co_bit, op;
- uint32_t pc = vcpu->arch.pc;
+ u32 rt, rd, sel;
unsigned long curr_pc;
/*
@@ -992,16 +1036,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
if (er == EMULATE_FAIL)
return er;
- copz = (inst >> 21) & 0x1f;
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
- co_bit = (inst >> 25) & 1;
-
- if (co_bit) {
- op = (inst) & 0xff;
-
- switch (op) {
+ if (inst.co_format.co) {
+ switch (inst.co_format.func) {
case tlbr_op: /* Read indexed TLB entry */
er = kvm_mips_emul_tlbr(vcpu);
break;
@@ -1020,47 +1056,58 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
case eret_op:
er = kvm_mips_emul_eret(vcpu);
goto dont_update_pc;
- break;
case wait_op:
er = kvm_mips_emul_wait(vcpu);
break;
}
} else {
- switch (copz) {
+ rt = inst.c0r_format.rt;
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ switch (inst.c0r_format.rs) {
case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
/* Get reg */
if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+ vcpu->arch.gprs[rt] =
+ (s32)kvm_mips_read_count(vcpu);
} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
} else {
- vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+ vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
}
- kvm_debug
- ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
- pc, rd, sel, rt, vcpu->arch.gprs[rt]);
-
+ trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
break;
case dmfc_op:
vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
break;
case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
+ trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+
if ((rd == MIPS_CP0_TLB_INDEX)
&& (vcpu->arch.gprs[rt] >=
KVM_MIPS_GUEST_TLB_SIZE)) {
@@ -1078,16 +1125,15 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- uint32_t nasid =
+ u32 nasid =
vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
((kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID) != nasid)) {
- kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+ trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
- & KVM_ENTRYHI_ASID,
- vcpu->arch.gprs[rt]
- & KVM_ENTRYHI_ASID);
+ & KVM_ENTRYHI_ASID,
+ nasid);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
@@ -1100,10 +1146,6 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
goto done;
} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
- kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
- pc, kvm_read_c0_guest_compare(cop0),
- vcpu->arch.gprs[rt]);
-
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
kvm_mips_write_compare(vcpu,
@@ -1155,7 +1197,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
@@ -1166,7 +1208,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* the near future.
*/
if (change & ST0_CU1 &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_status(ST0_CU1, val);
preempt_enable();
@@ -1201,7 +1243,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
/*
@@ -1211,7 +1253,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* quickly enabled again in the near future.
*/
if (change & MIPS_CONF5_MSAEN &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
change_c0_config5(MIPS_CONF5_MSAEN,
val);
@@ -1219,7 +1261,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
- uint32_t old_cause, new_cause;
+ u32 old_cause, new_cause;
old_cause = kvm_read_c0_guest_cause(cop0);
new_cause = vcpu->arch.gprs[rt];
@@ -1233,20 +1275,30 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
else
kvm_mips_count_enable_cause(vcpu);
}
+ } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
+ u32 mask = MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
+
+ if (kvm_read_c0_guest_config3(cop0) &
+ MIPS_CONF3_ULRI)
+ mask |= MIPS_HWRENA_ULR;
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
} else {
cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
}
-
- kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
- rd, sel, cop0->reg[rd][sel]);
break;
case dmtc_op:
kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
vcpu->arch.pc, rt, rd, sel);
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
er = EMULATE_FAIL;
break;
@@ -1258,7 +1310,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0);
/* EI */
- if (inst & 0x20) {
+ if (inst.mfmc0_format.sc) {
kvm_debug("[%#lx] mfmc0_op: EI\n",
vcpu->arch.pc);
kvm_set_c0_guest_status(cop0, ST0_IE);
@@ -1272,9 +1324,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
case wrpgpr_op:
{
- uint32_t css =
- cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
- uint32_t pss =
+ u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+ u32 pss =
(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
/*
* We don't support any shadow register sets, so
@@ -1291,7 +1342,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
break;
default:
kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
- vcpu->arch.pc, copz);
+ vcpu->arch.pc, inst.c0r_format.rs);
er = EMULATE_FAIL;
break;
}
@@ -1312,13 +1363,14 @@ dont_update_pc:
return er;
}
-enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
- int32_t op, base, rt, offset;
- uint32_t bytes;
+ u32 rt;
+ u32 bytes;
void *data = run->mmio.data;
unsigned long curr_pc;
@@ -1331,12 +1383,9 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
if (er == EMULATE_FAIL)
return er;
- rt = (inst >> 16) & 0x1f;
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- op = (inst >> 26) & 0x3f;
+ rt = inst.i_format.rt;
- switch (op) {
+ switch (inst.i_format.opcode) {
case sb_op:
bytes = 1;
if (bytes > sizeof(run->mmio.data)) {
@@ -1357,7 +1406,7 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
*(u8 *) data = vcpu->arch.gprs[rt];
kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
- *(uint8_t *) data);
+ *(u8 *) data);
break;
@@ -1379,11 +1428,11 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- *(uint32_t *) data = vcpu->arch.gprs[rt];
+ *(u32 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(uint32_t *) data);
+ vcpu->arch.gprs[rt], *(u32 *) data);
break;
case sh_op:
@@ -1404,15 +1453,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- *(uint16_t *) data = vcpu->arch.gprs[rt];
+ *(u16 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(uint32_t *) data);
+ vcpu->arch.gprs[rt], *(u32 *) data);
break;
default:
- kvm_err("Store not yet supported");
+ kvm_err("Store not yet supported (inst=0x%08x)\n",
+ inst.word);
er = EMULATE_FAIL;
break;
}
@@ -1424,18 +1474,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
return er;
}
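
Both the store and load emulators above now read the rt field and the major opcode through the union mips_instruction bit-fields instead of open-coded shifts. For I-type encodings the two forms are interchangeable; the sketch below checks that on a little-endian host, using a local copy of the field layout (names are illustrative, and the kernel's own header additionally handles big-endian bit-field ordering).

#include <stdint.h>
#include <assert.h>

/* I-type layout: opcode[31:26] rs[25:21] rt[20:16] simmediate[15:0] */
struct i_format_sketch {
	unsigned int simmediate:16;
	unsigned int rt:5;
	unsigned int rs:5;
	unsigned int opcode:6;
};

union inst_sketch {
	uint32_t word;
	struct i_format_sketch i_format;
};

int main(void)
{
	union inst_sketch inst = { .word = 0xafc20018 };	/* sw v0, 24(s8) */

	/* Same values the removed shift/mask code computed */
	assert(inst.i_format.opcode == ((inst.word >> 26) & 0x3f));
	assert(inst.i_format.rt     == ((inst.word >> 16) & 0x1f));
	assert(inst.i_format.rs     == ((inst.word >> 21) & 0x1f));
	return 0;
}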
-enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
- struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
- int32_t op, base, rt, offset;
- uint32_t bytes;
+ u32 op, rt;
+ u32 bytes;
- rt = (inst >> 16) & 0x1f;
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- op = (inst >> 26) & 0x3f;
+ rt = inst.i_format.rt;
+ op = inst.i_format.opcode;
vcpu->arch.pending_load_cause = cause;
vcpu->arch.io_gpr = rt;
@@ -1521,7 +1569,8 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
break;
default:
- kvm_err("Load not yet supported");
+ kvm_err("Load not yet supported (inst=0x%08x)\n",
+ inst.word);
er = EMULATE_FAIL;
break;
}
@@ -1529,40 +1578,15 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
return er;
}
-int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
-{
- unsigned long offset = (va & ~PAGE_MASK);
- struct kvm *kvm = vcpu->kvm;
- unsigned long pa;
- gfn_t gfn;
- kvm_pfn_t pfn;
-
- gfn = va >> PAGE_SHIFT;
-
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- return -1;
- }
- pfn = kvm->arch.guest_pmap[gfn];
- pa = (pfn << PAGE_SHIFT) | offset;
-
- kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
- CKSEG0ADDR(pa));
-
- local_flush_icache_range(CKSEG0ADDR(pa), 32);
- return 0;
-}
-
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- int32_t offset, cache, op_inst, op, base;
+ u32 cache, op_inst, op, base;
+ s16 offset;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long va;
unsigned long curr_pc;
@@ -1576,9 +1600,12 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
if (er == EMULATE_FAIL)
return er;
- base = (inst >> 21) & 0x1f;
- op_inst = (inst >> 16) & 0x1f;
- offset = (int16_t)inst;
+ base = inst.i_format.rs;
+ op_inst = inst.i_format.rt;
+ if (cpu_has_mips_r6)
+ offset = inst.spec3_format.simmediate;
+ else
+ offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
@@ -1634,7 +1661,6 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
(cop0) & KVM_ENTRYHI_ASID));
if (index < 0) {
- vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
@@ -1659,9 +1685,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* We fault an entry from the guest tlb to the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL,
- NULL);
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
}
}
} else {
@@ -1714,20 +1738,20 @@ dont_update_pc:
return er;
}
-enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
- uint32_t inst;
/* Fetch the instruction. */
if (cause & CAUSEF_BD)
opc += 1;
- inst = kvm_get_inst(opc, vcpu);
+ inst.word = kvm_get_inst(opc, vcpu);
- switch (((union mips_instruction)inst).r_format.opcode) {
+ switch (inst.r_format.opcode) {
case cop0_op:
er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
break;
@@ -1744,15 +1768,31 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
break;
+#ifndef CONFIG_CPU_MIPSR6
case cache_op:
++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, CACHE_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
break;
+#else
+ case spec3_op:
+ switch (inst.spec3_format.func) {
+ case cache6_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_mips_emulate_cache(inst, opc, cause, run,
+ vcpu);
+ break;
+ default:
+ goto unknown;
+ };
+ break;
+unknown:
+#endif
default:
kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
- inst);
+ inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL;
break;
@@ -1761,8 +1801,8 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
return er;
}
-enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1796,8 +1836,8 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1842,8 +1882,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1888,8 +1928,8 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1932,8 +1972,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1977,7 +2017,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2005,8 +2045,8 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
return er;
}
-enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2048,8 +2088,8 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2077,8 +2117,8 @@ enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2112,8 +2152,8 @@ enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2147,8 +2187,8 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2182,8 +2222,8 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2217,8 +2257,8 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2252,8 +2292,8 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2287,22 +2327,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
return er;
}
-/* ll/sc, rdhwr, sync emulation */
-
-#define OPCODE 0xfc000000
-#define BASE 0x03e00000
-#define RT 0x001f0000
-#define OFFSET 0x0000ffff
-#define LL 0xc0000000
-#define SC 0xe0000000
-#define SPEC0 0x00000000
-#define SPEC3 0x7c000000
-#define RD 0x0000f800
-#define FUNC 0x0000003f
-#define SYNC 0x0000000f
-#define RDHWR 0x0000003b
-
-enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2310,7 +2335,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
- uint32_t inst;
+ union mips_instruction inst;
/*
* Update PC and hold onto current PC in case there is
@@ -2325,17 +2350,22 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
if (cause & CAUSEF_BD)
opc += 1;
- inst = kvm_get_inst(opc, vcpu);
+ inst.word = kvm_get_inst(opc, vcpu);
- if (inst == KVM_INVALID_INST) {
+ if (inst.word == KVM_INVALID_INST) {
kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
return EMULATE_FAIL;
}
- if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+ if (inst.r_format.opcode == spec3_op &&
+ inst.r_format.func == rdhwr_op &&
+ inst.r_format.rs == 0 &&
+ (inst.r_format.re >> 3) == 0) {
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
- int rd = (inst & RD) >> 11;
- int rt = (inst & RT) >> 16;
+ int rd = inst.r_format.rd;
+ int rt = inst.r_format.rt;
+ int sel = inst.r_format.re & 0x7;
+
/* If usermode, check RDHWR rd is allowed by guest HWREna */
if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
@@ -2343,17 +2373,17 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
goto emulate_ri;
}
switch (rd) {
- case 0: /* CPU number */
- arch->gprs[rt] = 0;
+ case MIPS_HWR_CPUNUM: /* CPU number */
+ arch->gprs[rt] = vcpu->vcpu_id;
break;
- case 1: /* SYNCI length */
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
break;
- case 2: /* Read count register */
- arch->gprs[rt] = kvm_mips_read_count(vcpu);
+ case MIPS_HWR_CC: /* Read count register */
+ arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
break;
- case 3: /* Count register resolution */
+ case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
@@ -2363,7 +2393,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
arch->gprs[rt] = 2;
}
break;
- case 29:
+ case MIPS_HWR_ULR: /* Read UserLocal register */
arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
break;
@@ -2371,8 +2401,12 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
goto emulate_ri;
}
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
+ vcpu->arch.gprs[rt]);
} else {
- kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ kvm_debug("Emulate RI not supported @ %p: %#x\n",
+ opc, inst.word);
goto emulate_ri;
}
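
The rewritten RDHWR check above matches the full encoding (SPEC3 major opcode, rdhwr function, rs = 0, upper bits of the shift-amount field clear) and only then extracts rd, rt and the 3-bit sel. The stand-alone sketch below decodes the familiar TLS read `rdhwr v1, $29` the same way; the opcode and function values mirror the SPEC3/RDHWR constants this patch deletes a few hunks earlier.

#include <stdint.h>
#include <stdio.h>

/* Encodings assumed from the MIPS32 ISA: SPEC3 major opcode, RDHWR function */
#define SPEC3_OP   0x1f
#define RDHWR_FUNC 0x3b

int main(void)
{
	uint32_t word = 0x7c03e83b;	/* rdhwr v1, $29 (UserLocal), as used for TLS */
	uint32_t opcode = word >> 26;
	uint32_t rs   = (word >> 21) & 0x1f;
	uint32_t rt   = (word >> 16) & 0x1f;
	uint32_t rd   = (word >> 11) & 0x1f;
	uint32_t re   = (word >> 6) & 0x1f;
	uint32_t func = word & 0x3f;

	/* Same acceptance test as the patched kvm_mips_handle_ri() */
	if (opcode == SPEC3_OP && func == RDHWR_FUNC && rs == 0 && (re >> 3) == 0)
		printf("rdhwr: rd=%u rt=%u sel=%u\n", rd, rt, re & 0x7);
	return 0;
}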
@@ -2405,19 +2439,19 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 4:
- *gpr = *(int32_t *) run->mmio.data;
+ *gpr = *(s32 *) run->mmio.data;
break;
case 2:
if (vcpu->mmio_needed == 2)
- *gpr = *(int16_t *) run->mmio.data;
+ *gpr = *(s16 *) run->mmio.data;
else
- *gpr = *(uint16_t *)run->mmio.data;
+ *gpr = *(u16 *)run->mmio.data;
break;
case 1:
if (vcpu->mmio_needed == 2)
- *gpr = *(int8_t *) run->mmio.data;
+ *gpr = *(s8 *) run->mmio.data;
else
*gpr = *(u8 *) run->mmio.data;
break;
@@ -2432,12 +2466,12 @@ done:
return er;
}
-static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
- uint32_t *opc,
+static enum emulation_result kvm_mips_emulate_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
@@ -2470,13 +2504,13 @@ static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
@@ -2566,18 +2600,18 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this
* case we inject the TLB from the Guest TLB into the shadow host TLB
*/
-enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long va = vcpu->arch.host_cp0_badvaddr;
int index;
- kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
- vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+ kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
+ vcpu->arch.host_cp0_badvaddr);
/*
* KVM would not have got the exception if this entry was valid in the
@@ -2620,13 +2654,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
}
} else {
kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
- tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+ tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
/*
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
- NULL);
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
}
}
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
new file mode 100644
index 000000000000..6a02b3a3fa65
--- /dev/null
+++ b/arch/mips/kvm/entry.c
@@ -0,0 +1,701 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Generation of main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ *
+ * Copyright (C) 2016 Imagination Technologies Ltd.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/msa.h>
+#include <asm/setup.h>
+#include <asm/uasm.h>
+
+/* Register names */
+#define ZERO 0
+#define AT 1
+#define V0 2
+#define V1 3
+#define A0 4
+#define A1 5
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+#define T0 12
+#define T1 13
+#define T2 14
+#define T3 15
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#define S0 16
+#define S1 17
+#define T9 25
+#define K0 26
+#define K1 27
+#define GP 28
+#define SP 29
+#define RA 31
+
+/* Some CP0 registers */
+#define C0_HWRENA 7, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_STATUS 12, 0
+#define C0_CAUSE 13, 0
+#define C0_EPC 14, 0
+#define C0_EBASE 15, 1
+#define C0_CONFIG5 16, 5
+#define C0_DDATA_LO 28, 3
+#define C0_ERROREPC 30, 0
+
+#define CALLFRAME_SIZ 32
+
+#ifdef CONFIG_64BIT
+#define ST0_KX_IF_64 ST0_KX
+#else
+#define ST0_KX_IF_64 0
+#endif
+
+static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
+static unsigned int scratch_tmp[2] = { C0_ERROREPC };
+
+enum label_id {
+ label_fpu_1 = 1,
+ label_msa_1,
+ label_return_to_host,
+ label_kernel_asid,
+ label_exit_common,
+};
+
+UASM_L_LA(_fpu_1)
+UASM_L_LA(_msa_1)
+UASM_L_LA(_return_to_host)
+UASM_L_LA(_kernel_asid)
+UASM_L_LA(_exit_common)
+
+static void *kvm_mips_build_enter_guest(void *addr);
+static void *kvm_mips_build_ret_from_exit(void *addr);
+static void *kvm_mips_build_ret_to_guest(void *addr);
+static void *kvm_mips_build_ret_to_host(void *addr);
+
+/**
+ * kvm_mips_entry_setup() - Perform global setup for entry code.
+ *
+ * Perform global setup for entry code, such as choosing a scratch register.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+int kvm_mips_entry_setup(void)
+{
+ /*
+ * We prefer to use KScratchN registers if they are available over the
+ * defaults above, which may not work on all cores.
+ */
+ unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
+
+ /* Pick a scratch register for storing VCPU */
+ if (kscratch_mask) {
+ scratch_vcpu[0] = 31;
+ scratch_vcpu[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_vcpu[1]);
+ }
+
+ /* Pick a scratch register to use as a temp for saving state */
+ if (kscratch_mask) {
+ scratch_tmp[0] = 31;
+ scratch_tmp[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_tmp[1]);
+ }
+
+ return 0;
+}
+
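
kvm_mips_entry_setup() above keeps the DDATA_LO/ErrorEPC defaults unless the CPU advertises KScratch registers, in which case it takes the two lowest usable selects from the probed mask with ffs(). A small user-space sketch of that selection, using a made-up mask value:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* hypothetical CPU: KScratch selects 2..5 usable */
	unsigned int kscratch_mask = 0x3c & 0xfc;
	unsigned int vcpu_sel = 0, tmp_sel = 0;

	if (kscratch_mask) {
		vcpu_sel = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~(1u << vcpu_sel);
	}
	if (kscratch_mask) {
		tmp_sel = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~(1u << tmp_sel);
	}
	printf("vcpu scratch: $31 sel %u, temp scratch: $31 sel %u\n",
	       vcpu_sel, tmp_sel);
	return 0;
}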
+static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /* Save the VCPU scratch register value in cp0_epc of the stack frame */
+ UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+
+ /* Save the temp scratch register value in cp0_cause of stack frame */
+ if (scratch_tmp[0] == 31) {
+ UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ }
+}
+
+static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /*
+ * Restore host scratch register values saved by
+ * kvm_mips_build_save_scratch().
+ */
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+ UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+
+ if (scratch_tmp[0] == 31) {
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ }
+}
+
+/**
+ * build_set_exc_base() - Assemble code to write exception base address.
+ * @p: Code buffer pointer.
+ * @reg: Source register (generated code may set WG bit in @reg).
+ *
+ * Assemble code to modify the exception base address in the EBase register,
+ * using the appropriately sized access and setting the WG bit if necessary.
+ */
+static inline void build_set_exc_base(u32 **p, unsigned int reg)
+{
+ if (cpu_has_ebase_wg) {
+ /* Set WG so that all the bits get written */
+ uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
+ UASM_i_MTC0(p, reg, C0_EBASE);
+ } else {
+ uasm_i_mtc0(p, reg, C0_EBASE);
+ }
+}
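
build_set_exc_base() only sets WG when the core supports it; without a write gate only bits 29:12 of EBase can be changed, so the exception base has to sit in the low 512MB of physical memory, which is exactly the check kvm_arch_vcpu_create() adds later in this series. A sketch of that constraint, with the WG bit position assumed from the MIPS32 PRA rather than taken from the headers:

#include <stdint.h>
#include <stdio.h>

#define EBASE_WG (1u << 11)	/* EBase write-gate bit (assumed position) */

/*
 * With WG the full physical base is written; without it only bits 29:12
 * are writable, so bases at or above 512MB cannot be installed.
 */
static int set_exc_base(uint32_t pa, int has_wg, uint32_t *ebase_val)
{
	if (!has_wg && pa >= 0x20000000)
		return -1;			/* out of reach of EBase */
	*ebase_val = has_wg ? (pa | EBASE_WG) : pa;
	return 0;
}

int main(void)
{
	uint32_t v;

	printf("low:  %d\n", set_exc_base(0x01e00000, 0, &v));
	printf("high: %d\n", set_exc_base(0x40000000, 0, &v));
	return 0;
}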
+
+/**
+ * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the start of the vcpu_run function to run a guest VCPU. The function
+ * conforms to the following prototype:
+ *
+ * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ *
+ * The exit from the guest and return to the caller is handled by the code
+ * generated by kvm_mips_build_ret_to_host().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_vcpu_run(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /*
+ * A0: run
+ * A1: vcpu
+ */
+
+ /* k0/k1 not being used in host kernel context */
+ UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
+ for (i = 16; i < 32; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Save host status */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+ UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
+
+ /* Save scratch registers, will be used to store pointer to vcpu etc */
+ kvm_mips_build_save_scratch(&p, V1, K1);
+
+ /* VCPU scratch register has pointer to vcpu */
+ UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Offset into vcpu->arch */
+ UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Save the host stack to VCPU, used for exception processing
+ * when we exit from the Guest
+ */
+ UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Save the kernel gp as well */
+ UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /*
+ * Setup status register for running the guest in UM, interrupts
+ * are disabled
+ */
+ UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* load up the new EBASE */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+ build_set_exc_base(&p, K0);
+
+ /*
+ * Now that the new EBASE has been loaded, unset BEV, set
+ * interrupt mask as it was but make sure that timer interrupts
+ * are enabled
+ */
+ uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
+ uasm_i_andi(&p, V0, V0, ST0_IM);
+ uasm_i_or(&p, K0, K0, V0);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
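
Because the emitted code follows the prototype in the comment above, the buffer can be entered like any C function once it has been icache-flushed; mips.c later stores its address in vcpu->arch.vcpu_run. A hedged sketch of that call (the typedef and helper are illustrative, not kernel declarations):

#include <linux/kvm_host.h>

/* Illustrative only: the shape of the call into the generated entry code */
typedef int (*kvm_vcpu_run_fn)(struct kvm_run *run, struct kvm_vcpu *vcpu);

static int run_guest_once(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_run_fn fn = (kvm_vcpu_run_fn)vcpu->arch.vcpu_run;

	/* the buffer was flushed from the icache when it was assembled */
	return fn(vcpu->run, vcpu);
}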
+
+/**
+ * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to resume guest execution. This code is common between the
+ * initial entry into the guest from the host, and returning from the exit
+ * handler back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_enter_guest(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Set Guest EPC */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
+ UASM_i_MTC0(&p, T0, C0_EPC);
+
+ /* Set the ASID for the Guest Kernel */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
+ UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
+ T0);
+ uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
+ uasm_i_xori(&p, T0, T0, KSU_USER);
+ uasm_il_bnez(&p, &r, T0, label_kernel_asid);
+ UASM_i_ADDIU(&p, T1, K1,
+ offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
+ /* else user */
+ UASM_i_ADDIU(&p, T1, K1,
+ offsetof(struct kvm_vcpu_arch, guest_user_asid));
+ uasm_l_kernel_asid(&l, p);
+
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ /* smp_processor_id */
+ uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
+ /* x4 */
+ uasm_i_sll(&p, T2, T2, 2);
+ UASM_i_ADDU(&p, T3, T1, T2);
+ uasm_i_lw(&p, K0, 0, T3);
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ /* x sizeof(struct cpuinfo_mips)/4 */
+ uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
+ uasm_i_mul(&p, T2, T2, T3);
+
+ UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
+ UASM_i_ADDU(&p, AT, AT, T2);
+ UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
+ uasm_i_and(&p, K0, K0, T2);
+#else
+ uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
+#endif
+ uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+ uasm_i_ehb(&p);
+
+ /* Disable RDHWR access */
+ uasm_i_mtc0(&p, ZERO, C0_HWRENA);
+
+ /* load the guest context from VCPU and return */
+ for (i = 1; i < 32; ++i) {
+ /* Guest k0/k1 loaded later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* Restore hi/lo */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
+ uasm_i_mthi(&p, K0);
+
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
+ uasm_i_mtlo(&p, K0);
+#endif
+
+ /* Restore the guest's k0/k1 registers */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Jump to guest */
+ uasm_i_eret(&p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
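
The andi/xori/bnez sequence above picks between guest_kernel_asid and guest_user_asid from the guest's CP0_Status. The same test written in C, as a hedged illustration (the helper is not part of the patch; the accessors and Status bits it uses do appear elsewhere in this series):

#include <linux/kvm_host.h>
#include <asm/mipsregs.h>

/*
 * The guest is in user mode only when ERL and EXL are clear and KSU
 * selects user; that is when the guest_user_asid array is used instead
 * of guest_kernel_asid.
 */
static bool guest_in_user_mode(struct kvm_vcpu *vcpu)
{
	u32 status = kvm_read_c0_guest_status(vcpu->arch.cop0);

	return (status & (ST0_ERL | ST0_EXL | KSU_USER)) == KSU_USER;
}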
+
+/**
+ * kvm_mips_build_exception() - Assemble first level guest exception handler.
+ * @addr: Address to start writing code.
+ * @handler: Address of common handler (within range of @addr).
+ *
+ * Assemble exception vector code for guest execution. The generated vector will
+ * branch to the common exception handler generated by kvm_mips_build_exit().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exception(void *addr, void *handler)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Save guest k1 into scratch register */
+ UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+ /* Get the VCPU pointer from the VCPU scratch register */
+ UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /* Save guest k0 into VCPU structure */
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+
+ /* Branch to the common handler */
+ uasm_il_b(&p, &r, label_exit_common);
+ uasm_i_nop(&p);
+
+ uasm_l_exit_common(&l, handler);
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exit() - Assemble common guest exit handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the generic guest exit handling code. This is called by the
+ * exception vectors (generated by kvm_mips_build_exception()), and calls
+ * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
+ * depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exit(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[3];
+ struct uasm_reloc relocs[3];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ * Both k0/k1 registers will have already been saved (k0 into the vcpu
+ * structure, and k1 into the scratch_tmp register).
+ *
+ * The k1 register will already contain the kvm_vcpu_arch pointer.
+ */
+
+ /* Start saving Guest context to VCPU */
+ for (i = 0; i < 32; ++i) {
+ /* Guest k0/k1 saved later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* We need to save hi/lo and restore them on the way out */
+ uasm_i_mfhi(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
+
+ uasm_i_mflo(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
+#endif
+
+ /* Finally save guest k1 to VCPU */
+ uasm_i_ehb(&p);
+ UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+ uasm_i_move(&p, S1, A1);
+
+ /* Restore run (vcpu->run) */
+ UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
+ /* Save pointer to run in s0, will be saved by the compiler */
+ uasm_i_move(&p, S0, A0);
+
+ /*
+ * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
+ * the exception
+ */
+ UASM_i_MFC0(&p, K0, C0_EPC);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
+
+ UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
+ K1);
+
+ uasm_i_mfc0(&p, K0, C0_CAUSE);
+ uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+
+ /* Now restore the host state just enough to run the handlers */
+
+ /* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V0, AT);
+
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ UASM_i_LA_mostly(&p, K0, (long)&ebase);
+ UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
+ build_set_exc_base(&p, K0);
+
+ if (raw_cpu_has_fpu) {
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later
+ * ctc1's don't trigger FPE for pending exceptions.
+ */
+ uasm_i_lui(&p, AT, ST0_CU1 >> 16);
+ uasm_i_and(&p, V1, V0, AT);
+ uasm_il_beqz(&p, &r, V1, label_fpu_1);
+ uasm_i_nop(&p);
+ uasm_i_cfc1(&p, T0, 31);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
+ K1);
+ uasm_i_ctc1(&p, ZERO, 31);
+ uasm_l_fpu_1(&l, p);
+ }
+
+ if (cpu_has_msa) {
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ uasm_i_mfc0(&p, T0, C0_CONFIG5);
+ uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
+ uasm_il_beqz(&p, &r, T0, label_msa_1);
+ uasm_i_nop(&p);
+ uasm_i_cfcmsa(&p, T0, MSA_CSR);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+ K1);
+ uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+ uasm_l_msa_1(&l, p);
+ }
+
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+ uasm_i_and(&p, V0, V0, AT);
+ uasm_i_lui(&p, AT, ST0_CU0 >> 16);
+ uasm_i_or(&p, V0, V0, AT);
+ uasm_i_mtc0(&p, V0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* Load up host GP */
+ UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /* Need a stack before we can jump to "C" */
+ UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Saved host state */
+ UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
+
+ /*
+ * XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
+
+ /* Restore host scratch registers, as we'll have clobbered them */
+ kvm_mips_build_restore_scratch(&p, K0, SP);
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Jump to handler */
+ /*
+ * XXXKYMA: not sure if this is safe, how large is the stack??
+ * Now jump to the kvm_mips_handle_exit() to see if we can deal
+ * with this in the kernel
+ */
+ UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
+ uasm_i_jalr(&p, RA, T9);
+ UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ p = kvm_mips_build_ret_from_exit(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle the return from kvm_mips_handle_exit(), either
+ * resuming the guest or returning to the host depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_from_exit(void *addr)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+	/* Return from handler; make sure interrupts are disabled */
+ uasm_i_di(&p, ZERO);
+ uasm_i_ehb(&p);
+
+ /*
+ * XXXKYMA: k0/k1 could have been blown away if we processed
+ * an exception while we were handling the exception from the
+ * guest, reload k1
+ */
+
+ uasm_i_move(&p, K1, S1);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Check return value, should tell us if we are returning to the
+	 * host (handle I/O etc) or resuming the guest
+ */
+ uasm_i_andi(&p, T0, V0, RESUME_HOST);
+ uasm_il_bnez(&p, &r, T0, label_return_to_host);
+ uasm_i_nop(&p);
+
+ p = kvm_mips_build_ret_to_guest(p);
+
+ uasm_l_return_to_host(&l, p);
+ p = kvm_mips_build_ret_to_host(p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_guest(void *addr)
+{
+ u32 *p = addr;
+
+ /* Put the saved pointer to vcpu (s1) back into the scratch register */
+ UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+
+ /* Switch EBASE back to the one used by KVM */
+ uasm_i_mfc0(&p, V1, C0_STATUS);
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V1, AT);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+ build_set_exc_base(&p, T0);
+
+ /* Setup status register for running guest in UM */
+ uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
+ UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
+ uasm_i_and(&p, V1, V1, AT);
+ uasm_i_mtc0(&p, V1, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
+ * function generated by kvm_mips_build_vcpu_run().
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_host(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /* EBASE is already pointing to Linux */
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+ UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
+
+ /*
+ * r2/v0 is the return code, shift it down by 2 (arithmetic)
+ * to recover the err code
+ */
+ uasm_i_sra(&p, K0, V0, 2);
+ uasm_i_move(&p, V0, K0);
+
+ /* Load context saved on the host stack */
+ for (i = 16; i < 31; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Restore RA, which is the address we will return to */
+ UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
+ uasm_i_jr(&p, RA);
+ uasm_i_nop(&p);
+
+ return p;
+}
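
The generated return paths split kvm_mips_handle_exit()'s result into a resume flag and an error code: bit 1 selects return-to-host, and the host path's arithmetic shift by 2 recovers the value vcpu_run() hands back to its caller. A small sketch of that decoding, with the flag values taken from the locore.S defines this series removes (the packed example value is illustrative):

#include <stdio.h>

#define RESUME_FLAG_HOST (1 << 1)	/* from the removed locore.S defines */
#define RESUME_GUEST     0
#define RESUME_HOST      RESUME_FLAG_HOST

/* How the generated ret-from-exit/ret-to-host code treats the handler result */
static void dispatch(int ret)
{
	if (ret & RESUME_HOST)
		/* arithmetic shift by 2 recovers the err code for the caller */
		printf("return to host, err=%d\n", ret >> 2);
	else
		printf("resume guest\n");
}

int main(void)
{
	dispatch(RESUME_GUEST);
	dispatch(-14);		/* e.g. (-EINTR << 2) | RESUME_HOST */
	return 0;
}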
+
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
index 531fbf5131c0..16f17c6390dd 100644
--- a/arch/mips/kvm/fpu.S
+++ b/arch/mips/kvm/fpu.S
@@ -14,13 +14,16 @@
#include <asm/mipsregs.h>
#include <asm/regdef.h>
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
.set noreorder
.set noat
LEAF(__kvm_save_fpu)
.set push
- .set mips64r2
SET_HARDFLOAT
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
@@ -63,8 +66,8 @@ LEAF(__kvm_save_fpu)
LEAF(__kvm_restore_fpu)
.set push
- .set mips64r2
SET_HARDFLOAT
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 95f790663b0c..ad28dac6b7e9 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -22,12 +22,12 @@
#include "interrupt.h"
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
set_bit(priority, &vcpu->arch.pending_exceptions);
}
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
@@ -114,10 +114,10 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause)
+ u32 cause)
{
int allowed = 0;
- uint32_t exccode;
+ u32 exccode;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -196,12 +196,12 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause)
+ u32 cause)
{
return 1;
}
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 2143884709e4..fb118a2c8379 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,17 +28,13 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
-extern char __kvm_mips_vcpu_run_end[];
-extern char mips32_exception[], mips32_exceptionEnd[];
-extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
-
#define C_TI (_ULCAST_(1) << 30)
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
@@ -48,7 +44,7 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
+ u32 cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
deleted file mode 100644
index 828fcfc1cd7f..000000000000
--- a/arch/mips/kvm/locore.S
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Main entry point for the guest, exception handling.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <asm/asm.h>
-#include <asm/asmmacro.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-#include <asm/asm-offsets.h>
-
-#define _C_LABEL(x) x
-#define MIPSX(name) mips32_ ## name
-#define CALLFRAME_SIZ 32
-
-/*
- * VECTOR
- * exception vector entrypoint
- */
-#define VECTOR(x, regmask) \
- .ent _C_LABEL(x),0; \
- EXPORT(x);
-
-#define VECTOR_END(x) \
- EXPORT(x);
-
-/* Overload, Danger Will Robinson!! */
-#define PT_HOST_USERLOCAL PT_EPC
-
-#define CP0_DDATA_LO $28,3
-
-/* Resume Flags */
-#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
-
-#define RESUME_GUEST 0
-#define RESUME_HOST RESUME_FLAG_HOST
-
-/*
- * __kvm_mips_vcpu_run: entry point to the guest
- * a0: run
- * a1: vcpu
- */
- .set noreorder
-
-FEXPORT(__kvm_mips_vcpu_run)
- /* k0/k1 not being used in host kernel context */
- INT_ADDIU k1, sp, -PT_SIZE
- LONG_S $16, PT_R16(k1)
- LONG_S $17, PT_R17(k1)
- LONG_S $18, PT_R18(k1)
- LONG_S $19, PT_R19(k1)
- LONG_S $20, PT_R20(k1)
- LONG_S $21, PT_R21(k1)
- LONG_S $22, PT_R22(k1)
- LONG_S $23, PT_R23(k1)
-
- LONG_S $28, PT_R28(k1)
- LONG_S $29, PT_R29(k1)
- LONG_S $30, PT_R30(k1)
- LONG_S $31, PT_R31(k1)
-
- /* Save hi/lo */
- mflo v0
- LONG_S v0, PT_LO(k1)
- mfhi v1
- LONG_S v1, PT_HI(k1)
-
- /* Save host status */
- mfc0 v0, CP0_STATUS
- LONG_S v0, PT_STATUS(k1)
-
- /* Save DDATA_LO, will be used to store pointer to vcpu */
- mfc0 v1, CP0_DDATA_LO
- LONG_S v1, PT_HOST_USERLOCAL(k1)
-
- /* DDATA_LO has pointer to vcpu */
- mtc0 a1, CP0_DDATA_LO
-
- /* Offset into vcpu->arch */
- INT_ADDIU k1, a1, VCPU_HOST_ARCH
-
- /*
- * Save the host stack to VCPU, used for exception processing
- * when we exit from the Guest
- */
- LONG_S sp, VCPU_HOST_STACK(k1)
-
- /* Save the kernel gp as well */
- LONG_S gp, VCPU_HOST_GP(k1)
-
- /*
- * Setup status register for running the guest in UM, interrupts
- * are disabled
- */
- li k0, (ST0_EXL | KSU_USER | ST0_BEV)
- mtc0 k0, CP0_STATUS
- ehb
-
- /* load up the new EBASE */
- LONG_L k0, VCPU_GUEST_EBASE(k1)
- mtc0 k0, CP0_EBASE
-
- /*
- * Now that the new EBASE has been loaded, unset BEV, set
- * interrupt mask as it was but make sure that timer interrupts
- * are enabled
- */
- li k0, (ST0_EXL | KSU_USER | ST0_IE)
- andi v0, v0, ST0_IM
- or k0, k0, v0
- mtc0 k0, CP0_STATUS
- ehb
-
- /* Set Guest EPC */
- LONG_L t0, VCPU_PC(k1)
- mtc0 t0, CP0_EPC
-
-FEXPORT(__kvm_mips_load_asid)
- /* Set the ASID for the Guest Kernel */
- PTR_L t0, VCPU_COP0(k1)
- LONG_L t0, COP0_STATUS(t0)
- andi t0, KSU_USER | ST0_ERL | ST0_EXL
- xori t0, KSU_USER
- bnez t0, 1f /* If kernel */
- INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
-1:
- /* t1: contains the base of the ASID array, need to get the cpu id */
- LONG_L t2, TI_CPU($28) /* smp_processor_id */
- INT_SLL t2, t2, 2 /* x4 */
- REG_ADDU t3, t1, t2
- LONG_L k0, (t3)
-#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
- li t3, CPUINFO_SIZE/4
- mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
- LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
- and k0, k0, t2
-#else
- andi k0, k0, MIPS_ENTRYHI_ASID
-#endif
- mtc0 k0, CP0_ENTRYHI
- ehb
-
- /* Disable RDHWR access */
- mtc0 zero, CP0_HWRENA
-
- .set noat
- /* Now load up the Guest Context from VCPU */
- LONG_L $1, VCPU_R1(k1)
- LONG_L $2, VCPU_R2(k1)
- LONG_L $3, VCPU_R3(k1)
-
- LONG_L $4, VCPU_R4(k1)
- LONG_L $5, VCPU_R5(k1)
- LONG_L $6, VCPU_R6(k1)
- LONG_L $7, VCPU_R7(k1)
-
- LONG_L $8, VCPU_R8(k1)
- LONG_L $9, VCPU_R9(k1)
- LONG_L $10, VCPU_R10(k1)
- LONG_L $11, VCPU_R11(k1)
- LONG_L $12, VCPU_R12(k1)
- LONG_L $13, VCPU_R13(k1)
- LONG_L $14, VCPU_R14(k1)
- LONG_L $15, VCPU_R15(k1)
- LONG_L $16, VCPU_R16(k1)
- LONG_L $17, VCPU_R17(k1)
- LONG_L $18, VCPU_R18(k1)
- LONG_L $19, VCPU_R19(k1)
- LONG_L $20, VCPU_R20(k1)
- LONG_L $21, VCPU_R21(k1)
- LONG_L $22, VCPU_R22(k1)
- LONG_L $23, VCPU_R23(k1)
- LONG_L $24, VCPU_R24(k1)
- LONG_L $25, VCPU_R25(k1)
-
- /* k0/k1 loaded up later */
-
- LONG_L $28, VCPU_R28(k1)
- LONG_L $29, VCPU_R29(k1)
- LONG_L $30, VCPU_R30(k1)
- LONG_L $31, VCPU_R31(k1)
-
- /* Restore hi/lo */
- LONG_L k0, VCPU_LO(k1)
- mtlo k0
-
- LONG_L k0, VCPU_HI(k1)
- mthi k0
-
-FEXPORT(__kvm_mips_load_k0k1)
- /* Restore the guest's k0/k1 registers */
- LONG_L k0, VCPU_R26(k1)
- LONG_L k1, VCPU_R27(k1)
-
- /* Jump to guest */
- eret
-EXPORT(__kvm_mips_vcpu_run_end)
-
-VECTOR(MIPSX(exception), unknown)
-/* Find out what mode we came from and jump to the proper handler. */
- mtc0 k0, CP0_ERROREPC #01: Save guest k0
- ehb #02:
-
- mfc0 k0, CP0_EBASE #02: Get EBASE
- INT_SRL k0, k0, 10 #03: Get rid of CPUNum
- INT_SLL k0, k0, 10 #04
- LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
- INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
- # installed @ offset 0x2000
- j k0 #07: jump to the function
- nop #08: branch delay slot
-VECTOR_END(MIPSX(exceptionEnd))
-.end MIPSX(exception)
-
-/*
- * Generic Guest exception handler. We end up here when the guest
- * does something that causes a trap to kernel mode.
- */
-NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
- /* Get the VCPU pointer from DDTATA_LO */
- mfc0 k1, CP0_DDATA_LO
- INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
- /* Start saving Guest context to VCPU */
- LONG_S $0, VCPU_R0(k1)
- LONG_S $1, VCPU_R1(k1)
- LONG_S $2, VCPU_R2(k1)
- LONG_S $3, VCPU_R3(k1)
- LONG_S $4, VCPU_R4(k1)
- LONG_S $5, VCPU_R5(k1)
- LONG_S $6, VCPU_R6(k1)
- LONG_S $7, VCPU_R7(k1)
- LONG_S $8, VCPU_R8(k1)
- LONG_S $9, VCPU_R9(k1)
- LONG_S $10, VCPU_R10(k1)
- LONG_S $11, VCPU_R11(k1)
- LONG_S $12, VCPU_R12(k1)
- LONG_S $13, VCPU_R13(k1)
- LONG_S $14, VCPU_R14(k1)
- LONG_S $15, VCPU_R15(k1)
- LONG_S $16, VCPU_R16(k1)
- LONG_S $17, VCPU_R17(k1)
- LONG_S $18, VCPU_R18(k1)
- LONG_S $19, VCPU_R19(k1)
- LONG_S $20, VCPU_R20(k1)
- LONG_S $21, VCPU_R21(k1)
- LONG_S $22, VCPU_R22(k1)
- LONG_S $23, VCPU_R23(k1)
- LONG_S $24, VCPU_R24(k1)
- LONG_S $25, VCPU_R25(k1)
-
- /* Guest k0/k1 saved later */
-
- LONG_S $28, VCPU_R28(k1)
- LONG_S $29, VCPU_R29(k1)
- LONG_S $30, VCPU_R30(k1)
- LONG_S $31, VCPU_R31(k1)
-
- .set at
-
- /* We need to save hi/lo and restore them on the way out */
- mfhi t0
- LONG_S t0, VCPU_HI(k1)
-
- mflo t0
- LONG_S t0, VCPU_LO(k1)
-
- /* Finally save guest k0/k1 to VCPU */
- mfc0 t0, CP0_ERROREPC
- LONG_S t0, VCPU_R26(k1)
-
- /* Get GUEST k1 and save it in VCPU */
- PTR_LI t1, ~0x2ff
- mfc0 t0, CP0_EBASE
- and t0, t0, t1
- LONG_L t0, 0x3000(t0)
- LONG_S t0, VCPU_R27(k1)
-
- /* Now that context has been saved, we can use other registers */
-
- /* Restore vcpu */
- mfc0 a1, CP0_DDATA_LO
- move s1, a1
-
- /* Restore run (vcpu->run) */
- LONG_L a0, VCPU_RUN(a1)
- /* Save pointer to run in s0, will be saved by the compiler */
- move s0, a0
-
- /*
- * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
- * process the exception
- */
- mfc0 k0,CP0_EPC
- LONG_S k0, VCPU_PC(k1)
-
- mfc0 k0, CP0_BADVADDR
- LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
-
- mfc0 k0, CP0_CAUSE
- LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
-
- mfc0 k0, CP0_ENTRYHI
- LONG_S k0, VCPU_HOST_ENTRYHI(k1)
-
- /* Now restore the host state just enough to run the handlers */
-
- /* Switch EBASE to the one used by Linux */
- /* load up the host EBASE */
- mfc0 v0, CP0_STATUS
-
- or k0, v0, ST0_BEV
-
- mtc0 k0, CP0_STATUS
- ehb
-
- LONG_L k0, VCPU_HOST_EBASE(k1)
- mtc0 k0,CP0_EBASE
-
- /*
- * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
- * trigger FPE for pending exceptions.
- */
- and v1, v0, ST0_CU1
- beqz v1, 1f
- nop
- .set push
- SET_HARDFLOAT
- cfc1 t0, fcr31
- sw t0, VCPU_FCR31(k1)
- ctc1 zero,fcr31
- .set pop
-1:
-
-#ifdef CONFIG_CPU_HAS_MSA
- /*
- * If MSA is enabled, save MSACSR and clear it so that later
- * instructions don't trigger MSAFPE for pending exceptions.
- */
- mfc0 t0, CP0_CONFIG3
- ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
- beqz t0, 1f
- nop
- mfc0 t0, CP0_CONFIG5
- ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
- beqz t0, 1f
- nop
- _cfcmsa t0, MSA_CSR
- sw t0, VCPU_MSA_CSR(k1)
- _ctcmsa MSA_CSR, zero
-1:
-#endif
-
- /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
- and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
- or v0, v0, ST0_CU0
- mtc0 v0, CP0_STATUS
- ehb
-
- /* Load up host GP */
- LONG_L gp, VCPU_HOST_GP(k1)
-
- /* Need a stack before we can jump to "C" */
- LONG_L sp, VCPU_HOST_STACK(k1)
-
- /* Saved host state */
- INT_ADDIU sp, sp, -PT_SIZE
-
- /*
- * XXXKYMA do we need to load the host ASID, maybe not because the
- * kernel entries are marked GLOBAL, need to verify
- */
-
- /* Restore host DDATA_LO */
- LONG_L k0, PT_HOST_USERLOCAL(sp)
- mtc0 k0, CP0_DDATA_LO
-
- /* Restore RDHWR access */
- PTR_LI k0, 0x2000000F
- mtc0 k0, CP0_HWRENA
-
- /* Jump to handler */
-FEXPORT(__kvm_mips_jump_to_handler)
- /*
- * XXXKYMA: not sure if this is safe, how large is the stack??
- * Now jump to the kvm_mips_handle_exit() to see if we can deal
- * with this in the kernel
- */
- PTR_LA t9, kvm_mips_handle_exit
- jalr.hb t9
- INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
-
- /* Return from handler Make sure interrupts are disabled */
- di
- ehb
-
- /*
- * XXXKYMA: k0/k1 could have been blown away if we processed
- * an exception while we were handling the exception from the
- * guest, reload k1
- */
-
- move k1, s1
- INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
- /*
- * Check return value, should tell us if we are returning to the
- * host (handle I/O etc)or resuming the guest
- */
- andi t0, v0, RESUME_HOST
- bnez t0, __kvm_mips_return_to_host
- nop
-
-__kvm_mips_return_to_guest:
- /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
- mtc0 s1, CP0_DDATA_LO
-
- /* Load up the Guest EBASE to minimize the window where BEV is set */
- LONG_L t0, VCPU_GUEST_EBASE(k1)
-
- /* Switch EBASE back to the one used by KVM */
- mfc0 v1, CP0_STATUS
- or k0, v1, ST0_BEV
- mtc0 k0, CP0_STATUS
- ehb
- mtc0 t0, CP0_EBASE
-
- /* Setup status register for running guest in UM */
- or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
- and v1, v1, ~(ST0_CU0 | ST0_MX)
- mtc0 v1, CP0_STATUS
- ehb
-
- /* Set Guest EPC */
- LONG_L t0, VCPU_PC(k1)
- mtc0 t0, CP0_EPC
-
- /* Set the ASID for the Guest Kernel */
- PTR_L t0, VCPU_COP0(k1)
- LONG_L t0, COP0_STATUS(t0)
- andi t0, KSU_USER | ST0_ERL | ST0_EXL
- xori t0, KSU_USER
- bnez t0, 1f /* If kernel */
- INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
-1:
- /* t1: contains the base of the ASID array, need to get the cpu id */
- LONG_L t2, TI_CPU($28) /* smp_processor_id */
- INT_SLL t2, t2, 2 /* x4 */
- REG_ADDU t3, t1, t2
- LONG_L k0, (t3)
-#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
- li t3, CPUINFO_SIZE/4
- mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
- LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
- and k0, k0, t2
-#else
- andi k0, k0, MIPS_ENTRYHI_ASID
-#endif
- mtc0 k0, CP0_ENTRYHI
- ehb
-
- /* Disable RDHWR access */
- mtc0 zero, CP0_HWRENA
-
- .set noat
- /* load the guest context from VCPU and return */
- LONG_L $0, VCPU_R0(k1)
- LONG_L $1, VCPU_R1(k1)
- LONG_L $2, VCPU_R2(k1)
- LONG_L $3, VCPU_R3(k1)
- LONG_L $4, VCPU_R4(k1)
- LONG_L $5, VCPU_R5(k1)
- LONG_L $6, VCPU_R6(k1)
- LONG_L $7, VCPU_R7(k1)
- LONG_L $8, VCPU_R8(k1)
- LONG_L $9, VCPU_R9(k1)
- LONG_L $10, VCPU_R10(k1)
- LONG_L $11, VCPU_R11(k1)
- LONG_L $12, VCPU_R12(k1)
- LONG_L $13, VCPU_R13(k1)
- LONG_L $14, VCPU_R14(k1)
- LONG_L $15, VCPU_R15(k1)
- LONG_L $16, VCPU_R16(k1)
- LONG_L $17, VCPU_R17(k1)
- LONG_L $18, VCPU_R18(k1)
- LONG_L $19, VCPU_R19(k1)
- LONG_L $20, VCPU_R20(k1)
- LONG_L $21, VCPU_R21(k1)
- LONG_L $22, VCPU_R22(k1)
- LONG_L $23, VCPU_R23(k1)
- LONG_L $24, VCPU_R24(k1)
- LONG_L $25, VCPU_R25(k1)
-
- /* $/k1 loaded later */
- LONG_L $28, VCPU_R28(k1)
- LONG_L $29, VCPU_R29(k1)
- LONG_L $30, VCPU_R30(k1)
- LONG_L $31, VCPU_R31(k1)
-
-FEXPORT(__kvm_mips_skip_guest_restore)
- LONG_L k0, VCPU_HI(k1)
- mthi k0
-
- LONG_L k0, VCPU_LO(k1)
- mtlo k0
-
- LONG_L k0, VCPU_R26(k1)
- LONG_L k1, VCPU_R27(k1)
-
- eret
- .set at
-
-__kvm_mips_return_to_host:
- /* EBASE is already pointing to Linux */
- LONG_L k1, VCPU_HOST_STACK(k1)
- INT_ADDIU k1,k1, -PT_SIZE
-
- /* Restore host DDATA_LO */
- LONG_L k0, PT_HOST_USERLOCAL(k1)
- mtc0 k0, CP0_DDATA_LO
-
- /*
- * r2/v0 is the return code, shift it down by 2 (arithmetic)
- * to recover the err code
- */
- INT_SRA k0, v0, 2
- move $2, k0
-
- /* Load context saved on the host stack */
- LONG_L $16, PT_R16(k1)
- LONG_L $17, PT_R17(k1)
- LONG_L $18, PT_R18(k1)
- LONG_L $19, PT_R19(k1)
- LONG_L $20, PT_R20(k1)
- LONG_L $21, PT_R21(k1)
- LONG_L $22, PT_R22(k1)
- LONG_L $23, PT_R23(k1)
-
- LONG_L $28, PT_R28(k1)
- LONG_L $29, PT_R29(k1)
- LONG_L $30, PT_R30(k1)
-
- LONG_L k0, PT_HI(k1)
- mthi k0
-
- LONG_L k0, PT_LO(k1)
- mtlo k0
-
- /* Restore RDHWR access */
- PTR_LI k0, 0x2000000F
- mtc0 k0, CP0_HWRENA
-
- /* Restore RA, which is the address we will return to */
- LONG_L ra, PT_R31(k1)
- j ra
- nop
-
-VECTOR_END(MIPSX(GuestExceptionEnd))
-.end MIPSX(GuestException)
-
-MIPSX(exceptions):
- ####
- ##### The exception handlers.
- #####
- .word _C_LABEL(MIPSX(GuestException)) # 0
- .word _C_LABEL(MIPSX(GuestException)) # 1
- .word _C_LABEL(MIPSX(GuestException)) # 2
- .word _C_LABEL(MIPSX(GuestException)) # 3
- .word _C_LABEL(MIPSX(GuestException)) # 4
- .word _C_LABEL(MIPSX(GuestException)) # 5
- .word _C_LABEL(MIPSX(GuestException)) # 6
- .word _C_LABEL(MIPSX(GuestException)) # 7
- .word _C_LABEL(MIPSX(GuestException)) # 8
- .word _C_LABEL(MIPSX(GuestException)) # 9
- .word _C_LABEL(MIPSX(GuestException)) # 10
- .word _C_LABEL(MIPSX(GuestException)) # 11
- .word _C_LABEL(MIPSX(GuestException)) # 12
- .word _C_LABEL(MIPSX(GuestException)) # 13
- .word _C_LABEL(MIPSX(GuestException)) # 14
- .word _C_LABEL(MIPSX(GuestException)) # 15
- .word _C_LABEL(MIPSX(GuestException)) # 16
- .word _C_LABEL(MIPSX(GuestException)) # 17
- .word _C_LABEL(MIPSX(GuestException)) # 18
- .word _C_LABEL(MIPSX(GuestException)) # 19
- .word _C_LABEL(MIPSX(GuestException)) # 20
- .word _C_LABEL(MIPSX(GuestException)) # 21
- .word _C_LABEL(MIPSX(GuestException)) # 22
- .word _C_LABEL(MIPSX(GuestException)) # 23
- .word _C_LABEL(MIPSX(GuestException)) # 24
- .word _C_LABEL(MIPSX(GuestException)) # 25
- .word _C_LABEL(MIPSX(GuestException)) # 26
- .word _C_LABEL(MIPSX(GuestException)) # 27
- .word _C_LABEL(MIPSX(GuestException)) # 28
- .word _C_LABEL(MIPSX(GuestException)) # 29
- .word _C_LABEL(MIPSX(GuestException)) # 30
- .word _C_LABEL(MIPSX(GuestException)) # 31
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 44da5259f390..a6ea084b4d9d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -9,6 +9,7 @@
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
@@ -147,7 +148,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
/* Put the pages we reserved for the guest pmap */
for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
- kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
}
kfree(kvm->arch.guest_pmap);
@@ -244,10 +245,27 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
}
+static inline void dump_handler(const char *symbol, void *start, void *end)
+{
+ u32 *p;
+
+ pr_debug("LEAF(%s)\n", symbol);
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+
+ for (p = start; p < (u32 *)end; ++p)
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
+
+ pr_debug("\t.set\tpop\n");
+
+ pr_debug("\tEND(%s)\n", symbol);
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
- int err, size, offset;
- void *gebase;
+ int err, size;
+ void *gebase, *p, *handler;
int i;
struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
@@ -273,9 +291,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
else
size = 0x4000;
- /* Save Linux EBASE */
- vcpu->arch.host_ebase = (void *)read_c0_ebase();
-
gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
if (!gebase) {
@@ -285,44 +300,53 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
+ /*
+ * Check new ebase actually fits in CP0_EBase. The lack of a write gate
+ * limits us to the low 512MB of physical address space. If the memory
+ * we allocate is out of range, just give up now.
+ */
+ if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
+ kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+ gebase);
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
/* Save new ebase */
vcpu->arch.guest_ebase = gebase;
- /* Copy L1 Guest Exception handler to correct offset */
+ /* Build guest exception vectors dynamically in unmapped memory */
+ handler = gebase + 0x2000;
/* TLB Refill, EXL = 0 */
- memcpy(gebase, mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase, handler);
/* General Exception Entry point */
- memcpy(gebase + 0x180, mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase + 0x180, handler);
/* For vectored interrupts poke the exception code @ all offsets 0-7 */
for (i = 0; i < 8; i++) {
kvm_debug("L1 Vectored handler @ %p\n",
gebase + 0x200 + (i * VECTORSPACING));
- memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
+ handler);
}
- /* General handler, relocate to unmapped space for sanity's sake */
- offset = 0x2000;
- kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
- gebase + offset,
- mips32_GuestExceptionEnd - mips32_GuestException);
+ /* General exit handler */
+ p = handler;
+ p = kvm_mips_build_exit(p);
- memcpy(gebase + offset, mips32_GuestException,
- mips32_GuestExceptionEnd - mips32_GuestException);
+ /* Guest entry routine */
+ vcpu->arch.vcpu_run = p;
+ p = kvm_mips_build_vcpu_run(p);
-#ifdef MODULE
- offset += mips32_GuestExceptionEnd - mips32_GuestException;
- memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
- __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
- vcpu->arch.vcpu_run = gebase + offset;
-#else
- vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
-#endif
+ /* Dump the generated code */
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+ dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
+ dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
+ dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
@@ -408,17 +432,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_mips_deliver_interrupts(vcpu,
kvm_read_c0_guest_cause(vcpu->arch.cop0));
- __kvm_guest_enter();
+ guest_enter_irqoff();
/* Disable hardware page table walking while in guest */
htw_stop();
+ trace_kvm_enter(vcpu);
r = vcpu->arch.vcpu_run(run, vcpu);
+ trace_kvm_out(vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
- __kvm_guest_exit();
+ guest_exit_irqoff();
local_irq_enable();
if (vcpu->sigset_active)
@@ -507,8 +533,10 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_R30,
KVM_REG_MIPS_R31,
+#ifndef CONFIG_CPU_MIPSR6
KVM_REG_MIPS_HI,
KVM_REG_MIPS_LO,
+#endif
KVM_REG_MIPS_PC,
KVM_REG_MIPS_CP0_INDEX,
@@ -539,6 +567,104 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_COUNT_HZ,
};
+static u64 kvm_mips_get_one_regs_fpu[] = {
+ KVM_REG_MIPS_FCR_IR,
+ KVM_REG_MIPS_FCR_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_msa[] = {
+ KVM_REG_MIPS_MSA_IR,
+ KVM_REG_MIPS_MSA_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_kscratch[] = {
+ KVM_REG_MIPS_CP0_KSCRATCH1,
+ KVM_REG_MIPS_CP0_KSCRATCH2,
+ KVM_REG_MIPS_CP0_KSCRATCH3,
+ KVM_REG_MIPS_CP0_KSCRATCH4,
+ KVM_REG_MIPS_CP0_KSCRATCH5,
+ KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
+static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long ret;
+
+ ret = ARRAY_SIZE(kvm_mips_get_one_regs);
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
+ /* odd doubles */
+ if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
+ ret += 16;
+ }
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
+ ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
+ ret += kvm_mips_callbacks->num_regs(vcpu);
+
+ return ret;
+}
+
+static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+ u64 index;
+ unsigned int i;
+
+ if (copy_to_user(indices, kvm_mips_get_one_regs,
+ sizeof(kvm_mips_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs);
+
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
+ sizeof(kvm_mips_get_one_regs_fpu)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_FPR_32(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+
+ /* skip odd doubles if no F64 */
+ if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+ continue;
+
+ index = KVM_REG_MIPS_FPR_64(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
+ sizeof(kvm_mips_get_one_regs_msa)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_VEC_128(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ for (i = 0; i < 6; ++i) {
+ if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
+ continue;
+
+ if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
+ sizeof(kvm_mips_get_one_regs_kscratch[i])))
+ return -EFAULT;
+ ++indices;
+ }
+
+ return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
+}
+
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -554,12 +680,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
+#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
v = (long)vcpu->arch.hi;
break;
case KVM_REG_MIPS_LO:
v = (long)vcpu->arch.lo;
break;
+#endif
case KVM_REG_MIPS_PC:
v = (long)vcpu->arch.pc;
break;
@@ -688,17 +816,37 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_ERROREPC:
v = (long)kvm_read_c0_guest_errorepc(cop0);
break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ v = (long)kvm_read_c0_guest_kscratch1(cop0);
+ break;
+ case 3:
+ v = (long)kvm_read_c0_guest_kscratch2(cop0);
+ break;
+ case 4:
+ v = (long)kvm_read_c0_guest_kscratch3(cop0);
+ break;
+ case 5:
+ v = (long)kvm_read_c0_guest_kscratch4(cop0);
+ break;
+ case 6:
+ v = (long)kvm_read_c0_guest_kscratch5(cop0);
+ break;
+ case 7:
+ v = (long)kvm_read_c0_guest_kscratch6(cop0);
+ break;
+ }
+ break;
/* registers to be handled specially */
- case KVM_REG_MIPS_CP0_COUNT:
- case KVM_REG_MIPS_COUNT_CTL:
- case KVM_REG_MIPS_COUNT_RESUME:
- case KVM_REG_MIPS_COUNT_HZ:
+ default:
ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
if (ret)
return ret;
break;
- default:
- return -EINVAL;
}
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -755,12 +903,14 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
break;
+#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
vcpu->arch.hi = v;
break;
case KVM_REG_MIPS_LO:
vcpu->arch.lo = v;
break;
+#endif
case KVM_REG_MIPS_PC:
vcpu->arch.pc = v;
break;
@@ -859,22 +1009,34 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_ERROREPC:
kvm_write_c0_guest_errorepc(cop0, v);
break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ kvm_write_c0_guest_kscratch1(cop0, v);
+ break;
+ case 3:
+ kvm_write_c0_guest_kscratch2(cop0, v);
+ break;
+ case 4:
+ kvm_write_c0_guest_kscratch3(cop0, v);
+ break;
+ case 5:
+ kvm_write_c0_guest_kscratch4(cop0, v);
+ break;
+ case 6:
+ kvm_write_c0_guest_kscratch5(cop0, v);
+ break;
+ case 7:
+ kvm_write_c0_guest_kscratch6(cop0, v);
+ break;
+ }
+ break;
/* registers to be handled specially */
- case KVM_REG_MIPS_CP0_COUNT:
- case KVM_REG_MIPS_CP0_COMPARE:
- case KVM_REG_MIPS_CP0_CAUSE:
- case KVM_REG_MIPS_CP0_CONFIG:
- case KVM_REG_MIPS_CP0_CONFIG1:
- case KVM_REG_MIPS_CP0_CONFIG2:
- case KVM_REG_MIPS_CP0_CONFIG3:
- case KVM_REG_MIPS_CP0_CONFIG4:
- case KVM_REG_MIPS_CP0_CONFIG5:
- case KVM_REG_MIPS_COUNT_CTL:
- case KVM_REG_MIPS_COUNT_RESUME:
- case KVM_REG_MIPS_COUNT_HZ:
- return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
default:
- return -EINVAL;
+ return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
}
return 0;
}
@@ -927,23 +1089,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
- u64 __user *reg_dest;
struct kvm_reg_list reg_list;
unsigned n;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
- reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+ reg_list.n = kvm_mips_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
return -EFAULT;
if (n < reg_list.n)
return -E2BIG;
- reg_dest = user_list->reg;
- if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
- sizeof(kvm_mips_get_one_regs)))
- return -EFAULT;
- return 0;
+ return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
}
case KVM_NMI:
/* Treat the NMI as a CPU reset */
@@ -1222,7 +1379,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
static void kvm_mips_set_c0_status(void)
{
- uint32_t status = read_c0_status();
+ u32 status = read_c0_status();
if (cpu_has_dsp)
status |= (ST0_MX);
@@ -1236,9 +1393,9 @@ static void kvm_mips_set_c0_status(void)
*/
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- uint32_t cause = vcpu->arch.host_cp0_cause;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -1260,6 +1417,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
cause, opc, run, vcpu);
+ trace_kvm_exit(vcpu, exccode);
/*
* Do a privilege check, if in UM most of these exit conditions end up
@@ -1279,7 +1437,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
++vcpu->stat.int_exits;
- trace_kvm_exit(vcpu, INT_EXITS);
if (need_resched())
cond_resched();
@@ -1291,7 +1448,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
++vcpu->stat.cop_unusable_exits;
- trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
/* XXXKYMA: Might need to return to user space */
if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
@@ -1300,7 +1456,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EXCCODE_MOD:
++vcpu->stat.tlbmod_exits;
- trace_kvm_exit(vcpu, TLBMOD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
break;
@@ -1310,7 +1465,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
badvaddr);
++vcpu->stat.tlbmiss_st_exits;
- trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
break;
@@ -1319,61 +1473,51 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
cause, opc, badvaddr);
++vcpu->stat.tlbmiss_ld_exits;
- trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
break;
case EXCCODE_ADES:
++vcpu->stat.addrerr_st_exits;
- trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
break;
case EXCCODE_ADEL:
++vcpu->stat.addrerr_ld_exits;
- trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
break;
case EXCCODE_SYS:
++vcpu->stat.syscall_exits;
- trace_kvm_exit(vcpu, SYSCALL_EXITS);
ret = kvm_mips_callbacks->handle_syscall(vcpu);
break;
case EXCCODE_RI:
++vcpu->stat.resvd_inst_exits;
- trace_kvm_exit(vcpu, RESVD_INST_EXITS);
ret = kvm_mips_callbacks->handle_res_inst(vcpu);
break;
case EXCCODE_BP:
++vcpu->stat.break_inst_exits;
- trace_kvm_exit(vcpu, BREAK_INST_EXITS);
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
case EXCCODE_TR:
++vcpu->stat.trap_inst_exits;
- trace_kvm_exit(vcpu, TRAP_INST_EXITS);
ret = kvm_mips_callbacks->handle_trap(vcpu);
break;
case EXCCODE_MSAFPE:
++vcpu->stat.msa_fpe_exits;
- trace_kvm_exit(vcpu, MSA_FPE_EXITS);
ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
break;
case EXCCODE_FPE:
++vcpu->stat.fpe_exits;
- trace_kvm_exit(vcpu, FPE_EXITS);
ret = kvm_mips_callbacks->handle_fpe(vcpu);
break;
case EXCCODE_MSADIS:
++vcpu->stat.msa_disabled_exits;
- trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
break;
@@ -1400,11 +1544,13 @@ skip_emul:
run->exit_reason = KVM_EXIT_INTR;
ret = (-EINTR << 2) | RESUME_HOST;
++vcpu->stat.signal_exits;
- trace_kvm_exit(vcpu, SIGNAL_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
}
}
if (ret == RESUME_GUEST) {
+ trace_kvm_reenter(vcpu);
+
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
* is live), restore FCR31 / MSACSR.
@@ -1450,7 +1596,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
* not to clobber the status register directly via the commpage.
*/
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
@@ -1465,9 +1611,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
enable_fpu_hazard();
/* If guest FPU state not active, restore it now */
- if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+ if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
__kvm_restore_fpu(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+ } else {
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
}
preempt_enable();
@@ -1494,8 +1643,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
* interacts with MSA state, so play it safe and save it first.
*/
if (!(sr & ST0_FR) &&
- (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
- KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+ (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
+ KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
kvm_lose_fpu(vcpu);
change_c0_status(ST0_CU1 | ST0_FR, sr);
@@ -1509,22 +1658,26 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
- switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
- case KVM_MIPS_FPU_FPU:
+ switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
+ case KVM_MIPS_AUX_FPU:
/*
* Guest FPU state already loaded, only restore upper MSA state
*/
__kvm_restore_msa_upper(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
break;
case 0:
/* Neither FPU or MSA already active, restore full MSA state */
__kvm_restore_msa(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
if (kvm_mips_guest_has_fpu(&vcpu->arch))
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
+ KVM_TRACE_AUX_FPU_MSA);
break;
default:
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
break;
}
@@ -1536,13 +1689,15 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
disable_msa();
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
}
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
}
preempt_enable();
}
@@ -1558,25 +1713,27 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
*/
preempt_disable();
- if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
__kvm_save_msa(&vcpu->arch);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
/* Disable MSA & FPU */
disable_msa();
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
disable_fpu_hazard();
}
- vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
- } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
+ } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
set_c0_status(ST0_CU1);
enable_fpu_hazard();
__kvm_save_fpu(&vcpu->arch);
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
/* Disable FPU */
clear_c0_status(ST0_CU1 | ST0_FR);
@@ -1638,6 +1795,10 @@ static int __init kvm_mips_init(void)
{
int ret;
+ ret = kvm_mips_entry_setup();
+ if (ret)
+ return ret;
+
ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret)
@@ -1645,18 +1806,6 @@ static int __init kvm_mips_init(void)
register_die_notifier(&kvm_mips_csr_die_notifier);
- /*
- * On MIPS, kernel modules are executed from "mapped space", which
- * requires TLBs. The TLB handling code is statically linked with
- * the rest of the kernel (tlb.c) to avoid the possibility of
- * double faulting. The issue is that the TLB code references
- * routines that are part of the the KVM module, which are only
- * available once the module is loaded.
- */
- kvm_mips_gfn_to_pfn = gfn_to_pfn;
- kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
- kvm_mips_is_error_pfn = is_error_pfn;
-
return 0;
}
@@ -1664,10 +1813,6 @@ static void __exit kvm_mips_exit(void)
{
kvm_exit();
- kvm_mips_gfn_to_pfn = NULL;
- kvm_mips_release_pfn_clean = NULL;
- kvm_mips_is_error_pfn = NULL;
-
unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
new file mode 100644
index 000000000000..57319ee57c4f
--- /dev/null
+++ b/arch/mips/kvm/mmu.c
@@ -0,0 +1,375 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS MMU handling in the KVM module.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/highmem.h>
+#include <linux/kvm_host.h>
+#include <asm/mmu_context.h>
+
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_kernel_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
+}
+
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_user_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
+}
+
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+ int srcu_idx, err = 0;
+ kvm_pfn_t pfn;
+
+ if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+ return 0;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ pfn = gfn_to_pfn(kvm, gfn);
+
+ if (is_error_pfn(pfn)) {
+ kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
+ err = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.guest_pmap[gfn] = pfn;
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long gva)
+{
+ gfn_t gfn;
+ unsigned long offset = gva & ~PAGE_MASK;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+ kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+ __builtin_return_address(0), gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+ gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return KVM_INVALID_ADDR;
+
+ return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ gfn_t gfn;
+ kvm_pfn_t pfn0, pfn1;
+ unsigned long vaddr = 0;
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ const int flush_dcache_mask = 0;
+ int ret;
+
+ if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+ kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+ vaddr = badvaddr & (PAGE_MASK << 1);
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
+ pfn1 = kvm->arch.guest_pmap[gfn | 0x1];
+
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
+
+ preempt_disable();
+ entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ flush_dcache_mask);
+ preempt_enable();
+
+ return ret;
+}
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ kvm_pfn_t pfn0, pfn1;
+ int ret;
+
+ if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+ pfn0 = 0;
+ pfn1 = 0;
+ } else {
+ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
+ >> PAGE_SHIFT) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
+ >> PAGE_SHIFT) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[
+ mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
+ pfn1 = kvm->arch.guest_pmap[
+ mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
+ }
+
+ /* Get attributes from the Guest TLB */
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ (tlb->tlb_lo[0] & ENTRYLO_D) |
+ (tlb->tlb_lo[0] & ENTRYLO_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ (tlb->tlb_lo[1] & ENTRYLO_D) |
+ (tlb->tlb_lo[1] & ENTRYLO_V);
+
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo[0], tlb->tlb_lo[1]);
+
+ preempt_disable();
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) :
+ kvm_mips_get_user_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ tlb->tlb_mask);
+ preempt_enable();
+
+ return ret;
+}
+
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned long asid = asid_cache(cpu);
+
+ asid += cpu_asid_inc();
+ if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ kvm_local_flush_tlb_all(); /* start new asid cycle */
+
+ if (!asid) /* fix version if needed */
+ asid = asid_first_version(cpu);
+ }
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+ if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+ hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+ unsigned long flags;
+ int newasid = 0;
+
+ kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+ /* Allocate new kernel and user ASIDs if needed */
+
+ local_irq_save(flags);
+
+ if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+ asid_version_mask(cpu)) {
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+ vcpu->arch.guest_user_asid[cpu] =
+ vcpu->arch.guest_user_mm.context.asid[cpu];
+ newasid++;
+
+ kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+ vcpu->arch.guest_user_asid[cpu]);
+ }
+
+ if (vcpu->arch.last_sched_cpu != cpu) {
+ kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ /*
+ * Migrate the timer interrupt to the current CPU so that it
+ * always interrupts the guest and synchronously triggers a
+ * guest timer interrupt.
+ */
+ kvm_mips_migrate_count(vcpu);
+ }
+
+ if (!newasid) {
+ /*
+ * If we preempted while the guest was executing, then reload
+ * the pre-empted ASID
+ */
+ if (current->flags & PF_VCPU) {
+ write_c0_entryhi(vcpu->arch.
+ preempt_entryhi & asid_mask);
+ ehb();
+ }
+ } else {
+ /* New ASIDs were allocated for the VM */
+
+ /*
+ * Were we in guest context? If so then the pre-empted ASID is
+ * no longer valid, we need to set it to what it should be based
+ * on the mode of the Guest (Kernel/User)
+ */
+ if (current->flags & PF_VCPU) {
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi(vcpu->arch.
+ guest_kernel_asid[cpu] &
+ asid_mask);
+ else
+ write_c0_entryhi(vcpu->arch.
+ guest_user_asid[cpu] &
+ asid_mask);
+ ehb();
+ }
+ }
+
+ /* restore guest state to registers */
+ kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
+ local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ int cpu;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ vcpu->arch.preempt_entryhi = read_c0_entryhi();
+ vcpu->arch.last_sched_cpu = cpu;
+
+ /* save guest state in registers */
+ kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
+ if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+ asid_version_mask(cpu))) {
+ kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
+ cpu_context(cpu, current->mm));
+ drop_mmu_context(current->mm, cpu);
+ }
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+ ehb();
+
+ local_irq_restore(flags);
+}
+
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long paddr, flags, vpn2, asid;
+ unsigned long va = (unsigned long)opc;
+ void *vaddr;
+ u32 inst;
+ int index;
+
+ if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ index = kvm_mips_host_tlb_lookup(vcpu, va);
+ if (index >= 0) {
+ inst = *(opc);
+ } else {
+ vpn2 = va & VPN2_MASK;
+ asid = kvm_read_c0_guest_entryhi(cop0) &
+ KVM_ENTRYHI_ASID;
+ index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
+ if (index < 0) {
+ kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, vcpu, read_c0_entryhi());
+ kvm_mips_dump_host_tlbs();
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.
+ guest_tlb[index]);
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
+ } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
+ vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
+ vaddr += paddr & ~PAGE_MASK;
+ inst = *(u32 *)vaddr;
+ kunmap_atomic(vaddr);
+ } else {
+ kvm_err("%s: illegal address: %p\n", __func__, opc);
+ return KVM_INVALID_INST;
+ }
+
+ return inst;
+}
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index 888bb67070ac..53f851a61554 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -11,27 +11,6 @@
#include <linux/kvm_host.h>
-char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
- "WAIT",
- "CACHE",
- "Signal",
- "Interrupt",
- "COP0/1 Unusable",
- "TLB Mod",
- "TLB Miss (LD)",
- "TLB Miss (ST)",
- "Address Err (ST)",
- "Address Error (LD)",
- "System Call",
- "Reserved Inst",
- "Break Inst",
- "Trap Inst",
- "MSA FPE",
- "FPE",
- "MSA Disabled",
- "D-Cache Flushes",
-};
-
char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
"Index",
"Random",
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index ed021ae7867a..254377d8e0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -14,7 +14,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>
@@ -24,6 +24,7 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
+#include <asm/tlbdebug.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
@@ -32,22 +33,10 @@
#define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1
-#define PRIx64 "llx"
-
atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);
-/* These function pointers are initialized once the KVM module is loaded */
-kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
-
-void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
-
-bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
-
-uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
@@ -55,7 +44,7 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
cpu_asid_mask(&cpu_data[cpu]);
}
-uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
@@ -63,7 +52,7 @@ uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
cpu_asid_mask(&cpu_data[cpu]);
}
-inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.commpage_tlb;
}
@@ -72,50 +61,15 @@ inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
void kvm_mips_dump_host_tlbs(void)
{
- unsigned long old_entryhi;
- unsigned long old_pagemask;
- struct kvm_mips_tlb tlb;
unsigned long flags;
- int i;
local_irq_save(flags);
- old_entryhi = read_c0_entryhi();
- old_pagemask = read_c0_pagemask();
-
kvm_info("HOST TLBs:\n");
- kvm_info("ASID: %#lx\n", read_c0_entryhi() &
- cpu_asid_mask(&current_cpu_data));
-
- for (i = 0; i < current_cpu_data.tlbsize; i++) {
- write_c0_index(i);
- mtc0_tlbw_hazard();
-
- tlb_read();
- tlbw_use_hazard();
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
- tlb.tlb_hi = read_c0_entryhi();
- tlb.tlb_lo0 = read_c0_entrylo0();
- tlb.tlb_lo1 = read_c0_entrylo1();
- tlb.tlb_mask = read_c0_pagemask();
-
- kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
- i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
- }
- write_c0_entryhi(old_entryhi);
- write_c0_pagemask(old_pagemask);
- mtc0_tlbw_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
@@ -132,74 +86,24 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
tlb = vcpu->arch.guest_tlb[i];
kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
+ ? ' ' : '*',
i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ kvm_info("Lo0=0x%09llx %c%c attr %lx ",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
+ (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
+ kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
+ (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
+ tlb.tlb_mask);
}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
-{
- int srcu_idx, err = 0;
- kvm_pfn_t pfn;
-
- if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
- return 0;
-
- srcu_idx = srcu_read_lock(&kvm->srcu);
- pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
-
- if (kvm_mips_is_error_pfn(pfn)) {
- kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
- err = -EFAULT;
- goto out;
- }
-
- kvm->arch.guest_pmap[gfn] = pfn;
-out:
- srcu_read_unlock(&kvm->srcu, srcu_idx);
- return err;
-}
-
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
- unsigned long gva)
-{
- gfn_t gfn;
- uint32_t offset = gva & ~PAGE_MASK;
- struct kvm *kvm = vcpu->kvm;
-
- if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
- kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
- __builtin_return_address(0), gva);
- return KVM_INVALID_PAGE;
- }
-
- gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
- gva);
- return KVM_INVALID_PAGE;
- }
-
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return KVM_INVALID_ADDR;
-
- return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
-
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
@@ -243,12 +147,12 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
/* Flush D-cache */
if (flush_dcache_mask) {
- if (entrylo0 & MIPS3_PG_V) {
+ if (entrylo0 & ENTRYLO_V) {
++vcpu->stat.flush_dcache_exits;
flush_data_cache_page((entryhi & VPN2_MASK) &
~flush_dcache_mask);
}
- if (entrylo1 & MIPS3_PG_V) {
+ if (entrylo1 & ENTRYLO_V) {
++vcpu->stat.flush_dcache_exits;
flush_data_cache_page(((entryhi & VPN2_MASK) &
~flush_dcache_mask) |
@@ -259,96 +163,35 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
return 0;
}
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu)
-{
- gfn_t gfn;
- kvm_pfn_t pfn0, pfn1;
- unsigned long vaddr = 0;
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- int even;
- struct kvm *kvm = vcpu->kvm;
- const int flush_dcache_mask = 0;
- int ret;
-
- if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
- kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
-
- gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
- gfn, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
- even = !(gfn & 0x1);
- vaddr = badvaddr & (PAGE_MASK << 1);
-
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return -1;
-
- if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
- return -1;
-
- if (even) {
- pfn0 = kvm->arch.guest_pmap[gfn];
- pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
- } else {
- pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
- pfn1 = kvm->arch.guest_pmap[gfn];
- }
-
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
-
- preempt_disable();
- entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
- ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- flush_dcache_mask);
- preempt_enable();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu)
{
- kvm_pfn_t pfn0, pfn1;
+ kvm_pfn_t pfn;
unsigned long flags, old_entryhi = 0, vaddr = 0;
- unsigned long entrylo0 = 0, entrylo1 = 0;
+ unsigned long entrylo[2] = { 0, 0 };
+ unsigned int pair_idx;
- pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
- pfn1 = 0;
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
- entrylo1 = 0;
+ pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
+ pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
+ entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
local_irq_save(flags);
old_entryhi = read_c0_entryhi();
vaddr = badvaddr & (PAGE_MASK << 1);
write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
- mtc0_tlbw_hazard();
- write_c0_entrylo0(entrylo0);
- mtc0_tlbw_hazard();
- write_c0_entrylo1(entrylo1);
- mtc0_tlbw_hazard();
+ write_c0_entrylo0(entrylo[0]);
+ write_c0_entrylo1(entrylo[1]);
write_c0_index(kvm_mips_get_commpage_asid(vcpu));
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
tlbw_use_hazard();
kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
@@ -358,68 +201,12 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long *hpa0,
- unsigned long *hpa1)
-{
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- struct kvm *kvm = vcpu->kvm;
- kvm_pfn_t pfn0, pfn1;
- int ret;
-
- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
- pfn0 = 0;
- pfn1 = 0;
- } else {
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT];
- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT];
- }
-
- if (hpa0)
- *hpa0 = pfn0 << PAGE_SHIFT;
-
- if (hpa1)
- *hpa1 = pfn1 << PAGE_SHIFT;
-
- /* Get attributes from the Guest TLB */
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-
- kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
- tlb->tlb_lo0, tlb->tlb_lo1);
-
- preempt_disable();
- entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
- kvm_mips_get_kernel_asid(vcpu) :
- kvm_mips_get_user_asid(vcpu));
- ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- tlb->tlb_mask);
- preempt_enable();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
-
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
int i;
@@ -435,7 +222,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
}
kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
- __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+ __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
return index;
}
@@ -467,7 +254,6 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
@@ -498,21 +284,16 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
if (idx > 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
-
write_c0_entrylo0(0);
- mtc0_tlbw_hazard();
-
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
+ tlbw_use_hazard();
}
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
@@ -540,61 +321,39 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
/* Blast 'em all away. */
for (entry = 0; entry < maxentry; entry++) {
write_c0_index(entry);
- mtc0_tlbw_hazard();
if (skip_kseg0) {
+ mtc0_tlbr_hazard();
tlb_read();
- tlbw_use_hazard();
+ tlb_read_hazard();
entryhi = read_c0_entryhi();
/* Don't blow away guest kernel entries */
if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
continue;
+
+ write_c0_pagemask(old_pagemask);
}
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
- mtc0_tlbw_hazard();
write_c0_entrylo0(0);
- mtc0_tlbw_hazard();
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
+ tlbw_use_hazard();
}
- tlbw_use_hazard();
-
write_c0_entryhi(old_entryhi);
write_c0_pagemask(old_pagemask);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
- struct kvm_vcpu *vcpu)
-{
- unsigned long asid = asid_cache(cpu);
-
- asid += cpu_asid_inc();
- if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- kvm_local_flush_tlb_all(); /* start new asid cycle */
-
- if (!asid) /* fix version if needed */
- asid = asid_first_version(cpu);
- }
-
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
void kvm_local_flush_tlb_all(void)
{
unsigned long flags;
@@ -614,185 +373,12 @@ void kvm_local_flush_tlb_all(void)
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_write_indexed();
+ tlbw_use_hazard();
entry++;
}
- tlbw_use_hazard();
write_c0_entryhi(old_ctx);
mtc0_tlbw_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
-
-/**
- * kvm_mips_migrate_count() - Migrate timer.
- * @vcpu: Virtual CPU.
- *
- * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
- * if it was running prior to being cancelled.
- *
- * Must be called when the VCPU is migrated to a different CPU to ensure that
- * timer expiry during guest execution interrupts the guest and causes the
- * interrupt to be delivered in a timely manner.
- */
-static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
-{
- if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
- hrtimer_restart(&vcpu->arch.comparecount_timer);
-}
-
-/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
- unsigned long flags;
- int newasid = 0;
-
- kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-
- /* Allocate new kernel and user ASIDs if needed */
-
- local_irq_save(flags);
-
- if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
- asid_version_mask(cpu)) {
- kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
- vcpu->arch.guest_kernel_asid[cpu] =
- vcpu->arch.guest_kernel_mm.context.asid[cpu];
- kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
- vcpu->arch.guest_user_asid[cpu] =
- vcpu->arch.guest_user_mm.context.asid[cpu];
- newasid++;
-
- kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
- cpu_context(cpu, current->mm));
- kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
- cpu, vcpu->arch.guest_kernel_asid[cpu]);
- kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
- vcpu->arch.guest_user_asid[cpu]);
- }
-
- if (vcpu->arch.last_sched_cpu != cpu) {
- kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
- vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
- /*
- * Migrate the timer interrupt to the current CPU so that it
- * always interrupts the guest and synchronously triggers a
- * guest timer interrupt.
- */
- kvm_mips_migrate_count(vcpu);
- }
-
- if (!newasid) {
- /*
- * If we preempted while the guest was executing, then reload
- * the pre-empted ASID
- */
- if (current->flags & PF_VCPU) {
- write_c0_entryhi(vcpu->arch.
- preempt_entryhi & asid_mask);
- ehb();
- }
- } else {
- /* New ASIDs were allocated for the VM */
-
- /*
- * Were we in guest context? If so then the pre-empted ASID is
- * no longer valid, we need to set it to what it should be based
- * on the mode of the Guest (Kernel/User)
- */
- if (current->flags & PF_VCPU) {
- if (KVM_GUEST_KERNEL_MODE(vcpu))
- write_c0_entryhi(vcpu->arch.
- guest_kernel_asid[cpu] &
- asid_mask);
- else
- write_c0_entryhi(vcpu->arch.
- guest_user_asid[cpu] &
- asid_mask);
- ehb();
- }
- }
-
- /* restore guest state to registers */
- kvm_mips_callbacks->vcpu_set_regs(vcpu);
-
- local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
-
-/* ASID can change if another task is scheduled during preemption */
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- uint32_t cpu;
-
- local_irq_save(flags);
-
- cpu = smp_processor_id();
-
- vcpu->arch.preempt_entryhi = read_c0_entryhi();
- vcpu->arch.last_sched_cpu = cpu;
-
- /* save guest state in registers */
- kvm_mips_callbacks->vcpu_get_regs(vcpu);
-
- if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- asid_version_mask(cpu))) {
- kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
- cpu_context(cpu, current->mm));
- drop_mmu_context(current->mm, cpu);
- }
- write_c0_entryhi(cpu_asid(cpu, current->mm));
- ehb();
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
-
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- unsigned long paddr, flags, vpn2, asid;
- uint32_t inst;
- int index;
-
- if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
- KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
- if (index >= 0) {
- inst = *(opc);
- } else {
- vpn2 = (unsigned long) opc & VPN2_MASK;
- asid = kvm_read_c0_guest_entryhi(cop0) &
- KVM_ENTRYHI_ASID;
- index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
- if (index < 0) {
- kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
- __func__, opc, vcpu, read_c0_entryhi());
- kvm_mips_dump_host_tlbs();
- local_irq_restore(flags);
- return KVM_INVALID_INST;
- }
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.
- guest_tlb[index],
- NULL, NULL);
- inst = *(opc);
- }
- local_irq_restore(flags);
- } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- paddr =
- kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
- (unsigned long) opc);
- inst = *(uint32_t *) CKSEG0ADDR(paddr);
- } else {
- kvm_err("%s: illegal address: %p\n", __func__, opc);
- return KVM_INVALID_INST;
- }
-
- return inst;
-}
-EXPORT_SYMBOL_GPL(kvm_get_inst);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index bd6437f67dc0..c858cf168078 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -17,8 +17,75 @@
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
-/* Tracepoints for VM eists */
-extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+/*
+ * Tracepoints for VM enters
+ */
+DECLARE_EVENT_CLASS(kvm_transition,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ ),
+
+ TP_printk("PC: 0x%08lx",
+ __entry->pc)
+);
+
+DEFINE_EVENT(kvm_transition, kvm_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_reenter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_out,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+/* The first 32 exit reasons correspond to Cause.ExcCode */
+#define KVM_TRACE_EXIT_INT 0
+#define KVM_TRACE_EXIT_TLBMOD 1
+#define KVM_TRACE_EXIT_TLBMISS_LD 2
+#define KVM_TRACE_EXIT_TLBMISS_ST 3
+#define KVM_TRACE_EXIT_ADDRERR_LD 4
+#define KVM_TRACE_EXIT_ADDRERR_ST 5
+#define KVM_TRACE_EXIT_SYSCALL 8
+#define KVM_TRACE_EXIT_BREAK_INST 9
+#define KVM_TRACE_EXIT_RESVD_INST 10
+#define KVM_TRACE_EXIT_COP_UNUSABLE 11
+#define KVM_TRACE_EXIT_TRAP_INST 13
+#define KVM_TRACE_EXIT_MSA_FPE 14
+#define KVM_TRACE_EXIT_FPE 15
+#define KVM_TRACE_EXIT_MSA_DISABLED 21
+/* Further exit reasons */
+#define KVM_TRACE_EXIT_WAIT 32
+#define KVM_TRACE_EXIT_CACHE 33
+#define KVM_TRACE_EXIT_SIGNAL 34
+
+/* Tracepoints for VM exits */
+#define kvm_trace_symbol_exit_types \
+ { KVM_TRACE_EXIT_INT, "Interrupt" }, \
+ { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \
+ { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \
+ { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_ST, "Address Err (ST)" }, \
+ { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \
+ { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \
+ { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \
+ { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \
+ { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \
+ { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
+ { KVM_TRACE_EXIT_FPE, "FPE" }, \
+ { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
+ { KVM_TRACE_EXIT_WAIT, "WAIT" }, \
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_SIGNAL, "Signal" }
TRACE_EVENT(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -34,10 +101,173 @@ TRACE_EVENT(kvm_exit,
),
TP_printk("[%s]PC: 0x%08lx",
- kvm_mips_exit_types_str[__entry->reason],
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
__entry->pc)
);
+#define KVM_TRACE_MFC0 0
+#define KVM_TRACE_MTC0 1
+#define KVM_TRACE_DMFC0 2
+#define KVM_TRACE_DMTC0 3
+#define KVM_TRACE_RDHWR 4
+
+#define KVM_TRACE_HWR_COP0 0
+#define KVM_TRACE_HWR_HWR 1
+
+#define KVM_TRACE_COP0(REG, SEL) ((KVM_TRACE_HWR_COP0 << 8) | \
+ ((REG) << 3) | (SEL))
+#define KVM_TRACE_HWR(REG, SEL) ((KVM_TRACE_HWR_HWR << 8) | \
+ ((REG) << 3) | (SEL))
+
+#define kvm_trace_symbol_hwr_ops \
+ { KVM_TRACE_MFC0, "MFC0" }, \
+ { KVM_TRACE_MTC0, "MTC0" }, \
+ { KVM_TRACE_DMFC0, "DMFC0" }, \
+ { KVM_TRACE_DMTC0, "DMTC0" }, \
+ { KVM_TRACE_RDHWR, "RDHWR" }
+
+#define kvm_trace_symbol_hwr_cop \
+ { KVM_TRACE_HWR_COP0, "COP0" }, \
+ { KVM_TRACE_HWR_HWR, "HWR" }
+
+#define kvm_trace_symbol_hwr_regs \
+ { KVM_TRACE_COP0( 0, 0), "Index" }, \
+ { KVM_TRACE_COP0( 2, 0), "EntryLo0" }, \
+ { KVM_TRACE_COP0( 3, 0), "EntryLo1" }, \
+ { KVM_TRACE_COP0( 4, 0), "Context" }, \
+ { KVM_TRACE_COP0( 4, 2), "UserLocal" }, \
+ { KVM_TRACE_COP0( 5, 0), "PageMask" }, \
+ { KVM_TRACE_COP0( 6, 0), "Wired" }, \
+ { KVM_TRACE_COP0( 7, 0), "HWREna" }, \
+ { KVM_TRACE_COP0( 8, 0), "BadVAddr" }, \
+ { KVM_TRACE_COP0( 9, 0), "Count" }, \
+ { KVM_TRACE_COP0(10, 0), "EntryHi" }, \
+ { KVM_TRACE_COP0(11, 0), "Compare" }, \
+ { KVM_TRACE_COP0(12, 0), "Status" }, \
+ { KVM_TRACE_COP0(12, 1), "IntCtl" }, \
+ { KVM_TRACE_COP0(12, 2), "SRSCtl" }, \
+ { KVM_TRACE_COP0(13, 0), "Cause" }, \
+ { KVM_TRACE_COP0(14, 0), "EPC" }, \
+ { KVM_TRACE_COP0(15, 0), "PRId" }, \
+ { KVM_TRACE_COP0(15, 1), "EBase" }, \
+ { KVM_TRACE_COP0(16, 0), "Config" }, \
+ { KVM_TRACE_COP0(16, 1), "Config1" }, \
+ { KVM_TRACE_COP0(16, 2), "Config2" }, \
+ { KVM_TRACE_COP0(16, 3), "Config3" }, \
+ { KVM_TRACE_COP0(16, 4), "Config4" }, \
+ { KVM_TRACE_COP0(16, 5), "Config5" }, \
+ { KVM_TRACE_COP0(16, 7), "Config7" }, \
+ { KVM_TRACE_COP0(26, 0), "ECC" }, \
+ { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
+ { KVM_TRACE_COP0(31, 2), "KScratch1" }, \
+ { KVM_TRACE_COP0(31, 3), "KScratch2" }, \
+ { KVM_TRACE_COP0(31, 4), "KScratch3" }, \
+ { KVM_TRACE_COP0(31, 5), "KScratch4" }, \
+ { KVM_TRACE_COP0(31, 6), "KScratch5" }, \
+ { KVM_TRACE_COP0(31, 7), "KScratch6" }, \
+ { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \
+ { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \
+ { KVM_TRACE_HWR( 2, 0), "CC" }, \
+ { KVM_TRACE_HWR( 3, 0), "CCRes" }, \
+ { KVM_TRACE_HWR(29, 0), "ULR" }
+
+TRACE_EVENT(kvm_hwr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
+ unsigned long val),
+ TP_ARGS(vcpu, op, reg, val),
+ TP_STRUCT__entry(
+ __field(unsigned long, val)
+ __field(u16, reg)
+ __field(u8, op)
+ ),
+
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->reg = reg;
+ __entry->op = op;
+ ),
+
+ TP_printk("%s %s (%s:%u:%u) 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_hwr_ops),
+ __print_symbolic(__entry->reg,
+ kvm_trace_symbol_hwr_regs),
+ __print_symbolic(__entry->reg >> 8,
+ kvm_trace_symbol_hwr_cop),
+ (__entry->reg >> 3) & 0x1f,
+ __entry->reg & 0x7,
+ __entry->val)
+);
+
+#define KVM_TRACE_AUX_RESTORE 0
+#define KVM_TRACE_AUX_SAVE 1
+#define KVM_TRACE_AUX_ENABLE 2
+#define KVM_TRACE_AUX_DISABLE 3
+#define KVM_TRACE_AUX_DISCARD 4
+
+#define KVM_TRACE_AUX_FPU 1
+#define KVM_TRACE_AUX_MSA 2
+#define KVM_TRACE_AUX_FPU_MSA 3
+
+#define kvm_trace_symbol_aux_op \
+ { KVM_TRACE_AUX_RESTORE, "restore" }, \
+ { KVM_TRACE_AUX_SAVE, "save" }, \
+ { KVM_TRACE_AUX_ENABLE, "enable" }, \
+ { KVM_TRACE_AUX_DISABLE, "disable" }, \
+ { KVM_TRACE_AUX_DISCARD, "discard" }
+
+#define kvm_trace_symbol_aux_state \
+ { KVM_TRACE_AUX_FPU, "FPU" }, \
+ { KVM_TRACE_AUX_MSA, "MSA" }, \
+ { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }
+
+TRACE_EVENT(kvm_aux,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
+ unsigned int state),
+ TP_ARGS(vcpu, op, state),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, op)
+ __field(u8, state)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->op = op;
+ __entry->state = state;
+ ),
+
+ TP_printk("%s %s PC: 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_aux_op),
+ __print_symbolic(__entry->state,
+ kvm_trace_symbol_aux_state),
+ __entry->pc)
+);
+
+TRACE_EVENT(kvm_asid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
+ unsigned int new_asid),
+ TP_ARGS(vcpu, old_asid, new_asid),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, old_asid)
+ __field(u8, new_asid)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->old_asid = old_asid;
+ __entry->new_asid = new_asid;
+ ),
+
+ TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
+ __entry->pc,
+ __entry->old_asid,
+ __entry->new_asid)
+);
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 6ba0fafcecbc..091553942bcb 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -21,7 +21,7 @@
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
gpa_t gpa;
- uint32_t kseg = KSEGX(gva);
+ gva_t kseg = KSEGX(gva);
if ((kseg == CKSEG0) || (kseg == CKSEG1))
gpa = CPHYSADDR(gva);
@@ -40,8 +40,8 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -87,15 +87,15 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
@@ -111,14 +111,14 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
* when we are not using HIGHMEM. Need to address this in a
* HIGHMEM kernel
*/
- kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} else {
- kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
@@ -128,59 +128,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
return ret;
}
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
- && KVM_GUEST_KERNEL_MODE(vcpu)) {
- if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- /*
- * All KSEG0 faults are handled by KVM, as the guest kernel does
- * not expect to ever get them
- */
- if (kvm_mips_handle_kseg0_tlb_fault
- (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else {
- kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -192,8 +145,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
}
} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
- vcpu->arch.pc, badvaddr);
+ kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
/*
* User Address (UA) fault, this could happen if
@@ -213,14 +166,18 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /*
+ * All KSEG0 faults are handled by KVM, as the guest kernel does
+ * not expect to ever get them
+ */
if (kvm_mips_handle_kseg0_tlb_fault
(vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else {
- kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
+ kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -229,12 +186,22 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, true);
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, false);
+}
+
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -251,7 +218,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else {
- kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -262,9 +229,9 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -280,7 +247,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else {
- kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -292,8 +259,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -310,8 +277,8 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -328,8 +295,8 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -346,8 +313,8 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -364,8 +331,8 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -382,8 +349,8 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -407,8 +374,8 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -451,24 +418,41 @@ static int kvm_trap_emul_vm_init(struct kvm *kvm)
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.kscratch_enabled = 0xfc;
+
return 0;
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t config1;
+ u32 config, config1;
int vcpu_id = vcpu->vcpu_id;
/*
* Arch specific stuff, set up config registers properly so that the
- * guest will come up as expected, for now we simulate a MIPS 24kc
+ * guest will come up as expected
*/
+#ifndef CONFIG_CPU_MIPSR6
+ /* r2-r5, simulate a MIPS 24kc */
kvm_write_c0_guest_prid(cop0, 0x00019300);
- /* Have config1, Cacheable, noncoherent, write-back, write allocate */
- kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
- (0x1 << CP0C0_AR) |
- (MMU_TYPE_R4000 << CP0C0_MT));
+#else
+ /* r6+, simulate a generic QEMU machine */
+ kvm_write_c0_guest_prid(cop0, 0x00010000);
+#endif
+ /*
+ * Have config1, Cacheable, noncoherent, write-back, write allocate.
+ * Endianness, arch revision & virtually tagged icache should match
+ * host.
+ */
+ config = read_c0_config() & MIPS_CONF_AR;
+ config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ config |= CONF_BE;
+#endif
+ if (cpu_has_vtag_icache)
+ config |= MIPS_CONF_VI;
+ kvm_write_c0_guest_config(cop0, config);
/* Read the cache characteristics from the host Config1 Register */
config1 = (read_c0_config1() & ~0x7f);
@@ -478,9 +462,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
/* We unset some bits that we aren't emulating */
- config1 &=
- ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
- (1 << CP0C1_WR) | (1 << CP0C1_CA));
+ config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
+ MIPS_CONF1_WR | MIPS_CONF1_CA);
kvm_write_c0_guest_config1(cop0, config1);
/* Have config3, no tertiary/secondary caches implemented */
@@ -511,6 +494,17 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *indices)
+{
+ return 0;
+}
+
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
@@ -660,6 +654,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
.irq_deliver = kvm_mips_irq_deliver_cb,
.irq_clear = kvm_mips_irq_clear_cb,
+ .num_regs = kvm_trap_emul_num_regs,
+ .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index ff17669e30a3..8ac0e5994ed2 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -66,7 +66,7 @@ int gic_present;
#endif
static int exin_avail;
-static struct resource ltq_eiu_irq[MAX_EIU];
+static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
@@ -75,7 +75,7 @@ static int ltq_perfcount_irq;
int ltq_eiu_get_irq(int exin)
{
if (exin < exin_avail)
- return ltq_eiu_irq[exin].start;
+ return ltq_eiu_irq[exin];
return -1;
}
@@ -125,8 +125,8 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
int i;
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
int val = 0;
int edge = 0;
@@ -173,8 +173,8 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
int i;
ltq_enable_irq(d);
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* by default we are low level triggered */
ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
/* clear all pending */
@@ -195,8 +195,8 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
int i;
ltq_disable_irq(d);
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -206,7 +206,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
}
static struct irq_chip ltq_irq_type = {
- "icu",
+ .name = "icu",
.irq_enable = ltq_enable_irq,
.irq_disable = ltq_disable_irq,
.irq_unmask = ltq_enable_irq,
@@ -216,7 +216,7 @@ static struct irq_chip ltq_irq_type = {
};
static struct irq_chip ltq_eiu_type = {
- "eiu",
+ .name = "eiu",
.irq_startup = ltq_startup_eiu_irq,
.irq_shutdown = ltq_shutdown_eiu_irq,
.irq_enable = ltq_enable_irq,
@@ -341,10 +341,10 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
return 0;
for (i = 0; i < exin_avail; i++)
- if (hw == ltq_eiu_irq[i].start)
+ if (hw == ltq_eiu_irq[i])
chip = &ltq_eiu_type;
- irq_set_chip_and_handler(hw, chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, chip, handle_level_irq);
return 0;
}
@@ -439,14 +439,15 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
/* find out how many external irq sources we have */
- exin_avail = of_irq_count(eiu_node);
+ exin_avail = of_property_count_u32_elems(eiu_node,
+ "lantiq,eiu-irqs");
if (exin_avail > MAX_EIU)
exin_avail = MAX_EIU;
- ret = of_irq_to_resource_table(eiu_node,
+ ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
ltq_eiu_irq, exin_avail);
- if (ret != exin_avail)
+ if (ret)
panic("failed to load external irq resources");
if (!request_mem_region(res.start, resource_size(&res),
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 5f693ac77a0d..4cbb000e778e 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -74,8 +74,8 @@ void __init plat_mem_setup(void)
set_io_port_base((unsigned long) KSEG1);
- if (fw_arg0 == -2) /* UHI interface */
- dtb = (void *)fw_arg1;
+ if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 4ffa6fc81c8f..1a80b6f73ab2 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -10,7 +10,7 @@
#include <dma-coherence.h>
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -41,7 +41,7 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
}
static void loongson_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
@@ -49,7 +49,7 @@ static void loongson_dma_free_coherent(struct device *dev, size_t size,
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
dir, attrs);
@@ -59,9 +59,9 @@ static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
mb();
return r;
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 249039af66c4..4788bea62a6a 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,8 +13,8 @@
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
-#define HPET_MIN_CYCLES 64
-#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+#define HPET_MIN_CYCLES 16
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
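[Editorial aside, not part of the patch: a worked comparison of the two definitions, using only the values visible in this hunk.]

/*
 * Old: HPET_MIN_CYCLES = 64, HPET_MIN_PROG_DELTA = 64 + (64 >> 1) =  96 ticks
 * New: HPET_MIN_CYCLES = 16, HPET_MIN_PROG_DELTA = 16 * 12        = 192 ticks
 *
 * The -ETIME cutoff checked in hpet_next_event() below therefore drops from
 * 64 to 16 counter ticks, while HPET_MIN_PROG_DELTA roughly doubles.
 */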
static DEFINE_SPINLOCK(hpet_lock);
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned int cnt;
- int res;
+ u32 cnt;
+ s32 res;
cnt = hpet_read(HPET_COUNTER);
- cnt += delta;
+ cnt += (u32) delta;
hpet_write(HPET_T0_CMP, cnt);
- res = (int)(cnt - hpet_read(HPET_COUNTER));
+ res = (s32)(cnt - hpet_read(HPET_COUNTER));
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
cd = &per_cpu(hpet_clockevent_device, cpu);
cd->name = "hpet";
- cd->rating = 320;
+ cd->rating = 100;
cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
cd->set_state_shutdown = hpet_set_state_shutdown;
cd->set_state_periodic = hpet_set_state_periodic;
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index e59759af63d9..2fec6f753a35 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -417,6 +417,7 @@ static int loongson3_cpu_disable(void)
return -EBUSY;
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
local_irq_save(flags);
fixup_irqs();
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d96e912b9d44..36775d20b0e7 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -434,8 +434,8 @@ static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
* a single subroutine should be used across both
* modules.
*/
-static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
- unsigned long *contpc)
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
@@ -627,8 +627,8 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
- case cbcond0_op:
- case cbcond1_op:
+ case pop10_op:
+ case pop30_op:
if (!cpu_has_mips_r6)
break;
if (insn.i_format.rt && !insn.i_format.rs)
@@ -683,14 +683,14 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
- case beqzcjic_op:
+ case pop66_op:
if (!cpu_has_mips_r6)
break;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
- case bnezcjialc_op:
+ case pop76_op:
if (!cpu_has_mips_r6)
break;
if (!insn.i_format.rs)
@@ -784,10 +784,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
- if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32))
+ if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
return 1;
- else if (config_enabled(CONFIG_32BIT) &&
- !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ else if (IS_ENABLED(CONFIG_32BIT) &&
+ !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
return !test_thread_flag(TIF_32BIT_FPREGS);
@@ -1268,7 +1268,7 @@ branch_common:
* instruction in the dslot.
*/
sig = mips_dsemul(xcp, ir,
- contpc);
+ bcpc, contpc);
if (sig < 0)
break;
if (sig)
@@ -1323,7 +1323,7 @@ branch_common:
* Single step the non-cp1
* instruction in the dslot
*/
- sig = mips_dsemul(xcp, ir, contpc);
+ sig = mips_dsemul(xcp, ir, bcpc, contpc);
if (sig < 0)
break;
if (sig)
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 47074887e64c..72a4642eee2c 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -1,3 +1,6 @@
+#include <linux/err.h>
+#include <linux/slab.h>
+
#include <asm/branch.h>
#include <asm/cacheflush.h>
#include <asm/fpu_emulator.h>
@@ -5,43 +8,211 @@
#include <asm/mipsregs.h>
#include <asm/uaccess.h>
-#include "ieee754.h"
-
-/*
- * Emulate the arbitrary instruction ir at xcp->cp0_epc. Required when
- * we have to emulate the instruction in a COP1 branch delay slot. Do
- * not change cp0_epc due to the instruction
+/**
+ * struct emuframe - The 'emulation' frame structure
+ * @emul: The instruction to 'emulate'.
+ * @badinst: A break instruction to cause a return to the kernel.
*
- * According to the spec:
- * 1) it shouldn't be a branch :-)
- * 2) it can be a COP instruction :-(
- * 3) if we are tring to run a protected memory space we must take
- * special care on memory access instructions :-(
- */
-
-/*
- * "Trampoline" return routine to catch exception following
- * execution of delay-slot instruction execution.
+ * This structure defines the frames placed within the delay slot emulation
+ * page in response to a call to mips_dsemul(). Each thread may be allocated
+ * only one frame at any given time. The kernel stores within it the
+ * instruction to be 'emulated' followed by a break instruction, then
+ * executes the frame in user mode. The break causes a trap to the kernel
+ * which leads to do_dsemulret() being called unless the instruction in
+ * @emul causes a trap itself, is a branch, or a signal is delivered to
+ * the thread. In these cases the allocated frame will either be reused by
+ * a subsequent delay slot 'emulation', or be freed during signal delivery or
+ * upon thread exit.
+ *
+ * This approach is used because:
+ *
+ * - Actually emulating all instructions isn't feasible. We would need to
+ * be able to handle instructions from all revisions of the MIPS ISA,
+ * all ASEs & all vendor instruction set extensions. This would be a
+ * whole lot of work & continual maintenance burden as new instructions
+ * are introduced, and in the case of some vendor extensions may not
+ * even be possible. Thus we need to take the approach of actually
+ * executing the instruction.
+ *
+ * - We must execute the instruction within user context. If we were to
+ * execute the instruction in kernel mode then it would have access to
+ * kernel resources without very careful checks, leaving us with a
+ * high potential for security or stability issues to arise.
+ *
+ * - We used to place the frame on the user's stack, but this requires
+ * that the stack be executable. This is bad for security so the
+ * per-process page is now used instead.
+ *
+ * - The instruction in @emul may be something entirely invalid for a
+ * delay slot. The user may (intentionally or otherwise) place a branch
+ * in a delay slot, or a kernel mode instruction, or something else
+ * which generates an exception. Thus we can't rely upon the break in
+ * @badinst always being hit. For this reason we track the index of the
+ * frame allocated to each thread, allowing us to clean it up at later
+ * points such as signal delivery or thread exit.
+ *
+ * - The user may generate a fake struct emuframe if they wish, invoking
+ * the BRK_MEMU break instruction themselves. We must therefore not
+ * trust that BRK_MEMU means there's actually a valid frame allocated
+ * to the thread, and must not allow the user to do anything they
+ * couldn't already.
*/
-
struct emuframe {
mips_instruction emul;
mips_instruction badinst;
- mips_instruction cookie;
- unsigned long epc;
};
-/*
- * Set up an emulation frame for instruction IR, from a delay slot of
- * a branch jumping to CPC. Return 0 if successful, -1 if no emulation
- * required, otherwise a signal number causing a frame setup failure.
- */
-int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
+static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
+
+static inline __user struct emuframe *dsemul_page(void)
+{
+ return (__user struct emuframe *)STACK_TOP;
+}
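[Editorial aside, not part of the patch: a sizing sketch for the emupage; 4 KiB pages and a 32-bit mips_instruction are assumptions, not stated in this hunk.]

/*
 * sizeof(struct emuframe) == 2 * sizeof(mips_instruction) == 8 bytes (assumed),
 * so emupage_frame_count == PAGE_SIZE / 8 == 512 with 4 KiB pages, i.e. up to
 * 512 threads of a single process can hold an allocated delay-slot frame at
 * any one time.
 */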
+
+static int alloc_emuframe(void)
+{
+ mm_context_t *mm_ctx = &current->mm->context;
+ int idx;
+
+retry:
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ /* Ensure we have an allocation bitmap */
+ if (!mm_ctx->bd_emupage_allocmap) {
+ mm_ctx->bd_emupage_allocmap =
+ kcalloc(BITS_TO_LONGS(emupage_frame_count),
+ sizeof(unsigned long),
+ GFP_ATOMIC);
+
+ if (!mm_ctx->bd_emupage_allocmap) {
+ idx = BD_EMUFRAME_NONE;
+ goto out_unlock;
+ }
+ }
+
+ /* Attempt to allocate a single bit/frame */
+ idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count, 0);
+ if (idx < 0) {
+ /*
+ * Failed to allocate a frame. We'll wait until one becomes
+ * available. We unlock the page so that other threads actually
+ * get the opportunity to free their frames, which means
+ * technically the result of bitmap_full may be incorrect.
+ * However the worst case is that we repeat all this and end up
+ * back here again.
+ */
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ if (!wait_event_killable(mm_ctx->bd_emupage_queue,
+ !bitmap_full(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count)))
+ goto retry;
+
+ /* Received a fatal signal - just give in */
+ return BD_EMUFRAME_NONE;
+ }
+
+ /* Success! */
+ pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
+out_unlock:
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ return idx;
+}
+
+static void free_emuframe(int idx, struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ pr_debug("free emuframe %d from %d\n", idx, current->pid);
+ bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
+
+ /* If some thread is waiting for a frame, now's its chance */
+ wake_up(&mm_ctx->bd_emupage_queue);
+
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+}
+
+static bool within_emuframe(struct pt_regs *regs)
+{
+ unsigned long base = (unsigned long)dsemul_page();
+
+ if (regs->cp0_epc < base)
+ return false;
+ if (regs->cp0_epc >= (base + PAGE_SIZE))
+ return false;
+
+ return true;
+}
+
+bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+ int fr_idx;
+
+ /* Clear any allocated frame, retrieving its index */
+ fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
+ /* If no frame was allocated, we're done */
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+
+ task_lock(tsk);
+
+ /* Free the frame that this thread had allocated */
+ if (tsk->mm)
+ free_emuframe(fr_idx, tsk->mm);
+
+ task_unlock(tsk);
+ return true;
+}
+
+bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+ struct emuframe __user *fr;
+ int fr_idx;
+
+ /* Do nothing if we're not executing from a frame */
+ if (!within_emuframe(regs))
+ return false;
+
+ /* Find the frame being executed */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+ fr = &dsemul_page()[fr_idx];
+
+ /*
+ * If the PC is at the emul instruction, roll back to the branch. If
+ * PC is at the badinst (break) instruction, we've already emulated the
+ * instruction so progress to the continue PC. If it's anything else
+ * then something is amiss & the user has branched into some other area
+ * of the emupage - we'll free the allocated frame anyway.
+ */
+ if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
+ regs->cp0_epc = current->thread.bd_emu_branch_pc;
+ else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
+ regs->cp0_epc = current->thread.bd_emu_cont_pc;
+
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ free_emuframe(fr_idx, current->mm);
+ return true;
+}
+
+void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ kfree(mm_ctx->bd_emupage_allocmap);
+}
+
+int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc)
{
int isa16 = get_isa16_mode(regs->cp0_epc);
mips_instruction break_math;
struct emuframe __user *fr;
- int err;
+ int err, fr_idx;
/* NOP is easy */
if (ir == 0)
@@ -68,30 +239,20 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
}
}
- pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc);
+ pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);
- /*
- * The strategy is to push the instruction onto the user stack
- * and put a trap after it which we can catch and jump to
- * the required address any alternative apart from full
- * instruction emulation!!.
- *
- * Algorithmics used a system call instruction, and
- * borrowed that vector. MIPS/Linux version is a bit
- * more heavyweight in the interests of portability and
- * multiprocessor support. For Linux we use a BREAK 514
- * instruction causing a breakpoint exception.
- */
- break_math = BREAK_MATH(isa16);
-
- /* Ensure that the two instructions are in the same cache line */
- fr = (struct emuframe __user *)
- ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
-
- /* Verify that the stack pointer is not completely insane */
- if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
+ /* Allocate a frame if we don't already have one */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ fr_idx = alloc_emuframe();
+ if (fr_idx == BD_EMUFRAME_NONE)
return SIGBUS;
+ fr = &dsemul_page()[fr_idx];
+
+ /* Retrieve the appropriately encoded break instruction */
+ break_math = BREAK_MATH(isa16);
+ /* Write the instructions to the frame */
if (isa16) {
err = __put_user(ir >> 16,
(u16 __user *)(&fr->emul));
@@ -106,84 +267,36 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
err |= __put_user(break_math, &fr->badinst);
}
- err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
- err |= __put_user(cpc, &fr->epc);
-
if (unlikely(err)) {
MIPS_FPU_EMU_INC_STATS(errors);
+ free_emuframe(fr_idx, current->mm);
return SIGBUS;
}
+ /* Record the PC of the branch, PC to continue from & frame index */
+ current->thread.bd_emu_branch_pc = branch_pc;
+ current->thread.bd_emu_cont_pc = cont_pc;
+ atomic_set(&current->thread.bd_emu_frame, fr_idx);
+
+ /* Change user register context to execute the frame */
regs->cp0_epc = (unsigned long)&fr->emul | isa16;
+ /* Ensure the icache observes our newly written frame */
flush_cache_sigtramp((unsigned long)&fr->emul);
return 0;
}
-int do_dsemulret(struct pt_regs *xcp)
+bool do_dsemulret(struct pt_regs *xcp)
{
- int isa16 = get_isa16_mode(xcp->cp0_epc);
- struct emuframe __user *fr;
- unsigned long epc;
- u32 insn, cookie;
- int err = 0;
- u16 instr[2];
-
- fr = (struct emuframe __user *)
- (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
-
- /*
- * If we can't even access the area, something is very wrong, but we'll
- * leave that to the default handling
- */
- if (!access_ok(VERIFY_READ, fr, sizeof(struct emuframe)))
- return 0;
-
- /*
- * Do some sanity checking on the stackframe:
- *
- * - Is the instruction pointed to by the EPC an BREAK_MATH?
- * - Is the following memory word the BD_COOKIE?
- */
- if (isa16) {
- err = __get_user(instr[0],
- (u16 __user *)(&fr->badinst));
- err |= __get_user(instr[1],
- (u16 __user *)((long)(&fr->badinst) + 2));
- insn = (instr[0] << 16) | instr[1];
- } else {
- err = __get_user(insn, &fr->badinst);
- }
- err |= __get_user(cookie, &fr->cookie);
-
- if (unlikely(err ||
- insn != BREAK_MATH(isa16) || cookie != BD_COOKIE)) {
+ /* Cleanup the allocated frame, returning if there wasn't one */
+ if (!dsemul_thread_cleanup(current)) {
MIPS_FPU_EMU_INC_STATS(errors);
- return 0;
- }
-
- /*
- * At this point, we are satisfied that it's a BD emulation trap. Yes,
- * a user might have deliberately put two malformed and useless
- * instructions in a row in his program, in which case he's in for a
- * nasty surprise - the next instruction will be treated as a
- * continuation address! Alas, this seems to be the only way that we
- * can handle signals, recursion, and longjmps() in the context of
- * emulating the branch delay instruction.
- */
-
- pr_debug("dsemulret\n");
-
- if (__get_user(epc, &fr->epc)) { /* Saved EPC */
- /* This is not a good situation to be in */
- force_sig(SIGBUS, current);
-
- return 0;
+ return false;
}
/* Set EPC to return to post-branch instruction */
- xcp->cp0_epc = epc;
- MIPS_FPU_EMU_INC_STATS(ds_emul);
- return 1;
+ xcp->cp0_epc = current->thread.bd_emu_cont_pc;
+ pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+ return true;
}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index ef7f925dd1b0..cd72805b64a7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -40,6 +40,51 @@
#include <asm/mips-cm.h>
/*
+ * Bits describing what cache ops an SMP callback function may perform.
+ *
+ * R4K_HIT - Virtual user or kernel address based cache operations. The
+ * active_mm must be checked before using user addresses, falling
+ * back to kmap.
+ * R4K_INDEX - Index based cache operations.
+ */
+
+#define R4K_HIT BIT(0)
+#define R4K_INDEX BIT(1)
+
+/**
+ * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
+ * @type: Type of cache operations (R4K_HIT or R4K_INDEX).
+ *
+ * Decides whether a cache op needs to be performed on every core in the system.
+ * This may change depending on the @type of cache operation, as well as the set
+ * of online CPUs, so preemption should be disabled by the caller to prevent CPU
+ * hotplug from changing the result.
+ *
+ * Returns: 1 if the cache operation @type should be done on every core in
+ * the system.
+ * 0 if the cache operation @type is globalized and only needs to
+ * be performed on a single CPU.
+ */
+static inline bool r4k_op_needs_ipi(unsigned int type)
+{
+ /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
+ if (type == R4K_HIT && mips_cm_present())
+ return false;
+
+ /*
+ * Hardware doesn't globalize the required cache ops, so SMP calls may
+ * be needed, but only if there are foreign CPUs (non-siblings with
+ * separate caches).
+ */
+ /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ return !cpumask_empty(&cpu_foreign_map[0]);
+#else
+ return false;
+#endif
+}
+
+/*
* Special Variant of smp_call_function for use by cache functions:
*
* o No return value
@@ -48,30 +93,17 @@
* primary cache.
* o doesn't disable interrupts on the local CPU
*/
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
+static inline void r4k_on_each_cpu(unsigned int type,
+ void (*func)(void *info), void *info)
{
preempt_disable();
-
- /*
- * The Coherent Manager propagates address-based cache ops to other
- * cores but not index-based ops. However, r4k_on_each_cpu is used
- * in both cases so there is no easy way to tell what kind of op is
- * executed to the other cores. The best we can probably do is
- * to restrict that call when a CM is not present because both
- * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
- */
- if (!mips_cm_present())
- smp_call_function_many(&cpu_foreign_map, func, info, 1);
+ if (r4k_op_needs_ipi(type))
+ smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
+ func, info, 1);
func(info);
preempt_enable();
}
-#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
-#define cpu_has_safe_index_cacheops 0
-#else
-#define cpu_has_safe_index_cacheops 1
-#endif
-
/*
* Must die.
*/
@@ -462,22 +494,44 @@ static inline void local_r4k___flush_cache_all(void * args)
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}
-static inline int has_valid_asid(const struct mm_struct *mm)
+/**
+ * has_valid_asid() - Determine if an mm already has an ASID.
+ * @mm: Memory map.
+ * @type: R4K_HIT or R4K_INDEX, type of cache op.
+ *
+ * Determines whether @mm already has an ASID on any of the CPUs which cache ops
+ * of type @type within an r4k_on_each_cpu() call will affect. If
+ * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
+ * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
+ * will need to be checked.
+ *
+ * Must be called in non-preemptive context.
+ *
+ * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm.
+ * 0 otherwise.
+ */
+static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
-#ifdef CONFIG_MIPS_MT_SMP
- int i;
+ unsigned int i;
+ const cpumask_t *mask = cpu_present_mask;
- for_each_online_cpu(i)
+ /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ /*
+ * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
+ * each foreign core, so we only need to worry about siblings.
+ * Otherwise we need to worry about all present CPUs.
+ */
+ if (r4k_op_needs_ipi(type))
+ mask = &cpu_sibling_map[smp_processor_id()];
+#endif
+ for_each_cpu(i, mask)
if (cpu_context(i, mm))
return 1;
-
return 0;
-#else
- return cpu_context(smp_processor_id(), mm);
-#endif
}
static void r4k__flush_cache_vmap(void)
@@ -490,12 +544,16 @@ static void r4k__flush_cache_vunmap(void)
r4k_blast_dcache();
}
+/*
+ * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
+ * whole caches when vma is executable.
+ */
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
int exec = vma->vm_flags & VM_EXEC;
- if (!(has_valid_asid(vma->vm_mm)))
+ if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
return;
/*
@@ -516,14 +574,14 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || exec)
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
struct mm_struct *mm = args;
- if (!has_valid_asid(mm))
+ if (!has_valid_asid(mm, R4K_INDEX))
return;
/*
@@ -548,7 +606,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -573,10 +631,10 @@ static inline void local_r4k_flush_cache_page(void *args)
void *vaddr;
/*
- * If ownes no valid ASID yet, cannot possibly have gotten
+ * If owns no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
- if (!has_valid_asid(mm))
+ if (!has_valid_asid(mm, R4K_HIT))
return;
addr &= PAGE_MASK;
@@ -643,7 +701,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -656,18 +714,23 @@ static void r4k_flush_data_cache_page(unsigned long addr)
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
+ (void *) addr);
}
struct flush_icache_range_args {
unsigned long start;
unsigned long end;
+ unsigned int type;
};
-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+static inline void __local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end,
+ unsigned int type)
{
if (!cpu_has_ic_fills_f_dc) {
- if (end - start >= dcache_size) {
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start >= dcache_size)) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -675,7 +738,8 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
}
}
- if (end - start > icache_size)
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start > icache_size))
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
@@ -701,23 +765,52 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
#endif
}
+static inline void local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+}
+
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
struct flush_icache_range_args *fir_args = args;
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
+ unsigned int type = fir_args->type;
- local_r4k_flush_icache_range(start, end);
+ __local_r4k_flush_icache_range(start, end, type);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_icache_range_args args;
+ unsigned long size, cache_size;
args.start = start;
args.end = end;
+ args.type = R4K_HIT | R4K_INDEX;
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
+ /*
+ * Indexed cache ops require an SMP call.
+ * Consider if that can or should be avoided.
+ */
+ preempt_disable();
+ if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
+ /*
+ * If address-based cache ops don't require an SMP call, then
+ * use them exclusively for small flushes.
+ */
+ size = end - start;
+ cache_size = icache_size;
+ if (!cpu_has_ic_fills_f_dc) {
+ size *= 2;
+ cache_size += dcache_size;
+ }
+ if (size <= cache_size)
+ args.type &= ~R4K_INDEX;
+ }
+ r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
+ preempt_enable();
instruction_hazard();
}
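[Editorial aside, not part of the patch: a worked example of the small-flush heuristic above, with illustrative cache sizes.]

/*
 * Assume a 32 KiB icache, 32 KiB dcache and !cpu_has_ic_fills_f_dc, so
 * cache_size = 64 KiB and size is doubled:
 *
 *   24 KiB range: size = 48 KiB <= 64 KiB -> R4K_INDEX dropped, address-based
 *                 (hit) ops only, no SMP call.
 *   40 KiB range: size = 80 KiB  > 64 KiB -> R4K_INDEX kept, caches blasted by
 *                 index and other cores IPI'd via r4k_on_each_cpu().
 */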
@@ -744,7 +837,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
* subset property so we have to flush the primary caches
* explicitly
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ if (size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -781,7 +874,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
return;
}
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ if (size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -794,25 +887,76 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+struct flush_cache_sigtramp_args {
+ struct mm_struct *mm;
+ struct page *page;
+ unsigned long addr;
+};
+
/*
* While we're protected against bad userland addresses we don't care
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
-static void local_r4k_flush_cache_sigtramp(void * arg)
+static void local_r4k_flush_cache_sigtramp(void *args)
{
+ struct flush_cache_sigtramp_args *fcs_args = args;
+ unsigned long addr = fcs_args->addr;
+ struct page *page = fcs_args->page;
+ struct mm_struct *mm = fcs_args->mm;
+ int map_coherent = 0;
+ void *vaddr;
+
unsigned long ic_lsize = cpu_icache_line_size();
unsigned long dc_lsize = cpu_dcache_line_size();
unsigned long sc_lsize = cpu_scache_line_size();
- unsigned long addr = (unsigned long) arg;
+
+ /*
+ * If owns no valid ASID yet, cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (!has_valid_asid(mm, R4K_HIT))
+ return;
+
+ if (mm == current->active_mm) {
+ vaddr = NULL;
+ } else {
+ /*
+ * Use kmap_coherent or kmap_atomic to do flushes for
+ * another ASID than the current one.
+ */
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapcount(page) &&
+ !Page_dcache_dirty(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, addr);
+ else
+ vaddr = kmap_atomic(page);
+ addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
+ }
R4600_HIT_CACHEOP_WAR_IMPL;
- if (dc_lsize)
- protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store && scache_size)
- protected_writeback_scache_line(addr & ~(sc_lsize - 1));
+ if (!cpu_has_ic_fills_f_dc) {
+ if (dc_lsize)
+ vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
+ : protected_writeback_dcache_line(
+ addr & ~(dc_lsize - 1));
+ if (!cpu_icache_snoops_remote_store && scache_size)
+ vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
+ : protected_writeback_scache_line(
+ addr & ~(sc_lsize - 1));
+ }
if (ic_lsize)
- protected_flush_icache_line(addr & ~(ic_lsize - 1));
+ vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
+ : protected_flush_icache_line(addr & ~(ic_lsize - 1));
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent();
+ else
+ kunmap_atomic(vaddr);
+ }
+
if (MIPS4K_ICACHE_REFILL_WAR) {
__asm__ __volatile__ (
".set push\n\t"
@@ -837,7 +981,23 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
+ struct flush_cache_sigtramp_args args;
+ int npages;
+
+ down_read(&current->mm->mmap_sem);
+
+ npages = get_user_pages_fast(addr, 1, 0, &args.page);
+ if (npages < 1)
+ goto out;
+
+ args.mm = current->mm;
+ args.addr = addr;
+
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
+
+ put_page(args.page);
+out:
+ up_read(&current->mm->mmap_sem);
}
static void r4k_flush_icache_all(void)
@@ -851,6 +1011,15 @@ struct flush_kernel_vmap_range_args {
int size;
};
+static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
+{
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ r4k_blast_dcache();
+}
+
static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
struct flush_kernel_vmap_range_args *vmra = args;
@@ -861,12 +1030,8 @@ static inline void local_r4k_flush_kernel_vmap_range(void *args)
* Aliases only affect the primary caches so don't bother with
* S-caches or T-caches.
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size)
- r4k_blast_dcache();
- else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(vaddr, vaddr + size);
- }
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(vaddr, vaddr + size);
}
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
@@ -876,7 +1041,12 @@ static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
args.vaddr = (unsigned long) vaddr;
args.size = size;
- r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+ if (size >= dcache_size)
+ r4k_on_each_cpu(R4K_INDEX,
+ local_r4k_flush_kernel_vmap_range_index, NULL);
+ else
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
+ &args);
}
static inline void rm7k_erratum31(void)
@@ -1206,7 +1376,7 @@ static void probe_pcache(void)
c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
- if (config & 0x8) /* VI bit */
+ if (config & MIPS_CONF_VI)
c->icache.flags |= MIPS_CACHE_VTAG;
/*
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index cb557d28cb21..b2eadd6fa9a1 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -131,7 +131,7 @@ static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
struct page *page = NULL;
@@ -141,7 +141,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
* XXX: seems like the coherent and non-coherent implementations could
* be consolidated.
*/
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
gfp = massage_gfp_flags(dev, gfp);
@@ -176,13 +176,13 @@ static void mips_dma_free_noncoherent(struct device *dev, size_t size,
}
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) vaddr;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if (attrs & DMA_ATTR_NON_CONSISTENT) {
mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
return;
}
@@ -200,7 +200,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -214,7 +214,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
pfn = page_to_pfn(virt_to_page((void *)addr));
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -291,7 +291,7 @@ static inline void __dma_sync(struct page *page,
}
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
{
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
@@ -301,7 +301,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
}
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int i;
struct scatterlist *sg;
@@ -322,7 +322,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!plat_device_is_coherent(dev))
__dma_sync(page, offset, size, direction);
@@ -332,7 +332,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9b58eb5fd0d5..a5509e7dcad2 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -504,7 +504,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
prom_free_prom_memory();
/*
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
index 5eefe3281b24..01f1154cdb0c 100644
--- a/arch/mips/mm/sc-debugfs.c
+++ b/arch/mips/mm/sc-debugfs.c
@@ -73,8 +73,8 @@ static int __init sc_debugfs_init(void)
file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
NULL, &sc_prefetch_fops);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ if (!file)
+ return -ENOMEM;
return 0;
}
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 9ac1efcfbcc7..78f900c59276 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -161,7 +161,7 @@ static void rm7k_tc_disable(void)
local_irq_save(flags);
blast_rm7k_tcache();
clear_c0_config(RM7K_CONF_TE);
- local_irq_save(flags);
+ local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4004b659ce50..55ce39606cb8 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -888,7 +888,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}
}
if (!did_vmalloc_branch) {
- if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+ if (single_insn_swpd) {
uasm_il_b(p, r, label_vmalloc_done);
uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
} else {
@@ -1025,7 +1025,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
pte_off_odd += offsetof(pte_t, pte_high);
#endif
- if (config_enabled(CONFIG_XPA)) {
+ if (IS_ENABLED(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
@@ -1643,7 +1643,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
unsigned int swmode = mode & ~hwmode;
- if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
uasm_i_lui(p, scratch, swmode >> 16);
uasm_i_or(p, pte, pte, scratch);
BUG_ON(swmode & 0xffff);
@@ -2432,7 +2432,7 @@ static void config_htw_params(void)
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
/* Set pointer size to size of directory pointers */
- if (config_enabled(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
pwsize |= MIPS_PWSIZE_PS_MASK;
/* PTEs may be multiple pointers long (e.g. with XPA) */
pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
@@ -2448,7 +2448,7 @@ static void config_htw_params(void)
* the pwctl fields.
*/
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
- if (config_enabled(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
@@ -2522,7 +2522,7 @@ void build_tlb_refill_handler(void)
*/
static int run_once = 0;
- if (config_enabled(CONFIG_XPA) && !cpu_has_rixi)
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
panic("Kernels supporting XPA currently require CPUs with RIXI");
output_pgtable_bits_defines();
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index d78178daea4b..277cf52d80e1 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -53,8 +53,13 @@ static struct insn insn_table_MM[] = {
{ insn_bltzl, 0, 0 },
{ insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+ { insn_cfc1, M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS },
+ { insn_cfcmsa, M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE },
+ { insn_ctc1, M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS },
+ { insn_ctcmsa, M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE },
{ insn_daddu, 0, 0 },
{ insn_daddiu, 0, 0 },
+ { insn_di, M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS },
{ insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
{ insn_dmfc0, 0, 0 },
{ insn_dmtc0, 0, 0 },
@@ -84,6 +89,8 @@ static struct insn insn_table_MM[] = {
{ insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
{ insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_mthi, M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS },
+ { insn_mtlo, M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS },
{ insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
@@ -166,13 +173,15 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS) {
- if (opc == insn_mfc0 || opc == insn_mtc0)
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rt(va_arg(ap, u32));
else
op |= build_rs(va_arg(ap, u32));
}
if (ip->fields & RT) {
- if (opc == insn_mfc0 || opc == insn_mtc0)
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rs(va_arg(ap, u32));
else
op |= build_rt(va_arg(ap, u32));
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 9c2220a45189..763d3f1edb8a 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -65,11 +65,16 @@ static struct insn insn_table[] = {
#ifndef CONFIG_CPU_MIPSR6
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#else
- { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+ { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
#endif
+ { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD },
+ { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE },
+ { insn_ctc1, M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD },
+ { insn_ctcmsa, M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE },
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+ { insn_di, M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT },
{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
@@ -114,7 +119,13 @@ static struct insn insn_table[] = {
{ insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mthi, M(spec_op, 0, 0, 0, 0, mthi_op), RS },
+ { insn_mtlo, M(spec_op, 0, 0, 0, 0, mtlo_op), RS },
+#ifndef CONFIG_CPU_MIPSR6
{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
+#else
+ { insn_mul, M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
+#endif
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
#ifndef CONFIG_CPU_MIPSR6
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index ad718debc35a..a82970442b8a 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -49,18 +49,19 @@ enum opcode {
insn_invalid,
insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
- insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
- insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
+ insn_bne, insn_cache, insn_cfc1, insn_cfcmsa, insn_ctc1, insn_ctcmsa,
+ insn_daddiu, insn_daddu, insn_di, insn_dins, insn_dinsm, insn_divu,
+ insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0,
- insn_mthc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe,
- insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sllv, insn_slt,
- insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
- insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
- insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
- insn_lddir, insn_ldpte,
+ insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_or, insn_ori,
+ insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
+ insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
+ insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
+ insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
+ insn_xori, insn_yield, insn_lddir, insn_ldpte,
};
struct insn {
@@ -268,10 +269,15 @@ I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u2s3u1(_cache)
+I_u1u2(_cfc1)
+I_u2u1(_cfcmsa)
+I_u1u2(_ctc1)
+I_u2u1(_ctcmsa)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
+I_u1(_di);
I_u1u2(_divu)
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
@@ -301,6 +307,8 @@ I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
I_u1u2u3(_mthc0)
+I_u1(_mthi)
+I_u1(_mtlo)
I_u3u1u2(_mul)
I_u2u1u3(_ori)
I_u3u1u2(_or)
@@ -370,11 +378,7 @@ UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
int ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
-#ifdef CONFIG_64BIT
- return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
-#else
- return 1;
-#endif
+ return addr == (int)addr;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
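
The uasm_in_compat_space_p() rewrite above collapses the #ifdef CONFIG_64BIT mask test into one expression: addr == (int)addr is true exactly when the 64-bit value equals the sign extension of its low 32 bits (which also covers small non-negative addresses the old all-ones mask rejected), and on a 32-bit build it is trivially always true. A quick host-side illustration, assuming an LP64 machine (my example, not code from the patch):

#include <stdio.h>

static int in_compat_space(long addr)
{
	/* same test as the patched uasm_in_compat_space_p() */
	return addr == (int)addr;
}

int main(void)
{
	/* fits in a sign-extended 32-bit value */
	printf("%d\n", in_compat_space(0x1234L));			/* 1 */
	/* upper 32 bits are the sign extension of the low half */
	printf("%d\n", in_compat_space((long)0xffffffff80001234UL));	/* 1 */
	/* needs more than 32 significant bits */
	printf("%d\n", in_compat_space(0x123456789aL));			/* 0 */
	return 0;
}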
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
index f7133efc5843..151f4882ec8a 100644
--- a/arch/mips/mti-malta/malta-dtshim.c
+++ b/arch/mips/mti-malta/malta-dtshim.c
@@ -31,7 +31,7 @@ static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
entries = 1;
mem_array[0] = cpu_to_be32(PHYS_OFFSET);
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
/*
* The current Malta EVA configuration is "special" in that it
* always makes use of addresses in the upper half of the 32 bit
@@ -82,7 +82,7 @@ static void __init append_memory(void *fdt, int root_off)
physical_memsize = 32 << 20;
}
- if (config_enabled(CONFIG_CPU_BIG_ENDIAN)) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/*
* SOC-it swaps, or perhaps doesn't swap, when DMA'ing
* the last word of physical memory.
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index d5f8dae6a797..a47556723b85 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -32,7 +32,7 @@ static void free_init_pages_eva_malta(void *begin, void *end)
void __init fw_meminit(void)
{
- bool eva = config_enabled(CONFIG_EVA);
+ bool eva = IS_ENABLED(CONFIG_EVA);
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 33d5ff5069e5..ec5b21678fad 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -261,7 +261,7 @@ void __init plat_mem_setup(void)
fdt = malta_dt_shim(fdt);
__dt_setup_arch(fdt);
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 1a8c96035716..39e7b472f0d8 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -426,7 +426,7 @@ static inline void emit_load_ptr(unsigned int dst, unsigned int src,
static inline void emit_load_func(unsigned int reg, ptr imm,
struct jit_ctx *ctx)
{
- if (config_enabled(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT)) {
/* At this point imm is always 64-bit */
emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
@@ -516,7 +516,7 @@ static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
static inline u16 align_sp(unsigned int num)
{
/* Double word alignment for 32-bit, quadword for 64-bit */
- unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
num = (num + (align - 1)) & -align;
return num;
}
@@ -1199,7 +1199,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
memset(&ctx, 0, sizeof(ctx));
- ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL);
+ ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c
index 3758715d4ab6..0630693bec2a 100644
--- a/arch/mips/netlogic/common/nlm-dma.c
+++ b/arch/mips/netlogic/common/nlm-dma.c
@@ -45,7 +45,7 @@
static char *nlm_swiotlb;
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
@@ -62,7 +62,7 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
}
static void nlm_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index f1b11f0dea2d..b4c02f29663e 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -112,7 +112,14 @@ static void pcibios_scanbus(struct pci_controller *hose)
need_domain_info = 1;
}
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
}
@@ -319,6 +326,16 @@ void pcibios_fixup_bus(struct pci_bus *bus)
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ phys_addr_t size = resource_size(rsrc);
+
+ *start = fixup_bigphys_addr(rsrc->start, size);
+ *end = rsrc->start + size;
+}
+
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index 77ecf32ef3dc..51599710472b 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -33,8 +33,8 @@ static ulong get_fdtaddr(void)
{
ulong ftaddr = 0;
- if ((fw_arg0 == -2) && fw_arg1 && !fw_arg2 && !fw_arg3)
- return (ulong)fw_arg1;
+ if (fw_passed_dtb && !fw_arg2 && !fw_arg3)
+ return (ulong)fw_passed_dtb;
if (__dtb_start < __dtb_end)
ftaddr = (ulong)__dtb_start;
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index c50a670e60d2..1c91cad7988f 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -59,29 +59,6 @@ const char *get_system_type(void)
return sys_type;
}
-static void __init plat_setup_iocoherency(void)
-{
- /*
- * Kernel has been configured with software coherency
- * but we might choose to turn it off and use hardware
- * coherency instead.
- */
- if (mips_cm_numiocu() != 0) {
- /* Nothing special needs to be done to enable coherency */
- pr_info("CMP IOCU detected\n");
- hw_coherentio = 1;
- if (coherentio == 0)
- pr_info("Hardware DMA cache coherency disabled\n");
- else
- pr_info("Hardware DMA cache coherency enabled\n");
- } else {
- if (coherentio == 1)
- pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
- else
- pr_info("Software DMA cache coherency enabled\n");
- }
-}
-
void __init *plat_get_fdt(void)
{
if (fw_arg0 != -2)
@@ -92,8 +69,6 @@ void __init *plat_get_fdt(void)
void __init plat_mem_setup(void)
{
__dt_setup_arch(plat_get_fdt());
-
- plat_setup_iocoherency();
}
#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index d40edda0ca3b..3c7c9bf57bf3 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -175,7 +175,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
};
static struct rt2880_pmx_func spis_grp_mt7628[] = {
- FUNC("pwm", 3, 14, 4),
+ FUNC("pwm_uart2", 3, 14, 4),
FUNC("util", 2, 14, 4),
FUNC("gpio", 1, 14, 4),
FUNC("spis", 0, 14, 4),
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 063c2dd31e72..2f45b0357021 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -7,7 +7,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
-#include <linux/ds1286.h>
+#include <linux/rtc/ds1286.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index fb4b3520cdc6..7ee14f41fc25 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -8,7 +8,6 @@
#include <asm/sni.h>
#include <asm/time.h>
-#include <asm-generic/rtc.h>
#define SNI_CLOCK_TICK_RATE 3686400
#define SNI_COUNTER2_DIV 64
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index a77698ff2b6f..1f6bc9a3036c 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -268,7 +268,7 @@ static int txx9_i8259_irq_setup(int irq)
return err;
}
-static void __init_refok quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __ref quirk_slc90e66_bridge(struct pci_dev *dev)
{
int irq; /* PCI/ISA Bridge interrupt */
u8 reg_64;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 9627e81a6cbb..38e3494bfb63 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -236,7 +236,9 @@ source "kernel/Kconfig.hz"
config MN10300_RTC
bool "Using MN10300 RTC"
depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
- select GENERIC_CMOS_UPDATE
+ select RTC_CLASS
+ select RTC_DRV_CMOS
+ select RTC_SYSTOHC
default n
help
This option enables support for the RTC, thus enabling time to be
diff --git a/arch/mn10300/include/asm/rtc-regs.h b/arch/mn10300/include/asm/rtc-regs.h
index c42deefaec11..c81cacecb6e3 100644
--- a/arch/mn10300/include/asm/rtc-regs.h
+++ b/arch/mn10300/include/asm/rtc-regs.h
@@ -75,9 +75,9 @@
#define RTC_PORT(x) 0xd8600000
#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
-#define CMOS_READ(addr) __SYSREG(0xd8600000 + (addr), u8)
+#define CMOS_READ(addr) __SYSREG(0xd8600000 + (u32)(addr), u8)
#define CMOS_WRITE(val, addr) \
- do { __SYSREG(0xd8600000 + (addr), u8) = val; } while (0)
+ do { __SYSREG(0xd8600000 + (u32)(addr), u8) = val; } while (0)
#define RTC_IRQ RTIRQ
diff --git a/arch/mn10300/include/asm/rtc.h b/arch/mn10300/include/asm/rtc.h
index 6c14bb1d0d9b..07dc87656197 100644
--- a/arch/mn10300/include/asm/rtc.h
+++ b/arch/mn10300/include/asm/rtc.h
@@ -25,6 +25,4 @@ static inline void calibrate_clock(void)
#endif /* !CONFIG_MN10300_RTC */
-#include <asm-generic/rtc.h>
-
#endif /* _ASM_RTC_H */
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index 48d7058b3295..f81f37025072 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -12,107 +12,19 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
-#include <linux/bcd.h>
-#include <linux/timex.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
#include <asm/rtc-regs.h>
#include <asm/rtc.h>
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-/*
- * Read the current RTC time
- */
-void read_persistent_clock(struct timespec *ts)
-{
- struct rtc_time tm;
-
- get_rtc_time(&tm);
-
- ts->tv_nsec = 0;
- ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
-
- /* if rtc is way off in the past, set something reasonable */
- if (ts->tv_sec < 0)
- ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
-}
-
-/*
- * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
- * ms after the second nowtime has started, because when nowtime is written
- * into the registers of the CMOS clock, it will jump to the next second
- * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
- * sheet for details.
- *
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
- */
-static int set_rtc_mmss(unsigned long nowtime)
-{
- unsigned char save_control, save_freq_select;
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
-
- /* gets recalled with irq locally disabled */
- spin_lock(&rtc_lock);
- save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being
- * set */
- CMOS_WRITE(save_control | RTC_SET, RTC_CONTROL);
-
- save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset
- * prescaler */
- CMOS_WRITE(save_freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
- cmos_minutes = bcd2bin(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
- /* correct for half hour time zone */
- real_minutes += 30;
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30) {
- if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- }
- CMOS_WRITE(real_seconds, RTC_SECONDS);
- CMOS_WRITE(real_minutes, RTC_MINUTES);
- } else {
- printk_once(KERN_NOTICE
- "set_rtc_mmss: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- /* The following flags have to be released exactly in this order,
- * otherwise the DS12887 (popular MC146818A clone with integrated
- * battery and quartz) will not reset the oscillator and will not
- * update precisely 500 ms later. You won't find this mentioned in
- * the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
- */
- CMOS_WRITE(save_control, RTC_CONTROL);
- CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
- spin_unlock(&rtc_lock);
-
- return retval;
-}
-
-int update_persistent_clock(struct timespec now)
-{
- return set_rtc_mmss(now.tv_sec);
-}
+static const __initdata struct resource res[] = {
+ DEFINE_RES_IO(RTC_PORT(0), RTC_IO_EXTENT),
+ DEFINE_RES_IRQ(RTC_IRQ),
+};
/*
* calibrate the TSC clock against the RTC
@@ -129,4 +41,6 @@ void __init calibrate_clock(void)
RTCRA |= RTCRA_DVR;
RTCRA &= ~RTCRA_DVR;
RTCRB &= ~RTCRB_SET;
+
+ platform_device_register_simple("rtc_cmos", -1, res, ARRAY_SIZE(res));
}
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 8842394cb49a..4f4b9029f0ea 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -21,7 +21,7 @@
static unsigned long pci_sram_allocated = 0xbc000000;
static void *mn10300_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long addr;
void *ret;
@@ -63,7 +63,7 @@ done:
}
static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
@@ -75,7 +75,7 @@ static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -92,7 +92,7 @@ static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
return page_to_bus(page) + offset;
}
diff --git a/arch/mn10300/proc-mn103e010/proc-init.c b/arch/mn10300/proc-mn103e010/proc-init.c
index 27b97980dca4..102d86a6ae56 100644
--- a/arch/mn10300/proc-mn103e010/proc-init.c
+++ b/arch/mn10300/proc-mn103e010/proc-init.c
@@ -9,7 +9,10 @@
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <asm/cacheflush.h>
#include <asm/fpu.h>
+#include <asm/irq.h>
#include <asm/rtc.h>
#include <asm/busctl-regs.h>
diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c
index ee6d03dbc8d8..950cc8dbb284 100644
--- a/arch/mn10300/proc-mn2ws0050/proc-init.c
+++ b/arch/mn10300/proc-mn2ws0050/proc-init.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 90422c367ed3..d800fad87896 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -59,7 +59,7 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
}
static void *nios2_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -84,7 +84,7 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
}
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
@@ -93,7 +93,7 @@ static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
@@ -113,7 +113,7 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *addr = page_address(page) + offset;
@@ -123,14 +123,14 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *addr;
int i;
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index e75c75d249d6..c92fe4234009 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -89,7 +89,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
free_initmem_default(-1);
}
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 0b77ddb1ee07..140c99140649 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -22,7 +22,6 @@
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
-#include <linux/dma-attrs.h>
#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
@@ -83,7 +82,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr,
static void *
or1k_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long va;
void *page;
@@ -101,7 +100,7 @@ or1k_dma_alloc(struct device *dev, size_t size,
va = (unsigned long)page;
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
/*
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
@@ -117,7 +116,7 @@ or1k_dma_alloc(struct device *dev, size_t size,
static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long va = (unsigned long)vaddr;
struct mm_walk walk = {
@@ -125,7 +124,7 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr,
.mm = &init_mm
};
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
/* walk_page_range shouldn't be able to fail here */
WARN_ON(walk_page_range(va, va + size, &walk));
}
@@ -137,7 +136,7 @@ static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long cl;
dma_addr_t addr = page_to_phys(page) + offset;
@@ -170,7 +169,7 @@ or1k_map_page(struct device *dev, struct page *page,
static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* Nothing special to do here... */
}
@@ -178,14 +177,14 @@ or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
- s->length, dir, NULL);
+ s->length, dir, 0);
}
return nents;
@@ -194,13 +193,13 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg,
static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
- or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
+ or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
}
}
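
The openrisc hunks above are part of the tree-wide switch of the DMA-mapping attrs argument from a struct dma_attrs pointer to a plain unsigned long bitmask: dma_get_attr(DMA_ATTR_X, attrs) becomes a bitwise test, and callers that used to pass NULL now pass 0. A minimal standalone sketch of the new-style test (the flag value below is illustrative only; the real DMA_ATTR_* definitions live in linux/dma-mapping.h):

#include <stdio.h>

#define DMA_ATTR_NON_CONSISTENT	(1UL << 3)	/* illustrative bit, not the kernel header's */

static void alloc_example(unsigned long attrs)
{
	/* old: if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) ... */
	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
		puts("caller wants a coherent (consistent) buffer");
	else
		puts("caller allows a non-consistent buffer");
}

int main(void)
{
	alloc_example(0);				/* former NULL callers */
	alloc_example(DMA_ATTR_NON_CONSISTENT);
	return 0;
}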
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 5b2a95116e8f..fa60b81aee3e 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -38,7 +38,7 @@ static unsigned int fixmaps_used __initdata;
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void __iomem *__init_refok
+void __iomem *__ref
__ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
{
phys_addr_t p;
@@ -116,7 +116,7 @@ void iounmap(void *addr)
* the memblock infrastructure.
*/
-pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
+pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index dc117385ce2e..cd8778103165 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -31,6 +31,7 @@ config PARISC
select TTY # Needed for pdc_cons.c
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HASH
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
diff --git a/arch/parisc/include/asm/hash.h b/arch/parisc/include/asm/hash.h
new file mode 100644
index 000000000000..dbe93311aa26
--- /dev/null
+++ b/arch/parisc/include/asm/hash.h
@@ -0,0 +1,146 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * HP-PA only implements integer multiply in the FPU. However, for
+ * integer multiplies by constant, it has a number of shift-and-add
+ * (but no shift-and-subtract, sigh!) instructions that a compiler
+ * can synthesize a code sequence with.
+ *
+ * Unfortunately, GCC isn't very efficient at using them. For example
+ * it uses three instructions for "x *= 21" when only two are needed.
+ * But we can find a sequence manually.
+ */
+
+#define HAVE_ARCH__HASH_32 1
+
+/*
+ * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the
+ * PA7100 pairing rules. This is an in-order 2-way superscalar processor.
+ * Only one instruction in a pair may be a shift (by more than 3 bits),
+ * but other than that, simple ALU ops (including shift-and-add by up
+ * to 3 bits) may be paired arbitrarily.
+ *
+ * PA8xxx processors also dual-issue ALU instructions, although with
+ * fewer constraints, so this schedule is good for them, too.
+ *
+ * This 6-step sequence was found by Yevgen Voronenko's implementation
+ * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+ u32 a, b, c;
+
+ /*
+ * Phase 1: Compute a = (x << 19) + x,
+ * b = (x << 9) + a, c = (x << 23) + b.
+ */
+ a = x << 19; /* Two shifts can't be paired */
+ b = x << 9; a += x;
+ c = x << 23; b += a;
+ c += b;
+ /* Phase 2: Return (b<<11) + (c<<6) + (a<<3) - c */
+ b <<= 11;
+ a += c << 3; b -= c;
+ return (a << 3) + b;
+}
+
+#if BITS_PER_LONG == 64
+
+#define HAVE_ARCH_HASH_64 1
+
+/*
+ * Finding a good shift-and-add chain for GOLDEN_RATIO_64 is tricky,
+ * because available software for the purpose chokes on constants this
+ * large. (It's mostly designed for compiling FIR filter coefficients
+ * into FPGAs.)
+ *
+ * However, Jason Thong pointed out a work-around. The Hcub software
+ * (http://spiral.ece.cmu.edu/mcm/gen.html) is designed for *multiple*
+ * constant multiplication, and is good at finding shift-and-add chains
+ * which share common terms.
+ *
+ * Looking at 0x61C8864680B583EB in binary:
+ * 0110000111001000100001100100011010000000101101011000001111101011
+ * \______________/ \__________/ \_______/ \________/
+ * \____________________________/ \____________________/
+ * you can see the non-zero bits are divided into several well-separated
+ * blocks. Hcub can find algorithms for those terms separately, which
+ * can then be shifted and added together.
+ *
+ * Dividing the input into 2, 3 or 4 blocks, Hcub can find solutions
+ * with 10, 9 or 8 adds, respectively, making a total of 11 for the
+ * whole number.
+ *
+ * Using just two large blocks, 0xC3910C8D << 31 in the high bits,
+ * and 0xB583EB in the low bits, produces as good an algorithm as any,
+ * and with one more small shift than alternatives.
+ *
+ * The high bits are a larger number and more work to compute, as well
+ * as needing one extra cycle to shift left 31 bits before the final
+ * addition, so they are the critical path for scheduling. The low bits
+ * can fit into the scheduling slots left over.
+ */
+
+
+/*
+ * This _ASSIGN(dst, src) macro performs "dst = src", but prevents GCC
+ * from inferring anything about the value assigned to "dst".
+ *
+ * This prevents it from mis-optimizing certain sequences.
+ * In particular, gcc is annoyingly eager to combine consecutive shifts.
+ * Given "x <<= 19; y += x; z += x << 1;", GCC will turn this into
+ * "y += x << 19; z += x << 20;" even though the latter sequence needs
+ * an additional instruction and temporary register.
+ *
+ * Because no actual assembly code is generated, this construct is
+ * usefully portable across all GCC platforms, and so can be test-compiled
+ * on non-PA systems.
+ *
+ * In two places, additional unused input dependencies are added. This
+ * forces GCC's scheduling so it does not rearrange instructions too much.
+ * Because the PA-8xxx is out of order, I'm not sure how much this matters,
+ * but why make it more difficult for the processor than necessary?
+ */
+#define _ASSIGN(dst, src, ...) asm("" : "=r" (dst) : "0" (src), ##__VA_ARGS__)
+
+/*
+ * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily
+ * optimized shift-and-add sequence.
+ *
+ * Without the final shift, the multiply proper is 19 instructions,
+ * 10 cycles and uses only 4 temporaries. Whew!
+ *
+ * You are not expected to understand this.
+ */
+static __always_inline u32 __attribute_const__
+hash_64(u64 a, unsigned int bits)
+{
+ u64 b, c, d;
+
+ /*
+ * Encourage GCC to move a dynamic shift to %sar early,
+ * thereby freeing up an additional temporary register.
+ */
+ if (!__builtin_constant_p(bits))
+ asm("" : "=q" (bits) : "0" (64 - bits));
+ else
+ bits = 64 - bits;
+
+ _ASSIGN(b, a*5); c = a << 13;
+ b = (b << 2) + a; _ASSIGN(d, a << 17);
+ a = b + (a << 1); c += d;
+ d = a << 10; _ASSIGN(a, a << 19);
+ d = a - d; _ASSIGN(a, a << 4, "X" (d));
+ c += b; a += b;
+ d -= c; c += a << 1;
+ a += c << 3; _ASSIGN(b, b << (7+31), "X" (c), "X" (d));
+ a <<= 31; b += d;
+ a += b;
+ return a >> bits;
+}
+#undef _ASSIGN /* We're a widely-used header file, so don't litter! */
+
+#endif /* BITS_PER_LONG == 64 */
+
+#endif /* _ASM_HASH_H */
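
A quick host-side way to convince yourself that the six-step __hash_32() chain above really is a multiply by GOLDEN_RATIO_32 = 0x61C88647 (this harness is mine, not part of the patch; all arithmetic is mod 2^32, so equality for every input follows from linearity, but a brute-force spot check is cheap):

#include <stdint.h>
#include <stdio.h>

static uint32_t hash32_chain(uint32_t x)
{
	uint32_t a, b, c;

	/* phase 1: a = (x<<19)+x, b = (x<<9)+a, c = (x<<23)+b */
	a = x << 19;
	b = x << 9;  a += x;
	c = x << 23; b += a;
	c += b;
	/* phase 2: (b<<11) + (c<<6) + (a<<3) - c */
	b <<= 11;
	a += c << 3; b -= c;
	return (a << 3) + b;
}

int main(void)
{
	uint32_t x;

	for (x = 0; x < 5000000u; x++) {
		if (hash32_chain(x) != x * 0x61C88647u) {
			printf("mismatch at %u\n", x);
			return 1;
		}
	}
	puts("chain matches x * 0x61C88647 for all tested inputs");
	return 0;
}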
diff --git a/arch/parisc/include/asm/mc146818rtc.h b/arch/parisc/include/asm/mc146818rtc.h
deleted file mode 100644
index adf41631449f..000000000000
--- a/arch/parisc/include/asm/mc146818rtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _ASM_MC146818RTC_H
-#define _ASM_MC146818RTC_H
-
-/* empty include file to satisfy the include in genrtc.c */
-
-#endif /* _ASM_MC146818RTC_H */
diff --git a/arch/parisc/include/asm/rtc.h b/arch/parisc/include/asm/rtc.h
deleted file mode 100644
index 099d641a42c2..000000000000
--- a/arch/parisc/include/asm/rtc.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * include/asm-parisc/rtc.h
- *
- * Copyright 2002 Randolph CHung <tausq@debian.org>
- *
- * Based on: include/asm-ppc/rtc.h and the genrtc driver in the
- * 2.4 parisc linux tree
- */
-
-#ifndef __ASM_RTC_H__
-#define __ASM_RTC_H__
-
-#ifdef __KERNEL__
-
-#include <linux/rtc.h>
-
-#include <asm/pdc.h>
-
-#define SECS_PER_HOUR (60 * 60)
-#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-
-
-#define RTC_PIE 0x40 /* periodic interrupt enable */
-#define RTC_AIE 0x20 /* alarm interrupt enable */
-#define RTC_UIE 0x10 /* update-finished interrupt enable */
-
-#define RTC_BATT_BAD 0x100 /* battery bad */
-
-/* some dummy definitions */
-#define RTC_SQWE 0x08 /* enable square-wave output */
-#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
-#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
-#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
-
-# define __isleap(year) \
- ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
-
-/* How many days come before each month (0-12). */
-static const unsigned short int __mon_yday[2][13] =
-{
- /* Normal years. */
- { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
- /* Leap years. */
- { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
-};
-
-static inline unsigned int get_rtc_time(struct rtc_time *wtime)
-{
- struct pdc_tod tod_data;
- long int days, rem, y;
- const unsigned short int *ip;
-
- memset(wtime, 0, sizeof(*wtime));
- if (pdc_tod_read(&tod_data) < 0)
- return RTC_24H | RTC_BATT_BAD;
-
- // most of the remainder of this function is:
-// Copyright (C) 1991, 1993, 1997, 1998 Free Software Foundation, Inc.
-// This was originally a part of the GNU C Library.
-// It is distributed under the GPL, and was swiped from offtime.c
-
-
- days = tod_data.tod_sec / SECS_PER_DAY;
- rem = tod_data.tod_sec % SECS_PER_DAY;
-
- wtime->tm_hour = rem / SECS_PER_HOUR;
- rem %= SECS_PER_HOUR;
- wtime->tm_min = rem / 60;
- wtime->tm_sec = rem % 60;
-
- y = 1970;
-
-#define DIV(a, b) ((a) / (b) - ((a) % (b) < 0))
-#define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400))
-
- while (days < 0 || days >= (__isleap (y) ? 366 : 365))
- {
- /* Guess a corrected year, assuming 365 days per year. */
- long int yg = y + days / 365 - (days % 365 < 0);
-
- /* Adjust DAYS and Y to match the guessed year. */
- days -= ((yg - y) * 365
- + LEAPS_THRU_END_OF (yg - 1)
- - LEAPS_THRU_END_OF (y - 1));
- y = yg;
- }
- wtime->tm_year = y - 1900;
-
- ip = __mon_yday[__isleap(y)];
- for (y = 11; days < (long int) ip[y]; --y)
- continue;
- days -= ip[y];
- wtime->tm_mon = y;
- wtime->tm_mday = days + 1;
-
- return RTC_24H;
-}
-
-static int set_rtc_time(struct rtc_time *wtime)
-{
- u_int32_t secs;
-
- secs = mktime(wtime->tm_year + 1900, wtime->tm_mon + 1, wtime->tm_mday,
- wtime->tm_hour, wtime->tm_min, wtime->tm_sec);
-
- if(pdc_tod_set(secs, 0) < 0)
- return -1;
- else
- return 0;
-
-}
-
-static inline unsigned int get_rtc_ss(void)
-{
- struct rtc_time h;
-
- get_rtc_time(&h);
- return h.tm_sec;
-}
-
-static inline int get_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-static inline int set_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_RTC_H__ */
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 22395901d47b..e5d71905cad5 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1354,9 +1354,9 @@ int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
__pa(pdc_result), pci_addr, pci_size);
switch(pci_size) {
- case 1: *(u8 *) mem_addr = (u8) pdc_result[0];
- case 2: *(u16 *)mem_addr = (u16) pdc_result[0];
- case 4: *(u32 *)mem_addr = (u32) pdc_result[0];
+ case 1: *(u8 *) mem_addr = (u8) pdc_result[0]; break;
+ case 2: *(u16 *)mem_addr = (u16) pdc_result[0]; break;
+ case 4: *(u32 *)mem_addr = (u32) pdc_result[0]; break;
}
spin_unlock_irqrestore(&pdc_lock, flags);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index a27e4928bf73..02d9ed0f3949 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -414,7 +414,7 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
static void *pa11_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
unsigned long vaddr;
unsigned long paddr;
@@ -441,7 +441,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
}
static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
@@ -454,7 +454,7 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
@@ -465,7 +465,7 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
@@ -484,7 +484,7 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
@@ -503,7 +503,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
@@ -577,11 +577,11 @@ struct dma_map_ops pcxl_dma_ops = {
};
static void *pcx_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
void *addr;
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
return NULL;
addr = (void *)__get_free_pages(flag, get_order(size));
@@ -592,7 +592,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
}
static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t iova, struct dma_attrs *attrs)
+ dma_addr_t iova, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
return;
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 31ec99a5f119..505cf1ac5af2 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -12,6 +12,7 @@
*/
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
@@ -248,14 +249,47 @@ void __init start_cpu_itimer(void)
per_cpu(cpu_data, cpu).it_value = next_tick;
}
+#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
+static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pdc_tod tod_data;
+
+ memset(tm, 0, sizeof(*tm));
+ if (pdc_tod_read(&tod_data) < 0)
+ return -EOPNOTSUPP;
+
+ /* we treat tod_sec as unsigned, so this can work until year 2106 */
+ rtc_time64_to_tm(tod_data.tod_sec, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
+{
+ time64_t secs = rtc_tm_to_time64(tm);
+
+ if (pdc_tod_set(secs, 0) < 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc_generic_ops = {
+ .read_time = rtc_generic_get_time,
+ .set_time = rtc_generic_set_time,
+};
+
static int __init rtc_init(void)
{
struct platform_device *pdev;
- pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+ &rtc_generic_ops,
+ sizeof(rtc_generic_ops));
+
return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
+#endif
void read_persistent_clock(struct timespec *ts)
{
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index fb8e10a4fb39..eaffbb90aa14 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -125,22 +125,22 @@ static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
}
static const struct iomap_ops ioport_ops = {
- ioport_read8,
- ioport_read16,
- ioport_read16,
- ioport_read32,
- ioport_read32,
- ioport_write8,
- ioport_write16,
- ioport_write16,
- ioport_write32,
- ioport_write32,
- ioport_read8r,
- ioport_read16r,
- ioport_read32r,
- ioport_write8r,
- ioport_write16r,
- ioport_write32r,
+ .read8 = ioport_read8,
+ .read16 = ioport_read16,
+ .read16be = ioport_read16,
+ .read32 = ioport_read32,
+ .read32be = ioport_read32,
+ .write8 = ioport_write8,
+ .write16 = ioport_write16,
+ .write16be = ioport_write16,
+ .write32 = ioport_write32,
+ .write32be = ioport_write32,
+ .read8r = ioport_read8r,
+ .read16r = ioport_read16r,
+ .read32r = ioport_read32r,
+ .write8r = ioport_write8r,
+ .write16r = ioport_write16r,
+ .write32r = ioport_write32r,
};
/* Legacy I/O memory ops */
@@ -244,22 +244,22 @@ static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
}
static const struct iomap_ops iomem_ops = {
- iomem_read8,
- iomem_read16,
- iomem_read16be,
- iomem_read32,
- iomem_read32be,
- iomem_write8,
- iomem_write16,
- iomem_write16be,
- iomem_write32,
- iomem_write32be,
- iomem_read8r,
- iomem_read16r,
- iomem_read32r,
- iomem_write8r,
- iomem_write16r,
- iomem_write32r,
+ .read8 = iomem_read8,
+ .read16 = iomem_read16,
+ .read16be = iomem_read16be,
+ .read32 = iomem_read32,
+ .read32be = iomem_read32be,
+ .write8 = iomem_write8,
+ .write16 = iomem_write16,
+ .write16be = iomem_write16be,
+ .write32 = iomem_write32,
+ .write32be = iomem_write32be,
+ .read8r = iomem_read8r,
+ .read16r = iomem_read16r,
+ .read32r = iomem_read32r,
+ .write8r = iomem_write8r,
+ .write16r = iomem_write16r,
+ .write32r = iomem_write32r,
};
static const struct iomap_ops *iomap_ops[8] = {
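
The iomap.c hunks above convert the ioport/iomem operation tables from positional to designated initializers; behaviour is unchanged, but each function is now tied to its slot by name, so the tables cannot silently go out of sync if struct iomap_ops is ever reordered or grows a field in the middle. A generic illustration of the two styles (not kernel code):

#include <stdio.h>

struct small_ops {
	int (*read8)(void);
	int (*read16)(void);
	int (*read16be)(void);
};

static int r8(void)  { return 8; }
static int r16(void) { return 16; }

/* old style: order must match the struct declaration exactly */
static const struct small_ops positional = { r8, r16, r16 };

/* new style: members are matched by name */
static const struct small_ops designated = {
	.read8    = r8,
	.read16   = r16,
	.read16be = r16,	/* port I/O has no byte-swapped variant, so reuse read16 */
};

int main(void)
{
	printf("%d %d\n", positional.read16be(), designated.read16be());
	return 0;
}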
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d111044f41a2..927d2ab2ce08 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -127,7 +127,8 @@ config PPC
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
+ select HAVE_CBPF_JIT if !PPC64
+ select HAVE_EBPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -163,6 +164,9 @@ config PPC
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select GENERIC_CPU_AUTOPROBE
+ select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_ARCH_HARDENED_USERCOPY
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -456,6 +460,29 @@ config KEXEC
interface is strongly in flux, so no good recommendation can be
made.
+config RELOCATABLE
+ bool "Build a relocatable kernel"
+ depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
+ select NONSTATIC_KERNEL
+ help
+ This builds a kernel image that is capable of running at the
+ location the kernel is loaded at. For ppc32, there are no
+ alignment restrictions, and this feature is a superset of
+ DYNAMIC_MEMSTART and hence overrides it. For ppc64, a
+ 16k-aligned base address is required. The kernel is linked as a
+ position-independent executable (PIE) and contains dynamic relocations
+ which are processed early in the bootup process.
+
+ One use is for the kexec on panic case where the recovery kernel
+ must live at a different physical address than the primary
+ kernel.
+
+ Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
+ it has been loaded at and the compile time physical addresses
+ CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START
+ setting can still be useful to bootwrappers that need to know the
+ load address of the kernel (eg. u-boot/mkimage).
+
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
@@ -948,29 +975,6 @@ config DYNAMIC_MEMSTART
This option is overridden by CONFIG_RELOCATABLE
-config RELOCATABLE
- bool "Build a relocatable kernel"
- depends on ADVANCED_OPTIONS && FLATMEM && (44x || FSL_BOOKE)
- select NONSTATIC_KERNEL
- help
- This builds a kernel image that is capable of running at the
- location the kernel is loaded at, without any alignment restrictions.
- This feature is a superset of DYNAMIC_MEMSTART and hence overrides it.
-
- One use is for the kexec on panic case where the recovery kernel
- must live at a different physical address than the primary
- kernel.
-
- Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
- it has been loaded at and the compile time physical addresses
- CONFIG_PHYSICAL_START is ignored. However CONFIG_PHYSICAL_START
- setting can still be useful to bootwrappers that need to know the
- load address of the kernel (eg. u-boot/mkimage).
-
-config RELOCATABLE_PPC32
- def_bool y
- depends on PPC32 && RELOCATABLE
-
config PAGE_OFFSET_BOOL
bool "Set custom page offset address"
depends on ADVANCED_OPTIONS
@@ -1053,24 +1057,14 @@ config CONSISTENT_SIZE
config PIN_TLB
bool "Pinned Kernel TLBs (860 ONLY)"
depends on ADVANCED_OPTIONS && 8xx
+
+config PIN_TLB_IMMR
+ bool "Pinned TLB for IMMR"
+ depends on PIN_TLB
+ default y
endmenu
if PPC64
-config RELOCATABLE
- bool "Build a relocatable kernel"
- depends on !COMPILE_TEST
- select NONSTATIC_KERNEL
- help
- This builds a kernel image that is capable of running anywhere
- in the RMA (real memory area) at any 16k-aligned base address.
- The kernel is linked as a position-independent executable (PIE)
- and contains dynamic relocations which are processed early
- in the bootup process.
-
- One use is for the kexec on panic case where the recovery kernel
- must live at a different physical address than the primary
- kernel.
-
# This value must have zeroes in the bottom 60 bits otherwise lots will break
config PAGE_OFFSET
hex
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index d3fcf7e64e3a..63292f64b25a 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -60,6 +60,25 @@ config CODE_PATCHING_SELFTEST
depends on DEBUG_KERNEL
default n
+config JUMP_LABEL_FEATURE_CHECKS
+ bool "Enable use of jump label for cpu/mmu_has_feature()"
+ depends on JUMP_LABEL
+ default y
+ help
+ Selecting this option enables use of jump labels for some internal
+ feature checks. This should generate more optimal code for those
+ checks.
+
+config JUMP_LABEL_FEATURE_CHECK_DEBUG
+ bool "Do extra check on feature fixup calls"
+ depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS
+ default n
+ help
+ This tries to catch incorrect usage of cpu_has_feature() and
+ mmu_has_feature() in the code.
+
+ If you don't know what this means, say N.
+
config FTR_FIXUP_SELFTEST
bool "Run self-tests of the feature-fixup code"
depends on DEBUG_KERNEL
@@ -149,14 +168,14 @@ config PPC_EARLY_DEBUG_BOOTX
config PPC_EARLY_DEBUG_LPAR
bool "LPAR HV Console"
- depends on PPC_PSERIES
+ depends on PPC_PSERIES && HVC_CONSOLE
help
Select this to enable early debugging for a machine with a HVC
console on vterm 0.
config PPC_EARLY_DEBUG_LPAR_HVSI
bool "LPAR HVSI Console"
- depends on PPC_PSERIES
+ depends on PPC_PSERIES && HVC_CONSOLE
help
Select this to enable early debugging for a machine with a HVSI
console on a specified vterm.
@@ -212,7 +231,6 @@ config PPC_EARLY_DEBUG_40x
config PPC_EARLY_DEBUG_CPM
bool "Early serial debugging for Freescale CPM-based serial ports"
depends on SERIAL_CPM
- select PIN_TLB if PPC_8xx
help
Select this to enable early debugging for Freescale chips
using a CPM-based serial port. This assumes that the bootwrapper
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 709a22a3e824..ca254546cd05 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -181,6 +181,11 @@ KBUILD_CFLAGS += -pipe -Iarch/$(ARCH) $(CFLAGS-y)
CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
+ifdef CONFIG_CPU_BIG_ENDIAN
+CHECKFLAGS += -D__BIG_ENDIAN__
+else
+CHECKFLAGS += -D__LITTLE_ENDIAN__
+endif
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
@@ -221,7 +226,7 @@ KBUILD_CFLAGS += -mno-sched-epilog
endif
cpu-as-$(CONFIG_4xx) += -Wa,-m405
-cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
+cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
KBUILD_AFLAGS += $(cpu-as-y)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8fe78a3efc92..1a2a6e8dc40d 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -43,7 +43,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
BOOTCFLAGS += -fno-stack-protector
endif
-BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
+BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
DTC_FLAGS ?= -p 1024
@@ -70,7 +70,7 @@ $(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o): \
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
@@ -78,7 +78,7 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
gunzip_util.c elf_util.c $(zlib) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
- uartlite.c mpc52xx-psc.c
+ uartlite.c mpc52xx-psc.c opal.c opal-calls.S
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
@@ -113,6 +113,7 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
+src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
@@ -296,6 +297,9 @@ image-$(CONFIG_TQM8560) += cuImage.tqm8560
image-$(CONFIG_SBC8548) += cuImage.sbc8548
image-$(CONFIG_KSI8560) += cuImage.ksi8560
+# Board ports in arch/powerpc/platform/86xx/Kconfig
+image-$(CONFIG_MVME7100) += dtbImage.mvme7100
+
# Board ports in arch/powerpc/platform/embedded6xx/Kconfig
image-$(CONFIG_STORCENTER) += cuImage.storcenter
image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
index bc3bf9333dde..88d8423f8ac5 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
@@ -51,6 +51,7 @@
serial2 = &serial2;
serial3 = &serial3;
pci0 = &pci0;
+ usb0 = &usb0;
dma0 = &dma0;
dma1 = &dma1;
sdhc = &sdhc;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
index 8797ce146512..f3f968c51f4b 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
@@ -51,6 +51,7 @@
serial2 = &serial2;
serial3 = &serial3;
pci0 = &pci0;
+ usb0 = &usb0;
dma0 = &dma0;
dma1 = &dma1;
sdhc = &sdhc;
diff --git a/arch/powerpc/boot/dts/fsl/kmcoge4.dts b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
index 2d4b64fcee88..ae70a24094b0 100644
--- a/arch/powerpc/boot/dts/fsl/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcoge4.dts
@@ -106,6 +106,43 @@
sata@221000 {
status = "disabled";
};
+
+ fman0: fman@400000 {
+ enet0: ethernet@e0000 {
+ phy-connection-type = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ mdio0: mdio@e1120 {
+ front_phy: ethernet-phy@11 {
+ reg = <0x11>;
+ };
+ };
+
+ enet1: ethernet@e2000 {
+ phy-connection-type = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ enet2: ethernet@e4000 {
+ status = "disabled";
+ };
+
+ enet3: ethernet@e6000 {
+ status = "disabled";
+ };
+ enet4: ethernet@e8000 {
+ phy-handle = <&front_phy>;
+ phy-connection-type = "rgmii";
+ };
+ enet5: ethernet@f0000 {
+ status = "disabled";
+ };
+ };
};
rio: rapidio@ffe0c0000 {
diff --git a/arch/powerpc/boot/dts/fsl/mvme7100.dts b/arch/powerpc/boot/dts/fsl/mvme7100.dts
new file mode 100644
index 000000000000..e2d306ad37a6
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/mvme7100.dts
@@ -0,0 +1,153 @@
+/*
+ * Device tree source for the Emerson/Artesyn MVME7100
+ *
+ * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Author: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+/include/ "mpc8641si-pre.dtsi"
+
+/ {
+ model = "MVME7100";
+ compatible = "artesyn,MVME7100";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000>;
+ };
+
+ soc: soc@f1000000 {
+ ranges = <0x00000000 0xf1000000 0x00100000>;
+
+ i2c@3000 {
+ hwmon@4c {
+ compatible = "dallas,max6649";
+ reg = <0x4c>;
+ };
+
+ rtc@68 {
+ status = "disabled";
+ };
+ };
+
+
+ enet0: ethernet@24000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@24520 {
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy1: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy2: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy3: ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+
+ enet1: ethernet@25000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@25520 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@26000 {
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ enet3: ethernet@27000 {
+ phy-handle = <&phy3>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@27520 {
+ status = "disabled";
+ };
+
+ serial1: serial@4600 {
+ status = "disabled";
+ };
+ };
+
+ lbc: localbus@f1005000 {
+ reg = <0xf1005000 0x1000>;
+
+ ranges = <0 0 0xf8000000 0x08000000 // NOR Flash (128MB)
+ 2 0 0xf2030000 0x00010000 // NAND Flash (8GB)
+ 3 0 0xf2400000 0x00080000 // MRAM (512KB)
+ 4 0 0xf2000000 0x00010000 // BCSR
+ 5 0 0xf2010000 0x00010000>; // QUART
+
+ bcsr@4,0 {
+ compatible = "artesyn,mvme7100-bcsr";
+ reg = <4 0 0x10000>;
+ };
+
+ serial@5,1000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x1000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+
+ serial@5,2000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x2000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+
+ serial@5,3000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x3000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+
+ serial@5,4000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <5 0x4000 0x100>;
+ clock-frequency = <1843200>;
+ interrupts = <11 1 0 0>;
+ };
+ };
+
+ pci0: pcie@f1008000 {
+ status = "disabled";
+ };
+
+ pci1: pcie@f1009000 {
+ status = "disabled";
+ };
+
+ chosen {
+ linux,stdout-path = &serial0;
+ };
+};
+
+/include/ "mpc8641si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi b/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
index 29dad723091e..fcc7e5b7fd47 100644
--- a/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qonverge-usb2-dr-0.dtsi
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-usb@210000 {
+usb0: usb@210000 {
compatible = "fsl-usb2-dr";
reg = <0x210000 0x1000>;
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 507649ece0a1..44e399b17f6f 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -607,7 +607,7 @@
/include/ "qoriq-gpio-3.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
usb0: usb@210000 {
- compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph";
+ compatible = "fsl-usb2-mph-v2.5", "fsl-usb2-mph";
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */
phy_type = "utmi";
@@ -615,7 +615,7 @@
};
/include/ "qoriq-usb2-dr-0.dtsi"
usb1: usb@211000 {
- compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr";
+ compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x524>; /* USB2LIODNR */
dr_mode = "host";
@@ -673,3 +673,48 @@
};
};
};
+
+&qe {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "qe";
+ compatible = "fsl,qe";
+ fsl,qe-num-riscs = <1>;
+ fsl,qe-num-snums = <28>;
+
+ qeic: interrupt-controller@80 {
+ interrupt-controller;
+ compatible = "fsl,qe-ic";
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ reg = <0x80 0x80>;
+ interrupts = <95 2 0 0 94 2 0 0>; //high:79 low:78
+ };
+
+ ucc@2000 {
+ cell-index = <1>;
+ reg = <0x2000 0x200>;
+ interrupts = <32>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2200 {
+ cell-index = <3>;
+ reg = <0x2200 0x200>;
+ interrupts = <34>;
+ interrupt-parent = <&qeic>;
+ };
+
+ muram@10000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ ranges = <0x0 0x10000 0x6000>;
+
+ data-only@0 {
+ compatible = "fsl,qe-muram-data",
+ "fsl,cpm-muram-data";
+ reg = <0x0 0x6000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi
index 8c7ea6c05de9..863f9431285f 100644
--- a/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xd4rdb.dtsi
@@ -212,4 +212,42 @@
0 0x00010000>;
};
};
+
+ qe: qe@ffe140000 {
+ ranges = <0x0 0xf 0xfe140000 0x40000>;
+ reg = <0xf 0xfe140000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi
index 977af355b388..2fd4cbe7098f 100644
--- a/arch/powerpc/boot/dts/fsl/t104xqds.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xqds.dtsi
@@ -366,4 +366,42 @@
0 0x00010000>;
};
};
+
+ qe: qe@ffe140000 {
+ ranges = <0x0 0xf 0xfe140000 0x40000>;
+ reg = <0xf 0xfe140000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
index 7c4afdb44b46..5fdddbd2a62b 100644
--- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
@@ -222,4 +222,42 @@
0 0x00010000>;
};
};
+
+ qe: qe@ffe140000 {
+ ranges = <0x0 0xf 0xfe140000 0x40000>;
+ reg = <0xf 0xfe140000 0 0x480>;
+ brg-frequency = <0>;
+ bus-frequency = <0>;
+
+ si1: si@700 {
+ compatible = "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ compatible = "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+
+ ucc_serial: ucc@2200 {
+ compatible = "fsl,t1040-ucc-uart";
+ port-number = <0>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ };
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
index 1184a746fcb1..038cf8fadee4 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
@@ -56,6 +56,8 @@
pci1 = &pci1;
pci2 = &pci2;
pci3 = &pci3;
+ usb0 = &usb0;
+ usb1 = &usb1;
dma0 = &dma0;
dma1 = &dma1;
dma2 = &dma2;
diff --git a/arch/powerpc/boot/motload-head.S b/arch/powerpc/boot/motload-head.S
new file mode 100644
index 000000000000..41cabb4b63fa
--- /dev/null
+++ b/arch/powerpc/boot/motload-head.S
@@ -0,0 +1,11 @@
+#include "ppc_asm.h"
+
+ .text
+ .globl _zimage_start
+_zimage_start:
+ mfmsr r10
+ rlwinm r10,r10,0,~(1<<15) /* Clear MSR_EE */
+ sync
+ mtmsr r10
+ isync
+ b _zimage_start_lib
diff --git a/arch/powerpc/boot/mvme7100.c b/arch/powerpc/boot/mvme7100.c
new file mode 100644
index 000000000000..8b0a932311af
--- /dev/null
+++ b/arch/powerpc/boot/mvme7100.c
@@ -0,0 +1,59 @@
+/*
+ * Motload compatibility for the Emerson/Artesyn MVME7100
+ *
+ * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Author: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "ops.h"
+#include "stdio.h"
+#include "cuboot.h"
+
+#define TARGET_86xx
+#define TARGET_HAS_ETH1
+#define TARGET_HAS_ETH2
+#define TARGET_HAS_ETH3
+#include "ppcboot.h"
+
+static bd_t bd;
+
+BSS_STACK(16384);
+
+static void mvme7100_fixups(void)
+{
+ void *devp;
+ unsigned long busfreq = bd.bi_busfreq * 1000000;
+
+ dt_fixup_cpu_clocks(bd.bi_intfreq * 1000000, busfreq / 4, busfreq);
+
+ devp = finddevice("/soc@f1000000");
+ if (devp)
+ setprop(devp, "bus-frequency", &busfreq, sizeof(busfreq));
+
+ devp = finddevice("/soc/serial@4500");
+ if (devp)
+ setprop(devp, "clock-frequency", &busfreq, sizeof(busfreq));
+
+ dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
+
+ dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
+ dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
+ dt_fixup_mac_address_by_alias("ethernet2", bd.bi_enet2addr);
+ dt_fixup_mac_address_by_alias("ethernet3", bd.bi_enet3addr);
+}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ CUBOOT_INIT();
+ fdt_init(_dtb_start);
+ serial_console_init();
+ platform_ops.fixups = mvme7100_fixups;
+}
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
new file mode 100644
index 000000000000..ff2f1b97bc53
--- /dev/null
+++ b/arch/powerpc/boot/opal-calls.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ppc_asm.h"
+#include "../include/asm/opal-api.h"
+
+ .text
+
+#define OPAL_CALL(name, token) \
+ .globl name; \
+name: \
+ li r0, token; \
+ b opal_call;
+
+opal_call:
+ mflr r11
+ std r11,16(r1)
+ mfcr r12
+ stw r12,8(r1)
+ mr r13,r2
+
+ /* Set opal return address */
+ ld r11,opal_return@got(r2)
+ mtlr r11
+ mfmsr r12
+
+ /* switch to BE when we enter OPAL */
+ li r11,MSR_LE
+ andc r12,r12,r11
+ mtspr SPRN_HSRR1,r12
+
+ /* load the opal call entry point and base */
+ ld r11,opal@got(r2)
+ ld r12,8(r11)
+ ld r2,0(r11)
+ mtspr SPRN_HSRR0,r12
+ hrfid
+
+opal_return:
+ FIXUP_ENDIAN
+ mr r2,r13;
+ lwz r11,8(r1);
+ ld r12,16(r1)
+ mtcr r11;
+ mtlr r12
+ blr
+
+OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
+OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
+OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE);
+OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS);
+OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
new file mode 100644
index 000000000000..1f37e1c1d6d8
--- /dev/null
+++ b/arch/powerpc/boot/opal.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "ops.h"
+#include "stdio.h"
+#include "io.h"
+#include <libfdt.h>
+#include "../include/asm/opal-api.h"
+
+#ifdef __powerpc64__
+
+/* Global OPAL struct used by opal-call.S */
+struct opal {
+ u64 base;
+ u64 entry;
+} opal;
+
+static u32 opal_con_id;
+
+int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
+int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
+int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
+int64_t opal_console_flush(uint64_t term_number);
+int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+
+static int opal_con_open(void)
+{
+ return 0;
+}
+
+static void opal_con_putc(unsigned char c)
+{
+ int64_t rc;
+ uint64_t olen, len;
+
+ do {
+ rc = opal_console_write_buffer_space(opal_con_id, &olen);
+ len = be64_to_cpu(olen);
+ if (rc)
+ return;
+ opal_poll_events(NULL);
+ } while (len < 1);
+
+
+ olen = cpu_to_be64(1);
+ opal_console_write(opal_con_id, &olen, &c);
+}
+
+static void opal_con_close(void)
+{
+ opal_console_flush(opal_con_id);
+}
+
+static void opal_init(void)
+{
+ void *opal_node;
+
+ opal_node = finddevice("/ibm,opal");
+ if (!opal_node)
+ return;
+ if (getprop(opal_node, "opal-base-address", &opal.base, sizeof(u64)) < 0)
+ return;
+ opal.base = be64_to_cpu(opal.base);
+ if (getprop(opal_node, "opal-entry-address", &opal.entry, sizeof(u64)) < 0)
+ return;
+ opal.entry = be64_to_cpu(opal.entry);
+}
+
+int opal_console_init(void *devp, struct serial_console_data *scdp)
+{
+ opal_init();
+
+ if (devp) {
+ int n = getprop(devp, "reg", &opal_con_id, sizeof(u32));
+ if (n != sizeof(u32))
+ return -1;
+ opal_con_id = be32_to_cpu(opal_con_id);
+ } else
+ opal_con_id = 0;
+
+ scdp->open = opal_con_open;
+ scdp->putc = opal_con_putc;
+ scdp->close = opal_con_close;
+
+ return 0;
+}
+#else
+int opal_console_init(void *devp, struct serial_console_data *scdp)
+{
+ return -1;
+}
+#endif /* __powerpc64__ */
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 5e75e1c5518e..e19b64ef977a 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -89,6 +89,7 @@ int mpsc_console_init(void *devp, struct serial_console_data *scdp);
int cpm_console_init(void *devp, struct serial_console_data *scdp);
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp);
int uartlite_console_init(void *devp, struct serial_console_data *scdp);
+int opal_console_init(void *devp, struct serial_console_data *scdp);
void *simple_alloc_init(char *base, unsigned long heap_size,
unsigned long granularity, unsigned long max_allocs);
extern void flush_cache(void *, unsigned long);
diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h
index 35ea60c1f070..b03373d8b386 100644
--- a/arch/powerpc/boot/ppc_asm.h
+++ b/arch/powerpc/boot/ppc_asm.h
@@ -61,6 +61,10 @@
#define SPRN_TBRL 268
#define SPRN_TBRU 269
+#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
+#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+
+#define MSR_LE 0x0000000000000001
#define FIXUP_ENDIAN \
tdi 0, 0, 0x48; /* Reverse endian of b . + 8 */ \
diff --git a/arch/powerpc/boot/ppcboot.h b/arch/powerpc/boot/ppcboot.h
index 6ae6f9063952..453df429d5d0 100644
--- a/arch/powerpc/boot/ppcboot.h
+++ b/arch/powerpc/boot/ppcboot.h
@@ -43,7 +43,7 @@ typedef struct bd_info {
unsigned long bi_sramstart; /* start of SRAM memory */
unsigned long bi_sramsize; /* size of SRAM memory */
#if defined(TARGET_8xx) || defined(TARGET_CPM2) || defined(TARGET_85xx) ||\
- defined(TARGET_83xx)
+ defined(TARGET_83xx) || defined(TARGET_86xx)
unsigned long bi_immr_base; /* base of IMMR register */
#endif
#if defined(TARGET_PPC_MPC52xx)
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 167ee9433de6..e04c1e4063ae 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -132,6 +132,8 @@ int serial_console_init(void)
else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") ||
dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a"))
rc = uartlite_console_init(devp, &serial_cd);
+ else if (dt_is_compatible(devp, "ibm,opal-console-raw"))
+ rc = opal_console_init(devp, &serial_cd);
/* Add other serial console driver calls here */
diff --git a/arch/powerpc/boot/types.h b/arch/powerpc/boot/types.h
index 31393d17a9c1..85565a89bcc2 100644
--- a/arch/powerpc/boot/types.h
+++ b/arch/powerpc/boot/types.h
@@ -12,6 +12,16 @@ typedef short s16;
typedef int s32;
typedef long long s64;
+/* required for opal-api.h */
+typedef u8 uint8_t;
+typedef u16 uint16_t;
+typedef u32 uint32_t;
+typedef u64 uint64_t;
+typedef s8 int8_t;
+typedef s16 int16_t;
+typedef s32 int32_t;
+typedef s64 int64_t;
+
#define min(x,y) ({ \
typeof(x) _x = (x); \
typeof(y) _y = (y); \
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 6a19fcef5596..6681ec3625c9 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -302,6 +302,11 @@ mvme5100)
platformo="$object/fixed-head.o $object/mvme5100.o"
binary=y
;;
+mvme7100)
+ platformo="$object/motload-head.o $object/mvme7100.o"
+ link_address='0x4000000'
+ binary=y
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 9110a5cb1bb7..3438ed99c088 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 790366652ba3..36c44c0b560c 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 01bd71bac027..ad2156c6e2fc 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index e2036b7c7edb..28adb782ec51 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -32,8 +32,6 @@ CONFIG_SCSI_SAS_ATTRS=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -44,7 +42,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_AVERAGE=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index efd51701fb4d..a00f434c4d47 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 5ded3dcdf60a..e500e6a12b3e 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
index bcb0c4d854db..65dc084a154c 100644
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ b/arch/powerpc/configs/40x/virtex_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index 37c838f26e53..567f99bd64a3 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -18,7 +18,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index ea4ef02a0578..143b2fbddb46 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -110,10 +109,9 @@ CONFIG_MMC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
CONFIG_VFAT_FS=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 95494209c124..6bba1a55b827 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index a046f08413fd..477d99fefd9a 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index a326b773ac05..6b77aea79b6c 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -49,7 +49,7 @@ CONFIG_SENSORS_AD7414=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index d939e71fff7d..c8e6f048a122 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 5aa312a158dd..3799a26de6f4 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -30,7 +30,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -71,10 +70,9 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index 5909e016c37d..c265f54ab9e5 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -19,7 +19,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index 57499d25c877..bb6bd6d90821 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -43,7 +42,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
-CONFIG_I2O=y
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 5d52185d8f5a..060f2edddb71 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -76,8 +75,7 @@ CONFIG_LOGO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 0ad3e449526e..115a6b2be18b 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -56,10 +55,9 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_THERMAL=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index a042335971da..b999048c4ae6 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index 91c2aff9bd55..b8c9ee45d0a2 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 7fddf3fe275c..a4bb048448da 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -41,7 +40,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_FUSION=y
CONFIG_FUSION_SAS=y
-CONFIG_I2O=y
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_RXB=256
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 6928012f3813..63302fbd184d 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -85,9 +84,8 @@ CONFIG_RTC_DRV_M41T80_WDT=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_REISERFS_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index c294369cc39f..b3792fd8111d 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index e779228d6cd6..ff6f86241418 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
index 53d0300b3390..ce052064bcbb 100644
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ b/arch/powerpc/configs/44x/virtex5_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index ee434375fc24..ab932488e68b 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -23,7 +23,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_VLAN_8021Q=y
@@ -73,9 +72,7 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 19fad0e0016e..c1faac800806 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -61,8 +60,7 @@ CONFIG_USB_STORAGE=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 5f40ba92a39a..9493b02ac660 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -53,8 +52,7 @@ CONFIG_I2C_MPC=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 909e185a88d1..fe8126bc1655 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -73,8 +72,7 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 649a01a0044d..1554de6968ca 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -34,7 +34,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -74,8 +73,7 @@ CONFIG_RTC_DRV_PCF8563=m
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=850
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index efab8388ea92..b8b316b884aa 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -75,8 +74,7 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index bcdfb07921fc..b60cac088a7b 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -63,8 +62,7 @@ CONFIG_USB_EHCI_FSL=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index 11a959283149..9547dcdd6489 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -28,7 +28,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_TIPC=y
CONFIG_BRIDGE=m
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index b47a41f77836..80aa844c1428 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -79,8 +78,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index e28c83f320c1..d89d13bc6901 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -77,8 +76,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index e84d35b848c0..e789518a2881 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -52,8 +51,7 @@ CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index ae145f410590..917a49ca2bd1 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -64,8 +63,7 @@ CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index 87fc15bce407..00f636e95cc8 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -75,8 +74,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 9a2ff25a2e98..a539d44d1dba 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -66,8 +65,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index e44edc575549..9f0ddc830c82 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -51,8 +50,7 @@ CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index 94a7d85f1603..ceed4c1f0ab5 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -57,8 +56,7 @@ CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 761ed8ea0729..a6819bf3ef5e 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -24,7 +24,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -65,8 +64,7 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index bcf1b48cc9e6..4bd1992e4d98 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -50,8 +49,7 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index f0f0ebf75125..2d4bb63882b8 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -24,7 +24,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -77,8 +76,7 @@ CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index d2e4d82de14d..b3380dbd1925 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -65,9 +64,7 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
CONFIG_USB_STORAGE=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index b0939dd9ad6f..c79283be5680 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -165,10 +165,8 @@ CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_FUSE_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/85xx/kmp204x_defconfig b/arch/powerpc/configs/85xx/kmp204x_defconfig
index e94d3eb4a8c1..aaaaa609cd24 100644
--- a/arch/powerpc/configs/85xx/kmp204x_defconfig
+++ b/arch/powerpc/configs/85xx/kmp204x_defconfig
@@ -64,7 +64,6 @@ CONFIG_IP_PIMSM_V2=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_TIPC=y
@@ -189,7 +188,7 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_UIO=y
CONFIG_STAGING=y
-CONFIG_CLK_PPC_CORENET=y
+CONFIG_CLK_QORIQ=y
CONFIG_EXT2_FS=y
CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index 6f753a71fe5d..bd814dfb0bbd 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -49,8 +48,7 @@ CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index e38c373f2edf..32af10def641 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -41,8 +40,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index 48fc8e3a7be0..a52b2170ee33 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -21,7 +21,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -44,8 +43,7 @@ CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index ecb0c3bf8796..002bb48abaa3 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -46,8 +45,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index 72b7ccfbe2c2..97ae02377cf3 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index 0ad7bd5ee6b6..13579cb30539 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
CONFIG_MTD=y
@@ -79,8 +78,7 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=y
CONFIG_RTC_CLASS=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index b45190556c0c..384926f3ce1d 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -17,7 +17,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_IP_NF_IPTABLES=m
@@ -53,8 +52,7 @@ CONFIG_AGP=m
CONFIG_DRM=m
CONFIG_SOUND=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=m
CONFIG_UDF_FS=m
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 4daaf2943b44..908f3885f4a5 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -50,8 +49,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index bb402b3cf786..f47e57610b7c 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -52,8 +51,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 685d0fb132d6..42f5d0a7698e 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -28,7 +28,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index 02a931d4e954..71552b7929cd 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -52,8 +51,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 633d5b759a36..25aac973d6d7 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -20,7 +20,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -52,8 +51,7 @@ CONFIG_I2C_MPC=y
CONFIG_HWMON_DEBUG_CHIP=y
CONFIG_SENSORS_LM75=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 858b539d004b..dbd961de251e 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -54,7 +54,6 @@ CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -124,8 +123,7 @@ CONFIG_RTC_DRV_CMOS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index f91f8895fc93..d3dd6b8865c0 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -74,9 +74,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_NR_UARTS=5
CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=5
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250=y
CONFIG_SERIO_LIBPS2=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index d89ff40d39b7..6a3f825452e9 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -24,7 +24,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 84f1b4140579..8b83ce8a01e7 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -29,7 +29,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
@@ -106,7 +105,6 @@ CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=m
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index 340685caa7b8..7c9d95370150 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -306,15 +306,13 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_AMSO1100=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
CONFIG_INFINIBAND_SRP=m
CONFIG_DMADEVICES=y
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_AUTOFS4_FS=m
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index db328e618bb9..7b6f30dece34 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -184,7 +184,7 @@ CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_CELL=y
CONFIG_UIO=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index 253a9f200097..ac9a50da2dc6 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -110,7 +110,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=m
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 7c137041f1d6..3403b85f9d81 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -50,9 +49,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index ee96be889dac..95411aeeeb8d 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -26,7 +26,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 41e4d359524d..1a61e81ab0cd 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -33,8 +33,7 @@ CONFIG_DUMMY=y
CONFIG_EFS_FS=m
CONFIG_EXPERT=y
CONFIG_EXT2_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_FB=y
CONFIG_FHANDLE=y
CONFIG_FIXED_PHY=y
@@ -55,7 +54,6 @@ CONFIG_IKCONFIG=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1d9ad8500909..3b2511c090d8 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -216,14 +216,13 @@ CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_APPLEDISPLAY=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 6c6c60f1aba4..c0eec4a5df4e 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
@@ -76,9 +75,7 @@ CONFIG_SND_SEQUENCER_OSS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_MSDOS_FS=y
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index 5e0f2551e5c7..e56e80090529 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -52,7 +51,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index 62ae92956d05..b413c19d7031 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -109,8 +108,7 @@ CONFIG_USB_SERIAL_FTDI_SIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RS5C372=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_XFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index ac9666f8abf1..27abfab31219 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -35,7 +35,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
@@ -102,9 +101,7 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_TI=m
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_FS_DAX=y
CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 666922c5b572..197acaa026eb 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_TIPC=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index d16d6c5cb282..0b4854cf26cb 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -27,7 +27,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
@@ -53,7 +52,7 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_BLK_DEV_XIP=y
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_SCSI=y
@@ -113,10 +112,9 @@ CONFIG_RTC_DRV_MPC5121=y
CONFIG_DMADEVICES=y
CONFIG_MPC512X_DMA=y
CONFIG_MPC512x_LPBFIFO=y
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 9fd041bfd778..88336d0df0d6 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -114,8 +113,7 @@ CONFIG_RTC_DRV_PCF8563=m
CONFIG_DMADEVICES=y
CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index e2647d5bb605..d933326b4cf9 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -49,8 +48,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index 825b052176af..4cb0f617c0d6 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -22,7 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -56,8 +55,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 671e220a9a98..6574477fd726 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -37,7 +37,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_ESP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -101,8 +100,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 321412c5dae4..998454471a48 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -24,7 +24,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_NETDEVICES=y
@@ -37,8 +36,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
diff --git a/arch/powerpc/configs/mpc86xx_basic_defconfig b/arch/powerpc/configs/mpc86xx_basic_defconfig
index 33af5c5de105..3283f0586e11 100644
--- a/arch/powerpc/configs/mpc86xx_basic_defconfig
+++ b/arch/powerpc/configs/mpc86xx_basic_defconfig
@@ -8,3 +8,4 @@ CONFIG_GEF_SBC610=y
CONFIG_MPC8610_HPCD=y
CONFIG_MPC8641_HPCN=y
CONFIG_SBC8641D=y
+CONFIG_MVME7100=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 2a10f98d4ee5..91f53f1bec5d 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -25,7 +25,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index 525a2cb500a7..139add95a16a 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -92,8 +91,7 @@ CONFIG_I2C_MPC=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
CONFIG_XFS_FS=m
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 8f94782eb907..76f4edd441d3 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -152,8 +152,7 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 3f6c9a6c815c..e5a674d4a716 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -40,7 +40,6 @@ CONFIG_INET_AH=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
@@ -281,10 +280,8 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_APPLEDISPLAY=m
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 045031048f8d..dce352e9153b 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -80,6 +80,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_POWERNV_FLASH=y
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -181,6 +182,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_JSM=m
CONFIG_VIRTIO_CONSOLE=m
+CONFIG_POWERNV_OP_PANEL=m
CONFIG_IPMI_HANDLER=y
CONFIG_IPMI_DEVICE_INTERFACE=y
CONFIG_IPMI_POWERNV=y
@@ -233,9 +235,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index a43bf6ea10fb..370c0bbcff71 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
CONFIG_MTD=y
@@ -67,8 +66,7 @@ CONFIG_THERMAL=y
CONFIG_FB=m
CONFIG_FB_XILINX=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index bbc7f76d52c8..2766e8f590bc 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -35,7 +35,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_BRIDGE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CONNECTOR=y
@@ -89,8 +88,7 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=m
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index b041fb607376..0a8d250cb97e 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -247,7 +247,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_EHCA=m
CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
@@ -262,14 +261,11 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index ddf9773458cf..fd2edd650c20 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -178,15 +178,11 @@ CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 1dde0be2be30..8fbf49801233 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -512,7 +512,6 @@ CONFIG_E1000E=m
CONFIG_IGB=m
CONFIG_IXGB=m
CONFIG_IXGBE=m
-CONFIG_IP1000=m
CONFIG_MV643XX_ETH=m
CONFIG_SKGE=m
CONFIG_SKY2=m
@@ -1029,14 +1028,14 @@ CONFIG_UIO_CIF=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=m
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=m
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_JBD2_DEBUG=y
CONFIG_REISERFS_FS=m
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index 3e336ee8bb4a..50b2bad51d0a 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
CONFIG_NETFILTER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -40,7 +39,6 @@ CONFIG_MTD_CFI_I4=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_FS_ENET=y
@@ -59,8 +57,7 @@ CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index c40046074f8b..ee0ec5a682fc 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -51,7 +51,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 36871a4bfa54..654aeffc57ef 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,6 +53,7 @@ CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_PPC_64K_PAGES=y
@@ -223,7 +224,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_EHCA=m
CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
@@ -233,14 +233,11 @@ CONFIG_INFINIBAND_SRP=m
CONFIG_INFINIBAND_ISER=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index b5db7dffe86d..e9122b15e5fd 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -25,7 +25,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
@@ -72,8 +71,7 @@ CONFIG_USB_STORAGE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
CONFIG_XFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 4c973c5321c6..78fddf24b5d3 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -29,7 +29,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 34eaf528fa87..dcdd51b57783 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -32,7 +32,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_BT=y
@@ -96,9 +95,7 @@ CONFIG_MMC_SDHCI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_FUSE_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h
new file mode 100644
index 000000000000..c133246df467
--- /dev/null
+++ b/arch/powerpc/include/asm/accounting.h
@@ -0,0 +1,24 @@
+/*
+ * Common time accounting prototypes and such for all ppc machines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_ACCOUNTING_H
+#define __POWERPC_ACCOUNTING_H
+
+/* Stuff for accurate time accounting */
+struct cpu_accounting_data {
+ unsigned long user_time; /* accumulated usermode TB ticks */
+ unsigned long system_time; /* accumulated system TB ticks */
+ unsigned long user_time_scaled; /* accumulated usermode SPURR ticks */
+ unsigned long starttime; /* TB value snapshot */
+ unsigned long starttime_user; /* TB value on exit to usermode */
+ unsigned long startspurr; /* SPURR value snapshot */
+ unsigned long utime_sspurr; /* ->user_time when ->startspurr set */
+};
+
+#endif
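
The new asm/accounting.h only declares snapshot fields; as a rough illustration of how such per-CPU snapshot fields are typically consumed (not taken from this patch), the standalone sketch below accumulates the delta since the last snapshot and then advances it. The struct is trimmed to two fields and read_timebase() is a made-up stand-in for the real timebase read.

    #include <stdio.h>

    struct cpu_accounting_data {
        unsigned long user_time;      /* accumulated usermode ticks */
        unsigned long starttime_user; /* tick value on exit to usermode */
    };

    static unsigned long fake_tb = 1000;                /* stand-in timebase */
    static unsigned long read_timebase(void) { return fake_tb += 250; }

    static void account_user_exit(struct cpu_accounting_data *acct)
    {
        unsigned long now = read_timebase();

        acct->user_time += now - acct->starttime_user;  /* accumulate delta */
        acct->starttime_user = now;                     /* advance snapshot */
    }

    int main(void)
    {
        struct cpu_accounting_data acct = { 0, read_timebase() };

        account_user_exit(&acct);
        printf("accumulated user ticks: %lu\n", acct.user_time);
        return 0;
    }
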
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index dc85dcb891cf..cee3aa087653 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -36,11 +36,13 @@
#define PPC_MIN_STKFRM 112
#ifdef __BIG_ENDIAN__
+#define LHZX_BE stringify_in_c(lhzx)
#define LWZX_BE stringify_in_c(lwzx)
#define LDX_BE stringify_in_c(ldx)
#define STWX_BE stringify_in_c(stwx)
#define STDX_BE stringify_in_c(stdx)
#else
+#define LHZX_BE stringify_in_c(lhbrx)
#define LWZX_BE stringify_in_c(lwbrx)
#define LDX_BE stringify_in_c(ldbrx)
#define STWX_BE stringify_in_c(stwbrx)
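
LHZX_BE joins the existing *_BE load/store macros: it expands to a plain lhzx on big-endian builds and to the byte-reversing lhbrx on little-endian ones, so assembly reading a big-endian halfword gets a native-order value either way. A minimal C analogue of that guarantee, using a hypothetical load_be16() helper rather than anything from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Read a big-endian 16-bit value in native order on any host,
     * the same property LHZX_BE gives assembly callers. */
    static inline uint16_t load_be16(const uint8_t *p)
    {
        return (uint16_t)(p[0] << 8 | p[1]);
    }

    int main(void)
    {
        uint8_t buf[2] = { 0x12, 0x34 };

        printf("0x%04x\n", load_be16(buf));   /* 0x1234 on BE and LE hosts */
        return 0;
    }
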
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..e71b9097594c
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
+#define _ASM_POWERPC_ASM_PROTOTYPES_H
+/*
+ * This file is for prototypes of C functions that are only called
+ * from asm, and any associated variables.
+ *
+ * Copyright 2016, Daniel Axtens, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <linux/kprobes.h>
+
+/* SMP */
+extern struct thread_info *current_set[NR_CPUS];
+extern struct thread_info *secondary_ti;
+void start_secondary(void *unused);
+
+/* kexec */
+struct paca_struct;
+struct kimage;
+extern struct paca_struct kexec_paca;
+void kexec_copy_flush(struct kimage *image);
+
+/* pseries hcall tracing */
+extern struct static_key hcall_tracepoint_key;
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
+void __trace_hcall_exit(long opcode, unsigned long retval,
+ unsigned long *retbuf);
+/* OPAL tracing */
+#ifdef HAVE_JUMP_LABEL
+extern struct static_key opal_tracepoint_key;
+#endif
+
+void __trace_opal_entry(unsigned long opcode, unsigned long *args);
+void __trace_opal_exit(long opcode, unsigned long retval);
+
+/* VMX copying */
+int enter_vmx_usercopy(void);
+int exit_vmx_usercopy(void);
+int enter_vmx_copy(void);
+void * exit_vmx_copy(void *dest);
+
+/* Traps */
+long machine_check_early(struct pt_regs *regs);
+long hmi_exception_realmode(struct pt_regs *regs);
+void SMIException(struct pt_regs *regs);
+void handle_hmi_exception(struct pt_regs *regs);
+void instruction_breakpoint_exception(struct pt_regs *regs);
+void RunModeException(struct pt_regs *regs);
+void __kprobes single_step_exception(struct pt_regs *regs);
+void __kprobes program_check_exception(struct pt_regs *regs);
+void alignment_exception(struct pt_regs *regs);
+void StackOverflow(struct pt_regs *regs);
+void nonrecoverable_exception(struct pt_regs *regs);
+void kernel_fp_unavailable_exception(struct pt_regs *regs);
+void altivec_unavailable_exception(struct pt_regs *regs);
+void vsx_unavailable_exception(struct pt_regs *regs);
+void fp_unavailable_tm(struct pt_regs *regs);
+void altivec_unavailable_tm(struct pt_regs *regs);
+void vsx_unavailable_tm(struct pt_regs *regs);
+void facility_unavailable_exception(struct pt_regs *regs);
+void TAUException(struct pt_regs *regs);
+void altivec_assist_exception(struct pt_regs *regs);
+void unrecoverable_exception(struct pt_regs *regs);
+void kernel_bad_stack(struct pt_regs *regs);
+void system_reset_exception(struct pt_regs *regs);
+void machine_check_exception(struct pt_regs *regs);
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs);
+
+#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
index 60f47649306f..c45189aa7476 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
@@ -11,4 +11,19 @@ extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
+
+static inline int hstate_get_psize(struct hstate *hstate)
+{
+ unsigned long shift;
+
+ shift = huge_page_shift(hstate);
+ if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+ return MMU_PAGE_2M;
+ else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
+ return MMU_PAGE_1G;
+ else {
+ WARN(1, "Wrong huge page shift\n");
+ return mmu_virtual_psize;
+ }
+}
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 74839f24f412..287a656ceb57 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -24,6 +24,7 @@
#include <asm/book3s/64/pgtable.h>
#include <asm/bug.h>
#include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
/*
* SLB
@@ -124,6 +125,45 @@
#ifndef __ASSEMBLY__
+struct mmu_hash_ops {
+ void (*hpte_invalidate)(unsigned long slot,
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, int local);
+ long (*hpte_updatepp)(unsigned long slot,
+ unsigned long newpp,
+ unsigned long vpn,
+ int bpsize, int apsize,
+ int ssize, unsigned long flags);
+ void (*hpte_updateboltedpp)(unsigned long newpp,
+ unsigned long ea,
+ int psize, int ssize);
+ long (*hpte_insert)(unsigned long hpte_group,
+ unsigned long vpn,
+ unsigned long prpn,
+ unsigned long rflags,
+ unsigned long vflags,
+ int psize, int apsize,
+ int ssize);
+ long (*hpte_remove)(unsigned long hpte_group);
+ int (*hpte_removebolted)(unsigned long ea,
+ int psize, int ssize);
+ void (*flush_hash_range)(unsigned long number, int local);
+ void (*hugepage_invalidate)(unsigned long vsid,
+ unsigned long addr,
+ unsigned char *hpte_slot_array,
+ int psize, int ssize, int local);
+ /*
+ * Special for kexec.
+ * To be called in real mode with interrupts disabled. No locks are
+ * taken as such, concurrent access on pre POWER5 hardware could result
+ * in a deadlock.
+ * The linear mapping is destroyed as well.
+ */
+ void (*hpte_clear_all)(void);
+};
+extern struct mmu_hash_ops mmu_hash_ops;
+
struct hash_pte {
__be64 v;
__be64 r;
@@ -151,6 +191,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+static inline unsigned long get_sllp_encoding(int psize)
+{
+ unsigned long sllp;
+
+ sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
+ ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
+ return sllp;
+}
+
#endif /* __ASSEMBLY__ */
/*
@@ -352,10 +401,13 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+#ifdef CONFIG_PPC_PSERIES
+void hpte_init_pseries(void);
+#else
+static inline void hpte_init_pseries(void) { }
+#endif
+
extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_beat(void);
-extern void hpte_init_beat_v3(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
@@ -435,7 +487,7 @@ extern void slb_set_size(u16 size);
* function. Used in slb_allocate() and do_stab_bolted. The function
* computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
*
- * rt = register continaing the proto-VSID and into which the
+ * rt = register containing the proto-VSID and into which the
* VSID will be stored
* rx = scratch register (clobbered)
*
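
The new mmu_hash_ops structure replaces direct calls into a fixed backend with a table of function pointers that the chosen backend (native, pseries, ...) fills in at boot. The standalone sketch below shows only the registration-plus-dispatch pattern; the two-entry ops struct and the "native" callbacks are illustrative, not the kernel's.

    #include <stdio.h>

    struct hash_ops {
        long (*hpte_insert)(unsigned long group, unsigned long vpn);
        void (*hpte_invalidate)(unsigned long slot);
    };

    static long native_hpte_insert(unsigned long group, unsigned long vpn)
    {
        printf("insert: group=%lu vpn=%#lx\n", group, vpn);
        return 0;
    }

    static void native_hpte_invalidate(unsigned long slot)
    {
        printf("invalidate: slot=%lu\n", slot);
    }

    static struct hash_ops mmu_ops;   /* filled in once by the chosen backend */

    int main(void)
    {
        mmu_ops.hpte_insert = native_hpte_insert;       /* backend registration */
        mmu_ops.hpte_invalidate = native_hpte_invalidate;

        mmu_ops.hpte_insert(3, 0x1000);                 /* generic code dispatches */
        mmu_ops.hpte_invalidate(7);                     /* through the table       */
        return 0;
    }
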
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 5854263d4d6e..8afb0e00f7d9 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -23,8 +23,6 @@ struct mmu_psize_def {
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX)
-
#endif /* __ASSEMBLY__ */
/* 64-bit classic hash table MMU */
@@ -102,6 +100,9 @@ extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
/* MMU initialization */
+void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
@@ -127,11 +128,15 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- if (radix_enabled())
+ if (early_radix_enabled())
return radix__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
return hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
}
+
+extern int (*register_process_table)(unsigned long base, unsigned long page_size,
+ unsigned long tbl_size);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index 71e9abced493..9db83b4e017d 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -11,7 +11,7 @@ static inline int pmd_huge(pmd_t pmd)
* leaf pte for huge page
*/
if (radix_enabled())
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
return 0;
}
@@ -21,7 +21,7 @@ static inline int pud_huge(pud_t pud)
* leaf pte for huge page
*/
if (radix_enabled())
- return !!(pud_val(pud) & _PAGE_PTE);
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
return 0;
}
@@ -31,7 +31,7 @@ static inline int pgd_huge(pgd_t pgd)
* leaf pte for huge page
*/
if (radix_enabled())
- return !!(pgd_val(pgd) & _PAGE_PTE);
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
return 0;
}
#define pgd_huge pgd_huge
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index cb2d0a5fa3f8..0d2845b44763 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -15,7 +15,7 @@ static inline int pmd_huge(pmd_t pmd)
/*
* leaf pte for huge page
*/
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
static inline int pud_huge(pud_t pud)
@@ -23,7 +23,7 @@ static inline int pud_huge(pud_t pud)
/*
* leaf pte for huge page
*/
- return !!(pud_val(pud) & _PAGE_PTE);
+ return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
static inline int pgd_huge(pgd_t pgd)
@@ -31,7 +31,7 @@ static inline int pgd_huge(pgd_t pgd)
/*
* leaf pte for huge page
*/
- return !!(pgd_val(pgd) & _PAGE_PTE);
+ return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
}
#define pgd_huge pgd_huge
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ab84c89c9e98..263bf39ced40 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -318,7 +318,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
@@ -336,8 +336,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
-
- if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
@@ -346,7 +345,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_val(*ptep) & _PAGE_WRITE) == 0)
+ if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
@@ -365,17 +364,35 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
{
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
-static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_WRITE);}
-static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
-static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
-static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+
+static inline int pte_write(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
+}
+
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
- return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
}
+
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
@@ -395,14 +412,14 @@ static inline pte_t pte_clear_soft_dirty(pte_t pte)
*/
static inline int pte_protnone(pte_t pte)
{
- return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
- (_PAGE_PRESENT | _PAGE_PRIVILEGED);
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED);
}
#endif /* CONFIG_NUMA_BALANCING */
static inline int pte_present(pte_t pte)
{
- return !!(pte_val(pte) & _PAGE_PRESENT);
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
}
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -474,7 +491,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline bool pte_user(pte_t pte)
{
- return !(pte_val(pte) & _PAGE_PRIVILEGED);
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
}
/* Encode and de-code a swap entry */
@@ -517,10 +534,12 @@ static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
+
static inline bool pte_swp_soft_dirty(pte_t pte)
{
- return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}
+
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
@@ -626,8 +645,16 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}
-#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (!pmd_none(pmd))
+static inline int pmd_none(pmd_t pmd)
+{
+ return !pmd_raw(pmd);
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+
+ return !pmd_none(pmd);
+}
static inline int pmd_bad(pmd_t pmd)
{
@@ -646,19 +673,26 @@ static inline void pud_clear(pud_t *pudp)
*pudp = __pud(0);
}
-#define pud_none(pud) (!pud_val(pud))
-#define pud_present(pud) (pud_val(pud) != 0)
+static inline int pud_none(pud_t pud)
+{
+ return !pud_raw(pud);
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return !pud_none(pud);
+}
extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
- return __pte(pud_val(pud));
+ return __pte_raw(pud_raw(pud));
}
static inline pud_t pte_pud(pte_t pte)
{
- return __pud(pte_val(pte));
+ return __pud_raw(pte_raw(pte));
}
#define pud_write(pud) pte_write(pud_pte(pud))
@@ -681,17 +715,24 @@ static inline void pgd_clear(pgd_t *pgdp)
*pgdp = __pgd(0);
}
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_present(pgd) (!pgd_none(pgd))
+static inline int pgd_none(pgd_t pgd)
+{
+ return !pgd_raw(pgd);
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+ return !pgd_none(pgd);
+}
static inline pte_t pgd_pte(pgd_t pgd)
{
- return __pte(pgd_val(pgd));
+ return __pte_raw(pgd_raw(pgd));
}
static inline pgd_t pte_pgd(pte_t pte)
{
- return __pgd(pte_val(pte));
+ return __pgd_raw(pte_raw(pte));
}
static inline int pgd_bad(pgd_t pgd)
@@ -783,12 +824,12 @@ struct page *realmode_pfn_to_page(unsigned long pfn);
static inline pte_t pmd_pte(pmd_t pmd)
{
- return __pte(pmd_val(pmd));
+ return __pte_raw(pmd_raw(pmd));
}
static inline pmd_t pte_pmd(pte_t pte)
{
- return __pmd(pte_val(pte));
+ return __pmd_raw(pte_raw(pte));
}
static inline pte_t *pmdp_ptep(pmd_t *pmd)
@@ -849,7 +890,7 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
static inline int pmd_large(pmd_t pmd)
{
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
@@ -865,7 +906,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
+ if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
return 0;
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
@@ -876,7 +917,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
- if ((pmd_val(*pmdp) & _PAGE_WRITE) == 0)
+ if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
return;
pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
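
The pte_val()/pmd_val() tests above are rewritten to compare the raw big-endian word against cpu_to_be64(mask): swapping the constant mask folds at compile time, so the hot path no longer byte-swaps the PTE itself on little-endian kernels. A standalone sketch of the equivalence, with a hypothetical PAGE_WRITE bit and bswap_if_le() standing in for cpu_to_be64()/be64_to_cpu():

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_WRITE 0x0002UL                 /* hypothetical flag bit */

    /* cpu_to_be64() and be64_to_cpu() are both a byte swap on LE, identity on BE */
    static inline uint64_t bswap_if_le(uint64_t x)
    {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return __builtin_bswap64(x);
    #else
        return x;
    #endif
    }

    int main(void)
    {
        uint64_t raw = bswap_if_le(0x0003);     /* PTE as stored (big endian) */

        int swap_pte  = !!(bswap_if_le(raw) & PAGE_WRITE);   /* swap the PTE at runtime  */
        int swap_mask = !!(raw & bswap_if_le(PAGE_WRITE));   /* swap folds into constant */

        printf("%d == %d\n", swap_pte, swap_mask);           /* same result either way   */
        return 0;
    }
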
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index f12ddf5e8de5..2f6373144e2c 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -75,11 +75,6 @@ static inline void hash__flush_tlb_page(struct vm_area_struct *vma,
{
}
-static inline void hash__flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
-}
-
static inline void hash__flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 3fa94fcac628..65037762b120 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -10,27 +10,35 @@ static inline int mmu_get_ap(int psize)
return mmu_psize_defs[psize].ap;
}
+extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize);
+extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- unsigned long ap, int nid);
extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ int psize);
extern void radix__tlb_flush(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
extern void radix__flush_tlb_mm(struct mm_struct *mm);
extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- unsigned long ap, int nid);
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ int psize);
#else
#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
-#define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
#endif
-
+extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
+ unsigned long page_size);
+extern void radix__flush_tlb_lpid(unsigned long lpid);
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 96e5769b18b0..72b925f97bab 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -7,6 +7,25 @@
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_pmd_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
+#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+{
+ if (radix_enabled())
+ return radix__flush_hugetlb_tlb_range(vma, start, end);
+ return hash__flush_tlb_range(vma, start, end);
+}
+
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
@@ -38,14 +57,6 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
return hash__local_flush_tlb_page(vma, vmaddr);
}
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- if (radix_enabled())
- return radix__flush_tlb_page(vma, vmaddr);
- return hash__flush_tlb_page_nohash(vma, vmaddr);
-}
-
static inline void tlb_flush(struct mmu_gather *tlb)
{
if (radix_enabled())
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 69fb16d7a811..b77f0364df94 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
/*
* No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 994c60a857ce..2015b072422c 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -49,8 +49,7 @@ void __patch_exception(int exc, unsigned long addr);
static inline unsigned long ppc_function_entry(void *func)
{
-#if defined(CONFIG_PPC64)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
u32 *insn = func;
/*
@@ -75,14 +74,13 @@ static inline unsigned long ppc_function_entry(void *func)
return (unsigned long)(insn + 2);
else
return (unsigned long)func;
-#else
+#elif defined(PPC64_ELF_ABI_v1)
/*
* On PPC64 ABIv1 the function pointer actually points to the
* function's descriptor. The first entry in the descriptor is the
* address of the function text.
*/
return ((func_descr_t *)func)->entry;
-#endif
#else
return (unsigned long)func;
#endif
@@ -90,7 +88,7 @@ static inline unsigned long ppc_function_entry(void *func)
static inline unsigned long ppc_global_function_entry(void *func)
{
-#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
/* PPC64 ABIv2 the global entry point is at the address */
return (unsigned long)func;
#else
@@ -106,7 +104,7 @@ static inline unsigned long ppc_global_function_entry(void *func)
*/
/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET 24
#else
#define R2_STACK_OFFSET 40
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
new file mode 100644
index 000000000000..2ef55f8968a2
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_POWERPC_CPUFEATURES_H
+#define __ASM_POWERPC_CPUFEATURES_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+#include <asm/cputable.h>
+
+static inline bool early_cpu_has_feature(unsigned long feature)
+{
+ return !!((CPU_FTRS_ALWAYS & feature) ||
+ (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_CPU_FTR_KEYS 64
+
+extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
+
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+ int i;
+
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+ printk("Warning! cpu_has_feature() used prior to jump label init!\n");
+ dump_stack();
+ return early_cpu_has_feature(feature);
+ }
+#endif
+
+ if (CPU_FTRS_ALWAYS & feature)
+ return true;
+
+ if (!(CPU_FTRS_POSSIBLE & feature))
+ return false;
+
+ i = __builtin_ctzl(feature);
+ return static_branch_likely(&cpu_feature_keys[i]);
+}
+#else
+static inline bool cpu_has_feature(unsigned long feature)
+{
+ return early_cpu_has_feature(feature);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
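
The jump-label variant of cpu_has_feature() relies on the argument being a single, compile-time-constant bit: the always/possible masks fold away and __builtin_ctzl() turns the remaining bit into an index into the key array. A standalone sketch of that shape, using hypothetical FTR_* masks and a plain bool array instead of static keys:

    #include <stdbool.h>
    #include <stdio.h>

    #define FTR_ALWAYS   0x1UL        /* hypothetical always-present features */
    #define FTR_POSSIBLE 0xfUL        /* hypothetical union of possible bits  */
    #define FTR_EXAMPLE  (1UL << 2)   /* hypothetical single feature bit      */

    static bool feature_key[64] = { [2] = true };  /* stands in for the static keys */

    static inline bool has_feature(unsigned long feature)
    {
        if (FTR_ALWAYS & feature)
            return true;                            /* folds away at compile time */
        if (!(FTR_POSSIBLE & feature))
            return false;                           /* folds away at compile time */
        return feature_key[__builtin_ctzl(feature)];  /* single key/array lookup  */
    }

    int main(void)
    {
        printf("FTR_EXAMPLE present: %d\n", has_feature(FTR_EXAMPLE));
        return 0;
    }
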
diff --git a/arch/powerpc/include/asm/cpufeature.h b/arch/powerpc/include/asm/cpufeature.h
new file mode 100644
index 000000000000..19e6290699ea
--- /dev/null
+++ b/arch/powerpc/include/asm/cpufeature.h
@@ -0,0 +1,40 @@
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see asm/cputable.h for powerpc CPU features.
+ *
+ * Copyright 2016 Alastair D'Silva, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_POWERPC_CPUFEATURE_H
+#define __ASM_POWERPC_CPUFEATURE_H
+
+#include <asm/cputable.h>
+
+/* Keep these in step with powerpc/include/asm/cputable.h */
+#define MAX_CPU_FEATURES (2 * 32)
+
+/*
+ * Currently we don't have a need for any of the feature bits defined in
+ * cpu_user_features. When we do, they should be defined such as:
+ *
+ * #define PPC_MODULE_FEATURE_32 (ilog2(PPC_FEATURE_32))
+ */
+
+#define PPC_MODULE_FEATURE_VEC_CRYPTO (32 + ilog2(PPC_FEATURE2_VEC_CRYPTO))
+
+#define cpu_feature(x) (x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ if (num < 32)
+ return !!(cur_cpu_spec->cpu_user_features & 1UL << num);
+ else
+ return !!(cur_cpu_spec->cpu_user_features2 & 1UL << (num - 32));
+}
+
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
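
cpu_have_feature() maps module feature numbers onto the two 32-bit user-feature words: numbers 0-31 test the first word, 32-63 the second. A standalone sketch with made-up feature words in place of cpu_user_features/cpu_user_features2:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int user_features  = 0x00000004;   /* bits 0-31, hypothetical  */
    static unsigned int user_features2 = 0x00000002;   /* bits 32-63, hypothetical */

    static bool have_feature(unsigned int num)
    {
        if (num < 32)
            return !!(user_features & (1U << num));
        return !!(user_features2 & (1U << (num - 32)));
    }

    int main(void)
    {
        printf("feature 2:  %d\n", have_feature(2));    /* set in word 1 */
        printf("feature 33: %d\n", have_feature(33));   /* set in word 2 */
        return 0;
    }
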
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index d2f99ca1e3a6..3d7fc06532a1 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -13,6 +13,8 @@
#ifndef __ASSEMBLY__
extern u32 pnv_fastsleep_workaround_at_entry[];
extern u32 pnv_fastsleep_workaround_at_exit[];
+
+extern u64 pnv_first_deep_stop_state;
#endif
#endif
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index df4fb5faba43..82026b419341 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -2,6 +2,7 @@
#define __ASM_POWERPC_CPUTABLE_H
+#include <linux/types.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <uapi/asm/cputable.h>
@@ -122,6 +123,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
extern const char *powerpc_base_platform;
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
/* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
enum {
TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */
@@ -576,14 +583,6 @@ enum {
};
#endif /* __powerpc64__ */
-static inline int cpu_has_feature(unsigned long feature)
-{
- return (CPU_FTRS_ALWAYS & feature) ||
- (CPU_FTRS_POSSIBLE
- & cur_cpu_spec->cpu_features
- & feature);
-}
-
#define HBP_NUM 1
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index e2452550bcb1..4f60db074725 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -28,6 +28,7 @@ static inline void setup_cputime_one_jiffy(void) { }
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>
+#include <asm/cpu_has_feature.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
@@ -90,11 +91,10 @@ static inline void setup_cputime_one_jiffy(void)
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
u64 ct;
- u64 sec;
+ u64 sec = jif;
/* have to be a little careful about overflow */
- ct = jif % HZ;
- sec = jif / HZ;
+ ct = do_div(sec, HZ);
if (ct) {
ct *= tb_ticks_per_sec;
do_div(ct, HZ);
@@ -230,7 +230,16 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
+/*
+ * PPC64 uses PACA which is task independent for storing accounting data while
+ * PPC32 uses struct thread_info, therefore at task switch the accounting data
+ * has to be populated in the new task
+ */
+#ifdef CONFIG_PPC64
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+#else
+void arch_vtime_task_switch(struct task_struct *tsk);
+#endif
#endif /* __KERNEL__ */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
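
jiffies64_to_cputime64() now uses do_div(), which divides its 64-bit first argument in place and returns the remainder, so one division replaces the separate '/' and '%' pair. A standalone sketch of that convention, with emulated_do_div() standing in for the real macro and HZ assumed to be 250:

    #include <stdint.h>
    #include <stdio.h>

    /* Divide n in place by base, return the remainder (GCC statement expression) */
    #define emulated_do_div(n, base) ({            \
        uint32_t _rem = (uint32_t)((n) % (base));  \
        (n) /= (base);                             \
        _rem;                                      \
    })

    int main(void)
    {
        uint64_t jiffies = 2550;
        uint64_t sec = jiffies;                    /* dividend, updated in place */
        uint32_t rem = emulated_do_div(sec, 250);  /* HZ assumed to be 250 here  */

        printf("sec=%llu rem=%u\n", (unsigned long long)sec, rem);
        return 0;
    }
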
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 5fa6b20eba10..378167377065 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,6 +16,7 @@
#include <linux/threads.h>
#include <asm/ppc-opcode.h>
+#include <asm/cpu_has_feature.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 4efc11dacb98..4a2beef74277 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
typedef struct {
unsigned int base;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 77816acd4fd9..84e3f8dd5e4f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -13,7 +13,6 @@
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
@@ -25,14 +24,14 @@
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle,
- size_t size, struct dma_attrs *attrs);
+ size_t size, unsigned long attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index fb9f376ae27b..8e37b71674f4 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -57,7 +57,7 @@ struct pci_dn;
/*
* The struct is used to trace PE related EEH functionality.
* In theory, there will have one instance of the struct to
- * be created against particular PE. In nature, PEs corelate
+ * be created against particular PE. In nature, PEs correlate
* to each other. the struct has to reflect that hierarchy in
* order to easily pick up those affected PEs when one particular
* PE has EEH errors.
@@ -274,7 +274,7 @@ void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
-void *eeh_dev_init(struct pci_dn *pdn, void *data);
+struct eeh_dev *eeh_dev_init(struct pci_dn *pdn);
void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
int eeh_init(void);
int __init eeh_ops_register(struct eeh_ops *ops);
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 93ae809fe5ea..bed66e5743b3 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -287,7 +287,7 @@ do_kvm_##n: \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r10,GPR1(r1); /* save r1 in stackframe */ \
beq 4f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r9, r10); \
+ ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
SAVE_PPR(area, r9, r10); \
4: EXCEPTION_PROLOG_COMMON_2(area) \
EXCEPTION_PROLOG_COMMON_3(n) \
@@ -403,6 +403,8 @@ label##_relon_hv: \
#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe60 PACA_IRQ_HMI
#define SOFTEN_VALUE_0xe62 PACA_IRQ_HMI
+#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE
+#define SOFTEN_VALUE_0xea2 PACA_IRQ_EE
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 9a67a38bf7b9..57fec8ac7b92 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -184,4 +184,8 @@ label##3: \
FTR_ENTRY_OFFSET label##1b-label##3b; \
.popsection;
+#ifndef __ASSEMBLY__
+void apply_feature_fixups(void);
+#endif
+
#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index b0629249778b..1e0b5a5d660a 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -126,6 +126,12 @@ extern int fwnmi_active;
extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
+#ifdef CONFIG_PPC_PSERIES
+void pseries_probe_fw_features(void);
+#else
+static inline void pseries_probe_fw_features(void) { };
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 90f604bbcd19..4508b322f2cd 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -51,6 +51,13 @@ enum fixed_addresses {
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
+#ifdef CONFIG_PPC_8xx
+ /* For IMMR we need an aligned 512K area */
+#define FIX_IMMR_SIZE (512 * 1024 / PAGE_SIZE)
+ FIX_IMMR_START,
+ FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+ FIX_IMMR_SIZE,
+#endif
/* FIX_PCIE_MCFG, */
__end_of_fixed_addresses
};
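The arithmetic above reserves enough fixmap slots that a fully 512K-aligned window is guaranteed to fall inside them; fixmap indices map downwards from FIXADDR_TOP, so the highest index of the padded block lands on the aligned virtual address. A worked example with illustrative values:

/*
 * Worked example (illustrative values only): assume 4K pages,
 * FIXADDR_TOP = 0xfffff000 and FIX_IMMR_START = 3.
 *
 *   FIX_IMMR_SIZE  = 512K / 4K = 128 slots
 *   FIX_IMMR_BASE  = __ALIGN_MASK(3, 127) - 1 + 128 = 128 - 1 + 128 = 255
 *   VIRT_IMMR_BASE = __fix_to_virt(255) = 0xfffff000 - 255 * 0x1000
 *                  = 0xfff00000, which is 512K aligned
 *
 * Slots 128..255 all lie at or above FIX_IMMR_START, so the whole
 * aligned 512K window is backed by reserved fixmap entries.
 */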
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 50ca7585abe2..686c5f70eb84 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -1,6 +1,8 @@
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE
+#include <asm/types.h>
+
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
@@ -65,8 +67,8 @@ struct dyn_arch_ftrace {
#endif
#endif
-#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
+#ifdef PPC64_ELF_ABI_v1
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
@@ -79,6 +81,6 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
return !strcmp(sym + 4, name + 3);
}
#endif
-#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
+#endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_FTRACE */
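The ABIv1 branch above relies on ELF ABIv1 syscall symbols carrying a leading dot, so skipping four characters of the symbol and three of the syscall name leaves the common tail. A short worked example:

/*
 * Example (ABIv1): sym = ".SyS_write", name = "sys_write"
 *   sym  + 4  ->  "_write"
 *   name + 3  ->  "_write"
 * so the compat-prefixed symbol still matches the syscall name.
 */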
diff --git a/arch/powerpc/include/asm/hmi.h b/arch/powerpc/include/asm/hmi.h
new file mode 100644
index 000000000000..88b4901ac4ee
--- /dev/null
+++ b/arch/powerpc/include/asm/hmi.h
@@ -0,0 +1,45 @@
+/*
+ * Hypervisor Maintenance Interrupt header file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * Copyright 2015 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_PPC64_HMI_H__
+#define __ASM_PPC64_HMI_H__
+
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define CORE_TB_RESYNC_REQ_BIT 63
+#define MAX_SUBCORE_PER_CORE 4
+
+/*
+ * sibling_subcore_state structure is used to co-ordinate all threads
+ * during HMI to avoid TB corruption. This structure is allocated once
+ * per core and shared by all threads on that core.
+ */
+struct sibling_subcore_state {
+ unsigned long flags;
+ u8 in_guest[MAX_SUBCORE_PER_CORE];
+};
+
+extern void wait_for_subcore_guest_exit(void);
+extern void wait_for_tb_resync(void);
+#else
+static inline void wait_for_subcore_guest_exit(void) { }
+static inline void wait_for_tb_resync(void) { }
+#endif
+#endif /* __ASM_PPC64_HMI_H__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index e2d9f4996e5c..c5517f463ec7 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -147,7 +147,7 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
{
pte_t pte;
pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
- flush_tlb_page(vma, addr);
+ flush_hugetlb_page(vma, addr);
}
static inline int huge_pte_none(pte_t pte)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0bc9c284aa10..708edebcf147 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -431,17 +431,6 @@ static inline unsigned long cmo_get_page_size(void)
{
return CMO_PageSize;
}
-
-extern long pSeries_enable_reloc_on_exc(void);
-extern long pSeries_disable_reloc_on_exc(void);
-
-extern long pseries_big_endian_exceptions(void);
-
-#else
-
-#define pSeries_enable_reloc_on_exc() do {} while (0)
-#define pSeries_disable_reloc_on_exc() do {} while (0)
-
#endif /* CONFIG_PPC_PSERIES */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b59ac27a6b7d..c7d82ff62a33 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -130,6 +130,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
extern bool prep_irq_for_idle(void);
+extern void force_external_irq_replay(void);
+
#else /* CONFIG_PPC64 */
#define SET_MSR_EE(x) mtmsr(x)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7b87bab09564..2c1d50792944 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -53,7 +53,7 @@ struct iommu_table_ops {
long index, long npages,
unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#ifdef CONFIG_IOMMU_API
/*
* Exchanges existing TCE with new TCE plus direction bits;
@@ -248,12 +248,12 @@ extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
struct scatterlist *sglist,
int nelems,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
size_t size, dma_addr_t *dma_handle,
@@ -264,16 +264,15 @@ extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
struct page *page, unsigned long offset,
size_t size, unsigned long mask,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
-extern void alloc_dart_table(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_save(void)
{
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 47e155f15433..9a287e0ac8b1 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <asm/feature-fixups.h>
+#include <asm/asm-compat.h>
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
@@ -21,7 +22,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
- "nop\n\t"
+ "nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
@@ -35,7 +36,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\n\t"
- "b %l[l_yes]\n\t"
+ "b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
".popsection \n\t"
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 039b583db029..2c9759bdb63b 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -40,8 +40,7 @@ struct kprobe;
typedef ppc_opcode_t kprobe_opcode_t;
#define MAX_INSN_SIZE 1
-#ifdef CONFIG_PPC64
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
/* PPC64 ABIv2 needs local entry point */
#define kprobe_lookup_name(name, addr) \
{ \
@@ -49,7 +48,7 @@ typedef ppc_opcode_t kprobe_opcode_t;
if (addr) \
addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
}
-#else
+#elif defined(PPC64_ELF_ABI_v1)
/*
* 64bit powerpc ABIv1 uses function descriptors:
* - Check for the dot variant of the symbol first.
@@ -92,8 +91,7 @@ typedef ppc_opcode_t kprobe_opcode_t;
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); \
} \
}
-#endif /* defined(_CALL_ELF) && _CALL_ELF == 2 */
-#endif /* CONFIG_PPC64 */
+#endif
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
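The two kprobe_lookup_name() variants above differ in how a symbol is turned into a probe-able code address. A hedged summary (the +8 local-entry offset is the usual ABIv2 convention and is stated here as an assumption):

/*
 * ABIv2: kallsyms returns the global entry point; ppc_function_entry()
 *        advances to the local entry point (typically +8, past the TOC
 *        pointer setup) so the probe fires once r2 is valid.
 * ABIv1: "name" resolves to a function descriptor, so the dot symbol
 *        ".name" is looked up (or the descriptor dereferenced) to reach
 *        the real text address.
 */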
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 1f4497fb5b83..88d17b4ea9c8 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -181,8 +181,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
switch (b_psize) {
case MMU_PAGE_4K:
- sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
- ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+ sllp = get_sllp_encoding(a_psize);
rb |= sllp << 5; /* AP field */
rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */
break;
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 72b6225aca73..d318d432caa9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -162,7 +162,7 @@ struct kvmppc_book3s_shadow_vcpu {
/* Values for kvm_state */
#define KVM_HWTHREAD_IN_KERNEL 0
-#define KVM_HWTHREAD_IN_NAP 1
+#define KVM_HWTHREAD_IN_IDLE 1
#define KVM_HWTHREAD_IN_KVM 2
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index e3ad5c72724a..0cf5e21179fc 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -1,8 +1,9 @@
#ifndef _ASM_POWERPC_LINKAGE_H
#define _ASM_POWERPC_LINKAGE_H
-#ifdef CONFIG_PPC64
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#include <asm/types.h>
+
+#ifdef PPC64_ELF_ABI_v1
#define cond_syscall(x) \
asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
"\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
@@ -10,6 +11,5 @@
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
"\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
#endif
-#endif
#endif /* _ASM_POWERPC_LINKAGE_H */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 6bdcd0da9e21..0420b388dd83 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,42 +34,6 @@ struct pci_host_bridge;
struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
- void (*hpte_invalidate)(unsigned long slot,
- unsigned long vpn,
- int bpsize, int apsize,
- int ssize, int local);
- long (*hpte_updatepp)(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int bpsize, int apsize,
- int ssize, unsigned long flags);
- void (*hpte_updateboltedpp)(unsigned long newpp,
- unsigned long ea,
- int psize, int ssize);
- long (*hpte_insert)(unsigned long hpte_group,
- unsigned long vpn,
- unsigned long prpn,
- unsigned long rflags,
- unsigned long vflags,
- int psize, int apsize,
- int ssize);
- long (*hpte_remove)(unsigned long hpte_group);
- int (*hpte_removebolted)(unsigned long ea,
- int psize, int ssize);
- void (*flush_hash_range)(unsigned long number, int local);
- void (*hugepage_invalidate)(unsigned long vsid,
- unsigned long addr,
- unsigned char *hpte_slot_array,
- int psize, int ssize, int local);
- /*
- * Special for kexec.
- * To be called in real mode with interrupts disabled. No locks are
- * taken as such, concurrent access on pre POWER5 hardware could result
- * in a deadlock.
- * The linear mapping is destroyed as well.
- */
- void (*hpte_clear_all)(void);
-
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token);
@@ -89,7 +53,6 @@ struct machdep_calls {
int (*probe)(void);
void (*setup_arch)(void); /* Optional, may be NULL */
- void (*init_early)(void);
/* Optional, may be NULL. */
void (*show_cpuinfo)(struct seq_file *m);
void (*show_percpuinfo)(struct seq_file *m, int i);
@@ -111,8 +74,8 @@ struct machdep_calls {
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
- void (*restart)(char *cmd);
- void (*halt)(void);
+ void __noreturn (*restart)(char *cmd);
+ void __noreturn (*halt)(void);
void (*panic)(char *str);
void (*cpu_die)(void);
@@ -256,7 +219,6 @@ struct machdep_calls {
#ifdef CONFIG_ARCH_RANDOM
int (*get_random_seed)(unsigned long *v);
#endif
- int (*update_partition_table)(u64);
};
extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 2563c435a4b1..30922f699341 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -13,6 +13,7 @@
#include <asm/cputable.h>
#include <linux/mm.h>
+#include <asm/cpu_has_feature.h>
/*
* This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
@@ -31,13 +32,13 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
}
#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-static inline int arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
- return 0;
+ return false;
if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
- return 0;
- return 1;
+ return false;
+ return true;
}
#define arch_validate_prot(prot) arch_validate_prot(prot)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 0a566f15f985..3e0e4927811c 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -169,6 +169,9 @@ typedef struct {
unsigned int active;
unsigned long vdso_base;
} mm_context_t;
+
+#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
+#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC_4K_PAGES)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e53ebebff474..e2fb408f8398 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -12,7 +12,7 @@
*/
/*
- * First half is MMU families
+ * MMU families
*/
#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
@@ -21,9 +21,18 @@
#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x ASM_CONST(0x00000020)
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX ASM_CONST(0x00000040)
+
+/*
+ * Individual features below.
+ */
+
/*
- * This is individual features
+ * We need to clear the top 16 bits of the VA (from the remaining 64 bits)
+ * in tlbie* instructions.
*/
+#define MMU_FTR_TLBIE_CROP_VA ASM_CONST(0x00008000)
/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
@@ -88,16 +97,11 @@
*/
#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
-/*
- * Radix page table available
- */
-#define MMU_FTR_RADIX ASM_CONST(0x80000000)
-
/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
-#define MMU_FTRS_PPC970 MMU_FTRS_POWER4
+#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
@@ -108,6 +112,7 @@
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
#include <asm/cputable.h>
#ifdef CONFIG_PPC_FSL_BOOK3E
@@ -124,23 +129,74 @@ enum {
MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
- MMU_FTR_1T_SEGMENT |
+ MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
#ifdef CONFIG_PPC_RADIX_MMU
- MMU_FTR_RADIX |
+ MMU_FTR_TYPE_RADIX |
#endif
0,
};
-static inline int mmu_has_feature(unsigned long feature)
+static inline bool early_mmu_has_feature(unsigned long feature)
+{
+ return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include <linux/jump_label.h>
+
+#define NUM_MMU_FTR_KEYS 32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
{
- return (MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+ int i;
+
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+ printk("Warning! mmu_has_feature() used prior to jump label init!\n");
+ dump_stack();
+ return early_mmu_has_feature(feature);
+ }
+#endif
+
+ if (!(MMU_FTRS_POSSIBLE & feature))
+ return false;
+
+ i = __builtin_ctzl(feature);
+ return static_branch_likely(&mmu_feature_keys[i]);
}
static inline void mmu_clear_feature(unsigned long feature)
{
+ int i;
+
+ i = __builtin_ctzl(feature);
cur_cpu_spec->mmu_features &= ~feature;
+ static_branch_disable(&mmu_feature_keys[i]);
+}
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+
}
+static inline bool mmu_has_feature(unsigned long feature)
+{
+ return early_mmu_has_feature(feature);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+ cur_cpu_spec->mmu_features &= ~feature;
+}
+#endif /* CONFIG_JUMP_LABEL */
+
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
#ifdef CONFIG_PPC64
@@ -159,6 +215,28 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
+#ifdef CONFIG_PPC_RADIX_MMU
+static inline bool radix_enabled(void)
+{
+ return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+
+static inline bool early_radix_enabled(void)
+{
+ return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+#else
+static inline bool radix_enabled(void)
+{
+ return false;
+}
+
+static inline bool early_radix_enabled(void)
+{
+ return false;
+}
+#endif
+
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
@@ -205,6 +283,7 @@ extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
#endif /* __ASSEMBLY__ */
#endif
@@ -225,9 +304,5 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
# include <asm/mmu-8xx.h>
#endif
-#ifndef radix_enabled
-#define radix_enabled() (0)
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
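With CONFIG_JUMP_LABEL_FEATURE_CHECKS the feature word is no longer loaded and masked at run time: each one-hot MMU_FTR_* bit selects one static key via __builtin_ctzl(), and the test compiles down to a patchable nop or branch. A small usage sketch with the index arithmetic spelled out:

/*
 * MMU_FTR_TYPE_RADIX is 0x00000040, so __builtin_ctzl(0x40) = 6 and the
 * check below ends up testing mmu_feature_keys[6].  The argument must be
 * a compile-time constant or the BUILD_BUG_ON() in mmu_has_feature()
 * fires.
 */
static inline void example_flush_tlb_mm(void)
{
	if (mmu_has_feature(MMU_FTR_TYPE_RADIX)) {
		/* radix flush path */
	} else {
		/* hash flush path */
	}
}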
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 0acc7c7c28d1..e94cede14522 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -275,7 +275,7 @@ extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
extern void mpc52xx_map_common_devices(void);
extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
-extern void mpc52xx_restart(char *cmd);
+extern void __noreturn mpc52xx_restart(char *cmd);
/* mpc52xx_gpt.c */
struct mpc52xx_gpt_priv;
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index fdab41c654ef..0656ff81e5b0 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -32,7 +32,7 @@
* - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
*
* Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
- * TLB2 storage attibute fields. Those are:
+ * TLB2 storage attribute fields. Those are:
*
* TLB2:
* 0...10 11 12 13 14 15 16...31
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 570fb30be21c..908324574f77 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -23,6 +23,7 @@
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE PTE_FRAG_SIZE
#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (0)
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 9bb8ddf0be37..0e2e57bcab50 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -158,17 +158,33 @@
#define OPAL_LEDS_SET_INDICATOR 115
#define OPAL_CEC_REBOOT2 116
#define OPAL_CONSOLE_FLUSH 117
-#define OPAL_LAST 117
+#define OPAL_GET_DEVICE_TREE 118
+#define OPAL_PCI_GET_PRESENCE_STATE 119
+#define OPAL_PCI_GET_POWER_STATE 120
+#define OPAL_PCI_SET_POWER_STATE 121
+#define OPAL_INT_GET_XIRR 122
+#define OPAL_INT_SET_CPPR 123
+#define OPAL_INT_EOI 124
+#define OPAL_INT_SET_MFRR 125
+#define OPAL_PCI_TCE_KILL 126
+#define OPAL_LAST 126
/* Device tree flags */
-/* Flags set in power-mgmt nodes in device tree if
- * respective idle states are supported in the platform.
+/*
+ * Flags set in power-mgmt nodes in device tree describing
+ * idle states that are supported in the platform.
*/
+
+#define OPAL_PM_TIMEBASE_STOP 0x00000002
+#define OPAL_PM_LOSE_HYP_CONTEXT 0x00002000
+#define OPAL_PM_LOSE_FULL_CONTEXT 0x00004000
#define OPAL_PM_NAP_ENABLED 0x00010000
#define OPAL_PM_SLEEP_ENABLED 0x00020000
#define OPAL_PM_WINKLE_ENABLED 0x00040000
#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
+#define OPAL_PM_STOP_INST_FAST 0x00100000
+#define OPAL_PM_STOP_INST_DEEP 0x00200000
/*
* OPAL_CONFIG_CPU_IDLE_STATE parameters
@@ -344,6 +360,18 @@ enum OpalPciResetState {
OPAL_ASSERT_RESET = 1
};
+enum OpalPciSlotPresence {
+ OPAL_PCI_SLOT_EMPTY = 0,
+ OPAL_PCI_SLOT_PRESENT = 1
+};
+
+enum OpalPciSlotPower {
+ OPAL_PCI_SLOT_POWER_OFF = 0,
+ OPAL_PCI_SLOT_POWER_ON = 1,
+ OPAL_PCI_SLOT_OFFLINE = 2,
+ OPAL_PCI_SLOT_ONLINE = 3
+};
+
enum OpalSlotLedType {
OPAL_SLOT_LED_TYPE_ID = 0, /* IDENTIFY LED */
OPAL_SLOT_LED_TYPE_FAULT = 1, /* FAULT LED */
@@ -802,7 +830,7 @@ struct opal_sg_entry {
};
/*
- * Candiate image SG list.
+ * Candidate image SG list.
*
* length = VER | length
*/
@@ -825,6 +853,7 @@ enum {
OPAL_PHB_CAPI_MODE_CAPI = 1,
OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
+ OPAL_PHB_CAPI_MODE_DMA = 4,
};
/* OPAL I2C request */
@@ -852,7 +881,7 @@ struct opal_i2c_request {
* with individual elements being 16 bits wide to fetch the system
* wide EPOW status. Each element in the buffer will contain the
* EPOW status in it's bit representation for a particular EPOW sub
- * class as defiend here. So multiple detailed EPOW status bits
+ * class as defined here. So multiple detailed EPOW status bits
* specific for any sub class can be represented in a single buffer
* element as it's bit representation.
*/
@@ -891,6 +920,13 @@ enum {
OPAL_REBOOT_PLATFORM_ERROR = 1,
};
+/* Argument to OPAL_PCI_TCE_KILL */
+enum {
+ OPAL_PCI_TCE_KILL_PAGES,
+ OPAL_PCI_TCE_KILL_PE,
+ OPAL_PCI_TCE_KILL_ALL,
+};
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9d86c6651716..ee05bd203630 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -131,7 +131,7 @@ int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint16_t pe_number, uint16_t
int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint16_t pe_number,
uint16_t dma_window_number, uint64_t pci_start_addr,
uint64_t pci_mem_size);
-int64_t opal_pci_reset(uint64_t phb_id, uint8_t reset_scope, uint8_t assert_state);
+int64_t opal_pci_reset(uint64_t id, uint8_t reset_scope, uint8_t assert_state);
int64_t opal_pci_get_hub_diag_data(uint64_t hub_id, void *diag_buffer,
uint64_t diag_buffer_len);
@@ -148,7 +148,7 @@ int64_t opal_get_dpo_status(__be64 *dpo_timeout);
int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
__be16 *pci_error_type, __be16 *severity);
-int64_t opal_pci_poll(uint64_t phb_id);
+int64_t opal_pci_poll(uint64_t id);
int64_t opal_return_cpu(void);
int64_t opal_check_token(uint64_t token);
int64_t opal_reinit_cpus(uint64_t flags);
@@ -178,6 +178,8 @@ int64_t opal_dump_ack(uint32_t dump_id);
int64_t opal_dump_resend_notification(void);
int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_write_oppanel_async(uint64_t token, oppanel_line_t *lines,
+ uint64_t num_lines);
int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
int64_t opal_sync_host_reboot(void);
int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
@@ -209,12 +211,30 @@ int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
uint64_t size, uint64_t token);
int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
uint64_t token);
+int64_t opal_get_device_tree(uint32_t phandle, uint64_t buf, uint64_t len);
+int64_t opal_pci_get_presence_state(uint64_t id, uint64_t data);
+int64_t opal_pci_get_power_state(uint64_t id, uint64_t data);
+int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
+ uint64_t data);
+int64_t opal_pci_poll2(uint64_t id, uint64_t data);
+
+int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_int_set_cppr(uint8_t cppr);
+int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
+ uint32_t pe_num, uint32_t tce_size,
+ uint64_t dma_addr, uint32_t npages);
+int64_t opal_rm_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
+ uint32_t pe_num, uint32_t tce_size,
+ uint64_t dma_addr, uint32_t npages);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
+extern void opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
@@ -276,6 +296,16 @@ extern int opal_error_code(int rc);
ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count);
+static inline int opal_get_async_rc(struct opal_msg msg)
+{
+ if (msg.msg_type != OPAL_MSG_ASYNC_COMP)
+ return OPAL_PARAMETER;
+ else
+ return be64_to_cpu(msg.params[1]);
+}
+
+void opal_wake_poller(void);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_OPAL_H */
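opal_get_async_rc() extracts the completion code from the OPAL message delivered when an asynchronous call finishes. A hedged sketch of the intended calling pattern, assuming the powernv async-token helpers (opal_async_get_token_interruptible() and friends) and using the opal_write_oppanel_async() call added above as the example; the oppanel_line_t contents are assumed to already be in the form the firmware expects:

static int example_write_oppanel(oppanel_line_t *lines, u64 num_lines)
{
	struct opal_msg msg;
	int token, rc;

	token = opal_async_get_token_interruptible();
	if (token < 0)
		return token;

	rc = opal_write_oppanel_async(token, lines, num_lines);
	if (rc == OPAL_ASYNC_COMPLETION) {
		rc = opal_async_wait_response(token, &msg);
		if (rc == 0)
			rc = opal_error_code(opal_get_async_rc(msg));
	} else {
		rc = opal_error_code(rc);
	}

	opal_async_release_token(token);
	return rc;
}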
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 546540b91095..148303e7771f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -25,6 +25,8 @@
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_asm.h>
#endif
+#include <asm/accounting.h>
+#include <asm/hmi.h>
register struct paca_struct *local_paca asm("r13");
@@ -181,16 +183,15 @@ struct paca_struct {
*/
u16 in_mce;
u8 hmi_event_available; /* HMI event is available */
+ /*
+ * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+ * more details
+ */
+ struct sibling_subcore_state *sibling_subcore_state;
#endif
/* Stuff for accurate time accounting */
- u64 user_time; /* accumulated usermode TB ticks */
- u64 system_time; /* accumulated system TB ticks */
- u64 user_time_scaled; /* accumulated usermode SPURR ticks */
- u64 starttime; /* TB value snapshot */
- u64 starttime_user; /* TB value on exit to usermode */
- u64 startspurr; /* SPURR value snapshot */
- u64 utime_sspurr; /* ->user_time when ->startspurr set */
+ struct cpu_accounting_data accounting;
u64 stolen_time; /* TB ticks taken by hypervisor */
u64 dtl_ridx; /* read index in dispatch log */
struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
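The seven timing fields removed above are gathered into a single structure so the PPC32 thread_info path can reuse the same layout; a sketch inferred directly from the removed fields (the real definition lives in the new asm/accounting.h, which is not part of this excerpt):

/* Inferred from the fields removed from struct paca_struct above. */
struct cpu_accounting_data {
	u64 user_time;			/* accumulated usermode TB ticks */
	u64 system_time;		/* accumulated system TB ticks */
	u64 user_time_scaled;		/* accumulated usermode SPURR ticks */
	u64 starttime;			/* TB value snapshot */
	u64 starttime_user;		/* TB value on exit to usermode */
	u64 startspurr;			/* SPURR value snapshot */
	u64 utime_sspurr;		/* ->user_time when ->startspurr set */
};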
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 51db3a37bced..56398e7e6100 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -96,7 +96,7 @@ extern unsigned int HPAGE_SHIFT;
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
-#ifdef CONFIG_RELOCATABLE_PPC32
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif
@@ -139,9 +139,9 @@ extern long long virt_phys_offset;
* determine MEMORY_START until then. However we can determine PHYSICAL_START
* from information at hand (program counter, TLB lookup).
*
- * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
+ * On BookE with RELOCATABLE && PPC32
*
- * With RELOCATABLE_PPC32, we support loading the kernel at any physical
+ * With RELOCATABLE && PPC32, we support loading the kernel at any physical
* address without any restriction on the page alignment.
*
* We find the runtime address of _stext and relocate ourselves based on
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 467c0b05b6fb..b5e88e4a171a 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -33,6 +33,8 @@ struct pci_controller_ops {
/* Called during PCI resource reassignment */
resource_size_t (*window_alignment)(struct pci_bus *bus,
unsigned long type);
+ void (*setup_bridge)(struct pci_bus *bus,
+ unsigned long type);
void (*reset_secondary_bus)(struct pci_dev *pdev);
#ifdef CONFIG_PCI_MSI
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index a6f3ac0d4602..e9bd6cf0212f 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -136,9 +136,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
pgprot_t prot);
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc,
- resource_size_t *start, resource_size_t *end);
extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index e2bf208605b1..49c0a5a80efa 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -6,6 +6,7 @@
/* PTE level */
typedef struct { __be64 pte; } pte_t;
#define __pte(x) ((pte_t) { cpu_to_be64(x) })
+#define __pte_raw(x) ((pte_t) { (x) })
static inline unsigned long pte_val(pte_t x)
{
return be64_to_cpu(x.pte);
@@ -20,6 +21,7 @@ static inline __be64 pte_raw(pte_t x)
#ifdef CONFIG_PPC64
typedef struct { __be64 pmd; } pmd_t;
#define __pmd(x) ((pmd_t) { cpu_to_be64(x) })
+#define __pmd_raw(x) ((pmd_t) { (x) })
static inline unsigned long pmd_val(pmd_t x)
{
return be64_to_cpu(x.pmd);
@@ -37,21 +39,34 @@ static inline __be64 pmd_raw(pmd_t x)
#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
typedef struct { __be64 pud; } pud_t;
#define __pud(x) ((pud_t) { cpu_to_be64(x) })
+#define __pud_raw(x) ((pud_t) { (x) })
static inline unsigned long pud_val(pud_t x)
{
return be64_to_cpu(x.pud);
}
+
+static inline __be64 pud_raw(pud_t x)
+{
+ return x.pud;
+}
+
#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */
/* PGD level */
typedef struct { __be64 pgd; } pgd_t;
#define __pgd(x) ((pgd_t) { cpu_to_be64(x) })
+#define __pgd_raw(x) ((pgd_t) { (x) })
static inline unsigned long pgd_val(pgd_t x)
{
return be64_to_cpu(x.pgd);
}
+static inline __be64 pgd_raw(pgd_t x)
+{
+ return x.pgd;
+}
+
/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
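The new __*_raw() constructors wrap a value that is already big-endian, so constant PTE bits prepared with cpu_to_be64() are not byte-swapped a second time. A small illustration:

static inline bool pte_ctor_equivalence(void)
{
	pte_t a = __pte(0x1234UL);			/* swapped by __pte() */
	pte_t b = __pte_raw(cpu_to_be64(0x1234UL));	/* already __be64 */

	return pte_raw(a) == pte_raw(b);		/* true either way */
}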
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 925697968946..e08e829261b6 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -210,7 +210,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node,
/* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value)
* enable/disable the sound chip, whatever it is and provided it can
- * acually be controlled
+ * actually be controlled
*/
#define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9)
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 6f77f71ee964..0cbd8134ce81 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -11,7 +11,20 @@
#define _ASM_PNV_PCI_H
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#include <misc/cxl-base.h>
+#include <asm/opal-api.h>
+
+#define PCI_SLOT_ID_PREFIX 0x8000000000000000
+#define PCI_SLOT_ID(phb_id, bdfn) \
+ (PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
+
+extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
+extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
+extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
+extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
+ struct opal_msg *msg);
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
@@ -26,6 +39,40 @@ int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev, int num);
void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pci_dev *dev);
+
+/* Support for the cxl kernel api on the real PHB (instead of vPHB) */
+int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable);
+bool pnv_pci_on_cxl_phb(struct pci_dev *dev);
+struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose);
+void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu);
+
#endif
+struct pnv_php_slot {
+ struct hotplug_slot slot;
+ struct hotplug_slot_info slot_info;
+ uint64_t id;
+ char *name;
+ int slot_no;
+ struct kref kref;
+#define PNV_PHP_STATE_INITIALIZED 0
+#define PNV_PHP_STATE_REGISTERED 1
+#define PNV_PHP_STATE_POPULATED 2
+#define PNV_PHP_STATE_OFFLINE 3
+ int state;
+ struct device_node *dn;
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+ bool power_state_check;
+ void *fdt;
+ void *dt;
+ struct of_changeset ocs;
+ struct pnv_php_slot *parent;
+ struct list_head children;
+ struct list_head link;
+};
+extern struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn);
+extern int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
+ uint8_t state);
+
#endif
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 49cd8760aa7c..127ebf5862b4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -131,6 +131,8 @@
/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
#define PPC_INST_CLRBHRB 0x7c00035c
+#define PPC_INST_COPY 0x7c00060c
+#define PPC_INST_COPY_FIRST 0x7c20060c
#define PPC_INST_CP_ABORT 0x7c00068c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
@@ -142,9 +144,11 @@
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
+#define PPC_INST_STDCX 0x7c0001ad
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
+#define PPC_INST_STWCX 0x7c00012d
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
@@ -159,6 +163,8 @@
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
+#define PPC_INST_PASTE 0x7c00070c
+#define PPC_INST_PASTE_LAST 0x7c20070d
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_POPCNTD 0x7c0003f4
@@ -177,6 +183,7 @@
#define PPC_INST_MFVSRD 0x7c000066
#define PPC_INST_MTVSRD 0x7c000166
#define PPC_INST_SLBFEE 0x7c0007a7
+#define PPC_INST_SLBIA 0x7c0003e4
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -186,6 +193,7 @@
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_STXVD2X 0x7c000798
#define PPC_INST_TLBIE 0x7c000264
+#define PPC_INST_TLBIEL 0x7c000224
#define PPC_INST_TLBILX 0x7c000024
#define PPC_INST_WAIT 0x7c00007c
#define PPC_INST_TLBIVAX 0x7c000624
@@ -203,6 +211,8 @@
#define PPC_INST_SLEEP 0x4c0003a4
#define PPC_INST_WINKLE 0x4c0003e4
+#define PPC_INST_STOP 0x4c0002e4
+
/* A2 specific instructions */
#define PPC_INST_ERATWE 0x7c0001a6
#define PPC_INST_ERATRE 0x7c000166
@@ -215,8 +225,11 @@
#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
#define PPC_INST_LHZ 0xa0000000
-#define PPC_INST_LHBRX 0x7c00062c
#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_LHBRX 0x7c00062c
+#define PPC_INST_LDBRX 0x7c000428
+#define PPC_INST_STB 0x98000000
+#define PPC_INST_STH 0xb0000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
@@ -225,22 +238,34 @@
#define PPC_INST_MTLR 0x7c0803a6
#define PPC_INST_CMPWI 0x2c000000
#define PPC_INST_CMPDI 0x2c200000
+#define PPC_INST_CMPW 0x7c000000
+#define PPC_INST_CMPD 0x7c200000
#define PPC_INST_CMPLW 0x7c000040
+#define PPC_INST_CMPLD 0x7c200040
#define PPC_INST_CMPLWI 0x28000000
+#define PPC_INST_CMPLDI 0x28200000
#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
#define PPC_INST_SUB 0x7c000050
#define PPC_INST_BLR 0x4e800020
#define PPC_INST_BLRL 0x4e800021
+#define PPC_INST_MULLD 0x7c0001d2
#define PPC_INST_MULLW 0x7c0001d6
#define PPC_INST_MULHWU 0x7c000016
#define PPC_INST_MULLI 0x1c000000
#define PPC_INST_DIVWU 0x7c000396
+#define PPC_INST_DIVD 0x7c0003d2
#define PPC_INST_RLWINM 0x54000000
+#define PPC_INST_RLWIMI 0x50000000
+#define PPC_INST_RLDICL 0x78000000
#define PPC_INST_RLDICR 0x78000004
#define PPC_INST_SLW 0x7c000030
+#define PPC_INST_SLD 0x7c000036
#define PPC_INST_SRW 0x7c000430
+#define PPC_INST_SRD 0x7c000436
+#define PPC_INST_SRAD 0x7c000634
+#define PPC_INST_SRADI 0x7c000674
#define PPC_INST_AND 0x7c000038
#define PPC_INST_ANDDOT 0x7c000039
#define PPC_INST_OR 0x7c000378
@@ -251,6 +276,7 @@
#define PPC_INST_XORI 0x68000000
#define PPC_INST_XORIS 0x6c000000
#define PPC_INST_NEG 0x7c0000d0
+#define PPC_INST_EXTSW 0x7c0007b4
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BRANCH_COND 0x40800000
#define PPC_INST_LBZCIX 0x7c0006aa
@@ -261,6 +287,9 @@
#define ___PPC_RB(b) (((b) & 0x1f) << 11)
#define ___PPC_RS(s) (((s) & 0x1f) << 21)
#define ___PPC_RT(t) ___PPC_RS(t)
+#define ___PPC_R(r) (((r) & 0x1) << 16)
+#define ___PPC_PRS(prs) (((prs) & 0x1) << 17)
+#define ___PPC_RIC(ric) (((ric) & 0x3) << 18)
#define __PPC_RA(a) ___PPC_RA(__REG_##a)
#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
#define __PPC_RB(b) ___PPC_RB(__REG_##b)
@@ -276,6 +305,8 @@
#define __PPC_SH(s) __PPC_WS(s)
#define __PPC_MB(s) (((s) & 0x1f) << 6)
#define __PPC_ME(s) (((s) & 0x1f) << 1)
+#define __PPC_MB64(s) (__PPC_MB(s) | ((s) & 0x20))
+#define __PPC_ME64(s) __PPC_MB64(s)
#define __PPC_BI(s) (((s) & 0x1f) << 16)
#define __PPC_CT(t) (((t) & 0x0f) << 21)
@@ -325,6 +356,16 @@
__PPC_WC(w))
#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \
___PPC_RB(a) | ___PPC_RS(lp))
+#define PPC_TLBIE_5(rb,rs,ric,prs,r) \
+ stringify_in_c(.long PPC_INST_TLBIE | \
+ ___PPC_RB(rb) | ___PPC_RS(rs) | \
+ ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
+#define PPC_TLBIEL(rb,rs,ric,prs,r) \
+ stringify_in_c(.long PPC_INST_TLBIEL | \
+ ___PPC_RB(rb) | ___PPC_RS(rs) | \
+ ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
__PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
@@ -382,6 +423,8 @@
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
#define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE)
+#define PPC_STOP stringify_in_c(.long PPC_INST_STOP)
+
/* BHRB instructions */
#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
@@ -412,5 +455,7 @@
___PPC_RA(a) | \
___PPC_RB(b))
+#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
+ ((IH & 0x7) << 21))
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
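PPC_TLBIE_5() and PPC_TLBIEL() expose the ISA 3.0 RIC/PRS/R fields of the new tlbie forms to inline assembly. A hedged sketch of how such an encoding might be emitted; the field values and register contents are illustrative only, and the macro is assumed to be available via asm/ppc-opcode.h:

static inline void example_tlbiel(unsigned long rb, unsigned long rs)
{
	/* RIC=0 (invalidate TLB), PRS=1 (process scoped), R=1 (radix) */
	asm volatile(PPC_TLBIEL(%0, %1, 0, 1, 1)
		     : : "r" (rb), "r" (rs) : "memory");
}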
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 8753e4eb9ab5..0f73de069f19 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -39,8 +39,6 @@ void *pci_traverse_device_nodes(struct device_node *start,
void *traverse_pci_dn(struct pci_dn *root,
void *(*fn)(struct pci_dn *, void *),
void *data);
-
-extern void pci_devs_phb_init(void);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
/* From rtas_pci.h */
diff --git a/arch/powerpc/include/asm/ppc4xx.h b/arch/powerpc/include/asm/ppc4xx.h
index 033039a80c42..610a5119ad8c 100644
--- a/arch/powerpc/include/asm/ppc4xx.h
+++ b/arch/powerpc/include/asm/ppc4xx.h
@@ -13,6 +13,6 @@
#ifndef __ASM_POWERPC_PPC4xx_H__
#define __ASM_POWERPC_PPC4xx_H__
-extern void ppc4xx_reset_system(char *cmd);
+extern void __noreturn ppc4xx_reset_system(char *cmd);
#endif /* __ASM_POWERPC_PPC4xx_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 051af612a7e1..d5d5b5e348f2 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -24,27 +24,27 @@
*/
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
-#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
-#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
+#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
MFTB(ra); /* get timebase */ \
- ld rb,PACA_STARTTIME_USER(r13); \
- std ra,PACA_STARTTIME(r13); \
+ PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \
+ PPC_STL ra, ACCOUNT_STARTTIME(ptr); \
subf rb,rb,ra; /* subtract start value */ \
- ld ra,PACA_USER_TIME(r13); \
+ PPC_LL ra, ACCOUNT_USER_TIME(ptr); \
add ra,ra,rb; /* add on to user time */ \
- std ra,PACA_USER_TIME(r13); \
+ PPC_STL ra, ACCOUNT_USER_TIME(ptr); \
-#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
+#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
MFTB(ra); /* get timebase */ \
- ld rb,PACA_STARTTIME(r13); \
- std ra,PACA_STARTTIME_USER(r13); \
+ PPC_LL rb, ACCOUNT_STARTTIME(ptr); \
+ PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \
subf rb,rb,ra; /* subtract start value */ \
- ld ra,PACA_SYSTEM_TIME(r13); \
+ PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
add ra,ra,rb; /* add on to system time */ \
- std ra,PACA_SYSTEM_TIME(r13)
+ PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME \
@@ -189,7 +189,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define __STK_REG(i) (112 + ((i)-14)*8)
#define STK_REG(i) __STK_REG(__REG_##i)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define STK_GOT 24
#define __STK_PARAM(i) (32 + ((i)-3)*8)
#else
@@ -198,7 +198,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define _GLOBAL(name) \
.section ".text"; \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 009fab130cd8..68e3bf57b027 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -224,7 +224,7 @@ struct thread_struct {
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_PPC64
unsigned long start_tb; /* Start purr when proc switched in */
- unsigned long accum_tb; /* Total accumilated purr for process */
+ unsigned long accum_tb; /* Total accumulated purr for process */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *ptrace_bps[HBP_NUM];
/*
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long mmcr2;
unsigned mmcr0;
unsigned used_ebb;
+ unsigned long lmrr;
+ unsigned long lmser;
#endif
};
@@ -347,6 +349,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.fpexc_mode = 0, \
.ppr = INIT_PPR, \
+ .fscr = FSCR_TAR | FSCR_EBB \
}
#endif
@@ -457,6 +460,8 @@ extern int powersave_nap; /* set if nap mode can be used in idle loop */
extern unsigned long power7_nap(int check_irq);
extern unsigned long power7_sleep(void);
extern unsigned long power7_winkle(void);
+extern unsigned long power9_idle_stop(unsigned long stop_level);
+
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index a1bc7e758422..a19f831a4cc9 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -526,4 +526,6 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);
+void ps3_early_mm_init(void);
+
#endif
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 0427b0b53d2d..a1dc784d70e8 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -104,7 +104,7 @@
#define PS3AV_CMD_AV_INPUTLEN_16 0x02
#define PS3AV_CMD_AV_INPUTLEN_20 0x0a
#define PS3AV_CMD_AV_INPUTLEN_24 0x0b
-/* alayout */
+/* av_layout */
#define PS3AV_CMD_AV_LAYOUT_32 (1 << 0)
#define PS3AV_CMD_AV_LAYOUT_44 (1 << 1)
#define PS3AV_CMD_AV_LAYOUT_48 (1 << 2)
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 2eeaf80d41b7..4ba26dd259fd 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -96,7 +96,7 @@ static inline bool pte_user(pte_t pte)
#define PTE_RPN_SHIFT (PAGE_SHIFT)
#endif
-/* The mask convered by the RPN must be a ULL on 32-bit platforms with
+/* The mask covered by the RPN must be a ULL on 32-bit platforms with
* 64-bit PTEs
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index c0c61fa9cd9e..e4923686e43a 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -47,7 +47,7 @@
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define STACK_FRAME_MIN_SIZE 32
#else
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a0948f40bc7b..f69f40f1519a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -145,6 +145,15 @@
#define MSR_64BIT 0
#endif
+/* Power Management - Processor Stop Status and Control Register Fields */
+#define PSSCR_RL_MASK 0x0000000F /* Requested Level */
+#define PSSCR_MTL_MASK 0x000000F0 /* Maximum Transition Level */
+#define PSSCR_TR_MASK 0x00000300 /* Transition State */
+#define PSSCR_PSLL_MASK 0x000F0000 /* Power-Saving Level Limit */
+#define PSSCR_EC 0x00100000 /* Exit Criterion */
+#define PSSCR_ESL 0x00200000 /* Enable State Loss */
+#define PSSCR_SD 0x00400000 /* Status Disable */
+
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
@@ -268,6 +277,7 @@
#define DSISR_KEYFAULT 0x00200000 /* Key fault */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
+#define SPRN_CIR 0x11B /* Chip Information Register (hyper, R/0) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */
@@ -282,15 +292,19 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
#define SPRN_LDBAR 0x352 /* LD Base Address Register */
#define SPRN_PMICR 0x354 /* Power Management Idle Control Reg */
#define SPRN_PMSR 0x355 /* Power Management Status Reg */
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
+#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
@@ -300,10 +314,12 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_LM __MASK(FSCR_LM_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_LM __MASK(FSCR_LM_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
#define HFSCR_TM __MASK(FSCR_TM_LG)
@@ -314,40 +330,43 @@
#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
-#define LPCR_VPM0 (1ul << (63-0))
-#define LPCR_VPM1 (1ul << (63-1))
-#define LPCR_ISL (1ul << (63-2))
-#define LPCR_VC_SH (63-2)
-#define LPCR_DPFD_SH (63-11)
-#define LPCR_DPFD (7ul << LPCR_DPFD_SH)
-#define LPCR_VRMASD (0x1ful << (63-16))
-#define LPCR_VRMA_L (1ul << (63-12))
-#define LPCR_VRMA_LP0 (1ul << (63-15))
-#define LPCR_VRMA_LP1 (1ul << (63-16))
-#define LPCR_VRMASD_SH (63-16)
-#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
-#define LPCR_RMLS_SH (63-37)
-#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
-#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
-#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
-#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
-#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
-#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
-#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
-#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
-#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
-#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
-#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
-#define LPCR_MER 0x00000800 /* Mediated External Exception */
-#define LPCR_MER_SH 11
-#define LPCR_TC 0x00000200 /* Translation control */
-#define LPCR_LPES 0x0000000c
-#define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */
-#define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */
-#define LPCR_LPES_SH 2
-#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
-#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
-#define LPCR_UPRT 0x00400000 /* Use Process Table (ISA 3) */
+#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
+#define LPCR_VPM1 ASM_CONST(0x4000000000000000)
+#define LPCR_ISL ASM_CONST(0x2000000000000000)
+#define LPCR_VC_SH 61
+#define LPCR_DPFD_SH 52
+#define LPCR_DPFD (ASM_CONST(7) << LPCR_DPFD_SH)
+#define LPCR_VRMASD_SH 47
+#define LPCR_VRMASD (ASM_CONST(1) << LPCR_VRMASD_SH)
+#define LPCR_VRMA_L ASM_CONST(0x0008000000000000)
+#define LPCR_VRMA_LP0 ASM_CONST(0x0001000000000000)
+#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
+#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
+#define LPCR_RMLS_SH 26
+#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
+#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
+#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
+#define LPCR_AIL_3 ASM_CONST(0x0000000001800000) /* MMU on exception offset 0xc00...4xxx */
+#define LPCR_ONL ASM_CONST(0x0000000000040000) /* online - PURR/SPURR count */
+#define LPCR_LD ASM_CONST(0x0000000000020000) /* large decremeter */
+#define LPCR_PECE ASM_CONST(0x000000000001f000) /* powersave exit cause enable */
+#define LPCR_PECEDP ASM_CONST(0x0000000000010000) /* directed priv dbells cause exit */
+#define LPCR_PECEDH ASM_CONST(0x0000000000008000) /* directed hyp dbells cause exit */
+#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
+#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
+#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
+#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
+#define LPCR_MER_SH 11
+#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
+#define LPCR_LPES 0x0000000c
+#define LPCR_LPES0 ASM_CONST(0x0000000000000008) /* LPAR Env selector 0 */
+#define LPCR_LPES1 ASM_CONST(0x0000000000000004) /* LPAR Env selector 1 */
+#define LPCR_LPES_SH 2
+#define LPCR_RMI ASM_CONST(0x0000000000000002) /* real mode is cache inhibit */
+#define LPCR_HVICE ASM_CONST(0x0000000000000002) /* P9: HV interrupt enable */
+#define LPCR_HDICE ASM_CONST(0x0000000000000001) /* Hyp Decr enable (HV,PR,EE) */
+#define LPCR_UPRT ASM_CONST(0x0000000000400000) /* Use Process Table (ISA 3) */
+#define LPCR_HR ASM_CONST(0x0000000000100000)
#ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
#endif
@@ -1237,15 +1256,6 @@ static inline void msr_check_and_clear(unsigned long bits)
__msr_check_and_clear(bits);
}
-static inline unsigned long mfvtb (void)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- return mfspr(SPRN_VTB);
-#endif
- return 0;
-}
-
#ifdef __powerpc64__
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
@@ -1288,6 +1298,7 @@ static inline unsigned long mfvtb (void)
asm volatile("mfspr %0, %1" : "=r" (rval) : \
"i" (SPRN_TBRU)); rval;})
#endif
+#define mftb() mftbl()
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 51400baa8d48..9c23baa10b81 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -339,9 +339,9 @@ extern int rtas_service_present(const char *service);
extern int rtas_call(int token, int, int, int *, ...);
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
int nret, ...);
-extern void rtas_restart(char *cmd);
+extern void __noreturn rtas_restart(char *cmd);
extern void rtas_power_off(void);
-extern void rtas_halt(void);
+extern void __noreturn rtas_halt(void);
extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_sensor_fast(int sensor, int index, int *state);
@@ -351,7 +351,6 @@ extern bool rtas_indicator_present(int token, int *maxindex);
extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
-extern void rtas_initialize(void);
extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_online_cpus_mask(cpumask_var_t cpus);
@@ -460,9 +459,11 @@ static inline int page_is_rtas_user_buf(unsigned long pfn)
/* Not the best place to put pSeries_coalesce_init, will be fixed when we
* move some of the rtas suspend-me stuff to pseries */
extern void pSeries_coalesce_init(void);
+void rtas_initialize(void);
#else
static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
static inline void pSeries_coalesce_init(void) { }
+static inline void rtas_initialize(void) { };
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
diff --git a/arch/powerpc/include/asm/rtc.h b/arch/powerpc/include/asm/rtc.h
deleted file mode 100644
index f5802926b6c0..000000000000
--- a/arch/powerpc/include/asm/rtc.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Real-time clock definitions and interfaces
- *
- * Author: Tom Rini <trini@mvista.com>
- *
- * 2002 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Based on:
- * include/asm-m68k/rtc.h
- *
- * Copyright Richard Zidlicky
- * implementation details for genrtc/q40rtc driver
- *
- * And the old drivers/macintosh/rtc.c which was heavily based on:
- * Linux/SPARC Real Time Clock Driver
- * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
- *
- * With additional work by Paul Mackerras and Franz Sirl.
- */
-
-#ifndef __ASM_POWERPC_RTC_H__
-#define __ASM_POWERPC_RTC_H__
-
-#ifdef __KERNEL__
-
-#include <linux/rtc.h>
-
-#include <asm/machdep.h>
-#include <asm/time.h>
-
-#define RTC_PIE 0x40 /* periodic interrupt enable */
-#define RTC_AIE 0x20 /* alarm interrupt enable */
-#define RTC_UIE 0x10 /* update-finished interrupt enable */
-
-/* some dummy definitions */
-#define RTC_BATT_BAD 0x100 /* battery bad */
-#define RTC_SQWE 0x08 /* enable square-wave output */
-#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
-#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
-#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
-
-static inline unsigned int get_rtc_time(struct rtc_time *time)
-{
- if (ppc_md.get_rtc_time)
- ppc_md.get_rtc_time(time);
- return RTC_24H;
-}
-
-/* Set the current date and time in the real time clock. */
-static inline int set_rtc_time(struct rtc_time *time)
-{
- if (ppc_md.set_rtc_time)
- return ppc_md.set_rtc_time(time);
- return -EINVAL;
-}
-
-static inline unsigned int get_rtc_ss(void)
-{
- struct rtc_time h;
-
- get_rtc_time(&h);
- return h.tm_sec;
-}
-
-static inline int get_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-static inline int set_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_POWERPC_RTC_H__ */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index abf5866e08c6..7dc006b58369 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -62,7 +62,7 @@ static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
#endif
}
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
#undef dereference_function_descriptor
static inline void *dereference_function_descriptor(void *ptr)
{
@@ -73,7 +73,7 @@ static inline void *dereference_function_descriptor(void *ptr)
ptr = p;
return ptr;
}
-#endif
+#endif /* PPC64_ELF_ABI_v1 */
#endif
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e9d384cbd021..654d64c9f3ac 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -26,6 +26,18 @@ void initmem_init(void);
void setup_panic(void);
#define ARCH_PANIC_TIMEOUT 180
+#ifdef CONFIG_PPC_PSERIES
+extern void pseries_enable_reloc_on_exc(void);
+extern void pseries_disable_reloc_on_exc(void);
+extern void pseries_big_endian_exceptions(void);
+extern void pseries_little_endian_exceptions(void);
+#else
+static inline void pseries_enable_reloc_on_exc(void) {}
+static inline void pseries_disable_reloc_on_exc(void) {}
+static inline void pseries_big_endian_exceptions(void) {}
+static inline void pseries_little_endian_exceptions(void) {}
+#endif /* CONFIG_PPC_PSERIES */
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index e1afd4c4f695..0d02c11dc331 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -160,9 +160,6 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
{
paca[cpu].hw_cpu_id = phys;
}
-
-extern void smp_release_cpus(void);
-
#else
/* 32-bit */
#ifndef CONFIG_SMP
@@ -179,6 +176,12 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_PPC64 */
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC))
+extern void smp_release_cpus(void);
+#else
+static inline void smp_release_cpus(void) { };
+#endif
+
extern int smt_enabled_at_boot;
extern void smp_mpic_probe(void);
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index f280dd11243f..09f98e861869 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -185,7 +185,7 @@
* x = processor mask
* y = op. point index
* z = processor freq. step index
- * I haven't yet decyphered result codes
+ * I haven't yet deciphered result codes
*
*/
#define SMU_CMD_POWER_COMMAND 0xaa
@@ -471,13 +471,6 @@ extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
/*
- * SMU command buffer absolute address, exported by pmac_setup,
- * this is allocated very early during boot.
- */
-extern unsigned long smu_cmdbuf_abs;
-
-
-/*
* Kernel asynchronous i2c interface
*/
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 523673d7583c..fa37fe93bc02 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -162,12 +162,38 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
lock->slock = 0;
}
-#ifdef CONFIG_PPC64
-extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
-#else
-#define arch_spin_unlock_wait(lock) \
- do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#endif
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lock_val;
+
+ smp_mb();
+
+ /*
+ * Atomically load and store back the lock value (unchanged). This
+ * ensures that our observation of the lock value is ordered with
+ * respect to other lock operations.
+ */
+ __asm__ __volatile__(
+"1: " PPC_LWARX(%0, 0, %2, 0) "\n"
+" stwcx. %0, 0, %2\n"
+" bne- 1b\n"
+ : "=&r" (lock_val), "+m" (*lock)
+ : "r" (lock)
+ : "cr0", "xer");
+
+ if (arch_spin_value_unlocked(lock_val))
+ goto out;
+
+ while (lock->slock) {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ }
+ HMT_medium();
+
+out:
+ smp_mb();
+}
/*
* Read-write spinlocks, allowing multiple readers
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index e40010abcaf1..da3cdffca440 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -3,12 +3,8 @@
#ifdef __KERNEL__
-#define __HAVE_ARCH_STRCPY
#define __HAVE_ARCH_STRNCPY
-#define __HAVE_ARCH_STRLEN
-#define __HAVE_ARCH_STRCMP
#define __HAVE_ARCH_STRNCMP
-#define __HAVE_ARCH_STRCAT
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 17c8380673a6..0a74ebe934e1 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -75,6 +75,14 @@ static inline void disable_kernel_spe(void)
static inline void __giveup_spe(struct task_struct *t) { }
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void flush_tmregs_to_thread(struct task_struct *);
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *t)
+{
+}
+#endif
+
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index c50868681f9e..78efe8d5d775 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,7 +13,6 @@
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
-extern void do_final_fixups(void);
static inline void eieio(void)
{
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index 743f36b38e5d..12e362935160 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -31,9 +31,6 @@
*/
#define TCE_VB 0
#define TCE_PCI 1
-#define TCE_PCI_SWINV_CREATE 2
-#define TCE_PCI_SWINV_FREE 4
-#define TCE_PCI_SWINV_PAIR 8
/* TCE page size is 4096 bytes (1 << 12) */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8febc3f66d53..87e4b2d8dcd4 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/stringify.h>
+#include <asm/accounting.h>
/*
* low level task data.
@@ -46,6 +47,9 @@ struct thread_info {
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
#endif
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
+ struct cpu_accounting_data accounting;
+#endif
/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
@@ -134,40 +138,15 @@ static inline struct thread_info *current_thread_info(void)
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
-#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
#define TLF_LAZY_MMU 3 /* tlb_batch is active */
#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
#define _TLF_NAPPING (1 << TLF_NAPPING)
#define _TLF_SLEEPING (1 << TLF_SLEEPING)
-#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->local_flags |= _TLF_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
- return false;
- ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
- return true;
-}
static inline bool test_thread_local_flags(unsigned int flags)
{
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1092fdd7e737..b240666b7bc1 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,6 +18,7 @@
#include <linux/percpu.h>
#include <asm/processor.h>
+#include <asm/cpu_has_feature.h>
/* time.c */
extern unsigned long tb_ticks_per_jiffy;
@@ -103,7 +104,7 @@ static inline u64 get_vtb(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_ARCH_207S))
- return mfvtb();
+ return mfspr(SPRN_VTB);
#endif
return 0;
}
@@ -146,7 +147,7 @@ static inline void set_tb(unsigned int upper, unsigned int lower)
* in auto-reload mode. The problem is PIT stops counting when it
* hits zero. If it would wrap, we could use it just like a decrementer.
*/
-static inline unsigned int get_dec(void)
+static inline u64 get_dec(void)
{
#if defined(CONFIG_40x)
return (mfspr(SPRN_PIT));
@@ -160,10 +161,10 @@ static inline unsigned int get_dec(void)
* in when the decrementer generates its interrupt: on the 1 to 0
* transition for Book E/4xx, but on the 0 to -1 transition for others.
*/
-static inline void set_dec(int val)
+static inline void set_dec(u64 val)
{
#if defined(CONFIG_40x)
- mtspr(SPRN_PIT, val);
+ mtspr(SPRN_PIT, (u32) val);
#else
#ifndef CONFIG_BOOKE
--val;
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 20733fa518ae..f6f68f73e858 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -46,5 +46,18 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
#endif
}
+#ifdef CONFIG_SMP
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+ return cpumask_subset(mm_cpumask(mm),
+ topology_sibling_cpumask(smp_processor_id()));
+}
+#else
+static inline int mm_is_core_local(struct mm_struct *mm)
+{
+ return 1;
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 1b38eea28e5a..13dbcd41885e 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -54,7 +54,6 @@ extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i)
#endif
-#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
#elif defined(CONFIG_PPC_STD_MMU_32)
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h
index d531d9e173ef..c2a955bb0daa 100644
--- a/arch/powerpc/include/asm/tsi108.h
+++ b/arch/powerpc/include/asm/tsi108.h
@@ -77,7 +77,7 @@
* nodes if your board uses the Broadcom PHYs
*/
#define TSI108_PHY_MV88E 0 /* Marvel 88Exxxx PHY */
-#define TSI108_PHY_BCM54XX 1 /* Broardcom BCM54xx PHY */
+#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
/* Global variables */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index bfb6ded38ffa..49a0678a53fa 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -15,6 +15,14 @@
#include <uapi/asm/types.h>
+#ifdef __powerpc64__
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define PPC64_ELF_ABI_v2
+#else
+#define PPC64_ELF_ABI_v1
+#endif
+#endif /* __powerpc64__ */
+
#ifndef __ASSEMBLY__
typedef __vector128 vector128;
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index b7c20f0b8fbe..c1dc6c14deb8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
{
unsigned long over;
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(VERIFY_READ, from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + n - TASK_SIZE;
+ if (!__builtin_constant_p(n - over))
+ check_object_size(to, n - over, false);
return __copy_tofrom_user((__force void __user *)to, from,
n - over) + over;
}
@@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
{
unsigned long over;
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
+ }
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + n - TASK_SIZE;
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n - over, true);
return __copy_tofrom_user(to, (__force void __user *)from,
n - over) + over;
}
@@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
if (ret == 0)
return 0;
}
+
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
+
return __copy_tofrom_user((__force void __user *)to, from, n);
}
@@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
if (ret == 0)
return 0;
}
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
+
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 04ef3ae511da..f5f729c11578 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -42,6 +42,12 @@ extern int icp_hv_init(void);
static inline int icp_hv_init(void) { return -ENODEV; }
#endif
+#ifdef CONFIG_PPC_POWERNV
+extern int icp_opal_init(void);
+#else
+static inline int icp_opal_init(void) { return -ENODEV; }
+#endif
+
/* ICP ops */
struct icp_ops {
unsigned int (*get_irq)(void);
diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h
index 0abb97f3be10..a36c2069d8ed 100644
--- a/arch/powerpc/include/asm/xor.h
+++ b/arch/powerpc/include/asm/xor.h
@@ -23,6 +23,7 @@
#ifdef CONFIG_ALTIVEC
#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
unsigned long *v2_in);
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index c2d21d11c2d2..3a9e44c45c78 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -91,6 +91,11 @@
#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
#define ELF_NFPREG 33 /* includes fpscr */
+#define ELF_NVMX 34 /* includes all vector registers */
+#define ELF_NVSX 32 /* includes all VSX registers */
+#define ELF_NTMSPRREG 3 /* include tfhar, tfiar, texasr */
+#define ELF_NEBB 3 /* includes ebbrr, ebbhr, bescr */
+#define ELF_NPMU 5 /* includes siar, sdar, sier, mmcr2, mmcr0 */
typedef unsigned long elf_greg_t64;
typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2da380fcc34c..b2027a5cf508 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -41,13 +41,12 @@ obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
-obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
-obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
+obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o hmi.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
-obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
+obj-$(CONFIG_PPC_P7_NAP) += idle_book3s.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
@@ -87,7 +86,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
-obj-$(CONFIG_RELOCATABLE_PPC32) += reloc_32.o
+obj-$(CONFIG_RELOCATABLE) += reloc_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 8e7cb8e2b21a..033f3385fa49 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -26,6 +26,7 @@
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
+#include <asm/cpu_has_feature.h>
struct aligninfo {
unsigned char len;
@@ -228,9 +229,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#else
#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
#endif
-#endif
-
-#ifdef __LITTLE_ENDIAN__
+#else
#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
#endif
@@ -875,6 +874,20 @@ int fix_alignment(struct pt_regs *regs)
return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
}
#endif
+
+ /*
+ * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
+ * check.
+ *
+ * Send a SIGBUS to the process that caused the fault.
+ *
+ * We do not emulate these because paste may contain additional metadata
+ * when pasting to a co-processor. Furthermore, paste_last is the
+ * synchronisation point for preceding copy/paste sequences.
+ */
+ if ((instruction & 0xfc0006fe) == PPC_INST_COPY)
+ return -EIO;
+
/* A size of 0 indicates an instruction we don't support, with
* the exception of DCBZ which is handled as a special case here
*/
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9ea09551a2cd..b89d14c0352c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -68,17 +68,18 @@
#include "../mm/mmu_decl.h"
#endif
+#ifdef CONFIG_PPC_8xx
+#include <asm/fixmap.h>
+#endif
+
int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
- DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
- DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
- DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
@@ -132,17 +133,6 @@ int main(void)
DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- DEFINE(THREAD_TAR, offsetof(struct thread_struct, tar));
- DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
- DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
- DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
- DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
- DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
- DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
- DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
- DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
-#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
@@ -178,7 +168,6 @@ int main(void)
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
- DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
@@ -255,13 +244,28 @@ int main(void)
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default));
- DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime));
- DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user));
- DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
- DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+ DEFINE(ACCOUNT_STARTTIME,
+ offsetof(struct paca_struct, accounting.starttime));
+ DEFINE(ACCOUNT_STARTTIME_USER,
+ offsetof(struct paca_struct, accounting.starttime_user));
+ DEFINE(ACCOUNT_USER_TIME,
+ offsetof(struct paca_struct, accounting.user_time));
+ DEFINE(ACCOUNT_SYSTEM_TIME,
+ offsetof(struct paca_struct, accounting.system_time));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
+#else /* CONFIG_PPC64 */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ DEFINE(ACCOUNT_STARTTIME,
+ offsetof(struct thread_info, accounting.starttime));
+ DEFINE(ACCOUNT_STARTTIME_USER,
+ offsetof(struct thread_info, accounting.starttime_user));
+ DEFINE(ACCOUNT_USER_TIME,
+ offsetof(struct thread_info, accounting.user_time));
+ DEFINE(ACCOUNT_SYSTEM_TIME,
+ offsetof(struct thread_info, accounting.system_time));
+#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -275,12 +279,6 @@ int main(void)
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
-
- /* hcall statistics */
- DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats));
- DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls));
- DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total));
- DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total));
#endif /* CONFIG_PPC64 */
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
@@ -298,23 +296,6 @@ int main(void)
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
#ifndef CONFIG_PPC64
DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
- DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
- DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
- DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
- DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
- DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
- DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
- DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
- DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
- DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
- DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
- DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
- DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
- DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
- DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
- DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
- DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
- DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
#endif /* CONFIG_PPC64 */
/*
* Note: these symbols include _ because they overlap with special
@@ -332,7 +313,6 @@ int main(void)
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
#ifndef CONFIG_PPC64
- DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
/*
* The PowerPC 400-class & Book-E processors have neither the DAR
* nor the DSISR SPRs. Hence, we overload them to hold the similar
@@ -369,8 +349,6 @@ int main(void)
DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif
- DEFINE(CLONE_VM, CLONE_VM);
- DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
#ifndef CONFIG_PPC64
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
@@ -380,7 +358,6 @@ int main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
- DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush));
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
@@ -395,7 +372,6 @@ int main(void)
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
- DEFINE(CFG_STAMP_XSEC, offsetof(struct vdso_data, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
@@ -517,7 +493,6 @@ int main(void)
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
- DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
@@ -528,7 +503,6 @@ int main(void)
DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu));
#endif
#ifdef CONFIG_PPC_BOOK3S
- DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
@@ -566,7 +540,6 @@ int main(void)
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
- DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr));
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
@@ -576,7 +549,6 @@ int main(void)
DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
- DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
@@ -693,7 +665,6 @@ int main(void)
DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr));
DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar));
DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar));
- DEFINE(KVM_SPLIT_SIZE, offsetof(struct kvm_split_mode, subcore_size));
DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap));
DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped));
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
@@ -756,7 +727,6 @@ int main(void)
#ifdef CONFIG_KVM_BOOKE_HV
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
- DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
@@ -783,5 +753,9 @@ int main(void)
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+#ifdef CONFIG_PPC_8xx
+ DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f8cd9fba4d35..c5e5a94d9892 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -156,7 +156,7 @@ setup_7410_workarounds:
blr
/* 740/750/7400/7410
- * Enable Store Gathering (SGE), Address Brodcast (ABE),
+ * Enable Store Gathering (SGE), Address Broadcast (ABE),
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
* Clear Instruction cache throttling (ICTC)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119fa8b0..52ff3f025437 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -51,6 +51,7 @@ _GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_PMU
+ bl __init_PMU_ISA207
bl __init_hvmode_206
mtlr r11
beqlr
@@ -62,6 +63,7 @@ _GLOBAL(__setup_cpu_power8)
bl __init_HFSCR
bl __init_tlb_power8
bl __init_PMU_HV
+ bl __init_PMU_HV_ISA207
mtlr r11
blr
@@ -69,6 +71,7 @@ _GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
bl __init_PMU
+ bl __init_PMU_ISA207
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -81,12 +84,14 @@ _GLOBAL(__restore_cpu_power8)
bl __init_HFSCR
bl __init_tlb_power8
bl __init_PMU_HV
+ bl __init_PMU_HV_ISA207
mtlr r11
blr
_GLOBAL(__setup_cpu_power9)
mflr r11
bl __init_FSCR
+ bl __init_PMU
bl __init_hvmode_206
mtlr r11
beqlr
@@ -94,15 +99,18 @@ _GLOBAL(__setup_cpu_power9)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
+ ori r3, r3, LPCR_HVICE
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
+ bl __init_PMU_HV
mtlr r11
blr
_GLOBAL(__restore_cpu_power9)
mflr r11
bl __init_FSCR
+ bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -111,9 +119,11 @@ _GLOBAL(__restore_cpu_power9)
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
+ ori r3, r3, LPCR_HVICE
bl __init_LPCR
bl __init_HFSCR
bl __init_tlb_power9
+ bl __init_PMU_HV
mtlr r11
blr
@@ -208,14 +218,22 @@ __init_tlb_power9:
__init_PMU_HV:
li r5,0
mtspr SPRN_MMCRC,r5
+ blr
+
+__init_PMU_HV_ISA207:
+ li r5,0
mtspr SPRN_MMCRH,r5
blr
__init_PMU:
li r5,0
- mtspr SPRN_MMCRS,r5
mtspr SPRN_MMCRA,r5
mtspr SPRN_MMCR0,r5
mtspr SPRN_MMCR1,r5
mtspr SPRN_MMCR2,r5
blr
+
+__init_PMU_ISA207:
+ li r5,0
+ mtspr SPRN_MMCRS,r5
+ blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index eeeacf6235a3..74248ab18e98 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -15,6 +15,7 @@
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/jump_label.h>
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
@@ -137,7 +138,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTRS_POWER4,
+ .mmu_features = MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -152,7 +153,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
- .mmu_features = MMU_FTRS_POWER4,
+ .mmu_features = MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -2224,3 +2225,39 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
return NULL;
}
+
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
+ [0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(cpu_feature_keys);
+
+void __init cpu_feature_keys_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
+ unsigned long f = 1ul << i;
+
+ if (!(cur_cpu_spec->cpu_features & f))
+ static_branch_disable(&cpu_feature_keys[i]);
+ }
+}
+
+struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
+ [0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(mmu_feature_keys);
+
+void __init mmu_feature_keys_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
+ unsigned long f = 1ul << i;
+
+ if (!(cur_cpu_spec->mmu_features & f))
+ static_branch_disable(&mmu_feature_keys[i]);
+ }
+}
+#endif
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 2bb252c01f07..47b63de81f9b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,8 +48,8 @@ int crashing_cpu = -1;
static int time_to_dump;
#define CRASH_HANDLER_MAX 3
-/* NULL terminated list of shutdown handles */
-static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
+/* List of shutdown handles */
+static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
@@ -65,7 +65,7 @@ static int handle_fault(struct pt_regs *regs)
#ifdef CONFIG_SMP
static atomic_t cpus_in_crash;
-void crash_ipi_callback(struct pt_regs *regs)
+static void crash_ipi_callback(struct pt_regs *regs)
{
static cpumask_t cpus_state_saved = CPU_MASK_NONE;
@@ -288,9 +288,14 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
rc = 1;
} else {
/* Shift handles down */
- for (; crash_shutdown_handles[i]; i++)
+ for (; i < (CRASH_HANDLER_MAX - 1); i++)
crash_shutdown_handles[i] =
crash_shutdown_handles[i+1];
+ /*
+ * Reset the last entry to NULL now that it has been shifted down;
+ * this allows new handles to be added here.
+ */
+ crash_shutdown_handles[i] = NULL;
rc = 0;
}
@@ -346,7 +351,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
old_handler = __debugger_fault_handler;
__debugger_fault_handler = handle_fault;
crash_shutdown_cpu = smp_processor_id();
- for (i = 0; crash_shutdown_handles[i]; i++) {
+ for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
if (setjmp(crash_shutdown_buf) == 0) {
/*
* Insert syncs and delay to ensure
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 41a7d9d49a5a..fb7cbaa37658 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -18,7 +18,7 @@
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
@@ -27,7 +27,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
@@ -40,7 +40,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
size, device_to_mask(dev), direction, attrs);
@@ -49,7 +49,7 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
attrs);
@@ -58,7 +58,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
device_to_mask(dev), direction, attrs);
@@ -66,7 +66,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3f1472a78f39..e64a6016fba7 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -64,7 +64,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -121,7 +121,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
void __dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr);
@@ -132,7 +132,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size,
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *iommu;
@@ -156,7 +156,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *iommu;
@@ -177,7 +177,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long pfn;
@@ -195,7 +195,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -211,7 +211,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
@@ -232,7 +232,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
unsigned long offset,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(dir == DMA_NONE);
__dma_sync_page(page, offset, size, dir);
@@ -243,7 +243,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index ddbcfab7efdf..d4cc26618809 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -114,9 +114,9 @@ static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
while (n) {
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
- pr_debug("PCI: %s addr range %d [%lx-%lx]: %s\n",
+ pr_debug("PCI: %s addr range %d [%pap-%pap]: %s\n",
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
- piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
+ &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
cnt++;
n = rb_next(n);
}
@@ -159,8 +159,8 @@ eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
piar->flags = flags;
#ifdef DEBUG
- pr_debug("PIAR: insert range=[%lx:%lx] dev=%s\n",
- alo, ahi, pci_name(dev));
+ pr_debug("PIAR: insert range=[%pap:%pap] dev=%s\n",
+ &alo, &ahi, pci_name(dev));
#endif
rb_link_node(&piar->rb_node, parent, p);
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index 7815095fe3d8..d6b2ca70d14d 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -44,14 +44,13 @@
/**
* eeh_dev_init - Create EEH device according to OF node
* @pdn: PCI device node
- * @data: PHB
*
* It will create EEH device according to the given OF node. The function
* might be called by PCI emunation, DR, PHB hotplug.
*/
-void *eeh_dev_init(struct pci_dn *pdn, void *data)
+struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
{
- struct pci_controller *phb = data;
+ struct pci_controller *phb = pdn->phb;
struct eeh_dev *edev;
/* Allocate EEH device */
@@ -69,7 +68,7 @@ void *eeh_dev_init(struct pci_dn *pdn, void *data)
INIT_LIST_HEAD(&edev->list);
INIT_LIST_HEAD(&edev->rmv_list);
- return NULL;
+ return edev;
}
/**
@@ -81,16 +80,8 @@ void *eeh_dev_init(struct pci_dn *pdn, void *data)
*/
void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
- struct pci_dn *root = phb->pci_data;
-
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
-
- /* EEH device for PHB */
- eeh_dev_init(root, phb);
-
- /* EEH devices for children OF nodes */
- traverse_pci_dn(root, eeh_dev_init, phb);
}
/**
@@ -106,8 +97,6 @@ static int __init eeh_dev_phb_init(void)
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
eeh_dev_phb_init_dynamic(phb);
- pr_info("EEH: devices created\n");
-
return 0;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d70101e1e25c..5f36e8a70daa 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -139,7 +139,7 @@ static void eeh_enable_irq(struct pci_dev *dev)
* into it.
*
* That's just wrong.The warning in the core code is
- * there to tell people to fix their assymetries in
+ * there to tell people to fix their asymmetries in
* their own code, not by abusing the core information
* to avoid it.
*
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2405631e91a2..9899032230b4 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -175,6 +175,12 @@ transfer_to_handler:
addi r12,r12,-1
stw r12,4(r11)
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ CURRENT_THREAD_INFO(r9, r1)
+ tophys(r9, r9)
+ ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
+#endif
+
b 3f
2: /* if from kernel, check interrupted DOZE/NAP mode and
@@ -398,6 +404,13 @@ BEGIN_FTR_SECTION
lwarx r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ andi. r4,r8,MSR_PR
+ beq 3f
+ CURRENT_THREAD_INFO(r4, r1)
+ ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
+3:
+#endif
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@@ -769,6 +782,10 @@ restore_user:
andis. r10,r0,DBCR0_IDM@h
bnel- load_dbcr0
#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ CURRENT_THREAD_INFO(r9, r1)
+ ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
+#endif
b restore
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 73e461a3dfbb..6b8bc0dd09d4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -72,7 +72,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
std r0,GPR0(r1)
std r10,GPR1(r1)
beq 2f /* if from kernel mode */
- ACCOUNT_CPU_USER_ENTRY(r10, r11)
+ ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2: std r2,GPR2(r1)
std r3,GPR3(r1)
mfcr r2
@@ -246,7 +246,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
ld r4,_LINK(r1)
beq- 1f
- ACCOUNT_CPU_USER_EXIT(r11, r12)
+ ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
@@ -453,7 +453,7 @@ _GLOBAL(ret_from_kernel_thread)
REST_NVGPRS(r1)
mtlr r14
mr r3,r15
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
mr r12,r14
#endif
blrl
@@ -532,7 +532,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
b 2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
clrrdi r6,r8,28 /* get its ESID */
clrrdi r9,r1,28 /* get current sp ESID */
@@ -859,7 +859,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
mtspr SPRN_PPR,r2 /* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ACCOUNT_CPU_USER_EXIT(r2, r4)
+ ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
1:
mtspr SPRN_SRR1,r3
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 488e6314f993..38a1f96430e1 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -386,7 +386,7 @@ exc_##n##_common: \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \
+ ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \
2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
ld r4,excf+EX_R11(r13); /* get back r11 */ \
mfspr r5,scratch; /* get back r13 */ \
@@ -453,7 +453,7 @@ exc_##n##_bad_stack: \
sth r1,PACA_TRAP_SAVE(r13); /* store trap */ \
b bad_stack_book3e; /* bad stack error */
-/* WARNING: If you change the layout of this stub, make sure you chcek
+/* WARNING: If you change the layout of this stub, make sure you check
* the debug exception handler which handles single stepping
* into exceptions from userspace, and the MM code in
* arch/powerpc/mm/tlb_nohash.c which patches the branch here
@@ -1059,7 +1059,7 @@ fast_exception_return:
andi. r6,r10,MSR_PR
REST_2GPRS(6, r1)
beq 1f
- ACCOUNT_CPU_USER_EXIT(r10, r11)
+ ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
ld r0,GPR13(r1)
1: stdcx. r0,0,r1 /* to clear the reservation */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8bcc1b457115..41091fdf9bd8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -107,25 +107,9 @@ BEGIN_FTR_SECTION
beq 9f
cmpwi cr3,r13,2
-
- /*
- * Check if last bit of HSPGR0 is set. This indicates whether we are
- * waking up from winkle.
- */
GET_PACA(r13)
- clrldi r5,r13,63
- clrrdi r13,r13,1
- cmpwi cr4,r5,1
- mtspr SPRN_HSPRG0,r13
-
- lbz r0,PACA_THREAD_IDLE_STATE(r13)
- cmpwi cr2,r0,PNV_THREAD_NAP
- bgt cr2,8f /* Either sleep or Winkle */
+ bl pnv_restore_hyp_resource
- /* Waking up from nap should not cause hypervisor state loss */
- bgt cr3,.
-
- /* Waking up from nap */
li r0,PNV_THREAD_RUNNING
stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
@@ -143,13 +127,9 @@ BEGIN_FTR_SECTION
/* Return SRR1 from power7_nap() */
mfspr r3,SPRN_SRR1
- beq cr3,2f
- b power7_wakeup_noloss
-2: b power7_wakeup_loss
-
- /* Fast Sleep wakeup on PowerNV */
-8: GET_PACA(r13)
- b power7_wakeup_tb_loss
+ blt cr3,2f
+ b pnv_wakeup_loss
+2: b pnv_wakeup_noloss
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
@@ -351,6 +331,12 @@ hv_doorbell_trampoline:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_doorbell_hv
+ . = 0xea0
+hv_virt_irq_trampoline:
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b h_virt_irq_hv
+
/* We need to deal with the Altivec unavailable exception
* here which is at 0xf20, thus in the middle of the
* prolog code of the PerformanceMonitor one. A little
@@ -601,6 +587,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
+ MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq)
+ KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
+
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
@@ -680,6 +669,10 @@ _GLOBAL(__replay_interrupt)
BEGIN_FTR_SECTION
cmpwi r3,0xe80
beq h_doorbell_common
+ cmpwi r3,0xea0
+ beq h_virt_irq_common
+ cmpwi r3,0xe60
+ beq hmi_exception_common
FTR_SECTION_ELSE
cmpwi r3,0xa00
beq doorbell_super_common
@@ -754,6 +747,7 @@ kvmppc_skip_Hinterrupt:
#else
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
#endif
+ STD_EXCEPTION_COMMON_ASYNC(0xea0, h_virt_irq, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
@@ -762,11 +756,6 @@ kvmppc_skip_Hinterrupt:
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
#endif
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
- STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
- STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
-#endif /* CONFIG_CBE_RAS */
/*
* Relocation-on interrupts: A subset of the interrupts can be delivered
@@ -877,6 +866,12 @@ h_doorbell_relon_trampoline:
EXCEPTION_PROLOG_0(PACA_EXGEN)
b h_doorbell_relon_hv
+ . = 0x4ea0
+h_virt_irq_relon_trampoline:
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXGEN)
+ b h_virt_irq_relon_hv
+
. = 0x4f00
performance_monitor_relon_pseries_trampoline:
SET_SCRATCH0(r13)
@@ -945,7 +940,7 @@ BEGIN_MMU_FTR_SECTION
b do_hash_page /* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
b handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
.align 7
.globl h_data_storage_common
@@ -976,7 +971,7 @@ BEGIN_MMU_FTR_SECTION
b do_hash_page /* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
b handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
@@ -1131,12 +1126,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
bl vsx_unavailable_exception
b ret_from_except
- STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
- STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
-
/* Equivalents to the above handlers for relocation-on interrupt vectors */
STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
+ MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
@@ -1170,9 +1163,18 @@ fwnmi_data_area:
. = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
+ STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
+
+#ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
+ STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
+ STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
+
.globl hmi_exception_early
hmi_exception_early:
- EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0xe60)
+ EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, 0xe62)
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
@@ -1289,7 +1291,7 @@ machine_check_handle_early:
GET_PACA(r13)
ld r1,PACAR1(r13)
li r3,PNV_THREAD_NAP
- b power7_enter_nap_mode
+ b pnv_enter_arch207_idle_mode
4:
#endif
/*
@@ -1390,7 +1392,7 @@ slb_miss_realmode:
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
bl slb_allocate_realmode
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif
/* All done -- return from exception. */
@@ -1404,7 +1406,7 @@ BEGIN_MMU_FTR_SECTION
beq- 2f
FTR_SECTION_ELSE
b 2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
.machine push
.machine "power4"
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 3cb3b02a13dd..b3a663333d36 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1009,8 +1009,7 @@ static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
} while (wait_time);
if (rc) {
- printk(KERN_ERR "Failed to invalidate firmware-assisted dump "
- "rgistration. unexpected error(%d).\n", rc);
+ pr_err("Failed to invalidate firmware-assisted dump registration. Unexpected error (%d).\n", rc);
return rc;
}
fw_dump.dump_active = 0;
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1123a4d8d8dd..cc52d9795f88 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -144,6 +144,21 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
+#ifdef CC_USING_MPROFILE_KERNEL
+ /* When using -mprofile-kernel there is no load to jump over */
+ pop = PPC_INST_NOP;
+
+ if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+ pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+ return -EFAULT;
+ }
+
+ /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+ if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+ pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+ return -EINVAL;
+ }
+#else
/*
* Our original call site looks like:
*
@@ -170,24 +185,10 @@ __ftrace_make_nop(struct module *mod,
}
if (op != PPC_INST_LD_TOC) {
- unsigned int inst;
-
- if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
- pr_err("Fetching instruction at %lx failed.\n", ip - 4);
- return -EFAULT;
- }
-
- /* We expect either a mlfr r0, or a std r0, LRSAVE(r1) */
- if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
- pr_err("Unexpected instructions around bl _mcount\n"
- "when enabling dynamic ftrace!\t"
- "(%08x,bl,%08x)\n", inst, op);
- return -EINVAL;
- }
-
- /* When using -mkernel_profile there is no load to jump over */
- pop = PPC_INST_NOP;
+ pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+ return -EINVAL;
}
+#endif /* CC_USING_MPROFILE_KERNEL */
if (patch_instruction((unsigned int *)ip, pop)) {
pr_err("Patching NOP failed.\n");
@@ -608,7 +609,7 @@ unsigned long __init arch_syscall_addr(int nr)
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
-#if defined(CONFIG_PPC64) && (!defined(_CALL_ELF) || _CALL_ELF != 2)
+#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
if (str[0] == '.' && search[0] != '.')
@@ -616,4 +617,4 @@ char *arch_ftrace_match_adjust(char *str, const char *search)
else
return str;
}
-#endif /* defined(CONFIG_PPC64) && (!defined(_CALL_ELF) || _CALL_ELF != 2) */
+#endif /* PPC64_ELF_ABI_v1 */
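As a side note on the arch_ftrace_match_adjust() hunk just above: under the ELFv1 ABI the text entry of a function is the dot-prefixed symbol, so the match only needs to skip the leading dot when the search pattern has none. A small stand-alone sketch of that adjustment, with illustrative names only:

#include <stdio.h>

/* Sketch only: skip the ELFv1 dot prefix when the pattern is undotted. */
static const char *adjust_symbol(const char *sym, const char *pattern)
{
        if (sym[0] == '.' && pattern[0] != '.')
                return sym + 1;         /* match "schedule", not ".schedule" */
        return sym;
}

int main(void)
{
        printf("%s\n", adjust_symbol(".schedule", "schedule"));   /* schedule */
        printf("%s\n", adjust_symbol(".schedule", ".schedule"));  /* .schedule */
        return 0;
}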
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2d14774af6b4..f765b0434731 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -401,7 +401,7 @@ generic_secondary_common_init:
ld r12,CPU_SPEC_RESTORE(r23)
cmpdi 0,r12,0
beq 3f
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
ld r12,0(r12)
#endif
mtctr r12
@@ -941,7 +941,7 @@ start_here_multiplatform:
mtspr SPRN_SRR1,r4
RFI
b . /* prevent speculative execution */
-
+
/* This is where all platforms converge execution */
start_here_common:
@@ -951,9 +951,6 @@ start_here_common:
/* Load the TOC (virtual address) */
ld r2,PACATOC(r13)
- /* Do more system initializations in virtual mode */
- bl setup_system
-
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
*/
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 80c69472314e..43ddaae42baf 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -30,6 +30,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/fixmap.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
@@ -383,28 +384,57 @@ InstructionTLBMiss:
EXCEPTION_EPILOG_0
rfi
+/*
+ * Bottom part of DataStoreTLBMiss handler for the IMMR area,
+ * as there is not enough space in the DataStoreTLBMiss area itself
+ */
+DTLBMissIMMR:
+ mtcr r10
+ /* Set 512k byte guarded page and mark it valid */
+ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_IMMR /* Get current IMMR */
+ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+ _PAGE_PRESENT | _PAGE_NO_CACHE
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ EXCEPTION_EPILOG_0
+ rfi
+
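A quick sanity check of the mask used in DTLBMissIMMR above: 512 KB is 0x80000, so clearing the low 19 bits (mask 0xfff80000) rounds the IMMR address down to a 512 KB boundary. A throwaway sketch with a made-up IMMR value:

#include <stdio.h>

int main(void)
{
        unsigned int immr = 0xff012345;              /* example IMMR value */
        unsigned int base = immr & 0xfff80000;       /* align down to 512 KB */

        printf("%#x -> %#x\n", immr, base);          /* 0xff012345 -> 0xff000000 */
        return 0;
}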
. = 0x1200
DataStoreTLBMiss:
- mtspr SPRN_SPRG_SCRATCH2, r3
EXCEPTION_PROLOG_0
- mfcr r3
+ mfcr r10
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- mfspr r10, SPRN_MD_EPN
- IS_KERNEL(r11, r10)
+ mfspr r11, SPRN_MD_EPN
+ rlwinm r11, r11, 16, 0xfff8
+#ifndef CONFIG_PIN_TLB_IMMR
+ cmpli cr0, r11, VIRT_IMMR_BASE@h
+#endif
+ cmpli cr7, r11, PAGE_OFFSET@h
+#ifndef CONFIG_PIN_TLB_IMMR
+_ENTRY(DTLBMiss_jmp)
+ beq- DTLBMissIMMR
+#endif
+ bge- cr7, 4f
+
mfspr r11, SPRN_M_TW /* Get level 1 table */
- BRANCH_UNLESS_KERNEL(3f)
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
+ mtcr r10
+#ifdef CONFIG_8xx_CPU6
+ mtspr SPRN_SPRG_SCRATCH2, r3
+#endif
+ mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
- mtcr r11
- bt- 28,DTLBMiss8M /* bit 28 = Large page (8M) */
- mtcr r3
/* We have a pte table, so load fetch the pte from the table.
*/
@@ -452,29 +482,30 @@ DataStoreTLBMiss:
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
+#ifdef CONFIG_8xx_CPU6
mfspr r3, SPRN_SPRG_SCRATCH2
+#endif
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
-DTLBMiss8M:
- mtcr r3
- ori r11, r11, MD_SVALID
- MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
-#ifdef CONFIG_PPC_16K_PAGES
- /*
- * In 16k pages mode, each PGD entry defines a 64M block.
- * Here we select the 8M page within the block.
- */
- rlwimi r11, r10, 0, 0x03800000
-#endif
- rlwinm r10, r11, 0, 0xff800000
+4:
+_ENTRY(DTLBMiss_cmp)
+ cmpli cr0, r11, (PAGE_OFFSET + 0x1800000)@h
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ bge- 3b
+
+ mtcr r10
+ /* Set 8M byte page and mark it valid */
+ li r10, MD_PS8MEG | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_MD_EPN
+ rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
_PAGE_PRESENT
- MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
li r11, RPN_PATTERN
- mfspr r3, SPRN_SPRG_SCRATCH2
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
@@ -553,12 +584,14 @@ FixupDAR:/* Entry point for dcbx workaround. */
IS_KERNEL(r11, r10)
mfspr r11, SPRN_M_TW /* Get level 1 table */
BRANCH_UNLESS_KERNEL(3f)
+ rlwinm r11, r10, 16, 0xfff8
+_ENTRY(FixupDAR_cmp)
+ cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+ blt- cr7, 200f
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
- mtcr r11
- bt 28,200f /* bit 28 = Large page (8M) */
rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Insert level 2 index */
rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -584,8 +617,8 @@ FixupDAR:/* Entry point for dcbx workaround. */
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
- /* concat physical page address(r11) and page offset(r10) */
-200: rlwimi r11, r10, 0, 32 - (PAGE_SHIFT << 1), 31
+ /* create physical page address from effective address */
+200: tophys(r11, r10)
b 201b
144: mfspr r10, SPRN_DSISR
@@ -763,10 +796,18 @@ start_here:
* virtual to physical. Also, set the cache mode since that is defined
* by TLB entries and perform any additional mapping (like of the IMMR).
* If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
- * 24 Mbytes of data, and the 8M IMMR space. Anything not covered by
+ * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
* these mappings is mapped by page tables.
*/
initial_mmu:
+ li r8, 0
+ mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
+ lis r10, MD_RESETVAL@h
+#ifndef CONFIG_8xx_COPYBACK
+ oris r10, r10, MD_WTDEF@h
+#endif
+ mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
+
tlbia /* Invalidate all TLB entries */
/* Always pin the first 8 MB ITLB to prevent ITLB
misses while mucking around with SRR0/SRR1 in asm
@@ -777,34 +818,20 @@ initial_mmu:
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
#ifdef CONFIG_PIN_TLB
- lis r10, (MD_RSV4I | MD_RESETVAL)@h
- ori r10, r10, 0x1c00
- mr r8, r10
-#else
- lis r10, MD_RESETVAL@h
-#endif
-#ifndef CONFIG_8xx_COPYBACK
- oris r10, r10, MD_WTDEF@h
-#endif
+ oris r10, r10, MD_RSV4I@h
mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
+#endif
- /* Now map the lower 8 Meg into the TLBs. For this quick hack,
- * we can load the instruction and data TLB registers with the
- * same values.
- */
+ /* Now map the lower 8 Meg into the ITLB. */
lis r8, KERNELBASE@h /* Create vaddr for TLB */
ori r8, r8, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r8
- mtspr SPRN_MD_EPN, r8
li r8, MI_PS8MEG | (2 << 5) /* Set 8M byte page, APG 2 */
ori r8, r8, MI_SVALID /* Make it valid */
mtspr SPRN_MI_TWC, r8
- li r8, MI_PS8MEG /* Set 8M byte page, APG 0 */
- ori r8, r8, MI_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r8
li r8, MI_BOOTINIT /* Create RPN for address 0 */
mtspr SPRN_MI_RPN, r8 /* Store TLB entry */
- mtspr SPRN_MD_RPN, r8
+
lis r8, MI_APG_INIT@h /* Set protection modes */
ori r8, r8, MI_APG_INIT@l
mtspr SPRN_MI_AP, r8
@@ -812,51 +839,25 @@ initial_mmu:
ori r8, r8, MD_APG_INIT@l
mtspr SPRN_MD_AP, r8
- /* Map another 8 MByte at the IMMR to get the processor
+ /* Map a 512k page for the IMMR to get the processor
* internal registers (among other things).
*/
-#ifdef CONFIG_PIN_TLB
- addi r10, r10, 0x0100
+#ifdef CONFIG_PIN_TLB_IMMR
+ ori r10, r10, 0x1c00
mtspr SPRN_MD_CTR, r10
-#endif
+
mfspr r9, 638 /* Get current IMMR */
- andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
+ andis. r9, r9, 0xfff8 /* Get 512 kbytes boundary */
- mr r8, r9 /* Create vaddr for TLB */
+ lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */
ori r8, r8, MD_EVALID /* Mark it valid */
mtspr SPRN_MD_EPN, r8
- li r8, MD_PS8MEG /* Set 8M byte page */
+ li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
ori r8, r8, MD_SVALID /* Make it valid */
mtspr SPRN_MD_TWC, r8
mr r8, r9 /* Create paddr for TLB */
ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
mtspr SPRN_MD_RPN, r8
-
-#ifdef CONFIG_PIN_TLB
- /* Map two more 8M kernel data pages.
- */
- addi r10, r10, 0x0100
- mtspr SPRN_MD_CTR, r10
-
- lis r8, KERNELBASE@h /* Create vaddr for TLB */
- addis r8, r8, 0x0080 /* Add 8M */
- ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr SPRN_MD_EPN, r8
- li r9, MI_PS8MEG /* Set 8M byte page */
- ori r9, r9, MI_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r9
- li r11, MI_BOOTINIT /* Create RPN for address 0 */
- addis r11, r11, 0x0080 /* Add 8M */
- mtspr SPRN_MD_RPN, r11
-
- addi r10, r10, 0x0100
- mtspr SPRN_MD_CTR, r10
-
- addis r8, r8, 0x0080 /* Add 8M */
- mtspr SPRN_MD_EPN, r8
- mtspr SPRN_MD_TWC, r9
- addis r11, r11, 0x0080 /* Add 8M */
- mtspr SPRN_MD_RPN, r11
#endif
/* Since the cache is enabled according to the information we
diff --git a/arch/powerpc/kernel/hmi.c b/arch/powerpc/kernel/hmi.c
new file mode 100644
index 000000000000..e3f738eb1cac
--- /dev/null
+++ b/arch/powerpc/kernel/hmi.c
@@ -0,0 +1,56 @@
+/*
+ * Hypervisor Maintenance Interrupt (HMI) handling.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * Copyright 2015 IBM Corporation
+ * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/paca.h>
+#include <asm/hmi.h>
+
+void wait_for_subcore_guest_exit(void)
+{
+ int i;
+
+ /*
+	 * A NULL bitmap pointer indicates that the KVM module hasn't
+	 * been loaded yet, so no guests are running.
+	 * If KVM is not in use there is no need to coordinate among
+	 * threads, as all of them will always be in the host and nothing
+	 * other than the opal hmi handler will modify the TB.
+	 * Hence, just return from here.
+ */
+ if (!local_paca->sibling_subcore_state)
+ return;
+
+ for (i = 0; i < MAX_SUBCORE_PER_CORE; i++)
+ while (local_paca->sibling_subcore_state->in_guest[i])
+ cpu_relax();
+}
+
+void wait_for_tb_resync(void)
+{
+ if (!local_paca->sibling_subcore_state)
+ return;
+
+ while (test_bit(CORE_TB_RESYNC_REQ_BIT,
+ &local_paca->sibling_subcore_state->flags))
+ cpu_relax();
+}
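Both helpers in the new hmi.c follow the same pattern: poll a flag shared with other threads and pause the hardware thread until it clears. A stand-alone sketch of that pattern using C11 atomics; the names and the pause stand-in are illustrative, not the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool resync_in_progress;      /* set/cleared by another thread */

static inline void cpu_pause(void)
{
        /* Stand-in for cpu_relax(); a real implementation would lower the
         * SMT priority or issue a pause hint.  Here: a compiler barrier. */
        __asm__ __volatile__("" ::: "memory");
}

static void wait_for_resync(void)
{
        while (atomic_load(&resync_in_progress))
                cpu_pause();                 /* spin until the flag clears */
}

int main(void)
{
        wait_for_resync();                   /* returns immediately: flag is clear */
        return 0;
}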
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index a89f4f7a66bd..c1ca9282f4a0 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -65,7 +65,7 @@ static void *ibmebus_alloc_coherent(struct device *dev,
size_t size,
dma_addr_t *dma_handle,
gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *mem;
@@ -78,7 +78,7 @@ static void *ibmebus_alloc_coherent(struct device *dev,
static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr,
dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
kfree(vaddr);
}
@@ -88,7 +88,7 @@ static dma_addr_t ibmebus_map_page(struct device *dev,
unsigned long offset,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return (dma_addr_t)(page_address(page) + offset);
}
@@ -97,7 +97,7 @@ static void ibmebus_unmap_page(struct device *dev,
dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return;
}
@@ -105,7 +105,7 @@ static void ibmebus_unmap_page(struct device *dev,
static int ibmebus_map_sg(struct device *dev,
struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -121,7 +121,7 @@ static int ibmebus_map_sg(struct device *dev,
static void ibmebus_unmap_sg(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return;
}
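The ibmebus hunks (and the iommu.c ones further down) are part of a tree-wide change in which DMA attributes become a plain unsigned long bitmask instead of a struct dma_attrs pointer: callers pass 0 for "no attributes" and implementations test bits directly. A hedged sketch of the resulting convention, with a made-up attribute bit and helper:

#define MY_ATTR_WEAK_ORDERING   (1UL << 1)   /* example attribute bit */

/* Hypothetical map operation consuming the attrs bitmask. */
unsigned long my_map_page(void *page, unsigned long offset,
                          unsigned long size, unsigned long attrs)
{
        (void)size;                          /* unused in this sketch */

        if (attrs & MY_ATTR_WEAK_ORDERING) {
                /* relax ordering requirements for this mapping */
        }
        return (unsigned long)page + offset;
}

/* Callers that previously passed a NULL struct dma_attrs * now pass 0:
 *      my_map_page(buf, 0, 4096, 0);
 */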
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_book3s.S
index 470ceebd2d23..ba79d15f4ddd 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -1,5 +1,6 @@
/*
- * This file contains the power_save function for Power7 CPUs.
+ * This file contains idle entry/exit functions for POWER7,
+ * POWER8 and POWER9 CPUs.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,6 +21,7 @@
#include <asm/opal.h>
#include <asm/cpuidle.h>
#include <asm/book3s/64/mmu-hash.h>
+#include <asm/mmu.h>
#undef DEBUG
@@ -36,6 +38,11 @@
#define _AMOR GPR9
#define _WORT GPR10
#define _WORC GPR11
+#define _PTCR GPR12
+
+#define PSSCR_HV_TEMPLATE PSSCR_ESL | PSSCR_EC | \
+ PSSCR_PSLL_MASK | PSSCR_TR_MASK | \
+ PSSCR_MTL_MASK
/* Idle state entry routines */
@@ -52,6 +59,45 @@
.text
/*
+ * Used by threads before entering deep idle states. Saves SPRs
+ * in the interrupt stack frame
+ */
+save_sprs_to_stack:
+ /*
+	 * Note that all registers, i.e. per-core, per-subcore and per-thread,
+	 * are saved here since any thread in the core might wake up first
+ */
+BEGIN_FTR_SECTION
+ mfspr r3,SPRN_PTCR
+ std r3,_PTCR(r1)
+ /*
+ * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
+ * SDR1 here
+ */
+FTR_SECTION_ELSE
+ mfspr r3,SPRN_SDR1
+ std r3,_SDR1(r1)
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
+ mfspr r3,SPRN_RPR
+ std r3,_RPR(r1)
+ mfspr r3,SPRN_SPURR
+ std r3,_SPURR(r1)
+ mfspr r3,SPRN_PURR
+ std r3,_PURR(r1)
+ mfspr r3,SPRN_TSCR
+ std r3,_TSCR(r1)
+ mfspr r3,SPRN_DSCR
+ std r3,_DSCR(r1)
+ mfspr r3,SPRN_AMOR
+ std r3,_AMOR(r1)
+ mfspr r3,SPRN_WORT
+ std r3,_WORT(r1)
+ mfspr r3,SPRN_WORC
+ std r3,_WORC(r1)
+
+ blr
+
+/*
* Used by threads when the lock bit of core_idle_state is set.
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
@@ -69,13 +115,16 @@ core_idle_lock_held:
/*
* Pass requested state in r3:
- * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
+ * r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
+ * - Requested STOP state in POWER9
*
* To check IRQ_HAPPENED in r4
* 0 - don't check
* 1 - check
+ *
+ * Address to 'rfid' to in r5
*/
-_GLOBAL(power7_powersave_common)
+_GLOBAL(pnv_powersave_common)
/* Use r3 to pass state nap/sleep/winkle */
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
@@ -126,28 +175,28 @@ _GLOBAL(power7_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
+
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
* because as soon as we do that, another thread can switch
* the MMU context to the guest.
*/
- LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
+ LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
li r6, MSR_RI
andc r6, r9, r6
- LOAD_REG_ADDR(r7, power7_enter_nap_mode)
mtmsrd r6, 1 /* clear RI before setting SRR0/1 */
- mtspr SPRN_SRR0, r7
- mtspr SPRN_SRR1, r5
+ mtspr SPRN_SRR0, r5
+ mtspr SPRN_SRR1, r7
rfid
- .globl power7_enter_nap_mode
-power7_enter_nap_mode:
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're napping */
- li r4,KVM_HWTHREAD_IN_NAP
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
+ .globl pnv_enter_arch207_idle_mode
+pnv_enter_arch207_idle_mode:
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@@ -196,8 +245,7 @@ fastsleep_workaround_at_entry:
/* Fast sleep workaround */
li r3,1
li r4,1
- li r0,OPAL_CONFIG_CPU_IDLE_STATE
- bl opal_call_realmode
+ bl opal_rm_config_cpu_idle_state
/* Clear Lock bit */
li r0,0
@@ -206,30 +254,45 @@ fastsleep_workaround_at_entry:
b common_enter
enter_winkle:
- /*
- * Note all register i.e per-core, per-subcore or per-thread is saved
- * here since any thread in the core might wake up first
- */
- mfspr r3,SPRN_SDR1
- std r3,_SDR1(r1)
- mfspr r3,SPRN_RPR
- std r3,_RPR(r1)
- mfspr r3,SPRN_SPURR
- std r3,_SPURR(r1)
- mfspr r3,SPRN_PURR
- std r3,_PURR(r1)
- mfspr r3,SPRN_TSCR
- std r3,_TSCR(r1)
- mfspr r3,SPRN_DSCR
- std r3,_DSCR(r1)
- mfspr r3,SPRN_AMOR
- std r3,_AMOR(r1)
- mfspr r3,SPRN_WORT
- std r3,_WORT(r1)
- mfspr r3,SPRN_WORC
- std r3,_WORC(r1)
+ bl save_sprs_to_stack
+
IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+/*
+ * r3 - requested stop state
+ */
+power_enter_stop:
+/*
+ * Check if the requested state is a deep idle state.
+ */
+ LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+ ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
+ cmpd r3,r4
+ bge 2f
+ IDLE_STATE_ENTER_SEQ(PPC_STOP)
+2:
+/*
+ * Entering deep idle state.
+ * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
+ * stack and enter stop
+ */
+ lbz r7,PACA_THREAD_MASK(r13)
+ ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
+
+lwarx_loop_stop:
+ lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bnel core_idle_lock_held
+ andc r15,r15,r7 /* Clear thread bit */
+
+ stwcx. r15,0,r14
+ bne- lwarx_loop_stop
+ isync
+
+ bl save_sprs_to_stack
+
+ IDLE_STATE_ENTER_SEQ(PPC_STOP)
+
_GLOBAL(power7_idle)
/* Now check if user or arch enabled NAP mode */
LOAD_REG_ADDRBASE(r3,powersave_nap)
@@ -242,19 +305,22 @@ _GLOBAL(power7_idle)
_GLOBAL(power7_nap)
mr r4,r3
li r3,PNV_THREAD_NAP
- b power7_powersave_common
+ LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ b pnv_powersave_common
/* No return */
_GLOBAL(power7_sleep)
li r3,PNV_THREAD_SLEEP
li r4,1
- b power7_powersave_common
+ LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ b pnv_powersave_common
/* No return */
_GLOBAL(power7_winkle)
- li r3,3
+ li r3,PNV_THREAD_WINKLE
li r4,1
- b power7_powersave_common
+ LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ b pnv_powersave_common
/* No return */
#define CHECK_HMI_INTERRUPT \
@@ -270,25 +336,106 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
ld r2,PACATOC(r13); \
ld r1,PACAR1(r13); \
std r3,ORIG_GPR3(r1); /* Save original r3 */ \
- li r0,OPAL_HANDLE_HMI; /* Pass opal token argument*/ \
- bl opal_call_realmode; \
+ li r3,0; /* NULL argument */ \
+ bl hmi_exception_realmode; \
+ nop; \
ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \
20: nop;
-_GLOBAL(power7_wakeup_tb_loss)
+/*
+ * r3 - requested stop state
+ */
+_GLOBAL(power9_idle_stop)
+ LOAD_REG_IMMEDIATE(r4, PSSCR_HV_TEMPLATE)
+ or r4,r4,r3
+ mtspr SPRN_PSSCR, r4
+ li r4, 1
+ LOAD_REG_ADDR(r5,power_enter_stop)
+ b pnv_powersave_common
+ /* No return */
+/*
+ * Called from reset vector. Check whether we have woken up with
+ * hypervisor state loss. If yes, restore hypervisor state and return
+ * back to reset vector.
+ *
+ * r13 - Contents of HSPRG0
+ * cr3 - set to gt if waking up with partial/complete hypervisor state loss
+ */
+_GLOBAL(pnv_restore_hyp_resource)
ld r2,PACATOC(r13);
+BEGIN_FTR_SECTION
+ /*
+ * POWER ISA 3. Use PSSCR to determine if we
+ * are waking up from deep idle state
+ */
+ LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+ ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
+
+ mfspr r5,SPRN_PSSCR
+ /*
+	 * Bits 0-3 correspond to the Power-Saving Level Status,
+ * which indicates the idle state we are waking up from
+ */
+ rldicl r5,r5,4,60
+ cmpd cr4,r5,r4
+ bge cr4,pnv_wakeup_tb_loss
+ /*
+ * Waking up without hypervisor state loss. Return to
+ * reset vector
+ */
+ blr
+
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+ /*
+ * POWER ISA 2.07 or less.
+	 * Check if the last bit of HSPRG0 is set. This indicates whether we are
+ * waking up from winkle.
+ */
+ clrldi r5,r13,63
+ clrrdi r13,r13,1
+ cmpwi cr4,r5,1
+ mtspr SPRN_HSPRG0,r13
+
+ lbz r0,PACA_THREAD_IDLE_STATE(r13)
+ cmpwi cr2,r0,PNV_THREAD_NAP
+ bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */
+
+ /*
+ * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
+	 * up from nap. At this stage CR3 shouldn't contain 'gt' since that
+ * indicates we are waking with hypervisor state loss from nap.
+ */
+ bgt cr3,.
+
+ blr /* Return back to System Reset vector from where
+ pnv_restore_hyp_resource was invoked */
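For reference, the rldicl above extracts the Power-Saving Level Status, which sits in the top nibble (IBM bits 0-3) of the 64-bit PSSCR; in C that is (psscr >> 60) & 0xf. A small sketch with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned long long psscr = 0x3000000000000000ULL;   /* example wakeup value */
        unsigned int first_deep_state = 4;                  /* hypothetical threshold */
        unsigned int pls = (psscr >> 60) & 0xf;             /* == rldicl rX,rY,4,60 */

        printf("woke from stop level %u, deep state loss: %s\n",
               pls, pls >= first_deep_state ? "yes" : "no");
        return 0;
}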
+
+/*
+ * Called if waking up from idle state which can cause either partial or
+ * complete hyp state loss.
+ * In POWER8, called if waking up from fastsleep or winkle
+ * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
+ *
+ * r13 - PACA
+ * cr3 - gt if waking up with partial/complete hypervisor state loss
+ * cr4 - eq if waking up from complete hypervisor state loss.
+ */
+_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
/*
* Before entering any idle state, the NVGPRs are saved in the stack
* and they are restored before switching to the process context. Hence
* until they are restored, they are free to be used.
*
- * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
- * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
- * wakeup reason if we branch to kvm_start_guest.
+ * Save SRR1 and LR in NVGPRs as they might be clobbered in
+ * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
+ * to determine the wakeup reason if we branch to kvm_start_guest. LR
+	 * is required to return to the reset vector after hypervisor state
+ * restore is complete.
*/
-
+ mflr r17
mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
@@ -310,35 +457,35 @@ lwarx_loop2:
bnel core_idle_lock_held
cmpwi cr2,r15,0
- lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
- and r4,r4,r15
- cmpwi cr1,r4,0 /* Check if first in subcore */
/*
* At this stage
- * cr1 - 0b0100 if first thread to wakeup in subcore
- * cr2 - 0b0100 if first thread to wakeup in core
- * cr3- 0b0010 if waking up from sleep or winkle
- * cr4 - 0b0100 if waking up from winkle
+ * cr2 - eq if first thread to wakeup in core
+ * cr3- gt if waking up with partial/complete hypervisor state loss
+ * cr4 - eq if waking up from complete hypervisor state loss.
*/
- or r15,r15,r7 /* Set thread bit */
-
- beq cr1,first_thread_in_subcore
-
- /* Not first thread in subcore to wake up */
- stwcx. r15,0,r14
- bne- lwarx_loop2
- isync
- b common_exit
-
-first_thread_in_subcore:
- /* First thread in subcore to wakeup */
ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
stwcx. r15,0,r14
bne- lwarx_loop2
isync
+BEGIN_FTR_SECTION
+ lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
+ and r4,r4,r15
+ cmpwi r4,0 /* Check if first in subcore */
+
+ or r15,r15,r7 /* Set thread bit */
+ beq first_thread_in_subcore
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
+ or r15,r15,r7 /* Set thread bit */
+ beq cr2,first_thread_in_core
+
+ /* Not first thread in core or subcore to wake up */
+ b clear_lock
+
+first_thread_in_subcore:
/*
* If waking up from sleep, subcore state is not lost. Hence
* skip subcore state restore
@@ -348,6 +495,7 @@ first_thread_in_subcore:
/* Restore per-subcore state */
ld r4,_SDR1(r1)
mtspr SPRN_SDR1,r4
+
ld r4,_RPR(r1)
mtspr SPRN_RPR,r4
ld r4,_AMOR(r1)
@@ -363,32 +511,44 @@ subcore_state_restored:
first_thread_in_core:
/*
- * First thread in the core waking up from fastsleep. It needs to
+ * First thread in the core waking up from any state which can cause
+ * partial or complete hypervisor state loss. It needs to
* call the fastsleep workaround code if the platform requires it.
* Call it unconditionally here. The below branch instruction will
- * be patched out when the idle states are discovered if platform
- * does not require workaround.
+ * be patched out if the platform does not have fastsleep or does not
+ * require the workaround. Patching will be performed during the
+ * discovery of idle-states.
*/
.global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
b fastsleep_workaround_at_exit
timebase_resync:
- /* Do timebase resync if we are waking up from sleep. Use cr3 value
- * set in exceptions-64s.S */
+ /*
+	 * Use cr3, which indicates that we are waking up with at least partial
+ * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
+ */
ble cr3,clear_lock
/* Time base re-sync */
- li r0,OPAL_RESYNC_TIMEBASE
- bl opal_call_realmode;
- /* TODO: Check r3 for failure */
-
+ bl opal_rm_resync_timebase;
/*
* If waking up from sleep, per core state is not lost, skip to
* clear_lock.
*/
bne cr4,clear_lock
- /* Restore per core state */
+ /*
+	 * First thread in the core to wake up and it is waking up with
+ * complete hypervisor state loss. Restore per core hypervisor
+ * state.
+ */
+BEGIN_FTR_SECTION
+ ld r4,_PTCR(r1)
+ mtspr SPRN_PTCR,r4
+ ld r4,_RPR(r1)
+ mtspr SPRN_RPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
ld r4,_TSCR(r1)
mtspr SPRN_TSCR,r4
ld r4,_WORC(r1)
@@ -410,9 +570,9 @@ common_exit:
/* Waking up from winkle */
- /* Restore per thread state */
- bl __restore_cpu_power8
-
+BEGIN_MMU_FTR_SECTION
+ b no_segments
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
/* Restore SLB from PACA */
ld r8,PACA_SLBSHADOWPTR(r13)
@@ -426,6 +586,9 @@ common_exit:
slbmte r6,r5
1: addi r8,r8,16
.endr
+no_segments:
+
+ /* Restore per thread state */
ld r4,_SPURR(r1)
mtspr SPRN_SPURR,r4
@@ -436,48 +599,34 @@ common_exit:
ld r4,_WORT(r1)
mtspr SPRN_WORT,r4
-hypervisor_state_restored:
+ /* Call cur_cpu_spec->cpu_restore() */
+ LOAD_REG_ADDR(r4, cur_cpu_spec)
+ ld r4,0(r4)
+ ld r12,CPU_SPEC_RESTORE(r4)
+#ifdef PPC64_ELF_ABI_v1
+ ld r12,0(r12)
+#endif
+ mtctr r12
+ bctrl
- li r5,PNV_THREAD_RUNNING
- stb r5,PACA_THREAD_IDLE_STATE(r13)
+hypervisor_state_restored:
mtspr SPRN_SRR1,r16
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- li r0,KVM_HWTHREAD_IN_KERNEL
- stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* Order setting hwthread_state vs. testing hwthread_req */
- sync
- lbz r0,HSTATE_HWTHREAD_REQ(r13)
- cmpwi r0,0
- beq 6f
- b kvm_start_guest
-6:
-#endif
-
- REST_NVGPRS(r1)
- REST_GPR(2, r1)
- ld r3,_CCR(r1)
- ld r4,_MSR(r1)
- ld r5,_NIP(r1)
- addi r1,r1,INT_FRAME_SIZE
- mtcr r3
- mfspr r3,SPRN_SRR1 /* Return SRR1 */
- mtspr SPRN_SRR1,r4
- mtspr SPRN_SRR0,r5
- rfid
+ mtlr r17
+ blr /* Return back to System Reset vector from where
+ pnv_restore_hyp_resource was invoked */
fastsleep_workaround_at_exit:
li r3,1
li r4,0
- li r0,OPAL_CONFIG_CPU_IDLE_STATE
- bl opal_call_realmode
+ bl opal_rm_config_cpu_idle_state
b timebase_resync
/*
* R3 here contains the value that will be returned to the caller
* of power7_nap.
*/
-_GLOBAL(power7_wakeup_loss)
+_GLOBAL(pnv_wakeup_loss)
ld r1,PACAR1(r13)
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
@@ -497,10 +646,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* R3 here contains the value that will be returned to the caller
* of power7_nap.
*/
-_GLOBAL(power7_wakeup_noloss)
+_GLOBAL(pnv_wakeup_noloss)
lbz r0,PACA_NAPSTATELOST(r13)
cmpwi r0,0
- bne power7_wakeup_loss
+ bne pnv_wakeup_loss
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a8e3490b54e3..37d6e741be82 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -307,7 +307,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *page, unsigned int npages,
enum dma_data_direction direction,
unsigned long mask, unsigned int align_order,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long entry;
dma_addr_t ret = DMA_ERROR_CODE;
@@ -431,7 +431,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_next = 0, dma_addr;
struct scatterlist *s, *outs, *segstart;
@@ -574,7 +574,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
@@ -753,7 +753,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
struct page *page, unsigned long offset, size_t size,
unsigned long mask, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
void *vaddr;
@@ -790,7 +790,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned int npages;
@@ -845,7 +845,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
nio_pages = size >> tbl->it_page_shift;
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
- mask >> tbl->it_page_shift, io_order, NULL);
+ mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 3cb46a3b1de7..08887cf2b20e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -75,6 +75,7 @@
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -250,7 +251,7 @@ notrace void arch_local_irq_restore(unsigned long en)
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
}
-#endif /* CONFIG_TRACE_IRQFLAG */
+#endif /* CONFIG_TRACE_IRQFLAGS */
set_soft_enabled(0);
@@ -342,6 +343,21 @@ bool prep_irq_for_idle(void)
return true;
}
+/*
+ * Force a replay of the external interrupt handler on this CPU.
+ */
+void force_external_irq_replay(void)
+{
+ /*
+	 * This must only be called with interrupts soft-disabled;
+	 * the replay will happen when they are re-enabled.
+ */
+ WARN_ON(!arch_irqs_disabled());
+
+ /* Indicate in the PACA that we have an interrupt to replay */
+ local_paca->irq_happened |= PACA_IRQ_EE;
+}
+
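force_external_irq_replay() leans on powerpc's lazy interrupt disabling: while interrupts are soft-disabled only a pending flag is recorded in the PACA, and the handler actually runs when they are re-enabled. A generic, purely illustrative sketch of that idea (none of these names are the real implementation):

#include <stdbool.h>

static bool soft_disabled;
static bool ee_pending;

static void handle_external_irq(void) { /* ... */ }

static void note_external_irq(void)
{
        if (soft_disabled) {
                ee_pending = true;              /* remember it for replay */
                return;
        }
        handle_external_irq();
}

static void soft_irq_enable(void)
{
        soft_disabled = false;
        if (ee_pending) {
                ee_pending = false;
                handle_external_irq();          /* replay the deferred interrupt */
        }
}

int main(void)
{
        soft_disabled = true;
        note_external_irq();    /* deferred: only sets ee_pending */
        soft_irq_enable();      /* the handler runs here instead */
        return 0;
}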
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f281406..3ed8ec09b5c9 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -278,12 +278,11 @@ no_kprobe:
* - When the probed function returns, this probe
* causes the handlers to fire
*/
-static void __used kretprobe_trampoline_holder(void)
-{
- asm volatile(".global kretprobe_trampoline\n"
- "kretprobe_trampoline:\n"
- "nop\n");
-}
+asm(".global kretprobe_trampoline\n"
+ ".type kretprobe_trampoline, @function\n"
+ "kretprobe_trampoline:\n"
+ "nop\n"
+ ".size kretprobe_trampoline, .-kretprobe_trampoline\n");
/*
* Called when the probe at kretprobe trampoline is hit
@@ -506,13 +505,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
/* setup return addr to the jprobe handler routine */
regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef CONFIG_PPC64
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
regs->gpr[12] = (unsigned long)jp->entry;
-#else
+#elif defined(PPC64_ELF_ABI_v1)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
-#endif
return 1;
}
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index b8c202d63ecb..4c780a342282 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -29,6 +29,7 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
+#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC_BOOK3E
int default_machine_kexec_prepare(struct kimage *image)
@@ -54,7 +55,7 @@ int default_machine_kexec_prepare(struct kimage *image)
const unsigned long *basep;
const unsigned int *sizep;
- if (!ppc_md.hpte_clear_all)
+ if (!mmu_hash_ops.hpte_clear_all)
return -ENOENT;
/*
@@ -379,7 +380,12 @@ void default_machine_kexec(struct kimage *image)
*/
kexec_sequence(&kexec_stack, image->start, image,
page_address(image->control_code_page),
- ppc_md.hpte_clear_all);
+#ifdef CONFIG_PPC_STD_MMU
+ mmu_hash_ops.hpte_clear_all
+#else
+ NULL
+#endif
+ );
/* NOTREACHED */
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 285ca8c6cc2e..d9c912b6e632 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -104,20 +104,6 @@ _GLOBAL(mulhdu)
blr
/*
- * sub_reloc_offset(x) returns x - reloc_offset().
- */
-_GLOBAL(sub_reloc_offset)
- mflr r0
- bl 1f
-1: mflr r5
- lis r4,1b@ha
- addi r4,r4,1b@l
- subf r5,r4,r5
- subf r3,r5,r3
- mtlr r0
- blr
-
-/*
* reloc_got2 runs through the .got2 section adding an offset
* to each entry.
*/
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f28754c497e5..cb195157b318 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -661,13 +661,13 @@ _GLOBAL(kexec_sequence)
#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
ld r12,0(r27) /* deref function descriptor */
#else
mr r12,r27
#endif
mtctr r12
- bctrl /* ppc_md.hpte_clear_all(void); */
+ bctrl /* mmu_hash_ops.hpte_clear_all(void); */
#endif /* !CONFIG_PPC_BOOK3E */
/*
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 9ce9a25f58b5..183368e008cf 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -41,7 +41,7 @@
this, and makes other things simpler. Anton?
--RR. */
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
/* An address is simply the address of the function. */
typedef unsigned long func_desc_t;
@@ -132,7 +132,7 @@ static u32 ppc64_stub_insns[] = {
/* Save current r2 value in magic place on the stack. */
0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */
0xe98b0020, /* ld r12,32(r11) */
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
+#ifdef PPC64_ELF_ABI_v1
/* Set up new r2 from function descriptor */
0xe84b0028, /* ld r2,40(r11) */
#endif
@@ -494,9 +494,10 @@ static bool is_early_mcount_callsite(u32 *instruction)
restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
+ if (is_early_mcount_callsite(instruction - 1))
+ return 1;
+
if (*instruction != PPC_INST_NOP) {
- if (is_early_mcount_callsite(instruction - 1))
- return 1;
pr_err("%s: Expect noop after relocate, got %08x\n",
me->name, *instruction);
return 0;
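The PPC64_ELF_ABI_v1/v2 distinction running through these hunks comes down to function descriptors: under ELFv1 a function pointer refers to a descriptor whose first word is the real entry address (and whose second word is the TOC value), while under ELFv2 it is the entry address itself. A hedged C sketch of resolving the entry point; the struct layout here is for illustration only:

struct func_descr {
        unsigned long entry;    /* address of the first instruction */
        unsigned long toc;      /* TOC (r2) value for the function */
        unsigned long env;
};

unsigned long entry_point(void *fn, int elfv1)
{
        if (elfv1)
                return ((struct func_descr *)fn)->entry;  /* deref the descriptor */
        return (unsigned long)fn;                         /* ELFv2: direct address */
}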
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 93dae296b6be..fa20060ff7a5 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
* if we do a GET_PACA() before the feature fixups have been
* applied
*/
- if (cpu_has_feature(CPU_FTR_HVMODE))
+ if (early_cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0f7a60f1e9f6..a5c0153ede37 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -41,11 +41,18 @@
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
+/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
-/* XXX kill that some day ... */
-static int global_phb_number; /* Global phb counter */
+/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
+#define MAX_PHBS 0x10000
+
+/*
+ * For dynamic PHB numbering: used/free PHBs tracking bitmap.
+ * Accesses to this bitmap should be protected by hose_spinlock.
+ */
+static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
/* ISA Memory physical address */
resource_size_t isa_mem_base;
@@ -64,6 +71,42 @@ struct dma_map_ops *get_pci_dma_ops(void)
}
EXPORT_SYMBOL(get_pci_dma_ops);
+/*
+ * This function should run under locking protection, specifically with
+ * hose_spinlock held.
+ */
+static int get_phb_number(struct device_node *dn)
+{
+ int ret, phb_id = -1;
+ u64 prop;
+
+ /*
+ * Try fixed PHB numbering first, by checking archs and reading
+	 * the respective device-tree properties. First, try powernv by
+	 * reading "ibm,opal-phbid", which is only present in an OPAL environment.
+ */
+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+ if (ret)
+ ret = of_property_read_u32_index(dn, "reg", 1, (u32 *)&prop);
+
+ if (!ret)
+ phb_id = (int)(prop & (MAX_PHBS - 1));
+
+ /* We need to be sure to not use the same PHB number twice. */
+ if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
+ return phb_id;
+
+ /*
+	 * If not on pseries or powernv, or if fixed PHB numbering tried to add
+	 * the same PHB number twice, then fall back to dynamic PHB numbering.
+ */
+ phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
+ BUG_ON(phb_id >= MAX_PHBS);
+ set_bit(phb_id, phb_bitmap);
+
+ return phb_id;
+}
+
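get_phb_number() above combines a preferred fixed ID (read from the device tree) with a bitmap-backed dynamic fallback. A stripped-down user-space sketch of the same allocation pattern, using plain bit operations instead of the kernel bitmap API; all names are illustrative:

#include <stdio.h>

#define MAX_IDS 64
static unsigned long long id_bitmap;            /* bit n set => ID n in use */

/* Try the preferred ID first; fall back to the first free one. */
static int alloc_id(int preferred)
{
        int i;

        if (preferred >= 0 && preferred < MAX_IDS &&
            !(id_bitmap & (1ULL << preferred))) {
                id_bitmap |= 1ULL << preferred;
                return preferred;
        }
        for (i = 0; i < MAX_IDS; i++) {
                if (!(id_bitmap & (1ULL << i))) {
                        id_bitmap |= 1ULL << i;
                        return i;
                }
        }
        return -1;                              /* exhausted */
}

int main(void)
{
        int a = alloc_id(3);    /* 3: preferred slot was free */
        int b = alloc_id(3);    /* 0: 3 already taken, first free is 0 */
        int c = alloc_id(-1);   /* 1: no preference, next free slot */

        printf("%d %d %d\n", a, b, c);
        return 0;
}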
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
@@ -72,7 +115,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
if (phb == NULL)
return NULL;
spin_lock(&hose_spinlock);
- phb->global_number = global_phb_number++;
+ phb->global_number = get_phb_number(dev);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
phb->dn = dev;
@@ -94,6 +137,11 @@ EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
void pcibios_free_controller(struct pci_controller *phb)
{
spin_lock(&hose_spinlock);
+
+ /* Clear bit of phb_bitmap to allow reuse of this PHB number. */
+ if (phb->global_number < MAX_PHBS)
+ clear_bit(phb->global_number, phb_bitmap);
+
list_del(&phb->list_node);
spin_unlock(&hose_spinlock);
@@ -124,6 +172,14 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
return 1;
}
+void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+
+ if (hose->controller_ops.setup_bridge)
+ hose->controller_ops.setup_bridge(bus, type);
+}
+
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
@@ -356,36 +412,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
}
/*
- * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
- pgprot_t protection,
- enum pci_mmap_state mmap_state,
- int write_combine)
-{
-
- /* Write combine is always 0 on non-memory space mappings. On
- * memory space, if the user didn't pass 1, we check for a
- * "prefetchable" resource. This is a bit hackish, but we use
- * this to workaround the inability of /sysfs to provide a write
- * combine bit
- */
- if (mmap_state != pci_mmap_mem)
- write_combine = 0;
- else if (write_combine == 0) {
- if (rp->flags & IORESOURCE_PREFETCH)
- write_combine = 1;
- }
-
- /* XXX would be nice to have a way to ask for write-through */
- if (write_combine)
- return pgprot_noncached_wc(protection);
- else
- return pgprot_noncached(protection);
-}
-
-/*
* This one is used by /dev/mem and fbdev who have no clue about the
* PCI device, it tries to find the PCI device first and calls the
* above routine
@@ -458,9 +484,10 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
- vma->vm_page_prot,
- mmap_state, write_combine);
+ if (write_combine)
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
@@ -610,39 +637,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- resource_size_t offset = 0;
+ struct pci_bus_region region;
- if (hose == NULL)
+ if (rsrc->flags & IORESOURCE_IO) {
+ pcibios_resource_to_bus(dev->bus, &region,
+ (struct resource *) rsrc);
+ *start = region.start;
+ *end = region.end;
return;
+ }
- if (rsrc->flags & IORESOURCE_IO)
- offset = (unsigned long)hose->io_base_virt - _IO_BASE;
-
- /* We pass a fully fixed up address to userland for MMIO instead of
- * a BAR value because X is lame and expects to be able to use that
- * to pass to /dev/mem !
- *
- * That means that we'll have potentially 64 bits values where some
- * userland apps only expect 32 (like X itself since it thinks only
- * Sparc has 64 bits MMIO) but if we don't do that, we break it on
- * 32 bits CHRPs :-(
+ /* We pass a CPU physical address to userland for MMIO instead of a
+ * BAR value because X is lame and expects to be able to use that
+ * to pass to /dev/mem!
*
- * Hopefully, the sysfs insterface is immune to that gunk. Once X
- * has been fixed (and the fix spread enough), we can re-enable the
- * 2 lines below and pass down a BAR value to userland. In that case
- * we'll also have to re-enable the matching code in
- * __pci_mmap_make_offset().
- *
- * BenH.
+ * That means we may have 64-bit values where some apps only expect
+ * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
*/
-#if 0
- else if (rsrc->flags & IORESOURCE_MEM)
- offset = hose->pci_mem_offset;
-#endif
-
- *start = rsrc->start - offset;
- *end = rsrc->end - offset;
+ *start = rsrc->start;
+ *end = rsrc->end;
}
/**
@@ -1362,8 +1375,10 @@ void __init pcibios_resource_survey(void)
/* Allocate and assign resources */
list_for_each_entry(b, &pci_root_buses, node)
pcibios_allocate_bus_resources(b);
- pcibios_allocate_resources(0);
- pcibios_allocate_resources(1);
+ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
+ pcibios_allocate_resources(0);
+ pcibios_allocate_resources(1);
+ }
/* Before we start assigning unassigned resource, we try to reserve
* the low IO area and the VGA memory area if they intersect the
@@ -1436,8 +1451,12 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
/* Allocate bus and devices resources */
pcibios_allocate_bus_resources(bus);
pcibios_claim_one_bus(bus);
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_assign_unassigned_bus_resources(bus);
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ if (bus->self)
+ pci_assign_unassigned_bridge_resources(bus->self);
+ else
+ pci_assign_unassigned_bus_resources(bus);
+ }
/* Fixup EEH */
eeh_add_device_tree_late(bus);
@@ -1485,9 +1504,9 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
res = &hose->io_resource;
if (!res->flags) {
- pr_info("PCI: I/O resource not set for host"
- " bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
+ pr_debug("PCI: I/O resource not set for host"
+ " bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
} else {
offset = pcibios_io_space_offset(hose);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index a5ae49a2dcc4..ed5e9ff61a68 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -81,7 +81,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
/* If this is not a PHB, we only flush the hash table over
* the area mapped by this bridge. We don't play with the PTE
- * mappings since we might have to deal with sub-page alignemnts
+ * mappings since we might have to deal with sub-page alignments
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index ecdccce78719..592693437070 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -31,6 +31,7 @@
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+#include <asm/eeh.h>
/*
* The function is used to find the firmware data of one
@@ -181,7 +182,6 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct pci_dn *parent, *pdn;
- struct eeh_dev *edev;
int i;
/* Only support IOV for now */
@@ -199,6 +199,8 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
return NULL;
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ struct eeh_dev *edev __maybe_unused;
+
pdn = add_one_dev_pci_data(parent, NULL, i,
pci_iov_virtfn_bus(pdev, i),
pci_iov_virtfn_devfn(pdev, i));
@@ -208,11 +210,12 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
return NULL;
}
+#ifdef CONFIG_EEH
/* Create the EEH device for the VF */
- eeh_dev_init(pdn, pci_bus_to_host(pdev->bus));
- edev = pdn_to_eeh_dev(pdn);
+ edev = eeh_dev_init(pdn);
BUG_ON(!edev);
edev->physfn = pdev;
+#endif /* CONFIG_EEH */
}
#endif /* CONFIG_PCI_IOV */
@@ -224,7 +227,6 @@ void remove_dev_pci_data(struct pci_dev *pdev)
#ifdef CONFIG_PCI_IOV
struct pci_dn *parent;
struct pci_dn *pdn, *tmp;
- struct eeh_dev *edev;
int i;
/*
@@ -260,18 +262,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
* a batch mode.
*/
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ struct eeh_dev *edev __maybe_unused;
+
list_for_each_entry_safe(pdn, tmp,
&parent->child_list, list) {
if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
continue;
+#ifdef CONFIG_EEH
/* Release EEH device for the VF */
edev = pdn_to_eeh_dev(pdn);
if (edev) {
pdn->edev = NULL;
kfree(edev);
}
+#endif /* CONFIG_EEH */
if (!list_empty(&pdn->list))
list_del(&pdn->list);
@@ -289,8 +295,11 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
const __be32 *regs;
struct device_node *parent;
struct pci_dn *pdn;
+#ifdef CONFIG_EEH
+ struct eeh_dev *edev;
+#endif
- pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
if (pdn == NULL)
return NULL;
dn->data = pdn;
@@ -319,6 +328,15 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
/* Extended config space */
pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
+ /* Create EEH device */
+#ifdef CONFIG_EEH
+ edev = eeh_dev_init(pdn);
+ if (!edev) {
+ kfree(pdn);
+ return NULL;
+ }
+#endif
+
/* Attach to parent node */
INIT_LIST_HEAD(&pdn->child_list);
INIT_LIST_HEAD(&pdn->list);
@@ -504,15 +522,19 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
* pci device found underneath. This routine runs once,
* early in the boot sequence.
*/
-void __init pci_devs_phb_init(void)
+static int __init pci_devs_phb_init(void)
{
struct pci_controller *phb, *tmp;
/* This must be done first so the device nodes have valid pci info! */
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
pci_devs_phb_init_dynamic(phb);
+
+ return 0;
}
+core_initcall(pci_devs_phb_init);
+
static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
struct pci_dn *pdn;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0b93893424f5..58ccf86415b4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -58,6 +58,7 @@
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
+#include <asm/cpu_has_feature.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -139,12 +140,16 @@ EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
+ unsigned long msr;
+
save_fpu(tsk);
- tsk->thread.regs->msr &= ~MSR_FP;
+ msr = tsk->thread.regs->msr;
+ msr &= ~MSR_FP;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
- tsk->thread.regs->msr &= ~MSR_VSX;
+ msr &= ~MSR_VSX;
#endif
+ tsk->thread.regs->msr = msr;
}
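The point of the __giveup_fpu()/__giveup_altivec() change above is that the new MSR value is assembled in a local and written back with a single store, so no intermediate value (FP cleared but VSX still set) is ever visible in regs->msr. A tiny sketch of the pattern; the bit names and values are placeholders:

#define MY_MSR_FP       (1UL << 13)     /* placeholder bit positions */
#define MY_MSR_VSX      (1UL << 23)

/* Coalesce several bit clears into one store so the field is never
 * observed in a half-updated state.
 */
void clear_fp_bits(unsigned long *regs_msr, int have_vsx)
{
        unsigned long msr = *regs_msr;  /* read once */

        msr &= ~MY_MSR_FP;
        if (have_vsx)
                msr &= ~MY_MSR_VSX;

        *regs_msr = msr;                /* single write, no intermediate state */
}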
void giveup_fpu(struct task_struct *tsk)
@@ -219,12 +224,16 @@ static int restore_fp(struct task_struct *tsk) { return 0; }
static void __giveup_altivec(struct task_struct *tsk)
{
+ unsigned long msr;
+
save_altivec(tsk);
- tsk->thread.regs->msr &= ~MSR_VEC;
+ msr = tsk->thread.regs->msr;
+ msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
- tsk->thread.regs->msr &= ~MSR_VSX;
+ msr &= ~MSR_VSX;
#endif
+ tsk->thread.regs->msr = msr;
}
void giveup_altivec(struct task_struct *tsk)
@@ -794,7 +803,7 @@ static void tm_reclaim_thread(struct thread_struct *thr,
* this state.
* We do this using the current MSR, rather tracking it in
* some specific thread_struct bit, as it has the additional
- * benifit of checking for a potential TM bad thing exception.
+ * benefit of checking for a potential TM bad thing exception.
*/
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
@@ -1009,6 +1018,14 @@ static inline void save_sprs(struct thread_struct *t)
*/
t->tar = mfspr(SPRN_TAR);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally save Load Monitor registers, if enabled */
+ if (t->fscr & FSCR_LM) {
+ t->lmrr = mfspr(SPRN_LMRR);
+ t->lmser = mfspr(SPRN_LMSER);
+ }
+ }
#endif
}
@@ -1023,18 +1040,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
- u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-
- if (new_thread->dscr_inherit) {
+ if (new_thread->dscr_inherit)
dscr = new_thread->dscr;
- fscr |= FSCR_DSCR;
- }
if (old_thread->dscr != dscr)
mtspr(SPRN_DSCR, dscr);
-
- if (old_thread->fscr != fscr)
- mtspr(SPRN_FSCR, fscr);
}
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -1045,12 +1055,45 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->ebbrr != new_thread->ebbrr)
mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ if (old_thread->fscr != new_thread->fscr)
+ mtspr(SPRN_FSCR, new_thread->fscr);
+
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Conditionally restore Load Monitor registers, if enabled */
+ if (new_thread->fscr & FSCR_LM) {
+ if (old_thread->lmrr != new_thread->lmrr)
+ mtspr(SPRN_LMRR, new_thread->lmrr);
+ if (old_thread->lmser != new_thread->lmser)
+ mtspr(SPRN_LMSER, new_thread->lmser);
+ }
+ }
#endif
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+ /*
+	 * Tracing one's own process is not yet supported through the
+	 * ptrace interface. The generic ptrace code should have
+	 * prevented this from happening in the first place.
+	 * Warn once here with a message if it is
+	 * somehow attempted.
+ */
+ WARN_ONCE(tsk == current,
+ "Not expecting ptrace on self: TM regs may be incorrect\n");
+
+ /*
+	 * If the task is not current, its state should already have been
+	 * flushed to its thread_struct during __switch_to().
+ */
+}
+#endif
+
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 946e34ffeae9..b0245bed6f54 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -56,6 +56,8 @@
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/debug.h>
+#include <asm/epapr_hcalls.h>
+#include <asm/firmware.h>
#include <mm/mmu_decl.h>
@@ -168,7 +170,7 @@ static struct ibm_pa_feature {
*/
{CPU_FTR_TM_COMP, 0, 0,
PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
- {0, MMU_FTR_RADIX, 0, 0, 40, 0, 0},
+ {0, MMU_FTR_TYPE_RADIX, 0, 0, 40, 0, 0},
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -735,10 +737,22 @@ void __init early_init_devtree(void *params)
spinning_secondaries = boot_cpu_count - 1;
#endif
+ mmu_early_init_devtree();
+
#ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
+ epapr_paravirt_early_init();
+
+ /* Now try to figure out if we are running on LPAR and so on */
+ pseries_probe_fw_features();
+
+#ifdef CONFIG_PPC_PS3
+ /* Identify PS3 firmware */
+ if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
+ powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
+#endif
DBG(" <- early_init_devtree()\n");
}
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 134bee9ac664..4f3c5756cc09 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -64,6 +64,10 @@ struct pt_regs_offset {
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
+#define TVSO(f) (offsetof(struct thread_vr_state, f))
+#define TFSO(f) (offsetof(struct thread_fp_state, f))
+#define TSO(f) (offsetof(struct thread_struct, f))
+
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
@@ -181,6 +185,26 @@ static int set_user_msr(struct task_struct *task, unsigned long msr)
return 0;
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static unsigned long get_user_ckpt_msr(struct task_struct *task)
+{
+ return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
+}
+
+static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
+{
+ task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
+ task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
+ return 0;
+}
+
+static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
+{
+ task->thread.ckpt_regs.trap = trap & 0xfff0;
+ return 0;
+}
+#endif
+
#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
@@ -358,6 +382,29 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
return ret;
}
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When the
+ * transaction is not active, 'fp_state' holds the current running state of
+ * all the FPR registers. So this function, which returns the current running
+ * values of all the FPR registers, needs to know whether a transaction is
+ * active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determines the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
@@ -368,14 +415,31 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
#endif
flush_fp_to_thread(target);
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ /* copy to local buffer then write that out */
+ if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_TRANS_FPR(i);
+ buf[32] = target->thread.transact_fp.fpscr;
+ } else {
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+ }
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
+
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
-#else
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
@@ -384,6 +448,29 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
#endif
}
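
The layout documented above is exactly what a ptrace tracer sees through PTRACE_GETREGSET. A minimal userspace sketch (illustrative only, not part of this patch; it assumes the tracee 'pid' is already attached and stopped, and 'dump_fprs' is a name made up here):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_fp_block {			/* mirrors the layout in the comment above */
	uint64_t fpr[32];
	uint64_t fpscr;
};

static int dump_fprs(pid_t pid)
{
	struct ppc_fp_block fp;
	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };

	/* NT_PRFPREG is the note served by fpr_get()/fpr_set() on powerpc */
	if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) == -1)
		return -1;

	printf("fpscr = 0x%016llx\n", (unsigned long long)fp.fpscr);
	return 0;
}

With this patch applied, the same call returns the transactional (running) FPR values while the tracee is inside an active transaction, and the usual fp_state values otherwise.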
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which sets the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determine the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
@@ -394,7 +481,27 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
#endif
flush_fp_to_thread(target);
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+ return i;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_TRANS_FPR(i) = buf[i];
+ target->thread.transact_fp.fpscr = buf[32];
+ } else {
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_FPR(i) = buf[i];
+ target->thread.fp_state.fpscr = buf[32];
+ }
+ return 0;
+#endif
+
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -403,7 +510,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
return 0;
-#else
+#endif
+
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
@@ -433,10 +542,28 @@ static int vr_active(struct task_struct *target,
return target->thread.used_vr ? regset->n : 0;
}
+/*
+ * When the transaction is active, 'transact_vr' holds the current running
+ * value of all the VMX registers and 'vr_state' holds the last checkpointed
+ * value of all the VMX registers for the current transaction to fall back
+ * on in case it aborts. When transaction is not active 'vr_state' holds
+ * the current running state of all the VMX registers. So this function which
+ * gets the current running values of all the VMX registers, needs to know
+ * whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
+ struct thread_vr_state *addr;
int ret;
flush_altivec_to_thread(target);
@@ -444,8 +571,19 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+ flush_fp_to_thread(target);
+ flush_tmregs_to_thread(target);
+ addr = &target->thread.transact_vr;
+ } else {
+ addr = &target->thread.vr_state;
+ }
+#else
+ addr = &target->thread.vr_state;
+#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
+ addr, 0,
33 * sizeof(vector128));
if (!ret) {
/*
@@ -456,7 +594,16 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ vrsave.word = target->thread.transact_vrsave;
+ else
+ vrsave.word = target->thread.vrsave;
+#else
vrsave.word = target->thread.vrsave;
+#endif
+
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
@@ -464,10 +611,28 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
return ret;
}
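
The same buffer shape applies on the tracer side; only the low-order word of the trailing vrsave slot carries data, the rest is zero-filled by the kernel. A hypothetical reader ('read_vrsave' is a name invented for this sketch), under the same tracee assumptions as above:

#include <elf.h>
#include <stdint.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_vmx_block {			/* 34 * 16 bytes, as exported by vr_get() */
	uint8_t vr[32][16];
	uint8_t vscr[16];
	uint8_t vrsave[16];		/* kernel stores one 32-bit word, zero-fills the rest */
};

static int read_vrsave(pid_t pid, uint32_t *vrsave)
{
	struct ppc_vmx_block vmx;
	struct iovec iov = { .iov_base = &vmx, .iov_len = sizeof(vmx) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov) == -1)
		return -1;

	memcpy(vrsave, vmx.vrsave, sizeof(*vrsave));	/* word sits at offset 0 of the slot */
	return 0;
}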
+/*
+ * When the transaction is active, 'transact_vr' holds the current running
+ * value of all the VMX registers and 'vr_state' holds the last checkpointed
+ * value of all the VMX registers for the current transaction to fall back
+ * on in case it aborts. When transaction is not active 'vr_state' holds
+ * the current running state of all the VMX registers. So this function which
+ * sets the current running values of all the VMX registers, needs to know
+ * whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
+ struct thread_vr_state *addr;
int ret;
flush_altivec_to_thread(target);
@@ -475,8 +640,19 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+ flush_fp_to_thread(target);
+ flush_tmregs_to_thread(target);
+ addr = &target->thread.transact_vr;
+ } else {
+ addr = &target->thread.vr_state;
+ }
+#else
+ addr = &target->thread.vr_state;
+#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
+ addr, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
@@ -487,11 +663,28 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ vrsave.word = target->thread.transact_vrsave;
+ else
+ vrsave.word = target->thread.vrsave;
+#else
vrsave.word = target->thread.vrsave;
+#endif
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
- if (!ret)
+ if (!ret) {
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ target->thread.transact_vrsave = vrsave.word;
+ else
+ target->thread.vrsave = vrsave.word;
+#else
target->thread.vrsave = vrsave.word;
+#endif
+ }
}
return ret;
@@ -512,6 +705,21 @@ static int vsr_active(struct task_struct *target,
return target->thread.used_vsr ? regset->n : 0;
}
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which returns the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
@@ -519,16 +727,47 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
u64 buf[32];
int ret, i;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+#endif
flush_vsx_to_thread(target);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.
+ transact_fp.fpr[i][TS_VSRLOWOFFSET];
+ } else {
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.
+ fp_state.fpr[i][TS_VSRLOWOFFSET];
+ }
+#else
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
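
The VSX regset only carries the low 64 bits of VSR0-VSR31; the upper halves of those registers overlap the FPRs and are reached through the FPR regset instead. A minimal tracer-side sketch (helper name made up here), same assumptions as before:

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long read_vsx_low_halves(pid_t pid, uint64_t vsx[32])
{
	struct iovec iov = { .iov_base = vsx, .iov_len = 32 * sizeof(uint64_t) };

	/* NT_PPC_VSX is the note served by vsr_get()/vsr_set() */
	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &iov);
}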
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which sets the current running values of all
+ * the FPR registers, needs to know whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
@@ -536,12 +775,30 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
u64 buf[32];
int ret,i;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+#endif
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+ for (i = 0; i < 32 ; i++)
+ target->thread.transact_fp.
+ fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ } else {
+ for (i = 0; i < 32 ; i++)
+ target->thread.fp_state.
+ fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ }
+#else
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+#endif
return ret;
@@ -614,8 +871,1030 @@ static int evr_set(struct task_struct *target, const struct user_regset *regset,
}
#endif /* CONFIG_SPE */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/**
+ * tm_cgpr_active - get active number of registers in CGPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction checkpointed GPR category.
+ */
+static int tm_cgpr_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cgpr_get - get CGPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets transaction checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds all the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function gets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+static int tm_cgpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs,
+ 0, offsetof(struct pt_regs, msr));
+ if (!ret) {
+ unsigned long msr = get_user_ckpt_msr(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
+ offsetof(struct pt_regs, msr),
+ offsetof(struct pt_regs, msr) +
+ sizeof(msr));
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs.orig_gpr3,
+ offsetof(struct pt_regs, orig_gpr3),
+ sizeof(struct pt_regs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
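
From userspace the checkpointed GPR block is requested with the new NT_PPC_TM_CGPR note; when no transaction is active the call fails with ENODATA, matching the check above. A sketch only: the note value is taken from the uapi elf.h additions in this series and is defined locally in case the installed headers predate them, and 'read_ckpt_gprs' is an invented name.

#include <elf.h>
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>			/* powerpc's exported struct pt_regs layout */

#ifndef NT_PPC_TM_CGPR
#define NT_PPC_TM_CGPR	0x108		/* assumed value, see uapi/linux/elf.h */
#endif

static int read_ckpt_gprs(pid_t pid, struct pt_regs *ckpt)
{
	struct iovec iov = { .iov_base = ckpt, .iov_len = sizeof(*ckpt) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov) == 0)
		return 1;			/* checkpointed state was returned */
	if (errno == ENODATA)
		return 0;			/* tracee is not in a transaction */
	return -1;				/* no TM support, or other error */
}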
/*
+ * tm_cgpr_set - set the CGPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function sets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+static int tm_cgpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long reg;
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs,
+ 0, PT_MSR * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_MSR * sizeof(reg),
+ (PT_MSR + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_ckpt_msr(target, reg);
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs.orig_gpr3,
+ PT_ORIG_R3 * sizeof(reg),
+ (PT_MAX_PUT_REG + 1) * sizeof(reg));
+
+ if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
+ ret = user_regset_copyin_ignore(
+ &pos, &count, &kbuf, &ubuf,
+ (PT_MAX_PUT_REG + 1) * sizeof(reg),
+ PT_TRAP * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_TRAP * sizeof(reg),
+ (PT_TRAP + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_ckpt_trap(target, reg);
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(
+ &pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+ return ret;
+}
+
+/**
+ * tm_cfpr_active - get active number of registers in CFPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction checkpointed FPR category.
+ */
+static int tm_cfpr_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cfpr_get - get CFPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets in transaction checkpointed FPR registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed FPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ *};
+ */
+static int tm_cfpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ /* copy to local buffer then write that out */
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+}
+
+/**
+ * tm_cfpr_set - set CFPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed FPR registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * FPR register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * FPR registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ *};
+ */
+static int tm_cfpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+ return i;
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_FPR(i) = buf[i];
+ target->thread.fp_state.fpscr = buf[32];
+ return 0;
+}
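
Writes go through the same note with PTRACE_SETREGSET; a read-modify-write of the checkpointed FPSCR might look like the following sketch (same caveats as above about attachment and the assumed note value; 'set_ckpt_fpscr' is not a real API):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_PPC_TM_CFPR
#define NT_PPC_TM_CFPR	0x109		/* assumed value, see uapi/linux/elf.h */
#endif

struct ppc_tm_cfpr {
	uint64_t fpr[32];
	uint64_t fpscr;
};

static int set_ckpt_fpscr(pid_t pid, uint64_t fpscr)
{
	struct ppc_tm_cfpr cfpr;
	struct iovec iov = { .iov_base = &cfpr, .iov_len = sizeof(cfpr) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CFPR, &iov) == -1)
		return -1;

	cfpr.fpscr = fpscr;
	iov.iov_len = sizeof(cfpr);	/* GETREGSET may have shrunk iov_len on the read */
	return ptrace(PTRACE_SETREGSET, pid, NT_PPC_TM_CFPR, &iov);
}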
+
+/**
+ * tm_cvmx_active - get active number of registers in CVMX
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the checkpointed VMX category.
+ */
+static int tm_cvmx_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cvmx_get - get CVMX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets in transaction checkpointed VMX registers.
+ *
+ * When the transaction is active 'vr_state' and 'vrsave' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ *};
+ */
+static int tm_cvmx_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
+ if (!ret) {
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.vrsave;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ }
+
+ return ret;
+}
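
The checkpointed VMX note reuses the vr_get() layout, i.e. 34 slots of 16 bytes (32 VRs, VSCR, vrsave). A short tracer-side sketch, same assumptions and assumed note value as before:

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_PPC_TM_CVMX
#define NT_PPC_TM_CVMX	0x10a		/* assumed value, see uapi/linux/elf.h */
#endif

static long read_ckpt_vmx(pid_t pid, uint8_t buf[34][16])
{
	struct iovec iov = { .iov_base = buf, .iov_len = 34 * 16 };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CVMX, &iov);
}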
+
+/**
+ * tm_cvmx_set - set CVMX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed VMX registers.
+ *
+ * When the transaction is active 'vr_state' and 'vrsave' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ *};
+ */
+static int tm_cvmx_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
+ if (!ret && count > 0) {
+ /*
+ * We use only the low-order word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.vrsave;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ if (!ret)
+ target->thread.vrsave = vrsave.word;
+ }
+
+ return ret;
+}
+
+/**
+ * tm_cvsx_active - get active number of registers in CVSX
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction checkpointed VSX category.
+ */
+static int tm_cvsx_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ flush_vsx_to_thread(target);
+ return target->thread.used_vsr ? regset->n : 0;
+}
+
+/**
+ * tm_cvsx_get - get CVSX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets in transaction checkpointed VSX registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed VSX registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ *};
+ */
+static int tm_cvsx_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+
+ return ret;
+}
+
+/**
+ * tm_cvsx_set - set CVSX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed VSX registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * VSX register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * VSX registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ *};
+ */
+static int tm_cvsx_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ for (i = 0; i < 32 ; i++)
+ target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+
+ return ret;
+}
+
+/**
+ * tm_spr_active - get active number of registers in TM SPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks the active number of available
+ * registers in the transactional memory SPR category.
+ */
+static int tm_spr_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ return regset->n;
+}
+
+/**
+ * tm_spr_get - get the TM related SPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy from.
+ * @ubuf: User buffer to copy into.
+ *
+ * This function gets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+static int tm_spr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+ BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+ BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ /* Flush the states */
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ /* TFHAR register */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfhar, 0, sizeof(u64));
+
+ /* TEXASR register */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_texasr, sizeof(u64),
+ 2 * sizeof(u64));
+
+ /* TFIAR register */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfiar,
+ 2 * sizeof(u64), 3 * sizeof(u64));
+ return ret;
+}
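
The three SPRs are laid out back to back in the note, which is what the BUILD_BUG_ON lines above assert about struct thread_struct. A tracer-side view (illustrative only; note value assumed as before, struct and helper names invented here):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_PPC_TM_SPR
#define NT_PPC_TM_SPR	0x10c		/* assumed value, see uapi/linux/elf.h */
#endif

struct ppc_tm_sprs {
	uint64_t tfhar;			/* transaction failure handler address */
	uint64_t texasr;		/* transaction exception and summary register */
	uint64_t tfiar;			/* transaction failure instruction address */
};

static long read_tm_sprs(pid_t pid, struct ppc_tm_sprs *sprs)
{
	struct iovec iov = { .iov_base = sprs, .iov_len = sizeof(*sprs) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
}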
+
+/**
+ * tm_spr_set - set the TM related SPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+static int tm_spr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+ BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+ BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ /* Flush the states */
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_tmregs_to_thread(target);
+
+ /* TFHAR register */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfhar, 0, sizeof(u64));
+
+ /* TEXASR register */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_texasr, sizeof(u64),
+ 2 * sizeof(u64));
+
+ /* TFIAR register */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfiar,
+ 2 * sizeof(u64), 3 * sizeof(u64));
+ return ret;
+}
+
+static int tm_tar_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+static int tm_tar_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tar, 0, sizeof(u64));
+ return ret;
+}
+
+static int tm_tar_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tar, 0, sizeof(u64));
+ return ret;
+}
+
+static int tm_ppr_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+
+static int tm_ppr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_ppr, 0, sizeof(u64));
+ return ret;
+}
+
+static int tm_ppr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_ppr, 0, sizeof(u64));
+ return ret;
+}
+
+static int tm_dscr_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+static int tm_dscr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_dscr, 0, sizeof(u64));
+ return ret;
+}
+
+static int tm_dscr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_dscr, 0, sizeof(u64));
+ return ret;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+#ifdef CONFIG_PPC64
+static int ppr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ppr, 0, sizeof(u64));
+ return ret;
+}
+
+static int ppr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ppr, 0, sizeof(u64));
+ return ret;
+}
+
+static int dscr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
+ return ret;
+}
+static int dscr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
+ return ret;
+}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+static int tar_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
+ return ret;
+}
+static int tar_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
+ return ret;
+}
+
+static int ebb_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (target->thread.used_ebb)
+ return regset->n;
+
+ return 0;
+}
+
+static int ebb_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+ BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (!target->thread.used_ebb)
+ return -ENODATA;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
+}
+
+static int ebb_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret = 0;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+ BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (target->thread.used_ebb)
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ebbrr, 0, sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ebbhr, sizeof(unsigned long),
+ 2 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.bescr,
+ 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
+
+ return ret;
+}
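
EBBRR, EBBHR and BESCR are exported as three consecutive 64-bit values, and the call reports ENODATA while the tracee has never used EBB. A sketch under the same assumptions (names and the fallback note value are this editor's, not from the patch):

#include <elf.h>
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_PPC_EBB
#define NT_PPC_EBB	0x106		/* assumed value, see uapi/linux/elf.h */
#endif

struct ppc_ebb_regs {
	uint64_t ebbrr;
	uint64_t ebbhr;
	uint64_t bescr;
};

static int read_ebb(pid_t pid, struct ppc_ebb_regs *ebb)
{
	struct iovec iov = { .iov_base = ebb, .iov_len = sizeof(*ebb) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_EBB, &iov) == -1)
		return errno == ENODATA ? 0 : -1;	/* 0: EBB never used */
	return 1;
}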
+static int pmu_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return regset->n;
+}
+
+static int pmu_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+ BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+ BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+ BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.siar, 0,
+ 5 * sizeof(unsigned long));
+}
+
+static int pmu_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret = 0;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+ BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+ BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+ BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.siar, 0,
+ sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.sdar, sizeof(unsigned long),
+ 2 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.sier, 2 * sizeof(unsigned long),
+ 3 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.mmcr2, 3 * sizeof(unsigned long),
+ 4 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.mmcr0, 4 * sizeof(unsigned long),
+ 5 * sizeof(unsigned long));
+ return ret;
+}
+#endif
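
The PMU note packs SIAR, SDAR, SIER, MMCR2 and MMCR0 in that order, as the BUILD_BUG_ONs above require. A tracer-side sketch with the same caveats (assumed note value, invented names):

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_PPC_PMU
#define NT_PPC_PMU	0x107		/* assumed value, see uapi/linux/elf.h */
#endif

struct ppc_pmu_regs {
	uint64_t siar;
	uint64_t sdar;
	uint64_t sier;
	uint64_t mmcr2;
	uint64_t mmcr0;
};

static long read_pmu_regs(pid_t pid, struct ppc_pmu_regs *pmu)
{
	struct iovec iov = { .iov_base = pmu, .iov_len = sizeof(*pmu) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_PMU, &iov);
}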
+/*
* These are our native regset flavors.
*/
enum powerpc_regset {
@@ -630,6 +1909,25 @@ enum powerpc_regset {
#ifdef CONFIG_SPE
REGSET_SPE,
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ REGSET_TM_CGPR, /* TM checkpointed GPR registers */
+ REGSET_TM_CFPR, /* TM checkpointed FPR registers */
+ REGSET_TM_CVMX, /* TM checkpointed VMX registers */
+ REGSET_TM_CVSX, /* TM checkpointed VSX registers */
+ REGSET_TM_SPR, /* TM specific SPR registers */
+ REGSET_TM_CTAR, /* TM checkpointed TAR register */
+ REGSET_TM_CPPR, /* TM checkpointed PPR register */
+ REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
+#endif
+#ifdef CONFIG_PPC64
+ REGSET_PPR, /* PPR register */
+ REGSET_DSCR, /* DSCR register */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ REGSET_TAR, /* TAR register */
+ REGSET_EBB, /* EBB registers */
+ REGSET_PMR, /* Performance Monitor Registers */
+#endif
};
static const struct user_regset native_regsets[] = {
@@ -664,6 +1962,77 @@ static const struct user_regset native_regsets[] = {
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ [REGSET_TM_CGPR] = {
+ .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
+ },
+ [REGSET_TM_CFPR] = {
+ .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
+ },
+ [REGSET_TM_CVMX] = {
+ .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
+ },
+ [REGSET_TM_CVSX] = {
+ .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
+ },
+ [REGSET_TM_SPR] = {
+ .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
+ },
+ [REGSET_TM_CTAR] = {
+ .core_note_type = NT_PPC_TM_CTAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
+ },
+ [REGSET_TM_CPPR] = {
+ .core_note_type = NT_PPC_TM_CPPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
+ },
+ [REGSET_TM_CDSCR] = {
+ .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC64
+ [REGSET_PPR] = {
+ .core_note_type = NT_PPC_PPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = ppr_get, .set = ppr_set
+ },
+ [REGSET_DSCR] = {
+ .core_note_type = NT_PPC_DSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = dscr_get, .set = dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ [REGSET_TAR] = {
+ .core_note_type = NT_PPC_TAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = tar_get, .set = tar_set
+ },
+ [REGSET_EBB] = {
+ .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = ebb_active, .get = ebb_get, .set = ebb_set
+ },
+ [REGSET_PMR] = {
+ .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = pmu_active, .get = pmu_get, .set = pmu_set
+ },
+#endif
};
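
Because the new .active/.get hooks bail out with -ENODEV when the CPU feature is missing, a tracer can probe for a regset before relying on it; kernels that predate a note type answer EINVAL instead, and ENODATA means the regset exists but currently has nothing to report. A hypothetical helper sketching that distinction (not part of the patch):

#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int regset_supported(pid_t pid, int note_type, size_t size)
{
	uint8_t buf[1024];
	struct iovec iov = { .iov_base = buf,
			     .iov_len = size < sizeof(buf) ? size : sizeof(buf) };

	if (ptrace(PTRACE_GETREGSET, pid, note_type, &iov) == 0)
		return 1;
	if (errno == ENODEV || errno == EINVAL)
		return 0;			/* feature or regset not present */
	if (errno == ENODATA)
		return 1;			/* present, just nothing to report right now */
	return -1;
}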
static const struct user_regset_view user_ppc_native_view = {
@@ -674,24 +2043,35 @@ static const struct user_regset_view user_ppc_native_view = {
#ifdef CONFIG_PPC64
#include <linux/compat.h>
-static int gpr32_get(struct task_struct *target,
+static int gpr32_get_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ void *kbuf, void __user *ubuf, bool tm_active)
{
const unsigned long *regs = &target->thread.regs->gpr[0];
+ const unsigned long *ckpt_regs;
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
- if (target->thread.regs == NULL)
- return -EIO;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ ckpt_regs = &target->thread.ckpt_regs.gpr[0];
+#endif
+ if (tm_active) {
+ regs = ckpt_regs;
+ } else {
+ if (target->thread.regs == NULL)
+ return -EIO;
- if (!FULL_REGS(target->thread.regs)) {
- /* We have a partial register set. Fill 14-31 with bogus values */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
+ if (!FULL_REGS(target->thread.regs)) {
+ /*
+ * We have a partial register set.
+ * Fill 14-31 with bogus values.
+ */
+ for (i = 14; i < 32; i++)
+ target->thread.regs->gpr[i] = NV_REG_POISON;
+ }
}
pos /= sizeof(reg);
@@ -731,20 +2111,31 @@ static int gpr32_get(struct task_struct *target,
PT_REGS_COUNT * sizeof(reg), -1);
}
-static int gpr32_set(struct task_struct *target,
+static int gpr32_set_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
+ const void *kbuf, const void __user *ubuf, bool tm_active)
{
unsigned long *regs = &target->thread.regs->gpr[0];
+ unsigned long *ckpt_regs;
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
- if (target->thread.regs == NULL)
- return -EIO;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ ckpt_regs = &target->thread.ckpt_regs.gpr[0];
+#endif
- CHECK_FULL_REGS(target->thread.regs);
+ if (tm_active) {
+ regs = ckpt_regs;
+ } else {
+ regs = &target->thread.regs->gpr[0];
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+ }
pos /= sizeof(reg);
count /= sizeof(reg);
@@ -804,6 +2195,40 @@ static int gpr32_set(struct task_struct *target,
(PT_TRAP + 1) * sizeof(reg), -1);
}
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static int tm_cgpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
+}
+
+static int tm_cgpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
+}
+
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
+}
+
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
@@ -832,6 +2257,73 @@ static const struct user_regset compat_regsets[] = {
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ [REGSET_TM_CGPR] = {
+ .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .active = tm_cgpr_active,
+ .get = tm_cgpr32_get, .set = tm_cgpr32_set
+ },
+ [REGSET_TM_CFPR] = {
+ .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
+ },
+ [REGSET_TM_CVMX] = {
+ .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
+ },
+ [REGSET_TM_CVSX] = {
+ .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
+ },
+ [REGSET_TM_SPR] = {
+ .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
+ },
+ [REGSET_TM_CTAR] = {
+ .core_note_type = NT_PPC_TM_CTAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
+ },
+ [REGSET_TM_CPPR] = {
+ .core_note_type = NT_PPC_TM_CPPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
+ },
+ [REGSET_TM_CDSCR] = {
+ .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC64
+ [REGSET_PPR] = {
+ .core_note_type = NT_PPC_PPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = ppr_get, .set = ppr_set
+ },
+ [REGSET_DSCR] = {
+ .core_note_type = NT_PPC_DSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = dscr_get, .set = dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ [REGSET_TAR] = {
+ .core_note_type = NT_PPC_TAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = tar_get, .set = tar_set
+ },
+ [REGSET_EBB] = {
+ .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = ebb_active, .get = ebb_get, .set = ebb_set
+ },
+#endif
};
static const struct user_regset_view user_ppc_compat_view = {
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index fb2fb3ea85e5..c82eed97bd22 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -698,7 +698,7 @@ static void check_location(struct seq_file *m, const char *c)
/*
* Format:
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
- * the '.' may be an abbrevation
+ * the '.' may be an abbreviation
*/
static void check_location_string(struct seq_file *m, const char *c)
{
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 28736ff27fea..6a3e5de544ce 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -685,7 +685,7 @@ int rtas_set_indicator_fast(int indicator, int index, int new_value)
return rc;
}
-void rtas_restart(char *cmd)
+void __noreturn rtas_restart(char *cmd)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_RESTART);
@@ -704,7 +704,7 @@ void rtas_power_off(void)
for (;;);
}
-void rtas_halt(void)
+void __noreturn rtas_halt(void)
{
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_HALT);
@@ -1070,7 +1070,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
nret = be32_to_cpu(args.nret);
token = be32_to_cpu(args.token);
- if (nargs > ARRAY_SIZE(args.args)
+ if (nargs >= ARRAY_SIZE(args.args)
|| nret > ARRAY_SIZE(args.args)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;
@@ -1174,7 +1174,7 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
- if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
ibm_suspend_me_token = rtas_token("ibm,suspend-me");
}
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index c638e2487a9c..a26a02006576 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -483,7 +483,7 @@ static void rtas_event_scan(struct work_struct *w)
}
#ifdef CONFIG_PPC64
-static void retreive_nvram_error_log(void)
+static void retrieve_nvram_error_log(void)
{
unsigned int err_type ;
int rc ;
@@ -501,7 +501,7 @@ static void retreive_nvram_error_log(void)
}
}
#else /* CONFIG_PPC64 */
-static void retreive_nvram_error_log(void)
+static void retrieve_nvram_error_log(void)
{
}
#endif /* CONFIG_PPC64 */
@@ -513,7 +513,7 @@ static void start_event_scan(void)
(30000 / rtas_event_scan_rate));
/* Retrieve errors from nvram if any */
- retreive_nvram_error_log();
+ retrieve_nvram_error_log();
schedule_delayed_work_on(cpumask_first(cpu_online_mask),
&event_scan_work, event_scan_delay);
@@ -526,10 +526,8 @@ void rtas_cancel_event_scan(void)
}
EXPORT_SYMBOL_GPL(rtas_cancel_event_scan);
-static int __init rtas_init(void)
+static int __init rtas_event_scan_init(void)
{
- struct proc_dir_entry *entry;
-
if (!machine_is(pseries) && !machine_is(chrp))
return 0;
@@ -562,13 +560,27 @@ static int __init rtas_init(void)
return -ENOMEM;
}
+ start_event_scan();
+
+ return 0;
+}
+arch_initcall(rtas_event_scan_init);
+
+static int __init rtas_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (!machine_is(pseries) && !machine_is(chrp))
+ return 0;
+
+ if (!rtas_log_buf)
+ return -ENODEV;
+
entry = proc_create("powerpc/rtas/error_log", S_IRUSR, NULL,
&proc_rtas_log_operations);
if (!entry)
printk(KERN_ERR "Failed to create error_log proc entry\n");
- start_event_scan();
-
return 0;
}
__initcall(rtas_init);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 8ca79b7503d8..dba265c586df 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -35,6 +35,7 @@
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
+#include <linux/hugetlb.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
@@ -61,6 +62,13 @@
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
+#include <asm/udbg.h>
+#include <asm/hugetlb.h>
+#include <asm/livepatch.h>
+#include <asm/mmu_context.h>
+#include <asm/cpu_has_feature.h>
+
+#include "setup.h"
#ifdef DEBUG
#include <asm/udbg.h>
@@ -494,7 +502,7 @@ void __init smp_setup_cpu_maps(void)
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
- if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
const __be32 *ireg;
@@ -575,6 +583,7 @@ void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
extern struct machdep_calls __machine_desc_end;
+ unsigned int i;
/*
* Iterate all ppc_md structures until we find the proper
@@ -582,6 +591,17 @@ void probe_machine(void)
*/
DBG("Probing machine type ...\n");
+ /*
+ * Check ppc_md is empty, if not we have a bug, ie, we setup an
+ * entry before probe_machine() which will be overwritten
+ */
+ for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
+ if (((void **)&ppc_md)[i]) {
+ printk(KERN_ERR "Entry %d in ppc_md non empty before"
+ " machine probe !\n", i);
+ }
+ }
+
for (machine_id = &__machine_desc_start;
machine_id < &__machine_desc_end;
machine_id++) {
@@ -676,6 +696,8 @@ static struct notifier_block ppc_panic_block = {
void __init setup_panic(void)
{
+ if (!ppc_md.panic)
+ return;
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
@@ -744,3 +766,169 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
set_dma_ops(&pdev->dev, &dma_direct_ops);
}
+
+static __init void print_system_info(void)
+{
+ pr_info("-----------------------------------------------------\n");
+#ifdef CONFIG_PPC_STD_MMU_64
+ pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+ pr_info("Hash_size = 0x%lx\n", Hash_size);
+#endif
+ pr_info("phys_mem_size = 0x%llx\n",
+ (unsigned long long)memblock_phys_mem_size());
+
+ pr_info("dcache_bsize = 0x%x\n", dcache_bsize);
+ pr_info("icache_bsize = 0x%x\n", icache_bsize);
+ if (ucache_bsize != 0)
+ pr_info("ucache_bsize = 0x%x\n", ucache_bsize);
+
+ pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+ pr_info(" possible = 0x%016lx\n",
+ (unsigned long)CPU_FTRS_POSSIBLE);
+ pr_info(" always = 0x%016lx\n",
+ (unsigned long)CPU_FTRS_ALWAYS);
+ pr_info("cpu_user_features = 0x%08x 0x%08x\n",
+ cur_cpu_spec->cpu_user_features,
+ cur_cpu_spec->cpu_user_features2);
+ pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+#ifdef CONFIG_PPC64
+ pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+ if (htab_address)
+ pr_info("htab_address = 0x%p\n", htab_address);
+ if (htab_hash_mask)
+ pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif
+#ifdef CONFIG_PPC_STD_MMU_32
+ if (Hash)
+ pr_info("Hash = 0x%p\n", Hash);
+ if (Hash_mask)
+ pr_info("Hash_mask = 0x%lx\n", Hash_mask);
+#endif
+
+ if (PHYSICAL_START > 0)
+ pr_info("physical_start = 0x%llx\n",
+ (unsigned long long)PHYSICAL_START);
+ pr_info("-----------------------------------------------------\n");
+}
+
+/*
+ * Called into from start_kernel this initializes memblock, which is used
+ * to manage page allocation until mem_init is called.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ *cmdline_p = boot_command_line;
+
+ /* Set a half-reasonable default so udelay does something sensible */
+ loops_per_jiffy = 500000000 / HZ;
+
+ /* Unflatten the device-tree passed by prom_init or kexec */
+ unflatten_device_tree();
+
+ /*
+ * Initialize cache line/block info from device-tree (on ppc64) or
+ * just cputable (on ppc32).
+ */
+ initialize_cache_info();
+
+ /* Initialize RTAS if available. */
+ rtas_initialize();
+
+ /* Check if we have an initrd provided via the device-tree. */
+ check_for_initrd();
+
+ /* Probe the machine type, establish ppc_md. */
+ probe_machine();
+
+ /* Setup panic notifier if requested by the platform. */
+ setup_panic();
+
+ /*
+ * Configure ppc_md.power_save (ppc32 only, 64-bit machines do
+ * it from their respective probe() function.
+ */
+ setup_power_save();
+
+ /* Discover standard serial ports. */
+ find_legacy_serial_ports();
+
+ /* Register early console with the printk subsystem. */
+ register_early_udbg_console();
+
+ /* Setup the various CPU maps based on the device-tree. */
+ smp_setup_cpu_maps();
+
+ /* Initialize xmon. */
+ xmon_setup();
+
+ /* Check the SMT related command line arguments (ppc64). */
+ check_smt_enabled();
+
+ /* On BookE, setup per-core TLB data structures. */
+ setup_tlb_core_data();
+
+ /*
+ * Release secondary cpus out of their spinloops at 0x60 now that
+ * we can map physical -> logical CPU ids.
+ *
+ * Freescale Book3e parts spin in a loop provided by firmware,
+ * so smp_release_cpus() does nothing for them.
+ */
+#ifdef CONFIG_SMP
+ smp_release_cpus();
+#endif
+
+ /* Print various info about the machine that has been gathered so far. */
+ print_system_info();
+
+ /* Reserve large chunks of memory for use by CMA for KVM. */
+ kvm_cma_reserve();
+
+ /*
+ * Reserve any gigantic pages requested on the command line.
+ * memblock needs to have been initialized by the time this is
+ * called since this will reserve memory.
+ */
+ reserve_hugetlb_gpages();
+
+ klp_init_thread_info(&init_thread_info);
+
+ init_mm.start_code = (unsigned long)_stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = klimit;
+#ifdef CONFIG_PPC_64K_PAGES
+ init_mm.context.pte_frag = NULL;
+#endif
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ mm_iommu_init(&init_mm.context);
+#endif
+ irqstack_early_init();
+ exc_lvl_early_init();
+ emergency_stack_init();
+
+ initmem_init();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+ if (ppc_md.setup_arch)
+ ppc_md.setup_arch();
+
+ paging_init();
+
+ /* Initialize the MMU context management stuff. */
+ mmu_context_init();
+
+#ifdef CONFIG_PPC64
+ /* Interrupt code needs to be 64K-aligned. */
+ if ((unsigned long)_stext & 0xffff)
+ panic("Kernelbase not 64K-aligned (0x%lx)!\n",
+ (unsigned long)_stext);
+#endif
+}
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
new file mode 100644
index 000000000000..cfba134b3024
--- /dev/null
+++ b/arch/powerpc/kernel/setup.h
@@ -0,0 +1,58 @@
+/*
+ * Prototypes for functions that are shared between setup_(32|64|common).c
+ *
+ * Copyright 2016 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ARCH_POWERPC_KERNEL_SETUP_H
+#define __ARCH_POWERPC_KERNEL_SETUP_H
+
+void initialize_cache_info(void);
+void irqstack_early_init(void);
+
+#ifdef CONFIG_PPC32
+void setup_power_save(void);
+#else
+static inline void setup_power_save(void) { };
+#endif
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+void check_smt_enabled(void);
+#else
+static inline void check_smt_enabled(void) { };
+#endif
+
+#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+void setup_tlb_core_data(void);
+#else
+static inline void setup_tlb_core_data(void) { };
+#endif
+
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+void exc_lvl_early_init(void);
+#else
+static inline void exc_lvl_early_init(void) { };
+#endif
+
+#ifdef CONFIG_PPC64
+void emergency_stack_init(void);
+#else
+static inline void emergency_stack_init(void) { };
+#endif
+
+/*
+ * Having this in kvm_ppc.h makes include dependencies too
+ * tricky to solve for setup-common.c so have it here.
+ */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvm_cma_reserve(void);
+#else
+static inline void kvm_cma_reserve(void) { };
+#endif
+
+#endif /* __ARCH_POWERPC_KERNEL_SETUP_H */
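The new header leans on a standard kernel idiom: when the relevant Kconfig option is enabled the real function is declared, otherwise an empty static inline stub is provided, so setup-common.c can call every hook unconditionally. A minimal stand-alone sketch of that idiom follows (illustrative only; CONFIG_FOO and foo_init() are made-up names, not part of this patch):

    /* stub_pattern.c - build with: cc -DCONFIG_FOO stub_pattern.c (or without -D) */
    #include <stdio.h>

    #ifdef CONFIG_FOO
    static void foo_init(void)
    {
            printf("real foo_init() ran\n");
    }
    #else
    /* Empty stub: the unconditional call below compiles away to nothing. */
    static inline void foo_init(void) { }
    #endif

    int main(void)
    {
            foo_init();     /* no #ifdef needed at the call site */
            return 0;
    }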
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index d544fa311757..c3e861df4b20 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -36,9 +36,8 @@
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
-#include <asm/mmu_context.h>
-#include <asm/epapr_hcalls.h>
#include <asm/code-patching.h>
+#include <asm/cpu_has_feature.h>
#define DBG(fmt...)
@@ -62,9 +61,7 @@ int icache_bsize;
int ucache_bsize;
/*
- * We're called here very early in the boot. We determine the machine
- * type and call the appropriate low-level setup functions.
- * -- Cort <cort@fsmlabs.com>
+ * We're called here very early in the boot.
*
* Note that the kernel may be running at an address which is different
* from the address that it was linked at, so we must use RELOC/PTRRELOC
@@ -73,7 +70,6 @@ int ucache_bsize;
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
- struct cpu_spec *spec;
/* First zero the BSS -- use memset_io, some platforms don't have
* caches on yet */
@@ -84,27 +80,19 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
- spec = identify_cpu(offset, mfspr(SPRN_PVR));
+ identify_cpu(offset, mfspr(SPRN_PVR));
- do_feature_fixups(spec->cpu_features,
- PTRRELOC(&__start___ftr_fixup),
- PTRRELOC(&__stop___ftr_fixup));
-
- do_feature_fixups(spec->mmu_features,
- PTRRELOC(&__start___mmu_ftr_fixup),
- PTRRELOC(&__stop___mmu_ftr_fixup));
-
- do_lwsync_fixups(spec->cpu_features,
- PTRRELOC(&__start___lwsync_fixup),
- PTRRELOC(&__stop___lwsync_fixup));
-
- do_final_fixups();
+ apply_feature_fixups();
return KERNELBASE + offset;
}
/*
+ * This is run before start_kernel(). The kernel has been relocated
+ * and we are running with enough of the MMU enabled to have our
+ * proper kernel virtual addresses.
+ *
* Find out what kind of machine we're on and save any data we need
* from the early boot process (devtree is copied on pmac by prom_init()).
* This is called very early on the boot process, after a minimal
@@ -123,27 +111,9 @@ notrace void __init machine_init(u64 dt_ptr)
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
- epapr_paravirt_early_init();
-
early_init_mmu();
- probe_machine();
-
setup_kdump_trampoline();
-
-#ifdef CONFIG_6xx
- if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
- cpu_has_feature(CPU_FTR_CAN_NAP))
- ppc_md.power_save = ppc6xx_idle;
-#endif
-
-#ifdef CONFIG_E500
- if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
- cpu_has_feature(CPU_FTR_CAN_NAP))
- ppc_md.power_save = e500_idle;
-#endif
- if (ppc_md.progress)
- ppc_md.progress("id mach(): done", 0x200);
}
/* Checks "l2cr=xxxx" command-line option */
@@ -221,7 +191,7 @@ int __init ppc_init(void)
arch_initcall(ppc_init);
-static void __init irqstack_early_init(void)
+void __init irqstack_early_init(void)
{
unsigned int i;
@@ -236,7 +206,7 @@ static void __init irqstack_early_init(void)
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-static void __init exc_lvl_early_init(void)
+void __init exc_lvl_early_init(void)
{
unsigned int i, hw_cpu;
@@ -259,33 +229,25 @@ static void __init exc_lvl_early_init(void)
#endif
}
}
-#else
-#define exc_lvl_early_init()
#endif
-/* Warning, IO base is not yet inited */
-void __init setup_arch(char **cmdline_p)
+void __init setup_power_save(void)
{
- *cmdline_p = boot_command_line;
-
- /* so udelay does something sensible, assume <= 1000 bogomips */
- loops_per_jiffy = 500000000 / HZ;
-
- unflatten_device_tree();
- check_for_initrd();
-
- if (ppc_md.init_early)
- ppc_md.init_early();
-
- find_legacy_serial_ports();
-
- smp_setup_cpu_maps();
-
- /* Register early console */
- register_early_udbg_console();
+#ifdef CONFIG_6xx
+ if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
+ cpu_has_feature(CPU_FTR_CAN_NAP))
+ ppc_md.power_save = ppc6xx_idle;
+#endif
- xmon_setup();
+#ifdef CONFIG_E500
+ if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
+ cpu_has_feature(CPU_FTR_CAN_NAP))
+ ppc_md.power_save = e500_idle;
+#endif
+}
+__init void initialize_cache_info(void)
+{
/*
* Set cache line size based on type of cpu as a default.
* Systems with OF can look in the properties on the cpu node(s)
@@ -296,32 +258,4 @@ void __init setup_arch(char **cmdline_p)
ucache_bsize = 0;
if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
ucache_bsize = icache_bsize = dcache_bsize;
-
- if (ppc_md.panic)
- setup_panic();
-
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
-
- exc_lvl_early_init();
-
- irqstack_early_init();
-
- initmem_init();
- if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab);
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
- if (ppc_md.setup_arch)
- ppc_md.setup_arch();
- if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
-
- paging_init();
-
- /* Initialize the MMU context management stuff */
- mmu_context_init();
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 96d4a2b23d0f..eafb9a79e011 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -35,7 +35,6 @@
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
-#include <linux/hugetlb.h>
#include <linux/memory.h>
#include <linux/nmi.h>
@@ -64,12 +63,10 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
-#include <asm/mmu_context.h>
#include <asm/code-patching.h>
-#include <asm/kvm_ppc.h>
-#include <asm/hugetlb.h>
-#include <asm/epapr_hcalls.h>
#include <asm/livepatch.h>
+#include <asm/opal.h>
+#include <asm/cputhreads.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -100,7 +97,7 @@ int icache_bsize;
int ucache_bsize;
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
-static void setup_tlb_core_data(void)
+void __init setup_tlb_core_data(void)
{
int cpu;
@@ -133,10 +130,6 @@ static void setup_tlb_core_data(void)
}
}
}
-#else
-static void setup_tlb_core_data(void)
-{
-}
#endif
#ifdef CONFIG_SMP
@@ -144,7 +137,7 @@ static void setup_tlb_core_data(void)
static char *smt_enabled_cmdline;
/* Look for ibm,smt-enabled OF option */
-static void check_smt_enabled(void)
+void __init check_smt_enabled(void)
{
struct device_node *dn;
const char *smt_option;
@@ -193,12 +186,10 @@ static int __init early_smt_enabled(char *p)
}
early_param("smt-enabled", early_smt_enabled);
-#else
-#define check_smt_enabled()
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
-static void fixup_boot_paca(void)
+static void __init fixup_boot_paca(void)
{
/* The boot cpu is started */
get_paca()->cpu_start = 1;
@@ -206,23 +197,50 @@ static void fixup_boot_paca(void)
get_paca()->data_offset = 0;
}
-static void cpu_ready_for_interrupts(void)
+static void __init configure_exceptions(void)
{
- /* Set IR and DR in PACA MSR */
- get_paca()->kernel_msr = MSR_KERNEL;
-
/*
- * Enable AIL if supported, and we are in hypervisor mode. If we are
- * not in hypervisor mode, we enable relocation-on interrupts later
- * in pSeries_setup_arch() using the H_SET_MODE hcall.
+ * Setup the trampolines from the lowmem exception vectors
+ * to the kdump kernel when not using a relocatable kernel.
*/
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- cpu_has_feature(CPU_FTR_ARCH_207S)) {
- unsigned long lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ setup_kdump_trampoline();
+
+ /* Under a PAPR hypervisor, we need hypercalls */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ /* Enable AIL if possible */
+ pseries_enable_reloc_on_exc();
+
+ /*
+ * Tell the hypervisor that we want our exceptions to
+ * be taken in little endian mode.
+ *
+ * We don't call this for big endian as our calling convention
+ * makes us always enter in BE, and the call may fail under
+ * some circumstances with kdump.
+ */
+#ifdef __LITTLE_ENDIAN__
+ pseries_little_endian_exceptions();
+#endif
+ } else {
+ /* Set endian mode using OPAL */
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ opal_configure_cores();
+
+ /* Enable AIL if supported, and we are in hypervisor mode */
+ if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+ early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ unsigned long lpcr = mfspr(SPRN_LPCR);
+ mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ }
}
}
+static void cpu_ready_for_interrupts(void)
+{
+ /* Set IR and DR in PACA MSR */
+ get_paca()->kernel_msr = MSR_KERNEL;
+}
+
/*
* Early initialization entry point. This is called by head.S
* with MMU translation disabled. We rely on the "feature" of
@@ -270,18 +288,18 @@ void __init early_setup(unsigned long dt_ptr)
*/
early_init_devtree(__va(dt_ptr));
- epapr_paravirt_early_init();
-
/* Now we know the logical id of our boot cpu, setup the paca. */
setup_paca(&paca[boot_cpuid]);
fixup_boot_paca();
- /* Probe the machine type */
- probe_machine();
-
- setup_kdump_trampoline();
+ /*
+ * Configure exception handlers. This include setting up trampolines
+ * if needed, setting exception endian mode, etc...
+ */
+ configure_exceptions();
- DBG("Found, Initializing memory management...\n");
+ /* Apply all the dynamic patching */
+ apply_feature_fixups();
/* Initialize the hash table or TLB handling */
early_init_mmu();
@@ -293,16 +311,6 @@ void __init early_setup(unsigned long dt_ptr)
*/
cpu_ready_for_interrupts();
- /* Reserve large chunks of memory for use by CMA for KVM */
- kvm_cma_reserve();
-
- /*
- * Reserve any gigantic pages requested on the command line.
- * memblock needs to have been initialized by the time this is
- * called since this will reserve memory.
- */
- reserve_hugetlb_gpages();
-
DBG(" <- early_setup()\n");
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
@@ -321,7 +329,7 @@ void __init early_setup(unsigned long dt_ptr)
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
- /* Mark interrupts enabled in PACA */
+ /* Mark interrupts disabled in PACA */
get_paca()->soft_enabled = 0;
/* Initialize the hash table or TLB handling */
@@ -391,7 +399,7 @@ void smp_release_cpus(void)
* cache informations about the CPU that will be used by cache flush
* routines and/or provided to userland
*/
-static void __init initialize_cache_info(void)
+void __init initialize_cache_info(void)
{
struct device_node *np;
unsigned long num_cpus = 0;
@@ -456,127 +464,11 @@ static void __init initialize_cache_info(void)
}
}
- DBG(" <- initialize_cache_info()\n");
-}
-
-
-/*
- * Do some initial setup of the system. The parameters are those which
- * were passed in from the bootloader.
- */
-void __init setup_system(void)
-{
- DBG(" -> setup_system()\n");
-
- /* Apply the CPUs-specific and firmware specific fixups to kernel
- * text (nop out sections not relevant to this CPU or this firmware)
- */
- do_feature_fixups(cur_cpu_spec->cpu_features,
- &__start___ftr_fixup, &__stop___ftr_fixup);
- do_feature_fixups(cur_cpu_spec->mmu_features,
- &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
- do_feature_fixups(powerpc_firmware_features,
- &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
- do_lwsync_fixups(cur_cpu_spec->cpu_features,
- &__start___lwsync_fixup, &__stop___lwsync_fixup);
- do_final_fixups();
-
- /*
- * Unflatten the device-tree passed by prom_init or kexec
- */
- unflatten_device_tree();
-
- /*
- * Fill the ppc64_caches & systemcfg structures with informations
- * retrieved from the device-tree.
- */
- initialize_cache_info();
-
-#ifdef CONFIG_PPC_RTAS
- /*
- * Initialize RTAS if available
- */
- rtas_initialize();
-#endif /* CONFIG_PPC_RTAS */
-
- /*
- * Check if we have an initrd provided via the device-tree
- */
- check_for_initrd();
-
- /*
- * Do some platform specific early initializations, that includes
- * setting up the hash table pointers. It also sets up some interrupt-mapping
- * related options that will be used by finish_device_tree()
- */
- if (ppc_md.init_early)
- ppc_md.init_early();
-
- /*
- * We can discover serial ports now since the above did setup the
- * hash table management for us, thus ioremap works. We do that early
- * so that further code can be debugged
- */
- find_legacy_serial_ports();
-
- /*
- * Register early console
- */
- register_early_udbg_console();
-
- /*
- * Initialize xmon
- */
- xmon_setup();
-
- smp_setup_cpu_maps();
- check_smt_enabled();
- setup_tlb_core_data();
-
- /*
- * Freescale Book3e parts spin in a loop provided by firmware,
- * so smp_release_cpus() does nothing for them
- */
-#if defined(CONFIG_SMP)
- /* Release secondary cpus out of their spinloops at 0x60 now that
- * we can map physical -> logical CPU ids
- */
- smp_release_cpus();
-#endif
-
- pr_info("Starting Linux %s %s\n", init_utsname()->machine,
- init_utsname()->version);
-
- pr_info("-----------------------------------------------------\n");
- pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
- pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
-
- if (ppc64_caches.dline_size != 0x80)
- pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
- if (ppc64_caches.iline_size != 0x80)
- pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
-
- pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
- pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
- pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
- pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
- cur_cpu_spec->cpu_user_features2);
- pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
- pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
-
-#ifdef CONFIG_PPC_STD_MMU_64
- if (htab_address)
- pr_info("htab_address = 0x%p\n", htab_address);
-
- pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif
-
- if (PHYSICAL_START > 0)
- pr_info("physical_start = 0x%llx\n",
- (unsigned long long)PHYSICAL_START);
- pr_info("-----------------------------------------------------\n");
+ /* For use by binfmt_elf */
+ dcache_bsize = ppc64_caches.dline_size;
+ icache_bsize = ppc64_caches.iline_size;
- DBG(" <- setup_system()\n");
+ DBG(" <- initialize_cache_info()\n");
}
/* This returns the limit below which memory accesses to the linear
@@ -584,7 +476,7 @@ void __init setup_system(void)
* used to allocate interrupt or emergency stacks for which our
* exception entry path doesn't deal with being interrupted.
*/
-static u64 safe_stack_limit(void)
+static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
/* Freescale BookE bolts the entire linear mapping */
@@ -600,7 +492,7 @@ static u64 safe_stack_limit(void)
#endif
}
-static void __init irqstack_early_init(void)
+void __init irqstack_early_init(void)
{
u64 limit = safe_stack_limit();
unsigned int i;
@@ -620,7 +512,7 @@ static void __init irqstack_early_init(void)
}
#ifdef CONFIG_PPC_BOOK3E
-static void __init exc_lvl_early_init(void)
+void __init exc_lvl_early_init(void)
{
unsigned int i;
unsigned long sp;
@@ -642,8 +534,6 @@ static void __init exc_lvl_early_init(void)
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
patch_exception(0x040, exc_debug_debug_book3e);
}
-#else
-#define exc_lvl_early_init()
#endif
/*
@@ -651,7 +541,7 @@ static void __init exc_lvl_early_init(void)
* early in SMP boots before relocation is enabled. Exclusive emergency
* stack for machine checks.
*/
-static void __init emergency_stack_init(void)
+void __init emergency_stack_init(void)
{
u64 limit;
unsigned int i;
@@ -682,61 +572,6 @@ static void __init emergency_stack_init(void)
}
}
-/*
- * Called into from start_kernel this initializes memblock, which is used
- * to manage page allocation until mem_init is called.
- */
-void __init setup_arch(char **cmdline_p)
-{
- *cmdline_p = boot_command_line;
-
- /*
- * Set cache line size based on type of cpu as a default.
- * Systems with OF can look in the properties on the cpu node(s)
- * for a possibly more accurate value.
- */
- dcache_bsize = ppc64_caches.dline_size;
- icache_bsize = ppc64_caches.iline_size;
-
- if (ppc_md.panic)
- setup_panic();
-
- klp_init_thread_info(&init_thread_info);
-
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
-#ifdef CONFIG_PPC_64K_PAGES
- init_mm.context.pte_frag = NULL;
-#endif
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- mm_iommu_init(&init_mm.context);
-#endif
- irqstack_early_init();
- exc_lvl_early_init();
- emergency_stack_init();
-
- initmem_init();
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
- if (ppc_md.setup_arch)
- ppc_md.setup_arch();
-
- paging_init();
-
- /* Initialize the MMU context management stuff */
- mmu_context_init();
-
- /* Interrupt code needs to be 64K-aligned */
- if ((unsigned long)_stext & 0xffff)
- panic("Kernelbase not 64K-aligned (0x%lx)!\n",
- (unsigned long)_stext);
-}
-
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE ()
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 25520794aa37..7e49984d4331 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -104,6 +104,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
+ unsigned long vrsave;
#endif
unsigned long msr = regs->msr;
long err = 0;
@@ -125,9 +126,13 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec.
*/
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- current->thread.vrsave = mfspr(SPRN_VRSAVE);
- err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
+ vrsave = 0;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ vrsave = mfspr(SPRN_VRSAVE);
+ current->thread.vrsave = vrsave;
+ }
+
+ err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 55c924b65f71..25a39052bf6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -31,6 +31,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
+#include <linux/profile.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -53,6 +54,8 @@
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
+#include <asm/asm-prototypes.h>
+#include <asm/cpu_has_feature.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -593,6 +596,7 @@ out:
of_node_put(np);
return id;
}
+EXPORT_SYMBOL_GPL(cpu_to_core_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 692873bff334..c4f1d1f7bae0 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PPC64
/* Time in microseconds we delay before sleeping in the idle loop */
-DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
+static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
static ssize_t store_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 3ed9a5a21d77..3efbedefba6a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -56,6 +56,7 @@
#include <linux/irq_work.h>
#include <linux/clk-provider.h>
#include <linux/suspend.h>
+#include <linux/rtc.h>
#include <asm/trace.h>
#include <asm/io.h>
@@ -96,7 +97,8 @@ static struct clocksource clocksource_timebase = {
.read = timebase_read,
};
-#define DECREMENTER_MAX 0x7fffffff
+#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
+u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev);
@@ -166,7 +168,15 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
+#ifdef CONFIG_PPC_SPLPAR
void (*dtl_consumer)(struct dtl_entry *, u64);
+#endif
+
+#ifdef CONFIG_PPC64
+#define get_accounting(tsk) (&get_paca()->accounting)
+#else
+#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+#endif
static void calc_cputime_factors(void)
{
@@ -186,7 +196,7 @@ static void calc_cputime_factors(void)
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
-static u64 read_spurr(u64 tb)
+static unsigned long read_spurr(unsigned long tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
@@ -249,8 +259,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
void accumulate_stolen_time(void)
{
u64 sst, ust;
-
u8 save_soft_enabled = local_paca->soft_enabled;
+ struct cpu_accounting_data *acct = &local_paca->accounting;
/* We are called early in the exception entry, before
* soft/hard_enabled are sync'ed to the expected state
@@ -260,10 +270,10 @@ void accumulate_stolen_time(void)
*/
local_paca->soft_enabled = 0;
- sst = scan_dispatch_log(local_paca->starttime_user);
- ust = scan_dispatch_log(local_paca->starttime);
- local_paca->system_time -= sst;
- local_paca->user_time -= ust;
+ sst = scan_dispatch_log(acct->starttime_user);
+ ust = scan_dispatch_log(acct->starttime);
+ acct->system_time -= sst;
+ acct->user_time -= ust;
local_paca->stolen_time += ust + sst;
local_paca->soft_enabled = save_soft_enabled;
@@ -275,7 +285,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
stolen = scan_dispatch_log(stop_tb);
- get_paca()->system_time -= stolen;
+ get_paca()->accounting.system_time -= stolen;
}
stolen += get_paca()->stolen_time;
@@ -295,27 +305,29 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
* Account time for a transition between system, hard irq
* or soft irq state.
*/
-static u64 vtime_delta(struct task_struct *tsk,
- u64 *sys_scaled, u64 *stolen)
+static unsigned long vtime_delta(struct task_struct *tsk,
+ unsigned long *sys_scaled,
+ unsigned long *stolen)
{
- u64 now, nowscaled, deltascaled;
- u64 udelta, delta, user_scaled;
+ unsigned long now, nowscaled, deltascaled;
+ unsigned long udelta, delta, user_scaled;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
WARN_ON_ONCE(!irqs_disabled());
now = mftb();
nowscaled = read_spurr(now);
- get_paca()->system_time += now - get_paca()->starttime;
- get_paca()->starttime = now;
- deltascaled = nowscaled - get_paca()->startspurr;
- get_paca()->startspurr = nowscaled;
+ acct->system_time += now - acct->starttime;
+ acct->starttime = now;
+ deltascaled = nowscaled - acct->startspurr;
+ acct->startspurr = nowscaled;
*stolen = calculate_stolen_time(now);
- delta = get_paca()->system_time;
- get_paca()->system_time = 0;
- udelta = get_paca()->user_time - get_paca()->utime_sspurr;
- get_paca()->utime_sspurr = get_paca()->user_time;
+ delta = acct->system_time;
+ acct->system_time = 0;
+ udelta = acct->user_time - acct->utime_sspurr;
+ acct->utime_sspurr = acct->user_time;
/*
* Because we don't read the SPURR on every kernel entry/exit,
@@ -337,14 +349,14 @@ static u64 vtime_delta(struct task_struct *tsk,
*sys_scaled = deltascaled;
}
}
- get_paca()->user_time_scaled += user_scaled;
+ acct->user_time_scaled += user_scaled;
return delta;
}
void vtime_account_system(struct task_struct *tsk)
{
- u64 delta, sys_scaled, stolen;
+ unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
account_system_time(tsk, 0, delta, sys_scaled);
@@ -355,7 +367,7 @@ EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
- u64 delta, sys_scaled, stolen;
+ unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
account_idle_time(delta + stolen);
@@ -373,15 +385,32 @@ void vtime_account_idle(struct task_struct *tsk)
void vtime_account_user(struct task_struct *tsk)
{
cputime_t utime, utimescaled;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
- utime = get_paca()->user_time;
- utimescaled = get_paca()->user_time_scaled;
- get_paca()->user_time = 0;
- get_paca()->user_time_scaled = 0;
- get_paca()->utime_sspurr = 0;
+ utime = acct->user_time;
+ utimescaled = acct->user_time_scaled;
+ acct->user_time = 0;
+ acct->user_time_scaled = 0;
+ acct->utime_sspurr = 0;
account_user_time(tsk, utime, utimescaled);
}
+#ifdef CONFIG_PPC32
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+ struct cpu_accounting_data *acct = get_accounting(current);
+
+ acct->starttime = get_accounting(prev)->starttime;
+ acct->system_time = 0;
+ acct->user_time = 0;
+}
+#endif /* CONFIG_PPC32 */
+
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#define calc_cputime_factors()
#endif
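The reworked vtime code keeps the usual delta-accounting shape: read the timebase, charge now - starttime to the accumulated system time, advance starttime, and hand the accumulated total to the scheduler accounting. A stripped-down, stand-alone sketch of that shape (illustrative only; read_counter() stands in for mftb(), and none of the names below are kernel APIs):

    /* vtime_sketch.c - delta-based time accounting, reduced to the bones */
    #include <stdio.h>
    #include <time.h>

    struct cpu_accounting {
            unsigned long starttime;
            unsigned long system_time;
    };

    /* Stand-in for mftb(): any monotonically increasing counter will do. */
    static unsigned long read_counter(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (unsigned long)ts.tv_sec * 1000000000ul + ts.tv_nsec;
    }

    static unsigned long account_delta(struct cpu_accounting *acct)
    {
            unsigned long now = read_counter();
            unsigned long delta;

            acct->system_time += now - acct->starttime;
            acct->starttime = now;

            delta = acct->system_time;      /* hand the accumulated time to the caller */
            acct->system_time = 0;
            return delta;
    }

    int main(void)
    {
            struct cpu_accounting acct = { .starttime = read_counter() };

            printf("charged %lu ns\n", account_delta(&acct));
            return 0;
    }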
@@ -504,8 +533,8 @@ static void __timer_interrupt(void)
__this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
- if (now <= DECREMENTER_MAX)
- set_dec((int)now);
+ if (now <= decrementer_max)
+ set_dec(now);
/* We may have raced with new irq work */
if (test_irq_work_pending())
set_dec(1);
@@ -535,7 +564,7 @@ void timer_interrupt(struct pt_regs * regs)
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
*/
- set_dec(DECREMENTER_MAX);
+ set_dec(decrementer_max);
/* Some implementations of hotplug will get timer interrupts while
* offline, just ignore these and we also need to set
@@ -583,9 +612,9 @@ static void generic_suspend_disable_irqs(void)
* with suspending.
*/
- set_dec(DECREMENTER_MAX);
+ set_dec(decrementer_max);
local_irq_disable();
- set_dec(DECREMENTER_MAX);
+ set_dec(decrementer_max);
}
static void generic_suspend_enable_irqs(void)
@@ -866,7 +895,7 @@ static int decrementer_set_next_event(unsigned long evt,
static int decrementer_shutdown(struct clock_event_device *dev)
{
- decrementer_set_next_event(DECREMENTER_MAX, dev);
+ decrementer_set_next_event(decrementer_max, dev);
return 0;
}
@@ -892,6 +921,49 @@ static void register_decrementer_clockevent(int cpu)
clockevents_register_device(dec);
}
+static void enable_large_decrementer(void)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ if (decrementer_max <= DECREMENTER_DEFAULT_MAX)
+ return;
+
+ /*
+ * If we're running as the hypervisor we need to enable the LD manually
+ * otherwise firmware should have done it for us.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_LD);
+}
+
+static void __init set_decrementer_max(void)
+{
+ struct device_node *cpu;
+ u32 bits = 32;
+
+ /* Prior to ISAv3 the decrementer is always 32 bit */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ cpu = of_find_node_by_type(NULL, "cpu");
+
+ if (of_property_read_u32(cpu, "ibm,dec-bits", &bits) == 0) {
+ if (bits > 64 || bits < 32) {
+ pr_warn("time_init: firmware supplied invalid ibm,dec-bits");
+ bits = 32;
+ }
+
+ /* calculate the signed maximum given this many bits */
+ decrementer_max = (1ul << (bits - 1)) - 1;
+ }
+
+ of_node_put(cpu);
+
+ pr_info("time_init: %u bit decrementer (max: %llx)\n",
+ bits, decrementer_max);
+}
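The value programmed into decrementer_max above is the largest non-negative number representable in a signed field "ibm,dec-bits" wide, i.e. (1ul << (bits - 1)) - 1. A quick check of that arithmetic (illustrative only, and it assumes a 64-bit unsigned long as on ppc64):

    /* dec_max.c - sanity-check the signed-maximum formula */
    #include <stdio.h>

    static unsigned long signed_max(unsigned int bits)
    {
            return (1ul << (bits - 1)) - 1;         /* valid for 32 <= bits <= 64 */
    }

    int main(void)
    {
            printf("32 bits -> 0x%lx\n", signed_max(32));   /* 0x7fffffff */
            printf("56 bits -> 0x%lx\n", signed_max(56));   /* 0x7fffffffffffff */
            printf("64 bits -> 0x%lx\n", signed_max(64));   /* 0x7fffffffffffffff */
            return 0;
    }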
+
static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
@@ -899,7 +971,7 @@ static void __init init_decrementer_clockevent(void)
clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
decrementer_clockevent.max_delta_ns =
- clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
+ clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
decrementer_clockevent.min_delta_ns =
clockevent_delta2ns(2, &decrementer_clockevent);
@@ -908,6 +980,9 @@ static void __init init_decrementer_clockevent(void)
void secondary_cpu_time_init(void)
{
+ /* Enable and test the large decrementer for this cpu */
+ enable_large_decrementer();
+
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
@@ -973,6 +1048,10 @@ void __init time_init(void)
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
+ /* initialise and enable the large decrementer (if we have one) */
+ set_decrementer_max();
+ enable_large_decrementer();
+
/* Start the decrementer on CPUs that have manual control
* such as BookE
*/
@@ -1081,6 +1160,29 @@ void calibrate_delay(void)
loops_per_jiffy = tb_ticks_per_jiffy;
}
+#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
+static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
+{
+ ppc_md.get_rtc_time(tm);
+ return rtc_valid_tm(tm);
+}
+
+static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
+{
+ if (!ppc_md.set_rtc_time)
+ return -EOPNOTSUPP;
+
+ if (ppc_md.set_rtc_time(tm) < 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static const struct rtc_class_ops rtc_generic_ops = {
+ .read_time = rtc_generic_get_time,
+ .set_time = rtc_generic_set_time,
+};
+
static int __init rtc_init(void)
{
struct platform_device *pdev;
@@ -1088,9 +1190,12 @@ static int __init rtc_init(void)
if (!ppc_md.get_rtc_time)
return -ENODEV;
- pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+ &rtc_generic_ops,
+ sizeof(rtc_generic_ops));
return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
+#endif
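With platform_device_register_data() the rtc-generic driver now receives its read/set callbacks as platform data instead of reaching back into ppc_md. The ops-table-as-data pattern in isolation looks roughly like the following sketch (hedged, self-contained, not kernel code; clock_ops and driver_probe() are hypothetical names):

    /* ops_as_data.c - stand-alone illustration of handing a consumer an ops table */
    #include <stdio.h>

    struct clock_ops {
            int (*read_time)(long *out);
            int (*set_time)(long t);
    };

    static long fake_clock = 1234;

    static int my_read(long *out) { *out = fake_clock; return 0; }
    static int my_set(long t)     { fake_clock = t;    return 0; }

    static const struct clock_ops my_ops = {
            .read_time = my_read,
            .set_time  = my_set,
    };

    /* The "driver": it only sees the ops table it was handed. */
    static int driver_probe(const struct clock_ops *ops)
    {
            long now;

            if (ops->read_time(&now))
                    return -1;
            printf("time is %ld\n", now);
            return ops->set_time ? ops->set_time(now + 1) : 0;
    }

    int main(void)
    {
            return driver_probe(&my_ops);
    }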
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index b7019b559ddb..298afcf3bf2a 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -338,8 +338,6 @@ _GLOBAL(__tm_recheckpoint)
*/
subi r7, r7, STACK_FRAME_OVERHEAD
- SET_SCRATCH0(r1)
-
mfmsr r6
/* R4 = original MSR to indicate whether thread used FP/Vector etc. */
@@ -468,6 +466,7 @@ restore_gprs:
* until we turn MSR RI back on.
*/
+ SET_SCRATCH0(r1)
ld r5, -8(r1)
ld r1, -16(r1)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9229ba63c370..2cb589264cb7 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -60,6 +60,8 @@
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
+#include <asm/asm-prototypes.h>
+#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -307,9 +309,13 @@ long hmi_exception_realmode(struct pt_regs *regs)
{
__this_cpu_inc(irq_stat.hmi_exceptions);
+ wait_for_subcore_guest_exit();
+
if (ppc_md.hmi_exception_early)
ppc_md.hmi_exception_early(regs);
+ wait_for_tb_resync();
+
return 0;
}
@@ -1376,6 +1382,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
+ [FSCR_LM_LG] = "LM",
};
char *facility = "unknown";
u64 value;
@@ -1418,7 +1425,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
- mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ current->thread.fscr |= FSCR_DSCR;
+ mtspr(SPRN_FSCR, current->thread.fscr);
}
/* Read from DSCR (mfspr RT, 0x03) */
@@ -1432,6 +1440,14 @@ void facility_unavailable_exception(struct pt_regs *regs)
emulate_single_step(regs);
}
return;
+ } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * This process has touched LM, so turn it on forever
+ * for this process
+ */
+ current->thread.fscr |= FSCR_LM;
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ return;
}
if ((status < ARRAY_SIZE(facility_strings)) &&
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 1c2e7a343bf5..616a6d854638 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -70,10 +70,11 @@ _GLOBAL(load_up_altivec)
MTMSRD(r5) /* enable use of AltiVec now */
isync
- /* Hack: if we get an altivec unavailable trap with VRSAVE
- * set to all zeros, we assume this is a broken application
- * that fails to set it properly, and thus we switch it to
- * all 1's
+ /*
+ * While userspace in general ignores VRSAVE, glibc uses it as a boolean
+ * to optimise userspace context save/restore. Whenever we take an
+ * altivec unavailable exception we must set VRSAVE to something non-zero.
+ * Set it to all 1s. See also the programming note in the ISA.
*/
mfspr r4,SPRN_VRSAVE
cmpwi 0,r4,0
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 8d7358f3a273..b3813ddb2fb4 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -482,7 +482,7 @@ static void vio_cmo_balance(struct work_struct *work)
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
@@ -503,7 +503,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
@@ -515,7 +515,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
@@ -539,7 +539,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
@@ -552,7 +552,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
@@ -588,7 +588,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
static void vio_dma_iommu_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 2dd91f79de05..b5fba689fca6 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -165,7 +165,7 @@ SECTIONS
. = ALIGN(8);
.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
{
-#ifdef CONFIG_RELOCATABLE_PPC32
+#ifdef CONFIG_PPC32
__dynamic_symtab = .;
#endif
*(.dynsym)
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index eba0bea6e032..1f9e5529e692 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -20,7 +20,7 @@ common-objs-y += powerpc.o emulate.o emulate_loadstore.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
-AFLAGS_booke_interrupts.o := -I$(obj)
+AFLAGS_booke_interrupts.o := -I$(objtree)/$(obj)
kvm-e500-objs := \
$(common-objs-y) \
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 114edace6cdd..a587e8f4fd26 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -34,9 +34,9 @@
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
- ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
- pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
- false);
+ mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
+ pte->pagesize, pte->pagesize,
+ MMU_SEGSIZE_256M, false);
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
@@ -169,13 +169,13 @@ map_again:
/* In case we tried normal mapping already, let's nuke old entries */
if (attempt > 1)
- if (ppc_md.hpte_remove(hpteg) < 0) {
+ if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
r = -1;
goto out_unlock;
}
- ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
- hpsize, hpsize, MMU_SEGSIZE_256M);
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
+ hpsize, hpsize, MMU_SEGSIZE_256M);
if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */
@@ -187,8 +187,10 @@ map_again:
trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte);
- /* The ppc_md code may give us a secondary entry even though we
- asked for a primary. Fix up. */
+ /*
+ * The mmu_hash_ops code may give us a secondary entry even
+ * though we asked for a primary. Fix up.
+ */
if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
hash = ~hash;
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 18cf6d1f8174..c379ff5a4438 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -242,7 +242,8 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
struct kvmppc_spapr_tce_table *stt;
long i, ret = H_SUCCESS, idx;
unsigned long entry, ua = 0;
- u64 __user *tces, tce;
+ u64 __user *tces;
+ u64 tce;
stt = kvmppc_find_table(vcpu, liobn);
if (!stt)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e20beae5ca7a..2fd5580c8f6e 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -52,6 +52,7 @@
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
+#include <asm/hmi.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -2522,7 +2523,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
spin_unlock(&pvc->lock);
- kvm_guest_enter();
+ guest_enter();
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
@@ -2570,7 +2571,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
- kvm_guest_exit();
+ guest_exit();
for (sub = 0; sub < core_info.n_subcores; ++sub)
list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
@@ -3401,6 +3402,38 @@ static struct kvmppc_ops kvm_ops_hv = {
.hcall_implemented = kvmppc_hcall_impl_hv,
};
+static int kvm_init_subcore_bitmap(void)
+{
+ int i, j;
+ int nr_cores = cpu_nr_cores();
+ struct sibling_subcore_state *sibling_subcore_state;
+
+ for (i = 0; i < nr_cores; i++) {
+ int first_cpu = i * threads_per_core;
+ int node = cpu_to_node(first_cpu);
+
+ /* Ignore if it is already allocated. */
+ if (paca[first_cpu].sibling_subcore_state)
+ continue;
+
+ sibling_subcore_state =
+ kmalloc_node(sizeof(struct sibling_subcore_state),
+ GFP_KERNEL, node);
+ if (!sibling_subcore_state)
+ return -ENOMEM;
+
+ memset(sibling_subcore_state, 0,
+ sizeof(struct sibling_subcore_state));
+
+ for (j = 0; j < threads_per_core; j++) {
+ int cpu = first_cpu + j;
+
+ paca[cpu].sibling_subcore_state = sibling_subcore_state;
+ }
+ }
+ return 0;
+}
+
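kvm_init_subcore_bitmap() allocates one sibling_subcore_state per physical core and points every thread's paca at that same object, so all threads of a core see the same coordination flags. Detached from the paca and kmalloc_node() details, the sharing pattern is roughly the sketch below (illustrative only; THREADS_PER_CORE and all names here are made up):

    /* shared_per_group.c - one shared state object per group of sibling CPUs */
    #include <stdlib.h>

    #define THREADS_PER_CORE 8

    struct core_state { unsigned long flags; };

    /* After this, state[cpu] for every thread of a core points at one object. */
    static int init_shared_state(struct core_state **state, int nr_cpus)
    {
            int first, t;

            for (first = 0; first < nr_cpus; first += THREADS_PER_CORE) {
                    struct core_state *cs;

                    if (state[first])       /* already allocated for this core */
                            continue;

                    cs = calloc(1, sizeof(*cs));
                    if (!cs)
                            return -1;

                    for (t = 0; t < THREADS_PER_CORE && first + t < nr_cpus; t++)
                            state[first + t] = cs;
            }
            return 0;
    }

    int main(void)
    {
            struct core_state *state[32] = { NULL };

            return init_shared_state(state, 32) ? 1 : 0;
    }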
static int kvmppc_book3s_init_hv(void)
{
int r;
@@ -3411,6 +3444,10 @@ static int kvmppc_book3s_init_hv(void)
if (r < 0)
return -ENODEV;
+ r = kvm_init_subcore_bitmap();
+ if (r)
+ return r;
+
kvm_ops_hv.owner = THIS_MODULE;
kvmppc_hv_ops = &kvm_ops_hv;
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 93b5f5c9b445..0fa70a9618d7 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -13,6 +13,9 @@
#include <linux/kernel.h>
#include <asm/opal.h>
#include <asm/mce.h>
+#include <asm/machdep.h>
+#include <asm/cputhreads.h>
+#include <asm/hmi.h>
/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR (1ul << (63-42))
@@ -140,3 +143,176 @@ long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
return kvmppc_realmode_mc_power7(vcpu);
}
+
+/* Check if dynamic split is in force and return subcore size accordingly. */
+static inline int kvmppc_cur_subcore_size(void)
+{
+ if (local_paca->kvm_hstate.kvm_split_mode)
+ return local_paca->kvm_hstate.kvm_split_mode->subcore_size;
+
+ return threads_per_subcore;
+}
+
+void kvmppc_subcore_enter_guest(void)
+{
+ int thread_id, subcore_id;
+
+ thread_id = cpu_thread_in_core(local_paca->paca_index);
+ subcore_id = thread_id / kvmppc_cur_subcore_size();
+
+ local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
+}
+
+void kvmppc_subcore_exit_guest(void)
+{
+ int thread_id, subcore_id;
+
+ thread_id = cpu_thread_in_core(local_paca->paca_index);
+ subcore_id = thread_id / kvmppc_cur_subcore_size();
+
+ local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
+}
+
+static bool kvmppc_tb_resync_required(void)
+{
+ if (test_and_set_bit(CORE_TB_RESYNC_REQ_BIT,
+ &local_paca->sibling_subcore_state->flags))
+ return false;
+
+ return true;
+}
+
+static void kvmppc_tb_resync_done(void)
+{
+ clear_bit(CORE_TB_RESYNC_REQ_BIT,
+ &local_paca->sibling_subcore_state->flags);
+}
+
+/*
+ * kvmppc_realmode_hmi_handler() is called only by primary thread during
+ * guest exit path.
+ *
+ * There are multiple reasons why HMI could occur, one of them is
+ * Timebase (TB) error. If this HMI is due to TB error, then TB would
+ * have been in a stopped state. The opal hmi handler will fix it and
+ * restore the TB value with the host timebase value. For HMIs caused by
+ * non-TB errors, the opal hmi handler will not touch/restore the TB register
+ * and hence there won't be any change in TB value.
+ *
+ * Since we are not sure about the cause of this HMI, we can't be sure
+ * about the content of the TB register, i.e. whether it holds the guest or
+ * host timebase value. Hence the idea is to resync the TB on every HMI, so
+ * that we know the exact state of the TB value. The resync TB call will
+ * restore the TB to the host timebase.
+ *
+ * Things to consider:
+ * - On TB error, HMI interrupt is reported on all the threads of the core
+ * that has encountered TB error irrespective of split-core mode.
+ * - The very first thread on the core that gets a chance to fix the TB error
+ *   will resync the TB with the local chipTOD value.
+ * - The resync TB is a core level action i.e. it will sync all the TBs
+ * in that core independent of split-core mode. This means if we trigger
+ * TB sync from a thread from one subcore, it would affect TB values of
+ * sibling subcores of the same core.
+ *
+ * All threads need to co-ordinate before making the opal hmi handler call.
+ * All threads will use sibling_subcore_state->in_guest[] (shared by all
+ * threads in the core) in paca which holds information about whether
+ * sibling subcores are in Guest mode or host mode. The in_guest[] array
+ * is of size MAX_SUBCORE_PER_CORE=4, indexed using subcore id to set/unset
+ * subcore status. Only the primary thread from each subcore is responsible
+ * for setting/unsetting its designated array element while entering/exiting
+ * the guest.
+ *
+ * After invoking the opal hmi handler, one of the threads (of the entire core)
+ * will need to resync the TB. Bit 63 of the subcore state bitmap flags
+ * (sibling_subcore_state->flags) will be used to co-ordinate between
+ * primary threads to decide who takes up the responsibility.
+ *
+ * This is what we do:
+ * - Primary thread from each subcore tries to set resync required bit[63]
+ * of paca->sibling_subcore_state->flags.
+ * - The first primary thread that is able to set the flag takes the
+ *   responsibility of the TB resync. (Let us call it the thread leader.)
+ * - All other threads which are in host will call
+ * wait_for_subcore_guest_exit() and wait for in_guest[0-3] from
+ * paca->sibling_subcore_state to get cleared.
+ * - All the primary threads will clear their subcore status from the subcore
+ *   state in_guest[] array respectively.
+ * - Once all primary threads clear in_guest[0-3], all of them will invoke
+ * opal hmi handler.
+ * - Now all threads will wait for TB resync to complete by invoking
+ * wait_for_tb_resync() except the thread leader.
+ * - The thread leader will do a TB resync by invoking the
+ *   opal_resync_timebase() call, and then it will clear the resync required bit.
+ * - All other threads will now come out of resync wait loop and proceed
+ * with individual execution.
+ * - On return from this function, the primary thread will signal all
+ * secondary threads to proceed.
+ * - All secondary threads will eventually call opal hmi handler on
+ * their exit path.
+ */
+
+long kvmppc_realmode_hmi_handler(void)
+{
+ int ptid = local_paca->kvm_hstate.ptid;
+ bool resync_req;
+
+ /* This is only called on primary thread. */
+ BUG_ON(ptid != 0);
+ __this_cpu_inc(irq_stat.hmi_exceptions);
+
+ /*
+	 * By now the primary thread has already completed the guest->host
+	 * partition switch but hasn't signaled the secondaries yet.
+	 * All the secondary threads on this subcore are waiting
+	 * for the primary thread to signal them to go ahead.
+ *
+	 * Threads from a subcore which isn't in the guest will all
+	 * wait until all other subcores on this core exit the guest.
+ *
+ * Now set the resync required bit. If you are the first to
+ * set this bit then kvmppc_tb_resync_required() function will
+	 * return true. For all other subcores
+ * kvmppc_tb_resync_required() will return false.
+ *
+ * If resync_req == true, then this thread is responsible to
+ * initiate TB resync after hmi handler has completed.
+ * All other threads on this core will wait until this thread
+ * clears the resync required bit flag.
+ */
+ resync_req = kvmppc_tb_resync_required();
+
+ /* Reset the subcore status to indicate it has exited guest */
+ kvmppc_subcore_exit_guest();
+
+ /*
+ * Wait for other subcores on this core to exit the guest.
+ * All the primary threads and threads from subcore that are
+ * not in guest will wait here until all subcores are out
+ * of guest context.
+ */
+ wait_for_subcore_guest_exit();
+
+ /*
+ * At this point we are sure that primary threads from each
+ * subcore on this core have completed guest->host partition
+ * switch. Now it is safe to call HMI handler.
+ */
+ if (ppc_md.hmi_exception_early)
+ ppc_md.hmi_exception_early(NULL);
+
+ /*
+ * Check if this thread is responsible to resync TB.
+ * All other threads will wait until this thread completes the
+ * TB resync.
+ */
+ if (resync_req) {
+ opal_resync_timebase();
+ /* Reset TB resync req bit */
+ kvmppc_tb_resync_done();
+ } else {
+ wait_for_tb_resync();
+ }
+ return 0;
+}
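The resync-required bit above is effectively a leader election: the first primary thread whose test-and-set finds the bit clear performs the timebase resync, and everyone else waits for it to finish. A minimal userspace sketch of that election using C11 atomics (illustrative only; it mimics the control flow, not the kernel's test_and_set_bit() or the OPAL call):

    /* tb_leader.c - first thread to set the flag does the work, others wait.
     * Requires C11 <threads.h> support. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    static atomic_flag resync_req = ATOMIC_FLAG_INIT;
    static atomic_int  resync_done;

    static int cpu_thread(void *arg)
    {
            (void)arg;

            if (!atomic_flag_test_and_set(&resync_req)) {
                    /* Leader: perform the (pretend) resync, then release waiters. */
                    puts("leader: resyncing timebase");
                    atomic_store(&resync_done, 1);
                    atomic_flag_clear(&resync_req);
            } else {
                    /* Everyone else: wait for the leader to finish. */
                    while (!atomic_load(&resync_done))
                            thrd_yield();
            }
            return 0;
    }

    int main(void)
    {
            thrd_t t[4];
            int i;

            for (i = 0; i < 4; i++)
                    thrd_create(&t[i], cpu_thread, NULL);
            for (i = 0; i < 4; i++)
                    thrd_join(t[i], NULL);
            return 0;
    }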
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e571ad277398..975655573844 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -29,6 +29,7 @@
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
+#include <asm/opal.h>
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
@@ -373,6 +374,18 @@ kvm_secondary_got_guest:
lwsync
std r0, HSTATE_KVM_VCORE(r13)
+ /*
+ * All secondaries exiting guest will fall through this path.
+ * Before proceeding, just check for HMI interrupt and
+ * invoke opal hmi handler. By now we are sure that the
+	 * primary thread on this core/subcore has already done the partition
+	 * switch/TB resync and we are good to call the opal hmi handler.
+ */
+ cmpwi r12, BOOK3S_INTERRUPT_HMI
+ bne kvm_no_guest
+
+ li r3,0 /* NULL argument */
+ bl hmi_exception_realmode
/*
* At this point we have finished executing in the guest.
* We need to wait for hwthread_req to become zero, since
@@ -392,7 +405,7 @@ kvm_no_guest:
cmpwi r3, 0
bne 54f
/*
- * We jump to power7_wakeup_loss, which will return to the caller
+ * We jump to pnv_wakeup_loss, which will return to the caller
* of power7_nap in the powernv cpu offline loop. The value we
* put in r3 becomes the return value for power7_nap.
*/
@@ -401,7 +414,7 @@ kvm_no_guest:
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
li r3, 0
- b power7_wakeup_loss
+ b pnv_wakeup_loss
53: HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
@@ -428,6 +441,22 @@ kvm_no_guest:
*/
kvm_unsplit_nap:
/*
+ * When secondaries are napping in kvm_unsplit_nap() with
+	 * hwthread_req = 1, the HMI goes ignored even though the subcores have
+	 * already exited the guest. Hence the HMI keeps waking up secondaries
+	 * from nap in a loop and the secondaries always go back to nap since
+	 * no vcore is assigned to them. This makes it impossible for the primary
+	 * thread to get hold of the secondary threads, resulting in a soft
+ * lockup in KVM path.
+ *
+ * Let us check if HMI is pending and handle it before we go to nap.
+ */
+ cmpwi r12, BOOK3S_INTERRUPT_HMI
+ bne 55f
+ li r3, 0 /* NULL argument */
+ bl hmi_exception_realmode
+55:
+ /*
* Ensure that secondary doesn't nap when it has
* its vcore pointer set.
*/
@@ -601,6 +630,11 @@ BEGIN_FTR_SECTION
mtspr SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ /* Mark the subcore state as inside guest */
+ bl kvmppc_subcore_enter_guest
+ nop
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r4, HSTATE_KVM_VCPU(r13)
li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
@@ -655,112 +689,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
- b skip_tm
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
-
- /* Turn on TM/FP/VSX/VMX so we can restore them. */
- mfmsr r5
- li r6, MSR_TM >> 32
- sldi r6, r6, 32
- or r5, r5, r6
- ori r5, r5, MSR_FP
- oris r5, r5, (MSR_VEC | MSR_VSX)@h
- mtmsrd r5
-
- /*
- * The user may change these outside of a transaction, so they must
- * always be context switched.
- */
- ld r5, VCPU_TFHAR(r4)
- ld r6, VCPU_TFIAR(r4)
- ld r7, VCPU_TEXASR(r4)
- mtspr SPRN_TFHAR, r5
- mtspr SPRN_TFIAR, r6
- mtspr SPRN_TEXASR, r7
-
- ld r5, VCPU_MSR(r4)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq skip_tm /* TM not active in guest */
-
- /* Make sure the failure summary is set, otherwise we'll program check
- * when we trechkpt. It's possible that this might have been not set
- * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
- * host.
- */
- oris r7, r7, (TEXASR_FS)@h
- mtspr SPRN_TEXASR, r7
-
- /*
- * We need to load up the checkpointed state for the guest.
- * We need to do this early as it will blow away any GPRs, VSRs and
- * some SPRs.
- */
-
- mr r31, r4
- addi r3, r31, VCPU_FPRS_TM
- bl load_fp_state
- addi r3, r31, VCPU_VRS_TM
- bl load_vr_state
- mr r4, r31
- lwz r7, VCPU_VRSAVE_TM(r4)
- mtspr SPRN_VRSAVE, r7
-
- ld r5, VCPU_LR_TM(r4)
- lwz r6, VCPU_CR_TM(r4)
- ld r7, VCPU_CTR_TM(r4)
- ld r8, VCPU_AMR_TM(r4)
- ld r9, VCPU_TAR_TM(r4)
- mtlr r5
- mtcr r6
- mtctr r7
- mtspr SPRN_AMR, r8
- mtspr SPRN_TAR, r9
-
- /*
- * Load up PPR and DSCR values but don't put them in the actual SPRs
- * till the last moment to avoid running with userspace PPR and DSCR for
- * too long.
- */
- ld r29, VCPU_DSCR_TM(r4)
- ld r30, VCPU_PPR_TM(r4)
-
- std r2, PACATMSCRATCH(r13) /* Save TOC */
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* Load GPRs r0-r28 */
- reg = 0
- .rept 29
- ld reg, VCPU_GPRS_TM(reg)(r31)
- reg = reg + 1
- .endr
-
- mtspr SPRN_DSCR, r29
- mtspr SPRN_PPR, r30
-
- /* Load final GPRs */
- ld 29, VCPU_GPRS_TM(29)(r31)
- ld 30, VCPU_GPRS_TM(30)(r31)
- ld 31, VCPU_GPRS_TM(31)(r31)
-
- /* TM checkpointed state is now setup. All GPRs are now volatile. */
- TRECHKPT
-
- /* Now let's get back the state we need. */
- HMT_MEDIUM
- GET_PACA(r13)
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATMSCRATCH(r13)
-
- /* Set the MSR RI since we have our registers back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-skip_tm:
+ bl kvmppc_restore_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
/* Load guest PMU registers */
@@ -841,12 +771,6 @@ BEGIN_FTR_SECTION
/* Skip next section on POWER7 */
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
/* Load up POWER8-specific registers */
ld r5, VCPU_IAMR(r4)
lwz r6, VCPU_PSPB(r4)
@@ -1436,106 +1360,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
- b 2f
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
- /* Turn on TM. */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
- ld r5, VCPU_MSR(r9)
- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beq 1f /* TM not active in guest. */
-
- li r3, TM_CAUSE_KVM_RESCHED
-
- /* Clear the MSR RI since r1, r13 are all going to be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
- /* All GPRs are volatile at this point. */
- TRECLAIM(R3)
-
- /* Temporarily store r13 and r9 so we have some regs to play with */
- SET_SCRATCH0(r13)
- GET_PACA(r13)
- std r9, PACATMSCRATCH(r13)
- ld r9, HSTATE_KVM_VCPU(r13)
-
- /* Get a few more GPRs free. */
- std r29, VCPU_GPRS_TM(29)(r9)
- std r30, VCPU_GPRS_TM(30)(r9)
- std r31, VCPU_GPRS_TM(31)(r9)
-
- /* Save away PPR and DSCR soon so don't run with user values. */
- mfspr r31, SPRN_PPR
- HMT_MEDIUM
- mfspr r30, SPRN_DSCR
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-
- /* Save all but r9, r13 & r29-r31 */
- reg = 0
- .rept 29
- .if (reg != 9) && (reg != 13)
- std reg, VCPU_GPRS_TM(reg)(r9)
- .endif
- reg = reg + 1
- .endr
- /* ... now save r13 */
- GET_SCRATCH0(r4)
- std r4, VCPU_GPRS_TM(13)(r9)
- /* ... and save r9 */
- ld r4, PACATMSCRATCH(r13)
- std r4, VCPU_GPRS_TM(9)(r9)
-
- /* Reload stack pointer and TOC. */
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
-
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-
- /* Save away checkpinted SPRs. */
- std r31, VCPU_PPR_TM(r9)
- std r30, VCPU_DSCR_TM(r9)
- mflr r5
- mfcr r6
- mfctr r7
- mfspr r8, SPRN_AMR
- mfspr r10, SPRN_TAR
- std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
- std r7, VCPU_CTR_TM(r9)
- std r8, VCPU_AMR_TM(r9)
- std r10, VCPU_TAR_TM(r9)
-
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-
- /* Save FP/VSX. */
- addi r3, r9, VCPU_FPRS_TM
- bl store_fp_state
- addi r3, r9, VCPU_VRS_TM
- bl store_vr_state
- mfspr r6, SPRN_VRSAVE
- stw r6, VCPU_VRSAVE_TM(r9)
-1:
- /*
- * We need to save these SPRs after the treclaim so that the software
- * error code is recorded correctly in the TEXASR. Also the user may
- * change these outside of a transaction, so they must always be
- * context switched.
- */
- mfspr r5, SPRN_TFHAR
- mfspr r6, SPRN_TFIAR
- mfspr r7, SPRN_TEXASR
- std r5, VCPU_TFHAR(r9)
- std r6, VCPU_TFIAR(r9)
- std r7, VCPU_TEXASR(r9)
-2:
+ bl kvmppc_save_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
/* Increment yield count if they have a VPA */
@@ -1683,6 +1509,23 @@ BEGIN_FTR_SECTION
mtspr SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ /* If HMI, call kvmppc_realmode_hmi_handler() */
+ cmpwi r12, BOOK3S_INTERRUPT_HMI
+ bne 27f
+ bl kvmppc_realmode_hmi_handler
+ nop
+ li r12, BOOK3S_INTERRUPT_HMI
+ /*
+ * At this point kvmppc_realmode_hmi_handler would have resync-ed
+ * the TB. Hence it is not required to subtract guest timebase
+ * offset from timebase. So, skip it.
+ *
+ * Also, do not call kvmppc_subcore_exit_guest() because it has
+ * been invoked as part of kvmppc_realmode_hmi_handler().
+ */
+ b 30f
+
+27:
/* Subtract timebase offset from timebase */
ld r8,VCORE_TB_OFFSET(r5)
cmpdi r8,0
@@ -1698,8 +1541,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
addis r8,r8,0x100 /* if so, increment upper 40 bits */
mtspr SPRN_TBU40,r8
+17: bl kvmppc_subcore_exit_guest
+ nop
+30: ld r5,HSTATE_KVM_VCORE(r13)
+ ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
+
/* Reset PCR */
-17: ld r0, VCORE_PCR(r5)
+ ld r0, VCORE_PCR(r5)
cmpdi r0, 0
beq 18f
li r0, 0
@@ -2245,6 +2093,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
/* save FP state */
bl kvmppc_save_fp
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ ld r9, HSTATE_KVM_VCPU(r13)
+ bl kvmppc_save_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+
/*
* Set DEC to the smaller of DEC and HDEC, so that we wake
* no later than the end of our timeslice (HDEC interrupts
@@ -2321,6 +2176,12 @@ kvm_end_cede:
bl kvmhv_accumulate_time
#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ bl kvmppc_restore_tm
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+
/* load up FP state */
bl kvmppc_load_fp
@@ -2461,6 +2322,8 @@ BEGIN_FTR_SECTION
cmpwi r6, 3 /* hypervisor doorbell? */
beq 3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ cmpwi r6, 0xa /* Hypervisor maintenance ? */
+ beq 4f
li r3, 1 /* anything else, return 1 */
0: blr
@@ -2482,6 +2345,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r3, -1
blr
+ /* Woken up due to Hypervisor maintenance interrupt */
+4: li r12, BOOK3S_INTERRUPT_HMI
+ li r3, 1
+ blr
+
/*
* Determine what sort of external interrupt is pending (if any).
* Returns:
@@ -2631,6 +2499,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mr r4,r31
blr
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Save transactional state and TM-related registers.
+ * Called with r9 pointing to the vcpu struct.
+ * This can modify all checkpointed registers, but
+ * restores r1, r2 and r9 (vcpu pointer) before exit.
+ */
+kvmppc_save_tm:
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+
+ /* Turn on TM. */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ ld r5, VCPU_MSR(r9)
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beq 1f /* TM not active in guest. */
+
+ std r1, HSTATE_HOST_R1(r13)
+ li r3, TM_CAUSE_KVM_RESCHED
+
+ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /* All GPRs are volatile at this point. */
+ TRECLAIM(R3)
+
+ /* Temporarily store r13 and r9 so we have some regs to play with */
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9, PACATMSCRATCH(r13)
+ ld r9, HSTATE_KVM_VCPU(r13)
+
+ /* Get a few more GPRs free. */
+ std r29, VCPU_GPRS_TM(29)(r9)
+ std r30, VCPU_GPRS_TM(30)(r9)
+ std r31, VCPU_GPRS_TM(31)(r9)
+
+ /* Save away PPR and DSCR soon so we don't run with user values. */
+ mfspr r31, SPRN_PPR
+ HMT_MEDIUM
+ mfspr r30, SPRN_DSCR
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+
+ /* Save all but r9, r13 & r29-r31 */
+ reg = 0
+ .rept 29
+ .if (reg != 9) && (reg != 13)
+ std reg, VCPU_GPRS_TM(reg)(r9)
+ .endif
+ reg = reg + 1
+ .endr
+ /* ... now save r13 */
+ GET_SCRATCH0(r4)
+ std r4, VCPU_GPRS_TM(13)(r9)
+ /* ... and save r9 */
+ ld r4, PACATMSCRATCH(r13)
+ std r4, VCPU_GPRS_TM(9)(r9)
+
+ /* Reload stack pointer and TOC. */
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATOC(r13)
+
+ /* Set MSR RI now we have r1 and r13 back. */
+ li r5, MSR_RI
+ mtmsrd r5, 1
+
+ /* Save away checkpointed SPRs. */
+ std r31, VCPU_PPR_TM(r9)
+ std r30, VCPU_DSCR_TM(r9)
+ mflr r5
+ mfcr r6
+ mfctr r7
+ mfspr r8, SPRN_AMR
+ mfspr r10, SPRN_TAR
+ std r5, VCPU_LR_TM(r9)
+ stw r6, VCPU_CR_TM(r9)
+ std r7, VCPU_CTR_TM(r9)
+ std r8, VCPU_AMR_TM(r9)
+ std r10, VCPU_TAR_TM(r9)
+
+ /* Restore r12 as trap number. */
+ lwz r12, VCPU_TRAP(r9)
+
+ /* Save FP/VSX. */
+ addi r3, r9, VCPU_FPRS_TM
+ bl store_fp_state
+ addi r3, r9, VCPU_VRS_TM
+ bl store_vr_state
+ mfspr r6, SPRN_VRSAVE
+ stw r6, VCPU_VRSAVE_TM(r9)
+1:
+ /*
+ * We need to save these SPRs after the treclaim so that the software
+ * error code is recorded correctly in the TEXASR. Also the user may
+ * change these outside of a transaction, so they must always be
+ * context switched.
+ */
+ mfspr r5, SPRN_TFHAR
+ mfspr r6, SPRN_TFIAR
+ mfspr r7, SPRN_TEXASR
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+ std r7, VCPU_TEXASR(r9)
+
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Restore transactional state and TM-related registers.
+ * Called with r4 pointing to the vcpu struct.
+ * This potentially modifies all checkpointed registers.
+ * It restores r1, r2, r4 from the PACA.
+ */
+kvmppc_restore_tm:
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+
+ /* Turn on TM/FP/VSX/VMX so we can restore them. */
+ mfmsr r5
+ li r6, MSR_TM >> 32
+ sldi r6, r6, 32
+ or r5, r5, r6
+ ori r5, r5, MSR_FP
+ oris r5, r5, (MSR_VEC | MSR_VSX)@h
+ mtmsrd r5
+
+ /*
+ * The user may change these outside of a transaction, so they must
+ * always be context switched.
+ */
+ ld r5, VCPU_TFHAR(r4)
+ ld r6, VCPU_TFIAR(r4)
+ ld r7, VCPU_TEXASR(r4)
+ mtspr SPRN_TFHAR, r5
+ mtspr SPRN_TFIAR, r6
+ mtspr SPRN_TEXASR, r7
+
+ ld r5, VCPU_MSR(r4)
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beqlr /* TM not active in guest */
+ std r1, HSTATE_HOST_R1(r13)
+
+ /* Make sure the failure summary is set, otherwise we'll program check
+ * when we trechkpt. It's possible that this might have been not set
+ * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+ * host.
+ */
+ oris r7, r7, (TEXASR_FS)@h
+ mtspr SPRN_TEXASR, r7
+
+ /*
+ * We need to load up the checkpointed state for the guest.
+ * We need to do this early as it will blow away any GPRs, VSRs and
+ * some SPRs.
+ */
+
+ mr r31, r4
+ addi r3, r31, VCPU_FPRS_TM
+ bl load_fp_state
+ addi r3, r31, VCPU_VRS_TM
+ bl load_vr_state
+ mr r4, r31
+ lwz r7, VCPU_VRSAVE_TM(r4)
+ mtspr SPRN_VRSAVE, r7
+
+ ld r5, VCPU_LR_TM(r4)
+ lwz r6, VCPU_CR_TM(r4)
+ ld r7, VCPU_CTR_TM(r4)
+ ld r8, VCPU_AMR_TM(r4)
+ ld r9, VCPU_TAR_TM(r4)
+ mtlr r5
+ mtcr r6
+ mtctr r7
+ mtspr SPRN_AMR, r8
+ mtspr SPRN_TAR, r9
+
+ /*
+ * Load up PPR and DSCR values but don't put them in the actual SPRs
+ * till the last moment to avoid running with userspace PPR and DSCR for
+ * too long.
+ */
+ ld r29, VCPU_DSCR_TM(r4)
+ ld r30, VCPU_PPR_TM(r4)
+
+ std r2, PACATMSCRATCH(r13) /* Save TOC */
+
+ /* Clear the MSR RI since r1, r13 are all going to be foobar. */
+ li r5, 0
+ mtmsrd r5, 1
+
+ /* Load GPRs r0-r28 */
+ reg = 0
+ .rept 29
+ ld reg, VCPU_GPRS_TM(reg)(r31)
+ reg = reg + 1
+ .endr
+
+ mtspr SPRN_DSCR, r29
+ mtspr SPRN_PPR, r30
+
+ /* Load final GPRs */
+ ld 29, VCPU_GPRS_TM(29)(r31)
+ ld 30, VCPU_GPRS_TM(30)(r31)
+ ld 31, VCPU_GPRS_TM(31)(r31)
+
+ /* TM checkpointed state is now setup. All GPRs are now volatile. */
+ TRECHKPT
+
+ /* Now let's get back the state we need. */
+ HMT_MEDIUM
+ GET_PACA(r13)
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+ ld r4, HSTATE_KVM_VCPU(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ ld r2, PACATMSCRATCH(r13)
+
+ /* Set the MSR RI since we have our registers back. */
+ li r5, MSR_RI
+ mtmsrd r5, 1
+
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+#endif
+
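The new kvmppc_save_tm/kvmppc_restore_tm helpers both bail out early when the guest MSR shows no transaction active or suspended; the `rldicl.` rotates the two MSR[TS] bits down to the low bits and tests them for zero. A rough user-space model of that test follows, with the bit position spelled out as an assumption rather than taken from asm/reg.h:

```c
/* Simplified model of the MSR[TS] check made by kvmppc_save_tm and
 * kvmppc_restore_tm.  TS_SHIFT below is an assumed bit position used only
 * for illustration; the kernel takes MSR_TS_S_LG from asm/reg.h.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_SHIFT 33	/* assumed position of the low MSR[TS] bit */

static int tm_active_or_suspended(uint64_t msr)
{
	/* Mirrors "rldicl. r5, r5, 64 - MSR_TS_S_LG, 62": rotate the two
	 * transaction-state bits down to bits 1:0 and test for non-zero.
	 */
	return (msr >> TS_SHIFT) & 0x3;
}

int main(void)
{
	uint64_t msr_idle = 0;
	uint64_t msr_in_tx = 1ULL << TS_SHIFT;	/* one TS bit set */

	printf("idle: %d, in transaction: %d\n",
	       tm_active_or_suspended(msr_idle),
	       tm_active_or_suspended(msr_in_tx));
	return 0;
}
```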
/*
* We come here if we get any exception or interrupt while we are
* executing host real mode code while in guest MMU context.
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index d044b8b7c69d..901e6fe00c39 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,7 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 8e4f64f0b774..e76f79a45988 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -35,7 +35,7 @@
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
-#include <asm/hvcall.h>
+#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
@@ -914,7 +914,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* We get here with MSR.EE=1 */
trace_kvm_exit(exit_nr, vcpu);
- kvm_guest_exit();
+ guest_exit();
switch (exit_nr) {
case BOOK3S_INTERRUPT_INST_STORAGE:
@@ -1049,7 +1049,17 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
int emul;
program_interrupt:
- flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+ /*
+ * shadow_srr1 only contains valid flags if we came here via
+ * a program exception. The other exceptions (emulation assist,
+ * FP unavailable, etc.) do not provide flags in SRR1, so use
+ * an illegal-instruction exception when injecting a program
+ * interrupt into the guest.
+ */
+ if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
+ flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
+ else
+ flags = SRR1_PROGILL;
emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
if (emul != EMULATE_DONE) {
@@ -1531,7 +1541,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_clear_debug(vcpu);
- /* No need for kvm_guest_exit. It's done in handle_exit.
+ /* No need for guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */
/* Make sure we save the guest FPU/Altivec/VSX state */
@@ -1690,7 +1700,7 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
if (++kvm_global_user_count == 1)
- pSeries_disable_reloc_on_exc();
+ pseries_disable_reloc_on_exc();
spin_unlock(&kvm_global_user_count_lock);
}
return 0;
@@ -1706,7 +1716,7 @@ static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
spin_lock(&kvm_global_user_count_lock);
BUG_ON(kvm_global_user_count == 0);
if (--kvm_global_user_count == 0)
- pSeries_enable_reloc_on_exc();
+ pseries_enable_reloc_on_exc();
spin_unlock(&kvm_global_user_count_lock);
}
}
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 16c4d88ba27d..42a4b237df5f 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#ifdef PPC64_ELF_ABI_v2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4afae695899a..02b4672f7347 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -776,7 +776,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
- /* No need for kvm_guest_exit. It's done in handle_exit.
+ /* No need for guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */
/* Switch back to user space debug context */
@@ -1012,7 +1012,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
trace_kvm_exit(exit_nr, vcpu);
- __kvm_guest_exit();
+ guest_exit_irqoff();
local_irq_enable();
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 5cc2e7af3a7b..b379146de55b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
- kvmppc_core_queue_program(vcpu, 0);
}
}
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 6249cdc834d1..ed38f8114118 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1823,7 +1823,8 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
return 0;
}
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int r = -EINVAL;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 02416fea7653..6ce40dd6fe51 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -119,7 +119,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
continue;
}
- __kvm_guest_enter();
+ guest_enter_irqoff();
return 1;
}
@@ -588,6 +588,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
#endif
+ case KVM_CAP_PPC_HTM:
+ r = cpu_has_feature(CPU_FTR_TM_COMP) &&
+ is_kvmppc_hv_enabled(kvm);
+ break;
default:
r = 0;
break;
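From user space the new capability is visible through the usual KVM_CHECK_EXTENSION ioctl. A minimal sketch, assuming a uapi linux/kvm.h new enough to define KVM_CAP_PPC_HTM, with error handling kept to a minimum:

```c
/* Probe whether this host's KVM exposes HTM to guests.  Assumes a uapi
 * <linux/kvm.h> that already defines KVM_CAP_PPC_HTM (added alongside
 * this change); error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns 0 when the capability is absent. */
	int htm = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM);
	printf("KVM_CAP_PPC_HTM: %d\n", htm);
	return 0;
}
```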
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 60b0b3fc8fc1..a58abe4afbd1 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -6,7 +6,7 @@
#include <asm/setup.h>
-void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 8e6e51016cc5..fdec6e613e95 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -74,9 +74,9 @@ _GLOBAL(__csum_partial)
ld r11,24(r3)
/*
- * On POWER6 and POWER7 back to back addes take 2 cycles because of
- * the XER dependency. This means the fastest this loop can go is
- * 16 cycles per iteration. The scheduling of the loop below has
+ * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+ * because of the XER dependency. This means the fastest this loop can
+ * go is 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
@@ -275,9 +275,9 @@ source; ld r10,16(r3)
source; ld r11,24(r3)
/*
- * On POWER6 and POWER7 back to back addes take 2 cycles because of
- * the XER dependency. This means the fastest this loop can go is
- * 16 cycles per iteration. The scheduling of the loop below has
+ * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+ * because of the XER dependency. This means the fastest this loop can
+ * go is 16 cycles per iteration. The scheduling of the loop below has
* been shown to hit this on both POWER6 and POWER7.
*/
.align 5
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7ce3870d7ddd..74145f02ad41 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -13,6 +13,7 @@
*/
#include <linux/types.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -20,7 +21,8 @@
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/sections.h>
-
+#include <asm/setup.h>
+#include <asm/firmware.h>
struct fixup_entry {
unsigned long mask;
@@ -130,7 +132,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
-void do_final_fixups(void)
+static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
int *src, *dest;
@@ -151,6 +153,67 @@ void do_final_fixups(void)
#endif
}
+static unsigned long __initdata saved_cpu_features;
+static unsigned int __initdata saved_mmu_features;
+#ifdef CONFIG_PPC64
+static unsigned long __initdata saved_firmware_features;
+#endif
+
+void __init apply_feature_fixups(void)
+{
+ struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));
+
+ *PTRRELOC(&saved_cpu_features) = spec->cpu_features;
+ *PTRRELOC(&saved_mmu_features) = spec->mmu_features;
+
+ /*
+ * Apply the CPU-specific and firmware specific fixups to kernel text
+ * (nop out sections not relevant to this CPU or this firmware).
+ */
+ do_feature_fixups(spec->cpu_features,
+ PTRRELOC(&__start___ftr_fixup),
+ PTRRELOC(&__stop___ftr_fixup));
+
+ do_feature_fixups(spec->mmu_features,
+ PTRRELOC(&__start___mmu_ftr_fixup),
+ PTRRELOC(&__stop___mmu_ftr_fixup));
+
+ do_lwsync_fixups(spec->cpu_features,
+ PTRRELOC(&__start___lwsync_fixup),
+ PTRRELOC(&__stop___lwsync_fixup));
+
+#ifdef CONFIG_PPC64
+ saved_firmware_features = powerpc_firmware_features;
+ do_feature_fixups(powerpc_firmware_features,
+ &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+#endif
+ do_final_fixups();
+
+ /*
+ * Initialise jump label. This causes all the cpu/mmu_has_feature()
+ * checks to take on their correct polarity based on the current set of
+ * CPU/MMU features.
+ */
+ jump_label_init();
+ cpu_feature_keys_init();
+ mmu_feature_keys_init();
+}
+
+static int __init check_features(void)
+{
+ WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
+ "CPU features changed after feature patching!\n");
+ WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
+ "MMU features changed after feature patching!\n");
+#ifdef CONFIG_PPC64
+ WARN(saved_firmware_features != powerpc_firmware_features,
+ "Firmware features changed after feature patching!\n");
+#endif
+
+ return 0;
+}
+late_initcall(check_features);
+
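apply_feature_fixups() nops out the sections a given CPU/MMU/firmware combination does not need, and check_features() later warns if the feature words changed after patching. A toy user-space model of that patch-then-verify pattern; the entry layout below is invented for illustration and is not the kernel's struct fixup_entry:

```c
/* Toy model of feature fixups: sections whose required feature bits are
 * not all present get overwritten with no-ops, and a saved snapshot lets
 * us warn if the feature word changes after patching (cf. check_features()).
 * The structures here are illustrative only, not the kernel's fixup_entry.
 */
#include <stdint.h>
#include <stdio.h>

#define PPC_INST_NOP 0x60000000u	/* "ori r0,r0,0" */

struct toy_fixup {
	uint64_t required_mask;		/* feature bits this section needs */
	uint32_t *start, *end;		/* instruction range to nop out if absent */
};

static uint64_t saved_features;

static void toy_apply_fixups(uint64_t features, struct toy_fixup *f, int n)
{
	saved_features = features;
	for (int i = 0; i < n; i++) {
		if ((features & f[i].required_mask) == f[i].required_mask)
			continue;
		for (uint32_t *p = f[i].start; p < f[i].end; p++)
			*p = PPC_INST_NOP;
	}
}

static void toy_check_features(uint64_t features)
{
	if (features != saved_features)
		fprintf(stderr, "features changed after patching!\n");
}

int main(void)
{
	uint32_t text[4] = { 1, 2, 3, 4 };	/* stand-in "instructions" */
	struct toy_fixup fixups[] = {
		{ .required_mask = 0x2, .start = &text[1], .end = &text[3] },
	};

	toy_apply_fixups(0x1, fixups, 1);	/* bit 0x2 missing: nop out the range */
	toy_check_features(0x1);
	printf("%08x %08x %08x %08x\n", (unsigned)text[0], (unsigned)text[1],
	       (unsigned)text[2], (unsigned)text[3]);
	return 0;
}
```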
#ifdef CONFIG_FTR_FIXUP_SELFTEST
#define check(x) \
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebdf3365..b7b1237d4aa6 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -68,19 +68,3 @@ void __rw_yield(arch_rwlock_t *rw)
get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif
-
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_mb();
-
- while (lock->slock) {
- HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
- }
- HMT_medium();
-
- smp_mb();
-}
-
-EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index c422812f7405..ae69d846a841 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -9,11 +9,7 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
#ifndef CONFIG_GENERIC_CSUM
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 69abf844c2c3..94058c21a482 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -325,7 +325,7 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
}
EXPORT_SYMBOL_GPL(rh_init);
-/* Attach a free memory region, coalesces regions if adjuscent */
+/* Attach a free memory region, coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
rh_block_t *blk;
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index c80fb49ce607..beabc68d9a1e 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -16,15 +16,6 @@
PPC_LONG_ALIGN
.text
-_GLOBAL(strcpy)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r5)
- bne 1b
- blr
-
/* This clears out any unused part of the destination buffer,
just as the libc version does. -- paulus */
_GLOBAL(strncpy)
@@ -33,6 +24,7 @@ _GLOBAL(strncpy)
mtctr r5
addi r6,r3,-1
addi r4,r4,-1
+ .balign 16
1: lbzu r0,1(r4)
cmpwi 0,r0,0
stbu r0,1(r6)
@@ -45,36 +37,13 @@ _GLOBAL(strncpy)
bdnz 2b
blr
-_GLOBAL(strcat)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r0,1(r5)
- cmpwi 0,r0,0
- bne 1b
- addi r5,r5,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r5)
- bne 1b
- blr
-
-_GLOBAL(strcmp)
- addi r5,r3,-1
- addi r4,r4,-1
-1: lbzu r3,1(r5)
- cmpwi 1,r3,0
- lbzu r0,1(r4)
- subf. r3,r0,r3
- beqlr 1
- beq 1b
- blr
-
_GLOBAL(strncmp)
PPC_LCMPI 0,r5,0
beq- 2f
mtctr r5
addi r5,r3,-1
addi r4,r4,-1
+ .balign 16
1: lbzu r3,1(r5)
cmpwi 1,r3,0
lbzu r0,1(r4)
@@ -85,14 +54,6 @@ _GLOBAL(strncmp)
2: li r3,0
blr
-_GLOBAL(strlen)
- addi r4,r3,-1
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- bne 1b
- subf r3,r3,r4
- blr
-
#ifdef CONFIG_PPC32
_GLOBAL(memcmp)
PPC_LCMPI 0,r5,0
@@ -114,6 +75,7 @@ _GLOBAL(memchr)
beq- 2f
mtctr r5
addi r3,r3,-1
+ .balign 16
1: lbzu r0,1(r3)
cmpw 0,r0,r4
bdnzf 2,1b
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index b27e030fc9f8..bf925cdcaca9 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
int enter_vmx_usercopy(void)
{
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 949100577db5..6c5025e81236 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -13,62 +13,115 @@
*/
#include <linux/memblock.h>
+#include <asm/fixmap.h>
+#include <asm/code-patching.h>
#include "mmu_decl.h"
+#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
+
extern int __map_without_ltlbs;
+
/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ * Return PA for this VA if it is in IMMR area, or 0
*/
-void __init MMU_init_hw(void)
+phys_addr_t v_block_mapped(unsigned long va)
{
- /* Nothing to do for the time being but keep it similar to other PPC */
+ unsigned long p = PHYS_IMMR_BASE;
+
+ if (__map_without_ltlbs)
+ return 0;
+ if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
+ return p + va - VIRT_IMMR_BASE;
+ return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+ unsigned long p = PHYS_IMMR_BASE;
+
+ if (__map_without_ltlbs)
+ return 0;
+ if (pa >= p && pa < p + IMMR_SIZE)
+ return VIRT_IMMR_BASE + pa - p;
+ return 0;
}
-#define LARGE_PAGE_SIZE_4M (1<<22)
#define LARGE_PAGE_SIZE_8M (1<<23)
-#define LARGE_PAGE_SIZE_64M (1<<26)
-unsigned long __init mmu_mapin_ram(unsigned long top)
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
{
- unsigned long v, s, mapped;
- phys_addr_t p;
+ /* PIN up to the 3 first 8Mb after IMMR in DTLB table */
+#ifdef CONFIG_PIN_TLB
+ unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
+ unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
+#ifdef CONFIG_PIN_TLB_IMMR
+ int i = 29;
+#else
+ int i = 28;
+#endif
+ unsigned long addr = 0;
+ unsigned long mem = total_lowmem;
+
+ for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
+ mtspr(SPRN_MD_CTR, ctr | (i << 8));
+ mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
+ mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
+ mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
+ addr += LARGE_PAGE_SIZE_8M;
+ mem -= LARGE_PAGE_SIZE_8M;
+ }
+#endif
+}
- v = KERNELBASE;
- p = 0;
- s = top;
+static void mmu_mapin_immr(void)
+{
+ unsigned long p = PHYS_IMMR_BASE;
+ unsigned long v = VIRT_IMMR_BASE;
+ unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
+ int offset;
- if (__map_without_ltlbs)
- return 0;
+ for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
+ map_page(v + offset, p + offset, f);
+}
-#ifdef CONFIG_PPC_4K_PAGES
- while (s >= LARGE_PAGE_SIZE_8M) {
- pmd_t *pmdp;
- unsigned long val = p | MD_PS8MEG;
+/* Address of instructions to patch */
+#ifndef CONFIG_PIN_TLB_IMMR
+extern unsigned int DTLBMiss_jmp;
+#endif
+extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
- *pmdp++ = __pmd(val);
- *pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M);
+void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+{
+ unsigned int instr = *addr;
- v += LARGE_PAGE_SIZE_8M;
- p += LARGE_PAGE_SIZE_8M;
- s -= LARGE_PAGE_SIZE_8M;
- }
-#else /* CONFIG_PPC_16K_PAGES */
- while (s >= LARGE_PAGE_SIZE_64M) {
- pmd_t *pmdp;
- unsigned long val = p | MD_PS8MEG;
+ instr &= 0xffff0000;
+ instr |= (unsigned long)__va(mapped) >> 16;
+ patch_instruction(addr, instr);
+}
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
- *pmdp++ = __pmd(val);
+unsigned long __init mmu_mapin_ram(unsigned long top)
+{
+ unsigned long mapped;
- v += LARGE_PAGE_SIZE_64M;
- p += LARGE_PAGE_SIZE_64M;
- s -= LARGE_PAGE_SIZE_64M;
- }
+ if (__map_without_ltlbs) {
+ mapped = 0;
+ mmu_mapin_immr();
+#ifndef CONFIG_PIN_TLB_IMMR
+ patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
#endif
+ } else {
+ mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
+ }
- mapped = top - s;
+ mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
+ mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
/* If the size of RAM is not an exact power of two, we may not
* have covered RAM in its entirety with 8 MiB
@@ -77,7 +130,8 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
* coverage with normal-sized pages (or other reasons) do not
* attempt to allocate outside the allowed range.
*/
- memblock_set_current_limit(mapped);
+ if (mapped)
+ memblock_set_current_limit(mapped);
return mapped;
}
@@ -90,13 +144,8 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
*/
BUG_ON(first_memblock_base != 0);
-#ifdef CONFIG_PIN_TLB
/* 8xx can only access 24MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
-#else
- /* 8xx can only access 8MB at the moment */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
-#endif
}
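mmu_patch_cmp_limit() above only rewrites the 16-bit immediate of a compare instruction so the 8xx TLB-miss handlers compare against the currently mapped limit. The arithmetic is a mask-and-or on the 32-bit instruction word; a standalone sketch with made-up encodings:

```c
/* Sketch of patching a 16-bit immediate inside a fixed 32-bit instruction
 * word, as mmu_patch_cmp_limit() does for DTLBMiss_cmp and FixupDAR_cmp.
 * The instruction and limit values below are invented, not real 8xx state.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t patch_cmp_limit(uint32_t instr, uint32_t va_limit)
{
	instr &= 0xffff0000u;		/* keep the opcode and register fields */
	instr |= va_limit >> 16;	/* compare against the upper half of the VA */
	return instr;
}

int main(void)
{
	uint32_t instr = 0x28000000u;	/* pretend compare with a zero immediate */
	uint32_t limit = 0xc1800000u;	/* pretend __va(mapped) for a 24MB mapping */

	printf("patched instruction: 0x%08x\n",
	       (unsigned)patch_cmp_limit(instr, limit));
	return 0;
}
```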
/*
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index 6333b273d2d5..42c702b3be1f 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -70,8 +70,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
- if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
- MMU_PAGE_4K, ssize, flags) == -1)
+ if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
+ MMU_PAGE_4K, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
@@ -84,21 +84,23 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ MMU_PAGE_4K, MMU_PAGE_4K, ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags,
+ HPTE_V_SECONDARY,
+ MMU_PAGE_4K,
+ MMU_PAGE_4K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
*/
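The insertion path above keeps the long-standing retry scheme: try the primary hash group, then the secondary, and if both are full evict an entry and try again. A toy model of that loop, with the group geometry and eviction choice simplified:

```c
/* Toy model of the HPTE insertion retry used throughout the hash fault
 * paths: try the primary group, then the secondary, and if both are full
 * evict an entry and retry.  Group geometry and the eviction choice are
 * simplified stand-ins for the real code.
 */
#include <stdio.h>
#include <stdlib.h>

#define HPTES_PER_GROUP 8

static int try_insert(int *group)	/* returns a slot index or -1 if full */
{
	for (int i = 0; i < HPTES_PER_GROUP; i++) {
		if (!group[i]) {
			group[i] = 1;
			return i;
		}
	}
	return -1;
}

static int insert_hpte(int *primary, int *secondary)
{
	for (;;) {
		int slot = try_insert(primary);
		if (slot != -1)
			return slot;
		slot = try_insert(secondary);
		if (slot != -1)
			return slot | 0x8;	/* remember it went to the secondary group */
		/* Both groups full: evict one entry somewhere and try again. */
		int *victim = (rand() & 1) ? primary : secondary;
		victim[rand() % HPTES_PER_GROUP] = 0;
	}
}

int main(void)
{
	int primary[HPTES_PER_GROUP] = { 0 }, secondary[HPTES_PER_GROUP] = { 0 };

	for (int i = 0; i < 20; i++)
		printf("inserted at slot %d\n", insert_hpte(primary, secondary));
	return 0;
}
```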
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 16644e1f4e6b..3bbbea07378c 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -133,9 +133,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
- MMU_PAGE_4K, MMU_PAGE_4K,
- ssize, flags);
+ ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
+ MMU_PAGE_4K, MMU_PAGE_4K,
+ ssize, flags);
/*
*if we failed because typically the HPTE wasn't really here
* we try an insertion.
@@ -166,21 +166,22 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ MMU_PAGE_4K, MMU_PAGE_4K, ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags, HPTE_V_SECONDARY,
+ MMU_PAGE_4K, MMU_PAGE_4K,
+ ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
*/
@@ -272,8 +273,9 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
- if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
- MMU_PAGE_64K, ssize, flags) == -1)
+ if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
+ MMU_PAGE_64K, ssize,
+ flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
@@ -286,21 +288,24 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ MMU_PAGE_64K, MMU_PAGE_64K,
+ ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags,
+ HPTE_V_SECONDARY,
+ MMU_PAGE_64K,
+ MMU_PAGE_64K, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
/*
* FIXME!! Should be try the group from which we removed ?
*/
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index f8a871a72985..0e4e9654bd2c 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -55,7 +55,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
* We need 14 to 65 bits of va for a tlibe of 4K page
* With vpn we ignore the lower VPN_SHIFT bits already.
* And top two bits are already ignored because we can
- * only accomadate 76 bits in a 64 bit vpn with a VPN_SHIFT
+ * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
* of 12.
*/
va = vpn << VPN_SHIFT;
@@ -64,15 +64,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
* Older versions of the architecture (2.02 and earler) require the
* masking of the top 16 bits.
*/
- va &= ~(0xffffULL << 48);
+ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
+ va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
/* clear out bits after (52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
- sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
- ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+ sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -113,15 +113,15 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
* Older versions of the architecture (2.02 and earler) require the
* masking of the top 16 bits.
*/
- va &= ~(0xffffULL << 48);
+ if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
+ va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
/* clear out bits after(52) [0....52.....63] */
va &= ~((1ul << (64 - 52)) - 1);
va |= ssize << 8;
- sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
- ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+ sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
@@ -605,7 +605,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
* crashdump and all bets are off anyway.
*
* TODO: add batching support when enabled. remember, no dynamic memory here,
- * athough there is the control page available...
+ * although there is the control page available...
*/
static void native_hpte_clear(void)
{
@@ -723,23 +723,29 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_restore(flags);
}
-static int native_update_partition_table(u64 patb1)
+static int native_register_proc_table(unsigned long base, unsigned long page_size,
+ unsigned long table_size)
{
+ unsigned long patb1 = base << 25; /* VSID */
+
+ patb1 |= (page_size << 5); /* sllp */
+ patb1 |= table_size;
+
partition_tb->patb1 = cpu_to_be64(patb1);
return 0;
}
void __init hpte_init_native(void)
{
- ppc_md.hpte_invalidate = native_hpte_invalidate;
- ppc_md.hpte_updatepp = native_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
- ppc_md.hpte_insert = native_hpte_insert;
- ppc_md.hpte_remove = native_hpte_remove;
- ppc_md.hpte_clear_all = native_hpte_clear;
- ppc_md.flush_hash_range = native_flush_hash_range;
- ppc_md.hugepage_invalidate = native_hugepage_invalidate;
+ mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
+ mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
+ mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_insert = native_hpte_insert;
+ mmu_hash_ops.hpte_remove = native_hpte_remove;
+ mmu_hash_ops.hpte_clear_all = native_hpte_clear;
+ mmu_hash_ops.flush_hash_range = native_flush_hash_range;
+ mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
if (cpu_has_feature(CPU_FTR_ARCH_300))
- ppc_md.update_partition_table = native_update_partition_table;
+ register_process_table = native_register_proc_table;
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2971ea18c768..0821556e16f4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -34,6 +34,7 @@
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
+#include <linux/libfdt.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
@@ -58,6 +59,7 @@
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
+#include <asm/ps3.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -87,10 +89,6 @@
*
*/
-#ifdef CONFIG_U3_DART
-extern unsigned long dart_tablebase;
-#endif /* CONFIG_U3_DART */
-
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);
@@ -120,6 +118,8 @@ static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
+struct mmu_hash_ops mmu_hash_ops;
+EXPORT_SYMBOL(mmu_hash_ops);
/* There are definitions of page sizes arrays to be used when none
* is provided by the firmware.
@@ -278,9 +278,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- BUG_ON(!ppc_md.hpte_insert);
- ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
- HPTE_V_BOLTED, psize, psize, ssize);
+ BUG_ON(!mmu_hash_ops.hpte_insert);
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
+ HPTE_V_BOLTED, psize, psize,
+ ssize);
if (ret < 0)
break;
@@ -305,11 +306,11 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
shift = mmu_psize_defs[psize].shift;
step = 1 << shift;
- if (!ppc_md.hpte_removebolted)
+ if (!mmu_hash_ops.hpte_removebolted)
return -ENODEV;
for (vaddr = vstart; vaddr < vend; vaddr += step) {
- rc = ppc_md.hpte_removebolted(vaddr, psize, ssize);
+ rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
if (rc == -ENOENT) {
ret = -ENOENT;
continue;
@@ -321,6 +322,15 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
return ret;
}
+static bool disable_1tb_segments = false;
+
+static int __init parse_disable_1tb_segments(char *p)
+{
+ disable_1tb_segments = true;
+ return 0;
+}
+early_param("disable_1tb_segments", parse_disable_1tb_segments);
+
static int __init htab_dt_scan_seg_sizes(unsigned long node,
const char *uname, int depth,
void *data)
@@ -339,6 +349,12 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
for (; size >= 4; size -= 4, ++prop) {
if (be32_to_cpu(prop[0]) == 40) {
DBG("1T segment support detected\n");
+
+ if (disable_1tb_segments) {
+ DBG("1T segments disabled by command line\n");
+ break;
+ }
+
cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
return 1;
}
@@ -347,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
return 0;
}
-static void __init htab_init_seg_sizes(void)
-{
- of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
-}
-
static int __init get_idx_from_shift(unsigned int shift)
{
int idx = -1;
@@ -514,7 +525,8 @@ static bool might_have_hea(void)
* we will never see an HEA ethernet device.
*/
#ifdef CONFIG_IBMEBUS
- return !cpu_has_feature(CPU_FTR_ARCH_207S);
+ return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ !firmware_has_feature(FW_FEATURE_SPLPAR);
#else
return false;
#endif
@@ -522,7 +534,7 @@ static bool might_have_hea(void)
#endif /* #ifdef CONFIG_PPC_64K_PAGES */
-static void __init htab_init_page_sizes(void)
+static void __init htab_scan_page_sizes(void)
{
int rc;
@@ -537,17 +549,23 @@ static void __init htab_init_page_sizes(void)
* Try to find the available page sizes in the device-tree
*/
rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
- if (rc != 0) /* Found */
- goto found;
-
- /*
- * Not in the device-tree, let's fallback on known size
- * list for 16M capable GP & GR
- */
- if (mmu_has_feature(MMU_FTR_16M_PAGE))
+ if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
+ /*
+ * Nothing in the device-tree, but the CPU supports 16M pages,
+ * so let's fallback on a known size list for 16M capable CPUs.
+ */
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
-found:
+ }
+
+#ifdef CONFIG_HUGETLB_PAGE
+ /* Reserve 16G huge page memory sections for huge pages */
+ of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static void __init htab_init_page_sizes(void)
+{
if (!debug_pagealloc_enabled()) {
/*
* Pick a size for the linear mapping. Currently, we only
@@ -580,7 +598,7 @@ found:
* would stop us accessing the HEA ethernet. So if we
* have the chance of ever seeing one, stay at 4k.
*/
- if (!might_have_hea() || !machine_is(pseries))
+ if (!might_have_hea())
mmu_io_psize = MMU_PAGE_64K;
} else
mmu_ci_restrictions = 1;
@@ -613,11 +631,6 @@ found:
,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
);
-
-#ifdef CONFIG_HUGETLB_PAGE
- /* Reserve 16G huge page memory sections for huge pages */
- of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
@@ -699,10 +712,9 @@ int remove_section_mapping(unsigned long start, unsigned long end)
#endif /* CONFIG_MEMORY_HOTPLUG */
static void __init hash_init_partition_table(phys_addr_t hash_table,
- unsigned long pteg_count)
+ unsigned long htab_size)
{
unsigned long ps_field;
- unsigned long htab_size;
unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
/*
@@ -710,7 +722,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
* We can ignore that for lpid 0
*/
ps_field = 0;
- htab_size = __ilog2(pteg_count) - 11;
+ htab_size = __ilog2(htab_size) - 18;
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
@@ -724,7 +736,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
* For now UPRT is 0 for us.
*/
partition_tb->patb1 = 0;
- DBG("Partition table %p\n", partition_tb);
+ pr_info("Partition table %p\n", partition_tb);
/*
* update partition table control register,
* 64 K size.
@@ -738,17 +750,11 @@ static void __init htab_initialize(void)
unsigned long table;
unsigned long pteg_count;
unsigned long prot;
- unsigned long base = 0, size = 0, limit;
+ unsigned long base = 0, size = 0;
struct memblock_region *reg;
DBG(" -> htab_initialize()\n");
- /* Initialize segment sizes */
- htab_init_seg_sizes();
-
- /* Initialize page sizes */
- htab_init_page_sizes();
-
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
mmu_kernel_ssize = MMU_SEGSIZE_1T;
mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -764,7 +770,8 @@ static void __init htab_initialize(void)
htab_hash_mask = pteg_count - 1;
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR) ||
+ firmware_has_feature(FW_FEATURE_PS3_LV1)) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
@@ -775,20 +782,26 @@ static void __init htab_initialize(void)
* Clear the htab if firmware assisted dump is active so
* that we dont end up using old mappings.
*/
- if (is_fadump_active() && ppc_md.hpte_clear_all)
- ppc_md.hpte_clear_all();
+ if (is_fadump_active() && mmu_hash_ops.hpte_clear_all)
+ mmu_hash_ops.hpte_clear_all();
#endif
} else {
- /* Find storage for the HPT. Must be contiguous in
- * the absolute address space. On cell we want it to be
- * in the first 2 Gig so we can use it for IOMMU hacks.
+ unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
+
+#ifdef CONFIG_PPC_CELL
+ /*
+ * Cell may require the hash table down low when using the
+ * Axon IOMMU in order to fit the dynamic region over it, see
+ * comments in cell/iommu.c
*/
- if (machine_is(cell))
+ if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
limit = 0x80000000;
- else
- limit = MEMBLOCK_ALLOC_ANYWHERE;
+ pr_info("Hash table forced below 2G for Axon IOMMU\n");
+ }
+#endif /* CONFIG_PPC_CELL */
- table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+ table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
+ limit);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
@@ -796,7 +809,7 @@ static void __init htab_initialize(void)
htab_address = __va(table);
/* htab absolute addr + encoded htabsize */
- _SDR1 = table + __ilog2(pteg_count) - 11;
+ _SDR1 = table + __ilog2(htab_size_bytes) - 18;
/* Initialize the HPT with no entries */
memset((void *)table, 0, htab_size_bytes);
@@ -805,7 +818,7 @@ static void __init htab_initialize(void)
/* Set SDR1 */
mtspr(SPRN_SDR1, _SDR1);
else
- hash_init_partition_table(table, pteg_count);
+ hash_init_partition_table(table, htab_size_bytes);
}
prot = pgprot_val(PAGE_KERNEL);
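The switch from __ilog2(pteg_count) - 11 to __ilog2(htab_size_bytes) - 18 encodes the same HTABSIZE value provided a PTEG is 8 HPTEs of 16 bytes, i.e. 128 bytes, so log2(bytes) = log2(ptegs) + 7. A quick standalone check of that identity, with the 128-byte PTEG size stated here as an assumption:

```c
/* Check that __ilog2(pteg_count) - 11 equals __ilog2(pteg_count * 128) - 18,
 * i.e. that the old and new size encodings agree, assuming a PTEG is
 * 8 HPTEs * 16 bytes = 128 bytes.
 */
#include <assert.h>
#include <stdio.h>

static int ilog2_ull(unsigned long long x)
{
	return 63 - __builtin_clzll(x);
}

int main(void)
{
	/* Walk power-of-two PTEG counts from 2^11 (a 256 KiB HPT) upwards. */
	for (int shift = 11; shift <= 40; shift++) {
		unsigned long long ptegs = 1ULL << shift;
		unsigned long long bytes = ptegs * 128;

		assert(ilog2_ull(ptegs) - 11 == ilog2_ull(bytes) - 18);
	}
	printf("old and new encodings agree\n");
	return 0;
}
```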
@@ -832,34 +845,6 @@ static void __init htab_initialize(void)
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
base, size, prot);
-#ifdef CONFIG_U3_DART
- /* Do not map the DART space. Fortunately, it will be aligned
- * in such a way that it will not cross two memblock regions and
- * will fit within a single 16Mb page.
- * The DART space is assumed to be a full 16Mb region even if
- * we only use 2Mb of that space. We will use more of it later
- * for AGP GART. We have to use a full 16Mb large page.
- */
- DBG("DART base: %lx\n", dart_tablebase);
-
- if (dart_tablebase != 0 && dart_tablebase >= base
- && dart_tablebase < (base + size)) {
- unsigned long dart_table_end = dart_tablebase + 16 * MB;
- if (base != dart_tablebase)
- BUG_ON(htab_bolt_mapping(base, dart_tablebase,
- __pa(base), prot,
- mmu_linear_psize,
- mmu_kernel_ssize));
- if ((base + size) > dart_table_end)
- BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
- base + size,
- __pa(dart_table_end),
- prot,
- mmu_linear_psize,
- mmu_kernel_ssize));
- continue;
- }
-#endif /* CONFIG_U3_DART */
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
prot, mmu_linear_psize, mmu_kernel_ssize));
}
@@ -890,8 +875,19 @@ static void __init htab_initialize(void)
#undef KB
#undef MB
+void __init hash__early_init_devtree(void)
+{
+ /* Initialize segment sizes */
+ of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+
+ /* Initialize page sizes */
+ htab_scan_page_sizes();
+}
+
void __init hash__early_init_mmu(void)
{
+ htab_init_page_sizes();
+
/*
* initialize page table size
*/
@@ -926,12 +922,24 @@ void __init hash__early_init_mmu(void)
pci_io_base = ISA_IO_BASE;
#endif
+ /* Select appropriate backend */
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1))
+ ps3_early_mm_init();
+ else if (firmware_has_feature(FW_FEATURE_LPAR))
+ hpte_init_pseries();
+ else if (IS_ENABLED(CONFIG_PPC_NATIVE))
+ hpte_init_native();
+
+ if (!mmu_hash_ops.hpte_insert)
+ panic("hash__early_init_mmu: No MMU hash ops defined!\n");
+
/* Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
*/
htab_initialize();
+ pr_info("Initializing hash mmu with SLB\n");
/* Initialize SLB management */
slb_initialize();
}
@@ -1474,7 +1482,8 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
* We use same base page size and actual psize, because we don't
* use these functions for hugepage
*/
- ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local);
+ mmu_hash_ops.hpte_invalidate(slot, vpn, psize, psize,
+ ssize, local);
} pte_iterate_hashed_end();
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1515,9 +1524,9 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
if (!hpte_slot_array)
return;
- if (ppc_md.hugepage_invalidate) {
- ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
- psize, ssize, local);
+ if (mmu_hash_ops.hugepage_invalidate) {
+ mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
+ psize, ssize, local);
goto tm_abort;
}
/*
@@ -1544,8 +1553,8 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, psize,
- MMU_PAGE_16M, ssize, local);
+ mmu_hash_ops.hpte_invalidate(slot, vpn, psize,
+ MMU_PAGE_16M, ssize, local);
}
tm_abort:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1569,8 +1578,8 @@ tm_abort:
void flush_hash_range(unsigned long number, int local)
{
- if (ppc_md.flush_hash_range)
- ppc_md.flush_hash_range(number, local);
+ if (mmu_hash_ops.flush_hash_range)
+ mmu_hash_ops.flush_hash_range(number, local);
else {
int i;
struct ppc64_tlb_batch *batch =
@@ -1615,22 +1624,22 @@ repeat:
HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
- psize, psize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
+ psize, psize, ssize);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
- vflags | HPTE_V_SECONDARY,
- psize, psize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
+ vflags | HPTE_V_SECONDARY,
+ psize, psize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP)&~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
}
}
@@ -1680,8 +1689,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize,
- mmu_kernel_ssize, 0);
+ mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
+ mmu_linear_psize,
+ mmu_kernel_ssize, 0);
}
void __kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index ba3fc229468a..f20d16f849c5 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -103,8 +103,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
- psize, lpsize, ssize, flags);
+ ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
+ psize, lpsize, ssize, flags);
/*
* We failed to update, try to insert a new entry.
*/
@@ -131,23 +131,24 @@ repeat:
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
- psize, lpsize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ psize, lpsize, ssize);
/*
* Primary is full, try the secondary
*/
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
- rflags, HPTE_V_SECONDARY,
- psize, lpsize, ssize);
+ slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
+ rflags,
+ HPTE_V_SECONDARY,
+ psize, lpsize, ssize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- ppc_md.hpte_remove(hpte_group);
+ mmu_hash_ops.hpte_remove(hpte_group);
goto repeat;
}
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index 3058560b6121..d5026f3800b6 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -79,8 +79,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
- if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
- mmu_psize, ssize, flags) == -1)
+ if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, mmu_psize,
+ mmu_psize, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 1e11559e1aac..35254a678456 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -5,39 +5,34 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
+#include <asm/tlb.h>
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
- unsigned long ap, shift;
+ int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
- shift = huge_page_shift(hstate);
- if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
- ap = mmu_get_ap(MMU_PAGE_2M);
- else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
- ap = mmu_get_ap(MMU_PAGE_1G);
- else {
- WARN(1, "Wrong huge page shift\n");
- return ;
- }
- radix___flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
+ psize = hstate_get_psize(hstate);
+ radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
- unsigned long ap, shift;
+ int psize;
struct hstate *hstate = hstate_file(vma->vm_file);
- shift = huge_page_shift(hstate);
- if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
- ap = mmu_get_ap(MMU_PAGE_2M);
- else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
- ap = mmu_get_ap(MMU_PAGE_1G);
- else {
- WARN(1, "Wrong huge page shift\n");
- return ;
- }
- radix___local_flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
+ psize = hstate_get_psize(hstate);
+ radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
+}
+
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ int psize;
+ struct hstate *hstate = hstate_file(vma->vm_file);
+
+ psize = hstate_get_psize(hstate);
+ radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
/*
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 119d18611500..7372ee13eb1e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -81,6 +81,13 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (! new)
return -ENOMEM;
+ /*
+ * Make sure other cpus find the hugepd set only after a
+ * properly initialized page table is visible to them.
+ * For more details look for comment in __pte_alloc().
+ */
+ smp_wmb();
+
spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index c899fe340bbd..448685fbf27c 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL(kernstart_addr);
-#ifdef CONFIG_RELOCATABLE_PPC32
+#ifdef CONFIG_RELOCATABLE
/* Used in __va()/__pa() */
long long virt_phys_offset;
EXPORT_SYMBOL(virt_phys_offset);
@@ -80,9 +80,6 @@ EXPORT_SYMBOL(agp_special_page);
void MMU_init(void);
-/* XXX should be in current.h -- paulus */
-extern struct task_struct *current_set[NR_CPUS];
-
/*
* this tells the system to map all of ram with the segregs
* (i.e. page tables) instead of the bats.
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 33709bdb0419..16ada1eb7e26 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -411,3 +411,25 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+static bool disable_radix;
+static int __init parse_disable_radix(char *p)
+{
+ disable_radix = true;
+ return 0;
+}
+early_param("disable_radix", parse_disable_radix);
+
+void __init mmu_early_init_devtree(void)
+{
+ /* Disable radix mode based on kernel command line. */
+ if (disable_radix)
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+
+ if (early_radix_enabled())
+ radix__early_init_devtree();
+ else
+ hash__early_init_devtree();
+}
+#endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2fd57fa48429..5f844337de21 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -116,6 +116,16 @@ int memory_add_physaddr_to_nid(u64 start)
}
#endif
+int __weak create_section_mapping(unsigned long start, unsigned long end)
+{
+ return -ENODEV;
+}
+
+int __weak remove_section_mapping(unsigned long start, unsigned long end)
+{
+ return -ENODEV;
+}
+
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
struct pglist_data *pgdata;
@@ -239,8 +249,14 @@ static int __init mark_nonram_nosave(void)
static bool zone_limits_final;
+/*
+ * The memory zones past TOP_ZONE are managed by generic mm code.
+ * These should be set to zero since that's what every other
+ * architecture does.
+ */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- [0 ... MAX_NR_ZONES - 1] = ~0UL
+ [0 ... TOP_ZONE ] = ~0UL,
+ [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};
/*
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 196222227e82..b114f8b93ec9 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -181,7 +181,10 @@ void destroy_context(struct mm_struct *mm)
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
- mtspr(SPRN_PID, next->context.id);
asm volatile("isync": : :"memory");
+ mtspr(SPRN_PID, next->context.id);
+ asm volatile("isync \n"
+ PPC_SLBIA(0x7)
+ : : :"memory");
}
#endif
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 6af65327c993..f988db655e5b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -154,9 +154,10 @@ struct tlbcam {
};
#endif
-#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
/* 6xx have BATS */
/* FSL_BOOKE have TLBCAM */
+/* 8xx have LTLB */
phys_addr_t v_block_mapped(unsigned long va);
unsigned long p_block_mapped(phys_addr_t pa);
#else
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6dc07ddbfd04..75b9cd6150cc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1153,18 +1153,34 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
static u64 hot_add_drconf_memory_max(void)
{
- struct device_node *memory = NULL;
- unsigned int drconf_cell_cnt = 0;
- u64 lmb_size = 0;
+ struct device_node *memory = NULL;
+ struct device_node *dn = NULL;
+ unsigned int drconf_cell_cnt = 0;
+ u64 lmb_size = 0;
const __be32 *dm = NULL;
+ const __be64 *lrdr = NULL;
+ struct of_drconf_cell drmem;
+
+ dn = of_find_node_by_path("/rtas");
+ if (dn) {
+ lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
+ of_node_put(dn);
+ if (lrdr)
+ return be64_to_cpup(lrdr);
+ }
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+ lmb_size = of_get_lmb_size(memory);
- memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
- if (memory) {
- drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
- lmb_size = of_get_lmb_size(memory);
- of_node_put(memory);
- }
- return lmb_size * drconf_cell_cnt;
+ /* Advance to the last cell, each cell has 6 32 bit integers */
+ dm += (drconf_cell_cnt - 1) * 6;
+ read_drconf_cell(&drmem, &dm);
+ of_node_put(memory);
+ return drmem.base_addr + lmb_size;
+ }
+ return 0;
}
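The rewritten hot_add_drconf_memory_max() stops multiplying the LMB size by the cell count, which undershoots when the dynamic-reconfiguration region does not start at address zero, and instead returns either ibm,lrdr-capacity or the end address of the last LMB. A small sketch of the difference, with invented LMB addresses:

```c
/* Contrast the old and new upper-bound calculations for hot-addable memory.
 * The LMB list and sizes below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_lmb { uint64_t base_addr; };

int main(void)
{
	const uint64_t lmb_size = 256ULL << 20;		/* 256 MiB LMBs */
	/* Dynamic-reconfiguration LMBs that do not start at address 0. */
	struct toy_lmb lmbs[] = {
		{ 0x100000000ULL }, { 0x110000000ULL }, { 0x120000000ULL },
	};
	int n = sizeof(lmbs) / sizeof(lmbs[0]);

	uint64_t old_max = lmb_size * n;			/* former calculation */
	uint64_t new_max = lmbs[n - 1].base_addr + lmb_size;	/* end of the last LMB */

	printf("old: 0x%llx  new: 0x%llx\n",
	       (unsigned long long)old_max, (unsigned long long)new_max);
	return 0;
}
```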
/*
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 670318766545..34079302cc17 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -14,6 +14,9 @@
#include "mmu_decl.h"
#include <trace/events/thp.h>
+int (*register_process_table)(unsigned long base, unsigned long page_size,
+ unsigned long tbl_size);
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* This is called when relaxing access to a hugepage. It's also called in the page
@@ -33,7 +36,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
changed = !pmd_same(*(pmdp), entry);
if (changed) {
__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
@@ -66,7 +69,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/*
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 7931e1496f0d..af897d91d09f 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -21,8 +21,11 @@
#include <trace/events/thp.h>
-static int native_update_partition_table(u64 patb1)
+static int native_register_process_table(unsigned long base, unsigned long pg_sz,
+ unsigned long table_size)
{
+ unsigned long patb1 = base | table_size | PATB_GR;
+
partition_tb->patb1 = cpu_to_be64(patb1);
return 0;
}
@@ -168,7 +171,7 @@ redo:
 * of process table here. But our linear mapping also enables us to use
* physical address here.
*/
- ppc_md.update_partition_table(__pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR);
+ register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
}
@@ -182,7 +185,8 @@ static void __init radix_init_partition_table(void)
partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
RADIX_PGD_INDEX_SIZE | PATB_HR);
- printk("Partition table %p\n", partition_tb);
+ pr_info("Initializing Radix MMU\n");
+ pr_info("Partition table %p\n", partition_tb);
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
/*
@@ -194,7 +198,7 @@ static void __init radix_init_partition_table(void)
void __init radix_init_native(void)
{
- ppc_md.update_partition_table = native_update_partition_table;
+ register_process_table = native_register_process_table;
}
static int __init get_idx_from_shift(unsigned int shift)
@@ -260,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
return 1;
}
-static void __init radix_init_page_sizes(void)
+void __init radix__early_init_devtree(void)
{
int rc;
@@ -339,10 +343,10 @@ void __init radix__early_init_mmu(void)
__pte_frag_nr = H_PTE_FRAG_NR;
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
- radix_init_page_sizes();
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ radix_init_native();
lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+ mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
}
@@ -357,7 +361,7 @@ void radix__early_init_mmu_secondary(void)
*/
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+ mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
mtspr(SPRN_PTCR,
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 88a307504b5a..0b6fb244d0a1 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -225,7 +225,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
if (!is_vm_hugetlb_page(vma))
assert_pte_locked(vma->vm_mm, address);
__ptep_set_access_flags(ptep, entry);
- flush_tlb_page_nohash(vma, address);
+ flush_tlb_page(vma, address);
}
return changed;
}
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 7f922f557936..0ae0572bc239 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -79,7 +79,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
-__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index ab2f60e812e2..48df05ef5231 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
+#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
@@ -34,8 +35,7 @@ static inline void __tlbiel_pid(unsigned long pid, int set,
	r = 1; /* radix format */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
}
@@ -63,8 +63,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
	r = 1; /* radix format */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -81,8 +80,7 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
r = 1; /* raidx format */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
}
@@ -99,8 +97,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
	r = 1; /* radix format */
asm volatile("ptesync": : :"memory");
- asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
- "(%2 << 17) | (%3 << 18) | (%4 << 21)"
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
@@ -143,10 +140,11 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
-void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- unsigned long ap, int nid)
+void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ int psize)
{
unsigned long pid;
+ unsigned long ap = mmu_get_ap(psize);
preempt_disable();
pid = mm ? mm->context.id : 0;
@@ -162,18 +160,12 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
if (vma && is_vm_hugetlb_page(vma))
return __local_flush_hugetlb_page(vma, vmaddr);
#endif
- radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
- mmu_get_ap(mmu_virtual_psize), 0);
+ radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+ mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
#ifdef CONFIG_SMP
-static int mm_is_core_local(struct mm_struct *mm)
-{
- return cpumask_subset(mm_cpumask(mm),
- topology_sibling_cpumask(smp_processor_id()));
-}
-
void radix__flush_tlb_mm(struct mm_struct *mm)
{
unsigned long pid;
@@ -224,10 +216,11 @@ no_context:
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
-void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
- unsigned long ap, int nid)
+void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+ int psize)
{
unsigned long pid;
+ unsigned long ap = mmu_get_ap(psize);
preempt_disable();
pid = mm ? mm->context.id : 0;
@@ -253,8 +246,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
if (vma && is_vm_hugetlb_page(vma))
return flush_hugetlb_page(vma, vmaddr);
#endif
- radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
- mmu_get_ap(mmu_virtual_psize), 0);
+ radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+ mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);
@@ -285,9 +278,125 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
EXPORT_SYMBOL(radix__flush_tlb_range);
+static int radix_get_mmu_psize(int page_size)
+{
+ int psize;
+
+ if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
+ psize = mmu_virtual_psize;
+ else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
+ psize = MMU_PAGE_2M;
+ else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
+ psize = MMU_PAGE_1G;
+ else
+ return -1;
+ return psize;
+}
void radix__tlb_flush(struct mmu_gather *tlb)
{
+ int psize = 0;
struct mm_struct *mm = tlb->mm;
- radix__flush_tlb_mm(mm);
+ int page_size = tlb->page_size;
+
+ psize = radix_get_mmu_psize(page_size);
+ /*
+ * if page size is not something we understand, do a full mm flush
+ */
+ if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
+ radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
+ else
+ radix__flush_tlb_mm(mm);
+}
+
+#define TLB_FLUSH_ALL -1UL
+/*
+ * Number of pages above which we will do a broadcast tlbie. For now
+ * this is just a number copied from x86.
+ */
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+
+void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize)
+{
+ unsigned long pid;
+ unsigned long addr;
+ int local = mm_is_core_local(mm);
+ unsigned long ap = mmu_get_ap(psize);
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+ unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+
+
+ preempt_disable();
+ pid = mm ? mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto err_out;
+
+ if (end == TLB_FLUSH_ALL ||
+ (end - start) > tlb_single_page_flush_ceiling * page_size) {
+ if (local)
+ _tlbiel_pid(pid, RIC_FLUSH_TLB);
+ else
+ _tlbie_pid(pid, RIC_FLUSH_TLB);
+ goto err_out;
+ }
+ for (addr = start; addr < end; addr += page_size) {
+
+ if (local)
+ _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+ else {
+ if (lock_tlbie)
+ raw_spin_lock(&native_tlbie_lock);
+ _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+ if (lock_tlbie)
+ raw_spin_unlock(&native_tlbie_lock);
+ }
+ }
+err_out:
+ preempt_enable();
+}
+
+void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
+ unsigned long page_size)
+{
+ unsigned long rb,rs,prs,r;
+ unsigned long ap;
+ unsigned long ric = RIC_FLUSH_TLB;
+
+ ap = mmu_get_ap(radix_get_mmu_psize(page_size));
+ rb = gpa & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = lpid & ((1UL << 32) - 1);
+ prs = 0; /* process scoped */
+	r = 1; /* radix format */
+
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
+EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
+
+void radix__flush_tlb_lpid(unsigned long lpid)
+{
+ unsigned long rb,rs,prs,r;
+ unsigned long ric = RIC_FLUSH_ALL;
+
+ rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
+ rs = lpid & ((1UL << 32) - 1);
+ prs = 0; /* partition scoped */
+	r = 1; /* radix format */
+
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+}
+EXPORT_SYMBOL(radix__flush_tlb_lpid);
+
+void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
+EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
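
radix__flush_tlb_range_psize() above picks between one PID-wide flush and per-page tlbie/tlbiel based on a page-count ceiling borrowed from x86, and between local and broadcast invalidation via mm_is_core_local() (whose duplicated copies are removed further down, presumably in favour of a shared helper). A minimal sketch of the ceiling decision, with the tunable and page size passed in as plain parameters rather than the kernel's variables:

#include <stdbool.h>
#include <stdio.h>

#define TLB_FLUSH_ALL (~0UL)

/* Mirrors the heuristic in radix__flush_tlb_range_psize(); 33 is the
 * same "copied from x86" starting point used in the patch. */
static bool should_flush_whole_pid(unsigned long start, unsigned long end,
				   unsigned long page_size,
				   unsigned long ceiling_pages)
{
	if (end == TLB_FLUSH_ALL)
		return true;
	return (end - start) > ceiling_pages * page_size;
}

int main(void)
{
	unsigned long psz = 1UL << 16;	/* 64K pages, as an example */

	/* 16 pages: under the ceiling, invalidate page by page. */
	printf("16 pages -> %s\n",
	       should_flush_whole_pid(0, 16 * psz, psz, 33) ?
	       "one PID-wide RIC_FLUSH_TLB" : "per-page tlbie/tlbiel");
	/* 64 pages: over the ceiling, one flush for the whole PID. */
	printf("64 pages -> %s\n",
	       should_flush_whole_pid(0, 64 * psz, psz, 33) ?
	       "one PID-wide RIC_FLUSH_TLB" : "per-page tlbie/tlbiel");
	return 0;
}
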
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index 558e30cce33e..702d7689d714 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -49,17 +49,6 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
EXPORT_SYMBOL(flush_hash_entry);
/*
- * Called by ptep_set_access_flags, must flush on CPUs for which the
- * DSI handler can't just "fixup" the TLB on a write fault
- */
-void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
-{
- if (Hash != 0)
- return;
- _tlbie(addr);
-}
-
-/*
* Called at the end of a mmu_gather operation to make sure the
* TLB flush is completely done.
*/
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index f4668488512c..050badc0ebd3 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -215,12 +215,6 @@ EXPORT_SYMBOL(local_flush_tlb_page);
static DEFINE_RAW_SPINLOCK(tlbivax_lock);
-static int mm_is_core_local(struct mm_struct *mm)
-{
- return cpumask_subset(mm_cpumask(mm),
- topology_sibling_cpumask(smp_processor_id()));
-}
-
struct tlb_flush_param {
unsigned long addr;
unsigned int pid;
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 1306a58ac541..c1ff16a6eb51 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -1,4 +1,8 @@
#
# Arch-specific network modules
#
+ifeq ($(CONFIG_PPC64),y)
+obj-$(CONFIG_BPF_JIT) += bpf_jit_asm64.o bpf_jit_comp64.o
+else
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
+endif
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 889fd199a821..d5301b6f20d0 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -1,6 +1,8 @@
-/* bpf_jit.h: BPF JIT compiler for PPC64
+/*
+ * bpf_jit.h: BPF JIT compiler for PPC
*
* Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ * 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -10,67 +12,11 @@
#ifndef _BPF_JIT_H
#define _BPF_JIT_H
-#ifdef CONFIG_PPC64
-#define BPF_PPC_STACK_R3_OFF 48
-#define BPF_PPC_STACK_LOCALS 32
-#define BPF_PPC_STACK_BASIC (48+64)
-#define BPF_PPC_STACK_SAVE (18*8)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (48+64)
-#else
-#define BPF_PPC_STACK_R3_OFF 24
-#define BPF_PPC_STACK_LOCALS 16
-#define BPF_PPC_STACK_BASIC (24+32)
-#define BPF_PPC_STACK_SAVE (18*4)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (24+32)
-#endif
-
-#define REG_SZ (BITS_PER_LONG/8)
-
-/*
- * Generated code register usage:
- *
- * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
- *
- * skb r3 (Entry parameter)
- * A register r4
- * X register r5
- * addr param r6
- * r7-r10 scratch
- * skb->data r14
- * skb headlen r15 (skb->len - skb->data_len)
- * m[0] r16
- * m[...] ...
- * m[15] r31
- */
-#define r_skb 3
-#define r_ret 3
-#define r_A 4
-#define r_X 5
-#define r_addr 6
-#define r_scratch1 7
-#define r_scratch2 8
-#define r_D 14
-#define r_HL 15
-#define r_M 16
-
#ifndef __ASSEMBLY__
-/*
- * Assembly helpers from arch/powerpc/net/bpf_jit.S:
- */
-#define DECLARE_LOAD_FUNC(func) \
- extern u8 func[], func##_negative_offset[], func##_positive_offset[]
-
-DECLARE_LOAD_FUNC(sk_load_word);
-DECLARE_LOAD_FUNC(sk_load_half);
-DECLARE_LOAD_FUNC(sk_load_byte);
-DECLARE_LOAD_FUNC(sk_load_byte_msh);
+#include <asm/types.h>
-#ifdef CONFIG_PPC64
+#ifdef PPC64_ELF_ABI_v1
#define FUNCTION_DESCR_SIZE 24
#else
#define FUNCTION_DESCR_SIZE 0
@@ -83,7 +29,7 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
*/
#define IMM_H(i) ((uintptr_t)(i)>>16)
#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
- (((uintptr_t)(i) & 0x8000) >> 15))
+ (((uintptr_t)(i) & 0x8000) >> 15))
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define PLANT_INSTR(d, idx, instr) \
@@ -99,16 +45,20 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_MR(d, a) PPC_OR(d, a, a)
#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
- ___PPC_RS(d) | ___PPC_RA(a) | IMM_L(i))
+ ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
+ ___PPC_RA(base) | IMM_L(i))
#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
+ ___PPC_RA(base) | IMM_L(i))
+#define PPC_STH(r, base, i) EMIT(PPC_INST_STH | ___PPC_RS(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define PPC_STB(r, base, i) EMIT(PPC_INST_STB | ___PPC_RS(r) | \
+ ___PPC_RA(base) | IMM_L(i))
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
@@ -120,6 +70,19 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(base) | IMM_L(i))
#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_LDBRX(r, base, b) EMIT(PPC_INST_LDBRX | ___PPC_RT(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
+
+#define PPC_BPF_LDARX(t, a, b, eh) EMIT(PPC_INST_LDARX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b) | \
+ __PPC_EH(eh))
+#define PPC_BPF_LWARX(t, a, b, eh) EMIT(PPC_INST_LWARX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b) | \
+ __PPC_EH(eh))
+#define PPC_BPF_STWCX(s, a, b) EMIT(PPC_INST_STWCX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
#ifdef CONFIG_PPC64
#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
@@ -131,56 +94,26 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
#endif
-/* Convenience helpers for the above with 'far' offsets: */
-#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LBZ(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LD(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LWZ(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LHZ(r, r, IMM_L(i)); } } while(0)
-
-#ifdef CONFIG_PPC64
-#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
-#else
-#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
- PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
- } while (0)
-#else
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
- PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
- offsetof(struct thread_info, cpu)); \
- } while(0)
-#endif
-#else
-#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
-#endif
-
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
+ ___PPC_RB(b))
+#define PPC_CMPD(a, b) EMIT(PPC_INST_CMPD | ___PPC_RA(a) | \
+ ___PPC_RB(b))
#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_CMPLDI(a, i) EMIT(PPC_INST_CMPLDI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | \
+ ___PPC_RB(b))
+#define PPC_CMPLD(a, b) EMIT(PPC_INST_CMPLD | ___PPC_RA(a) | \
+ ___PPC_RB(b))
#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \
___PPC_RB(a) | ___PPC_RA(b))
#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \
+#define PPC_MULD(d, a, b) EMIT(PPC_INST_MULLD | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_MULW(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
@@ -188,6 +121,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RA(a) | IMM_L(i))
#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(b))
#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | ___PPC_RA(d) | \
@@ -196,6 +131,7 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RS(a) | ___PPC_RB(b))
#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_MR(d, a) PPC_OR(d, a, a)
#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | ___PPC_RA(d) | \
___PPC_RS(a) | IMM_L(i))
#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \
@@ -206,22 +142,43 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
___PPC_RS(a) | IMM_L(i))
#define PPC_XORIS(d, a, i) EMIT(PPC_INST_XORIS | ___PPC_RA(d) | \
___PPC_RS(a) | IMM_L(i))
+#define PPC_EXTSW(d, a) EMIT(PPC_INST_EXTSW | ___PPC_RA(d) | \
+ ___PPC_RS(a))
#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SLD(d, a, s) EMIT(PPC_INST_SLD | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRADI(d, a, i) EMIT(PPC_INST_SRADI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ (((i) & 0x20) >> 4))
+#define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLDICL(d, a, i, mb) EMIT(PPC_INST_RLDICL | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_MB64(mb) | (((i) & 0x20) >> 4))
+#define PPC_RLDICR(d, a, i, me) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
+ __PPC_ME64(me) | (((i) & 0x20) >> 4))
+
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
-#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i) | \
- __PPC_MB(0) | __PPC_ME(31-(i)))
+#define PPC_SLWI(d, a, i) PPC_RLWINM(d, a, i, 0, 31-(i))
/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
-#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(32-(i)) | \
- __PPC_MB(i) | __PPC_ME(31))
+#define PPC_SRWI(d, a, i) PPC_RLWINM(d, a, 32-(i), i, 31)
/* sldi = rldicr Rx, Ry, n, 63-n */
-#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i) | \
- __PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
+#define PPC_SLDI(d, a, i) PPC_RLDICR(d, a, i, 63-(i))
+/* srdi = rldicl Rx, Ry, 64-n, n */
+#define PPC_SRDI(d, a, i) PPC_RLDICL(d, a, 64-(i), i)
+
#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a))
/* Long jump; (unconditional 'branch') */
@@ -232,25 +189,37 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
(((cond) & 0x3ff) << 16) | \
(((dest) - (ctx->idx * 4)) & \
0xfffc))
-#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \
- if ((u32)(uintptr_t)(i) >= 32768) { \
- PPC_ADDIS(d, d, IMM_HA(i)); \
+/* Sign-extended 32-bit immediate load */
+#define PPC_LI32(d, i) do { \
+ if ((int)(uintptr_t)(i) >= -32768 && \
+ (int)(uintptr_t)(i) < 32768) \
+ PPC_LI(d, i); \
+ else { \
+ PPC_LIS(d, IMM_H(i)); \
+ if (IMM_L(i)) \
+ PPC_ORI(d, d, IMM_L(i)); \
} } while(0)
+
#define PPC_LI64(d, i) do { \
- if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \
+ if ((long)(i) >= -2147483648 && \
+ (long)(i) < 2147483648) \
PPC_LI32(d, i); \
else { \
- PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
- if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
- PPC_ORI(d, d, \
- ((uintptr_t)(i) >> 32) & 0xffff); \
+ if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \
+ PPC_LI(d, ((uintptr_t)(i) >> 32) & 0xffff); \
+ else { \
+ PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
+ if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
+ PPC_ORI(d, d, \
+ ((uintptr_t)(i) >> 32) & 0xffff); \
+ } \
PPC_SLDI(d, d, 32); \
if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
PPC_ORIS(d, d, \
((uintptr_t)(i) >> 16) & 0xffff); \
if ((uintptr_t)(i) & 0x000000000000ffffULL) \
PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
- } } while (0);
+ } } while (0)
#ifdef CONFIG_PPC64
#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
@@ -258,14 +227,6 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
-#define PPC_LHBRX_OFFS(r, base, i) \
- do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
-#ifdef __LITTLE_ENDIAN__
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
-#else
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
-#endif
-
static inline bool is_nearbranch(int offset)
{
return (offset < 32768) && (offset >= -32768);
@@ -302,18 +263,6 @@ static inline bool is_nearbranch(int offset)
#define COND_NE (CR0_EQ | COND_CMP_FALSE)
#define COND_LT (CR0_LT | COND_CMP_TRUE)
-#define SEEN_DATAREF 0x10000 /* might call external helpers */
-#define SEEN_XREG 0x20000 /* X reg is used */
-#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
- * storage */
-#define SEEN_MEM_MSK 0x0ffff
-
-struct codegen_context {
- unsigned int seen;
- unsigned int idx;
- int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
-};
-
#endif
#endif
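
The reworked PPC_LI32/PPC_LI64 macros above build an immediate from at most lis/ori/sldi/oris/ori, emitting a bare li when the value fits in 16 sign-extended bits and skipping any all-zero pieces. The snippet below replays that decomposition in plain C and prints the chosen sequence; it is an illustration of the selection logic, not an assembler, and the mnemonics are only labels.

#include <stdint.h>
#include <stdio.h>

/* Print the instruction sequence PPC_LI64 would emit for 'imm'. */
static void show_li64(int64_t imm)
{
	if (imm >= -32768 && imm < 32768) {		/* fits a single li */
		printf("li    r, %lld\n", (long long)imm);
		return;
	}
	if (imm >= INT32_MIN && imm < 2147483648LL) {	/* PPC_LI32 path */
		printf("lis   r, 0x%04x\n", (uint16_t)((uint64_t)imm >> 16));
		if ((uint16_t)imm)
			printf("ori   r, r, 0x%04x\n", (uint16_t)imm);
		return;
	}
	if (!((uint64_t)imm & 0xffff800000000000ULL))	/* top 17 bits clear */
		printf("li    r, 0x%04x\n", (uint16_t)((uint64_t)imm >> 32));
	else {
		printf("lis   r, 0x%04x\n", (uint16_t)((uint64_t)imm >> 48));
		if ((uint64_t)imm & 0x0000ffff00000000ULL)
			printf("ori   r, r, 0x%04x\n",
			       (uint16_t)((uint64_t)imm >> 32));
	}
	printf("sldi  r, r, 32\n");
	if ((uint64_t)imm & 0x00000000ffff0000ULL)
		printf("oris  r, r, 0x%04x\n", (uint16_t)((uint64_t)imm >> 16));
	if ((uint64_t)imm & 0x000000000000ffffULL)
		printf("ori   r, r, 0x%04x\n", (uint16_t)imm);
}

int main(void)
{
	show_li64(0x1234);			/* single li */
	show_li64(0x12345678);			/* lis + ori */
	show_li64(0x123456789abcdef0LL);	/* full 5-instruction sequence */
	return 0;
}
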
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
new file mode 100644
index 000000000000..a8cd7e289ecd
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -0,0 +1,139 @@
+/*
+ * bpf_jit32.h: BPF JIT compiler for PPC
+ *
+ * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ *
+ * Split from bpf_jit.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _BPF_JIT32_H
+#define _BPF_JIT32_H
+
+#include "bpf_jit.h"
+
+#ifdef CONFIG_PPC64
+#define BPF_PPC_STACK_R3_OFF 48
+#define BPF_PPC_STACK_LOCALS 32
+#define BPF_PPC_STACK_BASIC (48+64)
+#define BPF_PPC_STACK_SAVE (18*8)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (48+64)
+#else
+#define BPF_PPC_STACK_R3_OFF 24
+#define BPF_PPC_STACK_LOCALS 16
+#define BPF_PPC_STACK_BASIC (24+32)
+#define BPF_PPC_STACK_SAVE (18*4)
+#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
+ BPF_PPC_STACK_SAVE)
+#define BPF_PPC_SLOWPATH_FRAME (24+32)
+#endif
+
+#define REG_SZ (BITS_PER_LONG/8)
+
+/*
+ * Generated code register usage:
+ *
+ * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
+ *
+ * skb r3 (Entry parameter)
+ * A register r4
+ * X register r5
+ * addr param r6
+ * r7-r10 scratch
+ * skb->data r14
+ * skb headlen r15 (skb->len - skb->data_len)
+ * m[0] r16
+ * m[...] ...
+ * m[15] r31
+ */
+#define r_skb 3
+#define r_ret 3
+#define r_A 4
+#define r_X 5
+#define r_addr 6
+#define r_scratch1 7
+#define r_scratch2 8
+#define r_D 14
+#define r_HL 15
+#define r_M 16
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Assembly helpers from arch/powerpc/net/bpf_jit.S:
+ */
+#define DECLARE_LOAD_FUNC(func) \
+ extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
+
+#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LBZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LD(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LWZ(r, r, IMM_L(i)); } } while(0)
+
+#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
+ else { PPC_ADDIS(r, base, IMM_HA(i)); \
+ PPC_LHZ(r, r, IMM_L(i)); } } while(0)
+
+#ifdef CONFIG_PPC64
+#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
+#else
+#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
+#endif
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_PPC64
+#define PPC_BPF_LOAD_CPU(r) \
+ do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
+ PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
+ } while (0)
+#else
+#define PPC_BPF_LOAD_CPU(r) \
+ do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
+ PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
+ offsetof(struct thread_info, cpu)); \
+ } while(0)
+#endif
+#else
+#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
+#endif
+
+#define PPC_LHBRX_OFFS(r, base, i) \
+ do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
+#ifdef __LITTLE_ENDIAN__
+#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
+#else
+#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
+#endif
+
+#define SEEN_DATAREF 0x10000 /* might call external helpers */
+#define SEEN_XREG 0x20000 /* X reg is used */
+#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
+ * storage */
+#define SEEN_MEM_MSK 0x0ffff
+
+struct codegen_context {
+ unsigned int seen;
+ unsigned int idx;
+ int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
+};
+
+#endif
+
+#endif
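
The *_OFFS helpers carried over into bpf_jit32.h split a 'far' offset into addis(IMM_HA) plus a load with displacement IMM_L; IMM_HA adds one to the high half whenever the low half is negative as a signed 16-bit value, so addis plus the sign-extended displacement recombine to the original offset. A small self-check of that identity, assuming the same IMM_H/IMM_HA/IMM_L definitions as bpf_jit.h (narrowed to 32-bit offsets here for simplicity):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IMM_H(i)  ((uint32_t)(i) >> 16)
#define IMM_HA(i) (((uint32_t)(i) >> 16) + (((uint32_t)(i) & 0x8000) >> 15))
#define IMM_L(i)  ((uint32_t)(i) & 0xffff)

int main(void)
{
	/* addis adds (IMM_HA << 16); the load then adds sign-extended IMM_L. */
	uint32_t offs[] = { 0x12345, 0x18000, 0x17fff, 0xffff0 };

	for (unsigned i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
		uint32_t o = offs[i];
		int32_t rebuilt = (int32_t)(IMM_HA(o) << 16) + (int16_t)IMM_L(o);

		printf("0x%05x -> addis 0x%04x, disp %6d, rebuilt 0x%05x\n",
		       o, IMM_HA(o) & 0xffff, (int16_t)IMM_L(o), rebuilt);
		assert((uint32_t)rebuilt == o);
	}
	return 0;
}
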
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
new file mode 100644
index 000000000000..5046d6f65c02
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -0,0 +1,102 @@
+/*
+ * bpf_jit64.h: BPF JIT compiler for PPC64
+ *
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _BPF_JIT64_H
+#define _BPF_JIT64_H
+
+#include "bpf_jit.h"
+
+/*
+ * Stack layout:
+ *
+ * [ prev sp ] <-------------
+ * [ nv gpr save area ] 8*8 |
+ * fp (r31) --> [ ebpf stack space ] 512 |
+ * [ local/tmp var space ] 16 |
+ * [ frame header ] 32/112 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
+
+/* for bpf JIT code internal usage */
+#define BPF_PPC_STACK_LOCALS 16
+/* for gpr non volatile registers BPF_REG_6 to 10, plus skb cache registers */
+#define BPF_PPC_STACK_SAVE (8*8)
+/* Ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
+ MAX_BPF_STACK + BPF_PPC_STACK_SAVE)
+
+#ifndef __ASSEMBLY__
+
+/* BPF register usage */
+#define SKB_HLEN_REG (MAX_BPF_REG + 0)
+#define SKB_DATA_REG (MAX_BPF_REG + 1)
+#define TMP_REG_1 (MAX_BPF_REG + 2)
+#define TMP_REG_2 (MAX_BPF_REG + 3)
+
+/* BPF to ppc register mappings */
+static const int b2p[] = {
+ /* function return value */
+ [BPF_REG_0] = 8,
+ /* function arguments */
+ [BPF_REG_1] = 3,
+ [BPF_REG_2] = 4,
+ [BPF_REG_3] = 5,
+ [BPF_REG_4] = 6,
+ [BPF_REG_5] = 7,
+ /* non volatile registers */
+ [BPF_REG_6] = 27,
+ [BPF_REG_7] = 28,
+ [BPF_REG_8] = 29,
+ [BPF_REG_9] = 30,
+ /* frame pointer aka BPF_REG_10 */
+ [BPF_REG_FP] = 31,
+ /* eBPF jit internal registers */
+ [SKB_HLEN_REG] = 25,
+ [SKB_DATA_REG] = 26,
+ [TMP_REG_1] = 9,
+ [TMP_REG_2] = 10
+};
+
+/* Assembly helpers */
+#define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
+ u64 func##_negative_offset(u64 r3, u64 r4); \
+ u64 func##_positive_offset(u64 r3, u64 r4);
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+
+#define CHOOSE_LOAD_FUNC(imm, func) \
+ (imm < 0 ? \
+ (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
+ func##_positive_offset)
+
+#define SEEN_FUNC 0x1000 /* might call external helpers */
+#define SEEN_STACK 0x2000 /* uses BPF stack */
+#define SEEN_SKB 0x4000 /* uses sk_buff */
+
+struct codegen_context {
+ /*
+ * This is used to track register usage as well
+ * as calls to external helpers.
+ * - register usage is tracked with corresponding
+ * bits (r3-r10 and r25-r31)
+ * - rest of the bits can be used to track other
+ * things -- for now, we use bits 16 to 23
+ * encoded in SEEN_* macros above
+ */
+ unsigned int seen;
+ unsigned int idx;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
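
To make the stack layout comment in bpf_jit64.h concrete: assuming an ELFv2 frame header (STACK_FRAME_MIN_SIZE = 32; it is 112 on ELFv1) and MAX_BPF_STACK = 512 from the generic BPF headers of this era, the frame is 624 bytes and each saved non-volatile GPR rN sits at frame_top - 8*(32 - N), the same expression the prologue and epilogue use. A quick arithmetic sketch under those assumptions:

#include <stdio.h>

/* Assumed values: ELFv2 minimum frame and the generic BPF stack size. */
#define STACK_FRAME_MIN_SIZE	32
#define MAX_BPF_STACK		512
#define BPF_PPC_STACK_LOCALS	16
#define BPF_PPC_STACK_SAVE	(8 * 8)
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS + \
				 MAX_BPF_STACK + BPF_PPC_STACK_SAVE)

int main(void)
{
	printf("frame size     : %d bytes\n", BPF_PPC_STACKFRAME);
	/* NVRs r25..r31 are stashed just below the top of the frame. */
	for (int r = 25; r <= 31; r++)
		printf("save slot r%-2d : sp + %d\n",
		       r, BPF_PPC_STACKFRAME - 8 * (32 - r));
	/* BPF_REG_FP (r31) points at the top of the 512-byte eBPF stack
	 * area; BPF itself uses negative offsets from FP. */
	printf("bpf fp (r31)   : sp + %d\n",
	       BPF_PPC_STACKFRAME - BPF_PPC_STACK_SAVE);
	return 0;
}

When no frame is allocated at all, the prologue applies the same 8*(32 - N) offsets below the caller's stack pointer, i.e. in the protected zone mentioned in its comment.
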
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
index 8ff5a3b5d1c3..3dd9c43d40c9 100644
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -10,7 +10,7 @@
*/
#include <asm/ppc_asm.h>
-#include "bpf_jit.h"
+#include "bpf_jit32.h"
/*
* All of these routines are called directly from generated code,
diff --git a/arch/powerpc/net/bpf_jit_asm64.S b/arch/powerpc/net/bpf_jit_asm64.S
new file mode 100644
index 000000000000..7e4c51430b84
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_asm64.S
@@ -0,0 +1,180 @@
+/*
+ * bpf_jit_asm64.S: Packet/header access helper functions
+ * for PPC64 BPF compiler.
+ *
+ * Copyright 2016, Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ *
+ * Based on bpf_jit_asm.S by Matt Evans
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include "bpf_jit64.h"
+
+/*
+ * All of these routines are called directly from generated code,
+ * with the below register usage:
+ * r27 skb pointer (ctx)
+ * r25 skb header length
+ * r26 skb->data pointer
+ * r4 offset
+ *
+ * Result is passed back in:
+ * r8 data read in host endian format (accumulator)
+ *
+ * r9 is used as a temporary register
+ */
+
+#define r_skb r27
+#define r_hlen r25
+#define r_data r26
+#define r_off r4
+#define r_val r8
+#define r_tmp r9
+
+_GLOBAL_TOC(sk_load_word)
+ cmpdi r_off, 0
+ blt bpf_slow_path_word_neg
+ b sk_load_word_positive_offset
+
+_GLOBAL_TOC(sk_load_word_positive_offset)
+ /* Are we accessing past headlen? */
+ subi r_tmp, r_hlen, 4
+ cmpd r_tmp, r_off
+ blt bpf_slow_path_word
+ /* Nope, just hitting the header. cr0 here is eq or gt! */
+ LWZX_BE r_val, r_data, r_off
+ blr /* Return success, cr0 != LT */
+
+_GLOBAL_TOC(sk_load_half)
+ cmpdi r_off, 0
+ blt bpf_slow_path_half_neg
+ b sk_load_half_positive_offset
+
+_GLOBAL_TOC(sk_load_half_positive_offset)
+ subi r_tmp, r_hlen, 2
+ cmpd r_tmp, r_off
+ blt bpf_slow_path_half
+ LHZX_BE r_val, r_data, r_off
+ blr
+
+_GLOBAL_TOC(sk_load_byte)
+ cmpdi r_off, 0
+ blt bpf_slow_path_byte_neg
+ b sk_load_byte_positive_offset
+
+_GLOBAL_TOC(sk_load_byte_positive_offset)
+ cmpd r_hlen, r_off
+ ble bpf_slow_path_byte
+ lbzx r_val, r_data, r_off
+ blr
+
+/*
+ * Call out to skb_copy_bits:
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define bpf_slow_path_common(SIZE) \
+ mflr r0; \
+ std r0, PPC_LR_STKOFF(r1); \
+ stdu r1, -(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS)(r1); \
+ mr r3, r_skb; \
+ /* r4 = r_off as passed */ \
+ addi r5, r1, STACK_FRAME_MIN_SIZE; \
+ li r6, SIZE; \
+ bl skb_copy_bits; \
+ nop; \
+ /* save r5 */ \
+ addi r5, r1, STACK_FRAME_MIN_SIZE; \
+ /* r3 = 0 on success */ \
+ addi r1, r1, STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS; \
+ ld r0, PPC_LR_STKOFF(r1); \
+ mtlr r0; \
+ cmpdi r3, 0; \
+ blt bpf_error; /* cr0 = LT */
+
+bpf_slow_path_word:
+ bpf_slow_path_common(4)
+ /* Data value is on stack, and cr0 != LT */
+ LWZX_BE r_val, 0, r5
+ blr
+
+bpf_slow_path_half:
+ bpf_slow_path_common(2)
+ LHZX_BE r_val, 0, r5
+ blr
+
+bpf_slow_path_byte:
+ bpf_slow_path_common(1)
+ lbzx r_val, 0, r5
+ blr
+
+/*
+ * Call out to bpf_internal_load_pointer_neg_helper
+ */
+#define sk_negative_common(SIZE) \
+ mflr r0; \
+ std r0, PPC_LR_STKOFF(r1); \
+ stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
+ mr r3, r_skb; \
+ /* r4 = r_off, as passed */ \
+ li r5, SIZE; \
+ bl bpf_internal_load_pointer_neg_helper; \
+ nop; \
+ addi r1, r1, STACK_FRAME_MIN_SIZE; \
+ ld r0, PPC_LR_STKOFF(r1); \
+ mtlr r0; \
+ /* R3 != 0 on success */ \
+ cmpldi r3, 0; \
+ beq bpf_error_slow; /* cr0 = EQ */
+
+bpf_slow_path_word_neg:
+ lis r_tmp, -32 /* SKF_LL_OFF */
+ cmpd r_off, r_tmp /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ b sk_load_word_negative_offset
+
+_GLOBAL_TOC(sk_load_word_negative_offset)
+ sk_negative_common(4)
+ LWZX_BE r_val, 0, r3
+ blr
+
+bpf_slow_path_half_neg:
+ lis r_tmp, -32 /* SKF_LL_OFF */
+ cmpd r_off, r_tmp /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ b sk_load_half_negative_offset
+
+_GLOBAL_TOC(sk_load_half_negative_offset)
+ sk_negative_common(2)
+ LHZX_BE r_val, 0, r3
+ blr
+
+bpf_slow_path_byte_neg:
+ lis r_tmp, -32 /* SKF_LL_OFF */
+ cmpd r_off, r_tmp /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ b sk_load_byte_negative_offset
+
+_GLOBAL_TOC(sk_load_byte_negative_offset)
+ sk_negative_common(1)
+ lbzx r_val, 0, r3
+ blr
+
+bpf_error_slow:
+ /* fabricate a cr0 = lt */
+ li r_tmp, -1
+ cmpdi r_tmp, 0
+bpf_error:
+ /*
+ * Entered with cr0 = lt
+ * Generated code will 'blt epilogue', returning 0.
+ */
+ li r_val, 0
+ blr
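
The helpers in bpf_jit_asm64.S follow the usual packet-access pattern: take the fast path when the requested bytes lie within the linear skb area (offset <= headlen - size), fall back to skb_copy_bits() otherwise, and route negative offsets through bpf_internal_load_pointer_neg_helper(). A plain-C rendering of the word loader's bounds logic, with the skb reduced to a byte buffer and the function name purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: 'hlen' plays the role of r25 (skb headlen),
 * 'data' the role of r26 (skb->data), 'off' the role of r4. */
static int load_word(const uint8_t *data, long hlen, long off, uint32_t *val)
{
	if (off < 0)
		return -1;	/* negative offsets go to another helper */
	if (off > hlen - 4)
		return -1;	/* past the linear area: slow path */
	/* LWZX_BE: 32-bit big-endian load regardless of host endianness. */
	*val = ((uint32_t)data[off] << 24) | ((uint32_t)data[off + 1] << 16) |
	       ((uint32_t)data[off + 2] << 8) | data[off + 3];
	return 0;
}

int main(void)
{
	uint8_t pkt[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	uint32_t v;

	if (!load_word(pkt, sizeof(pkt), 2, &v))
		printf("word at +2: 0x%08x\n", v);	/* 0x56789abc */
	printf("off 6 -> %s\n",
	       load_word(pkt, sizeof(pkt), 6, &v) ? "slow path" : "fast path");
	return 0;
}
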
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2d66a8446198..7e706f36e364 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -16,7 +16,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include "bpf_jit.h"
+#include "bpf_jit32.h"
int bpf_jit_enable __read_mostly;
@@ -161,14 +161,14 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
break;
case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
ctx->seen |= SEEN_XREG;
- PPC_MUL(r_A, r_A, r_X);
+ PPC_MULW(r_A, r_A, r_X);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
if (K < 32768)
PPC_MULI(r_A, r_A, K);
else {
PPC_LI32(r_scratch1, K);
- PPC_MUL(r_A, r_A, r_scratch1);
+ PPC_MULW(r_A, r_A, r_scratch1);
}
break;
case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
@@ -184,7 +184,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
}
if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
PPC_DIVWU(r_scratch1, r_A, r_X);
- PPC_MUL(r_scratch1, r_X, r_scratch1);
+ PPC_MULW(r_scratch1, r_X, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
} else {
PPC_DIVWU(r_A, r_A, r_X);
@@ -193,7 +193,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
PPC_LI32(r_scratch2, K);
PPC_DIVWU(r_scratch1, r_A, r_scratch2);
- PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
+ PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
PPC_SUB(r_A, r_A, r_scratch1);
break;
case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
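
The PPC_MUL to PPC_MULW rename above is purely for clarity: classic BPF's A and X are 32-bit, so mullw (low 32-bit product) is sufficient here, while the new 64-bit JIT uses mulld for BPF_ALU64. A two-value demonstration of the difference, using C arithmetic as a stand-in for the two instructions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t a = 0x100000002ULL, b = 3;

	/* mullw multiplies the low 32 bits of each operand ... */
	uint32_t mullw = (uint32_t)a * (uint32_t)b;
	/* ... mulld is a full 64x64 -> low-64-bit multiply. */
	uint64_t mulld = a * b;

	printf("mullw: 0x%x\n", mullw);				/* 0x6 */
	printf("mulld: 0x%llx\n", (unsigned long long)mulld);	/* 0x300000006 */
	return 0;
}
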
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
new file mode 100644
index 000000000000..6073b78516f6
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -0,0 +1,954 @@
+/*
+ * bpf_jit_comp64.c: eBPF JIT compiler
+ *
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
+ *
+ * Based on the powerpc classic BPF JIT compiler by Matt Evans
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/moduleloader.h>
+#include <asm/cacheflush.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <asm/kprobes.h>
+
+#include "bpf_jit64.h"
+
+int bpf_jit_enable __read_mostly;
+
+static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
+{
+ int *p = area;
+
+ /* Fill whole space with trap instructions */
+ while (p < (int *)((char *)area + size))
+ *p++ = BREAKPOINT_INSTRUCTION;
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ smp_wmb();
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
+{
+ return (ctx->seen & (1 << (31 - b2p[i])));
+}
+
+static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
+{
+ ctx->seen |= (1 << (31 - b2p[i]));
+}
+
+static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
+{
+ /*
+ * We only need a stack frame if:
+ * - we call other functions (kernel helpers), or
+ * - the bpf program uses its stack area
+ * The latter condition is deduced from the usage of BPF_REG_FP
+ */
+ return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
+}
+
+static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
+{
+ /*
+ * Load skb->len and skb->data_len
+ * r3 points to skb
+ */
+ PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
+ PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
+ /* header_len = len - data_len */
+ PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);
+
+ /* skb->data pointer */
+ PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
+}
+
+static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+ /* func points to the function descriptor */
+ PPC_LI64(b2p[TMP_REG_2], func);
+ /* Load actual entry point from function descriptor */
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+ /* ... and move it to LR */
+ PPC_MTLR(b2p[TMP_REG_1]);
+ /*
+ * Load TOC from function descriptor at offset 8.
+ * We can clobber r2 since we get called through a
+ * function pointer (so caller will save/restore r2)
+ * and since we don't use a TOC ourself.
+ */
+ PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+ /* We can clobber r12 */
+ PPC_FUNC_ADDR(12, func);
+ PPC_MTLR(12);
+#endif
+ PPC_BLRL();
+}
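
bpf_jit_emit_func_call() above has to cope with both 64-bit ABIs: under ELFv1 a function address is really the address of a descriptor (entry point, TOC, environment), so the JIT loads the branch target from offset 0 and the callee's TOC into r2 from offset 8; under ELFv2 the address is the entry point itself and r12 is simply loaded and branched through. A standalone sketch of the ELFv1 descriptor layout those offsets refer to; the struct is illustrative, not a kernel type, and the addresses are made up.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* ELFv1 "OPD" function descriptor: what a 64-bit function pointer
 * actually points at on that ABI. */
struct func_desc {
	uint64_t entry;	/* real code address; the JIT moves this to LR */
	uint64_t toc;	/* callee's TOC pointer; the JIT loads this into r2 */
	uint64_t env;	/* environment pointer, unused by C */
};

int main(void)
{
	struct func_desc d = {
		.entry = 0xc000000000123456ULL,	/* made-up text address */
		.toc   = 0xc000000001ff0000ULL,	/* made-up TOC value */
	};

	/* The JIT dereferences the descriptor at offsets 0 and 8: */
	printf("branch target (offset %zu): 0x%llx\n",
	       offsetof(struct func_desc, entry), (unsigned long long)d.entry);
	printf("callee TOC    (offset %zu): 0x%llx\n",
	       offsetof(struct func_desc, toc), (unsigned long long)d.toc);
	return 0;
}
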
+
+static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+ bool new_stack_frame = bpf_has_stack_frame(ctx);
+
+ if (new_stack_frame) {
+ /*
+ * We need a stack frame, but we don't necessarily need to
+ * save/restore LR unless we call other functions
+ */
+ if (ctx->seen & SEEN_FUNC) {
+ EMIT(PPC_INST_MFLR | __PPC_RT(R0));
+ PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
+ }
+
+ PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
+ }
+
+ /*
+ * Back up non-volatile regs -- BPF registers 6-10
+ * If we haven't created our own stack frame, we save these
+ * in the protected zone below the previous stack frame
+ */
+ for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+ if (bpf_is_seen_register(ctx, i))
+ PPC_BPF_STL(b2p[i], 1,
+ (new_stack_frame ? BPF_PPC_STACKFRAME : 0) -
+ (8 * (32 - b2p[i])));
+
+ /*
+ * Save additional non-volatile regs if we cache skb
+ * Also, setup skb data
+ */
+ if (ctx->seen & SEEN_SKB) {
+ PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_HLEN_REG])));
+ PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_DATA_REG])));
+ bpf_jit_emit_skb_loads(image, ctx);
+ }
+
+ /* Setup frame pointer to point to the bpf stack area */
+ if (bpf_is_seen_register(ctx, BPF_REG_FP))
+ PPC_ADDI(b2p[BPF_REG_FP], 1,
+ BPF_PPC_STACKFRAME - BPF_PPC_STACK_SAVE);
+}
+
+static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+ bool new_stack_frame = bpf_has_stack_frame(ctx);
+
+ /* Move result to r3 */
+ PPC_MR(3, b2p[BPF_REG_0]);
+
+ /* Restore NVRs */
+ for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+ if (bpf_is_seen_register(ctx, i))
+ PPC_BPF_LL(b2p[i], 1,
+ (new_stack_frame ? BPF_PPC_STACKFRAME : 0) -
+ (8 * (32 - b2p[i])));
+
+ /* Restore non-volatile registers used for skb cache */
+ if (ctx->seen & SEEN_SKB) {
+ PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_HLEN_REG])));
+ PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
+ BPF_PPC_STACKFRAME - (8 * (32 - b2p[SKB_DATA_REG])));
+ }
+
+ /* Tear down our stack frame */
+ if (new_stack_frame) {
+ PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
+ if (ctx->seen & SEEN_FUNC) {
+ PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
+ PPC_MTLR(0);
+ }
+ }
+
+ PPC_BLR();
+}
+
+/* Assemble the body code between the prologue & epilogue */
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx,
+ u32 *addrs)
+{
+ const struct bpf_insn *insn = fp->insnsi;
+ int flen = fp->len;
+ int i;
+
+ /* Start of epilogue code - will only be valid 2nd pass onwards */
+ u32 exit_addr = addrs[flen];
+
+ for (i = 0; i < flen; i++) {
+ u32 code = insn[i].code;
+ u32 dst_reg = b2p[insn[i].dst_reg];
+ u32 src_reg = b2p[insn[i].src_reg];
+ s16 off = insn[i].off;
+ s32 imm = insn[i].imm;
+ u64 imm64;
+ u8 *func;
+ u32 true_cond;
+ int stack_local_off;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+ * the start of the body code.
+ */
+ addrs[i] = ctx->idx * 4;
+
+ /*
+ * As an optimization, we note down which non-volatile registers
+ * are used so that we can only save/restore those in our
+ * prologue and epilogue. We do this here regardless of whether
+ * the actual BPF instruction uses src/dst registers or not
+ * (for instance, BPF_CALL does not use them). The expectation
+ * is that those instructions will have src_reg/dst_reg set to
+		 * 0. Even if they are not, we only lose some prologue/epilogue
+		 * optimization; everything else still works correctly.
+ */
+ if (dst_reg >= 24 && dst_reg <= 31)
+ bpf_set_seen_register(ctx, insn[i].dst_reg);
+ if (src_reg >= 24 && src_reg <= 31)
+ bpf_set_seen_register(ctx, insn[i].src_reg);
+
+ switch (code) {
+ /*
+ * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
+ */
+ case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
+ PPC_ADD(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
+ PPC_SUB(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
+ if (BPF_OP(code) == BPF_SUB)
+ imm = -imm;
+ if (imm) {
+ if (imm >= -32768 && imm < 32768)
+ PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ }
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
+ if (BPF_CLASS(code) == BPF_ALU)
+ PPC_MULW(dst_reg, dst_reg, src_reg);
+ else
+ PPC_MULD(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
+ if (imm >= -32768 && imm < 32768)
+ PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ if (BPF_CLASS(code) == BPF_ALU)
+ PPC_MULW(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ else
+ PPC_MULD(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
+ case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
+ PPC_CMPWI(src_reg, 0);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_MULW(b2p[TMP_REG_1], src_reg,
+ b2p[TMP_REG_1]);
+ PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else
+ PPC_DIVWU(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
+ PPC_CMPDI(src_reg, 0);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_MULD(b2p[TMP_REG_1], src_reg,
+ b2p[TMP_REG_1]);
+ PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else
+ PPC_DIVD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
+ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ if (imm == 0)
+ return -EINVAL;
+ else if (imm == 1)
+ goto bpf_alu32_trunc;
+
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ switch (BPF_CLASS(code)) {
+ case BPF_ALU:
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
+ b2p[TMP_REG_1]);
+ PPC_MULW(b2p[TMP_REG_1],
+ b2p[TMP_REG_1],
+ b2p[TMP_REG_2]);
+ PPC_SUB(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ } else
+ PPC_DIVWU(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ break;
+ case BPF_ALU64:
+ if (BPF_OP(code) == BPF_MOD) {
+ PPC_DIVD(b2p[TMP_REG_2], dst_reg,
+ b2p[TMP_REG_1]);
+ PPC_MULD(b2p[TMP_REG_1],
+ b2p[TMP_REG_1],
+ b2p[TMP_REG_2]);
+ PPC_SUB(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ } else
+ PPC_DIVD(dst_reg, dst_reg,
+ b2p[TMP_REG_1]);
+ break;
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
+ case BPF_ALU64 | BPF_NEG: /* dst = -dst */
+ PPC_NEG(dst_reg, dst_reg);
+ goto bpf_alu32_trunc;
+
+ /*
+ * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
+ */
+ case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
+ PPC_AND(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
+ case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
+ if (!IMM_H(imm))
+ PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
+ else {
+ /* Sign-extended */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
+ case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
+ PPC_OR(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
+ case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
+ if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
+ /* Sign-extended */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else {
+ if (IMM_L(imm))
+ PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
+ if (IMM_H(imm))
+ PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
+ PPC_XOR(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
+ if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
+ /* Sign-extended */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ } else {
+ if (IMM_L(imm))
+ PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
+ if (IMM_H(imm))
+ PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
+ /* slw clears top 32 bits */
+ PPC_SLW(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
+ PPC_SLD(dst_reg, dst_reg, src_reg);
+ break;
+		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
+ /* with imm 0, we still need to clear top 32 bits */
+ PPC_SLWI(dst_reg, dst_reg, imm);
+ break;
+		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
+ if (imm != 0)
+ PPC_SLDI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
+ PPC_SRW(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
+ PPC_SRD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
+ PPC_SRWI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
+ if (imm != 0)
+ PPC_SRDI(dst_reg, dst_reg, imm);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
+ PPC_SRAD(dst_reg, dst_reg, src_reg);
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
+ if (imm != 0)
+ PPC_SRADI(dst_reg, dst_reg, imm);
+ break;
+
+ /*
+ * MOV
+ */
+ case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+ PPC_MR(dst_reg, src_reg);
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
+ PPC_LI32(dst_reg, imm);
+ if (imm < 0)
+ goto bpf_alu32_trunc;
+ break;
+
+bpf_alu32_trunc:
+ /* Truncate to 32-bits */
+ if (BPF_CLASS(code) == BPF_ALU)
+ PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
+ break;
+
+ /*
+ * BPF_FROM_BE/LE
+ */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+#ifdef __BIG_ENDIAN__
+ if (BPF_SRC(code) == BPF_FROM_BE)
+ goto emit_clear;
+#else /* !__BIG_ENDIAN__ */
+ if (BPF_SRC(code) == BPF_FROM_LE)
+ goto emit_clear;
+#endif
+ switch (imm) {
+ case 16:
+ /* Rotate 8 bits left & mask with 0x0000ff00 */
+ PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
+ /* Rotate 8 bits right & insert LSB to reg */
+ PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
+ /* Move result back to dst_reg */
+ PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ break;
+ case 32:
+ /*
+ * Rotate word left by 8 bits:
+ * 2 bytes are already in their final position
+ * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
+ */
+ PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
+ /* Rotate 24 bits and insert byte 1 */
+ PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
+ /* Rotate 24 bits and insert byte 3 */
+ PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
+ PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ break;
+ case 64:
+ /*
+ * Way easier and faster(?) to store the value
+ * into stack and then use ldbrx
+ *
+ * First, determine where in stack we can store
+ * this:
+ * - if we have allotted a stack frame, then we
+ * will utilize the area set aside by
+ * BPF_PPC_STACK_LOCALS
+ * - else, we use the area beneath the NV GPR
+ * save area
+ *
+ * ctx->seen will be reliable in pass2, but
+ * the instructions generated will remain the
+ * same across all passes
+ */
+ if (bpf_has_stack_frame(ctx))
+ stack_local_off = STACK_FRAME_MIN_SIZE;
+ else
+ stack_local_off = -(BPF_PPC_STACK_SAVE + 8);
+
+ PPC_STD(dst_reg, 1, stack_local_off);
+ PPC_ADDI(b2p[TMP_REG_1], 1, stack_local_off);
+ PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
+ break;
+ }
+ break;
+
+emit_clear:
+ switch (imm) {
+ case 16:
+ /* zero-extend 16 bits into 64 bits */
+ PPC_RLDICL(dst_reg, dst_reg, 0, 48);
+ break;
+ case 32:
+ /* zero-extend 32 bits into 64 bits */
+ PPC_RLDICL(dst_reg, dst_reg, 0, 32);
+ break;
+ case 64:
+ /* nop */
+ break;
+ }
+ break;
+
+ /*
+ * BPF_ST(X)
+ */
+ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STB(src_reg, dst_reg, off);
+ break;
+		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
+		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STH(src_reg, dst_reg, off);
+ break;
+ case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STW(src_reg, dst_reg, off);
+ break;
+		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
+ case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ if (BPF_CLASS(code) == BPF_ST) {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ src_reg = b2p[TMP_REG_1];
+ }
+ PPC_STD(src_reg, dst_reg, off);
+ break;
+
+ /*
+ * BPF_STX XADD (atomic_add)
+ */
+ /* *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* Get EA into TMP_REG_1 */
+ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+ /* error if EA is not word-aligned */
+ PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ /* load value from memory into TMP_REG_2 */
+ PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ /* add value from src_reg into this */
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ /* store result back */
+ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ /* we're done if this succeeded */
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+ /* otherwise, let's try once more */
+ PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ /* exit if the store was not successful */
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_BCC(COND_NE, exit_addr);
+ break;
+ /* *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+ /* error if EA is not doubleword-aligned */
+ PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_JMP(exit_addr);
+ PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+ PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
+ PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
+ PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ PPC_LI(b2p[BPF_REG_0], 0);
+ PPC_BCC(COND_NE, exit_addr);
+ break;
+
+ /*
+ * BPF_LDX
+ */
+ /* dst = *(u8 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ PPC_LBZ(dst_reg, src_reg, off);
+ break;
+ /* dst = *(u16 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_H:
+ PPC_LHZ(dst_reg, src_reg, off);
+ break;
+ /* dst = *(u32 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_W:
+ PPC_LWZ(dst_reg, src_reg, off);
+ break;
+ /* dst = *(u64 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ PPC_LD(dst_reg, src_reg, off);
+ break;
+
+ /*
+ * Doubleword load
+ * 16 byte instruction that uses two 'struct bpf_insn'
+ */
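+ /*
+ * The low 32 bits come from insn[i].imm and the high 32 bits
+ * from insn[i + 1].imm, which is why an address table entry is
+ * also recorded for the second slot below.
+ */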
+ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
+ imm64 = ((u64)(u32) insn[i].imm) |
+ (((u64)(u32) insn[i+1].imm) << 32);
+ /* Adjust for two bpf instructions */
+ addrs[++i] = ctx->idx * 4;
+ PPC_LI64(dst_reg, imm64);
+ break;
+
+ /*
+ * Return/Exit
+ */
+ case BPF_JMP | BPF_EXIT:
+ /*
+ * If this isn't the very last instruction, branch to
+ * the epilogue. If we _are_ the last instruction,
+ * we'll just fall through to the epilogue.
+ */
+ if (i != flen - 1)
+ PPC_JMP(exit_addr);
+ /* else fall through to the epilogue */
+ break;
+
+ /*
+ * Call kernel helper
+ */
+ case BPF_JMP | BPF_CALL:
+ ctx->seen |= SEEN_FUNC;
+ func = (u8 *) __bpf_call_base + imm;
+
+ /* Save skb pointer if we need to re-cache skb data */
+ if (bpf_helper_changes_skb_data(func))
+ PPC_BPF_STL(3, 1, STACK_FRAME_MIN_SIZE);
+
+ bpf_jit_emit_func_call(image, ctx, (u64)func);
+
+ /* move return value from r3 to BPF_REG_0 */
+ PPC_MR(b2p[BPF_REG_0], 3);
+
+ /* refresh skb cache */
+ if (bpf_helper_changes_skb_data(func)) {
+ /* reload skb pointer to r3 */
+ PPC_BPF_LL(3, 1, STACK_FRAME_MIN_SIZE);
+ bpf_jit_emit_skb_loads(image, ctx);
+ }
+ break;
+
+ /*
+ * Jumps and branches
+ */
+ case BPF_JMP | BPF_JA:
+ PPC_JMP(addrs[i + 1 + off]);
+ break;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ true_cond = COND_GT;
+ goto cond_branch;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ true_cond = COND_GE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ true_cond = COND_EQ;
+ goto cond_branch;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ true_cond = COND_NE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ true_cond = COND_NE;
+ /* Fall through */
+
+cond_branch:
+ switch (code) {
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ /* unsigned comparison */
+ PPC_CMPLD(dst_reg, src_reg);
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ /* signed comparison */
+ PPC_CMPD(dst_reg, src_reg);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
+ break;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ /*
+ * Need sign-extended load, so only positive
+ * values can be used as imm in cmpldi
+ */
+ if (imm >= 0 && imm < 32768)
+ PPC_CMPLDI(dst_reg, imm);
+ else {
+ /* sign-extending load */
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ /* ... but unsigned comparison */
+ PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ /*
+ * signed comparison, so any 16-bit value
+ * can be used in cmpdi
+ */
+ if (imm >= -32768 && imm < 32768)
+ PPC_CMPDI(dst_reg, imm);
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
+ }
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+ if (imm >= 0 && imm < 32768)
+ /* PPC_ANDI is _only/always_ dot-form */
+ PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
+ b2p[TMP_REG_1]);
+ }
+ break;
+ }
+ PPC_BCC(true_cond, addrs[i + 1 + off]);
+ break;
+
+ /*
+ * Loads from packet header/data
+ * Assume 32-bit input value in imm and X (src_reg)
+ */
+
+ /* Absolute loads */
+ case BPF_LD | BPF_W | BPF_ABS:
+ func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
+ goto common_load_abs;
+ case BPF_LD | BPF_H | BPF_ABS:
+ func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
+ goto common_load_abs;
+ case BPF_LD | BPF_B | BPF_ABS:
+ func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
+common_load_abs:
+ /*
+ * Load from [imm]
+ * Load into r4, which can just be passed onto
+ * skb load helpers as the second parameter
+ */
+ PPC_LI32(4, imm);
+ goto common_load;
+
+ /* Indirect loads */
+ case BPF_LD | BPF_W | BPF_IND:
+ func = (u8 *)sk_load_word;
+ goto common_load_ind;
+ case BPF_LD | BPF_H | BPF_IND:
+ func = (u8 *)sk_load_half;
+ goto common_load_ind;
+ case BPF_LD | BPF_B | BPF_IND:
+ func = (u8 *)sk_load_byte;
+common_load_ind:
+ /*
+ * Load from [src_reg + imm]
+ * Treat src_reg as a 32-bit value
+ */
+ PPC_EXTSW(4, src_reg);
+ if (imm) {
+ if (imm >= -32768 && imm < 32768)
+ PPC_ADDI(4, 4, IMM_L(imm));
+ else {
+ PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_ADD(4, 4, b2p[TMP_REG_1]);
+ }
+ }
+
+common_load:
+ ctx->seen |= SEEN_SKB;
+ ctx->seen |= SEEN_FUNC;
+ bpf_jit_emit_func_call(image, ctx, (u64)func);
+
+ /*
+ * Helper returns 'lt' condition on error, and an
+ * appropriate return value in BPF_REG_0
+ */
+ PPC_BCC(COND_LT, exit_addr);
+ break;
+
+ /*
+ * TODO: Tail call
+ */
+ case BPF_JMP | BPF_CALL | BPF_X:
+
+ default:
+ /*
+ * The filter contains something cruel & unusual.
+ * We don't handle it, but also there shouldn't be
+ * anything missing from our list.
+ */
+ pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
+ code, i);
+ return -ENOTSUPP;
+ }
+ }
+
+ /* Set end-of-body-code address for exit. */
+ addrs[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+void bpf_jit_compile(struct bpf_prog *fp) { }
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+{
+ u32 proglen;
+ u32 alloclen;
+ u8 *image = NULL;
+ u32 *code_base;
+ u32 *addrs;
+ struct codegen_context cgctx;
+ int pass;
+ int flen;
+ struct bpf_binary_header *bpf_hdr;
+
+ if (!bpf_jit_enable)
+ return fp;
+
+ flen = fp->len;
+ addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
+ if (addrs == NULL)
+ return fp;
+
+ cgctx.idx = 0;
+ cgctx.seen = 0;
+ /* Scouting faux-generate pass 0 */
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ /* We hit something illegal or unsupported. */
+ goto out;
+
+ /*
+ * Pretend to build prologue, given the features we've seen. This will
+ * update cgctx.idx as it pretends to output instructions, then we can
+ * calculate total size from idx.
+ */
+ bpf_jit_build_prologue(0, &cgctx);
+ bpf_jit_build_epilogue(0, &cgctx);
+
+ proglen = cgctx.idx * 4;
+ alloclen = proglen + FUNCTION_DESCR_SIZE;
+
+ bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
+ bpf_jit_fill_ill_insns);
+ if (!bpf_hdr)
+ goto out;
+
+ code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+
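+ /*
+ * The body is emitted twice over the real image: as noted in
+ * bpf_jit_build_body(), ctx->seen is only fully reliable by
+ * pass 2, by which point all offsets are settled.
+ */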
+ /* Code generation passes 1-2 */
+ for (pass = 1; pass < 3; pass++) {
+ /* Now build the prologue, body code & epilogue for real. */
+ cgctx.idx = 0;
+ bpf_jit_build_prologue(code_base, &cgctx);
+ bpf_jit_build_body(fp, code_base, &cgctx, addrs);
+ bpf_jit_build_epilogue(code_base, &cgctx);
+
+ if (bpf_jit_enable > 1)
+ pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
+ proglen - (cgctx.idx * 4), cgctx.seen);
+ }
+
+ if (bpf_jit_enable > 1)
+ /*
+ * Note that we dump from code_base rather than image, since
+ * the opcodes start at code_base.
+ */
+ bpf_jit_dump(flen, proglen, pass, code_base);
+
+ if (image) {
+ bpf_flush_icache(bpf_hdr, image + alloclen);
+#ifdef PPC64_ELF_ABI_v1
+ /* Function descriptor nastiness: Address + TOC */
+ ((u64 *)image)[0] = (u64)code_base;
+ ((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
+ fp->bpf_func = (void *)image;
+ fp->jited = 1;
+ }
+
+out:
+ kfree(addrs);
+ return fp;
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
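+ /*
+ * The JIT image is page-aligned with its bpf_binary_header at
+ * the start of the allocation, so masking the function address
+ * with PAGE_MASK recovers the header.
+ */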
+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+ struct bpf_binary_header *bpf_hdr = (void *)addr;
+
+ if (fp->jited)
+ bpf_jit_binary_free(bpf_hdr);
+
+ bpf_prog_unlock_free(fp);
+}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index ed7b0977072a..ef2142ff7dbd 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -51,7 +51,7 @@ static void spu_buff_add(unsigned long int value, int spu)
* That way we can tell the difference between the
* buffer being full versus empty.
*
- * ASSUPTION: the buffer_lock is held when this function
+ * ASSUMPTION: the buffer_lock is held when this function
* is called to lock the buffer, head and tail.
*/
int full = 1;
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 77b6394a7c50..f102d5370101 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o \
- power8-pmu.o
+ isa207-common.o power8-pmu.o power9-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index ffd61d55fb25..4ed377f0f7b2 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -992,7 +992,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
* than the previous value it will cause the delta and the counter to
* have bogus values unless we rolled a counter over. If a coutner is
* rolled back, it will be smaller, but within 256, which is the maximum
- * number of events to rollback at once. If we dectect a rollback
+ * number of events to rollback at once. If we detect a rollback
* return 0. This can lead to a small lack of precision in the
* counters.
*/
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 2da41b78cb6d..7b2ca16b1eb4 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -1298,7 +1298,7 @@ static void h_24x7_event_read(struct perf_event *event)
__this_cpu_write(hv_24x7_txn_err, ret);
} else {
/*
- * Assoicate the event with the HCALL request index,
+ * Associate the event with the HCALL request index,
* so ->commit_txn() can quickly find/update count.
*/
i = request_buffer->num_requests - 1;
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 791455e7f5cf..634ef4082cdc 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
/* -1 if @performance_domain does not refer to a virtual processor */
__be32 lpar_cfg_instance_id;
- /* size = @result_element_data_size of cointaining result. */
+ /* size = @result_element_data_size of containing result. */
__u64 element_data[1];
} __packed;
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
new file mode 100644
index 000000000000..6143c99f3ec5
--- /dev/null
+++ b/arch/powerpc/perf/isa207-common.c
@@ -0,0 +1,263 @@
+/*
+ * Common Performance counter support functions for PowerISA v2.07 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "isa207-common.h"
+
+static inline bool event_is_fab_match(u64 event)
+{
+ /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
+ event &= 0xff0fe;
+
+ /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
+ return (event == 0x30056 || event == 0x4f052);
+}
+
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+{
+ unsigned int unit, pmc, cache, ebb;
+ unsigned long mask, value;
+
+ mask = value = 0;
+
+ if (event & ~EVENT_VALID_MASK)
+ return -1;
+
+ pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
+ ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
+
+ if (pmc) {
+ u64 base_event;
+
+ if (pmc > 6)
+ return -1;
+
+ /* Ignore Linux defined bits when checking event below */
+ base_event = event & ~EVENT_LINUX_MASK;
+
+ if (pmc >= 5 && base_event != 0x500fa &&
+ base_event != 0x600f4)
+ return -1;
+
+ mask |= CNST_PMC_MASK(pmc);
+ value |= CNST_PMC_VAL(pmc);
+ }
+
+ if (pmc <= 4) {
+ /*
+ * Add to number of counters in use. Note this includes events with
+ * a PMC of 0 - they still need a PMC, it's just assigned later.
+ * Don't count events on PMC 5 & 6, there is only one valid event
+ * on each of those counters, and they are handled above.
+ */
+ mask |= CNST_NC_MASK;
+ value |= CNST_NC_VAL;
+ }
+
+ if (unit >= 6 && unit <= 9) {
+ /*
+ * L2/L3 events contain a cache selector field, which is
+ * supposed to be programmed into MMCRC. However MMCRC is only
+ * HV writable, and there is no API for guest kernels to modify
+ * it. The solution is for the hypervisor to initialise the
+ * field to zeroes, and for us to only ever allow events that
+ * have a cache selector of zero. The bank selector (bit 3) is
+ * irrelevant, as long as the rest of the value is 0.
+ */
+ if (cache & 0x7)
+ return -1;
+
+ } else if (event & EVENT_IS_L1) {
+ mask |= CNST_L1_QUAL_MASK;
+ value |= CNST_L1_QUAL_VAL(cache);
+ }
+
+ if (event & EVENT_IS_MARKED) {
+ mask |= CNST_SAMPLE_MASK;
+ value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
+ }
+
+ /*
+ * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold control bits are used for the match value.
+ */
+ if (event_is_fab_match(event)) {
+ mask |= CNST_FAB_MATCH_MASK;
+ value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
+ } else {
+ /*
+ * Check the mantissa upper two bits are not zero, unless the
+ * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
+ */
+ unsigned int cmp, exp;
+
+ cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ exp = cmp >> 7;
+
+ if (exp && (cmp & 0x60) == 0)
+ return -1;
+
+ mask |= CNST_THRESH_MASK;
+ value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
+ }
+
+ if (!pmc && ebb)
+ /* EBB events must specify the PMC */
+ return -1;
+
+ if (event & EVENT_WANTS_BHRB) {
+ if (!ebb)
+ /* Only EBB events can request BHRB */
+ return -1;
+
+ mask |= CNST_IFM_MASK;
+ value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
+ }
+
+ /*
+ * All events must agree on EBB, either all request it or none.
+ * EBB events are pinned & exclusive, so this should never actually
+ * hit, but we leave it as a fallback in case.
+ */
+ mask |= CNST_EBB_VAL(ebb);
+ value |= CNST_EBB_MASK;
+
+ *maskp = mask;
+ *valp = value;
+
+ return 0;
+}
+
+int isa207_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[],
+ struct perf_event *pevents[])
+{
+ unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
+ unsigned int pmc, pmc_inuse;
+ int i;
+
+ pmc_inuse = 0;
+
+ /* First pass to count resource use */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ if (pmc)
+ pmc_inuse |= 1 << pmc;
+ }
+
+ /* In continuous sampling mode, update SDAR on TLB miss */
+ mmcra = MMCRA_SDAR_MODE_TLB;
+ mmcr1 = mmcr2 = 0;
+
+ /* Second pass: assign PMCs, set all MMCR1 fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
+ unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
+ combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
+ psel = event[i] & EVENT_PSEL_MASK;
+
+ if (!pmc) {
+ for (pmc = 1; pmc <= 4; ++pmc) {
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ }
+
+ pmc_inuse |= 1 << pmc;
+ }
+
+ if (pmc <= 4) {
+ mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
+ mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
+ mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
+ }
+
+ if (event[i] & EVENT_IS_L1) {
+ cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
+ mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
+ cache >>= 1;
+ mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+ }
+
+ if (event[i] & EVENT_IS_MARKED) {
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+
+ val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val) {
+ mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
+ mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
+ }
+ }
+
+ /*
+ * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
+ * the threshold bits are used for the match value.
+ */
+ if (event_is_fab_match(event[i])) {
+ mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+ EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
+ } else {
+ val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ mmcra |= val << MMCRA_THR_CTL_SHIFT;
+ val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
+ mmcra |= val << MMCRA_THR_SEL_SHIFT;
+ val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
+ mmcra |= val << MMCRA_THR_CMP_SHIFT;
+ }
+
+ if (event[i] & EVENT_WANTS_BHRB) {
+ val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
+ mmcra |= val << MMCRA_IFM_SHIFT;
+ }
+
+ if (pevents[i]->attr.exclude_user)
+ mmcr2 |= MMCR2_FCP(pmc);
+
+ if (pevents[i]->attr.exclude_hv)
+ mmcr2 |= MMCR2_FCH(pmc);
+
+ if (pevents[i]->attr.exclude_kernel) {
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mmcr2 |= MMCR2_FCH(pmc);
+ else
+ mmcr2 |= MMCR2_FCS(pmc);
+ }
+
+ hwc[i] = pmc - 1;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+
+ /* pmc_inuse is 1-based */
+ if (pmc_inuse & 2)
+ mmcr[0] = MMCR0_PMC1CE;
+
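+ /* 0x7c covers PMCs 2-6 (bit n of pmc_inuse corresponds to PMC n) */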
+ if (pmc_inuse & 0x7c)
+ mmcr[0] |= MMCR0_PMCjCE;
+
+ /* If we're not using PMC 5 or 6, freeze them */
+ if (!(pmc_inuse & 0x60))
+ mmcr[0] |= MMCR0_FC56;
+
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ mmcr[3] = mmcr2;
+
+ return 0;
+}
+
+void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
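+ /* pmc here is the zero-based counter index, hence pmc + 1 below */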
+ if (pmc <= 3)
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
+}
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
new file mode 100644
index 000000000000..4d0a4e5017c2
--- /dev/null
+++ b/arch/powerpc/perf/isa207-common.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or any later version.
+ */
+
+#ifndef _LINUX_POWERPC_PERF_ISA207_COMMON_H_
+#define _LINUX_POWERPC_PERF_ISA207_COMMON_H_
+
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <asm/firmware.h>
+#include <asm/cputable.h>
+
+/*
+ * Raw event encoding for PowerISA v2.07:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * | | [ ] [ thresh_cmp ] [ thresh_ctl ]
+ * | | | |
+ * | | *- IFM (Linux) thresh start/stop OR FAB match -*
+ * | *- BHRB (Linux)
+ * *- EBB (Linux)
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
+ * | | | | |
+ * | | | | *- mark
+ * | | *- L1/L2/L3 cache_sel |
+ * | | |
+ * | *- sampling mode for marked events *- combine
+ * |
+ * *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[x] = combine (PMCxCOMB)
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ * # PM_MRK_FAB_RSP_MATCH
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ * # PM_MRK_FAB_RSP_MATCH_CYC
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else
+ * MMCRA[48:55] = thresh_ctl (THRESH START/END)
+ *
+ * if thresh_sel:
+ * MMCRA[45:47] = thresh_sel
+ *
+ * if thresh_cmp:
+ * MMCRA[22:24] = thresh_cmp[0:2]
+ * MMCRA[25:31] = thresh_cmp[3:9]
+ *
+ * if unit == 6 or unit == 7
+ * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
+ * else if unit == 8 or unit == 9:
+ * if cache_sel[0] == 0: # L3 bank
+ * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
+ * else if cache_sel[0] == 1:
+ * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
+ * else if cache_sel[1]: # L1 event
+ * MMCR1[16] = cache_sel[2]
+ * MMCR1[17] = cache_sel[3]
+ *
+ * if mark:
+ * MMCRA[63] = 1 (SAMPLE_ENABLE)
+ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
+ * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
+ *
+ * if EBB and BHRB:
+ * MMCRA[32:33] = IFM
+ *
+ */
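+
+/*
+ * Worked example: event 0x30056 (PM_MRK_FAB_RSP_MATCH) decodes as
+ * pmc = 3, unit = 0 and pmcxsel = 0x56, i.e. the fab-match special
+ * case described above.
+ */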
+
+#define EVENT_EBB_MASK 1ull
+#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT
+#define EVENT_BHRB_MASK 1ull
+#define EVENT_BHRB_SHIFT 62
+#define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
+#define EVENT_IFM_MASK 3ull
+#define EVENT_IFM_SHIFT 60
+#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
+#define EVENT_THR_CMP_MASK 0x3ff
+#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
+#define EVENT_THR_CTL_MASK 0xffull
+#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
+#define EVENT_THR_SEL_MASK 0x7
+#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
+#define EVENT_THRESH_MASK 0x1fffffull
+#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
+#define EVENT_SAMPLE_MASK 0x1f
+#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
+#define EVENT_CACHE_SEL_MASK 0xf
+#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
+#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
+#define EVENT_PMC_MASK 0xf
+#define EVENT_UNIT_SHIFT 12 /* Unit */
+#define EVENT_UNIT_MASK 0xf
+#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
+#define EVENT_COMBINE_MASK 0x1
+#define EVENT_MARKED_SHIFT 8 /* Marked bit */
+#define EVENT_MARKED_MASK 0x1
+#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
+#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
+
+/* Bits defined by Linux */
+#define EVENT_LINUX_MASK \
+ ((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \
+ (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \
+ (EVENT_IFM_MASK << EVENT_IFM_SHIFT))
+
+#define EVENT_VALID_MASK \
+ ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
+ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
+ (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
+ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
+ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
+ (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
+ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
+ EVENT_LINUX_MASK | \
+ EVENT_PSEL_MASK)
+
+#define ONLY_PLM \
+ (PERF_SAMPLE_BRANCH_USER |\
+ PERF_SAMPLE_BRANCH_KERNEL |\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Layout of constraint bits:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
+ * |
+ * thresh_sel -*
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
+ * | | | |
+ * BHRB IFM -* | | | Count of events for each PMC.
+ * EBB -* | | p1, p2, p3, p4, p5, p6.
+ * L1 I/D qualifier -* |
+ * nc - number of counters -*
+ *
+ * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
+ * we want the low bit of each field to be added to any existing value.
+ *
+ * Everything else is a value field.
+ */
+
+#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
+#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
+
+/* We just throw all the threshold bits into the constraint */
+#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
+#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
+
+#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
+#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
+
+#define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25)
+#define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK)
+
+#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
+#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
+
+#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
+#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
+
+/*
+ * For NC we are counting up to 4 events. This requires three bits, and we need
+ * the fifth event to overflow and set the 4th bit. To achieve that we bias the
+ * fields by 3 in test_adder.
+ */
+#define CNST_NC_SHIFT 12
+#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
+#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
+#define ISA207_TEST_ADDER (3 << CNST_NC_SHIFT)
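+
+/*
+ * Worked example: during the constraint check the bias of 3 plus four
+ * events gives 7, which still fits in three bits; a fifth event pushes
+ * it to 8, setting the CNST_NC_MASK bit and flagging over-commitment.
+ */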
+
+/*
+ * For the per-PMC fields we have two bits. The low bit is added, so if two
+ * events ask for the same PMC the sum will overflow, setting the high bit,
+ * indicating an error. So our mask sets the high bit.
+ */
+#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
+#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
+#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
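+
+/*
+ * Worked example: two events both asking for PMC1 each contribute
+ * CNST_PMC_VAL(1) = 1, so the two-bit field sums to 2, which is
+ * exactly CNST_PMC_MASK(1) and therefore reports a conflict.
+ */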
+
+/* Our add_fields is defined as: */
+#define ISA207_ADD_FIELDS \
+ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
+ CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
+
+
+/* Bits in MMCR1 for PowerISA v2.07 */
+#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
+#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
+#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT 36
+#define MMCR1_DC_QUAL_SHIFT 47
+#define MMCR1_IC_QUAL_SHIFT 46
+
+/* Bits in MMCRA for PowerISA v2.07 */
+#define MMCRA_SAMP_MODE_SHIFT 1
+#define MMCRA_SAMP_ELIG_SHIFT 4
+#define MMCRA_THR_CTL_SHIFT 8
+#define MMCRA_THR_SEL_SHIFT 16
+#define MMCRA_THR_CMP_SHIFT 32
+#define MMCRA_SDAR_MODE_TLB (1ull << 42)
+#define MMCRA_IFM_SHIFT 30
+
+/* Bits in MMCR2 for PowerISA v2.07 */
+#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
+#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
+#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
+
+#define MAX_ALT 2
+#define MAX_PMU_COUNTERS 6
+
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp);
+int isa207_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[],
+ struct perf_event *pevents[]);
+void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
+
+#endif
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 7cf3b4378192..5fde2b192fec 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -12,10 +12,7 @@
#define pr_fmt(fmt) "power8-pmu: " fmt
-#include <linux/kernel.h>
-#include <linux/perf_event.h>
-#include <asm/firmware.h>
-#include <asm/cputable.h>
+#include "isa207-common.h"
/*
* Some power8 event codes.
@@ -28,465 +25,11 @@ enum {
#undef EVENT
-/*
- * Raw event encoding for POWER8:
- *
- * 60 56 52 48 44 40 36 32
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * | | [ ] [ thresh_cmp ] [ thresh_ctl ]
- * | | | |
- * | | *- IFM (Linux) thresh start/stop OR FAB match -*
- * | *- BHRB (Linux)
- * *- EBB (Linux)
- *
- * 28 24 20 16 12 8 4 0
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
- * | | | | |
- * | | | | *- mark
- * | | *- L1/L2/L3 cache_sel |
- * | | |
- * | *- sampling mode for marked events *- combine
- * |
- * *- thresh_sel
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[x] = combine (PMCxCOMB)
- *
- * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
- * # PM_MRK_FAB_RSP_MATCH
- * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
- * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
- * # PM_MRK_FAB_RSP_MATCH_CYC
- * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
- * else
- * MMCRA[48:55] = thresh_ctl (THRESH START/END)
- *
- * if thresh_sel:
- * MMCRA[45:47] = thresh_sel
- *
- * if thresh_cmp:
- * MMCRA[22:24] = thresh_cmp[0:2]
- * MMCRA[25:31] = thresh_cmp[3:9]
- *
- * if unit == 6 or unit == 7
- * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
- * else if unit == 8 or unit == 9:
- * if cache_sel[0] == 0: # L3 bank
- * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
- * else if cache_sel[0] == 1:
- * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
- * else if cache_sel[1]: # L1 event
- * MMCR1[16] = cache_sel[2]
- * MMCR1[17] = cache_sel[3]
- *
- * if mark:
- * MMCRA[63] = 1 (SAMPLE_ENABLE)
- * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
- * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
- *
- * if EBB and BHRB:
- * MMCRA[32:33] = IFM
- *
- */
-
-#define EVENT_EBB_MASK 1ull
-#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT
-#define EVENT_BHRB_MASK 1ull
-#define EVENT_BHRB_SHIFT 62
-#define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT)
-#define EVENT_IFM_MASK 3ull
-#define EVENT_IFM_SHIFT 60
-#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
-#define EVENT_THR_CMP_MASK 0x3ff
-#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
-#define EVENT_THR_CTL_MASK 0xffull
-#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
-#define EVENT_THR_SEL_MASK 0x7
-#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
-#define EVENT_THRESH_MASK 0x1fffffull
-#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
-#define EVENT_SAMPLE_MASK 0x1f
-#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
-#define EVENT_CACHE_SEL_MASK 0xf
-#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
-#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
-#define EVENT_PMC_MASK 0xf
-#define EVENT_UNIT_SHIFT 12 /* Unit */
-#define EVENT_UNIT_MASK 0xf
-#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
-#define EVENT_COMBINE_MASK 0x1
-#define EVENT_MARKED_SHIFT 8 /* Marked bit */
-#define EVENT_MARKED_MASK 0x1
-#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
-#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
-
-/* Bits defined by Linux */
-#define EVENT_LINUX_MASK \
- ((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \
- (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \
- (EVENT_IFM_MASK << EVENT_IFM_SHIFT))
-
-#define EVENT_VALID_MASK \
- ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
- (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
- (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
- (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
- (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
- (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
- (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
- EVENT_LINUX_MASK | \
- EVENT_PSEL_MASK)
-
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
-#define ONLY_PLM \
- (PERF_SAMPLE_BRANCH_USER |\
- PERF_SAMPLE_BRANCH_KERNEL |\
- PERF_SAMPLE_BRANCH_HV)
-
-/*
- * Layout of constraint bits:
- *
- * 60 56 52 48 44 40 36 32
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
- * |
- * thresh_sel -*
- *
- * 28 24 20 16 12 8 4 0
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
- * | | | |
- * BHRB IFM -* | | | Count of events for each PMC.
- * EBB -* | | p1, p2, p3, p4, p5, p6.
- * L1 I/D qualifier -* |
- * nc - number of counters -*
- *
- * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
- * we want the low bit of each field to be added to any existing value.
- *
- * Everything else is a value field.
- */
-
-#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
-#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
-
-/* We just throw all the threshold bits into the constraint */
-#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
-#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
-
-#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
-#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
-
-#define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25)
-#define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK)
-
-#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
-#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
-
-#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
-#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
-
-/*
- * For NC we are counting up to 4 events. This requires three bits, and we need
- * the fifth event to overflow and set the 4th bit. To achieve that we bias the
- * fields by 3 in test_adder.
- */
-#define CNST_NC_SHIFT 12
-#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
-#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
-#define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT)
-
-/*
- * For the per-PMC fields we have two bits. The low bit is added, so if two
- * events ask for the same PMC the sum will overflow, setting the high bit,
- * indicating an error. So our mask sets the high bit.
- */
-#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
-#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
-#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
-
-/* Our add_fields is defined as: */
-#define POWER8_ADD_FIELDS \
- CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
- CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
-
-
-/* Bits in MMCR1 for POWER8 */
-#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
-#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
-#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
-#define MMCR1_FAB_SHIFT 36
-#define MMCR1_DC_QUAL_SHIFT 47
-#define MMCR1_IC_QUAL_SHIFT 46
-
-/* Bits in MMCRA for POWER8 */
-#define MMCRA_SAMP_MODE_SHIFT 1
-#define MMCRA_SAMP_ELIG_SHIFT 4
-#define MMCRA_THR_CTL_SHIFT 8
-#define MMCRA_THR_SEL_SHIFT 16
-#define MMCRA_THR_CMP_SHIFT 32
-#define MMCRA_SDAR_MODE_TLB (1ull << 42)
-#define MMCRA_IFM_SHIFT 30
-
-/* Bits in MMCR2 for POWER8 */
-#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
-#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
-#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
-
-
-static inline bool event_is_fab_match(u64 event)
-{
- /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
- event &= 0xff0fe;
-
- /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
- return (event == 0x30056 || event == 0x4f052);
-}
-
-static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
-{
- unsigned int unit, pmc, cache, ebb;
- unsigned long mask, value;
-
- mask = value = 0;
-
- if (event & ~EVENT_VALID_MASK)
- return -1;
-
- pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
- ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
-
- if (pmc) {
- u64 base_event;
-
- if (pmc > 6)
- return -1;
-
- /* Ignore Linux defined bits when checking event below */
- base_event = event & ~EVENT_LINUX_MASK;
-
- if (pmc >= 5 && base_event != PM_RUN_INST_CMPL &&
- base_event != PM_RUN_CYC)
- return -1;
-
- mask |= CNST_PMC_MASK(pmc);
- value |= CNST_PMC_VAL(pmc);
- }
-
- if (pmc <= 4) {
- /*
- * Add to number of counters in use. Note this includes events with
- * a PMC of 0 - they still need a PMC, it's just assigned later.
- * Don't count events on PMC 5 & 6, there is only one valid event
- * on each of those counters, and they are handled above.
- */
- mask |= CNST_NC_MASK;
- value |= CNST_NC_VAL;
- }
-
- if (unit >= 6 && unit <= 9) {
- /*
- * L2/L3 events contain a cache selector field, which is
- * supposed to be programmed into MMCRC. However MMCRC is only
- * HV writable, and there is no API for guest kernels to modify
- * it. The solution is for the hypervisor to initialise the
- * field to zeroes, and for us to only ever allow events that
- * have a cache selector of zero. The bank selector (bit 3) is
- * irrelevant, as long as the rest of the value is 0.
- */
- if (cache & 0x7)
- return -1;
-
- } else if (event & EVENT_IS_L1) {
- mask |= CNST_L1_QUAL_MASK;
- value |= CNST_L1_QUAL_VAL(cache);
- }
-
- if (event & EVENT_IS_MARKED) {
- mask |= CNST_SAMPLE_MASK;
- value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
- }
-
- /*
- * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
- * the threshold control bits are used for the match value.
- */
- if (event_is_fab_match(event)) {
- mask |= CNST_FAB_MATCH_MASK;
- value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
- } else {
- /*
- * Check the mantissa upper two bits are not zero, unless the
- * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
- */
- unsigned int cmp, exp;
-
- cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- exp = cmp >> 7;
-
- if (exp && (cmp & 0x60) == 0)
- return -1;
-
- mask |= CNST_THRESH_MASK;
- value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
- }
-
- if (!pmc && ebb)
- /* EBB events must specify the PMC */
- return -1;
-
- if (event & EVENT_WANTS_BHRB) {
- if (!ebb)
- /* Only EBB events can request BHRB */
- return -1;
-
- mask |= CNST_IFM_MASK;
- value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
- }
-
- /*
- * All events must agree on EBB, either all request it or none.
- * EBB events are pinned & exclusive, so this should never actually
- * hit, but we leave it as a fallback in case.
- */
- mask |= CNST_EBB_VAL(ebb);
- value |= CNST_EBB_MASK;
-
- *maskp = mask;
- *valp = value;
-
- return 0;
-}
-
-static int power8_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[],
- struct perf_event *pevents[])
-{
- unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
- unsigned int pmc, pmc_inuse;
- int i;
-
- pmc_inuse = 0;
-
- /* First pass to count resource use */
- for (i = 0; i < n_ev; ++i) {
- pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- if (pmc)
- pmc_inuse |= 1 << pmc;
- }
-
- /* In continuous sampling mode, update SDAR on TLB miss */
- mmcra = MMCRA_SDAR_MODE_TLB;
- mmcr1 = mmcr2 = 0;
-
- /* Second pass: assign PMCs, set all MMCR1 fields */
- for (i = 0; i < n_ev; ++i) {
- pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
- psel = event[i] & EVENT_PSEL_MASK;
-
- if (!pmc) {
- for (pmc = 1; pmc <= 4; ++pmc) {
- if (!(pmc_inuse & (1 << pmc)))
- break;
- }
-
- pmc_inuse |= 1 << pmc;
- }
-
- if (pmc <= 4) {
- mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
- mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
- mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
- }
-
- if (event[i] & EVENT_IS_L1) {
- cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
- mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
- cache >>= 1;
- mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
- }
-
- if (event[i] & EVENT_IS_MARKED) {
- mmcra |= MMCRA_SAMPLE_ENABLE;
-
- val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
- if (val) {
- mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
- mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
- }
- }
-
- /*
- * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
- * the threshold bits are used for the match value.
- */
- if (event_is_fab_match(event[i])) {
- mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
- EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
- } else {
- val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
- mmcra |= val << MMCRA_THR_CTL_SHIFT;
- val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
- mmcra |= val << MMCRA_THR_SEL_SHIFT;
- val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- mmcra |= val << MMCRA_THR_CMP_SHIFT;
- }
-
- if (event[i] & EVENT_WANTS_BHRB) {
- val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
- mmcra |= val << MMCRA_IFM_SHIFT;
- }
-
- if (pevents[i]->attr.exclude_user)
- mmcr2 |= MMCR2_FCP(pmc);
-
- if (pevents[i]->attr.exclude_hv)
- mmcr2 |= MMCR2_FCH(pmc);
-
- if (pevents[i]->attr.exclude_kernel) {
- if (cpu_has_feature(CPU_FTR_HVMODE))
- mmcr2 |= MMCR2_FCH(pmc);
- else
- mmcr2 |= MMCR2_FCS(pmc);
- }
-
- hwc[i] = pmc - 1;
- }
-
- /* Return MMCRx values */
- mmcr[0] = 0;
-
- /* pmc_inuse is 1-based */
- if (pmc_inuse & 2)
- mmcr[0] = MMCR0_PMC1CE;
-
- if (pmc_inuse & 0x7c)
- mmcr[0] |= MMCR0_PMCjCE;
-
- /* If we're not using PMC 5 or 6, freeze them */
- if (!(pmc_inuse & 0x60))
- mmcr[0] |= MMCR0_FC56;
-
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
- mmcr[3] = mmcr2;
-
- return 0;
-}
-
-#define MAX_ALT 2
-
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
@@ -567,12 +110,6 @@ static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return num_alt;
}
-static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
-{
- if (pmc <= 3)
- mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
-}
-
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
@@ -841,16 +378,16 @@ static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
static struct power_pmu power8_pmu = {
.name = "POWER8",
- .n_counter = 6,
+ .n_counter = MAX_PMU_COUNTERS,
.max_alternatives = MAX_ALT + 1,
- .add_fields = POWER8_ADD_FIELDS,
- .test_adder = POWER8_TEST_ADDER,
- .compute_mmcr = power8_compute_mmcr,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power8_config_bhrb,
.bhrb_filter_map = power8_bhrb_filter_map,
- .get_constraint = power8_get_constraint,
+ .get_constraint = isa207_get_constraint,
.get_alternatives = power8_get_alternatives,
- .disable_pmc = power8_disable_pmc,
+ .disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
new file mode 100644
index 000000000000..6447dc1c3d89
--- /dev/null
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -0,0 +1,55 @@
+/*
+ * Performance counter support for POWER9 processors.
+ *
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Power9 event codes.
+ */
+EVENT(PM_CYC, 0x0001e)
+EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
+EVENT(PM_CMPLU_STALL, 0x1e054)
+EVENT(PM_INST_CMPL, 0x00002)
+EVENT(PM_BRU_CMPL, 0x40060)
+EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+
+/* All L1 D cache load references counted at finish, gated by reject */
+EVENT(PM_LD_REF_L1, 0x100fc)
+/* Load Missed L1 */
+EVENT(PM_LD_MISS_L1_FIN, 0x2c04e)
+/* Store Missed L1 */
+EVENT(PM_ST_MISS_L1, 0x300f0)
+/* L1 cache data prefetches */
+EVENT(PM_L1_PREF, 0x20054)
+/* Instruction fetches from L1 */
+EVENT(PM_INST_FROM_L1, 0x04080)
+/* Demand iCache Miss */
+EVENT(PM_L1_ICACHE_MISS, 0x200fd)
+/* Instruction Demand sectors written into IL1 */
+EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
+/* Instruction prefetch written into IL1 */
+EVENT(PM_IC_PREF_WRITE, 0x0488c)
+/* The data cache was reloaded from local core's L3 due to a demand load */
+EVENT(PM_DATA_FROM_L3, 0x4c042)
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+/* All successful D-side store dispatches for this thread */
+EVENT(PM_L2_ST, 0x16880)
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+EVENT(PM_L2_ST_MISS, 0x26880)
+/* Total HW L3 prefetches(Load+store) */
+EVENT(PM_L3_PREF_ALL, 0x4e052)
+/* Data PTEG reload */
+EVENT(PM_DTLB_MISS, 0x300fc)
+/* ITLB Reloaded */
+EVENT(PM_ITLB_MISS, 0x400fc)
+/* Run_Instructions */
+EVENT(PM_RUN_INST_CMPL, 0x500fa)
+/* Run_cycles */
+EVENT(PM_RUN_CYC, 0x600f4)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
new file mode 100644
index 000000000000..788346303852
--- /dev/null
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -0,0 +1,330 @@
+/*
+ * Performance counter support for POWER9 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ * Copyright 2013 Michael Ellerman, IBM Corporation.
+ * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or later version.
+ */
+
+#define pr_fmt(fmt) "power9-pmu: " fmt
+
+#include "isa207-common.h"
+
+/*
+ * Some power9 event codes.
+ */
+#define EVENT(_name, _code) _name = _code,
+
+enum {
+#include "power9-events-list.h"
+};
+
+#undef EVENT
+
+/* MMCRA IFM bits - POWER9 */
+#define POWER9_MMCRA_IFM1 0x0000000040000000UL
+#define POWER9_MMCRA_IFM2 0x0000000080000000UL
+#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_CMPL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
+CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
+CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
+CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
+CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
+CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
+CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(branch-loads, PM_BRU_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
+
+static struct attribute *power9_events_attr[] = {
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(PM_CMPLU_STALL),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_BRU_CMPL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
+ CACHE_EVENT_PTR(PM_LD_REF_L1),
+ CACHE_EVENT_PTR(PM_L1_PREF),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_INST_FROM_L1),
+ CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3),
+ CACHE_EVENT_PTR(PM_L3_PREF_ALL),
+ CACHE_EVENT_PTR(PM_L2_ST_MISS),
+ CACHE_EVENT_PTR(PM_L2_ST),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_BRU_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
+ NULL
+};
+
+static struct attribute_group power9_pmu_events_group = {
+ .name = "events",
+ .attrs = power9_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-49");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+PMU_FORMAT_ATTR(cache_sel, "config:20-23");
+PMU_FORMAT_ATTR(sample_mode, "config:24-28");
+PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
+PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
+PMU_FORMAT_ATTR(thresh_start, "config:36-39");
+PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
+
+static struct attribute *power9_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ &format_attr_cache_sel.attr,
+ &format_attr_sample_mode.attr,
+ &format_attr_thresh_sel.attr,
+ &format_attr_thresh_stop.attr,
+ &format_attr_thresh_start.attr,
+ &format_attr_thresh_cmp.attr,
+ NULL,
+};
+
+struct attribute_group power9_pmu_format_group = {
+ .name = "format",
+ .attrs = power9_pmu_format_attr,
+};
+
+static const struct attribute_group *power9_pmu_attr_groups[] = {
+ &power9_pmu_format_group,
+ &power9_pmu_events_group,
+ NULL,
+};
+
+static int power9_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
+};
+
+static u64 power9_bhrb_filter_map(u64 branch_sample_type)
+{
+ u64 pmu_bhrb_filter = 0;
+
+ /* BHRB and regular PMU events share the same privilege state
+ * filter configuration. BHRB is always recorded along with a
+ * regular PMU event. As the privilege state filter is handled
+ * in the basic PMC configuration of the accompanying regular
+ * PMU event, we ignore any separate BHRB specific request.
+ */
+
+ /* No branch filter requested */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
+ return pmu_bhrb_filter;
+
+ /* Invalid branch filter options - HW does not support */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
+ return pmu_bhrb_filter;
+ }
+
+ /* Everything else is unsupported */
+ return -1;
+}
+
+static void power9_config_bhrb(u64 pmu_bhrb_filter)
+{
+ /* Enable BHRB filter in PMU */
+ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
+}
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L2_ST,
+ [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_BRU_CMPL,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
+static struct power_pmu power9_pmu = {
+ .name = "POWER9",
+ .n_counter = MAX_PMU_COUNTERS,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
+ .config_bhrb = power9_config_bhrb,
+ .bhrb_filter_map = power9_bhrb_filter_map,
+ .get_constraint = isa207_get_constraint,
+ .disable_pmc = isa207_disable_pmc,
+ .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .n_generic = ARRAY_SIZE(power9_generic_events),
+ .generic_events = power9_generic_events,
+ .cache_events = &power9_cache_events,
+ .attr_groups = power9_pmu_attr_groups,
+ .bhrb_nr = 32,
+};
+
+static int __init init_power9_pmu(void)
+{
+ int rc;
+
+ /* Comes from cpu_specs[] */
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
+ return -ENODEV;
+
+ rc = register_power_pmu(&power9_pmu);
+ if (rc)
+ return rc;
+
+ /* Tell userspace that EBB is supported */
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
+
+ return 0;
+}
+early_initcall(init_power9_pmu);
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index ddc12a1926ef..1c8aec6e9bb7 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -105,9 +105,7 @@ static void __init ep405_setup_arch(void)
static int __init ep405_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ep405"))
+ if (!of_machine_is_compatible("ep405"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index b0c46375dd95..2a050007bbae 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -63,7 +63,7 @@ static const char * const board[] __initconst = {
static int __init ppc40x_probe(void)
{
- if (of_flat_dt_match(of_get_flat_dt_root(), board)) {
+ if (of_device_compatible_match(of_root, board)) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index 9aa7ae2f4164..91a08ea758a8 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -37,9 +37,7 @@ machine_device_initcall(virtex, virtex_device_probe);
static int __init virtex_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "xlnx,virtex"))
+ if (!of_machine_is_compatible("xlnx,virtex"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
index f7ac2d0fcb44..e5797815e2f1 100644
--- a/arch/powerpc/platforms/40x/walnut.c
+++ b/arch/powerpc/platforms/40x/walnut.c
@@ -46,9 +46,7 @@ machine_device_initcall(walnut, walnut_device_probe);
static int __init walnut_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,walnut"))
+ if (!of_machine_is_compatible("ibm,walnut"))
return 0;
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
index 22ca5430c9cb..157f4ce46386 100644
--- a/arch/powerpc/platforms/44x/canyonlands.c
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -53,11 +53,10 @@ machine_device_initcall(canyonlands, ppc460ex_device_probe);
static int __init ppc460ex_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) {
+ if (of_machine_is_compatible("amcc,canyonlands")) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
- }
+ }
return 0;
}
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index ae893226392d..1070225f5f9b 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -49,9 +49,7 @@ machine_device_initcall(ebony, ebony_device_probe);
*/
static int __init ebony_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
+ if (!of_machine_is_compatible("ibm,ebony"))
return 0;
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index c7c6758b3cfe..5f296dd6b1c0 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -149,9 +149,7 @@ static void __init iss4xx_setup_arch(void)
*/
static int __init iss4xx_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+ if (!of_machine_is_compatible("ibm,iss-4xx"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 573c3d2689c6..8d6e4da9dfbe 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -67,11 +67,10 @@ static char *board[] __initdata = {
static int __init ppc44x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
int i = 0;
for (i = 0; i < ARRAY_SIZE(board); i++) {
- if (of_flat_dt_is_compatible(root, board[i])) {
+ if (of_machine_is_compatible(board[i])) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index c11ce6516c8f..e55933f9cd55 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -68,7 +68,7 @@ DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup);
#define AVR_PWRCTL_RESET (0x02)
static struct i2c_client *avr_i2c_client;
-static void avr_halt_system(int pwrctl_flags)
+static void __noreturn avr_halt_system(int pwrctl_flags)
{
/* Request the AVR to reset the system */
i2c_smbus_write_byte_data(avr_i2c_client,
@@ -84,7 +84,7 @@ static void avr_power_off_system(void)
avr_halt_system(AVR_PWRCTL_PWROFF);
}
-static void avr_reset_system(char *cmd)
+static void __noreturn avr_reset_system(char *cmd)
{
avr_halt_system(AVR_PWRCTL_RESET);
}
@@ -275,12 +275,10 @@ static void ppc47x_pci_irq_fixup(struct pci_dev *dev)
*/
static int __init ppc47x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "ibm,akebono"))
+ if (of_machine_is_compatible("ibm,akebono"))
return 1;
- if (of_flat_dt_is_compatible(root, "ibm,currituck")) {
+ if (of_machine_is_compatible("ibm,currituck")) {
ppc_md.pci_irq_fixup = ppc47x_pci_irq_fixup;
return 1;
}
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 3ee4a03c1496..688ffeab0699 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -46,9 +46,7 @@ machine_device_initcall(sam440ep, sam440ep_device_probe);
static int __init sam440ep_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "acube,sam440ep"))
+ if (!of_machine_is_compatible("acube,sam440ep"))
return 0;
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
index ad272c17c640..a7e08026097a 100644
--- a/arch/powerpc/platforms/44x/virtex.c
+++ b/arch/powerpc/platforms/44x/virtex.c
@@ -43,9 +43,7 @@ machine_device_initcall(virtex, virtex_device_probe);
static int __init virtex_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "xlnx,virtex440"))
+ if (!of_machine_is_compatible("xlnx,virtex440"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 501333cf42cf..5ecce543103e 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -44,9 +44,7 @@ machine_device_initcall(warp, warp_device_probe);
static int __init warp_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "pika,warp"))
+ if (!of_machine_is_compatible("pika,warp"))
return 0;
/* For __dma_alloc_coherent */
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 6081fbd75330..add5a5374fa0 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -719,7 +719,7 @@ static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
* most one of a mux, div, and gate each into one 'struct clk'
* item
* - PSC/MSCAN/SPDIF clock generation OTOH already is very
- * specific and cannot get mapped to componsites (at least not
+ * specific and cannot get mapped to composites (at least not
* a single one, maybe two of them, but then some of these
* intermediate clock signals get referenced elsewhere (e.g.
* in the clock frequency measurement, CFM) and thus need
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
index 3e90ece10ae9..f65d5033cdb0 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -57,9 +57,12 @@ static void __init mpc5121_ads_init_IRQ(void)
*/
static int __init mpc5121_ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
+ if (!of_machine_is_compatible("fsl,mpc5121ads"))
+ return 0;
- return of_flat_dt_is_compatible(root, "fsl,mpc5121ads");
+ mpc512x_init_early();
+
+ return 1;
}
define_machine(mpc5121_ads) {
@@ -67,7 +70,6 @@ define_machine(mpc5121_ads) {
.probe = mpc5121_ads_probe,
.setup_arch = mpc5121_ads_setup_arch,
.init = mpc512x_init,
- .init_early = mpc512x_init_early,
.init_IRQ = mpc5121_ads_init_IRQ,
.get_irq = ipic_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index cc97f022d028..14ba49fd7938 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -18,6 +18,6 @@ extern void __init mpc512x_setup_arch(void);
extern int __init mpc5121_clk_init(void);
extern const char *mpc512x_select_psc_compat(void);
extern const char *mpc512x_select_reset_compat(void);
-extern void mpc512x_restart(char *cmd);
+extern void __noreturn mpc512x_restart(char *cmd);
#endif /* __MPC512X_H__ */
diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index ce71408781a0..bf884d3075e4 100644
--- a/arch/powerpc/platforms/512x/mpc512x_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -38,14 +38,18 @@ static const char * const board[] __initconst = {
*/
static int __init mpc512x_generic_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ if (!of_device_compatible_match(of_root, board))
+ return 0;
+
+ mpc512x_init_early();
+
+ return 1;
}
define_machine(mpc512x_generic) {
.name = "MPC512x generic",
.probe = mpc512x_generic_probe,
.init = mpc512x_init,
- .init_early = mpc512x_init_early,
.setup_arch = mpc512x_setup_arch,
.init_IRQ = mpc512x_init_IRQ,
.get_irq = ipic_get_irq,
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 452da2391153..6b4f4cb7009a 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -47,7 +47,7 @@ static void __init mpc512x_restart_init(void)
of_node_put(np);
}
-void mpc512x_restart(char *cmd)
+void __noreturn mpc512x_restart(char *cmd)
{
if (reset_module_base) {
/* Enable software reset "RSTE" */
diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c
index 116f2325b20b..dc81f05e0bce 100644
--- a/arch/powerpc/platforms/512x/pdm360ng.c
+++ b/arch/powerpc/platforms/512x/pdm360ng.c
@@ -113,9 +113,12 @@ void __init pdm360ng_init(void)
static int __init pdm360ng_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
+ if (!of_machine_is_compatible("ifm,pdm360ng"))
+ return 0;
- return of_flat_dt_is_compatible(root, "ifm,pdm360ng");
+ mpc512x_init_early();
+
+ return 1;
}
define_machine(pdm360ng) {
@@ -123,7 +126,6 @@ define_machine(pdm360ng) {
.probe = pdm360ng_probe,
.setup_arch = mpc512x_setup_arch,
.init = pdm360ng_init,
- .init_early = mpc512x_init_early,
.init_IRQ = mpc512x_init_IRQ,
.get_irq = ipic_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 6af651e69129..39b49822ace1 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -200,8 +200,7 @@ static void __init efika_setup_arch(void)
static int __init efika_probe(void)
{
- const char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "model", NULL);
+ const char *model = of_get_property(of_root, "model", NULL);
if (model == NULL)
return 0;
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 7492de3cf6d0..c94c385cc919 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -183,7 +183,7 @@ static const char * const board[] __initconst = {
*/
static int __init lite5200_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(lite5200) {
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 8fb95480fd73..a3227040cc86 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -242,7 +242,7 @@ static const char * const board[] __initconst = {
*/
static int __init media5200_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(media5200_platform) {
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 792a301a0bf0..a80c6278d515 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -70,7 +70,7 @@ static const char *board[] __initdata = {
*/
static int __init mpc5200_simple_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(mpc5200_simple_platform) {
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 26993826a797..565e3a83dc9e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -243,8 +243,7 @@ EXPORT_SYMBOL(mpc52xx_get_xtal_freq);
/**
* mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
*/
-void
-mpc52xx_restart(char *cmd)
+void __noreturn mpc52xx_restart(char *cmd)
{
local_irq_disable();
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 6781bda117be..cdab847749e6 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -309,8 +309,7 @@ machine_device_initcall(ep8248e, declare_of_platform_devices);
*/
static int __init ep8248e_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,ep8248e");
+ return of_machine_is_compatible("fsl,ep8248e");
}
define_machine(ep8248e)
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 387b446f4161..28860e40b5db 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -198,8 +198,7 @@ machine_device_initcall(km82xx, declare_of_platform_devices);
*/
static int __init km82xx_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "keymile,km82xx");
+ return of_machine_is_compatible("keymile,km82xx");
}
define_machine(km82xx)
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index d24deacf07d0..d23c10a96bde 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -201,8 +201,7 @@ machine_device_initcall(mpc8272_ads, declare_of_platform_devices);
*/
static int __init mpc8272_ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,mpc8272ads");
+ return of_machine_is_compatible("fsl,mpc8272ads");
}
define_machine(mpc8272_ads)
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index fc8b2d6a7d8d..c4f7029fc9ae 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -22,7 +22,7 @@
#define RMR_CSRE 0x00000001
-void pq2_restart(char *cmd)
+void __noreturn pq2_restart(char *cmd)
{
local_irq_disable();
setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);
diff --git a/arch/powerpc/platforms/82xx/pq2.h b/arch/powerpc/platforms/82xx/pq2.h
index a41f84ae2325..3080ce3441c1 100644
--- a/arch/powerpc/platforms/82xx/pq2.h
+++ b/arch/powerpc/platforms/82xx/pq2.h
@@ -1,7 +1,7 @@
#ifndef _PQ2_H
#define _PQ2_H
-void pq2_restart(char *cmd);
+void __noreturn pq2_restart(char *cmd);
#ifdef CONFIG_PCI
int pq2ads_pci_init_irq(void);
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index 3a5164ad10ad..6c654dc74a4b 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -164,8 +164,7 @@ static void __init pq2fads_setup_arch(void)
*/
static int __init pq2fads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,pq2fads");
+ return of_machine_is_compatible("fsl,pq2fads");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 464ea8e0292d..17e54339f8d9 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -43,8 +43,7 @@ machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices);
*/
static int __init asp834x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "analogue-and-micro,asp8347e");
+ return of_machine_is_compatible("analogue-and-micro,asp8347e");
}
define_machine(asp834x) {
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 4bc6bbbe9ada..e7fbd6366abb 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -171,11 +171,10 @@ static char *board[] __initdata = {
*/
static int __init mpc83xx_km_probe(void)
{
- unsigned long node = of_get_flat_dt_root();
int i = 0;
while (board[i]) {
- if (of_flat_dt_is_compatible(node, board[i]))
+ if (of_machine_is_compatible(board[i]))
break;
i++;
}
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 7e923cad56cf..8899aa9d11f5 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -35,7 +35,7 @@ static int __init mpc83xx_restart_init(void)
arch_initcall(mpc83xx_restart_init);
-void mpc83xx_restart(char *cmd)
+void __noreturn mpc83xx_restart(char *cmd)
{
#define RST_OFFSET 0x00000900
#define RST_PROT_REG 0x00000018
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 4f2d9fea77b7..040d5d085467 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -46,7 +46,7 @@ static const char *board[] __initdata = {
*/
static int __init mpc830x_rdb_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index fa25977c52de..40e0d8307b59 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -46,7 +46,7 @@ static const char *board[] __initdata = {
*/
static int __init mpc831x_rdb_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index a973b2ae5df6..cdfa47c4d394 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -102,9 +102,7 @@ machine_device_initcall(mpc832x_mds, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc832x_sys_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC832xMDS");
+ return of_machine_is_compatible("MPC832xMDS");
}
define_machine(mpc832x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index ea2b87d202ca..2ef03e7d248c 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -220,9 +220,7 @@ machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc832x_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC832xRDB");
+ return of_machine_is_compatible("MPC832xRDB");
}
define_machine(mpc832x_rdb) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 80aea8c4b5a3..8fd0c1e8b182 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -70,9 +70,7 @@ static void __init mpc834x_itx_setup_arch(void)
*/
static int __init mpc834x_itx_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC834xMITX");
+ return of_machine_is_compatible("MPC834xMITX");
}
define_machine(mpc834x_itx) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 553e793a4a93..eeaee6123bb3 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -91,9 +91,7 @@ machine_device_initcall(mpc834x_mds, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc834x_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC834xMDS");
+ return of_machine_is_compatible("MPC834xMDS");
}
define_machine(mpc834x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index dd70b85f56d4..dacf4c2df069 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -209,9 +209,7 @@ machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
*/
static int __init mpc836x_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC836xMDS");
+ return of_machine_is_compatible("MPC836xMDS");
}
define_machine(mpc836x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index 4cd7153a6c88..cf67ac93ddcb 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -42,9 +42,7 @@ static void __init mpc836x_rdk_setup_arch(void)
*/
static int __init mpc836x_rdk_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,mpc8360rdk");
+ return of_machine_is_compatible("fsl,mpc8360rdk");
}
define_machine(mpc836x_rdk) {
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index e53a60b6c863..652b97d699c9 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -93,9 +93,7 @@ machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices);
*/
static int __init mpc837x_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,mpc837xmds");
+ return of_machine_is_compatible("fsl,mpc837xmds");
}
define_machine(mpc837x_mds) {
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 9813c81e8e5b..667731d81676 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -73,7 +73,7 @@ static const char * const board[] __initconst = {
*/
static int __init mpc837x_rdb_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(mpc837x_rdb) {
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 0cf74d7ea1c5..ad484199eff7 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -65,7 +65,7 @@
* mpc83xx_* files. Mostly for use by mpc83xx_setup
*/
-extern void mpc83xx_restart(char *cmd);
+extern void __noreturn mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
extern int mpc837x_usb_cfg(void);
extern int mpc834x_usb_cfg(void);
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index 26cb3e934722..b867e88dfb0d 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -60,9 +60,7 @@ machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
*/
static int __init sbc834x_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "SBC834xE");
+ return of_machine_is_compatible("SBC834xE");
}
define_machine(sbc834x) {
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c
index dcfafd6b91ee..07dd6ae3ec52 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_qds.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c
@@ -60,9 +60,7 @@ machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices);
static int __init bsc9132_qds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,bsc9132qds");
+ return of_machine_is_compatible("fsl,bsc9132qds");
}
define_machine(bsc9132_qds) {
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
index 9d57bedb940c..e48f6710e6d5 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -50,9 +50,7 @@ machine_device_initcall(bsc9131_rdb, mpc85xx_common_publish_devices);
static int __init bsc9131_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,bsc9131rdb");
+ return of_machine_is_compatible("fsl,bsc9131rdb");
}
define_machine(bsc9131_rdb) {
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 61bc851e9a8e..3b9e3f0f9aec 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -54,9 +54,7 @@ machine_arch_initcall(c293_pcie, mpc85xx_common_publish_devices);
*/
static int __init c293_pcie_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,C293PCIE"))
+ if (of_machine_is_compatible("fsl,C293PCIE"))
return 1;
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index a2b0bc859de0..3a6a84f07f43 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -170,20 +170,19 @@ static const char * const boards[] __initconst = {
*/
static int __init corenet_generic_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
char hv_compat[24];
int i;
#ifdef CONFIG_SMP
extern struct smp_ops_t smp_85xx_ops;
#endif
- if (of_flat_dt_match(root, boards))
+ if (of_device_compatible_match(of_root, boards))
return 1;
/* Check if we're running under the Freescale hypervisor */
for (i = 0; boards[i]; i++) {
snprintf(hv_compat, sizeof(hv_compat), "%s-hv", boards[i]);
- if (of_flat_dt_is_compatible(root, hv_compat)) {
+ if (of_machine_is_compatible(hv_compat)) {
ppc_md.init_IRQ = ehv_pic_init;
ppc_md.get_irq = ehv_pic_get_irq;
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 11790e074c8a..14af36a7fa9c 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -47,9 +47,8 @@ void __init ge_imp3a_pic_init(void)
struct mpic *mpic;
struct device_node *np;
struct device_node *cascade_node = NULL;
- unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(NULL, 0,
MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
@@ -198,9 +197,7 @@ static void ge_imp3a_show_cpuinfo(struct seq_file *m)
*/
static int __init ge_imp3a_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "ge,IMP3A");
+ return of_machine_is_compatible("ge,IMP3A");
}
machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 3dc1bda3ddc3..6ef8580fdc0e 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -44,7 +44,7 @@
static void __iomem *cpld_base = NULL;
-static void machine_restart(char *cmd)
+static void __noreturn machine_restart(char *cmd)
{
if (cpld_base)
out_8(cpld_base + KSI8560_CPLD_RCR1, KSI8560_CPLD_RCR1_CPUHR);
@@ -176,9 +176,7 @@ machine_device_initcall(ksi8560, mpc85xx_common_publish_devices);
*/
static int __init ksi8560_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "emerson,KSI8560");
+ return of_machine_is_compatible("emerson,KSI8560");
}
define_machine(ksi8560) {
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index a378ba3519e9..6ba687f19e45 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -64,9 +64,7 @@ machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier);
*/
static int __init mpc8536_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,mpc8536ds");
+ return of_machine_is_compatible("fsl,mpc8536ds");
}
define_machine(mpc8536_ds) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index de72a5f464b1..8756715c7a47 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -160,9 +160,7 @@ machine_arch_initcall(mpc85xx_ads, mpc85xx_common_publish_devices);
*/
static int __init mpc85xx_ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC85xxADS");
+ return of_machine_is_compatible("MPC85xxADS");
}
define_machine(mpc85xx_ads) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index d7e87ff912d7..62f171c71c4c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -83,7 +83,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
-static void mpc85xx_cds_restart(char *cmd)
+static void __noreturn mpc85xx_cds_restart(char *cmd)
{
struct pci_dev *dev;
u_char tmp;
@@ -367,9 +367,7 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
*/
static int __init mpc85xx_cds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC85xxCDS");
+ return of_machine_is_compatible("MPC85xxCDS");
}
machine_arch_initcall(mpc85xx_cds, mpc85xx_common_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index f858306dba6a..6bc07d837b1c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -66,9 +66,7 @@ void __init mpc85xx_ds_pic_init(void)
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(NULL, 0,
MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
@@ -169,9 +167,7 @@ static void __init mpc85xx_ds_setup_arch(void)
*/
static int __init mpc8544_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "MPC8544DS");
+ return !!of_machine_is_compatible("MPC8544DS");
}
machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
@@ -187,9 +183,7 @@ machine_arch_initcall(p2020_ds, swiotlb_setup_bus_notifier);
*/
static int __init mpc8572_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "fsl,MPC8572DS");
+ return !!of_machine_is_compatible("fsl,MPC8572DS");
}
/*
@@ -197,9 +191,7 @@ static int __init mpc8572_ds_probe(void)
*/
static int __init p2020_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "fsl,P2020DS");
+ return !!of_machine_is_compatible("fsl,P2020DS");
}
define_machine(mpc8544_ds) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index f61cbe235581..fa9cd710d2ae 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -376,9 +376,7 @@ static void __init mpc85xx_mds_pic_init(void)
static int __init mpc85xx_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MPC85xxMDS");
+ return of_machine_is_compatible("MPC85xxMDS");
}
define_machine(mpc8568_mds) {
@@ -398,9 +396,7 @@ define_machine(mpc8568_mds) {
static int __init mpc8569_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,MPC8569EMDS");
+ return of_machine_is_compatible("fsl,MPC8569EMDS");
}
define_machine(mpc8569_mds) {
@@ -420,9 +416,7 @@ define_machine(mpc8569_mds) {
static int __init p1021_mds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1021MDS");
+ return of_machine_is_compatible("fsl,P1021MDS");
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 3f4dad133338..c1499cbf3786 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -47,13 +47,12 @@
void __init mpc85xx_rdb_pic_init(void)
{
struct mpic *mpic;
- unsigned long root = of_get_flat_dt_root();
#ifdef CONFIG_QUICC_ENGINE
struct device_node *np;
#endif
- if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
+ if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) {
mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
@@ -148,80 +147,60 @@ machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices);
*/
static int __init p2020_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P2020RDB"))
+ if (of_machine_is_compatible("fsl,P2020RDB"))
return 1;
return 0;
}
static int __init p1020_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P1020RDB"))
+ if (of_machine_is_compatible("fsl,P1020RDB"))
return 1;
return 0;
}
static int __init p1020_rdb_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC");
+ return of_machine_is_compatible("fsl,P1020RDB-PC");
}
static int __init p1020_rdb_pd_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PD");
+ return of_machine_is_compatible("fsl,P1020RDB-PD");
}
static int __init p1021_rdb_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P1021RDB-PC"))
+ if (of_machine_is_compatible("fsl,P1021RDB-PC"))
return 1;
return 0;
}
static int __init p2020_rdb_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P2020RDB-PC"))
+ if (of_machine_is_compatible("fsl,P2020RDB-PC"))
return 1;
return 0;
}
static int __init p1025_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1025RDB");
+ return of_machine_is_compatible("fsl,P1025RDB");
}
static int __init p1020_mbg_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020MBG-PC");
+ return of_machine_is_compatible("fsl,P1020MBG-PC");
}
static int __init p1020_utm_pc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1020UTM-PC");
+ return of_machine_is_compatible("fsl,P1020UTM-PC");
}
static int __init p1024_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1024RDB");
+ return of_machine_is_compatible("fsl,P1024RDB");
}
define_machine(p2020_rdb) {
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
index 1233050560ae..acc3d0d6049d 100644
--- a/arch/powerpc/platforms/85xx/mvme2500.c
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -53,9 +53,7 @@ machine_arch_initcall(mvme2500, mpc85xx_common_publish_devices);
*/
static int __init mvme2500_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "artesyn,MVME2500");
+ return of_machine_is_compatible("artesyn,MVME2500");
}
define_machine(mvme2500) {
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index ad1a3d438a9e..661d7b59e413 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -62,11 +62,9 @@ machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier);
*/
static int __init p1010_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,P1010RDB"))
+ if (of_machine_is_compatible("fsl,P1010RDB"))
return 1;
- if (of_flat_dt_is_compatible(root, "fsl,P1010RDB-PB"))
+ if (of_machine_is_compatible("fsl,P1010RDB-PB"))
return 1;
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 371df822e88e..63568d68c76f 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -555,9 +555,7 @@ machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
*/
static int __init p1022_ds_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,p1022ds");
+ return of_machine_is_compatible("fsl,p1022ds");
}
define_machine(p1022_ds) {
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 5087becaa8bc..2f2943600301 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -135,9 +135,7 @@ machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier);
*/
static int __init p1022_rdk_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,p1022rdk");
+ return of_machine_is_compatible("fsl,p1022rdk");
}
define_machine(p1022_rdk) {
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index d5b7509825de..40d8de57c341 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -100,9 +100,7 @@ static void __init mpc85xx_rdb_pic_init(void)
static int __init p1023_rdb_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,P1023RDB");
+ return of_machine_is_compatible("fsl,P1023RDB");
}
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 12019f17f297..2410167b290a 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -81,9 +81,7 @@ machine_device_initcall(ppa8548, declare_of_platform_devices);
*/
static int __init ppa8548_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "ppa8548");
+ return of_machine_is_compatible("ppa8548");
}
define_machine(ppa8548) {
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 8ad2fe6f200a..50d745809809 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -62,9 +62,7 @@ static void __init qemu_e500_setup_arch(void)
*/
static int __init qemu_e500_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500");
+ return !!of_machine_is_compatible("fsl,qemu-e500");
}
machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index b07214666d65..62b6c45a5a9b 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -120,9 +120,7 @@ machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices);
*/
static int __init sbc8548_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "SBC8548");
+ return of_machine_is_compatible("SBC8548");
}
define_machine(sbc8548) {
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index ae368e0e1076..cd255acde2e2 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -79,9 +79,7 @@ machine_arch_initcall(socrates, mpc85xx_common_publish_devices);
*/
static int __init socrates_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "abb,socrates"))
+ if (of_machine_is_compatible("abb,socrates"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 6f4939b6309e..91b824c4dc08 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -93,9 +93,7 @@ machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices);
*/
static int __init stx_gp3_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "stx,gp3-8560");
+ return of_machine_is_compatible("stx,gp3-8560");
}
define_machine(stx_gp3) {
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index ec0b7272fae2..b7c54454d611 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -122,7 +122,7 @@ static const char * const board[] __initconst = {
*/
static int __init tqm85xx_probe(void)
{
- return of_flat_dt_match(of_get_flat_dt_root(), board);
+ return of_device_compatible_match(of_root, board);
}
define_machine(tqm85xx) {
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 71bc255b4324..1bc02a87f597 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -128,9 +128,7 @@ machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices);
static int __init twr_p1025_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "fsl,TWR-P1025");
+ return of_machine_is_compatible("fsl,TWR-P1025");
}
define_machine(twr_p1025) {
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 1a9c1085855f..cf0c70ff026e 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -144,23 +144,17 @@ machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices);
*/
static int __init xes_mpc8572_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "xes,MPC8572");
+ return of_machine_is_compatible("xes,MPC8572");
}
static int __init xes_mpc8548_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "xes,MPC8548");
+ return of_machine_is_compatible("xes,MPC8548");
}
static int __init xes_mpc8540_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "xes,MPC8540");
+ return of_machine_is_compatible("xes,MPC8540");
}
define_machine(xes_mpc8572) {
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 3988f16e46c1..ce619bd1f82d 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -60,6 +60,11 @@ config GEF_SBC610
help
This option enables support for the GE SBC610.
+config MVME7100
+ bool "Artesyn MVME7100"
+ help
+ This option enables support for the Emerson/Artesyn MVME7100 board.
+
endif
config MPC8641
@@ -68,7 +73,8 @@ config MPC8641
select FSL_PCI if PCI
select PPC_UDBG_16550
select MPIC
- default y if MPC8641_HPCN || SBC8641D || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A
+ default y if MPC8641_HPCN || SBC8641D || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \
+ || MVME7100
config MPC8610
bool
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 2d889ad7dc89..01958fedc3f2 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o
obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o
obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o
+obj-$(CONFIG_MVME7100) += mvme7100.o
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 8e63b752712c..ef684afb63c6 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -189,9 +189,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*/
static int __init gef_ppc9a_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "gef,ppc9a"))
+ if (of_machine_is_compatible("gef,ppc9a"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 0e0be94f551f..67dd0c231646 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -176,9 +176,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*/
static int __init gef_sbc310_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "gef,sbc310"))
+ if (of_machine_is_compatible("gef,sbc310"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index e8292b492d7e..805026976cac 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -166,9 +166,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*/
static int __init gef_sbc610_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "gef,sbc610"))
+ if (of_machine_is_compatible("gef,sbc610"))
return 1;
return 0;
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 957473e5c8e5..fef0582eddf1 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -319,9 +319,7 @@ static void __init mpc86xx_hpcd_setup_arch(void)
*/
static int __init mpc86xx_hpcd_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,MPC8610HPCD"))
+ if (of_machine_is_compatible("fsl,MPC8610HPCD"))
return 1; /* Looks good */
return 0;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index e5084811b9c6..5ae42a037065 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -96,13 +96,11 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
*/
static int __init mpc86xx_hpcn_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "fsl,mpc8641hpcn"))
+ if (of_machine_is_compatible("fsl,mpc8641hpcn"))
return 1; /* Looks good */
/* Be nice and don't give silent boot death. Delete this in 2.6.27 */
- if (of_flat_dt_is_compatible(root, "mpc86xx")) {
+ if (of_machine_is_compatible("mpc86xx")) {
pr_warning("WARNING: your dts/dtb is old. You must update before the next kernel release\n");
return 1;
}
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
new file mode 100644
index 000000000000..addb41e7cd14
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -0,0 +1,121 @@
+/*
+ * Board setup routines for the Emerson/Artesyn MVME7100
+ *
+ * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
+ *
+ * Author: Alessio Igor Bogani <alessio.bogani@elettra.eu>
+ *
+ * Based on earlier code by:
+ *
+ * Ajit Prem <ajit.prem@emerson.com>
+ * Copyright 2008 Emerson
+ *
+ * USB host fixup is borrowed from:
+ *
+ * Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+#include "mpc86xx.h"
+
+#define MVME7100_INTERRUPT_REG_2_OFFSET 0x05
+#define MVME7100_DS1375_MASK 0x40
+#define MVME7100_MAX6649_MASK 0x20
+#define MVME7100_ABORT_MASK 0x10
+
+/*
+ * Setup the architecture
+ */
+static void __init mvme7100_setup_arch(void)
+{
+ struct device_node *bcsr_node;
+ void __iomem *mvme7100_regs = NULL;
+ u8 reg;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mvme7100_setup_arch()", 0);
+
+#ifdef CONFIG_SMP
+ mpc86xx_smp_init();
+#endif
+
+ fsl_pci_assign_primary();
+
+ /* Remap BCSR registers */
+ bcsr_node = of_find_compatible_node(NULL, NULL,
+ "artesyn,mvme7100-bcsr");
+ if (bcsr_node) {
+ mvme7100_regs = of_iomap(bcsr_node, 0);
+ of_node_put(bcsr_node);
+ }
+
+ if (mvme7100_regs) {
+ /* Disable ds1375, max6649, and abort interrupts */
+ reg = readb(mvme7100_regs + MVME7100_INTERRUPT_REG_2_OFFSET);
+ reg |= MVME7100_DS1375_MASK | MVME7100_MAX6649_MASK
+ | MVME7100_ABORT_MASK;
+ writeb(reg, mvme7100_regs + MVME7100_INTERRUPT_REG_2_OFFSET);
+ } else
+ pr_warn("Unable to map board registers\n");
+
+ pr_info("MVME7100 board from Artesyn\n");
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mvme7100_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "artesyn,MVME7100");
+}
+
+static void mvme7100_usb_host_fixup(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ if (!machine_is(mvme7100))
+ return;
+
+ /* Ensure only ports 1 & 2 are enabled */
+ pci_read_config_dword(pdev, 0xe0, &val);
+ pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x2);
+
+ /* Use the 48 MHz oscillator as system clock and enable EHCI */
+ pci_write_config_dword(pdev, 0xe4, 1 << 5);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
+ mvme7100_usb_host_fixup);
+
+machine_arch_initcall(mvme7100, mpc86xx_common_publish_devices);
+
+define_machine(mvme7100) {
+ .name = "MVME7100",
+ .probe = mvme7100_probe,
+ .setup_arch = mvme7100_setup_arch,
+ .init_IRQ = mpc86xx_init_irq,
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .time_init = mpc86xx_time_init,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+};
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index 2a9cf278c12a..52af5735742e 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -67,9 +67,7 @@ sbc8641_show_cpuinfo(struct seq_file *m)
*/
static int __init sbc8641_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "wind,sbc8641"))
+ if (of_machine_is_compatible("wind,sbc8641"))
return 1; /* Looks good */
return 0;
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index 61cae4c1edb8..333dece79394 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -88,8 +88,7 @@ static void __init adder875_setup(void)
static int __init adder875_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875");
+ return of_machine_is_compatible("analogue-and-micro,adder875");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index 2bedeb7d5f8f..cd0d90f1fb1c 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -143,8 +143,7 @@ static void __init ep88xc_setup_arch(void)
static int __init ep88xc_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,ep88xc");
+ return of_machine_is_compatible("fsl,ep88xc");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index c289fc77b4ba..b1ab6e96cb31 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -198,7 +198,7 @@ void mpc8xx_get_rtc_time(struct rtc_time *tm)
return;
}
-void mpc8xx_restart(char *cmd)
+void __noreturn mpc8xx_restart(char *cmd)
{
car8xx_t __iomem *clk_r = immr_map(im_clkrst);
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 78180c5e73ff..8d02f5ff4481 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -118,8 +118,7 @@ static void __init mpc86xads_setup_arch(void)
static int __init mpc86xads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,mpc866ads");
+ return of_machine_is_compatible("fsl,mpc866ads");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 4d62bf9dc789..e821a42d5816 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -193,8 +193,7 @@ static void __init mpc885ads_setup_arch(void)
static int __init mpc885ads_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "fsl,mpc885ads");
+ return of_machine_is_compatible("fsl,mpc885ads");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/8xx/mpc8xx.h b/arch/powerpc/platforms/8xx/mpc8xx.h
index 239a243a6161..31cc2ecace42 100644
--- a/arch/powerpc/platforms/8xx/mpc8xx.h
+++ b/arch/powerpc/platforms/8xx/mpc8xx.h
@@ -11,7 +11,7 @@
#ifndef __MPC8xx_H
#define __MPC8xx_H
-extern void mpc8xx_restart(char *cmd);
+extern void __noreturn mpc8xx_restart(char *cmd);
extern void mpc8xx_calibrate_decr(void);
extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index bee47a2b23e6..4cea8b1afa44 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -119,9 +119,7 @@ static void __init tqm8xx_setup_arch(void)
static int __init tqm8xx_probe(void)
{
- unsigned long node = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(node, "tqc,tqm8xx");
+ return of_machine_is_compatible("tqc,tqm8xx");
}
static const struct of_device_id of_bus_ids[] __initconst = {
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 3663f71fd913..fbdae8377b71 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -321,6 +321,17 @@ config OF_RTC
Uses information from the OF or flattened device tree to instantiate
platform devices for direct mapped RTC chips like the DS1742 or DS1743.
+config GEN_RTC
+ bool "Use the platform RTC operations from user space"
+ select RTC_CLASS
+ select RTC_DRV_GENERIC
+ help
+ This option provides backwards compatibility with the old gen_rtc.ko
+ module that was traditionally used for old PowerPC machines.
+ Platforms should migrate to enabling RTC_DRV_GENERIC directly,
+ replacing their get_rtc_time/set_rtc_time callbacks with
+ a proper RTC device driver.
+
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
depends on PPC
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 77e9b8d591fb..f32edec13fd1 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,6 @@
config PPC64
bool "64-bit kernel"
default n
- select HAVE_VIRT_CPU_ACCOUNTING
select ZLIB_DEFLATE
help
This option selects whether a 32-bit or a 64-bit kernel
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 2fe12046279e..45cb9821173c 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -123,7 +123,7 @@ static int __init request_isa_regions(void)
}
machine_device_initcall(amigaone, request_isa_regions);
-void amigaone_restart(char *cmd)
+void __noreturn amigaone_restart(char *cmd)
{
local_irq_disable();
@@ -143,9 +143,7 @@ void amigaone_restart(char *cmd)
static int __init amigaone_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "eyetech,amigaone")) {
+ if (of_machine_is_compatible("eyetech,amigaone")) {
/*
* Coherent memory access cause complete system lockup! Thus
* disable this CPU feature, even if the CPU needs it.
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 14a582b21274..f7d1a4953ea0 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -166,7 +166,7 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
unsigned long *io_pte, base_pte;
@@ -178,7 +178,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
* default for now.*/
#ifdef CELL_IOMMU_STRICT_PROTECTION
/* to avoid referencing a global, we use a trick here to setup the
- * protection bit. "prot" is setup to be 3 fields of 4 bits apprended
+ * protection bit. "prot" is setup to be 3 fields of 4 bits appended
* together for each of the 3 supported direction values. It is then
* shifted left so that the fields matching the desired direction
* lands on the appropriate bits, and other bits are masked out.
@@ -193,7 +193,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
- if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
+ if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING))
base_pte &= ~CBE_IOPTE_SO_RW;
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
@@ -338,7 +338,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
start_seg = base >> IO_SEGMENT_SHIFT;
segments = size >> IO_SEGMENT_SHIFT;
pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
- /* PTEs for each segment must start on a 4K bounday */
+ /* PTEs for each segment must start on a 4K boundary */
pages_per_segment = max(pages_per_segment,
(1 << 12) / sizeof(unsigned long));
@@ -526,7 +526,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
__set_bit(0, window->table.it_map);
tce_build_cell(&window->table, window->table.it_offset, 1,
- (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
+ (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
return window;
}
@@ -572,7 +572,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (iommu_fixed_is_weak)
return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
@@ -586,7 +586,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
static void dma_fixed_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (iommu_fixed_is_weak)
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
@@ -598,9 +598,9 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
return dma_direct_ops.map_page(dev, page, offset, size,
direction, attrs);
else
@@ -611,9 +611,9 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
attrs);
else
@@ -623,9 +623,9 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
else
return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
@@ -635,9 +635,9 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
else
ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
@@ -1162,7 +1162,7 @@ static int __init setup_iommu_fixed(char *str)
pciep = of_find_node_by_type(NULL, "pcie-endpoint");
if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
- iommu_fixed_is_weak = 1;
+ iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
of_node_put(pciep);
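
The cell IOMMU hunks above are part of the tree-wide switch from passing DMA attributes as a struct dma_attrs pointer to passing a plain unsigned long bitmask, so dma_get_attr() calls become simple AND tests. A minimal sketch of the new-style check (example_map_page is hypothetical and the return value is a placeholder; only the attrs handling mirrors the conversion):

#include <linux/dma-mapping.h>

static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	/* attrs is now a bitmask, so a bitwise AND replaces dma_get_attr(). */
	if (attrs & DMA_ATTR_WEAK_ORDERING) {
		/* relax store ordering for this mapping */
	}

	return 0; /* placeholder: a real op would return the bus address */
}
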
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index d17e98bc0c10..e7d075077cb0 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -35,6 +35,7 @@
#include <asm/pgtable.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
+#include <asm/cpu_has_feature.h>
#include "pervasive.h"
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 36cff28d0293..d3543e68efe8 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -255,13 +255,10 @@ static void __init cell_setup_arch(void)
static int __init cell_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "IBM,CBEA") &&
- !of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ if (!of_machine_is_compatible("IBM,CBEA") &&
+ !of_machine_is_compatible("IBM,CPBW-1.0"))
return 0;
- hpte_init_native();
pm_power_off = rtas_power_off;
return 1;
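
cell_probe() above illustrates the pattern repeated across this series: by the time machine probe runs, the device tree has been unflattened, so of_machine_is_compatible() replaces the of_get_flat_dt_root()/of_flat_dt_is_compatible() pair, and work that used to sit in .init_early can be done directly in the probe hook. A minimal sketch, with a hypothetical board name:

#include <linux/of.h>

static int __init example_probe(void)
{
	/* Match against the unflattened device tree's root compatible. */
	if (!of_machine_is_compatible("acme,example-board"))
		return 0;

	/* Early, board-specific setup that used to live in .init_early. */

	return 1;
}
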
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 54ee5743cb72..d06dcac66fcb 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -217,7 +217,7 @@ static void spider_irq_cascade(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-/* For hooking up the cascace we have a problem. Our device-tree is
+/* For hooking up the cascade we have a problem. Our device-tree is
* crap and we don't know on which BE iic interrupt we are hooked on at
* least not the "standard" way. We can reconstitute it based on two
 * pieces of information though: which BE node we are connected to and whether
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 3cbe38fad609..bb4a8e07c229 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -69,7 +69,7 @@ static DEFINE_SPINLOCK(spu_lock);
* spu_full_list_lock and spu_full_list_mutex held, while iterating
* through it requires either of these locks.
*
- * In addition spu_full_list_lock protects all assignmens to
+ * In addition spu_full_list_lock protects all assignments to
* spu->mm.
*/
static LIST_HEAD(spu_full_list);
@@ -253,7 +253,7 @@ static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
* Setup the SPU kernel SLBs, in preparation for a context save/restore. We
* need to map both the context save area, and the save/restore code.
*
- * Because the lscsa and code may cross segment boundaires, we check to see
+ * Because the lscsa and code may cross segment boundaries, we check to see
* if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
* is seriously wrong.
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index c3327f3d8cf7..21b4bfb97200 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -535,8 +535,7 @@ static int __init init_affinity(void)
if (of_has_vicinity()) {
init_affinity_fw();
} else {
- long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ if (of_machine_is_compatible("IBM,CPBW-1.0"))
init_affinity_qs20_harcoded();
else
printk("No affinity configuration found\n");
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 2936a0044c04..06254467e4dd 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -866,7 +866,7 @@ void spufs_wbox_callback(struct spu *spu)
* - end of the mapped area
*
* If the file is opened without O_NONBLOCK, we wait here until
- * space is availabyl, but return when we have been able to
+ * space is available, but return when we have been able to
* write something.
*/
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 9f79004e6d6f..cfacbee24d7b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -435,7 +435,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
/* Note: we don't need to force_sig SIGTRAP on single-step
* since we have TIF_SINGLESTEP set, thus the kernel will do
- * it upon return from the syscall anyawy
+ * it upon return from the syscall anyway.
*/
if (unlikely(status & SPU_STATUS_SINGLE_STEP))
ret = -ERESTARTSYS;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 998f632e7cce..460f5f31d5cb 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -622,7 +622,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
/**
* find_victim - find a lower priority context to preempt
- * @ctx: canidate context for running
+ * @ctx: candidate context for running
*
* Returns the freed physical spu to run the new context on.
*/
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 987d1b8d68e3..bfb300633dfe 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -239,7 +239,7 @@ out:
of_node_put(np);
}
-static void briq_restart(char *cmd)
+static void __noreturn briq_restart(char *cmd)
{
local_irq_disable();
if (briq_SPOR)
@@ -253,7 +253,7 @@ static void briq_restart(char *cmd)
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
 * the built-in serial node. Instead, a /failsafe node is created.
*/
-static __init void chrp_init_early(void)
+static __init void chrp_init(void)
{
struct device_node *node;
const char *property;
@@ -587,6 +587,8 @@ static int __init chrp_probe(void)
pm_power_off = rtas_power_off;
+ chrp_init();
+
return 1;
}
@@ -595,7 +597,6 @@ define_machine(chrp) {
.probe = chrp_probe,
.setup_arch = chrp_setup_arch,
.init = chrp_init2,
- .init_early = chrp_init_early,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
.restart = rtas_restart,
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index ebd3963fdf91..d19e4e759597 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -99,7 +99,7 @@ static void c2k_reset_board(void)
out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_VALUE_SET, 0x00080004);
}
-static void c2k_restart(char *cmd)
+static void __noreturn c2k_restart(char *cmd)
{
c2k_reset_board();
msleep(100);
@@ -123,15 +123,16 @@ void c2k_show_cpuinfo(struct seq_file *m)
*/
static int __init c2k_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "GEFanuc,C2K"))
+ if (!of_machine_is_compatible("GEFanuc,C2K"))
return 0;
printk(KERN_INFO "Detected a GEFanuc C2K board\n");
_set_L2CR(0);
_set_L2CR(L2CR_L2E | L2CR_L2PE | L2CR_L2I);
+
+ mv64x60_init_early();
+
return 1;
}
@@ -139,7 +140,6 @@ define_machine(c2k) {
.name = "C2K",
.probe = c2k_probe,
.setup_arch = c2k_setup_arch,
- .init_early = mv64x60_init_early,
.show_cpuinfo = c2k_show_cpuinfo,
.init_IRQ = mv64x60_init_irq,
.get_irq = mv64x60_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index fe0ed6ee285e..36789cec957c 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -29,14 +29,14 @@
#include "usbgecko_udbg.h"
-static void gamecube_spin(void)
+static void __noreturn gamecube_spin(void)
{
/* spin until power button pressed */
for (;;)
cpu_relax();
}
-static void gamecube_restart(char *cmd)
+static void __noreturn gamecube_restart(char *cmd)
{
local_irq_disable();
flipper_platform_reset();
@@ -49,26 +49,20 @@ static void gamecube_power_off(void)
gamecube_spin();
}
-static void gamecube_halt(void)
+static void __noreturn gamecube_halt(void)
{
gamecube_restart(NULL);
}
-static void __init gamecube_init_early(void)
-{
- ug_udbg_init();
-}
-
static int __init gamecube_probe(void)
{
- unsigned long dt_root;
-
- dt_root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
+ if (!of_machine_is_compatible("nintendo,gamecube"))
return 0;
pm_power_off = gamecube_power_off;
+ ug_udbg_init();
+
return 1;
}
@@ -80,7 +74,6 @@ static void gamecube_shutdown(void)
define_machine(gamecube) {
.name = "gamecube",
.probe = gamecube_probe,
- .init_early = gamecube_init_early,
.restart = gamecube_restart,
.halt = gamecube_halt,
.init_IRQ = flipper_pic_probe,
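
The restart/halt handlers in these hunks gain __noreturn annotations, which requires that every path genuinely never returns; that is also why several handlers later in the series pick up a trailing spin loop. A minimal sketch of the shape such a handler takes (example_restart and the reset poke are hypothetical):

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

static void __noreturn example_restart(char *cmd)
{
	local_irq_disable();

	/* Trigger the board-specific reset mechanism here. */

	/* Never return: satisfy __noreturn even if the reset does not bite. */
	for (;;)
		cpu_relax();
}
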
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 8c305c7c8977..dafba1057a47 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -193,7 +193,7 @@ void holly_show_cpuinfo(struct seq_file *m)
seq_printf(m, "machine\t\t: PPC750 GX/CL\n");
}
-void holly_restart(char *cmd)
+void __noreturn holly_restart(char *cmd)
{
__be32 __iomem *ocn_bar1 = NULL;
unsigned long bar;
@@ -250,9 +250,7 @@ void holly_halt(void)
*/
static int __init holly_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,holly"))
+ if (!of_machine_is_compatible("ibm,holly"))
return 0;
return 1;
}
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 540eeb58d3f0..f29cf29b11f8 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -100,7 +100,7 @@ static void __init linkstation_init_IRQ(void)
extern void avr_uart_configure(void);
extern void avr_uart_send(const char);
-static void linkstation_restart(char *cmd)
+static void __noreturn linkstation_restart(char *cmd)
{
local_irq_disable();
@@ -113,7 +113,7 @@ static void linkstation_restart(char *cmd)
avr_uart_send('G'); /* "kick" */
}
-static void linkstation_power_off(void)
+static void __noreturn linkstation_power_off(void)
{
local_irq_disable();
@@ -127,7 +127,7 @@ static void linkstation_power_off(void)
/* NOTREACHED */
}
-static void linkstation_halt(void)
+static void __noreturn linkstation_halt(void)
{
linkstation_power_off();
/* NOTREACHED */
@@ -141,11 +141,7 @@ static void linkstation_show_cpuinfo(struct seq_file *m)
static int __init linkstation_probe(void)
{
- unsigned long root;
-
- root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "linkstation"))
+ if (!of_machine_is_compatible("linkstation"))
return 0;
pm_power_off = linkstation_power_off;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index df4ad95f183e..80804f9916ee 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -146,7 +146,7 @@ void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
seq_printf(m, "vendor\t\t: Freescale Semiconductor\n");
}
-void mpc7448_hpc2_restart(char *cmd)
+static void __noreturn mpc7448_hpc2_restart(char *cmd)
{
local_irq_disable();
@@ -161,9 +161,7 @@ void mpc7448_hpc2_restart(char *cmd)
*/
static int __init mpc7448_hpc2_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "mpc74xx"))
+ if (!of_machine_is_compatible("mpc74xx"))
return 0;
return 1;
}
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 8f65aa3747f5..ed7321d6772e 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -177,7 +177,7 @@ static void mvme5100_show_cpuinfo(struct seq_file *m)
seq_puts(m, "Machine\t\t: MVME5100\n");
}
-static void mvme5100_restart(char *cmd)
+static void __noreturn mvme5100_restart(char *cmd)
{
local_irq_disable();
@@ -194,9 +194,7 @@ static void mvme5100_restart(char *cmd)
*/
static int __init mvme5100_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "MVME5100");
+ return of_machine_is_compatible("MVME5100");
}
static int __init probe_of_platform_devices(void)
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index d572833ebd00..471a50bcd074 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -96,7 +96,7 @@ static void __init storcenter_init_IRQ(void)
mpic_init(mpic);
}
-static void storcenter_restart(char *cmd)
+static void __noreturn storcenter_restart(char *cmd)
{
local_irq_disable();
@@ -109,9 +109,7 @@ static void storcenter_restart(char *cmd)
static int __init storcenter_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "iomega,storcenter");
+ return of_machine_is_compatible("iomega,storcenter");
}
define_machine(storcenter){
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 352592d3e44e..3fd683e40bc9 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -112,7 +112,7 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
return delta + bl;
}
-static void wii_spin(void)
+static void __noreturn wii_spin(void)
{
local_irq_disable();
for (;;)
@@ -160,7 +160,7 @@ static void __init wii_setup_arch(void)
}
}
-static void wii_restart(char *cmd)
+static void __noreturn wii_restart(char *cmd)
{
local_irq_disable();
@@ -185,18 +185,13 @@ static void wii_power_off(void)
wii_spin();
}
-static void wii_halt(void)
+static void __noreturn wii_halt(void)
{
if (ppc_md.restart)
ppc_md.restart(NULL);
wii_spin();
}
-static void __init wii_init_early(void)
-{
- ug_udbg_init();
-}
-
static void __init wii_pic_probe(void)
{
flipper_pic_probe();
@@ -205,14 +200,13 @@ static void __init wii_pic_probe(void)
static int __init wii_probe(void)
{
- unsigned long dt_root;
-
- dt_root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii"))
+ if (!of_machine_is_compatible("nintendo,wii"))
return 0;
pm_power_off = wii_power_off;
+ ug_udbg_init();
+
return 1;
}
@@ -225,7 +219,6 @@ static void wii_shutdown(void)
define_machine(wii) {
.name = "wii",
.probe = wii_probe,
- .init_early = wii_init_early,
.setup_arch = wii_setup_arch,
.restart = wii_restart,
.halt = wii_halt,
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index a923230e575b..a2f89e6326ce 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -568,6 +568,26 @@ void maple_pci_irq_fixup(struct pci_dev *dev)
DBG(" <- maple_pci_irq_fixup\n");
}
+static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_controller *hose = pci_bus_to_host(bridge->bus);
+ struct device_node *np, *child;
+
+ if (hose != u3_agp)
+ return 0;
+
+ /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+ * assume there is no P2P bridge on the AGP bus, which should be a
+ * safe assumption, hopefully.
+ */
+ np = hose->dn;
+ PCI_DN(np)->busno = 0xf0;
+ for_each_child_of_node(np, child)
+ PCI_DN(child)->busno = 0xf0;
+
+ return 0;
+}
+
void __init maple_pci_init(void)
{
struct device_node *np, *root;
@@ -605,19 +625,7 @@ void __init maple_pci_init(void)
if (ht && maple_add_bridge(ht) != 0)
of_node_put(ht);
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
-
- /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
- * assume there is no P2P bridge on the AGP bus, which should be a
- * safe assumptions hopefully.
- */
- if (u3_agp) {
- struct device_node *np = u3_agp->dn;
- PCI_DN(np)->busno = 0xf0;
- for (np = np->child; np; np = np->sibling)
- PCI_DN(np)->busno = 0xf0;
- }
+ ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare;
/* Tell pci.c to not change any resource allocations. */
pci_add_flags(PCI_PROBE_ONLY);
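
The U3 AGP fixup above moves from maple_pci_init() into a pcibios_root_bridge_prepare hook and swaps the open-coded np->child/np->sibling walk for for_each_child_of_node(), which also manages the of_node reference counts while iterating. A minimal sketch of that iteration pattern (example_renumber_children is hypothetical):

#include <linux/of.h>
#include <asm/pci-bridge.h>

static void example_renumber_children(struct device_node *np, int busno)
{
	struct device_node *child;

	/* for_each_child_of_node() takes and drops child references for us. */
	for_each_child_of_node(np, child)
		PCI_DN(child)->busno = busno;
}
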
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index a837188544c8..3c30c7a4534d 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -94,7 +94,7 @@ static unsigned long maple_find_nvram_base(void)
return result;
}
-static void maple_restart(char *cmd)
+static void __noreturn maple_restart(char *cmd)
{
unsigned int maple_nvram_base;
const unsigned int *maple_nvram_offset, *maple_nvram_command;
@@ -119,9 +119,10 @@ static void maple_restart(char *cmd)
for (;;) ;
fail:
printk(KERN_EMERG "Maple: Manual Restart Required\n");
+ for (;;) ;
}
-static void maple_power_off(void)
+static void __noreturn maple_power_off(void)
{
unsigned int maple_nvram_base;
const unsigned int *maple_nvram_offset, *maple_nvram_command;
@@ -146,9 +147,10 @@ static void maple_power_off(void)
for (;;) ;
fail:
printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
+ for (;;) ;
}
-static void maple_halt(void)
+static void __noreturn maple_halt(void)
{
maple_power_off();
}
@@ -196,18 +198,6 @@ void __init maple_setup_arch(void)
mmio_nvram_init();
}
-/*
- * Early initialization.
- */
-static void __init maple_init_early(void)
-{
- DBG(" -> maple_init_early\n");
-
- iommu_init_early_dart(&maple_pci_controller_ops);
-
- DBG(" <- maple_init_early\n");
-}
-
/*
* This is almost identical to pSeries and CHRP. We need to make that
* code generic at one point, with appropriate bits in the device-tree to
@@ -298,22 +288,14 @@ static void __init maple_progress(char *s, unsigned short hex)
*/
static int __init maple_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Momentum,Maple") &&
- !of_flat_dt_is_compatible(root, "Momentum,Apache"))
+ if (!of_machine_is_compatible("Momentum,Maple") &&
+ !of_machine_is_compatible("Momentum,Apache"))
return 0;
- /*
- * On U3, the DART (iommu) must be allocated now since it
- * has an impact on htab_initialize (due to the large page it
- * occupies having to be broken up so the DART itself is not
- * part of the cacheable linar mapping
- */
- alloc_dart_table();
- hpte_init_native();
pm_power_off = maple_power_off;
+ iommu_init_early_dart(&maple_pci_controller_ops);
+
return 1;
}
@@ -321,7 +303,6 @@ define_machine(maple) {
.name = "Maple",
.probe = maple_probe,
.setup_arch = maple_setup_arch,
- .init_early = maple_init_early,
.init_IRQ = maple_init_IRQ,
.pci_irq_fixup = maple_pci_irq_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index c929644e74a6..309d9ccccd50 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -88,7 +88,7 @@ static int iommu_table_iobmap_inited;
static int iobmap_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u32 *ip;
u32 rpn;
@@ -202,6 +202,11 @@ int __init iob_init(struct device_node *dn)
pr_debug(" -> %s\n", __func__);
+ /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
+ iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
+
+ printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
+
/* Allocate a spare page to map all invalid IOTLB pages. */
tmp = memblock_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
if (!tmp)
@@ -260,13 +265,3 @@ void __init iommu_init_early_pasemi(void)
set_pci_dma_ops(&dma_iommu_ops);
}
-void __init alloc_iobmap_l2(void)
-{
-#ifndef CONFIG_PPC_PASEMI_IOMMU
- return;
-#endif
- /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
- iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
-
- printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);
-}
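
Folding alloc_iobmap_l2() into iob_init() keeps the early allocation next to its only user. A minimal sketch of the allocation pattern it relies on, assuming the same 2 MiB size/alignment and 2 GiB ceiling (example_alloc_l2 is hypothetical):

#include <linux/memblock.h>
#include <asm/page.h>

static u32 *example_alloc_l2(void)
{
	/* Physical allocation: 2 MiB, 2 MiB aligned, below 2 GiB. */
	phys_addr_t pa = memblock_alloc_base(1UL << 21, 1UL << 21, 0x80000000);

	/* Convert to a linear-mapping virtual address for CPU access. */
	return (u32 *)__va(pa);
}
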
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index 11f230a48227..74cbcb357612 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -8,7 +8,6 @@ extern void pas_pci_dma_dev_setup(struct pci_dev *dev);
extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
-extern void __init alloc_iobmap_l2(void);
extern void __init pasemi_map_registers(void);
/* Power savings modes, implemented in asm */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index f3a68a0fef23..10c4e8fc6ea9 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -229,9 +229,6 @@ void __init pas_pci_init(void)
of_node_get(np);
of_node_put(root);
-
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
}
void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index d71b2c7e8403..e86c1bd08f1f 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -62,7 +62,7 @@ static int num_mce_regs;
static int nmi_virq = NO_IRQ;
-static void pas_restart(char *cmd)
+static void __noreturn pas_restart(char *cmd)
{
/* Need to put other cpus in a hold loop so they're not sleeping */
smp_send_stop();
@@ -339,11 +339,6 @@ out:
return !!(srr1 & 0x2);
}
-static void __init pas_init_early(void)
-{
- iommu_init_early_pasemi();
-}
-
#ifdef CONFIG_PCMCIA
static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
void *data)
@@ -420,15 +415,11 @@ machine_device_initcall(pasemi, pasemi_publish_devices);
*/
static int __init pas_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "PA6T-1682M") &&
- !of_flat_dt_is_compatible(root, "pasemi,pwrficient"))
+ if (!of_machine_is_compatible("PA6T-1682M") &&
+ !of_machine_is_compatible("pasemi,pwrficient"))
return 0;
- hpte_init_native();
-
- alloc_iobmap_l2();
+ iommu_init_early_pasemi();
return 1;
}
@@ -437,7 +428,6 @@ define_machine(pasemi) {
.name = "PA Semi PWRficient",
.probe = pas_probe,
.setup_arch = pas_setup_arch,
- .init_early = pas_init_early,
.init_IRQ = pas_init_IRQ,
.get_irq = mpic_get_irq,
.restart = pas_restart,
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 7553b6a77c64..6d6f277477aa 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -15,7 +15,7 @@
* This file thus provides a simple low level unified i2c interface for
* powermac that covers the various types of i2c busses used in Apple machines.
* For now, keywest, PMU and SMU, though we could add Cuda, or other bit
- * banging busses found on older chipstes in earlier machines if we ever need
+ * banging busses found on older chipsets in earlier machines if we ever need
* one of them.
*
* The drivers in this file are synchronous/blocking. In addition, the
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 59ab16fa600f..6e06c3be2e9a 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -878,6 +878,29 @@ void pmac_pci_irq_fixup(struct pci_dev *dev)
#endif /* CONFIG_PPC32 */
}
+#ifdef CONFIG_PPC64
+static int pmac_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_controller *hose = pci_bus_to_host(bridge->bus);
+ struct device_node *np, *child;
+
+ if (hose != u3_agp)
+ return 0;
+
+ /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
+ * assume there is no P2P bridge on the AGP bus, which should be a
+ * safe assumption for now. We should do something better in the
+ * future though
+ */
+ np = hose->dn;
+ PCI_DN(np)->busno = 0xf0;
+ for_each_child_of_node(np, child)
+ PCI_DN(child)->busno = 0xf0;
+
+ return 0;
+}
+#endif /* CONFIG_PPC64 */
+
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
@@ -914,20 +937,7 @@ void __init pmac_pci_init(void)
if (ht && pmac_add_bridge(ht) != 0)
of_node_put(ht);
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
-
- /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
- * assume there is no P2P bridge on the AGP bus, which should be a
- * safe assumptions for now. We should do something better in the
- * future though
- */
- if (u3_agp) {
- struct device_node *np = u3_agp->dn;
- PCI_DN(np)->busno = 0xf0;
- for (np = np->child; np; np = np->sibling)
- PCI_DN(np)->busno = 0xf0;
- }
+ ppc_md.pcibios_root_bridge_prepare = pmac_pci_root_bridge_prepare;
/* pmac_check_ht_link(); */
#else /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 8dd78f4e1af4..6b4e9d181126 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -52,7 +52,6 @@
#include <linux/suspend.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
#include <asm/reg.h>
#include <asm/sections.h>
@@ -97,11 +96,6 @@ int sccdbg;
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
-#ifdef CONFIG_PMAC_SMU
-unsigned long smu_cmdbuf_abs;
-EXPORT_SYMBOL(smu_cmdbuf_abs);
-#endif
-
static void pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
@@ -325,7 +319,6 @@ static void __init pmac_setup_arch(void)
defined(CONFIG_PPC64)
pmac_nvram_init();
#endif
-
#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
@@ -360,12 +353,12 @@ static int pmac_late_init(void)
machine_late_initcall(powermac, pmac_late_init);
/*
- * This is __init_refok because we check for "initializing" before
+ * This is __ref because we check for "initializing" before
* touching any of the __init sensitive things and "initializing"
* will be false after __init time. This can't be __init because it
* can be called whenever a disk is first accessed.
*/
-void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
+void __ref note_bootable_part(dev_t dev, int part, int goodness)
{
char *p;
@@ -383,7 +376,7 @@ void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
}
#ifdef CONFIG_ADB_CUDA
-static void cuda_restart(void)
+static void __noreturn cuda_restart(void)
{
struct adb_request req;
@@ -392,7 +385,7 @@ static void cuda_restart(void)
cuda_poll();
}
-static void cuda_shutdown(void)
+static void __noreturn cuda_shutdown(void)
{
struct adb_request req;
@@ -416,7 +409,7 @@ static void cuda_shutdown(void)
#define smu_shutdown()
#endif
-static void pmac_restart(char *cmd)
+static void __noreturn pmac_restart(char *cmd)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
@@ -430,9 +423,10 @@ static void pmac_restart(char *cmd)
break;
default: ;
}
+ while (1) ;
}
-static void pmac_power_off(void)
+static void __noreturn pmac_power_off(void)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
@@ -446,9 +440,10 @@ static void pmac_power_off(void)
break;
default: ;
}
+ while (1) ;
}
-static void
+static void __noreturn
pmac_halt(void)
{
pmac_power_off();
@@ -457,7 +452,7 @@ pmac_halt(void)
/*
* Early initialization.
*/
-static void __init pmac_init_early(void)
+static void __init pmac_init(void)
{
/* Enable early btext debug if requested */
if (strstr(boot_command_line, "btextdbg")) {
@@ -489,9 +484,6 @@ static int __init pmac_declare_of_platform_devices(void)
{
struct device_node *np;
- if (machine_is(chrp))
- return -1;
-
np = of_find_node_by_name(NULL, "valkyrie");
if (np) {
of_platform_device_create(np, "valkyrie", NULL);
@@ -598,24 +590,10 @@ console_initcall(check_pmac_serial_console);
*/
static int __init pmac_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Power Macintosh") &&
- !of_flat_dt_is_compatible(root, "MacRISC"))
+ if (!of_machine_is_compatible("Power Macintosh") &&
+ !of_machine_is_compatible("MacRISC"))
return 0;
-#ifdef CONFIG_PPC64
- /*
- * On U3, the DART (iommu) must be allocated now since it
- * has an impact on htab_initialize (due to the large page it
- * occupies having to be broken up so the DART itself is not
- * part of the cacheable linar mapping
- */
- alloc_dart_table();
-
- hpte_init_native();
-#endif
-
#ifdef CONFIG_PPC32
/* isa_io_base gets set in pmac_pci_init */
ISA_DMA_THRESHOLD = ~0L;
@@ -623,17 +601,10 @@ static int __init pmac_probe(void)
DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PMAC_SMU
- /*
- * SMU based G5s need some memory below 2Gb, at least the current
- * driver needs that. We have to allocate it now. We allocate 4k
- * (1 small page) for now.
- */
- smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
-#endif /* CONFIG_PMAC_SMU */
-
pm_power_off = pmac_power_off;
+ pmac_init();
+
return 1;
}
@@ -641,7 +612,6 @@ define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
.setup_arch = pmac_setup_arch,
- .init_early = pmac_init_early,
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = NULL, /* changed later */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 28a147ca32ba..834868b9fdc9 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -857,9 +857,8 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
{
int rc;
- switch(action) {
+ switch(action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
/* Open i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host) {
rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
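
Masking off CPU_TASKS_FROZEN in the notifier above lets the _FROZEN variants of each hotplug action share the plain case labels instead of listing both. A minimal sketch of the pattern (example_cpu_notify is hypothetical):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	/* Fold CPU_UP_PREPARE_FROZEN into the CPU_UP_PREPARE case. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Prepare per-CPU resources for the incoming CPU. */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
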
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index cd9711e72df6..b5d98cb3f482 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -6,6 +6,7 @@ obj-y += opal-kmsg.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o
+obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 9226df11bf39..86544ea85dc3 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -36,6 +36,7 @@
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/ppc-pci.h>
+#include <asm/pnv-pci.h>
#include "powernv.h"
#include "pci.h"
@@ -717,12 +718,12 @@ static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
return ret;
}
-static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
+static s64 pnv_eeh_poll(unsigned long id)
{
s64 rc = OPAL_HARDWARE;
while (1) {
- rc = opal_pci_poll(phb->opal_id);
+ rc = opal_pci_poll(id);
if (rc <= 0)
break;
@@ -762,7 +763,7 @@ int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
* reset followed by hot reset on root bus. So we also
* need the PCI bus settlement delay.
*/
- rc = pnv_eeh_phb_poll(phb);
+ rc = pnv_eeh_poll(phb->opal_id);
if (option == EEH_RESET_DEACTIVATE) {
if (system_state < SYSTEM_RUNNING)
udelay(1000 * EEH_PE_RST_SETTLE_TIME);
@@ -805,7 +806,7 @@ static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
goto out;
/* Poll state of the PHB until the request is done */
- rc = pnv_eeh_phb_poll(phb);
+ rc = pnv_eeh_poll(phb->opal_id);
if (option == EEH_RESET_DEACTIVATE)
msleep(EEH_PE_RST_SETTLE_TIME);
out:
@@ -815,7 +816,7 @@ out:
return 0;
}
-static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
+static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
{
struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
@@ -866,6 +867,44 @@ static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
return 0;
}
+static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct device_node *dn = pci_device_to_OF_node(pdev);
+ uint64_t id = PCI_SLOT_ID(phb->opal_id,
+ (pdev->bus->number << 8) | pdev->devfn);
+ uint8_t scope;
+ int64_t rc;
+
+ /* Hot reset to the bus if firmware cannot handle */
+ if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
+ return __pnv_eeh_bridge_reset(pdev, option);
+
+ switch (option) {
+ case EEH_RESET_FUNDAMENTAL:
+ scope = OPAL_RESET_PCI_FUNDAMENTAL;
+ break;
+ case EEH_RESET_HOT:
+ scope = OPAL_RESET_PCI_HOT;
+ break;
+ case EEH_RESET_DEACTIVATE:
+ return 0;
+ default:
+ dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n",
+ __func__, option);
+ return -EINVAL;
+ }
+
+ rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET);
+ if (rc <= OPAL_SUCCESS)
+ goto out;
+
+ rc = pnv_eeh_poll(id);
+out:
+ return (rc == OPAL_SUCCESS) ? 0 : -EIO;
+}
+
void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index fcc8b6861b63..479c25601612 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -27,9 +27,12 @@
#include "powernv.h"
#include "subcore.h"
+/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
+#define MAX_STOP_STATE 0xF
+
static u32 supported_cpuidle_states;
-int pnv_save_sprs_for_winkle(void)
+static int pnv_save_sprs_for_deep_states(void)
{
int cpu;
int rc;
@@ -50,15 +53,19 @@ int pnv_save_sprs_for_winkle(void)
uint64_t pir = get_hard_smp_processor_id(cpu);
uint64_t hsprg0_val = (uint64_t)&paca[cpu];
- /*
- * HSPRG0 is used to store the cpu's pointer to paca. Hence last
- * 3 bits are guaranteed to be 0. Program slw to restore HSPRG0
- * with 63rd bit set, so that when a thread wakes up at 0x100 we
- * can use this bit to distinguish between fastsleep and
- * deep winkle.
- */
- hsprg0_val |= 1;
-
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * HSPRG0 is used to store the cpu's pointer to paca.
+ * Hence last 3 bits are guaranteed to be 0. Program
+ * slw to restore HSPRG0 with 63rd bit set, so that
+ * when a thread wakes up at 0x100 we can use this bit
+ * to distinguish between fastsleep and deep winkle.
+ * This is not necessary with stop/psscr since PLS
+ * field of psscr indicates which state we are waking
+ * up from.
+ */
+ hsprg0_val |= 1;
+ }
rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
if (rc != 0)
return rc;
@@ -130,8 +137,8 @@ static void pnv_alloc_idle_core_states(void)
update_subcore_sibling_mask();
- if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
- pnv_save_sprs_for_winkle();
+ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
+ pnv_save_sprs_for_deep_states();
}
u32 pnv_get_supported_cpuidle_states(void)
@@ -230,43 +237,162 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
show_fastsleep_workaround_applyonce,
store_fastsleep_workaround_applyonce);
-static int __init pnv_init_idle_states(void)
+
+/*
+ * Used for ppc_md.power_save which needs a function with no parameters
+ */
+static void power9_idle(void)
{
- struct device_node *power_mgt;
- int dt_idle_states;
- u32 *flags;
- int i;
+ /* Requesting stop state 0 */
+ power9_idle_stop(0);
+}
+/*
+ * First deep stop state. Used to figure out when to save/restore
+ * hypervisor context.
+ */
+u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
- supported_cpuidle_states = 0;
+/*
+ * Deepest stop idle state. Used when a cpu is offlined
+ */
+u64 pnv_deepest_stop_state;
- if (cpuidle_disable != IDLE_NO_OVERRIDE)
- goto out;
+/*
+ * Power ISA 3.0 idle initialization.
+ *
+ * POWER ISA 3.0 defines a new SPR Processor stop Status and Control
+ * Register (PSSCR) to control idle behavior.
+ *
+ * PSSCR layout:
+ * ----------------------------------------------------------
+ * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL |
+ * ----------------------------------------------------------
+ * 0 4 41 42 43 44 48 54 56 60
+ *
+ * PSSCR key fields:
+ * Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the
+ * lowest power-saving state the thread entered since stop instruction was
+ * last executed.
+ *
+ * Bit 41 - Status Disable(SD)
+ * 0 - Shows PLS entries
+ * 1 - PLS entries are all 0
+ *
+ * Bit 42 - Enable State Loss
+ * 0 - No state is lost irrespective of other fields
+ * 1 - Allows state loss
+ *
+ * Bit 43 - Exit Criterion
+ * 0 - Exit from power-save mode on any interrupt
+ * 1 - Exit from power-save mode controlled by LPCR's PECE bits
+ *
+ * Bits 44:47 - Power-Saving Level Limit
+ * This limits the power-saving level that can be entered into.
+ *
+ * Bits 60:63 - Requested Level
+ * Used to specify which power-saving level must be entered on executing
+ * stop instruction
+ *
+ * @np: /ibm,opal/power-mgt device node
+ * @flags: cpu-idle-state-flags array
+ * @dt_idle_states: Number of idle state entries
+ * Returns 0 on success
+ */
+static int __init pnv_arch300_idle_init(struct device_node *np, u32 *flags,
+ int dt_idle_states)
+{
+ u64 *psscr_val = NULL;
+ int rc = 0, i;
- if (!firmware_has_feature(FW_FEATURE_OPAL))
+ psscr_val = kcalloc(dt_idle_states, sizeof(*psscr_val),
+ GFP_KERNEL);
+ if (!psscr_val) {
+ rc = -1;
+ goto out;
+ }
+ if (of_property_read_u64_array(np,
+ "ibm,cpu-idle-state-psscr",
+ psscr_val, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n");
+ rc = -1;
goto out;
+ }
- power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
- if (!power_mgt) {
+ /*
+ * Set pnv_first_deep_stop_state and pnv_deepest_stop_state.
+ * pnv_first_deep_stop_state should be set to the first stop
+ * level to cause hypervisor state loss.
+ * pnv_deepest_stop_state should be set to the deepest stop
+ * state.
+ */
+ pnv_first_deep_stop_state = MAX_STOP_STATE;
+ for (i = 0; i < dt_idle_states; i++) {
+ u64 psscr_rl = psscr_val[i] & PSSCR_RL_MASK;
+
+ if ((flags[i] & OPAL_PM_LOSE_FULL_CONTEXT) &&
+ (pnv_first_deep_stop_state > psscr_rl))
+ pnv_first_deep_stop_state = psscr_rl;
+
+ if (pnv_deepest_stop_state < psscr_rl)
+ pnv_deepest_stop_state = psscr_rl;
+ }
+
+out:
+ kfree(psscr_val);
+ return rc;
+}
+
+/*
+ * Probe device tree for supported idle states
+ */
+static void __init pnv_probe_idle_states(void)
+{
+ struct device_node *np;
+ int dt_idle_states;
+ u32 *flags = NULL;
+ int i;
+
+ np = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!np) {
pr_warn("opal: PowerMgmt Node not found\n");
goto out;
}
- dt_idle_states = of_property_count_u32_elems(power_mgt,
+ dt_idle_states = of_property_count_u32_elems(np,
"ibm,cpu-idle-state-flags");
if (dt_idle_states < 0) {
pr_warn("cpuidle-powernv: no idle states found in the DT\n");
goto out;
}
- flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
- if (of_property_read_u32_array(power_mgt,
+ flags = kcalloc(dt_idle_states, sizeof(*flags), GFP_KERNEL);
+
+ if (of_property_read_u32_array(np,
"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
- goto out_free;
+ goto out;
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (pnv_arch300_idle_init(np, flags, dt_idle_states))
+ goto out;
}
for (i = 0; i < dt_idle_states; i++)
supported_cpuidle_states |= flags[i];
+out:
+ kfree(flags);
+}
+static int __init pnv_init_idle_states(void)
+{
+
+ supported_cpuidle_states = 0;
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ goto out;
+
+ pnv_probe_idle_states();
+
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
patch_instruction(
(unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -285,8 +411,12 @@ static int __init pnv_init_idle_states(void)
}
pnv_alloc_idle_core_states();
-out_free:
- kfree(flags);
+
+ if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
+ ppc_md.power_save = power7_idle;
+ else if (supported_cpuidle_states & OPAL_PM_STOP_INST_FAST)
+ ppc_md.power_save = power9_idle;
+
out:
return 0;
}
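
The PSSCR layout documented above puts the Requested Level (RL) in bits 60:63, i.e. the low nibble of the register, which is what pnv_arch300_idle_init() masks out with PSSCR_RL_MASK when ranking the device-tree stop states. A minimal worked sketch of that decode (the EXAMPLE_ mask is a stand-in, assuming the layout as documented):

#include <linux/types.h>

#define EXAMPLE_PSSCR_RL_MASK	0xFULL	/* bits 60:63 == low nibble */

static inline u64 example_psscr_rl(u64 psscr_val)
{
	/* e.g. a device-tree value of 0x44 requests stop level 4 */
	return psscr_val & EXAMPLE_PSSCR_RL_MASK;
}
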
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 0459e100b4e7..00e1a0195c78 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(pnv_pci_get_npu_dev);
static void *dma_npu_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
return NULL;
@@ -81,7 +81,7 @@ static void *dma_npu_alloc(struct device *dev, size_t size,
static void dma_npu_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
}
@@ -89,7 +89,7 @@ static void dma_npu_free(struct device *dev, size_t size,
static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
return 0;
@@ -97,7 +97,7 @@ static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
return 0;
@@ -180,7 +180,7 @@ long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
return rc;
}
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(phb, false);
/* Add the table to the list so its TCE cache will get invalidated */
pnv_pci_link_table_and_group(phb->hose->node, num,
@@ -204,7 +204,7 @@ long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
return rc;
}
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(phb, false);
pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
&npe->table_group);
@@ -270,7 +270,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
0 /* bypass base */, top);
if (rc == OPAL_SUCCESS)
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(phb, false);
return rc;
}
@@ -334,7 +334,7 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
return;
}
- pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
+ pnv_pci_phb3_tce_invalidate_entire(npe->phb, false);
}
struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index bdc8c0c71d15..83bebeec0fea 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -117,6 +117,11 @@ int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
return -EINVAL;
}
+ /* Wake up the poller before we wait for events to speed things
+ * up on platforms or simulators where the interrupts aren't
+ * functional.
+ */
+ opal_wake_poller();
wait_event(opal_async_wait, test_bit(token, opal_async_complete_map));
memcpy(msg, &opal_async_responses[token], sizeof(*msg));
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index 00a29432be39..4495f428b500 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -44,7 +44,7 @@ static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
{
uint64_t paddr_start, paddr_end;
- pr_debug("%s: Retrived memory error event, type: 0x%x\n",
+ pr_debug("%s: Retrieved memory error event, type: 0x%x\n",
__func__, merr_evt->type);
switch (merr_evt->type) {
case OPAL_MEM_ERR_TYPE_RESILIENCE:
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index a06059df9239..308efd170c27 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -55,7 +55,7 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
goto out_token;
}
- ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ ret = opal_error_code(opal_get_async_rc(msg));
*sensor_data = be32_to_cpu(data);
break;
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
index afe66c576a38..23fb6647dced 100644
--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -67,7 +67,7 @@ static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
- ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ ret = opal_error_code(opal_get_async_rc(msg));
out_token:
opal_async_release_token(token);
@@ -103,7 +103,7 @@ static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
- ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ ret = opal_error_code(opal_get_async_rc(msg));
out_token:
opal_async_release_token(token);
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index e11273b2386d..1e496b780efd 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -1,6 +1,7 @@
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
+#include <asm/asm-prototypes.h>
#ifdef HAVE_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e45b88a5d7e0..3d29d40eb0e9 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -59,12 +59,11 @@ END_FTR_SECTION(0, 1); \
#define OPAL_CALL(name, token) \
_GLOBAL_TOC(name); \
mflr r0; \
- std r0,16(r1); \
+ std r0,PPC_LR_STKOFF(r1); \
li r0,token; \
OPAL_BRANCH(opal_tracepoint_entry) \
mfcr r12; \
stw r12,8(r1); \
- std r1,PACAR1(r13); \
li r11,0; \
mfmsr r12; \
ori r11,r11,MSR_EE; \
@@ -92,7 +91,7 @@ opal_return:
FIXUP_ENDIAN
ld r2,PACATOC(r13);
lwz r4,8(r1);
- ld r5,16(r1);
+ ld r5,PPC_LR_STKOFF(r1);
ld r6,PACASAVEDMSR(r13);
mtspr SPRN_SRR0,r5;
mtspr SPRN_SRR1,r6;
@@ -127,7 +126,6 @@ opal_tracepoint_entry:
mfcr r12
std r11,16(r1)
stw r12,8(r1)
- std r1,PACAR1(r13)
li r11,0
mfmsr r12
ori r11,r11,MSR_EE
@@ -157,43 +155,37 @@ opal_tracepoint_return:
blr
#endif
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode. It handles endianness.
- *
- * r13 - paca pointer
- * r1 - stack pointer
- * r0 - opal token
- */
-_GLOBAL(opal_call_realmode)
- mflr r12
- std r12,PPC_LR_STKOFF(r1)
- ld r2,PACATOC(r13)
- /* Set opal return address */
- LOAD_REG_ADDR(r12,return_from_opal_call)
- mtlr r12
-
- mfmsr r12
-#ifdef __LITTLE_ENDIAN__
- /* Handle endian-ness */
- li r11,MSR_LE
- andc r12,r12,r11
-#endif
- mtspr SPRN_HSRR1,r12
- LOAD_REG_ADDR(r11,opal)
- ld r12,8(r11)
- ld r2,0(r11)
- mtspr SPRN_HSRR0,r12
+#define OPAL_CALL_REAL(name, token) \
+ _GLOBAL_TOC(name); \
+ mflr r0; \
+ std r0,PPC_LR_STKOFF(r1); \
+ li r0,token; \
+ mfcr r12; \
+ stw r12,8(r1); \
+ \
+ /* Set opal return address */ \
+ LOAD_REG_ADDR(r11, opal_return_realmode); \
+ mtlr r11; \
+ mfmsr r12; \
+ li r11,MSR_LE; \
+ andc r12,r12,r11; \
+ mtspr SPRN_HSRR1,r12; \
+ LOAD_REG_ADDR(r11,opal); \
+ ld r12,8(r11); \
+ ld r2,0(r11); \
+ mtspr SPRN_HSRR0,r12; \
hrfid
-return_from_opal_call:
-#ifdef __LITTLE_ENDIAN__
+opal_return_realmode:
FIXUP_ENDIAN
-#endif
+ ld r2,PACATOC(r13);
+ lwz r11,8(r1);
ld r12,PPC_LR_STKOFF(r1)
+ mtcr r11;
mtlr r12
blr
+
OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL);
OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
@@ -271,6 +263,7 @@ OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE);
+OPAL_CALL_REAL(opal_rm_resync_timebase, OPAL_RESYNC_TIMEBASE);
OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN);
OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT);
OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO);
@@ -278,6 +271,7 @@ OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2);
OPAL_CALL(opal_dump_read, OPAL_DUMP_READ);
OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK);
OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
+OPAL_CALL(opal_write_oppanel_async, OPAL_WRITE_OPPANEL_ASYNC);
OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND);
OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
@@ -285,7 +279,9 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL_REAL(opal_rm_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
+OPAL_CALL_REAL(opal_rm_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
@@ -302,3 +298,13 @@ OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG);
OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR);
OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR);
OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH);
+OPAL_CALL(opal_get_device_tree, OPAL_GET_DEVICE_TREE);
+OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE);
+OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE);
+OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE);
+OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR);
+OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR);
+OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
+OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
+OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
+OPAL_CALL_REAL(opal_rm_pci_tce_kill, OPAL_PCI_TCE_KILL);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 0256d0729252..8b4fc68cebcb 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -55,8 +55,9 @@ struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static uint32_t opal_heartbeat;
+static struct task_struct *kopald_tsk;
-static void opal_reinit_cores(void)
+void opal_configure_cores(void)
{
/* Do the actual re-init, This will clobber all FPRs, VRs, etc...
*
@@ -69,6 +70,10 @@ static void opal_reinit_cores(void)
#else
opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE);
#endif
+
+ /* Restore some bits */
+ if (cur_cpu_spec->cpu_restore)
+ cur_cpu_spec->cpu_restore();
}
int __init early_init_dt_scan_opal(unsigned long node,
@@ -105,13 +110,6 @@ int __init early_init_dt_scan_opal(unsigned long node,
panic("OPAL != V3 detected, no longer supported.\n");
}
- /* Reinit all cores with the right endian */
- opal_reinit_cores();
-
- /* Restore some bits */
- if (cur_cpu_spec->cpu_restore)
- cur_cpu_spec->cpu_restore();
-
return 1;
}
@@ -653,6 +651,7 @@ static void opal_i2c_create_devs(void)
static int kopald(void *unused)
{
+ unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;
__be64 events;
set_freezable();
@@ -660,12 +659,18 @@ static int kopald(void *unused)
try_to_freeze();
opal_poll_events(&events);
opal_handle_events(be64_to_cpu(events));
- msleep_interruptible(opal_heartbeat);
+ schedule_timeout_interruptible(timeout);
} while (!kthread_should_stop());
return 0;
}
+void opal_wake_poller(void)
+{
+ if (kopald_tsk)
+ wake_up_process(kopald_tsk);
+}
+
static void opal_init_heartbeat(void)
{
/* Old firwmware, we assume the HVC heartbeat is sufficient */
@@ -674,7 +679,7 @@ static void opal_init_heartbeat(void)
opal_heartbeat = 0;
if (opal_heartbeat)
- kthread_run(kopald, NULL, "kopald");
+ kopald_tsk = kthread_run(kopald, NULL, "kopald");
}
static int __init opal_init(void)
@@ -751,6 +756,9 @@ static int __init opal_init(void)
opal_pdev_init(opal_node, "ibm,opal-flash");
opal_pdev_init(opal_node, "ibm,opal-prd");
+ /* Initialise platform device: oppanel interface */
+ opal_pdev_init(opal_node, "ibm,opal-oppanel");
+
/* Initialise OPAL kmsg dumper for flushing console on panic */
opal_kmsg_init();
@@ -885,3 +893,5 @@ EXPORT_SYMBOL_GPL(opal_i2c_request);
/* Export these symbols for PowerNV LED class driver */
EXPORT_SYMBOL_GPL(opal_leds_get_ind);
EXPORT_SYMBOL_GPL(opal_leds_set_ind);
+/* Export this symbol for PowerNV Operator Panel class driver */
+EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
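
opal_async_wait_response() now nudges kopald via opal_wake_poller() before sleeping, so completions still arrive promptly when the OPAL interrupt path is not functional. A minimal sketch of that wake-the-poller pattern in isolation (all example_* names are hypothetical):

#include <linux/sched.h>
#include <linux/wait.h>

static struct task_struct *example_poller;	/* set when the kthread starts */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static void example_wait_for_completion(void)
{
	/* Kick the polling kthread first, mirroring opal_wake_poller(). */
	if (example_poller)
		wake_up_process(example_poller);

	/* Then sleep until the poller marks the request complete. */
	wait_event(example_wq, example_done);
}
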
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
new file mode 100644
index 000000000000..1349a099c74c
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2014-2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <asm/pci-bridge.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
+#include <misc/cxl.h>
+
+#include "pci.h"
+
+struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+ return of_node_get(hose->dn);
+}
+EXPORT_SYMBOL(pnv_pci_get_phb_node);
+
+int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pe;
+ int rc;
+
+ pe = pnv_ioda_get_pe(dev);
+ if (!pe)
+ return -ENODEV;
+
+ pe_info(pe, "Switching PHB to CXL\n");
+
+ rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
+ if (rc == OPAL_UNSUPPORTED)
+ dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
+ else if (rc)
+ dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
+
+/* Find PHB for cxl dev and allocate MSI hwirqs?
+ * Returns the absolute hardware IRQ number
+ */
+int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
+
+ if (hwirq < 0) {
+ dev_warn(&dev->dev, "Failed to find a free MSI\n");
+ return -ENOSPC;
+ }
+
+ return phb->msi_base + hwirq;
+}
+EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
+
+void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
+}
+EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
+
+void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int i, hwirq;
+
+ for (i = 1; i < CXL_IRQ_RANGES; i++) {
+ if (!irqs->range[i])
+ continue;
+ pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
+ i, irqs->offset[i],
+ irqs->range[i]);
+ hwirq = irqs->offset[i] - phb->msi_base;
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
+ irqs->range[i]);
+ }
+}
+EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
+
+int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
+ struct pci_dev *dev, int num)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ int i, hwirq, try;
+
+ memset(irqs, 0, sizeof(struct cxl_irq_ranges));
+
+ /* 0 is reserved for the multiplexed PSL DSI interrupt */
+ for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
+ try = num;
+ while (try) {
+ hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
+ if (hwirq >= 0)
+ break;
+ try /= 2;
+ }
+ if (!try)
+ goto fail;
+
+ irqs->offset[i] = phb->msi_base + hwirq;
+ irqs->range[i] = try;
+ pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
+ i, irqs->offset[i], irqs->range[i]);
+ num -= try;
+ }
+ if (num)
+ goto fail;
+
+ return 0;
+fail:
+ pnv_cxl_release_hwirq_ranges(irqs, dev);
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
+
+int pnv_cxl_get_irq_count(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ return phb->msi_bmp.irq_count;
+}
+EXPORT_SYMBOL(pnv_cxl_get_irq_count);
+
+int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
+ unsigned int virq)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ unsigned int xive_num = hwirq - phb->msi_base;
+ struct pnv_ioda_pe *pe;
+ int rc;
+
+ if (!(pe = pnv_ioda_get_pe(dev)))
+ return -ENODEV;
+
+ /* Assign XIVE to PE */
+ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
+ if (rc) {
+ pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
+ "hwirq 0x%x XIVE 0x%x PE\n",
+ pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
+ return -EIO;
+ }
+ pnv_set_msi_irq_chip(phb, virq);
+
+ return 0;
+}
+EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
+
+#if IS_MODULE(CONFIG_CXL)
+static inline int get_cxl_module(void)
+{
+ struct module *cxl_module;
+
+ mutex_lock(&module_mutex);
+
+ cxl_module = find_module("cxl");
+ if (cxl_module)
+ __module_get(cxl_module);
+
+ mutex_unlock(&module_mutex);
+
+ if (!cxl_module)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int get_cxl_module(void) { return 0; }
+#endif
+
+/*
+ * Sets flags and switches the controller ops to enable the cxl kernel api.
+ * Originally the cxl kernel API operated on a virtual PHB, but certain cards
+ * such as the Mellanox CX4 use a peer model instead and for these cards the
+ * cxl kernel api will operate on the real PHB.
+ */
+int pnv_cxl_enable_phb_kernel_api(struct pci_controller *hose, bool enable)
+{
+ struct pnv_phb *phb = hose->private_data;
+ int rc;
+
+ if (!enable) {
+ /*
+ * Once cxl mode is enabled on the PHB, there is currently no
+ * known safe method to disable it again, and trying risks a
+ * checkstop. If we can find a way to safely disable cxl mode
+ * in the future we can revisit this, but for now the only sane
+ * thing to do is to refuse to disable cxl mode:
+ */
+ return -EPERM;
+ }
+
+ /*
+ * Hold a reference to the cxl module since several PHB operations now
+ * depend on it, and it would be insane to allow it to be removed so
+ * long as we are in this mode (and since we can't safely disable this
+ * mode once enabled...).
+ */
+ rc = get_cxl_module();
+ if (rc)
+ return rc;
+
+ phb->flags |= PNV_PHB_FLAG_CXL;
+ hose->controller_ops = pnv_cxl_cx4_ioda_controller_ops;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_cxl_enable_phb_kernel_api);
+
+bool pnv_pci_on_cxl_phb(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ return !!(phb->flags & PNV_PHB_FLAG_CXL);
+}
+EXPORT_SYMBOL_GPL(pnv_pci_on_cxl_phb);
+
+struct cxl_afu *pnv_cxl_phb_to_afu(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+
+ return (struct cxl_afu *)phb->cxl_afu;
+}
+EXPORT_SYMBOL_GPL(pnv_cxl_phb_to_afu);
+
+void pnv_cxl_phb_set_peer_afu(struct pci_dev *dev, struct cxl_afu *afu)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ phb->cxl_afu = afu;
+}
+EXPORT_SYMBOL_GPL(pnv_cxl_phb_set_peer_afu);
+
+/*
+ * In the peer cxl model, the XSL/PSL is physical function 0, and will be used
+ * by other functions on the device for memory access and interrupts. When the
+ * other functions are enabled we explicitly take a reference on the cxl
+ * function since they will use it, and allocate a default context associated
+ * with that function just like the vPHB model of the cxl kernel API.
+ */
+bool pnv_cxl_enable_device_hook(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct cxl_afu *afu = phb->cxl_afu;
+
+ if (!pnv_pci_enable_device_hook(dev))
+ return false;
+
+
+ /* No special handling for the cxl function, which is always PF 0 */
+ if (PCI_FUNC(dev->devfn) == 0)
+ return true;
+
+ if (!afu) {
+ dev_WARN(&dev->dev, "Attempted to enable function > 0 on CXL PHB without a peer AFU\n");
+ return false;
+ }
+
+ dev_info(&dev->dev, "Enabling function on CXL enabled PHB with peer AFU\n");
+
+ /* Make sure the peer AFU can't go away while this device is active */
+ cxl_afu_get(afu);
+
+ return cxl_pci_associate_default_context(dev, afu);
+}
+
+void pnv_cxl_disable_device(struct pci_dev *dev)
+{
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct cxl_afu *afu = phb->cxl_afu;
+
+ /* No special handling for cxl function: */
+ if (PCI_FUNC(dev->devfn) == 0)
+ return;
+
+ cxl_pci_disable_device(dev);
+ cxl_afu_put(afu);
+}
+
+/*
+ * This is a special version of pnv_setup_msi_irqs for cards in cxl mode. This
+ * function handles setting up the IVTE entries for the XSL to use.
+ *
+ * We are currently not filling out the MSIX table, since the only currently
+ * supported adapter (CX4) uses a custom MSIX table format in cxl mode and it
+ * is up to their driver to fill that out. In the future we may fill out the
+ * MSIX table (and change the IVTE entries to be an index to the MSIX table)
+ * for adapters implementing the Full MSI-X mode described in the CAIA.
+ */
+int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct msi_desc *entry;
+ struct cxl_context *ctx = NULL;
+ unsigned int virq;
+ int hwirq;
+ int afu_irq = 0;
+ int rc;
+
+ if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
+ return -ENODEV;
+
+ if (pdev->no_64bit_msi && !phb->msi32_support)
+ return -ENODEV;
+
+ rc = cxl_cx4_setup_msi_irqs(pdev, nvec, type);
+ if (rc)
+ return rc;
+
+ for_each_pci_msi_entry(entry, pdev) {
+ if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
+ pr_warn("%s: Supports only 64-bit MSIs\n",
+ pci_name(pdev));
+ return -ENXIO;
+ }
+
+ hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
+ if (WARN_ON(hwirq <= 0))
+ return (hwirq ? hwirq : -ENOMEM);
+
+ virq = irq_create_mapping(NULL, hwirq);
+ if (virq == NO_IRQ) {
+ pr_warn("%s: Failed to map cxl mode MSI to linux irq\n",
+ pci_name(pdev));
+ return -ENOMEM;
+ }
+
+ rc = pnv_cxl_ioda_msi_setup(pdev, hwirq, virq);
+ if (rc) {
+ pr_warn("%s: Failed to setup cxl mode MSI\n", pci_name(pdev));
+ irq_dispose_mapping(virq);
+ return rc;
+ }
+
+ irq_set_msi_desc(virq, entry);
+ }
+
+ return 0;
+}
+
+void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct msi_desc *entry;
+ irq_hw_number_t hwirq;
+
+ if (WARN_ON(!phb))
+ return;
+
+ for_each_pci_msi_entry(entry, pdev) {
+ if (entry->irq == NO_IRQ)
+ continue;
+ hwirq = virq_to_hw(entry->irq);
+ irq_set_msi_desc(entry->irq, NULL);
+ irq_dispose_mapping(entry->irq);
+ }
+
+ cxl_cx4_teardown_msi_irqs(pdev);
+}
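
pnv_cxl_alloc_hwirq_ranges(), moved into this new file, satisfies a request for num hwirqs by filling up to CXL_IRQ_RANGES - 1 ranges, halving the per-range request whenever the MSI bitmap cannot provide a contiguous block and rolling everything back on failure. Below is a self-contained sketch of that strategy against a toy 64-bit bitmap; the bitmap helpers and struct names are made up for illustration and are not the kernel's.

/* Self-contained sketch of the shrink-and-retry range allocation used by
 * pnv_cxl_alloc_hwirq_ranges(). The toy bitmap, helper functions and
 * struct below are made up for illustration; none of this is kernel API. */
#include <stdio.h>
#include <string.h>

#define NBITS   64              /* toy MSI bitmap size */
#define NRANGES 4               /* stands in for CXL_IRQ_RANGES */

static unsigned long long bmp;  /* bit i set => hwirq i is allocated */

static unsigned long long range_mask(int start, int count)
{
	unsigned long long m = (count >= 64) ? ~0ULL : (1ULL << count) - 1;
	return m << start;
}

/* Allocate 'count' contiguous hwirqs; return the first one or -1. */
static int bitmap_alloc(int count)
{
	for (int start = 0; start + count <= NBITS; start++) {
		if (!(bmp & range_mask(start, count))) {
			bmp |= range_mask(start, count);
			return start;
		}
	}
	return -1;
}

static void bitmap_free(int start, int count)
{
	bmp &= ~range_mask(start, count);
}

struct irq_ranges { int offset[NRANGES]; int range[NRANGES]; };

static int alloc_ranges(struct irq_ranges *irqs, int num)
{
	memset(irqs, 0, sizeof(*irqs));

	/* Range 0 stays reserved (multiplexed interrupt in the real code). */
	for (int i = 1; i < NRANGES && num; i++) {
		int try = num, hwirq = -1;

		while (try) {
			hwirq = bitmap_alloc(try);
			if (hwirq >= 0)
				break;
			try /= 2;       /* shrink the request and retry */
		}
		if (!try)
			goto fail;

		irqs->offset[i] = hwirq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto fail;              /* out of ranges before satisfying num */
	return 0;

fail:
	for (int i = 1; i < NRANGES; i++)
		if (irqs->range[i])
			bitmap_free(irqs->offset[i], irqs->range[i]);
	return -1;
}

int main(void)
{
	struct irq_ranges irqs;

	bmp |= 1ULL << 45;              /* pretend one hwirq is already taken */
	if (!alloc_ranges(&irqs, 60))
		for (int i = 1; i < NRANGES; i++)
			printf("range %d: offset %d, %d hwirqs\n",
			       i, irqs.offset[i], irqs.range[i]);
	return 0;
}

With one bit taken in the middle of the map, a request for 60 hwirqs comes back as three ranges of 30, 15 and 15, mirroring the halving behaviour of the kernel routine.
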
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3a5ea8236db8..6b9528307f62 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -55,6 +55,7 @@
#define POWERNV_IOMMU_DEFAULT_LEVELS 1
#define POWERNV_IOMMU_MAX_LEVELS 5
+static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU" };
static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
@@ -141,16 +142,14 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
- unsigned long pe;
+ unsigned long pe = phb->ioda.total_pe_num - 1;
- do {
- pe = find_next_zero_bit(phb->ioda.pe_alloc,
- phb->ioda.total_pe_num, 0);
- if (pe >= phb->ioda.total_pe_num)
- return NULL;
- } while(test_and_set_bit(pe, phb->ioda.pe_alloc));
+ for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
+ if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
+ return pnv_ioda_init_pe(phb, pe);
+ }
- return pnv_ioda_init_pe(phb, pe);
+ return NULL;
}
static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
@@ -192,18 +191,15 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb)
goto fail;
}
- /* Mark the M64 BAR assigned */
- set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);
-
/*
- * Strip off the segment used by the reserved PE, which is
- * expected to be 0 or last one of PE capabicity.
+ * Exclude the segments for reserved and root bus PE, which
+ * are first or last two PEs.
*/
r = &phb->hose->mem_resources[1];
if (phb->ioda.reserved_pe_idx == 0)
- r->start += phb->ioda.m64_segsize;
+ r->start += (2 * phb->ioda.m64_segsize);
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
- r->end -= phb->ioda.m64_segsize;
+ r->end -= (2 * phb->ioda.m64_segsize);
else
pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
phb->ioda.reserved_pe_idx);
@@ -283,14 +279,14 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
}
/*
- * Exclude the segment used by the reserved PE, which
- * is expected to be 0 or last supported PE#.
+ * Exclude the segments for reserved and root bus PE, which
+ * are first or last two PEs.
*/
r = &phb->hose->mem_resources[1];
if (phb->ioda.reserved_pe_idx == 0)
- r->start += phb->ioda.m64_segsize;
+ r->start += (2 * phb->ioda.m64_segsize);
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
- r->end -= phb->ioda.m64_segsize;
+ r->end -= (2 * phb->ioda.m64_segsize);
else
WARN(1, "Wrong reserved PE#%d on PHB#%d\n",
phb->ioda.reserved_pe_idx, phb->hose->global_number);
@@ -405,6 +401,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
struct pci_controller *hose = phb->hose;
struct device_node *dn = hose->dn;
struct resource *res;
+ u32 m64_range[2], i;
const u32 *r;
u64 pci_addr;
@@ -425,6 +422,30 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
return;
}
+ /*
+ * Find the available M64 BAR range and pick up the last one to
+ * cover the whole 64-bit space. We support only one range.
+ */
+ if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges",
+ m64_range, 2)) {
+ /* In absence of the property, assume 0..15 */
+ m64_range[0] = 0;
+ m64_range[1] = 16;
+ }
+ /* We only support 64 bits in our allocator */
+ if (m64_range[1] > 63) {
+ pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n",
+ __func__, m64_range[1], phb->hose->global_number);
+ m64_range[1] = 63;
+ }
+ /* Empty range, no m64 */
+ if (m64_range[1] <= m64_range[0]) {
+ pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n",
+ __func__, phb->hose->global_number);
+ return;
+ }
+
+ /* Configure M64 information */
res = &hose->mem_resources[1];
res->name = dn->full_name;
res->start = of_translate_address(dn, r + 2);
@@ -437,11 +458,28 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num;
phb->ioda.m64_base = pci_addr;
- pr_info(" MEM64 0x%016llx..0x%016llx -> 0x%016llx\n",
- res->start, res->end, pci_addr);
+ /* This lines up nicely with the display from processing OF ranges */
+ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n",
+ res->start, res->end, pci_addr, m64_range[0],
+ m64_range[0] + m64_range[1] - 1);
+
+ /* Mark all M64 used up by default */
+ phb->ioda.m64_bar_alloc = (unsigned long)-1;
/* Use last M64 BAR to cover M64 window */
- phb->ioda.m64_bar_idx = 15;
+ m64_range[1]--;
+ phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1];
+
+ pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx);
+
+ /* Mark remaining ones free */
+ for (i = m64_range[0]; i < m64_range[1]; i++)
+ clear_bit(i, &phb->ioda.m64_bar_alloc);
+
+ /*
+ * Setup init functions for M64 based on IODA version, IODA3 uses
+ * the IODA2 code.
+ */
if (phb->type == PNV_PHB_IODA1)
phb->init_m64 = pnv_ioda1_init_m64;
else
@@ -596,7 +634,7 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
* but in the meantime, we need to protect them to avoid warnings
*/
#ifdef CONFIG_PCI_MSI
-static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
+struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -714,7 +752,6 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
return 0;
}
-#ifdef CONFIG_PCI_IOV
static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
@@ -749,9 +786,11 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
}
rid_end = pe->rid + (count << 8);
} else {
+#ifdef CONFIG_PCI_IOV
if (pe->flags & PNV_IODA_PE_VF)
parent = pe->parent_dev;
else
+#endif
parent = pe->pdev->bus->self;
bcomp = OpalPciBusAll;
dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
@@ -761,7 +800,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
/* Clear the reverse map */
for (rid = pe->rid; rid < rid_end; rid++)
- phb->ioda.pe_rmap[rid] = 0;
+ phb->ioda.pe_rmap[rid] = IODA_INVALID_PE;
/* Release from all parents PELT-V */
while (parent) {
@@ -789,11 +828,12 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
pe->pbus = NULL;
pe->pdev = NULL;
+#ifdef CONFIG_PCI_IOV
pe->parent_dev = NULL;
+#endif
return 0;
}
-#endif /* CONFIG_PCI_IOV */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
@@ -1024,6 +1064,16 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
+
+ /*
+ * In the partial hotplug case, the PCI device might still be
+ * associated with the PE and doesn't need to be attached to it
+ * again.
+ */
+ if (pdn->pe_number != IODA_INVALID_PE)
+ continue;
+
+ pe->device_count++;
pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -1042,9 +1092,26 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
struct pci_controller *hose = pci_bus_to_host(bus);
struct pnv_phb *phb = hose->private_data;
struct pnv_ioda_pe *pe = NULL;
+ unsigned int pe_num;
+
+ /*
+ * In the partial hotplug case, the PE instance might still be alive.
+ * We should reuse it instead of allocating a new one.
+ */
+ pe_num = phb->ioda.pe_rmap[bus->number << 8];
+ if (pe_num != IODA_INVALID_PE) {
+ pe = &phb->ioda.pe_array[pe_num];
+ pnv_ioda_setup_same_PE(bus, pe);
+ return NULL;
+ }
+
+ /* PE number for root bus should have been reserved */
+ if (pci_is_root_bus(bus) &&
+ phb->ioda.root_pe_idx != IODA_INVALID_PE)
+ pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
/* Check if PE is determined by M64 */
- if (phb->pick_m64_pe)
+ if (!pe && phb->pick_m64_pe)
pe = phb->pick_m64_pe(bus, all);
/* The PE number isn't pinned by M64 */
@@ -1156,30 +1223,6 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
pnv_ioda_setup_npu_PE(pdev);
}
-static void pnv_ioda_setup_PEs(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- pnv_ioda_setup_bus_PE(bus, false);
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->subordinate) {
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
- pnv_ioda_setup_bus_PE(dev->subordinate, true);
- else
- pnv_ioda_setup_PEs(dev->subordinate);
- }
- }
-}
-
-/*
- * Configure PEs so that the downstream PCI buses and devices
- * could have their associated PE#. Unfortunately, we didn't
- * figure out the way to identify the PLX bridge yet. So we
- * simply put the PCI bus and the subordinate behind the root
- * port to PE# here. The game rule here is expected to be changed
- * as soon as we can detected PLX bridge correctly.
- */
static void pnv_pci_ioda_setup_PEs(void)
{
struct pci_controller *hose, *tmp;
@@ -1187,22 +1230,11 @@ static void pnv_pci_ioda_setup_PEs(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
phb = hose->private_data;
-
- /* M64 layout might affect PE allocation */
- if (phb->reserve_m64_pe)
- phb->reserve_m64_pe(hose->bus, NULL, true);
-
- /*
- * On NPU PHB, we expect separate PEs for individual PCI
- * functions. PCI bus dependent PEs are required for the
- * remaining types of PHBs.
- */
if (phb->type == PNV_PHB_NPU) {
/* PE#0 is needed for error reporting */
pnv_ioda_reserve_pe(phb, 0);
pnv_ioda_setup_npu_PEs(hose->bus);
- } else
- pnv_ioda_setup_PEs(hose->bus);
+ }
}
}
@@ -1728,7 +1760,14 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
}
}
-static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
+static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
+ bool real_mode)
+{
+ return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
+ (phb->regs + 0x210);
+}
+
+static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
unsigned long index, unsigned long npages, bool rm)
{
struct iommu_table_group_link *tgl = list_first_entry_or_null(
@@ -1736,33 +1775,17 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
next);
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
- __be64 __iomem *invalidate = rm ?
- (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
- pe->phb->ioda.tce_inval_reg;
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
unsigned long start, end, inc;
- const unsigned shift = tbl->it_page_shift;
start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
npages - 1);
- /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
- if (tbl->it_busno) {
- start <<= shift;
- end <<= shift;
- inc = 128ull << shift;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
- /* p7ioc-style invalidation, 2 TCEs per write */
- start |= (1ull << 63);
- end |= (1ull << 63);
- inc = 16;
- } else {
- /* Default (older HW) */
- inc = 128;
- }
-
+ /* p7ioc-style invalidation, 2 TCEs per write */
+ start |= (1ull << 63);
+ end |= (1ull << 63);
+ inc = 16;
end |= inc - 1; /* round up end to be different than start */
mb(); /* Ensure above stores are visible */
@@ -1783,13 +1806,13 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
- if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
- pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
+ if (!ret)
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
return ret;
}
@@ -1800,9 +1823,8 @@ static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
{
long ret = pnv_tce_xchg(tbl, index, hpa, direction);
- if (!ret && (tbl->it_type &
- (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
- pnv_pci_ioda1_tce_invalidate(tbl, index, 1, false);
+ if (!ret)
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, 1, false);
return ret;
}
@@ -1813,8 +1835,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
}
static struct iommu_table_ops pnv_ioda1_iommu_ops = {
@@ -1826,45 +1847,42 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
.get = pnv_tce_get,
};
-#define TCE_KILL_INVAL_ALL PPC_BIT(0)
-#define TCE_KILL_INVAL_PE PPC_BIT(1)
-#define TCE_KILL_INVAL_TCE PPC_BIT(2)
+#define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0)
+#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
+#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
-void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
+void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
{
- const unsigned long val = TCE_KILL_INVAL_ALL;
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
+ const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
mb(); /* Ensure previous TCE table stores are visible */
if (rm)
- __raw_rm_writeq(cpu_to_be64(val),
- (__be64 __iomem *)
- phb->ioda.tce_inval_reg_phys);
+ __raw_rm_writeq(cpu_to_be64(val), invalidate);
else
- __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+ __raw_writeq(cpu_to_be64(val), invalidate);
}
-static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
+static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
/* 01xb - invalidate TCEs that match the specified PE# */
- unsigned long val = TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
- struct pnv_phb *phb = pe->phb;
-
- if (!phb->ioda.tce_inval_reg)
- return;
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+ unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
mb(); /* Ensure above stores are visible */
- __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+ __raw_writeq(cpu_to_be64(val), invalidate);
}
-static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
- __be64 __iomem *invalidate, unsigned shift,
- unsigned long index, unsigned long npages)
+static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
+ unsigned shift, unsigned long index,
+ unsigned long npages)
{
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */
- start = TCE_KILL_INVAL_TCE;
- start |= (pe_number & 0xFF);
+ start = PHB3_TCE_KILL_INVAL_ONE;
+ start |= (pe->pe_number & 0xFF);
end = start;
/* Figure out the start, end and step */
@@ -1882,6 +1900,17 @@ static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
}
}
+static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb;
+
+ if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
+ pnv_pci_phb3_tce_invalidate_pe(pe);
+ else
+ opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE,
+ pe->pe_number, 0, 0, 0);
+}
+
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
unsigned long index, unsigned long npages, bool rm)
{
@@ -1890,34 +1919,43 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
- __be64 __iomem *invalidate = rm ?
- (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
- pe->phb->ioda.tce_inval_reg;
+ struct pnv_phb *phb = pe->phb;
+ unsigned int shift = tbl->it_page_shift;
- if (pe->phb->type == PNV_PHB_NPU) {
+ if (phb->type == PNV_PHB_NPU) {
/*
* The NVLink hardware does not support TCE kill
* per TCE entry so we have to invalidate
* the entire cache for it.
*/
- pnv_pci_ioda2_tce_invalidate_entire(pe->phb, rm);
+ pnv_pci_phb3_tce_invalidate_entire(phb, rm);
continue;
}
- pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
- invalidate, tbl->it_page_shift,
- index, npages);
+ if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
+ pnv_pci_phb3_tce_invalidate(pe, rm, shift,
+ index, npages);
+ else if (rm)
+ opal_rm_pci_tce_kill(phb->opal_id,
+ OPAL_PCI_TCE_KILL_PAGES,
+ pe->pe_number, 1u << shift,
+ index << shift, npages);
+ else
+ opal_pci_tce_kill(phb->opal_id,
+ OPAL_PCI_TCE_KILL_PAGES,
+ pe->pe_number, 1u << shift,
+ index << shift, npages);
}
}
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
- if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
+ if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
return ret;
@@ -1929,8 +1967,7 @@ static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
{
long ret = pnv_tce_xchg(tbl, index, hpa, direction);
- if (!ret && (tbl->it_type &
- (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
+ if (!ret)
pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
return ret;
@@ -1942,8 +1979,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
}
static void pnv_ioda2_table_free(struct iommu_table *tbl)
@@ -2112,12 +2148,6 @@ found:
base * PNV_IODA1_DMA32_SEGSIZE,
IOMMU_PAGE_SHIFT_4K);
- /* OPAL variant of P7IOC SW invalidated TCEs */
- if (phb->ioda.tce_inval_reg)
- tbl->it_type |= (TCE_PCI_SWINV_CREATE |
- TCE_PCI_SWINV_FREE |
- TCE_PCI_SWINV_PAIR);
-
tbl->it_ops = &pnv_ioda1_iommu_ops;
pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
@@ -2179,7 +2209,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
pnv_pci_link_table_and_group(phb->hose->node, num,
tbl, &pe->table_group);
- pnv_pci_ioda2_tce_invalidate_pe(pe);
+ pnv_pci_phb3_tce_invalidate_pe(pe);
return 0;
}
@@ -2240,8 +2270,6 @@ static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
}
tbl->it_ops = &pnv_ioda2_iommu_ops;
- if (pe->phb->ioda.tce_inval_reg)
- tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
*ptbl = tbl;
@@ -2290,10 +2318,6 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);
- /* OPAL variant of PHB3 invalidated TCEs */
- if (pe->phb->ioda.tce_inval_reg)
- tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
-
/*
* Setting table base here only for carrying iommu_group
* further down to let iommu_add_device() do the job.
@@ -2323,7 +2347,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
if (ret)
pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
else
- pnv_pci_ioda2_tce_invalidate_pe(pe);
+ pnv_pci_phb3_tce_invalidate_pe(pe);
pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
@@ -2504,19 +2528,6 @@ static void pnv_pci_ioda_setup_iommu_api(void)
static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
-static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
-{
- const __be64 *swinvp;
-
- /* OPAL variant of PHB3 invalidated TCEs */
- swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
- if (!swinvp)
- return;
-
- phb->ioda.tce_inval_reg_phys = be64_to_cpup(swinvp);
- phb->ioda.tce_inval_reg = ioremap(phb->ioda.tce_inval_reg_phys, 8);
-}
-
static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
unsigned levels, unsigned long limit,
unsigned long *current_offset, unsigned long *total_allocated)
@@ -2657,6 +2668,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
{
int64_t rc;
+ if (!pnv_pci_ioda_pe_dma_weight(pe))
+ return;
+
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
@@ -2688,49 +2702,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
pnv_ioda_setup_bus_dma(pe, pe->pbus);
}
-static void pnv_ioda_setup_dma(struct pnv_phb *phb)
-{
- struct pci_controller *hose = phb->hose;
- struct pnv_ioda_pe *pe;
- unsigned int weight;
-
- /* If we have more PE# than segments available, hand out one
- * per PE until we run out and let the rest fail. If not,
- * then we assign at least one segment per PE, plus more based
- * on the amount of devices under that PE
- */
- pr_info("PCI: Domain %04x has %d available 32-bit DMA segments\n",
- hose->global_number, phb->ioda.dma32_count);
-
- pnv_pci_ioda_setup_opal_tce_kill(phb);
-
- /* Walk our PE list and configure their DMA segments */
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- weight = pnv_pci_ioda_pe_dma_weight(pe);
- if (!weight)
- continue;
-
- /*
- * For IODA2 compliant PHB3, we needn't care about the weight.
- * The all available 32-bits DMA space will be assigned to
- * the specific PE.
- */
- if (phb->type == PNV_PHB_IODA1) {
- pnv_pci_ioda1_setup_dma_pe(phb, pe);
- } else if (phb->type == PNV_PHB_IODA2) {
- pe_info(pe, "Assign DMA32 space\n");
- pnv_pci_ioda2_setup_dma_pe(phb, pe);
- } else if (phb->type == PNV_PHB_NPU) {
- /*
- * We initialise the DMA space for an NPU PHB
- * after setup of the PHB is complete as we
- * point the NPU TVT to the the same location
- * as the PHB3 TVT.
- */
- }
- }
-}
-
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
@@ -2747,12 +2718,13 @@ static void pnv_ioda2_msi_eoi(struct irq_data *d)
}
-static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
+void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
struct irq_data *idata;
struct irq_chip *ichip;
- if (phb->type != PNV_PHB_IODA2)
+ /* The MSI EOI OPAL call is only needed on PHB3 */
+ if (phb->model != PNV_PHB_MODEL_PHB3)
return;
if (!phb->ioda.irq_chip_init) {
@@ -2769,157 +2741,6 @@ static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
irq_set_chip(virq, &phb->ioda.irq_chip);
}
-#ifdef CONFIG_CXL_BASE
-
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- return of_node_get(hose->dn);
-}
-EXPORT_SYMBOL(pnv_pci_get_phb_node);
-
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pe;
- int rc;
-
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- pe_info(pe, "Switching PHB to CXL\n");
-
- rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
- if (rc)
- dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
-
- return rc;
-}
-EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
-
-/* Find PHB for cxl dev and allocate MSI hwirqs?
- * Returns the absolute hardware IRQ number
- */
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
-
- if (hwirq < 0) {
- dev_warn(&dev->dev, "Failed to find a free MSI\n");
- return -ENOSPC;
- }
-
- return phb->msi_base + hwirq;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
-
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
-
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq;
-
- for (i = 1; i < CXL_IRQ_RANGES; i++) {
- if (!irqs->range[i])
- continue;
- pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
- i, irqs->offset[i],
- irqs->range[i]);
- hwirq = irqs->offset[i] - phb->msi_base;
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
- irqs->range[i]);
- }
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
-
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq, try;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- /* 0 is reserved for the multiplexed PSL DSI interrupt */
- for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
- if (hwirq >= 0)
- break;
- try /= 2;
- }
- if (!try)
- goto fail;
-
- irqs->offset[i] = phb->msi_base + hwirq;
- irqs->range[i] = try;
- pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
- i, irqs->offset[i], irqs->range[i]);
- num -= try;
- }
- if (num)
- goto fail;
-
- return 0;
-fail:
- pnv_cxl_release_hwirq_ranges(irqs, dev);
- return -ENOSPC;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
-
-int pnv_cxl_get_irq_count(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return phb->msi_bmp.irq_count;
-}
-EXPORT_SYMBOL(pnv_cxl_get_irq_count);
-
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- unsigned int xive_num = hwirq - phb->msi_base;
- struct pnv_ioda_pe *pe;
- int rc;
-
- if (!(pe = pnv_ioda_get_pe(dev)))
- return -ENODEV;
-
- /* Assign XIVE to PE */
- rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
- if (rc) {
- pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
- "hwirq 0x%x XIVE 0x%x PE\n",
- pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
- return -EIO;
- }
- set_msi_irq_chip(phb, virq);
-
- return 0;
-}
-EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
-#endif
-
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg)
@@ -2976,7 +2797,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = be32_to_cpu(data);
- set_msi_irq_chip(phb, virq);
+ pnv_set_msi_irq_chip(phb, virq);
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
@@ -3197,41 +3018,6 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
}
}
-static void pnv_pci_ioda_setup_seg(void)
-{
- struct pci_controller *tmp, *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- phb = hose->private_data;
-
- /* NPU PHB does not support IO or MMIO segmentation */
- if (phb->type == PNV_PHB_NPU)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- pnv_ioda_setup_pe_seg(pe);
- }
- }
-}
-
-static void pnv_pci_ioda_setup_DMA(void)
-{
- struct pci_controller *hose, *tmp;
- struct pnv_phb *phb;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- pnv_ioda_setup_dma(hose->private_data);
-
- /* Mark the PHB initialization done */
- phb = hose->private_data;
- phb->initialized = 1;
- }
-
- pnv_pci_ioda_setup_iommu_api();
-}
-
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
@@ -3242,6 +3028,9 @@ static void pnv_pci_ioda_create_dbgfs(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
phb = hose->private_data;
+ /* Mark the PHB initialization as done */
+ phb->initialized = 1;
+
sprintf(name, "PCI%04x", hose->global_number);
phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
if (!phb->dbgfs)
@@ -3254,9 +3043,7 @@ static void pnv_pci_ioda_create_dbgfs(void)
static void pnv_pci_ioda_fixup(void)
{
pnv_pci_ioda_setup_PEs();
- pnv_pci_ioda_setup_seg();
- pnv_pci_ioda_setup_DMA();
-
+ pnv_pci_ioda_setup_iommu_api();
pnv_pci_ioda_create_dbgfs();
#ifdef CONFIG_EEH
@@ -3306,6 +3093,115 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
return phb->ioda.io_segsize;
}
+/*
+ * We update the root port, or the upstream port of the bridge
+ * behind the root port, with the PHB's windows in order to
+ * accommodate the resource changes required during PCI (slot)
+ * hotplug; the hotplug slot is connected to either the root
+ * port or a downstream port of the PCIe switch behind it.
+ */
+static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
+ unsigned long type)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dev *bridge = bus->self;
+ struct resource *r, *w;
+ bool msi_region = false;
+ int i;
+
+ /* Check if we need apply fixup to the bridge's windows */
+ if (!pci_is_root_bus(bridge->bus) &&
+ !pci_is_root_bus(bridge->bus->self->bus))
+ return;
+
+ /* Fixup the resources */
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ r = &bridge->resource[PCI_BRIDGE_RESOURCES + i];
+ if (!r->flags || !r->parent)
+ continue;
+
+ w = NULL;
+ if (r->flags & type & IORESOURCE_IO)
+ w = &hose->io_resource;
+ else if (pnv_pci_is_mem_pref_64(r->flags) &&
+ (type & IORESOURCE_PREFETCH) &&
+ phb->ioda.m64_segsize)
+ w = &hose->mem_resources[1];
+ else if (r->flags & type & IORESOURCE_MEM) {
+ w = &hose->mem_resources[0];
+ msi_region = true;
+ }
+
+ r->start = w->start;
+ r->end = w->end;
+
+ /* The 64KB 32-bit MSI region shouldn't be included in
+ * the 32-bit bridge window; otherwise we can see strange
+ * issues, such as the EEH error observed on Garrison.
+ *
+ * Exclude the top 1MB region, which is the minimal alignment
+ * of the 32-bit bridge window.
+ */
+ if (msi_region) {
+ r->end += 0x10000;
+ r->end -= 0x100000;
+ }
+ }
+}
+
+static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dev *bridge = bus->self;
+ struct pnv_ioda_pe *pe;
+ bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
+
+ /* Extend bridge's windows if necessary */
+ pnv_pci_fixup_bridge_resources(bus, type);
+
+ /* The PE for root bus should be realized before any one else */
+ if (!phb->ioda.root_pe_populated) {
+ pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
+ if (pe) {
+ phb->ioda.root_pe_idx = pe->pe_number;
+ phb->ioda.root_pe_populated = true;
+ }
+ }
+
+ /* Don't assign PE to PCI bus, which doesn't have subordinate devices */
+ if (list_empty(&bus->devices))
+ return;
+
+ /* Reserve PEs according to used M64 resources */
+ if (phb->reserve_m64_pe)
+ phb->reserve_m64_pe(bus, NULL, all);
+
+ /*
+ * Assign PE. We might run here because of partial hotplug.
+ * For the case, we just pick up the existing PE and should
+ * not allocate resources again.
+ */
+ pe = pnv_ioda_setup_bus_PE(bus, all);
+ if (!pe)
+ return;
+
+ pnv_ioda_setup_pe_seg(pe);
+ switch (phb->type) {
+ case PNV_PHB_IODA1:
+ pnv_pci_ioda1_setup_dma_pe(phb, pe);
+ break;
+ case PNV_PHB_IODA2:
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ break;
+ default:
+ pr_warn("%s: No DMA for PHB#%d (type %d)\n",
+ __func__, phb->hose->global_number, phb->type);
+ }
+}
+
#ifdef CONFIG_PCI_IOV
static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
int resno)
@@ -3345,7 +3241,7 @@ static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
+bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -3366,6 +3262,178 @@ static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
return true;
}
+static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
+ int num)
+{
+ struct pnv_ioda_pe *pe = container_of(table_group,
+ struct pnv_ioda_pe, table_group);
+ struct pnv_phb *phb = pe->phb;
+ unsigned int idx;
+ long rc;
+
+ pe_info(pe, "Removing DMA window #%d\n", num);
+ for (idx = 0; idx < phb->ioda.dma32_count; idx++) {
+ if (phb->ioda.dma32_segmap[idx] != pe->pe_number)
+ continue;
+
+ rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ idx, 0, 0ul, 0ul, 0ul);
+ if (rc != OPAL_SUCCESS) {
+ pe_warn(pe, "Failure %ld unmapping DMA32 segment#%d\n",
+ rc, idx);
+ return rc;
+ }
+
+ phb->ioda.dma32_segmap[idx] = IODA_INVALID_PE;
+ }
+
+ pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
+ return OPAL_SUCCESS;
+}
+
+static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
+{
+ unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
+ struct iommu_table *tbl = pe->table_group.tables[0];
+ int64_t rc;
+
+ if (!weight)
+ return;
+
+ rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
+ if (rc != OPAL_SUCCESS)
+ return;
+
+ pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
+ if (pe->table_group.group) {
+ iommu_group_put(pe->table_group.group);
+ WARN_ON(pe->table_group.group);
+ }
+
+ free_pages(tbl->it_base, get_order(tbl->it_size << 3));
+ iommu_free_table(tbl, "pnv");
+}
+
+static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
+{
+ struct iommu_table *tbl = pe->table_group.tables[0];
+ unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
+#ifdef CONFIG_IOMMU_API
+ int64_t rc;
+#endif
+
+ if (!weight)
+ return;
+
+#ifdef CONFIG_IOMMU_API
+ rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+#endif
+
+ pnv_pci_ioda2_set_bypass(pe, false);
+ if (pe->table_group.group) {
+ iommu_group_put(pe->table_group.group);
+ WARN_ON(pe->table_group.group);
+ }
+
+ pnv_pci_ioda2_table_free_pages(tbl);
+ iommu_free_table(tbl, "pnv");
+}
+
+static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
+ unsigned short win,
+ unsigned int *map)
+{
+ struct pnv_phb *phb = pe->phb;
+ int idx;
+ int64_t rc;
+
+ for (idx = 0; idx < phb->ioda.total_pe_num; idx++) {
+ if (map[idx] != pe->pe_number)
+ continue;
+
+ if (win == OPAL_M64_WINDOW_TYPE)
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ phb->ioda.reserved_pe_idx, win,
+ idx / PNV_IODA1_M64_SEGS,
+ idx % PNV_IODA1_M64_SEGS);
+ else
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ phb->ioda.reserved_pe_idx, win, 0, idx);
+
+ if (rc != OPAL_SUCCESS)
+ pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
+ rc, win, idx);
+
+ map[idx] = IODA_INVALID_PE;
+ }
+}
+
+static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb;
+
+ if (phb->type == PNV_PHB_IODA1) {
+ pnv_ioda_free_pe_seg(pe, OPAL_IO_WINDOW_TYPE,
+ phb->ioda.io_segmap);
+ pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
+ phb->ioda.m32_segmap);
+ pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
+ phb->ioda.m64_segmap);
+ } else if (phb->type == PNV_PHB_IODA2) {
+ pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
+ phb->ioda.m32_segmap);
+ }
+}
+
+static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb;
+ struct pnv_ioda_pe *slave, *tmp;
+
+ /* Release slave PEs in compound PE */
+ if (pe->flags & PNV_IODA_PE_MASTER) {
+ list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
+ pnv_ioda_release_pe(slave);
+ }
+
+ list_del(&pe->list);
+ switch (phb->type) {
+ case PNV_PHB_IODA1:
+ pnv_pci_ioda1_release_pe_dma(pe);
+ break;
+ case PNV_PHB_IODA2:
+ pnv_pci_ioda2_release_pe_dma(pe);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ pnv_ioda_release_pe_seg(pe);
+ pnv_ioda_deconfigure_pe(pe->phb, pe);
+ pnv_ioda_free_pe(pe);
+}
+
+static void pnv_pci_release_device(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+
+ if (pdev->is_virtfn)
+ return;
+
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+ return;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ WARN_ON(--pe->device_count < 0);
+ if (pe->device_count == 0)
+ pnv_ioda_release_pe(pe);
+}
+
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
@@ -3382,7 +3450,9 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.teardown_msi_irqs = pnv_teardown_msi_irqs,
#endif
.enable_device_hook = pnv_pci_enable_device_hook,
+ .release_device = pnv_pci_release_device,
.window_alignment = pnv_pci_window_alignment,
+ .setup_bridge = pnv_pci_setup_bridge,
.reset_secondary_bus = pnv_pci_reset_secondary_bus,
.dma_set_mask = pnv_pci_ioda_dma_set_mask,
.dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
@@ -3410,6 +3480,26 @@ static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
.shutdown = pnv_pci_ioda_shutdown,
};
+#ifdef CONFIG_CXL_BASE
+const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
+ .dma_bus_setup = pnv_pci_dma_bus_setup,
+#ifdef CONFIG_PCI_MSI
+ .setup_msi_irqs = pnv_cxl_cx4_setup_msi_irqs,
+ .teardown_msi_irqs = pnv_cxl_cx4_teardown_msi_irqs,
+#endif
+ .enable_device_hook = pnv_cxl_enable_device_hook,
+ .disable_device = pnv_cxl_disable_device,
+ .release_device = pnv_pci_release_device,
+ .window_alignment = pnv_pci_window_alignment,
+ .setup_bridge = pnv_pci_setup_bridge,
+ .reset_secondary_bus = pnv_pci_reset_secondary_bus,
+ .dma_set_mask = pnv_pci_ioda_dma_set_mask,
+ .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
+ .shutdown = pnv_pci_ioda_shutdown,
+};
+#endif
+
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
u64 hub_id, int ioda_type)
{
@@ -3417,6 +3507,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
struct pnv_phb *phb;
unsigned long size, m64map_off, m32map_off, pemap_off;
unsigned long iomap_off = 0, dma32map_off = 0;
+ struct resource r;
const __be64 *prop64;
const __be32 *prop32;
int len;
@@ -3425,7 +3516,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
void *aux;
long rc;
- pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
+ if (!of_device_is_available(np))
+ return;
+
+ pr_info("Initializing %s PHB (%s)\n",
+ pnv_phb_names[ioda_type], of_node_full_name(np));
prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
if (!prop64) {
@@ -3476,9 +3571,12 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
/* Get registers */
- phb->regs = of_iomap(np, 0);
- if (phb->regs == NULL)
- pr_err(" Failed to map registers !\n");
+ if (!of_address_to_resource(np, 0, &r)) {
+ phb->regs_phys = r.start;
+ phb->regs = ioremap(r.start, resource_size(&r));
+ if (phb->regs == NULL)
+ pr_err(" Failed to map registers !\n");
+ }
/* Initialize more IODA stuff */
phb->ioda.total_pe_num = 1;
@@ -3489,6 +3587,10 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (prop32)
phb->ioda.reserved_pe_idx = be32_to_cpup(prop32);
+ /* Invalidate RID to PE# mapping */
+ for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++)
+ phb->ioda.pe_rmap[segno] = IODA_INVALID_PE;
+
/* Parse 64-bit MMIO range */
pnv_ioda_parse_m64_window(phb);
@@ -3540,7 +3642,22 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->ioda.dma32_segmap[segno] = IODA_INVALID_PE;
}
phb->ioda.pe_array = aux + pemap_off;
- set_bit(phb->ioda.reserved_pe_idx, phb->ioda.pe_alloc);
+
+ /*
+ * Choose a PE number for the root bus, which shouldn't have
+ * M64 resources consumed by its child devices. Pick the PE
+ * number adjacent to the reserved one if possible.
+ */
+ pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx);
+ if (phb->ioda.reserved_pe_idx == 0) {
+ phb->ioda.root_pe_idx = 1;
+ pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
+ } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) {
+ phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
+ pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
+ } else {
+ phb->ioda.root_pe_idx = IODA_INVALID_PE;
+ }
INIT_LIST_HEAD(&phb->ioda.pe_list);
mutex_init(&phb->ioda.pe_list_mutex);
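
The pnv_ioda_parse_m64_window() hunk now reads ibm,opal-available-m64-ranges (falling back to BARs 0..15), clamps the range to the 64 bits the allocator can track, keeps the last BAR of the range as the default window, and seeds m64_bar_alloc fully used before clearing only the usable BARs. A minimal standalone sketch of that bookkeeping, using the default range and purely local variables (nothing below is kernel API):

/* Sketch of the M64 BAR bookkeeping added to pnv_ioda_parse_m64_window():
 * mark every BAR used, keep the last available BAR as the default window,
 * and free only the rest for later allocation. The variables loosely
 * mirror the patch but nothing here is kernel API. */
#include <stdio.h>

int main(void)
{
	unsigned int m64_range[2] = { 0, 16 };  /* default when the DT property is absent */
	unsigned long m64_bar_alloc;
	unsigned int m64_bar_idx, i;

	/* The allocator is a single unsigned long, so at most 64 BARs. */
	if (m64_range[1] > 63)
		m64_range[1] = 63;

	/* An empty range means M64 stays disabled. */
	if (m64_range[1] <= m64_range[0]) {
		printf("empty M64 range, M64 disabled\n");
		return 0;
	}

	/* Mark all BARs used up front... */
	m64_bar_alloc = ~0UL;

	/* ...reserve the last BAR of the range as the default window... */
	m64_range[1]--;
	m64_bar_idx = m64_range[0] + m64_range[1];

	/* ...and hand the remaining BARs back to the allocator. */
	for (i = m64_range[0]; i < m64_range[0] + m64_range[1]; i++)
		m64_bar_alloc &= ~(1UL << i);

	printf("default M64 BAR: #%u, free BAR mask: %#lx\n",
	       m64_bar_idx, ~m64_bar_alloc);
	return 0;
}
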
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 1d92bd93bcd9..a21d831c1114 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -26,6 +26,7 @@
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
+#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
@@ -36,8 +37,124 @@
#include "powernv.h"
#include "pci.h"
-/* Delay in usec */
-#define PCI_RESET_DELAY_US 3000000
+int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
+{
+ struct device_node *parent = np;
+ u32 bdfn;
+ u64 phbid;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &bdfn);
+ if (ret)
+ return -ENXIO;
+
+ bdfn = ((bdfn & 0x00ffff00) >> 8);
+ while ((parent = of_get_parent(parent))) {
+ if (!PCI_DN(parent)) {
+ of_node_put(parent);
+ break;
+ }
+
+ if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
+ of_node_put(parent);
+ continue;
+ }
+
+ ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
+ if (ret) {
+ of_node_put(parent);
+ return -ENXIO;
+ }
+
+ *id = PCI_SLOT_ID(phbid, bdfn);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
+
+int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
+{
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_GET_DEVICE_TREE))
+ return -ENXIO;
+
+ rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
+ if (rc < OPAL_SUCCESS)
+ return -EIO;
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
+
+int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
+{
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
+ return -ENXIO;
+
+ rc = opal_pci_get_presence_state(id, (uint64_t)state);
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
+
+int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
+{
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
+ return -ENXIO;
+
+ rc = opal_pci_get_power_state(id, (uint64_t)state);
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
+
+int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
+{
+ struct opal_msg m;
+ int token, ret;
+ int64_t rc;
+
+ if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
+ return -ENXIO;
+
+ token = opal_async_get_token_interruptible();
+ if (unlikely(token < 0))
+ return token;
+
+ rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
+ if (rc == OPAL_SUCCESS) {
+ ret = 0;
+ goto exit;
+ } else if (rc != OPAL_ASYNC_COMPLETION) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ ret = opal_async_wait_response(token, &m);
+ if (ret < 0)
+ goto exit;
+
+ if (msg) {
+ ret = 1;
+ memcpy(msg, &m, sizeof(m));
+ }
+
+exit:
+ opal_async_release_token(token);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -587,7 +704,7 @@ static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 proto_tce = iommu_direction_to_tce_perm(direction);
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
@@ -620,8 +737,8 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
if (newtce & TCE_PCI_WRITE)
newtce |= TCE_PCI_READ;
- oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
- *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
+ *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
*direction = iommu_tce_direction(oldtce);
return 0;
@@ -815,13 +932,14 @@ void __init pnv_pci_init(void)
for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
pnv_pci_init_ioda2_phb(np);
+ /* Look for IODA3 built-in PHB4s; we treat them as IODA2 */
+ for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
+ pnv_pci_init_ioda2_phb(np);
+
/* Look for NPU PHBs */
for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
pnv_pci_init_npu_phb(np);
- /* Setup the linkage between OF nodes and PHBs */
- pci_devs_phb_init();
-
/* Configure IOMMU DMA hooks */
set_pci_dma_ops(&dma_iommu_ops);
}
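
pnv_pci_get_slot_id() builds the slot identifier from the node's first "reg" cell and the parent PHB's ibm,opal-phbid; the (reg & 0x00ffff00) >> 8 step extracts the bus/device/function triple that the standard OF PCI encoding stores in bits 8-23. A tiny sketch of just that decoding step, with a made-up reg value (the PCI_SLOT_ID() composition itself is not shown here):

/* Sketch of the bdfn extraction in pnv_pci_get_slot_id(): the first cell
 * of a PCI node's "reg" property carries bus/device/function in bits 8-23,
 * so the patch's (reg & 0x00ffff00) >> 8 yields the usual bus << 8 | devfn.
 * The sample reg value below is made up for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg  = 0x00021000;               /* hypothetical reg cell */
	uint32_t bdfn = (reg & 0x00ffff00) >> 8;  /* same mask/shift as the patch */

	printf("bus %u device %u function %u (bdfn %#x)\n",
	       (bdfn >> 8) & 0xff,                /* bus number      */
	       (bdfn >> 3) & 0x1f,                /* device number   */
	       bdfn & 0x7,                        /* function number */
	       bdfn);
	return 0;
}
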
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7dee25e304db..e64df7894d6e 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -1,6 +1,10 @@
#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H
+#include <linux/iommu.h>
+#include <asm/iommu.h>
+#include <asm/msi_bitmap.h>
+
struct pci_dn;
enum pnv_phb_type {
@@ -30,6 +34,7 @@ struct pnv_phb;
struct pnv_ioda_pe {
unsigned long flags;
struct pnv_phb *phb;
+ int device_count;
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
@@ -71,6 +76,7 @@ struct pnv_ioda_pe {
};
#define PNV_PHB_FLAG_EEH (1 << 0)
+#define PNV_PHB_FLAG_CXL (1 << 1) /* Real PHB supporting the cxl kernel API */
struct pnv_phb {
struct pci_controller *hose;
@@ -80,6 +86,7 @@ struct pnv_phb {
u64 opal_id;
int flags;
void __iomem *regs;
+ u64 regs_phys;
int initialized;
spinlock_t lock;
@@ -110,6 +117,8 @@ struct pnv_phb {
/* Global bridge info */
unsigned int total_pe_num;
unsigned int reserved_pe_idx;
+ unsigned int root_pe_idx;
+ bool root_pe_populated;
/* 32-bit MMIO window */
unsigned int m32_size;
@@ -152,17 +161,8 @@ struct pnv_phb {
struct list_head pe_list;
struct mutex pe_list_mutex;
- /* Reverse map of PEs, will have to extend if
- * we are to support more than 256 PEs, indexed
- * bus { bus, devfn }
- */
- unsigned char pe_rmap[0x10000];
-
- /* TCE cache invalidate registers (physical and
- * remapped)
- */
- phys_addr_t tce_inval_reg_phys;
- __be64 __iomem *tce_inval_reg;
+ /* Reverse map of PEs, indexed by {bus, devfn} */
+ unsigned int pe_rmap[0x10000];
} ioda;
/* PHB and hub status structure */
@@ -173,12 +173,15 @@ struct pnv_phb {
struct OpalIoP7IOCErrorData hub_diag;
} diag;
+#ifdef CONFIG_CXL_BASE
+ struct cxl_afu *cxl_afu;
+#endif
};
extern struct pci_ops pnv_pci_ops;
extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction);
@@ -203,8 +206,6 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
-extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- __be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
@@ -212,6 +213,9 @@ extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
+extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
+extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
+extern bool pnv_pci_enable_device_hook(struct pci_dev *dev);
extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...);
@@ -224,7 +228,7 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
/* Nvlink functions */
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
-extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
+extern void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
struct iommu_table *tbl);
@@ -232,4 +236,15 @@ extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num);
extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
+
+/* cxl functions */
+extern bool pnv_cxl_enable_device_hook(struct pci_dev *dev);
+extern void pnv_cxl_disable_device(struct pci_dev *dev);
+extern int pnv_cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
+extern void pnv_cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
+
+
+/* phb ops (cxl switches these when enabling the kernel api on the phb) */
+extern const struct pci_controller_ops pnv_cxl_cx4_ioda_controller_ops;
+
#endif /* __POWERNV_PCI_H */
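Throughout this merge the DMA-mapping callbacks (pnv_tce_build() in this header, and the ps3 and pseries variants further down) change from taking a struct dma_attrs pointer to a plain unsigned long bitmask of DMA_ATTR_* flags. From a caller's side the attributes are simply OR'd together; a small usage sketch, with the device and buffer as placeholders:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Sketch: map a buffer for device-to-CPU DMA, skipping the CPU cache
 * sync, passing attributes as an unsigned long bitmask.
 */
static dma_addr_t map_for_device(struct device *dev, void *buf, size_t len)
{
        return dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE,
                                    DMA_ATTR_SKIP_CPU_SYNC);
}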
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 6dbc0a1da1f6..da7c843ac7f1 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -18,6 +18,7 @@ static inline void pnv_pci_shutdown(void) { }
#endif
extern u32 pnv_get_supported_cpuidle_states(void);
+extern u64 pnv_deepest_stop_state;
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index ee6430bedcc3..efe8b6bb168b 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -58,7 +58,7 @@ static void __init pnv_setup_arch(void)
/* XXX PMCS */
}
-static void __init pnv_init_early(void)
+static void __init pnv_init(void)
{
/*
* Initialize the LPC bus now so that legacy serial
@@ -268,21 +268,16 @@ static void __init pnv_setup_machdep_opal(void)
static int __init pnv_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "ibm,powernv"))
+ if (!of_machine_is_compatible("ibm,powernv"))
return 0;
- if (IS_ENABLED(CONFIG_PPC_RADIX_MMU) && radix_enabled())
- radix_init_native();
- else if (IS_ENABLED(CONFIG_PPC_STD_MMU_64))
- hpte_init_native();
-
if (firmware_has_feature(FW_FEATURE_OPAL))
pnv_setup_machdep_opal();
pr_debug("PowerNV detected !\n");
+ pnv_init();
+
return 1;
}
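pnv_probe() now matches the unflattened device tree with of_machine_is_compatible() and calls the platform init directly from probe, replacing the old init_early hook and flat-tree check (pSeries_probe() later in this diff gets the same treatment). A sketch of the resulting shape, with the init body elided and names chosen for illustration:

#include <linux/init.h>
#include <linux/of.h>

static void __init my_platform_init(void)
{
        /* LPC bus, consoles, OPAL setup, ... */
}

/* Sketch: probe runs late enough to use the unflattened tree. */
static int __init my_probe(void)
{
        if (!of_machine_is_compatible("ibm,powernv"))
                return 0;

        my_platform_init();
        return 1;
}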
@@ -308,14 +303,13 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
define_machine(powernv) {
.name = "PowerNV",
.probe = pnv_probe,
- .init_early = pnv_init_early,
.setup_arch = pnv_setup_arch,
.init_IRQ = pnv_init_IRQ,
.show_cpuinfo = pnv_show_cpuinfo,
.get_proc_freq = pnv_get_proc_freq,
.progress = pnv_progress,
.machine_shutdown = pnv_shutdown,
- .power_save = power7_idle,
+ .power_save = NULL,
.calibrate_decr = generic_calibrate_decr,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index ad7b1a3dbed0..c789258ae1e1 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -182,7 +182,9 @@ static void pnv_smp_cpu_kill_self(void)
ppc64_runlatch_off();
- if (idle_states & OPAL_PM_WINKLE_ENABLED)
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ srr1 = power9_idle_stop(pnv_deepest_stop_state);
+ else if (idle_states & OPAL_PM_WINKLE_ENABLED)
srr1 = power7_winkle();
else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
(idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 3f175e8aedb4..57caaf11a83f 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -189,7 +189,7 @@ fail_malloc:
return result;
}
-static int __init_refok ps3_setup_uhc_device(
+static int __ref ps3_setup_uhc_device(
const struct ps3_repository_device *repo, enum ps3_match_id match_id,
enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type)
{
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index c9a3e677192a..cb3c50328de8 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -195,12 +195,12 @@ static void ps3_hpte_clear(void)
void __init ps3_hpte_init(unsigned long htab_size)
{
- ppc_md.hpte_invalidate = ps3_hpte_invalidate;
- ppc_md.hpte_updatepp = ps3_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
- ppc_md.hpte_insert = ps3_hpte_insert;
- ppc_md.hpte_remove = ps3_hpte_remove;
- ppc_md.hpte_clear_all = ps3_hpte_clear;
+ mmu_hash_ops.hpte_invalidate = ps3_hpte_invalidate;
+ mmu_hash_ops.hpte_updatepp = ps3_hpte_updatepp;
+ mmu_hash_ops.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_insert = ps3_hpte_insert;
+ mmu_hash_ops.hpte_remove = ps3_hpte_remove;
+ mmu_hash_ops.hpte_clear_all = ps3_hpte_clear;
ppc64_pft_size = __ilog2(htab_size);
}
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index bfccdc7cb85f..814a7eaa7769 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -1198,7 +1198,7 @@ int ps3_repository_delete_highmem_info(unsigned int region_index)
return result ? -1 : 0;
}
-#endif /* defined(CONFIG_PS3_WRITE_REPOSITORY) */
+#endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */
#if defined(DEBUG)
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 799c8580ab09..3a487e7f4a5e 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -80,7 +80,7 @@ static void ps3_power_save(void)
lv1_pause(0);
}
-static void ps3_restart(char *cmd)
+static void __noreturn ps3_restart(char *cmd)
{
DBG("%s:%d cmd '%s'\n", __func__, __LINE__, cmd);
@@ -96,7 +96,7 @@ static void ps3_power_off(void)
ps3_sys_manager_power_off(); /* never returns */
}
-static void ps3_halt(void)
+static void __noreturn ps3_halt(void)
{
DBG("%s:%d\n", __func__, __LINE__);
@@ -226,23 +226,24 @@ static void __init ps3_progress(char *s, unsigned short hex)
printk("*** %04x : %s\n", hex, s ? s : "");
}
-static int __init ps3_probe(void)
+void __init ps3_early_mm_init(void)
{
unsigned long htab_size;
- unsigned long dt_root;
+ ps3_mm_init();
+ ps3_mm_vas_create(&htab_size);
+ ps3_hpte_init(htab_size);
+}
+
+static int __init ps3_probe(void)
+{
DBG(" -> %s:%d\n", __func__, __LINE__);
- dt_root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(dt_root, "sony,ps3"))
+ if (!of_machine_is_compatible("sony,ps3"))
return 0;
- powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
-
ps3_os_area_save_params();
- ps3_mm_init();
- ps3_mm_vas_create(&htab_size);
- ps3_hpte_init(htab_size);
+
pm_power_off = ps3_power_off;
DBG(" <- %s:%d\n", __func__, __LINE__);
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 5606fe36faf2..8af1c15aef85 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -516,7 +516,7 @@ core_initcall(ps3_system_bus_init);
*/
static void * ps3_alloc_coherent(struct device *_dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int result;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -553,7 +553,7 @@ clean_none:
}
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -569,7 +569,7 @@ static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
@@ -592,7 +592,7 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
@@ -626,7 +626,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
}
static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
@@ -640,7 +640,7 @@ static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
}
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
@@ -670,14 +670,14 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG();
return 0;
}
static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
@@ -686,7 +686,7 @@ static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG();
}
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index 791c6142c4a7..11b45b58c81b 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -20,9 +20,9 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/rtc.h>
#include <asm/firmware.h>
-#include <asm/rtc.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index fc44ad0475f8..66e7227469b8 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -574,7 +574,7 @@ static int cmm_mem_going_offline(void *arg)
cmm_dbg("Failed to allocate memory for list "
"management. Memory hotplug "
"failed.\n");
- return ENOMEM;
+ return -ENOMEM;
}
memcpy(npa, pa_curr, PAGE_SIZE);
if (pa_curr == cmm_page_list)
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 2b93ae8d557a..4748124faa10 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -27,6 +27,15 @@
#include <asm/uaccess.h>
#include <asm/rtas.h>
+struct workqueue_struct *pseries_hp_wq;
+
+struct pseries_hp_work {
+ struct work_struct work;
+ struct pseries_hp_errorlog *errlog;
+ struct completion *hp_completion;
+ int *rc;
+};
+
struct cc_workarea {
__be32 drc_index;
__be32 zero;
@@ -368,10 +377,51 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
return rc;
}
+void pseries_hp_work_fn(struct work_struct *work)
+{
+ struct pseries_hp_work *hp_work =
+ container_of(work, struct pseries_hp_work, work);
+
+ if (hp_work->rc)
+ *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
+ else
+ handle_dlpar_errorlog(hp_work->errlog);
+
+ if (hp_work->hp_completion)
+ complete(hp_work->hp_completion);
+
+ kfree(hp_work->errlog);
+ kfree((void *)work);
+}
+
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
+ struct completion *hotplug_done, int *rc)
+{
+ struct pseries_hp_work *work;
+ struct pseries_hp_errorlog *hp_errlog_copy;
+
+ hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
+ GFP_KERNEL);
+ memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));
+
+ work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
+ if (work) {
+ INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
+ work->errlog = hp_errlog_copy;
+ work->hp_completion = hotplug_done;
+ work->rc = rc;
+ queue_work(pseries_hp_wq, (struct work_struct *)work);
+ } else {
+ *rc = -ENOMEM;
+ complete(hotplug_done);
+ }
+}
+
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
struct pseries_hp_errorlog *hp_elog;
+ struct completion hotplug_done;
const char *arg;
int rc;
@@ -439,7 +489,9 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
goto dlpar_store_out;
}
- rc = handle_dlpar_errorlog(hp_elog);
+ init_completion(&hotplug_done);
+ queue_hotplug_event(hp_elog, &hotplug_done, &rc);
+ wait_for_completion(&hotplug_done);
dlpar_store_out:
kfree(hp_elog);
@@ -450,6 +502,8 @@ static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
static int __init pseries_dlpar_init(void)
{
+ pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
+ WQ_UNBOUND, 1);
return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, pseries_dlpar_init);
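queue_hotplug_event() above supports two calling styles: a synchronous caller passes a completion plus an rc pointer and waits (as dlpar_store() now does), while an interrupt-context caller may pass NULL for both and let the work item run unattended (as the new RAS hotplug handler further down does). A sketch of the synchronous style, assuming the prototype this patch adds to pseries.h and the pseries_hp_errorlog definition from asm/rtas.h:

#include <linux/completion.h>
#include <asm/rtas.h>
#include "pseries.h"    /* queue_hotplug_event(), added by this patch */

/* Sketch: queue a hotplug error log and wait for its handler to finish. */
static int submit_hotplug_and_wait(struct pseries_hp_errorlog *hp_elog)
{
        DECLARE_COMPLETION_ONSTACK(hotplug_done);
        int rc;

        queue_hotplug_event(hp_elog, &hotplug_done, &rc);
        wait_for_completion(&hotplug_done);

        return rc;
}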
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 3998e0f9a03b..1c428f06b14c 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -2,7 +2,7 @@
* The file intends to implement the platform dependent EEH operations on pseries.
* Actually, the pseries platform is built based on RTAS heavily. That means the
* pseries platform dependent EEH operations will be built on RTAS calls. The functions
- * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
+ * are derived from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
* been done.
*
* Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 18380e8f6dfe..a6ddca833119 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -26,48 +26,21 @@ void request_event_sources_irqs(struct device_node *np,
{
int i, index, count = 0;
struct of_phandle_args oirq;
- const u32 *opicprop;
- unsigned int opicplen;
unsigned int virqs[16];
- /* Check for obsolete "open-pic-interrupt" property. If present, then
- * map those interrupts using the default interrupt host and default
- * trigger
- */
- opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
- if (opicprop) {
- opicplen /= sizeof(u32);
- for (i = 0; i < opicplen; i++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_mapping(NULL, *(opicprop++));
- if (virqs[count] == NO_IRQ) {
- pr_err("event-sources: Unable to allocate "
- "interrupt number for %s\n",
- np->full_name);
- WARN_ON(1);
- }
- else
- count++;
-
- }
- }
- /* Else use normal interrupt tree parsing */
- else {
- /* First try to do a proper OF tree parsing */
- for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
- index++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_of_mapping(&oirq);
- if (virqs[count] == NO_IRQ) {
- pr_err("event-sources: Unable to allocate "
- "interrupt number for %s\n",
- np->full_name);
- WARN_ON(1);
- }
- else
- count++;
+ /* First try to do a proper OF tree parsing */
+ for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
+ index++) {
+ if (count > 15)
+ break;
+ virqs[count] = irq_create_of_mapping(&oirq);
+ if (virqs[count] == NO_IRQ) {
+ pr_err("event-sources: Unable to allocate "
+ "interrupt number for %s\n",
+ np->full_name);
+ WARN_ON(1);
+ } else {
+ count++;
}
}
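With the obsolete open-pic fallback removed, request_event_sources_irqs() relies only on generic OF interrupt parsing: of_irq_parse_one() walks the node's interrupt specifiers and irq_create_of_mapping() turns each one into a Linux virq. A condensed sketch of that loop, with the error reporting trimmed:

#include <linux/of.h>
#include <linux/of_irq.h>

/* Sketch: map every interrupt specifier of a node to a virq and return
 * how many were written into the caller's array.
 */
static int map_node_irqs(struct device_node *np, unsigned int *virqs, int max)
{
        struct of_phandle_args oirq;
        int index, count = 0;

        for (index = 0; count < max &&
             of_irq_parse_one(np, index, &oirq) == 0; index++) {
                unsigned int virq = irq_create_of_mapping(&oirq);

                if (virq)       /* 0 means the mapping failed */
                        virqs[count++] = virq;
        }

        return count;
}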
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 8c80588abacc..ea7f09bd73b1 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -22,6 +22,7 @@
*/
+#include <linux/of_fdt.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
@@ -69,7 +70,8 @@ hypertas_fw_features_table[] = {
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
-void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
+static void __init fw_hypertas_feature_init(const char *hypertas,
+ unsigned long len)
{
const char *s;
int i;
@@ -113,7 +115,7 @@ vec5_fw_features_table[] = {
{FW_FEATURE_PRRN, OV5_PRRN},
};
-void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
+static void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
{
unsigned int index, feat;
int i;
@@ -131,3 +133,45 @@ void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
pr_debug(" <- fw_vec5_feature_init()\n");
}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init probe_fw_features(unsigned long node, const char *uname, int
+ depth, void *data)
+{
+ const char *prop;
+ int len;
+ static int hypertas_found;
+ static int vec5_found;
+
+ if (depth != 1)
+ return 0;
+
+ if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
+ prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
+ &len);
+ if (prop) {
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+ fw_hypertas_feature_init(prop, len);
+ }
+
+ hypertas_found = 1;
+ }
+
+ if (!strcmp(uname, "chosen")) {
+ prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
+ &len);
+ if (prop)
+ fw_vec5_feature_init(prop, len);
+
+ vec5_found = 1;
+ }
+
+ return hypertas_found && vec5_found;
+}
+
+void __init pseries_probe_fw_features(void)
+{
+ of_scan_flat_dt(probe_fw_features, NULL);
+}
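probe_fw_features() is a flattened-device-tree callback: of_scan_flat_dt() invokes it for every node until it returns non-zero, and of_get_flat_dt_prop() reads properties before the tree is unflattened. A minimal sketch of the same callback shape, using a purely illustrative property name:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/of_fdt.h>

/* Sketch: look for "my,example-property" on children of the root node;
 * returning non-zero stops the scan early.
 */
static int __init scan_example(unsigned long node, const char *uname,
                               int depth, void *data)
{
        const void *prop;
        int len;

        if (depth != 1)
                return 0;

        prop = of_get_flat_dt_prop(node, "my,example-property", &len);
        if (prop)
                *(bool *)data = true;

        return prop != NULL;
}

static void __init scan_for_example(void)
{
        bool found = false;

        of_scan_flat_dt(scan_example, &found);
}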
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 282837a1d74b..a1b63e00b2f7 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -903,8 +903,6 @@ static int parse_cede_parameters(void)
static int __init pseries_cpu_hotplug_init(void)
{
- struct device_node *np;
- const char *typep;
int cpu;
int qcss_tok;
@@ -913,17 +911,6 @@ static int __init pseries_cpu_hotplug_init(void)
ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
- for_each_node_by_name(np, "interrupt-controller") {
- typep = of_get_property(np, "compatible", NULL);
- if (strstr(typep, "open-pic")) {
- of_node_put(np);
-
- printk(KERN_INFO "CPU Hotplug not supported on "
- "systems using MPIC\n");
- return 0;
- }
- }
-
rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2ce138542083..43f7beb2902d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -69,13 +69,36 @@ unsigned long pseries_memory_block_size(void)
return memblock_size;
}
-static void dlpar_free_drconf_property(struct property *prop)
+static void dlpar_free_property(struct property *prop)
{
kfree(prop->name);
kfree(prop->value);
kfree(prop);
}
+static struct property *dlpar_clone_property(struct property *prop,
+ u32 prop_size)
+{
+ struct property *new_prop;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return NULL;
+
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->value = kzalloc(prop_size, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value) {
+ dlpar_free_property(new_prop);
+ return NULL;
+ }
+
+ memcpy(new_prop->value, prop->value, prop->length);
+ new_prop->length = prop_size;
+
+ of_property_set_flag(new_prop, OF_DYNAMIC);
+ return new_prop;
+}
+
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
struct property *prop, *new_prop;
@@ -87,19 +110,10 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
if (!prop)
return NULL;
- new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ new_prop = dlpar_clone_property(prop, prop->length);
if (!new_prop)
return NULL;
- new_prop->name = kstrdup(prop->name, GFP_KERNEL);
- new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!new_prop->name || !new_prop->value) {
- dlpar_free_drconf_property(new_prop);
- return NULL;
- }
-
- new_prop->length = prop->length;
-
/* Convert the property to cpu endian-ness */
p = new_prop->value;
*p = be32_to_cpu(*p);
@@ -177,14 +191,74 @@ static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
return 0;
}
+static u32 find_aa_index(struct device_node *dr_node,
+ struct property *ala_prop, const u32 *lmb_assoc)
+{
+ u32 *assoc_arrays;
+ u32 aa_index;
+ int aa_arrays, aa_array_entries, aa_array_sz;
+ int i, index;
+
+ /*
+ * The ibm,associativity-lookup-arrays property is defined to be
+ * a 32-bit value specifying the number of associativity arrays
+ * followed by a 32-bit value specifying the number of entries per
+ * array, followed by the associativity arrays.
+ */
+ assoc_arrays = ala_prop->value;
+
+ aa_arrays = be32_to_cpu(assoc_arrays[0]);
+ aa_array_entries = be32_to_cpu(assoc_arrays[1]);
+ aa_array_sz = aa_array_entries * sizeof(u32);
+
+ aa_index = -1;
+ for (i = 0; i < aa_arrays; i++) {
+ index = (i * aa_array_entries) + 2;
+
+ if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
+ continue;
+
+ aa_index = i;
+ break;
+ }
+
+ if (aa_index == -1) {
+ struct property *new_prop;
+ u32 new_prop_size;
+
+ new_prop_size = ala_prop->length + aa_array_sz;
+ new_prop = dlpar_clone_property(ala_prop, new_prop_size);
+ if (!new_prop)
+ return -1;
+
+ assoc_arrays = new_prop->value;
+
+ /* increment the number of entries in the lookup array */
+ assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
+
+ /* copy the new associativity into the lookup array */
+ index = aa_arrays * aa_array_entries + 2;
+ memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
+
+ of_update_property(dr_node, new_prop);
+
+ /*
+ * The associativity lookup array index for this lmb is
+ * number of entries - 1 since we added its associativity
+ * to the end of the lookup array.
+ */
+ aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
+ }
+
+ return aa_index;
+}
+
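Given the layout described in the comment above (a count of arrays, then the entries-per-array count, then the arrays themselves), array i starts at cell 2 + i * aa_array_entries of the property value, which is exactly the index the loop computes. A small sketch of that indexing on the raw big-endian data:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: locate associativity array i inside the raw value of an
 * ibm,associativity-lookup-arrays property.  Cell 0 is the number of
 * arrays, cell 1 the entries per array.
 */
static const __be32 *assoc_array(const __be32 *assoc, int i)
{
        u32 entries_per_array = be32_to_cpu(assoc[1]);

        return &assoc[2 + i * entries_per_array];
}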
static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
struct device_node *parent, *lmb_node, *dr_node;
+ struct property *ala_prop;
const u32 *lmb_assoc;
- const u32 *assoc_arrays;
u32 aa_index;
- int aa_arrays, aa_array_entries, aa_array_sz;
- int i;
parent = of_find_node_by_path("/");
if (!parent)
@@ -208,34 +282,15 @@ static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
return -ENODEV;
}
- assoc_arrays = of_get_property(dr_node,
- "ibm,associativity-lookup-arrays",
- NULL);
- of_node_put(dr_node);
- if (!assoc_arrays) {
+ ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
+ NULL);
+ if (!ala_prop) {
+ of_node_put(dr_node);
dlpar_free_cc_nodes(lmb_node);
return -ENODEV;
}
- /* The ibm,associativity-lookup-arrays property is defined to be
- * a 32-bit value specifying the number of associativity arrays
- * followed by a 32-bitvalue specifying the number of entries per
- * array, followed by the associativity arrays.
- */
- aa_arrays = be32_to_cpu(assoc_arrays[0]);
- aa_array_entries = be32_to_cpu(assoc_arrays[1]);
- aa_array_sz = aa_array_entries * sizeof(u32);
-
- aa_index = -1;
- for (i = 0; i < aa_arrays; i++) {
- int indx = (i * aa_array_entries) + 2;
-
- if (memcmp(&assoc_arrays[indx], &lmb_assoc[1], aa_array_sz))
- continue;
-
- aa_index = i;
- break;
- }
+ aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
dlpar_free_cc_nodes(lmb_node);
return aa_index;
@@ -533,50 +588,11 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static int dlpar_add_lmb_memory(struct of_drconf_cell *lmb)
+static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
- struct memory_block *mem_block;
unsigned long block_sz;
int nid, rc;
- block_sz = memory_block_size_bytes();
-
- /* Find the node id for this address */
- nid = memory_add_physaddr_to_nid(lmb->base_addr);
-
- /* Add the memory */
- rc = add_memory(nid, lmb->base_addr, block_sz);
- if (rc)
- return rc;
-
- /* Register this block of memory */
- rc = memblock_add(lmb->base_addr, block_sz);
- if (rc) {
- remove_memory(nid, lmb->base_addr, block_sz);
- return rc;
- }
-
- mem_block = lmb_to_memblock(lmb);
- if (!mem_block) {
- remove_memory(nid, lmb->base_addr, block_sz);
- return -EINVAL;
- }
-
- rc = device_online(&mem_block->dev);
- put_device(&mem_block->dev);
- if (rc) {
- remove_memory(nid, lmb->base_addr, block_sz);
- return rc;
- }
-
- lmb->flags |= DRCONF_MEM_ASSIGNED;
- return 0;
-}
-
-static int dlpar_add_lmb(struct of_drconf_cell *lmb)
-{
- int rc;
-
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
@@ -592,10 +608,18 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb)
return rc;
}
- rc = dlpar_add_lmb_memory(lmb);
+ block_sz = memory_block_size_bytes();
+
+ /* Find the node id for this address */
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ /* Add the memory */
+ rc = add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
dlpar_remove_device_tree_lmb(lmb);
dlpar_release_drc(lmb->drc_index);
+ } else {
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
}
return rc;
@@ -748,7 +772,7 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
break;
}
- dlpar_free_drconf_property(prop);
+ dlpar_free_property(prop);
dlpar_memory_out:
of_node_put(dn);
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index 0240c4ff878a..f053bda64ee7 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -113,7 +113,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
* - The owner of an event is determined by combinations of scope,
* event type, and sub-type. There is no easy way to pre-sort clients
* by scope or event type alone. For example, Torrent ISR route change
- * event is reported with scope 0x00 (Not Applicatable) rather than
+ * event is reported with scope 0x00 (Not Applicable) rather than
* 0x3B (Torrent-hub). It is better to let the clients to identify
* who owns the event.
*/
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 3e8865b187de..0024e451bb36 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -120,39 +120,10 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
kfree(table_group);
}
-static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
- __be64 *startp, __be64 *endp)
-{
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
- unsigned long start, end, inc;
-
- start = __pa(startp);
- end = __pa(endp);
- inc = L1_CACHE_BYTES; /* invalidate a cacheline of TCEs at a time */
-
- /* If this is non-zero, change the format. We shift the
- * address and or in the magic from the device tree. */
- if (tbl->it_busno) {
- start <<= 12;
- end <<= 12;
- inc <<= 12;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- }
-
- end |= inc - 1; /* round up end to be different than start */
-
- mb(); /* Make sure TCEs in memory are written */
- while (start <= end) {
- out_be64(invalidate, start);
- start += inc;
- }
-}
-
static int tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 proto_tce;
__be64 *tcep, *tces;
@@ -173,9 +144,6 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
uaddr += TCE_PAGE_SIZE;
tcep++;
}
-
- if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
return 0;
}
@@ -188,9 +156,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
while (npages--)
*(tcep++) = 0;
-
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
@@ -208,7 +173,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce, tce;
@@ -251,7 +216,7 @@ static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce;
@@ -537,7 +502,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table *tbl)
{
struct device_node *node;
- const unsigned long *basep, *sw_inval;
+ const unsigned long *basep;
const u32 *sizep;
node = phb->dn;
@@ -575,22 +540,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_index = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
-
- sw_inval = of_get_property(node, "linux,tce-sw-invalidate-info", NULL);
- if (sw_inval) {
- /*
- * This property contains information on how to
- * invalidate the TCE entry. The first property is
- * the base MMIO address used to invalidate entries.
- * The second property tells us the format of the TCE
- * invalidate (whether it needs to be shifted) and
- * some magic routing info to add to our invalidate
- * command.
- */
- tbl->it_index = (unsigned long) ioremap(sw_inval[0], 8);
- tbl->it_busno = sw_inval[1]; /* overload this with magic */
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
- }
}
/*
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 13fa95b3aa8b..6681ac97fb18 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -14,14 +14,13 @@
#include <asm/page.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
-#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/smp.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
-static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
@@ -51,26 +50,6 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
"(hw %d) failed with %d\n", cpu, hwcpu, ret);
}
}
-}
-
-static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary)
-{
- pseries_kexec_cpu_down(crash_shutdown, secondary);
- mpic_teardown_this_cpu(secondary);
-}
-void __init setup_kexec_cpu_down_mpic(void)
-{
- ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic;
-}
-
-static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
-{
- pseries_kexec_cpu_down(crash_shutdown, secondary);
xics_kexec_teardown_cpu(secondary);
}
-
-void __init setup_kexec_cpu_down_xics(void)
-{
- ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
-}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7f6100d91b4b..86707e67843f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -45,6 +45,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
+#include <asm/asm-prototypes.h>
#include "pseries.h"
@@ -260,24 +261,8 @@ static void pSeries_lpar_hptab_clear(void)
* This is also called on boot when a fadump happens. In that case we
* must not change the exception endian mode.
*/
- if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
- long rc;
-
- rc = pseries_big_endian_exceptions();
- /*
- * At this point it is unlikely panic() will get anything
- * out to the user, but at least this will stop us from
- * continuing on further and creating an even more
- * difficult to debug situation.
- *
- * There is a known problem when kdump'ing, if cpus are offline
- * the above call will fail. Rather than panicking again, keep
- * going and hope the kdump kernel is also little endian, which
- * it usually is.
- */
- if (rc && !kdump_in_progress())
- panic("Could not enable big endian exceptions");
- }
+ if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
+ pseries_big_endian_exceptions();
#endif
}
@@ -604,17 +589,17 @@ static int __init disable_bulk_remove(char *str)
__setup("bulk_remove=", disable_bulk_remove);
-void __init hpte_init_lpar(void)
+void __init hpte_init_pseries(void)
{
- ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
- ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
- ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted;
- ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
- ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
- ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
+ mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
+ mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp;
+ mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert;
+ mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
+ mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
+ mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
+ mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear;
+ mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}
#ifdef CONFIG_PPC_SMLPAR
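hpte_init_pseries(), like the ps3 and native variants touched elsewhere in this merge, now registers its hash-MMU callbacks in the dedicated mmu_hash_ops table instead of patching individual ppc_md hooks. A much-reduced stand-in for that ops-table pattern follows; the structure and callback signatures below are illustrative only and far smaller than the real mmu_hash_ops:

#include <linux/init.h>

struct demo_hash_ops {
        long (*hpte_insert)(unsigned long slot, unsigned long vpn);
        void (*hpte_invalidate)(unsigned long slot);
};

static struct demo_hash_ops demo_hash_ops;

static long my_hpte_insert(unsigned long slot, unsigned long vpn) { return 0; }
static void my_hpte_invalidate(unsigned long slot) { }

/* Sketch: a platform fills one ops table at init time. */
static void __init hpte_init_demo(void)
{
        demo_hash_ops.hpte_insert     = my_hpte_insert;
        demo_hash_ops.hpte_invalidate = my_hpte_invalidate;
}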
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8184175c86..79aef8c1c5b3 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -17,8 +17,6 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/kmsg_dump.h>
-#include <linux/pstore.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c
index c26eadde434c..a4a0b57d1d81 100644
--- a/arch/powerpc/platforms/pseries/power.c
+++ b/arch/powerpc/platforms/pseries/power.c
@@ -27,6 +27,8 @@
#include <linux/init.h>
#include <asm/machdep.h>
+#include "pseries.h"
+
unsigned long rtas_poweron_auto; /* default and normal state is 0 */
static ssize_t auto_poweron_show(struct kobject *kobj,
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 7aa83f00ac62..b1be7b713fe6 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -20,31 +20,18 @@ extern void request_event_sources_irqs(struct device_node *np,
#include <linux/of.h>
-extern void __init fw_hypertas_feature_init(const char *hypertas,
- unsigned long len);
-extern void __init fw_vec5_feature_init(const char *hypertas,
- unsigned long len);
-
struct pt_regs;
extern int pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
#ifdef CONFIG_SMP
-extern void smp_init_pseries_mpic(void);
-extern void smp_init_pseries_xics(void);
+extern void smp_init_pseries(void);
#else
-static inline void smp_init_pseries_mpic(void) { };
-static inline void smp_init_pseries_xics(void) { };
+static inline void smp_init_pseries(void) { };
#endif
-#ifdef CONFIG_KEXEC
-extern void setup_kexec_cpu_down_xics(void);
-extern void setup_kexec_cpu_down_mpic(void);
-#else
-static inline void setup_kexec_cpu_down_xics(void) { }
-static inline void setup_kexec_cpu_down_mpic(void) { }
-#endif
+extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
extern void pSeries_final_fixup(void);
@@ -64,6 +51,8 @@ extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
+ struct completion *hotplug_done, int *rc);
#ifdef CONFIG_MEMORY_HOTPLUG
int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
#else
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 92767791f93b..164a13d3998a 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -208,19 +208,19 @@ static ssize_t percpu_deactivate_hint_show(struct device *dev,
* Per-cpu value of the hint
*/
-struct device_attribute attr_cpu_activate_hint_list =
+static struct device_attribute attr_cpu_activate_hint_list =
__ATTR(pseries_activate_hint_list, 0444,
cpu_activate_hint_list_show, NULL);
-struct device_attribute attr_cpu_deactivate_hint_list =
+static struct device_attribute attr_cpu_deactivate_hint_list =
__ATTR(pseries_deactivate_hint_list, 0444,
cpu_deactivate_hint_list_show, NULL);
-struct device_attribute attr_percpu_activate_hint =
+static struct device_attribute attr_percpu_activate_hint =
__ATTR(pseries_activate_hint, 0444,
percpu_activate_hint_show, NULL);
-struct device_attribute attr_percpu_deactivate_hint =
+static struct device_attribute attr_percpu_deactivate_hint =
__ATTR(pseries_deactivate_hint, 0444,
percpu_deactivate_hint_show, NULL);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 9a3e27b863ce..904a677208d1 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -43,6 +43,7 @@ static int ras_check_exception_token;
/* EPOW events counter variable */
static int num_epow_events;
+static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id);
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
@@ -65,6 +66,14 @@ static int __init init_ras_IRQ(void)
of_node_put(np);
}
+ /* Hotplug Events */
+ np = of_find_node_by_path("/event-sources/hot-plug-events");
+ if (np != NULL) {
+ request_event_sources_irqs(np, ras_hotplug_interrupt,
+ "RAS_HOTPLUG");
+ of_node_put(np);
+ }
+
/* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) {
@@ -190,6 +199,36 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
num_epow_events++;
}
+static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
+{
+ struct pseries_errorlog *pseries_log;
+ struct pseries_hp_errorlog *hp_elog;
+
+ spin_lock(&ras_log_buf_lock);
+
+ rtas_call(ras_check_exception_token, 6, 1, NULL,
+ RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq),
+ RTAS_HOTPLUG_EVENTS, 0, __pa(&ras_log_buf),
+ rtas_get_error_log_max());
+
+ pseries_log = get_pseries_errorlog((struct rtas_error_log *)ras_log_buf,
+ PSERIES_ELOG_SECT_ID_HOTPLUG);
+ hp_elog = (struct pseries_hp_errorlog *)pseries_log->data;
+
+ /*
+ * Since PCI hotplug is not currently supported on pseries, put PCI
+ * hotplug events on the ras_log_buf to be handled by rtas_errd.
+ */
+ if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM ||
+ hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU)
+ queue_hotplug_event(hp_elog, NULL, NULL);
+ else
+ log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
+
+ spin_unlock(&ras_log_buf_lock);
+ return IRQ_HANDLED;
+}
+
/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 9883bc7ea007..4ffcaa6f8670 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -57,7 +57,6 @@
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
-#include <asm/mpic.h>
#include <asm/xics.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
@@ -77,8 +76,6 @@ EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
-static struct device_node *pSeries_mpic_node;
-
static void pSeries_show_cpuinfo(struct seq_file *m)
{
struct device_node *root;
@@ -172,48 +169,7 @@ static void __init pseries_setup_i8259_cascade(void)
irq_set_chained_handler(cascade, pseries_8259_cascade);
}
-static void __init pseries_mpic_init_IRQ(void)
-{
- struct device_node *np;
- const unsigned int *opprop;
- unsigned long openpic_addr = 0;
- int naddr, n, i, opplen;
- struct mpic *mpic;
-
- np = of_find_node_by_path("/");
- naddr = of_n_addr_cells(np);
- opprop = of_get_property(np, "platform-open-pic", &opplen);
- if (opprop != NULL) {
- openpic_addr = of_read_number(opprop, naddr);
- printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
- }
- of_node_put(np);
-
- BUG_ON(openpic_addr == 0);
-
- /* Setup the openpic driver */
- mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
- MPIC_NO_RESET, 16, 0, " MPIC ");
- BUG_ON(mpic == NULL);
-
- /* Add ISUs */
- opplen /= sizeof(u32);
- for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
- unsigned long isuaddr = of_read_number(opprop + i, naddr);
- mpic_assign_isu(mpic, n, isuaddr);
- }
-
- /* Setup top-level get_irq */
- ppc_md.get_irq = mpic_get_irq;
-
- /* All ISUs are setup, complete initialization */
- mpic_init(mpic);
-
- /* Look for cascade */
- pseries_setup_i8259_cascade();
-}
-
-static void __init pseries_xics_init_IRQ(void)
+static void __init pseries_init_irq(void)
{
xics_init();
pseries_setup_i8259_cascade();
@@ -228,32 +184,6 @@ static void pseries_lpar_enable_pmcs(void)
plpar_hcall_norets(H_PERFMON, set, reset);
}
-static void __init pseries_discover_pic(void)
-{
- struct device_node *np;
- const char *typep;
-
- for_each_node_by_name(np, "interrupt-controller") {
- typep = of_get_property(np, "compatible", NULL);
- if (!typep)
- continue;
- if (strstr(typep, "open-pic")) {
- pSeries_mpic_node = of_node_get(np);
- ppc_md.init_IRQ = pseries_mpic_init_IRQ;
- setup_kexec_cpu_down_mpic();
- smp_init_pseries_mpic();
- return;
- } else if (strstr(typep, "ppc-xicp")) {
- ppc_md.init_IRQ = pseries_xics_init_IRQ;
- setup_kexec_cpu_down_xics();
- smp_init_pseries_xics();
- return;
- }
- }
- printk(KERN_ERR "pSeries_discover_pic: failed to recognize"
- " interrupt-controller\n");
-}
-
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
@@ -265,11 +195,8 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
case OF_RECONFIG_ATTACH_NODE:
parent = of_get_parent(np);
pdn = parent ? PCI_DN(parent) : NULL;
- if (pdn) {
- /* Create pdn and EEH device */
+ if (pdn)
pci_add_device_node_info(pdn->phb, np);
- eeh_dev_init(PCI_DN(np), pdn->phb);
- }
of_node_put(parent);
break;
@@ -367,7 +294,7 @@ static void pseries_lpar_idle(void)
{
/*
* Default handler to go into low thread priority and possibly
- * low power mode by cedeing processor to hypervisor
+ * low power mode by ceding processor to hypervisor
*/
/* Indicate to hypervisor that we are idle. */
@@ -392,15 +319,23 @@ static void pseries_lpar_idle(void)
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
-long pSeries_enable_reloc_on_exc(void)
+void pseries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
while (1) {
rc = enable_reloc_on_exceptions();
- if (!H_IS_LONG_BUSY(rc))
- return rc;
+ if (!H_IS_LONG_BUSY(rc)) {
+ if (rc == H_P2) {
+ pr_info("Relocation on exceptions not"
+ " supported\n");
+ } else if (rc != H_SUCCESS) {
+ pr_warn("Unable to enable relocation"
+ " on exceptions: %ld\n", rc);
+ }
+ break;
+ }
delay = get_longbusy_msecs(rc);
total_delay += delay;
@@ -408,66 +343,81 @@ long pSeries_enable_reloc_on_exc(void)
pr_warn("Warning: Giving up waiting to enable "
"relocation on exceptions (%u msec)!\n",
total_delay);
- return rc;
+ return;
}
mdelay(delay);
}
}
-EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);
+EXPORT_SYMBOL(pseries_enable_reloc_on_exc);
-long pSeries_disable_reloc_on_exc(void)
+void pseries_disable_reloc_on_exc(void)
{
long rc;
while (1) {
rc = disable_reloc_on_exceptions();
if (!H_IS_LONG_BUSY(rc))
- return rc;
+ break;
mdelay(get_longbusy_msecs(rc));
}
+ if (rc != H_SUCCESS)
+ pr_warning("Warning: Failed to disable relocation on "
+ "exceptions: %ld\n", rc);
}
-EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);
+EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
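Both reloc-on-exception helpers use the same hypervisor retry idiom: loop while the hcall reports a long-busy status, sleeping for the hinted interval each time. A sketch of that idiom in isolation, where do_hcall() stands in for a wrapper such as enable_reloc_on_exceptions():

#include <linux/delay.h>
#include <asm/hvcall.h>

/* Sketch: retry an hcall until it stops returning H_LONG_BUSY_*. */
static long hcall_retry_long_busy(long (*do_hcall)(void))
{
        long rc;

        for (;;) {
                rc = do_hcall();
                if (!H_IS_LONG_BUSY(rc))
                        return rc;
                mdelay(get_longbusy_msecs(rc));
        }
}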
#ifdef CONFIG_KEXEC
static void pSeries_machine_kexec(struct kimage *image)
{
- long rc;
-
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
- rc = pSeries_disable_reloc_on_exc();
- if (rc != H_SUCCESS)
- pr_warning("Warning: Failed to disable relocation on "
- "exceptions: %ld\n", rc);
- }
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ pseries_disable_reloc_on_exc();
default_machine_kexec(image);
}
#endif
#ifdef __LITTLE_ENDIAN__
-long pseries_big_endian_exceptions(void)
+void pseries_big_endian_exceptions(void)
{
long rc;
while (1) {
rc = enable_big_endian_exceptions();
if (!H_IS_LONG_BUSY(rc))
- return rc;
+ break;
mdelay(get_longbusy_msecs(rc));
}
+
+ /*
+ * At this point it is unlikely panic() will get anything
+ * out to the user, since this is called very late in kexec
+ * but at least this will stop us from continuing on further
+ * and creating an even more difficult to debug situation.
+ *
+ * There is a known problem when kdump'ing, if cpus are offline
+ * the above call will fail. Rather than panicking again, keep
+ * going and hope the kdump kernel is also little endian, which
+ * it usually is.
+ */
+ if (rc && !kdump_in_progress())
+ panic("Could not enable big endian exceptions");
}
-static long pseries_little_endian_exceptions(void)
+void pseries_little_endian_exceptions(void)
{
long rc;
while (1) {
rc = enable_little_endian_exceptions();
if (!H_IS_LONG_BUSY(rc))
- return rc;
+ break;
mdelay(get_longbusy_msecs(rc));
}
+ if (rc) {
+ ppc_md.progress("H_SET_MODE LE exception fail", 0);
+ panic("Could not enable little endian exceptions");
+ }
}
#endif
@@ -492,7 +442,6 @@ static void __init find_and_init_phbs(void)
}
of_node_put(root);
- pci_devs_phb_init();
/*
* PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
@@ -506,7 +455,8 @@ static void __init pSeries_setup_arch(void)
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Discover PIC type and setup ppc_md accordingly */
- pseries_discover_pic();
+ smp_init_pseries();
+
/* openpic global configuration register (64-bit format). */
/* openpic Interrupt Source Unit pointer (64-bit format). */
@@ -537,18 +487,6 @@ static void __init pSeries_setup_arch(void)
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
- long rc;
-
- rc = pSeries_enable_reloc_on_exc();
- if (rc == H_P2) {
- pr_info("Relocation on exceptions not supported\n");
- } else if (rc != H_SUCCESS) {
- pr_warn("Unable to enable relocation on exceptions: "
- "%ld\n", rc);
- }
- }
}
static int __init pSeries_init_panel(void)
@@ -682,9 +620,9 @@ static void pSeries_cmo_feature_init(void)
/*
* Early initialization. Relocation is on but do not reference unbolted pages
*/
-static void __init pSeries_init_early(void)
+static void __init pseries_init(void)
{
- pr_debug(" -> pSeries_init_early()\n");
+ pr_debug(" -> pseries_init()\n");
#ifdef CONFIG_HVC_CONSOLE
if (firmware_has_feature(FW_FEATURE_LPAR))
@@ -701,7 +639,7 @@ static void __init pSeries_init_early(void)
pSeries_cmo_feature_init();
iommu_init_early_pSeries();
- pr_debug(" <- pSeries_init_early()\n");
+ pr_debug(" <- pseries_init()\n");
}
/**
@@ -732,49 +670,9 @@ static void pseries_power_off(void)
for (;;);
}
-/*
- * Called very early, MMU is off, device-tree isn't unflattened
- */
-
-static int __init pseries_probe_fw_features(unsigned long node,
- const char *uname, int depth,
- void *data)
-{
- const char *prop;
- int len;
- static int hypertas_found;
- static int vec5_found;
-
- if (depth != 1)
- return 0;
-
- if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
- prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
- &len);
- if (prop) {
- powerpc_firmware_features |= FW_FEATURE_LPAR;
- fw_hypertas_feature_init(prop, len);
- }
-
- hypertas_found = 1;
- }
-
- if (!strcmp(uname, "chosen")) {
- prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
- &len);
- if (prop)
- fw_vec5_feature_init(prop, len);
-
- vec5_found = 1;
- }
-
- return hypertas_found && vec5_found;
-}
-
static int __init pSeries_probe(void)
{
- unsigned long root = of_get_flat_dt_root();
- const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+ const char *dtype = of_get_property(of_root, "device_type", NULL);
if (dtype == NULL)
return 0;
@@ -784,41 +682,17 @@ static int __init pSeries_probe(void)
/* Cell blades firmware claims to be chrp while it's not. Until this
* is fixed, we need to avoid those here.
*/
- if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
- of_flat_dt_is_compatible(root, "IBM,CBEA"))
+ if (of_machine_is_compatible("IBM,CPBW-1.0") ||
+ of_machine_is_compatible("IBM,CBEA"))
return 0;
- pr_debug("pSeries detected, looking for LPAR capability...\n");
-
- /* Now try to figure out if we are running on LPAR */
- of_scan_flat_dt(pseries_probe_fw_features, NULL);
-
-#ifdef __LITTLE_ENDIAN__
- if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
- long rc;
- /*
- * Tell the hypervisor that we want our exceptions to
- * be taken in little endian mode. If this fails we don't
- * want to use BUG() because it will trigger an exception.
- */
- rc = pseries_little_endian_exceptions();
- if (rc) {
- ppc_md.progress("H_SET_MODE LE exception fail", 0);
- panic("Could not enable little endian exceptions");
- }
- }
-#endif
-
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hpte_init_lpar();
- else
- hpte_init_native();
-
pm_power_off = pseries_power_off;
pr_debug("Machine is%s LPAR !\n",
(powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
+ pseries_init();
+
return 1;
}
@@ -837,7 +711,7 @@ define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
.setup_arch = pSeries_setup_arch,
- .init_early = pSeries_init_early,
+ .init_IRQ = pseries_init_irq,
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
@@ -853,6 +727,7 @@ define_machine(pseries) {
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
.machine_kexec = pSeries_machine_kexec,
+ .kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
.memory_block_size = pseries_memory_block_size,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 6932ea803e33..f6f83aeccaaa 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -38,7 +38,6 @@
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
-#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
@@ -140,7 +139,7 @@ out:
return 1;
}
-static void smp_xics_setup_cpu(int cpu)
+static void smp_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
xics_setup_cpu();
@@ -207,28 +206,22 @@ static __init void pSeries_smp_probe(void)
}
}
-static struct smp_ops_t pSeries_mpic_smp_ops = {
- .message_pass = smp_mpic_message_pass,
- .probe = smp_mpic_probe,
- .kick_cpu = smp_pSeries_kick_cpu,
- .setup_cpu = smp_mpic_setup_cpu,
-};
-
-static struct smp_ops_t pSeries_xics_smp_ops = {
+static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
.probe = pSeries_smp_probe,
.kick_cpu = smp_pSeries_kick_cpu,
- .setup_cpu = smp_xics_setup_cpu,
+ .setup_cpu = smp_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
};
/* This is called very early */
-static void __init smp_init_pseries(void)
+void __init smp_init_pseries(void)
{
int i;
pr_debug(" -> smp_init_pSeries()\n");
+ smp_ops = &pseries_smp_ops;
alloc_bootmem_cpumask_var(&of_spin_mask);
@@ -258,17 +251,3 @@ static void __init smp_init_pseries(void)
pr_debug(" <- smp_init_pSeries()\n");
}
-
-void __init smp_init_pseries_mpic(void)
-{
- smp_ops = &pSeries_mpic_smp_ops;
-
- smp_init_pseries();
-}
-
-void __init smp_init_pseries_xics(void)
-{
- smp_ops = &pSeries_xics_smp_ops;
-
- smp_init_pseries();
-}
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 0ac12e5fd8ab..911456d17713 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -28,6 +28,7 @@
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/cpm.h>
+#include <asm/fixmap.h>
#include <soc/fsl/qe/qe.h>
#include <mm/mmu_decl.h>
@@ -37,25 +38,36 @@
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-static u32 __iomem *cpm_udbg_txdesc =
- (u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+static u32 __iomem *cpm_udbg_txdesc;
+static u8 __iomem *cpm_udbg_txbuf;
static void udbg_putc_cpm(char c)
{
- u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
-
if (c == '\n')
udbg_putc_cpm('\r');
while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
;
- out_8(txbuf, c);
+ out_8(cpm_udbg_txbuf, c);
out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
}
void __init udbg_init_cpm(void)
{
+#ifdef CONFIG_PPC_8xx
+ cpm_udbg_txdesc = (u32 __iomem __force *)
+ (CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
+ VIRT_IMMR_BASE);
+ cpm_udbg_txbuf = (u8 __iomem __force *)
+ (in_be32(&cpm_udbg_txdesc[1]) - PHYS_IMMR_BASE +
+ VIRT_IMMR_BASE);
+#else
+ cpm_udbg_txdesc = (u32 __iomem __force *)
+ CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
+ cpm_udbg_txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
+#endif
+
if (cpm_udbg_txdesc) {
#ifdef CONFIG_CPM2
setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
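On 8xx the early CPM debug descriptor is now reached through the fixed IMMR mapping: both CONFIG_PPC_EARLY_DEBUG_CPM_ADDR and the transmit-buffer pointer read from the descriptor are rebased from PHYS_IMMR_BASE to VIRT_IMMR_BASE before being dereferenced. A sketch of that address arithmetic, with placeholder base values (the real constants come from the 8xx asm/fixmap.h):

#include <linux/types.h>

/* Placeholder bases for the demo; substitute PHYS_IMMR_BASE/VIRT_IMMR_BASE. */
#define DEMO_PHYS_IMMR_BASE     0xff000000UL
#define DEMO_VIRT_IMMR_BASE     0xf0000000UL

/* Sketch: rebase a physical address inside the IMMR window onto its
 * fixed virtual mapping.
 */
static unsigned long immr_phys_to_virt(unsigned long phys)
{
        return phys - DEMO_PHYS_IMMR_BASE + DEMO_VIRT_IMMR_BASE;
}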
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index b7348637eae0..3573d54b2770 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -48,16 +48,10 @@
#include "dart.h"
-/* Physical base address and size of the DART table */
-unsigned long dart_tablebase; /* exported to htab_initialize */
+/* DART table address and size */
+static u32 *dart_tablebase;
static unsigned long dart_tablesize;
-/* Virtual base address of the DART table */
-static u32 *dart_vbase;
-#ifdef CONFIG_PM
-static u32 *dart_copy;
-#endif
-
/* Mapped base address for the dart */
static unsigned int __iomem *dart;
@@ -151,6 +145,34 @@ wait_more:
spin_unlock_irqrestore(&invalidate_lock, flags);
}
+static void dart_cache_sync(unsigned int *base, unsigned int count)
+{
+ /*
+ * We add 1 to the number of entries to flush, following a
+ * comment in Darwin indicating that the memory controller
+ * can prefetch unmapped memory under some circumstances.
+ */
+ unsigned long start = (unsigned long)base;
+ unsigned long end = start + (count + 1) * sizeof(unsigned int);
+ unsigned int tmp;
+
+ /* Perform a standard cache flush */
+ flush_inval_dcache_range(start, end);
+
+ /*
+ * Perform the sequence described in the CPC925 manual to
+ * ensure all the data gets to a point the cache incoherent
+ * DART hardware will see.
+ */
+ asm volatile(" sync;"
+ " isync;"
+ " dcbf 0,%1;"
+ " sync;"
+ " isync;"
+ " lwz %0,0(%1);"
+ " isync" : "=r" (tmp) : "r" (end) : "memory");
+}
+
static void dart_flush(struct iommu_table *tbl)
{
mb();
@@ -163,15 +185,15 @@ static void dart_flush(struct iommu_table *tbl)
static int dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- unsigned int *dp;
+ unsigned int *dp, *orig_dp;
unsigned int rpn;
long l;
DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
- dp = ((unsigned int*)tbl->it_base) + index;
+ orig_dp = dp = ((unsigned int*)tbl->it_base) + index;
/* On U3, all memory is contiguous, so we can move this
* out of the loop.
@@ -184,11 +206,7 @@ static int dart_build(struct iommu_table *tbl, long index,
uaddr += DART_PAGE_SIZE;
}
-
- /* make sure all updates have reached memory */
- mb();
- in_be32((unsigned __iomem *)dp);
- mb();
+ dart_cache_sync(orig_dp, npages);
if (dart_is_u4) {
rpn = index;
@@ -203,7 +221,8 @@ static int dart_build(struct iommu_table *tbl, long index,
static void dart_free(struct iommu_table *tbl, long index, long npages)
{
- unsigned int *dp;
+ unsigned int *dp, *orig_dp;
+ long orig_npages = npages;
/* We don't worry about flushing the TLB cache. The only drawback of
* not doing it is that we won't catch buggy device drivers doing
@@ -212,34 +231,30 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
DBG("dart: free at: %lx, %lx\n", index, npages);
- dp = ((unsigned int *)tbl->it_base) + index;
+ orig_dp = dp = ((unsigned int *)tbl->it_base) + index;
while (npages--)
*(dp++) = dart_emptyval;
-}
+ dart_cache_sync(orig_dp, orig_npages);
+}
-static int __init dart_init(struct device_node *dart_node)
+static void allocate_dart(void)
{
- unsigned int i;
- unsigned long tmp, base, size;
- struct resource r;
-
- if (dart_tablebase == 0 || dart_tablesize == 0) {
- printk(KERN_INFO "DART: table not allocated, using "
- "direct DMA\n");
- return -ENODEV;
- }
+ unsigned long tmp;
- if (of_address_to_resource(dart_node, 0, &r))
- panic("DART: can't get register base ! ");
+ /* 512 pages (2MB) is max DART tablesize. */
+ dart_tablesize = 1UL << 21;
- /* Make sure nothing from the DART range remains in the CPU cache
- * from a previous mapping that existed before the kernel took
- * over
+ /*
+	 * 16MB (1 << 24) alignment. We allocate a full 16MB chunk since we
+ * will blow up an entire large page anyway in the kernel mapping.
*/
- flush_dcache_phys_range(dart_tablebase,
- dart_tablebase + dart_tablesize);
+ dart_tablebase = __va(memblock_alloc_base(1UL<<24,
+ 1UL<<24, 0x80000000L));
+
+	/* There is no point scanning the DART space for leaks */
+ kmemleak_no_scan((void *)dart_tablebase);
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
@@ -249,20 +264,51 @@ static int __init dart_init(struct device_node *dart_node)
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
+ printk(KERN_INFO "DART table allocated at: %p\n", dart_tablebase);
+}
+
+static int __init dart_init(struct device_node *dart_node)
+{
+ unsigned int i;
+ unsigned long base, size;
+ struct resource r;
+
+ /* IOMMU disabled by the user ? bail out */
+ if (iommu_is_off)
+ return -ENODEV;
+
+ /*
+ * Only use the DART if the machine has more than 1GB of RAM
+ * or if requested with iommu=on on cmdline.
+ *
+	 * 1GB of RAM is picked as the limit because some default devices
+	 * (e.g. Airport Extreme) have 30-bit address range limits.
+ */
+
+ if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
+ return -ENODEV;
+
+ /* Get DART registers */
+ if (of_address_to_resource(dart_node, 0, &r))
+ panic("DART: can't get register base ! ");
+
/* Map in DART registers */
dart = ioremap(r.start, resource_size(&r));
if (dart == NULL)
panic("DART: Cannot map registers!");
- /* Map in DART table */
- dart_vbase = ioremap(__pa(dart_tablebase), dart_tablesize);
+ /* Allocate the DART and dummy page */
+ allocate_dart();
/* Fill initial table */
for (i = 0; i < dart_tablesize/4; i++)
- dart_vbase[i] = dart_emptyval;
+ dart_tablebase[i] = dart_emptyval;
+
+ /* Push to memory */
+ dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
/* Initialize DART with table base and enable it. */
- base = dart_tablebase >> DART_PAGE_SHIFT;
+ base = ((unsigned long)dart_tablebase) >> DART_PAGE_SHIFT;
size = dart_tablesize >> DART_PAGE_SHIFT;
if (dart_is_u4) {
size &= DART_SIZE_U4_SIZE_MASK;
@@ -301,7 +347,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Initialize the common IOMMU code */
- iommu_table_dart.it_base = (unsigned long)dart_vbase;
+ iommu_table_dart.it_base = (unsigned long)dart_tablebase;
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
iommu_table_dart.it_ops = &iommu_dart_ops;
@@ -404,75 +450,21 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
}
#ifdef CONFIG_PM
-static void iommu_dart_save(void)
-{
- memcpy(dart_copy, dart_vbase, 2*1024*1024);
-}
-
static void iommu_dart_restore(void)
{
- memcpy(dart_vbase, dart_copy, 2*1024*1024);
+ dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
dart_tlb_invalidate_all();
}
static int __init iommu_init_late_dart(void)
{
- unsigned long tbasepfn;
- struct page *p;
-
- /* if no dart table exists then we won't need to save it
- * and the area has also not been reserved */
if (!dart_tablebase)
return 0;
- tbasepfn = __pa(dart_tablebase) >> PAGE_SHIFT;
- register_nosave_region_late(tbasepfn,
- tbasepfn + ((1<<24) >> PAGE_SHIFT));
-
- /* For suspend we need to copy the dart contents because
- * it is not part of the regular mapping (see above) and
- * thus not saved automatically. The memory for this copy
- * must be allocated early because we need 2 MB. */
- p = alloc_pages(GFP_KERNEL, 21 - PAGE_SHIFT);
- BUG_ON(!p);
- dart_copy = page_address(p);
-
- ppc_md.iommu_save = iommu_dart_save;
ppc_md.iommu_restore = iommu_dart_restore;
return 0;
}
late_initcall(iommu_init_late_dart);
-#endif
-
-void __init alloc_dart_table(void)
-{
- /* Only reserve DART space if machine has more than 1GB of RAM
- * or if requested with iommu=on on cmdline.
- *
- * 1GB of RAM is picked as limit because some default devices
- * (i.e. Airport Extreme) have 30 bit address range limits.
- */
-
- if (iommu_is_off)
- return;
-
- if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
- return;
-
- /* 512 pages (2MB) is max DART tablesize. */
- dart_tablesize = 1UL << 21;
- /* 16MB (1 << 24) alignment. We allocate a full 16Mb chuck since we
- * will blow up an entire large page anyway in the kernel mapping
- */
- dart_tablebase = (unsigned long)
- __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
- /*
- * The DART space is later unmapped from the kernel linear mapping and
- * accessing dart_tablebase during kmemleak scanning will fault.
- */
- kmemleak_no_scan((void *)dart_tablebase);
-
- printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
-}
+#endif /* CONFIG_PM */
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 861cebf9c292..c27058e5df26 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -90,12 +90,8 @@ static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
}
l2cache_size = *prop;
- if (get_cache_sram_params(&sram_params)) {
- dev_err(&dev->dev,
- "Entire L2 as cache, provide valid sram offset and size\n");
- return -EINVAL;
- }
-
+ if (get_cache_sram_params(&sram_params))
+ return 0; /* fall back to L2 cache only */
rem = l2cache_size % sram_params.sram_size;
ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size;
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index f5bf38b94595..68e7c0dd2e45 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -289,7 +289,7 @@ static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
}
int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
- u64 rstart, u32 size, u32 flags)
+ u64 rstart, u64 size, u32 flags)
{
struct rio_priv *priv = mport->priv;
u32 base_size;
@@ -298,7 +298,7 @@ int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
u32 riwar;
int i;
- if ((size & (size - 1)) != 0)
+ if ((size & (size - 1)) != 0 || size > 0x400000000ULL)
return -EINVAL;
base_size_log = ilog2(size);
@@ -491,6 +491,7 @@ int fsl_rio_setup(struct platform_device *dev)
rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
if (!rmu_node) {
dev_err(&dev->dev, "No valid fsl,srio-rmu-handle property\n");
+ rc = -ENOENT;
goto err_rmu;
}
rc = of_address_to_resource(rmu_node, 0, &rmu_regs);
@@ -643,19 +644,11 @@ int fsl_rio_setup(struct platform_device *dev)
port->ops = ops;
port->priv = priv;
port->phys_efptr = 0x100;
+ port->phys_rmap = 1;
priv->regs_win = rio_regs_win;
- /* Probe the master port phy type */
ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20);
- port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
- if (port->phy_type == RIO_PHY_PARALLEL) {
- dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n");
- release_resource(&port->iores);
- kfree(priv);
- kfree(port);
- continue;
- }
- dev_info(&dev->dev, "RapidIO PHY type: Serial\n");
+
/* Checking the port training status */
if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) {
dev_err(&dev->dev, "Port %d is not ready. "
@@ -705,11 +698,9 @@ int fsl_rio_setup(struct platform_device *dev)
((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET :
RIO_INB_ATMU_REGS_PORT2_OFFSET));
-
- /* Set to receive any dist ID for serial RapidIO controller. */
- if (port->phy_type == RIO_PHY_SERIAL)
- out_be32((priv->regs_win
- + RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA);
+ /* Set to receive packets with any dest ID */
+ out_be32((priv->regs_win + RIO_ISR_AACR + i*0x80),
+ RIO_ISR_AACR_AA);
/* Configure maintenance transaction window */
out_be32(&priv->maint_atmu_regs->rowbar,
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 99269c041615..a09ca704de58 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -204,7 +204,7 @@ static int __init setup_rstcr(void)
arch_initcall(setup_rstcr);
-void fsl_rstcr_restart(char *cmd)
+void __noreturn fsl_rstcr_restart(char *cmd)
{
local_irq_disable();
if (rstcr)
@@ -228,10 +228,11 @@ EXPORT_SYMBOL(diu_ops);
* to initiate a partition restart when we're running under the Freescale
* hypervisor.
*/
-void fsl_hv_restart(char *cmd)
+void __noreturn fsl_hv_restart(char *cmd)
{
pr_info("hv restart\n");
fh_partition_restart(-1);
+ while (1) ;
}
/*
@@ -241,9 +242,10 @@ void fsl_hv_restart(char *cmd)
* function pointers, to shut down the partition when we're running under
* the Freescale hypervisor.
*/
-void fsl_hv_halt(void)
+void __noreturn fsl_hv_halt(void)
{
pr_info("hv exit\n");
fh_partition_stop(-1);
+ while (1) ;
}
#endif
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 4c5a19ef4f0b..433566a5ef19 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -19,7 +19,7 @@ extern u32 fsl_get_sys_freq(void);
struct spi_board_info;
struct device_node;
-extern void fsl_rstcr_restart(char *cmd);
+extern void __noreturn fsl_rstcr_restart(char *cmd);
/* The different ports that the DIU can be connected to */
enum fsl_diu_monitor_port {
@@ -42,8 +42,8 @@ struct platform_diu_data_ops {
extern struct platform_diu_data_ops diu_ops;
-void fsl_hv_restart(char *cmd);
-void fsl_hv_halt(void);
+void __noreturn fsl_hv_restart(char *cmd);
+void __noreturn fsl_hv_halt(void);
#endif
#endif
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index ed5234ed8d3f..5ebd3f018295 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -112,7 +112,7 @@ int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp)
return 0;
}
-int __init_refok msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
+int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
struct device_node *of_node)
{
int size;
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index c606aa8ba60a..5d7f5a6564de 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -4,4 +4,4 @@ obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
-obj-$(CONFIG_PPC_POWERNV) += ics-opal.o
+obj-$(CONFIG_PPC_POWERNV) += ics-opal.o icp-opal.o
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
new file mode 100644
index 000000000000..57d72f10a97f
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2016 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/io.h>
+#include <asm/opal.h>
+
+static void icp_opal_teardown_cpu(void)
+{
+ int cpu = smp_processor_id();
+
+ /* Clear any pending IPI */
+ opal_int_set_mfrr(cpu, 0xff);
+}
+
+static void icp_opal_flush_ipi(void)
+{
+ /*
+	 * We take the IPI irq but never return, so we need to EOI the IPI,
+ * but want to leave our priority 0.
+ *
+ * Should we check all the other interrupts too?
+ * Should we be flagging idle loop instead?
+ * Or creating some task to be scheduled?
+ */
+ opal_int_eoi((0x00 << 24) | XICS_IPI);
+}
+
+static unsigned int icp_opal_get_irq(void)
+{
+ unsigned int xirr;
+ unsigned int vec;
+ unsigned int irq;
+ int64_t rc;
+
+ rc = opal_int_get_xirr(&xirr, false);
+ if (rc < 0)
+ return NO_IRQ;
+ xirr = be32_to_cpu(xirr);
+ vec = xirr & 0x00ffffff;
+ if (vec == XICS_IRQ_SPURIOUS)
+ return NO_IRQ;
+
+ irq = irq_find_mapping(xics_host, vec);
+ if (likely(irq != NO_IRQ)) {
+ xics_push_cppr(vec);
+ return irq;
+ }
+
+ /* We don't have a linux mapping, so have rtas mask it. */
+ xics_mask_unknown_vec(vec);
+
+ /* We might learn about it later, so EOI it */
+ opal_int_eoi(xirr);
+
+ return NO_IRQ;
+}
+
+static void icp_opal_set_cpu_priority(unsigned char cppr)
+{
+ xics_set_base_cppr(cppr);
+ opal_int_set_cppr(cppr);
+ iosync();
+}
+
+static void icp_opal_eoi(struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int64_t rc;
+
+ iosync();
+ rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);
+
+ /*
+ * EOI tells us whether there are more interrupts to fetch.
+ *
+ * Some HW implementations might not be able to send us another
+ * external interrupt in that case, so we force a replay.
+ */
+ if (rc > 0)
+ force_external_irq_replay();
+}
+
+#ifdef CONFIG_SMP
+
+static void icp_opal_cause_ipi(int cpu, unsigned long data)
+{
+ opal_int_set_mfrr(cpu, IPI_PRIORITY);
+}
+
+static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+
+ opal_int_set_mfrr(cpu, 0xff);
+
+ return smp_ipi_demux();
+}
+
+#endif /* CONFIG_SMP */
+
+static const struct icp_ops icp_opal_ops = {
+ .get_irq = icp_opal_get_irq,
+ .eoi = icp_opal_eoi,
+ .set_priority = icp_opal_set_cpu_priority,
+ .teardown_cpu = icp_opal_teardown_cpu,
+ .flush_ipi = icp_opal_flush_ipi,
+#ifdef CONFIG_SMP
+ .ipi_action = icp_opal_ipi_action,
+ .cause_ipi = icp_opal_cause_ipi,
+#endif
+};
+
+int icp_opal_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
+ if (!np)
+ return -ENODEV;
+
+ icp_ops = &icp_opal_ops;
+
+ printk("XICS: Using OPAL ICP fallbacks\n");
+
+ return 0;
+}
+
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 47e43b7b076b..a795a5f0301c 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -404,8 +404,11 @@ void __init xics_init(void)
/* Fist locate ICP */
if (firmware_has_feature(FW_FEATURE_LPAR))
rc = icp_hv_init();
- if (rc < 0)
+ if (rc < 0) {
rc = icp_native_init();
+ if (rc == -ENODEV)
+ rc = icp_opal_init();
+ }
if (rc < 0) {
pr_warning("XICS: Cannot find a Presentation Controller !\n");
return;
diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index 89098f320ad5..ee9891734149 100644
--- a/arch/powerpc/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
@@ -20,6 +20,7 @@ along with this file; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
#include "nonstdio.h"
#include "ansidecl.h"
#include "ppc.h"
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c5e155108be5..760545519a0b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -184,9 +184,6 @@ static void dump_tlb_book3e(void);
static int xmon_no_auto_backtrace;
-extern void xmon_enter(void);
-extern void xmon_leave(void);
-
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#else
@@ -1685,9 +1682,78 @@ write_spr(int n, unsigned long val)
catch_spr_faults = 0;
}
-static unsigned long regno;
-extern char exc_prolog;
-extern char dec_exc;
+static void dump_206_sprs(void)
+{
+#ifdef CONFIG_PPC64
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ return;
+
+ /* Actually some of these pre-date 2.06, but whatevs */
+
+ printf("srr0 = %.16x srr1 = %.16x dsisr = %.8x\n",
+ mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
+ printf("dscr = %.16x ppr = %.16x pir = %.8x\n",
+ mfspr(SPRN_DSCR), mfspr(SPRN_PPR), mfspr(SPRN_PIR));
+
+ if (!(mfmsr() & MSR_HV))
+ return;
+
+ printf("sdr1 = %.16x hdar = %.16x hdsisr = %.8x\n",
+ mfspr(SPRN_SDR1), mfspr(SPRN_HDAR), mfspr(SPRN_HDSISR));
+ printf("hsrr0 = %.16x hsrr1 = %.16x hdec = %.8x\n",
+ mfspr(SPRN_HSRR0), mfspr(SPRN_HSRR1), mfspr(SPRN_HDEC));
+ printf("lpcr = %.16x pcr = %.16x lpidr = %.8x\n",
+ mfspr(SPRN_LPCR), mfspr(SPRN_PCR), mfspr(SPRN_LPID));
+ printf("hsprg0 = %.16x hsprg1 = %.16x\n",
+ mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1));
+ printf("dabr = %.16x dabrx = %.16x\n",
+ mfspr(SPRN_DABR), mfspr(SPRN_DABRX));
+#endif
+}
+
+static void dump_207_sprs(void)
+{
+#ifdef CONFIG_PPC64
+ unsigned long msr;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return;
+
+ printf("dpdes = %.16x tir = %.16x cir = %.8x\n",
+ mfspr(SPRN_DPDES), mfspr(SPRN_TIR), mfspr(SPRN_CIR));
+
+ printf("fscr = %.16x tar = %.16x pspb = %.8x\n",
+ mfspr(SPRN_FSCR), mfspr(SPRN_TAR), mfspr(SPRN_PSPB));
+
+ msr = mfmsr();
+ if (msr & MSR_TM) {
+ /* Only if TM has been enabled in the kernel */
+ printf("tfhar = %.16x tfiar = %.16x texasr = %.16x\n",
+ mfspr(SPRN_TFHAR), mfspr(SPRN_TFIAR),
+ mfspr(SPRN_TEXASR));
+ }
+
+ printf("mmcr0 = %.16x mmcr1 = %.16x mmcr2 = %.16x\n",
+ mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2));
+ printf("pmc1 = %.8x pmc2 = %.8x pmc3 = %.8x pmc4 = %.8x\n",
+ mfspr(SPRN_PMC1), mfspr(SPRN_PMC2),
+ mfspr(SPRN_PMC3), mfspr(SPRN_PMC4));
+ printf("mmcra = %.16x siar = %.16x pmc5 = %.8x\n",
+ mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5));
+ printf("sdar = %.16x sier = %.16x pmc6 = %.8x\n",
+ mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
+ printf("ebbhr = %.16x ebbrr = %.16x bescr = %.16x\n",
+ mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+
+ if (!(msr & MSR_HV))
+ return;
+
+ printf("hfscr = %.16x dhdes = %.16x rpr = %.16x\n",
+ mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR));
+ printf("dawr = %.16x dawrx = %.16x ciabr = %.16x\n",
+ mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR));
+#endif
+}
static void dump_one_spr(int spr, bool show_unimplemented)
{
@@ -1719,6 +1785,7 @@ static void dump_one_spr(int spr, bool show_unimplemented)
static void super_regs(void)
{
+ static unsigned long regno;
int cmd;
int spr;
@@ -1730,14 +1797,18 @@ static void super_regs(void)
asm("mr %0,1" : "=r" (sp) :);
asm("mr %0,2" : "=r" (toc) :);
- printf("msr = "REG" sprg0= "REG"\n",
+ printf("msr = "REG" sprg0 = "REG"\n",
mfmsr(), mfspr(SPRN_SPRG0));
- printf("pvr = "REG" sprg1= "REG"\n",
+ printf("pvr = "REG" sprg1 = "REG"\n",
mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
- printf("dec = "REG" sprg2= "REG"\n",
+ printf("dec = "REG" sprg2 = "REG"\n",
mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
- printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
- printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
+ printf("sp = "REG" sprg3 = "REG"\n", sp, mfspr(SPRN_SPRG3));
+ printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
+
+ dump_206_sprs();
+ dump_207_sprs();
+
return;
}
case 'w': {
@@ -2213,13 +2284,13 @@ static void dump_one_paca(int cpu)
DUMP(p, subcore_sibling_mask, "x");
#endif
- DUMP(p, user_time, "llx");
- DUMP(p, system_time, "llx");
- DUMP(p, user_time_scaled, "llx");
- DUMP(p, starttime, "llx");
- DUMP(p, starttime_user, "llx");
- DUMP(p, startspurr, "llx");
- DUMP(p, utime_sspurr, "llx");
+ DUMP(p, accounting.user_time, "llx");
+ DUMP(p, accounting.system_time, "llx");
+ DUMP(p, accounting.user_time_scaled, "llx");
+ DUMP(p, accounting.starttime, "llx");
+ DUMP(p, accounting.starttime_user, "llx");
+ DUMP(p, accounting.startspurr, "llx");
+ DUMP(p, accounting.utime_sspurr, "llx");
DUMP(p, stolen_time, "llx");
#undef DUMP
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9e607bf2d640..0e348781327b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -123,6 +123,7 @@ config S390
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_EARLY_PFN_TO_NID
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 98ec652cc332..33ba697c782d 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -18,7 +18,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
-OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
+OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o als.o)
OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
@@ -33,10 +33,10 @@ quiet_cmd_sizes = GEN $@
$(obj)/sizes.h: vmlinux
$(call if_changed,sizes)
-AFLAGS_head.o += -I$(obj)
+AFLAGS_head.o += -I$(objtree)/$(obj)
$(obj)/head.o: $(obj)/sizes.h
-CFLAGS_misc.o += -I$(obj)
+CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 67d43a0eabb4..28f03ca60100 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -19,29 +19,10 @@
#include <asm/ebcdic.h>
#include "hypfs.h"
-#define LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */
-#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
#define TMP_SIZE 64 /* size of temporary buffers */
#define DBFS_D204_HDR_VERSION 0
-/* diag 204 subcodes */
-enum diag204_sc {
- SUBC_STIB4 = 4,
- SUBC_RSI = 5,
- SUBC_STIB6 = 6,
- SUBC_STIB7 = 7
-};
-
-/* The two available diag 204 data formats */
-enum diag204_format {
- INFO_SIMPLE = 0,
- INFO_EXT = 0x00010000
-};
-
-/* bit is set in flags, when physical cpu info is included in diag 204 data */
-#define LPAR_PHYS_FLG 0x80
-
static char *diag224_cpu_names; /* diag 224 name table */
static enum diag204_sc diag204_store_sc; /* used subcode for store */
static enum diag204_format diag204_info_type; /* used diag 204 data format */
@@ -53,7 +34,7 @@ static int diag204_buf_pages; /* number of pages for diag204 data */
static struct dentry *dbfs_d204_file;
/*
- * DIAG 204 data structures and member access functions.
+ * DIAG 204 member access functions.
*
* Since we have two different diag 204 data formats for old and new s390
* machines, we do not access the structs directly, but use getter functions for
@@ -62,304 +43,173 @@ static struct dentry *dbfs_d204_file;
/* Time information block */
-struct info_blk_hdr {
- __u8 npar;
- __u8 flags;
- __u16 tslice;
- __u16 phys_cpus;
- __u16 this_part;
- __u64 curtod;
-} __attribute__ ((packed));
-
-struct x_info_blk_hdr {
- __u8 npar;
- __u8 flags;
- __u16 tslice;
- __u16 phys_cpus;
- __u16 this_part;
- __u64 curtod1;
- __u64 curtod2;
- char reserved[40];
-} __attribute__ ((packed));
-
static inline int info_blk_hdr__size(enum diag204_format type)
{
- if (type == INFO_SIMPLE)
- return sizeof(struct info_blk_hdr);
- else /* INFO_EXT */
- return sizeof(struct x_info_blk_hdr);
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_info_blk_hdr);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_info_blk_hdr);
}
static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct info_blk_hdr *)hdr)->npar;
- else /* INFO_EXT */
- return ((struct x_info_blk_hdr *)hdr)->npar;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_info_blk_hdr *)hdr)->npar;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_info_blk_hdr *)hdr)->npar;
}
static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct info_blk_hdr *)hdr)->flags;
- else /* INFO_EXT */
- return ((struct x_info_blk_hdr *)hdr)->flags;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_info_blk_hdr *)hdr)->flags;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_info_blk_hdr *)hdr)->flags;
}
static inline __u16 info_blk_hdr__pcpus(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct info_blk_hdr *)hdr)->phys_cpus;
- else /* INFO_EXT */
- return ((struct x_info_blk_hdr *)hdr)->phys_cpus;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_info_blk_hdr *)hdr)->phys_cpus;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_info_blk_hdr *)hdr)->phys_cpus;
}
/* Partition header */
-struct part_hdr {
- __u8 pn;
- __u8 cpus;
- char reserved[6];
- char part_name[LPAR_NAME_LEN];
-} __attribute__ ((packed));
-
-struct x_part_hdr {
- __u8 pn;
- __u8 cpus;
- __u8 rcpus;
- __u8 pflag;
- __u32 mlu;
- char part_name[LPAR_NAME_LEN];
- char lpc_name[8];
- char os_name[8];
- __u64 online_cs;
- __u64 online_es;
- __u8 upid;
- char reserved1[3];
- __u32 group_mlu;
- char group_name[8];
- char reserved2[32];
-} __attribute__ ((packed));
-
static inline int part_hdr__size(enum diag204_format type)
{
- if (type == INFO_SIMPLE)
- return sizeof(struct part_hdr);
- else /* INFO_EXT */
- return sizeof(struct x_part_hdr);
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_part_hdr);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_part_hdr);
}
static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct part_hdr *)hdr)->cpus;
- else /* INFO_EXT */
- return ((struct x_part_hdr *)hdr)->rcpus;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_part_hdr *)hdr)->cpus;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_part_hdr *)hdr)->rcpus;
}
static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
char *name)
{
- if (type == INFO_SIMPLE)
- memcpy(name, ((struct part_hdr *)hdr)->part_name,
- LPAR_NAME_LEN);
- else /* INFO_EXT */
- memcpy(name, ((struct x_part_hdr *)hdr)->part_name,
- LPAR_NAME_LEN);
- EBCASC(name, LPAR_NAME_LEN);
- name[LPAR_NAME_LEN] = 0;
+ if (type == DIAG204_INFO_SIMPLE)
+ memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name,
+ DIAG204_LPAR_NAME_LEN);
+ else /* DIAG204_INFO_EXT */
+ memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name,
+ DIAG204_LPAR_NAME_LEN);
+ EBCASC(name, DIAG204_LPAR_NAME_LEN);
+ name[DIAG204_LPAR_NAME_LEN] = 0;
strim(name);
}
-struct cpu_info {
- __u16 cpu_addr;
- char reserved1[2];
- __u8 ctidx;
- __u8 cflag;
- __u16 weight;
- __u64 acc_time;
- __u64 lp_time;
-} __attribute__ ((packed));
-
-struct x_cpu_info {
- __u16 cpu_addr;
- char reserved1[2];
- __u8 ctidx;
- __u8 cflag;
- __u16 weight;
- __u64 acc_time;
- __u64 lp_time;
- __u16 min_weight;
- __u16 cur_weight;
- __u16 max_weight;
- char reseved2[2];
- __u64 online_time;
- __u64 wait_time;
- __u32 pma_weight;
- __u32 polar_weight;
- char reserved3[40];
-} __attribute__ ((packed));
-
/* CPU info block */
static inline int cpu_info__size(enum diag204_format type)
{
- if (type == INFO_SIMPLE)
- return sizeof(struct cpu_info);
- else /* INFO_EXT */
- return sizeof(struct x_cpu_info);
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_cpu_info);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_cpu_info);
}
static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct cpu_info *)hdr)->ctidx;
- else /* INFO_EXT */
- return ((struct x_cpu_info *)hdr)->ctidx;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->ctidx;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->ctidx;
}
static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct cpu_info *)hdr)->cpu_addr;
- else /* INFO_EXT */
- return ((struct x_cpu_info *)hdr)->cpu_addr;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->cpu_addr;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->cpu_addr;
}
static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct cpu_info *)hdr)->acc_time;
- else /* INFO_EXT */
- return ((struct x_cpu_info *)hdr)->acc_time;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->acc_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->acc_time;
}
static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct cpu_info *)hdr)->lp_time;
- else /* INFO_EXT */
- return ((struct x_cpu_info *)hdr)->lp_time;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->lp_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->lp_time;
}
static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
+ if (type == DIAG204_INFO_SIMPLE)
return 0; /* online_time not available in simple info */
- else /* INFO_EXT */
- return ((struct x_cpu_info *)hdr)->online_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->online_time;
}
/* Physical header */
-struct phys_hdr {
- char reserved1[1];
- __u8 cpus;
- char reserved2[6];
- char mgm_name[8];
-} __attribute__ ((packed));
-
-struct x_phys_hdr {
- char reserved1[1];
- __u8 cpus;
- char reserved2[6];
- char mgm_name[8];
- char reserved3[80];
-} __attribute__ ((packed));
-
static inline int phys_hdr__size(enum diag204_format type)
{
- if (type == INFO_SIMPLE)
- return sizeof(struct phys_hdr);
- else /* INFO_EXT */
- return sizeof(struct x_phys_hdr);
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_phys_hdr);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_phys_hdr);
}
static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct phys_hdr *)hdr)->cpus;
- else /* INFO_EXT */
- return ((struct x_phys_hdr *)hdr)->cpus;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_hdr *)hdr)->cpus;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_hdr *)hdr)->cpus;
}
/* Physical CPU info block */
-struct phys_cpu {
- __u16 cpu_addr;
- char reserved1[2];
- __u8 ctidx;
- char reserved2[3];
- __u64 mgm_time;
- char reserved3[8];
-} __attribute__ ((packed));
-
-struct x_phys_cpu {
- __u16 cpu_addr;
- char reserved1[2];
- __u8 ctidx;
- char reserved2[3];
- __u64 mgm_time;
- char reserved3[80];
-} __attribute__ ((packed));
-
static inline int phys_cpu__size(enum diag204_format type)
{
- if (type == INFO_SIMPLE)
- return sizeof(struct phys_cpu);
- else /* INFO_EXT */
- return sizeof(struct x_phys_cpu);
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_phys_cpu);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_phys_cpu);
}
static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct phys_cpu *)hdr)->cpu_addr;
- else /* INFO_EXT */
- return ((struct x_phys_cpu *)hdr)->cpu_addr;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_cpu *)hdr)->cpu_addr;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr;
}
static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct phys_cpu *)hdr)->mgm_time;
- else /* INFO_EXT */
- return ((struct x_phys_cpu *)hdr)->mgm_time;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_cpu *)hdr)->mgm_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_cpu *)hdr)->mgm_time;
}
static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
{
- if (type == INFO_SIMPLE)
- return ((struct phys_cpu *)hdr)->ctidx;
- else /* INFO_EXT */
- return ((struct x_phys_cpu *)hdr)->ctidx;
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_cpu *)hdr)->ctidx;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_cpu *)hdr)->ctidx;
}
/* Diagnose 204 functions */
-
-static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
-{
- register unsigned long _subcode asm("0") = *subcode;
- register unsigned long _size asm("1") = size;
-
- asm volatile(
- " diag %2,%0,0x204\n"
- "0: nopr %%r7\n"
- EX_TABLE(0b,0b)
- : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
- *subcode = _subcode;
- return _size;
-}
-
-static int diag204(unsigned long subcode, unsigned long size, void *addr)
-{
- diag_stat_inc(DIAG_STAT_X204);
- size = __diag204(&subcode, size, addr);
- if (subcode)
- return -1;
- return size;
-}
-
/*
* For the old diag subcode 4 with simple data format we have to use real
* memory. If we use subcode 6 or 7 with extended data format, we can (and
@@ -411,12 +261,12 @@ static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
*pages = diag204_buf_pages;
return diag204_buf;
}
- if (fmt == INFO_SIMPLE) {
+ if (fmt == DIAG204_INFO_SIMPLE) {
*pages = 1;
return diag204_alloc_rbuf();
- } else {/* INFO_EXT */
- *pages = diag204((unsigned long)SUBC_RSI |
- (unsigned long)INFO_EXT, 0, NULL);
+ } else {/* DIAG204_INFO_EXT */
+ *pages = diag204((unsigned long)DIAG204_SUBC_RSI |
+ (unsigned long)DIAG204_INFO_EXT, 0, NULL);
if (*pages <= 0)
return ERR_PTR(-ENOSYS);
else
@@ -443,18 +293,18 @@ static int diag204_probe(void)
void *buf;
int pages, rc;
- buf = diag204_get_buffer(INFO_EXT, &pages);
+ buf = diag204_get_buffer(DIAG204_INFO_EXT, &pages);
if (!IS_ERR(buf)) {
- if (diag204((unsigned long)SUBC_STIB7 |
- (unsigned long)INFO_EXT, pages, buf) >= 0) {
- diag204_store_sc = SUBC_STIB7;
- diag204_info_type = INFO_EXT;
+ if (diag204((unsigned long)DIAG204_SUBC_STIB7 |
+ (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
+ diag204_store_sc = DIAG204_SUBC_STIB7;
+ diag204_info_type = DIAG204_INFO_EXT;
goto out;
}
- if (diag204((unsigned long)SUBC_STIB6 |
- (unsigned long)INFO_EXT, pages, buf) >= 0) {
- diag204_store_sc = SUBC_STIB6;
- diag204_info_type = INFO_EXT;
+ if (diag204((unsigned long)DIAG204_SUBC_STIB6 |
+ (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
+ diag204_store_sc = DIAG204_SUBC_STIB6;
+ diag204_info_type = DIAG204_INFO_EXT;
goto out;
}
diag204_free_buffer();
@@ -462,15 +312,15 @@ static int diag204_probe(void)
/* subcodes 6 and 7 failed, now try subcode 4 */
- buf = diag204_get_buffer(INFO_SIMPLE, &pages);
+ buf = diag204_get_buffer(DIAG204_INFO_SIMPLE, &pages);
if (IS_ERR(buf)) {
rc = PTR_ERR(buf);
goto fail_alloc;
}
- if (diag204((unsigned long)SUBC_STIB4 |
- (unsigned long)INFO_SIMPLE, pages, buf) >= 0) {
- diag204_store_sc = SUBC_STIB4;
- diag204_info_type = INFO_SIMPLE;
+ if (diag204((unsigned long)DIAG204_SUBC_STIB4 |
+ (unsigned long)DIAG204_INFO_SIMPLE, pages, buf) >= 0) {
+ diag204_store_sc = DIAG204_SUBC_STIB4;
+ diag204_info_type = DIAG204_INFO_SIMPLE;
goto out;
} else {
rc = -ENOSYS;
@@ -510,20 +360,6 @@ out:
/* Diagnose 224 functions */
-static int diag224(void *ptr)
-{
- int rc = -EOPNOTSUPP;
-
- diag_stat_inc(DIAG_STAT_X224);
- asm volatile(
- " diag %1,%2,0x224\n"
- "0: lhi %0,0x0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) :"d" (0), "d" (ptr) : "memory");
- return rc;
-}
-
static int diag224_get_name_table(void)
{
/* memory must be below 2GB */
@@ -545,9 +381,9 @@ static void diag224_delete_name_table(void)
static int diag224_idx2name(int index, char *name)
{
- memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN),
- CPU_NAME_LEN);
- name[CPU_NAME_LEN] = 0;
+ memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN),
+ DIAG204_CPU_NAME_LEN);
+ name[DIAG204_CPU_NAME_LEN] = 0;
strim(name);
return 0;
}
@@ -603,7 +439,7 @@ __init int hypfs_diag_init(void)
pr_err("The hardware system does not support hypfs\n");
return -ENODATA;
}
- if (diag204_info_type == INFO_EXT) {
+ if (diag204_info_type == DIAG204_INFO_EXT) {
rc = hypfs_dbfs_create_file(&dbfs_file_d204);
if (rc)
return rc;
@@ -651,7 +487,7 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
cpu_info__lp_time(diag204_info_type, cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
- if (diag204_info_type == INFO_EXT) {
+ if (diag204_info_type == DIAG204_INFO_EXT) {
rc = hypfs_create_u64(cpu_dir, "onlinetime",
cpu_info__online_time(diag204_info_type,
cpu_info));
@@ -667,12 +503,12 @@ static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
{
struct dentry *cpus_dir;
struct dentry *lpar_dir;
- char lpar_name[LPAR_NAME_LEN + 1];
+ char lpar_name[DIAG204_LPAR_NAME_LEN + 1];
void *cpu_info;
int i;
part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
- lpar_name[LPAR_NAME_LEN] = 0;
+ lpar_name[DIAG204_LPAR_NAME_LEN] = 0;
lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
if (IS_ERR(lpar_dir))
return lpar_dir;
@@ -755,7 +591,8 @@ int hypfs_diag_create_files(struct dentry *root)
goto err_out;
}
}
- if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) {
+ if (info_blk_hdr__flags(diag204_info_type, time_hdr) &
+ DIAG204_LPAR_PHYS_FLG) {
ptr = hypfs_create_phys_files(root, part_hdr);
if (IS_ERR(ptr)) {
rc = PTR_ERR(ptr);
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 1a82cf26ee11..d28621de8e0b 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -20,6 +20,9 @@
#define CPACF_KMC 0xb92f /* MSA */
#define CPACF_KIMD 0xb93e /* MSA */
#define CPACF_KLMD 0xb93f /* MSA */
+#define CPACF_PCKMO 0xb928 /* MSA3 */
+#define CPACF_KMF 0xb92a /* MSA4 */
+#define CPACF_KMO 0xb92b /* MSA4 */
#define CPACF_PCC 0xb92c /* MSA4 */
#define CPACF_KMCTR 0xb92d /* MSA4 */
#define CPACF_PPNO 0xb93c /* MSA5 */
@@ -136,6 +139,7 @@ static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
register unsigned long r1 asm("1") = (unsigned long) status;
asm volatile(
+ " spm 0\n" /* pckmo doesn't change the cc */
/* Parameter registers are ignored, but may not be 0 */
"0: .insn rrf,%[opc] << 16,2,2,2,0\n"
" brc 1,0b\n" /* handle partial completion */
@@ -157,6 +161,12 @@ static inline int cpacf_query(unsigned int opcode, unsigned int func)
if (!test_facility(17)) /* check for MSA */
return 0;
break;
+ case CPACF_PCKMO:
+ if (!test_facility(76)) /* check for MSA3 */
+ return 0;
+ break;
+ case CPACF_KMF:
+ case CPACF_KMO:
case CPACF_PCC:
case CPACF_KMCTR:
if (!test_facility(77)) /* check for MSA4 */
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 86cae09e076a..8acf482162ed 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -78,4 +78,153 @@ struct diag210 {
extern int diag210(struct diag210 *addr);
+/* bit is set in flags, when physical cpu info is included in diag 204 data */
+#define DIAG204_LPAR_PHYS_FLG 0x80
+#define DIAG204_LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */
+#define DIAG204_CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
+
+/* diag 204 subcodes */
+enum diag204_sc {
+ DIAG204_SUBC_STIB4 = 4,
+ DIAG204_SUBC_RSI = 5,
+ DIAG204_SUBC_STIB6 = 6,
+ DIAG204_SUBC_STIB7 = 7
+};
+
+/* The two available diag 204 data formats */
+enum diag204_format {
+ DIAG204_INFO_SIMPLE = 0,
+ DIAG204_INFO_EXT = 0x00010000
+};
+
+enum diag204_cpu_flags {
+ DIAG204_CPU_ONLINE = 0x20,
+ DIAG204_CPU_CAPPED = 0x40,
+};
+
+struct diag204_info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod;
+} __packed;
+
+struct diag204_x_info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod1;
+ __u64 curtod2;
+ char reserved[40];
+} __packed;
+
+struct diag204_part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ char reserved[6];
+ char part_name[DIAG204_LPAR_NAME_LEN];
+} __packed;
+
+struct diag204_x_part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ __u8 rcpus;
+ __u8 pflag;
+ __u32 mlu;
+ char part_name[DIAG204_LPAR_NAME_LEN];
+ char lpc_name[8];
+ char os_name[8];
+ __u64 online_cs;
+ __u64 online_es;
+ __u8 upid;
+ __u8 reserved:3;
+ __u8 mtid:5;
+ char reserved1[2];
+ __u32 group_mlu;
+ char group_name[8];
+ char hardware_group_name[8];
+ char reserved2[24];
+} __packed;
+
+struct diag204_cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+} __packed;
+
+struct diag204_x_cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+ __u16 min_weight;
+ __u16 cur_weight;
+ __u16 max_weight;
+ char reseved2[2];
+ __u64 online_time;
+ __u64 wait_time;
+ __u32 pma_weight;
+ __u32 polar_weight;
+ __u32 cpu_type_cap;
+ __u32 group_cpu_type_cap;
+ char reserved3[32];
+} __packed;
+
+struct diag204_phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+} __packed;
+
+struct diag204_x_phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+ char reserved3[80];
+} __packed;
+
+struct diag204_phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[3];
+ __u64 mgm_time;
+ char reserved3[8];
+} __packed;
+
+struct diag204_x_phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[1];
+ __u16 weight;
+ __u64 mgm_time;
+ char reserved3[80];
+} __packed;
+
+struct diag204_x_part_block {
+ struct diag204_x_part_hdr hdr;
+ struct diag204_x_cpu_info cpus[];
+} __packed;
+
+struct diag204_x_phys_block {
+ struct diag204_x_phys_hdr hdr;
+ struct diag204_x_phys_cpu cpus[];
+} __packed;
+
+int diag204(unsigned long subcode, unsigned long size, void *addr);
+int diag224(void *ptr);
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3249b7464889..ffaba07f50ab 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 563ab9f44874..1736c7d3c94c 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -225,6 +225,7 @@ do { \
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index d054c1b07a3c..741ddba0bf11 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -10,14 +10,25 @@
/**
* struct gmap_struct - guest address space
+ * @list: list head for the mm->context gmap list
* @crst_list: list of all crst tables used in the guest address space
* @mm: pointer to the parent mm_struct
* @guest_to_host: radix tree with guest to host address translation
* @host_to_guest: radix tree with pointer to segment table entries
* @guest_table_lock: spinlock to protect all entries in the guest page table
+ * @ref_count: reference counter for the gmap structure
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest
+ * @host_to_rmap: radix tree with gmap_rmap lists
+ * @children: list of shadow gmap structures
+ * @pt_list: list of all page tables used in the shadow guest address space
+ * @shadow_lock: spinlock to protect the shadow gmap list
+ * @parent: pointer to the parent gmap for shadow guest address spaces
+ * @orig_asce: ASCE for which the shadow page table has been created
+ * @edat_level: edat level to be used for the shadow translation
+ * @removed: flag to indicate if a shadow guest address space has been removed
+ * @initialized: flag to indicate if a shadow guest address space can be used
*/
struct gmap {
struct list_head list;
@@ -26,26 +37,64 @@ struct gmap {
struct radix_tree_root guest_to_host;
struct radix_tree_root host_to_guest;
spinlock_t guest_table_lock;
+ atomic_t ref_count;
unsigned long *table;
unsigned long asce;
unsigned long asce_end;
void *private;
bool pfault_enabled;
+ /* Additional data for shadow guest address spaces */
+ struct radix_tree_root host_to_rmap;
+ struct list_head children;
+ struct list_head pt_list;
+ spinlock_t shadow_lock;
+ struct gmap *parent;
+ unsigned long orig_asce;
+ int edat_level;
+ bool removed;
+ bool initialized;
};
/**
+ * struct gmap_rmap - reverse mapping for shadow page table entries
+ * @next: pointer to next rmap in the list
+ * @raddr: virtual rmap address in the shadow guest address space
+ */
+struct gmap_rmap {
+ struct gmap_rmap *next;
+ unsigned long raddr;
+};
+
+#define gmap_for_each_rmap(pos, head) \
+ for (pos = (head); pos; pos = pos->next)
+
+#define gmap_for_each_rmap_safe(pos, n, head) \
+ for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
+
+/**
* struct gmap_notifier - notify function block for page invalidation
* @notifier_call: address of callback function
*/
struct gmap_notifier {
struct list_head list;
- void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
+ struct rcu_head rcu;
+ void (*notifier_call)(struct gmap *gmap, unsigned long start,
+ unsigned long end);
};
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
-void gmap_free(struct gmap *gmap);
+static inline int gmap_is_shadow(struct gmap *gmap)
+{
+ return !!gmap->parent;
+}
+
+struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
+void gmap_remove(struct gmap *gmap);
+struct gmap *gmap_get(struct gmap *gmap);
+void gmap_put(struct gmap *gmap);
+
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
+struct gmap *gmap_get_enabled(void);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
@@ -57,8 +106,29 @@ void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
-void gmap_register_ipte_notifier(struct gmap_notifier *);
-void gmap_unregister_ipte_notifier(struct gmap_notifier *);
-int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
+int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
+
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
+ int edat_level);
+int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
+int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+ int fake);
+int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+ int fake);
+int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+ int fake);
+int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
+ int fake);
+int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
+ unsigned long *pgt, int *dat_protection, int *fake);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
+
+void gmap_register_pte_notifier(struct gmap_notifier *);
+void gmap_unregister_pte_notifier(struct gmap_notifier *);
+void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
+ unsigned long bits);
+
+int gmap_mprotect_notify(struct gmap *, unsigned long start,
+ unsigned long len, int prot);
#endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index ac82e8eb936d..8e5daf7a76ce 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -43,6 +43,7 @@
/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS 8
#define KVM_REQ_DISABLE_IBS 9
+#define KVM_REQ_ICPT_OPEREXC 10
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -145,7 +146,7 @@ struct kvm_s390_sie_block {
__u64 cputm; /* 0x0028 */
__u64 ckc; /* 0x0030 */
__u64 epoch; /* 0x0038 */
- __u8 reserved40[4]; /* 0x0040 */
+ __u32 svcc; /* 0x0040 */
#define LCTL_CR0 0x8000
#define LCTL_CR6 0x0200
#define LCTL_CR9 0x0040
@@ -154,6 +155,7 @@ struct kvm_s390_sie_block {
#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
+#define ICTL_OPEREXC 0x80000000
#define ICTL_PINT 0x20000000
#define ICTL_LPSW 0x00400000
#define ICTL_STCTL 0x00040000
@@ -166,6 +168,9 @@ struct kvm_s390_sie_block {
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTINT 0x14
+#define ICPT_VALIDITY 0x20
+#define ICPT_STOP 0x28
#define ICPT_OPEREXC 0x2C
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
@@ -185,7 +190,9 @@ struct kvm_s390_sie_block {
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
- __u8 reserved70[32]; /* 0x0070 */
+ __u8 reserved70[16]; /* 0x0070 */
+ __u64 mso; /* 0x0080 */
+ __u64 msl; /* 0x0088 */
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
@@ -223,7 +230,7 @@ struct kvm_s390_sie_block {
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
__u64 riccbd; /* 0x01f0 */
- __u8 reserved1f8[8]; /* 0x01f8 */
+ __u64 gvrd; /* 0x01f8 */
} __attribute__((packed));
struct kvm_s390_itdb {
@@ -256,6 +263,7 @@ struct kvm_vcpu_stat {
u32 instruction_stctg;
u32 exit_program_interruption;
u32 exit_instr_and_program;
+ u32 exit_operation_exception;
u32 deliver_external_call;
u32 deliver_emergency_signal;
u32 deliver_service_signal;
@@ -278,7 +286,9 @@ struct kvm_vcpu_stat {
u32 instruction_stsi;
u32 instruction_stfl;
u32 instruction_tprot;
+ u32 instruction_sie;
u32 instruction_essa;
+ u32 instruction_sthyi;
u32 instruction_sigp_sense;
u32 instruction_sigp_sense_running;
u32 instruction_sigp_external_call;
@@ -541,12 +551,16 @@ struct kvm_guestdbg_info_arch {
struct kvm_vcpu_arch {
struct kvm_s390_sie_block *sie_block;
+ /* if vsie is active, currently executed shadow sie control block */
+ struct kvm_s390_sie_block *vsie_block;
unsigned int host_acrs[NUM_ACRS];
struct fpu host_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
struct gmap *gmap;
+ /* backup location for the currently enabled gmap when scheduled out */
+ struct gmap *enabled_gmap;
struct kvm_guestdbg_info_arch guestdbg;
unsigned long pfault_token;
unsigned long pfault_select;
@@ -631,6 +645,14 @@ struct sie_page2 {
u8 reserved900[0x1000 - 0x900]; /* 0x0900 */
} __packed;
+struct kvm_s390_vsie {
+ struct mutex mutex;
+ struct radix_tree_root addr_to_page;
+ int page_count;
+ int next;
+ struct page *pages[KVM_MAX_VCPUS];
+};
+
struct kvm_arch{
void *sca;
int use_esca;
@@ -646,15 +668,20 @@ struct kvm_arch{
int user_cpu_state_ctrl;
int user_sigp;
int user_stsi;
+ int user_instr0;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
+ struct ratelimit_state sthyi_limit;
spinlock_t start_stop_lock;
struct sie_page2 *sie_page2;
struct kvm_s390_cpu_model model;
struct kvm_s390_crypto crypto;
+ struct kvm_s390_vsie vsie;
u64 epoch;
+ /* subset of available cpu features enabled by user space */
+ DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
};
#define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 18226437a832..6d39329c894b 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -8,8 +8,9 @@ typedef struct {
cpumask_t cpu_attach_mask;
atomic_t flush_count;
unsigned int flush_mm;
- spinlock_t list_lock;
+ spinlock_t pgtable_lock;
struct list_head pgtable_list;
+ spinlock_t gmap_lock;
struct list_head gmap_list;
unsigned long asce;
unsigned long asce_limit;
@@ -22,9 +23,11 @@ typedef struct {
unsigned int use_skey:1;
} mm_context_t;
-#define INIT_MM_CONTEXT(name) \
- .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
- .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+#define INIT_MM_CONTEXT(name) \
+ .context.pgtable_lock = \
+ __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
+ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+ .context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
static inline int tprot(unsigned long addr)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f77c638bf397..c6a088c91aee 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,8 +15,9 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
- spin_lock_init(&mm->context.list_lock);
+ spin_lock_init(&mm->context.pgtable_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
+ spin_lock_init(&mm->context.gmap_lock);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index b2146c4119b2..69b8a41fca84 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -111,13 +111,14 @@ static inline unsigned char page_get_storage_key(unsigned long addr)
static inline int page_reset_referenced(unsigned long addr)
{
- unsigned int ipm;
+ int cc;
asm volatile(
" rrbe 0,%1\n"
" ipm %0\n"
- : "=d" (ipm) : "a" (addr) : "cc");
- return !!(ipm & 0x20000000);
+ " srl %0,28\n"
+ : "=d" (cc) : "a" (addr) : "cc");
+ return cc;
}
/* Bits int the storage key */
@@ -148,6 +149,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index da34cb6b1f3b..f4eb9843eed4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,8 +19,10 @@ unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
+struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
+void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ea1533e07271..72c7f60bfe83 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -242,8 +242,8 @@ static inline int is_module_addr(void *addr)
* swap .11..ttttt.0
* prot-none, clean, old .11.xx0000.1
* prot-none, clean, young .11.xx0001.1
- * prot-none, dirty, old .10.xx0010.1
- * prot-none, dirty, young .10.xx0011.1
+ * prot-none, dirty, old .11.xx0010.1
+ * prot-none, dirty, young .11.xx0011.1
* read-only, clean, old .11.xx0100.1
* read-only, clean, young .01.xx0101.1
* read-only, dirty, old .11.xx0110.1
@@ -277,6 +277,7 @@ static inline int is_module_addr(void *addr)
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
+#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
@@ -323,8 +324,8 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
-#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
-#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
+#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */
+#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
@@ -335,15 +336,15 @@ static inline int is_module_addr(void *addr)
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
- * dy..R...I...rw
+ * dy..R...I...wr
* prot-none, clean, old 00..1...1...00
* prot-none, clean, young 01..1...1...00
* prot-none, dirty, old 10..1...1...00
* prot-none, dirty, young 11..1...1...00
- * read-only, clean, old 00..1...1...10
- * read-only, clean, young 01..1...0...10
- * read-only, dirty, old 10..1...1...10
- * read-only, dirty, young 11..1...0...10
+ * read-only, clean, old 00..1...1...01
+ * read-only, clean, young 01..1...0...01
+ * read-only, dirty, old 10..1...1...01
+ * read-only, dirty, young 11..1...0...01
* read-write, clean, old 00..1...1...11
* read-write, clean, young 01..1...0...11
* read-write, dirty, old 10..0...1...11
@@ -364,6 +365,7 @@ static inline int is_module_addr(void *addr)
#define PGSTE_GC_BIT 0x0002000000000000UL
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
+#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
@@ -382,7 +384,7 @@ static inline int is_module_addr(void *addr)
/*
* Page protection definitions.
*/
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
@@ -1002,15 +1004,26 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void ptep_notify(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long bits);
+int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
+ pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
+ pte_t *sptep, pte_t *tptep, pte_t pte);
+void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq);
-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char key, unsigned char *oldkey,
+ bool nq, bool mr, bool mc);
+int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
+int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char *key);
/*
* Certain architectures need to do special things when PTEs
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 09529202ea77..03323175de30 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -112,6 +112,8 @@ struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
+ unsigned int gmap_write_flag; /* gmap fault write indication */
+ unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index e4f6f73afe2f..2ad9c204b1a2 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -32,12 +32,19 @@ struct sclp_core_entry {
u8 reserved0;
u8 : 4;
u8 sief2 : 1;
- u8 : 3;
- u8 : 3;
+ u8 skey : 1;
+ u8 : 2;
+ u8 : 2;
+ u8 gpere : 1;
u8 siif : 1;
u8 sigpif : 1;
u8 : 3;
- u8 reserved2[10];
+ u8 reserved2[3];
+ u8 : 2;
+ u8 ib : 1;
+ u8 cei : 1;
+ u8 : 4;
+ u8 reserved3[6];
u8 type;
u8 reserved1;
} __attribute__((packed));
@@ -59,6 +66,15 @@ struct sclp_info {
unsigned char has_hvs : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
+ unsigned char has_64bscao : 1;
+ unsigned char has_gpere : 1;
+ unsigned char has_cmma : 1;
+ unsigned char has_gsls : 1;
+ unsigned char has_ib : 1;
+ unsigned char has_cei : 1;
+ unsigned char has_pfmfi : 1;
+ unsigned char has_ibs : 1;
+ unsigned char has_skey : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
@@ -101,5 +117,6 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
void sclp_early_detect(void);
void _sclp_print_early(const char *);
+void sclp_ocf_cpc_name_copy(char *dst);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/uapi/asm/auxvec.h b/arch/s390/include/uapi/asm/auxvec.h
index a1f153e89133..c53e08442255 100644
--- a/arch/s390/include/uapi/asm/auxvec.h
+++ b/arch/s390/include/uapi/asm/auxvec.h
@@ -3,4 +3,6 @@
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 3b8e99ef9d58..a2ffec4139ad 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -93,6 +93,47 @@ struct kvm_s390_vm_cpu_machine {
__u64 fac_list[256];
};
+#define KVM_S390_VM_CPU_PROCESSOR_FEAT 2
+#define KVM_S390_VM_CPU_MACHINE_FEAT 3
+
+#define KVM_S390_VM_CPU_FEAT_NR_BITS 1024
+#define KVM_S390_VM_CPU_FEAT_ESOP 0
+#define KVM_S390_VM_CPU_FEAT_SIEF2 1
+#define KVM_S390_VM_CPU_FEAT_64BSCAO 2
+#define KVM_S390_VM_CPU_FEAT_SIIF 3
+#define KVM_S390_VM_CPU_FEAT_GPERE 4
+#define KVM_S390_VM_CPU_FEAT_GSLS 5
+#define KVM_S390_VM_CPU_FEAT_IB 6
+#define KVM_S390_VM_CPU_FEAT_CEI 7
+#define KVM_S390_VM_CPU_FEAT_IBS 8
+#define KVM_S390_VM_CPU_FEAT_SKEY 9
+#define KVM_S390_VM_CPU_FEAT_CMMA 10
+#define KVM_S390_VM_CPU_FEAT_PFMFI 11
+#define KVM_S390_VM_CPU_FEAT_SIGPIF 12
+struct kvm_s390_vm_cpu_feat {
+ __u64 feat[16];
+};
+
+#define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC 4
+#define KVM_S390_VM_CPU_MACHINE_SUBFUNC 5
+/* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */
+struct kvm_s390_vm_cpu_subfunc {
+ __u8 plo[32]; /* always */
+ __u8 ptff[16]; /* with TOD-clock steering */
+ __u8 kmac[16]; /* with MSA */
+ __u8 kmc[16]; /* with MSA */
+ __u8 km[16]; /* with MSA */
+ __u8 kimd[16]; /* with MSA */
+ __u8 klmd[16]; /* with MSA */
+ __u8 pckmo[16]; /* with MSA3 */
+ __u8 kmctr[16]; /* with MSA4 */
+ __u8 kmf[16]; /* with MSA4 */
+ __u8 kmo[16]; /* with MSA4 */
+ __u8 pcc[16]; /* with MSA4 */
+ __u8 ppno[16]; /* with MSA5 */
+ __u8 reserved[1824];
+};
+
/* kvm attributes for crypto */
#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
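The kvm_s390_vm_cpu_subfunc blocks above are raw query results, and the comment notes that "test bit" instructions use MSB-0 bit ordering. A small stand-alone sketch (not the kernel helpers) of that numbering, which is also what the later plo[i >> 3] |= 0x80 >> (i & 7) line in kvm-s390.c relies on:

#include <stdio.h>

/* Sketch only: MSB-0 ("test bit") numbering inside a raw query block. */
static void set_func_bit(unsigned char *block, unsigned int nr)
{
	block[nr >> 3] |= 0x80 >> (nr & 7);
}

static int test_func_bit(const unsigned char *block, unsigned int nr)
{
	return (block[nr >> 3] >> (7 - (nr & 7))) & 1;
}

int main(void)
{
	unsigned char plo[32] = { 0 };

	set_func_bit(plo, 0);	/* function 0 -> most significant bit of byte 0 */
	set_func_bit(plo, 9);	/* function 9 -> second bit (MSB-0) of byte 1 */
	printf("%02x %02x test(9)=%d\n", plo[0], plo[1], test_func_bit(plo, 9));
	return 0;
}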
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 8fb5d4a6dd25..3ac634368939 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -140,6 +140,7 @@
exit_code_ipa0(0xB2, 0x4c, "TAR"), \
exit_code_ipa0(0xB2, 0x50, "CSP"), \
exit_code_ipa0(0xB2, 0x54, "MVPG"), \
+ exit_code_ipa0(0xB2, 0x56, "STHYI"), \
exit_code_ipa0(0xB2, 0x58, "BSG"), \
exit_code_ipa0(0xB2, 0x5a, "BSA"), \
exit_code_ipa0(0xB2, 0x5f, "CHSC"), \
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index f37be37edd3a..3234817c7d47 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,6 +4,7 @@
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_sclp.o := n
+KCOV_INSTRUMENT_als.o := n
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
@@ -32,21 +33,25 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
CFLAGS_sysinfo.o += -w
#
-# Use -march=z900 for sclp.c to be able to print an error message if
-# the kernel is started on a machine which is too old
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
#
CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp.o += -march=z900
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_als.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif
GCOV_PROFILE_sclp.o := n
+GCOV_PROFILE_als.o := n
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
-obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
+obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
obj-y += entry.o reipl.o relocate_kernel.o
diff --git a/arch/s390/kernel/als.c b/arch/s390/kernel/als.c
new file mode 100644
index 000000000000..a16e9d1bf9e3
--- /dev/null
+++ b/arch/s390/kernel/als.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright IBM Corp. 2016
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/facility.h>
+#include <asm/lowcore.h>
+#include <asm/sclp.h>
+#include "entry.h"
+
+/*
+ * The code within this file will be called very early. It may _not_
+ * access anything within the bss section, since that is not cleared
+ * yet and may contain data (e.g. initrd) that must be saved by other
+ * code.
+ * For temporary objects the stack (16k) should be used.
+ */
+
+static unsigned long als[] __initdata = { FACILITIES_ALS };
+
+static void __init u16_to_hex(char *str, u16 val)
+{
+ int i, num;
+
+ for (i = 1; i <= 4; i++) {
+ num = (val >> (16 - 4 * i)) & 0xf;
+ if (num >= 10)
+ num += 7;
+ *str++ = '0' + num;
+ }
+ *str = '\0';
+}
+
+static void __init print_machine_type(void)
+{
+ static char mach_str[80] __initdata = "Detected machine-type number: ";
+ char type_str[5];
+ struct cpuid id;
+
+ get_cpu_id(&id);
+ u16_to_hex(type_str, id.machine);
+ strcat(mach_str, type_str);
+ _sclp_print_early(mach_str);
+}
+
+static void __init u16_to_decimal(char *str, u16 val)
+{
+ int div = 1;
+
+ while (div * 10 <= val)
+ div *= 10;
+ while (div) {
+ *str++ = '0' + val / div;
+ val %= div;
+ div /= 10;
+ }
+ *str = '\0';
+}
+
+static void __init print_missing_facilities(void)
+{
+ static char als_str[80] __initdata = "Missing facilities: ";
+ unsigned long val;
+ char val_str[6];
+ int i, j, first;
+
+ first = 1;
+ for (i = 0; i < ARRAY_SIZE(als); i++) {
+ val = ~S390_lowcore.stfle_fac_list[i] & als[i];
+ for (j = 0; j < BITS_PER_LONG; j++) {
+ if (!(val & (1UL << (BITS_PER_LONG - 1 - j))))
+ continue;
+ if (!first)
+ strcat(als_str, ",");
+ /*
+ * Make sure we stay within one line. Consider that
+ * each facility bit adds up to five characters and
+ * z/VM adds a four character prefix.
+ */
+ if (strlen(als_str) > 70) {
+ _sclp_print_early(als_str);
+ *als_str = '\0';
+ }
+ u16_to_decimal(val_str, i * BITS_PER_LONG + j);
+ strcat(als_str, val_str);
+ first = 0;
+ }
+ }
+ _sclp_print_early(als_str);
+ _sclp_print_early("See Principles of Operations for facility bits");
+}
+
+static void __init facility_mismatch(void)
+{
+ _sclp_print_early("The Linux kernel requires more recent processor hardware");
+ print_machine_type();
+ print_missing_facilities();
+ disabled_wait(0x8badcccc);
+}
+
+void __init verify_facilities(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(S390_lowcore.stfle_fac_list); i++)
+ S390_lowcore.stfle_fac_list[i] = 0;
+ asm volatile(
+ " stfl 0(0)\n"
+ : "=m" (S390_lowcore.stfl_fac_list));
+ S390_lowcore.stfle_fac_list[0] = (u64)S390_lowcore.stfl_fac_list << 32;
+ if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ register unsigned long reg0 asm("0") = ARRAY_SIZE(als) - 1;
+
+ asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+ : "+d" (reg0)
+ : "a" (&S390_lowcore.stfle_fac_list)
+ : "memory", "cc");
+ }
+ for (i = 0; i < ARRAY_SIZE(als); i++) {
+ if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i])
+ facility_mismatch();
+ }
+}
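The verify_facilities() path above boils down to a per-word "all required bits present" check, with missing facility numbers derived MSB-first from the word index and bit position. A stand-alone sketch with made-up bit masks (not the real FACILITIES_ALS values):

#include <stdio.h>

#define BITS_PER_LONG	64

static int facilities_ok(const unsigned long *installed,
			 const unsigned long *required, int words)
{
	int i;

	for (i = 0; i < words; i++)
		if ((installed[i] & required[i]) != required[i])
			return 0;
	return 1;
}

static void print_missing(const unsigned long *installed,
			  const unsigned long *required, int words)
{
	int i, j;

	for (i = 0; i < words; i++) {
		unsigned long missing = ~installed[i] & required[i];

		for (j = 0; j < BITS_PER_LONG; j++)
			if (missing & (1UL << (BITS_PER_LONG - 1 - j)))
				printf("missing facility %d\n",
				       i * BITS_PER_LONG + j);
	}
}

int main(void)
{
	/* made-up masks: facility 1 is required but not installed */
	unsigned long installed[1] = { 0x9000000000000000UL };
	unsigned long required[1]  = { 0xd000000000000000UL };

	if (!facilities_ok(installed, required, 1))
		print_missing(installed, required, 1);
	return 0;
}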
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 48b37b8357e6..a97354c8c667 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -162,6 +162,30 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
}
EXPORT_SYMBOL(diag14);
+static inline int __diag204(unsigned long *subcode, unsigned long size, void *addr)
+{
+ register unsigned long _subcode asm("0") = *subcode;
+ register unsigned long _size asm("1") = size;
+
+ asm volatile(
+ " diag %2,%0,0x204\n"
+ "0: nopr %%r7\n"
+ EX_TABLE(0b,0b)
+ : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
+ *subcode = _subcode;
+ return _size;
+}
+
+int diag204(unsigned long subcode, unsigned long size, void *addr)
+{
+ diag_stat_inc(DIAG_STAT_X204);
+ size = __diag204(&subcode, size, addr);
+ if (subcode)
+ return -1;
+ return size;
+}
+EXPORT_SYMBOL(diag204);
+
/*
* Diagnose 210: Get information about a virtual device
*/
@@ -196,3 +220,18 @@ int diag210(struct diag210 *addr)
return ccode;
}
EXPORT_SYMBOL(diag210);
+
+int diag224(void *ptr)
+{
+ int rc = -EOPNOTSUPP;
+
+ diag_stat_inc(DIAG_STAT_X224);
+ asm volatile(
+ " diag %1,%2,0x224\n"
+ "0: lhi %0,0x0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "d" (0), "d" (ptr) : "memory");
+ return rc;
+}
+EXPORT_SYMBOL(diag224);
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index bedd2f55d860..e79f030dd276 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -79,4 +79,6 @@ long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
DECLARE_PER_CPU(u64, mt_cycles[8]);
+void verify_facilities(void);
+
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fcaefb041364..56e4d8234ef2 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -306,49 +306,14 @@ ENTRY(startup_kdump)
stck __LC_LAST_UPDATE_CLOCK
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
- stfl 0(%r0) # store facilities @ __LC_STFL_FAC_LIST
- mvc __LC_STFLE_FAC_LIST(4),__LC_STFL_FAC_LIST
- tm __LC_STFLE_FAC_LIST,0x01 # stfle available ?
- jz 0f
- lghi %r0,FACILITIES_ALS_DWORDS-1
- .insn s,0xb2b00000,__LC_STFLE_FAC_LIST # store facility list extended
- # verify if all required facilities are supported by the machine
-0: la %r1,__LC_STFLE_FAC_LIST
- la %r2,3f+8-.LPG0(%r13)
- lhi %r3,FACILITIES_ALS_DWORDS
-1: lg %r0,0(%r1)
- ng %r0,0(%r2)
- clg %r0,0(%r2)
- jne 2f
- la %r1,8(%r1)
- la %r2,8(%r2)
- ahi %r3,-1
- jnz 1b
- j 4f
-2: l %r15,.Lstack-.LPG0(%r13)
+ l %r15,.Lstack-.LPG0(%r13)
ahi %r15,-STACK_FRAME_OVERHEAD
- la %r2,.Lals_string-.LPG0(%r13)
- l %r3,.Lsclp_print-.LPG0(%r13)
- basr %r14,%r3
- lpsw 3f-.LPG0(%r13) # machine type not good enough, crash
-.Lals_string:
- .asciz "The Linux kernel requires more recent processor hardware"
-.Lsclp_print:
- .long _sclp_print_early
-.Lstack:
- .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
- .align 16
-3: .long 0x000a0000,0x8badcccc
-
-# List of facilities that are required. If not all facilities are present
-# the kernel will crash.
-
- .quad FACILITIES_ALS
-
-4:
+ brasl %r14,verify_facilities
/* Continue with startup code in head64.S */
jg startup_continue
+.Lstack:
+ .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
.align 8
6: .long 0x7fffffff,0xffffffff
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 250f5972536a..dd6306c51bd6 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -690,6 +690,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
stack = (unsigned long) regs->gprs[15];
memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
+
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this causes the accounting of the
+ * function graph tracer to get messed up.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -705,6 +714,9 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long stack;
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
+
stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
/* Put the regs back */
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index d88db40bdf15..f08af675f36f 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -12,8 +12,9 @@
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_MSG_MASK 0x40000000
-static char _sclp_work_area[4096] __aligned(PAGE_SIZE);
-static bool have_vt220, have_linemode;
+static char _sclp_work_area[4096] __aligned(PAGE_SIZE) __section(data);
+static bool have_vt220 __section(data);
+static bool have_linemode __section(data);
static void _sclp_wait_int(void)
{
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index d42fa38c2429..09a9e6dfc09f 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -12,6 +12,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o guestdbg.o
+kvm-objs += diag.o gaccess.o guestdbg.o sthyi.o vsie.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 1ea4095b67d7..ce865bd4f81d 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -212,6 +212,11 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
return -EOPNOTSUPP;
+ VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
+ (u32) vcpu->run->s.regs.gprs[2],
+ (u32) vcpu->run->s.regs.gprs[3],
+ vcpu->run->s.regs.gprs[4]);
+
/*
* The layout is as follows:
* - gpr 2 contains the subchannel id (passed as addr)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 66938d283b77..54200208bf24 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -8,6 +8,7 @@
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/pgtable.h>
+#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include <asm/switch_to.h>
@@ -476,18 +477,73 @@ enum {
FSI_FETCH = 2 /* Exception was due to fetch operation */
};
-static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
- ar_t ar, enum gacc_mode mode)
+enum prot_type {
+ PROT_TYPE_LA = 0,
+ PROT_TYPE_KEYC = 1,
+ PROT_TYPE_ALC = 2,
+ PROT_TYPE_DAT = 3,
+};
+
+static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
+ ar_t ar, enum gacc_mode mode, enum prot_type prot)
{
- int rc;
- struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
- struct trans_exc_code_bits *tec_bits;
+ struct trans_exc_code_bits *tec;
memset(pgm, 0, sizeof(*pgm));
- tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
- tec_bits->as = psw.as;
+ pgm->code = code;
+ tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
+
+ switch (code) {
+ case PGM_ASCE_TYPE:
+ case PGM_PAGE_TRANSLATION:
+ case PGM_REGION_FIRST_TRANS:
+ case PGM_REGION_SECOND_TRANS:
+ case PGM_REGION_THIRD_TRANS:
+ case PGM_SEGMENT_TRANSLATION:
+ /*
+ * op_access_id only applies to MOVE_PAGE -> set bit 61
+ * exc_access_id has to be set to 0 for some instructions. Both
+ * cases have to be handled by the caller. We can always store
+ * exc_access_id, as it is undefined for non-ar cases.
+ */
+ tec->addr = gva >> PAGE_SHIFT;
+ tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
+ tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
+ /* FALL THROUGH */
+ case PGM_ALEN_TRANSLATION:
+ case PGM_ALE_SEQUENCE:
+ case PGM_ASTE_VALIDITY:
+ case PGM_ASTE_SEQUENCE:
+ case PGM_EXTENDED_AUTHORITY:
+ pgm->exc_access_id = ar;
+ break;
+ case PGM_PROTECTION:
+ switch (prot) {
+ case PROT_TYPE_ALC:
+ tec->b60 = 1;
+ /* FALL THROUGH */
+ case PROT_TYPE_DAT:
+ tec->b61 = 1;
+ tec->addr = gva >> PAGE_SHIFT;
+ tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
+ tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
+ /* exc_access_id is undefined for most cases */
+ pgm->exc_access_id = ar;
+ break;
+ default: /* LA and KEYC set b61 to 0, other params undefined */
+ break;
+ }
+ break;
+ }
+ return code;
+}
+
+static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
+ unsigned long ga, ar_t ar, enum gacc_mode mode)
+{
+ int rc;
+ struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
if (!psw.t) {
asce->val = 0;
@@ -510,21 +566,8 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
return 0;
case PSW_AS_ACCREG:
rc = ar_translation(vcpu, asce, ar, mode);
- switch (rc) {
- case PGM_ALEN_TRANSLATION:
- case PGM_ALE_SEQUENCE:
- case PGM_ASTE_VALIDITY:
- case PGM_ASTE_SEQUENCE:
- case PGM_EXTENDED_AUTHORITY:
- vcpu->arch.pgm.exc_access_id = ar;
- break;
- case PGM_PROTECTION:
- tec_bits->b60 = 1;
- tec_bits->b61 = 1;
- break;
- }
if (rc > 0)
- pgm->code = rc;
+ return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
return rc;
}
return 0;
@@ -729,40 +772,31 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
return 1;
}
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
+static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar,
unsigned long *pages, unsigned long nr_pages,
const union asce asce, enum gacc_mode mode)
{
- struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- struct trans_exc_code_bits *tec_bits;
- int lap_enabled, rc;
+ int lap_enabled, rc = 0;
- tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
ga = kvm_s390_logical_to_effective(vcpu, ga);
- tec_bits->addr = ga >> PAGE_SHIFT;
- if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
- pgm->code = PGM_PROTECTION;
- return pgm->code;
- }
+ if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
+ return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
+ PROT_TYPE_LA);
ga &= PAGE_MASK;
if (psw_bits(*psw).t) {
rc = guest_translate(vcpu, ga, pages, asce, mode);
if (rc < 0)
return rc;
- if (rc == PGM_PROTECTION)
- tec_bits->b61 = 1;
- if (rc)
- pgm->code = rc;
} else {
*pages = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, *pages))
- pgm->code = PGM_ADDRESSING;
+ rc = PGM_ADDRESSING;
}
- if (pgm->code)
- return pgm->code;
+ if (rc)
+ return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
ga += PAGE_SIZE;
pages++;
nr_pages--;
@@ -783,7 +817,8 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
if (!len)
return 0;
- rc = get_vcpu_asce(vcpu, &asce, ar, mode);
+ ga = kvm_s390_logical_to_effective(vcpu, ga);
+ rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
@@ -795,7 +830,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
need_ipte_lock = psw_bits(*psw).t && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
- rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
+ rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -846,37 +881,28 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long *gpa, enum gacc_mode mode)
{
- struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- struct trans_exc_code_bits *tec;
union asce asce;
int rc;
gva = kvm_s390_logical_to_effective(vcpu, gva);
- tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- rc = get_vcpu_asce(vcpu, &asce, ar, mode);
- tec->addr = gva >> PAGE_SHIFT;
+ rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
- if (mode == GACC_STORE) {
- rc = pgm->code = PGM_PROTECTION;
- return rc;
- }
+ if (mode == GACC_STORE)
+ return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
+ mode, PROT_TYPE_LA);
}
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
rc = guest_translate(vcpu, gva, gpa, asce, mode);
- if (rc > 0) {
- if (rc == PGM_PROTECTION)
- tec->b61 = 1;
- pgm->code = rc;
- }
+ if (rc > 0)
+ return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
} else {
- rc = 0;
*gpa = kvm_s390_real_to_abs(vcpu, gva);
if (kvm_is_error_gpa(vcpu->kvm, *gpa))
- rc = pgm->code = PGM_ADDRESSING;
+ return trans_exc(vcpu, PGM_ADDRESSING, gva, 0, mode, 0);
}
return rc;
@@ -915,20 +941,247 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
*/
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
- struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
- struct trans_exc_code_bits *tec_bits;
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
if (!ctlreg0.lap || !is_low_address(gra))
return 0;
+ return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
+}
- memset(pgm, 0, sizeof(*pgm));
- tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
- tec_bits->fsi = FSI_STORE;
- tec_bits->as = psw_bits(*psw).as;
- tec_bits->addr = gra >> PAGE_SHIFT;
- pgm->code = PGM_PROTECTION;
+/**
+ * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @pgt: pointer to the page table address result
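+ * @dat_protection: returns whether a protection bit was seen on any table level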
+ * @fake: pgt references contiguous guest memory block, not a pgtable
+ */
+static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+ unsigned long *pgt, int *dat_protection,
+ int *fake)
+{
+ struct gmap *parent;
+ union asce asce;
+ union vaddress vaddr;
+ unsigned long ptr;
+ int rc;
+
+ *fake = 0;
+ *dat_protection = 0;
+ parent = sg->parent;
+ vaddr.addr = saddr;
+ asce.val = sg->orig_asce;
+ ptr = asce.origin * 4096;
+ if (asce.r) {
+ *fake = 1;
+ asce.dt = ASCE_TYPE_REGION1;
+ }
+ switch (asce.dt) {
+ case ASCE_TYPE_REGION1:
+ if (vaddr.rfx01 > asce.tl && !asce.r)
+ return PGM_REGION_FIRST_TRANS;
+ break;
+ case ASCE_TYPE_REGION2:
+ if (vaddr.rfx)
+ return PGM_ASCE_TYPE;
+ if (vaddr.rsx01 > asce.tl)
+ return PGM_REGION_SECOND_TRANS;
+ break;
+ case ASCE_TYPE_REGION3:
+ if (vaddr.rfx || vaddr.rsx)
+ return PGM_ASCE_TYPE;
+ if (vaddr.rtx01 > asce.tl)
+ return PGM_REGION_THIRD_TRANS;
+ break;
+ case ASCE_TYPE_SEGMENT:
+ if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
+ return PGM_ASCE_TYPE;
+ if (vaddr.sx01 > asce.tl)
+ return PGM_SEGMENT_TRANSLATION;
+ break;
+ }
+
+ switch (asce.dt) {
+ case ASCE_TYPE_REGION1: {
+ union region1_table_entry rfte;
- return pgm->code;
+ if (*fake) {
+ /* offset in 16EB guest memory block */
+ ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+ rfte.val = ptr;
+ goto shadow_r2t;
+ }
+ rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
+ if (rc)
+ return rc;
+ if (rfte.i)
+ return PGM_REGION_FIRST_TRANS;
+ if (rfte.tt != TABLE_TYPE_REGION1)
+ return PGM_TRANSLATION_SPEC;
+ if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
+ return PGM_REGION_SECOND_TRANS;
+ if (sg->edat_level >= 1)
+ *dat_protection |= rfte.p;
+ ptr = rfte.rto << 12UL;
+shadow_r2t:
+ rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
+ if (rc)
+ return rc;
+ /* fallthrough */
+ }
+ case ASCE_TYPE_REGION2: {
+ union region2_table_entry rste;
+
+ if (*fake) {
+ /* offset in 8PB guest memory block */
+ ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+ rste.val = ptr;
+ goto shadow_r3t;
+ }
+ rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
+ if (rc)
+ return rc;
+ if (rste.i)
+ return PGM_REGION_SECOND_TRANS;
+ if (rste.tt != TABLE_TYPE_REGION2)
+ return PGM_TRANSLATION_SPEC;
+ if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
+ return PGM_REGION_THIRD_TRANS;
+ if (sg->edat_level >= 1)
+ *dat_protection |= rste.p;
+ ptr = rste.rto << 12UL;
+shadow_r3t:
+ rste.p |= *dat_protection;
+ rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
+ if (rc)
+ return rc;
+ /* fallthrough */
+ }
+ case ASCE_TYPE_REGION3: {
+ union region3_table_entry rtte;
+
+ if (*fake) {
+ /* offset in 4TB guest memory block */
+ ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+ rtte.val = ptr;
+ goto shadow_sgt;
+ }
+ rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
+ if (rc)
+ return rc;
+ if (rtte.i)
+ return PGM_REGION_THIRD_TRANS;
+ if (rtte.tt != TABLE_TYPE_REGION3)
+ return PGM_TRANSLATION_SPEC;
+ if (rtte.cr && asce.p && sg->edat_level >= 2)
+ return PGM_TRANSLATION_SPEC;
+ if (rtte.fc && sg->edat_level >= 2) {
+ *dat_protection |= rtte.fc0.p;
+ *fake = 1;
+ ptr = rtte.fc1.rfaa << 31UL;
+ rtte.val = ptr;
+ goto shadow_sgt;
+ }
+ if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
+ return PGM_SEGMENT_TRANSLATION;
+ if (sg->edat_level >= 1)
+ *dat_protection |= rtte.fc0.p;
+ ptr = rtte.fc0.sto << 12UL;
+shadow_sgt:
+ rtte.fc0.p |= *dat_protection;
+ rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
+ if (rc)
+ return rc;
+ /* fallthrough */
+ }
+ case ASCE_TYPE_SEGMENT: {
+ union segment_table_entry ste;
+
+ if (*fake) {
+ /* offset in 2G guest memory block */
+ ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+ ste.val = ptr;
+ goto shadow_pgt;
+ }
+ rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
+ if (rc)
+ return rc;
+ if (ste.i)
+ return PGM_SEGMENT_TRANSLATION;
+ if (ste.tt != TABLE_TYPE_SEGMENT)
+ return PGM_TRANSLATION_SPEC;
+ if (ste.cs && asce.p)
+ return PGM_TRANSLATION_SPEC;
+ *dat_protection |= ste.fc0.p;
+ if (ste.fc && sg->edat_level >= 1) {
+ *fake = 1;
+ ptr = ste.fc1.sfaa << 20UL;
+ ste.val = ptr;
+ goto shadow_pgt;
+ }
+ ptr = ste.fc0.pto << 11UL;
+shadow_pgt:
+ ste.fc0.p |= *dat_protection;
+ rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
+ if (rc)
+ return rc;
+ }
+ }
+ /* Return the parent address of the page table */
+ *pgt = ptr;
+ return 0;
+}
+
+/**
+ * kvm_s390_shadow_fault - handle fault on a shadow page table
+ * @vcpu: virtual cpu
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ *
+ * Returns: - 0 if the shadow fault was successfully resolved
+ * - > 0 (pgm exception code) on exceptions while faulting
+ * - -EAGAIN if the caller can retry immediately
+ * - -EFAULT when accessing invalid guest addresses
+ * - -ENOMEM if out of memory
+ */
+int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+ unsigned long saddr)
+{
+ union vaddress vaddr;
+ union page_table_entry pte;
+ unsigned long pgt;
+ int dat_protection, fake;
+ int rc;
+
+ down_read(&sg->mm->mmap_sem);
+ /*
+ * We don't want any guest-2 tables to change - so the parent
+ * tables/pointers we read stay valid - unshadowing is however
+ * always possible - only guest_table_lock protects us.
+ */
+ ipte_lock(vcpu);
+
+ rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
+ if (rc)
+ rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
+ &fake);
+
+ vaddr.addr = saddr;
+ if (fake) {
+ /* offset in 1MB guest memory block */
+ pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
+ goto shadow_page;
+ }
+ if (!rc)
+ rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
+ if (!rc && pte.i)
+ rc = PGM_PAGE_TRANSLATION;
+ if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
+ rc = PGM_TRANSLATION_SPEC;
+shadow_page:
+ pte.p |= dat_protection;
+ if (!rc)
+ rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
+ ipte_unlock(vcpu);
+ up_read(&sg->mm->mmap_sem);
+ return rc;
}
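As the kvm_s390_shadow_fault() comment above spells out, the access and shadow helpers in this file share one return convention: negative values are host-side errors, positive values are guest program interruption codes that still have to be delivered, and zero means the access or fault was resolved. A stand-alone sketch of a caller following that convention (the injection helper here is a placeholder, not a KVM function):

#include <errno.h>
#include <stdio.h>

#define PGM_PAGE_TRANSLATION	0x11	/* illustrative interruption code */

static int deliver_pgm(int code)
{
	printf("inject program interruption 0x%x\n", code);
	return 0;
}

static int handle_result(int rc)
{
	if (rc < 0)
		return rc;		/* e.g. -EFAULT, -ENOMEM, -EAGAIN */
	if (rc > 0)
		return deliver_pgm(rc);	/* guest sees a program exception */
	return 0;			/* resolved */
}

int main(void)
{
	handle_result(-EAGAIN);
	handle_result(PGM_PAGE_TRANSLATION);
	handle_result(0);
	return 0;
}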
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index df0a79dd8159..8756569ad938 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -361,4 +361,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
+int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
+ unsigned long saddr);
+
#endif /* __KVM_S390_GACCESS_H */
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index e8c6843b9600..31a05330d11c 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -439,6 +439,23 @@ exit_required:
#define guest_per_enabled(vcpu) \
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
+int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
+{
+ const u8 ilen = kvm_s390_get_ilen(vcpu);
+ struct kvm_s390_pgm_info pgm_info = {
+ .code = PGM_PER,
+ .per_code = PER_EVENT_IFETCH >> 24,
+ .per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
+ };
+
+ /*
+ * The PSW points to the next instruction, therefore the intercepted
+ * instruction generated a PER i-fetch event. PER address therefore
+ * points at the previous PSW address (could be an EXECUTE function).
+ */
+ return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+}
+
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
u32 perc = vcpu->arch.sie_block->perc << 24;
@@ -465,7 +482,7 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
guest_perc &= ~PER_EVENT_IFETCH;
/* All other PER events will be given to the guest */
- /* TODO: Check alterated address/address space */
+ /* TODO: Check altered address/address space */
vcpu->arch.sie_block->perc = guest_perc >> 24;
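The comment in kvm_s390_handle_per_ifetch_icpt() above explains why the PER address is the intercepted instruction, not the next one. A trivial sketch of the rewind, leaving out the 24/31-bit addressing-mode wraparound that the real __rewind_psw() additionally handles:

#include <stdio.h>

static unsigned long per_address(unsigned long psw_addr, unsigned int ilen)
{
	return psw_addr - ilen;	/* start of the intercepted instruction */
}

int main(void)
{
	/* a 4-byte instruction intercepted at 0x1000 leaves the PSW at 0x1004 */
	printf("per address: 0x%lx\n", per_address(0x1004, 4));
	return 0;
}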
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 252157181302..dfd0ca2638fa 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -351,8 +351,26 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_operexc(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.exit_operation_exception++;
+ trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
+ vcpu->arch.sie_block->ipb);
+
+ if (vcpu->arch.sie_block->ipa == 0xb256 &&
+ test_kvm_facility(vcpu->kvm, 74))
+ return handle_sthyi(vcpu);
+
+ if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
+ return -EOPNOTSUPP;
+
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+}
+
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
+ int rc, per_rc = 0;
+
if (kvm_is_ucontrol(vcpu->kvm))
return -EOPNOTSUPP;
@@ -361,7 +379,8 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
case 0x18:
return handle_noop(vcpu);
case 0x04:
- return handle_instruction(vcpu);
+ rc = handle_instruction(vcpu);
+ break;
case 0x08:
return handle_prog(vcpu);
case 0x14:
@@ -372,9 +391,19 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
return handle_validity(vcpu);
case 0x28:
return handle_stop(vcpu);
+ case 0x2c:
+ rc = handle_operexc(vcpu);
+ break;
case 0x38:
- return handle_partial_execution(vcpu);
+ rc = handle_partial_execution(vcpu);
+ break;
default:
return -EOPNOTSUPP;
}
+
+ /* process PER, even if the instruction is processed in user space */
+ if (vcpu->arch.sie_block->icptstatus & 0x02 &&
+ (!rc || rc == -EOPNOTSUPP))
+ per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
+ return per_rc ? per_rc : rc;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 5a80af740d3e..24524c0f3ef8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -28,9 +28,6 @@
#include "gaccess.h"
#include "trace-s390.h"
-#define IOINT_SCHID_MASK 0x0000ffff
-#define IOINT_SSID_MASK 0x00030000
-#define IOINT_CSSID_MASK 0x03fc0000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
@@ -821,7 +818,14 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
struct kvm_s390_interrupt_info,
list);
if (inti) {
- VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
+ if (inti->type & KVM_S390_INT_IO_AI_MASK)
+ VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
+ else
+ VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
+ inti->io.subchannel_id >> 8,
+ inti->io.subchannel_id >> 1 & 0x3,
+ inti->io.subchannel_nr);
+
vcpu->stat.deliver_io_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
inti->type,
@@ -991,6 +995,11 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
swake_up(&vcpu->wq);
vcpu->stat.halt_wakeup++;
}
+ /*
+ * The VCPU might not be sleeping but is executing the VSIE. Let's
+ * kick it, so it leaves the SIE to process the request.
+ */
+ kvm_s390_vsie_kick(vcpu);
}
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
@@ -1415,6 +1424,13 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
}
fi->counters[FIRQ_CNTR_IO] += 1;
+ if (inti->type & KVM_S390_INT_IO_AI_MASK)
+ VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
+ else
+ VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
+ inti->io.subchannel_id >> 8,
+ inti->io.subchannel_id >> 1 & 0x3,
+ inti->io.subchannel_nr);
isc = int_word_to_isc(inti->io.io_int_word);
list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
list_add_tail(&inti->list, list);
@@ -1531,13 +1547,6 @@ int kvm_s390_inject_vm(struct kvm *kvm,
inti->mchk.mcic = s390int->parm64;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (inti->type & KVM_S390_INT_IO_AI_MASK)
- VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
- else
- VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
- s390int->type & IOINT_CSSID_MASK,
- s390int->type & IOINT_SSID_MASK,
- s390int->type & IOINT_SCHID_MASK);
inti->io.subchannel_id = s390int->parm >> 16;
inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
inti->io.io_int_parm = s390int->parm64 >> 32;
@@ -2237,7 +2246,8 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
return ret;
}
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int ret;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6f5c344cd785..3f3ae4865d57 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -21,11 +21,13 @@
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
@@ -35,6 +37,8 @@
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
+#include <asm/cpacf.h>
+#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -64,6 +68,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exit_pei", VCPU_STAT(exit_pei) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+ { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
@@ -94,6 +99,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
+ { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
+ { "instruction_sie", VCPU_STAT(instruction_sie) },
{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
@@ -119,6 +126,11 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+/* allow nested virtualization in KVM (if enabled by user space) */
+static int nested;
+module_param(nested, int, S_IRUGO);
+MODULE_PARM_DESC(nested, "Nested virtualization support");
+
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
0xffe6000000000000UL,
@@ -131,7 +143,13 @@ unsigned long kvm_s390_fac_list_mask_size(void)
return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
+/* available cpu features supported by kvm */
+static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
+/* available subfunctions indicated via query / "test bit" */
+static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
+
static struct gmap_notifier gmap_notifier;
+static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
/* Section: not file related */
@@ -141,7 +159,8 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
+static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
+ unsigned long end);
/*
* This callback is executed during stop_machine(). All CPUs are therefore
@@ -163,6 +182,8 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
vcpu->arch.sie_block->epoch -= *delta;
if (vcpu->arch.cputm_enabled)
vcpu->arch.cputm_start += *delta;
+ if (vcpu->arch.vsie_block)
+ vcpu->arch.vsie_block->epoch -= *delta;
}
}
return NOTIFY_OK;
@@ -175,7 +196,9 @@ static struct notifier_block kvm_clock_notifier = {
int kvm_arch_hardware_setup(void)
{
gmap_notifier.notifier_call = kvm_gmap_notifier;
- gmap_register_ipte_notifier(&gmap_notifier);
+ gmap_register_pte_notifier(&gmap_notifier);
+ vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
+ gmap_register_pte_notifier(&vsie_gmap_notifier);
atomic_notifier_chain_register(&s390_epoch_delta_notifier,
&kvm_clock_notifier);
return 0;
@@ -183,11 +206,109 @@ int kvm_arch_hardware_setup(void)
void kvm_arch_hardware_unsetup(void)
{
- gmap_unregister_ipte_notifier(&gmap_notifier);
+ gmap_unregister_pte_notifier(&gmap_notifier);
+ gmap_unregister_pte_notifier(&vsie_gmap_notifier);
atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
&kvm_clock_notifier);
}
+static void allow_cpu_feat(unsigned long nr)
+{
+ set_bit_inv(nr, kvm_s390_available_cpu_feat);
+}
+
+static inline int plo_test_bit(unsigned char nr)
+{
+ register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
+ int cc = 3; /* subfunction not available */
+
+ asm volatile(
+ /* Parameter registers are ignored for "test bit" */
+ " plo 0,0,0,0(0)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc)
+ : "d" (r0)
+ : "cc");
+ return cc == 0;
+}
+
+static void kvm_s390_cpu_feat_init(void)
+{
+ int i;
+
+ for (i = 0; i < 256; ++i) {
+ if (plo_test_bit(i))
+ kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
+ }
+
+ if (test_facility(28)) /* TOD-clock steering */
+ ptff(kvm_s390_available_subfunc.ptff,
+ sizeof(kvm_s390_available_subfunc.ptff),
+ PTFF_QAF);
+
+ if (test_facility(17)) { /* MSA */
+ __cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
+ __cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
+ __cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
+ __cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
+ __cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
+ }
+ if (test_facility(76)) /* MSA3 */
+ __cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
+ if (test_facility(77)) { /* MSA4 */
+ __cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
+ __cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
+ __cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
+ __cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
+ }
+ if (test_facility(57)) /* MSA5 */
+ __cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);
+
+ if (MACHINE_HAS_ESOP)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
+ /*
+ * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
+ * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
+ */
+ if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
+ !test_facility(3) || !nested)
+ return;
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
+ if (sclp.has_64bscao)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
+ if (sclp.has_siif)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
+ if (sclp.has_gpere)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
+ if (sclp.has_gsls)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
+ if (sclp.has_ib)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
+ if (sclp.has_cei)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
+ if (sclp.has_ibs)
+ allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
+ /*
+ * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
+ * all skey handling functions read/set the skey from the PGSTE
+ * instead of the real storage key.
+ *
+ * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will result
+ * in pages being detected as preserved although they are resident.
+ *
+ * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
+ * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
+ *
+ * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
+ * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
+ * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
+ *
+ * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
+ * cannot easily shadow the SCA because of the ipte lock.
+ */
+}
+
int kvm_arch_init(void *opaque)
{
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
@@ -199,6 +320,8 @@ int kvm_arch_init(void *opaque)
return -ENOMEM;
}
+ kvm_s390_cpu_feat_init();
+
/* Register floating interrupt controller interface. */
return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
@@ -244,6 +367,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_USER_STSI:
case KVM_CAP_S390_SKEYS:
case KVM_CAP_S390_IRQ_STATE:
+ case KVM_CAP_S390_USER_INSTR0:
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -251,8 +375,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
- r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
- : KVM_S390_BSCA_CPU_SLOTS;
+ r = KVM_S390_BSCA_CPU_SLOTS;
+ if (sclp.has_esca && sclp.has_64bscao)
+ r = KVM_S390_ESCA_CPU_SLOTS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
@@ -335,6 +460,16 @@ out:
return r;
}
+static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
+ }
+}
+
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
int r;
@@ -355,7 +490,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus)) {
+ if (kvm->created_vcpus) {
r = -EBUSY;
} else if (MACHINE_HAS_VX) {
set_kvm_facility(kvm->arch.model.fac_mask, 129);
@@ -370,7 +505,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
case KVM_CAP_S390_RI:
r = -EINVAL;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus)) {
+ if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(64)) {
set_kvm_facility(kvm->arch.model.fac_mask, 64);
@@ -386,6 +521,12 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.user_stsi = 1;
r = 0;
break;
+ case KVM_CAP_S390_USER_INSTR0:
+ VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
+ kvm->arch.user_instr0 = 1;
+ icpt_operexc_on_all_vcpus(kvm);
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -418,21 +559,23 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
unsigned int idx;
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
- /* enable CMMA only for z10 and later (EDAT_1) */
- ret = -EINVAL;
- if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
+ ret = -ENXIO;
+ if (!sclp.has_cmma)
break;
ret = -EBUSY;
VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) == 0) {
+ if (!kvm->created_vcpus) {
kvm->arch.use_cmma = 1;
ret = 0;
}
mutex_unlock(&kvm->lock);
break;
case KVM_S390_VM_MEM_CLR_CMMA:
+ ret = -ENXIO;
+ if (!sclp.has_cmma)
+ break;
ret = -EINVAL;
if (!kvm->arch.use_cmma)
break;
@@ -461,20 +604,20 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (!new_limit)
return -EINVAL;
- /* gmap_alloc takes last usable address */
+ /* gmap_create takes last usable address */
if (new_limit != KVM_S390_NO_MEM_LIMIT)
new_limit -= 1;
ret = -EBUSY;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) == 0) {
- /* gmap_alloc will round the limit up */
- struct gmap *new = gmap_alloc(current->mm, new_limit);
+ if (!kvm->created_vcpus) {
+ /* gmap_create will round the limit up */
+ struct gmap *new = gmap_create(current->mm, new_limit);
if (!new) {
ret = -ENOMEM;
} else {
- gmap_free(kvm->arch.gmap);
+ gmap_remove(kvm->arch.gmap);
new->private = kvm;
kvm->arch.gmap = new;
ret = 0;
@@ -644,7 +787,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
int ret = 0;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus)) {
+ if (kvm->created_vcpus) {
ret = -EBUSY;
goto out;
}
@@ -676,6 +819,39 @@ out:
return ret;
}
+static int kvm_s390_set_processor_feat(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_feat data;
+ int ret = -EBUSY;
+
+ if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
+ return -EFAULT;
+ if (!bitmap_subset((unsigned long *) data.feat,
+ kvm_s390_available_cpu_feat,
+ KVM_S390_VM_CPU_FEAT_NR_BITS))
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+ if (!atomic_read(&kvm->online_vcpus)) {
+ bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
+ KVM_S390_VM_CPU_FEAT_NR_BITS);
+ ret = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ /*
+ * Once supported by kernel + hw, we have to store the subfunctions
+ * in kvm->arch and remember that user space configured them.
+ */
+ return -ENXIO;
+}
+
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
@@ -684,6 +860,12 @@ static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_PROCESSOR:
ret = kvm_s390_set_processor(kvm, attr);
break;
+ case KVM_S390_VM_CPU_PROCESSOR_FEAT:
+ ret = kvm_s390_set_processor_feat(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
+ ret = kvm_s390_set_processor_subfunc(kvm, attr);
+ break;
}
return ret;
}
@@ -732,6 +914,50 @@ out:
return ret;
}
+static int kvm_s390_get_processor_feat(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_feat data;
+
+ bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
+ KVM_S390_VM_CPU_FEAT_NR_BITS);
+ if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
+ return -EFAULT;
+ return 0;
+}
+
+static int kvm_s390_get_machine_feat(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_feat data;
+
+ bitmap_copy((unsigned long *) data.feat,
+ kvm_s390_available_cpu_feat,
+ KVM_S390_VM_CPU_FEAT_NR_BITS);
+ if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
+ return -EFAULT;
+ return 0;
+}
+
+static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ /*
+ * Once we can actually configure subfunctions (kernel + hw support),
+ * we have to check if they were already set by user space, if so copy
+ * them from kvm->arch.
+ */
+ return -ENXIO;
+}
+
+static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
+ sizeof(struct kvm_s390_vm_cpu_subfunc)))
+ return -EFAULT;
+ return 0;
+}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
@@ -743,6 +969,18 @@ static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_MACHINE:
ret = kvm_s390_get_machine(kvm, attr);
break;
+ case KVM_S390_VM_CPU_PROCESSOR_FEAT:
+ ret = kvm_s390_get_processor_feat(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MACHINE_FEAT:
+ ret = kvm_s390_get_machine_feat(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
+ ret = kvm_s390_get_processor_subfunc(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
+ ret = kvm_s390_get_machine_subfunc(kvm, attr);
+ break;
}
return ret;
}
@@ -803,6 +1041,8 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
case KVM_S390_VM_MEM_CLR_CMMA:
+ ret = sclp.has_cmma ? 0 : -ENXIO;
+ break;
case KVM_S390_VM_MEM_LIMIT_SIZE:
ret = 0;
break;
@@ -826,8 +1066,13 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
switch (attr->attr) {
case KVM_S390_VM_CPU_PROCESSOR:
case KVM_S390_VM_CPU_MACHINE:
+ case KVM_S390_VM_CPU_PROCESSOR_FEAT:
+ case KVM_S390_VM_CPU_MACHINE_FEAT:
+ case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
ret = 0;
break;
+ /* configuring subfunctions is not supported yet */
+ case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
default:
ret = -ENXIO;
break;
@@ -858,7 +1103,6 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
- unsigned long curkey;
int i, r = 0;
if (args->flags != 0)
@@ -879,26 +1123,27 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (!keys)
return -ENOMEM;
+ down_read(&current->mm->mmap_sem);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
- goto out;
+ break;
}
- curkey = get_guest_storage_key(current->mm, hva);
- if (IS_ERR_VALUE(curkey)) {
- r = curkey;
- goto out;
- }
- keys[i] = curkey;
+ r = get_guest_storage_key(current->mm, hva, &keys[i]);
+ if (r)
+ break;
+ }
+ up_read(&current->mm->mmap_sem);
+
+ if (!r) {
+ r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
+ sizeof(uint8_t) * args->count);
+ if (r)
+ r = -EFAULT;
}
- r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
- sizeof(uint8_t) * args->count);
- if (r)
- r = -EFAULT;
-out:
kvfree(keys);
return r;
}
@@ -935,24 +1180,25 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (r)
goto out;
+ down_read(&current->mm->mmap_sem);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
- goto out;
+ break;
}
/* Lowest order bit is reserved */
if (keys[i] & 0x01) {
r = -EINVAL;
- goto out;
+ break;
}
- r = set_guest_storage_key(current->mm, hva,
- (unsigned long)keys[i], 0);
+ r = set_guest_storage_key(current->mm, hva, keys[i], 0);
if (r)
- goto out;
+ break;
}
+ up_read(&current->mm->mmap_sem);
out:
kvfree(keys);
return r;
@@ -1129,6 +1375,7 @@ static void sca_dispose(struct kvm *kvm)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
+ gfp_t alloc_flags = GFP_KERNEL;
int i, rc;
char debug_name[16];
static unsigned long sca_offset;
@@ -1150,9 +1397,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
rc = -ENOMEM;
+ ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
+
kvm->arch.use_esca = 0; /* start with basic SCA */
+ if (!sclp.has_64bscao)
+ alloc_flags |= GFP_DMA;
rwlock_init(&kvm->arch.sca_lock);
- kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
+ kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
if (!kvm->arch.sca)
goto out_err;
spin_lock(&kvm_lock);
@@ -1189,6 +1440,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
+ set_kvm_facility(kvm->arch.model.fac_mask, 74);
+ set_kvm_facility(kvm->arch.model.fac_list, 74);
+
kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
kvm->arch.model.ibc = sclp.ibc & 0x0fff;
@@ -1212,7 +1466,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
else
kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
sclp.hamax + 1);
- kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
+ kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
if (!kvm->arch.gmap)
goto out_err;
kvm->arch.gmap->private = kvm;
@@ -1224,6 +1478,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.epoch = 0;
spin_lock_init(&kvm->arch.start_stop_lock);
+ kvm_s390_vsie_init(kvm);
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
return 0;
@@ -1245,7 +1500,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
sca_del_vcpu(vcpu);
if (kvm_is_ucontrol(vcpu->kvm))
- gmap_free(vcpu->arch.gmap);
+ gmap_remove(vcpu->arch.gmap);
if (vcpu->kvm->arch.use_cmma)
kvm_s390_vcpu_unsetup_cmma(vcpu);
@@ -1278,16 +1533,17 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
debug_unregister(kvm->arch.dbf);
free_page((unsigned long)kvm->arch.sie_page2);
if (!kvm_is_ucontrol(kvm))
- gmap_free(kvm->arch.gmap);
+ gmap_remove(kvm->arch.gmap);
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
+ kvm_s390_vsie_destroy(kvm);
KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
- vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
+ vcpu->arch.gmap = gmap_create(current->mm, -1UL);
if (!vcpu->arch.gmap)
return -ENOMEM;
vcpu->arch.gmap->private = vcpu->kvm;
@@ -1396,7 +1652,7 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
if (id < KVM_S390_BSCA_CPU_SLOTS)
return true;
- if (!sclp.has_esca)
+ if (!sclp.has_esca || !sclp.has_64bscao)
return false;
mutex_lock(&kvm->lock);
@@ -1537,7 +1793,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
- gmap_enable(vcpu->arch.gmap);
+ gmap_enable(vcpu->arch.enabled_gmap);
atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__start_cpu_timer_accounting(vcpu);
@@ -1550,7 +1806,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__stop_cpu_timer_accounting(vcpu);
atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
- gmap_disable(vcpu->arch.gmap);
+ vcpu->arch.enabled_gmap = gmap_get_enabled();
+ gmap_disable(vcpu->arch.enabled_gmap);
/* Save guest register state */
save_fpu_regs();
@@ -1599,7 +1856,10 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu->arch.gmap = vcpu->kvm->arch.gmap;
sca_add_vcpu(vcpu);
}
-
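+ /* intercept operation exceptions to emulate STHYI or forward instr 0x0000 to user space */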
+ if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
+ vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
+ /* make vcpu_load load the right gmap on the first trigger */
+ vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@@ -1658,15 +1918,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_s390_vcpu_setup_model(vcpu);
- vcpu->arch.sie_block->ecb = 0x02;
+ /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
+ if (MACHINE_HAS_ESOP)
+ vcpu->arch.sie_block->ecb |= 0x02;
if (test_kvm_facility(vcpu->kvm, 9))
vcpu->arch.sie_block->ecb |= 0x04;
- if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
+ if (test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
- if (test_kvm_facility(vcpu->kvm, 8))
+ if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
vcpu->arch.sie_block->ecb2 |= 0x08;
- vcpu->arch.sie_block->eca = 0xC1002000U;
+ vcpu->arch.sie_block->eca = 0x1002000U;
+ if (sclp.has_cei)
+ vcpu->arch.sie_block->eca |= 0x80000000U;
+ if (sclp.has_ib)
+ vcpu->arch.sie_block->eca |= 0x40000000U;
if (sclp.has_siif)
vcpu->arch.sie_block->eca |= 1;
if (sclp.has_sigpif)
@@ -1716,6 +1982,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+ /* the real guest size will always be smaller than msl */
+ vcpu->arch.sie_block->mso = 0;
+ vcpu->arch.sie_block->msl = sclp.hamax;
+
vcpu->arch.sie_block->icpua = id;
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1784,16 +2054,25 @@ void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
kvm_s390_vcpu_request(vcpu);
}
-static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
+static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
+ unsigned long end)
{
- int i;
struct kvm *kvm = gmap->private;
struct kvm_vcpu *vcpu;
+ unsigned long prefix;
+ int i;
+ if (gmap_is_shadow(gmap))
+ return;
+ if (start >= 1UL << 31)
+ /* We are only interested in prefix pages */
+ return;
kvm_for_each_vcpu(i, vcpu, kvm) {
/* match against both prefix pages */
- if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
- VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
+ prefix = kvm_s390_get_prefix(vcpu);
+ if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
+ VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
+ start, end);
kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
}
}
@@ -2002,6 +2281,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & ~VALID_GUESTDBG_FLAGS)
return -EINVAL;
+ if (!sclp.has_gpere)
+ return -EINVAL;
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
@@ -2070,16 +2351,16 @@ retry:
return 0;
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
- * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
+ * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
* This ensures that the ipte instruction for this request has
* already finished. We might race against a second unmapper that
* wants to set the blocking bit. Lets just retry the request loop.
*/
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
int rc;
- rc = gmap_ipte_notify(vcpu->arch.gmap,
- kvm_s390_get_prefix(vcpu),
- PAGE_SIZE * 2);
+ rc = gmap_mprotect_notify(vcpu->arch.gmap,
+ kvm_s390_get_prefix(vcpu),
+ PAGE_SIZE * 2, PROT_WRITE);
if (rc)
return rc;
goto retry;
@@ -2108,6 +2389,11 @@ retry:
goto retry;
}
+ if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
+ vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
+ goto retry;
+ }
+
/* nothing to do, just clear the request */
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
@@ -2362,14 +2648,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
* guest_enter and guest_exit should be no uaccess.
*/
local_irq_disable();
- __kvm_guest_enter();
+ guest_enter_irqoff();
__disable_cpu_timer_accounting(vcpu);
local_irq_enable();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
local_irq_disable();
__enable_cpu_timer_accounting(vcpu);
- __kvm_guest_exit();
+ guest_exit_irqoff();
local_irq_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -2598,6 +2884,8 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
+ if (!sclp.has_ibs)
+ return;
kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 8621ab00ec8e..b8432862a817 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -56,7 +56,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
- return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_WAIT;
+ return test_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -175,6 +175,12 @@ static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
return 0;
}
+static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
+{
+ WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
+ return test_bit_inv(nr, kvm->arch.cpu_feat);
+}
+
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -232,6 +238,8 @@ static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
+ /* don't inject PER events if we re-execute the instruction */
+ vcpu->arch.sie_block->icptstatus &= ~0x02;
kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
@@ -246,10 +254,21 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
+/* implemented in vsie.c */
+int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
+void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
+void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
+ unsigned long end);
+void kvm_s390_vsie_init(struct kvm *kvm);
+void kvm_s390_vsie_destroy(struct kvm *kvm);
+
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
+/* implemented in sthyi.c */
+int handle_sthyi(struct kvm_vcpu *vcpu);
+
/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
@@ -360,6 +379,7 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
/* support for Basic/Extended SCA handling */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 95916fa7c670..46160388e996 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -27,6 +27,7 @@
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
+#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
@@ -152,30 +153,166 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
int rc = 0;
+
+ trace_kvm_s390_skey_related_inst(vcpu);
if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
return rc;
rc = s390_enable_skey();
- VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
- trace_kvm_s390_skey_related_inst(vcpu);
- vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
+ VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
+ if (!rc)
+ vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
return rc;
}
-
-static int handle_skey(struct kvm_vcpu *vcpu)
+static int try_handle_skey(struct kvm_vcpu *vcpu)
{
- int rc = __skey_check_enable(vcpu);
+ int rc;
+ vcpu->stat.instruction_storage_key++;
+ rc = __skey_check_enable(vcpu);
if (rc)
return rc;
- vcpu->stat.instruction_storage_key++;
-
+ if (sclp.has_skey) {
+ /* with storage-key facility, SIE interprets it for us */
+ kvm_s390_retry_instr(vcpu);
+ VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
+ return -EAGAIN;
+ }
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+ return 0;
+}
- kvm_s390_retry_instr(vcpu);
- VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
+static int handle_iske(struct kvm_vcpu *vcpu)
+{
+ unsigned long addr;
+ unsigned char key;
+ int reg1, reg2;
+ int rc;
+
+ rc = try_handle_skey(vcpu);
+ if (rc)
+ return rc != -EAGAIN ? rc : 0;
+
+ kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+
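+ /* translate the guest logical address down to a host virtual address */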
+ addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ addr = kvm_s390_logical_to_effective(vcpu, addr);
+ addr = kvm_s390_real_to_abs(vcpu, addr);
+ addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
+ if (kvm_is_error_hva(addr))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ down_read(&current->mm->mmap_sem);
+ rc = get_guest_storage_key(current->mm, addr, &key);
+ up_read(&current->mm->mmap_sem);
+ if (rc)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ vcpu->run->s.regs.gprs[reg1] &= ~0xff;
+ vcpu->run->s.regs.gprs[reg1] |= key;
+ return 0;
+}
+
+static int handle_rrbe(struct kvm_vcpu *vcpu)
+{
+ unsigned long addr;
+ int reg1, reg2;
+ int rc;
+
+ rc = try_handle_skey(vcpu);
+ if (rc)
+ return rc != -EAGAIN ? rc : 0;
+
+ kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+
+ addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ addr = kvm_s390_logical_to_effective(vcpu, addr);
+ addr = kvm_s390_real_to_abs(vcpu, addr);
+ addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
+ if (kvm_is_error_hva(addr))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ down_read(&current->mm->mmap_sem);
+ rc = reset_guest_reference_bit(current->mm, addr);
+ up_read(&current->mm->mmap_sem);
+ if (rc < 0)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ kvm_s390_set_psw_cc(vcpu, rc);
+ return 0;
+}
+
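+/* SSKE m3 bits: nonquiescing, conditional reference/change updates, multiple block */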
+#define SSKE_NQ 0x8
+#define SSKE_MR 0x4
+#define SSKE_MC 0x2
+#define SSKE_MB 0x1
+static int handle_sske(struct kvm_vcpu *vcpu)
+{
+ unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
+ unsigned long start, end;
+ unsigned char key, oldkey;
+ int reg1, reg2;
+ int rc;
+
+ rc = try_handle_skey(vcpu);
+ if (rc)
+ return rc != -EAGAIN ? rc : 0;
+
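+ /* ignore m3 bits for facilities that are not enabled for the guest */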
+ if (!test_kvm_facility(vcpu->kvm, 8))
+ m3 &= ~SSKE_MB;
+ if (!test_kvm_facility(vcpu->kvm, 10))
+ m3 &= ~(SSKE_MC | SSKE_MR);
+ if (!test_kvm_facility(vcpu->kvm, 14))
+ m3 &= ~SSKE_NQ;
+
+ kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+
+ key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
+ start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
+ start = kvm_s390_logical_to_effective(vcpu, start);
+ if (m3 & SSKE_MB) {
+ /* start already designates an absolute address */
+ end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
+ } else {
+ start = kvm_s390_real_to_abs(vcpu, start);
+ end = start + PAGE_SIZE;
+ }
+
+ while (start != end) {
+ unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
+
+ if (kvm_is_error_hva(addr))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ down_read(&current->mm->mmap_sem);
+ rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
+ m3 & SSKE_NQ, m3 & SSKE_MR,
+ m3 & SSKE_MC);
+ up_read(&current->mm->mmap_sem);
+ if (rc < 0)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ start += PAGE_SIZE;
+ }
+
+ if (m3 & (SSKE_MC | SSKE_MR)) {
+ if (m3 & SSKE_MB) {
+ /* skey in reg1 is unpredictable */
+ kvm_s390_set_psw_cc(vcpu, 3);
+ } else {
+ kvm_s390_set_psw_cc(vcpu, rc);
+ vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
+ vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
+ }
+ }
+ if (m3 & SSKE_MB) {
+ if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
+ vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
+ else
+ vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
+ end = kvm_s390_logical_to_effective(vcpu, end);
+ vcpu->run->s.regs.gprs[reg2] |= end;
+ }
return 0;
}
@@ -582,10 +719,11 @@ static const intercept_handler_t b2_handlers[256] = {
[0x10] = handle_set_prefix,
[0x11] = handle_store_prefix,
[0x12] = handle_store_cpu_address,
+ [0x14] = kvm_s390_handle_vsie,
[0x21] = handle_ipte_interlock,
- [0x29] = handle_skey,
- [0x2a] = handle_skey,
- [0x2b] = handle_skey,
+ [0x29] = handle_iske,
+ [0x2a] = handle_rrbe,
+ [0x2b] = handle_sske,
[0x2c] = handle_test_block,
[0x30] = handle_io_inst,
[0x31] = handle_io_inst,
@@ -654,8 +792,10 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
+ bool mr = false, mc = false, nq;
int reg1, reg2;
unsigned long start, end;
+ unsigned char key;
vcpu->stat.instruction_pfmf++;
@@ -675,15 +815,27 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
!test_kvm_facility(vcpu->kvm, 14))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- /* No support for conditional-SSKE */
- if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ /* Only provide conditional-SSKE support if enabled for the guest */
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
+ test_kvm_facility(vcpu->kvm, 10)) {
+ mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
+ mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
+ }
+ nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
+ key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
start = kvm_s390_logical_to_effective(vcpu, start);
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
+ if (kvm_s390_check_low_addr_prot_real(vcpu, start))
+ return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
+ }
+
switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
case 0x00000000:
+ /* only 4k frames specify a real address */
+ start = kvm_s390_real_to_abs(vcpu, start);
end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
break;
case 0x00001000:
@@ -701,20 +853,11 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
}
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
- if (kvm_s390_check_low_addr_prot_real(vcpu, start))
- return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
- }
-
- while (start < end) {
- unsigned long useraddr, abs_addr;
+ while (start != end) {
+ unsigned long useraddr;
/* Translate guest address to host address */
- if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
- abs_addr = kvm_s390_real_to_abs(vcpu, start);
- else
- abs_addr = start;
- useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
+ useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
if (kvm_is_error_hva(useraddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
@@ -728,16 +871,25 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
- if (set_guest_storage_key(current->mm, useraddr,
- vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
- vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
+ down_read(&current->mm->mmap_sem);
+ rc = cond_set_guest_storage_key(current->mm, useraddr,
+ key, NULL, nq, mr, mc);
+ up_read(&current->mm->mmap_sem);
+ if (rc < 0)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
start += PAGE_SIZE;
}
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
- vcpu->run->s.regs.gprs[reg2] = end;
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
+ if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
+ vcpu->run->s.regs.gprs[reg2] = end;
+ } else {
+ vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
+ end = kvm_s390_logical_to_effective(vcpu, end);
+ vcpu->run->s.regs.gprs[reg2] |= end;
+ }
+ }
return 0;
}
@@ -1033,7 +1185,15 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
return 0;
}
+static int handle_ptff(struct kvm_vcpu *vcpu)
+{
+ /* we don't emulate any control instructions yet */
+ kvm_s390_set_psw_cc(vcpu, 3);
+ return 0;
+}
+
static const intercept_handler_t x01_handlers[256] = {
+ [0x04] = handle_ptff,
[0x07] = handle_sckpf,
};
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 28ea0cab1f1b..1a252f537081 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -77,18 +77,18 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
u16 p_asn, s_asn;
psw_t *psw;
- u32 flags;
+ bool idle;
- flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
+ idle = is_vcpu_idle(vcpu);
psw = &dst_vcpu->arch.sie_block->gpsw;
p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
/* Inject the emergency signal? */
- if (!(flags & CPUSTAT_STOPPED)
+ if (!is_vcpu_stopped(vcpu)
|| (psw->mask & psw_int_mask) != psw_int_mask
- || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
- || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
+ || (idle && psw->addr != 0)
+ || (!idle && (asn == p_asn || asn == s_asn))) {
return __inject_sigp_emergency(vcpu, dst_vcpu);
} else {
*reg &= 0xffffffff00000000UL;
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
new file mode 100644
index 000000000000..bd98b7d25200
--- /dev/null
+++ b/arch/s390/kvm/sthyi.c
@@ -0,0 +1,471 @@
+/*
+ * store hypervisor information instruction emulation functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
+ */
+#include <linux/kvm_host.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/ratelimit.h>
+
+#include <asm/kvm_host.h>
+#include <asm/asm-offsets.h>
+#include <asm/sclp.h>
+#include <asm/diag.h>
+#include <asm/sysinfo.h>
+#include <asm/ebcdic.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+#include "trace.h"
+
+#define DED_WEIGHT 0xffff
+/*
+ * CP and IFL as EBCDIC strings; a space (SP, 0x40) marks the end of the
+ * string, as they are padded with spaces.
+ */
+#define CP 0xc3d7404040404040UL
+#define IFL 0xc9c6d34040404040UL
+
+enum hdr_flags {
+ HDR_NOT_LPAR = 0x10,
+ HDR_STACK_INCM = 0x20,
+ HDR_STSI_UNAV = 0x40,
+ HDR_PERF_UNAV = 0x80,
+};
+
+enum mac_validity {
+ MAC_NAME_VLD = 0x20,
+ MAC_ID_VLD = 0x40,
+ MAC_CNT_VLD = 0x80,
+};
+
+enum par_flag {
+ PAR_MT_EN = 0x80,
+};
+
+enum par_validity {
+ PAR_GRP_VLD = 0x08,
+ PAR_ID_VLD = 0x10,
+ PAR_ABS_VLD = 0x20,
+ PAR_WGHT_VLD = 0x40,
+ PAR_PCNT_VLD = 0x80,
+};
+
+struct hdr_sctn {
+ u8 infhflg1;
+ u8 infhflg2; /* reserved */
+ u8 infhval1; /* reserved */
+ u8 infhval2; /* reserved */
+ u8 reserved[3];
+ u8 infhygct;
+ u16 infhtotl;
+ u16 infhdln;
+ u16 infmoff;
+ u16 infmlen;
+ u16 infpoff;
+ u16 infplen;
+ u16 infhoff1;
+ u16 infhlen1;
+ u16 infgoff1;
+ u16 infglen1;
+ u16 infhoff2;
+ u16 infhlen2;
+ u16 infgoff2;
+ u16 infglen2;
+ u16 infhoff3;
+ u16 infhlen3;
+ u16 infgoff3;
+ u16 infglen3;
+ u8 reserved2[4];
+} __packed;
+
+struct mac_sctn {
+ u8 infmflg1; /* reserved */
+ u8 infmflg2; /* reserved */
+ u8 infmval1;
+ u8 infmval2; /* reserved */
+ u16 infmscps;
+ u16 infmdcps;
+ u16 infmsifl;
+ u16 infmdifl;
+ char infmname[8];
+ char infmtype[4];
+ char infmmanu[16];
+ char infmseq[16];
+ char infmpman[4];
+ u8 reserved[4];
+} __packed;
+
+struct par_sctn {
+ u8 infpflg1;
+ u8 infpflg2; /* reserved */
+ u8 infpval1;
+ u8 infpval2; /* reserved */
+ u16 infppnum;
+ u16 infpscps;
+ u16 infpdcps;
+ u16 infpsifl;
+ u16 infpdifl;
+ u16 reserved;
+ char infppnam[8];
+ u32 infpwbcp;
+ u32 infpabcp;
+ u32 infpwbif;
+ u32 infpabif;
+ char infplgnm[8];
+ u32 infplgcp;
+ u32 infplgif;
+} __packed;
+
+struct sthyi_sctns {
+ struct hdr_sctn hdr;
+ struct mac_sctn mac;
+ struct par_sctn par;
+} __packed;
+
+struct cpu_inf {
+ u64 lpar_cap;
+ u64 lpar_grp_cap;
+ u64 lpar_weight;
+ u64 all_weight;
+ int cpu_num_ded;
+ int cpu_num_shd;
+};
+
+struct lpar_cpu_inf {
+ struct cpu_inf cp;
+ struct cpu_inf ifl;
+};
+
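+/* read the EBCDIC cpu-type id for the given ctidx from the diag224 name table */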
+static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
+{
+ return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
+}
+
+/*
+ * Scales the cpu capping from the lpar range to the one expected in
+ * sthyi data.
+ *
+ * diag204 reports a cap in hundredths of processor units.
+ * z/VM's range for one core is 0 - 0x10000.
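+ *
+ * For example, a diag204 cap of 150 (1.5 processor units) scales to
+ * (0x10000 * 150) / 100 = 0x18000.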
+ */
+static u32 scale_cap(u32 in)
+{
+ return (0x10000 * in) / 100;
+}
+
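+/* lay out the response: the machine and partition sections directly follow the header */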
+static void fill_hdr(struct sthyi_sctns *sctns)
+{
+ sctns->hdr.infhdln = sizeof(sctns->hdr);
+ sctns->hdr.infmoff = sizeof(sctns->hdr);
+ sctns->hdr.infmlen = sizeof(sctns->mac);
+ sctns->hdr.infplen = sizeof(sctns->par);
+ sctns->hdr.infpoff = sctns->hdr.infhdln + sctns->hdr.infmlen;
+ sctns->hdr.infhtotl = sctns->hdr.infpoff + sctns->hdr.infplen;
+}
+
+static void fill_stsi_mac(struct sthyi_sctns *sctns,
+ struct sysinfo_1_1_1 *sysinfo)
+{
+ if (stsi(sysinfo, 1, 1, 1))
+ return;
+
+ sclp_ocf_cpc_name_copy(sctns->mac.infmname);
+
+ memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
+ memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
+ memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
+ memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
+
+ sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
+}
+
+static void fill_stsi_par(struct sthyi_sctns *sctns,
+ struct sysinfo_2_2_2 *sysinfo)
+{
+ if (stsi(sysinfo, 2, 2, 2))
+ return;
+
+ sctns->par.infppnum = sysinfo->lpar_number;
+ memcpy(sctns->par.infppnam, sysinfo->name, sizeof(sctns->par.infppnam));
+
+ sctns->par.infpval1 |= PAR_ID_VLD;
+}
+
+static void fill_stsi(struct sthyi_sctns *sctns)
+{
+ void *sysinfo;
+
+ /* Errors are handled through the validity bits in the response. */
+ sysinfo = (void *)__get_free_page(GFP_KERNEL);
+ if (!sysinfo)
+ return;
+
+ fill_stsi_mac(sctns, sysinfo);
+ fill_stsi_par(sctns, sysinfo);
+
+ free_pages((unsigned long)sysinfo, 0);
+}
+
+static void fill_diag_mac(struct sthyi_sctns *sctns,
+ struct diag204_x_phys_block *block,
+ void *diag224_buf)
+{
+ int i;
+
+ for (i = 0; i < block->hdr.cpus; i++) {
+ switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
+ case CP:
+ if (block->cpus[i].weight == DED_WEIGHT)
+ sctns->mac.infmdcps++;
+ else
+ sctns->mac.infmscps++;
+ break;
+ case IFL:
+ if (block->cpus[i].weight == DED_WEIGHT)
+ sctns->mac.infmdifl++;
+ else
+ sctns->mac.infmsifl++;
+ break;
+ }
+ }
+ sctns->mac.infmval1 |= MAC_CNT_VLD;
+}
+
+/* Returns a pointer to the next partition block. */
+static struct diag204_x_part_block *lpar_cpu_inf(struct lpar_cpu_inf *part_inf,
+ bool this_lpar,
+ void *diag224_buf,
+ struct diag204_x_part_block *block)
+{
+ int i, capped = 0, weight_cp = 0, weight_ifl = 0;
+ struct cpu_inf *cpu_inf;
+
+ for (i = 0; i < block->hdr.rcpus; i++) {
+ if (!(block->cpus[i].cflag & DIAG204_CPU_ONLINE))
+ continue;
+
+ switch (cpu_id(block->cpus[i].ctidx, diag224_buf)) {
+ case CP:
+ cpu_inf = &part_inf->cp;
+ if (block->cpus[i].cur_weight < DED_WEIGHT)
+ weight_cp |= block->cpus[i].cur_weight;
+ break;
+ case IFL:
+ cpu_inf = &part_inf->ifl;
+ if (block->cpus[i].cur_weight < DED_WEIGHT)
+ weight_ifl |= block->cpus[i].cur_weight;
+ break;
+ default:
+ continue;
+ }
+
+ if (!this_lpar)
+ continue;
+
+ capped |= block->cpus[i].cflag & DIAG204_CPU_CAPPED;
+ cpu_inf->lpar_cap |= block->cpus[i].cpu_type_cap;
+ cpu_inf->lpar_grp_cap |= block->cpus[i].group_cpu_type_cap;
+
+ if (block->cpus[i].weight == DED_WEIGHT)
+ cpu_inf->cpu_num_ded += 1;
+ else
+ cpu_inf->cpu_num_shd += 1;
+ }
+
+ if (this_lpar && capped) {
+ part_inf->cp.lpar_weight = weight_cp;
+ part_inf->ifl.lpar_weight = weight_ifl;
+ }
+ part_inf->cp.all_weight += weight_cp;
+ part_inf->ifl.all_weight += weight_ifl;
+ return (struct diag204_x_part_block *)&block->cpus[i];
+}
+
+static void fill_diag(struct sthyi_sctns *sctns)
+{
+ int i, r, pages;
+ bool this_lpar;
+ void *diag204_buf;
+ void *diag224_buf = NULL;
+ struct diag204_x_info_blk_hdr *ti_hdr;
+ struct diag204_x_part_block *part_block;
+ struct diag204_x_phys_block *phys_block;
+ struct lpar_cpu_inf lpar_inf = {};
+
+ /* Errors are handled through the validity bits in the response. */
+ pages = diag204((unsigned long)DIAG204_SUBC_RSI |
+ (unsigned long)DIAG204_INFO_EXT, 0, NULL);
+ if (pages <= 0)
+ return;
+
+ diag204_buf = vmalloc(PAGE_SIZE * pages);
+ if (!diag204_buf)
+ return;
+
+ r = diag204((unsigned long)DIAG204_SUBC_STIB7 |
+ (unsigned long)DIAG204_INFO_EXT, pages, diag204_buf);
+ if (r < 0)
+ goto out;
+
+ diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!diag224_buf || diag224(diag224_buf))
+ goto out;
+
+ ti_hdr = diag204_buf;
+ part_block = diag204_buf + sizeof(*ti_hdr);
+
+ for (i = 0; i < ti_hdr->npar; i++) {
+ /*
+ * For the calling lpar we also need to get the cpu
+ * caps and weights. The time information block header
+ * specifies the offset to the partition block of the
+ * caller lpar, so we know when we process its data.
+ */
+ this_lpar = (void *)part_block - diag204_buf == ti_hdr->this_part;
+ part_block = lpar_cpu_inf(&lpar_inf, this_lpar, diag224_buf,
+ part_block);
+ }
+
+ phys_block = (struct diag204_x_phys_block *)part_block;
+ part_block = diag204_buf + ti_hdr->this_part;
+ if (part_block->hdr.mtid)
+ sctns->par.infpflg1 = PAR_MT_EN;
+
+ sctns->par.infpval1 |= PAR_GRP_VLD;
+ sctns->par.infplgcp = scale_cap(lpar_inf.cp.lpar_grp_cap);
+ sctns->par.infplgif = scale_cap(lpar_inf.ifl.lpar_grp_cap);
+ memcpy(sctns->par.infplgnm, part_block->hdr.hardware_group_name,
+ sizeof(sctns->par.infplgnm));
+
+ sctns->par.infpscps = lpar_inf.cp.cpu_num_shd;
+ sctns->par.infpdcps = lpar_inf.cp.cpu_num_ded;
+ sctns->par.infpsifl = lpar_inf.ifl.cpu_num_shd;
+ sctns->par.infpdifl = lpar_inf.ifl.cpu_num_ded;
+ sctns->par.infpval1 |= PAR_PCNT_VLD;
+
+ sctns->par.infpabcp = scale_cap(lpar_inf.cp.lpar_cap);
+ sctns->par.infpabif = scale_cap(lpar_inf.ifl.lpar_cap);
+ sctns->par.infpval1 |= PAR_ABS_VLD;
+
+ /*
+ * Everything below needs global performance data to be
+ * meaningful.
+ */
+ if (!(ti_hdr->flags & DIAG204_LPAR_PHYS_FLG)) {
+ sctns->hdr.infhflg1 |= HDR_PERF_UNAV;
+ goto out;
+ }
+
+ fill_diag_mac(sctns, phys_block, diag224_buf);
+
+ if (lpar_inf.cp.lpar_weight) {
+ sctns->par.infpwbcp = sctns->mac.infmscps * 0x10000 *
+ lpar_inf.cp.lpar_weight / lpar_inf.cp.all_weight;
+ }
+
+ if (lpar_inf.ifl.lpar_weight) {
+ sctns->par.infpwbif = sctns->mac.infmsifl * 0x10000 *
+ lpar_inf.ifl.lpar_weight / lpar_inf.ifl.all_weight;
+ }
+ sctns->par.infpval1 |= PAR_WGHT_VLD;
+
+out:
+ kfree(diag224_buf);
+ vfree(diag204_buf);
+}
+
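+/*
+ * Execute STHYI (opcode 0xb256) with function code 0 in r0 and the
+ * response buffer address in r2, returning the condition code.
+ */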
+static int sthyi(u64 vaddr)
+{
+ register u64 code asm("0") = 0;
+ register u64 addr asm("2") = vaddr;
+ int cc;
+
+ asm volatile(
+ ".insn rre,0xB2560000,%[code],%[addr]\n"
+ "ipm %[cc]\n"
+ "srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [code] "d" (code), [addr] "a" (addr)
+ : "memory", "cc");
+ return cc;
+}
+
+int handle_sthyi(struct kvm_vcpu *vcpu)
+{
+ int reg1, reg2, r = 0;
+ u64 code, addr, cc = 0;
+ struct sthyi_sctns *sctns = NULL;
+
+ /*
+ * STHYI requires extensive locking in the higher hypervisors
+ * and is very computational/memory expensive. Therefore we
+ * ratelimit the executions per VM.
+ */
+ if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
+ kvm_s390_retry_instr(vcpu);
+ return 0;
+ }
+
+ kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+ code = vcpu->run->s.regs.gprs[reg1];
+ addr = vcpu->run->s.regs.gprs[reg2];
+
+ vcpu->stat.instruction_sthyi++;
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
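+ /* both registers must be even and distinct, the buffer page aligned */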
+ if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+ cc = 3;
+ goto out;
+ }
+
+ /*
+ * If the page has not yet been faulted in, we want to do that
+ * now and not after all the expensive calculations.
+ */
+ r = write_guest(vcpu, addr, reg2, &cc, 1);
+ if (r)
+ return kvm_s390_inject_prog_cond(vcpu, r);
+
+ sctns = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!sctns)
+ return -ENOMEM;
+
+ /*
+ * If we are a guest, we don't want to emulate an emulated
+ * instruction. We ask the hypervisor to provide the data.
+ */
+ if (test_facility(74)) {
+ cc = sthyi((u64)sctns);
+ goto out;
+ }
+
+ fill_hdr(sctns);
+ fill_stsi(sctns);
+ fill_diag(sctns);
+
+out:
+ if (!cc) {
+ r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
+ if (r) {
+ free_page((unsigned long)sctns);
+ return kvm_s390_inject_prog_cond(vcpu, r);
+ }
+ }
+
+ free_page((unsigned long)sctns);
+ vcpu->run->s.regs.gprs[reg2 + 1] = cc ? 4 : 0;
+ kvm_s390_set_psw_cc(vcpu, cc);
+ return r;
+}
diff --git a/arch/s390/kvm/trace.h b/arch/s390/kvm/trace.h
index 916834d7a73a..4fc9d4e5be89 100644
--- a/arch/s390/kvm/trace.h
+++ b/arch/s390/kvm/trace.h
@@ -41,7 +41,7 @@ TRACE_EVENT(kvm_s390_skey_related_inst,
TP_fast_assign(
VCPU_ASSIGN_COMMON
),
- VCPU_TP_PRINTK("%s", "first instruction related to skeys on vcpu")
+ VCPU_TP_PRINTK("%s", "storage key related instruction")
);
TRACE_EVENT(kvm_s390_major_guest_pfault,
@@ -185,8 +185,10 @@ TRACE_EVENT(kvm_s390_intercept_prog,
__entry->code = code;
),
- VCPU_TP_PRINTK("intercepted program interruption %04x",
- __entry->code)
+ VCPU_TP_PRINTK("intercepted program interruption %04x (%s)",
+ __entry->code,
+ __print_symbolic(__entry->code,
+ icpt_prog_codes))
);
/*
@@ -412,6 +414,47 @@ TRACE_EVENT(kvm_s390_handle_stsi,
__entry->addr)
);
+TRACE_EVENT(kvm_s390_handle_operexc,
+ TP_PROTO(VCPU_PROTO_COMMON, __u16 ipa, __u32 ipb),
+ TP_ARGS(VCPU_ARGS_COMMON, ipa, ipb),
+
+ TP_STRUCT__entry(
+ VCPU_FIELD_COMMON
+ __field(__u64, instruction)
+ ),
+
+ TP_fast_assign(
+ VCPU_ASSIGN_COMMON
+ __entry->instruction = ((__u64)ipa << 48) |
+ ((__u64)ipb << 16);
+ ),
+
+ VCPU_TP_PRINTK("operation exception on instruction %016llx (%s)",
+ __entry->instruction,
+ __print_symbolic(icpt_insn_decoder(__entry->instruction),
+ icpt_insn_codes))
+ );
+
+TRACE_EVENT(kvm_s390_handle_sthyi,
+ TP_PROTO(VCPU_PROTO_COMMON, u64 code, u64 addr),
+ TP_ARGS(VCPU_ARGS_COMMON, code, addr),
+
+ TP_STRUCT__entry(
+ VCPU_FIELD_COMMON
+ __field(u64, code)
+ __field(u64, addr)
+ ),
+
+ TP_fast_assign(
+ VCPU_ASSIGN_COMMON
+ __entry->code = code;
+ __entry->addr = addr;
+ ),
+
+ VCPU_TP_PRINTK("STHYI fc: %llu addr: %016llx",
+ __entry->code, __entry->addr)
+ );
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
new file mode 100644
index 000000000000..c106488b4137
--- /dev/null
+++ b/arch/s390/kvm/vsie.c
@@ -0,0 +1,1091 @@
+/*
+ * kvm nested virtualization support for s390x
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ */
+#include <linux/vmalloc.h>
+#include <linux/kvm_host.h>
+#include <linux/bug.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <asm/gmap.h>
+#include <asm/mmu_context.h>
+#include <asm/sclp.h>
+#include <asm/nmi.h>
+#include <asm/dis.h>
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+struct vsie_page {
+ struct kvm_s390_sie_block scb_s; /* 0x0000 */
+ /* the pinned original scb */
+ struct kvm_s390_sie_block *scb_o; /* 0x0200 */
+ /* the shadow gmap in use by the vsie_page */
+ struct gmap *gmap; /* 0x0208 */
+ /* address of the last reported fault to guest2 */
+ unsigned long fault_addr; /* 0x0210 */
+ __u8 reserved[0x0700 - 0x0218]; /* 0x0218 */
+ struct kvm_s390_crypto_cb crycb; /* 0x0700 */
+ __u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
+} __packed;
+
+/* trigger a validity icpt for the given scb */
+static int set_validity_icpt(struct kvm_s390_sie_block *scb,
+ __u16 reason_code)
+{
+ scb->ipa = 0x1000;
+ scb->ipb = ((__u32) reason_code) << 16;
+ scb->icptcode = ICPT_VALIDITY;
+ return 1;
+}
+
+/* mark the prefix as unmapped, this will block the VSIE */
+static void prefix_unmapped(struct vsie_page *vsie_page)
+{
+ atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
+}
+
+/* mark the prefix as unmapped and wait until the VSIE has been left */
+static void prefix_unmapped_sync(struct vsie_page *vsie_page)
+{
+ prefix_unmapped(vsie_page);
+ if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
+ atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
+ while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
+ cpu_relax();
+}
+
+/* mark the prefix as mapped, this will allow the VSIE to run */
+static void prefix_mapped(struct vsie_page *vsie_page)
+{
+ atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
+}
+
+/* test if the prefix is mapped into the gmap shadow */
+static int prefix_is_mapped(struct vsie_page *vsie_page)
+{
+ return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
+}
+
+/* copy the updated intervention request bits into the shadow scb */
+static void update_intervention_requests(struct vsie_page *vsie_page)
+{
+ const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
+ int cpuflags;
+
+ cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
+ atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
+ atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
+}
+
+/* shadow (filter and validate) the cpuflags */
+static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ int newflags, cpuflags = atomic_read(&scb_o->cpuflags);
+
+ /* we don't allow ESA/390 guests */
+ if (!(cpuflags & CPUSTAT_ZARCH))
+ return set_validity_icpt(scb_s, 0x0001U);
+
+ if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
+ return set_validity_icpt(scb_s, 0x0001U);
+ else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
+ return set_validity_icpt(scb_s, 0x0007U);
+
+ /* intervention requests will be set later */
+ newflags = CPUSTAT_ZARCH;
+ if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
+ newflags |= CPUSTAT_GED;
+ if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
+ if (cpuflags & CPUSTAT_GED)
+ return set_validity_icpt(scb_s, 0x0001U);
+ newflags |= CPUSTAT_GED2;
+ }
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
+ newflags |= cpuflags & CPUSTAT_P;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
+ newflags |= cpuflags & CPUSTAT_SM;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
+ newflags |= cpuflags & CPUSTAT_IBS;
+
+ atomic_set(&scb_s->cpuflags, newflags);
+ return 0;
+}
+
+/*
+ * Create a shadow copy of the crycb block and setup key wrapping, if
+ * requested for guest 3 and enabled for guest 2.
+ *
+ * We only accept format-1 (no AP in g2), but convert it into format-2.
+ * There is nothing to do for format-0.
+ *
+ * Returns: - 0 if shadowed or nothing to do
+ * - > 0 if control has to be given to guest 2
+ */
+static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U;
+ unsigned long *b1, *b2;
+ u8 ecb3_flags;
+
+ scb_s->crycbd = 0;
+ if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
+ return 0;
+ /* format-1 is supported with message-security-assist extension 3 */
+ if (!test_kvm_facility(vcpu->kvm, 76))
+ return 0;
+ /* we may only allow it if enabled for guest 2 */
+ ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
+ (ECB3_AES | ECB3_DEA);
+ if (!ecb3_flags)
+ return 0;
+
+ if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
+ return set_validity_icpt(scb_s, 0x003CU);
+ else if (!crycb_addr)
+ return set_validity_icpt(scb_s, 0x0039U);
+
+ /* copy only the wrapping keys */
+ if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+ return set_validity_icpt(scb_s, 0x0035U);
+
+ scb_s->ecb3 |= ecb3_flags;
+ scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
+ CRYCB_FORMAT2;
+
+ /* xor both blocks in one run */
+ b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
+ b2 = (unsigned long *)
+ vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
+ /* as 56%8 == 0, bitmap_xor won't overwrite any data */
+ bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
+ return 0;
+}
+
+/* shadow (round up/down) the ibc to avoid validity icpt */
+static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ __u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
+
+ scb_s->ibc = 0;
+ /* ibc installed in g2 and requested for g3 */
+ if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) {
+ scb_s->ibc = scb_o->ibc & 0x0fffU;
+ /* take care of the minimum ibc level of the machine */
+ if (scb_s->ibc < min_ibc)
+ scb_s->ibc = min_ibc;
+ /* take care of the maximum ibc level set for the guest */
+ if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
+ scb_s->ibc = vcpu->kvm->arch.model.ibc;
+ }
+}
+
+/* unshadow the scb, copying parameters back to the real scb */
+static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+
+ /* interception */
+ scb_o->icptcode = scb_s->icptcode;
+ scb_o->icptstatus = scb_s->icptstatus;
+ scb_o->ipa = scb_s->ipa;
+ scb_o->ipb = scb_s->ipb;
+ scb_o->gbea = scb_s->gbea;
+
+ /* timer */
+ scb_o->cputm = scb_s->cputm;
+ scb_o->ckc = scb_s->ckc;
+ scb_o->todpr = scb_s->todpr;
+
+ /* guest state */
+ scb_o->gpsw = scb_s->gpsw;
+ scb_o->gg14 = scb_s->gg14;
+ scb_o->gg15 = scb_s->gg15;
+ memcpy(scb_o->gcr, scb_s->gcr, 128);
+ scb_o->pp = scb_s->pp;
+
+ /* interrupt intercept */
+ switch (scb_s->icptcode) {
+ case ICPT_PROGI:
+ case ICPT_INSTPROGI:
+ case ICPT_EXTINT:
+ memcpy((void *)((u64)scb_o + 0xc0),
+ (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
+ break;
+ case ICPT_PARTEXEC:
+ /* MVPG only */
+ memcpy((void *)((u64)scb_o + 0xc0),
+ (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
+ break;
+ }
+
+ if (scb_s->ihcpu != 0xffffU)
+ scb_o->ihcpu = scb_s->ihcpu;
+}
+
+/*
+ * Setup the shadow scb by copying and checking the relevant parts of the g2
+ * provided scb.
+ *
+ * Returns: - 0 if the scb has been shadowed
+ * - > 0 if control has to be given to guest 2
+ */
+static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ bool had_tx = scb_s->ecb & 0x10U;
+ unsigned long new_mso = 0;
+ int rc;
+
+ /* make sure we don't have any leftovers when reusing the scb */
+ scb_s->icptcode = 0;
+ scb_s->eca = 0;
+ scb_s->ecb = 0;
+ scb_s->ecb2 = 0;
+ scb_s->ecb3 = 0;
+ scb_s->ecd = 0;
+ scb_s->fac = 0;
+
+ rc = prepare_cpuflags(vcpu, vsie_page);
+ if (rc)
+ goto out;
+
+ /* timer */
+ scb_s->cputm = scb_o->cputm;
+ scb_s->ckc = scb_o->ckc;
+ scb_s->todpr = scb_o->todpr;
+ scb_s->epoch = scb_o->epoch;
+
+ /* guest state */
+ scb_s->gpsw = scb_o->gpsw;
+ scb_s->gg14 = scb_o->gg14;
+ scb_s->gg15 = scb_o->gg15;
+ memcpy(scb_s->gcr, scb_o->gcr, 128);
+ scb_s->pp = scb_o->pp;
+
+ /* interception / execution handling */
+ scb_s->gbea = scb_o->gbea;
+ scb_s->lctl = scb_o->lctl;
+ scb_s->svcc = scb_o->svcc;
+ scb_s->ictl = scb_o->ictl;
+ /*
+ * SKEY handling functions can't deal with false setting of PTE invalid
+ * bits. Therefore we cannot provide interpretation and would later
+ * have to provide our own emulation handlers.
+ */
+ scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
+ scb_s->icpua = scb_o->icpua;
+
+ if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
+ new_mso = scb_o->mso & 0xfffffffffff00000UL;
+ /* if the hva of the prefix changes, we have to remap the prefix */
+ if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
+ prefix_unmapped(vsie_page);
+ /* SIE will do mso/msl validity and exception checks for us */
+ scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
+ scb_s->mso = new_mso;
+ scb_s->prefix = scb_o->prefix;
+
+ /* We definitely have to flush the tlb if this scb never ran */
+ if (scb_s->ihcpu != 0xffffU)
+ scb_s->ihcpu = scb_o->ihcpu;
+
+ /* MVPG and Protection Exception Interpretation are always available */
+ scb_s->eca |= scb_o->eca & 0x01002000U;
+ /* Host-protection-interruption introduced with ESOP */
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
+ scb_s->ecb |= scb_o->ecb & 0x02U;
+ /* transactional execution */
+ if (test_kvm_facility(vcpu->kvm, 73)) {
+ /* remap the prefix if tx is toggled on */
+ if ((scb_o->ecb & 0x10U) && !had_tx)
+ prefix_unmapped(vsie_page);
+ scb_s->ecb |= scb_o->ecb & 0x10U;
+ }
+ /* SIMD */
+ if (test_kvm_facility(vcpu->kvm, 129)) {
+ scb_s->eca |= scb_o->eca & 0x00020000U;
+ scb_s->ecd |= scb_o->ecd & 0x20000000U;
+ }
+ /* Run-time-Instrumentation */
+ if (test_kvm_facility(vcpu->kvm, 64))
+ scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
+ scb_s->eca |= scb_o->eca & 0x00000001U;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
+ scb_s->eca |= scb_o->eca & 0x40000000U;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
+ scb_s->eca |= scb_o->eca & 0x80000000U;
+
+ prepare_ibc(vcpu, vsie_page);
+ rc = shadow_crycb(vcpu, vsie_page);
+out:
+ if (rc)
+ unshadow_scb(vcpu, vsie_page);
+ return rc;
+}
+
+void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
+ unsigned long end)
+{
+ struct kvm *kvm = gmap->private;
+ struct vsie_page *cur;
+ unsigned long prefix;
+ struct page *page;
+ int i;
+
+ if (!gmap_is_shadow(gmap))
+ return;
+ if (start >= 1UL << 31)
+ /* We are only interested in prefix pages */
+ return;
+
+ /*
+ * Only new shadow blocks are added to the list during runtime,
+ * therefore we can safely reference them all the time.
+ */
+ for (i = 0; i < kvm->arch.vsie.page_count; i++) {
+ page = READ_ONCE(kvm->arch.vsie.pages[i]);
+ if (!page)
+ continue;
+ cur = page_to_virt(page);
+ if (READ_ONCE(cur->gmap) != gmap)
+ continue;
+ prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
+ /* with mso/msl, the prefix lies at an offset */
+ prefix += cur->scb_s.mso;
+ if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
+ prefix_unmapped_sync(cur);
+ }
+}
+
+/*
+ * Map the first prefix page and if tx is enabled also the second prefix page.
+ *
+ * The prefix will be protected, a gmap notifier will inform about unmaps.
+ * The shadow scb must not be executed until the prefix is remapped, this is
+ * guaranteed by properly handling PROG_REQUEST.
+ *
+ * Returns: - 0 if successfully mapped or already mapped
+ * - > 0 if control has to be given to guest 2
+ * - -EAGAIN if the caller can retry immediately
+ * - -ENOMEM if out of memory
+ */
+static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
+ int rc;
+
+ if (prefix_is_mapped(vsie_page))
+ return 0;
+
+ /* mark it as mapped so we can catch any concurrent unmappers */
+ prefix_mapped(vsie_page);
+
+ /* with mso/msl, the prefix lies at offset *mso* */
+ prefix += scb_s->mso;
+
+ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
+ if (!rc && (scb_s->ecb & 0x10U))
+ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+ prefix + PAGE_SIZE);
+ /*
+ * We don't have to mprotect, we will be called for all unshadows.
+ * SIE will detect if protection applies and trigger a validity.
+ */
+ if (rc)
+ prefix_unmapped(vsie_page);
+ if (rc > 0 || rc == -EFAULT)
+ rc = set_validity_icpt(scb_s, 0x0037U);
+ return rc;
+}
+
+/*
+ * Pin the guest page given by gpa and set hpa to the pinned host address.
+ * Will always be pinned writable.
+ *
+ * Returns: - 0 on success
+ * - -EINVAL if the gpa is not valid guest storage
+ * - -ENOMEM if out of memory
+ */
+static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
+{
+ struct page *page;
+ hva_t hva;
+ int rc;
+
+ hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+ if (kvm_is_error_hva(hva))
+ return -EINVAL;
+ rc = get_user_pages_fast(hva, 1, 1, &page);
+ if (rc < 0)
+ return rc;
+ else if (rc != 1)
+ return -ENOMEM;
+ *hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
+ return 0;
+}
+
+/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
+static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
+{
+ struct page *page;
+
+ page = virt_to_page(hpa);
+ set_page_dirty_lock(page);
+ put_page(page);
+ /* mark the page always as dirty for migration */
+ mark_page_dirty(kvm, gpa_to_gfn(gpa));
+}
+
+/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
+static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ hpa_t hpa;
+ gpa_t gpa;
+
+ hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
+ if (hpa) {
+ gpa = scb_o->scaol & ~0xfUL;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
+ gpa |= (u64) scb_o->scaoh << 32;
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ scb_s->scaol = 0;
+ scb_s->scaoh = 0;
+ }
+
+ hpa = scb_s->itdba;
+ if (hpa) {
+ gpa = scb_o->itdba & ~0xffUL;
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ scb_s->itdba = 0;
+ }
+
+ hpa = scb_s->gvrd;
+ if (hpa) {
+ gpa = scb_o->gvrd & ~0x1ffUL;
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ scb_s->gvrd = 0;
+ }
+
+ hpa = scb_s->riccbd;
+ if (hpa) {
+ gpa = scb_o->riccbd & ~0x3fUL;
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ scb_s->riccbd = 0;
+ }
+}
+
+/*
+ * Instead of shadowing some blocks, we can simply forward them because the
+ * addresses in the scb are 64 bit long.
+ *
+ * This works as long as the data lies in one page. If blocks ever exceed one
+ * page, we have to fall back to shadowing.
+ *
+ * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
+ * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
+ *
+ * Returns: - 0 if all blocks were pinned.
+ * - > 0 if control has to be given to guest 2
+ * - -ENOMEM if out of memory
+ */
+static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ hpa_t hpa;
+ gpa_t gpa;
+ int rc = 0;
+
+ gpa = scb_o->scaol & ~0xfUL;
+ if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
+ gpa |= (u64) scb_o->scaoh << 32;
+ if (gpa) {
+ if (!(gpa & ~0x1fffUL))
+ rc = set_validity_icpt(scb_s, 0x0038U);
+ else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
+ rc = set_validity_icpt(scb_s, 0x0011U);
+ else if ((gpa & PAGE_MASK) !=
+ ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
+ rc = set_validity_icpt(scb_s, 0x003bU);
+ if (!rc) {
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL)
+ rc = set_validity_icpt(scb_s, 0x0034U);
+ }
+ if (rc)
+ goto unpin;
+ scb_s->scaoh = (u32)((u64)hpa >> 32);
+ scb_s->scaol = (u32)(u64)hpa;
+ }
+
+ gpa = scb_o->itdba & ~0xffUL;
+ if (gpa && (scb_s->ecb & 0x10U)) {
+ if (!(gpa & ~0x1fffU)) {
+ rc = set_validity_icpt(scb_s, 0x0080U);
+ goto unpin;
+ }
+ /* 256 bytes cannot cross page boundaries */
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL)
+ rc = set_validity_icpt(scb_s, 0x0080U);
+ if (rc)
+ goto unpin;
+ scb_s->itdba = hpa;
+ }
+
+ gpa = scb_o->gvrd & ~0x1ffUL;
+ if (gpa && (scb_s->eca & 0x00020000U) &&
+ !(scb_s->ecd & 0x20000000U)) {
+ if (!(gpa & ~0x1fffUL)) {
+ rc = set_validity_icpt(scb_s, 0x1310U);
+ goto unpin;
+ }
+ /*
+ * The 512 bytes of vector registers cannot cross page boundaries.
+ * If this block gets bigger, we have to shadow it.
+ */
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL)
+ rc = set_validity_icpt(scb_s, 0x1310U);
+ if (rc)
+ goto unpin;
+ scb_s->gvrd = hpa;
+ }
+
+ gpa = scb_o->riccbd & ~0x3fUL;
+ if (gpa && (scb_s->ecb3 & 0x01U)) {
+ if (!(gpa & ~0x1fffUL)) {
+ rc = set_validity_icpt(scb_s, 0x0043U);
+ goto unpin;
+ }
+ /* 64 bytes cannot cross page boundaries */
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL)
+ rc = set_validity_icpt(scb_s, 0x0043U);
+ /* Validity 0x0044 will be checked by SIE */
+ if (rc)
+ goto unpin;
+ scb_s->riccbd = hpa;
+ }
+ return 0;
+unpin:
+ unpin_blocks(vcpu, vsie_page);
+ return rc;
+}
+
+/* unpin the scb provided by guest 2, marking it as dirty */
+static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
+ gpa_t gpa)
+{
+ hpa_t hpa = (hpa_t) vsie_page->scb_o;
+
+ if (hpa)
+ unpin_guest_page(vcpu->kvm, gpa, hpa);
+ vsie_page->scb_o = NULL;
+}
+
+/*
+ * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
+ *
+ * Returns: - 0 if the scb was pinned.
+ * - > 0 if control has to be given to guest 2
+ * - -ENOMEM if out of memory
+ */
+static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
+ gpa_t gpa)
+{
+ hpa_t hpa;
+ int rc;
+
+ rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
+ if (rc == -EINVAL) {
+ rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (!rc)
+ rc = 1;
+ }
+ if (!rc)
+ vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
+ return rc;
+}
+
+/*
+ * Inject a fault into guest 2.
+ *
+ * Returns: - > 0 if control has to be given to guest 2
+ *          - < 0 if an error occurred during injection.
+ */
+static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
+ bool write_flag)
+{
+ struct kvm_s390_pgm_info pgm = {
+ .code = code,
+ .trans_exc_code =
+ /* 0-51: virtual address */
+ (vaddr & 0xfffffffffffff000UL) |
+ /* 52-53: store / fetch */
+ (((unsigned int) !write_flag) + 1) << 10,
+ /* 62-63: asce id (always primary == 0) */
+ .exc_access_id = 0, /* always primary */
+ .op_access_id = 0, /* not MVPG */
+ };
+ int rc;
+
+ if (code == PGM_PROTECTION)
+ pgm.trans_exc_code |= 0x4UL;
+
+ rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
+ return rc ? rc : 1;
+}
+
+/*
+ * Handle a fault during vsie execution on a gmap shadow.
+ *
+ * Returns: - 0 if the fault was resolved
+ * - > 0 if control has to be given to guest 2
+ * - < 0 if an error occurred
+ */
+static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ int rc;
+
+ if (current->thread.gmap_int_code == PGM_PROTECTION)
+ /* we can directly forward all protection exceptions */
+ return inject_fault(vcpu, PGM_PROTECTION,
+ current->thread.gmap_addr, 1);
+
+ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+ current->thread.gmap_addr);
+ if (rc > 0) {
+ rc = inject_fault(vcpu, rc,
+ current->thread.gmap_addr,
+ current->thread.gmap_write_flag);
+ if (rc >= 0)
+ vsie_page->fault_addr = current->thread.gmap_addr;
+ }
+ return rc;
+}
+
+/*
+ * Retry the previous fault that required guest 2 intervention. This avoids
+ * one superfluous SIE re-entry and direct exit.
+ *
+ * Will ignore any errors. The next SIE fault will do proper fault handling.
+ */
+static void handle_last_fault(struct kvm_vcpu *vcpu,
+ struct vsie_page *vsie_page)
+{
+ if (vsie_page->fault_addr)
+ kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+ vsie_page->fault_addr);
+ vsie_page->fault_addr = 0;
+}
+
+static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
+{
+ vsie_page->scb_s.icptcode = 0;
+}
+
+/* rewind the psw and clear the vsie icpt, so we can retry execution */
+static void retry_vsie_icpt(struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ int ilen = insn_length(scb_s->ipa >> 8);
+
+ /* take care of EXECUTE instructions */
+ if (scb_s->icptstatus & 1) {
+ ilen = (scb_s->icptstatus >> 4) & 0x6;
+ if (!ilen)
+ ilen = 4;
+ }
+ scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
+ clear_vsie_icpt(vsie_page);
+}
+
+/*
+ * Try to shadow + enable the guest 2 provided facility list.
+ * Retry instruction execution if enabled for and provided by guest 2.
+ *
+ * Returns: - 0 if handled (retry or guest 2 icpt)
+ * - > 0 if control has to be given to guest 2
+ */
+static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ __u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;
+
+ if (fac && test_kvm_facility(vcpu->kvm, 7)) {
+ retry_vsie_icpt(vsie_page);
+ if (read_guest_real(vcpu, fac, &vsie_page->fac,
+ sizeof(vsie_page->fac)))
+ return set_validity_icpt(scb_s, 0x1090U);
+ scb_s->fac = (__u32)(__u64) &vsie_page->fac;
+ }
+ return 0;
+}
+
+/*
+ * Run the vsie on a shadow scb and a shadow gmap, without any further
+ * sanity checks, handling SIE faults.
+ *
+ * Returns: - 0 everything went fine
+ * - > 0 if control has to be given to guest 2
+ * - < 0 if an error occurred
+ */
+static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ int rc;
+
+ handle_last_fault(vcpu, vsie_page);
+
+ if (need_resched())
+ schedule();
+ if (test_cpu_flag(CIF_MCCK_PENDING))
+ s390_handle_mcck();
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ local_irq_disable();
+ guest_enter_irqoff();
+ local_irq_enable();
+
+ rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+
+ local_irq_disable();
+ guest_exit_irqoff();
+ local_irq_enable();
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ if (rc > 0)
+ rc = 0; /* we could still have an icpt */
+ else if (rc == -EFAULT)
+ return handle_fault(vcpu, vsie_page);
+
+ switch (scb_s->icptcode) {
+ case ICPT_INST:
+ if (scb_s->ipa == 0xb2b0)
+ rc = handle_stfle(vcpu, vsie_page);
+ break;
+ case ICPT_STOP:
+ /* stop not requested by g2 - must have been a kick */
+ if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
+ clear_vsie_icpt(vsie_page);
+ break;
+ case ICPT_VALIDITY:
+ if ((scb_s->ipa & 0xf000) != 0xf000)
+ scb_s->ipa += 0x1000;
+ break;
+ }
+ return rc;
+}
+
+static void release_gmap_shadow(struct vsie_page *vsie_page)
+{
+ if (vsie_page->gmap)
+ gmap_put(vsie_page->gmap);
+ WRITE_ONCE(vsie_page->gmap, NULL);
+ prefix_unmapped(vsie_page);
+}
+
+static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
+ struct vsie_page *vsie_page)
+{
+ unsigned long asce;
+ union ctlreg0 cr0;
+ struct gmap *gmap;
+ int edat;
+
+ asce = vcpu->arch.sie_block->gcr[1];
+ cr0.val = vcpu->arch.sie_block->gcr[0];
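+ /* edat level: 0, 1 (facility 8, EDAT-1) or 2 (facilities 8+78, EDAT-2) */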
+ edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
+ edat += edat && test_kvm_facility(vcpu->kvm, 78);
+
+ /*
+ * ASCE or EDAT could have changed since last icpt, or the gmap
+ * we're holding has been unshadowed. If the gmap is still valid,
+ * we can safely reuse it.
+ */
+ if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
+ return 0;
+
+ /* release the old shadow - if any, and mark the prefix as unmapped */
+ release_gmap_shadow(vsie_page);
+ gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
+ if (IS_ERR(gmap))
+ return PTR_ERR(gmap);
+ gmap->private = vcpu->kvm;
+ WRITE_ONCE(vsie_page->gmap, gmap);
+ return 0;
+}
+
+/*
+ * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
+ */
+static void register_shadow_scb(struct kvm_vcpu *vcpu,
+ struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+
+ WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
+ /*
+ * External calls have to lead to a kick of the vcpu and therefore of
+ * the vsie, so simulate the wait state.
+ */
+ atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ /*
+ * We have to adjust the g3 epoch by the g2 epoch. The epoch will
+ * automatically be adjusted on tod clock changes via kvm_sync_clock.
+ */
+ preempt_disable();
+ scb_s->epoch += vcpu->kvm->arch.epoch;
+ preempt_enable();
+}
+
+/*
+ * Unregister a shadow scb from a VCPU.
+ */
+static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
+{
+ atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ WRITE_ONCE(vcpu->arch.vsie_block, NULL);
+}
+
+/*
+ * Run the vsie on a shadowed scb, managing the gmap shadow, handling
+ * prefix pages and faults.
+ *
+ * Returns: - 0 if no errors occurred
+ * - > 0 if control has to be given to guest 2
+ * - -ENOMEM if out of memory
+ */
+static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ int rc = 0;
+
+ while (1) {
+ rc = acquire_gmap_shadow(vcpu, vsie_page);
+ if (!rc)
+ rc = map_prefix(vcpu, vsie_page);
+ if (!rc) {
+ gmap_enable(vsie_page->gmap);
+ update_intervention_requests(vsie_page);
+ rc = do_vsie_run(vcpu, vsie_page);
+ gmap_enable(vcpu->arch.gmap);
+ }
+ atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);
+
+ if (rc == -EAGAIN)
+ rc = 0;
+ if (rc || scb_s->icptcode || signal_pending(current) ||
+ kvm_s390_vcpu_has_irq(vcpu, 0))
+ break;
+ }
+
+ if (rc == -EFAULT) {
+ /*
+ * Addressing exceptions are always presented as intercepts.
+ * As addressing exceptions are suppressing and our guest 3 PSW
+ * points at the responsible instruction, we have to
+ * forward the PSW and set the ilc. If we can't read the guest 3
+ * instruction, we can use an arbitrary ilc. Let's always use
+ * ilen = 4 for now, so we can avoid reading in guest 3 virtual
+ * memory. (we could also fake the shadow so the hardware
+ * handles it).
+ */
+ scb_s->icptcode = ICPT_PROGI;
+ scb_s->iprcc = PGM_ADDRESSING;
+ scb_s->pgmilc = 4;
+ scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
+ }
+ return rc;
+}
+
+/*
+ * Get or create a vsie page for a scb address.
+ *
+ * Returns: - address of a vsie page (cached or new one)
+ * - NULL if the same scb address is already used by another VCPU
+ * - ERR_PTR(-ENOMEM) if out of memory
+ */
+static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
+{
+ struct vsie_page *vsie_page;
+ struct page *page;
+ int nr_vcpus;
+
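+ /*
+ * A page reference count of 1 means the cached entry is currently
+ * unused; bumping it to 2 claims it. The scb is 512 byte aligned,
+ * so addr >> 9 is used as the radix tree index.
+ */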
+ rcu_read_lock();
+ page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
+ rcu_read_unlock();
+ if (page) {
+ if (page_ref_inc_return(page) == 2)
+ return page_to_virt(page);
+ page_ref_dec(page);
+ }
+
+ /*
+ * We want at least #online_vcpus shadows, so every VCPU can execute
+ * the VSIE in parallel.
+ */
+ nr_vcpus = atomic_read(&kvm->online_vcpus);
+
+ mutex_lock(&kvm->arch.vsie.mutex);
+ if (kvm->arch.vsie.page_count < nr_vcpus) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
+ if (!page) {
+ mutex_unlock(&kvm->arch.vsie.mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+ page_ref_inc(page);
+ kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
+ kvm->arch.vsie.page_count++;
+ } else {
+ /* reuse an existing entry that belongs to nobody */
+ while (true) {
+ page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
+ if (page_ref_inc_return(page) == 2)
+ break;
+ page_ref_dec(page);
+ kvm->arch.vsie.next++;
+ kvm->arch.vsie.next %= nr_vcpus;
+ }
+ radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+ }
+ page->index = addr;
+ /* double use of the same address */
+ if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
+ page_ref_dec(page);
+ mutex_unlock(&kvm->arch.vsie.mutex);
+ return NULL;
+ }
+ mutex_unlock(&kvm->arch.vsie.mutex);
+
+ vsie_page = page_to_virt(page);
+ memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
+ release_gmap_shadow(vsie_page);
+ vsie_page->fault_addr = 0;
+ vsie_page->scb_s.ihcpu = 0xffffU;
+ return vsie_page;
+}
+
+/* put a vsie page acquired via get_vsie_page */
+static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
+{
+ struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
+
+ page_ref_dec(page);
+}
+
+int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
+{
+ struct vsie_page *vsie_page;
+ unsigned long scb_addr;
+ int rc;
+
+ vcpu->stat.instruction_sie++;
+ if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
+ return -EOPNOTSUPP;
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+ BUILD_BUG_ON(sizeof(struct vsie_page) != 4096);
+ scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
+
+ /* 512 byte alignment */
+ if (unlikely(scb_addr & 0x1ffUL))
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
+ return 0;
+
+ vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
+ if (IS_ERR(vsie_page))
+ return PTR_ERR(vsie_page);
+ else if (!vsie_page)
+ /* double use of sie control block - simply do nothing */
+ return 0;
+
+ rc = pin_scb(vcpu, vsie_page, scb_addr);
+ if (rc)
+ goto out_put;
+ rc = shadow_scb(vcpu, vsie_page);
+ if (rc)
+ goto out_unpin_scb;
+ rc = pin_blocks(vcpu, vsie_page);
+ if (rc)
+ goto out_unshadow;
+ register_shadow_scb(vcpu, vsie_page);
+ rc = vsie_run(vcpu, vsie_page);
+ unregister_shadow_scb(vcpu);
+ unpin_blocks(vcpu, vsie_page);
+out_unshadow:
+ unshadow_scb(vcpu, vsie_page);
+out_unpin_scb:
+ unpin_scb(vcpu, vsie_page, scb_addr);
+out_put:
+ put_vsie_page(vcpu->kvm, vsie_page);
+
+ return rc < 0 ? rc : 0;
+}
+
+/* Init the vsie data structures. To be called when a vm is initialized. */
+void kvm_s390_vsie_init(struct kvm *kvm)
+{
+ mutex_init(&kvm->arch.vsie.mutex);
+ INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
+}
+
+/* Destroy the vsie data structures. To be called when a vm is destroyed. */
+void kvm_s390_vsie_destroy(struct kvm *kvm)
+{
+ struct vsie_page *vsie_page;
+ struct page *page;
+ int i;
+
+ mutex_lock(&kvm->arch.vsie.mutex);
+ for (i = 0; i < kvm->arch.vsie.page_count; i++) {
+ page = kvm->arch.vsie.pages[i];
+ kvm->arch.vsie.pages[i] = NULL;
+ vsie_page = page_to_virt(page);
+ release_gmap_shadow(vsie_page);
+ /* free the radix tree entry */
+ radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
+ __free_page(page);
+ }
+ kvm->arch.vsie.page_count = 0;
+ mutex_unlock(&kvm->arch.vsie.mutex);
+}
+
+void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
+
+ /*
+ * Even if the VCPU lets go of the shadow sie block reference, it is
+ * still valid in the cache. So we can safely kick it.
+ */
+ if (scb) {
+ atomic_or(PROG_BLOCK_SIE, &scb->prog20);
+ if (scb->prog0c & PROG_IN_SIE)
+ atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
+ }
+}
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index d96596128e9f..f481fcde067b 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ check_object_size(to, n, false);
if (static_branch_likely(&have_mvcos))
return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ check_object_size(from, n, true);
if (static_branch_likely(&have_mvcos))
return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 25783dc3c813..a58bca62a93b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -418,6 +418,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
(struct gmap *) S390_lowcore.gmap : NULL;
if (gmap) {
current->thread.gmap_addr = address;
+ current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
+ current->thread.gmap_int_code = regs->int_code & 0xffff;
address = __gmap_translate(gmap, address);
if (address == -EFAULT) {
fault = VM_FAULT_BADMAP;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 063c721ec0dc..2ce6bb3bab32 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -20,14 +20,16 @@
#include <asm/gmap.h>
#include <asm/tlb.h>
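+/*
+ * Marker in page->index of a shadow table page: the "table" shadows a
+ * contiguous guest memory block (fake table) instead of a real guest table.
+ */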
+#define GMAP_SHADOW_FAKE_TABLE 1ULL
+
/**
- * gmap_alloc - allocate a guest address space
+ * gmap_alloc - allocate and initialize a guest address space
* @mm: pointer to the parent mm_struct
* @limit: maximum address of the gmap address space
*
* Returns a guest address space structure.
*/
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
+static struct gmap *gmap_alloc(unsigned long limit)
{
struct gmap *gmap;
struct page *page;
@@ -55,10 +57,14 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
if (!gmap)
goto out;
INIT_LIST_HEAD(&gmap->crst_list);
+ INIT_LIST_HEAD(&gmap->children);
+ INIT_LIST_HEAD(&gmap->pt_list);
INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
+ INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
spin_lock_init(&gmap->guest_table_lock);
- gmap->mm = mm;
+ spin_lock_init(&gmap->shadow_lock);
+ atomic_set(&gmap->ref_count, 1);
page = alloc_pages(GFP_KERNEL, 2);
if (!page)
goto out_free;
@@ -70,9 +76,6 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
gmap->asce = atype | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | __pa(table);
gmap->asce_end = limit;
- down_write(&mm->mmap_sem);
- list_add(&gmap->list, &mm->context.gmap_list);
- up_write(&mm->mmap_sem);
return gmap;
out_free:
@@ -80,7 +83,28 @@ out_free:
out:
return NULL;
}
-EXPORT_SYMBOL_GPL(gmap_alloc);
+
+/**
+ * gmap_create - create a guest address space
+ * @mm: pointer to the parent mm_struct
+ * @limit: maximum size of the gmap address space
+ *
+ * Returns a guest address space structure.
+ */
+struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
+{
+ struct gmap *gmap;
+
+ gmap = gmap_alloc(limit);
+ if (!gmap)
+ return NULL;
+ gmap->mm = mm;
+ spin_lock(&mm->context.gmap_lock);
+ list_add_rcu(&gmap->list, &mm->context.gmap_list);
+ spin_unlock(&mm->context.gmap_lock);
+ return gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap)
{
@@ -114,31 +138,117 @@ static void gmap_radix_tree_free(struct radix_tree_root *root)
} while (nr > 0);
}
+static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
+{
+ struct gmap_rmap *rmap, *rnext, *head;
+ struct radix_tree_iter iter;
+ unsigned long indices[16];
+ unsigned long index;
+ void **slot;
+ int i, nr;
+
+ /* A radix tree is freed by deleting all of its entries */
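+ /*
+ * Collect up to 16 indices per walk and delete them outside of the
+ * iteration, freeing the whole rmap chain hanging off each slot.
+ */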
+ index = 0;
+ do {
+ nr = 0;
+ radix_tree_for_each_slot(slot, root, &iter, index) {
+ indices[nr] = iter.index;
+ if (++nr == 16)
+ break;
+ }
+ for (i = 0; i < nr; i++) {
+ index = indices[i];
+ head = radix_tree_delete(root, index);
+ gmap_for_each_rmap_safe(rmap, rnext, head)
+ kfree(rmap);
+ }
+ } while (nr > 0);
+}
+
/**
* gmap_free - free a guest address space
* @gmap: pointer to the guest address space structure
+ *
+ * No locks required. There are no references to this gmap anymore.
*/
-void gmap_free(struct gmap *gmap)
+static void gmap_free(struct gmap *gmap)
{
struct page *page, *next;
- /* Flush tlb. */
- if (MACHINE_HAS_IDTE)
- __tlb_flush_idte(gmap->asce);
- else
- __tlb_flush_global();
-
+ /* Flush tlb of all gmaps (if not already done for shadows) */
+ if (!(gmap_is_shadow(gmap) && gmap->removed))
+ gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
__free_pages(page, 2);
gmap_radix_tree_free(&gmap->guest_to_host);
gmap_radix_tree_free(&gmap->host_to_guest);
- down_write(&gmap->mm->mmap_sem);
- list_del(&gmap->list);
- up_write(&gmap->mm->mmap_sem);
+
+ /* Free additional data for a shadow gmap */
+ if (gmap_is_shadow(gmap)) {
+ /* Free all page tables. */
+ list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
+ page_table_free_pgste(page);
+ gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
+ /* Release reference to the parent */
+ gmap_put(gmap->parent);
+ }
+
kfree(gmap);
}
-EXPORT_SYMBOL_GPL(gmap_free);
+
+/**
+ * gmap_get - increase reference counter for guest address space
+ * @gmap: pointer to the guest address space structure
+ *
+ * Returns the gmap pointer
+ */
+struct gmap *gmap_get(struct gmap *gmap)
+{
+ atomic_inc(&gmap->ref_count);
+ return gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_get);
+
+/**
+ * gmap_put - decrease reference counter for guest address space
+ * @gmap: pointer to the guest address space structure
+ *
+ * If the reference counter reaches zero the guest address space is freed.
+ */
+void gmap_put(struct gmap *gmap)
+{
+ if (atomic_dec_return(&gmap->ref_count) == 0)
+ gmap_free(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_put);
+
+/**
+ * gmap_remove - remove a guest address space but do not free it yet
+ * @gmap: pointer to the guest address space structure
+ */
+void gmap_remove(struct gmap *gmap)
+{
+ struct gmap *sg, *next;
+
+ /* Remove all shadow gmaps linked to this gmap */
+ if (!list_empty(&gmap->children)) {
+ spin_lock(&gmap->shadow_lock);
+ list_for_each_entry_safe(sg, next, &gmap->children, list) {
+ list_del(&sg->list);
+ gmap_put(sg);
+ }
+ spin_unlock(&gmap->shadow_lock);
+ }
+ /* Remove gmap from the per-mm list */
+ spin_lock(&gmap->mm->context.gmap_lock);
+ list_del_rcu(&gmap->list);
+ spin_unlock(&gmap->mm->context.gmap_lock);
+ synchronize_rcu();
+ /* Put reference */
+ gmap_put(gmap);
+}
+EXPORT_SYMBOL_GPL(gmap_remove);
/**
* gmap_enable - switch primary space to the guest address space
@@ -160,6 +270,17 @@ void gmap_disable(struct gmap *gmap)
}
EXPORT_SYMBOL_GPL(gmap_disable);
+/**
+ * gmap_get_enabled - get a pointer to the currently enabled gmap
+ *
+ * Returns a pointer to the currently enabled gmap. NULL if none is enabled.
+ */
+struct gmap *gmap_get_enabled(void)
+{
+ return (struct gmap *) S390_lowcore.gmap;
+}
+EXPORT_SYMBOL_GPL(gmap_get_enabled);
+
/*
* gmap_alloc_table is assumed to be called with mmap_sem held
*/
@@ -175,7 +296,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
return -ENOMEM;
new = (unsigned long *) page_to_phys(page);
crst_table_init(new, init);
- spin_lock(&gmap->mm->page_table_lock);
+ spin_lock(&gmap->guest_table_lock);
if (*table & _REGION_ENTRY_INVALID) {
list_add(&page->lru, &gmap->crst_list);
*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
@@ -183,7 +304,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
page->index = gaddr;
page = NULL;
}
- spin_unlock(&gmap->mm->page_table_lock);
+ spin_unlock(&gmap->guest_table_lock);
if (page)
__free_pages(page, 2);
return 0;
@@ -219,6 +340,7 @@ static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
unsigned long *entry;
int flush = 0;
+ BUG_ON(gmap_is_shadow(gmap));
spin_lock(&gmap->guest_table_lock);
entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
@@ -258,6 +380,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
unsigned long off;
int flush;
+ BUG_ON(gmap_is_shadow(gmap));
if ((to | len) & (PMD_SIZE - 1))
return -EINVAL;
if (len == 0 || to + len < to)
@@ -289,6 +412,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long off;
int flush;
+ BUG_ON(gmap_is_shadow(gmap));
if ((from | to | len) & (PMD_SIZE - 1))
return -EINVAL;
if (len == 0 || from + len < from || to + len < to ||
@@ -326,6 +450,8 @@ EXPORT_SYMBOL_GPL(gmap_map_segment);
* This function does not establish potentially missing page table entries.
* The mmap_sem of the mm that belongs to the address space must be held
* when this function gets called.
+ *
+ * Note: Can also be called for shadow gmaps.
*/
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
@@ -333,6 +459,7 @@ unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
vmaddr = (unsigned long)
radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
+ /* Note: guest_to_host is empty for a shadow gmap */
return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
@@ -369,11 +496,13 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
struct gmap *gmap;
int flush;
- list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
if (flush)
gmap_flush_tlb(gmap);
}
+ rcu_read_unlock();
}
/**
@@ -397,6 +526,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
pmd_t *pmd;
int rc;
+ BUG_ON(gmap_is_shadow(gmap));
/* Create higher level tables in the gmap page table */
table = gmap->table;
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
@@ -552,116 +682,1412 @@ static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
/**
- * gmap_register_ipte_notifier - register a pte invalidation callback
+ * gmap_register_pte_notifier - register a pte invalidation callback
* @nb: pointer to the gmap notifier block
*/
-void gmap_register_ipte_notifier(struct gmap_notifier *nb)
+void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
spin_lock(&gmap_notifier_lock);
- list_add(&nb->list, &gmap_notifier_list);
+ list_add_rcu(&nb->list, &gmap_notifier_list);
spin_unlock(&gmap_notifier_lock);
}
-EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
+EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
/**
- * gmap_unregister_ipte_notifier - remove a pte invalidation callback
+ * gmap_unregister_pte_notifier - remove a pte invalidation callback
* @nb: pointer to the gmap notifier block
*/
-void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
+void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
spin_lock(&gmap_notifier_lock);
- list_del_init(&nb->list);
+ list_del_rcu(&nb->list);
spin_unlock(&gmap_notifier_lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
+
+/**
+ * gmap_call_notifier - call all registered invalidation callbacks
+ * @gmap: pointer to guest mapping meta data structure
+ * @start: start virtual address in the guest address space
+ * @end: end virtual address in the guest address space
+ */
+static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
+ unsigned long end)
+{
+ struct gmap_notifier *nb;
+
+ list_for_each_entry(nb, &gmap_notifier_list, list)
+ nb->notifier_call(gmap, start, end);
+}
+
+/**
+ * gmap_table_walk - walk the gmap page tables
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @level: page table level to stop at
+ *
+ * Returns a table entry pointer for the given guest address and @level
+ * @level=0 : returns a pointer to a page table entry (or NULL)
+ * @level=1 : returns a pointer to a segment table entry (or NULL)
+ * @level=2 : returns a pointer to a region-3 table entry (or NULL)
+ * @level=3 : returns a pointer to a region-2 table entry (or NULL)
+ * @level=4 : returns a pointer to a region-1 table entry (or NULL)
+ *
+ * Returns NULL if the gmap page tables could not be walked to the
+ * requested level.
+ *
+ * Note: Can also be called for shadow gmaps.
+ */
+static inline unsigned long *gmap_table_walk(struct gmap *gmap,
+ unsigned long gaddr, int level)
+{
+ unsigned long *table;
+
+ if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
+ return NULL;
+ if (gmap_is_shadow(gmap) && gmap->removed)
+ return NULL;
+ if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
+ return NULL;
+ table = gmap->table;
+ switch (gmap->asce & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table += (gaddr >> 53) & 0x7ff;
+ if (level == 4)
+ break;
+ if (*table & _REGION_ENTRY_INVALID)
+ return NULL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* Fallthrough */
+ case _ASCE_TYPE_REGION2:
+ table += (gaddr >> 42) & 0x7ff;
+ if (level == 3)
+ break;
+ if (*table & _REGION_ENTRY_INVALID)
+ return NULL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* Fallthrough */
+ case _ASCE_TYPE_REGION3:
+ table += (gaddr >> 31) & 0x7ff;
+ if (level == 2)
+ break;
+ if (*table & _REGION_ENTRY_INVALID)
+ return NULL;
+ table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+ /* Fallthrough */
+ case _ASCE_TYPE_SEGMENT:
+ table += (gaddr >> 20) & 0x7ff;
+ if (level == 1)
+ break;
+ if (*table & _REGION_ENTRY_INVALID)
+ return NULL;
+ table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+ table += (gaddr >> 12) & 0xff;
+ }
+ return table;
+}
+
+/**
+ * gmap_pte_op_walk - walk the gmap page table, get the page table lock
+ * and return the pte pointer
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @ptl: pointer to the spinlock pointer
+ *
+ * Returns a pointer to the locked pte for a guest address, or NULL
+ *
+ * Note: Can also be called for shadow gmaps.
+ */
+static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
+ spinlock_t **ptl)
+{
+ unsigned long *table;
+
+ if (gmap_is_shadow(gmap))
+ spin_lock(&gmap->guest_table_lock);
+ /* Walk the gmap page table, lock and get pte pointer */
+ table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
+ if (!table || *table & _SEGMENT_ENTRY_INVALID) {
+ if (gmap_is_shadow(gmap))
+ spin_unlock(&gmap->guest_table_lock);
+ return NULL;
+ }
+ if (gmap_is_shadow(gmap)) {
+ *ptl = &gmap->guest_table_lock;
+ return pte_offset_map((pmd_t *) table, gaddr);
+ }
+ return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
+}
+
+/**
+ * gmap_pte_op_fixup - force a page in and connect the gmap page table
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @vmaddr: address in the host process address space
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ *
+ * Returns 0 if the caller can retry __gmap_translate (might fail again),
+ * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
+ * up or connecting the gmap page table.
+ */
+static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
+ unsigned long vmaddr, int prot)
+{
+ struct mm_struct *mm = gmap->mm;
+ unsigned int fault_flags;
+ bool unlocked = false;
+
+ BUG_ON(gmap_is_shadow(gmap));
+ fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
+ if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
+ return -EFAULT;
+ if (unlocked)
+ /* lost mmap_sem, caller has to retry __gmap_translate */
+ return 0;
+ /* Connect the page tables */
+ return __gmap_link(gmap, gaddr, vmaddr);
}
-EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
/**
- * gmap_ipte_notify - mark a range of ptes for invalidation notification
+ * gmap_pte_op_end - release the page table lock
+ * @ptl: pointer to the spinlock pointer
+ */
+static void gmap_pte_op_end(spinlock_t *ptl)
+{
+ spin_unlock(ptl);
+}
+
+/*
+ * gmap_protect_range - remove access rights to memory and set pgste bits
* @gmap: pointer to guest mapping meta data structure
* @gaddr: virtual address in the guest address space
* @len: size of area
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bits: pgste notification bits to set
*
- * Returns 0 if for each page in the given range a gmap mapping exists and
- * the invalidation notification could be set. If the gmap mapping is missing
- * for one or more pages -EFAULT is returned. If no memory could be allocated
- * -ENOMEM is returned. This function establishes missing page table entries.
+ * Returns 0 if successfully protected, -ENOMEM if out of memory and
+ * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
+ *
+ * Called with gmap->mm->mmap_sem in read.
+ *
+ * Note: Can also be called for shadow gmaps.
*/
-int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
+static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
+ unsigned long len, int prot, unsigned long bits)
{
- unsigned long addr;
+ unsigned long vmaddr;
spinlock_t *ptl;
pte_t *ptep;
- bool unlocked;
- int rc = 0;
+ int rc;
+
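+ /*
+ * Protect one page at a time; if the gmap entry or the protection
+ * cannot be established, resolve the host mapping via fixup and retry
+ * the same guest address.
+ */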
+ while (len) {
+ rc = -EAGAIN;
+ ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
+ if (ptep) {
+ rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
+ gmap_pte_op_end(ptl);
+ }
+ if (rc) {
+ vmaddr = __gmap_translate(gmap, gaddr);
+ if (IS_ERR_VALUE(vmaddr))
+ return vmaddr;
+ rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
+ if (rc)
+ return rc;
+ continue;
+ }
+ gaddr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ }
+ return 0;
+}
+
+/**
+ * gmap_mprotect_notify - change access rights for a range of ptes and
+ * call the notifier if any pte changes again
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @len: size of area
+ * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ *
+ * Returns 0 if for each page in the given range a gmap mapping exists,
+ * the new access rights could be set and the notifier could be armed.
+ * If the gmap mapping is missing for one or more pages -EFAULT is
+ * returned. If no memory could be allocated -ENOMEM is returned.
+ * This function establishes missing page table entries.
+ */
+int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
+ unsigned long len, int prot)
+{
+ int rc;
- if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
+ if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
+ return -EINVAL;
+ if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
down_read(&gmap->mm->mmap_sem);
- while (len) {
- unlocked = false;
- /* Convert gmap address and connect the page tables */
- addr = __gmap_translate(gmap, gaddr);
- if (IS_ERR_VALUE(addr)) {
- rc = addr;
+ rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
+ up_read(&gmap->mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
+
+/**
+ * gmap_read_table - get an unsigned long value from a guest page table using
+ * absolute addressing, without marking the page referenced.
+ * @gmap: pointer to guest mapping meta data structure
+ * @gaddr: virtual address in the guest address space
+ * @val: pointer to the unsigned long value to return
+ *
+ * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
+ * if reading using the virtual address failed.
+ *
+ * Called with gmap->mm->mmap_sem in read.
+ */
+int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
+{
+ unsigned long address, vmaddr;
+ spinlock_t *ptl;
+ pte_t *ptep, pte;
+ int rc;
+
+ while (1) {
+ rc = -EAGAIN;
+ ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
+ address = pte_val(pte) & PAGE_MASK;
+ address += gaddr & ~PAGE_MASK;
+ *val = *(unsigned long *) address;
+ pte_val(*ptep) |= _PAGE_YOUNG;
+ /* Do *NOT* clear the _PAGE_INVALID bit! */
+ rc = 0;
+ }
+ gmap_pte_op_end(ptl);
+ }
+ if (!rc)
+ break;
+ vmaddr = __gmap_translate(gmap, gaddr);
+ if (IS_ERR_VALUE(vmaddr)) {
+ rc = vmaddr;
break;
}
- /* Get the page mapped */
- if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
- &unlocked)) {
- rc = -EFAULT;
+ rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
+ if (rc)
break;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_read_table);
+
+/**
+ * gmap_insert_rmap - add an rmap to the host_to_rmap radix tree
+ * @sg: pointer to the shadow guest address space structure
+ * @vmaddr: vm address associated with the rmap
+ * @rmap: pointer to the rmap structure
+ *
+ * Called with the sg->guest_table_lock
+ */
+static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
+ struct gmap_rmap *rmap)
+{
+ void **slot;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
+ if (slot) {
+ rmap->next = radix_tree_deref_slot_protected(slot,
+ &sg->guest_table_lock);
+ radix_tree_replace_slot(slot, rmap);
+ } else {
+ rmap->next = NULL;
+ radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
+ rmap);
+ }
+}
+
+/**
+ * gmap_protect_rmap - modify access rights to memory and create an rmap
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow gmap
+ * @paddr: address in the parent guest address space
+ * @len: length of the memory area to protect
+ * @prot: indicates access rights: none, read-only or read-write
+ *
+ * Returns 0 if successfully protected and the rmap was created, -ENOMEM
+ * if out of memory and -EFAULT if paddr is invalid.
+ */
+static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
+ unsigned long paddr, unsigned long len, int prot)
+{
+ struct gmap *parent;
+ struct gmap_rmap *rmap;
+ unsigned long vmaddr;
+ spinlock_t *ptl;
+ pte_t *ptep;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ parent = sg->parent;
+ while (len) {
+ vmaddr = __gmap_translate(parent, paddr);
+ if (IS_ERR_VALUE(vmaddr))
+ return vmaddr;
+ rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
+ if (!rmap)
+ return -ENOMEM;
+ rmap->raddr = raddr;
+ rc = radix_tree_preload(GFP_KERNEL);
+ if (rc) {
+ kfree(rmap);
+ return rc;
+ }
+ rc = -EAGAIN;
+ ptep = gmap_pte_op_walk(parent, paddr, &ptl);
+ if (ptep) {
+ spin_lock(&sg->guest_table_lock);
+ rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
+ PGSTE_VSIE_BIT);
+ if (!rc)
+ gmap_insert_rmap(sg, vmaddr, rmap);
+ spin_unlock(&sg->guest_table_lock);
+ gmap_pte_op_end(ptl);
}
- /* While trying to map mmap_sem got unlocked. Let us retry */
- if (unlocked)
+ radix_tree_preload_end();
+ if (rc) {
+ kfree(rmap);
+ rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
+ if (rc)
+ return rc;
continue;
- rc = __gmap_link(gmap, gaddr, addr);
+ }
+ paddr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ }
+ return 0;
+}
+
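+/*
+ * The low bits of an rmap address encode the level of the shadow table
+ * that is protected by the rmap entry; the remaining bits give the
+ * address in the shadow guest address space covered by that table.
+ */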
+#define _SHADOW_RMAP_MASK 0x7
+#define _SHADOW_RMAP_REGION1 0x5
+#define _SHADOW_RMAP_REGION2 0x4
+#define _SHADOW_RMAP_REGION3 0x3
+#define _SHADOW_RMAP_SEGMENT 0x2
+#define _SHADOW_RMAP_PGTABLE 0x1
+
+/**
+ * gmap_idte_one - invalidate a single region or segment table entry
+ * @asce: region or segment table *origin* + table-type bits
+ * @vaddr: virtual address to identify the table entry to flush
+ *
+ * The invalid bit of a single region or segment table entry is set
+ * and the associated TLB entries depending on the entry are flushed.
+ * The table-type of the @asce identifies the portion of the @vaddr
+ * that is used as the invalidation index.
+ */
+static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
+{
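+ /* IDTE, opcode 0xb98e */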
+ asm volatile(
+ " .insn rrf,0xb98e0000,%0,%1,0,0"
+ : : "a" (asce), "a" (vaddr) : "cc", "memory");
+}
+
+/**
+ * gmap_unshadow_page - remove a page from a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
+{
+ unsigned long *table;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
+ if (!table || *table & _PAGE_INVALID)
+ return;
+ gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
+ ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
+}
+
+/**
+ * __gmap_unshadow_pgt - remove all entries from a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ * @pgt: pointer to the start of a shadow page table
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
+ unsigned long *pgt)
+{
+ int i;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ for (i = 0; i < 256; i++, raddr += 1UL << 12)
+ pgt[i] = _PAGE_INVALID;
+}
+
+/**
+ * gmap_unshadow_pgt - remove a shadow page table from a segment entry
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: address in the shadow guest address space
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
+{
+ unsigned long sto, *ste, *pgt;
+ struct page *page;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
+ if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
+ return;
+ gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
+ sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
+ gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
+ pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
+ *ste = _SEGMENT_ENTRY_EMPTY;
+ __gmap_unshadow_pgt(sg, raddr, pgt);
+ /* Free page table */
+ page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ page_table_free_pgste(page);
+}
+
+/**
+ * __gmap_unshadow_sgt - remove all entries from a shadow segment table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ * @sgt: pointer to the start of a shadow segment table
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
+ unsigned long *sgt)
+{
+ unsigned long asce, *pgt;
+ struct page *page;
+ int i;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
+ for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
+ if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
+ continue;
+ pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
+ sgt[i] = _SEGMENT_ENTRY_EMPTY;
+ __gmap_unshadow_pgt(sg, raddr, pgt);
+ /* Free page table */
+ page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ page_table_free_pgste(page);
+ }
+}
+
+/**
+ * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
+{
+ unsigned long r3o, *r3e, *sgt;
+ struct page *page;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
+ if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
+ return;
+ gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
+ r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
+ gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
+ sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
+ *r3e = _REGION3_ENTRY_EMPTY;
+ __gmap_unshadow_sgt(sg, raddr, sgt);
+ /* Free segment table */
+ page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ __free_pages(page, 2);
+}
+
+/**
+ * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: address in the shadow guest address space
+ * @r3t: pointer to the start of a shadow region-3 table
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
+ unsigned long *r3t)
+{
+ unsigned long asce, *sgt;
+ struct page *page;
+ int i;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
+ for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
+ if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
+ continue;
+ sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
+ r3t[i] = _REGION3_ENTRY_EMPTY;
+ __gmap_unshadow_sgt(sg, raddr, sgt);
+ /* Free segment table */
+ page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ __free_pages(page, 2);
+ }
+}
+
+/**
+ * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
+{
+ unsigned long r2o, *r2e, *r3t;
+ struct page *page;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
+ if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
+ return;
+ gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
+ r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
+ gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
+ r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
+ *r2e = _REGION2_ENTRY_EMPTY;
+ __gmap_unshadow_r3t(sg, raddr, r3t);
+ /* Free region 3 table */
+ page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ __free_pages(page, 2);
+}
+
+/**
+ * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ * @r2t: pointer to the start of a shadow region-2 table
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
+ unsigned long *r2t)
+{
+ unsigned long asce, *r3t;
+ struct page *page;
+ int i;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
+ for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
+ if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
+ continue;
+ r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
+ r2t[i] = _REGION2_ENTRY_EMPTY;
+ __gmap_unshadow_r3t(sg, raddr, r3t);
+ /* Free region 3 table */
+ page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ __free_pages(page, 2);
+ }
+}
+
+/**
+ * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
+{
+ unsigned long r1o, *r1e, *r2t;
+ struct page *page;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
+ if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
+ return;
+ gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
+ r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
+ gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
+ r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
+ *r1e = _REGION1_ENTRY_EMPTY;
+ __gmap_unshadow_r2t(sg, raddr, r2t);
+ /* Free region 2 table */
+ page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ __free_pages(page, 2);
+}
+
+/**
+ * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
+ * @sg: pointer to the shadow guest address space structure
+ * @raddr: rmap address in the shadow guest address space
+ * @r1t: pointer to the start of a shadow region-1 table
+ *
+ * Called with the sg->guest_table_lock
+ */
+static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
+ unsigned long *r1t)
+{
+ unsigned long asce, *r2t;
+ struct page *page;
+ int i;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
+ for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
+ if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
+ continue;
+ r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
+ __gmap_unshadow_r2t(sg, raddr, r2t);
+ /* Clear entry and flush translation r1t -> r2t */
+ gmap_idte_one(asce, raddr);
+ r1t[i] = _REGION1_ENTRY_EMPTY;
+ /* Free region 2 table */
+ page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
+ list_del(&page->lru);
+ __free_pages(page, 2);
+ }
+}
+
+/**
+ * gmap_unshadow - remove a shadow page table completely
+ * @sg: pointer to the shadow guest address space structure
+ *
+ * Called with sg->guest_table_lock
+ */
+static void gmap_unshadow(struct gmap *sg)
+{
+ unsigned long *table;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ if (sg->removed)
+ return;
+ sg->removed = 1;
+ gmap_call_notifier(sg, 0, -1UL);
+ gmap_flush_tlb(sg);
+ table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
+ switch (sg->asce & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ __gmap_unshadow_r1t(sg, 0, table);
+ break;
+ case _ASCE_TYPE_REGION2:
+ __gmap_unshadow_r2t(sg, 0, table);
+ break;
+ case _ASCE_TYPE_REGION3:
+ __gmap_unshadow_r3t(sg, 0, table);
+ break;
+ case _ASCE_TYPE_SEGMENT:
+ __gmap_unshadow_sgt(sg, 0, table);
+ break;
+ }
+}
+
+/**
+ * gmap_find_shadow - find a specific asce in the list of shadow tables
+ * @parent: pointer to the parent gmap
+ * @asce: ASCE for which the shadow table is created
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * Returns the pointer to a gmap if a shadow table with the given asce is
+ * already available, ERR_PTR(-EAGAIN) if another one is just being created,
+ * otherwise NULL
+ */
+static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
+ int edat_level)
+{
+ struct gmap *sg;
+
+ list_for_each_entry(sg, &parent->children, list) {
+ if (sg->orig_asce != asce || sg->edat_level != edat_level ||
+ sg->removed)
+ continue;
+ if (!sg->initialized)
+ return ERR_PTR(-EAGAIN);
+ atomic_inc(&sg->ref_count);
+ return sg;
+ }
+ return NULL;
+}
+
+/**
+ * gmap_shadow_valid - check if a shadow guest address space matches the
+ * given properties and is still valid
+ * @sg: pointer to the shadow guest address space structure
+ * @asce: ASCE for which the shadow table is requested
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * Returns 1 if the gmap shadow is still valid and matches the given
+ * properties, the caller can continue using it. Returns 0 otherwise, the
+ * caller has to request a new shadow gmap in this case.
+ *
+ */
+int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
+{
+ if (sg->removed)
+ return 0;
+ return sg->orig_asce == asce && sg->edat_level == edat_level;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_valid);
+
+/**
+ * gmap_shadow - create/find a shadow guest address space
+ * @parent: pointer to the parent gmap
+ * @asce: ASCE for which the shadow table is created
+ * @edat_level: edat level to be used for the shadow translation
+ *
+ * The pages of the top level page table referred to by the asce parameter
+ * will be set to read-only and marked in the PGSTEs of the kvm process.
+ * The shadow table will be removed automatically on any change to the
+ * PTE mapping for the source table.
+ *
+ * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
+ * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
+ * parent gmap table could not be protected.
+ */
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
+ int edat_level)
+{
+ struct gmap *sg, *new;
+ unsigned long limit;
+ int rc;
+
+ BUG_ON(gmap_is_shadow(parent));
+ spin_lock(&parent->shadow_lock);
+ sg = gmap_find_shadow(parent, asce, edat_level);
+ spin_unlock(&parent->shadow_lock);
+ if (sg)
+ return sg;
+ /* Create a new shadow gmap */
+ limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
+ if (asce & _ASCE_REAL_SPACE)
+ limit = -1UL;
+ new = gmap_alloc(limit);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+ new->mm = parent->mm;
+ new->parent = gmap_get(parent);
+ new->orig_asce = asce;
+ new->edat_level = edat_level;
+ new->initialized = false;
+ spin_lock(&parent->shadow_lock);
+ /* Recheck if another CPU created the same shadow */
+ sg = gmap_find_shadow(parent, asce, edat_level);
+ if (sg) {
+ spin_unlock(&parent->shadow_lock);
+ gmap_free(new);
+ return sg;
+ }
+ if (asce & _ASCE_REAL_SPACE) {
+ /* only allow one real-space gmap shadow */
+ list_for_each_entry(sg, &parent->children, list) {
+ if (sg->orig_asce & _ASCE_REAL_SPACE) {
+ spin_lock(&sg->guest_table_lock);
+ gmap_unshadow(sg);
+ spin_unlock(&sg->guest_table_lock);
+ list_del(&sg->list);
+ gmap_put(sg);
+ break;
+ }
+ }
+ }
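+ /* one reference for the parent's children list, one for the caller */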
+ atomic_set(&new->ref_count, 2);
+ list_add(&new->list, &parent->children);
+ if (asce & _ASCE_REAL_SPACE) {
+ /* nothing to protect, return right away */
+ new->initialized = true;
+ spin_unlock(&parent->shadow_lock);
+ return new;
+ }
+ spin_unlock(&parent->shadow_lock);
+ /* protect after insertion, so it will get properly invalidated */
+ down_read(&parent->mm->mmap_sem);
+ rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
+ ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
+ PROT_READ, PGSTE_VSIE_BIT);
+ up_read(&parent->mm->mmap_sem);
+ spin_lock(&parent->shadow_lock);
+ new->initialized = true;
+ if (rc) {
+ list_del(&new->list);
+ gmap_free(new);
+ new = ERR_PTR(rc);
+ }
+ spin_unlock(&parent->shadow_lock);
+ return new;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow);
+
+/**
+ * gmap_shadow_r2t - create an empty shadow region 2 table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @r2t: parent gmap address of the region 2 table to get shadowed
+ * @fake: r2t references contiguous guest memory block, not a r2t
+ *
+ * The r2t parameter specifies the address of the source table. The
+ * four pages of the source table are made read-only in the parent gmap
+ * address space. A write to the source table area @r2t will automatically
+ * remove the shadow r2 table and all of its descendants.
+ *
+ * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
+ * shadow table structure is incomplete, -ENOMEM if out of memory and
+ * -EFAULT if an address in the parent gmap could not be resolved.
+ *
+ * Called with sg->mm->mmap_sem in read.
+ */
+int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+ int fake)
+{
+ unsigned long raddr, origin, offset, len;
+ unsigned long *s_r2t, *table;
+ struct page *page;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ /* Allocate a shadow region second table */
+ page = alloc_pages(GFP_KERNEL, 2);
+ if (!page)
+ return -ENOMEM;
+ page->index = r2t & _REGION_ENTRY_ORIGIN;
+ if (fake)
+ page->index |= GMAP_SHADOW_FAKE_TABLE;
+ s_r2t = (unsigned long *) page_to_phys(page);
+ /* Install shadow region second table */
+ spin_lock(&sg->guest_table_lock);
+ table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
+ if (!table) {
+ rc = -EAGAIN; /* Race with unshadow */
+ goto out_free;
+ }
+ if (!(*table & _REGION_ENTRY_INVALID)) {
+ rc = 0; /* Already established */
+ goto out_free;
+ } else if (*table & _REGION_ENTRY_ORIGIN) {
+ rc = -EAGAIN; /* Race with shadow */
+ goto out_free;
+ }
+ crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
+ /* mark as invalid as long as the parent table is not protected */
+ *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
+ _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
+ if (sg->edat_level >= 1)
+ *table |= (r2t & _REGION_ENTRY_PROTECT);
+ list_add(&page->lru, &sg->crst_list);
+ if (fake) {
+ /* nothing to protect for fake tables */
+ *table &= ~_REGION_ENTRY_INVALID;
+ spin_unlock(&sg->guest_table_lock);
+ return 0;
+ }
+ spin_unlock(&sg->guest_table_lock);
+ /* Make r2t read-only in parent gmap page table */
+ raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
+ origin = r2t & _REGION_ENTRY_ORIGIN;
+ offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
+ len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ spin_lock(&sg->guest_table_lock);
+ if (!rc) {
+ table = gmap_table_walk(sg, saddr, 4);
+ if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
+ (unsigned long) s_r2t)
+ rc = -EAGAIN; /* Race with unshadow */
+ else
+ *table &= ~_REGION_ENTRY_INVALID;
+ } else {
+ gmap_unshadow_r2t(sg, raddr);
+ }
+ spin_unlock(&sg->guest_table_lock);
+ return rc;
+out_free:
+ spin_unlock(&sg->guest_table_lock);
+ __free_pages(page, 2);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
+
+/**
+ * gmap_shadow_r3t - create a shadow region 3 table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @r3t: parent gmap address of the region 3 table to get shadowed
+ * @fake: r3t references contiguous guest memory block, not a r3t
+ *
+ * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
+ * shadow table structure is incomplete, -ENOMEM if out of memory and
+ * -EFAULT if an address in the parent gmap could not be resolved.
+ *
+ * Called with sg->mm->mmap_sem in read.
+ */
+int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+ int fake)
+{
+ unsigned long raddr, origin, offset, len;
+ unsigned long *s_r3t, *table;
+ struct page *page;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ /* Allocate a shadow region third table */
+ page = alloc_pages(GFP_KERNEL, 2);
+ if (!page)
+ return -ENOMEM;
+ page->index = r3t & _REGION_ENTRY_ORIGIN;
+ if (fake)
+ page->index |= GMAP_SHADOW_FAKE_TABLE;
+ s_r3t = (unsigned long *) page_to_phys(page);
+ /* Install shadow region third table */
+ spin_lock(&sg->guest_table_lock);
+ table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
+ if (!table) {
+ rc = -EAGAIN; /* Race with unshadow */
+ goto out_free;
+ }
+ if (!(*table & _REGION_ENTRY_INVALID)) {
+ rc = 0; /* Already established */
+ goto out_free;
+ } else if (*table & _REGION_ENTRY_ORIGIN) {
+ rc = -EAGAIN; /* Race with shadow */
+ goto out_free;
+ }
+ crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
+ /* mark as invalid as long as the parent table is not protected */
+ *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
+ _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
+ if (sg->edat_level >= 1)
+ *table |= (r3t & _REGION_ENTRY_PROTECT);
+ list_add(&page->lru, &sg->crst_list);
+ if (fake) {
+ /* nothing to protect for fake tables */
+ *table &= ~_REGION_ENTRY_INVALID;
+ spin_unlock(&sg->guest_table_lock);
+ return 0;
+ }
+ spin_unlock(&sg->guest_table_lock);
+ /* Make r3t read-only in parent gmap page table */
+ raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
+ origin = r3t & _REGION_ENTRY_ORIGIN;
+ offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
+ len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ spin_lock(&sg->guest_table_lock);
+ if (!rc) {
+ table = gmap_table_walk(sg, saddr, 3);
+ if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
+ (unsigned long) s_r3t)
+ rc = -EAGAIN; /* Race with unshadow */
+ else
+ *table &= ~_REGION_ENTRY_INVALID;
+ } else {
+ gmap_unshadow_r3t(sg, raddr);
+ }
+ spin_unlock(&sg->guest_table_lock);
+ return rc;
+out_free:
+ spin_unlock(&sg->guest_table_lock);
+ __free_pages(page, 2);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
+
+/**
+ * gmap_shadow_sgt - create a shadow segment table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @sgt: parent gmap address of the segment table to get shadowed
+ * @fake: sgt references contiguous guest memory block, not a sgt
+ *
+ * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
+ * shadow table structure is incomplete, -ENOMEM if out of memory and
+ * -EFAULT if an address in the parent gmap could not be resolved.
+ *
+ * Called with sg->mm->mmap_sem in read.
+ */
+int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+ int fake)
+{
+ unsigned long raddr, origin, offset, len;
+ unsigned long *s_sgt, *table;
+ struct page *page;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
+ /* Allocate a shadow segment table */
+ page = alloc_pages(GFP_KERNEL, 2);
+ if (!page)
+ return -ENOMEM;
+ page->index = sgt & _REGION_ENTRY_ORIGIN;
+ if (fake)
+ page->index |= GMAP_SHADOW_FAKE_TABLE;
+ s_sgt = (unsigned long *) page_to_phys(page);
+ /* Install shadow segment table */
+ spin_lock(&sg->guest_table_lock);
+ table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
+ if (!table) {
+ rc = -EAGAIN; /* Race with unshadow */
+ goto out_free;
+ }
+ if (!(*table & _REGION_ENTRY_INVALID)) {
+ rc = 0; /* Already established */
+ goto out_free;
+ } else if (*table & _REGION_ENTRY_ORIGIN) {
+ rc = -EAGAIN; /* Race with shadow */
+ goto out_free;
+ }
+ crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
+ /* mark as invalid as long as the parent table is not protected */
+ *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
+ _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
+ if (sg->edat_level >= 1)
+ *table |= sgt & _REGION_ENTRY_PROTECT;
+ list_add(&page->lru, &sg->crst_list);
+ if (fake) {
+ /* nothing to protect for fake tables */
+ *table &= ~_REGION_ENTRY_INVALID;
+ spin_unlock(&sg->guest_table_lock);
+ return 0;
+ }
+ spin_unlock(&sg->guest_table_lock);
+ /* Make sgt read-only in parent gmap page table */
+ raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
+ origin = sgt & _REGION_ENTRY_ORIGIN;
+ offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
+ len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ spin_lock(&sg->guest_table_lock);
+ if (!rc) {
+ table = gmap_table_walk(sg, saddr, 2);
+ if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
+ (unsigned long) s_sgt)
+ rc = -EAGAIN; /* Race with unshadow */
+ else
+ *table &= ~_REGION_ENTRY_INVALID;
+ } else {
+ gmap_unshadow_sgt(sg, raddr);
+ }
+ spin_unlock(&sg->guest_table_lock);
+ return rc;
+out_free:
+ spin_unlock(&sg->guest_table_lock);
+ __free_pages(page, 2);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
+
+/**
+ * gmap_shadow_pgt_lookup - find a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: the address in the shadow guest address space
+ * @pgt: parent gmap address of the page table to get shadowed
+ * @dat_protection: if the pgtable is marked as protected by dat
+ * @fake: pgt references contiguous guest memory block, not a pgtable
+ *
+ * Returns 0 if the shadow page table was found and -EAGAIN if the page
+ * table was not found.
+ *
+ * Called with sg->mm->mmap_sem in read.
+ */
+int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
+ unsigned long *pgt, int *dat_protection,
+ int *fake)
+{
+ unsigned long *table;
+ struct page *page;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ spin_lock(&sg->guest_table_lock);
+ table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
+ if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
+ /* Shadow page tables are full pages (pte+pgste) */
+ page = pfn_to_page(*table >> PAGE_SHIFT);
+ *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+ *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
+ *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+ rc = 0;
+ } else {
+ rc = -EAGAIN;
+ }
+ spin_unlock(&sg->guest_table_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
+
+/**
+ * gmap_shadow_pgt - instantiate a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @pgt: parent gmap address of the page table to get shadowed
+ * @fake: pgt references contiguous guest memory block, not a pgtable
+ *
+ * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
+ * shadow table structure is incomplete, -ENOMEM if out of memory,
+ * -EFAULT if an address in the parent gmap could not be resolved and
+ *
+ * Called with gmap->mm->mmap_sem in read
+ */
+int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
+ int fake)
+{
+ unsigned long raddr, origin;
+ unsigned long *s_pgt, *table;
+ struct page *page;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
+ /* Allocate a shadow page table */
+ page = page_table_alloc_pgste(sg->mm);
+ if (!page)
+ return -ENOMEM;
+ page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
+ if (fake)
+ page->index |= GMAP_SHADOW_FAKE_TABLE;
+ s_pgt = (unsigned long *) page_to_phys(page);
+ /* Install shadow page table */
+ spin_lock(&sg->guest_table_lock);
+ table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
+ if (!table) {
+ rc = -EAGAIN; /* Race with unshadow */
+ goto out_free;
+ }
+ if (!(*table & _SEGMENT_ENTRY_INVALID)) {
+ rc = 0; /* Already established */
+ goto out_free;
+ } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
+ rc = -EAGAIN; /* Race with shadow */
+ goto out_free;
+ }
+ /* mark as invalid as long as the parent table is not protected */
+ *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
+ (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
+ list_add(&page->lru, &sg->pt_list);
+ if (fake) {
+ /* nothing to protect for fake tables */
+ *table &= ~_SEGMENT_ENTRY_INVALID;
+ spin_unlock(&sg->guest_table_lock);
+ return 0;
+ }
+ spin_unlock(&sg->guest_table_lock);
+ /* Make pgt read-only in parent gmap page table (not the pgste) */
+ raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
+ origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
+ rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
+ spin_lock(&sg->guest_table_lock);
+ if (!rc) {
+ table = gmap_table_walk(sg, saddr, 1);
+ if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
+ (unsigned long) s_pgt)
+ rc = -EAGAIN; /* Race with unshadow */
+ else
+ *table &= ~_SEGMENT_ENTRY_INVALID;
+ } else {
+ gmap_unshadow_pgt(sg, raddr);
+ }
+ spin_unlock(&sg->guest_table_lock);
+ return rc;
+out_free:
+ spin_unlock(&sg->guest_table_lock);
+ page_table_free_pgste(page);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
+
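
The two helpers above are meant to be used together: a lookup that fails with -EAGAIN tells the caller that no shadow page table exists yet at that address and one has to be instantiated. A minimal caller sketch, with sg->mm->mmap_sem held in read as the comments above require; the function name, the way parent_pgt and fake are obtained, and the surrounding flow are assumptions for illustration only, not code from this patch:

/* Hypothetical sketch, not part of this patch. */
static int resolve_shadow_pgt(struct gmap *sg, unsigned long saddr,
			      unsigned long parent_pgt, int fake)
{
	unsigned long pgt;
	int dat_protection, pgt_fake;
	int rc;

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection,
				    &pgt_fake);
	if (rc == -EAGAIN) {
		/* Nothing shadowed here yet: build the shadow page
		 * table, then look it up again. */
		rc = gmap_shadow_pgt(sg, saddr, parent_pgt, fake);
		if (!rc)
			rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt,
						    &dat_protection,
						    &pgt_fake);
	}
	return rc;
}
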
+/**
+ * gmap_shadow_page - create a shadow page mapping
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @pte: pte in parent gmap address space to get shadowed
+ *
+ * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
+ * shadow table structure is incomplete, -ENOMEM if out of memory and
+ * -EFAULT if an address in the parent gmap could not be resolved.
+ *
+ * Called with sg->mm->mmap_sem in read.
+ */
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
+{
+ struct gmap *parent;
+ struct gmap_rmap *rmap;
+ unsigned long vmaddr, paddr;
+ spinlock_t *ptl;
+ pte_t *sptep, *tptep;
+ int prot;
+ int rc;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ parent = sg->parent;
+ prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
+
+ rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
+ if (!rmap)
+ return -ENOMEM;
+ rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
+
+ while (1) {
+ paddr = pte_val(pte) & PAGE_MASK;
+ vmaddr = __gmap_translate(parent, paddr);
+ if (IS_ERR_VALUE(vmaddr)) {
+ rc = vmaddr;
+ break;
+ }
+ rc = radix_tree_preload(GFP_KERNEL);
if (rc)
break;
- /* Walk the process page table, lock and get pte pointer */
- ptep = get_locked_pte(gmap->mm, addr, &ptl);
- VM_BUG_ON(!ptep);
- /* Set notification bit in the pgste of the pte */
- if ((pte_val(*ptep) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
- ptep_set_notify(gmap->mm, addr, ptep);
- gaddr += PAGE_SIZE;
- len -= PAGE_SIZE;
+ rc = -EAGAIN;
+ sptep = gmap_pte_op_walk(parent, paddr, &ptl);
+ if (sptep) {
+ spin_lock(&sg->guest_table_lock);
+ /* Get page table pointer */
+ tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
+ if (!tptep) {
+ spin_unlock(&sg->guest_table_lock);
+ gmap_pte_op_end(ptl);
+ radix_tree_preload_end();
+ break;
+ }
+ rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
+ if (rc > 0) {
+ /* Success and a new mapping */
+ gmap_insert_rmap(sg, vmaddr, rmap);
+ rmap = NULL;
+ rc = 0;
+ }
+ gmap_pte_op_end(ptl);
+ spin_unlock(&sg->guest_table_lock);
}
- pte_unmap_unlock(ptep, ptl);
+ radix_tree_preload_end();
+ if (!rc)
+ break;
+ rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
+ if (rc)
+ break;
}
- up_read(&gmap->mm->mmap_sem);
+ kfree(rmap);
return rc;
}
-EXPORT_SYMBOL_GPL(gmap_ipte_notify);
+EXPORT_SYMBOL_GPL(gmap_shadow_page);
+
+/**
+ * gmap_shadow_notify - handle notifications for shadow gmap
+ * @sg: pointer to the shadow guest address space structure
+ * @vmaddr: affected host virtual address
+ * @offset: guest address offset of the page within its segment
+ * @pte: pointer to the invalidated pte
+ *
+ * Called with sg->parent->shadow_lock held.
+ */
+static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
+ unsigned long offset, pte_t *pte)
+{
+ struct gmap_rmap *rmap, *rnext, *head;
+ unsigned long gaddr, start, end, bits, raddr;
+ unsigned long *table;
+
+ BUG_ON(!gmap_is_shadow(sg));
+ spin_lock(&sg->parent->guest_table_lock);
+ table = radix_tree_lookup(&sg->parent->host_to_guest,
+ vmaddr >> PMD_SHIFT);
+ gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
+ spin_unlock(&sg->parent->guest_table_lock);
+ if (!table)
+ return;
+
+ spin_lock(&sg->guest_table_lock);
+ if (sg->removed) {
+ spin_unlock(&sg->guest_table_lock);
+ return;
+ }
+ /* Check for top level table */
+ start = sg->orig_asce & _ASCE_ORIGIN;
+ end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
+ if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
+ gaddr < end) {
+ /* The complete shadow table has to go */
+ gmap_unshadow(sg);
+ spin_unlock(&sg->guest_table_lock);
+ list_del(&sg->list);
+ gmap_put(sg);
+ return;
+ }
+	/* Remove the page table tree for one specific entry */
+ head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
+ gmap_for_each_rmap_safe(rmap, rnext, head) {
+ bits = rmap->raddr & _SHADOW_RMAP_MASK;
+ raddr = rmap->raddr ^ bits;
+ switch (bits) {
+ case _SHADOW_RMAP_REGION1:
+ gmap_unshadow_r2t(sg, raddr);
+ break;
+ case _SHADOW_RMAP_REGION2:
+ gmap_unshadow_r3t(sg, raddr);
+ break;
+ case _SHADOW_RMAP_REGION3:
+ gmap_unshadow_sgt(sg, raddr);
+ break;
+ case _SHADOW_RMAP_SEGMENT:
+ gmap_unshadow_pgt(sg, raddr);
+ break;
+ case _SHADOW_RMAP_PGTABLE:
+ gmap_unshadow_page(sg, raddr);
+ break;
+ }
+ kfree(rmap);
+ }
+ spin_unlock(&sg->guest_table_lock);
+}
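
The rmap entries consumed in the loop above pack a level tag into the low bits of a page-aligned shadow address (gmap_shadow_page() stores (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE, for example), and gmap_shadow_notify() splits the two halves apart again with a mask and an XOR. A standalone illustration of that encoding, using placeholder tag values since the real _SHADOW_RMAP_* constants are defined in the s390 gmap header and are not part of this hunk:

#include <stdio.h>

#define DEMO_PAGE_MASK		(~0xfffUL)
#define DEMO_RMAP_MASK		0x7UL	/* placeholder: low bits are free */
#define DEMO_RMAP_PGTABLE	0x1UL	/* placeholder tag value */

int main(void)
{
	unsigned long saddr = 0x12345678UL;
	/* Encode: page-aligned address plus a level tag in the low bits. */
	unsigned long raddr = (saddr & DEMO_PAGE_MASK) | DEMO_RMAP_PGTABLE;
	/* Decode, exactly as gmap_shadow_notify() does. */
	unsigned long bits = raddr & DEMO_RMAP_MASK;
	unsigned long addr = raddr ^ bits;

	printf("tag=%#lx addr=%#lx\n", bits, addr);	/* tag=0x1 addr=0x12345000 */
	return 0;
}
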
/**
* ptep_notify - call all invalidation callbacks for a specific pte.
* @mm: pointer to the process mm_struct
* @addr: virtual address in the process address space
* @pte: pointer to the page table entry
+ * @bits: bits from the pgste that caused the notify call
*
* This function is assumed to be called with the page table lock held
* for the pte to notify.
*/
-void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
+void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
+ pte_t *pte, unsigned long bits)
{
unsigned long offset, gaddr;
unsigned long *table;
- struct gmap_notifier *nb;
- struct gmap *gmap;
+ struct gmap *gmap, *sg, *next;
offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
offset = offset * (4096 / sizeof(pte_t));
- spin_lock(&gmap_notifier_lock);
- list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+ if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
+ spin_lock(&gmap->shadow_lock);
+ list_for_each_entry_safe(sg, next,
+ &gmap->children, list)
+ gmap_shadow_notify(sg, vmaddr, offset, pte);
+ spin_unlock(&gmap->shadow_lock);
+ }
+ if (!(bits & PGSTE_IN_BIT))
+ continue;
+ spin_lock(&gmap->guest_table_lock);
table = radix_tree_lookup(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT);
- if (!table)
- continue;
- gaddr = __gmap_segment_gaddr(table) + offset;
- list_for_each_entry(nb, &gmap_notifier_list, list)
- nb->notifier_call(gmap, gaddr);
+ if (table)
+ gaddr = __gmap_segment_gaddr(table) + offset;
+ spin_unlock(&gmap->guest_table_lock);
+ if (table)
+ gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
}
- spin_unlock(&gmap_notifier_lock);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e19d853883be..cd404aa3931c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -11,6 +11,12 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+/*
+ * If the bit selected by single-bit bitmask "a" is set within "x", move
+ * it to the position indicated by single-bit bitmask "b".
+ */
+#define move_set_bit(x, a, b) (((x) & (a)) >> ilog2(a) << ilog2(b))
+
static inline unsigned long __pte_to_rste(pte_t pte)
{
unsigned long rste;
@@ -37,13 +43,22 @@ static inline unsigned long __pte_to_rste(pte_t pte)
*/
if (pte_present(pte)) {
rste = pte_val(pte) & PAGE_MASK;
- rste |= (pte_val(pte) & _PAGE_READ) >> 4;
- rste |= (pte_val(pte) & _PAGE_WRITE) >> 4;
- rste |= (pte_val(pte) & _PAGE_INVALID) >> 5;
- rste |= (pte_val(pte) & _PAGE_PROTECT);
- rste |= (pte_val(pte) & _PAGE_DIRTY) << 10;
- rste |= (pte_val(pte) & _PAGE_YOUNG) << 10;
- rste |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13;
+ rste |= move_set_bit(pte_val(pte), _PAGE_READ,
+ _SEGMENT_ENTRY_READ);
+ rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
+ _SEGMENT_ENTRY_WRITE);
+ rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
+ _SEGMENT_ENTRY_INVALID);
+ rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
+ _SEGMENT_ENTRY_PROTECT);
+ rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
+ _SEGMENT_ENTRY_DIRTY);
+ rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
+ _SEGMENT_ENTRY_YOUNG);
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
+ _SEGMENT_ENTRY_SOFT_DIRTY);
+#endif
} else
rste = _SEGMENT_ENTRY_INVALID;
return rste;
@@ -82,13 +97,22 @@ static inline pte_t __rste_to_pte(unsigned long rste)
if (present) {
pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_READ) << 4;
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_WRITE) << 4;
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_INVALID) << 5;
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_PROTECT);
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_DIRTY) >> 10;
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_YOUNG) >> 10;
- pte_val(pte) |= (rste & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13;
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ,
+ _PAGE_READ);
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE,
+ _PAGE_WRITE);
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID,
+ _PAGE_INVALID);
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT,
+ _PAGE_PROTECT);
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY,
+ _PAGE_DIRTY);
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG,
+ _PAGE_YOUNG);
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
+ _PAGE_DIRTY);
+#endif
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
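
The move_set_bit() macro introduced above replaces the hard-coded shift counts of the old code: it isolates one bit with mask a and relocates it to the bit position of mask b, so the conversion no longer has to know the numerical distance between the pte and segment-entry bit layouts. A small standalone demo; the two mask values below are placeholders picked for the example, not the real _PAGE_DIRTY and _SEGMENT_ENTRY_DIRTY definitions:

#include <stdio.h>

/* For a single-bit mask, ilog2() is just the index of that bit. */
#define ilog2(x)		(31 - __builtin_clz(x))
/* Same definition as in the patch above. */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))

int main(void)
{
	unsigned int demo_pte_dirty  = 0x0010;	/* assume bit 4 */
	unsigned int demo_rste_dirty = 0x2000;	/* assume bit 13 */
	unsigned int pte = 0x0003 | demo_pte_dirty;

	/* The dirty bit moves from bit 4 to bit 13; other bits drop out. */
	printf("%#x\n", move_set_bit(pte, demo_pte_dirty, demo_rste_dirty));
	/* prints 0x2000 */
	return 0;
}
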
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index e2565d2d0c32..995f78532cc2 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -137,6 +137,29 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
return new;
}
+#ifdef CONFIG_PGSTE
+
+struct page *page_table_alloc_pgste(struct mm_struct *mm)
+{
+ struct page *page;
+ unsigned long *table;
+
+ page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+ if (page) {
+ table = (unsigned long *) page_to_phys(page);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ }
+ return page;
+}
+
+void page_table_free_pgste(struct page *page)
+{
+ __free_page(page);
+}
+
+#endif /* CONFIG_PGSTE */
+
/*
* page table entry allocation/free routines.
*/
@@ -149,7 +172,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
/* Try to get a fragment of a 4K page as a 2K page table */
if (!mm_alloc_pgste(mm)) {
table = NULL;
- spin_lock_bh(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.pgtable_lock);
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
@@ -164,7 +187,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
list_del(&page->lru);
}
}
- spin_unlock_bh(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.pgtable_lock);
if (table)
return table;
}
@@ -187,9 +210,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
/* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1);
clear_table(table, _PAGE_INVALID, PAGE_SIZE);
- spin_lock_bh(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.pgtable_lock);
list_add(&page->lru, &mm->context.pgtable_list);
- spin_unlock_bh(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.pgtable_lock);
}
return table;
}
@@ -203,13 +226,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
if (!mm_alloc_pgste(mm)) {
/* Free 2K page table fragment of a 4K page */
bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
- spin_lock_bh(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.pgtable_lock);
mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
if (mask & 3)
list_add(&page->lru, &mm->context.pgtable_list);
else
list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.pgtable_lock);
if (mask != 0)
return;
}
@@ -235,13 +258,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
return;
}
bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
- spin_lock_bh(&mm->context.list_lock);
+ spin_lock_bh(&mm->context.pgtable_lock);
mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
if (mask & 3)
list_add_tail(&page->lru, &mm->context.pgtable_list);
else
list_del(&page->lru);
- spin_unlock_bh(&mm->context.list_lock);
+ spin_unlock_bh(&mm->context.pgtable_lock);
table = (unsigned long *) (__pa(table) | (1U << bit));
tlb_remove_table(tlb, table);
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b98d1a152d46..5f092015aaa7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -174,14 +174,17 @@ static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
return pgste;
}
-static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep, pgste_t pgste)
+static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- if (pgste_val(pgste) & PGSTE_IN_BIT) {
- pgste_val(pgste) &= ~PGSTE_IN_BIT;
- ptep_notify(mm, addr, ptep);
+ unsigned long bits;
+
+ bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
+ if (bits) {
+ pgste_val(pgste) ^= bits;
+ ptep_notify(mm, addr, ptep, bits);
}
#endif
return pgste;
@@ -194,7 +197,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
- pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
+ pgste = pgste_pte_notify(mm, addr, ptep, pgste);
}
return pgste;
}
@@ -459,6 +462,90 @@ void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
preempt_enable();
}
+/**
+ * ptep_force_prot - change access rights of a locked pte
+ * @mm: pointer to the process mm_struct
+ * @addr: virtual address in the guest address space
+ * @ptep: pointer to the page table entry
+ * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
+ * @bit: pgste bit to set (e.g. for notification)
+ *
+ * Returns 0 if the access rights were changed and -EAGAIN if the current
+ * and requested access rights are incompatible.
+ */
+int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, int prot, unsigned long bit)
+{
+ pte_t entry;
+ pgste_t pgste;
+ int pte_i, pte_p;
+
+ pgste = pgste_get_lock(ptep);
+ entry = *ptep;
+ /* Check pte entry after all locks have been acquired */
+ pte_i = pte_val(entry) & _PAGE_INVALID;
+ pte_p = pte_val(entry) & _PAGE_PROTECT;
+ if ((pte_i && (prot != PROT_NONE)) ||
+ (pte_p && (prot & PROT_WRITE))) {
+ pgste_set_unlock(ptep, pgste);
+ return -EAGAIN;
+ }
+ /* Change access rights and set pgste bit */
+ if (prot == PROT_NONE && !pte_i) {
+ ptep_flush_direct(mm, addr, ptep);
+ pgste = pgste_update_all(entry, pgste, mm);
+ pte_val(entry) |= _PAGE_INVALID;
+ }
+ if (prot == PROT_READ && !pte_p) {
+ ptep_flush_direct(mm, addr, ptep);
+ pte_val(entry) &= ~_PAGE_INVALID;
+ pte_val(entry) |= _PAGE_PROTECT;
+ }
+ pgste_val(pgste) |= bit;
+ pgste = pgste_set_pte(ptep, pgste, entry);
+ pgste_set_unlock(ptep, pgste);
+ return 0;
+}
+
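
ptep_force_prot() only reports -EAGAIN; resolving the conflict is left to the caller. In the gmap code above that pattern is a loop: walk to the pte, try to tighten the protection, and on failure fix up the parent mapping with gmap_pte_op_fixup() and try again. A hedged sketch of that shape, with an invented function name and error handling reduced to the bare minimum, not code from this patch:

/* Hypothetical sketch, not part of this patch. */
static int protect_one_page(struct gmap *gmap, unsigned long gaddr,
			    unsigned long vmaddr, unsigned long bit)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep,
					     PROT_READ, bit);
			gmap_pte_op_end(ptl);
		}
		if (rc != -EAGAIN)
			return rc;
		/* Fault the page in with the required access and retry. */
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			return rc;
	}
}
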
+int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
+ pte_t *sptep, pte_t *tptep, pte_t pte)
+{
+ pgste_t spgste, tpgste;
+ pte_t spte, tpte;
+ int rc = -EAGAIN;
+
+ if (!(pte_val(*tptep) & _PAGE_INVALID))
+ return 0; /* already shadowed */
+ spgste = pgste_get_lock(sptep);
+ spte = *sptep;
+ if (!(pte_val(spte) & _PAGE_INVALID) &&
+ !((pte_val(spte) & _PAGE_PROTECT) &&
+ !(pte_val(pte) & _PAGE_PROTECT))) {
+ pgste_val(spgste) |= PGSTE_VSIE_BIT;
+ tpgste = pgste_get_lock(tptep);
+ pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
+ (pte_val(pte) & _PAGE_PROTECT);
+ /* don't touch the storage key - it belongs to parent pgste */
+ tpgste = pgste_set_pte(tptep, tpgste, tpte);
+ pgste_set_unlock(tptep, tpgste);
+ rc = 1;
+ }
+ pgste_set_unlock(sptep, spgste);
+ return rc;
+}
+
+void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
+{
+ pgste_t pgste;
+
+ pgste = pgste_get_lock(ptep);
+ /* notifier is called by the caller */
+ ptep_flush_direct(mm, saddr, ptep);
+ /* don't touch the storage key - it belongs to parent pgste */
+ pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
+ pgste_set_unlock(ptep, pgste);
+}
+
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
if (!non_swap_entry(entry))
@@ -532,7 +619,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
pgste_val(pgste) &= ~PGSTE_UC_BIT;
pte = *ptep;
if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
- pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
+ pgste = pgste_pte_notify(mm, addr, ptep, pgste);
__ptep_ipte(addr, ptep);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT;
@@ -555,12 +642,9 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
pgste_t old, new;
pte_t *ptep;
- down_read(&mm->mmap_sem);
ptep = get_locked_pte(mm, addr, &ptl);
- if (unlikely(!ptep)) {
- up_read(&mm->mmap_sem);
+ if (unlikely(!ptep))
return -EFAULT;
- }
new = old = pgste_get_lock(ptep);
pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
@@ -587,45 +671,100 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
pgste_set_unlock(ptep, new);
pte_unmap_unlock(ptep, ptl);
- up_read(&mm->mmap_sem);
return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+/**
+ * Conditionally set a guest storage key (handling csske).
+ * oldkey will be updated when either mr or mc is set and a pointer is given.
+ *
+ * Returns 0 if the guest storage key did not need to be updated, 1 if the guest
+ * storage key was updated and -EFAULT on access errors.
+ */
+int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char key, unsigned char *oldkey,
+ bool nq, bool mr, bool mc)
+{
+ unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
+ int rc;
+
+ /* we can drop the pgste lock between getting and setting the key */
+ if (mr | mc) {
+ rc = get_guest_storage_key(current->mm, addr, &tmp);
+ if (rc)
+ return rc;
+ if (oldkey)
+ *oldkey = tmp;
+ if (!mr)
+ mask |= _PAGE_REFERENCED;
+ if (!mc)
+ mask |= _PAGE_CHANGED;
+ if (!((tmp ^ key) & mask))
+ return 0;
+ }
+ rc = set_guest_storage_key(current->mm, addr, key, nq);
+ return rc < 0 ? rc : 1;
+}
+EXPORT_SYMBOL(cond_set_guest_storage_key);
+
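
Since this patch also drops the down_read()/up_read() pair from set_guest_storage_key() and get_guest_storage_key(), taking mmap_sem becomes the caller's job. A hedged sketch of how an emulation path might drive cond_set_guest_storage_key() and interpret its three-way return value; the function name and surrounding flow are assumptions for illustration only:

/* Hypothetical sketch, not part of this patch. */
static int emulate_conditional_sske(struct mm_struct *mm, unsigned long addr,
				    unsigned char key, bool nq, bool mr, bool mc)
{
	unsigned char oldkey = 0;
	int rc;

	down_read(&mm->mmap_sem);
	rc = cond_set_guest_storage_key(mm, addr, key, &oldkey, nq, mr, mc);
	up_read(&mm->mmap_sem);
	if (rc < 0)
		return rc;	/* -EFAULT: no mapping for addr */
	/*
	 * rc == 0: the existing key already matched under the mr/mc mask,
	 *          nothing was written.
	 * rc == 1: the key was replaced; oldkey holds the previous value
	 *          when mr or mc was set.
	 */
	return rc;
}
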
+/**
+ * Reset a guest reference bit (rrbe), returning the reference and changed bit.
+ *
+ * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
+ */
+int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
- unsigned char key;
spinlock_t *ptl;
- pgste_t pgste;
+ pgste_t old, new;
pte_t *ptep;
+ int cc = 0;
- down_read(&mm->mmap_sem);
ptep = get_locked_pte(mm, addr, &ptl);
- if (unlikely(!ptep)) {
- up_read(&mm->mmap_sem);
+ if (unlikely(!ptep))
return -EFAULT;
- }
- pgste = pgste_get_lock(ptep);
- if (pte_val(*ptep) & _PAGE_INVALID) {
- key = (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
- key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
- key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
- key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
- } else {
- key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+ new = old = pgste_get_lock(ptep);
+ /* Reset guest reference bit only */
+ pgste_val(new) &= ~PGSTE_GR_BIT;
- /* Reflect guest's logical view, not physical */
- if (pgste_val(pgste) & PGSTE_GR_BIT)
- key |= _PAGE_REFERENCED;
- if (pgste_val(pgste) & PGSTE_GC_BIT)
- key |= _PAGE_CHANGED;
+ if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+ cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
+ /* Merge real referenced bit into host-set */
+ pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
}
+ /* Reflect guest's logical view, not physical */
+ cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
+ /* Changing the guest storage key is considered a change of the page */
+ if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
+ pgste_val(new) |= PGSTE_UC_BIT;
+
+ pgste_set_unlock(ptep, new);
+ pte_unmap_unlock(ptep, ptl);
+ return 0;
+}
+EXPORT_SYMBOL(reset_guest_reference_bit);
+
+int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char *key)
+{
+ spinlock_t *ptl;
+ pgste_t pgste;
+	pte_t *ptep;
+
+ ptep = get_locked_pte(mm, addr, &ptl);
+ if (unlikely(!ptep))
+ return -EFAULT;
+
+ pgste = pgste_get_lock(ptep);
+ *key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+ if (!(pte_val(*ptep) & _PAGE_INVALID))
+ *key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
+ /* Reflect guest's logical view, not physical */
+ *key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
pgste_set_unlock(ptep, pgste);
pte_unmap_unlock(ptep, ptl);
- up_read(&mm->mmap_sem);
- return key;
+ return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index fbc394e16b2c..37e0bb835516 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -482,8 +482,12 @@ static int emu_setup_nodes_adjust(int nodes)
*/
static void emu_setup(void)
{
+ int nid;
+
emu_size = emu_setup_size_adjust(emu_size);
emu_nodes = emu_setup_nodes_adjust(emu_nodes);
+ for (nid = 0; nid < emu_nodes; nid++)
+ node_set(nid, node_possible_map);
pr_info("Creating %d nodes with memory stripe size %ld MB\n",
emu_nodes, emu_size >> 20);
}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 2794845061c6..f576f1073378 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -26,8 +26,14 @@ EXPORT_SYMBOL(node_data);
cpumask_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);
+static void plain_setup(void)
+{
+ node_set(0, node_possible_map);
+}
+
const struct numa_mode numa_mode_plain = {
.name = "plain",
+ .setup = plain_setup,
};
static const struct numa_mode *mode = &numa_mode_plain;
@@ -126,13 +132,13 @@ static void __init numa_setup_memory(void)
void __init numa_setup(void)
{
pr_info("NUMA mode: %s\n", mode->name);
+ nodes_clear(node_possible_map);
if (mode->setup)
mode->setup();
numa_setup_memory();
memblock_dump_all();
}
-
/*
* numa_init_early() - Initialization initcall
*
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 070f1ae5cfad..7297fce9bf80 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -286,7 +286,7 @@ static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long nr_pages, iommu_page_index;
@@ -332,7 +332,7 @@ out_err:
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long iommu_page_index;
@@ -355,7 +355,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
struct page *page;
@@ -370,7 +370,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
pa = page_to_phys(page);
memset((void *) pa, 0, size);
- map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL);
+ map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
return NULL;
@@ -384,19 +384,19 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
size = PAGE_ALIGN(size);
atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
- s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+ s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
free_pages((unsigned long) pa, get_order(size));
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int mapped_elements = 0;
struct scatterlist *s;
@@ -405,7 +405,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
for_each_sg(sg, s, nr_elements, i) {
struct page *page = sg_page(s);
s->dma_address = s390_dma_map_pages(dev, page, s->offset,
- s->length, dir, NULL);
+ s->length, dir, 0);
if (!dma_mapping_error(dev, s->dma_address)) {
s->dma_length = s->length;
mapped_elements++;
@@ -419,7 +419,7 @@ unmap:
for_each_sg(sg, s, mapped_elements, i) {
if (s->dma_address)
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, NULL);
+ dir, 0);
s->dma_address = 0;
s->dma_length = 0;
}
@@ -429,13 +429,14 @@ unmap:
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
+ 0);
s->dma_address = 0;
s->dma_length = 0;
}
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index e2660d27889b..fe4e6c910dd7 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -39,7 +39,6 @@ static void print_facility_list(struct facility_def *def)
printf("#define %s ", def->name);
for (i = 0; i <= high; i++)
printf("_AC(0x%016llx,UL)%c", array[i], i < high ? ',' : '\n');
- printf("#define %s_DWORDS %d\n", def->name, high + 1);
free(array);
}
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 9fbce49ad3bd..444c26c0f750 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -91,7 +91,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0d5f3a9bb315..ee086958b2b2 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -38,6 +38,7 @@ config SUPERH
select GENERIC_IDLE_POLL_SETUP
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+ select GENERIC_SCHED_CLOCK
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HAVE_MOD_ARCH_SPECIFIC if DWARF_UNWINDER
@@ -45,6 +46,7 @@ config SUPERH
select OLD_SIGSUSPEND
select OLD_SIGACTION
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_NMI
help
The SuperH is a RISC processor targeted for use in embedded systems
@@ -184,6 +186,12 @@ config CPU_SH2A
select CPU_SH2
select UNCACHED_MAPPING
+config CPU_J2
+ bool
+ select CPU_SH2
+ select OF
+ select OF_EARLY_FLATTREE
+
config CPU_SH3
bool
select CPU_HAS_INTEVT
@@ -250,6 +258,12 @@ config CPU_SUBTYPE_SH7619
select CPU_SH2
select SYS_SUPPORTS_SH_CMT
+config CPU_SUBTYPE_J2
+ bool "Support J2 processor"
+ select CPU_J2
+ select SYS_SUPPORTS_SMP
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+
# SH-2A Processor Support
config CPU_SUBTYPE_SH7201
@@ -739,6 +753,26 @@ endmenu
menu "Boot options"
+config USE_BUILTIN_DTB
+ bool "Use builtin DTB"
+ default n
+ depends on SH_DEVICE_TREE
+ help
+ Link a device tree blob for particular hardware into the kernel,
+ suppressing use of the DTB pointer provided by the bootloader.
+ This option should only be used with legacy bootloaders that are
+ not capable of providing a DTB to the kernel, or for experimental
+ hardware without stable device tree bindings.
+
+config BUILTIN_DTB_SOURCE
+ string "Source file for builtin DTB"
+ default ""
+ depends on USE_BUILTIN_DTB
+ help
+	  Base name (without suffix, relative to arch/sh/boot/dts) of the
+	  DTS file that will be used to produce the DTB linked into the
+	  kernel.
+
config ZERO_PAGE_OFFSET
hex
default "0x00010000" if PAGE_SIZE_64KB || SH_RTS7751R2D || \
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index bf5b3f5f4962..00476662ac2c 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -31,6 +31,7 @@ isa-y := $(isa-y)-up
endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
+cflags-$(CONFIG_CPU_J2) := $(call cc-option,-mj2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
$(call cc-option,-m2a-nofpu,) \
$(call cc-option,-m4-nofpu,)
@@ -130,6 +131,8 @@ head-y := arch/sh/kernel/head_$(BITS).o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
+core-$(CONFIG_USE_BUILTIN_DTB) += arch/sh/boot/dts/
+
# Mach groups
machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
machdir-$(CONFIG_SH_HP6XX) += mach-hp6xx
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index e0db04664e2e..e9c2c42031fe 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -11,6 +11,7 @@ config SH_DEVICE_TREE
select OF
select OF_EARLY_FLATTREE
select CLKSRC_OF
+ select COMMON_CLK
select GENERIC_CALIBRATE_DELAY
help
Select Board Described by Device Tree to build a kernel that
diff --git a/arch/sh/boards/board-secureedge5410.c b/arch/sh/boards/board-secureedge5410.c
index 98b36205aa7b..97ec67ffec2b 100644
--- a/arch/sh/boards/board-secureedge5410.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <asm/machvec.h>
#include <mach/secureedge5410.h>
@@ -49,7 +48,7 @@ static int __init eraseconfig_init(void)
irq);
return 0;
}
-module_init(eraseconfig_init);
+device_initcall(eraseconfig_init);
/*
* Initialize IRQ setting
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 911ffb9f115b..1fb6d5714bae 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -124,13 +124,22 @@ static void __init sh_of_time_init(void)
static void __init sh_of_setup(char **cmdline_p)
{
+ struct device_node *root;
+
+#ifdef CONFIG_USE_BUILTIN_DTB
+ unflatten_and_copy_device_tree();
+#else
unflatten_device_tree();
+#endif
board_time_init = sh_of_time_init;
- sh_mv.mv_name = of_flat_dt_get_machine_name();
- if (!sh_mv.mv_name)
- sh_mv.mv_name = "Unknown SH model";
+ sh_mv.mv_name = "Unknown SH model";
+ root = of_find_node_by_path("/");
+ if (root) {
+ of_property_read_string(root, "model", &sh_mv.mv_name);
+ of_node_put(root);
+ }
sh_of_smp_probe();
}
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
new file mode 100644
index 000000000000..e5ce3a0de7f4
--- /dev/null
+++ b/arch/sh/boot/dts/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+
+clean-files := *.dtb.S
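
Taken together with the new Kconfig options above, this rule turns a config value into an object file. For example (hypothetical, but following the patsubst rule directly), CONFIG_USE_BUILTIN_DTB=y with CONFIG_BUILTIN_DTB_SOURCE="j2_mimas_v2" would build arch/sh/boot/dts/j2_mimas_v2.dtb.o from the DTS added below and link it into the kernel image, so a legacy bootloader does not need to pass a DTB pointer.
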
diff --git a/arch/sh/boot/dts/j2_mimas_v2.dts b/arch/sh/boot/dts/j2_mimas_v2.dts
new file mode 100755
index 000000000000..880de75360b3
--- /dev/null
+++ b/arch/sh/boot/dts/j2_mimas_v2.dts
@@ -0,0 +1,96 @@
+/dts-v1/;
+
+/ {
+ compatible = "jcore,j2-soc";
+ model = "J2 FPGA SoC on Mimas v2 board";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&aic>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "jcore,j2";
+ reg = <0>;
+ clock-frequency = <50000000>;
+ d-cache-size = <8192>;
+ i-cache-size = <8192>;
+ d-cache-block-size = <16>;
+ i-cache-block-size = <16>;
+ };
+ };
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x4000000>;
+ };
+
+ aliases {
+ serial0 = &uart0;
+ spi0 = &spi0;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ soc@abcd0000 {
+ compatible = "simple-bus";
+ ranges = <0 0xabcd0000 0x100000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aic: interrupt-controller@200 {
+ compatible = "jcore,aic1";
+ reg = <0x200 0x10>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ cache-controller@c0 {
+ compatible = "jcore,cache";
+ reg = <0xc0 4>;
+ };
+
+ timer@200 {
+ compatible = "jcore,pit";
+ reg = <0x200 0x30>;
+ interrupts = <0x48>;
+ };
+
+ spi0: spi@40 {
+ compatible = "jcore,spi2";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ spi-max-frequency = <25000000>;
+
+ reg = <0x40 0x8>;
+
+ sdcard@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ voltage-ranges = <3200 3400>;
+ mode = <0>;
+ };
+ };
+
+ uart0: serial@100 {
+ clock-frequency = <125000000>;
+ compatible = "xlnx,xps-uartlite-1.00.a";
+ current-speed = <19200>;
+ device_type = "serial";
+ interrupts = <0x12>;
+ port-number = <0>;
+ reg = <0x100 0x10>;
+ };
+ };
+};
diff --git a/arch/sh/configs/j2_defconfig b/arch/sh/configs/j2_defconfig
new file mode 100644
index 000000000000..94d1eca52f72
--- /dev/null
+++ b/arch/sh/configs/j2_defconfig
@@ -0,0 +1,40 @@
+CONFIG_SMP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_SUBTYPE_J2=y
+CONFIG_MEMORY_START=0x10000000
+CONFIG_MEMORY_SIZE=0x04000000
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_SH_DEVICE_TREE=y
+CONFIG_HZ_100=y
+CONFIG_CMDLINE_OVERWRITE=y
+CONFIG_CMDLINE="console=ttyUL0 earlycon"
+CONFIG_BINFMT_ELF_FDPIC=y
+CONFIG_BINFMT_FLAT=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_NETDEVICES=y
+CONFIG_SERIAL_UARTLITE=y
+CONFIG_SERIAL_UARTLITE_CONSOLE=y
+CONFIG_I2C=y
+CONFIG_SPI=y
+CONFIG_SPI_JCORE=y
+CONFIG_WATCHDOG=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_CLKSRC_JCORE_PIT=y
+CONFIG_JCORE_AIC=y
+CONFIG_EXT4_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_FAT_DEFAULT_UTF8=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_UTF8=y
diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c
index 7efc9c354fc7..49bace446a1a 100644
--- a/arch/sh/drivers/heartbeat.c
+++ b/arch/sh/drivers/heartbeat.c
@@ -19,7 +19,6 @@
* for more details.
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/timer.h>
@@ -139,26 +138,11 @@ static int heartbeat_drv_probe(struct platform_device *pdev)
return mod_timer(&hd->timer, jiffies + 1);
}
-static int heartbeat_drv_remove(struct platform_device *pdev)
-{
- struct heartbeat_data *hd = platform_get_drvdata(pdev);
-
- del_timer_sync(&hd->timer);
- iounmap(hd->base);
-
- platform_set_drvdata(pdev, NULL);
-
- if (!pdev->dev.platform_data)
- kfree(hd);
-
- return 0;
-}
-
static struct platform_driver heartbeat_driver = {
.probe = heartbeat_drv_probe,
- .remove = heartbeat_drv_remove,
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .suppress_bind_attrs = true,
},
};
@@ -167,14 +151,4 @@ static int __init heartbeat_init(void)
printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
return platform_driver_register(&heartbeat_driver);
}
-
-static void __exit heartbeat_exit(void)
-{
- platform_driver_unregister(&heartbeat_driver);
-}
-module_init(heartbeat_init);
-module_exit(heartbeat_exit);
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR("Paul Mundt");
-MODULE_LICENSE("GPL v2");
+device_initcall(heartbeat_init);
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index d5462b7bc514..84563e39a5b8 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -221,7 +221,7 @@ pcibios_bus_report_status_early(struct pci_channel *hose,
* We can't use pci_find_device() here since we are
* called from interrupt context.
*/
-static void __init_refok
+static void __ref
pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
int warn)
{
@@ -256,7 +256,7 @@ pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
pcibios_bus_report_status(dev->subordinate, status_mask, warn);
}
-void __init_refok pcibios_report_status(unsigned int status_mask, int warn)
+void __ref pcibios_report_status(unsigned int status_mask, int warn)
{
struct pci_channel *hose;
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index c399e1c55685..8a7bd80c8b33 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -1,6 +1,12 @@
#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H
+#if defined(CONFIG_CPU_J2)
+
+#include <asm-generic/atomic.h>
+
+#else
+
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
@@ -63,4 +69,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#endif /* CONFIG_CPU_J2 */
+
#endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 8a84e05adb2e..3c30b6e166b6 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -29,6 +29,11 @@
#define wmb() mb()
#define ctrl_barrier() __icbi(PAGE_OFFSET)
#else
+#if defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
+#define __smp_mb() do { int tmp = 0; __asm__ __volatile__ ("cas.l %0,%0,@%1" : "+r"(tmp) : "z"(&tmp) : "memory", "t"); } while(0)
+#define __smp_rmb() __smp_mb()
+#define __smp_wmb() __smp_mb()
+#endif
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif
diff --git a/arch/sh/include/asm/bitops-cas.h b/arch/sh/include/asm/bitops-cas.h
new file mode 100644
index 000000000000..88f793c04d3c
--- /dev/null
+++ b/arch/sh/include/asm/bitops-cas.h
@@ -0,0 +1,93 @@
+#ifndef __ASM_SH_BITOPS_CAS_H
+#define __ASM_SH_BITOPS_CAS_H
+
+static inline unsigned __bo_cas(volatile unsigned *p, unsigned old, unsigned new)
+{
+ __asm__ __volatile__("cas.l %1,%0,@r0"
+ : "+r"(new)
+ : "r"(old), "z"(p)
+ : "t", "memory" );
+ return new;
+}
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+ unsigned mask, old;
+ volatile unsigned *a = addr;
+
+ a += nr >> 5;
+ mask = 1U << (nr & 0x1f);
+
+ do old = *a;
+ while (__bo_cas(a, old, old|mask) != old);
+}
+
+static inline void clear_bit(int nr, volatile void *addr)
+{
+ unsigned mask, old;
+ volatile unsigned *a = addr;
+
+ a += nr >> 5;
+ mask = 1U << (nr & 0x1f);
+
+ do old = *a;
+ while (__bo_cas(a, old, old&~mask) != old);
+}
+
+static inline void change_bit(int nr, volatile void *addr)
+{
+ unsigned mask, old;
+ volatile unsigned *a = addr;
+
+ a += nr >> 5;
+ mask = 1U << (nr & 0x1f);
+
+ do old = *a;
+ while (__bo_cas(a, old, old^mask) != old);
+}
+
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+ unsigned mask, old;
+ volatile unsigned *a = addr;
+
+ a += nr >> 5;
+ mask = 1U << (nr & 0x1f);
+
+ do old = *a;
+ while (__bo_cas(a, old, old|mask) != old);
+
+ return !!(old & mask);
+}
+
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+ unsigned mask, old;
+ volatile unsigned *a = addr;
+
+ a += nr >> 5;
+ mask = 1U << (nr & 0x1f);
+
+ do old = *a;
+ while (__bo_cas(a, old, old&~mask) != old);
+
+ return !!(old & mask);
+}
+
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+ unsigned mask, old;
+ volatile unsigned *a = addr;
+
+ a += nr >> 5;
+ mask = 1U << (nr & 0x1f);
+
+ do old = *a;
+ while (__bo_cas(a, old, old^mask) != old);
+
+ return !!(old & mask);
+}
+
+#include <asm-generic/bitops/non-atomic.h>
+
+#endif /* __ASM_SH_BITOPS_CAS_H */
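
All of the helpers above follow the same retry shape: read the current word, compute the desired word, and loop until the cas.l compare-and-swap confirms that no other CPU changed the word in between (the "z" constraint pins the address into r0, where cas.l expects it). Below is a portable, userspace-runnable analogue of test_and_set_bit() built on the GCC/Clang __atomic builtins; it only illustrates the pattern and is not something this patch uses:

#include <stdio.h>

/* Portable sketch of the cas.l retry loop used by the bitops above. */
static int demo_test_and_set_bit(int nr, volatile unsigned *addr)
{
	volatile unsigned *a = addr + (nr >> 5);
	unsigned mask = 1U << (nr & 0x1f);
	unsigned old;

	do {
		old = *a;
		/* Try to publish old|mask; succeeds only if *a is still old. */
	} while (!__atomic_compare_exchange_n(a, &old, old | mask, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return !!(old & mask);	/* previous value of the bit */
}

int main(void)
{
	unsigned words[2] = { 0, 0 };

	printf("%d\n", demo_test_and_set_bit(33, words));	/* 0: was clear */
	printf("%d\n", demo_test_and_set_bit(33, words));	/* 1: now set */
	return 0;
}
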
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index fc8e652cf173..a8699d60a8c4 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -18,6 +18,8 @@
#include <asm/bitops-op32.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/bitops-llsc.h>
+#elif defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
+#include <asm/bitops-cas.h>
#else
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
diff --git a/arch/sh/include/asm/cmpxchg-cas.h b/arch/sh/include/asm/cmpxchg-cas.h
new file mode 100644
index 000000000000..d0d86649e8c1
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-cas.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_SH_CMPXCHG_CAS_H
+#define __ASM_SH_CMPXCHG_CAS_H
+
+static inline unsigned long
+__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
+{
+ __asm__ __volatile__("cas.l %1,%0,@r0"
+ : "+r"(new)
+ : "r"(old), "z"(m)
+ : "t", "memory" );
+ return new;
+}
+
+static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
+{
+ unsigned long old;
+ do old = *m;
+ while (__cmpxchg_u32(m, old, val) != old);
+ return old;
+}
+
+#include <asm/cmpxchg-xchg.h>
+
+#endif /* __ASM_SH_CMPXCHG_CAS_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
index 7219719c23a3..1e881f5db659 100644
--- a/arch/sh/include/asm/cmpxchg-xchg.h
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -21,7 +21,7 @@ static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
int off = (unsigned long)ptr % sizeof(u32);
volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
- int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+ int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
int bitoff = off * BITS_PER_BYTE;
#endif
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 5225916c1057..3dfe0467a773 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -13,6 +13,8 @@
#include <asm/cmpxchg-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/cmpxchg-llsc.h>
+#elif defined(CONFIG_CPU_J2) && defined(CONFIG_SMP)
+#include <asm/cmpxchg-cas.h>
#else
#include <asm/cmpxchg-irq.h>
#endif
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index e11cf0c8206b..0052ad40e86d 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -17,9 +17,9 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/include/asm/futex-cas.h b/arch/sh/include/asm/futex-cas.h
new file mode 100644
index 000000000000..267cb7a5f101
--- /dev/null
+++ b/arch/sh/include/asm/futex-cas.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_SH_FUTEX_CAS_H
+#define __ASM_SH_FUTEX_CAS_H
+
+static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
+ u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int err = 0;
+ __asm__ __volatile__(
+ "1:\n\t"
+ "cas.l %2, %1, @r0\n"
+ "2:\n\t"
+#ifdef CONFIG_MMU
+ ".section .fixup,\"ax\"\n"
+ "3:\n\t"
+ "mov.l 4f, %0\n\t"
+ "jmp @%0\n\t"
+ " mov %3, %0\n\t"
+ ".balign 4\n"
+ "4: .long 2b\n\t"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n\t"
+ ".long 1b, 3b\n\t"
+ ".previous"
+#endif
+ :"+r" (err), "+r" (newval)
+ :"r" (oldval), "i" (-EFAULT), "z" (uaddr)
+ :"t", "memory");
+ if (err) return err;
+ *uval = newval;
+ return 0;
+}
+
+#endif /* __ASM_SH_FUTEX_CAS_H */
diff --git a/arch/sh/include/asm/futex-irq.h b/arch/sh/include/asm/futex-irq.h
index 63d33129ea23..ab01dbee0a82 100644
--- a/arch/sh/include/asm/futex-irq.h
+++ b/arch/sh/include/asm/futex-irq.h
@@ -1,92 +1,6 @@
#ifndef __ASM_SH_FUTEX_IRQ_H
#define __ASM_SH_FUTEX_IRQ_H
-
-static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
- int *oldval)
-{
- unsigned long flags;
- int ret;
-
- local_irq_save(flags);
-
- ret = get_user(*oldval, uaddr);
- if (!ret)
- ret = put_user(oparg, uaddr);
-
- local_irq_restore(flags);
-
- return ret;
-}
-
-static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
- int *oldval)
-{
- unsigned long flags;
- int ret;
-
- local_irq_save(flags);
-
- ret = get_user(*oldval, uaddr);
- if (!ret)
- ret = put_user(*oldval + oparg, uaddr);
-
- local_irq_restore(flags);
-
- return ret;
-}
-
-static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
- int *oldval)
-{
- unsigned long flags;
- int ret;
-
- local_irq_save(flags);
-
- ret = get_user(*oldval, uaddr);
- if (!ret)
- ret = put_user(*oldval | oparg, uaddr);
-
- local_irq_restore(flags);
-
- return ret;
-}
-
-static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
- int *oldval)
-{
- unsigned long flags;
- int ret;
-
- local_irq_save(flags);
-
- ret = get_user(*oldval, uaddr);
- if (!ret)
- ret = put_user(*oldval & oparg, uaddr);
-
- local_irq_restore(flags);
-
- return ret;
-}
-
-static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
- int *oldval)
-{
- unsigned long flags;
- int ret;
-
- local_irq_save(flags);
-
- ret = get_user(*oldval, uaddr);
- if (!ret)
- ret = put_user(*oldval ^ oparg, uaddr);
-
- local_irq_restore(flags);
-
- return ret;
-}
-
static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
u32 __user *uaddr,
u32 oldval, u32 newval)
diff --git a/arch/sh/include/asm/futex-llsc.h b/arch/sh/include/asm/futex-llsc.h
new file mode 100644
index 000000000000..23591703bec0
--- /dev/null
+++ b/arch/sh/include/asm/futex-llsc.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_SH_FUTEX_LLSC_H
+#define __ASM_SH_FUTEX_LLSC_H
+
+static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
+ u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int err = 0;
+ __asm__ __volatile__(
+ "synco\n"
+ "1:\n\t"
+ "movli.l @%2, r0\n\t"
+ "mov r0, %1\n\t"
+ "cmp/eq %1, %4\n\t"
+ "bf 2f\n\t"
+ "mov %5, r0\n\t"
+ "movco.l r0, @%2\n\t"
+ "bf 1b\n"
+ "2:\n\t"
+ "synco\n\t"
+#ifdef CONFIG_MMU
+ ".section .fixup,\"ax\"\n"
+ "3:\n\t"
+ "mov.l 4f, %0\n\t"
+ "jmp @%0\n\t"
+ " mov %3, %0\n\t"
+ ".balign 4\n"
+ "4: .long 2b\n\t"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n\t"
+ ".long 1b, 3b\n\t"
+ ".previous"
+#endif
+ :"+r" (err), "=&r" (*uval)
+ :"r" (uaddr), "i" (-EFAULT), "r" (oldval), "r" (newval)
+ :"t", "memory", "r0");
+ if (err) return err;
+ return 0;
+}
+
+#endif /* __ASM_SH_FUTEX_LLSC_H */
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index 7be39a646fbd..d0078747d308 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -7,16 +7,34 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
-/* XXX: UP variants, fix for SH-4A and SMP.. */
+#if !defined(CONFIG_SMP)
#include <asm/futex-irq.h>
+#elif defined(CONFIG_CPU_J2)
+#include <asm/futex-cas.h>
+#elif defined(CONFIG_CPU_SH4A)
+#include <asm/futex-llsc.h>
+#else
+#error SMP not supported on this configuration.
+#endif
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+}
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
+ u32 oparg = (encoded_op << 8) >> 20;
+ u32 cmparg = (encoded_op << 20) >> 20;
+ u32 oldval, newval, prev;
+ int ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -26,26 +44,39 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_disable();
- switch (op) {
- case FUTEX_OP_SET:
- ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
- break;
- case FUTEX_OP_ADD:
- ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
- break;
- case FUTEX_OP_OR:
- ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
- break;
- case FUTEX_OP_ANDN:
- ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
- break;
- case FUTEX_OP_XOR:
- ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
- break;
- default:
- ret = -ENOSYS;
- break;
- }
+ do {
+ if (op == FUTEX_OP_SET)
+ ret = oldval = 0;
+ else
+ ret = get_user(oldval, uaddr);
+
+ if (ret) break;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ newval = oparg;
+ break;
+ case FUTEX_OP_ADD:
+ newval = oldval + oparg;
+ break;
+ case FUTEX_OP_OR:
+ newval = oldval | oparg;
+ break;
+ case FUTEX_OP_ANDN:
+ newval = oldval & ~oparg;
+ break;
+ case FUTEX_OP_XOR:
+ newval = oldval ^ oparg;
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ if (ret) break;
+
+ ret = futex_atomic_cmpxchg_inatomic(&prev, uaddr, oldval, newval);
+ } while (!ret && prev != oldval);
pagefault_enable();
@@ -53,10 +84,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
default: ret = -ENOSYS;
}
}
@@ -64,15 +95,5 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret;
}
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
-}
-
#endif /* __KERNEL__ */
#endif /* __ASM_SH_FUTEX_H */
diff --git a/arch/sh/include/asm/mc146818rtc.h b/arch/sh/include/asm/mc146818rtc.h
deleted file mode 100644
index 0aee96a97330..000000000000
--- a/arch/sh/include/asm/mc146818rtc.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _ASM_MC146818RTC_H
-#define _ASM_MC146818RTC_H
-
-#endif /* _ASM_MC146818RTC_H */
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 1506897648aa..f9a09942a32d 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -15,7 +15,7 @@
*/
enum cpu_type {
/* SH-2 types */
- CPU_SH7619,
+ CPU_SH7619, CPU_J2,
/* SH-2A types */
CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_SH7264, CPU_SH7269,
diff --git a/arch/sh/include/asm/rtc.h b/arch/sh/include/asm/rtc.h
index 52b0c2dba979..f7b010d48af7 100644
--- a/arch/sh/include/asm/rtc.h
+++ b/arch/sh/include/asm/rtc.h
@@ -6,17 +6,6 @@ extern void (*board_time_init)(void);
extern void (*rtc_sh_get_time)(struct timespec *);
extern int (*rtc_sh_set_time)(const time_t);
-/* some dummy definitions */
-#define RTC_BATT_BAD 0x100 /* battery bad */
-#define RTC_SQWE 0x08 /* enable square-wave output */
-#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
-#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
-#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
-
-struct rtc_time;
-unsigned int get_rtc_time(struct rtc_time *);
-int set_rtc_time(struct rtc_time *);
-
#define RTC_CAP_4_DIGIT_YEAR (1 << 0)
struct sh_rtc_platform_info {
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
new file mode 100644
index 000000000000..c46e8cc7b515
--- /dev/null
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -0,0 +1,117 @@
+/*
+ * include/asm-sh/spinlock-cas.h
+ *
+ * Copyright (C) 2015 SEI
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_SH_SPINLOCK_CAS_H
+#define __ASM_SH_SPINLOCK_CAS_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
+{
+ __asm__ __volatile__("cas.l %1,%0,@r0"
+ : "+r"(new)
+ : "r"(old), "z"(p)
+ : "t", "memory" );
+ return new;
+}
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ while (!__sl_cas(&lock->lock, 1, 0));
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ __sl_cas(&lock->lock, 0, 1);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ return __sl_cas(&lock->lock, 1, 0);
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts but no interrupt
+ * writers. For those circumstances we can "mix" irq-safe locks - any writer
+ * needs to get a irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned old;
+ do old = rw->lock;
+ while (!old || __sl_cas(&rw->lock, old, old-1) != old);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned old;
+ do old = rw->lock;
+ while (__sl_cas(&rw->lock, old, old+1) != old);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ __sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned old;
+ do old = rw->lock;
+ while (old && __sl_cas(&rw->lock, old, old-1) != old);
+ return !!old;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SH_SPINLOCK_CAS_H */
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
new file mode 100644
index 000000000000..cec78143fa83
--- /dev/null
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -0,0 +1,224 @@
+/*
+ * include/asm-sh/spinlock-llsc.h
+ *
+ * Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2006, 2007 Akio Idehara
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_SH_SPINLOCK_LLSC_H
+#define __ASM_SH_SPINLOCK_LLSC_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
+
+/*
+ * Simple spin lock operations. There are two variants, one clears IRQs
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+ unsigned long oldval;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%2, %0 ! arch_spin_lock \n\t"
+ "mov %0, %1 \n\t"
+ "mov #0, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
+ "bf 1b \n\t"
+ "cmp/pl %1 \n\t"
+ "bf 1b \n\t"
+ : "=&z" (tmp), "=&r" (oldval)
+ : "r" (&lock->lock)
+ : "t", "memory"
+ );
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "mov #1, %0 ! arch_spin_unlock \n\t"
+ "mov.l %0, @%1 \n\t"
+ : "=&z" (tmp)
+ : "r" (&lock->lock)
+ : "t", "memory"
+ );
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp, oldval;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%2, %0 ! arch_spin_trylock \n\t"
+ "mov %0, %1 \n\t"
+ "mov #0, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
+ "bf 1b \n\t"
+ "synco \n\t"
+ : "=&z" (tmp), "=&r" (oldval)
+ : "r" (&lock->lock)
+ : "t", "memory"
+ );
+
+ return oldval;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts but no interrupt
+ * writers. For those circumstances we can "mix" irq-safe locks - any writer
+ * needs to get an irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%1, %0 ! arch_read_lock \n\t"
+ "cmp/pl %0 \n\t"
+ "bf 1b \n\t"
+ "add #-1, %0 \n\t"
+ "movco.l %0, @%1 \n\t"
+ "bf 1b \n\t"
+ : "=&z" (tmp)
+ : "r" (&rw->lock)
+ : "t", "memory"
+ );
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%1, %0 ! arch_read_unlock \n\t"
+ "add #1, %0 \n\t"
+ "movco.l %0, @%1 \n\t"
+ "bf 1b \n\t"
+ : "=&z" (tmp)
+ : "r" (&rw->lock)
+ : "t", "memory"
+ );
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%1, %0 ! arch_write_lock \n\t"
+ "cmp/hs %2, %0 \n\t"
+ "bf 1b \n\t"
+ "sub %2, %0 \n\t"
+ "movco.l %0, @%1 \n\t"
+ "bf 1b \n\t"
+ : "=&z" (tmp)
+ : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+ : "t", "memory"
+ );
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ __asm__ __volatile__ (
+ "mov.l %1, @%0 ! arch_write_unlock \n\t"
+ :
+ : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+ : "t", "memory"
+ );
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ unsigned long tmp, oldval;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%2, %0 ! arch_read_trylock \n\t"
+ "mov %0, %1 \n\t"
+ "cmp/pl %0 \n\t"
+ "bf 2f \n\t"
+ "add #-1, %0 \n\t"
+ "movco.l %0, @%2 \n\t"
+ "bf 1b \n\t"
+ "2: \n\t"
+ "synco \n\t"
+ : "=&z" (tmp), "=&r" (oldval)
+ : "r" (&rw->lock)
+ : "t", "memory"
+ );
+
+ return (oldval > 0);
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ unsigned long tmp, oldval;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "movli.l @%2, %0 ! arch_write_trylock \n\t"
+ "mov %0, %1 \n\t"
+ "cmp/hs %3, %0 \n\t"
+ "bf 2f \n\t"
+ "sub %3, %0 \n\t"
+ "2: \n\t"
+ "movco.l %0, @%2 \n\t"
+ "bf 1b \n\t"
+ "synco \n\t"
+ : "=&z" (tmp), "=&r" (oldval)
+ : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+ : "t", "memory"
+ );
+
+ return (oldval > (RW_LOCK_BIAS - 1));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SH_SPINLOCK_LLSC_H */
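The movli.l/movco.l pairs in this file are SH-4A load-locked/store-conditional: movli.l loads and opens a reservation, movco.l stores only if the reservation is still intact and records success in the T bit, and "bf 1b" retries when it failed. Roughly the same retry loop expressed with a C11 weak compare-exchange standing in for the LL/SC pair (an illustrative analogue, not the kernel code):

#include <stdatomic.h>

struct llsc_spinlock { _Atomic long lock; };    /* > 0 means unlocked, as above */

static void llsc_spin_lock(struct llsc_spinlock *l)
{
    for (;;) {
        long old = atomic_load_explicit(&l->lock, memory_order_relaxed); /* movli.l */

        /* movco.l: publish 0 only if nothing intervened since the load */
        if (!atomic_compare_exchange_weak_explicit(&l->lock, &old, 0,
                                                   memory_order_acquire,
                                                   memory_order_relaxed))
            continue;                           /* store-conditional failed: bf 1b */
        if (old > 0)                            /* cmp/pl: the lock really was free */
            return;
        /* it was already held; we stored 0 over 0, keep spinning */
    }
}

static void llsc_spin_unlock(struct llsc_spinlock *l)
{
    atomic_store_explicit(&l->lock, 1, memory_order_release);  /* mov #1 / mov.l */
}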
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 416834b60ad0..c2c61ea6a8e2 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -11,222 +11,12 @@
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H
-/*
- * The only locking implemented here uses SH-4A opcodes. For others,
- * split this out as per atomic-*.h.
- */
-#ifndef CONFIG_CPU_SH4A
-#error "Need movli.l/movco.l for spinlocks"
+#if defined(CONFIG_CPU_SH4A)
+#include <asm/spinlock-llsc.h>
+#elif defined(CONFIG_CPU_J2)
+#include <asm/spinlock-cas.h>
+#else
+#error "The configured cpu type does not support spinlocks"
#endif
-#include <asm/barrier.h>
-#include <asm/processor.h>
-
-/*
- * Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-#define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
-
-/*
- * Simple spin lock operations. There are two variants, one clears IRQ's
- * on the local processor, one does not.
- *
- * We make no fairness assumptions. They have a cost.
- */
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
- unsigned long oldval;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%2, %0 ! arch_spin_lock \n\t"
- "mov %0, %1 \n\t"
- "mov #0, %0 \n\t"
- "movco.l %0, @%2 \n\t"
- "bf 1b \n\t"
- "cmp/pl %1 \n\t"
- "bf 1b \n\t"
- : "=&z" (tmp), "=&r" (oldval)
- : "r" (&lock->lock)
- : "t", "memory"
- );
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "mov #1, %0 ! arch_spin_unlock \n\t"
- "mov.l %0, @%1 \n\t"
- : "=&z" (tmp)
- : "r" (&lock->lock)
- : "t", "memory"
- );
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned long tmp, oldval;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%2, %0 ! arch_spin_trylock \n\t"
- "mov %0, %1 \n\t"
- "mov #0, %0 \n\t"
- "movco.l %0, @%2 \n\t"
- "bf 1b \n\t"
- "synco \n\t"
- : "=&z" (tmp), "=&r" (oldval)
- : "r" (&lock->lock)
- : "t", "memory"
- );
-
- return oldval;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts but no interrupt
- * writers. For those circumstances we can "mix" irq-safe locks - any writer
- * needs to get a irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
-
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%1, %0 ! arch_read_lock \n\t"
- "cmp/pl %0 \n\t"
- "bf 1b \n\t"
- "add #-1, %0 \n\t"
- "movco.l %0, @%1 \n\t"
- "bf 1b \n\t"
- : "=&z" (tmp)
- : "r" (&rw->lock)
- : "t", "memory"
- );
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%1, %0 ! arch_read_unlock \n\t"
- "add #1, %0 \n\t"
- "movco.l %0, @%1 \n\t"
- "bf 1b \n\t"
- : "=&z" (tmp)
- : "r" (&rw->lock)
- : "t", "memory"
- );
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned long tmp;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%1, %0 ! arch_write_lock \n\t"
- "cmp/hs %2, %0 \n\t"
- "bf 1b \n\t"
- "sub %2, %0 \n\t"
- "movco.l %0, @%1 \n\t"
- "bf 1b \n\t"
- : "=&z" (tmp)
- : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
- : "t", "memory"
- );
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- __asm__ __volatile__ (
- "mov.l %1, @%0 ! arch_write_unlock \n\t"
- :
- : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
- : "t", "memory"
- );
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned long tmp, oldval;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%2, %0 ! arch_read_trylock \n\t"
- "mov %0, %1 \n\t"
- "cmp/pl %0 \n\t"
- "bf 2f \n\t"
- "add #-1, %0 \n\t"
- "movco.l %0, @%2 \n\t"
- "bf 1b \n\t"
- "2: \n\t"
- "synco \n\t"
- : "=&z" (tmp), "=&r" (oldval)
- : "r" (&rw->lock)
- : "t", "memory"
- );
-
- return (oldval > 0);
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned long tmp, oldval;
-
- __asm__ __volatile__ (
- "1: \n\t"
- "movli.l @%2, %0 ! arch_write_trylock \n\t"
- "mov %0, %1 \n\t"
- "cmp/hs %3, %0 \n\t"
- "bf 2f \n\t"
- "sub %3, %0 \n\t"
- "2: \n\t"
- "movco.l %0, @%2 \n\t"
- "bf 1b \n\t"
- "synco \n\t"
- : "=&z" (tmp), "=&r" (oldval)
- : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
- : "t", "memory"
- );
-
- return (oldval > (RW_LOCK_BIAS - 1));
-}
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SH_SPINLOCK_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 2afa321157be..6c65dcd470ab 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -151,19 +151,10 @@ extern void init_thread_xstate(void);
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
-#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-
#define TI_FLAG_FAULT_CODE_SHIFT 24
/*
@@ -182,23 +173,6 @@ static inline unsigned int get_thread_fault_code(void)
return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/sh/include/uapi/asm/cpu-features.h b/arch/sh/include/uapi/asm/cpu-features.h
index 694abe490edb..2f1bc851042a 100644
--- a/arch/sh/include/uapi/asm/cpu-features.h
+++ b/arch/sh/include/uapi/asm/cpu-features.h
@@ -22,5 +22,6 @@
#define CPU_HAS_L2_CACHE 0x0080 /* Secondary cache / URAM */
#define CPU_HAS_OP32 0x0100 /* 32-bit instruction support */
#define CPU_HAS_PTEAEX 0x0200 /* PTE ASID Extension support */
+#define CPU_HAS_CAS_L 0x0400 /* cas.l atomic compare-and-swap */
#endif /* __ASM_SH_CPU_FEATURES_H */
diff --git a/arch/sh/include/uapi/asm/sigcontext.h b/arch/sh/include/uapi/asm/sigcontext.h
index 8ce1435bc0bf..faa5d0833412 100644
--- a/arch/sh/include/uapi/asm/sigcontext.h
+++ b/arch/sh/include/uapi/asm/sigcontext.h
@@ -25,8 +25,6 @@ struct sigcontext {
unsigned long sc_mach;
unsigned long sc_macl;
-#if defined(__SH4__) || defined(CONFIG_CPU_SH4) || \
- defined(__SH2A__) || defined(CONFIG_CPU_SH2A)
/* FPU registers */
unsigned long sc_fpregs[16];
unsigned long sc_xfpregs[16];
@@ -34,7 +32,6 @@ struct sigcontext {
unsigned int sc_fpul;
unsigned int sc_ownedfp;
#endif
-#endif
};
#endif /* __ASM_SH_SIGCONTEXT_H */
diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h
index d13a1d623736..c801bde9e6f5 100644
--- a/arch/sh/include/uapi/asm/unistd_32.h
+++ b/arch/sh/include/uapi/asm/unistd_32.h
@@ -380,7 +380,21 @@
#define __NR_process_vm_writev 366
#define __NR_kcmp 367
#define __NR_finit_module 368
+#define __NR_sched_getattr 369
+#define __NR_sched_setattr 370
+#define __NR_renameat2 371
+#define __NR_seccomp 372
+#define __NR_getrandom 373
+#define __NR_memfd_create 374
+#define __NR_bpf 375
+#define __NR_execveat 376
+#define __NR_userfaultfd 377
+#define __NR_membarrier 378
+#define __NR_mlock2 379
+#define __NR_copy_file_range 380
+#define __NR_preadv2 381
+#define __NR_pwritev2 382
-#define NR_syscalls 369
+#define NR_syscalls 383
#endif /* __ASM_SH_UNISTD_32_H */
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index 47ebd5b5ed55..ce0cb3598b62 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -400,7 +400,21 @@
#define __NR_process_vm_writev 377
#define __NR_kcmp 378
#define __NR_finit_module 379
+#define __NR_sched_getattr 380
+#define __NR_sched_setattr 381
+#define __NR_renameat2 382
+#define __NR_seccomp 383
+#define __NR_getrandom 384
+#define __NR_memfd_create 385
+#define __NR_bpf 386
+#define __NR_execveat 387
+#define __NR_userfaultfd 388
+#define __NR_membarrier 389
+#define __NR_mlock2 390
+#define __NR_copy_file_range 391
+#define __NR_preadv2 392
+#define __NR_pwritev2 393
-#define NR_syscalls 380
+#define NR_syscalls 394
#endif /* __ASM_SH_UNISTD_64_H */
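These additions must stay consistent in three places: the highest __NR_* value plus one has to equal NR_syscalls (382 + 1 = 383 on sh32, 393 + 1 = 394 on sh64), and each new number needs a matching .long entry appended to syscalls_32.S/syscalls_64.S further down in this diff. A hypothetical compile-time check of that invariant (not part of the patch) could be dropped into any built-in init path:

#include <linux/bug.h>          /* BUILD_BUG_ON() */
#include <linux/init.h>
#include <asm/unistd.h>         /* __NR_* and NR_syscalls */

static int __init syscall_table_check(void)
{
    /* breaks the build if a new __NR_* is added without bumping NR_syscalls */
    BUILD_BUG_ON(__NR_pwritev2 + 1 != NR_syscalls);
    return 0;
}
arch_initcall(syscall_table_check);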
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 4187cf4fe185..fca9b1e78a63 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -24,11 +24,13 @@ int __init clk_init(void)
{
int ret;
+#ifndef CONFIG_COMMON_CLK
ret = arch_clk_init();
if (unlikely(ret)) {
pr_err("%s: CPU clock registration failed.\n", __func__);
return ret;
}
+#endif
if (sh_mv.mv_clk_init) {
ret = sh_mv.mv_clk_init();
@@ -39,11 +41,13 @@ int __init clk_init(void)
}
}
+#ifndef CONFIG_COMMON_CLK
/* Kick the child clocks.. */
recalculate_root_clocks();
/* Enable the necessary init clocks */
clk_enable_init_clocks();
+#endif
return ret;
}
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 0d7360d549c1..c8b3be1b54e6 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -106,7 +106,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
/*
* Generic first-level cache init
*/
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;
@@ -323,9 +323,13 @@ asmlinkage void cpu_init(void)
cache_init();
if (raw_smp_processor_id() == 0) {
+#ifdef CONFIG_MMU
shm_align_mask = max_t(unsigned long,
current_cpu_data.dcache.way_size - 1,
PAGE_SIZE - 1);
+#else
+ shm_align_mask = PAGE_SIZE - 1;
+#endif
/* Boot CPU sets the cache shape */
detect_cache_shape();
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
index 9e6624c9108b..4df4b284f591 100644
--- a/arch/sh/kernel/cpu/proc.c
+++ b/arch/sh/kernel/cpu/proc.c
@@ -27,6 +27,7 @@ static const char *cpu_name[] = {
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
+ [CPU_J2] = "J2",
[CPU_SH_NONE] = "Unknown"
};
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile
index f0f059acfcfb..904c4283d923 100644
--- a/arch/sh/kernel/cpu/sh2/Makefile
+++ b/arch/sh/kernel/cpu/sh2/Makefile
@@ -5,3 +5,7 @@
obj-y := ex.o probe.o entry.o
obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o
+
+# SMP setup
+smp-$(CONFIG_CPU_J2) := smp-j2.o
+obj-$(CONFIG_SMP) += $(smp-y)
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index a1505956ef28..1ee0a6e774c6 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -47,6 +47,13 @@ ENTRY(exception_handler)
mov.l r3,@-sp
cli
mov.l $cpu_mode,r2
+#ifdef CONFIG_SMP
+ mov.l $cpuid,r3
+ mov.l @r3,r3
+ mov.l @r3,r3
+ shll2 r3
+ add r3,r2
+#endif
mov.l @r2,r0
mov.l @(5*4,r15),r3 ! previous SR
or r0,r3 ! set MD
@@ -57,6 +64,13 @@ ENTRY(exception_handler)
mov.l __md_bit,r0
mov.l r0,@r2 ! enter kernel mode
mov.l $current_thread_info,r2
+#ifdef CONFIG_SMP
+ mov.l $cpuid,r0
+ mov.l @r0,r0
+ mov.l @r0,r0
+ shll2 r0
+ add r0,r2
+#endif
mov.l @r2,r2
mov #(THREAD_SIZE >> 8),r0
shll8 r0
@@ -147,6 +161,11 @@ ENTRY(exception_handler)
mov #31,r8
cmp/hs r8,r9
bt trap_entry ! 64 > vec >= 31 is trap
+#ifdef CONFIG_CPU_J2
+ mov #16,r8
+ cmp/hs r8,r9
+ bt interrupt_entry ! 31 > vec >= 16 is interrupt
+#endif
mov.l 4f,r8
mov r9,r4
@@ -260,6 +279,13 @@ restore_all:
lds.l @r0+,macl
mov r15,r0
mov.l $cpu_mode,r2
+#ifdef CONFIG_SMP
+ mov.l $cpuid,r3
+ mov.l @r3,r3
+ mov.l @r3,r3
+ shll2 r3
+ add r3,r2
+#endif
mov #OFF_SR,r3
mov.l @(r0,r3),r1
mov.l __md_bit,r3
@@ -276,6 +302,13 @@ restore_all:
mov.l r1,@r2 ! set pc
get_current_thread_info r0, r1
mov.l $current_thread_info,r1
+#ifdef CONFIG_SMP
+ mov.l $cpuid,r3
+ mov.l @r3,r3
+ mov.l @r3,r3
+ shll2 r3
+ add r3,r1
+#endif
mov.l r0,@r1
mov.l @r15+,r0
mov.l @r15+,r1
@@ -303,19 +336,41 @@ $current_thread_info:
.long __current_thread_info
$cpu_mode:
.long __cpu_mode
+#ifdef CONFIG_SMP
+$cpuid:
+ .long sh2_cpuid_addr
+#endif
! common exception handler
#include "../../entry-common.S"
+
+#ifdef CONFIG_NR_CPUS
+#define NR_CPUS CONFIG_NR_CPUS
+#else
+#define NR_CPUS 1
+#endif
.data
! cpu operation mode
! bit30 = MD (compatible SH3/4)
__cpu_mode:
+ .rept NR_CPUS
.long 0x40000000
+ .endr
+
+#ifdef CONFIG_SMP
+.global sh2_cpuid_addr
+sh2_cpuid_addr:
+ .long dummy_cpuid
+dummy_cpuid:
+ .long 0
+#endif
.section .bss
__current_thread_info:
+ .rept NR_CPUS
.long 0
+ .endr
ENTRY(exception_handling_table)
.space 4*32
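With CONFIG_SMP the entry code above stops treating __cpu_mode and __current_thread_info as single words and indexes them per CPU: it fetches the CPU number through sh2_cpuid_addr (a pointer to a pointer, hence the two "mov.l @r3,r3" loads), scales it by 4 with shll2, and adds it to the array base. A rough C model of that address computation (the helper is illustrative only; the symbol names match the assembly):

/* illustrative sketch, not code from the patch */
extern unsigned int *sh2_cpuid_addr;            /* -> cpuid MMIO word, or dummy_cpuid */
extern unsigned long __cpu_mode[];              /* now .rept NR_CPUS words */
extern unsigned long __current_thread_info[];

static inline unsigned long *percpu_slot(unsigned long *base)
{
    unsigned int cpu = *sh2_cpuid_addr;         /* the double dereference above */

    return base + cpu;                          /* shll2 + add: cpu * 4 bytes */
}

/* e.g. the saved mode word for this CPU: *percpu_slot(__cpu_mode) */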
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 6c687ae812ef..4205f6d42b69 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -10,10 +10,27 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/smp.h>
+#include <linux/io.h>
#include <asm/processor.h>
#include <asm/cache.h>
-void cpu_probe(void)
+#if defined(CONFIG_CPU_J2)
+extern u32 __iomem *j2_ccr_base;
+static int __init scan_cache(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ if (!of_flat_dt_is_compatible(node, "jcore,cache"))
+ return 0;
+
+ j2_ccr_base = (u32 __iomem *)of_flat_dt_translate_address(node);
+
+ return 1;
+}
+#endif
+
+void __ref cpu_probe(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
boot_cpu_data.type = CPU_SH7619;
@@ -24,10 +41,30 @@ void cpu_probe(void)
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
boot_cpu_data.dcache.flags = 0;
#endif
+
+#if defined(CONFIG_CPU_J2)
+ unsigned cpu = hard_smp_processor_id();
+ if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
+ if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
+ if (cpu != 0) return;
+ boot_cpu_data.type = CPU_J2;
+
+ /* These defaults are appropriate for the original/current
+ * J2 cache. Once there is a proper framework for getting cache
+ * info from device tree, we should switch to that. */
+ boot_cpu_data.dcache.ways = 1;
+ boot_cpu_data.dcache.sets = 256;
+ boot_cpu_data.dcache.entry_shift = 5;
+ boot_cpu_data.dcache.linesz = 32;
+ boot_cpu_data.dcache.flags = 0;
+
+ boot_cpu_data.flags |= CPU_HAS_CAS_L;
+#else
/*
* SH-2 doesn't have separate caches
*/
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
+#endif
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH2;
}
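cpu_probe() advertises the J2's cas.l instruction by setting CPU_HAS_CAS_L in boot_cpu_data.flags, using the new bit added to cpu-features.h earlier in this diff. Code that wants to pick a cas.l-based path at run time would test the flag the way the other CPU_HAS_* bits are used, roughly as follows (illustrative sketch; the helper name and include set are assumptions):

#include <asm/processor.h>      /* boot_cpu_data */
#include <asm/cpu-features.h>   /* CPU_HAS_CAS_L, added above */

static inline int cpu_has_cas_l(void)
{
    return boot_cpu_data.flags & CPU_HAS_CAS_L;
}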
diff --git a/arch/sh/kernel/cpu/sh2/smp-j2.c b/arch/sh/kernel/cpu/sh2/smp-j2.c
new file mode 100644
index 000000000000..6ccd7e4dc008
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2/smp-j2.c
@@ -0,0 +1,139 @@
+/*
+ * SMP support for J2 processor
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <asm/cmpxchg.h>
+
+DEFINE_PER_CPU(unsigned, j2_ipi_messages);
+
+extern u32 *sh2_cpuid_addr;
+static u32 *j2_ipi_trigger;
+static int j2_ipi_irq;
+
+static irqreturn_t j2_ipi_interrupt_handler(int irq, void *arg)
+{
+ unsigned cpu = hard_smp_processor_id();
+ volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
+ unsigned messages, i;
+
+ do messages = *pmsg;
+ while (cmpxchg(pmsg, messages, 0) != messages);
+
+ if (!messages) return IRQ_NONE;
+
+ for (i=0; i<SMP_MSG_NR; i++)
+ if (messages & (1U<<i))
+ smp_message_recv(i);
+
+ return IRQ_HANDLED;
+}
+
+static void j2_smp_setup(void)
+{
+}
+
+static void j2_prepare_cpus(unsigned int max_cpus)
+{
+ struct device_node *np;
+ unsigned i, max = 1;
+
+ np = of_find_compatible_node(NULL, NULL, "jcore,ipi-controller");
+ if (!np)
+ goto out;
+
+ j2_ipi_irq = irq_of_parse_and_map(np, 0);
+ j2_ipi_trigger = of_iomap(np, 0);
+ if (!j2_ipi_irq || !j2_ipi_trigger)
+ goto out;
+
+ np = of_find_compatible_node(NULL, NULL, "jcore,cpuid-mmio");
+ if (!np)
+ goto out;
+
+ sh2_cpuid_addr = of_iomap(np, 0);
+ if (!sh2_cpuid_addr)
+ goto out;
+
+ if (request_irq(j2_ipi_irq, j2_ipi_interrupt_handler, IRQF_PERCPU,
+ "ipi", (void *)j2_ipi_interrupt_handler) != 0)
+ goto out;
+
+ max = max_cpus;
+out:
+ /* Disable any cpus past max_cpus, or all secondaries if we didn't
+ * get the necessary resources to support SMP. */
+ for (i=max; i<NR_CPUS; i++) {
+ set_cpu_possible(i, false);
+ set_cpu_present(i, false);
+ }
+}
+
+static void j2_start_cpu(unsigned int cpu, unsigned long entry_point)
+{
+ struct device_node *np;
+ u32 regs[2];
+ void __iomem *release, *initpc;
+
+ if (!cpu) return;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np) return;
+
+ if (of_property_read_u32_array(np, "cpu-release-addr", regs, 2)) return;
+ release = ioremap_nocache(regs[0], sizeof(u32));
+ initpc = ioremap_nocache(regs[1], sizeof(u32));
+
+ __raw_writel(entry_point, initpc);
+ __raw_writel(1, release);
+
+ iounmap(initpc);
+ iounmap(release);
+
+ pr_info("J2 SMP: requested start of cpu %u\n", cpu);
+}
+
+static unsigned int j2_smp_processor_id(void)
+{
+ return __raw_readl(sh2_cpuid_addr);
+}
+
+static void j2_send_ipi(unsigned int cpu, unsigned int message)
+{
+ volatile unsigned *pmsg;
+ unsigned old;
+ unsigned long val;
+
+ /* There is only one IPI interrupt shared by all messages, so
+ * we keep a separate interrupt flag per message type in software. */
+ pmsg = &per_cpu(j2_ipi_messages, cpu);
+ do old = *pmsg;
+ while (cmpxchg(pmsg, old, old|(1U<<message)) != old);
+
+ /* Generate the actual interrupt by writing to CCRn bit 28. */
+ val = __raw_readl(j2_ipi_trigger + cpu);
+ __raw_writel(val | (1U<<28), j2_ipi_trigger + cpu);
+}
+
+static struct plat_smp_ops j2_smp_ops = {
+ .smp_setup = j2_smp_setup,
+ .prepare_cpus = j2_prepare_cpus,
+ .start_cpu = j2_start_cpu,
+ .smp_processor_id = j2_smp_processor_id,
+ .send_ipi = j2_send_ipi,
+ .cpu_die = native_cpu_die,
+ .cpu_disable = native_cpu_disable,
+ .play_dead = native_play_dead,
+};
+
+CPU_METHOD_OF_DECLARE(j2_cpu_method, "jcore,spin-table", &j2_smp_ops);
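The IPI path above multiplexes every SMP_MSG_* type over one interrupt: the sender atomically ORs a message bit into the target CPU's j2_ipi_messages word and then pokes bit 28 of that CPU's CCR, and the handler atomically swaps the word back to zero before dispatching each set bit, so nothing is lost or double-handled. The same producer/consumer pattern in portable C11 atomics, for illustration (names invented):

#include <stdatomic.h>

#define MSG_NR   4                              /* like SMP_MSG_NR */
#define MAX_CPUS 2

static _Atomic unsigned ipi_messages[MAX_CPUS]; /* one pending-bits word per CPU */

static void ipi_send(unsigned cpu, unsigned msg)
{
    /* set the bit; equivalent to the cmpxchg loop in j2_send_ipi() */
    atomic_fetch_or_explicit(&ipi_messages[cpu], 1u << msg, memory_order_release);
    /* ...then raise the single shared interrupt (CCRn bit 28 on J2) */
}

static void ipi_receive(unsigned cpu, void (*handle)(unsigned msg))
{
    /* claim all pending bits at once, as the handler's cmpxchg loop does */
    unsigned pending = atomic_exchange_explicit(&ipi_messages[cpu], 0,
                                                memory_order_acquire);

    for (unsigned i = 0; i < MSG_NR; i++)
        if (pending & (1u << i))
            handle(i);
}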
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index 5b0bfcda6d0b..eadb669a7329 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -13,7 +13,7 @@
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t addr = page_to_phys(page) + offset;
@@ -25,7 +25,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 9d209a07235e..e1d751ae2498 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -1009,10 +1009,8 @@ static void __init dwarf_unwinder_cleanup(void)
rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
kfree(cie);
- if (dwarf_reg_pool)
- mempool_destroy(dwarf_reg_pool);
- if (dwarf_frame_pool)
- mempool_destroy(dwarf_frame_pool);
+ mempool_destroy(dwarf_reg_pool);
+ mempool_destroy(dwarf_frame_pool);
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
}
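The dropped NULL checks rely on mempool_destroy() accepting a NULL pool as a no-op, the same convention kfree() follows, so cleanup paths for partially initialised state can call it unconditionally. A minimal sketch of the resulting pattern (illustrative helper, not patch code):

#include <linux/mempool.h>

static void destroy_unwind_pools(mempool_t *reg_pool, mempool_t *frame_pool)
{
    /* mempool_destroy(NULL) returns immediately, so no "if (pool)" guards */
    mempool_destroy(reg_pool);
    mempool_destroy(frame_pool);
}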
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index 974bc152cc84..4e352c3f79e6 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -67,7 +67,7 @@ ENTRY(_stext)
ldc r0, r6_bank
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_FLATTREE
mov r4, r12 ! Store device tree blob pointer in r12
#endif
@@ -318,7 +318,7 @@ ENTRY(_stext)
10:
#endif
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_FLATTREE
mov.l 8f, r0 ! Make flat device tree available early.
jsr @r0
mov r12, r4
@@ -349,7 +349,7 @@ ENTRY(stack_start)
5: .long start_kernel
6: .long cpu_init
7: .long init_thread_union
-#if defined(CONFIG_OF)
+#if defined(CONFIG_OF_FLATTREE)
8: .long sh_fdt_init
#endif
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 5d34605b58b5..e7b49d81053e 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -242,7 +242,7 @@ void __init __weak plat_early_device_setup(void)
{
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_FLATTREE
void __ref sh_fdt_init(phys_addr_t dt_phys)
{
static int done = 0;
@@ -251,7 +251,11 @@ void __ref sh_fdt_init(phys_addr_t dt_phys)
/* Avoid calling an __init function on secondary cpus. */
if (done) return;
+#ifdef CONFIG_USE_BUILTIN_DTB
+ dt_virt = __dtb_start;
+#else
dt_virt = phys_to_virt(dt_phys);
+#endif
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("Error: invalid device tree blob"
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 734234be2f01..254bc22ee57d 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -386,3 +386,17 @@ ENTRY(sys_call_table)
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
+ .long sys_sched_getattr
+ .long sys_sched_setattr /* 370 */
+ .long sys_renameat2
+ .long sys_seccomp
+ .long sys_getrandom
+ .long sys_memfd_create
+ .long sys_bpf /* 375 */
+ .long sys_execveat
+ .long sys_userfaultfd
+ .long sys_membarrier
+ .long sys_mlock2
+ .long sys_copy_file_range /* 380 */
+ .long sys_preadv2
+ .long sys_pwritev2
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 579fcb9a896b..d6a27f7a4c54 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -406,3 +406,17 @@ sys_call_table:
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
+ .long sys_sched_getattr /* 380 */
+ .long sys_sched_setattr
+ .long sys_renameat2
+ .long sys_seccomp
+ .long sys_getrandom
+ .long sys_memfd_create /* 385 */
+ .long sys_bpf
+ .long sys_execveat
+ .long sys_userfaultfd
+ .long sys_membarrier
+ .long sys_mlock2 /* 390 */
+ .long sys_copy_file_range
+ .long sys_preadv2
+ .long sys_pwritev2
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index d6d0a986c6e9..fcd5e41977d1 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -11,7 +11,6 @@
* for more details.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/timex.h>
@@ -50,27 +49,31 @@ int update_persistent_clock(struct timespec now)
}
#endif
-unsigned int get_rtc_time(struct rtc_time *tm)
+static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
- if (rtc_sh_get_time != null_rtc_get_time) {
- struct timespec tv;
+ struct timespec tv;
- rtc_sh_get_time(&tv);
- rtc_time_to_tm(tv.tv_sec, tm);
- }
-
- return RTC_24H;
+ rtc_sh_get_time(&tv);
+ rtc_time_to_tm(tv.tv_sec, tm);
+ return 0;
}
-EXPORT_SYMBOL(get_rtc_time);
-int set_rtc_time(struct rtc_time *tm)
+static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned long secs;
rtc_tm_to_time(tm, &secs);
- return rtc_sh_set_time(secs);
+ if ((rtc_sh_set_time == null_rtc_set_time) ||
+ (rtc_sh_set_time(secs) < 0))
+ return -EOPNOTSUPP;
+
+ return 0;
}
-EXPORT_SYMBOL(set_rtc_time);
+
+static const struct rtc_class_ops rtc_generic_ops = {
+ .read_time = rtc_generic_get_time,
+ .set_time = rtc_generic_set_time,
+};
static int __init rtc_generic_init(void)
{
@@ -79,11 +82,14 @@ static int __init rtc_generic_init(void)
if (rtc_sh_get_time == null_rtc_get_time)
return -ENODEV;
- pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+ &rtc_generic_ops,
+ sizeof(rtc_generic_ops));
+
return PTR_ERR_OR_ZERO(pdev);
}
-module_init(rtc_generic_init);
+device_initcall(rtc_generic_init);
void (*board_time_init)(void);
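Rather than exporting get_rtc_time()/set_rtc_time(), the SH code now hands a struct rtc_class_ops to the "rtc-generic" platform device as platform data. The assumption behind the platform_device_register_data() call above is that the driver retrieves those ops with dev_get_platdata() and registers them with the RTC core; a sketch of such a consumer (not the actual rtc-generic probe, just the expected shape):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static int sketch_rtc_probe(struct platform_device *pdev)
{
    const struct rtc_class_ops *ops = dev_get_platdata(&pdev->dev);
    struct rtc_device *rtc;

    if (!ops)
        return -ENODEV;                 /* no ops were passed in */

    rtc = devm_rtc_device_register(&pdev->dev, "rtc-generic", ops, THIS_MODULE);
    return PTR_ERR_OR_ZERO(rtc);
}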
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index cee6b9999d86..92c3bd96aee5 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -4,7 +4,8 @@
obj-y := alignment.o cache.o init.o consistent.o mmap.o
-cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o
+cacheops-$(CONFIG_CPU_J2) := cache-j2.o
+cacheops-$(CONFIG_CPU_SUBTYPE_SH7619) := cache-sh2.o
cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index ecfc6b0c1da1..bf95fdaedd0c 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -17,7 +17,6 @@
* for more details.
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
@@ -70,6 +69,4 @@ static int __init asids_debugfs_init(void)
return PTR_ERR_OR_ZERO(asids_dentry);
}
-module_init(asids_debugfs_init);
-
-MODULE_LICENSE("GPL v2");
+device_initcall(asids_debugfs_init);
diff --git a/arch/sh/mm/cache-j2.c b/arch/sh/mm/cache-j2.c
new file mode 100644
index 000000000000..391698bcac5b
--- /dev/null
+++ b/arch/sh/mm/cache-j2.c
@@ -0,0 +1,65 @@
+/*
+ * arch/sh/mm/cache-j2.c
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/cpumask.h>
+
+#include <asm/cache.h>
+#include <asm/addrspace.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+
+#define ICACHE_ENABLE 0x1
+#define DCACHE_ENABLE 0x2
+#define CACHE_ENABLE (ICACHE_ENABLE | DCACHE_ENABLE)
+#define ICACHE_FLUSH 0x100
+#define DCACHE_FLUSH 0x200
+#define CACHE_FLUSH (ICACHE_FLUSH | DCACHE_FLUSH)
+
+u32 __iomem *j2_ccr_base;
+
+static void j2_flush_icache(void *args)
+{
+ unsigned cpu;
+ for_each_possible_cpu(cpu)
+ __raw_writel(CACHE_ENABLE | ICACHE_FLUSH, j2_ccr_base + cpu);
+}
+
+static void j2_flush_dcache(void *args)
+{
+ unsigned cpu;
+ for_each_possible_cpu(cpu)
+ __raw_writel(CACHE_ENABLE | DCACHE_FLUSH, j2_ccr_base + cpu);
+}
+
+static void j2_flush_both(void *args)
+{
+ unsigned cpu;
+ for_each_possible_cpu(cpu)
+ __raw_writel(CACHE_ENABLE | CACHE_FLUSH, j2_ccr_base + cpu);
+}
+
+void __init j2_cache_init(void)
+{
+ if (!j2_ccr_base)
+ return;
+
+ local_flush_cache_all = j2_flush_both;
+ local_flush_cache_mm = j2_flush_both;
+ local_flush_cache_dup_mm = j2_flush_both;
+ local_flush_cache_page = j2_flush_both;
+ local_flush_cache_range = j2_flush_both;
+ local_flush_dcache_page = j2_flush_dcache;
+ local_flush_icache_range = j2_flush_icache;
+ local_flush_icache_page = j2_flush_icache;
+ local_flush_cache_sigtramp = j2_flush_icache;
+
+ pr_info("Initial J2 CCR is %.8x\n", __raw_readl(j2_ccr_base));
+}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index e58cfbf45150..36554a9ea99b 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -42,6 +42,8 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
{
preempt_disable();
+ /* Needing IPI for cross-core flush is SHX3-specific. */
+#ifdef CONFIG_CPU_SHX3
/*
* It's possible that this gets called early on when IRQs are
* still disabled due to ioremapping by the boot CPU, so don't
@@ -49,6 +51,7 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
*/
if (num_online_cpus() > 1)
smp_call_function(func, info, wait);
+#endif
func(info);
@@ -244,7 +247,11 @@ void flush_cache_sigtramp(unsigned long address)
static void compute_alias(struct cache_info *c)
{
+#ifdef CONFIG_MMU
c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+#else
+ c->alias_mask = 0;
+#endif
c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
@@ -305,7 +312,11 @@ void __init cpu_cache_init(void)
if (unlikely(cache_disabled))
goto skip;
- if (boot_cpu_data.family == CPU_FAMILY_SH2) {
+ if (boot_cpu_data.type == CPU_J2) {
+ extern void __weak j2_cache_init(void);
+
+ j2_cache_init();
+ } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
extern void __weak sh2_cache_init(void);
sh2_cache_init();
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index b81d9dbf9fef..92b6976fde59 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -34,7 +34,7 @@ fs_initcall(dma_init);
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret, *ret_nocache;
int order = get_order(size);
@@ -66,7 +66,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int order = get_order(size);
unsigned long pfn = dma_handle >> PAGE_SHIFT;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 0c99ec2e7ed8..d09ddfe58fd8 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -34,7 +34,7 @@
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void __iomem * __init_refok
+void __iomem * __ref
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
pgprot_t pgprot, void *caller)
{
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 546293d9e6c5..59b09600dd32 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
select OLD_SIGSUSPEND
select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
+ select HAVE_ARCH_HARDENED_USERCOPY
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index 57f26c398dc9..4dd268a3a8b0 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -140,16 +140,6 @@ void ioport_unmap(void __iomem *);
struct pci_dev;
void pci_iounmap(struct pci_dev *dev, void __iomem *);
-
-
-/*
- * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
- * so rtc_port is static in it. This should not change unless a new
- * hardware pops up.
- */
-#define RTC_PORT(x) (rtc_port + (x))
-#define RTC_ALWAYS_BCD 0
-
static inline int sbus_can_dma_64bit(void)
{
return 0; /* actually, sparc_cpu_model==sun4d */
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 022d16008a00..2303635158f5 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -55,9 +55,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc,
- resource_size_t *start, resource_size_t *end);
#endif /* __KERNEL__ */
#endif /* __SPARC64_PCI_H */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index bde59825d06c..3d7b925f6516 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -222,32 +222,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
*
* Note that there are only 8 bits available.
*/
-#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
#define test_thread_64bit_stack(__SP) \
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 57aca2792d29..341a5a133f48 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -248,22 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (n && __access_ok((unsigned long) to, n))
+ if (n && __access_ok((unsigned long) to, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
- else
+ } else
return n;
}
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ if (!__builtin_constant_p(n))
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
}
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (n && __access_ok((unsigned long) from, n))
+ if (n && __access_ok((unsigned long) from, n)) {
+ if (!__builtin_constant_p(n))
+ check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n);
- else
+ } else
return n;
}
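These hunks are the sparc side of HAVE_ARCH_HARDENED_USERCOPY (selected in the Kconfig hunk above): when the copy length is not a compile-time constant, check_object_size() verifies that the kernel buffer lies entirely within a sane object (a single slab allocation, the current stack frame, and so on) before data crosses the user boundary; constant lengths are skipped since they cannot be attacker controlled and are cheap to audit statically. The arch-independent shape of the pattern, as a sketch (the copy primitive is a stand-in):

#include <linux/thread_info.h>  /* check_object_size() */
#include <linux/compiler.h>

extern unsigned long __arch_copy_user(void __user *to, const void *from,
                                      unsigned long n);  /* stands in for __copy_user */

static inline unsigned long
hardened_copy_to_user(void __user *to, const void *from, unsigned long n)
{
    if (!__builtin_constant_p(n))           /* only runtime-variable lengths */
        check_object_size(from, n, true);   /* true: the kernel buffer is the source */
    return __arch_copy_user(to, from, n);
}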
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e9a51d64974d..8bda94fab8e8 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -210,8 +210,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_from_user(to, from, size);
+ unsigned long ret;
+ if (!__builtin_constant_p(size))
+ check_object_size(to, size, false);
+
+ ret = ___copy_from_user(to, from, size);
if (unlikely(ret))
ret = copy_from_user_fixup(to, from, size);
@@ -227,8 +231,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
- unsigned long ret = ___copy_to_user(to, from, size);
+ unsigned long ret;
+ if (!__builtin_constant_p(size))
+ check_object_size(from, size, true);
+ ret = ___copy_to_user(to, from, size);
if (unlikely(ret))
ret = copy_to_user_fixup(to, from, size);
return ret;
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 37686828c3d9..5c615abff030 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -196,7 +196,7 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long order, first_page;
struct iommu *iommu;
@@ -245,7 +245,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
unsigned long order, npages;
@@ -263,7 +263,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -385,7 +385,7 @@ do_flush_sync:
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -431,7 +431,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
@@ -607,7 +607,7 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index ffd5ff4678cf..2344103414d1 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -260,7 +260,7 @@ EXPORT_SYMBOL(sbus_set_sbus64);
*/
static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct platform_device *op = to_platform_device(dev);
unsigned long len_total = PAGE_ALIGN(len);
@@ -315,7 +315,7 @@ err_nopages:
}
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, struct dma_attrs *attrs)
+ dma_addr_t ba, unsigned long attrs)
{
struct resource *res;
struct page *pgv;
@@ -355,7 +355,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t len,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = page_address(page) + offset;
@@ -371,20 +371,20 @@ static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
}
static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
mmu_release_scsi_one(dev, ba, n);
}
static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
return n;
}
static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
mmu_release_scsi_sgl(dev, sg, n);
}
@@ -429,7 +429,7 @@ arch_initcall(sparc_register_ioport);
*/
static void *pci32_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *pba, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long len_total = PAGE_ALIGN(len);
void *va;
@@ -482,7 +482,7 @@ err_nopages:
* past this call are illegal.
*/
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, struct dma_attrs *attrs)
+ dma_addr_t ba, unsigned long attrs)
{
struct resource *res;
@@ -518,14 +518,14 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
if (dir != PCI_DMA_TODEVICE)
dma_make_coherent(ba, PAGE_ALIGN(size));
@@ -548,7 +548,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
*/
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int n;
@@ -567,7 +567,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
*/
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int n;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index c2b202d763a1..9c1878f4fa9f 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -986,16 +986,18 @@ void pci_resource_to_user(const struct pci_dev *pdev, int bar,
const struct resource *rp, resource_size_t *start,
resource_size_t *end)
{
- struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long offset;
-
- if (rp->flags & IORESOURCE_IO)
- offset = pbm->io_space.start;
- else
- offset = pbm->mem_space.start;
+ struct pci_bus_region region;
- *start = rp->start - offset;
- *end = rp->end - offset;
+ /*
+ * "User" addresses are shown in /sys/devices/pci.../.../resource
+ * and /proc/bus/pci/devices and used as mmap offsets for
+ * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
+ *
+ * On sparc, these are PCI bus addresses, i.e., raw BAR values.
+ */
+ pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
+ *start = region.start;
+ *end = region.end;
}
void pcibios_set_master(struct pci_dev *dev)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 836e8cef47e2..61c6f935accc 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -130,7 +130,7 @@ static inline long iommu_batch_end(void)
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, order, first_page, npages, n;
struct iommu *iommu;
@@ -213,7 +213,7 @@ static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
}
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
- dma_addr_t dvma, struct dma_attrs *attrs)
+ dma_addr_t dvma, unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -235,7 +235,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
@@ -294,7 +294,7 @@ iommu_map_fail:
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -322,7 +322,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot;
@@ -466,7 +466,7 @@ iommu_map_failed:
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct scatterlist *sg;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 7d02b1fef025..d79b3b734245 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -150,6 +150,13 @@ SECTIONS
}
PERCPU_SECTION(SMP_CACHE_BYTES)
+#ifdef CONFIG_JUMP_LABEL
+ . = ALIGN(PAGE_SIZE);
+ .exit.text : {
+ EXIT_TEXT
+ }
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, 0, 0)
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index c1467ac59ce6..b7659b8f1117 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -166,32 +166,5 @@ extern void _cpu_idle(void);
#ifdef __tilegx__
#define TS_COMPAT 0x0001 /* 32-bit compatibility mode */
#endif
-#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_THREAD_INFO_H */
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index b6bc0547a4f6..09bb774b39cd 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -34,7 +34,7 @@
static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 dma_mask = (dev && dev->coherent_dma_mask) ?
dev->coherent_dma_mask : DMA_BIT_MASK(32);
@@ -78,7 +78,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
*/
static void tile_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
homecache_free_pages((unsigned long)vaddr, get_order(size));
}
@@ -202,7 +202,7 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -224,7 +224,7 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -240,7 +240,7 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
@@ -252,7 +252,7 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
@@ -343,7 +343,7 @@ EXPORT_SYMBOL(tile_dma_map_ops);
static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int node = dev_to_node(dev);
int order = get_order(size);
@@ -368,14 +368,14 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
*/
static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
homecache_free_pages((unsigned long)vaddr, get_order(size));
}
static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -400,7 +400,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static void tile_pci_dma_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -416,7 +416,7 @@ static void tile_pci_dma_unmap_sg(struct device *dev,
static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
@@ -429,7 +429,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
@@ -531,7 +531,7 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops);
#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
gfp |= GFP_DMA;
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
@@ -539,7 +539,7 @@ static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 378f5d8d1ec8..9d449caf8910 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -60,6 +60,18 @@ SECTIONS
/* "Init" is divided into two areas with very different virtual addresses. */
INIT_TEXT_SECTION(PAGE_SIZE)
+ /*
+ * Some things, like the __jump_table, may contain symbol references
+ * to __exit text, so include such text in the final image if so.
+ * In that case we also override the _einittext from INIT_TEXT_SECTION.
+ */
+#ifdef CONFIG_JUMP_LABEL
+ .exit.text : {
+ EXIT_TEXT
+ _einittext = .;
+ }
+#endif
+
/* Now we skip back to PAGE_OFFSET for the data. */
. = (. - TEXT_OFFSET + PAGE_OFFSET);
#undef LOAD_OFFSET
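The comment above is the key point: jump-label patch sites are recorded in __jump_table, and a static key used from __exit code creates a reference into .exit.text that the linker must still resolve, which is why the section is kept (and _einittext adjusted) when CONFIG_JUMP_LABEL is set. A hypothetical built-in driver that would produce exactly such a reference:

#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

static int __init demo_init(void)
{
    static_branch_enable(&demo_key);
    return 0;
}

static void __exit demo_exit(void)
{
    /* this branch site sits in .exit.text but is listed in __jump_table */
    if (static_branch_unlikely(&demo_key))
        pr_info("demo key was enabled\n");
}

module_init(demo_init);
module_exit(demo_exit);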
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index cc0013475444..fd443852103c 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -1,14 +1,17 @@
config UML
bool
default y
+ select ARCH_HAS_KCOV
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_UID16
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_DEBUG_KMEMLEAK
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
select GENERIC_CLOCKEVENTS
+ select HAVE_GCC_PLUGINS
select TTY # Needed for line.c
config MMU
@@ -30,10 +33,9 @@ config PCI
config PCMCIA
bool
-# Yet to do!
config TRACE_IRQFLAGS_SUPPORT
bool
- default n
+ default y
config LOCKDEP_SUPPORT
bool
diff --git a/arch/um/Makefile b/arch/um/Makefile
index e3abe6f3156d..0ca46ededfc7 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -78,8 +78,8 @@ include $(ARCH_DIR)/Makefile-os-$(OS)
KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/include \
-I$(srctree)/$(HOST_DIR)/include/uapi \
- -I$(HOST_DIR)/include/generated \
- -I$(HOST_DIR)/include/generated/uapi
+ -I$(objtree)/$(HOST_DIR)/include/generated \
+ -I$(objtree)/$(HOST_DIR)/include/generated/uapi
# -Derrno=kernel_errno - This turns all kernel references to errno into
# kernel_errno to separate them from the libc errno. This allows -fno-common
diff --git a/arch/um/include/asm/irqflags.h b/arch/um/include/asm/irqflags.h
index c780d8a16773..3bb221e1d5a4 100644
--- a/arch/um/include/asm/irqflags.h
+++ b/arch/um/include/asm/irqflags.h
@@ -6,37 +6,33 @@ extern int set_signals(int enable);
extern void block_signals(void);
extern void unblock_signals(void);
+#define arch_local_save_flags arch_local_save_flags
static inline unsigned long arch_local_save_flags(void)
{
return get_signals();
}
+#define arch_local_irq_restore arch_local_irq_restore
static inline void arch_local_irq_restore(unsigned long flags)
{
set_signals(flags);
}
+#define arch_local_irq_enable arch_local_irq_enable
static inline void arch_local_irq_enable(void)
{
unblock_signals();
}
+#define arch_local_irq_disable arch_local_irq_disable
static inline void arch_local_irq_disable(void)
{
block_signals();
}
-static inline unsigned long arch_local_irq_save(void)
-{
- unsigned long flags;
- flags = arch_local_save_flags();
- arch_local_irq_disable();
- return flags;
-}
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED (SIGIO|SIGVTALRM)
-static inline bool arch_irqs_disabled(void)
-{
- return arch_local_save_flags() == 0;
-}
+#include <asm-generic/irqflags.h>
#endif
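Defining arch_local_save_flags()/arch_local_irq_restore() (plus the enable/disable helpers) and the ARCH_IRQ_* constants is enough for asm-generic/irqflags.h to supply the rest, which is why the open-coded arch_local_irq_save() and arch_irqs_disabled() can be deleted. The generic fallbacks are roughly the following (paraphrased from memory; the exact header may differ in detail):

/* paraphrase of <asm-generic/irqflags.h>, not UML code */
#ifndef arch_local_irq_save
static inline unsigned long arch_local_irq_save(void)
{
    unsigned long flags = arch_local_save_flags();  /* get_signals() on UML */

    arch_local_irq_disable();                       /* block_signals() on UML */
    return flags;
}
#endif

#ifndef arch_irqs_disabled_flags
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
    return flags == ARCH_IRQ_DISABLED;              /* 0 here, matching the old test */
}
#endif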
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index a6a5e42caaef..2f36d515762e 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,6 +3,11 @@
# Licensed under the GPL
#
+# Don't instrument UML-specific code; without this, we may crash when
+# accessing the instrumentation buffer for the first time from the
+# kernel.
+KCOV_INSTRUMENT := n
+
CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
-DELF_ARCH=$(LDS_ELF_ARCH) \
-DELF_FORMAT=$(LDS_ELF_FORMAT) \
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 55cead809b18..48bae81f8dca 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -37,8 +37,6 @@ static int __init read_initrd(void)
}
area = alloc_bootmem(size);
- if (area == NULL)
- return 0;
if (load_initrd(initrd, area, size) == -1)
return 0;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 16630e75f056..e8175a8aa22c 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -319,9 +319,6 @@ int __init linux_main(int argc, char **argv)
start_vm = VMALLOC_START;
- setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
- mem_total_pages(physmem_size, iomem_size, highmem);
-
virtmem_size = physmem_size;
stack = (unsigned long) argv;
stack &= ~(1024 * 1024 - 1);
@@ -334,7 +331,6 @@ int __init linux_main(int argc, char **argv)
printf("Kernel virtual memory size shrunk to %lu bytes\n",
virtmem_size);
- stack_protections((unsigned long) &init_thread_info);
os_flush_stdout();
return start_uml();
@@ -342,6 +338,10 @@ int __init linux_main(int argc, char **argv)
void __init setup_arch(char **cmdline_p)
{
+ stack_protections((unsigned long) &init_thread_info);
+ setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
+ mem_total_pages(physmem_size, iomem_size, highmem);
+
paging_init();
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 08ff5094fcdd..ada473bf6f46 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -3,6 +3,9 @@
# Licensed under the GPL
#
+# Don't instrument UML-specific code
+KCOV_INSTRUMENT := n
+
obj-y = aio.o execvp.o file.o helper.o irq.o main.o mem.o process.o \
registers.o sigio.o signal.o start_up.o time.o tty.o \
umid.o user_syms.o util.o drivers/ skas/
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 8acaf4e384c0..a86d7cc2c2d8 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -15,6 +15,7 @@
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
+#include <um_malloc.h>
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
@@ -32,7 +33,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
struct uml_pt_regs *r;
int save_errno = errno;
- r = malloc(sizeof(struct uml_pt_regs));
+ r = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
if (!r)
panic("out of memory");
@@ -91,7 +92,7 @@ static void timer_real_alarm_handler(mcontext_t *mc)
{
struct uml_pt_regs *regs;
- regs = malloc(sizeof(struct uml_pt_regs));
+ regs = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
if (!regs)
panic("out of memory");
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
index d45fa5f3e9c4..62137d13c6f9 100644
--- a/arch/unicore32/kernel/pci.c
+++ b/arch/unicore32/kernel/pci.c
@@ -265,10 +265,8 @@ static int __init pci_common_init(void)
pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq);
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_size_bridges(puv3_bus);
- pci_bus_assign_resources(puv3_bus);
- }
+ pci_bus_size_bridges(puv3_bus);
+ pci_bus_assign_resources(puv3_bus);
pci_bus_add_devices(puv3_bus);
return 0;
}
@@ -279,9 +277,6 @@ char * __init pcibios_setup(char *str)
if (!strcmp(str, "debug")) {
debug_pci = 1;
return NULL;
- } else if (!strcmp(str, "firmware")) {
- pci_add_flags(PCI_PROBE_ONLY);
- return NULL;
}
return str;
}
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
index 16c08b2143a7..3e9f6489ba38 100644
--- a/arch/unicore32/mm/dma-swiotlb.c
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -19,14 +19,14 @@
static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2fa55851d2a9..c580d8c33562 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -80,6 +80,7 @@ config X86
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_AOUT if X86_32
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +92,7 @@ config X86
select HAVE_ARCH_SOFT_DIRTY if X86_64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_EBPF_JIT if X86_64
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
@@ -111,6 +113,7 @@ config X86
select HAVE_FUNCTION_GRAPH_FP_TEST
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
+ select HAVE_GCC_PLUGINS
select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_HW_BREAKPOINT
select HAVE_IDE
@@ -151,6 +154,7 @@ config X86
select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
select PERF_EVENTS
select RTC_LIB
+ select RTC_MC146818_LIB
select SPARSE_IRQ
select SRCU
select SYSCTL_EXCEPTION_TRACE
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index be8e688fa0d4..12ea8f8384f4 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -96,7 +96,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
$(call if_changed,zoffset)
-AFLAGS_header.o += -I$(obj)
+AFLAGS_header.o += -I$(objtree)/$(obj)
$(obj)/header.o: $(obj)/zoffset.h
LDFLAGS_setup.elf := -T
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index a1e71d431fed..1433f6b4607d 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -204,8 +204,12 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
* handling, because syscall restart has a fixup for compat
* syscalls. The fixup is exercised by the ptrace_syscall_32
* selftest.
+ *
+ * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
+ * special case only applies after poking regs and before the
+ * very next return to user mode.
*/
- ti->status &= ~TS_COMPAT;
+ ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif
user_enter_irqoff();
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 4cddd17153fb..f848572169ea 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -294,7 +294,7 @@
# 285 sys_setaltroot
286 i386 add_key sys_add_key
287 i386 request_key sys_request_key
-288 i386 keyctl sys_keyctl
+288 i386 keyctl sys_keyctl compat_sys_keyctl
289 i386 ioprio_set sys_ioprio_set
290 i386 ioprio_get sys_ioprio_get
291 i386 inotify_init sys_inotify_init
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 6ba89a1ab0e5..d5409660f5de 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -75,7 +75,7 @@ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-fno-omit-frame-pointer -foptimize-sibling-calls \
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
-$(vobjs): KBUILD_CFLAGS += $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -145,6 +145,7 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 2f02d23a05ef..94d54d0defa7 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -96,9 +96,8 @@ static notrace cycle_t vread_pvclock(int *mode)
{
const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
cycle_t ret;
- u64 tsc, pvti_tsc;
- u64 last, delta, pvti_system_time;
- u32 version, pvti_tsc_to_system_mul, pvti_tsc_shift;
+ u64 last;
+ u32 version;
/*
* Note: The kernel and hypervisor must guarantee that cpu ID
@@ -123,29 +122,15 @@ static notrace cycle_t vread_pvclock(int *mode)
*/
do {
- version = pvti->version;
-
- smp_rmb();
+ version = pvclock_read_begin(pvti);
if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
*mode = VCLOCK_NONE;
return 0;
}
- tsc = rdtsc_ordered();
- pvti_tsc_to_system_mul = pvti->tsc_to_system_mul;
- pvti_tsc_shift = pvti->tsc_shift;
- pvti_system_time = pvti->system_time;
- pvti_tsc = pvti->tsc_timestamp;
-
- /* Make sure that the version double-check is last. */
- smp_rmb();
- } while (unlikely((version & 1) || version != pvti->version));
-
- delta = tsc - pvti_tsc;
- ret = pvti_system_time +
- pvclock_scale_delta(delta, pvti_tsc_to_system_mul,
- pvti_tsc_shift);
+ ret = __pvclock_read_cycles(pvti);
+ } while (pvclock_read_retry(pvti, version));
/* refer to vread_tsc() comment for rationale */
last = gtod->cycle_last;
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 63a03bb91497..4f741192846d 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -22,6 +22,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
+ if (hdr->e_type != ET_DYN)
+ fail("input is not a shared object\n");
+
/* Walk the segment table. */
for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
if (GET_LE(&pt[i].p_type) == PT_LOAD) {
@@ -49,6 +52,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
if (stripped_len < load_size)
fail("stripped input is too short\n");
+ if (!dyn)
+ fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n");
+
/* Walk the dynamic table */
for (i = 0; dyn + i < dyn_end &&
GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 155ea5324ae0..b26ee32f73e8 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -7,7 +7,8 @@
*/
#include <linux/perf_event.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 6011a573dd64..b28200dea715 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -12,7 +12,7 @@
*/
#include <linux/perf_event.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index c17f0de5fd39..d0efb5cb1b00 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -17,7 +17,8 @@
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -262,10 +263,13 @@ static bool check_hw_exists(void)
return true;
msr_fail:
- pr_cont("Broken PMU hardware detected, using software events only.\n");
- printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
- boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
- reg, val_new);
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ pr_cont("PMU not available due to virtualization, using software events only.\n");
+ } else {
+ pr_cont("Broken PMU hardware detected, using software events only.\n");
+ pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
+ reg, val_new);
+ }
return false;
}
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 3f3d0d67749b..463dc7a5a6c3 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1,3 +1,5 @@
+#include <linux/module.h>
+
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index d6063e438158..78b9c23e2d8d 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -1,4 +1,3 @@
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 3a27b93e6261..44461626830e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -9,7 +9,6 @@
#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
@@ -48,11 +47,11 @@ extern int dma_supported(struct device *hwdev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index fea7724141a0..e7f155c3045e 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -344,8 +344,8 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
*/
static inline int mmap_is_ia32(void)
{
- return config_enabled(CONFIG_X86_32) ||
- (config_enabled(CONFIG_COMPAT) &&
+ return IS_ENABLED(CONFIG_X86_32) ||
+ (IS_ENABLED(CONFIG_COMPAT) &&
test_thread_flag(TIF_ADDR32));
}
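The config_enabled() to IS_ENABLED() conversion above (and in the following files) is mechanical: IS_ENABLED() evaluates to 1 when the option is built-in or modular and to 0 otherwise, so it can sit in an ordinary C condition that the compiler folds away while the code still gets parsed and type-checked, unlike an #ifdef block. A hypothetical use, for illustration only:

static inline bool demo_wants_compat_layout(void)
{
	return IS_ENABLED(CONFIG_COMPAT) && test_thread_flag(TIF_ADDR32);
}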
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 116b58347501..2737366ea583 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -137,9 +137,9 @@ static inline int copy_fregs_to_user(struct fregs_state __user *fx)
static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
- if (config_enabled(CONFIG_X86_32))
+ if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
- else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
/* See comment in copy_fxregs_to_kernel() below. */
@@ -150,10 +150,10 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
int err;
- if (config_enabled(CONFIG_X86_32)) {
+ if (IS_ENABLED(CONFIG_X86_32)) {
err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
- if (config_enabled(CONFIG_AS_FXSAVEQ)) {
+ if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
@@ -166,9 +166,9 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
- if (config_enabled(CONFIG_X86_32))
+ if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
- else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
/* See comment in copy_fxregs_to_kernel() below. */
@@ -190,9 +190,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
- if (config_enabled(CONFIG_X86_32))
+ if (IS_ENABLED(CONFIG_X86_32))
asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
- else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 69e62862b622..33ae3a4d0159 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -35,8 +35,9 @@
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
-#define KVM_MAX_VCPUS 255
-#define KVM_SOFT_MAX_VCPUS 160
+#define KVM_MAX_VCPUS 288
+#define KVM_SOFT_MAX_VCPUS 240
+#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
@@ -599,6 +600,7 @@ struct kvm_vcpu_arch {
u64 mcg_cap;
u64 mcg_status;
u64 mcg_ctl;
+ u64 mcg_ext_ctl;
u64 *mce_banks;
/* Cache MMIO info */
@@ -682,9 +684,12 @@ struct kvm_arch_memory_slot {
struct kvm_apic_map {
struct rcu_head rcu;
u8 mode;
- struct kvm_lapic *phys_map[256];
- /* first index is cluster id second is cpu id in a cluster */
- struct kvm_lapic *logical_map[16][16];
+ u32 max_apic_id;
+ union {
+ struct kvm_lapic *xapic_flat_map[8];
+ struct kvm_lapic *xapic_cluster_map[16][4];
+ };
+ struct kvm_lapic *phys_map[];
};
/* Hyper-V emulation context */
@@ -779,6 +784,9 @@ struct kvm_arch {
u32 ldr_mode;
struct page *avic_logical_id_table_page;
struct page *avic_physical_id_table_page;
+
+ bool x2apic_format;
+ bool x2apic_broadcast_quirk_disabled;
};
struct kvm_vm_stat {
@@ -1006,6 +1014,11 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+ void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
+
+ void (*setup_mce)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
@@ -1026,7 +1039,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
- u64 dirty_mask, u64 nx_mask, u64 x_mask);
+ u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
@@ -1077,6 +1090,10 @@ extern u32 kvm_max_guest_tsc_khz;
extern u8 kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64 kvm_max_tsc_scaling_ratio;
+/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
+extern u64 kvm_default_tsc_scaling_ratio;
+
+extern u64 kvm_mce_cap_supported;
enum emulation_result {
EMULATE_DONE, /* no further processing */
@@ -1352,7 +1369,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu);
-void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq *irq);
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
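The reworked struct kvm_apic_map above ends in a flexible phys_map[] array bounded by the new max_apic_id field instead of a fixed 256-entry table. A hedged sketch of how such a map would be sized at allocation time (hypothetical helper, not taken from this patch):

struct kvm_apic_map *demo_alloc_apic_map(u32 max_id)
{
	struct kvm_apic_map *map;

	map = kzalloc(sizeof(*map) +
		      sizeof(struct kvm_lapic *) * (max_id + 1), GFP_KERNEL);
	if (map)
		map->max_apic_id = max_id;
	return map;
}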
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index a7f9181f63f3..ed80003ce3e2 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -22,7 +22,6 @@
#define _ASM_X86_LIVEPATCH_H
#include <asm/setup.h>
-#include <linux/module.h>
#include <linux/ftrace.h>
static inline int klp_check_compiler_support(void)
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h
index 0f555cc31984..24acd9ba7837 100644
--- a/arch/x86/include/asm/mc146818rtc.h
+++ b/arch/x86/include/asm/mc146818rtc.h
@@ -6,7 +6,6 @@
#include <asm/io.h>
#include <asm/processor.h>
-#include <linux/mc146818rtc.h>
#ifndef RTC_PORT
#define RTC_PORT(x) (0x70 + (x))
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 396348196aa7..d8abfcf524d1 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -155,7 +155,7 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
- return !config_enabled(CONFIG_IA32_EMULATION) ||
+ return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
!(mm->context.ia32_compat == TIF_IA32);
}
#else
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7c1c89598688..d019f0cc80ec 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -25,6 +25,24 @@ void pvclock_resume(void);
void pvclock_touch_watchdogs(void);
+static __always_inline
+unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src)
+{
+ unsigned version = src->version & ~1;
+ /* Make sure that the version is read before the data. */
+ virt_rmb();
+ return version;
+}
+
+static __always_inline
+bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
+ unsigned version)
+{
+ /* Make sure that the version is re-read after the data. */
+ virt_rmb();
+ return unlikely(version != src->version);
+}
+
/*
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result.
@@ -69,23 +87,12 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
}
static __always_inline
-unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
- cycle_t *cycles, u8 *flags)
+cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src)
{
- unsigned version;
- cycle_t offset;
- u64 delta;
-
- version = src->version;
- /* Make the latest version visible */
- smp_rmb();
-
- delta = rdtsc_ordered() - src->tsc_timestamp;
- offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
- src->tsc_shift);
- *cycles = src->system_time + offset;
- *flags = src->flags;
- return version;
+ u64 delta = rdtsc_ordered() - src->tsc_timestamp;
+ cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+ return src->system_time + offset;
}
struct pvclock_vsyscall_time_info {
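The new pvclock_read_begin()/pvclock_read_retry() pair plus the simplified __pvclock_read_cycles() implement the usual version-counter read protocol: snapshot an even version, read the data, then re-check the version. The vDSO hunk earlier in this patch uses exactly this pattern; a condensed sketch (error and VCLOCK_NONE handling elided):

static cycle_t demo_read_pvclock(const struct pvclock_vcpu_time_info *pvti)
{
	unsigned version;
	cycle_t cycles;

	do {
		version = pvclock_read_begin(pvti);	/* even version, then rmb */
		cycles = __pvclock_read_cycles(pvti);	/* scaled TSC delta + base */
	} while (pvclock_read_retry(pvti, version));	/* rmb, version re-check */

	return cycles;
}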
diff --git a/arch/x86/include/asm/rtc.h b/arch/x86/include/asm/rtc.h
deleted file mode 100644
index f71c3b0ed360..000000000000
--- a/arch/x86/include/asm/rtc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/rtc.h>
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index d0fe23ec7e98..14824fc78f7e 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -193,7 +193,6 @@ struct __attribute__ ((__packed__)) vmcb {
struct vmcb_save_area save;
};
-#define SVM_CPUID_FEATURE_SHIFT 2
#define SVM_CPUID_FUNC 0x8000000a
#define SVM_VM_CR_SVM_DISABLE 4
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index ab05d73e2bb7..d2f69b9ff732 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -31,9 +31,9 @@ static inline void dma_mark_clean(void *addr, size_t size) {}
extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#endif /* _ASM_X86_SWIOTLB_H */
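These swiotlb prototypes follow the tree-wide switch from struct dma_attrs * to a plain unsigned long bitmask of DMA_ATTR_* flags, so callers now pass 0 or OR'ed flags directly. An illustrative (hypothetical) caller using the generic DMA API:

static void *demo_alloc_wc(struct device *dev, size_t size, dma_addr_t *handle)
{
	/* attrs is a bitmask now; no struct dma_attrs to initialise */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}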
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 999b7cd2e78c..4e23dd15c661 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -60,7 +60,7 @@ static inline long syscall_get_error(struct task_struct *task,
* TS_COMPAT is set for 32-bit syscall entries and then
* remains set until we return to user mode.
*/
- if (task_thread_info(task)->status & TS_COMPAT)
+ if (task_thread_info(task)->status & (TS_COMPAT|TS_I386_REGS_POKED))
/*
* Sign-extend the value so (int)-EFOO becomes (long)-EFOO
* and will match correctly in comparisons.
@@ -239,9 +239,6 @@ static inline int syscall_get_arch(void)
* TS_COMPAT is set for 32-bit syscall entry and then
* remains set until we return to user mode.
*
- * TIF_IA32 tasks should always have TS_COMPAT set at
- * system call time.
- *
* x32 tasks should be considered AUDIT_ARCH_X86_64.
*/
if (task_thread_info(current)->status & TS_COMPAT)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 89bff044a6f5..8b7c8d8e0852 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ * 1 if within a frame
+ * -1 if placed across a frame boundary (or outside stack)
+ * 0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+ const void *frame = NULL;
+ const void *oldframe;
+
+ oldframe = __builtin_frame_address(1);
+ if (oldframe)
+ frame = __builtin_frame_address(2);
+ /*
+ * low ----------------------------------------------> high
+ * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+ * ^----------------^
+ * allow copies only within here
+ */
+ while (stack <= frame && frame < stackend) {
+ /*
+ * If obj + len extends past the last frame, this
+ * check won't pass and the next frame will be 0,
+ * causing us to bail out and correctly report
+ * the copy as invalid.
+ */
+ if (obj + len <= frame)
+ return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+ oldframe = frame;
+ frame = *(const void * const *)frame;
+ }
+ return -1;
+#else
+ return 0;
+#endif
+}
+
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_64
@@ -219,32 +263,11 @@ static inline unsigned long current_stack_pointer(void)
* have to worry about atomic accesses.
*/
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
+#ifdef CONFIG_COMPAT
+#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
+#endif
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
static inline bool in_ia32_syscall(void)
{
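The arch_within_stack_frames() helper added above is what HAVE_ARCH_WITHIN_STACK_FRAMES (selected in the x86 Kconfig hunk earlier) wires into the hardened-usercopy checks. A hedged sketch of a caller, assuming a hypothetical wrapper name:

static inline bool demo_obj_in_one_frame(const void *obj, unsigned long len)
{
	const void *stack = task_stack_page(current);
	const void *stackend = stack + THREAD_SIZE;

	/* 1: contained in one frame, -1: spans frames, 0: cannot tell */
	return arch_within_stack_frames(stack, stackend, obj, len) == 1;
}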
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 43e87a3dd95c..cf75871d2f81 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -36,6 +36,7 @@
#include <linux/cpumask.h>
#include <asm/mpspec.h>
+#include <asm/percpu.h>
/* Mappings between logical cpu number and node number */
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c03bfb68c503..a0ae610b9280 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -761,9 +761,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
* case, and do only runtime checking for non-constant sizes.
*/
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_from_user_overflow();
else
__copy_from_user_overflow(sz, n);
@@ -781,9 +782,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
might_fault();
/* See the comment in copy_from_user() above. */
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_to_user_overflow();
else
__copy_to_user_overflow(sz, n);
@@ -812,21 +814,21 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
#define user_access_begin() __uaccess_begin()
#define user_access_end() __uaccess_end()
-#define unsafe_put_user(x, ptr) \
-({ \
+#define unsafe_put_user(x, ptr, err_label) \
+do { \
int __pu_err; \
__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
- __builtin_expect(__pu_err, 0); \
-})
+ if (unlikely(__pu_err)) goto err_label; \
+} while (0)
-#define unsafe_get_user(x, ptr) \
-({ \
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
- __builtin_expect(__gu_err, 0); \
-})
+ if (unlikely(__gu_err)) goto err_label; \
+} while (0)
#endif /* _ASM_X86_UACCESS_H */
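unsafe_get_user()/unsafe_put_user() now take an error label and jump to it on fault instead of returning a value, which lets a sequence of accesses share one fixup path. A usage sketch under the new convention (hypothetical function; note that user_access_begin() still takes no arguments at this point in the tree):

static int demo_read_pair(const u32 __user *uptr, u32 *a, u32 *b)
{
	user_access_begin();
	unsafe_get_user(*a, &uptr[0], efault);
	unsafe_get_user(*b, &uptr[1], efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}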
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 4b32da24faaf..7d3bdd1ed697 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -37,6 +37,7 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
+ check_object_size(from, n, true);
return __copy_to_user_ll(to, from, n);
}
@@ -95,6 +96,7 @@ static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
+ check_object_size(to, n, false);
if (__builtin_constant_p(n)) {
unsigned long ret;
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 2eac2aa3e37f..673059a109fe 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -54,6 +54,7 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
+ check_object_size(dst, size, false);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
@@ -119,6 +120,7 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
+ check_object_size(src, size, true);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index cce9ee68e335..0116b2ee9e64 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -83,23 +83,19 @@ static inline void cpu_emergency_vmxoff(void)
*/
static inline int cpu_has_svm(const char **msg)
{
- uint32_t eax, ebx, ecx, edx;
-
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
if (msg)
*msg = "not amd";
return 0;
}
- cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
- if (eax < SVM_CPUID_FUNC) {
+ if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
if (msg)
*msg = "can't execute cpuid_8000000a";
return 0;
}
- cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
- if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
+ if (!boot_cpu_has(X86_FEATURE_SVM)) {
if (msg)
*msg = "svm not available";
return 0;
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index acd844c017d3..f02f025ff988 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -2,12 +2,11 @@
#define _ASM_X86_XEN_PAGE_COHERENT_H
#include <asm/page.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vstart = (void*)__get_free_pages(flags, get_order(size));
*dma_handle = virt_to_phys(vstart);
@@ -16,18 +15,18 @@ static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs) { }
+ enum dma_data_direction dir, unsigned long attrs) { }
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs) { }
+ unsigned long attrs) { }
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6738e5c82cca..90d84c3eee53 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -28,7 +28,7 @@
#include <linux/acpi_pmtmr.h>
#include <linux/efi.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/dmi.h>
#include <linux/irq.h>
#include <linux/slab.h>
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 4b28159e0421..bdfad642123f 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -5,7 +5,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 8e3842fc8bea..63ff468a7986 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -20,7 +20,6 @@
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
@@ -242,7 +241,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long bus;
phys_addr_t paddr = page_to_phys(page) + offset;
@@ -264,7 +263,7 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
*/
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long iommu_page;
int npages;
@@ -286,7 +285,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
* Wrapper for pci_unmap_single working with scatterlists.
*/
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
@@ -294,7 +293,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length)
break;
- gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
+ gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
}
}
@@ -316,7 +315,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
addr = dma_map_area(dev, addr, s->length, dir, 0);
if (addr == bad_dma_addr) {
if (i > 0)
- gart_unmap_sg(dev, sg, i, dir, NULL);
+ gart_unmap_sg(dev, sg, i, dir, 0);
nents = 0;
sg[0].dma_length = 0;
break;
@@ -387,7 +386,7 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
* Merge chunks that have page aligned sizes into a continuous mapping.
*/
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s, *ps, *start_sg, *sgmap;
int need = 0, nextneed, i, out, start;
@@ -457,7 +456,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
error:
flush_gart();
- gart_unmap_sg(dev, sg, out, dir, NULL);
+ gart_unmap_sg(dev, sg, out, dir, 0);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
@@ -477,7 +476,7 @@ error:
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
- gfp_t flag, struct dma_attrs *attrs)
+ gfp_t flag, unsigned long attrs)
{
dma_addr_t paddr;
unsigned long align_mask;
@@ -509,9 +508,9 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, unsigned long attrs)
{
- gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
+ gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index e45ec2b4e15e..4fdf6230d93c 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 0fd3d659f13c..cea4fc19e844 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -23,7 +23,7 @@
#include <linux/bootmem.h>
#include <linux/ftrace.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/timex.h>
@@ -147,7 +147,7 @@ static int force_enable_local_apic __initdata;
*/
static int __init parse_lapic(char *arg)
{
- if (config_enabled(CONFIG_X86_32) && !arg)
+ if (IS_ENABLED(CONFIG_X86_32) && !arg)
force_enable_local_apic = 1;
else if (arg && !strncmp(arg, "notscdeadline", 13))
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 048747778d37..5b2ae106bd4a 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/hardirq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 2cebf59092d8..c05688b2deff 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -11,7 +11,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 7788ce643bf4..f29501e1a5c1 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -16,7 +16,7 @@
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/delay.h>
#ifdef CONFIG_HARDLOCKUP_DETECTOR
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index f072b9572634..7491f417a8e4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -39,7 +39,7 @@
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 2a0f225afebd..3a205d4a12d0 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -8,7 +8,6 @@
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
-#include <linux/module.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 93edfa01b408..7c43e716c158 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -8,7 +8,7 @@
*/
#include <linux/threads.h>
#include <linux/cpumask.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 1793dba7a741..c303054b90b5 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -11,10 +11,9 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/dmar.h>
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index a5e400afc563..6066d945c40e 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -523,7 +523,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
struct apic_chip_data *data = irq_data->chip_data;
int err, irq = irq_data->irq;
- if (!config_enabled(CONFIG_SMP))
+ if (!IS_ENABLED(CONFIG_SMP))
return -EPERM;
if (!cpumask_intersects(dest, cpu_online_mask))
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 64dd38fbf218..09b59adaea3f 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -12,7 +12,7 @@
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d22a7b9c4f0e..809eda03c527 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2,7 +2,7 @@
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 73d391ae452f..27e46658ebe3 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -21,7 +21,8 @@
*
*/
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index abf601235b29..fcd484d2bb03 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -5,7 +5,7 @@
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index fbb5e90557a5..e42117d5f4d7 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -1,7 +1,7 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <linux/cpu.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
/**
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 10c11b4da31d..8f44c5a50ab8 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -13,7 +13,8 @@
#include <linux/types.h>
#include <linux/time.h>
#include <linux/clocksource.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 31e951ce6dff..3b442b64c72d 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -17,7 +17,6 @@
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 16e37a2581ac..fdc55215d44d 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -4,7 +4,7 @@
*/
#define DEBUG
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index d76f13d6d8d6..6d9b45549109 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -2,7 +2,6 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
-#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 7d393ecdeee6..28f1b54b7fad 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -38,7 +38,7 @@
#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 2e8caf03f593..181eabecae25 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -12,7 +12,7 @@
*/
#include <linux/percpu.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 8cac429b6a1d..1ff0598d309c 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -22,7 +22,8 @@
*/
#include <linux/dmi.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 9ef978d69c22..9616cf76940c 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -20,7 +20,7 @@
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 948d77da3881..09675712eba8 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -7,7 +7,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6dede08dd98b..9ee4520ce83c 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -7,7 +7,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 57b71373bae3..de7501edb21c 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -237,36 +237,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
* despite the efforts of the "RAM buffer" approach, which simply rounds
* memory boundaries up to 64M to try to catch space that may decode
* as RAM and so is not suitable for MMIO.
- *
- * And yes, so far on current devices the base addr is always under 4G.
*/
-static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
-{
- u32 base;
-
- /*
- * For the PCI IDs in this quirk, the stolen base is always
- * in 0x5c, aka the BDSM register (yes that's really what
- * it's called).
- */
- base = read_pci_config(num, slot, func, 0x5c);
- base &= ~((1<<20) - 1);
-
- return base;
-}
#define KB(x) ((x) * 1024UL)
#define MB(x) (KB (KB (x)))
-#define GB(x) (MB (KB (x)))
static size_t __init i830_tseg_size(void)
{
- u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+ u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
- if (!(tmp & TSEG_ENABLE))
+ if (!(esmramc & TSEG_ENABLE))
return 0;
- if (tmp & I830_TSEG_SIZE_1M)
+ if (esmramc & I830_TSEG_SIZE_1M)
return MB(1);
else
return KB(512);
@@ -274,27 +257,26 @@ static size_t __init i830_tseg_size(void)
static size_t __init i845_tseg_size(void)
{
- u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+ u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+ u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
- if (!(tmp & TSEG_ENABLE))
+ if (!(esmramc & TSEG_ENABLE))
return 0;
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- return KB(512);
- case I845_TSEG_SIZE_1M:
- return MB(1);
+ switch (tseg_size) {
+ case I845_TSEG_SIZE_512K: return KB(512);
+ case I845_TSEG_SIZE_1M: return MB(1);
default:
- WARN_ON(1);
- return 0;
+ WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
}
+ return 0;
}
static size_t __init i85x_tseg_size(void)
{
- u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+ u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
- if (!(tmp & TSEG_ENABLE))
+ if (!(esmramc & TSEG_ENABLE))
return 0;
return MB(1);
@@ -314,285 +296,287 @@ static size_t __init i85x_mem_size(void)
* On 830/845/85x the stolen memory base isn't available in any
* register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
*/
-static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
- return i830_mem_size() - i830_tseg_size() - stolen_size;
+ return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
}
-static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
- return i830_mem_size() - i845_tseg_size() - stolen_size;
+ return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
}
-static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
- return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+ return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
}
-static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
{
+ u16 toud;
+
/*
* FIXME is the graphics stolen memory region
* always at TOUD? Ie. is it always the last
* one to be allocated by the BIOS?
*/
- return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+ toud = read_pci_config_16(0, 0, 0, I865_TOUD);
+
+ return (phys_addr_t)toud << 16;
+}
+
+static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
+{
+ u32 bsm;
+
+ /* Almost universally we can find the Graphics Base of Stolen Memory
+ * at register BSM (0x5c) in the igfx configuration space. On a few
+ * (desktop) machines this is also mirrored in the bridge device at
+ * different locations, or in the MCHBAR.
+ */
+ bsm = read_pci_config(num, slot, func, INTEL_BSM);
+
+ return (phys_addr_t)bsm & INTEL_BSM_MASK;
}
static size_t __init i830_stolen_size(int num, int slot, int func)
{
- size_t stolen_size;
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
- switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
- case I830_GMCH_GMS_STOLEN_512:
- stolen_size = KB(512);
- break;
- case I830_GMCH_GMS_STOLEN_1024:
- stolen_size = MB(1);
- break;
- case I830_GMCH_GMS_STOLEN_8192:
- stolen_size = MB(8);
- break;
- case I830_GMCH_GMS_LOCAL:
- /* local memory isn't part of the normal address space */
- stolen_size = 0;
- break;
+ gms = gmch_ctrl & I830_GMCH_GMS_MASK;
+
+ switch (gms) {
+ case I830_GMCH_GMS_STOLEN_512: return KB(512);
+ case I830_GMCH_GMS_STOLEN_1024: return MB(1);
+ case I830_GMCH_GMS_STOLEN_8192: return MB(8);
+ /* local memory isn't part of the normal address space */
+ case I830_GMCH_GMS_LOCAL: return 0;
default:
- return 0;
+ WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
}
- return stolen_size;
+ return 0;
}
static size_t __init gen3_stolen_size(int num, int slot, int func)
{
- size_t stolen_size;
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
- switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
- case I855_GMCH_GMS_STOLEN_1M:
- stolen_size = MB(1);
- break;
- case I855_GMCH_GMS_STOLEN_4M:
- stolen_size = MB(4);
- break;
- case I855_GMCH_GMS_STOLEN_8M:
- stolen_size = MB(8);
- break;
- case I855_GMCH_GMS_STOLEN_16M:
- stolen_size = MB(16);
- break;
- case I855_GMCH_GMS_STOLEN_32M:
- stolen_size = MB(32);
- break;
- case I915_GMCH_GMS_STOLEN_48M:
- stolen_size = MB(48);
- break;
- case I915_GMCH_GMS_STOLEN_64M:
- stolen_size = MB(64);
- break;
- case G33_GMCH_GMS_STOLEN_128M:
- stolen_size = MB(128);
- break;
- case G33_GMCH_GMS_STOLEN_256M:
- stolen_size = MB(256);
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen_size = MB(96);
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen_size = MB(160);
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen_size = MB(224);
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen_size = MB(352);
- break;
+ gms = gmch_ctrl & I855_GMCH_GMS_MASK;
+
+ switch (gms) {
+ case I855_GMCH_GMS_STOLEN_1M: return MB(1);
+ case I855_GMCH_GMS_STOLEN_4M: return MB(4);
+ case I855_GMCH_GMS_STOLEN_8M: return MB(8);
+ case I855_GMCH_GMS_STOLEN_16M: return MB(16);
+ case I855_GMCH_GMS_STOLEN_32M: return MB(32);
+ case I915_GMCH_GMS_STOLEN_48M: return MB(48);
+ case I915_GMCH_GMS_STOLEN_64M: return MB(64);
+ case G33_GMCH_GMS_STOLEN_128M: return MB(128);
+ case G33_GMCH_GMS_STOLEN_256M: return MB(256);
+ case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
+ case INTEL_GMCH_GMS_STOLEN_160M:return MB(160);
+ case INTEL_GMCH_GMS_STOLEN_224M:return MB(224);
+ case INTEL_GMCH_GMS_STOLEN_352M:return MB(352);
default:
- stolen_size = 0;
- break;
+ WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
}
- return stolen_size;
+ return 0;
}
static size_t __init gen6_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
+ gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
- return gmch_ctrl << 25; /* 32 MB units */
+ return (size_t)gms * MB(32);
}
static size_t __init gen8_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
- gmch_ctrl &= BDW_GMCH_GMS_MASK;
- return gmch_ctrl << 25; /* 32 MB units */
+ gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
+
+ return (size_t)gms * MB(32);
}
static size_t __init chv_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
+ gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
/*
* 0x0 to 0x10: 32MB increments starting at 0MB
* 0x11 to 0x16: 4MB increments starting at 8MB
* 0x17 to 0x1d: 4MB increments start at 36MB
*/
- if (gmch_ctrl < 0x11)
- return gmch_ctrl << 25;
- else if (gmch_ctrl < 0x17)
- return (gmch_ctrl - 0x11 + 2) << 22;
+ if (gms < 0x11)
+ return (size_t)gms * MB(32);
+ else if (gms < 0x17)
+ return (size_t)(gms - 0x11 + 2) * MB(4);
else
- return (gmch_ctrl - 0x17 + 9) << 22;
+ return (size_t)(gms - 0x17 + 9) * MB(4);
}
-struct intel_stolen_funcs {
- size_t (*size)(int num, int slot, int func);
- u32 (*base)(int num, int slot, int func, size_t size);
-};
-
static size_t __init gen9_stolen_size(int num, int slot, int func)
{
u16 gmch_ctrl;
+ u16 gms;
gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
- gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
- gmch_ctrl &= BDW_GMCH_GMS_MASK;
+ gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
- if (gmch_ctrl < 0xf0)
- return gmch_ctrl << 25; /* 32 MB units */
+ /* 0x0 to 0xef: 32MB increments starting at 0MB */
+ /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
+ if (gms < 0xf0)
+ return (size_t)gms * MB(32);
else
- /* 4MB increments starting at 0xf0 for 4MB */
- return (gmch_ctrl - 0xf0 + 1) << 22;
+ return (size_t)(gms - 0xf0 + 1) * MB(4);
}
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+struct intel_early_ops {
+ size_t (*stolen_size)(int num, int slot, int func);
+ phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
+};
-static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
- .base = i830_stolen_base,
- .size = i830_stolen_size,
+static const struct intel_early_ops i830_early_ops __initconst = {
+ .stolen_base = i830_stolen_base,
+ .stolen_size = i830_stolen_size,
};
-static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
- .base = i845_stolen_base,
- .size = i830_stolen_size,
+static const struct intel_early_ops i845_early_ops __initconst = {
+ .stolen_base = i845_stolen_base,
+ .stolen_size = i830_stolen_size,
};
-static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
- .base = i85x_stolen_base,
- .size = gen3_stolen_size,
+static const struct intel_early_ops i85x_early_ops __initconst = {
+ .stolen_base = i85x_stolen_base,
+ .stolen_size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
- .base = i865_stolen_base,
- .size = gen3_stolen_size,
+static const struct intel_early_ops i865_early_ops __initconst = {
+ .stolen_base = i865_stolen_base,
+ .stolen_size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen3_stolen_size,
+static const struct intel_early_ops gen3_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen3_stolen_size,
};
-static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen6_stolen_size,
+static const struct intel_early_ops gen6_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen6_stolen_size,
};
-static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen8_stolen_size,
+static const struct intel_early_ops gen8_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen8_stolen_size,
};
-static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = gen9_stolen_size,
+static const struct intel_early_ops gen9_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = gen9_stolen_size,
};
-static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
- .base = intel_stolen_base,
- .size = chv_stolen_size,
+static const struct intel_early_ops chv_early_ops __initconst = {
+ .stolen_base = gen3_stolen_base,
+ .stolen_size = chv_stolen_size,
};
-static const struct pci_device_id intel_stolen_ids[] __initconst = {
- INTEL_I830_IDS(&i830_stolen_funcs),
- INTEL_I845G_IDS(&i845_stolen_funcs),
- INTEL_I85X_IDS(&i85x_stolen_funcs),
- INTEL_I865G_IDS(&i865_stolen_funcs),
- INTEL_I915G_IDS(&gen3_stolen_funcs),
- INTEL_I915GM_IDS(&gen3_stolen_funcs),
- INTEL_I945G_IDS(&gen3_stolen_funcs),
- INTEL_I945GM_IDS(&gen3_stolen_funcs),
- INTEL_VLV_M_IDS(&gen6_stolen_funcs),
- INTEL_VLV_D_IDS(&gen6_stolen_funcs),
- INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
- INTEL_I965G_IDS(&gen3_stolen_funcs),
- INTEL_G33_IDS(&gen3_stolen_funcs),
- INTEL_I965GM_IDS(&gen3_stolen_funcs),
- INTEL_GM45_IDS(&gen3_stolen_funcs),
- INTEL_G45_IDS(&gen3_stolen_funcs),
- INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
- INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
- INTEL_SNB_D_IDS(&gen6_stolen_funcs),
- INTEL_SNB_M_IDS(&gen6_stolen_funcs),
- INTEL_IVB_M_IDS(&gen6_stolen_funcs),
- INTEL_IVB_D_IDS(&gen6_stolen_funcs),
- INTEL_HSW_D_IDS(&gen6_stolen_funcs),
- INTEL_HSW_M_IDS(&gen6_stolen_funcs),
- INTEL_BDW_M_IDS(&gen8_stolen_funcs),
- INTEL_BDW_D_IDS(&gen8_stolen_funcs),
- INTEL_CHV_IDS(&chv_stolen_funcs),
- INTEL_SKL_IDS(&gen9_stolen_funcs),
- INTEL_BXT_IDS(&gen9_stolen_funcs),
- INTEL_KBL_IDS(&gen9_stolen_funcs),
+static const struct pci_device_id intel_early_ids[] __initconst = {
+ INTEL_I830_IDS(&i830_early_ops),
+ INTEL_I845G_IDS(&i845_early_ops),
+ INTEL_I85X_IDS(&i85x_early_ops),
+ INTEL_I865G_IDS(&i865_early_ops),
+ INTEL_I915G_IDS(&gen3_early_ops),
+ INTEL_I915GM_IDS(&gen3_early_ops),
+ INTEL_I945G_IDS(&gen3_early_ops),
+ INTEL_I945GM_IDS(&gen3_early_ops),
+ INTEL_VLV_M_IDS(&gen6_early_ops),
+ INTEL_VLV_D_IDS(&gen6_early_ops),
+ INTEL_PINEVIEW_IDS(&gen3_early_ops),
+ INTEL_I965G_IDS(&gen3_early_ops),
+ INTEL_G33_IDS(&gen3_early_ops),
+ INTEL_I965GM_IDS(&gen3_early_ops),
+ INTEL_GM45_IDS(&gen3_early_ops),
+ INTEL_G45_IDS(&gen3_early_ops),
+ INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
+ INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
+ INTEL_SNB_D_IDS(&gen6_early_ops),
+ INTEL_SNB_M_IDS(&gen6_early_ops),
+ INTEL_IVB_M_IDS(&gen6_early_ops),
+ INTEL_IVB_D_IDS(&gen6_early_ops),
+ INTEL_HSW_D_IDS(&gen6_early_ops),
+ INTEL_HSW_M_IDS(&gen6_early_ops),
+ INTEL_BDW_M_IDS(&gen8_early_ops),
+ INTEL_BDW_D_IDS(&gen8_early_ops),
+ INTEL_CHV_IDS(&chv_early_ops),
+ INTEL_SKL_IDS(&gen9_early_ops),
+ INTEL_BXT_IDS(&gen9_early_ops),
+ INTEL_KBL_IDS(&gen9_early_ops),
};
-static void __init intel_graphics_stolen(int num, int slot, int func)
+static void __init
+intel_graphics_stolen(int num, int slot, int func,
+ const struct intel_early_ops *early_ops)
{
+ phys_addr_t base, end;
size_t size;
+
+ size = early_ops->stolen_size(num, slot, func);
+ base = early_ops->stolen_base(num, slot, func, size);
+
+ if (!size || !base)
+ return;
+
+ end = base + size - 1;
+ printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
+ &base, &end);
+
+ /* Mark this space as reserved */
+ e820_add_region(base, size, E820_RESERVED);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
+
+static void __init intel_graphics_quirks(int num, int slot, int func)
+{
+ const struct intel_early_ops *early_ops;
+ u16 device;
int i;
- u32 start;
- u16 device, subvendor, subdevice;
device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
- subvendor = read_pci_config_16(num, slot, func,
- PCI_SUBSYSTEM_VENDOR_ID);
- subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
-
- for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
- if (intel_stolen_ids[i].device == device) {
- const struct intel_stolen_funcs *stolen_funcs =
- (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
- size = stolen_funcs->size(num, slot, func);
- start = stolen_funcs->base(num, slot, func, size);
- if (size && start) {
- printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
- start, start + (u32)size - 1);
- /* Mark this space as reserved */
- e820_add_region(start, size, E820_RESERVED);
- sanitize_e820_map(e820.map,
- ARRAY_SIZE(e820.map),
- &e820.nr_map);
- }
- return;
- }
+
+ for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
+ kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
+
+ if (intel_early_ids[i].device != device)
+ continue;
+
+ early_ops = (typeof(early_ops))driver_data;
+
+ intel_graphics_stolen(num, slot, func, early_ops);
+
+ return;
}
}
@@ -690,7 +674,7 @@ static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
- QFLAG_APPLY_ONCE, intel_graphics_stolen },
+ QFLAG_APPLY_ONCE, intel_graphics_quirks },
/*
* HPET on the current version of the Baytrail platform has accuracy
* problems: it will halt in deep idle state - so we disable it.
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 9e231d88bb33..a184c210efba 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -159,8 +159,8 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
- ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
- config_enabled(CONFIG_IA32_EMULATION));
+ ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
if (!access_ok(VERIFY_WRITE, buf, size))
return -EACCES;
@@ -268,8 +268,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
u64 xfeatures = 0;
int fx_only = 0;
- ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
- config_enabled(CONFIG_IA32_EMULATION));
+ ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
if (!buf) {
fpu__clear(fpu);
@@ -416,8 +416,8 @@ void fpu__init_prepare_fx_sw_frame(void)
fx_sw_reserved.xfeatures = xfeatures_mask;
fx_sw_reserved.xstate_size = fpu_user_xstate_size;
- if (config_enabled(CONFIG_IA32_EMULATION) ||
- config_enabled(CONFIG_X86_32)) {
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
+ IS_ENABLED(CONFIG_X86_32)) {
int fsave_header_size = sizeof(struct fregs_state);
fx_sw_reserved_ia32 = fx_sw_reserved;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 3d747070fe67..ed16e58658a4 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1019,7 +1019,6 @@ void hpet_disable(void)
*/
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
-#include <asm/rtc.h>
#define DEFAULT_RTC_INT_FREQ 64
#define DEFAULT_RTC_SHIFT 6
@@ -1243,7 +1242,7 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
memset(&curr_time, 0, sizeof(struct rtc_time));
if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
- get_rtc_time(&curr_time);
+ mc146818_set_time(&curr_time);
if (hpet_rtc_flags & RTC_UIE &&
curr_time.tm_sec != hpet_prev_update_sec) {
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 2bcfb5f2bc44..8771766d46b6 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -36,13 +36,14 @@
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
+#include <asm/user.h>
/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index d40ee8a38fed..1f9b878ef5ef 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index efb82f07b29c..6ebe00cb4a3b 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -3,7 +3,7 @@
*
*/
#include <linux/clockchips.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/timex.h>
#include <linux/i8253.h>
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index a979b5bd2fc0..50c89e8a95f2 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -6,7 +6,7 @@
* outb_p/inb_p API uses.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dmi.h>
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index c627bf8d98ad..1f38d9a4d9de 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -8,7 +8,6 @@
* io_apic.c.)
*/
-#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 206d0b90a3ab..4a7903714065 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -11,7 +11,6 @@
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index dc1404bf8e4b..bdb83e431d89 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -8,7 +8,7 @@
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/stat.h>
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1ef5e48b3a36..1726c4c12336 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -21,7 +21,7 @@
*/
#include <linux/context_tracking.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 97340f2c437c..068c4a929de6 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -16,7 +16,6 @@
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
-#include <linux/module.h>
#include <linux/smp.h>
#include <linux/pci.h>
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 04b132a767f1..bfe4d6c96fbd 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 33ee3e0efd65..1939a0269377 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -3,7 +3,7 @@
* compiled in a FTRACE-compatible way.
*/
#include <linux/spinlock.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/jump_label.h>
#include <asm/paravirt.h>
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 7b3b3f24c3ea..ad5bc9578a73 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -19,7 +19,8 @@
*/
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 833b1d329c47..5d400ba1349d 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -340,7 +340,7 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems,enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
struct scatterlist *s;
@@ -364,7 +364,7 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
struct scatterlist *s;
@@ -396,7 +396,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
return nelems;
error:
- calgary_unmap_sg(dev, sg, nelems, dir, NULL);
+ calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
sg->dma_address = DMA_ERROR_CODE;
sg->dma_length = 0;
@@ -407,7 +407,7 @@ error:
static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vaddr = page_address(page) + offset;
unsigned long uaddr;
@@ -422,7 +422,7 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
unsigned int npages;
@@ -432,7 +432,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
}
static void* calgary_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
void *ret = NULL;
dma_addr_t mapping;
@@ -466,7 +466,7 @@ error:
static void calgary_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned int npages;
struct iommu_table *tbl = find_iommu_table(dev);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 6ba014c61d62..d30c37750765 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -77,7 +77,7 @@ void __init pci_iommu_alloc(void)
}
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long dma_mask;
struct page *page;
@@ -120,7 +120,7 @@ again:
}
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = virt_to_page(vaddr);
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index da15918d1c81..00e71ce396a8 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -28,7 +28,7 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
@@ -55,7 +55,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
*/
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7c577a178859..b47edb8f5256 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -2,7 +2,7 @@
#include <linux/pci.h>
#include <linux/cache.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
@@ -16,7 +16,7 @@ int swiotlb __read_mostly;
void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vaddr;
@@ -37,7 +37,7 @@ void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void x86_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 92f70147a9a6..0c5315d322c8 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -3,7 +3,7 @@
* Copyright (c) 2015, Intel Corporation.
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ioport.h>
static int found(u64 start, u64 end, void *data)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 59f68f1d734b..62c0b0ea2ce4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -7,7 +7,8 @@
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9f950917528b..d86be29c38c7 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6e789ca1f841..63236d8f84bf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -26,7 +26,7 @@
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 600edd225e81..f79576a541ff 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -923,15 +923,18 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
case offsetof(struct user32, regs.orig_eax):
/*
- * A 32-bit debugger setting orig_eax means to restore
- * the state of the task restarting a 32-bit syscall.
- * Make sure we interpret the -ERESTART* codes correctly
- * in case the task is not actually still sitting at the
- * exit from a 32-bit syscall with TS_COMPAT still set.
+ * Warning: bizarre corner case fixup here. A 32-bit
+ * debugger setting orig_eax to -1 wants to disable
+ * syscall restart. Make sure that the syscall
+ * restart code sign-extends orig_ax. Also make sure
+ * we interpret the -ERESTART* codes correctly if
+ * loaded into regs->ax in case the task is not
+ * actually still sitting at the exit from a 32-bit
+ * syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
if (syscall_get_nr(child, regs) >= 0)
- task_thread_info(child)->status |= TS_COMPAT;
+ task_thread_info(child)->status |= TS_I386_REGS_POKED;
break;
case offsetof(struct user32, regs.eflags):
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 06c58ce46762..3599404e3089 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -64,14 +64,9 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
u8 flags;
do {
- version = src->version;
- /* Make the latest version visible */
- smp_rmb();
-
+ version = pvclock_read_begin(src);
flags = src->flags;
- /* Make sure that the version double-check is last. */
- smp_rmb();
- } while ((src->version & 1) || version != src->version);
+ } while (pvclock_read_retry(src, version));
return flags & valid_flags;
}
@@ -84,10 +79,10 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
u8 flags;
do {
- version = __pvclock_read_cycles(src, &ret, &flags);
- /* Make sure that the version double-check is last. */
- smp_rmb();
- } while ((src->version & 1) || version != src->version);
+ version = pvclock_read_begin(src);
+ ret = __pvclock_read_cycles(src);
+ flags = src->flags;
+ } while (pvclock_read_retry(src, version));
if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
src->flags &= ~PVCLOCK_GUEST_STOPPED;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 15ed70f8278b..63bf27d972b7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -1,6 +1,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pm.h>
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index eceaa082ec3f..79c6311cd912 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -13,7 +13,6 @@
#include <asm/x86_init.h>
#include <asm/time.h>
#include <asm/intel-mid.h>
-#include <asm/rtc.h>
#include <asm/setup.h>
#ifdef CONFIG_X86_32
@@ -47,7 +46,7 @@ int mach_set_rtc_mmss(const struct timespec *now)
rtc_time_to_tm(nowtime, &tm);
if (!rtc_valid_tm(&tm)) {
- retval = set_rtc_time(&tm);
+ retval = mc146818_set_time(&tm);
if (retval)
printk(KERN_ERR "%s: RTC write failed with error %d\n",
__func__, retval);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6cb2b02fcc87..991b77986d57 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -36,7 +36,7 @@
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 22cc2f9f8aec..04cb3212db2d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -146,7 +146,7 @@ static int restore_sigcontext(struct pt_regs *regs,
buf = (void __user *)buf_val;
} get_user_catch(err);
- err |= fpu__restore_sig(buf, config_enabled(CONFIG_X86_32));
+ err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
force_iret();
@@ -245,14 +245,14 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
struct fpu *fpu = &current->thread.fpu;
/* redzone */
- if (config_enabled(CONFIG_X86_64))
+ if (IS_ENABLED(CONFIG_X86_64))
sp -= 128;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
- } else if (config_enabled(CONFIG_X86_32) &&
+ } else if (IS_ENABLED(CONFIG_X86_32) &&
!onsigstack &&
(regs->ss & 0xffff) != __USER_DS &&
!(ka->sa.sa_flags & SA_RESTORER) &&
@@ -262,7 +262,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
}
if (fpu->fpstate_active) {
- sp = fpu__alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
+ sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
}
@@ -662,18 +662,18 @@ badframe:
static inline int is_ia32_compat_frame(void)
{
- return config_enabled(CONFIG_IA32_EMULATION) &&
+ return IS_ENABLED(CONFIG_IA32_EMULATION) &&
test_thread_flag(TIF_IA32);
}
static inline int is_ia32_frame(void)
{
- return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+ return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame();
}
static inline int is_x32_frame(void)
{
- return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+ return IS_ENABLED(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}
static int
@@ -760,8 +760,30 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
-#ifdef CONFIG_X86_64
- if (in_ia32_syscall())
+ /*
+ * This function is fundamentally broken as currently
+ * implemented.
+ *
+ * The idea is that we want to trigger a call to the
+ * restart_block() syscall and that we want in_ia32_syscall(),
+ * in_x32_syscall(), etc. to match whatever they were in the
+ * syscall being restarted. We assume that the syscall
+ * instruction at (regs->ip - 2) matches whatever syscall
+ * instruction we used to enter in the first place.
+ *
+ * The problem is that we can get here when ptrace pokes
+ * syscall-like values into regs even if we're not in a syscall
+ * at all.
+ *
+ * For now, we maintain historical behavior and guess based on
+ * stored state. We could do better by saving the actual
+ * syscall arch in restart_block or (with caveats on x32) by
+ * checking if regs->ip points to 'int $0x80'. The current
+ * behavior is incorrect if a tracer has a different bitness
+ * than the tracee.
+ */
+#ifdef CONFIG_IA32_EMULATION
+ if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c93609c97406..2a6e84a30a54 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -43,7 +43,7 @@
#include <linux/init.h>
#include <linux/smp.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 9ee98eefc44d..4738f5e0f2ab 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -5,7 +5,7 @@
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index cb4a01b41e27..222e84e2432e 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -9,7 +9,6 @@
* as published by the Free Software Foundation; version 2
* of the License.
*/
-#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/asm.h>
@@ -74,7 +73,3 @@ int rodata_test(void)
return 0;
}
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Testcase for marking rodata as read-only");
-MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 00f03d82e69a..b70ca12dd389 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -21,7 +21,7 @@
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8fb4b6abac0e..78b9cb5a26af 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index f1aebfb49c36..95e49f6e4fc3 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -1,7 +1,8 @@
/* Exports for assembly files.
All C exports should go in the respective C files. */
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
#include <linux/smp.h>
#include <net/checksum.h>
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 58b459296e13..76c5e52436c4 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -5,7 +5,7 @@
*/
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 639a6e34500c..ab8e32f7b9a8 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -32,7 +32,6 @@ config KVM
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_EVENTFD
- select KVM_APIC_ARCHITECTURE
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 643565364497..3235e0fe7792 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -13,7 +13,7 @@
*/
#include <linux/kvm_host.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a2f24af3c999..4e95d3eb2955 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -22,7 +22,6 @@
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
-#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index a4bf5b45d65a..5fb6c620180e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -645,7 +645,6 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
.write = speaker_ioport_write,
};
-/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
@@ -690,6 +689,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
kvm_pit_set_reinject(pit, true);
+ mutex_lock(&kvm->slots_lock);
kvm_iodevice_init(&pit->dev, &pit_dev_ops);
ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
KVM_PIT_MEM_LENGTH, &pit->dev);
@@ -704,12 +704,14 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
if (ret < 0)
goto fail_register_speaker;
}
+ mutex_unlock(&kvm->slots_lock);
return pit;
fail_register_speaker:
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail_register_pit:
+ mutex_unlock(&kvm->slots_lock);
kvm_pit_set_reinject(pit, false);
kthread_stop(pit->worker_task);
fail_kthread:
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 3069281904d3..b181426f67b4 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -25,12 +25,10 @@
#include <linux/list.h>
#include <linux/kvm_host.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
-#include <linux/dmar.h>
#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
#include "assigned-dev.h"
static bool allow_unsafe_assigned_interrupts;
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 95fcc7b13866..60d91c9d160c 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -20,7 +20,7 @@
*
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kvm_host.h>
#include "irq.h"
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 61ebdc13a29a..035731eb3897 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -120,4 +120,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
int apic_has_pending_timer(struct kvm_vcpu *vcpu);
+int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_setup_empty_irq_routing(struct kvm *kvm);
+
#endif
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index dfb4c6476877..25810b144b58 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -110,13 +110,17 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
return r;
}
-void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq *irq)
{
- trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+ trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ?
+ (u64)e->msi.address_hi << 32 : 0),
+ e->msi.data);
irq->dest_id = (e->msi.address_lo &
MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+ if (kvm->arch.x2apic_format)
+ irq->dest_id |= MSI_ADDR_EXT_DEST_ID(e->msi.address_hi);
irq->vector = (e->msi.data &
MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
@@ -129,15 +133,24 @@ void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
}
EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
+static inline bool kvm_msi_route_invalid(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e)
+{
+ return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
+}
+
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
struct kvm_lapic_irq irq;
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
+
if (!level)
return -1;
- kvm_set_msi_irq(e, &irq);
+ kvm_set_msi_irq(kvm, e, &irq);
return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}
@@ -153,7 +166,10 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
return -EWOULDBLOCK;
- kvm_set_msi_irq(e, &irq);
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
+
+ kvm_set_msi_irq(kvm, e, &irq);
if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
return r;
@@ -248,7 +264,8 @@ static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int r = -EINVAL;
@@ -285,6 +302,9 @@ int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
e->msi.address_lo = ue->u.msi.address_lo;
e->msi.address_hi = ue->u.msi.address_hi;
e->msi.data = ue->u.msi.data;
+
+ if (kvm_msi_route_invalid(kvm, e))
+ goto out;
break;
case KVM_IRQ_ROUTING_HV_SINT:
e->set = kvm_hv_set_sint;
@@ -388,21 +408,16 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm->arch.nr_reserved_ioapic_pins);
for (i = 0; i < nr_ioapic_pins; ++i) {
hlist_for_each_entry(entry, &table->map[i], link) {
- u32 dest_id, dest_mode;
- bool level;
+ struct kvm_lapic_irq irq;
if (entry->type != KVM_IRQ_ROUTING_MSI)
continue;
- dest_id = (entry->msi.address_lo >> 12) & 0xff;
- dest_mode = (entry->msi.address_lo >> 2) & 0x1;
- level = entry->msi.data & MSI_DATA_TRIGGER_LEVEL;
- if (level && kvm_apic_match_dest(vcpu, NULL, 0,
- dest_id, dest_mode)) {
- u32 vector = entry->msi.data & 0xff;
-
- __set_bit(vector,
- ioapic_handled_vectors);
- }
+
+ kvm_set_msi_irq(vcpu->kvm, entry, &irq);
+
+ if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
+ irq.dest_id, irq.dest_mode))
+ __set_bit(irq.vector, ioapic_handled_vectors);
}
}
srcu_read_unlock(&kvm->irq_srcu, idx);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a397200281c1..b62c85229711 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -25,7 +25,7 @@
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
@@ -115,26 +115,43 @@ static inline int apic_enabled(struct kvm_lapic *apic)
(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
-/* The logical map is definitely wrong if we have multiple
- * modes at the same time. (Physical map is always right.)
- */
-static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
-{
- return !(map->mode & (map->mode - 1));
+static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
+ u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
+ switch (map->mode) {
+ case KVM_APIC_MODE_X2APIC: {
+ u32 offset = (dest_id >> 16) * 16;
+ u32 max_apic_id = map->max_apic_id;
+
+ if (offset <= max_apic_id) {
+ u8 cluster_size = min(max_apic_id - offset + 1, 16U);
+
+ *cluster = &map->phys_map[offset];
+ *mask = dest_id & (0xffff >> (16 - cluster_size));
+ } else {
+ *mask = 0;
+ }
+
+ return true;
+ }
+ case KVM_APIC_MODE_XAPIC_FLAT:
+ *cluster = map->xapic_flat_map;
+ *mask = dest_id & 0xff;
+ return true;
+ case KVM_APIC_MODE_XAPIC_CLUSTER:
+ *cluster = map->xapic_cluster_map[dest_id >> 4];
+ *mask = dest_id & 0xf;
+ return true;
+ default:
+ /* Not optimized. */
+ return false;
+ }
}
-static inline void
-apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
+static void kvm_apic_map_free(struct rcu_head *rcu)
{
- unsigned lid_bits;
-
- BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
- BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
- BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
- lid_bits = map->mode;
+ struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
- *cid = dest_id >> lid_bits;
- *lid = dest_id & ((1 << lid_bits) - 1);
+ kvfree(map);
}
static void recalculate_apic_map(struct kvm *kvm)
@@ -142,17 +159,26 @@ static void recalculate_apic_map(struct kvm *kvm)
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
int i;
-
- new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
+ u32 max_id = 255;
mutex_lock(&kvm->arch.apic_map_lock);
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (kvm_apic_present(vcpu))
+ max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));
+
+ new = kvm_kvzalloc(sizeof(struct kvm_apic_map) +
+ sizeof(struct kvm_lapic *) * ((u64)max_id + 1));
+
if (!new)
goto out;
+ new->max_apic_id = max_id;
+
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvm_lapic *apic = vcpu->arch.apic;
- u16 cid, lid;
+ struct kvm_lapic **cluster;
+ u16 mask;
u32 ldr, aid;
if (!kvm_apic_present(vcpu))
@@ -161,7 +187,7 @@ static void recalculate_apic_map(struct kvm *kvm)
aid = kvm_apic_id(apic);
ldr = kvm_lapic_get_reg(apic, APIC_LDR);
- if (aid < ARRAY_SIZE(new->phys_map))
+ if (aid <= new->max_apic_id)
new->phys_map[aid] = apic;
if (apic_x2apic_mode(apic)) {
@@ -174,13 +200,11 @@ static void recalculate_apic_map(struct kvm *kvm)
new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
}
- if (!kvm_apic_logical_map_valid(new))
+ if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
continue;
- apic_logical_id(new, ldr, &cid, &lid);
-
- if (lid && cid < ARRAY_SIZE(new->logical_map))
- new->logical_map[cid][ffs(lid) - 1] = apic;
+ if (mask)
+ cluster[ffs(mask) - 1] = apic;
}
out:
old = rcu_dereference_protected(kvm->arch.apic_map,
@@ -189,7 +213,7 @@ out:
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
- kfree_rcu(old, rcu);
+ call_rcu(&old->rcu, kvm_apic_map_free);
kvm_make_scan_ioapic_request(kvm);
}
@@ -210,7 +234,7 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
}
}
-static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
+static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
kvm_lapic_set_reg(apic, APIC_ID, id << 24);
recalculate_apic_map(apic->vcpu->kvm);
@@ -222,11 +246,11 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
recalculate_apic_map(apic->vcpu->kvm);
}
-static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u8 id)
+static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
- kvm_lapic_set_reg(apic, APIC_ID, id << 24);
+ kvm_lapic_set_reg(apic, APIC_ID, id);
kvm_lapic_set_reg(apic, APIC_LDR, ldr);
recalculate_apic_map(apic->vcpu->kvm);
}
@@ -599,17 +623,30 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
}
}
-/* KVM APIC implementation has two quirks
- * - dest always begins at 0 while xAPIC MDA has offset 24,
- * - IOxAPIC messages have to be delivered (directly) to x2APIC.
+/* The KVM local APIC implementation has two quirks:
+ *
+ * - the xAPIC MDA stores the destination at bits 24-31, while this
+ * is not true of struct kvm_lapic_irq's dest_id field. This is
+ * just a quirk in the API and is not problematic.
+ *
+ * - in-kernel IOAPIC messages have to be delivered directly to
+ * x2APIC, because the kernel does not support interrupt remapping.
+ * In order to support broadcast without interrupt remapping, x2APIC
+ * rewrites the destination of non-IPI messages from APIC_BROADCAST
+ * to X2APIC_BROADCAST.
+ *
+ * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API. This is
+ * important when userspace wants to use x2APIC-format MSIs, because
+ * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
*/
-static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
- struct kvm_lapic *target)
+static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
+ struct kvm_lapic *source, struct kvm_lapic *target)
{
bool ipi = source != NULL;
bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
- if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
+ if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
+ !ipi && dest_id == APIC_BROADCAST && x2apic_mda)
return X2APIC_BROADCAST;
return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
@@ -619,7 +656,7 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode)
{
struct kvm_lapic *target = vcpu->arch.apic;
- u32 mda = kvm_apic_mda(dest, source, target);
+ u32 mda = kvm_apic_mda(vcpu, dest, source, target);
apic_debug("target %p, source %p, dest 0x%x, "
"dest_mode 0x%x, short_hand 0x%x\n",
@@ -671,102 +708,126 @@ static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
}
}
-bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
+static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
+ struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
- struct kvm_apic_map *map;
- unsigned long bitmap = 1;
- struct kvm_lapic **dst;
- int i;
- bool ret, x2apic_ipi;
+ if (kvm->arch.x2apic_broadcast_quirk_disabled) {
+ if ((irq->dest_id == APIC_BROADCAST &&
+ map->mode != KVM_APIC_MODE_X2APIC))
+ return true;
+ if (irq->dest_id == X2APIC_BROADCAST)
+ return true;
+ } else {
+ bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
+ if (irq->dest_id == (x2apic_ipi ?
+ X2APIC_BROADCAST : APIC_BROADCAST))
+ return true;
+ }
- *r = -1;
+ return false;
+}
- if (irq->shorthand == APIC_DEST_SELF) {
- *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
- return true;
- }
+/* Return true if the interrupt can be handled by using *bitmap as index mask
+ * for valid destinations in *dst array.
+ * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
+ * Note: we may have zero kvm_lapic destinations when we return true, which
+ * means that the interrupt should be dropped. In this case, *bitmap would be
+ * zero and *dst undefined.
+ */
+static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
+ struct kvm_lapic **src, struct kvm_lapic_irq *irq,
+ struct kvm_apic_map *map, struct kvm_lapic ***dst,
+ unsigned long *bitmap)
+{
+ int i, lowest;
- if (irq->shorthand)
+ if (irq->shorthand == APIC_DEST_SELF && src) {
+ *dst = src;
+ *bitmap = 1;
+ return true;
+ } else if (irq->shorthand)
return false;
- x2apic_ipi = src && apic_x2apic_mode(src);
- if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
+ if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
return false;
- ret = true;
- rcu_read_lock();
- map = rcu_dereference(kvm->arch.apic_map);
-
- if (!map) {
- ret = false;
- goto out;
+ if (irq->dest_mode == APIC_DEST_PHYSICAL) {
+ if (irq->dest_id > map->max_apic_id) {
+ *bitmap = 0;
+ } else {
+ *dst = &map->phys_map[irq->dest_id];
+ *bitmap = 1;
+ }
+ return true;
}
- if (irq->dest_mode == APIC_DEST_PHYSICAL) {
- if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
- goto out;
+ *bitmap = 0;
+ if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
+ (u16 *)bitmap))
+ return false;
- dst = &map->phys_map[irq->dest_id];
- } else {
- u16 cid;
+ if (!kvm_lowest_prio_delivery(irq))
+ return true;
- if (!kvm_apic_logical_map_valid(map)) {
- ret = false;
- goto out;
+ if (!kvm_vector_hashing_enabled()) {
+ lowest = -1;
+ for_each_set_bit(i, bitmap, 16) {
+ if (!(*dst)[i])
+ continue;
+ if (lowest < 0)
+ lowest = i;
+ else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
+ (*dst)[lowest]->vcpu) < 0)
+ lowest = i;
}
+ } else {
+ if (!*bitmap)
+ return true;
- apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
+ lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
+ bitmap, 16);
- if (cid >= ARRAY_SIZE(map->logical_map))
- goto out;
+ if (!(*dst)[lowest]) {
+ kvm_apic_disabled_lapic_found(kvm);
+ *bitmap = 0;
+ return true;
+ }
+ }
- dst = map->logical_map[cid];
+ *bitmap = (lowest >= 0) ? 1 << lowest : 0;
- if (!kvm_lowest_prio_delivery(irq))
- goto set_irq;
+ return true;
+}
- if (!kvm_vector_hashing_enabled()) {
- int l = -1;
- for_each_set_bit(i, &bitmap, 16) {
- if (!dst[i])
- continue;
- if (l < 0)
- l = i;
- else if (kvm_apic_compare_prio(dst[i]->vcpu,
- dst[l]->vcpu) < 0)
- l = i;
- }
- bitmap = (l >= 0) ? 1 << l : 0;
- } else {
- int idx;
- unsigned int dest_vcpus;
+bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
+ struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
+{
+ struct kvm_apic_map *map;
+ unsigned long bitmap;
+ struct kvm_lapic **dst = NULL;
+ int i;
+ bool ret;
- dest_vcpus = hweight16(bitmap);
- if (dest_vcpus == 0)
- goto out;
+ *r = -1;
- idx = kvm_vector_to_index(irq->vector,
- dest_vcpus, &bitmap, 16);
+ if (irq->shorthand == APIC_DEST_SELF) {
+ *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
+ return true;
+ }
- if (!dst[idx]) {
- kvm_apic_disabled_lapic_found(kvm);
- goto out;
- }
+ rcu_read_lock();
+ map = rcu_dereference(kvm->arch.apic_map);
- bitmap = (idx >= 0) ? 1 << idx : 0;
+ ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
+ if (ret)
+ for_each_set_bit(i, &bitmap, 16) {
+ if (!dst[i])
+ continue;
+ if (*r < 0)
+ *r = 0;
+ *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
- }
-set_irq:
- for_each_set_bit(i, &bitmap, 16) {
- if (!dst[i])
- continue;
- if (*r < 0)
- *r = 0;
- *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
- }
-out:
rcu_read_unlock();
return ret;
}
@@ -789,8 +850,9 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu)
{
struct kvm_apic_map *map;
+ unsigned long bitmap;
+ struct kvm_lapic **dst = NULL;
bool ret = false;
- struct kvm_lapic *dst = NULL;
if (irq->shorthand)
return false;
@@ -798,69 +860,16 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
- if (!map)
- goto out;
-
- if (irq->dest_mode == APIC_DEST_PHYSICAL) {
- if (irq->dest_id == 0xFF)
- goto out;
-
- if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
- goto out;
-
- dst = map->phys_map[irq->dest_id];
- if (dst && kvm_apic_present(dst->vcpu))
- *dest_vcpu = dst->vcpu;
- else
- goto out;
- } else {
- u16 cid;
- unsigned long bitmap = 1;
- int i, r = 0;
-
- if (!kvm_apic_logical_map_valid(map))
- goto out;
-
- apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
-
- if (cid >= ARRAY_SIZE(map->logical_map))
- goto out;
-
- if (kvm_vector_hashing_enabled() &&
- kvm_lowest_prio_delivery(irq)) {
- int idx;
- unsigned int dest_vcpus;
+ if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
+ hweight16(bitmap) == 1) {
+ unsigned long i = find_first_bit(&bitmap, 16);
- dest_vcpus = hweight16(bitmap);
- if (dest_vcpus == 0)
- goto out;
-
- idx = kvm_vector_to_index(irq->vector, dest_vcpus,
- &bitmap, 16);
-
- dst = map->logical_map[cid][idx];
- if (!dst) {
- kvm_apic_disabled_lapic_found(kvm);
- goto out;
- }
-
- *dest_vcpu = dst->vcpu;
- } else {
- for_each_set_bit(i, &bitmap, 16) {
- dst = map->logical_map[cid][i];
- if (++r == 2)
- goto out;
- }
-
- if (dst && kvm_apic_present(dst->vcpu))
- *dest_vcpu = dst->vcpu;
- else
- goto out;
+ if (dst[i]) {
+ *dest_vcpu = dst[i]->vcpu;
+ ret = true;
}
}
- ret = true;
-out:
rcu_read_unlock();
return ret;
}
@@ -1127,12 +1136,6 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
return 0;
switch (offset) {
- case APIC_ID:
- if (apic_x2apic_mode(apic))
- val = kvm_apic_id(apic);
- else
- val = kvm_apic_id(apic) << 24;
- break;
case APIC_ARBPRI:
apic_debug("Access APIC ARBPRI register which is for P6\n");
break;
@@ -1314,6 +1317,111 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
}
+static void start_sw_tscdeadline(struct kvm_lapic *apic)
+{
+ u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
+ u64 ns = 0;
+ ktime_t expire;
+ struct kvm_vcpu *vcpu = apic->vcpu;
+ unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
+ unsigned long flags;
+ ktime_t now;
+
+ if (unlikely(!tscdeadline || !this_tsc_khz))
+ return;
+
+ local_irq_save(flags);
+
+ now = apic->lapic_timer.timer.base->get_time();
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ if (likely(tscdeadline > guest_tsc)) {
+ ns = (tscdeadline - guest_tsc) * 1000000ULL;
+ do_div(ns, this_tsc_khz);
+ expire = ktime_add_ns(now, ns);
+ expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
+ hrtimer_start(&apic->lapic_timer.timer,
+ expire, HRTIMER_MODE_ABS_PINNED);
+ } else
+ apic_timer_expired(apic);
+
+ local_irq_restore(flags);
+}
+
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+{
+ if (!lapic_in_kernel(vcpu))
+ return false;
+
+ return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
+
+static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
+{
+ kvm_x86_ops->cancel_hv_timer(apic->vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
+}
+
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ WARN_ON(swait_active(&vcpu->wq));
+ cancel_hv_tscdeadline(apic);
+ apic_timer_expired(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+
+static bool start_hv_tscdeadline(struct kvm_lapic *apic)
+{
+ u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+ if (atomic_read(&apic->lapic_timer.pending) ||
+ kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_tscdeadline(apic);
+ } else {
+ apic->lapic_timer.hv_timer_in_use = true;
+ hrtimer_cancel(&apic->lapic_timer.timer);
+
+ /* In case the sw timer triggered in the window */
+ if (atomic_read(&apic->lapic_timer.pending))
+ cancel_hv_tscdeadline(apic);
+ }
+ trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
+ apic->lapic_timer.hv_timer_in_use);
+ return apic->lapic_timer.hv_timer_in_use;
+}
+
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ WARN_ON(apic->lapic_timer.hv_timer_in_use);
+
+ if (apic_lvtt_tscdeadline(apic))
+ start_hv_tscdeadline(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
+
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ /* Possibly the TSC deadline timer is not enabled yet */
+ if (!apic->lapic_timer.hv_timer_in_use)
+ return;
+
+ cancel_hv_tscdeadline(apic);
+
+ if (atomic_read(&apic->lapic_timer.pending))
+ return;
+
+ start_sw_tscdeadline(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
+
static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now;
@@ -1360,32 +1468,8 @@ static void start_apic_timer(struct kvm_lapic *apic)
ktime_to_ns(ktime_add_ns(now,
apic->lapic_timer.period)));
} else if (apic_lvtt_tscdeadline(apic)) {
- /* lapic timer in tsc deadline mode */
- u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
- u64 ns = 0;
- ktime_t expire;
- struct kvm_vcpu *vcpu = apic->vcpu;
- unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
- unsigned long flags;
-
- if (unlikely(!tscdeadline || !this_tsc_khz))
- return;
-
- local_irq_save(flags);
-
- now = apic->lapic_timer.timer.base->get_time();
- guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- if (likely(tscdeadline > guest_tsc)) {
- ns = (tscdeadline - guest_tsc) * 1000000ULL;
- do_div(ns, this_tsc_khz);
- expire = ktime_add_ns(now, ns);
- expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
- hrtimer_start(&apic->lapic_timer.timer,
- expire, HRTIMER_MODE_ABS_PINNED);
- } else
- apic_timer_expired(apic);
-
- local_irq_restore(flags);
+ if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
+ start_sw_tscdeadline(apic);
}
}
@@ -1413,7 +1497,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
switch (reg) {
case APIC_ID: /* Local APIC ID */
if (!apic_x2apic_mode(apic))
- kvm_apic_set_id(apic, val >> 24);
+ kvm_apic_set_xapic_id(apic, val >> 24);
else
ret = 1;
break;
@@ -1674,9 +1758,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
/* update jump label if enable bit changes */
if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
- if (value & MSR_IA32_APICBASE_ENABLE)
+ if (value & MSR_IA32_APICBASE_ENABLE) {
+ kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
static_key_slow_dec_deferred(&apic_hw_disabled);
- else
+ } else
static_key_slow_inc(&apic_hw_disabled.key);
recalculate_apic_map(vcpu->kvm);
}
@@ -1716,8 +1801,11 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
/* Stop the timer in case it's a reset to an active apic */
hrtimer_cancel(&apic->lapic_timer.timer);
- if (!init_event)
- kvm_apic_set_id(apic, vcpu->vcpu_id);
+ if (!init_event) {
+ kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
+ MSR_IA32_APICBASE_ENABLE);
+ kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
+ }
kvm_apic_set_version(apic->vcpu);
for (i = 0; i < KVM_APIC_LVT_NUM; i++)
@@ -1856,9 +1944,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
* thinking that APIC satet has changed.
*/
vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
- kvm_lapic_set_base(vcpu,
- APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
-
static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_lapic_reset(vcpu, false);
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
@@ -1938,17 +2023,48 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
return vector;
}
-void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
- struct kvm_lapic_state *s)
+static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
+ struct kvm_lapic_state *s, bool set)
+{
+ if (apic_x2apic_mode(vcpu->arch.apic)) {
+ u32 *id = (u32 *)(s->regs + APIC_ID);
+
+ if (vcpu->kvm->arch.x2apic_format) {
+ if (*id != vcpu->vcpu_id)
+ return -EINVAL;
+ } else {
+ if (set)
+ *id >>= 24;
+ else
+ *id <<= 24;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+{
+ memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
+ return kvm_apic_state_fixup(vcpu, s, false);
+}
+
+int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ int r;
+
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
+
+ r = kvm_apic_state_fixup(vcpu, s, true);
+ if (r)
+ return r;
memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
- /* call kvm_apic_set_id() to put apic into apic_map */
- kvm_apic_set_id(apic, kvm_apic_id(apic));
+
+ recalculate_apic_map(vcpu->kvm);
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
@@ -1974,6 +2090,8 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
kvm_rtc_eoi_tracking_restore_one(vcpu);
vcpu->arch.apic_arb_prio = 0;
+
+ return 0;
}
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
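The kvm_apic_state_fixup() hunk above converts the APIC ID between the xAPIC register layout (ID in bits 31:24) and the plain 32-bit value kept in-kernel once the vcpu is in x2APIC mode and the 32-bit-ID format is not enabled. A minimal standalone sketch of that conversion, using hypothetical helper names rather than the kernel's:

/* Sketch of the ID conversion done by kvm_apic_state_fixup() when
 * KVM_X2APIC_API_USE_32BIT_IDS is not enabled: userspace keeps the
 * xAPIC register layout (ID in bits 31:24) even in x2APIC mode,
 * while the in-kernel register holds the plain 32-bit ID.
 * Helper names are illustrative, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t id_to_kernel(uint32_t userspace_reg)	/* "set" direction */
{
	return userspace_reg >> 24;
}

static uint32_t id_to_userspace(uint32_t kernel_id)	/* "get" direction */
{
	return kernel_id << 24;
}

int main(void)
{
	uint32_t reg = 5u << 24;	/* vcpu 5 in xAPIC layout */

	printf("kernel id:     %u\n", id_to_kernel(reg));
	printf("userspace reg: 0x%08x\n", id_to_userspace(5));
	return 0;
}

When KVM_X2APIC_API_USE_32BIT_IDS is enabled instead, no shifting happens and the fixup only checks that the ID matches vcpu_id, returning -EINVAL otherwise.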
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 891c6da7d4aa..f60d01c29d51 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -20,6 +20,7 @@ struct kvm_timer {
u64 tscdeadline;
u64 expired_tscdeadline;
atomic_t pending; /* accumulated triggered timers */
+ bool hv_timer_in_use;
};
struct kvm_lapic {
@@ -80,8 +81,8 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
-void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
- struct kvm_lapic_state *s);
+int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
+int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
@@ -199,9 +200,15 @@ static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
}
-static inline int kvm_apic_id(struct kvm_lapic *apic)
+static inline u32 kvm_apic_id(struct kvm_lapic *apic)
{
- return (kvm_lapic_get_reg(apic, APIC_ID) >> 24) & 0xff;
+ /* To avoid a race between apic_base and the APIC_ID update that
+ * follows when switching to x2apic mode, return the initial x2apic
+ * id while in x2apic mode.
+ */
+ if (apic_x2apic_mode(apic))
+ return apic->vcpu->vcpu_id;
+
+ return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
}
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
@@ -212,4 +219,8 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu);
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
const unsigned long *bitmap, u32 bitmap_size);
+void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
+void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
+bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index def97b3a392b..3d4cc8cc56a3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -29,7 +29,8 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
@@ -175,6 +176,7 @@ static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_present_mask;
static void mmu_spte_set(u64 *sptep, u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);
@@ -282,13 +284,14 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
}
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
- u64 dirty_mask, u64 nx_mask, u64 x_mask)
+ u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask)
{
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
shadow_dirty_mask = dirty_mask;
shadow_nx_mask = nx_mask;
shadow_x_mask = x_mask;
+ shadow_present_mask = p_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
@@ -304,7 +307,7 @@ static int is_nx(struct kvm_vcpu *vcpu)
static int is_shadow_present_pte(u64 pte)
{
- return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
+ return (pte & 0xFFFFFFFFull) && !is_mmio_spte(pte);
}
static int is_large_pte(u64 pte)
@@ -523,7 +526,7 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
}
/* Rules for using mmu_spte_update:
- * Update the state bits, it means the mapped pfn is not changged.
+ * Update the state bits; this means the mapped pfn is not changed.
*
* Whenever we overwrite a writable spte with a read-only one we
* should flush remote TLBs. Otherwise rmap_write_protect
@@ -2245,10 +2248,9 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
{
u64 spte;
- BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
- VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
+ BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
- spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
+ spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
mmu_spte_set(sptep, spte);
@@ -2515,13 +2517,19 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
gfn_t gfn, kvm_pfn_t pfn, bool speculative,
bool can_unsync, bool host_writable)
{
- u64 spte;
+ u64 spte = 0;
int ret = 0;
if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
return 0;
- spte = PT_PRESENT_MASK;
+ /*
+ * For the EPT case, shadow_present_mask is 0 if hardware
+ * supports exec-only page table entries. In that case,
+ * ACC_USER_MASK and shadow_user_mask are used to represent
+ * read access. See FNAME(gpte_access) in paging_tmpl.h.
+ */
+ spte |= shadow_present_mask;
if (!speculative)
spte |= shadow_accessed_mask;
@@ -3189,7 +3197,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
MMU_WARN_ON(VALID_PAGE(root));
if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
- if (!is_present_gpte(pdptr)) {
+ if (!(pdptr & PT_PRESENT_MASK)) {
vcpu->arch.mmu.pae_root[i] = 0;
continue;
}
@@ -3914,9 +3922,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* clearer.
*/
smap = cr4_smap && u && !uf && !ff;
- } else
- /* Not really needed: no U/S accesses on ept */
- u = 1;
+ }
fault = (ff && !x) || (uf && !u) || (wf && !w) ||
(smapf && smap);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 66b33b96a31b..ddc56e91f2e4 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -93,11 +93,6 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
return kvm_mmu_load(vcpu);
}
-static inline int is_present_gpte(unsigned long pte)
-{
- return pte & PT_PRESENT_MASK;
-}
-
/*
* Currently, we have two sorts of write-protection, a) the first one
* write-protects guest page to sync the guest modification, b) another one is
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bc019f70e0b6..a01105485315 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -131,7 +131,7 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
- return is_present_gpte(pte);
+ return pte & PT_PRESENT_MASK;
#else
return pte & 7;
#endif
@@ -181,13 +181,19 @@ no_present:
return true;
}
+/*
+ * For PTTYPE_EPT, a page table can be executable but not readable
+ * on supported processors. Therefore, set_spte does not automatically
+ * set bit 0 if execute-only is supported. Here, we repurpose ACC_USER_MASK
+ * to signify readability, since it isn't used in the EPT case.
+ */
static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
{
unsigned access;
#if PTTYPE == PTTYPE_EPT
access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
- ACC_USER_MASK;
+ ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
BUILD_BUG_ON(ACC_EXEC_MASK != 1);
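The FNAME(gpte_access) change above stops assuming that every EPT entry is readable: with execute-only support, ACC_USER_MASK is derived from the readable bit instead of being set unconditionally. A self-contained sketch of that mapping follows; the mask values mirror the usual VMX/EPT and ACC_* definitions but are restated here as assumptions for a standalone example:

/* Sketch of the EPT gpte -> ACC_* mapping after the change above.
 * Mask values are restated locally so the example is standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define VMX_EPT_READABLE_MASK	0x1ull
#define VMX_EPT_WRITABLE_MASK	0x2ull
#define VMX_EPT_EXECUTABLE_MASK	0x4ull

#define ACC_EXEC_MASK	1u
#define ACC_WRITE_MASK	2u
#define ACC_USER_MASK	4u

static unsigned int ept_gpte_access(uint64_t gpte)
{
	return ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
	       ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
	       ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
}

int main(void)
{
	/* Execute-only guest entry: no read/user access is implied. */
	printf("access for X-only gpte: %u\n", ept_gpte_access(0x4));
	/* Fully accessible (RWX) entry. */
	printf("access for RWX gpte:    %u\n", ept_gpte_access(0x7));
	return 0;
}

This ties in with the set_spte() comment earlier in the series: when shadow_present_mask is 0 (execute-only supported), read access is conveyed through ACC_USER_MASK and shadow_user_mask rather than through bit 0 of the shadow entry.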
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index ab38af4f4947..9d4a8504a95a 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -93,7 +93,7 @@ static unsigned intel_find_fixed_event(int idx)
return intel_arch_events[fixed_pmc_events[idx]].event_type;
}
-/* check if a PMC is enabled by comparising it with globl_ctrl bits. */
+/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 16ef31b87452..af523d84d102 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1577,7 +1577,7 @@ static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
/*
- * Any change of EFLAGS.VM is accompained by a reload of SS
+ * Any change of EFLAGS.VM is accompanied by a reload of SS
* (caused by either a task switch or an inter-privilege IRET),
* so we do not need to update the CPL here.
*/
@@ -4940,6 +4940,12 @@ out:
static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
local_irq_enable();
+ /*
+ * We must have an instruction with interrupts enabled, so
+ * the timer interrupt isn't delayed by the interrupt shadow.
+ */
+ asm("nop");
+ local_irq_disable();
}
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 8de925031b5c..0a6cc6754ec5 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1348,6 +1348,21 @@ TRACE_EVENT(kvm_avic_unaccelerated_access,
__entry->vec)
);
+TRACE_EVENT(kvm_hv_timer_state,
+ TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
+ TP_ARGS(vcpu_id, hv_timer_in_use),
+ TP_STRUCT__entry(
+ __field(unsigned int, vcpu_id)
+ __field(unsigned int, hv_timer_in_use)
+ ),
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu_id;
+ __entry->hv_timer_in_use = hv_timer_in_use;
+ ),
+ TP_printk("vcpu_id %x hv_timer %x\n",
+ __entry->vcpu_id,
+ __entry->hv_timer_in_use)
+);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index df07a0a4611f..a45d8580f91e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -110,6 +110,13 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
+/* Guest_tsc -> host_tsc conversion requires 64-bit division. */
+static int __read_mostly cpu_preemption_timer_multi;
+static bool __read_mostly enable_preemption_timer = 1;
+#ifdef CONFIG_X86_64
+module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
+#endif
+
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
@@ -398,6 +405,12 @@ struct nested_vmx {
/* The host-usable pointer to the above */
struct page *current_vmcs12_page;
struct vmcs12 *current_vmcs12;
+ /*
+ * Cache of the guest's VMCS, existing outside of guest memory.
+ * Loaded from guest memory during VMPTRLD. Flushed to guest
+ * memory during VMXOFF, VMCLEAR, VMPTRLD.
+ */
+ struct vmcs12 *cached_vmcs12;
struct vmcs *current_shadow_vmcs;
/*
* Indicates if the shadow vmcs must be updated with the
@@ -421,7 +434,6 @@ struct nested_vmx {
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
- u64 msr_ia32_feature_control;
struct hrtimer preemption_timer;
bool preemption_timer_expired;
@@ -597,11 +609,22 @@ struct vcpu_vmx {
#define PML_ENTITY_NUM 512
struct page *pml_pg;
+ /* apic deadline value in host tsc */
+ u64 hv_deadline_tsc;
+
u64 current_tsc_ratio;
bool guest_pkru_valid;
u32 guest_pkru;
u32 host_pkru;
+
+ /*
+ * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
+ * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
+ * in msr_ia32_feature_control_valid_bits.
+ */
+ u64 msr_ia32_feature_control;
+ u64 msr_ia32_feature_control_valid_bits;
};
enum segment_cache_field {
@@ -841,7 +864,7 @@ static inline short vmcs_field_to_offset(unsigned long field)
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
- return to_vmx(vcpu)->nested.current_vmcs12;
+ return to_vmx(vcpu)->nested.cached_vmcs12;
}
static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
@@ -1056,6 +1079,58 @@ static inline bool cpu_has_vmx_virtual_intr_delivery(void)
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}
+/*
+ * Comment format: document - errata name - stepping - processor name.
+ * Taken from
+ * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
+ */
+static u32 vmx_preemption_cpu_tfms[] = {
+/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
+0x000206E6,
+/* 323056.pdf - AAX65 - C2 - Xeon L3406 */
+/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
+/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
+0x00020652,
+/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
+0x00020655,
+/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
+/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
+/*
+ * 320767.pdf - AAP86 - B1 -
+ * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
+ */
+0x000106E5,
+/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
+0x000106A0,
+/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
+0x000106A1,
+/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
+0x000106A4,
+ /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
+ /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
+ /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
+0x000106A5,
+};
+
+static inline bool cpu_has_broken_vmx_preemption_timer(void)
+{
+ u32 eax = cpuid_eax(0x00000001), i;
+
+ /* Clear the reserved bits */
+ eax &= ~(0x3U << 14 | 0xfU << 28);
+ for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
+ if (eax == vmx_preemption_cpu_tfms[i])
+ return true;
+
+ return false;
+}
+
+static inline bool cpu_has_vmx_preemption_timer(void)
+{
+ return vmcs_config.pin_based_exec_ctrl &
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+}
+
static inline bool cpu_has_vmx_posted_intr(void)
{
return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
@@ -1603,6 +1678,11 @@ static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
__vmcs_writel(field, __vmcs_readl(field) | mask);
}
+static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
+{
+ vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
+}
+
static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
vmcs_write32(VM_ENTRY_CONTROLS, val);
@@ -1631,6 +1711,11 @@ static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}
+static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
+{
+ vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
+}
+
static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
vmcs_write32(VM_EXIT_CONTROLS, val);
@@ -2121,22 +2206,14 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+ bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
if (!vmm_exclusive)
kvm_cpu_vmxon(phys_addr);
- else if (vmx->loaded_vmcs->cpu != cpu)
+ else if (!already_loaded)
loaded_vmcs_clear(vmx->loaded_vmcs);
- if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
- per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
- vmcs_load(vmx->loaded_vmcs->vmcs);
- }
-
- if (vmx->loaded_vmcs->cpu != cpu) {
- struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
- unsigned long sysenter_esp;
-
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ if (!already_loaded) {
local_irq_disable();
crash_disable_local_vmclear(cpu);
@@ -2151,6 +2228,18 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
&per_cpu(loaded_vmcss_on_cpu, cpu));
crash_enable_local_vmclear(cpu);
local_irq_enable();
+ }
+
+ if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
+ per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+ }
+
+ if (!already_loaded) {
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
+ unsigned long sysenter_esp;
+
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
/*
* Linux uses per-cpu TSS and GDT, so set these when switching
@@ -2716,13 +2805,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
+ if (cpu_has_vmx_ept_execute_only())
+ vmx->nested.nested_vmx_ept_caps |=
+ VMX_EPT_EXECUTE_ONLY_BIT;
vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
- /*
- * For nested guests, we don't do anything specific
- * for single context invalidation. Hence, only advertise
- * support for global context invalidation.
- */
- vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
+ vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
+ VMX_EPT_EXTENT_CONTEXT_BIT;
} else
vmx->nested.nested_vmx_ept_caps = 0;
@@ -2853,7 +2941,6 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
vmx->nested.nested_vmx_secondary_ctls_high);
break;
case MSR_IA32_VMX_EPT_VPID_CAP:
- /* Currently, no nested vpid support */
*pdata = vmx->nested.nested_vmx_ept_caps |
((u64)vmx->nested.nested_vmx_vpid_caps << 32);
break;
@@ -2864,6 +2951,14 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 0;
}
+static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
+ uint64_t val)
+{
+ uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
+
+ return !(val & ~valid_bits);
+}
+
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
@@ -2905,10 +3000,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
- case MSR_IA32_FEATURE_CONTROL:
- if (!nested_vmx_allowed(vcpu))
+ case MSR_IA32_MCG_EXT_CTL:
+ if (!msr_info->host_initiated &&
+ !(to_vmx(vcpu)->msr_ia32_feature_control &
+ FEATURE_CONTROL_LMCE))
return 1;
- msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+ msr_info->data = vcpu->arch.mcg_ext_ctl;
+ break;
+ case MSR_IA32_FEATURE_CONTROL:
+ msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!nested_vmx_allowed(vcpu))
@@ -2998,12 +3098,20 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC_ADJUST:
ret = kvm_set_msr_common(vcpu, msr_info);
break;
+ case MSR_IA32_MCG_EXT_CTL:
+ if ((!msr_info->host_initiated &&
+ !(to_vmx(vcpu)->msr_ia32_feature_control &
+ FEATURE_CONTROL_LMCE)) ||
+ (data & ~MCG_EXT_CTL_LMCE_EN))
+ return 1;
+ vcpu->arch.mcg_ext_ctl = data;
+ break;
case MSR_IA32_FEATURE_CONTROL:
- if (!nested_vmx_allowed(vcpu) ||
- (to_vmx(vcpu)->nested.msr_ia32_feature_control &
+ if (!vmx_feature_control_msr_valid(vcpu, data) ||
+ (to_vmx(vcpu)->msr_ia32_feature_control &
FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
return 1;
- vmx->nested.msr_ia32_feature_control = data;
+ vmx->msr_ia32_feature_control = data;
if (msr_info->host_initiated && data == 0)
vmx_leave_nested(vcpu);
break;
@@ -3297,25 +3405,27 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
vmx_capability.ept, vmx_capability.vpid);
}
- min = VM_EXIT_SAVE_DEBUG_CONTROLS;
+ min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
#ifdef CONFIG_X86_64
min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
- VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
+ VM_EXIT_CLEAR_BNDCFGS;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
&_vmexit_control) < 0)
return -EIO;
min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
- opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
+ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0)
return -EIO;
+ if (cpu_has_broken_vmx_preemption_timer())
+ _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
if (!(_cpu_based_2nd_exec_control &
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
- !(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3364,7 +3474,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
/*
* Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
- * but due to arrata below it can't be used. Workaround is to use
+ * but due to errata below it can't be used. Workaround is to use
* msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
*
* VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
@@ -4781,6 +4891,8 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
if (!kvm_vcpu_apicv_active(&vmx->vcpu))
pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+ /* Enable the preemption timer dynamically */
+ pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
return pin_based_exec_ctrl;
}
@@ -4896,6 +5008,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
+ vmx->hv_deadline_tsc = -1;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
@@ -6016,12 +6129,14 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
trace_kvm_page_fault(gpa, exit_qualification);
- /* It is a write fault? */
- error_code = exit_qualification & PFERR_WRITE_MASK;
+ /* Is it a read fault? */
+ error_code = (exit_qualification << 2) & PFERR_USER_MASK;
+ /* Is it a write fault? */
+ error_code |= exit_qualification & PFERR_WRITE_MASK;
/* Is it a fetch fault? */
error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
/* ept page table is present? */
- error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
+ error_code |= (exit_qualification & 0x38) != 0;
vcpu->arch.exit_qualification = exit_qualification;
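The error-code derivation above maps the EPT-violation exit qualification onto the x86 page-fault error code that the MMU expects, with PFERR_USER_MASK repurposed to mean "read" for EPT. A standalone sketch of that folding, restating the relevant bit masks as assumptions:

/* Sketch of how handle_ept_violation() folds the EPT-violation exit
 * qualification into a page-fault error code. Bit layouts follow the
 * SDM / kernel definitions but are restated here as assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK	0x01u
#define PFERR_WRITE_MASK	0x02u
#define PFERR_USER_MASK		0x04u
#define PFERR_FETCH_MASK	0x10u

static uint32_t ept_violation_error_code(uint64_t exit_qualification)
{
	uint32_t error_code;

	/* bit 0: read access -> PFERR_USER (repurposed as "read" for EPT) */
	error_code = (exit_qualification << 2) & PFERR_USER_MASK;
	/* bit 1: write access -> PFERR_WRITE */
	error_code |= exit_qualification & PFERR_WRITE_MASK;
	/* bit 2: instruction fetch -> PFERR_FETCH */
	error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
	/* bits 3-5: the GPA was at least partly mapped -> PFERR_PRESENT */
	error_code |= (exit_qualification & 0x38) != 0;

	return error_code;
}

int main(void)
{
	/* Write to a present, read-only mapping: bits 1 (write) and 3 (readable). */
	printf("error code: 0x%x\n", ept_violation_error_code(0xa));
	return 0;
}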
@@ -6355,9 +6470,6 @@ static __init int hardware_setup(void)
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
- /* According SDM, in x2apic mode, the whole id reg is used. But in
- * KVM, it only use the highest eight bits. Need to intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
/* TMCCT */
vmx_enable_intercept_msr_read_x2apic(0x839);
/* TPR */
@@ -6368,10 +6480,12 @@ static __init int hardware_setup(void)
vmx_disable_intercept_msr_write_x2apic(0x83f);
if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull,
+ kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK);
+ 0ull, VMX_EPT_EXECUTABLE_MASK,
+ cpu_has_vmx_ept_execute_only() ?
+ 0ull : VMX_EPT_READABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
@@ -6393,8 +6507,21 @@ static __init int hardware_setup(void)
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
+ if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
+ u64 vmx_msr;
+
+ rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
+ cpu_preemption_timer_multi =
+ vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ } else {
+ kvm_x86_ops->set_hv_timer = NULL;
+ kvm_x86_ops->cancel_hv_timer = NULL;
+ }
+
kvm_set_posted_intr_wakeup_handler(wakeup_handler);
+ kvm_mce_cap_supported |= MCG_LMCE_P;
+
return alloc_kvm_area();
out8:
@@ -6862,16 +6989,22 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
+ if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
!= VMXON_NEEDED_FEATURES) {
kvm_inject_gp(vcpu, 0);
return 1;
}
+ vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ if (!vmx->nested.cached_vmcs12)
+ return -ENOMEM;
+
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
- if (!shadow_vmcs)
+ if (!shadow_vmcs) {
+ kfree(vmx->nested.cached_vmcs12);
return -ENOMEM;
+ }
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
@@ -6942,6 +7075,11 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
vmx->nested.posted_intr_nv = -1;
+
+ /* Flush VMCS12 to guest memory */
+ memcpy(vmx->nested.current_vmcs12, vmx->nested.cached_vmcs12,
+ VMCS12_SIZE);
+
kunmap(vmx->nested.current_vmcs12_page);
nested_release_page(vmx->nested.current_vmcs12_page);
vmx->nested.current_vmptr = -1ull;
@@ -6962,6 +7100,7 @@ static void free_nested(struct vcpu_vmx *vmx)
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
+ kfree(vmx->nested.cached_vmcs12);
/* Unpin physical memory we referred to in current vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
@@ -7365,6 +7504,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
vmx->nested.current_vmptr = vmptr;
vmx->nested.current_vmcs12 = new_vmcs12;
vmx->nested.current_vmcs12_page = page;
+ /*
+ * Load VMCS12 from guest memory since it is not already
+ * cached.
+ */
+ memcpy(vmx->nested.cached_vmcs12,
+ vmx->nested.current_vmcs12, VMCS12_SIZE);
+
if (enable_shadow_vmcs) {
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_SHADOW_VMCS);
@@ -7458,12 +7604,16 @@ static int handle_invept(struct kvm_vcpu *vcpu)
switch (type) {
case VMX_EPT_EXTENT_GLOBAL:
+ /*
+ * TODO: track mappings and invalidate
+ * single context requests appropriately
+ */
+ case VMX_EPT_EXTENT_CONTEXT:
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
nested_vmx_succeed(vcpu);
break;
default:
- /* Trap single context invalidation invept calls */
BUG_ON(1);
break;
}
@@ -7560,6 +7710,12 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ kvm_lapic_expired_hv_timer(vcpu);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -7610,6 +7766,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_XSAVES] = handle_xsaves,
[EXIT_REASON_XRSTORS] = handle_xrstors,
[EXIT_REASON_PML_FULL] = handle_pml_full,
+ [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
};
static const int kvm_vmx_max_exit_handlers =
@@ -7918,6 +8075,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* the XSS exit bitmap in vmcs12.
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+ case EXIT_REASON_PREEMPTION_TIMER:
+ return false;
default:
return true;
}
@@ -8303,7 +8462,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
* the next L2->L1 exit.
*/
if (!is_guest_mode(vcpu) ||
- !nested_cpu_has2(vmx->nested.current_vmcs12,
+ !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
vmcs_write64(APIC_ACCESS_ADDR, hpa);
}
@@ -8436,7 +8595,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
"push %[sp]\n\t"
#endif
"pushf\n\t"
- "orl $0x200, (%%" _ASM_SP ")\n\t"
__ASM_SIZE(push) " $%c[cs]\n\t"
"call *%[entry]\n\t"
:
@@ -8449,8 +8607,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
- } else
- local_irq_enable();
+ }
}
static bool vmx_has_high_real_mode_segbase(void)
@@ -8601,6 +8758,26 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host);
}
+void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 tscl;
+ u32 delta_tsc;
+
+ if (vmx->hv_deadline_tsc == -1)
+ return;
+
+ tscl = rdtsc();
+ if (vmx->hv_deadline_tsc > tscl)
+ /* guaranteed to fit in 32 bits because it was checked in set_hv_timer */
+ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+ cpu_preemption_timer_multi);
+ else
+ delta_tsc = 0;
+
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+}
+
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8650,6 +8827,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
+ vmx_arm_hv_timer(vcpu);
+
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
/* Store host registers */
@@ -8940,6 +9119,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;
+ vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
+
return &vmx->vcpu;
free_vmcs:
@@ -9080,6 +9261,13 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (cpu_has_secondary_exec_ctrls())
vmcs_set_secondary_exec_control(secondary_exec_ctl);
+
+ if (nested_vmx_allowed(vcpu))
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+ FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ else
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
+ ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -9636,9 +9824,14 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
- exec_control |= vmcs_config.pin_based_exec_ctrl;
+
+ /* Preemption timer setting is only taken from vmcs01. */
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ exec_control |= vmcs_config.pin_based_exec_ctrl;
+ if (vmx->hv_deadline_tsc == -1)
+ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ /* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
/*
* Note that we use L0's vector here and in
@@ -10556,8 +10749,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmcs12->vm_exit_intr_error_code,
KVM_ISA_VMX);
- vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
- vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
+ vm_entry_controls_reset_shadow(vmx);
+ vm_exit_controls_reset_shadow(vmx);
vmx_segment_cache_clear(vmx);
/* if no vmcs02 cache requested, remove the one we used */
@@ -10566,8 +10759,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
load_vmcs12_host_state(vcpu, vmcs12);
- /* Update TSC_OFFSET if TSC was changed while L2 ran */
+ /* Update any VMCS fields that might have changed while L2 ran */
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ if (vmx->hv_deadline_tsc == -1)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ else
+ vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
@@ -10647,6 +10846,64 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
return X86EMUL_CONTINUE;
}
+#ifdef CONFIG_X86_64
+/* (a << shift) / divisor; returns 1 on overflow, otherwise 0 */
+static inline int u64_shl_div_u64(u64 a, unsigned int shift,
+ u64 divisor, u64 *result)
+{
+ u64 low = a << shift, high = a >> (64 - shift);
+
+ /* To avoid the overflow on divq */
+ if (high >= divisor)
+ return 1;
+
+ /* low holds the result, high holds the remainder, which is discarded */
+ asm("divq %2\n\t" : "=a" (low), "=d" (high) :
+ "rm" (divisor), "0" (low), "1" (high));
+ *result = low;
+
+ return 0;
+}
+
+static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 tscl = rdtsc();
+ u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
+ u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
+
+ /* Convert to host delta tsc if tsc scaling is enabled */
+ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
+ u64_shl_div_u64(delta_tsc,
+ kvm_tsc_scaling_ratio_frac_bits,
+ vcpu->arch.tsc_scaling_ratio,
+ &delta_tsc))
+ return -ERANGE;
+
+ /*
+ * If the delta tsc can't fit in 32 bits after shifting by
+ * cpu_preemption_timer_multi, we can't use the preemption timer.
+ * It might fit on later vmentries, but checking on every
+ * vmentry is costly, so we just use an hrtimer.
+ */
+ if (delta_tsc >> (cpu_preemption_timer_multi + 32))
+ return -ERANGE;
+
+ vmx->hv_deadline_tsc = tscl + delta_tsc;
+ vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ return 0;
+}
+
+static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ vmx->hv_deadline_tsc = -1;
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+}
+#endif
+
static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
if (ple_gap)
@@ -10691,7 +10948,7 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
* this case, return 1, otherwise, return 0.
*
*/
-static int vmx_pre_block(struct kvm_vcpu *vcpu)
+static int pi_pre_block(struct kvm_vcpu *vcpu)
{
unsigned long flags;
unsigned int dest;
@@ -10758,7 +11015,18 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
return 0;
}
-static void vmx_post_block(struct kvm_vcpu *vcpu)
+static int vmx_pre_block(struct kvm_vcpu *vcpu)
+{
+ if (pi_pre_block(vcpu))
+ return 1;
+
+ if (kvm_lapic_hv_timer_in_use(vcpu))
+ kvm_lapic_switch_to_sw_timer(vcpu);
+
+ return 0;
+}
+
+static void pi_post_block(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct pi_desc old, new;
@@ -10800,6 +11068,14 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
}
}
+static void vmx_post_block(struct kvm_vcpu *vcpu)
+{
+ if (kvm_x86_ops->set_hv_timer)
+ kvm_lapic_switch_to_hv_timer(vcpu);
+
+ pi_post_block(vcpu);
+}
+
/*
* vmx_update_pi_irte - set IRTE for Posted-Interrupts
*
@@ -10844,7 +11120,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
* We will support full lowest-priority interrupt later.
*/
- kvm_set_msi_irq(e, &irq);
+ kvm_set_msi_irq(kvm, e, &irq);
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
/*
* Make sure the IRTE is in remapped mode if
@@ -10889,6 +11165,16 @@ out:
return ret;
}
+static void vmx_setup_mce(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.mcg_cap & MCG_LMCE_P)
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+ FEATURE_CONTROL_LMCE;
+ else
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
+ ~FEATURE_CONTROL_LMCE;
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -11013,6 +11299,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
.pmu_ops = &intel_pmu_ops,
.update_pi_irte = vmx_update_pi_irte,
+
+#ifdef CONFIG_X86_64
+ .set_hv_timer = vmx_set_hv_timer,
+ .cancel_hv_timer = vmx_cancel_hv_timer,
+#endif
+
+ .setup_mce = vmx_setup_mce,
};
static int __init vmx_init(void)
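vmx_set_hv_timer() above converts a guest TSC deadline into a host TSC delta (scaling via a 64-bit shift-and-divide) and rejects deadlines that would not fit in the 32-bit preemption timer after the rate shift; vmx_arm_hv_timer() then programs the remaining delta at each vmentry. A portable sketch of the same arithmetic, using 128-bit integers in place of the inline divq; the ratio, fraction bits and timer rate below are made-up example values:

/* Sketch of the deadline-to-preemption-timer conversion done by
 * vmx_set_hv_timer()/u64_shl_div_u64(), using unsigned __int128
 * (GCC/Clang, 64-bit targets) instead of the raw divq instruction.
 */
#include <stdint.h>
#include <stdio.h>

/* (a << shift) / divisor; returns 1 if the quotient overflows 64 bits. */
static int shl_div_u64(uint64_t a, unsigned int shift,
		       uint64_t divisor, uint64_t *result)
{
	unsigned __int128 q = ((unsigned __int128)a << shift) / divisor;

	if (q > UINT64_MAX)
		return 1;
	*result = (uint64_t)q;
	return 0;
}

int main(void)
{
	/* Illustrative values only. */
	const unsigned int frac_bits = 48;   /* kvm_tsc_scaling_ratio_frac_bits */
	const uint64_t ratio = 1ull << 48;   /* 1:1 TSC scaling */
	const unsigned int timer_rate = 5;   /* MSR_IA32_VMX_MISC bits 4:0 */
	uint64_t guest_tsc = 1000000, deadline = 5000000;
	uint64_t delta = deadline > guest_tsc ? deadline - guest_tsc : 0;
	uint64_t host_delta;

	/* Convert the guest delta to a host delta, as vmx_set_hv_timer() does. */
	if (shl_div_u64(delta, frac_bits, ratio, &host_delta) ||
	    (host_delta >> (timer_rate + 32))) {
		printf("deadline out of range, fall back to the hrtimer\n");
		return 1;
	}

	/* Value vmx_arm_hv_timer() would write to VMX_PREEMPTION_TIMER_VALUE. */
	printf("preemption timer value: %llu\n",
	       (unsigned long long)(host_delta >> timer_rate));
	return 0;
}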
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 45608a7da9b3..19f9f9e05c2a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -36,7 +36,8 @@
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
@@ -70,7 +71,8 @@
#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
-#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
+u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
+EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
#define emul_to_vcpu(ctxt) \
container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
@@ -89,8 +91,12 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
+ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
+
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
+static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
struct kvm_x86_ops *kvm_x86_ops __read_mostly;
@@ -113,7 +119,8 @@ u8 __read_mostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
u64 __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
-static u64 __read_mostly kvm_default_tsc_scaling_ratio;
+u64 __read_mostly kvm_default_tsc_scaling_ratio;
+EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
@@ -537,7 +544,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
goto out;
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
- if (is_present_gpte(pdpte[i]) &&
+ if ((pdpte[i] & PT_PRESENT_MASK) &&
(pdpte[i] &
vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
ret = 0;
@@ -982,6 +989,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
+ MSR_IA32_MCG_EXT_CTL,
MSR_IA32_SMBASE,
};
@@ -1161,7 +1169,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
int version;
int r;
struct pvclock_wall_clock wc;
- struct timespec boot;
+ struct timespec64 boot;
if (!wall_clock)
return;
@@ -1184,13 +1192,13 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
* wall clock specified here. guest system time equals host
* system time for us, thus we must fill in host boot time here.
*/
- getboottime(&boot);
+ getboottime64(&boot);
if (kvm->arch.kvmclock_offset) {
- struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
- boot = timespec_sub(boot, ts);
+ struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
+ boot = timespec64_sub(boot, ts);
}
- wc.sec = boot.tv_sec;
+ wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
wc.nsec = boot.tv_nsec;
wc.version = version;
@@ -2615,6 +2623,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
+ case KVM_CAP_X2APIC_API:
+ r = KVM_X2APIC_API_VALID_FLAGS;
+ break;
default:
r = 0;
break;
@@ -2677,11 +2688,9 @@ long kvm_arch_dev_ioctl(struct file *filp,
break;
}
case KVM_X86_GET_MCE_CAP_SUPPORTED: {
- u64 mce_cap;
-
- mce_cap = KVM_MCE_CAP_SUPPORTED;
r = -EFAULT;
- if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
+ if (copy_to_user(argp, &kvm_mce_cap_supported,
+ sizeof(kvm_mce_cap_supported)))
goto out;
r = 0;
break;
@@ -2733,6 +2742,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdtsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
+
+ if (kvm_lapic_hv_timer_in_use(vcpu) &&
+ kvm_x86_ops->set_hv_timer(vcpu,
+ kvm_get_lapic_tscdeadline_msr(vcpu)))
+ kvm_lapic_switch_to_sw_timer(vcpu);
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
@@ -2766,15 +2780,17 @@ static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
if (vcpu->arch.apicv_active)
kvm_x86_ops->sync_pir_to_irr(vcpu);
- memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
-
- return 0;
+ return kvm_apic_get_state(vcpu, s);
}
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
- kvm_apic_post_state_restore(vcpu, s);
+ int r;
+
+ r = kvm_apic_set_state(vcpu, s);
+ if (r)
+ return r;
update_cr8_intercept(vcpu);
return 0;
@@ -2859,7 +2875,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
- if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
+ if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
goto out;
r = 0;
vcpu->arch.mcg_cap = mcg_cap;
@@ -2869,6 +2885,9 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
/* Init IA32_MCi_CTL to all 1s */
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
+
+ if (kvm_x86_ops->setup_mce)
+ kvm_x86_ops->setup_mce(vcpu);
out:
return r;
}
@@ -3767,7 +3786,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = -EEXIST;
if (irqchip_in_kernel(kvm))
goto split_irqchip_unlock;
- if (atomic_read(&kvm->online_vcpus))
+ if (kvm->created_vcpus)
goto split_irqchip_unlock;
r = kvm_setup_empty_irq_routing(kvm);
if (r)
@@ -3781,6 +3800,18 @@ split_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
}
+ case KVM_CAP_X2APIC_API:
+ r = -EINVAL;
+ if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
+ break;
+
+ if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
+ kvm->arch.x2apic_format = true;
+ if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
+ kvm->arch.x2apic_broadcast_quirk_disabled = true;
+
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -3832,7 +3863,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (kvm->arch.vpic)
goto create_irqchip_unlock;
r = -EINVAL;
- if (atomic_read(&kvm->online_vcpus))
+ if (kvm->created_vcpus)
goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
@@ -3872,7 +3903,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
- mutex_lock(&kvm->slots_lock);
+ mutex_lock(&kvm->lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
@@ -3881,7 +3912,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
- mutex_unlock(&kvm->slots_lock);
+ mutex_unlock(&kvm->lock);
break;
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
@@ -3988,7 +4019,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
case KVM_SET_BOOT_CPU_ID:
r = 0;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) != 0)
+ if (kvm->created_vcpus)
r = -EBUSY;
else
kvm->arch.bsp_vcpu_id = arg;
@@ -5296,13 +5327,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
/* This is a good place to trace that we are exiting SMM. */
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
- if (unlikely(vcpu->arch.smi_pending)) {
- kvm_make_request(KVM_REQ_SMI, vcpu);
- vcpu->arch.smi_pending = 0;
- } else {
- /* Process a latched INIT, if any. */
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- }
+ /* Process a latched INIT or SMI, if any. */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
kvm_mmu_reset_context(vcpu);
@@ -5848,8 +5874,8 @@ int kvm_arch_init(void *opaque)
kvm_x86_ops = ops;
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
- PT_DIRTY_MASK, PT64_NX_MASK, 0);
-
+ PT_DIRTY_MASK, PT64_NX_MASK, 0,
+ PT_PRESENT_MASK);
kvm_timer_init();
perf_register_guest_info_callbacks(&kvm_guest_cbs);
@@ -6083,7 +6109,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
}
/* try to inject new event if pending */
- if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+ if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+ vcpu->arch.smi_pending = false;
+ enter_smm(vcpu);
+ } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
@@ -6106,6 +6135,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
kvm_x86_ops->set_irq(vcpu);
}
}
+
return 0;
}
@@ -6129,7 +6159,7 @@ static void process_nmi(struct kvm_vcpu *vcpu)
#define put_smstate(type, buf, offset, val) \
*(type *)((buf) + (offset) - 0x7e00) = val
-static u32 process_smi_get_segment_flags(struct kvm_segment *seg)
+static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{
u32 flags = 0;
flags |= seg->g << 23;
@@ -6143,7 +6173,7 @@ static u32 process_smi_get_segment_flags(struct kvm_segment *seg)
return flags;
}
-static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
+static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
{
struct kvm_segment seg;
int offset;
@@ -6158,11 +6188,11 @@ static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
put_smstate(u32, buf, offset + 8, seg.base);
put_smstate(u32, buf, offset + 4, seg.limit);
- put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
+ put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
}
#ifdef CONFIG_X86_64
-static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
+static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
{
struct kvm_segment seg;
int offset;
@@ -6171,7 +6201,7 @@ static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
kvm_get_segment(vcpu, &seg, n);
offset = 0x7e00 + n * 16;
- flags = process_smi_get_segment_flags(&seg) >> 8;
+ flags = enter_smm_get_segment_flags(&seg) >> 8;
put_smstate(u16, buf, offset, seg.selector);
put_smstate(u16, buf, offset + 2, flags);
put_smstate(u32, buf, offset + 4, seg.limit);
@@ -6179,7 +6209,7 @@ static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
}
#endif
-static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
+static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
{
struct desc_ptr dt;
struct kvm_segment seg;
@@ -6203,13 +6233,13 @@ static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7fc4, seg.selector);
put_smstate(u32, buf, 0x7f64, seg.base);
put_smstate(u32, buf, 0x7f60, seg.limit);
- put_smstate(u32, buf, 0x7f5c, process_smi_get_segment_flags(&seg));
+ put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
put_smstate(u32, buf, 0x7fc0, seg.selector);
put_smstate(u32, buf, 0x7f80, seg.base);
put_smstate(u32, buf, 0x7f7c, seg.limit);
- put_smstate(u32, buf, 0x7f78, process_smi_get_segment_flags(&seg));
+ put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
kvm_x86_ops->get_gdt(vcpu, &dt);
put_smstate(u32, buf, 0x7f74, dt.address);
@@ -6220,7 +6250,7 @@ static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7f54, dt.size);
for (i = 0; i < 6; i++)
- process_smi_save_seg_32(vcpu, buf, i);
+ enter_smm_save_seg_32(vcpu, buf, i);
put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
@@ -6229,7 +6259,7 @@ static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
}
-static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
+static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
{
#ifdef CONFIG_X86_64
struct desc_ptr dt;
@@ -6261,7 +6291,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
put_smstate(u16, buf, 0x7e90, seg.selector);
- put_smstate(u16, buf, 0x7e92, process_smi_get_segment_flags(&seg) >> 8);
+ put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
put_smstate(u32, buf, 0x7e94, seg.limit);
put_smstate(u64, buf, 0x7e98, seg.base);
@@ -6271,7 +6301,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
put_smstate(u16, buf, 0x7e70, seg.selector);
- put_smstate(u16, buf, 0x7e72, process_smi_get_segment_flags(&seg) >> 8);
+ put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
put_smstate(u32, buf, 0x7e74, seg.limit);
put_smstate(u64, buf, 0x7e78, seg.base);
@@ -6280,31 +6310,26 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u64, buf, 0x7e68, dt.address);
for (i = 0; i < 6; i++)
- process_smi_save_seg_64(vcpu, buf, i);
+ enter_smm_save_seg_64(vcpu, buf, i);
#else
WARN_ON_ONCE(1);
#endif
}
-static void process_smi(struct kvm_vcpu *vcpu)
+static void enter_smm(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
struct desc_ptr dt;
char buf[512];
u32 cr0;
- if (is_smm(vcpu)) {
- vcpu->arch.smi_pending = true;
- return;
- }
-
trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
vcpu->arch.hflags |= HF_SMM_MASK;
memset(buf, 0, 512);
if (guest_cpuid_has_longmode(vcpu))
- process_smi_save_state_64(vcpu, buf);
+ enter_smm_save_state_64(vcpu, buf);
else
- process_smi_save_state_32(vcpu, buf);
+ enter_smm_save_state_32(vcpu, buf);
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
@@ -6360,6 +6385,12 @@ static void process_smi(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
}
+static void process_smi(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.smi_pending = true;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
+
void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
@@ -6554,8 +6585,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (inject_pending_event(vcpu, req_int_win) != 0)
req_immediate_exit = true;
- /* enable NMI/IRQ window open exits if needed */
else {
+ /* Enable NMI/IRQ window open exits if needed.
+ *
+ * SMIs have two cases: 1) they can be nested, in which case
+ * there is nothing to do here because RSM will cause a vmexit
+ * anyway; 2) the SMI can be pending because inject_pending_event
+ * has completed the injection of an IRQ or NMI from the previous
+ * vmexit, in which case we request an immediate exit to inject
+ * the SMI.
+ */
+ if (vcpu->arch.smi_pending && !is_smm(vcpu))
+ req_immediate_exit = true;
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
@@ -6606,12 +6647,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_load_guest_xcr0(vcpu);
- if (req_immediate_exit)
+ if (req_immediate_exit) {
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
smp_send_reschedule(vcpu->cpu);
+ }
trace_kvm_entry(vcpu->vcpu_id);
wait_lapic_expire(vcpu);
- __kvm_guest_enter();
+ guest_enter_irqoff();
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
@@ -6662,16 +6705,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
++vcpu->stat.exits;
- /*
- * We must have an instruction between local_irq_enable() and
- * kvm_guest_exit(), so the timer interrupt isn't delayed by
- * the interrupt shadow. The stat.exits increment will do nicely.
- * But we need to prevent reordering, hence this barrier():
- */
- barrier();
-
- kvm_guest_exit();
+ guest_exit_irqoff();
+ local_irq_enable();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -7408,6 +7444,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
vcpu->arch.hflags = 0;
+ vcpu->arch.smi_pending = 0;
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
@@ -7600,11 +7637,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}
-bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
-{
- return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu);
-}
-
struct static_key kvm_no_apic_vcpu __read_mostly;
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
@@ -7871,7 +7903,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
- kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+ kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kvm_mmu_uninit_vm(kvm);
}
@@ -8379,7 +8411,7 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
/*
* When the producer or consumer is unregistered, we change back to
* remapped mode, so we can re-use the current implementation
- * when the irq is masked/disabed or the consumer side (KVM
+ * when the irq is masked/disabled or the consumer side (KVM
* in this case) doesn't want to receive the interrupts.
*/
ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
diff --git a/arch/x86/lib/cache-smp.c b/arch/x86/lib/cache-smp.c
index a3c668875038..216a629a4a1a 100644
--- a/arch/x86/lib/cache-smp.c
+++ b/arch/x86/lib/cache-smp.c
@@ -1,5 +1,5 @@
#include <linux/smp.h>
-#include <linux/module.h>
+#include <linux/export.h>
static void __wbinvd(void *dummy)
{
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index aa417a97511c..d6f848d1211d 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/export.h>
unsigned int x86_family(unsigned int sig)
{
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9845371c5c36..9a7fe6a70491 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -6,7 +6,7 @@
*/
#include <linux/compiler.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/checksum.h>
static inline unsigned short from32to16(unsigned a)
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index b6fcb9a9ddbc..8bd53589ecfb 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -5,7 +5,7 @@
* Wrappers of assembly checksum functions for x86-64.
*/
#include <asm/checksum.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/smap.h>
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 2f07c291dcc8..073d1f1a620b 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -11,7 +11,7 @@
* we have to worry about.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 02de3d74d2c5..8a602a1e404a 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -35,6 +35,7 @@ ENDPROC(__sw_hweight32)
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
+ pushq %rdi
pushq %rdx
movq %rdi, %rdx # w -> t
@@ -60,6 +61,7 @@ ENTRY(__sw_hweight64)
shrq $56, %rax # w = w_tmp >> 56
popq %rdx
+ popq %rdi
ret
#else /* CONFIG_X86_32 */
/* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index a404b4b75533..cad12634d6bd 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -1,5 +1,5 @@
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/export.h>
#undef memcpy
#undef memset
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index e5e3ed8dc079..c2311a678332 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -18,7 +18,7 @@
*/
#include <linux/hardirq.h>
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/types.h>
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
index 8d6ef78b5d01..ff29e8d39414 100644
--- a/arch/x86/lib/msr-reg-export.c
+++ b/arch/x86/lib/msr-reg-export.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/msr.h>
EXPORT_SYMBOL(rdmsr_safe_regs);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 518532e6a3fa..ce68b6a9d7d1 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/msr.h>
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 004c861b1648..d1dee753b949 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/msr.h>
#define CREATE_TRACE_POINTS
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index bd59090825db..dc0ad12f80bb 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -11,7 +11,7 @@
*/
#include <linux/string.h>
-#include <linux/module.h>
+#include <linux/export.h>
#ifdef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index e342586db6e4..b4908789484e 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -5,7 +5,7 @@
*/
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/word-at-a-time.h>
#include <linux/sched.h>
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index b559d9238781..3bc7baf2a711 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -8,7 +8,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 9f760cdcaf40..69873589c0ba 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -5,7 +5,7 @@
* Copyright 1997 Linus Torvalds
* Copyright 2002 Andi Kleen <ak@suse.de>
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/uaccess.h>
/*
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 2ca15b59fb3f..ba47524f56e8 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 9a17250bcbe0..ea9c49adaa1f 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/pgtable.h>
@@ -454,8 +454,4 @@ static int __init pt_dump_init(void)
return 0;
}
-
__initcall(pt_dump_init);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
-MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index a6d739258137..6d18b70ed5a9 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,5 +1,5 @@
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fb4c1b42fc7e..620928903be3 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -208,7 +208,7 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
* adjust the page_size_mask for small range to go with
* big page size instead small one if nearby are ram too.
*/
-static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
+static void __ref adjust_range_page_size_mask(struct map_range *mr,
int nr_range)
{
int i;
@@ -396,7 +396,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
* This runs before bootmem is initialized and gets pages directly from
* the physical memory. To access them they are temporarily mapped.
*/
-unsigned long __init_refok init_memory_mapping(unsigned long start,
+unsigned long __ref init_memory_mapping(unsigned long start,
unsigned long end)
{
struct map_range mr[NR_RANGE_MR];
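The __init_refok annotation is replaced by __ref here; both mark code that is allowed to reference __init sections without triggering modpost section-mismatch warnings. A hedged sketch of how __ref is used, with hypothetical function names:

#include <linux/init.h>

static int __init example_early_setup(void)
{
	return 0;
}

/* __ref: this caller knowingly reaches __init code, but only during a
 * window in which the init sections have not been freed yet */
static int __ref example_ref_caller(void)
{
	return example_early_setup();
}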
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 84df150ee77e..cf8059016ec8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -5,7 +5,6 @@
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
-#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 53cc2256cf23..14b9dd71d9e8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -27,7 +27,6 @@
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
-#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9c0ff045fdd4..ada98b39b8ad 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,7 +18,7 @@
#include <asm/iomap.h>
#include <asm/pat.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/highmem.h>
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f0894910bdd7..7aaa2635862d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -9,7 +9,6 @@
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index b4f2e7e9e907..4515bae36bbe 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
index aec124214d97..c2638a7d2c10 100644
--- a/arch/x86/mm/kmemcheck/shadow.c
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -1,5 +1,5 @@
#include <linux/kmemcheck.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index ddb2244b06a1..afc47f5c9531 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -11,7 +11,7 @@
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 0057a7accfb1..bef36622e408 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -24,7 +24,7 @@
#define DEBUG 1
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 968ac028c34e..fb682108f4dc 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -8,7 +8,6 @@
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
-#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 47b6436e41c2..6b7ce6279133 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,7 +24,7 @@
#include <linux/bootmem.h>
#include <linux/memblock.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index db00e3e2f3dc..ecb1b69c1651 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,7 +11,6 @@
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 2f7702253ccf..de391b7bc19a 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -11,7 +11,6 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched.h>
#include <linux/gfp.h>
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index 9f0614daea85..a235869532bc 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -26,7 +26,6 @@
* Bjorn Steinbrink (B.Steinbrink@gmx.de), 2007
*/
-#include <linux/module.h>
#include <linux/ptrace.h> /* struct pt_regs */
#include "pf_in.h"
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index e67ae0e6c59d..9adce776852b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -8,7 +8,6 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index e666cbbb9261..cfc3b9121ce4 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -1,6 +1,6 @@
#include <linux/bootmem.h>
#include <linux/mmdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index b1ecff460a46..35fe69529bc1 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -13,7 +13,7 @@
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <asm/proto.h>
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5643fd0b1a7d..4dbe65622810 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -4,7 +4,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8196054fedb0..7b6a9d14c8c0 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -133,7 +133,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev)
if (pci_probe & PCI_NOASSIGN_BARS) {
/*
* If the BIOS did not assign the BAR, zero out the
- * resource so the kernel doesn't attmept to assign
+ * resource so the kernel doesn't attempt to assign
* it later on in pci_assign_unassigned_resources
*/
for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++) {
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 5ceda85b8687..052c1cb76305 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -169,7 +169,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev,
size_t size,
dma_addr_t *dma_handle,
gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vaddr;
diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c
index 613cac7395c4..b814ca675131 100644
--- a/arch/x86/pci/vmd.c
+++ b/arch/x86/pci/vmd.c
@@ -119,10 +119,11 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static void vmd_irq_enable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
+ unsigned long flags;
- raw_spin_lock(&list_lock);
+ raw_spin_lock_irqsave(&list_lock, flags);
list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
- raw_spin_unlock(&list_lock);
+ raw_spin_unlock_irqrestore(&list_lock, flags);
data->chip->irq_unmask(data);
}
@@ -130,12 +131,14 @@ static void vmd_irq_enable(struct irq_data *data)
static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
+ unsigned long flags;
data->chip->irq_mask(data);
- raw_spin_lock(&list_lock);
+ raw_spin_lock_irqsave(&list_lock, flags);
list_del_rcu(&vmdirq->node);
- raw_spin_unlock(&list_lock);
+ INIT_LIST_HEAD_RCU(&vmdirq->node);
+ raw_spin_unlock_irqrestore(&list_lock, flags);
}
/*
@@ -166,16 +169,20 @@ static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
* XXX: We can be even smarter selecting the best IRQ once we solve the
* affinity problem.
*/
-static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd)
+static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
- int i, best = 0;
+ int i, best = 1;
+ unsigned long flags;
- raw_spin_lock(&list_lock);
+ if (!desc->msi_attrib.is_msix || vmd->msix_count == 1)
+ return &vmd->irqs[0];
+
+ raw_spin_lock_irqsave(&list_lock, flags);
for (i = 1; i < vmd->msix_count; i++)
if (vmd->irqs[i].count < vmd->irqs[best].count)
best = i;
vmd->irqs[best].count++;
- raw_spin_unlock(&list_lock);
+ raw_spin_unlock_irqrestore(&list_lock, flags);
return &vmd->irqs[best];
}
@@ -184,14 +191,15 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
unsigned int virq, irq_hw_number_t hwirq,
msi_alloc_info_t *arg)
{
- struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus);
+ struct msi_desc *desc = arg->desc;
+ struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
if (!vmdirq)
return -ENOMEM;
INIT_LIST_HEAD(&vmdirq->node);
- vmdirq->irq = vmd_next_irq(vmd);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
vmdirq->virq = virq;
irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip,
@@ -203,11 +211,12 @@ static void vmd_msi_free(struct irq_domain *domain,
struct msi_domain_info *info, unsigned int virq)
{
struct vmd_irq *vmdirq = irq_get_chip_data(virq);
+ unsigned long flags;
/* XXX: Potential optimization to rebalance */
- raw_spin_lock(&list_lock);
+ raw_spin_lock_irqsave(&list_lock, flags);
vmdirq->irq->count--;
- raw_spin_unlock(&list_lock);
+ raw_spin_unlock_irqrestore(&list_lock, flags);
kfree_rcu(vmdirq, rcu);
}
@@ -261,18 +270,18 @@ static struct device *to_vmd_dev(struct device *dev)
static struct dma_map_ops *vmd_dma_ops(struct device *dev)
{
- return to_vmd_dev(dev)->archdata.dma_ops;
+ return get_dma_ops(to_vmd_dev(dev));
}
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
- gfp_t flag, struct dma_attrs *attrs)
+ gfp_t flag, unsigned long attrs)
{
return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
attrs);
}
static void vmd_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t addr, struct dma_attrs *attrs)
+ dma_addr_t addr, unsigned long attrs)
{
return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
attrs);
@@ -280,7 +289,7 @@ static void vmd_free(struct device *dev, size_t size, void *vaddr,
static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
size, attrs);
@@ -288,7 +297,7 @@ static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
addr, size, attrs);
@@ -297,26 +306,26 @@ static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
dir, attrs);
}
static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}
static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}
@@ -367,7 +376,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
{
struct dma_domain *domain = &vmd->dma_domain;
- if (vmd->dev->dev.archdata.dma_ops)
+ if (get_dma_ops(&vmd->dev->dev))
del_dma_domain(domain);
}
@@ -379,7 +388,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
static void vmd_setup_dma_ops(struct vmd_dev *vmd)
{
- const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops;
+ const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
struct dma_map_ops *dest = &vmd->dma_ops;
struct dma_domain *domain = &vmd->dma_domain;
@@ -594,7 +603,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd)
sd->node = pcibus_to_node(vmd->dev->bus);
vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info,
- NULL);
+ x86_vector_domain);
if (!vmd->irq_domain)
return -ENODEV;
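The vmd.c hunks above convert plain raw_spin_lock()/raw_spin_unlock() pairs to the _irqsave/_irqrestore variants with an on-stack flags word, so the list lock is safe to take from contexts where interrupts may already be disabled. A minimal sketch of that locking pattern, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_add_node(struct list_head *node)
{
	unsigned long flags;

	/* save the current IRQ state, take the lock, restore on unlock */
	raw_spin_lock_irqsave(&example_lock, flags);
	list_add_tail(node, &example_list);
	raw_spin_unlock_irqrestore(&example_lock, flags);
}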
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 99ddab79215e..3a483cb5ac81 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -9,7 +9,7 @@
* Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
* Stefano Stabellini <stefano.stabellini@eu.citrix.com>
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/acpi.h>
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 701fd5843c87..b27bccd4390f 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -11,11 +11,9 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
-#include <linux/reboot.h>
#include <asm/ce4100.h>
#include <asm/prom.h>
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 524142117296..5fdacb322ceb 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -44,7 +44,7 @@ early_initcall(early_efi_map_fb);
* In case earlyprintk=efi,keep we have the whole framebuffer mapped already
* so just return the offset efi_fb + start.
*/
-static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
+static __ref void *early_efi_map(unsigned long start, unsigned long len)
{
unsigned long base;
@@ -56,7 +56,7 @@ static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
return early_ioremap(base + start, len);
}
-static __init_refok void early_efi_unmap(void *addr, unsigned long len)
+static __ref void early_efi_unmap(void *addr, unsigned long len)
{
if (!efi_fb)
early_iounmap(addr, len);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 17c8bbd4e2f0..1fbb408e2e72 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -51,7 +51,6 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
-#include <asm/rtc.h>
#include <asm/uv/uv.h>
static struct efi efi_phys __initdata;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 3e12c44f88a2..677e29e29473 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -24,7 +24,8 @@
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index abbf49c6e9d3..ce119d2ba0d0 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -20,7 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/sfi.h>
#include <linux/irq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/notifier.h>
#include <asm/setup.h>
diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
index ee40fcb6e54d..58024862a7eb 100644
--- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c
+++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h>
#include <asm/intel-mid.h>
#include <asm/intel_mid_vrtc.h>
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 5bc90dd102d4..c901a3423772 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -21,10 +21,9 @@
#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -407,7 +406,6 @@ static const struct pci_device_id mid_pwr_pci_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&mid_info },
{}
};
-MODULE_DEVICE_TABLE(pci, mid_pwr_pci_ids);
static struct pci_driver mid_pwr_pci_driver = {
.name = "intel_mid_pwr",
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 1555672d436f..051d264fce2e 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -24,7 +24,7 @@
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 27376081ddec..7c3077e58fa0 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/string.h>
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index e7604f62870d..f1aab8cdb33f 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -1,9 +1,12 @@
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/io.h>
+#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
#include <asm/pgtable.h>
#include <asm/olpc_ofw.h>
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
index baf16e72e668..fd39301f25ac 100644
--- a/arch/x86/platform/ts5500/ts5500.c
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -23,7 +23,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/leds.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_data/gpio-ts5500.h>
#include <linux/platform_data/max197.h>
#include <linux/platform_device.h>
@@ -345,7 +345,3 @@ error:
return err;
}
device_initcall(ts5500_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
-MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver");
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index e1c24631afbb..776c6592136c 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -8,7 +8,7 @@
* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 8dd80050d705..cd5173a2733f 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -24,7 +24,7 @@
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index f2b5e6a5cf95..f0b5f2d402af 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -37,11 +37,11 @@ unsigned long jump_address_phys;
*/
unsigned long restore_cr3 __visible;
-pgd_t *temp_level4_pgt __visible;
+unsigned long temp_level4_pgt __visible;
unsigned long relocated_restore_code __visible;
-static int set_up_temporary_text_mapping(void)
+static int set_up_temporary_text_mapping(pgd_t *pgd)
{
pmd_t *pmd;
pud_t *pud;
@@ -71,7 +71,7 @@ static int set_up_temporary_text_mapping(void)
__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
set_pud(pud + pud_index(restore_jump_address),
__pud(__pa(pmd) | _KERNPG_TABLE));
- set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+ set_pgd(pgd + pgd_index(restore_jump_address),
__pgd(__pa(pud) | _KERNPG_TABLE));
return 0;
@@ -90,15 +90,16 @@ static int set_up_temporary_mappings(void)
.kernel_mapping = true,
};
unsigned long mstart, mend;
+ pgd_t *pgd;
int result;
int i;
- temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!temp_level4_pgt)
+ pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!pgd)
return -ENOMEM;
/* Prepare a temporary mapping for the kernel text */
- result = set_up_temporary_text_mapping();
+ result = set_up_temporary_text_mapping(pgd);
if (result)
return result;
@@ -107,13 +108,12 @@ static int set_up_temporary_mappings(void)
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
- result = kernel_ident_mapping_init(&info, temp_level4_pgt,
- mstart, mend);
-
+ result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
if (result)
return result;
}
+ temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
return 0;
}
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 8eee0e9c93f0..ce8da3a0412c 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -72,8 +72,6 @@ ENTRY(restore_image)
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
/* switch to temporary page tables */
- movq $__PAGE_OFFSET, %rcx
- subq %rcx, %rax
movq %rax, %cr3
/* flush TLB */
movq %rbx, %rcx
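The hibernation hunks above move the virtual-to-physical conversion of the temporary page table out of the assembly stub and into C: temp_level4_pgt now already holds a CR3-ready physical address, so the __PAGE_OFFSET subtraction in core_restore_code is dropped. For a directly mapped allocation such as the one returned by get_safe_page(), subtracting __PAGE_OFFSET is what __pa() computes; a hedged sketch with a hypothetical helper:

#include <asm/page.h>

/* hypothetical helper: turn a direct-mapped page-table pointer into the
 * physical address that can be loaded into CR3 */
static unsigned long example_pgd_phys(void *pgd)
{
	return __pa(pgd);	/* == (unsigned long)pgd - __PAGE_OFFSET here */
}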
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 12734a96df47..ac58c1616408 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -8,6 +8,8 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
targets += purgatory.ro
+KCOV_INSTRUMENT := n
+
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
# sure how to relocate those. Like kexec-tools, use custom flags.
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index c556c5ae8de5..25012abc3409 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -48,7 +48,7 @@ targets += realmode.lds
$(obj)/realmode.lds: $(obj)/pasyms.h
LDFLAGS_realmode.elf := --emit-relocs -T
-CPPFLAGS_realmode.lds += -P -C -I$(obj)
+CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
targets += realmode.elf
$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
diff --git a/arch/x86/um/delay.c b/arch/x86/um/delay.c
index f3fe1a688f7e..a8fb7ca4822b 100644
--- a/arch/x86/um/delay.c
+++ b/arch/x86/um/delay.c
@@ -7,7 +7,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/param.h>
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 6c803ca49b5d..d72dec406ccb 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -2,6 +2,9 @@
# Building vDSO images for x86.
#
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
VDSO64-y := y
vdso-install-$(VDSO64-y) += vdso.so
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index c8377fb26cdf..1daff5545c0a 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -1,7 +1,6 @@
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include "debugfs.h"
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 69b4b6d29738..8ffb089b19a5 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -23,7 +23,7 @@
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
@@ -34,9 +34,7 @@
#include <linux/edd.h>
#include <linux/frame.h>
-#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
-#endif
#include <xen/xen.h>
#include <xen/events.h>
@@ -1334,7 +1332,8 @@ static void xen_crash_shutdown(struct pt_regs *regs)
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
- xen_reboot(SHUTDOWN_crash);
+ if (!kexec_crash_loaded())
+ xen_reboot(SHUTDOWN_crash);
return NOTIFY_DONE;
}
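The enlighten.c hunk makes the Xen panic notifier defer to kdump: it only asks the hypervisor to crash the domain when no crash kernel is loaded, so a loaded kdump kernel gets a chance to run. A hedged sketch of that notifier shape, with hypothetical names:

#include <linux/kexec.h>
#include <linux/notifier.h>

static void example_platform_crash(void)
{
	/* hypothetical platform-specific crash path */
}

static int example_panic_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	/* let a loaded crash kernel handle the panic; otherwise fall back */
	if (!kexec_crash_loaded())
		example_platform_crash();
	return NOTIFY_DONE;
}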
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 67433714b791..7d5afdb417cc 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -43,7 +43,8 @@
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index dd2a49a8aacc..37129db76d33 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -60,7 +60,7 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 9586ff32810c..d37a0c7f82cb 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <xen/platform_pci.h>
#include "xen-ops.h"
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e345891450c3..176425233e4d 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -4,7 +4,7 @@
* Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index cd66698348ca..1e68806d6695 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -142,7 +142,7 @@ static void xtensa_sync_sg_for_device(struct device *dev,
static void *xtensa_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long ret;
unsigned long uncached = 0;
@@ -171,7 +171,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
}
static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long)vaddr +
XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
@@ -185,7 +185,7 @@ static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_handle = page_to_phys(page) + offset;
@@ -195,14 +195,14 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
@@ -217,7 +217,7 @@ static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
static void xtensa_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
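The xtensa DMA hunks are part of the tree-wide switch from `struct dma_attrs *` to a plain `unsigned long attrs` bitmask in the dma_map_ops callbacks; attributes become DMA_ATTR_* bit flags tested directly on the argument. A hedged sketch of a callback written against the new signature (hypothetical name; the DMA_ATTR_SKIP_CPU_SYNC test is only illustrative):

#include <linux/dma-mapping.h>

static void example_unmap_page(struct device *dev, dma_addr_t handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;		/* caller takes care of cache maintenance */

	/* ... architecture-specific CPU sync would go here ... */
}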
diff --git a/block/Kconfig b/block/Kconfig
index 0363cd731320..161491d0a879 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -88,19 +88,6 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
-config BLK_DEV_DAX
- bool "Block device DAX support"
- depends on FS_DAX
- depends on BROKEN
- help
- When DAX support is available (CONFIG_FS_DAX) raw block
- devices can also support direct userspace access to the
- storage capacity via MMAP(2) similar to a file on a
- DAX-enabled filesystem. However, the DAX I/O-path disables
- some standard I/O-statistics, and the MMAP(2) path has some
- operational differences due to bypassing the page
- cache. If in doubt, say N.
-
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index f70cc3bdfd01..63f72f00c72e 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
bip->bip_bio = bio;
bio->bi_integrity = bip;
- bio->bi_rw |= REQ_INTEGRITY;
+ bio->bi_opf |= REQ_INTEGRITY;
return bip;
err:
diff --git a/block/bio.c b/block/bio.c
index 54ee3846c3a5..f39477538fef 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -580,9 +580,11 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
*/
bio->bi_bdev = bio_src->bi_bdev;
bio_set_flag(bio, BIO_CLONED);
- bio->bi_rw = bio_src->bi_rw;
+ bio->bi_opf = bio_src->bi_opf;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
+
+ bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -661,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
if (!bio)
return NULL;
bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_rw = bio_src->bi_rw;
+ bio->bi_opf = bio_src->bi_opf;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
@@ -687,6 +689,8 @@ integrity_clone:
}
}
+ bio_clone_blkcg_association(bio, bio_src);
+
return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);
@@ -869,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
init_completion(&ret.event);
bio->bi_private = &ret;
bio->bi_end_io = submit_bio_wait_endio;
- bio->bi_rw |= REQ_SYNC;
+ bio->bi_opf |= REQ_SYNC;
submit_bio(bio);
wait_for_completion_io(&ret.event);
@@ -2004,6 +2008,17 @@ void bio_disassociate_task(struct bio *bio)
}
}
+/**
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * @dst: destination bio
+ * @src: source bio
+ */
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+{
+ if (src->bi_css)
+ WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+}
+
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
diff --git a/block/blk-core.c b/block/blk-core.c
index a687e9cc16c2..999442ec4601 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*/
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
return false;
return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
- const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_back_merge_fn(q, req, bio))
return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio)
{
- const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+ const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
if (!ll_front_merge_fn(q, req, bio))
return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
{
req->cmd_type = REQ_TYPE_FS;
- req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
- if (bio->bi_rw & REQ_RAHEAD)
+ req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+ if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
- const bool sync = !!(bio->bi_rw & REQ_SYNC);
+ const bool sync = !!(bio->bi_opf & REQ_SYNC);
struct blk_plug *plug;
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
@@ -1728,7 +1728,7 @@ get_rq:
/*
* Add in META/PRIO flags, if set, before we get to the IO scheduler
*/
- rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+ rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
/*
* Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
printk(KERN_INFO "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b),
- bio->bi_rw,
+ bio->bi_opf,
(unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
}
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
- if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+ if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
- bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+ bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
* one.
*/
for (bio = rq->bio; bio; bio = bio->bi_next) {
- if ((bio->bi_rw & ff) != ff)
+ if ((bio->bi_opf & ff) != ff)
break;
bytes += bio->bi_iter.bi_size;
}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
/* mixed attributes always follow the first bio */
if (req->cmd_flags & REQ_MIXED_MERGE) {
req->cmd_flags &= ~REQ_FAILFAST_MASK;
- req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+ req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
/*
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 41cbd4878958..3eec75a9e91d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
if (split) {
/* there isn't chance to merge the splitted bio */
- split->bi_rw |= REQ_NOMERGE;
+ split->bi_opf |= REQ_NOMERGE;
bio_chain(split, *bio);
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
* Distributes the attributs to each bio.
*/
for (bio = rq->bio; bio; bio = bio->bi_next) {
- WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
- (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
- bio->bi_rw |= ff;
+ WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
+ (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
+ bio->bi_opf |= ff;
}
rq->cmd_flags |= REQ_MIXED_MERGE;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 4ea4dd8a1eed..fe822aa5b8e4 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -380,15 +380,13 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}
-void blk_mq_unregister_disk(struct gendisk *disk)
+static void __blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
int i, j;
- blk_mq_disable_hotplug();
-
queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_unregister_hctx(hctx);
@@ -405,6 +403,12 @@ void blk_mq_unregister_disk(struct gendisk *disk)
kobject_put(&disk_to_dev(disk)->kobj);
q->mq_sysfs_init_done = false;
+}
+
+void blk_mq_unregister_disk(struct gendisk *disk)
+{
+ blk_mq_disable_hotplug();
+ __blk_mq_unregister_disk(disk);
blk_mq_enable_hotplug();
}
@@ -450,7 +454,7 @@ int blk_mq_register_disk(struct gendisk *disk)
}
if (ret)
- blk_mq_unregister_disk(disk);
+ __blk_mq_unregister_disk(disk);
else
q->mq_sysfs_init_done = true;
out:
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 576e7112f807..e931a0e8e73d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -672,7 +672,20 @@ static void blk_mq_timeout_work(struct work_struct *work)
};
int i;
- if (blk_queue_enter(q, true))
+ /* A deadlock might occur if a request is stuck requiring a
+ * timeout at the same time a queue freeze is waiting
+ * completion, since the timeout code would not be able to
+ * acquire the queue reference here.
+ *
+ * That's why we don't use blk_queue_enter here; instead, we use
+ * percpu_ref_tryget directly, because we need to be able to
+ * obtain a reference even in the short window between the queue
+ * starting to freeze, by dropping the first reference in
+ * blk_mq_freeze_queue_start, and the moment the last request is
+ * consumed, marked by the instant q_usage_counter reaches
+ * zero.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter))
return;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
@@ -1221,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (rw_is_sync(bio_op(bio), bio->bi_rw))
+ if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
trace_block_getrq(q, bio, op);
@@ -1289,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
*/
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
- const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_map_ctx data;
struct request *rq;
unsigned int request_count = 0;
@@ -1383,8 +1396,8 @@ done:
*/
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
- const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
- const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+ const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
struct blk_plug *plug;
unsigned int request_count = 0;
struct blk_map_ctx data;
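The blk-mq timeout change above replaces blk_queue_enter() with a bare percpu_ref_tryget(), so the timeout worker can still pin the queue while a freeze is in progress, as the new comment explains. A minimal sketch of that tryget/put pattern, with hypothetical names:

#include <linux/percpu-refcount.h>

static void example_timeout_scan(struct percpu_ref *usage_counter)
{
	/* grab a reference only if the counter is still live */
	if (!percpu_ref_tryget(usage_counter))
		return;

	/* ... iterate over outstanding requests here ... */

	percpu_ref_put(usage_counter);
}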
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 47a3e540631a..f1aba26f4719 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -145,11 +145,6 @@ struct throtl_data
/* Total Number of queued bios on READ and WRITE lists */
unsigned int nr_queued[2];
- /*
- * number of total undestroyed groups
- */
- unsigned int nr_undestroyed_grps;
-
/* Work for dispatching throttled bios */
struct work_struct dispatch_work;
};
@@ -826,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
* second time when it eventually gets issued. Set it when a bio
* is being charged to a tg.
*/
- if (!(bio->bi_rw & REQ_THROTTLED))
- bio->bi_rw |= REQ_THROTTLED;
+ if (!(bio->bi_opf & REQ_THROTTLED))
+ bio->bi_opf |= REQ_THROTTLED;
}
/**
@@ -1404,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
WARN_ON_ONCE(!rcu_read_lock_held());
/* see throtl_charge_bio() */
- if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+ if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
goto out;
spin_lock_irq(q->queue_lock);
@@ -1483,7 +1478,7 @@ out:
* being issued.
*/
if (!throttled)
- bio->bi_rw &= ~REQ_THROTTLED;
+ bio->bi_opf &= ~REQ_THROTTLED;
return throttled;
}
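Throughout the block hunks, bio->bi_rw is renamed to bio->bi_opf, reflecting that the field now carries the request operation plus flags; the flag tests themselves are unchanged. A hedged sketch of a test against the renamed field (hypothetical helper):

#include <linux/bio.h>

static inline bool example_bio_needs_flush(const struct bio *bio)
{
	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}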
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index acabba198de9..cc2f6dbd4303 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
*/
static inline bool cfq_bio_sync(struct bio *bio)
{
- return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+ return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
}
/*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+ cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
}
static void
diff --git a/block/genhd.c b/block/genhd.c
index 3c9dede4e04f..fcd6d4fae657 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -614,7 +614,7 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
/* Register BDI before referencing it from bdev */
bdi = &disk->queue->backing_dev_info;
- bdi_register_dev(bdi, disk_devt(disk));
+ bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
@@ -856,6 +856,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
+ seqf->private = NULL;
}
}
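The genhd.c fix above clears seqf->private after freeing the iterator, so a repeated ->stop() call on the same seq_file cannot free it twice. A hedged sketch of the defensive pattern, with hypothetical names:

#include <linux/slab.h>

struct example_seq_state {
	void *iter;
};

static void example_seq_stop(struct example_seq_state *state)
{
	/* free once, then clear so a repeated stop is a no-op */
	kfree(state->iter);
	state->iter = NULL;
}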
diff --git a/drivers/Makefile b/drivers/Makefile
index 954824438fd1..53abb4a5f736 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -138,6 +138,7 @@ obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
+obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index aebd944bdaa1..445ce28475b3 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -221,6 +221,9 @@ config ACPI_PROCESSOR_IDLE
bool
select CPU_IDLE
+config ACPI_MCFG
+ bool
+
config ACPI_CPPC_LIB
bool
depends on ACPI_PROCESSOR
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 35a6ccbe3025..5ae9d85c5159 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -40,6 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
+obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 81dc75033f15..0980a133916f 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm-generic/rtc.h>
+#include <linux/mc146818rtc.h>
#include "internal.h"
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 148f4e5ca104..31abb0bdd4f2 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -232,8 +232,10 @@ remove_dev_dir:
acpi_device_dir(device) = NULL;
remove_lid_dir:
remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
+ acpi_lid_dir = NULL;
remove_button_dir:
remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+ acpi_button_dir = NULL;
goto done;
}
@@ -250,7 +252,9 @@ static int acpi_button_remove_fs(struct acpi_device *device)
acpi_lid_dir);
acpi_device_dir(device) = NULL;
remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
+ acpi_lid_dir = NULL;
remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+ acpi_button_dir = NULL;
return 0;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 999a10914678..e7bd57cc550a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -101,6 +101,7 @@ enum ec_command {
#define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
* when trying to clear the EC */
+#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
+module_param(ec_max_queries, uint, 0644);
+MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
+
static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
@@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
+static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
@@ -1098,7 +1104,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
* work queue execution.
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- if (!schedule_work(&q->work)) {
+ if (!queue_work(ec_query_wq, &q->work)) {
ec_dbg_evt("Query(0x%02x) overlapped", value);
result = -EBUSY;
}
@@ -1660,15 +1666,41 @@ static struct acpi_driver acpi_ec_driver = {
},
};
+static inline int acpi_ec_query_init(void)
+{
+ if (!ec_query_wq) {
+ ec_query_wq = alloc_workqueue("kec_query", 0,
+ ec_max_queries);
+ if (!ec_query_wq)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static inline void acpi_ec_query_exit(void)
+{
+ if (ec_query_wq) {
+ destroy_workqueue(ec_query_wq);
+ ec_query_wq = NULL;
+ }
+}
+
int __init acpi_ec_init(void)
{
- int result = 0;
+ int result;
+ /* register workqueue for _Qxx evaluations */
+ result = acpi_ec_query_init();
+ if (result)
+ goto err_exit;
/* Now register the driver for the EC */
result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result < 0)
- return -ENODEV;
+ if (result)
+ goto err_exit;
+err_exit:
+ if (result)
+ acpi_ec_query_exit();
return result;
}
@@ -1678,5 +1710,6 @@ static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
+ acpi_ec_query_exit();
}
#endif /* 0 */
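The EC change above stops funnelling _Qxx query work through the system workqueue and instead uses a dedicated workqueue whose max_active limit caps how many queries run in parallel. A minimal sketch of that allocate/queue/destroy pattern, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_query_wq;

static void example_query_handler(struct work_struct *work)
{
	/* process one queued query */
}

static int example_query_init(unsigned int max_active)
{
	example_query_wq = alloc_workqueue("example_query", 0, max_active);
	return example_query_wq ? 0 : -ENOMEM;
}

static bool example_query_schedule(struct work_struct *work)
{
	INIT_WORK(work, example_query_handler);
	return queue_work(example_query_wq, work);
}

static void example_query_exit(void)
{
	if (example_query_wq) {
		destroy_workqueue(example_query_wq);
		example_query_wq = NULL;
	}
}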
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b108f1358a32..4305ee9db4b2 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -309,7 +309,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* During early init (when acpi_gbl_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
-void __iomem *__init_refok
+void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
@@ -362,8 +362,7 @@ out:
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
-void *__init_refok
-acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
return (void *)acpi_os_map_iomem(phys, size);
}
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
new file mode 100644
index 000000000000..b5b376e081f5
--- /dev/null
+++ b/drivers/acpi/pci_mcfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ * Author: Jayachandran C <jchandra@broadcom.com>
+ * Copyright (C) 2016 Semihalf
+ * Author: Tomasz Nowicki <tn@semihalf.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+
+/* Structure to hold entries from the MCFG table */
+struct mcfg_entry {
+ struct list_head list;
+ phys_addr_t addr;
+ u16 segment;
+ u8 bus_start;
+ u8 bus_end;
+};
+
+/* List to save MCFG entries */
+static LIST_HEAD(pci_mcfg_list);
+
+phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
+{
+ struct mcfg_entry *e;
+
+ /*
+ * We expect exact match, unless MCFG entry end bus covers more than
+ * specified by caller.
+ */
+ list_for_each_entry(e, &pci_mcfg_list, list) {
+ if (e->segment == seg && e->bus_start == bus_res->start &&
+ e->bus_end >= bus_res->end)
+ return e->addr;
+ }
+
+ return 0;
+}
+
+static __init int pci_mcfg_parse(struct acpi_table_header *header)
+{
+ struct acpi_table_mcfg *mcfg;
+ struct acpi_mcfg_allocation *mptr;
+ struct mcfg_entry *e, *arr;
+ int i, n;
+
+ if (header->length < sizeof(struct acpi_table_mcfg))
+ return -EINVAL;
+
+ n = (header->length - sizeof(struct acpi_table_mcfg)) /
+ sizeof(struct acpi_mcfg_allocation);
+ mcfg = (struct acpi_table_mcfg *)header;
+ mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
+
+ arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
+ if (!arr)
+ return -ENOMEM;
+
+ for (i = 0, e = arr; i < n; i++, mptr++, e++) {
+ e->segment = mptr->pci_segment;
+ e->addr = mptr->address;
+ e->bus_start = mptr->start_bus_number;
+ e->bus_end = mptr->end_bus_number;
+ list_add(&e->list, &pci_mcfg_list);
+ }
+
+ pr_info("MCFG table detected, %d entries\n", n);
+ return 0;
+}
+
+/* Interface called by ACPI - parse and save MCFG table */
+void __init pci_mmcfg_late_init(void)
+{
+ int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse);
+ if (err)
+ pr_err("Failed to parse MCFG (%d)\n", err);
+}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ae3fe4e64203..d144168d4ef9 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -720,6 +720,36 @@ next:
}
}
+static void acpi_pci_root_remap_iospace(struct resource_entry *entry)
+{
+#ifdef PCI_IOBASE
+ struct resource *res = entry->res;
+ resource_size_t cpu_addr = res->start;
+ resource_size_t pci_addr = cpu_addr - entry->offset;
+ resource_size_t length = resource_size(res);
+ unsigned long port;
+
+ if (pci_register_io_range(cpu_addr, length))
+ goto err;
+
+ port = pci_address_to_pio(cpu_addr);
+ if (port == (unsigned long)-1)
+ goto err;
+
+ res->start = port;
+ res->end = port + length - 1;
+ entry->offset = port - pci_addr;
+
+ if (pci_remap_iospace(res, cpu_addr) < 0)
+ goto err;
+
+ pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res);
+ return;
+err:
+ res->flags |= IORESOURCE_DISABLED;
+#endif
+}
+
int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
{
int ret;
@@ -740,6 +770,9 @@ int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
"no IO and memory resources present in _CRS\n");
else {
resource_list_for_each_entry_safe(entry, tmp, list) {
+ if (entry->res->flags & IORESOURCE_IO)
+ acpi_pci_root_remap_iospace(entry);
+
if (entry->res->flags & IORESOURCE_DISABLED)
resource_list_destroy_entry(entry);
else
@@ -811,6 +844,8 @@ static void acpi_pci_root_release_info(struct pci_host_bridge *bridge)
resource_list_for_each_entry(entry, &bridge->windows) {
res = entry->res;
+ if (res->flags & IORESOURCE_IO)
+ pci_unmap_iospace(res);
if (res->parent &&
(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
release_resource(res);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 773fc3099769..22d1760a4278 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -46,7 +46,8 @@ MODULE_LICENSE("GPL");
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
-static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
+ void *buf, size_t size)
{
struct builtin_fw *b_fw;
@@ -54,6 +55,9 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
if (strcmp(name, b_fw->name) == 0) {
fw->size = b_fw->size;
fw->data = b_fw->data;
+
+ if (buf && fw->size <= size)
+ memcpy(buf, fw->data, fw->size);
return true;
}
}
@@ -74,7 +78,9 @@ static bool fw_is_builtin_firmware(const struct firmware *fw)
#else /* Module case - no builtin firmware support */
-static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+static inline bool fw_get_builtin_firmware(struct firmware *fw,
+ const char *name, void *buf,
+ size_t size)
{
return false;
}
@@ -112,6 +118,7 @@ static inline long firmware_loading_timeout(void)
#define FW_OPT_FALLBACK 0
#endif
#define FW_OPT_NO_WARN (1U << 3)
+#define FW_OPT_NOCACHE (1U << 4)
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
@@ -143,6 +150,7 @@ struct firmware_buf {
unsigned long status;
void *data;
size_t size;
+ size_t allocated_size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
bool is_paged_buf;
bool need_uevent;
@@ -178,7 +186,8 @@ static DEFINE_MUTEX(fw_lock);
static struct firmware_cache fw_cache;
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
- struct firmware_cache *fwc)
+ struct firmware_cache *fwc,
+ void *dbuf, size_t size)
{
struct firmware_buf *buf;
@@ -194,6 +203,8 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
kref_init(&buf->ref);
buf->fwc = fwc;
+ buf->data = dbuf;
+ buf->allocated_size = size;
init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
@@ -217,7 +228,8 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
static int fw_lookup_and_allocate_buf(const char *fw_name,
struct firmware_cache *fwc,
- struct firmware_buf **buf)
+ struct firmware_buf **buf, void *dbuf,
+ size_t size)
{
struct firmware_buf *tmp;
@@ -229,7 +241,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
*buf = tmp;
return 1;
}
- tmp = __allocate_fw_buf(fw_name, fwc);
+ tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
if (tmp)
list_add(&tmp->list, &fwc->head);
spin_unlock(&fwc->lock);
@@ -261,6 +273,7 @@ static void __fw_free_buf(struct kref *ref)
vfree(buf->pages);
} else
#endif
+ if (!buf->allocated_size)
vfree(buf->data);
kfree_const(buf->fw_id);
kfree(buf);
@@ -301,13 +314,21 @@ static void fw_finish_direct_load(struct device *device,
mutex_unlock(&fw_lock);
}
-static int fw_get_filesystem_firmware(struct device *device,
- struct firmware_buf *buf)
+static int
+fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
loff_t size;
int i, len;
int rc = -ENOENT;
char *path;
+ enum kernel_read_file_id id = READING_FIRMWARE;
+ size_t msize = INT_MAX;
+
+ /* Already populated data member means we're loading into a buffer */
+ if (buf->data) {
+ id = READING_FIRMWARE_PREALLOC_BUFFER;
+ msize = buf->allocated_size;
+ }
path = __getname();
if (!path)
@@ -326,8 +347,8 @@ static int fw_get_filesystem_firmware(struct device *device,
}
buf->size = 0;
- rc = kernel_read_file_from_path(path, &buf->data, &size,
- INT_MAX, READING_FIRMWARE);
+ rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
+ id);
if (rc) {
if (rc == -ENOENT)
dev_dbg(device, "loading %s failed with error %d\n",
@@ -691,6 +712,38 @@ out:
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, buf->data + offset, count);
+ else
+ memcpy(buf->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct firmware_buf *buf, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE-1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(buf->pages[page_nr]);
+
+ if (read)
+ memcpy(buffer, page_data + page_ofs, page_cnt);
+ else
+ memcpy(page_data + page_ofs, buffer, page_cnt);
+
+ kunmap(buf->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
@@ -715,21 +768,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
ret_count = count;
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(buf->pages[page_nr]);
-
- memcpy(buffer, page_data + page_ofs, page_cnt);
+ if (buf->data)
+ firmware_rw_buf(buf, buffer, offset, count, true);
+ else
+ firmware_rw(buf, buffer, offset, count, true);
- kunmap(buf->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
out:
mutex_unlock(&fw_lock);
return ret_count;
@@ -804,29 +847,23 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
goto out;
}
- retval = fw_realloc_buffer(fw_priv, offset + count);
- if (retval)
- goto out;
-
- retval = count;
-
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE - 1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(buf->pages[page_nr]);
-
- memcpy(page_data + page_ofs, buffer, page_cnt);
+ if (buf->data) {
+ if (offset + count > buf->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_buf(buf, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_buffer(fw_priv, offset + count);
+ if (retval)
+ goto out;
- kunmap(buf->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
+ retval = count;
+ firmware_rw(buf, buffer, offset, count, false);
}
- buf->size = max_t(size_t, offset, buf->size);
+ buf->size = max_t(size_t, offset + count, buf->size);
out:
mutex_unlock(&fw_lock);
return retval;
@@ -894,7 +931,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
struct firmware_buf *buf = fw_priv->buf;
/* fall back on userspace loading */
- buf->is_paged_buf = true;
+ if (!buf->data)
+ buf->is_paged_buf = true;
dev_set_uevent_suppress(f_dev, true);
@@ -929,7 +967,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
if (is_fw_load_aborted(buf))
retval = -EAGAIN;
- else if (!buf->data)
+ else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
device_del(f_dev);
@@ -1012,7 +1050,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf)
*/
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
- struct device *device)
+ struct device *device, void *dbuf, size_t size)
{
struct firmware *firmware;
struct firmware_buf *buf;
@@ -1025,12 +1063,12 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return -ENOMEM;
}
- if (fw_get_builtin_firmware(firmware, name)) {
+ if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
dev_dbg(device, "using built-in %s\n", name);
return 0; /* assigned */
}
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+ ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
/*
* bind with 'buf' now to avoid warning in failure path
@@ -1070,14 +1108,16 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
* should be fixed in devres or driver core.
*/
/* don't cache firmware handled without uevent */
- if (device && (opt_flags & FW_OPT_UEVENT))
+ if (device && (opt_flags & FW_OPT_UEVENT) &&
+ !(opt_flags & FW_OPT_NOCACHE))
fw_add_devm_name(device, buf->fw_id);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (buf->fwc->state == FW_LOADER_START_CACHE) {
+ if (!(opt_flags & FW_OPT_NOCACHE) &&
+ buf->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(buf->fw_id))
kref_get(&buf->ref);
}
@@ -1091,7 +1131,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, unsigned int opt_flags)
+ struct device *device, void *buf, size_t size,
+ unsigned int opt_flags)
{
struct firmware *fw = NULL;
long timeout;
@@ -1105,7 +1146,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- ret = _request_firmware_prepare(&fw, name, device);
+ ret = _request_firmware_prepare(&fw, name, device, buf, size);
if (ret <= 0) /* error or already assigned */
goto out;
@@ -1184,7 +1225,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device,
+ ret = _request_firmware(firmware_p, name, device, NULL, 0,
FW_OPT_UEVENT | FW_OPT_FALLBACK);
module_put(THIS_MODULE);
return ret;
@@ -1208,7 +1249,7 @@ int request_firmware_direct(const struct firmware **firmware_p,
int ret;
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device,
+ ret = _request_firmware(firmware_p, name, device, NULL, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN);
module_put(THIS_MODULE);
return ret;
@@ -1216,6 +1257,36 @@ int request_firmware_direct(const struct firmware **firmware_p,
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
+ * request_firmware_into_buf - load firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * allocate a buffer to hold the firmware data. Instead, the firmware
+ * is loaded directly into the buffer pointed to by @buf and the @firmware_p
+ * data member is pointed at @buf.
+ *
+ * This function doesn't cache firmware either.
+ */
+int
+request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
+ struct device *device, void *buf, size_t size)
+{
+ int ret;
+
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, buf, size,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK |
+ FW_OPT_NOCACHE);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_firmware_into_buf);
+
+/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
@@ -1247,7 +1318,7 @@ static void request_firmware_work_func(struct work_struct *work)
fw_work = container_of(work, struct firmware_work, work);
- _request_firmware(&fw, fw_work->name, fw_work->device,
+ _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
@@ -1380,7 +1451,7 @@ static int uncache_firmware(const char *fw_name)
pr_debug("%s: %s\n", __func__, fw_name);
- if (fw_get_builtin_firmware(&fw, fw_name))
+ if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
return 0;
buf = fw_lookup_buf(fw_name);
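
A minimal usage sketch for request_firmware_into_buf() as added above; it is not part of the patch. The driver, firmware name and buffer size are hypothetical, and a plain kmalloc'd area stands in for whatever device-visible region a real caller would pass.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/slab.h>

#define EXAMPLE_FW_MAX	(64 * 1024)	/* hypothetical upper bound for the image */

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	void *area;
	int ret;

	/* Preallocated region the image is read straight into. */
	area = kmalloc(EXAMPLE_FW_MAX, GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	ret = request_firmware_into_buf(&fw, "example/fw.bin", dev,
					area, EXAMPLE_FW_MAX);
	if (ret) {
		kfree(area);
		return ret;
	}

	/* fw->data now points at 'area'; fw->size is the number of bytes read. */
	dev_info(dev, "loaded %zu byte image\n", fw->size);

	release_firmware(fw);	/* does not free 'area'; the caller still owns it */
	kfree(area);
	return 0;
}

Because the helper sets FW_OPT_NOCACHE and records allocated_size, the image is not added to the firmware cache and ownership of the buffer stays with the caller after release_firmware().
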
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 29cd96661b30..5548f9686016 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -370,7 +370,7 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#define page_initialized(page) (page->lru.next)
-static int __init_refok get_nid_for_pfn(unsigned long pfn)
+static int __ref get_nid_for_pfn(unsigned long pfn)
{
struct page *page;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 7c04c87738a6..df0c70963d9e 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -402,6 +402,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
+static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
+ unsigned long *freq)
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->rate >= *freq) {
+ opp = temp_opp;
+ *freq = opp->rate;
+ break;
+ }
+ }
+
+ return opp;
+}
+
/**
* dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
* @dev: device for which we do this operation
@@ -427,7 +443,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -440,15 +455,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
if (IS_ERR(opp_table))
return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
- break;
- }
- }
-
- return opp;
+ return _find_freq_ceil(opp_table, freq);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -612,7 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return PTR_ERR(opp_table);
}
- old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+ old_opp = _find_freq_ceil(opp_table, &old_freq);
if (!IS_ERR(old_opp)) {
ou_volt = old_opp->u_volt;
ou_volt_min = old_opp->u_volt_min;
@@ -622,7 +629,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, old_freq, PTR_ERR(old_opp));
}
- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ opp = _find_freq_ceil(opp_table, &freq);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a6975795e7f3..efec10b49d59 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
-#include <asm/rtc.h>
+#include <linux/mc146818rtc.h>
#include "power.h"
@@ -103,7 +103,7 @@ static int set_magic_time(unsigned int user, unsigned int file, unsigned int dev
n /= 24;
time.tm_min = (n % 20) * 3;
n /= 20;
- set_rtc_time(&time);
+ mc146818_set_time(&time);
return n ? -1 : 0;
}
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
- get_rtc_time(&time);
+ mc146818_get_time(&time);
pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fb7718f256c..62e4de2aa8d1 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -334,10 +334,9 @@ void device_wakeup_arm_wake_irqs(void)
struct wakeup_source *ws;
rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- if (ws->wakeirq)
- dev_pm_arm_wake_irq(ws->wakeirq);
- }
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ dev_pm_arm_wake_irq(ws->wakeirq);
+
rcu_read_unlock();
}
@@ -351,10 +350,9 @@ void device_wakeup_disarm_wake_irqs(void)
struct wakeup_source *ws;
rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- if (ws->wakeirq)
- dev_pm_disarm_wake_irq(ws->wakeirq);
- }
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ dev_pm_disarm_wake_irq(ws->wakeirq);
+
rcu_read_unlock();
}
@@ -390,9 +388,7 @@ int device_wakeup_disable(struct device *dev)
return -EINVAL;
ws = device_wakeup_detach(dev);
- if (ws)
- wakeup_source_unregister(ws);
-
+ wakeup_source_unregister(ws);
return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f38c21de29b7..43a36d68c3fd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -888,6 +888,34 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
EXPORT_SYMBOL_GPL(device_get_next_child_node);
/**
+ * device_get_named_child_node - Return first matching named child node handle
+ * @dev: Device to find the named child node for.
+ * @childname: String to match child node name against.
+ */
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname)
+{
+ struct fwnode_handle *child;
+
+ /*
+ * Find first matching named child node of this device.
+ * For ACPI this will be a data only sub-node.
+ */
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child)) {
+ if (!of_node_cmp(to_of_node(child)->name, childname))
+ return child;
+ } else if (is_acpi_data_node(child)) {
+ if (acpi_data_node_match(child, childname))
+ return child;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(device_get_named_child_node);
+
+/**
* fwnode_handle_put - Drop reference to a device node
* @fwnode: Pointer to the device node to drop the reference to.
*
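
A hedged sketch of how a driver might use the new device_get_named_child_node() helper; the "codec" child name and "reg" property are made up for illustration and do not come from the patch.

#include <linux/property.h>

/* Hypothetical probe fragment: find a child node named "codec" and read "reg". */
static int example_parse_child(struct device *dev)
{
	struct fwnode_handle *child;
	u32 reg;
	int ret;

	child = device_get_named_child_node(dev, "codec");
	if (!child)
		return -ENODEV;

	/* The same call works for DT children and ACPI data sub-nodes. */
	ret = fwnode_property_read_u32(child, "reg", &reg);

	fwnode_handle_put(child);	/* release the node once it is no longer needed */
	return ret;
}
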
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3022dad24071..0c76d4016eeb 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
void *mem;
int err = 0;
- if (rw != READ) {
+ if (is_write) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (rw == READ) {
+ if (!is_write) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
- int rw;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
@@ -347,14 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
- rw = bio_data_dir(bio);
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
- err = brd_do_bvec(brd, bvec.bv_page, len,
- bvec.bv_offset, rw, sector);
+ err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -369,11 +366,11 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, err);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, err);
return err;
}
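
The brd conversion above (and the zram one further down) derives the data direction from the operation encoded in the bio rather than from a cached rw value. A minimal, hypothetical sketch of that pattern, not tied to either driver:

#include <linux/bio.h>
#include <linux/highmem.h>

/* Decide direction from the operation in the bio, not bio_data_dir(). */
static void example_handle_bio(struct bio *bio)
{
	bool is_write = op_is_write(bio_op(bio));
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		void *mem = kmap_atomic(bvec.bv_page);

		if (is_write)
			; /* consume bvec.bv_len bytes at mem + bvec.bv_offset */
		else
			; /* fill bvec.bv_len bytes at mem + bvec.bv_offset */

		kunmap_atomic(mem);
	}
}
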
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 0a1aaf8c24c4..2d3d50ab74bf 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -27,7 +27,6 @@
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
-#include <linux/dynamic_debug.h>
#include "drbd_int.h"
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 7b54354976a5..4cb8f21ff4ef 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -41,6 +41,7 @@
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
+#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0501ae0c517b..100be556e613 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
- return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
- (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+ return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+ (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+ (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
else
- return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+ return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index df45713dfbe8..942384f34e22 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
* drbd_submit_peer_request()
* @device: DRBD device.
* @peer_req: peer request
- * @rw: flag field, see bio->bi_rw
+ * @rw: flag field, see bio->bi_opf
*
* May spread the pages to multiple bios,
* depending on bio_add_page restrictions.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 66b8e4bb74d8..de279fe4e4fd 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
*/
if (!ok &&
bio_op(req->master_bio) == REQ_OP_READ &&
- !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+ !(req->master_bio->bi_opf & REQ_RAHEAD) &&
!list_empty(&req->tl_requests))
req->rq_state |= RQ_POSTPONED;
@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+ D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
if (bio_op(bio) != REQ_OP_READ)
type = DRBD_FAULT_DT_WR;
- else if (bio->bi_rw & REQ_RAHEAD)
+ else if (bio->bi_opf & REQ_RAHEAD)
type = DRBD_FAULT_DT_RA;
else
type = DRBD_FAULT_DT_RD;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 35dbb3dca47e..c6755c9a0aea 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
what = DISCARD_COMPLETED_WITH_ERROR;
break;
case REQ_OP_READ:
- if (bio->bi_rw & REQ_RAHEAD)
+ if (bio->bi_opf & REQ_RAHEAD)
what = READ_AHEAD_COMPLETED_WITH_ERROR;
else
what = READ_COMPLETED_WITH_ERROR;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c557057fe8ae..b71a9c767009 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
opened_bdev[drive] = bdev;
- if (!(mode & (FMODE_READ|FMODE_WRITE))) {
- res = -EINVAL;
- goto out;
- }
-
res = -ENXIO;
if (!floppy_track_buffer) {
@@ -3711,13 +3706,15 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
- goto out;
+ if (mode & (FMODE_READ|FMODE_WRITE)) {
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
+ }
res = -EROFS;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 075377eee0c0..c9f2107f7095 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -510,14 +510,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
return 0;
}
-
-static inline int lo_rw_simple(struct loop_device *lo,
- struct request *rq, loff_t pos, bool rw)
+static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, rw);
+ loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
/*
* lo_write_simple and lo_read_simple should have been covered
@@ -528,37 +524,30 @@ static inline int lo_rw_simple(struct loop_device *lo,
* of the req at one time. And direct read IO doesn't need to
* run flush_dcache_page().
*/
- if (rw == WRITE)
- return lo_write_simple(lo, rq, pos);
- else
- return lo_read_simple(lo, rq, pos);
-}
-
-static int do_req_filebacked(struct loop_device *lo, struct request *rq)
-{
- loff_t pos;
- int ret;
-
- pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
-
- if (op_is_write(req_op(rq))) {
- if (req_op(rq) == REQ_OP_FLUSH)
- ret = lo_req_flush(lo, rq);
- else if (req_op(rq) == REQ_OP_DISCARD)
- ret = lo_discard(lo, rq, pos);
- else if (lo->transfer)
- ret = lo_write_transfer(lo, rq, pos);
+ switch (req_op(rq)) {
+ case REQ_OP_FLUSH:
+ return lo_req_flush(lo, rq);
+ case REQ_OP_DISCARD:
+ return lo_discard(lo, rq, pos);
+ case REQ_OP_WRITE:
+ if (lo->transfer)
+ return lo_write_transfer(lo, rq, pos);
+ else if (cmd->use_aio)
+ return lo_rw_aio(lo, cmd, pos, WRITE);
else
- ret = lo_rw_simple(lo, rq, pos, WRITE);
-
- } else {
+ return lo_write_simple(lo, rq, pos);
+ case REQ_OP_READ:
if (lo->transfer)
- ret = lo_read_transfer(lo, rq, pos);
+ return lo_read_transfer(lo, rq, pos);
+ else if (cmd->use_aio)
+ return lo_rw_aio(lo, cmd, pos, READ);
else
- ret = lo_rw_simple(lo, rq, pos, READ);
+ return lo_read_simple(lo, rq, pos);
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ break;
}
-
- return ret;
}
struct switch_request {
@@ -1659,11 +1648,15 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (lo->lo_state != Lo_bound)
return -EIO;
- if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
- req_op(cmd->rq) == REQ_OP_DISCARD))
- cmd->use_aio = true;
- else
+ switch (req_op(cmd->rq)) {
+ case REQ_OP_FLUSH:
+ case REQ_OP_DISCARD:
cmd->use_aio = false;
+ break;
+ default:
+ cmd->use_aio = lo->use_dio;
+ break;
+ }
queue_kthread_work(&lo->worker, &cmd->work);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6f55b262b5ce..a9e398019f38 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -451,14 +451,9 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
sk_set_memalloc(nbd->sock->sk);
- nbd->task_recv = current;
-
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-
- nbd->task_recv = NULL;
-
return ret;
}
@@ -477,9 +472,6 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_size_clear(nbd, bdev);
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-
- nbd->task_recv = NULL;
-
return ret;
}
@@ -788,6 +780,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!nbd->sock)
return -EINVAL;
+ /* We have to claim the device under the lock */
+ nbd->task_recv = current;
mutex_unlock(&nbd->tx_lock);
nbd_parse_flags(nbd, bdev);
@@ -796,6 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_name(nbd));
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
+ nbd->task_recv = NULL;
return PTR_ERR(thread);
}
@@ -805,6 +800,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
+ nbd->task_recv = NULL;
sock_shutdown(nbd);
nbd_clear_que(nbd);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9393bc730acf..90fa4ac149db 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
- pkt->bio->bi_rw = REQ_WRITE;
+ bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 450662055d97..1a04af6d2421 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1937,7 +1937,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
obj_request->object_name))
goto fail;
@@ -1991,7 +1991,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
obj_request->object_name))
goto fail;
@@ -3995,10 +3995,11 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* Initialize the layout used for all rbd requests */
- rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
- rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
+ rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
+ rbd_dev->layout.stripe_count = 1;
+ rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
+ rbd_dev->layout.pool_id = spec->pool_id;
+ RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
/*
* If this is a mapping rbd_dev (as opposed to a parent one),
@@ -5187,7 +5188,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
- rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
if (rbd_dev->image_format == 1)
ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
spec->image_name, RBD_SUFFIX);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d0a3e6d4515f..be90e15854ed 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7454cf188c8e..04365b17ee67 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int rw)
+ int offset, bool is_write)
{
unsigned long start_time = jiffies;
+ int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
int ret;
- generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (rw == READ) {
+ if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
} else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset);
}
- generic_end_io_acct(rw, &zram->disk->part0, start_time);
+ generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
if (unlikely(ret)) {
- if (rw == READ)
+ if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -873,7 +874,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
- int offset, rw;
+ int offset;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
@@ -888,7 +889,6 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
return;
}
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -903,15 +903,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset,
+ op_is_write(bio_op(bio))) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0,
+ op_is_write(bio_op(bio))) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset,
+ op_is_write(bio_op(bio))) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -968,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
int offset, err = -EIO;
u32 index;
@@ -992,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, rw);
+ err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
zram_meta_put(zram);
out:
@@ -1005,7 +1008,7 @@ out:
* (e.g., SetPageError, set_page_dirty and extra works).
*/
if (err == 0)
- page_endio(page, rw, 0);
+ page_endio(page, is_write, 0);
return err;
}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index c5a7de9bc783..3b205e212337 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,6 +132,19 @@ config SUNXI_RSB
with various RSB based devices, such as AXP223, AXP8XX PMICs,
and AC100/AC200 ICs.
+# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence
+# the driver will fail to build as a module. However there are patches to
+# address that queued for v4.8, so this can be turned into a tristate symbol
+# after v4.8-rc1.
+config TEGRA_ACONNECT
+ bool "Tegra ACONNECT Bus Driver"
+ depends on ARCH_TEGRA_210_SOC
+ depends on OF && PM
+ select PM_CLK
+ help
+ Driver for the Tegra ACONNECT bus which is used to interface with
+ the devices inside the Audio Processing Engine (APE) for Tegra210.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ccff007ee7e8..ac84cc4348e3 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index ce54a0160faa..c7f396903184 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -117,7 +117,7 @@ struct mvebu_mbus_soc_data {
unsigned int (*win_remap_offset)(const int win);
void (*setup_cpu_target)(struct mvebu_mbus_state *s);
int (*save_cpu_target)(struct mvebu_mbus_state *s,
- u32 *store_addr);
+ u32 __iomem *store_addr);
int (*show_cpu_target)(struct mvebu_mbus_state *s,
struct seq_file *seq, void *v);
};
@@ -728,7 +728,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
static int
mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
- u32 *store_addr)
+ u32 __iomem *store_addr)
{
int i;
@@ -780,7 +780,7 @@ mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
static int
mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
- u32 *store_addr)
+ u32 __iomem *store_addr)
{
int i;
@@ -796,7 +796,7 @@ mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
return 4;
}
-int mvebu_mbus_save_cpu_target(u32 *store_addr)
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr)
{
return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
}
@@ -1089,7 +1089,7 @@ static void mvebu_mbus_resume(void)
}
}
-struct syscore_ops mvebu_mbus_syscore_ops = {
+static struct syscore_ops mvebu_mbus_syscore_ops = {
.suspend = mvebu_mbus_suspend,
.resume = mvebu_mbus_resume,
};
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
new file mode 100644
index 000000000000..7e4104b74fa8
--- /dev/null
+++ b/drivers/bus/tegra-aconnect.c
@@ -0,0 +1,112 @@
+/*
+ * Tegra ACONNECT Bus Driver
+ *
+ * Copyright (C) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+
+static int tegra_aconnect_add_clock(struct device *dev, char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s clock not found\n", name);
+ return PTR_ERR(clk);
+ }
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret)
+ clk_put(clk);
+
+ return ret;
+}
+
+static int tegra_aconnect_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -EINVAL;
+
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = tegra_aconnect_add_clock(&pdev->dev, "ape");
+ if (ret)
+ goto clk_destroy;
+
+ ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape");
+ if (ret)
+ goto clk_destroy;
+
+ pm_runtime_enable(&pdev->dev);
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
+
+ return 0;
+
+clk_destroy:
+ pm_clk_destroy(&pdev->dev);
+
+ return ret;
+}
+
+static int tegra_aconnect_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ pm_clk_destroy(&pdev->dev);
+
+ return 0;
+}
+
+static int tegra_aconnect_runtime_resume(struct device *dev)
+{
+ return pm_clk_resume(dev);
+}
+
+static int tegra_aconnect_runtime_suspend(struct device *dev)
+{
+ return pm_clk_suspend(dev);
+}
+
+static const struct dev_pm_ops tegra_aconnect_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
+ tegra_aconnect_runtime_resume, NULL)
+};
+
+static const struct of_device_id tegra_aconnect_of_match[] = {
+ { .compatible = "nvidia,tegra210-aconnect", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
+
+static struct platform_driver tegra_aconnect_driver = {
+ .probe = tegra_aconnect_probe,
+ .remove = tegra_aconnect_remove,
+ .driver = {
+ .name = "tegra-aconnect",
+ .of_match_table = tegra_aconnect_of_match,
+ .pm = &tegra_aconnect_pm_ops,
+ },
+};
+module_platform_driver(tegra_aconnect_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 601f64fcc890..dcc09739a54e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -178,6 +178,20 @@ config IBM_BSR
of threads across a large system which avoids bouncing a cacheline
between several cores on a system
+config POWERNV_OP_PANEL
+ tristate "IBM POWERNV Operator Panel Display support"
+ depends on PPC_POWERNV
+ default m
+ help
+ If you say Y here, a special character device node, /dev/op_panel,
+ will be created which exposes the operator panel display on IBM
+ Power Systems machines with FSPs.
+
+ If you don't require access to the operator panel display from user
+ space, say N.
+
+ If unsure, say M here to build it as a module called powernv-op-panel.
+
source "drivers/char/ipmi/Kconfig"
config DS1620
@@ -279,7 +293,7 @@ if RTC_LIB=n
config RTC
tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
- depends on ALPHA || (MIPS && MACH_LOONGSON64) || MN10300
+ depends on ALPHA || (MIPS && MACH_LOONGSON64)
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -325,32 +339,6 @@ config JS_RTC
To compile this driver as a module, choose M here: the
module will be called js-rtc.
-config GEN_RTC
- tristate "Generic /dev/rtc emulation"
- depends on RTC!=y
- depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- It reports status information via the file /proc/driver/rtc and its
- behaviour is set by various ioctls on /dev/rtc. If you enable the
- "extended RTC operation" below it will also provide an emulation
- for RTC_UIE which is required by some programs and may improve
- precision in some cases.
-
- To compile this driver as a module, choose M here: the
- module will be called genrtc.
-
-config GEN_RTC_X
- bool "Extended RTC operation"
- depends on GEN_RTC
- help
- Provides an emulation for RTC_UIE which is required by some programs
- and may improve precision of the generic RTC support in some cases.
-
config EFI_RTC
bool "EFI Real Time Clock Services"
depends on IA64
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d8a7579300d2..6e6c244a66a0 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_APPLICOM) += applicom.o
obj-$(CONFIG_SONYPI) += sonypi.o
obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
-obj-$(CONFIG_GEN_RTC) += genrtc.o
obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_DS1302) += ds1302.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
@@ -60,3 +59,4 @@ js-rtc-y = rtc.o
obj-$(CONFIG_TILE_SROM) += tile-srom.o
obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index aef87fdbd187..44311296ec02 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
+void intel_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags)
+{
+ intel_private.driver->write_entry(addr, pg, flags);
+}
+EXPORT_SYMBOL(intel_gtt_insert_page);
+
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags)
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
deleted file mode 100644
index 4f943759d376..000000000000
--- a/drivers/char/genrtc.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Real Time Clock interface for
- * - q40 and other m68k machines,
- * - HP PARISC machines
- * - PowerPC machines
- * emulate some RTC irq capabilities in software
- *
- * Copyright (C) 1999 Richard Zidlicky
- *
- * based on Paul Gortmaker's rtc.c device and
- * Sam Creasey Generic rtc driver
- *
- * This driver allows use of the real time clock (built into
- * nearly all computers) from user space. It exports the /dev/rtc
- * interface supporting various ioctl() and also the /proc/driver/rtc
- * pseudo-file for status information.
- *
- * The ioctls can be used to set the interrupt behaviour where
- * supported.
- *
- * The /dev/rtc interface will block on reads until an interrupt
- * has been received. If a RTC interrupt has already happened,
- * it will output an unsigned long and then block. The output value
- * contains the interrupt status in the low byte and the number of
- * interrupts since the last read in the remaining high bytes. The
- * /dev/rtc interface can also be used with the select(2) call.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
-
- * 1.01 fix for 2.3.X rz@linux-m68k.org
- * 1.02 merged with code from genrtc.c rz@linux-m68k.org
- * 1.03 make it more portable zippel@linux-m68k.org
- * 1.04 removed useless timer code rz@linux-m68k.org
- * 1.05 portable RTC_UIE emulation rz@linux-m68k.org
- * 1.06 set_rtc_time can return an error trini@kernel.crashing.org
- * 1.07 ported to HP PARISC (hppa) Helge Deller <deller@gmx.de>
- */
-
-#define RTC_VERSION "1.07"
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/fcntl.h>
-
-#include <linux/rtc.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/rtc.h>
-
-/*
- * We sponge a minor off of the misc major. No need slurping
- * up another valuable major dev number for this. If you add
- * an ioctl, make sure you don't conflict with SPARC's RTC
- * ioctls.
- */
-
-static DEFINE_MUTEX(gen_rtc_mutex);
-static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait);
-
-/*
- * Bits in gen_rtc_status.
- */
-
-#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */
-
-static unsigned char gen_rtc_status; /* bitmapped status byte. */
-static unsigned long gen_rtc_irq_data; /* our output to the world */
-
-/* months start at 0 now */
-static unsigned char days_in_mo[] =
-{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-static int irq_active;
-
-#ifdef CONFIG_GEN_RTC_X
-static struct work_struct genrtc_task;
-static struct timer_list timer_task;
-
-static unsigned int oldsecs;
-static int lostint;
-static unsigned long tt_exp;
-
-static void gen_rtc_timer(unsigned long data);
-
-static volatile int stask_active; /* schedule_work */
-static volatile int ttask_active; /* timer_task */
-static int stop_rtc_timers; /* don't requeue tasks */
-static DEFINE_SPINLOCK(gen_rtc_lock);
-
-static void gen_rtc_interrupt(unsigned long arg);
-
-/*
- * Routine to poll RTC seconds field for change as often as possible,
- * after first RTC_UIE use timer to reduce polling
- */
-static void genrtc_troutine(struct work_struct *work)
-{
- unsigned int tmp = get_rtc_ss();
-
- if (stop_rtc_timers) {
- stask_active = 0;
- return;
- }
-
- if (oldsecs != tmp){
- oldsecs = tmp;
-
- timer_task.function = gen_rtc_timer;
- timer_task.expires = jiffies + HZ - (HZ/10);
- tt_exp=timer_task.expires;
- ttask_active=1;
- stask_active=0;
- add_timer(&timer_task);
-
- gen_rtc_interrupt(0);
- } else if (schedule_work(&genrtc_task) == 0)
- stask_active = 0;
-}
-
-static void gen_rtc_timer(unsigned long data)
-{
- lostint = get_rtc_ss() - oldsecs ;
- if (lostint<0)
- lostint = 60 - lostint;
- if (time_after(jiffies, tt_exp))
- printk(KERN_INFO "genrtc: timer task delayed by %ld jiffies\n",
- jiffies-tt_exp);
- ttask_active=0;
- stask_active=1;
- if ((schedule_work(&genrtc_task) == 0))
- stask_active = 0;
-}
-
-/*
- * call gen_rtc_interrupt function to signal an RTC_UIE,
- * arg is unused.
- * Could be invoked either from a real interrupt handler or
- * from some routine that periodically (eg 100HZ) monitors
- * whether RTC_SECS changed
- */
-static void gen_rtc_interrupt(unsigned long arg)
-{
- /* We store the status in the low byte and the number of
- * interrupts received since the last read in the remainder
- * of rtc_irq_data. */
-
- gen_rtc_irq_data += 0x100;
- gen_rtc_irq_data &= ~0xff;
- gen_rtc_irq_data |= RTC_UIE;
-
- if (lostint){
- printk("genrtc: system delaying clock ticks?\n");
- /* increment count so that userspace knows something is wrong */
- gen_rtc_irq_data += ((lostint-1)<<8);
- lostint = 0;
- }
-
- wake_up_interruptible(&gen_rtc_wait);
-}
-
-/*
- * Now all the various file operations that we export.
- */
-static ssize_t gen_rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned long data;
- ssize_t retval;
-
- if (count != sizeof (unsigned int) && count != sizeof (unsigned long))
- return -EINVAL;
-
- if (file->f_flags & O_NONBLOCK && !gen_rtc_irq_data)
- return -EAGAIN;
-
- retval = wait_event_interruptible(gen_rtc_wait,
- (data = xchg(&gen_rtc_irq_data, 0)));
- if (retval)
- goto out;
-
- /* first test allows optimizer to nuke this case for 32-bit machines */
- if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
- unsigned int uidata = data;
- retval = put_user(uidata, (unsigned int __user *)buf) ?:
- sizeof(unsigned int);
- }
- else {
- retval = put_user(data, (unsigned long __user *)buf) ?:
- sizeof(unsigned long);
- }
-out:
- return retval;
-}
-
-static unsigned int gen_rtc_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- poll_wait(file, &gen_rtc_wait, wait);
- if (gen_rtc_irq_data != 0)
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-#endif
-
-/*
- * Used to disable/enable interrupts, only RTC_UIE supported
- * We also clear out any old irq data after an ioctl() that
- * meddles with the interrupt enable/disable bits.
- */
-
-static inline void gen_clear_rtc_irq_bit(unsigned char bit)
-{
-#ifdef CONFIG_GEN_RTC_X
- stop_rtc_timers = 1;
- if (ttask_active){
- del_timer_sync(&timer_task);
- ttask_active = 0;
- }
- while (stask_active)
- schedule();
-
- spin_lock(&gen_rtc_lock);
- irq_active = 0;
- spin_unlock(&gen_rtc_lock);
-#endif
-}
-
-static inline int gen_set_rtc_irq_bit(unsigned char bit)
-{
-#ifdef CONFIG_GEN_RTC_X
- spin_lock(&gen_rtc_lock);
- if ( !irq_active ) {
- irq_active = 1;
- stop_rtc_timers = 0;
- lostint = 0;
- INIT_WORK(&genrtc_task, genrtc_troutine);
- oldsecs = get_rtc_ss();
- init_timer(&timer_task);
-
- stask_active = 1;
- if (schedule_work(&genrtc_task) == 0){
- stask_active = 0;
- }
- }
- spin_unlock(&gen_rtc_lock);
- gen_rtc_irq_data = 0;
- return 0;
-#else
- return -EINVAL;
-#endif
-}
-
-static int gen_rtc_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct rtc_time wtime;
- struct rtc_pll_info pll;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
-
- case RTC_PLL_GET:
- if (get_rtc_pll(&pll))
- return -EINVAL;
- else
- return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
-
- case RTC_PLL_SET:
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
- if (copy_from_user(&pll, argp, sizeof(pll)))
- return -EFAULT;
- return set_rtc_pll(&pll);
-
- case RTC_UIE_OFF: /* disable ints from RTC updates. */
- gen_clear_rtc_irq_bit(RTC_UIE);
- return 0;
-
- case RTC_UIE_ON: /* enable ints for RTC updates. */
- return gen_set_rtc_irq_bit(RTC_UIE);
-
- case RTC_RD_TIME: /* Read the time/date from RTC */
- /* this doesn't get week-day, who cares */
- memset(&wtime, 0, sizeof(wtime));
- get_rtc_time(&wtime);
-
- return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0;
-
- case RTC_SET_TIME: /* Set the RTC */
- {
- int year;
- unsigned char leap_yr;
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- if (copy_from_user(&wtime, argp, sizeof(wtime)))
- return -EFAULT;
-
- year = wtime.tm_year + 1900;
- leap_yr = ((!(year % 4) && (year % 100)) ||
- !(year % 400));
-
- if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
- return -EINVAL;
-
- if (wtime.tm_mday < 0 || wtime.tm_mday >
- (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
- return -EINVAL;
-
- if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||
- wtime.tm_min < 0 || wtime.tm_min >= 60 ||
- wtime.tm_sec < 0 || wtime.tm_sec >= 60)
- return -EINVAL;
-
- return set_rtc_time(&wtime);
- }
- }
-
- return -EINVAL;
-}
-
-static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- mutex_lock(&gen_rtc_mutex);
- ret = gen_rtc_ioctl(file, cmd, arg);
- mutex_unlock(&gen_rtc_mutex);
-
- return ret;
-}
-
-/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
- */
-
-static int gen_rtc_open(struct inode *inode, struct file *file)
-{
- mutex_lock(&gen_rtc_mutex);
- if (gen_rtc_status & RTC_IS_OPEN) {
- mutex_unlock(&gen_rtc_mutex);
- return -EBUSY;
- }
-
- gen_rtc_status |= RTC_IS_OPEN;
- gen_rtc_irq_data = 0;
- irq_active = 0;
- mutex_unlock(&gen_rtc_mutex);
-
- return 0;
-}
-
-static int gen_rtc_release(struct inode *inode, struct file *file)
-{
- /*
- * Turn off all interrupts once the device is no longer
- * in use and clear the data.
- */
-
- gen_clear_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
-
- gen_rtc_status &= ~RTC_IS_OPEN;
- return 0;
-}
-
-
-#ifdef CONFIG_PROC_FS
-
-/*
- * Info exported via "/proc/driver/rtc".
- */
-
-static int gen_rtc_proc_show(struct seq_file *m, void *v)
-{
- struct rtc_time tm;
- unsigned int flags;
- struct rtc_pll_info pll;
-
- flags = get_rtc_time(&tm);
-
- seq_printf(m,
- "rtc_time\t: %02d:%02d:%02d\n"
- "rtc_date\t: %04d-%02d-%02d\n"
- "rtc_epoch\t: %04u\n",
- tm.tm_hour, tm.tm_min, tm.tm_sec,
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 1900);
-
- tm.tm_hour = tm.tm_min = tm.tm_sec = 0;
-
- seq_puts(m, "alarm\t\t: ");
- if (tm.tm_hour <= 24)
- seq_printf(m, "%02d:", tm.tm_hour);
- else
- seq_puts(m, "**:");
-
- if (tm.tm_min <= 59)
- seq_printf(m, "%02d:", tm.tm_min);
- else
- seq_puts(m, "**:");
-
- if (tm.tm_sec <= 59)
- seq_printf(m, "%02d\n", tm.tm_sec);
- else
- seq_puts(m, "**\n");
-
- seq_printf(m,
- "DST_enable\t: %s\n"
- "BCD\t\t: %s\n"
- "24hr\t\t: %s\n"
- "square_wave\t: %s\n"
- "alarm_IRQ\t: %s\n"
- "update_IRQ\t: %s\n"
- "periodic_IRQ\t: %s\n"
- "periodic_freq\t: %ld\n"
- "batt_status\t: %s\n",
- (flags & RTC_DST_EN) ? "yes" : "no",
- (flags & RTC_DM_BINARY) ? "no" : "yes",
- (flags & RTC_24H) ? "yes" : "no",
- (flags & RTC_SQWE) ? "yes" : "no",
- (flags & RTC_AIE) ? "yes" : "no",
- irq_active ? "yes" : "no",
- (flags & RTC_PIE) ? "yes" : "no",
- 0L /* freq */,
- (flags & RTC_BATT_BAD) ? "bad" : "okay");
- if (!get_rtc_pll(&pll))
- seq_printf(m,
- "PLL adjustment\t: %d\n"
- "PLL max +ve adjustment\t: %d\n"
- "PLL max -ve adjustment\t: %d\n"
- "PLL +ve adjustment factor\t: %d\n"
- "PLL -ve adjustment factor\t: %d\n"
- "PLL frequency\t: %ld\n",
- pll.pll_value,
- pll.pll_max,
- pll.pll_min,
- pll.pll_posmult,
- pll.pll_negmult,
- pll.pll_clock);
- return 0;
-}
-
-static int gen_rtc_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, gen_rtc_proc_show, NULL);
-}
-
-static const struct file_operations gen_rtc_proc_fops = {
- .open = gen_rtc_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init gen_rtc_proc_init(void)
-{
- struct proc_dir_entry *r;
-
- r = proc_create("driver/rtc", 0, NULL, &gen_rtc_proc_fops);
- if (!r)
- return -ENOMEM;
- return 0;
-}
-#else
-static inline int gen_rtc_proc_init(void) { return 0; }
-#endif /* CONFIG_PROC_FS */
-
-
-/*
- * The various file operations we support.
- */
-
-static const struct file_operations gen_rtc_fops = {
- .owner = THIS_MODULE,
-#ifdef CONFIG_GEN_RTC_X
- .read = gen_rtc_read,
- .poll = gen_rtc_poll,
-#endif
- .unlocked_ioctl = gen_rtc_unlocked_ioctl,
- .open = gen_rtc_open,
- .release = gen_rtc_release,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice rtc_gen_dev =
-{
- .minor = RTC_MINOR,
- .name = "rtc",
- .fops = &gen_rtc_fops,
-};
-
-static int __init rtc_generic_init(void)
-{
- int retval;
-
- printk(KERN_INFO "Generic RTC Driver v%s\n", RTC_VERSION);
-
- retval = misc_register(&rtc_gen_dev);
- if (retval < 0)
- return retval;
-
- retval = gen_rtc_proc_init();
- if (retval) {
- misc_deregister(&rtc_gen_dev);
- return retval;
- }
-
- return 0;
-}
-
-static void __exit rtc_generic_exit(void)
-{
- remove_proc_entry ("driver/rtc", NULL);
- misc_deregister(&rtc_gen_dev);
-}
-
-
-module_init(rtc_generic_init);
-module_exit(rtc_generic_exit);
-
-MODULE_AUTHOR("Richard Zidlicky");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(RTC_MINOR);
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
new file mode 100644
index 000000000000..a45dabcc8e10
--- /dev/null
+++ b/drivers/char/powernv-op-panel.c
@@ -0,0 +1,223 @@
+/*
+ * OPAL Operator Panel Display Driver
+ *
+ * Copyright 2016, Suraj Jitindar Singh, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+
+#include <asm/opal.h>
+
+/*
+ * This driver creates a character device (/dev/op_panel) which exposes the
+ * operator panel (character LCD display) on IBM Power Systems machines
+ * with FSPs.
+ * A character buffer written to the device will be displayed on the
+ * operator panel.
+ */
+
+static DEFINE_MUTEX(oppanel_mutex);
+
+static u32 num_lines, oppanel_size;
+static oppanel_line_t *oppanel_lines;
+static char *oppanel_data;
+
+static loff_t oppanel_llseek(struct file *filp, loff_t offset, int whence)
+{
+ return fixed_size_llseek(filp, offset, whence, oppanel_size);
+}
+
+static ssize_t oppanel_read(struct file *filp, char __user *userbuf, size_t len,
+ loff_t *f_pos)
+{
+ return simple_read_from_buffer(userbuf, len, f_pos, oppanel_data,
+ oppanel_size);
+}
+
+static int __op_panel_update_display(void)
+{
+ struct opal_msg msg;
+ int rc, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_debug("Couldn't get OPAL async token [token=%d]\n",
+ token);
+ return token;
+ }
+
+ rc = opal_write_oppanel_async(token, oppanel_lines, num_lines);
+ switch (rc) {
+ case OPAL_ASYNC_COMPLETION:
+ rc = opal_async_wait_response(token, &msg);
+ if (rc) {
+ pr_debug("Failed to wait for async response [rc=%d]\n",
+ rc);
+ break;
+ }
+ rc = opal_get_async_rc(msg);
+ if (rc != OPAL_SUCCESS) {
+ pr_debug("OPAL async call returned failed [rc=%d]\n",
+ rc);
+ break;
+ }
+ case OPAL_SUCCESS:
+ break;
+ default:
+ pr_debug("OPAL write op-panel call failed [rc=%d]\n", rc);
+ }
+
+ opal_async_release_token(token);
+ return rc;
+}
+
+static ssize_t oppanel_write(struct file *filp, const char __user *userbuf,
+ size_t len, loff_t *f_pos)
+{
+ loff_t f_pos_prev = *f_pos;
+ ssize_t ret;
+ int rc;
+
+ if (!*f_pos)
+ memset(oppanel_data, ' ', oppanel_size);
+ else if (*f_pos >= oppanel_size)
+ return -EFBIG;
+
+ ret = simple_write_to_buffer(oppanel_data, oppanel_size, f_pos, userbuf,
+ len);
+ if (ret > 0) {
+ rc = __op_panel_update_display();
+ if (rc != OPAL_SUCCESS) {
+ pr_err_ratelimited("OPAL call failed to write to op panel display [rc=%d]\n",
+ rc);
+ *f_pos = f_pos_prev;
+ return -EIO;
+ }
+ }
+ return ret;
+}
+
+static int oppanel_open(struct inode *inode, struct file *filp)
+{
+ if (!mutex_trylock(&oppanel_mutex)) {
+ pr_debug("Device Busy\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int oppanel_release(struct inode *inode, struct file *filp)
+{
+ mutex_unlock(&oppanel_mutex);
+ return 0;
+}
+
+static const struct file_operations oppanel_fops = {
+ .owner = THIS_MODULE,
+ .llseek = oppanel_llseek,
+ .read = oppanel_read,
+ .write = oppanel_write,
+ .open = oppanel_open,
+ .release = oppanel_release
+};
+
+static struct miscdevice oppanel_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "op_panel",
+ .fops = &oppanel_fops
+};
+
+static int oppanel_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 line_len;
+ int rc, i;
+
+ rc = of_property_read_u32(np, "#length", &line_len);
+ if (rc) {
+ pr_err_ratelimited("Operator panel length property not found\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "#lines", &num_lines);
+ if (rc) {
+ pr_err_ratelimited("Operator panel lines property not found\n");
+ return rc;
+ }
+ oppanel_size = line_len * num_lines;
+
+ pr_devel("Operator panel of size %u found with %u lines of length %u\n",
+ oppanel_size, num_lines, line_len);
+
+ oppanel_data = kcalloc(oppanel_size, sizeof(*oppanel_data), GFP_KERNEL);
+ if (!oppanel_data)
+ return -ENOMEM;
+
+ oppanel_lines = kcalloc(num_lines, sizeof(oppanel_line_t), GFP_KERNEL);
+ if (!oppanel_lines) {
+ rc = -ENOMEM;
+ goto free_oppanel_data;
+ }
+
+ memset(oppanel_data, ' ', oppanel_size);
+ for (i = 0; i < num_lines; i++) {
+ oppanel_lines[i].line_len = cpu_to_be64(line_len);
+ oppanel_lines[i].line = cpu_to_be64(__pa(&oppanel_data[i *
+ line_len]));
+ }
+
+ rc = misc_register(&oppanel_dev);
+ if (rc) {
+ pr_err_ratelimited("Failed to register as misc device\n");
+ goto free_oppanel;
+ }
+
+ return 0;
+
+free_oppanel:
+ kfree(oppanel_lines);
+free_oppanel_data:
+ kfree(oppanel_data);
+ return rc;
+}
+
+static int oppanel_remove(struct platform_device *pdev)
+{
+ misc_deregister(&oppanel_dev);
+ kfree(oppanel_lines);
+ kfree(oppanel_data);
+ return 0;
+}
+
+static const struct of_device_id oppanel_match[] = {
+ { .compatible = "ibm,opal-oppanel" },
+ { },
+};
+
+static struct platform_driver oppanel_driver = {
+ .driver = {
+ .name = "powernv-op-panel",
+ .of_match_table = oppanel_match,
+ },
+ .probe = oppanel_probe,
+ .remove = oppanel_remove,
+};
+
+module_platform_driver(oppanel_driver);
+
+MODULE_DEVICE_TABLE(of, oppanel_match);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV Operator Panel LCD Display Driver");
+MODULE_AUTHOR("Suraj Jitindar Singh <sjitindarsingh@gmail.com>");
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7f0622426b97..3efb3bf0ab83 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -249,6 +249,7 @@
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
+#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
@@ -1656,7 +1657,6 @@ static int rand_initialize(void)
{
#ifdef CONFIG_NUMA
int i;
- int num_nodes = num_possible_nodes();
struct crng_state *crng;
struct crng_state **pool;
#endif
@@ -1666,8 +1666,7 @@ static int rand_initialize(void)
crng_initialize(&primary_crng);
#ifdef CONFIG_NUMA
- pool = kmalloc(num_nodes * sizeof(void *),
- GFP_KERNEL|__GFP_NOFAIL|__GFP_ZERO);
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 1f60b02416a7..adaf109f2fe2 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -184,5 +184,5 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get,
&clps711x_clk->clk_data);
}
-CLK_OF_DECLARE(clps711x, "cirrus,clps711x-clk", clps711x_clk_init_dt);
+CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 89cc700fbc37..97ae60fa1584 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -250,7 +250,7 @@ struct clk_lookup_alloc {
char con_id[MAX_CON_ID];
};
-static struct clk_lookup * __init_refok
+static struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -287,7 +287,7 @@ vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
return cl;
}
-struct clk_lookup * __init_refok
+struct clk_lookup * __ref
clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
{
struct clk_lookup *cl;
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index ef2ec64fe547..0e47d95faf49 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -108,6 +108,9 @@ static struct ti_dt_clk am33xx_clks[] = {
DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"),
DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"),
DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"),
+ DT_CLK("48300200.pwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.pwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.pwm", "tbclk", "ehrpwm2_tbclk"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 097fc90bf19a..e816a7500e43 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -58,6 +58,7 @@ static struct ti_dt_clk am43xx_clks[] = {
DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
DT_CLK(NULL, "sha0_fck", "sha0_fck"),
DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+ DT_CLK(NULL, "rng_fck", "rng_fck"),
DT_CLK(NULL, "timer1_fck", "timer1_fck"),
DT_CLK(NULL, "timer2_fck", "timer2_fck"),
DT_CLK(NULL, "timer3_fck", "timer3_fck"),
@@ -115,6 +116,12 @@ static struct ti_dt_clk am43xx_clks[] = {
DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"),
DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"),
DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"),
+ DT_CLK("48300200.pwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.pwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.pwm", "tbclk", "ehrpwm2_tbclk"),
+ DT_CLK("48306200.pwm", "tbclk", "ehrpwm3_tbclk"),
+ DT_CLK("48308200.pwm", "tbclk", "ehrpwm4_tbclk"),
+ DT_CLK("4830a200.pwm", "tbclk", "ehrpwm5_tbclk"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 9a736d939806..e960d686d9db 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
#define PRCC_NUM_PERIPH_CLUSTERS 6
@@ -48,11 +47,6 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
return PRCC_SHOW(clk_data, base, bit);
}
-static const struct of_device_id u8500_clk_of_match[] = {
- { .compatible = "stericsson,u8500-clks", },
- { },
-};
-
/* CLKRST4 is missing making it hard to index things */
enum clkrst_index {
CLKRST1_INDEX = 0,
@@ -63,22 +57,15 @@ enum clkrst_index {
CLKRST_MAX,
};
-void u8500_clk_init(void)
+static void u8500_clk_init(struct device_node *np)
{
struct prcmu_fw_version *fw_version;
- struct device_node *np = NULL;
struct device_node *child = NULL;
const char *sgaclk_parent = NULL;
struct clk *clk, *rtc_clk, *twd_clk;
u32 bases[CLKRST_MAX];
int i;
- if (of_have_populated_dt())
- np = of_find_matching_node(NULL, u8500_clk_of_match);
- if (!np) {
- pr_err("Either DT or U8500 Clock node not found\n");
- return;
- }
for (i = 0; i < ARRAY_SIZE(bases); i++) {
struct resource r;
@@ -573,3 +560,4 @@ void u8500_clk_init(void)
of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
}
}
+CLK_OF_DECLARE(u8500_clks, "stericsson,u8500-clks", u8500_clk_init);
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index 86549e59fb42..133859f0e2bf 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -12,14 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
-static const struct of_device_id u8540_clk_of_match[] = {
- { .compatible = "stericsson,u8540-clks", },
- { }
-};
-
/* CLKRST4 is missing making it hard to index things */
enum clkrst_index {
CLKRST1_INDEX = 0,
@@ -30,19 +24,12 @@ enum clkrst_index {
CLKRST_MAX,
};
-void u8540_clk_init(void)
+static void u8540_clk_init(struct device_node *np)
{
struct clk *clk;
- struct device_node *np = NULL;
u32 bases[CLKRST_MAX];
int i;
- if (of_have_populated_dt())
- np = of_find_matching_node(NULL, u8540_clk_of_match);
- if (!np) {
- pr_err("Either DT or U8540 Clock node not found\n");
- return;
- }
for (i = 0; i < ARRAY_SIZE(bases); i++) {
struct resource r;
@@ -607,3 +594,4 @@ void u8540_clk_init(void)
bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "rng");
}
+CLK_OF_DECLARE(u8540_clks, "stericsson,u8540-clks", u8540_clk_init);
diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c
index 2138a4c8cbca..7b6bca49ce42 100644
--- a/drivers/clk/ux500/u9540_clk.c
+++ b/drivers/clk/ux500/u9540_clk.c
@@ -9,10 +9,10 @@
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
-void u9540_clk_init(void)
+static void u9540_clk_init(struct device_node *np)
{
/* register clocks here */
}
+CLK_OF_DECLARE(u9540_clks, "stericsson,u9540-clks", u9540_clk_init);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index 84aed78261e4..24db6d605549 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -119,5 +119,5 @@ static int __init clps711x_timer_init(struct device_node *np)
return -EINVAL;
}
}
-CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
+CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index c822d72629d5..74919aa81dcb 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -32,7 +32,6 @@ config CPU_FREQ_BOOST_SW
config CPU_FREQ_STAT
bool "CPU frequency transition statistics"
- default y
help
Export CPU frequency statistics information through sysfs.
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9ec033b4f2d9..be9eade147f2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1374,6 +1374,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
{}
};
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 9e07588ea9f5..f82074eea779 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -220,7 +220,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
tmp1 /= tmp;
- __raw_writel(tmp1, reg);
+ writel_relaxed(tmp1, reg);
}
static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
@@ -296,29 +296,29 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* 1. Temporary Change divider for MFC and G3D
* SCLKA2M(200/1=200)->(200/4=50)Mhz
*/
- reg = __raw_readl(S5P_CLK_DIV2);
+ reg = readl_relaxed(S5P_CLK_DIV2);
reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
(3 << S5P_CLKDIV2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV2);
+ writel_relaxed(reg, S5P_CLK_DIV2);
/* For MFC, G3D dividing */
do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & ((1 << 16) | (1 << 17)));
/*
* 2. Change SCLKA2M(200Mhz)to SCLKMPLL in MFC_MUX, G3D MUX
* (200/4=50)->(667/4=166)Mhz
*/
- reg = __raw_readl(S5P_CLK_SRC2);
+ reg = readl_relaxed(S5P_CLK_SRC2);
reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
(1 << S5P_CLKSRC2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC2);
+ writel_relaxed(reg, S5P_CLK_SRC2);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT1);
+ reg = readl_relaxed(S5P_CLKMUX_STAT1);
} while (reg & ((1 << 7) | (1 << 3)));
/*
@@ -330,19 +330,19 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
s5pv210_set_refresh(DMC1, 133000);
/* 4. SCLKAPLL -> SCLKMPLL */
- reg = __raw_readl(S5P_CLK_SRC0);
+ reg = readl_relaxed(S5P_CLK_SRC0);
reg &= ~(S5P_CLKSRC0_MUX200_MASK);
reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC0);
+ writel_relaxed(reg, S5P_CLK_SRC0);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT0);
+ reg = readl_relaxed(S5P_CLKMUX_STAT0);
} while (reg & (0x1 << 18));
}
/* Change divider */
- reg = __raw_readl(S5P_CLK_DIV0);
+ reg = readl_relaxed(S5P_CLK_DIV0);
reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
@@ -358,25 +358,25 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
(clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
(clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
- __raw_writel(reg, S5P_CLK_DIV0);
+ writel_relaxed(reg, S5P_CLK_DIV0);
do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & 0xff);
/* ARM MCS value changed */
- reg = __raw_readl(S5P_ARM_MCS_CON);
+ reg = readl_relaxed(S5P_ARM_MCS_CON);
reg &= ~0x3;
if (index >= L3)
reg |= 0x3;
else
reg |= 0x1;
- __raw_writel(reg, S5P_ARM_MCS_CON);
+ writel_relaxed(reg, S5P_ARM_MCS_CON);
if (pll_changing) {
/* 5. Set Lock time = 30us*24Mhz = 0x2cf */
- __raw_writel(0x2cf, S5P_APLL_LOCK);
+ writel_relaxed(0x2cf, S5P_APLL_LOCK);
/*
* 6. Turn on APLL
@@ -384,12 +384,12 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* 6-2. Wait until the PLL is locked
*/
if (index == L0)
- __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
+ writel_relaxed(APLL_VAL_1000, S5P_APLL_CON);
else
- __raw_writel(APLL_VAL_800, S5P_APLL_CON);
+ writel_relaxed(APLL_VAL_800, S5P_APLL_CON);
do {
- reg = __raw_readl(S5P_APLL_CON);
+ reg = readl_relaxed(S5P_APLL_CON);
} while (!(reg & (0x1 << 29)));
/*
@@ -397,39 +397,39 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
* (667/4=166)->(200/4=50)Mhz
*/
- reg = __raw_readl(S5P_CLK_SRC2);
+ reg = readl_relaxed(S5P_CLK_SRC2);
reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
(0 << S5P_CLKSRC2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC2);
+ writel_relaxed(reg, S5P_CLK_SRC2);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT1);
+ reg = readl_relaxed(S5P_CLKMUX_STAT1);
} while (reg & ((1 << 7) | (1 << 3)));
/*
* 8. Change divider for MFC and G3D
* (200/4=50)->(200/1=200)Mhz
*/
- reg = __raw_readl(S5P_CLK_DIV2);
+ reg = readl_relaxed(S5P_CLK_DIV2);
reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
(clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV2);
+ writel_relaxed(reg, S5P_CLK_DIV2);
/* For MFC, G3D dividing */
do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & ((1 << 16) | (1 << 17)));
/* 9. Change MPLL to APLL in MSYS_MUX */
- reg = __raw_readl(S5P_CLK_SRC0);
+ reg = readl_relaxed(S5P_CLK_SRC0);
reg &= ~(S5P_CLKSRC0_MUX200_MASK);
reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC0);
+ writel_relaxed(reg, S5P_CLK_SRC0);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT0);
+ reg = readl_relaxed(S5P_CLKMUX_STAT0);
} while (reg & (0x1 << 18));
/*
@@ -446,13 +446,13 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* and memory refresh parameter should be changed
*/
if (bus_speed_changing) {
- reg = __raw_readl(S5P_CLK_DIV6);
+ reg = readl_relaxed(S5P_CLK_DIV6);
reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV6);
+ writel_relaxed(reg, S5P_CLK_DIV6);
do {
- reg = __raw_readl(S5P_CLKDIV_STAT1);
+ reg = readl_relaxed(S5P_CLKDIV_STAT1);
} while (reg & (1 << 15));
/* Reconfigure DRAM refresh counter value */
@@ -492,7 +492,7 @@ static int check_mem_type(void __iomem *dmc_reg)
{
unsigned long val;
- val = __raw_readl(dmc_reg + 0x4);
+ val = readl_relaxed(dmc_reg + 0x4);
val = (val & (0xf << 8));
return val >> 8;
@@ -537,10 +537,10 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
}
/* Find current refresh counter and frequency each DMC */
- s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000);
+ s5pv210_dram_conf[0].refresh = (readl_relaxed(dmc_base[0] + 0x30) * 1000);
s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
- s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000);
+ s5pv210_dram_conf[1].refresh = (readl_relaxed(dmc_base[1] + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->suspend_freq = SLEEP_FREQ;
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index e12dc30d8864..f7ca891b5b59 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -20,7 +20,7 @@
#include <asm/opal.h>
#include <asm/runlatch.h>
-#define MAX_POWERNV_IDLE_STATES 8
+#define POWERNV_THRESHOLD_LATENCY_NS 200000
struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
@@ -29,6 +29,9 @@ struct cpuidle_driver powernv_idle_driver = {
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
+
+static u64 stop_psscr_table[CPUIDLE_STATE_MAX];
+
static u64 snooze_timeout;
static bool snooze_timeout_en;
@@ -93,16 +96,27 @@ static int fastsleep_loop(struct cpuidle_device *dev,
return index;
}
#endif
+
+static int stop_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ ppc64_runlatch_off();
+ power9_idle_stop(stop_psscr_table[index]);
+ ppc64_runlatch_on();
+ return index;
+}
+
/*
* States for dedicated partition case.
*/
-static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
+static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
{ /* Snooze */
.name = "snooze",
.desc = "snooze",
.exit_latency = 0,
.target_residency = 0,
- .enter = &snooze_loop },
+ .enter = snooze_loop },
};
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
@@ -168,7 +182,11 @@ static int powernv_add_idle_states(void)
struct device_node *power_mgt;
int nr_idle_states = 1; /* Snooze */
int dt_idle_states;
- u32 *latency_ns, *residency_ns, *flags;
+ u32 latency_ns[CPUIDLE_STATE_MAX];
+ u32 residency_ns[CPUIDLE_STATE_MAX];
+ u32 flags[CPUIDLE_STATE_MAX];
+ u64 psscr_val[CPUIDLE_STATE_MAX];
+ const char *names[CPUIDLE_STATE_MAX];
int i, rc;
/* Currently we have snooze statically defined */
@@ -186,26 +204,55 @@ static int powernv_add_idle_states(void)
goto out;
}
- flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+ /*
+ * Since snooze is used as the first idle state, the maximum number of
+ * idle states allowed is CPUIDLE_STATE_MAX - 1
+ */
+ if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
+ pr_warn("cpuidle-powernv: discovered idle states more than allowed");
+ dt_idle_states = CPUIDLE_STATE_MAX - 1;
+ }
+
if (of_property_read_u32_array(power_mgt,
"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
- goto out_free_flags;
+ goto out;
}
- latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
- rc = of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
- if (rc) {
+ if (of_property_read_u32_array(power_mgt,
+ "ibm,cpu-idle-state-latencies-ns", latency_ns,
+ dt_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
- goto out_free_latency;
+ goto out;
+ }
+ if (of_property_read_string_array(power_mgt,
+ "ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
+ goto out;
}
- residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+ /*
+ * If the idle states use stop instruction, probe for psscr values
+ * which are necessary to specify required stop level.
+ */
+ if (flags[0] & (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP))
+ if (of_property_read_u64_array(power_mgt,
+ "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n");
+ goto out;
+ }
+
rc = of_property_read_u32_array(power_mgt,
"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
for (i = 0; i < dt_idle_states; i++) {
+ /*
+ * If an idle state has exit latency beyond
+ * POWERNV_THRESHOLD_LATENCY_NS then don't use it
+ * in cpu-idle.
+ */
+ if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
+ continue;
/*
* Cpuidle accepts exit_latency and target_residency in us.
@@ -217,7 +264,17 @@ static int powernv_add_idle_states(void)
strcpy(powernv_states[nr_idle_states].desc, "Nap");
powernv_states[nr_idle_states].flags = 0;
powernv_states[nr_idle_states].target_residency = 100;
- powernv_states[nr_idle_states].enter = &nap_loop;
+ powernv_states[nr_idle_states].enter = nap_loop;
+ } else if ((flags[i] & OPAL_PM_STOP_INST_FAST) &&
+ !(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+ strncpy(powernv_states[nr_idle_states].name,
+ names[i], CPUIDLE_NAME_LEN);
+ strncpy(powernv_states[nr_idle_states].desc,
+ names[i], CPUIDLE_NAME_LEN);
+ powernv_states[nr_idle_states].flags = 0;
+
+ powernv_states[nr_idle_states].enter = stop_loop;
+ stop_psscr_table[nr_idle_states] = psscr_val[i];
}
/*
@@ -232,7 +289,17 @@ static int powernv_add_idle_states(void)
strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
powernv_states[nr_idle_states].target_residency = 300000;
- powernv_states[nr_idle_states].enter = &fastsleep_loop;
+ powernv_states[nr_idle_states].enter = fastsleep_loop;
+ } else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) &&
+ (flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+ strncpy(powernv_states[nr_idle_states].name,
+ names[i], CPUIDLE_NAME_LEN);
+ strncpy(powernv_states[nr_idle_states].desc,
+ names[i], CPUIDLE_NAME_LEN);
+
+ powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
+ powernv_states[nr_idle_states].enter = stop_loop;
+ stop_psscr_table[nr_idle_states] = psscr_val[i];
}
#endif
powernv_states[nr_idle_states].exit_latency =
@@ -245,12 +312,6 @@ static int powernv_add_idle_states(void)
nr_idle_states++;
}
-
- kfree(residency_ns);
-out_free_latency:
- kfree(latency_ns);
-out_free_flags:
- kfree(flags);
out:
return nr_idle_states;
}
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index e373cc6557c6..d64af8625d7e 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -180,10 +180,11 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
struct mv_cesa_engine *engine = creq->engine;
spin_lock_bh(&engine->lock);
- if (mv_cesa_req_get_type(creq) == CESA_DMA_REQ)
- mv_cesa_tdma_chain(engine, creq);
-
ret = crypto_enqueue_request(&engine->queue, req);
+ if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
+ (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
if (ret != -EINPROGRESS)
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 48df03a06066..d19dc9614e6e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -139,20 +139,11 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
struct mv_cesa_req *basereq = &creq->base;
- unsigned int ivsize;
- int ret;
if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
return mv_cesa_ablkcipher_std_process(ablkreq, status);
- ret = mv_cesa_dma_process(basereq, status);
- if (ret)
- return ret;
-
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
- memcpy_fromio(ablkreq->info, basereq->chain.last->data, ivsize);
-
- return 0;
+ return mv_cesa_dma_process(basereq, status);
}
static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
@@ -320,7 +311,6 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ablkcipher_dma_iter iter;
- struct mv_cesa_tdma_chain chain;
bool skip_ctx = false;
int ret;
unsigned int ivsize;
@@ -347,13 +337,13 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
return -ENOMEM;
}
- mv_cesa_tdma_desc_iter_init(&chain);
+ mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ablkcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
- op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
+ op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
@@ -363,18 +353,18 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
mv_cesa_set_crypt_op_len(op, iter.base.op_len);
/* Add input transfers */
- ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.src, flags);
if (ret)
goto err_free_tdma;
/* Add dummy desc to launch the crypto operation */
- ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
+ ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
/* Add output transfers */
- ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.dst, flags);
if (ret)
goto err_free_tdma;
@@ -383,13 +373,12 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
/* Add output data for IV */
ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- ret = mv_cesa_dma_add_iv_op(&chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
if (ret)
goto err_free_tdma;
- basereq->chain = chain;
basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
return 0;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index c35912b4fffb..82e0f4e6eb1c 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -315,12 +315,6 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
for (i = 0; i < digsize / 4; i++)
creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->cache_ptr)
- sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
- creq->cache,
- creq->cache_ptr,
- ahashreq->nbytes - creq->cache_ptr);
-
if (creq->last_req) {
/*
* Hardware's MD5 digest is in little endian format, but
@@ -365,6 +359,12 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
mv_cesa_ahash_last_cleanup(ahashreq);
mv_cesa_ahash_cleanup(ahashreq);
+
+ if (creq->cache_ptr)
+ sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+ creq->cache,
+ creq->cache_ptr,
+ ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index 89d8208d9851..a83ead109d5f 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -1,7 +1,7 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
- default y
+ default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
This module supports acceleration for AES and GHASH in hardware. If you
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index f688c32fbcc7..31a98dc6f849 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -23,6 +23,7 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <asm/cputable.h>
#include <crypto/internal/hash.h>
@@ -45,9 +46,6 @@ int __init p8_init(void)
int ret = 0;
struct crypto_alg **alg_it;
- if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
- return -ENODEV;
-
for (alg_it = algs; *alg_it; alg_it++) {
ret = crypto_register_alg(*alg_it);
printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
@@ -80,7 +78,7 @@ void __exit p8_exit(void)
crypto_unregister_shash(&p8_ghash_alg);
}
-module_init(p8_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init);
module_exit(p8_exit);
MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>");
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9824bc4addf8..25bcfa0b474f 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -1,11 +1,20 @@
menu "DMABUF options"
config SYNC_FILE
- bool "sync_file support for fences"
+ bool "Explicit Synchronization Framework"
default n
select ANON_INODES
select DMA_SHARED_BUFFER
---help---
- This option enables the fence framework synchronization to export
- sync_files to userspace that can represent one or more fences.
+ The Sync File Framework adds explicit synchronization via
+ userspace. It enables sending and receiving 'struct fence' objects
+ to/from userspace via Sync File fds for synchronization between
+ drivers via userspace components. It has been ported from Android.
+
+ The first and main user of this is graphics, in which a fence is
+ associated with a buffer. When a job is submitted to the GPU, a fence
+ is attached to the buffer and is transferred via userspace, using
+ Sync File fds, to the DRM driver, for example. More details are in
+ Documentation/sync_file.txt.
+
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca75ed..f353db213a81 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6355ab38d630..ddaee60ae52a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
+ int ret;
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
@@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
- module_put(exp_info->owner);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_module;
}
dmabuf->priv = exp_info->priv;
@@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
exp_info->flags);
if (IS_ERR(file)) {
- kfree(dmabuf);
- return ERR_CAST(file);
+ ret = PTR_ERR(file);
+ goto err_dmabuf;
}
file->f_mode |= FMODE_LSEEK;
@@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
mutex_unlock(&db_list.lock);
return dmabuf;
+
+err_dmabuf:
+ kfree(dmabuf);
+err_module:
+ module_put(exp_info->owner);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
@@ -824,7 +831,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
#ifdef CONFIG_DEBUG_FS
-static int dma_buf_describe(struct seq_file *s)
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
int ret;
struct dma_buf *buf_obj;
@@ -879,17 +886,9 @@ static int dma_buf_describe(struct seq_file *s)
return 0;
}
-static int dma_buf_show(struct seq_file *s, void *unused)
-{
- void (*func)(struct seq_file *) = s->private;
-
- func(s);
- return 0;
-}
-
static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
- return single_open(file, dma_buf_show, inode->i_private);
+ return single_open(file, dma_buf_debug_show, NULL);
}
static const struct file_operations dma_buf_debug_fops = {
@@ -903,20 +902,23 @@ static struct dentry *dma_buf_debugfs_dir;
static int dma_buf_init_debugfs(void)
{
+ struct dentry *d;
int err = 0;
- dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
+ d = debugfs_create_dir("dma_buf", NULL);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
- if (IS_ERR(dma_buf_debugfs_dir)) {
- err = PTR_ERR(dma_buf_debugfs_dir);
- dma_buf_debugfs_dir = NULL;
- return err;
- }
-
- err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
+ dma_buf_debugfs_dir = d;
- if (err)
+ d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ NULL, &dma_buf_debug_fops);
+ if (IS_ERR(d)) {
pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
+ dma_buf_debugfs_dir = NULL;
+ err = PTR_ERR(d);
+ }
return err;
}
@@ -926,17 +928,6 @@ static void dma_buf_uninit_debugfs(void)
if (dma_buf_debugfs_dir)
debugfs_remove_recursive(dma_buf_debugfs_dir);
}
-
-int dma_buf_debugfs_create_file(const char *name,
- int (*write)(struct seq_file *))
-{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
- write, &dma_buf_debug_fops);
-
- return PTR_ERR_OR_ZERO(d);
-}
#else
static inline int dma_buf_init_debugfs(void)
{
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000000..a8731c853da6
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
+/*
+ * fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/fence-array.h>
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
+
+static const char *fence_array_get_driver_name(struct fence *fence)
+{
+ return "fence_array";
+}
+
+static const char *fence_array_get_timeline_name(struct fence *fence)
+{
+ return "unbound";
+}
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+{
+ struct fence_array_cb *array_cb =
+ container_of(cb, struct fence_array_cb, cb);
+ struct fence_array *array = array_cb->array;
+
+ if (atomic_dec_and_test(&array->num_pending))
+ fence_signal(&array->base);
+ fence_put(&array->base);
+}
+
+static bool fence_array_enable_signaling(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+ struct fence_array_cb *cb = (void *)(&array[1]);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i) {
+ cb[i].array = array;
+ /*
+ * As we may report that the fence is signaled before all
+ * callbacks are complete, we need to take an additional
+ * reference count on the array so that we do not free it too
+ * early. The core fence handling will only hold the reference
+ * until we signal the array as complete (but that is now
+ * insufficient).
+ */
+ fence_get(&array->base);
+ if (fence_add_callback(array->fences[i], &cb[i].cb,
+ fence_array_cb_func)) {
+ fence_put(&array->base);
+ if (atomic_dec_and_test(&array->num_pending))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool fence_array_signaled(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+
+ return atomic_read(&array->num_pending) <= 0;
+}
+
+static void fence_array_release(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i)
+ fence_put(array->fences[i]);
+
+ kfree(array->fences);
+ fence_free(fence);
+}
+
+const struct fence_ops fence_array_ops = {
+ .get_driver_name = fence_array_get_driver_name,
+ .get_timeline_name = fence_array_get_timeline_name,
+ .enable_signaling = fence_array_enable_signaling,
+ .signaled = fence_array_signaled,
+ .wait = fence_default_wait,
+ .release = fence_array_release,
+};
+
+/**
+ * fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a fence_array object and initialize the base fence with fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences entries
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
+{
+ struct fence_array *array;
+ size_t size = sizeof(*array);
+
+ /* Allocate the callback structures behind the array. */
+ size += num_fences * sizeof(struct fence_array_cb);
+ array = kzalloc(size, GFP_KERNEL);
+ if (!array)
+ return NULL;
+
+ spin_lock_init(&array->lock);
+ fence_init(&array->base, &fence_array_ops, &array->lock,
+ context, seqno);
+
+ array->num_fences = num_fences;
+ atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+ array->fences = fences;
+
+ return array;
+}
+EXPORT_SYMBOL(fence_array_create);
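The kernel-doc above states the ownership rules: fence_array_create() takes over a kmalloc'd array of fence pointers, puts each fence on release, and returns NULL on allocation failure. A minimal in-kernel sketch under those rules, assuming f1 and f2 are valid struct fence pointers the caller already holds references on (names are illustrative; the fragment belongs inside a function that can return an errno):

/* Sketch: aggregate two fences; the array signals when both have signaled.
 * Requires <linux/fence-array.h> and <linux/slab.h>. */
struct fence **fences;
struct fence_array *array;
u64 ctx = fence_context_alloc(1);

fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
if (!fences)
	return -ENOMEM;
fences[0] = f1;		/* ownership of these references passes to the array */
fences[1] = f2;

array = fence_array_create(2, fences, ctx, 1, false /* signal when all signal */);
if (!array) {
	kfree(fences);
	return -ENOMEM;
}
/* &array->base now behaves like any other struct fence, e.g.
 * fence_wait(&array->base, true); release it with fence_put(&array->base). */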
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno)
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d8309e..9aaa608dfe01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
sync_file->num_fences = 1;
atomic_set(&sync_file->status, 1);
- snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+ snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
fence->seqno);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6664f1108c7c..0e22f241403b 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -10,7 +10,7 @@ config ARM_PSCI_FW
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
- depends on ARM_MHU
+ depends on MAILBOX
help
System Control and Power Interface (SCPI) Message Protocol is
defined for the purpose of communication between the Application
@@ -27,6 +27,15 @@ config ARM_SCPI_PROTOCOL
This protocol library provides interface for all the client drivers
making use of the features offered by the SCP.
+config ARM_SCPI_POWER_DOMAIN
+ tristate "SCPI power domain driver"
+ depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCPI power domains, which can be
+ enabled or disabled via the SCP firmware.
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
@@ -184,6 +193,7 @@ config FW_CFG_SYSFS_CMDLINE
config QCOM_SCM
bool
depends on ARM || ARM64
+ select RESET_CONTROLLER
config QCOM_SCM_32
def_bool y
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 474bada56fcd..44a59dcfc398 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
+obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 7e3e595c9f30..438893762076 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -210,10 +210,6 @@ struct dvfs_info {
} opps[MAX_DVFS_OPPS];
} __packed;
-struct dvfs_get {
- u8 index;
-} __packed;
-
struct dvfs_set {
u8 domain;
u8 index;
@@ -235,6 +231,11 @@ struct sensor_value {
__le32 hi_val;
} __packed;
+struct dev_pstate_set {
+ u16 dev_id;
+ u8 pstate;
+} __packed;
+
static struct scpi_drvinfo *scpi_info;
static int scpi_linux_errmap[SCPI_ERR_MAX] = {
@@ -431,11 +432,11 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
static int scpi_dvfs_get_idx(u8 domain)
{
int ret;
- struct dvfs_get dvfs;
+ u8 dvfs_idx;
ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
- &dvfs, sizeof(dvfs));
- return ret ? ret : dvfs.index;
+ &dvfs_idx, sizeof(dvfs_idx));
+ return ret ? ret : dvfs_idx;
}
static int scpi_dvfs_set_idx(u8 domain, u8 index)
@@ -526,7 +527,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
return ret;
}
-int scpi_sensor_get_value(u16 sensor, u64 *val)
+static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
__le16 id = cpu_to_le16(sensor);
struct sensor_value buf;
@@ -541,6 +542,29 @@ int scpi_sensor_get_value(u16 sensor, u64 *val)
return ret;
}
+static int scpi_device_get_power_state(u16 dev_id)
+{
+ int ret;
+ u8 pstate;
+ __le16 id = cpu_to_le16(dev_id);
+
+ ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
+ sizeof(id), &pstate, sizeof(pstate));
+ return ret ? ret : pstate;
+}
+
+static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
+{
+ int stat;
+ struct dev_pstate_set dev_set = {
+ .dev_id = cpu_to_le16(dev_id),
+ .pstate = pstate,
+ };
+
+ return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ sizeof(dev_set), &stat, sizeof(stat));
+}
+
static struct scpi_ops scpi_ops = {
.get_version = scpi_get_version,
.clk_get_range = scpi_clk_get_range,
@@ -552,6 +576,8 @@ static struct scpi_ops scpi_ops = {
.sensor_get_capability = scpi_sensor_get_capability,
.sensor_get_info = scpi_sensor_get_info,
.sensor_get_value = scpi_sensor_get_value,
+ .device_get_power_state = scpi_device_get_power_state,
+ .device_set_power_state = scpi_device_set_power_state,
};
struct scpi_ops *get_scpi_ops(void)
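With device_get_power_state and device_set_power_state now exported through scpi_ops, an SCP-managed device can be switched from a consumer driver. A hedged sketch (the device id 0 and the -EPROBE_DEFER handling are illustrative; get_scpi_ops() returns NULL until the SCPI driver has probed):

/* Sketch: set an SCP-controlled device to power state 1 and read it back. */
struct scpi_ops *ops = get_scpi_ops();
int state;

if (!ops)
	return -EPROBE_DEFER;

if (ops->device_set_power_state(0, 1))
	pr_warn("failed to set SCPI device power state\n");

state = ops->device_get_power_state(0);
if (state < 0)
	pr_warn("failed to read SCPI device power state\n");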
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index b6eb875d4af3..62aa3cf09b4d 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -669,7 +669,7 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
case BCMA_HOSTTYPE_PCI:
memset(out, 0, sizeof(struct ssb_sprom));
/* On BCM47XX all PCI buses share the same domain */
- if (config_enabled(CONFIG_BCM47XX))
+ if (IS_ENABLED(CONFIG_BCM47XX))
snprintf(buf, sizeof(buf), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 0883292f640f..c6aeedbdcbb0 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -23,8 +23,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
-
-#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
#include "qcom_scm.h"
@@ -97,44 +96,6 @@ struct qcom_scm_response {
};
/**
- * alloc_qcom_scm_command() - Allocate an SCM command
- * @cmd_size: size of the command buffer
- * @resp_size: size of the response buffer
- *
- * Allocate an SCM command, including enough room for the command
- * and response headers as well as the command and response buffers.
- *
- * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails.
- */
-static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size)
-{
- struct qcom_scm_command *cmd;
- size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size +
- resp_size;
- u32 offset;
-
- cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
- if (cmd) {
- cmd->len = cpu_to_le32(len);
- offset = offsetof(struct qcom_scm_command, buf);
- cmd->buf_offset = cpu_to_le32(offset);
- cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
- }
- return cmd;
-}
-
-/**
- * free_qcom_scm_command() - Free an SCM command
- * @cmd: command to free
- *
- * Free an SCM command.
- */
-static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
-{
- kfree(cmd);
-}
-
-/**
* qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
* @cmd: command
*
@@ -168,23 +129,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
-static int qcom_scm_remap_error(int err)
-{
- pr_err("qcom_scm_call failed with error code %d\n", err);
- switch (err) {
- case QCOM_SCM_ERROR:
- return -EIO;
- case QCOM_SCM_EINVAL_ADDR:
- case QCOM_SCM_EINVAL_ARG:
- return -EINVAL;
- case QCOM_SCM_EOPNOTSUPP:
- return -EOPNOTSUPP;
- case QCOM_SCM_ENOMEM:
- return -ENOMEM;
- }
- return -EINVAL;
-}
-
static u32 smc(u32 cmd_addr)
{
int context_id;
@@ -209,45 +153,9 @@ static u32 smc(u32 cmd_addr)
return r0;
}
-static int __qcom_scm_call(const struct qcom_scm_command *cmd)
-{
- int ret;
- u32 cmd_addr = virt_to_phys(cmd);
-
- /*
- * Flush the command buffer so that the secure world sees
- * the correct data.
- */
- secure_flush_area(cmd, cmd->len);
-
- ret = smc(cmd_addr);
- if (ret < 0)
- ret = qcom_scm_remap_error(ret);
-
- return ret;
-}
-
-static void qcom_scm_inv_range(unsigned long start, unsigned long end)
-{
- u32 cacheline_size, ctr;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
- start = round_down(start, cacheline_size);
- end = round_up(end, cacheline_size);
- outer_inv_range(start, end);
- while (start < end) {
- asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
- : "memory");
- start += cacheline_size;
- }
- dsb();
- isb();
-}
-
/**
* qcom_scm_call() - Send an SCM command
+ * @dev: struct device
* @svc_id: service identifier
* @cmd_id: command identifier
* @cmd_buf: command buffer
@@ -264,42 +172,59 @@ static void qcom_scm_inv_range(unsigned long start, unsigned long end)
* and response buffers is taken care of by qcom_scm_call; however, callers are
* responsible for any other cached buffers passed over to the secure world.
*/
-static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len)
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const void *cmd_buf, size_t cmd_len, void *resp_buf,
+ size_t resp_len)
{
int ret;
struct qcom_scm_command *cmd;
struct qcom_scm_response *rsp;
- unsigned long start, end;
+ size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+ dma_addr_t cmd_phys;
- cmd = alloc_qcom_scm_command(cmd_len, resp_len);
+ cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
+ cmd->len = cpu_to_le32(alloc_len);
+ cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+ cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+
cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
if (cmd_buf)
memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
+ rsp = qcom_scm_command_to_response(cmd);
+
+ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cmd_phys)) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
mutex_lock(&qcom_scm_lock);
- ret = __qcom_scm_call(cmd);
+ ret = smc(cmd_phys);
+ if (ret < 0)
+ ret = qcom_scm_remap_error(ret);
mutex_unlock(&qcom_scm_lock);
if (ret)
goto out;
- rsp = qcom_scm_command_to_response(cmd);
- start = (unsigned long)rsp;
-
do {
- qcom_scm_inv_range(start, start + sizeof(*rsp));
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+ sizeof(*rsp), DMA_FROM_DEVICE);
} while (!rsp->is_complete);
- end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
- qcom_scm_inv_range(start, end);
-
- if (resp_buf)
- memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
+ if (resp_buf) {
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+ le32_to_cpu(rsp->buf_offset),
+ resp_len, DMA_FROM_DEVICE);
+ memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
+ resp_len);
+ }
out:
- free_qcom_scm_command(cmd);
+ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
return ret;
}
@@ -342,6 +267,41 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
return r0;
}
+/**
+ * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+ int context_id;
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+ register u32 r1 asm("r1") = (u32)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+ __asmeq("%4", "r3")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
+ );
+ return r0;
+}
+
u32 qcom_scm_get_version(void)
{
int context_id;
@@ -378,22 +338,6 @@ u32 qcom_scm_get_version(void)
}
EXPORT_SYMBOL(qcom_scm_get_version);
-/*
- * Set the cold/warm boot address for one of the CPU cores.
- */
-static int qcom_scm_set_boot_addr(u32 addr, int flags)
-{
- struct {
- __le32 flags;
- __le32 addr;
- } cmd;
-
- cmd.addr = cpu_to_le32(addr);
- cmd.flags = cpu_to_le32(flags);
- return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
- &cmd, sizeof(cmd), NULL, 0);
-}
-
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
@@ -423,7 +367,8 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
set_cpu_present(cpu, false);
}
- return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+ return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+ flags, virt_to_phys(entry));
}
/**
@@ -434,11 +379,16 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+ const cpumask_t *cpus)
{
int ret;
int flags = 0;
int cpu;
+ struct {
+ __le32 flags;
+ __le32 addr;
+ } cmd;
/*
* Reassign only if we are switching from hotplug entry point
@@ -454,7 +404,10 @@ int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
if (!flags)
return 0;
- ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+ cmd.addr = cpu_to_le32(virt_to_phys(entry));
+ cmd.flags = cpu_to_le32(flags);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+ &cmd, sizeof(cmd), NULL, 0);
if (!ret) {
for_each_cpu(cpu, cpus)
qcom_scm_wb[cpu].entry = entry;
@@ -477,25 +430,133 @@ void __qcom_scm_cpu_power_down(u32 flags)
flags & QCOM_SCM_FLUSH_FLAG_MASK);
}
-int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
int ret;
__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
__le32 ret_val = 0;
- ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd,
- sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+ &svc_cmd, sizeof(svc_cmd), &ret_val,
+ sizeof(ret_val));
if (ret)
return ret;
return le32_to_cpu(ret_val);
}
-int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+ u32 req_cnt, u32 *resp)
{
if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
return -ERANGE;
- return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
+ return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
+
+void __qcom_scm_init(void)
+{
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+ dma_addr_t metadata_phys)
+{
+ __le32 scm_ret;
+ int ret;
+ struct {
+ __le32 proc;
+ __le32 image_addr;
+ } request;
+
+ request.proc = cpu_to_le32(peripheral);
+ request.image_addr = cpu_to_le32(metadata_phys);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+ phys_addr_t addr, phys_addr_t size)
+{
+ __le32 scm_ret;
+ int ret;
+ struct {
+ __le32 proc;
+ __le32 addr;
+ __le32 len;
+ } request;
+
+ request.proc = cpu_to_le32(peripheral);
+ request.addr = cpu_to_le32(addr);
+ request.len = cpu_to_le32(size);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ __le32 out;
+ __le32 in = cpu_to_le32(reset);
+ int ret;
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index bb6555f6d63b..4a0f5ead4fb5 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -12,7 +12,150 @@
#include <linux/io.h>
#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0x3) << 4) | \
+ (((b) & 0x3) << 6) | \
+ (((c) & 0x3) << 8) | \
+ (((d) & 0x3) << 10) | \
+ (((e) & 0x3) << 12) | \
+ (((f) & 0x3) << 14) | \
+ (((g) & 0x3) << 16) | \
+ (((h) & 0x3) << 18) | \
+ (((i) & 0x3) << 20) | \
+ (((j) & 0x3) << 22) | \
+ ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ */
+struct qcom_scm_desc {
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+};
+
+static u64 qcom_smccc_convention = -1;
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in preemptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ int arglen = desc->arginfo & 0xf;
+ int retry_count = 0, i;
+ u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
+ u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
+
+ if (unlikely(arglen > N_REGISTER_ARGS)) {
+ alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+
+ if (!args_virt)
+ return -ENOMEM;
+
+ if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+ __le32 *args = args_virt;
+
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ args[i] = cpu_to_le32(desc->args[i +
+ FIRST_EXT_ARG_IDX]);
+ } else {
+ __le64 *args = args_virt;
+
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ args[i] = cpu_to_le64(desc->args[i +
+ FIRST_EXT_ARG_IDX]);
+ }
+
+ args_phys = dma_map_single(dev, args_virt, alloc_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, args_phys)) {
+ kfree(args_virt);
+ return -ENOMEM;
+ }
+
+ x5 = args_phys;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ do {
+ arm_smccc_smc(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5, 0, 0,
+ res);
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ if (args_virt) {
+ dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(args_virt);
+ }
+
+ if (res->a0 < 0)
+ return qcom_scm_remap_error(res->a0);
+
+ return 0;
+}
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
@@ -29,13 +172,15 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
/**
* qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @dev: Device pointer
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
*
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+ const cpumask_t *cpus)
{
return -ENOTSUPP;
}
@@ -52,12 +197,164 @@ void __qcom_scm_cpu_power_down(u32 flags)
{
}
-int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
}
-int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+ u32 req_cnt, u32 *resp)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ desc.args[0] = req[0].addr;
+ desc.args[1] = req[0].val;
+ desc.args[2] = req[1].addr;
+ desc.args[3] = req[1].val;
+ desc.args[4] = req[2].addr;
+ desc.args[5] = req[2].val;
+ desc.args[6] = req[3].addr;
+ desc.args[7] = req[3].val;
+ desc.args[8] = req[4].addr;
+ desc.args[9] = req[4].val;
+ desc.arginfo = QCOM_SCM_ARGS(10);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
+ &res);
+ *resp = res.a1;
+
+ return ret;
+}
+
+void __qcom_scm_init(void)
+{
+ u64 cmd;
+ struct arm_smccc_res res;
+ u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
+
+ /* First try an SMC64 call */
+ cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
+ ARM_SMCCC_OWNER_SIP, function);
+
+ arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
+ 0, 0, 0, 0, 0, &res);
+
+ if (!res.a0 && res.a1)
+ qcom_smccc_convention = ARM_SMCCC_SMC_64;
+ else
+ qcom_smccc_convention = ARM_SMCCC_SMC_32;
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &desc, &res);
+
+ return ret ? false : !!res.a1;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+ dma_addr_t metadata_phys)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = metadata_phys;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+ phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = addr;
+ desc.args[2] = size;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = reset;
+ desc.args[1] = 0;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
+ &res);
+
+ return ret ? : res.a1;
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 45c008d68891..e64a501adbf4 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -10,19 +10,64 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
-
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
#include "qcom_scm.h"
+struct qcom_scm {
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct reset_controller_dev reset;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+ int ret;
+
+ ret = clk_prepare_enable(__scm->core_clk);
+ if (ret)
+ goto bail;
+
+ ret = clk_prepare_enable(__scm->iface_clk);
+ if (ret)
+ goto disable_core;
+
+ ret = clk_prepare_enable(__scm->bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ return 0;
+
+disable_iface:
+ clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+ clk_disable_unprepare(__scm->core_clk);
+bail:
+ return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+ clk_disable_unprepare(__scm->core_clk);
+ clk_disable_unprepare(__scm->iface_clk);
+ clk_disable_unprepare(__scm->bus_clk);
+}
+
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
@@ -47,7 +92,7 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
*/
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
- return __qcom_scm_set_warm_boot_addr(entry, cpus);
+ return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
@@ -72,12 +117,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down);
*/
bool qcom_scm_hdcp_available(void)
{
- int ret;
+ int ret = qcom_scm_clk_enable();
- ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
- QCOM_SCM_CMD_HDCP);
+ if (ret)
+ return false;
- return (ret > 0) ? true : false;
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ QCOM_SCM_CMD_HDCP);
+
+ qcom_scm_clk_disable();
+
+ return ret > 0 ? true : false;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -91,6 +141,287 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available);
*/
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
- return __qcom_scm_hdcp_req(req, req_cnt, resp);
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
+ qcom_scm_clk_disable();
+ return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+ if (ret <= 0)
+ return false;
+
+ return __qcom_scm_pas_supported(__scm->dev, peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ int ret;
+
+ /*
+ * During the scm call, memory protection will be enabled for the metadata
+ * blob, so make sure it's physically contiguous, 4K aligned and
+ * non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+ GFP_KERNEL);
+ if (!mdata_buf) {
+ dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ goto free_metadata;
+
+ ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+
+ qcom_scm_clk_disable();
+
+free_metadata:
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
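Taken together, the exported wrappers above imply a natural boot sequence for a peripheral image loader. The sketch below is an assumed consumer only (example_pas_boot and its parameters are hypothetical, not part of this patch); it uses the helpers exactly as declared in this file.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/qcom_scm.h>

	static int example_pas_boot(u32 pas_id, const void *mdt, size_t mdt_len,
				    phys_addr_t mem_phys, phys_addr_t mem_size)
	{
		int ret;

		if (!qcom_scm_pas_supported(pas_id))
			return -ENOTSUPP;

		/* hand the ELF/MDT metadata to the secure world for validation */
		ret = qcom_scm_pas_init_image(pas_id, mdt, mdt_len);
		if (ret)
			return ret;

		/* describe the carveout that will hold the firmware segments */
		ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
		if (ret)
			return ret;

		/* ...the caller copies the firmware segments into the carveout... */

		/* authenticate the image and bring the remote processor out of reset */
		return qcom_scm_pas_auth_and_reset(pas_id);
	}

On stop, the same consumer would call qcom_scm_pas_shutdown(pas_id).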
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+ .assert = qcom_scm_pas_reset_assert,
+ .deassert = qcom_scm_pas_reset_deassert,
+};
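For context, a hedged sketch of how an assumed consumer (e.g. an MSS remoteproc driver, not part of this patch) would reach the two callbacks above through the reset framework; example_mss_reset_cycle is hypothetical.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_mss_reset_cycle(struct device *dev)
	{
		struct reset_control *mss_restart;
		int ret;

		/* the SCM device registers a single reset line, index 0 */
		mss_restart = devm_reset_control_get(dev, NULL);
		if (IS_ERR(mss_restart))
			return PTR_ERR(mss_restart);

		ret = reset_control_assert(mss_restart);	/* -> __qcom_scm_pas_mss_reset(dev, 1) */
		if (ret)
			return ret;

		return reset_control_deassert(mss_restart);	/* -> __qcom_scm_pas_mss_reset(dev, 0) */
	}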
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+ struct qcom_scm *scm;
+ int ret;
+
+ scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+ if (!scm)
+ return -ENOMEM;
+
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+ return PTR_ERR(scm->core_clk);
+
+ scm->core_clk = NULL;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+ scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(scm->iface_clk)) {
+ if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ return PTR_ERR(scm->iface_clk);
+ }
+
+ scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(scm->bus_clk)) {
+ if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire bus clk\n");
+ return PTR_ERR(scm->bus_clk);
+ }
+ }
+
+ scm->reset.ops = &qcom_scm_pas_reset_ops;
+ scm->reset.nr_resets = 1;
+ scm->reset.of_node = pdev->dev.of_node;
+ reset_controller_register(&scm->reset);
+
+ /* vote for max clk rate for highest performance */
+ ret = clk_set_rate(scm->core_clk, INT_MAX);
+ if (ret)
+ return ret;
+
+ __scm = scm;
+ __scm->dev = &pdev->dev;
+
+ __qcom_scm_init();
+
+ return 0;
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+ { .compatible = "qcom,scm-apq8064",},
+ { .compatible = "qcom,scm-msm8660",},
+ { .compatible = "qcom,scm-msm8960",},
+ { .compatible = "qcom,scm",},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+ .driver = {
+ .name = "qcom_scm",
+ .of_match_table = qcom_scm_dt_match,
+ },
+ .probe = qcom_scm_probe,
+};
+
+static int __init qcom_scm_init(void)
+{
+ struct device_node *np, *fw_np;
+ int ret;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+
+ if (!fw_np)
+ return -ENODEV;
+
+ np = of_find_matching_node(fw_np, qcom_scm_dt_match);
+
+ if (!np) {
+ of_node_put(fw_np);
+ return -ENODEV;
+ }
+
+ of_node_put(np);
+
+ ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);
+
+ of_node_put(fw_np);
+
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&qcom_scm_driver);
+}
+
+subsys_initcall(qcom_scm_init);
+
+static void __exit qcom_scm_exit(void)
+{
+ platform_driver_unregister(&qcom_scm_driver);
+}
+module_exit(qcom_scm_exit);
+
+MODULE_DESCRIPTION("Qualcomm SCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 2cce75c08b99..3584b00fe7e6 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -19,7 +19,8 @@
#define QCOM_SCM_FLAG_HLOS 0x01
#define QCOM_SCM_FLAG_COLDBOOT_MC 0x02
#define QCOM_SCM_FLAG_WARMBOOT_MC 0x04
-extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+ const cpumask_t *cpus);
extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
#define QCOM_SCM_CMD_TERMINATE_PC 0x2
@@ -29,14 +30,34 @@ extern void __qcom_scm_cpu_power_down(u32 flags);
#define QCOM_SCM_SVC_INFO 0x6
#define QCOM_IS_CALL_AVAIL_CMD 0x1
-extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id);
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_CMD_HDCP 0x01
-extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
+extern int __qcom_scm_hdcp_req(struct device *dev,
+ struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+extern void __qcom_scm_init(void);
+
+#define QCOM_SCM_SVC_PIL 0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7
+#define QCOM_SCM_PAS_MSS_RESET 0xa
+extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
+extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+ dma_addr_t metadata_phys);
+extern int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+ phys_addr_t addr, phys_addr_t size);
+extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
+extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
+extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
/* common error codes */
+#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
#define QCOM_SCM_EOPNOTSUPP -4
#define QCOM_SCM_EINVAL_ADDR -3
@@ -44,4 +65,22 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_V2_EBUSY:
+ return -EBUSY;
+ }
+ return -EINVAL;
+}
+
#endif
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
new file mode 100644
index 000000000000..f395dec27113
--- /dev/null
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -0,0 +1,163 @@
+/*
+ * SCPI Generic power domain support.
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_domain.h>
+#include <linux/scpi_protocol.h>
+
+struct scpi_pm_domain {
+ struct generic_pm_domain genpd;
+ struct scpi_ops *ops;
+ u32 domain;
+ char name[30];
+};
+
+/*
+ * These device power state values are not well-defined in the specification.
+ * In case different implementations use different values, we can make these
+ * specific to compatibles rather than getting them from the device tree.
+ */
+enum scpi_power_domain_state {
+ SCPI_PD_STATE_ON = 0,
+ SCPI_PD_STATE_OFF = 3,
+};
+
+#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd)
+
+static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on)
+{
+ int ret;
+ enum scpi_power_domain_state state;
+
+ if (power_on)
+ state = SCPI_PD_STATE_ON;
+ else
+ state = SCPI_PD_STATE_OFF;
+
+ ret = pd->ops->device_set_power_state(pd->domain, state);
+ if (ret)
+ return ret;
+
+ return !(state == pd->ops->device_get_power_state(pd->domain));
+}
+
+static int scpi_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, true);
+}
+
+static int scpi_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, false);
+}
+
+static int scpi_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scpi_pm_domain *scpi_pd;
+ struct genpd_onecell_data *scpi_pd_data;
+ struct generic_pm_domain **domains;
+ struct scpi_ops *scpi_ops;
+ int ret, num_domains, i;
+
+ scpi_ops = get_scpi_ops();
+ if (!scpi_ops)
+ return -EPROBE_DEFER;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ if (!scpi_ops->device_set_power_state ||
+ !scpi_ops->device_get_power_state) {
+ dev_err(dev, "power domains not supported in the firmware\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "num-domains", &num_domains);
+ if (ret) {
+ dev_err(dev, "number of domains not found\n");
+ return -EINVAL;
+ }
+
+ scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL);
+ if (!scpi_pd)
+ return -ENOMEM;
+
+ scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL);
+ if (!scpi_pd_data)
+ return -ENOMEM;
+
+ domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++, scpi_pd++) {
+ domains[i] = &scpi_pd->genpd;
+
+ scpi_pd->domain = i;
+ scpi_pd->ops = scpi_ops;
+ snprintf(scpi_pd->name, sizeof(scpi_pd->name), "%s.%d", np->name, i);
+ scpi_pd->genpd.name = scpi_pd->name;
+ scpi_pd->genpd.power_off = scpi_pd_power_off;
+ scpi_pd->genpd.power_on = scpi_pd_power_on;
+
+ /*
+ * Treat all power domains as off at boot.
+ *
+ * The SCP firmware itself may have switched on some domains,
+ * but for reference counting purposes, keep it this way.
+ */
+ pm_genpd_init(&scpi_pd->genpd, NULL, true);
+ }
+
+ scpi_pd_data->domains = domains;
+ scpi_pd_data->num_domains = num_domains;
+
+ of_genpd_add_provider_onecell(np, scpi_pd_data);
+
+ return 0;
+}
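A peripheral driver never calls into this provider directly: once its device sits in one of these domains, genpd routes runtime PM through scpi_pd_power_on()/scpi_pd_power_off() above. A hedged consumer-side sketch (example_consumer_probe is hypothetical):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_consumer_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);

		/* first active reference powers the SCPI domain on */
		ret = pm_runtime_get_sync(&pdev->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(&pdev->dev);
			pm_runtime_disable(&pdev->dev);
			return ret;
		}

		return 0;
	}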
+
+static const struct of_device_id scpi_power_domain_ids[] = {
+ { .compatible = "arm,scpi-power-domains", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, scpi_power_domain_ids);
+
+static struct platform_driver scpi_power_domain_driver = {
+ .driver = {
+ .name = "scpi_power_domain",
+ .of_match_table = scpi_power_domain_ids,
+ },
+ .probe = scpi_pm_domain_probe,
+};
+module_platform_driver(scpi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index c9b9fdf6cfbb..d61410299ec0 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -21,6 +21,7 @@ config FPGA_MGR_SOCFPGA
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
+ depends on HAS_DMA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index be43afb08c69..0238bf8bc8c3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
- drm_crtc.o drm_modes.o drm_edid.o \
+ drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
@@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
- drm_kms_helper_common.o drm_dp_dual_mode_helper.o
+ drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
+ drm_simple_kms_helper.o drm_blend.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e055d5be1c3c..8ebc5f1eb4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -85,8 +85,12 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
struct amdgpu_ip_block_version {
enum amd_ip_block_type type;
@@ -298,13 +306,16 @@ struct amdgpu_ring_funcs {
uint32_t oa_base, uint32_t oa_size);
/* testing functions */
int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
};
/*
@@ -594,11 +605,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
@@ -754,12 +763,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
+void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
@@ -767,12 +775,9 @@ struct amdgpu_ring {
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched;
- spinlock_t fence_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
- u64 next_rptr_gpu_addr;
- volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -791,14 +796,16 @@ struct amdgpu_ring {
u32 doorbell_index;
bool use_doorbell;
unsigned wptr_offs;
- unsigned next_rptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
enum amdgpu_ring_type type;
char name[16];
unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
};
/*
@@ -861,6 +868,7 @@ struct amdgpu_vm {
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
+ uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
@@ -883,13 +891,14 @@ struct amdgpu_vm_id {
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
- struct amdgpu_ring *last_user;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
+ uint32_t current_gpu_reset_count;
+
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
@@ -905,6 +914,10 @@ struct amdgpu_vm_manager {
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
uint32_t max_pfn;
/* vram base address for page table entry */
u64 vram_base_offset;
@@ -926,17 +939,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1142,6 +1152,12 @@ struct amdgpu_cu_info {
uint32_t bitmap[4][4];
};
+struct amdgpu_gfx_funcs {
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+ void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config;
@@ -1178,6 +1194,7 @@ struct amdgpu_gfx {
/* ce ram size*/
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
+ const struct amdgpu_gfx_funcs *funcs;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1195,10 +1212,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1250,6 +1263,7 @@ struct amdgpu_job {
uint32_t num_ibs;
void *owner;
uint64_t ctx;
+ bool vm_needs_flush;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
@@ -1257,8 +1271,7 @@ struct amdgpu_job {
uint32_t oa_base, oa_size;
/* user fence handling */
- struct amdgpu_bo *uf_bo;
- uint32_t uf_offset;
+ uint64_t uf_addr;
uint64_t uf_sequence;
};
@@ -1560,6 +1573,12 @@ struct amdgpu_dpm_funcs {
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
};
struct amdgpu_dpm {
@@ -1662,6 +1681,7 @@ struct amdgpu_uvd {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
bool address_64_bit;
+ bool use_ctx_buf;
struct amd_sched_entity entity;
};
@@ -1683,6 +1703,7 @@ struct amdgpu_vce {
struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
struct delayed_work idle_work;
+ struct mutex idle_mutex;
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
@@ -1767,6 +1788,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
/*
* amdgpu smumgr functions
*/
@@ -1811,12 +1834,8 @@ struct amdgpu_asic_funcs {
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
int (*reset)(struct amdgpu_device *adev);
- /* wait for mc_idle */
- int (*wait_for_mc_idle)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
- /* get the gpu clock counter */
- uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@@ -2003,6 +2022,10 @@ struct amdgpu_device {
spinlock_t didt_idx_lock;
amdgpu_rreg_t didt_rreg;
amdgpu_wreg_t didt_wreg;
+ /* protects concurrent gc_cac register access */
+ spinlock_t gc_cac_idx_lock;
+ amdgpu_rreg_t gc_cac_rreg;
+ amdgpu_wreg_t gc_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
@@ -2028,6 +2051,7 @@ struct amdgpu_device {
atomic64_t vram_vis_usage;
atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
+ atomic64_t num_evictions;
atomic_t gpu_reset_counter;
/* display */
@@ -2038,7 +2062,7 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
- unsigned fence_context;
+ u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
@@ -2131,6 +2155,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
@@ -2206,12 +2232,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
*/
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2222,7 +2246,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
+#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2264,6 +2288,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
@@ -2342,6 +2368,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
@@ -2383,9 +2421,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
#endif
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 32809f749903..d080d0807a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
- if (rdev->asic_funcs->get_gpu_clock_counter)
- return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+ if (rdev->gfx.funcs->get_gpu_clock_counter)
+ return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9df1bcb35bf0..983175363b06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le16_to_cpu(firmware_info->info.usReferenceClock);
ppll->reference_div = 0;
- if (crev < 2)
- ppll->pll_out_min =
- le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
- else
- ppll->pll_out_min =
- le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ ppll->pll_out_min =
+ le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
ppll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
- ppll->lcd_pll_out_min =
- le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_min == 0)
- ppll->lcd_pll_out_min = ppll->pll_out_min;
- ppll->lcd_pll_out_max =
- le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
- if (ppll->lcd_pll_out_max == 0)
- ppll->lcd_pll_out_max = ppll->pll_out_max;
- } else {
+ ppll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_min == 0)
ppll->lcd_pll_out_min = ppll->pll_out_min;
+ ppll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (ppll->lcd_pll_out_max == 0)
ppll->lcd_pll_out_max = ppll->pll_out_max;
- }
if (ppll->pll_out_min == 0)
ppll->pll_out_min = 64800;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 35a1248aaa77..49de92600074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "amd_acpi.h"
@@ -27,6 +28,7 @@ struct amdgpu_atpx_functions {
struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
+ bool is_hybrid;
};
static struct amdgpu_atpx_priv {
@@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+ return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
@@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ /* some BIOSes set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+#if 1
+ /* This is a temporary hack until the D3 cold support
+ * makes it upstream. The ATPX power_control method seems
+ * to still work even if the system should be using
+ * the new standardized hybrid D3 cold ACPI interface.
+ */
+ atpx->functions.power_cntl = true;
+#else
+ atpx->functions.power_cntl = false;
+#endif
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
@@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
@@ -507,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.switchto = amdgpu_atpx_switchto,
.power_state = amdgpu_atpx_power_state,
- .init = amdgpu_atpx_init,
.get_client_id = amdgpu_atpx_get_client_id,
};
@@ -542,6 +573,7 @@ static bool amdgpu_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
+ amdgpu_atpx_init();
return true;
}
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 99ca75baa47d..2b6afe123f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
uint16_t tmp, bios_header_start;
r = amdgpu_atrm_get_bios(adev);
- if (r == false)
+ if (!r)
r = amdgpu_acpi_vfct_bios(adev);
- if (r == false)
+ if (!r)
r = igp_read_bios_from_vram(adev);
- if (r == false)
+ if (!r)
r = amdgpu_read_bios(adev);
- if (r == false) {
+ if (!r) {
r = amdgpu_read_bios_from_rom(adev);
}
- if (r == false) {
+ if (!r) {
r = amdgpu_read_disabled_bios(adev);
}
- if (r == false) {
+ if (!r) {
r = amdgpu_read_platform_bios(adev);
}
- if (r == false || adev->bios == NULL) {
+ if (!r || adev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
adev->bios = NULL;
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 823bf5e0b0c8..651115dcce12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
int r;
+ unsigned long total_size = 0;
array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
+ total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
list->array = array;
list->num_entries = num_entries;
+ trace_amdgpu_cs_bo_status(list->num_entries, total_size);
return 0;
error_free:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index cf6f49fc1c75..bc0440f7a31d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
return RREG32_UVD_CTX(index);
case CGS_IND_REG__DIDT:
return RREG32_DIDT(index);
+ case CGS_IND_REG_GC_CAC:
+ return RREG32_GC_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
return WREG32_UVD_CTX(index, value);
case CGS_IND_REG__DIDT:
return WREG32_DIDT(index, value);
+ case CGS_IND_REG_GC_CAC:
+ return WREG32_GC_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
@@ -748,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ break;
case CHIP_TONGA:
strcpy(fw_name, "amdgpu/tonga_smc.bin");
break;
@@ -787,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -795,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->version = adev->pm.fw_version;
info->image_size = ucode_size;
+ info->ucode_start_address = ucode_start_address;
info->kptr = (void *)src;
}
return 0;
}
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
- struct cgs_system_info *sys_info)
+ struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
@@ -821,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
+ case CGS_SYSTEM_INFO_PCIE_DEV:
+ sys_info->value = adev->pdev->device;
+ break;
+ case CGS_SYSTEM_INFO_PCIE_REV:
+ sys_info->value = adev->pdev->revision;
+ break;
case CGS_SYSTEM_INFO_CG_FLAGS:
sys_info->value = adev->cg_flags;
break;
@@ -830,6 +845,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_GFX_CU_INFO:
sys_info->value = adev->gfx.cu_info.number;
break;
+ case CGS_SYSTEM_INFO_GFX_SE_INFO:
+ sys_info->value = adev->gfx.config.max_shader_engines;
+ break;
default:
return -ENODEV;
}
@@ -903,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
acpi_handle handle;
struct acpi_object_list input;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *params = NULL;
- union acpi_object *obj = NULL;
+ union acpi_object *params, *obj;
uint8_t name[5] = {'\0'};
- struct cgs_acpi_method_argument *argument = NULL;
+ struct cgs_acpi_method_argument *argument;
uint32_t i, count;
acpi_status status;
- int result = 0;
- uint32_t func_no = 0xFFFFFFFF;
+ int result;
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
@@ -927,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (info->pinput_argument == NULL)
return -EINVAL;
argument = info->pinput_argument;
- func_no = argument->value;
for (i = 0; i < info->input_count; i++) {
if (((argument->type == ACPI_TYPE_STRING) ||
(argument->type == ACPI_TYPE_BUFFER)) &&
@@ -972,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
params->integer.value = argument->value;
break;
case ACPI_TYPE_STRING:
- params->string.length = argument->method_length;
+ params->string.length = argument->data_length;
params->string.pointer = argument->pointer;
break;
case ACPI_TYPE_BUFFER:
- params->buffer.length = argument->method_length;
+ params->buffer.length = argument->data_length;
params->buffer.pointer = argument->pointer;
break;
default:
@@ -996,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (ACPI_FAILURE(status)) {
result = -EIO;
- goto error;
+ goto free_input;
}
/* return the output info */
@@ -1006,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if ((obj->type != ACPI_TYPE_PACKAGE) ||
(obj->package.count != count)) {
result = -EIO;
- goto error;
+ goto free_obj;
}
params = obj->package.elements;
} else
@@ -1014,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (params == NULL) {
result = -EIO;
- goto error;
+ goto free_obj;
}
for (i = 0; i < count; i++) {
if (argument->type != params->type) {
result = -EIO;
- goto error;
+ goto free_obj;
}
switch (params->type) {
case ACPI_TYPE_INTEGER:
@@ -1030,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if ((params->string.length != argument->data_length) ||
(params->string.pointer == NULL)) {
result = -EIO;
- goto error;
+ goto free_obj;
}
strncpy(argument->pointer,
params->string.pointer,
@@ -1039,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
case ACPI_TYPE_BUFFER:
if (params->buffer.pointer == NULL) {
result = -EIO;
- goto error;
+ goto free_obj;
}
memcpy(argument->pointer,
params->buffer.pointer,
@@ -1052,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
params++;
}
-error:
- if (obj != NULL)
- kfree(obj);
+ result = 0;
+free_obj:
+ kfree(obj);
+free_input:
kfree((void *)input.pointer);
return result;
}
@@ -1066,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
}
#endif
-int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
+static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@@ -1079,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
struct cgs_acpi_method_info info = {0};
acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
- acpi_input[0].method_length = sizeof(uint32_t);
acpi_input[0].data_length = sizeof(uint32_t);
acpi_input[0].value = acpi_function;
acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
- acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_input[1].data_length = input_size;
acpi_input[1].pointer = pinput;
acpi_output.type = CGS_ACPI_TYPE_BUFFER;
- acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_output.data_length = output_size;
acpi_output.pointer = poutput;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cb07da41152b..ff0b55a65ca3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9bc8f1d99733..0307ff5887c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
- if (p->uf_entry.robj) {
- p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
- p->job->uf_offset = uf_offset;
- }
-
+ if (p->uf_entry.robj)
+ p->job->uf_addr = uf_offset;
kfree(chunk_array);
return 0;
@@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
@@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (r)
goto error_validate;
+ fpriv->vm.last_eviction_counter =
+ atomic64_read(&p->adev->num_evictions);
+
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
}
+ if (p->uf_entry.robj)
+ p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
error_validate:
if (r) {
amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
+ p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- }
+ } else {
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
- r = amdgpu_bo_vm_update_pte(p, vm);
- if (!r)
- amdgpu_cs_sync_rings(p);
+ r = amdgpu_bo_vm_update_pte(p, vm);
+ if (r)
+ return r;
+ }
- return r;
+ return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
/* UVD & VCE fw doesn't support user fences */
- if (parser->job->uf_bo && (
+ if (parser->job->uf_addr && (
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
@@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
- struct fence *fence;
struct amdgpu_job *job;
int r;
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func,
- p->filp, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
return r;
@@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->ctx = entity->fence_context;
- p->fence = fence_get(fence);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+ p->fence = fence_get(&job->base.s_fence->finished);
+ cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
+ amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
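
Two things change in the submission path above: the user fence is now carried as a plain GPU address (uf_addr), filled in once the BO has been validated, and the fence handed back to userspace is the scheduler's finished fence rather than a hardware fence created at init time. A minimal sketch of how the address is derived, assuming only the field names used in the hunks above:

static uint64_t example_user_fence_addr(struct amdgpu_cs_parser *p,
					uint64_t uf_offset)
{
	/* only valid after amdgpu_cs_parser_bos() has placed the BO,
	 * so amdgpu_bo_gpu_offset() returns its final address */
	return uf_offset + amdgpu_bo_gpu_offset(p->uf_entry.robj);
}
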
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e920086af46..df7ab2458e50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -35,6 +36,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
bool always_indirect)
{
+ uint32_t ret;
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
else {
unsigned long flags;
- uint32_t ret;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- return ret;
}
+ trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
@@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
}
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type) {
+ r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (r)
+ return r;
+ break;
+ }
+ }
+ return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type)
+ return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ }
+ return true;
+
+}
+
const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
struct amdgpu_device *adev,
enum amd_ip_block_type type)
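
The new amdgpu_wait_for_idle()/amdgpu_is_idle() helpers walk the IP block list the same way as the gating setters above, skipping blocks that never initialized. Usage mirrors the GPU-reset path later in this patch, for example:

	/* quiesce the memory controller before an ASIC reset */
	if (adev->mode_info.num_crtc) {
		struct amdgpu_mode_mc_save save;

		amdgpu_display_stop_mc_access(adev, &save);
		amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
	}
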
@@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
}
}
+ adev->cg_flags &= amdgpu_cg_mask;
+ adev->pg_flags &= amdgpu_pg_mask;
+
return 0;
}
@@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
adev->didt_wreg = &amdgpu_invalid_wreg;
+ adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+ adev->gc_cac_wreg = &amdgpu_invalid_wreg;
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
+
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->pcie_idx_lock);
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
+ spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
/* Read BIOS */
- if (!amdgpu_get_bios(adev))
- return -EINVAL;
+ if (!amdgpu_get_bios(adev)) {
+ r = -EINVAL;
+ goto failed;
+ }
/* Must be an ATOMBIOS */
if (!adev->is_atom_bios) {
dev_err(adev->dev, "Expecting atombios for GPU\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- return r;
+ goto failed;
}
/* See if the asic supports SR-IOV */
@@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
!(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
DRM_INFO("GPU not posted. posting now...\n");
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
@@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- return r;
+ goto failed;
}
/* init the mode config */
@@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_fini(adev);
- return r;
+ goto failed;
}
adev->accel_working = true;
@@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- return r;
+ goto failed;
}
r = amdgpu_ib_ring_tests(adev);
@@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_ERROR("registering register debugfs failed (%d).\n", r);
}
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r) {
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ return r;
+ }
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- return r;
+ goto failed;
}
return 0;
+
+failed:
+ if (runtime)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ return r;
}
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1645,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
+ drm_crtc_force_disable_all(adev->ddev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_status);
@@ -1656,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
vga_switcheroo_unregister_client(adev->pdev);
+ if (adev->flags & AMD_IS_PX)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1841,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}
drm_kms_helper_poll_enable(dev);
+
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth++;
+#endif
drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+ dev->dev->power.disable_depth--;
+#endif
if (fbcon) {
amdgpu_fbdev_set_suspend(adev, 0);
@@ -1861,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
- unsigned ring_sizes[AMDGPU_MAX_RINGS];
- uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
- bool saved = false;
-
int i, r;
int resched;
@@ -1874,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- r = amdgpu_suspend(adev);
-
+ /* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring)
continue;
-
- ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
- if (ring_sizes[i]) {
- saved = true;
- dev_info(adev->dev, "Saved %d dwords of commands "
- "on ring %d.\n", ring_sizes[i], i);
- }
+ kthread_park(ring->sched.thread);
+ amd_sched_hw_job_reset(&ring->sched);
}
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(adev);
+
+ /* save scratch */
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_suspend(adev);
retry:
+ /* Disable fb access */
+ if (adev->mode_info.num_crtc) {
+ struct amdgpu_mode_mc_save save;
+ amdgpu_display_stop_mc_access(adev, &save);
+ amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ }
+
r = amdgpu_asic_reset(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1898,32 +1987,29 @@ retry:
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
}
-
+ /* restore scratch */
+ amdgpu_atombios_scratch_regs_restore(adev);
if (!r) {
+ r = amdgpu_ib_ring_tests(adev);
+ if (r) {
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_suspend(adev);
+ goto retry;
+ }
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
-
- amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
- ring_sizes[i] = 0;
- ring_data[i] = NULL;
- }
-
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- if (saved) {
- saved = false;
- r = amdgpu_suspend(adev);
- goto retry;
- }
+ amd_sched_job_recovery(&ring->sched);
+ kthread_unpark(ring->sched.thread);
}
} else {
- amdgpu_fence_driver_force_completion(adev);
+ dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i])
- kfree(ring_data[i]);
+ if (adev->rings[i]) {
+ kthread_unpark(adev->rings[i]->sched.thread);
+ }
}
}
@@ -1934,13 +2020,11 @@ retry:
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
+ amdgpu_irq_gpu_reset_resume_helper(adev);
return r;
}
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
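
The reset rework above drops the ring backup/restore dance in favour of the scheduler: ring threads are parked, hardware fences are detached and force-completed, and after the ASIC comes back the scheduler re-submits the outstanding jobs itself. A condensed outline using only the calls shown in the hunks above (error handling and scratch register save/restore omitted):

static void example_reset_outline(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;
		kthread_park(ring->sched.thread);	/* stop feeding the HW */
		amd_sched_hw_job_reset(&ring->sched);	/* detach HW fences */
	}
	amdgpu_fence_driver_force_completion(adev);	/* old HW fences are void */

	amdgpu_suspend(adev);
	amdgpu_asic_reset(adev);
	amdgpu_atom_asic_init(adev->mode_info.atom_context);	/* post the card */
	amdgpu_resume(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;
		amd_sched_job_recovery(&ring->sched);	/* re-submit saved jobs */
		kthread_unpark(ring->sched.thread);
	}
	amdgpu_irq_gpu_reset_resume_helper(adev);	/* re-arm interrupt state */
}
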
@@ -2094,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+ use_bank = 1;
+ *pos &= 0xFFFFFF;
+ } else {
+ use_bank = 0;
+ }
+
+ if (use_bank) {
+ if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+ se_bank >= adev->gfx.config.max_shader_engines)
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
while (size) {
uint32_t value;
if (*pos > adev->rmmio_size)
- return result;
+ goto end;
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto end;
+ }
result += 4;
buf += 4;
@@ -2115,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size -= 4;
}
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
return result;
}
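
amdgpu_debugfs_regs_read() above now overloads the file offset: if bit 62 is set, bits 24-33, 34-43 and 44-53 carry the SE, SH and instance selected via amdgpu_gfx_select_se_sh(), and only the low 24 bits remain the register byte offset. A small sketch of how a hypothetical userspace tool would build such an offset before pread():

#include <stdint.h>

static uint64_t example_banked_offset(uint32_t reg_byte_offset, uint32_t se,
				      uint32_t sh, uint32_t instance)
{
	return (1ULL << 62) |
	       ((uint64_t)(se & 0x3FF) << 24) |
	       ((uint64_t)(sh & 0x3FF) << 34) |
	       ((uint64_t)(instance & 0x3FF) << 44) |
	       (reg_byte_offset & 0xFFFFFF);
}
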
@@ -2314,6 +2427,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
return result;
}
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 0;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
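
The new amdgpu_gca_config file exposes the gfx configuration as a flat array of dwords: dword 0 is a layout version (0 here) and the following dwords are the gfx.config fields in exactly the order they are written above. A sketch of a hypothetical reader:

#include <stdint.h>
#include <stdio.h>

static void example_dump_gca_config(const uint32_t *dw, size_t count)
{
	if (count < 3)
		return;
	printf("layout version     : %u\n", dw[0]);
	printf("max_shader_engines : %u\n", dw[1]);
	printf("max_tile_pipes     : %u\n", dw[2]);
	/* ...remaining dwords follow the config[] order above */
}
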
@@ -2339,11 +2514,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2351,6 +2533,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
+ "amdgpu_gca_config",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2ef7e..76f96028313d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- };
+ }
if (!repcnt)
DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
@@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto cleanup;
+ goto unreserve;
}
r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
DRM_ERROR("failed to get fences for buffer\n");
- goto cleanup;
+ goto unpin;
}
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
@@ -240,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->base = base;
- r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -268,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(crtc);
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
+unpin:
if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
+unreserve:
amdgpu_bo_unreserve(new_rbo);
cleanup:
@@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- if (amdgpu_fb->obj) {
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
- }
+ drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(amdgpu_fb);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f888c015f76c..9aa533cf4ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -52,9 +52,10 @@
* - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
* - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
* at the end of IBs.
+ * - 3.3.0 - Add VM support for UVD on supported hardware.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 2
+#define KMS_DRIVER_MINOR 3
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -82,8 +83,12 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,6 +165,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
#endif
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
@@ -168,6 +176,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
@@ -413,7 +430,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (amdgpu_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
@@ -430,7 +450,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (amdgpu_is_atpx_hybrid() ||
+ !amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
@@ -515,7 +537,7 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
.dev_priv_size = 0,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
@@ -590,7 +612,6 @@ static int __init amdgpu_init(void)
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
- driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d1558768cfb7..0b109aebfec6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
- while (last_seq != seq) {
+ if (unlikely(seq == last_seq))
+ return;
+
+ last_seq &= drv->num_fences_mask;
+ seq &= drv->num_fences_mask;
+
+ do {
struct fence *fence, **ptr;
- ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+ ++last_seq;
+ last_seq &= drv->num_fences_mask;
+ ptr = &drv->fences[last_seq];
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
RCU_INIT_POINTER(*ptr, NULL);
- BUG_ON(!fence);
+ if (!fence)
+ continue;
r = fence_signal(fence);
if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
fence_put(fence);
- }
+ } while (last_seq != seq);
}
/**
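
amdgpu_fence_process() above now walks the fence slots purely in the masked domain and tolerates already-consumed slots instead of hitting BUG_ON(). For example, with an 8-entry fence array (num_fences_mask == 7), last_seq == 6 and seq == 9, the loop visits slots 7, 0 and 1. A tiny standalone demonstration of that iteration, assuming nothing beyond the arithmetic shown above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_fences_mask = 7;		/* 8 fence slots */
	uint32_t last_seq = 6 & num_fences_mask;	/* == 6 */
	uint32_t seq = 9 & num_fences_mask;		/* == 1 */

	do {
		last_seq = (last_seq + 1) & num_fences_mask;
		printf("process slot %u\n", last_seq);	/* prints 7, 0, 1 */
	} while (last_seq != seq);
	return 0;
}
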
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8fab6486064f..88fbed2389c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9f95da4f0536..a074edd95c70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
}
}
}
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+ unsigned se, sh, cu;
+ const char *p;
+
+ memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+ if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+ return;
+
+ p = amdgpu_disable_cu;
+ for (;;) {
+ char *next;
+ int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+ if (ret < 3) {
+ DRM_ERROR("amdgpu: could not parse disable_cu\n");
+ return;
+ }
+
+ if (se < max_se && sh < max_sh && cu < 16) {
+ DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+ mask[se * max_sh + sh] |= 1u << cu;
+ } else {
+ DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+ se, sh, cu);
+ }
+
+ next = strchr(p, ',');
+ if (!next)
+ break;
+ p = next + 1;
+ }
+}
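
The disable_cu parameter takes a comma-separated list of se.sh.cu triples; for example, booting with amdgpu.disable_cu=1.0.3,1.0.4 sets bits 3 and 4 in mask[1 * max_sh + 0]. A hypothetical helper, not part of the patch, showing how such a mask would be consulted:

static bool example_cu_disabled(const unsigned *mask, unsigned max_sh,
				unsigned se, unsigned sh, unsigned cu)
{
	return (mask[se * max_sh + sh] >> cu) & 1;
}
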
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index dc06cbda7be6..51321e154c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,4 +27,6 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b78e..a31d7ef3032c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -33,6 +33,8 @@
#include "amdgpu.h"
#include "atom.h"
+#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+
/*
* IB
* IBs (Indirect Buffers) and areas of GPU accessible memory where
@@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
- struct fence *hwf;
uint64_t ctx;
unsigned i;
@@ -160,10 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
- r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
+ r = amdgpu_vm_flush(ring, job);
if (r) {
amdgpu_ring_undo(ring);
return r;
@@ -193,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_hdp_invalidate)
amdgpu_ring_emit_hdp_invalidate(ring);
- r = amdgpu_fence_emit(ring, &hwf);
+ r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
@@ -203,17 +201,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
/* wrap the last IB with fence */
- if (job && job->uf_bo) {
- uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
- addr += job->uf_offset;
- amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
- if (f)
- *f = fence_get(hwf);
-
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
@@ -296,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
if (!ring || !ring->ready)
continue;
- r = amdgpu_ring_test_ib(ring);
+ r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
if (r) {
ring->ready = false;
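
Two behavioural notes on the IB path above: the per-ring IB tests now take an explicit timeout (AMDGPU_IB_TEST_TIMEOUT, one second), so a hung ring fails the test after a bounded wait, and the user fence is emitted directly from job->uf_addr, so no BO reference has to be kept around just to recompute the address at emission time.
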
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 835a3fa8d8df..278708f5a744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+ int i, j;
+ for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+ struct amdgpu_irq_src *src = adev->irq.sources[i];
+ if (!src)
+ continue;
+ for (j = 0; j < src->num_types; j++)
+ amdgpu_irq_update(adev, src, j);
+ }
+}
+
/**
* amdgpu_irq_get - enable interrupt
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index e124b59f39c1..7ef09352e534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f0dafa514fe4..6674d40eb3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,21 +28,15 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
- amd_sched_job_put(&job->base);
-}
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
- struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
-
- amd_sched_job_put(&job->base);
+ job->base.sched->name,
+ atomic_read(&job->ring->fence_drv.last_seq),
+ job->ring->fence_drv.sync_seq);
+ amdgpu_gpu_reset(job->adev);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
- INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
return r;
}
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- unsigned i;
struct fence *f;
+ unsigned i;
+
/* use sched fence if available */
- f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+ f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
- fence_put(job->fence);
+ amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
- amdgpu_bo_unref(&job->uf_bo);
- amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- if (!job->base.use_sched)
- kfree(job);
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ kfree(job);
}
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
{
- struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+ amdgpu_job_free_resources(job);
+
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
- struct fence *fence;
int r;
job->ring = ring;
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func, owner, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
job->owner = owner;
job->ctx = entity->fence_context;
- *f = fence_get(fence);
+ *f = fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->base,
- &job->vm_id, &job->vm_pd_addr);
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
@@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
}
job = to_amdgpu_job(sched_job);
- r = amdgpu_sync_wait(&job->sync);
- if (r) {
- DRM_ERROR("failed to sync wait (%d)\n", r);
- return NULL;
- }
+ BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, job, &fence);
- if (r) {
+ if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
- goto err;
- }
-err:
- job->fence = fence;
- amdgpu_job_free(job);
+ /* if gpu reset, hw fence will be replaced here */
+ fence_put(job->fence);
+ job->fence = fence_get(fence);
+ amdgpu_job_free_resources(job);
return fence;
}
const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
- .begin_job = amd_sched_job_begin,
- .finish_job = amd_sched_job_finish,
+ .timedout_job = amdgpu_job_timedout,
+ .free_job = amdgpu_job_free_cb
};
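
With the timeout and free handling moved into amd_sched_backend_ops above, the job lifecycle becomes: init the scheduler job, take a reference on its finished fence for the caller, release the IB backing memory via amdgpu_job_free_resources(), then push the job; free_job() runs later from the scheduler. A condensed sketch of the submit path, using only the calls shown above:

static int example_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
			  struct amd_sched_entity *entity, void *owner,
			  struct fence **f)
{
	int r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);

	if (r)
		return r;

	job->owner = owner;
	job->ctx = entity->fence_context;
	*f = fence_get(&job->base.s_fence->finished);	/* scheduler-owned fence */
	amdgpu_job_free_resources(job);	/* IB memory is released once the fence signals */
	amd_sched_entity_push_job(&job->base);
	return 0;
}
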
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d851ea15059f..d942654a1de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (amdgpu_device_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
amdgpu_amdkfd_device_fini(adev);
@@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
out:
- if (r)
+ if (r) {
+ /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+ if (adev->rmmio && amdgpu_device_is_px(dev))
+ pm_runtime_put_noidle(dev->dev);
amdgpu_driver_unload_kms(dev);
-
+ }
return r;
}
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+ struct drm_amdgpu_query_fw *query_fw,
+ struct amdgpu_device *adev)
+{
+ switch (query_fw->fw_type) {
+ case AMDGPU_INFO_FW_VCE:
+ fw_info->ver = adev->vce.fw_version;
+ fw_info->feature = adev->vce.fb_version;
+ break;
+ case AMDGPU_INFO_FW_UVD:
+ fw_info->ver = adev->uvd.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GMC:
+ fw_info->ver = adev->mc.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GFX_ME:
+ fw_info->ver = adev->gfx.me_fw_version;
+ fw_info->feature = adev->gfx.me_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_PFP:
+ fw_info->ver = adev->gfx.pfp_fw_version;
+ fw_info->feature = adev->gfx.pfp_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_CE:
+ fw_info->ver = adev->gfx.ce_fw_version;
+ fw_info->feature = adev->gfx.ce_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC:
+ fw_info->ver = adev->gfx.rlc_fw_version;
+ fw_info->feature = adev->gfx.rlc_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_MEC:
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->gfx.mec_fw_version;
+ fw_info->feature = adev->gfx.mec_feature_version;
+ } else if (query_fw->index == 1) {
+ fw_info->ver = adev->gfx.mec2_fw_version;
+ fw_info->feature = adev->gfx.mec2_feature_version;
+ } else
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_FW_SMC:
+ fw_info->ver = adev->pm.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_SDMA:
+ if (query_fw->index >= adev->sdma.num_instances)
+ return -EINVAL;
+ fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+ fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* Userspace get information ioctl
*/
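
amdgpu_firmware_info() above factors the firmware-version lookup out of the ioctl so the new debugfs dump further down can reuse it. A minimal kernel-side usage sketch (illustrative only):

static void example_print_mec_fw(struct amdgpu_device *adev)
{
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw = {
		.fw_type = AMDGPU_INFO_FW_GFX_MEC,
		.index = 0,			/* 0 = MEC, 1 = MEC2 */
	};

	if (!amdgpu_firmware_info(&fw_info, &query_fw, adev))
		DRM_INFO("MEC feature %u, firmware 0x%08x\n",
			 fw_info.feature, fw_info.ver);
}
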
@@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_TIMESTAMP:
- ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+ ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_FW_VERSION: {
struct drm_amdgpu_info_firmware fw_info;
+ int ret;
/* We only support one instance of each IP block right now. */
if (info->query_fw.ip_instance != 0)
return -EINVAL;
- switch (info->query_fw.fw_type) {
- case AMDGPU_INFO_FW_VCE:
- fw_info.ver = adev->vce.fw_version;
- fw_info.feature = adev->vce.fb_version;
- break;
- case AMDGPU_INFO_FW_UVD:
- fw_info.ver = adev->uvd.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GMC:
- fw_info.ver = adev->mc.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GFX_ME:
- fw_info.ver = adev->gfx.me_fw_version;
- fw_info.feature = adev->gfx.me_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_PFP:
- fw_info.ver = adev->gfx.pfp_fw_version;
- fw_info.feature = adev->gfx.pfp_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_CE:
- fw_info.ver = adev->gfx.ce_fw_version;
- fw_info.feature = adev->gfx.ce_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_RLC:
- fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = adev->gfx.rlc_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0) {
- fw_info.ver = adev->gfx.mec_fw_version;
- fw_info.feature = adev->gfx.mec_feature_version;
- } else if (info->query_fw.index == 1) {
- fw_info.ver = adev->gfx.mec2_fw_version;
- fw_info.feature = adev->gfx.mec2_feature_version;
- } else
- return -EINVAL;
- break;
- case AMDGPU_INFO_FW_SMC:
- fw_info.ver = adev->pm.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_SDMA:
- if (info->query_fw.index >= adev->sdma.num_instances)
- return -EINVAL;
- fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
- fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
- break;
- default:
- return -EINVAL;
- }
+ ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+ if (ret)
+ return ret;
+
return copy_to_user(out, &fw_info,
min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
}
@@ -566,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_uvd_free_handles(adev, file_priv);
+ amdgpu_vce_free_handles(adev, file_priv);
+
amdgpu_vm_fini(adev, &fpriv->vm);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -590,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
-
- amdgpu_uvd_free_handles(adev, file_priv);
- amdgpu_vce_free_handles(adev, file_priv);
}
/*
@@ -756,3 +773,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_amdgpu_info_firmware fw_info;
+ struct drm_amdgpu_query_fw query_fw;
+ int ret, i;
+
+ /* VCE */
+ query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* UVD */
+ query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* GMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* ME */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* PFP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* CE */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+ query_fw.index = 0;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC2 */
+ if (adev->asic_type == CHIP_KAVERI ||
+ (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+ query_fw.index = 1;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+ }
+
+ /* SMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* SDMA */
+ query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, fw_info.feature, fw_info.ver);
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_firmware_info_list[] = {
+ {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
+ ARRAY_SIZE(amdgpu_firmware_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7ecea83ce453..6f0873c75a25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_bo *rbo;
+ struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
@@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
/* move_notify is called before move happens */
amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+
+ trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 0e13d80d2a95..5cc7052e391d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size, i;
+ int size;
if (adev->pp_enabled)
size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
- for (i = 0; i < size; i++) {
- sprintf(buf + i, "%02x", table[i]);
- }
- sprintf(buf + i, "\n");
+ memcpy(buf, table, size);
return size;
}
@@ -347,6 +344,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
return size;
}
@@ -363,7 +362,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -377,6 +378,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@@ -391,6 +394,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
return size;
}
@@ -407,7 +412,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -421,6 +428,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@@ -435,6 +444,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
return size;
}
@@ -451,7 +462,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -465,6 +478,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_sclk_od(adev);
+ else if (adev->pm.funcs->get_sclk_od)
+ value = adev->pm.funcs->get_sclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_sclk_od) {
+ adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_mclk_od(adev);
+ else if (adev->pm.funcs->get_mclk_od)
+ value = adev->pm.funcs->get_mclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_mclk_od) {
+ adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
fail:
return count;
}
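
The pp_dpm_sclk/mclk/pcie writers above now skip newline characters instead of silently dropping the last byte, and each remaining character is parsed as one power level; writing "02\n", for example, yields mask == (1 << 0) | (1 << 2) and forces levels 0 and 2. These files, plus the new pp_sclk_od/pp_mclk_od pair, also work without powerplay by falling back to the legacy adev->pm.funcs callbacks when those are implemented.
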
@@ -490,6 +597,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
amdgpu_get_pp_dpm_pcie,
amdgpu_set_pp_dpm_pcie);
+static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_sclk_od,
+ amdgpu_set_pp_sclk_od);
+static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_mclk_od,
+ amdgpu_set_pp_mclk_od);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1108,22 +1221,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_table\n");
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
}
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_sclk_od\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_mclk_od\n");
+ return ret;
+ }
+
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1146,10 +1271,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
}
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
+ device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 82256558e0f5..c5738a22b690 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
+ pp_init->powercontainment_enabled = amdgpu_powercontainment;
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 870f9494252c..85aeb0a804bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -28,6 +28,7 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -48,6 +49,7 @@
*/
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -73,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+
return 0;
}
@@ -125,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
mb();
amdgpu_ring_set_wptr(ring);
+
+ if (ring->funcs->end_use)
+ ring->funcs->end_use(ring);
}
/**
@@ -137,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
ring->wptr = ring->wptr_old;
-}
-
-/**
- * amdgpu_ring_backup - Back up the content of a ring
- *
- * @ring: the ring we want to back up
- *
- * Saves all unprocessed commits from a ring, returns the number of dwords saved.
- */
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data)
-{
- unsigned size, ptr, i;
-
- *data = NULL;
-
- if (ring->ring_obj == NULL)
- return 0;
-
- /* it doesn't make sense to save anything if all fences are signaled */
- if (!amdgpu_fence_count_emitted(ring))
- return 0;
-
- ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- size = ring->wptr + (ring->ring_size / 4);
- size -= ptr;
- size &= ring->ptr_mask;
- if (size == 0)
- return 0;
-
- /* and then save the content of the ring */
- *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
- if (!*data)
- return 0;
- for (i = 0; i < size; ++i) {
- (*data)[i] = ring->ring[ptr++];
- ptr &= ring->ptr_mask;
- }
-
- return size;
-}
-
-/**
- * amdgpu_ring_restore - append saved commands to the ring again
- *
- * @ring: ring to append commands to
- * @size: number of dwords we want to write
- * @data: saved commands
- *
- * Allocates space on the ring and restore the previously saved commands.
- */
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data)
-{
- int i, r;
-
- if (!size || !data)
- return 0;
-
- /* restore the saved ring content */
- r = amdgpu_ring_alloc(ring, size);
- if (r)
- return r;
-
- for (i = 0; i < size; ++i) {
- amdgpu_ring_write(ring, data[i]);
- }
- amdgpu_ring_commit(ring);
- kfree(data);
- return 0;
+ if (ring->funcs->end_use)
+ ring->funcs->end_use(ring);
}
/**
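
amdgpu_ring_backup()/amdgpu_ring_restore() can go away because a GPU reset no longer replays raw ring contents: the scheduler keeps the unfinished jobs and re-submits them through amd_sched_job_recovery(), as wired up in amdgpu_gpu_reset() earlier in this patch. The new begin_use/end_use ring callbacks invoked from amdgpu_ring_alloc(), amdgpu_ring_commit() and amdgpu_ring_undo() give an IP block a hook around every submission.
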
@@ -260,14 +200,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
- return r;
- }
- ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
- ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@ -276,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
- spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r);
@@ -310,6 +241,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
r = amdgpu_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
+
+ memset((void *)ring->ring, 0, ring->ring_size);
+
amdgpu_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(adev->dev, "(%d) ring map failed\n", r);
@@ -347,7 +281,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
if (ring_obj) {
r = amdgpu_bo_reserve(ring_obj, false);
@@ -358,6 +291,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
}
amdgpu_bo_unref(&ring_obj);
}
+ amdgpu_debugfs_ring_fini(ring);
}
/*
@@ -365,57 +299,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+/* Layout of the file: a 12-byte header consisting of
+ * - rptr
+ * - wptr
+ * - driver's copy of wptr
+ *
+ * followed by n words of ring data
+ */
+static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int roffset = (unsigned long)node->info_ent->data;
- struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
- uint32_t rptr, wptr, rptr_next;
- unsigned i;
-
- wptr = amdgpu_ring_get_wptr(ring);
- seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
-
- rptr = amdgpu_ring_get_rptr(ring);
- rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
-
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
- ring->wptr, ring->wptr);
-
- if (!ring->ready)
- return 0;
-
- /* print 8 dw before current rptr as often it's the last executed
- * packet that is the root issue
- */
- i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- while (i != rptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+ struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+ int r, i;
+ uint32_t value, result, early[3];
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ result = 0;
+
+ if (*pos < 12) {
+ early[0] = amdgpu_ring_get_rptr(ring);
+ early[1] = amdgpu_ring_get_wptr(ring);
+ early[2] = ring->wptr;
+ for (i = *pos / 4; i < 3 && size; i++) {
+ r = put_user(early[i], (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
}
- while (i != wptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
+
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t*)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
}
- return 0;
+
+ return result;
}
-static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
-static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_ring_read,
+ .llseek = default_llseek
+};
#endif
@@ -423,28 +362,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
- unsigned i;
- struct drm_info_list *info;
- char *name;
-
- for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
- info = &amdgpu_debugfs_ring_info_list[i];
- if (!info->data)
- break;
- }
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ char name[32];
- if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
- return -ENOSPC;
-
- name = &amdgpu_debugfs_ring_names[i][0];
sprintf(name, "amdgpu_ring_%s", ring->name);
- info->name = name;
- info->show = amdgpu_debugfs_ring_info;
- info->driver_features = 0;
- info->data = (void*)(uintptr_t)offset;
- return amdgpu_debugfs_add_files(adev, info, 1);
+ ent = debugfs_create_file(name,
+ S_IFREG | S_IRUGO, root,
+ ring, &amdgpu_debugfs_ring_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ i_size_write(ent->d_inode, ring->ring_size + 12);
+ ring->ent = ent;
#endif
return 0;
}
+
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove(ring->ent);
+#endif
+}
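For reference, a minimal userspace sketch of how the new per-ring debugfs file could be read; this is not part of the patch, and the path below (DRM debugfs root /sys/kernel/debug/dri/0/ and a ring named "gfx") is an assumption that depends on the system.

/* Hypothetical standalone reader for an amdgpu_ring_<name> debugfs file.
 * Per the layout comment above: three 32-bit words (rptr, wptr, driver's
 * copy of wptr) followed by the ring contents.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_ring_gfx"; /* assumed path */
	uint32_t hdr[3];
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, hdr, sizeof(hdr)) != sizeof(hdr)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("rptr=0x%08x wptr=0x%08x driver wptr=0x%08x\n",
	       hdr[0], hdr[1], hdr[2]);
	close(fd);
	return 0;
}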
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 48618ee324eb..d8af37a845f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %d",
+ seq_printf(m, " protected by 0x%08x on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 34a92808bbd4..5c8d3022fb87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
}
/**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
*
* @sync: the sync object
+ * @ring: optional ring to use for test
*
- * Returns true if all fences in the sync object are signaled.
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
*/
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
@@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+ if (ring && s_fence) {
+ /* For fences from the same ring it is sufficient
+			 * that they have been scheduled.
+ */
+ if (s_fence->sched == &ring->sched) {
+ if (fence_is_signaled(&s_fence->scheduled))
+ continue;
+
+ return &s_fence->scheduled;
+ }
+ }
if (fence_is_signaled(f)) {
hash_del(&e->node);
@@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
continue;
}
- return false;
+ return f;
}
- return true;
+ return NULL;
}
/**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ * amdgpu_sync_get_fence - get the next fence from the sync object
*
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
+ * @sync: sync object to use
*
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
+ * Gets and removes the next not-yet-signaled fence from the sync object.
*/
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence)
-{
- struct amdgpu_sync_entry *e, *newone;
- struct hlist_node *tmp;
- int i;
-
- /* Allocate the new entry before moving the old ones */
- newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
- if (!newone)
- return -ENOMEM;
-
- hash_for_each_safe(src->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
-
- hash_del(&e->node);
- if (fence_is_signaled(f)) {
- fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- if (amdgpu_sync_add_later(dst, f)) {
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- hash_add(dst->fences, &e->node, f->context);
- }
-
- hash_add(src->fences, &newone->node, fence->context);
- newone->fence = fence_get(fence);
-
- return 0;
-}
-
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
@@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
return NULL;
}
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
-{
- struct amdgpu_sync_entry *e;
- struct hlist_node *tmp;
- int i, r;
-
- hash_for_each_safe(sync->fences, i, tmp, e, node) {
- r = fence_wait(e->fence, false);
- if (r)
- return r;
-
- hash_del(&e->node);
- fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
- }
-
- return 0;
-}
-
/**
* amdgpu_sync_free - free the sync object
*
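A hypothetical caller, not part of this patch, could use the new peek semantics to wait for a sync object to drain without consuming its entries; a minimal sketch, assuming the usual fence API:

/* Sketch only: wait until no unsignaled fence is left in the sync object.
 * amdgpu_sync_peek_fence() drops signaled entries as it walks the hash and
 * returns the first still-pending fence without removing it, so waiting on
 * that fence and peeking again eventually yields NULL.
 */
static int example_sync_drain(struct amdgpu_sync *sync)
{
	struct fence *f;
	int r;

	while ((f = amdgpu_sync_peek_fence(sync, NULL))) {
		r = fence_wait(f, false);
		if (r)
			return r;
	}
	return 0;
}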
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 26a5f4acf584..0d8d65eb46cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -11,19 +11,68 @@
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace
+TRACE_EVENT(amdgpu_mm_rreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_mm_wreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
TRACE_EVENT(amdgpu_bo_create,
TP_PROTO(struct amdgpu_bo *bo),
TP_ARGS(bo),
TP_STRUCT__entry(
__field(struct amdgpu_bo *, bo)
__field(u32, pages)
+ __field(u32, type)
+ __field(u32, prefer)
+ __field(u32, allow)
+ __field(u32, visible)
),
TP_fast_assign(
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
+ __entry->type = bo->tbo.mem.mem_type;
+ __entry->prefer = bo->prefered_domains;
+ __entry->allow = bo->allowed_domains;
+ __entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+
+ TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
+ __entry->bo, __entry->pages, __entry->type,
+ __entry->prefer, __entry->allow, __entry->visible)
);
TRACE_EVENT(amdgpu_cs,
@@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
- TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
- uint64_t pd_addr),
- TP_ARGS(vm, ring, vmid, pd_addr),
+ TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+ TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, ring)
__field(u32, vmid)
__field(u64, pd_addr)
+ __field(u32, needs_flush)
),
TP_fast_assign(
__entry->vm = vm;
__entry->ring = ring;
- __entry->vmid = vmid;
- __entry->pd_addr = pd_addr;
+ __entry->vmid = job->vm_id;
+ __entry->pd_addr = job->vm_pd_addr;
+ __entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
- __entry->ring, __entry->vmid, __entry->pd_addr)
+ TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->vm, __entry->ring, __entry->vmid,
+ __entry->pd_addr, __entry->needs_flush)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set,
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, list)
__field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
),
TP_fast_assign(
__entry->list = list;
__entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
),
- TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+ TP_printk("list=%p, bo=%p, bo_size = %Ld",
+ __entry->list,
+ __entry->bo,
+ __entry->bo_size)
+);
+
+TRACE_EVENT(amdgpu_cs_bo_status,
+ TP_PROTO(uint64_t total_bo, uint64_t total_size),
+ TP_ARGS(total_bo, total_size),
+ TP_STRUCT__entry(
+ __field(u64, total_bo)
+ __field(u64, total_size)
+ ),
+
+ TP_fast_assign(
+ __entry->total_bo = total_bo;
+ __entry->total_size = total_size;
+ ),
+	TP_printk("total bo count = %Ld, total bo size = %Ld",
+ __entry->total_bo, __entry->total_size)
+);
+
+TRACE_EVENT(amdgpu_ttm_bo_move,
+ TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+ TP_ARGS(bo, new_placement, old_placement),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ ),
+ TP_printk("bo=%p from:%d to %d with size = %Ld",
+ __entry->bo, __entry->old_placement,
+ __entry->new_placement, __entry->bo_size)
);
#endif
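The two new register-access events are meant to be fired from the MMIO helpers; the helper below is only an illustration of the expected call pattern, not the corresponding amdgpu_device.c hunk.

/* Illustrative only: a register-read helper firing the new tracepoint with
 * a device identifier (e.g. the PCI device id), the register offset and the
 * value that was read.
 */
static uint32_t example_mm_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	uint32_t ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));

	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}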
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b9053af4762..9b61c8ba7aaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
bo->resv, &fence);
- /* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, fence,
- evict, no_wait_gpu, new_mem);
+ if (r)
+ return r;
+
+ r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
fence_put(fence);
return r;
}
@@ -334,7 +335,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -367,7 +368,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
return -EINVAL;
adev = amdgpu_get_adev(bo->bdev);
+
+ /* remember the eviction */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
return 0;
@@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
if (r) {
return r;
}
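The new num_evictions counter pairs with the last_eviction_counter check added to amdgpu_vm_get_pt_bos() later in this series: TTM bumps the counter on every eviction and the VM code revalidates its page tables only when the value has moved. A minimal sketch of that handshake; the example_* names are hypothetical.

/* Producer side (buffer moves): every eviction bumps a global counter. */
static void example_note_eviction(struct amdgpu_device *adev, bool evict)
{
	if (evict)
		atomic64_inc(&adev->num_evictions);
}

/* Consumer side (VM code): skip page-table validation when nothing was
 * evicted since the VM was last validated.
 */
static bool example_vm_needs_validation(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
	return atomic64_read(&adev->num_evictions) != vm->last_eviction_counter;
}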
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d9c88d13f8db..b11f4e8868d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -40,9 +40,16 @@
#include "uvd/uvd_4_2_d.h"
/* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS 1000
+#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+/* Firmware versions for VI */
+#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
+#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
+#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
+#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
+
/* Polaris10/11 firmware version */
-#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
+#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
@@ -246,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
+ break;
+ case CHIP_CARRIZO:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
+ break;
+ case CHIP_FIJI:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
+ break;
+ case CHIP_STONEY:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
+ break;
+ default:
+ adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
+ }
+
return 0;
}
@@ -346,8 +369,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
- amdgpu_uvd_note_usage(adev);
-
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
if (r) {
@@ -438,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
unsigned fs_in_mb = width_in_mb * height_in_mb;
unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
- unsigned min_ctx_size = 0;
+ unsigned min_ctx_size = ~0;
image_size = width * height;
image_size += image_size / 2;
@@ -557,7 +578,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
- if (adev->asic_type < CHIP_POLARIS10){
+ if (!adev->uvd.use_ctx_buf){
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -662,7 +683,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
}
DRM_ERROR("No more free UVD handles!\n");
- return -EINVAL;
+ return -ENOSPC;
case 1:
/* it's a decode msg, calc buffer sizes */
@@ -913,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- amdgpu_uvd_note_usage(ctx.parser->adev);
-
return 0;
}
@@ -968,7 +987,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err_free;
@@ -1106,24 +1125,18 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
if (fences == 0 && handles == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
- /* just work around for uvd clock remain high even
- * when uvd dpm disabled on Polaris10 */
- if (adev->asic_type == CHIP_POLARIS10)
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}
} else {
- schedule_delayed_work(&adev->uvd.idle_work,
- msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
}
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
- set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
- msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
if (set_clocks) {
if (adev->pm.dpm_enabled) {
@@ -1133,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
}
}
}
+
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+}
+
+/**
+ * amdgpu_uvd_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct fence *fence;
+ long r;
+
+ r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ goto error;
+ }
+
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+ }
+
+ r = fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+
+error:
+ fence_put(fence);
+ return r;
+}
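The FW_x_y_z macros above pack major/minor/patch into the upper three bytes of a 32-bit word so the use_ctx_buf checks become plain unsigned comparisons. A standalone sketch of the encoding (not part of the patch):

/* Pack a UVD firmware version the same way as the FW_x_y_z macros:
 * major in bits 31..24, minor in 23..16, patch in 15..8.
 * e.g. FW_1_65_10 == (1 << 24) | (65 << 16) | (10 << 8) == 0x01410a00,
 * so "fw_version >= FW_1_65_10" orders versions correctly.
 */
static inline uint32_t example_uvd_fw_version(uint8_t major, uint8_t minor,
					      uint8_t patch)
{
	return ((uint32_t)major << 24) | ((uint32_t)minor << 16) |
	       ((uint32_t)patch << 8);
}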
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 9a3b449081a7..c850009602d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 875626a2eccb..05865ce35351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -36,7 +36,7 @@
#include "cikd.h"
/* 1 second timeout */
-#define VCE_IDLE_TIMEOUT_MS 1000
+#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
unsigned ucode_version, version_major, version_minor, binary_id;
int i, r;
- INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
-
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
@@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
adev->vce.filp[i] = NULL;
}
+ INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
+ mutex_init(&adev->vce.idle_mutex);
+
return 0;
}
@@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->vce.ring[1]);
release_firmware(adev->vce.fw);
+ mutex_destroy(&adev->vce.idle_mutex);
return 0;
}
@@ -310,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_asic_set_vce_clocks(adev, 0, 0);
}
} else {
- schedule_delayed_work(&adev->vce.idle_work,
- msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+ schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
}
/**
- * amdgpu_vce_note_usage - power up VCE
+ * amdgpu_vce_ring_begin_use - power up VCE
*
- * @adev: amdgpu_device pointer
+ * @ring: amdgpu ring
*
 * Make sure VCE is powered up when we want to use it
*/
-static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
- bool streams_changed = false;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
- set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
- msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
-
- if (adev->pm.dpm_enabled) {
- /* XXX figure out if the streams changed */
- streams_changed = false;
- }
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks;
- if (set_clocks || streams_changed) {
+ mutex_lock(&adev->vce.idle_mutex);
+ set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
+ if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
}
}
+ mutex_unlock(&adev->vce.idle_mutex);
+}
+
+/**
+ * amdgpu_vce_ring_end_use - power VCE down
+ *
+ * @ring: amdgpu ring
+ *
+ * Schedule work to power VCE down again
+ */
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
/**
@@ -357,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
int i, r;
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
uint32_t handle = atomic_read(&adev->vce.handles[i]);
+
if (!handle || adev->vce.filp[i] != filp)
continue;
- amdgpu_vce_note_usage(adev);
-
r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
if (r)
DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
@@ -437,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err;
@@ -469,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
- uint64_t dummy;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -477,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
/* stitch together an VCE destroy msg */
ib->length_dw = 0;
@@ -485,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = 0x00000014; /* len */
- ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
- ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000020; /* len */
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
+ ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
+ ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008; /* len */
ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
@@ -499,7 +508,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err;
@@ -580,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 * if we don't have another free session index.
*/
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
- uint32_t handle, bool *allocated)
+ uint32_t handle, uint32_t *allocated)
{
unsigned i;
- *allocated = false;
-
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -602,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
p->adev->vce.filp[i] = p->filp;
p->adev->vce.img_size[i] = 0;
- *allocated = true;
+ *allocated |= 1 << i;
return i;
}
}
@@ -622,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
unsigned fb_idx = 0, bs_idx = 0;
int session_idx = -1;
- bool destroyed = false;
- bool created = false;
- bool allocated = false;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
int i, r = 0, idx = 0;
- amdgpu_vce_note_usage(p->adev);
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
@@ -641,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
}
- if (destroyed) {
- DRM_ERROR("No other command allowed after destroy!\n");
- r = -EINVAL;
- goto out;
- }
-
switch (cmd) {
- case 0x00000001: // session
+ case 0x00000001: /* session */
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
- if (session_idx < 0)
- return session_idx;
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
size = &p->adev->vce.img_size[session_idx];
break;
- case 0x00000002: // task info
+ case 0x00000002: /* task info */
fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
break;
- case 0x01000001: // create
- created = true;
- if (!allocated) {
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
@@ -675,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
8 * 3 / 2;
break;
- case 0x04000001: // config extension
- case 0x04000002: // pic control
- case 0x04000005: // rate control
- case 0x04000007: // motion estimation
- case 0x04000008: // rdo
- case 0x04000009: // vui
- case 0x05000002: // auxiliary buffer
+ case 0x04000001: /* config extension */
+ case 0x04000002: /* pic control */
+ case 0x04000005: /* rate control */
+ case 0x04000007: /* motion estimation */
+ case 0x04000008: /* rdo */
+ case 0x04000009: /* vui */
+ case 0x05000002: /* auxiliary buffer */
break;
- case 0x03000001: // encode
+ case 0x03000001: /* encode */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
*size, 0);
if (r)
@@ -696,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
- case 0x02000001: // destroy
- destroyed = true;
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
break;
- case 0x05000001: // context buffer
+ case 0x05000001: /* context buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
*size * 2, 0);
if (r)
goto out;
break;
- case 0x05000004: // video bitstream buffer
+ case 0x05000004: /* video bitstream buffer */
tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
tmp, bs_idx);
@@ -715,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
- case 0x05000005: // feedback buffer
+ case 0x05000005: /* feedback buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
4096, fb_idx);
if (r)
@@ -737,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
idx += len / 4;
}
- if (allocated && !created) {
+ if (allocated & ~created) {
DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
- if ((!r && destroyed) || (r && allocated)) {
- /*
- * IB contains a destroy msg or we have allocated an
- * handle and got an error, anyway free the handle
- */
- for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
- atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
}
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
return r;
}
@@ -837,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
* @ring: the engine to test on
*
*/
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct fence *fence = NULL;
- int r;
+ long r;
/* skip vce ring1 ib test for now, since it's not reliable */
if (ring == &ring->adev->vce.ring[1])
@@ -848,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
goto error;
}
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
goto error;
}
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
}
error:
fence_put(fence);
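The begin_use/end_use pair above follows a common delayed-work power-gating pattern: begin_use cancels the idle work and powers the block up only when no work was pending (i.e. the idle handler may already have powered it down), while end_use simply re-arms the timer. A generic sketch of the pattern with hypothetical example_* names:

struct example_block {
	struct mutex idle_mutex;
	struct delayed_work idle_work;
};

/* hypothetical hardware callback, stands in for the dpm/clock calls */
static void example_block_power(struct example_block *blk, bool on);

static void example_block_begin_use(struct example_block *blk)
{
	bool power_up;

	mutex_lock(&blk->idle_mutex);
	/* true only if no idle work was pending, i.e. the block may be off */
	power_up = !cancel_delayed_work_sync(&blk->idle_work);
	if (power_up)
		example_block_power(blk, true);
	mutex_unlock(&blk->idle_mutex);
}

static void example_block_end_use(struct example_block *blk)
{
	schedule_delayed_work(&blk->idle_work, msecs_to_jiffies(1000));
}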
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index f40cf761c66f..63f83d0d985c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30ba11..8e642fc48df4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
/**
* amdgpu_vm_get_bos - add the vm BOs to a duplicates list
*
+ * @adev: amdgpu device pointer
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
* Add the page directory to the BO duplicates list
* for command submission.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates)
{
+ uint64_t num_evictions;
unsigned i;
+ /* We only need to validate the page tables
+ * if they aren't already valid.
+ */
+ num_evictions = atomic64_read(&adev->num_evictions);
+ if (num_evictions == vm->last_eviction_counter)
+ return;
+
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
@@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_unlock(&glob->lru_lock);
}
+static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter) ? true : false;
+}
+
/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
@@ -174,18 +192,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr)
+ struct amdgpu_job *job)
{
- uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
+ uint64_t fence_context = adev->fence_context + ring->idx;
struct fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id;
- unsigned i = ring->idx;
- int r;
+ struct amdgpu_vm_id *id, *idle;
+ struct fence **fences;
+ unsigned i;
+ int r = 0;
+
+ fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
+ GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
mutex_lock(&adev->vm_manager.lock);
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+	/* If we can't find an idle VMID to use, wait till one becomes available */
+ if (&idle->list == &adev->vm_manager.ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct fence_array *array;
+ unsigned j;
+
+ for (j = 0; j < i; ++j)
+ fence_get(fences[j]);
+
+ array = fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ fence_put(fences[j]);
+ kfree(fences);
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+ fence_put(&array->base);
+ if (r)
+ goto error;
+
+ mutex_unlock(&adev->vm_manager.lock);
+ return 0;
+
+ }
+ kfree(fences);
+
+ job->vm_needs_flush = true;
/* Check if we can use a VMID already assigned to this VM */
+ i = ring->idx;
do {
struct fence *flushed;
@@ -196,67 +263,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
+ if (amdgpu_vm_is_gpu_reset(adev, id))
+ continue;
if (atomic64_read(&id->owner) != vm->client_id)
continue;
- if (pd_addr != id->pd_gpu_addr)
+ if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
- if (id->last_user != ring &&
- (!id->last_flush || !fence_is_signaled(id->last_flush)))
+ if (!id->last_flush)
continue;
- flushed = id->flushed_updates;
- if (updates && (!flushed || fence_is_later(updates, flushed)))
+ if (id->last_flush->context != fence_context &&
+ !fence_is_signaled(id->last_flush))
continue;
- /* Good we can use this VMID */
- if (id->last_user == ring) {
- r = amdgpu_sync_fence(ring->adev, sync,
- id->first);
- if (r)
- goto error;
- }
+ flushed = id->flushed_updates;
+ if (updates &&
+ (!flushed || fence_is_later(updates, flushed)))
+ continue;
- /* And remember this submission as user of the VMID */
+		/* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ job->vm_needs_flush = false;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job);
mutex_unlock(&adev->vm_manager.lock);
return 0;
} while (i != ring->idx);
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
-
- if (!amdgpu_sync_is_idle(&id->active)) {
- struct list_head *head = &adev->vm_manager.ids_lru;
- struct amdgpu_vm_id *tmp;
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
- list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
- list) {
- if (amdgpu_sync_is_idle(&id->active)) {
- list_move(&id->list, head);
- head = &id->list;
- }
- }
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
- }
-
- r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
@@ -269,22 +321,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
- id->pd_gpu_addr = pd_addr;
-
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
- id->last_user = ring;
atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = pd_addr;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job);
error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
+static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ const struct amdgpu_ip_block_version *ip_block;
+
+ if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ /* only compute rings */
+ return false;
+
+ ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (!ip_block)
+ return false;
+
+ if (ip_block->major <= 7) {
+ /* gfx7 has no workaround */
+ return true;
+ } else if (ip_block->major == 8) {
+ if (adev->gfx.mec_fw_version >= 673)
+ /* gfx8 is fixed in MEC firmware 673 */
+ return false;
+ else
+ return true;
+ }
+ return false;
+}
+
/**
* amdgpu_vm_flush - hardware flush the vm
*
@@ -294,59 +370,52 @@ error:
*
* Emit a VM flush when it is necessary.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != gds_base ||
- id->gds_size != gds_size ||
- id->gws_base != gws_base ||
- id->gws_size != gws_size ||
- id->oa_base != oa_base ||
- id->oa_size != oa_size);
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+ id->gws_base != job->gws_base ||
+ id->gws_size != job->gws_size ||
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size);
int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
- ring->type == AMDGPU_RING_TYPE_COMPUTE))
+ job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)))
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush &&
- pd_addr != AMDGPU_VM_NO_FLUSH) {
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
struct fence *fence;
- trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
- amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+ trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
mutex_lock(&adev->vm_manager.lock);
- if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
- r = amdgpu_fence_emit(ring, &fence);
- if (r) {
- mutex_unlock(&adev->vm_manager.lock);
- return r;
- }
- fence_put(id->last_flush);
- id->last_flush = fence;
- }
+ fence_put(id->last_flush);
+ id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
- id->gds_base = gds_base;
- id->gds_size = gds_size;
- id->gws_base = gws_base;
- id->gws_size = gws_size;
- id->oa_base = oa_base;
- id->oa_size = oa_size;
- amdgpu_ring_emit_gds_switch(ring, vm_id,
- gds_base, gds_size,
- gws_base, gws_size,
- oa_base, oa_size);
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
}
return 0;
@@ -723,7 +792,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
- * @dst: destination address to map to
+ * @dst: destination address to map to, advanced as the function walks the range
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
@@ -737,49 +806,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
- uint64_t addr;
+ uint64_t cur_pe_start, cur_pe_end, cur_dst;
+ uint64_t addr; /* next GPU address to be updated */
+ uint64_t pt_idx;
+ struct amdgpu_bo *pt;
+ unsigned nptes; /* next number of ptes to be updated */
+ uint64_t next_pe_start;
+
+ /* initialize the variables */
+ addr = start;
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+ cur_pe_start = amdgpu_bo_gpu_offset(pt);
+ cur_pe_start += (addr & mask) * 8;
+ cur_pe_end = cur_pe_start + 8 * nptes;
+ cur_dst = dst;
+
+	/* for next ptb */
+ addr += nptes;
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
/* walk over the address space and update the page tables */
- for (addr = start; addr < end; ) {
- uint64_t pt_idx = addr >> amdgpu_vm_block_size;
- struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
- unsigned nptes;
- uint64_t pe_start;
+ while (addr < end) {
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
- pe_start = amdgpu_bo_gpu_offset(pt);
- pe_start += (addr & mask) * 8;
-
- if (last_pe_end != pe_start) {
+ next_pe_start = amdgpu_bo_gpu_offset(pt);
+ next_pe_start += (addr & mask) * 8;
+ if (cur_pe_end == next_pe_start) {
+			/* The next ptb is consecutive to the current one.
+			 * Don't call amdgpu_vm_frag_ptes yet; both ptbs
+			 * will be updated together later.
+ */
+ cur_pe_end += 8 * nptes;
+ } else {
amdgpu_vm_frag_ptes(adev, vm_update_params,
- last_pe_start, last_pe_end,
- last_dst, flags);
+ cur_pe_start, cur_pe_end,
+ cur_dst, flags);
- last_pe_start = pe_start;
- last_pe_end = pe_start + 8 * nptes;
- last_dst = dst;
- } else {
- last_pe_end += 8 * nptes;
+ cur_pe_start = next_pe_start;
+ cur_pe_end = next_pe_start + 8 * nptes;
+ cur_dst = dst;
}
+		/* for next ptb */
addr += nptes;
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
- last_pe_end, last_dst, flags);
+ amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
+ cur_pe_end, cur_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
@@ -793,6 +888,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
@@ -853,6 +949,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
vm_update_params.ib = &job->ibs[0];
+ r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+ if (r)
+ goto error_free;
+
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
if (r)
@@ -889,6 +989,7 @@ error_free:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
@@ -902,6 +1003,7 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
@@ -932,7 +1034,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
addr += mapping->offset;
if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ return amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
@@ -940,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
- r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -973,6 +1077,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct fence *exclusive;
uint64_t addr;
int r;
@@ -994,8 +1099,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
default:
break;
}
+
+ exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
} else {
addr = 0;
+ exclusive = NULL;
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1007,7 +1115,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+ gtt_flags, pages_addr, vm,
mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
@@ -1054,7 +1163,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+ r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
0, 0, NULL);
kfree(mapping);
if (r)
@@ -1445,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unreserve(vm->page_directory);
if (r)
goto error_free_page_directory;
+ vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
return 0;
@@ -1516,6 +1626,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
+ adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
}
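The reworked walk in amdgpu_vm_update_ptes() above batches updates by merging page-table blocks whose PTE addresses are contiguous, so amdgpu_vm_frag_ptes() runs once per contiguous span rather than once per block. The merge decision in isolation looks roughly like this simplified sketch (not the literal kernel code):

/* Extend the current span while the next page-table block starts exactly
 * where the current one ends; otherwise the caller flushes the span and
 * starts a new one.  Each PTE is 8 bytes.
 */
struct example_span {
	uint64_t pe_start;	/* GPU address of the first PTE in the span */
	uint64_t pe_end;	/* GPU address one past the last PTE */
};

static bool example_try_merge(struct example_span *span,
			      uint64_t next_pe_start, unsigned nptes)
{
	if (span->pe_end != next_pe_start)
		return false;	/* not contiguous, caller must flush */

	span->pe_end += 8ULL * nptes;
	return true;
}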
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 48b6bd671cda..c32eca26155c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5ec1f1e9c983..a5c94b482459 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -50,7 +50,9 @@
#include "gmc/gmc_7_1_sh_mask.h"
MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
+#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
+#endif
static const struct ci_pt_defaults defaults_saturn_xt =
{
@@ -98,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt =
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
+#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
+#endif
static const struct ci_pt_config_reg didt_config_ci[] =
{
@@ -736,19 +742,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = ci_program_pt_config_registers(adev, didt_config_ci);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
ci_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
@@ -3030,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(memory_clock <= pi->mclk_stutter_mode_threshold) &&
- (pi->uvd_enabled == false) &&
+ (!pi->uvd_enabled) &&
(RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2))
memory_level->StutterEnable = true;
@@ -3636,6 +3642,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
ci_setup_default_pcie_tables(adev);
+ /* save a copy of the default DPM table */
+ memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
+ sizeof(struct ci_dpm_table));
+
return 0;
}
@@ -5754,13 +5764,22 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- chip_name = "bonaire";
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f))
+ chip_name = "bonaire_k";
+ else
+ chip_name = "bonaire";
break;
case CHIP_HAWAII:
- chip_name = "hawaii";
+ if (adev->pdev->revision == 0x80)
+ chip_name = "hawaii_k";
+ else
+ chip_name = "hawaii";
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
default: BUG();
}
@@ -6404,6 +6423,186 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
+static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type, char *buf)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = ci_get_current_pcie_speed(adev);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (adev->pm.dpm.forced_level
+ != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!pi->sclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_MCLK:
+ if (!pi->mclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
+ if (!pi->pcie_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].sclk =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].mclk =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
@@ -6438,6 +6637,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+ .print_clock_levels = ci_dpm_print_clock_levels,
+ .force_clock_level = ci_dpm_force_clock_level,
+ .get_sclk_od = ci_dpm_get_sclk_od,
+ .set_sclk_od = ci_dpm_set_sclk_od,
+ .get_mclk_od = ci_dpm_get_mclk_od,
+ .set_mclk_od = ci_dpm_set_mclk_od,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
index faccc30c93bf..91be2996ae7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -193,6 +193,7 @@ struct ci_pt_defaults {
struct ci_power_info {
struct ci_dpm_table dpm_table;
+ struct ci_dpm_table golden_dpm_table;
u32 voltage_control;
u32 mvdd_control;
u32 vddci_control;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 910431808542..4efc901f658c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
uint32_t tmp;
tmp = RREG32(mmCONFIG_CNTL);
- if (state == false)
+ if (!state)
tmp |= CONFIG_CNTL__VGA_DIS_MASK;
else
tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
@@ -1035,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -1158,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
+ int r = -EINVAL;
dev_info(adev->dev, "GPU pci config reset\n");
@@ -1177,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ r = 0;
break;
+ }
udelay(1);
}
/* does asic init need to be run first??? */
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
+
+ return r;
}
static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -1210,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
*/
static int cik_asic_reset(struct amdgpu_device *adev)
{
+ int r;
cik_set_bios_scratch_engine_hung(adev, true);
- cik_gpu_pci_config_reset(adev);
+ r = cik_gpu_pci_config_reset(adev);
cik_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -2014,9 +2022,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_virtual_caps = &cik_get_virtual_caps,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9dc4e24e31e7..ee6466912497 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -224,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 extra_bits = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 4)
- next_rptr++;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1); /* number of DWs to follow */
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
@@ -365,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
u32 me_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
cik_sdma_gfx_stop(adev);
cik_sdma_rlc_stop(adev);
}
@@ -628,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (CIK).
* Returns 0 on success, error on failure.
*/
-static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
+static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -651,11 +639,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
- ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = 1;
@@ -665,28 +654,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 933e425a8154..2a11413ed54a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->mgcg_cgtt_local1 = 0x0;
pi->clock_slow_down_step = 25000;
pi->skip_clock_slow_down = 1;
- pi->enable_nb_ps_policy = 0;
+ pi->enable_nb_ps_policy = false;
pi->caps_power_containment = true;
pi->caps_cac = true;
pi->didt_enabled = false;
@@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
} else { /*pi->caps_vce_pg*/
+ pi->vce_power_gated = gate;
cz_update_vce_dpm(adev);
cz_enable_vce_dpm(adev, !gate);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2ff6..c1b04e9aab57 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v10_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v10_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc59d..d4bf133908b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
- /* flip at hsync for async, default is vsync */
- /* use UPDATE_IMMEDIATE_EN instead for async? */
+ /* flip immediate for async, default is vsync */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+ GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2678,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v11_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2728,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v11_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3433,7 +3434,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a6ef..4fdfab1e9200 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
+#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ /* this is correct only for RGB; black is 0 */
+ WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
+ mdelay(20);
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
@@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 tmp, frame_count;
- int i, j;
+ u32 tmp;
+ int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
}
+ mdelay(20);
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
@@ -2574,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v8_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2624,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v8_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3376,7 +3325,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index b336c918d6a7..b3e19ba4c57f 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!fiji_is_smc_ram_running(adev))
{
- return -EINVAL;;
+ return -EINVAL;
}
if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fc8ff4d3ccf8..d869d058ef24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1583,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
* registers are instanced per SE or SH. 0xffffffff means
* broadcast to all SEs or SHs (CIK).
*/
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
@@ -1659,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v7_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -1746,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2050,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
@@ -2089,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
-
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- control |= ib->length_dw | (vm_id << 24);
-
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@@ -2123,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
* Provides a basic gfx ring test to verify that IBs are working.
* Returns 0 on success, error on failure.
*/
-static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
- int r;
+ long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -2154,21 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err2;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err2;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -2176,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
}
err2:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err1:
@@ -3221,7 +3199,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
}
}
adev->gfx.rlc.cs_data = ci_cs_data;
- adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
src_ptr = adev->gfx.rlc.reg_list;
dws = adev->gfx.rlc.reg_list_size;
@@ -3379,7 +3358,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3387,7 +3366,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3434,7 +3413,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
return orig;
}
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
@@ -3456,7 +3435,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
@@ -3471,7 +3450,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
*
* Halt the RLC ME (MicroEngine) (CIK).
*/
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32(mmRLC_CNTL, 0);
@@ -3547,7 +3526,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(mmRLC_LB_PARAMS, 0x00600408);
WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3587,7 +3566,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3638,7 +3617,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3689,7 +3668,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3867,6 +3846,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
}
}
+static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -4123,7 +4116,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -4183,12 +4176,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v7_0_select_se_sh,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+ .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+ .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+};
+
static int gfx_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
gfx_v7_0_set_ring_funcs(adev);
gfx_v7_0_set_irq_funcs(adev);
gfx_v7_0_set_gds_init(adev);
@@ -5032,16 +5037,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v7_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -5057,7 +5068,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index e747aa935c88..94e3ea147c26 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index c2ef94511f70..b8184617ca25 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] =
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
- mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
- mmCB_HW_CONTROL_2, 0, 0x0f000000,
+ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] =
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] =
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
@@ -787,26 +791,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
-static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
- int r;
+ long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -818,28 +821,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err2;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
goto err2;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
err2:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err1:
@@ -1160,6 +1160,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
+static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_CARRIZO)
+ max_me = 5;
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i ++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
int r;
@@ -1175,6 +1240,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
adev->gfx.rlc.clear_state_obj = NULL;
}
+
+ /* jump table block */
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1231,6 +1308,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+ if (adev->gfx.rlc.cp_table_obj == NULL) {
+ r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.cp_table_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ return r;
+ }
+
+ cz_init_cp_jump_table(adev);
+
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ }
+
return 0;
}
@@ -1612,7 +1729,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
@@ -3339,9 +3455,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
}
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
@@ -3391,13 +3513,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -3501,7 +3623,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3524,7 +3646,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3532,7 +3654,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3693,13 +3815,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
WREG32(mmRLC_SRM_CNTL, data);
}
-static void polaris11_init_power_gating(struct amdgpu_device *adev)
+static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG)) {
data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
@@ -3724,6 +3846,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev)
}
}
+static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+ else
+ data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
@@ -3736,8 +3905,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
- if (adev->asic_type == CHIP_POLARIS11)
- polaris11_init_power_gating(adev);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
+ }
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if (adev->asic_type == CHIP_POLARIS11) {
+ gfx_v8_0_init_power_gating(adev);
+ }
}
}
@@ -4976,7 +5162,7 @@ static int gfx_v8_0_soft_reset(void *handle)
* Fetches a GPU clock counter snapshot.
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -5036,12 +5222,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v8_0_select_se_sh,
+};
+
static int gfx_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
gfx_v8_0_set_gds_init(adev);
@@ -5074,51 +5266,43 @@ static int gfx_v8_0_late_init(void *handle)
return 0;
}
-static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, temp;
- /* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ if (adev->asic_type == CHIP_POLARIS11)
+ /* Send msg to SMU via Powerplay */
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
- if (enable) {
- /* Enable static MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable static MGPG */
+ if (enable)
data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
+ else
data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
}
-static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, temp;
- if (enable) {
- /* Enable dynamic MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable dynamic MGPG */
+ if (enable)
data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
+ else
data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
}
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
@@ -5126,19 +5310,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade
{
uint32_t data, temp;
- if (enable) {
- /* Enable quick PG */
- temp = data = RREG32(mmRLC_PG_CNTL);
- data |= 0x100000;
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable quick PG */
+ if (enable)
+ data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
- data &= ~0x100000;
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+
+ /* Read any GFX register to wake up GFX. */
+ if (!enable)
+ data = RREG32(mmDB_RENDER_CONTROL);
+}
+
+static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+ cz_enable_gfx_cg_power_gating(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ cz_enable_gfx_pipeline_power_gating(adev, true);
+ } else {
+ cz_enable_gfx_cg_power_gating(adev, false);
+ cz_enable_gfx_pipeline_power_gating(adev, false);
}
}
@@ -5146,21 +5374,42 @@ static int gfx_v8_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+ cz_update_gfx_cg_power_gating(adev, enable);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+ break;
case CHIP_POLARIS11:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
- polaris11_enable_gfx_static_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
- else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
- polaris11_enable_gfx_dynamic_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
+ polaris11_enable_gfx_quick_mg_power_gating(adev, true);
else
- polaris11_enable_gfx_quick_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ polaris11_enable_gfx_quick_mg_power_gating(adev, false);
break;
default:
break;
@@ -5174,7 +5423,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
{
uint32_t data;
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5562,6 +5811,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+
adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5687,17 +5938,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
@@ -5726,23 +5966,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
- control |= ib->length_dw | (vm_id << 24);
-
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@@ -6195,9 +6421,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- case CHIP_STONEY:
adev->gfx.rlc.funcs = &iceland_rlc_funcs;
break;
+ case CHIP_STONEY:
case CHIP_CARRIZO:
adev->gfx.rlc.funcs = &cz_rlc_funcs;
break;
@@ -6235,6 +6461,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -6255,16 +6495,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v8_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -6280,7 +6526,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
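The CU-accounting loop in gfx_v8_0_get_cu_info() above walks every shader-engine/shader-array pair, counts the set bits in that pair's active-CU bitmap, and packs an "always on" CU mask. A minimal stand-alone user-space sketch of that bookkeeping follows; the array sizes, the sample bitmaps, and the rule that the first two CUs per array count as always-on are illustrative assumptions, only the bit counting and the (i * 16 + j * 8) packing mirror the hunk above.

/* Stand-alone sketch of the CU-counting loop above: for each SE/SH pair,
 * count the set bits in the active-CU bitmap and fold the first CUs into
 * a packed "always on" mask.  Sizes and sample data are illustrative. */
#include <stdio.h>
#include <stdint.h>

#define MAX_SE 2
#define MAX_SH 2

int main(void)
{
    /* illustrative bitmaps; the driver derives these from harvest registers */
    uint32_t bitmap[MAX_SE][MAX_SH] = {
        { 0x3ff, 0x0ff },
        { 0x3ff, 0x003 },
    };
    unsigned active_cu_number = 0;
    uint32_t ao_cu_mask = 0;

    for (int i = 0; i < MAX_SE; i++) {
        for (int j = 0; j < MAX_SH; j++) {
            uint32_t mask = 1, ao_bitmap = 0;
            unsigned counter = 0;

            for (int k = 0; k < 16; k++) {
                if (bitmap[i][j] & mask) {
                    if (counter < 2)   /* assumed: first two CUs are "always on" */
                        ao_bitmap |= mask;
                    counter++;
                }
                mask <<= 1;
            }
            active_cu_number += counter;
            ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
        }
    }
    printf("active CUs: %u, ao_cu_mask: 0x%08x\n",
           active_cu_number, (unsigned)ao_cu_mask);
    return 0;
}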
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 16a49f53a2fa..bc82c794312c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,7 +26,6 @@
extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 1feb6439cb0b..0b0f08641eed 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,6 +39,7 @@
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v7_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
@@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-/**
- * gmc7_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
@@ -167,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
return 0;
default: BUG();
}
@@ -311,7 +289,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v7_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
@@ -331,7 +309,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v7_0_mc_resume(adev, &save);
@@ -1137,7 +1115,7 @@ static int gmc_v7_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev, &save);
- if (gmc_v7_0_wait_for_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 36fcbbc46ada..0b386b5d2f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9945d5bbf1fe..2aee2c6f3cd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,6 +41,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -102,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
+static const u32 golden_settings_stoney_common[] =
+{
+ mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+ mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -141,50 +147,24 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ (const u32)ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
}
}
-/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
- SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC1_BUSY_MASK);
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -199,8 +179,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
@@ -393,7 +373,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v8_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
@@ -413,7 +393,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v8_0_mc_resume(adev, &save);
@@ -1140,7 +1120,7 @@ static int gmc_v8_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v8_0_mc_stop(adev, &save);
- if (gmc_v8_0_wait_for_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index 973436086b38..fc5001a8119d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 825ccd63f2dc..2f078ad6095c 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 52ee08193295..211839913728 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -25,7 +25,7 @@
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
@@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;;
+ return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
index 1e0769e110fa..5983e3150cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
@@ -21,8 +21,8 @@
*
*/
-#ifndef ICELAND_SMUMGR_H
-#define ICELAND_SMUMGR_H
+#ifndef ICELAND_SMUM_H
+#define ICELAND_SMUM_H
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a789a863d677..a845e883f5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
vid_mapping_table->num_entries = i;
}
+#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
{ 0, 4, 1 },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
+#endif
static const struct kv_pt_config_reg didt_config_kv[] =
{
@@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
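The kv_enable_didt() hunk above replaces direct calls to gfx_v7_0_enter/exit_rlc_safe_mode() with the enter_safe_mode/exit_safe_mode callbacks of the per-ASIC RLC function table. A small stand-alone sketch of that bracket pattern follows; the struct name, the callbacks, and the printouts are illustrative stand-ins, not the actual amdgpu_rlc_funcs definition.

/* Sketch of the indirection used above: bracket register programming with
 * enter/exit callbacks taken from a per-ASIC function table instead of
 * calling an ASIC-specific helper directly. */
#include <stdio.h>

struct rlc_funcs {
    void (*enter_safe_mode)(void *ctx);
    void (*exit_safe_mode)(void *ctx);
};

static void cik_enter(void *ctx) { (void)ctx; printf("RLC: enter safe mode\n"); }
static void cik_exit(void *ctx)  { (void)ctx; printf("RLC: exit safe mode\n"); }

static const struct rlc_funcs cik_rlc_funcs = {
    .enter_safe_mode = cik_enter,
    .exit_safe_mode  = cik_exit,
};

static int program_didt(const struct rlc_funcs *rlc, void *ctx)
{
    int ret = 0;

    rlc->enter_safe_mode(ctx);
    /* ... program the DIDT registers here, bailing out on error ... */
    rlc->exit_safe_mode(ctx);
    return ret;
}

int main(void)
{
    return program_didt(&cik_rlc_funcs, NULL);
}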
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
index 7837f2ecc357..8463245f424f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_StartFanControl ((uint8_t)0x5B)
#define PPSMC_StopFanControl ((uint8_t)0x5C)
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
+#define PPSMC_NoDisplay ((uint8_t)0x5D)
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
+#define PPSMC_HasDisplay ((uint8_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
/* CI/KV/KB */
#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index b556bd0a8797..1351c7e834a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
-
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -406,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v2_4_gfx_stop(adev);
sdma_v2_4_rlc_stop(adev);
}
@@ -580,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
- if (!adev->firmware.smu_load) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA0);
- if (r)
- return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
+ if (!adev->pp_enabled) {
+ if (!adev->firmware.smu_load) {
+ r = sdma_v2_4_load_microcode(adev);
+ if (r)
+ return r;
+ } else {
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA0);
+ if (r)
+ return -EINVAL;
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA1);
+ if (r)
+ return -EINVAL;
+ }
}
/* halt the engine before programing */
@@ -679,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
-static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -702,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
@@ -721,28 +709,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
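The reworked sdma_v2_4_ring_test_ib() above (and its sdma_v3_0 twin below) drops the manual DEADBEEF polling loop in favour of fence_wait_timeout(), which per the hunk returns 0 on timeout, a negative code on error, and a positive value once the fence has signalled. The user-space sketch below mirrors that three-way result handling; fake_wait() and wb_slot are illustrative stand-ins for the fence wait and the write-back slot, not driver APIs.

/* Sketch of the result handling after switching to a timed fence wait:
 * 0 = timed out, negative = wait error, positive = signalled, after which
 * the write-back slot is checked for the expected marker. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static uint32_t wb_slot = 0xDEADBEEF;   /* value the IB is expected to write */

static long fake_wait(long timeout)
{
    (void)timeout;
    return 1;                   /* pretend the fence signalled in time */
}

static long ib_test(long timeout)
{
    long r = fake_wait(timeout);

    if (r == 0)
        return -ETIMEDOUT;      /* wait ran out */
    else if (r < 0)
        return r;               /* wait itself failed */

    return (wb_slot == 0xDEADBEEF) ? 0 : -EINVAL;
}

int main(void)
{
    printf("ib_test() = %ld\n", ib_test(100));
    return 0;
}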
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 532ea88da66a..653ce5ed55ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -415,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -616,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v3_0_gfx_stop(adev);
sdma_v3_0_rlc_stop(adev);
}
@@ -908,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
-static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -931,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
@@ -950,27 +937,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 083893dd68c0..940de1836f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!tonga_is_smc_ram_running(adev))
{
- return -EINVAL;;
+ return -EINVAL;
}
if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f07551476a70..132e613ed674 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -34,6 +34,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate

+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v4_2_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -499,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * uvd_v4_2_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct fence *fence = NULL;
- int r;
-
- r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- if (r) {
- DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
- return r;
-}
-
-/**
* uvd_v4_2_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
@@ -763,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
+ .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
.test_ring = uvd_v4_2_ring_test_ring,
- .test_ib = uvd_v4_2_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e0a76a883d46..101de136ba63 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -31,6 +31,7 @@
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
#include "vi.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v5_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -550,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-/**
- * uvd_v5_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct fence *fence = NULL;
- int r;
-
- r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- if (r) {
- DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
- return r;
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -815,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v5_0_ring_test_ring,
- .test_ib = uvd_v5_0_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index c9929d665c01..7f21102bfb99 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
+#include "bif/bif_5_1_d.h"
+#include "gmc/gmc_8_1_d.h"
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -385,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uint32_t mp_swap_cntl;
int i, j, r;
- /*disable DPG */
- WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+ /* disable DPG */
+ WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* disable byte swapping */
lmi_swap_cntl = 0;
@@ -405,17 +407,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
}
/* disable interupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+ WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
/* stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ WREG32(mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
@@ -424,8 +430,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* initialize UVD memory controller */
- WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
+ WREG32(mmUVD_LMI_CTRL,
+ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
@@ -447,10 +458,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* enable VCPU clock */
- WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+ WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
@@ -484,10 +495,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
return r;
}
/* enable master interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+ WREG32_P(mmUVD_MASTINT_EN,
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
- WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
rb_bufsz = order_base_2(ring->ring_size);
tmp = 0;
@@ -581,6 +594,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -634,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
+ amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+ amdgpu_ring_write(ring, vm_id);
+
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -642,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-/**
- * uvd_v6_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct fence *fence = NULL;
- int r;
+ uint32_t reg;
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
+ if (vm_id < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+ else
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, pd_addr >> 12);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- return r;
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+ amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xC);
+}
+
+static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xE);
}
static bool uvd_v6_0_is_idle(void *handle)
@@ -847,7 +905,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
- if (adev->asic_type == CHIP_FIJI)
+ if (adev->asic_type == CHIP_FIJI ||
+ adev->asic_type == CHIP_POLARIS10)
uvd_v6_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
@@ -912,22 +971,51 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.set_powergating_state = uvd_v6_0_set_powergating_state,
};
-static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v6_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+};
+
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .get_rptr = uvd_v6_0_ring_get_rptr,
+ .get_wptr = uvd_v6_0_ring_get_wptr,
+ .set_wptr = uvd_v6_0_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = uvd_v6_0_ring_emit_ib,
+ .emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v6_0_ring_test_ring,
- .test_ib = uvd_v6_0_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+ if (adev->asic_type >= CHIP_POLARIS10) {
+ adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+ DRM_INFO("UVD is enabled in VM mode\n");
+ } else {
+ adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+ DRM_INFO("UVD is enabled in physical mode\n");
+ }
}
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 45d92aceb485..80a37a602181 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 30e8099e94c5..c271abffd8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,6 +43,7 @@
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
#define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -51,6 +52,7 @@
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
/**
* vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
vce_v3_0_override_vce_clock_gating(adev, false);
}
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_STATUS);
+
+ if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
/**
* vce_v3_0_start - start VCE block
*
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
static int vce_v3_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int idx, i, j, r;
+ int idx, r;
+
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, ring->wptr);
+ WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, ring->wptr);
+ WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
-
if (adev->vce.harvest_config & (1 << idx))
continue;
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
vce_v3_0_mc_resume(adev, idx);
- /* set BUSY flag */
- WREG32_P(mmVCE_STATUS, 1, ~1);
+ WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+ ~VCE_STATUS__JOB_BUSY_MASK);
+
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
else
WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- mdelay(100);
-
WREG32_P(mmVCE_SOFT_RESET, 0,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(mmVCE_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- r = -1;
- }
+ mdelay(100);
+
+ r = vce_v3_0_firmware_loaded(adev);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~1);
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
/* Set Clock-Gating off */
if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
mutex_unlock(&adev->grbm_idx_mutex);
- ring = &adev->vce.ring[0];
- WREG32(mmVCE_RB_RPTR, ring->wptr);
- WREG32(mmVCE_RB_WPTR, ring->wptr);
- WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+ return 0;
+}
- ring = &adev->vce.ring[1];
- WREG32(mmVCE_RB_RPTR2, ring->wptr);
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
- WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+ int idx;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (idx = 0; idx < 2; ++idx) {
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
+ if (idx == 0)
+ WREG32_P(mmGRBM_GFX_INDEX, 0,
+ ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ else
+ WREG32_P(mmGRBM_GFX_INDEX,
+ GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+ ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+ if (adev->asic_type >= CHIP_STONEY)
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+ else
+ WREG32_P(mmVCE_VCPU_CNTL, 0,
+ ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ /* hold on ECPU */
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+ /* clear BUSY flag */
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+ /* Set Clock-Gating off */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+ vce_v3_0_set_vce_sw_clock_gating(adev, false);
+ }
+
+ WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
static int vce_v3_0_hw_fini(void *handle)
{
- return 0;
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vce_v3_0_wait_for_idle(handle);
+ if (r)
+ return r;
+
+ return vce_v3_0_stop(adev);
}
static int vce_v3_0_suspend(void *handle)
@@ -604,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+ else
+ tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -611,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
+ if (adev->asic_type == CHIP_POLARIS10)
+ vce_v3_set_bypass_mode(adev, enable);
+
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
@@ -701,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
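vce_v3_0_start() above now delegates its retry logic to the new vce_v3_0_firmware_loaded() helper: poll the firmware-loaded status bit up to 100 times per attempt, pulse the ECPU soft reset between attempts, and give up with -ETIMEDOUT after 10 attempts. The user-space sketch below mirrors that structure; poll_status(), pulse_reset() and sleep_ms() are illustrative stand-ins for the VCE_STATUS read, the soft-reset writes and mdelay().

/* User-space sketch of the firmware-loaded retry loop: poll a "loaded"
 * flag up to 100 times per attempt, pulse a reset between attempts, and
 * give up with -ETIMEDOUT after 10 attempts. */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static int reads;

static bool poll_status(void) { return ++reads > 150; }  /* loads late */
static void pulse_reset(void) { printf("resetting ECPU\n"); }
static void sleep_ms(int ms)  { (void)ms; /* mdelay(ms) in the kernel */ }

static int firmware_loaded(void)
{
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 100; j++) {
            if (poll_status())
                return 0;       /* firmware reported itself loaded */
            sleep_ms(10);
        }
        pulse_reset();          /* not loaded yet: reset and retry */
        sleep_ms(10);
    }
    return -ETIMEDOUT;
}

int main(void)
{
    int r = firmware_loaded();

    printf("firmware_loaded() = %d after %d status reads\n", r, reads);
    return 0;
}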
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index a65c96029476..03a31c53aec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -203,6 +203,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ r = RREG32(mmGC_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+ return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ WREG32(mmGC_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+
static const u32 tonga_mgcg_cgcg_init[] =
{
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
@@ -533,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -597,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -612,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
- break;
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ return 0;
+ }
udelay(1);
}
-
+ return -EINVAL;
}
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -642,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
*/
static int vi_asic_reset(struct amdgpu_device *adev)
{
+ int r;
+
vi_set_bios_scratch_engine_hung(adev, true);
- vi_gpu_pci_config_reset(adev);
+ r = vi_gpu_pci_config_reset(adev);
vi_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -1133,9 +1161,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
.get_virtual_caps = &vi_get_virtual_caps,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
static int vi_common_early_init(void *handle)
@@ -1156,6 +1181,8 @@ static int vi_common_early_init(void *handle)
adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
adev->didt_rreg = &vi_didt_rreg;
adev->didt_wreg = &vi_didt_wreg;
+ adev->gc_cac_rreg = &vi_gc_cac_rreg;
+ adev->gc_cac_wreg = &vi_gc_cac_wreg;
adev->asic_funcs = &vi_asic_funcs;
@@ -1229,12 +1256,18 @@ static int vi_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
- adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ec4036a09f3e..a625b9137da2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm,
unsigned int get_first_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_num(struct device_queue_manager *dqm);
-extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 16) & 0xFF;
}
-extern inline unsigned int
+static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 60) & 0x0E;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d0d5f4baf72d..80113c335966 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
-inline uint32_t lower_32(uint64_t x);
-inline uint32_t upper_32(uint64_t x);
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7708d90b9da9..4f3849ac8c07 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread);
void kfd_process_create_wq(void)
{
if (!kfd_process_wq)
- kfd_process_wq = create_workqueue("kfd_process_wq");
+ kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}
void kfd_process_destroy_wq(void)
{
if (kfd_process_wq) {
- flush_workqueue(kfd_process_wq);
destroy_workqueue(kfd_process_wq);
kfd_process_wq = NULL;
}
@@ -330,6 +329,7 @@ err_process_pqm_init:
synchronize_rcu();
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
+ mutex_destroy(&process->mutex);
kfd_pasid_free(process->pasid);
err_alloc_pasid:
kfree(process->queues);
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 7c2a916c1e63..5eb895fd98bf 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -37,6 +37,13 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
+/* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+ | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
@@ -47,4 +54,11 @@
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
+/* 1/2/4/8/16 lanes */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index afce1edbe250..a74a0d2ff1ca 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -26,15 +26,6 @@
#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
/*
-* Supported GPU families (aligned with amdgpu_drm.h)
-*/
-#define AMD_FAMILY_UNKNOWN 0
-#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */
-#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
-#define AMD_FAMILY_VI 130 /* Iceland, Tonga */
-#define AMD_FAMILY_CZ 135 /* Carrizo */
-
-/*
* Supported ASIC types
*/
enum amd_asic_type {
@@ -120,6 +111,8 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_SDMA (1 << 8)
#define AMD_PG_SUPPORT_ACP (1 << 9)
#define AMD_PG_SUPPORT_SAMU (1 << 10)
+#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11)
+#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
enum amd_pm_state_type {
/* not used for dpm */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 293329719bba..809759f7bb81 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
#define mmBUS_CNTL 0x1508
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
index ebaf67bb1589..90ff7c8a6011 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
@@ -2823,4 +2823,7 @@
#define mmDC_EDC_CSINVOC_CNT 0x3192
#define mmDC_EDC_RESTORE_CNT 0x3193
+#define mmGC_CAC_IND_INDEX 0x129a
+#define mmGC_CAC_IND_DATA 0x129b
+
#endif /* GFX_8_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
index 7d722458d9f5..4070ca3a68eb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
@@ -8730,8 +8730,6 @@
#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
-#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
-#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
@@ -8764,8 +8762,10 @@
#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
-#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000
-#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
+#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000
+#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14
+#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15
#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
@@ -9102,8 +9102,6 @@
#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
-#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
-#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
@@ -9124,14 +9122,8 @@
#define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
#define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
#define RLC_SRM_DEBUG__DATA__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
-#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
-#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
@@ -17946,8 +17938,6 @@
#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
#define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
@@ -20502,8 +20492,6 @@
#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20558,8 +20546,6 @@
#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20614,8 +20600,6 @@
#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20670,8 +20654,6 @@
#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20726,8 +20708,6 @@
#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20773,4 +20753,84 @@
#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
#endif /* GFX_8_0_SH_MASK_H */
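/*
 * Illustrative sketch (editor's addition, not part of the commit): every
 * field added above follows this file's <REG>__<FIELD>_MASK plus
 * <REG>__<FIELD>__SHIFT pairing, so a field value is read or programmed with
 * the usual mask-and-shift pattern. The helper names and the register
 * variable in the example call are hypothetical.
 */
static inline uint32_t reg_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t reg_field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				     uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * e.g. programming the new DIDT stall-delay HI field:
 *
 *	didt_sq_stall_ctrl = reg_field_set(didt_sq_stall_ctrl,
 *			DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,
 *			DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, delay_hi);
 */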
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index 6f6fb34742d2..ec69869c55ff 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,6 +111,8 @@
#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
+#define mmUVD_GP_SCRATCH8 0x3c0a
+#define mmUVD_GP_SCRATCH9 0x3c0b
#define mmUVD_GP_SCRATCH4 0x3d38
#endif /* UVD_6_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 7464daf89ca1..b86aba9d019f 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -49,6 +49,7 @@ enum cgs_ind_reg {
CGS_IND_REG__SMC,
CGS_IND_REG__UVD_CTX,
CGS_IND_REG__DIDT,
+ CGS_IND_REG_GC_CAC,
CGS_IND_REG__AUDIO_ENDPT
};
@@ -112,20 +113,23 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
CGS_SYSTEM_INFO_PCIE_GEN_INFO,
CGS_SYSTEM_INFO_PCIE_MLW,
+ CGS_SYSTEM_INFO_PCIE_DEV,
+ CGS_SYSTEM_INFO_PCIE_REV,
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
+ CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
struct cgs_system_info {
- uint64_t size;
- uint64_t info_id;
+ uint64_t size;
+ enum cgs_system_info_id info_id;
union {
- void *ptr;
- uint64_t value;
+ void *ptr;
+ uint64_t value;
};
- uint64_t padding[13];
+ uint64_t padding[13];
};
/*
@@ -158,6 +162,10 @@ struct cgs_firmware_info {
uint16_t feature_version;
uint32_t image_size;
uint64_t mc_addr;
+
+ /* only for smc firmware */
+ uint32_t ucode_start_address;
+
void *kptr;
};
@@ -189,7 +197,6 @@ typedef unsigned long cgs_handle_t;
struct cgs_acpi_method_argument {
uint32_t type;
- uint32_t method_length;
uint32_t data_length;
union{
uint32_t value;
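/*
 * Illustrative sketch (editor's addition, not part of the commit): with
 * info_id now typed as enum cgs_system_info_id (see the cgs_system_info hunk
 * above), querying one of the new IDs follows the pattern the hwmgr code in
 * this series already uses. Setting .size before the call is assumed to be
 * required by the CGS backend; the wrapper name is hypothetical.
 */
static int example_query_gfx_se_info(struct pp_hwmgr *hwmgr, uint32_t *num_se)
{
	struct cgs_system_info sys_info = {0};
	int result;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
	result = cgs_query_system_info(hwmgr->device, &sys_info);
	if (result)
		return result;

	*num_se = (uint32_t)sys_info.value;
	return 0;
}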
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index e629f8a9fe93..abbb658bdc1e 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -176,7 +176,7 @@ static int pp_hw_fini(void *handle)
static bool pp_is_idle(void *handle)
{
- return 0;
+ return false;
}
static int pp_wait_for_idle(void *handle)
@@ -536,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
case AMD_PP_EVENT_COMPLETE_INIT:
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
+ case AMD_PP_EVENT_READJUST_POWER_STATE:
+ pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
+ ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
default:
break;
}
@@ -740,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
PP_CHECK_HW(hwmgr);
- if (hwmgr->hwmgr_func->get_pp_table == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return 0;
- }
+ if (!hwmgr->soft_pp_table)
+ return -EINVAL;
- return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+ *table = (char *)hwmgr->soft_pp_table;
+
+ return hwmgr->soft_pp_table_size;
}
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
@@ -759,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
PP_CHECK_HW(hwmgr);
- if (hwmgr->hwmgr_func->set_pp_table == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return 0;
+ if (!hwmgr->hardcode_pp_table) {
+ hwmgr->hardcode_pp_table =
+ kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+
+ if (!hwmgr->hardcode_pp_table)
+ return -ENOMEM;
+
+ /* to avoid powerplay crash when hardcode pptable is empty */
+ memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size);
}
- return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+ memcpy(hwmgr->hardcode_pp_table, buf, size);
+
+ hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
+
+ return amd_powerplay_reset(handle);
}
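/*
 * Illustrative sketch (editor's addition, not part of the commit): with the
 * reworked pp_dpm_get_pp_table()/pp_dpm_set_pp_table() above, a caller
 * exports the soft powerplay table and writes a (possibly edited) copy back;
 * the wrapper below and its name are hypothetical.
 */
static int example_reapply_pp_table(void *pp_handle)
{
	char *table;
	int size;

	size = pp_dpm_funcs.get_pp_table(pp_handle, &table);
	if (size < 0)
		return size;

	/*
	 * set_pp_table() copies the buffer into the driver's hardcode table
	 * (allocated on first use), points soft_pp_table at it and calls
	 * amd_powerplay_reset(), so even a straight write-back restarts the
	 * powerplay state machine with that table.
	 */
	return pp_dpm_funcs.set_pp_table(pp_handle, table, size);
}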
static int pp_dpm_force_clock_level(void *handle,
@@ -806,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle,
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
+static int pp_dpm_get_sclk_od(void *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+}
+
+static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+}
+
+static int pp_dpm_get_mclk_od(void *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+}
+
+static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -828,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.set_pp_table = pp_dpm_set_pp_table,
.force_clock_level = pp_dpm_force_clock_level,
.print_clock_levels = pp_dpm_print_clock_levels,
+ .get_sclk_od = pp_dpm_get_sclk_od,
+ .set_sclk_od = pp_dpm_set_sclk_od,
+ .get_mclk_od = pp_dpm_get_mclk_od,
+ .set_mclk_od = pp_dpm_set_mclk_od,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -909,6 +1004,44 @@ int amd_powerplay_fini(void *handle)
return 0;
}
+int amd_powerplay_reset(void *handle)
+{
+ struct pp_instance *instance = (struct pp_instance *)handle;
+ struct pp_eventmgr *eventmgr;
+ struct pem_event_data event_data = { {0} };
+ int ret;
+
+ if (instance == NULL)
+ return -EINVAL;
+
+ eventmgr = instance->eventmgr;
+ if (!eventmgr || !eventmgr->pp_eventmgr_fini)
+ return -EINVAL;
+
+ eventmgr->pp_eventmgr_fini(eventmgr);
+
+ ret = pp_sw_fini(handle);
+ if (ret)
+ return ret;
+
+ kfree(instance->hwmgr->ps);
+
+ ret = pp_sw_init(handle);
+ if (ret)
+ return ret;
+
+ hw_init_power_state_table(instance->hwmgr);
+
+ if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
+ return -EINVAL;
+
+ ret = eventmgr->pp_eventmgr_init(eventmgr);
+ if (ret)
+ return ret;
+
+ return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+}
+
/* export this function to DAL */
int amd_powerplay_display_configuration_change(void *handle,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index d6635cc4b0fc..635fc4b48184 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
system_config_tasks,
setup_asic_tasks,
enable_dynamic_state_management_tasks,
- enable_clock_power_gatings_tasks,
get_2d_performance_state_tasks,
set_performance_state_tasks,
initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
enable_dynamic_state_management_tasks,
- enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
get_2d_performance_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index 5cd123472db4..b6f45fd01fa6 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc
int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
- /* TODO */
- return 0;
+ return phm_disable_dynamic_state_management(eventmgr->hwmgr);
}
int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 436fc16dabb6..2028980f1ed4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cz_dpm_powerdown_uvd(hwmgr);
} else {
cz_dpm_powerup_uvd(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
@@ -206,25 +206,26 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
cz_enable_disable_vce_dpm(hwmgr, false);
- /* TODO: to figure out why vce can't be poweroff*/
+ cz_dpm_powerdown_vce(hwmgr);
cz_hwmgr->vce_power_gated = true;
} else {
cz_dpm_powerup_vce(hwmgr);
cz_hwmgr->vce_power_gated = false;
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
}
} else {
+ cz_hwmgr->vce_power_gated = bgate;
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 1f14c477d15d..8cc0df9b534a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cz_ps->action = cz_current_ps->action;
- if ((force_high == false) && (cz_ps->action == FORCE_HIGH))
+ if (!force_high && (cz_ps->action == FORCE_HIGH))
cz_ps->action = CANCEL_FORCE_HIGH;
- else if ((force_high == true) && (cz_ps->action != FORCE_HIGH))
+ else if (force_high && (cz_ps->action != FORCE_HIGH))
cz_ps->action = FORCE_HIGH;
else
cz_ps->action = DO_NOTHING;
@@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
+ struct cz_hwmgr *data;
+
+ data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
result = cz_initialize_dpm_defaults(hwmgr);
if (result != 0) {
@@ -1649,7 +1656,7 @@ static void cz_hw_print_display_cfg(
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
uint32_t data = 0;
- if (hw_data->cc6_settings.cc6_setting_changed == true) {
+ if (hw_data->cc6_settings.cc6_setting_changed) {
hw_data->cc6_settings.cc6_setting_changed = false;
@@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr;
- int ret = 0;
-
- cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
- if (cz_hwmgr == NULL)
- return -ENOMEM;
-
- hwmgr->backend = cz_hwmgr;
hwmgr->hwmgr_func = &cz_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
index e1b649bd5344..5afe82068b29 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
@@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
fiji_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ AMD_CG_STATE_UNGATE);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 92912ab20944..120a9e2c3152 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_hwmgr *data;
uint32_t i;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
bool stay_in_boot;
int result;
+ data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_default_on = false;
data->sram_end = SMC_RAM_END;
@@ -699,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
if (0 == result) {
struct cgs_system_info sys_info = {0};
- data->is_tlu_enabled = 0;
+ data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
FIJI_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -734,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -743,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
@@ -1236,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
/**
* Get the location of various tables inside the FW image.
*
@@ -1363,6 +1390,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
}
/**
+* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return if success then 0;
+*/
+static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
* Initial switch from ARB F0->F1
*
* @param hwmgr the address of the powerplay hardware manager.
@@ -1375,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
+static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return fiji_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
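/*
 * Editorial note (not part of the commit): fiji_force_switch_to_arbf0() (and
 * its polaris10 counterpart later in this patch) reads bits [15:8] of
 * ixSMC_SCRATCH9 as the currently active MC arbiter set; if that set is not
 * MC_CG_ARB_FREQ_F0, it copies the active set over F0 and switches back to
 * it, undoing the initial F0->F1 switch performed when DPM starts.
 */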
+
static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
struct fiji_single_dpm_table *dpm_table, uint32_t count)
{
@@ -1397,7 +1450,7 @@ static void fiji_setup_pcie_table_entry(
{
dpm_table->dpm_levels[index].value = pcie_gen;
dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = 1;
+ dpm_table->dpm_levels[index].enabled = true;
}
static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
@@ -1609,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
{
uint32_t count;
uint8_t index;
- int result = 0;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1631,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
VOLTAGE_SCALE)) / 25);
}
- return result;
+ return 0;
}
/**
@@ -3177,6 +3229,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3197,6 +3260,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -3357,6 +3435,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -1);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
+ "Failed to force MCLK DPM0!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -1);
+ }
+
+ if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
+ printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
+ return -1;
+ }
+
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_Voltage_Cntl_Disable) == 0),
+ "Failed to disable voltage DPM during DPM Stop Function!",
+ return -1);
+
+ return 0;
+}
+
static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
uint32_t sources)
{
@@ -3415,6 +3557,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
+static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -3529,6 +3688,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
+static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = fiji_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = fiji_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = fiji_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = fiji_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = fiji_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = fiji_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = fiji_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
+}
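/*
 * Editorial note (not part of the commit): fiji_disable_dpm_tasks() above
 * unwinds the enable path step by step: re-assert THERMAL_PROTECTION_DIS,
 * then disable power containment, SMC CAC, spread spectrum and thermal
 * auto-throttle, stop DPM (PCIe/SCLK/MCLK) via fiji_stop_dpm(), turn off the
 * deep-sleep master switch and ULV, clear the frequency-transition voting
 * clients, ask the SMC to reset to defaults, and finally force the memory
 * arbiter back to the F0 set.
 */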
+
static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -4171,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4182,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4353,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
const struct fiji_power_state *fiji_ps)
{
- int result = 0;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
uint32_t high_limit_count;
@@ -4373,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
fiji_ps->performance_levels[0].memory_clock,
fiji_ps->performance_levels[high_limit_count].memory_clock);
- return result;
+ return 0;
}
static int fiji_generate_dpm_level_enable_mask(
@@ -4632,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -4643,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -5071,42 +5291,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
-static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
@@ -5276,12 +5460,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
return is_update_required;
}
+static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct fiji_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct fiji_power_state *fiji_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+ fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct fiji_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct fiji_power_state *fiji_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+ fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
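/*
 * Worked example (editor's addition, not part of the commit): the *_od
 * callbacks above express overdrive as a percentage over the golden (stock)
 * top DPM level. With a hypothetical golden top SCLK value of 1000 in the
 * DPM table:
 *
 *	fiji_set_sclk_od(hwmgr, 10);	// requests +10%; values above 20 clamp to 20
 *	// requested top engine_clock = 1000 * 10 / 100 + 1000 = 1100
 *	// once the SCLK DPM table reflects that level, fiji_get_sclk_od()
 *	// returns (1100 - 1000) * 100 / 1000 = 10
 *
 * The MCLK pair behaves identically on the memory clock.
 */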
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &fiji_hwmgr_backend_fini,
.asic_setup = &fiji_setup_asic_task,
.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
+ .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
.force_dpm_level = &fiji_dpm_force_dpm_level,
.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
.get_power_state_size = &fiji_get_power_state_size,
@@ -5314,24 +5582,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.get_fan_control_mode = fiji_get_fan_control_mode,
.check_states_equal = fiji_check_states_equal,
.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
- .get_pp_table = fiji_get_pp_table,
- .set_pp_table = fiji_set_pp_table,
.force_clock_level = fiji_force_clock_level,
.print_clock_levels = fiji_print_clock_levels,
+ .get_sclk_od = fiji_get_sclk_od,
+ .set_sclk_od = fiji_set_sclk_od,
+ .get_mclk_od = fiji_get_mclk_od,
+ .set_mclk_od = fiji_set_mclk_od,
};
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data;
- int ret = 0;
-
- data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_fiji_thermal_initialize(hwmgr);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index 170edf5a772d..bf67c2a92c68 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -302,9 +302,6 @@ struct fiji_hwmgr {
bool pg_acp_init;
bool frtc_enabled;
bool frtc_status_changed;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
};
/* To convert to Q8.8 format for firmware */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index db23a4068baf..44658451a8d2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -73,17 +73,18 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
if (!tmp) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
fiji_hwmgr->fast_watermark_threshold = 100;
- tmp = 1;
- fiji_hwmgr->enable_dte_feature = tmp ? false : true;
- fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
- fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+ if (hwmgr->powercontainment_enabled) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ tmp = 1;
+ fiji_hwmgr->enable_dte_feature = tmp ? false : true;
+ fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+ fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+ }
}
}
@@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable CAC in SMC.", result = -1);
+
+ data->cac_enabled = false;
+ }
+ return result;
+}
+
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
+ int smc_result;
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_TDCLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable TDCLimit in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_DTE) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDTE));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable DTE in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable PkgPwrTracking in SMC.",
+ result = smc_result);
+ }
+ data->power_containment_features = 0;
+ }
+
+ return result;
+}
+
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
index 55e58200f33a..fec772421733 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
@@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type {
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+
struct fiji_pt_config_reg {
uint32_t offset;
uint32_t mask;
@@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 7a705cee0cc2..a6abe81bc843 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
struct phm_runtime_table_header *rt_table,
void *input, void *output)
{
- int result = 0;
- void *temp_storage = NULL;
+ int result;
+ void *temp_storage;
if (hwmgr == NULL || rt_table == NULL) {
printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
@@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
return -ENOMEM;
}
+ } else {
+ temp_storage = NULL;
}
result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
- if (NULL != temp_storage)
- kfree(temp_storage);
+ kfree(temp_storage);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index efb77eda7508..789f98ad2615 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
return ret;
}
+int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
+{
+ int ret = -1;
+ bool enabled;
+
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface)) {
+ if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
+ } else {
+ ret = phm_dispatch_table(hwmgr,
+ &(hwmgr->disable_dynamic_state_management),
+ NULL, NULL);
+ }
+
+ enabled = ret == 0 ? false : true;
+
+ cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+ return ret;
+}
+
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
PHM_FUNC_CHECK(hwmgr);
@@ -314,7 +338,7 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (hwmgr->hwmgr_func->store_cc6_data == NULL)
return -EINVAL;
- /* to do pass other display configuration in furture */
+ /* TODO: pass other display configuration in the future */
if (hwmgr->hwmgr_func->store_cc6_data)
hwmgr->hwmgr_func->store_cc6_data(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 20f20e075588..27e07624ac28 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
#include "cgs_common.h"
#include "power_state.h"
#include "hwmgr.h"
@@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
hwmgr->hw_revision = pp_init->rev_id;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
+ hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
switch (hwmgr->chip_family) {
- case AMD_FAMILY_CZ:
+ case AMDGPU_FAMILY_CZ:
cz_hwmgr_init(hwmgr);
break;
- case AMD_FAMILY_VI:
+ case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TONGA:
tonga_hwmgr_init(hwmgr);
@@ -94,6 +96,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
return -EINVAL;
/* do hwmgr finish*/
+ kfree(hwmgr->hardcode_pp_table);
+
kfree(hwmgr->backend);
kfree(hwmgr->start_thermal_controller.function_list);
@@ -530,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
/* initialize vddc_dep_on_dal_pwrl table */
table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
if (NULL == table_clk_vlt) {
printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
index 8f142a74ad08..b5edb5105986 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
@@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate) {
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
polaris10_update_uvd_dpm(hwmgr, true);
polaris10_phm_powerdown_uvd(hwmgr);
} else {
polaris10_phm_powerup_uvd(hwmgr);
polaris10_update_uvd_dpm(hwmgr, false);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
}
return 0;
@@ -125,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
data->vce_power_gated = bgate;
- if (bgate)
+ if (bgate) {
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ polaris10_update_vce_dpm(hwmgr, true);
polaris10_phm_powerdown_vce(hwmgr);
- else
+ } else {
polaris10_phm_powerup_vce(hwmgr);
-
+ polaris10_update_vce_dpm(hwmgr, false);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 91e25f942d90..769636a0c5b5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
/**
* Get the location of various tables inside the FW image.
*
@@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
return 0;
}
+static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
/**
* Initial switch from ARB F0->F1
*
@@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
+static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return polaris10_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -1356,9 +1404,9 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
return result;
}
- /* in order to prevent MC activity from stutter mode to push DPM up.
+ /* In order to prevent MC activity from stutter mode to push DPM up,
* the UVD change complements this by putting the MCLK in
- * a higher state by default such that we are not effected by
+ * a higher state by default such that we are not affected by
 * up threshold or MCLK DPM latency.
*/
levels[0].ActivityLevel = 0x1f;
@@ -1425,7 +1473,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
/* Get MinVoltage and Frequency from DPM0,
* already converted to SMC_UL */
- sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_sclk,
sclk_frequency,
@@ -1461,8 +1509,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_mclk,
table->MemoryACPILevel.MclkFrequency,
@@ -1780,7 +1827,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1831,11 +1878,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+ stretch_amount != 4 && stretch_amount != 5) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
@@ -1890,9 +1934,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
config = VR_SVI2_PLANE_2;
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
} else {
config = VR_STATIC_VOLTAGE;
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
@@ -2262,6 +2305,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -2282,6 +2336,21 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2379,6 +2448,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -1);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -1);
+ }
+
+ if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
+ printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
+ return -1;
+ }
+
+ return 0;
+}
+
static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
bool protection;
@@ -2436,6 +2557,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
+static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2530,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable deep sleep master switch!", result = tmp_result);
+ tmp_result = polaris10_enable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to enable deep sleep master switch!", result = tmp_result);
+
tmp_result = polaris10_start_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to start DPM!", result = tmp_result);
@@ -2559,8 +2701,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
+ int tmp_result, result = 0;
- return 0;
+ tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = polaris10_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = polaris10_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = polaris10_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = polaris10_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = polaris10_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = polaris10_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
}
int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
@@ -2571,13 +2765,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
@@ -2624,17 +2811,22 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_DynamicUVDState);
/* power tune caps Assume disabled */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
+ if (hwmgr->powercontainment_enabled)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
@@ -2706,12 +2898,12 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
}
}
-
- PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
- "Error retrieving EVV voltage value!",
- continue);
-
+ if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+ VOLTAGE_TYPE_VDDC,
+ sclk, vv_id, &vddc) != 0) {
+ printk(KERN_WARNING "failed to retrieving EVV voltage!\n");
+ continue;
+ }
/* need to make sure vddc is less than 2V, or else it could burn the ASIC.
* real voltage level in units of 0.01mV */
@@ -2968,13 +3160,19 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_hwmgr *data;
struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
uint32_t temp_reg;
int result;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_default_on = false;
data->sram_end = SMC_RAM_END;
data->mclk_dpm0_activity_target = 0xa;
@@ -3063,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
if (0 == result) {
struct cgs_system_info sys_info = {0};
- data->is_tlu_enabled = 0;
+ data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
POLARIS10_MAX_HARDWARE_POWERLEVELS;
@@ -3148,7 +3346,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -3157,7 +3355,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
@@ -3446,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
disable_mclk_switching = (1 < info.display_count) ||
disable_mclk_switching_for_frame_lock;
@@ -3950,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
@@ -3962,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
@@ -4123,7 +4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
const struct polaris10_power_state *polaris10_ps)
{
- int result = 0;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t high_limit_count;
@@ -4143,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
polaris10_ps->performance_levels[0].memory_clock,
polaris10_ps->performance_levels[high_limit_count].memory_clock);
- return result;
+ return 0;
}
static int polaris10_generate_dpm_level_enable_mask(
@@ -4226,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
-static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
+int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_nps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- const struct polaris10_power_state *polaris10_cps =
- cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (polaris10_nps->vce_clks.evclk > 0 &&
- (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) {
-
- data->smc_state_table.VceBootLevel =
+ if (!bgate) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ data->smc_state_table.VceBootLevel =
(uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ data->smc_state_table.VceBootLevel = 0;
mm_boot_level_offset = data->dpm_table_start +
offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
@@ -4257,18 +4450,14 @@ static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
cgs_write_ind_register(hwmgr->device,
CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(uint32_t)1 << data->smc_state_table.VceBootLevel);
-
- polaris10_enable_disable_vce_dpm(hwmgr, true);
- } else if (polaris10_nps->vce_clks.evclk == 0 &&
- polaris10_cps != NULL &&
- polaris10_cps->vce_clks.evclk > 0)
- polaris10_enable_disable_vce_dpm(hwmgr, false);
}
+ polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
+
return 0;
}
@@ -4353,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
@@ -4365,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_UnfreezeLevel),
@@ -4422,6 +4611,8 @@ static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
+
+
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int tmp_result, result = 0;
@@ -4455,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
"Failed to generate DPM level enabled mask!",
result = tmp_result);
- tmp_result = polaris10_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update VCE DPM!",
- result = tmp_result);
-
tmp_result = polaris10_update_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update SCLK threshold!",
@@ -4530,6 +4716,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
polaris10_notify_smc_display_change(hwmgr, false);
+
return 0;
}
@@ -4579,6 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
return 0;
}
@@ -4820,42 +5008,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
return result;
}
-static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
@@ -4998,6 +5150,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
+static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct polaris10_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
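+ /* report the top engine-clock level overdrive as a percentage above the golden (default) top level */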
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct polaris10_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
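+ /* report the top memory-clock level overdrive as a percentage above the golden (default) top level */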
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
.backend_init = &polaris10_hwmgr_backend_init,
.backend_fini = &polaris10_hwmgr_backend_fini,
@@ -5036,22 +5271,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
.check_states_equal = polaris10_check_states_equal,
.set_fan_control_mode = polaris10_set_fan_control_mode,
.get_fan_control_mode = polaris10_get_fan_control_mode,
- .get_pp_table = polaris10_get_pp_table,
- .set_pp_table = polaris10_set_pp_table,
.force_clock_level = polaris10_force_clock_level,
.print_clock_levels = polaris10_print_clock_levels,
.enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+ .get_sclk_od = polaris10_get_sclk_od,
+ .set_sclk_od = polaris10_set_sclk_od,
+ .get_mclk_od = polaris10_get_mclk_od,
+ .set_mclk_od = polaris10_set_mclk_od,
};
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data;
-
- data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_polaris10_thermal_initialize(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index afc3434822d1..33c33947e827 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -309,10 +309,6 @@ struct polaris10_hwmgr {
uint32_t up_hyst;
uint32_t disable_dpm_mask;
bool apply_optimized_settings;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
-
uint32_t avfs_vdroop_override_setting;
bool apply_avfs_cks_off_voltage;
uint32_t frame_time_x2;
@@ -356,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
+int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
index ae96f14b827c..b9cb240a135d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -28,10 +28,360 @@
#include "polaris10_smumgr.h"
#include "smu74_discrete.h"
#include "pp_debug.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "oss/oss_3_0_sh_mask.h"
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
+uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { 0xFFFFFFFF }
+};
+
+struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { 0xFFFFFFFF }
+};
+
+struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { 0xFFFFFFFF }
+};
+
+struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
* TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
@@ -209,6 +559,187 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+{
+ uint32_t en = enable ? 1 : 0;
+ int32_t result = 0;
+ uint32_t data;
+
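+	/* Toggle DIDT_CTRL_EN for each enabled ramping block and mirror the new state in DIDTBlock_Info for the SMC */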
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ DIDTBlock_Info &= ~SQ_Enable_MASK;
+ DIDTBlock_Info |= en << SQ_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ DIDTBlock_Info &= ~DB_Enable_MASK;
+ DIDTBlock_Info |= en << DB_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ DIDTBlock_Info &= ~TD_Enable_MASK;
+ DIDTBlock_Info |= en << TD_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ DIDTBlock_Info &= ~TCP_Enable_MASK;
+ DIDTBlock_Info |= en << TCP_Enable_SHIFT;
+ }
+
+ if (enable)
+ result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
+
+ return result;
+}
+
+static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+ struct polaris10_pt_config_reg *cac_config_regs)
+{
+ struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+ uint32_t cache = 0;
+ uint32_t data = 0;
+
+ PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
+
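+	/* CACHE-type entries only accumulate their field values; the cache is OR'd into the next real register write and then cleared */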
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ else {
+ switch (config_regs->type) {
+ case POLARIS10_CONFIGREG_SMC_IND:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
+ break;
+
+ case POLARIS10_CONFIGREG_DIDT_IND:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+ break;
+
+ case POLARIS10_CONFIGREG_GC_CAC_IND:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+ break;
+
+ default:
+ data = cgs_read_register(hwmgr->device, config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+
+ switch (config_regs->type) {
+ case POLARIS10_CONFIGREG_SMC_IND:
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
+ break;
+
+ case POLARIS10_CONFIGREG_DIDT_IND:
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+ break;
+
+ case POLARIS10_CONFIGREG_GC_CAC_IND:
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+ break;
+
+ default:
+ cgs_write_register(hwmgr->device, config_regs->offset, data);
+ break;
+ }
+ cache = 0;
+ }
+
+ config_regs++;
+ }
+
+ return 0;
+}
+
+int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0;
+ uint32_t count, value, value2;
+ struct cgs_system_info sys_info = {0};
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+ if (result == 0)
+ num_se = sys_info.value;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+
+ /* TO DO Pre DIDT disable clock gating */
+ value = 0;
+ value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
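+		/* program the CAC/DIDT config tables once per shader engine via broadcast writes; GRBM_GFX_INDEX is restored below */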
+ for (count = 0; count < num_se; count++) {
+ value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
+ | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
+ | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+				PP_ASSERT_WITH_CODE((result == 0), "GC CAC Config failed.", return result);
+ result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+				PP_ASSERT_WITH_CODE((result == 0), "GC CAC Config failed.", return result);
+ result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ }
+ }
+ cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
+
+ result = polaris10_enable_didt(hwmgr, true);
+ PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
+
+ /* TO DO Post DIDT enable clock gating */
+ }
+
+ return 0;
+}
+
+int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ /* TO DO Pre DIDT disable clock gating */
+
+ result = polaris10_enable_didt(hwmgr, false);
+		PP_ASSERT_WITH_CODE((result == 0), "DisableDiDt failed.", return result);
+ /* TO DO Post DIDT enable clock gating */
+ }
+
+ return 0;
+}
+
static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -312,6 +843,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable CAC in SMC.", result = -1);
+
+ data->cac_enabled = false;
+ }
+ return result;
+}
+
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -373,6 +921,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
+ int smc_result;
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_TDCLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable TDCLimit in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_DTE) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDTE));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable DTE in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable PkgPwrTracking in SMC.",
+ result = smc_result);
+ }
+ data->power_containment_features = 0;
+ }
+
+ return result;
+}
+
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
index 68bc1cb6d40c..bc78e28f010d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
@@ -27,15 +27,37 @@ enum polaris10_pt_config_reg_type {
POLARIS10_CONFIGREG_MMR = 0,
POLARIS10_CONFIGREG_SMC_IND,
POLARIS10_CONFIGREG_DIDT_IND,
+ POLARIS10_CONFIGREG_GC_CAC_IND,
POLARIS10_CONFIGREG_CACHE,
POLARIS10_CONFIGREG_MAX
};
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define ixGC_CAC_CNTL 0x0000
+#define ixDIDT_SQ_STALL_CTRL 0x0004
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_TD_STALL_CTRL 0x0044
+#define ixDIDT_TD_TUNING_CTRL 0x0045
+#define ixDIDT_TCP_STALL_CTRL 0x0064
+#define ixDIDT_TCP_TUNING_CTRL 0x0065
+
struct polaris10_pt_config_reg {
uint32_t offset;
uint32_t mask;
@@ -62,9 +84,11 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-
+int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
+int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr);
#endif /* POLARIS10_POWERTUNE_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index a3c38bbd1e94..1944d289f846 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
int result;
struct cgs_system_info info = {0};
- if( 0 != acpi_atcs_notify_pcie_device_ready(device))
+ if (acpi_atcs_notify_pcie_device_ready(device))
return -EINVAL;
info.size = sizeof(struct cgs_system_info);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 90b35c5c10a4..26f3e30d0fef 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770(
/* They are both in 10KHz Units. */
engine_clock_parameters.ulTargetEngineClock =
- (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK;
- engine_clock_parameters.ulTargetEngineClock |=
- (COMPUTE_ENGINE_PLL_PARAM << 24);
+ cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
+ ((COMPUTE_ENGINE_PLL_PARAM << 24)));
/* in 10 khz units.*/
engine_clock_parameters.sReserved.ulClock =
- (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK;
+ cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
return cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
&engine_clock_parameters);
@@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si(
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
int result;
- mpll_parameters.ulClock = (uint32_t) clock_value;
+ mpll_parameters.ulClock = cpu_to_le32(clock_value);
mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
result = cgs_atom_exec_cmd_table
@@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si(
if (0 == result) {
mpll_param->mpll_fb_divider.clk_frac =
- mpll_parameters.ulFbDiv.usFbDivFrac;
+ le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
mpll_param->mpll_fb_divider.cl_kf =
- mpll_parameters.ulFbDiv.usFbDiv;
+ le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
mpll_param->mpll_post_divider =
(uint32_t)mpll_parameters.ucPostDiv;
mpll_param->vco_mode =
@@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
int result;
- mpll_parameters.ulClock.ulClock = (uint32_t)clock_value;
+ mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
@@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
int result;
- pll_parameters.ulClock = clock_value;
+ pll_parameters.ulClock = cpu_to_le32(clock_value);
result = cgs_atom_exec_cmd_table
(hwmgr->device,
@@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
if (0 == result) {
dividers->pll_post_divider = pll_parameters.ucPostDiv;
- dividers->real_clock = pll_parameters.ulClock;
+ dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
}
return result;
@@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi(
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
int result;
- pll_patameters.ulClock.ulClock = clock_value;
+ pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
result = cgs_atom_exec_cmd_table
@@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi(
dividers->pll_post_divider =
pll_patameters.ulClock.ucPostDiv;
dividers->real_clock =
- pll_patameters.ulClock.ulClock;
+ le32_to_cpu(pll_patameters.ulClock.ulClock);
dividers->ul_fb_div.ul_fb_div_frac =
- pll_patameters.ulFbDiv.usFbDivFrac;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
dividers->ul_fb_div.ul_fb_div =
- pll_patameters.ulFbDiv.usFbDiv;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
dividers->uc_pll_ref_div =
pll_patameters.ucPllRefDiv;
@@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
int result;
- pll_patameters.ulClock.ulClock = clock_value;
+ pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
result = cgs_atom_exec_cmd_table
@@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
int result;
- pll_patameters.ulClock.ulClock = clock_value;
+ pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv =
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
@@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi(
dividers->pll_post_divider =
pll_patameters.ulClock.ucPostDiv;
dividers->real_clock =
- pll_patameters.ulClock.ulClock;
+ le32_to_cpu(pll_patameters.ulClock.ulClock);
dividers->ul_fb_div.ul_fb_div_frac =
- pll_patameters.ulFbDiv.usFbDivFrac;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
dividers->ul_fb_div.ul_fb_div =
- pll_patameters.ulFbDiv.usFbDiv;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
dividers->uc_pll_ref_div =
pll_patameters.ucPllRefDiv;
@@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3(
for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
voltage_table->entries[i].value =
- voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue;
+ le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
voltage_table->entries[i].smio_low =
- voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId;
+ le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
}
voltage_table->mask_low =
- voltage_object->asGpioVoltageObj.ulGpioMaskVal;
+ le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
voltage_table->count =
voltage_object->asGpioVoltageObj.ucGpioEntryNum;
voltage_table->phase_delay =
@@ -552,13 +551,13 @@ static bool atomctrl_lookup_gpio_pin(
pin_assignment->ucGpioPinBitShift;
gpio_pin_assignment->us_gpio_pin_aindex =
le16_to_cpu(pin_assignment->usGpioPin_AIndex);
- return false;
+ return true;
}
offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
}
- return true;
+ return false;
}
/**
@@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin(
const uint32_t pinId,
pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
{
- bool bRet = 0;
+ bool bRet = false;
ATOM_GPIO_PIN_LUT *gpio_lookup_table =
get_gpio_lookup_table(hwmgr->device);
PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
- "Could not find GPIO lookup Table in BIOS.", return -1);
+ "Could not find GPIO lookup Table in BIOS.", return false);
bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
gpio_pin_assignment);
@@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk(
return -1;
if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
- (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
- getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
+ (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
+ getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
return -1;
/*-----------------------------------------------------------
@@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk(
switch (dpm_level) {
case 1:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
break;
case 2:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
break;
case 3:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
break;
case 4:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
break;
case 5:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
break;
case 6:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
break;
case 7:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
break;
default:
printk(KERN_ERR "DPM Level not supported\n");
fPowerDPMx = Convert_ULONG_ToFraction(1);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000);
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
}
/*-------------------------
@@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
return result;
/* Finally, the actual fuse value */
- ul_RO_fused = sOutput_FuseValues.ulEfuseValue;
- fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1);
- fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1);
+ ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
+ fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
sCACm_fuse = getASICProfilingInfo->sCACm;
@@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_CACm_fused = sOutput_FuseValues.ulEfuseValue;
- fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000);
- fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000);
+ ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
@@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_CACb_fused = sOutput_FuseValues.ulEfuseValue;
- fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000);
- fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000);
+ ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
@@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue;
- fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000);
- fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000);
+ ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
@@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue;
- fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000);
- fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000);
+ ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
+ fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
fRange = fMultiply(fRange, ConvertToFraction(-1));
fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
@@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue;
- fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000);
- fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000);
+ ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
fAverage, fRange, sKv_b_fuse.ucEfuseLength);
@@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue;
- fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000);
- fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000);
+ ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
+ fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
@@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk(
* PART 2 - Grabbing all required values
*-------------------------------------------
*/
- fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000),
+ fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
- fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000),
+ fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
- fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000),
+ fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
- fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000),
+ fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
- fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000),
+ fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
- fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000),
+ fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
- fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000),
+ fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
- fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000),
+ fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
- fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a);
- fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b);
- fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c);
+ fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
+ fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
+ fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
- fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed);
+ fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
fMargin_FMAX_mean = GetScaledFraction(
- getASICProfilingInfo->ulMargin_Fmax_mean, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
fMargin_Plat_mean = GetScaledFraction(
- getASICProfilingInfo->ulMargin_plat_mean, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
fMargin_FMAX_sigma = GetScaledFraction(
- getASICProfilingInfo->ulMargin_Fmax_sigma, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
fMargin_Plat_sigma = GetScaledFraction(
- getASICProfilingInfo->ulMargin_plat_sigma, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
fMargin_DC_sigma = GetScaledFraction(
- getASICProfilingInfo->ulMargin_DC_sigma, 100);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
@@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fSclk = GetScaledFraction(sclk, 100);
fV_max = fDivide(GetScaledFraction(
- getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4));
- fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10);
- fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100);
- fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10);
+ le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
+ fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
+ fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
+ fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
fV_FT = fDivide(GetScaledFraction(
- getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4));
+ le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
fV_min = fDivide(GetScaledFraction(
- getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4));
+ le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
/*-----------------------
* PART 3
@@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
fC_Term = fAdd(fMargin_RO_c,
- fAdd(fMultiply(fSM_A0,fLkg_FT),
+ fAdd(fMultiply(fSM_A0, fLkg_FT),
fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
fAdd(fMultiply(fSM_A3, fSclk),
fSubtract(fSM_A7, fRO_fused)))));
@@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk(
get_voltage_info_param_space.ucVoltageMode =
ATOM_GET_VOLTAGE_EVV_VOLTAGE;
get_voltage_info_param_space.usVoltageLevel =
- virtual_voltage_Id;
+ cpu_to_le16(virtual_voltage_Id);
get_voltage_info_param_space.ulSCLKFreq =
- sclk;
+ cpu_to_le32(sclk);
+
+ result = cgs_atom_exec_cmd_table(hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+ &get_voltage_info_param_space);
+
+ if (0 != result)
+ return result;
+
+ *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+ (&get_voltage_info_param_space))->usVoltageLevel);
+
+ return result;
+}
+
+/**
+ * atomctrl_get_voltage_evv: get the voltage via a call to the ATOM COMMAND table.
+ * @param hwmgr input: pointer to hwManager
+ * @param virtual_voltage_id input: voltage id which matches the per-voltage DPM state: 0xff01, 0xff02 .. 0xff08
+ * @param voltage output: real voltage level in units of mV
+ */
+int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
+ uint16_t virtual_voltage_id,
+ uint16_t *voltage)
+{
+ int result;
+ int entry_id;
+ GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
+
+	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
+ for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
+ /* found */
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+ return -EINVAL;
+ );
+
+ get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id;
+ get_voltage_info_param_space.ulSCLKFreq =
+ cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk(
if (0 != result)
return result;
- *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
- (&get_voltage_info_param_space))->usVoltageLevel;
+ *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+ (&get_voltage_info_param_space))->usVoltageLevel);
return result;
}
@@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
if (entry_found) {
ssEntry->speed_spectrum_percentage =
- ssInfo->usSpreadSpectrumPercentage;
- ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz;
+ le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
+ ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
@@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
int result;
READ_EFUSE_VALUE_PARAMETER efuse_param;
- efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4;
+ efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
efuse_param.sEfuse.ucBitShift = (uint8_t)
(start_index - ((start_index / 32) * 32));
efuse_param.sEfuse.ucBitLength = (uint8_t)
@@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
&efuse_param);
if (!result)
- *efuse = efuse_param.ulEfuseValue & mask;
+ *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
return result;
}
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
- uint8_t level)
+ uint8_t level)
{
DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
int result;
- memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
- memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+ memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
+ memory_clock & SET_CLOCK_FREQ_MASK;
+ memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
+ ADJUST_MC_SETTING_PARAM;
memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
result = cgs_atom_exec_cmd_table
@@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
get_voltage_info_param_space.ucVoltageType = voltage_type;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
- get_voltage_info_param_space.ulSCLKFreq = sclk;
+ get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
+ get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
@@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
- table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
- table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
- table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+ table->entry[i].usFcw_pcc =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
+ table->entry[i].usFcw_trans_upper =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
+ table->entry[i].usRcw_trans_lower =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
}
return 0;
}
-int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
+ struct pp_atom_ctrl__avfs_parameters *param)
{
ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
@@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a
if (!profile)
return -1;
- param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
- param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
- param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
- param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
- param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
- param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
- param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
- param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
- param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
- param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
- param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
- param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
- param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
- param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
- param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
- param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
- param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
- param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
- param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+ param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
+ param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
+ param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
+ param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
+ param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
+ param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
+ param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
+ param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
+ param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
+ param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
+ param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
- param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+ param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 1e35a9625baf..fc898afce002 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters {
extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index 009bd5963ed8..8f50a038396c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -50,55 +50,45 @@ typedef union _fInt {
* Function Declarations
* -------------------------------------------------------------------------------
*/
-fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
-fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */
-fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
-int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
-
-fInt fNegate(fInt); /* Returns -1 * input fInt value */
-fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
-fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
-fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
-fInt fDivide (fInt A, fInt B); /* Returns A/B */
-fInt fGetSquare(fInt); /* Returns the square of a fInt number */
-fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
-
-int uAbs(int); /* Returns the Absolute value of the Int */
-fInt fAbs(fInt); /* Returns the Absolute value of the fInt */
-int uPow(int base, int exponent); /* Returns base^exponent an INT */
-
-void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
-bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
-bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
-
-fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
-fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
+static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
+static fInt Convert_ULONG_ToFraction(uint32_t);	/* Use this to convert a uint32_t to a FINT */
+static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
+static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
+
+static fInt fNegate(fInt); /* Returns -1 * input fInt value */
+static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
+static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
+static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
+static fInt fDivide (fInt A, fInt B); /* Returns A/B */
+static fInt fGetSquare(fInt); /* Returns the square of a fInt number */
+static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
+
+static int uAbs(int); /* Returns the Absolute value of the Int */
+static int uPow(int base, int exponent); /* Returns base^exponent an INT */
+
+static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
+static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
+static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
+
+static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
+static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
/* Fuse decoding functions
* -------------------------------------------------------------------------------------
*/
-fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
-fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
-fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
+static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
+static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
+static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
* -------------------------------------------------------------------------------------
* Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
*/
-fInt Add (int, int); /* Add two INTs and return Sum as FINT */
-fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */
-fInt Divide (int, int); /* You get the idea... */
-fInt fNegate(fInt);
+static fInt Divide (int, int); /* Divide two INTs and return result as FINT */
+static fInt fNegate(fInt);
-int uGetScaledDecimal (fInt); /* Internal function */
-int GetReal (fInt A); /* Internal function */
-
-/* Future Additions and Incomplete Functions
- * -------------------------------------------------------------------------------------
- */
-int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */
- /* Let us say we have 2.126 but can only handle 2 decimal points. We could */
- /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */
+static int uGetScaledDecimal (fInt); /* Internal function */
+static int GetReal (fInt A); /* Internal function */
/* -------------------------------------------------------------------------------------
* TROUBLESHOOTING INFORMATION
@@ -115,7 +105,7 @@ int GetRoundedValue(fInt); /* Incomplete function - Usef
* START OF CODE
* -------------------------------------------------------------------------------------
*/
-fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
+static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
{
uint32_t i;
bool bNegated = false;
@@ -154,7 +144,7 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
return solution;
}
-fInt fNaturalLog(fInt value)
+static fInt fNaturalLog(fInt value)
{
uint32_t i;
fInt upper_bound = Divide(8, 1000);
@@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value)
return (fAdd(solution, error_term));
}
-fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
+static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
{
fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b
}
-fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
+static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
{
fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint
return f_decoded_value;
}
-fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
+static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
{
fInt fLeakage;
fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min,
return fLeakage;
}
-fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
+static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
{
fInt temp;
@@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to m
return temp;
}
-fInt fNegate(fInt X)
+static fInt fNegate(fInt X)
{
fInt CONSTANT_NEGONE = ConvertToFraction(-1);
return (fMultiply(X, CONSTANT_NEGONE));
}
-fInt Convert_ULONG_ToFraction(uint32_t X)
+static fInt Convert_ULONG_ToFraction(uint32_t X)
{
fInt temp;
@@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X)
return temp;
}
-fInt GetScaledFraction(int X, int factor)
+static fInt GetScaledFraction(int X, int factor)
{
int times_shifted, factor_shifted;
bool bNEGATED;
@@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor)
}
/* Addition using two fInts */
-fInt fAdd (fInt X, fInt Y)
+static fInt fAdd (fInt X, fInt Y)
{
fInt Sum;
@@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y)
}
/* Addition using two fInts */
-fInt fSubtract (fInt X, fInt Y)
+static fInt fSubtract (fInt X, fInt Y)
{
fInt Difference;
@@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y)
return Difference;
}
-bool Equal(fInt A, fInt B)
+static bool Equal(fInt A, fInt B)
{
if (A.full == B.full)
return true;
@@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B)
return false;
}
-bool GreaterThan(fInt A, fInt B)
+static bool GreaterThan(fInt A, fInt B)
{
if (A.full > B.full)
return true;
@@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B)
return false;
}
-fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
+static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
{
fInt Product;
int64_t tempProduct;
@@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
return Product;
}
-fInt fDivide (fInt X, fInt Y)
+static fInt fDivide (fInt X, fInt Y)
{
fInt fZERO, fQuotient;
int64_t longlongX, longlongY;
@@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y)
return fQuotient;
}
-int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
+static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
{
fInt fullNumber, scaledDecimal, scaledReal;
@@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch
return fullNumber.full;
}
-fInt fGetSquare(fInt A)
+static fInt fGetSquare(fInt A)
{
return fMultiply(A,A);
}
/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
-fInt fSqrt(fInt num)
+static fInt fSqrt(fInt num)
{
fInt F_divide_Fprime, Fprime;
fInt test;
@@ -460,7 +450,7 @@ fInt fSqrt(fInt num)
return (x_new);
}
-void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
+static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
{
fInt *pRoots = &Roots[0];
fInt temp, root_first, root_second;
@@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
* -----------------------------------------------------------------------------
*/
-/* Addition using two normal ints - Temporary - Use only for testing purposes?. */
-fInt Add (int X, int Y)
-{
- fInt A, B, Sum;
-
- A.full = (X << SHIFT_AMOUNT);
- B.full = (Y << SHIFT_AMOUNT);
-
- Sum.full = A.full + B.full;
-
- return Sum;
-}
-
/* Conversion Functions */
-int GetReal (fInt A)
+static int GetReal (fInt A)
{
return (A.full >> SHIFT_AMOUNT);
}
-/* Temporarily Disabled */
-int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */
-{
- /* ROUNDING TEMPORARLY DISABLED
- int temp = A.full;
- int decimal_cutoff, decimal_mask = 0x000001FF;
- decimal_cutoff = temp & decimal_mask;
- if (decimal_cutoff > 0x147) {
- temp += 673;
- }*/
-
- return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */
-}
-
-fInt Multiply (int X, int Y)
-{
- fInt A, B, Product;
-
- A.full = X << SHIFT_AMOUNT;
- B.full = Y << SHIFT_AMOUNT;
-
- Product = fMultiply(A, B);
-
- return Product;
-}
-
-fInt Divide (int X, int Y)
+static fInt Divide (int X, int Y)
{
fInt A, B, Quotient;
@@ -555,7 +506,7 @@ fInt Divide (int X, int Y)
return Quotient;
}
-int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
+static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
{
int dec[PRECISION];
int i, scaledDecimal = 0, tmp = A.partial.decimal;
@@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege
return scaledDecimal;
}
-int uPow(int base, int power)
+static int uPow(int base, int power)
{
if (power == 0)
return 1;
@@ -578,15 +529,7 @@ int uPow(int base, int power)
return (base)*uPow(base, power - 1);
}
-fInt fAbs(fInt A)
-{
- if (A.partial.real < 0)
- return (fMultiply(A, ConvertToFraction(-1)));
- else
- return A;
-}
-
-int uAbs(int X)
+static int uAbs(int X)
{
if (X < 0)
return (X * -1);
@@ -594,7 +537,7 @@ int uAbs(int X)
return X;
}
-fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
+static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
{
fInt solution;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 2f1a14fe05b1..6c321b0d8a1e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -794,19 +794,35 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
struct pp_hwmgr *hwmgr)
{
- const void *table_addr = NULL;
+ const void *table_addr = hwmgr->soft_pp_table;
uint8_t frev, crev;
uint16_t size;
- table_addr = cgs_atom_get_data_table(hwmgr->device,
- GetIndexIntoMasterTable(DATA, PowerPlayInfo),
- &size, &frev, &crev);
+ if (!table_addr) {
+ table_addr = cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+ &size, &frev, &crev);
- hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table_size = size;
+ }
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
}
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+ uint32_t *vol_rep_time, uint32_t *bb_rep_time)
+{
+ const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
+ "Missing PowerPlay Table!", return -EINVAL);
+
+ *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
+ *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
+
+ return 0;
+}
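The get_powerplay_table() change above caches the VBIOS table pointer in hwmgr->soft_pp_table the first time it is fetched, so repeat callers, including the new pp_tables_get_response_times(), skip the ATOM query. A minimal standalone sketch of that lazy-caching shape; expensive_lookup() is a made-up stub standing in for cgs_atom_get_data_table():

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for the ATOM BIOS query; hypothetical stub for this sketch */
    static const void *expensive_lookup(uint16_t *size)
    {
        static const uint8_t blob[64];
        printf("BIOS table parsed\n");   /* should appear only once */
        *size = sizeof(blob);
        return blob;
    }

    struct ctx {
        const void *cached;              /* mirrors hwmgr->soft_pp_table */
        uint16_t    cached_size;         /* mirrors hwmgr->soft_pp_table_size */
    };

    static const void *get_table(struct ctx *c)
    {
        if (!c->cached) {                /* first call: fetch and remember */
            uint16_t size;
            c->cached = expensive_lookup(&size);
            c->cached_size = size;
        }
        return c->cached;                /* later calls: no re-parse */
    }

    int main(void)
    {
        struct ctx c = { 0 };
        get_table(&c);
        get_table(&c);                   /* cached, no second "parsed" line */
        return 0;
    }

A side effect worth noting: because the lookup only runs while soft_pp_table is still NULL, a table stored there earlier (for example one uploaded from user space) is reused instead of being re-read from the BIOS.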
int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
unsigned long *num_of_entries)
@@ -1499,7 +1515,7 @@ int get_number_of_vce_state_table_entries(
const ATOM_PPLIB_VCE_State_Table *vce_table =
get_vce_state_table(hwmgr, table);
- if (vce_table > 0)
+ if (vce_table)
return vce_table->numEntries;
return 0;
@@ -1589,11 +1605,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr)
static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
- hwmgr->soft_pp_table = NULL;
- }
-
if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
index 30434802417e..baddaa75693b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
extern const struct pp_table_func pptable_funcs;
typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps,
- unsigned int index,
- const void *clock_info);
+ struct pp_hw_power_state *hw_ps,
+ unsigned int index,
+ const void *clock_info);
int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
- unsigned long *num_of_entries);
+ unsigned long *num_of_entries);
int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index,
- struct pp_power_state *ps,
- pp_tables_hw_clock_info_callback func);
+ unsigned long entry_index,
+ struct pp_power_state *ps,
+ pp_tables_hw_clock_info_callback func);
+
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+ uint32_t *vol_rep_time, uint32_t *bb_rep_time);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 5d0f655bf160..c7dc111221c2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->sclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable SCLK DPM when DPM is disabled",
return -1
);
@@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->mclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable MCLK DPM when DPM is disabled",
return -1
);
@@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->pcie_dpm_key_disabled) {
/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable PCIE DPM when DPM is disabled",
return -1
);
@@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable Voltage CNTL when DPM is disabled",
return -1
);
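These hunks replace 0 == tonga_is_dpm_running(hwmgr) with the equivalent !tonga_is_dpm_running(hwmgr) inside PP_ASSERT_WITH_CODE(), which takes a condition, a message and a recovery statement to run when the condition fails. A compilable sketch of a macro with that shape; the real definition lives in the driver's pp_debug.h and may differ in detail:

    #include <stdio.h>

    /* assert-with-recovery: print the message and run the recovery
     * statement when the condition does not hold */
    #define ASSERT_WITH_CODE(cond, msg, code)       \
        do {                                        \
            if (!(cond)) {                          \
                fprintf(stderr, "%s\n", msg);       \
                code;                               \
            }                                       \
        } while (0)

    static int precondition_ok(void)
    {
        return 0;                   /* pretend the check fails */
    }

    static int do_step(void)
    {
        ASSERT_WITH_CODE(precondition_ok(),
                         "precondition not met, bailing out",
                         return -1);
        return 0;
    }

    int main(void)
    {
        printf("do_step() -> %d\n", do_step());   /* prints -1 */
        return 0;
    }

Since the macro only tests truthiness, passing the boolean expression directly reads better than spelling out the comparison with 0.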
@@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
uint32_t level_mask = 1 << n;
/* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to force SCLK when DPM is disabled", return -1;);
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to force SCLK when DPM is disabled",
+ return -1;);
if (0 == data->sclk_dpm_key_disabled)
return (0 == smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
@@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
uint32_t level_mask = 1 << n;
/* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to Force MCLK when DPM is disabled", return -1;);
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Force MCLK when DPM is disabled",
+ return -1;);
if (0 == data->mclk_dpm_key_disabled)
return (0 == smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
@@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to Force PCIE level when DPM is disabled", return -1;);
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Force PCIE level when DPM is disabled",
+ return -1;);
if (0 == data->pcie_dpm_key_disabled)
return (0 == smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
@@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int result;
- bool error = 0;
+ bool error = false;
result = tonga_read_smc_sram_dword(hwmgr->smumgr,
SMU72_FIRMWARE_HEADER_LOCATION +
@@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- data->uvd_power_gated = 0;
- data->vce_power_gated = 0;
- data->samu_power_gated = 0;
- data->acp_power_gated = 0;
- data->pg_acp_init = 1;
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+ data->acp_power_gated = false;
+ data->pg_acp_init = true;
return 0;
}
@@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
* because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
* whereas voltage control is a fundemental change that will not be disabled
*/
- return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1);
+ return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
}
/**
@@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
{
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- if (0 != tonga_is_dpm_running(hwmgr)) {
+ if (tonga_is_dpm_running(hwmgr)) {
/* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
if (!data->dpm_table_start) {
return 1;
@@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
{
uint32_t table_size, i, j;
uint16_t vvalue;
- bool bVoltageFound = 0;
+ bool bVoltageFound = false;
pp_atomctrl_voltage_table *table;
PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
@@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
for (i = 0; i < voltage_table->count; i++) {
vvalue = voltage_table->entries[i].value;
- bVoltageFound = 0;
+ bVoltageFound = false;
for (j = 0; j < table->count; j++) {
if (vvalue == table->entries[j].value) {
- bVoltageFound = 1;
+ bVoltageFound = true;
break;
}
}
@@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
{
uint32_t count;
uint8_t index;
- int result = 0;
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
@@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
}
}
- return result;
+ return 0;
}
@@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level(
if ((data->mclk_stutter_mode_threshold != 0) &&
(memory_clock <= data->mclk_stutter_mode_threshold) &&
- (data->is_uvd_enabled == 0)
+ (!data->is_uvd_enabled)
&& (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
&& (data->display_timing.num_existing_displays <= 2)
&& (data->display_timing.num_existing_displays != 0))
@@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table(
dpm_table->count = count;
for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
- dpm_table->dpm_levels[i].enabled = 0;
+ dpm_table->dpm_levels[i].enabled = false;
}
return 0;
@@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry(
{
dpm_table->dpm_levels[index].value = pcie_gen;
dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = 1;
+ dpm_table->dpm_levels[index].enabled = true;
}
static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
@@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
allowed_vdd_sclk_table->entries[i].clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
data->dpm_table.sclk_table.count++;
}
}
@@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
allowed_vdd_mclk_table->entries[i].clk) {
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
data->dpm_table.mclk_table.count++;
}
}
@@ -3026,8 +3028,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
reg_value = 0;
if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
- VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) {
+ (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment))) {
table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
@@ -3040,8 +3042,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
/* ACDC Switch GPIO */
reg_value = 0;
if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
- PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) {
+ (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment))) {
table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
@@ -3063,8 +3065,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
}
reg_value = 0;
- if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
+ if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalOutGPIO);
@@ -3135,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (0 == data->sclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- if (0 != tonga_is_dpm_running(hwmgr))
+ if (tonga_is_dpm_running(hwmgr))
printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -3150,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (0 == data->mclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- if (0 != tonga_is_dpm_running(hwmgr))
+ if (tonga_is_dpm_running(hwmgr))
printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
@@ -3261,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm
/* initialize vddc_dep_on_dal_pwrl table */
table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
if (NULL == table_clk_vlt) {
printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
@@ -3336,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
int result = 1;
- PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr),
- "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
- return result);
+ PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
+ "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
+ return result);
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
@@ -3742,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
{
- bool result = 1;
+ bool result = true;
switch (inReg) {
case mmMC_SEQ_RAS_TIMING:
@@ -3826,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
break;
default:
- result = 0;
+ result = false;
break;
}
@@ -4422,13 +4423,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
@@ -4442,7 +4436,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
SMU72_Discrete_DpmTable *table = NULL;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ tonga_hwmgr *data;
pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
phw_tonga_ulv_parm *ulv;
@@ -4451,7 +4445,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((NULL != hwmgr),
"Invalid Parameter!", return -1;);
- data->dll_defaule_on = 0;
+ data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
+ data->dll_defaule_on = false;
data->sram_end = SMC_RAM_END;
data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
@@ -4557,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* ULV Support*/
ulv = &(data->ulv);
- ulv->ulv_supported = 0;
+ ulv->ulv_supported = false;
/* Initalize Dynamic State Adjustment Rule Settings*/
result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
if (result)
printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
- data->uvd_enabled = 0;
+ data->uvd_enabled = false;
table = &(data->smc_state_table);
@@ -4571,7 +4571,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
* if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
* Peak Current Control feature is enabled and we should program PCC HW register
*/
- if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
@@ -4610,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMU7);
- data->vddc_phase_shed_control = 0;
+ data->vddc_phase_shed_control = false;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
@@ -4629,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- data->is_tlu_enabled = 0;
+ data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
TONGA_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -4639,7 +4639,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -4648,7 +4648,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
@@ -5310,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(
- 0 == tonga_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5324,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5460,7 +5459,6 @@ static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
{
- int result = 0;
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
uint32_t high_limit_count;
@@ -5480,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe
hw_state->performance_levels[0].memory_clock,
hw_state->performance_levels[high_limit_count].memory_clock);
- return result;
+ return 0;
}
static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
@@ -5627,8 +5625,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5640,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(
- 0 == tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -6031,42 +6028,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
-static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
@@ -6174,11 +6135,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
+static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct tonga_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct tonga_power_state *tonga_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+ tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct tonga_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct tonga_power_state *tonga_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+ tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
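The four new callbacks above expose sclk/mclk overdrive as a percentage above the top entry of the golden (stock) DPM table, clamped to 20 percent. A standalone sketch of that integer arithmetic; the clock numbers are made up:

    #include <stdio.h>
    #include <stdint.h>

    /* overdrive expressed as percent above the reference (golden) top clock */
    static int get_od_percent(uint32_t current_top, uint32_t golden_top)
    {
        return (int)((current_top - golden_top) * 100 / golden_top);
    }

    /* apply a requested percentage, clamped to 20, back onto the golden clock */
    static uint32_t apply_od_percent(uint32_t golden_top, uint32_t percent)
    {
        if (percent > 20)
            percent = 20;
        return golden_top * percent / 100 + golden_top;
    }

    int main(void)
    {
        uint32_t golden  = 1050;    /* illustrative clock value */
        uint32_t boosted = apply_od_percent(golden, 10);

        printf("boosted = %u, read back = %d%%\n",
               boosted, get_od_percent(boosted, golden));   /* 1155, 10% */
        return 0;
    }

Both directions use truncating integer division, so a set-then-get round trip can be off by a percent for clocks that are not convenient multiples.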
+
static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.backend_init = &tonga_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
.asic_setup = &tonga_setup_asic_task,
.dynamic_state_management_enable = &tonga_enable_dpm_tasks,
+ .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
.apply_state_adjust_rules = tonga_apply_state_adjust_rules,
.force_dpm_level = &tonga_force_dpm_level,
.power_state_set = tonga_set_power_state_tasks,
@@ -6212,22 +6258,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.check_states_equal = tonga_check_states_equal,
.set_fan_control_mode = tonga_set_fan_control_mode,
.get_fan_control_mode = tonga_get_fan_control_mode,
- .get_pp_table = tonga_get_pp_table,
- .set_pp_table = tonga_set_pp_table,
.force_clock_level = tonga_force_clock_level,
.print_clock_levels = tonga_print_clock_levels,
+ .get_sclk_od = tonga_get_sclk_od,
+ .set_sclk_od = tonga_set_sclk_od,
+ .get_mclk_od = tonga_get_mclk_od,
+ .set_mclk_od = tonga_set_mclk_od,
};
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- tonga_hwmgr *data;
-
- data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
- memset(data, 0x00, sizeof(tonga_hwmgr));
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_tonga_thermal_initialize(hwmgr);
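With this hunk, tonga_hwmgr_init() no longer allocates the backend: the backend_init hunk earlier in the file now does the kzalloc itself and drops the redundant memset, since kzalloc already returns zeroed memory. A small userspace model of that ownership split, with calloc standing in for kzalloc:

    #include <stdlib.h>

    struct backend { int dll_default_on; /* ... */ };
    struct mgr     { struct backend *backend; };

    /* the backend allocates (and implicitly zeroes) its own state */
    static int backend_init(struct mgr *m)
    {
        struct backend *data = calloc(1, sizeof(*data));  /* kzalloc analogue */

        if (!data)
            return -1;                                    /* -ENOMEM in the driver */
        m->backend = data;
        return 0;
    }

    static void backend_fini(struct mgr *m)
    {
        free(m->backend);
        m->backend = NULL;
    }

    int main(void)
    {
        struct mgr m = { 0 };

        if (backend_init(&m) == 0)
            backend_fini(&m);
        return 0;
    }

Keeping the allocation next to the rest of the backend setup also makes the -ENOMEM error path local to backend_init.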
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 573cd39fe78d..3961884bfa9b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -352,9 +352,6 @@ struct tonga_hwmgr {
bool samu_power_gated; /* 1: gated, 0:not gated */
bool acp_power_gated; /* 1: gated, 0:not gated */
bool pg_acp_init;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
};
typedef struct tonga_hwmgr tonga_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index dccc859f638c..cfb647f76cbe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -167,8 +167,7 @@ static int get_vddc_lookup_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
- table = (phm_ppt_v1_voltage_lookup_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table = kzalloc(table_size, GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
@@ -327,7 +326,7 @@ static int get_valid_clk(
table_size = sizeof(uint32_t) +
sizeof(uint32_t) * clk_volt_pp_table->count;
- table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL);
+ table = kzalloc(table_size, GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
@@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table(
table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
* mclk_dep_table->ucNumEntries;
- mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mclk_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == mclk_table)
return -ENOMEM;
@@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table(
table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
* tonga_table->ucNumEntries;
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ sclk_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == sclk_table)
return -ENOMEM;
@@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table(
table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
* polaris_table->ucNumEntries;
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ sclk_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == sclk_table)
return -ENOMEM;
@@ -504,7 +500,7 @@ static int get_pcie_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
- pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+ pcie_table = kzalloc(table_size, GFP_KERNEL);
if (pcie_table == NULL)
return -ENOMEM;
@@ -541,7 +537,7 @@ static int get_pcie_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
- pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+ pcie_table = kzalloc(table_size, GFP_KERNEL);
if (pcie_table == NULL)
return -ENOMEM;
@@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
* mm_dependency_table->ucNumEntries;
- mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mm_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == mm_table)
return -ENOMEM;
@@ -1073,13 +1068,9 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- int result = 0;
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table)
- hwmgr->soft_pp_table = NULL;
-
kfree(pp_table_information->vdd_dep_on_sclk);
pp_table_information->vdd_dep_on_sclk = NULL;
@@ -1116,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
- return result;
+ return 0;
}
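The tonga_processpptables.c hunks mostly drop explicit casts on kzalloc() and turn always-zero result variables into a direct return 0. The cast is unnecessary in C because void * converts implicitly to any object pointer type. A tiny standalone illustration, with calloc standing in for kzalloc:

    #include <stdlib.h>

    struct table {
        unsigned count;
        int      entries[];      /* flexible array, sized at allocation time */
    };

    static struct table *alloc_table(unsigned count)
    {
        /* no cast needed: the void * result converts implicitly */
        struct table *t = calloc(1, sizeof(*t) + count * sizeof(int));

        if (t)
            t->count = count;
        return t;
    }

    int main(void)
    {
        struct table *t = alloc_table(8);

        free(t);
        return 0;
    }

Kernel style prefers the uncast form; it is shorter, and together with the sizeof(*t) idiom it stays correct if the pointer's type ever changes.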
const struct pp_table_func tonga_pptable_funcs = {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 50b367d44307..b764c8c05ec8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -132,6 +132,7 @@ struct amd_pp_init {
uint32_t chip_family;
uint32_t chip_id;
uint32_t rev_id;
+ bool powercontainment_enabled;
};
enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_None = 0,
@@ -342,6 +343,10 @@ struct amd_powerplay_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
};
struct amd_powerplay {
@@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init,
int amd_powerplay_fini(void *handle);
+int amd_powerplay_reset(void *handle);
+
int amd_powerplay_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *input);
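amd_powerplay.h grows matching public entry points (get/set sclk and mclk OD, plus an amd_powerplay_reset() declaration), which presumably route through the new per-ASIC hwmgr hooks added above. A generic sketch of that optional-hook dispatch pattern; the names and the not-supported fallback are illustrative, not the driver's exact code:

    #include <stdio.h>

    struct ops {
        int (*get_sclk_od)(void *ctx);              /* hook may be absent */
        int (*set_sclk_od)(void *ctx, unsigned v);
    };

    static int dispatch_get_sclk_od(const struct ops *o, void *ctx)
    {
        if (!o || !o->get_sclk_od)
            return -1;                              /* "not supported" fallback */
        return o->get_sclk_od(ctx);
    }

    static int fake_get_sclk_od(void *ctx)
    {
        (void)ctx;
        return 7;                                   /* pretend: 7 percent overdrive */
    }

    int main(void)
    {
        struct ops o = { .get_sclk_od = fake_get_sclk_od };

        printf("od = %d%%\n", dispatch_get_sclk_od(&o, NULL));
        return 0;
    }

Checking the function pointer before calling lets ASICs that do not implement overdrive simply leave the hook NULL.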
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 56f712c7d07a..962cb5385951 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
+extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 77e8e33d5870..bf0d2accf7bf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -278,6 +278,8 @@ struct pp_hwmgr_func {
int (*dynamic_state_management_enable)(
struct pp_hwmgr *hw_mgr);
+ int (*dynamic_state_management_disable)(
+ struct pp_hwmgr *hw_mgr);
int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps);
@@ -333,11 +335,13 @@ struct pp_hwmgr_func {
int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
- int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
- int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+ int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
};
struct pp_table_func {
@@ -580,6 +584,7 @@ struct pp_hwmgr {
struct pp_smumgr *smumgr;
const void *soft_pp_table;
uint32_t soft_pp_table_size;
+ void *hardcode_pp_table;
bool need_pp_table_upload;
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
@@ -609,6 +614,7 @@ struct pp_hwmgr {
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
+ bool powercontainment_enabled;
uint32_t fan_ctrl_default_mode;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index fc9e3d1dd409..3c235f0177cd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
smum_wait_on_indirect_register(smumgr, \
mm##port##_INDEX, index, value, mask)
+#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
+ SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
+ SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+ SMUM_FIELD_MASK(reg, field) )
#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
index, value, mask) \
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
(SMUM_FIELD_MASK(reg, field) & ((field_val) << \
SMUM_FIELD_SHIFT(reg, field))))
+#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
+ SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
port, index, value, mask) \
smum_wait_on_indirect_register(smumgr, \
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
reg, field, fieldval))
+
+#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+
#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
(fieldval) << SMUM_FIELD_SHIFT(reg, field), \
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
(fieldval) << SMUM_FIELD_SHIFT(reg, field), \
SMUM_FIELD_MASK(reg, field))
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
+ smum_wait_for_indirect_register_unequal(smumgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
+ SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
+ SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+ SMUM_FIELD_MASK(reg, field) )
+
#endif
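The added SMUM_WAIT_INDIRECT_* and SMUM_READ/WRITE_INDIRECT_FIELD macros follow the pattern of the existing helpers: paste the ix prefix onto the register name, then shift and mask the field value before polling, reading or writing. A much simplified standalone sketch of the shift/mask token-pasting idiom, with an invented STATUS_BUSY field (the real SMUM macros derive the shift and mask from the register headers):

    #include <stdio.h>
    #include <stdint.h>

    /* invented register layout, only to show how the macros compose */
    #define STATUS_BUSY_SHIFT 3
    #define STATUS_BUSY_MASK  (0x1u << STATUS_BUSY_SHIFT)

    #define FIELD_VALUE(field, v)  (((uint32_t)(v) << field##_SHIFT) & field##_MASK)
    #define READ_FIELD(reg, field) (((reg) & field##_MASK) >> field##_SHIFT)

    int main(void)
    {
        uint32_t reg = 0;

        reg |= FIELD_VALUE(STATUS_BUSY, 1);
        printf("reg=0x%x busy=%u\n", reg, READ_FIELD(reg, STATUS_BUSY));  /* 0x8, 1 */
        return 0;
    }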
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 0728c1e3d97a..7723473e51a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
@@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
handle->smu_mgr = smumgr;
switch (smumgr->chip_family) {
- case AMD_FAMILY_CZ:
+ case AMDGPU_FAMILY_CZ:
cz_smum_init(smumgr);
break;
- case AMD_FAMILY_VI:
+ case AMDGPU_FAMILY_VI:
switch (smumgr->chip_id) {
case CHIP_TONGA:
tonga_smum_init(smumgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index b22722eabafc..f42c536b3af1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
struct tonga_smumgr *tonga_smu =
(struct tonga_smumgr *)(smumgr->backend);
uint16_t fw_to_load;
- int result = 0;
struct SMU_DRAMData_TOC *toc;
/**
* First time this gets called during SmuMgr init,
@@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
"Fail to Request SMU Load uCode", return 0);
- return result;
+ return 0;
}
static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index c89dc777768f..b961a1c6caf3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->sched_job = sched_job;
- __entry->fence = &sched_job->s_fence->base;
+ __entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
),
TP_fast_assign(
- __entry->fence = &fence->base;
+ __entry->fence = &fence->finished;
),
TP_printk("fence=%p signaled", __entry->fence)
);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index c16248cee779..ef312bb75fda 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,6 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(1);
+ entity->fence_context = fence_context_alloc(2);
return 0;
}
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
s_fence = to_amd_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
- /* Fence is from the same scheduler */
- if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
- /* Ignore it when it is already scheduled */
- fence_put(entity->dependency);
- return false;
- }
- /* Wait for fence to be scheduled */
- entity->cb.func = amd_sched_entity_clear_dep;
- list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
- return true;
+ /*
+ * Fence is from the same scheduler, only need to wait for
+ * it to be scheduled
+ */
+ fence = fence_get(&s_fence->scheduled);
+ fence_put(entity->dependency);
+ entity->dependency = fence;
+ if (!fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
+ return true;
+
+ /* Ignore it when it is already scheduled */
+ fence_put(fence);
+ return false;
}
if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
return added;
}
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
- schedule_work(&job->work_free_job);
-}
-
/* job_finish is called after hw fence signaled, and
* the job had already been deleted from ring_mirror_list
*/
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *next;
+ struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
+ /* remove job from ring_mirror_list */
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- if (cancel_delayed_work(&s_job->work_tdr))
- amd_sched_job_put(s_job);
+ struct amd_sched_job *next;
+
+ spin_unlock(&sched->job_list_lock);
+ cancel_delayed_work_sync(&s_job->work_tdr);
+ spin_lock(&sched->job_list_lock);
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (next) {
- INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(next);
+ if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
- }
}
+ spin_unlock(&sched->job_list_lock);
+ sched->ops->free_job(s_job);
}
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ finish_cb);
+ schedule_work(&job->finish_work);
+}
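Job teardown now happens from a work item: the fence callback above only schedules finish_work, and amd_sched_job_finish() then removes the job from the mirror list, synchronously cancels the TDR timer, re-arms it for the next job and finally calls free_job() in process context. A toy userspace model of that defer-to-worker shape; the fixed-size queue and the names are invented for the example:

    #include <stdio.h>

    /* Toy deferred-work queue: the "interrupt-side" callback only enqueues,
     * the cleanup itself runs later when the queue is drained. */
    typedef void (*work_fn)(void *);

    #define MAX_WORK 8
    static struct { work_fn fn; void *arg; } queue[MAX_WORK];
    static int nwork;

    static void schedule_work_item(work_fn fn, void *arg)
    {
        queue[nwork].fn  = fn;
        queue[nwork].arg = arg;
        nwork++;
    }

    static void drain_work(void)                  /* worker-thread analogue */
    {
        for (int i = 0; i < nwork; i++)
            queue[i].fn(queue[i].arg);
        nwork = 0;
    }

    static void free_job(void *job)
    {
        printf("freeing job %s in process context\n", (const char *)job);
    }

    static void fence_signaled_cb(void *job)      /* "interrupt" context */
    {
        schedule_work_item(free_job, job);        /* defer the heavy cleanup */
    }

    int main(void)
    {
        fence_signaled_cb("A");
        drain_work();
        return 0;
    }

The deferral matters because cancel_delayed_work_sync() can sleep, which is not allowed in the context that signals the fence.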
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
+ spin_lock(&sched->job_list_lock);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
- {
- INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(s_job);
+ list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node) == s_job)
+ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+ struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ work_tdr.work);
+
+ job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job;
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ fence_put(s_job->s_fence->parent);
+ s_job->s_fence->parent = NULL;
+ }
+ }
+ atomic_set(&sched->hw_rq_count, 0);
+ spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job, *tmp;
+ int r;
+
+ spin_lock(&sched->job_list_lock);
+ s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node);
+ if (s_job)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+ list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct fence *fence;
+
+ spin_unlock(&sched->job_list_lock);
+ fence = sched->ops->run_job(s_job);
+ atomic_inc(&sched->hw_rq_count);
+ if (fence) {
+ s_fence->parent = fence_get(fence);
+ r = fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
+ if (r == -ENOENT)
+ amd_sched_process_job(fence, &s_fence->cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
+ fence_put(fence);
+ } else {
+ DRM_ERROR("Failed to run job!\n");
+ amd_sched_process_job(NULL, &s_fence->cb);
+ }
+ spin_lock(&sched->job_list_lock);
}
+ spin_unlock(&sched->job_list_lock);
}
/**
@@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
- sched_job->use_sched = 1;
- fence_add_callback(&sched_job->s_fence->base,
- &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
+ fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
/* init a sched_job with basic field */
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref *refcount),
- void *owner, struct fence **fence)
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner)
{
- INIT_LIST_HEAD(&job->node);
- kref_init(&job->refcount);
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->s_fence->s_job = job;
- job->timeout_callback = timeout_cb;
- job->free_callback = free_cb;
+ INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_LIST_HEAD(&job->node);
+ INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
- if (fence)
- *fence = &job->s_fence->base;
return 0;
}
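amd_sched_job_init() is reduced to filling in the basic fields and arming finish_work plus the TDR delayed work; the job's fence now exposes two signal points, scheduled and finished, created from fence_context_alloc(2) earlier in this diff. A plain-C model of that two-stage lifecycle, with booleans standing in for struct fence (so no reference counting or callbacks here):

    #include <stdio.h>
    #include <stdbool.h>

    /* one job-side object, two signal points */
    struct sched_fence {
        bool scheduled;   /* set when the job is picked and handed to hardware */
        bool finished;    /* set when the hardware fence completes */
    };

    static void job_picked(struct sched_fence *f)
    {
        f->scheduled = true;      /* same-scheduler dependencies can proceed */
    }

    static void hw_completed(struct sched_fence *f)
    {
        f->finished = true;       /* consumers of the result can proceed */
    }

    int main(void)
    {
        struct sched_fence f = { false, false };

        job_picked(&f);
        printf("scheduled=%d finished=%d\n", f.scheduled, f.finished);  /* 1 0 */
        hw_completed(&f);
        printf("scheduled=%d finished=%d\n", f.scheduled, f.finished);  /* 1 1 */
        return 0;
    }

This split is what the earlier dependency hunk relies on: a job queued behind another job on the same scheduler only waits for the scheduled stage, while everything else waits on finished.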
@@ -450,23 +516,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
- unsigned long flags;
atomic_dec(&sched->hw_rq_count);
-
- /* remove job from ring_mirror_list */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_del_init(&s_fence->s_job->node);
- sched->ops->finish_job(s_fence->s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- amd_sched_fence_signal(s_fence);
+ amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->base);
+ fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+ if (kthread_should_park()) {
+ kthread_parkme();
+ return true;
+ }
+
+ return false;
+}
+
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +544,15 @@ static int amd_sched_main(void *param)
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity;
+ struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (entity = amd_sched_select_entity(sched)) ||
- kthread_should_stop());
+ (!amd_sched_blocked(sched) &&
+ (entity = amd_sched_select_entity(sched))) ||
+ kthread_should_stop());
if (!entity)
continue;
@@ -495,16 +564,19 @@ static int amd_sched_main(void *param)
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_pre_schedule(sched, sched_job);
+ amd_sched_job_begin(sched_job);
+
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
+ s_fence->parent = fence_get(fence);
r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 070095a9433c..7cbbbfb502ef 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,8 +27,6 @@
#include <linux/kfifo.h>
#include <linux/fence.h>
-#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
-
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -68,36 +66,34 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence base;
+ struct fence scheduled;
+ struct fence finished;
struct fence_cb cb;
- struct list_head scheduled_cb;
+ struct fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
- struct amd_sched_job *s_job;
};
struct amd_sched_job {
- struct kref refcount;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- bool use_sched; /* true if the job goes to scheduler */
- struct fence_cb cb_free_job;
- struct work_struct work_free_job;
- struct list_head node;
- struct delayed_work work_tdr;
- void (*timeout_callback) (struct work_struct *work);
- void (*free_callback)(struct kref *refcount);
+ struct fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
- struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+ if (f->ops == &amd_sched_fence_ops_scheduled)
+ return container_of(f, struct amd_sched_fence, scheduled);
- if (__f->base.ops == &amd_sched_fence_ops)
- return __f;
+ if (f->ops == &amd_sched_fence_ops_finished)
+ return container_of(f, struct amd_sched_fence, finished);
return NULL;
}
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*begin_job)(struct amd_sched_job *sched_job);
- void (*finish_job)(struct amd_sched_job *sched_job);
+ void (*timedout_job)(struct amd_sched_job *sched_job);
+ void (*free_job)(struct amd_sched_job *sched_job);
};
enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref* refcount),
- void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
- if (job)
- kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
- if (job)
- kref_put(&job->refcount, job->free_callback);
-}
-
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 2a732c490375..6b63beaf7574 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,7 +27,8 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+ void *owner)
{
struct amd_sched_fence *fence = NULL;
unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
if (fence == NULL)
return NULL;
- INIT_LIST_HEAD(&fence->scheduled_cb);
fence->owner = owner;
- fence->sched = s_entity->sched;
+ fence->sched = entity->sched;
spin_lock_init(&fence->lock);
- seq = atomic_inc_return(&s_entity->fence_seq);
- fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
- s_entity->fence_context, seq);
+ seq = atomic_inc_return(&entity->fence_seq);
+ fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->base);
+ int ret = fence_signal(&fence->scheduled);
+
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job)
-{
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
- sched->ops->begin_job(s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ FENCE_TRACE(&fence->scheduled, "was already signaled\n");
}
-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ int ret = fence_signal(&fence->finished);
- set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
- list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
- list_del_init(&cur->node);
- cur->func(&s_fence->base, cur);
- }
+ if (!ret)
+ FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ else
+ FENCE_TRACE(&fence->finished, "was already signaled\n");
}
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
{
- call_rcu(&f->rcu, amd_sched_fence_free);
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ call_rcu(&fence->finished.rcu, amd_sched_fence_free);
}
-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_scheduled - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+ .get_driver_name = amd_sched_fence_get_driver_name,
+ .get_timeline_name = amd_sched_fence_get_timeline_name,
+ .enable_signaling = amd_sched_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
- .release = amd_sched_fence_release,
+ .release = amd_sched_fence_release_finished,
};
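The two fence_ops differ only in their release hooks: dropping the last reference on the finished fence puts the scheduled fence, and dropping the last reference on the scheduled fence RCU-frees the whole amd_sched_fence. A toy refcount model of that chained teardown, with plain integers instead of kref and RCU, purely to show the ordering as I read the release hooks:

    #include <stdio.h>
    #include <stdlib.h>

    struct two_fence {
        int scheduled_refs;
        int finished_refs;
    };

    static void put_scheduled(struct two_fence *f)
    {
        if (--f->scheduled_refs == 0) {
            printf("whole object freed\n");   /* call_rcu(... free) analogue */
            free(f);
        }
    }

    static void put_finished(struct two_fence *f)
    {
        if (--f->finished_refs == 0)
            put_scheduled(f);                 /* finished holds a scheduled ref */
    }

    int main(void)
    {
        struct two_fence *f = malloc(sizeof(*f));

        if (!f)
            return 1;
        f->scheduled_refs = 1;                /* owned by the finished side */
        f->finished_refs  = 1;                /* owned by the job's creator */

        put_finished(f);                      /* last ref tears everything down */
        return 0;
    }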
diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig
index f9a13b658fea..f47d88ba4fa5 100644
--- a/drivers/gpu/drm/arc/Kconfig
+++ b/drivers/gpu/drm/arc/Kconfig
@@ -2,7 +2,6 @@ config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
index d48fda70f857..73de56a0139a 100644
--- a/drivers/gpu/drm/arc/Makefile
+++ b/drivers/gpu/drm/arc/Makefile
@@ -1,2 +1,2 @@
-arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o
+arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index 86574b698a78..e8fcf3ab1d9a 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -22,7 +22,6 @@ struct arcpgu_drm_private {
struct clk *clk;
struct drm_fbdev_cma *fbdev;
struct drm_framebuffer *fb;
- struct list_head event_list;
struct drm_crtc crtc;
struct drm_plane *plane;
};
@@ -43,6 +42,7 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
int arc_pgu_setup_crtc(struct drm_device *dev);
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
+int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count);
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 92f8beff8e60..ee0a61c2861b 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- unsigned long flags;
-
- if (crtc->state->event) {
- struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ if (event) {
crtc->state->event = NULL;
- event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- list_add_tail(&event->base.link, &arcpgu->event_list);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 76e187a5bde0..6d4ff34737cb 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -28,21 +28,14 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
{
struct arcpgu_drm_private *arcpgu = dev->dev_private;
- if (arcpgu->fbdev)
- drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
-}
-
-static int arcpgu_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async)
-{
- return drm_atomic_helper_commit(dev, state, false);
+ drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = arcpgu_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void arcpgu_setup_mode_config(struct drm_device *drm)
@@ -55,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
-int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
@@ -81,22 +74,6 @@ static const struct file_operations arcpgu_drm_ops = {
.mmap = arcpgu_gem_mmap,
};
-static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
-{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
- struct drm_pending_vblank_event *e, *t;
- unsigned long flags;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
- if (e->base.file_priv != file)
- continue;
- list_del(&e->base.link);
- e->base.destroy(&e->base);
- }
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
static void arcpgu_lastclose(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -122,16 +99,12 @@ static int arcpgu_load(struct drm_device *drm)
if (IS_ERR(arcpgu->clk))
return PTR_ERR(arcpgu->clk);
- INIT_LIST_HEAD(&arcpgu->event_list);
-
arcpgu_setup_mode_config(drm);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(arcpgu->regs)) {
- dev_err(drm->dev, "Could not remap IO mem\n");
+ if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
- }
dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
arc_pgu_read(arcpgu, ARCPGU_REG_ID));
@@ -149,15 +122,17 @@ static int arcpgu_load(struct drm_device *drm)
/* find the encoder node and initialize it */
encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
- if (!encoder_node) {
- dev_err(drm->dev, "failed to get an encoder slave node\n");
- return -ENODEV;
+ if (encoder_node) {
+ ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+ of_node_put(encoder_node);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = arcpgu_drm_sim_init(drm, NULL);
+ if (ret < 0)
+ return ret;
}
- ret = arcpgu_drm_hdmi_init(drm, encoder_node);
- if (ret < 0)
- return ret;
-
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
@@ -174,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm)
return 0;
}
-int arcpgu_unload(struct drm_device *drm)
+static int arcpgu_unload(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -192,7 +167,6 @@ int arcpgu_unload(struct drm_device *drm)
static struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .preclose = arcpgu_preclose,
.lastclose = arcpgu_lastclose,
.name = "drm-arcpgu",
.desc = "ARC PGU Controller",
@@ -207,7 +181,7 @@ static struct drm_driver arcpgu_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
@@ -235,15 +209,8 @@ static int arcpgu_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- ret = drm_connector_register_all(drm);
- if (ret)
- goto err_unregister;
-
return 0;
-err_unregister:
- drm_dev_unregister(drm);
-
err_unload:
arcpgu_unload(drm);
@@ -257,7 +224,6 @@ static int arcpgu_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
arcpgu_unload(drm);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 08b6baeb320d..b7a8b2ac4055 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
return sfuncs->get_modes(&slave->base, connector);
}
-struct drm_encoder *
-arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
-{
- struct drm_encoder_slave *slave;
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_best_encoder: cannot find slave encoder for connector\n");
- return NULL;
- }
-
- return &slave->base;
-}
-
static enum drm_connector_status
arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
{
@@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
arcpgu_drm_connector_helper_funcs = {
.get_modes = arcpgu_drm_connector_get_modes,
- .best_encoder = arcpgu_drm_connector_best_encoder,
};
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
new file mode 100644
index 000000000000..2bf06d71556a
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -0,0 +1,128 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "arcpgu.h"
+
+#define XRES_DEF 640
+#define YRES_DEF 480
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+
+struct arcpgu_drm_connector {
+ struct drm_connector connector;
+ struct drm_encoder_slave *encoder_slave;
+};
+
+static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+ return count;
+}
+
+static enum drm_connector_status
+arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_helper_funcs
+arcpgu_drm_connector_helper_funcs = {
+ .get_modes = arcpgu_drm_connector_get_modes,
+};
+
+static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = arcpgu_drm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = arcpgu_drm_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
+{
+ struct arcpgu_drm_connector *arcpgu_connector;
+ struct drm_encoder_slave *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+ if (encoder == NULL)
+ return -ENOMEM;
+
+ encoder->base.possible_crtcs = 1;
+ encoder->base.possible_clones = 0;
+
+ ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+
+ arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
+ GFP_KERNEL);
+ if (!arcpgu_connector) {
+ ret = -ENOMEM;
+ goto error_encoder_cleanup;
+ }
+
+ connector = &arcpgu_connector->connector;
+ drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
+
+ ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret < 0) {
+ dev_err(drm->dev, "failed to initialize drm connector\n");
+ goto error_encoder_cleanup;
+ }
+
+ ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
+ if (ret < 0) {
+ dev_err(drm->dev, "could not attach connector to encoder\n");
+ drm_connector_unregister(connector);
+ goto error_connector_cleanup;
+ }
+
+ arcpgu_connector->encoder_slave = encoder;
+
+ return 0;
+
+error_connector_cleanup:
+ drm_connector_cleanup(connector);
+
+error_encoder_cleanup:
+ drm_encoder_cleanup(&encoder->base);
+ return ret;
+}
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index eaed454e043c..9a18e1bd57b4 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -9,7 +9,6 @@ config DRM_HDLCD
depends on COMMON_CLK
select DRM_ARM
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
@@ -25,3 +24,19 @@ config DRM_HDLCD_SHOW_UNDERRUN
Enable this option to show in red colour the pixels that the
HDLCD device did not fetch from framebuffer due to underrun
conditions.
+
+config DRM_MALI_DISPLAY
+ tristate "ARM Mali Display Processor"
+ depends on DRM && OF && (ARM || ARM64)
+ depends on COMMON_CLK
+ select DRM_ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you want to compile the ARM Mali Display
+ Processor driver. It supports the DP500, DP550 and DP650 variants
+ of the hardware.
+
+ If compiled as a module, it will be called mali-dp.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index 89dcb7bab93a..bb8b158ff90d 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,2 +1,4 @@
hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
+mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0813c2f06931..48019ae22ddb 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -196,30 +196,11 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
}
-static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
-}
-
-static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .mode_fixup = hdlcd_crtc_mode_fixup,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
- .mode_set_nofb = hdlcd_crtc_mode_set_nofb,
.enable = hdlcd_crtc_enable,
.disable = hdlcd_crtc_disable,
- .prepare = hdlcd_crtc_disable,
- .commit = hdlcd_crtc_enable,
.atomic_check = hdlcd_crtc_atomic_check,
.atomic_begin = hdlcd_crtc_atomic_begin,
- .atomic_flush = hdlcd_crtc_atomic_flush,
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index a6ca36f0096f..d83b46a30327 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -102,21 +102,14 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm->dev_private;
- if (hdlcd->fbdev)
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
-static int hdlcd_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool nonblock)
-{
- return drm_atomic_helper_commit(dev, state, false);
+ drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = hdlcd_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = hdlcd_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void hdlcd_setup_mode_config(struct drm_device *drm)
@@ -296,7 +289,7 @@ static struct drm_driver hdlcd_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = hdlcd_enable_vblank,
.disable_vblank = hdlcd_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
new file mode 100644
index 000000000000..08e6a71f5d05
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -0,0 +1,216 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 driver (crtc operations)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/clk.h>
+#include <video/videomode.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+
+static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ /*
+ * check that the hardware can drive the required clock rate,
+ * but skip the check if the clock is meant to be disabled (req_rate = 0)
+ */
+ long rate, req_rate = mode->crtc_clock * 1000;
+
+ if (req_rate) {
+ rate = clk_round_rate(hwdev->mclk, req_rate);
+ if (rate < req_rate) {
+ DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
+ mode->crtc_clock);
+ return false;
+ }
+
+ rate = clk_round_rate(hwdev->pxlclk, req_rate);
+ if (rate != req_rate) {
+ DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
+ req_rate);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void malidp_crtc_enable(struct drm_crtc *crtc)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct videomode vm;
+
+ drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
+
+ clk_prepare_enable(hwdev->pxlclk);
+
+ /* mclk needs to be set to the same rate as pxlclk or higher */
+ clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+ clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+
+ hwdev->modeset(hwdev, &vm);
+ hwdev->leave_config_mode(hwdev);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void malidp_crtc_disable(struct drm_crtc *crtc)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ drm_crtc_vblank_off(crtc);
+ hwdev->enter_config_mode(hwdev);
+ clk_disable_unprepare(hwdev->pxlclk);
+}
+
+static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ u32 rot_mem_free, rot_mem_usable;
+ int rotated_planes = 0;
+
+ /*
+ * check if there is enough rotation memory available for planes
+ * that need 90° and 270° rotation. Each plane has set its required
+ * memory size in the ->plane_check() callback, here we only make
+ * sure that the sums are less than the total usable memory.
+ *
+ * The rotation memory allocation algorithm (for each plane):
+ * a. If no more rotated planes exist, all remaining rotate
+ * memory in the bank is available for use by the plane.
+ * b. If other rotated planes exist, and plane's layer ID is
+ * DE_VIDEO1, it can use all the memory from first bank if
+ * secondary rotation memory bank is available, otherwise it can
+ * use up to half the bank's memory.
+ * c. If other rotated planes exist, and plane's layer ID is not
+ * DE_VIDEO1, it can use half of the available memory.
+ *
+ * Note: this algorithm assumes that the order in which the planes are
+ * checked always has the DE_VIDEO1 plane first in the list if it is
+ * rotated. Because that is how we create the planes in the first
+ * place, this works with the current DRM version, but if the order
+ * in which drm_atomic_crtc_state_for_each_plane() iterates over planes
+ * ever changes, we will need to pre-sort the planes before validation.
+ */
+
+ /* first count the number of rotated planes */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ if (pstate->rotation & MALIDP_ROTATED_MASK)
+ rotated_planes++;
+ }
+
+ rot_mem_free = hwdev->rotation_memory[0];
+ /*
+ * if we have more than 1 plane using rotation memory, use the second
+ * block of rotation memory as well
+ */
+ if (rotated_planes > 1)
+ rot_mem_free += hwdev->rotation_memory[1];
+
+ /* now validate the rotation memory requirements */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
+
+ if (pstate->rotation & MALIDP_ROTATED_MASK) {
+ /* process current plane */
+ rotated_planes--;
+
+ if (!rotated_planes) {
+ /* no more rotated planes, we can use what's left */
+ rot_mem_usable = rot_mem_free;
+ } else {
+ if ((mp->layer->id != DE_VIDEO1) ||
+ (hwdev->rotation_memory[1] == 0))
+ rot_mem_usable = rot_mem_free / 2;
+ else
+ rot_mem_usable = hwdev->rotation_memory[0];
+ }
+
+ rot_mem_free -= rot_mem_usable;
+
+ if (ms->rotmem_size > rot_mem_usable)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
+ .mode_fixup = malidp_crtc_mode_fixup,
+ .enable = malidp_crtc_enable,
+ .disable = malidp_crtc_disable,
+ .atomic_check = malidp_crtc_atomic_check,
+};
+
+static const struct drm_crtc_funcs malidp_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+int malidp_crtc_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct drm_plane *primary = NULL, *plane;
+ int ret;
+
+ ret = malidp_de_planes_init(drm);
+ if (ret < 0) {
+ DRM_ERROR("Failed to initialise planes\n");
+ return ret;
+ }
+
+ drm_for_each_plane(plane, drm) {
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ primary = plane;
+ break;
+ }
+ }
+
+ if (!primary) {
+ DRM_ERROR("no primary plane found\n");
+ ret = -EINVAL;
+ goto crtc_cleanup_planes;
+ }
+
+ ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
+ &malidp_crtc_funcs, NULL);
+
+ if (!ret) {
+ drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
+ return 0;
+ }
+
+crtc_cleanup_planes:
+ malidp_de_planes_destroy(drm);
+
+ return ret;
+}
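
The rotation-memory split described in the comment inside malidp_crtc_atomic_check() is easier to follow with concrete numbers. The standalone sketch below replays the same splitting rule in plain C for two rotated planes on a part with two 64 KiB banks (the DP550 figures quoted later in malidp_hw.c); the bank sizes and the plane set are illustrative assumptions, not real hardware state.

#include <stdio.h>

int main(void)
{
	unsigned int bank[2] = { 64 * 1024, 64 * 1024 };
	unsigned int rot_mem_free, video1_budget, video2_budget;

	/* two rotated planes, so both banks contribute to the free pool */
	rot_mem_free = bank[0] + bank[1];

	/*
	 * DE_VIDEO1 is checked first; other rotated planes remain and a
	 * second bank exists, so it may claim the whole first bank.
	 */
	video1_budget = bank[0];
	rot_mem_free -= video1_budget;

	/* the last rotated plane takes whatever is left */
	video2_budget = rot_mem_free;

	printf("DE_VIDEO1 budget: %u bytes\n", video1_budget);	/* 65536 */
	printf("DE_VIDEO2 budget: %u bytes\n", video2_budget);	/* 65536 */
	return 0;
}

Each plane's rotmem_size, as filled in by the ->rotmem_required() hook, must fit inside its budget, otherwise the atomic check returns -EINVAL.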
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
new file mode 100644
index 000000000000..82171d223f2d
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -0,0 +1,519 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 KMS/DRM driver
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include "malidp_drv.h"
+#include "malidp_regs.h"
+#include "malidp_hw.h"
+
+#define MALIDP_CONF_VALID_TIMEOUT 250
+
+/*
+ * set the "config valid" bit and wait until the hardware acts on it
+ */
+static int malidp_set_and_wait_config_valid(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int ret;
+
+ hwdev->set_config_valid(hwdev);
+ /* don't wait for config_valid flag if we are in config mode */
+ if (hwdev->in_config_mode(hwdev))
+ return 0;
+
+ ret = wait_event_interruptible_timeout(malidp->wq,
+ atomic_read(&malidp->config_valid) == 1,
+ msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
+
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+
+static void malidp_output_poll_changed(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(malidp->fbdev);
+}
+
+static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *drm = state->dev;
+ struct malidp_drm *malidp = drm->dev_private;
+ int ret = malidp_set_and_wait_config_valid(drm);
+
+ if (ret)
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+
+ event = malidp->crtc.state->event;
+ if (event) {
+ malidp->crtc.state->event = NULL;
+
+ spin_lock_irq(&drm->event_lock);
+ if (drm_crtc_vblank_get(&malidp->crtc) == 0)
+ drm_crtc_arm_vblank_event(&malidp->crtc, event);
+ else
+ drm_crtc_send_vblank_event(&malidp->crtc, event);
+ spin_unlock_irq(&drm->event_lock);
+ }
+ drm_atomic_helper_commit_hw_done(state);
+}
+
+static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *drm = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(drm, state);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state, true);
+
+ malidp_atomic_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(drm, state);
+
+ drm_atomic_helper_cleanup_planes(drm, state);
+}
+
+static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
+ .atomic_commit_tail = malidp_atomic_commit_tail,
+};
+
+static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = malidp_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.vsync_irq);
+ return 0;
+}
+
+static void malidp_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.vsync_irq);
+}
+
+static int malidp_init(struct drm_device *drm)
+{
+ int ret;
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = hwdev->min_line_size;
+ drm->mode_config.min_height = hwdev->min_line_size;
+ drm->mode_config.max_width = hwdev->max_line_size;
+ drm->mode_config.max_height = hwdev->max_line_size;
+ drm->mode_config.funcs = &malidp_mode_config_funcs;
+ drm->mode_config.helper_private = &malidp_mode_config_helpers;
+
+ ret = malidp_crtc_init(drm);
+ if (ret) {
+ drm_mode_config_cleanup(drm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int malidp_irq_init(struct platform_device *pdev)
+{
+ int irq_de, irq_se, ret = 0;
+ struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+
+ /* fetch the interrupts from DT */
+ irq_de = platform_get_irq_byname(pdev, "DE");
+ if (irq_de < 0) {
+ DRM_ERROR("no 'DE' IRQ specified!\n");
+ return irq_de;
+ }
+ irq_se = platform_get_irq_byname(pdev, "SE");
+ if (irq_se < 0) {
+ DRM_ERROR("no 'SE' IRQ specified!\n");
+ return irq_se;
+ }
+
+ ret = malidp_de_irq_init(drm, irq_de);
+ if (ret)
+ return ret;
+
+ ret = malidp_se_irq_init(drm, irq_se);
+ if (ret) {
+ malidp_de_irq_fini(drm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void malidp_lastclose(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(malidp->fbdev);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver malidp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
+ DRIVER_PRIME,
+ .lastclose = malidp_lastclose,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = malidp_enable_vblank,
+ .disable_vblank = malidp_disable_vblank,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &fops,
+ .name = "mali-dp",
+ .desc = "ARM Mali Display Processor driver",
+ .date = "20160106",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id malidp_drm_of_match[] = {
+ {
+ .compatible = "arm,mali-dp500",
+ .data = &malidp_device[MALIDP_500]
+ },
+ {
+ .compatible = "arm,mali-dp550",
+ .data = &malidp_device[MALIDP_550]
+ },
+ {
+ .compatible = "arm,mali-dp650",
+ .data = &malidp_device[MALIDP_650]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
+
+#define MAX_OUTPUT_CHANNELS 3
+
+static int malidp_bind(struct device *dev)
+{
+ struct resource *res;
+ struct drm_device *drm;
+ struct device_node *ep;
+ struct malidp_drm *malidp;
+ struct malidp_hw_device *hwdev;
+ struct platform_device *pdev = to_platform_device(dev);
+ /* number of lines for the R, G and B output */
+ u8 output_width[MAX_OUTPUT_CHANNELS];
+ int ret = 0, i;
+ u32 version, out_depth = 0;
+
+ malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
+ if (!malidp)
+ return -ENOMEM;
+
+ hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ /*
+ * copy the associated data from malidp_drm_of_match to avoid
+ * having to keep a reference to the OF node after binding
+ */
+ memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+ malidp->dev = hwdev;
+
+ INIT_LIST_HEAD(&malidp->event_list);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hwdev->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hwdev->regs))
+ return PTR_ERR(hwdev->regs);
+
+ hwdev->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(hwdev->pclk))
+ return PTR_ERR(hwdev->pclk);
+
+ hwdev->aclk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(hwdev->aclk))
+ return PTR_ERR(hwdev->aclk);
+
+ hwdev->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(hwdev->mclk))
+ return PTR_ERR(hwdev->mclk);
+
+ hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
+ if (IS_ERR(hwdev->pxlclk))
+ return PTR_ERR(hwdev->pxlclk);
+
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ drm = drm_dev_alloc(&malidp_driver, dev);
+ if (!drm) {
+ ret = -ENOMEM;
+ goto alloc_fail;
+ }
+
+ /* Enable APB clock in order to get access to the registers */
+ clk_prepare_enable(hwdev->pclk);
+ /*
+ * Enable AXI clock and main clock so that prefetch can start once
+ * the registers are set
+ */
+ clk_prepare_enable(hwdev->aclk);
+ clk_prepare_enable(hwdev->mclk);
+
+ ret = hwdev->query_hw(hwdev);
+ if (ret) {
+ DRM_ERROR("Invalid HW configuration\n");
+ goto query_hw_fail;
+ }
+
+ version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+ DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
+ (version >> 12) & 0xf, (version >> 8) & 0xf);
+
+ /* set the number of lines used for output of RGB data */
+ ret = of_property_read_u8_array(dev->of_node,
+ "arm,malidp-output-port-lines",
+ output_width, MAX_OUTPUT_CHANNELS);
+ if (ret)
+ goto query_hw_fail;
+
+ for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
+ out_depth = (out_depth << 8) | (output_width[i] & 0xf);
+ malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+
+ drm->dev_private = malidp;
+ dev_set_drvdata(dev, drm);
+ atomic_set(&malidp->config_valid, 0);
+ init_waitqueue_head(&malidp->wq);
+
+ ret = malidp_init(drm);
+ if (ret < 0)
+ goto init_fail;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto register_fail;
+
+ /* Set the CRTC's port so that the encoder component can find it */
+ ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!ep) {
+ ret = -EINVAL;
+ goto port_fail;
+ }
+ malidp->crtc.port = of_get_next_parent(ep);
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_ERROR("Failed to bind all components\n");
+ goto bind_fail;
+ }
+
+ ret = malidp_irq_init(pdev);
+ if (ret < 0)
+ goto irq_init_fail;
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialise vblank\n");
+ goto vblank_fail;
+ }
+
+ drm_mode_config_reset(drm);
+
+ malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+
+ if (IS_ERR(malidp->fbdev)) {
+ ret = PTR_ERR(malidp->fbdev);
+ malidp->fbdev = NULL;
+ goto fbdev_fail;
+ }
+
+ drm_kms_helper_poll_init(drm);
+ return 0;
+
+fbdev_fail:
+ drm_vblank_cleanup(drm);
+vblank_fail:
+ malidp_se_irq_fini(drm);
+ malidp_de_irq_fini(drm);
+irq_init_fail:
+ component_unbind_all(dev, drm);
+bind_fail:
+ of_node_put(malidp->crtc.port);
+ malidp->crtc.port = NULL;
+port_fail:
+ drm_dev_unregister(drm);
+register_fail:
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+init_fail:
+ drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
+query_hw_fail:
+ clk_disable_unprepare(hwdev->mclk);
+ clk_disable_unprepare(hwdev->aclk);
+ clk_disable_unprepare(hwdev->pclk);
+ drm_dev_unref(drm);
+alloc_fail:
+ of_reserved_mem_device_release(dev);
+
+ return ret;
+}
+
+static void malidp_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ if (malidp->fbdev) {
+ drm_fbdev_cma_fini(malidp->fbdev);
+ malidp->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ malidp_se_irq_fini(drm);
+ malidp_de_irq_fini(drm);
+ drm_vblank_cleanup(drm);
+ component_unbind_all(dev, drm);
+ of_node_put(malidp->crtc.port);
+ malidp->crtc.port = NULL;
+ drm_dev_unregister(drm);
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
+ clk_disable_unprepare(hwdev->mclk);
+ clk_disable_unprepare(hwdev->aclk);
+ clk_disable_unprepare(hwdev->pclk);
+ drm_dev_unref(drm);
+ of_reserved_mem_device_release(dev);
+}
+
+static const struct component_master_ops malidp_master_ops = {
+ .bind = malidp_bind,
+ .unbind = malidp_unbind,
+};
+
+static int malidp_compare_dev(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static int malidp_platform_probe(struct platform_device *pdev)
+{
+ struct device_node *port, *ep;
+ struct component_match *match = NULL;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ /* there is only one output port inside each device, find it */
+ ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (!ep)
+ return -ENODEV;
+
+ if (!of_device_is_available(ep)) {
+ of_node_put(ep);
+ return -ENODEV;
+ }
+
+ /* add the remote encoder port as component */
+ port = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!port || !of_device_is_available(port)) {
+ of_node_put(port);
+ return -EAGAIN;
+ }
+
+ component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
+ match);
+}
+
+static int malidp_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &malidp_master_ops);
+ return 0;
+}
+
+static struct platform_driver malidp_platform_driver = {
+ .probe = malidp_platform_probe,
+ .remove = malidp_platform_remove,
+ .driver = {
+ .name = "mali-dp",
+ .of_match_table = malidp_drm_of_match,
+ },
+};
+
+module_platform_driver(malidp_platform_driver);
+
+MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
+MODULE_DESCRIPTION("ARM Mali DP DRM driver");
+MODULE_LICENSE("GPL v2");
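
The out_depth packing loop in malidp_bind() above folds the three "arm,malidp-output-port-lines" values into a single register word, one byte per channel with red ending up in the highest byte. A small userspace illustration, using a hypothetical 8/8/8 property value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t output_width[3] = { 8, 8, 8 };	/* R, G, B lines, example value */
	unsigned int out_depth = 0;
	int i;

	for (i = 0; i < 3; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);

	/* prints 0x00080808: R in bits [23:16], G in [15:8], B in [7:0] */
	printf("OUTPUT_DEPTH register value: 0x%08x\n", out_depth);
	return 0;
}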
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
new file mode 100644
index 000000000000..95558fde214b
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -0,0 +1,54 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures
+ */
+
+#ifndef __MALIDP_DRV_H__
+#define __MALIDP_DRV_H__
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include "malidp_hw.h"
+
+struct malidp_drm {
+ struct malidp_hw_device *dev;
+ struct drm_fbdev_cma *fbdev;
+ struct list_head event_list;
+ struct drm_crtc crtc;
+ wait_queue_head_t wq;
+ atomic_t config_valid;
+};
+
+#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
+
+struct malidp_plane {
+ struct drm_plane base;
+ struct malidp_hw_device *hwdev;
+ const struct malidp_layer *layer;
+};
+
+struct malidp_plane_state {
+ struct drm_plane_state base;
+
+ /* size of the required rotation memory if plane is rotated */
+ u32 rotmem_size;
+};
+
+#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
+#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
+
+int malidp_de_planes_init(struct drm_device *drm);
+void malidp_de_planes_destroy(struct drm_device *drm);
+int malidp_crtc_init(struct drm_device *drm);
+
+/* often used combination of rotational bits */
+#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
+
+#endif /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
new file mode 100644
index 000000000000..a6132f1d58c1
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -0,0 +1,691 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 hardware manipulation routines. This is where
+ * the differences between the various versions of the hardware are handled,
+ * so that the rest of the driver code gets a unified view of the device.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <video/videomode.h>
+#include <video/display_timing.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+
+static const struct malidp_input_format malidp500_de_formats[] = {
+ /* fourcc, layers supporting the format, internal id */
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
+ { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
+ { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
+ { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
+ { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 },
+ { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 },
+ { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
+ { DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
+ { DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
+ { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+ { DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+};
+
+#define MALIDP_ID(__group, __format) \
+ ((((__group) & 0x7) << 3) | ((__format) & 0x7))
+
+#define MALIDP_COMMON_FORMATS \
+ /* fourcc, layers supporting the format, internal id */ \
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
+ { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
+ { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+ { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
+ { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
+ { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
+ { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
+ { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
+ { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+ { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
+ { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
+ { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
+ { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+ { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
+ { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
+ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
+ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
+
+static const struct malidp_input_format malidp550_de_formats[] = {
+ MALIDP_COMMON_FORMATS,
+};
+
+static const struct malidp_layer malidp500_layers[] = {
+ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
+ { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
+ { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
+};
+
+static const struct malidp_layer malidp550_layers[] = {
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
+};
+
+#define MALIDP_DE_DEFAULT_PREFETCH_START 5
+
+static int malidp500_query_hw(struct malidp_hw_device *hwdev)
+{
+ u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID);
+ /* bit 4 of the CONFIG_ID register holds the line size multiplier */
+ u8 ln_size_mult = conf & 0x10 ? 2 : 1;
+
+ hwdev->min_line_size = 2;
+ hwdev->max_line_size = SZ_2K * ln_size_mult;
+ hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult;
+ hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */
+
+ return 0;
+}
+
+static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
+ break;
+ /*
+ * entering config mode can take as long as the rendering
+ * of a full frame, hence the long sleep here
+ */
+ usleep_range(1000, 10000);
+ count--;
+ }
+ WARN(count == 0, "timeout while entering config mode");
+}
+
+static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
+ break;
+ usleep_range(100, 1000);
+ count--;
+ }
+ WARN(count == 0, "timeout while leaving config mode");
+}
+
+static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status;
+
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
+ return true;
+
+ return false;
+}
+
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+{
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+}
+
+static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
+{
+ u32 val = 0;
+
+ malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
+ if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ val |= MALIDP500_HSYNCPOL;
+ if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ val |= MALIDP500_VSYNCPOL;
+ val |= MALIDP_DE_DEFAULT_PREFETCH_START;
+ malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL);
+
+ /*
+ * Mali-DP500 encodes the background color like this:
+ * - red @ MALIDP500_BGND_COLOR[12:0]
+ * - green @ MALIDP500_BGND_COLOR[27:16]
+ * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0]
+ */
+ val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) |
+ (MALIDP_BGND_COLOR_R & 0xfff);
+ malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR);
+ malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4);
+
+ val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
+ MALIDP_DE_H_BACKPORCH(mode->hback_porch);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
+
+ val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) |
+ MALIDP_DE_V_BACKPORCH(mode->vback_porch);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
+
+ val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
+ MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
+
+ val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
+
+ if (mode->flags & DISPLAY_FLAGS_INTERLACED)
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+}
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+{
+ unsigned int depth;
+ int bpp;
+
+ /* RGB888 or BGR888 can't be rotated */
+ if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
+ return -EINVAL;
+
+ /*
+ * Each layer needs enough rotation memory to fit 8 lines
+ * worth of pixel data. Required size is then:
+ * size = rotated_width * (bpp / 8) * 8;
+ */
+ drm_fb_get_bpp_depth(fmt, &depth, &bpp);
+
+ return w * bpp;
+}
+
+static int malidp550_query_hw(struct malidp_hw_device *hwdev)
+{
+ u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
+ u8 ln_size = (conf >> 4) & 0x3, rsize;
+
+ hwdev->min_line_size = 2;
+
+ switch (ln_size) {
+ case 0:
+ hwdev->max_line_size = SZ_2K;
+ /* two banks of 64KB for rotation memory */
+ rsize = 64;
+ break;
+ case 1:
+ hwdev->max_line_size = SZ_4K;
+ /* two banks of 128KB for rotation memory */
+ rsize = 128;
+ break;
+ case 2:
+ hwdev->max_line_size = 1280;
+ /* two banks of 40KB for rotation memory */
+ rsize = 40;
+ break;
+ case 3:
+ /* reserved value */
+ hwdev->max_line_size = 0;
+ return -EINVAL;
+ }
+
+ hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
+ return 0;
+}
+
+static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
+ break;
+ /*
+ * entering config mode can take as long as the rendering
+ * of a full frame, hence the long sleep here
+ */
+ usleep_range(1000, 10000);
+ count--;
+ }
+ WARN(count == 0, "timeout while entering config mode");
+}
+
+static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
+ break;
+ usleep_range(100, 1000);
+ count--;
+ }
+ WARN(count == 0, "timeout while leaving config mode");
+}
+
+static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status;
+
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
+ return true;
+
+ return false;
+}
+
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+{
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+}
+
+static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
+{
+ u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
+
+ malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
+ /*
+ * Mali-DP550 and Mali-DP650 encode the background color like this:
+ * - red @ MALIDP550_DE_BGND_COLOR[23:16]
+ * - green @ MALIDP550_DE_BGND_COLOR[15:8]
+ * - blue @ MALIDP550_DE_BGND_COLOR[7:0]
+ *
+ * We need to truncate the least significant 4 bits from the default
+ * MALIDP_BGND_COLOR_x values
+ */
+ val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) |
+ (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) |
+ ((MALIDP_BGND_COLOR_B >> 4) & 0xff);
+ malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR);
+
+ val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
+ MALIDP_DE_H_BACKPORCH(mode->hback_porch);
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
+
+ val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) |
+ MALIDP_DE_V_BACKPORCH(mode->vback_porch);
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
+
+ val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
+ MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
+ if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ val |= MALIDP550_HSYNCPOL;
+ if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ val |= MALIDP550_VSYNCPOL;
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
+
+ val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
+
+ if (mode->flags & DISPLAY_FLAGS_INTERLACED)
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+{
+ u32 bytes_per_col;
+
+ /* raw RGB888 or BGR888 can't be rotated */
+ if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
+ return -EINVAL;
+
+ switch (fmt) {
+ /* 8 lines at 4 bytes per pixel */
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ /* 16 lines at 2 bytes per pixel */
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YUYV:
+ bytes_per_col = 32;
+ break;
+ /* 16 lines at 1.5 bytes per pixel */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_YUV420:
+ bytes_per_col = 24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return w * bytes_per_col;
+}
+
+static int malidp650_query_hw(struct malidp_hw_device *hwdev)
+{
+ u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
+ u8 ln_size = (conf >> 4) & 0x3, rsize;
+
+ hwdev->min_line_size = 4;
+
+ switch (ln_size) {
+ case 0:
+ case 2:
+ /* reserved values */
+ hwdev->max_line_size = 0;
+ return -EINVAL;
+ case 1:
+ hwdev->max_line_size = SZ_4K;
+ /* two banks of 128KB for rotation memory */
+ rsize = 128;
+ break;
+ case 3:
+ hwdev->max_line_size = 2560;
+ /* two banks of 80KB for rotation memory */
+ rsize = 80;
+ }
+
+ hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
+ return 0;
+}
+
+const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+ [MALIDP_500] = {
+ .map = {
+ .se_base = MALIDP500_SE_BASE,
+ .dc_base = MALIDP500_DC_BASE,
+ .out_depth_base = MALIDP500_OUTPUT_DEPTH,
+ .features = 0, /* no CLEARIRQ register */
+ .n_layers = ARRAY_SIZE(malidp500_layers),
+ .layers = malidp500_layers,
+ .de_irq_map = {
+ .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP500_DE_IRQ_AXI_ERR |
+ MALIDP500_DE_IRQ_VSYNC |
+ MALIDP500_DE_IRQ_GLOBAL,
+ .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+ .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+ .vsync_irq = 0,
+ },
+ .dc_irq_map = {
+ .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
+ .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
+ },
+ .input_formats = malidp500_de_formats,
+ .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ },
+ .query_hw = malidp500_query_hw,
+ .enter_config_mode = malidp500_enter_config_mode,
+ .leave_config_mode = malidp500_leave_config_mode,
+ .in_config_mode = malidp500_in_config_mode,
+ .set_config_valid = malidp500_set_config_valid,
+ .modeset = malidp500_modeset,
+ .rotmem_required = malidp500_rotmem_required,
+ },
+ [MALIDP_550] = {
+ .map = {
+ .se_base = MALIDP550_SE_BASE,
+ .dc_base = MALIDP550_DC_BASE,
+ .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .n_layers = ARRAY_SIZE(malidp550_layers),
+ .layers = malidp550_layers,
+ .de_irq_map = {
+ .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP550_DE_IRQ_VSYNC,
+ .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+ .irq_mask = MALIDP550_SE_IRQ_EOW |
+ MALIDP550_SE_IRQ_AXI_ERR,
+ },
+ .dc_irq_map = {
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
+ },
+ .input_formats = malidp550_de_formats,
+ .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ },
+ .query_hw = malidp550_query_hw,
+ .enter_config_mode = malidp550_enter_config_mode,
+ .leave_config_mode = malidp550_leave_config_mode,
+ .in_config_mode = malidp550_in_config_mode,
+ .set_config_valid = malidp550_set_config_valid,
+ .modeset = malidp550_modeset,
+ .rotmem_required = malidp550_rotmem_required,
+ },
+ [MALIDP_650] = {
+ .map = {
+ .se_base = MALIDP550_SE_BASE,
+ .dc_base = MALIDP550_DC_BASE,
+ .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .n_layers = ARRAY_SIZE(malidp550_layers),
+ .layers = malidp550_layers,
+ .de_irq_map = {
+ .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP650_DE_IRQ_DRIFT |
+ MALIDP550_DE_IRQ_VSYNC,
+ .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+ .irq_mask = MALIDP550_SE_IRQ_EOW |
+ MALIDP550_SE_IRQ_AXI_ERR,
+ },
+ .dc_irq_map = {
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
+ },
+ .input_formats = malidp550_de_formats,
+ .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ },
+ .query_hw = malidp650_query_hw,
+ .enter_config_mode = malidp550_enter_config_mode,
+ .leave_config_mode = malidp550_leave_config_mode,
+ .in_config_mode = malidp550_in_config_mode,
+ .set_config_valid = malidp550_set_config_valid,
+ .modeset = malidp550_modeset,
+ .rotmem_required = malidp550_rotmem_required,
+ },
+};
+
+u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
+ u8 layer_id, u32 format)
+{
+ unsigned int i;
+
+ for (i = 0; i < map->n_input_formats; i++) {
+ if (((map->input_formats[i].layer & layer_id) == layer_id) &&
+ (map->input_formats[i].format == format))
+ return map->input_formats[i].id;
+ }
+
+ return MALIDP_INVALID_FORMAT_ID;
+}
+
+static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
+{
+ u32 base = malidp_get_block_base(hwdev, block);
+
+ if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+ malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
+ else
+ malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
+}
+
+static irqreturn_t malidp_de_irq(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev;
+ const struct malidp_irq_map *de;
+ u32 status, mask, dc_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (!drm->dev_private)
+ return IRQ_HANDLED;
+
+ hwdev = malidp->dev;
+ de = &hwdev->map.de_irq_map;
+
+ /* first handle the config valid IRQ */
+ dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+ /* we have a page flip event */
+ atomic_set(&malidp->config_valid, 1);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ status = malidp_hw_read(hwdev, MALIDP_REG_STATUS);
+ if (!(status & de->irq_mask))
+ return ret;
+
+ mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
+ status &= mask;
+ if (status & de->vsync_irq)
+ drm_crtc_handle_vblank(&malidp->crtc);
+
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
+
+ return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
+}
+
+static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct malidp_drm *malidp = drm->dev_private;
+
+ wake_up(&malidp->wq);
+
+ return IRQ_HANDLED;
+}
+
+int malidp_de_irq_init(struct drm_device *drm, int irq)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int ret;
+
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+ ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq,
+ malidp_de_irq_thread_handler,
+ IRQF_SHARED, "malidp-de", drm);
+ if (ret < 0) {
+ DRM_ERROR("failed to install DE IRQ handler\n");
+ return ret;
+ }
+
+ /* first enable the DC block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->map.dc_irq_map.irq_mask);
+
+ /* now enable the DE block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.irq_mask);
+
+ return 0;
+}
+
+void malidp_de_irq_fini(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.irq_mask);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->map.dc_irq_map.irq_mask);
+}
+
+static irqreturn_t malidp_se_irq(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ u32 status, mask;
+
+ status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ if (!(status & hwdev->map.se_irq_map.irq_mask))
+ return IRQ_NONE;
+
+ mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
+ status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ status &= mask;
+ /* TODO: decode the status bits and fire VSYNC and page flip events */
+
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
+{
+ return IRQ_HANDLED;
+}
+
+int malidp_se_irq_init(struct drm_device *drm, int irq)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int ret;
+
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+ ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq,
+ malidp_se_irq_thread_handler,
+ IRQF_SHARED, "malidp-se", drm);
+ if (ret < 0) {
+ DRM_ERROR("failed to install SE IRQ handler\n");
+ return ret;
+ }
+
+ malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->map.se_irq_map.irq_mask);
+
+ return 0;
+}
+
+void malidp_se_irq_fini(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->map.se_irq_map.irq_mask);
+}
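/*
 * Illustrative bring-up sketch for the two IRQ paths above (de_irq and
 * se_irq are assumed to come from the platform resources of whatever
 * code binds the driver):
 *
 *	ret = malidp_de_irq_init(drm, de_irq);
 *	if (ret)
 *		return ret;
 *	ret = malidp_se_irq_init(drm, se_irq);
 *	if (ret) {
 *		malidp_de_irq_fini(drm);
 *		return ret;
 *	}
 *
 * with the reverse order on teardown: malidp_se_irq_fini() followed by
 * malidp_de_irq_fini().
 */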
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
new file mode 100644
index 000000000000..141743e9f3a6
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -0,0 +1,241 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP hardware manipulation routines.
+ */
+
+#ifndef __MALIDP_HW_H__
+#define __MALIDP_HW_H__
+
+#include <linux/bitops.h>
+#include "malidp_regs.h"
+
+struct videomode;
+struct clk;
+
+/* Mali DP IP blocks */
+enum {
+ MALIDP_DE_BLOCK = 0,
+ MALIDP_SE_BLOCK,
+ MALIDP_DC_BLOCK
+};
+
+/* Mali DP layer IDs */
+enum {
+ DE_VIDEO1 = BIT(0),
+ DE_GRAPHICS1 = BIT(1),
+ DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
+ DE_VIDEO2 = BIT(3),
+ DE_SMART = BIT(4),
+};
+
+struct malidp_input_format {
+ u32 format; /* DRM fourcc */
+ u8 layer; /* bitmask of layers supporting it */
+ u8 id; /* used internally */
+};
+
+#define MALIDP_INVALID_FORMAT_ID 0xff
+
+/*
+ * hide the differences between register maps
+ * by using a common structure to hold the
+ * base register offsets
+ */
+
+struct malidp_irq_map {
+ u32 irq_mask; /* mask of IRQs that can be enabled in the block */
+ u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
+};
+
+struct malidp_layer {
+ u16 id; /* layer ID */
+ u16 base; /* address offset for the register bank */
+ u16 ptr; /* address offset for the pointer register */
+};
+
+/* regmap features */
+#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
+
+struct malidp_hw_regmap {
+ /* the DE register bank is always at offset 0x0000, so only
+  * the other banks need an entry here */
+ /* address offset of the SE register bank */
+ const u16 se_base;
+ /* address offset of the DC register bank */
+ const u16 dc_base;
+
+ /* address offset for the output depth register */
+ const u16 out_depth_base;
+
+ /* bitmap with register map features */
+ const u8 features;
+
+ /* list of supported layers */
+ const u8 n_layers;
+ const struct malidp_layer *layers;
+
+ const struct malidp_irq_map de_irq_map;
+ const struct malidp_irq_map se_irq_map;
+ const struct malidp_irq_map dc_irq_map;
+
+ /* list of supported input formats for each layer */
+ const struct malidp_input_format *input_formats;
+ const u8 n_input_formats;
+};
+
+struct malidp_hw_device {
+ const struct malidp_hw_regmap map;
+ void __iomem *regs;
+
+ /* APB clock */
+ struct clk *pclk;
+ /* AXI clock */
+ struct clk *aclk;
+ /* main clock for display core */
+ struct clk *mclk;
+ /* pixel clock for display core */
+ struct clk *pxlclk;
+
+ /*
+ * Validate the driver instance against the hardware bits
+ */
+ int (*query_hw)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Set the hardware into config mode, ready to accept mode changes
+ */
+ void (*enter_config_mode)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Tell hardware to exit configuration mode
+ */
+ void (*leave_config_mode)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Query if hardware is in configuration mode
+ */
+ bool (*in_config_mode)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Set the configuration valid flag for hardware parameters that can
+ * be changed outside the configuration mode. Hardware will use
+ * the new settings when the config valid flag is set after the end
+ * of the current buffer scanout.
+ */
+ void (*set_config_valid)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Set a new mode in hardware. Requires the hardware to be in
+ * configuration mode before this function is called.
+ */
+ void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m);
+
+ /*
+ * Calculate the required rotation memory given the active area
+ * and the buffer format.
+ */
+ int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+
+ u8 features;
+
+ u8 min_line_size;
+ u16 max_line_size;
+
+ /* size of memory used for rotating layers, up to two banks available */
+ u32 rotation_memory[2];
+};
+
+/* Supported variants of the hardware */
+enum {
+ MALIDP_500 = 0,
+ MALIDP_550,
+ MALIDP_650,
+ /* keep the next entry last */
+ MALIDP_MAX_DEVICES
+};
+
+extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+
+static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
+{
+ return readl(hwdev->regs + reg);
+}
+
+static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
+ u32 value, u32 reg)
+{
+ writel(value, hwdev->regs + reg);
+}
+
+static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev,
+ u32 mask, u32 reg)
+{
+ u32 data = malidp_hw_read(hwdev, reg);
+
+ data |= mask;
+ malidp_hw_write(hwdev, data, reg);
+}
+
+static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev,
+ u32 mask, u32 reg)
+{
+ u32 data = malidp_hw_read(hwdev, reg);
+
+ data &= ~mask;
+ malidp_hw_write(hwdev, data, reg);
+}
+
+static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
+ u8 block)
+{
+ switch (block) {
+ case MALIDP_SE_BLOCK:
+ return hwdev->map.se_base;
+ case MALIDP_DC_BLOCK:
+ return hwdev->map.dc_base;
+ }
+
+ return 0;
+}
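/*
 * Note on the fall-through above: returning 0 covers MALIDP_DE_BLOCK,
 * whose register bank always starts at offset 0x0000 (see the
 * malidp_hw_regmap comment), so no dedicated base field is needed.
 */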
+
+static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev,
+ u8 block, u32 irq)
+{
+ u32 base = malidp_get_block_base(hwdev, block);
+
+ malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
+}
+
+static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
+ u8 block, u32 irq)
+{
+ u32 base = malidp_get_block_base(hwdev, block);
+
+ malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
+}
+
+int malidp_de_irq_init(struct drm_device *drm, int irq);
+void malidp_de_irq_fini(struct drm_device *drm);
+int malidp_se_irq_init(struct drm_device *drm, int irq);
+void malidp_se_irq_fini(struct drm_device *drm);
+
+u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
+ u8 layer_id, u32 format);
+
+/*
+ * Background color components are defined as 12-bit values; they are
+ * shifted right when stored on hardware that supports only 8 bits
+ * per channel.
+ */
+#define MALIDP_BGND_COLOR_R 0x000
+#define MALIDP_BGND_COLOR_G 0x000
+#define MALIDP_BGND_COLOR_B 0x000
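/*
 * Worked example for the note above (values assumed for illustration):
 * a full-scale 12-bit component of 0xfff is stored as 0xfff >> 4 = 0xff
 * on hardware with only 8 bits per channel; the default components here
 * are all zero (black), so the shift does not change them.
 */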
+
+#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
new file mode 100644
index 000000000000..725098d6179a
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -0,0 +1,298 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP plane manipulation routines.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "malidp_hw.h"
+#include "malidp_drv.h"
+
+/* Layer specific register offsets */
+#define MALIDP_LAYER_FORMAT 0x000
+#define MALIDP_LAYER_CONTROL 0x004
+#define LAYER_ENABLE (1 << 0)
+#define LAYER_ROT_OFFSET 8
+#define LAYER_H_FLIP (1 << 10)
+#define LAYER_V_FLIP (1 << 11)
+#define LAYER_ROT_MASK (0xf << 8)
+#define MALIDP_LAYER_SIZE 0x00c
+#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
+#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
+#define MALIDP_LAYER_COMP_SIZE 0x010
+#define MALIDP_LAYER_OFFSET 0x014
+#define MALIDP_LAYER_STRIDE 0x018
+
+static void malidp_de_plane_destroy(struct drm_plane *plane)
+{
+ struct malidp_plane *mp = to_malidp_plane(plane);
+
+ if (mp->base.fb)
+ drm_framebuffer_unreference(mp->base.fb);
+
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+ devm_kfree(plane->dev->dev, mp);
+}
+
+struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
+{
+ struct malidp_plane_state *state, *m_state;
+
+ if (!plane->state)
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ m_state = to_malidp_plane_state(plane->state);
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ state->rotmem_size = m_state->rotmem_size;
+ }
+
+ return &state->base;
+}
+
+void malidp_destroy_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct malidp_plane_state *m_state = to_malidp_plane_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(m_state);
+}
+
+static const struct drm_plane_funcs malidp_de_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = malidp_de_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = malidp_duplicate_plane_state,
+ .atomic_destroy_state = malidp_destroy_plane_state,
+};
+
+static int malidp_de_plane_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ struct malidp_plane_state *ms = to_malidp_plane_state(state);
+ u8 format_id;
+ u32 src_w, src_h;
+
+ if (!state->crtc || !state->fb)
+ return 0;
+
+ format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+ state->fb->pixel_format);
+ if (format_id == MALIDP_INVALID_FORMAT_ID)
+ return -EINVAL;
+
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ if ((state->crtc_w > mp->hwdev->max_line_size) ||
+ (state->crtc_h > mp->hwdev->max_line_size) ||
+ (state->crtc_w < mp->hwdev->min_line_size) ||
+ (state->crtc_h < mp->hwdev->min_line_size) ||
+ (state->crtc_w != src_w) || (state->crtc_h != src_h))
+ return -EINVAL;
+
+ /* packed RGB888 / BGR888 can't be rotated or flipped */
+ if (state->rotation != BIT(DRM_ROTATE_0) &&
+ (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
+ state->fb->pixel_format == DRM_FORMAT_BGR888))
+ return -EINVAL;
+
+ ms->rotmem_size = 0;
+ if (state->rotation & MALIDP_ROTATED_MASK) {
+ int val;
+
+ val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
+ state->crtc_w,
+ state->fb->pixel_format);
+ if (val < 0)
+ return val;
+
+ ms->rotmem_size = val;
+ }
+
+ return 0;
+}
+
+static void malidp_de_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_gem_cma_object *obj;
+ struct malidp_plane *mp;
+ const struct malidp_hw_regmap *map;
+ u8 format_id;
+ u16 ptr;
+ u32 format, src_w, src_h, dest_w, dest_h, val = 0;
+ int num_planes, i;
+
+ mp = to_malidp_plane(plane);
+
+ map = &mp->hwdev->map;
+ format = plane->state->fb->pixel_format;
+ format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
+ num_planes = drm_format_num_planes(format);
+
+ /* convert src values from Q16 fixed point to integer */
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ if (plane->state->rotation & MALIDP_ROTATED_MASK) {
+ dest_w = plane->state->crtc_h;
+ dest_h = plane->state->crtc_w;
+ } else {
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
+ }
+
+ malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+
+ for (i = 0; i < num_planes; i++) {
+ /* calculate the offset for the layer's plane registers */
+ ptr = mp->layer->ptr + (i << 4);
+
+ obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
+ malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
+ malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
+ mp->layer->base + MALIDP_LAYER_STRIDE);
+ }
+
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+ mp->layer->base + MALIDP_LAYER_SIZE);
+
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
+ mp->layer->base + MALIDP_LAYER_COMP_SIZE);
+
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) |
+ LAYER_V_VAL(plane->state->crtc_y),
+ mp->layer->base + MALIDP_LAYER_OFFSET);
+
+ /* first clear the rotation bits in the register */
+ malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
+
+ /* setup the rotation and axis flip bits */
+ if (plane->state->rotation & DRM_ROTATE_MASK)
+ val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+ if (plane->state->rotation & BIT(DRM_REFLECT_X))
+ val |= LAYER_V_FLIP;
+ if (plane->state->rotation & BIT(DRM_REFLECT_Y))
+ val |= LAYER_H_FLIP;
+
+ /* set the 'enable layer' bit */
+ val |= LAYER_ENABLE;
+
+ malidp_hw_setbits(mp->hwdev, val,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
+}
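/*
 * Note on the Q16 conversion at the top of malidp_de_plane_update():
 * DRM source coordinates are 16.16 fixed point, so a 1920 pixel wide
 * source arrives as 1920 << 16 = 0x07800000 and the right shift by 16
 * recovers the integer width:
 *
 *	src_w = plane->state->src_w >> 16;	(0x07800000 becomes 1920)
 */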
+
+static void malidp_de_plane_disable(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct malidp_plane *mp = to_malidp_plane(plane);
+
+ malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
+ .atomic_check = malidp_de_plane_check,
+ .atomic_update = malidp_de_plane_update,
+ .atomic_disable = malidp_de_plane_disable,
+};
+
+int malidp_de_planes_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ const struct malidp_hw_regmap *map = &malidp->dev->map;
+ struct malidp_plane *plane = NULL;
+ enum drm_plane_type plane_type;
+ unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ u32 *formats;
+ int ret, i, j, n;
+
+ formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
+ if (!formats) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ for (i = 0; i < map->n_layers; i++) {
+ u8 id = map->layers[i].id;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* build the list of DRM supported formats based on the map */
+ for (n = 0, j = 0; j < map->n_input_formats; j++) {
+ if ((map->input_formats[j].layer & id) == id)
+ formats[n++] = map->input_formats[j].format;
+ }
+
+ plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(drm, &plane->base, crtcs,
+ &malidp_de_plane_funcs, formats,
+ n, plane_type, NULL);
+ if (ret < 0)
+ goto cleanup;
+
+ if (!drm->mode_config.rotation_property) {
+ unsigned long flags = BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270) |
+ BIT(DRM_REFLECT_X) |
+ BIT(DRM_REFLECT_Y);
+ drm->mode_config.rotation_property =
+ drm_mode_create_rotation_property(drm, flags);
+ }
+ /* SMART layer can't be rotated */
+ if (drm->mode_config.rotation_property && (id != DE_SMART))
+ drm_object_attach_property(&plane->base.base,
+ drm->mode_config.rotation_property,
+ BIT(DRM_ROTATE_0));
+
+ drm_plane_helper_add(&plane->base,
+ &malidp_de_plane_helper_funcs);
+ plane->hwdev = malidp->dev;
+ plane->layer = &map->layers[i];
+ }
+
+ kfree(formats);
+
+ return 0;
+
+cleanup:
+ malidp_de_planes_destroy(drm);
+ kfree(formats);
+
+ return ret;
+}
+
+void malidp_de_planes_destroy(struct drm_device *drm)
+{
+ struct drm_plane *p, *pt;
+
+ list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
+ drm_plane_cleanup(p);
+ kfree(p);
+ }
+}
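/*
 * Note on the per-layer format list built in malidp_de_planes_init():
 * every map->input_formats[] entry carries a bitmask of the layers that
 * can scan it out, so for a layer whose id is, e.g., DE_GRAPHICS1 only
 * the entries with that bit set are copied into the array passed to
 * drm_universal_plane_init().
 */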
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
new file mode 100644
index 000000000000..73fecb38f955
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -0,0 +1,172 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 registers definition.
+ */
+
+#ifndef __MALIDP_REGS_H__
+#define __MALIDP_REGS_H__
+
+/*
+ * abbreviations used:
+ * - DC - display core (general settings)
+ * - DE - display engine
+ * - SE - scaling engine
+ */
+
+/* interrupt bit masks */
+#define MALIDP_DE_IRQ_UNDERRUN (1 << 0)
+
+#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4)
+#define MALIDP500_DE_IRQ_VSYNC (1 << 5)
+#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6)
+#define MALIDP500_DE_IRQ_SATURATION (1 << 7)
+#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8)
+#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11)
+#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17)
+#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18)
+#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19)
+#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24)
+#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28)
+#define MALIDP500_DE_IRQ_GLOBAL (1 << 31)
+#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0)
+#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4)
+#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5)
+#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8)
+#define MALIDP500_SE_IRQ_OVERRUN (1 << 9)
+#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12)
+#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13)
+#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17)
+#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18)
+#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28)
+#define MALIDP500_SE_IRQ_GLOBAL (1 << 31)
+
+#define MALIDP550_DE_IRQ_SATURATION (1 << 8)
+#define MALIDP550_DE_IRQ_VSYNC (1 << 12)
+#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13)
+#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_SE_IRQ_EOW (1 << 0)
+#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
+#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
+#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
+#define MALIDP550_DC_IRQ_DE (1 << 20)
+#define MALIDP550_DC_IRQ_SE (1 << 24)
+
+#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
+
+/* bit masks that are common between products */
+#define MALIDP_CFG_VALID (1 << 0)
+#define MALIDP_DISP_FUNC_ILACED (1 << 8)
+
+/* register offsets for IRQ management */
+#define MALIDP_REG_STATUS 0x00000
+#define MALIDP_REG_SETIRQ 0x00004
+#define MALIDP_REG_MASKIRQ 0x00008
+#define MALIDP_REG_CLEARIRQ 0x0000c
+
+/* register offsets */
+#define MALIDP_DE_CORE_ID 0x00018
+#define MALIDP_DE_DISPLAY_FUNC 0x00020
+
+/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */
+#define MALIDP_DE_H_TIMINGS 0x0
+#define MALIDP_DE_V_TIMINGS 0x4
+#define MALIDP_DE_SYNC_WIDTH 0x8
+#define MALIDP_DE_HV_ACTIVE 0xc
+
+/* macros to set values into registers */
+#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
+#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16)
+#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0)
+#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0)
+#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16)
+#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0)
+#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16)
+#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0)
+#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16)
+
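/*
 * Illustrative sketch of how the macros above compose a timings value
 * (88/148 are assumed example porch widths, and timings_base stands in
 * for MALIDP500_TIMINGS_BASE or MALIDP550_TIMINGS_BASE):
 *
 *	u32 hval = MALIDP_DE_H_FRONTPORCH(88) | MALIDP_DE_H_BACKPORCH(148);
 *	malidp_hw_write(hwdev, hval, timings_base + MALIDP_DE_H_TIMINGS);
 */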
+/* register offsets and bits specific to DP500 */
+#define MALIDP500_DC_BASE 0x00000
+#define MALIDP500_DC_CONTROL 0x0000c
+#define MALIDP500_DC_CONFIG_REQ (1 << 17)
+#define MALIDP500_HSYNCPOL (1 << 20)
+#define MALIDP500_VSYNCPOL (1 << 21)
+#define MALIDP500_DC_CLEAR_MASK 0x300fff
+#define MALIDP500_DE_LINE_COUNTER 0x00010
+#define MALIDP500_DE_AXI_CONTROL 0x00014
+#define MALIDP500_DE_SECURE_CTRL 0x0001c
+#define MALIDP500_DE_CHROMA_KEY 0x00024
+#define MALIDP500_TIMINGS_BASE 0x00028
+
+#define MALIDP500_CONFIG_3D 0x00038
+#define MALIDP500_BGND_COLOR 0x0003c
+#define MALIDP500_OUTPUT_DEPTH 0x00044
+#define MALIDP500_YUV_RGB_COEF 0x00048
+#define MALIDP500_COLOR_ADJ_COEF 0x00078
+#define MALIDP500_COEF_TABLE_ADDR 0x000a8
+#define MALIDP500_COEF_TABLE_DATA 0x000ac
+#define MALIDP500_DE_LV_BASE 0x00100
+#define MALIDP500_DE_LV_PTR_BASE 0x00124
+#define MALIDP500_DE_LG1_BASE 0x00200
+#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
+#define MALIDP500_DE_LG2_BASE 0x00300
+#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
+#define MALIDP500_SE_BASE 0x00c00
+#define MALIDP500_SE_PTR_BASE 0x00e0c
+#define MALIDP500_DC_IRQ_BASE 0x00f00
+#define MALIDP500_CONFIG_VALID 0x00f00
+#define MALIDP500_CONFIG_ID 0x00fd4
+
+/* register offsets and bits specific to DP550/DP650 */
+#define MALIDP550_DE_CONTROL 0x00010
+#define MALIDP550_DE_LINE_COUNTER 0x00014
+#define MALIDP550_DE_AXI_CONTROL 0x00018
+#define MALIDP550_DE_QOS 0x0001c
+#define MALIDP550_TIMINGS_BASE 0x00030
+#define MALIDP550_HSYNCPOL (1 << 12)
+#define MALIDP550_VSYNCPOL (1 << 28)
+
+#define MALIDP550_DE_DISP_SIDEBAND 0x00040
+#define MALIDP550_DE_BGND_COLOR 0x00044
+#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
+#define MALIDP550_DE_COLOR_COEF 0x00050
+#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
+#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
+#define MALIDP550_DE_LV1_BASE 0x00100
+#define MALIDP550_DE_LV1_PTR_BASE 0x00124
+#define MALIDP550_DE_LV2_BASE 0x00200
+#define MALIDP550_DE_LV2_PTR_BASE 0x00224
+#define MALIDP550_DE_LG_BASE 0x00300
+#define MALIDP550_DE_LG_PTR_BASE 0x0031c
+#define MALIDP550_DE_LS_BASE 0x00400
+#define MALIDP550_DE_LS_PTR_BASE 0x0042c
+#define MALIDP550_DE_PERF_BASE 0x00500
+#define MALIDP550_SE_BASE 0x08000
+#define MALIDP550_DC_BASE 0x0c000
+#define MALIDP550_DC_CONTROL 0x0c010
+#define MALIDP550_DC_CONFIG_REQ (1 << 16)
+#define MALIDP550_CONFIG_VALID 0x0c014
+#define MALIDP550_CONFIG_ID 0x0ffd4
+
+/*
+ * Starting with DP550 the register map blocks have been standardised to the
+ * following layout:
+ *
+ * Offset Block registers
+ * 0x00000 Display Engine
+ * 0x08000 Scaling Engine
+ * 0x0c000 Display Core
+ * 0x10000 Secure control
+ *
+ * The old DP500 IP mixes some DC with the DE registers, hence the need
+ * for a mapping structure.
+ */
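/*
 * Worked example of the layout above: on DP550/DP650 the Display Core
 * status register is read at MALIDP550_DC_BASE + MALIDP_REG_STATUS =
 * 0x0c000 + 0x00000 = 0x0c000, while on DP500 MALIDP500_DC_BASE is
 * 0x00000, i.e. those bits live in the DE status register; that is
 * exactly the difference the malidp_hw_regmap .dc_base field hides
 * from callers.
 */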
+
+#endif /* __MALIDP_REGS_H__ */
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index eb773e9af313..15f3ecfb16f1 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -1,11 +1,7 @@
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3130aa8bcdd0..2f58e9e2a59c 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
/* Handle any pending frame work. */
if (work) {
work->fn(dcrtc, plane, work);
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
}
wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
{
int ret;
- ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ ret = drm_crtc_vblank_get(&dcrtc->crtc);
if (ret) {
DRM_ERROR("failed to acquire vblank counter\n");
return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
if (ret)
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
return ret;
}
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_plane_work *work = xchg(&plane->work, NULL);
if (work)
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
return work;
}
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
if (fwork->event) {
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
if (stat & VSYNC_IRQ)
- drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_handle_vblank(&dcrtc->crtc);
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
if (interlaced ^ dcrtc->interlaced) {
if (adj->flags & DRM_MODE_FLAG_INTERLACE)
- drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_get(&dcrtc->crtc);
else
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
dcrtc->interlaced = interlaced;
}
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 439824a61aa5..f5ebdd681445 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = {
.load = armada_drm_load,
.lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
@@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = {
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
#endif
- .gem_free_object = armada_gem_free_object,
+ .gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = armada_gem_prime_export,
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 148e8a42b2c6..1ee707ef6b8d 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
int ret;
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+ BIT(DRM_ROTATE_0),
0, INT_MAX, true, false, &visible);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 8a784c460c89..15f6ce7acb2a 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -2,11 +2,7 @@ config DRM_AST
tristate "AST server chips"
depends on DRM && PCI
select DRM_TTM
- select FB_SYS_COPYAREA
- select FB_SYS_FILLRECT
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fcd9c0714836..f54afd2113a9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = ast_gem_free_object,
+ .gem_free_object_unlocked = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5320f8c57884..c017a9330a18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
- u32 bpp, depth;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = ast_gem_create(dev, size, true, &gobj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7bc3aa6dda8c..904beaa932d0 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev)
static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
- if (ast_fb->obj)
- drm_gem_object_unreference_unlocked(ast_fb->obj);
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c337922606e3..5957c3e659fe 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
}
-static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
ast_crtc->lut_r[i] = red[i] >> 8;
ast_crtc->lut_g[i] = green[i] >> 8;
ast_crtc->lut_b[i] = blue[i] >> 8;
}
ast_crtc_load_lut(crtc);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 59f2f93b6f84..b29a41218fc9 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
{
}
-static int ast_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
.evict_flags = ast_bo_evict_flags,
- .move = ast_bo_move,
+ .move = NULL,
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 99b4f0698a30..32bcc4bad06a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index bd12231ab0cd..a978381ef95b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (crtc->event) {
- drm_send_vblank_event(dev, crtc->id, crtc->event);
- drm_vblank_put(dev, crtc->id);
+ drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+ drm_crtc_vblank_put(&crtc->base);
crtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
{
- drm_handle_vblank(c->dev, 0);
+ drm_crtc_handle_vblank(c);
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8ded7645747e..d4a3d61b7b06 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
}
/* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (async)
queue_work(dc->wq, &commit->work);
@@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
destroy_workqueue(dc->wq);
}
-static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
-{
- mutex_lock(&dev->mode_config.mutex);
- drm_connector_unregister_all(dev);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = atmel_hlcdc_dc_enable_vblank,
.disable_vblank = atmel_hlcdc_dc_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- ret = drm_connector_register_all(ddev);
- if (ret)
- goto err_unregister;
-
return 0;
-err_unregister:
- drm_dev_unregister(ddev);
-
err_unload:
atmel_hlcdc_dc_unload(ddev);
@@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
- atmel_hlcdc_dc_connector_unplug_all(ddev);
drm_dev_unregister(ddev);
atmel_hlcdc_dc_unload(ddev);
drm_dev_unref(ddev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 3d34fc4ca826..6119b5085501 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
}
-
-
-static struct drm_encoder *
-atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
- return &rgb->encoder;
-}
-
static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
.get_modes = atmel_hlcdc_panel_get_modes,
.mode_valid = atmel_hlcdc_rgb_mode_valid,
- .best_encoder = atmel_hlcdc_rgb_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 5f8b0c2b9a44..f739763f47ce 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,10 +2,6 @@ config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_TTM
help
Choose this option for qemu.
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b332b4d3b0e2..abace82de6ea 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
.date = "20130925",
.major = 1,
.minor = 0,
- .gem_free_object = bochs_gem_free_object,
+ .gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 6cf912c45e48..5c5638a777a1 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
{
}
-static int bochs_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-}
-
-
static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
.evict_flags = bochs_bo_evict_flags,
- .move = bochs_bo_move,
+ .move = NULL,
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
@@ -474,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
- if (bochs_fb->obj)
- drm_gem_object_unreference_unlocked(bochs_fb->obj);
+
+ drm_gem_object_unreference_unlocked(bochs_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8f7423f18da5..b590e678052d 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -50,6 +50,25 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SII902X
+ tristate "Silicon Image sii902x RGB/HDMI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ ---help---
+ Silicon Image sii902x bridge chip driver.
+
+config DRM_TOSHIBA_TC358767
+ tristate "Toshiba TC358767 eDP bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ ---help---
+ Toshiba TC358767 eDP bridge chip driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
+source "drivers/gpu/drm/bridge/adv7511/Kconfig"
+
endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 96b13b30e6ab..efdb07e878f5 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -5,4 +5,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SII902X) += sii902x.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
new file mode 100644
index 000000000000..d2b0499ab7d7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -0,0 +1,15 @@
+config DRM_I2C_ADV7511
+ tristate "AV7511 encoder"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
+config DRM_I2C_ADV7533
+ bool "ADV7533 encoder"
+ depends on DRM_I2C_ADV7511
+ select DRM_MIPI_DSI
+ default y
+ help
+ Support for the Analog Devices ADV7533 DSI to HDMI encoder.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
new file mode 100644
index 000000000000..9019327fff4c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -0,0 +1,3 @@
+adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 38515b30cedf..161c923d6162 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -10,6 +10,11 @@
#define __DRM_I2C_ADV7511_H__
#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
#define ADV7511_REG_CHIP_REVISION 0x00
#define ADV7511_REG_N0 0x01
@@ -286,4 +291,102 @@ struct adv7511_video_config {
struct hdmi_avi_infoframe avi_infoframe;
};
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+ bool use_timing_gen;
+
+ enum adv7511_type type;
+};
+
+#ifdef CONFIG_DRM_I2C_ADV7533
+void adv7533_dsi_power_on(struct adv7511 *adv);
+void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+int adv7533_patch_registers(struct adv7511 *adv);
+void adv7533_uninit_cec(struct adv7511 *adv);
+int adv7533_init_cec(struct adv7511 *adv);
+int adv7533_attach_dsi(struct adv7511 *adv);
+void adv7533_detach_dsi(struct adv7511 *adv);
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
+#else
+static inline void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_mode_set(struct adv7511 *adv,
+ struct drm_display_mode *mode)
+{
+}
+
+static inline int adv7533_patch_registers(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline void adv7533_uninit_cec(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_init_cec(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline void adv7533_detach_dsi(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a02112ba1c3d..ec8fb2ed3275 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -8,51 +8,17 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/regmap.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
#include "adv7511.h"
-struct adv7511 {
- struct i2c_client *i2c_main;
- struct i2c_client *i2c_edid;
-
- struct regmap *regmap;
- struct regmap *packet_memory_regmap;
- enum drm_connector_status status;
- bool powered;
-
- unsigned int f_tmds;
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
- bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-
- bool embedded_sync;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
- bool rgb;
-
- struct edid *edid;
-
- struct gpio_desc *gpio_pd;
-};
-
-static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
-{
- return to_encoder_slave(encoder)->slave_priv;
-}
-
/* ADI recommended values for proper operation. */
static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x98, 0x03 },
@@ -394,6 +360,9 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_on(adv7511);
+
adv7511->powered = true;
}
@@ -405,6 +374,9 @@ static void adv7511_power_off(struct adv7511 *adv7511)
ADV7511_POWER_POWER_DOWN);
regcache_mark_dirty(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_off(adv7511);
+
adv7511->powered = false;
}
@@ -430,7 +402,7 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -446,8 +418,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+ drm_helper_hpd_irq_event(adv7511->connector.dev);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -464,7 +436,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -481,7 +453,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -563,13 +535,12 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
}
/* -----------------------------------------------------------------------------
- * Encoder operations
+ * ADV75xx helpers
*/
-static int adv7511_get_modes(struct drm_encoder *encoder,
+static int adv7511_get_modes(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
struct edid *edid;
unsigned int count;
@@ -606,21 +577,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
return count;
}
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- adv7511_power_on(adv7511);
- else
- adv7511_power_off(adv7511);
-}
-
static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
enum drm_connector_status status;
unsigned int val;
bool hpd;
@@ -644,7 +603,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- adv7511_get_modes(encoder, connector);
+ adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -658,8 +617,8 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
return status;
}
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -667,11 +626,10 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
return MODE_OK;
}
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void adv7511_mode_set(struct adv7511 *adv7511,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
unsigned int vsync_polarity = 0;
@@ -754,6 +712,11 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ if (adv7511->type == ADV7533)
+ adv7533_mode_set(adv7511, adj_mode);
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
@@ -762,12 +725,114 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
adv7511->f_tmds = mode->clock;
}
-static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
- .dpms = adv7511_encoder_dpms,
- .mode_valid = adv7511_encoder_mode_valid,
- .mode_set = adv7511_encoder_mode_set,
- .detect = adv7511_encoder_detect,
- .get_modes = adv7511_get_modes,
+/* Connector funcs */
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+static int adv7511_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static enum drm_mode_status
+adv7511_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
+ .get_modes = adv7511_connector_get_modes,
+ .mode_valid = adv7511_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7511_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7511_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7511_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7511_bridge_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7511_bridge_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_off(adv);
+}
+
+static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7511_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7511_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7511_connector_helper_funcs);
+ drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ if (adv->type == ADV7533)
+ ret = adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7511_bridge_funcs = {
+ .enable = adv7511_bridge_enable,
+ .disable = adv7511_bridge_disable,
+ .mode_set = adv7511_bridge_mode_set,
+ .attach = adv7511_bridge_attach,
};
/* -----------------------------------------------------------------------------
@@ -780,8 +845,6 @@ static int adv7511_parse_dt(struct device_node *np,
const char *str;
int ret;
- memset(config, 0, sizeof(*config));
-
of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
config->input_color_depth != 12)
@@ -881,7 +944,17 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
- ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (dev->of_node)
+ adv7511->type = (enum adv7511_type)of_device_get_match_data(dev);
+ else
+ adv7511->type = id->driver_data;
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
if (ret)
return ret;
@@ -907,8 +980,12 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
dev_dbg(dev, "Rev. %d\n", val);
- ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
- ARRAY_SIZE(adv7511_fixed_registers));
+ if (adv7511->type == ADV7511)
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ else
+ ret = adv7533_patch_registers(adv7511);
if (ret)
return ret;
@@ -923,6 +1000,12 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ if (adv7511->type == ADV7533) {
+ ret = adv7533_init_cec(adv7511);
+ if (ret)
+ goto err_i2c_unregister_edid;
+ }
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -931,7 +1014,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
IRQF_ONESHOT, dev_name(dev),
adv7511);
if (ret)
- goto err_i2c_unregister_device;
+ goto err_unregister_cec;
}
/* CEC is unused for now */
@@ -942,11 +1025,23 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
i2c_set_clientdata(i2c, adv7511);
- adv7511_set_link_config(adv7511, &link_config);
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ adv7511->bridge.funcs = &adv7511_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7511 bridge\n");
+ goto err_unregister_cec;
+ }
return 0;
-err_i2c_unregister_device:
+err_unregister_cec:
+ adv7533_uninit_cec(adv7511);
+err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
return ret;
@@ -956,66 +1051,71 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- i2c_unregister_device(adv7511->i2c_edid);
-
- kfree(adv7511->edid);
-
- return 0;
-}
-
-static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
- struct drm_encoder_slave *encoder)
-{
+ if (adv7511->type == ADV7533) {
+ adv7533_detach_dsi(adv7511);
+ adv7533_uninit_cec(adv7511);
+ }
- struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ drm_bridge_remove(&adv7511->bridge);
- encoder->slave_priv = adv7511;
- encoder->slave_funcs = &adv7511_encoder_funcs;
+ i2c_unregister_device(adv7511->i2c_edid);
- adv7511->encoder = &encoder->base;
+ kfree(adv7511->edid);
return 0;
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
- { "adv7511", 0 },
- { "adv7511w", 0 },
- { "adv7513", 0 },
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+ { "adv7533", ADV7533 },
+#endif
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
static const struct of_device_id adv7511_of_ids[] = {
- { .compatible = "adi,adv7511", },
- { .compatible = "adi,adv7511w", },
- { .compatible = "adi,adv7513", },
+ { .compatible = "adi,adv7511", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *)ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+ { .compatible = "adi,adv7533", .data = (void *)ADV7533 },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-static struct drm_i2c_encoder_driver adv7511_driver = {
- .i2c_driver = {
- .driver = {
- .name = "adv7511",
- .of_match_table = adv7511_of_ids,
- },
- .id_table = adv7511_i2c_ids,
- .probe = adv7511_probe,
- .remove = adv7511_remove,
- },
+static struct mipi_dsi_driver adv7533_dsi_driver = {
+ .driver.name = "adv7533",
+};
- .encoder_init = adv7511_encoder_init,
+static struct i2c_driver adv7511_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
};
static int __init adv7511_init(void)
{
- return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_register(&adv7533_dsi_driver);
+
+ return i2c_add_driver(&adv7511_driver);
}
module_init(adv7511_init);
static void __exit adv7511_exit(void)
{
- drm_i2c_encoder_unregister(&adv7511_driver);
+ i2c_del_driver(&adv7511_driver);
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
}
module_exit(adv7511_exit);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
new file mode 100644
index 000000000000..5eebd15899b1
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_graph.h>
+
+#include "adv7511.h"
+
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+};
+
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ struct drm_display_mode *mode = &adv->curr_mode;
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* set pixel clock divider mode */
+ regmap_write(adv->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
+ /* horizontal porch params */
+ regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+
+ if (adv->use_timing_gen)
+ adv7511_dsi_config_timing_gen(adv);
+
+ /* set number of dsi lanes */
+ regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
+
+ if (adv->use_timing_gen) {
+ /* reset internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ } else {
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+ }
+
+ /* enable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv->regmap_cec, 0x55, 0x00);
+
+ regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+}
+
+void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+ /* disable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x0b);
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+}
+
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ int lanes, ret;
+
+ if (adv->num_dsi_lanes != 4)
+ return;
+
+ if (mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "failed to change host lanes\n");
+ }
+}
+
+int adv7533_patch_registers(struct adv7511 *adv)
+{
+ return regmap_register_patch(adv->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+}
+
+void adv7533_uninit_cec(struct adv7511 *adv)
+{
+ i2c_unregister_device(adv->i2c_cec);
+}
+
+static const int cec_i2c_addr = 0x78;
+
+int adv7533_init_cec(struct adv7511 *adv)
+{
+ int ret;
+
+ adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+ if (!adv->i2c_cec)
+ return -ENOMEM;
+
+ adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv->regmap_cec)) {
+ ret = PTR_ERR(adv->regmap_cec);
+ goto err;
+ }
+
+ ret = regmap_register_patch(adv->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ adv7533_uninit_cec(adv);
+ return ret;
+}
+
+int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ struct device *dev = &adv->i2c_main->dev;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ int ret = 0;
+ const struct mipi_dsi_device_info info = { .type = "adv7533",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(adv->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv->dsi = dsi;
+
+ dsi->lanes = adv->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+ return ret;
+}
+
+void adv7533_detach_dsi(struct adv7511 *adv)
+{
+ mipi_dsi_detach(adv->dsi);
+ mipi_dsi_device_unregister(adv->dsi);
+}
+
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+ u32 num_lanes;
+ struct device_node *endpoint;
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint)
+ return -ENODEV;
+
+ adv->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv->host_node) {
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv->host_node);
+
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
+ /* TODO: Check if these need to be parsed from DT or not */
+ adv->rgb = true;
+ adv->embedded_sync = false;
+
+ return 0;
+}
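The timing-generator writes in adv7511_dsi_config_timing_gen() above split each 12-bit timing value across a register pair in the CEC map: the first register takes bits [11:4], the second takes bits [3:0] left-aligned in its upper nibble. A minimal sketch of that split, using a hypothetical helper name that is not part of the patch:

#include <linux/regmap.h>

/* Hypothetical helper; mirrors the register-pair pattern used above.
 * "reg" receives bits [11:4] of the 12-bit value, "reg + 1" receives
 * bits [3:0] left-aligned in its upper nibble. */
static void adv7533_write_timing(struct regmap *map, unsigned int reg,
				 unsigned int val)
{
	regmap_write(map, reg, (val >> 4) & 0xff);
	regmap_write(map, reg + 1, (val << 4) & 0xff);
}

For example, htotal = 2200 (0x898) lands as 0x89 in register 0x28 and 0x80 in register 0x29.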
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index d087b054c360..f9f03bcba0af 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -986,16 +986,8 @@ unlock:
return num_modes;
}
-static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
-{
- struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-
- return anx78xx->bridge.encoder;
-}
-
static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
.get_modes = anx78xx_get_modes,
- .best_encoder = anx78xx_best_encoder,
};
static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 7699597070a1..32715daf73cb 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -938,7 +938,7 @@ int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_panel_get_modes(dp->plat_data->panel);
if (dp->plat_data->get_modes)
- num_modes += dp->plat_data->get_modes(dp->plat_data);
+ num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
return num_modes;
}
@@ -1208,6 +1208,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
switch (dp->plat_data->dev_type) {
case RK3288_DP:
+ case RK3399_EDP:
/*
* Like Rk3288 DisplayPort TRM indicate that "Main link
* containing 4 physical lanes of 2.7/1.62 Gbps/lane".
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index f09275d40f70..b45638043ec4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -127,10 +127,10 @@ enum analog_power_block {
};
enum dp_irq_type {
- DP_IRQ_TYPE_HP_CABLE_IN,
- DP_IRQ_TYPE_HP_CABLE_OUT,
- DP_IRQ_TYPE_HP_CHANGE,
- DP_IRQ_TYPE_UNKNOWN,
+ DP_IRQ_TYPE_HP_CABLE_IN = BIT(0),
+ DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1),
+ DP_IRQ_TYPE_HP_CHANGE = BIT(2),
+ DP_IRQ_TYPE_UNKNOWN = BIT(3),
};
struct video_info {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 49205ef02be3..48030f0cf497 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -74,8 +74,12 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
reg = SEL_24M | TX_DVDD_BIT_1_0625V;
writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
- writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+ reg = REF_CLK_24M;
+ if (dp->plat_data->dev_type == RK3288_DP)
+ reg ^= REF_CLK_MASK;
+
+ writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
@@ -244,7 +248,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
u32 reg;
u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
phy_pd_addr = ANALOGIX_DP_PD;
switch (block) {
@@ -448,7 +452,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
analogix_dp_reset_aux(dp);
/* Disable AUX transaction H/W retry */
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
AUX_HW_RETRY_COUNT_SEL(3) |
AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 337912b0aeab..cdcc6c5add5e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -163,8 +163,9 @@
#define HSYNC_POLARITY_CFG (0x1 << 0)
/* ANALOGIX_DP_PLL_REG_1 */
-#define REF_CLK_24M (0x1 << 1)
-#define REF_CLK_27M (0x0 << 1)
+#define REF_CLK_24M (0x1 << 0)
+#define REF_CLK_27M (0x0 << 0)
+#define REF_CLK_MASK (0x1 << 0)
/* ANALOGIX_DP_LANE_MAP */
#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index c9d941283d30..77ab47341658 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
return mode_status;
}
-static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
- *connector)
-{
- struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
- connector);
-
- return hdmi->encoder;
-}
-
static void dw_hdmi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -1504,14 +1495,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
}
static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = dw_hdmi_connector_detect,
- .destroy = dw_hdmi_connector_destroy,
- .force = dw_hdmi_connector_force,
-};
-
-static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
@@ -1525,7 +1508,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
.mode_valid = dw_hdmi_connector_mode_valid,
- .best_encoder = dw_hdmi_connector_best_encoder,
+ .best_encoder = drm_atomic_helper_best_encoder,
};
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
@@ -1643,14 +1626,9 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&dw_hdmi_connector_helper_funcs);
- if (drm_core_check_feature(drm, DRIVER_ATOMIC))
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- else
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init(drm, &hdmi->connector,
+ &dw_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7ecd59f70b8e..93f3dacf9e27 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -235,16 +235,8 @@ out:
return num_modes;
}
-static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
-{
- struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
-
- return ptn_bridge->bridge.encoder;
-}
-
static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
- .best_encoder = ptn3460_best_encoder,
};
static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index be881e9fef8f..583b8ce614e3 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(ps8622->panel);
}
-static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
-{
- struct ps8622_bridge *ps8622;
-
- ps8622 = connector_to_ps8622(connector);
-
- return ps8622->bridge.encoder;
-}
-
static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
.get_modes = ps8622_get_modes,
- .best_encoder = ps8622_best_encoder,
};
static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
@@ -646,9 +636,7 @@ static int ps8622_remove(struct i2c_client *client)
{
struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
- if (ps8622->bl)
- backlight_device_unregister(ps8622->bl);
-
+ backlight_device_unregister(ps8622->bl);
drm_bridge_remove(&ps8622->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
new file mode 100644
index 000000000000..9126d0306ab5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2016 Atmel
+ * Bo Shen <voice.shen@atmel.com>
+ *
+ * Authors: Bo Shen <voice.shen@atmel.com>
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Wu, Songjun <Songjun.Wu@atmel.com>
+ *
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#define SII902X_TPI_VIDEO_DATA 0x0
+
+#define SII902X_TPI_PIXEL_REPETITION 0x8
+#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5)
+#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4)
+#define SII902X_TPI_AVI_PIXEL_REP_4X 3
+#define SII902X_TPI_AVI_PIXEL_REP_2X 1
+#define SII902X_TPI_AVI_PIXEL_REP_NONE 0
+#define SII902X_TPI_CLK_RATIO_HALF (0 << 6)
+#define SII902X_TPI_CLK_RATIO_1X (1 << 6)
+#define SII902X_TPI_CLK_RATIO_2X (2 << 6)
+#define SII902X_TPI_CLK_RATIO_4X (3 << 6)
+
+#define SII902X_TPI_AVI_IN_FORMAT 0x9
+#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7)
+#define SII902X_TPI_AVI_INPUT_DITHER BIT(6)
+#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2)
+#define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2)
+#define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0)
+
+#define SII902X_TPI_AVI_INFOFRAME 0x0c
+
+#define SII902X_SYS_CTRL_DATA 0x1a
+#define SII902X_SYS_CTRL_PWR_DWN BIT(4)
+#define SII902X_SYS_CTRL_AV_MUTE BIT(3)
+#define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2)
+#define SII902X_SYS_CTRL_DDC_BUS_GRTD BIT(1)
+#define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0)
+#define SII902X_SYS_CTRL_OUTPUT_HDMI 1
+#define SII902X_SYS_CTRL_OUTPUT_DVI 0
+
+#define SII902X_REG_CHIPID(n) (0x1b + (n))
+
+#define SII902X_PWR_STATE_CTRL 0x1e
+#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0)
+#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK)
+
+#define SII902X_INT_ENABLE 0x3c
+#define SII902X_INT_STATUS 0x3d
+#define SII902X_HOTPLUG_EVENT BIT(0)
+#define SII902X_PLUGGED_STATUS BIT(2)
+
+#define SII902X_REG_TPI_RQB 0xc7
+
+#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
+
+struct sii902x {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii902x, bridge);
+}
+
+static inline struct sii902x *connector_to_sii902x(struct drm_connector *con)
+{
+ return container_of(con, struct sii902x, connector);
+}
+
+static void sii902x_reset(struct sii902x *sii902x)
+{
+ if (!sii902x->reset_gpio)
+ return;
+
+ gpiod_set_value(sii902x->reset_gpio, 1);
+
+ /* The datasheet says treset-min = 100us. Make it 150us to be sure. */
+ usleep_range(150, 200);
+
+ gpiod_set_value(sii902x->reset_gpio, 0);
+}
+
+static enum drm_connector_status
+sii902x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+ unsigned int status;
+
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+
+ return (status & SII902X_PLUGGED_STATUS) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs sii902x_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = sii902x_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int sii902x_get_modes(struct drm_connector *connector)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+ struct regmap *regmap = sii902x->regmap;
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ unsigned long timeout;
+ unsigned int status;
+ struct edid *edid;
+ int num = 0;
+ int ret;
+
+ ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ,
+ SII902X_SYS_CTRL_DDC_BUS_REQ);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ if (ret)
+ return ret;
+ } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus");
+ return -ETIMEDOUT;
+ }
+
+ ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
+ if (ret)
+ return ret;
+
+ edid = drm_get_edid(connector, sii902x->i2c->adapter);
+ drm_mode_connector_update_edid_property(connector, edid);
+ if (edid) {
+ num = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ ret = drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ if (ret)
+ return ret;
+ } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(&sii902x->i2c->dev, "failed to release the i2c bus");
+ return -ETIMEDOUT;
+ }
+
+ return num;
+}
+
+static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO: check mode */
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = {
+ .get_modes = sii902x_get_modes,
+ .mode_valid = sii902x_mode_valid,
+};
+
+static void sii902x_bridge_disable(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_PWR_DWN,
+ SII902X_SYS_CTRL_PWR_DWN);
+}
+
+static void sii902x_bridge_enable(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
+ SII902X_AVI_POWER_STATE_MSK,
+ SII902X_AVI_POWER_STATE_D(0));
+ regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_PWR_DWN, 0);
+}
+
+static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ struct regmap *regmap = sii902x->regmap;
+ u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+ struct hdmi_avi_infoframe frame;
+ int ret;
+
+ buf[0] = adj->clock;
+ buf[1] = adj->clock >> 8;
+ buf[2] = adj->vrefresh;
+ buf[3] = 0x00;
+ buf[4] = adj->hdisplay;
+ buf[5] = adj->hdisplay >> 8;
+ buf[6] = adj->vdisplay;
+ buf[7] = adj->vdisplay >> 8;
+ buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE |
+ SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT;
+ buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO |
+ SII902X_TPI_AVI_INPUT_COLORSPACE_RGB;
+
+ ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
+ if (ret)
+ return;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
+ return;
+ }
+
+ /* Do not send the infoframe header, but keep the CRC field. */
+ regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME,
+ buf + HDMI_INFOFRAME_HEADER_SIZE - 1,
+ HDMI_AVI_INFOFRAME_SIZE + 1);
+}
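A note on the byte accounting in the infoframe write just above (sizes from include/linux/hdmi.h, where HDMI_INFOFRAME_HEADER_SIZE is 4 and HDMI_AVI_INFOFRAME_SIZE is 13):

/*
 * buf[] after hdmi_avi_infoframe_pack():
 *   buf[0..2]   type / version / length   (header bytes that are skipped)
 *   buf[3]      checksum                  (sent)
 *   buf[4..16]  13-byte AVI payload       (sent)
 *
 * so buf + HDMI_INFOFRAME_HEADER_SIZE - 1 with a length of
 * HDMI_AVI_INFOFRAME_SIZE + 1 covers exactly those 14 bytes.
 */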
+
+static int sii902x_bridge_attach(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ drm_connector_helper_add(&sii902x->connector,
+ &sii902x_connector_helper_funcs);
+
+ if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
+ dev_err(&sii902x->i2c->dev,
+ "sii902x driver is only compatible with DRM devices supporting atomic updates");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_connector_init(drm, &sii902x->connector,
+ &sii902x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ return ret;
+
+ if (sii902x->i2c->irq > 0)
+ sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs sii902x_bridge_funcs = {
+ .attach = sii902x_bridge_attach,
+ .mode_set = sii902x_bridge_mode_set,
+ .disable = sii902x_bridge_disable,
+ .enable = sii902x_bridge_enable,
+};
+
+static const struct regmap_range sii902x_volatile_ranges[] = {
+ { .range_min = 0, .range_max = 0xff },
+};
+
+static const struct regmap_access_table sii902x_volatile_table = {
+ .yes_ranges = sii902x_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges),
+};
+
+static const struct regmap_config sii902x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &sii902x_volatile_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t sii902x_interrupt(int irq, void *data)
+{
+ struct sii902x *sii902x = data;
+ unsigned int status = 0;
+
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+ regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+ if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
+ drm_helper_hpd_irq_event(sii902x->bridge.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int sii902x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ unsigned int status = 0;
+ struct sii902x *sii902x;
+ u8 chipid[4];
+ int ret;
+
+ sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
+ if (!sii902x)
+ return -ENOMEM;
+
+ sii902x->i2c = client;
+ sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
+ if (IS_ERR(sii902x->regmap))
+ return PTR_ERR(sii902x->regmap);
+
+ sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sii902x->reset_gpio)) {
+ dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n",
+ PTR_ERR(sii902x->reset_gpio));
+ return PTR_ERR(sii902x->reset_gpio);
+ }
+
+ sii902x_reset(sii902x);
+
+ ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
+ &chipid, 4);
+ if (ret) {
+ dev_err(dev, "regmap_read failed %d\n", ret);
+ return ret;
+ }
+
+ if (chipid[0] != 0xb0) {
+ dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
+ chipid[0]);
+ return -EINVAL;
+ }
+
+ /* Clear all pending interrupts */
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+ regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+ if (client->irq > 0) {
+ regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
+ SII902X_HOTPLUG_EVENT);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii902x_interrupt,
+ IRQF_ONESHOT, dev_name(dev),
+ sii902x);
+ if (ret)
+ return ret;
+ }
+
+ sii902x->bridge.funcs = &sii902x_bridge_funcs;
+ sii902x->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&sii902x->bridge);
+ if (ret) {
+ dev_err(dev, "Failed to add drm_bridge\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, sii902x);
+
+ return 0;
+}
+
+static int sii902x_remove(struct i2c_client *client)
+{
+ struct sii902x *sii902x = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&sii902x->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id sii902x_dt_ids[] = {
+ { .compatible = "sil,sii9022", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
+
+static const struct i2c_device_id sii902x_i2c_ids[] = {
+ { "sii9022", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
+
+static struct i2c_driver sii902x_driver = {
+ .probe = sii902x_probe,
+ .remove = sii902x_remove,
+ .driver = {
+ .name = "sii902x",
+ .of_match_table = sii902x_dt_ids,
+ },
+ .id_table = sii902x_i2c_ids,
+};
+module_i2c_driver(sii902x_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges");
+MODULE_LICENSE("GPL");
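sii902x_get_modes() above wraps the EDID read in a DDC bus handshake: request the bus, poll SYS_CTRL_DATA until the grant bit is set or the 500 ms timeout expires, write the granted status back, and afterwards clear both bits and poll again to release the bus. A condensed sketch of the acquire half, using a hypothetical helper name and the register macros defined earlier in this file:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>

/* Hypothetical helper, not part of the patch; mirrors the request/grant
 * sequence at the top of sii902x_get_modes(). */
static int sii902x_ddc_acquire(struct regmap *regmap)
{
	unsigned long timeout;
	unsigned int status;
	int ret;

	/* Request the DDC bus. */
	ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
				 SII902X_SYS_CTRL_DDC_BUS_REQ,
				 SII902X_SYS_CTRL_DDC_BUS_REQ);
	if (ret)
		return ret;

	/* Poll for the grant bit, bounded by the 500 ms acquisition timeout. */
	timeout = jiffies +
		  msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
	do {
		ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
		if (ret)
			return ret;
	} while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
		 time_before(jiffies, timeout));

	if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD))
		return -ETIMEDOUT;

	/* Writing the granted status back latches the grant, as done in
	 * sii902x_get_modes(). */
	return regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
}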
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
new file mode 100644
index 000000000000..a09825d8c94a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -0,0 +1,1413 @@
+/*
+ * tc358767 eDP bridge driver
+ *
+ * Copyright (C) 2016 CogentEmbedded Inc
+ * Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
+ *
+ * Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+/* Registers */
+
+/* Display Parallel Interface */
+#define DPIPXLFMT 0x0440
+#define VS_POL_ACTIVE_LOW (1 << 10)
+#define HS_POL_ACTIVE_LOW (1 << 9)
+#define DE_POL_ACTIVE_HIGH (0 << 8)
+#define SUB_CFG_TYPE_CONFIG1 (0 << 2) /* LSB aligned */
+#define SUB_CFG_TYPE_CONFIG2 (1 << 2) /* Loosely Packed */
+#define SUB_CFG_TYPE_CONFIG3 (2 << 2) /* LSB aligned 8-bit */
+#define DPI_BPP_RGB888 (0 << 0)
+#define DPI_BPP_RGB666 (1 << 0)
+#define DPI_BPP_RGB565 (2 << 0)
+
+/* Video Path */
+#define VPCTRL0 0x0450
+#define OPXLFMT_RGB666 (0 << 8)
+#define OPXLFMT_RGB888 (1 << 8)
+#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */
+#define FRMSYNC_ENABLED (1 << 4) /* Video Timing Gen Enabled */
+#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */
+#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */
+#define HTIM01 0x0454
+#define HTIM02 0x0458
+#define VTIM01 0x045c
+#define VTIM02 0x0460
+#define VFUEN0 0x0464
+#define VFUEN BIT(0) /* Video Frame Timing Upload */
+
+/* System */
+#define TC_IDREG 0x0500
+#define SYSCTRL 0x0510
+#define DP0_AUDSRC_NO_INPUT (0 << 3)
+#define DP0_AUDSRC_I2S_RX (1 << 3)
+#define DP0_VIDSRC_NO_INPUT (0 << 0)
+#define DP0_VIDSRC_DSI_RX (1 << 0)
+#define DP0_VIDSRC_DPI_RX (2 << 0)
+#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+
+/* Control */
+#define DP0CTL 0x0600
+#define VID_MN_GEN BIT(6) /* Auto-generate M/N values */
+#define EF_EN BIT(5) /* Enable Enhanced Framing */
+#define VID_EN BIT(1) /* Video transmission enable */
+#define DP_EN BIT(0) /* Enable DPTX function */
+
+/* Clocks */
+#define DP0_VIDMNGEN0 0x0610
+#define DP0_VIDMNGEN1 0x0614
+#define DP0_VMNGENSTATUS 0x0618
+
+/* Main Channel */
+#define DP0_SECSAMPLE 0x0640
+#define DP0_VIDSYNCDELAY 0x0644
+#define DP0_TOTALVAL 0x0648
+#define DP0_STARTVAL 0x064c
+#define DP0_ACTIVEVAL 0x0650
+#define DP0_SYNCVAL 0x0654
+#define DP0_MISC 0x0658
+#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
+#define BPC_6 (0 << 5)
+#define BPC_8 (1 << 5)
+
+/* AUX channel */
+#define DP0_AUXCFG0 0x0660
+#define DP0_AUXCFG1 0x0664
+#define AUX_RX_FILTER_EN BIT(16)
+
+#define DP0_AUXADDR 0x0668
+#define DP0_AUXWDATA(i) (0x066c + (i) * 4)
+#define DP0_AUXRDATA(i) (0x067c + (i) * 4)
+#define DP0_AUXSTATUS 0x068c
+#define AUX_STATUS_MASK 0xf0
+#define AUX_STATUS_SHIFT 4
+#define AUX_TIMEOUT BIT(1)
+#define AUX_BUSY BIT(0)
+#define DP0_AUXI2CADR 0x0698
+
+/* Link Training */
+#define DP0_SRCCTRL 0x06a0
+#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
+#define DP0_SRCCTRL_EN810B BIT(12)
+#define DP0_SRCCTRL_NOTP (0 << 8)
+#define DP0_SRCCTRL_TP1 (1 << 8)
+#define DP0_SRCCTRL_TP2 (2 << 8)
+#define DP0_SRCCTRL_LANESKEW BIT(7)
+#define DP0_SRCCTRL_SSCG BIT(3)
+#define DP0_SRCCTRL_LANES_1 (0 << 2)
+#define DP0_SRCCTRL_LANES_2 (1 << 2)
+#define DP0_SRCCTRL_BW27 (1 << 1)
+#define DP0_SRCCTRL_BW162 (0 << 1)
+#define DP0_SRCCTRL_AUTOCORRECT BIT(0)
+#define DP0_LTSTAT 0x06d0
+#define LT_LOOPDONE BIT(13)
+#define LT_STATUS_MASK (0x1f << 8)
+#define LT_CHANNEL1_EQ_BITS (DP_CHANNEL_EQ_BITS << 4)
+#define LT_INTERLANE_ALIGN_DONE BIT(3)
+#define LT_CHANNEL0_EQ_BITS (DP_CHANNEL_EQ_BITS)
+#define DP0_SNKLTCHGREQ 0x06d4
+#define DP0_LTLOOPCTRL 0x06d8
+#define DP0_SNKLTCTRL 0x06e4
+
+/* PHY */
+#define DP_PHY_CTRL 0x0800
+#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
+#define BGREN BIT(25) /* AUX PHY BGR Enable */
+#define PWR_SW_EN BIT(24) /* PHY Power Switch Enable */
+#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
+#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
+#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
+#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+
+/* PLL */
+#define DP0_PLLCTRL 0x0900
+#define DP1_PLLCTRL 0x0904 /* not defined in DS */
+#define PXL_PLLCTRL 0x0908
+#define PLLUPDATE BIT(2)
+#define PLLBYP BIT(1)
+#define PLLEN BIT(0)
+#define PXL_PLLPARAM 0x0914
+#define IN_SEL_REFCLK (0 << 14)
+#define SYS_PLLPARAM 0x0918
+#define REF_FREQ_38M4 (0 << 8) /* 38.4 MHz */
+#define REF_FREQ_19M2 (1 << 8) /* 19.2 MHz */
+#define REF_FREQ_26M (2 << 8) /* 26 MHz */
+#define REF_FREQ_13M (3 << 8) /* 13 MHz */
+#define SYSCLK_SEL_LSCLK (0 << 4)
+#define LSCLK_DIV_1 (0 << 0)
+#define LSCLK_DIV_2 (1 << 0)
+
+/* Test & Debug */
+#define TSTCTL 0x0a00
+#define PLL_DBG 0x0a04
+
+static bool tc_test_pattern;
+module_param_named(test, tc_test_pattern, bool, 0644);
+
+struct tc_edp_link {
+ struct drm_dp_link base;
+ u8 assr;
+ int scrambler_dis;
+ int spread;
+ int coding8b10b;
+ u8 swing;
+ u8 preemp;
+};
+
+struct tc_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct drm_dp_aux aux;
+
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+
+ /* link settings */
+ struct tc_edp_link link;
+
+ /* display edid */
+ struct edid *edid;
+ /* current mode */
+ struct drm_display_mode *mode;
+
+ u32 rev;
+ u8 assr;
+
+ struct gpio_desc *sd_gpio;
+ struct gpio_desc *reset_gpio;
+ struct clk *refclk;
+};
+
+static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
+{
+ return container_of(a, struct tc_data, aux);
+}
+
+static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
+{
+ return container_of(b, struct tc_data, bridge);
+}
+
+static inline struct tc_data *connector_to_tc(struct drm_connector *c)
+{
+ return container_of(c, struct tc_data, connector);
+}
+
+/* Simple macros to avoid repeated error checks */
+#define tc_write(reg, var) \
+ do { \
+ ret = regmap_write(tc->regmap, reg, var); \
+ if (ret) \
+ goto err; \
+ } while (0)
+#define tc_read(reg, var) \
+ do { \
+ ret = regmap_read(tc->regmap, reg, var); \
+ if (ret) \
+ goto err; \
+ } while (0)
+
+static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
+ unsigned int cond_mask,
+ unsigned int cond_value,
+ unsigned long sleep_us, u64 timeout_us)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+ unsigned int val;
+ int ret;
+
+ for (;;) {
+ ret = regmap_read(map, addr, &val);
+ if (ret)
+ break;
+ if ((val & cond_mask) == cond_value)
+ break;
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
+ ret = regmap_read(map, addr, &val);
+ break;
+ }
+ if (sleep_us)
+ usleep_range((sleep_us >> 2) + 1, sleep_us);
+ }
+ return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
+}
+
+static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
+{
+ return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
+ 1000, 1000 * timeout_ms);
+}
+
+static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
+{
+ int ret;
+ u32 value;
+
+ ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
+ if (ret < 0)
+ return ret;
+ if (value & AUX_BUSY) {
+ if (value & AUX_TIMEOUT) {
+ dev_err(tc->dev, "i2c access timeout!\n");
+ return -ETIMEDOUT;
+ }
+ return -EBUSY;
+ }
+
+ *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
+ return 0;
+}
+
+static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct tc_data *tc = aux_to_tc(aux);
+ size_t size = min_t(size_t, 8, msg->size);
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ u8 *buf = msg->buffer;
+ u32 tmp = 0;
+ int i = 0;
+ int ret;
+
+ if (size == 0)
+ return 0;
+
+ ret = tc_aux_wait_busy(tc, 100);
+ if (ret)
+ goto err;
+
+ if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
+ /* Store data */
+ while (i < size) {
+ if (request == DP_AUX_NATIVE_WRITE)
+ tmp = tmp | (buf[i] << (8 * (i & 0x3)));
+ else
+ tmp = (tmp << 8) | buf[i];
+ i++;
+ if (((i % 4) == 0) || (i == size)) {
+ tc_write(DP0_AUXWDATA(i >> 2), tmp);
+ tmp = 0;
+ }
+ }
+ } else if (request != DP_AUX_I2C_READ &&
+ request != DP_AUX_NATIVE_READ) {
+ return -EINVAL;
+ }
+
+ /* Store address */
+ tc_write(DP0_AUXADDR, msg->address);
+ /* Start transfer */
+ tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
+
+ ret = tc_aux_wait_busy(tc, 100);
+ if (ret)
+ goto err;
+
+ ret = tc_aux_get_status(tc, &msg->reply);
+ if (ret)
+ goto err;
+
+ if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
+ /* Read data */
+ while (i < size) {
+ if ((i % 4) == 0)
+ tc_read(DP0_AUXRDATA(i >> 2), &tmp);
+ buf[i] = tmp & 0xff;
+ tmp = tmp >> 8;
+ i++;
+ }
+ }
+
+ return size;
+err:
+ return ret;
+}
+
+static const char * const training_pattern1_errors[] = {
+ "No errors",
+ "Aux write error",
+ "Aux read error",
+ "Max voltage reached error",
+ "Loop counter expired error",
+ "res", "res", "res"
+};
+
+static const char * const training_pattern2_errors[] = {
+ "No errors",
+ "Aux write error",
+ "Aux read error",
+ "Clock recovery failed error",
+ "Loop counter expired error",
+ "res", "res", "res"
+};
+
+static u32 tc_srcctrl(struct tc_data *tc)
+{
+ /*
+ * No training pattern, skew lane 1 data by two LSCLK cycles with
+ * respect to lane 0 data, AutoCorrect Mode = 0
+ */
+ u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW;
+
+ if (tc->link.scrambler_dis)
+ reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
+ if (tc->link.coding8b10b)
+ /* Enable 8/10B Encoder (TxData[19:16] not used) */
+ reg |= DP0_SRCCTRL_EN810B;
+ if (tc->link.spread)
+ reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
+ if (tc->link.base.num_lanes == 2)
+ reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
+ if (tc->link.base.rate != 162000)
+ reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
+ return reg;
+}
+
+static void tc_wait_pll_lock(struct tc_data *tc)
+{
+ /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
+ usleep_range(3000, 6000);
+}
+
+static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
+{
+ int ret;
+ int i_pre, best_pre = 1;
+ int i_post, best_post = 1;
+ int div, best_div = 1;
+ int mul, best_mul = 1;
+ int delta, best_delta;
+ int ext_div[] = {1, 2, 3, 5, 7};
+ int best_pixelclock = 0;
+ int vco_hi = 0;
+
+ dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
+ refclk);
+ best_delta = pixelclock;
+ /* Loop over all possible ext_divs, skipping invalid configurations */
+ for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) {
+ /*
+ * refclk / ext_pre_div should be in the 1 to 200 MHz range.
+ * We don't allow any refclk > 200 MHz, only check lower bounds.
+ */
+ if (refclk / ext_div[i_pre] < 1000000)
+ continue;
+ for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
+ for (div = 1; div <= 16; div++) {
+ u32 clk;
+ u64 tmp;
+
+ tmp = pixelclock * ext_div[i_pre] *
+ ext_div[i_post] * div;
+ do_div(tmp, refclk);
+ mul = tmp;
+
+ /* Check limits */
+ if ((mul < 1) || (mul > 128))
+ continue;
+
+ clk = (refclk / ext_div[i_pre] / div) * mul;
+ /*
+ * refclk * mul / (ext_pre_div * pre_div)
+ * should be in the 150 to 650 MHz range
+ */
+ if ((clk > 650000000) || (clk < 150000000))
+ continue;
+
+ clk = clk / ext_div[i_post];
+ delta = clk - pixelclock;
+
+ if (abs(delta) < abs(best_delta)) {
+ best_pre = i_pre;
+ best_post = i_post;
+ best_div = div;
+ best_mul = mul;
+ best_delta = delta;
+ best_pixelclock = clk;
+ }
+ }
+ }
+ }
+ if (best_pixelclock == 0) {
+ dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
+ pixelclock);
+ return -EINVAL;
+ }
+
+ dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
+ best_delta);
+ dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
+ ext_div[best_pre], best_div, best_mul, ext_div[best_post]);
+
+ /* if VCO >= 300 MHz */
+ if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000)
+ vco_hi = 1;
+ /* see DS */
+ if (best_div == 16)
+ best_div = 0;
+ if (best_mul == 128)
+ best_mul = 0;
+
+ /* Power up PLL and switch to bypass */
+ tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
+
+ tc_write(PXL_PLLPARAM,
+ (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */
+ (ext_div[best_pre] << 20) | /* External Pre-divider */
+ (ext_div[best_post] << 16) | /* External Post-divider */
+ IN_SEL_REFCLK | /* Use RefClk as PLL input */
+ (best_div << 8) | /* Divider for PLL RefClk */
+ (best_mul << 0)); /* Multiplier for PLL */
+
+ /* Force PLL parameter update and disable bypass */
+ tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
+
+ tc_wait_pll_lock(tc);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_pxl_pll_dis(struct tc_data *tc)
+{
+ /* Enable PLL bypass, power down PLL */
+ return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
+}
+
+static int tc_stream_clock_calc(struct tc_data *tc)
+{
+ int ret;
+ /*
+ * If the Stream clock and Link Symbol clock are
+ * asynchronous with each other, the value of M changes over
+ * time. This way of generating link clock and stream
+ * clock is called Asynchronous Clock mode. The value M
+ * must change while the value N stays constant. The
+ * value of N in this Asynchronous Clock mode must be set
+ * to 2^15 or 32,768.
+ *
+ * LSCLK = 1/10 of high speed link clock
+ *
+ * f_STRMCLK = M/N * f_LSCLK
+ * M/N = f_STRMCLK / f_LSCLK
+ *
+ */
+ tc_write(DP0_VIDMNGEN1, 32768);
+
+ return 0;
+err:
+ return ret;
+}
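A worked instance of the M/N relation described in the comment above (the numbers are illustrative, not taken from the patch): on a 2.7 Gbps link, f_LSCLK = 270 MHz; for a 148.5 MHz stream clock and the fixed N = 32768 written to DP0_VIDMNGEN1, the hardware-generated M settles near M = N * f_STRMCLK / f_LSCLK = 32768 * 148.5 / 270 ≈ 18022.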
+
+static int tc_aux_link_setup(struct tc_data *tc)
+{
+ unsigned long rate;
+ u32 value;
+ int ret;
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+ case 38400000:
+ value = REF_FREQ_38M4;
+ break;
+ case 26000000:
+ value = REF_FREQ_26M;
+ break;
+ case 19200000:
+ value = REF_FREQ_19M2;
+ break;
+ case 13000000:
+ value = REF_FREQ_13M;
+ break;
+ default:
+ dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
+ return -EINVAL;
+ }
+
+ /* Setup DP-PHY / PLL */
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+
+ tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+
+ /*
+ * Initially PLLs are in bypass. Force PLL parameter update,
+ * disable PLL bypass, enable PLL
+ */
+ tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
+ 1000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(tc->dev, "Timeout waiting for PHY to become ready");
+ return ret;
+ } else if (ret)
+ goto err;
+
+ /* Setup AUX link */
+ tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
+ (0x06 << 8) | /* Aux Bit Period Calculator Threshold */
+ (0x3f << 0)); /* Aux Response Timeout Timer */
+
+ return 0;
+err:
+ dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
+ return ret;
+}
+
+static int tc_get_display_props(struct tc_data *tc)
+{
+ int ret;
+ /* temp buffer */
+ u8 tmp[8];
+
+ /* Read DP Rx Link Capability */
+ ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_read;
+ if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
+ goto err_dpcd_inval;
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.coding8b10b = tmp[0] & BIT(0);
+ tc->link.scrambler_dis = 0;
+ /* read assr */
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+
+ dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
+ tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
+ (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.base.num_lanes,
+ (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
+ "enhanced" : "non-enhanced");
+ dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b);
+ dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
+ tc->link.assr, tc->assr);
+
+ return 0;
+
+err_dpcd_read:
+ dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
+ return ret;
+err_dpcd_inval:
+ dev_err(tc->dev, "invalid DPCD\n");
+ return -EINVAL;
+}
+
+static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+{
+ int ret;
+ int vid_sync_dly;
+ int max_tu_symbol;
+
+ int left_margin = mode->htotal - mode->hsync_end;
+ int right_margin = mode->hsync_start - mode->hdisplay;
+ int hsync_len = mode->hsync_end - mode->hsync_start;
+ int upper_margin = mode->vtotal - mode->vsync_end;
+ int lower_margin = mode->vsync_start - mode->vdisplay;
+ int vsync_len = mode->vsync_end - mode->vsync_start;
+
+ dev_dbg(tc->dev, "set mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
+ left_margin, right_margin, hsync_len);
+ dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
+ upper_margin, lower_margin, vsync_len);
+ dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
+
+
+ /* LCD Ctl Frame Size */
+ tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+ tc_write(HTIM01, (left_margin << 16) | /* H back porch */
+ (hsync_len << 0)); /* Hsync */
+ tc_write(HTIM02, (right_margin << 16) | /* H front porch */
+ (mode->hdisplay << 0)); /* width */
+ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
+ (vsync_len << 0)); /* Vsync */
+ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
+ (mode->vdisplay << 0)); /* height */
+ tc_write(VFUEN0, VFUEN); /* update settings */
+
+ /* Test pattern settings */
+ tc_write(TSTCTL,
+ (120 << 24) | /* Red Color component value */
+ (20 << 16) | /* Green Color component value */
+ (99 << 8) | /* Blue Color component value */
+ (1 << 4) | /* Enable I2C Filter */
+ (2 << 0) | /* Color bar Mode */
+ 0);
+
+ /* DP Main Stream Attributes */
+ vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
+ tc_write(DP0_VIDSYNCDELAY,
+ (0x003e << 16) | /* thresh_dly */
+ (vid_sync_dly << 0));
+
+ tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+
+ tc_write(DP0_STARTVAL,
+ ((upper_margin + vsync_len) << 16) |
+ ((left_margin + hsync_len) << 0));
+
+ tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
+ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+ tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_link_training(struct tc_data *tc, int pattern)
+{
+ const char * const *errors;
+ u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT;
+ int timeout;
+ int retry;
+ u32 value;
+ int ret;
+
+ if (pattern == DP_TRAINING_PATTERN_1) {
+ srcctrl |= DP0_SRCCTRL_TP1;
+ errors = training_pattern1_errors;
+ } else {
+ srcctrl |= DP0_SRCCTRL_TP2;
+ errors = training_pattern2_errors;
+ }
+
+ /* Set DPCD 0x102 for Training Part 1 or 2 */
+ tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern);
+
+ tc_write(DP0_LTLOOPCTRL,
+ (0x0f << 28) | /* Defer Iteration Count */
+ (0x0f << 24) | /* Loop Iteration Count */
+ (0x0d << 0)); /* Loop Timer Delay */
+
+ retry = 5;
+ do {
+ /* Set DP0 Training Pattern */
+ tc_write(DP0_SRCCTRL, srcctrl);
+
+ /* Enable DP0 to start Link Training */
+ tc_write(DP0CTL, DP_EN);
+
+ /* wait */
+ timeout = 1000;
+ do {
+ tc_read(DP0_LTSTAT, &value);
+ udelay(1);
+ } while ((!(value & LT_LOOPDONE)) && (--timeout));
+ if (timeout == 0) {
+ dev_err(tc->dev, "Link training timeout!\n");
+ } else {
+ int pattern = (value >> 11) & 0x3;
+ int error = (value >> 8) & 0x7;
+
+ dev_dbg(tc->dev,
+ "Link training phase %d done after %d us: %s\n",
+ pattern, 1000 - timeout, errors[error]);
+ if (pattern == DP_TRAINING_PATTERN_1 && error == 0)
+ break;
+ if (pattern == DP_TRAINING_PATTERN_2) {
+ value &= LT_CHANNEL1_EQ_BITS |
+ LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS;
+ /* in case of two lanes */
+ if ((tc->link.base.num_lanes == 2) &&
+ (value == (LT_CHANNEL1_EQ_BITS |
+ LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS)))
+ break;
+ /* in case of one lane */
+ if ((tc->link.base.num_lanes == 1) &&
+ (value == (LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS)))
+ break;
+ }
+ }
+ /* restart */
+ tc_write(DP0CTL, 0);
+ usleep_range(10, 20);
+ } while (--retry);
+ if (retry == 0) {
+ dev_err(tc->dev, "Failed to finish training phase %d\n",
+ pattern);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_main_link_setup(struct tc_data *tc)
+{
+ struct drm_dp_aux *aux = &tc->aux;
+ struct device *dev = tc->dev;
+ unsigned int rate;
+ u32 dp_phy_ctrl;
+ int timeout;
+ bool aligned;
+ bool ready;
+ u32 value;
+ int ret;
+ u8 tmp[8];
+
+ /* display mode should be set at this point */
+ if (!tc->mode)
+ return -EINVAL;
+
+ /* from excel file - DP0_SrcCtrl */
+ tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+ DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+ DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+ /* from excel file - DP1_SrcCtrl */
+ tc_write(0x07a0, 0x00003083);
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+ case 38400000:
+ value = REF_FREQ_38M4;
+ break;
+ case 26000000:
+ value = REF_FREQ_26M;
+ break;
+ case 19200000:
+ value = REF_FREQ_19M2;
+ break;
+ case 13000000:
+ value = REF_FREQ_13M;
+ break;
+ default:
+ return -EINVAL;
+ }
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+ /* Setup Main Link */
+ dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ msleep(100);
+
+ /* PLL setup */
+ tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ /* PXL PLL setup */
+ if (tc_test_pattern) {
+ ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
+ 1000 * tc->mode->clock);
+ if (ret)
+ goto err;
+ }
+
+ /* Reset/Enable Main Links */
+ dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ usleep_range(100, 200);
+ dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ timeout = 1000;
+ do {
+ tc_read(DP_PHY_CTRL, &value);
+ udelay(1);
+ } while ((!(value & PHY_RDY)) && (--timeout));
+
+ if (timeout == 0) {
+ dev_err(dev, "timeout waiting for phy to become ready");
+ return -ETIMEDOUT;
+ }
+
+ /* Set misc: 8 bits per color */
+ ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
+ if (ret)
+ goto err;
+
+ /*
+ * ASSR mode
+ * On the TC358767 side, ASSR is configured through a strap pin;
+ * there seems to be no way to change this setting from software.
+ *
+ * Check whether the display is configured for the same mode as the TC358767.
+ */
+ if (tc->assr != tc->link.assr) {
+ dev_dbg(dev, "Trying to set display to ASSR: %d\n",
+ tc->assr);
+ /* try to set ASSR on display side */
+ tmp[0] = tc->assr;
+ ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]);
+ if (ret < 0)
+ goto err_dpcd_read;
+ /* read back */
+ ret = drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+
+ if (tmp[0] != tc->assr) {
+ dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
+ tc->assr);
+ /* trying with disabled scrambler */
+ tc->link.scrambler_dis = 1;
+ }
+ }
+
+ /* Setup Link & DPRx Config for Training */
+ ret = drm_dp_link_configure(aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* DOWNSPREAD_CTRL */
+ tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
+ /* MAIN_LINK_CHANNEL_CODING_SET */
+ tmp[1] = tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00;
+ ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ ret = tc_link_training(tc, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto err;
+
+ ret = tc_link_training(tc, DP_TRAINING_PATTERN_2);
+ if (ret)
+ goto err;
+
+ /* Clear DPCD 0x102 */
+ /* Note: cannot use the DP0_SNKLTCTRL (0x06E4) shortcut */
+ tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
+ ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* Clear Training Pattern, set AutoCorrect Mode = 1 */
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+
+ /* Wait */
+ timeout = 100;
+ do {
+ udelay(1);
+ /* Read DPCD 0x202-0x207 */
+ ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+ ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
+ DP_CHANNEL_EQ_BITS)); /* Lane0 */
+ aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
+ } while ((--timeout) && !(ready && aligned));
+
+ if (timeout == 0) {
+ /* Read DPCD 0x200-0x201 */
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+ dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
+ dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
+ tmp[1]);
+ dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]);
+ dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n",
+ tmp[4]);
+ dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]);
+ dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
+ tmp[6]);
+
+ if (!ready)
+ dev_err(dev, "Lane0/1 not ready\n");
+ if (!aligned)
+ dev_err(dev, "Lane0/1 not aligned\n");
+ return -EAGAIN;
+ }
+
+ ret = tc_set_video_mode(tc, tc->mode);
+ if (ret)
+ goto err;
+
+ /* Set M/N */
+ ret = tc_stream_clock_calc(tc);
+ if (ret)
+ goto err;
+
+ return 0;
+err_dpcd_read:
+ dev_err(tc->dev, "Failed to read DPCD: %d\n", ret);
+ return ret;
+err_dpcd_write:
+ dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
+err:
+ return ret;
+}
+
+static int tc_main_link_stream(struct tc_data *tc, int state)
+{
+ int ret;
+ u32 value;
+
+ dev_dbg(tc->dev, "stream: %d\n", state);
+
+ if (state) {
+ value = VID_MN_GEN | DP_EN;
+ if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ value |= EF_EN;
+ tc_write(DP0CTL, value);
+ /*
+ * VID_EN assertion should be delayed by at least N * LSCLK
+ * cycles from the time VID_MN_GEN is enabled in order to
+ * generate stable values for VID_M. LSCLK is 270 MHz or
+ * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
+ * so a delay of at least 203 us should suffice.
+ */
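+	/*
+	 * Worked example (illustrative, not taken from the datasheet): in the
+	 * worst case LSCLK = 162 MHz, so 32768 / 162 MHz ~= 202.3 us, i.e. the
+	 * "at least 203 us" quoted above; 500-1000 us adds comfortable margin.
+	 */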
+ usleep_range(500, 1000);
+ value |= VID_EN;
+ tc_write(DP0CTL, value);
+ /* Set input interface */
+ value = DP0_AUDSRC_NO_INPUT;
+ if (tc_test_pattern)
+ value |= DP0_VIDSRC_COLOR_BAR;
+ else
+ value |= DP0_VIDSRC_DPI_RX;
+ tc_write(SYSCTRL, value);
+ } else {
+ tc_write(DP0CTL, 0);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static enum drm_connector_status
+tc_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ drm_panel_prepare(tc->panel);
+}
+
+static void tc_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ int ret;
+
+ ret = tc_main_link_setup(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link setup error: %d\n", ret);
+ return;
+ }
+
+ ret = tc_main_link_stream(tc, 1);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link stream start error: %d\n", ret);
+ return;
+ }
+
+ drm_panel_enable(tc->panel);
+}
+
+static void tc_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ int ret;
+
+ drm_panel_disable(tc->panel);
+
+ ret = tc_main_link_stream(tc, 0);
+ if (ret < 0)
+ dev_err(tc->dev, "main link stream stop error: %d\n", ret);
+}
+
+static void tc_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ drm_panel_unprepare(tc->panel);
+}
+
+static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+	/* Fix up sync polarities; both hsync and vsync are active low */
+ adj->flags = mode->flags;
+ adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+
+ return true;
+}
+
+static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Accept any mode */
+ return MODE_OK;
+}
+
+static void tc_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ tc->mode = mode;
+}
+
+static int tc_connector_get_modes(struct drm_connector *connector)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+ struct edid *edid;
+ unsigned int count;
+
+ if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
+ count = tc->panel->funcs->get_modes(tc->panel);
+ if (count > 0)
+ return count;
+ }
+
+ edid = drm_get_edid(connector, &tc->aux.ddc);
+
+ kfree(tc->edid);
+ tc->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ return count;
+}
+
+static void tc_connector_set_polling(struct tc_data *tc,
+ struct drm_connector *connector)
+{
+ /* TODO: add support for HPD */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+}
+
+static struct drm_encoder *
+tc_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+
+ return tc->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
+ .get_modes = tc_connector_get_modes,
+ .mode_valid = tc_connector_mode_valid,
+ .best_encoder = tc_connector_best_encoder,
+};
+
+static void tc_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = tc_connector_detect,
+ .destroy = tc_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tc_bridge_attach(struct drm_bridge *bridge)
+{
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ struct tc_data *tc = bridge_to_tc(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ /* Create eDP connector */
+ drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
+ ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret)
+ return ret;
+
+ if (tc->panel)
+ drm_panel_attach(tc->panel, &tc->connector);
+
+ drm_display_info_set_bus_formats(&tc->connector.display_info,
+ &bus_format, 1);
+ drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs tc_bridge_funcs = {
+ .attach = tc_bridge_attach,
+ .mode_set = tc_bridge_mode_set,
+ .pre_enable = tc_bridge_pre_enable,
+ .enable = tc_bridge_enable,
+ .disable = tc_bridge_disable,
+ .post_disable = tc_bridge_post_disable,
+ .mode_fixup = tc_bridge_mode_fixup,
+};
+
+static bool tc_readable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != SYSCTRL;
+}
+
+static const struct regmap_range tc_volatile_ranges[] = {
+ regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
+ regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
+ regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
+ regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
+ regmap_reg_range(VFUEN0, VFUEN0),
+};
+
+static const struct regmap_access_table tc_volatile_table = {
+ .yes_ranges = tc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
+};
+
+static bool tc_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg != TC_IDREG) &&
+ (reg != DP0_LTSTAT) &&
+ (reg != DP0_SNKLTCHGREQ);
+}
+
+static const struct regmap_config tc_regmap_config = {
+ .name = "tc358767",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PLL_DBG,
+ .cache_type = REGCACHE_RBTREE,
+ .readable_reg = tc_readable_reg,
+ .volatile_table = &tc_volatile_table,
+ .writeable_reg = tc_writeable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
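+
+/*
+ * Illustration (an assumption based on the regmap config above, not taken
+ * from the datasheet): with reg_bits = 16 big-endian and val_bits = 32
+ * little-endian, a write of the value 0x00000001 to register address 0x0918
+ * should appear on the I2C bus as the byte sequence
+ *
+ *	0x09 0x18 0x01 0x00 0x00 0x00
+ *
+ * i.e. the register address MSB first, followed by the value LSB first.
+ */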
+
+static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *ep;
+ struct tc_data *tc;
+ int ret;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->dev = dev;
+
+ /* port@2 is the output port */
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_warn(dev, "endpoint %s not connected\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -ENODEV;
+ }
+ of_node_put(ep);
+ tc->panel = of_drm_find_panel(remote);
+ if (tc->panel) {
+ dev_dbg(dev, "found panel %s\n", remote->full_name);
+ } else {
+ dev_dbg(dev, "waiting for panel %s\n",
+ remote->full_name);
+ of_node_put(remote);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(remote);
+ }
+
+ /* Shut down GPIO is optional */
+ tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->sd_gpio))
+ return PTR_ERR(tc->sd_gpio);
+
+ if (tc->sd_gpio) {
+ gpiod_set_value_cansleep(tc->sd_gpio, 0);
+ usleep_range(5000, 10000);
+ }
+
+ /* Reset GPIO is optional */
+ tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(tc->reset_gpio))
+ return PTR_ERR(tc->reset_gpio);
+
+ if (tc->reset_gpio) {
+ gpiod_set_value_cansleep(tc->reset_gpio, 1);
+ usleep_range(5000, 10000);
+ }
+
+ tc->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(tc->refclk)) {
+ ret = PTR_ERR(tc->refclk);
+ dev_err(dev, "Failed to get refclk: %d\n", ret);
+ return ret;
+ }
+
+ tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
+ if (IS_ERR(tc->regmap)) {
+ ret = PTR_ERR(tc->regmap);
+ dev_err(dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
+ if (ret) {
+ dev_err(tc->dev, "can not read device ID: %d\n", ret);
+ return ret;
+ }
+
+ if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) {
+ dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev);
+ return -EINVAL;
+ }
+
+ tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+
+ ret = tc_aux_link_setup(tc);
+ if (ret)
+ return ret;
+
+ /* Register DP AUX channel */
+ tc->aux.name = "TC358767 AUX i2c adapter";
+ tc->aux.dev = tc->dev;
+ tc->aux.transfer = tc_aux_transfer;
+ ret = drm_dp_aux_register(&tc->aux);
+ if (ret)
+ return ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret)
+ goto err_unregister_aux;
+
+ tc_connector_set_polling(tc, &tc->connector);
+
+ tc->bridge.funcs = &tc_bridge_funcs;
+ tc->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&tc->bridge);
+ if (ret) {
+ dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
+ goto err_unregister_aux;
+ }
+
+ i2c_set_clientdata(client, tc);
+
+ return 0;
+err_unregister_aux:
+ drm_dp_aux_unregister(&tc->aux);
+ return ret;
+}
+
+static int tc_remove(struct i2c_client *client)
+{
+ struct tc_data *tc = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&tc->bridge);
+ drm_dp_aux_unregister(&tc->aux);
+
+ tc_pxl_pll_dis(tc);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc358767_i2c_ids[] = {
+ { "tc358767", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
+
+static const struct of_device_id tc358767_of_ids[] = {
+ { .compatible = "toshiba,tc358767", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358767_of_ids);
+
+static struct i2c_driver tc358767_driver = {
+ .driver = {
+ .name = "tc358767",
+ .of_match_table = tc358767_of_ids,
+ },
+ .id_table = tc358767_i2c_ids,
+ .probe = tc_probe,
+ .remove = tc_remove,
+};
+module_i2c_driver(tc358767_driver);
+
+MODULE_AUTHOR("Andrey Gusakov <andrey.gusakov@cogentembedded.com>");
+MODULE_DESCRIPTION("tc358767 eDP encoder driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 9864559e5fb9..04b3c161dfae 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,11 +1,7 @@
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dc83f69da6f1..b05f7eae32ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = cirrus_gem_free_object,
+ .gem_free_object_unlocked = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 32d32c5b7b17..76bcb43e7c06 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -17,8 +17,8 @@
static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
- if (cirrus_fb->obj)
- drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
goto out;
}
+ /*
+	 * cirrus_modeset_init() initializes/registers the emulated fbdev and
+	 * DRM internals can access/test some of the fields in
+	 * mode_config->funcs as part of the fbdev registration process.
+ * Make sure dev->mode_config.funcs is properly set to avoid
+ * dereferencing a NULL pointer.
+ * FIXME: mode_config.funcs assignment should probably be done in
+ * cirrus_modeset_init() (that's a common pattern seen in other DRM
+ * drivers).
+ */
+ dev->mode_config.funcs = &cirrus_mode_funcs;
r = cirrus_modeset_init(cdev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
goto out;
}
- dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
return 0;
out:
cirrus_driver_unload(dev);
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index d3d8d7bfcc57..17c915d9a03e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
* use this for 8-bit mode so can't perform smooth fades on deeper modes,
* but it's a requirement that we provide the function
*/
-static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
int i;
- if (size != CIRRUS_LUT_SIZE)
- return;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ for (i = 0; i < size; i++) {
cirrus_crtc->lut_r[i] = red[i];
cirrus_crtc->lut_g[i] = green[i];
cirrus_crtc->lut_b[i] = blue[i];
}
cirrus_crtc_load_lut(crtc);
+
+ return 0;
}
/* Simple cleanup function */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 6768b7b1af32..1cc9ee607128 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int cirrus_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
.evict_flags = cirrus_bo_evict_flags,
- .move = cirrus_bo_move,
+ .move = NULL,
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9bb99e274d23..fa3930757972 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -33,6 +33,20 @@
#include "drm_crtc_internal.h"
+static void crtc_commit_free(struct kref *kref)
+{
+ struct drm_crtc_commit *commit =
+ container_of(kref, struct drm_crtc_commit, ref);
+
+ kfree(commit);
+}
+
+void drm_crtc_commit_put(struct drm_crtc_commit *commit)
+{
+ kref_put(&commit->ref, crtc_commit_free);
+}
+EXPORT_SYMBOL(drm_crtc_commit_put);
+
/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
@@ -44,11 +58,8 @@
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
kfree(state->connectors);
- kfree(state->connector_states);
kfree(state->crtcs);
- kfree(state->crtc_states);
kfree(state->planes);
- kfree(state->plane_states);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
sizeof(*state->crtcs), GFP_KERNEL);
if (!state->crtcs)
goto fail;
- state->crtc_states = kcalloc(dev->mode_config.num_crtc,
- sizeof(*state->crtc_states), GFP_KERNEL);
- if (!state->crtc_states)
- goto fail;
state->planes = kcalloc(dev->mode_config.num_total_plane,
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
- state->plane_states = kcalloc(dev->mode_config.num_total_plane,
- sizeof(*state->plane_states), GFP_KERNEL);
- if (!state->plane_states)
- goto fail;
state->dev = dev;
@@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector = state->connectors[i];
+ struct drm_connector *connector = state->connectors[i].ptr;
if (!connector)
continue;
connector->funcs->atomic_destroy_state(connector,
- state->connector_states[i]);
- state->connectors[i] = NULL;
- state->connector_states[i] = NULL;
+ state->connectors[i].state);
+ state->connectors[i].ptr = NULL;
+ state->connectors[i].state = NULL;
drm_connector_unreference(connector);
}
for (i = 0; i < config->num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc *crtc = state->crtcs[i].ptr;
if (!crtc)
continue;
crtc->funcs->atomic_destroy_state(crtc,
- state->crtc_states[i]);
- state->crtcs[i] = NULL;
- state->crtc_states[i] = NULL;
+ state->crtcs[i].state);
+
+ if (state->crtcs[i].commit) {
+ kfree(state->crtcs[i].commit->event);
+ state->crtcs[i].commit->event = NULL;
+ drm_crtc_commit_put(state->crtcs[i].commit);
+ }
+
+ state->crtcs[i].commit = NULL;
+ state->crtcs[i].ptr = NULL;
+ state->crtcs[i].state = NULL;
}
for (i = 0; i < config->num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
+ struct drm_plane *plane = state->planes[i].ptr;
if (!plane)
continue;
plane->funcs->atomic_destroy_state(plane,
- state->plane_states[i]);
- state->planes[i] = NULL;
- state->plane_states[i] = NULL;
+ state->planes[i].state);
+ state->planes[i].ptr = NULL;
+ state->planes[i].state = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
if (!crtc_state)
return ERR_PTR(-ENOMEM);
- state->crtc_states[index] = crtc_state;
- state->crtcs[index] = crtc;
+ state->crtcs[index].state = crtc_state;
+ state->crtcs[index].ptr = crtc;
crtc_state->state = state;
DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
@@ -393,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob,
if (old_blob == new_blob)
return;
- if (old_blob)
- drm_property_unreference_blob(old_blob);
+ drm_property_unreference_blob(old_blob);
if (new_blob)
drm_property_reference_blob(new_blob);
*blob = new_blob;
@@ -632,8 +642,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
if (!plane_state)
return ERR_PTR(-ENOMEM);
- state->plane_states[index] = plane_state;
- state->planes[index] = plane;
+ state->planes[index].state = plane_state;
+ state->planes[index].ptr = plane;
plane_state->state = state;
DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -701,6 +711,8 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_h = val;
} else if (property == config->rotation_property) {
state->rotation = val;
+ } else if (property == plane->zpos_property) {
+ state->zpos = val;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -757,6 +769,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_h;
} else if (property == config->rotation_property) {
*val = state->rotation;
+ } else if (property == plane->zpos_property) {
+ *val = state->zpos;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -897,8 +911,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
index = drm_connector_index(connector);
if (index >= state->num_connector) {
- struct drm_connector **c;
- struct drm_connector_state **cs;
+ struct __drm_connnectors_state *c;
int alloc = max(index + 1, config->num_connector);
c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -909,26 +922,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
memset(&state->connectors[state->num_connector], 0,
sizeof(*state->connectors) * (alloc - state->num_connector));
- cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
- if (!cs)
- return ERR_PTR(-ENOMEM);
-
- state->connector_states = cs;
- memset(&state->connector_states[state->num_connector], 0,
- sizeof(*state->connector_states) * (alloc - state->num_connector));
state->num_connector = alloc;
}
- if (state->connector_states[index])
- return state->connector_states[index];
+ if (state->connectors[index].state)
+ return state->connectors[index].state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_reference(connector);
- state->connector_states[index] = connector_state;
- state->connectors[index] = connector;
+ state->connectors[index].state = connector_state;
+ state->connectors[index].ptr = connector;
connector_state->state = state;
DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1457,7 +1463,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+ struct drm_device *dev, struct drm_file *file_priv,
+ struct fence *fence, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
int ret;
@@ -1470,12 +1477,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
+ if (file_priv) {
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ return NULL;
+ }
}
+ e->base.fence = fence;
+
return e;
}
@@ -1715,7 +1727,8 @@ retry:
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *e;
- e = create_vblank_event(dev, file_priv, arg->user_data);
+ e = create_vblank_event(dev, file_priv, NULL,
+ arg->user_data);
if (!e) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddfa0d120e39..20be86d89a20 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,8 @@
#include <drm/drm_atomic_helper.h>
#include <linux/fence.h>
+#include "drm_crtc_internal.h"
+
/**
* DOC: overview
*
@@ -110,8 +112,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector, conn_state);
- else
+ else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
+ else
+ new_encoder = drm_atomic_helper_best_encoder(connector);
if (new_encoder) {
if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +302,10 @@ update_connector_routing(struct drm_atomic_state *state,
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector,
connector_state);
- else
+ else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
+ else
+ new_encoder = drm_atomic_helper_best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -414,6 +420,9 @@ mode_fixup(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
+ if (!crtc_state->enable)
+ continue;
+
if (!crtc_state->mode_changed &&
!crtc_state->connectors_changed)
continue;
@@ -458,7 +467,7 @@ mode_fixup(struct drm_atomic_state *state)
* times for the same update, e.g. when the ->atomic_check functions depend upon
* the adjusted dotclock for fifo space allocation and watermark computation.
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno
*/
int
@@ -572,7 +581,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* It also sets crtc_state->planes_changed to indicate that a crtc has
* updated planes.
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno
*/
int
@@ -585,6 +594,10 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret = 0;
+ ret = drm_atomic_helper_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
@@ -611,7 +624,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_check)
continue;
- ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+ ret = funcs->atomic_check(crtc, crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.id, crtc->name);
@@ -640,7 +653,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* ->atomic_check functions depend upon an updated adjusted_mode.clock to
* e.g. properly compute watermarks.
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno
*/
int drm_atomic_helper_check(struct drm_device *dev,
@@ -1113,22 +1126,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblocking: whether nonblocking behavior is requested.
+ * drm_atomic_helper_commit_tail - commit atomic update to hardware
+ * @state: new modeset state to be committed
*
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement nonblocking commits.
+ * This is the default implementation for the ->atomic_commit_tail() hook of the
+ * &drm_mode_config_helper_funcs vtable.
*
- * Note that right now this function does not support nonblocking commits, hence
- * driver writers must implement their own version for now. Also note that the
- * default ordering of how the various stages are called is to match the legacy
- * modeset helper library closest. One peculiarity of that is that it doesn't
- * mesh well with runtime PM at all.
+ * Note that the default ordering of how the various stages are called is to
+ * match the legacy modeset helper library closest. One peculiarity of that is
+ * that it doesn't mesh well with runtime PM at all.
*
- * For drivers supporting runtime PM the recommended sequence is
+ * For drivers supporting runtime PM the recommended sequence is instead ::
*
* drm_atomic_helper_commit_modeset_disables(dev, state);
*
@@ -1136,9 +1144,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*
* drm_atomic_helper_commit_planes(dev, state, true);
*
- * See the kerneldoc entries for these three functions for more details.
+ * for committing the atomic update to hardware. See the kerneldoc entries for
+ * these three functions for more details.
+ */
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, false);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
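+
+/*
+ * Hypothetical example (not part of this patch): a driver that wants the
+ * runtime-PM friendly ordering documented above would supply its own
+ * ->atomic_commit_tail() along these lines, enabling CRTCs before touching
+ * planes:
+ *
+ *	static void foo_atomic_commit_tail(struct drm_atomic_state *state)
+ *	{
+ *		struct drm_device *dev = state->dev;
+ *
+ *		drm_atomic_helper_commit_modeset_disables(dev, state);
+ *		drm_atomic_helper_commit_modeset_enables(dev, state);
+ *		drm_atomic_helper_commit_planes(dev, state, true);
+ *
+ *		drm_atomic_helper_commit_hw_done(state);
+ *
+ *		drm_atomic_helper_wait_for_vblanks(dev, state);
+ *		drm_atomic_helper_cleanup_planes(dev, state);
+ *	}
+ *
+ * and point &drm_mode_config_helper_funcs.atomic_commit_tail at it.
+ */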
+
+static void commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config_helper_funcs *funcs;
+
+ funcs = dev->mode_config.helper_private;
+
+ drm_atomic_helper_wait_for_fences(dev, state);
+
+ drm_atomic_helper_wait_for_dependencies(state);
+
+ if (funcs && funcs->atomic_commit_tail)
+ funcs->atomic_commit_tail(state);
+ else
+ drm_atomic_helper_commit_tail(state);
+
+ drm_atomic_helper_commit_cleanup_done(state);
+
+ drm_atomic_state_free(state);
+}
+
+static void commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state = container_of(work,
+ struct drm_atomic_state,
+ commit_work);
+ commit_tail(state);
+}
+
+/**
+ * drm_atomic_helper_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. This function implements nonblocking commits, using
+ * drm_atomic_helper_setup_commit() and related functions.
+ *
+ * Committing the actual hardware state is done through the
+ * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
+ * or its default implementation, drm_atomic_helper_commit_tail().
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno.
*/
int drm_atomic_helper_commit(struct drm_device *dev,
@@ -1147,8 +1221,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
{
int ret;
- if (nonblock)
- return -EBUSY;
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&state->commit_work, commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
@@ -1160,7 +1237,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* Everything below can be run asynchronously without the need to grab
@@ -1176,21 +1253,16 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
+ *
+ * NOTE: Commit work has multiple phases, first hardware commit, then
+ * cleanup. We want them to overlap, hence need system_unbound_wq to
+	 * make sure work items don't artificially stall on one another.
*/
- drm_atomic_helper_wait_for_fences(dev, state);
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_planes(dev, state, false);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ commit_tail(state);
return 0;
}
@@ -1199,12 +1271,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
/**
* DOC: implementing nonblocking commit
*
- * For now the atomic helpers don't support nonblocking commit directly. If
- * there is real need it could be added though, using the dma-buf fence
- * infrastructure for generic synchronization with outstanding rendering.
- *
- * For now drivers have to implement nonblocking commit themselves, with the
- * following sequence being the recommended one:
+ * Nonblocking atomic commits have to be implemented in the following sequence:
*
* 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
* which commit needs to call which can fail, so we want to run it first and
@@ -1216,10 +1283,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* cancelled updates. Note that it is important to ensure that the framebuffer
* cleanup is still done when cancelling.
*
- * For sufficient parallelism it is recommended to have a work item per crtc
- * (for updates which don't touch global state) and a global one. Then we only
- * need to synchronize with the crtc work items for changed crtcs and the global
- * work item, which allows nice concurrent updates on disjoint sets of crtcs.
+ * Asynchronous workers need to have sufficient parallelism to be able to run
+ * different atomic commits on different CRTCs in parallel. The simplest way to
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
+ * that drivers are not required to split up atomic commits and run an
+ * individual commit in parallel - userspace is supposed to do that if it cares.
+ * But it might be beneficial to do that for modesets, since those necessarily
+ * must be done as one global operation, and enabling or disabling a CRTC can
+ * take a long time. But even that is not required.
*
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
@@ -1232,8 +1303,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
* then cleaning up the framebuffers after the old framebuffer is no longer
* being displayed.
+ *
+ * The above scheme is implemented in the atomic helper libraries in
+ * drm_atomic_helper_commit() using a bunch of helper functions. See
+ * drm_atomic_helper_setup_commit() for a starting point.
*/
+static int stall_checks(struct drm_crtc *crtc, bool nonblock)
+{
+ struct drm_crtc_commit *commit, *stall_commit = NULL;
+ bool completed = true;
+ int i;
+ long ret = 0;
+
+ spin_lock(&crtc->commit_lock);
+ i = 0;
+ list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+ if (i == 0) {
+ completed = try_wait_for_completion(&commit->flip_done);
+ /* Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones. */
+ if (!completed && nonblock) {
+ spin_unlock(&crtc->commit_lock);
+ return -EBUSY;
+ }
+ } else if (i == 1) {
+ stall_commit = commit;
+ drm_crtc_commit_get(stall_commit);
+ break;
+ }
+
+ i++;
+ }
+ spin_unlock(&crtc->commit_lock);
+
+ if (!stall_commit)
+ return 0;
+
+	/* We don't want to let commits get ahead of cleanup work too much;
+	 * stalling on the 2nd previous commit means triple-buffering won't
+	 * ever stall.
+ */
+ ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(stall_commit);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
+ * @state: new modeset state to be committed
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function prepares @state to be used by the atomic helper's support for
+ * nonblocking commits. Drivers using the nonblocking commit infrastructure
+ * should always call this function from their ->atomic_commit hook.
+ *
+ * To be able to use this support drivers need to use a few more helper
+ * functions. drm_atomic_helper_wait_for_dependencies() must be called before
+ * actually committing the hardware state, and for nonblocking commits this call
+ * must be placed in the async worker. See also drm_atomic_helper_swap_state()
+ * and its stall parameter, for when a driver's commit hooks look at the
+ * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
+ *
+ * Completion of the hardware commit step must be signalled using
+ * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
+ * to read or change any permanent software or hardware modeset state. The only
+ * exception is state protected by other means than &drm_modeset_lock locks.
+ * Only the free standing @state with pointers to the old state structures can
+ * be inspected, e.g. to clean up old buffers using
+ * drm_atomic_helper_cleanup_planes().
+ *
+ * At the very end, before cleaning up @state drivers must call
+ * drm_atomic_helper_commit_cleanup_done().
+ *
+ * This is all implemented in drm_atomic_helper_commit(), giving drivers a
+ * complete and easy-to-use default implementation of the atomic_commit() hook.
+ *
+ * The tracking of asynchronously executed and still pending commits is done
+ * using the core structure &drm_crtc_commit.
+ *
+ * By default there's no need to clean up resources allocated by this function
+ * explicitly: drm_atomic_state_default_clear() will take care of that
+ * automatically.
+ *
+ * Returns:
+ *
+ * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
+ * -ENOMEM on allocation failures and -EINTR when a signal is pending.
+ */
+int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i, ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (!commit)
+ return -ENOMEM;
+
+ init_completion(&commit->flip_done);
+ init_completion(&commit->hw_done);
+ init_completion(&commit->cleanup_done);
+ INIT_LIST_HEAD(&commit->commit_entry);
+ kref_init(&commit->ref);
+ commit->crtc = crtc;
+
+ state->crtcs[i].commit = commit;
+
+ ret = stall_checks(crtc, nonblock);
+ if (ret)
+ return ret;
+
+ /* Drivers only send out events when at least either current or
+ * new CRTC state is active. Complete right away if everything
+ * stays off. */
+ if (!crtc->state->active && !crtc_state->active) {
+ complete_all(&commit->flip_done);
+ continue;
+ }
+
+ /* Legacy cursor updates are fully unsynced. */
+ if (state->legacy_cursor_update) {
+ complete_all(&commit->flip_done);
+ continue;
+ }
+
+ if (!crtc_state->event) {
+ commit->event = kzalloc(sizeof(*commit->event),
+ GFP_KERNEL);
+ if (!commit->event)
+ return -ENOMEM;
+
+ crtc_state->event = commit->event;
+ }
+
+ crtc_state->event->base.completion = &commit->flip_done;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
+
+
+static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_commit *commit;
+ int i = 0;
+
+ list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+ /* skip the first entry, that's the current commit */
+ if (i == 1)
+ return commit;
+ i++;
+ }
+
+ return NULL;
+}
+
+/**
+ * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
+ * @state: new modeset state to be committed
+ *
+ * This function waits for all preceding commits that touch the same CRTC as
+ * @state to both be committed to the hardware (as signalled by
+ * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
+ * by calling drm_crtc_vblank_send_event on the event member of
+ * &drm_crtc_state).
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i;
+ long ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ spin_lock(&crtc->commit_lock);
+ commit = preceeding_commit(crtc);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ /* Currently no support for overwriting flips, hence
+ * stall for previous one to execute completely. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
+
+/**
+ * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
+ * @state: new modeset state to be committed
+ *
+ * This function is used to signal completion of the hardware commit step. After
+ * this step the driver is not allowed to read or change any permanent software
+ * or hardware modeset state. The only exception is state protected by other
+ * means than &drm_modeset_lock locks.
+ *
+ * Drivers should try to postpone any expensive or delayed cleanup work after
+ * this function is called.
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ commit = state->crtcs[i].commit;
+ if (!commit)
+ continue;
+
+ /* backend must have consumed any event by now */
+ WARN_ON(crtc->state->event);
+ spin_lock(&crtc->commit_lock);
+ complete_all(&commit->hw_done);
+ spin_unlock(&crtc->commit_lock);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
+
+/**
+ * drm_atomic_helper_commit_cleanup_done - signal completion of commit
+ * @state: new modeset state to be committed
+ *
+ * This signals completion of the atomic update @state, including any cleanup
+ * work. If used, it must be called right before calling
+ * drm_atomic_state_free().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i;
+ long ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ commit = state->crtcs[i].commit;
+ if (WARN_ON(!commit))
+ continue;
+
+ spin_lock(&crtc->commit_lock);
+ complete_all(&commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&commit->hw_done));
+
+ /* commit_list borrows our reference, need to remove before we
+ * clean up our drm_atomic_state. But only after it actually
+ * completed, otherwise subsequent commits won't stall properly. */
+ if (try_wait_for_completion(&commit->flip_done))
+ goto del_commit;
+
+ spin_unlock(&crtc->commit_lock);
+
+ /* We must wait for the vblank event to signal our completion
+ * before releasing our reference, since the vblank work does
+ * not hold a reference of its own. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ spin_lock(&crtc->commit_lock);
+del_commit:
+ list_del(&commit->commit_entry);
+ spin_unlock(&crtc->commit_lock);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
+
/**
* drm_atomic_helper_prepare_planes - prepare plane resources before commit
* @dev: DRM device
@@ -1249,16 +1622,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ret, i;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int ret, i, j;
- for (i = 0; i < nplanes; i++) {
+ for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
funcs = plane->helper_private;
@@ -1272,12 +1641,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
return 0;
fail:
- for (i--; i >= 0; i--) {
+ for_each_plane_in_state(state, plane, plane_state, j) {
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
- if (!plane)
+ if (j >= i)
continue;
funcs = plane->helper_private;
@@ -1537,8 +1904,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
/**
* drm_atomic_helper_swap_state - store atomic state into current sw state
- * @dev: DRM device
* @state: atomic state
+ * @stall: stall for preceding commits
*
* This function stores the atomic state into the current state pointers in all
* driver objects. It should be called after all failing steps have been done
@@ -1559,42 +1926,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
*
* 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
* contains the old state. Also do any other cleanup required with that state.
+ *
+ * @stall must be set when nonblocking commits for this driver directly access
+ * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
+ * current atomic helpers this is almost always the case, since the helpers
+ * don't pass the right state structures to the callbacks.
*/
-void drm_atomic_helper_swap_state(struct drm_device *dev,
- struct drm_atomic_state *state)
+void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+ bool stall)
{
int i;
+ long ret;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_commit *commit;
+
+ if (stall) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector = state->connectors[i];
-
- if (!connector)
- continue;
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+ crtc->base.id, crtc->name);
+ drm_crtc_commit_put(commit);
+ }
+ }
+ for_each_connector_in_state(state, connector, conn_state, i) {
connector->state->state = state;
- swap(state->connector_states[i], connector->state);
+ swap(state->connectors[i].state, connector->state);
connector->state->state = NULL;
}
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
crtc->state->state = state;
- swap(state->crtc_states[i], crtc->state);
+ swap(state->crtcs[i].state, crtc->state);
crtc->state->state = NULL;
- }
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
+ if (state->crtcs[i].commit) {
+ spin_lock(&crtc->commit_lock);
+ list_add(&state->crtcs[i].commit->commit_entry,
+ &crtc->commit_list);
+ spin_unlock(&crtc->commit_lock);
- if (!plane)
- continue;
+ state->crtcs[i].commit->event = NULL;
+ }
+ }
+ for_each_plane_in_state(state, plane, plane_state, i) {
plane->state->state = state;
- swap(state->plane_states[i], plane->state);
+ swap(state->planes[i].state, plane->state);
plane->state->state = NULL;
}
}
@@ -2409,7 +2804,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* This is the main helper function provided by the atomic helper framework for
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
- * updates it.
+ * updates it.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2566,6 +2961,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->planes_changed = false;
state->connectors_changed = false;
state->color_mgmt_changed = false;
+ state->zpos_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -2930,16 +3326,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
* @red: red correction table
* @green: green correction table
* @blue: green correction table
- * @start:
* @size: size of the tables
*
* Implements support for legacy gamma correction table for drivers
* that support color management through the DEGAMMA_LUT/GAMMA_LUT
* properties.
*/
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t start, uint32_t size)
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +3346,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
- return;
+ return -ENOMEM;
blob = drm_property_create_blob(dev,
sizeof(struct drm_color_lut) * size,
@@ -3002,7 +3397,7 @@ retry:
drm_property_unreference_blob(blob);
- return;
+ return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
@@ -3010,7 +3405,7 @@ fail:
drm_atomic_state_free(state);
drm_property_unreference_blob(blob);
- return;
+ return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 50d0baa06db0..4153e8a193af 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -30,25 +30,36 @@
#include <drm/drmP.h>
#include "drm_internal.h"
+#include "drm_legacy.h"
/**
- * drm_getmagic - Get unique magic of a client
- * @dev: DRM device to operate on
- * @data: ioctl data containing the drm_auth object
- * @file_priv: DRM file that performs the operation
+ * DOC: master and authentication
*
- * This looks up the unique magic of the passed client and returns it. If the
- * client did not have a magic assigned, yet, a new one is registered. The magic
- * is stored in the passed drm_auth object.
+ * struct &drm_master is used to track groups of clients with open
+ * primary/legacy device nodes. For every struct &drm_file which has at least
+ * once successfully become the device master (either through the SET_MASTER
+ * IOCTL, or implicitly through opening the primary device node while no one
+ * else was the current master at that time) there exists one &drm_master.
+ * This is noted in the is_master member of &drm_file. All other clients have
+ * just a pointer to the &drm_master they are associated with.
*
- * Returns: 0 on success, negative error code on failure.
+ * In addition only one &drm_master can be the current master for a &drm_device.
+ * It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or
+ * implicitly through closing/opening the primary device node. See also
+ * drm_is_current_master().
+ *
+ * Clients can authenticate against the current master (if it matches their own)
+ * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters,
+ * this allows controlled access to the device for an entire group of mutually
+ * trusted clients.
*/
+
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->master_mutex);
DRM_DEBUG("%u\n", auth->magic);
return ret < 0 ? ret : 0;
}
-/**
- * drm_authmagic - Authenticate client with a magic
- * @dev: DRM device to operate on
- * @data: ioctl data containing the drm_auth object
- * @file_priv: DRM file that performs the operation
- *
- * This looks up a DRM client by the passed magic and authenticates it.
- *
- * Returns: 0 on success, negative error code on failure.
- */
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data,
DRM_DEBUG("%u\n", auth->magic);
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
+
+static struct drm_master *drm_master_create(struct drm_device *dev)
+{
+ struct drm_master *master;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+ idr_init(&master->magic_map);
+ master->dev = dev;
+
+ return master;
+}
+
+static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
+ bool new_master)
+{
+ int ret = 0;
+
+ dev->master = drm_master_get(fpriv->master);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, fpriv, new_master);
+ if (unlikely(ret != 0)) {
+ drm_master_put(&dev->master);
+ }
+ }
+
+ return ret;
+}
+
+static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct drm_master *old_master;
+ int ret;
+
+ lockdep_assert_held_once(&dev->master_mutex);
+
+ old_master = fpriv->master;
+ fpriv->master = drm_master_create(dev);
+ if (!fpriv->master) {
+ fpriv->master = old_master;
+ return -ENOMEM;
+ }
+
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, fpriv->master);
+ if (ret)
+ goto out_err;
+ }
+ fpriv->is_master = 1;
+ fpriv->authenticated = 1;
+
+ ret = drm_set_master(dev, fpriv, true);
+ if (ret)
+ goto out_err;
+
+ if (old_master)
+ drm_master_put(&old_master);
+
+ return 0;
+
+out_err:
+ /* drop references and restore old master on failure */
+ drm_master_put(&fpriv->master);
+ fpriv->master = old_master;
+
+ return ret;
+}
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->master_mutex);
+ if (drm_is_current_master(file_priv))
+ goto out_unlock;
+
+ if (dev->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->is_master) {
+ ret = drm_new_set_master(dev, file_priv);
+ goto out_unlock;
+ }
+
+ ret = drm_set_master(dev, file_priv, false);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+static void drm_drop_master(struct drm_device *dev,
+ struct drm_file *fpriv)
+{
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, fpriv);
+ drm_master_put(&dev->master);
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&dev->master_mutex);
+ if (!drm_is_current_master(file_priv))
+ goto out_unlock;
+
+ if (!dev->master)
+ goto out_unlock;
+
+ ret = 0;
+ drm_drop_master(dev, file_priv);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+int drm_master_open(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ int ret = 0;
+
+	/* If there is no current master, make this fd the master, but do not
+	 * create any master object for render clients */
+ mutex_lock(&dev->master_mutex);
+ if (!dev->master)
+ ret = drm_new_set_master(dev, file_priv);
+ else
+ file_priv->master = drm_master_get(dev->master);
+ mutex_unlock(&dev->master_mutex);
+
+ return ret;
+}
+
+void drm_master_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_master *master = file_priv->master;
+
+ mutex_lock(&dev->master_mutex);
+ if (file_priv->magic)
+ idr_remove(&file_priv->master->magic_map, file_priv->magic);
+
+ if (!drm_is_current_master(file_priv))
+ goto out;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /*
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+ */
+ mutex_lock(&dev->struct_mutex);
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ wake_up_interruptible_all(&master->lock.lock_queue);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (dev->master == file_priv->master)
+ drm_drop_master(dev, file_priv);
+out:
+ /* drop the master reference held by the file priv */
+ if (file_priv->master)
+ drm_master_put(&file_priv->master);
+ mutex_unlock(&dev->master_mutex);
+}
+
+/**
+ * drm_is_current_master - checks whether @fpriv is the current master
+ * @fpriv: DRM file private
+ *
+ * Checks whether @fpriv is the current master on its device. This decides
+ * whether a client is allowed to run DRM_MASTER IOCTLs.
+ *
+ * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting
+ * - the current master is assumed to own the non-shareable display hardware.
+ */
+bool drm_is_current_master(struct drm_file *fpriv)
+{
+ return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
+}
+EXPORT_SYMBOL(drm_is_current_master);
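For illustration only, a minimal sketch of gating a driver-private ioctl on the current master; the foo_* names are hypothetical and not part of this patch.

#include <drm/drmP.h>
#include <drm/drm_auth.h>

/* Hypothetical driver ioctl: only the current master may reprogram the
 * display path; everyone else gets -EACCES. */
static int foo_set_scanout_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        if (!drm_is_current_master(file_priv))
                return -EACCES;

        /* ... touch master-only state here ... */
        return 0;
}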
+
+/**
+ * drm_master_get - reference a master pointer
+ * @master: struct &drm_master
+ *
+ * Increments the reference count of @master and returns a pointer to @master.
+ */
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_device *dev = master->dev;
+
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ drm_legacy_master_rmmaps(dev, master);
+
+ idr_destroy(&master->magic_map);
+ kfree(master->unique);
+ kfree(master);
+}
+
+/**
+ * drm_master_put - unreference and clear a master pointer
+ * @master: pointer to a pointer of struct &drm_master
+ *
+ * This decrements the &drm_master behind @master and sets it to NULL.
+ */
+void drm_master_put(struct drm_master **master)
+{
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
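The get/put pair above follows the usual kref pattern; a hedged sketch of a driver caching its own reference (the foo_* structure and helpers are invented for illustration):

struct foo_private {
        struct drm_master *cached_master;       /* hypothetical driver field */
};

static void foo_cache_master(struct foo_private *foo,
                             struct drm_file *file_priv)
{
        /* Take an extra reference so the master outlives file_priv. */
        foo->cached_master = drm_master_get(file_priv->master);
}

static void foo_drop_cached_master(struct foo_private *foo)
{
        if (foo->cached_master)
                drm_master_put(&foo->cached_master); /* also NULLs the pointer */
}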
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
new file mode 100644
index 000000000000..f3c0942bd756
--- /dev/null
+++ b/drivers/gpu/drm/drm_blend.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2016 Samsung Electronics Co.Ltd
+ * Authors:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * DRM core plane blending related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "drm_internal.h"
+
+/**
+ * drm_plane_create_zpos_property - create mutable zpos property
+ * @plane: drm plane
+ * @zpos: initial value of zpos property
+ * @min: minimal possible value of zpos property
+ * @max: maximal possible value of zpos property
+ *
+ * This function initializes a generic, mutable zpos property and enables
+ * support for it in the DRM core. Drivers can then attach this property to
+ * planes to let userspace configure the plane arrangement used during the
+ * blending operation. Once the mutable zpos property has been enabled, the DRM
+ * core will automatically calculate drm_plane_state->normalized_zpos values.
+ * Usually min should be set to 0 and max to the number of planes for the given
+ * crtc minus 1.
+ *
+ * If the zpos of some planes cannot be changed (like a fixed background or a
+ * cursor/topmost plane), the driver should adjust the min/max values and assign
+ * those planes an immutable zpos property with a lower or higher value (for
+ * more information, see drm_plane_create_zpos_immutable_property()). In that
+ * case the driver should also assign proper initial zpos values for all planes
+ * in its plane_reset() callback, so that the planes are always sorted properly.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+ unsigned int zpos,
+ unsigned int min, unsigned int max)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create_range(plane->dev, 0, "zpos", min, max);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, zpos);
+
+ plane->zpos_property = prop;
+
+ if (plane->state) {
+ plane->state->zpos = zpos;
+ plane->state->normalized_zpos = zpos;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_zpos_property);
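As an illustrative sketch only (the foo_* name and plane count are made up), a driver would typically attach the mutable property while initializing its overlay planes:

/* Hypothetical overlay plane setup: zpos is user-configurable in
 * [0, nplanes - 1], matching the guidance in the comment above. */
static int foo_overlay_init_zpos(struct drm_plane *plane,
                                 unsigned int initial_zpos,
                                 unsigned int nplanes)
{
        return drm_plane_create_zpos_property(plane, initial_zpos,
                                              0, nplanes - 1);
}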
+
+/**
+ * drm_plane_create_zpos_immutable_property - create immutable zpos property
+ * @plane: drm plane
+ * @zpos: value of zpos property
+ *
+ * This function initializes a generic, immutable zpos property and enables
+ * support for it in the DRM core. With this property the driver lets userspace
+ * query the arrangement of the planes for the blending operation, while
+ * signalling that the hardware (or driver) doesn't support changing the planes'
+ * order.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+ unsigned int zpos)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create_range(plane->dev, DRM_MODE_PROP_IMMUTABLE,
+ "zpos", zpos, zpos);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, zpos);
+
+ plane->zpos_property = prop;
+
+ if (plane->state) {
+ plane->state->zpos = zpos;
+ plane->state->normalized_zpos = zpos;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_zpos_immutable_property);
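For a plane whose position is fixed in hardware, a driver might instead pin it with the immutable variant, e.g. a topmost cursor; the foo_* name is an assumption used only for this sketch.

/* Hypothetical cursor plane: always composited on top, so expose a
 * read-only zpos equal to the highest slot. */
static int foo_cursor_init_zpos(struct drm_plane *plane, unsigned int nplanes)
{
        return drm_plane_create_zpos_immutable_property(plane, nplanes - 1);
}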
+
+static int drm_atomic_state_zpos_cmp(const void *a, const void *b)
+{
+ const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
+ const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
+
+ if (sa->zpos != sb->zpos)
+ return sa->zpos - sb->zpos;
+ else
+ return sa->plane->base.id - sb->plane->base.id;
+}
+
+/**
+ * drm_atomic_helper_crtc_normalize_zpos - calculate normalized zpos values
+ * @crtc: crtc with planes, which have to be considered for normalization
+ * @crtc_state: new atomic state to apply
+ *
+ * This function checks the new states of all planes assigned to the given crtc
+ * and calculates a normalized zpos value for each of them. Planes are compared
+ * first by their zpos values, then by plane id if the zpos values are equal.
+ * The plane with the lowest zpos value is at the bottom. The
+ * plane_state->normalized_zpos is then filled with unique values from 0 to the
+ * number of active planes on the crtc minus one.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_device *dev = crtc->dev;
+ int total_planes = dev->mode_config.num_total_plane;
+ struct drm_plane_state **states;
+ struct drm_plane *plane;
+ int i, n = 0;
+ int ret = 0;
+
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
+ crtc->base.id, crtc->name);
+
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY);
+ if (!states)
+ return -ENOMEM;
+
+ /*
+ * The normalization process might create new states for planes whose
+ * normalized_zpos has to be recalculated.
+ */
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+ states[n++] = plane_state;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] processing zpos value %d\n",
+ plane->base.id, plane->name,
+ plane_state->zpos);
+ }
+
+ sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);
+
+ for (i = 0; i < n; i++) {
+ plane = states[i]->plane;
+
+ states[i]->normalized_zpos = i;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] normalized zpos value %d\n",
+ plane->base.id, plane->name, i);
+ }
+ crtc_state->zpos_changed = true;
+
+done:
+ kfree(states);
+ return ret;
+}
+
+/**
+ * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all
+ * crtcs
+ * @dev: DRM device
+ * @state: atomic state of DRM device
+ *
+ * This function calculates the normalized zpos value for all modified planes
+ * in the provided atomic state of the DRM device. For more information, see
+ * drm_atomic_helper_crtc_normalize_zpos().
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i, ret = 0;
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ crtc = plane_state->crtc;
+ if (!crtc)
+ continue;
+ if (plane->state->zpos != plane_state->zpos) {
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state->zpos_changed = true;
+ }
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->plane_mask != crtc->state->plane_mask ||
+ crtc_state->zpos_changed) {
+ ret = drm_atomic_helper_crtc_normalize_zpos(crtc,
+ crtc_state);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
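The normalization above can be illustrated with a throwaway userspace model (plain C, not kernel code): planes sort by zpos with plane id as tie-breaker, and the resulting array index becomes normalized_zpos.

#include <stdio.h>
#include <stdlib.h>

struct toy_plane { int zpos, id; };

/* Same ordering as drm_atomic_state_zpos_cmp(): zpos first, id second. */
static int toy_zpos_cmp(const void *a, const void *b)
{
        const struct toy_plane *pa = a, *pb = b;

        if (pa->zpos != pb->zpos)
                return pa->zpos - pb->zpos;
        return pa->id - pb->id;
}

int main(void)
{
        struct toy_plane planes[] = { { 4, 31 }, { 1, 33 }, { 4, 28 } };
        int i;

        qsort(planes, 3, sizeof(planes[0]), toy_zpos_cmp);
        for (i = 0; i < 3; i++) /* index i plays the role of normalized_zpos */
                printf("plane %d: zpos %d -> normalized_zpos %d\n",
                       planes[i].id, planes[i].zpos, i);
        return 0;
}

This prints planes 33, 28 and 31 with normalized_zpos 0, 1 and 2 respectively.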
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b3654404abd0..255543086590 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -36,7 +36,7 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge:
+ * either connected to it directly, or through an intermediate bridge::
*
* encoder ---> bridge B ---> bridge A
*
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 9b34158c0f77..c3a12cd8bd0d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
*/
if (!entry->map ||
map->type != entry->map->type ||
- entry->master != dev->primary->master)
+ entry->master != dev->master)
continue;
switch (map->type) {
case _DRM_SHM:
@@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->primary->master->lock.hw_lock != NULL) {
+ if (dev->master->lock.hw_lock != NULL) {
vfree(map->handle);
kfree(map);
return -EBUSY;
}
- dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
+ dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
@@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
mutex_unlock(&dev->struct_mutex);
if (!(map->flags & _DRM_DRIVER))
- list->master = dev->primary->master;
+ list->master = dev->master;
*maplist = list;
return 0;
}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 059f7c39c582..a7916e5f8864 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
mb();
for (; addr < end; addr += size)
clflushopt(addr);
+ clflushopt(end - 1); /* force serialisation */
mb();
return;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 0e3cc66aa8b7..b1dbb60af99f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -39,6 +39,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order)
}
EXPORT_SYMBOL(drm_get_subpixel_order_name);
-static char printable_char(int c)
-{
- return isascii(c) && isprint(c) ? c : '?';
-}
-
-/**
- * drm_get_format_name - return a string for drm fourcc format
- * @format: format to compute name of
- *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
- */
-const char *drm_get_format_name(uint32_t format)
-{
- static char buf[32];
-
- snprintf(buf, sizeof(buf),
- "%c%c%c%c %s-endian (0x%08x)",
- printable_char(format & 0xff),
- printable_char((format >> 8) & 0xff),
- printable_char((format >> 16) & 0xff),
- printable_char((format >> 24) & 0x7f),
- format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
- format);
-
- return buf;
-}
-EXPORT_SYMBOL(drm_get_format_name);
-
/*
* Internal function to assign a slot in the object idr and optionally
* register the object into the idr.
@@ -426,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj)
}
EXPORT_SYMBOL(drm_mode_object_reference);
+/**
+ * drm_crtc_force_disable - Forcibly turn off a CRTC
+ * @crtc: CRTC to turn off
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+ struct drm_mode_set set = {
+ .crtc = crtc,
+ };
+
+ return drm_mode_set_config_internal(&set);
+}
+EXPORT_SYMBOL(drm_crtc_force_disable);
+
+/**
+ * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ drm_modeset_lock_all(dev);
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->enabled) {
+ ret = drm_crtc_force_disable(crtc);
+ if (ret)
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_force_disable_all);
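A hedged sketch of the intended call site, a driver's unbind/unload path (foo_unbind is a hypothetical name):

static void foo_unbind(struct drm_device *dev)
{
        /* Quiesce the display before tearing anything down; the helper
         * takes and drops all modeset locks itself. */
        drm_crtc_force_disable_all(dev);

        /* ... unregister and free driver state ... */
}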
+
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
@@ -535,7 +550,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
*
* Cleanup framebuffer. This function is intended to be used from the drivers
* ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
+ * framebuffers embedded into a larger structure.
*
* Note that this function does not remove the fb from active usuage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -574,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_mode_set set;
- int ret;
if (!fb)
return;
@@ -605,11 +618,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = drm_mode_set_config_internal(&set);
- if (ret)
+ if (drm_crtc_force_disable(crtc))
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
@@ -639,6 +648,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
return num;
}
+static int drm_crtc_register_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->funcs->late_register)
+ ret = crtc->funcs->late_register(crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void drm_crtc_unregister_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->funcs->early_unregister)
+ crtc->funcs->early_unregister(crtc);
+ }
+}
+
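These loops only invoke the optional late_register/early_unregister hooks in each object's funcs table; a driver opts in roughly like the following sketch (the foo_* debugfs helpers are assumptions, not real API):

static int foo_crtc_late_register(struct drm_crtc *crtc)
{
        /* Expose extra debugfs knobs only once userspace can see the CRTC. */
        return foo_crtc_debugfs_init(crtc);     /* assumed driver helper */
}

static void foo_crtc_early_unregister(struct drm_crtc *crtc)
{
        /* Tear the knobs down before the CRTC disappears from userspace. */
        foo_crtc_debugfs_cleanup(crtc);         /* assumed driver helper */
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
        /* ... set_config, page_flip, destroy, ... */
        .late_register = foo_crtc_late_register,
        .early_unregister = foo_crtc_early_unregister,
};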
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -669,6 +703,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->dev = dev;
crtc->funcs = funcs;
+ INIT_LIST_HEAD(&crtc->commit_list);
+ spin_lock_init(&crtc->commit_lock);
+
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@@ -692,7 +729,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
- config->num_crtc++;
+ crtc->index = config->num_crtc++;
crtc->primary = primary;
crtc->cursor = cursor;
@@ -722,6 +759,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ /* Note that the crtc_list is considered to be static; should we
+ * remove the drm_crtc at runtime we would have to decrement all
+ * the indices on the drm_crtc after us in the crtc_list.
+ */
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -741,29 +783,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-/**
- * drm_crtc_index - find the index of a registered CRTC
- * @crtc: CRTC to find index for
- *
- * Given a registered CRTC, return the index of that CRTC within a DRM
- * device's list of CRTCs.
- */
-unsigned int drm_crtc_index(struct drm_crtc *crtc)
-{
- unsigned int index = 0;
- struct drm_crtc *tmp;
-
- drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp == crtc)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_crtc_index);
-
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
@@ -909,11 +928,11 @@ int drm_connector_init(struct drm_device *dev,
connector->dev = dev;
connector->funcs = funcs;
- connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
- if (connector->connector_id < 0) {
- ret = connector->connector_id;
+ ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out_put;
- }
+ connector->index = ret;
+ ret = 0;
connector->connector_type = connector_type;
connector->connector_type_id =
@@ -961,7 +980,7 @@ out_put_type_id:
ida_remove(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
- ida_remove(&config->connector_ida, connector->connector_id);
+ ida_remove(&config->connector_ida, connector->index);
out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
@@ -984,6 +1003,12 @@ void drm_connector_cleanup(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
+ /* The connector should have been removed from userspace long before
+ * it is finally destroyed.
+ */
+ if (WARN_ON(connector->registered))
+ drm_connector_unregister(connector);
+
if (connector->tile_group) {
drm_mode_put_tile_group(dev, connector->tile_group);
connector->tile_group = NULL;
@@ -999,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->connector_type_id);
ida_remove(&dev->mode_config.connector_ida,
- connector->connector_id);
+ connector->index);
kfree(connector->display_info.bus_formats);
drm_mode_object_unregister(dev, &connector->base);
@@ -1030,19 +1055,34 @@ int drm_connector_register(struct drm_connector *connector)
{
int ret;
+ if (connector->registered)
+ return 0;
+
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
ret = drm_debugfs_connector_add(connector);
if (ret) {
- drm_sysfs_connector_remove(connector);
- return ret;
+ goto err_sysfs;
+ }
+
+ if (connector->funcs->late_register) {
+ ret = connector->funcs->late_register(connector);
+ if (ret)
+ goto err_debugfs;
}
drm_mode_object_register(connector->dev, &connector->base);
+ connector->registered = true;
return 0;
+
+err_debugfs:
+ drm_debugfs_connector_remove(connector);
+err_sysfs:
+ drm_sysfs_connector_remove(connector);
+ return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -1054,42 +1094,41 @@ EXPORT_SYMBOL(drm_connector_register);
*/
void drm_connector_unregister(struct drm_connector *connector)
{
+ if (!connector->registered)
+ return;
+
+ if (connector->funcs->early_unregister)
+ connector->funcs->early_unregister(connector);
+
drm_sysfs_connector_remove(connector);
drm_debugfs_connector_remove(connector);
+
+ connector->registered = false;
}
EXPORT_SYMBOL(drm_connector_unregister);
-/**
- * drm_connector_register_all - register all connectors
- * @dev: drm device
- *
- * This function registers all connectors in sysfs and other places so that
- * userspace can start to access them. Drivers can call it after calling
- * drm_dev_register() to complete the device registration, if they don't call
- * drm_connector_register() on each connector individually.
- *
- * When a device is unplugged and should be removed from userspace access,
- * call drm_connector_unregister_all(), which is the inverse of this
- * function.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register_all(struct drm_device *dev)
+static void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
- int ret;
- mutex_lock(&dev->mode_config.mutex);
+ /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_connector_unregister(connector);
+}
- drm_for_each_connector(connector, dev) {
+static int drm_connector_register_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ /* FIXME: taking the mode config mutex ends up in a clash with
+ * fbcon/backlight registration */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
ret = drm_connector_register(connector);
if (ret)
goto err;
}
- mutex_unlock(&dev->mode_config.mutex);
-
return 0;
err:
@@ -1097,27 +1136,31 @@ err:
drm_connector_unregister_all(dev);
return ret;
}
-EXPORT_SYMBOL(drm_connector_register_all);
-/**
- * drm_connector_unregister_all - unregister connector userspace interfaces
- * @dev: drm device
- *
- * This functions unregisters all connectors from sysfs and other places so
- * that userspace can no longer access them. Drivers should call this as the
- * first step tearing down the device instace, or when the underlying
- * physical device disappeared (e.g. USB unplug), right before calling
- * drm_dev_unregister().
- */
-void drm_connector_unregister_all(struct drm_device *dev)
+static int drm_encoder_register_all(struct drm_device *dev)
{
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret = 0;
- /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- drm_connector_unregister(connector);
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->funcs->late_register)
+ ret = encoder->funcs->late_register(encoder);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void drm_encoder_unregister_all(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->funcs->early_unregister)
+ encoder->funcs->early_unregister(encoder);
+ }
}
-EXPORT_SYMBOL(drm_connector_unregister_all);
/**
* drm_encoder_init - Init a preallocated encoder
@@ -1166,7 +1209,7 @@ int drm_encoder_init(struct drm_device *dev,
}
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- dev->mode_config.num_encoder++;
+ encoder->index = dev->mode_config.num_encoder++;
out_put:
if (ret)
@@ -1180,29 +1223,6 @@ out_unlock:
EXPORT_SYMBOL(drm_encoder_init);
/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
- unsigned int index = 0;
- struct drm_encoder *tmp;
-
- drm_for_each_encoder(tmp, encoder->dev) {
- if (tmp == encoder)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_encoder_index);
-
-/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
@@ -1212,6 +1232,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ /* Note that the encoder_list is considered to be static; should we
+ * remove the drm_encoder at runtime we would have to decrement all
+ * the indices on the drm_encoder after us in the encoder_list.
+ */
+
drm_modeset_lock_all(dev);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
@@ -1300,7 +1325,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->type = type;
list_add_tail(&plane->head, &config->plane_list);
- config->num_total_plane++;
+ plane->index = config->num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
config->num_overlay_plane++;
@@ -1325,6 +1350,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
EXPORT_SYMBOL(drm_universal_plane_init);
+static int drm_plane_register_all(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+ int ret = 0;
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->funcs->late_register)
+ ret = plane->funcs->late_register(plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void drm_plane_unregister_all(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->funcs->early_unregister)
+ plane->funcs->early_unregister(plane);
+ }
+}
+
/**
* drm_plane_init - Initialize a legacy plane
* @dev: DRM device
@@ -1374,6 +1424,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
BUG_ON(list_empty(&plane->head));
+ /* Note that the plane_list is considered to be static; should we
+ * remove the drm_plane at runtime we would have to decrement all
+ * the indices on the drm_plane after us in the plane_list.
+ */
+
list_del(&plane->head);
dev->mode_config.num_total_plane--;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1391,29 +1446,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
EXPORT_SYMBOL(drm_plane_cleanup);
/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that CRTC within a DRM
- * device's list of planes.
- */
-unsigned int drm_plane_index(struct drm_plane *plane)
-{
- unsigned int index = 0;
- struct drm_plane *tmp;
-
- drm_for_each_plane(tmp, plane->dev) {
- if (tmp == plane)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_plane_index);
-
-/**
* drm_plane_from_index - find the registered plane at an index
* @dev: DRM device
* @idx: index of registered plane to find for
@@ -1425,13 +1457,11 @@ struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
{
struct drm_plane *plane;
- unsigned int i = 0;
- drm_for_each_plane(plane, dev) {
- if (i == idx)
+ drm_for_each_plane(plane, dev)
+ if (idx == plane->index)
return plane;
- i++;
- }
+
return NULL;
}
EXPORT_SYMBOL(drm_plane_from_index);
@@ -1467,6 +1497,46 @@ void drm_plane_force_disable(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_force_disable);
+int drm_modeset_register_all(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_plane_register_all(dev);
+ if (ret)
+ goto err_plane;
+
+ ret = drm_crtc_register_all(dev);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_encoder_register_all(dev);
+ if (ret)
+ goto err_encoder;
+
+ ret = drm_connector_register_all(dev);
+ if (ret)
+ goto err_connector;
+
+ return 0;
+
+err_connector:
+ drm_encoder_unregister_all(dev);
+err_encoder:
+ drm_crtc_unregister_all(dev);
+err_crtc:
+ drm_plane_unregister_all(dev);
+err_plane:
+ return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+ drm_connector_unregister_all(dev);
+ drm_encoder_unregister_all(dev);
+ drm_crtc_unregister_all(dev);
+ drm_plane_unregister_all(dev);
+}
+
static int drm_mode_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
@@ -2975,6 +3045,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
return PTR_ERR(fb);
}
+ fb->hot_x = req->hot_x;
+ fb->hot_y = req->hot_y;
} else {
fb = NULL;
}
@@ -3581,7 +3653,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
- if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+ if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
@@ -3738,6 +3810,13 @@ void drm_fb_release(struct drm_file *priv)
}
}
+static bool drm_property_type_valid(struct drm_property *property)
+{
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+ return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
/**
* drm_property_create - create a new property type
* @dev: drm device
@@ -5138,6 +5217,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
crtc->gamma_size = gamma_size;
crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5147,6 +5229,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
return -ENOMEM;
}
+ r_base = crtc->gamma_store;
+ g_base = r_base + gamma_size;
+ b_base = g_base + gamma_size;
+ for (i = 0; i < gamma_size; i++) {
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+ }
+
+
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
@@ -5214,7 +5306,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+ ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
out:
drm_modeset_unlock_all(dev);
@@ -5544,264 +5636,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
}
/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
- * @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
- *
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
- */
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp)
-{
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- *depth = 8;
- *bpp = 8;
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- *depth = 15;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- *depth = 16;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- *depth = 24;
- *bpp = 24;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- *depth = 24;
- *bpp = 32;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- *depth = 30;
- *bpp = 32;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- *depth = 32;
- *bpp = 32;
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format %s\n",
- drm_get_format_name(format));
- *depth = 0;
- *bpp = 0;
- break;
- }
-}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_num_planes);
-
-/**
- * drm_format_plane_cpp - determine the bytes per pixel value
- * @format: pixel format (DRM_FORMAT_*)
- * @plane: plane index
- *
- * Returns:
- * The bytes per pixel value for the specified plane.
- */
-int drm_format_plane_cpp(uint32_t format, int plane)
-{
- unsigned int depth;
- int bpp;
-
- if (plane >= drm_format_num_planes(format))
- return 0;
-
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return 2;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return plane ? 2 : 1;
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 1;
- default:
- drm_fb_get_bpp_depth(format, &depth, &bpp);
- return bpp >> 3;
- }
-}
-EXPORT_SYMBOL(drm_format_plane_cpp);
-
-/**
- * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The horizontal chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_horz_chroma_subsampling(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
-
-/**
- * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The vertical chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_vert_chroma_subsampling(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
-
-/**
- * drm_format_plane_width - width of the plane given the first plane
- * @width: width of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The width of @plane, given that the width of the first plane is @width.
- */
-int drm_format_plane_width(int width, uint32_t format, int plane)
-{
- if (plane >= drm_format_num_planes(format))
- return 0;
-
- if (plane == 0)
- return width;
-
- return width / drm_format_horz_chroma_subsampling(format);
-}
-EXPORT_SYMBOL(drm_format_plane_width);
-
-/**
- * drm_format_plane_height - height of the plane given the first plane
- * @height: height of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The height of @plane, given that the height of the first plane is @height.
- */
-int drm_format_plane_height(int height, uint32_t format, int plane)
-{
- if (plane >= drm_format_num_planes(format))
- return 0;
-
- if (plane == 0)
- return height;
-
- return height / drm_format_vert_chroma_subsampling(format);
-}
-EXPORT_SYMBOL(drm_format_plane_height);
-
-/**
* drm_rotation_simplify() - Try to simplify the rotation
* @rotation: Rotation to be simplified
* @supported_rotations: Supported rotations
@@ -6064,3 +5898,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC. This includes three properties (degamma, csc
+ * and gamma) that userspace can set and two size properties to inform
+ * userspace of the lut sizes. Each of the properties is optional. The
+ * gamma and degamma properties are only attached if their size is not
+ * 0, and ctm_property is only attached if has_ctm is true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ uint degamma_lut_size,
+ bool has_ctm,
+ uint gamma_lut_size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (degamma_lut_size) {
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_size_property,
+ degamma_lut_size);
+ }
+
+ if (has_ctm)
+ drm_object_attach_property(&crtc->base,
+ config->ctm_property, 0);
+
+ if (gamma_lut_size) {
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_size_property,
+ gamma_lut_size);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
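A minimal usage sketch, assuming hardware with a 33-entry degamma LUT, a CSC matrix and a 257-entry gamma LUT (the sizes and the foo_* name are illustrative only):

static void foo_crtc_attach_color_mgmt(struct drm_crtc *crtc)
{
        /* degamma LUT size, CTM present, gamma LUT size */
        drm_crtc_enable_color_mgmt(crtc, 33, true, 257);
}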
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 26feb2f8453f..604d3ef72ffa 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC))
+ DRM_ERROR("Called for atomic driver, this is not what you want.\n");
+
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);
@@ -1123,36 +1126,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
-
-/**
- * drm_helper_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction properties on a
- * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
- * set and 2 size properties to inform the userspace of the lut sizes.
- */
-void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
- int degamma_lut_size,
- int gamma_lut_size)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- drm_object_attach_property(&crtc->base,
- config->degamma_lut_property, 0);
- drm_object_attach_property(&crtc->base,
- config->ctm_property, 0);
- drm_object_attach_property(&crtc->base,
- config->gamma_lut_property, 0);
-
- drm_object_attach_property(&crtc->base,
- config->degamma_lut_size_property,
- degamma_lut_size);
- drm_object_attach_property(&crtc->base,
- config->gamma_lut_size_property,
- gamma_lut_size);
-}
-EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a78c138282ea..0c34e6d906d1 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,14 +31,104 @@
* and are not exported to drivers.
*/
+
+/* drm_crtc.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type);
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
+bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value,
+ struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb);
+
+void drm_fb_release(struct drm_file *file_priv);
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+ struct drm_file *file_priv);
+
+/* dumb buffer support IOCTLs */
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+/* framebuffer IOCTLs */
+extern int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+/* IOCTLs */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getencoder(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
/* drm_atomic.c */
int drm_atomic_get_property(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t *val);
+ struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
+
+/* drm_blend.c */
+int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3bcf8e6a85b3..fa10cef2ba37 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,11 +46,8 @@
static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"vma", drm_vma_info, 0},
};
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 3334baacf43d..734f86a345f6 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void)
drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
if (IS_ERR(drm_dp_aux_dev_class)) {
- res = PTR_ERR(drm_dp_aux_dev_class);
- goto out;
+ return PTR_ERR(drm_dp_aux_dev_class);
}
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index eeaf5a7c3aa7..eae5ef963cb7 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
ret = aux->transfer(aux, &msg);
- if (ret > 0) {
+ if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
if (ret == size)
@@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
- mutex_lock(&aux->hw_mutex);
-
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
-
return err;
}
@@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
.master_xfer = drm_dp_i2c_xfer,
};
+static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct drm_dp_aux, ddc);
+}
+
+static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ mutex_lock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
+static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
+static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
/**
- * drm_dp_aux_register() - initialise and register aux channel
+ * drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
- * Returns 0 on success or a negative error code on failure.
+ * If you need to use the drm_dp_aux's i2c adapter prior to registering it
+ * with the outside world, call drm_dp_aux_init() first. You must still
+ * call drm_dp_aux_register() once the connector has been registered to
+ * allow userspace access to the auxiliary DP channel.
*/
-int drm_dp_aux_register(struct drm_dp_aux *aux)
+void drm_dp_aux_init(struct drm_dp_aux *aux)
{
- int ret;
-
mutex_init(&aux->hw_mutex);
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
+ aux->ddc.lock_bus = lock_bus;
+ aux->ddc.trylock_bus = trylock_bus;
+ aux->ddc.unlock_bus = unlock_bus;
+}
+EXPORT_SYMBOL(drm_dp_aux_init);
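The intended split is: drm_dp_aux_init() early, so the driver can already do AUX/i2c transfers during probe, and drm_dp_aux_register() only once the connector is registered. A hedged sketch (struct foo_dp and foo_dp_aux_transfer are invented):

static int foo_dp_probe_aux(struct foo_dp *dp)
{
        dp->aux.name = "foo DP AUX";
        dp->aux.dev = dp->dev;
        dp->aux.transfer = foo_dp_aux_transfer; /* assumed driver hook */

        drm_dp_aux_init(&dp->aux);      /* i2c adapter usable from here on */

        /* ... read DPCD, create and register the connector ... */

        return drm_dp_aux_register(&dp->aux);   /* now visible to userspace */
}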
+
+/**
+ * drm_dp_aux_register() - initialise and register aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ if (!aux->ddc.algo)
+ drm_dp_aux_init(aux);
+
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
@@ -822,3 +860,35 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux)
i2c_del_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_unregister);
+
+#define PSR_SETUP_TIME(x) [DP_PSR_SETUP_TIME_ ## x >> DP_PSR_SETUP_TIME_SHIFT] = (x)
+
+/**
+ * drm_dp_psr_setup_time() - PSR setup time in microseconds
+ * @psr_cap: PSR capabilities from DPCD
+ *
+ * Returns:
+ * PSR setup time for the panel in microseconds, negative
+ * error code on failure.
+ */
+int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
+{
+ static const u16 psr_setup_time_us[] = {
+ PSR_SETUP_TIME(330),
+ PSR_SETUP_TIME(275),
+ PSR_SETUP_TIME(165),
+ PSR_SETUP_TIME(110),
+ PSR_SETUP_TIME(55),
+ PSR_SETUP_TIME(0),
+ };
+ int i;
+
+ i = (psr_cap[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT;
+ if (i >= ARRAY_SIZE(psr_setup_time_us))
+ return -EINVAL;
+
+ return psr_setup_time_us[i];
+}
+EXPORT_SYMBOL(drm_dp_psr_setup_time);
+
+#undef PSR_SETUP_TIME
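A sketch of how a driver might consume this, assuming it knows how many microseconds of setup time it can hide (the budget parameter and the foo_* name are hypothetical):

static bool foo_psr_setup_time_ok(const u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE],
                                  int budget_us)
{
        int setup_us = drm_dp_psr_setup_time(psr_dpcd);

        /* Negative means an unknown/invalid DPCD value: be conservative. */
        return setup_us >= 0 && setup_us <= budget_us;
}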
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6537908050d7..04e457117980 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
WARN_ON(!mutex_is_locked(&mgr->qlock));
/* construct a chunk from the first msg in the tx_msg queue */
- if (list_empty(&mgr->tx_msg_downq)) {
- mgr->tx_down_in_progress = false;
+ if (list_empty(&mgr->tx_msg_downq))
return;
- }
- mgr->tx_down_in_progress = true;
txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
@@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up(&mgr->tx_waitq);
}
- if (list_empty(&mgr->tx_msg_downq)) {
- mgr->tx_down_in_progress = false;
- return;
- }
}
/* called holding qlock */
@@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
- if (!mgr->tx_down_in_progress)
+ if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
+ * @connector: DRM connector for this port
* @mgr: manager for this port
* @port: unverified pointer to a port
*
@@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (mgr->tx_down_in_progress)
+ if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bff89226a344..be27ed36f56e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,8 +34,10 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
/*
* drm_debug: Enable debug output.
@@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
}
EXPORT_SYMBOL(drm_ut_debug_printk);
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- idr_init(&master->magic_map);
- master->minor = minor;
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_device *dev = master->minor->dev;
-
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- drm_legacy_master_rmmaps(dev, master);
-
- idr_destroy(&master->magic_map);
- kfree(master->unique);
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- mutex_lock(&dev->master_mutex);
- if (file_priv->is_master)
- goto out_unlock;
-
- if (file_priv->minor->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->allowed_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
-
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = -EINVAL;
-
- mutex_lock(&dev->master_mutex);
- if (!file_priv->is_master)
- goto out_unlock;
-
- if (!file_priv->minor->master)
- goto out_unlock;
-
- ret = 0;
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
/*
* DRM Minors
* A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor)
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
* handling, modesetting support and intial output configuration plus obviously
- * initialize all the corresponding hardware bits. An important part of this is
- * also calling drm_dev_set_unique() to set the userspace-visible unique name of
- * this device instance. Finally when everything is up and running and ready for
- * userspace the device instance can be published using drm_dev_register().
+ * initialize all the corresponding hardware bits. Finally when everything is up
+ * and running and ready for userspace the device instance can be published
+ * using drm_dev_register().
*
* There is also deprecated support for initalizing device instances using
* bus-specific helpers and the ->load() callback. But due to
@@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor)
* dev_priv field of &drm_device.
*/
+static int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+ kfree(dev->unique);
+ dev->unique = kstrdup(name, GFP_KERNEL);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
@@ -461,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev);
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+ drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
@@ -549,11 +448,12 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * drm_dev_init - Initialise new DRM device
+ * @dev: DRM device
+ * @driver: DRM driver
* @parent: Parent device object
*
- * Allocate and initialize a new DRM device. No device registration is done.
+ * Initialize a new DRM device. No device registration is done.
* Call drm_dev_register() to advertice the device to user space and register it
* with other core subsystems. This should be done last in the device
* initialization sequence to make sure userspace can't access an inconsistent
@@ -564,19 +464,18 @@ static void drm_fs_inode_free(struct inode *inode)
*
* Note that for purely virtual devices @parent can be NULL.
*
+ * Drivers that do not want to allocate their own device struct
+ * embedding struct &drm_device can call drm_dev_alloc() instead.
+ *
* RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * 0 on success, or error code on failure.
*/
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent)
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent)
{
- struct drm_device *dev;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -605,8 +504,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
if (ret)
goto err_minors;
-
- WARN_ON(driver->suspend || driver->resume);
}
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -619,7 +516,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
if (ret)
goto err_minors;
- if (drm_ht_create(&dev->map_hash, 12))
+ ret = drm_ht_create(&dev->map_hash, 12);
+ if (ret)
goto err_minors;
drm_legacy_ctxbitmap_init(dev);
@@ -632,13 +530,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
}
}
- if (parent) {
- ret = drm_dev_set_unique(dev, dev_name(parent));
- if (ret)
- goto err_setunique;
- }
+ /* Use the parent device name as DRM device unique identifier, but fall
+ * back to the driver name for virtual devices like vgem. */
+ ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+ if (ret)
+ goto err_setunique;
- return dev;
+ return 0;
err_setunique:
if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -653,8 +551,49 @@ err_minors:
drm_fs_inode_free(dev->anon_inode);
err_free:
mutex_destroy(&dev->master_mutex);
- kfree(dev);
- return NULL;
+ return ret;
+}
+EXPORT_SYMBOL(drm_dev_init);
+
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
+ *
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
+ * Note that for purely virtual devices @parent can be NULL.
+ *
+ * Drivers that wish to subclass or embed struct &drm_device into their
+ * own struct should look at using drm_dev_init() instead.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ ret = drm_dev_init(dev, driver, parent);
+ if (ret) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
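
Purely as an illustration of the split between the two entry points, here is a minimal, hypothetical driver sketch: drivers that embed struct &drm_device in their own structure allocate it themselves and call drm_dev_init(), while everything else keeps using drm_dev_alloc(). The names struct my_device, my_driver and my_probe() are invented for this sketch and are not part of the patch.

/*
 * Illustrative sketch only: "struct my_device", "my_driver" and "my_probe"
 * are hypothetical names, not part of this patch.
 */
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>

struct my_device {
	struct drm_device drm;		/* initialised via drm_dev_init() */
	void __iomem *regs;
};

extern struct drm_driver my_driver;	/* hypothetical driver ops */

static int my_probe(struct platform_device *pdev)
{
	struct my_device *mdev;
	int ret;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	ret = drm_dev_init(&mdev->drm, &my_driver, &pdev->dev);
	if (ret) {
		kfree(mdev);
		return ret;
	}

	/* ... hardware and modeset setup would go here ... */

	return drm_dev_register(&mdev->drm, 0);
}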
@@ -718,11 +657,7 @@ EXPORT_SYMBOL(drm_dev_unref);
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously. Right after drm_dev_register() the driver should call
- * drm_connector_register_all() to register all connectors in sysfs. This is
- * a separate call for backward compatibility with drivers still using
- * the deprecated ->load() callback, where connectors are registered from within
- * the ->load() callback.
+ * previously.
*
* Never call this twice on any device!
*
@@ -759,6 +694,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_modeset_register_all(dev);
+
ret = 0;
goto out_unlock;
@@ -789,6 +727,9 @@ void drm_dev_unregister(struct drm_device *dev)
drm_lastclose(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_modeset_unregister_all(dev);
+
if (dev->driver->unload)
dev->driver->unload(dev);
@@ -806,26 +747,6 @@ void drm_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unregister);
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @name: unique name
- *
- * Sets the unique name of a DRM device using the specified string. Drivers
- * can use this at driver probe time if the unique name of the devices they
- * drive is static.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
-
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7df26d4b7ad8..637a0aa4d3a0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -74,6 +74,8 @@
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
+/* Force 6bpc */
+#define EDID_QUIRK_FORCE_6BPC (1 << 10)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -100,6 +102,9 @@ static struct edid_quirk {
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+ /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
+ { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -3862,6 +3867,20 @@ static void drm_add_display_info(struct edid *edid,
/* HDMI deep color modes supported? Assign to info, if so */
drm_assign_hdmi_deep_color_info(edid, info, connector);
+ /*
+ * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+ *
+ * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+ * tells us to assume 8 bpc color depth if the EDID doesn't have
+ * extensions which tell otherwise.
+ */
+ if ((info->bpc == 0) && (edid->revision < 4) &&
+ (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+ info->bpc = 8;
+ DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+ }
+
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
@@ -4082,6 +4101,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info, connector);
+ if (quirks & EDID_QUIRK_FORCE_6BPC)
+ connector->display_info.bpc = 6;
+
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9a401aed98e0..622f788bff46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
* by commas, search through the list looking for one that
* matches the connector.
*
- * If there's one or more that don't't specify a connector, keep
+ * If there's one or more that doesn't specify a connector, keep
 * the last one found as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5075fae3c4e2..1fd6eac1400c 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
* will be set up automatically. dirty() is called by
* drm_fb_helper_deferred_io() in process context (struct delayed_work).
*
- * Example fbdev deferred io code:
+ * Example fbdev deferred io code::
*
* static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
* struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
* drm_fb_cma_create_with_funcs() - helper function for the
* &drm_mode_config_funcs ->fb_create
* callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
* dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
/**
* drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
* This function will usually be called from the CRTC callback functions.
*/
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
+ unsigned int plane)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
#ifdef CONFIG_DEBUG_FS
-/*
- * drm_fb_cma_describe() - Helper to dump information about a single
- * CMA framebuffer object
- */
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
/**
* drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
- * in debugfs.
+ * in debugfs.
+ * @m: output file
+ * @arg: private data for the callback
*/
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
{
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
#endif
+static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(info->device, vma, info->screen_base,
+ info->fix.smem_start, info->fix.smem_len);
+}
+
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
.fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_mmap = drm_fb_cma_mmap,
};
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
@@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
if (!fbdefio || !fbops) {
kfree(fbdefio);
+ kfree(fbops);
return -ENOMEM;
}
@@ -582,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
+
+/**
+ * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend, which is a wrapper around
+ * fb_set_suspend implemented by fbdev core.
+ */
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+{
+ if (fbdev_cma)
+ drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
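
A minimal sketch of how a driver's power-management hooks might use the new wrapper; my_drm_suspend()/my_drm_resume() and the location of the drm_fbdev_cma pointer in a driver-private struct are assumptions, not part of this patch.

#include <drm/drmP.h>
#include <drm/drm_fb_cma_helper.h>

struct my_drm_private {			/* hypothetical driver-private data */
	struct drm_fbdev_cma *fbdev_cma;
};

static int my_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct my_drm_private *priv = drm->dev_private;

	/* Stop fbdev from touching the hardware; a NULL fbdev_cma is a no-op. */
	drm_fbdev_cma_set_suspend(priv->fbdev_cma, 1);
	return 0;
}

static int my_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct my_drm_private *priv = drm->dev_private;

	drm_fbdev_cma_set_suspend(priv->fbdev_cma, 0);
	return 0;
}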
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7c2eb75db60f..ce54e985d91b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
}
/**
@@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
- if (fb_helper->atomic)
+ if (dev->mode_config.funcs->atomic_commit)
return restore_fbdev_mode_atomic(fb_helper);
drm_for_each_plane(plane, dev) {
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
/* Sometimes user space wants everything disabled, so don't steal the
* display if there's a master. */
- if (dev->primary->master)
+ if (lockless_dereference(dev->master))
return false;
drm_for_each_crtc(crtc, dev) {
@@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev,
i++;
}
- fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
-
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
@@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- int pindex;
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 *palette;
@@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
!fb_helper->funcs->gamma_get))
return -EINVAL;
- pindex = regno;
-
- if (fb->bits_per_pixel == 16) {
- pindex = regno << 3;
-
- if (fb->depth == 16 && regno > 63)
- return -EINVAL;
- if (fb->depth == 15 && regno > 31)
- return -EINVAL;
-
- if (fb->depth == 16) {
- u16 r, g, b;
- int i;
- if (regno < 32) {
- for (i = 0; i < 8; i++)
- fb_helper->funcs->gamma_set(crtc, red,
- green, blue, pindex + i);
- }
+ WARN_ON(fb->bits_per_pixel != 8);
- fb_helper->funcs->gamma_get(crtc, &r,
- &g, &b,
- pindex >> 1);
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
- for (i = 0; i < 4; i++)
- fb_helper->funcs->gamma_set(crtc, r,
- green, b,
- (pindex >> 1) + i);
- }
- }
-
- if (fb->depth != 16)
- fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
return 0;
}
@@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
return -EBUSY;
}
- if (fb_helper->atomic) {
+ if (dev->mode_config.funcs->atomic_commit) {
ret = pan_display_atomic(var, info);
goto unlock;
}
@@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
my_score++;
connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
+
+ /*
+ * If the DRM device implements atomic hooks and ->best_encoder() is
+ * NULL we fall back to the default drm_atomic_helper_best_encoder()
+ * helper.
+ */
+ if (fb_helper->dev->mode_config.funcs->atomic_commit &&
+ !connector_funcs->best_encoder)
+ encoder = drm_atomic_helper_best_encoder(connector);
+ else
+ encoder = connector_funcs->best_encoder(connector);
+
if (!encoder)
goto out;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7af7f8bcb355..323c238fcac7 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -40,6 +40,7 @@
#include <linux/module.h>
#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex);
* specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
- * following is an example #file_operations structure:
+ * following is an example #file_operations structure::
*
* static const example_drm_fops = {
* .owner = THIS_MODULE,
@@ -168,60 +169,6 @@ static int drm_cpu_valid(void)
}
/*
- * drm_new_set_master - Allocate a new master object and become master for the
- * associated master realm.
- *
- * @dev: The associated device.
- * @fpriv: File private identifying the client.
- *
- * This function must be called with dev::struct_mutex held.
- * Returns negative error code on failure. Zero on success.
- */
-int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
-{
- struct drm_master *old_master;
- int ret;
-
- lockdep_assert_held_once(&dev->master_mutex);
-
- /* create a new master */
- fpriv->minor->master = drm_master_create(fpriv->minor);
- if (!fpriv->minor->master)
- return -ENOMEM;
-
- /* take another reference for the copy in the local file priv */
- old_master = fpriv->master;
- fpriv->master = drm_master_get(fpriv->minor->master);
-
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, fpriv->master);
- if (ret)
- goto out_err;
- }
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, fpriv, true);
- if (ret)
- goto out_err;
- }
-
- fpriv->is_master = 1;
- fpriv->allowed_master = 1;
- fpriv->authenticated = 1;
- if (old_master)
- drm_master_put(&old_master);
-
- return 0;
-
-out_err:
- /* drop both references and restore old master on failure */
- drm_master_put(&fpriv->minor->master);
- drm_master_put(&fpriv->master);
- fpriv->master = old_master;
-
- return ret;
-}
-
-/*
* Called whenever a process opens /dev/drm.
*
* \param filp file pointer.
@@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
goto out_prime_destroy;
}
- /* if there is no current master make this fd it, but do not create
- * any master object for render clients */
- mutex_lock(&dev->master_mutex);
- if (drm_is_primary_client(priv) && !priv->minor->master) {
- /* create a new master */
- ret = drm_new_set_master(dev, priv);
+ if (drm_is_primary_client(priv)) {
+ ret = drm_master_open(priv);
if (ret)
goto out_close;
- } else if (drm_is_primary_client(priv)) {
- /* get a reference to the master */
- priv->master = drm_master_get(priv->minor->master);
}
- mutex_unlock(&dev->master_mutex);
mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
@@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return 0;
out_close:
- mutex_unlock(&dev->master_mutex);
if (dev->driver->postclose)
dev->driver->postclose(dev, priv);
out_prime_destroy:
@@ -338,18 +276,6 @@ out_prime_destroy:
return ret;
}
-static void drm_master_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_legacy_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-}
-
static void drm_events_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
@@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv)
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
list_del(&e->link);
- e->destroy(e);
+ kfree(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&file_priv->lhead);
mutex_unlock(&dev->filelist_mutex);
- mutex_lock(&dev->struct_mutex);
- if (file_priv->magic)
- idr_remove(&file_priv->master->magic_map, file_priv->magic);
- mutex_unlock(&dev->struct_mutex);
-
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
@@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp)
(long)old_encode_dev(file_priv->minor->kdev->devt),
dev->open_count);
- /* if the master has gone away we can't do anything with the lock */
- if (file_priv->minor->master)
- drm_master_release(dev, filp);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_legacy_lock_release(dev, filp);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_legacy_reclaim_buffers(dev, file_priv);
@@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp)
drm_legacy_ctxbitmap_flush(dev, file_priv);
- mutex_lock(&dev->master_mutex);
-
- if (file_priv->is_master) {
- struct drm_master *master = file_priv->master;
-
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
-
- if (file_priv->minor->master == file_priv->master) {
- /* drop the reference held my the minor */
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, true);
- drm_master_put(&file_priv->minor->master);
- }
- }
-
- /* drop the master reference held by the file priv */
- if (file_priv->master)
- drm_master_put(&file_priv->master);
- file_priv->is_master = 0;
- mutex_unlock(&dev->master_mutex);
+ if (drm_is_primary_client(file_priv))
+ drm_master_release(file_priv);
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
-
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
@@ -636,7 +525,7 @@ put_back_event:
}
ret += length;
- e->destroy(e);
+ kfree(e);
}
}
mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
list_add(&p->pending_link, &file_priv->pending_event_list);
p->file_priv = file_priv;
- /* we *could* pass this in as arg, but everyone uses kfree: */
- p->destroy = (void (*) (struct drm_pending_event *)) kfree;
-
return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- p->destroy(p);
+ kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
assert_spin_locked(&dev->event_lock);
+ if (e->completion) {
+ /* ->completion might disappear as soon as it signalled. */
+ complete_all(e->completion);
+ e->completion = NULL;
+ }
+
+ if (e->fence) {
+ fence_signal(e->fence);
+ fence_put(e->fence);
+ }
+
if (!e->file_priv) {
- e->destroy(e);
+ kfree(e);
return;
}
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
new file mode 100644
index 000000000000..0645c85d5f95
--- /dev/null
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * DRM core format related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fourcc.h>
+
+static char printable_char(int c)
+{
+ return isascii(c) && isprint(c) ? c : '?';
+}
+
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
+const char *drm_get_format_name(uint32_t format)
+{
+ static char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "%c%c%c%c %s-endian (0x%08x)",
+ printable_char(format & 0xff),
+ printable_char((format >> 8) & 0xff),
+ printable_char((format >> 16) & 0xff),
+ printable_char((format >> 24) & 0x7f),
+ format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+ format);
+
+ return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
+/**
+ * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * @format: pixel format (DRM_FORMAT_*)
+ * @depth: storage for the depth value
+ * @bpp: storage for the bpp value
+ *
+ * This only supports RGB formats here for compat with code that doesn't use
+ * pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format %s\n",
+ drm_get_format_name(format));
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * Returns:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ if (plane == 0)
+ return width;
+
+ return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ if (plane == 0)
+ return height;
+
+ return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
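
As a quick illustration of how these helpers compose, the hypothetical snippet below computes a rough total size for an NV12 buffer using only the functions introduced in this file; a real driver would additionally honour pitch and placement alignment constraints.

/* Illustrative only: rough byte size of an NV12 buffer, ignoring pitch
 * and placement alignment constraints. */
static size_t nv12_buffer_size(int width, int height)
{
	const uint32_t format = DRM_FORMAT_NV12;
	int num_planes = drm_format_num_planes(format);	/* 2 for NV12 */
	size_t total = 0;
	int i;

	for (i = 0; i < num_planes; i++) {
		int w = drm_format_plane_width(width, format, i);
		int h = drm_format_plane_height(height, format, i);
		int cpp = drm_format_plane_cpp(format, i);	/* 1, then 2 */

		total += (size_t)w * h * cpp;
	}

	return total;	/* 1920x1080 -> 1920 * 1080 * 3 / 2 bytes */
}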
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ad89db36ca25..9134ae134667 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
- * Must be called holding struct_ mutex
+ * Must be called holding &drm_device->struct_mutex.
*
* Frees the object
*/
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 5d469b2f26f4..9ae353f4dd06 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
- struct drm_master *master = minor->master;
- if (!master)
- return 0;
-
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n",
- dev->driver->name, dev_name(dev->dev));
- }
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-int drm_vm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
-
- /* Hardcoded from _DRM_FRAME_BUFFER,
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
- const char *type;
- int i;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "slot offset size type flags address mtrr\n\n");
- i = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->type < 0 || map->type > 5)
- type = "??";
- else
- type = types[map->type];
-
- seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- (unsigned long long)map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
- if (map->mtrr < 0)
- seq_printf(m, "none\n");
- else
- seq_printf(m, "%4d\n", map->mtrr);
- i++;
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
+ struct drm_master *master;
-/**
- * Called when "/proc/dri/.../bufs" is read.
- */
-int drm_bufs_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_device_dma *dma;
- int i, seg_pages;
-
- mutex_lock(&dev->struct_mutex);
- dma = dev->dma;
- if (!dma) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- seq_printf(m, " o size count free segs pages kB\n\n");
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].buf_count) {
- seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
- seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
- i,
- dma->bufs[i].buf_size,
- dma->bufs[i].buf_count,
- 0,
- dma->bufs[i].seg_count,
- seg_pages,
- seg_pages * PAGE_SIZE / 1024);
- }
- }
- seq_printf(m, "\n");
- for (i = 0; i < dma->buf_count; i++) {
- if (i && !(i % 32))
- seq_printf(m, "\n");
- seq_printf(m, " %d", dma->buflist[i]->list);
- }
+ mutex_lock(&dev->master_mutex);
+ master = dev->master;
+ if (!master)
+ goto out_unlock;
+
+ seq_printf(m, "%s", dev->driver->name);
+ if (dev->dev)
+ seq_printf(m, " dev=%s", dev_name(dev->dev));
+ if (master && master->unique)
+ seq_printf(m, " master=%s", master->unique);
+ if (dev->unique)
+ seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
- mutex_unlock(&dev->struct_mutex);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+
return 0;
}
@@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data)
task ? task->comm : "<unknown>",
pid_vnr(priv->pid),
priv->minor->index,
- priv->is_master ? 'y' : 'n',
+ drm_is_current_master(priv) ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
from_kuid_munged(seq_user_ns(m), priv->uid),
priv->magic);
@@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data)
return 0;
}
-
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 902cf6a15212..b86dc9b921a5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex;
void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* drm_vm.c */
-int drm_vma_info(struct seq_file *m, void *data);
-
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
-int drm_vm_info(struct seq_file *m, void *data);
-int drm_bufs_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
@@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_master_open(struct drm_file *file_priv);
+void drm_master_release(struct drm_file *file_priv);
/* drm_sysfs.c */
extern struct class *drm_class;
@@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-/* drm_drv.c */
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-struct drm_master *drm_master_create(struct drm_minor *minor);
-
/* drm_debugfs.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b7a39771c152..33af4a5ddca1 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include <drm/drm_auth.h>
#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"
@@ -37,6 +38,64 @@
#include <linux/pci.h>
#include <linux/export.h>
+/**
+ * DOC: getunique and setversion story
+ *
+ * BEWARE THE DRAGONS! MIND THE TRAPDOORS!
+ *
+ * In an attempt to warn anyone else who's trying to figure out what's going
+ * on here, I'll try to summarize the story. First things first, let's clear up
+ * the names, because the kernel internals, libdrm and the ioctls are all named
+ * differently:
+ *
+ * - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
+ * through the drmGetBusid function.
+ * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
+ * that code is nerved in the kernel with drm_invalid_op().
+ * - The internal set_busid kernel functions and driver callbacks are
+ * exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is
+ * nerved) allowed userspace to set the busid through the above ioctl.
+ * - Other ioctls and functions involved are named consistently.
+ *
+ * For anyone wondering what's the difference between drm 1.1 and 1.4: Correctly
+ * handling pci domains in the busid on ppc. Doing this correctly was only
+ * implemented in libdrm in 2010, hence can't be nerved yet. No one knows what's
+ * special with drm 1.2 and 1.3.
+ *
+ * Now the actual horror story of how device lookup in drm works. At large,
+ * there's 2 different ways, either by busid, or by device driver name.
+ *
+ * Opening by busid is fairly simple:
+ *
+ * 1. First call SET_VERSION to make sure pci domains are handled properly. As a
+ * side-effect this fills out the unique name in the master structure.
+ * 2. Call GET_UNIQUE to read out the unique name from the master structure,
+ * which matches the busid thanks to step 1. If it doesn't, proceed to try
+ * the next device node.
+ *
+ * Opening by name is slightly different:
+ *
+ * 1. Directly call VERSION to get the version and to match against the driver
+ * name returned by that ioctl. Note that SET_VERSION is not called, which
+ * means the unique name for the master node just opened is _not_ filled
+ * out. This is despite the fact that current drm device nodes are always bound to
+ * one device, and can't be runtime assigned like with drm 1.0.
+ * 2. Match driver name. If it mismatches, proceed to the next device node.
+ * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
+ * checking that the first byte in the string is 0). If that's not the case
+ * libdrm skips and proceeds to the next device node. Probably this is just
+ * copypasta from drm 1.0 times where a set unique name meant that the driver
+ * was in use already, but that's just conjecture.
+ *
+ * Long story short: To keep the open by name logic working, GET_UNIQUE must
+ * _not_ return a unique string when SET_VERSION hasn't been called yet,
+ * otherwise libdrm breaks. Even when that unique string can't ever change, and
+ * is totally irrelevant for actually opening the device because runtime
+ * assignable device instances were only supported in drm 1.0, which is long dead.
+ * But the libdrm code in drmOpenByName somehow survived, hence this can't be
+ * broken.
+ */
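
A rough userspace-side sketch of the "open by busid" sequence described above, assuming the uapi structures struct drm_set_version and struct drm_unique from the drm headers; error handling and iteration over device nodes are omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Sketch only: does this already-open /dev/dri/cardN node match @busid? */
static int node_matches_busid(int fd, const char *busid)
{
	struct drm_set_version sv = {
		.drm_di_major = 1,	/* request interface 1.4 so that */
		.drm_di_minor = 4,	/* pci domains are handled */
		.drm_dd_major = -1,
		.drm_dd_minor = -1,
	};
	char name[64] = "";
	struct drm_unique u = {
		.unique_len = sizeof(name) - 1,
		.unique = name,
	};

	/* Step 1: SET_VERSION fills out the unique name as a side effect. */
	if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv))
		return 0;

	/* Step 2: read the unique name back and compare it with the busid. */
	if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
		return 0;

	return strcmp(name, busid) == 0;
}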
+
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev,
master->unique_len = 0;
}
-/*
- * Set the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from userspace into drm_device::unique, and verifies that
- * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
- * in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
- * UMS was only ever supported on pci devices.
- */
-static int drm_setunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
- int ret;
-
- if (master->unique_len || master->unique)
- return -EBUSY;
-
- if (!u->unique_len || u->unique_len > 1024)
- return -EINVAL;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- if (WARN_ON(!dev->pdev))
- return -EINVAL;
-
- ret = drm_pci_set_unique(dev, master, u);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- drm_unset_busid(dev, master);
- return ret;
-}
-
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
@@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
return ret;
}
} else {
- if (WARN(dev->unique == NULL,
- "No drm_driver.set_busid() implementation provided by "
- "%ps. Use drm_dev_set_unique() to set the unique "
- "name explicitly.", dev->driver))
- return -EINVAL;
-
+ WARN_ON(!dev->unique);
master->unique = kstrdup(dev->unique, GFP_KERNEL);
if (master->unique)
master->unique_len = strlen(dev->unique);
@@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return -EACCES;
/* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+ if (unlikely((flags & DRM_MASTER) &&
+ !drm_is_current_master(file_priv) &&
!drm_is_control_client(file_priv)))
return -EACCES;
@@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
@@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -638,7 +648,7 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize, drv_size;
+ unsigned int in_size, out_size, drv_size, ksize;
bool is_driver_ioctl;
dev = file_priv->minor->dev;
@@ -661,9 +671,12 @@ long drm_ioctl(struct file *filp,
}
drv_size = _IOC_SIZE(ioctl->cmd);
- usize = _IOC_SIZE(cmd);
- asize = max(usize, drv_size);
- cmd = ioctl->cmd;
+ out_size = in_size = _IOC_SIZE(cmd);
+ if ((cmd & ioctl->cmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
@@ -683,30 +696,24 @@ long drm_ioctl(struct file *filp,
if (unlikely(retcode))
goto err_i1;
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
+ if (ksize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
}
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
}
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
}
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
/* Enforce sane locking for kms driver ioctls. Core ioctls are
* too messy still. */
if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
@@ -718,11 +725,8 @@ long drm_ioctl(struct file *filp,
mutex_unlock(&drm_global_mutex);
}
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
- }
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ retcode = -EFAULT;
err_i1:
if (!ioctl)
@@ -749,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl);
* shouldn't be used by any drivers.
*
* Returns:
- * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0fac801c18fe..77f357b2c386 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -42,10 +42,6 @@
#include <linux/vgaarb.h>
#include <linux/export.h>
-/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, pipe, count) \
- ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
-
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
struct timeval *t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 tslot;
assert_spin_locked(&dev->vblank_time_lock);
vblank->last = last;
- /* All writers hold the spinlock, but readers are serialized by
- * the latching of vblank->count below.
- */
- tslot = vblank->count + vblank_count_inc;
- vblanktimestamp(dev, pipe, tslot) = *t_vblank;
-
- /*
- * vblank timestamp updates are protected on the write side with
- * vblank_time_lock, but on the read side done locklessly using a
- * sequence-lock on the vblank counter. Ensure correct ordering using
- * memory barrriers. We need the barrier both before and also after the
- * counter update to synchronize with the next timestamp write.
- * The read-side barriers for this are in drm_vblank_count_and_time.
- */
- smp_wmb();
+ write_seqlock(&vblank->seqlock);
+ vblank->time = *t_vblank;
vblank->count += vblank_count_inc;
- smp_wmb();
+ write_sequnlock(&vblank->seqlock);
}
-/**
- * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
- * @dev: DRM device
- * @pipe: index of CRTC for which to reset the timestamp
- *
+/*
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
spin_unlock(&dev->vblank_time_lock);
}
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @pipe: counter to update
- *
+/*
* Call back into the driver to update the appropriate vblank counter
* (specified by @pipe). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
const struct timeval *t_old;
u64 diff_ns;
- t_old = &vblanktimestamp(dev, pipe, vblank->count);
+ t_old = &vblank->time;
diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
/*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = 1;
}
- /*
- * FIMXE: Need to replace this hack with proper seqlocks.
- *
- * Restrict the bump of the software vblank counter to a safe maximum
- * value of +1 whenever there is the possibility that concurrent readers
- * of vblank timestamps could be active at the moment, as the current
- * implementation of the timestamp caching and updating is not safe
- * against concurrent readers for calls to store_vblank() with a bump
- * of anything but +1. A bump != 1 would very likely return corrupted
- * timestamps to userspace, because the same slot in the cache could
- * be concurrently written by store_vblank() and read by one of those
- * readers without the read-retry logic detecting the collision.
- *
- * Concurrent readers can exist when we are called from the
- * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
- * irq callers. However, all those calls to us are happening with the
- * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
- * can't increase while we are executing. Therefore a zero refcount at
- * this point is safe for arbitrary counter bumps if we are called
- * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
- * we must also accept a refcount of 1, as whenever we are called from
- * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
- * we must let that one pass through in order to not lose vblank counts
- * during vblank irq off - which would completely defeat the whole
- * point of this routine.
- *
- * Whenever we are called from vblank irq, we have to assume concurrent
- * readers exist or can show up any time during our execution, even if
- * the refcount is currently zero, as vblank irqs are usually only
- * enabled due to the presence of readers, and because when we are called
- * from vblank irq we can't hold the vbl_lock to protect us from sudden
- * bumps in vblank refcount. Therefore also restrict bumps to +1 when
- * called from vblank irq.
- */
- if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
- (flags & DRM_CALLED_FROM_VBLIRQ))) {
- DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
- "refcount %u, vblirq %u\n", pipe, diff,
- atomic_read(&vblank->refcount),
- (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
- diff = 1;
- }
-
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+/**
+ * drm_accurate_vblank_count - retrieve the master vblank counter
+ * @crtc: which counter to retrieve
+ *
+ * This function is similar to @drm_crtc_vblank_count but this
+ * function interpolates to handle a race with vblank irq's.
+ *
+ * This is mostly useful for hardware that can obtain the scanout
+ * position, but doesn't have a frame counter.
+ */
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ u32 vblank;
+ unsigned long flags;
+
+ WARN(!dev->driver->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
+
+ spin_lock_irqsave(&dev->vblank_time_lock, flags);
+
+ drm_update_vblank_count(dev, pipe, 0);
+ vblank = drm_vblank_count(dev, pipe);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+
+ return vblank;
+}
+EXPORT_SYMBOL(drm_accurate_vblank_count);
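
A hedged sketch of the kind of check a driver without a hardware frame counter might build on top of this helper; my_flip_is_due() and its target_seq bookkeeping are invented for illustration.

/* Hypothetical helper for hardware without a frame counter:
 * has the flip armed for @target_seq completed yet? */
static bool my_flip_is_due(struct drm_crtc *crtc, u32 target_seq)
{
	/*
	 * drm_accurate_vblank_count() recomputes the counter from the
	 * scanout position, so a vblank that raced with the irq handler
	 * is still accounted for. The cast keeps the comparison safe
	 * across counter wraparound.
	 */
	return (s32)(drm_accurate_vblank_count(crtc) - target_seq) >= 0;
}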
+
/*
* Disable vblank irq's on crtc, make sure that last vblank count
* of hardware and corresponding consistent software vblank counter
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
+ seqlock_init(&vblank->seqlock);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -569,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev)
/*
* Wake up any waiters so they don't hang. This is just to paper over
- * isssues for UMS drivers which aren't in full control of their
+ * issues for UMS drivers which aren't in full control of their
* vblank/irq handling. KMS drivers must ensure that vblanks are all
* disabled when uninstalling the irq handler.
*/
@@ -631,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data,
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- /* UMS was only ever support on pci devices. */
+ /* UMS was only ever supported on pci devices. */
if (WARN_ON(!dev->pdev))
return -EINVAL;
@@ -982,31 +945,24 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
*
* This is the legacy version of drm_crtc_vblank_count_and_time().
*/
-u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime)
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+ struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int count = DRM_TIMESTAMP_MAXRETRIES;
- u32 cur_vblank;
+ u32 vblank_count;
+ unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- /*
- * Vblank timestamps are read lockless. To ensure consistency the vblank
- * counter is rechecked and ordering is ensured using memory barriers.
- * This works like a seqlock. The write-side barriers are in store_vblank.
- */
do {
- cur_vblank = vblank->count;
- smp_rmb();
- *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
- smp_rmb();
- } while (cur_vblank != vblank->count && --count > 0);
+ seq = read_seqbegin(&vblank->seqlock);
+ vblank_count = vblank->count;
+ *vblanktime = vblank->time;
+ } while (read_seqretry(&vblank->seqlock, seq));
- return cur_vblank;
+ return vblank_count;
}
-EXPORT_SYMBOL(drm_vblank_count_and_time);
/**
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -1018,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time);
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the native KMS version of drm_vblank_count_and_time().
*/
u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime)
@@ -1037,39 +991,11 @@ static void send_vblank_event(struct drm_device *dev,
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
- drm_send_event_locked(dev, &e->base);
-
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
-}
-
-/**
- * drm_arm_vblank_event - arm vblank event after pageflip
- * @dev: DRM device
- * @pipe: CRTC index
- * @e: the event to prepare to send
- *
- * A lot of drivers need to generate vblank events for the very next vblank
- * interrupt. For example when the page flip interrupt happens when the page
- * flip gets armed, but not when it actually executes within the next vblank
- * period. This helper function implements exactly the required vblank arming
- * behaviour.
- *
- * Caller must hold event lock. Caller must also hold a vblank reference for
- * the event @e, which will be dropped when the next vblank arrives.
- *
- * This is the legacy version of drm_crtc_arm_vblank_event().
- */
-void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
- struct drm_pending_vblank_event *e)
-{
- assert_spin_locked(&dev->event_lock);
- e->pipe = pipe;
- e->event.sequence = drm_vblank_count(dev, pipe);
- list_add_tail(&e->base.link, &dev->vblank_event_list);
+ drm_send_event_locked(dev, &e->base);
}
-EXPORT_SYMBOL(drm_arm_vblank_event);
/**
* drm_crtc_arm_vblank_event - arm vblank event after pageflip
@@ -1084,32 +1010,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event);
*
* Caller must hold event lock. Caller must also hold a vblank reference for
* the event @e, which will be dropped when the next vblank arrives.
- *
- * This is the native KMS version of drm_arm_vblank_event().
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
- drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+
+ assert_spin_locked(&dev->event_lock);
+
+ e->pipe = pipe;
+ e->event.sequence = drm_vblank_count(dev, pipe);
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
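Not part of the patch: a hypothetical driver snippet showing how drm_crtc_arm_vblank_event() is typically called from a page-flip path. The foo_ name is an assumption; the vblank reference is taken here as the kerneldoc requires, and the core drops it when the event fires.

	static void foo_crtc_queue_flip_event(struct drm_crtc *crtc,
					      struct drm_pending_vblank_event *event)
	{
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		/* Hold a vblank reference until the event fires; the core drops it. */
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		drm_crtc_arm_vblank_event(crtc, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}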
/**
- * drm_send_vblank_event - helper to send vblank event after pageflip
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
- *
- * This is the legacy version of drm_crtc_send_vblank_event().
*/
-void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
- struct drm_pending_vblank_event *e)
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int seq, pipe = drm_crtc_index(crtc);
struct timeval now;
- unsigned int seq;
if (dev->num_crtcs > 0) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
@@ -1121,23 +1050,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
e->pipe = pipe;
send_vblank_event(dev, e, seq, &now);
}
-EXPORT_SYMBOL(drm_send_vblank_event);
-
-/**
- * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
- * @crtc: the source CRTC of the vblank event
- * @e: the event to send
- *
- * Updates sequence # and timestamp on event, and sends it to userspace.
- * Caller must hold event lock.
- *
- * This is the native KMS version of drm_send_vblank_event().
- */
-void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e)
-{
- drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
-}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
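Not part of the patch: the alternative to arming is for the driver to deliver the event itself once the flip has actually completed, e.g. from its flip-done interrupt handler. The foo_ name is an assumption.

	static void foo_crtc_finish_flip(struct drm_crtc *crtc,
					 struct drm_pending_vblank_event *event)
	{
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		if (event)
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irqrestore(&dev->event_lock, flags);

		/* Drop the reference taken when the flip was queued. */
		drm_crtc_vblank_put(crtc);
	}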
/**
@@ -1193,7 +1105,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
* Returns:
* Zero on success or a negative error code on failure.
*/
-int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
+static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1219,7 +1131,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
return ret;
}
-EXPORT_SYMBOL(drm_vblank_get);
/**
* drm_crtc_vblank_get - get a reference count on vblank events
@@ -1228,8 +1139,6 @@ EXPORT_SYMBOL(drm_vblank_get);
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * This is the native kms version of drm_vblank_get().
- *
* Returns:
* Zero on success or a negative error code on failure.
*/
@@ -1249,7 +1158,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
*
* This is the legacy version of drm_crtc_vblank_put().
*/
-void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
+static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1270,7 +1179,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
}
-EXPORT_SYMBOL(drm_vblank_put);
/**
* drm_crtc_vblank_put - give up ownership of vblank events
@@ -1278,8 +1186,6 @@ EXPORT_SYMBOL(drm_vblank_put);
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- *
- * This is the native kms version of drm_vblank_put().
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
@@ -1679,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
seq = drm_vblank_count_and_time(dev, pipe, &now);
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
vblwait->request.sequence, seq, pipe);
@@ -1781,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ }
+
if (flags & _DRM_VBLANK_EVENT) {
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
@@ -1788,14 +1693,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
}
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
- vblwait->request.sequence = seq + 1;
- }
-
DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
vblwait->request.sequence, pipe);
- vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, pipe) -
vblwait->request.sequence) <= (1 << 23)) ||
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index d3b6ee357a2b..c6f422e879dd 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -88,14 +88,10 @@ struct drm_agp_mem {
struct list_head head;
};
-/*
- * Generic Userspace Locking-API
- */
-
-int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
+/* drm_lock.c */
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
+void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
/* DMA support */
int drm_legacy_dma_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index daa2ff12101b..48ac0ebbd663 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -41,6 +41,110 @@
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static
+int drm_lock_take(struct drm_lock_data *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock_bh(&lock_data->spinlock);
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD)
+ new = old | _DRM_LOCK_CONT;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ spin_unlock_bh(&lock_data->spinlock);
+
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+
+ if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ lock_data->file_priv = NULL;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ return 1;
+}
+
+static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock_bh(&lock_data->spinlock);
+ if (lock_data->kernel_waiters != 0) {
+ drm_lock_transfer(lock_data, 0);
+ lock_data->idle_has_lock = 1;
+ spin_unlock_bh(&lock_data->spinlock);
+ return 1;
+ }
+ spin_unlock_bh(&lock_data->spinlock);
+
+ do {
+ old = *lock;
+ new = _DRM_LOCKING_CONTEXT(old);
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ return 1;
+ }
+ wake_up_interruptible(&lock_data->lock_queue);
+ return 0;
+}
+
+/**
* Lock ioctl.
*
* \param inode device inode.
@@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
- if (!file_priv->is_master) {
+ if (!drm_is_current_master(file_priv)) {
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
}
@@ -165,120 +269,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
}
/**
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-
-/**
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-/**
- * Free lock.
- *
- * \param dev DRM device.
- * \param lock lock.
- * \param context context.
- *
- * Resets the lock file pointer.
- * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
- * waiting on the lock queue.
- */
-int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-
-/**
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
@@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);
-int drm_legacy_i_have_hw_lock(struct drm_device *dev,
- struct drm_file *file_priv)
+static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
+ struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
+
+void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (!dev->master)
+ return;
+
+ if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ drm_legacy_lock_free(&file_priv->master->lock,
+ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ }
+}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 87a8cb73366f..fc0ebd273ef8 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -44,7 +44,7 @@
# include <asm/agp.h>
#else
# ifdef __powerpc__
-# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index f5d80839a90c..af0d471ee246 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
+static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ int err;
+
+ err = of_device_uevent_modalias(dev, env);
+ if (err != -ENODEV)
+ return err;
+
+ add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
+ dsi->name);
+
+ return 0;
+}
+
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
@@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
static struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
+ .uevent = mipi_dsi_uevent,
.pm = &mipi_dsi_device_pm_ops,
};
@@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
/**
+ * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
+ * @dsi: DSI peripheral device
+ * @scanline: scanline to use as trigger
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
+{
+ u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+ scanline & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
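Not part of the patch: a hypothetical panel-driver snippet using the new helper together with mipi_dsi_dcs_set_tear_on(); the foo_ name and the chosen tear mode are assumptions.

	static int foo_panel_setup_te(struct mipi_dsi_device *dsi, u16 scanline)
	{
		int ret;

		/* Turn the TE output on, then program the trigger scanline. */
		ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
		if (ret < 0)
			return ret;

		return mipi_dsi_dcs_set_tear_scanline(dsi, scanline);
	}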
+
+/**
* mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..cb39f45d6a16 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
- u64 end = node->start + node->size;
+ u64 end;
u64 hole_start;
u64 hole_end;
BUG_ON(node == NULL);
+ end = node->start + node->size;
+
/* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > node->start || hole_end < end)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index e5e6f504d8cc..fc5040ae5f25 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
*
* This function is to create the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
+ *
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
* I also refer to the function of fb_get_mode in the file of
* drivers/video/fbmon.c
*
- * Standard GTF parameters:
+ * Standard GTF parameters::
+ *
* M = 600
* C = 40
* K = 128
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index e3a4adf03e7b..61146f5b4f56 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -30,14 +30,14 @@
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
- * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
+ * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
- * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
+ * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
- * The basic usage pattern is to:
+ * The basic usage pattern is to::
*
* drm_modeset_acquire_init(&ctx)
* retry:
@@ -51,6 +51,13 @@
* ... do stuff ...
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
+ *
+ * On top of these per-object locks using &ww_mutex there's also an overall
+ * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * probe state of connectors, and preventing hotplug add/removal of connectors.
+ *
+ * Finally there's a bunch of dedicated locks to protect drm core internal
+ * lists and lookup data structures.
*/
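Not part of the patch: the documented pattern spelled out as compilable code, assuming a driver-private helper that only needs crtc->mutex.

	static int foo_modeset_locked_op(struct drm_crtc *crtc)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Another context won the ordering; drop everything and retry. */
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		if (!ret) {
			/* ... do stuff under crtc->mutex ... */
		}

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}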
/**
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 29d5a548d07a..b2f8f1062d5f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
}
EXPORT_SYMBOL(drm_pci_set_busid);
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u)
-{
- int domain, bus, slot, func, ret;
-
- master->unique_len = u->unique_len;
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
- goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
- goto err;
- }
- return 0;
-err:
- return ret;
-}
-
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
return -EINVAL;
}
-
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u)
-{
- return -EINVAL;
-}
#endif
EXPORT_SYMBOL(drm_pci_init);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 369d2898ff9e..16c4a7bd7465 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
* @src: source coordinates in 16.16 fixed point
* @dest: integer destination coordinates
* @clip: integer clipping coordinates
+ * @rotation: plane rotation
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
@@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
* Zero if update appears valid, error code on failure
*/
int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- const struct drm_rect *clip,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible)
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ unsigned int rotation,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
{
int hscale, vscale;
@@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return -EINVAL;
}
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
/* Check scaling */
hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
@@ -174,6 +178,9 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
}
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
if (!*visible)
/*
* Plane isn't visible; some drivers can handle this
@@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
*
* Note that we make some assumptions about hardware limitations that may not be
* true for all hardware --
- * 1) Primary plane cannot be repositioned.
- * 2) Primary plane cannot be scaled.
- * 3) Primary plane must cover the entire CRTC.
- * 4) Subpixel positioning is not supported.
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ *
* Drivers for hardware that don't have these restrictions can provide their
* own implementation rather than using this helper.
*
@@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
+ BIT(DRM_ROTATE_0),
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false, &visible);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 644169e1a029..2c819ef90090 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,24 +68,6 @@ err_free:
return ret;
}
-int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- int id;
-
- id = dev->platformdev->id;
- if (id < 0)
- id = 0;
-
- master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
- dev->platformdev->name, id);
- if (!master->unique)
- return -ENOMEM;
-
- master->unique_len = strlen(master->unique);
- return 0;
-}
-EXPORT_SYMBOL(drm_platform_set_busid);
-
/**
* drm_platform_init - Register a platform device with the DRM subsystem
* @driver: DRM device driver
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index aab0f3f1f42d..780589b420a4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
get_dma_buf(dma_buf);
}
- /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
@@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
ret = drm_prime_add_buf_handle(&file_priv->prime,
dma_buf, *handle);
+ mutex_unlock(&file_priv->prime.lock);
if (ret)
goto fail;
- mutex_unlock(&file_priv->prime.lock);
-
dma_buf_put(dma_buf);
return 0;
@@ -615,11 +614,14 @@ fail:
* to detach.. which seems ok..
*/
drm_gem_handle_delete(file_priv, *handle);
+ dma_buf_put(dma_buf);
+ return ret;
+
out_unlock:
mutex_unlock(&dev->object_name_lock);
out_put:
- dma_buf_put(dma_buf);
mutex_unlock(&file_priv->prime.lock);
+ dma_buf_put(dma_buf);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 0329080d7f7c..a0df377d7d1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
+ struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
- if (!connector->cmdline_mode.specified)
+ cmdline_mode = &connector->cmdline_mode;
+ if (!cmdline_mode->specified)
return 0;
+ /* Only add a GTF mode if we find no matching probed modes */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ /* The probed mode's vrefresh isn't set until later, so compute it */
+ if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
+ continue;
+ }
+
+ return 0;
+ }
+
mode = drm_mode_create_from_cmdline_mode(connector->dev,
- &connector->cmdline_mode);
+ cmdline_mode);
if (mode == NULL)
return 0;
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 4f0f3b36d537..bf70431073f6 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -41,7 +41,7 @@
static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+ return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
#else
return vmalloc_32(size);
#endif
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
new file mode 100644
index 000000000000..0db36d27e90b
--- /dev/null
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <linux/slab.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides helpers for drivers with simple display
+ * hardware.
+ *
+ * drm_simple_display_pipe_init() initializes a simple display pipeline
+ * which has only one full-screen scanout buffer feeding one output. The
+ * pipeline is represented by struct &drm_simple_display_pipe and binds
+ * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
+ * entity. Some flexibility for code reuse is provided through a separately
+ * allocated &drm_connector object and supporting optional &drm_bridge
+ * encoder drivers.
+ */
+
+static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->enable)
+ return;
+
+ pipe->funcs->enable(pipe, crtc->state);
+}
+
+static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->disable)
+ return;
+
+ pipe->funcs->disable(pipe);
+}
+
+static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+ .disable = drm_simple_kms_crtc_disable,
+ .enable = drm_simple_kms_crtc_enable,
+};
+
+static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_rect src = {
+ .x1 = plane_state->src_x,
+ .y1 = plane_state->src_y,
+ .x2 = plane_state->src_x + plane_state->src_w,
+ .y2 = plane_state->src_y + plane_state->src_h,
+ };
+ struct drm_rect dest = {
+ .x1 = plane_state->crtc_x,
+ .y1 = plane_state->crtc_y,
+ .x2 = plane_state->crtc_x + plane_state->crtc_w,
+ .y2 = plane_state->crtc_y + plane_state->crtc_h,
+ };
+ struct drm_rect clip = { 0 };
+ struct drm_simple_display_pipe *pipe;
+ struct drm_crtc_state *crtc_state;
+ bool visible;
+ int ret;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ &pipe->crtc);
+ if (crtc_state->enable != !!plane_state->crtc)
+ return -EINVAL; /* plane must match crtc enable state */
+
+ if (!crtc_state->enable)
+ return 0; /* nothing to check when disabling or disabled */
+
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ ret = drm_plane_helper_check_update(plane, &pipe->crtc,
+ plane_state->fb,
+ &src, &dest, &clip,
+ plane_state->rotation,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &visible);
+ if (ret)
+ return ret;
+
+ if (!visible)
+ return -EINVAL;
+
+ if (!pipe->funcs || !pipe->funcs->check)
+ return 0;
+
+ return pipe->funcs->check(pipe, plane_state, crtc_state);
+}
+
+static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *pstate)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->update)
+ return;
+
+ pipe->funcs->update(pipe, pstate);
+}
+
+static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
+ .atomic_check = drm_simple_kms_plane_atomic_check,
+ .atomic_update = drm_simple_kms_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * drm_simple_display_pipe_init - Initialize a simple display pipeline
+ * @dev: DRM device
+ * @pipe: simple display pipe object to initialize
+ * @funcs: callbacks for the display pipe (optional)
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @connector: connector to attach and register
+ *
+ * Sets up a display pipeline which consists of a really simple
+ * plane-crtc-encoder pipe coupled with the provided connector.
+ * Teardown of a simple display pipe is all handled automatically by the drm
+ * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
+ * release the memory for the structure themselves.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_init(struct drm_device *dev,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = &pipe->encoder;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_crtc *crtc = &pipe->crtc;
+ int ret;
+
+ pipe->connector = connector;
+ pipe->funcs = funcs;
+
+ drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &drm_simple_kms_plane_funcs,
+ formats, format_count,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &drm_simple_kms_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+ ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ return drm_mode_connector_attach_encoder(connector, encoder);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_init);
+
+MODULE_LICENSE("GPL");
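Not part of the patch: a hypothetical driver wiring up the new helper; struct foo_priv, the foo_ callbacks and the format list are assumptions.

	struct foo_priv {
		struct drm_simple_display_pipe pipe;
		struct drm_connector connector;
	};

	static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
				    struct drm_crtc_state *crtc_state)
	{
		/* program the controller for crtc_state->adjusted_mode */
	}

	static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
				    struct drm_plane_state *plane_state)
	{
		/* scan out the framebuffer in pipe->plane.state */
	}

	static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
		.enable = foo_pipe_enable,
		.update = foo_pipe_update,
	};

	static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };

	static int foo_bind_pipe(struct drm_device *drm, struct foo_priv *priv)
	{
		/* priv->connector must already be initialized and registered */
		return drm_simple_display_pipe_init(drm, &priv->pipe, &foo_pipe_funcs,
						    foo_formats,
						    ARRAY_SIZE(foo_formats),
						    &priv->connector);
	}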
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fa7fadce8063..32dd821b7202 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = {
struct class *drm_class;
-/**
- * __drm_class_suspend - internal DRM class suspend routine
- * @dev: Linux device to suspend
- * @state: power state to enter
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its suspend hook, if present.
- */
-static int __drm_class_suspend(struct device *dev, pm_message_t state)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->suspend)
- return drm_dev->driver->suspend(drm_dev, state);
- }
- return 0;
-}
-
-/**
- * drm_class_suspend - internal DRM class suspend hook. Simply calls
- * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to suspend
- */
-static int drm_class_suspend(struct device *dev)
-{
- return __drm_class_suspend(dev, PMSG_SUSPEND);
-}
-
-/**
- * drm_class_freeze - internal DRM class freeze hook. Simply calls
- * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to freeze
- */
-static int drm_class_freeze(struct device *dev)
-{
- return __drm_class_suspend(dev, PMSG_FREEZE);
-}
-
-/**
- * drm_class_resume - DRM class resume hook
- * @dev: Linux device to resume
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its resume hook, if present.
- */
-static int drm_class_resume(struct device *dev)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->resume)
- return drm_dev->driver->resume(drm_dev);
- }
- return 0;
-}
-
-static const struct dev_pm_ops drm_class_dev_pm_ops = {
- .suspend = drm_class_suspend,
- .resume = drm_class_resume,
- .freeze = drm_class_freeze,
-};
-
static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -131,8 +62,6 @@ int drm_sysfs_init(void)
if (IS_ERR(drm_class))
return PTR_ERR(drm_class);
- drm_class->pm = &drm_class_dev_pm_ops;
-
err = class_create_file(drm_class, &class_attr_version.attr);
if (err) {
class_destroy(drm_class);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index ac9f4b3ec615..caa4e4ca616d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp |= _PAGE_NO_CACHE;
+ tmp = pgprot_noncached_wc(tmp);
#endif
return tmp;
}
@@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
* pages and mappings in fault()
*/
#if defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
vma->vm_ops = &drm_vm_ops;
break;
@@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev)
kfree(vma);
}
}
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
- unsigned long vma_count = 0;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(pt, &dev->vmalist, head)
- vma_count++;
-
- seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
- vma_count, high_memory,
- (void *)(unsigned long)virt_to_phys(high_memory));
-
- list_for_each_entry(pt, &dev->vmalist, head) {
- vma = pt->vma;
- if (!vma)
- continue;
- seq_printf(m,
- "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
- pt->pid,
- (void *)vma->vm_start, (void *)vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- seq_printf(m, "\n");
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde8285b..f306c8855978 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
* used to implement weakly referenced lookups using kref_get_unless_zero().
*
* Example:
+ *
+ * ::
+ *
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
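Not part of the patch: the lookup pattern referenced above spelled out, assuming an object type that embeds a drm_vma_offset_node and is kref-counted.

	struct foo_obj {
		struct kref ref;
		struct drm_vma_offset_node vma_node;
	};

	static struct foo_obj *foo_lookup(struct drm_vma_offset_manager *mgr,
					  unsigned long start, unsigned long pages)
	{
		struct drm_vma_offset_node *node;
		struct foo_obj *obj = NULL;

		drm_vma_offset_lock_lookup(mgr);
		node = drm_vma_offset_lookup_locked(mgr, start, pages);
		if (node) {
			obj = container_of(node, struct foo_obj, vma_node);
			/* Weak reference: only keep the object if it is still alive. */
			if (!kref_get_unless_zero(&obj->ref))
				obj = NULL;
		}
		drm_vma_offset_unlock_lookup(mgr);

		return obj;
	}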
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 3d4f56df8359..ffd1b32caa8d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev)
int ret;
ret = etnaviv_gpu_init(g);
- if (ret) {
- dev_err(g->dev, "hw init failed: %d\n", ret);
+ if (ret)
priv->gpu[i] = NULL;
- }
}
}
}
@@ -496,7 +494,6 @@ static struct drm_driver etnaviv_drm_driver = {
DRIVER_RENDER,
.open = etnaviv_open,
.preclose = etnaviv_preclose,
- .set_busid = drm_platform_set_busid,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8c6f750634af..5ce3603e6eac 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -535,8 +535,7 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
- if (etnaviv_obj->vaddr)
- vunmap(etnaviv_obj->vaddr);
+ vunmap(etnaviv_obj->vaddr);
put_pages(etnaviv_obj);
}
@@ -670,9 +669,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
-
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
@@ -916,15 +913,12 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
get_task_struct(current);
ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
- if (ret) {
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
- return ret;
- }
+ if (ret)
+ goto unreference;
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
-
+unreference:
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
-
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ff6aa5dfb2d7..87ef34150d46 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
return 0;
}
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+ u32 pmc, ppc;
+
+ /* enable clock gating */
+ ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+ /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+ if (gpu->identity.revision == 0x4301 ||
+ gpu->identity.revision == 0x4302)
+ ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+ pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+ /* Disable PA clock gating for GC400+ except for GC420 */
+ if (gpu->identity.model >= chipModel_GC400 &&
+ gpu->identity.model != chipModel_GC420)
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+ /*
+ * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+ * present without a bug fix.
+ */
+ if (gpu->identity.revision < 0x5000 &&
+ gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+ !(gpu->identity.minor_features1 &
+ chipMinorFeatures1_DISABLE_PE_GATING))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+ if (gpu->identity.revision < 0x5422)
+ pmc |= BIT(15); /* Unknown bit */
+
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+ gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
@@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
}
+ /* enable module-level clock gating */
+ etnaviv_gpu_enable_mlcg(gpu);
+
/*
* Update GPU AXI cache attribute to "cacheable, no allocate".
* This is necessary to prevent the iMX6 SoC locking up.
@@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
bool mmuv2;
ret = pm_runtime_get_sync(gpu->dev);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(gpu->dev, "Failed to enable GPU power domain\n");
return ret;
+ }
etnaviv_hw_identify(gpu);
@@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
ret = etnaviv_hw_reset(gpu);
- if (ret)
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset failed\n");
goto fail;
+ }
/* Setup IOMMU.. eventually we will (I think) do this once per context
* and have separate page tables per context. For now, to keep things
@@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
if (!iommu) {
+ dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
ret = -ENOMEM;
goto fail;
}
gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
if (!gpu->mmu) {
+ dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
iommu_domain_free(iommu);
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f25ff..a69cdd526bf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
u32 completed_fence;
u32 retired_fence;
wait_queue_head_t fence_event;
- unsigned int fence_context;
+ u64 fence_context;
spinlock_t fence_spinlock;
/* worker for handling active-list retiring: */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 6a7de5f1454a..807a3d9e0dd5 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -218,6 +218,13 @@ Copyright (C) 2015
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000
#define VIVS_PM_MODULE_STATUS 0x00000108
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index d814b3048ee5..83f61c513b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,10 +2,6 @@ config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
@@ -15,7 +11,7 @@ if DRM_EXYNOS
config DRM_EXYNOS_IOMMU
bool
- depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ depends on EXYNOS_IOMMU
default y
comment "CRTCs"
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4c1fb3f8b5a6..4f0850585b8e 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -67,10 +67,10 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
return exynos_dp_crtc_clock_enable(plat_data, false);
}
-static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+ struct drm_connector *connector)
{
struct exynos_dp_device *dp = to_dp(plat_data);
- struct drm_connector *connector = dp->connector;
struct drm_display_mode *mode;
int num_modes = 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 5e38e749ac17..ad6b73c7fc59 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *
-exynos_dpi_best_encoder(struct drm_connector *connector)
-{
- struct exynos_dpi *ctx = connector_to_dpi(connector);
-
- return &ctx->encoder;
-}
-
static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.get_modes = exynos_dpi_get_modes,
- .best_encoder = exynos_dpi_best_encoder,
};
static int exynos_dpi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dd820e23b0c..877d2efa28e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -159,12 +159,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
dev_name(private->dma_dev));
- /*
- * create mapping to manage iommu table and set a pointer to iommu
- * mapping structure to iommu_mapping of private data.
- * also this iommu_mapping can be used to check if iommu is supported
- * or not.
- */
+ /* create common IOMMU mapping for all devices attached to Exynos DRM */
ret = drm_create_iommu_mapping(dev);
if (ret < 0) {
DRM_ERROR("failed to create iommu mapping.\n");
@@ -267,6 +262,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
{
struct exynos_drm_private *priv = dev->dev_private;
struct exynos_atomic_commit *commit;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int i, ret;
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +285,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
@@ -299,7 +294,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
priv->pending |= commit->crtcs;
spin_unlock(&priv->lock);
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
schedule_work(&commit->work);
@@ -407,7 +402,6 @@ static struct drm_driver exynos_drm_driver = {
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cc33ec9296e7..7f1a49d5bdbe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -64,7 +64,6 @@ struct exynos_drm_plane_state {
struct exynos_drm_rect src;
unsigned int h_ratio;
unsigned int v_ratio;
- unsigned int zpos;
};
static inline struct exynos_drm_plane_state *
@@ -221,11 +220,8 @@ struct exynos_drm_private {
* this array is used to be aware of which crtc did it request vblank.
*/
struct drm_crtc *crtc[MAX_CRTC];
- struct drm_property *plane_zpos_property;
struct device *dma_dev;
- unsigned long da_start;
- unsigned long da_space_size;
void *mapping;
unsigned int pipe;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 601ecf8006a7..e07cb1fe4860 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *
-exynos_dsi_best_encoder(struct drm_connector *connector)
-{
- struct exynos_dsi *dsi = connector_to_dsi(connector);
-
- return &dsi->encoder;
-}
-
static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
- .best_encoder = exynos_dsi_best_encoder,
};
static int exynos_dsi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67dcd6831291..4cfb39d543b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -52,7 +52,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
@@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
- if (exynos_gem->kvaddr)
- vunmap(exynos_gem->kvaddr);
+ vunmap(exynos_gem->kvaddr);
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 8564c3da0d22..4bf00f57ffe8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-attrs.h>
#include <linux/of.h>
#include <drm/drmP.h>
@@ -235,7 +234,7 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
- struct dma_attrs cmdlist_dma_attrs;
+ unsigned long cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
@@ -256,13 +255,12 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
int ret;
struct g2d_buf_info *buf_info;
- init_dma_attrs(&g2d->cmdlist_dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
- &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -295,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
err:
dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
}
@@ -309,7 +307,7 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d)
dma_free_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index cdf9f1af4347..f2ae72ba7d5a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -24,7 +24,7 @@
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
struct drm_device *dev = exynos_gem->base.dev;
- enum dma_attr attr;
+ unsigned long attr;
unsigned int nr_pages;
struct sg_table sgt;
int ret = -ENOMEM;
@@ -34,7 +34,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
return 0;
}
- init_dma_attrs(&exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs = 0;
/*
* if EXYNOS_BO_CONTIG, fully physically contiguous memory
@@ -42,7 +42,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
* as possible.
*/
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -54,8 +54,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
else
attr = DMA_ATTR_NON_CONSISTENT;
- dma_set_attr(attr, &exynos_gem->dma_attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs |= attr;
+ exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
nr_pages = exynos_gem->size >> PAGE_SHIFT;
@@ -67,7 +67,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_ERROR("failed to allocate buffer.\n");
goto err_free;
@@ -75,7 +75,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to get sgtable.\n");
goto err_dma_free;
@@ -99,7 +99,7 @@ err_sgt_free:
sg_free_table(&sgt);
err_dma_free:
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
- exynos_gem->dma_addr, &exynos_gem->dma_attrs);
+ exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
drm_free_large(exynos_gem->pages);
@@ -120,7 +120,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
drm_free_large(exynos_gem->pages);
}
@@ -346,7 +346,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 78100742281d..df7c543d6558 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -50,7 +50,7 @@ struct exynos_drm_gem {
void *cookie;
void __iomem *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
struct page **pages;
struct sg_table *sgt;
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 7ca09ee19656..0f373702414e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -14,13 +14,27 @@
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
-#include <linux/kref.h>
-
-#include <asm/dma-iommu.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
/*
* drm_create_iommu_mapping - create a mapping structure
*
@@ -28,38 +42,22 @@
*/
int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
- struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
- if (!priv->da_start)
- priv->da_start = EXYNOS_DEV_ADDR_START;
- if (!priv->da_space_size)
- priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-
- mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
- priv->da_space_size);
-
- if (IS_ERR(mapping))
- return PTR_ERR(mapping);
-
- priv->mapping = mapping;
-
- return 0;
+ return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
+ EXYNOS_DEV_ADDR_SIZE);
}
/*
* drm_release_iommu_mapping - release iommu mapping structure
*
* @drm_dev: DRM device
- *
- * if mapping->kref becomes 0 then all things related to iommu mapping
- * will be released
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- arm_iommu_release_mapping(priv->mapping);
+ __exynos_iommu_release_mapping(priv);
}
/*
@@ -77,25 +75,19 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
- if (!priv->mapping)
- return 0;
-
- subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
- sizeof(*subdrv_dev->dma_parms),
- GFP_KERNEL);
- if (!subdrv_dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
-
- if (subdrv_dev->archdata.mapping)
- arm_iommu_detach_device(subdrv_dev);
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
- ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
- if (ret < 0) {
- DRM_DEBUG_KMS("failed iommu attach.\n");
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
return ret;
- }
+
+ ret = __exynos_iommu_attach(priv, subdrv_dev);
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
return 0;
}
@@ -113,10 +105,7 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- struct dma_iommu_mapping *mapping = priv->mapping;
-
- if (!mapping || !mapping->domain)
- return;
- arm_iommu_detach_device(subdrv_dev);
+ __exynos_iommu_detach(priv, subdrv_dev);
+ clear_dma_max_seg_size(subdrv_dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 5ffebe02ee4d..c8de4913fdbe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -17,6 +17,97 @@
#ifdef CONFIG_DRM_EXYNOS_IOMMU
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+ unsigned long start, unsigned long size)
+{
+ priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
+ size);
+ return IS_ERR(priv->mapping);
+}
+
+static inline void
+__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+ arm_iommu_release_mapping(priv->mapping);
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ if (dev->archdata.mapping)
+ arm_iommu_detach_device(dev);
+
+ return arm_iommu_attach_device(dev, priv->mapping);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+
+#elif defined(CONFIG_IOMMU_DMA)
+#include <linux/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+ unsigned long start, unsigned long size)
+{
+ struct iommu_domain *domain;
+ int ret;
+
+ domain = iommu_domain_alloc(priv->dma_dev->bus);
+ if (!domain)
+ return -ENOMEM;
+
+ ret = iommu_get_dma_cookie(domain);
+ if (ret)
+ goto free_domain;
+
+ ret = iommu_dma_init_domain(domain, start, size);
+ if (ret)
+ goto put_cookie;
+
+ priv->mapping = domain;
+ return 0;
+
+put_cookie:
+ iommu_put_dma_cookie(domain);
+free_domain:
+ iommu_domain_free(domain);
+ return ret;
+}
+
+static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ iommu_put_dma_cookie(domain);
+ iommu_domain_free(domain);
+ priv->mapping = NULL;
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ return iommu_attach_device(domain, dev);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ iommu_detach_device(domain, dev);
+}
+#else
+#error Unsupported architecture and IOMMU/DMA-mapping glue code
+#endif
+
int drm_create_iommu_mapping(struct drm_device *drm_dev);
void drm_release_iommu_mapping(struct drm_device *drm_dev);
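
A hypothetical sketch of how a sub-driver would use the common entry points declared above together with drm_iommu_attach_device()/drm_iommu_detach_device() from exynos_drm_iommu.c; the function name and error handling are illustrative only:

```c
/* Illustrative only: wire a sub-device into the shared IOMMU mapping. */
static int example_subdrv_iommu_bind(struct drm_device *drm_dev,
				     struct device *subdrv_dev)
{
	int ret;

	ret = drm_create_iommu_mapping(drm_dev);	/* once per DRM device */
	if (ret)
		return ret;

	ret = drm_iommu_attach_device(drm_dev, subdrv_dev);
	if (ret)
		drm_release_iommu_mapping(drm_dev);

	return ret;
}
```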
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 77f12c00abf9..7f32419b25ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -139,9 +139,9 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
if (exynos_state) {
- exynos_state->zpos = exynos_plane->config->zpos;
plane->state = &exynos_state->base;
plane->state->plane = plane;
+ plane->state->zpos = exynos_plane->config->zpos;
}
}
@@ -157,7 +157,6 @@ exynos_drm_plane_duplicate_state(struct drm_plane *plane)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
- copy->zpos = exynos_state->zpos;
return &copy->base;
}
@@ -170,43 +169,6 @@ static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
kfree(old_exynos_state);
}
-static int exynos_drm_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_plane_state *exynos_state =
- to_exynos_plane_state(state);
- struct exynos_drm_private *dev_priv = plane->dev->dev_private;
- const struct exynos_drm_plane_config *config = exynos_plane->config;
-
- if (property == dev_priv->plane_zpos_property &&
- (config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS))
- exynos_state->zpos = val;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int exynos_drm_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- const struct exynos_drm_plane_state *exynos_state =
- container_of(state, const struct exynos_drm_plane_state, base);
- struct exynos_drm_private *dev_priv = plane->dev->dev_private;
-
- if (property == dev_priv->plane_zpos_property)
- *val = exynos_state->zpos;
- else
- return -EINVAL;
-
- return 0;
-}
-
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -215,8 +177,6 @@ static struct drm_plane_funcs exynos_plane_funcs = {
.reset = exynos_drm_plane_reset,
.atomic_duplicate_state = exynos_drm_plane_duplicate_state,
.atomic_destroy_state = exynos_drm_plane_destroy_state,
- .atomic_set_property = exynos_drm_plane_atomic_set_property,
- .atomic_get_property = exynos_drm_plane_atomic_get_property,
};
static int
@@ -304,23 +264,13 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
};
static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
- unsigned int zpos)
+ bool immutable)
{
- struct drm_device *dev = plane->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_property *prop;
-
- prop = dev_priv->plane_zpos_property;
- if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos",
- 0, MAX_PLANE - 1);
- if (!prop)
- return;
-
- dev_priv->plane_zpos_property = prop;
- }
-
- drm_object_attach_property(&plane->base, prop, zpos);
+ /* FIXME */
+ if (immutable)
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ else
+ drm_plane_create_zpos_property(plane, 0, 0, MAX_PLANE - 1);
}
int exynos_plane_init(struct drm_device *dev,
@@ -346,7 +296,8 @@ int exynos_plane_init(struct drm_device *dev,
exynos_plane->index = index;
exynos_plane->config = config;
- exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos);
+ exynos_plane_attach_zpos_property(&exynos_plane->base,
+ !(config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS));
return 0;
}
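
The plane hunks above drop the driver-private zpos property in favour of the core zpos helpers; a minimal sketch of the two helper calls, with an illustrative wrapper name and range:

```c
/* Sketch: generic DRM zpos helpers replace the driver-private property. */
static void example_attach_zpos(struct drm_plane *plane, bool immutable,
				unsigned int zpos, unsigned int max_zpos)
{
	if (immutable)
		drm_plane_create_zpos_immutable_property(plane, zpos);
	else
		drm_plane_create_zpos_property(plane, zpos, 0, max_zpos);
}
```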
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 608b0afa337f..e8f6c92b2a36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, edid);
}
-static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
-{
- struct vidi_context *ctx = ctx_from_connector(connector);
-
- return &ctx->encoder;
-}
-
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.get_modes = vidi_get_modes,
- .best_encoder = vidi_best_encoder,
};
static int vidi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 58de5a430508..2275efe41acd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
-{
- struct hdmi_context *hdata = connector_to_hdmi(connector);
-
- return &hdata->encoder;
-}
-
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
.get_modes = hdmi_get_modes,
.mode_valid = hdmi_mode_valid,
- .best_encoder = hdmi_best_encoder,
};
static int hdmi_create_connector(struct drm_encoder *encoder)
@@ -1828,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev)
DRM_ERROR("Failed to find ddc node in device tree\n");
return -ENODEV;
}
+ of_node_put(dev->of_node);
out_get_ddc_adpt:
hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
@@ -1846,6 +1839,7 @@ out_get_ddc_adpt:
ret = -ENODEV;
goto err_ddc;
}
+ of_node_put(dev->of_node);
out_get_phy_port:
if (hdata->drv_data->is_apb_phy) {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 74a4269cc1b0..e1d47f9435fc 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -477,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
+ unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
@@ -561,7 +562,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
mixer_cfg_scan(ctx, mode->vdisplay);
mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
- mixer_cfg_layer(ctx, plane->index, state->zpos + 1, true);
+ mixer_cfg_layer(ctx, plane->index, priority, true);
mixer_cfg_vp_blend(ctx);
mixer_run(ctx);
@@ -586,6 +587,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
+ unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
@@ -677,7 +679,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_cfg_scan(ctx, mode->vdisplay);
mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
- mixer_cfg_layer(ctx, win, state->zpos + 1, true);
+ mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format));
/* layer update mandatory for mixer 16.0.33.0 */
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index b9c714de6e40..14a72c4c496d 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -5,12 +5,7 @@ config DRM_FSL_DCU
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_PANEL
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
select REGMAP_MMIO
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 89c0084c2814..3371635cd4d7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -22,20 +22,21 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_dcu_drm_plane.h"
-static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
-}
+ struct drm_pending_vblank_event *event = crtc->state->event;
-static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- return 0;
-}
+ if (event) {
+ crtc->state->event = NULL;
-static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
-{
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
@@ -43,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ drm_crtc_vblank_off(crtc);
+
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
@@ -60,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+
+ drm_crtc_vblank_on(crtc);
}
static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -117,8 +122,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
- .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
- .atomic_check = fsl_dcu_drm_crtc_atomic_check,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
.disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
@@ -138,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
{
struct drm_plane *primary;
struct drm_crtc *crtc = &fsl_dev->crtc;
- unsigned int i, j, reg_num;
int ret;
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+
primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
if (!primary)
return -ENOMEM;
@@ -154,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
- if (!strcmp(fsl_dev->soc->name, "ls1021a"))
- reg_num = LS1021A_LAYER_REG_NUM;
- else
- reg_num = VF610_LAYER_REG_NUM;
- for (i = 0; i < fsl_dev->soc->total_layer; i++) {
- for (j = 1; j <= reg_num; j++)
- regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
- }
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
-
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index dc723f7ead7d..7882387f9bff 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/console.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mm.h>
@@ -22,6 +23,7 @@
#include <linux/regmap.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -42,10 +44,8 @@ static const struct regmap_config fsl_dcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .cache_type = REGCACHE_FLAT,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
- .max_register = 0x11fc,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)
@@ -199,7 +199,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = fsl_dcu_drm_enable_vblank,
.disable_vblank = fsl_dcu_drm_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -229,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
if (!fsl_dev)
return 0;
+ disable_irq(fsl_dev->irq);
drm_kms_helper_poll_disable(fsl_dev->drm);
- regcache_cache_only(fsl_dev->regmap, true);
- regcache_mark_dirty(fsl_dev->regmap);
- clk_disable(fsl_dev->clk);
- clk_unprepare(fsl_dev->clk);
+
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
+ console_unlock();
+
+ fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
+ if (IS_ERR(fsl_dev->state)) {
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+ console_unlock();
+
+ drm_kms_helper_poll_enable(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
+ return PTR_ERR(fsl_dev->state);
+ }
+
+ clk_disable_unprepare(fsl_dev->pix_clk);
+ clk_disable_unprepare(fsl_dev->clk);
return 0;
}
@@ -246,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (!fsl_dev)
return 0;
- ret = clk_enable(fsl_dev->clk);
+ ret = clk_prepare_enable(fsl_dev->clk);
if (ret < 0) {
dev_err(dev, "failed to enable dcu clk\n");
- clk_unprepare(fsl_dev->clk);
return ret;
}
- ret = clk_prepare(fsl_dev->clk);
+
+ ret = clk_prepare_enable(fsl_dev->pix_clk);
if (ret < 0) {
- dev_err(dev, "failed to prepare dcu clk\n");
+ dev_err(dev, "failed to enable pix clk\n");
return ret;
}
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+ drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+ console_unlock();
+
drm_kms_helper_poll_enable(fsl_dev->drm);
- regcache_cache_only(fsl_dev->regmap, false);
- regcache_sync(fsl_dev->regmap);
+ enable_irq(fsl_dev->irq);
return 0;
}
@@ -274,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
.name = "ls1021a",
.total_layer = 16,
.max_layer = 4,
+ .layer_regs = LS1021A_LAYER_REG_NUM,
};
static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
.name = "vf610",
.total_layer = 64,
.max_layer = 6,
+ .layer_regs = VF610_LAYER_REG_NUM,
};
static const struct of_device_id fsl_dcu_of_match[] = {
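
The suspend/resume hunks above move fsl-dcu to the atomic suspend helpers; a minimal sketch of that pairing, assuming a caller-provided pointer where the saved state is kept (the function names are illustrative, not part of this patch):

```c
/* Sketch of the drm_atomic_helper_suspend()/resume() pairing used above. */
static int example_pm_suspend(struct drm_device *drm,
			      struct drm_atomic_state **saved)
{
	drm_kms_helper_poll_disable(drm);

	*saved = drm_atomic_helper_suspend(drm);
	if (IS_ERR(*saved)) {
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(*saved);
	}
	return 0;
}

static int example_pm_resume(struct drm_device *drm,
			     struct drm_atomic_state *saved)
{
	int ret = drm_atomic_helper_resume(drm, saved);

	drm_kms_helper_poll_enable(drm);
	return ret;
}
```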
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index c275f900ff23..3b371fe7491e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -175,6 +175,7 @@ struct fsl_dcu_soc_data {
unsigned int total_layer;
/*max layer number DCU supported*/
unsigned int max_layer;
+ unsigned int layer_regs;
};
struct fsl_dcu_drm_device {
@@ -193,6 +194,7 @@ struct fsl_dcu_drm_device {
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
const struct fsl_dcu_soc_data *soc;
+ struct drm_atomic_state *state;
};
void fsl_dcu_fbdev_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index c564ec612b59..d9d6cc1c8e39 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
ret = fsl_dcu_drm_crtc_create(fsl_dev);
if (ret)
- return ret;
+ goto err;
ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
if (ret)
- goto fail_encoder;
+ goto err;
- ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+ ret = fsl_dcu_create_outputs(fsl_dev);
if (ret)
- goto fail_connector;
+ goto err;
drm_mode_config_reset(fsl_dev->drm);
drm_kms_helper_poll_init(fsl_dev->drm);
return 0;
-fail_encoder:
- fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc);
-fail_connector:
- fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
+
+err:
+ drm_mode_config_cleanup(fsl_dev->drm);
return ret;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
index 7093109fbc21..5a7b88e19e44 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -25,9 +25,8 @@ to_fsl_dcu_connector(struct drm_connector *con)
: NULL;
}
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
- struct drm_encoder *encoder);
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_crtc *crtc);
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev);
#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 274558b3b32b..e50467a0deb0 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = {
DRM_FORMAT_YUV422,
};
+void fsl_dcu_drm_init_planes(struct drm_device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ int i, j;
+
+ for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+ for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
+ }
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+}
+
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
{
struct drm_plane *primary;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
index d657f088d859..8ee45f813ee8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -12,6 +12,7 @@
#ifndef __FSL_DCU_DRM_PLANE_H__
#define __FSL_DCU_DRM_PLANE_H__
+void fsl_dcu_drm_init_planes(struct drm_device *dev);
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 98c998da91eb..26edcc899712 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -10,6 +10,7 @@
*/
#include <linux/backlight.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -102,14 +103,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
};
-static struct drm_encoder *
-fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
-{
- struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
-
- return fsl_con->encoder;
-}
-
static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
{
struct fsl_dcu_drm_connector *fsl_connector;
@@ -136,17 +129,16 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .best_encoder = fsl_dcu_drm_connector_best_encoder,
.get_modes = fsl_dcu_drm_connector_get_modes,
.mode_valid = fsl_dcu_drm_connector_mode_valid,
};
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
- struct drm_encoder *encoder)
+static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_panel *panel)
{
+ struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
- struct device_node *panel_node;
int ret;
fsl_dev->connector.encoder = encoder;
@@ -170,21 +162,7 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
mode_config->dpms_property,
DRM_MODE_DPMS_OFF);
- panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
- if (!panel_node) {
- dev_err(fsl_dev->dev, "fsl,panel property not found\n");
- ret = -ENODEV;
- goto err_sysfs;
- }
-
- fsl_dev->connector.panel = of_drm_find_panel(panel_node);
- if (!fsl_dev->connector.panel) {
- ret = -EPROBE_DEFER;
- goto err_panel;
- }
- of_node_put(panel_node);
-
- ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+ ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
goto err_sysfs;
@@ -192,11 +170,62 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
return 0;
-err_panel:
- of_node_put(panel_node);
err_sysfs:
drm_connector_unregister(connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
}
+
+static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
+ const struct of_endpoint *ep)
+{
+ struct drm_bridge *bridge;
+ struct device_node *np;
+
+ np = of_graph_get_remote_port_parent(ep->local_node);
+
+ fsl_dev->connector.panel = of_drm_find_panel(np);
+ if (fsl_dev->connector.panel) {
+ of_node_put(np);
+ return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+ }
+
+ bridge = of_drm_find_bridge(np);
+ of_node_put(np);
+ if (!bridge)
+ return -ENODEV;
+
+ fsl_dev->encoder.bridge = bridge;
+ bridge->encoder = &fsl_dev->encoder;
+
+ return drm_bridge_attach(fsl_dev->drm, bridge);
+}
+
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
+{
+ struct of_endpoint ep;
+ struct device_node *ep_node, *panel_node;
+ int ret;
+
+ /* This is for backward compatibility */
+ panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+ if (panel_node) {
+ fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!fsl_dev->connector.panel)
+ return -EPROBE_DEFER;
+ return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+ }
+
+ ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL);
+ if (!ep_node)
+ return -ENODEV;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ of_node_put(ep_node);
+ if (ret)
+ return -ENODEV;
+
+ return fsl_dcu_attach_endpoint(fsl_dev, &ep);
+}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index bbe34f1c0505..bca09ea24632 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -92,6 +92,7 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
goto err_node_put;
}
+ of_node_put(np);
clk_prepare_enable(tcon->ipg_clk);
dev_info(dev, "Using TCON in bypass mode\n");
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 17f928ec84ea..8906d67494fc 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,11 +1,7 @@
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
- select FB_CFB_COPYAREA
- select FB_CFB_FILLRECT
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 28f9d90988ff..563f193fcfac 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -246,8 +246,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
{
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (gma_encoder->i2c_bus)
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 813ef23a8054..38dc89083148 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -444,8 +444,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (gma_encoder->i2c_bus)
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -780,12 +779,10 @@ out:
failed_find:
mutex_unlock(&dev->mode_config.mutex);
printk(KERN_ERR "Failed find\n");
- if (gma_encoder->ddc_bus)
- psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
failed_ddc:
printk(KERN_ERR "Failed DDC\n");
- if (gma_encoder->i2c_bus)
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
failed_blc_i2c:
printk(KERN_ERR "Failed BLC\n");
drm_encoder_cleanup(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 7440bf90ac9c..0fcdce0817de 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -184,12 +184,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return 0;
}
-static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -201,7 +195,6 @@ static struct fb_ops psbfb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_sync = psbfb_sync,
- .fb_ioctl = psbfb_ioctl,
};
static struct fb_ops psbfb_roll_ops = {
@@ -215,7 +208,6 @@ static struct fb_ops psbfb_roll_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = psbfb_pan,
.fb_mmap = psbfb_mmap,
- .fb_ioctl = psbfb_ioctl,
};
static struct fb_ops psbfb_unaccel_ops = {
@@ -228,7 +220,6 @@ static struct fb_ops psbfb_unaccel_ops = {
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
- .fb_ioctl = psbfb_ioctl,
};
/**
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index c95406e6f44d..1a1cf7a3b5ef 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
}
}
-void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
- u32 start, u32 size)
+int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+ u32 size)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int i;
- int end = (start + size > 256) ? 256 : start + size;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
gma_crtc->lut_r[i] = red[i] >> 8;
gma_crtc->lut_g[i] = green[i] >> 8;
gma_crtc->lut_b[i] = blue[i] >> 8;
}
gma_crtc_load_lut(crtc);
+
+ return 0;
}
/**
@@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Turn off vblank interrupts */
- drm_vblank_off(dev, pipe);
+ drm_crtc_vblank_off(crtc);
/* Wait for vblank for the disable to take effect */
gma_wait_for_vblank(dev);
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index b2491c65f053..e72dd08b701b 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height);
extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
extern void gma_crtc_load_lut(struct drm_crtc *crtc);
-extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, u32 start, u32 size);
+extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, u32 size);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 82b8ce418b27..50eb944fb78a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -210,10 +210,8 @@ static int psb_driver_unload(struct drm_device *dev)
iounmap(dev_priv->aux_reg);
dev_priv->aux_reg = NULL;
}
- if (dev_priv->aux_pdev)
- pci_dev_put(dev_priv->aux_pdev);
- if (dev_priv->lpc_pdev)
- pci_dev_put(dev_priv->lpc_pdev);
+ pci_dev_put(dev_priv->aux_pdev);
+ pci_dev_put(dev_priv->lpc_pdev);
/* Destroy VBT data */
psb_intel_destroy_bios(dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 398015be87e4..7b6c84925098 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc;
int i;
- uint16_t *r_base, *g_base, *b_base;
/* We allocate a extra array of drm_connector pointers
* for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->pipe = pipe;
gma_crtc->plane = pipe;
- r_base = gma_crtc->base.gamma_store;
- g_base = r_base + 256;
- b_base = g_base + 256;
for (i = 0; i < 256; i++) {
gma_crtc->lut_r[i] = i;
gma_crtc->lut_g[i] = i;
gma_crtc->lut_b[i] = i;
- r_base[i] = i << 8;
- g_base[i] = i << 8;
- b_base[i] = i << 8;
gma_crtc->lut_adj[i] = 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index b1b93317d054..e55733ca46d2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -561,8 +561,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
- if (lvds_priv->ddc_bus)
- psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -835,11 +834,9 @@ out:
failed_find:
mutex_unlock(&dev->mode_config.mutex);
- if (lvds_priv->ddc_bus)
- psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
failed_ddc:
- if (lvds_priv->i2c_bus)
- psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+ psb_intel_i2c_destroy(lvds_priv->i2c_bus);
failed_blc_i2c:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index ea0df6115f7e..499f64405dac 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -4,6 +4,7 @@ config DRM_HISI_KIRIN
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select HISI_KIRIN_DW_DSI
help
Choose this option if you have a hisilicon Kirin chipsets(hi6220).
If M is selected the module will be called kirin-drm.
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index fba6372d060e..c3707d47cd89 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -487,6 +487,7 @@ static void ade_crtc_enable(struct drm_crtc *crtc)
ade_set_medianoc_qos(acrtc);
ade_display_enable(acrtc);
ade_dump_regs(ctx->base);
+ drm_crtc_vblank_on(crtc);
acrtc->enable = true;
}
@@ -498,17 +499,11 @@ static void ade_crtc_disable(struct drm_crtc *crtc)
if (!acrtc->enable)
return;
+ drm_crtc_vblank_off(crtc);
ade_power_down(ctx);
acrtc->enable = false;
}
-static int ade_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- /* do nothing */
- return 0;
-}
-
static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
@@ -537,6 +532,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_pending_vblank_event *event = crtc->state->event;
void __iomem *base = ctx->base;
/* only crtc is enabled regs take effect */
@@ -545,12 +541,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
/* flush ade registers */
writel(ADE_ENABLE, base + ADE_EN);
}
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
.enable = ade_crtc_enable,
.disable = ade_crtc_disable,
- .atomic_check = ade_crtc_atomic_check,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
@@ -961,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
}
ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
- if (!ctx->ade_core_clk) {
+ if (IS_ERR(ctx->ade_core_clk)) {
DRM_ERROR("failed to parse clk ADE_CORE\n");
- return -ENODEV;
+ return PTR_ERR(ctx->ade_core_clk);
}
ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
- if (!ctx->media_noc_clk) {
+ if (IS_ERR(ctx->media_noc_clk)) {
DRM_ERROR("failed to parse clk CODEC_JPEG\n");
- return -ENODEV;
+ return PTR_ERR(ctx->media_noc_clk);
}
ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
- if (!ctx->ade_pix_clk) {
+ if (IS_ERR(ctx->ade_pix_clk)) {
DRM_ERROR("failed to parse clk ADE_PIX\n");
- return -ENODEV;
+ return PTR_ERR(ctx->ade_pix_clk);
}
return 0;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 3f94785fbcca..1edd9bc80294 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
.fops = &kirin_drm_fops,
- .set_busid = drm_platform_set_busid,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev)
if (ret)
goto err_kms_cleanup;
- /* connectors should be registered after drm device register */
- ret = drm_connector_register_all(drm_dev);
- if (ret)
- goto err_drm_dev_unregister;
-
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, drm_dev->primary->index);
return 0;
-err_drm_dev_unregister:
- drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_unref:
@@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- drm_connector_unregister_all(drm_dev);
drm_dev_unregister(drm_dev);
kirin_drm_kms_cleanup(drm_dev);
drm_dev_unref(drm_dev);
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 22c7ed63a001..4d341db462a2 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -1,12 +1,6 @@
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
-config DRM_I2C_ADV7511
- tristate "AV7511 encoder"
- select REGMAP_I2C
- help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
-
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 2c72eb584ab7..43aa33baebed 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,7 +1,5 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
-
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 0594c45f7164..e9e8ae2ec06b 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
- if (crtc) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
+ if (crtc)
+ drm_crtc_force_disable(crtc);
}
return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 29a32b11953b..7769e469118f 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
+config DRM_I915_GVT
+ bool "Enable Intel GVT-g graphics virtualization host support"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option if you want to enable Intel GVT-g graphics
+ virtualization technology host support with integrated graphics.
+ With GVT-g, it's possible to have one integrated graphics
+ device shared by multiple VMs under different hypervisors.
+
+ Note that at least one hypervisor like Xen or KVM is required for
+ this driver to work, and it only supports newer devices from
+ Broadwell onwards. For further information and a setup guide, you can
+ visit: http://01.org/igvt-g.
+
+ Now it's just a stub to support the modifications of i915 for the
+ GVT device model. It requires at least one MPT module for Xen/KVM
+ and the other components of the GVT device model to work. Use it at
+ your own risk.
+
+ If in doubt, say "N".
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8f404103341d..cee87bfd10c4 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
+ select PREEMPT_COUNT
+ select X86_MSR # used by igt/pm_rpm
+ select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
default n
help
Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3c1f..684fc1cd08fa 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
i915-y := i915_drv.o \
i915_irq.o \
i915_params.o \
+ i915_pci.o \
i915_suspend.o \
i915_sysfs.o \
intel_csr.o \
+ intel_device_info.o \
intel_pm.o \
intel_runtime_pm.o
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_userptr.o \
i915_gpu_error.o \
i915_trace_points.o \
+ intel_breadcrumbs.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -59,6 +62,7 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_color.o \
intel_display.o \
+ intel_dpio_phy.o \
intel_dpll_mgr.o \
intel_fbc.o \
intel_fifo_underrun.o \
@@ -81,10 +85,12 @@ i915-y += dvo_ch7017.o \
dvo_tfp410.o \
intel_crt.o \
intel_ddi.o \
+ intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
intel_dsi.o \
+ intel_dsi_dcs_backlight.o \
intel_dsi_panel_vbt.o \
intel_dsi_pll.o \
intel_dvo.o \
@@ -98,8 +104,10 @@ i915-y += dvo_ch7017.o \
# virtual gpu code
i915-y += i915_vgpu.o
-# legacy horrors
-i915-y += i915_dma.o
+ifeq ($(CONFIG_DRM_I915_GVT),y)
+i915-y += intel_gvt.o
+include $(src)/gvt/Makefile
+endif
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
new file mode 100644
index 000000000000..d0f21a6ad60d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -0,0 +1,5 @@
+GVT_DIR := gvt
+GVT_SOURCE := gvt.o
+
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
new file mode 100644
index 000000000000..7ef412be665f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __GVT_DEBUG_H__
+#define __GVT_DEBUG_H__
+
+#define gvt_dbg_core(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+
+/*
+ * Other GVT debug stuff will be introduced in the GVT device model patches.
+ */
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
new file mode 100644
index 000000000000..927f4579f5b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <xen/xen.h>
+
+#include "i915_drv.h"
+
+struct intel_gvt_host intel_gvt_host;
+
+static const char * const supported_hypervisors[] = {
+ [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
+ [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
+};
+
+/**
+ * intel_gvt_init_host - Load MPT modules and detect if we're running in host
+ * @gvt: intel gvt device
+ *
+ * This function is called at the driver loading stage. If it fails to find
+ * a loadable MPT module or detects that we are currently running in a VM,
+ * GVT-g will be disabled.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init_host(void)
+{
+ if (intel_gvt_host.initialized)
+ return 0;
+
+ /* Xen DOM U */
+ if (xen_domain() && !xen_initial_domain())
+ return -ENODEV;
+
+ /* Try to load MPT modules for hypervisors */
+ if (xen_initial_domain()) {
+ /* In Xen dom0 */
+ intel_gvt_host.mpt = try_then_request_module(
+ symbol_get(xengt_mpt), "xengt");
+ intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
+ } else {
+ /* not in Xen. Try KVMGT */
+ intel_gvt_host.mpt = try_then_request_module(
+ symbol_get(kvmgt_mpt), "kvm");
+ intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+ }
+
+ /* Failed to load MPT modules - bail out */
+ if (!intel_gvt_host.mpt)
+ return -EINVAL;
+
+ /* Try to detect if we're running in host instead of VM. */
+ if (!intel_gvt_hypervisor_detect_host())
+ return -ENODEV;
+
+ gvt_dbg_core("Running with hypervisor %s in host mode\n",
+ supported_hypervisors[intel_gvt_host.hypervisor_type]);
+
+ intel_gvt_host.initialized = true;
+ return 0;
+}
+
+static void init_device_info(struct intel_gvt *gvt)
+{
+ if (IS_BROADWELL(gvt->dev_priv))
+ gvt->device_info.max_support_vgpus = 8;
+ /* This function will grow large in GVT device model patches. */
+}
+
+/**
+ * intel_gvt_clean_device - clean a GVT device
+ * @gvt: intel gvt device
+ *
+ * This function is called at the driver unloading stage, to free the
+ * resources owned by a GVT device.
+ *
+ */
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+ struct intel_gvt *gvt = &dev_priv->gvt;
+
+ if (WARN_ON(!gvt->initialized))
+ return;
+
+ /* Other de-initialization of GVT components will be introduced. */
+
+ gvt->initialized = false;
+}
+
+/**
+ * intel_gvt_init_device - initialize a GVT device
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage, to initialize
+ * necessary GVT components.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+{
+ struct intel_gvt *gvt = &dev_priv->gvt;
+ /*
+ * Cannot initialize the GVT device before intel_gvt_host has been
+ * initialized.
+ */
+ if (WARN_ON(!intel_gvt_host.initialized))
+ return -EINVAL;
+
+ if (WARN_ON(gvt->initialized))
+ return -EEXIST;
+
+ gvt_dbg_core("init gvt device\n");
+
+ init_device_info(gvt);
+ /*
+ * Other initialization of GVT components will be introduced here.
+ */
+ gvt_dbg_core("gvt device creation is done\n");
+ gvt->initialized = true;
+ return 0;
+}
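
For context, a hedged sketch of how the two entry points above could be wired into driver load and unload; the wrapper names are assumptions, not part of this patch (the Makefile hunk above adds intel_gvt.o for this glue):

```c
/* Hypothetical wiring of the GVT entry points into i915 load/unload paths. */
static int example_gvt_setup(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = intel_gvt_init_host();	/* load an MPT module, confirm host mode */
	if (ret)
		return 0;		/* not fatal: GVT-g simply stays disabled */

	return intel_gvt_init_device(dev_priv);
}

static void example_gvt_teardown(struct drm_i915_private *dev_priv)
{
	intel_gvt_clean_device(dev_priv);
}
```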
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
new file mode 100644
index 000000000000..fb619a6e519d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_H_
+#define _GVT_H_
+
+#include "debug.h"
+#include "hypercall.h"
+
+#define GVT_MAX_VGPU 8
+
+enum {
+ INTEL_GVT_HYPERVISOR_XEN = 0,
+ INTEL_GVT_HYPERVISOR_KVM,
+};
+
+struct intel_gvt_host {
+ bool initialized;
+ int hypervisor_type;
+ struct intel_gvt_mpt *mpt;
+};
+
+extern struct intel_gvt_host intel_gvt_host;
+
+/* Describe per-platform limitations. */
+struct intel_gvt_device_info {
+ u32 max_support_vgpus;
+ /* This data structure will grow bigger in GVT device model patches */
+};
+
+struct intel_vgpu {
+ struct intel_gvt *gvt;
+ int id;
+ unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+};
+
+struct intel_gvt {
+ struct mutex lock;
+ bool initialized;
+
+ struct drm_i915_private *dev_priv;
+ struct idr vgpu_idr; /* vGPU IDR pool */
+
+ struct intel_gvt_device_info device_info;
+};
+
+#include "mpt.h"
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
new file mode 100644
index 000000000000..254df8bf1f35
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_HYPERCALL_H_
+#define _GVT_HYPERCALL_H_
+
+/*
+ * Specific GVT-g MPT modules function collections. Currently GVT-g supports
+ * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
+ */
+struct intel_gvt_mpt {
+ int (*detect_host)(void);
+};
+
+extern struct intel_gvt_mpt xengt_mpt;
+extern struct intel_gvt_mpt kvmgt_mpt;
+
+#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
new file mode 100644
index 000000000000..03601e3ffa7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_MPT_H_
+#define _GVT_MPT_H_
+
+/**
+ * DOC: Hypervisor Service APIs for GVT-g Core Logic
+ *
+ * This is the glue layer between the hypervisor-specific MPT modules and the
+ * GVT-g core logic. Each hypervisor MPT module provides a collection of
+ * function callbacks that is attached to the GVT host when the driver loads.
+ * The GVT-g core logic calls these APIs to request services from the
+ * hypervisor.
+ */
+
+/**
+ * intel_gvt_hypervisor_detect_host - check if GVT-g is running within the
+ * hypervisor host/privileged domain
+ *
+ * Returns:
+ * Zero on success, -ENODEV if the current kernel is running inside a VM
+ */
+static inline int intel_gvt_hypervisor_detect_host(void)
+{
+ return intel_gvt_host.mpt->detect_host();
+}
+
+#endif /* _GVT_MPT_H_ */
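
A corresponding sketch (again not part of the patch) of how the GVT-g core logic would consume the wrapper above during initialization; intel_gvt_init_host() is a hypothetical caller, and only intel_gvt_hypervisor_detect_host() and intel_gvt_host come from the headers being added here.

/* Minimal sketch: bail out of GVT-g initialization unless we are running
 * in the hypervisor host/privileged domain.
 */
#include <linux/errno.h>

#include "gvt.h"	/* pulls in hypercall.h and mpt.h */

static int intel_gvt_init_host(void)	/* hypothetical caller */
{
	int ret;

	if (!intel_gvt_host.initialized)
		return -EINVAL;

	ret = intel_gvt_hypervisor_detect_host();
	if (ret)
		return ret;	/* -ENODEV: running inside a guest VM */

	return 0;
}
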
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..b0fd6a7b0603 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
- CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
- * @ring: the ringbuffer to initialize
+ * @engine: the engine to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_engine_cs based on whether the platform requires software
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->dev))
+ if (!IS_GEN7(engine->i915))
return 0;
switch (engine->id) {
case RCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
@@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
/**
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
- * @ring: the ringbuffer to clean up
+ * @engine: the engine to clean up
*
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
@@ -1023,7 +1024,7 @@ unpin_src:
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
- * @ring: the ring in question
+ * @engine: the engine in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
if (!engine->needs_cmd_parser)
return false;
- if (!USES_PPGTT(engine->dev))
+ if (!USES_PPGTT(engine->i915))
return false;
return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
+ if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
+ return false;
+ }
+
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[offset + 1] != 0);
}
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
+ if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ring: the ring on which the batch is to execute
+ * @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
@@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
/**
* i915_cmd_parser_get_version() - get the cmd parser version number
+ * @dev_priv: i915 device private
*
* The cmd parser maintains a simple increasing integer version number suitable
* for passing to userspace clients to determine what operations are permitted.
*
* Return: the current version number of the cmd parser
*/
-int i915_cmd_parser_get_version(void)
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
+ struct intel_engine_cs *engine;
+ bool active = false;
+
+ /* If the command parser is not enabled, report 0 - unsupported */
+ for_each_engine(engine, dev_priv) {
+ if (i915_needs_cmd_parser(engine)) {
+ active = true;
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
/*
* Command parser version history
*
@@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void)
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
+ * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
*/
- return 6;
+ return 7;
}
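
Since the parser version now reports 0 when the command parser is inactive and 7 once MI_LOAD_REGISTER_REG between whitelisted registers is allowed, a userspace client would typically gate its use of LRR on the reported version. A rough sketch, not taken from this patch (the include path depends on the libdrm installation):

#include <sys/ioctl.h>
#include <i915_drm.h>	/* or <libdrm/i915_drm.h>, depending on the install */

/* Query I915_PARAM_CMD_PARSER_VERSION; 0 means the parser is not active. */
static int cmd_parser_version(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;	/* >= 7: LRR between whitelisted registers is allowed */
}
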
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2a6e12956baf..844fea795bae 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char get_active_flag(struct drm_i915_gem_object *obj)
+static char get_active_flag(struct drm_i915_gem_object *obj)
{
return obj->active ? '*' : ' ';
}
-static const char get_pin_flag(struct drm_i915_gem_object *obj)
+static char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_display ? 'p' : ' ';
}
-static const char get_tiling_flag(struct drm_i915_gem_object *obj)
+static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
-static inline const char get_global_flag(struct drm_i915_gem_object *obj)
+static char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}
-static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
}
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
-static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
-{
- seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, ' ');
-}
-
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -272,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
print_file_stats(m, "[k]batch pool", stats);
}
+static int per_file_ctx_stats(int id, void *ptr, void *data)
+{
+ struct i915_gem_context *ctx = ptr;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
+ if (ctx->engine[n].state)
+ per_file_stats(0, ctx->engine[n].state, data);
+ if (ctx->engine[n].ringbuf)
+ per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+ }
+
+ return 0;
+}
+
+static void print_context_stats(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct file_stats stats;
+ struct drm_file *file;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ if (dev_priv->kernel_context)
+ per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+
+ list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ print_file_stats(m, "[k]contexts", stats);
+}
+
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
-
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev->filelist_mutex);
+ print_context_stats(m, dev_priv);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -562,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
uintptr_t list = (uintptr_t) node->info_ent->data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -596,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
int ret;
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- work = crtc->unpin_work;
+ work = crtc->flip_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
+ u32 pending;
u32 addr;
- if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pending = atomic_read(&work->pending);
+ if (pending) {
+ seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
pipe, plane);
} else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -631,18 +662,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- engine->get_seqno(engine),
- i915_gem_request_completed(work->flip_queued_req, true));
+ intel_engine_get_seqno(engine),
+ i915_gem_request_completed(work->flip_queued_req));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
- drm_crtc_vblank_count(&crtc->base));
- if (work->enable_stall_check)
- seq_puts(m, "Stall check enabled, ");
- else
- seq_puts(m, "Stall check waiting for page flip ioctl, ");
+ intel_crtc_get_vblank_counter(crtc));
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4)
@@ -668,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
int total = 0;
@@ -713,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
int ret, any;
@@ -761,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine)
{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+
seq_printf(m, "Current sequence (%s): %x\n",
- engine->name, engine->get_seqno(engine));
- seq_printf(m, "Current user interrupts (%s): %x\n",
- engine->name, READ_ONCE(engine->user_interrupts));
+ engine->name, intel_engine_get_seqno(engine));
+ seq_printf(m, "Current user interrupts (%s): %lx\n",
+ engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
+
+ spin_lock(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
+ engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
int ret;
@@ -794,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
int ret, i, pipe;
@@ -985,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1013,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
const u32 *hws;
int i;
@@ -1124,7 +1163,7 @@ static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1161,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1281,6 +1320,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+ seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1363,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
@@ -1380,10 +1420,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine_id(engine, dev_priv, id) {
acthd[id] = intel_ring_get_active_head(engine);
- seqno[id] = engine->get_seqno(engine);
+ seqno[id] = intel_engine_get_seqno(engine);
}
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
intel_runtime_pm_put(dev_priv);
@@ -1400,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
engine->hangcheck.seqno,
seqno[id],
engine->last_submitted_seqno);
- seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+ seq_printf(m, "\twaiters? %d\n",
+ intel_engine_has_waiter(engine));
+ seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
engine->hangcheck.user_interrupts,
- READ_ONCE(engine->user_interrupts));
+ READ_ONCE(engine->breadcrumbs.irq_wakeups));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1432,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
@@ -1500,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore_forcewake_domain *fw_domain;
spin_lock_irq(&dev_priv->uncore.lock);
@@ -1518,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 rpmodectl1, rcctl1, pw_status;
intel_runtime_pm_get(dev_priv);
@@ -1558,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count = 0, ret;
@@ -1670,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
dev_priv->fb_tracking.busy_bits);
@@ -1685,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_FBC(dev)) {
seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1715,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
static int i915_fbc_fc_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
@@ -1728,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
static int i915_fbc_fc_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 reg;
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
@@ -1755,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_IPS(dev)) {
seq_puts(m, "not supported\n");
@@ -1785,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -1814,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long temp, chipset, gfx;
int ret;
@@ -1842,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
@@ -1897,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_opregion *opregion = &dev_priv->opregion;
int ret;
@@ -1918,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_opregion *opregion = &dev_priv->opregion;
if (opregion->vbt)
@@ -1940,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (to_i915(dev)->fbdev) {
- fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
-
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fbdev_fb->base.width,
- fbdev_fb->base.height,
- fbdev_fb->base.depth,
- fbdev_fb->base.bits_per_pixel,
- fbdev_fb->base.modifier[0],
- drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
- seq_putc(m, '\n');
- }
+ if (to_i915(dev)->fbdev) {
+ fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.depth,
+ fbdev_fb->base.bits_per_pixel,
+ fbdev_fb->base.modifier[0],
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ describe_obj(m, fbdev_fb->obj);
+ seq_putc(m, '\n');
+ }
#endif
mutex_lock(&dev->mode_config.fb_lock);
@@ -1989,10 +2031,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- struct intel_context *ctx;
- enum intel_engine_id id;
+ struct i915_gem_context *ctx;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2041,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (!i915.enable_execlists &&
- ctx->legacy_hw_ctx.rcs_state == NULL)
- continue;
-
- seq_puts(m, "HW context ");
- describe_ctx(m, ctx);
- if (ctx == dev_priv->kernel_context)
- seq_printf(m, "(kernel context) ");
+ seq_printf(m, "HW context %u ", ctx->hw_id);
+ if (IS_ERR(ctx->file_priv)) {
+ seq_puts(m, "(deleted) ");
+ } else if (ctx->file_priv) {
+ struct pid *pid = ctx->file_priv->file->pid;
+ struct task_struct *task;
- if (i915.enable_execlists) {
- seq_putc(m, '\n');
- for_each_engine_id(engine, dev_priv, id) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[id].ringbuf;
-
- seq_printf(m, "%s: ", engine->name);
- if (ctx_obj)
- describe_obj(m, ctx_obj);
- if (ringbuf)
- describe_ctx_ringbuf(m, ringbuf);
- seq_putc(m, '\n');
+ task = get_pid_task(pid, PIDTYPE_PID);
+ if (task) {
+ seq_printf(m, "(%s [%d]) ",
+ task->comm, task->pid);
+ put_task_struct(task);
}
} else {
- describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+ seq_puts(m, "(kernel) ");
+ }
+
+ seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+ seq_putc(m, '\n');
+
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ seq_printf(m, "%s: ", engine->name);
+ seq_putc(m, ce->initialised ? 'I' : 'i');
+ if (ce->state)
+ describe_obj(m, ce->state);
+ if (ce->ringbuf)
+ describe_ctx_ringbuf(m, ce->ringbuf);
+ seq_putc(m, '\n');
}
seq_putc(m, '\n');
@@ -2037,24 +2082,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
static void i915_dump_lrc_obj(struct seq_file *m,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
struct page *page;
uint32_t *reg_state;
int j;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0;
+ seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
+
if (ctx_obj == NULL) {
- seq_printf(m, "Context on %s with no gem object\n",
- engine->name);
+ seq_puts(m, "\tNot allocated\n");
return;
}
- seq_printf(m, "CONTEXT: %s %u\n", engine->name,
- intel_execlists_ctx_id(ctx, engine));
-
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
else
@@ -2085,9 +2128,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (!i915.enable_execlists) {
@@ -2100,10 +2143,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- if (ctx != dev_priv->kernel_context) {
- for_each_engine(engine, dev_priv)
- i915_dump_lrc_obj(m, ctx, engine);
- }
+ for_each_engine(engine, dev_priv)
+ i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2114,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
@@ -2174,8 +2215,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
- seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(head_req->ctx, engine));
+ seq_printf(m, "\tHead request context: %u\n",
+ head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -2217,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2269,7 +2310,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int per_file_ctx(int id, void *ptr, void *data)
{
- struct intel_context *ctx = ptr;
+ struct i915_gem_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2290,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
@@ -2311,15 +2352,15 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_engine(engine, dev_priv) {
seq_printf(m, "%s\n", engine->name);
- if (INTEL_INFO(dev)->gen == 7)
+ if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
I915_READ(RING_MODE_GEN7(engine)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2345,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_file *file;
int ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2388,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
int count = 0;
for_each_engine(engine, i915)
- count += engine->irq_refcount;
+ count += intel_engine_has_waiter(engine);
return count;
}
@@ -2397,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
- seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+ seq_printf(m, "GPU busy? %s [%x]\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2442,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const bool edram = INTEL_GEN(dev_priv) > 8;
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
@@ -2455,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
@@ -2510,15 +2552,16 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
+ seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
for_each_engine(engine, dev_priv) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[engine->guc_id],
+ client->submissions[engine->id],
engine->name);
- tot += client->submissions[engine->guc_id];
+ tot += client->submissions[engine->id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2527,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc guc;
struct i915_guc_client client = {};
struct intel_engine_cs *engine;
@@ -2546,6 +2589,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
mutex_unlock(&dev->struct_mutex);
+ seq_printf(m, "Doorbell map:\n");
+ seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
+ seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+
seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
@@ -2555,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC submissions:\n");
for_each_engine(engine, dev_priv) {
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, guc.submissions[engine->guc_id],
- guc.last_seqno[engine->guc_id]);
- total += guc.submissions[engine->guc_id];
+ engine->name, guc.submissions[engine->id],
+ guc.last_seqno[engine->id]);
+ total += guc.submissions[engine->id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -2573,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
u32 *log;
int i = 0, pg;
@@ -2601,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 psrperf = 0;
u32 stat[3];
enum pipe pipe;
@@ -2669,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct intel_encoder *encoder;
struct intel_connector *connector;
struct intel_dp *intel_dp = NULL;
int ret;
@@ -2677,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data)
drm_modeset_lock_all(dev);
for_each_intel_connector(dev, connector) {
+ struct drm_crtc *crtc;
- if (connector->base.dpms != DRM_MODE_DPMS_ON)
+ if (!connector->base.state->best_encoder)
continue;
- if (!connector->base.encoder)
+ crtc = connector->base.state->crtc;
+ if (!crtc->state->active)
continue;
- encoder = to_intel_encoder(connector->base.encoder);
- if (encoder->type != INTEL_OUTPUT_EDP)
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
ret = intel_dp_sink_crc(intel_dp, crc);
if (ret)
@@ -2709,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u64 power;
u32 units;
@@ -2735,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -2750,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
seq_printf(m, "PCI device power state: %s [%d]\n",
- pci_power_name(dev_priv->dev->pdev->current_state),
- dev_priv->dev->pdev->current_state);
+ pci_power_name(dev_priv->drm.pdev->current_state),
+ dev_priv->drm.pdev->current_state);
return 0;
}
@@ -2760,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int i;
@@ -2795,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_csr *csr;
if (!HAS_CSR(dev)) {
@@ -2918,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m,
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
- if (intel_encoder->type == INTEL_OUTPUT_EDP)
+ if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_info(m, &intel_connector->panel);
}
@@ -2957,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
- if (intel_encoder) {
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- intel_dp_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
- intel_hdmi_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
+ intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
}
seq_printf(m, "\tmodes:\n");
@@ -2974,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m,
static bool cursor_active(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 state;
if (IS_845G(dev) || IS_I865G(dev))
@@ -2987,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pos;
pos = I915_READ(CURPOS(pipe));
@@ -3108,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
struct drm_connector *connector;
@@ -3163,13 +3222,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
enum intel_engine_id id;
int j, ret;
- if (!i915_semaphore_is_enabled(dev)) {
+ if (!i915_semaphore_is_enabled(dev_priv)) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
@@ -3236,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
drm_modeset_lock_all(dev);
@@ -3266,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
struct intel_engine_cs *engine;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_workarounds *workarounds = &dev_priv->workarounds;
enum intel_engine_id id;
@@ -3304,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
enum pipe pipe;
@@ -3342,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
static void drrs_status_per_crtc(struct seq_file *m,
struct drm_device *dev, struct intel_crtc *intel_crtc)
{
- struct intel_encoder *intel_encoder;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_drrs *drrs = &dev_priv->drrs;
int vrefresh = 0;
+ struct drm_connector *connector;
- for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
- /* Encoder connected on this CRTC */
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_EDP:
- seq_puts(m, "eDP:\n");
- break;
- case INTEL_OUTPUT_DSI:
- seq_puts(m, "DSI:\n");
- break;
- case INTEL_OUTPUT_HDMI:
- seq_puts(m, "HDMI:\n");
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- seq_puts(m, "DP:\n");
- break;
- default:
- seq_printf(m, "Other encoder (id=%d).\n",
- intel_encoder->type);
- return;
- }
+ drm_for_each_connector(connector, dev) {
+ if (connector->state->crtc != &intel_crtc->base)
+ continue;
+
+ seq_printf(m, "%s:\n", connector->name);
}
if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
@@ -3429,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
struct intel_crtc *intel_crtc;
int active_crtc_cnt = 0;
+ drm_modeset_lock_all(dev);
for_each_intel_crtc(dev, intel_crtc) {
- drm_modeset_lock(&intel_crtc->base.mutex, NULL);
-
if (intel_crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
drrs_status_per_crtc(m, dev, intel_crtc);
}
-
- drm_modeset_unlock(&intel_crtc->base.mutex);
}
+ drm_modeset_unlock_all(dev);
if (!active_crtc_cnt)
seq_puts(m, "No active crtc found\n");
@@ -3458,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_encoder *encoder;
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
+ struct drm_connector *connector;
+
drm_modeset_lock_all(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_encoder = to_intel_encoder(encoder);
- if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+ drm_for_each_connector(connector, dev) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
- intel_dig_port = enc_to_dig_port(encoder);
+
+ intel_encoder = intel_attached_encoder(connector);
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
if (!intel_dig_port->dp.can_mst)
continue;
+
seq_printf(m, "MST Source Port %c\n",
port_name(intel_dig_port->port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
@@ -3480,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(info->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
@@ -3504,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(info->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
spin_lock_irq(&pipe_crc->lock);
@@ -3532,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
{
struct pipe_crc_info *info = filep->private_data;
struct drm_device *dev = info->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
int n_entries;
@@ -3665,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3726,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
case INTEL_OUTPUT_TVOUT:
*source = INTEL_PIPE_CRC_SOURCE_TV;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
dig_port = enc_to_dig_port(&encoder->base);
switch (dig_port->port) {
@@ -3759,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3830,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3904,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
@@ -3929,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
@@ -3972,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
struct intel_crtc_state *pipe_config;
@@ -4040,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
@@ -4547,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4563,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4579,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4670,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4686,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4702,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4744,7 +4792,7 @@ static int
i915_wedged_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
*val = i915_terminally_wedged(&dev_priv->gpu_error);
@@ -4755,7 +4803,7 @@ static int
i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* There is no safeguard against this debugfs entry colliding
@@ -4770,7 +4818,7 @@ i915_wedged_set(void *data, u64 val)
intel_runtime_pm_get(dev_priv);
- i915_handle_error(dev, val,
+ i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv);
@@ -4783,44 +4831,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
"%llu\n");
static int
-i915_ring_stop_get(void *data, u64 *val)
-{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- *val = dev_priv->gpu_error.stop_rings;
-
- return 0;
-}
-
-static int
-i915_ring_stop_set(void *data, u64 val)
-{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- dev_priv->gpu_error.stop_rings = val;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
- i915_ring_stop_get, i915_ring_stop_set,
- "0x%08llx\n");
-
-static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
*val = dev_priv->gpu_error.missed_irq_rings;
return 0;
@@ -4830,7 +4844,7 @@ static int
i915_ring_missed_irq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
/* Lock against concurrent debugfs callers */
@@ -4851,7 +4865,7 @@ static int
i915_ring_test_irq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
*val = dev_priv->gpu_error.test_irq_rings;
@@ -4862,18 +4876,11 @@ static int
i915_ring_test_irq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ val &= INTEL_INFO(dev_priv)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
- /* Lock against concurrent debugfs callers */
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
dev_priv->gpu_error.test_irq_rings = val;
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4902,7 +4909,7 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4914,13 +4921,13 @@ i915_drop_caches_set(void *data, u64 val)
return ret;
if (val & DROP_ACTIVE) {
- ret = i915_gpu_idle(dev);
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
goto unlock;
}
if (val & (DROP_RETIRE | DROP_ACTIVE))
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4942,7 +4949,7 @@ static int
i915_max_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4964,7 +4971,7 @@ static int
i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_max, hw_min;
int ret;
@@ -4994,7 +5001,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5009,7 +5016,7 @@ static int
i915_min_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -5031,7 +5038,7 @@ static int
i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_max, hw_min;
int ret;
@@ -5061,7 +5068,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5076,7 +5083,7 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 snpcr;
int ret;
@@ -5091,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5102,7 +5109,7 @@ static int
i915_cache_sharing_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 snpcr;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -5139,7 +5146,7 @@ struct sseu_dev_status {
static void cherryview_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ss_max = 2;
int ss;
u32 sig1[ss_max], sig2[ss_max];
@@ -5171,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
static void gen9_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int s_max = 3, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -5236,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
static void broadwell_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int s;
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
@@ -5278,6 +5285,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
INTEL_INFO(dev)->eu_total);
seq_printf(m, " Available EU Per Subslice: %u\n",
INTEL_INFO(dev)->eu_per_subslice);
+ seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
+ if (HAS_POOLED_EU(dev))
+ seq_printf(m, " Min EU in pool: %u\n",
+ INTEL_INFO(dev)->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
yesno(INTEL_INFO(dev)->has_slice_pg));
seq_printf(m, " Has Subslice Power Gating: %s\n",
@@ -5311,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 6)
return 0;
@@ -5325,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 6)
return 0;
@@ -5441,7 +5452,6 @@ static const struct i915_debugfs_files {
{"i915_max_freq", &i915_max_freq_fops},
{"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
- {"i915_ring_stop", &i915_ring_stop_fops},
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -5459,7 +5469,7 @@ static const struct i915_debugfs_files {
void intel_display_crc_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -5471,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev)
}
}
-int i915_debugfs_init(struct drm_minor *minor)
+int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
+ struct drm_minor *minor = dev_priv->drm.primary;
int ret, i;
ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5498,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
}
-void i915_debugfs_cleanup(struct drm_minor *minor)
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
{
+ struct drm_minor *minor = dev_priv->drm.primary;
int i;
drm_debugfs_remove_files(i915_debugfs_list,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
deleted file mode 100644
index b3198fcd0536..000000000000
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ /dev/null
@@ -1,1587 +0,0 @@
-/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_legacy.h>
-#include "intel_drv.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_vgpu.h"
-#include "i915_trace.h"
-#include <linux/pci.h>
-#include <linux/console.h>
-#include <linux/vt.h>
-#include <linux/vgaarb.h>
-#include <linux/acpi.h>
-#include <linux/pnp.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/slab.h>
-#include <acpi/video.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/oom.h>
-
-static unsigned int i915_load_fail_count;
-
-bool __i915_inject_load_failure(const char *func, int line)
-{
- if (i915_load_fail_count >= i915.inject_load_failure)
- return false;
-
- if (++i915_load_fail_count == i915.inject_load_failure) {
- DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
- i915.inject_load_failure, func, line);
- return true;
- }
-
- return false;
-}
-
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
- "providing the dmesg log by booting with drm.debug=0xf"
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
- const char *fmt, ...)
-{
- static bool shown_bug_once;
- struct device *dev = dev_priv->dev->dev;
- bool is_error = level[1] <= KERN_ERR[1];
- bool is_debug = level[1] == KERN_DEBUG[1];
- struct va_format vaf;
- va_list args;
-
- if (is_debug && !(drm_debug & DRM_UT_DRIVER))
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
- __builtin_return_address(0), &vaf);
-
- if (is_error && !shown_bug_once) {
- dev_notice(dev, "%s", FDO_BUG_MSG);
- shown_bug_once = true;
- }
-
- va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
- return i915.inject_load_failure &&
- i915_load_fail_count == i915.inject_load_failure;
-}
-
-#define i915_load_error(dev_priv, fmt, ...) \
- __i915_printk(dev_priv, \
- i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
- fmt, ##__VA_ARGS__)
-
-static int i915_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- drm_i915_getparam_t *param = data;
- int value;
-
- switch (param->param) {
- case I915_PARAM_IRQ_ACTIVE:
- case I915_PARAM_ALLOW_BATCHBUFFER:
- case I915_PARAM_LAST_DISPATCH:
- /* Reject all old ums/dri params. */
- return -ENODEV;
- case I915_PARAM_CHIPSET_ID:
- value = dev->pdev->device;
- break;
- case I915_PARAM_REVISION:
- value = dev->pdev->revision;
- break;
- case I915_PARAM_HAS_GEM:
- value = 1;
- break;
- case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->num_fence_regs;
- break;
- case I915_PARAM_HAS_OVERLAY:
- value = dev_priv->overlay ? 1 : 0;
- break;
- case I915_PARAM_HAS_PAGEFLIPPING:
- value = 1;
- break;
- case I915_PARAM_HAS_EXECBUF2:
- /* depends on GEM */
- value = 1;
- break;
- case I915_PARAM_HAS_BSD:
- value = intel_engine_initialized(&dev_priv->engine[VCS]);
- break;
- case I915_PARAM_HAS_BLT:
- value = intel_engine_initialized(&dev_priv->engine[BCS]);
- break;
- case I915_PARAM_HAS_VEBOX:
- value = intel_engine_initialized(&dev_priv->engine[VECS]);
- break;
- case I915_PARAM_HAS_BSD2:
- value = intel_engine_initialized(&dev_priv->engine[VCS2]);
- break;
- case I915_PARAM_HAS_RELAXED_FENCING:
- value = 1;
- break;
- case I915_PARAM_HAS_COHERENT_RINGS:
- value = 1;
- break;
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_INFO(dev)->gen >= 4;
- break;
- case I915_PARAM_HAS_RELAXED_DELTA:
- value = 1;
- break;
- case I915_PARAM_HAS_GEN7_SOL_RESET:
- value = 1;
- break;
- case I915_PARAM_HAS_LLC:
- value = HAS_LLC(dev);
- break;
- case I915_PARAM_HAS_WT:
- value = HAS_WT(dev);
- break;
- case I915_PARAM_HAS_ALIASING_PPGTT:
- value = USES_PPGTT(dev);
- break;
- case I915_PARAM_HAS_WAIT_TIMEOUT:
- value = 1;
- break;
- case I915_PARAM_HAS_SEMAPHORES:
- value = i915_semaphore_is_enabled(dev);
- break;
- case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
- value = 1;
- break;
- case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
- break;
- case I915_PARAM_HAS_PINNED_BATCHES:
- value = 1;
- break;
- case I915_PARAM_HAS_EXEC_NO_RELOC:
- value = 1;
- break;
- case I915_PARAM_HAS_EXEC_HANDLE_LUT:
- value = 1;
- break;
- case I915_PARAM_CMD_PARSER_VERSION:
- value = i915_cmd_parser_get_version();
- break;
- case I915_PARAM_HAS_COHERENT_PHYS_GTT:
- value = 1;
- break;
- case I915_PARAM_MMAP_VERSION:
- value = 1;
- break;
- case I915_PARAM_SUBSLICE_TOTAL:
- value = INTEL_INFO(dev)->subslice_total;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_EU_TOTAL:
- value = INTEL_INFO(dev)->eu_total;
- if (!value)
- return -ENODEV;
- break;
- case I915_PARAM_HAS_GPU_RESET:
- value = i915.enable_hangcheck &&
- intel_has_gpu_reset(dev);
- break;
- case I915_PARAM_HAS_RESOURCE_STREAMER:
- value = HAS_RESOURCE_STREAMER(dev);
- break;
- case I915_PARAM_HAS_EXEC_SOFTPIN:
- value = 1;
- break;
- default:
- DRM_DEBUG("Unknown parameter %d\n", param->param);
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user failed\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int i915_get_bridge_dev(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
- if (!dev_priv->bridge_dev) {
- DRM_ERROR("bridge device not found\n");
- return -1;
- }
- return 0;
-}
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret;
-
- if (INTEL_INFO(dev)->gen >= 4)
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
- return 0;
-#endif
-
- /* Get some space for it */
- dev_priv->mch_res.name = "i915 MCHBAR";
- dev_priv->mch_res.flags = IORESOURCE_MEM;
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
- &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- return ret;
- }
-
- if (INTEL_INFO(dev)->gen >= 4)
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
- return 0;
-}
-
-/* Setup MCHBAR if possible, noting whether we need to disable it again on teardown */
-static void
-intel_setup_mchbar(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool enabled;
-
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- return;
-
- dev_priv->mchbar_need_disable = false;
-
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- return;
-
- if (intel_alloc_mchbar_resource(dev))
- return;
-
- dev_priv->mchbar_need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
- if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- u32 deven_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
- &deven_val);
- deven_val &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- deven_val);
- } else {
- u32 mchbar_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
- &mchbar_val);
- mchbar_val &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
- mchbar_val);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
-/* true = enable decode, false = disable decode */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
-{
- struct drm_device *dev = cookie;
-
- intel_modeset_vga_set_state(dev, state);
- if (state)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
- if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- /* i915 resume handler doesn't set to D0 */
- pci_set_power_state(dev->pdev, PCI_D0);
- i915_resume_switcheroo(dev);
- dev->switch_power_state = DRM_SWITCH_POWER_ON;
- } else {
- pr_info("switched off\n");
- dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(dev, pmm);
- dev->switch_power_state = DRM_SWITCH_POWER_OFF;
- }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- /*
- * FIXME: open_count is protected by drm_global_mutex but that would lead to
- * locking inversion with the driver load path. And the access here is
- * completely racy anyway. So don't bother with locking for now.
- */
- return dev->open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
- .set_gpu_state = i915_switcheroo_set_state,
- .reprobe = NULL,
- .can_switch = i915_switcheroo_can_switch,
-};
-
-static int i915_load_modeset_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- if (i915_inject_load_failure())
- return -ENODEV;
-
- ret = intel_bios_init(dev_priv);
- if (ret)
- DRM_INFO("failed to find VBIOS tables\n");
-
- /* If we have > 1 VGA cards, then we need to arbitrate access
- * to the common VGA resources.
- *
- * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
- * then we do not take part in VGA arbitration and the
- * vga_client_register() fails with -ENODEV.
- */
- ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
- goto out;
-
- intel_register_dsm_handler();
-
- ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
- if (ret)
- goto cleanup_vga_client;
-
- intel_power_domains_init_hw(dev_priv, false);
-
- intel_csr_ucode_init(dev_priv);
-
- ret = intel_irq_install(dev_priv);
- if (ret)
- goto cleanup_csr;
-
- intel_setup_gmbus(dev);
-
- /* Important: The output setup functions called by modeset_init need
- * working irqs for e.g. gmbus and dp aux transfers. */
- intel_modeset_init(dev);
-
- intel_guc_ucode_init(dev);
-
- ret = i915_gem_init(dev);
- if (ret)
- goto cleanup_irq;
-
- intel_modeset_gem_init(dev);
-
- if (INTEL_INFO(dev)->num_pipes == 0)
- return 0;
-
- ret = intel_fbdev_init(dev);
- if (ret)
- goto cleanup_gem;
-
- /* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev_priv);
-
- /*
- * Some ports require correctly set-up hpd registers for detection to
- * work properly (leading to ghost connected connector status), e.g. VGA
- * on gm45. Hence we can only set up the initial fbdev config after hpd
- * irqs are fully enabled. Now we should scan for the initial config
- * only once hotplug handling is enabled, but due to screwed-up locking
- * around kms/fbdev init we can't protect the fbdev initial config
- * scanning against hotplug events. Hence do this first and ignore the
- * tiny window where we will lose hotplug notifications.
- */
- intel_fbdev_initial_config_async(dev);
-
- drm_kms_helper_poll_init(dev);
-
- return 0;
-
-cleanup_gem:
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
-cleanup_irq:
- intel_guc_ucode_fini(dev);
- drm_irq_uninstall(dev);
- intel_teardown_gmbus(dev);
-cleanup_csr:
- intel_csr_ucode_fini(dev_priv);
- intel_power_domains_fini(dev_priv);
- vga_switcheroo_unregister_client(dev->pdev);
-cleanup_vga_client:
- vga_client_register(dev->pdev, NULL, NULL, NULL);
-out:
- return ret;
-}
-
-#if IS_ENABLED(CONFIG_FB)
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
- struct apertures_struct *ap;
- struct pci_dev *pdev = dev_priv->dev->pdev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool primary;
- int ret;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = ggtt->mappable_base;
- ap->ranges[0].size = ggtt->mappable_end;
-
- primary =
- pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-
- ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
-
- kfree(ap);
-
- return ret;
-}
-#else
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
- return 0;
-}
-#endif
-
-#if !defined(CONFIG_VGA_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- return 0;
-}
-#elif !defined(CONFIG_DUMMY_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- return -ENODEV;
-}
-#else
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- DRM_INFO("Replacing VGA console driver\n");
-
- console_lock();
- if (con_is_bound(&vga_con))
- ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
- if (ret == 0) {
- ret = do_unregister_con_driver(&vga_con);
-
- /* Ignore "already unregistered". */
- if (ret == -ENODEV)
- ret = 0;
- }
- console_unlock();
-
- return ret;
-}
-#endif
-
-static void i915_dump_device_info(struct drm_i915_private *dev_priv)
-{
- const struct intel_device_info *info = &dev_priv->info;
-
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
- DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
- info->gen,
- dev_priv->dev->pdev->device,
- dev_priv->dev->pdev->revision,
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
-#undef PRINT_FLAG
-#undef SEP_COMMA
-}
-
-static void cherryview_sseu_info_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_device_info *info;
- u32 fuse, eu_dis;
-
- info = (struct intel_device_info *)&dev_priv->info;
- fuse = I915_READ(CHV_FUSE_GT);
-
- info->slice_total = 1;
-
- if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- info->subslice_per_slice++;
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK);
- info->eu_total += 8 - hweight32(eu_dis);
- }
-
- if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- info->subslice_per_slice++;
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total += 8 - hweight32(eu_dis);
- }
-
- info->subslice_total = info->subslice_per_slice;
- /*
- * CHV is expected to always have a uniform distribution of EU
- * across subslices.
- */
- info->eu_per_subslice = info->subslice_total ?
- info->eu_total / info->subslice_total :
- 0;
- /*
- * CHV supports subslice power gating on devices with more than
- * one subslice, and supports EU power gating on devices with
- * more than one EU pair per subslice.
- */
- info->has_slice_pg = 0;
- info->has_subslice_pg = (info->subslice_total > 1);
- info->has_eu_pg = (info->eu_per_subslice > 2);
-}
-
-static void gen9_sseu_info_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_device_info *info;
- int s_max = 3, ss_max = 4, eu_max = 8;
- int s, ss;
- u32 fuse2, s_enable, ss_disable, eu_disable;
- u8 eu_mask = 0xff;
-
- info = (struct intel_device_info *)&dev_priv->info;
- fuse2 = I915_READ(GEN8_FUSE2);
- s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
- GEN8_F2_S_ENA_SHIFT;
- ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT;
-
- info->slice_total = hweight32(s_enable);
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- info->subslice_per_slice = ss_max - hweight32(ss_disable);
- info->subslice_total = info->slice_total *
- info->subslice_per_slice;
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < s_max; s++) {
- if (!(s_enable & (0x1 << s)))
- /* skip disabled slice */
- continue;
-
- eu_disable = I915_READ(GEN9_EU_DISABLE(s));
- for (ss = 0; ss < ss_max; ss++) {
- int eu_per_ss;
-
- if (ss_disable & (0x1 << ss))
- /* skip disabled subslice */
- continue;
-
- eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
- eu_mask);
-
- /*
- * Record which subslice(s) have 7 EUs. We
- * can tune the hash used to spread work among
- * subslices if they are unbalanced.
- */
- if (eu_per_ss == 7)
- info->subslice_7eu[s] |= 1 << ss;
-
- info->eu_total += eu_per_ss;
- }
- }
-
- /*
- * SKL is expected to always have a uniform distribution
- * of EU across subslices with the exception that any one
- * EU in any one subslice may be fused off for die
- * recovery. BXT is expected to be perfectly uniform in EU
- * distribution.
- */
- info->eu_per_subslice = info->subslice_total ?
- DIV_ROUND_UP(info->eu_total,
- info->subslice_total) : 0;
- /*
- * SKL supports slice power gating on devices with more than
- * one slice, and supports EU power gating on devices with
- * more than one EU pair per subslice. BXT supports subslice
- * power gating on devices with more than one subslice, and
- * supports EU power gating on devices with more than one EU
- * pair per subslice.
- */
- info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
- (info->slice_total > 1));
- info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
- info->has_eu_pg = (info->eu_per_subslice > 2);
-}
-
-static void broadwell_sseu_info_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_device_info *info;
- const int s_max = 3, ss_max = 3, eu_max = 8;
- int s, ss;
- u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
- fuse2 = I915_READ(GEN8_FUSE2);
- s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
-
- eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
- eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
- ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
- (32 - GEN8_EU_DIS0_S1_SHIFT));
- eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
- ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
- (32 - GEN8_EU_DIS1_S2_SHIFT));
-
-
- info = (struct intel_device_info *)&dev_priv->info;
- info->slice_total = hweight32(s_enable);
-
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- info->subslice_per_slice = ss_max - hweight32(ss_disable);
- info->subslice_total = info->slice_total * info->subslice_per_slice;
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < s_max; s++) {
- if (!(s_enable & (0x1 << s)))
- /* skip disabled slice */
- continue;
-
- for (ss = 0; ss < ss_max; ss++) {
- u32 n_disabled;
-
- if (ss_disable & (0x1 << ss))
- /* skip disabled subslice */
- continue;
-
- n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
-
- /*
- * Record which subslices have 7 EUs.
- */
- if (eu_max - n_disabled == 7)
- info->subslice_7eu[s] |= 1 << ss;
-
- info->eu_total += eu_max - n_disabled;
- }
- }
-
- /*
- * BDW is expected to always have a uniform distribution of EU across
- * subslices with the exception that any one EU in any one subslice may
- * be fused off for die recovery.
- */
- info->eu_per_subslice = info->subslice_total ?
- DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
-
- /*
- * BDW supports slice power gating on devices with more than
- * one slice.
- */
- info->has_slice_pg = (info->slice_total > 1);
- info->has_subslice_pg = 0;
- info->has_eu_pg = 0;
-}
-
-/*
- * Determine various intel_device_info fields at runtime.
- *
- * Use it when either:
- * - it's judged too laborious to fill n static structures with the limit
- * when a simple if statement does the job,
- * - run-time checks (eg read fuse/strap registers) are needed.
- *
- * This function needs to be called:
- * - after the MMIO has been setup as we are reading registers,
- * - after the PCH has been detected,
- * - before the first usage of the fields it can tweak.
- */
-static void intel_device_info_runtime_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_device_info *info;
- enum pipe pipe;
-
- info = (struct intel_device_info *)&dev_priv->info;
-
- /*
- * Skylake and Broxton currently don't expose the topmost plane as its
- * use is exclusive with the legacy cursor and we only want to expose
- * one of those, not both. Until we can safely expose the topmost plane
- * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
- * we don't expose the topmost plane at all to prevent ABI breakage
- * down the line.
- */
- if (IS_BROXTON(dev)) {
- info->num_sprites[PIPE_A] = 2;
- info->num_sprites[PIPE_B] = 2;
- info->num_sprites[PIPE_C] = 1;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 2;
- else
- for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 1;
-
- if (i915.disable_display) {
- DRM_INFO("Display disabled (module parameter)\n");
- info->num_pipes = 0;
- } else if (info->num_pipes > 0 &&
- (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
- HAS_PCH_SPLIT(dev)) {
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 sfuse_strap = I915_READ(SFUSE_STRAP);
-
- /*
- * SFUSE_STRAP is supposed to have a bit signalling the display
- * is fused off. Unfortunately it seems that, at least in
- * certain cases, fused off display means that PCH display
- * reads don't land anywhere. In that case, we read 0s.
- *
- * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
- * should be set when taking over after the firmware.
- */
- if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
- sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (dev_priv->pch_type == PCH_CPT &&
- !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- DRM_INFO("Display fused off, disabling\n");
- info->num_pipes = 0;
- } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- DRM_INFO("PipeC fused off\n");
- info->num_pipes -= 1;
- }
- } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
- u32 dfsm = I915_READ(SKL_DFSM);
- u8 disabled_mask = 0;
- bool invalid;
- int num_bits;
-
- if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- disabled_mask |= BIT(PIPE_A);
- if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- disabled_mask |= BIT(PIPE_B);
- if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- disabled_mask |= BIT(PIPE_C);
-
- num_bits = hweight8(disabled_mask);
-
- switch (disabled_mask) {
- case BIT(PIPE_A):
- case BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_C):
- invalid = true;
- break;
- default:
- invalid = false;
- }
-
- if (num_bits > info->num_pipes || invalid)
- DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
- disabled_mask);
- else
- info->num_pipes -= num_bits;
- }
-
- /* Initialize slice/subslice/EU info */
- if (IS_CHERRYVIEW(dev))
- cherryview_sseu_info_init(dev);
- else if (IS_BROADWELL(dev))
- broadwell_sseu_info_init(dev);
- else if (INTEL_INFO(dev)->gen >= 9)
- gen9_sseu_info_init(dev);
-
- /* Snooping is broken on BXT A stepping. */
- info->has_snoop = !info->has_llc;
- info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
-
- DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
- DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
- DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
- DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
- DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
- DRM_DEBUG_DRIVER("has slice power gating: %s\n",
- info->has_slice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
- info->has_subslice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has EU power gating: %s\n",
- info->has_eu_pg ? "y" : "n");
-}
-
-static void intel_init_dpio(struct drm_i915_private *dev_priv)
-{
- /*
- * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
- * CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(dev_priv)) {
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
- DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
- }
-}
-
-static int i915_workqueues_init(struct drm_i915_private *dev_priv)
-{
- /*
- * The i915 workqueue is primarily used for batched retirement of
- * requests (and thus managing bo) once the task has been completed
- * by the GPU. i915_gem_retire_requests() is called directly when we
- * need high-priority retirement, such as waiting for an explicit
- * bo.
- *
- * It is also used for periodic low-priority events, such as
- * idle-timers and recording error state.
- *
- * All tasks on the workqueue are expected to acquire the dev mutex
- * so there is no point in running more than one instance of the
- * workqueue at any time. Use an ordered one.
- */
- dev_priv->wq = alloc_ordered_workqueue("i915", 0);
- if (dev_priv->wq == NULL)
- goto out_err;
-
- dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->hotplug.dp_wq == NULL)
- goto out_free_wq;
-
- dev_priv->gpu_error.hangcheck_wq =
- alloc_ordered_workqueue("i915-hangcheck", 0);
- if (dev_priv->gpu_error.hangcheck_wq == NULL)
- goto out_free_dp_wq;
-
- return 0;
-
-out_free_dp_wq:
- destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_free_wq:
- destroy_workqueue(dev_priv->wq);
-out_err:
- DRM_ERROR("Failed to allocate workqueues.\n");
-
- return -ENOMEM;
-}
-
-static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
-{
- destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
- destroy_workqueue(dev_priv->hotplug.dp_wq);
- destroy_workqueue(dev_priv->wq);
-}
-
-/**
- * i915_driver_init_early - setup state not requiring device access
- * @dev_priv: device private
- *
- * Initialize everything that is a "SW-only" state, that is state not
- * requiring accessing the device or exposing the driver via kernel internal
- * or userspace interfaces. Example steps belonging here: lock initialization,
- * system memory allocation, setting up device specific attributes and
- * function hooks not requiring accessing the device.
- */
-static int i915_driver_init_early(struct drm_i915_private *dev_priv,
- struct drm_device *dev,
- struct intel_device_info *info)
-{
- struct intel_device_info *device_info;
- int ret = 0;
-
- if (i915_inject_load_failure())
- return -ENODEV;
-
- /* Setup the write-once "constant" device info */
- device_info = (struct intel_device_info *)&dev_priv->info;
- memcpy(device_info, info, sizeof(dev_priv->info));
- device_info->device_id = dev->pdev->device;
-
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->backlight_lock);
- spin_lock_init(&dev_priv->uncore.lock);
- spin_lock_init(&dev_priv->mm.object_stat_lock);
- spin_lock_init(&dev_priv->mmio_flip_lock);
- mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->av_mutex);
- mutex_init(&dev_priv->wm.wm_mutex);
- mutex_init(&dev_priv->pps_mutex);
-
- ret = i915_workqueues_init(dev_priv);
- if (ret < 0)
- return ret;
-
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev);
-
- intel_pm_setup(dev);
- intel_init_dpio(dev_priv);
- intel_power_domains_init(dev_priv);
- intel_irq_init(dev_priv);
- intel_init_display_hooks(dev_priv);
- intel_init_clock_gating_hooks(dev_priv);
- intel_init_audio_hooks(dev_priv);
- i915_gem_load_init(dev);
-
- intel_display_crc_init(dev);
-
- i915_dump_device_info(dev_priv);
-
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
-
- return 0;
-}
-
-/**
- * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
-{
- i915_gem_load_cleanup(dev_priv->dev);
- i915_workqueues_cleanup(dev_priv);
-}
-
-static int i915_mmio_setup(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int mmio_bar;
- int mmio_size;
-
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- /*
- * Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT, which we want to ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (INTEL_INFO(dev)->gen < 5)
- mmio_size = 512 * 1024;
- else
- mmio_size = 2 * 1024 * 1024;
- dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
- if (dev_priv->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
- return -EIO;
- }
-
- /* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev);
-
- return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_teardown_mchbar(dev);
- pci_iounmap(dev->pdev, dev_priv->regs);
-}
-
-/**
- * i915_driver_init_mmio - setup device MMIO
- * @dev_priv: device private
- *
- * Setup minimal device state necessary for MMIO accesses later in the
- * initialization sequence. The setup here should avoid any other device-wide
- * side effects or exposing the driver via kernel internal or user space
- * interfaces.
- */
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- int ret;
-
- if (i915_inject_load_failure())
- return -ENODEV;
-
- if (i915_get_bridge_dev(dev))
- return -EIO;
-
- ret = i915_mmio_setup(dev);
- if (ret < 0)
- goto put_bridge;
-
- intel_uncore_init(dev);
-
- return 0;
-
-put_bridge:
- pci_dev_put(dev_priv->bridge_dev);
-
- return ret;
-}
-
-/**
- * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- intel_uncore_fini(dev);
- i915_mmio_cleanup(dev);
- pci_dev_put(dev_priv->bridge_dev);
-}
-
-/**
- * i915_driver_init_hw - setup state requiring device access
- * @dev_priv: device private
- *
- * Setup state that requires accessing the device, but doesn't require
- * exposing the driver via kernel internal or userspace interfaces.
- */
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- uint32_t aperture_size;
- int ret;
-
- if (i915_inject_load_failure())
- return -ENODEV;
-
- intel_device_info_runtime_init(dev);
-
- ret = i915_ggtt_init_hw(dev);
- if (ret)
- return ret;
-
- ret = i915_ggtt_enable_hw(dev);
- if (ret) {
- DRM_ERROR("failed to enable GGTT\n");
- goto out_ggtt;
- }
-
- /* WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over. */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_ggtt;
- }
-
- ret = i915_kick_out_vgacon(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_ggtt;
- }
-
- pci_set_master(dev->pdev);
-
- /* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev))
- dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
-
- /* 965GM sometimes incorrectly writes to hardware status page (HWS)
- * using 32bit addressing, overwriting memory if HWS is located
- * above 4GB.
- *
- * The documentation also mentions an issue with undefined
- * behaviour if any general state is accessed within a page above 4GB,
- * which also needs to be handled carefully.
- */
- if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
-
- aperture_size = ggtt->mappable_end;
-
- ggtt->mappable =
- io_mapping_create_wc(ggtt->mappable_base,
- aperture_size);
- if (!ggtt->mappable) {
- ret = -EIO;
- goto out_ggtt;
- }
-
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
- aperture_size);
-
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
- intel_uncore_sanitize(dev);
-
- intel_opregion_setup(dev);
-
- i915_gem_load_init_fences(dev_priv);
-
- /* On the 945G/GM, the chipset reports the MSI capability on the
- * integrated graphics even though the support isn't actually there
- * according to the published specs. It doesn't appear to function
- * correctly in testing on 945G.
- * This may be a side effect of MSI having been made available for PEG
- * and the registers being closely associated.
- *
- * According to chipset errata, on the 965GM, MSI interrupts may
- * be lost or delayed, but we use them anyway to avoid
- * stuck interrupts on some machines.
- */
- if (!IS_I945G(dev) && !IS_I945GM(dev)) {
- if (pci_enable_msi(dev->pdev) < 0)
- DRM_DEBUG_DRIVER("can't enable MSI");
- }
-
- return 0;
-
-out_ggtt:
- i915_ggtt_cleanup_hw(dev);
-
- return ret;
-}
-
-/**
- * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
- pm_qos_remove_request(&dev_priv->pm_qos);
- arch_phys_wc_del(ggtt->mtrr);
- io_mapping_free(ggtt->mappable);
- i915_ggtt_cleanup_hw(dev);
-}
-
-/**
- * i915_driver_register - register the driver with the rest of the system
- * @dev_priv: device private
- *
- * Perform any steps necessary to make the driver available via kernel
- * internal or userspace interfaces.
- */
-static void i915_driver_register(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- i915_gem_shrinker_init(dev_priv);
- /*
- * Notify a valid surface after modesetting,
- * when running inside a VM.
- */
- if (intel_vgpu_active(dev))
- I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
-
- i915_setup_sysfs(dev);
-
- if (INTEL_INFO(dev_priv)->num_pipes) {
- /* Must be done after probing outputs */
- intel_opregion_init(dev);
- acpi_video_register();
- }
-
- if (IS_GEN5(dev_priv))
- intel_gpu_ips_init(dev_priv);
-
- i915_audio_component_init(dev_priv);
-}
-
-/**
- * i915_driver_unregister - cleanup the registration done in i915_driver_register()
- * @dev_priv: device private
- */
-static void i915_driver_unregister(struct drm_i915_private *dev_priv)
-{
- i915_audio_component_cleanup(dev_priv);
- intel_gpu_ips_teardown();
- acpi_video_unregister();
- intel_opregion_fini(dev_priv->dev);
- i915_teardown_sysfs(dev_priv->dev);
- i915_gem_shrinker_cleanup(dev_priv);
-}
-
-/**
- * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
- *
- * The driver load routine has to do several things:
- * - drive output discovery via intel_modeset_init()
- * - initialize the memory manager
- * - allocate initial config memory
- * - setup the DRM framebuffer with the allocated memory
- */
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct drm_i915_private *dev_priv;
- int ret = 0;
-
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- dev->dev_private = dev_priv;
- /* Must be set before calling __i915_printk */
- dev_priv->dev = dev;
-
- ret = i915_driver_init_early(dev_priv, dev,
- (struct intel_device_info *)flags);
-
- if (ret < 0)
- goto out_free_priv;
-
- intel_runtime_pm_get(dev_priv);
-
- ret = i915_driver_init_mmio(dev_priv);
- if (ret < 0)
- goto out_runtime_pm_put;
-
- ret = i915_driver_init_hw(dev_priv);
- if (ret < 0)
- goto out_cleanup_mmio;
-
- /*
- * TODO: move the vblank init and parts of modeset init steps into one
- * of the i915_driver_init_/i915_driver_register functions according
- * to the role/effect of the given init step.
- */
- if (INTEL_INFO(dev)->num_pipes) {
- ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
- if (ret)
- goto out_cleanup_hw;
- }
-
- ret = i915_load_modeset_init(dev);
- if (ret < 0)
- goto out_cleanup_vblank;
-
- i915_driver_register(dev_priv);
-
- intel_runtime_pm_enable(dev_priv);
-
- intel_runtime_pm_put(dev_priv);
-
- return 0;
-
-out_cleanup_vblank:
- drm_vblank_cleanup(dev);
-out_cleanup_hw:
- i915_driver_cleanup_hw(dev_priv);
-out_cleanup_mmio:
- i915_driver_cleanup_mmio(dev_priv);
-out_runtime_pm_put:
- intel_runtime_pm_put(dev_priv);
- i915_driver_cleanup_early(dev_priv);
-out_free_priv:
- i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-
- kfree(dev_priv);
-
- return ret;
-}
-
-int i915_driver_unload(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- intel_fbdev_fini(dev);
-
- ret = i915_gem_suspend(dev);
- if (ret) {
- DRM_ERROR("failed to idle hardware: %d\n", ret);
- return ret;
- }
-
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
- i915_driver_unregister(dev_priv);
-
- drm_vblank_cleanup(dev);
-
- intel_modeset_cleanup(dev);
-
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
- kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
-
- vga_switcheroo_unregister_client(dev->pdev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
-
- intel_csr_ucode_fini(dev_priv);
-
- /* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_destroy_error_state(dev);
-
- /* Flush any outstanding unpin_work. */
- flush_workqueue(dev_priv->wq);
-
- intel_guc_ucode_fini(dev);
- mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_engines(dev);
- i915_gem_context_fini(dev);
- mutex_unlock(&dev->struct_mutex);
- intel_fbc_cleanup_cfb(dev_priv);
-
- intel_power_domains_fini(dev_priv);
-
- i915_driver_cleanup_hw(dev_priv);
- i915_driver_cleanup_mmio(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
- i915_driver_cleanup_early(dev_priv);
- kfree(dev_priv);
-
- return 0;
-}
-
-int i915_driver_open(struct drm_device *dev, struct drm_file *file)
-{
- int ret;
-
- ret = i915_gem_open(dev, file);
- if (ret)
- return ret;
-
- return 0;
-}
-
-/**
- * i915_driver_lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited. In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- *
- * Additionally, in the non-mode setting case, we'll tear down the GTT
- * and DMA structures, since the kernel won't be using them, and clean
- * up any GEM state.
- */
-void i915_driver_lastclose(struct drm_device *dev)
-{
- intel_fbdev_restore_mode(dev);
- vga_switcheroo_process_delayed_switch();
-}
-
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
-{
- mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file);
- i915_gem_release(dev, file);
- mutex_unlock(&dev->struct_mutex);
-}
-
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
-
- kfree(file_priv);
-}
-
-static int
-i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- return -ENODEV;
-}
-
-const struct drm_ioctl_desc i915_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
- DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
-};
-
-int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 85c4debf47e0..95ddd56b89f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,400 +27,92 @@
*
*/
-#include <linux/device.h>
#include <linux/acpi.h>
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-
-#include <linux/apple-gmux.h>
-#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/oom.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_crtc_helper.h>
-
-static struct drm_driver driver;
-
-#define GEN_DEFAULT_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
-
-#define GEN_CHV_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- CHV_PIPE_C_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- CHV_TRANSCODER_C_OFFSET, }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
- CHV_PALETTE_C_OFFSET }
-
-#define CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
-
-#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
-
-#define BDW_COLORS \
- .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
-#define CHV_COLORS \
- .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
-
-static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_845g_info = {
- .gen = 2, .num_pipes = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .num_pipes = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_mobile = 1, .num_pipes = 2,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .num_pipes = 2,
- .has_hotplug = 1,
- .has_overlay = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .num_pipes = 2,
- .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
- .has_overlay = 1,
- .supports_tv = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_overlay = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_g4x = 1, .num_pipes = 2,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
- .supports_tv = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_overlay = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
+#include <linux/vt.h>
+#include <acpi/video.h>
-static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
- .has_llc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
- .has_llc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-#define GEN7_FEATURES \
- .gen = 7, .num_pipes = 3, \
- .need_gfx_hws = 1, .has_hotplug = 1, \
- .has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .has_llc = 1, \
- GEN_DEFAULT_PIPEOFFSETS, \
- IVB_CURSOR_OFFSETS
-
-static const struct intel_device_info intel_ivybridge_d_info = {
- GEN7_FEATURES,
- .is_ivybridge = 1,
-};
-
-static const struct intel_device_info intel_ivybridge_m_info = {
- GEN7_FEATURES,
- .is_ivybridge = 1,
- .is_mobile = 1,
-};
-
-static const struct intel_device_info intel_ivybridge_q_info = {
- GEN7_FEATURES,
- .is_ivybridge = 1,
- .num_pipes = 0, /* legal, last one wins */
-};
-
-#define VLV_FEATURES \
- .gen = 7, .num_pipes = 2, \
- .need_gfx_hws = 1, .has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .display_mmio_offset = VLV_DISPLAY_BASE, \
- GEN_DEFAULT_PIPEOFFSETS, \
- CURSOR_OFFSETS
-
-static const struct intel_device_info intel_valleyview_m_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
- .is_mobile = 1,
-};
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/i915_drm.h>
-static const struct intel_device_info intel_valleyview_d_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
-};
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_drv.h"
-#define HSW_FEATURES \
- GEN7_FEATURES, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
- .has_fpga_dbg = 1
+static struct drm_driver driver;
-static const struct intel_device_info intel_haswell_d_info = {
- HSW_FEATURES,
- .is_haswell = 1,
-};
+static unsigned int i915_load_fail_count;
-static const struct intel_device_info intel_haswell_m_info = {
- HSW_FEATURES,
- .is_haswell = 1,
- .is_mobile = 1,
-};
+bool __i915_inject_load_failure(const char *func, int line)
+{
+ if (i915_load_fail_count >= i915.inject_load_failure)
+ return false;
-#define BDW_FEATURES \
- HSW_FEATURES, \
- BDW_COLORS
+ if (++i915_load_fail_count == i915.inject_load_failure) {
+ DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+ i915.inject_load_failure, func, line);
+ return true;
+ }
-static const struct intel_device_info intel_broadwell_d_info = {
- BDW_FEATURES,
- .gen = 8,
-};
+ return false;
+}
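
/*
 * A minimal usage sketch for the failure-injection helper above (the
 * function name and checkpoint below are illustrative, not part of the
 * patch): each init step checks i915_inject_load_failure(), which counts
 * checkpoints against the i915.inject_load_failure module parameter, so
 * loading with e.g. "modprobe i915 inject_load_failure=3" (assuming the
 * usual i915 modparam naming) makes the third checkpoint report a failure.
 */
static int example_init_step(struct drm_i915_private *dev_priv)
{
	/* Bail out here when this checkpoint is the injected failure. */
	if (i915_inject_load_failure())
		return -ENODEV;

	/* ... the real initialization work for this step ... */
	return 0;
}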
-static const struct intel_device_info intel_broadwell_m_info = {
- BDW_FEATURES,
- .gen = 8, .is_mobile = 1,
-};
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
-static const struct intel_device_info intel_broadwell_gt3d_info = {
- BDW_FEATURES,
- .gen = 8,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *dev = dev_priv->drm.dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ return;
-static const struct intel_device_info intel_broadwell_gt3m_info = {
- BDW_FEATURES,
- .gen = 8, .is_mobile = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+ va_start(args, fmt);
-static const struct intel_device_info intel_cherryview_info = {
- .gen = 8, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .is_cherryview = 1,
- .display_mmio_offset = VLV_DISPLAY_BASE,
- GEN_CHV_PIPEOFFSETS,
- CURSOR_OFFSETS,
- CHV_COLORS,
-};
+ vaf.fmt = fmt;
+ vaf.va = &args;
-static const struct intel_device_info intel_skylake_info = {
- BDW_FEATURES,
- .is_skylake = 1,
- .gen = 9,
-};
+ dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
-static const struct intel_device_info intel_skylake_gt3_info = {
- BDW_FEATURES,
- .is_skylake = 1,
- .gen = 9,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+ if (is_error && !shown_bug_once) {
+ dev_notice(dev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
-static const struct intel_device_info intel_broxton_info = {
- .is_preliminary = 1,
- .is_broxton = 1,
- .gen = 9,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .num_pipes = 3,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
- BDW_COLORS,
-};
+ va_end(args);
+}
-static const struct intel_device_info intel_kabylake_info = {
- BDW_FEATURES,
- .is_kabylake = 1,
- .gen = 9,
-};
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+ return i915.inject_load_failure &&
+ i915_load_fail_count == i915.inject_load_failure;
+}
-static const struct intel_device_info intel_kabylake_gt3_info = {
- BDW_FEATURES,
- .is_kabylake = 1,
- .gen = 9,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+#define i915_load_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, \
+ i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
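
/*
 * A minimal usage sketch for i915_load_error() above (the wrapper below
 * is hypothetical): fatal initialization errors go through this macro so
 * that deliberately injected failures are logged at KERN_DEBUG instead
 * of KERN_ERR.
 */
static int example_report_init_failure(struct drm_i915_private *dev_priv,
				       int err)
{
	if (err)
		i915_load_error(dev_priv,
				"Example init step failed (%d)\n", err);
	return err;
}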
-/*
- * Make sure any device matches here are from most specific to most
- * general. For example, since the Quanta match is based on the subsystem
- * and subvendor IDs, we need it to come before the more general IVB
- * PCI ID matches, otherwise we'll use the wrong info struct above.
- */
-static const struct pci_device_id pciidlist[] = {
- INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_845g_info),
- INTEL_I85X_IDS(&intel_i85x_info),
- INTEL_I865G_IDS(&intel_i865g_info),
- INTEL_I915G_IDS(&intel_i915g_info),
- INTEL_I915GM_IDS(&intel_i915gm_info),
- INTEL_I945G_IDS(&intel_i945g_info),
- INTEL_I945GM_IDS(&intel_i945gm_info),
- INTEL_I965G_IDS(&intel_i965g_info),
- INTEL_G33_IDS(&intel_g33_info),
- INTEL_I965GM_IDS(&intel_i965gm_info),
- INTEL_GM45_IDS(&intel_gm45_info),
- INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_IDS(&intel_pineview_info),
- INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
- INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
- INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
- INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
- INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
- INTEL_HSW_D_IDS(&intel_haswell_d_info),
- INTEL_HSW_M_IDS(&intel_haswell_m_info),
- INTEL_VLV_M_IDS(&intel_valleyview_m_info),
- INTEL_VLV_D_IDS(&intel_valleyview_d_info),
- INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
- INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
- INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
- INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
- INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_info),
- INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
- INTEL_BXT_IDS(&intel_broxton_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_info),
- INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
- INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- {0, 0, 0}
-};
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
@@ -450,9 +142,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
return ret;
}
-void intel_detect_pch(struct drm_device *dev)
+static void intel_detect_pch(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -519,8 +211,10 @@ void intel_detect_pch(struct drm_device *dev)
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
- pch->subsystem_vendor == 0x1af4 &&
- pch->subsystem_device == 0x1100)) {
+ pch->subsystem_vendor ==
+ PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ pch->subsystem_device ==
+ PCI_SUBDEVICE_ID_QEMU)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
@@ -534,9 +228,9 @@ void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch);
}
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return false;
if (i915.semaphores >= 0)
@@ -546,22 +240,1172 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.enable_execlists)
return false;
- /* Until we get further testing... */
- if (IS_GEN8(dev))
- return false;
-
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false;
#endif
return true;
}
+static int i915_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ case I915_PARAM_LAST_DISPATCH:
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pdev->device;
+ break;
+ case I915_PARAM_REVISION:
+ value = dev->pdev->revision;
+ break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = dev_priv->num_fence_regs;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = 1;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ break;
+ case I915_PARAM_HAS_BSD2:
+ value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(dev);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = USES_PPGTT(dev);
+ break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev_priv);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version(dev_priv);
+ break;
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ value = 1;
+ break;
+ case I915_PARAM_MMAP_VERSION:
+ value = 1;
+ break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = INTEL_INFO(dev)->subslice_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = INTEL_INFO(dev)->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = HAS_RESOURCE_STREAMER(dev);
+ break;
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_POOLED_EU:
+ value = HAS_POOLED_EU(dev);
+ break;
+ case I915_PARAM_MIN_EU_IN_POOL:
+ value = INTEL_INFO(dev)->min_eu_in_pool;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ if (put_user(value, param->value))
+ return -EFAULT;
+
+ return 0;
+}
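
/*
 * Userspace view of the GETPARAM ioctl handled above: a minimal sketch,
 * assuming libdrm's drmIoctl() and the i915 uapi header (include paths
 * depend on the libdrm installation); the helper name is illustrative.
 *
 * Needed headers, roughly:
 *	#include <stdbool.h>
 *	#include <xf86drm.h>
 *	#include <i915_drm.h>
 */
static bool example_has_execbuf2(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXECBUF2,
		.value = &value,
	};

	/* drmIoctl() returns 0 on success; i915_getparam() fills *gp.value. */
	return drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value;
}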
+
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (!dev_priv->bridge_dev) {
+ DRM_ERROR("bridge device not found\n");
+ return -1;
+ }
+ return 0;
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ return ret;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+ return 0;
+}
+
+/* Setup MCHBAR if possible; note in mchbar_need_disable whether it must be disabled again on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, we don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ u32 deven_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ deven_val);
+ } else {
+ u32 mchbar_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ mchbar_val);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+ intel_modeset_vga_set_state(dev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume_switcheroo(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_info("switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+
+static void i915_gem_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /*
+ * Neither the BIOS, ourselves nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload. Afterwards we then clean up the
+ * GEM state tracking, flushing off the requests and leaving the
+ * system in a known idle state.
+ *
+ * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+ * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_reset(dev);
+ i915_gem_cleanup_engines(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ WARN_ON(!list_empty(&to_i915(dev)->context_list));
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ ret = intel_bios_init(dev_priv);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+ /* If we have > 1 VGA card, then we need to arbitrate access
+ * to the common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+ * then we do not take part in VGA arbitration and the
+ * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ goto out;
+
+ intel_register_dsm_handler();
+
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+ if (ret)
+ goto cleanup_vga_client;
+
+ /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+ intel_update_rawclk(dev_priv);
+
+ intel_power_domains_init_hw(dev_priv, false);
+
+ intel_csr_ucode_init(dev_priv);
+
+ ret = intel_irq_install(dev_priv);
+ if (ret)
+ goto cleanup_csr;
+
+ intel_setup_gmbus(dev);
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
+ intel_modeset_init(dev);
+
+ intel_guc_init(dev);
+
+ ret = i915_gem_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
+ intel_modeset_gem_init(dev);
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return 0;
+
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev_priv);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+cleanup_gem:
+ i915_gem_fini(dev);
+cleanup_irq:
+ intel_guc_fini(dev);
+ drm_irq_uninstall(dev);
+ intel_teardown_gmbus(dev);
+cleanup_csr:
+ intel_csr_ucode_fini(dev_priv);
+ intel_power_domains_fini(dev_priv);
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_FB)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ bool primary;
+ int ret;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].size = ggtt->mappable_end;
+
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+
+ return ret;
+}
+#else
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ DRM_INFO("Replacing VGA console driver\n");
+
+ console_lock();
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+ console_unlock();
+
+ return ret;
+}
+#endif
+
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+ /*
+ * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+ * CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(dev_priv)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+ }
+}
+
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+ if (dev_priv->wq == NULL)
+ goto out_err;
+
+ dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->hotplug.dp_wq == NULL)
+ goto out_free_wq;
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(dev_priv->wq);
+out_err:
+ DRM_ERROR("Failed to allocate workqueues.\n");
+
+ return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->wq);
+}
+
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+ const struct pci_device_id *ent)
+{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
+ struct intel_device_info *device_info;
+ int ret = 0;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ /* Setup the write-once "constant" device info */
+ device_info = mkwrite_device_info(dev_priv);
+ memcpy(device_info, match_info, sizeof(*device_info));
+ device_info->device_id = dev_priv->drm.pdev->device;
+
+ BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+ device_info->gen_mask = BIT(device_info->gen - 1);
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ mutex_init(&dev_priv->backlight_lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
+ mutex_init(&dev_priv->sb_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->av_mutex);
+ mutex_init(&dev_priv->wm.wm_mutex);
+ mutex_init(&dev_priv->pps_mutex);
+
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_gvt_init(dev_priv);
+ if (ret < 0)
+ goto err_workqueues;
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(&dev_priv->drm);
+
+ intel_pm_setup(&dev_priv->drm);
+ intel_init_dpio(dev_priv);
+ intel_power_domains_init(dev_priv);
+ intel_irq_init(dev_priv);
+ intel_init_display_hooks(dev_priv);
+ intel_init_clock_gating_hooks(dev_priv);
+ intel_init_audio_hooks(dev_priv);
+ i915_gem_load_init(&dev_priv->drm);
+
+ intel_display_crc_init(&dev_priv->drm);
+
+ intel_device_info_dump(dev_priv);
+
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev_priv))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
+
+ return 0;
+
+err_workqueues:
+ i915_workqueues_cleanup(dev_priv);
+ return ret;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+ i915_gem_load_cleanup(&dev_priv->drm);
+ i915_workqueues_cleanup(dev_priv);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mmio_bar;
+ int mmio_size;
+
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (INTEL_INFO(dev)->gen < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+ if (dev_priv->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+
+ return -EIO;
+ }
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+
+ return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_teardown_mchbar(dev);
+ pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
+/**
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
+ *
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
+ */
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ if (i915_get_bridge_dev(dev))
+ return -EIO;
+
+ ret = i915_mmio_setup(dev);
+ if (ret < 0)
+ goto put_bridge;
+
+ intel_uncore_init(dev_priv);
+
+ return 0;
+
+put_bridge:
+ pci_dev_put(dev_priv->bridge_dev);
+
+ return ret;
+}
+
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+
+ intel_uncore_fini(dev_priv);
+ i915_mmio_cleanup(dev);
+ pci_dev_put(dev_priv->bridge_dev);
+}
+
+static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+{
+ i915.enable_execlists =
+ intel_sanitize_enable_execlists(dev_priv,
+ i915.enable_execlists);
+
+ /*
+ * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * user's requested state against the hardware/driver capabilities. We
+ * do this now so that we can print out any log messages once rather
+ * than every time we check intel_enable_ppgtt().
+ */
+ i915.enable_ppgtt =
+ intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+}
+
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ uint32_t aperture_size;
+ int ret;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ intel_device_info_runtime_init(dev_priv);
+
+ intel_sanitize_options(dev_priv);
+
+ ret = i915_ggtt_init_hw(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
+
+ /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over. */
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_ggtt;
+ }
+
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_ggtt;
+ }
+
+ pci_set_master(dev->pdev);
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev)) {
+ ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ if (ret) {
+ DRM_ERROR("failed to set DMA mask\n");
+
+ goto out_ggtt;
+ }
+ }
+
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+ if (ret) {
+ DRM_ERROR("failed to set DMA mask\n");
+
+ goto out_ggtt;
+ }
+ }
+
+ aperture_size = ggtt->mappable_end;
+
+ ggtt->mappable =
+ io_mapping_create_wc(ggtt->mappable_base,
+ aperture_size);
+ if (!ggtt->mappable) {
+ ret = -EIO;
+ goto out_ggtt;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
+ aperture_size);
+
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ intel_uncore_sanitize(dev_priv);
+
+ intel_opregion_setup(dev_priv);
+
+ i915_gem_load_init_fences(dev_priv);
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ *
+ * According to chipset errata, on the 965GM, MSI interrupts may
+ * be lost or delayed, but we use them anyway to avoid
+ * stuck interrupts on some machines.
+ */
+ if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (pci_enable_msi(dev->pdev) < 0)
+ DRM_DEBUG_DRIVER("can't enable MSI");
+ }
+
+ return 0;
+
+out_ggtt:
+ i915_ggtt_cleanup_hw(dev);
+
+ return ret;
+}
+
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ pm_qos_remove_request(&dev_priv->pm_qos);
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_free(ggtt->mappable);
+ i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+
+ i915_gem_shrinker_init(dev_priv);
+
+ /*
+ * Notify a valid surface after modesetting,
+ * when running inside a VM.
+ */
+ if (intel_vgpu_active(dev_priv))
+ I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
+ /* Reveal our presence to userspace */
+ if (drm_dev_register(dev, 0) == 0) {
+ i915_debugfs_register(dev_priv);
+ i915_setup_sysfs(dev);
+ } else
+ DRM_ERROR("Failed to register driver for userspace access!\n");
+
+ if (INTEL_INFO(dev_priv)->num_pipes) {
+ /* Must be done after probing outputs */
+ intel_opregion_register(dev_priv);
+ acpi_video_register();
+ }
+
+ if (IS_GEN5(dev_priv))
+ intel_gpu_ips_init(dev_priv);
+
+ i915_audio_component_init(dev_priv);
+
+ /*
+ * Some ports require correctly set-up hpd registers for detection to
+ * work properly (leading to ghost connected connector status), e.g. VGA
+ * on gm45. Hence we can only set up the initial fbdev config after hpd
+ * irqs are fully enabled. We do it last so that the async config
+ * cannot run before the connectors are registered.
+ */
+ intel_fbdev_initial_config_async(dev);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+ i915_audio_component_cleanup(dev_priv);
+
+ intel_gpu_ips_teardown();
+ acpi_video_unregister();
+ intel_opregion_unregister(dev_priv);
+
+ i915_teardown_sysfs(&dev_priv->drm);
+ i915_debugfs_unregister(dev_priv);
+ drm_dev_unregister(&dev_priv->drm);
+
+ i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (i915.nuclear_pageflip)
+ driver.driver_features |= DRIVER_ATOMIC;
+
+ ret = -ENOMEM;
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv)
+ ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
+ if (ret) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "[" DRM_NAME ":%s] allocation failed\n", __func__);
+ kfree(dev_priv);
+ return ret;
+ }
+
+ dev_priv->drm.pdev = pdev;
+ dev_priv->drm.dev_private = dev_priv;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_free_priv;
+
+ pci_set_drvdata(pdev, &dev_priv->drm);
+
+ ret = i915_driver_init_early(dev_priv, ent);
+ if (ret < 0)
+ goto out_pci_disable;
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_driver_init_mmio(dev_priv);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_init_hw(dev_priv);
+ if (ret < 0)
+ goto out_cleanup_mmio;
+
+ /*
+ * TODO: move the vblank init and parts of modeset init steps into one
+ * of the i915_driver_init_/i915_driver_register functions according
+ * to the role/effect of the given init step.
+ */
+ if (INTEL_INFO(dev_priv)->num_pipes) {
+ ret = drm_vblank_init(&dev_priv->drm,
+ INTEL_INFO(dev_priv)->num_pipes);
+ if (ret)
+ goto out_cleanup_hw;
+ }
+
+ ret = i915_load_modeset_init(&dev_priv->drm);
+ if (ret < 0)
+ goto out_cleanup_vblank;
+
+ i915_driver_register(dev_priv);
+
+ intel_runtime_pm_enable(dev_priv);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+
+out_cleanup_vblank:
+ drm_vblank_cleanup(&dev_priv->drm);
+out_cleanup_hw:
+ i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+ i915_driver_cleanup_mmio(dev_priv);
+out_runtime_pm_put:
+ intel_runtime_pm_put(dev_priv);
+ i915_driver_cleanup_early(dev_priv);
+out_pci_disable:
+ pci_disable_device(pdev);
+out_free_priv:
+ i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+ drm_dev_unref(&dev_priv->drm);
+ return ret;
+}
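
/*
 * A minimal sketch of the PCI glue that now hands off to
 * i915_driver_load(); the wrapper name and its placement are assumptions
 * (the removed i915_pci_probe() further below shows the checks such a
 * probe callback performs before loading).
 */
static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	/* Only bind to function 0 of the device, as before. */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	return i915_driver_load(pdev, ent);
}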
+
+void i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_fbdev_fini(dev);
+
+ if (i915_gem_suspend(dev))
+ DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ i915_driver_unregister(dev_priv);
+
+ drm_vblank_cleanup(dev);
+
+ intel_modeset_cleanup(dev);
+
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
+ }
+ kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+ dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+ intel_csr_ucode_fini(dev_priv);
+
+ /* Free error state after interrupts are fully disabled. */
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ i915_destroy_error_state(dev);
+
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
+
+ intel_guc_fini(dev);
+ i915_gem_fini(dev);
+ intel_fbc_cleanup_cfb(dev_priv);
+
+ intel_power_domains_fini(dev_priv);
+
+ i915_driver_cleanup_hw(dev_priv);
+ i915_driver_cleanup_mmio(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ i915_driver_cleanup_early(dev_priv);
+}
+
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ int ret;
+
+ ret = i915_gem_open(dev, file);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+static void i915_driver_lastclose(struct drm_device *dev)
+{
+ intel_fbdev_restore_mode(dev);
+ vga_switcheroo_process_delayed_switch();
+}
+
+static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_close(dev, file);
+ i915_gem_release(dev, file);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
drm_modeset_lock_all(dev);
@@ -586,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
pci_power_t opregion_target_state;
int error;
@@ -614,7 +1458,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev);
@@ -632,10 +1476,10 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_notify_adapter(dev, opregion_target_state);
+ intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_forcewake_reset(dev, false);
- intel_opregion_fini(dev);
+ intel_uncore_forcewake_reset(dev_priv, false);
+ intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -653,7 +1497,7 @@ out:
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(drm_dev);
bool fw_csr;
int ret;
@@ -715,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
- if (!dev || !dev->dev_private) {
+ if (!dev) {
DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
@@ -737,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
disable_rpm_wakeref_asserts(dev_priv);
@@ -753,7 +1597,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
- intel_opregion_setup(dev);
+ intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev);
drm_mode_config_reset(dev);
@@ -771,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);
@@ -781,7 +1625,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev);
@@ -798,7 +1642,7 @@ static int i915_drm_resume(struct drm_device *dev)
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
- intel_opregion_init(dev);
+ intel_opregion_register(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
@@ -806,7 +1650,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_opregion_notify_adapter(dev, PCI_D0);
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
drm_kms_helper_poll_enable(dev);
@@ -817,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
/*
@@ -874,9 +1718,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_early_sanitize(dev, true);
+ intel_uncore_early_sanitize(dev_priv, true);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
@@ -884,7 +1728,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv);
}
- intel_uncore_sanitize(dev);
+ intel_uncore_sanitize(dev_priv);
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -927,14 +1771,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = &dev_priv->drm;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;
- intel_reset_gt_powersave(dev);
+ intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
@@ -948,24 +1792,11 @@ int i915_reset(struct drm_device *dev)
goto error;
}
- i915_gem_reset(dev);
-
- ret = intel_gpu_reset(dev, ALL_ENGINES);
-
- /* Also reset the gpu hangman. */
- if (error->stop_rings != 0) {
- DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- error->stop_rings = 0;
- if (ret == -ENODEV) {
- DRM_INFO("Reset not implemented, but ignoring "
- "error for simulated gpu hangs\n");
- ret = 0;
- }
- }
+ pr_notice("drm/i915: Resetting chip after gpu hang\n");
- if (i915_stop_ring_allow_warn(dev_priv))
- pr_notice("drm/i915: Resetting chip after gpu hang\n");
+ i915_gem_reset(dev);
+ ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1005,7 +1836,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
return 0;
@@ -1015,51 +1846,12 @@ error:
return ret;
}
-static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct intel_device_info *intel_info =
- (struct intel_device_info *) ent->driver_data;
-
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
- return -ENODEV;
- }
-
- /* Only bind to function 0 of the device. Early generations
- * used function 1 as a placeholder for multi-head. This causes
- * us confusion instead, especially on the systems where both
- * functions have the same PCI-ID!
- */
- if (PCI_FUNC(pdev->devfn))
- return -ENODEV;
-
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
- return -EPROBE_DEFER;
-
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void
-i915_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
static int i915_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- if (!drm_dev || !drm_dev->dev_private) {
+ if (!drm_dev) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
@@ -1072,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev)
static int i915_pm_suspend_late(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -1091,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev)
static int i915_pm_poweroff_late(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1101,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev)
static int i915_pm_resume_early(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1111,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1119,6 +1911,49 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
+/* freeze: before creating the hibernation image */
+static int i915_pm_freeze(struct device *dev)
+{
+ return i915_pm_suspend(dev);
+}
+
+static int i915_pm_freeze_late(struct device *dev)
+{
+ int ret;
+
+ ret = i915_pm_suspend_late(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_freeze_late(dev_to_i915(dev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *dev)
+{
+ return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ return i915_pm_resume(dev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *dev)
+{
+ return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_restore(struct device *dev)
+{
+ return i915_pm_resume(dev);
+}
+
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1318,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
u32 val;
int err;
-#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
-
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
if (force_on)
@@ -1329,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
if (!force_on)
return 0;
- err = wait_for(COND, 20);
+ err = intel_wait_for_register(dev_priv,
+ VLV_GTLC_SURVIVABILITY_REG,
+ VLV_GFX_CLK_STATUS_BIT,
+ VLV_GFX_CLK_STATUS_BIT,
+ 20);
if (err)
DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
return err;
-#undef COND
}
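
/*
 * The conversions in this hunk all follow one pattern: an open-coded
 *
 *	#define COND ((I915_READ(reg) & mask) == value)
 *	err = wait_for(COND, timeout_ms);
 *
 * poll loop becomes a single call to
 *
 *	intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms);
 *
 * which waits until (I915_READ(reg) & mask) == value or the timeout in
 * milliseconds expires. A minimal sketch of the new form, mirroring the
 * force-on wait above (helper name illustrative; the 0-vs-error return
 * convention is assumed to match wait_for()):
 */
static int example_wait_for_gfx_clock(struct drm_i915_private *dev_priv)
{
	/* Wait up to 20ms for the GFX clock force-on to be acknowledged. */
	return intel_wait_for_register(dev_priv,
				       VLV_GTLC_SURVIVABILITY_REG,
				       VLV_GFX_CLK_STATUS_BIT,
				       VLV_GFX_CLK_STATUS_BIT,
				       20);
}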
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
@@ -1350,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
POSTING_READ(VLV_GTLC_WAKE_CTRL);
-#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
- allow)
- err = wait_for(COND, 1);
+ err = intel_wait_for_register(dev_priv,
+ VLV_GTLC_PW_STATUS,
+ VLV_GTLC_ALLOWWAKEACK,
+ allow,
+ 1);
if (err)
DRM_ERROR("timeout disabling GT waking\n");
+
return err;
-#undef COND
}
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
@@ -1368,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
val = wait_for_on ? mask : 0;
-#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
- if (COND)
+ if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
@@ -1380,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
*/
- err = wait_for(COND, 3);
+ err = intel_wait_for_register(dev_priv,
+ VLV_GTLC_PW_STATUS, mask, val,
+ 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
onoff(wait_for_on));
return err;
-#undef COND
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -1443,7 +2281,7 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@@ -1479,10 +2317,10 @@ static int intel_runtime_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+ if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1517,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
@@ -1543,7 +2378,7 @@ static int intel_runtime_suspend(struct device *device)
return ret;
}
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev_priv, false);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1557,14 +2392,14 @@ static int intel_runtime_suspend(struct device *device)
* FIXME: We really should find a document that references the arguments
* used below!
*/
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
- intel_opregion_notify_adapter(dev, PCI_D3hot);
+ intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
} else {
/*
* current versions of firmware which depend on this opregion
@@ -1573,11 +2408,14 @@ static int intel_runtime_suspend(struct device *device)
* to distinguish it from notifications that might be sent via
* the suspend path.
*/
- intel_opregion_notify_adapter(dev, PCI_D1);
+ intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
assert_forcewakes_inactive(dev_priv);
+ if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+ intel_hpd_poll_init(dev_priv);
+
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
@@ -1586,7 +2424,7 @@ static int intel_runtime_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1597,7 +2435,7 @@ static int intel_runtime_resume(struct device *device)
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
- intel_opregion_notify_adapter(dev, PCI_D0);
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1624,7 +2462,7 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
- gen6_update_ring_freq(dev);
+ gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1636,8 +2474,6 @@ static int intel_runtime_resume(struct device *device)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
- intel_enable_gt_powersave(dev);
-
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
@@ -1648,7 +2484,7 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
-static const struct dev_pm_ops i915_pm_ops = {
+const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
@@ -1673,14 +2509,14 @@ static const struct dev_pm_ops i915_pm_ops = {
* @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE]
*/
- .freeze = i915_pm_suspend,
- .freeze_late = i915_pm_suspend_late,
- .thaw_early = i915_pm_resume_early,
- .thaw = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
- .restore_early = i915_pm_resume_early,
- .restore = i915_pm_resume,
+ .restore_early = i915_pm_restore_early,
+ .restore = i915_pm_restore,
/* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
@@ -1707,6 +2543,68 @@ static const struct file_operations i915_driver_fops = {
.llseek = noop_llseek,
};
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
+static const struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+};
+
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
@@ -1714,18 +2612,12 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER | DRIVER_MODESET,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = i915_debugfs_init,
- .debugfs_cleanup = i915_debugfs_cleanup,
-#endif
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
@@ -1738,6 +2630,7 @@ static struct drm_driver driver = {
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
+ .num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -1746,56 +2639,3 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
-
-static struct pci_driver i915_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
-};
-
-static int __init i915_init(void)
-{
- driver.num_ioctls = i915_max_ioctl;
-
- /*
- * Enable KMS by default, unless explicitly overriden by
- * either the i915.modeset prarameter or by the
- * vga_text_mode_force boot option.
- */
-
- if (i915.modeset == 0)
- driver.driver_features &= ~DRIVER_MODESET;
-
- if (vgacon_text_force() && i915.modeset == -1)
- driver.driver_features &= ~DRIVER_MODESET;
-
- if (!(driver.driver_features & DRIVER_MODESET)) {
- /* Silently fail loading to not upset userspace. */
- DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
- return 0;
- }
-
- if (i915.nuclear_pageflip)
- driver.driver_features |= DRIVER_ATOMIC;
-
- return drm_pci_init(&driver, &i915_pci_driver);
-}
-
-static void __exit i915_exit(void)
-{
- if (!(driver.driver_features & DRIVER_MODESET))
- return; /* Never loaded a driver. */
-
- drm_pci_exit(&driver, &i915_pci_driver);
-}
-
-module_init(i915_init);
-module_exit(i915_exit);
-
-MODULE_AUTHOR("Tungsten Graphics, Inc.");
-MODULE_AUTHOR("Intel Corporation");
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc3f2e6842e7..21f939074abc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -47,6 +47,7 @@
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
+#include <drm/drm_auth.h>
#include "i915_params.h"
#include "i915_reg.h"
@@ -61,12 +62,14 @@
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
+#include "intel_gvt.h"
+
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160425"
+#define DRIVER_DATE "20160711"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -281,6 +284,9 @@ struct i915_hotplug {
u32 short_port_mask;
struct work_struct dig_port_work;
+ struct work_struct poll_init_work;
+ bool poll_enabled;
+
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
* the non-DP HPD could block the workqueue on a mode config
@@ -317,21 +323,36 @@ struct i915_hotplug {
for_each_if ((__ports_mask) & (1 << (__port)))
#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
- &dev->mode_config.plane_list, \
+ &(dev)->mode_config.plane_list, \
base.head)
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if ((plane_mask) & \
+ (1 << drm_plane_index(&intel_plane->base)))
+
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
@@ -340,7 +361,7 @@ struct i915_hotplug {
#define for_each_intel_connector(dev, intel_connector) \
list_for_each_entry(intel_connector, \
- &dev->mode_config.connector_list, \
+ &(dev)->mode_config.connector_list, \
base.head)
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -462,6 +483,7 @@ struct drm_i915_error_state {
struct timeval time;
char error_msg[128];
+ bool simulated;
int iommu;
u32 reset_count;
u32 suspend_count;
@@ -493,6 +515,7 @@ struct drm_i915_error_state {
bool valid;
/* Software tracked state */
bool waiting;
+ int num_waiters;
int hangcheck_score;
enum intel_ring_hangcheck_action hangcheck_action;
int num_requests;
@@ -538,6 +561,12 @@ struct drm_i915_error_state {
u32 tail;
} *requests;
+ struct drm_i915_error_waiter {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 seqno;
+ } *waiters;
+
struct {
u32 gfx_mode;
union {
@@ -588,6 +617,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *newstate);
void (*initial_watermarks)(struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +642,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
- void (*hpd_irq_setup)(struct drm_device *dev);
+ void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -735,6 +765,7 @@ struct intel_csr {
func(is_valleyview) sep \
func(is_cherryview) sep \
func(is_haswell) sep \
+ func(is_broadwell) sep \
func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
@@ -749,7 +780,8 @@ struct intel_csr {
func(has_llc) sep \
func(has_snoop) sep \
func(has_ddi) sep \
- func(has_fpga_dbg)
+ func(has_fpga_dbg) sep \
+ func(has_pooled_eu)
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
@@ -757,9 +789,10 @@ struct intel_csr {
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
- u8 num_pipes:3;
+ u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
+ u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
/* Register offsets for the various display pipes and transcoders */
@@ -774,6 +807,7 @@ struct intel_device_info {
u8 subslice_per_slice;
u8 eu_total;
u8 eu_per_subslice;
+ u8 min_eu_in_pool;
/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
u8 subslice_7eu[3];
u8 has_slice_pg:1;
@@ -821,9 +855,8 @@ struct i915_ctx_hang_stats {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
-#define CONTEXT_NO_ZEROMAP (1<<0)
/**
- * struct intel_context - as the name implies, represents a context.
+ * struct i915_gem_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
@@ -841,33 +874,40 @@ struct i915_ctx_hang_stats {
* Contexts are memory images used by the hardware to store copies of their
* internal state.
*/
-struct intel_context {
+struct i915_gem_context {
struct kref ref;
- int user_handle;
- uint8_t remap_slice;
struct drm_i915_private *i915;
- int flags;
struct drm_i915_file_private *file_priv;
- struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
- /* Legacy ring buffer submission */
- struct {
- struct drm_i915_gem_object *rcs_state;
- bool initialized;
- } legacy_hw_ctx;
+ struct i915_ctx_hang_stats hang_stats;
- /* Execlists */
- struct {
+ unsigned long flags;
+#define CONTEXT_NO_ZEROMAP BIT(0)
+#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
+ /* Unique identifier for this context, used by the hw for tracking */
+ unsigned hw_id;
+ u32 user_handle;
+
+ u32 ggtt_alignment;
+
+ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
- int pin_count;
struct i915_vma *lrc_vma;
- u64 lrc_desc;
uint32_t *lrc_reg_state;
+ u64 lrc_desc;
+ int pin_count;
+ bool initialised;
} engine[I915_NUM_ENGINES];
+ u32 ring_size;
+ u32 desc_template;
+ struct atomic_notifier_head status_notifier;
+ bool execlists_force_single_submission;
struct list_head link;
+
+ u8 remap_slice;
};
enum fb_op_origin {
@@ -1116,6 +1156,8 @@ struct intel_gen6_power_mgmt {
bool interrupts_enabled;
u32 pm_iir;
+ u32 pm_intr_keep;
+
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
* Soft limits are those which are used for the dynamic reclocking done
@@ -1283,37 +1325,11 @@ struct i915_gem_mm {
struct list_head fence_list;
/**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * When we detect an idle GPU, we want to turn on
- * powersaving features. So once we see that there
- * are no more requests outstanding and no more
- * arrive within a small period of time, we fire
- * off the idle_work.
- */
- struct delayed_work idle_work;
-
- /**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
- /**
- * Is the GPU currently considered idle, or busy executing userspace
- * requests? Whilst idle, we attempt to power down the hardware and
- * display clocks. In order to reduce the effect on performance, there
- * is a slight delay before we do so.
- */
- bool busy;
-
/* the indicator for dispatch video commands on two BSD rings */
unsigned int bsd_ring_dispatch_index;
@@ -1350,7 +1366,6 @@ struct i915_gpu_error {
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
- struct workqueue_struct *hangcheck_wq;
struct delayed_work hangcheck_work;
/* For reset and error_state handling. */
@@ -1387,20 +1402,19 @@ struct i915_gpu_error {
#define I915_WEDGED (1 << 31)
/**
+ * Waitqueue to signal when a hang is detected. Used for waiters
+ * to release the struct_mutex so the reset can proceed.
+ */
+ wait_queue_head_t wait_queue;
+
+ /**
* Waitqueue to signal when the reset has completed. Used by clients
* that wait for dev_priv->mm.wedged to settle.
*/
wait_queue_head_t reset_queue;
- /* Userspace knobs for gpu hang simulation;
- * combines both a ring mask, and extra flags
- */
- u32 stop_rings;
-#define I915_STOP_RING_ALLOW_BAN (1 << 31)
-#define I915_STOP_RING_ALLOW_WARN (1 << 30)
-
/* For missed irq/seqno simulation. */
- unsigned int test_irq_rings;
+ unsigned long test_irq_rings;
};
enum modeset_restore {
@@ -1489,6 +1503,7 @@ struct intel_vbt_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
+ enum intel_backlight_type type;
} backlight;
/* MIPI DSI */
@@ -1581,7 +1596,7 @@ struct skl_ddb_allocation {
};
struct skl_wm_values {
- bool dirty[I915_MAX_PIPES];
+ unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1698,7 +1713,7 @@ struct i915_execbuffer_params {
uint64_t batch_obj_vm_offset;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch_obj;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct drm_i915_gem_request *request;
};
@@ -1710,7 +1725,8 @@ struct intel_wm_config {
};
struct drm_i915_private {
- struct drm_device *dev;
+ struct drm_device drm;
+
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
@@ -1725,6 +1741,8 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
+ struct intel_gvt gvt;
+
struct intel_guc guc;
struct intel_csr csr;
@@ -1748,6 +1766,7 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
+ struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
@@ -1803,13 +1822,17 @@ struct drm_i915_private {
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int skl_boot_cdclk;
+ unsigned int skl_preferred_vco_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
+ struct {
+ unsigned int vco, ref;
+ } cdclk_pll;
+
/**
* wq - Driver workqueue for GEM.
*
@@ -1839,6 +1862,13 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
+ /* The hw wants to have a stable context identifier for the lifetime
+ * of the context (for OA, PASID, faults, etc). This is limited
+ * in execlists to 21 bits.
+ */
+ struct ida context_hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+
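
The 21-bit limit above makes an ida the natural allocator for hardware context ids. A minimal sketch of a bounded allocation, with a hypothetical helper name; note MAX_CONTEXT_HW_ID is exclusive, which matches the 'end' argument of ida_simple_get():

static int example_alloc_hw_id(struct drm_i915_private *dev_priv,
			       unsigned int *out)
{
	int id;

	/* ids become reusable only after ida_simple_remove() on context free */
	id = ida_simple_get(&dev_priv->context_hw_ida,
			    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (id < 0)
		return id;

	*out = id;
	return 0;
}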
/* Kernel Modesetting */
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1951,9 +1981,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /* Committed wm config */
- struct intel_wm_config config;
-
/*
* The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store
@@ -1976,6 +2003,13 @@ struct drm_i915_private {
* cstate->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
+
+ /*
+ * Set during HW readout of watermarks/DDB. Some platforms
+ * need to know when we're still using BIOS-provided values
+ * (which we don't fully trust).
+ */
+ bool distrust_bios_wm;
} wm;
struct i915_runtime_pm pm;
@@ -1988,9 +2022,35 @@ struct drm_i915_private {
int (*init_engines)(struct drm_device *dev);
void (*cleanup_engine)(struct intel_engine_cs *engine);
void (*stop_engine)(struct intel_engine_cs *engine);
- } gt;
- struct intel_context *kernel_context;
+ /**
+ * Is the GPU currently considered idle, or busy executing
+ * userspace requests? Whilst idle, we allow runtime power
+ * management to power down the hardware and display clocks.
+ * In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ unsigned int active_engines;
+ bool awake;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ /**
+ * When we detect an idle GPU, we want to turn on
+ * powersaving features. So once we see that there
+ * are no more requests outstanding and no more
+ * arrive within a small period of time, we fire
+ * off the idle_work.
+ */
+ struct delayed_work idle_work;
+ } gt;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2005,7 +2065,7 @@ struct drm_i915_private {
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
- return dev->dev_private;
+ return container_of(dev, struct drm_i915_private, drm);
}
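
A quick sketch (illustration only, hypothetical caller) of the round trip this container_of() pattern gives: &dev_priv->drm is handed out wherever a struct drm_device is expected, and to_i915() recovers the private structure without a dev_private pointer.

static void example_round_trip(struct drm_i915_private *dev_priv)
{
	/* downcast: the drm_device is now an embedded member */
	struct drm_device *dev = &dev_priv->drm;

	/* upcast: container_of() gets back the enclosing private struct */
	WARN_ON(to_i915(dev) != dev_priv);
}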
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
@@ -2176,6 +2236,7 @@ struct drm_i915_gem_object {
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ unsigned int has_wc_mmap;
unsigned int pin_display;
struct sg_table *pages;
@@ -2228,9 +2289,81 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits);
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+ struct scatterlist *sgp;
+ union {
+ unsigned long pfn;
+ dma_addr_t dma;
+ };
+ unsigned int curr;
+ unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+ struct sgt_iter s = { .sgp = sgl };
+
+ if (s.sgp) {
+ s.max = s.curr = s.sgp->offset;
+ s.max += s.sgp->length;
+ if (dma)
+ s.dma = sg_dma_address(s.sgp);
+ else
+ s.pfn = page_to_pfn(sg_page(s.sgp));
+ }
+
+ return s;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * If the entry is the last, return NULL; otherwise, step to the next
+ * element in the array (@sg@+1). If that's a chain pointer, follow it;
+ * otherwise just return the pointer to the current element.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+ return sg_is_last(sg) ? NULL :
+ likely(!sg_is_chain(++sg)) ? sg :
+ sg_chain_ptr(sg);
+}
+
+/**
+ * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap: DMA address (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_dma(__dmap, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
+ ((__dmap) = (__iter).dma + (__iter).curr); \
+ (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
+ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp: page pointer (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
+ ((__pp) = (__iter).pfn == 0 ? NULL : \
+ pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+ (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
+ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
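
A usage sketch for the two iterators (hypothetical caller, not part of the patch): sgt_iter carries the current scatterlist entry plus a byte cursor, so each PAGE_SIZE step costs a couple of additions rather than a fresh sg_page()/sg_dma_address() lookup.

static void example_walk(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;
	dma_addr_t dma;

	for_each_sgt_page(page, iter, sgt)
		set_page_dirty(page);	/* one iteration per backing page */

	for_each_sgt_dma(dma, iter, sgt)
		(void)dma;		/* e.g. write one GTT/PPGTT entry */
}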
/**
* Request queue structure.
@@ -2252,7 +2385,7 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
- unsigned reset_counter;
+ struct intel_signal_node signaling;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -2279,6 +2412,9 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
+ /** Preallocated space in the ringbuffer reserved for emitting the request */
+ u32 reserved_space;
+
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
@@ -2289,9 +2425,20 @@ struct drm_i915_gem_request {
* i915_gem_request_free() will then decrement the refcount on the
* context.
*/
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct intel_ringbuffer *ringbuf;
+ /**
+ * Context related to the previous request.
+ * As the contexts are accessed by the hardware until the switch is
+ * completed to a new context, the hardware may still be writing
+ * to the context object after the breadcrumb is visible. We must
+ * not unpin/unbind/prune that object whilst still active and so
+ * we keep the previous context pinned until the following (this)
+ * request is retired.
+ */
+ struct i915_gem_context *previous_context;
+
/** Batch buffer related to this request if any (used for
error state dump only) */
struct drm_i915_gem_object *batch_obj;
@@ -2328,11 +2475,13 @@ struct drm_i915_gem_request {
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
+ /** Execlists context hardware id. */
+ unsigned ctx_hw_id;
};
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx);
+ struct i915_gem_context *ctx);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2360,23 +2509,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
- struct drm_device *dev;
-
- if (!req)
- return;
-
- dev = req->engine->dev;
- if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
-}
-
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
@@ -2504,9 +2639,29 @@ struct drm_i915_cmd_table {
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define REVID_FOREVER 0xff
+#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+
+#define GEN_FOREVER (0)
+/*
+ * Returns true if Gen is in inclusive range [Start, End].
+ *
+ * Use GEN_FOREVER for an unbounded start and/or end.
+ */
+#define IS_GEN(p, s, e) ({ \
+ unsigned int __s = (s), __e = (e); \
+ BUILD_BUG_ON(!__builtin_constant_p(s)); \
+ BUILD_BUG_ON(!__builtin_constant_p(e)); \
+ if ((__s) != GEN_FOREVER) \
+ __s = (s) - 1; \
+ if ((__e) == GEN_FOREVER) \
+ __e = BITS_PER_LONG - 1; \
+ else \
+ __e = (e) - 1; \
+ !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+})
+
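
A short usage sketch (hypothetical helper name) showing both a closed range and GEN_FOREVER as an open end:

static bool example_in_range(struct drm_i915_private *dev_priv)
{
	/* gen6 up to and including gen8 */
	if (IS_GEN(dev_priv, 6, 8))
		return true;

	/* gen9 and anything newer */
	return IS_GEN(dev_priv, 9, GEN_FOREVER);
}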
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -2539,7 +2694,7 @@ struct drm_i915_cmd_table {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@@ -2591,6 +2746,8 @@ struct drm_i915_cmd_table {
#define SKL_REVID_D0 0x3
#define SKL_REVID_E0 0x4
#define SKL_REVID_F0 0x5
+#define SKL_REVID_G0 0x6
+#define SKL_REVID_H0 0x7
#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
@@ -2616,29 +2773,34 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
-#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
-
-#define RENDER_RING (1<<RCS)
-#define BSD_RING (1<<VCS)
-#define BLT_RING (1<<BCS)
-#define VEBOX_RING (1<<VECS)
-#define BSD2_RING (1<<VCS2)
-#define ALL_ENGINES (~0)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
-#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
-#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
+#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
+#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
+#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
+#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
+#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
+#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
+#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+
+#define ENGINE_MASK(id) BIT(id)
+#define RENDER_RING ENGINE_MASK(RCS)
+#define BSD_RING ENGINE_MASK(VCS)
+#define BLT_RING ENGINE_MASK(BCS)
+#define VEBOX_RING ENGINE_MASK(VECS)
+#define BSD2_RING ENGINE_MASK(VCS2)
+#define ALL_ENGINES (~0)
+
+#define HAS_ENGINE(dev_priv, id) \
+ (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+
+#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
+#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
+#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
+#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
+#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
HAS_EDRAM(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -2656,9 +2818,10 @@ struct drm_i915_cmd_table {
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
/* WaRsDisableCoarsePowerGating:skl,bxt */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
- IS_SKL_GT3(dev) || \
- IS_SKL_GT4(dev))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+ (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
+ IS_SKL_GT3(dev_priv) || \
+ IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2696,12 +2859,18 @@ struct drm_i915_cmd_table {
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_GEN9(dev))
-#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
-#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+/*
+ * For now, anything with a GuC requires uCode loading, and then supports
+ * command submission once loaded. But these are logically independent
+ * properties, so we have separate macros to test them.
+ */
+#define HAS_GUC(dev) (IS_GEN9(dev))
+#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
+#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
@@ -2710,6 +2879,8 @@ struct drm_i915_cmd_table {
!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
!IS_BROXTON(dev))
+#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2746,13 +2917,22 @@ struct drm_i915_cmd_table {
#include "i915_trace.h"
-extern const struct drm_ioctl_desc i915_ioctls[];
-extern int i915_max_ioctl;
+static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
-/* i915_dma.c */
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt);
+
+/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
const char *fmt, ...);
@@ -2760,21 +2940,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#define i915_report_error(dev_priv, fmt, ...) \
__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
-extern void i915_driver_lastclose(struct drm_device * dev);
-extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file);
-extern void i915_driver_postclose(struct drm_device *dev,
- struct drm_file *file);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
+extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2784,30 +2956,51 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
/* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
/* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
+{
+ unsigned long delay;
+
+ if (unlikely(!i915.enable_hangcheck))
+ return;
+
+ /* Don't continually defer the hangcheck so that it is always run at
+ * least once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
+ queue_delayed_work(system_long_wq,
+ &dev_priv->gpu_error.hangcheck_work, delay);
+}
+
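
The "always run at least once" behaviour relies on queue_delayed_work() being a no-op for an already pending work item; a tiny sketch of that property (names hypothetical, illustration only):

static void example_kick_twice(struct delayed_work *dwork)
{
	queue_delayed_work(system_long_wq, dwork, HZ);		/* arms the timer */
	queue_delayed_work(system_long_wq, dwork, 10 * HZ);	/* already pending: ignored */
}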
__printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev,
+extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake);
-extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-extern void intel_uncore_fini(struct drm_device *dev);
-extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
+extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -2823,9 +3016,26 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms);
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms);
+
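
A hedged usage sketch for the polling helpers declared above; the register and mask are chosen purely for illustration, and the final argument is the timeout in milliseconds:

static int example_wait_threads_idle(struct drm_i915_private *dev_priv)
{
	/* poll until the GT thread status bits clear, or give up after 500ms */
	return intel_wait_for_register(dev_priv,
				       GEN6_GT_THREAD_STATUS_REG,
				       GEN6_GT_THREAD_STATUS_CORE_MASK, 0,
				       500);
}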
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt.initialized;
+}
+
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
- return to_i915(dev)->vgpu.active;
+ return dev_priv->vgpu.active;
}
void
@@ -2882,7 +3092,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
ibx_display_interrupt_update(dev_priv, bits, 0);
}
-
/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -2921,7 +3130,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_init_userptr(struct drm_device *dev);
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2931,11 +3140,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
@@ -2990,6 +3201,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+static inline dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+{
+ if (n < obj->get_page.last) {
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+ }
+
+ while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+ obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+ if (unlikely(sg_is_chain(obj->get_page.sg)))
+ obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+ }
+
+ return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+}
+
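
A small sketch of the access pattern the cached get_page cursor above is tuned for (hypothetical caller): monotonically increasing lookups advance the cursor cheaply, while a lookup below the cached index rewinds to the start of the list.

static dma_addr_t example_last_dma(struct drm_i915_gem_object *obj,
				   int first, int count)
{
	dma_addr_t addr = 0;
	int i;

	for (i = first; i < first + count; i++)	/* forward walk: O(1) per step */
		addr = i915_gem_object_get_dma_address(obj, i);

	return addr;
}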
static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
@@ -3066,6 +3294,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -3075,31 +3308,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
- bool lazy_coherency)
+static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
{
- if (!lazy_coherency && req->engine->irq_seqno_barrier)
- req->engine->irq_seqno_barrier(req->engine);
- return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
- bool lazy_coherency)
+static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
- if (!lazy_coherency && req->engine->irq_seqno_barrier)
- req->engine->irq_seqno_barrier(req->engine);
- return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->seqno);
}
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us)
+{
+ return (i915_gem_request_started(request) &&
+ __i915_spin_request(request, state, timeout_us));
+}
+
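
A sketch of the spin-before-sleep optimisation these helpers support (timeout chosen arbitrarily): the caller busywaits only if the request has already started executing, otherwise it falls straight through to the normal sleeping wait.

static bool example_try_busywait(struct drm_i915_gem_request *req)
{
	/* spin for at most 5us; returns true if the seqno landed meanwhile */
	return i915_spin_request(req, TASK_UNINTERRUPTIBLE, 5);
}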
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3142,27 +3378,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
-static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gpu_error.stop_rings == 0 ||
- dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
-}
-
-static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gpu_error.stop_rings == 0 ||
- dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
-}
-
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *batch_obj,
@@ -3227,8 +3450,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
@@ -3263,14 +3484,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3284,12 +3499,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
alignment, flags | PIN_GLOBAL);
}
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
- return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
-}
-
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
static inline void
@@ -3313,28 +3522,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct i915_gem_context *ctx;
+
+ lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
+
+ ctx = idr_find(&file_priv->context_idr, id);
+ if (!ctx)
+ return ERR_PTR(-ENOENT);
+
+ return ctx;
+}
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
{
kref_get(&ctx->ref);
}
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
{
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
kref_put(&ctx->ref, i915_gem_context_free);
}
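
A hedged caller sketch for the new lookup (illustration only): it returns ERR_PTR(-ENOENT) rather than NULL, and both it and the reference helpers expect struct_mutex to be held.

static int example_use_context(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file_priv, id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_reference(ctx);
	/* ... use ctx while struct_mutex is held ... */
	i915_gem_context_unreference(ctx);
	return 0;
}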
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
@@ -3347,6 +3572,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3361,9 +3588,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush();
}
@@ -3404,7 +3631,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
@@ -3418,12 +3645,14 @@ int i915_verify_lists(struct drm_device *dev);
#endif
/* i915_debugfs.c */
-int i915_debugfs_init(struct drm_minor *minor);
-void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
+static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
@@ -3442,18 +3671,19 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3502,31 +3732,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
/* intel_opregion.c */
#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
return 0;
}
static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
return 0;
}
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
return -ENODEV;
}
@@ -3541,36 +3773,45 @@ static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
+/* intel_device_info.c */
+static inline struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *dev_priv)
+{
+ return (struct intel_device_info *)&dev_priv->info;
+}
+
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_dump(struct drm_i915_private *dev_priv);
+
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
-extern void intel_connector_unregister(struct intel_connector *);
+extern int intel_connector_register(struct drm_connector *);
+extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
-extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
@@ -3599,6 +3840,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
@@ -3672,6 +3931,7 @@ __raw_write(64, q)
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
/* "Broadcast RGB" property */
@@ -3735,12 +3995,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
schedule_timeout_uninterruptible(remaining_jiffies);
}
}
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *req)
+static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
{
- if (engine->trace_irq_req == NULL && engine->irq_get(engine))
- i915_gem_request_assign(&engine->trace_irq_req, req);
+ struct intel_engine_cs *engine = req->engine;
+
+ /* Before we do the heavier coherent read of the seqno,
+ * check the value (hopefully) in the CPU cacheline.
+ */
+ if (i915_gem_request_completed(req))
+ return true;
+
+ /* Ensure our read of the seqno is coherent so that we
+ * do not "miss an interrupt" (i.e. if this is the last
+ * request and the seqno write from the GPU is not visible
+ * by the time the interrupt fires, we will see that the
+ * request is incomplete and go back to sleep awaiting
+ * another interrupt that will never come.)
+ *
+ * Strictly, we only need to do this once after an interrupt,
+ * but it is easier and safer to do it every time the waiter
+ * is woken.
+ */
+ if (engine->irq_seqno_barrier &&
+ READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+ cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+ struct task_struct *tsk;
+
+ /* The ordering of irq_posted versus applying the barrier
+ * is crucial. The clearing of the current irq_posted must
+ * be visible before we perform the barrier operation,
+ * such that if a subsequent interrupt arrives, irq_posted
+ * is reasserted and our task rewoken (which causes us to
+ * do another __i915_request_irq_complete() immediately
+ * and reapply the barrier). Conversely, if the clear
+ * occurs after the barrier, then an interrupt that arrived
+ * whilst we waited on the barrier would not trigger a
+ * barrier on the next pass, and the read may not see the
+ * seqno update.
+ */
+ engine->irq_seqno_barrier(engine);
+
+ /* If we consume the irq, but we are no longer the bottom-half,
+	 * the real bottom-half may not have serialised its own
+ * seqno check with the irq-barrier (i.e. may have inspected
+ * the seqno before we believe it coherent since they see
+ * irq_posted == false but we are still running).
+ */
+ rcu_read_lock();
+ tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ if (tsk && tsk != current)
+ /* Note that if the bottom-half is changed as we
+ * are sending the wake-up, the new bottom-half will
+		 * be woken by whoever made the change. We only have
+		 * to worry about the case where we steal the irq-posted flag
+		 * for ourselves.
+ */
+ wake_up_process(tsk);
+ rcu_read_unlock();
+
+ if (i915_gem_request_completed(req))
+ return true;
+ }
+
+ /* We need to check whether any gpu reset happened in between
+ * the request being submitted and now. If a reset has occurred,
+	 * the seqno will have been advanced past ours and our request
+ * is complete. If we are in the process of handling a reset,
+ * the request is effectively complete as the rendering will
+ * be discarded, but we need to return in order to drop the
+ * struct_mutex.
+ */
+ if (i915_reset_in_progress(&req->i915->gpu_error))
+ return true;
+
+ return false;
}
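
/*
 * Editor's sketch, not part of the patch above: the irq_posted handling in
 * __i915_request_irq_complete() is an instance of the generic "consume the
 * flag, then apply the barrier, then re-check" ordering used to avoid missed
 * wakeups.  The helper below is hypothetical (neither "example_waiter" nor
 * the callbacks exist in the driver) and only illustrates that ordering.
 */
struct example_waiter {
	unsigned int irq_posted;	/* set to 1 by the interrupt handler */
};

static inline bool example_wait_check(struct example_waiter *w,
				      bool (*done)(void *data),
				      void (*barrier)(void *data),
				      void *data)
{
	if (done(data))			/* cheap cacheline check first */
		return true;

	/* Clear irq_posted *before* the barrier: a fresh interrupt then
	 * re-arms the flag and rewakes us, so the barrier is never skipped
	 * for a seqno write we have not yet observed.
	 */
	if (cmpxchg_relaxed(&w->irq_posted, 1, 0)) {
		barrier(data);		/* make the seqno write visible */
		if (done(data))
			return true;
	}

	return false;
}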
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ed6117a0ee84..11681501d7b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return false;
+
if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
return true;
return obj->pin_display;
}
+static int
+insert_mappable_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u32 size)
+{
+ memset(node, 0, sizeof(*node));
+ return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
+ size, 0, 0, 0,
+ i915->ggtt.mappable_end,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+}
+
+static void
+remove_mappable_node(struct drm_mm_node *node)
+{
+ drm_mm_remove_node(node);
+}
+
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -107,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
}
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -356,13 +377,13 @@ out:
void *i915_gem_object_alloc(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
kmem_cache_free(dev_priv->objects, obj);
}
@@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
@@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file,
/**
* Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -484,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
*needs_clflush = 0;
- if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+ if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
return -EINVAL;
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
return ret ? - EFAULT : 0;
}
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+ uint64_t page_base, int page_offset,
+ char __user *user_data,
+ unsigned long length, bool pwrite)
+{
+ void __iomem *ioaddr;
+ void *vaddr;
+ uint64_t unwritten;
+
+ ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force *)ioaddr + page_offset;
+ if (pwrite)
+ unwritten = __copy_from_user(vaddr, user_data, length);
+ else
+ unwritten = __copy_to_user(user_data, vaddr, length);
+
+ io_mapping_unmap(ioaddr);
+ return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj, uint64_t size,
+ uint64_t data_offset, uint64_t data_ptr)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_mm_node node;
+ char __user *user_data;
+ uint64_t remain;
+ uint64_t offset;
+ int ret;
+
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+ if (ret) {
+ ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ remove_mappable_node(&node);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+ } else {
+ node.start = i915_gem_obj_ggtt_offset(obj);
+ node.allocated = false;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ goto out_unpin;
+
+ user_data = u64_to_user_ptr(data_ptr);
+ remain = size;
+ offset = data_offset;
+
+ mutex_unlock(&dev->struct_mutex);
+ if (likely(!i915.prefault_disable)) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto out_unpin;
+ }
+ }
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
+ */
+ u32 page_base = node.start;
+ unsigned page_offset = offset_in_page(offset);
+ unsigned page_length = PAGE_SIZE - page_offset;
+ page_length = remain < page_length ? remain : page_length;
+ if (node.allocated) {
+ wmb();
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start,
+ I915_CACHE_NONE, 0);
+ wmb();
+ } else {
+ page_base += offset & PAGE_MASK;
+ }
+ /* This is a slow read/write as it tries to read from
+		 * and write to user memory, which may result in page
+ * faults, and so we cannot perform this under struct_mutex.
+ */
+ if (slow_user_access(ggtt->mappable, page_base,
+ page_offset, user_data,
+ page_length, false)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+ /* The user has modified the object whilst we tried
+ * reading from it, and we now have no idea what domain
+ * the pages should be in. As we have just been touching
+ * them directly, flush everything back to the GTT
+ * domain.
+ */
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ }
+
+out_unpin:
+ if (node.allocated) {
+ wmb();
+ ggtt->base.clear_range(&ggtt->base,
+ node.start, node.size,
+ true);
+ i915_gem_object_unpin_pages(obj);
+ remove_mappable_node(&node);
+ } else {
+ i915_gem_object_ggtt_unpin(obj);
+ }
+out:
+ return ret;
+}
+
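/*
 * Editor's note, illustration only: with PAGE_SIZE = 4096, a pread of
 * size = 0x2000 starting at offset = 0x1830 walks the copy loop above as
 *
 *	iteration 1: page_offset = 0x830, page_length = 0x7d0 (4096 - 0x830)
 *	iteration 2: page_offset = 0x000, page_length = 0x1000
 *	iteration 3: page_offset = 0x000, page_length = 0x830
 *
 * i.e. the first copy is trimmed to the end of its page, middle pages are
 * copied whole, and the tail gets whatever remains; when the object is not
 * mappable, insert_page() re-points the single scratch GGTT page at each
 * source page before the copy.
 */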
static int
i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
@@ -672,6 +835,9 @@ out:
/**
* Reads data from the object referenced by handle.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
*
* On error, the contents of *data are undefined.
*/
@@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- /* prime objects have no backing filp to GEM pread/pwrite
- * pages from.
- */
- if (!obj->base.filp) {
- ret = -EINVAL;
- goto out;
- }
-
trace_i915_gem_object_pread(obj, args->offset, args->size);
ret = i915_gem_shmem_pread(dev, obj, args, file);
+ /* pread for non shmem backed objects */
+ if (ret == -EFAULT || ret == -ENODEV)
+ ret = i915_gem_gtt_pread(dev, obj, args->size,
+ args->offset, args->data_ptr);
+
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping,
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
+ * @i915: i915 device private
+ * @obj: i915 gem object
+ * @args: pwrite arguments structure
+ * @file: drm file pointer
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- ssize_t remain;
- loff_t offset, page_base;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_mm_node node;
+ uint64_t remain, offset;
char __user *user_data;
- int page_offset, page_length, ret;
+ int ret;
+ bool hit_slow_path = false;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ return -EFAULT;
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
- if (ret)
- goto out;
+ if (ret) {
+ ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ remove_mappable_node(&node);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+ } else {
+ node.start = i915_gem_obj_ggtt_offset(obj);
+ node.allocated = false;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
+ }
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
+ intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+ obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
+ offset = args->offset;
remain = args->size;
-
- offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
-
- intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
- while (remain > 0) {
+ while (remain) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = offset & PAGE_MASK;
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
+ u32 page_base = node.start;
+ unsigned page_offset = offset_in_page(offset);
+ unsigned page_length = PAGE_SIZE - page_offset;
+ page_length = remain < page_length ? remain : page_length;
+ if (node.allocated) {
+ wmb(); /* flush the write before we modify the GGTT */
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
+ wmb(); /* flush modifications to the GGTT (insert_page) */
+ } else {
+ page_base += offset & PAGE_MASK;
+ }
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
+		 * If the object is not shmem backed, we retry with the
+		 * path that handles page faults.
*/
if (fast_user_write(ggtt->mappable, page_base,
page_offset, user_data, page_length)) {
- ret = -EFAULT;
- goto out_flush;
+ hit_slow_path = true;
+ mutex_unlock(&dev->struct_mutex);
+ if (slow_user_access(ggtt->mappable,
+ page_base,
+ page_offset, user_data,
+ page_length, true)) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto out_flush;
+ }
+
+ mutex_lock(&dev->struct_mutex);
}
remain -= page_length;
@@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
}
out_flush:
+ if (hit_slow_path) {
+ if (ret == 0 &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+ /* The user has modified the object whilst we tried
+ * reading from it, and we now have no idea what domain
+ * the pages should be in. As we have just been touching
+ * them directly, flush everything back to the GTT
+ * domain.
+ */
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ }
+ }
+
intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+ if (node.allocated) {
+ wmb();
+ ggtt->base.clear_range(&ggtt->base,
+ node.start, node.size,
+ true);
+ i915_gem_object_unpin_pages(obj);
+ remove_mappable_node(&node);
+ } else {
+ i915_gem_object_ggtt_unpin(obj);
+ }
out:
return ret;
}
@@ -1006,7 +1230,7 @@ out:
}
if (needs_clflush_after)
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
else
obj->cache_dirty = true;
@@ -1016,6 +1240,9 @@ out:
/**
* Writes data to the object referenced by handle.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
@@ -1023,7 +1250,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- /* prime objects have no backing filp to GEM pread/pwrite
- * pages from.
- */
- if (!obj->base.filp) {
- ret = -EINVAL;
- goto out;
- }
-
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -EFAULT;
@@ -1079,20 +1298,21 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->tiling_mode == I915_TILING_NONE &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ if (!i915_gem_object_has_struct_page(obj) ||
cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+ ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
	 * textures). Fall back to the shmem path in that case. */
}
- if (ret == -EFAULT || ret == -ENOSPC) {
+ if (ret == -EFAULT) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
- else
+ else if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ else
+ ret = -ENODEV;
}
out:
@@ -1123,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
return 0;
}
-static void fake_irq(unsigned long data)
-{
- wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
static unsigned long local_clock_us(unsigned *cpu)
{
unsigned long t;
@@ -1166,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
return this_cpu != cpu;
}
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+ int state, unsigned long timeout_us)
{
- unsigned long timeout;
unsigned cpu;
/* When waiting for high frequency requests, e.g. during synchronous
@@ -1181,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* takes to sleep on a request, on the order of a microsecond.
*/
- if (req->engine->irq_refcount)
- return -EBUSY;
-
- /* Only spin if we know the GPU is processing this request */
- if (!i915_gem_request_started(req, true))
- return -EAGAIN;
-
- timeout = local_clock_us(&cpu) + 5;
- while (!need_resched()) {
- if (i915_gem_request_completed(req, true))
- return 0;
+ timeout_us += local_clock_us(&cpu);
+ do {
+ if (i915_gem_request_completed(req))
+ return true;
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout, cpu))
+ if (busywait_stop(timeout_us, cpu))
break;
cpu_relax_lowlatency();
- }
+ } while (!need_resched());
- if (i915_gem_request_completed(req, false))
- return 0;
-
- return -EAGAIN;
+ return false;
}
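
/*
 * Editor's sketch, not part of the patch: __i915_spin_request() is the first
 * half of a two-phase wait - poll for a few microseconds in the hope that the
 * request retires without an interrupt, then fall back to a sleeping wait.
 * The composition below is illustrative; "example_two_phase_wait" and the
 * sleep_wait callback are hypothetical.
 */
static int example_two_phase_wait(struct drm_i915_gem_request *req, int state,
				  int (*sleep_wait)(struct drm_i915_gem_request *req,
						    int state))
{
	/* Phase 1: bounded busy-wait, cheap when the GPU is nearly done. */
	if (__i915_spin_request(req, state, 5))
		return 0;

	/* Phase 2: arm the breadcrumb interrupt and sleep until woken
	 * (the real version of this phase is __i915_wait_request() below).
	 */
	return sleep_wait(req, state);
}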
/**
@@ -1213,6 +1412,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 * @req: the request to wait upon
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: RPS client
*
* Note: It is of utmost importance that the passed in seqno and reset_counter
* values have been read by the caller in an smp safe manner. Where read-side
@@ -1229,26 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
s64 *timeout,
struct intel_rps_client *rps)
{
- struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const bool irq_test_in_progress =
- ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- DEFINE_WAIT(wait);
- unsigned long timeout_expire;
+ DEFINE_WAIT(reset);
+ struct intel_wait wait;
+ unsigned long timeout_remain;
s64 before = 0; /* Only to silence a compiler warning. */
- int ret;
+ int ret = 0;
- WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+ might_sleep();
if (list_empty(&req->list))
return 0;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return 0;
- timeout_expire = 0;
+ timeout_remain = MAX_SCHEDULE_TIMEOUT;
if (timeout) {
if (WARN_ON(*timeout < 0))
return -EINVAL;
@@ -1256,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (*timeout == 0)
return -ETIME;
- timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+ timeout_remain = nsecs_to_jiffies_timeout(*timeout);
/*
* Record current time in case interrupted by signal, or wedged.
@@ -1264,75 +1460,76 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
before = ktime_get_raw_ns();
}
- if (INTEL_INFO(dev_priv)->gen >= 6)
- gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
trace_i915_gem_request_wait_begin(req);
- /* Optimistic spin for the next jiffie before touching IRQs */
- ret = __i915_spin_request(req, state);
- if (ret == 0)
- goto out;
-
- if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
- ret = -ENODEV;
- goto out;
- }
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (INTEL_INFO(req->i915)->gen >= 6)
+ gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
- for (;;) {
- struct timer_list timer;
+ /* Optimistic spin for the next ~jiffie before touching IRQs */
+ if (i915_spin_request(req, state, 5))
+ goto complete;
- prepare_to_wait(&engine->irq_queue, &wait, state);
+ set_current_state(state);
+ add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- /* We need to check whether any gpu reset happened in between
- * the request being submitted and now. If a reset has occurred,
- * the request is effectively complete (we either are in the
- * process of or have discarded the rendering and completely
- * reset the GPU. The results of the request are lost and we
- * are free to continue on with the original operation.
+ intel_wait_init(&wait, req->seqno);
+ if (intel_engine_add_wait(req->engine, &wait))
+ /* In order to check that we haven't missed the interrupt
+ * as we enabled it, we need to kick ourselves to do a
+ * coherent check on the seqno before we sleep.
*/
- if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
- ret = 0;
- break;
- }
-
- if (i915_gem_request_completed(req, false)) {
- ret = 0;
- break;
- }
+ goto wakeup;
+ for (;;) {
if (signal_pending_state(state, current)) {
ret = -ERESTARTSYS;
break;
}
- if (timeout && time_after_eq(jiffies, timeout_expire)) {
+ timeout_remain = io_schedule_timeout(timeout_remain);
+ if (timeout_remain == 0) {
ret = -ETIME;
break;
}
- timer.function = NULL;
- if (timeout || missed_irq(dev_priv, engine)) {
- unsigned long expire;
+ if (intel_wait_complete(&wait))
+ break;
- setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
- mod_timer(&timer, expire);
- }
+ set_current_state(state);
- io_schedule();
+wakeup:
+ /* Carefully check if the request is complete, giving time
+ * for the seqno to be visible following the interrupt.
+ * We also have to check in case we are kicked by the GPU
+ * reset in order to drop the struct_mutex.
+ */
+ if (__i915_request_irq_complete(req))
+ break;
- if (timer.function) {
- del_singleshot_timer_sync(&timer);
- destroy_timer_on_stack(&timer);
- }
+ /* Only spin if we know the GPU is processing this request */
+ if (i915_spin_request(req, state, 2))
+ break;
}
- if (!irq_test_in_progress)
- engine->irq_put(engine);
+ remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- finish_wait(&engine->irq_queue, &wait);
-
-out:
+ intel_engine_remove_wait(req->engine, &wait);
+ __set_current_state(TASK_RUNNING);
+complete:
trace_i915_gem_request_wait_end(req);
if (timeout) {
@@ -1351,6 +1548,22 @@ out:
*timeout = 0;
}
+ if (rps && req->seqno == req->engine->last_submitted_seqno) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&req->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&req->i915->rps.client_lock);
+ }
+
return ret;
}
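
/*
 * Editor's sketch, not part of the patch: the RPS handling above amounts to
 * "boost once per busy period, and refund the boost if the waiter turned out
 * to be the only client feeding the GPU".  A stripped-down version of that
 * bookkeeping with a hypothetical client structure (the real one is
 * struct intel_rps_client); "link" must be INIT_LIST_HEAD()-initialised.
 */
struct example_rps_client {
	struct list_head link;		/* on the "already boosted" list */
};

/* Called before sleeping: ask for maximum frequency unless already boosted. */
static void example_boost_on_wait(struct example_rps_client *client,
				  struct list_head *boosted, spinlock_t *lock)
{
	spin_lock(lock);
	if (list_empty(&client->link)) {
		list_add(&client->link, boosted);
		/* ...request maximum GPU frequency here... */
	}
	spin_unlock(lock);
}

/* Called after the wait when this client stalled an otherwise idle GPU:
 * dropping it from the list lets it boost again on its next wait.
 */
static void example_refund_boost(struct example_rps_client *client,
				 spinlock_t *lock)
{
	spin_lock(lock);
	list_del_init(&client->link);
	spin_unlock(lock);
}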
@@ -1413,6 +1626,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
+ if (request->previous_context) {
+ if (i915.enable_execlists)
+ intel_lr_context_unpin(request->previous_context,
+ request->engine);
+ }
+
+ i915_gem_context_unreference(request->ctx);
i915_gem_request_unreference(request);
}
@@ -1422,7 +1642,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
- lockdep_assert_held(&engine->dev->struct_mutex);
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
if (list_empty(&req->list))
return;
@@ -1440,6 +1660,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
/**
* Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
+ * @req: request to wait on
*/
int
i915_wait_request(struct drm_i915_gem_request *req)
@@ -1450,14 +1671,14 @@ i915_wait_request(struct drm_i915_gem_request *req)
interruptible = dev_priv->mm.interruptible;
- BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;
/* If the GPU hung, we want to keep the requests to find the guilty. */
- if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+ if (!i915_reset_in_progress(&dev_priv->gpu_error))
__i915_gem_request_retire__upto(req);
return 0;
@@ -1466,6 +1687,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: waiting for read access or write
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -1516,7 +1739,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
- if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+ if (!i915_reset_in_progress(&req->i915->gpu_error))
__i915_gem_request_retire__upto(req);
}
@@ -1529,7 +1752,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int ret, i, n = 0;
@@ -1580,9 +1803,19 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
return &fpriv->rps;
}
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+ return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+ ORIGIN_GTT : ORIGIN_CPU;
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1633,9 +1866,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- write_domain == I915_GEM_DOMAIN_GTT ?
- ORIGIN_GTT : ORIGIN_CPU);
+ intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
unref:
drm_gem_object_unreference(&obj->base);
@@ -1646,6 +1877,9 @@ unlock:
/**
* Called when user space has done writes to this buffer
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1676,8 +1910,11 @@ unlock:
}
/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
@@ -1736,6 +1973,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
else
addr = -ENOMEM;
up_write(&mm->mmap_sem);
+
+ /* This may race, but that's ok, it only gets set */
+ WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
}
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
@@ -1982,7 +2222,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
return size;
/* Previous chips need a power-of-two fence region when tiling */
- if (INTEL_INFO(dev)->gen == 3)
+ if (IS_GEN3(dev))
gtt_size = 1024*1024;
else
gtt_size = 512*1024;
@@ -1995,7 +2235,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
/**
* i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @obj: object to check
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ * @fenced: is fenced alignment required or not
*
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
@@ -2021,7 +2264,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int ret;
dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2162,7 +2405,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2428,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -2238,12 +2480,12 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int page_count, i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
int ret;
@@ -2340,8 +2582,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- put_page(sg_page_iter_page(&sg_iter));
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
sg_free_table(st);
kfree(st);
@@ -2369,7 +2611,7 @@ err_pages:
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
@@ -2395,6 +2637,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+ if (pages != stack_pages)
+ drm_free_large(pages);
+
+ return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
int ret;
@@ -2407,29 +2687,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
i915_gem_object_pin_pages(obj);
- if (obj->mapping == NULL) {
- struct page **pages;
-
- pages = NULL;
- if (obj->base.size == PAGE_SIZE)
- obj->mapping = kmap(sg_page(obj->pages->sgl));
- else
- pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
- sizeof(*pages),
- GFP_TEMPORARY);
- if (pages != NULL) {
- struct sg_page_iter sg_iter;
- int n;
-
- n = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter,
- obj->pages->nents, 0)
- pages[n++] = sg_page_iter_page(&sg_iter);
-
- obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
- drm_free_large(pages);
- }
- if (obj->mapping == NULL) {
+ if (!obj->mapping) {
+ obj->mapping = i915_gem_object_map(obj);
+ if (!obj->mapping) {
i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM);
}
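
/*
 * Editor's note, illustration only: a typical caller of the pin_map
 * interface, assuming struct_mutex is held and that the matching
 * i915_gem_object_unpin_map() helper is available; the function name and
 * the value written are made up for the example.
 */
static int example_poke_object(struct drm_i915_gem_object *obj)
{
	u32 *vaddr;

	vaddr = i915_gem_object_pin_map(obj);	/* pins pages + kmap/vmap */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	vaddr[0] = 0xdeadbeef;			/* CPU access through the mapping */

	i915_gem_object_unpin_map(obj);		/* drop the pages pin */
	return 0;
}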
@@ -2502,9 +2762,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
}
static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
@@ -2514,7 +2773,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
if (ret)
return ret;
}
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
+
+ /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+ if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+ while (intel_kick_waiters(dev_priv) ||
+ intel_kick_signalers(dev_priv))
+ yield();
+ }
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
@@ -2525,7 +2791,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (seqno == 0)
@@ -2534,7 +2800,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev, seqno - 1);
+ ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
@@ -2550,13 +2816,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
}
int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_init_seqno(dev, 0);
+ int ret = i915_gem_init_seqno(dev_priv, 0);
if (ret)
return ret;
@@ -2567,6 +2831,26 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ dev_priv->gt.active_engines |= intel_engine_flag(engine);
+ if (dev_priv->gt.awake)
+ return;
+
+ intel_runtime_pm_get_noresume(dev_priv);
+ dev_priv->gt.awake = true;
+
+ i915_update_gfx_val(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_busy(dev_priv);
+
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
/*
 * NB: This function is not allowed to fail. Doing so would mean that the
* request is not being tracked for completion but the work itself is
@@ -2577,16 +2861,15 @@ void __i915_add_request(struct drm_i915_gem_request *request,
bool flush_caches)
{
struct intel_engine_cs *engine;
- struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
+ u32 reserved_tail;
int ret;
if (WARN_ON(request == NULL))
return;
engine = request->engine;
- dev_priv = request->i915;
ringbuf = request->ringbuf;
/*
@@ -2594,9 +2877,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- intel_ring_reserved_space_use(ringbuf);
-
request_start = intel_ring_get_tail(ringbuf);
+ reserved_tail = request->reserved_space;
+ request->reserved_space = 0;
+
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2651,56 +2935,42 @@ void __i915_add_request(struct drm_i915_gem_request *request,
}
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
-
- i915_queue_hangcheck(engine->dev);
-
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
-
/* Sanity check that the reserved size was large enough. */
- intel_ring_reserved_space_end(ringbuf);
+ ret = intel_ring_get_tail(ringbuf) - request_start;
+ if (ret < 0)
+ ret += ringbuf->size;
+ WARN_ONCE(ret > reserved_tail,
+ "Not enough space reserved (%d bytes) "
+ "for adding the request (%d bytes)\n",
+ reserved_tail, ret);
+
+ i915_gem_mark_busy(engine);
}
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct intel_context *ctx)
+static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
unsigned long elapsed;
- elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-
if (ctx->hang_stats.banned)
return true;
+ elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
if (ctx->hang_stats.ban_period_seconds &&
elapsed <= ctx->hang_stats.ban_period_seconds) {
- if (!i915_gem_context_is_default(ctx)) {
- DRM_DEBUG("context hanging too fast, banning!\n");
- return true;
- } else if (i915_stop_ring_allow_ban(dev_priv)) {
- if (i915_stop_ring_allow_warn(dev_priv))
- DRM_ERROR("gpu hanging too fast, banning!\n");
- return true;
- }
+ DRM_DEBUG("context hanging too fast, banning!\n");
+ return true;
}
return false;
}
-static void i915_set_reset_status(struct drm_i915_private *dev_priv,
- struct intel_context *ctx,
+static void i915_set_reset_status(struct i915_gem_context *ctx,
const bool guilty)
{
- struct i915_ctx_hang_stats *hs;
-
- if (WARN_ON(!ctx))
- return;
-
- hs = &ctx->hang_stats;
+ struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
if (guilty) {
- hs->banned = i915_context_is_banned(dev_priv, ctx);
+ hs->banned = i915_context_is_banned(ctx);
hs->batch_active++;
hs->guilty_ts = get_seconds();
} else {
@@ -2712,27 +2982,15 @@ void i915_gem_request_free(struct kref *req_ref)
{
struct drm_i915_gem_request *req = container_of(req_ref,
typeof(*req), ref);
- struct intel_context *ctx = req->ctx;
-
- if (req->file_priv)
- i915_gem_request_remove_from_client(req);
-
- if (ctx) {
- if (i915.enable_execlists && ctx != req->i915->kernel_context)
- intel_lr_context_unpin(ctx, req->engine);
-
- i915_gem_context_unreference(ctx);
- }
-
kmem_cache_free(req->i915->requests, req);
}
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2754,26 +3012,16 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->i915, &req->seqno);
if (ret)
goto err;
kref_init(&req->ref);
req->i915 = dev_priv;
req->engine = engine;
- req->reset_counter = reset_counter;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
- if (ret) {
- i915_gem_context_unreference(req->ctx);
- goto err;
- }
-
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
@@ -2781,24 +3029,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
+ req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
if (i915.enable_execlists)
- ret = intel_logical_ring_reserve_space(req);
+ ret = intel_logical_ring_alloc_request_extras(req);
else
- ret = intel_ring_reserve_space(req);
- if (ret) {
- /*
- * At this point, the request is fully allocated even if not
- * fully prepared. Thus it can be cleaned up using the proper
- * free code.
- */
- intel_ring_reserved_space_cancel(req->ringbuf);
- i915_gem_request_unreference(req);
- return ret;
- }
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err_ctx;
*req_out = req;
return 0;
+err_ctx:
+ i915_gem_context_unreference(ctx);
err:
kmem_cache_free(dev_priv->requests, req);
return ret;
@@ -2818,13 +3062,13 @@ err:
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
if (ctx == NULL)
- ctx = to_i915(engine->dev)->kernel_context;
+ ctx = engine->i915->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
@@ -2834,8 +3078,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
+ /* We are called by the error capture and reset at a random
+ * point in time. In particular, note that neither is crucially
+ * ordered with an interrupt. After a hang, the GPU is dead and we
+ * assume that no more writes can happen (we waited long enough for
+	 * all writes that were in flight to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ */
list_for_each_entry(request, &engine->request_list, list) {
- if (i915_gem_request_completed(request, false))
+ if (i915_gem_request_completed(request))
continue;
return request;
@@ -2844,27 +3096,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
return NULL;
}
-static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
bool ring_hung;
request = i915_gem_find_active_request(engine);
-
if (request == NULL)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
- i915_set_reset_status(dev_priv, request->ctx, ring_hung);
-
+ i915_set_reset_status(request->ctx, ring_hung);
list_for_each_entry_continue(request, &engine->request_list, list)
- i915_set_reset_status(dev_priv, request->ctx, false);
+ i915_set_reset_status(request->ctx, false);
}
-static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *buffer;
@@ -2888,13 +3136,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
/* Ensure irq handler finishes or is cancelled. */
tasklet_kill(&engine->irq_tasklet);
- spin_lock_bh(&engine->execlist_lock);
- /* list_splice_tail_init checks for empty lists */
- list_splice_tail_init(&engine->execlist_queue,
- &engine->execlist_retired_req_list);
- spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
+ intel_execlists_cancel_requests(engine);
}
/*
@@ -2931,7 +3173,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
void i915_gem_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
/*
@@ -2940,10 +3182,10 @@ void i915_gem_reset(struct drm_device *dev)
* their reference to the objects, the inspection must be done first.
*/
for_each_engine(engine, dev_priv)
- i915_gem_reset_engine_status(dev_priv, engine);
+ i915_gem_reset_engine_status(engine);
for_each_engine(engine, dev_priv)
- i915_gem_reset_engine_cleanup(dev_priv, engine);
+ i915_gem_reset_engine_cleanup(engine);
i915_gem_context_reset(dev);
@@ -2954,6 +3196,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
+ * @engine: engine to retire requests on
*/
void
i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -2972,7 +3215,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
struct drm_i915_gem_request,
list);
- if (!i915_gem_request_completed(request, true))
+ if (!i915_gem_request_completed(request))
break;
i915_gem_request_retire(request);
@@ -2995,58 +3238,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
i915_gem_object_retire__read(obj, engine->id);
}
- if (unlikely(engine->trace_irq_req &&
- i915_gem_request_completed(engine->trace_irq_req, true))) {
- engine->irq_put(engine);
- i915_gem_request_assign(&engine->trace_irq_req, NULL);
- }
-
WARN_ON(i915_verify_lists(engine->dev));
}
-bool
-i915_gem_retire_requests(struct drm_device *dev)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- bool idle = true;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (dev_priv->gt.active_engines == 0)
+ return;
+
+ GEM_BUG_ON(!dev_priv->gt.awake);
for_each_engine(engine, dev_priv) {
i915_gem_retire_requests_ring(engine);
- idle &= list_empty(&engine->request_list);
- if (i915.enable_execlists) {
- spin_lock_bh(&engine->execlist_lock);
- idle &= list_empty(&engine->execlist_queue);
- spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
- }
+ if (list_empty(&engine->request_list))
+ dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
}
- if (idle)
- mod_delayed_work(dev_priv->wq,
- &dev_priv->mm.idle_work,
+ if (dev_priv->gt.active_engines == 0)
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
msecs_to_jiffies(100));
-
- return idle;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), mm.retire_work.work);
- struct drm_device *dev = dev_priv->dev;
- bool idle;
+ container_of(work, typeof(*dev_priv), gt.retire_work.work);
+ struct drm_device *dev = &dev_priv->drm;
/* Come back later if the device is busy... */
- idle = false;
if (mutex_trylock(&dev->struct_mutex)) {
- idle = i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
- if (!idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+ /* Keep the retire handler running until we are finally idle.
+ * We do not need to do this test under locking as in the worst-case
+ * we queue the retire worker once too often.
+ */
+ if (READ_ONCE(dev_priv->gt.awake))
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
}
@@ -3054,25 +3291,55 @@ static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), mm.idle_work.work);
- struct drm_device *dev = dev_priv->dev;
+ container_of(work, typeof(*dev_priv), gt.idle_work.work);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ unsigned int stuck_engines;
+ bool rearm_hangcheck;
+
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
+
+ if (READ_ONCE(dev_priv->gt.active_engines))
+ return;
+
+ rearm_hangcheck =
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ /* Currently busy, come back later */
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
+ msecs_to_jiffies(50));
+ goto out_rearm;
+ }
+
+ if (dev_priv->gt.active_engines)
+ goto out_unlock;
for_each_engine(engine, dev_priv)
- if (!list_empty(&engine->request_list))
- return;
+ i915_gem_batch_pool_fini(&engine->batch_pool);
- /* we probably should sync with hangcheck here, using cancel_work_sync.
- * Also locking seems to be fubar here, engine->request_list is protected
- * by dev->struct_mutex. */
+ GEM_BUG_ON(!dev_priv->gt.awake);
+ dev_priv->gt.awake = false;
+ rearm_hangcheck = false;
- intel_mark_idle(dev);
+ stuck_engines = intel_kick_waiters(dev_priv);
+ if (unlikely(stuck_engines)) {
+ DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
+ dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
+ }
- if (mutex_trylock(&dev->struct_mutex)) {
- for_each_engine(engine, dev_priv)
- i915_gem_batch_pool_fini(&engine->batch_pool);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_idle(dev_priv);
+ intel_runtime_pm_put(dev_priv);
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->struct_mutex);
+out_rearm:
+ if (rearm_hangcheck) {
+ GEM_BUG_ON(!dev_priv->gt.awake);
+ i915_queue_hangcheck(dev_priv);
}
}
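
/*
 * Editor's sketch, not part of the patch: mark_busy, the retire worker and
 * the idle worker above form a small busy/idle state machine around
 * gt.awake and gt.active_engines (both manipulated under struct_mutex).
 * Reduced to its skeleton with hypothetical names:
 */
struct example_gt {
	bool awake;			/* holds a runtime-pm wakeref while true */
	unsigned int active_engines;	/* bitmask of engines with requests */
};

static void example_mark_busy(struct example_gt *gt, unsigned int engine_bit)
{
	gt->active_engines |= engine_bit;
	if (!gt->awake) {
		gt->awake = true;
		/* ...grab the runtime-pm reference, raise RPS, queue the
		 * retire worker...
		 */
	}
}

static void example_retire(struct example_gt *gt, unsigned int engine_bit,
			   bool engine_is_idle)
{
	if (engine_is_idle)
		gt->active_engines &= ~engine_bit;
	if (!gt->active_engines) {
		/* ...queue the idle worker; it re-checks both fields under
		 * the lock and only then clears awake and drops the wakeref...
		 */
	}
}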
@@ -3080,6 +3347,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
+ * @obj: object to flush
*/
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3096,14 +3364,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL)
continue;
- if (list_empty(&req->list))
- goto retire;
-
- if (i915_gem_request_completed(req, true)) {
- __i915_gem_request_retire__upto(req);
-retire:
+ if (i915_gem_request_completed(req))
i915_gem_object_retire__read(obj, i);
- }
}
return 0;
@@ -3111,7 +3373,9 @@ retire:
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @DRM_IOCTL_ARGS: standard ioctl arguments
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
@@ -3185,7 +3449,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
- i915_gem_request_unreference__unlocked(req[i]);
+ i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3208,10 +3472,10 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (to == from)
return 0;
- if (i915_gem_request_completed(from_req, true))
+ if (i915_gem_request_completed(from_req))
return 0;
- if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
i915->mm.interruptible,
@@ -3345,10 +3609,21 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pin_count);
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int ret;
if (list_empty(&vma->obj_link))
@@ -3377,6 +3652,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
+
+ __i915_vma_iounmap(vma);
}
trace_i915_vma_unbind(vma);
@@ -3422,26 +3699,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
return __i915_vma_unbind(vma, false);
}
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
- /* Flush everything onto the inactive list. */
- for_each_engine(engine, dev_priv) {
- if (!i915.enable_execlists) {
- struct drm_i915_gem_request *req;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- req = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = i915_switch_context(req);
- i915_add_request_no_flush(req);
- if (ret)
- return ret;
- }
+ for_each_engine(engine, dev_priv) {
+ if (engine->last_context == NULL)
+ continue;
ret = intel_engine_idle(engine);
if (ret)
@@ -3488,6 +3755,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
/**
* Finds free space in the GTT aperture and binds the object or a view of it
* there.
+ * @obj: object to bind
+ * @vm: address space to bind into
+ * @ggtt_view: global gtt view if applicable
+ * @alignment: requested alignment
+ * @flags: mask of PIN_* flags to use
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3731,7 +4003,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3745,6 +4017,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
/**
* Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
@@ -3816,6 +4090,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/**
* Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
*
* After this function returns, the object will be in the new cache-level
* across all GTT and the contents of the backing storage will be coherent,
@@ -3925,11 +4201,9 @@ out:
* object is now coherent at its new cache level (with respect
* to the access domain).
*/
- if (obj->cache_dirty &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
- cpu_write_needs_clflush(obj)) {
+ if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
}
return 0;
@@ -3967,7 +4241,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
@@ -4097,6 +4371,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
/**
* Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
@@ -4159,7 +4435,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
@@ -4195,10 +4471,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return 0;
ret = __i915_wait_request(target, true, NULL, NULL);
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
- i915_gem_request_unreference__unlocked(target);
+ i915_gem_request_unreference(target);
return ret;
}
@@ -4256,7 +4529,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
unsigned bound;
int ret;
@@ -4420,7 +4693,7 @@ int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -4490,7 +4763,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
- i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+ i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4499,21 +4772,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
+ int ret;
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- i915_gem_object_free(obj);
- return NULL;
- }
+ ret = drm_gem_object_init(dev, &obj->base, size);
+ if (ret)
+ goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4823,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
trace_i915_gem_object_create(obj);
return obj;
+
+fail:
+ i915_gem_object_free(obj);
+
+ return ERR_PTR(ret);
}
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4580,7 +4858,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_vma *vma, *next;
intel_runtime_pm_get(dev_priv);
@@ -4655,16 +4933,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
- BUG_ON(!view);
+ GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
}
@@ -4688,7 +4962,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
static void
i915_gem_stop_engines(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
@@ -4698,27 +4972,28 @@ i915_gem_stop_engines(struct drm_device *dev)
int
i915_gem_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
goto err;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
i915_gem_stop_engines(dev);
+ i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- flush_delayed_work(&dev_priv->mm.idle_work);
+ cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+ flush_delayed_work(&dev_priv->gt.idle_work);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(dev_priv->mm.busy);
+ WARN_ON(dev_priv->gt.awake);
return 0;
@@ -4727,40 +5002,9 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
- int i, ret;
-
- if (!HAS_L3_DPF(dev) || !remap_info)
- return 0;
-
- ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
- if (ret)
- return ret;
-
- /*
- * Note: We do not worry about the concurrent register cacheline hang
- * here because no other code should access these registers other than
- * at initialization time.
- */
- for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
- intel_ring_emit(engine, remap_info[i]);
- }
-
- intel_ring_advance(engine);
-
- return ret;
-}
-
void i915_gem_init_swizzling(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4785,7 +5029,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
static void init_unused_ring(struct drm_device *dev, u32 base)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
@@ -4812,7 +5056,7 @@ static void init_unused_rings(struct drm_device *dev)
int i915_gem_init_engines(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = intel_init_render_ring_buffer(dev);
@@ -4860,9 +5104,9 @@ cleanup_render_ring:
int
i915_gem_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- int ret, j;
+ int ret;
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,59 +5158,10 @@ i915_gem_init_hw(struct drm_device *dev)
intel_mocs_init_l3cc_table(dev);
/* We can't enable contexts until all firmware is loaded */
- if (HAS_GUC_UCODE(dev)) {
- ret = intel_guc_ucode_load(dev);
- if (ret) {
- DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
- ret = -EIO;
- goto out;
- }
- }
-
- /*
- * Increment the next seqno by 0x100 so we have a visible break
- * on re-initialisation
- */
- ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+ ret = intel_guc_setup(dev);
if (ret)
goto out;
- /* Now it is safe to go back round and do everything else: */
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- break;
- }
-
- if (engine->id == RCS) {
- for (j = 0; j < NUM_L3_SLICES(dev); j++) {
- ret = i915_gem_l3_remap(req, j);
- if (ret)
- goto err_request;
- }
- }
-
- ret = i915_ppgtt_init_ring(req);
- if (ret)
- goto err_request;
-
- ret = i915_gem_context_enable(req);
- if (ret)
- goto err_request;
-
-err_request:
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("Failed to enable %s, error=%d\n",
- engine->name, ret);
- i915_gem_cleanup_engines(dev);
- break;
- }
- }
-
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@@ -4974,12 +5169,9 @@ out:
int i915_gem_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- i915.enable_execlists = intel_sanitize_enable_execlists(dev,
- i915.enable_execlists);
-
mutex_lock(&dev->struct_mutex);
if (!i915.enable_execlists) {
@@ -5002,10 +5194,7 @@ int i915_gem_init(struct drm_device *dev)
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = i915_gem_init_userptr(dev);
- if (ret)
- goto out_unlock;
-
+ i915_gem_init_userptr(dev_priv);
i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev);
@@ -5037,19 +5226,11 @@ out_unlock:
void
i915_gem_cleanup_engines(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
dev_priv->gt.cleanup_engine(engine);
-
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev, ALL_ENGINES);
}
static void
@@ -5062,7 +5243,7 @@ init_engine_lists(struct intel_engine_cs *engine)
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
@@ -5073,7 +5254,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
else
dev_priv->num_fence_regs = 8;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
@@ -5086,7 +5267,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
void
i915_gem_load_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
dev_priv->objects =
@@ -5114,22 +5295,15 @@ i915_gem_load_init(struct drm_device *dev)
init_engine_lists(&dev_priv->engine[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
- INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
- INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+ INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
i915_gem_idle_work_handler);
+ init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- /*
- * Set initial sequence number for requests.
- * Using this number allows the wraparound to happen early,
- * catching any obvious problems.
- */
- dev_priv->next_seqno = ((u32)~0 - 0x1100);
- dev_priv->last_seqno = ((u32)~0 - 0x1101);
-
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5148,6 +5322,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
kmem_cache_destroy(dev_priv->objects);
}
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ /* Called just before we write the hibernation image.
+ *
+ * We need to update the domain tracking to reflect that the CPU
+ * will be accessing all the pages to create and restore from the
+ * hibernation, and so upon restoration those pages will be in the
+ * CPU domain.
+ *
+ * To make sure the hibernation image contains the latest state,
+ * we update that state just before writing out the image.
+ */
+
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5187,7 +5389,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
- file_priv->dev_priv = dev->dev_private;
+ file_priv->dev_priv = to_i915(dev);
file_priv->file = file;
INIT_LIST_HEAD(&file_priv->rps.link);
@@ -5233,7 +5435,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(o->base.dev);
struct i915_vma *vma;
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5254,13 +5456,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5485,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
+ if (vma->is_ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5310,23 +5507,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
return false;
}
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
- BUG_ON(list_empty(&o->vma_list));
+ GEM_BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
return vma->node.size;
}
+
return 0;
}
@@ -5347,7 +5539,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
struct page *page;
/* Only default objects have per-page dirty tracking */
- if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+ if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
return NULL;
page = i915_gem_object_get_page(obj, n);
@@ -5365,8 +5557,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;
- obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
- if (IS_ERR_OR_NULL(obj))
+ obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
return obj;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
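The i915_gem.c hunks above rename i915_gem_alloc_object() to i915_gem_object_create() and switch it to the kernel's ERR_PTR convention, so callers now test IS_ERR() and propagate PTR_ERR() instead of checking for NULL (as the i915_gem_object_create_from_data() hunk just above does). A minimal caller sketch, with a hypothetical helper name and assuming the usual i915 driver headers:

/* Hypothetical caller sketch -- not part of the patch itself. */
static int example_alloc_scratch(struct drm_device *dev, size_t size,
				 struct drm_i915_gem_object **out)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* encoded errno, never NULL */

	*out = obj;
	return 0;
}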
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f2968e..3752d5daa4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) {
int ret;
- obj = i915_gem_alloc_object(pool->dev, size);
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ obj = i915_gem_object_create(pool->dev, size);
+ if (IS_ERR(obj))
+ return obj;
ret = i915_gem_object_get_pages(obj);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916f75..3c97f0e7a003 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
+
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN;
}
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
return ret;
}
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
{
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+ struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+ int i;
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
- if (i915.enable_execlists)
- intel_lr_context_free(ctx);
-
/*
* This context is going away and we need to remove all VMAs still
* around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)
i915_ppgtt_put(ctx->ppgtt);
- if (ctx->legacy_hw_ctx.rcs_state)
- drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_context *ce = &ctx->engine[i];
+
+ if (!ce->state)
+ continue;
+
+ WARN_ON(ce->pin_count);
+ if (ce->ringbuf)
+ intel_ringbuffer_free(ce->ringbuf);
+
+ drm_gem_object_unreference(&ce->state->base);
+ }
+
list_del(&ctx->link);
+
+ ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
kfree(ctx);
}
@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ lockdep_assert_held(&dev->struct_mutex);
+
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return obj;
/*
* Try to make the context utilize L3 as well as LLC.
@@ -209,22 +224,52 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+ int ret;
+
+ ret = ida_simple_get(&dev_priv->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0) {
+ /* Contexts are only released when no longer active.
+ * Flush any pending retires to hopefully release some
+ * stale contexts and try again.
+ */
+ i915_gem_retire_requests(dev_priv);
+ ret = ida_simple_get(&dev_priv->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ *out = ret;
+ return 0;
+}
+
+static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *ctx;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gem_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
+ ret = assign_hw_id(dev_priv, &ctx->hw_id);
+ if (ret) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
+ ctx->ggtt_alignment = get_context_alignment(dev_priv);
+
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -232,7 +277,7 @@ __create_hw_context(struct drm_device *dev,
ret = PTR_ERR(obj);
goto err_out;
}
- ctx->legacy_hw_ctx.rcs_state = obj;
+ ctx->engine[RCS].state = obj;
}
/* Default context will never have a file_priv */
@@ -249,9 +294,13 @@ __create_hw_context(struct drm_device *dev,
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
- ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+ ctx->remap_slice = ALL_L3_SLICES(dev_priv);
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+ ctx->ring_size = 4 * PAGE_SIZE;
+ ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
return ctx;
@@ -265,44 +314,27 @@ err_out:
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
-static struct intel_context *
+static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
- const bool is_global_default_ctx = file_priv == NULL;
- struct intel_context *ctx;
- int ret = 0;
+ struct i915_gem_context *ctx;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ lockdep_assert_held(&dev->struct_mutex);
ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
- /* We may need to do things with the shrinker which
- * require us to immediately switch back to the default
- * context. This can cause a problem as pinning the
- * default context also requires GTT space which may not
- * be available. To avoid this we always pin the default
- * context.
- */
- ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
- get_context_alignment(dev), 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
- goto err_destroy;
- }
- }
-
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
- if (IS_ERR_OR_NULL(ppgtt)) {
+ if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- ret = PTR_ERR(ppgtt);
- goto err_unpin;
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
+ i915_gem_context_unreference(ctx);
+ return ERR_CAST(ppgtt);
}
ctx->ppgtt = ppgtt;
@@ -311,76 +343,102 @@ i915_gem_create_context(struct drm_device *dev,
trace_i915_context_create(ctx);
return ctx;
+}
-err_unpin:
- if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
- i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- i915_gem_context_unreference(ctx);
- return ERR_PTR(ret);
+/**
+ * i915_gem_context_create_gvt - create a GVT GEM context
+ * @dev: drm device *
+ *
+ * This function is used to create a GVT specific GEM context.
+ *
+ * Returns:
+ * pointer to i915_gem_context on success, error pointer if failed
+ *
+ */
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev)
+{
+ struct i915_gem_context *ctx;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return ERR_PTR(-ENODEV);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ctx = i915_gem_create_context(dev, NULL);
+ if (IS_ERR(ctx))
+ goto out;
+
+ ctx->execlists_force_single_submission = true;
+ ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ctx;
}
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
if (i915.enable_execlists) {
intel_lr_context_unpin(ctx, engine);
} else {
- if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
- i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ if (ce->state)
+ i915_gem_object_ggtt_unpin(ce->state);
+
i915_gem_context_unreference(ctx);
}
}
void i915_gem_context_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev->struct_mutex);
if (i915.enable_execlists) {
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev_priv, ctx);
}
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
-
- if (engine->last_context) {
- i915_gem_context_unpin(engine->last_context, engine);
- engine->last_context = NULL;
- }
- }
-
- /* Force the GPU state to be reinitialised on enabling */
- dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+ i915_gem_context_lost(dev_priv);
}
int i915_gem_context_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *ctx;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gem_context *ctx;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->kernel_context))
return 0;
- if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+ if (intel_vgpu_active(dev_priv) &&
+ HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
}
+ /* Using the simple ida interface, the max is limited by sizeof(int) */
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+ ida_init(&dev_priv->context_hw_ida);
+
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
- } else if (HAS_HW_CONTEXTS(dev)) {
- dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+ } else if (HAS_HW_CONTEXTS(dev_priv)) {
+ dev_priv->hw_context_size =
+ round_up(get_context_size(dev_priv), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
@@ -403,67 +461,60 @@ int i915_gem_context_init(struct drm_device *dev)
return 0;
}
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *dctx = dev_priv->kernel_context;
- int i;
-
- if (dctx->legacy_hw_ctx.rcs_state) {
- /* The only known way to stop the gpu from accessing the hw context is
- * to reset it. Do this as the very last operation to avoid confusing
- * other code, leading to spurious errors. */
- intel_gpu_reset(dev, ALL_ENGINES);
+ struct intel_engine_cs *engine;
- /* When default context is created and switched to, base object refcount
- * will be 2 (+1 from object creation and +1 from do_switch()).
- * i915_gem_context_fini() will be called after gpu_idle() has switched
- * to default context. So we need to unreference the base object once
- * to offset the do_switch part, so that i915_gem_context_unreference()
- * can then free the base object correctly. */
- WARN_ON(!dev_priv->engine[RCS].last_context);
-
- i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
- }
-
- for (i = I915_NUM_ENGINES; --i >= 0;) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ for_each_engine(engine, dev_priv) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
- i915_gem_context_unreference(dctx);
- dev_priv->kernel_context = NULL;
-}
+ /* Force the GPU state to be restored on enabling */
+ if (!i915.enable_execlists) {
+ struct i915_gem_context *ctx;
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ if (!i915_gem_context_is_default(ctx))
+ continue;
- if (i915.enable_execlists) {
- if (engine->init_context == NULL)
- return 0;
+ for_each_engine(engine, dev_priv)
+ ctx->engine[engine->id].initialised = false;
- ret = engine->init_context(req);
- } else
- ret = i915_switch_context(req);
+ ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+ }
- if (ret) {
- DRM_ERROR("ring init context: %d\n", ret);
- return ret;
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *kce =
+ &dev_priv->kernel_context->engine[engine->id];
+
+ kce->initialised = true;
+ }
}
+}
- return 0;
+void i915_gem_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gem_context *dctx = dev_priv->kernel_context;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ i915_gem_context_unreference(dctx);
+ dev_priv->kernel_context = NULL;
+
+ ida_destroy(&dev_priv->context_hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct intel_context *ctx = p;
+ struct i915_gem_context *ctx = p;
+ ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_unreference(ctx);
return 0;
}
@@ -471,7 +522,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
idr_init(&file_priv->context_idr);
@@ -491,31 +542,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ lockdep_assert_held(&dev->struct_mutex);
+
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
}
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
- struct intel_context *ctx;
-
- ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
- if (!ctx)
- return ERR_PTR(-ENOENT);
-
- return ctx;
-}
-
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
+ struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(engine->dev) ?
- hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(dev_priv) ?
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
0;
int len, ret;
@@ -524,21 +566,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(engine->dev)) {
+ if (IS_GEN6(dev_priv)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+ if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(engine->dev)->gen < 8)
+ else if (INTEL_GEN(dev_priv) < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(engine->dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
@@ -546,14 +588,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
@@ -568,7 +610,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_SET_CONTEXT);
intel_ring_emit(engine,
- i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+ i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +618,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
*/
intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
@@ -609,45 +651,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
}
-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
- struct intel_context *to)
+static int remap_l3(struct drm_i915_gem_request *req, int slice)
+{
+ u32 *remap_info = req->i915->l3_parity.remap_info[slice];
+ struct intel_engine_cs *engine = req->engine;
+ int i, ret;
+
+ if (!remap_info)
+ return 0;
+
+ ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (ret)
+ return ret;
+
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+ for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+ intel_ring_emit(engine, remap_info[i]);
+ }
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
+
+ return 0;
+}
+
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *to)
{
if (to->remap_slice)
return false;
- if (!to->legacy_hw_ctx.initialized)
+ if (!to->engine[RCS].initialised)
return false;
- if (to->ppgtt &&
- !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
return to == engine->last_context;
}
static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *to)
{
- if (!to->ppgtt)
+ if (!ppgtt)
return false;
+ /* Always load the ppgtt on first use */
+ if (!engine->last_context)
+ return true;
+
+ /* Same context without new entries, skip */
if (engine->last_context == to &&
- !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
if (engine->id != RCS)
return true;
- if (INTEL_INFO(engine->dev)->gen < 8)
+ if (INTEL_GEN(engine->i915) < 8)
return true;
return false;
}
static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+ struct i915_gem_context *to,
+ u32 hw_flags)
{
- if (!to->ppgtt)
+ if (!ppgtt)
return false;
if (!IS_GEN8(to->i915))
@@ -661,18 +741,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
- struct intel_context *to = req->ctx;
+ struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
- struct intel_context *from;
+ struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+ struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
- if (skip_rcs_switch(engine, to))
+ if (skip_rcs_switch(ppgtt, engine, to))
return 0;
/* Trying to pin first makes error handling easier. */
- ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(engine->dev),
+ ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
+ to->ggtt_alignment,
0);
if (ret)
return ret;
@@ -694,37 +775,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
if (ret)
goto unpin_out;
- if (needs_pd_load_pre(engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occur when this occurs. */
hw_flags = MI_RESTORE_INHIBIT;
- else if (to->ppgtt &&
- intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+ else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE;
else
hw_flags = 0;
- /* We should never emit switch_mm more than once */
- WARN_ON(needs_pd_load_pre(engine, to) &&
- needs_pd_load_post(to, hw_flags));
-
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
@@ -738,8 +814,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+ from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -747,10 +823,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->legacy_hw_ctx.rcs_state->dirty = 1;
+ from->engine[RCS].state->dirty = 1;
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(from->engine[RCS].state);
i915_gem_context_unreference(from);
}
i915_gem_context_reference(to);
@@ -759,9 +835,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
- if (needs_pd_load_post(to, hw_flags)) {
+ if (needs_pd_load_post(ppgtt, to, hw_flags)) {
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
@@ -771,33 +847,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return ret;
}
- if (to->ppgtt)
- to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ if (ppgtt)
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
- ret = i915_gem_l3_remap(req, i);
+ ret = remap_l3(req, i);
if (ret)
return ret;
to->remap_slice &= ~(1<<i);
}
- if (!to->legacy_hw_ctx.initialized) {
+ if (!to->engine[RCS].initialised) {
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
return ret;
}
- to->legacy_hw_ctx.initialized = true;
+ to->engine[RCS].initialised = true;
}
return 0;
unpin_out:
- i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(to->engine[RCS].state);
return ret;
}
@@ -817,25 +893,24 @@ unpin_out:
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
WARN_ON(i915.enable_execlists);
- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (engine->id != RCS ||
- req->ctx->legacy_hw_ctx.rcs_state == NULL) {
- struct intel_context *to = req->ctx;
+ if (!req->ctx->engine[engine->id].state) {
+ struct i915_gem_context *to = req->ctx;
+ struct i915_hw_ppgtt *ppgtt =
+ to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- if (needs_pd_load_pre(engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine, to)) {
int ret;
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
return ret;
- /* Doing a PD load always reloads the page dirs */
- to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
if (to != engine->last_context) {
@@ -861,7 +936,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (!contexts_enabled(dev))
@@ -890,7 +965,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (args->pad != 0)
@@ -903,13 +978,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
@@ -922,14 +997,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
@@ -951,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
else
args->value = to_i915(dev)->ggtt.base.total;
break;
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+ break;
default:
ret = -EINVAL;
break;
@@ -965,14 +1043,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
@@ -996,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
}
break;
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ if (args->size) {
+ ret = -EINVAL;
+ } else {
+ if (args->value)
+ ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
+ else
+ ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
+ }
+ break;
default:
ret = -EINVAL;
break;
@@ -1004,3 +1092,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ struct i915_gem_context *ctx;
+ int ret;
+
+ if (args->flags || args->pad)
+ return -EINVAL;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+ hs = &ctx->hang_stats;
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
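The new I915_CONTEXT_PARAM_NO_ERROR_CAPTURE handling above rejects any non-zero args->size and treats args->value as a plain boolean toggle on CONTEXT_NO_ERROR_CAPTURE. A hypothetical userspace sketch of flipping the flag, assuming the matching uapi definition from this series is available in i915_drm.h:

/* Hypothetical userspace sketch -- relies only on the uapi implied above. */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_disable_error_capture(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,		/* must stay 0 for this param */
		.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
		.value = 1,		/* non-zero: suppress error capture */
	};

	/* 0 on success, -1 with errno set on failure */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}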
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
new file mode 100644
index 000000000000..91315557e421
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_GEM_DMABUF_H_
+#define _I915_GEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+static inline struct reservation_object *
+i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
+{
+ struct dma_buf *dma_buf;
+
+ if (obj->base.dma_buf)
+ dma_buf = obj->base.dma_buf;
+ else if (obj->base.import_attach)
+ dma_buf = obj->base.import_attach->dmabuf;
+ else
+ return NULL;
+
+ return dma_buf->resv;
+}
+
+#endif
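The new header adds only a lookup helper; as orientation, a hypothetical in-driver caller (not part of this patch) would treat a NULL return as "this object neither exports nor imports a dma-buf":

/* Hypothetical caller sketch -- not part of the patch. */
static bool example_obj_has_dmabuf_resv(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_dmabuf_resv(obj) != NULL;
}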
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..3c1280ec7ff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,37 @@
#include "intel_drv.h"
#include "i915_trace.h"
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+
+ if (i915.enable_execlists)
+ return 0;
+
+ for_each_engine(engine, dev_priv) {
+ struct drm_i915_gem_request *req;
+ int ret;
+
+ if (engine->last_context == NULL)
+ continue;
+
+ if (engine->last_context == dev_priv->kernel_context)
+ continue;
+
+ req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = i915_switch_context(req);
+ i915_add_request_no_flush(req);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
@@ -150,11 +181,19 @@ none:
/* Only idle the GPU and repeat the search once */
if (pass++ == 0) {
- ret = i915_gpu_idle(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (i915_is_ggtt(vm)) {
+ ret = switch_to_pinned_context(dev_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
goto search_again;
}
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
trace_i915_gem_evict_vm(vm);
if (do_idle) {
- ret = i915_gpu_idle(vm->dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+ if (i915_is_ggtt(vm)) {
+ ret = switch_to_pinned_context(dev_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
return ret;
- i915_gem_retire_requests(vm->dev);
+ i915_gem_retire_requests(dev_priv);
WARN_ON(!list_empty(&vm->active_list));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98269..1978633e7549 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head *vmas,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
- bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+ bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry;
i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct intel_engine_cs *engine,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct drm_i915_gem_relocation_entry *reloc;
struct i915_address_space *vm;
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (flush_chipset)
- i915_gem_chipset_flush(req->engine->dev);
+ i915_gem_chipset_flush(req->engine->i915);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
return 0;
}
-static struct intel_context *
+static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
- struct intel_context *ctx = NULL;
+ struct i915_gem_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
- ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+ ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
- if (i915.enable_execlists && !ctx->engine[engine->id].state) {
- int ret = intel_lr_context_deferred_alloc(ctx, engine);
- if (ret) {
- DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
- return ERR_PTR(ret);
- }
- }
-
return ctx;
}
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
@@ -1150,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret, i;
if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1233,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
@@ -1336,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_ring < 0) {
/* If not, use the ping-pong mechanism to select one. */
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
return file_priv->bsd_ring;
@@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *engine;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
struct i915_execbuffer_params *params = &params_master;
@@ -1454,7 +1446,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
- if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
return -EPERM;
dispatch_flags |= I915_DISPATCH_SECURE;
@@ -1485,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags |= I915_DISPATCH_RS;
}
+ /* Take a local wakeref for preparing to dispatch the execbuf as
+ * we expect to access the hardware fairly frequently in the
+ * process. Upon first dispatch, we acquire another prolonged
+ * wakeref that we hold until the GPU has been idle for at least
+ * 100ms.
+ */
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1561,7 +1559,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
args->batch_len,
- file->is_master);
+ drm_is_current_master(file));
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..251d7a95af89 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -58,7 +58,7 @@
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t val;
if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int reg = fence_number(dev_priv, fence);
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_i915_fence_reg *fence;
int ret;
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_fence_reg *reg, *avail;
int i;
@@ -367,7 +367,7 @@ int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
@@ -433,7 +433,7 @@ bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
*/
void i915_gem_restore_fences(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int i;
if (obj->bit_17 == NULL)
return;
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
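Both bit-17 swizzle walkers in this file move from the open-coded sg_page_iter loop to the new sgt_iter helpers, which hand back the struct page (or, with for_each_sgt_dma(), the DMA address) directly. A side-by-side sketch of the two shapes, illustrative only:

        /* Before: iterate sg entries and extract the page by hand. */
        struct sg_page_iter sg_iter;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
                /* ... use page ... */
        }

        /* After: the iterator yields the page itself. */
        struct sgt_iter sgt_iter;
        struct page *page;

        for_each_sgt_page(page, sgt_iter, obj->pages) {
                /* ... use page ... */
        }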
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9dad3..10f1e32767e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
*
*/
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED,
};
-static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt)
{
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
- has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+ has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+ has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+ has_full_48bit_ppgtt =
+ IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
has_full_ppgtt = false; /* emulation is too hard */
+ if (!has_aliasing_ppgtt)
+ return 0;
+
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
- (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+ if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
return 0;
if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
#endif
/* Early VLV doesn't have this */
- if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
- if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
static int gen8_init_scratch(struct i915_address_space *vm)
{
struct drm_device *dev = vm->dev;
+ int ret;
vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pt);
+ ret = PTR_ERR(vm->scratch_pt);
+ goto free_scratch_page;
}
vm->scratch_pd = alloc_pd(dev);
if (IS_ERR(vm->scratch_pd)) {
- free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pd);
+ ret = PTR_ERR(vm->scratch_pd);
+ goto free_pt;
}
if (USES_FULL_48BIT_PPGTT(dev)) {
vm->scratch_pdp = alloc_pdp(dev);
if (IS_ERR(vm->scratch_pdp)) {
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pdp);
+ ret = PTR_ERR(vm->scratch_pdp);
+ goto free_pd;
}
}
@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
+
+free_pd:
+ free_pd(dev, vm->scratch_pd);
+free_pt:
+ free_pt(dev, vm->scratch_pt);
+free_scratch_page:
+ free_scratch_page(dev, vm->scratch_page);
+
+ return ret;
}
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
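gen8_init_scratch() is reshaped from repeating the teardown at every failure point into the usual kernel goto-unwind ladder, where each label frees exactly what was set up before the failing step. A generic sketch of the idiom (illustrative; alloc_a/alloc_b/alloc_c and free_a/free_b are hypothetical stand-ins for alloc_scratch_page(), alloc_pt() and friends):

        a = alloc_a(dev);
        if (IS_ERR(a))
                return PTR_ERR(a);      /* nothing to unwind yet */

        b = alloc_b(dev);
        if (IS_ERR(b)) {
                ret = PTR_ERR(b);
                goto free_a;            /* undo only what already succeeded */
        }

        c = alloc_c(dev);
        if (IS_ERR(c)) {
                ret = PTR_ERR(c);
                goto free_b;
        }

        return 0;

free_b:
        free_b(dev, b);
free_a:
        free_a(dev, a);
        return ret;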
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(vm->dev))
+ if (intel_vgpu_active(to_i915(vm->dev)))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(ppgtt->base.dev)) {
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(ppgtt->base.dev))
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1552,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_table *unused;
gen6_pte_t scratch_pte;
uint32_t pd_entry;
- uint32_t pte, pde, temp;
+ uint32_t pte, pde;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true, 0);
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
gen6_pte_t *pt_vaddr;
const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
@@ -1622,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_table *pt;
- uint32_t pde, temp;
+ uint32_t pde;
- gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_for_each_pde(pt, pd, start, length, pde)
gen6_write_pde(pd, pde, pt);
/* Make sure write is complete before other code can use this page
@@ -1665,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
-}
-
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
@@ -1713,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
+ struct drm_i915_private *dev_priv = req->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-
- POSTING_READ(RING_PP_DIR_DCLV(engine));
-
return 0;
}
static void gen8_ppgtt_enable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
@@ -1739,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
@@ -1764,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
static void gen6_ppgtt_enable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1821,20 +1823,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
- struct sg_page_iter sg_iter;
+ gen6_pte_t *pt_vaddr = NULL;
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
- pt_vaddr = NULL;
- for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ for_each_sgt_dma(addr, sgt_iter, pages) {
if (pt_vaddr == NULL)
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, true, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1844,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
act_pte = 0;
}
}
+
if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr);
}
@@ -1857,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint32_t start, length, start_save, length_save;
- uint32_t pde, temp;
+ uint32_t pde;
int ret;
if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1873,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
* need allocation. The second stage marks use ptes within the page
* tables.
*/
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
if (pt != vm->scratch_pt) {
WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
continue;
@@ -1898,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
start = start_save;
length = length_save;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1967,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory *pd = &ppgtt->pd;
+ struct drm_device *dev = vm->dev;
struct i915_page_table *pt;
uint32_t pde;
drm_mm_remove_node(&ppgtt->node);
- gen6_for_all_pdes(pt, ppgtt, pde) {
+ gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(ppgtt->base.dev, pt);
- }
+ free_pt(dev, pt);
gen6_free_scratch(vm);
}
@@ -2041,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint64_t start, uint64_t length)
{
struct i915_page_table *unused;
- uint32_t pde, temp;
+ uint32_t pde;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
@@ -2055,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (IS_GEN6(dev)) {
+ if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
ppgtt->switch_mm = gen6_mm_switch;
- } else if (IS_HASWELL(dev)) {
+ else if (IS_HASWELL(dev))
ppgtt->switch_mm = hsw_mm_switch;
- } else if (IS_GEN7(dev)) {
+ else if (IS_GEN7(dev))
ppgtt->switch_mm = gen7_mm_switch;
- } else
+ else
BUG();
- if (intel_vgpu_active(dev))
- ppgtt->switch_mm = vgpu_mm_switch;
-
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
@@ -2115,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
{
drm_mm_init(&vm->mm, vm->start, vm->total);
- vm->dev = dev_priv->dev;
+ vm->dev = &dev_priv->drm;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2123,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
static void gtt_write_workarounds(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
@@ -2140,9 +2140,9 @@ static void gtt_write_workarounds(struct drm_device *dev)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2179,20 +2179,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
return 0;
}
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- if (i915.enable_execlists)
- return 0;
-
- if (!ppgtt)
- return 0;
-
- return ppgtt->switch_mm(ppgtt, req);
-}
-
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
@@ -2257,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(ggtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev)) {
- DRM_ERROR("Couldn't idle GPU\n");
+ if (i915_gem_wait_for_idle(dev_priv)) {
+ DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
@@ -2275,12 +2261,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
-void i915_check_and_clear_faults(struct drm_device *dev)
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return;
for_each_engine(engine, dev_priv) {
@@ -2324,7 +2309,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true);
@@ -2352,29 +2337,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
#endif
}
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (offset >> PAGE_SHIFT);
+ int rpm_atomic_seq;
+
+ rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+ assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned first_entry = start >> PAGE_SHIFT;
- gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- int i = 0;
- struct sg_page_iter sg_iter;
- dma_addr_t addr = 0; /* shut up gcc */
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+ gen8_pte_t gtt_entry;
+ dma_addr_t addr;
int rpm_atomic_seq;
+ int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
- gen8_set_pte(&gtt_entries[i],
- gen8_pte_encode(addr, level, true));
- i++;
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+ for_each_sgt_dma(addr, sgt_iter, st) {
+ gtt_entry = gen8_pte_encode(addr, level, true);
+ gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
/*
@@ -2385,8 +2390,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readq(&gtt_entries[i-1])
- != gen8_pte_encode(addr, level, true));
+ WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2424,6 +2428,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (offset >> PAGE_SHIFT);
+ int rpm_atomic_seq;
+
+ rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+ iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+ assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
@@ -2436,21 +2462,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned first_entry = start >> PAGE_SHIFT;
- gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- int i = 0;
- struct sg_page_iter sg_iter;
- dma_addr_t addr = 0;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen6_pte_t __iomem *gtt_entries;
+ gen6_pte_t gtt_entry;
+ dma_addr_t addr;
int rpm_atomic_seq;
+ int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
- i++;
+ gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+ for_each_sgt_dma(addr, sgt_iter, st) {
+ gtt_entry = vm->pte_encode(addr, level, true, flags);
+ iowrite32(gtt_entry, &gtt_entries[i++]);
}
/* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2485,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
- if (i != 0) {
- unsigned long gtt = readl(&gtt_entries[i-1]);
- WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
- }
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2498,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
+static void nop_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+}
+
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2543,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
bool use_scratch)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2538,12 +2569,30 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+ int rpm_atomic_seq;
+
+ rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+
+ assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
@@ -2561,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
int rpm_atomic_seq;
@@ -2642,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_device *dev = vma->vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = vma->obj;
const uint64_t size = min_t(uint64_t,
obj->base.size,
@@ -2668,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool interruptible;
interruptible = do_idling(dev_priv);
@@ -2727,11 +2776,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE;
- if (intel_vgpu_active(dev)) {
- ret = intel_vgt_balloon(dev);
- if (ret)
- return ret;
- }
+ ret = intel_vgt_balloon(dev_priv);
+ if (ret)
+ return ret;
if (!HAS_LLC(dev))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
@@ -2831,8 +2878,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&ggtt->base.mm)) {
- if (intel_vgpu_active(dev))
- intel_vgt_deballoon();
+ intel_vgt_deballoon(dev_priv);
drm_mm_takedown(&ggtt->base.mm);
list_del(&ggtt->base.global_link);
@@ -3069,13 +3115,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size);
- ggtt->base.clear_range = gen8_ggtt_clear_range;
- if (IS_CHERRYVIEW(dev_priv))
- ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
- else
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.insert_page = gen8_ggtt_insert_page;
+ ggtt->base.clear_range = nop_clear_range;
+ if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ ggtt->base.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ if (IS_CHERRYVIEW(dev_priv))
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ret;
}
@@ -3108,6 +3157,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size);
ggtt->base.clear_range = gen6_ggtt_clear_range;
+ ggtt->base.insert_page = gen6_ggtt_insert_page;
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -3129,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
@@ -3138,7 +3188,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
&ggtt->mappable_base, &ggtt->mappable_end);
- ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+ ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
+ ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
ggtt->base.clear_range = i915_ggtt_clear_range;
ggtt->base.bind_vma = ggtt_bind_vma;
@@ -3219,14 +3270,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
#endif
- /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
- * user's requested state against the hardware/driver capabilities. We
- * do this now so that we can print out any log messages once rather
- * than every time we check intel_enable_ppgtt().
- */
- i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
return 0;
@@ -3250,9 +3293,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- bool flush;
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3302,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* Cache flush objects bound into GGTT and rebind them. */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != &ggtt->base)
continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level,
PIN_UPDATE));
-
- flush = true;
}
- if (flush)
- i915_gem_clflush_object(obj, obj->pin_display);
+ if (obj->pin_display)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3437,11 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
+ const size_t n_pages = obj->base.size / PAGE_SIZE;
unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
unsigned int size_pages_uv;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ dma_addr_t dma_addr;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
@@ -3409,7 +3450,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+ page_addr_list = drm_malloc_gfp(n_pages,
sizeof(dma_addr_t),
GFP_TEMPORARY);
if (!page_addr_list)
@@ -3432,11 +3473,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
- i++;
- }
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ page_addr_list[i++] = dma_addr;
+ GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
@@ -3634,3 +3674,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
return obj->base.size;
}
}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!vma->obj->map_and_fenceable))
+ return ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!vma->is_ggtt);
+ GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ vma->pin_count++;
+ return ptr;
+}
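Earlier in this file's diff, three ->insert_page() callbacks (gen8, gen6 and the GMCH path) are wired up so a single page can be bound into the GGTT without first building a scatterlist, each under the same RPM-atomic bracket as the bulk insert_entries path. A hedged sketch of how such a hook is meant to be invoked; the DMA address and offset below are hypothetical placeholders:

        struct i915_address_space *vm = &dev_priv->ggtt.base;
        dma_addr_t addr = /* DMA address of the page to map */ 0;
        uint64_t offset = 0;    /* byte offset into the GGTT, page aligned */

        /* The vfunc picks the PTE encoding for the running hardware. */
        vm->insert_page(vm, addr, offset, I915_CACHE_NONE, 0);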
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d55f6..aa5f31d1c2ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
+#include <linux/io-mapping.h>
+
struct drm_i915_file_private;
typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ void __iomem *iomap;
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
@@ -316,6 +319,11 @@ struct i915_address_space {
uint64_t start,
uint64_t length,
bool use_scratch);
+ void (*insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
@@ -382,27 +390,27 @@ struct i915_hw_ppgtt {
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
-/* For each pde iterates over every pde between from start until start + length.
- * If start, and start+length are not perfectly divisible, the macro will round
- * down, and up as needed. The macro modifies pde, start, and length. Dev is
- * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0,
- * and length = 2G effectively iterates over every PDE in the system.
- *
- * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
*/
-#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
- for (iter = gen6_pde_index(start); \
- length > 0 && iter < I915_PDES ? \
- (pt = (pd)->page_table[iter]), 1 : 0; \
- iter++, \
- temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
- temp = min_t(unsigned, temp, length), \
- start += temp, length -= temp)
-
-#define gen6_for_all_pdes(pt, ppgtt, iter) \
- for (iter = 0; \
- pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
- iter++)
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+ for (iter = gen6_pde_index(start); \
+ length > 0 && iter < I915_PDES && \
+ (pt = (pd)->page_table[iter], true); \
+ ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ temp = min(temp - start, length); \
+ start += temp, length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter) \
+ for (iter = 0; \
+ iter < I915_PDES && \
+ (pt = (pd)->page_table[iter], true); \
+ ++iter)
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
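The rewritten gen6_for_each_pde() drops the extra 'temp' argument by folding the advance into a statement expression, but, as the updated comment stresses, it still consumes start, length and iter. A hedged usage sketch (illustrative; walk_pts() is a hypothetical caller) showing why callers pass scratch copies, much as gen6_alloc_va_range() above keeps start_save/length_save:

        static void walk_pts(struct i915_hw_ppgtt *ppgtt,
                             uint64_t start_in, uint64_t length_in)
        {
                struct i915_page_table *pt;
                uint32_t start = start_in, length = length_in;  /* scratch copies */
                uint32_t pde;

                gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
                        /* pt covers one page table's worth of the requested range */
                }

                /* start and length are exhausted here; reuse the _in values instead. */
        }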
@@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
@@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
-void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
@@ -560,4 +566,36 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the return iomapping,
+ * the caller must call i915_vma_unpin_iomap to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
+ * @vma: VMA to unpin
+ *
+ * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->pin_count == 0);
+ GEM_BUG_ON(vma->iomap == NULL);
+ vma->pin_count--;
+}
+
#endif
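The kernel-doc for the new iomap helpers spells out the contract: the VMA must already be bound in the mappable GGTT, struct_mutex must be held, and each successful i915_vma_pin_iomap() must be balanced by i915_vma_unpin_iomap(). A hedged usage sketch, illustrative only (write_through_aperture() is a hypothetical caller that already holds struct_mutex):

        static int write_through_aperture(struct i915_vma *vma, u32 value)
        {
                void __iomem *ptr;

                ptr = i915_vma_pin_iomap(vma);  /* extra pin taken on success */
                if (IS_ERR(ptr))
                        return PTR_ERR(ptr);

                writel(value, ptr);             /* WC write through the aperture */

                i915_vma_unpin_iomap(vma);      /* drop the pin; the mapping stays cached */
                return 0;
        }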
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21fca..f75bbd67a13a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
#include "intel_renderstate.h"
static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
{
switch (gen) {
case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+ struct drm_i915_private *dev_priv)
{
int ret;
- so->gen = INTEL_INFO(dev)->gen;
- so->rodata = render_state_get_rodata(dev, so->gen);
+ so->gen = INTEL_GEN(dev_priv);
+ so->rodata = render_state_get_rodata(so->gen);
if (so->rodata == NULL)
return 0;
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL)
- return -ENOMEM;
+ so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
+ if (IS_ERR(so->obj))
+ return PTR_ERR(so->obj);
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
@@ -93,6 +94,7 @@ free_gem:
static int render_state_setup(struct render_state *so)
{
+ struct drm_device *dev = so->obj->base.dev;
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
struct page *page;
@@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so)
so->aux_batch_offset = i * sizeof(u32);
+ if (HAS_POOLED_EU(dev)) {
+ /*
+ * We always program 3x6 pool config but depending upon which
+ * subslice is disabled HW drops down to appropriate config
+ * shown below.
+ *
+ * In the below table 2x6 config always refers to
+ * fused-down version, native 2x6 is not available and can
+ * be ignored
+ *
+ * SNo subslices config eu pool configuration
+ * -----------------------------------------------------------
+ * 1 3 subslices enabled (3x6) - 0x00777000 (9+9)
+ * 2 ss0 disabled (2x6) - 0x00777000 (3+9)
+ * 3 ss1 disabled (2x6) - 0x00770000 (6+6)
+ * 4 ss2 disabled (2x6) - 0x00007000 (9+3)
+ */
+ u32 eu_pool_config = 0x00777000;
+
+ OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
+ OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
+ OUT_BATCH(d, i, eu_pool_config);
+ OUT_BATCH(d, i, 0);
+ OUT_BATCH(d, i, 0);
+ OUT_BATCH(d, i, 0);
+ }
+
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
@@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
if (WARN_ON(engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, engine->dev);
+ ret = render_state_init(so, engine->i915);
if (ret)
return ret;
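render_state_init() also switches from i915_gem_alloc_object(), which reported failure with NULL, to i915_gem_object_create(), which encodes the errno in an ERR_PTR. A hedged reminder of the calling convention that change implies, illustrative only:

        obj = i915_gem_object_create(&dev_priv->drm, 4096);
        if (IS_ERR(obj))
                return PTR_ERR(obj);    /* propagate the encoded errno */

        /* ... use obj; release it on later failure paths as before ... */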
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 66571466e9a8..6f10b421487b 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long count = 0;
trace_i915_gem_shrink(dev_priv, target, flags);
- i915_gem_retire_requests(dev_priv->dev);
+ i915_gem_retire_requests(dev_priv);
+
+ /*
+ * Unbinding of objects will require HW access; Let us not wake the
+ * device just to recover a little memory. If absolutely necessary,
+ * we will force the wake during oom-notifier.
+ */
+ if ((flags & I915_SHRINK_BOUND) &&
+ !intel_runtime_pm_get_if_in_use(dev_priv))
+ flags &= ~I915_SHRINK_BOUND;
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
list_splice(&still_in_list, phase->list);
}
- i915_gem_retire_requests(dev_priv->dev);
+ if (flags & I915_SHRINK_BOUND)
+ intel_runtime_pm_put(dev_priv);
+
+ i915_gem_retire_requests(dev_priv);
return count;
}
@@ -245,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
unsigned long count;
bool unlock;
@@ -253,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
if (!i915_gem_shrinker_lock(dev, &unlock))
return 0;
+ i915_gem_retire_requests(dev_priv);
+
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (can_release_pages(obj))
@@ -274,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
unsigned long freed;
bool unlock;
@@ -309,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
{
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
- while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+ while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
@@ -330,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
{
dev_priv->mm.interruptible = slu->was_interruptible;
if (slu->unlock)
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int
@@ -345,7 +359,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
+ intel_runtime_pm_get(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv);
+ intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
@@ -386,17 +402,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu;
- unsigned long freed_pages;
+ struct i915_vma *vma, *next;
+ unsigned long freed_pages = 0;
+ int ret;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
- freed_pages = i915_gem_shrink(dev_priv, -1UL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE |
- I915_SHRINK_VMAPS);
+ /* Force everything onto the inactive lists */
+ ret = i915_gem_wait_for_idle(dev_priv);
+ if (ret)
+ goto out;
+
+ intel_runtime_pm_get(dev_priv);
+ freed_pages += i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_VMAPS);
+ intel_runtime_pm_put(dev_priv);
+
+ /* We also want to clear any cached iomaps as they wrap vmap */
+ list_for_each_entry_safe(vma, next,
+ &dev_priv->ggtt.base.inactive_list, vm_link) {
+ unsigned long count = vma->node.size >> PAGE_SHIFT;
+ if (vma->iomap && i915_vma_unbind(vma) == 0)
+ freed_pages += count;
+ }
+out:
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages;
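The shrinker changes draw a line between opportunistic and mandatory reclaim: a normal shrink only unbinds bound objects if the device is already awake (intel_runtime_pm_get_if_in_use()), whereas the oom and vmap notifiers take a full wakeref because they must make progress, and the vmap notifier additionally unbinds VMAs with cached iomaps since those consume vmap space. A hedged sketch of the conditional-wakeref half of that pattern, illustrative only:

        /* Only do work that needs register access if the device is awake. */
        if ((flags & I915_SHRINK_BOUND) &&
            !intel_runtime_pm_get_if_in_use(dev_priv))
                flags &= ~I915_SHRINK_BOUND;

        /* ... reclaim according to the (possibly reduced) flags ... */

        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(dev_priv); /* balance the conditional get */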
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 44004e3f09e4..66be299a1486 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -111,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 3) {
u32 bsm;
- pci_read_config_dword(dev->pdev, BSM, &bsm);
+ pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
- base = bsm & BSM_MASK;
+ base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) {
u16 toud = 0;
@@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (obj->stolen) {
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -601,7 +601,7 @@ cleanup:
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb34032cd..8030199731db 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (IS_GEN3(obj->base.dev)) {
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode))
- ret = i915_gem_object_ggtt_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
if (ret == 0) {
if (obj->pages &&
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-int
-i915_gem_init_userptr(struct drm_device *dev)
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
- return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..9d73d2216adc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt;
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (INTEL_INFO(dev)->gen == 7)
+ if (IS_GEN7(dev))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if (error->ring[i].num_waiters) {
+ err_printf(m, "%s --- %d waiters\n",
+ dev_priv->engine[i].name,
+ error->ring[i].num_waiters);
+ for (j = 0; j < error->ring[i].num_waiters; j++) {
+ err_printf(m, " seqno 0x%08x for %s [%d]\n",
+ error->ring[i].waiters[j].seqno,
+ error->ring[i].waiters[j].comm,
+ error->ring[i].waiters[j].pid);
+ }
+ }
+
if ((obj = error->ring[i].ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
hws_page[elt+1],
hws_page[elt+2],
hws_page[elt+3]);
- offset += 16;
+ offset += 16;
}
}
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
- kfree(error->ring[i].requests);
i915_error_object_free(error->ring[i].wa_ctx);
+ kfree(error->ring[i].requests);
+ kfree(error->ring[i].waiters);
}
i915_error_object_free(error->semaphore_obj);
@@ -824,19 +837,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code;
}
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
- } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+ } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
}
@@ -851,7 +863,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *to;
enum intel_engine_id id;
- if (!i915_semaphore_is_enabled(dev_priv->dev))
+ if (!i915_semaphore_is_enabled(dev_priv))
return;
if (!error->semaphore_obj)
@@ -893,31 +905,71 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
}
}
-static void i915_record_ring_state(struct drm_device *dev,
+static void engine_record_waiters(struct intel_engine_cs *engine,
+ struct drm_i915_error_ring *ering)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_error_waiter *waiter;
+ struct rb_node *rb;
+ int count;
+
+ ering->num_waiters = 0;
+ ering->waiters = NULL;
+
+ spin_lock(&b->lock);
+ count = 0;
+ for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
+ count++;
+ spin_unlock(&b->lock);
+
+ waiter = NULL;
+ if (count)
+ waiter = kmalloc_array(count,
+ sizeof(struct drm_i915_error_waiter),
+ GFP_ATOMIC);
+ if (!waiter)
+ return;
+
+ ering->waiters = waiter;
+
+ spin_lock(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ strcpy(waiter->comm, w->tsk->comm);
+ waiter->pid = w->tsk->pid;
+ waiter->seqno = w->seqno;
+ waiter++;
+
+ if (++ering->num_waiters == count)
+ break;
+ }
+ spin_unlock(&b->lock);
+}
+
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
gen8_record_semaphore_state(dev_priv, error, engine,
ering);
else
gen6_record_semaphore_state(dev_priv, engine, ering);
}
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
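engine_record_waiters() runs in the error-capture path, so it cannot allocate while holding the breadcrumbs spinlock: it counts the waiters under the lock, drops it to kmalloc_array() with GFP_ATOMIC, then re-acquires the lock and copies at most 'count' entries, tolerating the tree having changed in between. A generic sketch of that count/allocate/copy idiom (illustrative; the list and 'payload' names are hypothetical):

        spin_lock(&lock);
        count = 0;
        list_for_each_entry(item, &items, link)
                count++;
        spin_unlock(&lock);

        if (!count)
                return;

        snapshot = kmalloc_array(count, sizeof(*snapshot), GFP_ATOMIC);
        if (!snapshot)
                return;         /* best effort: capture continues without it */

        spin_lock(&lock);
        n = 0;
        list_for_each_entry(item, &items, link) {
                snapshot[n] = item->payload;
                if (++n == count)       /* the list may have grown meanwhile */
                        break;
        }
        spin_unlock(&lock);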
@@ -929,20 +981,20 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instdone = I915_READ(GEN2_INSTDONE);
}
- ering->waiting = waitqueue_active(&engine->irq_queue);
+ ering->waiting = intel_engine_has_waiter(engine);
ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ering->acthd = intel_ring_get_active_head(engine);
- ering->seqno = engine->get_seqno(engine);
+ ering->seqno = intel_engine_get_seqno(engine);
ering->last_seqno = engine->last_submitted_seqno;
ering->start = I915_READ_START(engine);
ering->head = I915_READ_HEAD(engine);
ering->tail = I915_READ_TAIL(engine);
ering->ctl = I915_READ_CTL(engine);
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
i915_reg_t mmio;
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
default:
case RCS:
@@ -958,7 +1010,7 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(engine->i915)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -971,18 +1023,18 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hangcheck_score = engine->hangcheck.score;
ering->hangcheck_action = engine->hangcheck.action;
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
int i;
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_INFO(dev)->gen >= 8)
+ else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +1050,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */
@@ -1016,34 +1068,33 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
}
}
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
- struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
- if (engine->dev == NULL)
+ if (!intel_engine_initialized(engine))
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, engine, &error->ring[i]);
+ i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
+ engine_record_waiters(engine, &error->ring[i]);
request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
+ struct intel_ringbuffer *rb;
- vm = request->ctx && request->ctx->ppgtt ?
- &request->ctx->ppgtt->base :
- &ggtt->base;
+ vm = request->ctx->ppgtt ?
+ &request->ctx->ppgtt->base : &ggtt->base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1070,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
rcu_read_unlock();
}
- }
- if (i915.enable_execlists) {
- /* TODO: This is only a small fix to keep basic error
- * capture working, but we need to add more information
- * for it to be useful (e.g. dump the context being
- * executed).
- */
- if (request)
- rbuf = request->ctx->engine[engine->id].ringbuf;
- else
- rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
- } else
- rbuf = engine->buffer;
-
- error->ring[i].cpu_ring_head = rbuf->head;
- error->ring[i].cpu_ring_tail = rbuf->tail;
+ error->simulated |=
+ request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
- error->ring[i].ringbuffer =
- i915_error_ggtt_object_create(dev_priv, rbuf->obj);
+ rb = request->ringbuf;
+ error->ring[i].cpu_ring_head = rb->head;
+ error->ring[i].cpu_ring_tail = rb->tail;
+ error->ring[i].ringbuffer =
+ i915_error_ggtt_object_create(dev_priv,
+ rb->obj);
+ }
error->ring[i].hws_page =
i915_error_ggtt_object_create(dev_priv,
@@ -1234,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1301,15 +1343,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- i915_get_extra_instdone(dev, error->extra_instdone);
+ i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
u32 engine_mask,
const char *error_msg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode;
int ring_id = -1, len;
@@ -1317,7 +1358,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x",
- INTEL_INFO(dev)->gen, ring_id, ecode);
+ INTEL_GEN(dev_priv), ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
@@ -1352,14 +1393,17 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg)
{
static bool warned;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
+ if (READ_ONCE(dev_priv->gpu_error.first_error))
+ return;
+
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
@@ -1372,23 +1416,25 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
+ i915_gem_record_fences(dev_priv, error);
+ i915_gem_record_rings(dev_priv, error);
do_gettimeofday(&error->time);
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
+ error->overlay = intel_overlay_capture_error_state(dev_priv);
+ error->display = intel_display_capture_error_state(dev_priv);
- i915_error_capture_msg(dev, error, engine_mask, error_msg);
+ i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (dev_priv->gpu_error.first_error == NULL) {
- dev_priv->gpu_error.first_error = error;
- error = NULL;
+ if (!error->simulated) {
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (!dev_priv->gpu_error.first_error) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error) {
i915_error_state_free(&error->ref);
@@ -1400,7 +1446,8 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ dev_priv->drm.primary->index);
warned = true;
}
}
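
The rewritten capture path above takes an unlocked peek at gpu_error.first_error to bail out early, then publishes the new dump under the lock only when the hang is not simulated. A compressed, standalone sketch of that publish-once pattern (capture_error(), struct error_state and the lock names are invented for illustration, not driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

struct error_state {
        int ecode;
        bool simulated;
};

static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
static struct error_state *_Atomic first_error;         /* publish-once slot */

static void capture_error(int ecode, bool simulated)
{
        struct error_state *error;

        /* Cheap unlocked early-out: a dump has already been captured. */
        if (atomic_load(&first_error))
                return;

        error = calloc(1, sizeof(*error));
        if (!error)
                return;
        error->ecode = ecode;
        error->simulated = simulated;

        /* Publish only real hangs, and re-check the slot under the lock. */
        if (!error->simulated) {
                pthread_mutex_lock(&error_lock);
                if (!atomic_load(&first_error)) {
                        atomic_store(&first_error, error);
                        error = NULL;                    /* ownership handed over */
                }
                pthread_mutex_unlock(&error_lock);
        }

        free(error);    /* lost the race, or the hang was simulated */
}
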
@@ -1408,7 +1455,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
@@ -1426,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
void i915_destroy_error_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_error_state *error;
spin_lock_irq(&dev_priv->gpu_error.lock);
@@ -1450,17 +1497,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+ uint32_t *instdone)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- if (IS_GEN2(dev) || IS_GEN3(dev))
+ if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+ else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9ad3..cf5a65be4fe0 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
+/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
-#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
-
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
+#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
+#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
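
The reshuffled WOPCM defines above encode sizes as a 4KiB page count shifted into bytes; a throwaway compile-time check (standalone C, not part of the header) confirming the /* 512KB */ and /* 64KB */ comments:

/* Sanity check of the shifted constants, assuming 4KiB (1 << 12) pages. */
_Static_assert((0x80 << 12) == 512 * 1024, "GUC_WOPCM_TOP should be 512KiB");
_Static_assert((0x10 << 12) == 64 * 1024, "RC6 reserved region should be 64KiB");
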
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6643..2112e029db6a 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
- /* No HOST2GUC command should take longer than 10ms */
- ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No HOST2GUC command should ever take longer than 10ms.
+ */
+ ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
+ if (ret)
+ ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
if (status != GUC2HOST_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
@@ -153,13 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct drm_device *dev = dev_priv->dev;
u32 data[2];
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6(dev) ||
- NEEDS_WaRsDisableCoarsePowerGating(dev))
+ if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -175,94 +179,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
* client object which contains the page being used for the doorbell
*/
-static void guc_init_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int guc_update_doorbell_id(struct intel_guc *guc,
+ struct i915_guc_client *client,
+ u16 new_id)
{
+ struct sg_table *sg = guc->ctx_pool_obj->pages;
+ void *doorbell_bitmap = guc->doorbell_bitmap;
struct guc_doorbell_info *doorbell;
+ struct guc_context_desc desc;
+ size_t len;
doorbell = client->client_base + client->doorbell_offset;
- doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = 0;
-}
-
-static int guc_ring_doorbell(struct i915_guc_client *gc)
-{
- struct guc_process_desc *desc;
- union guc_doorbell_qw db_cmp, db_exc, db_ret;
- union guc_doorbell_qw *db;
- int attempt = 2, ret = -EAGAIN;
-
- desc = gc->client_base + gc->proc_desc_offset;
-
- /* Update the tail so it is visible to GuC */
- desc->tail = gc->wq_tail;
-
- /* current cookie */
- db_cmp.db_status = GUC_DOORBELL_ENABLED;
- db_cmp.cookie = gc->cookie;
-
- /* cookie to be updated */
- db_exc.db_status = GUC_DOORBELL_ENABLED;
- db_exc.cookie = gc->cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
-
- /* pointer of current doorbell cacheline */
- db = gc->client_base + gc->doorbell_offset;
-
- while (attempt--) {
- /* lets ring the doorbell */
- db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
- db_cmp.value_qw, db_exc.value_qw);
-
- /* if the exchange was successfully executed */
- if (db_ret.value_qw == db_cmp.value_qw) {
- /* db was successfully rung */
- gc->cookie = db_exc.cookie;
- ret = 0;
- break;
- }
+ if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
+ test_bit(client->doorbell_id, doorbell_bitmap)) {
+ /* Deactivate the old doorbell */
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ (void)host2guc_release_doorbell(guc, client);
+ __clear_bit(client->doorbell_id, doorbell_bitmap);
+ }
- /* XXX: doorbell was lost and need to acquire it again */
- if (db_ret.db_status == GUC_DOORBELL_DISABLED)
- break;
+ /* Update the GuC's idea of the doorbell ID */
+ len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+ sizeof(desc) * client->ctx_index);
+ if (len != sizeof(desc))
+ return -EFAULT;
+ desc.db_id = new_id;
+ len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+ sizeof(desc) * client->ctx_index);
+ if (len != sizeof(desc))
+ return -EFAULT;
- DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
- db_cmp.cookie, db_ret.cookie);
+ client->doorbell_id = new_id;
+ if (new_id == GUC_INVALID_DOORBELL_ID)
+ return 0;
- /* update the cookie to newly read cookie from GuC */
- db_cmp.cookie = db_ret.cookie;
- db_exc.cookie = db_ret.cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
- }
+ /* Activate the new doorbell */
+ __set_bit(new_id, doorbell_bitmap);
+ doorbell->cookie = 0;
+ doorbell->db_status = GUC_DOORBELL_ENABLED;
+ return host2guc_allocate_doorbell(guc, client);
+}
- return ret;
+static int guc_init_doorbell(struct intel_guc *guc,
+ struct i915_guc_client *client,
+ uint16_t db_id)
+{
+ return guc_update_doorbell_id(guc, client, db_id);
}
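
guc_update_doorbell_id() above reassigns a client's doorbell in three steps: tear down the old doorbell, patch the db_id field of the client's context descriptor inside the shared pool (via sg_pcopy_to_buffer()/sg_pcopy_from_buffer()), and only then activate and allocate the new one. A rough standalone analogue of the read-patch-write step, with desc_pool, struct ctx_desc and NUM_CONTEXTS invented for the example:

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define NUM_CONTEXTS 1024

struct ctx_desc {
        uint32_t context_id;
        uint16_t db_id;
        /* ... rest of the firmware-visible descriptor ... */
};

/* Stand-in for the shared context pool (one descriptor per context). */
static uint8_t desc_pool[NUM_CONTEXTS * sizeof(struct ctx_desc)];

static int update_desc_db_id(uint32_t ctx_index, uint16_t new_id)
{
        struct ctx_desc desc;
        const size_t off = (size_t)ctx_index * sizeof(desc);

        if (ctx_index >= NUM_CONTEXTS)
                return -EINVAL;

        memcpy(&desc, desc_pool + off, sizeof(desc));   /* read it out     */
        desc.db_id = new_id;                            /* patch one field */
        memcpy(desc_pool + off, &desc, sizeof(desc));   /* write it back   */
        return 0;
}
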
static void guc_disable_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct guc_doorbell_info *doorbell;
- i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
- int value;
-
- doorbell = client->client_base + client->doorbell_offset;
+ (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
- doorbell->db_status = GUC_DOORBELL_DISABLED;
+ /* XXX: wait for any interrupts */
+ /* XXX: wait for workqueue to drain */
+}
- I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
+static uint16_t
+select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+{
+ /*
+ * The bitmap tracks which doorbell registers are currently in use.
+ * It is split into two halves; the first half is used for normal
+ * priority contexts, the second half for high-priority ones.
+ * Note that logically higher priorities are numerically less than
+ * normal ones, so the test below means "is it high-priority?"
+ */
+ const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
+ const uint16_t half = GUC_MAX_DOORBELLS / 2;
+ const uint16_t start = hi_pri ? half : 0;
+ const uint16_t end = start + half;
+ uint16_t id;
- value = I915_READ(drbreg);
- WARN_ON((value & GEN8_DRB_VALID) != 0);
+ id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
+ if (id == end)
+ id = GUC_INVALID_DOORBELL_ID;
- I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
- I915_WRITE(drbreg, 0);
+ DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
+ hi_pri ? "high" : "normal", id);
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
+ return id;
}
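
A small self-contained sketch of the selection policy described in the comment above: the doorbell id space is split in half by priority class and the first free bit in the relevant half is taken. The bitmap helper here is a plain C stand-in for the kernel's find_next_zero_bit(); MAX_DOORBELLS, PRIORITY_HIGH and INVALID_DB_ID are example values, not the driver's constants:

#include <stdbool.h>
#include <stdint.h>

#define MAX_DOORBELLS   256
#define PRIORITY_HIGH   1               /* lower number == more important */
#define INVALID_DB_ID   0xffff

#define BITS_PER_WORD   (8 * sizeof(unsigned long))

static bool db_test_bit(const unsigned long *map, unsigned int nr)
{
        return (map[nr / BITS_PER_WORD] >> (nr % BITS_PER_WORD)) & 1;
}

static uint16_t select_doorbell(const unsigned long *bitmap, uint32_t priority)
{
        const bool hi_pri = (priority <= PRIORITY_HIGH);
        const uint16_t half = MAX_DOORBELLS / 2;
        const uint16_t start = hi_pri ? half : 0;
        const uint16_t end = start + half;
        uint16_t id;

        /* Scan only the half of the id space reserved for this class. */
        for (id = start; id < end; id++)
                if (!db_test_bit(bitmap, id))
                        return id;

        return INVALID_DB_ID;           /* that half is fully occupied */
}
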
/*
@@ -289,37 +287,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
return offset;
}
-static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
-{
- /*
- * The bitmap is split into two halves; the first half is used for
- * normal priority contexts, the second half for high-priority ones.
- * Note that logically higher priorities are numerically less than
- * normal ones, so the test below means "is it high-priority?"
- */
- const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
- const uint16_t half = GUC_MAX_DOORBELLS / 2;
- const uint16_t start = hi_pri ? half : 0;
- const uint16_t end = start + half;
- uint16_t id;
-
- id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
- if (id == end)
- id = GUC_INVALID_DOORBELL_ID;
- else
- bitmap_set(guc->doorbell_bitmap, id, 1);
-
- DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
- hi_pri ? "high" : "normal", id);
-
- return id;
-}
-
-static void release_doorbell(struct intel_guc *guc, uint16_t id)
-{
- bitmap_clear(guc->doorbell_bitmap, id, 1);
-}
-
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
@@ -361,10 +328,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
- struct intel_context *ctx = client->owner;
+ struct i915_gem_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
- enum intel_engine_id id;
u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@@ -374,10 +340,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
- uint64_t ctx_desc;
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -386,20 +352,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- obj = ctx->engine[id].state;
- if (!obj)
+ if (!ce->state)
break; /* XXX: continue? */
- ctx_desc = intel_lr_context_descriptor(ctx, engine);
- lrc->context_desc = (u32)ctx_desc;
+ lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ctx->engine[id].ringbuf->obj;
+ obj = ce->ringbuf->obj;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_begin = gfx_addr;
@@ -427,7 +391,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.wq_size = client->wq_size;
/*
- * XXX: Take LRCs from an existing intel_context if this is not an
+ * XXX: Take LRCs from an existing context if this is not an
* IsKMDCreatedContext client
*/
desc.desc_private = (uintptr_t)client;
@@ -451,47 +415,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
-int i915_guc_wq_check_space(struct i915_guc_client *gc)
+/**
+ * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * @request: request associated with the commands
+ *
+ * Return: 0 if space is available
+ * -EAGAIN if space is not currently available
+ *
+ * This function must be called (and must return 0) before a request
+ * is submitted to the GuC via i915_guc_submit() below. Once a result
+ * of 0 has been returned, it remains valid until (but only until)
+ * the next call to submit().
+ *
+ * This precheck allows the caller to determine in advance that space
+ * will be available for the next submission before committing resources
+ * to it, and helps avoid late failures with complicated recovery paths.
+ */
+int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
{
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ struct i915_guc_client *gc = request->i915->guc.execbuf_client;
struct guc_process_desc *desc;
- u32 size = sizeof(struct guc_wq_item);
- int ret = -ETIMEDOUT, timeout_counter = 200;
+ u32 freespace;
- if (!gc)
- return 0;
+ GEM_BUG_ON(gc == NULL);
desc = gc->client_base + gc->proc_desc_offset;
- while (timeout_counter-- > 0) {
- if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
- ret = 0;
- break;
- }
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ if (likely(freespace >= wqi_size))
+ return 0;
- if (timeout_counter)
- usleep_range(1000, 2000);
- };
+ gc->no_wq_space += 1;
- return ret;
+ return -EAGAIN;
}
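
For reference, a standalone sketch of the non-blocking space check performed above: with a power-of-two ring, the free space seen by the producer is (consumer - producer - 1) masked by size - 1, which is what the kernel's CIRC_SPACE() macro computes. WQ_SIZE, WQI_SIZE and the function names are invented for the example:

#include <errno.h>
#include <stdint.h>

#define WQ_SIZE         4096u           /* bytes, must be a power of two */
#define WQI_SIZE        16u             /* one work queue item */

/* Free space seen by the producer of a power-of-two ring (CIRC_SPACE). */
static uint32_t ring_space(uint32_t producer, uint32_t consumer)
{
        return (consumer - producer - 1) & (WQ_SIZE - 1);
}

/* Non-blocking precheck: succeed immediately or report -EAGAIN. */
static int wq_check_space(uint32_t wq_tail /* producer */,
                          uint32_t wq_head /* consumer */)
{
        if (ring_space(wq_tail, wq_head) >= WQI_SIZE)
                return 0;
        return -EAGAIN;
}
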
-static int guc_add_workqueue_item(struct i915_guc_client *gc,
- struct drm_i915_gem_request *rq)
+static void guc_add_workqueue_item(struct i915_guc_client *gc,
+ struct drm_i915_gem_request *rq)
{
+ /* wqi_len is in DWords, and does not include the one-word header */
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ const u32 wqi_len = wqi_size/sizeof(u32) - 1;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
- u32 tail, wq_len, wq_off, space;
+ u32 freespace, tail, wq_off, wq_page;
desc = gc->client_base + gc->proc_desc_offset;
- space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
- if (WARN_ON(space < sizeof(struct guc_wq_item)))
- return -ENOSPC; /* shouldn't happen */
- /* postincrement WQ tail for next time */
- wq_off = gc->wq_tail;
- gc->wq_tail += sizeof(struct guc_wq_item);
- gc->wq_tail &= gc->wq_size - 1;
+ /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ GEM_BUG_ON(freespace < wqi_size);
+
+ /* The GuC firmware wants the tail index in QWords, not bytes */
+ tail = rq->tail;
+ GEM_BUG_ON(tail & 7);
+ tail >>= 3;
+ GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -500,19 +481,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
* XXX: if not the case, we need save data to a temp wqi and copy it to
* workqueue buffer dw by dw.
*/
- WARN_ON(sizeof(struct guc_wq_item) != 16);
- WARN_ON(wq_off & 3);
+ BUILD_BUG_ON(wqi_size != 16);
+
+ /* postincrement WQ tail for next time */
+ wq_off = gc->wq_tail;
+ gc->wq_tail += wqi_size;
+ gc->wq_tail &= gc->wq_size - 1;
+ GEM_BUG_ON(wq_off & (wqi_size - 1));
- /* wq starts from the page after doorbell / process_desc */
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
- (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
+ /* WQ starts from the page after doorbell / process_desc */
+ wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
wq_off &= PAGE_SIZE - 1;
+ base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
wqi = (struct guc_wq_item *)((char *)base + wq_off);
- /* len does not include the header */
- wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
+ /* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
- (wq_len << WQ_LEN_SHIFT) |
+ (wqi_len << WQ_LEN_SHIFT) |
(rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
@@ -520,48 +505,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
rq->engine);
- /* The GuC firmware wants the tail index in QWords, not bytes */
- tail = rq->ringbuf->tail >> 3;
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = 0; /*XXX: what fence to be here */
+ wqi->fence_id = rq->seqno;
kunmap_atomic(base);
+}
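
The item written above is four dwords: a header packing type, length (excluding the header), target engine and flush flags, followed by the context descriptor, the ring tail converted to a qword index, and the seqno used as fence id. A hedged standalone sketch of that packing; the bit positions below are placeholders, since the real layout lives in the driver's GuC firmware interface header:

#include <assert.h>
#include <stdint.h>

/* Placeholder bit layout -- the real values may differ. */
#define WQ_TYPE_INORDER         (0u << 0)
#define WQ_NO_WCFLUSH_WAIT      (1u << 7)
#define WQ_TARGET_SHIFT         10
#define WQ_LEN_SHIFT            16
#define WQ_RING_TAIL_SHIFT      0

struct wq_item {
        uint32_t header;
        uint32_t context_desc;
        uint32_t ring_tail;
        uint32_t fence_id;
};

static struct wq_item make_wq_item(uint32_t ctx_desc, uint32_t engine_id,
                                   uint32_t tail_in_bytes, uint32_t seqno)
{
        /* Length is in dwords and excludes the one-word header itself. */
        const uint32_t wqi_len = sizeof(struct wq_item) / sizeof(uint32_t) - 1;
        struct wq_item wqi;

        /* The firmware wants the ring tail as a qword index, not bytes. */
        assert((tail_in_bytes & 7) == 0);

        wqi.header = WQ_TYPE_INORDER |
                     (wqi_len << WQ_LEN_SHIFT) |
                     (engine_id << WQ_TARGET_SHIFT) |
                     WQ_NO_WCFLUSH_WAIT;
        wqi.context_desc = ctx_desc;
        wqi.ring_tail = (tail_in_bytes >> 3) << WQ_RING_TAIL_SHIFT;
        wqi.fence_id = seqno;
        return wqi;
}
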
- return 0;
+static int guc_ring_doorbell(struct i915_guc_client *gc)
+{
+ struct guc_process_desc *desc;
+ union guc_doorbell_qw db_cmp, db_exc, db_ret;
+ union guc_doorbell_qw *db;
+ int attempt = 2, ret = -EAGAIN;
+
+ desc = gc->client_base + gc->proc_desc_offset;
+
+ /* Update the tail so it is visible to GuC */
+ desc->tail = gc->wq_tail;
+
+ /* current cookie */
+ db_cmp.db_status = GUC_DOORBELL_ENABLED;
+ db_cmp.cookie = gc->cookie;
+
+ /* cookie to be updated */
+ db_exc.db_status = GUC_DOORBELL_ENABLED;
+ db_exc.cookie = gc->cookie + 1;
+ if (db_exc.cookie == 0)
+ db_exc.cookie = 1;
+
+ /* pointer of current doorbell cacheline */
+ db = gc->client_base + gc->doorbell_offset;
+
+ while (attempt--) {
+ /* lets ring the doorbell */
+ db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
+ db_cmp.value_qw, db_exc.value_qw);
+
+ /* if the exchange was successfully executed */
+ if (db_ret.value_qw == db_cmp.value_qw) {
+ /* db was successfully rung */
+ gc->cookie = db_exc.cookie;
+ ret = 0;
+ break;
+ }
+
+ /* XXX: doorbell was lost and need to acquire it again */
+ if (db_ret.db_status == GUC_DOORBELL_DISABLED)
+ break;
+
+ DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
+ db_cmp.cookie, db_ret.cookie);
+
+ /* update the cookie to newly read cookie from GuC */
+ db_cmp.cookie = db_ret.cookie;
+ db_exc.cookie = db_ret.cookie + 1;
+ if (db_exc.cookie == 0)
+ db_exc.cookie = 1;
+ }
+
+ return ret;
}
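
Ringing the doorbell is a lock-free update of a qword shared with the firmware: keep the status at ENABLED while advancing the cookie with a compare-and-swap, and if the firmware moved the cookie underneath us, resync and retry once. A condensed standalone model using C11 atomics (the value packing and constants here are invented):

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define DB_ENABLED      1u
#define DB_DISABLED     0u

union doorbell_qw {
        struct {
                uint32_t db_status;
                uint32_t cookie;
        };
        uint64_t value_qw;
};

static int ring_doorbell(_Atomic uint64_t *db, uint32_t *cookie)
{
        union doorbell_qw cmp, exc, cur;
        int attempt = 2;

        /* Expected value: doorbell enabled, cookie as we last saw it. */
        cmp.db_status = DB_ENABLED;
        cmp.cookie = *cookie;

        /* Desired value: same status, cookie advanced (never zero). */
        exc.db_status = DB_ENABLED;
        exc.cookie = *cookie + 1;
        if (exc.cookie == 0)
                exc.cookie = 1;

        while (attempt--) {
                cur.value_qw = cmp.value_qw;
                if (atomic_compare_exchange_strong(db, &cur.value_qw,
                                                   exc.value_qw)) {
                        *cookie = exc.cookie;           /* doorbell rung */
                        return 0;
                }

                if (cur.db_status == DB_DISABLED)
                        break;                          /* doorbell revoked */

                /* The other side advanced the cookie: resync, retry once. */
                cmp.value_qw = cur.value_qw;
                exc.cookie = cur.cookie + 1;
                if (exc.cookie == 0)
                        exc.cookie = 1;
        }
        return -EAGAIN;
}
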
/**
* i915_guc_submit() - Submit commands through GuC
- * @client: the guc client where commands will go through
* @rq: request associated with the commands
*
- * Return: 0 if succeed
+ * Return: 0 on success, otherwise an errno.
+ * (Note: nonzero really shouldn't happen!)
+ *
+ * The caller must have already called i915_guc_wq_check_space() above
+ * with a result of 0 (success) since the last request submission. This
+ * guarantees that there is space in the work queue for the new request,
+ * so enqueuing the item cannot fail.
+ *
+ * Bad Things Will Happen if the caller violates this protocol, e.g. calls
+ * submit() when check() says there's no space, or calls submit() multiple
+ * times with no intervening check().
+ *
+ * The only error here arises if the doorbell hardware isn't functioning
+ * as expected, which really shouldn't happen.
*/
-int i915_guc_submit(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq)
+int i915_guc_submit(struct drm_i915_gem_request *rq)
{
- struct intel_guc *guc = client->guc;
- unsigned int engine_id = rq->engine->guc_id;
- int q_ret, b_ret;
+ unsigned int engine_id = rq->engine->id;
+ struct intel_guc *guc = &rq->i915->guc;
+ struct i915_guc_client *client = guc->execbuf_client;
+ int b_ret;
- q_ret = guc_add_workqueue_item(client, rq);
- if (q_ret == 0)
- b_ret = guc_ring_doorbell(client);
+ guc_add_workqueue_item(client, rq);
+ b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
- if (q_ret) {
- client->q_fail += 1;
- client->retcode = q_ret;
- } else if (b_ret) {
+ client->retcode = b_ret;
+ if (b_ret)
client->b_fail += 1;
- client->retcode = q_ret = b_ret;
- } else {
- client->retcode = 0;
- }
+
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->seqno;
- return q_ret;
+ return b_ret;
}
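
As the kernel-doc above spells out, check and submit form a two-call protocol: a successful i915_guc_wq_check_space() reserves nothing but guarantees that the very next i915_guc_submit() can enqueue without failing. A short usage sketch; the wrapper and the surrounding request handling are invented, only the two calls and their contract come from the code above:

/* Hypothetical caller: submit only after the precheck has succeeded. */
static int reserve_and_submit(struct drm_i915_gem_request *rq)
{
        int ret;

        ret = i915_guc_wq_check_space(rq);      /* may return -EAGAIN */
        if (ret)
                return ret;                     /* back off and retry later */

        return i915_guc_submit(rq);             /* queue space is guaranteed */
}
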
/*
@@ -572,7 +614,7 @@ int i915_guc_submit(struct i915_guc_client *client,
/**
* gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev: drm device
+ * @dev_priv: driver private data structure
* @size: size of object
*
* This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -581,14 +623,13 @@ int i915_guc_submit(struct i915_guc_client *client,
*
* Return: A drm_i915_gem_object if successful, otherwise NULL.
*/
-static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
- u32 size)
+static struct drm_i915_gem_object *
+gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- obj = i915_gem_alloc_object(dev, size);
- if (!obj)
+ obj = i915_gem_object_create(&dev_priv->drm, size);
+ if (IS_ERR(obj))
return NULL;
if (i915_gem_object_get_pages(obj)) {
@@ -623,10 +664,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
drm_gem_object_unreference(&obj->base);
}
-static void guc_client_free(struct drm_device *dev,
- struct i915_guc_client *client)
+static void
+guc_client_free(struct drm_i915_private *dev_priv,
+ struct i915_guc_client *client)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
if (!client)
@@ -639,17 +680,10 @@ static void guc_client_free(struct drm_device *dev,
if (client->client_base) {
/*
- * If we got as far as setting up a doorbell, make sure
- * we shut it down before unmapping & deallocating the
- * memory. So first disable the doorbell, then tell the
- * GuC that we've finished with it, finally deallocate
- * it in our bitmap
+ * If we got as far as setting up a doorbell, make sure we
+ * shut it down before unmapping & deallocating the memory.
*/
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
- guc_disable_doorbell(guc, client);
- host2guc_release_doorbell(guc, client);
- release_doorbell(guc, client->doorbell_id);
- }
+ guc_disable_doorbell(guc, client);
kunmap(kmap_to_page(client->client_base));
}
@@ -664,9 +698,51 @@ static void guc_client_free(struct drm_device *dev,
kfree(client);
}
+/*
+ * Borrow the first client to set up & tear down every doorbell
+ * in turn, to ensure that all doorbell h/w is (re)initialised.
+ */
+static void guc_init_doorbell_hw(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct i915_guc_client *client = guc->execbuf_client;
+ uint16_t db_id, i;
+ int err;
+
+ db_id = client->doorbell_id;
+
+ for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
+ i915_reg_t drbreg = GEN8_DRBREGL(i);
+ u32 value = I915_READ(drbreg);
+
+ err = guc_update_doorbell_id(guc, client, i);
+
+ /* Report update failure or unexpectedly active doorbell */
+ if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
+ DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
+ i, drbreg.reg, value, err);
+ }
+
+ /* Restore to original value */
+ err = guc_update_doorbell_id(guc, client, db_id);
+ if (err)
+ DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
+ db_id, err);
+
+ for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
+ i915_reg_t drbreg = GEN8_DRBREGL(i);
+ u32 value = I915_READ(drbreg);
+
+ if (i != db_id && (value & GUC_DOORBELL_ENABLED))
+ DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
+ i, drbreg.reg, value);
+
+ }
+}
+
/**
* guc_client_alloc() - Allocate an i915_guc_client
- * @dev: drm device
+ * @dev_priv: driver private data structure
* @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
* The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -676,14 +752,15 @@ static void guc_client_free(struct drm_device *dev,
*
* Return: An i915_guc_client object on success, else NULL.
*/
-static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
- uint32_t priority,
- struct intel_context *ctx)
+static struct i915_guc_client *
+guc_client_alloc(struct drm_i915_private *dev_priv,
+ uint32_t priority,
+ struct i915_gem_context *ctx)
{
struct i915_guc_client *client;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct drm_i915_gem_object *obj;
+ uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
@@ -702,7 +779,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
}
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
- obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
+ obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
if (!obj)
goto err;
@@ -712,6 +789,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
+ db_id = select_doorbell_register(guc, client->priority);
+ if (db_id == GUC_INVALID_DOORBELL_ID)
+ /* XXX: evict a doorbell instead? */
+ goto err;
+
client->doorbell_offset = select_doorbell_cacheline(guc);
/*
@@ -724,29 +806,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- client->doorbell_id = assign_doorbell(guc, client->priority);
- if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
- /* XXX: evict a doorbell instead */
- goto err;
-
guc_init_proc_desc(guc, client);
guc_init_ctx_desc(guc, client);
- guc_init_doorbell(guc, client);
-
- /* XXX: Any cache flushes needed? General domain mgmt calls? */
-
- if (host2guc_allocate_doorbell(guc, client))
+ if (guc_init_doorbell(guc, client, db_id))
goto err;
- DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
- priority, client, client->ctx_index, client->doorbell_id);
+ DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
+ priority, client, client->ctx_index);
+ DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
+ client->doorbell_id, client->doorbell_offset);
return client;
err:
DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
- guc_client_free(dev, client);
+ guc_client_free(dev_priv, client);
return NULL;
}
@@ -771,7 +846,7 @@ static void guc_create_log(struct intel_guc *guc)
obj = guc->log_obj;
if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv->dev, size);
+ obj = gem_allocate_guc_obj(dev_priv, size);
if (!obj) {
/* logging will be off */
i915.guc_log_level = -1;
@@ -831,7 +906,7 @@ static void guc_create_ads(struct intel_guc *guc)
obj = guc->ads_obj;
if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+ obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
if (!obj)
return;
@@ -885,66 +960,65 @@ static void guc_create_ads(struct intel_guc *guc)
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
*/
-int i915_guc_submission_init(struct drm_device *dev)
+int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const size_t ctxsize = sizeof(struct guc_context_desc);
const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
+ /* Wipe bitmap & delete client in case of reinitialisation */
+ bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
+ i915_guc_submission_disable(dev_priv);
+
if (!i915.enable_guc_submission)
return 0; /* not enabled */
if (guc->ctx_pool_obj)
return 0; /* already allocated */
- guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
+ guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
if (!guc->ctx_pool_obj)
return -ENOMEM;
ida_init(&guc->ctx_ids);
-
guc_create_log(guc);
-
guc_create_ads(guc);
return 0;
}
-int i915_guc_submission_enable(struct drm_device *dev)
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
- client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+ client = guc_client_alloc(dev_priv,
+ GUC_CTX_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
if (!client) {
DRM_ERROR("Failed to create execbuf guc_client\n");
return -ENOMEM;
}
guc->execbuf_client = client;
-
host2guc_sample_forcewake(guc, client);
+ guc_init_doorbell_hw(guc);
return 0;
}
-void i915_guc_submission_disable(struct drm_device *dev)
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- guc_client_free(dev, guc->execbuf_client);
+ guc_client_free(dev_priv, guc->execbuf_client);
guc->execbuf_client = NULL;
}
-void i915_guc_submission_fini(struct drm_device *dev)
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
gem_release_guc_obj(dev_priv->guc.ads_obj);
@@ -965,12 +1039,12 @@ void i915_guc_submission_fini(struct drm_device *dev)
*/
int intel_guc_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 data[3];
- if (!i915.enable_guc_submission)
+ if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
ctx = dev_priv->kernel_context;
@@ -991,12 +1065,12 @@ int intel_guc_suspend(struct drm_device *dev)
*/
int intel_guc_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 data[3];
- if (!i915.enable_guc_submission)
+ if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
ctx = dev_priv->kernel_context;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aab47f7bb61b..1c2aec392412 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
dev_priv->gt_irq_mask &= ~interrupt_mask;
dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
}
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
+ POSTING_READ_FW(GTIMR);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
__gen6_disable_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_device *dev)
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
i915_reg_t reg = gen6_pm_iir(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -349,14 +348,11 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-void gen6_enable_rps_interrupts(struct drm_device *dev)
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
spin_lock_irq(&dev_priv->irq_lock);
-
- WARN_ON(dev_priv->rps.pm_iir);
- WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+ WARN_ON_ONCE(dev_priv->rps.pm_iir);
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
dev_priv->pm_rps_events);
@@ -367,32 +363,13 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
- /*
- * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
- mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_INFO(dev_priv)->gen >= 8)
- mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return mask;
+ return (mask & ~dev_priv->rps.pm_intr_keep);
}
-void gen6_disable_rps_interrupts(struct drm_device *dev)
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.interrupts_enabled = false;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- cancel_work_sync(&dev_priv->rps.work);
-
- spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
@@ -401,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
~dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
- synchronize_irq(dev->irq);
+ /* Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&dev_priv->rps.work);
+ gen6_reset_rps_interrupts(dev_priv);
}
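
The reordered teardown above follows a common quiescing recipe: mask the interrupt so no new work can be queued, synchronise with any handler already running, and only then flush and reset the deferred work. A rough userspace analogue, with a pthread mutex standing in for synchronize_irq() and all names invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool source_enabled;
static atomic_int pending_work;
static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;

/* "Interrupt handler": may queue deferred work only while enabled. */
static void fake_interrupt(void)
{
        pthread_mutex_lock(&handler_lock);
        if (atomic_load(&source_enabled))
                atomic_fetch_add(&pending_work, 1);
        pthread_mutex_unlock(&handler_lock);
}

/* Stand-in for cancel_work_sync(): discard whatever is still queued. */
static void flush_work(void)
{
        atomic_store(&pending_work, 0);
}

static void disable_source(void)
{
        /* 1. Stop the source, so nothing new is queued after this point. */
        atomic_store(&source_enabled, false);

        /* 2. Wait out any handler already inside its critical section,
         *    playing the role of synchronize_irq(). */
        pthread_mutex_lock(&handler_lock);
        pthread_mutex_unlock(&handler_lock);

        /* 3. Only now is the flush free of re-queue races. */
        flush_work();
}
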
/**
@@ -582,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
status_mask);
else
enable_mask = status_mask << 16;
@@ -596,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
status_mask);
else
enable_mask = status_mask << 16;
@@ -605,19 +589,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-static void i915_enable_asle_pipestat(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+ if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
@@ -685,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
*/
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc =
@@ -732,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
@@ -741,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct drm_display_mode *mode = &crtc->base.hwmode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -750,7 +732,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +749,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
- if (HAS_DDI(dev) && !position) {
+ if (HAS_DDI(dev_priv) && !position) {
int i, temp;
for (i = 0; i < 100; i++) {
@@ -793,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int position;
@@ -835,7 +817,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
if (stime)
*stime = ktime_get();
- if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -897,7 +879,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
else
position += vtotal - vbl_end;
- if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
*vpos = position;
*hpos = 0;
} else {
@@ -914,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
unsigned long irqflags;
int position;
@@ -955,9 +937,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
&crtc->hwmode);
}
-static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
@@ -986,7 +967,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
new_delay = dev_priv->ips.min_delay;
}
- if (ironlake_set_drps(dev, new_delay))
+ if (ironlake_set_drps(dev_priv, new_delay))
dev_priv->ips.cur_delay = new_delay;
spin_unlock(&mchdev_lock);
@@ -996,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
static void notify_ring(struct intel_engine_cs *engine)
{
- if (!intel_engine_initialized(engine))
- return;
-
- trace_i915_gem_request_notify(engine);
- engine->user_interrupts++;
-
- wake_up_all(&engine->irq_queue);
+ smp_store_mb(engine->breadcrumbs.irq_posted, true);
+ if (intel_engine_wakeup(engine)) {
+ trace_i915_gem_request_notify(engine);
+ engine->breadcrumbs.irq_wakeups++;
+ }
}
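
The new notify_ring() posts irq_posted with a full barrier before attempting the wakeup, so a waiter either observes the flag or gets woken. A compressed userspace analogue of that store-then-wake pattern, using C11 atomics and a condition variable (all names invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool irq_posted;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER;

/* Interrupt side: publish the flag first, then wake any sleeper. */
static void notify(void)
{
        atomic_store(&irq_posted, true);   /* seq_cst, akin to smp_store_mb() */
        pthread_mutex_lock(&wait_lock);
        pthread_cond_broadcast(&wait_cond);
        pthread_mutex_unlock(&wait_lock);
}

/* Waiter side: sleep until the flag is observed, then consume it. */
static void wait_for_notify(void)
{
        pthread_mutex_lock(&wait_lock);
        while (!atomic_exchange(&irq_posted, false))
                pthread_cond_wait(&wait_cond, &wait_lock);
        pthread_mutex_unlock(&wait_lock);
}
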
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1083,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
- if (engine->irq_refcount)
+ if (intel_engine_has_waiter(engine))
return true;
return false;
@@ -1104,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
return;
}
- /*
- * The RPS work is synced during runtime suspend, we don't require a
- * wakeref. TODO: instead of disabling the asserts make sure that we
- * always hold an RPM reference while the work is running.
- */
- DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1123,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- goto out;
+ return;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -1175,11 +1147,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay += adj;
new_delay = clamp_t(int, new_delay, min, max);
- intel_set_rps(dev_priv->dev, new_delay);
+ intel_set_rps(dev_priv, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
-out:
- ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
@@ -1205,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
* In order to prevent a get/put style interface, acquire struct mutex
* any time we access those registers.
*/
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1241,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
parity_event[5] = NULL;
- kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+ kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
KOBJ_CHANGE, parity_event);
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1261,7 +1231,7 @@ out:
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock);
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1287,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
@@ -1297,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
-
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
@@ -1506,27 +1473,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
}
-static void gmbus_irq_handler(struct drm_device *dev)
+static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
wake_up_all(&dev_priv->gmbus_wait_queue);
}
-static void dp_aux_irq_handler(struct drm_device *dev)
+static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
int head, tail;
@@ -1550,7 +1513,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
entry = &pipe_crc->entries[head];
- entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+ entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
+ pipe);
entry->crc[0] = crc0;
entry->crc[1] = crc1;
entry->crc[2] = crc2;
@@ -1566,27 +1530,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4) {}
#endif
-static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1557,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t res1, res2;
- if (INTEL_INFO(dev)->gen >= 3)
+ if (INTEL_GEN(dev_priv) >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_RED(pipe)),
I915_READ(PIPE_CRC_RES_GREEN(pipe)),
I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1626,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- queue_work(dev_priv->wq, &dev_priv->rps.work);
+ schedule_work(&dev_priv->rps.work);
}
spin_unlock(&dev_priv->irq_lock);
}
@@ -1643,18 +1606,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
-static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- if (!drm_handle_vblank(dev, pipe))
- return false;
+ bool ret;
- return true;
+ ret = drm_handle_vblank(&dev_priv->drm, pipe);
+ if (ret)
+ intel_finish_page_flip_mmio(dev_priv, pipe);
+
+ return ret;
}
-static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
- u32 pipe_stats[I915_MAX_PIPES])
+static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1676,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
spin_unlock(&dev_priv->irq_lock);
}
-static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
- if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ intel_finish_page_flip_cs(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
}
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1710,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
return hotplug_status;
}
-static void i9xx_hpd_irq_handler(struct drm_device *dev,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_status)
{
u32 pin_mask = 0, long_mask = 0;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
if (hotplug_trigger) {
@@ -1760,11 +1724,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
hotplug_trigger, hpd_status_g4x,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
} else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
@@ -1772,7 +1736,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_i915,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
}
}
@@ -1780,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1831,7 +1795,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+ valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1814,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(dev_priv, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- valleyview_pipestat_irq_handler(dev, pipe_stats);
+ valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1863,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1911,7 +1875,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+ valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1891,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(dev_priv, gt_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- valleyview_pipestat_irq_handler(dev, pipe_stats);
+ valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1901,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
return ret;
}
-static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
/*
@@ -1966,16 +1930,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
-static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1948,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (pch_iir & SDE_GMBUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1981,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
-static void ivb_err_int_handler(struct drm_device *dev)
+static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
enum pipe pipe;
@@ -2032,19 +1994,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev))
- ivb_pipe_crc_irq_handler(dev, pipe);
+ if (IS_IVYBRIDGE(dev_priv))
+ ivb_pipe_crc_irq_handler(dev_priv, pipe);
else
- hsw_pipe_crc_irq_handler(dev, pipe);
+ hsw_pipe_crc_irq_handler(dev_priv, pipe);
}
}
I915_WRITE(GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_device *dev)
+static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 serr_int = I915_READ(SERR_INT);
if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2023,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
I915_WRITE(SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2038,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK_CPT)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (pch_iir & SDE_GMBUS_CPT)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2056,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev);
+ cpt_serr_int_handler(dev_priv);
}
-static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2089,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pin_mask)
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
}
-static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2108,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
-static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
+ u32 de_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
if (de_iir & DE_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe) &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = I915_READ(SDEIIR);
- if (HAS_PCH_CPT(dev))
- cpt_irq_handler(dev, pch_iir);
+ if (HAS_PCH_CPT(dev_priv))
+ cpt_irq_handler(dev_priv, pch_iir);
else
- ibx_irq_handler(dev, pch_iir);
+ ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
- if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev);
+ if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev_priv);
}
-static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
+ u32 de_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev);
+ ivb_err_int_handler(dev_priv);
if (de_iir & DE_AUX_CHANNEL_A_IVB)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
- cpt_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -2257,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2277,7 +2232,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(dev)) {
+ if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
@@ -2289,7 +2244,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (gt_iir) {
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
snb_gt_irq_handler(dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2254,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (de_iir) {
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
- if (INTEL_INFO(dev)->gen >= 7)
- ivb_display_irq_handler(dev, de_iir);
+ if (INTEL_GEN(dev_priv) >= 7)
+ ivb_display_irq_handler(dev_priv, de_iir);
else
- ilk_display_irq_handler(dev, de_iir);
+ ilk_display_irq_handler(dev_priv, de_iir);
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2271,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev)) {
+ if (!HAS_PCH_NOP(dev_priv)) {
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
}
@@ -2327,10 +2282,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
-static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2295,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
- struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
u32 iir;
enum pipe pipe;
@@ -2357,7 +2311,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
if (iir & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
@@ -2381,26 +2335,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_D;
if (iir & tmp_mask) {
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
found = true;
}
if (IS_BROXTON(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
- bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+ bxt_hpd_irq_handler(dev_priv, tmp_mask,
+ hpd_bxt);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
- ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+ ilk_hpd_irq_handler(dev_priv,
+ tmp_mask, hpd_bdw);
found = true;
}
}
- if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
- gmbus_irq_handler(dev);
+ if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2427,8 +2383,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
flip_done = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2392,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
else
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
- if (flip_done) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (flip_done)
+ intel_finish_page_flip_cs(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ hsw_pipe_crc_irq_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2413,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors);
}
- if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2426,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
- spt_irq_handler(dev, iir);
+ spt_irq_handler(dev_priv, iir);
else
- cpt_irq_handler(dev, iir);
+ cpt_irq_handler(dev_priv, iir);
} else {
/*
* Like on previous PCH there seems to be something
@@ -2490,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 master_ctl;
u32 gt_iir[4] = {};
irqreturn_t ret;
@@ -2521,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return ret;
}
-static void i915_error_wake_up(struct drm_i915_private *dev_priv,
- bool reset_completed)
+static void i915_error_wake_up(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
-
/*
* Notify all waiters for GPU completion events that reset state has
* been changed, and that they need to restart their wait after
@@ -2534,36 +2485,28 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- for_each_engine(engine, dev_priv)
- wake_up_all(&engine->irq_queue);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
-
- /*
- * Signal tasks blocked in i915_gem_wait_for_error that the pending
- * reset state is cleared.
- */
- if (reset_completed)
- wake_up_all(&dev_priv->gpu_error.reset_queue);
}
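The rewrite above drops the per-engine irq_queue wakeups in favour of a single driver-wide wait queue. As a reminder of the underlying waitqueue pattern, a minimal kernel-style sketch (names are illustrative and not taken from i915):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_reset_pending = true;

/* Waiter side: sleep until the condition clears. */
static void example_wait_for_reset(void)
{
	wait_event(example_wq, !READ_ONCE(example_reset_pending));
}

/* Notifier side: update the condition, then wake every waiter so it
 * can re-check and restart its wait if necessary. */
static void example_reset_done(void)
{
	WRITE_ONCE(example_reset_pending, false);
	wake_up_all(&example_wq);
}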
/**
* i915_reset_and_wakeup - do process context error handling work
- * @dev: drm device
+ * @dev_priv: i915 device private
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
/*
* Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2520,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
- reset_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/*
* In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2531,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/
intel_runtime_pm_get(dev_priv);
- intel_prepare_reset(dev);
+ intel_prepare_reset(dev_priv);
/*
* All state reset _must_ be completed before we update the
@@ -2597,27 +2539,26 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
- ret = i915_reset(dev);
+ ret = i915_reset(dev_priv);
- intel_finish_reset(dev);
+ intel_finish_reset(dev_priv);
intel_runtime_pm_put(dev_priv);
if (ret == 0)
- kobject_uevent_env(&dev->primary->kdev->kobj,
+ kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event);
/*
* Note: The wake_up also serves as a memory barrier so that
* waiters see the update value of the reset counter atomic_t.
*/
- i915_error_wake_up(dev_priv, true);
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
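i915_reset_and_wakeup() now looks the sysfs kobject up once and sends all three uevents through it. A hypothetical minimal example of the same kobject_uevent_env() call shape, where the env array is a NULL-terminated list of KEY=value strings (names are made up):

#include <linux/kobject.h>

static void example_notify_userspace(struct kobject *kobj)
{
	char *envp[] = { "EXAMPLE_ERROR=1", NULL };

	kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}

Userspace can watch such events with, for example, udevadm monitor --property.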
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
int pipe, i;
@@ -2627,9 +2568,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err("render error detected, EIR: 0x%08x\n", eir);
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -2651,7 +2592,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
}
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
@@ -2673,7 +2614,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2709,18 +2650,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
/**
* i915_handle_error - handle a gpu error
- * @dev: drm device
+ * @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
va_list args;
char error_msg[80];
@@ -2728,8 +2670,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
- i915_capture_error_state(dev, engine_mask, error_msg);
- i915_report_and_clear_eir(dev);
+ i915_capture_error_state(dev_priv, engine_mask, error_msg);
+ i915_report_and_clear_eir(dev_priv);
if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2748,10 +2690,10 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
* ensure that the waiters see the updated value of the reset
* counter atomic_t.
*/
- i915_error_wake_up(dev_priv, false);
+ i915_error_wake_up(dev_priv);
}
- i915_reset_and_wakeup(dev);
+ i915_reset_and_wakeup(dev_priv);
}
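i915_handle_error() keeps its printf-style interface while switching the first argument to dev_priv: the variadic arguments are formatted into a fixed 80-byte buffer before being passed on. A standalone plain-C analogue of that formatting step, using vsnprintf() in place of the kernel's vscnprintf() (everything here is illustrative):

#include <stdarg.h>
#include <stdio.h>

static void format_error_msg(char *buf, size_t len, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, len, fmt, args);
	va_end(args);
}

/* e.g.: char msg[80];
 *       format_error_msg(msg, sizeof(msg), "Kicking stuck wait on %s", "rcs");
 */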
/* Called from drm generic code, passed 'crtc' which
@@ -2759,7 +2701,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
*/
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2776,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
@@ -2790,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2803,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2818,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
*/
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2830,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
@@ -2842,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2853,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2869,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
}
static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(engine->i915) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2826,10 @@ static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
- if (INTEL_INFO(dev_priv)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv) {
if (engine == signaller)
continue;
@@ -2916,7 +2858,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@@ -2942,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
return NULL;
/*
@@ -2954,7 +2896,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
* ringbuffer itself.
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
for (i = backwards; i; --i) {
/*
@@ -2976,7 +2918,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
offset |= ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2928,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
static int semaphore_passed(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
u32 seqno;
@@ -3000,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
- if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+ if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
@@ -3028,7 +2970,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
if (engine->id != RCS)
return true;
- i915_get_extra_instdone(engine->dev, instdone);
+ i915_get_extra_instdone(engine->i915, instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3011,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
enum intel_ring_hangcheck_action ha;
u32 tmp;
@@ -3078,7 +3019,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != HANGCHECK_HUNG)
return ha;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3029,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
- if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
@@ -3113,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
return HANGCHECK_HUNG;
}
-static unsigned kick_waiters(struct intel_engine_cs *engine)
+static unsigned long kick_waiters(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = to_i915(engine->dev);
- unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+ struct drm_i915_private *i915 = engine->i915;
+ unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
- if (engine->hangcheck.user_interrupts == user_interrupts &&
+ if (engine->hangcheck.user_interrupts == irq_count &&
!test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
- if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+ if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
engine->name);
- else
- DRM_INFO("Fake missed irq on %s\n",
- engine->name);
- wake_up_all(&engine->irq_queue);
+
+ intel_engine_enable_fake_irq(engine);
}
- return user_interrupts;
+ return irq_count;
}
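kick_waiters() latches the first missed interrupt per engine in a bitmask so that the DRM_ERROR is reported only once per engine. A kernel-style sketch of that test_and_set_bit() idiom (field and function names are illustrative, not the actual i915 ones):

#include <linux/bitops.h>
#include <linux/printk.h>

static unsigned long example_missed_irq_mask;

static void example_note_missed_irq(unsigned int engine_id)
{
	/* test_and_set_bit() returns the old value, so only the first
	 * 0 -> 1 transition for an engine produces a report. */
	if (!test_and_set_bit(engine_id, &example_missed_irq_mask))
		pr_err("missed irq on engine %u\n", engine_id);
}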
/*
* This is called when the chip hasn't reported back with completed
@@ -3144,11 +3083,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
- struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int busy_count = 0, rings_hung = 0;
- bool stuck[I915_NUM_ENGINES] = { 0 };
+ unsigned int hung = 0, stuck = 0;
+ int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
@@ -3157,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!i915.enable_hangcheck)
return;
- /*
- * The hangcheck work is synced during runtime suspend, we don't
- * require a wakeref. TODO: instead of disabling the asserts make
- * sure that we hold a reference when this work is running.
- */
- DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
@@ -3170,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv) {
+ bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
unsigned user_interrupts;
- bool busy = true;
semaphore_clear_deadlocks(dev_priv);
@@ -3189,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->irq_seqno_barrier(engine);
acthd = intel_ring_get_active_head(engine);
- seqno = engine->get_seqno(engine);
+ seqno = intel_engine_get_seqno(engine);
/* Reset stuck interrupts between batch advances */
user_interrupts = 0;
@@ -3197,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE;
- if (waitqueue_active(&engine->irq_queue)) {
+ if (busy) {
/* Safeguard against driver failure */
user_interrupts = kick_waiters(engine);
engine->hangcheck.score += BUSY;
- } else
- busy = false;
+ }
} else {
/* We always increment the hangcheck score
* if the ring is busy and still processing
@@ -3234,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
break;
case HANGCHECK_HUNG:
engine->hangcheck.score += HUNG;
- stuck[id] = true;
break;
}
}
+
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ hung |= intel_engine_flag(engine);
+ if (engine->hangcheck.action != HANGCHECK_HUNG)
+ stuck |= intel_engine_flag(engine);
+ }
} else {
engine->hangcheck.action = HANGCHECK_ACTIVE;
@@ -3262,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
busy_count += busy;
}
- for_each_engine_id(engine, dev_priv, id) {
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- DRM_INFO("%s on %s\n",
- stuck[id] ? "stuck" : "no progress",
- engine->name);
- rings_hung |= intel_engine_flag(engine);
- }
- }
+ if (hung) {
+ char msg[80];
+ int len;
- if (rings_hung) {
- i915_handle_error(dev, rings_hung, "Engine(s) hung");
- goto out;
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, dev_priv, hung)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(dev_priv, hung, msg);
}
+ /* Reset timer in case GPU hangs without another request being added */
if (busy_count)
- /* Reset timer case chip hangs without another request
- * being added */
- i915_queue_hangcheck(dev);
-
-out:
- ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-}
-
-void i915_queue_hangcheck(struct drm_device *dev)
-{
- struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
-
- if (!i915.enable_hangcheck)
- return;
-
- /* Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
- round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
+ i915_queue_hangcheck(dev_priv);
}
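For clarity, a standalone sketch of the new synopsis assembly: walk the hung mask, append one "name, " entry per engine, then chop the trailing separator, as the scnprintf() loop above does. Engine names and masks are made up, and plain snprintf() stands in for the kernel's scnprintf():

#include <stdio.h>

int main(void)
{
	static const char * const names[] = { "rcs", "vcs", "bcs" };
	unsigned int hung = (1u << 0) | (1u << 2);	/* rcs and bcs over threshold */
	unsigned int stuck = (1u << 0);			/* rcs also flagged as stuck */
	char msg[80];
	unsigned int i;
	int len;

	/* Mirror the filtering above: when stuck != hung, report only
	 * the hung-but-not-stuck engines. */
	if (stuck != hung)
		hung &= ~stuck;

	len = snprintf(msg, sizeof(msg), "%s on ",
		       stuck == hung ? "No progress" : "Hang");
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		if (hung & (1u << i))
			len += snprintf(msg + len, sizeof(msg) - len,
					"%s, ", names[i]);
	msg[len - 2] = '\0';	/* drop the trailing ", " */

	printf("%s\n", msg);	/* prints: Hang on bcs */
	return 0;
}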
static void ibx_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_NOP(dev))
return;
@@ -3324,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
*/
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_NOP(dev))
return;
@@ -3336,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
GEN5_IRQ_RESET(GT);
if (INTEL_INFO(dev)->gen >= 6)
@@ -3396,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3411,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
static void valleyview_irq_preinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
@@ -3434,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3480,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->dev->irq);
+ synchronize_irq(dev_priv->drm.irq);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3500,31 +3422,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(dev, encoder)
+ for_each_intel_encoder(&dev_priv->drm, encoder)
if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static void ibx_hpd_irq_setup(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
}
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3463,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
hotplug |= PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
-static void spt_hpd_irq_setup(struct drm_device *dev)
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3569,24 +3488,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
-static void ilk_hpd_irq_setup(struct drm_device *dev)
+static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else {
hotplug_irqs = DE_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
@@ -3601,15 +3519,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
- ibx_hpd_irq_setup(dev);
+ ibx_hpd_irq_setup(dev_priv);
}
-static void bxt_hpd_irq_setup(struct drm_device *dev)
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3642,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
static void ibx_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
if (HAS_PCH_NOP(dev))
@@ -3659,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pm_irqs, gt_irqs;
pm_irqs = gt_irqs = 0;
@@ -3673,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (IS_GEN5(dev)) {
- gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
- ILK_BSD_USER_INTERRUPT;
+ gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
@@ -3696,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
if (INTEL_INFO(dev)->gen >= 7) {
@@ -3775,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
static int valleyview_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
gen5_gt_irq_postinstall(dev);
@@ -3827,6 +3743,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
+ u32 de_misc_masked = GEN8_DE_MISC_GSE;
enum pipe pipe;
if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,11 +3779,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_SPLIT(dev))
ibx_irq_pre_postinstall(dev);
@@ -3885,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
gen8_gt_irq_postinstall(dev_priv);
@@ -3902,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void gen8_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3912,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
static void valleyview_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3932,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
static void cherryview_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3952,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
static void ironlake_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3962,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
static void i8xx_irq_preinstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
for_each_pipe(dev_priv, pipe)
@@ -3974,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4006,13 +3924,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i8xx_handle_vblank(struct drm_device *dev,
+static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
int plane, int pipe, u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!intel_pipe_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev_priv, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -4027,19 +3944,18 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
+ intel_finish_page_flip_cs(dev_priv, pipe);
return true;
check_page_flip:
- intel_check_page_flip(dev, pipe);
+ intel_check_page_flip(dev_priv, pipe);
return false;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u16 iir, new_iir;
u32 pipe_stats[2];
int pipe;
@@ -4089,15 +4005,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
- if (HAS_FBC(dev))
+ if (HAS_FBC(dev_priv))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, plane, pipe, iir))
+ i8xx_handle_vblank(dev_priv, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4116,7 +4032,7 @@ out:
static void i8xx_irq_uninstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
for_each_pipe(dev_priv, pipe) {
@@ -4131,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
static void i915_irq_preinstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4149,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
static int i915_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4182,7 +4098,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
- i915_enable_asle_pipestat(dev);
+ i915_enable_asle_pipestat(dev_priv);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4197,13 +4113,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i915_handle_vblank(struct drm_device *dev,
+static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
int plane, int pipe, u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!intel_pipe_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev_priv, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -4218,19 +4133,18 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
+ intel_finish_page_flip_cs(dev_priv, pipe);
return true;
check_page_flip:
- intel_check_page_flip(dev, pipe);
+ intel_check_page_flip(dev_priv, pipe);
return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4273,11 +4187,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
break;
/* Consume port. Then clear IIR or we'll miss events */
- if (I915_HAS_HOTPLUG(dev) &&
+ if (I915_HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4202,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
- if (HAS_FBC(dev))
+ if (HAS_FBC(dev_priv))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev, plane, pipe, iir))
+ i915_handle_vblank(dev_priv, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4221,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -4335,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i915_irq_uninstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4357,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
static void i965_irq_preinstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4373,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 enable_mask;
u32 error_mask;
@@ -4391,7 +4305,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask |= I915_USER_INTERRUPT;
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
/* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4320,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
*/
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
error_mask = ~(GM45_ERROR_PAGE_TABLE |
GM45_ERROR_MEM_PRIV |
GM45_ERROR_CP_PRIV |
@@ -4424,26 +4338,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
- i915_enable_asle_pipestat(dev);
+ i915_enable_asle_pipestat(dev_priv);
return 0;
}
-static void i915_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en;
assert_spin_locked(&dev_priv->irq_lock);
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
- hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
@@ -4458,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
int ret = IRQ_NONE, pipe;
@@ -4510,7 +4423,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4436,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev, pipe, pipe, iir))
+ i915_handle_vblank(dev_priv, pipe, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -4567,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
static void i965_irq_uninstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
if (!dev_priv)
@@ -4597,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
*/
void intel_irq_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
intel_hpd_init_work(dev_priv);
@@ -4611,6 +4524,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ dev_priv->rps.pm_intr_keep = 0;
+
+ /*
+ * SNB and IVB can (and VLV and CHV may) hard hang on a looping
+ * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify whether this can be reproduced on VLV and CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
@@ -4674,12 +4601,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
- if (INTEL_INFO(dev_priv)->gen == 2) {
+ if (IS_GEN2(dev_priv)) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
- } else if (INTEL_INFO(dev_priv)->gen == 3) {
+ } else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4717,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
dev_priv->pm.irqs_enabled = true;
- return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+ return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
/**
@@ -4729,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- drm_irq_uninstall(dev_priv->dev);
+ drm_irq_uninstall(&dev_priv->drm);
intel_hpd_cancel_work(dev_priv);
dev_priv->pm.irqs_enabled = false;
}
@@ -4743,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+ dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
dev_priv->pm.irqs_enabled = false;
- synchronize_irq(dev_priv->dev->irq);
+ synchronize_irq(dev_priv->drm.irq);
}
/**
@@ -4758,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->pm.irqs_enabled = true;
- dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
- dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+ dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
+ dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6df8..b6e404c91eed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = {
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
- .enable_guc_submission = false,
+ .enable_guc_loading = 0,
+ .enable_guc_submission = 0,
.guc_log_level = -1,
.enable_dp_mst = true,
.inject_load_failure = 0,
+ .enable_dpcd_backlight = false,
+ .enable_gvt = false,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing,
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
-MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
+MODULE_PARM_DESC(enable_guc_loading,
+ "Enable GuC firmware loading "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
+MODULE_PARM_DESC(enable_guc_submission,
+ "Enable GuC submission "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+MODULE_PARM_DESC(enable_dpcd_backlight,
+ "Enable support for DPCD backlight control (default:false)");
+
+module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
+MODULE_PARM_DESC(enable_gvt,
+ "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804291..0ad020b4a925 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
+ int enable_guc_loading;
+ int enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
@@ -57,10 +59,11 @@ struct i915_params {
bool load_detect_test;
bool reset;
bool disable_display;
- bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
bool enable_dp_mst;
+ bool enable_dpcd_backlight;
+ bool enable_gvt;
};
extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
new file mode 100644
index 000000000000..949c01686a66
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ CHV_PIPE_C_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ CHV_TRANSCODER_C_OFFSET, }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+ CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+#define BDW_COLORS \
+ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
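These macros are designated-initializer fragments that get pasted into the device info tables below; for example, intel_i830_info (the first entry below) roughly expands to the following once the preprocessor substitutes GEN_DEFAULT_PIPEOFFSETS and CURSOR_OFFSETS (a sketch; the struct definition itself lives in i915_drv.h, not in this file):

static const struct intel_device_info intel_i830_info_expanded = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        /* GEN_DEFAULT_PIPEOFFSETS */
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET,
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET },
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET,
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET },
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET },
        /* CURSOR_OFFSETS */
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET },
};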
+
+static const struct intel_device_info intel_i830_info = {
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_845g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+ .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+ .gen = 3, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+ .gen = 4, .is_broadwater = 1, .num_pipes = 2,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+ .gen = 4, .is_crestline = 1, .num_pipes = 2,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g33_info = {
+ .gen = 3, .is_g33 = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g45_info = {
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+ .gen = 4, .is_g4x = 1, .num_pipes = 2,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+ .gen = 5, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+ .gen = 5, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .gen = 6, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .gen = 6, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+#define GEN7_FEATURES \
+ .gen = 7, .num_pipes = 3, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_fbc = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_llc = 1, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ IVB_CURSOR_OFFSETS
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .num_pipes = 0, /* legal, last one wins */
+};
+
+#define VLV_FEATURES \
+ .gen = 7, .num_pipes = 2, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .display_mmio_offset = VLV_DISPLAY_BASE, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ VLV_FEATURES,
+ .is_valleyview = 1,
+ .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ VLV_FEATURES,
+ .is_valleyview = 1,
+};
+
+#define HSW_FEATURES \
+ GEN7_FEATURES, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .has_ddi = 1, \
+ .has_fpga_dbg = 1
+
+static const struct intel_device_info intel_haswell_d_info = {
+ HSW_FEATURES,
+ .is_haswell = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ HSW_FEATURES,
+ .is_haswell = 1,
+ .is_mobile = 1,
+};
+
+#define BDW_FEATURES \
+ HSW_FEATURES, \
+ BDW_COLORS
+
+static const struct intel_device_info intel_broadwell_d_info = {
+ BDW_FEATURES,
+ .gen = 8,
+ .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+ BDW_FEATURES,
+ .gen = 8, .is_mobile = 1,
+ .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+ BDW_FEATURES,
+ .gen = 8,
+ .is_broadwell = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+ BDW_FEATURES,
+ .gen = 8, .is_mobile = 1,
+ .is_broadwell = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .is_cherryview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_CHV_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+ CHV_COLORS,
+};
+
+static const struct intel_device_info intel_skylake_info = {
+ BDW_FEATURES,
+ .is_skylake = 1,
+ .gen = 9,
+};
+
+static const struct intel_device_info intel_skylake_gt3_info = {
+ BDW_FEATURES,
+ .is_skylake = 1,
+ .gen = 9,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+ .is_broxton = 1,
+ .gen = 9,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .num_pipes = 3,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .has_fbc = 1,
+ .has_pooled_eu = 0,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+ BDW_COLORS,
+};
+
+static const struct intel_device_info intel_kabylake_info = {
+ BDW_FEATURES,
+ .is_kabylake = 1,
+ .gen = 9,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+ BDW_FEATURES,
+ .is_kabylake = 1,
+ .gen = 9,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+static const struct pci_device_id pciidlist[] = {
+ INTEL_I830_IDS(&intel_i830_info),
+ INTEL_I845G_IDS(&intel_845g_info),
+ INTEL_I85X_IDS(&intel_i85x_info),
+ INTEL_I865G_IDS(&intel_i865g_info),
+ INTEL_I915G_IDS(&intel_i915g_info),
+ INTEL_I915GM_IDS(&intel_i915gm_info),
+ INTEL_I945G_IDS(&intel_i945g_info),
+ INTEL_I945GM_IDS(&intel_i945gm_info),
+ INTEL_I965G_IDS(&intel_i965g_info),
+ INTEL_G33_IDS(&intel_g33_info),
+ INTEL_I965GM_IDS(&intel_i965gm_info),
+ INTEL_GM45_IDS(&intel_gm45_info),
+ INTEL_G45_IDS(&intel_g45_info),
+ INTEL_PINEVIEW_IDS(&intel_pineview_info),
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+ INTEL_HSW_D_IDS(&intel_haswell_d_info),
+ INTEL_HSW_M_IDS(&intel_haswell_m_info),
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+ INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+ INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+ INTEL_CHV_IDS(&intel_cherryview_info),
+ INTEL_SKL_GT1_IDS(&intel_skylake_info),
+ INTEL_SKL_GT2_IDS(&intel_skylake_info),
+ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+ INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+ INTEL_BXT_IDS(&intel_broxton_info),
+ INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+ INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+ INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+ INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+ {0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
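The ordering comment above matters because the PCI core stops at the first matching table entry. An illustrative helper showing those semantics (lookup_device_info() is hypothetical; pci_match_id() is the standard first-match walk over an ID table):

static const struct intel_device_info *
lookup_device_info(struct pci_dev *pdev)
{
        /* pci_match_id() walks the table in order and returns the first
         * match, which is why the narrow Quanta/IVB-Q entry must come
         * before the generic IVB PCI IDs. */
        const struct pci_device_id *ent = pci_match_id(pciidlist, pdev);

        return ent ? (const struct intel_device_info *)ent->driver_data : NULL;
}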
+
+extern int i915_driver_load(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct intel_device_info *intel_info =
+ (struct intel_device_info *) ent->driver_data;
+
+ if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+ DRM_INFO("This hardware requires preliminary hardware support.\n"
+ "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ return -ENODEV;
+ }
+
+ /* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. This only
+ * causes confusion, especially on systems where both
+ * functions have the same PCI ID!
+ */
+ if (PCI_FUNC(pdev->devfn))
+ return -ENODEV;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return -EPROBE_DEFER;
+
+ return i915_driver_load(pdev, ent);
+}
+
+extern void i915_driver_unload(struct drm_device *dev);
+
+static void i915_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ i915_driver_unload(dev);
+ drm_dev_unref(dev);
+}
+
+extern const struct dev_pm_ops i915_pm_ops;
+
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+ bool use_kms = true;
+
+ /*
+ * Enable KMS by default, unless explicitly overridden by
+ * either the i915.modeset parameter or by the
+ * vga_text_mode_force boot option.
+ */
+
+ if (i915.modeset == 0)
+ use_kms = false;
+
+ if (vgacon_text_force() && i915.modeset == -1)
+ use_kms = false;
+
+ if (!use_kms) {
+ /* Silently fail loading to not upset userspace. */
+ DRM_DEBUG_DRIVER("KMS disabled.\n");
+ return 0;
+ }
+
+ return pci_register_driver(&i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
+ if (!i915_pci_driver.driver.owner)
+ return;
+
+ pci_unregister_driver(&i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
new file mode 100644
index 000000000000..c0cb2974caac
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_PVINFO_H_
+#define _I915_PVINFO_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE 0x78000
+#define VGT_PVINFO_SIZE 0x1000
+
+/*
+ * The following structure pages are defined in GEN MMIO space
+ * for virtualization. (One page for now)
+ */
+#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+ INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
+
+/*
+ * notifications from guest to vgpu device model
+ */
+enum vgt_g2v_type {
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
+ VGT_G2V_EXECLIST_CONTEXT_CREATE,
+ VGT_G2V_EXECLIST_CONTEXT_DESTROY,
+ VGT_G2V_MAX,
+};
+
+struct vgt_if {
+ u64 magic; /* VGT_MAGIC */
+ uint16_t version_major;
+ uint16_t version_minor;
+ u32 vgt_id; /* ID of vGT instance */
+ u32 rsv1[12]; /* pad to offset 0x40 */
+ /*
+ * Data structure to describe the ballooning info of resources.
+ * Each VM can only have one portion of continuous area for now.
+ * (May support scattered resource in future)
+ * (starting from offset 0x40)
+ */
+ struct {
+ /* Aperture register ballooning */
+ struct {
+ u32 base;
+ u32 size;
+ } mappable_gmadr; /* aperture */
+ /* GMADR register ballooning */
+ struct {
+ u32 base;
+ u32 size;
+ } nonmappable_gmadr; /* non aperture */
+ /* allowed fence registers */
+ u32 fence_num;
+ u32 rsv2[3];
+ } avail_rs; /* available/assigned resource */
+ u32 rsv3[0x200 - 24]; /* pad to half page */
+ /*
+ * The bottom half page is for response from Gfx driver to hypervisor.
+ */
+ u32 rsv4;
+ u32 display_ready; /* ready for display owner switch */
+
+ u32 rsv5[4];
+
+ u32 g2v_notify;
+ u32 rsv6[7];
+
+ struct {
+ u32 lo;
+ u32 hi;
+ } pdp[4];
+
+ u32 execlist_context_descriptor_lo;
+ u32 execlist_context_descriptor_hi;
+
+ u32 rsv7[0x200 - 24]; /* pad to one page */
+} __packed;
+
+#define vgtif_reg(x) \
+ _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
+
+#endif /* _I915_PVINFO_H_ */
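Since struct vgt_if is __packed, vgtif_reg() resolves to a fixed MMIO address inside the PVINFO page. Worked example from the layout above:

/*
 * offsetof(struct vgt_if, display_ready)
 *   = 8 (magic) + 2 + 2 (version_major/minor) + 4 (vgt_id) + 48 (rsv1[12])  -> 0x040
 *   + 32 (avail_rs) + 1952 (rsv3[0x200 - 24])                               -> 0x800
 *   + 4 (rsv4)                                                              -> 0x804
 *
 * so vgtif_reg(display_ready) == _MMIO(VGT_PVINFO_PAGE + 0x804) == _MMIO(0x78804)
 */

The same arithmetic keeps the structure at exactly VGT_PVINFO_SIZE (one 4 KiB page): rsv3 pads the request half to 0x800 and rsv7 pads the response half to 0x1000, which the BUILD_BUG_ON in i915_check_vgpu() asserts.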
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3fcf7dd5b6ca..ce14fe09d962 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -445,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
+#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -716,6 +718,9 @@ enum skl_disp_power_wells {
/* Not actual bit groups. Used as IDs for lookup_power_well() */
SKL_DISP_PW_ALWAYS_ON,
SKL_DISP_PW_DC_OFF,
+
+ BXT_DPIO_CMN_A,
+ BXT_DPIO_CMN_BC,
};
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -889,7 +894,7 @@ enum skl_disp_power_wells {
* PLLs can be routed to any transcoder A/B/C.
*
* Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT).
+ * digital port D (CHV) or port A (BXT). ::
*
*
* Dual channel PHY (VLV/CHV/BXT)
@@ -1276,6 +1281,15 @@ enum skl_disp_power_wells {
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
+#define _BXT_PHY_CTL_DDI_A 0x64C00
+#define _BXT_PHY_CTL_DDI_B 0x64C10
+#define _BXT_PHY_CTL_DDI_C 0x64C20
+#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
+#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
+#define BXT_PHY_LANE_ENABLED (1 << 8)
+#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+ _BXT_PHY_CTL_DDI_B)
+
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
@@ -1672,6 +1686,9 @@ enum skl_disp_power_wells {
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
+#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
@@ -2171,6 +2188,9 @@ enum skl_disp_power_wells {
#define FBC_LL_SIZE (1536)
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN (1<<30)
+
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
@@ -2461,6 +2481,8 @@ enum skl_disp_power_wells {
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
#define _FPA0 0x6040
#define _FPA1 0x6044
#define _FPB0 0x6048
@@ -3032,6 +3054,18 @@ enum skl_disp_power_wells {
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
+enum {
+ INTEL_ADVANCED_CONTEXT = 0,
+ INTEL_LEGACY_32B_CONTEXT,
+ INTEL_ADVANCED_AD_CONTEXT,
+ INTEL_LEGACY_64B_CONTEXT
+};
+
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ INTEL_LEGACY_64B_CONTEXT : \
+ INTEL_LEGACY_32B_CONTEXT)
+
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -6045,6 +6079,9 @@ enum skl_disp_power_wells {
#define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
+#define CHICKEN_PAR2_1 _MMIO(0x42090)
+#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
+
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@@ -6084,6 +6121,7 @@ enum skl_disp_power_wells {
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
@@ -6108,7 +6146,14 @@ enum skl_disp_power_wells {
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
#define GEN8_L3SQCREG1 _MMIO(0xB100)
-#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
+/*
+ * Note that on CHV the following has an off-by-one error wrt. BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
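The replacement macros encode the credit counts directly, and the hard-coded BDW default being removed above is recoverable from them. Worked arithmetic (the actual call sites live elsewhere in the series, so treat the specific operands as an illustration):

        L3_GENERAL_PRIO_CREDITS(30) = (30 >> 1) << 19 = 15 << 19 = 0x780000
        L3_HIGH_PRIO_CREDITS(2)     = ( 2 >> 1) << 14 =  1 << 14 = 0x004000
                                                        sum      = 0x784000  /* old BDW_WA_L3SQCREG1_DEFAULT */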
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
@@ -7028,7 +7073,8 @@ enum skl_disp_power_wells {
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
-#define RC6_STATE (1 << 18)
+#define RC_SW_TARGET_STATE_SHIFT 16
+#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
@@ -7042,13 +7088,17 @@ enum skl_disp_power_wells {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
#define GEN9_RENDER_PG_ENABLE (1<<0)
#define GEN9_MEDIA_PG_ENABLE (1<<1)
+#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
+#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
+#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
@@ -7578,14 +7628,15 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26)
-#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
-
#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
@@ -8161,6 +8212,8 @@ enum skl_disp_power_wells {
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
+#define BXT_DPHY_DEFEATURE_EN (1 << 8)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 34e061a9ef06..5cfe4c7716b4 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -31,7 +31,7 @@
static void i915_save_display(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
@@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)
static void i915_restore_display(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask = 0xffffffff;
/* Display arbitration */
@@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)
int i915_save_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
mutex_lock(&dev->struct_mutex);
@@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)
int i915_restore_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff299..d61829e54f93 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -38,12 +38,12 @@
static u32 calc_residency(struct drm_device *dev,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL;
u32 ret;
- if (!intel_enable_rc6(dev))
+ if (!intel_enable_rc6())
return 0;
intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_to_drm_minor(kdev);
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}
static ssize_t
@@ -167,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(drm_dev);
int slice = (int)(uintptr_t)attr->private;
int ret;
@@ -203,8 +202,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
- struct intel_context *ctx;
+ struct drm_i915_private *dev_priv = to_i915(drm_dev);
+ struct i915_gem_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
@@ -228,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
}
}
- ret = i915_gpu_idle(drm_dev);
- if (ret) {
- kfree(temp);
- mutex_unlock(&drm_dev->struct_mutex);
- return ret;
- }
-
/* TODO: Ideally we really want a GPU reset here to make sure errors
* aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO.
@@ -276,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -310,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -331,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return snprintf(buf, PAGE_SIZE,
"%d\n",
@@ -342,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -360,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
ssize_t ret;
@@ -397,7 +389,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -410,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -428,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
ssize_t ret;
@@ -461,7 +453,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -488,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..534154e05fbe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
),
TP_fast_assign(
- __entry->dev = i915->dev->primary->index;
+ __entry->dev = i915->drm.primary->index;
__entry->target = target;
__entry->flags = flags;
),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
),
TP_fast_assign(
- __entry->dev = from->dev->primary->index;
+ __entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->flags = flags;
- i915_trace_irq_get(engine, req);
+ intel_engine_enable_signaling(req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = req->engine->dev->primary->index;
+ __entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
),
TP_fast_assign(
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->drm.primary->index;
__entry->ring = engine->id;
- __entry->seqno = engine->get_seqno(engine);
+ __entry->seqno = intel_engine_get_seqno(engine);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->blocking =
- mutex_is_locked(&engine->dev->struct_mutex);
+ mutex_is_locked(&req->i915->drm.struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,19 +734,19 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
* the context.
*/
DECLARE_EVENT_CLASS(i915_context,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field(u32, dev)
- __field(struct intel_context *, ctx)
+ __field(struct i915_gem_context *, ctx)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
- __entry->dev = ctx->i915->dev->primary->index;
+ __entry->dev = ctx->i915->drm.primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
)
DEFINE_EVENT(i915_context, i915_context_create,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx)
);
DEFINE_EVENT(i915_context, i915_context_free,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx)
);
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
+ TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
TP_ARGS(engine, to),
TP_STRUCT__entry(
__field(u32, ring)
- __field(struct intel_context *, to)
+ __field(struct i915_gem_context *, to)
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
__entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->drm.primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8cad4d..f6acb5a0e701 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -53,20 +53,19 @@
/**
* i915_check_vgpu - detect virtual GPU
- * @dev: drm device *
+ * @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_check_vgpu(struct drm_device *dev)
+void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint64_t magic;
uint32_t version;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- if (!IS_HASWELL(dev))
+ if (!IS_HASWELL(dev_priv))
return;
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -102,10 +101,13 @@ static struct _balloon_info_ bl_info;
* This function is called to deallocate the ballooned-out graphic memory, when
* driver is unloaded or when ballooning fails.
*/
-void intel_vgt_deballoon(void)
+void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
{
int i;
+ if (!intel_vgpu_active(dev_priv))
+ return;
+
DRM_DEBUG("VGT deballoon.\n");
for (i = 0; i < 4; i++) {
@@ -151,36 +153,35 @@ static int vgt_balloon_space(struct drm_mm *mm,
* of its graphic space being zero. Yet there are some portions ballooned out(
* the shadow part, which are marked as reserved by drm allocator). From the
* host point of view, the graphic address space is partitioned by multiple
- * vGPUs in different VMs.
+ * vGPUs in different VMs. ::
*
* vGPU1 view Host view
* 0 ------> +-----------+ +-----------+
- * ^ |///////////| | vGPU3 |
- * | |///////////| +-----------+
- * | |///////////| | vGPU2 |
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
* | +-----------+ +-----------+
* mappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
- * | |///////////| | |
- * v |///////////| | Host |
+ * | |###########| | |
+ * v |###########| | Host |
* +=======+===========+ +===========+
- * ^ |///////////| | vGPU3 |
- * | |///////////| +-----------+
- * | |///////////| | vGPU2 |
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
* | +-----------+ +-----------+
* unmappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
- * | |///////////| | |
- * | |///////////| | Host |
- * v |///////////| | |
+ * | |###########| | |
+ * | |###########| | Host |
+ * v |###########| | |
* total GM size ------> +-----------+ +-----------+
*
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
*/
-int intel_vgt_balloon(struct drm_device *dev)
+int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
@@ -188,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev)
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
+ if (!intel_vgpu_active(dev_priv))
+ return 0;
+
mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
@@ -259,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev)
err:
DRM_ERROR("VGT balloon fail\n");
- intel_vgt_deballoon();
+ intel_vgt_deballoon(dev_priv);
return ret;
}
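The ballooned-out ranges in the diagram above are kept away from the guest's allocator by reserving them in the GGTT drm_mm. A minimal sketch of what vgt_balloon_space() (declared in the hunk header above but not shown here) plausibly does, assuming it reserves the range via drm_mm_reserve_node():

static int balloon_reserve_sketch(struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long start, unsigned long end)
{
        if (start >= end)
                return -EINVAL;

        /* Mark [start, end) as already taken so the guest never allocates
         * addresses that belong to other vGPUs or to the host. */
        memset(node, 0, sizeof(*node));
        node->start = start;
        node->size = end - start;

        return drm_mm_reserve_node(mm, node);
}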
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5f69..3c3b2d24e830 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,94 +24,10 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-/* The MMIO offset of the shared info between guest and host emulator */
-#define VGT_PVINFO_PAGE 0x78000
-#define VGT_PVINFO_SIZE 0x1000
+#include "i915_pvinfo.h"
-/*
- * The following structure pages are defined in GEN MMIO space
- * for virtualization. (One page for now)
- */
-#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
-#define VGT_VERSION_MAJOR 1
-#define VGT_VERSION_MINOR 0
-
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
-/*
- * notifications from guest to vgpu device model
- */
-enum vgt_g2v_type {
- VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
- VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
- VGT_G2V_EXECLIST_CONTEXT_CREATE,
- VGT_G2V_EXECLIST_CONTEXT_DESTROY,
- VGT_G2V_MAX,
-};
-
-struct vgt_if {
- uint64_t magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
- uint32_t vgt_id; /* ID of vGT instance */
- uint32_t rsv1[12]; /* pad to offset 0x40 */
- /*
- * Data structure to describe the balooning info of resources.
- * Each VM can only have one portion of continuous area for now.
- * (May support scattered resource in future)
- * (starting from offset 0x40)
- */
- struct {
- /* Aperture register balooning */
- struct {
- uint32_t base;
- uint32_t size;
- } mappable_gmadr; /* aperture */
- /* GMADR register balooning */
- struct {
- uint32_t base;
- uint32_t size;
- } nonmappable_gmadr; /* non aperture */
- /* allowed fence registers */
- uint32_t fence_num;
- uint32_t rsv2[3];
- } avail_rs; /* available/assigned resource */
- uint32_t rsv3[0x200 - 24]; /* pad to half page */
- /*
- * The bottom half page is for response from Gfx driver to hypervisor.
- */
- uint32_t rsv4;
- uint32_t display_ready; /* ready for display owner switch */
-
- uint32_t rsv5[4];
-
- uint32_t g2v_notify;
- uint32_t rsv6[7];
-
- struct {
- uint32_t lo;
- uint32_t hi;
- } pdp[4];
-
- uint32_t execlist_context_descriptor_lo;
- uint32_t execlist_context_descriptor_hi;
-
- uint32_t rsv7[0x200 - 24]; /* pad to one page */
-} __packed;
-
-#define vgtif_reg(x) \
- _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
-
-/* vGPU display status to be used by the host side */
-#define VGT_DRV_DISPLAY_NOT_READY 0
-#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
-
-extern void i915_check_vgpu(struct drm_device *dev);
-extern int intel_vgt_balloon(struct drm_device *dev);
-extern void intel_vgt_deballoon(void);
+void i915_check_vgpu(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct drm_i915_private *dev_priv);
+void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50ff90aea721..c5a166752eda 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
/* plane scaler case: assign as a plane scaler */
/* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i];
+ plane = drm_state->planes[i].ptr;
/*
* to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
continue;
}
- plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+ plane_state = intel_atomic_get_existing_plane_state(drm_state,
+ intel_plane);
scaler_id = &plane_state->scaler_id;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce7bb..6700a7be7f78 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
{
if (((mode->clock == TMDS_297M) ||
(mode->clock == TMDS_296M)) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
return true;
else
return false;
@@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_elda, uint32_t bits_elda,
i915_reg_t reg_edid)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint8_t *eld = connector->eld;
uint32_t tmp;
int i;
@@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
static void g4x_audio_codec_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t eldv, tmp;
DRM_DEBUG_KMS("Disable audio codec\n");
@@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
@@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
uint32_t tmp;
@@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct i915_audio_component *acomp = dev_priv->audio_component;
@@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
@@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
@@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
@@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
@@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(crtc->config))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
static int i915_audio_component_get_cdclk_freq(struct device *dev)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
- int ret;
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
- return ret;
+ return dev_priv->cdclk_freq;
}
static int i915_audio_component_sync_audio_rate(struct device *dev,
@@ -755,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
if (WARN_ON(acomp->ops || acomp->dev))
return -EEXIST;
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
acomp->ops = &i915_audio_component_ops;
acomp->dev = i915_dev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
dev_priv->audio_component = acomp;
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return 0;
}
@@ -773,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
struct i915_audio_component *acomp = data;
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
acomp->ops = NULL;
acomp->dev = NULL;
dev_priv->audio_component = NULL;
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -805,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
@@ -827,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
if (!dev_priv->audio_component_registered)
return;
- component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
dev_priv->audio_component_registered = false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b9022fa053d6..c6e69e4cfa83 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -218,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(dev_priv->dev);
+ ret = intel_opregion_get_panel_type(dev_priv);
if (ret >= 0) {
WARN_ON(ret > 0xf);
panel_type = ret;
@@ -323,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
}
+ dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ if (bdb->version >= 191 &&
+ get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+ const struct bdb_lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+ dev_priv->vbt.backlight.type = method->type;
+ }
+
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -768,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return;
}
+ /*
+ * These fields were introduced in VBT version 197, so make sure
+ * they are zeroed out when parsing earlier versions.
+ */
+ if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
+ dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
+ dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
+ }
+
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@@ -1407,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
int
intel_bios_init(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315eddb..8405b5a367d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
+enum intel_backlight_type {
+ INTEL_BACKLIGHT_PMIC,
+ INTEL_BACKLIGHT_LPSS,
+ INTEL_BACKLIGHT_DISPLAY_DDI,
+ INTEL_BACKLIGHT_DSI_DCS,
+ INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
struct edp_power_seq {
u16 t1_t3;
u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
u16 dual_link:2;
u16 lane_cnt:2;
u16 pixel_overlap:3;
- u16 rsvd3:9;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 rsvd3:4;
u16 rsvd4;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
new file mode 100644
index 000000000000..b074f3d6d127
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+
+static void intel_breadcrumbs_fake_irq(unsigned long data)
+{
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+
+ /*
+ * The timer persists in case we cannot enable interrupts,
+ * or if we have previously seen seqno/interrupt incoherency
+ * ("missed interrupt" syndrome). Here the worker will wake up
+ * every jiffie in order to kick the oldest waiter to do the
+ * coherent seqno check.
+ */
+ rcu_read_lock();
+ if (intel_engine_wakeup(engine))
+ mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ rcu_read_unlock();
+}
+
+static void irq_enable(struct intel_engine_cs *engine)
+{
+ /* Enabling the IRQ may miss the generation of the interrupt, but
+ * we still need to force the barrier before reading the seqno,
+ * just in case.
+ */
+ engine->breadcrumbs.irq_posted = true;
+
+ spin_lock_irq(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock_irq(&engine->i915->irq_lock);
+}
+
+static void irq_disable(struct intel_engine_cs *engine)
+{
+ spin_lock_irq(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock_irq(&engine->i915->irq_lock);
+
+ engine->breadcrumbs.irq_posted = false;
+}
+
+static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+ struct drm_i915_private *i915 = engine->i915;
+
+ assert_spin_locked(&b->lock);
+ if (b->rpm_wakelock)
+ return;
+
+ /* Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference. For completeness,
+ * record an rpm reference for ourselves to cover the
+ * interrupt we unmask.
+ */
+ intel_runtime_pm_get_noresume(i915);
+ b->rpm_wakelock = true;
+
+ /* No interrupts? Kick the waiter every jiffie! */
+ if (intel_irqs_enabled(i915)) {
+ if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
+ irq_enable(engine);
+ b->irq_enabled = true;
+ }
+
+ if (!b->irq_enabled ||
+ test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+ mod_timer(&b->fake_irq, jiffies + 1);
+
+ /* Ensure that even if the GPU hangs, we get woken up.
+ *
+ * However, note that if no one is waiting, we never notice
+ * a gpu hang. Eventually, we will have to wait for a resource
+ * held by the GPU and so trigger a hangcheck. In the most
+ * pathological case, this will be upon memory starvation!
+ */
+ i915_queue_hangcheck(i915);
+}
+
+static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
+ assert_spin_locked(&b->lock);
+ if (!b->rpm_wakelock)
+ return;
+
+ if (b->irq_enabled) {
+ irq_disable(engine);
+ b->irq_enabled = false;
+ }
+
+ intel_runtime_pm_put(engine->i915);
+ b->rpm_wakelock = false;
+}
+
+static inline struct intel_wait *to_wait(struct rb_node *node)
+{
+ return container_of(node, struct intel_wait, node);
+}
+
+static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
+ struct intel_wait *wait)
+{
+ assert_spin_locked(&b->lock);
+
+ /* This request is completed, so remove it from the tree, mark it as
+ * complete, and *then* wake up the associated task.
+ */
+ rb_erase(&wait->node, &b->waiters);
+ RB_CLEAR_NODE(&wait->node);
+
+ wake_up_process(wait->tsk); /* implicit smp_wmb() */
+}
+
+static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node **p, *parent, *completed;
+ bool first;
+ u32 seqno;
+
+ /* Insert the request into the retirement ordered list
+ * of waiters by walking the rbtree. If we are the oldest
+ * seqno in the tree (the first to be retired), then
+ * set ourselves as the bottom-half.
+ *
+ * As we descend the tree, prune completed branches. Since we hold the
+ * spinlock, we know that the first_waiter must be delayed, so we can
+ * reduce some of the sequential wake-up latency by taking action
+ * ourselves and waking up the completed tasks in parallel. Also, by
+ * removing stale elements in the tree, we may be able to reduce the
+ * ping-pong between the old bottom-half and ourselves as first-waiter.
+ */
+ first = true;
+ parent = NULL;
+ completed = NULL;
+ seqno = intel_engine_get_seqno(engine);
+
+ /* If the request completed before we managed to grab the spinlock,
+ * return now before adding ourselves to the rbtree. We let the
+ * current bottom-half handle any pending wakeups and instead
+ * try and get out of the way quickly.
+ */
+ if (i915_seqno_passed(seqno, wait->seqno)) {
+ RB_CLEAR_NODE(&wait->node);
+ return first;
+ }
+
+ p = &b->waiters.rb_node;
+ while (*p) {
+ parent = *p;
+ if (wait->seqno == to_wait(parent)->seqno) {
+ /* We have multiple waiters on the same seqno, select
+ * the highest priority task (that with the smallest
+ * task->prio) to serve as the bottom-half for this
+ * group.
+ */
+ if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
+ p = &parent->rb_right;
+ first = false;
+ } else {
+ p = &parent->rb_left;
+ }
+ } else if (i915_seqno_passed(wait->seqno,
+ to_wait(parent)->seqno)) {
+ p = &parent->rb_right;
+ if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
+ completed = parent;
+ else
+ first = false;
+ } else {
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&wait->node, parent, p);
+ rb_insert_color(&wait->node, &b->waiters);
+ GEM_BUG_ON(!first && !b->irq_seqno_bh);
+
+ if (completed) {
+ struct rb_node *next = rb_next(completed);
+
+ GEM_BUG_ON(!next && !first);
+ if (next && next != &wait->node) {
+ GEM_BUG_ON(first);
+ b->first_wait = to_wait(next);
+ smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+ /* As there is a delay between reading the current
+ * seqno, processing the completed tasks and selecting
+ * the next waiter, we may have missed the interrupt
+ * and so need the next bottom-half to wake up.
+ *
+ * Also as we enable the IRQ, we may miss the
+ * interrupt for that seqno, so we have to wake up
+ * the next bottom-half in order to do a coherent check
+ * in case the seqno passed.
+ */
+ __intel_breadcrumbs_enable_irq(b);
+ if (READ_ONCE(b->irq_posted))
+ wake_up_process(to_wait(next)->tsk);
+ }
+
+ do {
+ struct intel_wait *crumb = to_wait(completed);
+ completed = rb_prev(completed);
+ __intel_breadcrumbs_finish(b, crumb);
+ } while (completed);
+ }
+
+ if (first) {
+ GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+ b->first_wait = wait;
+ smp_store_mb(b->irq_seqno_bh, wait->tsk);
+ /* After assigning ourselves as the new bottom-half, we must
+ * perform a cursory check to prevent a missed interrupt.
+ * Either we miss the interrupt whilst programming the hardware,
+ * or if there was a previous waiter (for a later seqno) they
+ * may be woken instead of us (due to the inherent race
+ * in the unlocked read of b->irq_seqno_bh in the irq handler)
+ * and so we miss the wake up.
+ */
+ __intel_breadcrumbs_enable_irq(b);
+ }
+ GEM_BUG_ON(!b->irq_seqno_bh);
+ GEM_BUG_ON(!b->first_wait);
+ GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+
+ return first;
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ bool first;
+
+ spin_lock(&b->lock);
+ first = __intel_engine_add_wait(engine, wait);
+ spin_unlock(&b->lock);
+
+ return first;
+}
+
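+/* Arm the fallback timer so that the oldest waiter is still kicked
+ * periodically even if no user interrupt ever arrives.
+ */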
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
+{
+ mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+}
+
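+/* Chain-wake the next completed waiter only if it is at least as important
+ * as the departing bottom-half (a smaller task->prio means a higher
+ * priority), so a high priority waiter is not delayed waking lower
+ * priority tasks.
+ */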
+static inline bool chain_wakeup(struct rb_node *rb, int priority)
+{
+ return rb && to_wait(rb)->tsk->prio <= priority;
+}
+
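+/* The signaler kthread is treated as the most important waiter (INT_MIN),
+ * so that it never stalls to chain-wake other completed waiters.
+ */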
+static inline int wakeup_priority(struct intel_breadcrumbs *b,
+ struct task_struct *tsk)
+{
+ if (tsk == b->signaler)
+ return INT_MIN;
+ else
+ return tsk->prio;
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ /* Quick check to see if this waiter was already decoupled from
+ * the tree by the bottom-half to avoid contention on the spinlock
+ * by the herd.
+ */
+ if (RB_EMPTY_NODE(&wait->node))
+ return;
+
+ spin_lock(&b->lock);
+
+ if (RB_EMPTY_NODE(&wait->node))
+ goto out_unlock;
+
+ if (b->first_wait == wait) {
+ const int priority = wakeup_priority(b, wait->tsk);
+ struct rb_node *next;
+
+ GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+
+ /* We are the current bottom-half. Find the next candidate,
+ * the first waiter in the queue on the remaining oldest
+ * request. As multiple seqnos may complete in the time it
+ * takes us to wake up and find the next waiter, we have to
+ * wake up that waiter for it to perform its own coherent
+ * completion check.
+ */
+ next = rb_next(&wait->node);
+ if (chain_wakeup(next, priority)) {
+ /* If the next waiter is already complete,
+ * wake it up and continue on to the next waiter. So if
+ * we have a small herd, they will wake up in parallel
+ * rather than sequentially, which should reduce
+ * the overall latency in waking all the completed
+ * clients.
+ *
+ * However, waking up a chain adds extra latency to
+ * the first_waiter. This is undesirable if that
+ * waiter is a high priority task.
+ */
+ u32 seqno = intel_engine_get_seqno(engine);
+
+ while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
+ struct rb_node *n = rb_next(next);
+
+ __intel_breadcrumbs_finish(b, to_wait(next));
+ next = n;
+ if (!chain_wakeup(next, priority))
+ break;
+ }
+ }
+
+ if (next) {
+ /* In our haste, we may have completed the first waiter
+ * before we enabled the interrupt. Do so now as we
+ * have a second waiter for a future seqno. Afterwards,
+ * we have to wake up that waiter in case we missed
+ * the interrupt, or if we have to handle an
+ * exception rather than a seqno completion.
+ */
+ b->first_wait = to_wait(next);
+ smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+ if (b->first_wait->seqno != wait->seqno)
+ __intel_breadcrumbs_enable_irq(b);
+ wake_up_process(b->irq_seqno_bh);
+ } else {
+ b->first_wait = NULL;
+ WRITE_ONCE(b->irq_seqno_bh, NULL);
+ __intel_breadcrumbs_disable_irq(b);
+ }
+ } else {
+ GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
+ rb_erase(&wait->node, &b->waiters);
+
+out_unlock:
+ GEM_BUG_ON(b->first_wait == wait);
+ GEM_BUG_ON(rb_first(&b->waiters) !=
+ (b->first_wait ? &b->first_wait->node : NULL));
+ GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+ spin_unlock(&b->lock);
+}
+
+static bool signal_complete(struct drm_i915_gem_request *request)
+{
+ if (!request)
+ return false;
+
+ /* If another process served as the bottom-half it may have already
+ * signalled that this wait is already completed.
+ */
+ if (intel_wait_complete(&request->signaling.wait))
+ return true;
+
+ /* Carefully check if the request is complete, giving time for the
+ * seqno to be visible or if the GPU hung.
+ */
+ if (__i915_request_irq_complete(request))
+ return true;
+
+ return false;
+}
+
+static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
+{
+ return container_of(rb, struct drm_i915_gem_request, signaling.node);
+}
+
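+/* Promote the signaler to SCHED_FIFO so that fence signalling is not
+ * starved by normal tasks.
+ */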
+static void signaler_set_rtpriority(void)
+{
+ struct sched_param param = { .sched_priority = 1 };
+
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+}
+
+static int intel_breadcrumbs_signaler(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_gem_request *request;
+
+ /* Install ourselves with high priority to reduce signalling latency */
+ signaler_set_rtpriority();
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* We are either woken up by the interrupt bottom-half,
+ * or by a client adding a new signaller. In both cases,
+ * the GPU seqno may have advanced beyond our oldest signal.
+ * If it has, propagate the signal, remove the waiter and
+ * check again with the next oldest signal. Otherwise we
+ * need to wait for a new interrupt from the GPU or for
+ * a new client.
+ */
+ request = READ_ONCE(b->first_signal);
+ if (signal_complete(request)) {
+ /* Wake up all other completed waiters and select the
+ * next bottom-half for the next user interrupt.
+ */
+ intel_engine_remove_wait(engine,
+ &request->signaling.wait);
+
+ /* Find the next oldest signal. Note that as we have
+ * not been holding the lock, another client may
+ * have installed an even older signal than the one
+ * we just completed - so double check we are still
+ * the oldest before picking the next one.
+ */
+ spin_lock(&b->lock);
+ if (request == b->first_signal) {
+ struct rb_node *rb =
+ rb_next(&request->signaling.node);
+ b->first_signal = rb ? to_signaler(rb) : NULL;
+ }
+ rb_erase(&request->signaling.node, &b->signals);
+ spin_unlock(&b->lock);
+
+ i915_gem_request_unreference(request);
+ } else {
+ if (kthread_should_stop())
+ break;
+
+ schedule();
+ }
+ } while (1);
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *parent, **p;
+ bool first, wakeup;
+
+ if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
+ return;
+
+ spin_lock(&b->lock);
+ if (unlikely(request->signaling.wait.tsk)) {
+ wakeup = false;
+ goto unlock;
+ }
+
+ request->signaling.wait.tsk = b->signaler;
+ request->signaling.wait.seqno = request->seqno;
+ i915_gem_request_reference(request);
+
+ /* First add ourselves into the list of waiters, but register our
+ * bottom-half as the signaller thread. As per usual, only the oldest
+ * waiter (not just signaller) is tasked as the bottom-half waking
+ * up all completed waiters after the user interrupt.
+ *
+ * If we are the oldest waiter, enable the irq (after which we
+ * must double check that the seqno did not complete).
+ */
+ wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
+
+ /* Now insert ourselves into the retirement ordered list of signals
+ * on this engine. We track the oldest seqno as that will be the
+ * first signal to complete.
+ */
+ parent = NULL;
+ first = true;
+ p = &b->signals.rb_node;
+ while (*p) {
+ parent = *p;
+ if (i915_seqno_passed(request->seqno,
+ to_signaler(parent)->seqno)) {
+ p = &parent->rb_right;
+ first = false;
+ } else {
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&request->signaling.node, parent, p);
+ rb_insert_color(&request->signaling.node, &b->signals);
+ if (first)
+ smp_store_mb(b->first_signal, request);
+
+unlock:
+ spin_unlock(&b->lock);
+
+ if (wakeup)
+ wake_up_process(b->signaler);
+}
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct task_struct *tsk;
+
+ spin_lock_init(&b->lock);
+ setup_timer(&b->fake_irq,
+ intel_breadcrumbs_fake_irq,
+ (unsigned long)engine);
+
+ /* Spawn a thread to provide a common bottom-half for all signals.
+ * As this is an asynchronous interface we cannot steal the current
+ * task to handle the bottom-half of the user interrupt, therefore
+ * we create a thread to do the coherent seqno dance after the
+ * interrupt and then signal the waitqueue (via the dma-buf/fence).
+ */
+ tsk = kthread_run(intel_breadcrumbs_signaler, engine,
+ "i915/signal:%d", engine->id);
+ if (IS_ERR(tsk))
+ return PTR_ERR(tsk);
+
+ b->signaler = tsk;
+
+ return 0;
+}
+
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ if (!IS_ERR_OR_NULL(b->signaler))
+ kthread_stop(b->signaler);
+
+ del_timer_sync(&b->fake_irq);
+}
+
+unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int mask = 0;
+
+ /* To avoid the task_struct disappearing beneath us as we wake up
+ * the process, we must first inspect the task_struct->state under the
+ * RCU lock, i.e. as we call wake_up_process() we must be holding the
+ * rcu_read_lock().
+ */
+ rcu_read_lock();
+ for_each_engine(engine, i915)
+ if (unlikely(intel_engine_wakeup(engine)))
+ mask |= intel_engine_flag(engine);
+ rcu_read_unlock();
+
+ return mask;
+}
+
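+/* Wake the signaler thread on every engine that still has a pending
+ * signal and return a mask of the engines that were kicked.
+ */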
+unsigned int intel_kick_signalers(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int mask = 0;
+
+ for_each_engine(engine, i915) {
+ if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
+ wake_up_process(engine->breadcrumbs.signaler);
+ mask |= intel_engine_flag(engine);
+ }
+ }
+
+ return mask;
+}
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 1b3f97449395..bc0fef3d3335 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = to_intel_crtc(crtc)->pipe;
uint32_t mode;
@@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
void intel_color_set_csc(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->display.load_csc_matrix)
dev_priv->display.load_csc_matrix(crtc_state);
@@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
struct drm_property_blob *blob)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
int i;
if (HAS_GMCH_DISPLAY(dev)) {
- if (intel_crtc->config->has_dsi_encoder)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_crtc_state =
to_intel_crtc_state(crtc_state);
@@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
@@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
void intel_color_load_luts(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->display.load_luts(crtc_state);
}
@@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc)
/* Enable color management support when we have degamma & gamma LUTs. */
if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
INTEL_INFO(dev)->color.gamma_lut_size != 0)
- drm_helper_crtc_enable_color_mgmt(crtc,
+ drm_crtc_enable_color_mgmt(crtc,
INTEL_INFO(dev)->color.degamma_lut_size,
+ true,
INTEL_INFO(dev)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66451..827b6ef4e9ae 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -98,7 +98,7 @@ out:
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
@@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 adpa;
bool ret;
@@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000))
+ if (intel_wait_for_register(dev_priv,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
@@ -326,11 +328,26 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
+ /*
+ * Doing a force trigger causes a hpd interrupt to get sent, which can
+ * get us stuck in a loop if we're polling:
+ * - We enable power wells and reset the ADPA
+ * - output_poll_exec does force probe on VGA, triggering a hpd
+ * - HPD handler waits for poll to unlock dev->mode_config.mutex
+ * - output_poll_exec shuts off the ADPA, unlocks
+ * dev->mode_config.mutex
+ * - HPD handler runs, resets ADPA and brings us back to the start
+ *
+ * Just disable HPD interrupts here to prevent this.
+ */
+ reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -338,8 +355,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
@@ -353,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+ if (reenable_hpd)
+ intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+
return ret;
}
@@ -367,7 +389,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 stat;
bool ret = false;
int i, tries = 0;
@@ -394,9 +416,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000))
+ if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+ CRT_HOTPLUG_FORCE_DETECT, 0,
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
@@ -449,7 +471,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
@@ -485,7 +507,7 @@ static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
{
struct drm_device *dev = crt->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
@@ -600,7 +622,7 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -681,7 +703,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -713,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector,
return 0;
}
-static void intel_crt_reset(struct drm_connector *connector)
+void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
@@ -739,10 +761,11 @@ static void intel_crt_reset(struct drm_connector *connector)
*/
static const struct drm_connector_funcs intel_crt_connector_funcs = {
- .reset = intel_crt_reset,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -753,10 +776,10 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .reset = intel_crt_reset,
.destroy = intel_encoder_destroy,
};
@@ -791,7 +814,7 @@ void intel_crt_init(struct drm_device *dev)
struct drm_connector *connector;
struct intel_crt *crt;
struct intel_connector *intel_connector;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t adpa_reg;
u32 adpa;
@@ -839,7 +862,7 @@ void intel_crt_init(struct drm_device *dev)
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ DRM_MODE_ENCODER_DAC, "CRT");
intel_connector_attach_encoder(intel_connector, &crt->base);
@@ -876,12 +899,9 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- drm_connector_register(connector);
-
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -902,5 +922,5 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
}
- intel_crt_reset(connector);
+ intel_crt_reset(&crt->base.base);
}
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 2b3b428d9cd2..3edb9580928e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
- uint32_t required_min_version;
+ uint32_t required_version;
if (!fw)
return NULL;
@@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if (IS_KABYLAKE(dev_priv)) {
- required_min_version = KBL_CSR_VERSION_REQUIRED;
+ required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
- required_min_version = SKL_CSR_VERSION_REQUIRED;
+ required_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
- required_min_version = BXT_CSR_VERSION_REQUIRED;
+ required_version = BXT_CSR_VERSION_REQUIRED;
} else {
MISSING_CASE(INTEL_REVID(dev_priv));
- required_min_version = 0;
+ required_version = 0;
}
- if (csr->version < required_min_version) {
- DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
- " please upgrade to v%u.%u or later"
- " [" FIRMWARE_URL "].\n",
+ if (csr->version != required_version) {
+ DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+ " please use v%u.%u [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(required_min_version),
- CSR_VERSION_MINOR(required_min_version));
+ CSR_VERSION_MAJOR(required_version),
+ CSR_VERSION_MINOR(required_version));
return NULL;
}
@@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
csr = &dev_priv->csr;
ret = request_firmware(&fw, dev_priv->csr.fw_path,
- &dev_priv->dev->pdev->dev);
+ &dev_priv->drm.pdev->dev);
if (fw)
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
@@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- dev_notice(dev_priv->dev->dev,
+ dev_notice(dev_priv->drm.dev,
"Failed to load DMC firmware"
" [" FIRMWARE_URL "],"
" disabling runtime power management.\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df363b..dd1d6fe12297 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
/* fallthrough and treat as unknown */
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_UNKNOWN:
@@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
@@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
void hsw_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;
@@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->has_pch_encoder)
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
- else if (pipe_config->has_dp_encoder)
+ else if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
@@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
uint32_t dpll_ctl1, dpll;
@@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
u32 val, pll;
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
- intel_clock_t clock;
+ struct dpll clock;
/* For DDI ports we always use a shared PLL. */
if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
uint32_t dpll = port;
@@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
@@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
- } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
out:
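+ /* Sanity check that an enabled BXT port also has its PHY lanes powered up. */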
+ if (ret && IS_BROXTON(dev_priv)) {
+ tmp = I915_READ(BXT_PHY_CTL(port));
+ if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
+ DRM_ERROR("Port %c enabled but PHY powered down? "
+ "(PHY_CTL %08x)\n", port_name(port), tmp);
+ }
+
intel_display_power_put(dev_priv, power_domain);
return ret;
@@ -1351,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -1363,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
- struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
@@ -1384,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
- if (type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (type == INTEL_OUTPUT_DP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
@@ -1442,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
ddi_translations = bxt_ddi_translations_edp;
- } else if (type == INTEL_OUTPUT_DISPLAYPORT
+ } else if (type == INTEL_OUTPUT_DP
|| type == INTEL_OUTPUT_EDP) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
ddi_translations = bxt_ddi_translations_dp;
@@ -1616,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_ddi_clk_select(intel_encoder, crtc->config);
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_set_link_params(intel_dp, crtc->config);
@@ -1640,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1661,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_vdd_on(intel_dp);
@@ -1687,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1726,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(intel_encoder);
@@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
-static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
+ enum port port;
+
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
@@ -1770,38 +1780,51 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
+ for_each_port_masked(port,
+ phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
+ BIT(PORT_A)) {
+ u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+ if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+ "for port %c powered down "
+ "(PHY_CTL %08x)\n",
+ phy, port_name(port), tmp);
+
+ return false;
+ }
+ }
+
return true;
}
-static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
-static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+ if (intel_wait_for_register(dev_priv,
+ BXT_PORT_REF_DW3(phy),
+ GRC_DONE, GRC_DONE,
+ 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
-static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-
-static void broxton_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- enum port port;
- u32 ports, val;
+ u32 val;
- if (broxton_phy_is_enabled(dev_priv, phy)) {
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy == DPIO_PHY0)
- dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
- if (broxton_phy_verify_state(dev_priv, phy)) {
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
@@ -1810,8 +1833,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
- } else {
- DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
@@ -1831,28 +1852,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
DRM_ERROR("timeout during PHY%d power on\n", phy);
}
- if (phy == DPIO_PHY0)
- ports = BIT(PORT_B) | BIT(PORT_C);
- else
- ports = BIT(PORT_A);
-
- for_each_port_masked(port, ports) {
- int lane;
-
- for (lane = 0; lane < 4; lane++) {
- val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
- /*
- * Note that on CHV this flag is called UPAR, but has
- * the same function.
- */
- val &= ~LATENCY_OPTIM;
- if (lane != 1)
- val |= LATENCY_OPTIM;
-
- I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
- }
- }
-
/* Program PLL Rcomp code offset */
val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
@@ -1899,10 +1898,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-
- val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
- DPIO_PHY1);
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
@@ -1912,31 +1908,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
- /*
- * During PHY1 init delay waiting for GRC calibration to finish, since
- * it can happen in parallel with the subsequent PHY0 init.
- */
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-}
-
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
-{
- /* Enable PHY1 first since it provides Rcomp for PHY0 */
- broxton_phy_init(dev_priv, DPIO_PHY1);
- broxton_phy_init(dev_priv, DPIO_PHY0);
- /*
- * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
- * PHY1 GRC calibration to finish, so wait for it here.
- */
- broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
+ if (phy == DPIO_PHY1)
+ bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
-static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
uint32_t val;
@@ -1949,12 +1930,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
-{
- broxton_phy_uninit(dev_priv, DPIO_PHY1);
- broxton_phy_uninit(dev_priv, DPIO_PHY0);
-}
-
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
@@ -1982,11 +1957,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
return false;
}
-static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- enum port port;
- u32 ports;
uint32_t mask;
bool ok;
@@ -1994,27 +1967,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
- /* We expect the PHY to be always enabled */
- if (!broxton_phy_is_enabled(dev_priv, phy))
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
- if (phy == DPIO_PHY0)
- ports = BIT(PORT_B) | BIT(PORT_C);
- else
- ports = BIT(PORT_A);
-
- for_each_port_masked(port, ports) {
- int lane;
-
- for (lane = 0; lane < 4; lane++)
- ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
- LATENCY_OPTIM,
- lane != 1 ? LATENCY_OPTIM : 0,
- "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
- }
-
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
@@ -2058,11 +2015,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
#undef _CHK
}
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
+static uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
- if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
- !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
- i915_report_error(dev_priv, "DDI PHY state mismatch\n");
+ switch (pipe_config->lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(pipe_config->lane_count);
+
+ return 0;
+ }
+}
+
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int lane;
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
+ }
+}
+
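+/* Read back which of the four lanes currently have the latency
+ * optimisation bit set.
+ */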
+static uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ int lane;
+ uint8_t mask;
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2113,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
void intel_ddi_fdi_disable(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val;
@@ -2146,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
@@ -2200,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
- pipe_config->has_dp_encoder = true;
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2236,13 +2246,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
intel_ddi_clock_get(encoder, pipe_config);
+
+ if (IS_BROXTON(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
+ int ret;
WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
@@ -2250,9 +2266,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (type == INTEL_OUTPUT_HDMI)
- return intel_hdmi_compute_config(encoder, pipe_config);
+ ret = intel_hdmi_compute_config(encoder, pipe_config);
else
- return intel_dp_compute_config(encoder, pipe_config);
+ ret = intel_dp_compute_config(encoder, pipe_config);
+
+ if (IS_BROXTON(dev_priv) && ret)
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
+ pipe_config);
+
+ return ret;
+
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -2297,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
void intel_ddi_init(struct drm_device *dev, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -2347,10 +2371,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
encoder = &intel_encoder->base;
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
+ if (IS_BROXTON(dev_priv))
+ intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644
index 000000000000..cba137f9ad3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+void intel_device_info_dump(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = &dev_priv->info;
+
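+ /*
+ * Expand DEV_INFO_FOR_EACH_FLAG twice: once to build the "%s" format
+ * string and once to supply the flag name (or "") arguments.
+ */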
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
+ DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ info->gen,
+ dev_priv->drm.pdev->device,
+ dev_priv->drm.pdev->revision,
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
+
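/*
 * The PRINT_S()/PRINT_FLAG() pairs above only work together with the
 * DEV_INFO_FOR_EACH_FLAG() X-macro from i915_drv.h, which is not part
 * of this diff. A minimal, self-contained sketch of that X-macro
 * pattern follows; the FOR_EACH_FLAG() macro and the two flag names in
 * it are invented for illustration, not the driver's real flag list.
 */
#include <stdio.h>

#define FOR_EACH_FLAG(func, sep) func(has_llc) sep func(has_fbc)

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) (flags.name ? #name "," : "")
#define SEP_COMMA ,

struct demo_flags { int has_llc, has_fbc; };

static void dump_flags(struct demo_flags flags)
{
	/* Expands to: printf("flags=" "%s" "%s" "\n", <flag>, <flag>); */
	printf("flags=" FOR_EACH_FLAG(PRINT_S, SEP_EMPTY) "\n",
	       FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
}

int main(void)
{
	struct demo_flags f = { .has_llc = 1, .has_fbc = 0 };

	dump_flags(f);	/* prints "flags=has_llc," */
	return 0;
}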
+static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ u32 fuse, eu_dis;
+
+ fuse = I915_READ(CHV_FUSE_GT);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV is expected to always have a uniform distribution of EUs
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+}
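/*
 * The fuse decoding above boils down to "count the EUs whose disable
 * bit is clear": hweight32() is the kernel's population count, so
 * 8 - hweight32(eu_dis) is the number of enabled EUs in a subslice.
 * A stand-alone sketch with a made-up fuse value and a portable
 * popcount in place of the kernel helper:
 */
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for hweight32(): count the set bits in v. */
static unsigned int popcount32(uint32_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t eu_dis = 0x07;	/* hypothetical: 3 of 8 EUs fused off */
	unsigned int eu_total = 8 - popcount32(eu_dis);

	printf("EUs enabled: %u\n", eu_total);	/* prints 5 */
	return 0;
}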
+
+static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ int s_max = 3, ss_max = 4, eu_max = 8;
+ int s, ss;
+ u32 fuse2, s_enable, ss_disable, eu_disable;
+ u8 eu_mask = 0xff;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+
+ info->slice_total = hweight32(s_enable);
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+ for (ss = 0; ss < ss_max; ss++) {
+ int eu_per_ss;
+
+ if (ss_disable & BIT(ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+ eu_mask);
+
+ /*
+ * Record which subslices have 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_per_ss == 7)
+ info->subslice_7eu[s] |= BIT(ss);
+
+ info->eu_total += eu_per_ss;
+ }
+ }
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total,
+ info->subslice_total) : 0;
+ /*
+ * SKL supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice. BXT supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
+ */
+ info->has_slice_pg =
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ info->slice_total > 1;
+ info->has_subslice_pg =
+ IS_BROXTON(dev_priv) && info->subslice_total > 1;
+ info->has_eu_pg = info->eu_per_subslice > 2;
+
+ if (IS_BROXTON(dev_priv)) {
+#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
+ /*
+ * There is a HW issue in 2x6 fused down parts that requires
+ * Pooled EU to be enabled as a WA. The pool configuration
+ * changes depending upon which subslice is fused down. This
+ * doesn't apply if the device has all 3 subslices enabled.
+ */
+ /* WaEnablePooledEuFor2x6:bxt */
+ info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
+ (info->subslice_per_slice == 2 &&
+ INTEL_REVID(dev_priv) < BXT_REVID_C0));
+
+ info->min_eu_in_pool = 0;
+ if (info->has_pooled_eu) {
+ if (IS_SS_DISABLED(ss_disable, 0) ||
+ IS_SS_DISABLED(ss_disable, 2))
+ info->min_eu_in_pool = 3;
+ else if (IS_SS_DISABLED(ss_disable, 1))
+ info->min_eu_in_pool = 6;
+ else
+ info->min_eu_in_pool = 9;
+ }
+#undef IS_SS_DISABLED
+ }
+}
+
+static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ const int s_max = 3, ss_max = 3, eu_max = 8;
+ int s, ss;
+ u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+ eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+ eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+ ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+ (32 - GEN8_EU_DIS0_S1_SHIFT));
+ eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+ ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+ (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+ info->slice_total = hweight32(s_enable);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & (0x1 << s)))
+ /* skip disabled slice */
+ continue;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ u32 n_disabled;
+
+ if (ss_disable & (0x1 << ss))
+ /* skip disabled subslice */
+ continue;
+
+ n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+ /*
+ * Record which subslices have 7 EUs.
+ */
+ if (eu_max - n_disabled == 7)
+ info->subslice_7eu[s] |= 1 << ss;
+
+ info->eu_total += eu_max - n_disabled;
+ }
+ }
+
+ /*
+ * BDW is expected to always have a uniform distribution of EU across
+ * subslices with the exception that any one EU in any one subslice may
+ * be fused off for die recovery.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+ /*
+ * BDW supports slice power gating on devices with more than
+ * one slice.
+ */
+ info->has_slice_pg = (info->slice_total > 1);
+ info->has_subslice_pg = 0;
+ info->has_eu_pg = 0;
+}
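/*
 * The eu_disable[1] and eu_disable[2] expressions above splice one
 * logical EU-disable mask out of two adjacent 32-bit fuse registers:
 * the high bits of one register form the low part of the mask and the
 * low bits of the next register supply the rest. A sketch of that
 * splice with made-up register values and field positions:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend the 32-bit mask is stored as bits 31:16 of reg0
	 * followed by bits 15:0 of reg1. */
	uint32_t reg0 = 0xabcd0000, reg1 = 0x000089ef;
	unsigned int shift = 16;	/* start of the field in reg0 */
	uint32_t low_mask = 0xffff;	/* width of the field in reg1 */

	uint32_t spliced = (reg0 >> shift) |
			   ((reg1 & low_mask) << (32 - shift));

	printf("spliced mask: 0x%08x\n", spliced);	/* 0x89efabcd */
	return 0;
}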
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ * - it's judged too laborious to fill n static structures with the limit
+ * when a simple if statement does the job,
+ * - run-time checks (e.g. reading fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ * - after the MMIO has been setup as we are reading registers,
+ * - after the PCH has been detected,
+ * - before the first usage of the fields it can tweak.
+ */
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ enum pipe pipe;
+
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
+ if (IS_BROXTON(dev_priv)) {
+ info->num_sprites[PIPE_A] = 2;
+ info->num_sprites[PIPE_B] = 2;
+ info->num_sprites[PIPE_C] = 1;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 2;
+ else
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 1;
+
+ if (i915.disable_display) {
+ DRM_INFO("Display disabled (module parameter)\n");
+ info->num_pipes = 0;
+ } else if (info->num_pipes > 0 &&
+ (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+ HAS_PCH_SPLIT(dev_priv)) {
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+ /*
+ * SFUSE_STRAP is supposed to have a bit signalling the display
+ * is fused off. Unfortunately it seems that, at least in
+ * certain cases, fused off display means that PCH display
+ * reads don't land anywhere. In that case, we read 0s.
+ *
+ * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+ * should be set when taking over after the firmware.
+ */
+ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+ sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+ (dev_priv->pch_type == PCH_CPT &&
+ !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+ DRM_INFO("Display fused off, disabling\n");
+ info->num_pipes = 0;
+ } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+ DRM_INFO("PipeC fused off\n");
+ info->num_pipes -= 1;
+ }
+ } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ u32 dfsm = I915_READ(SKL_DFSM);
+ u8 disabled_mask = 0;
+ bool invalid;
+ int num_bits;
+
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+ disabled_mask |= BIT(PIPE_A);
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+ disabled_mask |= BIT(PIPE_B);
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+ disabled_mask |= BIT(PIPE_C);
+
+ num_bits = hweight8(disabled_mask);
+
+ switch (disabled_mask) {
+ case BIT(PIPE_A):
+ case BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_C):
+ invalid = true;
+ break;
+ default:
+ invalid = false;
+ }
+
+ if (num_bits > info->num_pipes || invalid)
+ DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+ disabled_mask);
+ else
+ info->num_pipes -= num_bits;
+ }
+
+ /* Initialize slice/subslice/EU info */
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_sseu_info_init(dev_priv);
+ else if (IS_BROADWELL(dev_priv))
+ broadwell_sseu_info_init(dev_priv);
+ else if (INTEL_INFO(dev_priv)->gen >= 9)
+ gen9_sseu_info_init(dev_priv);
+
+ info->has_snoop = !info->has_llc;
+
+ /* Snooping is broken on BXT A stepping. */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ info->has_snoop = false;
+
+ DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+ DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+ DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+ DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+ DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+ DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+ info->has_slice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+ info->has_subslice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+ info->has_eu_pg ? "y" : "n");
+}
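/*
 * The pipe-fuse switch in intel_device_info_runtime_init() above
 * encodes the rule that pipes can only be fused off from the
 * highest-numbered pipe downwards, so the pipes left enabled always
 * form a contiguous block starting at pipe A. A stand-alone sketch of
 * an equivalent check (illustrative only, not the driver's code):
 */
#include <assert.h>
#include <stdbool.h>

#define NUM_PIPES 3

/* Valid when the enabled-pipe mask is a contiguous run of low bits
 * (possibly empty), i.e. 0b000, 0b001, 0b011 or 0b111. */
static bool pipe_fuse_mask_valid(unsigned int disabled_mask)
{
	unsigned int enabled = ~disabled_mask & ((1u << NUM_PIPES) - 1);

	return (enabled & (enabled + 1)) == 0;
}

int main(void)
{
	/* Same classification as the switch above. */
	assert(pipe_fuse_mask_valid(0x0));	/* nothing fused off */
	assert(pipe_fuse_mask_valid(0x4));	/* pipe C only */
	assert(pipe_fuse_mask_valid(0x6));	/* pipes B and C */
	assert(pipe_fuse_mask_valid(0x7));	/* all pipes */
	assert(!pipe_fuse_mask_valid(0x1));	/* pipe A alone */
	assert(!pipe_fuse_mask_valid(0x2));	/* pipe B alone */
	assert(!pipe_fuse_mask_valid(0x3));	/* pipes A and B */
	assert(!pipe_fuse_mask_valid(0x5));	/* pipes A and C */
	return 0;
}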
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3074c56a643d..dcf93b3d4fb6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -46,7 +47,11 @@
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
-#include <linux/dma-buf.h>
+
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+ return work->mmio_work.func;
+}
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int bxt_calc_cdclk(int max_pixclk);
-typedef struct {
- int min, max;
-} intel_range_t;
-
-typedef struct {
- int dot_limit;
- int p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
struct intel_limit {
- intel_range_t dot, vco, n, m, m1, m2, p, p1;
- intel_p2_t p2;
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
};
/* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
}
}
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
return 270000;
}
-static const intel_limit_t intel_limits_i8xx_dac = {
+static const struct intel_limit intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
.p2_slow = 4, .p2_fast = 2 },
};
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const struct intel_limit intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2_slow = 4, .p2_fast = 4 },
};
-static const intel_limit_t intel_limits_i8xx_lvds = {
+static const struct intel_limit intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
.p2_slow = 14, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
};
-static const intel_limit_t intel_limits_g4x_sdvo = {
+static const struct intel_limit intel_limits_g4x_sdvo = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
},
};
-static const intel_limit_t intel_limits_g4x_hdmi = {
+static const struct intel_limit intel_limits_g4x_hdmi = {
.dot = { .min = 22000, .max = 400000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
.dot = { .min = 20000, .max = 115000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
},
};
-static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = 80000, .max = 224000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
},
};
-static const intel_limit_t intel_limits_pineview_sdvo = {
+static const struct intel_limit intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_pineview_lvds = {
+static const struct intel_limit intel_limits_pineview_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
-static const intel_limit_t intel_limits_ironlake_dac = {
+static const struct intel_limit intel_limits_ironlake_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_ironlake_single_lvds = {
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
};
/* LVDS 100mhz refclk limits. */
-static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_vlv = {
+static const struct intel_limit intel_limits_vlv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
-static const intel_limit_t intel_limits_chv = {
+static const struct intel_limit intel_limits_chv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_bxt = {
+static const struct intel_limit intel_limits_bxt = {
/* FIXME: find real dot limits */
.dot = { .min = 0, .max = INT_MAX },
.vco = { .min = 4800000, .max = 6700000 },
@@ -526,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state)
return drm_atomic_crtc_needs_modeset(state);
}
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->type == type)
- return true;
-
- return false;
-}
-
-/**
- * Returns whether any output on the specified pipe will have the specified
- * type after a staged modeset is complete, i.e., the same as
- * intel_pipe_has_type() but looking at encoder->new_crtc instead of
- * encoder->crtc.
- */
-static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
- int type)
-{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
- int i, num_connectors = 0;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- num_connectors++;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
- if (encoder->type == type)
- return true;
- }
-
- WARN_ON(num_connectors == 0);
-
- return false;
-}
-
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -581,7 +539,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
* divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -598,7 +556,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
-static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
@@ -610,7 +568,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
return clock->dot;
}
-static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
@@ -622,7 +580,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
return clock->dot / 5;
}
-int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
@@ -642,8 +600,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
*/
static bool intel_PLL_is_valid(struct drm_device *dev,
- const intel_limit_t *limit,
- const intel_clock_t *clock)
+ const struct intel_limit *limit,
+ const struct dpll *clock)
{
if (clock->n < limit->n.min || limit->n.max < clock->n)
INTELPllInvalid("n out of range\n");
@@ -678,13 +636,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static int
-i9xx_select_p2_div(const intel_limit_t *limit,
+i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -713,13 +671,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
+i9xx_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +728,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-pnv_find_best_dpll(const intel_limit_t *limit,
+pnv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +783,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-g4x_find_best_dpll(const intel_limit_t *limit,
+g4x_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int max_n;
bool found = false;
/* approximately equals target * 0.00585 */
@@ -877,8 +835,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
* best configuration and error found so far. Return the calculated error.
*/
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const intel_clock_t *calculated_clock,
- const intel_clock_t *best_clock,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
unsigned int best_error_ppm,
unsigned int *error_ppm)
{
@@ -918,14 +876,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool
-vlv_find_best_dpll(const intel_limit_t *limit,
+vlv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock;
+ struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
int max_n = min(limit->n.max, refclk / 19200);
@@ -977,15 +935,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool
-chv_find_best_dpll(const intel_limit_t *limit,
+chv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
- intel_clock_t clock;
+ struct dpll clock;
uint64_t m2;
int found = false;
@@ -1035,10 +993,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
}
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
- intel_clock_t *best_clock)
+ struct dpll *best_clock)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_bxt;
+ const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
target_clock, refclk, NULL, best_clock);
@@ -1076,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
@@ -1112,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1120,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
- 100))
+ if (intel_wait_for_register(dev_priv,
+ reg, I965_PIPECONF_ACTIVE, 0,
+ 100))
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
@@ -1203,7 +1162,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (INTEL_INFO(dev_priv)->gen == 5)
+ if (IS_GEN5(dev_priv))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1230,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
@@ -1272,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
bool cur_state;
if (IS_845G(dev) || IS_I865G(dev))
@@ -1334,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
@@ -1360,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int sprite;
if (INTEL_INFO(dev)->gen >= 9) {
@@ -1540,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ if (intel_wait_for_register(dev_priv,
+ DPLL(pipe),
+ DPLL_LOCK_VLV,
+ DPLL_LOCK_VLV,
+ 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
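/*
 * Several hunks in this file replace open-coded wait_for() polls with
 * intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms),
 * which returns non-zero on timeout as the call sites above assume.
 * A simplified user-space sketch of that shape (the i915 helper is
 * more elaborate than this); the fake register accessor below is
 * invented for illustration:
 */
#include <stdint.h>
#include <time.h>

static int wait_for_register(uint32_t (*read_reg)(uint32_t), uint32_t reg,
			     uint32_t mask, uint32_t value,
			     unsigned int timeout_ms)
{
	struct timespec ts = { .tv_nsec = 1000 * 1000 };	/* 1 ms */

	while (timeout_ms--) {
		if ((read_reg(reg) & mask) == value)
			return 0;
		nanosleep(&ts, NULL);
	}

	return (read_reg(reg) & mask) == value ? 0 : -1;
}

static uint32_t fake_mmio[16];

static uint32_t read_fake(uint32_t reg)
{
	return fake_mmio[reg];
}

int main(void)
{
	fake_mmio[3] = 0x80000000;	/* pretend the status bit is already set */
	return wait_for_register(read_fake, 3, 0x80000000, 0x80000000, 100);
}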
@@ -1589,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ if (intel_wait_for_register(dev_priv,
+ DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
+ 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
}
@@ -1635,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc)
+ for_each_intel_crtc(dev, crtc) {
count += crtc->base.state->active &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
+ }
return count;
}
@@ -1645,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
@@ -1717,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1809,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+ if (intel_wait_for_register(dev_priv,
+ dpll_reg, port_mask, expected_mask,
+ 1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
@@ -1817,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
i915_reg_t reg;
@@ -1850,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1859,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv) &&
- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -1867,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(reg, val | TRANS_ENABLE);
- if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ if (intel_wait_for_register(dev_priv,
+ reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
+ 100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
@@ -1895,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+ if (intel_wait_for_register(dev_priv,
+ LPT_TRANSCONF,
+ TRANS_STATE_ENABLE,
+ TRANS_STATE_ENABLE,
+ 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
i915_reg_t reg;
uint32_t val;
@@ -1918,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ if (intel_wait_for_register(dev_priv,
+ reg, TRANS_STATE_ENABLE, 0,
+ 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
if (HAS_PCH_CPT(dev)) {
@@ -1938,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+ if (intel_wait_for_register(dev_priv,
+ LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
+ 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
@@ -1957,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
static void intel_enable_pipe(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pch_transcoder;
@@ -1981,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv))
- if (crtc->config->has_dsi_encoder)
+ if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -2030,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
*/
static void intel_disable_pipe(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -2068,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
intel_wait_for_pipe_off(crtc);
}
-static bool need_vtd_wa(struct drm_device *dev)
-{
-#ifdef CONFIG_INTEL_IOMMU
- if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
- return true;
-#endif
- return false;
-}
-
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
return IS_GEN2(dev_priv) ? 2048 : 4096;
@@ -2241,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation)
{
struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
u32 alignment;
@@ -2258,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* we should always have valid PTE following the scanout preventing
* the VT-d warning.
*/
- if (need_vtd_wa(dev) && alignment < 256 * 1024)
+ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
alignment = 256 * 1024;
/*
@@ -2309,7 +2278,7 @@ err_pm:
return ret;
}
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
@@ -2543,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
@@ -2639,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2752,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
@@ -2769,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2897,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -3007,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -3091,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = to_intel_crtc(crtc)->pipe;
I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -3110,17 +3079,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENODEV;
}
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum plane plane = intel_crtc->plane;
+ struct intel_crtc *crtc;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip_plane(dev, plane);
- }
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3107,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
}
}
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
return;
- drm_modeset_lock_all(dev);
+ drm_modeset_lock_all(&dev_priv->drm);
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- intel_display_suspend(dev);
+ intel_display_suspend(&dev_priv->drm);
}
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*/
- intel_complete_page_flips(dev);
+ intel_complete_page_flips(dev_priv);
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
/*
* Flips in the rings have been nuked by the reset,
* so update the base address of all primary
@@ -3187,7 +3149,7 @@ void intel_finish_reset(struct drm_device *dev)
* FIXME: Atomic will make this obsolete since we won't schedule
* CS-based flips (which might get lost in gpu resets) any more.
*/
- intel_update_primary_planes(dev);
+ intel_update_primary_planes(&dev_priv->drm);
return;
}
@@ -3198,18 +3160,18 @@ void intel_finish_reset(struct drm_device *dev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(&dev_priv->drm);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
+ intel_display_resume(&dev_priv->drm);
intel_hpd_init(dev_priv);
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3186,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return false;
spin_lock_irq(&dev->event_lock);
- pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ pending = to_intel_crtc(crtc)->flip_work != NULL;
spin_unlock_irq(&dev->event_lock);
return pending;
@@ -3234,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3275,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3318,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3419,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = {
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3552,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3671,7 +3633,7 @@ train_done:
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -3708,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -3738,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3803,7 +3765,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
- if (crtc->unpin_work)
+ if (crtc->flip_work)
intel_wait_for_vblank(dev, crtc->pipe);
return true;
@@ -3815,11 +3777,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_unpin_work *work = intel_crtc->unpin_work;
+ struct intel_flip_work *work = intel_crtc->flip_work;
- /* ensure that the unpin work is consistent wrt ->pending. */
- smp_rmb();
- intel_crtc->unpin_work = NULL;
+ intel_crtc->flip_work = NULL;
if (work->event)
drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3787,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
drm_crtc_vblank_put(&intel_crtc->base);
wake_up_all(&dev_priv->pending_flip_queue);
- queue_work(dev_priv->wq, &work->work);
+ queue_work(dev_priv->wq, &work->unpin_work);
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
@@ -3836,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
long ret;
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3851,9 +3811,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (ret == 0) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- if (intel_crtc->unpin_work) {
+ work = intel_crtc->flip_work;
+ if (work && !is_mmio_work(work)) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
@@ -3997,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
enum pipe pch_transcoder)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -4019,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
@@ -4069,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
struct intel_encoder *encoder;
for_each_encoder_on_crtc(dev, crtc, encoder) {
- if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
return enc_to_dig_port(&encoder->base)->port;
}
@@ -4088,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 temp;
@@ -4138,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
+ if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4178,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
static void lpt_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -4194,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
@@ -4281,8 +4243,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+ DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->base.name,
+ intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4275,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_crtc->pipe,
- drm_plane_index(&intel_plane->base));
+ DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ intel_crtc->pipe, drm_plane_index(&intel_plane->base));
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
@@ -4330,8 +4293,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* check colorkey */
if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
- DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
- intel_plane->base.base.id);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
return -EINVAL;
}
@@ -4350,8 +4314,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_VYUY:
break;
default:
- DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->pixel_format);
return -EINVAL;
}
@@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;
@@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
if (crtc->config->pch_pfit.enabled) {
@@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->ips_enabled)
return;
@@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+ if (intel_wait_for_register(dev_priv,
+ IPS_CTL, IPS_ENABLE, IPS_ENABLE,
+ 50))
DRM_ERROR("Timed out waiting for IPS enable\n");
}
}
@@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
void hsw_disable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->ips_enabled)
return;
@@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
/* wait for pcode to finish disabling IPS, which may take up to 42ms */
- if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+ if (intel_wait_for_register(dev_priv,
+ IPS_CTL, IPS_ENABLE, 0,
+ 42))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
@@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
if (intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.interruptible = false;
@@ -4508,7 +4477,7 @@ static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -4540,7 +4509,7 @@ static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -4567,7 +4536,7 @@ static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4641,14 +4610,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
- intel_fbc_pre_update(crtc);
+ intel_fbc_pre_update(crtc, pipe_config, primary_state);
if (old_primary_state->visible &&
(modeset || !primary_state->visible))
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr) {
+ if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
crtc->wm.cxsr_allowed = false;
/*
@@ -4729,7 +4698,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
@@ -4841,13 +4810,17 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
if (intel_crtc->config->shared_dpll)
intel_enable_shared_dpll(intel_crtc);
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
@@ -4863,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
&intel_crtc->config->fdi_m_n, NULL);
}
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
haswell_set_pipeconf(crtc);
haswell_set_pipemisc(crtc);
@@ -4885,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
if (INTEL_INFO(dev)->gen >= 9)
@@ -4900,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_color_load_luts(&pipe_config->base);
intel_ddi_set_pipe_settings(crtc);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
@@ -4909,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -4946,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
/* To avoid upsetting the power well on haswell only disable the pfit if
@@ -4961,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -5024,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5042,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
assert_vblank_disabled(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
if (INTEL_INFO(dev)->gen >= 9)
@@ -5056,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
else
ironlake_pfit_disable(intel_crtc, false);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_pipe_clock(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5076,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *pipe_config = crtc->config;
if (!pipe_config->gmch_pfit.control)
@@ -5146,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
WARN_ON_ONCE(!HAS_DDI(dev));
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5180,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
* run the DP detection too.
*/
WARN_ON_ONCE(!HAS_DDI(dev));
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
return port_to_aux_power_domain(intel_dig_port->port);
@@ -5228,7 +5201,7 @@ static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain domain;
unsigned long domains, new_domains, old_domains;
@@ -5269,21 +5242,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
return max_cdclk_freq*90/100;
}
+static int skl_calc_cdclk(int max_pixclk, int vco);
+
static void intel_update_max_cdclk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ int max_cdclk, vco;
+
+ vco = dev_priv->skl_preferred_vco_freq;
+ WARN_ON(vco != 8100000 && vco != 8640000);
+ /*
+ * Use the lower (vco 8640) cdclk values as a
+ * first guess. skl_calc_cdclk() will correct it
+ * if the preferred vco is 8100 instead.
+ */
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
- dev_priv->max_cdclk_freq = 675000;
+ max_cdclk = 617143;
else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
- dev_priv->max_cdclk_freq = 540000;
+ max_cdclk = 540000;
else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
- dev_priv->max_cdclk_freq = 450000;
+ max_cdclk = 432000;
else
- dev_priv->max_cdclk_freq = 337500;
+ max_cdclk = 308571;
+
+ dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROXTON(dev)) {
dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev)) {
@@ -5321,267 +5307,322 @@ static void intel_update_max_cdclk(struct drm_device *dev)
static void intel_update_cdclk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->cdclk_freq);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+ dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
+ dev_priv->cdclk_pll.ref);
+ else
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+ dev_priv->cdclk_freq);
/*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
+ * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+ * Programmng [sic] note: bit[9:2] should be programmed to the number
+ * of cdclk that generates 4MHz reference clock freq which is used to
+ * generate GMBus clock. This will vary with the cdclk freq.
*/
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
- }
+}
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+ return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
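
skl_cdclk_decimal() above encodes a cdclk frequency in kHz into the CDCLK_CTL decimal field: (MHz - 1) expressed in 0.5 MHz units. A standalone sketch of the same arithmetic, not part of the patch; DIV_ROUND_CLOSEST is redefined locally so the snippet builds in userspace:

/*
 * 337500 kHz -> 673, 617143 kHz -> 1232, etc.  Decoding goes the other
 * way: field * 500 + 1000 recovers the frequency in kHz, as the removed
 * broxton_set_cdclk() code below used to do.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int cdclk_decimal(int cdclk_khz)
{
	return DIV_ROUND_CLOSEST(cdclk_khz - 1000, 500);
}

int main(void)
{
	const int freqs[] = { 308571, 337500, 432000, 540000, 617143, 675000 };

	for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		printf("%6d kHz -> decimal field %4d\n",
		       freqs[i], cdclk_decimal(freqs[i]));
	return 0;
}
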
-static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- uint32_t divider;
- uint32_t ratio;
- uint32_t current_freq;
- int ret;
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk_pll.ref)
+ return 0;
- /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
- switch (frequency) {
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
case 144000:
+ case 288000:
+ case 384000:
+ case 576000:
+ ratio = 60;
+ break;
+ case 624000:
+ ratio = 65;
+ break;
+ }
+
+ return dev_priv->cdclk_pll.ref * ratio;
+}
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+ /* Timeout 200us */
+ if (intel_wait_for_register(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+ 1))
+ DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+ dev_priv->cdclk_pll.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
+ u32 val;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ val &= ~BXT_DE_PLL_RATIO_MASK;
+ val |= BXT_DE_PLL_RATIO(ratio);
+ I915_WRITE(BXT_DE_PLL_CTL, val);
+
+ I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+ /* Timeout 200us */
+ if (intel_wait_for_register(dev_priv,
+ BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK,
+ BXT_DE_PLL_LOCK,
+ 1))
+ DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+ dev_priv->cdclk_pll.vco = vco;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+{
+ u32 val, divider;
+ int vco, ret;
+
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
+ switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ case 8:
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 288000:
+ case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 384000:
+ case 3:
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 576000:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- ratio = BXT_DE_PLL_RATIO(60);
- break;
- case 624000:
+ case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- ratio = BXT_DE_PLL_RATIO(65);
- break;
- case 19200:
- /*
- * Bypass frequency with DE PLL disabled. Init ratio, divider
- * to suppress GCC warning.
- */
- ratio = 0;
- divider = 0;
break;
default:
- DRM_ERROR("unsupported CDCLK freq %d", frequency);
+ WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
+ WARN_ON(vco != 0);
- return;
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ break;
}
- mutex_lock(&dev_priv->rps.hw_lock);
/* Inform power controller of upcoming frequency change */
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, frequency);
+ ret, cdclk);
return;
}
- current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
- /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
- current_freq = current_freq * 500 + 1000;
+ if (dev_priv->cdclk_pll.vco != 0 &&
+ dev_priv->cdclk_pll.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /*
- * DE PLL has to be disabled when
- * - setting to 19.2MHz (bypass, PLL isn't used)
- * - before setting to 624MHz (PLL needs toggling)
- * - before setting to any frequency from 624MHz (PLL needs toggling)
- */
- if (frequency == 19200 || frequency == 624000 ||
- current_freq == 624000) {
- I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
- /* Timeout 200us */
- if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
- 1))
- DRM_ERROR("timout waiting for DE PLL unlock\n");
- }
-
- if (frequency != 19200) {
- uint32_t val;
-
- val = I915_READ(BXT_DE_PLL_CTL);
- val &= ~BXT_DE_PLL_RATIO_MASK;
- val |= ratio;
- I915_WRITE(BXT_DE_PLL_CTL, val);
-
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
- /* Timeout 200us */
- if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
-
- val = I915_READ(CDCLK_CTL);
- val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
- val |= divider;
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- if (frequency >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ if (dev_priv->cdclk_pll.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
- val &= ~CDCLK_FREQ_DECIMAL_MASK;
- /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
- val |= (frequency - 1000) / 500;
- I915_WRITE(CDCLK_CTL, val);
- }
+ val = divider | skl_cdclk_decimal(cdclk);
+ /*
+ * FIXME if only the cd2x divider needs changing, it could be done
+ * without shutting off the pipe (if only one pipe is active).
+ */
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(frequency, 25000));
+ DIV_ROUND_UP(cdclk, 25000));
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, frequency);
+ ret, cdclk);
return;
}
- intel_update_cdclk(dev_priv->dev);
+ intel_update_cdclk(&dev_priv->drm);
}
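
bxt_de_pll_vco() and the divider switch in bxt_set_cdclk() above encode the Broxton relationship cdclk = vco / 2 / {1, 1.5, 2, 4}, with vco = 19.2 MHz * ratio. A standalone sketch of that arithmetic, not part of the patch:

/*
 * With the 19200 kHz reference, ratio 60 gives a 1152000 kHz VCO
 * (ratio 65 -> 1248000 kHz for 624 MHz cdclk), and vco/cdclk lands on
 * 2, 3, 4 or 8, which bxt_set_cdclk() maps to the CD2X divider selects.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	const int ref = 19200;			/* kHz */
	const int cdclks[] = { 144000, 288000, 384000, 576000, 624000 };

	for (unsigned int i = 0; i < sizeof(cdclks) / sizeof(cdclks[0]); i++) {
		int cdclk = cdclks[i];
		int ratio = (cdclk == 624000) ? 65 : 60;
		int vco = ref * ratio;
		int cd2x = DIV_ROUND_CLOSEST(vco, cdclk);

		printf("cdclk %6d kHz: ratio %2d, vco %7d kHz, vco/cdclk = %d\n",
		       cdclk, ratio, vco, cd2x);
	}
	return 0;
}
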
-static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
- return false;
+ u32 cdctl, expected;
- /* TODO: Check for a valid CDCLK rate */
+ intel_update_cdclk(&dev_priv->drm);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
- DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+ if (dev_priv->cdclk_pll.vco == 0 ||
+ dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
+ goto sanitize;
- return false;
- }
+ /* DPLL okay; verify the cdclock
+ *
+ * Some BIOS versions leave an incorrect decimal frequency value and
+ * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = I915_READ(CDCLK_CTL);
+ /*
+ * Let's ignore the pipe field, since BIOS could have configured the
+ * dividers both synching to an active pipe, or asynchronously
+ * (PIPE_NONE).
+ */
+ cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
- DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+ expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk_freq);
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (dev_priv->cdclk_freq >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return false;
- }
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
- return true;
-}
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
-{
- return broxton_cdclk_is_enabled(dev_priv);
+ /* force cdclk programming */
+ dev_priv->cdclk_freq = 0;
+
+ /* force full PLL disable + enable */
+ dev_priv->cdclk_pll.vco = -1;
}
-void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
- /* check if cd clock is enabled */
- if (broxton_cdclk_is_enabled(dev_priv)) {
- DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
- return;
- }
+ bxt_sanitize_cdclk(dev_priv);
- DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+ if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
+ return;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
- * - check if setting the max (or any) cdclk freq is really necessary
- * here, it belongs to modeset time
*/
- broxton_set_cdclk(dev_priv, 624000);
-
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
+}
- udelay(10);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
+}
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout!\n");
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+ if (vco == 8640000) {
+ if (max_pixclk > 540000)
+ return 617143;
+ else if (max_pixclk > 432000)
+ return 540000;
+ else if (max_pixclk > 308571)
+ return 432000;
+ else
+ return 308571;
+ } else {
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+ }
}
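
skl_calc_cdclk() above returns the smallest cdclk step on the active VCO's ladder that still covers max_pixclk; intel_update_max_cdclk() exploits this by guessing the 8640-VCO step first and letting the function snap to the 8100-VCO value when the preferred VCO differs. A standalone sketch, not part of the patch:

/*
 * The 8640 MHz VCO ladder is 308571/432000/540000/617143 kHz, the
 * 8100 MHz VCO ladder is 337500/450000/540000/675000 kHz.  Feeding the
 * 8640-derived guess (e.g. 617143 for the DFSM 675 MHz limit) back in
 * yields 675000 when the preferred VCO is 8100 MHz.
 */
#include <stdio.h>

static int calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk > 540000) return 617143;
		if (max_pixclk > 432000) return 540000;
		if (max_pixclk > 308571) return 432000;
		return 308571;
	}
	if (max_pixclk > 540000) return 675000;
	if (max_pixclk > 450000) return 540000;
	if (max_pixclk > 337500) return 450000;
	return 337500;
}

int main(void)
{
	printf("max_cdclk (vco 8640 MHz) = %d kHz\n", calc_cdclk(617143, 8640000));
	printf("max_cdclk (vco 8100 MHz) = %d kHz\n", calc_cdclk(617143, 8100000));
	return 0;
}
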
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void
+skl_dpll0_update(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ u32 val;
- udelay(10);
+ dev_priv->cdclk_pll.ref = 24000;
+ dev_priv->cdclk_pll.vco = 0;
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout!\n");
+ val = I915_READ(LCPLL1_CTL);
+ if ((val & LCPLL_PLL_ENABLE) == 0)
+ return;
- /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
- broxton_set_cdclk(dev_priv, 19200);
-}
+ if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ return;
-static const struct skl_cdclk_entry {
- unsigned int freq;
- unsigned int vco;
-} skl_cdclk_frequencies[] = {
- { .freq = 308570, .vco = 8640 },
- { .freq = 337500, .vco = 8100 },
- { .freq = 432000, .vco = 8640 },
- { .freq = 450000, .vco = 8100 },
- { .freq = 540000, .vco = 8100 },
- { .freq = 617140, .vco = 8640 },
- { .freq = 675000, .vco = 8100 },
-};
+ val = I915_READ(DPLL_CTRL1);
-static unsigned int skl_cdclk_decimal(unsigned int freq)
-{
- return (freq - 1000) / 500;
+ if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ return;
+
+ switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+ dev_priv->cdclk_pll.vco = 8100000;
+ break;
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+ dev_priv->cdclk_pll.vco = 8640000;
+ break;
+ default:
+ MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ break;
+ }
}
-static unsigned int skl_cdclk_get_vco(unsigned int freq)
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
- const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
+ bool changed = dev_priv->skl_preferred_vco_freq != vco;
- if (e->freq == freq)
- return e->vco;
- }
+ dev_priv->skl_preferred_vco_freq = vco;
- return 8100;
+ if (changed)
+ intel_update_max_cdclk(&dev_priv->drm);
}
static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- unsigned int min_freq;
+ int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = I915_READ(CDCLK_CTL);
- val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
- val |= CDCLK_FREQ_337_308;
-
- if (required_vco == 8640)
- min_freq = 308570;
- else
- min_freq = 337500;
-
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+ WARN_ON(vco != 8100000 && vco != 8640000);
+ /* select the minimum CDCLK before enabling DPLL 0 */
+ val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
I915_WRITE(CDCLK_CTL, val);
POSTING_READ(CDCLK_CTL);
@@ -5592,14 +5633,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
* 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
* The modeset code is responsible for the selection of the exact link
* rate later on, with the constraint of choosing a frequency that
- * works with required_vco.
+ * works with vco.
*/
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- if (required_vco == 8640)
+ if (vco == 8640000)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
else
@@ -5611,8 +5652,27 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+ if (intel_wait_for_register(dev_priv,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+ 5))
DRM_ERROR("DPLL0 not locked\n");
+
+ dev_priv->cdclk_pll.vco = vco;
+
+ /* We'll want to keep using the current vco from now on. */
+ skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void
+skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ if (intel_wait_for_register(dev_priv,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+ 1))
+ DRM_ERROR("Couldn't disable DPLL0\n");
+
+ dev_priv->cdclk_pll.vco = 0;
}
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5631,23 +5691,17 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
- unsigned int i;
-
- for (i = 0; i < 15; i++) {
- if (skl_cdclk_pcu_ready(dev_priv))
- return true;
- udelay(10);
- }
-
- return false;
+ return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
}
-static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
u32 freq_select, pcu_ack;
- DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+ WARN_ON((cdclk == 24000) != (vco == 0));
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5709,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
}
/* set CDCLK_CTL */
- switch(freq) {
+ switch (cdclk) {
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5719,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
freq_select = CDCLK_FREQ_540;
pcu_ack = 2;
break;
- case 308570:
+ case 308571:
case 337500:
default:
freq_select = CDCLK_FREQ_337_308;
pcu_ack = 0;
break;
- case 617140:
+ case 617143:
case 675000:
freq_select = CDCLK_FREQ_675_617;
pcu_ack = 3;
break;
}
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+ if (dev_priv->cdclk_pll.vco != 0 &&
+ dev_priv->cdclk_pll.vco != vco)
+ skl_dpll0_disable(dev_priv);
+
+ if (dev_priv->cdclk_pll.vco != vco)
+ skl_dpll0_enable(dev_priv, vco);
+
+ I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
@@ -5689,52 +5750,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
intel_update_cdclk(dev);
}
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
+
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
- /* disable DBUF power */
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
-
- udelay(10);
-
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout\n");
-
- /* disable DPLL0 */
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
+ skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
- unsigned int required_vco;
+ int cdclk, vco;
- /* DPLL0 not enabled (happens on early BIOS versions) */
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
- /* enable DPLL0 */
- required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
- skl_dpll0_enable(dev_priv, required_vco);
- }
+ skl_sanitize_cdclk(dev_priv);
- /* set CDCLK to the frequency the BIOS chose */
- skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
-
- /* enable DBUF power */
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
+ /*
+ * Use the current vco as our initial
+ * guess as to what the preferred vco is.
+ */
+ if (dev_priv->skl_preferred_vco_freq == 0)
+ skl_set_preferred_cdclk_vco(dev_priv,
+ dev_priv->cdclk_pll.vco);
+ return;
+ }
- udelay(10);
+ vco = dev_priv->skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+ cdclk = skl_calc_cdclk(0, vco);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
+ skl_set_cdclk(dev_priv, cdclk, vco);
}
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- int freq = dev_priv->skl_boot_cdclk;
+ uint32_t cdctl, expected;
/*
* check if the pre-os intialized the display
@@ -5744,8 +5794,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
+ intel_update_cdclk(&dev_priv->drm);
/* Is PLL enabled and locked ? */
- if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
+ if (dev_priv->cdclk_pll.vco == 0 ||
+ dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -5754,25 +5806,26 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part is programmed wrong from BIOS where pre-os does not
* enable display. Verify the same as well.
*/
- if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
+ cdctl = I915_READ(CDCLK_CTL);
+ expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk_freq);
+ if (cdctl == expected)
/* All well; nothing to sanitize */
- return false;
+ return;
+
sanitize:
- /*
- * As of now initialize with max cdclk till
- * we get dynamic cdclk support
- * */
- dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
- skl_init_cdclk(dev_priv);
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- /* we did have to sanitize */
- return true;
+ /* force cdclk programming */
+ dev_priv->cdclk_freq = 0;
+ /* force full PLL disable + enable */
+ dev_priv->cdclk_pll.vco = -1;
}
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5837,7 +5890,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5906,21 +5959,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
return 200000;
}
-static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
- int max_pixclk)
+static int bxt_calc_cdclk(int max_pixclk)
{
- /*
- * FIXME:
- * - remove the guardband, it's not needed on BXT
- * - set 19.2MHz bypass frequency if there are no active pipes
- */
- if (max_pixclk > 576000*9/10)
+ if (max_pixclk > 576000)
return 624000;
- else if (max_pixclk > 384000*9/10)
+ else if (max_pixclk > 384000)
return 576000;
- else if (max_pixclk > 288000*9/10)
+ else if (max_pixclk > 288000)
return 384000;
- else if (max_pixclk > 144000*9/10)
+ else if (max_pixclk > 144000)
return 288000;
else
return 144000;
@@ -5931,7 +5978,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
unsigned max_pixclk = 0, i;
@@ -5958,14 +6005,11 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int max_pixclk = intel_mode_max_pixclk(dev, state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
- if (max_pixclk < 0)
- return max_pixclk;
-
intel_state->cdclk = intel_state->dev_cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5975,22 +6019,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
return 0;
}
-static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, state);
+ int max_pixclk = ilk_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
- if (max_pixclk < 0)
- return max_pixclk;
-
intel_state->cdclk = intel_state->dev_cdclk =
- broxton_calc_cdclk(dev_priv, max_pixclk);
+ bxt_calc_cdclk(max_pixclk);
if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+ intel_state->dev_cdclk = bxt_calc_cdclk(0);
return 0;
}
@@ -6034,7 +6073,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned req_cdclk = old_intel_state->dev_cdclk;
@@ -6073,14 +6112,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
@@ -6125,7 +6164,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
@@ -6146,7 +6185,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -6182,7 +6221,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->gmch_pfit.control)
return;
@@ -6197,7 +6236,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -6223,7 +6262,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (!intel_crtc->config->has_dsi_encoder) {
+ if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
@@ -6252,7 +6291,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return;
if (to_intel_plane_state(crtc->primary->state)->visible) {
- WARN_ON(intel_crtc->unpin_work);
+ WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
@@ -6262,8 +6301,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc);
- DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
- crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.id, crtc->name);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
crtc->state->active = false;
@@ -6541,7 +6580,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
@@ -6561,12 +6600,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int clock_limit = dev_priv->max_dotclk_freq;
- /* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+ clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -6574,16 +6613,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (intel_crtc_supports_double_wide(crtc) &&
adjusted_mode->crtc_clock > clock_limit) {
- clock_limit *= 2;
+ clock_limit = dev_priv->max_dotclk_freq;
pipe_config->double_wide = true;
}
+ }
- if (adjusted_mode->crtc_clock > clock_limit) {
- DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
- adjusted_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
- return -EINVAL;
- }
+ if (adjusted_mode->crtc_clock > clock_limit) {
+ DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ adjusted_mode->crtc_clock, clock_limit,
+ yesno(pipe_config->double_wide));
+ return -EINVAL;
}
/*
@@ -6592,7 +6631,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+ if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -6615,81 +6654,103 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t linkrate;
+ uint32_t cdctl;
- if (!(lcpll1 & LCPLL_PLL_ENABLE))
- return 24000; /* 24MHz is the cd freq with NSSC ref */
+ skl_dpll0_update(dev_priv);
- if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
- return 540000;
+ if (dev_priv->cdclk_pll.vco == 0)
+ return dev_priv->cdclk_pll.ref;
- linkrate = (I915_READ(DPLL_CTRL1) &
- DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+ cdctl = I915_READ(CDCLK_CTL);
- if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
- linkrate == DPLL_CTRL1_LINK_RATE_1080) {
- /* vco 8640 */
+ if (dev_priv->cdclk_pll.vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
- return 308570;
+ return 308571;
+ case CDCLK_FREQ_540:
+ return 540000;
case CDCLK_FREQ_675_617:
- return 617140;
+ return 617143;
default:
- WARN(1, "Unknown cd freq selection\n");
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
}
} else {
- /* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
+ case CDCLK_FREQ_540:
+ return 540000;
case CDCLK_FREQ_675_617:
return 675000;
default:
- WARN(1, "Unknown cd freq selection\n");
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
}
}
- /* error case, do as if DPLL0 isn't enabled */
- return 24000;
+ return dev_priv->cdclk_pll.ref;
+}
+
+static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ dev_priv->cdclk_pll.ref = 19200;
+ dev_priv->cdclk_pll.vco = 0;
+
+ val = I915_READ(BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ return;
+
+ if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+ return;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
+ dev_priv->cdclk_pll.ref;
}
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
- int cdclk;
+ u32 divider;
+ int div, vco;
- if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
- return 19200;
+ bxt_de_pll_update(dev_priv);
- cdclk = 19200 * pll_ratio / 2;
+ vco = dev_priv->cdclk_pll.vco;
+ if (vco == 0)
+ return dev_priv->cdclk_pll.ref;
+
+ divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
- switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+ switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
- return cdclk; /* 576MHz or 624MHz */
+ div = 2;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- return cdclk * 2 / 3; /* 384MHz */
+ div = 3;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
- return cdclk / 2; /* 288MHz */
+ div = 4;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- return cdclk / 4; /* 144MHz */
+ div = 8;
+ break;
+ default:
+ MISSING_CASE(divider);
+ return dev_priv->cdclk_pll.ref;
}
- /* error case, do as if DE PLL isn't enabled */
- return 19200;
+ return DIV_ROUND_CLOSEST(vco, div);
}
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -6709,7 +6770,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -6843,7 +6904,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
@@ -7063,7 +7124,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
u32 fp, fp2 = 0;
@@ -7081,7 +7142,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
@@ -7123,7 +7184,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7137,7 +7198,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
@@ -7200,7 +7261,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!pipe_config->has_dsi_encoder)
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
DPLL_EXT_BUFFER_ENABLE_VLV;
@@ -7217,7 +7278,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!pipe_config->has_dsi_encoder)
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
pipe_config->dpll_hw_state.dpll_md =
@@ -7228,7 +7289,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7287,15 +7348,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
/* Set HBR and RBR LPF coefficients */
if (pipe_config->port_clock == 162000 ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (pipe_config->has_dp_encoder) {
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -7315,8 +7376,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ if (intel_crtc_has_dp_encoder(crtc->config))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -7328,7 +7388,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
@@ -7487,22 +7547,18 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll;
- bool is_sdvo;
struct dpll *clock = &crtc_state->dpll;
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
- is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
-
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -7512,10 +7568,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
- if (is_sdvo)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc_state->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(crtc_state))
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -7545,7 +7602,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -7563,10 +7620,10 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
static void i8xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll;
struct dpll *clock = &crtc_state->dpll;
@@ -7574,7 +7631,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -7585,10 +7642,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -7601,7 +7658,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -7618,7 +7675,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7663,7 +7720,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
@@ -7678,7 +7735,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
uint32_t tmp;
@@ -7713,7 +7770,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -7751,7 +7808,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -7797,7 +7854,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7816,21 +7873,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 48000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
limit = &intel_limits_i8xx_lvds;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
limit = &intel_limits_i8xx_dvo;
} else {
limit = &intel_limits_i8xx_dac;
@@ -7852,14 +7909,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7869,10 +7926,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else {
/* The option is for other outputs */
@@ -7895,14 +7952,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7929,14 +7986,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7963,7 +8020,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_chv;
+ const struct intel_limit *limit = &intel_limits_chv;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8041,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_vlv;
+ const struct intel_limit *limit = &intel_limits_vlv;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8005,7 +8062,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
@@ -8032,9 +8089,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = pipe_config->cpu_transcoder;
- intel_clock_t clock;
+ struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8060,7 +8117,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
@@ -8128,10 +8185,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- intel_clock_t clock;
+ struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
@@ -8162,7 +8219,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8273,7 +8330,7 @@ out:
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
int i;
u32 val, final;
@@ -8544,7 +8601,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
bool with_fdi)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -8583,7 +8640,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@@ -8704,7 +8761,7 @@ void intel_init_pch_refclk(struct drm_device *dev)
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t val;
@@ -8746,7 +8803,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 val = 0;
@@ -8765,7 +8822,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
@@ -8814,41 +8871,17 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll, fp, fp2;
- int factor, i;
- bool is_lvds = false, is_sdvo = false;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- break;
- default:
- break;
- }
- }
+ int factor;
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
- if (is_lvds) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
@@ -8872,7 +8905,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dpll = 0;
- if (is_lvds)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -8880,9 +8913,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dpll |= (crtc_state->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
- if (is_sdvo)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc_state->has_dp_encoder)
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -8905,7 +8940,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
break;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -8921,11 +8957,11 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t reduced_clock;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct dpll reduced_clock;
bool has_reduced_clock = false;
struct intel_shared_dpll *pll;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 120000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -8937,7 +8973,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->has_pch_encoder)
return 0;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
dev_priv->vbt.lvds_ssc_freq);
@@ -8976,7 +9012,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
has_reduced_clock)
crtc->lowfreq_avail = true;
@@ -8987,7 +9023,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -9005,7 +9041,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
if (INTEL_INFO(dev)->gen >= 5) {
@@ -9063,7 +9099,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
uint32_t ps_ctrl = 0;
int id = -1;
@@ -9094,7 +9130,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
@@ -9177,7 +9213,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9202,7 +9238,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset;
int pipe = crtc->pipe;
int fourcc, pixel_format;
@@ -9270,7 +9306,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -9320,6 +9356,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
if (HAS_PCH_IBX(dev_priv)) {
+ /*
+ * The pipe->pch transcoder and pch transcoder->pll
+ * mapping is fixed.
+ */
pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
@@ -9361,7 +9401,7 @@ out:
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -9395,7 +9435,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
if (IS_HASWELL(dev))
return I915_READ(D_COMP_HSW);
@@ -9405,7 +9445,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
if (IS_HASWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
@@ -9451,7 +9491,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+ if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
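The two wait_for() conversions in this function (the one above and the one in the next hunk) switch to intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which polls until the masked register bits equal the given value or the timeout in milliseconds expires. Assuming that signature, the new calls express the same conditions the open-coded polls did, roughly:

	/* "LCPLL still locked": wait up to 1 ms for the PLL lock bit to clear */
	(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0

	/* "LCPLL not locked yet": wait up to 5 ms for the lock bit to latch */
	(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == LCPLL_PLL_LOCK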
@@ -9506,7 +9546,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+ if (intel_wait_for_register(dev_priv,
+ LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+ 5))
DRM_ERROR("LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -9520,7 +9562,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_update_cdclk(dev_priv->dev);
+ intel_update_cdclk(&dev_priv->drm);
}
/*
@@ -9548,7 +9590,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9565,7 +9607,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9580,21 +9622,21 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
}
}
-static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned int req_cdclk = old_intel_state->dev_cdclk;
- broxton_set_cdclk(to_i915(dev), req_cdclk);
+ bxt_set_cdclk(to_i915(dev), req_cdclk);
}
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct intel_crtc_state *crtc_state;
@@ -9630,7 +9672,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t val, data;
int ret;
@@ -9707,6 +9749,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
cdclk, dev_priv->cdclk_freq);
}
+static int broadwell_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+}
+
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9718,14 +9772,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- if (max_pixclk > 540000)
- cdclk = 675000;
- else if (max_pixclk > 450000)
- cdclk = 540000;
- else if (max_pixclk > 337500)
- cdclk = 450000;
- else
- cdclk = 337500;
+ cdclk = broadwell_calc_cdclk(max_pixclk);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9735,7 +9782,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk = intel_state->dev_cdclk = cdclk;
if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = 337500;
+ intel_state->dev_cdclk = broadwell_calc_cdclk(0);
return 0;
}
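The two hunks above factor the Broadwell pixel-clock-to-cdclk ladder into broadwell_calc_cdclk(), so the no-active-CRTC case can reuse the same mapping instead of the hard-coded 337500. A quick sketch of the resulting behaviour, calling the helper exactly as defined above (sample pixel clocks are arbitrary, all values in kHz):

	broadwell_calc_cdclk(0);      /* 337500 - the value used when no pipes are active */
	broadwell_calc_cdclk(400000); /* 450000 */
	broadwell_calc_cdclk(594000); /* 675000 */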
@@ -9750,13 +9797,51 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
broadwell_set_cdclk(dev, req_cdclk);
}
+static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ const int max_pixclk = ilk_max_pixel_rate(state);
+ int vco = intel_state->cdclk_pll_vco;
+ int cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = skl_calc_cdclk(max_pixclk, vco);
+
+ /*
+ * FIXME move the cdclk calculation to
+ * compute_config() so we can fail gracefully.

+ */
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ cdclk = dev_priv->max_cdclk_freq;
+ }
+
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
+
+ return 0;
+}
+
+static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(old_state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
+ unsigned int req_cdclk = intel_state->dev_cdclk;
+ unsigned int req_vco = intel_state->cdclk_pll_vco;
+
+ skl_set_cdclk(dev_priv, req_cdclk, req_vco);
+}
+
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *intel_encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
-
- if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
}
@@ -9866,10 +9951,14 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
unsigned long *power_domain_mask)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 tmp;
+ /*
+ * The pipe->transcoder mapping is fixed with the exception of the eDP
+ * transcoder handled below.
+ */
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
/*
@@ -9913,14 +10002,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
unsigned long *power_domain_mask)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
enum port port;
enum transcoder cpu_transcoder;
u32 tmp;
- pipe_config->has_dsi_encoder = false;
-
for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
if (port == PORT_A)
cpu_transcoder = TRANSCODER_DSI_A;
@@ -9952,18 +10039,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
continue;
pipe_config->cpu_transcoder = cpu_transcoder;
- pipe_config->has_dsi_encoder = true;
break;
}
- return pipe_config->has_dsi_encoder;
+ return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10006,7 +10092,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10020,18 +10106,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
- if (IS_BROXTON(dev_priv)) {
- bxt_get_dsi_transcoder_state(crtc, pipe_config,
- &power_domain_mask);
- WARN_ON(active && pipe_config->has_dsi_encoder);
- if (pipe_config->has_dsi_encoder)
- active = true;
+ if (IS_BROXTON(dev_priv) &&
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+ WARN_ON(active);
+ active = true;
}
if (!active)
goto out;
- if (!pipe_config->has_dsi_encoder) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
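The haswell/broxton hunks above drop pipe_config->has_dsi_encoder and instead derive the same information from the CPU transcoder that was actually read out. transcoder_is_dsi() is not part of this diff; a minimal sketch of the predicate the new code assumes (the in-tree definition may differ):

	static inline bool transcoder_is_dsi(enum transcoder t)
	{
		return t == TRANSCODER_DSI_A || t == TRANSCODER_DSI_C;
	}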
@@ -10082,7 +10166,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
@@ -10145,7 +10229,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
@@ -10193,7 +10277,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 base = intel_crtc->cursor_addr;
@@ -10337,10 +10421,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- obj = i915_gem_alloc_object(dev,
+ obj = i915_gem_object_create(dev,
intel_framebuffer_size_for_mode(mode, bpp));
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
@@ -10360,7 +10444,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
@@ -10630,7 +10714,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
static int i9xx_pll_refclk(struct drm_device *dev,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
@@ -10648,11 +10732,11 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = pipe_config->cpu_transcoder;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
- intel_clock_t clock;
+ struct dpll clock;
int port_clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
@@ -10774,7 +10858,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
@@ -10826,48 +10910,20 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-void intel_mark_busy(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->mm.busy)
- return;
-
- intel_runtime_pm_get(dev_priv);
- i915_update_gfx_val(dev_priv);
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_rps_busy(dev_priv);
- dev_priv->mm.busy = true;
-}
-
-void intel_mark_idle(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->mm.busy)
- return;
-
- dev_priv->mm.busy = false;
-
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_rps_idle(dev->dev_private);
-
- intel_runtime_pm_put(dev_priv);
-}
-
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- work = intel_crtc->unpin_work;
- intel_crtc->unpin_work = NULL;
+ work = intel_crtc->flip_work;
+ intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
if (work) {
- cancel_work_sync(&work->work);
+ cancel_work_sync(&work->mmio_work);
+ cancel_work_sync(&work->unpin_work);
kfree(work);
}
@@ -10878,12 +10934,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
static void intel_unpin_work_fn(struct work_struct *__work)
{
- struct intel_unpin_work *work =
- container_of(__work, struct intel_unpin_work, work);
+ struct intel_flip_work *work =
+ container_of(__work, struct intel_flip_work, unpin_work);
struct intel_crtc *crtc = to_intel_crtc(work->crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_plane *primary = crtc->base.primary;
+ if (is_mmio_work(work))
+ flush_work(&work->mmio_work);
+
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
drm_gem_object_unreference(&work->pending_flip_obj->base);
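intel_unpin_work_fn() above, and several helpers below, branch on is_mmio_work(), which this diff never defines. A plausible sketch, assuming the predicate simply checks whether the MMIO work item was initialised (only the MMIO flip path calls INIT_WORK() on mmio_work):

	static bool is_mmio_work(struct intel_flip_work *work)
	{
		return work->mmio_work.func != NULL;
	}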
@@ -10902,63 +10961,17 @@ static void intel_unpin_work_fn(struct work_struct *__work)
kfree(work);
}
-static void do_intel_finish_page_flip(struct drm_device *dev,
- struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (intel_crtc == NULL)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->unpin_work;
-
- /* Ensure we don't miss a work->pending update ... */
- smp_rmb();
-
- if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return;
- }
-
- page_flip_completed(intel_crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
- do_intel_finish_page_flip(dev, crtc);
-}
-
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
-
- do_intel_finish_page_flip(dev, crtc);
-}
-
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
return !((a - b) & 0x80000000);
}
-static bool page_flip_finished(struct intel_crtc *crtc)
+static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned reset_counter;
reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -10997,40 +11010,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
* anyway, we don't really care.
*/
return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
- crtc->unpin_work->gtt_offset &&
+ crtc->flip_work->gtt_offset &&
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
- crtc->unpin_work->flip_count);
+ crtc->flip_work->flip_count);
}
-void intel_prepare_page_flip(struct drm_device *dev, int plane)
+static bool
+__pageflip_finished_mmio(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ /*
+ * MMIO work completes when vblank is different from
+ * flip_queued_vblank.
+ *
+ * Reset counter value doesn't matter, this is handled by
+ * i915_wait_request finishing early, so no need to handle
+ * reset here.
+ */
+ return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+}
+
+
+static bool pageflip_finished(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
+{
+ if (!atomic_read(&work->pending))
+ return false;
+
+ smp_rmb();
+
+ if (is_mmio_work(work))
+ return __pageflip_finished_mmio(crtc, work);
+ else
+ return __pageflip_finished_cs(crtc, work);
+}
+
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
unsigned long flags;
+ /* Ignore early vblank irqs */
+ if (!crtc)
+ return;
/*
* This is called both by irq handlers and the reset code (to complete
* lost pageflips) so needs the full irqsave spinlocks.
- *
- * NB: An MMIO update of the plane base pointer will also
- * generate a page-flip completion irq, i.e. every modeset
- * is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
- atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ work = intel_crtc->flip_work;
+
+ if (work != NULL &&
+ !is_mmio_work(work) &&
+ pageflip_finished(intel_crtc, work))
+ page_flip_completed(intel_crtc);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (!crtc)
+ return;
+
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->flip_work;
+
+ if (work != NULL &&
+ is_mmio_work(work) &&
+ pageflip_finished(intel_crtc, work))
+ page_flip_completed(intel_crtc);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
+{
+ work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
+
/* Ensure that the work item is consistent when activating it ... */
- smp_wmb();
- atomic_set(&work->pending, INTEL_FLIP_PENDING);
- /* and that it is marked active as soon as the irq could fire. */
- smp_wmb();
+ smp_mb__before_atomic();
+ atomic_set(&work->pending, 1);
}
static int intel_gen2_queue_flip(struct drm_device *dev,
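The rewritten tracking above replaces the INTEL_FLIP_PENDING/INTEL_FLIP_COMPLETE state machine with a single pending flag plus the hardware vblank counter. The barrier pairing it relies on, collected in one place for reference (a sketch; the real writer and reader are intel_mark_page_flip_active() and pageflip_finished() above, and "done" is an illustrative local):

	/* writer (queue side): publish the vblank count before the flag */
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);

	/* reader (irq side): observe the flag before trusting the count */
	if (!atomic_read(&work->pending))
		return false;
	smp_rmb();
	done = intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;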
@@ -11061,10 +11137,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11093,10 +11168,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11108,7 +11182,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
uint32_t flags)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
@@ -11124,7 +11198,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11135,7 +11209,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11147,7 +11220,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
uint32_t flags)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
@@ -11159,7 +11232,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11171,7 +11244,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11263,16 +11335,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP));
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
+ struct reservation_object *resv;
+
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11284,7 +11357,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
if (engine == NULL)
return true;
- if (INTEL_INFO(engine->dev)->gen < 5)
+ if (INTEL_GEN(engine->i915) < 5)
return false;
if (i915.use_mmio_flip < 0)
@@ -11293,20 +11366,20 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
return true;
else if (i915.enable_execlists)
return true;
- else if (obj->base.dma_buf &&
- !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
- false))
+
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv && !reservation_object_test_signaled_rcu(resv, false))
return true;
- else
- return engine != i915_gem_request_get_engine(obj->last_write_req);
+
+ return engine != i915_gem_request_get_engine(obj->last_write_req);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
unsigned int rotation,
- struct intel_unpin_work *work)
+ struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride, tile_height;
@@ -11355,10 +11428,10 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
}
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
- struct intel_unpin_work *work)
+ struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
@@ -11378,78 +11451,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
POSTING_READ(DSPSURF(intel_crtc->plane));
}
-/*
- * XXX: This is the temporary way to update the plane registers until we get
- * around to using the usual plane update functions for MMIO flips
- */
-static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
-{
- struct intel_crtc *crtc = mmio_flip->crtc;
- struct intel_unpin_work *work;
-
- spin_lock_irq(&crtc->base.dev->event_lock);
- work = crtc->unpin_work;
- spin_unlock_irq(&crtc->base.dev->event_lock);
- if (work == NULL)
- return;
-
- intel_mark_page_flip_active(work);
-
- intel_pipe_update_start(crtc);
-
- if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
- skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
- else
- /* use_mmio_flip() retricts MMIO flips to ilk+ */
- ilk_do_mmio_flip(crtc, work);
-
- intel_pipe_update_end(crtc);
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *work)
+static void intel_mmio_flip_work_func(struct work_struct *w)
{
- struct intel_mmio_flip *mmio_flip =
- container_of(work, struct intel_mmio_flip, work);
+ struct intel_flip_work *work =
+ container_of(w, struct intel_flip_work, mmio_work);
+ struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+ to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct reservation_object *resv;
- if (mmio_flip->req) {
- WARN_ON(__i915_wait_request(mmio_flip->req,
+ if (work->flip_queued_req)
+ WARN_ON(__i915_wait_request(work->flip_queued_req,
false, NULL,
- &mmio_flip->i915->rps.mmioflips));
- i915_gem_request_unreference__unlocked(mmio_flip->req);
- }
+ &dev_priv->rps.mmioflips));
/* For framebuffer backed by dmabuf, wait for fence */
- if (obj->base.dma_buf)
- WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
- false, false,
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv)
+ WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
MAX_SCHEDULE_TIMEOUT) < 0);
- intel_do_mmio_flip(mmio_flip);
- kfree(mmio_flip);
-}
-
-static int intel_queue_mmio_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_i915_gem_object *obj)
-{
- struct intel_mmio_flip *mmio_flip;
-
- mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
- if (mmio_flip == NULL)
- return -ENOMEM;
-
- mmio_flip->i915 = to_i915(dev);
- mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
- mmio_flip->crtc = to_intel_crtc(crtc);
- mmio_flip->rotation = crtc->primary->state->rotation;
+ intel_pipe_update_start(crtc);
- INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
- schedule_work(&mmio_flip->work);
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_do_mmio_flip(crtc, work->rotation, work);
+ else
+ /* use_mmio_flip() restricts MMIO flips to ilk+ */
+ ilk_do_mmio_flip(crtc, work);
- return 0;
+ intel_pipe_update_end(crtc, work);
}
static int intel_default_queue_flip(struct drm_device *dev,
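use_mmio_flip() and the MMIO flip worker above now reach the dma-buf fence through i915_gem_object_get_dmabuf_resv() rather than dereferencing obj->base.dma_buf->resv directly. The accessor is not shown in this hunk; a sketch of what the callers assume (NULL when the object is not dma-buf backed, hence the resv checks above):

	static inline struct reservation_object *
	i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
	{
		struct dma_buf *dma_buf = obj->base.dma_buf;

		return dma_buf ? dma_buf->resv : NULL;
	}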
@@ -11462,37 +11494,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
return -ENODEV;
}
-static bool __intel_pageflip_stall_check(struct drm_device *dev,
- struct drm_crtc *crtc)
+static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_flip_work *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work = intel_crtc->unpin_work;
- u32 addr;
+ u32 addr, vblank;
- if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
- return true;
-
- if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+ if (!atomic_read(&work->pending))
return false;
- if (!work->enable_stall_check)
- return false;
+ smp_rmb();
+ vblank = intel_crtc_get_vblank_counter(intel_crtc);
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req &&
- !i915_gem_request_completed(work->flip_queued_req, true))
+ !i915_gem_request_completed(work->flip_queued_req))
return false;
- work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+ work->flip_ready_vblank = vblank;
}
- if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+ if (vblank - work->flip_ready_vblank < 3)
return false;
/* Potential stall - if we see that the flip has happened,
* assume a missed interrupt. */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
else
addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11504,12 +11531,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
return addr == work->gtt_offset;
}
-void intel_check_page_flip(struct drm_device *dev, int pipe)
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -11517,16 +11544,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->unpin_work;
- if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
- WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+ work = intel_crtc->flip_work;
+
+ if (work != NULL && !is_mmio_work(work) &&
+ __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ WARN_ONCE(1,
+ "Kicking stuck page flip: queued at %d, now %d\n",
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
page_flip_completed(intel_crtc);
work = NULL;
}
- if (work != NULL &&
- drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
- intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+
+ if (work != NULL && !is_mmio_work(work) &&
+ intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -11536,13 +11567,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
@@ -11579,19 +11610,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
work->old_fb = old_fb;
- INIT_WORK(&work->work, intel_unpin_work_fn);
+ INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
- /* We borrow the event spin lock for protecting unpin_work */
+ /* We borrow the event spin lock for protecting flip_work */
spin_lock_irq(&dev->event_lock);
- if (intel_crtc->unpin_work) {
+ if (intel_crtc->flip_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
*/
- if (__intel_pageflip_stall_check(dev, crtc)) {
+ if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
page_flip_completed(intel_crtc);
} else {
@@ -11603,7 +11634,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
}
}
- intel_crtc->unpin_work = work;
+ intel_crtc->flip_work = work;
spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11615,7 +11646,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
- intel_fbc_pre_update(intel_crtc);
+
+ intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+ to_intel_plane_state(primary->state));
work->pending_flip_obj = obj;
@@ -11658,6 +11691,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, engine, &request);
+ if (!ret && !request) {
+ request = i915_gem_request_alloc(engine, NULL);
+ ret = PTR_ERR_OR_ZERO(request);
+ }
+
if (ret)
goto cleanup_pending;
}
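The hunk above folds the request allocation into the CS-flip sync path; PTR_ERR_OR_ZERO() keeps the error handling on the single ret variable. For reference, it is equivalent to:

	request = i915_gem_request_alloc(engine, NULL);
	ret = IS_ERR(request) ? PTR_ERR(request) : 0;   /* errno from an ERR_PTR, or 0 */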
@@ -11669,38 +11707,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
obj, 0);
work->gtt_offset += intel_crtc->dspaddr_offset;
+ work->rotation = crtc->primary->state->rotation;
if (mmio_flip) {
- ret = intel_queue_mmio_flip(dev, crtc, obj);
- if (ret)
- goto cleanup_unpin;
+ INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
- } else {
- if (!request) {
- request = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
- }
+ schedule_work(&work->mmio_work);
+ } else {
+ i915_gem_request_assign(&work->flip_queued_req, request);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- i915_gem_request_assign(&work->flip_queued_req, request);
- }
+ intel_mark_page_flip_active(intel_crtc, work);
- if (request)
i915_add_request_no_flush(request);
+ }
- work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
- work->enable_stall_check = true;
-
- i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+ i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -11726,7 +11754,7 @@ cleanup:
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
- intel_crtc->unpin_work = NULL;
+ intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
@@ -11828,15 +11856,14 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
- int idx = intel_crtc->base.base.id, ret;
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
+ int ret;
- if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
- plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -11854,6 +11881,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* Visibility is calculated as if the crtc was on, but
* after scaler setup everything depends on it being off
* when the crtc isn't active.
+ *
+ * FIXME this is wrong for watermarks. Watermarks should also
+ * be computed as if the pipe would be active. Perhaps move
+ * per-plane wm computation to the .check_plane() hook, and
+ * only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11867,11 +11899,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
- plane->base.id, fb ? fb->base.id : -1);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+ intel_crtc->base.base.id,
+ intel_crtc->base.name,
+ plane->base.id, plane->name,
+ fb ? fb->base.id : -1);
- DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.id, was_visible, visible,
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ plane->base.id, plane->name,
+ was_visible, visible,
turn_off, turn_on, mode_changed);
if (turn_on) {
@@ -11944,31 +11980,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
return true;
}
-static bool check_encoder_cloning(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
- if (!check_single_encoder_cloning(state, crtc, encoder))
- return false;
- }
-
- return true;
-}
-
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
@@ -11976,11 +11992,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
int ret;
bool mode_changed = needs_modeset(crtc_state);
- if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- return -EINVAL;
- }
-
if (mode_changed && !crtc_state->active)
pipe_config->update_wm_post = true;
@@ -12033,7 +12044,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
} else if (dev_priv->display.compute_intermediate_wm) {
if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
- pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+ pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
if (INTEL_INFO(dev)->gen >= 9) {
@@ -12095,21 +12106,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}
- /* Clamp bpp to default limit on screens without EDID 1.4 */
- if (connector->base.display_info.bpc == 0) {
- int type = connector->base.connector_type;
- int clamp_bpp = 24;
-
- /* Fall back to 18 bpp when DP sink capability is unknown. */
- if (type == DRM_MODE_CONNECTOR_DisplayPort ||
- type == DRM_MODE_CONNECTOR_eDP)
- clamp_bpp = 18;
-
- if (bpp > clamp_bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
- bpp, clamp_bpp);
- pipe_config->pipe_bpp = clamp_bpp;
- }
+ /* Clamp bpp to 8 on screens without EDID 1.4 */
+ if (connector->base.display_info.bpc == 0 && bpp > 24) {
+ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+ bpp);
+ pipe_config->pipe_bpp = 24;
}
}
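For context on the clamp above: display_info.bpc counts bits per colour component, so when the sink reports no bpc (no EDID 1.4 data) the default assumption is 8 bits per component, i.e. 8 * 3 = 24 bpp, which is why "clamp bpp to 8" is implemented as pipe_config->pipe_bpp = 24.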
@@ -12168,7 +12169,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
+ crtc->base.base.id, crtc->base.name,
context, pipe_config, pipe_name(crtc->pipe));
DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12181,14 +12183,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_dp_encoder,
+ intel_crtc_has_dp_encoder(pipe_config),
pipe_config->lane_count,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- pipe_config->has_dp_encoder,
+ intel_crtc_has_dp_encoder(pipe_config),
pipe_config->lane_count,
pipe_config->dp_m2_n2.gmch_m,
pipe_config->dp_m2_n2.gmch_n,
@@ -12269,29 +12271,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
state = to_intel_plane_state(plane->state);
fb = state->base.fb;
if (!fb) {
- DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
- "disabled, scaler_id = %d\n",
- plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
- plane->base.id, intel_plane->pipe,
- (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
- drm_plane_index(plane), state->scaler_id);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
+ plane->base.id, plane->name, state->scaler_id);
continue;
}
- DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
- plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
- plane->base.id, intel_plane->pipe,
- crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
- drm_plane_index(plane));
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
- fb->base.id, fb->width, fb->height, fb->pixel_format);
- DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
- state->scaler_id,
- state->src.x1 >> 16, state->src.y1 >> 16,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16,
- state->dst.x1, state->dst.y1,
- drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+ DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
+ plane->base.id, plane->name);
+ DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format));
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->src.x1 >> 16, state->src.y1 >> 16,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16,
+ state->dst.x1, state->dst.y1,
+ drm_rect_width(&state->dst),
+ drm_rect_height(&state->dst));
}
}
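The >> 16 shifts in the dump above reflect that the plane source rectangle is kept in 16.16 fixed point while the destination rectangle is in whole pixels: a 1920-pixel-wide source, for example, is stored as 1920 << 16 = 125829120 and printed as 1920.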
@@ -12326,7 +12323,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
case INTEL_OUTPUT_UNKNOWN:
if (WARN_ON(!HAS_DDI(dev)))
break;
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
@@ -12423,6 +12420,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+ DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ goto fail;
+ }
+
+ /*
+ * Determine output_types before calling the .compute_config()
+ * hooks so that the hooks can use this information safely.
+ */
+ pipe_config->output_types |= 1 << encoder->type;
+ }
+
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
@@ -12708,8 +12723,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_M_N(fdi_m_n);
- PIPE_CONF_CHECK_I(has_dp_encoder);
PIPE_CONF_CHECK_I(lane_count);
+ PIPE_CONF_CHECK_X(lane_lat_optim_mask);
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12719,7 +12734,7 @@ intel_pipe_config_compare(struct drm_device *dev,
} else
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
- PIPE_CONF_CHECK_I(has_dsi_encoder);
+ PIPE_CONF_CHECK_X(output_types);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12838,7 +12853,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
struct skl_ddb_entry *hw_entry, *sw_entry;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -12944,7 +12959,7 @@ verify_crtc_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config, *sw_config;
@@ -12958,7 +12973,7 @@ verify_crtc_state(struct drm_crtc *crtc,
pipe_config->base.crtc = crtc;
pipe_config->base.state = old_state;
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
@@ -12987,8 +13002,10 @@ verify_crtc_state(struct drm_crtc *crtc,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
- if (active)
+ if (active) {
+ pipe_config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, pipe_config);
+ }
}
if (!new_crtc_state->active)
@@ -13067,7 +13084,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
@@ -13106,7 +13123,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
static void
verify_disabled_dpll_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13153,7 +13170,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
@@ -13288,7 +13305,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
static int intel_modeset_checks(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret = 0, i;
@@ -13306,6 +13323,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
intel_state->active_crtcs |= 1 << i;
else
intel_state->active_crtcs &= ~(1 << i);
+
+ if (crtc_state->active != crtc->state->active)
+ intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
}
/*
@@ -13316,9 +13336,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
+ if (!intel_state->cdclk_pll_vco)
+ intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
+ if (!intel_state->cdclk_pll_vco)
+ intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
+
ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret < 0)
+ return ret;
- if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
ret = intel_modeset_all_pipes(state);
if (ret < 0)
@@ -13342,38 +13370,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* phase. The code here should be run after the per-crtc and per-plane 'check'
* handlers to ensure that all derived state has been updated.
*/
-static void calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
-
- /*
- * Calculate watermark configuration details now that derived
- * plane/crtc state is all properly updated.
- */
- drm_for_each_crtc(crtc, dev) {
- cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
- crtc->state;
-
- if (cstate->active)
- intel_state->wm_config.num_pipes_active++;
- }
- drm_for_each_legacy_plane(plane, dev) {
- pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
- plane->state;
+ struct drm_i915_private *dev_priv = to_i915(dev);
- if (!to_intel_plane_state(pstate)->visible)
- continue;
+ /* Is there platform-specific watermark information to calculate? */
+ if (dev_priv->display.compute_global_watermarks)
+ return dev_priv->display.compute_global_watermarks(state);
- intel_state->wm_config.sprites_enabled = true;
- if (pstate->crtc_w != pstate->src_w >> 16 ||
- pstate->crtc_h != pstate->src_h >> 16)
- intel_state->wm_config.sprites_scaled = true;
- }
+ return 0;
}
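The rewritten calc_watermark_data() above now simply defers to an optional per-platform hook and reports errors instead of returning void. A simplified sketch of that optional-hook pattern, with stand-in types and names rather than the real i915 structures:

	/* Simplified sketch of the optional per-platform hook used above. */
	struct display_funcs {
		int (*compute_global_watermarks)(void *state);	/* NULL when there is nothing global to compute */
	};

	static int calc_watermarks(const struct display_funcs *funcs, void *state)
	{
		if (funcs->compute_global_watermarks)
			return funcs->compute_global_watermarks(state);

		return 0;	/* a missing hook is not an error */
	}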
/**
@@ -13403,14 +13409,13 @@ static int intel_atomic_check(struct drm_device *dev,
if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
crtc_state->mode_changed = true;
- if (!crtc_state->enable) {
- if (needs_modeset(crtc_state))
- any_ms = true;
+ if (!needs_modeset(crtc_state))
continue;
- }
- if (!needs_modeset(crtc_state))
+ if (!crtc_state->enable) {
+ any_ms = true;
continue;
+ }
/* FIXME: For only active_changed we shouldn't need to do any
* state recomputation at all. */
@@ -13420,8 +13425,11 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
ret = intel_modeset_pipe_config(crtc, pipe_config);
- if (ret)
+ if (ret) {
+ intel_dump_pipe_config(to_intel_crtc(crtc),
+ pipe_config, "[failed]");
return ret;
+ }
if (i915.fastboot &&
intel_pipe_config_compare(dev,
@@ -13431,13 +13439,12 @@ static int intel_atomic_check(struct drm_device *dev,
to_intel_crtc_state(crtc_state)->update_pipe = true;
}
- if (needs_modeset(crtc_state)) {
+ if (needs_modeset(crtc_state))
any_ms = true;
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
needs_modeset(crtc_state) ?
@@ -13457,27 +13464,20 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
intel_fbc_choose_crtc(dev_priv, state);
- calc_watermark_data(state);
-
- return 0;
+ return calc_watermark_data(state);
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
- if (nonblock) {
- DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
- return -EINVAL;
- }
-
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (state->legacy_cursor_update)
continue;
@@ -13521,6 +13521,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
}
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ if (!dev->max_vblank_count)
+ return drm_accurate_vblank_count(&crtc->base);
+
+ return dev->driver->get_vblank_counter(dev, crtc->pipe);
+}
+
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
struct drm_i915_private *dev_priv,
unsigned crtc_mask)
@@ -13586,45 +13596,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
return false;
}
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * we can only handle plane-related operations and do not yet support
- * nonblocking commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- int ret = 0, i;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
+ int i, ret;
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
- if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- return ret;
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *intel_plane_state =
+ to_intel_plane_state(plane_state);
+
+ if (!intel_plane_state->wait_req)
+ continue;
+
+ ret = __i915_wait_request(intel_plane_state->wait_req,
+ true, NULL, NULL);
+ /* EIO should be eaten; we can't get interrupted in the
+ * worker, and blocking commits have already waited. */
+ WARN_ON(ret);
}
- drm_atomic_helper_swap_state(dev, state);
- dev_priv->wm.config = intel_state->wm_config;
- intel_shared_dpll_commit(state);
+ drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset) {
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13679,7 +13680,8 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
if (dev_priv->display.modeset_commit_cdclk &&
- intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
dev_priv->display.modeset_commit_cdclk(state);
intel_modeset_verify_disabled(dev);
@@ -13691,30 +13693,44 @@ static int intel_atomic_commit(struct drm_device *dev,
bool modeset = needs_modeset(crtc->state);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
- bool update_pipe = !modeset && pipe_config->update_pipe;
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
}
+ /* Complete events for pipes that are now disabled here. */
+ if (modeset && !crtc->state->active && crtc->state->event) {
+ spin_lock_irq(&dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+
if (!modeset)
intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
if (crtc->state->active &&
drm_atomic_get_existing_plane_state(state, crtc->primary))
- intel_fbc_enable(intel_crtc);
+ intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
- if (crtc->state->active &&
- (crtc->state->planes_changed || update_pipe))
+ if (crtc->state->active)
drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
if (pipe_config->base.active && needs_vblank_wait(pipe_config))
crtc_vblank_mask |= 1 << i;
}
- /* FIXME: add subpixel order */
-
+ /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+ * already, but still need the state for the delayed optimization. To
+ * fix this:
+ * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+ * - schedule that vblank worker _before_ calling hw_done
+ * - at the start of commit_tail, cancel it _synchronously
+ * - switch over to the vblank wait helper in the core after that since
+ * we don't need our special handling any more.
+ */
if (!state->legacy_cursor_update)
intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
@@ -13741,6 +13757,8 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
+ drm_atomic_helper_commit_hw_done(state);
+
if (intel_state->modeset)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
@@ -13748,6 +13766,8 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
+ drm_atomic_helper_commit_cleanup_done(state);
+
drm_atomic_state_free(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13762,6 +13782,86 @@ static int intel_atomic_commit(struct drm_device *dev,
* can happen also when the device is completely off.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+}
+
+static void intel_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state = container_of(work,
+ struct drm_atomic_state,
+ commit_work);
+ intel_atomic_commit_tail(state);
+}
+
+static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state;
+ struct drm_plane *plane;
+ struct drm_i915_gem_object *obj, *old_obj;
+ struct intel_plane *intel_plane;
+ int i;
+
+ mutex_lock(&state->dev->struct_mutex);
+ for_each_plane_in_state(state, plane, old_plane_state, i) {
+ obj = intel_fb_obj(plane->state->fb);
+ old_obj = intel_fb_obj(old_plane_state->fb);
+ intel_plane = to_intel_plane(plane);
+
+ i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+ }
+ mutex_unlock(&state->dev->struct_mutex);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @nonblock: nonblocking commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * nonblocking commits are only safe for pure plane updates. Everything else
+ * should work though.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret = 0;
+
+ if (intel_state->modeset && nonblock) {
+ DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+
+ ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ return ret;
+ }
+
+ drm_atomic_helper_swap_state(state, true);
+ dev_priv->wm.distrust_bios_wm = false;
+ dev_priv->wm.skl_results = intel_state->wm_results;
+ intel_shared_dpll_commit(state);
+ intel_atomic_track_fbs(state);
+
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ intel_atomic_commit_tail(state);
return 0;
}
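The reworked intel_atomic_commit() above does all blocking work up front, swaps in the new state, and then either runs intel_atomic_commit_tail() inline (blocking) or queues it on system_unbound_wq (nonblocking). A stripped-down, self-contained sketch of that control flow; prepare/swap_state/commit_tail/defer below are hypothetical stand-ins, with defer() merely modelling queue_work():

	struct commit_ctx { int nonblock; };

	static int prepare(struct commit_ctx *c) { (void)c; return 0; }	/* pin buffers, wait for GPU */
	static void swap_state(struct commit_ctx *c) { (void)c; }		/* new state becomes current */
	static void commit_tail(struct commit_ctx *c) { (void)c; }		/* program the hardware */
	static void defer(void (*fn)(struct commit_ctx *), struct commit_ctx *c) { fn(c); }	/* models queue_work() */

	static int do_commit(struct commit_ctx *c)
	{
		int ret = prepare(c);

		if (ret)
			return ret;

		swap_state(c);

		if (c->nonblock)
			defer(commit_tail, c);	/* hardware work happens later, out of line */
		else
			commit_tail(c);		/* hardware work happens now, in the caller */

		return 0;
	}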
@@ -13775,8 +13875,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
state = drm_atomic_state_alloc(dev);
if (!state) {
- DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
- crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
+ crtc->base.id, crtc->name);
return;
}
@@ -13806,8 +13906,50 @@ out:
#undef for_each_intel_crtc_masked
+/*
+ * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
+ * drm_atomic_helper_legacy_gamma_set() directly.
+ */
+static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_crtc_state *state;
+ int ret;
+
+ ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure we update the legacy properties so this works when
+ * atomic is not enabled.
+ */
+
+ state = crtc->state;
+
+ drm_object_property_set_value(&crtc->base,
+ config->degamma_lut_property,
+ (state->degamma_lut) ?
+ state->degamma_lut->base.id : 0);
+
+ drm_object_property_set_value(&crtc->base,
+ config->ctm_property,
+ (state->ctm) ?
+ state->ctm->base.id : 0);
+
+ drm_object_property_set_value(&crtc->base,
+ config->gamma_lut_property,
+ (state->gamma_lut) ?
+ state->gamma_lut->base.id : 0);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .gamma_set = intel_atomic_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
@@ -13836,9 +13978,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_framebuffer *fb = new_state->fb;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+ struct reservation_object *resv;
int ret = 0;
if (!obj && !old_obj)
@@ -13868,12 +14010,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
+ if (!obj)
+ return 0;
+
/* For framebuffer backed by dmabuf, wait for fence */
- if (obj && obj->base.dma_buf) {
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv) {
long lret;
- lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
- false, true,
+ lret = reservation_object_wait_timeout_rcu(resv, false, true,
MAX_SCHEDULE_TIMEOUT);
if (lret == -ERESTARTSYS)
return lret;
@@ -13881,9 +14026,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
WARN(lret < 0, "waiting returns %li\n", lret);
}
- if (!obj) {
- ret = 0;
- } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
@@ -13894,15 +14037,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
if (ret == 0) {
- if (obj) {
- struct intel_plane_state *plane_state =
- to_intel_plane_state(new_state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(new_state);
- i915_gem_request_assign(&plane_state->wait_req,
- obj->last_write_req);
- }
-
- i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+ i915_gem_request_assign(&plane_state->wait_req,
+ obj->last_write_req);
}
return ret;
@@ -13922,7 +14061,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -13936,11 +14074,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
- /* prepare_fb aborted? */
- if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
- (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
- i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
@@ -13948,15 +14081,11 @@ int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
int max_scale;
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
int crtc_clock, cdclk;
if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
- dev = intel_crtc->base.dev;
- dev_priv = dev->dev_private;
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
@@ -13996,6 +14125,7 @@ intel_check_primary_plane(struct drm_plane *plane,
return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
&state->dst, &state->clip,
+ state->base.rotation,
min_scale, max_scale,
can_position, true,
&state->visible);
@@ -14032,7 +14162,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_pipe_update_end(intel_crtc);
+ intel_pipe_update_end(intel_crtc, NULL);
}
/**
@@ -14044,9 +14174,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ if (!plane)
+ return;
+
drm_plane_cleanup(plane);
- kfree(intel_plane);
+ kfree(to_intel_plane(plane));
}
const struct drm_plane_funcs intel_plane_funcs = {
@@ -14118,10 +14250,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
- intel_primary_formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (INTEL_INFO(dev)->gen >= 9)
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane 1%c", pipe_name(pipe));
+ else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c", plane_name(primary->plane));
if (ret)
goto fail;
@@ -14171,6 +14317,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
&state->dst, &state->clip,
+ state->base.rotation,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &state->visible);
@@ -14279,7 +14426,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -14327,7 +14475,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
@@ -14364,7 +14512,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
goto fail;
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs, NULL);
+ cursor, &intel_crtc_funcs,
+ "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -14398,10 +14547,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
return;
fail:
- if (primary)
- drm_plane_cleanup(primary);
- if (cursor)
- drm_plane_cleanup(cursor);
+ intel_plane_destroy(primary);
+ intel_plane_destroy(cursor);
kfree(crtc_state);
kfree(intel_crtc);
}
@@ -14427,11 +14574,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct intel_crtc *crtc;
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
-
- if (!drmmode_crtc) {
- DRM_ERROR("no such CRTC id\n");
+ if (!drmmode_crtc)
return -ENOENT;
- }
crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
@@ -14458,7 +14602,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
static bool has_edp_a(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!IS_MOBILE(dev))
return false;
@@ -14474,7 +14618,7 @@ static bool has_edp_a(struct drm_device *dev)
static bool intel_crt_present(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen >= 9)
return false;
@@ -14500,10 +14644,15 @@ static bool intel_crt_present(struct drm_device *dev)
static void intel_setup_outputs(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ /*
+ * intel_edp_init_connector() depends on this completing first, to
+ * prevent the registration of both eDP and LVDS and the incorrect
+ * sharing of the PPS.
+ */
intel_lvds_init(dev);
if (intel_crt_present(dev))
@@ -15088,12 +15237,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- broadwell_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- broadwell_modeset_calc_cdclk;
- }
+ }
+
+ if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.modeset_commit_cdclk =
+ broadwell_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broadwell_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
@@ -15101,9 +15251,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
valleyview_modeset_calc_cdclk;
} else if (IS_BROXTON(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
- broxton_modeset_commit_cdclk;
+ bxt_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
- broxton_modeset_calc_cdclk;
+ bxt_modeset_calc_cdclk;
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ dev_priv->display.modeset_commit_cdclk =
+ skl_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ skl_modeset_calc_cdclk;
}
switch (INTEL_INFO(dev_priv)->gen) {
@@ -15142,7 +15297,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
*/
static void quirk_pipea_force(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
DRM_INFO("applying pipe a force quirk\n");
@@ -15150,7 +15305,7 @@ static void quirk_pipea_force(struct drm_device *dev)
static void quirk_pipeb_force(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_PIPEB_FORCE;
DRM_INFO("applying pipe b force quirk\n");
@@ -15161,7 +15316,7 @@ static void quirk_pipeb_force(struct drm_device *dev)
*/
static void quirk_ssc_force_disable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
DRM_INFO("applying lvds SSC disable quirk\n");
}
@@ -15172,7 +15327,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
*/
static void quirk_invert_brightness(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
DRM_INFO("applying inverted panel brightness quirk\n");
}
@@ -15180,7 +15335,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
DRM_INFO("applying backlight present quirk\n");
}
@@ -15306,7 +15461,7 @@ static void intel_init_quirks(struct drm_device *dev)
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u8 sr1;
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
@@ -15324,14 +15479,14 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev);
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_init_clock_gating(dev);
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
}
/*
@@ -15401,7 +15556,6 @@ retry:
}
/* Write calculated watermark values back */
- to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
@@ -15499,11 +15653,13 @@ void intel_modeset_init(struct drm_device *dev)
}
intel_update_czclk(dev_priv);
- intel_update_rawclk(dev_priv);
intel_update_cdclk(dev);
intel_shared_dpll_init(dev);
+ if (dev_priv->max_cdclk_freq == 0)
+ intel_update_max_cdclk(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
@@ -15571,7 +15727,7 @@ static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
if (INTEL_INFO(dev)->num_pipes == 1)
@@ -15611,7 +15767,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
@@ -15644,8 +15800,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
- DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
- crtc->base.base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
+ crtc->base.base.id, crtc->base.name);
/* Pipe has the wrong plane attached and the plane is active.
* Temporarily change the plane mapping and disable everything
@@ -15736,7 +15892,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
void i915_redisable_vga_power_on(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
@@ -15747,7 +15903,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev)
void i915_redisable_vga(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
@@ -15787,7 +15943,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -15813,26 +15969,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (crtc_state->base.active) {
dev_priv->active_crtcs |= 1 << crtc->pipe;
- if (IS_BROADWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
pixclk = ilk_pipe_pixel_rate(crtc_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
- } else if (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_BROXTON(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
}
dev_priv->min_pixclk[crtc->pipe] = pixclk;
readout_plane_state(crtc);
- DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
- crtc->base.base.id,
+ DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
crtc->active ? "enabled" : "disabled");
}
@@ -15858,6 +16012,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
+ crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
} else {
encoder->base.crtc = NULL;
@@ -15942,7 +16097,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -16063,15 +16218,16 @@ retry:
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
- intel_init_gt_powersave(dev);
+ intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
- intel_setup_overlay(dev);
+ intel_setup_overlay(dev_priv);
/*
* Make sure any fbs we allocated at startup are properly
@@ -16097,26 +16253,36 @@ void intel_modeset_gem_init(struct drm_device *dev)
c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = intel_backlight_device_register(intel_connector);
+ if (ret)
+ goto err;
- intel_backlight_register(dev);
+ return 0;
+
+err:
+ return ret;
}
-void intel_connector_unregister(struct intel_connector *intel_connector)
+void intel_connector_unregister(struct drm_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ intel_backlight_device_unregister(intel_connector);
intel_panel_destroy_backlight(connector);
- drm_connector_unregister(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_connector *connector;
-
- intel_disable_gt_powersave(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
- intel_backlight_unregister(dev);
+ intel_disable_gt_powersave(dev_priv);
/*
* Interrupts and polling as the first thing to avoid creating havoc.
@@ -16138,27 +16304,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* destroy the backlight and sysfs files before encoders/connectors */
- for_each_intel_connector(dev, connector)
- connector->unregister(connector);
-
drm_mode_config_cleanup(dev);
- intel_cleanup_overlay(dev);
+ intel_cleanup_overlay(dev_priv);
- intel_cleanup_gt_powersave(dev);
+ intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev);
}
-/*
- * Return which encoder is currently attached for connector.
- */
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
-{
- return &intel_attached_encoder(connector)->base;
-}
-
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
@@ -16172,7 +16326,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
*/
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
@@ -16242,9 +16396,8 @@ struct intel_display_error_state {
};
struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int transcoders[] = {
TRANSCODER_A,
@@ -16254,14 +16407,14 @@ intel_display_capture_error_state(struct drm_device *dev)
};
int i;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(dev_priv, i) {
@@ -16277,25 +16430,25 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos = I915_READ(DSPPOS(i));
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
error->plane[i].addr = I915_READ(DSPADDR(i));
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
/* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
@@ -16329,7 +16482,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891107f92d9f..21b04c3eda41 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
- return ~((1 << lane_count) - 1) & 0xf;
-}
-
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
@@ -267,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
/*
@@ -285,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
mutex_unlock(&dev_priv->pps_mutex);
@@ -299,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -373,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
enum pipe pipe;
@@ -431,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
return intel_dp->pps_pipe;
}
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ WARN_ON(!is_edp(intel_dp));
+
+ /*
+ * TODO: BXT has 2 PPS instances. The correct port->PPS instance
+ * mapping needs to be retrieved from VBT; for now, just hard-code to
+ * use instance #0 always.
+ */
+ if (!intel_dp->pps_reset)
+ return 0;
+
+ intel_dp->pps_reset = false;
+
+ /*
+ * Only the HW needs to be reprogrammed; the SW state is fixed and
+ * has been set up during connector init.
+ */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+ return 0;
+}
+
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -480,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -512,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
+ if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ !IS_BROXTON(dev)))
return;
/*
@@ -537,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- intel_dp->pps_pipe = INVALID_PIPE;
+ if (IS_BROXTON(dev))
+ intel_dp->pps_reset = true;
+ else
+ intel_dp->pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_BROXTON(dev_priv)) {
+ int idx = bxt_power_sequencer_idx(intel_dp);
+
+ regs->pp_ctrl = BXT_PP_CONTROL(idx);
+ regs->pp_stat = BXT_PP_STATUS(idx);
+ regs->pp_on = BXT_PP_ON_DELAYS(idx);
+ regs->pp_off = BXT_PP_OFF_DELAYS(idx);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ regs->pp_ctrl = PCH_PP_CONTROL;
+ regs->pp_stat = PCH_PP_STATUS;
+ regs->pp_on = PCH_PP_ON_DELAYS;
+ regs->pp_off = PCH_PP_OFF_DELAYS;
+ regs->pp_div = PCH_PP_DIVISOR;
+ } else {
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
+ regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
+ regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
+ regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
}
}
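intel_pps_get_registers() above folds the per-platform register choice into one table-filling helper, so _pp_ctrl_reg()/_pp_stat_reg() below become trivial accessors. A sketch of the same shape with invented platform names and made-up register offsets (nothing here matches real hardware):

	enum pps_platform { PLAT_PER_INSTANCE, PLAT_FIXED };	/* invented platforms */

	struct pps_regs_sketch {
		unsigned int pp_ctrl, pp_stat, pp_on, pp_off, pp_div;
	};

	static void get_pps_regs(enum pps_platform p, int idx,
				 struct pps_regs_sketch *regs)
	{
		*regs = (struct pps_regs_sketch){ 0 };

		if (p == PLAT_PER_INSTANCE) {		/* one register block per PPS instance */
			regs->pp_ctrl = 0x100 * idx + 0x0;
			regs->pp_stat = 0x100 * idx + 0x4;
		} else {				/* single fixed register block */
			regs->pp_ctrl = 0x0;
			regs->pp_stat = 0x4;
		}
	}

	static unsigned int pp_ctrl_reg(enum pps_platform p, int idx)
	{
		struct pps_regs_sketch regs;

		get_pps_regs(p, idx, &regs);
		return regs.pp_ctrl;	/* the accessor stays a one-liner, like _pp_ctrl_reg() */
	}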
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct pps_registers regs;
- if (IS_BROXTON(dev))
- return BXT_PP_CONTROL(0);
- else if (HAS_PCH_SPLIT(dev))
- return PCH_PP_CONTROL;
- else
- return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+ intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+ &regs);
+
+ return regs.pp_ctrl;
}
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct pps_registers regs;
- if (IS_BROXTON(dev))
- return BXT_PP_STATUS(0);
- else if (HAS_PCH_SPLIT(dev))
- return PCH_PP_STATUS;
- else
- return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+ intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+ &regs);
+
+ return regs.pp_stat;
}
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
@@ -575,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!is_edp(intel_dp) || code != SYS_RESTART)
return 0;
@@ -606,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -620,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -635,7 +699,7 @@ static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!is_edp(intel_dp))
return;
@@ -653,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
@@ -775,6 +839,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_1600us |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
@@ -785,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
@@ -1181,48 +1246,21 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
- drm_dp_aux_unregister(&intel_dp->aux);
kfree(intel_dp->aux.name);
}
-static int
+static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
- int ret;
intel_aux_reg_init(intel_dp);
+ drm_dp_aux_init(&intel_dp->aux);
+ /* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
- if (!intel_dp->aux.name)
- return -ENOMEM;
-
- intel_dp->aux.dev = connector->base.kdev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
-
- DRM_DEBUG_KMS("registering %s bus for %s\n",
- intel_dp->aux.name,
- connector->base.kdev->kobj.name);
-
- ret = drm_dp_aux_register(&intel_dp->aux);
- if (ret < 0) {
- DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
- intel_dp->aux.name, ret);
- kfree(intel_dp->aux.name);
- return ret;
- }
-
- return 0;
-}
-
-static void
-intel_dp_connector_unregister(struct intel_connector *intel_connector)
-{
- struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
-
- intel_dp_aux_fini(intel_dp);
- intel_connector_unregister(intel_connector);
}
static int
@@ -1435,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1463,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
- pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
@@ -1582,6 +1619,27 @@ found:
&pipe_config->dp_m2_n2);
}
+ /*
+ * DPLL0 VCO may need to be adjusted to get the correct
+ * clock for eDP. This will affect cdclk as well.
+ */
+ if (is_edp(intel_dp) &&
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
+ int vco;
+
+ switch (pipe_config->port_clock / 2) {
+ case 108000:
+ case 216000:
+ vco = 8640000;
+ break;
+ default:
+ vco = 8100000;
+ break;
+ }
+
+ to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
+ }
+
if (!HAS_DDI(dev))
intel_dp_set_clock(encoder, pipe_config);
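To make the VCO selection above concrete: the 4.32 GHz eDP rate, for example, corresponds to a port_clock of 432000, so port_clock / 2 = 216000 and DPLL0 needs the 8640 MHz VCO (2.16 GHz likewise hits the 108000 case), while the standard 1.62/2.7/5.4 GHz rates give 81000/135000/270000 and take the 8100 MHz default.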
@@ -1598,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1686,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp);
+
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
+ intel_pps_verify_state(dev_priv, intel_dp);
+
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -1704,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
- 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+ if (intel_wait_for_register(dev_priv,
+ pp_stat_reg, mask, value,
+ 5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
@@ -1765,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 control;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1788,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -1861,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1930,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
*/
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
- struct drm_i915_private *dev_priv =
- intel_dp_to_dev(intel_dp)->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1952,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2013,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2065,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2106,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2222,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
- intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2287,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
@@ -2340,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -2378,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
!IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->has_dp_encoder = true;
-
pipe_config->lane_count =
((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
@@ -2460,55 +2521,11 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
intel_dp_link_down(intel_dp);
}
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- bool reset)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = crtc->pipe;
- uint32_t val;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
- }
-}
-
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_dp_link_down(intel_dp);
@@ -2527,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
if (HAS_DDI(dev)) {
@@ -2607,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc =
to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
@@ -2636,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
@@ -2709,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
@@ -2735,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2773,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2811,266 +2828,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
-
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_pre_encoder_enable(encoder);
intel_enable_dp(encoder);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
intel_dp_prepare(encoder);
- /* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
-
- /* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_pre_pll_enable(encoder);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- int data, i, stagger;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- }
-
- /* Program Tx lane latency optimal setting*/
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- /* Set the upar bit */
- if (intel_crtc->config->lane_count == 1)
- data = 0x0;
- else
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
- }
-
- /* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
- stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
- stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
- stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
- stagger = 0x4;
- else
- stagger = 0x2;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- }
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(6) |
- DPIO_TX2_STAGGER_MULT(0));
-
- if (intel_crtc->config->lane_count > 2) {
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
- }
-
- /* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_encoder_enable(encoder);
intel_enable_dp(encoder);
/* Second common lane will stay alive on its own now */
- if (dport->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
- }
+ chv_phy_release_cl2_override(encoder);
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- unsigned int lane_mask =
- intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
- u32 val;
-
intel_dp_prepare(encoder);
- /*
- * Must trick the second common lane into life.
- * Otherwise we can't even access the PLL.
- */
- if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
- chv_phy_powergate_lanes(encoder, true, lane_mask);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
-
- /* program left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA1_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA2_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- /* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
- }
-
- /*
- * This a a bit weird since generally CL
- * matches the pipe, but here we need to
- * pick the CL based on the port.
- */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
- val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_pll_enable(encoder);
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * Leave the power down bit cleared for at least one
- * lane so that chv_powergate_phy_ch() will power
- * on something when the channel is otherwise unused.
- * When the port is off and the override is removed
- * the lanes power down anyway, so otherwise it doesn't
- * really matter what the state of power down bits is
- * after this.
- */
- chv_phy_powergate_lanes(encoder, false, 0x0);
+ chv_phy_post_pll_disable(encoder);
}
/*
@@ -3089,7 +2878,7 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
if (IS_BROXTON(dev))
@@ -3178,16 +2967,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dport->base.base.crtc);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +3045,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
- uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value, 0);
return 0;
}
-static bool chv_need_uniq_trans_scale(uint8_t train_set)
-{
- return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
- (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-}
-
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
- u32 deemph_reg_value, margin_reg_value, val;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ u32 deemph_reg_value, margin_reg_value;
+ bool uniq_trans_scale = false;
uint8_t train_set = intel_dp->train_set[0];
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- int i;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3076,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
deemph_reg_value = 128;
margin_reg_value = 154;
- /* FIXME extra to set for 1200 */
+ uniq_trans_scale = true;
break;
default:
return 0;
@@ -3364,88 +3128,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->sb_lock);
-
- /* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
- }
-
- /* Program swing deemph */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
- val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
- }
-
- /* Program swing margin */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
- val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
-
- /*
- * Supposedly this value shouldn't matter when unique transition
- * scale is disabled, but in fact it does matter. Let's just
- * always program the same value and hope it's OK.
- */
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- if (chv_need_uniq_trans_scale(train_set))
- val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
- else
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- /* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_set_phy_signal_level(encoder, deemph_reg_value,
+ margin_reg_value, uniq_trans_scale);
return 0;
}
@@ -3612,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
uint32_t val;
@@ -3634,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
- 1))
+ if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_IDLE_DONE,
+ DP_TP_STATUS_IDLE_DONE,
+ 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
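The conversion above swaps the open-coded wait_for() for intel_wait_for_register(dev_priv, reg, mask, value, timeout), which polls until the masked register matches the expected value or the timeout (in ms) expires. A minimal user-space sketch of that mask/value polling pattern, with a simulated register and a made-up bit position standing in for the real MMIO access:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status;			/* simulated status register */

static uint32_t read_reg(void)			/* stands in for I915_READ() */
{
	static int polls;

	/* pretend the idle-done bit latches after a few polls */
	if (++polls > 3)
		fake_status |= 1u << 25;	/* placeholder bit, not the real one */
	return fake_status;
}

/* Poll until (reg & mask) == value or the poll budget runs out. */
static int wait_for_register(uint32_t mask, uint32_t value, int max_polls)
{
	while (max_polls--)
		if ((read_reg() & mask) == value)
			return 0;
	return -1;				/* timed out */
}

int main(void)
{
	const uint32_t idle_done = 1u << 25;	/* placeholder for DP_TP_STATUS_IDLE_DONE */

	if (wait_for_register(idle_done, idle_done, 10))
		printf("Timed out waiting for DP idle patterns\n");
	else
		printf("idle pattern sent\n");
	return 0;
}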
@@ -3646,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev)))
@@ -3698,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -3713,8 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint8_t rev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3456,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
}
+
+ /* Read the eDP Display control capabilities registers */
+ memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
+ if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd)))
+ DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
}
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3472,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
/* Intermediate frequency support */
- if (is_edp(intel_dp) &&
- (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
- (rev >= 0x03)) { /* eDp v1.4 or higher */
+ if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -4559,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
}
if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
intel_dp_probe_oui(intel_dp);
@@ -4635,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
/* MST devices are disconnected from a monitor POV */
intel_dp_unset_edid(intel_dp);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
return connector_status_disconnected;
}
@@ -4674,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector)
intel_display_power_put(dev_priv, power_domain);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4723,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4811,6 +4502,32 @@ done:
return 0;
}
+static int
+intel_dp_connector_register(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ i915_debugfs_connector_add(connector);
+
+ DRM_DEBUG_KMS("registering %s bus for %s\n",
+ intel_dp->aux.name, connector->kdev->kobj.name);
+
+ intel_dp->aux.dev = connector->kdev;
+ return drm_dp_aux_register(&intel_dp->aux);
+}
+
+static void
+intel_dp_connector_unregister(struct drm_connector *connector)
+{
+ drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+ intel_connector_unregister(connector);
+}
+
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
@@ -4851,6 +4568,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp->edp_notifier.notifier_call = NULL;
}
}
+
+ intel_dp_aux_fini(intel_dp);
+
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
@@ -4876,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4929,6 +4649,8 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_dp_connector_register,
+ .early_unregister = intel_dp_connector_unregister,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -4937,7 +4659,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -4951,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
- intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_dig_port->base.type = INTEL_OUTPUT_DP;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -5019,7 +4740,7 @@ put_power:
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* eDP not supported on g4x. so bail out early just
@@ -5061,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
+intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
- i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
-
- if (IS_BROXTON(dev)) {
- /*
- * TODO: BXT has 2 sets of PPS registers.
- * Correct Register for Broxton need to be identified
- * using VBT. hardcoding for now
- */
- pp_ctrl_reg = BXT_PP_CONTROL(0);
- pp_on_reg = BXT_PP_ON_DELAYS(0);
- pp_off_reg = BXT_PP_OFF_DELAYS(0);
- } else if (HAS_PCH_SPLIT(dev)) {
- pp_ctrl_reg = PCH_PP_CONTROL;
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_div_reg = PCH_PP_DIVISOR;
- } else {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ struct pps_registers regs;
- pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
- pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
- pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
- pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
- }
+ intel_pps_get_registers(dev_priv, intel_dp, &regs);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
- pp_on = I915_READ(pp_on_reg);
- pp_off = I915_READ(pp_off_reg);
- if (!IS_BROXTON(dev)) {
- I915_WRITE(pp_ctrl_reg, pp_ctl);
- pp_div = I915_READ(pp_div_reg);
+ pp_on = I915_READ(regs.pp_on);
+ pp_off = I915_READ(regs.pp_off);
+ if (!IS_BROXTON(dev_priv)) {
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+ pp_div = I915_READ(regs.pp_div);
}
/* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
+ seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
+ seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
+ seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
- cur.t11_t12 = (tmp - 1) * 1000;
+ seq->t11_t12 = (tmp - 1) * 1000;
else
- cur.t11_t12 = 0;
+ seq->t11_t12 = 0;
} else {
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
}
+}
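intel_pps_readout_hw_state() above recovers each panel-power delay by masking and shifting the packed PP_ON/PP_OFF/PP_DIV words. A stand-alone sketch of that extraction, using placeholder field layouts rather than the real PANEL_POWER_* definitions from i915_reg.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; the driver's masks/shifts differ. */
#define POWER_UP_DELAY_MASK	0x1fff0000u
#define POWER_UP_DELAY_SHIFT	16
#define LIGHT_ON_DELAY_MASK	0x00001fffu
#define LIGHT_ON_DELAY_SHIFT	0

struct power_seq {
	uint16_t t1_t3;	/* panel power-up delay */
	uint16_t t8;	/* backlight-on delay */
};

static void readout_pp_on(uint32_t pp_on, struct power_seq *seq)
{
	seq->t1_t3 = (pp_on & POWER_UP_DELAY_MASK) >> POWER_UP_DELAY_SHIFT;
	seq->t8    = (pp_on & LIGHT_ON_DELAY_MASK) >> LIGHT_ON_DELAY_SHIFT;
}

int main(void)
{
	struct power_seq seq;

	readout_pp_on(0x07d00190u, &seq);		/* arbitrary register value */
	printf("t1_t3=%u t8=%u\n", seq.t1_t3, seq.t8);	/* -> 2000 400 */
	return 0;
}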
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+ DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp)
+{
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps_delays;
+
+ intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ DRM_ERROR("PPS state mismatch\n");
+ intel_pps_dump_state("sw", sw);
+ intel_pps_dump_state("hw", &hw);
+ }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+ intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
vbt = dev_priv->vbt.edp.pps;
@@ -5152,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* too. */
spec.t11_t12 = (510 + 100) * 10;
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+ intel_pps_dump_state("vbt", &vbt);
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
@@ -5181,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
}
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
- i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
+ struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- if (IS_BROXTON(dev)) {
- /*
- * TODO: BXT has 2 sets of PPS registers.
- * Correct Register for Broxton need to be identified
- * using VBT. hardcoding for now
- */
- pp_ctrl_reg = BXT_PP_CONTROL(0);
- pp_on_reg = BXT_PP_ON_DELAYS(0);
- pp_off_reg = BXT_PP_OFF_DELAYS(0);
-
- } else if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_div_reg = PCH_PP_DIVISOR;
- } else {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ intel_pps_get_registers(dev_priv, intel_dp, &regs);
- pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
- pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
- pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
- }
-
- /*
- * And finally store the new values in the power sequencer. The
- * backlight delays are set to 1 because we do manual waits on them. For
- * T8, even BSpec recommends doing it. For T9, if we don't do this,
- * we'll end up waiting for the backlight off delay twice: once when we
- * do the manual sleep, and once when we disable the panel and wait for
- * the PP_STATUS bit to become zero.
- */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
if (IS_BROXTON(dev)) {
- pp_div = I915_READ(pp_ctrl_reg);
+ pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT);
@@ -5256,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_on |= port_sel;
- I915_WRITE(pp_on_reg, pp_on);
- I915_WRITE(pp_off_reg, pp_off);
+ I915_WRITE(regs.pp_on, pp_on);
+ I915_WRITE(regs.pp_off, pp_off);
if (IS_BROXTON(dev))
- I915_WRITE(pp_ctrl_reg, pp_div);
+ I915_WRITE(regs.pp_ctrl, pp_div);
else
- I915_WRITE(pp_div_reg, pp_div);
+ I915_WRITE(regs.pp_div, pp_div);
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(pp_on_reg),
- I915_READ(pp_off_reg),
+ I915_READ(regs.pp_on),
+ I915_READ(regs.pp_off),
IS_BROXTON(dev) ?
- (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
- I915_READ(pp_div_reg));
+ (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
+ I915_READ(regs.pp_div));
}
/**
@@ -5285,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
*/
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
@@ -5384,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5416,7 +5129,7 @@ unlock:
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5431,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
}
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- intel_dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ intel_dp->attached_connector->panel.
+ fixed_mode->vrefresh);
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5463,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
goto unlock;
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- intel_dp->attached_connector->panel.
- downclock_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ intel_dp->attached_connector->panel.
+ downclock_mode->vrefresh);
unlock:
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5484,7 +5197,7 @@ unlock:
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -5507,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
/* invalidate means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- dev_priv->drrs.dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
mutex_unlock(&dev_priv->drrs.mutex);
}
@@ -5529,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
void intel_edp_drrs_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -5552,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
/* flush means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- dev_priv->drrs.dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
/*
* flush also means no more activity hence schedule downclock, if all
@@ -5589,14 +5302,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
*
* DRRS saves power by switching to low RR based on usage scenarios.
*
- * eDP DRRS:-
- * The implementation is based on frontbuffer tracking implementation.
- * When there is a disturbance on the screen triggered by user activity or a
- * periodic system activity, DRRS is disabled (RR is changed to high RR).
- * When there is no movement on screen, after a timeout of 1 second, a switch
- * to low RR is made.
- * For integration with frontbuffer tracking code,
- * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ * The implementation is based on frontbuffer tracking implementation. When
+ * there is a disturbance on the screen triggered by user activity or a periodic
+ * system activity, DRRS is disabled (RR is changed to high RR). When there is
+ * no movement on screen, after a timeout of 1 second, a switch to low RR is
+ * made.
+ *
+ * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
+ * and intel_edp_drrs_flush() are called.
*
* DRRS can be further extended to support other internal panels and also
* the scenario of video playback wherein RR is set based on the rate
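Since the rewritten comment describes a policy rather than an API, the following is only a toy model of that policy: a frontbuffer invalidation forces the high refresh rate immediately, and roughly one second without activity allows the switch back to the low rate. Names and time units are invented for illustration.

#include <stdio.h>

enum refresh { HIGH_RR, LOW_RR };

struct drrs_model {
	enum refresh rr;
	unsigned long last_activity;	/* arbitrary time units (ms here) */
};

/* Screen became busy: record the activity and upclock if needed. */
static void model_invalidate(struct drrs_model *d, unsigned long now)
{
	d->last_activity = now;
	if (d->rr == LOW_RR) {
		d->rr = HIGH_RR;
		printf("upclock at %lu\n", now);
	}
}

/* Periodic check: after ~1s of no activity, drop to the low refresh rate. */
static void model_tick(struct drrs_model *d, unsigned long now)
{
	if (d->rr == HIGH_RR && now - d->last_activity >= 1000) {
		d->rr = LOW_RR;
		printf("downclock at %lu\n", now);
	}
}

int main(void)
{
	struct drrs_model d = { HIGH_RR, 0 };

	model_invalidate(&d, 10);	/* user activity */
	model_tick(&d, 500);		/* still within the busy window */
	model_tick(&d, 1200);		/* idle long enough: downclock */
	model_invalidate(&d, 1300);	/* new activity: upclock again */
	return 0;
}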
@@ -5622,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
{
struct drm_connector *connector = &intel_connector->base;
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5660,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
@@ -5671,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
+ /*
+ * On IBX/CPT we may get here with LVDS already registered. Since the
+ * driver uses the only internal power sequencer available for both
+ * eDP and LVDS bail out early in this case to prevent interfering
+ * with an already powered-on LVDS power sequencer.
+ */
+ if (intel_get_lvds_encoder(dev)) {
+ WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ DRM_INFO("LVDS was detected, not registering eDP\n");
+
+ return false;
+ }
+
pps_lock(intel_dp);
+
+ intel_dp_init_panel_power_timestamps(intel_dp);
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ vlv_initial_power_sequencer_setup(intel_dp);
+ } else {
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ }
+
intel_edp_panel_vdd_sanitize(intel_dp);
+
pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
@@ -5686,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- return false;
+ goto out_vdd_off;
}
- /* We now know it's not a ghost, init power sequence regs. */
- pps_lock(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
- pps_unlock(intel_dp);
-
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
@@ -5761,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_setup_backlight(connector, pipe);
return true;
+
+out_vdd_off:
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ pps_lock(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
+ pps_unlock(intel_dp);
+
+ return false;
}
bool
@@ -5771,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
- int type, ret;
+ int type;
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
@@ -5832,17 +5576,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
+ intel_dp_aux_init(intel_dp, intel_connector);
+
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_connector_register(connector);
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_dp_connector_unregister;
/* Set up the hotplug pin. */
switch (port) {
@@ -5867,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- intel_dp_init_panel_power_timestamps(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- vlv_initial_power_sequencer_setup(intel_dp);
- else
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- pps_unlock(intel_dp);
- }
-
- ret = intel_dp_aux_init(intel_dp, intel_connector);
- if (ret)
- goto fail;
-
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) &&
+ if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5904,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
- i915_debugfs_connector_add(connector);
-
return true;
fail:
- if (is_edp(intel_dp)) {
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
- }
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
@@ -5929,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev,
i915_reg_t output_reg,
enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -5947,7 +5664,7 @@ bool intel_dp_init(struct drm_device *dev,
encoder = &intel_encoder->base;
if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL))
+ DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
@@ -5977,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
if (IS_CHERRYVIEW(dev)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
@@ -6007,43 +5724,35 @@ err_connector_alloc:
void intel_dp_mst_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port)
+
+ if (!intel_dig_port || !intel_dig_port->dp.can_mst)
continue;
- if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- if (!intel_dig_port->dp.can_mst)
- continue;
- if (intel_dig_port->dp.is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
- }
+ if (intel_dig_port->dp.is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
}
}
void intel_dp_mst_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port)
- continue;
- if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- int ret;
+ int ret;
- if (!intel_dig_port->dp.can_mst)
- continue;
+ if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
- if (ret != 0) {
- intel_dp_check_mst_status(&intel_dig_port->dp);
- }
- }
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ if (ret)
+ intel_dp_check_mst_status(&intel_dig_port->dp);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000000..6532e226db29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+{
+ uint8_t reg_val = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+ &reg_val) < 0) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_DISPLAY_CONTROL_REGISTER);
+ return;
+ }
+ if (enable)
+ reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+ else
+ reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+ reg_val) != 1) {
+ DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+ enable ? "enable" : "disable");
+ }
+}
+
+/*
+ * Read the current backlight value from the DPCD register(s), based on
+ * whether 8-bit (MSB only) or 16-bit (MSB and LSB) values are supported
+ */
+static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t read_val[2] = { 0x0 };
+ uint16_t level = 0;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ &read_val, sizeof(read_val)) < 0) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+ return 0;
+ }
+ level = read_val[0];
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ level = (read_val[0] << 8 | read_val[1]);
+
+ return level;
+}
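A user-space sketch of the MSB/LSB decode the helper above performs; the byte-count capability test and the two-byte read come from the patch, while the sample values are arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Combine the brightness bytes read over AUX into one level. */
static uint16_t decode_backlight(const uint8_t buf[2], int has_lsb)
{
	/* 8-bit panels report only the MSB; 16-bit panels use MSB then LSB. */
	return has_lsb ? (uint16_t)((buf[0] << 8) | buf[1]) : buf[0];
}

int main(void)
{
	const uint8_t buf[2] = { 0x12, 0x34 };

	printf("8-bit: %u, 16-bit: %u\n",
	       decode_backlight(buf, 0), decode_backlight(buf, 1));
	return 0;
}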
+
+/*
+ * Sends the current backlight level over the aux channel, checking if it's
+ * using an 8-bit or a 16-bit value (MSB and LSB)
+ */
+static void
+intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t vals[2] = { 0x0 };
+
+ vals[0] = level;
+
+ /* Write the MSB and/or LSB */
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+ vals[0] = (level & 0xFF00) >> 8;
+ vals[1] = (level & 0xFF);
+ }
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ vals, sizeof(vals)) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+ return;
+ }
+}
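And the matching split for the write path, again as a stand-alone sketch rather than driver code:

#include <stdint.h>
#include <stdio.h>

/* Split a brightness level into the byte(s) written over AUX. */
static void encode_backlight(uint16_t level, int has_lsb, uint8_t out[2])
{
	if (has_lsb) {			/* 16-bit panels take MSB then LSB */
		out[0] = (level & 0xFF00) >> 8;
		out[1] = level & 0xFF;
	} else {			/* 8-bit panels take a single byte */
		out[0] = (uint8_t)level;
		out[1] = 0;
	}
}

int main(void)
{
	uint8_t out[2];

	encode_backlight(0x1234, 1, out);
	printf("msb=0x%02x lsb=0x%02x\n", out[0], out[1]);
	return 0;
}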
+
+static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t dpcd_buf = 0;
+
+ set_aux_backlight_enable(intel_dp, true);
+
+ if ((drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
+ ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
+ DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
+}
+
+static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
+{
+ set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
+}
+
+static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_dp_aux_enable_backlight(connector);
+
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ panel->backlight.max = 0xFFFF;
+ else
+ panel->backlight.max = 0xFF;
+
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_get_backlight(connector);
+
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+static bool
+intel_dp_aux_display_control_capable(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+
+ /* Check the eDP Display control capabilities registers to determine if
+ * the panel can support backlight control over the aux channel
+ */
+ if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
+ !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
+ (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
+ DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+ return true;
+ }
+ return false;
+}
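The capability test amounts to three conditions: the TCON adjusts brightness, it can be enabled over AUX, and there is no PWM-pin fallback. A compact sketch with placeholder bit values (the real flags are the DP_EDP_* definitions the patch uses):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, not the real DP_EDP_* values. */
#define TCON_BACKLIGHT_ADJUSTMENT_CAP		(1 << 0)
#define BACKLIGHT_AUX_ENABLE_CAP		(1 << 1)
#define BACKLIGHT_PIN_ENABLE_CAP		(1 << 2)
#define BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP	(1 << 3)

static bool aux_backlight_capable(uint8_t cap1, uint8_t cap2)
{
	return (cap1 & TCON_BACKLIGHT_ADJUSTMENT_CAP) &&
	       (cap1 & BACKLIGHT_AUX_ENABLE_CAP) &&
	       !((cap1 & BACKLIGHT_PIN_ENABLE_CAP) ||
		 (cap2 & BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP));
}

int main(void)
{
	printf("%d\n", aux_backlight_capable(0x03, 0x00));	/* -> 1 */
	return 0;
}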
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (!i915.enable_dpcd_backlight)
+ return -ENODEV;
+
+ if (!intel_dp_aux_display_control_capable(intel_connector))
+ return -ENODEV;
+
+ panel->backlight.setup = intel_dp_aux_setup_backlight;
+ panel->backlight.enable = intel_dp_aux_enable_backlight;
+ panel->backlight.disable = intel_dp_aux_disable_backlight;
+ panel->backlight.set = intel_dp_aux_set_backlight;
+ panel->backlight.get = intel_dp_aux_get_backlight;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090cef34..68a005d729e9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
- pipe_config->has_dp_encoder = true;
bpp = 24;
/*
* for MST we always configure max link bw - the spec doesn't
@@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
@@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
- 1))
+ if (intel_wait_for_register(dev_priv,
+ DP_TP_STATUS(port),
+ DP_TP_STATUS_ACT_SENT,
+ DP_TP_STATUS_ACT_SENT,
+ 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
- pipe_config->has_dp_encoder = true;
-
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -336,6 +336,8 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -455,7 +457,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
- intel_connector->unregister = intel_connector_unregister;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
@@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
drm_modeset_unlock_all(dev);
+
drm_connector_register(&intel_connector->base);
}
@@ -489,7 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
- intel_connector->unregister(intel_connector);
+ drm_connector_unregister(connector);
/* need to nuke the connector */
drm_modeset_lock_all(dev);
@@ -534,7 +537,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->primary = intel_dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST, NULL);
+ DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000000..047f48748944
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+ int i;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ }
+
+ /* Program swing deemph */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+ val &= ~DPIO_SWING_MARGIN000_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+ /*
+ * Supposedly this value shouldn't matter when unique transition
+ * scale is disabled, but in fact it does matter. Let's just
+ * always program the same value and hope it's OK.
+ */
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /*
+ * The document said it needs to set bit 27 for ch0 and bit 26
+ * for ch1. Might be a typo in the doc.
+ * For now, for this unique transition scale selection, set bit
+ * 27 for ch0 and ch1.
+ */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ if (uniq_trans_scale)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ else
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ }
+}
+
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ unsigned int lane_mask =
+ intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+ u32 val;
+
+ /*
+ * Must trick the second common lane into life.
+ * Otherwise we can't even access the PLL.
+ */
+ if (ch == DPIO_CH0 && pipe == PIPE_B)
+ dport->release_cl2_override =
+ !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+ chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ }
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i, stagger;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+ /* Program Tx lane latency optimal setting */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ /* Set the upar bit */
+ if (intel_crtc->config->lane_count == 1)
+ data = 0x0;
+ else
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ if (intel_crtc->config->lane_count > 2) {
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
+ }
+
+ /* Deassert data lane reset */
+ chv_data_lane_soft_reset(encoder, false);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_phy_release_cl2_override(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dport->release_cl2_override) {
+ chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ dport->release_cl2_override = false;
+ }
+}
+
+void chv_phy_post_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /*
+ * Leave the power down bit cleared for at least one
+ * lane so that chv_phy_powergate_ch() will power
+ * on something when the channel is otherwise unused.
+ * When the port is off and the override is removed
+ * the lanes power down anyway, so otherwise it doesn't
+ * really matter what the state of power down bits is
+ * after this.
+ */
+ chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+
+ if (tx3_demph)
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ /* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Enable clock channels for this port */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+ /* Program lane clock */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_reset_lanes(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ mutex_unlock(&dev_priv->sb_lock);
+}
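The helpers collected in this new file are shared between the CHV/VLV DP and HDMI paths. The ordering implied by the two call sites above (reset asserted inside chv_phy_pre_pll_enable(), released at the end of chv_phy_pre_encoder_enable()) looks roughly like the following illustrative caller, which is not part of the patch:

/* Illustrative ordering only, inferred from the functions above; the real
 * callers are the CHV encoder hooks in intel_dp.c/intel_hdmi.c. */
static void sketch_chv_enable_sequence(struct intel_encoder *encoder)
{
	chv_phy_pre_pll_enable(encoder);     /* powers lanes, asserts data lane reset */
	/* PLL enable happens here in the real driver */
	chv_phy_pre_encoder_enable(encoder); /* programs lanes, deasserts the reset */
	/* vswing/pre-emphasis then goes through chv_set_phy_signal_level() */
}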
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 58f60b27837e..5c1f2d235ffa 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
if (WARN_ON(pll == NULL))
@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
unsigned old_mask;
@@ -151,7 +151,7 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
struct intel_shared_dpll_config *shared_dpll;
enum intel_dpll_id i;
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
if (memcmp(&crtc_state->dpll_hw_state,
&shared_dpll[i].hw_state,
sizeof(crtc_state->dpll_hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, pll->name,
+ DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, crtc->base.name, pll->name,
shared_dpll[i].crtc_mask,
pll->active_mask);
return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
for (i = range_min; i <= range_max; i++) {
pll = &dev_priv->shared_dplls[i];
if (shared_dpll[i].crtc_mask == 0) {
- DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
return pll;
}
}
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
/* Make sure no transcoder is still depending on us. */
@@ -358,8 +358,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
} else {
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_PCH_PLL_A,
@@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
- } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
enum intel_dpll_id pll_id;
@@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[pll->id].ctl,
I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
- if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+ if (intel_wait_for_register(dev_priv,
+ DPLL_STATUS,
+ DPLL_LOCK(pll->id),
+ DPLL_LOCK(pll->id),
+ 5))
DRM_ERROR("DPLL %d not locked\n", pll->id);
}
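intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms) replaces the open-coded wait_for() polls in this and the following hunks; judging by the call sites it returns 0 on success and non-zero on timeout. A hand-written equivalent of the DPLL lock wait above, for comparison only:

/* Hand-rolled equivalent of the helper call above, shown for comparison; the
 * helper itself may add fast-path optimisations not reproduced here. */
static int sketch_wait_for_dpll_lock(struct drm_i915_private *dev_priv,
				     enum intel_dpll_id id)
{
	return wait_for((I915_READ(DPLL_STATUS) & DPLL_LOCK(id)) == DPLL_LOCK(id), 5);
}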
@@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
switch (crtc_state->port_clock / 2) {
@@ -1239,9 +1243,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case 162000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
break;
- /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
- results in CDCLK change. Need to handle the change of CDCLK by
- disabling pipes and re-enabling them */
case 108000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
break;
@@ -1511,7 +1512,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
if (encoder->type == INTEL_OUTPUT_HDMI) {
- intel_clock_t best_clock;
+ struct dpll best_clock;
/* Calculate HDMI div */
/*
@@ -1533,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
clk_div.m2_frac_en = clk_div.m2_frac != 0;
vco = best_clock.vco;
- } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP) {
int i;
@@ -1616,8 +1617,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
i = (enum intel_dpll_id) intel_dig_port->port;
pll = intel_get_shared_dpll_by_id(dev_priv, i);
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
intel_reference_shared_dpll(pll, crtc_state);
@@ -1635,19 +1636,11 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
static void intel_ddi_pll_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- int cdclk_freq;
-
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- dev_priv->skl_boot_cdclk = cdclk_freq;
- if (skl_sanitize_cdclk(dev_priv))
- DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
- DRM_ERROR("LCPLL1 is disabled\n");
- } else if (!IS_BROXTON(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ uint32_t val = I915_READ(LCPLL_CTL);
+
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
@@ -1730,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
void intel_shared_dpll_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
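The dev->dev_private to to_i915(dev) conversions throughout this series rest on drm_device being embedded in drm_i915_private (see the &dev_priv->drm hunk above), so the accessor is presumably just a container_of(). Its actual definition is in i915_drv.h and is not part of this excerpt; an assumed sketch:

/* Assumed shape of the accessor; the real definition lives in i915_drv.h. */
static inline struct drm_i915_private *sketch_to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}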
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f7f0f01814f6..cc937a19b1ba 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -69,39 +69,63 @@
})
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
-#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
-# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif
-#define _wait_for_atomic(COND, US) ({ \
- unsigned long end__; \
- int ret__ = 0; \
- _WAIT_FOR_ATOMIC_CHECK; \
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
BUILD_BUG_ON((US) > 50000); \
- end__ = (local_clock() >> 10) + (US) + 1; \
- while (!(COND)) { \
- if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
- /* Unlike the regular wait_for(), this atomic variant \
- * cannot be preempted (and we'll just ignore the issue\
- * of irq interruptions) and so we know that no time \
- * has passed since the last check of COND and can \
- * immediately report the timeout. \
- */ \
- ret__ = -ETIMEDOUT; \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
} \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
ret__; \
})
-#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
-#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))
+#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
+#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
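With the rewrite above, wait_for_us() requires a compile-time constant timeout and picks the sleeping _wait_for() for budgets above 10us and the non-sleeping _wait_for_atomic() otherwise, while wait_for_atomic()/wait_for_atomic_us() keep the in-atomic check. Hypothetical call sites, with the register and bit chosen only for illustration:

/* Hypothetical callers; LCPLL_CTL/LCPLL_PLL_LOCK are just a convenient example. */
static int sketch_poll_examples(struct drm_i915_private *dev_priv)
{
	/* > 10us budget: expands to the sleeping _wait_for() */
	if (wait_for_us(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 100))
		return -ETIMEDOUT;

	/* <= 10us budget: expands to the busy-waiting _wait_for_atomic() */
	if (wait_for_us(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		return -ETIMEDOUT;

	return 0;
}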
@@ -135,7 +159,7 @@ enum intel_output_type {
INTEL_OUTPUT_LVDS = 4,
INTEL_OUTPUT_TVOUT = 5,
INTEL_OUTPUT_HDMI = 6,
- INTEL_OUTPUT_DISPLAYPORT = 7,
+ INTEL_OUTPUT_DP = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
INTEL_OUTPUT_UNKNOWN = 10,
@@ -159,6 +183,7 @@ struct intel_framebuffer {
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
+ async_cookie_t cookie;
int preferred_bpp;
};
@@ -242,14 +267,6 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
- /*
- * Removes all interfaces through which the connector is accessible
- * - like sysfs, debugfs entries -, so that no new operations can be
- * started on the connector. Also makes sure all currently pending
- * operations finish before returing.
- */
- void (*unregister)(struct intel_connector *);
-
/* Panel info for eDP and LVDS */
struct intel_panel panel;
@@ -266,7 +283,7 @@ struct intel_connector {
struct intel_dp *mst_port;
};
-typedef struct dpll {
+struct dpll {
/* given values */
int n;
int m1, m2;
@@ -276,7 +293,7 @@ typedef struct dpll {
int vco;
int m;
int p;
-} intel_clock_t;
+};
struct intel_atomic_state {
struct drm_atomic_state base;
@@ -291,17 +308,32 @@ struct intel_atomic_state {
bool dpll_set, modeset;
+ /*
+ * Does this transaction change the pipes that are active? This mask
+ * tracks which CRTCs have changed their active state at the end of
+ * the transaction (not counting the temporary disable during modesets).
+ * This mask should only be non-zero when intel_state->modeset is true,
+ * but the converse is not necessarily true; simply changing a mode may
+ * not flip the final active status of any CRTC.
+ */
+ unsigned int active_pipe_changes;
+
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
+ /* SKL/KBL Only */
+ unsigned int cdclk_pll_vco;
+
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
- struct intel_wm_config wm_config;
/*
* Current watermarks can't be trusted during hardware readout, so
* don't bother calculating intermediate watermarks.
*/
bool skip_intermediate_wm;
+
+ /* Gen9+ only */
+ struct skl_wm_values wm_results;
};
struct intel_plane_state {
@@ -405,6 +437,48 @@ struct skl_pipe_wm {
uint32_t linetime;
};
+struct intel_crtc_wm_state {
+ union {
+ struct {
+ /*
+ * Intermediate watermarks; these can be
+ * programmed immediately since they satisfy
+ * both the current configuration we're
+ * switching away from and the new
+ * configuration we're switching to.
+ */
+ struct intel_pipe_wm intermediate;
+
+ /*
+ * Optimal watermarks, programmed post-vblank
+ * when this state is committed.
+ */
+ struct intel_pipe_wm optimal;
+ } ilk;
+
+ struct {
+ /* gen9+ only needs 1-step wm programming */
+ struct skl_pipe_wm optimal;
+
+ /* cached plane data rate */
+ unsigned plane_data_rate[I915_MAX_PLANES];
+ unsigned plane_y_data_rate[I915_MAX_PLANES];
+
+ /* minimum block allocation */
+ uint16_t minimum_blocks[I915_MAX_PLANES];
+ uint16_t minimum_y_blocks[I915_MAX_PLANES];
+ } skl;
+ };
+
+ /*
+ * Platforms with two-step watermark programming will need to
+ * update watermark programming post-vblank to switch from the
+ * safe intermediate watermarks to the optimal final
+ * watermarks.
+ */
+ bool need_postvbl_update;
+};
+
struct intel_crtc_state {
struct drm_crtc_state base;
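The per-pipe watermark state moves out of intel_crtc_state into the union above; platform code is expected to touch only its own side of the union. Illustrative accesses, not taken from the patch, assuming crtc_state->wm is the struct just introduced:

/* Field names come from the struct above; the surrounding function is only an
 * illustration. */
static void sketch_wm_union_access(struct intel_crtc_state *crtc_state)
{
	struct intel_pipe_wm *ilk_optimal = &crtc_state->wm.ilk.optimal; /* ILK..BDW */
	struct skl_pipe_wm *skl_optimal = &crtc_state->wm.skl.optimal;   /* SKL+ */

	(void)ilk_optimal;
	(void)skl_optimal;
}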
@@ -448,12 +522,10 @@ struct intel_crtc_state {
*/
bool limited_color_range;
- /* DP has a bunch of special case unfortunately, so mark the pipe
- * accordingly. */
- bool has_dp_encoder;
-
- /* DSI has special cases */
- bool has_dsi_encoder;
+ /* Bitmask of encoder types (enum intel_output_type)
+ * driven by the pipe.
+ */
+ unsigned int output_types;
/* Whether we should send NULL infoframes. Required for audio. */
bool has_hdmi_sink;
@@ -522,6 +594,12 @@ struct intel_crtc_state {
uint8_t lane_count;
+ /*
+ * Used by platforms having DP/HDMI PHY with programmable lane
+ * latency optimization.
+ */
+ uint8_t lane_lat_optim_mask;
+
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
@@ -558,32 +636,7 @@ struct intel_crtc_state {
/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;
- struct {
- /*
- * Optimal watermarks, programmed post-vblank when this state
- * is committed.
- */
- union {
- struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
- } optimal;
-
- /*
- * Intermediate watermarks; these can be programmed immediately
- * since they satisfy both the current configuration we're
- * switching away from and the new configuration we're switching
- * to.
- */
- struct intel_pipe_wm intermediate;
-
- /*
- * Platforms with two-step watermark programming will need to
- * update watermark programming post-vblank to switch from the
- * safe intermediate watermarks to the optimal final
- * watermarks.
- */
- bool need_postvbl_update;
- } wm;
+ struct intel_crtc_wm_state wm;
/* Gamma mode programmed on the pipe */
uint32_t gamma_mode;
@@ -598,14 +651,6 @@ struct vlv_wm_state {
bool cxsr;
};
-struct intel_mmio_flip {
- struct work_struct work;
- struct drm_i915_private *i915;
- struct drm_i915_gem_request *req;
- struct intel_crtc *crtc;
- unsigned int rotation;
-};
-
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -620,7 +665,7 @@ struct intel_crtc {
unsigned long enabled_power_domains;
bool lowfreq_avail;
struct intel_overlay *overlay;
- struct intel_unpin_work *unpin_work;
+ struct intel_flip_work *flip_work;
atomic_t unpin_work_count;
@@ -815,6 +860,7 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -838,6 +884,11 @@ struct intel_dp {
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
struct edp_power_seq pps_delays;
bool can_mst; /* this port supports mst */
@@ -934,33 +985,32 @@ vlv_pipe_to_channel(enum pipe pipe)
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
static inline struct drm_crtc *
intel_get_crtc_for_plane(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
-struct intel_unpin_work {
- struct work_struct work;
+struct intel_flip_work {
+ struct work_struct unpin_work;
+ struct work_struct mmio_work;
+
struct drm_crtc *crtc;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
-#define INTEL_FLIP_INACTIVE 0
-#define INTEL_FLIP_PENDING 1
-#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct drm_i915_gem_request *flip_queued_req;
u32 flip_queued_vblank;
u32 flip_ready_vblank;
- bool enable_stall_check;
+ unsigned int rotation;
};
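intel_unpin_work and the removed intel_mmio_flip are folded into a single intel_flip_work that carries both work items. A hypothetical initialisation sketch (the helper and handler names are invented; the real setup lives in intel_display.c):

static void sketch_unpin_handler(struct work_struct *work) { }
static void sketch_mmio_flip_handler(struct work_struct *work) { }

/* Invented helper showing how both work items of the merged structure might
 * be set up; not taken from the patch. */
static struct intel_flip_work *sketch_alloc_flip_work(void)
{
	struct intel_flip_work *work = kzalloc(sizeof(*work), GFP_KERNEL);

	if (!work)
		return NULL;

	INIT_WORK(&work->unpin_work, sketch_unpin_handler);
	INIT_WORK(&work->mmio_work, sketch_mmio_flip_handler);
	return work;
}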
struct intel_load_detect_pipe {
@@ -1029,9 +1079,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_rps_interrupts(struct drm_device *dev);
-void gen6_enable_rps_interrupts(struct drm_device *dev);
-void gen6_disable_rps_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1052,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
-
+void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -1110,14 +1160,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
-void intel_mark_idle(struct drm_device *dev);
+void intel_mark_busy(struct drm_i915_private *dev_priv);
+void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1126,7 +1178,6 @@ struct intel_connector *intel_connector_alloc(void);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
@@ -1134,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline bool
+intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
+ enum intel_output_type type)
+{
+ return crtc_state->output_types & (1 << type);
+}
+static inline bool
+intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_types &
+ ((1 << INTEL_OUTPUT_DP) |
+ (1 << INTEL_OUTPUT_DP_MST) |
+ (1 << INTEL_OUTPUT_EDP));
+}
static inline void
intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
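The has_dp_encoder/has_dsi_encoder booleans give way to the output_types bitmask queried through the new inline helpers above. Illustrative conversions of the old checks (wrapper functions invented for illustration only):

/* Invented wrappers showing the old boolean checks expressed via the new helpers. */
static bool sketch_pipe_drives_dp(const struct intel_crtc_state *crtc_state)
{
	return intel_crtc_has_dp_encoder(crtc_state);       /* was crtc_state->has_dp_encoder */
}

static bool sketch_pipe_drives_dsi(const struct intel_crtc_state *crtc_state)
{
	return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI); /* was ->has_dsi_encoder */
}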
@@ -1149,6 +1213,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
if (crtc->active)
intel_wait_for_vblank(dev, pipe);
}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
@@ -1162,14 +1229,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation);
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1226,23 +1293,25 @@ u32 intel_compute_tile_offset(int *x, int *y,
const struct drm_framebuffer *fb, int plane,
unsigned int pitch,
unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_i915_private *dev_priv);
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void bxt_init_cdclk(struct drm_i915_private *dev_priv);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+unsigned int skl_cdclk_get_vco(unsigned int freq);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1250,8 +1319,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
- intel_clock_t *best_clock);
-int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1310,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
@@ -1337,15 +1406,27 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
+
+/* intel_dp_aux_backlight.c */
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
void intel_dsi_init(struct drm_device *dev);
+/* intel_dsi_dcs_backlight.c */
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dvo.c */
void intel_dvo_init(struct drm_device *dev);
+/* intel_hotplug.c */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
/* legacy fbdev emulation in intel_fbdev.c */
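intel_dp_unused_lane_mask() above returns the PHY lanes not used by the link: ~((1 << lane_count) - 1) & 0xf, so 1 lane gives 0xe, 2 lanes give 0xc, and 4 lanes give 0x0. A standalone userspace check of that arithmetic, mirroring the inline helper but not kernel code:

#include <assert.h>

static unsigned int unused_lane_mask(int lane_count)
{
	return ~((1u << lane_count) - 1) & 0xf;
}

int main(void)
{
	assert(unused_lane_mask(1) == 0xe);
	assert(unused_lane_mask(2) == 0xc);
	assert(unused_lane_mask(4) == 0x0);
	return 0;
}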
@@ -1383,11 +1464,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc);
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1409,6 +1494,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
/* intel_lvds.c */
void intel_lvds_init(struct drm_device *dev);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1422,13 +1508,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
/* intel_overlay.c */
-void intel_setup_overlay(struct drm_device *dev);
-void intel_cleanup_overlay(struct drm_device *dev);
+void intel_setup_overlay(struct drm_i915_private *dev_priv);
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
@@ -1447,7 +1533,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+ enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1456,8 +1543,19 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
-void intel_backlight_register(struct drm_device *dev);
-void intel_backlight_unregister(struct drm_device *dev);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/* intel_psr.c */
@@ -1599,21 +1697,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_device *dev);
-void intel_cleanup_gt_powersave(struct drm_device *dev);
-void intel_enable_gt_powersave(struct drm_device *dev);
-void intel_disable_gt_powersave(struct drm_device *dev);
-void intel_suspend_gt_powersave(struct drm_device *dev);
-void intel_reset_gt_powersave(struct drm_device *dev);
-void gen6_update_ring_freq(struct drm_device *dev);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1621,7 +1718,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+static inline int intel_enable_rc6(void)
+{
+ return i915.enable_rc6;
+}
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
@@ -1629,11 +1730,13 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 4756ef639648..de8e9fb51595 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_GEN_FIFO_STAT(port), mask, mask,
+ 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
{
struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_GEN_FIFO_STAT(port),
+ data_mask, 0,
+ 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_GEN_FIFO_STAT(port),
+ ctrl_mask, 0,
+ 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_INTR_STAT(port),
+ data_mask, data_mask,
+ 50))
DRM_ERROR("Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
/* XXX: pipe, hs */
@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_INTR_STAT(port), mask, mask,
+ 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- pipe_config->has_dsi_encoder = true;
-
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -528,11 +538,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
- u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -551,11 +560,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ u32 val;
+
/* Disable DPOunit clock gating, can stall pipe */
- tmp = I915_READ(DSPCLK_GATE_D);
- tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, tmp);
+ val = I915_READ(DSPCLK_GATE_D);
+ val |= DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
/* put device in ready state */
@@ -601,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
@@ -640,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -666,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
- if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
- == 0x00000), 30))
+ if (intel_wait_for_register(dev_priv,
+ port_ctrl, AFE_LATCHOUT, 0,
+ 30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
@@ -684,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
DRM_DEBUG_KMS("\n");
@@ -693,7 +705,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
intel_dsi_clear_device_ready(encoder);
- if (!IS_BROXTON(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
val = I915_READ(DSPCLK_GATE_D);
@@ -719,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
@@ -793,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
@@ -953,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
- pipe_config->has_dsi_encoder = true;
-
if (IS_BROXTON(dev))
bxt_dsi_get_pipe_config(encoder, pipe_config);
@@ -1012,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1098,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1171,6 +1181,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
+ if (IS_BROXTON(dev_priv)) {
+ tmp |= BXT_DPHY_DEFEATURE_EN;
+ if (!is_cmd_mode(intel_dsi))
+ tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
+ }
+
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
@@ -1378,12 +1394,13 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dsi_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dsi_set_property,
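The .late_register/.early_unregister hooks added here are called by the DRM core from drm_connector_register() and drm_connector_unregister(), which is why the explicit drm_connector_register() call is dropped from intel_dsi_init() later in this diff. Roughly, as a sketch of the core's behaviour rather than code from this patch:
	/* inside drm_connector_register(), after sysfs/debugfs setup: */
	if (connector->funcs->late_register)
		ret = connector->funcs->late_register(connector);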
@@ -1413,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port;
unsigned int i;
@@ -1449,7 +1466,7 @@ void intel_dsi_init(struct drm_device *dev)
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
- NULL);
+ "DSI %c", port_name(port));
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1460,7 +1477,6 @@ void intel_dsi_init(struct drm_device *dev)
intel_encoder->get_config = intel_dsi_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1473,10 +1489,42 @@ void intel_dsi_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = BIT(PIPE_B);
- if (dev_priv->vbt.dsi.config->dual_link)
+ if (dev_priv->vbt.dsi.config->dual_link) {
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
- else
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+ } else {
intel_dsi->ports = BIT(port);
+ intel_dsi->dcs_backlight_ports = BIT(port);
+ intel_dsi->dcs_cabc_ports = BIT(port);
+ }
+
+ if (!dev_priv->vbt.dsi.config->cabc_supported)
+ intel_dsi->dcs_cabc_ports = 0;
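As a worked example of the mapping above: a dual-link VBT that selects DL_DCS_PORT_A for the backlight and whose panel does not report CABC support ends up with dcs_backlight_ports = BIT(PORT_A) and dcs_cabc_ports = 0, so DCS brightness writes go to port A only and no CABC power-save commands are ever sent.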
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1549,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev)
connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector);
- drm_connector_register(connector);
-
- intel_panel_setup_backlight(connector, INVALID_PIPE);
-
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc6c2..5967ea6d6045 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {
u8 escape_clk_div;
u8 dual_link;
+
+ u16 dcs_backlight_ports;
+ u16 dcs_cabc_ports;
+
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000000..ac7c6020c443
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Deepak M <m.deepak at intel.com>
+ */
+
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "i915_drv.h"
+#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define CONTROL_DISPLAY_BCTRL (1 << 5)
+#define CONTROL_DISPLAY_DD (1 << 3)
+#define CONTROL_DISPLAY_BL (1 << 2)
+
+#define POWER_SAVE_OFF (0 << 0)
+#define POWER_SAVE_LOW (1 << 0)
+#define POWER_SAVE_MEDIUM (2 << 0)
+#define POWER_SAVE_HIGH (3 << 0)
+#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
+
+#define PANEL_PWM_MAX_VALUE 0xFF
+
+static u32 dcs_get_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ u8 data;
+ enum port port;
+
+ /* FIXME: Need to take care of 16 bit brightness level */
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ &data, sizeof(data));
+ break;
+ }
+
+ return data;
+}
+
+static void dcs_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ u8 data = level;
+ enum port port;
+
+ /* FIXME: Need to take care of 16 bit brightness level */
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &data, sizeof(data));
+ }
+}
+
+static void dcs_disable_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ dcs_set_backlight(connector, 0);
+
+ for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ u8 cabc = POWER_SAVE_OFF;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl &= ~CONTROL_DISPLAY_BL;
+ ctrl &= ~CONTROL_DISPLAY_DD;
+ ctrl &= ~CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+}
+
+static void dcs_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_panel *panel = &connector->panel;
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl |= CONTROL_DISPLAY_BL;
+ ctrl |= CONTROL_DISPLAY_DD;
+ ctrl |= CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+
+ for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ u8 cabc = POWER_SAVE_MEDIUM;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ dcs_set_backlight(connector, panel->backlight.level);
+}
+
+static int dcs_setup_backlight(struct intel_connector *connector,
+ enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+ panel->backlight.level = PANEL_PWM_MAX_VALUE;
+
+ return 0;
+}
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ return -ENODEV;
+
+ if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+ return -EINVAL;
+
+ panel->backlight.setup = dcs_setup_backlight;
+ panel->backlight.enable = dcs_enable_backlight;
+ panel->backlight.disable = dcs_disable_backlight;
+ panel->backlight.set = dcs_set_backlight;
+ panel->backlight.get = dcs_get_backlight;
+
+ return 0;
+}
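Both DCS brightness paths above carry a FIXME about 16-bit brightness levels. A hedged sketch of what the set path could look like for a panel exposing a 16-bit duty cycle, assuming the generic mipi_dsi_dcs_set_display_brightness() helper from drm_mipi_dsi.c (the helper is not used by this patch, and the function below is purely illustrative):
	static void dcs_set_backlight_16bit(struct intel_dsi *intel_dsi, u16 level)
	{
		struct mipi_dsi_device *dsi_device;
		enum port port;

		for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
			dsi_device = intel_dsi->dsi_hosts[port]->device;
			/* sends MIPI_DCS_SET_DISPLAY_BRIGHTNESS with a
			 * two-byte payload instead of the single byte above
			 */
			mipi_dsi_dcs_set_display_brightness(dsi_device, level);
		}
	}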
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c3221e..cd154ce6b6c1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
+#define CHV_GPIO_IDX_START_N 0
+#define CHV_GPIO_IDX_START_E 73
+#define CHV_GPIO_IDX_START_SW 100
+#define CHV_GPIO_IDX_START_SE 198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY 15
+
+#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
+#define CHV_GPIO_GPIOEN (1 << 15)
+#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
+#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
+#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
+#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
+#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define CHV_GPIO_CFGLOCK (1 << 31)
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
- DRM_DEBUG_KMS("GPIO element v3 not supported\n");
- return;
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ port = IOSF_PORT_GPIO_NC;
} else {
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- port = IOSF_PORT_GPIO_SC;
+ DRM_DEBUG_KMS("SC gpio not supported\n");
+ return;
} else {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
@@ -231,10 +250,60 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->sb_lock);
}
+static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ u16 cfg0, cfg1;
+ u16 family_num;
+ u8 port;
+
+ if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+ /* XXX: it's unclear whether 255->57 is part of SE. */
+ gpio_index -= CHV_GPIO_IDX_START_SE;
+ port = CHV_IOSF_PORT_GPIO_SE;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+ gpio_index -= CHV_GPIO_IDX_START_SW;
+ port = CHV_IOSF_PORT_GPIO_SW;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ gpio_index -= CHV_GPIO_IDX_START_E;
+ port = CHV_IOSF_PORT_GPIO_E;
+ } else {
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+ } else {
+ /* XXX: The spec is unclear about CHV GPIO on seq v2 */
+ if (gpio_source != 0) {
+ DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ return;
+ }
+
+ if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
+ gpio_index);
+ return;
+ }
+
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+
+ family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+ gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+ cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+ cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+ vlv_iosf_sb_write(dev_priv, port, cfg0,
+ CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ mutex_unlock(&dev_priv->sb_lock);
+}
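A worked example of the pad addressing above, using a hypothetical sequence-v3 index of 210: 210 >= CHV_GPIO_IDX_START_SE (198), so the SE IOSF port is selected with a local index of 210 - 198 = 12; family_num = 12 / 15 = 0 and the in-family pin is 12 % 15 = 12, giving cfg0 = 0x4400 + 0 * 0x400 + 12 * 8 = 0x4460 and cfg1 = 0x4464. The sideband sequence then clears CFG1 and programs CFG0 as a GPO with the requested TX state.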
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u8 gpio_source, gpio_index;
bool value;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_CHERRYVIEW(dev_priv))
+ chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
@@ -398,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *mode;
if (!panel->connector)
@@ -426,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
@@ -578,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
);
/*
- * Exit zero is unified val ths_zero and ths_exit
+ * Exit zero is unified val ths_zero and ths_exit
* minimum value for ths_exit = 110ns
* min (exit_zero_cnt * 2) = 110/UI
* exit_zero_cnt = 55/UI
*/
- if (exit_zero_cnt < (55 * ui_den / ui_num))
- if ((55 * ui_den) % ui_num)
- exit_zero_cnt += 1;
+ if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
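The rewritten condition is behaviourally identical to the old nested ifs, just expressed as a single statement: the count is bumped only when it is below 55 * ui_den / ui_num and that division leaves a remainder. For example, with hypothetical ui_num = 2 and ui_den = 3, the threshold is 55 * 3 / 2 = 82 with remainder 1, so any exit_zero_cnt below 82 gets rounded up by one.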
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 1765e6e18f2c..6ab58a01b18e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct intel_crtc_state *config,
int target_dsi_clk)
{
- unsigned int calc_m = 0, calc_p = 0;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
unsigned int m, n, p;
- int ref_clk;
- int delta = target_dsi_clk;
- u32 m_seed;
+ unsigned int calc_m, calc_p;
+ int delta, ref_clk;
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
m_max = 92;
}
+ calc_p = p_min;
+ calc_m = m_min;
+ delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
+
for (m = m_min; m <= m_max && delta; m++) {
for (p = p_min; p <= p_max && delta; p++) {
/*
@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
}
/* register has log2(N1), this works fine for powers of two */
- n = ffs(n) - 1;
- m_seed = lfsr_converts[calc_m - 62];
config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
- config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
- m_seed << DSI_PLL_M1_DIV_SHIFT;
+ config->dsi_pll.div =
+ (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
+ (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
return 0;
}
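The candidate clock evaluated in the loop is m * ref_clk / (p * n), and seeding calc_m, calc_p and delta with the (m_min, p_min) candidate guarantees the result always holds a real candidate from the search range even if the loop never finds a closer match. A small illustration, assuming a hypothetical 25 MHz reference clock and n = 1:
	/* target 600000 kHz: m = 72, p = 3 gives 72 * 25000 / 3 = 600000 kHz, delta 0 */
With n a power of two, ffs(n) - 1 is its log2, which is what the N1 divider field expects; for n = 1 the field is simply 0.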
@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
u32 dsi_clk;
@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
- & BXT_DSI_PLL_LOCKED) == 0, 1))
+ if (intel_wait_for_register(dev_priv,
+ BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED,
+ 0,
+ 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 dsi_clk;
u32 dsi_ratio;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* Divide by zero */
if (!pipe_bpp) {
@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
temp = I915_READ(MIPI_CTRL(port));
@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
+ if (intel_wait_for_register(dev_priv,
+ BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED,
+ BXT_DSI_PLL_LOCKED,
+ 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec979c8..47bdf9dad0d3 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
u32 tmp;
@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
static void intel_disable_dvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
static void intel_enable_dvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
@@ -341,6 +341,8 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dvo_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
@@ -351,7 +353,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -378,7 +379,7 @@ static struct drm_display_mode *
intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -406,9 +407,21 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
+static char intel_dvo_port_name(i915_reg_t dvo_reg)
+{
+ if (i915_mmio_reg_equal(dvo_reg, DVOA))
+ return 'A';
+ else if (i915_mmio_reg_equal(dvo_reg, DVOB))
+ return 'B';
+ else if (i915_mmio_reg_equal(dvo_reg, DVOC))
+ return 'C';
+ else
+ return '?';
+}
+
void intel_dvo_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
@@ -428,8 +441,6 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->attached_connector = intel_connector;
intel_encoder = &intel_dvo->base;
- drm_encoder_init(dev, &intel_encoder->base,
- &intel_dvo_enc_funcs, encoder_type, NULL);
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
@@ -438,7 +449,6 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->pre_enable = intel_dvo_pre_enable;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type,
+ "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
@@ -537,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->panel_wants_dither = true;
}
- drm_connector_register(connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 647127f3aaff..6a7ad3ed1463 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ if (intel_wait_for_register(dev_priv,
+ FBC_STATUS, FBC_STAT_COMPRESSING, 0,
+ 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
@@ -374,8 +376,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
+ *
* FIXME: This should be tracked in the plane config eventually
- * instead of queried at runtime for most callers.
+ * instead of queried at runtime for most callers.
*/
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
@@ -389,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
struct intel_crtc *crtc = fbc->crtc;
- struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -442,7 +445,7 @@ out:
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
@@ -480,10 +483,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
intel_fbc_hw_deactivate(dev_priv);
}
-static bool multiple_pipes_ok(struct intel_crtc *crtc)
+static bool multiple_pipes_ok(struct intel_crtc *crtc,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_plane *primary = crtc->base.primary;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
enum pipe pipe = crtc->pipe;
@@ -491,9 +494,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc)
if (!no_fbc_on_multiple_pipes(dev_priv))
return true;
- WARN_ON(!drm_modeset_is_locked(&primary->mutex));
-
- if (to_intel_plane_state(primary->state)->visible)
+ if (plane_state->visible)
fbc->visible_pipes_mask |= (1 << pipe);
else
fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -554,7 +555,7 @@ again:
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
int size, fb_cpp, ret;
@@ -685,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
*/
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
@@ -708,21 +709,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
-static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->base.primary->state);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
- WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
-
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate =
@@ -740,7 +736,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
- if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+ if (IS_GEN(dev_priv, 5, 6))
cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
@@ -750,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
@@ -822,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- bool enable_by_default = IS_BROADWELL(dev_priv);
- if (intel_vgpu_active(dev_priv->dev)) {
+ if (intel_vgpu_active(dev_priv)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}
- if (i915.enable_fbc < 0 && !enable_by_default) {
- fbc->no_fbc_reason = "disabled per chip default";
- return false;
- }
-
if (!i915.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param";
+ fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@@ -857,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
@@ -886,9 +876,11 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
return memcmp(params1, params2, sizeof(*params1)) == 0;
}
-void intel_fbc_pre_update(struct intel_crtc *crtc)
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -896,7 +888,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
mutex_lock(&fbc->lock);
- if (!multiple_pipes_ok(crtc)) {
+ if (!multiple_pipes_ok(crtc, plane_state)) {
fbc->no_fbc_reason = "more than one pipe active";
goto deactivate;
}
@@ -904,7 +896,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
if (!fbc->enabled || fbc->crtc != crtc)
goto unlock;
- intel_fbc_update_state_cache(crtc);
+ intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
deactivate:
intel_fbc_deactivate(dev_priv);
@@ -914,7 +906,7 @@ unlock:
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_reg_params old_params;
@@ -947,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
void intel_fbc_post_update(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -996,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (!fbc_supported(dev_priv))
return;
- if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
- return;
-
mutex_lock(&fbc->lock);
fbc->busy_bits &= ~frontbuffer_bits;
+ if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+ goto out;
+
if (!fbc->busy_bits && fbc->enabled &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
@@ -1011,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
__intel_fbc_post_update(fbc->crtc);
}
+out:
mutex_unlock(&fbc->lock);
}
@@ -1088,9 +1081,11 @@ out:
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_crtc *crtc)
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -1101,19 +1096,19 @@ void intel_fbc_enable(struct intel_crtc *crtc)
if (fbc->enabled) {
WARN_ON(fbc->crtc == NULL);
if (fbc->crtc == crtc) {
- WARN_ON(!crtc->config->enable_fbc);
+ WARN_ON(!crtc_state->enable_fbc);
WARN_ON(fbc->active);
}
goto out;
}
- if (!crtc->config->enable_fbc)
+ if (!crtc_state->enable_fbc)
goto out;
WARN_ON(fbc->active);
WARN_ON(fbc->crtc != NULL);
- intel_fbc_update_state_cache(crtc);
+ intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
if (intel_fbc_alloc_cfb(crtc)) {
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
@@ -1161,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
*/
void intel_fbc_disable(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -1215,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
if (!no_fbc_on_multiple_pipes(dev_priv))
return;
- for_each_intel_crtc(dev_priv->dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(&crtc->base) &&
to_intel_plane_state(crtc->base.primary->state)->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default value into either
+ * 0 or 1 in order to allow it to know what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+{
+ if (i915.enable_fbc >= 0)
+ return !!i915.enable_fbc;
+
+ if (IS_BROADWELL(dev_priv))
+ return 1;
+
+ return 0;
+}
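In effect, a default i915.enable_fbc of -1 is sanitized to 1 on Broadwell and to 0 on everything else, while a user-supplied value of 0 or greater is normalised with !!, so after initialization the parameter only ever holds 0 or 1, which is what the DDX behaviour described above relies on.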
+
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -1238,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
fbc->work.scheduled = false;
+ i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+ DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
return;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81f14..3e3632c18733 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
- if (!obj) {
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(obj);
goto out;
}
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
- int size, ret;
bool prealloc = false;
+ void *vaddr;
+ int ret;
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
obj = intel_fb->obj;
- size = obj->base.size;
mutex_lock(&dev->struct_mutex);
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
+ vma = i915_gem_obj_to_ggtt(obj);
+
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
- info->fix.smem_len = size;
+ info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+ info->fix.smem_len = vma->node.size;
- info->screen_base =
- ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
- size);
- if (!info->screen_base) {
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
- ret = -ENOSPC;
+ ret = PTR_ERR(vaddr);
goto out_destroy_fbi;
}
- info->screen_size = size;
+ info->screen_base = vaddr;
+ info->screen_size = vma->node.size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -360,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
+ unsigned long conn_configured, mask;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
- uint64_t conn_configured = 0, mask;
int pass = 0;
- save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
- GFP_KERNEL);
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
- memcpy(save_enabled, enabled, fb_helper->connector_count);
- mask = (1 << fb_helper->connector_count) - 1;
+ memcpy(save_enabled, enabled, count);
+ mask = BIT(count) - 1;
+ conn_configured = 0;
retry:
- for (i = 0; i < fb_helper->connector_count; i++) {
+ for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -386,7 +389,7 @@ retry:
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
- if (conn_configured & (1 << i))
+ if (conn_configured & BIT(i))
continue;
if (pass == 0 && !connector->has_tile)
@@ -398,7 +401,7 @@ retry:
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
- conn_configured |= (1 << i);
+ conn_configured |= BIT(i);
continue;
}
@@ -417,7 +420,7 @@ retry:
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
- conn_configured |= (1 << i);
+ conn_configured |= BIT(i);
continue;
}
@@ -430,14 +433,15 @@ retry:
intel_crtc->lut_b[j] = j;
}
- new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
+ new_crtc = intel_fb_helper_crtc(fb_helper,
+ connector->state->crtc);
/*
* Make sure we're not trying to drive multiple connectors
* with a single CRTC, since our cloning support may not
* match the BIOS.
*/
- for (j = 0; j < fb_helper->connector_count; j++) {
+ for (j = 0; j < count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
goto bail;
@@ -488,15 +492,15 @@ retry:
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
connector->name,
- pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
connector->state->crtc->base.id,
+ connector->state->crtc->name,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
- conn_configured |= (1 << i);
+ conn_configured |= BIT(i);
}
if ((conn_configured & mask) != mask) {
@@ -520,7 +524,7 @@ retry:
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, fb_helper->connector_count);
+ memcpy(enabled, save_enabled, count);
kfree(save_enabled);
return false;
}
@@ -536,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
};
-static void intel_fbdev_destroy(struct drm_device *dev,
- struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
{
/* We rely on the object-free to release the VMA pinning for
* the info->screen_base mmaping. Leaking the VMA is simpler than
@@ -550,9 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
if (ifbdev->fb) {
- drm_framebuffer_unregister_private(&ifbdev->fb->base);
+ mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+ mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+
drm_framebuffer_remove(&ifbdev->fb->base);
}
+
+ kfree(ifbdev);
}
/*
@@ -685,9 +693,9 @@ out:
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
- intel_fbdev_set_suspend(container_of(work,
- struct drm_i915_private,
- fbdev_suspend_work)->dev,
+ intel_fbdev_set_suspend(&container_of(work,
+ struct drm_i915_private,
+ fbdev_suspend_work)->drm,
FBINFO_STATE_RUNNING,
true);
}
@@ -695,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
@@ -717,8 +725,6 @@ int intel_fbdev_init(struct drm_device *dev)
return ret;
}
- ifbdev->helper.atomic = true;
-
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
@@ -729,42 +735,54 @@ int intel_fbdev_init(struct drm_device *dev)
static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
- struct drm_i915_private *dev_priv = data;
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = data;
/* Due to peculiar init order wrt to hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
ifbdev->preferred_bpp))
- intel_fbdev_fini(dev_priv->dev);
+ intel_fbdev_fini(ifbdev->helper.dev);
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
- async_schedule(intel_fbdev_initial_config, to_i915(dev));
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+ ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
+}
+
+static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
+{
+ if (!ifbdev->cookie)
+ return;
+
+ /* Only serialises with all preceding async calls, hence +1 */
+ async_synchronize_cookie(ifbdev->cookie + 1);
+ ifbdev->cookie = 0;
}
void intel_fbdev_fini(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->fbdev)
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+ if (!ifbdev)
return;
flush_work(&dev_priv->fbdev_suspend_work);
-
if (!current_is_async())
- async_synchronize_full();
- intel_fbdev_destroy(dev, dev_priv->fbdev);
- kfree(dev_priv->fbdev);
+ intel_fbdev_sync(ifbdev);
+
+ intel_fbdev_destroy(ifbdev);
dev_priv->fbdev = NULL;
}
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
- if (!ifbdev)
+ if (!ifbdev || !ifbdev->fb)
return;
info = ifbdev->helper.fbdev;
@@ -809,29 +827,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->fbdev)
- drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+ if (ifbdev && ifbdev->fb)
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
}
void intel_fbdev_restore_mode(struct drm_device *dev)
{
- int ret;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
- struct drm_fb_helper *fb_helper;
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
if (!ifbdev)
return;
- fb_helper = &ifbdev->helper;
+ intel_fbdev_sync(ifbdev);
+ if (!ifbdev->fb)
+ return;
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret) {
+ if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
DRM_DEBUG("failed to restore crtc mode\n");
} else {
- mutex_lock(&fb_helper->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&fb_helper->dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
}
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 9be839a242f9..2aa744081f09 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -50,7 +50,7 @@
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
enum pipe pipe;
@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (enable)
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool old;
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
bool ret;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+ ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
- ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+ pch_transcoder,
enable);
else
- cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+ pch_transcoder,
enable, old);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- for_each_intel_crtc(dev_priv->dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
continue;
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- for_each_intel_crtc(dev_priv->dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e256..3e3e743740c0 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -26,6 +26,7 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
+#include "intel_ringbuffer.h"
struct drm_i915_gem_request;
@@ -48,14 +49,23 @@ struct drm_i915_gem_request;
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*
- * Finally, we also keep a few statistics here, including the number of
- * submissions to each engine, and a record of the last submission failure
- * (if any).
+ * We also keep a few statistics on failures. Ideally, these should all
+ * be zero!
+ * no_wq_space: times that the submission pre-check found no space was
+ * available in the work queue (note, the queue is shared,
+ * not per-engine). It is OK for this to be nonzero, but
+ * it should not be huge!
+ * q_fail: failed to enqueue a work item. This should never happen,
+ * because we check for space beforehand.
+ * b_fail: failed to ring the doorbell. This should never happen, unless
+ * somehow the hardware misbehaves, or maybe if the GuC firmware
+ * crashes? We probably need to reset the GPU to recover.
+ * retcode: errno from last guc_submit()
*/
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
void *client_base; /* first page (only) of above */
- struct intel_context *owner;
+ struct i915_gem_context *owner;
struct intel_guc *guc;
uint32_t priority;
uint32_t ctx_index;
@@ -71,12 +81,13 @@ struct i915_guc_client {
uint32_t wq_tail;
uint32_t unused; /* Was 'wq_head' */
- /* GuC submission statistics & status */
- uint64_t submissions[GUC_MAX_ENGINES_NUM];
- uint32_t q_fail;
+ uint32_t no_wq_space;
+ uint32_t q_fail; /* No longer used */
uint32_t b_fail;
int retcode;
- int spare; /* pad to 32 DWords */
+
+ /* Per-engine counts of GuC submissions */
+ uint64_t submissions[I915_NUM_ENGINES];
};
enum intel_guc_fw_status {
@@ -133,25 +144,24 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
- uint64_t submissions[GUC_MAX_ENGINES_NUM];
- uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
+ uint64_t submissions[I915_NUM_ENGINES];
+ uint32_t last_seqno[I915_NUM_ENGINES];
};
/* intel_guc_loader.c */
-extern void intel_guc_ucode_init(struct drm_device *dev);
-extern int intel_guc_ucode_load(struct drm_device *dev);
-extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern void intel_guc_init(struct drm_device *dev);
+extern int intel_guc_setup(struct drm_device *dev);
+extern void intel_guc_fini(struct drm_device *dev);
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
extern int intel_guc_suspend(struct drm_device *dev);
extern int intel_guc_resume(struct drm_device *dev);
/* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_device *dev);
-int i915_guc_submission_enable(struct drm_device *dev);
-int i915_guc_submit(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq);
-void i915_guc_submission_disable(struct drm_device *dev);
-void i915_guc_submission_fini(struct drm_device *dev);
-int i915_guc_wq_check_space(struct i915_guc_client *client);
+int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
+int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_submit(struct drm_i915_gem_request *rq);
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
#endif
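
For orientation, a minimal sketch of how the reworked request-based entry points declared above are meant to be driven: check for work-queue space first, then submit. The wrapper function is hypothetical and only illustrates the intended call order; the counters mentioned in the comments are the per-client statistics documented in struct i915_guc_client.

/* Sketch only; example_guc_submit() is not part of the driver. */
static int example_guc_submit(struct drm_i915_gem_request *rq)
{
	int ret;

	/* Pre-check that the shared work queue has room for this item. */
	ret = i915_guc_wq_check_space(rq);
	if (ret)
		return ret;	/* such failures are counted in no_wq_space */

	/* Enqueue the work item and ring the doorbell. */
	ret = i915_guc_submit(rq);
	if (ret)
		return ret;	/* see b_fail and retcode in the client */

	return 0;
}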
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5e18..944786d7075b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
#define WQ_RING_TAIL_SHIFT 20
-#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT)
+#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
+#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
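
A small worked example of the new WQ_RING_TAIL_MAX/WQ_RING_TAIL_MASK pair: the ring tail, expressed in qwords, must fit in 11 bits before being shifted into its work-item field. The helper name is illustrative only.

/* Sketch: pack a ring tail (in qwords) into its work-item dword. */
static u32 example_pack_wq_tail(u32 tail_qwords)
{
	/* The tail must fit in 11 bits: 0..WQ_RING_TAIL_MAX (0x7FF). */
	WARN_ON(tail_qwords > WQ_RING_TAIL_MAX);

	return (tail_qwords << WQ_RING_TAIL_SHIFT) & WQ_RING_TAIL_MASK;
}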
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 876e5da44c4e..605c69658d2c 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,9 +59,15 @@
*
*/
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
+#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
+
+#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
@@ -84,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
int irqs;
- /* tell all command streamers NOT to forward interrupts and vblank to GuC */
+ /* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv)
@@ -100,10 +106,10 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
int irqs;
+ u32 tmp;
- /* tell all command streamers to forward interrupts and vblank to GuC */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
- irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+ /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
@@ -114,6 +120,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+ /*
+	 * If the GuC has routed PM interrupts to itself, don't keep the
+	 * redirect bit itself, but do keep any other interrupts that the
+	 * GuC has left unmasked.
+ */
+ tmp = I915_READ(GEN6_PMINTRMSK);
+ if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
+ dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+ }
}
static u32 get_gttype(struct drm_i915_private *dev_priv)
@@ -281,13 +297,24 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
return ret;
}
+static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+ u32 wopcm_size = GUC_WOPCM_TOP;
+
+ /* On BXT, the top of WOPCM is reserved for RC6 context */
+ if (IS_BROXTON(dev_priv))
+ wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+ return wopcm_size;
+}
+
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -308,7 +335,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+ I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,66 +399,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
}
/**
- * intel_guc_ucode_load() - load GuC uCode into the device
+ * intel_guc_setup() - finish preparing the GuC for activity
* @dev: drm device
*
* Called from gem_init_hw() during driver loading and also after a GPU reset.
*
+ * The main action required here is to load the GuC uCode into the device.
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_ucode_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that worked,
+ * and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
-int intel_guc_ucode_load(struct drm_device *dev)
+int intel_guc_setup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- int retries, err = 0;
+ const char *fw_path = guc_fw->guc_fw_path;
+ int retries, ret, err;
- if (!i915.enable_guc_submission)
- return 0;
-
- DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+ DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
+ fw_path,
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
- direct_interrupts_to_host(dev_priv);
-
- if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
- return 0;
-
- if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
- guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
- return -ENOEXEC;
-
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
-
- switch (guc_fw->guc_fw_fetch_status) {
- case GUC_FIRMWARE_FAIL:
- /* something went wrong :( */
- err = -EIO;
+ /* Loading forbidden, or no firmware to load? */
+ if (!i915.enable_guc_loading) {
+ err = 0;
goto fail;
-
- case GUC_FIRMWARE_NONE:
- case GUC_FIRMWARE_PENDING:
- default:
- /* "can't happen" */
- WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
- guc_fw->guc_fw_path,
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- guc_fw->guc_fw_fetch_status);
+ } else if (fw_path == NULL) {
+ /* Device is known to have no uCode (e.g. no GuC) */
err = -ENXIO;
goto fail;
+ } else if (*fw_path == '\0') {
+ /* Device has a GuC but we don't know what f/w to load? */
+ DRM_INFO("No GuC firmware known for this platform\n");
+ err = -ENODEV;
+ goto fail;
+ }
- case GUC_FIRMWARE_SUCCESS:
- break;
+ /* Fetch failed, or already fetched but failed to load? */
+ if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+ err = -EIO;
+ goto fail;
+ } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+ err = -ENOEXEC;
+ goto fail;
}
- err = i915_guc_submission_init(dev);
+ direct_interrupts_to_host(dev_priv);
+
+ guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+ intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+ intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+ err = i915_guc_submission_init(dev_priv);
if (err)
goto fail;
@@ -448,7 +472,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
*/
err = i915_reset_guc(dev_priv);
if (err) {
- DRM_ERROR("GuC reset failed, err %d\n", err);
+ DRM_ERROR("GuC reset failed: %d\n", err);
goto fail;
}
@@ -459,8 +483,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (--retries == 0)
goto fail;
- DRM_INFO("GuC fw load failed, err %d; will reset and "
- "retry %d more time(s)\n", err, retries);
+ DRM_INFO("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", err, retries);
}
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -470,10 +494,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
- /* The execbuf_client will be recreated. Release it first. */
- i915_guc_submission_disable(dev);
-
- err = i915_guc_submission_enable(dev);
+ err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
direct_interrupts_to_guc(dev_priv);
@@ -482,15 +503,50 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0;
fail:
- DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
direct_interrupts_to_host(dev_priv);
- i915_guc_submission_disable(dev);
- i915_guc_submission_fini(dev);
+ i915_guc_submission_disable(dev_priv);
+ i915_guc_submission_fini(dev_priv);
- return err;
+ /*
+ * We've failed to load the firmware :(
+ *
+ * Decide whether to disable GuC submission and fall back to
+ * execlist mode, and whether to hide the error by returning
+ * zero or to return -EIO, which the caller will treat as a
+ * nonfatal error (i.e. it doesn't prevent driver load, but
+ * marks the GPU as wedged until reset).
+ */
+ if (i915.enable_guc_loading > 1) {
+ ret = -EIO;
+ } else if (i915.enable_guc_submission > 1) {
+ ret = -EIO;
+ } else {
+ ret = 0;
+ }
+
+ if (err == 0 && !HAS_GUC_UCODE(dev))
+ ; /* Don't mention the GuC! */
+ else if (err == 0)
+ DRM_INFO("GuC firmware load skipped\n");
+ else if (ret != -EIO)
+ DRM_INFO("GuC firmware load failed: %d\n", err);
+ else
+ DRM_ERROR("GuC firmware load failed: %d\n", err);
+
+ if (i915.enable_guc_submission) {
+ if (fw_path == NULL)
+ DRM_INFO("GuC submission without firmware not supported\n");
+ if (ret == 0)
+ DRM_INFO("Falling back from GuC submission to execlist mode\n");
+ else
+ DRM_ERROR("GuC init failed: %d\n", ret);
+ }
+ i915.enable_guc_submission = 0;
+
+ return ret;
}
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
@@ -552,9 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+ if (size > guc_wopcm_size(to_i915(dev))) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
@@ -617,22 +671,25 @@ fail:
}
/**
- * intel_guc_ucode_init() - define parameters and fetch firmware
+ * intel_guc_init() - define parameters and fetch firmware
* @dev: drm device
*
* Called early during driver load, but after GEM is initialised.
*
* The firmware will be transferred to the GuC's memory later,
- * when intel_guc_ucode_load() is called.
+ * when intel_guc_setup() is called.
*/
-void intel_guc_ucode_init(struct drm_device *dev)
+void intel_guc_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- if (!HAS_GUC_SCHED(dev))
- i915.enable_guc_submission = false;
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev);
if (!HAS_GUC_UCODE(dev)) {
fw_path = NULL;
@@ -640,27 +697,30 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 6;
guc_fw->guc_fw_minor_wanted = 1;
+ } else if (IS_BROXTON(dev)) {
+ fw_path = I915_BXT_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 8;
+ guc_fw->guc_fw_minor_wanted = 7;
+ } else if (IS_KABYLAKE(dev)) {
+ fw_path = I915_KBL_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 9;
+ guc_fw->guc_fw_minor_wanted = 14;
} else {
- i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
}
- if (!i915.enable_guc_submission)
- return;
-
guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+ /* Early (and silent) return if GuC loading is disabled */
+ if (!i915.enable_guc_loading)
+ return;
if (fw_path == NULL)
return;
-
- if (*fw_path == '\0') {
- DRM_ERROR("No GuC firmware known for this platform\n");
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+ if (*fw_path == '\0')
return;
- }
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,18 +729,18 @@ void intel_guc_ucode_init(struct drm_device *dev)
}
/**
- * intel_guc_ucode_fini() - clean up all allocated resources
+ * intel_guc_fini() - clean up all allocated resources
* @dev: drm device
*/
-void intel_guc_ucode_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
- i915_guc_submission_disable(dev);
- i915_guc_submission_fini(dev);
+ i915_guc_submission_disable(dev_priv);
+ i915_guc_submission_fini(dev_priv);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
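
To summarise the failure-handling policy in intel_guc_setup() above: a negative value of either module parameter means "use the platform default", 0 disables the feature, 1 enables it on a best-effort basis (falling back to execlists on failure), and any value above 1 makes a load failure fatal. A hedged restatement as a standalone helper follows; the function name is illustrative only.

/*
 * Sketch of the fallback decision made when GuC firmware loading fails:
 * values above 1 for either parameter make the failure fatal (-EIO,
 * wedging the GPU until reset); otherwise the error is hidden and the
 * driver falls back to execlist submission.
 */
static int example_guc_load_error_policy(int enable_guc_loading,
					 int enable_guc_submission)
{
	if (enable_guc_loading > 1 || enable_guc_submission > 1)
		return -EIO;

	return 0;
}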
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
new file mode 100644
index 000000000000..434f4d5c553d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_gvt.h"
+
+/**
+ * DOC: Intel GVT-g host support
+ *
+ * Intel GVT-g is a graphics virtualization technology which shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented a virtual GPU (vGPU), which has equivalent
+ * features as the underlying physical GPU (pGPU), so the i915 driver can
+ * run seamlessly in a virtual machine. This file provides the
+ * enlightenments of GVT and the necessary components used by GVT in the
+ * i915 driver.
+ */
+
+static bool is_supported_device(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv))
+ return true;
+ return false;
+}
+
+/**
+ * intel_gvt_init - initialize GVT components
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage to create a GVT device.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!i915.enable_gvt) {
+ DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+ return 0;
+ }
+
+ if (!is_supported_device(dev_priv)) {
+ DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
+ goto bail;
+ }
+
+ /*
+	 * If we are not running in a host, or fail to find an MPT module,
+	 * disable GVT-g.
+ */
+ ret = intel_gvt_init_host();
+ if (ret) {
+ DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
+ goto bail;
+ }
+
+ ret = intel_gvt_init_device(dev_priv);
+ if (ret) {
+		DRM_DEBUG_DRIVER("Failed to init GVT device\n");
+ goto bail;
+ }
+
+ return 0;
+
+bail:
+ i915.enable_gvt = 0;
+ return 0;
+}
+
+/**
+ * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the i915 driver unloading stage, to shutdown
+ * GVT components and release the related resources.
+ */
+void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+{
+ if (!intel_gvt_active(dev_priv))
+ return;
+
+ intel_gvt_clean_device(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
new file mode 100644
index 000000000000..960211df74db
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _INTEL_GVT_H_
+#define _INTEL_GVT_H_
+
+#include "gvt/gvt.h"
+
+#ifdef CONFIG_DRM_I915_GVT
+int intel_gvt_init(struct drm_i915_private *dev_priv);
+void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
+int intel_gvt_init_device(struct drm_i915_private *dev_priv);
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
+int intel_gvt_init_host(void);
+#else
+static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+{
+}
+#endif
+
+#endif /* _INTEL_GVT_H_ */
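
As a usage sketch for the new header: intel_gvt_init() is called during driver initialisation and never fails the load (on any problem it clears i915.enable_gvt and returns 0), while intel_gvt_cleanup() is called on unload. The surrounding functions below are hypothetical; only the two intel_gvt_*() calls come from the code above.

static int example_driver_load(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Creates the GVT device if enabled, supported and running as host. */
	ret = intel_gvt_init(dev_priv);
	if (ret)
		return ret;

	/* ... remainder of driver initialisation ... */
	return 0;
}

static void example_driver_unload(struct drm_i915_private *dev_priv)
{
	/* No-op unless GVT was successfully initialised. */
	intel_gvt_cleanup(dev_priv);
}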
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a8844702d11b..4df9f384910c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -47,7 +47,7 @@ static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
int i;
@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,
static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg;
u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
int dotclock;
@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
static void g4x_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
static void ibx_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
static void cpt_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
static void intel_disable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- struct drm_atomic_state *state;
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int count = 0, count_hdmi = 0;
- int i;
if (HAS_GMCH_DISPLAY(dev))
return false;
- state = crtc_state->base.state;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
- count++;
- }
-
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
- return count_hdmi > 0 && count_hdmi == count;
+ return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,39 +1656,16 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
- /* Enable clock channels for this port */
- mutex_lock(&dev_priv->sb_lock);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_phy_pre_encoder_enable(encoder);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
- vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
-
- /* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
+ 0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -1719,213 +1678,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
intel_hdmi_prepare(encoder);
- /* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
-
- /* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- bool reset)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = crtc->pipe;
- uint32_t val;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
- }
+ vlv_phy_pre_pll_enable(encoder);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
-
intel_hdmi_prepare(encoder);
- /*
- * Must trick the second common lane into life.
- * Otherwise we can't even access the PLL.
- */
- if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
- chv_phy_powergate_lanes(encoder, true, 0x0);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
-
- /* program left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA1_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA2_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- /* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
-
- /*
- * This a a bit weird since generally CL
- * matches the pipe, but here we need to
- * pick the CL based on the port.
- */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
- val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_pll_enable(encoder);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * Leave the power down bit cleared for at least one
- * lane so that chv_powergate_phy_ch() will power
- * on something when the channel is otherwise unused.
- * When the port is off and the override is removed
- * the lanes power down anyway, so otherwise it doesn't
- * really matter what the state of power down bits is
- * after this.
- */
- chv_phy_powergate_lanes(encoder, false, 0x0);
+ chv_phy_post_pll_disable(encoder);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_reset_lanes(encoder);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->sb_lock);
@@ -1940,142 +1719,16 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- int data, i, stagger;
- u32 val;
- mutex_lock(&dev_priv->sb_lock);
-
- /* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- /* Program Tx latency optimal setting */
- for (i = 0; i < 4; i++) {
- /* Set the upar bit */
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
- }
-
- /* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
- stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
- stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
- stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
- stagger = 0x4;
- else
- stagger = 0x2;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(6) |
- DPIO_TX2_STAGGER_MULT(0));
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
-
- /* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
-
- /* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ chv_phy_pre_encoder_enable(encoder);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
- val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
- }
-
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
- val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
-
- /*
- * Supposedly this value shouldn't matter when unique transition
- * scale is disabled, but in fact it does matter. Let's just
- * always program the same value and hope it's OK.
- */
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- /* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_set_phy_signal_level(encoder, 128, 102, false);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1739,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport, 0x0);
/* Second common lane will stay alive on its own now */
- if (dport->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
- }
+ chv_phy_release_cl2_override(encoder);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2106,6 +1756,8 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2114,7 +1766,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -2138,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
@@ -2242,12 +1893,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_connector_register(connector);
intel_hdmi->attached_connector = intel_connector;
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -2280,7 +1929,7 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48..f48957ea100d 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
int i;
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.hotplug_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev: drm device
+ * @dev_priv: drm_i915_private
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_device *dev,
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
+ WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
"Received HPD interrupt on pin %d although disabled\n", i);
continue;
}
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
}
if (storm_detected)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
/*
@@ -453,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_device *dev,
*
* This is a separate step from interrupt enabling to simplify the locking rules
* in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
*/
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
int i;
for_each_hpd_pin(i) {
dev_priv->hotplug.stats[i].count = 0;
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
}
+
+ WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->hotplug.poll_init_work);
+
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void i915_hpd_poll_init_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ hotplug.poll_init_work);
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ bool enabled;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *intel_connector =
+ to_intel_connector(connector);
connector->polled = intel_connector->polled;
/* MST has a dynamic intel_connector->encoder and it's reprobing
@@ -475,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
continue;
if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
- intel_connector->encoder->hpd_pin > HPD_NONE)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_connector->encoder->hpd_pin > HPD_NONE) {
+ connector->polled = enabled ?
+ DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT :
+ DRM_CONNECTOR_POLL_HPD;
+ }
}
+ if (enabled)
+ drm_kms_helper_poll_enable_locked(dev);
+
+ mutex_unlock(&dev->mode_config.mutex);
+
/*
- * Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked checks happy.
+	 * We might have missed any hotplug events that happened while we
+	 * were in the middle of disabling polling.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
+ if (!enabled)
+ drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enables/disables polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ * @enabled: Whether to enable or disable polling
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+ WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+ /*
+	 * We might already be holding dev->mode_config.mutex, so do this in a
+	 * separate worker.
+	 * There is no issue if we race here, since we always reschedule
+	 * the worker anyway.
+ */
+ schedule_work(&dev_priv->hotplug.poll_init_work);
}
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
}
@@ -509,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_work_sync(&dev_priv->hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+ bool ret = false;
+
+ if (pin == HPD_NONE)
+ return false;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
+ dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ ret = true;
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+ if (pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
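
A hedged sketch of how the new polling hooks pair with runtime PM, as described in the intel_hpd_poll_init() kernel-doc above: polling is switched on when HPD interrupts stop working (for example during runtime suspend), and intel_hpd_init() later restores interrupt-driven detection. The suspend/resume wrappers are hypothetical; only the two intel_hpd_*() calls come from the code above.

static int example_runtime_suspend(struct drm_i915_private *dev_priv)
{
	/* HPD interrupts may stop working now, so poll connectors instead. */
	intel_hpd_poll_init(dev_priv);
	return 0;
}

static int example_runtime_resume(struct drm_i915_private *dev_priv)
{
	/* Restore interrupt-driven hotplug; the worker drops the polling. */
	intel_hpd_init(dev_priv);
	return 0;
}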
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 81de23098be7..1f266d7df2ec 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
void
intel_i2c_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_i2c_reset(dev_priv->dev);
+ intel_i2c_reset(&dev_priv->drm);
intel_i2c_quirk_set(dev_priv, true);
set_data(bus, 1);
set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
int ret;
-#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
-
if (!HAS_GMBUS_IRQ(dev_priv))
- return wait_for(C, 10);
+ return intel_wait_for_register(dev_priv,
+ GMBUS2, GMBUS_ACTIVE, 0,
+ 10);
/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
- ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
+ (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
msecs_to_jiffies_timeout(10));
I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return 0;
else
return -ETIMEDOUT;
-#undef C
}
static int
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
*/
int intel_setup_gmbus(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_gmbus *bus;
unsigned int pin;
int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
}
- intel_i2c_reset(dev_priv->dev);
+ intel_i2c_reset(&dev_priv->drm);
return 0;
@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
void intel_teardown_gmbus(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_gmbus *bus;
unsigned int pin;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f2d8415ed8b..414ddda43922 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -208,31 +208,27 @@
} while (0)
enum {
- ADVANCED_CONTEXT = 0,
- LEGACY_32B_CONTEXT,
- ADVANCED_AD_CONTEXT,
- LEGACY_64B_CONTEXT
-};
-#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- LEGACY_64B_CONTEXT :\
- LEGACY_32B_CONTEXT)
-enum {
FAULT_AND_HANG = 0,
FAULT_AND_HALT, /* Debug only */
FAULT_AND_STREAM,
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
-static int intel_lr_context_pin(struct intel_context *ctx,
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev: DRM device.
+ * @dev_priv: i915 device private
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
@@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
- WARN_ON(i915.enable_ppgtt == -1);
-
/* On platforms with execlist available, vGPU will only
* support execlist mode, no ring buffer mode.
*/
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
return 1;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 1;
if (enable_execlists == 0)
return 0;
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+ USES_PPGTT(dev_priv) &&
i915.use_mmio_flip >= 0)
return 1;
@@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (IS_GEN8(dev) || IS_GEN9(dev))
+ if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
engine->idle_lite_restore_wa = ~0;
- engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+ engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
- engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
- GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
@@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* descriptor for a pinned context
*
* @ctx: Context to work on
- * @ring: Engine the descriptor will be used with
+ * @engine: Engine the descriptor will be used with
*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
@@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* which remains valid until the context is unpinned.
*
* This is what a descriptor looks like, from LSB to MSB:
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
- * bits 52-63: reserved, may encode the engine ID (for GuC)
+ * bits 32-52: ctx ID, a globally unique tag
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
*/
static void
-intel_lr_context_descriptor_update(struct intel_context *ctx,
+intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- uint64_t lrca, desc;
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u64 desc;
- lrca = ctx->engine[engine->id].lrc_vma->node.start +
- LRC_PPHWSP_PN * PAGE_SIZE;
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
- desc = engine->ctx_desc_template; /* bits 0-11 */
- desc |= lrca; /* bits 12-31 */
- desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
+ desc = ctx->desc_template; /* bits 3-4 */
+ desc |= engine->ctx_desc_template; /* bits 0-11 */
+ desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+ /* bits 12-31 */
+ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
- ctx->engine[engine->id].lrc_desc = desc;
+ ce->lrc_desc = desc;
}
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
return ctx->engine[engine->id].lrc_desc;
}
-/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx: Context to get the ID for
- * @ring: Engine to get the ID for
- *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
- *
- * The context ID is a portion of the context descriptor, so we can
- * just extract the required part from the cached descriptor.
- *
- * Return: 20-bits globally unique context ID.
- */
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine)
-{
- return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
-}
-
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
struct intel_engine_cs *engine = rq0->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = rq0->i915;
uint64_t desc[2];
if (rq1) {
@@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
spin_unlock_irq(&dev_priv->uncore.lock);
}
+static inline void execlists_context_status_change(
+ struct drm_i915_gem_request *rq,
+ unsigned long status)
+{
+ /*
+ * Only used when GVT-g is enabled now. When GVT-g is disabled,
+ * the compiler should eliminate this function as dead-code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+}
+
static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
@@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
- WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+ WARN_ON(!intel_irqs_enabled(engine->i915));
/* Try to read in pairs */
list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
- list_move_tail(&req0->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&req0->execlist_link);
+ i915_gem_request_unreference(req0);
req0 = cursor;
} else {
+ if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
+ /*
+ * req0 (after merged) ctx requires single
+ * submission, stop picking
+ */
+ if (req0->ctx->execlists_force_single_submission)
+ break;
+ /*
+ * req0 ctx doesn't require single submission,
+ * but next req ctx requires, stop picking
+ */
+ if (cursor->ctx->execlists_force_single_submission)
+ break;
+ }
req1 = cursor;
WARN_ON(req1->elsp_submitted);
break;
@@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
if (unlikely(!req0))
return;
+ execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+
+ if (req1)
+ execlists_context_status_change(req1,
+ INTEL_CONTEXT_SCHEDULE_IN);
+
if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
/*
* WaIdleLiteRestore: make sure we never cause a lite restore
@@ -486,7 +493,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
}
static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
struct drm_i915_gem_request *head_req;
@@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
struct drm_i915_gem_request,
execlist_link);
- if (!head_req)
- return 0;
-
- if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
- return 0;
+ if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
+ return 0;
WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
if (--head_req->elsp_submitted > 0)
return 0;
- list_move_tail(&head_req->execlist_link,
- &engine->execlist_retired_req_list);
+ execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
+
+ list_del(&head_req->execlist_link);
+ i915_gem_request_unreference(head_req);
return 1;
}
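
On the reworked execlists_check_remove_request() above: the head request now carries the queue's own reference (taken in execlists_context_queue), and it is only popped and unreferenced once all of its ELSP submissions have drained, since a lite-restore decrements elsp_submitted without retiring the request. A toy sketch of that accounting, using simplified types rather than the real i915 structures:

#include <stdio.h>

struct request {
	int elsp_submitted;
	int refcount;
};

static void request_unreference(struct request *req)
{
	if (--req->refcount == 0)
		printf("request freed\n");
}

/* Returns 1 when the head request is retired from the execlist queue. */
static int check_remove_request(struct request *head)
{
	if (--head->elsp_submitted > 0)
		return 0;                 /* lite-restore pending: keep it at the head */

	request_unreference(head);        /* drop the queue's reference */
	return 1;
}

int main(void)
{
	struct request req = { .elsp_submitted = 2, .refcount = 1 };

	printf("%d\n", check_remove_request(&req));   /* 0: one submission left */
	printf("%d\n", check_remove_request(&req));   /* 1: retired and freed */
	return 0;
}
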
@@ -517,7 +523,7 @@ static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
u32 *context_id)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status;
read_pointer %= GEN8_CSB_ENTRIES;
@@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
- * @engine: Engine Command Streamer to handle.
+ * @data: tasklet handler passed in unsigned long
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status_pointer;
unsigned int read_pointer, write_pointer;
u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (request->ctx != request->i915->kernel_context)
- intel_lr_context_pin(request->ctx, engine);
-
- i915_gem_request_reference(request);
-
spin_lock_bh(&engine->execlist_lock);
list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
- list_move_tail(&tail_req->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&tail_req->execlist_link);
+ i915_gem_request_unreference(tail_req);
}
}
+ i915_gem_request_reference(request);
list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ request->ctx_hw_id = request->ctx->hw_id;
if (num_elements == 0)
execlists_context_unqueue(engine);
@@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret = 0;
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ int ret;
- request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ if (!ce->state) {
+ ret = execlists_context_deferred_alloc(request->ctx, engine);
+ if (ret)
+ return ret;
+ }
+
+ request->ringbuf = ce->ringbuf;
if (i915.enable_guc_submission) {
/*
@@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
* going any further, as the i915_add_request() call
* later on mustn't fail ...
*/
- struct intel_guc *guc = &request->i915->guc;
-
- ret = i915_guc_wq_check_space(guc->execbuf_client);
+ ret = i915_guc_wq_check_space(request);
if (ret)
return ret;
}
- if (request->ctx != request->i915->kernel_context)
- ret = intel_lr_context_pin(request->ctx, request->engine);
+ ret = intel_lr_context_pin(request->ctx, engine);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ goto err_unpin;
+
+ if (!ce->initialised) {
+ ret = engine->init_context(request);
+ if (ret)
+ goto err_unpin;
+
+ ce->initialised = true;
+ }
+
+ /* Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+
+err_unpin:
+ intel_lr_context_unpin(request->ctx, engine);
return ret;
}
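
The rewritten intel_logical_ring_alloc_request_extras() above follows a reserve-then-commit shape: worst-case ring space for the request tail is reserved up front, the context is pinned and initialised, and only once nothing can fail any more is the reservation released. A simplified standalone sketch of that flow; REQUEST_SIZE and the helper functions are illustrative stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

#define REQUEST_SIZE 64 /* worst-case bytes emitted when closing a request */

struct request {
	int reserved_space;
	bool pinned;
};

static bool context_pin(struct request *rq)   { rq->pinned = true; return true; }
static void context_unpin(struct request *rq) { rq->pinned = false; }
static bool ring_begin(struct request *rq)    { (void)rq; return true; }

static int setup_request(struct request *rq)
{
	rq->reserved_space += REQUEST_SIZE;   /* reserve before doing any work */

	if (!context_pin(rq))
		return -1;
	if (!ring_begin(rq))
		goto err_unpin;

	/* Committed: from here on the request will be submitted. */
	rq->reserved_space -= REQUEST_SIZE;
	return 0;

err_unpin:
	context_unpin(rq);
	return -1;
}

int main(void)
{
	struct request rq = { 0 };
	printf("%d %d\n", setup_request(&rq), rq.reserved_space); /* 0 0 */
	return 0;
}
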
@@ -734,7 +774,6 @@ static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct drm_i915_private *dev_priv = request->i915;
struct intel_engine_cs *engine = request->engine;
intel_logical_ring_advance(ringbuf);
@@ -750,54 +789,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
- if (intel_engine_stopped(engine))
- return 0;
-
- if (engine->last_context != request->ctx) {
- if (engine->last_context)
- intel_lr_context_unpin(engine->last_context, engine);
- if (request->ctx != request->i915->kernel_context) {
- intel_lr_context_pin(request->ctx, engine);
- engine->last_context = request->ctx;
- } else {
- engine->last_context = NULL;
- }
- }
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ request->previous_context = engine->last_context;
+ engine->last_context = request->ctx;
- if (dev_priv->guc.execbuf_client)
- i915_guc_submit(dev_priv->guc.execbuf_client, request);
+ if (i915.enable_guc_submission)
+ i915_guc_submit(request);
else
execlists_context_queue(request);
return 0;
}
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
-{
- /*
- * The first call merely notes the reserve request and is common for
- * all back ends. The subsequent localised _begin() call actually
- * ensures that the reservation is available. Without the begin, if
- * the request creator immediately submitted the request without
- * adding any commands to it then there might not actually be
- * sufficient room for the submission commands.
- */
- intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
- return intel_ring_begin(request, 0);
-}
-
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @dev: DRM device.
- * @file: DRM file.
- * @ring: Engine Command Streamer to submit to.
- * @ctx: Context to employ for this submission.
+ * @params: execbuffer call parameters.
* @args: execbuffer call arguments.
* @vmas: list of vmas.
- * @batch_obj: the batchbuffer to submit.
- * @exec_start: batchbuffer start virtual address pointer.
- * @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
@@ -810,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
@@ -881,28 +894,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
return 0;
}
-void intel_execlists_retire_requests(struct intel_engine_cs *engine)
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req, *tmp;
- struct list_head retired_list;
+ LIST_HEAD(cancel_list);
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
- if (list_empty(&engine->execlist_retired_req_list))
- return;
+ WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
- INIT_LIST_HEAD(&retired_list);
spin_lock_bh(&engine->execlist_lock);
- list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+ list_replace_init(&engine->execlist_queue, &cancel_list);
spin_unlock_bh(&engine->execlist_lock);
- list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
- struct intel_context *ctx = req->ctx;
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
-
- if (ctx_obj && (ctx != req->i915->kernel_context))
- intel_lr_context_unpin(ctx, engine);
-
+ list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
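
intel_execlists_cancel_requests() above uses a common idiom: splice the whole pending queue onto a private list while holding the lock, then walk the private list and drop references with the lock released. A small standalone sketch of the idiom, with a pthread mutex standing in for execlist_lock and a toy singly-linked list instead of struct list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int id;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *queue_head;

static void cancel_requests(void)
{
	struct req *cancel_list, *req;

	/* Detach everything while holding the lock... */
	pthread_mutex_lock(&queue_lock);
	cancel_list = queue_head;
	queue_head = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* ...then release the requests without the lock held. */
	while ((req = cancel_list)) {
		cancel_list = req->next;
		printf("cancelling request %d\n", req->id);
		free(req);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *req = malloc(sizeof(*req));
		if (!req)
			return 1;
		req->id = i;
		req->next = queue_head;
		queue_head = req;
	}
	cancel_requests();
	return 0;
}
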
@@ -910,7 +913,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
if (!intel_engine_initialized(engine))
@@ -923,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
/* TODO: Is this correct with Execlists enabled? */
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ RING_MI_MODE(engine->mmio_base),
+ MODE_IDLE, MODE_IDLE,
+ 1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
@@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0;
}
-static int intel_lr_context_do_pin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ struct intel_context *ce = &ctx->engine[engine->id];
void *vaddr;
u32 *lrc_reg_state;
int ret;
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (ce->pin_count++)
+ return 0;
+
+ ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
- return ret;
+ goto err;
- vaddr = i915_gem_object_pin_map(ctx_obj);
+ vaddr = i915_gem_object_pin_map(ce->state);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
@@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
if (ret)
goto unpin_map;
- ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ i915_gem_context_reference(ctx);
+ ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
intel_lr_context_descriptor_update(ctx, engine);
- lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
- ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
- ctx_obj->dirty = true;
+
+ lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+ ce->lrc_reg_state = lrc_reg_state;
+ ce->state->dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission)
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- return ret;
+ return 0;
unpin_map:
- i915_gem_object_unpin_map(ctx_obj);
+ i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
- i915_gem_object_ggtt_unpin(ctx_obj);
-
+ i915_gem_object_ggtt_unpin(ce->state);
+err:
+ ce->pin_count = 0;
return ret;
}
-static int intel_lr_context_pin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- int ret = 0;
+ struct intel_context *ce = &ctx->engine[engine->id];
- if (ctx->engine[engine->id].pin_count++ == 0) {
- ret = intel_lr_context_do_pin(ctx, engine);
- if (ret)
- goto reset_pin_count;
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(ce->pin_count == 0);
- i915_gem_context_reference(ctx);
- }
- return ret;
+ if (--ce->pin_count)
+ return;
-reset_pin_count:
- ctx->engine[engine->id].pin_count = 0;
- return ret;
-}
+ intel_unpin_ringbuffer_obj(ce->ringbuf);
-void intel_lr_context_unpin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ i915_gem_object_unpin_map(ce->state);
+ i915_gem_object_ggtt_unpin(ce->state);
- WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
- if (--ctx->engine[engine->id].pin_count == 0) {
- i915_gem_object_unpin_map(ctx_obj);
- intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- ctx->engine[engine->id].lrc_vma = NULL;
- ctx->engine[engine->id].lrc_desc = 0;
- ctx->engine[engine->id].lrc_reg_state = NULL;
+ ce->lrc_vma = NULL;
+ ce->lrc_desc = 0;
+ ce->lrc_reg_state = NULL;
- i915_gem_context_unreference(ctx);
- }
+ i915_gem_context_unreference(ctx);
}
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
int ret, i;
struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
@@ -1103,7 +1097,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t index)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
@@ -1165,7 +1159,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
/**
* gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
*
- * @ring: only applicable for RCS
+ * @engine: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
* offset: specifies start of the batch, should be cache-aligned. This is updated
* with the offset value received as input.
@@ -1202,7 +1196,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(engine->dev)) {
+ if (IS_BROADWELL(engine->i915)) {
int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (rc < 0)
return rc;
@@ -1239,7 +1233,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
/**
* gen8_init_perctx_bb() - initialize per ctx batch with WA
*
- * @ring: only applicable for RCS
+ * @engine: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
* offset: specifies start of the batch, should be cache-aligned.
* size: size of the batch in DWORDS but HW expects in terms of cachelines
@@ -1274,13 +1268,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
uint32_t *offset)
{
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1305,6 +1298,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
}
+
+ /* WaMediaPoolStateCmdInWABB:bxt */
+ if (HAS_POOLED_EU(engine->i915)) {
+ /*
+ * EU pool configuration is set up along with golden context
+ * during context initialization. This value depends on
+ * device type (2x6 or 3x6) and needs to be updated based
+ * on which subslice is disabled especially for 2x6
+ * devices, however it is safe to load default
+ * configuration of 3x6 device instead of masking off
+ * corresponding bits because HW ignores bits of a disabled
+ * subslice and drops down to appropriate config. Please
+ * see render_state_setup() in i915_gem_render_state.c for
+ * possible configurations, to avoid duplication they are
+ * not shown here again.
+ */
+ u32 eu_pool_config = 0x00777000;
+ wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
+ wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
+ wa_ctx_emit(batch, index, eu_pool_config);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
+
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
@@ -1317,12 +1335,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t *offset)
{
- struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1331,7 +1348,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
}
/* WaClearTdlStateAckDirtyBits:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1350,8 +1367,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
}
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1363,11 +1380,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
int ret;
- engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
- PAGE_ALIGN(size));
- if (!engine->wa_ctx.obj) {
+ engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
+ PAGE_ALIGN(size));
+ if (IS_ERR(engine->wa_ctx.obj)) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
- return -ENOMEM;
+ ret = PTR_ERR(engine->wa_ctx.obj);
+ engine->wa_ctx.obj = NULL;
+ return ret;
}
ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -1401,9 +1420,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
WARN_ON(engine->id != RCS);
/* update this when WA for higher Gen are added */
- if (INTEL_INFO(engine->dev)->gen > 9) {
+ if (INTEL_GEN(engine->i915) > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_INFO(engine->dev)->gen);
+ INTEL_GEN(engine->i915));
return 0;
}
@@ -1423,7 +1442,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
batch = kmap_atomic(page);
offset = 0;
- if (INTEL_INFO(engine->dev)->gen == 8) {
+ if (IS_GEN8(engine->i915)) {
ret = gen8_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
@@ -1437,7 +1456,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
&offset);
if (ret)
goto out;
- } else if (INTEL_INFO(engine->dev)->gen == 9) {
+ } else if (IS_GEN9(engine->i915)) {
ret = gen9_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
@@ -1463,7 +1482,7 @@ out:
static void lrc_init_hws(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
(u32)engine->status_page.gfx_addr);
@@ -1472,8 +1491,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned int next_context_status_buffer_hw;
lrc_init_hws(engine);
@@ -1520,8 +1538,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_common_ring(engine);
@@ -1598,7 +1615,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
if (req->ctx->ppgtt &&
(intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
- !intel_vgpu_active(req->i915->dev)) {
+ !intel_vgpu_active(req->i915)) {
ret = intel_logical_ring_emit_pdps(req);
if (ret)
return ret;
@@ -1624,38 +1641,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ struct drm_i915_private *dev_priv = engine->i915;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ struct drm_i915_private *dev_priv = engine->i915;
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1664,8 +1661,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = request->i915;
uint32_t cmd;
int ret;
@@ -1734,7 +1730,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(engine->dev))
+ if (IS_GEN9(request->i915))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
@@ -1793,16 +1789,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
return 0;
}
-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
{
/*
@@ -1818,14 +1804,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
- /* See bxt_a_get_seqno() explaining the reason for the clflush. */
- intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -1833,11 +1811,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
*/
#define WA_TAIL_DWORDS 2
-static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
-{
- return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
-}
-
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1853,10 +1826,10 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf,
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
- hws_seqno_address(request->engine) |
+ intel_hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ intel_logical_ring_emit(ringbuf, request->seqno);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
@@ -1883,7 +1856,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
- intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
+ intel_logical_ring_emit(ringbuf,
+ intel_hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
@@ -1945,7 +1919,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
- * @ring: Engine Command Streamer.
+ * @engine: Engine Command Streamer.
*
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
@@ -1962,7 +1936,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
tasklet_kill(&engine->irq_tasklet);
- dev_priv = engine->dev->dev_private;
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_logical_ring_stop(engine);
@@ -1975,36 +1949,34 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ intel_engine_fini_breadcrumbs(engine);
+
if (engine->status_page.obj) {
i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
}
+ intel_lr_context_unpin(dev_priv->kernel_context, engine);
engine->idle_lite_restore_wa = 0;
engine->disable_lite_restore_wa = false;
engine->ctx_desc_template = 0;
lrc_destroy_wa_ctx_obj(engine);
- engine->dev = NULL;
+ engine->i915 = NULL;
}
static void
-logical_ring_default_vfuncs(struct drm_device *dev,
- struct intel_engine_cs *engine)
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
engine->emit_request = gen8_emit_request;
engine->emit_flush = gen8_emit_flush;
- engine->irq_get = gen8_logical_ring_get_irq;
- engine->irq_put = gen8_logical_ring_put_irq;
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
- engine->get_seqno = gen8_get_seqno;
- engine->set_seqno = gen8_set_seqno;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
- engine->set_seqno = bxt_a_set_seqno;
- }
}
static inline void
@@ -2033,60 +2005,28 @@ lrc_setup_hws(struct intel_engine_cs *engine,
}
static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
+logical_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_context *dctx = dev_priv->kernel_context;
- enum forcewake_domains fw_domains;
+ struct i915_gem_context *dctx = engine->i915->kernel_context;
int ret;
- /* Intentionally left blank. */
- engine->buffer = NULL;
-
- engine->dev = dev;
- INIT_LIST_HEAD(&engine->active_list);
- INIT_LIST_HEAD(&engine->request_list);
- i915_gem_batch_pool_init(dev, &engine->batch_pool);
- init_waitqueue_head(&engine->irq_queue);
-
- INIT_LIST_HEAD(&engine->buffers);
- INIT_LIST_HEAD(&engine->execlist_queue);
- INIT_LIST_HEAD(&engine->execlist_retired_req_list);
- spin_lock_init(&engine->execlist_lock);
-
- tasklet_init(&engine->irq_tasklet,
- intel_lrc_irq_handler, (unsigned long)engine);
-
- logical_ring_init_platform_invariants(engine);
-
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
- RING_ELSP(engine),
- FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_PTR(engine),
- FW_REG_READ | FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_BUF_BASE(engine),
- FW_REG_READ);
-
- engine->fw_domains = fw_domains;
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto error;
ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(dctx, engine);
+ ret = execlists_context_deferred_alloc(dctx, engine);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(dctx, engine);
+ ret = intel_lr_context_pin(dctx, engine);
if (ret) {
- DRM_ERROR(
- "Failed to pin and map ringbuffer %s: %d\n",
- engine->name, ret);
+ DRM_ERROR("Failed to pin context for %s: %d\n",
+ engine->name, ret);
goto error;
}
@@ -2104,26 +2044,16 @@ error:
return ret;
}
-static int logical_render_ring_init(struct drm_device *dev)
+static int logical_render_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- engine->name = "render ring";
- engine->id = RCS;
- engine->exec_id = I915_EXEC_RENDER;
- engine->guc_id = GUC_RENDER_ENGINE;
- engine->mmio_base = RENDER_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
- if (HAS_L3_DPF(dev))
+ if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- logical_ring_default_vfuncs(dev, engine);
-
/* Override some for render ring. */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
engine->init_hw = gen9_init_render_ring;
else
engine->init_hw = gen8_init_render_ring;
@@ -2132,9 +2062,7 @@ static int logical_render_ring_init(struct drm_device *dev)
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
- engine->dev = dev;
-
- ret = intel_init_pipe_control(engine);
+ ret = intel_init_pipe_control(engine, 4096);
if (ret)
return ret;
@@ -2149,7 +2077,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ret);
}
- ret = logical_ring_init(dev, engine);
+ ret = logical_ring_init(engine);
if (ret) {
lrc_destroy_wa_ctx_obj(engine);
}
@@ -2157,133 +2085,164 @@ static int logical_render_ring_init(struct drm_device *dev)
return ret;
}
-static int logical_bsd_ring_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
- engine->name = "bsd ring";
- engine->id = VCS;
- engine->exec_id = I915_EXEC_BSD;
- engine->guc_id = GUC_VIDEO_ENGINE;
- engine->mmio_base = GEN6_BSD_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
-
- return logical_ring_init(dev, engine);
-}
+static const struct logical_ring_info {
+ const char *name;
+ unsigned exec_id;
+ unsigned guc_id;
+ u32 mmio_base;
+ unsigned irq_shift;
+ int (*init)(struct intel_engine_cs *engine);
+} logical_rings[] = {
+ [RCS] = {
+ .name = "render ring",
+ .exec_id = I915_EXEC_RENDER,
+ .guc_id = GUC_RENDER_ENGINE,
+ .mmio_base = RENDER_RING_BASE,
+ .irq_shift = GEN8_RCS_IRQ_SHIFT,
+ .init = logical_render_ring_init,
+ },
+ [BCS] = {
+ .name = "blitter ring",
+ .exec_id = I915_EXEC_BLT,
+ .guc_id = GUC_BLITTER_ENGINE,
+ .mmio_base = BLT_RING_BASE,
+ .irq_shift = GEN8_BCS_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+ [VCS] = {
+ .name = "bsd ring",
+ .exec_id = I915_EXEC_BSD,
+ .guc_id = GUC_VIDEO_ENGINE,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+ [VCS2] = {
+ .name = "bsd2 ring",
+ .exec_id = I915_EXEC_BSD,
+ .guc_id = GUC_VIDEO_ENGINE2,
+ .mmio_base = GEN8_BSD2_RING_BASE,
+ .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+ [VECS] = {
+ .name = "video enhancement ring",
+ .exec_id = I915_EXEC_VEBOX,
+ .guc_id = GUC_VIDEOENHANCE_ENGINE,
+ .mmio_base = VEBOX_RING_BASE,
+ .irq_shift = GEN8_VECS_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+};
-static int logical_bsd2_ring_init(struct drm_device *dev)
+static struct intel_engine_cs *
+logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+ const struct logical_ring_info *info = &logical_rings[id];
+ struct intel_engine_cs *engine = &dev_priv->engine[id];
+ enum forcewake_domains fw_domains;
- engine->name = "bsd2 ring";
- engine->id = VCS2;
- engine->exec_id = I915_EXEC_BSD;
- engine->guc_id = GUC_VIDEO_ENGINE2;
- engine->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->id = id;
+ engine->name = info->name;
+ engine->exec_id = info->exec_id;
+ engine->guc_id = info->guc_id;
+ engine->mmio_base = info->mmio_base;
- logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ engine->i915 = dev_priv;
- return logical_ring_init(dev, engine);
-}
+ /* Intentionally left blank. */
+ engine->buffer = NULL;
-static int logical_blt_ring_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+ RING_ELSP(engine),
+ FW_REG_WRITE);
- engine->name = "blitter ring";
- engine->id = BCS;
- engine->exec_id = I915_EXEC_BLT;
- engine->guc_id = GUC_BLITTER_ENGINE;
- engine->mmio_base = BLT_RING_BASE;
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_PTR(engine),
+ FW_REG_READ | FW_REG_WRITE);
- logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_BUF_BASE(engine),
+ FW_REG_READ);
- return logical_ring_init(dev, engine);
-}
+ engine->fw_domains = fw_domains;
-static int logical_vebox_ring_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VECS];
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ INIT_LIST_HEAD(&engine->buffers);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ spin_lock_init(&engine->execlist_lock);
- engine->name = "video enhancement ring";
- engine->id = VECS;
- engine->exec_id = I915_EXEC_VEBOX;
- engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
- engine->mmio_base = VEBOX_RING_BASE;
+ tasklet_init(&engine->irq_tasklet,
+ intel_lrc_irq_handler, (unsigned long)engine);
- logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ logical_ring_init_platform_invariants(engine);
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine, info->irq_shift);
+
+ intel_engine_init_hangcheck(engine);
+ i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
- return logical_ring_init(dev, engine);
+ return engine;
}
/**
* intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
* @dev: DRM device.
*
- * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
- * those engines that are present in the hardware.
+ * This function inits the engines for an Execlists submission style (the
+ * equivalent in the legacy ringbuffer submission world would be
+ * i915_gem_init_engines). It does it only for those engines that are present in
+ * the hardware.
*
* Return: non-zero if the initialization failed.
*/
int intel_logical_rings_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int mask = 0;
+ unsigned int i;
int ret;
- ret = logical_render_ring_init(dev);
- if (ret)
- return ret;
+ WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
- if (HAS_BSD(dev)) {
- ret = logical_bsd_ring_init(dev);
- if (ret)
- goto cleanup_render_ring;
- }
+ for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
+ if (!HAS_ENGINE(dev_priv, i))
+ continue;
- if (HAS_BLT(dev)) {
- ret = logical_blt_ring_init(dev);
- if (ret)
- goto cleanup_bsd_ring;
- }
+ if (!logical_rings[i].init)
+ continue;
- if (HAS_VEBOX(dev)) {
- ret = logical_vebox_ring_init(dev);
+ ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
if (ret)
- goto cleanup_blt_ring;
+ goto cleanup;
+
+ mask |= ENGINE_MASK(i);
}
- if (HAS_BSD2(dev)) {
- ret = logical_bsd2_ring_init(dev);
- if (ret)
- goto cleanup_vebox_ring;
+ /*
+ * Catch failures to update the logical_rings table when new engines
+ * are added to the driver: warn and disable the forgotten engines.
+ */
+ if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
+ struct intel_device_info *info =
+ (struct intel_device_info *)&dev_priv->info;
+ info->ring_mask = mask;
}
return 0;
-cleanup_vebox_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
-cleanup_render_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
+cleanup:
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ intel_logical_ring_cleanup(&dev_priv->engine[i]);
return ret;
}
static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
{
u32 rpcs = 0;
@@ -2291,7 +2250,7 @@ make_rpcs(struct drm_device *dev)
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
/*
@@ -2300,24 +2259,24 @@ make_rpcs(struct drm_device *dev)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev)->has_slice_pg) {
+ if (INTEL_INFO(dev_priv)->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->slice_total <<
+ rpcs |= INTEL_INFO(dev_priv)->slice_total <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_subslice_pg) {
+ if (INTEL_INFO(dev_priv)->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_eu_pg) {
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ if (INTEL_INFO(dev_priv)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
@@ -2329,9 +2288,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
- switch (INTEL_INFO(engine->dev)->gen) {
+ switch (INTEL_GEN(engine->i915)) {
default:
- MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+ MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
case 9:
indirect_ctx_offset =
@@ -2347,13 +2306,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static int
-populate_lr_context(struct intel_context *ctx,
+populate_lr_context(struct i915_gem_context *ctx,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = ctx->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
void *vaddr;
u32 *reg_state;
@@ -2391,7 +2349,7 @@ populate_lr_context(struct intel_context *ctx,
RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- (HAS_RESOURCE_STREAMER(dev) ?
+ (HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
0);
@@ -2480,7 +2438,7 @@ populate_lr_context(struct intel_context *ctx,
if (engine->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev));
+ make_rpcs(dev_priv));
}
i915_gem_object_unpin_map(ctx_obj);
@@ -2489,39 +2447,8 @@ populate_lr_context(struct intel_context *ctx,
}
/**
- * intel_lr_context_free() - free the LRC specific bits of a context
- * @ctx: the LR context to free.
- *
- * The real context freeing is done in i915_gem_context_free: this only
- * takes care of the bits that are LRC related: the per-engine backing
- * objects and the logical ringbuffer.
- */
-void intel_lr_context_free(struct intel_context *ctx)
-{
- int i;
-
- for (i = I915_NUM_ENGINES; --i >= 0; ) {
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
- if (!ctx_obj)
- continue;
-
- if (ctx == ctx->i915->kernel_context) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- i915_gem_object_unpin_map(ctx_obj);
- }
-
- WARN_ON(ctx->engine[i].pin_count);
- intel_ringbuffer_free(ringbuf);
- drm_gem_object_unreference(&ctx_obj->base);
- }
-}
-
-/**
* intel_lr_context_size() - return the size of the context for an engine
- * @ring: which engine to find the context size for
+ * @engine: which engine to find the context size for
*
* Each engine may require a different amount of space for a context image,
* so when allocating (or copying) an image, this function can be used to
@@ -2537,11 +2464,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
int ret = 0;
- WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+ WARN_ON(INTEL_GEN(engine->i915) < 8);
switch (engine->id) {
case RCS:
- if (INTEL_INFO(engine->dev)->gen >= 9)
+ if (INTEL_GEN(engine->i915) >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2558,9 +2485,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
}
/**
- * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
+ * execlists_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
- * @ring: engine to be used with the context.
+ * @engine: engine to be used with the context.
*
* This function can be called more than once, with different engines, if we plan
* to use the context with them. The context backing objects and the ringbuffers
@@ -2570,31 +2497,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
*
* Return: non-zero on error.
*/
-
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
struct drm_i915_gem_object *ctx_obj;
+ struct intel_context *ce = &ctx->engine[engine->id];
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
- WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- WARN_ON(ctx->engine[engine->id].state);
+ WARN_ON(ce->state);
context_size = round_up(intel_lr_context_size(engine), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_alloc_object(dev, context_size);
- if (!ctx_obj) {
+ ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
+ if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
- return -ENOMEM;
+ return PTR_ERR(ctx_obj);
}
- ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error_deref_obj;
@@ -2606,48 +2531,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
goto error_ringbuf;
}
- ctx->engine[engine->id].ringbuf = ringbuf;
- ctx->engine[engine->id].state = ctx_obj;
+ ce->ringbuf = ringbuf;
+ ce->state = ctx_obj;
+ ce->initialised = engine->init_context == NULL;
- if (ctx != ctx->i915->kernel_context && engine->init_context) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, ctx);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- DRM_ERROR("ring create req: %d\n", ret);
- goto error_ringbuf;
- }
-
- ret = engine->init_context(req);
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("ring init context: %d\n",
- ret);
- goto error_ringbuf;
- }
- }
return 0;
error_ringbuf:
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[engine->id].ringbuf = NULL;
- ctx->engine[engine->id].state = NULL;
+ ce->ringbuf = NULL;
+ ce->state = NULL;
return ret;
}
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[engine->id].ringbuf;
+ struct intel_context *ce = &ctx->engine[engine->id];
+ struct drm_i915_gem_object *ctx_obj = ce->state;
void *vaddr;
uint32_t *reg_state;
@@ -2666,7 +2572,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
i915_gem_object_unpin_map(ctx_obj);
- ringbuf->head = 0;
- ringbuf->tail = 0;
+ ce->ringbuf->head = 0;
+ ce->ringbuf->tail = 0;
}
}
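
To make the new context-descriptor layout documented above concrete (bits 0-11 flags cached in ctx_desc_template, bits 12-31 the page-aligned GGTT address of the context, bits 32-52 the per-context hw_id), here is a small worked example of the packing. The values are arbitrary, and the sketch omits the desc_template addressing bits and the LRC_PPHWSP_PN page offset that the real intel_lr_context_descriptor_update() folds in.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_ID_SHIFT 32
#define CTX_VALID_FLAG    (1u << 0)     /* one of the bit 0-11 flag bits */

int main(void)
{
	uint64_t ctx_desc_template = CTX_VALID_FLAG;   /* bits 0-11 */
	uint64_t lrca = 0x0012f000;                    /* page-aligned GGTT address, bits 12-31 */
	uint64_t hw_id = 42;                           /* globally unique tag, bits 32-52 */
	uint64_t desc;

	desc  = ctx_desc_template;
	desc |= lrca;
	desc |= hw_id << GEN8_CTX_ID_SHIFT;

	printf("descriptor    = 0x%016" PRIx64 "\n", desc);
	printf("flags (0-11)  = 0x%03" PRIx64 "\n", desc & 0xfff);
	printf("lrca (12-31)  = 0x%08" PRIx64 "\n", desc & 0xfffff000);
	printf("hw_id (32-52) = %" PRIu64 "\n",
	       (desc >> GEN8_CTX_ID_SHIFT) & ((1u << 21) - 1));
	return 0;
}
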
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc531..2b8255c19dcc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -57,6 +57,11 @@
#define GEN8_CSB_READ_PTR(csb_status) \
(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+};
+
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
-void intel_lr_context_free(struct intel_context *ctx);
+struct i915_gem_context;
+
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct intel_context *ctx,
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
struct drm_i915_private;
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct intel_context *ctx);
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+ struct i915_gem_context *ctx);
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine);
-
/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+ int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-void intel_execlists_retire_requests(struct intel_engine_cs *engine);
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 96281e628d2a..49550470483e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int pipe = crtc->pipe;
@@ -184,13 +184,13 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
* panels behave in the two modes. For now, let's just maintain the
* value we got from the BIOS.
*/
- temp &= ~LVDS_A3_POWER_MASK;
- temp |= lvds_encoder->a3_power;
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
- if (INTEL_INFO(dev)->gen == 4) {
+ if (IS_GEN4(dev_priv)) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_encoder->reg);
- if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+ if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
container_of(nb, struct intel_lvds_connector, lid_notifier);
struct drm_connector *connector = &lvds_connector->base.base;
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -556,6 +555,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
{
- struct intel_encoder *encoder;
- struct intel_lvds_encoder *lvds_encoder;
+ struct intel_encoder *intel_encoder;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->type == INTEL_OUTPUT_LVDS) {
- lvds_encoder = to_lvds_encoder(&encoder->base);
+ for_each_intel_encoder(dev, intel_encoder)
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ return intel_encoder;
- return lvds_encoder->is_dual_link;
- }
- }
+ return NULL;
+}
- return false;
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+
+ return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
struct drm_device *dev = lvds_encoder->base.base.dev;
unsigned int val;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
if (i915.lvds_channel_mode > 0)
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
*/
void intel_lvds_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_lvds_connector *lvds_connector;
@@ -978,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ DRM_MODE_ENCODER_LVDS, "LVDS");
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -992,7 +995,6 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
@@ -1119,6 +1121,7 @@ out:
mutex_unlock(&dev->mode_config.mutex);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1131,9 +1134,6 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds_connector->lid_notifier.notifier_call = NULL;
}
- drm_connector_register(connector);
-
- intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
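
The intel_lvds.c hunks above turn the open-coded wait_for((I915_READ(stat_reg) & PP_ON) ..., 1000) polls into intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000) and intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000), i.e. "(read & mask) == value within timeout_ms". The following is a hedged, generic sketch of that contract; read_reg(), the msleep()-based pacing and the helper name are assumptions, not the i915 implementation.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll until (read_reg() & mask) == value, or give up after timeout_ms. */
static int wait_for_register_sketch(u32 (*read_reg)(void),
                                    u32 mask, u32 value,
                                    unsigned int timeout_ms)
{
        while (timeout_ms--) {
                if ((read_reg() & mask) == value)
                        return 0;
                msleep(1);
        }
        return -ETIMEDOUT;
}

Read against the call sites above: the power-on path waits for PP_ON to become set (value == PP_ON), the power-off path waits for it to clear (value == 0).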
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..927825f5b284 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -66,9 +66,10 @@ struct drm_i915_mocs_table {
#define L3_WB 3
/* Target cache */
-#define ELLC 0
-#define LLC 1
-#define LLC_ELLC 2
+#define LE_TC_PAGETABLE 0
+#define LE_TC_LLC 1
+#define LE_TC_LLC_ELLC 2
+#define LE_TC_LLC_ELLC_ALT 3
/*
* MOCS tables
@@ -96,34 +97,67 @@ struct drm_i915_mocs_table {
* end.
*/
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
- /* { 0x00000009, 0x0010 } */
- { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
- /* { 0x00000038, 0x0030 } */
- { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
- /* { 0x0000003b, 0x0030 } */
- { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+ { /* 0x00000009 */
+ .control_value = LE_CACHEABILITY(LE_UC) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0010 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+ },
+ {
+ /* 0x00000038 */
+ .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
+ {
+ /* 0x0000003b */
+ .control_value = LE_CACHEABILITY(LE_WB) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
- /* { 0x00000009, 0x0010 } */
- { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
- /* { 0x00000038, 0x0030 } */
- { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
- /* { 0x0000003b, 0x0030 } */
- { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+ {
+ /* 0x00000009 */
+ .control_value = LE_CACHEABILITY(LE_UC) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0010 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+ },
+ {
+ /* 0x00000038 */
+ .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
+ {
+ /* 0x00000039 */
+ .control_value = LE_CACHEABILITY(LE_UC) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
};
/**
@@ -156,6 +190,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
"Platform that should have a MOCS table does not.\n");
}
+ /* WaDisableSkipCaching:skl,bxt,kbl */
+ if (IS_GEN9(dev_priv)) {
+ int i;
+
+ for (i = 0; i < table->size; i++)
+ if (WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
+ return false;
+ }
+
return result;
}
@@ -189,7 +233,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
*/
int intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;
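
The intel_mocs.c hunks above rewrite the Skylake and Broxton MOCS tables with designated initializers so each entry's control_value/l3cc_value pairing is explicit and annotated with its raw register value. A tiny standalone sketch of that style follows; the macros, fields and values here are invented for illustration, not the i915 definitions.

#define CACHEABILITY(x)  ((x) << 0)
#define TARGET_CACHE(x)  ((x) << 4)

struct mocs_entry_sketch {
        unsigned int control_value;
        unsigned int l3cc_value;
};

static const struct mocs_entry_sketch table_sketch[] = {
        {
                /* entry 0: uncached */
                .control_value = CACHEABILITY(0) | TARGET_CACHE(2),
                .l3cc_value    = 0x0010,
        },
        {
                /* entry 1: write-back */
                .control_value = CACHEABILITY(3) | TARGET_CACHE(2),
                .l3cc_value    = 0x0030,
        },
};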
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 38a4c8ce7e63..f2584d0a01ab 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -82,7 +82,7 @@ void
intel_attach_force_audio_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
prop = dev_priv->broadcast_rgb_property;
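
intel_modes.c, like most files in this series, replaces dev->dev_private with to_i915(dev). Other hunks in the diff access the drm_device as dev_priv->drm, which suggests the private structure embeds struct drm_device and the upcast is a container_of() rather than a stored back-pointer. A hedged sketch of that pattern; struct my_driver_private, to_priv() and the header choice are illustrative assumptions.

#include <linux/kernel.h>       /* container_of() */
#include <drm/drmP.h>           /* struct drm_device (4.7-era header) */

struct my_driver_private {
        struct drm_device drm;  /* embedded, not pointed to */
        /* ... driver state ... */
};

static inline struct my_driver_private *to_priv(struct drm_device *dev)
{
        return container_of(dev, struct my_driver_private, drm);
}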
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 16e209d326b6..adca262d591a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -232,18 +232,36 @@ struct opregion_asle_ext {
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT 0
+#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT 8
+#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT 12
+#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT (1 << 16)
+#define ACPI_DEPENDS_ON_VGA (1 << 17)
+#define ACPI_PIPE_ID_SHIFT 18
+#define ACPI_PIPE_ID_MASK (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME (1 << 31)
#define MAX_DSLP 1500
-static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+static int swsci(struct drm_i915_private *dev_priv,
+ u32 function, u32 parm, u32 *parm_out)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u32 main_function, sub_function, scic;
u16 swsci_val;
u32 dslp;
@@ -293,16 +311,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
swsci->scic = scic;
/* Ensure SCI event is selected and event trigger is cleared. */
- pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+ pci_read_config_word(pdev, SWSCI, &swsci_val);
if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
swsci_val |= SWSCI_SCISEL;
swsci_val &= ~SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+ pci_write_config_word(pdev, SWSCI, swsci_val);
}
/* Use event trigger to tell bios to check the mail. */
swsci_val |= SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+ pci_write_config_word(pdev, SWSCI, swsci_val);
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -336,13 +354,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
u32 parm = 0;
u32 type = 0;
u32 port;
/* don't care about old stuff for now */
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return 0;
if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -365,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_CRT;
break;
case INTEL_OUTPUT_UNKNOWN:
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
@@ -382,7 +400,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
parm |= type << (16 + port * 3);
- return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+ return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
static const struct {
@@ -396,27 +414,28 @@ static const struct {
{ PCI_D3cold, 0x04 },
};
-int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state)
{
int i;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
- return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
return -EINVAL;
}
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *connector;
struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct drm_device *dev = &dev_priv->drm;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@@ -449,7 +468,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return 0;
}
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +476,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
return ASLC_ALS_ILLUM_FAILED;
}
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
@@ -471,13 +490,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
return ASLC_PFIT_FAILED;
}
-static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
{
DRM_DEBUG_DRIVER("SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
-static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
{
if (!iuer)
DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +514,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
return ASLC_BUTTON_ARRAY_FAILED;
}
-static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +524,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
return ASLC_CONVERTIBLE_FAILED;
}
-static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +534,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
return ASLC_DOCKING_FAILED;
}
-static u32 asle_isct_state(struct drm_device *dev)
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_DRIVER("ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +546,6 @@ static void asle_work(struct work_struct *work)
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
- struct drm_device *dev = dev_priv->dev;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -544,40 +562,38 @@ static void asle_work(struct work_struct *work)
}
if (aslc_req & ASLC_SET_ALS_ILLUM)
- aslc_stat |= asle_set_als_illum(dev, asle->alsi);
+ aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
if (aslc_req & ASLC_SET_BACKLIGHT)
- aslc_stat |= asle_set_backlight(dev, asle->bclp);
+ aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
if (aslc_req & ASLC_SET_PFIT)
- aslc_stat |= asle_set_pfit(dev, asle->pfit);
+ aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
if (aslc_req & ASLC_SET_PWM_FREQ)
- aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
- aslc_stat |= asle_set_supported_rotation_angles(dev,
+ aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
asle->srot);
if (aslc_req & ASLC_BUTTON_ARRAY)
- aslc_stat |= asle_set_button_array(dev, asle->iuer);
+ aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
- aslc_stat |= asle_set_convertible(dev, asle->iuer);
+ aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
if (aslc_req & ASLC_DOCKING_INDICATOR)
- aslc_stat |= asle_set_docking(dev, asle->iuer);
+ aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
- aslc_stat |= asle_isct_state(dev);
+ aslc_stat |= asle_isct_state(dev_priv);
asle->aslc = aslc_stat;
}
-void intel_opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
}
@@ -658,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static void intel_didl_outputs(struct drm_device *dev)
+static u32 acpi_display_type(struct drm_connector *connector)
+{
+ u32 display_type;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ display_type = ACPI_DISPLAY_TYPE_VGA;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ case DRM_MODE_CONNECTOR_TV:
+ display_type = ACPI_DISPLAY_TYPE_TV;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ default:
+ MISSING_CASE(connector->connector_type);
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ }
+
+ return display_type;
+}
+
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_connector *connector;
acpi_handle handle;
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +727,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp, max_outputs;
int i = 0;
- handle = ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
@@ -725,45 +782,25 @@ end:
blind_set:
i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- int output_type = ACPI_OTHER_OUTPUT;
+ list_for_each_entry(connector,
+ &dev_priv->drm.mode_config.connector_list, head) {
+ int display_type = acpi_display_type(connector);
+
if (i >= max_outputs) {
DRM_DEBUG_KMS("More than %u outputs in connector list\n",
max_outputs);
return;
}
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_DVIA:
- output_type = ACPI_VGA_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_9PinDIN:
- output_type = ACPI_TV_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- output_type = ACPI_DIGITAL_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- output_type = ACPI_LVDS_OUTPUT;
- break;
- }
+
temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | output_type | i);
+ set_did(opregion, i, temp | (1 << 31) | display_type | i);
i++;
}
goto end;
}
-static void intel_setup_cadls(struct drm_device *dev)
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
int i = 0;
u32 disp_id;
@@ -780,17 +817,16 @@ static void intel_setup_cadls(struct drm_device *dev)
} while (++i < 8 && disp_id != 0);
}
-void intel_opregion_init(struct drm_device *dev)
+void intel_opregion_register(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
return;
if (opregion->acpi) {
- intel_didl_outputs(dev);
- intel_setup_cadls(dev);
+ intel_didl_outputs(dev_priv);
+ intel_setup_cadls(dev_priv);
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
@@ -808,9 +844,8 @@ void intel_opregion_init(struct drm_device *dev)
}
}
-void intel_opregion_fini(struct drm_device *dev)
+void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -842,9 +877,8 @@ void intel_opregion_fini(struct drm_device *dev)
opregion->lid_state = NULL;
}
-static void swsci_setup(struct drm_device *dev)
+static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -854,7 +888,7 @@ static void swsci_setup(struct drm_device *dev)
opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
- if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +899,7 @@ static void swsci_setup(struct drm_device *dev)
* must not call interfaces that are not specifically requested by the
* bios.
*/
- if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
@@ -876,7 +910,7 @@ static void swsci_setup(struct drm_device *dev)
* the callback is _requested_. But we still can't call interfaces that
* are not requested.
*/
- if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +952,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-int intel_opregion_setup(struct drm_device *dev)
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
@@ -933,7 +967,7 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
- pci_read_config_dword(dev->pdev, ASLS, &asls);
+ pci_read_config_dword(pdev, ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +999,7 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev);
+ swsci_setup(dev_priv);
}
if (mboxes & MBOX_ASLE) {
@@ -1014,12 +1048,12 @@ err_out:
}
int
-intel_opregion_get_panel_type(struct drm_device *dev)
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{
u32 panel_details;
int ret;
- ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
ret);
@@ -1044,7 +1078,7 @@ intel_opregion_get_panel_type(struct drm_device *dev)
* vswing instead. Low vswing results in some display flickers, so
* let's simply ignore the OpRegion panel type on SKL for now.
*/
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv)) {
DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
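
The intel_opregion.c hunks above replace the old ACPI_*_OUTPUT defines with the full _DOD field layout from the ACPI 5.0 spec and factor the connector-type mapping into acpi_display_type(). In the blind_set path the device id word is then assembled as temp | (1 << 31) | display_type | i: the "device id scheme" bit, the pre-shifted display-type field, and the output index. A hedged sketch of just that word assembly; did_word_sketch() is an invented helper and it drops the BIOS-provided bits that temp preserves above.

#include <linux/types.h>

/* type_field must already be one of the pre-shifted ACPI_DISPLAY_TYPE_* values. */
static u32 did_word_sketch(u32 type_field, u32 index)
{
        return (1u << 31) | type_field | index;
}

For example, did_word_sketch(ACPI_DISPLAY_TYPE_VGA, 0) would describe the first VGA connector.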
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7334..3212d8806b5a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
};
struct intel_overlay {
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct intel_crtc *crtc;
struct drm_i915_gem_object *vid_bo;
struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = to_i915(overlay->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(ggtt->mappable,
- i915_gem_obj_ggtt_offset(overlay->reg_bo));
+ regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+ overlay->flip_addr,
+ PAGE_SIZE);
return regs;
}
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap(regs);
}
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
/* overlay needs to be disable in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
- WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+ WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- if (IS_I830(dev)) {
+ if (IS_I830(dev_priv)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
}
}
-static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
mask = 0x1f;
shift = 5;
} else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
ret <<= 1;
ret -= 1;
return ret << 2;
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width;
struct overlay_registers __iomem *regs;
bool scale_changed = false;
- struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
oconfig = OCONF_CC_OUT_8BIT;
- if (IS_GEN4(overlay->dev))
+ if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_w;
swidth = params->src_w;
- swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h;
iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
swidth |= (params->src_w/uv_hscale) << 16;
- tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ tmp_U = calc_swidthsw(dev_priv, params->offset_U,
params->src_w/uv_hscale);
- tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ tmp_V = calc_swidthsw(dev_priv, params->offset_V,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,7 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
- intel_frontbuffer_flip(dev,
+ intel_frontbuffer_flip(&dev_priv->drm,
INTEL_FRONTBUFFER_OVERLAY(pipe));
return 0;
@@ -852,12 +848,12 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- struct drm_device *dev = overlay->dev;
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -897,15 +893,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
u32 pfit_control = I915_READ(PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
@@ -948,7 +943,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
return 0;
}
-static int check_overlay_src(struct drm_device *dev,
+static int check_overlay_src(struct drm_i915_private *dev_priv,
struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo)
{
@@ -959,7 +954,7 @@ static int check_overlay_src(struct drm_device *dev,
u32 tmp;
/* check src dimensions */
- if (IS_845G(dev) || IS_I830(dev)) {
+ if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1011,14 +1006,14 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev) || IS_845G(dev))
+ if (IS_I830(dev_priv) || IS_845G(dev_priv))
stride_mask = 255;
else
stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_GEN4(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1058,13 @@ static int check_overlay_src(struct drm_device *dev,
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe(struct drm_device *dev)
+static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
/* i830 doesn't have a panel fitter */
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return -1;
pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,18 +1074,18 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
return -1;
/* 965 can place panel fitter on either pipe */
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
return (pfit_control >> 29) & 0x3;
/* older chips can only use pipe 1 */
return 1;
}
-int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
@@ -1162,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
/* line too wide, i.e. one-line-mode */
if (mode->hdisplay > 1024 &&
- intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
} else
@@ -1196,7 +1191,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(dev, put_image_rec, new_bo);
+ ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
if (ret != 0)
goto out_unlock;
params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,11 +1279,11 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
return 0;
}
-int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct overlay_registers __iomem *regs;
int ret;
@@ -1309,7 +1304,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1336,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
goto out_unlock;
if (overlay->active) {
@@ -1371,37 +1366,37 @@ out_unlock:
return ret;
}
-void intel_setup_overlay(struct drm_device *dev)
+void intel_setup_overlay(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
int ret;
- if (!HAS_OVERLAY(dev))
+ if (!HAS_OVERLAY(dev_priv))
return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
if (WARN_ON(dev_priv->overlay))
goto out_free;
- overlay->dev = dev;
+ overlay->i915 = dev_priv;
reg_bo = NULL;
- if (!OVERLAY_NEEDS_PHYSICAL(dev))
- reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
- if (reg_bo == NULL)
- reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
+ reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
+ PAGE_SIZE);
if (reg_bo == NULL)
+ reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
+ if (IS_ERR(reg_bo))
goto out_free;
overlay->reg_bo = reg_bo;
- if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1436,23 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:
- if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
i915_gem_object_ggtt_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
return;
}
-void intel_cleanup_overlay(struct drm_device *dev)
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (!dev_priv->overlay)
return;
@@ -1482,18 +1475,17 @@ struct intel_overlay_error_state {
static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = to_i915(overlay->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
/* Cast to make sparse happy, but it's wc memory anyway, so
* equivalent to the wc io mapping on X86. */
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(ggtt->mappable,
- i915_gem_obj_ggtt_offset(overlay->reg_bo));
+ regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+ overlay->flip_addr);
return regs;
}
@@ -1501,15 +1493,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap_atomic(regs);
}
-
struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
@@ -1523,10 +1513,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
- else
- error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+ error->base = overlay->flip_addr;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
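
intel_overlay.c above also swaps WARN_ON(!mutex_is_locked(&dev->struct_mutex)) for lockdep_assert_held(&dev_priv->drm.struct_mutex). mutex_is_locked() only reports that somebody holds the mutex, whereas lockdep_assert_held() checks, when CONFIG_LOCKDEP is enabled, that the current task holds it, and compiles away otherwise. A minimal sketch; my_lock and do_protected_work_sketch() are illustrative names.

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static void do_protected_work_sketch(void)
{
        lockdep_assert_held(&my_lock);  /* callers must hold my_lock */
        /* ... touch state protected by my_lock ... */
}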
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index aba94099886b..96c65d77e886 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -377,7 +377,7 @@ out:
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
+ pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
val *= lbpc;
}
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
- pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
+ pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
if (IS_GEN4(dev_priv)) {
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
- if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
return;
}
@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_level;
int ret;
@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
.get_brightness = intel_backlight_device_get_brightness,
};
-static int intel_backlight_device_register(struct intel_connector *connector)
+int intel_backlight_device_register(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
@@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector)
return 0;
}
-static void intel_backlight_device_unregister(struct intel_connector *connector)
+void intel_backlight_device_unregister(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
@@ -1225,14 +1225,6 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
panel->backlight.device = NULL;
}
}
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/*
@@ -1324,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int clock;
if (IS_G4X(dev_priv))
@@ -1724,6 +1716,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+ intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+ return;
+
if (IS_BROXTON(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
@@ -1805,19 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
-
-void intel_backlight_register(struct drm_device *dev)
-{
- struct intel_connector *connector;
-
- for_each_intel_connector(dev, connector)
- intel_backlight_device_register(connector);
-}
-
-void intel_backlight_unregister(struct drm_device *dev)
-{
- struct intel_connector *connector;
-
- for_each_intel_connector(dev, connector)
- intel_backlight_device_unregister(connector);
-}
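
intel_panel.c above drops the driver-wide intel_backlight_register()/intel_backlight_unregister() loops and un-statics intel_backlight_device_register()/unregister(), while the intel_lvds.c hunk earlier adds .late_register = intel_connector_register and .early_unregister = intel_connector_unregister to the connector funcs. The body of intel_connector_register() is not part of this diff, so the following is only an assumed sketch of how such a ->late_register hook could pick up the per-connector backlight registration.

static int connector_late_register_sketch(struct drm_connector *connector)
{
        /* Assumed wiring: register the backlight when the connector is registered. */
        return intel_backlight_device_register(to_intel_connector(connector));
}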
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2863b92c9da6..97ba6c8cf907 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include <linux/cpufreq.h>
+#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
@@ -82,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
static void bxt_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
gen9_init_clock_gating(dev);
@@ -108,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -147,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -318,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
u32 val;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -374,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
static int vlv_get_fifo_size(struct drm_device *dev,
enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int sprite0_start, sprite1_start, size;
switch (pipe) {
@@ -425,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -441,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -458,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -636,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
@@ -933,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
static void vlv_setup_wm_latency(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1324,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
static void vlv_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct vlv_wm_values wm = {};
@@ -1380,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
@@ -1437,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
@@ -1511,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
@@ -1641,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
@@ -2040,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev,
- struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(cstate->base.state);
const struct drm_display_mode *adjusted_mode =
&cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
@@ -2052,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
- if (WARN_ON(dev_priv->cdclk_freq == 0))
+ if (WARN_ON(intel_state->cdclk == 0))
return 0;
/* The WM are computed based on how long it takes to fill a single
@@ -2061,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- dev_priv->cdclk_freq);
+ intel_state->cdclk);
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
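The linetime values are just the line duration expressed in 1/8 us units against the pixel clock and the CD clock. A minimal userspace sketch of that arithmetic, with hypothetical mode numbers (not taken from any platform):

#include <stdio.h>

static unsigned int div_round_closest(unsigned int num, unsigned int den)
{
	return (num + den / 2) / den;
}

int main(void)
{
	unsigned int crtc_htotal = 2200;   /* assumed total horizontal pixels */
	unsigned int crtc_clock  = 148500; /* assumed pixel clock, kHz */
	unsigned int cdclk       = 450000; /* assumed CD clock, kHz */

	/* line duration in 1/8 us units: (pixels per line * 8 * 1000) / clock in kHz */
	unsigned int linetime     = div_round_closest(crtc_htotal * 1000 * 8, crtc_clock);
	unsigned int ips_linetime = div_round_closest(crtc_htotal * 1000 * 8, cdclk);

	printf("linetime=%u ips_linetime=%u (1/8 us units)\n", linetime, ips_linetime);
	return 0;
}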
@@ -2069,7 +2070,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_GEN9(dev)) {
uint32_t val;
@@ -2174,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (INTEL_INFO(dev)->gen == 5)
+ if (IS_GEN5(dev))
wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (INTEL_INFO(dev)->gen == 5)
+ if (IS_GEN5(dev))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
@@ -2235,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
- int level, max_level = ilk_wm_max_level(dev_priv->dev);
+ int level, max_level = ilk_wm_max_level(&dev_priv->drm);
if (wm[0] >= min)
return false;
@@ -2249,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
static void snb_wm_latency_quirk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool changed;
/*
@@ -2271,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
static void ilk_setup_wm_latency(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
@@ -2293,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
static void skl_setup_wm_latency(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2329,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
@@ -2337,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
- pipe_wm = &cstate->wm.optimal.ilk;
+ pipe_wm = &cstate->wm.ilk.optimal;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct intel_plane_state *ps;
@@ -2380,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
+ pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
@@ -2419,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate)
{
- struct intel_pipe_wm *a = &newstate->wm.intermediate;
+ struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
int level, max_level = ilk_wm_max_level(dev);
@@ -2428,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
- *a = newstate->wm.optimal.ilk;
+ *a = newstate->wm.ilk.optimal;
a->pipe_enabled |= b->pipe_enabled;
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;
@@ -2457,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
- if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+ if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
newstate->wm.need_postvbl_update = false;
return 0;
@@ -2504,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev);
int last_enabled_level = max_level;
@@ -2564,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 2 * level;
@@ -2764,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2839,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
bool ilk_disable_lp_wm(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
@@ -2877,20 +2878,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
- const struct intel_wm_config *config,
- struct skl_ddb_entry *alloc /* out */)
+ struct skl_ddb_entry *alloc, /* out */
+ int *num_active /* out */)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
- struct drm_crtc *crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
+ int pipe = to_intel_crtc(for_crtc)->pipe;
- if (!cstate->base.active) {
+ if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
alloc->end = 0;
+ *num_active = hweight32(dev_priv->active_crtcs);
return;
}
+ if (intel_state->active_pipe_changes)
+ *num_active = hweight32(intel_state->active_crtcs);
+ else
+ *num_active = hweight32(dev_priv->active_crtcs);
+
if (IS_BROXTON(dev))
ddb_size = BXT_DDB_SIZE;
else
@@ -2898,25 +2908,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
ddb_size -= 4; /* 4 blocks for bypass path allocation */
- nth_active_pipe = 0;
- for_each_crtc(dev, crtc) {
- if (!to_intel_crtc(crtc)->active)
- continue;
-
- if (crtc == for_crtc)
- break;
-
- nth_active_pipe++;
+ /*
+ * If the state doesn't change the active CRTC's, then there's
+ * no need to recalculate; the existing pipe allocation limits
+ * should remain unchanged. Note that we're safe from racing
+ * commits since any racing commit that changes the active CRTC
+ * list would need to grab _all_ crtc locks, including the one
+ * we currently hold.
+ */
+ if (!intel_state->active_pipe_changes) {
+ *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ return;
}
- pipe_size = ddb_size / config->num_pipes_active;
- alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+ nth_active_pipe = hweight32(intel_state->active_crtcs &
+ (drm_crtc_mask(for_crtc) - 1));
+ pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
+ alloc->start = nth_active_pipe * ddb_size / *num_active;
alloc->end = alloc->start + pipe_size;
}
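The new allocation logic indexes a pipe within the active-CRTC bitmask by counting the set bits below it, and hands each active pipe an equal slice of the DDB. A standalone sketch of that bitmask arithmetic, with an invented mask and DDB size (drm_crtc_mask() modelled as 1 << pipe, hweight32() as a popcount):

#include <stdio.h>

int main(void)
{
	unsigned int active_crtcs = 0x5; /* assume pipes A and C are active */
	unsigned int ddb_size = 896;     /* assumed DDB size in blocks */
	unsigned int num_active = __builtin_popcount(active_crtcs); /* hweight32() */
	unsigned int pipe;

	for (pipe = 0; pipe < 3; pipe++) {
		unsigned int mask = 1u << pipe; /* stand-in for drm_crtc_mask() */
		unsigned int nth, size, start;

		if (!(active_crtcs & mask))
			continue;

		nth = __builtin_popcount(active_crtcs & (mask - 1));
		size = ddb_size / num_active;
		start = nth * ddb_size / num_active;
		printf("pipe %u: blocks [%u, %u)\n", pipe, start, start + size);
	}
	return 0;
}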
-static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+static unsigned int skl_cursor_allocation(int num_active)
{
- if (config->num_pipes_active == 1)
+ if (num_active == 1)
return 32;
return 8;
@@ -2960,6 +2974,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
}
}
+/*
+ * Determines the downscale amount of a plane for the purposes of watermark calculations.
+ * The bspec defines downscale amount as:
+ *
+ * """
+ * Horizontal down scale amount = maximum[1, Horizontal source size /
+ * Horizontal destination size]
+ * Vertical down scale amount = maximum[1, Vertical source size /
+ * Vertical destination size]
+ * Total down scale amount = Horizontal down scale amount *
+ * Vertical down scale amount
+ * """
+ *
+ * Return value is provided in 16.16 fixed point form to retain fractional part.
+ * Caller should take care of dividing & rounding off the value.
+ */
+static uint32_t
+skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+{
+ uint32_t downscale_h, downscale_w;
+ uint32_t src_w, src_h, dst_w, dst_h;
+
+ if (WARN_ON(!pstate->visible))
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ /* n.b., src is 16.16 fixed point, dst is whole integer */
+ src_w = drm_rect_width(&pstate->src);
+ src_h = drm_rect_height(&pstate->src);
+ dst_w = drm_rect_width(&pstate->dst);
+ dst_h = drm_rect_height(&pstate->dst);
+ if (intel_rotation_90_or_270(pstate->base.rotation))
+ swap(dst_w, dst_h);
+
+ downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+ downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+
+ /* Provide result in 16.16 fixed point */
+ return (uint64_t)downscale_w * downscale_h >> 16;
+}
+
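A userspace sketch of the 16.16 fixed-point math described in the comment above; DRM_PLANE_HELPER_NO_SCALING corresponds to 1.0 (1 << 16), and the source/destination sizes below are arbitrary examples:

#include <stdint.h>
#include <stdio.h>

#define FP_ONE (1u << 16) /* 1.0 in 16.16 fixed point, i.e. "no scaling" */

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
	uint32_t src_w = 3840u << 16, src_h = 2160u << 16; /* 16.16 source size */
	uint32_t dst_w = 1920, dst_h = 1080;               /* integer dest size */

	uint32_t down_w = max_u32(src_w / dst_w, FP_ONE);  /* 2.0 in 16.16 */
	uint32_t down_h = max_u32(src_h / dst_h, FP_ONE);  /* 2.0 in 16.16 */

	/* the product of two 16.16 values is 32.32; shift back down to 16.16 */
	uint32_t total = (uint64_t)down_w * down_h >> 16;

	printf("total downscale = %u + %u/65536 (16.16)\n",
	       (unsigned int)(total >> 16), (unsigned int)(total & 0xffff));
	return 0;
}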
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
@@ -2967,7 +3021,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
+ unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
+
+ if (!intel_pstate->visible)
+ return 0;
+ if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+ if (y && format != DRM_FORMAT_NV12)
+ return 0;
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2976,17 +3039,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
swap(width, height);
/* for planar format */
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return width * height *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ data_rate = width * height *
+ drm_format_plane_cpp(format, 0);
else /* uv-plane data rate */
- return (width / 2) * (height / 2) *
- drm_format_plane_cpp(fb->pixel_format, 1);
+ data_rate = (width / 2) * (height / 2) *
+ drm_format_plane_cpp(format, 1);
+ } else {
+ /* for packed formats */
+ data_rate = width * height * drm_format_plane_cpp(format, 0);
}
- /* for packed formats */
- return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
+ down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+
+ return (uint64_t)data_rate * down_scale_amount >> 16;
}
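For reference, a small sketch of the relative data rate for an NV12 plane with an assumed 2x total downscale; the per-plane byte counts (1 byte for Y, 2 bytes per half-resolution UV sample) mirror what drm_format_plane_cpp() reports for NV12:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t width = 1920, height = 1080;  /* assumed source size, pixels */
	uint32_t downscale = 2u << 16;         /* assumed 2.0 total downscale, 16.16 */

	uint32_t y_rate  = width * height * 1;             /* Y plane, 1 byte/pixel */
	uint32_t uv_rate = (width / 2) * (height / 2) * 2; /* UV plane, half size, 2 bytes */

	printf("y  data rate: %llu\n", (unsigned long long)((uint64_t)y_rate * downscale >> 16));
	printf("uv data rate: %llu\n", (unsigned long long)((uint64_t)uv_rate * downscale >> 16));
	return 0;
}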
/*
@@ -2995,86 +3062,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_crtc_state *cstate = &intel_cstate->base;
+ struct drm_atomic_state *state = cstate->state;
+ struct drm_crtc *crtc = cstate->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_plane *plane;
const struct intel_plane *intel_plane;
- unsigned int total_data_rate = 0;
+ struct drm_plane_state *pstate;
+ unsigned int rate, total_data_rate = 0;
+ int id;
+ int i;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- const struct drm_plane_state *pstate = intel_plane->base.state;
+ if (WARN_ON(!state))
+ return 0;
- if (pstate->fb == NULL)
- continue;
+ /* Calculate and cache data rate for each plane */
+ for_each_plane_in_state(state, plane, pstate, i) {
+ id = skl_wm_plane_id(to_intel_plane(plane));
+ intel_plane = to_intel_plane(plane);
- if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ if (intel_plane->pipe != intel_crtc->pipe)
continue;
/* packed/uv */
- total_data_rate += skl_plane_relative_data_rate(cstate,
- pstate,
- 0);
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ pstate, 0);
+ intel_cstate->wm.skl.plane_data_rate[id] = rate;
+
+ /* y-plane */
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ pstate, 1);
+ intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
+ }
+
+ /* Calculate CRTC's total data rate from cached values */
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ int id = skl_wm_plane_id(intel_plane);
- if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
- /* y-plane */
- total_data_rate += skl_plane_relative_data_rate(cstate,
- pstate,
- 1);
+ /* packed/uv */
+ total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
+ total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
}
+ WARN_ON(cstate->plane_mask && total_data_rate == 0);
+
return total_data_rate;
}
-static void
+static uint16_t
+skl_ddb_min_alloc(const struct drm_plane_state *pstate,
+ const int y)
+{
+ struct drm_framebuffer *fb = pstate->fb;
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+ uint32_t src_w, src_h;
+ uint32_t min_scanlines = 8;
+ uint8_t plane_bpp;
+
+ if (WARN_ON(!fb))
+ return 0;
+
+ /* For packed formats, no y-plane, return 0 */
+ if (y && fb->pixel_format != DRM_FORMAT_NV12)
+ return 0;
+
+ /* For non-Y-tiled formats, return 8 blocks */
+ if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+ return 8;
+
+ src_w = drm_rect_width(&intel_pstate->src) >> 16;
+ src_h = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(src_w, src_h);
+
+ /* Halve UV plane width and height for NV12 */
+ if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+ src_w /= 2;
+ src_h /= 2;
+ }
+
+ if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
+ plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ else
+ plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ if (intel_rotation_90_or_270(pstate->rotation)) {
+ switch (plane_bpp) {
+ case 1:
+ min_scanlines = 32;
+ break;
+ case 2:
+ min_scanlines = 16;
+ break;
+ case 4:
+ min_scanlines = 8;
+ break;
+ case 8:
+ min_scanlines = 4;
+ break;
+ default:
+ WARN(1, "Unsupported pixel depth %u for rotation",
+ plane_bpp);
+ min_scanlines = 32;
+ }
+ }
+
+ return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
+}
+
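The closing formula can be evaluated in isolation; a sketch for an assumed 1920-pixel-wide, 32bpp, Y-tiled, unrotated plane (min_scanlines = 8):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int src_w = 1920;      /* assumed plane source width, pixels */
	unsigned int plane_bpp = 4;     /* bytes per pixel, e.g. XRGB8888 */
	unsigned int min_scanlines = 8; /* unrotated Y-tile case */

	unsigned int min_blocks =
		DIV_ROUND_UP(4 * src_w * plane_bpp, 512) * min_scanlines / 4 + 3;

	printf("minimum DDB allocation: %u blocks\n", min_blocks); /* prints 123 */
	return 0;
}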
+static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
{
+ struct drm_atomic_state *state = cstate->base.state;
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_wm_config *config = &dev_priv->wm.config;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
- uint16_t minimum[I915_MAX_PLANES];
- uint16_t y_minimum[I915_MAX_PLANES];
+ uint16_t *minimum = cstate->wm.skl.minimum_blocks;
+ uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
unsigned int total_data_rate;
+ int num_active;
+ int id, i;
- skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
+ if (WARN_ON(!state))
+ return 0;
+
+ if (!cstate->base.active) {
+ ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ return 0;
+ }
+
+ skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
- sizeof(ddb->plane[pipe][PLANE_CURSOR]));
- return;
+ return 0;
}
- cursor_blocks = skl_cursor_allocation(config);
+ cursor_blocks = skl_cursor_allocation(num_active);
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
alloc_size -= cursor_blocks;
- alloc->end -= cursor_blocks;
/* 1. Allocate the minimum required blocks for each active plane */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane *plane = &intel_plane->base;
- struct drm_framebuffer *fb = plane->state->fb;
- int id = skl_wm_plane_id(intel_plane);
+ for_each_plane_in_state(state, plane, pstate, i) {
+ intel_plane = to_intel_plane(plane);
+ id = skl_wm_plane_id(intel_plane);
- if (!to_intel_plane_state(plane->state)->visible)
+ if (intel_plane->pipe != pipe)
continue;
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (!to_intel_plane_state(pstate)->visible) {
+ minimum[id] = 0;
+ y_minimum[id] = 0;
+ continue;
+ }
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ minimum[id] = 0;
+ y_minimum[id] = 0;
continue;
+ }
+
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
- minimum[id] = 8;
- alloc_size -= minimum[id];
- y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
- alloc_size -= y_minimum[id];
+ for (i = 0; i < PLANE_CURSOR; i++) {
+ alloc_size -= minimum[i];
+ alloc_size -= y_minimum[i];
}
/*
@@ -3084,21 +3253,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* FIXME: we may not allocate every single block here.
*/
total_data_rate = skl_get_total_relative_data_rate(cstate);
+ if (total_data_rate == 0)
+ return 0;
start = alloc->start;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane *plane = &intel_plane->base;
- struct drm_plane_state *pstate = intel_plane->base.state;
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (!to_intel_plane_state(pstate)->visible)
- continue;
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- continue;
-
- data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
+ data_rate = cstate->wm.skl.plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3109,30 +3273,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
- ddb->plane[pipe][id].start = start;
- ddb->plane[pipe][id].end = start + plane_blocks;
+ /* Leave disabled planes at (0,0) */
+ if (data_rate) {
+ ddb->plane[pipe][id].start = start;
+ ddb->plane[pipe][id].end = start + plane_blocks;
+ }
start += plane_blocks;
/*
* allocation for y_plane part of planar format:
*/
- if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
- y_data_rate = skl_plane_relative_data_rate(cstate,
- pstate,
- 1);
- y_plane_blocks = y_minimum[id];
- y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
- total_data_rate);
+ y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+
+ y_plane_blocks = y_minimum[id];
+ y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
+ total_data_rate);
+ if (y_data_rate) {
ddb->y_plane[pipe][id].start = start;
ddb->y_plane[pipe][id].end = start + y_plane_blocks;
-
- start += y_plane_blocks;
}
+ start += y_plane_blocks;
}
+ return 0;
}
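The loop above hands each plane its minimum plus a share of the remaining blocks weighted by its cached data rate. A small sketch of that proportional split with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned int alloc_size = 500; /* blocks left after cursor and minima */
	unsigned int data_rate[3] = { 800000, 200000, 0 }; /* arbitrary units */
	unsigned int minimum[3]   = { 123, 27, 0 };
	unsigned long long total = 0;
	unsigned int start = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += data_rate[i];

	for (i = 0; i < 3; i++) {
		unsigned int blocks = minimum[i] +
			(unsigned int)(alloc_size * (unsigned long long)data_rate[i] / total);

		/* disabled planes (data rate 0) keep their (0, 0) entry */
		if (data_rate[i])
			printf("plane %d: [%u, %u)\n", i, start, start + blocks);
		start += blocks;
	}
	return 0;
}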
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3189,35 +3355,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
return ret;
}
-static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
- const struct intel_crtc *intel_crtc)
+static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
+ struct intel_plane_state *pstate)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ uint64_t adjusted_pixel_rate;
+ uint64_t downscale_amount;
+ uint64_t pixel_rate;
+
+ /* Shouldn't reach here on disabled planes... */
+ if (WARN_ON(!pstate->visible))
+ return 0;
/*
- * If ddb allocation of pipes changed, it may require recalculation of
- * watermarks
+ * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
+ * with additional adjustments for plane-specific scaling.
*/
- if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
- return true;
+ adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ downscale_amount = skl_plane_downscale_amount(pstate);
+
+ pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
+ WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
- return false;
+ return pixel_rate;
}
-static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
- struct intel_plane *intel_plane,
- uint16_t ddb_allocation,
- int level,
- uint16_t *out_blocks, /* out */
- uint8_t *out_lines /* out */)
+static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ struct intel_plane_state *intel_pstate,
+ uint16_t ddb_allocation,
+ int level,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines, /* out */
+ bool *enabled /* out */)
{
- struct drm_plane *plane = &intel_plane->base;
- struct drm_framebuffer *fb = plane->state->fb;
- struct intel_plane_state *intel_pstate =
- to_intel_plane_state(plane->state);
+ struct drm_plane_state *pstate = &intel_pstate->base;
+ struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3225,20 +3397,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t selected_result;
uint8_t cpp;
uint32_t width = 0, height = 0;
+ uint32_t plane_pixel_rate;
- if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
- return false;
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+ *enabled = false;
+ return 0;
+ }
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
- if (intel_rotation_90_or_270(plane->state->rotation))
+ if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
- cpp, latency);
- method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
+ plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+
+ method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
+ method2 = skl_wm_method2(plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
width,
cpp,
@@ -3252,7 +3428,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
- if (intel_rotation_90_or_270(plane->state->rotation)) {
+ if (intel_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3288,40 +3464,100 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_blocks++;
}
- if (res_blocks >= ddb_allocation || res_lines > 31)
- return false;
+ if (res_blocks >= ddb_allocation || res_lines > 31) {
+ *enabled = false;
+
+ /*
+ * If there are no valid level 0 watermarks, then we can't
+ * support this display configuration.
+ */
+ if (level) {
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
+ DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
+ to_intel_crtc(cstate->base.crtc)->pipe,
+ skl_wm_plane_id(to_intel_plane(pstate->plane)),
+ res_blocks, ddb_allocation, res_lines);
+
+ return -EINVAL;
+ }
+ }
*out_blocks = res_blocks;
*out_lines = res_lines;
+ *enabled = true;
- return true;
+ return 0;
}
-static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
- int level,
- struct skl_wm_level *result)
+static int
+skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct intel_crtc_state *cstate,
+ int level,
+ struct skl_wm_level *result)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_plane *plane;
struct intel_plane *intel_plane;
+ struct intel_plane_state *intel_pstate;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
+ int ret;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(result, 0, sizeof(*result));
+
+ for_each_intel_plane_mask(&dev_priv->drm,
+ intel_plane,
+ cstate->base.plane_mask) {
int i = skl_wm_plane_id(intel_plane);
+ plane = &intel_plane->base;
+ intel_pstate = NULL;
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
+
+ /*
+ * Note: If we start supporting multiple pending atomic commits
+ * against the same planes/CRTC's in the future, plane->state
+ * will no longer be the correct pre-state to use for the
+ * calculations here and we'll need to change where we get the
+ * 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit
+ * against a CRTC. Even if the plane isn't modified by this
+ * transaction and we don't have a plane lock, we still have
+ * the CRTC's lock, so we know that no other transactions are
+ * racing with us to update it.
+ */
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
+
+ WARN_ON(!intel_pstate->base.fb);
+
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- result->plane_en[i] = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_plane,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i]);
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b[i],
+ &result->plane_res_l[i],
+ &result->plane_en[i]);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static uint32_t
@@ -3355,21 +3591,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
}
}
-static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
- const struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev);
+ int ret;
for (level = 0; level <= max_level; level++) {
- skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ level, &pipe_wm->wm[level]);
+ if (ret)
+ return ret;
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
+
+ return 0;
}
static void skl_compute_wm_results(struct drm_device *dev,
@@ -3442,14 +3683,16 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
static void skl_write_wm_values(struct drm_i915_private *dev_priv,
const struct skl_wm_values *new)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc) {
int i, level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = crtc->pipe;
- if (!new->dirty[pipe])
+ if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
+ continue;
+ if (!crtc->active)
continue;
I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
@@ -3537,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
struct skl_wm_values *new_values)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct skl_ddb_allocation *cur_ddb, *new_ddb;
bool reallocated[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
@@ -3616,116 +3859,182 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
}
}
-static bool skl_update_pipe_wm(struct drm_crtc *crtc,
- struct skl_ddb_allocation *ddb, /* out */
- struct skl_pipe_wm *pipe_wm /* out */)
+static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+ struct skl_ddb_allocation *ddb, /* out */
+ struct skl_pipe_wm *pipe_wm, /* out */
+ bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
+ struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+ int ret;
- skl_allocate_pipe_ddb(cstate, ddb);
- skl_compute_pipe_wm(cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ if (ret)
+ return ret;
if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
- return false;
+ *changed = false;
+ else
+ *changed = true;
- intel_crtc->wm.active.skl = *pipe_wm;
+ return 0;
+}
- return true;
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ uint32_t i, ret = 0;
+
+ for_each_crtc_in_state(state, crtc, cstate, i)
+ ret |= drm_crtc_mask(crtc);
+
+ return ret;
}
-static void skl_update_other_pipe_wm(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct skl_wm_values *r)
+static int
+skl_compute_ddb(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct intel_crtc *intel_crtc;
- struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+ struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ uint32_t realloc_pipes = pipes_modified(state);
+ int ret;
/*
- * If the WM update hasn't changed the allocation for this_crtc (the
- * crtc we are currently computing the new WM values for), other
- * enabled crtcs will keep the same allocation and we don't need to
- * recompute anything for them.
+ * If this is our first atomic update following hardware readout,
+ * we can't trust the DDB that the BIOS programmed for us. Let's
+ * pretend that all pipes switched active status so that we'll
+ * ensure a full DDB recompute.
*/
- if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
- return;
+ if (dev_priv->wm.distrust_bios_wm)
+ intel_state->active_pipe_changes = ~0;
/*
- * Otherwise, because of this_crtc being freshly enabled/disabled, the
- * other active pipes need new DDB allocation and WM values.
+ * If the modeset changes which CRTC's are active, we need to
+ * recompute the DDB allocation for *all* active pipes, even
+ * those that weren't otherwise being modified in any way by this
+ * atomic commit. Due to the shrinking of the per-pipe allocations
+ * when new active CRTC's are added, it's possible for a pipe that
+ * we were already using and aren't changing at all here to suddenly
+ * become invalid if its DDB requirements exceed its new allocation.
+ *
+ * Note that if we wind up doing a full DDB recompute, we can't let
+ * any other display updates race with this transaction, so we need
+ * to grab the lock on *all* CRTC's.
*/
- for_each_intel_crtc(dev, intel_crtc) {
- struct skl_pipe_wm pipe_wm = {};
- bool wm_changed;
-
- if (this_crtc->pipe == intel_crtc->pipe)
- continue;
+ if (intel_state->active_pipe_changes) {
+ realloc_pipes = ~0;
+ intel_state->wm_results.dirty_pipes = ~0;
+ }
- if (!intel_crtc->active)
- continue;
+ for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+ struct intel_crtc_state *cstate;
- wm_changed = skl_update_pipe_wm(&intel_crtc->base,
- &r->ddb, &pipe_wm);
+ cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
- /*
- * If we end up re-computing the other pipe WM values, it's
- * because it was really needed, so we expect the WM values to
- * be different.
- */
- WARN_ON(!wm_changed);
-
- skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
- r->dirty[intel_crtc->pipe] = true;
+ ret = skl_allocate_pipe_ddb(cstate, ddb);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
-static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+static int
+skl_compute_wm(struct drm_atomic_state *state)
{
- watermarks->wm_linetime[pipe] = 0;
- memset(watermarks->plane[pipe], 0,
- sizeof(uint32_t) * 8 * I915_MAX_PLANES);
- memset(watermarks->plane_trans[pipe],
- 0, sizeof(uint32_t) * I915_MAX_PLANES);
- watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct skl_wm_values *results = &intel_state->wm_results;
+ struct skl_pipe_wm *pipe_wm;
+ bool changed = false;
+ int ret, i;
+
+ /*
+ * If this transaction isn't actually touching any CRTC's, don't
+ * bother with watermark calculation. Note that if we pass this
+ * test, we're guaranteed to hold at least one CRTC state mutex,
+ * which means we can safely use values like dev_priv->active_crtcs
+ * since any racing commits that want to update them would need to
+ * hold _all_ CRTC state mutexes.
+ */
+ for_each_crtc_in_state(state, crtc, cstate, i)
+ changed = true;
+ if (!changed)
+ return 0;
+
+ /* Clear all dirty flags */
+ results->dirty_pipes = 0;
+
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate WM's for all pipes that are part of this transaction.
+ * Note that the DDB allocation above may have added more CRTC's that
+ * weren't otherwise being modified (and set bits in dirty_pipes) if
+ * pipe allocations had to change.
+ *
+ * FIXME: Now that we're doing this in the atomic check phase, we
+ * should allow skl_update_pipe_wm() to return failure in cases where
+ * no suitable watermark values can be found.
+ */
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(cstate);
+
+ pipe_wm = &intel_cstate->wm.skl.optimal;
+ ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
+ &changed);
+ if (ret)
+ return ret;
+
+ if (changed)
+ results->dirty_pipes |= drm_crtc_mask(crtc);
- /* Clear ddb entries for pipe */
- memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
- memset(&watermarks->ddb.plane[pipe], 0,
- sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
- memset(&watermarks->ddb.y_plane[pipe], 0,
- sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
- memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
- sizeof(struct skl_ddb_entry));
+ if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ /* This pipe's WM's did not change */
+ continue;
+
+ intel_cstate->update_wm_pre = true;
+ skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
+ }
+ return 0;
}
static void skl_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
-
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- /* Clear all dirty flags */
- memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
-
- skl_clear_wm(results, intel_crtc->pipe);
-
- if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
+ if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
- skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
- results->dirty[intel_crtc->pipe] = true;
+ intel_crtc->wm.active.skl = *pipe_wm;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
- skl_update_other_pipe_wm(dev, crtc, results);
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
/* store the new configuration */
dev_priv->wm.skl_hw = *results;
+
+ mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3748,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
@@ -3785,7 +4094,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+ intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -3797,7 +4106,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_lock(&dev_priv->wm.wm_mutex);
if (cstate->wm.need_postvbl_update) {
- intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+ intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
ilk_program_watermarks(dev_priv);
}
mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3850,11 +4159,11 @@ static void skl_pipe_wm_active_state(uint32_t val,
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
+ struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
enum pipe pipe = intel_crtc->pipe;
int level, i, max_level;
uint32_t temp;
@@ -3877,7 +4186,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- hw->dirty[pipe] = true;
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
active->linetime = hw->wm_linetime[pipe];
@@ -3904,23 +4213,31 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
void skl_wm_get_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
skl_ddb_get_hw_state(dev_priv, ddb);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
skl_pipe_wm_get_hw_state(crtc);
+
+ if (dev_priv->active_crtcs) {
+ /* Fully recompute DDB on first atomic commit */
+ dev_priv->wm.distrust_bios_wm = true;
+ } else {
+ /* Easy/common case; just sanitize DDB now if everything is off */
+ memset(ddb, 0, sizeof(*ddb));
+ }
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+ struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
enum pipe pipe = intel_crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
@@ -4120,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
void ilk_wm_get_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
@@ -4182,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
*/
void intel_update_watermarks(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
@@ -4197,9 +4514,8 @@ DEFINE_SPINLOCK(mchdev_lock);
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
assert_spin_locked(&mchdev_lock);
@@ -4221,9 +4537,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
@@ -4280,7 +4595,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
- ironlake_set_drps(dev, fstart);
+ ironlake_set_drps(dev_priv, fstart);
dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4291,9 +4606,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
spin_unlock_irq(&mchdev_lock);
}
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
@@ -4308,7 +4622,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->ips.fstart);
+ ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4354,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
- if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+ if (val > dev_priv->rps.efficient_freq + 1 &&
+ val > dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
- if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+ if (val <= dev_priv->rps.efficient_freq &&
+ val < dev_priv->rps.cur_freq)
new_power = LOW_POWER;
- else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+ else if (val >= dev_priv->rps.rp0_freq &&
+ val > dev_priv->rps.cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
- if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+ if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+ val < dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
}
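A compact sketch of the three-state up/down hysteresis implemented by this switch, with invented frequency break points (the real ones come from the RPS bookkeeping in dev_priv):

#include <stdio.h>

enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

static enum rps_power next_power(enum rps_power cur, unsigned int val,
				 unsigned int cur_freq, unsigned int efficient_freq,
				 unsigned int rp1_freq, unsigned int rp0_freq)
{
	switch (cur) {
	case LOW_POWER:
		if (val > efficient_freq + 1 && val > cur_freq)
			return BETWEEN;
		break;
	case BETWEEN:
		if (val <= efficient_freq && val < cur_freq)
			return LOW_POWER;
		else if (val >= rp0_freq && val > cur_freq)
			return HIGH_POWER;
		break;
	case HIGH_POWER:
		if (val < ((rp1_freq + rp0_freq) >> 1) && val < cur_freq)
			return BETWEEN;
		break;
	}
	return cur;
}

int main(void)
{
	/* assumed frequencies in the driver's 50 MHz units */
	unsigned int cur = 8, efficient = 8, rp1 = 12, rp0 = 22;

	printf("%d\n", next_power(LOW_POWER, 14, cur, efficient, rp1, rp0)); /* 1: BETWEEN */
	printf("%d\n", next_power(BETWEEN, 23, cur, efficient, rp1, rp0));   /* 2: HIGH_POWER */
	return 0;
}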
@@ -4412,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
}
I915_WRITE(GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+ GT_INTERVAL_FROM_US(dev_priv,
+ ei_up * threshold_up / 100));
I915_WRITE(GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_down));
+ GT_INTERVAL_FROM_US(dev_priv, ei_down));
I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+ GT_INTERVAL_FROM_US(dev_priv,
+ ei_down * threshold_down / 100));
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
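The up/down thresholds written here are simply percentages of the evaluation intervals, converted to hardware units by GT_INTERVAL_FROM_US(). A sketch of the percentage step with assumed interval and threshold values (the real ones depend on the selected power state):

#include <stdio.h>

int main(void)
{
	unsigned int ei_up = 16000, threshold_up = 85;     /* us, % (assumed) */
	unsigned int ei_down = 32000, threshold_down = 60; /* us, % (assumed) */

	printf("up:   %u us of a %u us window\n", ei_up * threshold_up / 100, ei_up);
	printf("down: %u us of a %u us window\n", ei_down * threshold_down / 100, ei_down);
	return 0;
}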
@@ -4452,12 +4772,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
return;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4470,10 +4788,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -4495,15 +4813,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
-static void valleyview_set_rps(struct drm_device *dev, u8 val)
+static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
- if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+ if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
@@ -4536,7 +4852,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
/* Wake up the media well, as that takes a lot less
* power than the Render well. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
- valleyview_set_rps(dev_priv->dev, val);
+ valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
@@ -4548,22 +4864,36 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+ gen6_enable_rps_interrupts(dev_priv);
+
+ /* Ensure we start at the user's desired frequency */
+ intel_set_rps(dev_priv,
+ clamp(dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit));
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ /* Flush our bottom-half so that it does not race with us
+ * setting the idle frequency and so that it is bounded by
+ * our rpm wakeref. And then disable the interrupts to stop any
+ * further RPS reclocking whilst we are asleep.
+ */
+ gen6_disable_rps_interrupts(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4580,7 +4910,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
- if (!(dev_priv->mm.busy &&
+ if (!(dev_priv->gt.awake &&
dev_priv->rps.enabled &&
dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
return;
@@ -4596,7 +4926,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.client_boost = true;
- queue_work(dev_priv->wq, &dev_priv->rps.work);
+ schedule_work(&dev_priv->rps.work);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -4609,49 +4939,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->rps.client_lock);
}
-void intel_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- valleyview_set_rps(dev, val);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ valleyview_set_rps(dev_priv, val);
else
- gen6_set_rps(dev, val);
+ gen6_set_rps(dev_priv, val);
}
-static void gen9_disable_rc6(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN9_PG_ENABLE, 0);
}
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void cherryview_disable_rps(struct drm_device *dev)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
}
-static void valleyview_disable_rps(struct drm_device *dev)
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* we're doing forcewake before disabling RC6,
* This is what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4661,34 +4981,45 @@ static void valleyview_disable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
else
mode = 0;
}
- if (HAS_RC6p(dev))
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+ if (HAS_RC6p(dev_priv))
+ DRM_DEBUG_DRIVER("Enabling RC6 states: "
+ "RC6 %s RC6p %s RC6pp %s\n",
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
else
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+ DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
-static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
+ u32 rc_ctl;
+ int rc_sw_target;
+
+ rc_ctl = I915_READ(GEN6_RC_CONTROL);
+ rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+ RC_SW_TARGET_STATE_SHIFT;
+ DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+ DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
enable_rc6 = false;
}
@@ -4700,7 +5031,7 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
ggtt->stolen_reserved_size))) {
- DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+ DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -4708,31 +5039,40 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
- DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+ DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+ !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+ !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+ DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!I915_READ(GEN6_GFXPAUSE)) {
+ DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
enable_rc6 = false;
}
- if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_HW_ENABLE)) &&
- ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
- !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
- DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+ if (!I915_READ(GEN8_MISC_CTRL0)) {
+ DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
enable_rc6 = false;
}
return enable_rc6;
}
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
/* No RC6 before Ironlake and code is gone for ilk. */
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return 0;
if (!enable_rc6)
return 0;
- if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+ if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}
@@ -4741,33 +5081,28 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
- if (HAS_RC6p(dev))
+ if (HAS_RC6p(dev_priv))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
mask = INTEL_RC6_ENABLE;
if ((enable_rc6 & mask) != enable_rc6)
- DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
- enable_rc6 & mask, enable_rc6, mask);
+ DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
+ "(requested %d, valid %d)\n",
+ enable_rc6 & mask, enable_rc6, mask);
return enable_rc6 & mask;
}
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
return INTEL_RC6_ENABLE;
}
-int intel_enable_rc6(const struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
- return i915.enable_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t rp_state_cap;
u32 ddcc_status = 0;
int ret;
@@ -4775,7 +5110,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4791,8 +5126,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
@@ -4804,7 +5139,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHz units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
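
For reference, the stored RPS fields are in 50 MHz units on gen6-8, and after the GEN9_FREQ_SCALER multiplication (assumed to be 3 here) they are effectively in 50/3 ≈ 16.67 MHz units on SKL/KBL. A small self-contained sketch of the conversion back to MHz, using a made-up RP0 value:

#include <stdio.h>

#define GEN9_FREQ_SCALER 3	/* assumed value: gen9 uses 16.67 MHz granularity */

/* Convert a stored RPS frequency field to MHz. */
static int rps_field_to_mhz(unsigned int field, int is_gen9)
{
	/* gen6-8: field * 50 MHz; gen9: field * (50 / GEN9_FREQ_SCALER) MHz */
	return is_gen9 ? field * 50 / GEN9_FREQ_SCALER : field * 50;
}

int main(void)
{
	unsigned int rp0 = 22;	/* hypothetical RP0 field from RP_STATE_CAP */

	printf("gen8: %d MHz, gen9: %d MHz\n",
	       rps_field_to_mhz(rp0, 0),
	       rps_field_to_mhz(rp0 * GEN9_FREQ_SCALER, 1));
	return 0;
}
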
@@ -4821,7 +5156,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
if (dev_priv->rps.min_freq_softlimit == 0) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dev_priv->rps.min_freq_softlimit =
max_t(int, dev_priv->rps.efficient_freq,
intel_freq_opcode(dev_priv, 450));
@@ -4832,16 +5167,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_device *dev)
+static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
/*
* BIOS could leave the Hw Turbo enabled, so need to explicitly
* clear out the Control register just to avoid inconsistency
@@ -4851,7 +5184,7 @@ static void gen9_enable_rps(struct drm_device *dev)
* if the Turbo is left enabled in the Control register, as the
* Up/Down interrupts would remain masked.
*/
- gen9_disable_rps(dev);
+ gen9_disable_rps(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@@ -4870,14 +5203,13 @@ static void gen9_enable_rps(struct drm_device *dev)
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen9_enable_rc6(struct drm_device *dev)
+static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
@@ -4894,7 +5226,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 2b: Program RC6 thresholds.*/
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4903,7 +5235,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC_UCODE(dev))
+ if (HAS_GUC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4913,12 +5245,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -4934,19 +5266,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- if (NEEDS_WaRsDisableCoarsePowerGating(dev))
+ if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
}
-static void gen8_enable_rps(struct drm_device *dev)
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
@@ -4961,7 +5291,7 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
/* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4970,16 +5300,16 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- intel_print_rc6_info(dev, rc6_mask);
- if (IS_BROADWELL(dev))
+ intel_print_rc6_info(dev_priv, rc6_mask);
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
rc6_mask);
@@ -5020,14 +5350,13 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen6_enable_rps(struct drm_device *dev)
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
@@ -5054,7 +5383,7 @@ static void gen6_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5070,7 +5399,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5078,12 +5407,12 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
- rc6_mode = intel_enable_rc6(dev_priv->dev);
+ rc6_mode = intel_enable_rc6();
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
/* We don't use those on Haswell */
- if (!IS_HASWELL(dev)) {
+ if (!IS_HASWELL(dev_priv)) {
if (rc6_mode & INTEL_RC6p_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
@@ -5091,7 +5420,7 @@ static void gen6_enable_rps(struct drm_device *dev)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
- intel_print_rc6_info(dev, rc6_mask);
+ intel_print_rc6_info(dev_priv, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -5115,13 +5444,13 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev) && ret) {
+ if (IS_GEN6(dev_priv) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
@@ -5134,9 +5463,8 @@ static void gen6_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void __gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
@@ -5165,7 +5493,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5183,16 +5511,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
- } else if (INTEL_INFO(dev)->gen >= 8) {
+ } else if (INTEL_INFO(dev_priv)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
@@ -5219,26 +5547,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
}
}
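
The per-platform policy in __gen6_update_ring_freq() boils down to a small decision table; a standalone sketch, with mult_frac() reimplemented locally and the platform checks reduced to plain booleans for illustration:

#include <stdio.h>

/* Simplified stand-ins for the kernel helpers used above. */
#define mult_frac(x, n, d)	((x) * (n) / (d))
#define max(a, b)		((a) > (b) ? (a) : (b))

/* Pick a ring frequency for one GPU frequency step. */
static unsigned int pick_ring_freq(unsigned int gpu_freq,
				   unsigned int min_ring_freq,
				   int is_gen9, int is_gen8, int is_hsw)
{
	if (is_gen9)		/* ring_freq = 2 * GT, no DDR floor on SKL */
		return gpu_freq;
	if (is_gen8)		/* max(2 * GT, DDR); GT is in 50 MHz units */
		return max(min_ring_freq, gpu_freq);
	if (is_hsw)		/* ring at least 1.25x the GT frequency */
		return max(min_ring_freq, mult_frac(gpu_freq, 5, 4));
	return gpu_freq;	/* older parts are handled separately in the driver */
}

int main(void)
{
	printf("hsw ring freq for GT=16: %u\n", pick_ring_freq(16, 14, 0, 0, 1));
	return 0;
}
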
-void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_CORE_RING_FREQ(dev))
+ if (!HAS_CORE_RING_FREQ(dev_priv))
return;
mutex_lock(&dev_priv->rps.hw_lock);
- __gen6_update_ring_freq(dev);
+ __gen6_update_ring_freq(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev)->eu_total) {
+ switch (INTEL_INFO(dev_priv)->eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5349,9 +5674,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
-static void cherryview_setup_pctx(struct drm_device *dev)
+static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
u32 pcbr;
@@ -5370,15 +5694,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
-static void valleyview_setup_pctx(struct drm_device *dev)
+static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
u32 pcbr;
int pctx_size = 24*1024;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
@@ -5386,7 +5709,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
int pcbr_offset;
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
- pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+ pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
pcbr_offset,
I915_GTT_OFFSET_NONE,
pctx_size);
@@ -5403,7 +5726,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
- pctx = i915_gem_object_create_stolen(dev, pctx_size);
+ pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
goto out;
@@ -5415,13 +5738,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
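
The PCBR decode above masks off the low 12 bits to recover the page-aligned context address before subtracting the stolen-memory base; a tiny sketch of that arithmetic with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned int pcbr = 0x7f800013;		/* hypothetical PCBR register value */
	unsigned int stolen_base = 0x7f000000;	/* hypothetical stolen memory base */
	unsigned int pcbr_offset = (pcbr & ~4095u) - stolen_base;

	/* The low bits carry no address information, so the context sits on a 4K boundary. */
	printf("pctx offset into stolen memory: 0x%08x\n", pcbr_offset);
	return 0;
}
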
-static void valleyview_cleanup_pctx(struct drm_device *dev)
+static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (WARN_ON(!dev_priv->vlv_pctx))
return;
@@ -5440,12 +5761,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
dev_priv->rps.gpll_ref_freq);
}
-static void valleyview_init_gt_powersave(struct drm_device *dev)
+static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
- valleyview_setup_pctx(dev);
+ valleyview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
@@ -5499,12 +5819,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void cherryview_init_gt_powersave(struct drm_device *dev)
+static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
- cherryview_setup_pctx(dev);
+ cherryview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
@@ -5564,14 +5883,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- valleyview_cleanup_pctx(dev);
+ valleyview_cleanup_pctx(dev_priv);
}
-static void cherryview_enable_rps(struct drm_device *dev)
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
@@ -5616,8 +5934,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
- (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5662,14 +5980,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void valleyview_enable_rps(struct drm_device *dev)
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
@@ -5722,10 +6039,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
- intel_print_rc6_info(dev, rc6_mode);
+ intel_print_rc6_info(dev_priv, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5752,7 +6069,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5842,10 +6159,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -5885,11 +6201,10 @@ static int _pxvid_to_vd(u8 pxvid)
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- struct drm_device *dev = dev_priv->dev;
const int vd = _pxvid_to_vd(pxvid);
const int vm = vd - 1125;
- if (INTEL_INFO(dev)->is_mobile)
+ if (INTEL_INFO(dev_priv)->is_mobile)
return vm > 0 ? vm : 0;
return vd;
@@ -5930,9 +6245,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return;
spin_lock_irq(&mchdev_lock);
@@ -5981,10 +6294,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -6125,7 +6437,7 @@ bool i915_gpu_turbo_disable(void)
dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+ if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
ret = false;
out_unlock:
@@ -6173,9 +6485,8 @@ void intel_gpu_ips_teardown(void)
spin_unlock_irq(&mchdev_lock);
}
-static void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
u8 pxw[16];
int i;
@@ -6244,10 +6555,8 @@ static void intel_init_emon(struct drm_device *dev)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_gt_powersave(struct drm_device *dev)
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
@@ -6257,74 +6566,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
}
- if (IS_CHERRYVIEW(dev))
- cherryview_init_gt_powersave(dev);
- else if (IS_VALLEYVIEW(dev))
- valleyview_init_gt_powersave(dev);
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_init_gt_powersave(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_init_gt_powersave(dev_priv);
}
-void intel_cleanup_gt_powersave(struct drm_device *dev)
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return;
- else if (IS_VALLEYVIEW(dev))
- valleyview_cleanup_gt_powersave(dev);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_cleanup_gt_powersave(dev_priv);
if (!i915.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
-static void gen6_suspend_rps(struct drm_device *dev)
+static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev_priv);
}
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev: drm device
+ * @dev_priv: i915 device
*
* We don't want to disable RC6 or other features here; we just want
* to make sure any work we've queued has finished and won't bother
* us while we're suspended.
*/
-void intel_suspend_gt_powersave(struct drm_device *dev)
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
- gen6_suspend_rps(dev);
+ gen6_suspend_rps(dev_priv);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
}
-void intel_disable_gt_powersave(struct drm_device *dev)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_disable_drps(dev);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- intel_suspend_gt_powersave(dev);
+ if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_disable_drps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 6) {
+ intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
- if (INTEL_INFO(dev)->gen >= 9) {
- gen9_disable_rc6(dev);
- gen9_disable_rps(dev);
- } else if (IS_CHERRYVIEW(dev))
- cherryview_disable_rps(dev);
- else if (IS_VALLEYVIEW(dev))
- valleyview_disable_rps(dev);
+ if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_disable_rc6(dev_priv);
+ gen9_disable_rps(dev_priv);
+ } else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_disable_rps(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_disable_rps(dev_priv);
else
- gen6_disable_rps(dev);
+ gen6_disable_rps(dev_priv);
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6336,27 +6637,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
rps.delayed_resume_work.work);
- struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev_priv);
- if (IS_CHERRYVIEW(dev)) {
- cherryview_enable_rps(dev);
- } else if (IS_VALLEYVIEW(dev)) {
- valleyview_enable_rps(dev);
- } else if (INTEL_INFO(dev)->gen >= 9) {
- gen9_enable_rc6(dev);
- gen9_enable_rps(dev);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- __gen6_update_ring_freq(dev);
- } else if (IS_BROADWELL(dev)) {
- gen8_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_enable_rps(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ valleyview_enable_rps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_enable_rc6(dev_priv);
+ gen9_enable_rps(dev_priv);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ __gen6_update_ring_freq(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ gen8_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
} else {
- gen6_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ gen6_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
}
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6367,27 +6667,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
dev_priv->rps.enabled = true;
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
}
-void intel_enable_gt_powersave(struct drm_device *dev)
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
return;
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- mutex_lock(&dev->struct_mutex);
- intel_init_emon(dev);
- mutex_unlock(&dev->struct_mutex);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_enable_drps(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_init_emon(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ } else if (INTEL_INFO(dev_priv)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -6406,20 +6704,18 @@ void intel_enable_gt_powersave(struct drm_device *dev)
}
}
-void intel_reset_gt_powersave(struct drm_device *dev)
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return;
- gen6_suspend_rps(dev);
+ gen6_suspend_rps(dev_priv);
dev_priv->rps.enabled = false;
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -6431,7 +6727,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -6446,7 +6742,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6460,7 +6756,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
static void ironlake_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
@@ -6534,7 +6830,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
static void cpt_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
uint32_t val;
@@ -6571,7 +6867,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
static void gen6_check_mch_setup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
@@ -6582,7 +6878,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
static void gen6_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6697,7 +6993,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
static void lpt_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* TODO: this bit should only be enabled when really needed, then
@@ -6716,7 +7012,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
static void lpt_suspend_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_LPT_LP(dev)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6726,6 +7022,29 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+ int general_prio_credits,
+ int high_prio_credits)
+{
+ u32 misccpctl;
+
+ /* WaTempDisableDOPClkGating:bdw */
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+ I915_WRITE(GEN8_L3SQCREG1,
+ L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
+ L3_HIGH_PRIO_CREDITS(high_prio_credits));
+
+ /*
+ * Wait at least 100 clocks before re-enabling clock gating.
+ * See the definition of L3SQCREG1 in BSpec.
+ */
+ POSTING_READ(GEN8_L3SQCREG1);
+ udelay(1);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
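
For reference, the new helper is called twice later in this patch; the credit values below are taken from those hunks rather than from any spec quoted here:

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaProgramL3SqcReg1Default:chv, per the LSQC setting recommendations */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
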
+
static void kabylake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6753,6 +7072,10 @@ static void skylake_init_clock_gating(struct drm_device *dev)
gen9_init_clock_gating(dev);
+ /* WAC6entrylatency:skl */
+ I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ FBC_LLC_FULLY_OPEN);
+
/* WaFbcNukeOnHostModify:skl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
@@ -6760,9 +7083,8 @@ static void skylake_init_clock_gating(struct drm_device *dev)
static void broadwell_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
- uint32_t misccpctl;
ilk_init_lp_watermarks(dev);
@@ -6793,20 +7115,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
- /*
- * WaProgramL3SqcReg1Default:bdw
- * WaTempDisableDOPClkGating:bdw
- */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
- /*
- * Wait at least 100 clocks before re-enabling clock gating. See
- * the definition of L3SQCREG1 in BSpec.
- */
- POSTING_READ(GEN8_L3SQCREG1);
- udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ /* WaProgramL3SqcReg1Default:bdw */
+ gen8_set_l3sqc_credits(dev_priv, 30, 2);
/*
* WaGttCachingOffByDefault:bdw
@@ -6815,12 +7125,16 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+ /* WaKVMNotificationOnConfigChange:bdw */
+ I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+ | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
+
lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
ilk_init_lp_watermarks(dev);
@@ -6876,7 +7190,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t snpcr;
ilk_init_lp_watermarks(dev);
@@ -6974,7 +7288,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
static void valleyview_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -7056,7 +7370,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
static void cherryview_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -7077,6 +7391,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
+ * WaProgramL3SqcReg1Default:chv
+ * See gfxspecs/Related Documents/Performance Guide/
+ * LSQC Setting Recommendations.
+ */
+ gen8_set_l3sqc_credits(dev_priv, 38, 2);
+
+ /*
* GTT cache may not work with big pages, so if those
* are ever enabled GTT cache may need to be disabled.
*/
@@ -7085,7 +7406,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
static void g4x_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7112,7 +7433,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
static void crestline_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7128,7 +7449,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
static void broadwater_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
@@ -7145,7 +7466,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
static void gen3_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7170,7 +7491,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
static void i85x_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
@@ -7184,7 +7505,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
static void i830_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
@@ -7195,7 +7516,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
void intel_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->display.init_clock_gating(dev);
}
@@ -7263,7 +7584,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_fbc_init(dev_priv);
@@ -7277,6 +7598,7 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
@@ -7340,46 +7662,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+ * use the fw I915_READ variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
return -EAGAIN;
}
- I915_WRITE(GEN6_PCODE_DATA, *val);
- I915_WRITE(GEN6_PCODE_DATA1, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+ I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+ I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+ I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500)) {
+ if (intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500)) {
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
- *val = I915_READ(GEN6_PCODE_DATA);
- I915_WRITE(GEN6_PCODE_DATA, 0);
+ *val = I915_READ_FW(GEN6_PCODE_DATA);
+ I915_WRITE_FW(GEN6_PCODE_DATA, 0);
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+ u32 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+ * use the fw I915_READ variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
return -EAGAIN;
}
- I915_WRITE(GEN6_PCODE_DATA, val);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+ I915_WRITE_FW(GEN6_PCODE_DATA, val);
+ I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500)) {
+ if (intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
- I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE_FW(GEN6_PCODE_DATA, 0);
return 0;
}
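
Both pcode helpers follow the same mailbox handshake: bail out if the mailbox is still busy, write the payload, kick the mailbox with READY plus the command, poll for READY to clear, then read back the data. A self-contained sketch of that pattern against a fake register file; the READY bit position and everything outside the handshake itself are assumptions made for illustration:

#include <stdio.h>
#include <stdint.h>

#define PCODE_READY	(1u << 31)	/* assumed position of the READY bit */

/* Fake register file standing in for GEN6_PCODE_{MAILBOX,DATA,DATA1}. */
static uint32_t mailbox, data, data1;

/* Fake hardware: "completes" a command by clearing READY and filling data. */
static void fake_hw_tick(void)
{
	if (mailbox & PCODE_READY) {
		data = 0xcafe;		/* pretend pcode produced a result */
		mailbox &= ~PCODE_READY;
	}
}

static int pcode_read(uint32_t mbox, uint32_t *val)
{
	int polls = 500;		/* stands in for the 500 ms timeout */

	if (mailbox & PCODE_READY)
		return -1;		/* previous command still in flight */

	data = *val;
	data1 = 0;
	mailbox = PCODE_READY | mbox;

	while ((mailbox & PCODE_READY) && polls--)
		fake_hw_tick();		/* the real code polls the register */
	if (mailbox & PCODE_READY)
		return -2;		/* timed out */

	*val = data;
	data = 0;
	return 0;
}

int main(void)
{
	uint32_t val = 0;

	printf("pcode_read: %d, val=0x%x\n", pcode_read(0x5, &val), (unsigned int)val);
	return 0;
}
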
@@ -7449,23 +7784,21 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct request_boost *boost = container_of(work, struct request_boost, work);
struct drm_i915_gem_request *req = boost->req;
- if (!i915_gem_request_completed(req, true))
- gen6_rps_boost(to_i915(req->engine->dev), NULL,
- req->emitted_jiffies);
+ if (!i915_gem_request_completed(req))
+ gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
- i915_gem_request_unreference__unlocked(req);
+ i915_gem_request_unreference(req);
kfree(boost);
}
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *req)
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
struct request_boost *boost;
- if (req == NULL || INTEL_INFO(dev)->gen < 6)
+ if (req == NULL || INTEL_GEN(req->i915) < 6)
return;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return;
boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7476,12 +7809,12 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
boost->req = req;
INIT_WORK(&boost->work, __intel_rps_boost_work);
- queue_work(to_i915(dev)->wq, &boost->work);
+ queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e9589b..2b0d1baf15b3 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t val;
val = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t val;
@@ -173,10 +173,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t aux_clock_divider;
i915_reg_t aux_ctl_reg;
- int precharge = 0x3;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[4] = DP_SET_POWER_D0,
};
enum port port = dig_port->port;
+ u32 aux_ctl;
int i;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
+ if (dev_priv->psr.link_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+
aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
/* Setup AUX registers */
@@ -204,40 +211,16 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
- if (INTEL_INFO(dev)->gen >= 9) {
- uint32_t val;
-
- val = I915_READ(aux_ctl_reg);
- val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
- val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
- val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
- val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
- /* Use hardcoded data values for PSR, frame sync and GTC */
- val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
- val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
- val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
- I915_WRITE(aux_ctl_reg, val);
- } else {
- I915_WRITE(aux_ctl_reg,
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
- }
-
- if (dev_priv->psr.link_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE);
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_clock_divider);
+ I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -252,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -269,17 +252,17 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* It was recently identified that, depending on the panel, the idle
* frame count calculated by HW can be off by 1. So let's use what
* comes from VBT + 1.
* There are also panels that demand at least 4 idle frames but for
* which VBT is not set. To cover both cases, use at least 5 when VBT
* isn't set, to stay on the safe side.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
uint32_t val = EDP_PSR_ENABLE;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
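
A small sketch of the idle-frame policy the comment above describes — VBT + 1 when VBT supplies a value, and at least 5 otherwise; how the driver enforces the fallback is outside the lines shown in this hunk:

#include <stdio.h>

/* Trust VBT + 1 to absorb the hardware off-by-one; fall back to 5 when
 * VBT provides nothing (illustrative policy only).
 */
static unsigned int pick_idle_frames(unsigned int vbt_idle_frames)
{
	return vbt_idle_frames ? vbt_idle_frames + 1 : 5;
}

int main(void)
{
	printf("VBT=2 -> %u, VBT unset -> %u\n",
	       pick_idle_frames(2), pick_idle_frames(0));
	return 0;
}
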
@@ -341,9 +324,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config->base.adjusted_mode;
+ int psr_setup_time;
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -382,11 +368,25 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
}
if (IS_HASWELL(dev) &&
- intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
+ psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+ if (psr_setup_time < 0) {
+ DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
+ return false;
+ }
+
+ if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+ DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
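
A worked example of the new setup-time check, assuming the usual microseconds-to-scanlines conversion (usecs * crtc_clock / (1000 * crtc_htotal), rounded up) and a hypothetical 1080p timing:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical 1080p timing: 148500 kHz pixel clock, htotal 2200,
	 * vtotal 1125, vdisplay 1080; PSR setup time of 330 us from DPCD.
	 */
	int clock_khz = 148500, htotal = 2200, vtotal = 1125, vdisplay = 1080;
	int psr_setup_time = 330;
	int setup_lines = DIV_ROUND_UP(psr_setup_time * clock_khz, 1000 * htotal);
	int vblank_lines = vtotal - vdisplay - 1;

	/* 23 setup scanlines fit comfortably in the 44-line vblank budget. */
	printf("need %d scanlines, have %d\n", setup_lines, vblank_lines);
	return setup_lines <= vblank_lines ? 0 : 1;
}
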
@@ -395,7 +395,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
@@ -424,7 +424,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
if (!HAS_PSR(dev)) {
@@ -511,15 +511,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
- if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
- VLV_EDP_PSR_IN_TRANS) == 0, 1))
+ if (intel_wait_for_register(dev_priv,
+ VLV_PSRSTAT(intel_crtc->pipe),
+ VLV_EDP_PSR_IN_TRANS,
+ 0,
+ 1))
WARN(1, "PSR transition took longer than expected\n");
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
@@ -538,16 +541,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL,
I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
- EDP_PSR_STATUS_STATE_MASK) == 0,
- 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR_STATUS_CTL,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
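
Several hunks in this file replace open-coded wait_for((READ(reg) & mask) == value, timeout) loops with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). A self-contained sketch of the semantics being relied on — poll until (reg & mask) == value or the budget runs out; the real helper additionally deals with forcewake and sleeping waits:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_status;	/* stands in for an MMIO status register */

static uint32_t read_reg(void)
{
	uint32_t v = fake_status;

	/* Pretend the hardware clears the busy bits after a few polls. */
	if (fake_status)
		fake_status >>= 1;
	return v;
}

/* Poll until (reg & mask) == value, or the poll budget is exhausted.
 * Returns 0 on success and -1 on timeout, mirroring the 0/-ETIMEDOUT split.
 */
static int wait_for_register(uint32_t mask, uint32_t value, int polls)
{
	while (polls--) {
		if ((read_reg() & mask) == value)
			return 0;
	}
	return -1;
}

int main(void)
{
	fake_status = 0x8;	/* "busy" bit set at the start */
	printf("wait result: %d\n", wait_for_register(0x8, 0, 10));
	return 0;
}
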
@@ -566,7 +571,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -603,14 +608,20 @@ static void intel_psr_work(struct work_struct *work)
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv)) {
- if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR_STATUS_CTL,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
} else {
- if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+ if (intel_wait_for_register(dev_priv,
+ VLV_PSRSTAT(pipe),
+ VLV_EDP_PSR_IN_TRANS,
+ 0,
+ 1)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
@@ -636,7 +647,7 @@ unlock:
static void intel_psr_exit(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -691,7 +702,7 @@ static void intel_psr_exit(struct drm_device *dev)
void intel_psr_single_frame_update(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
u32 val;
@@ -739,7 +750,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -777,7 +788,7 @@ void intel_psr_invalidate(struct drm_device *dev,
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -813,7 +824,7 @@ void intel_psr_flush(struct drm_device *dev,
*/
void intel_psr_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 68c5af079ef8..cca7792f26d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
int __intel_ring_space(int head, int tail, int size)
{
int space = head - tail;
@@ -53,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
-bool intel_engine_stopped(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
- return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
-}
-
static void __intel_ring_advance(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
- if (intel_engine_stopped(engine))
- return;
engine->write_tail(engine, ringbuf->tail);
}
@@ -101,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 flush_domains)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
@@ -140,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
cmd |= MI_EXE_FLUSH;
if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(dev) || IS_GEN5(dev)))
+ (IS_G4X(req->i915) || IS_GEN5(req->i915)))
cmd |= MI_INVALIDATE_ISP;
ret = intel_ring_begin(req, 2);
@@ -426,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
static void ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_TAIL(engine, value);
}
u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_INFO(engine->dev)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
@@ -448,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(engine->dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -503,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ if (IS_GEN(dev_priv, 6, 7)) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush*/
@@ -512,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
- if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
- 1000))
+ if (intel_wait_for_register(dev_priv,
+ reg, INSTPM_SYNC_FLUSH, 0,
+ 1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
}
@@ -521,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
static bool stop_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ RING_MI_MODE(engine->mmio_base),
+ MODE_IDLE,
+ MODE_IDLE,
+ 1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
/* Sometimes we observe that the idle flag is not
@@ -541,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_HEAD(engine, 0);
engine->write_tail(engine, 0);
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
@@ -556,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
static int init_ring_common(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_ringbuffer *ringbuf = engine->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
@@ -587,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
}
}
- if (I915_NEED_GFX_HWS(dev))
+ if (I915_NEED_GFX_HWS(dev_priv))
intel_ring_setup_status_page(engine);
else
ring_setup_phys_status_page(engine);
@@ -641,59 +640,42 @@ out:
return ret;
}
-void
-intel_fini_pipe_control(struct intel_engine_cs *engine)
+void intel_fini_pipe_control(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
-
if (engine->scratch.obj == NULL)
return;
- if (INTEL_INFO(dev)->gen >= 5) {
- kunmap(sg_page(engine->scratch.obj->pages->sgl));
- i915_gem_object_ggtt_unpin(engine->scratch.obj);
- }
-
+ i915_gem_object_ggtt_unpin(engine->scratch.obj);
drm_gem_object_unreference(&engine->scratch.obj->base);
engine->scratch.obj = NULL;
}
-int
-intel_init_pipe_control(struct intel_engine_cs *engine)
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
{
+ struct drm_i915_gem_object *obj;
int ret;
WARN_ON(engine->scratch.obj);
- engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
- if (engine->scratch.obj == NULL) {
- DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
+ obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+ if (!obj)
+ obj = i915_gem_object_create(&engine->i915->drm, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ ret = PTR_ERR(obj);
goto err;
}
- ret = i915_gem_object_set_cache_level(engine->scratch.obj,
- I915_CACHE_LLC);
- if (ret)
- goto err_unref;
-
- ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
if (ret)
goto err_unref;
- engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
- engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
- if (engine->scratch.cpu_page == NULL) {
- ret = -ENOMEM;
- goto err_unpin;
- }
-
+ engine->scratch.obj = obj;
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
engine->name, engine->scratch.gtt_offset);
return 0;
-err_unpin:
- i915_gem_object_ggtt_unpin(engine->scratch.obj);
err_unref:
drm_gem_object_unreference(&engine->scratch.obj->base);
err:
@@ -702,11 +684,9 @@ err:
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- int ret, i;
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
+ int ret, i;
if (w->count == 0)
return 0;
@@ -795,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[engine->id];
@@ -811,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -863,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
@@ -885,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
@@ -911,8 +888,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
@@ -937,14 +913,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -970,8 +946,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@@ -1035,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -1077,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
@@ -1090,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1104,30 +1078,30 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
/* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
/* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1135,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableGafsUnitClkGating:skl */
WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ /* WaInPlaceDecompressionHang:skl */
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
@@ -1145,9 +1124,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
@@ -1155,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
@@ -1168,8 +1146,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+ WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
/* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1179,7 +1163,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
@@ -1189,17 +1173,27 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return ret;
}
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+ I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
/* WaInsertDummyPushConstPs:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ /* WaInPlaceDecompressionHang:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
return 0;
}
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
@@ -1241,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaInPlaceDecompressionHang:kbl */
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
/* WaDisableLSQCROPERFforOCL:kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
@@ -1251,24 +1249,23 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
int init_workarounds_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
return bdw_init_workarounds(engine);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return chv_init_workarounds(engine);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
return skl_init_workarounds(engine);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return bxt_init_workarounds(engine);
if (IS_KABYLAKE(dev_priv))
@@ -1279,14 +1276,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
static int init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret = init_ring_common(engine);
if (ret)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+ if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
@@ -1295,22 +1291,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
@@ -1320,19 +1316,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (HAS_L3_DPF(dev))
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+ if (INTEL_INFO(dev_priv)->gen >= 6)
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
return init_workarounds_ring(engine);
}
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (dev_priv->semaphore_obj) {
i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1348,13 +1343,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
{
#define MBOX_UPDATE_DWORDS 8
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
@@ -1363,19 +1357,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
return ret;
for_each_engine_id(waiter, dev_priv, id) {
- u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_FLUSH_ENABLE);
+ PIPE_CONTROL_CS_STALL);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, seqno);
+ intel_ring_emit(signaller, signaller_req->seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1390,13 +1382,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
{
#define MBOX_UPDATE_DWORDS 6
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
@@ -1405,18 +1396,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
return ret;
for_each_engine_id(waiter, dev_priv, id) {
- u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, seqno);
+ intel_ring_emit(signaller, signaller_req->seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
@@ -1429,14 +1418,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *useless;
enum intel_engine_id id;
int ret, num_rings;
#define MBOX_UPDATE_DWORDS 3
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
@@ -1448,11 +1436,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
if (i915_mmio_reg_valid(mbox_reg)) {
- u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(signaller, mbox_reg);
- intel_ring_emit(signaller, seqno);
+ intel_ring_emit(signaller, signaller_req->seqno);
}
}
@@ -1488,17 +1474,45 @@ gen6_add_request(struct drm_i915_gem_request *req)
intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
intel_ring_emit(engine,
I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, req->seqno);
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
+
+ return 0;
+}
+
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *engine = req->engine;
+ int ret;
+
+ if (engine->semaphore.signal)
+ ret = engine->semaphore.signal(req, 8);
+ else
+ ret = intel_ring_begin(req, 8);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+ intel_ring_emit(engine, 0);
intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ /* We're thrashing one dword of HWS. */
+ intel_ring_emit(engine, 0);
intel_ring_emit(engine, MI_USER_INTERRUPT);
+ intel_ring_emit(engine, MI_NOOP);
__intel_ring_advance(engine);
return 0;
}
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->last_seqno < seqno;
}
@@ -1516,7 +1530,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
u32 seqno)
{
struct intel_engine_cs *waiter = waiter_req->engine;
- struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ struct drm_i915_private *dev_priv = waiter_req->i915;
+ u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+ struct i915_hw_ppgtt *ppgtt;
int ret;
ret = intel_ring_begin(waiter_req, 4);
@@ -1525,14 +1541,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD);
intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter,
- lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
- intel_ring_emit(waiter,
- upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_emit(waiter, lower_32_bits(offset));
+ intel_ring_emit(waiter, upper_32_bits(offset));
intel_ring_advance(waiter);
+
+ /* When the !RCS engines idle waiting upon a semaphore, they lose their
+ * pagetables and we must reload them before executing the batch.
+ * We do this on the i915_switch_context() following the wait and
+ * before the dispatch.
+ */
+ ppgtt = waiter_req->ctx->ppgtt;
+ if (ppgtt && waiter_req->engine->id != RCS)
+ ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
return 0;
}
@@ -1561,7 +1583,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
return ret;
/* If seqno wrap happened, omit the wait with no-ops */
- if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
intel_ring_emit(waiter, dw1 | wait_mbox);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
@@ -1577,72 +1599,28 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
return 0;
}
-#define PIPE_CONTROL_FLUSH(ring__, addr__) \
-do { \
- intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
- PIPE_CONTROL_DEPTH_STALL); \
- intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
- intel_ring_emit(ring__, 0); \
- intel_ring_emit(ring__, 0); \
-} while (0)
-
-static int
-pc_render_add_request(struct drm_i915_gem_request *req)
+static void
+gen5_seqno_barrier(struct intel_engine_cs *ring)
{
- struct intel_engine_cs *engine = req->engine;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- int ret;
-
- /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
- * incoherent with writes to memory, i.e. completely fubar,
- * so we need to use PIPE_NOTIFY instead.
+ /* MI_STORE are internally buffered by the GPU and not flushed
+ * either by MI_FLUSH or SyncFlush or any other combination of
+ * MI commands.
*
- * However, we also need to workaround the qword write
- * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
- * memory before requesting an interrupt.
+ * "Only the submission of the store operation is guaranteed.
+ * The write result will be complete (coherent) some time later
+ * (this is practically a finite period but there is no guaranteed
+ * latency)."
+ *
+ * Empirically, we observe that we need a delay of at least 75us to
+ * be sure that the seqno write is visible by the CPU.
*/
- ret = intel_ring_begin(req, 32);
- if (ret)
- return ret;
-
- intel_ring_emit(engine,
- GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(engine,
- engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
- intel_ring_emit(engine, 0);
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
-
- intel_ring_emit(engine,
- GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
- PIPE_CONTROL_NOTIFY);
- intel_ring_emit(engine,
- engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
- intel_ring_emit(engine, 0);
- __intel_ring_advance(engine);
-
- return 0;
+ usleep_range(125, 250);
}
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
@@ -1664,133 +1642,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
spin_unlock_irq(&dev_priv->uncore.lock);
}
-static u32
-ring_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
static void
-ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *engine)
+gen5_irq_enable(struct intel_engine_cs *engine)
{
- return engine->scratch.cpu_page[0];
+ gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
-pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- engine->scratch.cpu_page[0] = seqno;
-}
-
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+gen5_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0)
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0)
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-}
-
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
-{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (!intel_irqs_enabled(dev_priv))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ struct drm_i915_private *dev_priv = engine->i915;
- return true;
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ dev_priv->irq_mask |= engine->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
}
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (!intel_irqs_enabled(dev_priv))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
- POSTING_READ16(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ struct drm_i915_private *dev_priv = engine->i915;
- return true;
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(RING_IMR(engine->mmio_base));
}
static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
- POSTING_READ16(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ dev_priv->irq_mask |= engine->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
}
static int
@@ -1824,135 +1723,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
intel_ring_emit(engine,
I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, req->seqno);
intel_ring_emit(engine, MI_USER_INTERRUPT);
__intel_ring_advance(engine);
return 0;
}
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- GT_PARITY_ERROR(dev)));
- else
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ struct drm_i915_private *dev_priv = engine->i915;
- return true;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
+ engine->irq_keep_mask));
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
- else
- I915_WRITE_IMR(engine, ~0);
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~0);
+ gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
- } else {
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- }
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
+ engine->irq_keep_mask));
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
- I915_WRITE_IMR(engine,
- ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
- } else {
- I915_WRITE_IMR(engine, ~0);
- }
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int
@@ -2066,12 +1898,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah)
return;
- drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+ drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
engine->status_page.page_addr = NULL;
}
@@ -2097,10 +1929,10 @@ static int init_status_page(struct intel_engine_cs *engine)
unsigned flags;
int ret;
- obj = i915_gem_alloc_object(engine->dev, 4096);
- if (obj == NULL) {
+ obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ return PTR_ERR(obj);
}
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2108,7 +1940,7 @@ static int init_status_page(struct intel_engine_cs *engine)
goto err_unref;
flags = 0;
- if (!HAS_LLC(engine->dev))
+ if (!HAS_LLC(engine->i915))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
@@ -2142,11 +1974,11 @@ err_unref:
static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
- drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
@@ -2159,20 +1991,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
+ GEM_BUG_ON(ringbuf->vma == NULL);
+ GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
i915_gem_object_unpin_map(ringbuf->obj);
else
- iounmap(ringbuf->virtual_start);
+ i915_vma_unpin_iomap(ringbuf->vma);
ringbuf->virtual_start = NULL;
- ringbuf->vma = NULL;
+
i915_gem_object_ggtt_unpin(ringbuf->obj);
+ ringbuf->vma = NULL;
}
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2206,10 +2040,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- addr = ioremap_wc(ggtt->mappable_base +
- i915_gem_obj_ggtt_offset(obj), ringbuf->size);
- if (addr == NULL) {
- ret = -ENOMEM;
+ addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto err_unpin;
}
}
@@ -2238,9 +2071,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, ringbuf->size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, ringbuf->size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
@@ -2272,13 +2105,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->dev) || IS_845G(engine->dev))
+ if (IS_I830(engine->i915) || IS_845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
intel_ring_update_space(ring);
- ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+ ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
@@ -2298,15 +2131,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
kfree(ring);
}
+static int intel_ring_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+ int ret;
+
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+ if (ce->pin_count++)
+ return 0;
+
+ if (ce->state) {
+ ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+ if (ret)
+ goto error;
+ }
+
+ /* The kernel context is only used as a placeholder for flushing the
+ * active context. It is never used for submitting user rendering and
+ * as such never requires the golden render context, and so we can skip
+ * emitting it when we switch to the kernel context. This is required
+ * as during eviction we cannot allocate and pin the renderstate in
+ * order to initialise the context.
+ */
+ if (ctx == ctx->i915->kernel_context)
+ ce->initialised = true;
+
+ i915_gem_context_reference(ctx);
+ return 0;
+
+error:
+ ce->pin_count = 0;
+ return ret;
+}
+
+static void intel_ring_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+ if (--ce->pin_count)
+ return;
+
+ if (ce->state)
+ i915_gem_object_ggtt_unpin(ce->state);
+
+ i915_gem_context_unreference(ctx);
+}
+
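
As an editorial aside (hypothetical names, not the i915 structures): the new intel_ring_context_pin()/intel_ring_context_unpin() pair above acquires the context's backing state only on the first pin and releases it only when the last pin is dropped. A minimal stand-alone sketch of that pin-count idiom:

#include <assert.h>
#include <stdio.h>

struct context {
        int pin_count;     /* number of outstanding pins */
        int resource_held; /* 1 while the backing resource is acquired */
};

static int context_pin(struct context *ce)
{
        if (ce->pin_count++)        /* already pinned: just bump the count */
                return 0;
        ce->resource_held = 1;      /* first pin: acquire the resource */
        return 0;
}

static void context_unpin(struct context *ce)
{
        assert(ce->pin_count > 0);
        if (--ce->pin_count)        /* still pinned by someone else */
                return;
        ce->resource_held = 0;      /* last unpin: release the resource */
}

int main(void)
{
        struct context ce = { 0, 0 };
        context_pin(&ce);
        context_pin(&ce);
        context_unpin(&ce);
        printf("held after one unpin: %d\n", ce.resource_held);   /* 1 */
        context_unpin(&ce);
        printf("held after both unpins: %d\n", ce.resource_held); /* 0 */
        return 0;
}
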
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(engine->buffer);
- engine->dev = dev;
+ engine->i915 = dev_priv;
INIT_LIST_HEAD(&engine->active_list);
INIT_LIST_HEAD(&engine->request_list);
INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2315,7 +2200,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
- init_waitqueue_head(&engine->irq_queue);
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto error;
+
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
+ if (ret)
+ goto error;
ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
@@ -2324,7 +2222,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
}
engine->buffer = ringbuf;
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
ret = init_status_page(engine);
if (ret)
goto error;
@@ -2335,7 +2233,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
engine->name, ret);
@@ -2361,11 +2259,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
if (!intel_engine_initialized(engine))
return;
- dev_priv = to_i915(engine->dev);
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_stop_engine(engine);
- WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_unpin_ringbuffer_obj(engine->buffer);
intel_ringbuffer_free(engine->buffer);
@@ -2375,7 +2273,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(engine->dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
cleanup_status_page(engine);
} else {
WARN_ON(engine->id != RCS);
@@ -2384,7 +2282,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- engine->dev = NULL;
+ intel_engine_fini_breadcrumbs(engine);
+
+ intel_ring_context_unpin(dev_priv->kernel_context, engine);
+
+ engine->i915 = NULL;
}
int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2407,46 +2309,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- request->ringbuf = request->engine->buffer;
- return 0;
-}
+ int ret;
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
- /*
- * The first call merely notes the reserve request and is common for
- * all back ends. The subsequent localised _begin() call actually
- * ensures that the reservation is available. Without the begin, if
- * the request creator immediately submitted the request without
- * adding any commands to it then there might not actually be
- * sufficient room for the submission commands.
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
*/
- intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
- return intel_ring_begin(request, 0);
-}
+ request->reserved_space += LEGACY_REQUEST_SIZE;
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
- GEM_BUG_ON(ringbuf->reserved_size);
- ringbuf->reserved_size = size;
-}
-
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(!ringbuf->reserved_size);
- ringbuf->reserved_size = 0;
-}
+ request->ringbuf = request->engine->buffer;
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(!ringbuf->reserved_size);
- ringbuf->reserved_size = 0;
-}
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ return ret;
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(ringbuf->reserved_size);
+ request->reserved_space -= LEGACY_REQUEST_SIZE;
+ return 0;
}
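
Aside, for illustration only: the rewritten intel_ring_alloc_request_extras() above bumps request->reserved_space before the first intel_ring_begin() and drops it again afterwards, so the room needed by the closing add-request commands is guaranteed up front. A small self-contained sketch of that reserve-then-release accounting, with made-up sizes and names (RESERVED_TAIL, struct ring, ring_begin()):

#include <stdbool.h>
#include <stdio.h>

#define RESERVED_TAIL 32   /* space kept back for the closing commands */

struct ring {
        int space;         /* bytes currently free */
        int reserved;      /* bytes promised to the request tail */
};

/* Succeeds only if the write still leaves the reserved tail intact. */
static bool ring_begin(struct ring *r, int bytes)
{
        if (r->space < bytes + r->reserved)
                return false;  /* would eat into the reserved tail */
        r->space -= bytes;
        return true;
}

int main(void)
{
        struct ring r = { .space = 128, .reserved = 0 };
        bool ok;

        /* Request construction: promise room for the closing commands... */
        r.reserved += RESERVED_TAIL;
        if (!ring_begin(&r, 0))        /* wait/flush until that much is free */
                return 1;
        /* ...then release the promise; the tail emits like any other write. */
        r.reserved -= RESERVED_TAIL;

        ok = ring_begin(&r, 64);
        printf("begin(64) ok: %d, space left: %d\n", ok, r.space);
        return 0;
}
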
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
@@ -2468,7 +2346,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
*
* See also i915_gem_request_alloc() and i915_add_request().
*/
- GEM_BUG_ON(!ringbuf->reserved_size);
+ GEM_BUG_ON(!req->reserved_space);
list_for_each_entry(target, &engine->request_list, list) {
unsigned space;
@@ -2503,7 +2381,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int total_bytes, wait_bytes;
bool need_wrap = false;
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + req->reserved_space;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2519,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
* and only need to effectively wait for the reserved
* size space from the start of ringbuffer.
*/
- wait_bytes = remain_actual + ringbuf->reserved_size;
+ wait_bytes = remain_actual + req->reserved_space;
} else {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -2576,7 +2454,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
* so long as the semaphore value in the register/page is greater
@@ -2586,7 +2464,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
* the semaphore value, then when the seqno moves backwards all
* future waits will complete instantly (causing rendering corruption).
*/
- if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
if (HAS_VEBOX(dev_priv))
@@ -2603,43 +2481,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
- engine->set_seqno(engine, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
engine->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
+
+ /* After manually advancing the seqno, fake the interrupt in case
+ * there are any waiters for that seqno.
+ */
+ rcu_read_lock();
+ intel_engine_wakeup(engine);
+ rcu_read_unlock();
}
static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
- I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+ I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_INDICATOR) == 0,
- 50))
+ if (intel_wait_for_register_fw(dev_priv,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 50))
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
- I915_WRITE_TAIL(engine, value);
- POSTING_READ(RING_TAIL(engine->mmio_base));
+ I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
+ POSTING_READ_FW(RING_TAIL(engine->mmio_base));
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
@@ -2654,7 +2547,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2676,7 +2569,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
@@ -2767,7 +2660,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
@@ -2776,7 +2668,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2797,7 +2689,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
@@ -2809,11 +2701,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
return 0;
}
+static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ int ret, i;
+
+ if (!i915_semaphore_is_enabled(dev_priv))
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+ obj = i915_gem_object_create(&dev_priv->drm, 4096);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ dev_priv->semaphore_obj = obj;
+ }
+ }
+ }
+
+ if (!i915_semaphore_is_enabled(dev_priv))
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ u64 ring_offset;
+
+ if (i != engine->id)
+ ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
+ else
+ ring_offset = MI_SEMAPHORE_SYNC_INVALID;
+
+ engine->semaphore.signal_ggtt[i] = ring_offset;
+ }
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platform. And there is no VCS2 ring on the pre-gen8
+ * platform. So the semaphore between RCS and VCS2 is
+ * initialized as INVALID. Gen8 will initialize the
+ * sema between VCS2 and RCS later.
+ */
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ static const struct {
+ u32 wait_mbox;
+ i915_reg_t mbox_reg;
+ } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
+ [RCS] = {
+ [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
+ [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
+ [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+ },
+ [VCS] = {
+ [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
+ [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
+ [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+ },
+ [BCS] = {
+ [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
+ [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
+ [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+ },
+ [VECS] = {
+ [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+ [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+ [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+ },
+ };
+ u32 wait_mbox;
+ i915_reg_t mbox_reg;
+
+ if (i == engine->id || i == VCS2) {
+ wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+ mbox_reg = GEN6_NOSYNC;
+ } else {
+ wait_mbox = sem_data[engine->id][i].wait_mbox;
+ mbox_reg = sem_data[engine->id][i].mbox_reg;
+ }
+
+ engine->semaphore.mbox.wait[i] = wait_mbox;
+ engine->semaphore.mbox.signal[i] = mbox_reg;
+ }
+ }
+}
+
+static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(dev_priv) >= 8) {
+ engine->irq_enable = gen8_irq_enable;
+ engine->irq_disable = gen8_irq_disable;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ engine->irq_enable = gen6_irq_enable;
+ engine->irq_disable = gen6_irq_disable;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ engine->irq_enable = gen5_irq_enable;
+ engine->irq_disable = gen5_irq_disable;
+ engine->irq_seqno_barrier = gen5_seqno_barrier;
+ } else if (INTEL_GEN(dev_priv) >= 3) {
+ engine->irq_enable = i9xx_irq_enable;
+ engine->irq_disable = i9xx_irq_disable;
+ } else {
+ engine->irq_enable = i8xx_irq_enable;
+ engine->irq_disable = i8xx_irq_disable;
+ }
+}
+
+static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ engine->init_hw = init_ring_common;
+ engine->write_tail = ring_write_tail;
+
+ engine->add_request = i9xx_add_request;
+ if (INTEL_GEN(dev_priv) >= 6)
+ engine->add_request = gen6_add_request;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ else if (INTEL_GEN(dev_priv) >= 6)
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+ engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
+ intel_ring_init_irq(dev_priv, engine);
+ intel_ring_init_semaphores(dev_priv, engine);
+}
+
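
One more illustrative sketch, not drawn from the patch: intel_ring_default_vfuncs() above fills the engine's function pointers with a baseline and then upgrades individual hooks for newer generations, and callers such as intel_init_render_ring_buffer() below still override specific hooks per engine. The default-then-override vtable idiom, with invented hooks and generation checks:

#include <stdio.h>

struct engine_ops {
        const char *(*add_request)(void);
        const char *(*dispatch)(void);
};

static const char *add_request_old(void)  { return "legacy add_request"; }
static const char *add_request_new(void)  { return "gen6+ add_request"; }
static const char *dispatch_old(void)     { return "legacy dispatch"; }
static const char *dispatch_new(void)     { return "gen8+ dispatch"; }

/* Pick defaults by generation; later code may still override per engine. */
static void set_default_ops(struct engine_ops *ops, int gen)
{
        ops->add_request = add_request_old;
        if (gen >= 6)
                ops->add_request = add_request_new;

        ops->dispatch = dispatch_old;
        if (gen >= 8)
                ops->dispatch = dispatch_new;
}

int main(void)
{
        struct engine_ops ops;

        set_default_ops(&ops, 7);
        printf("%s / %s\n", ops.add_request(), ops.dispatch());
        return 0;
}
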
int intel_init_render_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
- struct drm_i915_gem_object *obj;
int ret;
engine->name = "render ring";
@@ -2822,140 +2862,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->hw_id = 0;
engine->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (i915_semaphore_is_enabled(dev)) {
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else {
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
- if (ret != 0) {
- drm_gem_object_unreference(&obj->base);
- DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else
- dev_priv->semaphore_obj = obj;
- }
- }
+ intel_ring_default_vfuncs(dev_priv, engine);
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ if (HAS_L3_DPF(dev_priv))
+ engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen6_add_request;
+ engine->add_request = gen8_render_add_request;
engine->flush = gen8_render_ring_flush;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
- WARN_ON(!dev_priv->semaphore_obj);
- engine->semaphore.sync_to = gen8_ring_sync;
+ if (i915_semaphore_is_enabled(dev_priv))
engine->semaphore.signal = gen8_rcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen6_add_request;
engine->flush = gen7_render_ring_flush;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
engine->flush = gen6_render_ring_flush;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen6_ring_sync;
- engine->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between RCS and VCS2 is
- * initialized as INVALID. Gen8 will initialize the
- * sema between VCS2 and RCS later.
- */
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
- } else if (IS_GEN5(dev)) {
- engine->add_request = pc_render_add_request;
+ } else if (IS_GEN5(dev_priv)) {
engine->flush = gen4_render_ring_flush;
- engine->get_seqno = pc_render_get_seqno;
- engine->set_seqno = pc_render_set_seqno;
- engine->irq_get = gen5_ring_get_irq;
- engine->irq_put = gen5_ring_put_irq;
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
- GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
- engine->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
engine->flush = gen2_render_ring_flush;
else
engine->flush = gen4_render_ring_flush;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
- engine->irq_get = i8xx_ring_get_irq;
- engine->irq_put = i8xx_ring_put_irq;
- } else {
- engine->irq_get = i9xx_ring_get_irq;
- engine->irq_put = i9xx_ring_put_irq;
- }
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- engine->write_tail = ring_write_tail;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
- else if (IS_GEN8(dev))
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 6)
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 4)
- engine->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
- engine->dispatch_execbuffer = i830_dispatch_execbuffer;
- else
- engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
engine->init_hw = init_render_ring;
engine->cleanup = render_ring_cleanup;
- /* Workaround batchbuffer to combat CS tlb bug. */
- if (HAS_BROKEN_CS_TLB(dev)) {
- obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate batch bo\n");
- return -ENOMEM;
- }
-
- ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
- if (ret != 0) {
- drm_gem_object_unreference(&obj->base);
- DRM_ERROR("Failed to ping batch bo\n");
- return ret;
- }
-
- engine->scratch.obj = obj;
- engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
- }
-
ret = intel_init_ring_buffer(dev, engine);
if (ret)
return ret;
- if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(engine);
+ if (INTEL_GEN(dev_priv) >= 6) {
+ ret = intel_init_pipe_control(engine, 4096);
+ if (ret)
+ return ret;
+ } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
+ ret = intel_init_pipe_control(engine, I830_WA_SIZE);
if (ret)
return ret;
}
@@ -2965,7 +2914,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[VCS];
engine->name = "bsd ring";
@@ -2973,68 +2922,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->exec_id = I915_EXEC_BSD;
engine->hw_id = 1;
- engine->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6) {
+ intel_ring_default_vfuncs(dev_priv, engine);
+
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
engine->write_tail = gen6_bsd_ring_write_tail;
engine->flush = gen6_bsd_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8)
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer =
- gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- } else {
+ else
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
- engine->dispatch_execbuffer =
- gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen6_ring_sync;
- engine->semaphore.signal = gen6_signal;
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
- }
} else {
engine->mmio_base = BSD_RING_BASE;
engine->flush = bsd_ring_flush;
- engine->add_request = i9xx_add_request;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
- engine->irq_get = gen5_ring_get_irq;
- engine->irq_put = gen5_ring_put_irq;
- } else {
+ else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
- engine->irq_get = i9xx_ring_get_irq;
- engine->irq_put = i9xx_ring_put_irq;
- }
- engine->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
@@ -3044,147 +2952,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
engine->name = "bsd2 ring";
engine->id = VCS2;
engine->exec_id = I915_EXEC_BSD;
engine->hw_id = 4;
-
- engine->write_tail = ring_write_tail;
engine->mmio_base = GEN8_BSD2_RING_BASE;
+
+ intel_ring_default_vfuncs(dev_priv, engine);
+
engine->flush = gen6_bsd_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer =
- gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[BCS];
engine->name = "blitter ring";
engine->id = BCS;
engine->exec_id = I915_EXEC_BLT;
engine->hw_id = 2;
-
engine->mmio_base = BLT_RING_BASE;
- engine->write_tail = ring_write_tail;
+
+ intel_ring_default_vfuncs(dev_priv, engine);
+
engine->flush = gen6_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8)
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- } else {
+ else
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.signal = gen6_signal;
- engine->semaphore.sync_to = gen6_ring_sync;
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between BCS and VCS2 is
- * initialized as INVALID. Gen8 will initialize the
- * sema between BCS and VCS2 later.
- */
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
- }
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[VECS];
engine->name = "video enhancement ring";
engine->id = VECS;
engine->exec_id = I915_EXEC_VEBOX;
engine->hw_id = 3;
-
engine->mmio_base = VEBOX_RING_BASE;
- engine->write_tail = ring_write_tail;
+
+ intel_ring_default_vfuncs(dev_priv, engine);
+
engine->flush = gen6_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
} else {
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_get = hsw_vebox_get_irq;
- engine->irq_put = hsw_vebox_put_irq;
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen6_ring_sync;
- engine->semaphore.signal = gen6_signal;
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
+ engine->irq_enable = hsw_vebox_irq_enable;
+ engine->irq_disable = hsw_vebox_irq_disable;
}
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d398..12cb7ed90014 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,18 +62,6 @@ struct intel_hw_status_page {
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-#define GEN8_RING_SEMAPHORE_INIT(e) do { \
- if (!dev_priv->semaphore_obj) { \
- break; \
- } \
- (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
- (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
- (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
- (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
- (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
- (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
- } while(0)
-
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
+ unsigned long user_interrupts;
u32 seqno;
- unsigned user_interrupts;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
@@ -107,7 +95,6 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
- int reserved_size;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -120,7 +107,7 @@ struct intel_ringbuffer {
u32 last_retired_head;
};
-struct intel_context;
+struct i915_gem_context;
struct drm_i915_reg_table;
/*
@@ -142,7 +129,10 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_object *obj;
};
-struct intel_engine_cs {
+struct drm_i915_gem_request;
+
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
const char *name;
enum intel_engine_id {
RCS = 0,
@@ -157,10 +147,42 @@ struct intel_engine_cs {
unsigned int hw_id;
unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
- struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct list_head buffers;
+ /* Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+ struct intel_breadcrumbs {
+ struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+ unsigned long irq_wakeups;
+ bool irq_posted;
+
+ spinlock_t lock; /* protects the lists of requests */
+ struct rb_root waiters; /* sorted by retirement, priority */
+ struct rb_root signals; /* sorted by retirement */
+ struct intel_wait *first_wait; /* oldest waiter by retirement */
+ struct task_struct *signaler; /* used for fence signalling */
+ struct drm_i915_gem_request *first_signal;
+ struct timer_list fake_irq; /* used after a missed interrupt */
+
+ bool irq_enabled : 1;
+ bool rpm_wakelock : 1;
+ } breadcrumbs;
+
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
@@ -171,11 +193,10 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- unsigned irq_refcount; /* protected by dev_priv->irq_lock */
- u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- struct drm_i915_gem_request *trace_irq_req;
- bool __must_check (*irq_get)(struct intel_engine_cs *ring);
- void (*irq_put)(struct intel_engine_cs *ring);
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *ring);
+ void (*irq_disable)(struct intel_engine_cs *ring);
int (*init_hw)(struct intel_engine_cs *ring);
@@ -194,9 +215,6 @@ struct intel_engine_cs {
* monotonic, even if not coherent.
*/
void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
- u32 (*get_seqno)(struct intel_engine_cs *ring);
- void (*set_seqno)(struct intel_engine_cs *ring,
- u32 seqno);
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags);
@@ -268,13 +286,11 @@ struct intel_engine_cs {
struct tasklet_struct irq_tasklet;
spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct list_head execlist_queue;
- struct list_head execlist_retired_req_list;
unsigned int fw_domains;
unsigned int next_context_status_buffer;
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
- u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
@@ -306,20 +322,16 @@ struct intel_engine_cs {
* inspecting request list.
*/
u32 last_submitted_seqno;
- unsigned user_interrupts;
bool gpu_caches_dirty;
- wait_queue_head_t irq_queue;
-
- struct intel_context *last_context;
+ struct i915_gem_context *last_context;
struct intel_ring_hangcheck hangcheck;
struct {
struct drm_i915_gem_object *obj;
u32 gtt_offset;
- volatile u32 *cpu_page;
} scratch;
bool needs_cmd_parser;
@@ -350,13 +362,13 @@ struct intel_engine_cs {
};
static inline bool
-intel_engine_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(const struct intel_engine_cs *engine)
{
- return engine->dev != NULL;
+ return engine->i915 != NULL;
}
static inline unsigned
-intel_engine_flag(struct intel_engine_cs *engine)
+intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
}
@@ -427,7 +439,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -458,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
-int intel_init_pipe_control(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -475,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
int init_workarounds_ring(struct intel_engine_cs *engine);
@@ -486,26 +501,73 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
- * is ILK at 136 words. Reserving too much is better than reserving too little
- * as that allows for corner cases that might have been missed. So the figure
- * has been rounded up to 160 words.
+ * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
+ * we need to allocate double the largest single packet within that emission
+ * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
*/
-#define MIN_SPACE_FOR_ADD_REQUEST 160
+#define MIN_SPACE_FOR_ADD_REQUEST 336
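The new value follows directly from the sizing comment above; a quick check, assuming the usual 4 bytes per dword:

	worst-case emission on BDW:       (6 + 6 + 36) dwords * 4 = 192 bytes
	doubled largest packet for wrap:  (6 + 6 + 72) dwords * 4 = 336 bytes

hence the bump of MIN_SPACE_FOR_ADD_REQUEST from 160 to 336.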
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
+/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
+struct intel_wait {
+ struct rb_node node;
+ struct task_struct *tsk;
+ u32 seqno;
+};
+
+struct intel_signal_node {
+ struct rb_node node;
+ struct intel_wait wait;
+};
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+{
+ wait->tsk = current;
+ wait->seqno = seqno;
+}
+
+static inline bool intel_wait_complete(const struct intel_wait *wait)
+{
+ return RB_EMPTY_NODE(&wait->node);
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+
+static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+}
+
+static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+ bool wakeup = false;
+ struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ /* Note that for this not to dangerously chase a dangling pointer,
+ * the caller is responsible for ensuring that the task remains valid for
+ * wake_up_process() i.e. that the RCU grace period cannot expire.
+ *
+ * Also note that tsk is likely to be in !TASK_RUNNING state so an
+ * early test for tsk->state != TASK_RUNNING before wake_up_process()
+ * is unlikely to be beneficial.
+ */
+ if (tsk)
+ wakeup = wake_up_process(tsk);
+ return wakeup;
+}
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+unsigned int intel_kick_waiters(struct drm_i915_private *i915);
+unsigned int intel_kick_signalers(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
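The breadcrumbs interface added to intel_ringbuffer.h above is easiest to see with a small usage sketch. The sketch below is hypothetical and only uses declarations from this header (intel_wait_init(), intel_engine_add_wait(), intel_engine_get_seqno(), intel_engine_remove_wait()); the function name example_wait_for_seqno(), the open-coded seqno comparison and the lack of any interrupt or timeout handling are simplifications of the sketch, not part of the patch.

	/* Hypothetical usage sketch -- not part of this patch. */
	static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
	{
		struct intel_wait wait;

		/* Register ourselves in the engine's waiter tree. */
		intel_wait_init(&wait, seqno);
		intel_engine_add_wait(engine, &wait);

		/* Sleep until the hardware seqno passes ours; per the comment on
		 * struct intel_breadcrumbs, the current bottom-half waiter is
		 * responsible for waking us.
		 */
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if ((s32)(intel_engine_get_seqno(engine) - seqno) >= 0)
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);

		intel_engine_remove_wait(engine, &wait);
	}

The sketch ignores intel_engine_add_wait()'s return value and the bottom-half bookkeeping that the real wait path performs.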
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7fc3..1c603bbe5784 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,6 +65,9 @@
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
+
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
@@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv,
power_well->ops->disable(dev_priv, power_well);
}
+static void intel_power_well_get(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+}
+
+static void intel_power_well_put(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN(!power_well->count, "Use count on power well %s is already zero",
+ power_well->name);
+
+ if (!--power_well->count)
+ intel_power_well_disable(dev_priv, power_well);
+}
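These two helpers centralise the power-well reference counting that later hunks remove from __intel_display_power_get_domain() and intel_display_power_put(). A minimal illustration of the semantics, with well standing in for any struct i915_power_well pointer (hypothetical calls, not taken from the patch):

	intel_power_well_get(dev_priv, well);	/* count 0 -> 1: intel_power_well_enable() runs */
	intel_power_well_get(dev_priv, well);	/* count 1 -> 2: no hardware access */
	intel_power_well_put(dev_priv, well);	/* count 2 -> 1: no hardware access */
	intel_power_well_put(dev_priv, well);	/* count 1 -> 0: intel_power_well_disable() runs */

An unbalanced put triggers the WARN above.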
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -267,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
*/
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -298,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -345,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE_ENABLED), 20))
+ if (intel_wait_for_register(dev_priv,
+ HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_STATE_ENABLED,
+ HSW_PWR_WELL_STATE_ENABLED,
+ 20))
DRM_ERROR("Timeout enabling power well\n");
hsw_power_well_post_enable(dev_priv);
}
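This hunk, and several more below in intel_runtime_pm.c and intel_sideband.c, replace open-coded register polling with intel_wait_for_register(). Going only by the conversions visible in this patch, the mapping is mechanical; reg, mask, value and timeout_ms below are generic placeholders, not identifiers from the patch:

	/* open-coded form being removed: */
	if (wait_for((I915_READ(reg) & mask) == value, timeout_ms))
		DRM_ERROR("timeout\n");

	/* helper form used by the conversions (returns non-zero on timeout): */
	if (intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms))
		DRM_ERROR("timeout\n");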
@@ -419,6 +442,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
@@ -548,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC9\n");
+ intel_power_sequencer_reset(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -669,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
switch (power_well->data) {
case SKL_DISP_PW_1:
- if (wait_for((I915_READ(SKL_FUSE_STATUS) &
- SKL_FUSE_PG0_DIST_STATUS), 1)) {
+ if (intel_wait_for_register(dev_priv,
+ SKL_FUSE_STATUS,
+ SKL_FUSE_PG0_DIST_STATUS,
+ SKL_FUSE_PG0_DIST_STATUS,
+ 1)) {
DRM_ERROR("PG0 not enabled\n");
return;
}
@@ -731,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
- if (wait_for((I915_READ(SKL_FUSE_STATUS) &
- SKL_FUSE_PG1_DIST_STATUS), 1))
+ if (intel_wait_for_register(dev_priv,
+ SKL_FUSE_STATUS,
+ SKL_FUSE_PG1_DIST_STATUS,
+ SKL_FUSE_PG1_DIST_STATUS,
+ 1))
DRM_ERROR("PG1 distributing status timeout\n");
} else if (power_well->data == SKL_DISP_PW_2) {
- if (wait_for((I915_READ(SKL_FUSE_STATUS) &
- SKL_FUSE_PG2_DIST_STATUS), 1))
+ if (intel_wait_for_register(dev_priv,
+ SKL_FUSE_STATUS,
+ SKL_FUSE_PG2_DIST_STATUS,
+ SKL_FUSE_PG2_DIST_STATUS,
+ 1))
DRM_ERROR("PG2 distributing status timeout\n");
}
}
@@ -800,21 +843,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
+static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
+{
+ enum skl_disp_power_wells power_well_id = power_well->data;
+
+ return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum skl_disp_power_wells power_well_id = power_well->data;
+ struct i915_power_well *cmn_a_well;
+
+ if (power_well_id == BXT_DPIO_CMN_BC) {
+ /*
+ * We need to copy the GRC calibration value from the eDP PHY,
+ * so make sure it's powered up.
+ */
+ cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+ intel_power_well_get(dev_priv, cmn_a_well);
+ }
+
+ bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
+
+ if (power_well_id == BXT_DPIO_CMN_BC)
+ intel_power_well_put(dev_priv, cmn_a_well);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv,
+ bxt_power_well_to_phy(power_well));
+}
+
+static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (power_well->count > 0)
+ bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
+ else
+ bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
+}
+
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ bxt_power_well_to_phy(power_well));
+
+ power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ bxt_power_well_to_phy(power_well));
+}
+
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 tmp = I915_READ(DBUF_CTL);
+
+ WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+ (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+ "Unexpected DBuf power state (0x%08x)\n", tmp);
+}
+
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- if (IS_BROXTON(dev_priv)) {
- broxton_cdclk_verify_state(dev_priv);
- broxton_ddi_phy_verify_state(dev_priv);
- }
+ WARN_ON(dev_priv->cdclk_freq !=
+ dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_BROXTON(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -948,10 +1069,16 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CBR1_VLV, 0);
+
+ WARN_ON(dev_priv->rawclk_freq == 0);
+
+ I915_WRITE(RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
+ struct intel_encoder *encoder;
enum pipe pipe;
/*
@@ -962,7 +1089,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(dev_priv->dev, pipe) {
+ for_each_pipe(&dev_priv->drm, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -987,7 +1114,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
- i915_redisable_vga_power_on(dev_priv->dev);
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
+ i915_redisable_vga_power_on(&dev_priv->drm);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -997,9 +1130,11 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->dev->irq);
+ synchronize_irq(dev_priv->drm.irq);
+
+ intel_power_sequencer_reset(dev_priv);
- vlv_power_sequencer_reset(dev_priv);
+ intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1092,7 +1227,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
u32 phy_control = dev_priv->chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
- u32 tmp;
/*
* The BIOS can leave the PHY is some weird state
@@ -1180,10 +1314,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
- WARN(phy_status != tmp,
- "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- tmp, phy_status, dev_priv->chv_phy_control);
+ if (intel_wait_for_register(dev_priv,
+ DISPLAY_PHY_STATUS,
+ phy_status_mask,
+ phy_status,
+ 10))
+ DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
@@ -1211,7 +1349,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+ if (intel_wait_for_register(dev_priv,
+ DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy),
+ PHY_POWERGOOD(phy),
+ 1))
 DRM_ERROR("Display PHY %d is not powered up\n", phy);
mutex_lock(&dev_priv->sb_lock);
@@ -1501,10 +1643,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well;
int i;
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
- }
+ for_each_power_well(i, power_well, BIT(domain), power_domains)
+ intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
}
@@ -1598,14 +1738,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- WARN(!power_well->count,
- "Use count on power well %s is already zero",
- power_well->name);
-
- if (!--power_well->count)
- intel_power_well_disable(dev_priv, power_well);
- }
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+ intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@@ -1776,6 +1910,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
.is_enabled = gen9_dc_off_power_well_enabled,
};
+static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
@@ -2012,6 +2153,18 @@ static struct i915_power_well bxt_power_wells[] = {
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
},
+ {
+ .name = "dpio-common-a",
+ .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .data = BXT_DPIO_CMN_A,
+ },
+ {
+ .name = "dpio-common-bc",
+ .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .data = BXT_DPIO_CMN_BC,
+ },
};
static int
@@ -2131,7 +2284,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
- struct device *device = &dev_priv->dev->pdev->dev;
+ struct device *device = &dev_priv->drm.pdev->dev;
/*
* The i915.ko module is still not prepared to be loaded when
@@ -2171,6 +2324,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+ DRM_ERROR("DBuf power disable timeout!\n");
+}
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -2195,12 +2370,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- if (!resume)
- return;
-
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload)
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
@@ -2211,6 +2385,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_dbuf_disable(dev_priv);
+
skl_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -2254,11 +2430,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- broxton_init_cdclk(dev_priv);
- broxton_ddi_phy_init(dev_priv);
+ bxt_init_cdclk(dev_priv);
- broxton_cdclk_verify_state(dev_priv);
- broxton_ddi_phy_verify_state(dev_priv);
+ gen9_dbuf_enable(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
@@ -2271,8 +2445,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- broxton_ddi_phy_uninit(dev_priv);
- broxton_uninit_cdclk(dev_priv);
+ gen9_dbuf_disable(dev_priv);
+
+ bxt_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -2403,13 +2578,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @dev_priv: i915 device instance
+ * @resume: true when called from a resume path, false otherwise
*
* This function initializes the hardware power domain state and enables all
* power domains using intel_display_set_init_power().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
@@ -2471,7 +2647,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
pm_runtime_get_sync(device);
@@ -2492,7 +2668,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
if (IS_ENABLED(CONFIG_PM)) {
@@ -2534,7 +2710,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
assert_rpm_wakelock_held(dev_priv);
@@ -2553,7 +2729,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
assert_rpm_wakelock_held(dev_priv);
@@ -2576,7 +2752,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae5687d..e378f35365a2 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 bval = val, cval = val;
int i;
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -2177,12 +2177,39 @@ done:
#undef CHECK_PROPERTY
}
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ return sysfs_create_link(&connector->kdev->kobj,
+ &sdvo->ddc.dev.kobj,
+ sdvo->ddc.dev.kobj.name);
+}
+
+static void
+intel_sdvo_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+
+ sysfs_remove_link(&connector->kdev->kobj,
+ sdvo->ddc.dev.kobj.name);
+ intel_connector_unregister(connector);
+}
+
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_sdvo_connector_register,
+ .early_unregister = intel_sdvo_connector_unregister,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2191,7 +2218,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2312,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
@@ -2346,20 +2372,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
return 0x72;
}
-static void
-intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
-{
- struct drm_connector *drm_connector;
- struct intel_sdvo *sdvo_encoder;
-
- drm_connector = &intel_connector->base;
- sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
-
- sysfs_remove_link(&drm_connector->kdev->kobj,
- sdvo_encoder->ddc.dev.kobj.name);
- intel_connector_unregister(intel_connector);
-}
-
static int
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
struct intel_sdvo *encoder)
@@ -2382,27 +2394,10 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
- connector->base.unregister = intel_sdvo_connector_unregister;
intel_connector_attach_encoder(&connector->base, &encoder->base);
- ret = drm_connector_register(drm_connector);
- if (ret < 0)
- goto err1;
-
- ret = sysfs_create_link(&drm_connector->kdev->kobj,
- &encoder->ddc.dev.kobj,
- encoder->ddc.dev.kobj.name);
- if (ret < 0)
- goto err2;
return 0;
-
-err2:
- drm_connector_unregister(drm_connector);
-err1:
- drm_connector_cleanup(drm_connector);
-
- return ret;
}
static void
@@ -2529,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2608,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
- drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2959,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
bool intel_sdvo_init(struct drm_device *dev,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -2981,7 +2974,7 @@ bool intel_sdvo_init(struct drm_device *dev,
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
- NULL);
+ "SDVO %c", port_name(port));
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index c3998188cf35..1a840bf92eea 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ if (intel_wait_for_register(dev_priv,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
is_read ? "read" : "write");
return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
- if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ if (intel_wait_for_register(dev_priv,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
return -ETIMEDOUT;
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT,
+ SBI_BUSY | SBI_RESPONSE_FAIL,
+ 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT,
+ SBI_BUSY | SBI_RESPONSE_FAIL,
+ 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e0e9..7c08e4f29032 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -53,8 +53,8 @@ format_is_yuv(uint32_t format)
}
}
-static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs)
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs)
{
/* paranoia */
if (!adjusted_mode->crtc_htotal)
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
- enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -93,7 +91,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
/* FIXME needs to be calibrated sensibly */
- min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
max = vblank_start - 1;
local_irq_disable();
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
- crtc->debug.start_vbl_count =
- dev->driver->get_vblank_counter(dev, pipe);
+ crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
}
@@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
{
- struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
- u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
+ if (work) {
+ work->flip_queued_vblank = end_vbl_count;
+ smp_mb__before_atomic();
+ atomic_set(&work->pending, 1);
+ }
+
trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+ /* We're still in the vblank-evade critical section, so this can't race.
+ * Would be slightly nice to just grab the vblank count and arm the
+ * event outside of the critical section - the spinlock might spin for a
+ * while ... */
+ if (crtc->base.state->event) {
+ WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+
+ spin_lock(&crtc->base.dev->event_lock);
+ drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
+ spin_unlock(&crtc->base.dev->event_lock);
+
+ crtc->base.state->event = NULL;
+ }
+
local_irq_enable();
if (crtc->debug.start_vbl_count &&
@@ -183,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[plane_state->scaler_id];
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
- uint32_t ps_ctrl = 0;
int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler;
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
- I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+
+ scaler = &crtc_state->scaler_state.scalers[scaler_id];
+
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
@@ -286,7 +303,7 @@ static void
skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
@@ -300,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
- struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
int plane = intel_plane->plane;
/* Seems RGB data bypasses the CSC always */
@@ -342,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -468,7 +485,7 @@ static void
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
@@ -485,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -607,7 +624,7 @@ static void
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
@@ -626,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -736,7 +753,7 @@ static void
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
@@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
- plane_formats, num_plane_formats,
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ if (INTEL_INFO(dev)->gen >= 9)
+ ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ DRM_PLANE_TYPE_OVERLAY,
+ "plane %d%c", plane + 2, pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, plane));
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c765..49136ad5473e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -826,7 +826,7 @@ static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp = I915_READ(TV_CTL);
if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
intel_enable_tv(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
intel_disable_tv(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
static void intel_tv_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_crtc *crtc = connector->state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
@@ -1501,6 +1501,8 @@ out:
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_tv_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
@@ -1512,7 +1514,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
void
intel_tv_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_connector *connector;
struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
@@ -1591,7 +1592,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_SVIDEO);
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ DRM_MODE_ENCODER_TVDAC, "TV");
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->get_config = intel_tv_get_config;
@@ -1600,7 +1601,6 @@ intel_tv_init(struct drm_device *dev)
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
@@ -1642,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
- drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..ff80a81b1a84 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
dev_priv->uncore.fifo_count =
fifo_free_entries(dev_priv);
}
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
return false;
}
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
+static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* clear out old GT FIFO errors */
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
__raw_i915_write32(dev_priv, GTFIFOCTL,
__raw_i915_read32(dev_priv, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
GT_FIFO_CTL_RC6_POLICY_STALL);
}
- intel_uncore_forcewake_reset(dev, restore_forcewake);
+ intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+ bool restore_forcewake)
{
- __intel_uncore_early_sanitize(dev, restore_forcewake);
- i915_check_and_clear_faults(dev);
+ __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ i915_check_and_clear_faults(dev_priv);
}
-void intel_uncore_sanitize(struct drm_device *dev)
+void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+ i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_disable_gt_powersave(dev);
+ intel_disable_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
fw_domain_reset(d);
}
-static void intel_uncore_fw_domains_init(struct drm_device *dev)
+static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (INTEL_INFO(dev_priv)->gen <= 5)
return;
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- if (!IS_CHERRYVIEW(dev))
+ if (!IS_CHERRYVIEW(dev_priv))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev_priv)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
- mutex_lock(&dev->struct_mutex);
+ spin_lock_irq(&dev_priv->uncore.lock);
fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1311,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1324,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_check_vgpu(dev);
+ i915_check_vgpu(dev_priv);
intel_uncore_edram_detect(dev_priv);
- intel_uncore_fw_domains_init(dev);
- __intel_uncore_early_sanitize(dev, false);
+ intel_uncore_fw_domains_init(dev_priv);
+ __intel_uncore_early_sanitize(dev_priv, false);
dev_priv->uncore.unclaimed_mmio_check = 1;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
ASSIGN_READ_MMIO_VFUNCS(gen9);
break;
case 8:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(chv);
ASSIGN_READ_MMIO_VFUNCS(chv);
@@ -1357,13 +1352,13 @@ void intel_uncore_init(struct drm_device *dev)
break;
case 7:
case 6:
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1376,24 @@ void intel_uncore_init(struct drm_device *dev)
break;
}
- if (intel_vgpu_active(dev)) {
+ if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
-void intel_uncore_fini(struct drm_device *dev)
+void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
/* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(dev);
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_sanitize(dev_priv);
+ intel_uncore_forcewake_reset(dev_priv, false);
}
-#define GEN_RANGE(l, h) GENMASK(h, l)
+#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
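As an editorial aside (not part of the patch): the GEN_RANGE() change tracks the switch below from testing "1 << gen" to testing the precomputed gen_mask, in which bit (gen - 1) is set for a given generation, so the whitelist masks now shift down by one. A minimal sketch of the equivalence, with an illustrative range only:

/* Illustrative only: GEN_RANGE(6, 8) == GENMASK(7, 5) == 0xe0, and a gen 7
 * device has gen_mask containing BIT(7 - 1) == 0x40, which lies inside that
 * mask; gen 9 (BIT(8) == 0x100) does not.
 */
static bool example_gen_in_range(unsigned int gen, u32 range)
{
	return (BIT(gen - 1) & range) != 0;
}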
static const struct register_whitelist {
i915_reg_t offset_ldw, offset_udw;
@@ -1414,7 +1409,7 @@ static const struct register_whitelist {
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
unsigned size;
@@ -1423,7 +1418,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
break;
}
@@ -1467,83 +1462,47 @@ out:
return ret;
}
-int i915_get_reset_stats_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_reset_stats *args = data;
- struct i915_ctx_hang_stats *hs;
- struct intel_context *ctx;
- int ret;
-
- if (args->flags || args->pad)
- return -EINVAL;
-
- if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
- hs = &ctx->hang_stats;
-
- if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
- else
- args->reset_count = 0;
-
- args->batch_active = hs->batch_active;
- args->batch_pending = hs->batch_pending;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
/* assert reset for at least 20 usec */
- pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
udelay(20);
- pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+ pci_write_config_byte(pdev, I915_GDRST, 0);
- return wait_for(i915_reset_complete(dev), 500);
+ return wait_for(i915_reset_complete(pdev), 500);
}
-static int g4x_reset_complete(struct drm_device *dev)
+static int g4x_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- return wait_for(g4x_reset_complete(dev), 500);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(pdev), 500);
}
-static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- pci_write_config_byte(dev->pdev, I915_GDRST,
+ pci_write_config_byte(pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret)
return ret;
@@ -1551,9 +1510,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I915_GDRST,
+ pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret)
return ret;
@@ -1561,27 +1520,29 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+ pci_write_config_byte(pdev, I915_GDRST, 0);
return 0;
}
-static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
I915_WRITE(ILK_GDSR,
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = wait_for((I915_READ(ILK_GDSR) &
- ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ ret = intel_wait_for_register(dev_priv,
+ ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+ 500);
if (ret)
return ret;
I915_WRITE(ILK_GDSR,
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = wait_for((I915_READ(ILK_GDSR) &
- ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ ret = intel_wait_for_register(dev_priv,
+ ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+ 500);
if (ret)
return ret;
@@ -1594,25 +1555,21 @@ static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
u32 hw_domain_mask)
{
- int ret;
-
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
/* Spin waiting for the device to ack the reset requests */
- ret = wait_for(ACKED, 500);
-#undef ACKED
-
- return ret;
+ return intel_wait_for_register_fw(dev_priv,
+ GEN6_GDRST, hw_domain_mask, 0,
+ 500);
}
/**
* gen6_reset_engines - reset individual engines
- * @dev: DRM device
+ * @dev_priv: i915 device
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
*
* This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
*
* Returns 0 on success, nonzero on error.
*/
-static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen6_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN6_GRDOM_RENDER,
@@ -1647,33 +1604,94 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
ret = gen6_hw_domain_reset(dev_priv, hw_mask);
- intel_uncore_forcewake_reset(dev, true);
+ intel_uncore_forcewake_reset(dev_priv, true);
return ret;
}
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ * (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted; it is
+ * not suitable for very long waits. See intel_wait_for_register() if you
+ * wish to wait without holding forcewake for the duration (i.e. you expect
+ * the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
+{
+#define done ((I915_READ_FW(reg) & mask) == value)
+ int ret = wait_for_us(done, 2);
+ if (ret)
+ ret = wait_for(done, timeout_ms);
+ return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ * (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
{
- return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+
+ unsigned fw =
+ intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ int ret;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+ ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+ intel_uncore_forcewake_put(dev_priv, fw);
+ if (ret)
+ ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+ timeout_ms);
+
+ return ret;
}
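As an aside, a minimal usage sketch of the two new helpers (not part of the patch; SOME_STATUS_REG, STATUS_READY and example_wait_for_ready are hypothetical names, while the helper signatures match the ones introduced above; real conversions appear further down, e.g. in ironlake_do_reset() and gen8_request_engine_reset()):

/* Sketch only: SOME_STATUS_REG and STATUS_READY are hypothetical. */
static int example_wait_for_ready(struct drm_i915_private *dev_priv)
{
	int err;

	/* Slow path: the helper takes and drops forcewake around the wait. */
	err = intel_wait_for_register(dev_priv,
				      SOME_STATUS_REG, STATUS_READY,
				      STATUS_READY, 50 /* ms */);
	if (err)
		return err;

	/* Fast path: the caller already holds forcewake, so the _fw variant
	 * avoids taking it again for the register read.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	err = intel_wait_for_register_fw(dev_priv,
					 SOME_STATUS_REG, STATUS_READY,
					 STATUS_READY, 50);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return err;
}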
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
- ret = wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700);
+ ret = intel_wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700);
if (ret)
DRM_ERROR("%s: reset request timeout\n", engine->name);
@@ -1682,22 +1700,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
-static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen8_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, dev_priv, engine_mask)
if (gen8_request_engine_reset(engine))
goto not_ready;
- return gen6_reset_engines(dev, engine_mask);
+ return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1724,35 @@ not_ready:
return -EIO;
}
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
- unsigned engine_mask)
+typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
if (!i915.reset)
return NULL;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_INFO(dev_priv)->gen >= 8)
return gen8_reset_engines;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_INFO(dev_priv)->gen >= 6)
return gen6_reset_engines;
- else if (IS_GEN5(dev))
+ else if (IS_GEN5(dev_priv))
return ironlake_do_reset;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
return g4x_do_reset;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
return g33_do_reset;
- else if (INTEL_INFO(dev)->gen >= 3)
+ else if (INTEL_INFO(dev_priv)->gen >= 3)
return i915_do_reset;
else
return NULL;
}
-int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int (*reset)(struct drm_device *, unsigned);
+ reset_func reset;
int ret;
- reset = intel_get_gpu_reset(dev);
+ reset = intel_get_gpu_reset(dev_priv);
if (reset == NULL)
return -ENODEV;
@@ -1742,15 +1760,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = reset(dev, engine_mask);
+ ret = reset(dev_priv, engine_mask);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
-bool intel_has_gpu_reset(struct drm_device *dev)
+bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
- return intel_get_gpu_reset(dev) != NULL;
+ return intel_get_gpu_reset(dev_priv) != NULL;
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1758,7 +1776,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
int ret;
unsigned long irqflags;
- if (!i915.enable_guc_submission)
+ if (!HAS_GUC(dev_priv))
return -EINVAL;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1820,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
@@ -1842,10 +1860,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
break;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 44fb0b35eed3..68db9621f1f0 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -447,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
u8 obsolete3;
} __packed;
+struct bdb_lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
+ struct bdb_lfp_backlight_control_method backlight_control[16];
} __packed;
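Editorial aside: the new backlight_control[] array extends the parsed LFP backlight block with a per-panel control method. A rough sketch of how a VBT parser could consume it (not taken from this patch; find_backlight_block(), block_size(), panel_type and the destination fields are assumptions for illustration):

/* Sketch only: read the control method for the active panel, assuming the
 * block is large enough to actually contain the new array.
 */
const struct bdb_lfp_backlight_data *block = find_backlight_block(); /* hypothetical */
if (block && block_size(block) >= sizeof(*block)) {                  /* hypothetical */
	const struct bdb_lfp_backlight_control_method *method =
		&block->backlight_control[panel_type];
	backlight_type = method->type;             /* 4-bit control method */
	backlight_controller = method->controller; /* 4-bit controller index */
}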
struct aimdb_header {
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index a1844b50546c..f2c9ae822149 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,7 +1,6 @@
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a24631fdf4ad..359cd2765552 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -28,6 +28,11 @@ struct imx_hdmi {
struct regmap *regmap;
};
+static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_hdmi, encoder);
+}
+
static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
{
45250000, {
@@ -109,15 +114,9 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
{
}
-static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
-}
-
-static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
-{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
@@ -125,16 +124,23 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
}
-static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
+static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
}
static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
- .mode_set = dw_hdmi_imx_encoder_mode_set,
- .prepare = dw_hdmi_imx_encoder_prepare,
- .commit = dw_hdmi_imx_encoder_commit,
+ .enable = dw_hdmi_imx_encoder_enable,
.disable = dw_hdmi_imx_encoder_disable,
+ .atomic_check = dw_hdmi_imx_atomic_check,
};
static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 82656654fb21..9f7dafce3a4c 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -15,10 +15,14 @@
*/
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reservation.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -41,6 +45,7 @@ struct imx_drm_device {
struct imx_drm_crtc *crtc[MAX_CRTC];
unsigned int pipes;
struct drm_fbdev_cma *fbhelper;
+ struct drm_atomic_state *state;
};
struct imx_drm_crtc {
@@ -85,45 +90,6 @@ static int imx_drm_driver_unload(struct drm_device *drm)
return 0;
}
-static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
-{
- struct imx_drm_device *imxdrm = crtc->dev->dev_private;
- unsigned i;
-
- for (i = 0; i < MAX_CRTC; i++)
- if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
- return imxdrm->crtc[i];
-
- return NULL;
-}
-
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin, u32 bus_flags)
-{
- struct imx_drm_crtc_helper_funcs *helper;
- struct imx_drm_crtc *imx_crtc;
-
- imx_crtc = imx_drm_find_crtc(encoder->crtc);
- if (!imx_crtc)
- return -EINVAL;
-
- helper = &imx_crtc->imx_drm_helper_funcs;
- if (helper->set_interface_pix_fmt)
- return helper->set_interface_pix_fmt(encoder->crtc,
- bus_format, hsync_pin, vsync_pin,
- bus_flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
-
-int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
-{
- return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
- DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE);
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
-
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
return drm_crtc_vblank_get(imx_drm_crtc->crtc);
@@ -208,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_gem_cma_object *cma_obj;
+ struct fence *excl;
+ unsigned shared_count;
+ struct fence **shared;
+ unsigned int i, j;
+ int ret;
+
+ /* Wait for fences. */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ plane_state = crtc->primary->state;
+ if (plane_state->fb) {
+ cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+ if (cma_obj->base.dma_buf) {
+ ret = reservation_object_get_fences_rcu(
+ cma_obj->base.dma_buf->resv, &excl,
+ &shared_count, &shared);
+ if (unlikely(ret))
+ DRM_ERROR("failed to get fences "
+ "for buffer\n");
+
+ if (excl) {
+ fence_wait(excl, false);
+ fence_put(excl);
+ }
+ for (j = 0; j < shared_count; i++) {
+ fence_wait(shared[j], false);
+ fence_put(shared[j]);
+ }
+ }
+ }
+ }
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, true);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+ .atomic_commit_tail = imx_drm_atomic_commit_tail,
};
/*
@@ -249,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
drm_mode_config_init(drm);
@@ -279,6 +303,8 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
}
}
+ drm_mode_config_reset(drm);
+
/*
* All components are now initialised, so setup the fb helper.
* The fb helper takes copies of key hardware information, so the
@@ -289,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
legacyfb_depth = 16;
}
- drm_helper_disable_unused_functions(drm);
imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
drm->mode_config.num_crtc, MAX_CRTC);
if (IS_ERR(imxdrm->fbhelper)) {
@@ -403,11 +428,11 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
};
static struct drm_driver imx_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
- .set_busid = drm_platform_set_busid,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -492,6 +517,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct imx_drm_device *imxdrm;
/* The drm_dev is NULL before .load hook is called */
if (drm_dev == NULL)
@@ -499,17 +525,26 @@ static int imx_drm_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
+ imxdrm = drm_dev->dev_private;
+ imxdrm->state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(imxdrm->state)) {
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(imxdrm->state);
+ }
+
return 0;
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct imx_drm_device *imx_drm;
if (drm_dev == NULL)
return 0;
- drm_helper_resume_force_mode(drm_dev);
+ imx_drm = drm_dev->dev_private;
+ drm_atomic_helper_resume(drm_dev, imx_drm->state);
drm_kms_helper_poll_enable(drm_dev);
return 0;
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 74320a1723b7..07d33e45f90f 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -15,12 +15,22 @@ struct platform_device;
unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
+struct imx_crtc_state {
+ struct drm_crtc_state base;
+ u32 bus_format;
+ u32 bus_flags;
+ int di_hsync_pin;
+ int di_vsync_pin;
+};
+
+static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct imx_crtc_state, base);
+}
+
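Editorial aside: wrapping drm_crtc_state in imx_crtc_state like this means the CRTC's .atomic_duplicate_state hook has to allocate and copy the wrapper. A minimal sketch of that pattern (the function name is hypothetical; it assumes the standard __drm_atomic_helper_crtc_duplicate_state() helper and the fields declared above):

/* Sketch only: duplicate the subclassed CRTC state, copying the i.MX fields. */
static struct drm_crtc_state *
example_imx_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct imx_crtc_state *state, *old = to_imx_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	state->bus_format = old->bus_format;
	state->bus_flags = old->bus_flags;
	state->di_hsync_pin = old->di_hsync_pin;
	state->di_vsync_pin = old->di_vsync_pin;

	return &state->base;
}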
struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
- int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin,
- u32 bus_flags);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
@@ -42,11 +52,6 @@ void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin, u32 bus_flags);
-int imx_drm_set_bus_format(struct drm_encoder *encoder,
- u32 bus_format);
-
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index beff793bb717..b03919ed60ba 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -17,6 +17,8 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
@@ -49,9 +51,6 @@
#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
#define LDB_BGREF_RMODE_INT (1 << 15)
-#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
-#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
-
struct imx_ldb;
struct imx_ldb_channel {
@@ -66,9 +65,19 @@ struct imx_ldb_channel {
int edid_len;
struct drm_display_mode mode;
int mode_valid;
- int bus_format;
+ u32 bus_format;
};
+static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
+{
+ return container_of(c, struct imx_ldb_channel, connector);
+}
+
+static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_ldb_channel, encoder);
+}
+
struct bus_mux {
int reg;
int shift;
@@ -93,6 +102,32 @@ static enum drm_connector_status imx_ldb_connector_detect(
return connector_status_connected;
}
+static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
+ u32 bus_format)
+{
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ }
+}
+
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -100,11 +135,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
imx_ldb_ch->panel->funcs->get_modes) {
- struct drm_display_info *di = &connector->display_info;
-
num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
- if (!imx_ldb_ch->bus_format && di->num_bus_formats)
- imx_ldb_ch->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
@@ -141,10 +172,6 @@ static struct drm_encoder *imx_ldb_connector_best_encoder(
return &imx_ldb_ch->encoder;
}
-static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
unsigned long serial_clk, unsigned long di_clk)
{
@@ -173,43 +200,7 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
chno);
}
-static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
- struct imx_ldb *ldb = imx_ldb_ch->ldb;
- int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
- u32 bus_format;
-
- switch (imx_ldb_ch->bus_format) {
- default:
- dev_warn(ldb->dev,
- "could not determine data mapping, default to 18-bit \"spwg\"\n");
- /* fallthrough */
- case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
- bus_format = MEDIA_BUS_FMT_RGB666_1X18;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
- bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- if (imx_ldb_ch->chno == 0 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
- if (imx_ldb_ch->chno == 1 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
- bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- if (imx_ldb_ch->chno == 0 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
- LDB_BIT_MAP_CH0_JEIDA;
- if (imx_ldb_ch->chno == 1 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
- LDB_BIT_MAP_CH1_JEIDA;
- break;
- }
-
- imx_drm_set_bus_format(encoder, bus_format);
-}
-
-static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
+static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
@@ -219,8 +210,13 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
+
clk_prepare_enable(ldb->clk[0]);
clk_prepare_enable(ldb->clk[1]);
+ } else {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
}
if (imx_ldb_ch == &ldb->channel[0] || dual) {
@@ -265,6 +261,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ u32 bus_format = imx_ldb_ch->bus_format;
if (mode->clock > 170000) {
dev_warn(ldb->dev,
@@ -286,18 +283,33 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
}
/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
- if (imx_ldb_ch == &ldb->channel[0]) {
+ if (imx_ldb_ch == &ldb->channel[0] || dual) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
}
- if (imx_ldb_ch == &ldb->channel[1]) {
+ if (imx_ldb_ch == &ldb->channel[1] || dual) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
}
+
+ if (!bus_format) {
+ struct drm_connector *connector;
+
+ drm_for_each_connector(connector, encoder->dev) {
+ struct drm_display_info *di = &connector->display_info;
+
+ if (connector->encoder == encoder &&
+ di->num_bus_formats) {
+ bus_format = di->bus_formats[0];
+ break;
+ }
+ }
+ }
+ imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
}
static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
@@ -357,11 +369,45 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imx_ldb_ch->panel);
}
+static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ u32 bus_format = imx_ldb_ch->bus_format;
+
+ /* Bus format description in DT overrides connector display info. */
+ if (!bus_format && di->num_bus_formats)
+ bus_format = di->bus_formats[0];
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_ldb_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_ldb_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
@@ -374,11 +420,10 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
- .dpms = imx_ldb_encoder_dpms,
- .prepare = imx_ldb_encoder_prepare,
- .commit = imx_ldb_encoder_commit,
.mode_set = imx_ldb_encoder_mode_set,
+ .enable = imx_ldb_encoder_enable,
.disable = imx_ldb_encoder_disable,
+ .atomic_check = imx_ldb_encoder_atomic_check,
};
static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
@@ -400,10 +445,10 @@ static int imx_ldb_register(struct drm_device *drm,
struct imx_ldb_channel *imx_ldb_ch)
{
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ struct drm_encoder *encoder = &imx_ldb_ch->encoder;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
- imx_ldb_ch->child);
+ ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
if (ret)
return ret;
@@ -417,9 +462,8 @@ static int imx_ldb_register(struct drm_device *drm,
return ret;
}
- drm_encoder_helper_add(&imx_ldb_ch->encoder,
- &imx_ldb_encoder_helper_funcs);
- drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+ drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
drm_connector_helper_add(&imx_ldb_ch->connector,
@@ -427,11 +471,14 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
- if (imx_ldb_ch->panel)
- drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+ if (imx_ldb_ch->panel) {
+ ret = drm_panel_attach(imx_ldb_ch->panel,
+ &imx_ldb_ch->connector);
+ if (ret)
+ return ret;
+ }
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
- &imx_ldb_ch->encoder);
+ drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
return 0;
}
@@ -560,6 +607,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct imx_ldb_channel *channel;
struct device_node *ddc_node;
struct device_node *ep;
+ int bus_format;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
@@ -632,21 +680,22 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
}
}
- channel->bus_format = of_get_bus_format(dev, child);
- if (channel->bus_format == -EINVAL) {
+ bus_format = of_get_bus_format(dev, child);
+ if (bus_format == -EINVAL) {
/*
* If no bus format was specified in the device tree,
* we can still get it from the connected panel later.
*/
if (channel->panel && channel->panel->funcs &&
channel->panel->funcs->get_modes)
- channel->bus_format = 0;
+ bus_format = 0;
}
- if (channel->bus_format < 0) {
+ if (bus_format < 0) {
dev_err(dev, "could not determine data mapping: %d\n",
- channel->bus_format);
- return channel->bus_format;
+ bus_format);
+ return bus_format;
}
+ channel->bus_format = bus_format;
ret = imx_ldb_register(drm, channel);
if (ret)
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index baf788121287..5e875944ffa2 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <video/imx-ipu-v3.h>
@@ -97,9 +98,6 @@
/* TVE_TST_MODE_REG */
#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
-#define con_to_tve(x) container_of(x, struct imx_tve, connector)
-#define enc_to_tve(x) container_of(x, struct imx_tve, encoder)
-
enum {
TVE_MODE_TVOUT,
TVE_MODE_VGA,
@@ -112,6 +110,8 @@ struct imx_tve {
spinlock_t lock; /* register lock */
bool enabled;
int mode;
+ int di_hsync_pin;
+ int di_vsync_pin;
struct regmap *regmap;
struct regulator *dac_reg;
@@ -120,10 +120,18 @@ struct imx_tve {
struct clk *di_sel_clk;
struct clk_hw clk_hw_di;
struct clk *di_clk;
- int vsync_pin;
- int hsync_pin;
};
+static inline struct imx_tve *con_to_tve(struct drm_connector *c)
+{
+ return container_of(c, struct imx_tve, connector);
+}
+
+static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_tve, encoder);
+}
+
static void tve_lock(void *__tve)
__acquires(&tve->lock)
{
@@ -148,8 +156,7 @@ static void tve_enable(struct imx_tve *tve)
tve->enabled = true;
clk_prepare_enable(tve->clk);
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_IPU_CLK_EN | TVE_EN,
- TVE_IPU_CLK_EN | TVE_EN);
+ TVE_EN, TVE_EN);
}
/* clear interrupt status register */
@@ -172,7 +179,7 @@ static void tve_disable(struct imx_tve *tve)
if (tve->enabled) {
tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_IPU_CLK_EN | TVE_EN, 0);
+ TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
}
@@ -275,36 +282,6 @@ static struct drm_encoder *imx_tve_connector_best_encoder(
return &tve->encoder;
}
-static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_tve *tve = enc_to_tve(encoder);
- int ret;
-
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE);
- if (ret < 0)
- dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
-}
-
-static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_tve *tve = enc_to_tve(encoder);
-
- tve_disable(tve);
-
- switch (tve->mode) {
- case TVE_MODE_VGA:
- imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
- tve->hsync_pin, tve->vsync_pin,
- DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE);
- break;
- case TVE_MODE_TVOUT:
- imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
- break;
- }
-}
-
static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
@@ -333,6 +310,9 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
ret);
}
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
+
if (tve->mode == TVE_MODE_VGA)
ret = tve_setup_vga(tve);
else
@@ -341,7 +321,7 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
dev_err(tve->dev, "failed to set configuration: %d\n", ret);
}
-static void imx_tve_encoder_commit(struct drm_encoder *encoder)
+static void imx_tve_encoder_enable(struct drm_encoder *encoder)
{
struct imx_tve *tve = enc_to_tve(encoder);
@@ -355,11 +335,28 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
tve_disable(tve);
}
+static int imx_tve_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_tve *tve = enc_to_tve(encoder);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
+ imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
+ imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_tve_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_tve_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -373,11 +370,10 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
- .dpms = imx_tve_encoder_dpms,
- .prepare = imx_tve_encoder_prepare,
.mode_set = imx_tve_encoder_mode_set,
- .commit = imx_tve_encoder_commit,
+ .enable = imx_tve_encoder_enable,
.disable = imx_tve_encoder_disable,
+ .atomic_check = imx_tve_atomic_check,
};
static irqreturn_t imx_tve_irq_handler(int irq, void *data)
@@ -495,8 +491,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
encoder_type = tve->mode == TVE_MODE_VGA ?
DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
- ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
- tve->dev->of_node);
+ ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
if (ret)
return ret;
@@ -587,15 +582,15 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
if (tve->mode == TVE_MODE_VGA) {
ret = of_property_read_u32(np, "fsl,hsync-pin",
- &tve->hsync_pin);
+ &tve->di_hsync_pin);
if (ret < 0) {
- dev_err(dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get hsync pin\n");
return ret;
}
- ret |= of_property_read_u32(np, "fsl,vsync-pin",
- &tve->vsync_pin);
+ ret = of_property_read_u32(np, "fsl,vsync-pin",
+ &tve->di_vsync_pin);
if (ret < 0) {
dev_err(dev, "failed to get vsync pin\n");
@@ -633,7 +628,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
tve->dac_reg = devm_regulator_get(dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
- regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+ ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+ if (ret)
+ return ret;
ret = regulator_enable(tve->dac_reg);
if (ret)
return ret;
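
The hunks above drop the legacy ->dpms/->prepare/->commit encoder callbacks: the bus configuration is now reported from ->atomic_check into a driver-private CRTC state, which the CRTC consumes later in ->mode_set_nofb. Below is a minimal, hedged sketch of that pattern; the "foo" names, the wrapper struct and the pin numbers are placeholders, not imx-drm's types.

/*
 * Sketch only: an encoder ->atomic_check that records bus parameters in a
 * driver-private CRTC state wrapper. All "foo" identifiers are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/media-bus-format.h>
#include <drm/drm_crtc.h>

struct foo_crtc_state {
	struct drm_crtc_state base;	/* wrapped core state */
	u32 bus_format;			/* media bus format for the display interface */
	int di_hsync_pin;
	int di_vsync_pin;
};

static inline struct foo_crtc_state *to_foo_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct foo_crtc_state, base);
}

static int foo_encoder_atomic_check(struct drm_encoder *encoder,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct foo_crtc_state *state = to_foo_crtc_state(crtc_state);

	/* Stash what the CRTC needs later in ->mode_set_nofb. */
	state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
	state->di_hsync_pin = 1;
	state->di_vsync_pin = 2;

	return 0;
}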
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index fc040417e1e8..08e188bc10fc 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -18,12 +18,12 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -33,23 +33,6 @@
#define DRIVER_DESC "i.MX IPUv3 Graphics"
-enum ipu_flip_status {
- IPU_FLIP_NONE,
- IPU_FLIP_PENDING,
- IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
- struct work_struct unref_work;
- struct drm_gem_object *bo;
- struct drm_pending_vblank_event *page_flip_event;
- struct work_struct fence_work;
- struct ipu_crtc *crtc;
- struct fence *excl;
- unsigned shared_count;
- struct fence **shared;
-};
-
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
@@ -60,201 +43,166 @@ struct ipu_crtc {
struct ipu_dc *dc;
struct ipu_di *di;
- int enabled;
- enum ipu_flip_status flip_state;
- struct workqueue_struct *flip_queue;
- struct ipu_flip_work *flip_work;
int irq;
- u32 bus_format;
- u32 bus_flags;
- int di_hsync_pin;
- int di_vsync_pin;
};
-#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
+static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct ipu_crtc, base);
+}
-static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_enable(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- if (ipu_crtc->enabled)
- return;
-
ipu_dc_enable(ipu);
- ipu_plane_enable(ipu_crtc->plane[0]);
- /* Start DC channel and DI after IDMAC */
ipu_dc_enable_channel(ipu_crtc->dc);
ipu_di_enable(ipu_crtc->di);
- drm_crtc_vblank_on(&ipu_crtc->base);
-
- ipu_crtc->enabled = 1;
}
-static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_disable(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- if (!ipu_crtc->enabled)
- return;
-
- /* Stop DC channel and DI before IDMAC */
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
- ipu_plane_disable(ipu_crtc->plane[0]);
ipu_dc_disable(ipu);
- drm_crtc_vblank_off(&ipu_crtc->base);
- ipu_crtc->enabled = 0;
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
}
-static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void imx_drm_crtc_reset(struct drm_crtc *crtc)
{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct imx_crtc_state *state;
- dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- ipu_fb_enable(ipu_crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- ipu_fb_disable(ipu_crtc);
- break;
+ if (crtc->state) {
+ if (crtc->state->mode_blob)
+ drm_property_unreference_blob(crtc->state->mode_blob);
+
+ state = to_imx_crtc_state(crtc->state);
+ memset(state, 0, sizeof(*state));
+ } else {
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+ crtc->state = &state->base;
}
+
+ state->base.crtc = crtc;
}
-static void ipu_flip_unref_work_func(struct work_struct *__work)
+static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct ipu_flip_work *work =
- container_of(__work, struct ipu_flip_work, unref_work);
+ struct imx_crtc_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
- drm_gem_object_unreference_unlocked(work->bo);
- kfree(work);
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ WARN_ON(state->base.crtc != crtc);
+ state->base.crtc = crtc;
+
+ return &state->base;
}
-static void ipu_flip_fence_work_func(struct work_struct *__work)
+static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct ipu_flip_work *work =
- container_of(__work, struct ipu_flip_work, fence_work);
- int i;
-
- /* wait for all fences attached to the FB obj to signal */
- if (work->excl) {
- fence_wait(work->excl, false);
- fence_put(work->excl);
- }
- for (i = 0; i < work->shared_count; i++) {
- fence_wait(work->shared[i], false);
- fence_put(work->shared[i]);
- }
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_imx_crtc_state(state));
+}
+
+static const struct drm_crtc_funcs ipu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = imx_drm_crtc_reset,
+ .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
+ .atomic_destroy_state = imx_drm_crtc_destroy_state,
+};
- work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_crtc *ipu_crtc = dev_id;
+
+ imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+
+ return IRQ_HANDLED;
}
-static int ipu_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct ipu_flip_work *flip_work;
+ struct videomode vm;
int ret;
- if (ipu_crtc->flip_state != IPU_FLIP_NONE)
- return -EBUSY;
-
- ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
- if (ret) {
- dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
- list_del(&event->base.link);
-
- return ret;
- }
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
- flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
- if (!flip_work) {
- ret = -ENOMEM;
- goto put_vblank;
- }
- INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
- flip_work->page_flip_event = event;
+ ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+ if (ret)
+ return false;
- /* get BO backing the old framebuffer and take a reference */
- flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
- drm_gem_object_reference(flip_work->bo);
+ if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
+ return false;
- ipu_crtc->flip_work = flip_work;
- /*
- * If the object has a DMABUF attached, we need to wait on its fences
- * if there are any.
- */
- if (cma_obj->base.dma_buf) {
- INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
- flip_work->crtc = ipu_crtc;
+ drm_display_mode_from_videomode(&vm, adjusted_mode);
- ret = reservation_object_get_fences_rcu(
- cma_obj->base.dma_buf->resv, &flip_work->excl,
- &flip_work->shared_count, &flip_work->shared);
+ return true;
+}
- if (unlikely(ret)) {
- DRM_ERROR("failed to get fences for buffer\n");
- goto free_flip_work;
- }
+static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
- /* No need to queue the worker if the are no fences */
- if (!flip_work->excl && !flip_work->shared_count) {
- ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
- } else {
- ipu_crtc->flip_state = IPU_FLIP_PENDING;
- queue_work(ipu_crtc->flip_queue,
- &flip_work->fence_work);
- }
- } else {
- ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
- }
+ if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+ return -EINVAL;
return 0;
-
-free_flip_work:
- drm_gem_object_unreference_unlocked(flip_work->bo);
- kfree(flip_work);
- ipu_crtc->flip_work = NULL;
-put_vblank:
- imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
- return ret;
}
-static const struct drm_crtc_funcs ipu_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .destroy = drm_crtc_cleanup,
- .page_flip = ipu_page_flip,
-};
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
-static int ipu_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct ipu_di_signal_cfg sig_cfg = {};
unsigned long encoder_types = 0;
- int ret;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
mode->hdisplay);
dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
mode->vdisplay);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc)
encoder_types |= BIT(encoder->encoder_type);
+ }
dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
__func__, encoder_types);
@@ -272,114 +220,30 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
else
sig_cfg.clkflags = 0;
- sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+ sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
/* Default to driving pixel data on negative clock edges */
- sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+ sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
DRM_BUS_FLAG_PIXDATA_POSEDGE);
- sig_cfg.bus_format = ipu_crtc->bus_format;
+ sig_cfg.bus_format = imx_crtc_state->bus_format;
sig_cfg.v_to_h_sync = 0;
- sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
- sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
+ sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
+ sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
drm_display_mode_to_videomode(mode, &sig_cfg.mode);
- ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
- mode->flags & DRM_MODE_FLAG_INTERLACE,
- ipu_crtc->bus_format, mode->hdisplay);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing display controller failed with %d\n",
- ret);
- return ret;
- }
-
- ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing panel failed with %d\n", ret);
- return ret;
- }
-
- return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
- crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x, y, mode->hdisplay, mode->vdisplay,
- mode->flags & DRM_MODE_FLAG_INTERLACE);
-}
-
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
- unsigned long flags;
- struct drm_device *drm = ipu_crtc->base.dev;
- struct ipu_flip_work *work = ipu_crtc->flip_work;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- if (work->page_flip_event)
- drm_crtc_send_vblank_event(&ipu_crtc->base,
- work->page_flip_event);
- imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
-static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
-{
- struct ipu_crtc *ipu_crtc = dev_id;
-
- imx_drm_handle_vblank(ipu_crtc->imx_crtc);
-
- if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
- struct ipu_plane *plane = ipu_crtc->plane[0];
-
- ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
- plane->x, plane->y);
- ipu_crtc_handle_pageflip(ipu_crtc);
- queue_work(ipu_crtc->flip_queue,
- &ipu_crtc->flip_work->unref_work);
- ipu_crtc->flip_state = IPU_FLIP_NONE;
- }
-
- return IRQ_HANDLED;
-}
-
-static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct videomode vm;
- int ret;
-
- drm_display_mode_to_videomode(adjusted_mode, &vm);
-
- ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
- if (ret)
- return false;
-
- drm_display_mode_from_videomode(&vm, adjusted_mode);
-
- return true;
-}
-
-static void ipu_crtc_prepare(struct drm_crtc *crtc)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_fb_disable(ipu_crtc);
-}
-
-static void ipu_crtc_commit(struct drm_crtc *crtc)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_fb_enable(ipu_crtc);
+ ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+ mode->flags & DRM_MODE_FLAG_INTERLACE,
+ imx_crtc_state->bus_format, mode->hdisplay);
+ ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
}
static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
- .dpms = ipu_crtc_dpms,
.mode_fixup = ipu_crtc_mode_fixup,
- .mode_set = ipu_crtc_mode_set,
- .prepare = ipu_crtc_prepare,
- .commit = ipu_crtc_commit,
+ .mode_set_nofb = ipu_crtc_mode_set_nofb,
+ .atomic_check = ipu_crtc_atomic_check,
+ .atomic_begin = ipu_crtc_atomic_begin,
+ .disable = ipu_crtc_disable,
+ .enable = ipu_crtc_enable,
};
static int ipu_enable_vblank(struct drm_crtc *crtc)
@@ -398,23 +262,9 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
disable_irq_nosync(ipu_crtc->irq);
}
-static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_crtc->bus_format = bus_format;
- ipu_crtc->bus_flags = bus_flags;
- ipu_crtc->di_hsync_pin = hsync_pin;
- ipu_crtc->di_vsync_pin = vsync_pin;
-
- return 0;
-}
-
static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
.enable_vblank = ipu_enable_vblank,
.disable_vblank = ipu_disable_vblank,
- .set_interface_pix_fmt = ipu_set_interface_pix_fmt,
.crtc_funcs = &ipu_crtc_funcs,
.crtc_helper_funcs = &ipu_helper_funcs,
};
@@ -496,8 +346,16 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(ipu_crtc->plane[1]))
+ if (IS_ERR(ipu_crtc->plane[1])) {
ipu_crtc->plane[1] = NULL;
+ } else {
+ ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
+ if (ret) {
+ dev_err(ipu_crtc->dev, "getting plane 1 "
+ "resources failed with %d.\n", ret);
+ goto err_put_plane0_res;
+ }
+ }
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -505,16 +363,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_plane_res;
+ goto err_put_plane1_res;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
- ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
return 0;
-err_put_plane_res:
+err_put_plane1_res:
+ if (ipu_crtc->plane[1])
+ ipu_plane_put_resources(ipu_crtc->plane[1]);
+err_put_plane0_res:
ipu_plane_put_resources(ipu_crtc->plane[0]);
err_remove_crtc:
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
@@ -553,9 +412,10 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
- destroy_workqueue(ipu_crtc->flip_queue);
- ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
+ if (ipu_crtc->plane[1])
+ ipu_plane_put_resources(ipu_crtc->plane[1]);
+ ipu_plane_put_resources(ipu_crtc->plane[0]);
}
static const struct component_ops ipu_crtc_ops = {
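
With the switch to atomic modeset, the CRTC above completes page-flip events in two places: ->atomic_begin arms the pending event on the vblank counter when a vblank is still going to fire, while ->disable sends the event immediately because no further vblank will arrive. A self-contained, hedged sketch of that event handling (function names are illustrative):

/*
 * Sketch only: atomic CRTC event handling. Both paths take event_lock and
 * clear crtc->state->event so the event is completed exactly once.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void example_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		/* A vblank will fire; let the core send the event from it. */
		WARN_ON(drm_crtc_vblank_get(crtc));
		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

static void example_crtc_disable(struct drm_crtc *crtc)
{
	/* ... display hardware is shut down here ... */

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		/* No more vblanks on a disabled CRTC; complete it now. */
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}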
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index a4bb44118d33..4ad67d015ec7 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -14,13 +14,19 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "video/imx-ipu-v3.h"
#include "ipuv3-plane.h"
-#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
+static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+{
+ return container_of(p, struct ipu_plane, base);
+}
static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_ARGB1555,
@@ -53,62 +59,67 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
IPU_IRQ_EOF);
}
-static int calc_vref(struct drm_display_mode *mode)
+static inline unsigned long
+drm_plane_state_to_eba(struct drm_plane_state *state)
{
- unsigned long htotal, vtotal;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
- htotal = mode->htotal;
- vtotal = mode->vtotal;
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ BUG_ON(!cma_obj);
- if (!htotal || !vtotal)
- return 60;
-
- return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
+ return cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * (state->src_y >> 16) +
+ (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
}
-static inline int calc_bandwidth(int width, int height, unsigned int vref)
+static inline unsigned long
+drm_plane_state_to_ubo(struct drm_plane_state *state)
{
- return width * height * vref;
-}
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state);
-int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
- int x, int y)
-{
- struct drm_gem_cma_object *cma_obj[3];
- unsigned long eba, ubo, vbo;
- int active, i;
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
+ BUG_ON(!cma_obj);
- for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
- cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
- if (!cma_obj[i]) {
- DRM_DEBUG_KMS("plane %d entry is null.\n", i);
- return -EFAULT;
- }
- }
+ return cma_obj->paddr + fb->offsets[1] +
+ fb->pitches[1] * (state->src_y >> 16) / 2 +
+ (state->src_x >> 16) / 2 - eba;
+}
- eba = cma_obj[0]->paddr + fb->offsets[0] +
- fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+static inline unsigned long
+drm_plane_state_to_vbo(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state);
- if (eba & 0x7) {
- DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
- return -EINVAL;
- }
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
+ BUG_ON(!cma_obj);
- if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
- DRM_DEBUG_KMS("pitches out of range.\n");
- return -EINVAL;
- }
+ return cma_obj->paddr + fb->offsets[2] +
+ fb->pitches[2] * (state->src_y >> 16) / 2 +
+ (state->src_x >> 16) / 2 - eba;
+}
- if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
- DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
- return -EINVAL;
- }
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane *plane = &ipu_plane->base;
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
+ int active;
- ipu_plane->stride[0] = fb->pitches[0];
+ eba = drm_plane_state_to_eba(state);
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
+ if (old_state->fb)
+ break;
+
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -117,59 +128,28 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = cma_obj[1]->paddr + fb->offsets[1] +
- fb->pitches[1] * y / 2 + x / 2 - eba;
- vbo = cma_obj[2]->paddr + fb->offsets[2] +
- fb->pitches[2] * y / 2 + x / 2 - eba;
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
- if ((ubo & 0x7) || (vbo & 0x7)) {
- DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
- return -EINVAL;
- }
-
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
- DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
- return -EINVAL;
- }
-
- if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
- (ipu_plane->v_offset != vbo))) {
- DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
- return -EINVAL;
- }
-
- if (fb->pitches[1] != fb->pitches[2]) {
- DRM_DEBUG_KMS("U/V pitches must be identical.\n");
- return -EINVAL;
- }
-
- if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
- DRM_DEBUG_KMS("U/V pitches out of range.\n");
- return -EINVAL;
- }
-
- if (ipu_plane->enabled &&
- (ipu_plane->stride[1] != fb->pitches[1])) {
- DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
- return -EINVAL;
- }
-
- ipu_plane->u_offset = ubo;
- ipu_plane->v_offset = vbo;
- ipu_plane->stride[1] = fb->pitches[1];
+ if (fb->pixel_format == DRM_FORMAT_YUV420)
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+ else
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], vbo, ubo);
dev_dbg(ipu_plane->base.dev->dev,
- "phys = %pad %pad %pad, x = %d, y = %d",
- &cma_obj[0]->paddr, &cma_obj[1]->paddr,
- &cma_obj[2]->paddr, x, y);
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
break;
default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
- &cma_obj[0]->paddr, x, y);
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+
break;
}
- if (ipu_plane->enabled) {
+ if (old_state->fb) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -177,155 +157,6 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
}
-
- /* cache offsets for subsequent pageflips */
- ipu_plane->x = x;
- ipu_plane->y = y;
-
- return 0;
-}
-
-int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h, bool interlaced)
-{
- struct device *dev = ipu_plane->base.dev->dev;
- int ret;
-
- /* no scaling */
- if (src_w != crtc_w || src_h != crtc_h)
- return -EINVAL;
-
- /* clip to crtc bounds */
- if (crtc_x < 0) {
- if (-crtc_x > crtc_w)
- return -EINVAL;
- src_x += -crtc_x;
- src_w -= -crtc_x;
- crtc_w -= -crtc_x;
- crtc_x = 0;
- }
- if (crtc_y < 0) {
- if (-crtc_y > crtc_h)
- return -EINVAL;
- src_y += -crtc_y;
- src_h -= -crtc_y;
- crtc_h -= -crtc_y;
- crtc_y = 0;
- }
- if (crtc_x + crtc_w > mode->hdisplay) {
- if (crtc_x > mode->hdisplay)
- return -EINVAL;
- crtc_w = mode->hdisplay - crtc_x;
- src_w = crtc_w;
- }
- if (crtc_y + crtc_h > mode->vdisplay) {
- if (crtc_y > mode->vdisplay)
- return -EINVAL;
- crtc_h = mode->vdisplay - crtc_y;
- src_h = crtc_h;
- }
- /* full plane minimum width is 13 pixels */
- if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
- return -EINVAL;
- if (crtc_h < 2)
- return -EINVAL;
-
- /*
- * since we cannot touch active IDMAC channels, we do not support
- * resizing the enabled plane or changing its format
- */
- if (ipu_plane->enabled) {
- if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
- fb->pixel_format != ipu_plane->base.fb->pixel_format)
- return -EINVAL;
-
- return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
- }
-
- switch (ipu_plane->dp_flow) {
- case IPU_DP_FLOW_SYNC_BG:
- ret = ipu_dp_setup_channel(ipu_plane->dp,
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
- if (ret) {
- dev_err(dev,
- "initializing display processor failed with %d\n",
- ret);
- return ret;
- }
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
- break;
- case IPU_DP_FLOW_SYNC_FG:
- ipu_dp_setup_channel(ipu_plane->dp,
- ipu_drm_fourcc_to_colorspace(fb->pixel_format),
- IPUV3_COLORSPACE_UNKNOWN);
- ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
- /* Enable local alpha on partial plane */
- switch (fb->pixel_format) {
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
- break;
- default:
- break;
- }
- }
-
- ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
- calc_bandwidth(crtc_w, crtc_h,
- calc_vref(mode)), 64);
- if (ret) {
- dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
- return ret;
- }
-
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
-
- ipu_cpmem_zero(ipu_plane->ipu_ch);
- ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
- ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
- if (ret < 0) {
- dev_err(dev, "unsupported pixel format 0x%08x\n",
- fb->pixel_format);
- return ret;
- }
- ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
- ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
- ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
- if (ret < 0)
- return ret;
- if (interlaced)
- ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420) {
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- ipu_plane->stride[1],
- ipu_plane->u_offset,
- ipu_plane->v_offset);
- } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- ipu_plane->stride[1],
- ipu_plane->v_offset,
- ipu_plane->u_offset);
- }
-
- ipu_plane->w = src_w;
- ipu_plane->h = src_h;
-
- return 0;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -372,7 +203,7 @@ err_out:
return ret;
}
-void ipu_plane_enable(struct ipu_plane *ipu_plane)
+static void ipu_plane_enable(struct ipu_plane *ipu_plane)
{
if (ipu_plane->dp)
ipu_dp_enable(ipu_plane->ipu);
@@ -380,14 +211,10 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane)
ipu_idmac_enable_channel(ipu_plane->ipu_ch);
if (ipu_plane->dp)
ipu_dp_enable_channel(ipu_plane->dp);
-
- ipu_plane->enabled = true;
}
-void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static void ipu_plane_disable(struct ipu_plane *ipu_plane)
{
- ipu_plane->enabled = false;
-
ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
if (ipu_plane->dp)
@@ -398,74 +225,225 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane)
ipu_dp_disable(ipu_plane->ipu);
}
-/*
- * drm_plane API
- */
-
-static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static int ipu_disable_plane(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
- int ret = 0;
-
- DRM_DEBUG_KMS("plane - %p\n", plane);
-
- if (!ipu_plane->enabled)
- ret = ipu_plane_get_resources(ipu_plane);
- if (ret < 0)
- return ret;
-
- ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
- false);
- if (ret < 0) {
- ipu_plane_put_resources(ipu_plane);
- return ret;
- }
- if (crtc != plane->crtc)
- dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
- plane->crtc, crtc);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!ipu_plane->enabled)
- ipu_plane_enable(ipu_plane);
+ ipu_plane_disable(ipu_plane);
return 0;
}
-static int ipu_disable_plane(struct drm_plane *plane)
+static void ipu_plane_destroy(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (ipu_plane->enabled)
- ipu_plane_disable(ipu_plane);
+ ipu_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(ipu_plane);
+}
- ipu_plane_put_resources(ipu_plane);
+static const struct drm_plane_funcs ipu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = ipu_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ipu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_plane_state *old_state = plane->state;
+ struct drm_crtc_state *crtc_state;
+ struct device *dev = plane->dev->dev;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+ unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+
+ /* Ok to disable */
+ if (!fb)
+ return 0;
+
+ if (!state->crtc)
+ return -EINVAL;
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* CRTC should be enabled */
+ if (!crtc_state->enable)
+ return -EINVAL;
+
+ /* no scaling */
+ if (state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h)
+ return -EINVAL;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ /* full plane doesn't support partial off screen */
+ if (state->crtc_x || state->crtc_y ||
+ state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
+ state->crtc_h != crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ /* full plane minimum width is 13 pixels */
+ if (state->crtc_w < 13)
+ return -EINVAL;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w >
+ crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+ if (state->crtc_y + state->crtc_h >
+ crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+ break;
+ default:
+ dev_warn(dev, "Unsupported plane type\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_h < 2)
+ return -EINVAL;
+
+ /*
+ * since we cannot touch active IDMAC channels, we do not support
+ * resizing the enabled plane or changing its format
+ */
+ if (old_fb && (state->src_w != old_state->src_w ||
+ state->src_h != old_state->src_h ||
+ fb->pixel_format != old_fb->pixel_format))
+ return -EINVAL;
+
+ eba = drm_plane_state_to_eba(state);
+
+ if (eba & 0x7)
+ return -EINVAL;
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
+ return -EINVAL;
+
+ if (old_fb && fb->pitches[0] != old_fb->pitches[0])
+ return -EINVAL;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+
+ if ((ubo & 0x7) || (vbo & 0x7))
+ return -EINVAL;
+
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+ return -EINVAL;
+
+ if (old_fb) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ old_vbo = drm_plane_state_to_vbo(old_state);
+ if (ubo != old_ubo || vbo != old_vbo)
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2])
+ return -EINVAL;
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+ return -EINVAL;
+
+ if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+ return -EINVAL;
+ }
return 0;
}
-static void ipu_plane_destroy(struct drm_plane *plane)
+static void ipu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ ipu_disable_plane(plane);
+}
+
+static void ipu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ enum ipu_color_space ics;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ if (old_state->fb) {
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ return;
+ }
- ipu_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(ipu_plane);
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ ipu_dp_setup_channel(ipu_plane->dp,
+ IPUV3_COLORSPACE_RGB,
+ IPUV3_COLORSPACE_RGB);
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
+ ipu_dp_setup_channel(ipu_plane->dp, ics,
+ IPUV3_COLORSPACE_UNKNOWN);
+ ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
+ state->crtc_y);
+ /* Enable local alpha on partial plane */
+ switch (state->fb->pixel_format) {
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
+
+ ipu_cpmem_zero(ipu_plane->ipu_ch);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
+ state->src_h >> 16);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
+ ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ ipu_plane_enable(ipu_plane);
}
-static const struct drm_plane_funcs ipu_plane_funcs = {
- .update_plane = ipu_update_plane,
- .disable_plane = ipu_disable_plane,
- .destroy = ipu_plane_destroy,
+static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -498,5 +476,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
return ERR_PTR(ret);
}
+ drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+
return ipu_plane;
}
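
The plane now derives its scanout addresses from the atomic state rather than from explicit x/y arguments. state->src_x and state->src_y are 16.16 fixed-point, so the integer pixel position is obtained by shifting right by 16 before it enters the address calculation. A hedged sketch of the single-plane RGB case, assuming CMA-backed framebuffers (the helper name is illustrative); for the planar YUV formats, UBO and VBO are derived the same way from offsets[1]/offsets[2] and expressed relative to this base address.

/*
 * Sketch only: start-of-scanout address for plane 0 of a CMA framebuffer.
 */
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

static dma_addr_t example_scanout_address(struct drm_plane_state *state)
{
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
	unsigned int x = state->src_x >> 16;	/* 16.16 -> integer pixels */
	unsigned int y = state->src_y >> 16;

	return cma_obj->paddr + fb->offsets[0] +
	       fb->pitches[0] * y +
	       (fb->bits_per_pixel >> 3) * x;
}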
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 4448fd4ad4eb..338b88a74eb6 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -23,17 +23,6 @@ struct ipu_plane {
int dma;
int dp_flow;
-
- int x;
- int y;
- int w;
- int h;
-
- unsigned int u_offset;
- unsigned int v_offset;
- unsigned int stride[2];
-
- bool enabled;
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,11 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
uint32_t src_h, bool interlaced);
-void ipu_plane_enable(struct ipu_plane *plane);
-void ipu_plane_disable(struct ipu_plane *plane);
-int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
- int x, int y);
-
int ipu_plane_get_resources(struct ipu_plane *plane);
void ipu_plane_put_resources(struct ipu_plane *plane);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2d1fd02cd3d6..1dad297b01fd 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/module.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_panel.h>
@@ -25,9 +26,6 @@
#include "imx-drm.h"
-#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector)
-#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder)
-
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
@@ -37,8 +35,19 @@ struct imx_parallel_display {
u32 bus_format;
struct drm_display_mode mode;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
+static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
+{
+ return container_of(c, struct imx_parallel_display, connector);
+}
+
+static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_parallel_display, encoder);
+}
+
static enum drm_connector_status imx_pd_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -53,11 +62,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (imxpd->panel && imxpd->panel->funcs &&
imxpd->panel->funcs->get_modes) {
- struct drm_display_info *di = &connector->display_info;
-
num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
- if (!imxpd->bus_format && di->num_bus_formats)
- imxpd->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
@@ -69,10 +74,16 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ int ret;
if (!mode)
return -EINVAL;
- of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
+
+ ret = of_get_drm_display_mode(np, &imxpd->mode,
+ OF_USE_NATIVE_MODE);
+ if (ret)
+ return ret;
+
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
drm_mode_probed_add(connector, mode);
@@ -90,24 +101,7 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
return &imxpd->encoder;
}
-static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
- if (mode != DRM_MODE_DPMS_ON)
- drm_panel_disable(imxpd->panel);
- else
- drm_panel_enable(imxpd->panel);
-}
-
-static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
- imxpd->connector.display_info.bus_flags);
-}
-
-static void imx_pd_encoder_commit(struct drm_encoder *encoder)
+static void imx_pd_encoder_enable(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -115,12 +109,6 @@ static void imx_pd_encoder_commit(struct drm_encoder *encoder)
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode)
-{
-}
-
static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -129,11 +117,33 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imxpd->panel);
}
+static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
+ imx_crtc_state->bus_flags = di->bus_flags;
+ if (!imxpd->bus_format && di->num_bus_formats)
+ imx_crtc_state->bus_format = di->bus_formats[0];
+ else
+ imx_crtc_state->bus_format = imxpd->bus_format;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_pd_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_pd_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
@@ -146,20 +156,18 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .dpms = imx_pd_encoder_dpms,
- .prepare = imx_pd_encoder_prepare,
- .commit = imx_pd_encoder_commit,
- .mode_set = imx_pd_encoder_mode_set,
+ .enable = imx_pd_encoder_enable,
.disable = imx_pd_encoder_disable,
+ .atomic_check = imx_pd_encoder_atomic_check,
};
static int imx_pd_register(struct drm_device *drm,
struct imx_parallel_display *imxpd)
{
+ struct drm_encoder *encoder = &imxpd->encoder;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
- imxpd->dev->of_node);
+ ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
@@ -170,19 +178,33 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
- drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+ drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- drm_connector_helper_add(&imxpd->connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ if (!imxpd->bridge) {
+ drm_connector_helper_add(&imxpd->connector,
+ &imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, &imxpd->connector,
+ &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ }
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
+ if (imxpd->bridge) {
+ imxpd->bridge->encoder = encoder;
+ encoder->bridge = imxpd->bridge;
+ ret = drm_bridge_attach(drm, imxpd->bridge);
+ if (ret < 0) {
+ dev_err(imxpd->dev, "failed to attach bridge: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+ }
return 0;
}
@@ -195,6 +217,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
+ u32 bus_format = 0;
const char *fmt;
imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
@@ -208,14 +231,15 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
if (!strcmp(fmt, "rgb24"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
else if (!strcmp(fmt, "rgb565"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+ bus_format = MEDIA_BUS_FMT_RGB565_1X16;
else if (!strcmp(fmt, "bgr666"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ bus_format = MEDIA_BUS_FMT_RGB666_1X18;
else if (!strcmp(fmt, "lvds666"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+ bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
+ imxpd->bus_format = bus_format;
/* port@1 is the output port */
ep = of_graph_get_endpoint_by_regs(np, 1, -1);
@@ -223,13 +247,30 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
struct device_node *remote;
remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_warn(dev, "endpoint %s not connected\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -ENODEV;
+ }
of_node_put(ep);
- if (remote) {
- imxpd->panel = of_drm_find_panel(remote);
- of_node_put(remote);
+
+ imxpd->panel = of_drm_find_panel(remote);
+ if (imxpd->panel) {
+ dev_dbg(dev, "found panel %s\n", remote->full_name);
+ } else {
+ imxpd->bridge = of_drm_find_bridge(remote);
+ if (imxpd->bridge)
+ dev_dbg(dev, "found bridge %s\n",
+ remote->full_name);
}
- if (!imxpd->panel)
+ if (!imxpd->panel && !imxpd->bridge) {
+ dev_dbg(dev, "waiting for panel or bridge %s\n",
+ remote->full_name);
+ of_node_put(remote);
return -EPROBE_DEFER;
+ }
+ of_node_put(remote);
}
imxpd->dev = dev;
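
The bind path above now accepts either a panel or a bridge on the output endpoint and defers the probe until one of them has registered. A hedged sketch of that lookup, assuming port@1 is the output port as in this driver; the function name and error handling are illustrative, and callers are expected to pass zero-initialized panel/bridge pointers.

/*
 * Sketch only: find a panel or bridge behind the output endpoint, or ask
 * the driver core to retry the probe later.
 */
#include <linux/errno.h>
#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_panel.h>

static int example_find_panel_or_bridge(struct device *dev,
					struct drm_panel **panel,
					struct drm_bridge **bridge)
{
	struct device_node *ep, *remote;

	/* port@1 is assumed to be the output port. */
	ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
	if (!ep)
		return 0;

	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);
	if (!remote)
		return -ENODEV;

	*panel = of_drm_find_panel(remote);
	if (!*panel)
		*bridge = of_drm_find_bridge(remote);
	of_node_put(remote);

	/* Neither has registered yet: retry this probe later. */
	if (!*panel && !*bridge)
		return -EPROBE_DEFER;

	return 0;
}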
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index eeefc971801a..23ac8041c562 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -6,7 +6,6 @@ config DRM_MEDIATEK
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
- select IOMMU_DMA
select MEMORY
select MTK_SMI
help
@@ -14,3 +13,11 @@ config DRM_MEDIATEK
The module will be called mediatek-drm
This driver provides kernel mode setting and
buffer management to userspace.
+
+config DRM_MEDIATEK_HDMI
+ tristate "DRM HDMI Support for Mediatek SoCs"
+ depends on DRM_MEDIATEK
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ select GENERIC_PHY
+ help
+ DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5fcf58e87786..bf2e5be1ab30 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,3 +12,10 @@ mediatek-drm-y := mtk_disp_ovl.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
+
+mediatek-drm-hdmi-objs := mtk_cec.o \
+ mtk_hdmi.o \
+ mtk_hdmi_ddc.o \
+ mtk_mt8173_hdmi_phy.o
+
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
new file mode 100644
index 000000000000..7a3eb8c17ef9
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include "mtk_cec.h"
+
+#define TR_CONFIG 0x00
+#define CLEAR_CEC_IRQ BIT(15)
+
+#define CEC_CKGEN 0x04
+#define CEC_32K_PDN BIT(19)
+#define PDN BIT(16)
+
+#define RX_EVENT 0x54
+#define HDMI_PORD BIT(25)
+#define HDMI_HTPLG BIT(24)
+#define HDMI_PORD_INT_EN BIT(9)
+#define HDMI_HTPLG_INT_EN BIT(8)
+
+#define RX_GEN_WD 0x58
+#define HDMI_PORD_INT_32K_STATUS BIT(26)
+#define RX_RISC_INT_32K_STATUS BIT(25)
+#define HDMI_HTPLG_INT_32K_STATUS BIT(24)
+#define HDMI_PORD_INT_32K_CLR BIT(18)
+#define RX_INT_32K_CLR BIT(17)
+#define HDMI_HTPLG_INT_32K_CLR BIT(16)
+#define HDMI_PORD_INT_32K_STA_MASK BIT(10)
+#define RX_RISC_INT_32K_STA_MASK BIT(9)
+#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8)
+#define HDMI_PORD_INT_32K_EN BIT(2)
+#define RX_INT_32K_EN BIT(1)
+#define HDMI_HTPLG_INT_32K_EN BIT(0)
+
+#define NORMAL_INT_CTRL 0x5C
+#define HDMI_HTPLG_INT_STA BIT(0)
+#define HDMI_PORD_INT_STA BIT(1)
+#define HDMI_HTPLG_INT_CLR BIT(16)
+#define HDMI_PORD_INT_CLR BIT(17)
+#define HDMI_FULL_INT_CLR BIT(20)
+
+struct mtk_cec {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ bool hpd;
+ void (*hpd_event)(bool hpd, struct device *dev);
+ struct device *hdmi_dev;
+ spinlock_t lock;
+};
+
+static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset,
+ unsigned int bits)
+{
+ void __iomem *reg = cec->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset,
+ unsigned int bits)
+{
+ void __iomem *reg = cec->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
+ unsigned int val, unsigned int mask)
+{
+ u32 tmp = readl(cec->regs + offset) & ~mask;
+
+ tmp |= val & mask;
+ writel(tmp, cec->regs + offset);
+}
+
+void mtk_cec_set_hpd_event(struct device *dev,
+ void (*hpd_event)(bool hpd, struct device *dev),
+ struct device *hdmi_dev)
+{
+ struct mtk_cec *cec = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cec->lock, flags);
+ cec->hdmi_dev = hdmi_dev;
+ cec->hpd_event = hpd_event;
+ spin_unlock_irqrestore(&cec->lock, flags);
+}
+
+bool mtk_cec_hpd_high(struct device *dev)
+{
+ struct mtk_cec *cec = dev_get_drvdata(dev);
+ unsigned int status;
+
+ status = readl(cec->regs + RX_EVENT);
+
+ return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
+}
+
+static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
+{
+ mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN);
+ mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+ RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+ mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR |
+ HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN |
+ RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN);
+}
+
+static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec)
+{
+ mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
+}
+
+static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec)
+{
+ mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
+}
+
+static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec)
+{
+ mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
+ mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
+ HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
+ mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+ RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+ usleep_range(5, 10);
+ mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
+ HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
+ mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
+ mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+ RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+}
+
+static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd)
+{
+ void (*hpd_event)(bool hpd, struct device *dev);
+ struct device *hdmi_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cec->lock, flags);
+ hpd_event = cec->hpd_event;
+ hdmi_dev = cec->hdmi_dev;
+ spin_unlock_irqrestore(&cec->lock, flags);
+
+ if (hpd_event)
+ hpd_event(hpd, hdmi_dev);
+}
+
+static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg)
+{
+ struct device *dev = arg;
+ struct mtk_cec *cec = dev_get_drvdata(dev);
+ bool hpd;
+
+ mtk_cec_clear_htplg_irq(cec);
+ hpd = mtk_cec_hpd_high(dev);
+
+ if (cec->hpd != hpd) {
+ dev_dbg(dev, "hotplug event! cur hpd = %d, hpd = %d\n",
+ cec->hpd, hpd);
+ cec->hpd = hpd;
+ mtk_cec_hpd_event(cec, hpd);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mtk_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_cec *cec;
+ struct resource *res;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cec);
+ spin_lock_init(&cec->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->regs)) {
+ ret = PTR_ERR(cec->regs);
+ dev_err(dev, "Failed to ioremap cec: %d\n", ret);
+ return ret;
+ }
+
+ cec->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cec->clk)) {
+ ret = PTR_ERR(cec->clk);
+ dev_err(dev, "Failed to get cec clock: %d\n", ret);
+ return ret;
+ }
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0) {
+ dev_err(dev, "Failed to get cec irq: %d\n", cec->irq);
+ return cec->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, cec->irq, NULL,
+ mtk_cec_htplg_isr_thread,
+ IRQF_SHARED | IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT, "hdmi hpd", dev);
+ if (ret) {
+ dev_err(dev, "Failed to register cec irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(cec->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable cec clock: %d\n", ret);
+ return ret;
+ }
+
+ mtk_cec_htplg_irq_init(cec);
+ mtk_cec_htplg_irq_enable(cec);
+
+ return 0;
+}
+
+static int mtk_cec_remove(struct platform_device *pdev)
+{
+ struct mtk_cec *cec = platform_get_drvdata(pdev);
+
+ mtk_cec_htplg_irq_disable(cec);
+ clk_disable_unprepare(cec->clk);
+ return 0;
+}
+
+static const struct of_device_id mtk_cec_of_ids[] = {
+ { .compatible = "mediatek,mt8173-cec", },
+ {}
+};
+
+struct platform_driver mtk_cec_driver = {
+ .probe = mtk_cec_probe,
+ .remove = mtk_cec_remove,
+ .driver = {
+ .name = "mediatek-cec",
+ .of_match_table = mtk_cec_of_ids,
+ },
+};
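
mtk_cec_mask() above is the standard read-modify-write idiom for MMIO registers: read the current word, clear the field selected by the mask, merge in the new value, and write the merged word back. The write-back must use the merged value rather than the raw argument, otherwise bits outside the mask are clobbered. A minimal, hedged sketch of the idiom outside the driver:

/*
 * Sketch only: update the bits covered by @mask, preserve everything else.
 */
#include <linux/io.h>
#include <linux/types.h>

static void example_rmw(void __iomem *reg, u32 val, u32 mask)
{
	u32 tmp = readl(reg);	/* current register contents */

	tmp &= ~mask;		/* clear the field */
	tmp |= val & mask;	/* install the new value */
	writel(tmp, reg);	/* write back the merged word */
}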
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h
new file mode 100644
index 000000000000..10057b7eabec
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_CEC_H
+#define _MTK_CEC_H
+
+#include <linux/types.h>
+
+struct device;
+
+void mtk_cec_set_hpd_event(struct device *dev,
+ void (*hotplug_event)(bool hpd, struct device *dev),
+ struct device *hdmi_dev);
+bool mtk_cec_hpd_high(struct device *dev);
+
+#endif /* _MTK_CEC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b1223d54d0ab..eebb7d881c2b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
mutex_lock(&private->commit.lock);
flush_work(&private->commit.work);
- drm_atomic_helper_swap_state(drm, state);
+ drm_atomic_helper_swap_state(state, true);
if (async)
mtk_atomic_schedule(private, state);
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
- .gem_free_object = mtk_drm_gem_free_object,
+ .gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
.dumb_map_offset = mtk_drm_gem_dumb_map_offset,
@@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev)
if (!drm)
return -ENOMEM;
- drm_dev_set_unique(drm, dev_name(dev));
-
drm->dev_private = private;
private->drm = drm;
@@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
- ret = drm_connector_register_all(drm);
- if (ret < 0)
- goto err_unregister;
-
return 0;
-err_unregister:
- drm_dev_unregister(drm);
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
@@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
struct drm_device *drm = private->drm;
int i;
- drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index fa2ec0cd00e8..7abc550ebc00 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -54,15 +54,14 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
obj = &mtk_gem->base;
- init_dma_attrs(&mtk_gem->dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
&mtk_gem->dma_addr, GFP_KERNEL,
- &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs);
if (!mtk_gem->cookie) {
DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
ret = -ENOMEM;
@@ -93,7 +92,7 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, mtk_gem->sg);
else
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
- mtk_gem->dma_addr, &mtk_gem->dma_attrs);
+ mtk_gem->dma_addr, mtk_gem->dma_attrs);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
@@ -173,7 +172,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
- mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs);
+ mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
@@ -224,7 +223,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size,
- &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 3a2a5624a1cb..2752718fa5b2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -35,7 +35,7 @@ struct mtk_drm_gem_obj {
void *cookie;
void *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
struct sg_table *sg;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 51bc8988fc26..3995765a90dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
return drm_plane_helper_check_update(plane, state->crtc, fb,
&src, &dest, &clip,
+ state->rotation,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &visible);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 769559124562..28b2044ed9f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(dsi->panel);
}
-static struct drm_encoder *mtk_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct mtk_dsi *dsi = connector_to_dsi(connector);
-
- return &dsi->encoder;
-}
-
static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
.mode_fixup = mtk_dsi_encoder_mode_fixup,
.mode_set = mtk_dsi_encoder_mode_set,
@@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
static const struct drm_connector_helper_funcs
mtk_dsi_connector_helper_funcs = {
.get_modes = mtk_dsi_connector_get_modes,
- .best_encoder = mtk_dsi_connector_best_encoder,
};
static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
new file mode 100644
index 000000000000..334562d06731
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -0,0 +1,1828 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/hdmi-codec.h>
+#include "mtk_cec.h"
+#include "mtk_hdmi.h"
+#include "mtk_hdmi_regs.h"
+
+#define NCTS_BYTES 7
+
+enum mtk_hdmi_clk_id {
+ MTK_HDMI_CLK_HDMI_PIXEL,
+ MTK_HDMI_CLK_HDMI_PLL,
+ MTK_HDMI_CLK_AUD_BCLK,
+ MTK_HDMI_CLK_AUD_SPDIF,
+ MTK_HDMI_CLK_COUNT
+};
+
+enum hdmi_aud_input_type {
+ HDMI_AUD_INPUT_I2S = 0,
+ HDMI_AUD_INPUT_SPDIF,
+};
+
+enum hdmi_aud_i2s_fmt {
+ HDMI_I2S_MODE_RJT_24BIT = 0,
+ HDMI_I2S_MODE_RJT_16BIT,
+ HDMI_I2S_MODE_LJT_24BIT,
+ HDMI_I2S_MODE_LJT_16BIT,
+ HDMI_I2S_MODE_I2S_24BIT,
+ HDMI_I2S_MODE_I2S_16BIT
+};
+
+enum hdmi_aud_mclk {
+ HDMI_AUD_MCLK_128FS,
+ HDMI_AUD_MCLK_192FS,
+ HDMI_AUD_MCLK_256FS,
+ HDMI_AUD_MCLK_384FS,
+ HDMI_AUD_MCLK_512FS,
+ HDMI_AUD_MCLK_768FS,
+ HDMI_AUD_MCLK_1152FS,
+};
+
+enum hdmi_aud_channel_type {
+ HDMI_AUD_CHAN_TYPE_1_0 = 0,
+ HDMI_AUD_CHAN_TYPE_1_1,
+ HDMI_AUD_CHAN_TYPE_2_0,
+ HDMI_AUD_CHAN_TYPE_2_1,
+ HDMI_AUD_CHAN_TYPE_3_0,
+ HDMI_AUD_CHAN_TYPE_3_1,
+ HDMI_AUD_CHAN_TYPE_4_0,
+ HDMI_AUD_CHAN_TYPE_4_1,
+ HDMI_AUD_CHAN_TYPE_5_0,
+ HDMI_AUD_CHAN_TYPE_5_1,
+ HDMI_AUD_CHAN_TYPE_6_0,
+ HDMI_AUD_CHAN_TYPE_6_1,
+ HDMI_AUD_CHAN_TYPE_7_0,
+ HDMI_AUD_CHAN_TYPE_7_1,
+ HDMI_AUD_CHAN_TYPE_3_0_LRS,
+ HDMI_AUD_CHAN_TYPE_3_1_LRS,
+ HDMI_AUD_CHAN_TYPE_4_0_CLRS,
+ HDMI_AUD_CHAN_TYPE_4_1_CLRS,
+ HDMI_AUD_CHAN_TYPE_6_1_CS,
+ HDMI_AUD_CHAN_TYPE_6_1_CH,
+ HDMI_AUD_CHAN_TYPE_6_1_OH,
+ HDMI_AUD_CHAN_TYPE_6_1_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
+ HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
+ HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
+ HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
+ HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
+ HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
+ HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_6_0_CS,
+ HDMI_AUD_CHAN_TYPE_6_0_CH,
+ HDMI_AUD_CHAN_TYPE_6_0_OH,
+ HDMI_AUD_CHAN_TYPE_6_0_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
+ HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
+ HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
+ HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
+ HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
+ HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
+ HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
+ HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
+};
+
+enum hdmi_aud_channel_swap_type {
+ HDMI_AUD_SWAP_LR,
+ HDMI_AUD_SWAP_LFE_CC,
+ HDMI_AUD_SWAP_LSRS,
+ HDMI_AUD_SWAP_RLS_RRS,
+ HDMI_AUD_SWAP_LR_STATUS,
+};
+
+struct hdmi_audio_param {
+ enum hdmi_audio_coding_type aud_codec;
+ enum hdmi_audio_sample_size aud_sampe_size;
+ enum hdmi_aud_input_type aud_input_type;
+ enum hdmi_aud_i2s_fmt aud_i2s_fmt;
+ enum hdmi_aud_mclk aud_mclk;
+ enum hdmi_aud_channel_type aud_input_chan_type;
+ struct hdmi_codec_params codec_params;
+};
+
+struct mtk_hdmi {
+ struct drm_bridge bridge;
+ struct drm_connector conn;
+ struct device *dev;
+ struct phy *phy;
+ struct device *cec_dev;
+ struct i2c_adapter *ddc_adpt;
+ struct clk *clk[MTK_HDMI_CLK_COUNT];
+ struct drm_display_mode mode;
+ bool dvi_mode;
+ u32 min_clock;
+ u32 max_clock;
+ u32 max_hdisplay;
+ u32 max_vdisplay;
+ u32 ibias;
+ u32 ibias_up;
+ struct regmap *sys_regmap;
+ unsigned int sys_offset;
+ void __iomem *regs;
+ enum hdmi_colorspace csp;
+ struct hdmi_audio_param aud_param;
+ bool audio_enable;
+ bool powered;
+ bool enabled;
+};
+
+static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
+{
+ return container_of(b, struct mtk_hdmi, bridge);
+}
+
+static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
+{
+ return container_of(c, struct mtk_hdmi, conn);
+}
+
+static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
+{
+ return readl(hdmi->regs + offset);
+}
+
+static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
+{
+ writel(val, hdmi->regs + offset);
+}
+
+static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
+{
+ void __iomem *reg = hdmi->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
+{
+ void __iomem *reg = hdmi->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
+{
+ void __iomem *reg = hdmi->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
+{
+ mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
+ VIDEO_SOURCE_SEL);
+}
+
+static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
+{
+ struct arm_smccc_res res;
+
+ /*
+ * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
+ * output. This bit can only be controlled in ARM supervisor mode.
+ * The ARM trusted firmware provides an API for the HDMI driver to set
+ * this control bit to enable HDMI output in supervisor mode.
+ */
+ arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
+ 0, 0, 0, 0, 0, &res);
+
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
+}
+
+static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
+}
+
+static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+}
+
+static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+}
+
+static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ HDMI_RST, HDMI_RST);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ HDMI_RST, 0);
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ ANLG_ON, ANLG_ON);
+}
+
+static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
+{
+ mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
+ CFG2_NOTICE_EN);
+}
+
+static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
+{
+ mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+}
+
+static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
+{
+ mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+}
+
+static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
+ u8 len)
+{
+ u32 ctrl_reg = GRL_CTRL;
+ int i;
+ u8 *frame_data;
+ enum hdmi_infoframe_type frame_type;
+ u8 frame_ver;
+ u8 frame_len;
+ u8 checksum;
+ int ctrl_frame_en = 0;
+
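+ /*
+ * The infoframe was packed by hdmi_*_infoframe_pack(), which lays it out
+ * as a type byte, a version byte, a length byte and the checksum,
+ * followed by the payload. Split the header off here so each piece can
+ * be written to its own GRL register below.
+ */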
+ frame_type = *buffer;
+ buffer += 1;
+ frame_ver = *buffer;
+ buffer += 1;
+ frame_len = *buffer;
+ buffer += 1;
+ checksum = *buffer;
+ buffer += 1;
+ frame_data = buffer;
+
+ dev_dbg(hdmi->dev,
+ "frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n",
+ frame_type, frame_ver, frame_len, checksum);
+
+ switch (frame_type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ ctrl_frame_en = CTRL_AVI_EN;
+ ctrl_reg = GRL_CTRL;
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ ctrl_frame_en = CTRL_SPD_EN;
+ ctrl_reg = GRL_CTRL;
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ ctrl_frame_en = CTRL_AUDIO_EN;
+ ctrl_reg = GRL_CTRL;
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ ctrl_frame_en = VS_EN;
+ ctrl_reg = GRL_ACP_ISRC_CTRL;
+ break;
+ }
+ mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
+ mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
+ mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
+ mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+
+ mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+ for (i = 0; i < frame_len; i++)
+ mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+
+ mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+}
+
+static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
+{
+ mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
+ AUDIO_PACKET_OFF);
+}
+
+static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
+ usleep_range(2000, 4000);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
+}
+
+static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
+ COLOR_8BIT_MODE);
+}
+
+static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ usleep_range(2000, 4000);
+ mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+}
+
+static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
+ CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ usleep_range(2000, 4000);
+ mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+}
+
+static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
+{
+ mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
+ CTS_CTRL_SOFT);
+}
+
+static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
+ bool enable)
+{
+ mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
+ NCTS_WRI_ANYTIME);
+}
+
+static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+ mode->clock == 74250 &&
+ mode->vdisplay == 1080)
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ else
+ mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+}
+
+static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_channel_swap_type swap)
+{
+ u8 swap_bit;
+
+ switch (swap) {
+ case HDMI_AUD_SWAP_LR:
+ swap_bit = LR_SWAP;
+ break;
+ case HDMI_AUD_SWAP_LFE_CC:
+ swap_bit = LFE_CC_SWAP;
+ break;
+ case HDMI_AUD_SWAP_LSRS:
+ swap_bit = LSRS_SWAP;
+ break;
+ case HDMI_AUD_SWAP_RLS_RRS:
+ swap_bit = RLS_RRS_SWAP;
+ break;
+ case HDMI_AUD_SWAP_LR_STATUS:
+ swap_bit = LR_STATUS_SWAP;
+ break;
+ default:
+ swap_bit = LFE_CC_SWAP;
+ break;
+ }
+ mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+}
+
+static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
+ enum hdmi_audio_sample_size bit_num)
+{
+ u32 val;
+
+ switch (bit_num) {
+ case HDMI_AUDIO_SAMPLE_SIZE_16:
+ val = AOUT_16BIT;
+ break;
+ case HDMI_AUDIO_SAMPLE_SIZE_20:
+ val = AOUT_20BIT;
+ break;
+ case HDMI_AUDIO_SAMPLE_SIZE_24:
+ case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
+ val = AOUT_24BIT;
+ break;
+ }
+
+ mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+}
+
+static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_i2s_fmt i2s_fmt)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_CFG0);
+ val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
+
+ switch (i2s_fmt) {
+ case HDMI_I2S_MODE_RJT_24BIT:
+ val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT;
+ break;
+ case HDMI_I2S_MODE_RJT_16BIT:
+ val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT;
+ break;
+ case HDMI_I2S_MODE_LJT_24BIT:
+ default:
+ val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT;
+ break;
+ case HDMI_I2S_MODE_LJT_16BIT:
+ val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT;
+ break;
+ case HDMI_I2S_MODE_I2S_24BIT:
+ val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT;
+ break;
+ case HDMI_I2S_MODE_I2S_16BIT:
+ val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
+ break;
+ }
+ mtk_hdmi_write(hdmi, GRL_CFG0, val);
+}
+
+static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
+{
+ const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL;
+ u8 val;
+
+ /* Disable high bitrate, set DST packet normal/double */
+ mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+
+ if (dst)
+ val = DST_NORMAL_DOUBLE | SACD_DST;
+ else
+ val = 0;
+
+ mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+}
+
+static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_channel_type channel_type,
+ u8 channel_count)
+{
+ unsigned int ch_switch;
+ u8 i2s_uv;
+
+ ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
+ CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
+ CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
+ CH_SWITCH(2, 1) | CH_SWITCH(0, 0);
+
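+ /*
+ * Each I2S lane carries a pair of channels, so roughly one UV lane is
+ * enabled per channel pair; for some 4- and 6-channel layouts the exact
+ * lane selection below also depends on the speaker configuration.
+ */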
+ if (channel_count == 2) {
+ i2s_uv = I2S_UV_CH_EN(0);
+ } else if (channel_count == 3 || channel_count == 4) {
+ if (channel_count == 4 &&
+ (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS ||
+ channel_type == HDMI_AUD_CHAN_TYPE_4_0))
+ i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0);
+ else
+ i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2);
+ } else if (channel_count == 6 || channel_count == 5) {
+ if (channel_count == 6 &&
+ channel_type != HDMI_AUD_CHAN_TYPE_5_1 &&
+ channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) {
+ i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
+ I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
+ } else {
+ i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) |
+ I2S_UV_CH_EN(0);
+ }
+ } else if (channel_count == 8 || channel_count == 7) {
+ i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
+ I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
+ } else {
+ i2s_uv = I2S_UV_CH_EN(0);
+ }
+
+ mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
+ mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+ mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+ mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+}
+
+static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_input_type input_type)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_CFG1);
+ if (input_type == HDMI_AUD_INPUT_I2S &&
+ (val & CFG1_SPDIF) == CFG1_SPDIF) {
+ val &= ~CFG1_SPDIF;
+ } else if (input_type == HDMI_AUD_INPUT_SPDIF &&
+ (val & CFG1_SPDIF) == 0) {
+ val |= CFG1_SPDIF;
+ }
+ mtk_hdmi_write(hdmi, GRL_CFG1, val);
+}
+
+static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
+ u8 *channel_status)
+{
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+ mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+ mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+ }
+ for (; i < 24; i++) {
+ mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
+ mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+ }
+}
+
+static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ if (val & MIX_CTRL_SRC_EN) {
+ val &= ~MIX_CTRL_SRC_EN;
+ mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ usleep_range(255, 512);
+ val |= MIX_CTRL_SRC_EN;
+ mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ }
+}
+
+static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ val &= ~MIX_CTRL_SRC_EN;
+ mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+}
+
+static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_mclk mclk)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_CFG5);
+ val &= CFG5_CD_RATIO_MASK;
+
+ switch (mclk) {
+ case HDMI_AUD_MCLK_128FS:
+ val |= CFG5_FS128;
+ break;
+ case HDMI_AUD_MCLK_256FS:
+ val |= CFG5_FS256;
+ break;
+ case HDMI_AUD_MCLK_384FS:
+ val |= CFG5_FS384;
+ break;
+ case HDMI_AUD_MCLK_512FS:
+ val |= CFG5_FS512;
+ break;
+ case HDMI_AUD_MCLK_768FS:
+ val |= CFG5_FS768;
+ break;
+ default:
+ val |= CFG5_FS256;
+ break;
+ }
+ mtk_hdmi_write(hdmi, GRL_CFG5, val);
+}
+
+struct hdmi_acr_n {
+ unsigned int clock;
+ unsigned int n[3];
+};
+
+/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
+static const struct hdmi_acr_n hdmi_rec_n_table[] = {
+ /* Clock, N: 32kHz 44.1kHz 48kHz */
+ { 25175, { 4576, 7007, 6864 } },
+ { 74176, { 11648, 17836, 11648 } },
+ { 148352, { 11648, 8918, 5824 } },
+ { 296703, { 5824, 4459, 5824 } },
+ { 297000, { 3072, 4704, 5120 } },
+ { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
+};
+
+/**
+ * hdmi_recommended_n() - Return N value recommended by HDMI specification
+ * @freq: audio sample rate in Hz
+ * @clock: rounded TMDS clock in kHz
+ */
+static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
+{
+ const struct hdmi_acr_n *recommended;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
+ if (clock == hdmi_rec_n_table[i].clock)
+ break;
+ }
+ recommended = hdmi_rec_n_table + i;
+
+ switch (freq) {
+ case 32000:
+ return recommended->n[0];
+ case 44100:
+ return recommended->n[1];
+ case 48000:
+ return recommended->n[2];
+ case 88200:
+ return recommended->n[1] * 2;
+ case 96000:
+ return recommended->n[2] * 2;
+ case 176400:
+ return recommended->n[1] * 4;
+ case 192000:
+ return recommended->n[2] * 4;
+ default:
+ return (128 * freq) / 1000;
+ }
+}
+
+static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
+{
+ switch (clock) {
+ case 25175:
+ return 25174825; /* 25.2/1.001 MHz */
+ case 74176:
+ return 74175824; /* 74.25/1.001 MHz */
+ case 148352:
+ return 148351648; /* 148.5/1.001 MHz */
+ case 296703:
+ return 296703297; /* 297/1.001 MHz */
+ default:
+ return clock * 1000;
+ }
+}
+
+static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
+ unsigned int tmds_clock, unsigned int n)
+{
+ return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
+ 128 * audio_sample_rate);
+}
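+
+/*
+ * For example, a 148.5 MHz TMDS clock matches none of the table entries
+ * above, so 48 kHz audio falls back to the default N = 6144 and the
+ * expected CTS works out to 148500000 * 6144 / (128 * 48000) = 148500.
+ */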
+
+static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
+ unsigned int cts)
+{
+ unsigned char val[NCTS_BYTES];
+ int i;
+
+ mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ memset(val, 0, sizeof(val));
+
+ val[0] = (cts >> 24) & 0xff;
+ val[1] = (cts >> 16) & 0xff;
+ val[2] = (cts >> 8) & 0xff;
+ val[3] = cts & 0xff;
+
+ val[4] = (n >> 16) & 0xff;
+ val[5] = (n >> 8) & 0xff;
+ val[6] = n & 0xff;
+
+ for (i = 0; i < NCTS_BYTES; i++)
+ mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+}
+
+static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
+ unsigned int sample_rate,
+ unsigned int clock)
+{
+ unsigned int n, cts;
+
+ n = hdmi_recommended_n(sample_rate, clock);
+ cts = hdmi_expected_cts(sample_rate, clock, n);
+
+ dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
+ __func__, sample_rate, clock, n, cts);
+
+ mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
+ AUDIO_I2S_NCTS_SEL);
+ do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
+}
+
+static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type)
+{
+ switch (channel_type) {
+ case HDMI_AUD_CHAN_TYPE_1_0:
+ case HDMI_AUD_CHAN_TYPE_1_1:
+ case HDMI_AUD_CHAN_TYPE_2_0:
+ return 2;
+ case HDMI_AUD_CHAN_TYPE_2_1:
+ case HDMI_AUD_CHAN_TYPE_3_0:
+ return 3;
+ case HDMI_AUD_CHAN_TYPE_3_1:
+ case HDMI_AUD_CHAN_TYPE_4_0:
+ case HDMI_AUD_CHAN_TYPE_3_0_LRS:
+ return 4;
+ case HDMI_AUD_CHAN_TYPE_4_1:
+ case HDMI_AUD_CHAN_TYPE_5_0:
+ case HDMI_AUD_CHAN_TYPE_3_1_LRS:
+ case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
+ return 5;
+ case HDMI_AUD_CHAN_TYPE_5_1:
+ case HDMI_AUD_CHAN_TYPE_6_0:
+ case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
+ case HDMI_AUD_CHAN_TYPE_6_0_CS:
+ case HDMI_AUD_CHAN_TYPE_6_0_CH:
+ case HDMI_AUD_CHAN_TYPE_6_0_OH:
+ case HDMI_AUD_CHAN_TYPE_6_0_CHR:
+ return 6;
+ case HDMI_AUD_CHAN_TYPE_6_1:
+ case HDMI_AUD_CHAN_TYPE_6_1_CS:
+ case HDMI_AUD_CHAN_TYPE_6_1_CH:
+ case HDMI_AUD_CHAN_TYPE_6_1_OH:
+ case HDMI_AUD_CHAN_TYPE_6_1_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0:
+ case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
+ case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
+ case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
+ return 7;
+ case HDMI_AUD_CHAN_TYPE_7_1:
+ case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
+ case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
+ case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
+ return 8;
+ default:
+ return 2;
+ }
+}
+
+static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock)
+{
+ unsigned long rate;
+ int ret;
+
+ /* The DPI driver should already have set TVDPLL to the correct rate */
+ ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock,
+ ret);
+ return ret;
+ }
+
+ rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+
+ if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000))
+ dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock,
+ rate);
+ else
+ dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate);
+
+ mtk_hdmi_hw_config_sys(hdmi);
+ mtk_hdmi_hw_set_deep_color_mode(hdmi);
+ return 0;
+}
+
+static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_hw_reset(hdmi);
+ mtk_hdmi_hw_enable_notice(hdmi, true);
+ mtk_hdmi_hw_write_int_mask(hdmi, 0xff);
+ mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode);
+ mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true);
+
+ mtk_hdmi_hw_msic_setting(hdmi, mode);
+}
+
+static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable)
+{
+ mtk_hdmi_hw_send_aud_packet(hdmi, enable);
+ return 0;
+}
+
+static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on)
+{
+ mtk_hdmi_hw_ncts_enable(hdmi, on);
+ return 0;
+}
+
+static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
+{
+ enum hdmi_aud_channel_type chan_type;
+ u8 chan_count;
+ bool dst;
+
+ mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
+ mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+
+ if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
+ hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
+ mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
+ } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) {
+ hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT;
+ }
+
+ mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt);
+ mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
+
+ dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) &&
+ (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST));
+ mtk_hdmi_hw_audio_config(hdmi, dst);
+
+ if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF)
+ chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ else
+ chan_type = hdmi->aud_param.aud_input_chan_type;
+ chan_count = mtk_hdmi_aud_get_chnl_count(chan_type);
+ mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count);
+ mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type);
+
+ return 0;
+}
+
+static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *display_mode)
+{
+ unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate;
+
+ mtk_hdmi_aud_on_off_hw_ncts(hdmi, false);
+ mtk_hdmi_hw_aud_src_disable(hdmi);
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+
+ if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ break;
+ default:
+ return -EINVAL;
+ }
+ mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk);
+ } else {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ break;
+ default:
+ return -EINVAL;
+ }
+ mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS);
+ }
+
+ mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock);
+
+ mtk_hdmi_hw_aud_src_reenable(hdmi);
+ return 0;
+}
+
+static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *display_mode)
+{
+ mtk_hdmi_hw_aud_mute(hdmi);
+ mtk_hdmi_aud_enable_packet(hdmi, false);
+
+ mtk_hdmi_aud_set_input(hdmi);
+ mtk_hdmi_aud_set_src(hdmi, display_mode);
+ mtk_hdmi_hw_aud_set_channel_status(hdmi,
+ hdmi->aud_param.codec_params.iec.status);
+
+ usleep_range(50, 100);
+
+ mtk_hdmi_aud_on_off_hw_ncts(hdmi, true);
+ mtk_hdmi_aud_enable_packet(hdmi, true);
+ mtk_hdmi_hw_aud_unmute(hdmi);
+ return 0;
+}
+
+static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_avi_infoframe frame;
+ u8 buffer[17];
+ ssize_t err;
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(hdmi->dev,
+ "Failed to get AVI infoframe from mode: %zd\n", err);
+ return err;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
+ const char *vendor,
+ const char *product)
+{
+ struct hdmi_spd_infoframe frame;
+ u8 buffer[29];
+ ssize_t err;
+
+ err = hdmi_spd_infoframe_init(&frame, vendor, product);
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack SDP infoframe: %zd\n", err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to setup audio infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+ frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+ frame.channels = mtk_hdmi_aud_get_chnl_count(
+ hdmi->aud_param.aud_input_chan_type);
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_vendor_infoframe frame;
+ u8 buffer[10];
+ ssize_t err;
+
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
+ if (err) {
+ dev_err(hdmi->dev,
+ "Failed to get vendor infoframe from mode: %zd\n", err);
+ return err;
+ }
+
+ err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err) {
+ dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
+{
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
+
+ hdmi->csp = HDMI_COLORSPACE_RGB;
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
+ return 0;
+}
+
+void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_aud_enable_packet(hdmi, true);
+ hdmi->audio_enable = true;
+}
+
+void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_aud_enable_packet(hdmi, false);
+ hdmi->audio_enable = false;
+}
+
+int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
+ struct hdmi_audio_param *param)
+{
+ if (!hdmi->audio_enable) {
+ dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
+ return -EINVAL;
+ }
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ param->aud_codec, param->aud_input_type,
+ param->aud_input_chan_type, param->codec_params.sample_rate);
+ memcpy(&hdmi->aud_param, param, sizeof(*param));
+ return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
+}
+
+static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int ret;
+
+ mtk_hdmi_hw_vid_black(hdmi, true);
+ mtk_hdmi_hw_aud_mute(hdmi);
+ mtk_hdmi_hw_send_av_mute(hdmi);
+ phy_power_off(hdmi->phy);
+
+ ret = mtk_hdmi_video_change_vpll(hdmi,
+ mode->clock * 1000);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret);
+ return ret;
+ }
+ mtk_hdmi_video_set_display_mode(hdmi, mode);
+
+ phy_power_on(hdmi->phy);
+ mtk_hdmi_aud_output_config(hdmi, mode);
+
+ mtk_hdmi_setup_audio_infoframe(hdmi);
+ mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+ mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+
+ mtk_hdmi_hw_vid_black(hdmi, false);
+ mtk_hdmi_hw_aud_unmute(hdmi);
+ mtk_hdmi_hw_send_av_unmute(hdmi);
+
+ return 0;
+}
+
+static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
+ [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel",
+ [MTK_HDMI_CLK_HDMI_PLL] = "pll",
+ [MTK_HDMI_CLK_AUD_BCLK] = "bclk",
+ [MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
+};
+
+static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
+ struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
+ hdmi->clk[i] = of_clk_get_by_name(np,
+ mtk_hdmi_clk_names[i]);
+ if (IS_ERR(hdmi->clk[i]))
+ return PTR_ERR(hdmi->clk[i]);
+ }
+ return 0;
+}
+
+static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
+{
+ int ret;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ return ret;
+}
+
+static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
+{
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
+}
+
+static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
+ bool force)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ return mtk_cec_hpd_high(hdmi->cec_dev) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void hdmi_conn_destroy(struct drm_connector *conn)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
+
+ drm_connector_cleanup(conn);
+}
+
+static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+ struct edid *edid;
+ int ret;
+
+ if (!hdmi->ddc_adpt)
+ return -ENODEV;
+
+ edid = drm_get_edid(conn, hdmi->ddc_adpt);
+ if (!edid)
+ return -ENODEV;
+
+ hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
+
+ drm_mode_connector_update_edid_property(conn, edid);
+
+ ret = drm_add_edid_modes(conn, edid);
+ drm_edid_to_eld(conn, edid);
+ kfree(edid);
+ return ret;
+}
+
+static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
+
+ if (hdmi->bridge.next) {
+ struct drm_display_mode adjusted_mode;
+
+ drm_mode_copy(&adjusted_mode, mode);
+ if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
+ &adjusted_mode))
+ return MODE_BAD;
+ }
+
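+ /* mode->clock is in kHz, so only pixel clocks from 27 MHz to 297 MHz pass */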
+ if (mode->clock < 27000)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
+}
+
+static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ return hdmi->bridge.encoder;
+}
+
+static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = hdmi_conn_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = hdmi_conn_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs
+ mtk_hdmi_connector_helper_funcs = {
+ .get_modes = mtk_hdmi_conn_get_modes,
+ .mode_valid = mtk_hdmi_conn_mode_valid,
+ .best_encoder = mtk_hdmi_conn_best_enc,
+};
+
+static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
+ drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
+}
+
+/*
+ * Bridge callbacks
+ */
+
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ int ret;
+
+ ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
+ &mtk_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
+
+ hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
+ hdmi->conn.interlace_allowed = true;
+ hdmi->conn.doublescan_allowed = false;
+
+ ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+ bridge->encoder);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Failed to attach connector to encoder: %d\n", ret);
+ return ret;
+ }
+
+ if (bridge->next) {
+ bridge->next->encoder = bridge->encoder;
+ ret = drm_bridge_attach(bridge->encoder->dev, bridge->next);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Failed to attach external bridge: %d\n", ret);
+ return ret;
+ }
+ }
+
+ mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev);
+
+ return 0;
+}
+
+static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ if (!hdmi->enabled)
+ return;
+
+ phy_power_off(hdmi->phy);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+
+ hdmi->enabled = false;
+}
+
+static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ if (!hdmi->powered)
+ return;
+
+ mtk_hdmi_hw_1p4_version_enable(hdmi, true);
+ mtk_hdmi_hw_make_reg_writable(hdmi, false);
+
+ hdmi->powered = false;
+}
+
+static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
+ adjusted_mode->name, adjusted_mode->hdisplay);
+ dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
+ adjusted_mode->hsync_start, adjusted_mode->hsync_end,
+ adjusted_mode->htotal);
+ dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
+ adjusted_mode->hskew, adjusted_mode->vdisplay);
+ dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
+ adjusted_mode->vsync_start, adjusted_mode->vsync_end,
+ adjusted_mode->vtotal);
+ dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
+ adjusted_mode->vscan, adjusted_mode->flags);
+
+ drm_mode_copy(&hdmi->mode, adjusted_mode);
+}
+
+static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ mtk_hdmi_hw_make_reg_writable(hdmi, true);
+ mtk_hdmi_hw_1p4_version_enable(hdmi, true);
+
+ hdmi->powered = true;
+}
+
+static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
+ clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+ clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+ phy_power_on(hdmi->phy);
+
+ hdmi->enabled = true;
+}
+
+static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
+ .attach = mtk_hdmi_bridge_attach,
+ .mode_fixup = mtk_hdmi_bridge_mode_fixup,
+ .disable = mtk_hdmi_bridge_disable,
+ .post_disable = mtk_hdmi_bridge_post_disable,
+ .mode_set = mtk_hdmi_bridge_mode_set,
+ .pre_enable = mtk_hdmi_bridge_pre_enable,
+ .enable = mtk_hdmi_bridge_enable,
+};
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
+ struct platform_device *cec_pdev;
+ struct regmap *regmap;
+ struct resource *mem;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np);
+ if (ret) {
+ dev_err(dev, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
+ /* The CEC module handles HDMI hotplug detection */
+ cec_np = of_find_compatible_node(np->parent, NULL,
+ "mediatek,mt8173-cec");
+ if (!cec_np) {
+ dev_err(dev, "Failed to find CEC node\n");
+ return -EINVAL;
+ }
+
+ cec_pdev = of_find_device_by_node(cec_np);
+ if (!cec_pdev) {
+ dev_err(hdmi->dev, "Waiting for CEC device %s\n",
+ cec_np->full_name);
+ return -EPROBE_DEFER;
+ }
+ hdmi->cec_dev = &cec_pdev->dev;
+
+ /*
+ * The mediatek,syscon-hdmi property contains a phandle link to the
+ * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
+ * registers it contains.
+ */
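+ /*
+ * A matching devicetree property looks roughly like
+ * mediatek,syscon-hdmi = <&mmsys 0x900>;
+ * where the phandle label and the 0x900 offset are only illustrative and
+ * come from the SoC dtsi.
+ */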
+ regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
+ ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
+ &hdmi->sys_offset);
+ if (IS_ERR(regmap))
+ ret = PTR_ERR(regmap);
+ if (ret) {
+ dev_err(dev,
+ "Failed to get system configuration registers: %d\n",
+ ret);
+ return ret;
+ }
+ hdmi->sys_regmap = regmap;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ port = of_graph_get_port_by_id(np, 1);
+ if (!port) {
+ dev_err(dev, "Missing output port node\n");
+ return -EINVAL;
+ }
+
+ ep = of_get_child_by_name(port, "endpoint");
+ if (!ep) {
+ dev_err(dev, "Missing endpoint node in port %s\n",
+ port->full_name);
+ of_node_put(port);
+ return -EINVAL;
+ }
+ of_node_put(port);
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -EINVAL;
+ }
+ of_node_put(ep);
+
+ if (!of_device_is_compatible(remote, "hdmi-connector")) {
+ hdmi->bridge.next = of_drm_find_bridge(remote);
+ if (!hdmi->bridge.next) {
+ dev_err(dev, "Waiting for external bridge\n");
+ of_node_put(remote);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ if (!i2c_np) {
+ dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n",
+ remote->full_name);
+ of_node_put(remote);
+ return -EINVAL;
+ }
+ of_node_put(remote);
+
+ hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ if (!hdmi->ddc_adpt) {
+ dev_err(dev, "Failed to get ddc i2c adapter by node\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * HDMI audio codec callbacks
+ */
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param hdmi_params;
+ unsigned int chan = params->cea.channels;
+
+ dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+ params->sample_rate, params->sample_width, chan);
+
+ if (!hdmi->bridge.encoder)
+ return -ENODEV;
+
+ switch (chan) {
+ case 2:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ break;
+ case 4:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ break;
+ case 6:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ break;
+ case 8:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ break;
+ default:
+ dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ break;
+ default:
+ dev_err(hdmi->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ break;
+ default:
+ dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
+ daifmt->fmt);
+ return -EINVAL;
+ }
+
+ memcpy(&hdmi_params.codec_params, params,
+ sizeof(hdmi_params.codec_params));
+
+ mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+
+ return 0;
+}
+
+static int mtk_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ mtk_hdmi_audio_enable(hdmi);
+
+ return 0;
+}
+
+static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ mtk_hdmi_audio_disable(hdmi);
+}
+
+int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s(%d)\n", __func__, enable);
+
+ if (enable)
+ mtk_hdmi_hw_aud_mute(hdmi);
+ else
+ mtk_hdmi_hw_aud_unmute(hdmi);
+
+ return 0;
+}
+
+static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
+ .hw_params = mtk_hdmi_audio_hw_params,
+ .audio_startup = mtk_hdmi_audio_startup,
+ .audio_shutdown = mtk_hdmi_audio_shutdown,
+ .digital_mute = mtk_hdmi_audio_digital_mute,
+ .get_eld = mtk_hdmi_audio_get_eld,
+};
+
+static void mtk_hdmi_register_audio_driver(struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &mtk_hdmi_audio_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+ };
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO, &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(pdev))
+ return;
+
+ DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+}
+
+static int mtk_drm_hdmi_probe(struct platform_device *pdev)
+{
+ struct mtk_hdmi *hdmi;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+
+ ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
+ if (ret)
+ return ret;
+
+ hdmi->phy = devm_phy_get(dev, "hdmi");
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(dev, "Failed to get HDMI PHY: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ ret = mtk_hdmi_output_init(hdmi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize hdmi output\n");
+ return ret;
+ }
+
+ mtk_hdmi_register_audio_driver(dev);
+
+ hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
+ hdmi->bridge.of_node = pdev->dev.of_node;
+ ret = drm_bridge_add(&hdmi->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add bridge, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_hdmi_clk_enable_audio(hdmi);
+ if (ret) {
+ dev_err(dev, "Failed to enable audio clocks: %d\n", ret);
+ goto err_bridge_remove;
+ }
+
+ dev_dbg(dev, "mediatek hdmi probe success\n");
+ return 0;
+
+err_bridge_remove:
+ drm_bridge_remove(&hdmi->bridge);
+ return ret;
+}
+
+static int mtk_drm_hdmi_remove(struct platform_device *pdev)
+{
+ struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&hdmi->bridge);
+ mtk_hdmi_clk_disable_audio(hdmi);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_hdmi_suspend(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ mtk_hdmi_clk_disable_audio(hdmi);
+ dev_dbg(dev, "hdmi suspend success!\n");
+ return 0;
+}
+
+static int mtk_hdmi_resume(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = mtk_hdmi_clk_enable_audio(hdmi);
+ if (ret) {
+ dev_err(dev, "hdmi resume failed!\n");
+ return ret;
+ }
+
+ dev_dbg(dev, "hdmi resume success!\n");
+ return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
+ mtk_hdmi_suspend, mtk_hdmi_resume);
+
+static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
+ { .compatible = "mediatek,mt8173-hdmi", },
+ {}
+};
+
+static struct platform_driver mtk_hdmi_driver = {
+ .probe = mtk_drm_hdmi_probe,
+ .remove = mtk_drm_hdmi_remove,
+ .driver = {
+ .name = "mediatek-drm-hdmi",
+ .of_match_table = mtk_drm_hdmi_of_ids,
+ .pm = &mtk_hdmi_pm_ops,
+ },
+};
+
+static struct platform_driver * const mtk_hdmi_drivers[] = {
+ &mtk_hdmi_phy_driver,
+ &mtk_hdmi_ddc_driver,
+ &mtk_cec_driver,
+ &mtk_hdmi_driver,
+};
+
+static int __init mtk_hdmitx_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
+ ret = platform_driver_register(mtk_hdmi_drivers[i]);
+ if (ret < 0) {
+ pr_err("Failed to register %s driver: %d\n",
+ mtk_hdmi_drivers[i]->driver.name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= 0)
+ platform_driver_unregister(mtk_hdmi_drivers[i]);
+
+ return ret;
+}
+
+static void __exit mtk_hdmitx_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
+ platform_driver_unregister(mtk_hdmi_drivers[i]);
+}
+
+module_init(mtk_hdmitx_init);
+module_exit(mtk_hdmitx_exit);
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
new file mode 100644
index 000000000000..6371b3de1ff6
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_HDMI_CTRL_H
+#define _MTK_HDMI_CTRL_H
+
+struct platform_driver;
+
+extern struct platform_driver mtk_cec_driver;
+extern struct platform_driver mtk_hdmi_ddc_driver;
+extern struct platform_driver mtk_hdmi_phy_driver;
+
+#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
new file mode 100644
index 000000000000..33c9e1bdb114
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#define SIF1_CLOK (288)
+#define DDC_DDCMCTL0 (0x0)
+#define DDCM_ODRAIN BIT(31)
+#define DDCM_CLK_DIV_OFFSET (16)
+#define DDCM_CLK_DIV_MASK (0xfff << 16)
+#define DDCM_CS_STATUS BIT(4)
+#define DDCM_SCL_STATE BIT(3)
+#define DDCM_SDA_STATE BIT(2)
+#define DDCM_SM0EN BIT(1)
+#define DDCM_SCL_STRECH BIT(0)
+#define DDC_DDCMCTL1 (0x4)
+#define DDCM_ACK_OFFSET (16)
+#define DDCM_ACK_MASK (0xff << 16)
+#define DDCM_PGLEN_OFFSET (8)
+#define DDCM_PGLEN_MASK (0x7 << 8)
+#define DDCM_SIF_MODE_OFFSET (4)
+#define DDCM_SIF_MODE_MASK (0x7 << 4)
+#define DDCM_START (0x1)
+#define DDCM_WRITE_DATA (0x2)
+#define DDCM_STOP (0x3)
+#define DDCM_READ_DATA_NO_ACK (0x4)
+#define DDCM_READ_DATA_ACK (0x5)
+#define DDCM_TRI BIT(0)
+#define DDC_DDCMD0 (0x8)
+#define DDCM_DATA3 (0xff << 24)
+#define DDCM_DATA2 (0xff << 16)
+#define DDCM_DATA1 (0xff << 8)
+#define DDCM_DATA0 (0xff << 0)
+#define DDC_DDCMD1 (0xc)
+#define DDCM_DATA7 (0xff << 24)
+#define DDCM_DATA6 (0xff << 16)
+#define DDCM_DATA5 (0xff << 8)
+#define DDCM_DATA4 (0xff << 0)
+
+struct mtk_hdmi_ddc {
+ struct i2c_adapter adap;
+ struct clk *clk;
+ void __iomem *regs;
+};
+
+static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int val)
+{
+ writel(readl(ddc->regs + offset) | val, ddc->regs + offset);
+}
+
+static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int val)
+{
+ writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset);
+}
+
+static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int val)
+{
+ return (readl(ddc->regs + offset) & val) == val;
+}
+
+static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int mask, unsigned int shift,
+ unsigned int val)
+{
+ unsigned int tmp;
+
+ tmp = readl(ddc->regs + offset);
+ tmp &= ~mask;
+ tmp |= (val << shift) & mask;
+ writel(tmp, ddc->regs + offset);
+}
+
+static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc,
+ unsigned int offset, unsigned int mask,
+ unsigned int shift)
+{
+ return (readl(ddc->regs + offset) & mask) >> shift;
+}
+
+static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode)
+{
+ u32 val;
+
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK,
+ DDCM_SIF_MODE_OFFSET, mode);
+ sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI);
+ readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val,
+ (val & DDCM_TRI) != DDCM_TRI, 4, 20000);
+}
+
+static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
+{
+ struct device *dev = ddc->adap.dev.parent;
+ u32 remain_count, ack_count, ack_final, read_count, temp_count;
+ u32 index = 0;
+ u32 ack;
+ int i;
+
+ ddcm_trigger_mode(ddc, DDCM_START);
+ sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01);
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
+ 0x00);
+ ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
+ ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
+ dev_dbg(dev, "ack = 0x%x\n", ack);
+ if (ack != 0x01) {
+ dev_err(dev, "i2c ack err!\n");
+ return -ENXIO;
+ }
+
+ remain_count = msg->len;
+ ack_count = (msg->len - 1) / 8;
+ ack_final = 0;
+
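+ /*
+ * The DDC engine transfers at most 8 bytes per trigger. Every full
+ * 8-byte chunk is read with ACK; only the final chunk is read with
+ * NO_ACK, and the returned ACK bitmap is checked to confirm that the
+ * expected number of bytes was acknowledged.
+ */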
+ while (remain_count > 0) {
+ if (ack_count > 0) {
+ read_count = 8;
+ ack_final = 0;
+ ack_count--;
+ } else {
+ read_count = remain_count;
+ ack_final = 1;
+ }
+
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK,
+ DDCM_PGLEN_OFFSET, read_count - 1);
+ ddcm_trigger_mode(ddc, (ack_final == 1) ?
+ DDCM_READ_DATA_NO_ACK :
+ DDCM_READ_DATA_ACK);
+
+ ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK,
+ DDCM_ACK_OFFSET);
+ temp_count = 0;
+ while (((ack & (1 << temp_count)) != 0) && (temp_count < 8))
+ temp_count++;
+ if (((ack_final == 1) && (temp_count != (read_count - 1))) ||
+ ((ack_final == 0) && (temp_count != read_count))) {
+ dev_err(dev, "Address NACK! ACK(0x%x)\n", ack);
+ break;
+ }
+
+ for (i = read_count; i >= 1; i--) {
+ int shift;
+ int offset;
+
+ if (i > 4) {
+ offset = DDC_DDCMD1;
+ shift = (i - 5) * 8;
+ } else {
+ offset = DDC_DDCMD0;
+ shift = (i - 1) * 8;
+ }
+
+ msg->buf[index + i - 1] = sif_read_mask(ddc, offset,
+ 0xff << shift,
+ shift);
+ }
+
+ remain_count -= read_count;
+ index += read_count;
+ }
+
+ return 0;
+}
+
+static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
+{
+ struct device *dev = ddc->adap.dev.parent;
+ u32 ack;
+
+ ddcm_trigger_mode(ddc, DDCM_START);
+ sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1);
+ sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]);
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
+ 0x1);
+ ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
+
+ ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
+ dev_dbg(dev, "ack = %d\n", ack);
+
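+ /* expect one ACK bit per transmitted byte: slave address + one data byte */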
+ if (ack != 0x03) {
+ dev_err(dev, "i2c ack err!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct mtk_hdmi_ddc *ddc = adapter->algo_data;
+ struct device *dev = adapter->dev.parent;
+ int ret;
+ int i;
+
+ if (!ddc) {
+ dev_err(dev, "invalid arguments\n");
+ return -EINVAL;
+ }
+
+ sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRECH);
+ sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN);
+ sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN);
+
+ if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) {
+ dev_err(dev, "ddc line is busy!\n");
+ return -EBUSY;
+ }
+
+ sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK,
+ DDCM_CLK_DIV_OFFSET, SIF1_CLOK);
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *msg = &msgs[i];
+
+ dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n",
+ msg->addr, msg->flags, msg->len);
+
+ if (msg->flags & I2C_M_RD)
+ ret = mtk_hdmi_ddc_read_msg(ddc, msg);
+ else
+ ret = mtk_hdmi_ddc_write_msg(ddc, msg);
+ if (ret < 0)
+ goto xfer_end;
+ }
+
+ ddcm_trigger_mode(ddc, DDCM_STOP);
+
+ return i;
+
+xfer_end:
+ ddcm_trigger_mode(ddc, DDCM_STOP);
+ dev_err(dev, "ddc failed!\n");
+ return ret;
+}
+
+static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = {
+ .master_xfer = mtk_hdmi_ddc_xfer,
+ .functionality = mtk_hdmi_ddc_func,
+};
+
+static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi_ddc *ddc;
+ struct resource *mem;
+ int ret;
+
+ ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ ddc->clk = devm_clk_get(dev, "ddc-i2c");
+ if (IS_ERR(ddc->clk)) {
+ dev_err(dev, "get ddc_clk failed: %p ,\n", ddc->clk);
+ return PTR_ERR(ddc->clk);
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(ddc->regs))
+ return PTR_ERR(ddc->regs);
+
+ ret = clk_prepare_enable(ddc->clk);
+ if (ret) {
+ dev_err(dev, "enable ddc clk failed!\n");
+ return ret;
+ }
+
+ strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
+ ddc->adap.owner = THIS_MODULE;
+ ddc->adap.class = I2C_CLASS_DDC;
+ ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
+ ddc->adap.retries = 3;
+ ddc->adap.dev.of_node = dev->of_node;
+ ddc->adap.algo_data = ddc;
+ ddc->adap.dev.parent = &pdev->dev;
+
+ ret = i2c_add_adapter(&ddc->adap);
+ if (ret < 0) {
+ dev_err(dev, "failed to add bus to i2c core\n");
+ goto err_clk_disable;
+ }
+
+ platform_set_drvdata(pdev, ddc);
+
+ dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap);
+ dev_dbg(dev, "ddc->clk: %p\n", ddc->clk);
+ dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start,
+ &mem->end);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(ddc->clk);
+ return ret;
+}
+
+static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
+{
+ struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&ddc->adap);
+ clk_disable_unprepare(ddc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_hdmi_ddc_match[] = {
+ { .compatible = "mediatek,mt8173-hdmi-ddc", },
+ {},
+};
+
+struct platform_driver mtk_hdmi_ddc_driver = {
+ .probe = mtk_hdmi_ddc_probe,
+ .remove = mtk_hdmi_ddc_remove,
+ .driver = {
+ .name = "mediatek-hdmi-ddc",
+ .of_match_table = mtk_hdmi_ddc_match,
+ },
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
new file mode 100644
index 000000000000..a5cb07d12c9c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_HDMI_REGS_H
+#define _MTK_HDMI_REGS_H
+
+#define GRL_INT_MASK 0x18
+#define GRL_IFM_PORT 0x188
+#define GRL_CH_SWAP 0x198
+#define LR_SWAP BIT(0)
+#define LFE_CC_SWAP BIT(1)
+#define LSRS_SWAP BIT(2)
+#define RLS_RRS_SWAP BIT(3)
+#define LR_STATUS_SWAP BIT(4)
+#define GRL_I2S_C_STA0 0x140
+#define GRL_I2S_C_STA1 0x144
+#define GRL_I2S_C_STA2 0x148
+#define GRL_I2S_C_STA3 0x14C
+#define GRL_I2S_C_STA4 0x150
+#define GRL_I2S_UV 0x154
+#define I2S_UV_V BIT(0)
+#define I2S_UV_U BIT(1)
+#define I2S_UV_CH_EN_MASK 0x3c
+#define I2S_UV_CH_EN(x) BIT((x) + 2)
+#define I2S_UV_TMDS_DEBUG BIT(6)
+#define I2S_UV_NORMAL_INFO_INV BIT(7)
+#define GRL_ACP_ISRC_CTRL 0x158
+#define VS_EN BIT(0)
+#define ACP_EN BIT(1)
+#define ISRC1_EN BIT(2)
+#define ISRC2_EN BIT(3)
+#define GAMUT_EN BIT(4)
+#define GRL_CTS_CTRL 0x160
+#define CTS_CTRL_SOFT BIT(0)
+#define GRL_INT 0x14
+#define INT_MDI BIT(0)
+#define INT_HDCP BIT(1)
+#define INT_FIFO_O BIT(2)
+#define INT_FIFO_U BIT(3)
+#define INT_IFM_ERR BIT(4)
+#define INT_INF_DONE BIT(5)
+#define INT_NCTS_DONE BIT(6)
+#define INT_CTRL_PKT_DONE BIT(7)
+#define GRL_INT_MASK 0x18
+#define GRL_CTRL 0x1C
+#define CTRL_GEN_EN BIT(2)
+#define CTRL_SPD_EN BIT(3)
+#define CTRL_MPEG_EN BIT(4)
+#define CTRL_AUDIO_EN BIT(5)
+#define CTRL_AVI_EN BIT(6)
+#define CTRL_AVMUTE BIT(7)
+#define GRL_STATUS 0x20
+#define STATUS_HTPLG BIT(0)
+#define STATUS_PORD BIT(1)
+#define GRL_DIVN 0x170
+#define NCTS_WRI_ANYTIME BIT(6)
+#define GRL_AUDIO_CFG 0x17C
+#define AUDIO_ZERO BIT(0)
+#define HIGH_BIT_RATE BIT(1)
+#define SACD_DST BIT(2)
+#define DST_NORMAL_DOUBLE BIT(3)
+#define DSD_INV BIT(4)
+#define LR_INV BIT(5)
+#define LR_MIX BIT(6)
+#define DSD_SEL BIT(7)
+#define GRL_NCTS 0x184
+#define GRL_CH_SW0 0x18C
+#define GRL_CH_SW1 0x190
+#define GRL_CH_SW2 0x194
+#define CH_SWITCH(from, to) ((from) << ((to) * 3))
+#define GRL_INFOFRM_VER 0x19C
+#define GRL_INFOFRM_TYPE 0x1A0
+#define GRL_INFOFRM_LNG 0x1A4
+#define GRL_MIX_CTRL 0x1B4
+#define MIX_CTRL_SRC_EN BIT(0)
+#define BYPASS_VOLUME BIT(1)
+#define MIX_CTRL_FLAT BIT(7)
+#define GRL_AOUT_CFG 0x1C4
+#define AOUT_BNUM_SEL_MASK 0x03
+#define AOUT_24BIT 0x00
+#define AOUT_20BIT 0x02
+#define AOUT_16BIT 0x03
+#define AOUT_FIFO_ADAP_CTRL BIT(6)
+#define AOUT_BURST_PREAMBLE_EN BIT(7)
+#define HIGH_BIT_RATE_PACKET_ALIGN (AOUT_BURST_PREAMBLE_EN | \
+ AOUT_FIFO_ADAP_CTRL)
+#define GRL_SHIFT_L1 0x1C0
+#define GRL_SHIFT_R2 0x1B0
+#define AUDIO_PACKET_OFF BIT(6)
+#define GRL_CFG0 0x24
+#define CFG0_I2S_MODE_MASK 0x3
+#define CFG0_I2S_MODE_RTJ 0x1
+#define CFG0_I2S_MODE_LTJ 0x0
+#define CFG0_I2S_MODE_I2S 0x2
+#define CFG0_W_LENGTH_MASK 0x30
+#define CFG0_W_LENGTH_24BIT 0x00
+#define CFG0_W_LENGTH_16BIT 0x10
+#define GRL_CFG1 0x28
+#define CFG1_EDG_SEL BIT(0)
+#define CFG1_SPDIF BIT(1)
+#define CFG1_DVI BIT(2)
+#define CFG1_HDCP_DEBUG BIT(3)
+#define GRL_CFG2 0x2c
+#define CFG2_MHL_DE_SEL BIT(3)
+#define CFG2_MHL_FAKE_DE_SEL BIT(4)
+#define CFG2_MHL_DATA_REMAP BIT(5)
+#define CFG2_NOTICE_EN BIT(6)
+#define CFG2_ACLK_INV BIT(7)
+#define GRL_CFG3 0x30
+#define CFG3_AES_KEY_INDEX_MASK 0x3f
+#define CFG3_CONTROL_PACKET_DELAY BIT(6)
+#define CFG3_KSV_LOAD_START BIT(7)
+#define GRL_CFG4 0x34
+#define CFG4_AES_KEY_LOAD BIT(4)
+#define CFG4_AV_UNMUTE_EN BIT(5)
+#define CFG4_AV_UNMUTE_SET BIT(6)
+#define CFG4_MHL_MODE BIT(7)
+#define GRL_CFG5 0x38
+#define CFG5_CD_RATIO_MASK 0x8F
+#define CFG5_FS128 (0x1 << 4)
+#define CFG5_FS256 (0x2 << 4)
+#define CFG5_FS384 (0x3 << 4)
+#define CFG5_FS512 (0x4 << 4)
+#define CFG5_FS768 (0x6 << 4)
+#define DUMMY_304 0x304
+#define CHMO_SEL (0x3 << 2)
+#define CHM1_SEL (0x3 << 4)
+#define CHM2_SEL (0x3 << 6)
+#define AUDIO_I2S_NCTS_SEL BIT(1)
+#define AUDIO_I2S_NCTS_SEL_64 (1 << 1)
+#define AUDIO_I2S_NCTS_SEL_128 (0 << 1)
+#define NEW_GCP_CTRL BIT(0)
+#define NEW_GCP_CTRL_MERGE BIT(0)
+#define GRL_L_STATUS_0 0x200
+#define GRL_L_STATUS_1 0x204
+#define GRL_L_STATUS_2 0x208
+#define GRL_L_STATUS_3 0x20c
+#define GRL_L_STATUS_4 0x210
+#define GRL_L_STATUS_5 0x214
+#define GRL_L_STATUS_6 0x218
+#define GRL_L_STATUS_7 0x21c
+#define GRL_L_STATUS_8 0x220
+#define GRL_L_STATUS_9 0x224
+#define GRL_L_STATUS_10 0x228
+#define GRL_L_STATUS_11 0x22c
+#define GRL_L_STATUS_12 0x230
+#define GRL_L_STATUS_13 0x234
+#define GRL_L_STATUS_14 0x238
+#define GRL_L_STATUS_15 0x23c
+#define GRL_L_STATUS_16 0x240
+#define GRL_L_STATUS_17 0x244
+#define GRL_L_STATUS_18 0x248
+#define GRL_L_STATUS_19 0x24c
+#define GRL_L_STATUS_20 0x250
+#define GRL_L_STATUS_21 0x254
+#define GRL_L_STATUS_22 0x258
+#define GRL_L_STATUS_23 0x25c
+#define GRL_R_STATUS_0 0x260
+#define GRL_R_STATUS_1 0x264
+#define GRL_R_STATUS_2 0x268
+#define GRL_R_STATUS_3 0x26c
+#define GRL_R_STATUS_4 0x270
+#define GRL_R_STATUS_5 0x274
+#define GRL_R_STATUS_6 0x278
+#define GRL_R_STATUS_7 0x27c
+#define GRL_R_STATUS_8 0x280
+#define GRL_R_STATUS_9 0x284
+#define GRL_R_STATUS_10 0x288
+#define GRL_R_STATUS_11 0x28c
+#define GRL_R_STATUS_12 0x290
+#define GRL_R_STATUS_13 0x294
+#define GRL_R_STATUS_14 0x298
+#define GRL_R_STATUS_15 0x29c
+#define GRL_R_STATUS_16 0x2a0
+#define GRL_R_STATUS_17 0x2a4
+#define GRL_R_STATUS_18 0x2a8
+#define GRL_R_STATUS_19 0x2ac
+#define GRL_R_STATUS_20 0x2b0
+#define GRL_R_STATUS_21 0x2b4
+#define GRL_R_STATUS_22 0x2b8
+#define GRL_R_STATUS_23 0x2bc
+#define GRL_ABIST_CTRL0 0x2D4
+#define GRL_ABIST_CTRL1 0x2D8
+#define ABIST_EN BIT(7)
+#define ABIST_DATA_FMT (0x7 << 0)
+#define VIDEO_CFG_0 0x380
+#define VIDEO_CFG_1 0x384
+#define VIDEO_CFG_2 0x388
+#define VIDEO_CFG_3 0x38c
+#define VIDEO_CFG_4 0x390
+#define VIDEO_SOURCE_SEL BIT(7)
+#define NORMAL_PATH (1 << 7)
+#define GEN_RGB (0 << 7)
+
+#define HDMI_SYS_CFG1C 0x000
+#define HDMI_ON BIT(0)
+#define HDMI_RST BIT(1)
+#define ANLG_ON BIT(2)
+#define CFG10_DVI BIT(3)
+#define HDMI_TST BIT(3)
+#define SYS_KEYMASK1 (0xff << 8)
+#define SYS_KEYMASK2 (0xff << 16)
+#define AUD_OUTSYNC_EN BIT(24)
+#define AUD_OUTSYNC_PRE_EN BIT(25)
+#define I2CM_ON BIT(26)
+#define E2PROM_TYPE_8BIT BIT(27)
+#define MCM_E2PROM_ON BIT(28)
+#define EXT_E2PROM_ON BIT(29)
+#define HTPLG_PIN_SEL_OFF BIT(30)
+#define AES_EFUSE_ENABLE BIT(31)
+#define HDMI_SYS_CFG20 0x004
+#define DEEP_COLOR_MODE_MASK (3 << 1)
+#define COLOR_8BIT_MODE (0 << 1)
+#define COLOR_10BIT_MODE (1 << 1)
+#define COLOR_12BIT_MODE (2 << 1)
+#define COLOR_16BIT_MODE (3 << 1)
+#define DEEP_COLOR_EN BIT(0)
+#define HDMI_AUDIO_TEST_SEL BIT(8)
+#define HDMI2P0_EN BIT(11)
+#define HDMI_OUT_FIFO_EN BIT(16)
+#define HDMI_OUT_FIFO_CLK_INV BIT(17)
+#define MHL_MODE_ON BIT(28)
+#define MHL_PP_MODE BIT(29)
+#define MHL_SYNC_AUTO_EN BIT(30)
+#define HDMI_PCLK_FREE_RUN BIT(31)
+
+#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index cf8f38d39e10..1c366f8cb2d0 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
phy_set_drvdata(phy, mipi_tx);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy)) {
+ if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
new file mode 100644
index 000000000000..8a24754b440f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define HDMI_CON0 0x00
+#define RG_HDMITX_PLL_EN BIT(31)
+#define RG_HDMITX_PLL_FBKDIV (0x7f << 24)
+#define PLL_FBKDIV_SHIFT 24
+#define RG_HDMITX_PLL_FBKSEL (0x3 << 22)
+#define PLL_FBKSEL_SHIFT 22
+#define RG_HDMITX_PLL_PREDIV (0x3 << 20)
+#define PREDIV_SHIFT 20
+#define RG_HDMITX_PLL_POSDIV (0x3 << 18)
+#define POSDIV_SHIFT 18
+#define RG_HDMITX_PLL_RST_DLY (0x3 << 16)
+#define RG_HDMITX_PLL_IR (0xf << 12)
+#define PLL_IR_SHIFT 12
+#define RG_HDMITX_PLL_IC (0xf << 8)
+#define PLL_IC_SHIFT 8
+#define RG_HDMITX_PLL_BP (0xf << 4)
+#define PLL_BP_SHIFT 4
+#define RG_HDMITX_PLL_BR (0x3 << 2)
+#define PLL_BR_SHIFT 2
+#define RG_HDMITX_PLL_BC (0x3 << 0)
+#define PLL_BC_SHIFT 0
+#define HDMI_CON1 0x04
+#define RG_HDMITX_PLL_DIVEN (0x7 << 29)
+#define PLL_DIVEN_SHIFT 29
+#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
+#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26)
+#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24)
+#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
+#define RG_HDMITX_PLL_BAND (0x3f << 16)
+#define RG_HDMITX_PLL_REF_SEL BIT(15)
+#define RG_HDMITX_PLL_BIAS_EN BIT(14)
+#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
+#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
+#define RG_HDMITX_PLL_TXDIV (0x3 << 10)
+#define PLL_TXDIV_SHIFT 10
+#define RG_HDMITX_PLL_LVROD_EN BIT(9)
+#define RG_HDMITX_PLL_MONVC_EN BIT(8)
+#define RG_HDMITX_PLL_MONCK_EN BIT(7)
+#define RG_HDMITX_PLL_MONREF_EN BIT(6)
+#define RG_HDMITX_PLL_TST_EN BIT(5)
+#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
+#define RG_HDMITX_PLL_TST_SEL (0xf << 0)
+#define HDMI_CON2 0x08
+#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8)
+#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
+#define RG_HDMITX_EN_TX_CKLDO BIT(0)
+#define HDMI_CON3 0x0c
+#define RG_HDMITX_SER_EN (0xf << 28)
+#define RG_HDMITX_PRD_EN (0xf << 24)
+#define RG_HDMITX_PRD_IMP_EN (0xf << 20)
+#define RG_HDMITX_DRV_EN (0xf << 16)
+#define RG_HDMITX_DRV_IMP_EN (0xf << 12)
+#define DRV_IMP_EN_SHIFT 12
+#define RG_HDMITX_MHLCK_FORCE BIT(10)
+#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
+#define RG_HDMITX_MHLCK_EN BIT(8)
+#define RG_HDMITX_SER_DIN_SEL (0xf << 4)
+#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
+#define RG_HDMITX_SER_BIST_TOG BIT(2)
+#define RG_HDMITX_SER_DIN_TOG BIT(1)
+#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
+#define HDMI_CON4 0x10
+#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24)
+#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16)
+#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8)
+#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0)
+#define PRD_IBIAS_CLK_SHIFT 24
+#define PRD_IBIAS_D2_SHIFT 16
+#define PRD_IBIAS_D1_SHIFT 8
+#define PRD_IBIAS_D0_SHIFT 0
+#define HDMI_CON5 0x14
+#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24)
+#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16)
+#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8)
+#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0)
+#define DRV_IBIAS_CLK_SHIFT 24
+#define DRV_IBIAS_D2_SHIFT 16
+#define DRV_IBIAS_D1_SHIFT 8
+#define DRV_IBIAS_D0_SHIFT 0
+#define HDMI_CON6 0x18
+#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24)
+#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16)
+#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8)
+#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0)
+#define DRV_IMP_CLK_SHIFT 24
+#define DRV_IMP_D2_SHIFT 16
+#define DRV_IMP_D1_SHIFT 8
+#define DRV_IMP_D0_SHIFT 0
+#define HDMI_CON7 0x1c
+#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27)
+#define RG_HDMITX_SER_DIN (0x3ff << 16)
+#define RG_HDMITX_CHLDC_TST (0xf << 12)
+#define RG_HDMITX_CHLCK_TST (0xf << 8)
+#define RG_HDMITX_RESERVE (0xff << 0)
+#define HDMI_CON8 0x20
+#define RGS_HDMITX_2T1_LEV (0xf << 16)
+#define RGS_HDMITX_2T1_EDG (0xf << 12)
+#define RGS_HDMITX_5T1_LEV (0xf << 8)
+#define RGS_HDMITX_5T1_EDG (0xf << 4)
+#define RGS_HDMITX_PLUG_TST BIT(0)
+
+struct mtk_hdmi_phy {
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *pll;
+ struct clk_hw pll_hw;
+ unsigned long pll_rate;
+ u8 drv_imp_clk;
+ u8 drv_imp_d2;
+ u8 drv_imp_d1;
+ u8 drv_imp_d0;
+ u32 ibias;
+ u32 ibias_up;
+};
+
+static const u8 PREDIV[3][4] = {
+ {0x0, 0x0, 0x0, 0x0}, /* 27 MHz */
+ {0x1, 0x1, 0x1, 0x1}, /* 74 MHz */
+ {0x1, 0x1, 0x1, 0x1} /* 148 MHz */
+};
+
+static const u8 TXDIV[3][4] = {
+ {0x3, 0x3, 0x3, 0x2}, /* 27 MHz */
+ {0x2, 0x1, 0x1, 0x1}, /* 74 MHz */
+ {0x1, 0x0, 0x0, 0x0} /* 148 MHz */
+};
+
+static const u8 FBKSEL[3][4] = {
+ {0x1, 0x1, 0x1, 0x1}, /* 27 MHz */
+ {0x1, 0x0, 0x1, 0x1}, /* 74 MHz */
+ {0x1, 0x0, 0x1, 0x1} /* 148 MHz */
+};
+
+static const u8 FBKDIV[3][4] = {
+ {19, 24, 29, 19}, /* 27 MHz */
+ {19, 24, 14, 19}, /* 74 MHz */
+ {19, 24, 14, 19} /* 148 MHz */
+};
+
+static const u8 DIVEN[3][4] = {
+ {0x2, 0x1, 0x1, 0x2}, /* 27 MHz */
+ {0x2, 0x2, 0x2, 0x2}, /* 74 MHz */
+ {0x2, 0x2, 0x2, 0x2} /* 148 MHz */
+};
+
+static const u8 HTPLLBP[3][4] = {
+ {0xc, 0xc, 0x8, 0xc}, /* 27 MHz */
+ {0xc, 0xf, 0xf, 0xc}, /* 74 MHz */
+ {0xc, 0xf, 0xf, 0xc} /* 148 MHz */
+};
+
+static const u8 HTPLLBC[3][4] = {
+ {0x2, 0x3, 0x3, 0x2}, /* 27 MHz */
+ {0x2, 0x3, 0x3, 0x2}, /* 74 MHz */
+ {0x2, 0x3, 0x3, 0x2} /* 148 MHz */
+};
+
+static const u8 HTPLLBR[3][4] = {
+ {0x1, 0x1, 0x0, 0x1}, /* 27 MHz */
+ {0x1, 0x2, 0x2, 0x1}, /* 74 MHz */
+ {0x1, 0x2, 0x2, 0x1} /* 148 MHz */
+};
+
+static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 val, u32 mask)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel(tmp, reg);
+}
+
+static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_hdmi_phy, pll_hw);
+}
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ dev_dbg(hdmi_phy->dev, "%s\n", __func__);
+
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+
+ return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ dev_dbg(hdmi_phy->dev, "%s\n", __func__);
+
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ usleep_range(100, 150);
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ unsigned int pre_div;
+ unsigned int div;
+
+ dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
+ rate, parent_rate);
+
+ if (rate <= 27000000) {
+ pre_div = 0;
+ div = 3;
+ } else if (rate <= 74250000) {
+ pre_div = 1;
+ div = 2;
+ } else {
+ pre_div = 1;
+ div = 1;
+ }
+
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
+ RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+ (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
+ RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+ (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
+ (0x1 << PLL_BR_SHIFT),
+ RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
+ RG_HDMITX_PLL_BR);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
+ (0x3 << PRD_IBIAS_CLK_SHIFT) |
+ (0x3 << PRD_IBIAS_D2_SHIFT) |
+ (0x3 << PRD_IBIAS_D1_SHIFT) |
+ (0x3 << PRD_IBIAS_D0_SHIFT),
+ RG_HDMITX_PRD_IBIAS_CLK |
+ RG_HDMITX_PRD_IBIAS_D2 |
+ RG_HDMITX_PRD_IBIAS_D1 |
+ RG_HDMITX_PRD_IBIAS_D0);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
+ (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
+ (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
+ (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
+ (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
+ (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
+ RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
+ RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
+ (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
+ (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
+ (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
+ (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
+ RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+ return 0;
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ hdmi_phy->pll_rate = rate;
+ if (rate <= 74250000)
+ *parent_rate = rate;
+ else
+ *parent_rate = rate / 2;
+
+ return rate;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ return hdmi_phy->pll_rate;
+}
+
+static const struct clk_ops mtk_hdmi_pll_ops = {
+ .prepare = mtk_hdmi_pll_prepare,
+ .unprepare = mtk_hdmi_pll_unprepare,
+ .set_rate = mtk_hdmi_pll_set_rate,
+ .round_rate = mtk_hdmi_pll_round_rate,
+ .recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_DRV_EN);
+ usleep_range(100, 150);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_SER_EN);
+}
+
+static int mtk_hdmi_phy_power_on(struct phy *phy)
+{
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(hdmi_phy->pll);
+ if (ret < 0)
+ return ret;
+
+ mtk_hdmi_phy_enable_tmds(hdmi_phy);
+
+ return 0;
+}
+
+static int mtk_hdmi_phy_power_off(struct phy *phy)
+{
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+
+ mtk_hdmi_phy_disable_tmds(hdmi_phy);
+ clk_disable_unprepare(hdmi_phy->pll);
+
+ return 0;
+}
+
+static const struct phy_ops mtk_hdmi_phy_ops = {
+ .power_on = mtk_hdmi_phy_power_on,
+ .power_off = mtk_hdmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi_phy *hdmi_phy;
+ struct resource *mem;
+ struct clk *ref_clk;
+ const char *ref_clk_name;
+ struct clk_init_data clk_init = {
+ .ops = &mtk_hdmi_pll_ops,
+ .num_parents = 1,
+ .parent_names = (const char * const *)&ref_clk_name,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+ };
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+ int ret;
+
+ hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
+ if (!hdmi_phy)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi_phy->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hdmi_phy->regs)) {
+ ret = PTR_ERR(hdmi_phy->regs);
+ dev_err(dev, "Failed to get memory resource: %d\n", ret);
+ return ret;
+ }
+
+ ref_clk = devm_clk_get(dev, "pll_ref");
+ if (IS_ERR(ref_clk)) {
+ ret = PTR_ERR(ref_clk);
+ dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
+ ret);
+ return ret;
+ }
+ ref_clk_name = __clk_get_name(ref_clk);
+
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &clk_init.name);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+ return ret;
+ }
+
+ hdmi_phy->pll_hw.init = &clk_init;
+ hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
+ if (IS_ERR(hdmi_phy->pll)) {
+ ret = PTR_ERR(hdmi_phy->pll);
+ dev_err(dev, "Failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
+ &hdmi_phy->ibias);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
+ &hdmi_phy->ibias_up);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
+ hdmi_phy->drv_imp_clk = 0x30;
+ hdmi_phy->drv_imp_d2 = 0x30;
+ hdmi_phy->drv_imp_d1 = 0x30;
+ hdmi_phy->drv_imp_d0 = 0x30;
+
+ phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create HDMI PHY\n");
+ return PTR_ERR(phy);
+ }
+ phy_set_drvdata(phy, hdmi_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ hdmi_phy->dev = dev;
+ return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+ hdmi_phy->pll);
+}
+
+static int mtk_hdmi_phy_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id mtk_hdmi_phy_match[] = {
+ { .compatible = "mediatek,mt8173-hdmi-phy", },
+ {},
+};
+
+struct platform_driver mtk_hdmi_phy_driver = {
+ .probe = mtk_hdmi_phy_probe,
+ .remove = mtk_hdmi_phy_remove,
+ .driver = {
+ .name = "mediatek-hdmi-phy",
+ .of_match_table = mtk_hdmi_phy_match,
+ },
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 3a1c5fbae54a..520e5e668d6c 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,11 +1,7 @@
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ebb470ff7200..2b4b125eebc3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = mgag200_gem_free_object,
+ .gem_free_object_unlocked = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 615cbb08ba29..13798b3e6beb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -17,8 +17,8 @@
static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
- if (mga_fb->obj)
- drm_gem_object_unreference_unlocked(mga_fb->obj);
+
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d347dca17267..6b21cb27e1cc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1352,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
* use this for 8-bit mode so can't perform smooth fades on deeper modes,
* but it's a requirement that we provide the function
*/
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
- int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
int i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
mga_crtc->lut_r[i] = red[i] >> 8;
mga_crtc->lut_g[i] = green[i] >> 8;
mga_crtc->lut_b[i] = blue[i] >> 8;
}
mga_crtc_load_lut(crtc);
+
+ return 0;
}
/* Simple cleanup function */
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 9d5083d0f1ee..68268e55d595 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
{
}
-static int mgag200_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
.evict_flags = mgag200_bo_evict_flags,
- .move = mgag200_bo_move,
+ .move = NULL,
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 167a4971f47c..7c7a0314a756 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select SND_SOC_HDMI_CODEC if SND_SOC
default y
help
DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 60cb02624dc0..4e2806cf778c 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -35,6 +35,7 @@ msm-y := \
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
@@ -45,6 +46,7 @@ msm-y := \
msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
+ msm_gem_shrinker.o \
msm_gem_submit.o \
msm_gpu.o \
msm_iommu.o \
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2aec27dbb5bb..f386f463278d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb;
- unsigned i, ibs = 0;
+ unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, submit->cmd[i].size);
- ibs++;
+ OUT_PKT2(ring);
break;
}
}
- /* on a320, at least, we seem to need to pad things out to an
- * even number of qwords to avoid issue w/ CP hanging on wrap-
- * around:
- */
- if (ibs % 2)
- OUT_PKT2(ring);
-
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence->seqno);
@@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
+ adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
if (gpu->memptrs_bo) {
+ if (gpu->memptrs)
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+
if (gpu->memptrs_iova)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 6edcd6f57e70..ec572f8389ed 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
struct platform_device *phy_pdev;
struct device_node *phy_node;
- phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
dev_err(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 93c1ee094eac..63436d8ee470 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
},
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+ .io_start = { 0x4700000, 0x5800000 },
+ .num_dsi = 2,
};
static const char * const dsi_6g_bus_clk_names[] = {
@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd922800, 0xfd922b00 },
+ .num_dsi = 2,
};
static const char * const dsi_8916_bus_clk_names[] = {
@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
},
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+ .io_start = { 0x1a98000 },
+ .num_dsi = 1,
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd998000, 0xfd9a0000 },
+ .num_dsi = 2,
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index a68c836744a3..eeacc3232494 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -34,6 +34,8 @@ struct msm_dsi_config {
struct dsi_reg_config reg_cfg;
const char * const *bus_clk_names;
const int num_bus_clks;
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi;
};
struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a3e47ad83eb3..f05ed0e1f3d6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_vaddr(msm_host->tx_gem_obj);
+ data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
return len;
}
@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
u32 lane_map[4];
int ret, i, len, num_lanes;
- prop = of_find_property(ep, "qcom,data-lane-map", &len);
+ prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
dev_dbg(dev, "failed to find data lane mapping\n");
return -EINVAL;
@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
msm_host->num_data_lanes = num_lanes;
- ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+ ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
dev_err(dev, "failed to read lane data\n");
@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
const int *swap = supported_data_lane_swaps[i];
int j;
+ /*
+ * the data-lanes array we get from DT has a logical->physical
+ * mapping. The "data lane swap" register field represents
+ * supported configurations in a physical->logical mapping.
+ * Translate the DT mapping to what we understand and find a
+ * configuration that works.
+ */
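+ /*
+  * For example, data-lanes = <3 0 1 2> routes logical lane 0 to
+  * physical lane 3; such a mapping matches a swap[] entry only if
+  * swap[3] == 0, swap[0] == 1, swap[1] == 2 and swap[2] == 3.
+  */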
for (j = 0; j < num_lanes; j++) {
- if (swap[j] != lane_map[j])
+ if (lane_map[j] < 0 || lane_map[j] > 3)
+ dev_err(dev, "bad physical lane entry %u\n",
+ lane_map[j]);
+
+ if (swap[lane_map[j]] != j)
break;
}
@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
struct device_node *endpoint, *device_node;
int ret;
- ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
- if (ret) {
- dev_err(dev, "%s: host index not specified, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
/*
- * Get the first endpoint node. In our case, dsi has one output port
- * to which the panel is connected. Don't return an error if a port
- * isn't defined. It's possible that there is nothing connected to
- * the dsi output.
+ * Get the endpoint of the output port of the DSI host. In our case,
+ * this is mapped to port number with reg = 1. Don't return an error if
+ * the remote endpoint isn't defined. It's possible that there is
+ * nothing connected to the dsi output.
*/
- endpoint = of_graph_get_next_endpoint(np, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
dev_dbg(dev, "%s: no endpoint\n", __func__);
return 0;
@@ -1648,6 +1655,25 @@ err:
return ret;
}
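+/*
+ * Derive the DSI host index by matching the "dsi_ctrl" register base
+ * against the per-SoC io_start table in msm_dsi_config.
+ */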
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
+ msm_host->id = dsi_host_get_id(msm_host);
+ if (msm_host->id < 0) {
+ ret = msm_host->id;
+ pr_err("%s: unable to identify DSI host index\n", __func__);
+ goto fail;
+ }
+
/* fixup base address by io offset */
msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
}
msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
- if (IS_ERR(msm_host->mode)) {
+ if (!msm_host->mode) {
pr_err("%s: cannot duplicate mode\n", __func__);
- return PTR_ERR(msm_host->mode);
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index e2f42d8ea294..f39386ed75e4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{}
};
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi_phy; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cfg = match->data;
phy->pdev = pdev;
- ret = of_property_read_u32(dev->of_node,
- "qcom,dsi-phy-index", &phy->id);
- if (ret) {
- dev_err(dev, "%s: PHY index not specified, %d\n",
+ phy->id = dsi_phy_get_id(phy);
+ if (phy->id < 0) {
+ ret = phy->id;
+ dev_err(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0d54ed00386d..f24a85439b94 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg {
* Fill default H/W values in illegal cells, eg. cell {0, 1}.
*/
bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi_phy;
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index f4bc11af849a..c757e2070cac 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
- }
+ },
+ .io_start = { 0xfd998300, 0xfd9a0300 },
+ .num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 96d1852af418..63d7fba31380 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0xfd922b00, 0xfd923100 },
+ .num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0x1a98500 },
+ .num_dsi_phy = 1,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 213355a3e767..7bdb9de54968 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0x4700300, 0x5800300 },
+ .num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 72360cd038c0..5960628ceb93 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
-
- DBG("");
- return edp_connector->edp->encoder;
-}
-
static const struct drm_connector_funcs edp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
.get_modes = edp_connector_get_modes,
.mode_valid = edp_connector_mode_valid,
- .best_encoder = edp_connector_best_encoder,
};
/* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 51b9ea552f97..973720792236 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
return gpio;
}
+/*
+ * HDMI audio codec callbacks
+ */
+static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ unsigned int chan;
+ unsigned int channel_allocation = 0;
+ unsigned int rate;
+ unsigned int level_shift = 0; /* 0dB */
+ bool down_mix = false;
+
+ dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ params->sample_width, params->cea.channels);
+
+ switch (params->cea.channels) {
+ case 2:
+ /* FR and FL speakers */
+ channel_allocation = 0;
+ chan = MSM_HDMI_AUDIO_CHANNEL_2;
+ break;
+ case 4:
+ /* FC, LFE, FR and FL speakers */
+ channel_allocation = 0x3;
+ chan = MSM_HDMI_AUDIO_CHANNEL_4;
+ break;
+ case 6:
+ /* RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x0B;
+ chan = MSM_HDMI_AUDIO_CHANNEL_6;
+ break;
+ case 8:
+ /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x1F;
+ chan = MSM_HDMI_AUDIO_CHANNEL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ dev_err(dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ msm_hdmi_audio_set_sample_rate(hdmi, rate);
+ msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
+ level_shift, down_mix);
+
+ return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+
+ msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+ .hw_params = msm_hdmi_audio_hw_params,
+ .audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &msm_hdmi_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
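+/*
+ * Register a platform device bound to the generic ASoC hdmi-codec driver so
+ * that HDMI audio is exposed as a codec DAI.
+ */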
+static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
+}
+
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
- int i;
+ int i, err;
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hdmi);
priv->hdmi = hdmi;
+ err = msm_hdmi_register_audio_driver(hdmi, dev);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec %d\n", err);
+ hdmi->audio_pdev = NULL;
+ }
+
return 0;
}
@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) {
+ if (priv->hdmi->audio_pdev)
+ platform_device_unregister(priv->hdmi->audio_pdev);
+
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bc7ba0bdee07..accc9a61611d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
+ struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
/*
* audio:
*/
+/* Supported HDMI Audio channels and rates */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+#define HDMI_SAMPLE_RATE_32KHZ 0
+#define HDMI_SAMPLE_RATE_44_1KHZ 1
+#define HDMI_SAMPLE_RATE_48KHZ 2
+#define HDMI_SAMPLE_RATE_88_2KHZ 3
+#define HDMI_SAMPLE_RATE_96KHZ 4
+#define HDMI_SAMPLE_RATE_176_4KHZ 5
+#define HDMI_SAMPLE_RATE_192KHZ 6
int msm_hdmi_audio_update(struct hdmi *hdmi);
int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b15d72683112..a2515b466ce5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
return 0;
}
-static struct drm_encoder *
-msm_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- return hdmi_connector->hdmi->encoder;
-}
-
static const struct drm_connector_funcs hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
.get_modes = msm_hdmi_connector_get_modes,
.mode_valid = msm_hdmi_connector_mode_valid,
- .best_encoder = msm_hdmi_connector_best_encoder,
};
/* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 0baaaaabd002..6e767979aab3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
{
- if (hdmi && hdmi->hdcp_ctrl) {
+ if (hdmi) {
kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 35ad78a1dc1c..24258e3025e3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -23,7 +23,6 @@
struct mdp4_dtv_encoder {
struct drm_encoder base;
- struct clk *src_clk;
struct clk *hdmi_clk;
struct clk *mdp_clk;
unsigned long int pixclock;
@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
- clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
bs_set(mdp4_dtv_encoder, 1);
- DBG("setting src_clk=%lu", pc);
+ DBG("setting mdp_clk=%lu", pc);
- ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
- clk_prepare_enable(mdp4_dtv_encoder->src_clk);
- ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
- if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ pc, ret);
+
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
mdp4_dtv_encoder->enabled = true;
@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+ return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
}
/* initialize encoder */
@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
- mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
- if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
- dev_err(dev->dev, "failed to get src_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
- goto fail;
- }
-
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
dev_err(dev->dev, "failed to get hdmi_clk\n");
@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
goto fail;
}
- mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get mdp_clk\n");
+ dev_err(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a6c2..7b39e89fbc2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i, ncrtcs = state->dev->mode_config.num_crtc;
+ int i;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
mdp4_enable(mdp4_kms);
/* see 119ecb7fd */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
- }
}
static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i, ncrtcs = state->dev->mode_config.num_crtc;
+ int i;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
/* see 119ecb7fd */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
- }
mdp4_disable(mdp4_kms);
}
@@ -162,6 +158,7 @@ static const char * const iommu_ports[] = {
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct device *dev = mdp4_kms->dev->dev;
struct msm_mmu *mmu = mdp4_kms->mmu;
if (mmu) {
@@ -171,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
- if (mdp4_kms->blank_cursor_bo)
- drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+ if (mdp4_kms->rpm_enabled)
+ pm_runtime_disable(dev);
+
kfree(mdp4_kms);
}
@@ -440,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- int ret;
+ int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
@@ -461,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ kms->irq = irq;
+
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
@@ -496,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
dev_err(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
@@ -506,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ pm_runtime_enable(dev->dev);
+ mdp4_kms->rpm_enabled = true;
+
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c5d045d5680d..25fb83997119 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -47,6 +47,8 @@ struct mdp4_kms {
struct mdp_irq error_handler;
+ bool rpm_enabled;
+
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
uint32_t blank_cursor_iova;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 2648cd7631ef..353429b05733 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- return mdp4_lvds_connector->encoder;
-}
-
static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
.get_modes = mdp4_lvds_connector_get_modes,
.mode_valid = mdp4_lvds_connector_mode_valid,
- .best_encoder = mdp4_lvds_connector_best_encoder,
};
/* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index b275ce11b24b..ca6ca30650a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -198,118 +190,109 @@ static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-static inline uint32_t __offset_MDP(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->mdp.base[0]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-
-static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
-#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
+#define REG_MDP5_HW_VERSION 0x00000000
+#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
+ return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
}
-#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
-#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
+#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+ return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
}
-#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
+ return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
}
-static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+#define REG_MDP5_DISP_INTF_SEL 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
}
-static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_EN 0x00000010
-static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_STATUS 0x00000014
-static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_CLEAR 0x00000018
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_EN 0x0000001c
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_STATUS 0x00000020
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
-static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
-#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
+#define REG_MDP5_SPARE_0 0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
@@ -322,35 +305,35 @@ static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
default: return INVALID_IDX(idx);
}
}
-static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
-static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
{
- return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
}
-#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); }
+#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
static inline uint32_t __offset_CTL(uint32_t idx)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 57f73f0c120d..ac9e4cde1380 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -26,7 +26,6 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.name = "msm8x74v1",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
@@ -41,12 +40,12 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
+ .base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -55,7 +54,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
},
.pipe_rgb = {
.count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
+ .base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -63,26 +62,26 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
},
.pipe_dma = {
.count = 2,
- .base = { 0x02a00, 0x02e00 },
+ .base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
0,
},
.lm = {
.count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.nb_stages = 5,
},
.dspp = {
.count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x04500, 0x04900, 0x04d00 },
},
.pp = {
.count = 3,
- .base = { 0x21b00, 0x21c00, 0x21d00 },
+ .base = { 0x21a00, 0x21b00, 0x21c00 },
},
.intf = {
- .base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+ .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
@@ -97,7 +96,6 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
@@ -112,48 +110,48 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
+ .base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
+ .base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x02a00, 0x02e00 },
+ .base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x04500, 0x04900, 0x04d00 },
},
.ad = {
.count = 2,
- .base = { 0x13100, 0x13300 },
+ .base = { 0x13000, 0x13200 },
},
.pp = {
.count = 3,
- .base = { 0x12d00, 0x12e00, 0x12f00 },
+ .base = { 0x12c00, 0x12d00, 0x12e00 },
},
.intf = {
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
@@ -168,7 +166,6 @@ const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
@@ -190,49 +187,49 @@ const struct mdp5_cfg_hw apq8084_config = {
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
- .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x03200, 0x03600 },
+ .base = { 0x03100, 0x03500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
- .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
- .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+ .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
},
.ad = {
.count = 3,
- .base = { 0x13500, 0x13700, 0x13900 },
+ .base = { 0x13400, 0x13600, 0x13800 },
},
.pp = {
.count = 4,
- .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+ .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
},
.intf = {
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
@@ -247,7 +244,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.name = "msm8x16",
.mdp = {
.count = 1,
- .base = { 0x01000 },
+ .base = { 0x0 },
.caps = MDP_CAP_SMP |
0,
},
@@ -261,41 +258,41 @@ const struct mdp5_cfg_hw msm8x16_config = {
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0x4003ffff,
},
.pipe_vig = {
.count = 1,
- .base = { 0x05000 },
+ .base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
- .base = { 0x15000, 0x17000 },
+ .base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
- .base = { 0x25000 },
+ .base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2, /* LM0 and LM3 */
- .base = { 0x45000, 0x48000 },
+ .base = { 0x44000, 0x47000 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
- .base = { 0x55000 },
+ .base = { 0x54000 },
},
.intf = {
- .base = { 0x00000, 0x6b800 },
+ .base = { 0x00000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
@@ -308,7 +305,6 @@ const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
.count = 1,
- .base = { 0x01000 },
.caps = MDP_CAP_SMP |
0,
},
@@ -330,49 +326,49 @@ const struct mdp5_cfg_hw msm8x94_config = {
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf0ffffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
- .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x25000, 0x27000 },
+ .base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
- .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
- .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+ .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
},
.ad = {
.count = 3,
- .base = { 0x79000, 0x79800, 0x7a000 },
+ .base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
- .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.intf = {
- .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
@@ -387,19 +383,18 @@ const struct mdp5_cfg_hw msm8x96_config = {
.name = "msm8x96",
.mdp = {
.count = 1,
- .base = { 0x01000 },
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
0,
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf4ffffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -410,7 +405,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
},
.pipe_rgb = {
.count = 4,
- .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -420,7 +415,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
},
.pipe_dma = {
.count = 2,
- .base = { 0x25000, 0x27000 },
+ .base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
@@ -428,33 +423,33 @@ const struct mdp5_cfg_hw msm8x96_config = {
},
.lm = {
.count = 6,
- .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 2,
- .base = { 0x55000, 0x57000 },
+ .base = { 0x54000, 0x56000 },
},
.ad = {
.count = 3,
- .base = { 0x79000, 0x79800, 0x7a000 },
+ .base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
- .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.cdm = {
.count = 1,
- .base = { 0x7a200 },
+ .base = { 0x79200 },
},
.dsc = {
.count = 2,
- .base = { 0x81000, 0x81400 },
+ .base = { 0x80000, 0x80400 },
},
.intf = {
- .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 69094cb28103..c627ab6d0061 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
* start signal for the slave encoder
*/
if (intf_num == 1)
- data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
else if (intf_num == 2)
- data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
else
return -EINVAL;
/* Smart Panel, Sync mode */
- data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL;
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
/* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0),
- MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_disable(mdp5_kms);
return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1931..fa2be7ce9468 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
+ const struct drm_plane_state *pstate;
int cnt = 0, i;
DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- drm_atomic_crtc_state_for_each_plane(plane, state) {
- struct drm_plane_state *pstate;
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (cnt >= (hw_cfg->lm.nb_stages)) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
- pstate = state->state->plane_states[drm_plane_index(plane)];
- /* plane might not have changed, in which case take
- * current state:
- */
- if (!pstate)
- pstate = plane->state;
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -496,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
- int ret, bpp, lm;
- unsigned int depth;
+ int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
@@ -527,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
lm = mdp5_crtc->lm;
- drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
- stride = width * (bpp >> 3);
+ stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 4e81ca4f964a..d021edc3b307 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
u32 intf_sel;
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf->num) {
case 0:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
break;
case 1:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
break;
case 2:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
break;
case 3:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
break;
default:
BUG();
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
if (!enable) {
ctlx->pair = NULL;
ctly->pair = NULL;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
ctlx->pair = ctly;
ctly->pair = ctlx;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
- MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
+ MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 1d95f9fd9dc7..fe0c22230883 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
* to use the master's enable signal for the slave encoder.
*/
if (intf_num == 1)
- data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
else if (intf_num == 2)
- data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
else
return -EINVAL;
/* Make sure clocks are on when connectors calling this function. */
mdp5_enable(mdp5_kms);
/* Dumb Panel, Sync mode */
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 73bc3e312fd4..d53e5510fd7c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,7 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/irqdomain.h>
#include <linux/irq.h>
#include "msm_drv.h"
@@ -24,9 +23,9 @@
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask)
{
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
- irqmask ^ (irqmask & old_irqmask));
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
+ mdp5_enable(mdp5_kms);
mdp_irq_register(mdp_kms, error_handler);
+ mdp5_disable(mdp5_kms);
return 0;
}
@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms);
}
-static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+irqreturn_t mdp5_irq(struct msm_kms *kms)
{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
uint32_t status, enable;
- enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
- status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
+ enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
VERB("status=%08x", status);
@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
-}
-
-irqreturn_t mdp5_irq(struct msm_kms *kms)
-{
- struct mdp_kms *mdp_kms = to_mdp_kms(kms);
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
- uint32_t intr;
-
- intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
-
- VERB("intr=%08x", intr);
-
- if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
- mdp5_irq_mdp(mdp_kms);
- intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
- }
-
- while (intr) {
- irq_hw_number_t hwirq = fls(intr) - 1;
- generic_handle_irq(irq_find_mapping(
- mdp5_kms->irqcontroller.domain, hwirq));
- intr &= ~(1 << hwirq);
- }
return IRQ_HANDLED;
}
@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp5_crtc_vblank(crtc), false);
mdp5_disable(mdp5_kms);
}
-
-/*
- * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
- MDSS_HW_INTR_STATUS_INTR_DSI1 | \
- MDSS_HW_INTR_STATUS_INTR_HDMI | \
- MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdp5_hw_mask_irq(struct irq_data *irqd)
-{
- struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void mdp5_hw_unmask_irq(struct irq_data *irqd)
-{
- struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip mdp5_hw_irq_chip = {
- .name = "mdp5",
- .irq_mask = mdp5_hw_mask_irq,
- .irq_unmask = mdp5_hw_unmask_irq,
-};
-
-static int mdp5_hw_irqdomain_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct mdp5_kms *mdp5_kms = d->host_data;
-
- if (!(VALID_IRQS & (1 << hwirq)))
- return -EPERM;
-
- irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdp5_kms);
-
- return 0;
-}
-
-static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
- .map = mdp5_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-
-int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
-{
- struct device *dev = mdp5_kms->dev->dev;
- struct irq_domain *d;
-
- d = irq_domain_add_linear(dev->of_node, 32,
- &mdp5_hw_irqdomain_ops, mdp5_kms);
- if (!d) {
- dev_err(dev, "mdp5 irq domain add failed\n");
- return -ENXIO;
- }
-
- mdp5_kms->irqcontroller.enabled_mask = 0;
- mdp5_kms->irqcontroller.domain = d;
-
- return 0;
-}
-
-void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
-{
- if (mdp5_kms->irqcontroller.domain) {
- irq_domain_remove(mdp5_kms->irqcontroller.domain);
- mdp5_kms->irqcontroller.domain = NULL;
- }
-}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d..ed7143d35b25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -16,6 +16,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_irq.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -28,10 +29,11 @@ static const char *iommu_ports[] = {
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_device *dev = mdp5_kms->dev;
+ struct platform_device *pdev = mdp5_kms->pdev;
unsigned long flags;
- pm_runtime_get_sync(dev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ mdp5_enable(mdp5_kms);
/* Magic unknown register writes:
*
@@ -58,12 +60,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- pm_runtime_put_sync(dev->dev);
+ mdp5_disable(mdp5_kms);
+ pm_runtime_put_sync(&pdev->dev);
return 0;
}
@@ -78,17 +81,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
{
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
-
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);
- }
mdp5_disable(mdp5_kms);
}
@@ -117,26 +114,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
return mdp5_encoder_set_split_display(encoder, slave_encoder);
}
-static void mdp5_destroy(struct msm_kms *kms)
+static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
- mdp5_irq_domain_fini(mdp5_kms);
-
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
-
- if (mdp5_kms->ctlm)
- mdp5_ctlm_destroy(mdp5_kms->ctlm);
- if (mdp5_kms->smp)
- mdp5_smp_destroy(mdp5_kms->smp);
- if (mdp5_kms->cfg)
- mdp5_cfg_destroy(mdp5_kms->cfg);
-
- kfree(mdp5_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
@@ -154,7 +140,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
- .destroy = mdp5_destroy,
+ .destroy = mdp5_kms_destroy,
},
.set_irqmask = mdp5_set_irqmask,
};
@@ -351,13 +337,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* register our interrupt-controller for hdmi/eDP/dsi/etc
- * to use for irqs routed through mdp:
- */
- ret = mdp5_irq_domain_init(mdp5_kms);
- if (ret)
- goto fail;
-
/* construct CRTCs and their private planes: */
for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
@@ -425,17 +404,17 @@ fail:
return ret;
}
-static void read_hw_revision(struct mdp5_kms *mdp5_kms,
- uint32_t *major, uint32_t *minor)
+static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
+ u32 *major, u32 *minor)
{
- uint32_t version;
+ u32 version;
mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
+ version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
mdp5_disable(mdp5_kms);
- *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
- *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
+ *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
@@ -580,51 +559,146 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
- struct platform_device *pdev = dev->platformdev;
- struct mdp5_cfg *config;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev;
struct mdp5_kms *mdp5_kms;
- struct msm_kms *kms = NULL;
+ struct mdp5_cfg *config;
+ struct msm_kms *kms;
struct msm_mmu *mmu;
- uint32_t major, minor;
- int i, ret;
+ int irq, i, ret;
- mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
- if (!mdp5_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
- ret = -ENOMEM;
+ /* priv->kms would have been populated by the MDP5 driver */
+ kms = priv->kms;
+ if (!kms)
+ return NULL;
+
+ mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+ pdev = mdp5_kms->pdev;
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
- spin_lock_init(&mdp5_kms->resource_lock);
+ kms->irq = irq;
- mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
- kms = &mdp5_kms->base.base;
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp5_enable(mdp5_kms);
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+ !config->hw->intf.base[i])
+ continue;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
- mdp5_kms->dev = dev;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
+ }
+ mdp5_disable(mdp5_kms);
+ mdelay(16);
- /* mdp5_kms->mmio actually represents the MDSS base address */
- mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
- if (IS_ERR(mdp5_kms->mmio)) {
- ret = PTR_ERR(mdp5_kms->mmio);
+ if (config->platform.iommu) {
+ mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
+ iommu_domain_free(config->platform.iommu);
+ goto fail;
+ }
+
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ mmu = NULL;
+ }
+ mdp5_kms->mmu = mmu;
+
+ mdp5_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp5_kms->id < 0) {
+ ret = mdp5_kms->id;
+ dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
goto fail;
}
- mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdp5_kms->vbif)) {
- ret = PTR_ERR(mdp5_kms->vbif);
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(mdp5_kms->vdd)) {
- ret = PTR_ERR(mdp5_kms->vdd);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = config->hw->lm.max_width;
+ dev->mode_config.max_height = config->hw->lm.max_height;
+
+ dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+ dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+ dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
+ dev->max_vblank_count = 0xffffffff;
+ dev->vblank_disable_immediate = true;
+
+ return kms;
+fail:
+ if (kms)
+ mdp5_kms_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static void mdp5_destroy(struct platform_device *pdev)
+{
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
+ if (mdp5_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ u32 major, minor;
+ int ret;
+
+ mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ ret = -ENOMEM;
goto fail;
}
- ret = regulator_enable(mdp5_kms->vdd);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ platform_set_drvdata(pdev, mdp5_kms);
+
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->pdev = pdev;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
}
@@ -635,9 +709,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
- if (ret)
- goto fail;
ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
if (ret)
goto fail;
@@ -652,9 +723,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
- clk_set_rate(mdp5_kms->src_clk, 200000000);
+ clk_set_rate(mdp5_kms->core_clk, 200000000);
- read_hw_revision(mdp5_kms, &major, &minor);
+ pm_runtime_enable(&pdev->dev);
+ mdp5_kms->rpm_enabled = true;
+
+ read_mdp_hw_revision(mdp5_kms, &major, &minor);
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
@@ -667,7 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->caps = config->hw->mdp.caps;
/* TODO: compute core clock rate at runtime */
- clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+ clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
/*
* Some chipsets have a Shared Memory Pool (SMP), while others
@@ -690,73 +764,76 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- /* make sure things are off before attaching iommu (bootloader could
- * have left things on, in which case we'll start getting faults if
- * we don't disable):
- */
- mdp5_enable(mdp5_kms);
- for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
- if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
- !config->hw->intf.base[i])
- continue;
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+ /* set uninit-ed kms */
+ priv->kms = &mdp5_kms->base.base;
- mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
- }
- mdp5_disable(mdp5_kms);
- mdelay(16);
+ return 0;
+fail:
+ mdp5_destroy(pdev);
+ return ret;
+}
- if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(dev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
- goto fail;
- }
+static int mdp5_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret) {
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
- mmu->funcs->destroy(mmu);
- goto fail;
- }
- } else {
- dev_info(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
+ DBG("");
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
+ return mdp5_init(pdev, ddev);
+}
- ret = modeset_init(mdp5_kms);
- if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
- goto fail;
- }
+static void mdp5_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- dev->mode_config.max_width = config->hw->lm.max_width;
- dev->mode_config.max_height = config->hw->lm.max_height;
+ mdp5_destroy(pdev);
+}
- dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
- dev->vblank_disable_immediate = true;
+static const struct component_ops mdp5_ops = {
+ .bind = mdp5_bind,
+ .unbind = mdp5_unbind,
+};
- return kms;
+static int mdp5_dev_probe(struct platform_device *pdev)
+{
+ DBG("");
+ return component_add(&pdev->dev, &mdp5_ops);
+}
-fail:
- if (kms)
- mdp5_destroy(kms);
- return ERR_PTR(ret);
+static int mdp5_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &mdp5_ops);
+ return 0;
+}
+
+static const struct of_device_id mdp5_dt_match[] = {
+ { .compatible = "qcom,mdp5", },
+ /* to support downstream DT files */
+ { .compatible = "qcom,mdss_mdp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdp5_dt_match);
+
+static struct platform_driver mdp5_driver = {
+ .probe = mdp5_dev_probe,
+ .remove = mdp5_dev_remove,
+ .driver = {
+ .name = "msm_mdp",
+ .of_match_table = mdp5_dt_match,
+ },
+};
+
+void __init msm_mdp_register(void)
+{
+ DBG("");
+ platform_driver_register(&mdp5_driver);
+}
+
+void __exit msm_mdp_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&mdp5_driver);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9a25898239d3..03738927be10 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -31,6 +31,8 @@ struct mdp5_kms {
struct drm_device *dev;
+ struct platform_device *pdev;
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
@@ -43,29 +45,23 @@ struct mdp5_kms {
struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
- void __iomem *mmio, *vbif;
-
- struct regulator *vdd;
+ void __iomem *mmio;
struct clk *axi_clk;
struct clk *ahb_clk;
- struct clk *src_clk;
struct clk *core_clk;
struct clk *lut_clk;
struct clk *vsync_clk;
/*
* lock to protect access to global resources: ie., following register:
- * - REG_MDP5_MDP_DISP_INTF_SEL
+ * - REG_MDP5_DISP_INTF_SEL
*/
spinlock_t resource_lock;
- struct mdp_irq error_handler;
+ bool rpm_enabled;
- struct {
- volatile unsigned long enabled_mask;
- struct irq_domain *domain;
- } irqcontroller;
+ struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
new file mode 100644
index 000000000000..d444a6901fff
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+/*
+ * If needed, this can become more specific: something like struct mdp5_mdss,
+ * which contains a 'struct msm_mdss base' member.
+ */
+struct msm_mdss {
+ struct drm_device *dev;
+
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+
+ struct {
+ volatile unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irqcontroller;
+};
+
+static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+{
+ msm_writel(data, mdss->mmio + reg);
+}
+
+static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+{
+ return msm_readl(mdss->mmio + reg);
+}
+
+static irqreturn_t mdss_irq(int irq, void *arg)
+{
+ struct msm_mdss *mdss = arg;
+ u32 intr;
+
+ intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+
+ VERB("intr=%08x", intr);
+
+ while (intr) {
+ irq_hw_number_t hwirq = fls(intr) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ mdss->irqcontroller.domain, hwirq));
+ intr &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
+ * can register to get their irq's delivered
+ */
+
+#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
+ MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+ MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+ MDSS_HW_INTR_STATUS_INTR_HDMI | \
+ MDSS_HW_INTR_STATUS_INTR_EDP)
+
+static void mdss_hw_mask_irq(struct irq_data *irqd)
+{
+ struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void mdss_hw_unmask_irq(struct irq_data *irqd)
+{
+ struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip mdss_hw_irq_chip = {
+ .name = "mdss",
+ .irq_mask = mdss_hw_mask_irq,
+ .irq_unmask = mdss_hw_unmask_irq,
+};
+
+static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct msm_mdss *mdss = d->host_data;
+
+ if (!(VALID_IRQS & (1 << hwirq)))
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, mdss);
+
+ return 0;
+}
+
+static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+ .map = mdss_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+
+static int mdss_irq_domain_init(struct msm_mdss *mdss)
+{
+ struct device *dev = mdss->dev->dev;
+ struct irq_domain *d;
+
+ d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
+ mdss);
+ if (!d) {
+ dev_err(dev, "mdss irq domain add failed\n");
+ return -ENXIO;
+ }
+
+ mdss->irqcontroller.enabled_mask = 0;
+ mdss->irqcontroller.domain = d;
+
+ return 0;
+}
+
+void msm_mdss_destroy(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
+
+ if (!mdss)
+ return;
+
+ irq_domain_remove(mdss->irqcontroller.domain);
+ mdss->irqcontroller.domain = NULL;
+
+ regulator_disable(mdss->vdd);
+
+ pm_runtime_put_sync(dev->dev);
+
+ pm_runtime_disable(dev->dev);
+}
+
+int msm_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_mdss *mdss;
+ int ret;
+
+ DBG("");
+
+ if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
+ return 0;
+
+ mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
+ if (!mdss) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdss->dev = dev;
+
+ mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdss->mmio)) {
+ ret = PTR_ERR(mdss->mmio);
+ goto fail;
+ }
+
+ mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdss->vbif)) {
+ ret = PTR_ERR(mdss->vbif);
+ goto fail;
+ }
+
+ /* Regulator to enable GDSCs in downstream kernels */
+ mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdss->vdd)) {
+ ret = PTR_ERR(mdss->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(mdss->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ mdss_irq, 0, "mdss_isr", mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ goto fail_irq;
+ }
+
+ ret = mdss_irq_domain_init(mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ goto fail_irq;
+ }
+
+ priv->mdss = mdss;
+
+ pm_runtime_enable(dev->dev);
+
+ /*
+ * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
+ * domain. Remove this once runtime PM is adapted for all the devices.
+ */
+ pm_runtime_get_sync(dev->dev);
+
+ return 0;
+fail_irq:
+ regulator_disable(mdss->vdd);
+fail:
+ return ret;
+}
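mdss_irq() above drains the MDSS status register by repeatedly taking the highest set bit with fls(), dispatching it, then clearing it from the pending mask. A minimal userspace sketch of that dispatch loop, with a hand-rolled fls() standing in for the kernel helper and printf() in place of generic_handle_irq():

#include <stdio.h>

/* 1-based index of the highest set bit, 0 if none (like the kernel's fls()) */
static int fls(unsigned long x)
{
    int r = 0;
    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

static void handle_hwirq(unsigned long hwirq)
{
    /* the driver calls generic_handle_irq(irq_find_mapping(...)) here */
    printf("dispatch hwirq %lu\n", hwirq);
}

int main(void)
{
    unsigned long intr = 0x112;   /* pretend MDSS_HW_INTR_STATUS read */

    while (intr) {
        unsigned long hwirq = fls(intr) - 1;

        handle_hwirq(hwirq);
        intr &= ~(1UL << hwirq);
    }
    return 0;
}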
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 6f425c25d9fe..27d7b55b52c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -42,7 +42,7 @@
*
* configured:
* The block is allocated to some client, and assigned to that
- * client in MDP5_MDP_SMP_ALLOC registers.
+ * client in MDP5_SMP_ALLOC registers.
*
* inuse:
* The block is being actively used by a client.
@@ -59,7 +59,7 @@
* mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
+ * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
* are configured for the union(pending, inuse)
* Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run
@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
+ val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
switch (fld) {
case 0:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
}
}
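The register names above just drop the per-MDP instance argument; the allocation scheme itself is unchanged: every SMP_ALLOC register packs three client-id fields, so a block number maps to register blk / 3 and field blk % 3. A small userspace sketch of that read-modify-write packing, assuming 8-bit client fields instead of the generated MDP5_SMP_ALLOC_* masks:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[8];    /* stand-in for the SMP_ALLOC_W registers */

static void assign_block(int blk, uint32_t cid)
{
    int idx = blk / 3;                /* which register */
    int fld = blk % 3;                /* which client field inside it */
    uint32_t shift = fld * 8;         /* assumed 8-bit fields, for illustration only */
    uint32_t val = regs[idx];

    val &= ~(0xffu << shift);         /* clear CLIENTn */
    val |= (cid & 0xffu) << shift;    /* set CLIENTn(cid) */
    regs[idx] = val;
}

int main(void)
{
    assign_block(4, 0x21);            /* block 4 -> register 1, field 1 */
    printf("reg[1] = 0x%08x\n", (unsigned)regs[1]);
    return 0;
}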
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263f27..4a8a6f1f1151 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct msm_drm_private *priv = old_state->dev->dev_private;
struct msm_kms *kms = priv->kms;
- int ncrtcs = old_state->dev->mode_config.num_crtc;
int i;
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
if (!crtc->state->enable)
continue;
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct msm_drm_private *priv = dev->dev_private;
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
struct msm_commit *c;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
/*
* Figure out what crtcs we have:
*/
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
- c->crtc_mask |= (1 << drm_crtc_index(crtc));
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ c->crtc_mask |= drm_crtc_mask(crtc);
/*
* Figure out what fence to wait for:
*/
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
- if ((plane->state->fb != new_state->fb) && new_state->fb) {
- struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+ struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
}
}
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* Everything below can be run asynchronously without the need to grab
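The msm_atomic.c hunks above replace open-coded walks over state->crtcs[] and state->planes[], which had to skip empty slots by hand, with the for_each_crtc_in_state()/for_each_plane_in_state() helpers. A rough userspace sketch of the same pattern, using a made-up object array rather than the real DRM state structures, just to show how an iterator macro can hide the NULL check:

#include <stdio.h>
#include <stddef.h>

struct obj { const char *name; };
struct state { struct obj *objs[4]; int num; };

/* hypothetical analogue of for_each_crtc_in_state(): visit only populated slots */
#define for_each_obj_in_state(st, o, i) \
    for ((i) = 0; (i) < (st)->num; (i)++) \
        if (((o) = (st)->objs[i]) == NULL) {} else

int main(void)
{
    struct obj a = { "crtc0" }, b = { "crtc2" };
    struct state st = { .objs = { &a, NULL, &b, NULL }, .num = 4 };
    struct obj *o;
    int i;

    for_each_obj_in_state(&st, o, i)
        printf("%d: %s\n", i, o->name);
    return 0;
}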
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c654092ef78..8a0237008f74 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,6 +21,16 @@
#include "msm_gpu.h"
#include "msm_kms.h"
+
+/*
+ * MSM driver version:
+ * - 1.0.0 - initial interface
+ * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ */
+#define MSM_VERSION_MAJOR 1
+#define MSM_VERSION_MINOR 1
+#define MSM_VERSION_PATCHLEVEL 0
+
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -195,9 +205,9 @@ static int msm_drm_uninit(struct device *dev)
kfree(vbl_ev);
}
- drm_kms_helper_poll_fini(ddev);
+ msm_gem_shrinker_cleanup(ddev);
- drm_connector_unregister_all(ddev);
+ drm_kms_helper_poll_fini(ddev);
drm_dev_unregister(ddev);
@@ -217,10 +227,8 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms) {
- pm_runtime_disable(dev);
+ if (kms)
kms->funcs->destroy(kms);
- }
if (gpu) {
mutex_lock(&ddev->struct_mutex);
@@ -230,15 +238,16 @@ static int msm_drm_uninit(struct device *dev)
}
if (priv->vram.paddr) {
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm);
dma_free_attrs(dev, priv->vram.size, NULL,
- priv->vram.paddr, &attrs);
+ priv->vram.paddr, attrs);
}
component_unbind_all(dev, ddev);
+ msm_mdss_destroy(ddev);
+
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -284,6 +293,7 @@ static int msm_init_vram(struct drm_device *dev)
if (node) {
struct resource r;
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
size = r.end - r.start;
@@ -299,21 +309,21 @@ static int msm_init_vram(struct drm_device *dev)
}
if (size) {
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long attrs = 0;
void *p;
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attrs |= DMA_ATTR_WRITE_COMBINE;
/* note that for no-kernel-mapping, the vaddr returned
* is bogus, but non-null if allocation succeeded:
*/
p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, GFP_KERNEL, &attrs);
+ &priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
@@ -352,6 +362,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
}
ddev->dev_private = priv;
+ priv->dev = ddev;
+
+ ret = msm_mdss_init(ddev);
+ if (ret) {
+ kfree(priv);
+ drm_dev_unref(ddev);
+ return ret;
+ }
priv->wq = alloc_ordered_workqueue("msm", 0);
priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
@@ -367,6 +385,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret) {
+ msm_mdss_destroy(ddev);
kfree(priv);
drm_dev_unref(ddev);
return ret;
@@ -376,9 +395,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto fail;
+ msm_gem_shrinker_init(ddev);
+
switch (get_mdp_ver(pdev)) {
case 4:
kms = mdp4_kms_init(ddev);
+ priv->kms = kms;
break;
case 5:
kms = mdp5_kms_init(ddev);
@@ -400,10 +422,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto fail;
}
- priv->kms = kms;
-
if (kms) {
- pm_runtime_enable(dev);
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
@@ -419,24 +438,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto fail;
}
- pm_runtime_get_sync(dev);
- ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
- pm_runtime_put_sync(dev);
- if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ if (kms) {
+ pm_runtime_get_sync(dev);
+ ret = drm_irq_install(ddev, kms->irq);
+ pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
}
ret = drm_dev_register(ddev, 0);
if (ret)
goto fail;
- ret = drm_connector_register_all(ddev);
- if (ret) {
- dev_err(dev, "failed to register connectors\n");
- goto fail;
- }
-
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -690,6 +705,44 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
+static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ switch (args->madv) {
+ case MSM_MADV_DONTNEED:
+ case MSM_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = msm_gem_madvise(obj, args->madv);
+ if (ret >= 0) {
+ args->retained = ret;
+ ret = 0;
+ }
+
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -698,6 +751,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -730,7 +784,6 @@ static struct drm_driver msm_driver = {
.open = msm_open,
.preclose = msm_preclose,
.lastclose = msm_lastclose,
- .set_busid = drm_platform_set_busid,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
@@ -764,8 +817,9 @@ static struct drm_driver msm_driver = {
.name = "msm",
.desc = "MSM Snapdragon DRM",
.date = "20130625",
- .major = 1,
- .minor = 0,
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
};
#ifdef CONFIG_PM_SLEEP
@@ -805,22 +859,146 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int add_components(struct device *dev, struct component_match **matchptr,
- const char *name)
+/*
+ * Identify which components need to be added by parsing which remote endpoints
+ * our MDP output ports are connected to. In the case of LVDS on MDP4, there
+ * is no external component that we need to add since LVDS is within MDP4
+ * itself.
+ */
+static int add_components_mdp(struct device *mdp_dev,
+ struct component_match **matchptr)
{
- struct device_node *np = dev->of_node;
- unsigned i;
+ struct device_node *np = mdp_dev->of_node;
+ struct device_node *ep_node;
+ struct device *master_dev;
+
+ /*
+ * On MDP4-based platforms, the MDP platform device is the component
+ * master that adds other display interface components to itself.
+ *
+ * On MDP5-based platforms, the MDSS platform device is the component
+ * master that adds MDP5 and other display interface components to
+ * itself.
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4"))
+ master_dev = mdp_dev;
+ else
+ master_dev = mdp_dev->parent;
- for (i = 0; ; i++) {
- struct device_node *node;
+ for_each_endpoint_of_node(np, ep_node) {
+ struct device_node *intf;
+ struct of_endpoint ep;
+ int ret;
- node = of_parse_phandle(np, name, i);
- if (!node)
- break;
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret) {
+ dev_err(mdp_dev, "unable to parse port endpoint\n");
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /*
+ * The LCDC/LVDS port on MDP4 is a special case where the
+ * remote-endpoint isn't a component that we need to add
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4") &&
+ ep.port == 0) {
+ of_node_put(ep_node);
+ continue;
+ }
+
+ /*
+ * It's okay if some of the ports don't have a remote endpoint
+ * specified. It just means that the port isn't connected to
+ * any external interface.
+ */
+ intf = of_graph_get_remote_port_parent(ep_node);
+ if (!intf) {
+ of_node_put(ep_node);
+ continue;
+ }
+
+ component_match_add(master_dev, matchptr, compare_of, intf);
+
+ of_node_put(intf);
+ of_node_put(ep_node);
+ }
+
+ return 0;
+}
+
+static int compare_name_mdp(struct device *dev, void *data)
+{
+ return (strstr(dev_name(dev), "mdp") != NULL);
+}
+
+static int add_display_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device *mdp_dev;
+ int ret;
+
+ /*
+ * MDP5-based devices don't have a flat hierarchy. There is a top-level
+ * parent, MDSS, with children such as MDP5, DSI, HDMI and eDP. Populate the
+ * child devices, find the MDP5 node, and then add the interfaces
+ * to our components list.
+ */
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate children devices\n");
+ return ret;
+ }
+
+ mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+ if (!mdp_dev) {
+ dev_err(dev, "failed to find MDSS MDP node\n");
+ of_platform_depopulate(dev);
+ return -ENODEV;
+ }
+
+ put_device(mdp_dev);
- component_match_add(dev, matchptr, compare_of, node);
+ /* add the MDP component itself */
+ component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
+ } else {
+ /* MDP4 */
+ mdp_dev = dev;
}
+ ret = add_components_mdp(mdp_dev, matchptr);
+ if (ret)
+ of_platform_depopulate(dev);
+
+ return ret;
+}
+
+/*
+ * We don't know what the best binding is to link the GPU with the DRM device.
+ * For now, we just hunt for all the possible GPUs that we support and add them
+ * as components.
+ */
+static const struct of_device_id msm_gpu_match[] = {
+ { .compatible = "qcom,adreno-3xx" },
+ { .compatible = "qcom,kgsl-3d0" },
+ { },
+};
+
+static int add_gpu_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, msm_gpu_match);
+ if (!np)
+ return 0;
+
+ component_match_add(dev, matchptr, compare_of, np);
+
+ of_node_put(np);
+
return 0;
}
@@ -846,9 +1024,15 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
+ int ret;
- add_components(&pdev->dev, &match, "connectors");
- add_components(&pdev->dev, &match, "gpus");
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+
+ ret = add_gpu_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -857,20 +1041,14 @@ static int msm_pdev_probe(struct platform_device *pdev)
static int msm_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
+ of_platform_depopulate(&pdev->dev);
return 0;
}
-static const struct platform_device_id msm_id[] = {
- { "mdp", 0 },
- { }
-};
-
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */
- { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */
- /* to support downstream DT files */
- { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
+ { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
+ { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -883,12 +1061,12 @@ static struct platform_driver msm_platform_driver = {
.of_match_table = dt_match,
.pm = &msm_pm_ops,
},
- .id_table = msm_id,
};
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_mdp_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -904,6 +1082,7 @@ static void __exit msm_drm_unregister(void)
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_mdp_unregister();
}
module_init(msm_drm_register);
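The new MSM_GEM_MADVISE ioctl (together with the bump to driver version 1.1.0) lets userspace mark a buffer's backing pages purgeable and later find out whether they survived. A rough sketch of the userspace side, assuming the drm_msm_gem_madvise layout and MSM_MADV_* values this series adds to the msm uapi header (not visible in these hunks); the ioctl request number is passed in rather than spelled out for the same reason:

#include <stdint.h>
#include <sys/ioctl.h>

/* assumed uapi layout, for illustration only */
struct drm_msm_gem_madvise {
    uint32_t handle;      /* in: GEM handle */
    uint32_t madv;        /* in: MSM_MADV_WILLNEED or MSM_MADV_DONTNEED */
    uint32_t retained;    /* out: non-zero if the backing pages were not purged */
};
#define MSM_MADV_WILLNEED 0
#define MSM_MADV_DONTNEED 1

static int set_madvise(int drm_fd, unsigned long request, uint32_t handle, uint32_t madv)
{
    struct drm_msm_gem_madvise req = {
        .handle = handle,
        .madv   = madv,
    };

    if (ioctl(drm_fd, request, &req))   /* request would be DRM_IOCTL_MSM_GEM_MADVISE */
        return -1;

    return req.retained;
}

A retained value of 0 when marking a buffer MSM_MADV_WILLNEED again means the shrinker purged it in the meantime and its contents must be regenerated.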
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 5b2963f32291..b4bc7f1ef717 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -46,6 +46,7 @@
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
+struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
@@ -77,11 +78,16 @@ struct msm_vblank_ctrl {
struct msm_drm_private {
+ struct drm_device *dev;
+
struct msm_kms *kms;
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
+ /* top level MDSS wrapper device (for MDP5 only) */
+ struct msm_mdss *mdss;
+
/* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5..
*/
@@ -147,6 +153,9 @@ struct msm_drm_private {
struct drm_mm mm;
} vram;
+ struct notifier_block vmap_notifier;
+ struct shrinker shrinker;
+
struct msm_vblank_ctrl vblank_ctrl;
};
@@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+void msm_gem_shrinker_init(struct drm_device *dev);
+void msm_gem_shrinker_cleanup(struct drm_device *dev);
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
-void *msm_gem_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
+void msm_gem_put_vaddr(struct drm_gem_object *obj);
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
+void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
}
#endif
+void __init msm_mdp_register(void);
+void __exit msm_mdp_unregister(void);
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b873f0..95cf8fe72ee5 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -49,24 +49,16 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i];
- if (bo)
- drm_gem_object_unreference_unlocked(bo);
+
+ drm_gem_object_unreference_unlocked(bo);
}
kfree(msm_fb);
}
-static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags, unsigned color,
- struct drm_clip_rect *clips, unsigned num_clips)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = msm_framebuffer_create_handle,
.destroy = msm_framebuffer_destroy,
- .dirty = msm_framebuffer_dirty,
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c6cf837c5193..ffd4a338ca12 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
@@ -188,21 +188,7 @@ fail:
return ret;
}
-static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
- u16 red, u16 green, u16 blue, int regno)
-{
- DBG("fbdev: set gamma");
-}
-
-static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue, int regno)
-{
- DBG("fbdev: get gamma");
-}
-
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
- .gamma_set = msm_crtc_fb_gamma_set,
- .gamma_get = msm_crtc_fb_gamma_get,
.fb_probe = msm_fbdev_create,
};
@@ -265,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb) {
+ msm_gem_put_vaddr(fbdev->bo);
drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 69836f5685b1..6cd4af443139 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void
+put_iova(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int id;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ struct msm_mmu *mmu = priv->mmus[id];
+ if (mmu && msm_obj->domain[id].iova) {
+ uint32_t offset = msm_obj->domain[id].iova;
+ mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+ msm_obj->domain[id].iova = 0;
+ }
+ }
+}
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -388,7 +408,7 @@ fail:
return ret;
}
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -401,18 +421,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM);
}
+ msm_obj->vmap_count++;
return msm_obj->vaddr;
}
-void *msm_gem_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
void *ret;
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_vaddr_locked(obj);
+ ret = msm_gem_get_vaddr_locked(obj);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ WARN_ON(msm_obj->vmap_count < 1);
+ msm_obj->vmap_count--;
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+ mutex_lock(&obj->dev->struct_mutex);
+ msm_gem_put_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+}
+
+/* Update madvise status. Returns true if the object has not been purged,
+ * else false or -errno.
+ */
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ if (msm_obj->madv != __MSM_MADV_PURGED)
+ msm_obj->madv = madv;
+
+ return (msm_obj->madv != __MSM_MADV_PURGED);
+}
+
+void msm_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!is_purgeable(msm_obj));
+ WARN_ON(obj->import_attach);
+
+ put_iova(obj);
+
+ msm_gem_vunmap(obj);
+
+ put_pages(obj);
+
+ msm_obj->madv = __MSM_MADV_PURGED;
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+ drm_gem_free_mmap_offset(obj);
+
+ /* Our goal here is to return as much of the memory as
+ * possible back to the system, as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+ return;
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+}
+
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
@@ -464,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
reservation_object_add_excl_fence(msm_obj->resv, fence);
@@ -532,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct reservation_object_list *fobj;
struct fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
+ const char *madv;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
+ switch (msm_obj->madv) {
+ case __MSM_MADV_PURGED:
+ madv = " purged";
+ break;
+ case MSM_MADV_DONTNEED:
+ madv = " purgeable";
+ break;
+ case MSM_MADV_WILLNEED:
+ default:
+ madv = "";
+ break;
+ }
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size);
+ off, msm_obj->vaddr, obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -578,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -589,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- }
- }
+ put_iova(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
@@ -609,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
- vunmap(msm_obj->vaddr);
+ msm_gem_vunmap(obj);
put_pages(obj);
}
@@ -688,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
+ msm_obj->madv = MSM_MADV_WILLNEED;
if (resv) {
msm_obj->resv = resv;
@@ -729,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference(obj);
-
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
@@ -774,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
-
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
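msm_gem_vaddr()/msm_gem_vaddr_locked() become get/put pairs so the new shrinker can tell when a kernel mapping is idle (vmap_count back to zero) and may be torn down by msm_gem_vunmap(). A small userspace sketch of that counting scheme, with malloc()/free() standing in for vmap()/vunmap():

#include <stdlib.h>
#include <assert.h>

struct buf {
    void *vaddr;            /* lazily created mapping */
    unsigned int vmap_count;
    size_t size;
};

static void *buf_get_vaddr(struct buf *b)
{
    if (!b->vaddr) {
        b->vaddr = malloc(b->size);   /* the driver vmap()s the backing pages here */
        if (!b->vaddr)
            return NULL;
    }
    b->vmap_count++;
    return b->vaddr;
}

static void buf_put_vaddr(struct buf *b)
{
    assert(b->vmap_count > 0);
    b->vmap_count--;
}

/* only legal once every get has been balanced by a put (is_vunmapable()) */
static void buf_vunmap(struct buf *b)
{
    if (!b->vaddr || b->vmap_count)
        return;
    free(b->vaddr);
    b->vaddr = NULL;
}

int main(void)
{
    struct buf b = { .size = 64 };
    void *p = buf_get_vaddr(&b);

    buf_put_vaddr(&b);
    buf_vunmap(&b);           /* count is zero again, so the mapping is dropped */
    return p ? 0 : 1;
}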
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9facd4b6ffd9..b2f13cfe945e 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -29,6 +29,16 @@ struct msm_gem_object {
uint32_t flags;
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ uint8_t madv;
+
+ /**
+ * count of active vmap'ing
+ */
+ uint8_t vmap_count;
+
/* And the object is either:
* inactive - on priv->inactive_list
* active - on one of the gpu's active_list.. well, at
@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
-#define MAX_CMDS 4
+static inline bool is_purgeable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
+ !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
+}
+
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -95,7 +114,7 @@ struct msm_gem_submit {
uint32_t size; /* in dwords */
uint32_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
- } cmd[MAX_CMDS];
+ } *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 6b90890faffe..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
{
- return msm_gem_vaddr(obj);
+ return msm_gem_get_vaddr(obj);
}
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- /* TODO msm_gem_vunmap() */
+ msm_gem_put_vaddr(obj);
}
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
new file mode 100644
index 000000000000..283d2841ba58
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+ *unlock = false;
+ } else {
+ *unlock = true;
+ }
+
+ return true;
+}
+
+
+static unsigned long
+msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned long count = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_purgeable(msm_obj))
+ count += msm_obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned long freed = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (freed >= sc->nr_to_scan)
+ break;
+ if (is_purgeable(msm_obj)) {
+ msm_gem_purge(&msm_obj->base);
+ freed += msm_obj->base.size >> PAGE_SHIFT;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed > 0)
+ pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+ return freed;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct msm_drm_private *priv =
+ container_of(nb, struct msm_drm_private, vmap_notifier);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned unmapped = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return NOTIFY_DONE;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_vunmapable(msm_obj)) {
+ msm_gem_vunmap(&msm_obj->base);
+ /* Since we don't know any better, let's bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+ if (++unmapped >= 15)
+ break;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ *(unsigned long *)ptr += unmapped;
+
+ if (unmapped > 0)
+ pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * msm_gem_shrinker_init - Initialize msm shrinker
+ * @dev: drm device
+ *
+ * This function registers and sets up the msm shrinker.
+ */
+void msm_gem_shrinker_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->shrinker.count_objects = msm_gem_shrinker_count;
+ priv->shrinker.scan_objects = msm_gem_shrinker_scan;
+ priv->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&priv->shrinker));
+
+ priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+}
+
+/**
+ * msm_gem_shrinker_cleanup - Clean up msm shrinker
+ * @dev: drm device
+ *
+ * This function unregisters the msm shrinker.
+ */
+void msm_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+}
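The shrinker implements the usual count/scan contract: count_objects() reports how many pages could be reclaimed from purgeable, inactive objects, and scan_objects() purges objects until roughly nr_to_scan pages are freed (or bails with SHRINK_STOP when it cannot take the lock). A rough userspace model of that contract, using a plain array in place of the driver's inactive list:

#include <stdio.h>
#include <stdbool.h>

struct gem { unsigned long pages; bool purgeable; bool purged; };

static unsigned long shrink_count(struct gem *objs, int n)
{
    unsigned long count = 0;
    int i;

    for (i = 0; i < n; i++)
        if (objs[i].purgeable && !objs[i].purged)
            count += objs[i].pages;
    return count;
}

static unsigned long shrink_scan(struct gem *objs, int n, unsigned long nr_to_scan)
{
    unsigned long freed = 0;
    int i;

    for (i = 0; i < n && freed < nr_to_scan; i++) {
        if (objs[i].purgeable && !objs[i].purged) {
            objs[i].purged = true;    /* msm_gem_purge() drops the real backing pages */
            freed += objs[i].pages;
        }
    }
    return freed;
}

int main(void)
{
    struct gem objs[] = { { 64, true, false }, { 16, false, false }, { 32, true, false } };

    printf("reclaimable: %lu pages\n", shrink_count(objs, 3));
    printf("freed: %lu pages\n", shrink_scan(objs, 3, 80));
    return 0;
}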
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index eb4bb8b2f3a5..9766f9ae4b7d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -29,10 +29,11 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr)
+ struct msm_gpu *gpu, int nr_bos, int nr_cmds)
{
struct msm_gem_submit *submit;
- int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+ int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+ (nr_cmds * sizeof(*submit->cmd));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (!submit)
@@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu;
submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
+ submit->cmd = (void *)&submit->bos[nr_bos];
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -279,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably want
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr_locked(&obj->base);
+ ptr = msm_gem_get_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -332,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off;
}
+ msm_gem_put_vaddr_locked(&obj->base);
+
return 0;
}
@@ -369,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pipe != MSM_PIPE_3D0)
return -EINVAL;
- if (args->nr_cmds > MAX_CMDS)
- return -EINVAL;
-
- submit = submit_create(dev, gpu, args->nr_bos);
- if (!submit)
- return -ENOMEM;
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
+ submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
ret = submit_lookup_objects(submit, args, file);
if (ret)
@@ -462,6 +467,7 @@ out:
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
+out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
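submit_create() now sizes a single allocation for the submit struct plus both variable-length arrays and points submit->cmd just past bos[nr_bos], which is what lets the old MAX_CMDS limit go away. A minimal userspace sketch of that one-allocation layout:

#include <stdio.h>
#include <stdlib.h>

struct bo  { int handle; };
struct cmd { int type; };

struct submit {
    int nr_bos, nr_cmds;
    struct cmd *cmd;      /* points into the same allocation, right after bos[] */
    struct bo bos[];      /* flexible array member */
};

static struct submit *submit_create(int nr_bos, int nr_cmds)
{
    size_t sz = sizeof(struct submit) + nr_bos * sizeof(struct bo) +
                nr_cmds * sizeof(struct cmd);
    struct submit *s = calloc(1, sz);

    if (!s)
        return NULL;
    s->nr_bos = nr_bos;
    s->nr_cmds = nr_cmds;
    s->cmd = (struct cmd *)&s->bos[nr_bos];   /* second array starts where the first ends */
    return s;
}

int main(void)
{
    struct submit *s = submit_create(4, 2);

    if (!s)
        return 1;
    s->cmd[1].type = 42;
    printf("cmd[1].type = %d\n", s->cmd[1].type);
    free(s);                                  /* one free() releases struct and both arrays */
    return 0;
}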
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a7a0b6d9b057..3a294d0da3a0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
+ dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e32222c3d44f..40e41e5cdbc6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -61,10 +61,8 @@ struct msm_kms_funcs {
struct msm_kms {
const struct msm_kms_funcs *funcs;
- /* irq handling: */
- bool in_irq;
- struct list_head irq_list; /* list of mdp4_irq */
- uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ /* irq number to be passed on to drm_irq_install */
+ int irq;
};
static inline void msm_kms_init(struct msm_kms *kms,
@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+int msm_mdss_init(struct drm_device *dev);
+void msm_mdss_destroy(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 830857c47c86..17fe4e53e0d1 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf,
size_t sz, loff_t *ppos)
{
struct msm_perf_state *perf = file->private_data;
- int n = 0, ret;
+ int n = 0, ret = 0;
mutex_lock(&perf->read_lock);
@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf,
}
n = min((int)sz, perf->buftot - perf->bufpos);
- ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
- if (ret)
+ if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
+ ret = -EFAULT;
goto out;
+ }
perf->bufpos += n;
*ppos += n;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0857710c2ff2..3a5fdfcd67ae 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -27,6 +27,11 @@
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
*/
#ifdef CONFIG_DEBUG_FS
@@ -40,6 +45,10 @@
#include "msm_gpu.h"
#include "msm_gem.h"
+static bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf,
goto out;
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
- ret = copy_to_user(buf, fptr, n);
- if (ret)
+ if (copy_to_user(buf, fptr, n)) {
+ ret = -EFAULT;
goto out;
+ }
fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
*ppos += n;
@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
kfree(rd);
}
+static void snapshot_buf(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit, int idx,
+ uint32_t iova, uint32_t size)
+{
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ const char *buf;
+
+ buf = msm_gem_get_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ return;
+
+ if (iova) {
+ buf += iova - submit->bos[idx].iova;
+ } else {
+ iova = submit->bos[idx].iova;
+ size = obj->base.size;
+ }
+
+ rd_write_section(rd, RD_GPUADDR,
+ (uint32_t[2]){ iova, size }, 8);
+ rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
+
+ msm_gem_put_vaddr_locked(&obj->base);
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit)
{
@@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- /* could be nice to have an option (module-param?) to snapshot
- * all the bo's associated with the submit. Handy to see vtx
- * buffers, etc. For now just the cmdstream bo's is enough.
- */
+ if (rd_full) {
+ for (i = 0; i < submit->nr_bos; i++) {
+ /* buffers that are written to probably don't start out
+ * with anything interesting:
+ */
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ continue;
+
+ snapshot_buf(rd, submit, i, 0, 0);
+ }
+ }
for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t idx = submit->cmd[i].idx;
uint32_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
- struct msm_gem_object *obj = submit->bos[idx].obj;
- const char *buf = msm_gem_vaddr_locked(&obj->base);
-
- if (IS_ERR(buf))
- continue;
- buf += iova - submit->bos[idx].iova;
-
- rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
- rd_write_section(rd, RD_BUFFER_CONTENTS,
- buf, szd * 4);
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!rd_full) {
+ snapshot_buf(rd, submit, submit->cmd[i].idx,
+ submit->cmd[i].iova, szd * 4);
+ }
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
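snapshot_buf() turns a GPU address inside a BO into a CPU pointer by offsetting from the BO's base iova, and falls back to dumping the whole BO when no iova/size is given. A tiny sketch of that translation, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t bo_iova = 0x10000000;           /* GPU address of the BO's start */
    char bo_cpu[256];                        /* stand-in for msm_gem_get_vaddr_locked() */
    uint32_t cmd_iova = bo_iova + 0x40;      /* cmdstream begins 0x40 bytes into the BO */

    char *cmd_cpu = bo_cpu + (cmd_iova - bo_iova);

    printf("offset into BO: %u bytes\n", (unsigned)(cmd_cpu - bo_cpu));
    return 0;
}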
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 42f5359cf988..f326cf6a32e6 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
goto fail;
}
- ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->start = msm_gem_get_vaddr_locked(ring->bo);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
goto fail;
@@ -59,7 +59,9 @@ fail:
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
- if (ring->bo)
+ if (ring->bo) {
+ msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
+ }
kfree(ring);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 5ab13e7939db..2922a82cba8e 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,13 +3,7 @@ config DRM_NOUVEAU
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB
- select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f318c54da33..0cb7a18cde26 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
}
-static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+static int
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
- int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
*/
if (!nv_crtc->base.primary->fb) {
nv_crtc->lut.depth = 0;
- return;
+ return 0;
}
nv_crtc_gamma_load(crtc);
+
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index aea81a547e85..34c0f2f67548 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev)
struct nv04_display *disp = nv04_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- /* Turn every CRTC off. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
-
/* Restore state */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
encoder->enc_restore(&encoder->base.base);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index a665b78b2af5..434d1e29f279 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
- if (crtc) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
+ if (crtc)
+ drm_crtc_force_disable(crtc);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 331620a52afa..287a7d6fa480 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_FERMI 0x07
#define NV_DEVICE_INFO_V0_KEPLER 0x08
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+#define NV_DEVICE_INFO_V0_PASCAL 0x0a
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 982aad8fa645..e6e9537537cf 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -39,6 +39,7 @@
#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
+#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -50,6 +51,8 @@
#define GK110_DISP /* cl5070.h */ 0x00009270
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
+#define GP100_DISP /* cl5070.h */ 0x00009770
+#define GP104_DISP /* cl5070.h */ 0x00009870
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -86,6 +89,8 @@
#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
+#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
+#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -105,6 +110,8 @@
#define MAXWELL_A /* cl9097.h */ 0x0000b097
#define MAXWELL_B /* cl9097.h */ 0x0000b197
+#define PASCAL_A /* cl9097.h */ 0x0000c097
+
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
@@ -128,6 +135,8 @@
#define FERMI_DMA 0x000090b5
#define KEPLER_DMA_COPY_A 0x0000a0b5
#define MAXWELL_DMA_COPY_A 0x0000b0b5
+#define PASCAL_DMA_COPY_A 0x0000c0b5
+#define PASCAL_DMA_COPY_B 0x0000c1b5
#define FERMI_DECOMPRESS 0x000090b8
@@ -137,6 +146,7 @@
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
+#define PASCAL_COMPUTE_A 0x0000c0c0
#define NV74_CIPHER 0x000074c1
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 126a85cc81bc..7ea8aa7ca408 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -33,7 +33,10 @@ enum nvkm_devidx {
NVKM_ENGINE_CE0,
NVKM_ENGINE_CE1,
NVKM_ENGINE_CE2,
- NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+ NVKM_ENGINE_CE3,
+ NVKM_ENGINE_CE4,
+ NVKM_ENGINE_CE5,
+ NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
NVKM_ENGINE_CIPHER,
NVKM_ENGINE_DISP,
@@ -50,7 +53,8 @@ enum nvkm_devidx {
NVKM_ENGINE_NVENC0,
NVKM_ENGINE_NVENC1,
- NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+ NVKM_ENGINE_NVENC2,
+ NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
NVKM_ENGINE_NVDEC,
NVKM_ENGINE_PM,
@@ -102,6 +106,7 @@ struct nvkm_device {
NV_C0 = 0xc0,
NV_E0 = 0xe0,
GM100 = 0x110,
+ GP100 = 0x130,
} card_type;
u32 chipset;
u8 chiprev;
@@ -136,7 +141,7 @@ struct nvkm_device {
struct nvkm_volt *volt;
struct nvkm_engine *bsp;
- struct nvkm_engine *ce[3];
+ struct nvkm_engine *ce[6];
struct nvkm_engine *cipher;
struct nvkm_disp *disp;
struct nvkm_dma *dma;
@@ -149,7 +154,7 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
- struct nvkm_engine *nvenc[2];
+ struct nvkm_engine *nvenc[3];
struct nvkm_engine *nvdec;
struct nvkm_pm *pm;
struct nvkm_engine *sec;
@@ -170,7 +175,6 @@ struct nvkm_device_func {
void (*fini)(struct nvkm_device *, bool suspend);
resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
- bool cpu_coherent;
};
struct nvkm_device_quirk {
@@ -206,7 +210,7 @@ struct nvkm_device_chip {
int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
@@ -219,7 +223,7 @@ struct nvkm_device_chip {
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index b5370cb56e3c..e5c9b6268dcc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -28,6 +28,7 @@ struct nvkm_device_tegra {
} iommu;
int gpu_speedo;
+ int gpu_speedo_id;
};
struct nvkm_device_tegra_func {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 594d719ba41e..d3d26a1e215d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index d4fdce27b297..e82049667ce4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 15ddfcf5e8db..ed92fec5292c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 6515f5810a26..89cf99307828 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index e39a1fea930b..a72f3290528a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -7,6 +7,9 @@ struct nvkm_bios {
u32 size;
u8 *data;
+ u32 image0_size;
+ u32 imaged_addr;
+
u32 bmp_offset;
u32 bit_offset;
@@ -22,10 +25,9 @@ struct nvkm_bios {
u8 nvbios_checksum(const u8 *data, int size);
u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
-
-#define nvbios_rd08(b,o) (b)->data[(o)]
-#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
-#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
+u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
+u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
#endif
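With this hunk the BIOS read helpers become real functions instead of macros; their bodies are not part of this diff. A minimal sketch of what a bounds-checked little-endian accessor could look like, illustrative only (the example_* names are hypothetical) and not the nouveau implementation, which is presumably what also honours the new image0_size/imaged_addr fields added above:

#include <asm/unaligned.h>

/* Illustrative sketch only: a plain bounds-checked little-endian read
 * from the in-memory image; example_* names are hypothetical. */
static u8
example_bios_rd08(struct nvkm_bios *bios, u32 addr)
{
	return (addr < bios->size) ? bios->data[addr] : 0x00;
}

static u16
example_bios_rd16(struct nvkm_bios *bios, u32 addr)
{
	if (addr + 1 < bios->size)
		return get_unaligned_le16(&bios->data[addr]);
	return 0x0000;
}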
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 0a734fd06acf..3a410275fa71 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -56,6 +56,8 @@ struct nvkm_fb {
int regions;
} tile;
+ u8 page;
+
struct nvkm_memory *mmu_rd;
struct nvkm_memory *mmu_wr;
};
@@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index c6b90b6543b3..cd755baf9cab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 2e80682b2da1..27d25b18d85c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -7,11 +7,14 @@ struct nvkm_mc {
struct nvkm_subdev subdev;
};
-void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
-void nvkm_mc_intr_unarm(struct nvkm_mc *);
-void nvkm_mc_intr_rearm(struct nvkm_mc *);
-void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
-void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_intr(struct nvkm_device *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_device *);
+void nvkm_mc_intr_rearm(struct nvkm_device *);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_unk260(struct nvkm_device *, u32 data);
int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
@@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
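The MC helpers now take a struct nvkm_device * instead of a struct nvkm_mc *; the tegra.c hunk further down in this diff drops its NULL check on the MC subdev, which suggests the helpers guard against a missing subdev themselves. A minimal caller-side sketch under that assumption (example_isr is a hypothetical handler, not part of the patch):

#include <linux/interrupt.h>

/* Sketch of an interrupt handler written against the device-based MC
 * interface; assumes the helpers cope with a missing MC subdev. */
static irqreturn_t
example_isr(int irq, void *arg)
{
	struct nvkm_device *device = arg;
	bool handled = false;

	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}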
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index ddb913889d7e..e6523e2cea9f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index c6edd95a5b69..b04c38c07761 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -43,9 +43,8 @@ struct nvkm_secboot {
const struct nvkm_secboot_func *func;
struct nvkm_subdev subdev;
+ enum nvkm_devidx devidx;
u32 base;
- u32 irq_mask;
- u32 enable_mask;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 8fb575a92c48..71ebbfd4484f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -8,10 +8,11 @@ struct nvkm_top {
struct list_head device;
};
-u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index feff55cff05b..b765f4ffcde6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -12,6 +12,9 @@ struct nvkm_volt {
u32 uv;
u8 vid;
} vid[256];
+
+ u32 max_uv;
+ u32 min_uv;
};
int nvkm_volt_get(struct nvkm_volt *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index eb7de487a2b3..7bd4683216d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
return NVIF_CLASS_SW_GF100;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index db76b94e6e26..f2ad17aa33f0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -45,6 +45,8 @@
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
+ bool optimus_flags_detected;
+ bool optimus_skip_dsm;
acpi_handle dhandle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -57,9 +59,6 @@ bool nouveau_is_v1_dsm(void) {
return nouveau_dsm_priv.dsm_detected;
}
-#define NOUVEAU_DSM_HAS_MUX 0x1
-#define NOUVEAU_DSM_HAS_OPT 0x2
-
#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
@@ -110,7 +109,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
* requirements on the fourth parameter, so a private implementation
* instead of using acpi_check_dsm().
*/
-static int nouveau_check_optimus_dsm(acpi_handle handle)
+static int nouveau_dsm_get_optimus_functions(acpi_handle handle)
{
int result;
@@ -125,7 +124,9 @@ static int nouveau_check_optimus_dsm(acpi_handle handle)
* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
* If the n-th bit is enabled, function n is supported
*/
- return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
+ if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS))
+ return result;
+ return 0;
}
static int nouveau_dsm(acpi_handle handle, int func, int arg)
@@ -212,26 +213,55 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
.get_client_id = nouveau_dsm_get_client_id,
};
-static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
+/*
+ * Firmware supporting Windows 8 or later does not use _DSM to put the device
+ * into D3cold; it relies instead on disabling power resources on the parent.
+ */
+static bool nouveau_pr3_present(struct pci_dev *pdev)
+{
+ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
+ struct acpi_device *parent_adev;
+
+ if (!parent_pdev)
+ return false;
+
+ parent_adev = ACPI_COMPANION(&parent_pdev->dev);
+ if (!parent_adev)
+ return false;
+
+ return acpi_has_method(parent_adev->handle, "_PR3");
+}
+
+static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
+ bool *has_mux, bool *has_opt,
+ bool *has_opt_flags, bool *has_pr3)
{
acpi_handle dhandle;
- int retval = 0;
+ bool supports_mux;
+ int optimus_funcs;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
- return false;
+ return;
if (!acpi_has_method(dhandle, "_DSM"))
- return false;
+ return;
+
+ supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+ 1 << NOUVEAU_DSM_POWER);
+ optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
- if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
- 1 << NOUVEAU_DSM_POWER))
- retval |= NOUVEAU_DSM_HAS_MUX;
+ /* Does not look like an Nvidia device. */
+ if (!supports_mux && !optimus_funcs)
+ return;
- if (nouveau_check_optimus_dsm(dhandle))
- retval |= NOUVEAU_DSM_HAS_OPT;
+ *dhandle_out = dhandle;
+ *has_mux = supports_mux;
+ *has_opt = !!optimus_funcs;
+ *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
+ *has_pr3 = false;
- if (retval & NOUVEAU_DSM_HAS_OPT) {
+ if (optimus_funcs) {
uint32_t result;
nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
&result);
@@ -239,11 +269,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
(result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
(result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
(result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
- }
- if (retval)
- nouveau_dsm_priv.dhandle = dhandle;
- return retval;
+ *has_pr3 = nouveau_pr3_present(pdev);
+ }
}
static bool nouveau_dsm_detect(void)
@@ -251,11 +279,13 @@ static bool nouveau_dsm_detect(void)
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
- int has_dsm = 0;
- int has_optimus = 0;
+ acpi_handle dhandle = NULL;
+ bool has_mux = false;
+ bool has_optimus = false;
+ bool has_optimus_flags = false;
+ bool has_power_resources = false;
int vga_count = 0;
bool guid_valid;
- int retval;
bool ret = false;
/* lookup the MXM GUID */
@@ -268,32 +298,32 @@ static bool nouveau_dsm_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- retval = nouveau_dsm_pci_probe(pdev);
- if (retval & NOUVEAU_DSM_HAS_MUX)
- has_dsm |= 1;
- if (retval & NOUVEAU_DSM_HAS_OPT)
- has_optimus = 1;
+ nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
+ &has_optimus_flags, &has_power_resources);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
vga_count++;
- retval = nouveau_dsm_pci_probe(pdev);
- if (retval & NOUVEAU_DSM_HAS_MUX)
- has_dsm |= 1;
- if (retval & NOUVEAU_DSM_HAS_OPT)
- has_optimus = 1;
+ nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
+ &has_optimus_flags, &has_power_resources);
}
/* find the optimus DSM or the old v1 DSM */
- if (has_optimus == 1) {
+ if (has_optimus) {
+ nouveau_dsm_priv.dhandle = dhandle;
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
+ if (has_power_resources)
+ pr_info("nouveau: detected PR support, will not use DSM\n");
nouveau_dsm_priv.optimus_detected = true;
+ nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags;
+ nouveau_dsm_priv.optimus_skip_dsm = has_power_resources;
ret = true;
- } else if (vga_count == 2 && has_dsm && guid_valid) {
+ } else if (vga_count == 2 && has_mux && guid_valid) {
+ nouveau_dsm_priv.dhandle = dhandle;
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
@@ -321,11 +351,12 @@ void nouveau_register_dsm_handler(void)
void nouveau_switcheroo_optimus_dsm(void)
{
u32 result = 0;
- if (!nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.optimus_skip_dsm)
return;
- nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
- 0x3, &result);
+ if (nouveau_dsm_priv.optimus_flags_detected)
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
+ 0x3, &result);
nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5e3f3e826476..6190035edfea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
- if (!nvxx_device(&drm->device)->func->cpu_coherent)
- nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+ nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
if (drm->client.vm) {
@@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- /*
- * TTM buffers allocated using the DMA API already have a mapping, let's
- * use it instead.
- */
- if (!nvbo->force_coherent)
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
- &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
if (!nvbo)
return;
- /*
- * TTM buffers allocated using the DMA API already had a coherent
- * mapping which we used, no need to unmap.
- */
- if (!nvbo->force_coherent)
- ttm_bo_kunmap(&nvbo->kmap);
+ ttm_bo_kunmap(&nvbo->kmap);
}
void
@@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
return 0;
}
-static inline void *
-_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
-{
- struct ttm_dma_tt *dma_tt;
- u8 *m = mem;
-
- index *= sz;
-
- if (m) {
- /* kmap'd address, return the corresponding offset */
- m += index;
- } else {
- /* DMA-API mapping, lookup the right address */
- dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
- m = dma_tt->cpu_address[index / PAGE_SIZE];
- m += index % PAGE_SIZE;
- }
-
- return m;
-}
-#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
@@ -1082,7 +1048,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict,
- no_wait_gpu,
new_mem);
nouveau_fence_unref(&fence);
}
@@ -1104,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1182,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1210,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
@@ -1289,6 +1258,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
+ ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+ if (ret)
+ return ret;
+
if (nvbo->pin_refcnt)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
@@ -1324,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
out:
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1488,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = device->dev;
- /*
- * Objects matching this condition have been marked as force_coherent,
- * so use the DMA API for them.
- */
- if (!nvxx_device(&drm->device)->func->cpu_coherent &&
- ttm->caching_state == tt_uncached)
- return ttm_dma_populate(ttm_dma, dev->dev);
-
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
return ttm_agp_tt_populate(ttm);
@@ -1553,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = device->dev;
- /*
- * Objects matching this condition have been marked as force_coherent,
- * so use the DMA API for them.
- */
- if (!nvxx_device(&drm->device)->func->cpu_coherent &&
- ttm->caching_state == tt_uncached) {
- ttm_dma_unpopulate(ttm_dma, dev->dev);
- return;
- }
-
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
ttm_agp_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index b1d2527c5625..f9b3c811187e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -191,7 +191,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
+ MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7c77f960c8b8..afbf557b23d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
{
struct nouveau_crtc *nv_crtc =
container_of(notify, typeof(*nv_crtc), vblank);
- drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+ drm_crtc_handle_vblank(&nv_crtc->base);
return NVIF_NOTIFY_KEEP;
}
@@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
+ GP104_DISP,
+ GP100_DISP,
GM200_DISP,
GM107_DISP,
GK110_DISP,
@@ -554,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
+ drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
@@ -760,12 +763,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+ { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
new_bo->bo.offset };
/* Keep vblanks on during flip, for the target crtc of this flip */
- drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+ drm_crtc_vblank_get(crtc);
/* Emit a page flip */
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -810,7 +812,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
fail_unreserve:
- drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+ drm_crtc_vblank_put(crtc);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&cli->mutex);
@@ -842,17 +844,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- drm_arm_vblank_event(dev, s->crtc, s->event);
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
} else {
- drm_send_vblank_event(dev, s->crtc, s->event);
+ drm_crtc_send_vblank_event(s->crtc, s->event);
/* Give up ownership of vblank for page-flipped crtc */
- drm_vblank_put(dev, s->crtc);
+ drm_crtc_vblank_put(s->crtc);
}
}
else {
/* Give up ownership of vblank for page-flipped crtc */
- drm_vblank_put(dev, s->crtc);
+ drm_crtc_vblank_put(s->crtc);
}
list_del(&s->head);
@@ -873,9 +875,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
if (!nouveau_finish_page_flip(chan, &state)) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- nv_set_crtc_base(drm->dev, state.crtc, state.offset +
- state.y * state.pitch +
- state.x * state.bpp / 8);
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 24273bacd885..0420ee861ea4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
struct nouveau_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
- int crtc, bpp, pitch, x, y;
+ struct drm_crtc *crtc;
+ int bpp, pitch;
u64 offset;
};
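The page-flip state now carries the struct drm_crtc pointer itself rather than an integer index; where an index is still needed it is recovered with drm_crtc_index(), as in the nouveau_flip_complete() hunk above. A condensed sketch of that pattern (example_set_base is a hypothetical helper, not part of the patch):

/* Mirrors the nouveau_flip_complete() hunk above. */
static void
example_set_base(struct nouveau_drm *drm, struct nouveau_page_flip_state *s)
{
	int head = drm_crtc_index(s->crtc);	/* replaces the old integer s->crtc */

	nv_set_crtc_base(drm->dev, head,
			 s->offset + s->crtc->y * s->pitch +
			 s->crtc->x * s->bpp / 8);
}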
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 11f8dd9c0edb..66c1280c0f1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,13 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "drmP.h"
@@ -200,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_A:
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
+ case PASCAL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
@@ -315,16 +314,19 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
- /* remove conflicting drivers (vesafb, efifb etc) */
+ /* We need to check that the chipset is supported before booting
+ * fbdev off the hardware, as there's no way to put it back.
+ */
+ ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ if (ret)
+ return ret;
+
+ nvkm_device_del(&device);
+
+ /* Remove conflicting drivers (vesafb, efifb etc). */
aper = alloc_apertures(3);
if (!aper)
return -ENOMEM;
@@ -438,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_vga_init(drm);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ if (!nvxx_device(&drm->device)->mmu) {
+ ret = -ENOSYS;
+ goto fail_device;
+ }
+
ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
0x1000, NULL, &drm->client.vm);
if (ret)
@@ -498,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- pm_runtime_get_sync(dev->dev);
+ if (nouveau_runtime_pm != 0) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
+
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
@@ -970,7 +981,7 @@ driver_stub = {
.gem_prime_vmap = nouveau_gem_prime_vmap,
.gem_prime_vunmap = nouveau_gem_prime_vunmap,
- .gem_free_object = nouveau_gem_object_del,
+ .gem_free_object_unlocked = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
@@ -1078,7 +1089,6 @@ nouveau_drm_init(void)
driver_pci = driver_stub;
driver_pci.set_busid = drm_pci_set_busid;
driver_platform = driver_stub;
- driver_platform.set_busid = drm_platform_set_busid;
nouveau_display_options();
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38fe9..64c4ce7115ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- u32 contexts, context_base;
+ u32 contexts;
+ u64 context_base;
bool uevent;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 1ff4166af26e..71f764bf4cc6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -535,6 +535,40 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
nouveau_hwmon_get_in0_input, NULL, 0);
static ssize_t
+nouveau_hwmon_get_in0_min(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+ if (!volt || !volt->min_uv)
+ return -ENODEV;
+
+ return sprintf(buf, "%i\n", volt->min_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
+ nouveau_hwmon_get_in0_min, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_in0_max(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+ if (!volt || !volt->max_uv)
+ return -ENODEV;
+
+ return sprintf(buf, "%i\n", volt->max_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
+ nouveau_hwmon_get_in0_max, NULL, 0);
+
+static ssize_t
nouveau_hwmon_get_in0_label(struct device *d,
struct device_attribute *a, char *buf)
{
@@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
static struct attribute *hwmon_in0_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
NULL
};
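The new in0_min and in0_max attributes report the voltage limits in millivolts (min_uv and max_uv divided by 1000), alongside the existing in0_input. A small userspace sketch that reads one of them through the standard hwmon sysfs interface; the hwmon node path varies per system and is an assumption here:

#include <stdio.h>

int main(void)
{
	/* The hwmon index is system-specific; on a real system, locate the
	 * node whose "name" attribute reads "nouveau" first.  Values are
	 * reported in millivolts (min_uv / 1000 above). */
	const char *path = "/sys/class/hwmon/hwmon0/in0_min";
	FILE *f = fopen(path, "r");
	long mv;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mv) == 1)
		printf("minimum GPU core voltage: %ld mV\n", mv);
	fclose(f);
	return 0;
}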
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index bcee91497eb9..1825dbc33192 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 675e9e077a95..08f9c6fa0f7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
ntfy->p->base.event = &ntfy->p->e.base;
ntfy->p->base.file_priv = f;
ntfy->p->base.pid = current->pid;
- ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7d9248b8c664..da8fd5ff9d0f 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | image->width);
+ OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
- dsize = ALIGN(image->width * image->height, 32) >> 5;
+ dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dsize) {
int iter_len = dsize > 128 ? 128 : dsize;
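The imageblit fix pads the transferred width up to a multiple of 8 pixels, so the dword count pushed to the channel has to be computed from the padded width as well (the nv50 and nvc0 fbcon hunks below apply the same change). A standalone arithmetic sketch, userspace only, showing where the old and new counts diverge:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned)(a) - 1))

int main(void)
{
	unsigned width = 13, height = 7;	/* arbitrary example image */
	unsigned old_dwords = ALIGN(width * height, 32) >> 5;
	unsigned new_dwords = ALIGN(ALIGN(width, 8) * height, 32) >> 5;

	/* 13x7 pixels: 3 dwords unpadded vs 4 dwords with 8-pixel lines */
	printf("unpadded: %u dwords, padded: %u dwords\n",
	       old_dwords, new_dwords);
	return 0;
}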
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3ffc2b0057bf..7d0edcbcfca7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
+ GP104_DISP_CORE_CHANNEL_DMA,
+ GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
@@ -1346,21 +1348,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-static void
+static int
nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+ uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = min_t(u32, start + size, 256);
u32 i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
}
nv50_crtc_lut_load(crtc);
+
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 1aeb698e9707..af3d3c49411a 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING(chan, 0);
OUT_RING(chan, image->dy);
- dwords = ALIGN(image->width * image->height, 32) >> 5;
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 839f4c8c1805..054b6a056d99 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
OUT_RING (chan, 0);
OUT_RING (chan, image->dy);
- dwords = ALIGN(image->width * image->height, 32) >> 5;
+ dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
while (dwords) {
int push = dwords > 2047 ? 2047 : dwords;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index b18557858f19..19044aba265e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_CE0 ] = "ce0",
[NVKM_ENGINE_CE1 ] = "ce1",
[NVKM_ENGINE_CE2 ] = "ce2",
+ [NVKM_ENGINE_CE3 ] = "ce3",
+ [NVKM_ENGINE_CE4 ] = "ce4",
+ [NVKM_ENGINE_CE5 ] = "ce5",
[NVKM_ENGINE_CIPHER ] = "cipher",
[NVKM_ENGINE_DISP ] = "disp",
[NVKM_ENGINE_DMAOBJ ] = "dma",
@@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_MSVLD ] = "msvld",
[NVKM_ENGINE_NVENC0 ] = "nvenc0",
[NVKM_ENGINE_NVENC1 ] = "nvenc1",
+ [NVKM_ENGINE_NVENC2 ] = "nvenc2",
[NVKM_ENGINE_NVDEC ] = "nvdec",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
@@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
}
}
- nvkm_mc_reset(device->mc, subdev->index);
+ nvkm_mc_reset(device, subdev->index);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 9c19d59b47df..a4458a8eb30a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
+nvkm-y += nvkm/engine/ce/gp100.o
+nvkm-y += nvkm/engine/ce/gp104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
new file mode 100644
index 000000000000..c7710456bc30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_enum
+gp100_ce_launcherr_report[] = {
+ { 0x0, "NO_ERR" },
+ { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
+ { 0x2, "INVALID_ALIGNMENT" },
+ { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
+ { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
+ { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
+ { 0x6, "DST_LINE_EXCEEDS_PITCH" },
+ { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
+ { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
+ { 0x9, "INVALID_VALUE" },
+ { 0xa, "UNUSED_FIELD" },
+ { 0xb, "INVALID_OPERATION" },
+ { 0xc, "NO_RESOURCES" },
+ { 0xd, "INVALID_CONFIG" },
+ {}
+};
+
+static void
+gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
+{
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x104418 + base);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
+ nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
+}
+
+void
+gp100_ce_intr(struct nvkm_engine *ce)
+{
+ const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x10440c + base);
+ u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
+ if (intr & 0x00000001) { //XXX: guess
+ nvkm_warn(subdev, "BLOCKPIPE\n");
+ nvkm_wr32(device, 0x104410 + base, 0x00000001);
+ intr &= ~0x00000001;
+ }
+ if (intr & 0x00000002) { //XXX: guess
+ nvkm_warn(subdev, "NONBLOCKPIPE\n");
+ nvkm_wr32(device, 0x104410 + base, 0x00000002);
+ intr &= ~0x00000002;
+ }
+ if (intr & 0x00000004) {
+ gp100_ce_intr_launcherr(ce, base);
+ nvkm_wr32(device, 0x104410 + base, 0x00000004);
+ intr &= ~0x00000004;
+ }
+ if (intr) {
+ nvkm_warn(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x104410 + base, intr);
+ }
+}
+
+static const struct nvkm_engine_func
+gp100_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, PASCAL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gp100_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
new file mode 100644
index 000000000000..20e019788a53
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gp104_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, PASCAL_DMA_COPY_B },
+ { -1, -1, PASCAL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gp104_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index e2fa8b161943..2dce405976ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -4,4 +4,5 @@
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
+void gp100_ce_intr(struct nvkm_engine *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 4572debcb0c9..7218a067a6c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2148,6 +2148,67 @@ nv12b_chipset = {
.sw = gf100_sw_new,
};
+static const struct nvkm_device_chip
+nv130_chipset = {
+ .name = "GP100",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gm200_secboot_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp100_ce_new,
+ .ce[1] = gp100_ce_new,
+ .ce[2] = gp100_ce_new,
+ .ce[3] = gp100_ce_new,
+ .ce[4] = gp100_ce_new,
+ .ce[5] = gp100_ce_new,
+ .dma = gf119_dma_new,
+ .disp = gp100_disp_new,
+ .fifo = gp100_fifo_new,
+ .gr = gp100_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv134_chipset = {
+ .name = "GP104",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp104_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp104_ce_new,
+ .ce[1] = gp104_ce_new,
+ .ce[2] = gp104_ce_new,
+ .ce[3] = gp104_ce_new,
+ .disp = gp104_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(CE0 , device->ce[0] , device->ce[0]);
_(CE1 , device->ce[1] , device->ce[1]);
_(CE2 , device->ce[2] , device->ce[2]);
+ _(CE3 , device->ce[3] , device->ce[3]);
+ _(CE4 , device->ce[4] , device->ce[4]);
+ _(CE5 , device->ce[5] , device->ce[5]);
_(CIPHER , device->cipher , device->cipher);
_(DISP , device->disp , &device->disp->engine);
_(DMAOBJ , device->dma , &device->dma->engine);
@@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(MSVLD , device->msvld , device->msvld);
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
+ _(NVENC2 , device->nvenc[2], device->nvenc[2]);
_(NVDEC , device->nvdec , device->nvdec);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
@@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x100: device->card_type = NV_E0; break;
case 0x110:
case 0x120: device->card_type = GM100; break;
+ case 0x130: device->card_type = GP100; break;
default:
break;
}
@@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
+ case 0x130: device->chip = &nv130_chipset; break;
+ case 0x134: device->chip = &nv134_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
@@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_CE0 , ce[0]);
_(NVKM_ENGINE_CE1 , ce[1]);
_(NVKM_ENGINE_CE2 , ce[2]);
+ _(NVKM_ENGINE_CE3 , ce[3]);
+ _(NVKM_ENGINE_CE4 , ce[4]);
+ _(NVKM_ENGINE_CE5 , ce[5]);
_(NVKM_ENGINE_CIPHER , cipher);
_(NVKM_ENGINE_DISP , disp);
_(NVKM_ENGINE_DMAOBJ , dma);
@@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_MSVLD , msvld);
_(NVKM_ENGINE_NVENC0 , nvenc[0]);
_(NVKM_ENGINE_NVENC1 , nvenc[1]);
+ _(NVKM_ENGINE_NVENC2 , nvenc[2]);
_(NVKM_ENGINE_NVDEC , nvdec);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..b1b693219db3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,7 +1614,6 @@ nvkm_device_pci_func = {
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
- .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index ec12efb4689a..939682f18788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -191,13 +191,11 @@ static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
struct nvkm_device_tegra *tdev = arg;
- struct nvkm_mc *mc = tdev->device.mc;
+ struct nvkm_device *device = &tdev->device;
bool handled = false;
- if (likely(mc)) {
- nvkm_mc_intr_unarm(mc);
- nvkm_mc_intr(mc, &handled);
- nvkm_mc_intr_rearm(mc);
- }
+ nvkm_mc_intr_unarm(device);
+ nvkm_mc_intr(device, &handled);
+ nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
@@ -247,7 +245,6 @@ nvkm_device_tegra_func = {
.fini = nvkm_device_tegra_fini,
.resource_addr = nvkm_device_tegra_resource_addr,
.resource_size = nvkm_device_tegra_resource_size,
- .cpu_coherent = false,
};
int
@@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+ tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 137066426ed7..79a8f71cf788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+ case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e2a64ed14b22..77a52b54a31e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o
nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
+nvkm-y += nvkm/engine/disp/gp100.o
+nvkm-y += nvkm/engine/disp/gp104.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -45,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o
nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
+nvkm-y += nvkm/engine/disp/rootgp100.o
+nvkm-y += nvkm/engine/disp/rootgp104.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
+nvkm-y += nvkm/engine/disp/dmacgp104.o
nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
@@ -59,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
+nvkm-y += nvkm/engine/disp/basegp104.o
nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
@@ -70,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o
nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
+nvkm-y += nvkm/engine/disp/coregp100.o
+nvkm-y += nvkm/engine/disp/coregp104.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -77,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
+nvkm-y += nvkm/engine/disp/ovlygp104.o
nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
new file mode 100644
index 000000000000..51688e37c54e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_base_oclass = {
+ .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &gp104_disp_dmac_func,
+ .mthd = &gf119_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index aee374884c96..f5f683d9fd20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 6b1dc703dac7..21fbf89b6319 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = {
}
};
-static void
+void
gf119_disp_core_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
new file mode 100644
index 000000000000..d5dff6619d4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp100_disp_core_oclass = {
+ .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
new file mode 100644
index 000000000000..6922f4007b61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static int
+gp104_disp_core_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x611494, chan->push);
+ nvkm_wr32(device, 0x611498, 0x00010000);
+ nvkm_wr32(device, 0x61149c, 0x00000001);
+ nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core init: %08x\n",
+ nvkm_rd32(device, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_core_func = {
+ .init = gp104_disp_core_init,
+ .fini = gf119_disp_core_fini,
+ .bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_core_oclass = {
+ .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gp104_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index 876b14549a58..a57f7cef307a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
chan->base.chid << 27 | 0x00000001);
}
-static void
+void
gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
new file mode 100644
index 000000000000..ad24c2c57696
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static int
+gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+ nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_dmac_func = {
+ .init = gp104_disp_dmac_init,
+ .fini = gf119_disp_dmac_fini,
+ .bind = gf119_disp_dmac_bind,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index fc84eb8b5c45..43ac05857853 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func nv50_disp_core_func;
extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+void gf119_disp_core_fini(struct nv50_disp_dmac *);
+
+extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 5dd34382f55a..29e84b241cca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,12 +76,10 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
mask |= 0x0001 << or;
mask |= 0x0100 << head;
-
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
+ *data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
@@ -415,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work)
nvkm_wr32(device, 0x6101d0, 0x80000000);
}
-static void
+void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -463,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp)
u32 stat = nvkm_rd32(device, 0x61009c);
int chid = ffs(stat) - 1;
if (chid >= 0)
- gf119_disp_intr_error(disp, chid);
+ disp->func->intr_error(disp, chid);
intr &= ~0x00000002;
}
@@ -507,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
static const struct nv50_disp_func
gf119_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gf119_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index a86384b8e388..37f145cf30d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gk104_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk104_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 0d574c7e594a..e14ac946608c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gk110_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk110_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index f4b9cf8574be..2f2437cc5891 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gm107_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm107_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 67eec8620719..9f368d4ee61e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gm200_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm200_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
new file mode 100644
index 000000000000..4f81bf31435e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gp100_disp = {
+ .intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gp100_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm200_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm200_sor_magic,
+};
+
+int
+gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gp100_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
new file mode 100644
index 000000000000..3bf3380336e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static void
+gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
+ u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
+ u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
+
+ nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+ if (chid < ARRAY_SIZE(disp->chan)) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nvkm_wr32(device, 0x61009c, (1 << chid));
+ nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
+}
+
+static const struct nv50_disp_func
+gp104_disp = {
+ .intr = gf119_disp_intr,
+ .intr_error = gp104_disp_intr_error,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gp104_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm200_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm200_sor_magic,
+};
+
+int
+gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fcb1b0c46d64..fbb8c7dc18fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -32,6 +32,7 @@
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
+#include <subdev/timer.h>
static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp *base)
@@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
+ *data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
@@ -426,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
return outp;
}
+static bool
+nv50_disp_dptmds_war(struct nvkm_device *device)
+{
+ switch (device->chipset) {
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+ if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
+ switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
+ case 0x00000000:
+ case 0x00030000:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+
+}
+
+static void
+nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+
+ if (!nv50_disp_dptmds_war_needed(disp, outp))
+ return;
+
+ nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
+ nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
+
+ nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
+ nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
+ nvkm_usec(device, 400, NVKM_DELAY);
+ nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
+ nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
+
+ if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
+ u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+ u32 pu_pc = seqctl & 0x0000000f;
+ nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
+ }
+}
+
+static void
+nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+ u32 sorpwr;
+
+ if (!nv50_disp_dptmds_war_needed(disp, outp))
+ return;
+
+ sorpwr = nvkm_rd32(device, 0x61c004 + soff);
+ if (sorpwr & 0x00000001) {
+ u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+ u32 pd_pc = (seqctl & 0x00000f00) >> 8;
+ u32 pu_pc = seqctl & 0x0000000f;
+
+ nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+
+ nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
+ nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
+ }
+
+ nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
+
+ if (sorpwr & 0x00000001) {
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
+ }
+}
+
+static void
+nv50_disp_update_sppll1(struct nv50_disp *disp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ bool used = false;
+ int sor;
+
+ if (!nv50_disp_dptmds_war(device))
+ return;
+
+ for (sor = 0; sor < disp->func->sor.nr; sor++) {
+ u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
+ switch (clksor & 0x03000000) {
+ case 0x02000000:
+ case 0x03000000:
+ used = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (used)
+ return;
+
+ nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
+}
+
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
@@ -679,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
nvkm_mask(device, hreg, 0x0000000f, hval);
nvkm_mask(device, oreg, mask, oval);
+
+ nv50_disp_dptmds_war_2(disp, &outp->info);
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -720,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
+ nv50_disp_dptmds_war_3(disp, &outp->info);
}
void
@@ -767,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
continue;
nv50_disp_intr_unk40_0(disp, head);
}
+ nv50_disp_update_sppll1(disp);
}
nvkm_wr32(device, 0x610030, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index aecebd8717e5..1e1de6bfe85a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -68,6 +68,7 @@ struct nv50_disp_func_outp {
struct nv50_disp_func {
void (*intr)(struct nv50_disp *);
+ void (*intr_error)(struct nv50_disp *, int chid);
const struct nvkm_event_func *uevent;
void (*super)(struct work_struct *);
@@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int);
void gf119_disp_vblank_fini(struct nv50_disp *, int);
void gf119_disp_intr(struct nv50_disp *);
void gf119_disp_intr_supervisor(struct work_struct *);
+void gf119_disp_intr_error(struct nv50_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 2e2dc0641ef2..2f0220b39f34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = {
}
};
-static const struct nv50_disp_chan_mthd
+const struct nv50_disp_chan_mthd
gk104_disp_ovly_chan_mthd = {
.name = "Overlay",
.addr = 0x001000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
new file mode 100644
index 000000000000..97e2dd2d908e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_ovly_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &gp104_disp_dmac_func,
+ .mthd = &gk104_disp_ovly_chan_mthd,
+ .chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
new file mode 100644
index 000000000000..ac8fdd728ec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp100_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gp100_disp_core_oclass,
+ &gk110_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp100_disp_root_oclass = {
+ .base.oclass = GP100_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gp100_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
new file mode 100644
index 000000000000..8443e04dc626
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp104_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gp104_disp_core_oclass,
+ &gp104_disp_base_oclass,
+ &gp104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp104_disp_root_oclass = {
+ .base.oclass = GP104_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gp104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index cb449ed8d92c..ad00f1724b72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 65e5d291ecda..98651a43bc12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
+nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogk110.o
nvkm-y += nvkm/engine/fifo/gpfifogm200.o
+nvkm-y += nvkm/engine/fifo/gpfifogp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index e06f4d46f802..230f64e5f731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 743f3a189f28..103c0afaaa6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
}
if (eu == NULL) {
- enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+ enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
if (engidx < NVKM_SUBDEV_NR) {
const char *src = nvkm_subdev_name[engidx];
char *dst = en;
@@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_top *top = device->top;
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
@@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
/* Determine runlist configuration from topology device info. */
i = 0;
- while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
+ while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
/* Determine which PBDMA handles requests for this engine. */
for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
if (map[j] & (1 << runl)) {
@@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
}
}
- nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
- engn, runl, pbid);
+ nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+ engn, runl, pbid, nvkm_subdev_name[engidx]);
fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
fifo->engine[engn].runl = runl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 000000000000..eff83f7fb705
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_enum
+gp100_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "HOST0" },
+ { 0x07, "HOST1" },
+ { 0x08, "HOST2" },
+ { 0x09, "HOST3" },
+ { 0x0a, "HOST4" },
+ { 0x0b, "HOST5" },
+ { 0x0c, "HOST6" },
+ { 0x0d, "HOST7" },
+ { 0x0e, "HOST8" },
+ { 0x0f, "HOST9" },
+ { 0x10, "HOST10" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x1f, "PHYSICAL" },
+ {}
+};
+
+static const struct gk104_fifo_func
+gp100_fifo = {
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .chan = {
+ &gp100_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
new file mode 100644
index 000000000000..1530a9217aea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gp100_fifo_gpfifo_oclass = {
+ .base.oclass = PASCAL_CHANNEL_GPFIFO_A,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 290ed0db8047..f1c494182248 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o
nvkm-y += nvkm/engine/gr/gm107.o
nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
+nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
+nvkm-y += nvkm/engine/gr/ctxgp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index b02d8f50ea6a..bc77eea351a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 idle_timeout;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
@@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index ac895edce164..52048b5a5274 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
extern const struct gf100_grctx_func gm20b_grctx;
+extern const struct gf100_grctx_func gp100_grctx;
+
/* context init value lists */
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index f521de11a299..c925ade5880e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
@@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
}
const struct gf100_grctx_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 9ba337778ef5..c46b3fdf7203 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
@@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
new file mode 100644
index 000000000000..3d1ae7ddf7dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static void
+gp100_grctx_generate_pagepool(struct gf100_grctx *info)
+{
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 attrib = grctx->attrib_nr;
+ const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+ const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size, (1 << s), access);
+ const int max_batches = 0xffff;
+ u32 ao = 0;
+ u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+ int gpc, ppc, n = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+ mmio_refn(info, 0x419b00, 0x00000000, s, b);
+ mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+ mmio_wr32(info, 0x405830, attrib);
+ mmio_wr32(info, 0x40585c, alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 u = 0x418ea0 + (n * 0x04);
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+ continue;
+ mmio_wr32(info, o + 0xc0, bs);
+ mmio_wr32(info, o + 0xf4, bo);
+ mmio_wr32(info, o + 0xf0, bs);
+ bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, as);
+ mmio_wr32(info, o + 0xf8, ao);
+ ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, u, bs);
+ }
+ }
+
+ mmio_wr32(info, 0x418eec, 0x00000000);
+ mmio_wr32(info, 0x41befc, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_405b60(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+ u32 dist[TPC_MAX / 4] = {};
+ u32 gpcs[GPC_MAX * 2] = {};
+ u8 tpcnr[GPC_MAX];
+ int tpc, gpc, i;
+
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+ /* won't result in the same distribution as the binary driver where
+ * some of the gpcs have more tpcs than others, but this shall do
+ * for the moment. the code for earlier gpus has this issue too.
+ */
+ for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while(!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+ gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+ }
+
+ for (i = 0; i < dist_nr; i++)
+ nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < gr->gpc_nr * 2; i++)
+ nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+static void
+gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout, tmp;
+ int i;
+
+ gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
+
+ grctx->pagepool(info);
+ grctx->bundle(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
+
+ gm200_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x406500, 0x00000000);
+
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+ for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+ tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
+ nvkm_wr32(device, 0x4041c4, tmp);
+
+ gp100_grctx_generate_405b60(gr);
+
+ gf100_gr_icmd(gr, gr->fuc_bundle);
+ nvkm_wr32(device, 0x404154, idle_timeout);
+ gf100_gr_mthd(gr, gr->fuc_method);
+}
+
+const struct gf100_grctx_func
+gp100_grctx = {
+ .main = gp100_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x1080,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gp100_grctx_generate_attrib,
+ .attrib_nr_max = 0x660,
+ .attrib_nr = 0x440,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ae9ab5b1ab97..157919c788e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1457,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
int i;
+ int ret = 0;
if (gr->firmware) {
/* load fuc microcode */
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
&gr->fuc409d);
+ if (ret)
+ return ret;
+
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
&gr->fuc41ad);
+ if (ret)
+ return ret;
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
/* start both of them running */
nvkm_wr32(device, 0x409840, 0xffffffff);
@@ -1576,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
}
/* load HUB microcode */
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
nvkm_wr32(device, 0x4091c0, 0x01000000);
for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
@@ -1599,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a188, i >> 6);
nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
}
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
/* load register lists */
gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 2b98abdb9270..268b8d60ff73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
+
+void gm200_gr_init_gpc_mmu(struct gf100_gr *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4ca8ed15191c..de8b806b88fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
if (ret)
return ret;
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 4dfa4513bb6c..6435f1257572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr)
return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}
-static void
+void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
new file mode 100644
index 000000000000..26ad79def0ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
+ const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+static int
+gp100_gr_init(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, rop;
+ int i;
+
+ gr->func->init_gpc_mmu(gr);
+
+ gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+ gr->func->init_rop_active_fbps(gr);
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400124, 0x00000002);
+ nvkm_wr32(device, 0x409c24, 0x000f0002);
+ nvkm_wr32(device, 0x405848, 0xc0000000);
+ nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+ nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
+ nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+
+ gr->func->init_ppc_exceptions(gr);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+ }
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
+
+ gf100_gr_zbc_init(gr);
+
+ return gf100_gr_init_ctxctl(gr);
+}
+
+static const struct gf100_gr_func
+gp100_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 2,
+ .grctx = &gp100_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_A, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp100_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 69de8c6259fe..f1e15a4d4f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 2207dac23981..300f5ed5de0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
- nvkm_wo32(chan->inst, i + 1, 0x0436086c);
- nvkm_wo32(chan->inst, i + 2, 0x000c001b);
+ nvkm_wo32(chan->inst, i + 4, 0x0436086c);
+ nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index e15b9627b07e..f3c30b2a788e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -26,6 +26,49 @@
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+
+static bool
+nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
+{
+ u32 p = *addr;
+
+ if (*addr > bios->image0_size && bios->imaged_addr) {
+ *addr -= bios->image0_size;
+ *addr += bios->imaged_addr;
+ }
+
+ if (unlikely(*addr + size >= bios->size)) {
+ nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
+ return false;
+ }
+
+ return true;
+}
+
+u8
+nvbios_rd08(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 1)))
+ return bios->data[addr];
+ return 0x00;
+}
+
+u16
+nvbios_rd16(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 2)))
+ return get_unaligned_le16(&bios->data[addr]);
+ return 0x0000;
+}
+
+u32
+nvbios_rd32(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 4)))
+ return get_unaligned_le32(&bios->data[addr]);
+ return 0x00000000;
+}
u8
nvbios_checksum(const u8 *data, int size)
@@ -100,8 +143,9 @@ int
nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
{
struct nvkm_bios *bios;
+ struct nvbios_image image;
struct bit_entry bit_i;
- int ret;
+ int ret, idx = 0;
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
return -ENOMEM;
@@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
if (ret)
return ret;
+ /* Some tables have weird pointers that need adjustment before
+ * they're dereferenced. I'm not entirely sure why...
+ */
+ if (nvbios_image(bios, idx++, &image)) {
+ bios->image0_size = image.size;
+ while (nvbios_image(bios, idx++, &image)) {
+ if (image.type == 0xe0) {
+ bios->imaged_addr = image.base;
+ break;
+ }
+ }
+ }
+
/* detect type of vbios we're dealing with */
bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
"\xff\x7f""NV\0", 5);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 05332476354a..d89e78c4e689 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
case 0x30:
case 0x40:
case 0x41:
+ case 0x42:
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
*cnt = nvbios_rd08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
break;
case 0x40:
case 0x41:
+ case 0x42:
*hdr = nvbios_rd08(bios, data + 0x04);
*cnt = 0;
*len = 0;
@@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
break;
case 0x40:
case 0x41:
+ case 0x42:
info->flags = nvbios_rd08(bios, data + 0x04);
info->script[0] = nvbios_rd16(bios, data + 0x05);
info->script[1] = nvbios_rd16(bios, data + 0x07);
@@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
info->pe = nvbios_rd08(bios, data + 0x02);
info->tx_pu = nvbios_rd08(bios, data + 0x03);
break;
+ case 0x42:
+ info->dc = nvbios_rd08(bios, data + 0x00);
+ info->pe = nvbios_rd08(bios, data + 0x01);
+ info->tx_pu = nvbios_rd08(bios, data + 0x02);
+ break;
default:
data = 0x0000;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 74b14cf09308..1dbff7aeafec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
bool
nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
{
+ u32 imaged_addr = bios->imaged_addr;
memset(image, 0x00, sizeof(*image));
+ bios->imaged_addr = 0;
do {
image->base += image->size;
- if (image->last || !nvbios_imagen(bios, image))
+ if (image->last || !nvbios_imagen(bios, image)) {
+ bios->imaged_addr = imaged_addr;
return false;
+ }
} while(idx--);
+ bios->imaged_addr = imaged_addr;
return true;
}
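
The change to nvbios_image() above saves bios->imaged_addr, clears it while walking the raw image headers (so those reads are not themselves relocated by the new nvbios_rd* translation), and restores it on every exit path. A compilable sketch of that save/clear/restore pattern with stand-in types (the fake_* names and the stub parser are made up for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_bios {
	uint32_t imaged_addr;	/* nonzero: reads get relocated */
};

struct fake_img {
	uint32_t base, size;
	uint8_t type;
	bool last;
};

/* Stub: pretend the ROM holds two 64KiB images, the second of type 0xe0. */
static bool fake_imagen(struct fake_bios *bios, struct fake_img *img)
{
	(void)bios;
	img->size = 0x10000;
	img->type = (img->base == 0) ? 0x00 : 0xe0;
	img->last = (img->base != 0);
	return true;
}

/* Walk raw image headers with relocation disabled, restoring it on exit. */
static bool fake_image(struct fake_bios *bios, int idx, struct fake_img *img)
{
	uint32_t saved = bios->imaged_addr;
	bool ok = true;

	bios->imaged_addr = 0;		/* raw, untranslated header reads */
	*img = (struct fake_img){ 0 };
	do {
		img->base += img->size;
		if (img->last || !fake_imagen(bios, img)) {
			ok = false;
			break;
		}
	} while (idx--);
	bios->imaged_addr = saved;	/* always restore */
	return ok;
}

int main(void)
{
	struct fake_bios bios = { .imaged_addr = 0x180 };
	struct fake_img img;

	printf("found=%d base=0x%x type=0x%02x imaged_addr=0x%x\n",
	       fake_image(&bios, 1, &img), img.base, img.type,
	       bios.imaged_addr);
	/* found=1 base=0x10000 type=0xe0 imaged_addr=0x180 (restored) */
	return 0;
}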
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 91a7dc56e406..2ca23a9157ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -77,15 +77,17 @@ g84_pll_mapping[] = {
{}
};
-static u16
+static u32
pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_C;
- u16 data = 0x0000;
+ u32 data = 0x0000;
if (!bit_entry(bios, 'C', &bit_C)) {
if (bit_C.version == 1 && bit_C.length >= 10)
data = nvbios_rd16(bios, bit_C.offset + 8);
+ if (bit_C.version == 2 && bit_C.length >= 4)
+ data = nvbios_rd32(bios, bit_C.offset + 0);
if (data) {
*ver = nvbios_rd08(bios, data + 0);
*hdr = nvbios_rd08(bios, data + 1);
@@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios)
}
}
-static u16
+static u32
pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
- u16 data;
+ u32 data;
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
if (data && *ver >= 0x30) {
@@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
map = pll_map(bios);
while (map && map->reg) {
if (map->reg == reg && *ver >= 0x20) {
- u16 addr = (data += hdr);
+ u32 addr = (data += hdr);
*type = map->type;
while (cnt--) {
if (nvbios_rd32(bios, data) == map->reg)
@@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
return 0x0000;
}
-static u16
+static u32
pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
- u16 data;
+ u32 data;
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
if (data && *ver >= 0x30) {
@@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
map = pll_map(bios);
while (map && map->reg) {
if (map->type == type && *ver >= 0x20) {
- u16 addr = (data += hdr);
+ u32 addr = (data += hdr);
*reg = map->reg;
while (cnt--) {
if (nvbios_rd32(bios, data) == map->reg)
@@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
struct nvkm_device *device = subdev->device;
u8 ver, len;
u32 reg = type;
- u16 data;
+ u32 data;
if (type > PLL_MAX) {
reg = type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index c268e5afe852..b4a308f3cf7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -26,21 +26,6 @@
#include <subdev/bios/image.h>
#include <subdev/bios/pmu.h>
-static u32
-weirdo_pointer(struct nvkm_bios *bios, u32 data)
-{
- struct nvbios_image image;
- int idx = 0;
- if (nvbios_image(bios, idx++, &image)) {
- data -= image.size;
- while (nvbios_image(bios, idx++, &image)) {
- if (image.type == 0xe0)
- return image.base + data;
- }
- }
- return 0;
-}
-
u32
nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
@@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'p', &bit_p)) {
if (bit_p.version == 2 && bit_p.length >= 4)
data = nvbios_rd32(bios, bit_p.offset + 0x00);
- if ((data = weirdo_pointer(bios, data))) {
+ if (data) {
*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
@@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
u32 data;
memset(info, 0x00, sizeof(*info));
while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
- if ( pmuE.type == type &&
- (data = weirdo_pointer(bios, pmuE.data))) {
+ if (pmuE.type == type && (data = pmuE.data)) {
info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
info->boot_addr = data + 0x30;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index d0ae7454764e..b57c370c725d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 rammap = 0x0000;
+ u32 rammap = 0x0000;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- rammap = nvbios_rd16(bios, bit_P.offset + 4);
+ rammap = nvbios_rd32(bios, bit_P.offset + 4);
if (rammap) {
*ver = nvbios_rd08(bios, rammap + 0);
@@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (rammap && idx < *cnt) {
rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 78c449b417b7..89d5543118cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
struct nvkm_device *device = clk->base.subdev.device;
u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
- u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
+ u32 sclk, sctl, sdiv = 2;
switch (ssrc & 0x00000003) {
case 0:
@@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
case 2:
return 100000;
case 3:
- if (sctl & 0x80000000) {
- u32 sclk = read_vco(clk, dsrc + (doff * 4));
- u32 sdiv = (sctl & 0x0000003f) + 2;
- return (sclk * 2) / sdiv;
+ sclk = read_vco(clk, dsrc + (doff * 4));
+
+ /* Memclk has doff of 0 despite its alt. location */
+ if (doff <= 2) {
+ sctl = nvkm_rd32(device, dctl + (doff * 4));
+
+ if (sctl & 0x80000000) {
+ if (ssrc & 0x100)
+ sctl >>= 8;
+
+ sdiv = (sctl & 0x3f) + 2;
+ }
}
- return read_vco(clk, dsrc + (doff * 4));
+ return (sclk * 2) / sdiv;
default:
return 0;
}
@@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx)
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+ /* Test PLL lock */
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
break;
);
- nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+ /* Enable sync mode */
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
}
}
}
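
The reworked read_div() above computes the output as (sclk * 2) / sdiv, with sdiv defaulting to 2 and, when the divider is enabled, taken from bits 5:0 of sctl (shifted up a byte when ssrc bit 8 is set). A standalone sketch of just that arithmetic, with hypothetical register values and the doff handling dropped:

#include <stdint.h>
#include <stdio.h>

/* Divider arithmetic from the reworked read_div(): the VCO reading is
 * doubled then divided by sdiv, which defaults to 2 so the "divider
 * disabled" case returns sclk unchanged.
 */
static uint32_t apply_div(uint32_t sclk, uint32_t ssrc, uint32_t sctl)
{
	uint32_t sdiv = 2;

	if (sctl & 0x80000000) {
		if (ssrc & 0x100)	/* field lives one byte up */
			sctl >>= 8;
		sdiv = (sctl & 0x3f) + 2;
	}
	return (sclk * 2) / sdiv;
}

int main(void)
{
	/* hypothetical VCO of 1620000 kHz with an enabled divider of 4+2 */
	printf("%u kHz\n", apply_div(1620000, 0x3, 0x80000004)); /* 540000 */
	/* divider disabled: (sclk * 2) / 2 leaves the VCO rate untouched */
	printf("%u kHz\n", apply_div(1620000, 0x3, 0x00000004)); /* 1620000 */
	return 0;
}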
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 975c401bccab..06bc0d2d6ae1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx)
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+ /* Test PLL lock */
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
break;
);
- nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+ /* Enable sync mode */
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
}
}
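
Both clk_prog_2() hunks (gf100 and gk104) split the old single mask write into three steps: clear bit 4 while polling the lock flag (bit 17), restore bit 4, then raise the sync-mode bit (2) on its own. A toy version of that sequence against a fake register file (rd32/wr32/mask32 are stand-ins, not the nvkm accessors):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for MMIO. */
static uint32_t regs[0x100];

static uint32_t rd32(uint32_t addr) { return regs[addr >> 2]; }
static void wr32(uint32_t addr, uint32_t data) { regs[addr >> 2] = data; }

/* Read-modify-write, mirroring what nvkm_mask() does. */
static void mask32(uint32_t addr, uint32_t mask, uint32_t data)
{
	wr32(addr, (rd32(addr) & ~mask) | data);
}

/* Lock sequence from the hunks above: toggle bit 4 around the poll of
 * the lock flag (bit 17), then enable sync mode (bit 2) separately
 * instead of folding it into the same mask write.
 */
static bool pll_wait_lock(uint32_t addr, int timeout_us)
{
	mask32(addr + 0x00, 0x00000010, 0x00000000);
	while (!(rd32(addr + 0x00) & 0x00020000)) {
		if (timeout_us-- <= 0)
			return false;
	}
	mask32(addr + 0x00, 0x00000010, 0x00000010);
	mask32(addr + 0x00, 0x00000004, 0x00000004);
	return true;
}

int main(void)
{
	regs[0] = 0x00020001;	/* pretend the PLL locks instantly */
	printf("%s\n", pll_wait_lock(0x0, 2000) ? "locked" : "timeout");
	return 0;
}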
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 5f0ee24e31b8..218893e3e5f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -28,69 +28,6 @@
#include <core/tegra.h>
#include <subdev/timer.h>
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w) ((1 << w) - 1)
-
-#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
-#define GPCPLL_CFG_ENABLE BIT(0)
-#define GPCPLL_CFG_IDDQ BIT(1)
-#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
-#define GPCPLL_CFG_LOCK BIT(17)
-
-#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
-#define GPCPLL_COEFF_M_SHIFT 0
-#define GPCPLL_COEFF_M_WIDTH 8
-#define GPCPLL_COEFF_N_SHIFT 8
-#define GPCPLL_COEFF_N_WIDTH 8
-#define GPCPLL_COEFF_P_SHIFT 16
-#define GPCPLL_COEFF_P_WIDTH 6
-
-#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
-#define GPCPLL_CFG2_SETUP2_SHIFT 16
-#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
-
-#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
-#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
-
-#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
-#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
-#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
-#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
-#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
-
-#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
-#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
-
-#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
-#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
-#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
-#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
-#define GPC2CLK_OUT_VCODIV_WIDTH 6
-#define GPC2CLK_OUT_VCODIV_SHIFT 8
-#define GPC2CLK_OUT_VCODIV1 0
-#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
- GPC2CLK_OUT_VCODIV_SHIFT)
-#define GPC2CLK_OUT_BYPDIV_WIDTH 6
-#define GPC2CLK_OUT_BYPDIV_SHIFT 0
-#define GPC2CLK_OUT_BYPDIV31 0x3c
-#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
- GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
- | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
- | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
-#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
- GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
- | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
- | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
-
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
- (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-
static const u8 _pl_to_div[] = {
/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
@@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
.min_pl = 1, .max_pl = 32,
};
-static void
+void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
struct nvkm_device *device = clk->base.subdev.device;
@@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
-static u32
-gk20a_pllg_calc_rate(struct gk20a_clk *clk)
+void
+gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
+{
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
+
+ val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
+ val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
+ nvkm_wr32(device, GPCPLL_COEFF, val);
+}
+
+u32
+gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
u32 rate;
u32 divider;
- rate = clk->parent_rate * clk->pll.n;
- divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
+ rate = clk->parent_rate * pll->n;
+ divider = pll->m * clk->pl_to_div(pll->pl);
return rate / divider / 2;
}
-static int
-gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
+int
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
+ struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
u32 target_clk_f, ref_clk_f, target_freq;
@@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
target_clk_f = rate * 2 / KHZ;
ref_clk_f = clk->parent_rate / KHZ;
- max_vco_f = clk->params->max_vco;
+ target_vco_f = target_clk_f + target_clk_f / 50;
+ max_vco_f = max(clk->params->max_vco, target_vco_f);
min_vco_f = clk->params->min_vco;
best_m = clk->params->max_m;
best_n = clk->params->min_n;
best_pl = clk->params->min_pl;
- target_vco_f = target_clk_f + target_clk_f / 50;
- if (max_vco_f < target_vco_f)
- max_vco_f = target_vco_f;
-
/* min_pl <= high_pl <= max_pl */
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
high_pl = min(high_pl, clk->params->max_pl);
@@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
target_vco_f = target_clk_f * clk->pl_to_div(pl);
for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
- u32 u_f, vco_f;
-
- u_f = ref_clk_f / m;
+ u32 u_f = ref_clk_f / m;
if (u_f < clk->params->min_u)
break;
@@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
break;
for (; n <= n2; n++) {
+ u32 vco_f;
+
if (n < clk->params->min_n)
continue;
if (n > clk->params->max_n)
@@ -247,16 +194,16 @@ found_match:
"no best match for target @ %dMHz on gpc_pll",
target_clk_f / KHZ);
- clk->pll.m = best_m;
- clk->pll.n = best_n;
- clk->pll.pl = best_pl;
+ pll->m = best_m;
+ pll->n = best_n;
+ pll->pl = best_pl;
- target_freq = gk20a_pllg_calc_rate(clk);
+ target_freq = gk20a_pllg_calc_rate(clk, pll);
nvkm_debug(subdev,
- "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
- target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
- clk->pl_to_div(clk->pll.pl));
+ "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq / KHZ, pll->m, pll->n, pll->pl,
+ clk->pl_to_div(pll->pl));
return 0;
}
@@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
- u32 val;
- int ramp_timeout;
+ struct gk20a_pll pll;
+ int ret = 0;
/* get old coefficients */
- val = nvkm_rd32(device, GPCPLL_COEFF);
+ gk20a_pllg_read_mnp(clk, &pll);
/* do nothing if NDIV is the same */
- if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+ if (n == pll.n)
return 0;
- /* setup */
- nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
- 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
- nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
- 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
-
/* pll slowdown mode */
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
/* new ndiv ready for ramp */
- val = nvkm_rd32(device, GPCPLL_COEFF);
- val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
- val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ pll.n = n;
udelay(1);
- nvkm_wr32(device, GPCPLL_COEFF, val);
+ gk20a_pllg_write_mnp(clk, &pll);
/* dynamic ramp to new ndiv */
- val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
- val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
udelay(1);
- nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
- for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
- udelay(1);
- val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
- if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
- break;
- }
+ /* wait for ramping to complete */
+ if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+ ret = -ETIMEDOUT;
/* exit slowdown mode */
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
@@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
- if (ramp_timeout <= 0) {
- nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
+ return ret;
}
-static void
+static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
nvkm_rd32(device, GPCPLL_CFG);
+
+ /* enable lock detection */
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+ val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+ nvkm_wr32(device, GPCPLL_CFG, val);
+ }
+
+ /* wait for lock */
+ if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+ GPCPLL_CFG_LOCK) < 0)
+ return -ETIMEDOUT;
+
+ /* switch to VCO mode */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ return 0;
}
static void
@@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
nvkm_rd32(device, GPCPLL_CFG);
}
static int
-_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
- u32 val, cfg;
- struct gk20a_pll old_pll;
- u32 n_lo;
-
- /* get old coefficients */
- gk20a_pllg_read_mnp(clk, &old_pll);
-
- /* do NDIV slide if there is no change in M and PL */
- cfg = nvkm_rd32(device, GPCPLL_CFG);
- if (allow_slide && clk->pll.m == old_pll.m &&
- clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
- return gk20a_pllg_slide(clk, clk->pll.n);
- }
-
- /* slide down to NDIV_LO */
- if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
- int ret;
-
- n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
- ret = gk20a_pllg_slide(clk, n_lo);
+ struct gk20a_pll cur_pll;
+ int ret;
- if (ret)
- return ret;
- }
+ gk20a_pllg_read_mnp(clk, &cur_pll);
- /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
+ /* split VCO-to-bypass jump in half by setting out divider 1:2 */
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
- 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
-
- /* put PLL in bypass before programming it */
- val = nvkm_rd32(device, SEL_VCO);
- val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
udelay(2);
- nvkm_wr32(device, SEL_VCO, val);
-
- /* get out from IDDQ */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_IDDQ) {
- val &= ~GPCPLL_CFG_IDDQ;
- nvkm_wr32(device, GPCPLL_CFG, val);
- nvkm_rd32(device, GPCPLL_CFG);
- udelay(2);
- }
gk20a_pllg_disable(clk);
- nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
- clk->pll.m, clk->pll.n, clk->pll.pl);
-
- n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
- val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
- val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
- val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
- nvkm_wr32(device, GPCPLL_COEFF, val);
+ gk20a_pllg_write_mnp(clk, pll);
- gk20a_pllg_enable(clk);
-
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_LOCK_DET_OFF) {
- val &= ~GPCPLL_CFG_LOCK_DET_OFF;
- nvkm_wr32(device, GPCPLL_CFG, val);
- }
-
- if (nvkm_usec(device, 300,
- if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
- break;
- ) < 0)
- return -ETIMEDOUT;
-
- /* switch to VCO mode */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
- BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ ret = gk20a_pllg_enable(clk);
+ if (ret)
+ return ret;
/* restore out divider 1:1 */
- val = nvkm_rd32(device, GPC2CLK_OUT);
- if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
- (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
- val &= ~GPC2CLK_OUT_VCODIV_MASK;
- val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
- udelay(2);
- nvkm_wr32(device, GPC2CLK_OUT, val);
- /* Intentional 2nd write to assure linear divider operation */
- nvkm_wr32(device, GPC2CLK_OUT, val);
- nvkm_rd32(device, GPC2CLK_OUT);
- }
+ udelay(2);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
- /* slide up to new NDIV */
- return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
+ return 0;
}
static int
-gk20a_pllg_program_mnp(struct gk20a_clk *clk)
+gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
- int err;
+ struct gk20a_pll cur_pll;
+ int ret;
- err = _gk20a_pllg_program_mnp(clk, true);
- if (err)
- err = _gk20a_pllg_program_mnp(clk, false);
+ if (gk20a_pllg_is_enabled(clk)) {
+ gk20a_pllg_read_mnp(clk, &cur_pll);
+
+ /* just do NDIV slide if there is no change to M and PL */
+ if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+ return gk20a_pllg_slide(clk, pll->n);
+
+ /* slide down to current NDIV_LO */
+ cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+ ret = gk20a_pllg_slide(clk, cur_pll.n);
+ if (ret)
+ return ret;
+ }
+
+ /* program MNP with the new clock parameters and new NDIV_LO */
+ cur_pll = *pll;
+ cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+ ret = gk20a_pllg_program_mnp(clk, &cur_pll);
+ if (ret)
+ return ret;
- return err;
+ /* slide up to new NDIV */
+ return gk20a_pllg_slide(clk, pll->n);
}
static struct nvkm_pstate
@@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
struct gk20a_clk *clk = gk20a_clk(base);
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
+ struct gk20a_pll pll;
switch (src) {
case nv_clk_src_crystal:
return device->crystal;
case nv_clk_src_gpc:
- gk20a_pllg_read_mnp(clk, &clk->pll);
- return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
+ gk20a_pllg_read_mnp(clk, &pll);
+ return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
default:
nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
@@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
struct gk20a_clk *clk = gk20a_clk(base);
return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
- GK20A_CLK_GPC_MDIV);
+ GK20A_CLK_GPC_MDIV, &clk->pll);
}
int
gk20a_clk_prog(struct nvkm_clk *base)
{
struct gk20a_clk *clk = gk20a_clk(base);
+ int ret;
+
+ ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
+ if (ret)
+ ret = gk20a_pllg_program_mnp(clk, &clk->pll);
- return gk20a_pllg_program_mnp(clk);
+ return ret;
}
void
@@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base)
{
}
+int
+gk20a_clk_setup_slide(struct gk20a_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 step_a, step_b;
+
+ switch (clk->parent_rate) {
+ case 12000000:
+ case 12800000:
+ case 13000000:
+ step_a = 0x2b;
+ step_b = 0x0b;
+ break;
+ case 19200000:
+ step_a = 0x12;
+ step_b = 0x08;
+ break;
+ case 38400000:
+ step_a = 0x04;
+ step_b = 0x05;
+ break;
+ default:
+ nvkm_error(subdev, "invalid parent clock rate %u KHz",
+ clk->parent_rate / KHZ);
+ return -EINVAL;
+ }
+
+ nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+ nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+ return 0;
+}
+
void
gk20a_clk_fini(struct nvkm_clk *base)
{
struct nvkm_device *device = base->subdev.device;
struct gk20a_clk *clk = gk20a_clk(base);
- u32 val;
/* slide to VCO min */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_ENABLE) {
+ if (gk20a_pllg_is_enabled(clk)) {
struct gk20a_pll pll;
u32 n_lo;
gk20a_pllg_read_mnp(clk, &pll);
- n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
+ n_lo = gk20a_pllg_n_lo(clk, &pll);
gk20a_pllg_slide(clk, n_lo);
}
- /* put PLL in bypass before disabling it */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
gk20a_pllg_disable(clk);
+
+ /* set IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
static int
@@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base)
struct nvkm_device *device = subdev->device;
int ret;
+ /* get out from IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+ udelay(5);
+
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
GPC2CLK_OUT_INIT_VAL);
+ ret = gk20a_clk_setup_slide(clk);
+ if (ret)
+ return ret;
+
/* Start with lowest frequency */
base->func->calc(base, &base->func->pstates[0].base);
ret = base->func->prog(&clk->base);
@@ -646,7 +610,7 @@ gk20a_clk = {
};
int
-_gk20a_clk_ctor(struct nvkm_device *device, int index,
+gk20a_clk_ctor(struct nvkm_device *device, int index,
const struct nvkm_clk_func *func,
const struct gk20a_clk_pllg_params *params,
struct gk20a_clk *clk)
@@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+ ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
clk);
clk->pl_to_div = pl_to_div;
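
gk20a_pllg_calc_rate() above reduces to parent * N / (M * pl_to_div(PL)) / 2. A worked example of that arithmetic with made-up coefficients, treating all frequencies in kHz:

#include <stdint.h>
#include <stdio.h>

/* gk20a-style PL-to-divider table from the hunks above (PL 0..14). */
static const uint8_t pl_to_div[] = {
	1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
};

/* Mirrors gk20a_pllg_calc_rate(): parent * N / (M * pl_to_div(PL)) / 2. */
static uint32_t calc_rate_khz(uint32_t parent_khz, uint32_t m, uint32_t n,
			      uint32_t pl)
{
	uint64_t rate = (uint64_t)parent_khz * n;
	uint64_t divider = (uint64_t)m * pl_to_div[pl];

	return rate / divider / 2;
}

int main(void)
{
	/* hypothetical coefficients: 38400 kHz reference, M=1, N=52, PL=1 */
	printf("%u kHz\n", calc_rate_khz(38400, 1, 52, 1));
	/* 38400 * 52 / (1 * 2) / 2 = 499200 kHz */
	return 0;
}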
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 13c46740197d..0d1450972162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -24,9 +24,79 @@
#ifndef __NVKM_CLK_GK20A_H__
#define __NVKM_CLK_GK20A_H__
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w) ((1 << (w)) - 1)
+
#define GK20A_CLK_GPC_MDIV 1000
#define SYS_GPCPLL_CFG_BASE 0x00137000
+#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE BIT(0)
+#define GPCPLL_CFG_IDDQ BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
+#define GPCPLL_CFG_LOCK BIT(17)
+
+#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0
+#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9
+#define GPCPLL_CFG3_VCO_CTRL_MASK \
+ (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
+#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8
+
+#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT 0
+#define GPCPLL_COEFF_M_WIDTH 8
+#define GPCPLL_COEFF_N_SHIFT 8
+#define GPCPLL_COEFF_N_WIDTH 8
+#define GPCPLL_COEFF_N_MASK \
+ (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
+#define GPCPLL_COEFF_P_SHIFT 16
+#define GPCPLL_COEFF_P_WIDTH 6
+
+#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
+
+#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+ (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
+
+#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH 6
+#define GPC2CLK_OUT_VCODIV_SHIFT 8
+#define GPC2CLK_OUT_VCODIV1 0
+#define GPC2CLK_OUT_VCODIV2 2
+#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+ GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH 6
+#define GPC2CLK_OUT_BYPDIV_SHIFT 0
+#define GPC2CLK_OUT_BYPDIV31 0x3c
+#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+ | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+ | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+ | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+ | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
/* All frequencies in Khz */
struct gk20a_clk_pllg_params {
@@ -54,7 +124,29 @@ struct gk20a_clk {
};
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
-int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
+int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
+void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
+void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
+
+static inline bool
+gk20a_pllg_is_enabled(struct gk20a_clk *clk)
+{
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
+
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ return val & GPCPLL_CFG_ENABLE;
+}
+
+static inline u32
+gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
+{
+ return DIV_ROUND_UP(pll->m * clk->params->min_vco,
+ clk->parent_rate / KHZ);
+}
+
+int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
void gk20a_clk_fini(struct nvkm_clk *);
int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
@@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
int gk20a_clk_prog(struct nvkm_clk *);
void gk20a_clk_tidy(struct nvkm_clk *);
+int gk20a_clk_setup_slide(struct gk20a_clk *);
+
#endif
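
The new gk20a_pllg_n_lo() inline above picks the smallest NDIV that keeps parent * N / M at or above params->min_vco. A standalone computation with hypothetical numbers (frequencies in kHz, as in the header):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Same computation as gk20a_pllg_n_lo(): the minimum NDIV for a given
 * M and parent clock that does not drop the VCO below min_vco.
 */
static uint32_t n_lo(uint32_t m, uint32_t min_vco_khz, uint32_t parent_khz)
{
	return DIV_ROUND_UP(m * min_vco_khz, parent_khz);
}

int main(void)
{
	/* hypothetical parameters: M=1, min VCO 1 GHz, 38.4 MHz parent */
	printf("NDIV_LO = %u\n", n_lo(1, 1000000, 38400));
	/* ceil(1000000 / 38400) = 27 */
	return 0;
}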
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index 71b2bbb61973..b284e949f732 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -21,20 +21,123 @@
*/
#include <subdev/clk.h>
+#include <subdev/volt.h>
+#include <subdev/timer.h>
#include <core/device.h>
+#include <core/tegra.h>
#include "priv.h"
#include "gk20a.h"
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w) ((1 << w) - 1)
+#define GPCPLL_CFG_SYNC_MODE BIT(2)
#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
+#define GPCPLL_CFG2_SDM_DIN_SHIFT 0
+#define GPCPLL_CFG2_SDM_DIN_WIDTH 8
+#define GPCPLL_CFG2_SDM_DIN_MASK \
+ (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
+#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8
+#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15
+#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
+ (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10)
+#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0
+#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7
+#define GPCPLL_DVFS0_DFS_COEFF_MASK \
+ (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
+#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
+#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
+#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
+ (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
+
+#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14)
+#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0
+#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1
+#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8
+#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7
+#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15
+#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1
+#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16
+#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12
+#define GPCPLL_DVFS1_EN_SDM_SHIFT 28
+#define GPCPLL_DVFS1_EN_SDM_WIDTH 1
+#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28)
+#define GPCPLL_DVFS1_EN_DFS_SHIFT 29
+#define GPCPLL_DVFS1_EN_DFS_WIDTH 1
+#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29)
+#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30
+#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1
+#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30)
+#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31
+#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1
+#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31)
+
+#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
+#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16)
+
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7
+
+#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
+
+struct gm20b_clk_dvfs_params {
+ s32 coeff_slope;
+ s32 coeff_offs;
+ u32 vco_ctrl;
+};
+
+static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
+ .coeff_slope = -165230,
+ .coeff_offs = 214007,
+ .vco_ctrl = 0x7 << 3,
+};
+
+/*
+ * base.n is now the *integer* part of the N factor.
+ * sdm_din contains n's decimal part.
+ */
+struct gm20b_pll {
+ struct gk20a_pll base;
+ u32 sdm_din;
+};
+
+struct gm20b_clk_dvfs {
+ u32 dfs_coeff;
+ s32 dfs_det_max;
+ s32 dfs_ext_cal;
+};
+
+struct gm20b_clk {
+ /* currently applied parameters */
+ struct gk20a_clk base;
+ struct gm20b_clk_dvfs dvfs;
+ u32 uv;
+
+ /* new parameters to apply */
+ struct gk20a_pll new_pll;
+ struct gm20b_clk_dvfs new_dvfs;
+ u32 new_uv;
+
+ const struct gm20b_clk_dvfs_params *dvfs_params;
+
+ /* fused parameters */
+ s32 uvdet_slope;
+ s32 uvdet_offs;
+
+ /* safe frequency we can use at minimum voltage */
+ u32 safe_fmax_vmin;
+};
+#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
+
static u32 pl_to_div(u32 pl)
{
return pl;
@@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
.min_pl = 1, .max_pl = 31,
};
+static void
+gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 val;
+
+ gk20a_pllg_read_mnp(&clk->base, &pll->base);
+ val = nvkm_rd32(device, GPCPLL_CFG2);
+ pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
+ MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+}
+
+static void
+gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+ pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+ gk20a_pllg_write_mnp(&clk->base, &pll->base);
+}
+
+/*
+ * Determine DFS_COEFF for the requested voltage. Always select external
+ * calibration override equal to the voltage, and set maximum detection
+ * limit "0" (to make sure that PLL output remains under F/V curve when
+ * voltage increases).
+ */
+static void
+gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
+ struct gm20b_clk_dvfs *dvfs)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
+ u32 coeff;
+ /* Work with mv as uv would likely trigger an overflow */
+ s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
+
+ /* coeff = slope * voltage + offset */
+ coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
+ coeff = DIV_ROUND_CLOSEST(coeff, 1000);
+ dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
+
+ dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
+ clk->uvdet_slope);
+ /* should never happen */
+ if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
+ nvkm_error(subdev, "dfs_ext_cal overflow!\n");
+
+ dvfs->dfs_det_max = 0;
+
+ nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
+ __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
+ dvfs->dfs_det_max);
+}
+
+/*
+ * Solve equation for integer and fractional part of the effective NDIV:
+ *
+ * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
+ * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
+ *
+ * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
+ */
+static void
+gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ const struct gk20a_clk_pllg_params *p = clk->base.params;
+ u32 n;
+ s32 det_delta;
+ u32 rem, rem_range;
+
+ /* calculate current ext_cal and subtract previous one */
+ det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
+ clk->uvdet_slope);
+ det_delta -= clk->dvfs.dfs_ext_cal;
+ det_delta = min(det_delta, clk->dvfs.dfs_det_max);
+ det_delta *= clk->dvfs.dfs_coeff;
+
+ /* integer part of n */
+ n = (n_eff << DFS_DET_RANGE) - det_delta;
+ /* should never happen! */
+ if (n <= 0) {
+ nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
+ n = 1 << DFS_DET_RANGE;
+ }
+ if (n >> DFS_DET_RANGE > p->max_n) {
+ nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
+ n = p->max_n << DFS_DET_RANGE;
+ }
+ *n_int = n >> DFS_DET_RANGE;
+
+ /* fractional part of n */
+ rem = ((u32)n) & MASK(DFS_DET_RANGE);
+ rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
+ /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
+ rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
+ /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
+ *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+
+ nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
+ n_eff, *n_int, *sdm_din);
+}
+
+static int
+gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct gm20b_pll pll;
+ u32 n_int, sdm_din;
+ int ret = 0;
+
+ /* calculate the new n_int/sdm_din for this n/uv */
+ gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
+
+ /* get old coefficients */
+ gm20b_pllg_read_mnp(clk, &pll);
+ /* do nothing if NDIV is the same */
+ if (n_int == pll.base.n && sdm_din == pll.sdm_din)
+ return 0;
+
+ /* pll slowdown mode */
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+ /* new ndiv ready for ramp */
+ /* in DVFS mode SDM is updated via "new" field */
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
+ sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
+ pll.base.n = n_int;
+ udelay(1);
+ gk20a_pllg_write_mnp(&clk->base, &pll.base);
+
+ /* dynamic ramp to new ndiv */
+ udelay(1);
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
+
+ /* wait for ramping to complete */
+ if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+ ret = -ETIMEDOUT;
+
+ /* in DVFS mode complete SDM update */
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+ sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+
+ /* exit slowdown mode */
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+ nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
+
+ return ret;
+}
+
+static int
+gm20b_pllg_enable(struct gm20b_clk *clk)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nvkm_rd32(device, GPCPLL_CFG);
+
+ /* In DVFS mode lock cannot be used - so just delay */
+ udelay(40);
+
+ /* set SYNC_MODE for glitchless switch out of bypass */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
+ GPCPLL_CFG_SYNC_MODE);
+ nvkm_rd32(device, GPCPLL_CFG);
+
+ /* switch to VCO mode */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ return 0;
+}
+
+static void
+gm20b_pllg_disable(struct gm20b_clk *clk)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+ /* clear SYNC_MODE before disabling PLL */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
+
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+}
+
+static int
+gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct gm20b_pll cur_pll;
+ u32 n_int, sdm_din;
+ /* if we only change pdiv, we can do a glitchless transition */
+ bool pdiv_only;
+ int ret;
+
+ gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
+ gm20b_pllg_read_mnp(clk, &cur_pll);
+ pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
+ cur_pll.base.m == pll->m;
+
+ /* need full sequence if clock not enabled yet */
+ if (!gk20a_pllg_is_enabled(&clk->base))
+ pdiv_only = false;
+
+ /* split VCO-to-bypass jump in half by setting out divider 1:2 */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
+ udelay(2);
+
+ if (pdiv_only) {
+ u32 old = cur_pll.base.pl;
+ u32 new = pll->pl;
+
+ /*
+ * we can do a glitchless transition only if the old and new PL
+ * parameters share at least one bit set to 1. If this is not
+ * the case, calculate and program an interim PL that will allow
+ * us to respect that rule.
+ */
+ if ((old & new) == 0) {
+ cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
+ new | BIT(ffs(old) - 1));
+ gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+ }
+
+ cur_pll.base.pl = new;
+ gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+ } else {
+ /* disable before programming if more than pdiv changes */
+ gm20b_pllg_disable(clk);
+
+ cur_pll.base = *pll;
+ cur_pll.base.n = n_int;
+ cur_pll.sdm_din = sdm_din;
+ gm20b_pllg_write_mnp(clk, &cur_pll);
+
+ ret = gm20b_pllg_enable(clk);
+ if (ret)
+ return ret;
+ }
+
+ /* restore out divider 1:1 */
+ udelay(2);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
+
+ return 0;
+}
+
+static int
+gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+ struct gk20a_pll cur_pll;
+ int ret;
+
+ if (gk20a_pllg_is_enabled(&clk->base)) {
+ gk20a_pllg_read_mnp(&clk->base, &cur_pll);
+
+ /* just do NDIV slide if there is no change to M and PL */
+ if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+ return gm20b_pllg_slide(clk, pll->n);
+
+ /* slide down to current NDIV_LO */
+ cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+ ret = gm20b_pllg_slide(clk, cur_pll.n);
+ if (ret)
+ return ret;
+ }
+
+ /* program MNP with the new clock parameters and new NDIV_LO */
+ cur_pll = *pll;
+ cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+ ret = gm20b_pllg_program_mnp(clk, &cur_pll);
+ if (ret)
+ return ret;
+
+ /* slide up to new NDIV */
+ return gm20b_pllg_slide(clk, pll->n);
+}
+
+static int
+gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+ struct gm20b_clk *clk = gm20b_clk(base);
+ struct nvkm_subdev *subdev = &base->subdev;
+ struct nvkm_volt *volt = base->subdev.device->volt;
+ int ret;
+
+ ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
+ GK20A_CLK_GPC_MDIV, &clk->new_pll);
+ if (ret)
+ return ret;
+
+ clk->new_uv = volt->vid[cstate->voltage].uv;
+ gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
+
+ nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
+
+ return 0;
+}
+
+/*
+ * Compute PLL parameters that are always safe for the current voltage
+ */
+static void
+gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
+{
+ u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
+ u32 parent_rate = clk->base.parent_rate / KHZ;
+ u32 nmin, nsafe;
+
+ /* remove a safe margin of 10% */
+ if (rate > clk->safe_fmax_vmin)
+ rate = rate * (100 - 10) / 100;
+
+ /* gpc2clk */
+ rate *= 2;
+
+ nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
+ nsafe = pll->m * rate / (clk->base.parent_rate);
+
+ if (nsafe < nmin) {
+ pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
+ nsafe = nmin;
+ }
+
+ pll->n = nsafe;
+}
+
+static void
+gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* strobe to read external DFS coefficient */
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+ nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
+ coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
+
+ udelay(1);
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+}
+
+static void
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+ u32 val;
+
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
+ dfs_det_cal);
+ udelay(1);
+
+ val = nvkm_rd32(device, GPCPLL_DVFS1);
+ if (!(val & BIT(25))) {
+ /* Use external value to overwrite calibration value */
+ val |= BIT(25) | BIT(16);
+ nvkm_wr32(device, GPCPLL_DVFS1, val);
+ }
+}
+
+static void
+gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
+ struct gm20b_clk_dvfs *dvfs)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* strobe to read external DFS coefficient */
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+ nvkm_mask(device, GPCPLL_DVFS0,
+ GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
+ dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
+ dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
+
+ udelay(1);
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+
+ gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
+}
+
+static int
+gm20b_clk_prog(struct nvkm_clk *base)
+{
+ struct gm20b_clk *clk = gm20b_clk(base);
+ u32 cur_freq;
+ int ret;
+
+ /* No change in DVFS settings? */
+ if (clk->uv == clk->new_uv)
+ goto prog;
+
+ /*
+ * Interim step for changing DVFS detection settings: low enough
+ * frequency to be safe at DVFS coeff = 0.
+ *
+ * 1. If voltage is increasing:
+ * - safe frequency target matches the lowest - old - frequency
+ * - DVFS settings are still old
+ * - Voltage already increased to new level by volt, but maximum
+ * detection limit assures PLL output remains under F/V curve
+ *
+ * 2. If voltage is decreasing:
+ * - safe frequency target matches the lowest - new - frequency
+ * - DVFS settings are still old
+ * - Voltage is also old, it will be lowered by volt afterwards
+ *
+ * Interim step can be skipped if old frequency is below safe minimum,
+ * i.e., it is low enough to be safe at any voltage in operating range
+ * with zero DVFS coefficient.
+ */
+ cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
+ if (cur_freq > clk->safe_fmax_vmin) {
+ struct gk20a_pll pll_safe;
+
+ if (clk->uv < clk->new_uv)
+ /* voltage will raise: safe frequency is current one */
+ pll_safe = clk->base.pll;
+ else
+ /* voltage will drop: safe frequency is new one */
+ pll_safe = clk->new_pll;
+
+ gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
+ ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * DVFS detection settings transition:
+ * - Set DVFS coefficient zero
+ * - Set calibration level to new voltage
+ * - Set DVFS coefficient to match new voltage
+ */
+ gm20b_dvfs_program_coeff(clk, 0);
+ gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
+ gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
+ gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+prog:
+ clk->uv = clk->new_uv;
+ clk->dvfs = clk->new_dvfs;
+ clk->base.pll = clk->new_pll;
+
+ return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
+}
+
static struct nvkm_pstate
gm20b_pstates[] = {
{
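
gm20b_dvfs_calc_ndiv() in the hunk above splits the effective NDIV into an integer part plus an 8-bit SDM_DIN according to the equation quoted in its comment. A standalone version of that split which takes the already-scaled detection delta (coeff * delta) as an argument; the numeric example is made up:

#include <stdint.h>
#include <stdio.h>

#define DFS_DET_RANGE	6	/* ranges from the hunk above */
#define SDM_DIN_RANGE	12
#define BIT(b)		(1u << (b))
#define MASK(w)		((1u << (w)) - 1)

/* Same split as gm20b_dvfs_calc_ndiv(), minus the voltage bookkeeping. */
static void calc_ndiv(uint32_t n_eff, int32_t det_delta,
		      uint32_t *n_int, uint32_t *sdm_din)
{
	uint32_t n = (n_eff << DFS_DET_RANGE) - det_delta;
	uint32_t rem, rem_range;

	*n_int = n >> DFS_DET_RANGE;

	rem = n & MASK(DFS_DET_RANGE);
	rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
	rem = (rem << rem_range) - BIT(SDM_DIN_RANGE); /* subtract the 1/2 */
	*sdm_din = (rem >> 8) & MASK(8);               /* keep the MSB only */
}

int main(void)
{
	uint32_t n_int, sdm_din;

	/* det_delta = 0: n_eff 100 splits into n_int 100 and a -1/2 SDM */
	calc_ndiv(100, 0, &n_int, &sdm_din);
	printf("n_int=%u sdm_din=0x%02x\n", n_int, sdm_din); /* 100, 0xf0 */
	return 0;
}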
@@ -133,9 +714,99 @@ gm20b_pstates[] = {
.voltage = 12,
},
},
-
};
+static void
+gm20b_clk_fini(struct nvkm_clk *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gm20b_clk *clk = gm20b_clk(base);
+
+ /* slide to VCO min */
+ if (gk20a_pllg_is_enabled(&clk->base)) {
+ struct gk20a_pll pll;
+ u32 n_lo;
+
+ gk20a_pllg_read_mnp(&clk->base, &pll);
+ n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
+ gm20b_pllg_slide(clk, n_lo);
+ }
+
+ gm20b_pllg_disable(clk);
+
+ /* set IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
+}
+
+static int
+gm20b_clk_init_dvfs(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ bool fused = clk->uvdet_offs && clk->uvdet_slope;
+ static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
+ u32 data;
+ int ret;
+
+ /* Enable NA DVFS */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
+ GPCPLL_DVFS1_EN_DFS_BIT);
+
+ /* Set VCO_CTRL */
+ if (clk->dvfs_params->vco_ctrl)
+ nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
+ clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
+
+ if (fused) {
+ /* Start internal calibration, but ignore results */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+ GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+ /* got uvdet parameters from fuse, skip calibration */
+ goto calibrated;
+ }
+
+ /*
+ * If calibration parameters are not fused, start internal calibration,
+ * wait for completion, and use results along with default slope to
+ * calculate ADC offset during boot.
+ */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+ GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+ /* Wait for internal calibration done (spec < 2us). */
+ ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
+ GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
+ GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
+ if (ret < 0) {
+ nvkm_error(subdev, "GPCPLL calibration timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ data = nvkm_rd32(device, GPCPLL_CFG3) >>
+ GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
+ data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
+
+ clk->uvdet_slope = ADC_SLOPE_UV;
+ clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
+
+ nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
+ clk->uvdet_offs, clk->uvdet_slope);
+
+calibrated:
+ /* Compute and apply initial DVFS parameters */
+ gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
+ gm20b_dvfs_program_coeff(clk, 0);
+ gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
+ gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
+ gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+ return 0;
+}
+
+/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
+static const struct nvkm_clk_func gm20b_clk;
+
static int
gm20b_clk_init(struct nvkm_clk *base)
{
@@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base)
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
int ret;
+ u32 data;
+
+ /* get out from IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+ udelay(5);
+
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+ GPC2CLK_OUT_INIT_VAL);
/* Set the global bypass control to VCO */
nvkm_mask(device, BYPASSCTRL_SYS,
MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
0);
+ ret = gk20a_clk_setup_slide(clk);
+ if (ret)
+ return ret;
+
+ /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
+ data = nvkm_rd32(device, 0x021944);
+ if (!(data & 0x3)) {
+ data |= 0x2;
+ nvkm_wr32(device, 0x021944, data);
+
+ data = nvkm_rd32(device, 0x021948);
+ data |= 0x1;
+ nvkm_wr32(device, 0x021948, data);
+ }
+
+ /* Disable idle slow down */
+ nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
+
+ /* speedo >= 1? */
+ if (clk->base.func == &gm20b_clk) {
+ struct gm20b_clk *_clk = gm20b_clk(base);
+ struct nvkm_volt *volt = device->volt;
+
+ /* Get current voltage */
+ _clk->uv = nvkm_volt_get(volt);
+
+ /* Initialize DVFS */
+ ret = gm20b_clk_init_dvfs(_clk);
+ if (ret)
+ return ret;
+ }
+
/* Start with lowest frequency */
base->func->calc(base, &base->func->pstates[0].base);
- ret = base->func->prog(&clk->base);
+ ret = base->func->prog(base);
if (ret) {
nvkm_error(subdev, "cannot initialize clock\n");
return ret;
@@ -169,6 +881,7 @@ gm20b_clk_speedo0 = {
.prog = gk20a_clk_prog,
.tidy = gk20a_clk_tidy,
.pstates = gm20b_pstates,
+ /* Speedo 0 only supports 12 voltages */
.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
.domains = {
{ nv_clk_src_crystal, 0xff },
@@ -177,8 +890,26 @@ gm20b_clk_speedo0 = {
},
};
-int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+static const struct nvkm_clk_func
+gm20b_clk = {
+ .init = gm20b_clk_init,
+ .fini = gm20b_clk_fini,
+ .read = gk20a_clk_read,
+ .calc = gm20b_clk_calc,
+ .prog = gm20b_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gm20b_pstates,
+ .nr_pstates = ARRAY_SIZE(gm20b_pstates),
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max },
+ },
+};
+
+static int
+gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+ struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
int ret;
@@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
- &gm20b_pllg_params, clk);
+ ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+ &gm20b_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
return ret;
}
+
+/* FUSE register */
+#define FUSE_RESERVED_CALIB0 0x204
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
+#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
+#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
+
+static int
+gm20b_clk_init_fused_params(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ u32 val = 0;
+ u32 rev = 0;
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA)
+ tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
+ rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
+#endif
+
+ /* No fused parameters, we will calibrate later */
+ if (rev == 0)
+ return -EINVAL;
+
+ /* Integer part in mV + fractional part in uV */
+ clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
+ ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
+
+ /* Integer part in mV + fractional part in 100uV */
+ clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
+ ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
+
+ nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
+ clk->uvdet_slope, clk->uvdet_offs);
+ return 0;
+}
+
+static int
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_volt *volt = subdev->device->volt;
+ struct nvkm_pstate *pstates = clk->base.base.func->pstates;
+ int nr_pstates = clk->base.base.func->nr_pstates;
+ int vmin, id = 0;
+ u32 fmax = 0;
+ int i;
+
+ /* find lowest voltage we can use */
+ vmin = volt->vid[0].uv;
+ for (i = 1; i < volt->vid_nr; i++) {
+ if (volt->vid[i].uv <= vmin) {
+ vmin = volt->vid[i].uv;
+ id = volt->vid[i].vid;
+ }
+ }
+
+ /* find max frequency at this voltage */
+ for (i = 0; i < nr_pstates; i++)
+ if (pstates[i].base.voltage == id)
+ fmax = max(fmax,
+ pstates[i].base.domain[nv_clk_src_gpc]);
+
+ if (!fmax) {
+ nvkm_error(subdev, "failed to evaluate safe fmax\n");
+ return -EINVAL;
+ }
+
+ /* we are safe at 90% of the max frequency */
+ clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
+ nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);
+
+ return 0;
+}
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gm20b_clk *clk;
+ struct nvkm_subdev *subdev;
+ struct gk20a_clk_pllg_params *clk_params;
+ int ret;
+
+ /* Speedo 0 GPUs cannot use noise-aware PLL */
+ if (tdev->gpu_speedo_id == 0)
+ return gm20b_clk_new_speedo0(device, index, pclk);
+
+ /* Speedo >= 1, use NAPLL */
+ clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base.base;
+ subdev = &clk->base.base.subdev;
+
+ /* duplicate the clock parameters since we will patch them below */
+ clk_params = (void *) (clk + 1);
+ *clk_params = gm20b_pllg_params;
+ ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
+ &clk->base);
+ if (ret)
+ return ret;
+
+ /*
+ * NAPLL can only work with max_u, clamp the m range so
+ * gk20a_pllg_calc_mnp always uses it
+ */
+ clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
+ (clk->base.parent_rate / KHZ));
+ if (clk_params->max_m == 0) {
+ nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
+ kfree(clk);
+ return gm20b_clk_new_speedo0(device, index, pclk);
+ }
+
+ clk->base.pl_to_div = pl_to_div;
+ clk->base.div_to_pl = div_to_pl;
+
+ clk->dvfs_params = &gm20b_dvfs_params;
+
+ ret = gm20b_clk_init_fused_params(clk);
+ /*
+ * if absent, we will calibrate during init - this should
+ * never happen on production parts
+ */
+ if (ret)
+ nvkm_warn(subdev, "no fused calibration parameters\n");
+
+ ret = gm20b_clk_init_safe_fmax(clk);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 842d5de96d73..edcc157e6ac8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gp100.o
+nvkm-y += nvkm/subdev/fb/gp104.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgp100.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index ce90242b8cce..a7049c041594 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -25,6 +25,7 @@
#include "ram.h"
#include <core/memory.h>
+#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
@@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_page)
+ fb->func->init_page(fb);
+ if (fb->func->init_unkn)
+ fb->func->init_unkn(fb);
return 0;
}
@@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index e649ead5ccfc..76433cc66fff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -72,6 +72,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb)
}
void
+gf100_fb_init_page(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
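+ /* fb->page holds the NvFbBigPage option: log2 of the big page
+ * size - 16 (64KiB) or 17 (128KiB, the default)
+ */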
+ switch (fb->page) {
+ case 16:
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
+ break;
+ case 17:
+ default:
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
+ fb->page = 17;
+ break;
+ }
+}
+
+void
gf100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
@@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base)
if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
}
void *
@@ -125,6 +139,7 @@ gf100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 2160e5a39c9a..449f431644b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
void *gf100_fb_dtor(struct nvkm_fb *);
void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
+
+void gp100_fb_init(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index b41f0f70038c..4245e2e6e604 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -29,6 +29,7 @@ gk104_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 7306f7dfc3b9..f815fe2bbf08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -27,7 +27,6 @@ static void
gk20a_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
}
@@ -36,6 +35,7 @@ static const struct nvkm_fb_func
gk20a_fb = {
.oneinit = gf100_fb_oneinit,
.init = gk20a_fb_init,
+ .init_page = gf100_fb_init_page,
.memtype_valid = gf100_fb_memtype_valid,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 4869fdb753c9..db699025f546 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -29,6 +29,7 @@ gm107_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 44f5716f64d8..62f653240be3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -26,6 +26,24 @@
#include <core/memory.h>
+void
+gm200_fb_init_page(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ switch (fb->page) {
+ case 16:
+ nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
+ break;
+ case 17:
+ nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
+ break;
+ default:
+ nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
+ fb->page = 0;
+ break;
+ }
+}
+
static void
gm200_fb_init(struct nvkm_fb *base)
{
@@ -48,6 +66,7 @@ gm200_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
new file mode 100644
index 000000000000..98474aec1921
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gp100_fb_init_unkn(struct nvkm_fb *base)
+{
+ struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
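+ /* mirror the FB configuration just programmed into a
+ * secondary, as yet unidentified, register block
+ */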
+ nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
+ nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
+ nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
+}
+
+void
+gp100_fb_init(struct nvkm_fb *base)
+{
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ if (fb->r100c10_page)
+ nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+ nvkm_mask(device, 0x100cc4, 0x00060000,
+ max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
+
+static const struct nvkm_fb_func
+gp100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gm200_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
new file mode 100644
index 000000000000..92cb71861bec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static const struct nvkm_fb_func
+gp104_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gm200_fb_init_page,
+ .ram_new = gp100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp104_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index d97d640e60a0..e905d44fa1d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -8,6 +8,8 @@ struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_page)(struct nvkm_fb *);
+ void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
struct {
@@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
int gf100_fb_oneinit(struct nvkm_fb *);
+void gf100_fb_init_page(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+
+void gm200_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index f816cbf2ced3..b9ec0ae6723a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
new file mode 100644
index 000000000000..f3be408b5e5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static int
+gp100_ram_init(struct nvkm_ram *ram)
+{
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, hdr, cnt, len, snr, ssz;
+ u32 data;
+ int i;
+
+ /* Run the scripts from the rammap table. There are actually
+ * individual pointers for each rammap entry too, but NVIDIA
+ * seems to just run the last two entries' scripts early on in
+ * its init, and never again. We'll just run them all once
+ * for now.
+ *
+ * I strongly suspect that each script is for a separate mode
+ * (likely selected by 0x9a065c's lower bits?), and that the
+ * binary driver skips the one that's already been set up by
+ * the init tables.
+ */
+ data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ if (!data || hdr < 0x15)
+ return -EINVAL;
+
+ cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
+ data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+ if (cnt) {
+ u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
+ for (i = 0; i < cnt; i++, data += 4) {
+ if (i != save >> 4) {
+ nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
+ nvbios_exec(&(struct nvbios_init) {
+ .subdev = subdev,
+ .bios = bios,
+ .offset = nvbios_rd32(bios, data),
+ .execute = 1,
+ });
+ }
+ }
+ nvkm_mask(device, 0x9a065c, 0x000000f0, save);
+ }
+
+ nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
+ nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+ nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
+ return 0;
+}
+
+static const struct nvkm_ram_func
+gp100_ram_func = {
+ .init = gp100_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+};
+
+int
+gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_ram *ram;
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+ const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+ u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+ u32 fbio_opt = nvkm_rd32(device, 0x021c14);
+ u64 part, size = 0, comm = ~0ULL;
+ bool mixed = false;
+ int ret;
+
+ nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+ nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
+ for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
+ if (!(fbio_opt & (1 << fbpa))) {
+ part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+ nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
+ part = part << 20;
+ if (part != comm) {
+ if (comm != ~0ULL)
+ mixed = true;
+ comm = min(comm, part);
+ }
+ size = size + part;
+ }
+ }
+
+ ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
+ *pram = ram;
+ if (ret)
+ return ret;
+
+ nvkm_mm_fini(&ram->vram);
+
+ if (mixed) {
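+ /* FBPA amounts differ: map the common amount present on
+ * every FBPA as one region, and the remainder as a second
+ * region at offset 0x1000000000 + comm
+ */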
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ((comm * fbpa_num) - rsvd_head) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+
+ ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
+ NVKM_RAM_MM_SHIFT,
+ (size - (comm * fbpa_num) - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 6b8f2a19b2d9..a6a7fa0d7679 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -109,7 +109,7 @@ struct gk20a_instmem {
u16 iommu_bit;
/* Only used by DMA API */
- struct dma_attrs attrs;
+ unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
@@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
goto out;
dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
- node->handle, &imem->attrs);
+ node->handle, imem->attrs);
out:
return node;
@@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
- &imem->attrs);
+ imem->attrs);
if (!node->base.vaddr) {
nvkm_error(subdev, "cannot allocate DMA memory\n");
return -ENOMEM;
@@ -597,10 +597,9 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
- init_dma_attrs(&imem->attrs);
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
- dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+ imem->attrs = DMA_ATTR_NON_CONSISTENT |
+ DMA_ATTR_WEAK_ORDERING |
+ DMA_ATTR_WRITE_COMBINE;
nvkm_info(&imem->base.subdev, "using DMA API\n");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 932b366598aa..12d6f4f102cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o
nvkm-y += nvkm/subdev/ltc/gk104.o
nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
+nvkm-y += nvkm/subdev/ltc/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index c9eb677967a8..4a0fa0a9b802 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -23,7 +23,6 @@
*/
#include "priv.h"
-#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
@@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
nvkm_wr32(device, 0x17ea58, depth);
}
-static const struct nvkm_bitfield
+const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
{ 0x00000001, "IDLE_ERROR_IQ" },
{ 0x00000002, "IDLE_ERROR_CBC" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 389fb13a1998..ec0a3844b2d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
nvkm_wr32(device, 0x17e34c, depth);
}
-static void
+void
gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
- u32 stat = nvkm_rd32(device, base + 0x00c);
+ u32 intr = nvkm_rd32(device, base + 0x00c);
+ u16 stat = intr & 0x0000ffff;
+ char msg[128];
if (stat) {
- nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
- nvkm_wr32(device, base + 0x00c, stat);
+ nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+ nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
}
+
+ nvkm_wr32(device, base + 0x00c, intr);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
new file mode 100644
index 000000000000..0bdfb2f40266
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static void
+gp100_ltc_intr(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ u32 mask;
+
+ mask = nvkm_rd32(device, 0x0001c0);
+ while (mask) {
+ u32 s, c = __ffs(mask);
+ for (s = 0; s < ltc->lts_nr; s++)
+ gm107_ltc_intr_lts(ltc, c, s);
+ mask &= ~(1 << c);
+ }
+}
+
+static int
+gp100_ltc_oneinit(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
+ ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
+ /*XXX: tagram allocation - TBD */
+ return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+}
+
+static void
+gp100_ltc_init(struct nvkm_ltc *ltc)
+{
+ /*XXX: PMU LS call to setup tagram address */
+}
+
+static const struct nvkm_ltc_func
+gp100_ltc = {
+ .oneinit = gp100_ltc_oneinit,
+ .init = gp100_ltc_init,
+ .intr = gp100_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+ .invalidate = gf100_ltc_invalidate,
+ .flush = gf100_ltc_flush,
+};
+
+int
+gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 6d81c695ed0d..8b95f96e3ffa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -2,6 +2,7 @@
#define __NVKM_LTC_PRIV_H__
#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
#include <subdev/ltc.h>
+#include <core/enum.h>
int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
int index, struct nvkm_ltc **);
@@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
void gf100_ltc_invalidate(struct nvkm_ltc *);
void gf100_ltc_flush(struct nvkm_ltc *);
+extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[];
void gm107_ltc_intr(struct nvkm_ltc *);
+void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts);
void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
void gm107_ltc_cbc_wait(struct nvkm_ltc *);
void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 49695ac7be2e..12943f92c206 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o
nvkm-y += nvkm/subdev/mc/gf100.o
nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
+nvkm-y += nvkm/subdev/mc/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 350a8caa84c8..6b25e25f9eba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -27,43 +27,67 @@
#include <subdev/top.h>
void
-nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
+nvkm_mc_unk260(struct nvkm_device *device, u32 data)
{
- if (mc->func->unk260)
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc) && mc->func->unk260)
mc->func->unk260(mc, data);
}
void
-nvkm_mc_intr_unarm(struct nvkm_mc *mc)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
{
- return mc->func->intr_unarm(mc);
+ struct nvkm_mc *mc = device->mc;
+ const struct nvkm_mc_map *map;
+ if (likely(mc) && mc->func->intr_mask) {
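+ /* prefer the interrupt bit reported by PTOP, falling back
+ * to the static per-chipset interrupt map otherwise
+ */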
+ u32 mask = nvkm_top_intr_mask(device, devidx);
+ for (map = mc->func->intr; !mask && map->stat; map++) {
+ if (map->unit == devidx)
+ mask = map->stat;
+ }
+ mc->func->intr_mask(mc, mask, en ? mask : 0);
+ }
+}
+
+void
+nvkm_mc_intr_unarm(struct nvkm_device *device)
+{
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc))
+ mc->func->intr_unarm(mc);
}
void
-nvkm_mc_intr_rearm(struct nvkm_mc *mc)
+nvkm_mc_intr_rearm(struct nvkm_device *device)
{
- return mc->func->intr_rearm(mc);
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc))
+ mc->func->intr_rearm(mc);
}
static u32
-nvkm_mc_intr_mask(struct nvkm_mc *mc)
+nvkm_mc_intr_stat(struct nvkm_mc *mc)
{
- u32 intr = mc->func->intr_mask(mc);
+ u32 intr = mc->func->intr_stat(mc);
if (WARN_ON_ONCE(intr == 0xffffffff))
intr = 0; /* likely fallen off the bus */
return intr;
}
void
-nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
- struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_mc *mc = device->mc;
struct nvkm_subdev *subdev;
- const struct nvkm_mc_map *map = mc->func->intr;
- u32 stat, intr = nvkm_mc_intr_mask(mc);
+ const struct nvkm_mc_map *map;
+ u32 stat, intr;
u64 subdevs;
- stat = nvkm_top_intr(device->top, intr, &subdevs);
+ if (unlikely(!mc))
+ return;
+
+ intr = nvkm_mc_intr_stat(mc);
+ stat = nvkm_top_intr(device, intr, &subdevs);
while (subdevs) {
enum nvkm_devidx subidx = __ffs64(subdevs);
subdev = nvkm_device_subdev(device, subidx);
@@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
subdevs &= ~BIT_ULL(subidx);
}
- while (map->stat) {
+ for (map = mc->func->intr; map->stat; map++) {
if (intr & map->stat) {
subdev = nvkm_device_subdev(device, map->unit);
if (subdev)
nvkm_subdev_intr(subdev);
stat &= ~map->stat;
}
- map++;
}
if (stat)
@@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
*handled = intr != 0;
}
-static void
-nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+static u32
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
+ enum nvkm_devidx devidx)
{
- struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
- u64 pmc_enable;
-
- if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
- for (map = mc->func->reset; map && map->stat; map++) {
- if (map->unit == devidx) {
- pmc_enable = map->stat;
- break;
+ u64 pmc_enable = 0;
+ if (likely(mc)) {
+ if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+ for (map = mc->func->reset; map && map->stat; map++) {
+ if (!isauto || !map->noauto) {
+ if (map->unit == devidx) {
+ pmc_enable = map->stat;
+ break;
+ }
+ }
}
}
}
+ return pmc_enable;
+}
+void
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
}
void
-nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
{
- if (likely(mc))
- nvkm_mc_reset_(mc, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ if (pmc_enable)
+ nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+}
+
+void
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ if (pmc_enable) {
+ nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+ nvkm_rd32(device, 0x000200);
+ }
}
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_mc *mc = nvkm_mc(subdev);
- nvkm_mc_intr_unarm(mc);
+ nvkm_mc_intr_unarm(subdev->device);
return 0;
}
@@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev)
struct nvkm_mc *mc = nvkm_mc(subdev);
if (mc->func->init)
mc->func->init(mc);
- nvkm_mc_intr_rearm(mc);
+ nvkm_mc_intr_rearm(subdev->device);
return 0;
}
@@ -148,16 +191,21 @@ nvkm_mc = {
.fini = nvkm_mc_fini,
};
+void
+nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc *mc)
+{
+ nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+ mc->func = func;
+}
+
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
int index, struct nvkm_mc **pmc)
{
struct nvkm_mc *mc;
-
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
-
- nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
- mc->func = func;
+ nvkm_mc_ctor(func, device, index, *pmc);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index 5c85b47f071d..c3d66ef5dc12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -57,7 +57,7 @@ g84_mc = {
.intr = g84_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = g84_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 0280b43cc10c..93ad4982ce5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -57,7 +57,7 @@ g98_mc = {
.intr = g98_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = g98_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 8397e223bd43..d2c4d6033abb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc)
}
u32
-gf100_mc_intr_mask(struct nvkm_mc *mc)
+gf100_mc_intr_stat(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
u32 intr0 = nvkm_rd32(device, 0x000100);
@@ -85,6 +85,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc)
}
void
+gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_mask(device, 0x000640, mask, stat);
+ nvkm_mask(device, 0x000644, mask, stat);
+}
+
+void
gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
{
nvkm_wr32(mc->subdev.device, 0x000260, data);
@@ -97,6 +105,7 @@ gf100_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gf100_mc_reset,
.unk260 = gf100_mc_unk260,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
index 317464212c7d..7b8c6ecad1a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -26,6 +26,7 @@
const struct nvkm_mc_map
gk104_mc_reset[] = {
{ 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00002000, NVKM_SUBDEV_PMU, true },
{}
};
@@ -53,6 +54,7 @@ gk104_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
.unk260 = gf100_mc_unk260,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 60b044f517ed..ca1bf3279dbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -30,6 +30,7 @@ gk20a_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
new file mode 100644
index 000000000000..4d22f4abd6de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gp100_mc(p) container_of((p), struct gp100_mc, base)
+#include "priv.h"
+
+struct gp100_mc {
+ struct nvkm_mc base;
+ spinlock_t lock;
+ bool intr;
+ u32 mask;
+};
+
+static void
+gp100_mc_intr_update(struct gp100_mc *mc)
+{
+ struct nvkm_device *device = mc->base.subdev.device;
+ u32 mask = mc->intr ? mc->mask : 0, i;
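+ /* apply the software-tracked mask to both register sets;
+ * the effective mask is forced to zero while unarmed
+ */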
+ for (i = 0; i < 2; i++) {
+ nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+ nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
+ }
+}
+
+static void
+gp100_mc_intr_unarm(struct nvkm_mc *base)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = false;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_rearm(struct nvkm_mc *base)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = true;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->mask = (mc->mask & ~mask) | intr;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static const struct nvkm_mc_func
+gp100_mc = {
+ .init = nv50_mc_init,
+ .intr = gk104_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ struct gp100_mc *mc;
+
+ if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+ *pmc = &mc->base;
+
+ spin_lock_init(&mc->lock);
+ mc->intr = false;
+ mc->mask = 0x7fffffff;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
index aad0ba95bf18..99d50a3d956f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -53,13 +53,20 @@ gt215_mc_intr[] = {
{},
};
+static void
+gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+ nvkm_mask(mc->subdev.device, 0x000640, mask, stat);
+}
+
static const struct nvkm_mc_func
gt215_mc = {
.init = nv50_mc_init,
.intr = gt215_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_mask = gt215_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = gt215_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index a062624e906b..6509defd1460 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc)
}
u32
-nv04_mc_intr_mask(struct nvkm_mc *mc)
+nv04_mc_intr_stat(struct nvkm_mc *mc)
{
return nvkm_rd32(mc->subdev.device, 0x000100);
}
@@ -75,7 +75,7 @@ nv04_mc = {
.intr = nv04_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv04_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
index 55f0b9166b52..9213107901e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -39,7 +39,7 @@ nv11_mc = {
.intr = nv11_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv04_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
index c40fa67f79a5..64bf5bbf8146 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -48,7 +48,7 @@ nv17_mc = {
.intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index cc56271db564..65fa44a64b98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -43,7 +43,7 @@ nv44_mc = {
.intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 343b6078580d..fe93b4fd7100 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -50,7 +50,7 @@ nv50_mc = {
.intr = nv50_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index a12038118512..4f0576a06d24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -3,12 +3,15 @@
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
#include <subdev/mc.h>
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
+ int index, struct nvkm_mc *);
int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
int index, struct nvkm_mc **);
struct nvkm_mc_map {
u32 stat;
u32 unit;
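+ /* if set, nvkm_mc_reset() leaves this unit alone; it is only
+ * touched by explicit nvkm_mc_enable()/nvkm_mc_disable() calls
+ */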
+ bool noauto;
};
struct nvkm_mc_func {
@@ -18,8 +21,10 @@ struct nvkm_mc_func {
void (*intr_unarm)(struct nvkm_mc *);
/* enable reporting of interrupts to host */
void (*intr_rearm)(struct nvkm_mc *);
+ /* (un)mask delivery of specific interrupts */
+ void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
- u32 (*intr_mask)(struct nvkm_mc *);
+ u32 (*intr_stat)(struct nvkm_mc *);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
@@ -27,7 +32,7 @@ struct nvkm_mc_func {
void nv04_mc_init(struct nvkm_mc *);
void nv04_mc_intr_unarm(struct nvkm_mc *);
void nv04_mc_intr_rearm(struct nvkm_mc *);
-u32 nv04_mc_intr_mask(struct nvkm_mc *);
+u32 nv04_mc_intr_stat(struct nvkm_mc *);
extern const struct nvkm_mc_map nv04_mc_reset[];
extern const struct nvkm_mc_map nv17_mc_intr[];
@@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *);
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
-u32 gf100_mc_intr_mask(struct nvkm_mc *);
+void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+u32 gf100_mc_intr_stat(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
extern const struct nvkm_mc_map gk104_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 3c2519fdeb81..2a31b7d66a6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o
nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
+nvkm-y += nvkm/subdev/pci/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 6b0328bd7eed..eb9b278198b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -69,15 +69,13 @@ static irqreturn_t
nvkm_pci_intr(int irq, void *arg)
{
struct nvkm_pci *pci = arg;
- struct nvkm_mc *mc = pci->subdev.device->mc;
+ struct nvkm_device *device = pci->subdev.device;
bool handled = false;
- if (likely(mc)) {
- nvkm_mc_intr_unarm(mc);
- if (pci->msi)
- pci->func->msi_rearm(pci);
- nvkm_mc_intr(mc, &handled);
- nvkm_mc_intr_rearm(mc);
- }
+ nvkm_mc_intr_unarm(device);
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
+ nvkm_mc_intr(device, &handled);
+ nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
new file mode 100644
index 000000000000..82c5234a06ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ nvkm_pci_wr32(pci, 0x0704, 0x00000000);
+}
+
+static const struct nvkm_pci_func
+gp100_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = gp100_pci_msi_rearm,
+};
+
+int
+gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 213fdba6cfa0..314be2192b7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -19,8 +19,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+
+#include <subdev/mc.h>
#include <subdev/timer.h>
static const char *
@@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
int ret;
/* enable engine */
- nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
- nvkm_rd32(device, 0x200);
+ nvkm_mc_enable(device, sb->devidx);
ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
if (ret < 0) {
- nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+ nvkm_mc_disable(device, sb->devidx);
return ret;
}
@@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
/* enable IRQs */
nvkm_wr32(device, sb->base + 0x010, 0xff);
- nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
- nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+ nvkm_mc_intr_mask(device, sb->devidx, true);
return 0;
}
@@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
struct nvkm_device *device = sb->subdev.device;
/* disable IRQs and wait for any previous code to complete */
- nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
- nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+ nvkm_mc_intr_mask(device, sb->devidx, false);
nvkm_wr32(device, sb->base + 0x014, 0xff);
falcon_wait_idle(device, sb->base);
/* disable engine */
- nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+ nvkm_mc_disable(device, sb->devidx);
return 0;
}
@@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
return ret;
}
- /*
- * Build all blobs - the same blobs can be used to perform secure boot
- * multiple times
- */
- if (sb->func->prepare_blobs)
- ret = sb->func->prepare_blobs(sb);
-
- return ret;
+ return 0;
}
static int
@@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
/* setup the performing falcon's base address and masks */
switch (func->boot_falcon) {
case NVKM_SECBOOT_FALCON_PMU:
+ sb->devidx = NVKM_SUBDEV_PMU;
sb->base = 0x10a000;
- sb->irq_mask = 0x1000000;
- sb->enable_mask = 0x2000;
break;
default:
nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index cc100dc940ea..f1e2dc914366 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
/* Write LS blob */
ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+ if (ret)
+ nvkm_gpuobj_del(&gsb->ls_blob);
cleanup:
ls_ucode_mgr_cleanup(&mgr);
@@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
int ret;
/* Load and prepare the managed falcon's firmwares */
- ret = gm200_secboot_prepare_ls_blob(gsb);
- if (ret)
- return ret;
+ if (!gsb->ls_blob) {
+ ret = gm200_secboot_prepare_ls_blob(gsb);
+ if (ret)
+ return ret;
+ }
/* Load the HS firmware that will load the LS firmwares */
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
- &gsb->acr_load_blob,
- &gsb->acr_load_bl_desc, true);
- if (ret)
- return ret;
+ if (!gsb->acr_load_blob) {
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+ &gsb->acr_load_blob,
+ &gsb->acr_load_bl_desc, true);
+ if (ret)
+ return ret;
+ }
/* Load the HS firmware bootloader */
- ret = gm200_secboot_prepare_hsbl_blob(gsb);
- if (ret)
- return ret;
+ if (!gsb->hsbl_blob) {
+ ret = gm200_secboot_prepare_hsbl_blob(gsb);
+ if (ret)
+ return ret;
+ }
return 0;
}
static int
-gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
{
- struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20x_secboot_prepare_blobs(gsb);
@@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
return ret;
/* dGPU only: load the HS firmware that unprotects the WPR region */
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
- &gsb->acr_unload_blob,
- &gsb->acr_unload_bl_desc, false);
- if (ret)
- return ret;
+ if (!gsb->acr_unload_blob) {
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+ &gsb->acr_unload_blob,
+ &gsb->acr_unload_bl_desc, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
+static int
+gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ int ret;
+
+ /* firmware already loaded, nothing to do... */
+ if (gsb->firmware_ok)
+ return 0;
+
+ ret = gsb->func->prepare_blobs(gsb);
+ if (ret) {
+ nvkm_error(subdev, "failed to load secure firmware\n");
+ return ret;
+ }
+
+ gsb->firmware_ok = true;
+
+ return 0;
+}
/*
@@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
+ /* Make sure all blobs are ready */
+ ret = gm200_secboot_blobs_ready(gsb);
+ if (ret)
+ return ret;
+
/*
* Dummy GM200 implementation: perform secure boot each time we are
* called on FECS. Since only FECS and GPCCS are managed and started
@@ -1373,7 +1407,6 @@ gm200_secboot = {
.dtor = gm200_secboot_dtor,
.init = gm200_secboot_init,
.fini = gm200_secboot_fini,
- .prepare_blobs = gm200_secboot_prepare_blobs,
.reset = gm200_secboot_reset,
.start = gm200_secboot_start,
.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
@@ -1415,6 +1448,7 @@ gm200_secboot_func = {
.bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
.fixup_bl_desc = gm200_secboot_fixup_bl_desc,
.fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+ .prepare_blobs = gm200_secboot_prepare_blobs,
};
int
@@ -1487,3 +1521,19 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 684320484b70..d5395ebfe8d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc {
u32 data_size;
};
+static int
+gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ int acr_size;
+ int ret;
+
+ ret = gm20x_secboot_prepare_blobs(gsb);
+ if (ret)
+ return ret;
+
+ acr_size = gsb->acr_load_blob->size;
+ /*
+ * On Tegra the WPR region is set by the bootloader. It is illegal for
+ * the HS blob to be larger than this region.
+ */
+ if (acr_size > gsb->wpr_size) {
+ nvkm_error(subdev, "WPR region too small for FW blob!\n");
+ nvkm_error(subdev, "required: %dB\n", acr_size);
+ nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
/**
* gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
*
@@ -88,6 +114,7 @@ gm20b_secboot_func = {
.bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
.fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
.fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+ .prepare_blobs = gm20b_secboot_prepare_blobs,
};
@@ -147,32 +174,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
#endif
static int
-gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int acr_size;
- int ret;
-
- ret = gm20x_secboot_prepare_blobs(gsb);
- if (ret)
- return ret;
-
- acr_size = gsb->acr_load_blob->size;
- /*
- * On Tegra the WPR region is set by the bootloader. It is illegal for
- * the HS blob to be larger than this region.
- */
- if (acr_size > gsb->wpr_size) {
- nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
- nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
- nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
- return -ENOSPC;
- }
-
- return 0;
-}
-
-static int
gm20b_secboot_init(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
@@ -189,7 +190,6 @@ static const struct nvkm_secboot_func
gm20b_secboot = {
.dtor = gm200_secboot_dtor,
.init = gm20b_secboot_init,
- .prepare_blobs = gm20b_secboot_prepare_blobs,
.reset = gm200_secboot_reset,
.start = gm200_secboot_start,
.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index f2b09dee7c5d..a9a8a0e1017e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -30,7 +30,6 @@ struct nvkm_secboot_func {
int (*init)(struct nvkm_secboot *);
int (*fini)(struct nvkm_secboot *, bool suspend);
void *(*dtor)(struct nvkm_secboot *);
- int (*prepare_blobs)(struct nvkm_secboot *);
int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
@@ -147,10 +146,8 @@ struct hsflcn_acr_desc {
* @inst: instance block for HS falcon
* @pgd: page directory for the HS falcon
* @vm: address space used by the HS falcon
- * @bl_desc_size: size of the BL descriptor used by this chip.
- * @fixup_bl_desc: hook that generates the proper BL descriptor format from
- * the generic GM200 format into a data array of size
- * bl_desc_size
+ * @falcon_state: current state of the managed falcons
+ * @firmware_ok: whether the firmware blobs have been created
*/
struct gm200_secboot {
struct nvkm_secboot base;
@@ -196,9 +193,19 @@ struct gm200_secboot {
RUNNING,
} falcon_state[NVKM_SECBOOT_FALCON_END];
+ bool firmware_ok;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
+/**
+ * Contains functions we wish to abstract between GM200-like implementations
+ * @bl_desc_size: size of the BL descriptor used by this chip.
+ * @fixup_bl_desc: hook that generates the proper BL descriptor format from
+ * the generic GM200 format into a data array of size
+ * bl_desc_size
+ * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used
+ * @prepare_blobs: prepares the various blobs needed for secure booting
+ */
struct gm200_secboot_func {
/*
* Size of the bootloader descriptor for this chip. A block of this
@@ -214,6 +221,7 @@ struct gm200_secboot_func {
* we want the HS FW to set up.
*/
void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+ int (*prepare_blobs)(struct gm200_secboot *);
};
int gm200_secboot_init(struct nvkm_secboot *);
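The priv.h hunk above completes the move of prepare_blobs() from the generic nvkm_secboot_func into the GM200-family gm200_secboot_func, so the hook now takes the derived gm200_secboot directly; code that only holds the base nvkm_secboot recovers it through the gm200_secboot() container_of macro. A minimal standalone sketch of that base/derived-object pattern (struct and function names below are illustrative, not nouveau's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct secboot {                        /* generic base object */
    const char *name;
};

struct gm200_secboot_like {             /* chip-family derivation */
    struct secboot base;
    int wpr_size;
};

struct family_func {                    /* per-chip hooks take the derived type */
    int (*prepare_blobs)(struct gm200_secboot_like *);
};

static int prepare_blobs_impl(struct gm200_secboot_like *gsb)
{
    printf("%s: wpr_size=%d\n", gsb->base.name, gsb->wpr_size);
    return 0;
}

int main(void)
{
    struct gm200_secboot_like gsb = { .base.name = "gm20b", .wpr_size = 0x20000 };
    struct secboot *sb = &gsb.base;     /* generic code only sees the base pointer */
    struct family_func func = { .prepare_blobs = prepare_blobs_impl };

    /* recover the derived object, like the gm200_secboot(sb) macro does */
    return func.prepare_blobs(container_of(sb, struct gm200_secboot_like, base));
}
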
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index a1b264664aad..fe063d5728e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top)
}
u32
-nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
@@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
}
u32
-nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
{
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *info;
+
+ if (top) {
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index == devidx && info->intr >= 0)
+ return BIT(info->intr);
+ }
+ }
+
+ return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
+{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
u64 subdevs = 0;
u32 handled = 0;
@@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
}
enum nvkm_devidx
-nvkm_top_fault(struct nvkm_top *top, int fault)
+nvkm_top_fault(struct nvkm_device *device, int fault)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
@@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault)
}
enum nvkm_devidx
-nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
int n = 0;
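The top/base.c hunks change the nvkm_top helpers to take the nvkm_device and fetch device->top themselves; the new nvkm_top_intr_mask() walks the parsed topology list and returns a single-bit mask for the matching subdevice. A rough standalone model of that lookup, using an array in place of the kernel list and made-up indices:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct top_device {
    int index;      /* subdevice identifier */
    int intr;       /* bit in the top-level interrupt register, -1 if none */
};

/* stand-in for the list built by gk104_top_oneinit(); values are made up */
static const struct top_device topology[] = {
    { .index = 1, .intr =  4 },
    { .index = 2, .intr = -1 },
    { .index = 3, .intr = 12 },
};

static uint32_t top_intr_mask(int devidx)
{
    for (unsigned i = 0; i < sizeof(topology) / sizeof(topology[0]); i++) {
        if (topology[i].index == devidx && topology[i].intr >= 0)
            return BIT(topology[i].intr);
    }
    return 0;
}

int main(void)
{
    printf("mask for devidx 3: 0x%08x\n", top_intr_mask(3));  /* 0x00001000 */
    return 0;
}
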
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index e06acc340e99..efac3402f9dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top)
struct nvkm_subdev *subdev = &top->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_top_device *info = NULL;
- u32 data, type;
+ u32 data, type, inst;
int i;
for (i = 0; i < 64; i++) {
@@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top)
if (!(info = nvkm_top_device_new(top)))
return -ENOMEM;
type = ~0;
+ inst = 0;
}
data = nvkm_rd32(device, 0x022700 + (i * 0x04));
@@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top)
case 0x00000000: /* NOT_VALID */
continue;
case 0x00000001: /* DATA */
+ inst = (data & 0x3c000000) >> 26;
info->addr = (data & 0x00fff000);
info->fault = (data & 0x000000f8) >> 3;
break;
@@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top)
continue;
/* Translate engine type to NVKM engine identifier. */
+#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
+#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
+ info->index = NVKM_ENGINE_##A##0 + inst
switch (type) {
- case 0x00000000: info->index = NVKM_ENGINE_GR; break;
- case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
- case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
- case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
- case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
- case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
- case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
- case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
- case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
- case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
- case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
- case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
- case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+ case 0x00000000: A_(GR ); break;
+ case 0x00000001: A_(CE0 ); break;
+ case 0x00000002: A_(CE1 ); break;
+ case 0x00000003: A_(CE2 ); break;
+ case 0x00000008: A_(MSPDEC); break;
+ case 0x00000009: A_(MSPPP ); break;
+ case 0x0000000a: A_(MSVLD ); break;
+ case 0x0000000b: A_(MSENC ); break;
+ case 0x0000000c: A_(VIC ); break;
+ case 0x0000000d: A_(SEC ); break;
+ case 0x0000000e: B_(NVENC ); break;
+ case 0x0000000f: A_(NVENC1); break;
+ case 0x00000010: A_(NVDEC ); break;
+ case 0x00000013: B_(CE ); break;
break;
default:
break;
}
- nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
- "runlist %2d intr %2d reset %2d\n", type,
+ nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+ "engine %2d runlist %2d intr %2d "
+ "reset %2d\n", type, inst,
info->index == NVKM_SUBDEV_NR ? NULL :
nvkm_subdev_name[info->index],
info->addr, info->fault, info->engine, info->runlist,
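The A_()/B_() helpers introduced above translate a (type, instance) pair read from the topology registers into an engine index; B_() token-pastes the instance onto a multi-instance engine name and bounds-checks the result against the enum range. A cut-down standalone illustration with a toy engine enum (names and type codes are illustrative):

#include <stdio.h>

enum engine {
    ENGINE_GR,
    ENGINE_CE0, ENGINE_CE1, ENGINE_CE2,
    ENGINE_CE_LAST = ENGINE_CE2,
    ENGINE_INVALID,
};

/* single-instance engine: only instance 0 is valid */
#define A_(A) if (inst == 0) index = ENGINE_##A
/* multi-instance engine: map instance n to ENGINE_<A>0 + n if it exists */
#define B_(A) if (inst + ENGINE_##A##0 < ENGINE_##A##_LAST + 1) \
                  index = ENGINE_##A##0 + inst

static enum engine lookup(unsigned type, unsigned inst)
{
    enum engine index = ENGINE_INVALID;

    switch (type) {
    case 0x00: A_(GR); break;
    case 0x13: B_(CE); break;
    default: break;
    }
    return index;
}

int main(void)
{
    printf("type 0x13 inst 1 -> %d (expect %d)\n", lookup(0x13, 1), ENGINE_CE1);
    printf("type 0x13 inst 5 -> %d (out of range, stays invalid)\n", lookup(0x13, 5));
    return 0;
}
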
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 6b2d7531a7ff..1c3d23b0e84a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
if (data && info.vidmask && info.base && info.step) {
+ volt->min_uv = info.min;
+ volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
if (info.base >= info.min &&
info.base <= info.max) {
@@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
volt->vid_mask = info.vidmask;
} else if (data && info.vidmask) {
+ volt->min_uv = 0xffffffff;
+ volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
&ivid);
@@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
volt->vid[volt->vid_nr].uv = ivid.voltage;
volt->vid[volt->vid_nr].vid = ivid.vid;
volt->vid_nr++;
+ volt->min_uv = min(volt->min_uv, ivid.voltage);
+ volt->max_uv = max(volt->max_uv, ivid.voltage);
}
}
volt->vid_mask = info.vidmask;
+ } else if (data && info.type == NVBIOS_VOLT_PWM) {
+ volt->min_uv = info.base;
+ volt->max_uv = info.base + info.pwm_range;
}
}
@@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
volt->func = func;
/* Assuming the non-bios device should build the voltage table later */
- if (bios)
+ if (bios) {
nvkm_volt_parse_bios(bios, volt);
+ nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
+ volt->min_uv, volt->max_uv);
+ }
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
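The volt/base.c hunks derive the supported voltage range in one of three ways: directly from the range table (info.min/info.max), by folding min()/max() over the per-entry voltages (seeded with 0xffffffff and 0), or as base .. base + pwm_range for PWM-controlled regulators. A small standalone sketch of the entry-folding case, with made-up voltages:

#include <stdint.h>
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* per-entry voltages in microvolts, as parsed from the BIOS table */
    const uint32_t entries[] = { 900000, 812500, 1000000, 837500 };
    uint32_t min_uv = 0xffffffff, max_uv = 0;

    for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
        min_uv = min(min_uv, entries[i]);
        max_uv = max(max_uv, entries[i]);
    }

    printf("min: %uuv max: %uuv\n", (unsigned)min_uv, (unsigned)max_uv);  /* 812500 .. 1000000 */
    return 0;
}
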
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index d554455326da..ce5d83cdc7cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
return mv;
}
-int
+static int
gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
{
+ static const int v_scale = 1000;
int mv;
mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
- mv = DIV_ROUND_UP(mv, 1000);
+ mv = DIV_ROUND_UP(mv, v_scale);
return mv * 1000;
}
-int
+static int
gk20a_volt_vid_get(struct nvkm_volt *base)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base)
return -EINVAL;
}
-int
+static int
gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
}
-int
+static int
gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -143,9 +144,9 @@ gk20a_volt = {
};
int
-_gk20a_volt_ctor(struct nvkm_device *device, int index,
- const struct cvb_coef *coefs, int nb_coefs,
- struct gk20a_volt *volt)
+gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ int vmin, struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int i, uv;
@@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index,
volt->base.vid_nr = nb_coefs;
for (i = 0; i < volt->base.vid_nr; i++) {
volt->base.vid[i].vid = i;
- volt->base.vid[i].uv =
- gk20a_volt_calc_voltage(&coefs[i],
- tdev->gpu_speedo);
+ volt->base.vid[i].uv = max(
+ gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
+ vmin);
nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
volt->base.vid[i].vid, volt->base.vid[i].uv);
}
@@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
return -ENOMEM;
*pvolt = &volt->base;
- return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
- ARRAY_SIZE(gk20a_cvb_coef), volt);
+ return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+ ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
}
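In the gk20a.c hunks the CVB result (in microvolt-scale units) is rounded up to a whole millivolt via DIV_ROUND_UP with v_scale = 1000 and converted back to microvolts, and the new vmin constructor argument clamps it to a per-chip floor. A worked example of that arithmetic with hypothetical numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    const int v_scale = 1000;
    int cvb_uv  = 812345;   /* hypothetical CVB evaluation result */
    int vmin_uv = 840000;   /* hypothetical per-speedo floor, cf. speedo_to_vmin[] */
    int uv;

    /* round up to a whole millivolt, then back to microvolts: 813 mV -> 813000 uV */
    uv = DIV_ROUND_UP(cvb_uv, v_scale) * 1000;

    /* the new constructor clamps with max(calculated, vmin) */
    if (uv < vmin_uv)
        uv = vmin_uv;

    printf("%d uV\n", uv);  /* 840000: the per-speedo minimum wins here */
    return 0;
}
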
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
index 0fa3b502bcf8..6a6c97f9684e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -37,13 +37,8 @@ struct gk20a_volt {
struct regulator *vdd;
};
-int _gk20a_volt_ctor(struct nvkm_device *device, int index,
- const struct cvb_coef *coefs, int nb_coefs,
- struct gk20a_volt *volt);
-
-int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
-int gk20a_volt_vid_get(struct nvkm_volt *volt);
-int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
-int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+int gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ int vmin, struct gk20a_volt *volt);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 49b5ecb701e4..74db4d28930f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = {
/* 921600 */ { 2647676, -106455, 1632 },
};
+static const struct cvb_coef gm20b_na_cvb_coef[] = {
+ /* KHz, c0, c1, c2, c3, c4, c5 */
+ /* 76800 */ { 814294, 8144, -940, 808, -21583, 226 },
+ /* 153600 */ { 856185, 8144, -940, 808, -21583, 226 },
+ /* 230400 */ { 898077, 8144, -940, 808, -21583, 226 },
+ /* 307200 */ { 939968, 8144, -940, 808, -21583, 226 },
+ /* 384000 */ { 981860, 8144, -940, 808, -21583, 226 },
+ /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
+ /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
+ /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
+ /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
+ /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
+ /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
+ /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
+ /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
+};
+
+const u32 speedo_to_vmin[] = {
+ /* 0, 1, 2, 3, 4, */
+ 950000, 840000, 818750, 840000, 810000,
+};
+
int
gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
+ u32 vmin;
+
+ if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
+ nvdev_error(device, "unsupported speedo %d\n",
+ tdev->gpu_speedo_id);
+ return -EINVAL;
+ }
volt = kzalloc(sizeof(*volt), GFP_KERNEL);
if (!volt)
return -ENOMEM;
*pvolt = &volt->base;
- return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
- ARRAY_SIZE(gm20b_cvb_coef), volt);
+ vmin = speedo_to_vmin[tdev->gpu_speedo_id];
+
+ if (tdev->gpu_speedo_id >= 1)
+ return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
+ ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+ else
+ return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+ ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
}
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 336ad4de9981..556f81f6b2c7 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,11 +4,6 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
default n
help
DRM display driver for OMAP2/3/4 based boards.
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 2a618afe0f53..c226da145fb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,80 +1,80 @@
menu "OMAPDRM External Display Device Drivers"
-config DISPLAY_ENCODER_OPA362
+config DRM_OMAP_ENCODER_OPA362
tristate "OPA362 external analog amplifier"
help
Driver for OPA362 external analog TV amplifier controlled
through a GPIO.
-config DISPLAY_ENCODER_TFP410
+config DRM_OMAP_ENCODER_TFP410
tristate "TFP410 DPI to DVI Encoder"
help
Driver for TFP410 DPI to DVI encoder.
-config DISPLAY_ENCODER_TPD12S015
+config DRM_OMAP_ENCODER_TPD12S015
tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
-config DISPLAY_CONNECTOR_DVI
+config DRM_OMAP_CONNECTOR_DVI
tristate "DVI Connector"
depends on I2C
help
Driver for a generic DVI connector.
-config DISPLAY_CONNECTOR_HDMI
+config DRM_OMAP_CONNECTOR_HDMI
tristate "HDMI Connector"
help
Driver for a generic HDMI connector.
-config DISPLAY_CONNECTOR_ANALOG_TV
+config DRM_OMAP_CONNECTOR_ANALOG_TV
tristate "Analog TV Connector"
help
Driver for a generic analog TV connector.
-config DISPLAY_PANEL_DPI
+config DRM_OMAP_PANEL_DPI
tristate "Generic DPI panel"
help
Driver for generic DPI panels.
-config DISPLAY_PANEL_DSI_CM
+config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DSI command mode panels.
-config DISPLAY_PANEL_SONY_ACX565AKM
+config DRM_OMAP_PANEL_SONY_ACX565AKM
tristate "ACX565AKM Panel"
depends on SPI && BACKLIGHT_CLASS_DEVICE
help
This is the LCD panel used on Nokia N900
-config DISPLAY_PANEL_LGPHILIPS_LB035Q02
+config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
tristate "LG.Philips LB035Q02 LCD Panel"
depends on SPI
help
LCD Panel used on the Gumstix Overo Palo35
-config DISPLAY_PANEL_SHARP_LS037V7DW01
+config DRM_OMAP_PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on BACKLIGHT_CLASS_DEVICE
help
LCD Panel used in TI's SDP3430 and EVM boards
-config DISPLAY_PANEL_TPO_TD028TTEC1
+config DRM_OMAP_PANEL_TPO_TD028TTEC1
tristate "TPO TD028TTEC1 LCD Panel"
depends on SPI
help
LCD panel used in Openmoko.
-config DISPLAY_PANEL_TPO_TD043MTEA1
+config DRM_OMAP_PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
depends on SPI
help
LCD Panel used in OMAP3 Pandora
-config DISPLAY_PANEL_NEC_NL8048HL11
+config DRM_OMAP_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 Panel"
depends on SPI
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 9aa176bfbf2e..46baafb1a83e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,14 +1,14 @@
-obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o
-obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o
-obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
-obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
-obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 8511c648a15c..3485d1ecd655 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -14,9 +14,10 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
@@ -25,7 +26,6 @@ struct panel_drv_data {
struct omap_video_timings timings;
- enum omap_dss_venc_type connector_type;
bool invert_polarity;
};
@@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = {
static const struct of_device_id tvc_of_match[];
-struct tvc_of_data {
- enum omap_dss_venc_type connector_type;
-};
-
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
in->ops.atv->set_timings(in, &ddata->timings);
if (!ddata->dev->of_node) {
- in->ops.atv->set_type(in, ddata->connector_type);
+ in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
in->ops.atv->invert_vid_out_polarity(in,
ddata->invert_polarity);
@@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
ddata->in = in;
- ddata->connector_type = pdata->connector_type;
ddata->invert_polarity = pdata->invert_polarity;
dssdev = &ddata->dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 747f26a55e43..684b7aeda411 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -15,10 +15,10 @@
#include <linux/slab.h>
#include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
static const struct omap_video_timings dvic_default_timings = {
.x_res = 640,
.y_res = 480,
@@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev)
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
+ of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 667ca4a24ece..7bdf83af9797 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -17,10 +17,10 @@
#include <linux/of_gpio.h>
#include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
static const struct omap_video_timings hdmic_default_timings = {
.x_res = 640,
.y_res = 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 9594ff7a2b0c..fe4e7ec3bab0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -18,9 +18,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 671806ca7d6a..d768217cefe0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 916a89978387..46855c8f5cbf 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7c2331be8d15..7f16f985ab22 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -15,11 +15,13 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
#include <video/of_display_timing.h>
+#include "../dss/omapdss.h"
+
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
@@ -32,6 +34,7 @@ struct panel_drv_data {
int backlight_gpio;
struct gpio_desc *enable_gpio;
+ struct regulator *vcc_supply;
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (r)
return r;
+ r = regulator_enable(ddata->vcc_supply);
+ if (r) {
+ in->ops.dpi->disable(in);
+ return r;
+ }
+
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
if (gpio_is_valid(ddata->backlight_gpio))
@@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
gpio_set_value_cansleep(ddata->backlight_gpio, 0);
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
+ regulator_disable(ddata->vcc_supply);
in->ops.dpi->disable(in);
@@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
ddata->enable_gpio = gpio;
+ /*
+ * Many different panels are supported by this driver and there are
+ * probably very different needs for their reset pins in regards to
+ * timing and order relative to the enable gpio. So for now it's just
+ * ensured that the reset line isn't active.
+ */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(ddata->vcc_supply))
+ return PTR_ERR(ddata->vcc_supply);
+
ddata->backlight_gpio = -ENOENT;
r = of_get_display_timing(node, "panel-timing", &timing);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 2b118071b5a1..0eae8afaed90 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -25,10 +25,10 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
#include <video/mipi_display.h>
+#include "../dss/omapdss.h"
+
/* DSI Virtual channel. Hardcoded for now. */
#define TCH 0
@@ -1284,8 +1284,7 @@ static int dsicm_probe(struct platform_device *pdev)
return 0;
err_sysfs_create:
- if (bldev != NULL)
- backlight_device_unregister(bldev);
+ backlight_device_unregister(bldev);
err_bl:
destroy_workqueue(ddata->workqueue);
err_reg:
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index ac680e1de603..6dfb96cea293 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -17,8 +17,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
static struct omap_video_timings lb035q02_timings = {
.x_res = 320,
@@ -51,9 +50,6 @@ struct panel_drv_data {
struct omap_video_timings videomode;
- /* used for non-DT boot, to be removed */
- int backlight_gpio;
-
struct gpio_desc *enable_gpio;
};
@@ -171,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
- if (gpio_is_valid(ddata->backlight_gpio))
- gpio_set_value_cansleep(ddata->backlight_gpio, 1);
-
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
return 0;
@@ -190,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- if (gpio_is_valid(ddata->backlight_gpio))
- gpio_set_value_cansleep(ddata->backlight_gpio, 0);
-
in->ops.dpi->disable(in);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -256,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
ddata->enable_gpio = gpio;
- ddata->backlight_gpio = -ENOENT;
-
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
dev_err(&spi->dev, "failed to find video source\n");
@@ -290,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- if (gpio_is_valid(ddata->backlight_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
- GPIOF_OUT_INIT_LOW, "panel backlight");
- if (r)
- goto err_gpio;
- }
-
ddata->videomode = lb035q02_timings;
dssdev = &ddata->dssdev;
@@ -316,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
return 0;
err_reg:
-err_gpio:
omap_dss_put_device(ddata->in);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 38d2920a95e6..fc4c238c9583 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -18,7 +18,7 @@
#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 4363fffc87e3..3d3efc561ea9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -13,11 +13,11 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index deb416736aad..157c512205d1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -33,9 +33,10 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
#define MIPID_CMD_READ_DISP_ID 0x04
#define MIPID_CMD_READ_RED 0x06
#define MIPID_CMD_READ_GREEN 0x07
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index bd8d85041926..e859b3f893f7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -28,7 +28,8 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index d93175b03a12..66c6bbe6472b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
#define TPO_R02_MODE(x) ((x) & 7)
#define TPO_R02_MODE_800x480 7
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 7e4e5bebabbe..6a3ebfcd7223 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -35,8 +35,7 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
core.default_display_name = def_disp_name;
else if (pdata->default_display_name)
core.default_display_name = pdata->default_display_name;
- else if (pdata->default_device)
- core.default_display_name = pdata->default_device->name;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index f83608b69e68..535240fba671 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -41,8 +41,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
#include "dispc.h"
@@ -113,9 +112,14 @@ struct dispc_features {
* never both, we can just use this flag for now.
*/
bool reverse_ilace_field_order:1;
+
+ bool has_gamma_table:1;
+
+ bool has_gamma_i734_bug:1;
};
#define DISPC_MAX_NR_FIFOS 5
+#define DISPC_MAX_CHANNEL_GAMMA 4
static struct {
struct platform_device *pdev;
@@ -135,6 +139,8 @@ static struct {
bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
+ u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
+
const struct dispc_features *feat;
bool is_enabled;
@@ -178,11 +184,19 @@ struct dispc_reg_field {
u8 low;
};
+struct dispc_gamma_desc {
+ u32 len;
+ u32 bits;
+ u16 reg;
+ bool has_index;
+};
+
static const struct {
const char *name;
u32 vsync_irq;
u32 framedone_irq;
u32 sync_lost_irq;
+ struct dispc_gamma_desc gamma;
struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
} mgr_desc[] = {
[OMAP_DSS_CHANNEL_LCD] = {
@@ -190,6 +204,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_VSYNC,
.framedone_irq = DISPC_IRQ_FRAMEDONE,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST,
+ .gamma = {
+ .len = 256,
+ .bits = 8,
+ .reg = DISPC_GAMMA_TABLE0,
+ .has_index = true,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
@@ -207,6 +227,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
.framedone_irq = DISPC_IRQ_FRAMEDONETV,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
+ .gamma = {
+ .len = 1024,
+ .bits = 10,
+ .reg = DISPC_GAMMA_TABLE2,
+ .has_index = false,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
[DISPC_MGR_FLD_STNTFT] = { },
@@ -224,6 +250,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_VSYNC2,
.framedone_irq = DISPC_IRQ_FRAMEDONE2,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
+ .gamma = {
+ .len = 256,
+ .bits = 8,
+ .reg = DISPC_GAMMA_TABLE1,
+ .has_index = true,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
@@ -241,6 +273,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_VSYNC3,
.framedone_irq = DISPC_IRQ_FRAMEDONE3,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
+ .gamma = {
+ .len = 256,
+ .bits = 8,
+ .reg = DISPC_GAMMA_TABLE3,
+ .has_index = true,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
@@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
return unit * 8;
}
-void dispc_enable_gamma_table(bool enable)
-{
- /*
- * This is partially implemented to support only disabling of
- * the gamma table.
- */
- if (enable) {
- DSSWARN("Gamma table enabling for TV not yet supported");
- return;
- }
-
- REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
-}
-
static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT)
@@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
static unsigned long dispc_fclk_rate(void)
{
- struct dss_pll *pll;
- unsigned long r = 0;
+ unsigned long r;
+ enum dss_clk_source src;
+
+ src = dss_get_dispc_clk_source();
- switch (dss_get_dispc_clk_source()) {
- case OMAP_DSS_CLK_SRC_FCK:
+ if (src == DSS_CLK_SRC_FCK) {
r = dss_get_dispc_clk_rate();
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi0");
- if (!pll)
- pll = dss_pll_find("video0");
+ } else {
+ struct dss_pll *pll;
+ unsigned clkout_idx;
- r = pll->cinfo.clkout[0];
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi1");
- if (!pll)
- pll = dss_pll_find("video1");
+ pll = dss_pll_find_by_src(src);
+ clkout_idx = dss_pll_get_clkout_idx_for_src(src);
- r = pll->cinfo.clkout[0];
- break;
- default:
- BUG();
- return 0;
+ r = pll->cinfo.clkout[clkout_idx];
}
return r;
@@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void)
static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
{
- struct dss_pll *pll;
int lcd;
unsigned long r;
- u32 l;
-
- if (dss_mgr_is_lcd(channel)) {
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ enum dss_clk_source src;
- lcd = FLD_GET(l, 23, 16);
+ /* for TV, LCLK rate is the FCLK rate */
+ if (!dss_mgr_is_lcd(channel))
+ return dispc_fclk_rate();
- switch (dss_get_lcd_clk_source(channel)) {
- case OMAP_DSS_CLK_SRC_FCK:
- r = dss_get_dispc_clk_rate();
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi0");
- if (!pll)
- pll = dss_pll_find("video0");
+ src = dss_get_lcd_clk_source(channel);
- r = pll->cinfo.clkout[0];
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi1");
- if (!pll)
- pll = dss_pll_find("video1");
+ if (src == DSS_CLK_SRC_FCK) {
+ r = dss_get_dispc_clk_rate();
+ } else {
+ struct dss_pll *pll;
+ unsigned clkout_idx;
- r = pll->cinfo.clkout[0];
- break;
- default:
- BUG();
- return 0;
- }
+ pll = dss_pll_find_by_src(src);
+ clkout_idx = dss_pll_get_clkout_idx_for_src(src);
- return r / lcd;
- } else {
- return dispc_fclk_rate();
+ r = pll->cinfo.clkout[clkout_idx];
}
+
+ lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+
+ return r / lcd;
}
static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
{
int lcd, pcd;
- enum omap_dss_clk_source lcd_clk_src;
+ enum dss_clk_source lcd_clk_src;
seq_printf(s, "- %s -\n", mgr_desc[channel].name);
lcd_clk_src = dss_get_lcd_clk_source(channel);
- seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name,
- dss_get_generic_clk_source_name(lcd_clk_src),
- dss_feat_get_clk_source_name(lcd_clk_src));
+ seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
+ dss_get_clk_source_name(lcd_clk_src));
dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
@@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s)
{
int lcd;
u32 l;
- enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
+ enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
if (dispc_runtime_get())
return;
seq_printf(s, "- DISPC -\n");
- seq_printf(s, "dispc fclk source = %s (%s)\n",
- dss_get_generic_clk_source_name(dispc_clk_src),
- dss_feat_get_clk_source_name(dispc_clk_src));
+ seq_printf(s, "dispc fclk source = %s\n",
+ dss_get_clk_source_name(dispc_clk_src));
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
@@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void)
REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
+u32 dispc_mgr_gamma_size(enum omap_channel channel)
+{
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+
+ if (!dispc.feat->has_gamma_table)
+ return 0;
+
+ return gdesc->len;
+}
+EXPORT_SYMBOL(dispc_mgr_gamma_size);
+
+static void dispc_mgr_write_gamma_table(enum omap_channel channel)
+{
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+ u32 *table = dispc.gamma_table[channel];
+ unsigned int i;
+
+ DSSDBG("%s: channel %d\n", __func__, channel);
+
+ for (i = 0; i < gdesc->len; ++i) {
+ u32 v = table[i];
+
+ if (gdesc->has_index)
+ v |= i << 24;
+ else if (i == 0)
+ v |= 1 << 31;
+
+ dispc_write_reg(gdesc->reg, v);
+ }
+}
+
+static void dispc_restore_gamma_tables(void)
+{
+ DSSDBG("%s()\n", __func__);
+
+ if (!dispc.feat->has_gamma_table)
+ return;
+
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
+
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
+
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
+
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
+}
+
+static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
+ { .red = 0, .green = 0, .blue = 0, },
+ { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+void dispc_mgr_set_gamma(enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length)
+{
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+ u32 *table = dispc.gamma_table[channel];
+ uint i;
+
+ DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
+ channel, length, gdesc->len);
+
+ if (!dispc.feat->has_gamma_table)
+ return;
+
+ if (lut == NULL || length < 2) {
+ lut = dispc_mgr_gamma_default_lut;
+ length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
+ }
+
+ for (i = 0; i < length - 1; ++i) {
+ uint first = i * (gdesc->len - 1) / (length - 1);
+ uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
+ uint w = last - first;
+ u16 r, g, b;
+ uint j;
+
+ if (w == 0)
+ continue;
+
+ for (j = 0; j <= w; j++) {
+ r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
+ g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
+ b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
+
+ r >>= 16 - gdesc->bits;
+ g >>= 16 - gdesc->bits;
+ b >>= 16 - gdesc->bits;
+
+ table[first + j] = (r << (gdesc->bits * 2)) |
+ (g << gdesc->bits) | b;
+ }
+ }
+
+ if (dispc.is_enabled)
+ dispc_mgr_write_gamma_table(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_set_gamma);
+
+static int dispc_init_gamma_tables(void)
+{
+ int channel;
+
+ if (!dispc.feat->has_gamma_table)
+ return 0;
+
+ for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+ u32 *gt;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD2 &&
+ !dss_has_feature(FEAT_MGR_LCD2))
+ continue;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD3 &&
+ !dss_has_feature(FEAT_MGR_LCD3))
+ continue;
+
+ gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
+ sizeof(u32), GFP_KERNEL);
+ if (!gt)
+ return -ENOMEM;
+
+ dispc.gamma_table[channel] = gt;
+
+ dispc_mgr_set_gamma(channel, NULL, 0);
+ }
+ return 0;
+}
+
static void _omap_dispc_initial_config(void)
{
u32 l;
@@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void)
dispc.core_clk_rate = dispc_fclk_rate();
}
- /* FUNCGATED */
- if (dss_has_feature(FEAT_FUNCGATED))
+ /* Use gamma table mode, instead of palette mode */
+ if (dispc.feat->has_gamma_table)
+ REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
+
+ /* For older DSS versions (FEAT_FUNCGATED) this enables
+ * func-clock auto-gating. For newer versions
+ * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
+ */
+ if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
dispc_setup_color_conv_coef();
@@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.has_writeback = true,
.supports_double_pixel = true,
.reverse_ilace_field_order = true,
+ .has_gamma_table = true,
+ .has_gamma_i734_bug = true,
};
static const struct dispc_features omap54xx_dispc_feats = {
@@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
.has_writeback = true,
.supports_double_pixel = true,
.reverse_ilace_field_order = true,
+ .has_gamma_table = true,
+ .has_gamma_i734_bug = true,
};
static int dispc_init_features(struct platform_device *pdev)
@@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id)
}
EXPORT_SYMBOL(dispc_free_irq);
+/*
+ * Workaround for errata i734 in DSS dispc
+ * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
+ *
+ * For gamma tables to work on LCD1 the GFX plane has to be used at
+ * least once after DSS HW has come out of reset. The workaround
+ * sets up a minimal LCD setup with GFX plane and waits for one
+ * vertical sync irq before disabling the setup and continuing with
+ * the context restore. The physical outputs are gated during the
+ * operation. This workaround requires that gamma table's LOADMODE
+ * is set to 0x2 in DISPC_CONTROL1 register.
+ *
+ * For details see:
+ * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
+ * Literature Number: SWPZ037E
+ * Or some other relevant errata document for the DSS IP version.
+ */
+
+static const struct dispc_errata_i734_data {
+ struct omap_video_timings timings;
+ struct omap_overlay_info ovli;
+ struct omap_overlay_manager_info mgri;
+ struct dss_lcd_mgr_config lcd_conf;
+} i734 = {
+ .timings = {
+ .x_res = 8, .y_res = 1,
+ .pixelclock = 16000000,
+ .hsw = 8, .hfp = 4, .hbp = 4,
+ .vsw = 1, .vfp = 1, .vbp = 1,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .interlace = false,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .double_pixel = false,
+ },
+ .ovli = {
+ .screen_width = 1,
+ .width = 1, .height = 1,
+ .color_mode = OMAP_DSS_COLOR_RGB24U,
+ .rotation = OMAP_DSS_ROT_0,
+ .rotation_type = OMAP_DSS_ROT_DMA,
+ .mirror = 0,
+ .pos_x = 0, .pos_y = 0,
+ .out_width = 0, .out_height = 0,
+ .global_alpha = 0xff,
+ .pre_mult_alpha = 0,
+ .zorder = 0,
+ },
+ .mgri = {
+ .default_color = 0,
+ .trans_enabled = false,
+ .partial_alpha_enabled = false,
+ .cpr_enable = false,
+ },
+ .lcd_conf = {
+ .io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
+ .stallmode = false,
+ .fifohandcheck = false,
+ .clock_info = {
+ .lck_div = 1,
+ .pck_div = 2,
+ },
+ .video_port_width = 24,
+ .lcden_sig_polarity = 0,
+ },
+};
+
+static struct i734_buf {
+ size_t size;
+ dma_addr_t paddr;
+ void *vaddr;
+} i734_buf;
+
+static int dispc_errata_i734_wa_init(void)
+{
+ if (!dispc.feat->has_gamma_i734_bug)
+ return 0;
+
+ i734_buf.size = i734.ovli.width * i734.ovli.height *
+ color_mode_to_bpp(i734.ovli.color_mode) / 8;
+
+ i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
+ &i734_buf.paddr, GFP_KERNEL);
+ if (!i734_buf.vaddr) {
+ dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dispc_errata_i734_wa_fini(void)
+{
+ if (!dispc.feat->has_gamma_i734_bug)
+ return;
+
+ dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
+ i734_buf.paddr);
+}
+
+static void dispc_errata_i734_wa(void)
+{
+ u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
+ struct omap_overlay_info ovli;
+ struct dss_lcd_mgr_config lcd_conf;
+ u32 gatestate;
+ unsigned int count;
+
+ if (!dispc.feat->has_gamma_i734_bug)
+ return;
+
+ gatestate = REG_GET(DISPC_CONFIG, 8, 4);
+
+ ovli = i734.ovli;
+ ovli.paddr = i734_buf.paddr;
+ lcd_conf = i734.lcd_conf;
+
+ /* Gate all LCD1 outputs */
+ REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
+
+ /* Setup and enable GFX plane */
+ dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_enable(OMAP_DSS_GFX, true);
+
+ /* Set up and enable display manager for LCD1 */
+ dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
+ dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
+ &lcd_conf.clock_info);
+ dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+
+ dispc_clear_irqstatus(framedone_irq);
+
+ /* Enable and shut the channel to produce just one frame */
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
+
+ /* Busy wait for framedone. We can't fiddle with irq handlers
+ * in PM resume. Typically the loop runs less than 5 times and
+ * waits less than a micro second.
+ */
+ count = 0;
+ while (!(dispc_read_irqstatus() & framedone_irq)) {
+ if (count++ > 10000) {
+ dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
+ __func__);
+ break;
+ }
+ }
+ dispc_ovl_enable(OMAP_DSS_GFX, false);
+
+ /* Clear all irq bits before continuing */
+ dispc_clear_irqstatus(0xffffffff);
+
+ /* Restore the original state to LCD1 output gates */
+ REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
+}
+
/* DISPC HW IP initialisation */
static int dispc_bind(struct device *dev, struct device *master, void *data)
{
@@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
+ r = dispc_errata_i734_wa_init();
+ if (r)
+ return r;
+
dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
}
}
+ r = dispc_init_gamma_tables();
+ if (r)
+ return r;
+
pm_runtime_enable(&pdev->dev);
r = dispc_runtime_get();
@@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master,
void *data)
{
pm_runtime_disable(dev);
+
+ dispc_errata_i734_wa_fini();
}
static const struct component_ops dispc_component_ops = {
@@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev)
if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
_omap_dispc_initial_config();
+ dispc_errata_i734_wa();
+
dispc_restore_context();
+
+ dispc_restore_gamma_tables();
}
dispc.is_enabled = true;
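The largest addition to dispc.c above is gamma-table support: each channel has a descriptor giving the table length, bits per component, register, and whether entries carry an index, and dispc_mgr_set_gamma() resamples a drm_color_lut of arbitrary length onto the hardware table by linear interpolation before packing R/G/B into one register word. A standalone model of that resampling and packing, assuming an 8-bit, 256-entry table like the LCD channels:

#include <stdint.h>
#include <stdio.h>

struct color_lut { uint16_t red, green, blue; };  /* like drm_color_lut: 16 bits/component */

#define HW_LEN  256     /* table length of the LCD channels in the patch */
#define HW_BITS 8       /* bits per component stored by the hardware */

static uint32_t hw_table[HW_LEN];

static void set_gamma(const struct color_lut *lut, unsigned length)
{
    if (length < 2)     /* the driver substitutes a two-entry default ramp instead */
        return;

    for (unsigned i = 0; i < length - 1; i++) {
        unsigned first = i * (HW_LEN - 1) / (length - 1);
        unsigned last  = (i + 1) * (HW_LEN - 1) / (length - 1);
        unsigned w = last - first;

        if (w == 0)
            continue;

        for (unsigned j = 0; j <= w; j++) {
            /* linear interpolation between lut[i] and lut[i + 1] */
            uint16_t r = (lut[i].red   * (w - j) + lut[i + 1].red   * j) / w;
            uint16_t g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
            uint16_t b = (lut[i].blue  * (w - j) + lut[i + 1].blue  * j) / w;

            /* drop to the hardware depth and pack R:G:B into one word */
            r >>= 16 - HW_BITS;
            g >>= 16 - HW_BITS;
            b >>= 16 - HW_BITS;
            hw_table[first + j] = (r << (HW_BITS * 2)) | (g << HW_BITS) | b;
        }
    }
}

int main(void)
{
    /* identity ramp, black to white -- the same shape as the default LUT */
    const struct color_lut ramp[] = { { 0, 0, 0 }, { 0xffff, 0xffff, 0xffff } };

    set_gamma(ramp, 2);
    printf("entry 0: 0x%06x  entry 128: 0x%06x  entry 255: 0x%06x\n",
           (unsigned)hw_table[0], (unsigned)hw_table[128], (unsigned)hw_table[255]);
    return 0;
}
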
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 483744223dd1..bc1d8126ee87 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -42,6 +42,11 @@
#define DISPC_MSTANDBY_CTRL 0x0858
#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
+#define DISPC_GAMMA_TABLE0 0x0630
+#define DISPC_GAMMA_TABLE1 0x0634
+#define DISPC_GAMMA_TABLE2 0x0638
+#define DISPC_GAMMA_TABLE3 0x0850
+
/* DISPC overlay registers */
#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
DISPC_BA0_OFFSET(n))
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 038c15b04215..34fad2376f8d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -18,8 +18,8 @@
*/
#include <linux/kernel.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dispc.h"
static const struct dispc_coef coef3_M8[8] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9f3dd09b0a6c..8dcdd7cf9937 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 97ea60257884..b268295b76cf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -34,17 +34,15 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
-#define HSDIV_DISPC 0
-
struct dpi_data {
struct platform_device *pdev;
struct regulator *vdds_dsi_reg;
+ enum dss_clk_source clk_src;
struct dss_pll *pll;
struct mutex lock;
@@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
return dev_get_drvdata(&pdev->dev);
}
-static struct dss_pll *dpi_get_pll(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
{
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
case OMAPDSS_VER_OMAP3630:
case OMAPDSS_VER_AM35xx:
case OMAPDSS_VER_AM43xx:
- return NULL;
+ return DSS_CLK_SRC_FCK;
case OMAPDSS_VER_OMAP4430_ES1:
case OMAPDSS_VER_OMAP4430_ES2:
case OMAPDSS_VER_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
- return dss_pll_find("dsi0");
+ return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD2:
- return dss_pll_find("dsi1");
+ return DSS_CLK_SRC_PLL2_1;
default:
- return NULL;
+ return DSS_CLK_SRC_FCK;
}
case OMAPDSS_VER_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
- return dss_pll_find("dsi0");
+ return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD3:
- return dss_pll_find("dsi1");
+ return DSS_CLK_SRC_PLL2_1;
+ case OMAP_DSS_CHANNEL_LCD2:
default:
- return NULL;
+ return DSS_CLK_SRC_FCK;
}
case OMAPDSS_VER_DRA7xx:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
+ return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD2:
- return dss_pll_find("video0");
+ return DSS_CLK_SRC_PLL1_3;
case OMAP_DSS_CHANNEL_LCD3:
- return dss_pll_find("video1");
+ return DSS_CLK_SRC_PLL2_1;
default:
- return NULL;
+ return DSS_CLK_SRC_FCK;
}
default:
- return NULL;
- }
-}
-
-static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
-{
- switch (channel) {
- case OMAP_DSS_CHANNEL_LCD:
- return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
- case OMAP_DSS_CHANNEL_LCD2:
- return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
- case OMAP_DSS_CHANNEL_LCD3:
- return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
- default:
- /* this shouldn't happen */
- WARN_ON(1);
- return OMAP_DSS_CLK_SRC_FCK;
+ return DSS_CLK_SRC_FCK;
}
}
struct dpi_clk_calc_ctx {
struct dss_pll *pll;
+ unsigned clkout_idx;
/* inputs */
@@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
- struct dss_pll_clock_info dsi_cinfo;
+ struct dss_pll_clock_info pll_cinfo;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
return false;
- ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
- ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
+ ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
+ ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
@@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
{
struct dpi_clk_calc_ctx *ctx = data;
- ctx->dsi_cinfo.n = n;
- ctx->dsi_cinfo.m = m;
- ctx->dsi_cinfo.fint = fint;
- ctx->dsi_cinfo.clkdco = clkdco;
+ ctx->pll_cinfo.n = n;
+ ctx->pll_cinfo.m = m;
+ ctx->pll_cinfo.fint = fint;
+ ctx->pll_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc(ctx->pll, clkdco,
+ return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dpi_calc_hsdiv_cb, ctx);
}
@@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
dpi_calc_dispc_cb, ctx);
}
-static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck,
+static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
struct dpi_clk_calc_ctx *ctx)
{
unsigned long clkin;
- unsigned long pll_min, pll_max;
memset(ctx, 0, sizeof(*ctx));
ctx->pll = dpi->pll;
- ctx->pck_min = pck - 1000;
- ctx->pck_max = pck + 1000;
+ ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
- pll_min = 0;
- pll_max = 0;
+ clkin = clk_get_rate(dpi->pll->clkin);
- clkin = clk_get_rate(ctx->pll->clkin);
+ if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
+ unsigned long pll_min, pll_max;
- return dss_pll_calc(ctx->pll, clkin,
- pll_min, pll_max,
- dpi_calc_pll_cb, ctx);
+ ctx->pck_min = pck - 1000;
+ ctx->pck_max = pck + 1000;
+
+ pll_min = 0;
+ pll_max = 0;
+
+ return dss_pll_calc_a(ctx->pll, clkin,
+ pll_min, pll_max,
+ dpi_calc_pll_cb, ctx);
+ } else { /* DSS_PLL_TYPE_B */
+ dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
+
+ ctx->dispc_cinfo.lck_div = 1;
+ ctx->dispc_cinfo.pck_div = 1;
+ ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
+ ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
+
+ return true;
+ }
}
static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
@@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
-static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
+static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
@@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
int r;
bool ok;
- ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx);
+ ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
- r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo);
+ r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
if (r)
return r;
- dss_select_lcd_clk_source(channel,
- dpi_get_alt_clk_src(channel));
+ dss_select_lcd_clk_source(channel, dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
+ *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
*lck_div = ctx.dispc_cinfo.lck_div;
*pck_div = ctx.dispc_cinfo.pck_div;
@@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
int r = 0;
if (dpi->pll)
- r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
&lck_div, &pck_div);
else
r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (dpi->pll) {
r = dss_pll_enable(dpi->pll);
if (r)
- goto err_dsi_pll_init;
+ goto err_pll_init;
}
r = dpi_set_mode(dpi);
@@ -442,7 +440,7 @@ err_mgr_enable:
err_set_mode:
if (dpi->pll)
dss_pll_disable(dpi->pll);
-err_dsi_pll_init:
+err_pll_init:
err_src_sel:
dispc_runtime_put();
err_get_dispc:
@@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
dss_mgr_disable(channel);
if (dpi->pll) {
- dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
dss_pll_disable(dpi->pll);
}
@@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
return -EINVAL;
if (dpi->pll) {
- ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
if (!ok)
return -EINVAL;
- fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
+ fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
if (!ok)
@@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
mutex_unlock(&dpi->lock);
}
-static int dpi_verify_dsi_pll(struct dss_pll *pll)
+static int dpi_verify_pll(struct dss_pll *pll)
{
int r;
@@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
if (dpi->pll)
return;
- pll = dpi_get_pll(dpi->output.dispc_channel);
+ dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
+
+ pll = dss_pll_find_by_src(dpi->clk_src);
if (!pll)
return;
- /* On DRA7 we need to set a mux to use the PLL */
- if (omapdss_get_version() == OMAPDSS_VER_DRA7xx)
- dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
-
- if (dpi_verify_dsi_pll(pll)) {
- DSSWARN("DSI PLL not operational\n");
+ if (dpi_verify_pll(pll)) {
+ DSSWARN("PLL not operational\n");
return;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 56c43f355ce3..e1be5e795cd8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -42,9 +42,9 @@
#include <linux/of_platform.h>
#include <linux/component.h>
-#include <video/omapdss.h>
#include <video/mipi_display.h>
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -1261,7 +1261,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
unsigned long r;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
+ if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
@@ -1474,7 +1474,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
- enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
+ enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
struct dss_pll *pll = &dsi->pll;
@@ -1494,28 +1494,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
cinfo->clkdco, cinfo->m);
seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
- dss_feat_get_clk_source_name(dsi_module == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
+ dss_get_clk_source_name(dsi_module == 0 ?
+ DSS_CLK_SRC_PLL1_1 :
+ DSS_CLK_SRC_PLL2_1),
cinfo->clkout[HSDIV_DISPC],
cinfo->mX[HSDIV_DISPC],
- dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
+ dispc_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
- dss_feat_get_clk_source_name(dsi_module == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
+ dss_get_clk_source_name(dsi_module == 0 ?
+ DSS_CLK_SRC_PLL1_2 :
+ DSS_CLK_SRC_PLL2_2),
cinfo->clkout[HSDIV_DSI],
cinfo->mX[HSDIV_DSI],
- dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
+ dsi_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "- DSI%d -\n", dsi_module + 1);
- seq_printf(s, "dsi fclk source = %s (%s)\n",
- dss_get_generic_clk_source_name(dsi_clk_src),
- dss_feat_get_clk_source_name(dsi_clk_src));
+ seq_printf(s, "dsi fclk source = %s\n",
+ dss_get_clk_source_name(dsi_clk_src));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
@@ -4101,8 +4100,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
int r;
dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
+ DSS_CLK_SRC_PLL1_1 :
+ DSS_CLK_SRC_PLL2_1);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
r = dss_mgr_register_framedone_handler(channel,
@@ -4149,7 +4148,7 @@ err1:
dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
err:
- dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
return r;
}
@@ -4162,7 +4161,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
- dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4196,8 +4195,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
goto err1;
dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
+ DSS_CLK_SRC_PLL1_2 :
+ DSS_CLK_SRC_PLL2_2);
DSSDBG("PLL OK\n");
@@ -4229,7 +4228,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
err3:
dsi_cio_uninit(dsidev);
err2:
- dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
err1:
dss_pll_disable(&dsi->pll);
err0:
@@ -4251,7 +4250,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
dsi_vc_enable(dsidev, 2, 0);
dsi_vc_enable(dsidev, 3, 0);
- dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsidev);
dsi_pll_uninit(dsidev, disconnect_lanes);
}
@@ -4452,7 +4451,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.fint = fint;
ctx->dsi_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+ return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dsi_cm_calc_hsdiv_cb, ctx);
}
@@ -4491,7 +4490,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
pll_max = cfg->hs_clk_max * 4;
- return dss_pll_calc(ctx->pll, clkin,
+ return dss_pll_calc_a(ctx->pll, clkin,
pll_min, pll_max,
dsi_cm_calc_pll_cb, ctx);
}
@@ -4750,7 +4749,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.fint = fint;
ctx->dsi_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+ return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dsi_vm_calc_hsdiv_cb, ctx);
}
@@ -4792,7 +4791,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
pll_max = byteclk_max * 4 * 4;
}
- return dss_pll_calc(ctx->pll, clkin,
+ return dss_pll_calc_a(ctx->pll, clkin,
pll_min, pll_max,
dsi_vm_calc_pll_cb, ctx);
}
@@ -5138,6 +5137,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
};
static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 7) - 1,
.m_max = (1 << 11) - 1,
.mX_max = (1 << 4) - 1,
@@ -5163,6 +5164,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
};
static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
@@ -5188,6 +5191,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
};
static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index bf407b6ba15c..e256d879b25c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -18,8 +18,7 @@
#include <linux/of.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
struct device_node *
@@ -126,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port)
static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
{
- struct device_node *np;
+ struct device_node *np, *np_parent;
np = of_parse_phandle(node, "remote-endpoint", 0);
if (!np)
return NULL;
- np = of_get_next_parent(np);
+ np_parent = of_get_next_parent(np);
+ of_node_put(np);
- return np;
+ return np_parent;
}
struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 3303cfad4838..14887d5b02e5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -42,8 +42,7 @@
#include <linux/suspend.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -76,6 +75,8 @@ struct dss_features {
const enum omap_display_type *ports;
int num_ports;
int (*dpi_select_source)(int port, enum omap_channel channel);
+ int (*select_lcd_source)(enum omap_channel channel,
+ enum dss_clk_source clk_src);
};
static struct {
@@ -92,9 +93,9 @@ static struct {
unsigned long cache_prate;
struct dispc_clock_info cache_dispc_cinfo;
- enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
- enum omap_dss_clk_source dispc_clk_source;
- enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+ enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
+ enum dss_clk_source dispc_clk_source;
+ enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
@@ -106,11 +107,14 @@ static struct {
} dss;
static const char * const dss_generic_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI",
+ [DSS_CLK_SRC_FCK] = "FCK",
+ [DSS_CLK_SRC_PLL1_1] = "PLL1:1",
+ [DSS_CLK_SRC_PLL1_2] = "PLL1:2",
+ [DSS_CLK_SRC_PLL1_3] = "PLL1:3",
+ [DSS_CLK_SRC_PLL2_1] = "PLL2:1",
+ [DSS_CLK_SRC_PLL2_2] = "PLL2:2",
+ [DSS_CLK_SRC_PLL2_3] = "PLL2:3",
+ [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
};
static bool dss_initialized;
@@ -203,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
1 << shift, val << shift);
}
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
+static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
enum omap_channel channel)
{
unsigned shift, val;
if (!dss.syscon_pll_ctrl)
- return;
+ return -EINVAL;
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
shift = 3;
- switch (pll_id) {
- case DSS_PLL_VIDEO1:
+ switch (clk_src) {
+ case DSS_CLK_SRC_PLL1_1:
val = 0; break;
- case DSS_PLL_HDMI:
+ case DSS_CLK_SRC_HDMI_PLL:
val = 1; break;
default:
DSSERR("error in PLL mux config for LCD\n");
- return;
+ return -EINVAL;
}
break;
case OMAP_DSS_CHANNEL_LCD2:
shift = 5;
- switch (pll_id) {
- case DSS_PLL_VIDEO1:
+ switch (clk_src) {
+ case DSS_CLK_SRC_PLL1_3:
val = 0; break;
- case DSS_PLL_VIDEO2:
+ case DSS_CLK_SRC_PLL2_3:
val = 1; break;
- case DSS_PLL_HDMI:
+ case DSS_CLK_SRC_HDMI_PLL:
val = 2; break;
default:
DSSERR("error in PLL mux config for LCD2\n");
- return;
+ return -EINVAL;
}
break;
case OMAP_DSS_CHANNEL_LCD3:
shift = 7;
- switch (pll_id) {
- case DSS_PLL_VIDEO1:
- val = 1; break;
- case DSS_PLL_VIDEO2:
+ switch (clk_src) {
+ case DSS_CLK_SRC_PLL2_1:
val = 0; break;
- case DSS_PLL_HDMI:
+ case DSS_CLK_SRC_PLL1_3:
+ val = 1; break;
+ case DSS_CLK_SRC_HDMI_PLL:
val = 2; break;
default:
DSSERR("error in PLL mux config for LCD3\n");
- return;
+ return -EINVAL;
}
break;
default:
DSSERR("error in PLL mux config\n");
- return;
+ return -EINVAL;
}
regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
0x3 << shift, val << shift);
+
+ return 0;
}
void dss_sdi_init(int datapairs)
@@ -354,14 +360,14 @@ void dss_sdi_disable(void)
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
-const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
+const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
{
return dss_generic_clk_source_names[clk_src];
}
void dss_dump_clocks(struct seq_file *s)
{
- const char *fclk_name, *fclk_real_name;
+ const char *fclk_name;
unsigned long fclk_rate;
if (dss_runtime_get())
@@ -369,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s)
seq_printf(s, "- DSS -\n");
- fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
- fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
+ fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
fclk_rate = clk_get_rate(dss.dss_clk);
- seq_printf(s, "%s (%s) = %lu\n",
- fclk_name, fclk_real_name,
+ seq_printf(s, "%s = %lu\n",
+ fclk_name,
fclk_rate);
dss_runtime_put();
@@ -403,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
+static int dss_get_channel_index(enum omap_channel channel)
+{
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return 0;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return 1;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 2;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
int b;
u8 start, end;
+ /*
+ * We always use PRCM clock as the DISPC func clock, except on DSS3,
+ * where we don't have separate DISPC and LCD clock sources.
+ */
+ if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
+ clk_src != DSS_CLK_SRC_FCK))
+ return;
+
switch (clk_src) {
- case OMAP_DSS_CLK_SRC_FCK:
+ case DSS_CLK_SRC_FCK:
b = 0;
break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ case DSS_CLK_SRC_PLL1_1:
b = 1;
break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
+ case DSS_CLK_SRC_PLL2_1:
b = 2;
break;
default:
@@ -431,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
}
void dss_select_dsi_clk_source(int dsi_module,
- enum omap_dss_clk_source clk_src)
+ enum dss_clk_source clk_src)
{
int b, pos;
switch (clk_src) {
- case OMAP_DSS_CLK_SRC_FCK:
+ case DSS_CLK_SRC_FCK:
b = 0;
break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
+ case DSS_CLK_SRC_PLL1_2:
BUG_ON(dsi_module != 0);
b = 1;
break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
+ case DSS_CLK_SRC_PLL2_2:
BUG_ON(dsi_module != 1);
b = 1;
break;
@@ -458,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module,
dss.dsi_clk_source[dsi_module] = clk_src;
}
+static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ const u8 ctrl_bits[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 12,
+ [OMAP_DSS_CHANNEL_LCD3] = 19,
+ };
+
+ u8 ctrl_bit = ctrl_bits[channel];
+ int r;
+
+ if (clk_src == DSS_CLK_SRC_FCK) {
+ /* LCDx_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ return -EINVAL;
+ }
+
+ r = dss_ctrl_pll_set_control_mux(clk_src, channel);
+ if (r)
+ return r;
+
+ REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+ return 0;
+}
+
+static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ const u8 ctrl_bits[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 12,
+ [OMAP_DSS_CHANNEL_LCD3] = 19,
+ };
+ const enum dss_clk_source allowed_plls[] = {
+ [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
+ [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
+ [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
+ };
+
+ u8 ctrl_bit = ctrl_bits[channel];
+
+ if (clk_src == DSS_CLK_SRC_FCK) {
+ /* LCDx_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(allowed_plls[channel] != clk_src))
+ return -EINVAL;
+
+ REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+ return 0;
+}
+
+static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ const u8 ctrl_bits[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 12,
+ };
+ const enum dss_clk_source allowed_plls[] = {
+ [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
+ [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
+ };
+
+ u8 ctrl_bit = ctrl_bits[channel];
+
+ if (clk_src == DSS_CLK_SRC_FCK) {
+ /* LCDx_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ return 0;
+ }
+
+ if (WARN_ON(allowed_plls[channel] != clk_src))
+ return -EINVAL;
+
+ REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+ return 0;
+}
+
void dss_select_lcd_clk_source(enum omap_channel channel,
- enum omap_dss_clk_source clk_src)
+ enum dss_clk_source clk_src)
{
- int b, ix, pos;
+ int idx = dss_get_channel_index(channel);
+ int r;
if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
dss_select_dispc_clk_source(clk_src);
+ dss.lcd_clk_source[idx] = clk_src;
return;
}
- switch (clk_src) {
- case OMAP_DSS_CLK_SRC_FCK:
- b = 0;
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
- b = 1;
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
- channel != OMAP_DSS_CHANNEL_LCD3);
- b = 1;
- break;
- default:
- BUG();
+ r = dss.feat->select_lcd_source(channel, clk_src);
+ if (r)
return;
- }
-
- pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
- REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
- ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
- dss.lcd_clk_source[ix] = clk_src;
+ dss.lcd_clk_source[idx] = clk_src;
}
-enum omap_dss_clk_source dss_get_dispc_clk_source(void)
+enum dss_clk_source dss_get_dispc_clk_source(void)
{
return dss.dispc_clk_source;
}
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
{
return dss.dsi_clk_source[dsi_module];
}
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
{
if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
- int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
- return dss.lcd_clk_source[ix];
+ int idx = dss_get_channel_index(channel);
+ return dss.lcd_clk_source[idx];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
* OMAP2 and OMAP3 */
@@ -859,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = {
.dpi_select_source = &dss_dpi_select_source_omap4,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .select_lcd_source = &dss_lcd_clk_mux_omap4,
};
static const struct dss_features omap54xx_dss_feats = {
@@ -868,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = {
.dpi_select_source = &dss_dpi_select_source_omap5,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .select_lcd_source = &dss_lcd_clk_mux_omap5,
};
static const struct dss_features am43xx_dss_feats = {
@@ -886,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = {
.dpi_select_source = &dss_dpi_select_source_dra7xx,
.ports = dra7xx_ports,
.num_ports = ARRAY_SIZE(dra7xx_ports),
+ .select_lcd_source = &dss_lcd_clk_mux_dra7,
};
static int dss_init_features(struct platform_device *pdev)
@@ -1143,18 +1240,18 @@ static int dss_bind(struct device *dev)
/* Select DPLL */
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
#ifdef CONFIG_OMAP2_DSS_VENC
REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
- dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38e6ab50142d..4fd06dc41cb3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -102,6 +102,20 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
+enum dss_clk_source {
+ DSS_CLK_SRC_FCK = 0,
+
+ DSS_CLK_SRC_PLL1_1,
+ DSS_CLK_SRC_PLL1_2,
+ DSS_CLK_SRC_PLL1_3,
+
+ DSS_CLK_SRC_PLL2_1,
+ DSS_CLK_SRC_PLL2_2,
+ DSS_CLK_SRC_PLL2_3,
+
+ DSS_CLK_SRC_HDMI_PLL,
+};
+
enum dss_pll_id {
DSS_PLL_DSI1,
DSS_PLL_DSI2,
@@ -114,6 +128,11 @@ struct dss_pll;
#define DSS_PLL_MAX_HSDIVS 4
+enum dss_pll_type {
+ DSS_PLL_TYPE_A,
+ DSS_PLL_TYPE_B,
+};
+
/*
* Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
* Type-B PLLs: clkout[0] refers to m2.
@@ -140,6 +159,8 @@ struct dss_pll_ops {
};
struct dss_pll_hw {
+ enum dss_pll_type type;
+
unsigned n_max;
unsigned m_min;
unsigned m_max;
@@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void);
int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
-const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
+const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
/* DSS VIDEO PLL */
@@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s);
#endif
void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
- enum omap_channel channel);
void dss_sdi_init(int datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
void dss_select_dsi_clk_source(int dsi_module,
- enum omap_dss_clk_source clk_src);
+ enum dss_clk_source clk_src);
void dss_select_lcd_clk_source(enum omap_channel channel,
- enum omap_dss_clk_source clk_src);
-enum omap_dss_clk_source dss_get_dispc_clk_source(void);
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module);
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
+ enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(void);
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
@@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
int dss_pll_register(struct dss_pll *pll);
void dss_pll_unregister(struct dss_pll *pll);
struct dss_pll *dss_pll_find(const char *name);
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
int dss_pll_enable(struct dss_pll *pll);
void dss_pll_disable(struct dss_pll *pll);
int dss_pll_set_config(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
unsigned long out_min, unsigned long out_max,
dss_hsdiv_calc_func func, void *data);
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
unsigned long pll_min, unsigned long pll_max,
dss_pll_calc_func func, void *data);
+
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+ unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
+
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_write_config_type_b(struct dss_pll *pll,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
index c886a2927f73..ee5b93ce2763 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c
@@ -23,8 +23,7 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -50,7 +49,6 @@ struct omap_dss_features {
const enum omap_dss_output_id *supported_outputs;
const enum omap_color_mode *supported_color_modes;
const enum omap_overlay_caps *overlay_caps;
- const char * const *clksrc_names;
const struct dss_param_range *dss_params;
const enum omap_dss_rotation_type supported_rotation_types;
@@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
};
-static const char * const omap2_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1",
-};
-
-static const char * const omap3_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
-};
-
-static const char * const omap4_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
-};
-
-static const char * const omap5_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2",
-};
-
static const struct dss_param_range omap2_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
[FEAT_PARAM_DSS_PCD] = { 2, 255 },
@@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = {
.supported_outputs = omap2_dss_supported_outputs,
.supported_color_modes = omap2_dss_supported_color_modes,
.overlay_caps = omap2_dss_overlay_caps,
- .clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = {
.supported_outputs = omap3430_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
- .clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = {
.supported_outputs = omap3430_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
- .clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = {
.supported_outputs = am43xx_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
- .clksrc_names = omap2_dss_clk_source_names,
.dss_params = am43xx_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA,
.buffer_size_unit = 1,
@@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = {
.supported_outputs = omap3630_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3630_dss_overlay_caps,
- .clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.supported_outputs = omap4_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
.supported_outputs = omap4_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = {
.supported_outputs = omap4_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
.supported_outputs = omap5_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap5_dss_clk_source_names,
.dss_params = omap5_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
color_mode;
}
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
-{
- return omap_current_dss_features->clksrc_names[id];
-}
-
u32 dss_feat_get_buffer_size_unit(void)
{
return omap_current_dss_features->buffer_size_unit;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
index 3d67d39f192f..bb4b7f0e642b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h
@@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
u32 dss_feat_get_burst_size_unit(void); /* in bytes */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 53616b02b613..63e711545865 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -23,8 +23,9 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <sound/omap-hdmi-audio.h>
+#include "omapdss.h"
#include "dss.h"
/* HDMI Wrapper */
@@ -240,6 +241,7 @@ struct hdmi_pll_data {
void __iomem *base;
+ struct platform_device *pdev;
struct hdmi_wp_data *wp;
};
@@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
- unsigned long target_tmds, struct dss_pll_clock_info *pi);
int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
struct hdmi_wp_data *wp);
void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 4d46cdf7a037..cbd28dfdb86a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -34,9 +34,9 @@
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
-#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
+#include "omapdss.h"
#include "hdmi4_core.h"
#include "dss.h"
#include "dss_features.h"
@@ -177,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (p->double_pixel)
pc *= 2;
- hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+ /* DSS_HDMI_TCLK is bitclk / 10 */
+ pc *= 10;
+
+ dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ pc, &hdmi_cinfo);
r = dss_pll_enable(&hdmi.pll.pll);
if (r) {
@@ -204,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
- /* bypass TV gamma table */
- dispc_enable_gamma_table(0);
-
/* tv size */
dss_mgr_set_timings(channel, p);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 9255c0e1e4a7..0c0a5139a301 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -39,9 +39,9 @@
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
-#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
+#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
#include "dss_features.h"
@@ -189,7 +189,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (p->double_pixel)
pc *= 2;
- hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+ /* DSS_HDMI_TCLK is bitclk / 10 */
+ pc *= 10;
+
+ dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ pc, &hdmi_cinfo);
/* disable and clear irqs */
hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -221,9 +225,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
- /* bypass TV gamma table */
- dispc_enable_gamma_table(0);
-
/* tv size */
dss_mgr_set_timings(channel, p);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 1b8fcc6c4ba1..4dfb67fe5f6d 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -4,8 +4,8 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "hdmi.h"
int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index f98b750fc499..3ead47cccac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -14,8 +14,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index f1015e8b8267..b8bf6a9e5557 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -17,9 +17,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
@@ -39,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
DUMPPLL(PLLCTRL_CFG4);
}
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
- unsigned long target_tmds, struct dss_pll_clock_info *pi)
-{
- unsigned long fint, clkdco, clkout;
- unsigned long target_bitclk, target_clkdco;
- unsigned long min_dco;
- unsigned n, m, mf, m2, sd;
- unsigned long clkin;
- const struct dss_pll_hw *hw = pll->pll.hw;
-
- clkin = clk_get_rate(pll->pll.clkin);
-
- DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
-
- target_bitclk = target_tmds * 10;
-
- /* Fint */
- n = DIV_ROUND_UP(clkin, hw->fint_max);
- fint = clkin / n;
-
- /* adjust m2 so that the clkdco will be high enough */
- min_dco = roundup(hw->clkdco_min, fint);
- m2 = DIV_ROUND_UP(min_dco, target_bitclk);
- if (m2 == 0)
- m2 = 1;
-
- target_clkdco = target_bitclk * m2;
- m = target_clkdco / fint;
-
- clkdco = fint * m;
-
- /* adjust clkdco with fractional mf */
- if (WARN_ON(target_clkdco - clkdco > fint))
- mf = 0;
- else
- mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
-
- if (mf > 0)
- clkdco += (u32)div_u64((u64)mf * fint, 262144);
-
- clkout = clkdco / m2;
-
- /* sigma-delta */
- sd = DIV_ROUND_UP(fint * m, 250000000);
-
- DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
- n, m, mf, m2, sd);
- DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
-
- pi->n = n;
- pi->m = m;
- pi->mf = mf;
- pi->mX[0] = m2;
- pi->sd = sd;
-
- pi->fint = fint;
- pi->clkdco = clkdco;
- pi->clkout[0] = clkout;
-}
-
static int hdmi_pll_enable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
- u16 r = 0;
+ int r;
+
+ r = pm_runtime_get_sync(&pll->pdev->dev);
+ WARN_ON(r < 0);
dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
@@ -118,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
+ int r;
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
+
+ r = pm_runtime_put_sync(&pll->pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static const struct dss_pll_ops dsi_pll_ops = {
@@ -131,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
};
static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
+ .type = DSS_PLL_TYPE_B,
+
.n_max = 255,
.m_min = 20,
.m_max = 4095,
@@ -154,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
};
static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
+ .type = DSS_PLL_TYPE_B,
+
.n_max = 255,
.m_min = 20,
.m_max = 2045,
@@ -225,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
int r;
struct resource *res;
+ pll->pdev = pdev;
pll->wp = wp;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 055f62fca5dc..203694a52d18 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -15,8 +15,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index d7e7c909bbc2..6eaf1adbd606 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -18,7 +18,872 @@
#ifndef __OMAP_DRM_DSS_H
#define __OMAP_DRM_DSS_H
-#include <video/omapdss.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <video/videomode.h>
+#include <linux/platform_data/omapdss.h>
+#include <uapi/drm/drm_mode.h>
+
+#define DISPC_IRQ_FRAMEDONE (1 << 0)
+#define DISPC_IRQ_VSYNC (1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD (1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM (1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6)
+#define DISPC_IRQ_GFX_END_WIN (1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8)
+#define DISPC_IRQ_OCP_ERR (1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10)
+#define DISPC_IRQ_VID1_END_WIN (1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12)
+#define DISPC_IRQ_VID2_END_WIN (1 << 13)
+#define DISPC_IRQ_SYNC_LOST (1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15)
+#define DISPC_IRQ_WAKEUP (1 << 16)
+#define DISPC_IRQ_SYNC_LOST2 (1 << 17)
+#define DISPC_IRQ_VSYNC2 (1 << 18)
+#define DISPC_IRQ_VID3_END_WIN (1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21)
+#define DISPC_IRQ_FRAMEDONE2 (1 << 22)
+#define DISPC_IRQ_FRAMEDONEWB (1 << 23)
+#define DISPC_IRQ_FRAMEDONETV (1 << 24)
+#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26)
+#define DISPC_IRQ_SYNC_LOST3 (1 << 27)
+#define DISPC_IRQ_VSYNC3 (1 << 28)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
+#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+struct dss_lcd_mgr_config;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
+struct hdmi_avi_infoframe;
+
+enum omap_display_type {
+ OMAP_DISPLAY_TYPE_NONE = 0,
+ OMAP_DISPLAY_TYPE_DPI = 1 << 0,
+ OMAP_DISPLAY_TYPE_DBI = 1 << 1,
+ OMAP_DISPLAY_TYPE_SDI = 1 << 2,
+ OMAP_DISPLAY_TYPE_DSI = 1 << 3,
+ OMAP_DISPLAY_TYPE_VENC = 1 << 4,
+ OMAP_DISPLAY_TYPE_HDMI = 1 << 5,
+ OMAP_DISPLAY_TYPE_DVI = 1 << 6,
+};
+
+enum omap_plane {
+ OMAP_DSS_GFX = 0,
+ OMAP_DSS_VIDEO1 = 1,
+ OMAP_DSS_VIDEO2 = 2,
+ OMAP_DSS_VIDEO3 = 3,
+ OMAP_DSS_WB = 4,
+};
+
+enum omap_channel {
+ OMAP_DSS_CHANNEL_LCD = 0,
+ OMAP_DSS_CHANNEL_DIGIT = 1,
+ OMAP_DSS_CHANNEL_LCD2 = 2,
+ OMAP_DSS_CHANNEL_LCD3 = 3,
+ OMAP_DSS_CHANNEL_WB = 4,
+};
+
+enum omap_color_mode {
+ OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
+ OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
+ OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
+ OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
+ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
+ OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
+ OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
+ OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
+ OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */
+ OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
+ OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
+ OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
+ OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
+ OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
+ OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
+ OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
+ OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
+ OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
+ OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
+};
+
+enum omap_dss_load_mode {
+ OMAP_DSS_LOAD_CLUT_AND_FRAME = 0,
+ OMAP_DSS_LOAD_CLUT_ONLY = 1,
+ OMAP_DSS_LOAD_FRAME_ONLY = 2,
+ OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3,
+};
+
+enum omap_dss_trans_key_type {
+ OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+ OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+ OMAP_DSS_RFBI_TE_MODE_1 = 1,
+ OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_dss_signal_level {
+ OMAPDSS_SIG_ACTIVE_LOW,
+ OMAPDSS_SIG_ACTIVE_HIGH,
+};
+
+enum omap_dss_signal_edge {
+ OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ OMAPDSS_DRIVE_SIG_RISING_EDGE,
+};
+
+enum omap_dss_venc_type {
+ OMAP_DSS_VENC_TYPE_COMPOSITE,
+ OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_dss_dsi_pixel_format {
+ OMAP_DSS_DSI_FMT_RGB888,
+ OMAP_DSS_DSI_FMT_RGB666,
+ OMAP_DSS_DSI_FMT_RGB666_PACKED,
+ OMAP_DSS_DSI_FMT_RGB565,
+};
+
+enum omap_dss_dsi_mode {
+ OMAP_DSS_DSI_CMD_MODE = 0,
+ OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_display_caps {
+ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
+ OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
+};
+
+enum omap_dss_display_state {
+ OMAP_DSS_DISPLAY_DISABLED = 0,
+ OMAP_DSS_DISPLAY_ACTIVE,
+};
+
+enum omap_dss_rotation_type {
+ OMAP_DSS_ROT_DMA = 1 << 0,
+ OMAP_DSS_ROT_VRFB = 1 << 1,
+ OMAP_DSS_ROT_TILER = 1 << 2,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+ OMAP_DSS_ROT_0 = 0,
+ OMAP_DSS_ROT_90 = 1,
+ OMAP_DSS_ROT_180 = 2,
+ OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+ OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
+ OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
+ OMAP_DSS_OVL_CAP_POS = 1 << 4,
+ OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
+};
+
+enum omap_overlay_manager_caps {
+ OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
+};
+
+enum omap_dss_clk_source {
+ OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
+ * OMAP4: DSS_FCLK */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
+ * OMAP4: PLL1_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
+ * OMAP4: PLL1_CLK2 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
+};
+
+enum omap_hdmi_flags {
+ OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
+};
+
+enum omap_dss_output_id {
+ OMAP_DSS_OUTPUT_DPI = 1 << 0,
+ OMAP_DSS_OUTPUT_DBI = 1 << 1,
+ OMAP_DSS_OUTPUT_SDI = 1 << 2,
+ OMAP_DSS_OUTPUT_DSI1 = 1 << 3,
+ OMAP_DSS_OUTPUT_DSI2 = 1 << 4,
+ OMAP_DSS_OUTPUT_VENC = 1 << 5,
+ OMAP_DSS_OUTPUT_HDMI = 1 << 6,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+ int cs_on_time;
+ int cs_off_time;
+ int we_on_time;
+ int we_off_time;
+ int re_on_time;
+ int re_off_time;
+ int we_cycle_time;
+ int re_cycle_time;
+ int cs_pulse_width;
+ int access_time;
+
+ int clk_div;
+
+ u32 tim[5]; /* set by rfbi_convert_timings() */
+
+ int converted;
+};
+
+/* DSI */
+
+enum omap_dss_dsi_trans_mode {
+ /* Sync Pulses: both sync start and end packets sent */
+ OMAP_DSS_DSI_PULSE_MODE,
+ /* Sync Events: only sync start packets sent */
+ OMAP_DSS_DSI_EVENT_MODE,
+ /* Burst: only sync start packets sent, pixels are time compressed */
+ OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+ unsigned long hsclk;
+
+ unsigned ndl;
+ unsigned bitspp;
+
+ /* pixels */
+ u16 hact;
+ /* lines */
+ u16 vact;
+
+ /* DSI video mode blanking data */
+ /* Unit: byte clock cycles */
+ u16 hss;
+ u16 hsa;
+ u16 hse;
+ u16 hfp;
+ u16 hbp;
+ /* Unit: line clocks */
+ u16 vsa;
+ u16 vfp;
+ u16 vbp;
+
+ /* DSI blanking modes */
+ int blanking_mode;
+ int hsa_blanking_mode;
+ int hbp_blanking_mode;
+ int hfp_blanking_mode;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+
+ bool ddr_clk_always_on;
+ int window_sync;
+};
+
+struct omap_dss_dsi_config {
+ enum omap_dss_dsi_mode mode;
+ enum omap_dss_dsi_pixel_format pixel_format;
+ const struct omap_video_timings *timings;
+
+ unsigned long hs_clk_min, hs_clk_max;
+ unsigned long lp_clk_min, lp_clk_max;
+
+ bool ddr_clk_always_on;
+ enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+struct omap_video_timings {
+ /* Unit: pixels */
+ u16 x_res;
+ /* Unit: pixels */
+ u16 y_res;
+ /* Unit: Hz */
+ u32 pixelclock;
+ /* Unit: pixel clocks */
+ u16 hsw; /* Horizontal synchronization pulse width */
+ /* Unit: pixel clocks */
+ u16 hfp; /* Horizontal front porch */
+ /* Unit: pixel clocks */
+ u16 hbp; /* Horizontal back porch */
+ /* Unit: line clocks */
+ u16 vsw; /* Vertical synchronization pulse width */
+ /* Unit: line clocks */
+ u16 vfp; /* Vertical front porch */
+ /* Unit: line clocks */
+ u16 vbp; /* Vertical back porch */
+
+ /* Vsync logic level */
+ enum omap_dss_signal_level vsync_level;
+ /* Hsync logic level */
+ enum omap_dss_signal_level hsync_level;
+ /* Interlaced or Progressive timings */
+ bool interlace;
+ /* Pixel clock edge to drive LCD data */
+ enum omap_dss_signal_edge data_pclk_edge;
+ /* Data enable logic level */
+ enum omap_dss_signal_level de_level;
+ /* Pixel clock edges to drive HSYNC and VSYNC signals */
+ enum omap_dss_signal_edge sync_pclk_edge;
+
+ bool double_pixel;
+};
+
+/* Hardcoded timings for tv modes. Venc only uses these to
+ * identify the mode, and does not actually use the configs
+ * itself. However, the configs should be something that
+ * a normal monitor can also show */
+extern const struct omap_video_timings omap_dss_pal_timings;
+extern const struct omap_video_timings omap_dss_ntsc_timings;
+
+struct omap_dss_cpr_coefs {
+ s16 rr, rg, rb;
+ s16 gr, gg, gb;
+ s16 br, bg, bb;
+};
+
+struct omap_overlay_info {
+ dma_addr_t paddr;
+ dma_addr_t p_uv_addr; /* for NV12 format */
+ u16 screen_width;
+ u16 width;
+ u16 height;
+ enum omap_color_mode color_mode;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool mirror;
+
+ u16 pos_x;
+ u16 pos_y;
+ u16 out_width; /* if 0, out_width == width */
+ u16 out_height; /* if 0, out_height == height */
+ u8 global_alpha;
+ u8 pre_mult_alpha;
+ u8 zorder;
+};
+
+struct omap_overlay {
+ struct kobject kobj;
+ struct list_head list;
+
+ /* static fields */
+ const char *name;
+ enum omap_plane id;
+ enum omap_color_mode supported_modes;
+ enum omap_overlay_caps caps;
+
+ /* dynamic fields */
+ struct omap_overlay_manager *manager;
+
+ /*
+ * The following functions do not block:
+ *
+ * is_enabled
+ * set_overlay_info
+ * get_overlay_info
+ *
+ * The rest of the functions may block and cannot be called from
+ * interrupt context
+ */
+
+ int (*enable)(struct omap_overlay *ovl);
+ int (*disable)(struct omap_overlay *ovl);
+ bool (*is_enabled)(struct omap_overlay *ovl);
+
+ int (*set_manager)(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr);
+ int (*unset_manager)(struct omap_overlay *ovl);
+
+ int (*set_overlay_info)(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+ void (*get_overlay_info)(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+
+ int (*wait_for_go)(struct omap_overlay *ovl);
+
+ struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+ u32 default_color;
+
+ enum omap_dss_trans_key_type trans_key_type;
+ u32 trans_key;
+ bool trans_enabled;
+
+ bool partial_alpha_enabled;
+
+ bool cpr_enable;
+ struct omap_dss_cpr_coefs cpr_coefs;
+};
+
+struct omap_overlay_manager {
+ struct kobject kobj;
+
+ /* static fields */
+ const char *name;
+ enum omap_channel id;
+ enum omap_overlay_manager_caps caps;
+ struct list_head overlays;
+ enum omap_display_type supported_displays;
+ enum omap_dss_output_id supported_outputs;
+
+ /* dynamic fields */
+ struct omap_dss_device *output;
+
+ /*
+ * The following functions do not block:
+ *
+ * set_manager_info
+ * get_manager_info
+ * apply
+ *
+ * The rest of the functions may block and cannot be called from
+ * interrupt context
+ */
+
+ int (*set_output)(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *output);
+ int (*unset_output)(struct omap_overlay_manager *mgr);
+
+ int (*set_manager_info)(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+ void (*get_manager_info)(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+
+ int (*apply)(struct omap_overlay_manager *mgr);
+ int (*wait_for_go)(struct omap_overlay_manager *mgr);
+ int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+
+ struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
+};
+
+/* 22 pins means 1 clk lane and 10 data lanes */
+#define OMAP_DSS_MAX_DSI_PINS 22
+
+struct omap_dsi_pin_config {
+ int num_pins;
+ /*
+ * pin numbers in the following order:
+ * clk+, clk-
+ * data1+, data1-
+ * data2+, data2-
+ * ...
+ */
+ int pins[OMAP_DSS_MAX_DSI_PINS];
+};
+
+struct omap_dss_writeback_info {
+ u32 paddr;
+ u32 p_uv_addr;
+ u16 buf_width;
+ u16 width;
+ u16 height;
+ enum omap_color_mode color_mode;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool mirror;
+ u8 pre_mult_alpha;
+};
+
+struct omapdss_dpi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
+};
+
+struct omapdss_sdi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
+};
+
+struct omapdss_dvi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+};
+
+struct omapdss_atv_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_type)(struct omap_dss_device *dssdev,
+ enum omap_dss_venc_type type);
+ void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
+ bool invert_polarity);
+
+ int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+ u32 (*get_wss)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_hdmi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+ int (*set_infoframe)(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi);
+};
+
+struct omapdss_dsi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
+ bool enter_ulps);
+
+ /* bus configuration */
+ int (*set_config)(struct omap_dss_device *dssdev,
+ const struct omap_dss_dsi_config *cfg);
+ int (*configure_pins)(struct omap_dss_device *dssdev,
+ const struct omap_dsi_pin_config *pin_cfg);
+
+ void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
+ bool enable);
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*update)(struct omap_dss_device *dssdev, int channel,
+ void (*callback)(int, void *), void *data);
+
+ void (*bus_lock)(struct omap_dss_device *dssdev);
+ void (*bus_unlock)(struct omap_dss_device *dssdev);
+
+ int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
+ void (*disable_video_output)(struct omap_dss_device *dssdev,
+ int channel);
+
+ int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
+ int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
+ int vc_id);
+ void (*release_vc)(struct omap_dss_device *dssdev, int channel);
+
+ /* data transfer */
+ int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+ u8 *data, int len);
+
+ int (*gen_write)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*gen_read)(struct omap_dss_device *dssdev, int channel,
+ u8 *reqdata, int reqlen,
+ u8 *data, int len);
+
+ int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
+
+ int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
+ int channel, u16 plen);
+};
+
+struct omap_dss_device {
+ struct kobject kobj;
+ struct device *dev;
+
+ struct module *owner;
+
+ struct list_head panel_list;
+
+ /* alias in the form of "display%d" */
+ char alias[16];
+
+ enum omap_display_type type;
+ enum omap_display_type output_type;
+
+ union {
+ struct {
+ u8 data_lines;
+ } dpi;
+
+ struct {
+ u8 channel;
+ u8 data_lines;
+ } rfbi;
+
+ struct {
+ u8 datapairs;
+ } sdi;
+
+ struct {
+ int module;
+ } dsi;
+
+ struct {
+ enum omap_dss_venc_type type;
+ bool invert_polarity;
+ } venc;
+ } phy;
+
+ struct {
+ struct omap_video_timings timings;
+
+ enum omap_dss_dsi_pixel_format dsi_pix_fmt;
+ enum omap_dss_dsi_mode dsi_mode;
+ } panel;
+
+ struct {
+ u8 pixel_size;
+ struct rfbi_timings rfbi_timings;
+ } ctrl;
+
+ const char *name;
+
+ /* used to match device to driver */
+ const char *driver_name;
+
+ void *data;
+
+ struct omap_dss_driver *driver;
+
+ union {
+ const struct omapdss_dpi_ops *dpi;
+ const struct omapdss_sdi_ops *sdi;
+ const struct omapdss_dvi_ops *dvi;
+ const struct omapdss_hdmi_ops *hdmi;
+ const struct omapdss_atv_ops *atv;
+ const struct omapdss_dsi_ops *dsi;
+ } ops;
+
+ /* helper variable for driver suspend/resume */
+ bool activate_after_resume;
+
+ enum omap_display_caps caps;
+
+ struct omap_dss_device *src;
+
+ enum omap_dss_display_state state;
+
+ /* OMAP DSS output specific fields */
+
+ struct list_head list;
+
+ /* DISPC channel for this output */
+ enum omap_channel dispc_channel;
+ bool dispc_channel_connected;
+
+ /* output instance */
+ enum omap_dss_output_id id;
+
+ /* the port number in the DT node */
+ int port_num;
+
+ /* dynamic fields */
+ struct omap_overlay_manager *manager;
+
+ struct omap_dss_device *dst;
+};
+
+struct omap_dss_driver {
+ int (*probe)(struct omap_dss_device *);
+ void (*remove)(struct omap_dss_device *);
+
+ int (*connect)(struct omap_dss_device *dssdev);
+ void (*disconnect)(struct omap_dss_device *dssdev);
+
+ int (*enable)(struct omap_dss_device *display);
+ void (*disable)(struct omap_dss_device *display);
+ int (*run_test)(struct omap_dss_device *display, int test);
+
+ int (*update)(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h);
+ int (*sync)(struct omap_dss_device *dssdev);
+
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+ int (*get_te)(struct omap_dss_device *dssdev);
+
+ u8 (*get_rotate)(struct omap_dss_device *dssdev);
+ int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+ bool (*get_mirror)(struct omap_dss_device *dssdev);
+ int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*memory_read)(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h);
+
+ void (*get_resolution)(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres);
+ void (*get_dimensions)(struct omap_dss_device *dssdev,
+ u32 *width, u32 *height);
+ int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+ u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+ int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi);
+};
+
+enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omapdss_register_display(struct omap_dss_device *dssdev);
+void omapdss_unregister_display(struct omap_dss_device *dssdev);
+
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+ int (*match)(struct omap_dss_device *dssdev, void *data));
+const char *omapdss_get_default_display_name(void);
+
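
The for_each_dss_dev() macro above walks the display list by repeatedly asking for the next device until NULL comes back, using an assignment inside the loop condition. A self-contained toy showing the same idiom (the array, the names and the get_next() helper are invented for illustration and are not part of the DSS API):

```c
#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for omap_dss_get_next_device(): returns the element after
 * 'from' in a fixed array, or NULL past the end. Purely illustrative.
 */
static const char *names[] = { "display0", "display1", "display2" };

static const char *get_next(const char *from)
{
	size_t n = sizeof(names) / sizeof(names[0]);

	if (!from)
		return names[0];
	for (size_t i = 0; i + 1 < n; i++)
		if (names[i] == from)
			return names[i + 1];
	return NULL;
}

/* Same assign-in-condition pattern as for_each_dss_dev() */
#define for_each_name(d) while ((d = get_next(d)) != NULL)

int main(void)
{
	const char *d = NULL;

	for_each_name(d)
		printf("%s\n", d);
	return 0;
}
```
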
+void videomode_to_omap_video_timings(const struct videomode *vm,
+ struct omap_video_timings *ovt);
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+ struct videomode *vm);
+
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+int omapdss_register_output(struct omap_dss_device *output);
+void omapdss_unregister_output(struct omap_dss_device *output);
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
+struct omap_dss_device *omap_dss_find_output(const char *name);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
+int omapdss_output_set_device(struct omap_dss_device *out,
+ struct omap_dss_device *dssdev);
+int omapdss_output_unset_device(struct omap_dss_device *out);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
+
+void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres);
+int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omapdss_compat_init(void);
+void omapdss_compat_uninit(void);
+
+static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+ return dssdev->src;
+}
+
+static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
+{
+ return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+}
+
+struct device_node *
+omapdss_of_get_next_port(const struct device_node *parent,
+ struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_first_endpoint(const struct device_node *parent);
+
+struct omap_dss_device *
+omapdss_of_find_source_for_first_ep(struct device_node *node);
u32 dispc_read_irqstatus(void);
void dispc_clear_irqstatus(u32 mask);
@@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
const struct omap_video_timings *timings);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
+u32 dispc_mgr_gamma_size(enum omap_channel channel);
+void dispc_mgr_set_gamma(enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
bool dispc_ovl_enabled(enum omap_plane plane);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 829232ad8c81..24f859488201 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,8 +21,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
static LIST_HEAD(output_list);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index f974ddcd3b6e..0a76c89cdc2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -22,8 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#define PLL_CONTROL 0x0000
@@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name)
return NULL;
}
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
+{
+ struct dss_pll *pll;
+
+ switch (src) {
+ default:
+ case DSS_CLK_SRC_FCK:
+ return NULL;
+
+ case DSS_CLK_SRC_HDMI_PLL:
+ return dss_pll_find("hdmi");
+
+ case DSS_CLK_SRC_PLL1_1:
+ case DSS_CLK_SRC_PLL1_2:
+ case DSS_CLK_SRC_PLL1_3:
+ pll = dss_pll_find("dsi0");
+ if (!pll)
+ pll = dss_pll_find("video0");
+ return pll;
+
+ case DSS_CLK_SRC_PLL2_1:
+ case DSS_CLK_SRC_PLL2_2:
+ case DSS_CLK_SRC_PLL2_3:
+ pll = dss_pll_find("dsi1");
+ if (!pll)
+ pll = dss_pll_find("video1");
+ return pll;
+ }
+}
+
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
+{
+ switch (src) {
+ case DSS_CLK_SRC_HDMI_PLL:
+ return 0;
+
+ case DSS_CLK_SRC_PLL1_1:
+ case DSS_CLK_SRC_PLL2_1:
+ return 0;
+
+ case DSS_CLK_SRC_PLL1_2:
+ case DSS_CLK_SRC_PLL2_2:
+ return 1;
+
+ case DSS_CLK_SRC_PLL1_3:
+ case DSS_CLK_SRC_PLL2_3:
+ return 2;
+
+ default:
+ return 0;
+ }
+}
+
int dss_pll_enable(struct dss_pll *pll)
{
int r;
@@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin
return 0;
}
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
unsigned long out_min, unsigned long out_max,
dss_hsdiv_calc_func func, void *data)
{
@@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
return false;
}
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+/*
+ * clkdco = clkin / n * m * 2
+ * clkoutX = clkdco / mX
+ */
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
unsigned long pll_min, unsigned long pll_max,
dss_pll_calc_func func, void *data)
{
@@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
return false;
}
+/*
+ * This calculates a PLL config that will provide the target_clkout rate
+ * for clkout. Additionally clkdco rate will be the same as clkout rate
+ * when clkout rate is >= min_clkdco.
+ *
+ * clkdco = clkin / n * m + clkin / n * mf / 262144
+ * clkout = clkdco / m2
+ */
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+ unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
+{
+ unsigned long fint, clkdco, clkout;
+ unsigned long target_clkdco;
+ unsigned long min_dco;
+ unsigned n, m, mf, m2, sd;
+ const struct dss_pll_hw *hw = pll->hw;
+
+ DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
+
+ /* Fint */
+ n = DIV_ROUND_UP(clkin, hw->fint_max);
+ fint = clkin / n;
+
+ /* adjust m2 so that the clkdco will be high enough */
+ min_dco = roundup(hw->clkdco_min, fint);
+ m2 = DIV_ROUND_UP(min_dco, target_clkout);
+ if (m2 == 0)
+ m2 = 1;
+
+ target_clkdco = target_clkout * m2;
+ m = target_clkdco / fint;
+
+ clkdco = fint * m;
+
+ /* adjust clkdco with fractional mf */
+ if (WARN_ON(target_clkdco - clkdco > fint))
+ mf = 0;
+ else
+ mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
+
+ if (mf > 0)
+ clkdco += (u32)div_u64((u64)mf * fint, 262144);
+
+ clkout = clkdco / m2;
+
+ /* sigma-delta */
+ sd = DIV_ROUND_UP(fint * m, 250000000);
+
+ DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
+ n, m, mf, m2, sd);
+ DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
+
+ cinfo->n = n;
+ cinfo->m = m;
+ cinfo->mf = mf;
+ cinfo->mX[0] = m2;
+ cinfo->sd = sd;
+
+ cinfo->fint = fint;
+ cinfo->clkdco = clkdco;
+ cinfo->clkout[0] = clkout;
+
+ return true;
+}
+
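
dss_pll_calc_b() above implements the two formulas from its comment with integer divider selection. As a rough standalone check of that arithmetic, a plain userspace sketch with invented clock limits and an example target rate (not the kernel code path):

```c
#include <stdio.h>
#include <stdint.h>

/* Standalone sketch of the type-B divider math described above.
 * The clock rates and limits below are illustrative, not real hardware values.
 */
int main(void)
{
	unsigned long clkin = 20000000;        /* 20 MHz reference (example) */
	unsigned long target_clkout = 148500000;
	unsigned long fint_max = 2500000;      /* assumed Fint limit */
	unsigned long clkdco_min = 500000000;  /* assumed minimum DCO rate */

	unsigned long n = (clkin + fint_max - 1) / fint_max;       /* DIV_ROUND_UP */
	unsigned long fint = clkin / n;

	unsigned long min_dco = ((clkdco_min + fint - 1) / fint) * fint; /* roundup */
	unsigned long m2 = (min_dco + target_clkout - 1) / target_clkout;
	if (m2 == 0)
		m2 = 1;

	unsigned long target_clkdco = target_clkout * m2;
	unsigned long m = target_clkdco / fint;
	unsigned long clkdco = fint * m;

	/* fractional multiplier: clkdco += fint * mf / 2^18 */
	uint64_t mf = (uint64_t)262144 * (target_clkdco - clkdco) / fint;
	clkdco += (unsigned long)((uint64_t)mf * fint / 262144);

	printf("n=%lu m=%lu mf=%llu m2=%lu -> clkdco=%lu clkout=%lu\n",
	       n, m, (unsigned long long)mf, m2, clkdco, clkdco / m2);
	return 0;
}
```

With these example numbers the sketch lands within a few hertz of the 148.5 MHz target, which is the kind of residual the fractional multiplier mf is there to absorb.
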
static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
{
unsigned long timeout;
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index 3796576dfadf..cd53566d75eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -38,7 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
struct rfbi_reg { u16 idx; };
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index cd6d3bfb041d..0a96c321ce62 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -29,7 +29,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
static struct {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08a2cc778ba9..6eedf2118708 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,8 +37,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index b1ec59e42940..7429de928d4e 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -17,8 +17,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = {
};
static const struct dss_pll_hw dss_dra7_video_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
@@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.mX_lsb[0] = 21,
.mX_msb[1] = 30,
.mX_lsb[1] = 26,
+ .mX_msb[2] = 4,
+ .mX_lsb[2] = 0,
+ .mX_msb[3] = 9,
+ .mX_lsb[3] = 5,
.has_refsel = true,
};
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index ce2d67b6a8c7..137fe690a0da 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,7 +32,6 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
- struct drm_encoder *encoder;
bool hdmi_mode;
};
@@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
return ret;
}
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- return omap_connector->encoder;
-}
-
static const struct drm_connector_funcs omap_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
@@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = {
static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.get_modes = omap_connector_get_modes,
.mode_valid = omap_connector_mode_valid,
- .best_encoder = omap_connector_attached_encoder,
};
/* initialize connector */
@@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
goto fail;
omap_connector->dssdev = dssdev;
- omap_connector->encoder = encoder;
connector = &omap_connector->base;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 075f2bb44867..180f644e861e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
}
+static int omap_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (state->color_mgmt_changed && state->gamma_lut) {
+ uint length = state->gamma_lut->length /
+ sizeof(struct drm_color_lut);
+
+ if (length < 2)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(omap_crtc->vblank_irq.registered);
+ if (crtc->state->color_mgmt_changed) {
+ struct drm_color_lut *lut = NULL;
+ uint length = 0;
+
+ if (crtc->state->gamma_lut) {
+ lut = (struct drm_color_lut *)
+ crtc->state->gamma_lut->data;
+ length = crtc->state->gamma_lut->length /
+ sizeof(*lut);
+ }
+ dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
+ }
+
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
@@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_property = drm_atomic_helper_crtc_set_property,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_nofb = omap_crtc_mode_set_nofb,
.disable = omap_crtc_disable,
.enable = omap_crtc_enable,
+ .atomic_check = omap_crtc_atomic_check,
.atomic_begin = omap_crtc_atomic_begin,
.atomic_flush = omap_crtc_atomic_flush,
};
@@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+ /* The dispc API adapts to whatever size, but the HW supports a
+ * 256 element gamma table for LCDs and a 1024 element table for
+ * OMAP_DSS_CHANNEL_DIGIT. The X server assumes 256 element gamma
+ * tables, so let's use that. The size of the HW gamma table can be
+ * extracted with dispc_mgr_gamma_size(). If it returns 0, the
+ * gamma table is not supported.
+ */
+ if (dispc_mgr_gamma_size(channel)) {
+ uint gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+ drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+ }
+
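
As a rough illustration of the 256-entry convention described in the comment above, here is a standalone sketch that fills an identity ramp in a locally defined structure mirroring the UAPI drm_color_lut layout (16 bits per channel); it is illustrative only, not driver code:

```c
#include <stdio.h>
#include <stdint.h>

/* Local mirror of the UAPI drm_color_lut layout, used only to sketch
 * the 256-entry gamma table the comment above refers to.
 */
struct color_lut {
	uint16_t red, green, blue, reserved;
};

int main(void)
{
	struct color_lut lut[256];

	/* Identity ramp: spread 0..0xffff evenly over 256 entries. */
	for (unsigned i = 0; i < 256; i++) {
		uint16_t v = (uint16_t)(i * 0xffff / 255);
		lut[i].red = lut[i].green = lut[i].blue = v;
		lut[i].reserved = 0;
	}

	printf("first=%u mid=%u last=%u\n", lut[0].red, lut[128].red, lut[255].red);
	return 0;
}
```
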
omap_plane_install_properties(crtc->primary, &crtc->base);
omap_crtcs[channel] = omap_crtc;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d86f5479345b..26c6134eb744 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_atomic_state_commit *commit;
- unsigned int i;
- int ret;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
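
The rewritten loop above ORs one bit per affected CRTC into commit->crtcs (drm_crtc_mask() evaluates to 1 << index). A toy standalone illustration of the resulting bitmask, with invented CRTC indices:

```c
#include <stdio.h>
#include <stdint.h>

/* Each CRTC contributes one bit, 1 << index, to the pending-commit mask. */
int main(void)
{
	int affected[] = { 0, 2 };            /* indices of CRTCs in this commit */
	uint32_t mask = 0;

	for (unsigned i = 0; i < sizeof(affected) / sizeof(affected[0]); i++)
		mask |= 1u << affected[i];

	printf("commit->crtcs mask = 0x%x\n", mask);  /* prints 0x5 */
	return 0;
}
```
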
wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
@@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev,
spin_unlock(&priv->commit.lock);
/* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
schedule_work(&commit->work);
@@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev)
return DRM_MODE_CONNECTOR_HDMIA;
case OMAP_DISPLAY_TYPE_DVI:
return DRM_MODE_CONNECTOR_DVID;
+ case OMAP_DISPLAY_TYPE_DSI:
+ return DRM_MODE_CONNECTOR_DSI;
default:
return DRM_MODE_CONNECTOR_Unknown;
}
@@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = {
.unload = dev_unload,
.open = dev_open,
.lastclose = dev_lastclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = omap_irq_enable_vblank,
.disable_vblank = omap_irq_disable_vblank,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 3f823c368912..dcc30a98b9d4 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -24,7 +24,6 @@
#include <linux/platform_data/omap_drm.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include <video/omapdss.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
int x, int y, dma_addr_t *paddr);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
-int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
@@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-static inline int align_pitch(int pitch, int width, int bpp)
-{
- int bytespp = (bpp + 7) / 8;
- /* in case someone tries to feed us a completely bogus stride: */
- pitch = max(pitch, width * bytespp);
- /* PVR needs alignment to 8 pixels.. right now that is the most
- * restrictive stride requirement..
- */
- return roundup(pitch, 8 * bytespp);
-}
-
/* map crtc to vblank mask */
uint32_t pipe2vbl(struct drm_crtc *crtc);
struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f84570d1636c..31f5178c22c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -115,24 +115,16 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- if (plane->bo)
- drm_gem_object_unreference_unlocked(plane->bo);
+
+ drm_gem_object_unreference_unlocked(plane->bo);
}
kfree(omap_fb);
}
-static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags, unsigned color,
- struct drm_clip_rect *clips, unsigned num_clips)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = omap_framebuffer_create_handle,
.destroy = omap_framebuffer_destroy,
- .dirty = omap_framebuffer_dirty,
};
static uint32_t get_linear_addr(struct plane *plane,
@@ -320,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
mutex_unlock(&omap_fb->lock);
}
-struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- if (p >= drm_format_num_planes(fb->pixel_format))
- return NULL;
- return omap_fb->planes[p].bo;
-}
-
/* iterate thru all the connectors, returning ones that are attached
* to the same fb..
*/
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 89da41ac64d2..adb10fbe918d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = align_pitch(
- mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
- mode_cmd.width, sizes->surface_bpp);
+ mode_cmd.pitches[0] =
+ DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
@@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
if (ret)
goto fini;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 0dbd0f03f9bd..505dee0db973 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -383,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
return size;
}
-/* get tiled size, returns -EINVAL if not tiled buffer */
-int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (omap_obj->flags & OMAP_BO_TILED) {
- *w = omap_obj->width;
- *h = omap_obj->height;
- return 0;
- }
- return -EINVAL;
-}
-
/* -----------------------------------------------------------------------------
* Fault Handling
*/
@@ -661,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
{
union omap_gem_size gsize;
- args->pitch = align_pitch(0, args->width, args->bpp);
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
args->size = PAGE_ALIGN(args->pitch * args->height);
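
The dumb-buffer pitch and size computation above is plain byte arithmetic: round width * bpp up to whole bytes, then round the buffer up to whole pages. A standalone sketch with arbitrarily chosen 1920x1080 at 32 bpp:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned width = 1920, height = 1080, bpp = 32;   /* example dumb buffer */

	/* DIV_ROUND_UP(width * bpp, 8): bytes per scanline, no extra padding */
	unsigned long pitch = ((unsigned long)width * bpp + 7) / 8;

	/* PAGE_ALIGN(pitch * height): round the allocation up to whole pages */
	unsigned long size = (pitch * height + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	printf("pitch=%lu bytes, size=%lu bytes (%lu pages)\n",
	       pitch, size, size / PAGE_SIZE);
	return 0;
}
```
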
gsize = (union omap_gem_size){
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3a7bdf1c842b..85143d1b9b31 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -168,6 +168,7 @@ static int panel_simple_disable(struct drm_panel *panel)
if (p->backlight) {
p->backlight->props.power = FB_BLANK_POWERDOWN;
+ p->backlight->props.state |= BL_CORE_FBBLANK;
backlight_update_status(p->backlight);
}
@@ -235,6 +236,7 @@ static int panel_simple_enable(struct drm_panel *panel)
msleep(p->desc->delay.enable);
if (p->backlight) {
+ p->backlight->props.state &= ~BL_CORE_FBBLANK;
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
}
@@ -964,8 +966,8 @@ static const struct panel_desc innolux_zj070na_01p = {
.num_modes = 1,
.bpc = 6,
.size = {
- .width = 1024,
- .height = 600,
+ .width = 154,
+ .height = 90,
},
};
@@ -1017,6 +1019,51 @@ static const struct panel_desc lg_lb070wv8 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+ .clock = 200000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 12,
+ .hsync_end = 1536 + 12 + 16,
+ .htotal = 1536 + 12 + 16 + 48,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 8,
+ .vsync_end = 2048 + 8 + 4,
+ .vtotal = 2048 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+ .modes = &lg_lp079qx1_sp0v_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 129,
+ .height = 171,
+ },
+};
+
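
The drm_display_mode entries added in this file encode timings as cumulative offsets: hsync_start is hdisplay plus the front porch, hsync_end adds the sync width, htotal adds the back porch, and likewise for the vertical fields. A standalone sketch that recomputes the totals and refresh rate for lg_lp079qx1_sp0v_mode above as a sanity check (porch and sync values copied from the struct):

```c
#include <stdio.h>

/* Recompute htotal/vtotal and refresh from the cumulative mode fields. */
int main(void)
{
	unsigned clock_khz = 200000;
	unsigned hdisplay = 1536, hfp = 12, hsync = 16, hbp = 48;
	unsigned vdisplay = 2048, vfp = 8, vsync = 4, vbp = 8;

	unsigned htotal = hdisplay + hfp + hsync + hbp;   /* 1612 */
	unsigned vtotal = vdisplay + vfp + vsync + vbp;   /* 2068 */

	double refresh = (double)clock_khz * 1000.0 / ((double)htotal * vtotal);
	printf("htotal=%u vtotal=%u refresh=%.2f Hz\n", htotal, vtotal, refresh);
	return 0;
}
```

It comes out just under 60 Hz, matching the .vrefresh field.
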
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+ .clock = 205210,
+ .hdisplay = 2048,
+ .hsync_start = 2048 + 150,
+ .hsync_end = 2048 + 150 + 5,
+ .htotal = 2048 + 150 + 5 + 5,
+ .vdisplay = 1536,
+ .vsync_start = 1536 + 3,
+ .vsync_end = 1536 + 3 + 1,
+ .vtotal = 1536 + 3 + 1 + 9,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+ .modes = &lg_lp097qx1_spa1_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 208,
+ .height = 147,
+ },
+};
+
static const struct drm_display_mode lg_lp120up1_mode = {
.clock = 162300,
.hdisplay = 1920,
@@ -1224,6 +1271,28 @@ static const struct panel_desc qd43003c0_40 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+ .clock = 271560,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 2,
+ .vsync_end = 1600 + 2 + 5,
+ .vtotal = 1600 + 2 + 5 + 57,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+ .modes = &samsung_lsn122dl01_c01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -1242,8 +1311,8 @@ static const struct panel_desc samsung_ltn101nt05 = {
.num_modes = 1,
.bpc = 6,
.size = {
- .width = 1024,
- .height = 600,
+ .width = 223,
+ .height = 125,
},
};
@@ -1270,6 +1339,53 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct display_timing sharp_lq101k1ly04_timing = {
+ .pixelclock = { 60000000, 65000000, 80000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 20, 20, 20 },
+ .hback_porch = { 20, 20, 20 },
+ .hsync_len = { 10, 10, 10 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 4, 4, 4 },
+ .vsync_len = { 4, 4, 4 },
+ .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc sharp_lq101k1ly04 = {
+ .timings = &sharp_lq101k1ly04_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct drm_display_mode sharp_lq123p1jx31_mode = {
+ .clock = 252750,
+ .hdisplay = 2400,
+ .hsync_start = 2400 + 48,
+ .hsync_end = 2400 + 48 + 32,
+ .htotal = 2400 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 3,
+ .vsync_end = 1600 + 3 + 10,
+ .vtotal = 1600 + 3 + 10 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+ .modes = &sharp_lq123p1jx31_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
@@ -1293,6 +1409,29 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+ .clock = 147000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 15,
+ .vsync_end = 1200 + 15 + 2,
+ .vtotal = 1200 + 15 + 2 + 18,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+ .modes = &starry_kr122ea0sra_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
static const struct drm_display_mode tpk_f07a_0102_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -1457,6 +1596,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
+ .compatible = "lg,lp079qx1-sp0v",
+ .data = &lg_lp079qx1_sp0v,
+ }, {
+ .compatible = "lg,lp097qx1-spa1",
+ .data = &lg_lp097qx1_spa1,
+ }, {
.compatible = "lg,lp120up1",
.data = &lg_lp120up1,
}, {
@@ -1481,15 +1626,27 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
}, {
+ .compatible = "samsung,lsn122dl01-c01",
+ .data = &samsung_lsn122dl01_c01,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "sharp,lq101k1ly04",
+ .data = &sharp_lq101k1ly04,
+ }, {
+ .compatible = "sharp,lq123p1jx31",
+ .data = &sharp_lq123p1jx31,
+ }, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "starry,kr122ea0sra",
+ .data = &starry_kr122ea0sra,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
@@ -1701,7 +1858,6 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.lanes = 4,
};
-
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 38c2bb72e456..da45b11b66b8 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,12 +1,7 @@
config DRM_QXL
tristate "QXL virtual GPU"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select CRC32
help
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index b5d4b41361bd..04270f5d110c 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
if (!qxl_check_idle(qdev->release_ring)) {
- queue_work(qdev->gc_queue, &qdev->gc_work);
+ schedule_work(&qdev->gc_work);
if (flush)
flush_work(&qdev->gc_work);
return true;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b5d54385892..3aef12742a53 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = dev->dev_private;
- struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
&norect, one_clip_rect, inc);
- drm_vblank_get(dev, qcrtc->index);
+ drm_crtc_vblank_get(crtc);
if (event) {
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, qcrtc->index, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- drm_vblank_put(dev, qcrtc->index);
+ drm_crtc_vblank_put(crtc);
ret = qxl_bo_reserve(bo, false);
if (!ret) {
@@ -469,8 +468,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
- if (qxl_fb->obj)
- drm_gem_object_unreference_unlocked(qxl_fb->obj);
+ drm_gem_object_unreference_unlocked(qxl_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(qxl_fb);
}
@@ -730,7 +728,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
qxl_crtc->index = crtc_id;
- drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 56e1d633875e..ffe885395145 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev,
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
- struct qxl_drawable *drawable,
unsigned num_clips,
struct qxl_bo *clips_bo)
{
@@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
* correctly globaly, since that would require
* tracking all of our palettes. */
ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+ if (ret)
+ return ret;
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
if (ret)
goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ rects = drawable_set_clipping(qdev, num_clips, clips_bo);
if (!rects)
goto out_release_backoff;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index dc9df5fe50ba..460bbceae297 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object = qxl_gem_object_free,
+ .gem_free_object_unlocked = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3ad6604b34ce..8e633caa4078 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -321,7 +321,6 @@ struct qxl_device {
struct qxl_bo *current_release_bo[3];
int current_release_bo_offset[3];
- struct workqueue_struct *gc_queue;
struct work_struct gc_work;
struct drm_property *hotplug_mode_update_property;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 5ea57f6320b8..df2657051afd 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- int bpp;
- int depth;
-
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 2319800b7add..e642242728c0 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev,
(unsigned long)qdev->surfaceram_size);
- qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
@@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
qxl_bo_unref(&qdev->current_release_bo[0]);
if (qdev->current_release_bo[1])
qxl_bo_unref(&qdev->current_release_bo[1]);
- flush_workqueue(qdev->gc_queue);
- destroy_workqueue(qdev->gc_queue);
- qdev->gc_queue = NULL;
-
+ flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
@@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
struct qxl_device *qdev;
int r;
- /* require kms */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
if (qdev == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261baf..f599cd073b72 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %d "
+ FENCE_WARN(fence, "failed to wait on release %llu "
"after spincount %d\n",
fence->context & ~0xf0000000, sc);
goto signaled;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0738d74c8d04..d50c9679e631 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ return ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 259cd6e6d71c..a97abc8af657 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_on(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_off(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 587cae4e73c9..56bb758f4e33 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 35e0fc3ae8a7..7ba450832e6b 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -3843,7 +3843,10 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
if (i >= sclk_table->count) {
pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
} else {
- /* XXX check display min clock requirements */
+ /* XXX The current code always reprogrammed the sclk levels,
+ * but we don't currently handle disp sclk requirements
+ * so just skip it.
+ */
if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ba192a35c607..0c1b9ff433af 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -53,6 +53,7 @@ MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
@@ -72,6 +73,7 @@ MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
@@ -1990,12 +1992,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
int new_fw = 0;
int err;
int num_fw;
+ bool new_smc = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ if ((rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x665f))
+ new_smc = true;
new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2010,6 +2017,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ if (rdev->pdev->revision == 0x80)
+ new_smc = true;
new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2259,7 +2268,10 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+ if (new_smc)
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
@@ -8354,7 +8366,8 @@ static int cik_startup(struct radeon_device *rdev)
}
}
rdev->rlc.cs_data = ci_cs_data;
- rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 0d3f744de35a..d960d3915408 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
+ case PACKET3_PFP_SYNC_ME:
+ if (pkt->count) {
+ DRM_ERROR("bad PFP_SYNC_ME\n");
+ return -EINVAL;
+ }
+ break;
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
case PACKET3_MPEG_INDEX:
case PACKET3_WAIT_REG_MEM:
case PACKET3_MEM_WRITE:
+ case PACKET3_PFP_SYNC_ME:
case PACKET3_SURFACE_SYNC:
case PACKET3_EVENT_WRITE:
case PACKET3_EVENT_WRITE_EOP:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0b174e14e9a6..c8e3d394cde7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1624,6 +1624,7 @@
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495d6c..5633ee3eb46e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
- unsigned fence_context;
+ u64 fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 59acd0e5c2c6..31c9a92d6a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -741,13 +741,6 @@ int radeon_acpi_init(struct radeon_device *rdev)
}
atif->encoder_for_bl = target;
- if (!target) {
- /* Brightness change notification is enabled, but we
- * didn't find a backlight controller, this should
- * never happen.
- */
- DRM_ERROR("Cannot find a backlight controller\n");
- }
}
if (atif->functions.sbios_requests && !atif->functions.system_params) {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f8097a0e7a79..5df3ec73021b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- if (crev < 2)
+ if ((frev < 2) && (crev < 2))
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else
@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (crev >= 4) {
+ if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 95f4fea89302..6de342861202 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include "radeon_acpi.h"
@@ -27,6 +28,7 @@ struct radeon_atpx_functions {
struct radeon_atpx {
acpi_handle handle;
struct radeon_atpx_functions functions;
+ bool is_hybrid;
};
static struct radeon_atpx_priv {
@@ -62,6 +64,14 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool radeon_is_atpx_hybrid(void) {
+ return radeon_atpx_priv.atpx.is_hybrid;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -141,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
@@ -171,19 +175,42 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ /* some bioses set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+#if 1
+ /* This is a temporary hack until the D3 cold support
+ * makes it upstream. The ATPX power_control method seems
+ * to still work even if the system should be using
+ * the new standardized hybrid D3 cold ACPI interface.
+ */
+ atpx->functions.power_cntl = true;
+#else
+ atpx->functions.power_cntl = false;
+#endif
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
@@ -258,6 +285,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);
+
+ /* 200ms delay is required after off */
+ if (state == 0)
+ msleep(200);
}
return 0;
}
@@ -505,7 +536,6 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev)
static const struct vga_switcheroo_handler radeon_atpx_handler = {
.switchto = radeon_atpx_switchto,
.power_state = radeon_atpx_power_state,
- .init = radeon_atpx_init,
.get_client_id = radeon_atpx_get_client_id,
};
@@ -541,6 +571,7 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ radeon_atpx_init();
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 81a63d7f5cd9..b79f3b002471 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev,
RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (i2c_bus->valid) {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
+
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 21c44b2293bc..a00dd2f74527 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
@@ -1526,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev,
return 0;
failed:
+ /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+ if (radeon_is_px(ddev))
+ pm_runtime_put_noidle(ddev->dev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a41b4982647..c3206fb8f4cf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = radeon_crtc->lut_b[regno] << 6;
}
-static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
}
radeon_crtc_load_lut(crtc);
+
+ return 0;
}
static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
- drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_put(&radeon_crtc->base);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
}
work->base = base;
- r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_put(crtc);
pflip_cleanup:
if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
+
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set2 = radeon_crtc_cursor_set2,
.cursor_move = radeon_crtc_cursor_move,
@@ -711,7 +714,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
- radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
+ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) {
@@ -1321,9 +1324,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- if (radeon_fb->obj) {
- drm_gem_object_unreference_unlocked(radeon_fb->obj);
- }
+ drm_gem_object_unreference_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -1708,6 +1709,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
+ drm_crtc_force_disable_all(rdev->ddev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b55aa740171f..c01a7c6abb49 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,11 +34,9 @@
#include "radeon_drv.h"
#include <drm/drm_pciids.h>
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_gem.h>
@@ -95,9 +93,10 @@
* 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
* 2.44.0 - SET_APPEND_CNT packet3 support
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
+ * 2.46.0 - Add PFP_SYNC_ME support on evergreen
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 45
+#define KMS_DRIVER_MINOR 46
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -164,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
#else
static inline void radeon_register_atpx_handler(void) {}
static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
int radeon_no_wb;
@@ -340,13 +343,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret == -EPROBE_DEFER)
return ret;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* Get rid of things like offb */
@@ -412,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (radeon_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!radeon_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
@@ -429,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (radeon_is_atpx_hybrid() ||
+ !radeon_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 414953c46a38..835563c1f0ed 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (radeon_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
radeon_kfd_device_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 478d4099b0d0..d0de4022fff9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_on(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_off(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 38226d925a5b..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
+ struct drm_crtc *crtc;
int i, r;
/* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_unmap_vram_bos(rdev);
if (rdev->irq.installed) {
- for (i = 0; i < rdev->num_crtc; i++) {
+ i = 0;
+ drm_for_each_crtc(crtc, rdev->ddev) {
if (rdev->pm.active_crtcs & (1 << i)) {
/* This can fail if a modeset is in progress */
- if (drm_vblank_get(rdev->ddev, i) == 0)
+ if (drm_crtc_vblank_get(crtc) == 0)
rdev->pm.req_vblank |= (1 << i);
else
DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
i);
}
+ i++;
}
}
radeon_set_power_state(rdev);
if (rdev->irq.installed) {
- for (i = 0; i < rdev->num_crtc; i++) {
+ i = 0;
+ drm_for_each_crtc(crtc, rdev->ddev) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
- drm_vblank_put(rdev->ddev, i);
+ drm_crtc_vblank_put(crtc);
}
+ i++;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 590b0377fbe2..0c00e192c845 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (IS_ERR(fence))
return PTR_ERR(fence);
- r = ttm_bo_move_accel_cleanup(bo, &fence->base,
- evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
radeon_fence_unref(&fence);
return r;
}
@@ -347,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -380,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (r)
+ return r;
+
/* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
if (WARN_ON_ONCE(rbo->pin_count > 0))
@@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index b30e719dd56d..2523ca96c6c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -50,6 +50,7 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -65,6 +66,7 @@ MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
@@ -80,6 +82,7 @@ MODULE_FIRMWARE("radeon/verde_ce.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
@@ -95,6 +98,7 @@ MODULE_FIRMWARE("radeon/oland_ce.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
@@ -110,6 +114,7 @@ MODULE_FIRMWARE("radeon/hainan_ce.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1653,12 +1658,16 @@ static int si_init_microcode(struct radeon_device *rdev)
char fw_name[30];
int err;
int new_fw = 0;
+ bool new_smc = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
+ /* XXX: figure out which Tahitis need the new ucode */
+ if (0)
+ new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1670,6 +1679,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1681,6 +1697,16 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B))
+ new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1692,6 +1718,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605))
+ new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1702,6 +1735,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667))
+ new_smc = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1847,7 +1887,10 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+ if (new_smc)
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 7fc3ca5ce6c7..4c2fd056dd6d 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -6,7 +6,6 @@ config DRM_RCAR_DU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 827711e28226..d3b44651061a 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -7,8 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_plane.o \
rcar_du_vgacon.o
-rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
- rcar_du_hdmienc.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o
+
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 0d8bdda736f9..7316fc7fa0bd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -196,7 +196,7 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
- return to_rcar_plane_state(plane->plane.state)->zpos;
+ return plane->plane.state->normalized_zpos;
}
static const struct rcar_du_format_info *
@@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
if (status & DSSR_FRM) {
- drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+ drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fb9242d27883..899ef7a2a7b4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev)
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
struct drm_device *ddev = rcdu->ddev;
- drm_connector_unregister_all(ddev);
drm_dev_unregister(ddev);
if (rcdu->fbdev)
@@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev)
if (!ddev)
return -ENOMEM;
- drm_dev_set_unique(ddev, dev_name(&pdev->dev));
-
rcdu->ddev = ddev;
ddev->dev_private = rcdu;
@@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev)
* disabled for all CRTCs.
*/
ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
+ if (ret < 0)
goto error;
- }
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to initialize DRM/KMS (%d)\n", ret);
goto error;
}
@@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev)
if (ret)
goto error;
- ret = drm_connector_register_all(ddev);
- if (ret < 0)
- goto error;
-
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index ed35467d96cf..c843c3134498 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -92,7 +92,6 @@ struct rcar_du_device {
struct {
struct drm_property *alpha;
struct drm_property *colorkey;
- struct drm_property *zpos;
} props;
unsigned int dpad0_source;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4e939e41f030..ab8645c57e2d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -19,7 +19,6 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
@@ -27,18 +26,6 @@
#include "rcar_du_vgacon.h"
/* -----------------------------------------------------------------------------
- * Common connector functions
- */
-
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector)
-{
- struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
- return rcar_encoder_to_drm_encoder(rcon->encoder);
-}
-
-/* -----------------------------------------------------------------------------
* Encoder
*/
@@ -186,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
break;
case DRM_MODE_ENCODER_TMDS:
- ret = rcar_du_hdmi_connector_init(rcdu, renc);
+ /* connector managed by the bridge driver */
break;
default:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 719b6f2a031c..7fc10a9c34c3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -15,7 +15,6 @@
#define __RCAR_DU_ENCODER_H__
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder_slave.h>
struct rcar_du_device;
struct rcar_du_hdmienc;
@@ -30,16 +29,16 @@ enum rcar_du_encoder_type {
};
struct rcar_du_encoder {
- struct drm_encoder_slave slave;
+ struct drm_encoder base;
enum rcar_du_output output;
struct rcar_du_hdmienc *hdmi;
struct rcar_du_lvdsenc *lvds;
};
#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, slave.base)
+ container_of(e, struct rcar_du_encoder, base)
-#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
+#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
struct rcar_du_connector {
struct drm_connector connector;
@@ -49,9 +48,6 @@ struct rcar_du_connector {
#define to_rcar_connector(c) \
container_of(c, struct rcar_du_connector, connector)
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector);
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type type,
enum rcar_du_output output,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
deleted file mode 100644
index 6c927144b5c9..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * R-Car Display Unit HDMI Connector
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
-#include "rcar_du_kms.h"
-
-#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
-
-static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct rcar_du_connector *con = to_rcar_connector(connector);
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->get_modes == NULL)
- return 0;
-
- return sfuncs->get_modes(encoder, connector);
-}
-
-static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct rcar_du_connector *con = to_rcar_connector(connector);
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->mode_valid == NULL)
- return MODE_OK;
-
- return sfuncs->mode_valid(encoder, mode);
-}
-
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = rcar_du_hdmi_connector_get_modes,
- .mode_valid = rcar_du_hdmi_connector_mode_valid,
- .best_encoder = rcar_du_connector_best_encoder,
-};
-
-static enum drm_connector_status
-rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct rcar_du_connector *con = to_rcar_connector(connector);
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->detect == NULL)
- return connector_status_unknown;
-
- return sfuncs->detect(encoder, connector);
-}
-
-static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = rcar_du_hdmi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_connector *rcon;
- struct drm_connector *connector;
- int ret;
-
- rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
- if (rcon == NULL)
- return -ENOMEM;
-
- connector = &rcon->connector;
- connector->display_info.width_mm = 0;
- connector->display_info.height_mm = 0;
- connector->interlace_allowed = true;
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0)
- return ret;
-
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_object_property_set_value(&connector->base,
- rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
-
- ret = drm_mode_connector_attach_encoder(connector, encoder);
- if (ret < 0)
- return ret;
-
- rcon->encoder = renc;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
deleted file mode 100644
index 87daa949227f..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * R-Car Display Unit HDMI Connector
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_HDMICON_H__
-#define __RCAR_DU_HDMICON_H__
-
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
-int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc);
-#else
-static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
-{
- return -ENOSYS;
-}
-#endif
-
-#endif /* __RCAR_DU_HDMICON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 461662d231e2..e03004f4588d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -16,7 +16,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -25,20 +24,14 @@
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
- struct device *dev;
bool enabled;
};
#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
-#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -50,15 +43,11 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
true);
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
-
hdmienc->enabled = true;
}
@@ -67,29 +56,21 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- const struct drm_display_mode *mode = &crtc_state->mode;
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
adjusted_mode);
- if (sfuncs->mode_fixup == NULL)
- return 0;
-
- return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
+ return 0;
}
+
static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->mode_set)
- sfuncs->mode_set(encoder, mode, adjusted_mode);
rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
}
@@ -109,7 +90,6 @@ static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
rcar_du_hdmienc_disable(encoder);
drm_encoder_cleanup(encoder);
- put_device(hdmienc->dev);
}
static const struct drm_encoder_funcs encoder_funcs = {
@@ -120,8 +100,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc, struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct drm_i2c_encoder_driver *driver;
- struct i2c_client *i2c_slave;
+ struct drm_bridge *bridge;
struct rcar_du_hdmienc *hdmienc;
int ret;
@@ -129,44 +108,30 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate the slave I2C device and driver. */
- i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
- dev_dbg(rcdu->dev,
- "can't get I2C slave for %s, deferring probe\n",
- of_node_full_name(np));
+ /* Locate the DRM bridge from the HDMI encoder DT node */
+ bridge = of_drm_find_bridge(np);
+ if (!bridge)
return -EPROBE_DEFER;
- }
-
- hdmienc->dev = &i2c_slave->dev;
-
- if (hdmienc->dev->driver == NULL) {
- dev_dbg(rcdu->dev,
- "I2C slave %s not probed yet, deferring probe\n",
- dev_name(hdmienc->dev));
- ret = -EPROBE_DEFER;
- goto error;
- }
-
- /* Initialize the slave encoder. */
- driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
- ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
- if (ret < 0)
- goto error;
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret < 0)
- goto error;
+ return ret;
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- return 0;
+ /* Link drm_bridge to encoder */
+ bridge->encoder = encoder;
+ encoder->bridge = bridge;
+
+ ret = drm_bridge_attach(rcdu->ddev, bridge);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
-error:
- put_device(hdmienc->dev);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index e70a4f33d970..f03eb55318c1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
{
struct rcar_du_device *rcdu = dev->dev_private;
struct rcar_du_commit *commit;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
unsigned int i;
int ret;
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
spin_lock(&rcdu->commit.wait.lock);
ret = wait_event_interruptible_locked(rcdu->commit.wait,
@@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
}
/* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
schedule_work(&commit->work);
@@ -527,11 +527,6 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
if (rcdu->props.colorkey == NULL)
return -ENOMEM;
- rcdu->props.zpos =
- drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
- if (rcdu->props.zpos == NULL)
- return -ENOMEM;
-
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index e905f5da7aaa..6afd0af312ba 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
- .best_encoder = rcar_du_connector_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index d445e67f78e1..a74f8ed8ca2e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
bool needs_realloc = false;
unsigned int groups = 0;
unsigned int i;
+ struct drm_plane *drm_plane;
+ struct drm_plane_state *drm_plane_state;
/* Check if hardware planes need to be reallocated. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane *plane;
unsigned int index;
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
+ plane = to_rcar_plane(drm_plane);
+ plane_state = to_rcar_plane_state(drm_plane_state);
dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
/* Reallocate hardware planes for each plane that needs it. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane *plane;
unsigned int crtc_planes;
unsigned int free;
int idx;
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
+ plane = to_rcar_plane(drm_plane);
+ plane_state = to_rcar_plane_state(drm_plane_state);
dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
@@ -656,7 +652,7 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
state->source = RCAR_DU_PLANE_MEMORY;
state->alpha = 255;
state->colorkey = RCAR_DU_COLORKEY_NONE;
- state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
+ state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
plane->state = &state->state;
plane->state->plane = plane;
@@ -674,8 +670,6 @@ static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
rstate->alpha = val;
else if (property == rcdu->props.colorkey)
rstate->colorkey = val;
- else if (property == rcdu->props.zpos)
- rstate->zpos = val;
else
return -EINVAL;
@@ -694,8 +688,6 @@ static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
*val = rstate->alpha;
else if (property == rcdu->props.colorkey)
*val = rstate->colorkey;
- else if (property == rcdu->props.zpos)
- *val = rstate->zpos;
else
return -EINVAL;
@@ -767,8 +759,7 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
- drm_object_attach_property(&plane->plane.base,
- rcdu->props.zpos, 1);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index b18b7b25dbfa..8b91dd3a46e4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -51,7 +51,6 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
* @hwindex: 0-based hardware plane index, -1 means unused
* @alpha: value of the plane alpha property
* @colorkey: value of the plane colorkey property
- * @zpos: value of the plane zpos property
*/
struct rcar_du_plane_state {
struct drm_plane_state state;
@@ -62,7 +61,6 @@ struct rcar_du_plane_state {
unsigned int alpha;
unsigned int colorkey;
- unsigned int zpos;
};
static inline struct rcar_du_plane_state *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index d2f66068e52c..fedb0161e234 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -195,9 +195,10 @@
#define DEFR6_ODPM12_DISP (2 << 8)
#define DEFR6_ODPM12_CDE (3 << 8)
#define DEFR6_ODPM12_MASK (3 << 8)
-#define DEFR6_TCNE2 (1 << 6)
+#define DEFR6_TCNE1 (1 << 6)
+#define DEFR6_TCNE0 (1 << 4)
#define DEFR6_MLOS1 (1 << 2)
-#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
+#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1)
/* -----------------------------------------------------------------------------
* R8A7790-only Control Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d7e5c99caf6..8d6125c1c0f9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_vga_connector_get_modes,
- .best_encoder = rcar_du_connector_best_encoder,
};
static enum drm_connector_status
@@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- rcon->encoder = renc;
-
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 6ac717f2056f..83ebd162f3ef 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -43,12 +43,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
.src_y = 0,
.src_w = mode->hdisplay << 16,
.src_h = mode->vdisplay << 16,
+ .zpos = 0,
},
.format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
.source = RCAR_DU_PLANE_VSPD1,
.alpha = 255,
.colorkey = 0,
- .zpos = 0,
};
if (rcdu->info->gen >= 3)
@@ -152,7 +152,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
.pixelformat = 0,
.pitch = fb->pitches[0],
.alpha = state->alpha,
- .zpos = state->zpos,
+ .zpos = state->state.zpos,
};
unsigned int i;
@@ -267,7 +267,7 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
return;
state->alpha = 255;
- state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
+ state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
plane->state = &state->state;
plane->state->plane = plane;
@@ -282,8 +282,6 @@ static int rcar_du_vsp_plane_atomic_set_property(struct drm_plane *plane,
if (property == rcdu->props.alpha)
rstate->alpha = val;
- else if (property == rcdu->props.zpos)
- rstate->zpos = val;
else
return -EINVAL;
@@ -300,8 +298,6 @@ static int rcar_du_vsp_plane_atomic_get_property(struct drm_plane *plane,
if (property == rcdu->props.alpha)
*val = rstate->alpha;
- else if (property == rcdu->props.zpos)
- *val = rstate->zpos;
else
return -EINVAL;
@@ -381,8 +377,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
drm_object_attach_property(&plane->plane.base,
rcdu->props.alpha, 255);
- drm_object_attach_property(&plane->plane.base,
- rcdu->props.zpos, 1);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1,
+ vsp->num_planes - 1);
}
return 0;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index d30bdc38a760..3c58669a06ce 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,9 @@ config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
depends on RESET_CONTROLLER
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_PANEL
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Rockchip soc chipset.
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 7f6a55cae27a..89aadbf465f8 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -14,6 +14,7 @@
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -33,13 +34,28 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
+#define RK3288_GRF_SOC_CON6 0x25c
+#define RK3288_EDP_LCDC_SEL BIT(5)
+#define RK3399_GRF_SOC_CON20 0x6250
+#define RK3399_EDP_LCDC_SEL BIT(5)
+
+#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
+
#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm)
-/* dp grf register offset */
-#define GRF_SOC_CON6 0x025c
-#define GRF_EDP_LCD_SEL_MASK BIT(5)
-#define GRF_EDP_SEL_VOP_LIT BIT(5)
-#define GRF_EDP_SEL_VOP_BIG 0
+/**
+ * struct rockchip_dp_chip_data - split out the GRF settings for different chips
+ * @lcdsel_grf_reg: grf register offset of lcdc select
+ * @lcdsel_big: reg value of selecting vop big for eDP
+ * @lcdsel_lit: reg value of selecting vop little for eDP
+ * @chip_type: specific chip type
+ */
+struct rockchip_dp_chip_data {
+ u32 lcdsel_grf_reg;
+ u32 lcdsel_big;
+ u32 lcdsel_lit;
+ u32 chip_type;
+};
struct rockchip_dp_device {
struct drm_device *drm_dev;
@@ -48,9 +64,12 @@ struct rockchip_dp_device {
struct drm_display_mode mode;
struct clk *pclk;
+ struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ const struct rockchip_dp_chip_data *data;
+
struct analogix_dp_plat_data plat_data;
};
@@ -77,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+ clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -92,6 +112,23 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
return 0;
}
+static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+ struct drm_connector *connector)
+{
+ struct drm_display_info *di = &connector->display_info;
+ /* The VOP cannot output YUV video formats to eDP correctly */
+ u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
+
+ if ((di->color_formats & mask)) {
+ DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
+ di->color_formats &= ~mask;
+ di->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ di->bpc = 8;
+ }
+
+ return 0;
+}
+
static bool
rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
@@ -119,17 +156,23 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
return;
if (ret)
- val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+ val = dp->data->lcdsel_lit;
else
- val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+ val = dp->data->lcdsel_big;
dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
- ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
- if (ret != 0) {
- dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+ ret = clk_prepare_enable(dp->grfclk);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
+
+ ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+ if (ret != 0)
+ dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+
+ clk_disable_unprepare(dp->grfclk);
}
static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
@@ -143,22 +186,29 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ int ret;
/*
- * FIXME(Yakir): driver should configure the CRTC output video
- * mode with the display information which indicated the monitor
- * support colorimetry.
- *
- * But don't know why the CRTC driver seems could only output the
- * RGBaaa rightly. For example, if connect the "innolux,n116bge"
- * eDP screen, EDID would indicated that screen only accepted the
- * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
- * screen would show a blue picture (RGB888 show a green picture).
- * But if I configure CTRC to RGBaaa, and eDP driver still keep
- * RGB666 input video mode, then screen would works prefect.
+ * The hardware design requires the VOP to output RGB10 video data
+ * to the eDP controller; if the eDP panel only supports RGB8, the
+ * eDP controller (not the VOP) is expected to cut down the video
+ * data. That is why the VOP output mode is hardcoded to RGB10
+ * here.
*/
+
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_eDP;
+ if (dp->data->chip_type == RK3399_EDP) {
+ /*
+ * For RK3399, VOP Lit must set the output mode to RGB888 and
+ * VOP Big must set it to RGB10.
+ */
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
+ encoder);
+ if (ret > 0)
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ }
return 0;
}
@@ -192,6 +242,16 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
return PTR_ERR(dp->grf);
}
+ dp->grfclk = devm_clk_get(dev, "grf");
+ if (PTR_ERR(dp->grfclk) == -ENOENT) {
+ dp->grfclk = NULL;
+ } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(dp->grfclk)) {
+ dev_err(dev, "failed to get grf clock\n");
+ return PTR_ERR(dp->grfclk);
+ }
+
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
dev_err(dev, "failed to get pclk property\n");
@@ -213,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to pre init %d\n", ret);
+ clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -246,6 +307,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+ const struct rockchip_dp_chip_data *dp_data;
struct drm_device *drm_dev = data;
int ret;
@@ -256,10 +318,15 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
*/
dev_set_drvdata(dev, NULL);
+ dp_data = of_device_get_match_data(dev);
+ if (!dp_data)
+ return -ENODEV;
+
ret = rockchip_dp_init(dp);
if (ret < 0)
return ret;
+ dp->data = dp_data;
dp->drm_dev = drm_dev;
ret = rockchip_dp_drm_create_encoder(dp);
@@ -270,9 +337,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.encoder = &dp->encoder;
- dp->plat_data.dev_type = RK3288_DP;
+ dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
+ dp->plat_data.get_modes = rockchip_dp_get_modes;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
}
@@ -292,38 +360,33 @@ static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *panel_node, *port, *endpoint;
+ struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
- struct drm_panel *panel;
port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- dev_err(dev, "can't find output port\n");
- return -EINVAL;
- }
-
- endpoint = of_get_child_by_name(port, "endpoint");
- of_node_put(port);
- if (!endpoint) {
- dev_err(dev, "no output endpoint found\n");
- return -EINVAL;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no output node found\n");
- return -EINVAL;
- }
-
- panel = of_drm_find_panel(panel_node);
- if (!panel) {
- DRM_ERROR("failed to find panel\n");
+ if (port) {
+ endpoint = of_get_child_by_name(port, "endpoint");
+ of_node_put(port);
+ if (!endpoint) {
+ dev_err(dev, "no output endpoint found\n");
+ return -EINVAL;
+ }
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!panel_node) {
+ dev_err(dev, "no output node found\n");
+ return -EINVAL;
+ }
+
+ panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- return -EPROBE_DEFER;
+ if (!panel) {
+ DRM_ERROR("failed to find panel\n");
+ return -EPROBE_DEFER;
+ }
}
- of_node_put(panel_node);
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -349,24 +412,30 @@ static int rockchip_dp_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
#ifdef CONFIG_PM_SLEEP
-static int rockchip_dp_suspend(struct device *dev)
-{
- return analogix_dp_suspend(dev);
-}
-
-static int rockchip_dp_resume(struct device *dev)
-{
- return analogix_dp_resume(dev);
-}
+ .suspend = analogix_dp_suspend,
+ .resume_early = analogix_dp_resume,
#endif
+};
-static const struct dev_pm_ops rockchip_dp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
+static const struct rockchip_dp_chip_data rk3399_edp = {
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
+ .chip_type = RK3399_EDP,
+};
+
+static const struct rockchip_dp_chip_data rk3288_dp = {
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
+ .chip_type = RK3288_DP,
};
static const struct of_device_id rockchip_dp_dt_ids[] = {
- {.compatible = "rockchip,rk3288-dp",},
+ {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
+ {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index dedc65b40f36..ca22e5ee89ca 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid(
return mode_status;
}
-static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
- return &dsi->encoder;
-}
-
static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
.get_modes = dw_mipi_dsi_connector_get_modes,
.mode_valid = dw_mipi_dsi_mode_valid,
- .best_encoder = dw_mipi_dsi_connector_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 801110f65a63..0665fb915579 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -15,7 +15,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/bridge/dw_hdmi.h>
#include "rockchip_drm_drv.h"
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8b4feb60b25..006260de9dbd 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-inno_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct inno_hdmi *hdmi = to_inno_hdmi(connector);
-
- return &hdmi->encoder;
-}
-
static int
inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
@@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = {
static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
.get_modes = inno_hdmi_connector_get_modes,
.mode_valid = inno_hdmi_connector_mode_valid,
- .best_encoder = inno_hdmi_connector_best_encoder,
};
static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a409d1f703cb..a822d49a255a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,11 +19,13 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/console.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
@@ -37,6 +39,7 @@
#define DRIVER_MINOR 0
static bool is_support_iommu = true;
+static struct drm_driver rockchip_drm_driver;
/*
* Attach a (component) device to the shared drm dma mapping from master drm
@@ -76,7 +79,7 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return -EINVAL;
priv->crtc_funcs[pipe] = crtc_funcs;
@@ -89,7 +92,7 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return;
priv->crtc_funcs[pipe] = NULL;
@@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
priv->crtc_funcs[pipe]->disable_vblank(crtc);
}
-static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+static int rockchip_drm_bind(struct device *dev)
{
+ struct drm_device *drm_dev;
struct rockchip_drm_private *private;
struct dma_iommu_mapping *mapping = NULL;
- struct device *dev = drm_dev->dev;
- struct drm_connector *connector;
int ret;
- private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
- if (!private)
+ drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
+ if (!drm_dev)
return -ENOMEM;
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, rockchip_drm_atomic_work);
+ dev_set_drvdata(dev, drm_dev);
+
+ private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+ if (!private) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
drm_dev->dev_private = private;
@@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_detach_device;
- /*
- * All components are now added, we can publish the connector sysfs
- * entries to userspace. This will generate hotplug events and so
- * userspace will expect to be able to access DRM at this point.
- */
- list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
- head) {
- ret = drm_connector_register(connector);
- if (ret) {
- dev_err(drm_dev->dev,
- "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
- connector->base.id,
- connector->name, ret);
- goto err_unbind;
- }
- }
-
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_vblank_cleanup;
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret)
+ goto err_fbdev_fini;
+
if (is_support_iommu)
arm_iommu_release_mapping(mapping);
return 0;
+err_fbdev_fini:
+ rockchip_drm_fbdev_fini(drm_dev);
err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
-err_unbind:
component_unbind_all(dev, drm_dev);
err_detach_device:
if (is_support_iommu)
@@ -240,12 +235,14 @@ err_release_mapping:
err_config_cleanup:
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
+err_free:
+ drm_dev_unref(drm_dev);
return ret;
}
-static int rockchip_drm_unload(struct drm_device *drm_dev)
+static void rockchip_drm_unbind(struct device *dev)
{
- struct device *dev = drm_dev->dev;
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
rockchip_drm_fbdev_fini(drm_dev);
drm_vblank_cleanup(drm_dev);
@@ -255,32 +252,12 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
arm_iommu_detach_device(dev);
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
-
- return 0;
-}
-
-static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
- struct drm_file *file_priv)
-{
- struct rockchip_drm_private *priv = crtc->dev->dev_private;
- int pipe = drm_crtc_index(crtc);
-
- if (pipe < ROCKCHIP_MAX_CRTC &&
- priv->crtc_funcs[pipe] &&
- priv->crtc_funcs[pipe]->cancel_pending_vblank)
- priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
-}
-
-static void rockchip_drm_preclose(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+ drm_dev_unregister(drm_dev);
+ drm_dev_unref(drm_dev);
+ dev_set_drvdata(dev, NULL);
}
-void rockchip_drm_lastclose(struct drm_device *dev)
+static void rockchip_drm_lastclose(struct drm_device *dev)
{
struct rockchip_drm_private *priv = dev->dev_private;
@@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = {
.release = drm_release,
};
-const struct vm_operations_struct rockchip_drm_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_PRIME | DRIVER_ATOMIC,
- .load = rockchip_drm_load,
- .unload = rockchip_drm_unload,
- .preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
.disable_vblank = rockchip_drm_crtc_disable_vblank,
- .gem_vm_ops = &rockchip_drm_vm_ops,
- .gem_free_object = rockchip_gem_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.dumb_map_offset = rockchip_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
@@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = {
};
#ifdef CONFIG_PM_SLEEP
-static int rockchip_drm_sys_suspend(struct device *dev)
+void rockchip_drm_fb_suspend(struct drm_device *drm)
{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_connector *connector;
+ struct rockchip_drm_private *priv = drm->dev_private;
- if (!drm)
- return 0;
+ console_lock();
+ drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
+ console_unlock();
+}
- drm_modeset_lock_all(drm);
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- int old_dpms = connector->dpms;
+void rockchip_drm_fb_resume(struct drm_device *drm)
+{
+ struct rockchip_drm_private *priv = drm->dev_private;
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+ console_lock();
+ drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
+ console_unlock();
+}
- /* Set the old mode back to the connector for resume */
- connector->dpms = old_dpms;
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct rockchip_drm_private *priv = drm->dev_private;
+
+ drm_kms_helper_poll_disable(drm);
+ rockchip_drm_fb_suspend(drm);
+
+ priv->state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(priv->state)) {
+ rockchip_drm_fb_resume(drm);
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(priv->state);
}
- drm_modeset_unlock_all(drm);
return 0;
}
@@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev)
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_connector *connector;
- enum drm_connector_status status;
- bool changed = false;
-
- if (!drm)
- return 0;
+ struct rockchip_drm_private *priv = drm->dev_private;
- drm_modeset_lock_all(drm);
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- int desired_mode = connector->dpms;
-
- /*
- * at suspend time, we save dpms to connector->dpms,
- * restore the old_dpms, and at current time, the connector
- * dpms status must be DRM_MODE_DPMS_OFF.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
-
- /*
- * If the connector has been disconnected during suspend,
- * disconnect it from the encoder and leave it off. We'll notify
- * userspace at the end.
- */
- if (desired_mode == DRM_MODE_DPMS_ON) {
- status = connector->funcs->detect(connector, true);
- if (status == connector_status_disconnected) {
- connector->encoder = NULL;
- connector->status = status;
- changed = true;
- continue;
- }
- }
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, desired_mode);
- }
- drm_modeset_unlock_all(drm);
-
- drm_helper_resume_force_mode(drm);
-
- if (changed)
- drm_kms_helper_hotplug_event(drm);
+ drm_atomic_helper_resume(drm, priv->state);
+ rockchip_drm_fb_resume(drm);
+ drm_kms_helper_poll_enable(drm);
return 0;
}
@@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev,
}
}
-static int rockchip_drm_bind(struct device *dev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&rockchip_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_free;
-
- dev_set_drvdata(dev, drm);
-
- return 0;
-
-err_free:
- drm_dev_unref(drm);
- return ret;
-}
-
-static void rockchip_drm_unbind(struct device *dev)
-{
- struct drm_device *drm = dev_get_drvdata(dev);
-
- drm_dev_unregister(drm);
- drm_dev_unref(drm);
- dev_set_drvdata(dev, NULL);
-}
-
static const struct component_master_ops rockchip_drm_ops = {
.bind = rockchip_drm_bind,
.unbind = rockchip_drm_unbind,
@@ -518,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
is_support_iommu = false;
}
+ of_node_put(iommu);
component_match_add(dev, &match, compare_of, port->parent);
of_node_put(port);
}
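
Illustrative sketch (not part of the diff): the system-sleep pattern the rockchip_drm_drv.c hunks above move to, where drm_atomic_helper_suspend() captures and disables the current display state and drm_atomic_helper_resume() replays it, with the fbdev console parked around it. The "example_" names and the private-data layout are assumptions for the sketch, not rockchip code.

#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

struct example_drm_private {
	struct drm_fb_helper fbdev_helper;
	struct drm_atomic_state *state;		/* saved across suspend */
};

static int example_drm_sys_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct example_drm_private *priv = drm->dev_private;

	/* stop HPD polling and park the fbdev console first */
	drm_kms_helper_poll_disable(drm);
	console_lock();
	drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
	console_unlock();

	/* disable all CRTCs and keep the duplicated state for resume */
	priv->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(priv->state)) {
		console_lock();
		drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
		console_unlock();
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(priv->state);
	}

	return 0;
}

static int example_drm_sys_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct example_drm_private *priv = drm->dev_private;

	/* replay the saved state, then wake the console and polling back up */
	drm_atomic_helper_resume(drm, priv->state);
	console_lock();
	drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
	console_unlock();
	drm_kms_helper_poll_enable(drm);

	return 0;
}
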
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 56f43a364c7f..ea3932940061 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,14 +40,6 @@ struct rockchip_crtc_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
void (*wait_for_update)(struct drm_crtc *crtc);
- void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
-};
-
-struct rockchip_atomic_commit {
- struct work_struct work;
- struct drm_atomic_state *state;
- struct drm_device *dev;
- struct mutex lock;
};
struct rockchip_crtc_state {
@@ -68,11 +60,9 @@ struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
-
- struct rockchip_atomic_commit commit;
+ struct drm_atomic_state *state;
};
-void rockchip_drm_atomic_work(struct work_struct *work);
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
const struct rockchip_crtc_funcs *crtc_funcs);
void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 755cfdba61cd..55c52734c52d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
@@ -43,14 +44,10 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- struct drm_gem_object *obj;
int i;
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
- obj = rockchip_fb->obj[i];
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
- }
+ for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
+ drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(rockchip_fb);
@@ -228,87 +225,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat
}
static void
-rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
+rockchip_atomic_commit_tail(struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = commit->state;
- struct drm_device *dev = commit->dev;
+ struct drm_device *dev = state->dev;
- /*
- * TODO: do fence wait here.
- */
-
- /*
- * Rockchip crtc support runtime PM, can't update display planes
- * when crtc is disabled.
- *
- * drm_atomic_helper_commit comments detail that:
- * For drivers supporting runtime PM the recommended sequence is
- *
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- *
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- *
- * drm_atomic_helper_commit_planes(dev, state, true);
- *
- * See the kerneldoc entries for these three functions for more details.
- */
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, true);
+ drm_atomic_helper_commit_hw_done(state);
+
rockchip_atomic_wait_for_complete(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
-}
-
-void rockchip_drm_atomic_work(struct work_struct *work)
-{
- struct rockchip_atomic_commit *commit = container_of(work,
- struct rockchip_atomic_commit, work);
-
- rockchip_atomic_commit_complete(commit);
}
-int rockchip_drm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct rockchip_drm_private *private = dev->dev_private;
- struct rockchip_atomic_commit *commit = &private->commit;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* serialize outstanding nonblocking commits */
- mutex_lock(&commit->lock);
- flush_work(&commit->work);
-
- drm_atomic_helper_swap_state(dev, state);
-
- commit->dev = dev;
- commit->state = state;
-
- if (nonblock)
- schedule_work(&commit->work);
- else
- rockchip_atomic_commit_complete(commit);
-
- mutex_unlock(&commit->lock);
-
- return 0;
-}
+static struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
+ .atomic_commit_tail = rockchip_atomic_commit_tail,
+};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
.fb_create = rockchip_user_fb_create,
.output_poll_changed = rockchip_drm_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = rockchip_drm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
struct drm_framebuffer *
@@ -339,4 +281,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.max_height = 4096;
dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+ dev->mode_config.helper_private = &rockchip_mode_config_helpers;
}
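
A minimal sketch of the commit path the rockchip_drm_fb.c hunk above switches to: .atomic_commit is handed to the generic drm_atomic_helper_commit() and the driver only supplies an atomic_commit_tail hook through drm_mode_config_helper_funcs. The "example_" names are placeholders; the call sequence mirrors the hunk above.

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

/*
 * Called by the helper once the hardware may be touched: order the
 * disables/enables around the plane update, signal hw_done so the next
 * commit can be prepared, then wait for the vblank that latches the
 * new configuration before cleaning up the old planes.
 */
static void example_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, true);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}

static struct drm_mode_config_helper_funcs example_mode_config_helpers = {
	.atomic_commit_tail = example_atomic_commit_tail,
};

static const struct drm_mode_config_funcs example_mode_config_funcs = {
	/* .fb_create / .output_poll_changed omitted from this sketch */
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* at init time:
 *	dev->mode_config.funcs = &example_mode_config_funcs;
 *	dev->mode_config.helper_private = &example_mode_config_helpers;
 */
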
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index f261512bb4a0..207e01de6e32 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fbi->screen_size = rk_obj->base.size;
fbi->fix.smem_len = rk_obj->base.size;
- DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
fb->width, fb->height, fb->depth, rk_obj->kvaddr,
offset, size);
@@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
goto err_drm_fb_helper_fini;
}
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 9c2d8a894093..b70f9423379c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -17,8 +17,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
-#include <linux/dma-attrs.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
@@ -28,17 +26,16 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
- init_dma_attrs(&rk_obj->dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+ rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);
+ rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
if (!rk_obj->kvaddr) {
- DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
+ DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
return -ENOMEM;
}
@@ -51,7 +48,7 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
struct drm_device *drm = obj->dev;
dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
}
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
@@ -70,7 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
- obj->size, &rk_obj->dma_attrs);
+ obj->size, rk_obj->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
@@ -262,7 +259,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
rk_obj->dma_addr, obj->size,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
@@ -276,7 +273,7 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
+ if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return NULL;
return rk_obj->kvaddr;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index ad22618473a4..18b3488db4ec 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -23,7 +23,7 @@ struct rockchip_gem_object {
void *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
};
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
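
For context on the rockchip_drm_gem.[ch] hunks above (not part of the diff): after the DMA-attribute rework merged in this cycle, attributes are a plain unsigned long bitmask passed by value rather than a struct dma_attrs built with init_dma_attrs()/dma_set_attr(). A hedged sketch with hypothetical "example_" names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct example_buf {
	void *vaddr;
	dma_addr_t dma_addr;
	unsigned long dma_attrs;	/* was: struct dma_attrs */
	size_t size;
};

static int example_buf_alloc(struct device *dev, struct example_buf *buf,
			     bool alloc_kmap)
{
	/* build the attribute mask directly; no init_dma_attrs() needed */
	buf->dma_attrs = DMA_ATTR_WRITE_COMBINE;
	if (!alloc_kmap)
		buf->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	buf->vaddr = dma_alloc_attrs(dev, buf->size, &buf->dma_addr,
				     GFP_KERNEL, buf->dma_attrs);
	return buf->vaddr ? 0 : -ENOMEM;
}

static void example_buf_free(struct device *dev, struct example_buf *buf)
{
	/* the same mask is passed by value on the free path */
	dma_free_attrs(dev, buf->size, buf->vaddr, buf->dma_addr,
		       buf->dma_attrs);
}
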
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1c4d5b5a70a2..91305eb7d312 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -98,7 +98,9 @@ struct vop_win {
const struct vop_win_data *data;
struct vop *vop;
- struct vop_plane_state state;
+ /* protected by dev->event_lock */
+ bool enable;
+ dma_addr_t yrgb_mst;
};
struct vop {
@@ -112,6 +114,8 @@ struct vop {
bool vsync_work_pending;
struct completion dsp_hold_completion;
struct completion wait_update_complete;
+
+ /* protected by dev->event_lock */
struct drm_pending_vblank_event *event;
const struct vop_data *data;
@@ -324,9 +328,9 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
scl_cal_scale2(src_h, dst_h));
if (is_yuv) {
VOP_SCL_SET(vop, win, scale_cbcr_x,
- scl_cal_scale2(src_w, dst_w));
+ scl_cal_scale2(cbcr_src_w, dst_w));
VOP_SCL_SET(vop, win, scale_cbcr_y,
- scl_cal_scale2(src_h, dst_h));
+ scl_cal_scale2(cbcr_src_h, dst_h));
}
return;
}
@@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
int ret;
- if (vop->is_enabled)
- return;
-
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
@@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
int i;
- if (!vop->is_enabled)
- return;
+ WARN_ON(vop->event);
/*
* We need to make sure that all windows are disabled before we
@@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
clk_disable(vop->aclk);
clk_disable(vop->hclk);
pm_runtime_put(vop->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
}
static void vop_plane_destroy(struct drm_plane *plane)
@@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
ret = drm_plane_helper_check_update(plane, crtc, state->fb,
src, dest, &clip,
+ state->rotation,
min_scale,
max_scale,
true, true, &visible);
@@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
if (!old_state->crtc)
return;
+ spin_lock_irq(&plane->dev->event_lock);
+ vop_win->enable = false;
+ vop_win->yrgb_mst = 0;
+ spin_unlock_irq(&plane->dev->event_lock);
+
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, enable, 0);
@@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
/*
* can't update plane when vop is disabled.
*/
- if (!crtc)
+ if (WARN_ON(!crtc))
return;
if (WARN_ON(!vop->is_enabled))
@@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
offset += (src->y1 >> 16) * fb->pitches[0];
vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+ spin_lock_irq(&plane->dev->event_lock);
+ vop_win->enable = true;
+ vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
+ spin_unlock_irq(&plane->dev->event_lock);
+
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, vop_plane_state->format);
@@ -779,7 +798,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
.atomic_disable = vop_plane_atomic_disable,
};
-void vop_atomic_plane_reset(struct drm_plane *plane)
+static void vop_atomic_plane_reset(struct drm_plane *plane)
{
struct vop_plane_state *vop_plane_state =
to_vop_plane_state(plane->state);
@@ -796,7 +815,7 @@ void vop_atomic_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
}
-struct drm_plane_state *
+static struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
struct vop_plane_state *old_vop_plane_state;
@@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}
-static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
- struct drm_file *file_priv)
-{
- struct drm_device *drm = crtc->dev;
- struct vop *vop = to_vop(crtc);
- struct drm_pending_vblank_event *e;
- unsigned long flags;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- e = vop->event;
- if (e && e->base.file_priv == file_priv) {
- vop->event = NULL;
-
- e->base.destroy(&e->base);
- file_priv->event_space += sizeof(e->event);
- }
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
static const struct rockchip_crtc_funcs private_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.wait_for_update = vop_crtc_wait_for_update,
- .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
u16 vact_end = vact_st + vdisplay;
uint32_t val;
+ WARN_ON(vop->event);
+
vop_enable(crtc);
/*
* If dclk rate is zero, mean that scanout is stop,
@@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct vop *vop = to_vop(crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ WARN_ON(vop->event);
vop->event = crtc->state->event;
crtc->state->event = NULL;
}
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1048,6 +1052,17 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
+static void vop_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ kfree(crtc->state);
+
+ crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+
static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct rockchip_crtc_state *rockchip_state;
@@ -1073,23 +1088,21 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = vop_crtc_destroy,
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = vop_crtc_reset,
.atomic_duplicate_state = vop_crtc_duplicate_state,
.atomic_destroy_state = vop_crtc_destroy_state,
};
static bool vop_win_pending_is_complete(struct vop_win *vop_win)
{
- struct drm_plane *plane = &vop_win->base;
- struct vop_plane_state *state = to_vop_plane_state(plane->state);
dma_addr_t yrgb_mst;
- if (!state->enable)
+ if (!vop_win->enable)
return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
- return yrgb_mst == state->yrgb_mst;
+ return yrgb_mst == vop_win->yrgb_mst;
}
static void vop_handle_vblank(struct vop *vop)
@@ -1104,15 +1117,16 @@ static void vop_handle_vblank(struct vop *vop)
return;
}
+ spin_lock_irqsave(&drm->event_lock, flags);
if (vop->event) {
- spin_lock_irqsave(&drm->event_lock, flags);
drm_crtc_send_vblank_event(crtc, vop->event);
drm_crtc_vblank_put(crtc);
vop->event = NULL;
- spin_unlock_irqrestore(&drm->event_lock, flags);
}
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+
if (!completion_done(&vop->wait_update_complete))
complete(&vop->wait_update_complete);
}
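
An illustrative sketch of the event handling the VOP hunks above converge on: the pending drm_pending_vblank_event is only touched under dev->event_lock, it is claimed from crtc->state->event in atomic_begin, and it is completed from the vblank handler (or directly when the CRTC is disabled). Structure and function names below are hypothetical.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

struct example_crtc {
	struct drm_crtc base;
	/* protected by base.dev->event_lock */
	struct drm_pending_vblank_event *event;
};

static void example_crtc_atomic_begin(struct example_crtc *ec)
{
	struct drm_crtc *crtc = &ec->base;

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		WARN_ON(ec->event);	/* no event may still be pending */
		ec->event = crtc->state->event;
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

/* called from the vblank interrupt once the new scanout address is latched */
static void example_crtc_handle_vblank(struct example_crtc *ec)
{
	struct drm_crtc *crtc = &ec->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (ec->event) {
		drm_crtc_send_vblank_event(crtc, ec->event);
		drm_crtc_vblank_put(crtc);
		ec->event = NULL;
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
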
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 3166b46a5893..919992cdc97e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -190,7 +190,7 @@ static const struct vop_data rk3288_vop = {
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
};
-static const struct vop_scl_regs rk3066_win_scl = {
+static const struct vop_scl_regs rk3036_win_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
@@ -198,7 +198,7 @@ static const struct vop_scl_regs rk3066_win_scl = {
};
static const struct vop_win_phy rk3036_win0_data = {
- .scl = &rk3066_win_scl,
+ .scl = &rk3036_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
@@ -210,6 +210,7 @@ static const struct vop_win_phy rk3036_win0_data = {
.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+ .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
};
static const struct vop_win_phy rk3036_win1_data = {
@@ -299,7 +300,7 @@ static int vop_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver vop_platform_driver = {
+static struct platform_driver vop_platform_driver = {
.probe = vop_probe,
.remove = vop_remove,
.driver = {
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 8d17d00ddb4b..c987c826daa3 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -6,7 +6,6 @@ config DRM_SHMOBILE
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 1e154fc779d5..6547b1db460a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
scrtc->event = NULL;
if (event) {
drm_crtc_send_vblank_event(&scrtc->crtc, event);
- drm_vblank_put(dev, 0);
+ drm_crtc_vblank_put(&scrtc->crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
if (event) {
event->pipe = 0;
- drm_vblank_get(dev, 0);
+ drm_crtc_vblank_get(&scrtc->crtc);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7700ff172079..f0492603ea88 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = {
| DRIVER_PRIME,
.load = shmob_drm_load,
.unload = shmob_drm_unload,
- .set_busid = drm_platform_set_busid,
.irq_handler = shmob_drm_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = shmob_drm_enable_vblank,
.disable_vblank = shmob_drm_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 93ad8a5704d1..03defda77766 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
struct sis_file_private *file_priv = file->driver_priv;
struct sis_memblock *entry, *next;
- if (!(file->minor->master && file->master->lock.hw_lock))
+ if (!(dev->master && file->master->lock.hw_lock))
return;
drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 5ad43a1bb260..494ab257f77c 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -7,5 +7,6 @@ config DRM_STI
select DRM_KMS_CMA_HELPER
select DRM_PANEL
select FW_LOADER
+ select SND_SOC_HDMI_CODEC if SND_SOC
help
Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index a516eb869f6f..2da7d6866d5d 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -6,6 +6,8 @@
#include "sti_awg_utils.h"
+#define AWG_DELAY (-5)
+
#define AWG_OPCODE_OFFSET 10
#define AWG_MAX_ARG 0x3ff
@@ -125,7 +127,7 @@ static int awg_generate_line_signal(
val = timing->blanking_level;
ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
- val = timing->trailing_pixels - 1;
+ val = timing->trailing_pixels - 1 + AWG_DELAY;
ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
}
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 3d2fa3ab33df..134201ecc6fd 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = {
},
};
+int sti_compositor_debufs_init(struct sti_compositor *compo,
+ struct drm_minor *minor)
+{
+ int ret = 0, i;
+
+ for (i = 0; compo->vid[i]; i++) {
+ ret = vid_debugfs_init(compo->vid[i], minor);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; compo->mixer[i]; i++) {
+ ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int sti_compositor_bind(struct device *dev,
struct device *master,
void *data)
@@ -234,12 +254,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
}
/* Get reset resources */
- compo->rst_main = devm_reset_control_get(dev, "compo-main");
+ compo->rst_main = devm_reset_control_get_shared(dev, "compo-main");
/* Take compo main out of reset */
if (!IS_ERR(compo->rst_main))
reset_control_deassert(compo->rst_main);
- compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+ compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux");
/* Take compo aux out of reset */
if (!IS_ERR(compo->rst_aux))
reset_control_deassert(compo->rst_aux);
@@ -247,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
compo->vtg_main = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
if (vtg_np)
compo->vtg_aux = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
platform_set_drvdata(pdev, compo);
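
Aside (not part of the diff): the of_node_put() calls added above follow the usual phandle pattern, since of_parse_phandle() returns a node with its refcount raised. A minimal sketch, reusing the "st,vtg" phandle name from the hunk:

#include <linux/of.h>

static int example_parse_vtg(struct device_node *np)
{
	struct device_node *vtg_np;

	/* of_parse_phandle() takes a reference on the node it returns */
	vtg_np = of_parse_phandle(np, "st,vtg", 0);
	if (vtg_np) {
		/* ... resolve whatever is needed from vtg_np ... */
	}
	/* of_node_put() is NULL-safe, so the put can be unconditional */
	of_node_put(vtg_np);

	return 0;
}
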
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 1a4a73dab11e..24444ef42a98 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -81,4 +81,7 @@ struct sti_compositor {
struct notifier_block vtg_vblank_nb;
};
+int sti_compositor_debufs_init(struct sti_compositor *compo,
+ struct drm_minor *minor);
+
#endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index e04deedabd4a..c7d734dc3cf4 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -23,22 +23,11 @@
static void sti_crtc_enable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct device *dev = mixer->dev;
- struct sti_compositor *compo = dev_get_drvdata(dev);
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_READY;
- /* Prepare and enable the compo IP clock */
- if (mixer->id == STI_MIXER_MAIN) {
- if (clk_prepare_enable(compo->clk_compo_main))
- DRM_INFO("Failed to prepare/enable compo_main clk\n");
- } else {
- if (clk_prepare_enable(compo->clk_compo_aux))
- DRM_INFO("Failed to prepare/enable compo_aux clk\n");
- }
-
drm_crtc_vblank_on(crtc);
}
@@ -57,9 +46,8 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct clk *clk;
+ struct clk *compo_clk, *pix_clk;
int rate = mode->clock * 1000;
- int res;
DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
@@ -74,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
mode->vsync_start, mode->vsync_end,
mode->vtotal, mode->type, mode->flags);
- /* Set rate and prepare/enable pixel clock */
- if (mixer->id == STI_MIXER_MAIN)
- clk = compo->clk_pix_main;
- else
- clk = compo->clk_pix_aux;
+ if (mixer->id == STI_MIXER_MAIN) {
+ compo_clk = compo->clk_compo_main;
+ pix_clk = compo->clk_pix_main;
+ } else {
+ compo_clk = compo->clk_compo_aux;
+ pix_clk = compo->clk_pix_aux;
+ }
+
+ /* Prepare and enable the compo IP clock */
+ if (clk_prepare_enable(compo_clk)) {
+ DRM_INFO("Failed to prepare/enable compositor clk\n");
+ goto compo_error;
+ }
- res = clk_set_rate(clk, rate);
- if (res < 0) {
+ /* Set rate and prepare/enable pixel clock */
+ if (clk_set_rate(pix_clk, rate) < 0) {
DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
- return -EINVAL;
+ goto pix_error;
}
- if (clk_prepare_enable(clk)) {
+ if (clk_prepare_enable(pix_clk)) {
DRM_ERROR("Failed to prepare/enable pix clk\n");
- return -EINVAL;
+ goto pix_error;
}
sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &crtc->mode);
- res = sti_mixer_active_video_area(mixer, &crtc->mode);
- if (res) {
+ if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
DRM_ERROR("Can't set active video area\n");
- return -EINVAL;
+ goto mixer_error;
}
- return res;
+ return 0;
+
+mixer_error:
+ clk_disable_unprepare(pix_clk);
+pix_error:
+ clk_disable_unprepare(compo_clk);
+compo_error:
+ return -EINVAL;
}
static void sti_crtc_disable(struct drm_crtc *crtc)
@@ -130,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc)
static void
sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- sti_crtc_enable(crtc);
sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
@@ -221,9 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = sti_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.atomic_begin = sti_crtc_atomic_begin,
.atomic_flush = sti_crtc_atomic_flush,
};
@@ -331,6 +330,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
}
}
+static int sti_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+
+ if (drm_crtc_index(crtc) == 0)
+ return sti_compositor_debufs_init(compo, crtc->dev->primary);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs sti_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -339,6 +349,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .late_register = sti_crtc_late_register,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 4e990299735c..3b53f7f2e3fc 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP(CUR_AWE);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_disable = sti_cursor_atomic_disable,
};
+static void sti_cursor_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_cursor_late_register(struct drm_plane *drm_plane)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_cursor *cursor = to_sti_cursor(plane);
+
+ return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_cursor_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = sti_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .late_register = sti_cursor_late_register,
+};
+
struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
@@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
possible_crtcs,
- &sti_plane_helpers_funcs,
+ &sti_cursor_plane_helpers_funcs,
cursor_supported_formats,
ARRAY_SIZE(cursor_supported_formats),
DRM_PLANE_TYPE_CURSOR, NULL);
@@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
- if (cursor_debugfs_init(cursor, drm_dev->primary))
- DRM_ERROR("CURSOR debugfs setup failed\n");
-
return &cursor->plane.drm_plane;
err_plane:
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 872495e72294..96bd3d08b2d4 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct drm_device *dev = node->minor->dev;
struct drm_plane *p;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
list_for_each_entry(p, &dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
plane->fps_info.fips_str);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm,
* the software side now.
*/
- drm_atomic_helper_swap_state(drm, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
sti_atomic_schedule(private, state);
@@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm,
return 0;
}
+static void sti_output_poll_changed(struct drm_device *ddev)
+{
+ struct sti_private *private = ddev->dev_private;
+
+ if (!ddev->mode_config.num_connector)
+ return;
+
+ if (private->fbdev) {
+ drm_fbdev_cma_hotplug_event(private->fbdev);
+ return;
+ }
+
+ private->fbdev = drm_fbdev_cma_init(ddev, 32,
+ ddev->mode_config.num_crtc,
+ ddev->mode_config.num_connector);
+ if (IS_ERR(private->fbdev))
+ private->fbdev = NULL;
+}
+
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
+ .output_poll_changed = sti_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = sti_atomic_commit,
};
@@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &sti_mode_config_funcs;
}
-static int sti_load(struct drm_device *dev, unsigned long flags)
-{
- struct sti_private *private;
- int ret;
-
- private = kzalloc(sizeof(*private), GFP_KERNEL);
- if (!private) {
- DRM_ERROR("Failed to allocate private\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)private;
- private->drm_dev = dev;
-
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, sti_atomic_work);
-
- drm_mode_config_init(dev);
- drm_kms_helper_poll_init(dev);
-
- sti_mode_config_init(dev);
-
- ret = component_bind_all(dev->dev, dev);
- if (ret) {
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- kfree(private);
- return ret;
- }
-
- drm_mode_config_reset(dev);
-
- drm_helper_disable_unused_functions(dev);
- drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
-
- return 0;
-}
-
static const struct file_operations sti_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = {
static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
- .load = sti_load,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+static int sti_init(struct drm_device *ddev)
+{
+ struct sti_private *private;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ ddev->dev_private = (void *)private;
+ dev_set_drvdata(ddev->dev, ddev);
+ private->drm_dev = ddev;
+
+ mutex_init(&private->commit.lock);
+ INIT_WORK(&private->commit.work, sti_atomic_work);
+
+ drm_mode_config_init(ddev);
+
+ sti_mode_config_init(ddev);
+
+ drm_kms_helper_poll_init(ddev);
+
+ return 0;
+}
+
+static void sti_cleanup(struct drm_device *ddev)
+{
+ struct sti_private *private = ddev->dev_private;
+
+ if (private->fbdev) {
+ drm_fbdev_cma_fini(private->fbdev);
+ private->fbdev = NULL;
+ }
+
+ drm_kms_helper_poll_fini(ddev);
+ drm_vblank_cleanup(ddev);
+ kfree(private);
+ ddev->dev_private = NULL;
+}
+
static int sti_bind(struct device *dev)
{
- return drm_platform_init(&sti_driver, to_platform_device(dev));
+ struct drm_device *ddev;
+ int ret;
+
+ ddev = drm_dev_alloc(&sti_driver, dev);
+ if (!ddev)
+ return -ENOMEM;
+
+ ddev->platformdev = to_platform_device(dev);
+
+ ret = sti_init(ddev);
+ if (ret)
+ goto err_drm_dev_unref;
+
+ ret = component_bind_all(ddev->dev, ddev);
+ if (ret)
+ goto err_cleanup;
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto err_register;
+
+ drm_mode_config_reset(ddev);
+
+ return 0;
+
+err_register:
+ drm_mode_config_cleanup(ddev);
+err_cleanup:
+ sti_cleanup(ddev);
+err_drm_dev_unref:
+ drm_dev_unref(ddev);
+ return ret;
}
static void sti_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_dev_unregister(ddev);
+ sti_cleanup(ddev);
+ drm_dev_unref(ddev);
}
static const struct component_master_ops sti_ops = {
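
Not part of the diff: both the rockchip and sti conversions above replace drm_platform_init()/.load() with an explicit component-master bind built on drm_dev_alloc()/drm_dev_register(). A condensed sketch of that shape, with hypothetical "example_" names and simplified error handling (drm_dev_alloc() still returned NULL on failure in this kernel):

#include <drm/drmP.h>
#include <linux/component.h>

static struct drm_driver example_driver;	/* .load/.unload no longer set */

static int example_bind(struct device *dev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&example_driver, dev);
	if (!ddev)
		return -ENOMEM;

	/* what used to live in .load() now runs before registration */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_unref;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_unbind;

	dev_set_drvdata(dev, ddev);
	return 0;

err_unbind:
	component_unbind_all(dev, ddev);
err_unref:
	drm_dev_unref(ddev);
	return ret;
}

static void example_unbind(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_dev_unregister(ddev);
	component_unbind_all(dev, ddev);
	drm_dev_unref(ddev);
	dev_set_drvdata(dev, NULL);
}

static const struct component_master_ops example_master_ops = {
	.bind = example_bind,
	.unbind = example_unbind,
};
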
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 30ddc20841c3..78ebe5e30f53 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -24,6 +24,7 @@ struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
+ struct drm_fbdev_cma *fbdev;
struct {
struct drm_atomic_state *state;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 25f76632002c..00881eb4536e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
-{
- struct sti_dvo_connector *dvo_connector
- = to_sti_dvo_connector(connector);
-
- /* Best encoder is the one associated during connector creation */
- return dvo_connector->encoder;
-}
-
static const
struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
.get_modes = sti_dvo_connector_get_modes,
.mode_valid = sti_dvo_connector_mode_valid,
- .best_encoder = sti_dvo_best_encoder,
};
static enum drm_connector_status
@@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
}
-static void sti_dvo_connector_destroy(struct drm_connector *connector)
+static int sti_dvo_late_register(struct drm_connector *connector)
{
struct sti_dvo_connector *dvo_connector
= to_sti_dvo_connector(connector);
+ struct sti_dvo *dvo = dvo_connector->dvo;
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(dvo_connector);
+ if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
+ DRM_ERROR("DVO debugfs setup failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static const struct drm_connector_funcs sti_dvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_dvo_connector_detect,
- .destroy = sti_dvo_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = sti_dvo_late_register,
};
static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
@@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_dvo_connector_helper_funcs);
- err = drm_connector_register(drm_connector);
- if (err)
- goto err_connector;
-
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
- if (dvo_debugfs_init(dvo, drm_dev->primary))
- DRM_ERROR("DVO debugfs setup failed\n");
-
return 0;
err_sysfs:
- drm_connector_unregister(drm_connector);
-err_connector:
drm_bridge_remove(bridge);
- drm_connector_cleanup(drm_connector);
return -EINVAL;
}
@@ -602,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
if (!dvo->panel_node)
DRM_ERROR("No panel associated to the dvo output\n");
+ of_node_put(dvo->panel_node);
platform_set_drvdata(pdev, dvo);
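
Illustrative only: the dvo/hda/hdmi hunks drop drm_connector_register() plus manual debugfs setup from bind() and move the debugfs files into the connector's .late_register hook, which the core invokes after drm_dev_register(). A sketch with a hypothetical connector type and a stand-in debugfs helper:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>

struct example_connector {
	struct drm_connector base;
};

/* hypothetical stand-in for dvo/hda/hdmi_debugfs_init() */
static int example_debugfs_init(struct example_connector *ec,
				struct drm_minor *minor)
{
	return 0;
}

static int example_connector_late_register(struct drm_connector *connector)
{
	struct example_connector *ec =
		container_of(connector, struct example_connector, base);

	/* runs after drm_dev_register(), so dev->primary is valid */
	return example_debugfs_init(ec, connector->dev->primary);
}

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.late_register = example_connector_late_register,
};
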
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff33c38da197..b8d942ca45e8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct drm_crtc *crtc = drm_plane->crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
unsigned int b;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
for (b = 0; b < GDP_NODE_NB_BANK; b++) {
seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
gdp_node_dump_node(s, gdp->node_list[b].btm_field);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_disable = sti_gdp_atomic_disable,
};
+static void sti_gdp_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_gdp_late_register(struct drm_plane *drm_plane)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_gdp *gdp = to_sti_gdp(plane);
+
+ return gdp_debugfs_init(gdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_gdp_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = sti_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .late_register = sti_gdp_late_register,
+};
+
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
@@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
possible_crtcs,
- &sti_plane_helpers_funcs,
+ &sti_gdp_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
type, NULL);
@@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&gdp->plane, type);
- if (gdp_debugfs_init(gdp, drm_dev->primary))
- DRM_ERROR("GDP debugfs setup failed\n");
-
return &gdp->plane.drm_plane;
err:
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f7d3464cdf09..8505569f75de 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
-{
- struct sti_hda_connector *hda_connector
- = to_sti_hda_connector(connector);
-
- /* Best encoder is the one associated during connector creation */
- return hda_connector->encoder;
-}
-
static const
struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
.get_modes = sti_hda_connector_get_modes,
.mode_valid = sti_hda_connector_mode_valid,
- .best_encoder = sti_hda_best_encoder,
};
static enum drm_connector_status
@@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static void sti_hda_connector_destroy(struct drm_connector *connector)
+static int sti_hda_late_register(struct drm_connector *connector)
{
struct sti_hda_connector *hda_connector
= to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
+ DRM_ERROR("HDA debugfs setup failed\n");
+ return -EINVAL;
+ }
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(hda_connector);
+ return 0;
}
static const struct drm_connector_funcs sti_hda_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hda_connector_detect,
- .destroy = sti_hda_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = sti_hda_late_register,
};
static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
@@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hda_connector_helper_funcs);
- err = drm_connector_register(drm_connector);
- if (err)
- goto err_connector;
-
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
@@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
/* force to disable hd dacs at startup */
hda_enable_hd_dacs(hda, false);
- if (hda_debugfs_init(hda, drm_dev->primary))
- DRM_ERROR("HDA debugfs setup failed\n");
-
return 0;
err_sysfs:
- drm_connector_unregister(drm_connector);
-err_connector:
- drm_connector_cleanup(drm_connector);
+ drm_bridge_remove(bridge);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6ef0715bd5b9..fedc17f98d9b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -18,6 +18,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <sound/hdmi-codec.h>
+
#include "sti_hdmi.h"
#include "sti_hdmi_tx3g4c28phy.h"
#include "sti_hdmi_tx3g0c55phy.h"
@@ -35,6 +37,8 @@
#define HDMI_DFLT_CHL0_DAT 0x0110
#define HDMI_DFLT_CHL1_DAT 0x0114
#define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_AUDIO_CFG 0x0200
+#define HDMI_SPDIF_FIFO_STATUS 0x0204
#define HDMI_SW_DI_1_HEAD_WORD 0x0210
#define HDMI_SW_DI_1_PKT_WORD0 0x0214
#define HDMI_SW_DI_1_PKT_WORD1 0x0218
@@ -44,6 +48,9 @@
#define HDMI_SW_DI_1_PKT_WORD5 0x0228
#define HDMI_SW_DI_1_PKT_WORD6 0x022C
#define HDMI_SW_DI_CFG 0x0230
+#define HDMI_SAMPLE_FLAT_MASK 0x0244
+#define HDMI_AUDN 0x0400
+#define HDMI_AUD_CTS 0x0404
#define HDMI_SW_DI_2_HEAD_WORD 0x0600
#define HDMI_SW_DI_2_PKT_WORD0 0x0604
#define HDMI_SW_DI_2_PKT_WORD1 0x0608
@@ -103,6 +110,7 @@
#define HDMI_INT_DLL_LCK BIT(5)
#define HDMI_INT_NEW_FRAME BIT(6)
#define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8)
#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
@@ -111,6 +119,7 @@
| HDMI_INT_GLOBAL)
#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_AUDIO_FIFO_XRUN \
| HDMI_INT_GENCTRL_PKT \
| HDMI_INT_NEW_FRAME \
| HDMI_INT_DLL_LCK \
@@ -121,6 +130,27 @@
#define HDMI_STA_SW_RST BIT(1)
+#define HDMI_AUD_CFG_8CH BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO 0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+ HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
@@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
wake_up_interruptible(&hdmi->wait_event);
}
+ /* Audio FIFO underrun IRQ */
+ if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+ DRM_INFO("Warning: audio FIFO underrun occurs!");
+
return IRQ_HANDLED;
}
@@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
*/
static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
{
- struct hdmi_audio_infoframe infofame;
+ struct hdmi_audio_params *audio = &hdmi->audio;
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
- int ret;
-
- ret = hdmi_audio_infoframe_init(&infofame);
- if (ret < 0) {
- DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
- return ret;
- }
-
- infofame.channels = 2;
-
- ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
- if (ret < 0) {
- DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
- return ret;
+ int ret, val;
+
+ DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+ audio->enabled ? "enable" : "disable");
+ if (audio->enabled) {
+ /* set the stored audio parameters */
+ ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+ sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+ return ret;
+ }
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+ } else {
+ /* disable audio infoframe transmission */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+ HDMI_IFRAME_SLOT_AUDIO);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
- hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
return 0;
}
@@ -628,12 +665,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
DBGFS_DUMP("\n", HDMI_CFG);
@@ -656,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP("", HDMI_SW_DI_CFG);
hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
+ DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+ DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+ DBGFS_DUMP("\n", HDMI_AUDN);
+
seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
@@ -690,7 +725,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -861,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
return count;
@@ -897,20 +932,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
-{
- struct sti_hdmi_connector *hdmi_connector
- = to_sti_hdmi_connector(connector);
-
- /* Best encoder is the one associated during connector creation */
- return hdmi_connector->encoder;
-}
-
static const
struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
.get_modes = sti_hdmi_connector_get_modes,
.mode_valid = sti_hdmi_connector_mode_valid,
- .best_encoder = sti_hdmi_best_encoder,
};
/* get detection status of display device */
@@ -932,16 +957,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
}
-static void sti_hdmi_connector_destroy(struct drm_connector *connector)
-{
- struct sti_hdmi_connector *hdmi_connector
- = to_sti_hdmi_connector(connector);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(hdmi_connector);
-}
-
static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
struct drm_connector *connector)
{
@@ -1024,17 +1039,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
return -EINVAL;
}
+static int sti_hdmi_late_register(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
+ DRM_ERROR("HDMI debugfs setup failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
- .destroy = sti_hdmi_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_set_property = sti_hdmi_connector_set_property,
.atomic_get_property = sti_hdmi_connector_get_property,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = sti_hdmi_late_register,
};
static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
@@ -1049,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
return NULL;
}
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. Non-coherent clocks means that the audio and TMDS clocks do not
+ * have the same source (they drift relative to each other). In this case the
+ * assumption is that CTS is automatically calculated by the hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * Values computed are based on the table described in the HDMI 1.4b specification.
+ *
+ * Returns n value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+ unsigned int n;
+
+ switch (audio_fs) {
+ case 32000:
+ n = 4096;
+ break;
+ case 44100:
+ n = 6272;
+ break;
+ case 48000:
+ n = 6144;
+ break;
+ case 88200:
+ n = 6272 * 2;
+ break;
+ case 96000:
+ n = 6144 * 2;
+ break;
+ case 176400:
+ n = 6272 * 4;
+ break;
+ case 192000:
+ n = 6144 * 4;
+ break;
+ default:
+ /* Not pre-defined, recommended value: 128 * fs / 1000 */
+ n = (audio_fs * 128) / 1000;
+ }
+
+ return n;
+}
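+
+/*
+ * Worked example (editorial illustration, not part of the driver code):
+ * for a 44.1 kHz stream the table above gives N = 6272; for 96 kHz it
+ * gives N = 6144 * 2 = 12288; for a rate not listed, such as 64 kHz,
+ * the fallback yields N = 64000 * 128 / 1000 = 8192.
+ */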
+
+static int hdmi_audio_configure(struct sti_hdmi *hdmi,
+ struct hdmi_audio_params *params)
+{
+ int audio_cfg, n;
+ struct hdmi_audio_infoframe *info = &params->cea;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
+ /* update N parameter */
+ n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+ DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+ params->sample_rate, hdmi->mode.clock * 1000, n);
+ hdmi_write(hdmi, n, HDMI_AUDN);
+
+ /* update HDMI registers according to configuration */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+
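+ /*
+  * The cases below intentionally fall through: enabling a higher
+  * channel count also sets the valid bits for all lower channel pairs.
+  */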
+ switch (info->channels) {
+ case 8:
+ audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+ case 6:
+ audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+ case 4:
+ audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+ case 2:
+ audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+ break;
+ default:
+ DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
+ info->channels);
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ hdmi->audio = *params;
+
+ return hdmi_audio_infoframe_config(hdmi);
+}
+
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ int audio_cfg;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* disable audio */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ hdmi->audio.enabled = 0;
+ hdmi_audio_infoframe_config(hdmi);
+}
+
+static int hdmi_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
+ struct hdmi_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
+ if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
+ daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+ daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+
+ audio.enabled = 1;
+
+ ret = hdmi_audio_configure(hdmi, &audio);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
+
+ if (enable)
+ hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
+ else
+ hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
+
+ return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_connector *connector = hdmi->drm_connector;
+
+ DRM_DEBUG_DRIVER("\n");
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = hdmi_audio_hw_params,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .digital_mute = hdmi_audio_digital_mute,
+ .get_eld = hdmi_audio_get_eld,
+};
+
+static int sti_hdmi_register_audio_driver(struct device *dev,
+ struct sti_hdmi *hdmi)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+ };
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->audio.enabled = 0;
+
+ hdmi->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ DRM_INFO("%s driver bound to %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
+
+ return 0;
+}
+
static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1095,9 +1325,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
/* initialise property */
sti_hdmi_connector_init_property(drm_dev, drm_connector);
- err = drm_connector_register(drm_connector);
- if (err)
- goto err_connector;
+ hdmi->drm_connector = drm_connector;
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
@@ -1105,19 +1333,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ err = sti_hdmi_register_audio_driver(dev, hdmi);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec\n");
+ goto err_sysfs;
+ }
+
+ /* Initialize audio infoframe */
+ err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+ if (err) {
+ DRM_ERROR("Failed to init audio infoframe\n");
+ goto err_sysfs;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
- if (hdmi_debugfs_init(hdmi, drm_dev->primary))
- DRM_ERROR("HDMI debugfs setup failed\n");
-
return 0;
err_sysfs:
- drm_connector_unregister(drm_connector);
-err_connector:
- drm_connector_cleanup(drm_connector);
-
+ drm_bridge_remove(bridge);
+ hdmi->drm_connector = NULL;
return -EINVAL;
}
@@ -1267,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev)
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
i2c_put_adapter(hdmi->ddc_adapt);
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
return 0;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index ef3a94583bbd..119bc3582ac7 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -23,6 +23,13 @@ struct hdmi_phy_ops {
void (*stop)(struct sti_hdmi *hdmi);
};
+struct hdmi_audio_params {
+ bool enabled;
+ unsigned int sample_width;
+ unsigned int sample_rate;
+ struct hdmi_audio_infoframe cea;
+};
+
/* values for the framing mode property */
enum sti_hdmi_modes {
HDMI_MODE_HDMI,
@@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
* @ddc_adapt: i2c ddc adapter
* @colorspace: current colorspace selected
* @hdmi_mode: select framing for HDMI or DVI
+ * @audio_pdev: ASoC hdmi-codec platform device
* @audio: hdmi audio parameters
+ * @drm_connector: hdmi connector
*/
struct sti_hdmi {
struct device dev;
@@ -89,6 +99,9 @@ struct sti_hdmi {
struct i2c_adapter *ddc_adapt;
enum hdmi_colorspace colorspace;
enum sti_hdmi_modes hdmi_mode;
+ struct platform_device *audio_pdev;
+ struct hdmi_audio_params audio;
+ struct drm_connector *drm_connector;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1edec29b9e45..b5ee783e3e7c 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
int cmd, cmd_offset, infoxp70;
void *virt;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_disable = sti_hqvdp_atomic_disable,
};
+static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+
+ return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_hqvdp_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = sti_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .late_register = sti_hqvdp_late_register,
+};
+
static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
struct device *dev, int desc)
{
@@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
sti_hqvdp_init(hqvdp);
res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
- &sti_plane_helpers_funcs,
+ &sti_hqvdp_plane_helpers_funcs,
hqvdp_supported_formats,
ARRAY_SIZE(hqvdp_supported_formats),
DRM_PLANE_TYPE_OVERLAY, NULL);
@@ -1266,9 +1286,6 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
- if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
- DRM_ERROR("HQVDP debugfs setup failed\n");
-
return &hqvdp->plane.drm_plane;
}
@@ -1346,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
hqvdp->vtg = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
platform_set_drvdata(pdev, hqvdp);
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index aed7801b51f7..7d9aea805eab 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
{ "mixer_aux", mixer_dbg_show, 0, NULL },
};
-static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *mixer_debugfs_files;
@@ -246,13 +239,10 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
{
- int plane_id, depth = plane->zorder;
+ int plane_id, depth = plane->drm_plane.state->normalized_zpos;
unsigned int i;
u32 mask, val;
- if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
- return 1;
-
switch (plane->desc) {
case STI_GDP_0:
plane_id = GAM_DEPTH_GDP0_ID;
@@ -285,8 +275,8 @@ int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
break;
}
- mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
- plane_id = plane_id << (3 * (depth - 1));
+ mask |= GAM_DEPTH_MASK_ID << (3 * depth);
+ plane_id = plane_id << (3 * depth);
DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
sti_plane_to_str(plane), depth);
@@ -400,8 +390,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
sti_mixer_to_str(mixer), mixer->regs);
- if (mixer_debugfs_init(mixer, drm_dev->primary))
- DRM_ERROR("MIXER debugfs setup failed\n");
-
return mixer;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 6f35fc086873..830a3c42d886 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+
/* depth in Cross-bar control = z order */
#define GAM_MIXER_NB_DEPTH_LEVEL 6
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index f10c98d3f012..ca4b3719a64a 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -14,15 +14,6 @@
#include "sti_drv.h"
#include "sti_plane.h"
-/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */
-enum sti_plane_desc sti_plane_default_zorder[] = {
- STI_GDP_0,
- STI_GDP_1,
- STI_HQVDP_0,
- STI_GDP_2,
- STI_GDP_3,
-};
-
const char *sti_plane_to_str(struct sti_plane *plane)
{
switch (plane->desc) {
@@ -45,25 +36,15 @@ const char *sti_plane_to_str(struct sti_plane *plane)
#define STI_FPS_INTERVAL_MS 3000
-static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
-{
- struct timespec tmp_ts = timespec_sub(lhs, rhs);
- u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
-
- do_div(tmp_ns, NSEC_PER_MSEC);
-
- return (u32)tmp_ns;
-}
-
void sti_plane_update_fps(struct sti_plane *plane,
bool new_frame,
bool new_field)
{
- struct timespec now;
+ ktime_t now;
struct sti_fps_info *fps;
int fpks, fipks, ms_since_last, num_frames, num_fields;
- getrawmonotonic(&now);
+ now = ktime_get();
/* Compute number of frame updates */
fps = &plane->fps_info;
@@ -76,7 +57,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
return;
fps->curr_frame_counter++;
- ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+ ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
num_frames = fps->curr_frame_counter - fps->last_frame_counter;
if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
@@ -106,77 +87,46 @@ void sti_plane_update_fps(struct sti_plane *plane,
plane->fps_info.fips_str);
}
-static void sti_plane_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_helper_disable(drm_plane);
- drm_plane_cleanup(drm_plane);
-}
-
-static int sti_plane_set_property(struct drm_plane *drm_plane,
- struct drm_property *property,
- uint64_t val)
+static int sti_plane_get_default_zpos(enum drm_plane_type type)
{
- struct drm_device *dev = drm_plane->dev;
- struct sti_private *private = dev->dev_private;
- struct sti_plane *plane = to_sti_plane(drm_plane);
-
- DRM_DEBUG_DRIVER("\n");
-
- if (property == private->plane_zorder_property) {
- plane->zorder = val;
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
return 0;
+ case DRM_PLANE_TYPE_OVERLAY:
+ return 1;
+ case DRM_PLANE_TYPE_CURSOR:
+ return 7;
}
+ return 0;
+}
- return -EINVAL;
+void sti_plane_reset(struct drm_plane *plane)
+{
+ drm_atomic_helper_plane_reset(plane);
+ plane->state->zpos = sti_plane_get_default_zpos(plane->type);
}
-static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane,
+ enum drm_plane_type type)
{
- struct drm_device *dev = drm_plane->dev;
- struct sti_private *private = dev->dev_private;
- struct sti_plane *plane = to_sti_plane(drm_plane);
- struct drm_property *prop;
-
- prop = private->plane_zorder_property;
- if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 1,
- GAM_MIXER_NB_DEPTH_LEVEL);
- if (!prop)
- return;
-
- private->plane_zorder_property = prop;
+ int zpos = sti_plane_get_default_zpos(type);
+
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ case DRM_PLANE_TYPE_OVERLAY:
+ drm_plane_create_zpos_property(drm_plane, zpos, 0, 6);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ drm_plane_create_zpos_immutable_property(drm_plane, zpos);
+ break;
}
-
- drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
}
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type)
{
- unsigned int i;
+ sti_plane_attach_zorder_property(&plane->drm_plane, type);
- for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
- if (sti_plane_default_zorder[i] == plane->desc)
- break;
-
- plane->zorder = i + 1;
-
- if (type == DRM_PLANE_TYPE_OVERLAY)
- sti_plane_attach_zorder_property(&plane->drm_plane);
-
- DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
- plane->drm_plane.base.id,
- sti_plane_to_str(plane), plane->zorder);
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s\n",
+ plane->drm_plane.base.id, sti_plane_to_str(plane));
}
-
-struct drm_plane_funcs sti_plane_helpers_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_plane_destroy,
- .set_property = sti_plane_set_property,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index c50a3b9f5d37..ce3e8d6c88bb 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -11,8 +11,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
-extern struct drm_plane_funcs sti_plane_helpers_funcs;
-
#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
#define STI_PLANE_TYPE_SHIFT 8
@@ -57,7 +55,7 @@ struct sti_fps_info {
unsigned int last_frame_counter;
unsigned int curr_field_counter;
unsigned int last_field_counter;
- struct timespec last_timestamp;
+ ktime_t last_timestamp;
char fps_str[FPS_LENGTH];
char fips_str[FPS_LENGTH];
};
@@ -68,14 +66,12 @@ struct sti_fps_info {
* @plane: drm plane it is bound to (if any)
* @desc: plane type & id
* @status: to know the status of the plane
- * @zorder: plane z-order
* @fps_info: frame per second info
*/
struct sti_plane {
struct drm_plane drm_plane;
enum sti_plane_desc desc;
enum sti_plane_status status;
- int zorder;
struct sti_fps_info fps_info;
};
@@ -83,6 +79,8 @@ const char *sti_plane_to_str(struct sti_plane *plane);
void sti_plane_update_fps(struct sti_plane *plane,
bool new_frame,
bool new_field);
+
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type);
+void sti_plane_reset(struct drm_plane *plane);
#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f983db5a59da..e25995b35715 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -112,6 +112,7 @@ struct sti_tvout {
struct drm_encoder *hdmi;
struct drm_encoder *hda;
struct drm_encoder *dvo;
+ bool debugfs_registered;
};
struct sti_tvout_encoder {
@@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
@@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
kfree(sti_encoder);
}
+static int sti_tvout_late_register(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+ int ret;
+
+ if (tvout->debugfs_registered)
+ return 0;
+
+ ret = tvout_debugfs_init(tvout, encoder->dev->primary);
+ if (ret)
+ return ret;
+
+ tvout->debugfs_registered = true;
+ return 0;
+}
+
+static void sti_tvout_early_unregister(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ if (!tvout->debugfs_registered)
+ return;
+
+ tvout_debugfs_exit(tvout, encoder->dev->primary);
+ tvout->debugfs_registered = false;
+}
+
static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
.destroy = sti_tvout_encoder_destroy,
+ .late_register = sti_tvout_late_register,
+ .early_unregister = sti_tvout_early_unregister,
};
static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
@@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
sti_tvout_create_encoders(drm_dev, tvout);
- if (tvout_debugfs_init(tvout, drm_dev->primary))
- DRM_ERROR("TVOUT debugfs setup failed\n");
-
return 0;
}
@@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
sti_tvout_destroy_encoders(tvout);
-
- tvout_debugfs_exit(tvout, drm_dev->primary);
}
static const struct component_ops sti_tvout_ops = {
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 523ed19f5ac6..47634a0251fc 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
DBGFS_DUMP(VID_CSAT);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = {
{ "vid", vid_dbg_show, 0, NULL },
};
-static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
{
unsigned int i;
@@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
sti_vid_init(vid);
- if (vid_debugfs_init(vid, drm_dev->primary))
- DRM_ERROR("VID debugfs setup failed\n");
-
return vid;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 6c842344f3d8..fdc90f922a05 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
int id, void __iomem *baseaddr);
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+
#endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 6bf4ce466d20..0bdc385eec17 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -65,7 +65,7 @@
#define HDMI_DELAY (5)
/* Delay introduced by the DVO in nb of pixel */
-#define DVO_DELAY (2)
+#define DVO_DELAY (7)
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
@@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
if (np) {
vtg->slave = of_vtg_find(np);
+ of_node_put(np);
if (!vtg->slave)
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 41cacecbea9a..4a192210574f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct sun4i_drv *drv = scrtc->drv;
+ struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_DRIVER("Committing plane changes\n");
sun4i_backend_commit(drv->backend);
+
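+ /*
+  * Forward any pending page-flip event for this commit: arm it on the
+  * next vblank when a vblank reference can be taken, otherwise send it
+  * out immediately.
+  */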
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static void sun4i_crtc_disable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 937394cbc241..7092daaf6c43 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -75,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.dumb_destroy = drm_gem_dumb_destroy,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
/* PRIME Operations */
@@ -123,10 +123,6 @@ static int sun4i_drv_bind(struct device *dev)
if (!drm)
return -ENOMEM;
- ret = drm_dev_set_unique(drm, dev_name(drm->dev));
- if (ret)
- goto free_drm;
-
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv) {
ret = -ENOMEM;
@@ -178,14 +174,8 @@ static int sun4i_drv_bind(struct device *dev)
if (ret)
goto free_drm;
- ret = drm_connector_register_all(drm);
- if (ret)
- goto unregister_drm;
-
return 0;
-unregister_drm:
- drm_dev_unregister(drm);
free_drm:
drm_dev_unref(drm);
return ret;
@@ -195,7 +185,6 @@ static void sun4i_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index a0b30c216a5b..70688febd7ac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm)
{
struct sun4i_drv *drv = drm->dev_private;
- if (drv->fbdev)
- drm_fbdev_cma_hotplug_event(drv->fbdev);
+ drm_fbdev_cma_hotplug_event(drv->fbdev);
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index aaffe9e64ffb..f5bbac6efb4c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -104,19 +104,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-sun4i_rgb_best_encoder(struct drm_connector *connector)
-{
- struct sun4i_rgb *rgb =
- drm_connector_to_sun4i_rgb(connector);
-
- return &rgb->encoder;
-}
-
static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.get_modes = sun4i_rgb_get_modes,
.mode_valid = sun4i_rgb_mode_valid,
- .best_encoder = sun4i_rgb_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index bc047f923508..b84147896294 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-sun4i_tv_comp_best_encoder(struct drm_connector *connector)
-{
- struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
-
- return &tv->encoder;
-}
-
static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
.get_modes = sun4i_tv_comp_get_modes,
.mode_valid = sun4i_tv_comp_mode_valid,
- .best_encoder = sun4i_tv_comp_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 39940f5b7c91..8495bd01b544 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
@@ -1216,6 +1217,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
tegra_dc_stats_reset(&dc->stats);
drm_crtc_vblank_off(crtc);
+
+ pm_runtime_put_sync(dc->dev);
}
static void tegra_crtc_enable(struct drm_crtc *crtc)
@@ -1225,6 +1228,48 @@ static void tegra_crtc_enable(struct drm_crtc *crtc)
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ pm_runtime_get_sync(dc->dev);
+
+ /* initialize display controller */
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt);
+
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = SYNCPT_VSYNC_ENABLE | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ if (dc->soc->supports_border_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
+ /* apply PLL and pixel clock changes */
tegra_dc_commit_state(dc, state);
/* program display mode */
@@ -1685,7 +1730,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- u32 value;
int err;
dc->syncpt = host1x_syncpt_request(dc->dev, flags);
@@ -1755,47 +1799,6 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
- /* initialize display controller */
- if (dc->syncpt) {
- u32 syncpt = host1x_syncpt_id(dc->syncpt);
-
- value = SYNCPT_CNTRL_NO_STALL;
- tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-
- value = SYNCPT_VSYNC_ENABLE | syncpt;
- tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
- }
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
- /* initialize timer */
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
- WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
- WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- if (dc->soc->supports_border_color)
- tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
- tegra_dc_stats_reset(&dc->stats);
-
return 0;
cleanup:
@@ -1987,33 +1990,15 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->rst);
}
+ reset_control_assert(dc->rst);
+
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
dc->powergate = TEGRA_POWERGATE_DIS;
else
dc->powergate = TEGRA_POWERGATE_DISB;
- err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
- dc->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to power partition: %d\n",
- err);
- return err;
- }
- } else {
- err = clk_prepare_enable(dc->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n",
- err);
- return err;
- }
-
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to deassert reset: %d\n",
- err);
- return err;
- }
+ tegra_powergate_power_off(dc->powergate);
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2027,16 +2012,19 @@ static int tegra_dc_probe(struct platform_device *pdev)
return -ENXIO;
}
- INIT_LIST_HEAD(&dc->client.list);
- dc->client.ops = &dc_client_ops;
- dc->client.dev = &pdev->dev;
-
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
return err;
}
+ platform_set_drvdata(pdev, dc);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
err = host1x_client_register(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -2044,8 +2032,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, dc);
-
return 0;
}
@@ -2067,7 +2053,22 @@ static int tegra_dc_remove(struct platform_device *pdev)
return err;
}
- reset_control_assert(dc->rst);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct device *dev)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
if (dc->soc->has_powergate)
tegra_powergate_power_off(dc->powergate);
@@ -2077,10 +2078,45 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
+static int tegra_dc_resume(struct device *dev)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ int err;
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ return err;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dc_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
+};
+
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
+ .pm = &tegra_dc_pm_ops,
},
.probe = tegra_dc_probe,
.remove = tegra_dc_remove,
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index b24a0f14821a..059f409556d5 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -12,6 +12,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
@@ -44,6 +47,11 @@ struct tegra_dpaux {
struct completion complete;
struct work_struct work;
struct list_head list;
+
+#ifdef CONFIG_GENERIC_PINCONF
+ struct pinctrl_dev *pinctrl;
+ struct pinctrl_desc desc;
+#endif
};
static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
@@ -267,6 +275,148 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
return ret;
}
+enum tegra_dpaux_functions {
+ DPAUX_PADCTL_FUNC_AUX,
+ DPAUX_PADCTL_FUNC_I2C,
+ DPAUX_PADCTL_FUNC_OFF,
+};
+
+static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
+{
+ u32 value;
+
+ switch (function) {
+ case DPAUX_PADCTL_FUNC_AUX:
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_AUX;
+ break;
+
+ case DPAUX_PADCTL_FUNC_I2C:
+ value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_I2C;
+ break;
+
+ case DPAUX_PADCTL_FUNC_OFF:
+ tegra_dpaux_pad_power_down(dpaux);
+ return 0;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ tegra_dpaux_pad_power_up(dpaux);
+
+ return 0;
+}
+
+#ifdef CONFIG_GENERIC_PINCONF
+static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
+ PINCTRL_PIN(0, "DP_AUX_CHx_P"),
+ PINCTRL_PIN(1, "DP_AUX_CHx_N"),
+};
+
+static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
+
+static const char * const tegra_dpaux_groups[] = {
+ "dpaux-io",
+};
+
+static const char * const tegra_dpaux_functions[] = {
+ "aux",
+ "i2c",
+ "off",
+};
+
+static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_groups);
+}
+
+static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
+ unsigned int group)
+{
+ return tegra_dpaux_groups[group];
+}
+
+static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = tegra_dpaux_pin_numbers;
+ *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
+
+ return 0;
+}
+
+static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
+ .get_groups_count = tegra_dpaux_get_groups_count,
+ .get_group_name = tegra_dpaux_get_group_name,
+ .get_group_pins = tegra_dpaux_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_functions);
+}
+
+static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
+ unsigned int function)
+{
+ return tegra_dpaux_functions[function];
+}
+
+static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
+ *groups = tegra_dpaux_groups;
+
+ return 0;
+}
+
+static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
+ unsigned int function, unsigned int group)
+{
+ struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
+
+ return tegra_dpaux_pad_config(dpaux, function);
+}
+
+static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
+ .get_functions_count = tegra_dpaux_get_functions_count,
+ .get_function_name = tegra_dpaux_get_function_name,
+ .get_function_groups = tegra_dpaux_get_function_groups,
+ .set_mux = tegra_dpaux_set_mux,
+};
+#endif
+
static int tegra_dpaux_probe(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux;
@@ -294,11 +444,14 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return -ENXIO;
}
- dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
- if (IS_ERR(dpaux->rst)) {
- dev_err(&pdev->dev, "failed to get reset control: %ld\n",
- PTR_ERR(dpaux->rst));
- return PTR_ERR(dpaux->rst);
+ if (!pdev->dev.pm_domain) {
+ dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+ if (IS_ERR(dpaux->rst)) {
+ dev_err(&pdev->dev,
+ "failed to get reset control: %ld\n",
+ PTR_ERR(dpaux->rst));
+ return PTR_ERR(dpaux->rst);
+ }
}
dpaux->clk = devm_clk_get(&pdev->dev, NULL);
@@ -315,34 +468,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return err;
}
- reset_control_deassert(dpaux->rst);
+ if (dpaux->rst)
+ reset_control_deassert(dpaux->rst);
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dpaux->clk_parent)) {
dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
PTR_ERR(dpaux->clk_parent));
- return PTR_ERR(dpaux->clk_parent);
+ err = PTR_ERR(dpaux->clk_parent);
+ goto assert_reset;
}
err = clk_prepare_enable(dpaux->clk_parent);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
err);
- return err;
+ goto assert_reset;
}
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
err);
- return err;
+ goto disable_parent_clk;
}
dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
PTR_ERR(dpaux->vdd));
- return PTR_ERR(dpaux->vdd);
+ err = PTR_ERR(dpaux->vdd);
+ goto disable_parent_clk;
}
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
@@ -350,7 +506,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- return err;
+ goto disable_parent_clk;
}
disable_irq(dpaux->irq);
@@ -360,7 +516,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
err = drm_dp_aux_register(&dpaux->aux);
if (err < 0)
- return err;
+ goto disable_parent_clk;
/*
* Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -370,16 +526,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
* is no possibility to perform the I2C mode configuration in the
* HDMI path.
*/
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
-
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
- value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_MODE_I2C;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
+ if (err < 0)
+ return err;
+#ifdef CONFIG_GENERIC_PINCONF
+ dpaux->desc.name = dev_name(&pdev->dev);
+ dpaux->desc.pins = tegra_dpaux_pins;
+ dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
+ dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
+ dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
+ dpaux->desc.owner = THIS_MODULE;
+
+ dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+ if (IS_ERR(dpaux->pinctrl)) {
+ dev_err(&pdev->dev, "failed to register pinctrl\n");
+ return PTR_ERR(dpaux->pinctrl);
+ }
+#endif
/* enable and clear all interrupts */
value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -393,17 +557,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpaux);
return 0;
+
+disable_parent_clk:
+ clk_disable_unprepare(dpaux->clk_parent);
+assert_reset:
+ if (dpaux->rst)
+ reset_control_assert(dpaux->rst);
+
+ clk_disable_unprepare(dpaux->clk);
+
+ return err;
}
static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
- u32 value;
/* make sure pads are powered down when not in use */
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+ tegra_dpaux_pad_power_down(dpaux);
drm_dp_aux_unregister(&dpaux->aux);
@@ -414,7 +585,10 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
cancel_work_sync(&dpaux->work);
clk_disable_unprepare(dpaux->clk_parent);
- reset_control_assert(dpaux->rst);
+
+ if (dpaux->rst)
+ reset_control_assert(dpaux->rst);
+
clk_disable_unprepare(dpaux->clk);
return 0;
@@ -528,30 +702,15 @@ enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
int drm_dp_aux_enable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
- u32 value;
-
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
- DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_MODE_AUX;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
-
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
- return 0;
+ return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
}
int drm_dp_aux_disable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
- u32 value;
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+ tegra_dpaux_pad_power_down(dpaux);
return 0;
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b59c3bf0df44..755264d9db22 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -56,8 +56,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
*/
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, false);
drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state, true);
drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
* the software side now.
*/
- drm_atomic_helper_swap_state(drm, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
tegra_atomic_schedule(tegra, state);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f52d6cb24ff5..0ddcce1b420d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
void tegra_output_exit(struct tegra_output *output);
int tegra_output_connector_get_modes(struct drm_connector *connector);
-struct drm_encoder *
-tegra_output_connector_best_encoder(struct drm_connector *connector);
enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force);
void tegra_output_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d1239ebc190f..3d228ad90e0f 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
@@ -677,6 +678,45 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
}
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+ return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ /*
+ * XXX Is this still needed? The module reset is deasserted right
+ * before this function is called.
+ */
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ /* start calibration */
+ tegra_dsi_pad_enable(dsi);
+
+ value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+ DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+ DSI_PAD_OUT_CLK(0x0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+ value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+ DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
+ return tegra_mipi_calibrate(dsi->mipi);
+}
+
static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
unsigned int vrefresh)
{
@@ -794,7 +834,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector,
static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_dsi_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
@@ -837,7 +876,7 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
tegra_dsi_disable(dsi);
- return;
+ pm_runtime_put(dsi->dev);
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -848,6 +887,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi *dsi = to_dsi(output);
struct tegra_dsi_state *state;
u32 value;
+ int err;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
state = tegra_dsi_get_state(dsi);
@@ -876,8 +922,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_enable(output->panel);
-
- return;
}
static int
@@ -967,55 +1011,12 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
.atomic_check = tegra_dsi_encoder_atomic_check,
};
-static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
-{
- u32 value;
-
- value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
-
- return 0;
-}
-
-static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
-{
- u32 value;
-
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
-
- /* start calibration */
- tegra_dsi_pad_enable(dsi);
-
- value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
- DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
- DSI_PAD_OUT_CLK(0x0);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
-
- value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
- DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
-
- return tegra_mipi_calibrate(dsi->mipi);
-}
-
static int tegra_dsi_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
- reset_control_deassert(dsi->rst);
-
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0) {
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
- goto reset;
- }
-
/* Gangsters must not register their own outputs. */
if (!dsi->master) {
dsi->output.dev = client->dev;
@@ -1038,12 +1039,9 @@ static int tegra_dsi_init(struct host1x_client *client)
drm_connector_register(&dsi->output.connector);
err = tegra_output_init(drm, &dsi->output);
- if (err < 0) {
- dev_err(client->dev,
- "failed to initialize output: %d\n",
+ if (err < 0)
+ dev_err(dsi->dev, "failed to initialize output: %d\n",
err);
- goto reset;
- }
dsi->output.encoder.possible_crtcs = 0x3;
}
@@ -1055,10 +1053,6 @@ static int tegra_dsi_init(struct host1x_client *client)
}
return 0;
-
-reset:
- reset_control_assert(dsi->rst);
- return err;
}
static int tegra_dsi_exit(struct host1x_client *client)
@@ -1070,7 +1064,7 @@ static int tegra_dsi_exit(struct host1x_client *client)
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_dsi_debugfs_exit(dsi);
- reset_control_assert(dsi->rst);
+ regulator_disable(dsi->vdd);
return 0;
}
@@ -1494,74 +1488,50 @@ static int tegra_dsi_probe(struct platform_device *pdev)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
- dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (!pdev->dev.pm_domain) {
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+ if (IS_ERR(dsi->rst))
+ return PTR_ERR(dsi->rst);
+ }
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
dev_err(&pdev->dev, "cannot get DSI clock\n");
- err = PTR_ERR(dsi->clk);
- goto reset;
- }
-
- err = clk_prepare_enable(dsi->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable DSI clock\n");
- goto reset;
+ return PTR_ERR(dsi->clk);
}
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp)) {
dev_err(&pdev->dev, "cannot get low-power clock\n");
- err = PTR_ERR(dsi->clk_lp);
- goto disable_clk;
- }
-
- err = clk_prepare_enable(dsi->clk_lp);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable low-power clock\n");
- goto disable_clk;
+ return PTR_ERR(dsi->clk_lp);
}
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent)) {
dev_err(&pdev->dev, "cannot get parent clock\n");
- err = PTR_ERR(dsi->clk_parent);
- goto disable_clk_lp;
+ return PTR_ERR(dsi->clk_parent);
}
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd)) {
dev_err(&pdev->dev, "cannot get VDD supply\n");
- err = PTR_ERR(dsi->vdd);
- goto disable_clk_lp;
- }
-
- err = regulator_enable(dsi->vdd);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable VDD supply\n");
- goto disable_clk_lp;
+ return PTR_ERR(dsi->vdd);
}
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- goto disable_vdd;
+ return err;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs)) {
- err = PTR_ERR(dsi->regs);
- goto disable_vdd;
- }
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
dsi->mipi = tegra_mipi_request(&pdev->dev);
- if (IS_ERR(dsi->mipi)) {
- err = PTR_ERR(dsi->mipi);
- goto disable_vdd;
- }
+ if (IS_ERR(dsi->mipi))
+ return PTR_ERR(dsi->mipi);
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1572,6 +1542,9 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto mipi_free;
}
+ platform_set_drvdata(pdev, dsi);
+ pm_runtime_enable(&pdev->dev);
+
INIT_LIST_HEAD(&dsi->client.list);
dsi->client.ops = &dsi_client_ops;
dsi->client.dev = &pdev->dev;
@@ -1583,22 +1556,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto unregister;
}
- platform_set_drvdata(pdev, dsi);
-
return 0;
unregister:
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
-disable_vdd:
- regulator_disable(dsi->vdd);
-disable_clk_lp:
- clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
- clk_disable_unprepare(dsi->clk);
-reset:
- reset_control_assert(dsi->rst);
return err;
}
@@ -1607,6 +1570,8 @@ static int tegra_dsi_remove(struct platform_device *pdev)
struct tegra_dsi *dsi = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&dsi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1619,14 +1584,82 @@ static int tegra_dsi_remove(struct platform_device *pdev)
mipi_dsi_host_unregister(&dsi->host);
tegra_mipi_free(dsi->mipi);
- regulator_disable(dsi->vdd);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dsi_suspend(struct device *dev)
+{
+ struct tegra_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
- reset_control_assert(dsi->rst);
+
+ regulator_disable(dsi->vdd);
return 0;
}
+static int tegra_dsi_resume(struct device *dev)
+{
+ struct tegra_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
+
+ return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+ return err;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
+};
+
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
@@ -1640,6 +1673,7 @@ struct platform_driver tegra_dsi_driver = {
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
+ .pm = &tegra_dsi_pm_ops,
},
.probe = tegra_dsi_probe,
.remove = tegra_dsi_remove,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 1b12aa7a715e..e6d71fa4028e 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
struct tegra_bo *bo = fb->planes[i];
if (bo) {
- if (bo->pages && bo->vaddr)
+ if (bo->pages)
vunmap(bo->vaddr);
drm_gem_object_unreference_unlocked(&bo->gem);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index b7ef4929e347..cda0491ed6bf 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -18,10 +19,14 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <sound/hda_verbs.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
+#define HDMI_ELD_BUFFER_SIZE 96
+
struct tmds_config {
unsigned int pclk;
u32 pll0;
@@ -39,6 +44,8 @@ struct tegra_hdmi_config {
u32 fuse_override_value;
bool has_sor_io_peak_current;
+ bool has_hda;
+ bool has_hbr;
};
struct tegra_hdmi {
@@ -60,7 +67,10 @@ struct tegra_hdmi {
const struct tegra_hdmi_config *config;
unsigned int audio_source;
- unsigned int audio_freq;
+ unsigned int audio_sample_rate;
+ unsigned int audio_channels;
+
+ unsigned int pixel_clock;
bool stereo;
bool dvi;
@@ -402,11 +412,11 @@ static const struct tmds_config tegra124_tmds_config[] = {
};
static const struct tegra_hdmi_audio_config *
-tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
{
const struct tegra_hdmi_audio_config *table;
- switch (audio_freq) {
+ switch (sample_rate) {
case 32000:
table = tegra_hdmi_audio_32k;
break;
@@ -476,44 +486,114 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
}
}
-static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
+{
+ static const struct {
+ unsigned int sample_rate;
+ unsigned int offset;
+ } regs[] = {
+ { 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
+ { 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
+ { 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
+ { 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
+ { 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
+ { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
+ { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i].sample_rate == hdmi->audio_sample_rate) {
+ tegra_hdmi_writel(hdmi, value, regs[i].offset);
+ break;
+ }
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
{
- struct device_node *node = hdmi->dev->of_node;
const struct tegra_hdmi_audio_config *config;
- unsigned int offset = 0;
- u32 value;
+ u32 source, value;
switch (hdmi->audio_source) {
case HDA:
- value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ else
+ return -EINVAL;
+
break;
case SPDIF:
- value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
break;
default:
- value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
break;
}
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
- AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
- } else {
- value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+ /*
+ * Tegra30 and later use a slightly modified version of the register
+ * layout to accommodate changes related to supporting HDA as the
+ * audio input source for HDMI. The source select field has moved to
+ * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
+ * per block fields remain in the AUDIO_CNTRL0 register.
+ */
+ if (hdmi->config->has_hda) {
+ /*
+ * Inject null samples into the audio FIFO for every frame in
+ * which the codec did not receive any samples. This applies
+ * to stereo LPCM only.
+ *
+ * XXX: This seems to be a remnant of MCP days when this was
+ * used to work around issues with monitors not being able to
+ * play back system startup sounds early. It is possibly not
+ * needed on Linux at all.
+ */
+ if (hdmi->audio_channels == 2)
+ value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
+ else
+ value = 0;
+
+ value |= source;
+
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ }
- value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
- AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ /*
+ * On Tegra20, HDA is not a supported audio source and the source
+ * select field is part of the AUDIO_CNTRL0 register.
+ */
+ value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6);
+
+ if (!hdmi->config->has_hda)
+ value |= source;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+ /*
+ * Advertise support for High Bit-Rate (HBR) audio on Tegra114 and later.
+ */
+ if (hdmi->config->has_hbr) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
}
- config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+ config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
+ hdmi->pixel_clock);
if (!config) {
- dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
- hdmi->audio_freq, pclk);
+ dev_err(hdmi->dev,
+ "cannot set audio to %u Hz at %u Hz pixel clock\n",
+ hdmi->audio_sample_rate, hdmi->pixel_clock);
return -EINVAL;
}
@@ -526,8 +606,8 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
- value = ACR_SUBPACK_CTS(config->cts);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
@@ -536,43 +616,53 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
value &= ~AUDIO_N_RESETF;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- switch (hdmi->audio_freq) {
- case 32000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
- break;
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_aval(hdmi, config->aval);
- case 44100:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
- break;
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
- case 48000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
- break;
+ return 0;
+}
- case 88200:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
- break;
+static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- case 96000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
- break;
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
- case 176400:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
- break;
+static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- case 192000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
- break;
- }
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
- tegra_hdmi_writel(hdmi, config->aval, offset);
- }
+static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
+{
+ size_t length = drm_eld_size(hdmi->output.connector.eld), i;
+ u32 value;
- tegra_hdmi_setup_audio_fs_tables(hdmi);
+ for (i = 0; i < length; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
- return 0;
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if fewer than
+ * 96 bytes were parsed from the EDID.
+ */
+ for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | 0,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+ value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
}
static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
@@ -644,12 +734,6 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
u8 buffer[17];
ssize_t err;
- if (hdmi->dvi) {
- tegra_hdmi_writel(hdmi, 0,
- HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
- return;
- }
-
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
@@ -663,9 +747,24 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
- HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
@@ -674,12 +773,6 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
u8 buffer[14];
ssize_t err;
- if (hdmi->dvi) {
- tegra_hdmi_writel(hdmi, 0,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
- return;
- }
-
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
@@ -687,7 +780,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
return;
}
- frame.channels = 2;
+ frame.channels = hdmi->audio_channels;
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
@@ -703,9 +796,24 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
* bytes can be programmed.
*/
tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
+}
- tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
@@ -713,14 +821,6 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
struct hdmi_vendor_infoframe frame;
u8 buffer[10];
ssize_t err;
- u32 value;
-
- if (!hdmi->stereo) {
- value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- value &= ~GENERIC_CTRL_ENABLE;
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- return;
- }
hdmi_vendor_infoframe_init(&frame);
frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
@@ -733,6 +833,20 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
@@ -772,10 +886,25 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
return drm_detect_hdmi_monitor(edid);
}
+static enum drm_connector_status
+tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ enum drm_connector_status status;
+
+ status = tegra_output_connector_detect(connector, force);
+ if (status == connector_status_connected)
+ return status;
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ return status;
+}
+
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = tegra_output_connector_detect,
+ .detect = tegra_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -806,7 +935,6 @@ static const struct drm_connector_helper_funcs
tegra_hdmi_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_hdmi_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
@@ -815,7 +943,9 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
+ struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
/*
@@ -829,6 +959,20 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_dc_commit(dc);
}
+
+ if (!hdmi->dvi) {
+ if (hdmi->stereo)
+ tegra_hdmi_disable_stereo_infoframe(hdmi);
+
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_avi_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+
+ pm_runtime_put(hdmi->dev);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -837,21 +981,28 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct device_node *node = output->dev->of_node;
struct tegra_hdmi *hdmi = to_hdmi(output);
- unsigned int pulse_start, div82, pclk;
+ unsigned int pulse_start, div82;
int retries = 1000;
u32 value;
int err;
- hdmi->dvi = !tegra_output_is_hdmi(output);
+ pm_runtime_get_sync(hdmi->dev);
- pclk = mode->clock * 1000;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI driver.
+ */
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
+
+ hdmi->pixel_clock = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = clk_set_rate(hdmi->clk, pclk);
+ err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
@@ -910,17 +1061,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+ hdmi->dvi = !tegra_output_is_hdmi(output);
if (!hdmi->dvi) {
- err = tegra_hdmi_setup_audio(hdmi, pclk);
+ err = tegra_hdmi_setup_audio(hdmi);
if (err < 0)
hdmi->dvi = true;
}
- if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
- /*
- * TODO: add ELD support
- */
- }
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_eld(hdmi);
rekey = HDMI_REKEY_DEFAULT;
value = HDMI_CTRL_REKEY(rekey);
@@ -932,20 +1081,17 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
- if (hdmi->dvi)
- tegra_hdmi_writel(hdmi, 0x0,
- HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- else
- tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
- HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ if (!hdmi->dvi) {
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_setup_avi_infoframe(hdmi, mode);
- tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_setup_stereo_infoframe(hdmi);
+ if (hdmi->stereo)
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+ }
/* TMDS CONFIG */
for (i = 0; i < hdmi->config->num_tmds; i++) {
- if (pclk <= hdmi->config->tmds[i].pclk) {
+ if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
break;
}
@@ -1032,6 +1178,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
tegra_dc_commit(dc);
+ if (!hdmi->dvi) {
+ tegra_hdmi_enable_avi_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+
+ if (hdmi->stereo)
+ tegra_hdmi_enable_stereo_infoframe(hdmi);
+ }
+
/* TODO: add HDCP support */
}
@@ -1236,8 +1391,14 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_INT_MASK);
+ DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
#undef DUMP_REG
@@ -1361,14 +1522,6 @@ static int tegra_hdmi_init(struct host1x_client *client)
return err;
}
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- reset_control_deassert(hdmi->rst);
-
return 0;
}
@@ -1378,9 +1531,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
tegra_output_exit(&hdmi->output);
- reset_control_assert(hdmi->rst);
- clk_disable_unprepare(hdmi->clk);
-
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
regulator_disable(hdmi->hdmi);
@@ -1402,6 +1552,8 @@ static const struct tegra_hdmi_config tegra20_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
+ .has_hda = false,
+ .has_hbr = false,
};
static const struct tegra_hdmi_config tegra30_hdmi_config = {
@@ -1410,6 +1562,8 @@ static const struct tegra_hdmi_config tegra30_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
+ .has_hda = true,
+ .has_hbr = false,
};
static const struct tegra_hdmi_config tegra114_hdmi_config = {
@@ -1418,6 +1572,8 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
};
static const struct tegra_hdmi_config tegra124_hdmi_config = {
@@ -1426,6 +1582,8 @@ static const struct tegra_hdmi_config tegra124_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
};
static const struct of_device_id tegra_hdmi_of_match[] = {
@@ -1437,6 +1595,67 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
+static void hda_format_parse(unsigned int format, unsigned int *rate,
+ unsigned int *channels)
+{
+ unsigned int mul, div;
+
+ if (format & AC_FMT_BASE_44K)
+ *rate = 44100;
+ else
+ *rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ *rate = *rate * (mul + 1) / (div + 1);
+
+ *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ u32 value;
+ int err;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
+
+ if (value & INT_CODEC_SCRATCH0) {
+ unsigned int format;
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int sample_rate, channels;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ hda_format_parse(format, &sample_rate, &channels);
+
+ hdmi->audio_sample_rate = sample_rate;
+ hdmi->audio_channels = channels;
+
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0) {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ } else {
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+ }
+ } else {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static int tegra_hdmi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1454,8 +1673,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->config = match->data;
hdmi->dev = &pdev->dev;
+
hdmi->audio_source = AUTO;
- hdmi->audio_freq = 44100;
+ hdmi->audio_sample_rate = 48000;
+ hdmi->audio_channels = 2;
hdmi->stereo = false;
hdmi->dvi = false;
@@ -1516,6 +1737,17 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->irq = err;
+ err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
+ dev_name(hdmi->dev), hdmi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hdmi->irq, err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+ pm_runtime_enable(&pdev->dev);
+
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
hdmi->client.dev = &pdev->dev;
@@ -1527,8 +1759,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, hdmi);
-
return 0;
}
@@ -1537,6 +1767,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1546,17 +1778,61 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
tegra_output_remove(&hdmi->output);
- clk_disable_unprepare(hdmi->clk_parent);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_hdmi_suspend(struct device *dev)
+{
+ struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
clk_disable_unprepare(hdmi->clk);
return 0;
}
+static int tegra_hdmi_resume(struct device *dev)
+{
+ struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ clk_disable_unprepare(hdmi->clk);
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
+};
+
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
- .owner = THIS_MODULE,
.of_match_table = tegra_hdmi_of_match,
+ .pm = &tegra_hdmi_pm_ops,
},
.probe = tegra_hdmi_probe,
.remove = tegra_hdmi_remove,
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index a882514389cd..2339f134a09a 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -468,9 +468,20 @@
#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
-#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define SOR_AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0 0xae
+#define SOR_AUDIO_SPARE0_HBR_ENABLE (1 << 27)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0 0xba
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1 0xbb
#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+#define SOR_AUDIO_HDA_PRESENSE_VALID (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PRESENT (1 << 0)
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
@@ -481,6 +492,14 @@
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+#define HDMI_NV_PDISP_INT_STATUS 0xcc
+#define INT_SCRATCH (1 << 3)
+#define INT_CP_REQUEST (1 << 2)
+#define INT_CODEC_SCRATCH1 (1 << 1)
+#define INT_CODEC_SCRATCH0 (1 << 0)
+#define HDMI_NV_PDISP_INT_MASK 0xcd
+#define HDMI_NV_PDISP_INT_ENABLE 0xce
+
#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 46664b622270..595d1ec3e02e 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -36,20 +36,13 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
if (edid) {
err = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
}
return err;
}
-struct drm_encoder *
-tegra_output_connector_best_encoder(struct drm_connector *connector)
-{
- struct tegra_output *output = connector_to_output(connector);
-
- return &output->encoder;
-}
-
enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force)
{
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index e246334e0252..a131b44e2d6f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector,
static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_rgb_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 757c6e8603af..74d0540b8d4c 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -7,11 +7,13 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -149,6 +151,8 @@ struct tegra_sor_soc {
const struct tegra_sor_hdmi_settings *settings;
unsigned int num_settings;
+
+ const u8 *xbar_cfg;
};
struct tegra_sor;
@@ -169,7 +173,9 @@ struct tegra_sor {
struct reset_control *rst;
struct clk *clk_parent;
+ struct clk *clk_brick;
struct clk *clk_safe;
+ struct clk *clk_src;
struct clk *clk_dp;
struct clk *clk;
@@ -190,6 +196,18 @@ struct tegra_sor {
struct regulator *hdmi_supply;
};
+struct tegra_sor_state {
+ struct drm_connector_state base;
+
+ unsigned int bpc;
+};
+
+static inline struct tegra_sor_state *
+to_sor_state(struct drm_connector_state *state)
+{
+ return container_of(state, struct tegra_sor_state, base);
+}
+
struct tegra_sor_config {
u32 bits_per_pixel;
@@ -225,6 +243,118 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
writel(value, sor->regs + (offset << 2));
}
+static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
+{
+ int err;
+
+ clk_disable_unprepare(sor->clk);
+
+ err = clk_set_parent(sor->clk, parent);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+struct tegra_clk_sor_brick {
+ struct clk_hw hw;
+ struct tegra_sor *sor;
+};
+
+static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_sor_brick, hw);
+}
+
+static const char * const tegra_clk_sor_brick_parents[] = {
+ "pll_d2_out0", "pll_dp"
+};
+
+static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_sor_brick *brick = to_brick(hw);
+ struct tegra_sor *sor = brick->sor;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ switch (index) {
+ case 0:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ break;
+
+ case 1:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ return 0;
+}
+
+static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_sor_brick *brick = to_brick(hw);
+ struct tegra_sor *sor = brick->sor;
+ u8 parent = U8_MAX;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+
+ switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
+ parent = 0;
+ break;
+
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
+ parent = 1;
+ break;
+ }
+
+ return parent;
+}
+
+static const struct clk_ops tegra_clk_sor_brick_ops = {
+ .set_parent = tegra_clk_sor_brick_set_parent,
+ .get_parent = tegra_clk_sor_brick_get_parent,
+};
+
+static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
+ const char *name)
+{
+ struct tegra_clk_sor_brick *brick;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
+ if (!brick)
+ return ERR_PTR(-ENOMEM);
+
+ brick->sor = sor;
+
+ init.name = name;
+ init.flags = 0;
+ init.parent_names = tegra_clk_sor_brick_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
+ init.ops = &tegra_clk_sor_brick_ops;
+
+ brick->hw.init = &init;
+
+ clk = devm_clk_register(sor->dev, &brick->hw);
+ if (IS_ERR(clk))
+ kfree(brick);
+
+ return clk;
+}
+
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
{
@@ -569,10 +699,10 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
return false;
}
-static int tegra_sor_calc_config(struct tegra_sor *sor,
- const struct drm_display_mode *mode,
- struct tegra_sor_config *config,
- struct drm_dp_link *link)
+static int tegra_sor_compute_config(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_config *config,
+ struct drm_dp_link *link)
{
const u64 f = 100000, link_rate = link->rate * 1000;
const u64 pclk = mode->clock * 1000;
@@ -661,6 +791,135 @@ static int tegra_sor_calc_config(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_apply_config(struct tegra_sor *sor,
+ const struct tegra_sor_config *config)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+ value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
+ value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+ value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
+
+ if (config->active_polarity)
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+ else
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+ value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+ value |= config->hblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+ value |= config->vblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+}
+
+static void tegra_sor_mode_set(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+ unsigned int vbe, vse, hbe, hse, vbs, hbs;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+ value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
+ switch (state->bpc) {
+ case 16:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
+ break;
+
+ case 12:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
+ break;
+
+ case 10:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
+ break;
+
+ case 8:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 6:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+ /* sync end = sync width - 1 */
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+ /* blank end = sync end + back porch */
+ vbe = vse + (mode->vtotal - mode->vsync_end);
+ hbe = hse + (mode->htotal - mode->hsync_end);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+ /* blank start = blank end + active */
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ /* XXX interlacing support */
+ tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
+}
+
static int tegra_sor_detach(struct tegra_sor *sor)
{
unsigned long value, timeout;
@@ -733,7 +992,8 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if ((value & SOR_PWR_TRIGGER) != 0)
return -ETIMEDOUT;
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
@@ -1038,6 +1298,22 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
sor->debugfs = NULL;
}
+static void tegra_sor_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ if (connector->state) {
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ kfree(connector->state);
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
static enum drm_connector_status
tegra_sor_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1050,13 +1326,28 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
return tegra_output_connector_detect(connector, force);
}
+static struct drm_connector_state *
+tegra_sor_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state = to_sor_state(connector->state);
+ struct tegra_sor_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
+
+ return &copy->base;
+}
+
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = tegra_sor_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -1081,13 +1372,16 @@ static enum drm_mode_status
tegra_sor_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ /* HDMI 2.0 modes are not yet supported */
+ if (mode->clock > 340000)
+ return MODE_NOCLOCK;
+
return MODE_OK;
}
static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
.get_modes = tegra_sor_connector_get_modes,
.mode_valid = tegra_sor_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
@@ -1141,8 +1435,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_unprepare(output->panel);
- reset_control_assert(sor->rst);
- clk_disable_unprepare(sor->clk);
+ pm_runtime_put(sor->dev);
}
#if 0
@@ -1192,19 +1485,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
+ struct tegra_sor_state *state;
struct drm_dp_link link;
u8 rate, lanes;
+ unsigned int i;
int err = 0;
u32 value;
- err = clk_prepare_enable(sor->clk);
- if (err < 0)
- dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ state = to_sor_state(output->connector.state);
- reset_control_deassert(sor->rst);
+ pm_runtime_get_sync(sor->dev);
if (output->panel)
drm_panel_prepare(output->panel);
@@ -1219,17 +1511,17 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
return;
}
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
- config.bits_per_pixel = output->connector.display_info.bpc * 3;
+ config.bits_per_pixel = state->bpc * 3;
- err = tegra_sor_calc_config(sor, mode, &config, &link);
+ err = tegra_sor_compute_config(sor, mode, &config, &link);
if (err < 0)
- dev_err(sor->dev, "failed to compute link configuration: %d\n",
- err);
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
@@ -1326,10 +1618,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, SOR_PLL2);
- /* switch to DP clock */
- err = clk_set_parent(sor->clk, sor->clk_dp);
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /* switch to DP parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
if (err < 0)
- dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to set parent clock: %d\n", err);
/* power DP lanes */
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
@@ -1375,13 +1675,11 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- /* set linkctl */
+ tegra_sor_apply_config(sor, &config);
+
+ /* enable link */
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
-
- value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
- value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
-
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
@@ -1394,35 +1692,6 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_DP_TPG);
- value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
- value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
- value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
-
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
-
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
-
- if (config.active_polarity)
- value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
- else
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-
- value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
- value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
- tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
-
- value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
- value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
- value |= config.hblank_symbols & 0xffff;
- tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-
- value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
- value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
- value |= config.vblank_symbols & 0xffff;
- tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-
/* enable pad calibration logic */
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
@@ -1478,75 +1747,19 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- /*
- * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
- * raster, associate with display controller)
- */
- value = SOR_STATE_ASY_PROTOCOL_DP_A |
- SOR_STATE_ASY_CRC_MODE_COMPLETE |
- SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- value &= ~SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- value |= SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- value &= ~SOR_STATE_ASY_VSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- value |= SOR_STATE_ASY_VSYNCPOL;
-
- switch (config.bits_per_pixel) {
- case 24:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
- break;
-
- case 18:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
- break;
-
- default:
- BUG();
- break;
- }
-
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- /*
- * TODO: The video timing programming below doesn't seem to match the
- * register definitions.
- */
-
- value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
- vse = mode->vsync_end - mode->vsync_start - 1;
- hse = mode->hsync_end - mode->hsync_start - 1;
-
- value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
- vbe = vse + (mode->vsync_start - mode->vdisplay);
- hbe = hse + (mode->hsync_start - mode->hdisplay);
-
- value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
- vbs = vbe + mode->vdisplay;
- hbs = hbe + mode->hdisplay;
-
- value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
- tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
-
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ tegra_sor_mode_set(sor, mode, state);
+
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
if (err < 0)
@@ -1578,11 +1791,15 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_sor_state *state = to_sor_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
+ struct drm_display_info *info;
int err;
+ info = &output->connector.display_info;
+
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
if (err < 0) {
@@ -1590,6 +1807,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return err;
}
+ switch (info->bpc) {
+ case 8:
+ case 6:
+ state->bpc = info->bpc;
+ break;
+
+ default:
+ DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
+ state->bpc = 8;
+ break;
+ }
+
return 0;
}
@@ -1752,9 +1981,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
- reset_control_assert(sor->rst);
- usleep_range(1000, 2000);
- clk_disable_unprepare(sor->clk);
+ pm_runtime_put(sor->dev);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -1762,26 +1989,21 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
struct tegra_output *output = encoder_to_output(encoder);
unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
struct tegra_sor_hdmi_settings *settings;
struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_state *state;
struct drm_display_mode *mode;
- struct drm_display_info *info;
+ unsigned int div, i;
u32 value;
int err;
+ state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
- info = &output->connector.display_info;
- err = clk_prepare_enable(sor->clk);
- if (err < 0)
- dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ pm_runtime_get_sync(sor->dev);
- usleep_range(1000, 2000);
-
- reset_control_deassert(sor->rst);
-
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
@@ -1877,22 +2099,20 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
tegra_sor_writel(sor, value, SOR_REFCLK);
- /* XXX don't hardcode */
- value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
- SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
- SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
- SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
- SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
- SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
- SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
- SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
- SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
- SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /* switch to parent clock */
+ err = clk_set_parent(sor->clk_src, sor->clk_parent);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set source clock: %d\n", err);
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_src);
if (err < 0)
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
@@ -2002,7 +2222,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value &= ~DITHER_CONTROL_MASK;
value &= ~BASE_COLOR_SIZE_MASK;
- switch (info->bpc) {
+ switch (state->bpc) {
case 6:
value |= BASE_COLOR_SIZE_666;
break;
@@ -2012,7 +2232,8 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
break;
default:
- WARN(1, "%u bits-per-color not supported\n", info->bpc);
+ WARN(1, "%u bits-per-color not supported\n", state->bpc);
+ value |= BASE_COLOR_SIZE_888;
break;
}
@@ -2022,83 +2243,19 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- /* configure mode */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
- value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
- value &= ~SOR_STATE_ASY_OWNER_MASK;
-
- value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
- SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- value &= ~SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- value |= SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- value &= ~SOR_STATE_ASY_VSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- value |= SOR_STATE_ASY_VSYNCPOL;
-
- switch (info->bpc) {
- case 8:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
- break;
-
- case 6:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
- break;
-
- default:
- BUG();
- break;
- }
-
- tegra_sor_writel(sor, value, SOR_STATE1);
-
+ /* configure dynamic range of output */
value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+ /* configure colorspace */
value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
- /*
- * TODO: The video timing programming below doesn't seem to match the
- * register definitions.
- */
-
- value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
- /* sync end = sync width - 1 */
- vse = mode->vsync_end - mode->vsync_start - 1;
- hse = mode->hsync_end - mode->hsync_start - 1;
-
- value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
- /* blank end = sync end + back porch */
- vbe = vse + (mode->vtotal - mode->vsync_end);
- hbe = hse + (mode->htotal - mode->hsync_end);
-
- value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
- /* blank start = blank end + active */
- vbs = vbe + mode->vdisplay;
- hbs = hbe + mode->hdisplay;
-
- value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
- tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+ tegra_sor_mode_set(sor, mode, state);
tegra_sor_update(sor);
@@ -2196,10 +2353,13 @@ static int tegra_sor_init(struct host1x_client *client)
* XXX: Remove this reset once proper hand-over from firmware to
* kernel is possible.
*/
- err = reset_control_assert(sor->rst);
- if (err < 0) {
- dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
- return err;
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+ err);
+ return err;
+ }
}
err = clk_prepare_enable(sor->clk);
@@ -2210,10 +2370,13 @@ static int tegra_sor_init(struct host1x_client *client)
usleep_range(1000, 3000);
- err = reset_control_deassert(sor->rst);
- if (err < 0) {
- dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
- return err;
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+ err);
+ return err;
+ }
}
err = clk_prepare_enable(sor->clk_safe);
@@ -2324,11 +2487,16 @@ static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.remove = tegra_sor_hdmi_remove,
};
+static const u8 tegra124_sor_xbar_cfg[5] = {
+ 0, 1, 2, 3, 4
+};
+
static const struct tegra_sor_soc tegra124_sor = {
.supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
};
static const struct tegra_sor_soc tegra210_sor = {
@@ -2336,6 +2504,11 @@ static const struct tegra_sor_soc tegra210_sor = {
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
};
static const struct tegra_sor_soc tegra210_sor1 = {
@@ -2346,6 +2519,8 @@ static const struct tegra_sor_soc tegra210_sor1 = {
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
+
+ .xbar_cfg = tegra210_sor_xbar_cfg,
};
static const struct of_device_id tegra_sor_of_match[] = {
@@ -2435,11 +2610,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst)) {
- err = PTR_ERR(sor->rst);
- dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
- goto remove;
+ if (!pdev->dev.pm_domain) {
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+ dev_err(&pdev->dev, "failed to get reset control: %d\n",
+ err);
+ goto remove;
+ }
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2449,6 +2627,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
+ sor->clk_src = devm_clk_get(&pdev->dev, "source");
+ if (IS_ERR(sor->clk_src)) {
+ err = PTR_ERR(sor->clk_src);
+ dev_err(sor->dev, "failed to get source clock: %d\n",
+ err);
+ goto remove;
+ }
+ }
+
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(sor->clk_parent)) {
err = PTR_ERR(sor->clk_parent);
@@ -2470,6 +2658,19 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ platform_set_drvdata(pdev, sor);
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
+ pm_runtime_put(&pdev->dev);
+
+ if (IS_ERR(sor->clk_brick)) {
+ err = PTR_ERR(sor->clk_brick);
+ dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ goto remove;
+ }
+
INIT_LIST_HEAD(&sor->client.list);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
@@ -2481,8 +2682,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- platform_set_drvdata(pdev, sor);
-
return 0;
remove:
@@ -2498,6 +2697,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
struct tegra_sor *sor = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -2516,10 +2717,62 @@ static int tegra_sor_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int tegra_sor_suspend(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sor->clk);
+
+ return 0;
+}
+
+static int tegra_sor_resume(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_sor_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+};
+
struct platform_driver tegra_sor_driver = {
.driver = {
.name = "tegra-sor",
.of_match_table = tegra_sor_of_match,
+ .pm = &tegra_sor_pm_ops,
},
.probe = tegra_sor_probe,
.remove = tegra_sor_remove,
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index 2d31d027e3f6..865c73b48968 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -27,6 +27,9 @@
#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_30_444 (0x6 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_36_444 (0x8 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_48_444 (0x9 << 17)
#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index f60a1ec84fa4..28fed7e206d0 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,7 +2,6 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 79027b1c64d3..107c8bd04f6d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
- drm_handle_vblank(dev, 0);
+ drm_crtc_handle_vblank(crtc);
if (!skip_event) {
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 709bc903524d..d27809372d54 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = {
.load = tilcdc_load,
.unload = tilcdc_unload,
.lastclose = tilcdc_lastclose,
- .set_busid = drm_platform_set_busid,
.irq_handler = tilcdc_irq,
.irq_preinstall = tilcdc_irq_preinstall,
.irq_postinstall = tilcdc_irq_postinstall,
@@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = tilcdc_enable_vblank,
.disable_vblank = tilcdc_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a71cf98c655f..42c074a9c955 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -146,10 +146,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
-
- if (bo->ttm)
- ttm_tt_destroy(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
+ fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -355,12 +354,14 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+ mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -396,8 +397,7 @@ moved:
out_err:
new_man = &bdev->man[bo->mem.mem_type];
- if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
- ttm_tt_unbind(bo->ttm);
+ if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -418,11 +418,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
if (bo->bdev->driver->move_notify)
bo->bdev->driver->move_notify(bo, NULL);
- if (bo->ttm) {
- ttm_tt_unbind(bo->ttm);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
ttm_bo_mem_put(bo, &bo->mem);
ww_mutex_unlock (&bo->resv->lock);
@@ -688,15 +685,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS) {
- pr_err("Failed to expire sync object before buffer eviction\n");
- }
- goto out;
- }
-
lockdep_assert_held(&bo->resv->lock.base);
evict_mem = bo->mem;
@@ -720,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait_gpu);
- if (ret) {
+ if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
@@ -800,6 +788,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
EXPORT_SYMBOL(ttm_bo_mem_put);
/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct fence *fence;
+ int ret;
+
+ spin_lock(&man->move_lock);
+ fence = fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ reservation_object_add_shared_fence(bo->resv, fence);
+
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (unlikely(ret))
+ return ret;
+
+ fence_put(bo->moving);
+ bo->moving = fence;
+ }
+
+ return 0;
+}
+
+/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
@@ -825,10 +841,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
if (unlikely(ret != 0))
return ret;
} while (1);
- if (mem->mm_node == NULL)
- return -ENOMEM;
mem->mem_type = mem_type;
- return 0;
+ return ttm_bo_add_move_fence(bo, man, mem);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -898,6 +912,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool has_erestartsys = false;
int i, ret;
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (unlikely(ret))
+ return ret;
+
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
@@ -931,9 +949,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
-
- if (mem->mm_node)
+
+ if (mem->mm_node) {
+ ret = ttm_bo_add_move_fence(bo, man, mem);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ return ret;
+ }
break;
+ }
}
if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -1000,20 +1024,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
lockdep_assert_held(&bo->resv->lock.base);
- /*
- * Don't wait for the BO on initial allocation. This is important when
- * the BO has an imported reservation object.
- */
- if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
- /*
- * FIXME: It's possible to pipeline buffer moves.
- * Have the driver move function wait for idle when necessary,
- * instead of doing it here.
- */
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
- if (ret)
- return ret;
- }
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
@@ -1166,7 +1176,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
- bo->priv_flags = 0;
+ bo->moving = NULL;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
@@ -1278,6 +1288,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
+ struct fence *fence;
int ret;
/*
@@ -1298,6 +1309,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
}
spin_unlock(&glob->lru_lock);
+
+ spin_lock(&man->move_lock);
+ fence = fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ ret = fence_wait(fence, false);
+ fence_put(fence);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ pr_err("Cleanup eviction failed\n");
+ }
+ }
+ }
+
return 0;
}
@@ -1317,6 +1345,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
+ fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1362,6 +1391,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
mutex_init(&man->io_reserve_mutex);
+ spin_lock_init(&man->move_lock);
INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1380,6 +1410,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
man->size = p_size;
INIT_LIST_HEAD(&man->lru);
+ man->move = NULL;
return 0;
}
@@ -1573,47 +1604,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- struct reservation_object_list *fobj;
- struct reservation_object *resv;
- struct fence *excl;
- long timeout = 15 * HZ;
- int i;
-
- resv = bo->resv;
- fobj = reservation_object_get_list(resv);
- excl = reservation_object_get_excl(resv);
- if (excl) {
- if (!fence_is_signaled(excl)) {
- if (no_wait)
- return -EBUSY;
-
- timeout = fence_wait_timeout(excl,
- interruptible, timeout);
- }
- }
-
- for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
- struct fence *fence;
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
-
- if (!fence_is_signaled(fence)) {
- if (no_wait)
- return -EBUSY;
-
- timeout = fence_wait_timeout(fence,
- interruptible, timeout);
- }
- }
+ long timeout = no_wait ? 0 : 15 * HZ;
+ timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
if (timeout == 0)
return -EBUSY;
- reservation_object_add_excl_fence(resv, NULL);
- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ reservation_object_add_excl_fence(bo->resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
@@ -1683,14 +1684,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
ttm_bo_list_ref_sub(bo, put_count, true);
/**
- * Wait for GPU, then move to system cached.
+ * Move to system cached
*/
- ret = ttm_bo_wait(bo, false, false);
-
- if (unlikely(ret != 0))
- goto out;
-
if ((bo->mem.placement & swap_placement) != swap_placement) {
struct ttm_mem_reg evict_mem;
@@ -1705,6 +1701,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
goto out;
}
+ /**
+ * Make sure BO is idle.
+ */
+
+ ret = ttm_bo_wait(bo, false, false);
+ if (unlikely(ret != 0))
+ goto out;
+
ttm_bo_unmap_virtual(bo);
/**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d9831559706e..f157a9efd220 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict,
+ bool evict, bool interruptible,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ pr_err("Failed to expire sync object before unbinding TTM\n");
+ return ret;
+ }
+
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
@@ -321,7 +329,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -337,6 +346,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
unsigned long add = 0;
int dir;
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
@@ -401,8 +414,7 @@ out2:
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
- ttm_tt_unbind(ttm);
+ if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
@@ -462,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ fbo->moving = NULL;
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
@@ -634,7 +647,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct fence *fence,
bool evict,
- bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -649,9 +661,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
- (bo->ttm != NULL)) {
- ttm_tt_unbind(bo->ttm);
+ if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -665,7 +675,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -694,3 +705,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+ struct fence *fence, bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+ struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+ int ret;
+
+ reservation_object_add_excl_fence(bo->resv, fence);
+
+ if (!evict) {
+ struct ttm_buffer_object *ghost_obj;
+
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
+
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
+
+ reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+ * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost_obj);
+ ttm_bo_unref(&ghost_obj);
+
+ } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+ /**
+ * BO doesn't have a TTM we need to bind/unbind. Just remember
+ * this eviction and free up the allocation
+ */
+
+ spin_lock(&from->move_lock);
+ if (!from->move || fence_is_later(fence, from->move)) {
+ fence_put(from->move);
+ from->move = fence_get(fence);
+ }
+ spin_unlock(&from->move_lock);
+
+ ttm_bo_free_old_node(bo);
+
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
+
+ } else {
+ /**
+ * Last resort, wait for the move to be completed.
+ *
+ * Should never happen in practice.
+ */
+
+ ret = ttm_bo_wait(bo, false, false);
+ if (ret)
+ return ret;
+
+ if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_free_old_node(bo);
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
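With ttm_bo_move_accel_cleanup() losing its no_wait_gpu argument and ttm_bo_pipeline_move() being added, a driver can now complete a move without stalling: the copy fence is attached to the BO, the ghost object and the per-manager move fence as implemented above. A minimal sketch of a driver .move callback using the new helper — mydrv_copy_buffer() is an assumed driver-specific helper that schedules the blit and returns its fence, not something this patch provides:

    /* Sketch only; error handling trimmed to the essentials. */
    static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
                             bool interruptible, bool no_wait_gpu,
                             struct ttm_mem_reg *new_mem)
    {
            struct fence *fence;
            int r;

            /* Assumed helper: kick off the GPU copy and return its fence. */
            fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
            if (IS_ERR(fence))
                    return PTR_ERR(fence);

            /* No wait here: ttm_bo_pipeline_move() hands the fence to the
             * ghost object / man->move machinery instead of blocking.
             */
            r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
            fence_put(fence);
            return r;
    }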
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3216878bced3..a6ed9d5e5167 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
{
int ret = 0;
- if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+ if (likely(!bo->moving))
goto out_unlock;
/*
* Quick non-stalling check for idle.
*/
- ret = ttm_bo_wait(bo, false, true);
- if (likely(ret == 0))
- goto out_unlock;
+ if (fence_is_signaled(bo->moving))
+ goto out_clear;
/*
* If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait(bo, true, false);
+ (void) fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = ttm_bo_wait(bo, true, false);
- if (unlikely(ret != 0))
+ ret = fence_wait(bo->moving, true);
+ if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
+ goto out_unlock;
+ }
+
+out_clear:
+ fence_put(bo->moving);
+ bo->moving = NULL;
out_unlock:
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 97542c35d6ef..bc5aa573f466 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -166,12 +166,10 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
void ttm_tt_destroy(struct ttm_tt *ttm)
{
- if (unlikely(ttm == NULL))
+ if (ttm == NULL)
return;
- if (ttm->state == tt_bound) {
- ttm_tt_unbind(ttm);
- }
+ ttm_tt_unbind(ttm);
if (ttm->state == tt_unbound)
ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 613ab0622d6e..1616ec4f4d84 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,12 +4,7 @@ config DRM_UDL
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c20408940cd0..17d34e0edbdd 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
- drm_connector_unregister_all(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_unplug_dev(dev);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b87afee44995..f92ea9579674 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
if (event)
- drm_send_vblank_event(dev, 0, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index e5a9d3aaf45f..59adcf8532dd 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
- *
- * Note that this is called with the struct_mutex held.
*/
void vc4_free_object(struct drm_gem_object *gem_bo)
{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0f18b76c7906..8fc2b731b59a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,12 +46,17 @@ struct vc4_crtc {
const struct vc4_crtc_data *data;
void __iomem *regs;
+ /* Timestamp at start of vblank irq - unaffected by lock delays. */
+ ktime_t t_vblank;
+
/* Which HVS channel we're using for our CRTC. */
int channel;
u8 lut_r[256];
u8 lut_g[256];
u8 lut_b[256];
+ /* Size in pixels of the COB memory allocated to this CRTC. */
+ u32 cob_size;
struct drm_pending_vblank_event *event;
};
@@ -146,6 +151,144 @@ int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
}
#endif
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ u32 val;
+ int fifo_lines;
+ int vblank_lines;
+ int ret = 0;
+
+ /*
+ * XXX Doesn't work well in interlaced mode yet, partially due
+ * to problems in vc4 kms or drm core interlaced mode handling,
+ * so disable for now in interlaced mode.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return ret;
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ /*
+ * Read vertical scanline which is currently composed for our
+ * pixelvalve by the HVS, and also the scaler status.
+ */
+ val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Vertical position of hvs composed scanline. */
+ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+ /* No hpos info available. */
+ if (hpos)
+ *hpos = 0;
+
+ /* This is the offset we need for translating hvs -> pv scanout pos. */
+ fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+
+ if (fifo_lines > 0)
+ ret |= DRM_SCANOUTPOS_VALID;
+
+ /* HVS more than fifo_lines into frame for compositing? */
+ if (*vpos > fifo_lines) {
+ /*
+ * We are in active scanout and can get some meaningful results
+ * from HVS. The actual PV scanout can not trail behind more
+ * than fifo_lines as that is the fifo's capacity. Assume that
+ * in active scanout the HVS and PV work in lockstep wrt. HVS
+ * refilling the fifo and PV consuming from the fifo, ie.
+ * whenever the PV consumes and frees up a scanline in the
+ * fifo, the HVS will immediately refill it, therefore
+ * incrementing vpos. Therefore we choose HVS read position -
+ * fifo size in scanlines as an estimate of the real scanout
+ * position of the PV.
+ */
+ *vpos -= fifo_lines + 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ *vpos /= 2;
+
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ return ret;
+ }
+
+ /*
+ * Less: This happens when we are in vblank and the HVS, after getting
+ * the VSTART restart signal from the PV, just started refilling its
+ * fifo with new lines from the top-most lines of the new framebuffers.
+ * The PV does not scan out in vblank, so does not remove lines from
+ * the fifo, so the fifo will be full quickly and the HVS has to pause.
+ * We can't get meaningful readings wrt. scanline position of the PV
+ * and need to make things up in an approximate but consistent way.
+ */
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+
+ if (flags & DRM_CALLED_FROM_VBLIRQ) {
+ /*
+ * Assume the irq handler got called close to first
+ * line of vblank, so PV has about a full vblank
+ * scanlines to go, and as a base timestamp use the
+ * one taken at entry into vblank irq handler, so it
+ * is not affected by random delays due to lock
+ * contention on event_lock or vblank_time lock in
+ * the core.
+ */
+ *vpos = -vblank_lines;
+
+ if (stime)
+ *stime = vc4_crtc->t_vblank;
+ if (etime)
+ *etime = vc4_crtc->t_vblank;
+
+ /*
+ * If the HVS fifo is not yet full then we know for certain
+ * we are at the very beginning of vblank, as the hvs just
+ * started refilling, and the stime and etime timestamps
+ * truly correspond to start of vblank.
+ */
+ if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ } else {
+ /*
+ * No clue where we are inside vblank. Return a vpos of zero,
+ * which will cause calling code to just return the etime
+ * timestamp uncorrected. At least this is no worse than the
+ * standard fallback.
+ */
+ *vpos = 0;
+ }
+
+ return ret;
+}
+
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ struct drm_crtc *crtc = &vc4_crtc->base;
+ struct drm_crtc_state *state = crtc->state;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
+ vblank_time, flags,
+ &state->adjusted_mode);
+}
+
static void vc4_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
@@ -175,20 +318,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
}
-static void
+static int
vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+ uint32_t size)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 i;
- for (i = start; i < start + size; i++) {
+ for (i = 0; i < size; i++) {
vc4_crtc->lut_r[i] = r[i] >> 8;
vc4_crtc->lut_g[i] = g[i] >> 8;
vc4_crtc->lut_b[i] = b[i] >> 8;
}
vc4_crtc_lut_load(crtc);
+
+ return 0;
}
static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +540,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
unsigned long flags;
+ const struct drm_plane_state *plane_state;
u32 dlist_count = 0;
int ret;
@@ -404,18 +550,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (hweight32(state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane(plane, state) {
- struct drm_plane_state *plane_state =
- state->state->plane_states[drm_plane_index(plane)];
-
- /* plane might not have changed, in which case take
- * current state:
- */
- if (!plane_state)
- plane_state = plane->state;
-
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
dlist_count += vc4_plane_dlist_size(plane_state);
- }
dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -526,6 +662,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
+ vc4_crtc->t_vblank = ktime_get();
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
drm_crtc_handle_vblank(&vc4_crtc->base);
vc4_crtc_handle_page_flip(vc4_crtc);
@@ -730,6 +867,22 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
}
}
+static void
+vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
+{
+ struct drm_device *drm = vc4_crtc->base.dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
+ /* Top/base are supposed to be 4-pixel aligned, but the
+ * Raspberry Pi firmware fills the low bits (which are
+ * presumably ignored).
+ */
+ u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+ vc4_crtc->cob_size = top - base + 4;
+}
+
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -806,6 +959,8 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
crtc->cursor = cursor_plane;
}
+ vc4_crtc_get_cob_allocation(vc4_crtc);
+
CRTC_WRITE(PV_INTEN, 0);
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
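For illustration of the scanout-position estimate added above (the numbers are assumed, not from the patch): with a 1920-pixel-wide mode and a COB allocation of 20480 pixels, fifo_lines = 20480 / 1920 = 10. An HVS read position of 500 is greater than fifo_lines, so the estimated PV scanout line is 500 - (10 + 1) = 489 and the call reports DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE. A read position of 10 or less would instead take the vblank branch and return -vblank_lines (when called from the vblank irq, using the t_vblank timestamp captured in the irq handler) or a vpos of 0 otherwise.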
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 9817dbfa4ac3..275fedbdbd9e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *
-vc4_dpi_connector_best_encoder(struct drm_connector *connector)
-{
- struct vc4_dpi_connector *dpi_connector =
- to_vc4_dpi_connector(connector);
- return dpi_connector->encoder;
-}
-
static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = vc4_dpi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
.get_modes = vc4_dpi_connector_get_modes,
- .best_encoder = vc4_dpi_connector_best_encoder,
};
static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
@@ -236,14 +227,12 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct vc4_dpi_connector *dpi_connector;
- int ret = 0;
dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
GFP_KERNEL);
- if (!dpi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!dpi_connector)
+ return ERR_PTR(-ENOMEM);
+
connector = &dpi_connector->base;
dpi_connector->encoder = dpi->encoder;
@@ -260,12 +249,6 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
drm_mode_connector_attach_encoder(connector, dpi->encoder);
return connector;
-
- fail:
- if (connector)
- vc4_dpi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 250ed7e3754c..8b42d31a7f0e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "drm_fb_cma_helper.h"
#include "uapi/drm/vc4_drm.h"
@@ -43,12 +44,54 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
return map;
}
+static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_get_param *args = data;
+ int ret;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ switch (args->param) {
+ case DRM_VC4_PARAM_V3D_IDENT0:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT0);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT1:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT1);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT2:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT2);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ args->value = true;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void vc4_lastclose(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->fbdev)
- drm_fbdev_cma_restore_mode(vc4->fbdev);
+ drm_fbdev_cma_restore_mode(vc4->fbdev);
}
static const struct file_operations vc4_drm_fops = {
@@ -74,6 +117,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -92,6 +136,8 @@ static struct drm_driver vc4_drm_driver = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_counter = drm_vblank_no_hw_counter,
+ .get_scanout_position = vc4_crtc_get_scanoutpos,
+ .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
@@ -99,7 +145,7 @@ static struct drm_driver vc4_drm_driver = {
#endif
.gem_create_object = vc4_create_object,
- .gem_free_object = vc4_free_object,
+ .gem_free_object_unlocked = vc4_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -176,7 +222,6 @@ static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm;
- struct drm_connector *connector;
struct vc4_dev *vc4;
int ret = 0;
@@ -196,8 +241,6 @@ static int vc4_drm_bind(struct device *dev)
vc4_bo_cache_init(drm);
drm_mode_config_init(drm);
- if (ret)
- goto unref;
vc4_gem_init(drm);
@@ -211,27 +254,14 @@ static int vc4_drm_bind(struct device *dev)
if (ret < 0)
goto unbind_all;
- /* Connector registration has to occur after DRM device
- * registration, because it creates sysfs entries based on the
- * DRM device.
- */
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- ret = drm_connector_register(connector);
- if (ret)
- goto unregister;
- }
-
vc4_kms_load(drm);
return 0;
-unregister:
- drm_dev_unregister(drm);
unbind_all:
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
-unref:
drm_dev_unref(drm);
vc4_bo_cache_destroy(drm);
return ret;
@@ -259,8 +289,8 @@ static const struct component_master_ops vc4_drm_ops = {
static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
&vc4_dpi_driver,
- &vc4_crtc_driver,
&vc4_hvs_driver,
+ &vc4_crtc_driver,
&vc4_v3d_driver,
};
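The new GET_PARAM ioctl exposes the V3D identity registers and the SUPPORTS_BRANCHES flag to userspace. A minimal userspace sketch, assuming the DRM_IOCTL_VC4_GET_PARAM wrapper and struct drm_vc4_get_param from the vc4 uapi header (neither is shown in this hunk) and an assumed render node path:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <xf86drm.h>
    #include "vc4_drm.h"   /* uapi header with the param ioctl definitions */

    int main(void)
    {
            struct drm_vc4_get_param p;
            int fd = open("/dev/dri/renderD128", O_RDWR);  /* assumed node */

            if (fd < 0)
                    return 1;

            memset(&p, 0, sizeof(p));   /* the kernel rejects a non-zero pad */
            p.param = DRM_VC4_PARAM_SUPPORTS_BRANCHES;

            if (drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p) != 0)
                    return 1;

            printf("branches supported: %llu\n", (unsigned long long)p.value);
            return 0;
    }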
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 37cac59401d7..489e3de0c050 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -355,6 +355,9 @@ struct vc4_validated_shader_info {
uint32_t uniforms_src_size;
uint32_t num_texture_samples;
struct vc4_texture_sample_info *texture_samples;
+
+ uint32_t num_uniform_addr_offsets;
+ uint32_t *uniform_addr_offsets;
};
/**
@@ -415,6 +418,13 @@ extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
@@ -469,7 +479,7 @@ int vc4_kms_load(struct drm_device *dev);
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
-u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 46899d6de675..6155e8aca1c6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
unsigned int i;
- mutex_lock(&dev->struct_mutex);
for (i = 0; i < state->user_state.bo_count; i++)
- drm_gem_object_unreference(state->bo[i]);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(state->bo[i]);
kfree(state);
}
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
- /* Need the struct lock for drm_gem_object_unreference(). */
- mutex_lock(&dev->struct_mutex);
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_unreference(&exec->bo[i]->base);
+ drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
kfree(exec->bo);
}
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_unreference(&bo->base.base);
+ drm_gem_object_unreference_unlocked(&bo->base.base);
}
- mutex_unlock(&dev->struct_mutex);
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fd2644d231ff..4452f3631cac 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *
-vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct vc4_hdmi_connector *hdmi_connector =
- to_vc4_hdmi_connector(connector);
- return hdmi_connector->encoder;
-}
-
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = vc4_hdmi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
.get_modes = vc4_hdmi_connector_get_modes,
- .best_encoder = vc4_hdmi_connector_best_encoder,
};
static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
@@ -465,12 +456,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hdmi->hd_regs))
return PTR_ERR(hdmi->hd_regs);
- ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
- if (!ddc_node) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
- return -ENODEV;
- }
-
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
DRM_ERROR("Failed to get pixel clock\n");
@@ -482,7 +467,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hdmi->hsm_clock);
}
+ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (!ddc_node) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ return -ENODEV;
+ }
+
hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
if (!hdmi->ddc) {
DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 861a623bc185..4ac894d993cd 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->fbdev)
- drm_fbdev_cma_hotplug_event(vc4->fbdev);
+ drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
struct vc4_commit {
@@ -111,6 +110,8 @@ static int vc4_atomic_commit(struct drm_device *dev,
int i;
uint64_t wait_seqno = 0;
struct vc4_commit *c;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_state;
c = commit_init(state);
if (!c)
@@ -138,13 +139,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
return ret;
}
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
+ for_each_plane_in_state(state, plane, new_state, i) {
if ((plane->state->fb != new_state->fb) && new_state->fb) {
struct drm_gem_cma_object *cma_bo =
drm_fb_cma_get_gem_obj(new_state->fb, 0);
@@ -160,7 +155,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* Everything below can be run asynchronously without the need to grab
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4037b52fde31..29e4b400e25e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -94,6 +94,14 @@ static const struct hvs_format {
.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
},
{
+ .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
+ },
+ {
+ .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
+ },
+ {
.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
},
@@ -690,9 +698,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
return vc4_state->dlist_count;
}
-u32 vc4_plane_dlist_size(struct drm_plane_state *state)
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ const struct vc4_plane_state *vc4_state =
+ container_of(state, typeof(*vc4_state), base);
return vc4_state->dlist_count;
}
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
index d5c2f3c85ebb..f4e795a0d3f6 100644
--- a/drivers/gpu/drm/vc4/vc4_qpu_defines.h
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -70,7 +70,7 @@ enum qpu_raddr {
QPU_R_ELEM_QPU = 38,
QPU_R_NOP,
QPU_R_XY_PIXEL_COORD = 41,
- QPU_R_MS_REV_FLAGS = 41,
+ QPU_R_MS_REV_FLAGS = 42,
QPU_R_VPM = 48,
QPU_R_VPM_LD_BUSY,
QPU_R_VPM_LD_WAIT,
@@ -230,6 +230,15 @@ enum qpu_unpack_r4 {
#define QPU_COND_MUL_SHIFT 46
#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
+#define QPU_BRANCH_COND_SHIFT 52
+#define QPU_BRANCH_COND_MASK QPU_MASK(55, 52)
+
+#define QPU_BRANCH_REL ((uint64_t)1 << 51)
+#define QPU_BRANCH_REG ((uint64_t)1 << 50)
+
+#define QPU_BRANCH_RADDR_A_SHIFT 45
+#define QPU_BRANCH_RADDR_A_MASK QPU_MASK(49, 45)
+
#define QPU_SF ((uint64_t)1 << 45)
#define QPU_WADDR_ADD_SHIFT 38
@@ -261,4 +270,10 @@ enum qpu_unpack_r4 {
#define QPU_OP_ADD_SHIFT 24
#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
+#define QPU_LOAD_IMM_SHIFT 0
+#define QPU_LOAD_IMM_MASK QPU_MASK(31, 0)
+
+#define QPU_BRANCH_TARGET_SHIFT 0
+#define QPU_BRANCH_TARGET_MASK QPU_MASK(31, 0)
+
#endif /* VC4_QPU_DEFINES_H */
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index f99eece4cc97..160942a9180e 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -366,7 +366,6 @@
# define SCALER_DISPBKGND_FILL BIT(24)
#define SCALER_DISPSTAT0 0x00000048
-#define SCALER_DISPBASE0 0x0000004c
# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30)
# define SCALER_DISPSTATX_MODE_SHIFT 30
# define SCALER_DISPSTATX_MODE_DISABLED 0
@@ -375,6 +374,24 @@
# define SCALER_DISPSTATX_MODE_EOF 3
# define SCALER_DISPSTATX_FULL BIT(29)
# define SCALER_DISPSTATX_EMPTY BIT(28)
+# define SCALER_DISPSTATX_FRAME_COUNT_MASK VC4_MASK(17, 12)
+# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT 12
+# define SCALER_DISPSTATX_LINE_MASK VC4_MASK(11, 0)
+# define SCALER_DISPSTATX_LINE_SHIFT 0
+
+#define SCALER_DISPBASE0 0x0000004c
+/* Last pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel. Must be 4-pixel aligned (and thus 4 pixels less than the
+ * next COB base).
+ */
+# define SCALER_DISPBASEX_TOP_MASK VC4_MASK(31, 16)
+# define SCALER_DISPBASEX_TOP_SHIFT 16
+/* First pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel. Must be 4-pixel aligned.
+ */
+# define SCALER_DISPBASEX_BASE_MASK VC4_MASK(15, 0)
+# define SCALER_DISPBASEX_BASE_SHIFT 0
+
#define SCALER_DISPCTRL1 0x00000050
#define SCALER_DISPBKGND1 0x00000054
#define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \
@@ -385,6 +402,9 @@
(x) * (SCALER_DISPSTAT1 - \
SCALER_DISPSTAT0))
#define SCALER_DISPBASE1 0x0000005c
+#define SCALER_DISPBASEX(x) (SCALER_DISPBASE0 + \
+ (x) * (SCALER_DISPBASE1 - \
+ SCALER_DISPBASE0))
#define SCALER_DISPCTRL2 0x00000060
#define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \
(x) * (SCALER_DISPCTRL1 - \
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 24c2c746e8f3..9ce1d0adf882 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -802,7 +802,7 @@ validate_gl_shader_rec(struct drm_device *dev,
uint32_t src_offset = *(uint32_t *)(pkt_u + o);
uint32_t *texture_handles_u;
void *uniform_data_u;
- uint32_t tex;
+ uint32_t tex, uni;
*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
@@ -840,6 +840,17 @@ validate_gl_shader_rec(struct drm_device *dev,
}
}
+ /* Fill in the uniform slots that need this shader's
+ * start-of-uniforms address (used for resetting the uniform
+ * stream in the presence of control flow).
+ */
+ for (uni = 0;
+ uni < validated_shader->num_uniform_addr_offsets;
+ uni++) {
+ uint32_t o = validated_shader->uniform_addr_offsets[uni];
+ ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
+ }
+
*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
exec->uniforms_u += validated_shader->uniforms_src_size;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index f67124b4c534..46527e989ce3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -39,7 +39,17 @@
#include "vc4_drv.h"
#include "vc4_qpu_defines.h"
+#define LIVE_REG_COUNT (32 + 32 + 4)
+
struct vc4_shader_validation_state {
+ /* Current IP being validated. */
+ uint32_t ip;
+
+ /* IP at the end of the BO, do not read shader[max_ip] */
+ uint32_t max_ip;
+
+ uint64_t *shader;
+
struct vc4_texture_sample_info tmu_setup[2];
int tmu_write_count[2];
@@ -49,8 +59,30 @@ struct vc4_shader_validation_state {
*
* This is used for the validation of direct address memory reads.
*/
- uint32_t live_min_clamp_offsets[32 + 32 + 4];
- bool live_max_clamp_regs[32 + 32 + 4];
+ uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
+ bool live_max_clamp_regs[LIVE_REG_COUNT];
+ uint32_t live_immediates[LIVE_REG_COUNT];
+
+ /* Bitfield of which IPs are used as branch targets.
+ *
+ * Used for validation that the uniform stream is updated at the right
+ * points and clearing the texturing/clamping state.
+ */
+ unsigned long *branch_targets;
+
+ /* Set when entering a basic block, and cleared when the uniform
+ * address update is found. This is used to make sure that we don't
+ * read uniforms when the address is undefined.
+ */
+ bool needs_uniform_address_update;
+
+ /* Set when we find a backwards branch. If the branch is backwards,
+ * the target is probably doing an address reset to read uniforms,
+ * and so we need to be sure that a uniforms address is present in the
+ * stream, even if the shader didn't need to read uniforms in later
+ * basic blocks.
+ */
+ bool needs_uniform_address_for_loop;
};
static uint32_t
@@ -129,11 +161,11 @@ record_texture_sample(struct vc4_validated_shader_info *validated_shader,
}
static bool
-check_tmu_write(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+check_tmu_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
@@ -162,7 +194,7 @@ check_tmu_write(uint64_t inst,
return false;
}
- /* We assert that the the clamped address is the first
+ /* We assert that the clamped address is the first
* argument, and the UBO base address is the second argument.
* This is arbitrary, but simpler than supporting flipping the
* two either way.
@@ -212,8 +244,14 @@ check_tmu_write(uint64_t inst,
/* Since direct uses a RADDR uniform reference, it will get counted in
* check_instruction_reads()
*/
- if (!is_direct)
+ if (!is_direct) {
+ if (validation_state->needs_uniform_address_update) {
+ DRM_ERROR("Texturing with undefined uniform address\n");
+ return false;
+ }
+
validated_shader->uniforms_size += 4;
+ }
if (submit) {
if (!record_texture_sample(validated_shader,
@@ -227,23 +265,138 @@ check_tmu_write(uint64_t inst,
return true;
}
+static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
+{
+ uint32_t o = validated_shader->num_uniform_addr_offsets;
+ uint32_t num_uniforms = validated_shader->uniforms_size / 4;
+
+ validated_shader->uniform_addr_offsets =
+ krealloc(validated_shader->uniform_addr_offsets,
+ (o + 1) *
+ sizeof(*validated_shader->uniform_addr_offsets),
+ GFP_KERNEL);
+ if (!validated_shader->uniform_addr_offsets)
+ return false;
+
+ validated_shader->uniform_addr_offsets[o] = num_uniforms;
+ validated_shader->num_uniform_addr_offsets++;
+
+ return true;
+}
+
static bool
-check_reg_write(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint64_t inst = validation_state->shader[validation_state->ip];
+ u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ u32 add_lri = raddr_add_a_to_live_reg_index(inst);
+ /* We want our reset to be pointing at whatever uniform follows the
+ * uniforms base address.
+ */
+ u32 expected_offset = validated_shader->uniforms_size + 4;
+
+ /* We only support absolute uniform address changes, and we
+ * require that they be in the current basic block before any
+ * of its uniform reads.
+ *
+ * One could potentially emit more efficient QPU code, by
+ * noticing that (say) an if statement does uniform control
+ * flow for all threads and that the if reads the same number
+ * of uniforms on each side. However, this scheme is easy to
+ * validate so it's all we allow for now.
+ */
+
+ if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+ DRM_ERROR("uniforms address change must be "
+ "normal math\n");
+ return false;
+ }
+
+ if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+ DRM_ERROR("Uniform address reset must be an ADD.\n");
+ return false;
+ }
+
+ if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
+ DRM_ERROR("Uniform address reset must be unconditional.\n");
+ return false;
+ }
+
+ if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
+ !(inst & QPU_PM)) {
+ DRM_ERROR("No packing allowed on uniforms reset\n");
+ return false;
+ }
+
+ if (add_lri == -1) {
+ DRM_ERROR("First argument of uniform address write must be "
+ "an immediate value.\n");
+ return false;
+ }
+
+ if (validation_state->live_immediates[add_lri] != expected_offset) {
+ DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+ validation_state->live_immediates[add_lri],
+ expected_offset);
+ return false;
+ }
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("Second argument of uniform address write must be "
+ "a uniform.\n");
+ return false;
+ }
+
+ validation_state->needs_uniform_address_update = false;
+ validation_state->needs_uniform_address_for_loop = false;
+ return require_uniform_address_uniform(validated_shader);
+}
+
+static bool
+check_reg_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ bool ws = inst & QPU_WS;
+ bool is_b = is_mul ^ ws;
+ u32 lri = waddr_to_live_reg_index(waddr, is_b);
+
+ if (lri != -1) {
+ uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+ uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
+
+ if (sig == QPU_SIG_LOAD_IMM &&
+ QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
+ ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
+ (!is_mul && cond_add == QPU_COND_ALWAYS))) {
+ validation_state->live_immediates[lri] =
+ QPU_GET_FIELD(inst, QPU_LOAD_IMM);
+ } else {
+ validation_state->live_immediates[lri] = ~0;
+ }
+ }
switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS:
- /* XXX: We'll probably need to support this for reladdr, but
- * it's definitely a security-related one.
- */
- DRM_ERROR("uniforms address load unsupported\n");
- return false;
+ if (is_b) {
+ DRM_ERROR("relative uniforms address change "
+ "unsupported\n");
+ return false;
+ }
+
+ return validate_uniform_address_write(validated_shader,
+ validation_state,
+ is_mul);
case QPU_W_TLB_COLOR_MS:
case QPU_W_TLB_COLOR_ALL:
@@ -261,7 +414,7 @@ check_reg_write(uint64_t inst,
case QPU_W_TMU1_T:
case QPU_W_TMU1_R:
case QPU_W_TMU1_B:
- return check_tmu_write(inst, validated_shader, validation_state,
+ return check_tmu_write(validated_shader, validation_state,
is_mul);
case QPU_W_HOST_INT:
@@ -294,10 +447,10 @@ check_reg_write(uint64_t inst,
}
static void
-track_live_clamps(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+track_live_clamps(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
@@ -369,10 +522,10 @@ track_live_clamps(uint64_t inst,
}
static bool
-check_instruction_writes(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
bool ok;
@@ -382,20 +535,44 @@ check_instruction_writes(uint64_t inst,
return false;
}
- ok = (check_reg_write(inst, validated_shader, validation_state,
- false) &&
- check_reg_write(inst, validated_shader, validation_state,
- true));
+ ok = (check_reg_write(validated_shader, validation_state, false) &&
+ check_reg_write(validated_shader, validation_state, true));
- track_live_clamps(inst, validated_shader, validation_state);
+ track_live_clamps(validated_shader, validation_state);
return ok;
}
static bool
-check_instruction_reads(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader)
+check_branch(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ int ip)
+{
+ int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+
+ if ((int)branch_imm < 0)
+ validation_state->needs_uniform_address_for_loop = true;
+
+ /* We don't want to have to worry about validation of this, and
+ * there's no need for it.
+ */
+ if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
+ DRM_ERROR("branch instruction at %d wrote a register.\n",
+ validation_state->ip);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
@@ -407,40 +584,204 @@ check_instruction_reads(uint64_t inst,
* already be OOM.
*/
validated_shader->uniforms_size += 4;
+
+ if (validation_state->needs_uniform_address_update) {
+ DRM_ERROR("Uniform read with undefined uniform "
+ "address\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Make sure that all branches are absolute and point within the shader, and
+ * note their targets for later.
+ */
+static bool
+vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t max_branch_target = 0;
+ bool found_shader_end = false;
+ int ip;
+ int shader_end_ip = 0;
+ int last_branch = -2;
+
+ for (ip = 0; ip < validation_state->max_ip; ip++) {
+ uint64_t inst = validation_state->shader[ip];
+ int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ uint32_t after_delay_ip = ip + 4;
+ uint32_t branch_target_ip;
+
+ if (sig == QPU_SIG_PROG_END) {
+ shader_end_ip = ip;
+ found_shader_end = true;
+ continue;
+ }
+
+ if (sig != QPU_SIG_BRANCH)
+ continue;
+
+ if (ip - last_branch < 4) {
+ DRM_ERROR("Branch at %d during delay slots\n", ip);
+ return false;
+ }
+ last_branch = ip;
+
+ if (inst & QPU_BRANCH_REG) {
+ DRM_ERROR("branching from register relative "
+ "not supported\n");
+ return false;
+ }
+
+ if (!(inst & QPU_BRANCH_REL)) {
+ DRM_ERROR("relative branching required\n");
+ return false;
+ }
+
+ /* The actual branch target is the instruction after the delay
+ * slots, plus whatever byte offset is in the low 32 bits of
+ * the instruction. Make sure we're not branching beyond the
+ * end of the shader object.
+ */
+ if (branch_imm % sizeof(inst) != 0) {
+ DRM_ERROR("branch target not aligned\n");
+ return false;
+ }
+
+ branch_target_ip = after_delay_ip + (branch_imm >> 3);
+ if (branch_target_ip >= validation_state->max_ip) {
+ DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+ ip, branch_target_ip,
+ validation_state->max_ip);
+ return false;
+ }
+ set_bit(branch_target_ip, validation_state->branch_targets);
+
+ /* Make sure that the non-branching path is also not outside
+ * the shader.
+ */
+ if (after_delay_ip >= validation_state->max_ip) {
+ DRM_ERROR("Branch at %d continues past shader end "
+ "(%d/%d)\n",
+ ip, after_delay_ip, validation_state->max_ip);
+ return false;
+ }
+ set_bit(after_delay_ip, validation_state->branch_targets);
+ max_branch_target = max(max_branch_target, after_delay_ip);
+
+ /* There are two delay slots after program end is signaled
+ * that are still executed; after those, we're finished.
+ */
+ if (found_shader_end && ip == shader_end_ip + 2)
+ break;
+ }
+
+ if (max_branch_target > shader_end_ip) {
+ DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+ return false;
}
return true;
}
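To make the arithmetic in vc4_validate_branches() concrete: QPU instructions are 8 bytes, a branch has three delay slots (so execution resumes at ip + 4), and the branch immediate is a byte offset, hence the >> 3. A standalone sketch, not part of the patch, with made-up example values:

/* Illustration of the branch-target computation used above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t branch_target(uint32_t ip, int32_t branch_imm)
{
	uint32_t after_delay_ip = ip + 4;		/* skip the three delay slots */

	return after_delay_ip + (branch_imm >> 3);	/* byte offset -> instruction index */
}

int main(void)
{
	/* A branch at ip 10 with a +16-byte immediate lands at ip 16. */
	printf("%u\n", branch_target(10, 16));
	return 0;
}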
+/* Resets any known state for the shader, used when we may be branched to from
+ * multiple locations in the program (or at shader start).
+ */
+static void
+reset_validation_state(struct vc4_shader_validation_state *validation_state)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
+
+ for (i = 0; i < LIVE_REG_COUNT; i++) {
+ validation_state->live_min_clamp_offsets[i] = ~0;
+ validation_state->live_max_clamp_regs[i] = false;
+ validation_state->live_immediates[i] = ~0;
+ }
+}
+
+static bool
+texturing_in_progress(struct vc4_shader_validation_state *validation_state)
+{
+ return (validation_state->tmu_write_count[0] != 0 ||
+ validation_state->tmu_write_count[1] != 0);
+}
+
+static bool
+vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t ip = validation_state->ip;
+
+ if (!test_bit(ip, validation_state->branch_targets))
+ return true;
+
+ if (texturing_in_progress(validation_state)) {
+ DRM_ERROR("Branch target landed during TMU setup\n");
+ return false;
+ }
+
+ /* Reset our live values tracking, since this instruction may have
+ * multiple predecessors.
+ *
+ * One could potentially do analysis to determine that, for
+ * example, all predecessors have a live max clamp in the same
+ * register, but we don't bother with that.
+ */
+ reset_validation_state(validation_state);
+
+ /* Since we've entered a basic block from potentially multiple
+ * predecessors, we need the uniforms address to be updated before any
+ * uniforms are read. We require that after any branch point, the next
+ * uniform to be loaded is a uniform address offset. That uniform's
+ * offset will be marked by the uniform address register write
+ * validation, or a one-off check at the end of the program.
+ */
+ validation_state->needs_uniform_address_update = true;
+
+ return true;
+}
+
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
- uint32_t ip, max_ip;
- uint64_t *shader;
- struct vc4_validated_shader_info *validated_shader;
+ uint32_t ip;
+ struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
- int i;
memset(&validation_state, 0, sizeof(validation_state));
+ validation_state.shader = shader_obj->vaddr;
+ validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
- for (i = 0; i < 8; i++)
- validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
- for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
- validation_state.live_min_clamp_offsets[i] = ~0;
+ reset_validation_state(&validation_state);
- shader = shader_obj->vaddr;
- max_ip = shader_obj->base.size / sizeof(uint64_t);
+ validation_state.branch_targets =
+ kcalloc(BITS_TO_LONGS(validation_state.max_ip),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!validation_state.branch_targets)
+ goto fail;
validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
if (!validated_shader)
- return NULL;
+ goto fail;
+
+ if (!vc4_validate_branches(&validation_state))
+ goto fail;
- for (ip = 0; ip < max_ip; ip++) {
- uint64_t inst = shader[ip];
+ for (ip = 0; ip < validation_state.max_ip; ip++) {
+ uint64_t inst = validation_state.shader[ip];
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ validation_state.ip = ip;
+
+ if (!vc4_handle_branch_target(&validation_state))
+ goto fail;
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -450,13 +791,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
- if (!check_instruction_writes(inst, validated_shader,
+ if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
goto fail;
}
- if (!check_instruction_reads(inst, validated_shader))
+ if (!check_instruction_reads(validated_shader,
+ &validation_state))
goto fail;
if (sig == QPU_SIG_PROG_END) {
@@ -467,13 +809,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
break;
case QPU_SIG_LOAD_IMM:
- if (!check_instruction_writes(inst, validated_shader,
+ if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
goto fail;
}
break;
+ case QPU_SIG_BRANCH:
+ if (!check_branch(inst, validated_shader,
+ &validation_state, ip))
+ goto fail;
+ break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip);
@@ -487,13 +834,28 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
break;
}
- if (ip == max_ip) {
+ if (ip == validation_state.max_ip) {
DRM_ERROR("shader failed to terminate before "
"shader BO end at %zd\n",
shader_obj->base.size);
goto fail;
}
+ /* If we did a backwards branch and we haven't emitted a uniforms
+ * reset since then, we still need the uniforms stream to have the
+ * uniforms address available so that the backwards branch can do its
+ * uniforms reset.
+ *
+ * We could potentially prove that the backwards branch doesn't
+ * contain any uses of uniforms until program exit, but that doesn't
+ * seem to be worth the trouble.
+ */
+ if (validation_state.needs_uniform_address_for_loop) {
+ if (!require_uniform_address_uniform(validated_shader))
+ goto fail;
+ validated_shader->uniforms_size += 4;
+ }
+
/* Again, no chance of integer overflow here because the worst case
* scenario is 8 bytes of uniforms plus handles per 8-byte
* instruction.
@@ -502,9 +864,12 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
(validated_shader->uniforms_size +
4 * validated_shader->num_texture_samples);
+ kfree(validation_state.branch_targets);
+
return validated_shader;
fail:
+ kfree(validation_state.branch_targets);
if (validated_shader) {
kfree(validated_shader->texture_samples);
kfree(validated_shader);
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 3f4c7b842028..bfcdea1330e6 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 341f9be3dde6..c15bafb06665 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -42,81 +42,38 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
- drm_gem_put_pages(&obj->base, obj->pages, false, false);
- obj->pages = NULL;
-}
-
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
- drm_gem_free_mmap_offset(obj);
-
- if (vgem_obj->use_dma_buf && obj->dma_buf) {
- dma_buf_put(obj->dma_buf);
- obj->dma_buf = NULL;
- }
-
drm_gem_object_release(obj);
-
- if (vgem_obj->pages)
- vgem_gem_put_pages(vgem_obj);
-
- vgem_obj->pages = NULL;
-
kfree(vgem_obj);
}
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
- struct page **pages;
-
- if (obj->pages || obj->use_dma_buf)
- return 0;
-
- pages = drm_gem_get_pages(&obj->base);
- if (IS_ERR(pages)) {
- return PTR_ERR(pages);
- }
-
- obj->pages = pages;
-
- return 0;
-}
-
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
- loff_t num_pages;
- pgoff_t page_offset;
- int ret;
-
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
- PAGE_SHIFT;
-
- num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
- if (page_offset > num_pages)
- return VM_FAULT_SIGBUS;
-
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- obj->pages[page_offset]);
- switch (ret) {
- case 0:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -EBUSY:
- return VM_FAULT_RETRY;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
- default:
- WARN_ON(1);
- return VM_FAULT_SIGBUS;
+ unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ struct page *page;
+
+ page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+ (vaddr - vma->vm_start) >> PAGE_SHIFT);
+ if (!IS_ERR(page)) {
+ vmf->page = page;
+ return 0;
+ } else switch (PTR_ERR(page)) {
+ case -ENOSPC:
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON_ONCE(PTR_ERR(page));
+ return VM_FAULT_SIGBUS;
}
}
@@ -126,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct vgem_file *vfile;
+ int ret;
+
+ vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+ if (!vfile)
+ return -ENOMEM;
+
+ file->driver_priv = vfile;
+
+ ret = vgem_fence_open(vfile);
+ if (ret) {
+ kfree(vfile);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct vgem_file *vfile = file->driver_priv;
+
+ vgem_fence_close(vfile);
+ kfree(vfile);
+}
+
/* ioctls */
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -134,57 +119,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
- struct drm_gem_object *gem_object;
- int err;
-
- size = roundup(size, PAGE_SIZE);
+ int ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
- gem_object = &obj->base;
-
- err = drm_gem_object_init(dev, gem_object, size);
- if (err)
- goto out;
-
- err = vgem_gem_get_pages(obj);
- if (err)
- goto out;
-
- err = drm_gem_handle_create(file, gem_object, handle);
- if (err)
- goto handle_out;
+ ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+ if (ret)
+ goto err_free;
- drm_gem_object_unreference_unlocked(gem_object);
+ ret = drm_gem_handle_create(file, &obj->base, handle);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ goto err;
- return gem_object;
+ return &obj->base;
-handle_out:
- drm_gem_object_release(gem_object);
-out:
+err_free:
kfree(obj);
- return ERR_PTR(err);
+err:
+ return ERR_PTR(ret);
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct drm_gem_object *gem_object;
- uint64_t size;
- uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ u64 pitch, size;
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
size = args->height * pitch;
if (size == 0)
return -EINVAL;
gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
- if (IS_ERR(gem_object)) {
- DRM_DEBUG_DRIVER("object creation failed\n");
+ if (IS_ERR(gem_object))
return PTR_ERR(gem_object);
- }
args->size = gem_object->size;
args->pitch = pitch;
@@ -194,26 +165,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
{
- int ret = 0;
struct drm_gem_object *obj;
+ int ret;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
+ if (!obj->filp) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto unref;
- BUG_ON(!obj->filp);
-
- obj->filp->private_data = obj;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
-
unref:
drm_gem_object_unreference_unlocked(obj);
@@ -221,26 +192,134 @@ unref:
}
static struct drm_ioctl_desc vgem_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long flags = vma->vm_flags;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ /* Keep the WC mmapping set by drm_gem_mmap() but our pages
+ * are ordinary and not special.
+ */
+ vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+ return 0;
+}
+
static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .mmap = drm_gem_mmap,
+ .mmap = vgem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
};
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+ long n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages;
+
+ /* Flush the object from the CPU cache so that importers can rely
+ * on coherent indirect access via the exported dma-address.
+ */
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ drm_clflush_pages(pages, n_pages);
+ drm_gem_put_pages(obj, pages, true, false);
+
+ return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct sg_table *st;
+ struct page **pages;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+
+ st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+ drm_gem_put_pages(obj, pages, false, false);
+
+ return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+ long n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages;
+ void *addr;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return NULL;
+
+ addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+ drm_gem_put_pages(obj, pages, false, false);
+
+ return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ if (obj->size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (!obj->filp)
+ return -ENODEV;
+
+ ret = obj->filp->f_op->mmap(obj->filp, vma);
+ if (ret)
+ return ret;
+
+ fput(vma->vm_file);
+ vma->vm_file = get_file(obj->filp);
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM,
- .gem_free_object = vgem_gem_free_object,
+ .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .open = vgem_open,
+ .preclose = vgem_preclose,
+ .gem_free_object_unlocked = vgem_gem_free_object,
.gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
+ .num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
+
.dumb_create = vgem_gem_dumb_create,
.dumb_map_offset = vgem_gem_dumb_map,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .gem_prime_pin = vgem_prime_pin,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = vgem_prime_get_sg_table,
+ .gem_prime_vmap = vgem_prime_vmap,
+ .gem_prime_vunmap = vgem_prime_vunmap,
+ .gem_prime_mmap = vgem_prime_mmap,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -248,7 +327,7 @@ static struct drm_driver vgem_driver = {
.minor = DRIVER_MINOR,
};
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
static int __init vgem_init(void)
{
@@ -260,10 +339,7 @@ static int __init vgem_init(void)
goto out;
}
- drm_dev_set_unique(vgem_device, "vgem");
-
ret = drm_dev_register(vgem_device, 0);
-
if (ret)
goto out_unref;
@@ -285,5 +361,6 @@ module_init(vgem_init);
module_exit(vgem_exit);
MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index e9f92f7ee275..1f8798ad329c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -32,15 +32,25 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+ struct idr fence_idr;
+ struct mutex fence_mutex;
+};
+
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
- struct page **pages;
- bool use_dma_buf;
};
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
#endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644
index 000000000000..5c57c1ffa1f9
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software")
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+#define VGEM_FENCE_TIMEOUT (10*HZ)
+
+struct vgem_fence {
+ struct fence base;
+ struct spinlock lock;
+ struct timer_list timer;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+ return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+ return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+ return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static void vgem_fence_release(struct fence *base)
+{
+ struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+ del_timer_sync(&fence->timer);
+ fence_free(&fence->base);
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+ int size)
+{
+ snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+static const struct fence_ops vgem_fence_ops = {
+ .get_driver_name = vgem_fence_get_driver_name,
+ .get_timeline_name = vgem_fence_get_timeline_name,
+ .enable_signaling = vgem_fence_enable_signaling,
+ .signaled = vgem_fence_signaled,
+ .wait = fence_default_wait,
+ .release = vgem_fence_release,
+
+ .fence_value_str = vgem_fence_value_str,
+ .timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static void vgem_fence_timeout(unsigned long data)
+{
+ struct vgem_fence *fence = (struct vgem_fence *)data;
+
+ fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
+{
+ struct vgem_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ spin_lock_init(&fence->lock);
+ fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ fence_context_alloc(1), 1);
+
+ setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+ /* We force the fence to expire within 10s to prevent driver hangs */
+ mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
+
+ return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+ struct drm_gem_object *obj)
+{
+ struct dma_buf *dmabuf;
+
+ if (obj->dma_buf)
+ return 0;
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ obj->dma_buf = dmabuf;
+ drm_gem_object_reference(obj);
+ return 0;
+}
+
+/*
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
+ *
+ * Create and attach a fence to the vGEM handle. This fence is then exposed
+ * via the dma-buf reservation object and visible to consumers of the exported
+ * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
+ * vGEM buffer is being written to by the client and is exposed as an exclusive
+ * fence; otherwise the fence indicates the client is currently reading from the
+ * buffer and all future writes should wait for the client to signal its
+ * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
+ * an exclusive fence when adding a read, or any fence when adding a write),
+ * -EBUSY is reported. Serialisation between operations should be handled
+ * by waiting upon the dma-buf.
+ *
+ * This returns the handle for the new fence that must be signaled within 10
+ * seconds (or otherwise it will automatically expire). See
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
+ *
+ * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
+ */
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct drm_vgem_fence_attach *arg = data;
+ struct vgem_file *vfile = file->driver_priv;
+ struct reservation_object *resv;
+ struct drm_gem_object *obj;
+ struct fence *fence;
+ int ret;
+
+ if (arg->flags & ~VGEM_FENCE_WRITE)
+ return -EINVAL;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, arg->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = attach_dmabuf(dev, obj);
+ if (ret)
+ goto err;
+
+ fence = vgem_fence_create(vfile, arg->flags);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Check for a conflicting fence */
+ resv = obj->dma_buf->resv;
+ if (!reservation_object_test_signaled_rcu(resv,
+ arg->flags & VGEM_FENCE_WRITE)) {
+ ret = -EBUSY;
+ goto err_fence;
+ }
+
+ /* Expose the fence via the dma-buf */
+ ret = 0;
+ mutex_lock(&resv->lock.base);
+ if (arg->flags & VGEM_FENCE_WRITE)
+ reservation_object_add_excl_fence(resv, fence);
+ else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+ reservation_object_add_shared_fence(resv, fence);
+ mutex_unlock(&resv->lock.base);
+
+ /* Record the fence in our idr for later signaling */
+ if (ret == 0) {
+ mutex_lock(&vfile->fence_mutex);
+ ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+ mutex_unlock(&vfile->fence_mutex);
+ if (ret > 0) {
+ arg->out_fence = ret;
+ ret = 0;
+ }
+ }
+err_fence:
+ if (ret) {
+ fence_signal(fence);
+ fence_put(fence);
+ }
+err:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+/*
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
+ *
+ * Signal and consume a fence earlier attached to a vGEM handle using
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
+ *
+ * All fences must be signaled within 10s of attachment or otherwise they
+ * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
+ *
+ * Signaling a fence indicates to all consumers of the dma-buf that the
+ * client has completed the operation associated with the fence, and that the
+ * buffer is then ready for consumption.
+ *
+ * If the fence does not exist (or has already been signaled by the client),
+ * vgem_fence_signal_ioctl returns -ENOENT.
+ */
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct vgem_file *vfile = file->driver_priv;
+ struct drm_vgem_fence_signal *arg = data;
+ struct fence *fence;
+ int ret = 0;
+
+ if (arg->flags)
+ return -EINVAL;
+
+ mutex_lock(&vfile->fence_mutex);
+ fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+ mutex_unlock(&vfile->fence_mutex);
+ if (!fence)
+ return -ENOENT;
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ if (fence_is_signaled(fence))
+ ret = -ETIMEDOUT;
+
+ fence_signal(fence);
+ fence_put(fence);
+ return ret;
+}
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+ mutex_init(&vfile->fence_mutex);
+ idr_init(&vfile->fence_idr);
+
+ return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+ fence_signal(p);
+ fence_put(p);
+ return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+ idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+ idr_destroy(&vfile->fence_idr);
+}
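Seen from userspace, the two ioctls added above pair up as attach-then-signal. A minimal sketch, assuming the field names visible in the handlers (handle/flags/pad/out_fence for attach, fence/flags for signal) and that the new uapi header is installed as <drm/vgem_drm.h>:

/* Hypothetical userspace usage; fd is an already-open vgem node. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vgem_drm.h>

static int write_with_fence(int fd, uint32_t bo_handle)
{
	struct drm_vgem_fence_attach attach;
	struct drm_vgem_fence_signal signal;

	memset(&attach, 0, sizeof(attach));
	attach.handle = bo_handle;
	attach.flags = VGEM_FENCE_WRITE;	/* exclusive fence: we intend to write */

	if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -1;			/* -EBUSY if a conflicting fence exists */

	/* ... CPU writes into the buffer go here ... */

	memset(&signal, 0, sizeof(signal));
	signal.fence = attach.out_fence;

	/* Must happen within 10s or the fence auto-expires (-ETIMEDOUT). */
	return ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
}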
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 4f20742e7788..a04ef1c992d9 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
struct via_file_private *file_priv = file->driver_priv;
struct via_memblock *entry, *next;
- if (!(file->minor->master && file->master->lock.hw_lock))
+ if (!(dev->master && file->master->lock.hw_lock))
return;
drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 9983eadb81b6..e1afc3d3f8d9 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,11 +1,7 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d4305da88f44..4e192aa2d021 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,8 +29,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#define XRES_MIN 320
-#define YRES_MIN 200
+#define XRES_MIN 32
+#define YRES_MIN 32
#define XRES_DEF 1024
#define YRES_DEF 768
@@ -38,138 +38,11 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-static void
-virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_output *output)
-{
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
- output->cursor.resource_id = 0;
- virtio_gpu_cursor_ping(vgdev, output);
-}
-
-static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x, int32_t hot_y)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
- struct virtio_gpu_fence *fence = NULL;
- int ret = 0;
-
- if (handle == 0) {
- virtio_gpu_hide_cursor(vgdev, output);
- return 0;
- }
-
- /* lookup the cursor */
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
-
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (!qobj->hw_res_handle) {
- ret = -EINVAL;
- goto out;
- }
-
- virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
- cpu_to_le32(64),
- cpu_to_le32(64),
- 0, 0, &fence);
- ret = virtio_gpu_object_reserve(qobj, false);
- if (!ret) {
- reservation_object_add_excl_fence(qobj->tbo.resv,
- &fence->f);
- fence_put(&fence->f);
- virtio_gpu_object_unreserve(qobj);
- virtio_gpu_object_wait(qobj, false);
- }
-
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
- output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
- output->cursor.hot_x = cpu_to_le32(hot_x);
- output->cursor.hot_y = cpu_to_le32(hot_y);
- virtio_gpu_cursor_ping(vgdev, output);
- ret = 0;
-
-out:
- drm_gem_object_unreference_unlocked(gobj);
- return ret;
-}
-
-static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
-
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
- output->cursor.pos.x = cpu_to_le32(x);
- output->cursor.pos.y = cpu_to_le32(y);
- virtio_gpu_cursor_ping(vgdev, output);
- return 0;
-}
-
-static int virtio_gpu_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
- struct drm_plane *plane = crtc->primary;
- struct virtio_gpu_framebuffer *vgfb;
- struct virtio_gpu_object *bo;
- unsigned long irqflags;
- uint32_t handle;
-
- plane->fb = fb;
- vgfb = to_virtio_gpu_framebuffer(plane->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
- handle = bo->hw_res_handle;
-
- DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
- bo->dumb ? ", dumb" : "",
- crtc->mode.hdisplay, crtc->mode.vdisplay);
- if (bo->dumb) {
- virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, handle, 0,
- cpu_to_le32(crtc->mode.hdisplay),
- cpu_to_le32(crtc->mode.vdisplay),
- 0, 0, NULL);
- }
- virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- crtc->mode.hdisplay,
- crtc->mode.vdisplay, 0, 0);
- virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
- crtc->mode.hdisplay,
- crtc->mode.vdisplay);
-
- if (event) {
- spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
- drm_send_vblank_event(crtc->dev, -1, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
- }
-
- return 0;
-}
-
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
- .cursor_set2 = virtio_gpu_crtc_cursor_set,
- .cursor_move = virtio_gpu_crtc_cursor_move,
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
- .page_flip = virtio_gpu_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -180,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
- if (virtio_gpu_fb->obj)
- drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+ drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
@@ -267,6 +139,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (crtc->state->event)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
@@ -341,15 +214,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
-static struct drm_encoder*
-virtio_gpu_best_encoder(struct drm_connector *connector)
-{
- struct virtio_gpu_output *virtio_gpu_output =
- drm_connector_to_virtio_gpu_output(connector);
-
- return &virtio_gpu_output->enc;
-}
-
static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
.mode_set = virtio_gpu_enc_mode_set,
.enable = virtio_gpu_enc_enable,
@@ -359,7 +223,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
.get_modes = virtio_gpu_conn_get_modes,
.mode_valid = virtio_gpu_conn_mode_valid,
- .best_encoder = virtio_gpu_best_encoder,
};
static enum drm_connector_status virtio_gpu_conn_detect(
@@ -406,7 +269,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_connector *connector = &output->conn;
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
- struct drm_plane *plane;
+ struct drm_plane *primary, *cursor;
output->index = index;
if (index == 0) {
@@ -415,13 +278,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
output->info.r.height = cpu_to_le32(YRES_DEF);
}
- plane = virtio_gpu_plane_init(vgdev, index);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+ cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
+ if (IS_ERR(cursor))
+ return PTR_ERR(cursor);
+ drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- plane->crtc = crtc;
+ primary->crtc = crtc;
+ cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -458,14 +325,31 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(obj);
return NULL;
}
return &virtio_gpu_fb->base;
}
+static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+ drm_atomic_helper_commit_planes(dev, state, true);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
+ .atomic_commit_tail = vgdev_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
.fb_create = virtio_gpu_user_framebuffer_create,
.atomic_check = drm_atomic_helper_check,
@@ -477,7 +361,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
int i;
drm_mode_config_init(vgdev->ddev);
- vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
+ vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
+ vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
/* modes will be validated against the framebuffer size */
vgdev->ddev->mode_config.min_width = XRES_MIN;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 88a39165edd5..7f0e93f87a55 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -27,16 +27,6 @@
#include "virtgpu_drv.h"
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- struct pci_dev *pdev = dev->pdev;
-
- if (pdev) {
- return drm_pci_set_busid(dev, master);
- }
- return 0;
-}
-
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 3cc7afa77a35..c13f70cfc461 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
@@ -143,7 +142,7 @@ static struct drm_driver driver = {
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object = virtio_gpu_gem_free_object,
+ .gem_free_object_unlocked = virtio_gpu_gem_free_object,
.gem_open_object = virtio_gpu_gem_object_open,
.gem_close_object = virtio_gpu_gem_object_close,
.fops = &virtio_gpu_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a54f43f846a..b18ef3111f0c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,6 +33,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -48,7 +49,6 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
@@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtio_gpu_plane.c */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ enum drm_plane_type type,
int index);
/* virtio_gpu_ttm.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 70b44a2345ab..925ca25209df 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
DRM_FORMAT_ABGR8888,
};
+static const uint32_t virtio_gpu_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return 0;
}
-static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc);
+ struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
uint32_t handle;
+ if (plane->state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+ if (old_state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+ WARN_ON(!output);
+
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, handle, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- plane->state->crtc_x, plane->state->crtc_y, NULL);
+ cpu_to_le32(plane->state->src_w >> 16),
+ cpu_to_le32(plane->state->src_h >> 16),
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16, NULL);
}
} else {
handle = 0;
}
- DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle,
+ DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
plane->state->crtc_w, plane->state->crtc_h,
- plane->state->crtc_x, plane->state->crtc_y);
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- plane->state->crtc_w,
- plane->state->crtc_h,
- plane->state->crtc_x,
- plane->state->crtc_y);
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
virtio_gpu_cmd_resource_flush(vgdev, handle,
- plane->state->crtc_x,
- plane->state->crtc_y,
- plane->state->crtc_w,
- plane->state->crtc_h);
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
}
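The shifts introduced above follow from the DRM atomic plane state: the src rectangle (src_x/src_y/src_w/src_h) is stored in 16.16 fixed point, so >> 16 recovers whole pixels, while the crtc rectangle is already integral. A trivial standalone illustration with a made-up value:

/* 16.16 fixed-point source coordinates, as used by the plane update above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_w = 1024u << 16;	/* 1024 pixels encoded in 16.16 fixed point */

	printf("%u\n", src_w >> 16);	/* prints 1024 */
	return 0;
}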
+static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_output *output = NULL;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_fence *fence = NULL;
+ struct virtio_gpu_object *bo = NULL;
+ uint32_t handle;
+ int ret = 0;
-static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = {
+ if (plane->state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+ if (old_state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+ WARN_ON(!output);
+
+ if (plane->state->fb) {
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ handle = bo->hw_res_handle;
+ } else {
+ handle = 0;
+ }
+
+ if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
+ /* new cursor -- update & wait */
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, handle, 0,
+ cpu_to_le32(plane->state->crtc_w),
+ cpu_to_le32(plane->state->crtc_h),
+ 0, 0, &fence);
+ ret = virtio_gpu_object_reserve(bo, false);
+ if (!ret) {
+ reservation_object_add_excl_fence(bo->tbo.resv,
+ &fence->f);
+ fence_put(&fence->f);
+ fence = NULL;
+ virtio_gpu_object_unreserve(bo);
+ virtio_gpu_object_wait(bo, false);
+ }
+ }
+
+ if (plane->state->fb != old_state->fb) {
+ DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->fb ? plane->state->fb->hot_x : 0,
+ plane->state->fb ? plane->state->fb->hot_y : 0);
+ output->cursor.hdr.type =
+ cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+ output->cursor.resource_id = cpu_to_le32(handle);
+ if (plane->state->fb) {
+ output->cursor.hot_x =
+ cpu_to_le32(plane->state->fb->hot_x);
+ output->cursor.hot_y =
+ cpu_to_le32(plane->state->fb->hot_y);
+ } else {
+ output->cursor.hot_x = cpu_to_le32(0);
+ output->cursor.hot_y = cpu_to_le32(0);
+ }
+ } else {
+ DRM_DEBUG("move +%d+%d\n",
+ plane->state->crtc_x,
+ plane->state->crtc_y);
+ output->cursor.hdr.type =
+ cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
+ }
+ output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
+ output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
+ virtio_gpu_cursor_ping(vgdev, output);
+}
+
+static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
+ .atomic_check = virtio_gpu_plane_atomic_check,
+ .atomic_update = virtio_gpu_primary_plane_update,
+};
+
+static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
.atomic_check = virtio_gpu_plane_atomic_check,
- .atomic_update = virtio_gpu_plane_atomic_update,
+ .atomic_update = virtio_gpu_cursor_plane_update,
};
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ enum drm_plane_type type,
int index)
{
struct drm_device *dev = vgdev->ddev;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
- int ret;
+ const uint32_t *formats;
+ int ret, nformats;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
+ if (type == DRM_PLANE_TYPE_CURSOR) {
+ formats = virtio_gpu_cursor_formats;
+ nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
+ funcs = &virtio_gpu_cursor_helper_funcs;
+ } else {
+ formats = virtio_gpu_formats;
+ nformats = ARRAY_SIZE(virtio_gpu_formats);
+ funcs = &virtio_gpu_primary_helper_funcs;
+ }
ret = drm_universal_plane_init(dev, plane, 1 << index,
&virtio_gpu_plane_funcs,
- virtio_gpu_formats,
- ARRAY_SIZE(virtio_gpu_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ formats, nformats,
+ type, NULL);
if (ret)
goto err_plane_init;
- drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs);
+ drm_plane_helper_add(plane, funcs);
return plane;
err_plane_init:
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index a0580815629f..80482ac5f95d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ int ret;
+
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
virtio_gpu_move_null(bo, new_mem);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 67cebb23c940..aa04fb0159a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
struct vmw_cmdbuf_man *man = header->man;
u32 val;
- if (sizeof(header->handle) > 4)
- val = (header->handle >> 32);
- else
- val = 0;
+ val = upper_32_bits(header->handle);
vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
- val = (header->handle & 0xFFFFFFFFULL);
+ val = lower_32_bits(header->handle);
val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
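upper_32_bits() and lower_32_bits() are the generic kernel helpers for splitting a 64-bit value, replacing the open-coded shift and mask above. A standalone sketch, with the helpers reimplemented locally only for illustration:

/* Local stand-ins for the kernel's upper_32_bits()/lower_32_bits(). */
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))

int main(void)
{
	uint64_t handle = 0x0000000123456789ULL;

	/* Prints "1 23456789": the halves written to COMMAND_HIGH/COMMAND_LOW. */
	printf("%x %x\n", upper_32_bits(handle), lower_32_bits(handle));
	return 0;
}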
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8d528fcf6e96..e8ae3dc476d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1053,15 +1053,14 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *vmaster;
- if (file_priv->minor->type != DRM_MINOR_LEGACY ||
- !(flags & DRM_AUTH))
+ if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
return NULL;
ret = mutex_lock_interruptible(&dev->master_mutex);
if (unlikely(ret != 0))
return ERR_PTR(-ERESTARTSYS);
- if (file_priv->is_master) {
+ if (drm_is_current_master(file_priv)) {
mutex_unlock(&dev->master_mutex);
return NULL;
}
@@ -1240,8 +1239,7 @@ static int vmw_master_set(struct drm_device *dev,
}
static void vmw_master_drop(struct drm_device *dev,
- struct drm_file *file_priv,
- bool from_release)
+ struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 89fb19443a3f..74304b03f9d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
+#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1a1a87cbf109..dc5beff2b4aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3625,9 +3625,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
(sw_context->cmd_bounce_size >> 1));
}
- if (sw_context->cmd_bounce != NULL)
- vfree(sw_context->cmd_bounce);
-
+ vfree(sw_context->cmd_bounce);
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
if (sw_context->cmd_bounce == NULL) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ede83..26ac8e80a478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
- unsigned ctx;
+ u64 ctx;
};
struct vmw_user_fence {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e29da45a2847..bf28ccc150df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t size)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
}
+
+ return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 57203212c501..ff4803c107bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -195,9 +195,9 @@ struct vmw_display_unit {
void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size);
+ uint32_t size);
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index a18db4d5347c..c5d82a8a2ec9 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -96,12 +96,12 @@ fail:
*/
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
- u32 pos = pb->pos;
- u32 *p = (u32 *)((void *)pb->mapped + pos);
- WARN_ON(pos == pb->fence);
+ u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
+
+ WARN_ON(pb->pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
- pb->pos = (pos + 8) & (pb->size_bytes - 1);
+ pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
}
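The push-buffer position wraps with a power-of-two mask: each push appends two 32-bit words (8 bytes), and (pos + 8) & (size_bytes - 1) folds the offset back to zero at the end of the buffer. A standalone sketch with an assumed 4 KiB buffer:

/* Power-of-two ring-buffer advance, as in host1x_pushbuffer_push(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size_bytes = 4096;	/* must be a power of two for the mask trick */
	uint32_t pos = 4088;		/* last 8-byte slot in the buffer */

	pos = (pos + 8) & (size_bytes - 1);	/* wraps to 0 */
	printf("%u\n", pos);
	return 0;
}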
/*
@@ -134,14 +134,19 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
enum cdma_event event)
{
for (;;) {
+ struct push_buffer *pb = &cdma->push_buffer;
unsigned int space;
- if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
space = list_empty(&cdma->sync_queue) ? 1 : 0;
- else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
- struct push_buffer *pb = &cdma->push_buffer;
+ break;
+
+ case CDMA_EVENT_PUSH_BUFFER_SPACE:
space = host1x_pushbuffer_space(pb);
- } else {
+ break;
+
+ default:
WARN_ON(1);
return -EINVAL;
}
@@ -159,12 +164,14 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
mutex_lock(&cdma->lock);
continue;
}
+
cdma->event = event;
mutex_unlock(&cdma->lock);
down(&cdma->sem);
mutex_lock(&cdma->lock);
}
+
return 0;
}
@@ -234,6 +241,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
/* Start timer on next pending syncpt */
if (job->timeout)
cdma_start_timer_locked(cdma, job);
+
break;
}
@@ -247,7 +255,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
/* Pop push buffer slots */
if (job->num_slots) {
struct push_buffer *pb = &cdma->push_buffer;
+
host1x_pushbuffer_pop(pb, job->num_slots);
+
if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
signal = true;
}
@@ -269,11 +279,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
struct device *dev)
{
- u32 restart_addr;
- u32 syncpt_incrs;
- struct host1x_job *job = NULL;
- u32 syncpt_val;
struct host1x *host1x = cdma_to_host1x(cdma);
+ u32 restart_addr, syncpt_incrs, syncpt_val;
+ struct host1x_job *job = NULL;
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
@@ -342,9 +350,11 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
syncpt_val += syncpt_incrs;
}
- /* The following sumbits from the same client may be dependent on the
+ /*
+ * The following submits from the same client may be dependent on the
* failed submit and therefore they may fail. Force a small timeout
- * to make the queue cleanup faster */
+ * to make the queue cleanup faster.
+ */
list_for_each_entry_from(job, &cdma->sync_queue, list)
if (job->client == cdma->timeout.client)
@@ -375,6 +385,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
err = host1x_pushbuffer_init(&cdma->push_buffer);
if (err)
return err;
+
return 0;
}
@@ -410,6 +421,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
/* init state on first submit with timeout value */
if (!cdma->timeout.initialized) {
int err;
+
err = host1x_hw_cdma_timeout_init(host1x, cdma,
job->syncpt_id);
if (err) {
@@ -418,6 +430,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
}
}
}
+
if (!cdma->running)
host1x_hw_cdma_start(host1x, cdma);
@@ -448,6 +461,7 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
}
+
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
host1x_pushbuffer_push(pb, op1, op2);
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index b4ae3affb987..8f437d924c10 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -83,9 +83,10 @@ EXPORT_SYMBOL(host1x_channel_put);
struct host1x_channel *host1x_channel_request(struct device *dev)
{
struct host1x *host = dev_get_drvdata(dev->parent);
- int max_channels = host->info->nb_channels;
+ unsigned int max_channels = host->info->nb_channels;
struct host1x_channel *channel = NULL;
- int index, err;
+ unsigned long index;
+ int err;
mutex_lock(&host->chlist_mutex);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index ee3d12b51c50..d9330fcc62ad 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -39,6 +39,7 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
va_start(args, fmt);
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
+
o->fn(o->ctx, o->buf, len);
}
@@ -48,13 +49,17 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
struct output *o = data;
mutex_lock(&ch->reflock);
+
if (ch->refcount) {
mutex_lock(&ch->cdma.lock);
+
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
+
host1x_hw_show_channel_cdma(m, ch, o);
mutex_unlock(&ch->cdma.lock);
}
+
mutex_unlock(&ch->reflock);
return 0;
@@ -62,22 +67,27 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
static void show_syncpts(struct host1x *m, struct output *o)
{
- int i;
+ unsigned int i;
+
host1x_debug_output(o, "---- syncpts ----\n");
+
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
+
if (!min && !max)
continue;
- host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+
+ host1x_debug_output(o, "id %u (%s) min %d max %d\n",
i, m->syncpt[i].name, min, max);
}
for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
u32 base_val;
+
base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
if (base_val)
- host1x_debug_output(o, "waitbase id %d val %d\n", i,
+ host1x_debug_output(o, "waitbase id %u val %d\n", i,
base_val);
}
@@ -114,7 +124,9 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused)
.fn = write_to_seqfile,
.ctx = s
};
+
show_all(s->private, &o);
+
return 0;
}
@@ -124,7 +136,9 @@ static int host1x_debug_show(struct seq_file *s, void *unused)
.fn = write_to_seqfile,
.ctx = s
};
+
show_all_no_fifo(s->private, &o);
+
return 0;
}
@@ -134,10 +148,10 @@ static int host1x_debug_open_all(struct inode *inode, struct file *file)
}
static const struct file_operations host1x_debug_all_fops = {
- .open = host1x_debug_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = host1x_debug_open_all,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static int host1x_debug_open(struct inode *inode, struct file *file)
@@ -146,10 +160,10 @@ static int host1x_debug_open(struct inode *inode, struct file *file)
}
static const struct file_operations host1x_debug_fops = {
- .open = host1x_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = host1x_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void host1x_debugfs_init(struct host1x *host1x)
@@ -201,6 +215,7 @@ void host1x_debug_dump(struct host1x *host1x)
struct output o = {
.fn = write_to_printk
};
+
show_all(host1x, &o);
}
@@ -209,5 +224,6 @@ void host1x_debug_dump_syncpts(struct host1x *host1x)
struct output o = {
.fn = write_to_printk
};
+
show_syncpts(host1x, &o);
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index ff348690df94..a62317af76ad 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -63,13 +63,13 @@ u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
}
static const struct host1x_info host1x01_info = {
- .nb_channels = 8,
- .nb_pts = 32,
- .nb_mlocks = 16,
- .nb_bases = 8,
- .init = host1x01_init,
- .sync_offset = 0x3000,
- .dma_mask = DMA_BIT_MASK(32),
+ .nb_channels = 8,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 8,
+ .init = host1x01_init,
+ .sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x02_info = {
@@ -102,7 +102,7 @@ static const struct host1x_info host1x05_info = {
.dma_mask = DMA_BIT_MASK(34),
};
-static struct of_device_id host1x_of_match[] = {
+static const struct of_device_id host1x_of_match[] = {
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index dace124994bb..5220510f39da 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -45,7 +45,7 @@ struct host1x_cdma_ops {
void (*start)(struct host1x_cdma *cdma);
void (*stop)(struct host1x_cdma *cdma);
void (*flush)(struct host1x_cdma *cdma);
- int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+ int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
void (*timeout_destroy)(struct host1x_cdma *cdma);
void (*freeze)(struct host1x_cdma *cdma);
void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -82,21 +82,21 @@ struct host1x_intr_ops {
int (*init_host_sync)(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *work));
void (*set_syncpt_threshold)(
- struct host1x *host, u32 id, u32 thresh);
- void (*enable_syncpt_intr)(struct host1x *host, u32 id);
- void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+ struct host1x *host, unsigned int id, u32 thresh);
+ void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
+ void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
void (*disable_all_syncpt_intrs)(struct host1x *host);
int (*free_syncpt_irq)(struct host1x *host);
};
struct host1x_info {
- int nb_channels; /* host1x: num channels supported */
- int nb_pts; /* host1x: num syncpoints supported */
- int nb_bases; /* host1x: num syncpoints supported */
- int nb_mlocks; /* host1x: number of mlocks */
- int (*init)(struct host1x *); /* initialize per SoC ops */
- int sync_offset;
- u64 dma_mask; /* mask of addressable memory */
+ unsigned int nb_channels; /* host1x: number of channels supported */
+ unsigned int nb_pts; /* host1x: number of syncpoints supported */
+ unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
+ unsigned int nb_mlocks; /* host1x: number of mlocks supported */
+ int (*init)(struct host1x *host1x); /* initialize per SoC ops */
+ unsigned int sync_offset; /* offset of syncpoint registers */
+ u64 dma_mask; /* mask of addressable memory */
};
struct host1x {
@@ -109,7 +109,6 @@ struct host1x {
struct clk *clk;
struct mutex intr_mutex;
- struct workqueue_struct *intr_wq;
int intr_syncpt_irq;
const struct host1x_syncpt_ops *syncpt_op;
@@ -183,19 +182,20 @@ static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
}
static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
- u32 id, u32 thresh)
+ unsigned int id,
+ u32 thresh)
{
host->intr_op->set_syncpt_threshold(host, id, thresh);
}
static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
- u32 id)
+ unsigned int id)
{
host->intr_op->enable_syncpt_intr(host, id);
}
static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
- u32 id)
+ unsigned int id)
{
host->intr_op->disable_syncpt_intr(host, id);
}
@@ -212,9 +212,9 @@ static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
static inline int host1x_hw_channel_init(struct host1x *host,
struct host1x_channel *channel,
- int chid)
+ unsigned int id)
{
- return host->channel_op->init(channel, host, chid);
+ return host->channel_op->init(channel, host, id);
}
static inline int host1x_hw_channel_submit(struct host1x *host,
@@ -243,9 +243,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
struct host1x_cdma *cdma,
- u32 syncpt_id)
+ unsigned int syncpt)
{
- return host->cdma_op->timeout_init(cdma, syncpt_id);
+ return host->cdma_op->timeout_init(cdma, syncpt);
}
static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 305ea8f3382d..659c1bbfeeba 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -41,7 +41,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
- u32 i;
+ unsigned int i;
for (i = 0; i < syncpt_incrs; i++)
host1x_syncpt_incr(cdma->timeout.syncpt);
@@ -58,6 +58,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
&pb->phys, getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
+
wmb();
}
@@ -162,12 +163,14 @@ static void cdma_stop(struct host1x_cdma *cdma)
struct host1x_channel *ch = cdma_to_channel(cdma);
mutex_lock(&cdma->lock);
+
if (cdma->running) {
host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
cdma->running = false;
}
+
mutex_unlock(&cdma->lock);
}
@@ -213,11 +216,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
u32 cmdproc_stop;
dev_dbg(host1x->dev,
- "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+ "resuming channel (id %u, DMAGET restart = 0x%x)\n",
ch->id, getptr);
cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop &= ~(BIT(ch->id));
+ cmdproc_stop &= ~BIT(ch->id);
host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
cdma->torndown = false;
@@ -231,14 +234,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
*/
static void cdma_timeout_handler(struct work_struct *work)
{
+ u32 prev_cmdproc, cmdproc_stop, syncpt_val;
struct host1x_cdma *cdma;
struct host1x *host1x;
struct host1x_channel *ch;
- u32 syncpt_val;
-
- u32 prev_cmdproc, cmdproc_stop;
-
cdma = container_of(to_delayed_work(work), struct host1x_cdma,
timeout.wq);
host1x = cdma_to_host1x(cdma);
@@ -277,9 +277,9 @@ static void cdma_timeout_handler(struct work_struct *work)
return;
}
- dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
- __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
- syncpt_val, cdma->timeout.syncpt_val);
+ dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
+ __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+ syncpt_val, cdma->timeout.syncpt_val);
/* stop HW, resetting channel/module */
host1x_hw_cdma_freeze(host1x, cdma);
@@ -291,7 +291,7 @@ static void cdma_timeout_handler(struct work_struct *work)
/*
* Init timeout resources
*/
-static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
{
INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
cdma->timeout.initialized = true;
@@ -306,6 +306,7 @@ static void cdma_timeout_destroy(struct host1x_cdma *cdma)
{
if (cdma->timeout.initialized)
cancel_delayed_work(&cdma->timeout.wq);
+
cdma->timeout.initialized = false;
}
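
The timeout handling above is built on the generic delayed-work API; a stand-alone sketch of that pattern (foo_* names are placeholders) might look like:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_timeout {
	struct delayed_work work;
	bool initialized;
};

static void foo_timeout_handler(struct work_struct *work)
{
	struct foo_timeout *t = container_of(to_delayed_work(work),
					     struct foo_timeout, work);
	/* recovery action would go here */
	(void)t;
}

static void foo_timeout_init(struct foo_timeout *t)
{
	INIT_DELAYED_WORK(&t->work, foo_timeout_handler);
	t->initialized = true;
}

static void foo_timeout_arm(struct foo_timeout *t, unsigned long ms)
{
	/* runs foo_timeout_handler() on the system workqueue after 'ms' */
	schedule_delayed_work(&t->work, msecs_to_jiffies(ms));
}

static void foo_timeout_destroy(struct foo_timeout *t)
{
	if (t->initialized)
		cancel_delayed_work_sync(&t->work);

	t->initialized = false;
}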
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 946c332c3906..5e8df78b7acd 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -46,6 +46,7 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
*/
for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+
offset += i * sizeof(u32);
trace_host1x_cdma_push_gather(dev_name(dev), bo,
@@ -66,6 +67,7 @@ static void submit_gathers(struct host1x_job *job)
struct host1x_job_gather *g = &job->gathers[i];
u32 op1 = host1x_opcode_gather(g->words);
u32 op2 = g->base + g->offset;
+
trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
host1x_cdma_push(cdma, op1, op2);
}
@@ -75,7 +77,8 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
- u32 id, value;
+ unsigned int id;
+ u32 value;
value = host1x_syncpt_read_max(sp);
id = sp->base->id;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index cc3f1825c735..7a4a3286e4a7 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -40,8 +40,7 @@ enum {
static unsigned int show_channel_command(struct output *o, u32 val)
{
- unsigned mask;
- unsigned subop;
+ unsigned int mask, subop;
switch (val >> 28) {
case HOST1X_OPCODE_SETCLASS:
@@ -51,12 +50,11 @@ static unsigned int show_channel_command(struct output *o, u32 val)
val >> 6 & 0x3ff,
val >> 16 & 0xfff, mask);
return hweight8(mask);
- } else {
- host1x_debug_output(o, "SETCL(class=%03x)\n",
- val >> 6 & 0x3ff);
- return 0;
}
+ host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ return 0;
+
case HOST1X_OPCODE_INCR:
host1x_debug_output(o, "INCR(offset=%03x, [",
val >> 16 & 0xfff);
@@ -143,7 +141,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
struct host1x_job *job;
list_for_each_entry(job, &cdma->sync_queue, list) {
- int i;
+ unsigned int i;
+
host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
job, job->syncpt_id, job->syncpt_end,
job->first_get, job->timeout,
@@ -190,7 +189,7 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
- host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
!ch->cdma.push_buffer.mapped) {
@@ -200,14 +199,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT)
+ HOST1X_UCLASS_WAIT_SYNCPT)
host1x_debug_output(o, "waiting on syncpt %d val %d\n",
cbread >> 24, cbread & 0xffffff);
else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
- HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
-
+ HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
base = (cbread >> 16) & 0xff;
baseval =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
@@ -236,7 +234,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
u32 val, rd_ptr, wr_ptr, start, end;
unsigned int data_count = 0;
- host1x_debug_output(o, "%d: fifo:\n", ch->id);
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
host1x_debug_output(o, "FIFOSTAT %08x\n", val);
@@ -290,20 +288,22 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
{
- int i;
+ unsigned int i;
host1x_debug_output(o, "---- mlocks ----\n");
+
for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
u32 owner =
host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
- host1x_debug_output(o, "%d: locked by channel %d\n",
+ host1x_debug_output(o, "%u: locked by channel %u\n",
i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
- host1x_debug_output(o, "%d: locked by cpu\n", i);
+ host1x_debug_output(o, "%u: locked by cpu\n", i);
else
- host1x_debug_output(o, "%d: unlocked\n", i);
+ host1x_debug_output(o, "%u: unlocked\n", i);
}
+
host1x_debug_output(o, "\n");
}
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index e1e31e9e67cd..dacb8009a605 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -38,14 +38,14 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
- queue_work(host->intr_wq, &syncpt->intr.work);
+ schedule_work(&syncpt->intr.work);
}
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x *host = dev_id;
unsigned long reg;
- int i, id;
+ unsigned int i, id;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
@@ -62,7 +62,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
- u32 i;
+ unsigned int i;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
host1x_sync_writel(host, 0xffffffffu,
@@ -72,10 +72,12 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
}
}
-static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
- void (*syncpt_thresh_work)(struct work_struct *))
+static int
+_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+ void (*syncpt_thresh_work)(struct work_struct *))
{
- int i, err;
+ unsigned int i;
+ int err;
host1x_hw_intr_disable_all_syncpt_intrs(host);
@@ -106,18 +108,21 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
}
static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
- u32 id, u32 thresh)
+ unsigned int id,
+ u32 thresh)
{
host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}
-static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
+ unsigned int id)
{
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
}
-static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
+ unsigned int id)
{
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
@@ -127,8 +132,13 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
static int _host1x_free_syncpt_irq(struct host1x *host)
{
+ unsigned int i;
+
devm_free_irq(host->dev, host->intr_syncpt_irq, host);
- flush_workqueue(host->intr_wq);
+
+ for (i = 0; i < host->info->nb_pts; i++)
+ cancel_work_sync(&host->syncpt[i].intr.work);
+
return 0;
}
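
The change above drops the driver-private workqueue in favour of the shared system workqueue; the general shape of that pattern (hypothetical names) is roughly:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct foo_irq_ctx {
	struct work_struct work;
};

static void foo_work_fn(struct work_struct *work)
{
	struct foo_irq_ctx *ctx = container_of(work, struct foo_irq_ctx, work);

	/* deferred (sleepable) interrupt processing goes here */
	(void)ctx;
}

static void foo_init(struct foo_irq_ctx *ctx)
{
	INIT_WORK(&ctx->work, foo_work_fn);
}

/* top half: just queue the work item on the system workqueue */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_irq_ctx *ctx = dev_id;

	schedule_work(&ctx->work);
	return IRQ_HANDLED;
}

/* teardown: wait for any queued or running instance to finish */
static void foo_teardown(struct foo_irq_ctx *ctx)
{
	cancel_work_sync(&ctx->work);
}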
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 56e85395ac24..c93f74fcce72 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -26,8 +26,9 @@
*/
static void syncpt_restore(struct host1x_syncpt *sp)
{
+ u32 min = host1x_syncpt_read_min(sp);
struct host1x *host = sp->host;
- int min = host1x_syncpt_read_min(sp);
+
host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}
@@ -37,6 +38,7 @@ static void syncpt_restore(struct host1x_syncpt *sp)
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
+
host1x_sync_writel(host, sp->base_val,
HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
@@ -47,6 +49,7 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
+
sp->base_val =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
@@ -85,6 +88,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
if (!host1x_syncpt_client_managed(sp) &&
host1x_syncpt_idle(sp))
return -EINVAL;
+
host1x_sync_writel(host, BIT_MASK(sp->id),
HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
wmb();
@@ -95,10 +99,10 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
/* remove a wait pointed to by patch_addr */
static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
- u32 override = host1x_class_host_wait_syncpt(
- HOST1X_SYNCPT_RESERVED, 0);
+ u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
*((u32 *)patch_addr) = override;
+
return 0;
}
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 2491bf82e30c..8b4fad0ab35d 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -122,18 +122,20 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
static void action_wakeup(struct host1x_waitlist *waiter)
{
wait_queue_head_t *wq = waiter->data;
+
wake_up(wq);
}
static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
wait_queue_head_t *wq = waiter->data;
+
wake_up_interruptible(wq);
}
typedef void (*action_handler)(struct host1x_waitlist *waiter);
-static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
action_submit_complete,
action_wakeup,
action_wakeup_interruptible,
@@ -209,7 +211,7 @@ static void syncpt_thresh_work(struct work_struct *work)
host1x_syncpt_load(host->syncpt + id));
}
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref)
{
@@ -254,7 +256,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
return 0;
}
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
struct host1x_waitlist *waiter = ref;
struct host1x_syncpt *syncpt;
@@ -277,9 +279,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
mutex_init(&host->intr_mutex);
host->intr_syncpt_irq = irq_sync;
- host->intr_wq = create_workqueue("host_syncpt");
- if (!host->intr_wq)
- return -ENOMEM;
for (id = 0; id < nb_pts; ++id) {
struct host1x_syncpt *syncpt = host->syncpt + id;
@@ -288,7 +287,7 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
INIT_LIST_HEAD(&syncpt->intr.wait_head);
snprintf(syncpt->intr.thresh_irq_name,
sizeof(syncpt->intr.thresh_irq_name),
- "host1x_sp_%02d", id);
+ "host1x_sp_%02u", id);
}
host1x_intr_start(host);
@@ -299,7 +298,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
void host1x_intr_deinit(struct host1x *host)
{
host1x_intr_stop(host);
- destroy_workqueue(host->intr_wq);
}
void host1x_intr_start(struct host1x *host)
@@ -342,7 +340,7 @@ void host1x_intr_stop(struct host1x *host)
if (!list_empty(&syncpt[id].intr.wait_head)) {
/* output diagnostics */
mutex_unlock(&host->intr_mutex);
- pr_warn("%s cannot stop syncpt intr id=%d\n",
+ pr_warn("%s cannot stop syncpt intr id=%u\n",
__func__, id);
return;
}
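
Constifying the action_handlers table above is a common hardening step; a minimal sketch of such a dispatch table (foo_* names assumed) is:

#include <linux/kernel.h>

typedef void (*foo_handler)(int arg);

static void foo_handle_a(int arg)
{
	(void)arg;
}

static void foo_handle_b(int arg)
{
	(void)arg;
}

/* const: the table lives in read-only data and cannot be overwritten */
static const foo_handler foo_handlers[] = {
	foo_handle_a,
	foo_handle_b,
};

static void foo_dispatch(unsigned int idx, int arg)
{
	if (idx < ARRAY_SIZE(foo_handlers))
		foo_handlers[idx](arg);
}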
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 2b8adf016a05..1370c2bb75b8 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -75,7 +75,7 @@ struct host1x_waitlist {
*
* This is a non-blocking api.
*/
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref);
@@ -84,7 +84,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
* You must call this if you passed non-NULL as ref.
* @ref the ref returned from host1x_intr_add_action()
*/
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index b4515d544039..a91b7c4a6110 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -161,7 +161,7 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
if (host1x_syncpt_is_expired(sp, wait->thresh)) {
dev_dbg(host->dev,
- "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+ "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
wait->syncpt_id, sp->name, wait->thresh,
host1x_syncpt_read_min(sp));
@@ -464,6 +464,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
for (i = 0; i < job->num_gathers; i++) {
struct host1x_job_gather *g = &job->gathers[i];
+
size += g->words * sizeof(u32);
}
@@ -514,6 +515,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
for (i = 0; i < job->num_waitchk; i++) {
u32 syncpt_id = job->waitchk[i].syncpt_id;
+
if (syncpt_id < host1x_syncpt_nb_pts(host))
set_bit(syncpt_id, waitchk_mask);
}
@@ -571,14 +573,16 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+
host1x_bo_unpin(unpin->bo, unpin->sgt);
host1x_bo_put(unpin->bo);
}
+
job->num_unpins = 0;
if (job->gather_copy_size)
dma_free_wc(job->channel->dev, job->gather_copy_size,
- job->gather_copy_mapped, job->gather_copy);
+ job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
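
host1x_job_unpin() above releases a write-combined DMA buffer; the matching alloc/free pairing, sketched with placeholder names, is:

#include <linux/dma-mapping.h>

/* hypothetical: allocate and release a write-combined scratch buffer */
static void *foo_alloc_copy(struct device *dev, size_t size,
			    dma_addr_t *dma)
{
	/* CPU mapping is write-combined; *dma is the device address */
	return dma_alloc_wc(dev, size, dma, GFP_KERNEL);
}

static void foo_free_copy(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma)
{
	dma_free_wc(dev, size, cpu_addr, dma);
}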
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 6b7fdc1e2ed0..95589328ad52 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -73,7 +73,7 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
return NULL;
}
- name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+ name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
return NULL;
@@ -110,12 +110,14 @@ EXPORT_SYMBOL(host1x_syncpt_incr_max);
void host1x_syncpt_restore(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
- u32 i;
+ unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
host1x_hw_syncpt_restore(host, sp_base + i);
+
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+
wmb();
}
@@ -126,7 +128,7 @@ void host1x_syncpt_restore(struct host1x *host)
void host1x_syncpt_save(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
- u32 i;
+ unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
if (host1x_syncpt_client_managed(sp_base + i))
@@ -146,6 +148,7 @@ void host1x_syncpt_save(struct host1x *host)
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
u32 val;
+
val = host1x_hw_syncpt_load(sp->host, sp);
trace_host1x_syncpt_load_min(sp->id, val);
@@ -157,10 +160,9 @@ u32 host1x_syncpt_load(struct host1x_syncpt *sp)
*/
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
- u32 val;
host1x_hw_syncpt_load_wait_base(sp->host, sp);
- val = sp->base_val;
- return val;
+
+ return sp->base_val;
}
/*
@@ -179,6 +181,7 @@ EXPORT_SYMBOL(host1x_syncpt_incr);
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
host1x_hw_syncpt_load(sp->host, sp);
+
return host1x_syncpt_is_expired(sp, thresh);
}
@@ -186,7 +189,7 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
* Main entrypoint for syncpoint value waits.
*/
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
- u32 *value)
+ u32 *value)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
void *ref;
@@ -201,6 +204,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
if (host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = host1x_syncpt_load(sp);
+
return 0;
}
@@ -209,6 +213,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
if (host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = val;
+
goto done;
}
@@ -239,32 +244,42 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
/* wait for the syncpoint, or timeout, or signal */
while (timeout) {
long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
- int remain = wait_event_interruptible_timeout(wq,
+ int remain;
+
+ remain = wait_event_interruptible_timeout(wq,
syncpt_load_min_is_expired(sp, thresh),
check);
if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = host1x_syncpt_load(sp);
+
err = 0;
+
break;
}
+
if (remain < 0) {
err = remain;
break;
}
+
timeout -= check;
+
if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
dev_warn(sp->host->dev,
- "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+ "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
current->comm, sp->id, sp->name,
thresh, timeout);
host1x_debug_dump_syncpts(sp->host);
+
if (check_count == MAX_STUCK_CHECK_COUNT)
host1x_debug_dump(sp->host);
+
check_count++;
}
}
+
host1x_intr_put_ref(sp->host, sp->id, ref);
done:
@@ -279,7 +294,9 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
u32 current_val;
u32 future_val;
+
smp_rmb();
+
current_val = (u32)atomic_read(&sp->min_val);
future_val = (u32)atomic_read(&sp->max_val);
@@ -341,14 +358,14 @@ int host1x_syncpt_init(struct host1x *host)
{
struct host1x_syncpt_base *bases;
struct host1x_syncpt *syncpt;
- int i;
+ unsigned int i;
- syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+ syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
GFP_KERNEL);
if (!syncpt)
return -ENOMEM;
- bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+ bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
GFP_KERNEL);
if (!bases)
return -ENOMEM;
@@ -378,6 +395,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
unsigned long flags)
{
struct host1x *host = dev_get_drvdata(dev->parent);
+
return host1x_syncpt_alloc(host, dev, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
@@ -398,8 +416,9 @@ EXPORT_SYMBOL(host1x_syncpt_free);
void host1x_syncpt_deinit(struct host1x *host)
{
- int i;
struct host1x_syncpt *sp = host->syncpt;
+ unsigned int i;
+
for (i = 0; i < host->info->nb_pts; i++, sp++)
kfree(sp->name);
}
@@ -407,10 +426,11 @@ void host1x_syncpt_deinit(struct host1x *host)
/*
* Read max. It indicates how many operations there are in queue, either in
* channel or in a software thread.
- * */
+ */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
smp_rmb();
+
return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
@@ -421,6 +441,7 @@ EXPORT_SYMBOL(host1x_syncpt_read_max);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
smp_rmb();
+
return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
@@ -431,25 +452,26 @@ u32 host1x_syncpt_read(struct host1x_syncpt *sp)
}
EXPORT_SYMBOL(host1x_syncpt_read);
-int host1x_syncpt_nb_pts(struct host1x *host)
+unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
}
-int host1x_syncpt_nb_bases(struct host1x *host)
+unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
return host->info->nb_bases;
}
-int host1x_syncpt_nb_mlocks(struct host1x *host)
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
return host->info->nb_mlocks;
}
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
if (host->info->nb_pts < id)
return NULL;
+
return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);
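
Switching to devm_kcalloc(), as above, avoids an open-coded size multiplication; a small sketch of the idiom (struct foo_entry is illustrative) is:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_entry {
	u32 value;
};

static int foo_alloc_table(struct device *dev, unsigned int n,
			   struct foo_entry **out)
{
	struct foo_entry *table;

	/*
	 * devm_kcalloc() zeroes the memory, checks n * sizeof(*table)
	 * for overflow and frees the allocation automatically when the
	 * device is unbound.
	 */
	table = devm_kcalloc(dev, n, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	*out = table;
	return 0;
}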
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 9056465ecd3f..f719205105ac 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -37,7 +37,7 @@ struct host1x_syncpt_base {
};
struct host1x_syncpt {
- int id;
+ unsigned int id;
atomic_t min_val;
atomic_t max_val;
u32 base_val;
@@ -58,13 +58,13 @@ int host1x_syncpt_init(struct host1x *host);
void host1x_syncpt_deinit(struct host1x *host);
/* Return number of sync point supported. */
-int host1x_syncpt_nb_pts(struct host1x *host);
+unsigned int host1x_syncpt_nb_pts(struct host1x *host);
/* Return number of wait bases supported. */
-int host1x_syncpt_nb_bases(struct host1x *host);
+unsigned int host1x_syncpt_nb_bases(struct host1x *host);
/* Return number of mlocks supported. */
-int host1x_syncpt_nb_mlocks(struct host1x *host);
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
/*
* Check sync point sanity. If max is larger than min, there have too many
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 2f29780e7c68..659475c1e44a 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -150,6 +150,9 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
static int ipu_bus_format_to_map(u32 fmt)
{
switch (fmt) {
+ default:
+ WARN_ON(1);
+ /* fall-through */
case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
case MEDIA_BUS_FMT_RGB565_1X16:
@@ -162,8 +165,6 @@ static int ipu_bus_format_to_map(u32 fmt)
return IPU_DC_MAP_LVDS666;
case MEDIA_BUS_FMT_BGR888_1X24:
return IPU_DC_MAP_BGR24;
- default:
- return -EINVAL;
}
}
@@ -178,10 +179,6 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
dc->di = ipu_di_get_num(di);
map = ipu_bus_format_to_map(bus_format);
- if (map < 0) {
- dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
- return map;
- }
/*
* In interlaced mode we need more counters to create the asymmetric
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 359268e3a166..a8d87ddd8a17 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -572,9 +572,6 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
di->id, sig->mode.hactive, sig->mode.vactive);
- if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
- return -EINVAL;
-
dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 837b1ec22800..42705bb5aaa3 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -45,17 +45,6 @@
#define DMFC_DP_CHAN_6B_24 16
#define DMFC_DP_CHAN_6F_29 24
-#define DMFC_FIFO_SIZE_64 (3 << 3)
-#define DMFC_FIFO_SIZE_128 (2 << 3)
-#define DMFC_FIFO_SIZE_256 (1 << 3)
-#define DMFC_FIFO_SIZE_512 (0 << 3)
-
-#define DMFC_SEGMENT(x) ((x & 0x7) << 0)
-#define DMFC_BURSTSIZE_128 (0 << 6)
-#define DMFC_BURSTSIZE_64 (1 << 6)
-#define DMFC_BURSTSIZE_32 (2 << 6)
-#define DMFC_BURSTSIZE_16 (3 << 6)
-
struct dmfc_channel_data {
int ipu_channel;
unsigned long channel_reg;
@@ -104,9 +93,6 @@ struct ipu_dmfc_priv;
struct dmfc_channel {
unsigned slots;
- unsigned slotmask;
- unsigned segment;
- int burstsize;
struct ipu_soc *ipu;
struct ipu_dmfc_priv *priv;
const struct dmfc_channel_data *data;
@@ -117,7 +103,6 @@ struct ipu_dmfc_priv {
struct device *dev;
struct dmfc_channel channels[DMFC_NUM_CHANNELS];
struct mutex mutex;
- unsigned long bandwidth_per_slot;
void __iomem *base;
int use_count;
};
@@ -172,184 +157,6 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
}
EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
-static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
- int segment, int burstsize)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- u32 val, field;
-
- dev_dbg(priv->dev,
- "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
- slots, segment, dmfc->data->ipu_channel);
-
- switch (slots) {
- case 1:
- field = DMFC_FIFO_SIZE_64;
- break;
- case 2:
- field = DMFC_FIFO_SIZE_128;
- break;
- case 4:
- field = DMFC_FIFO_SIZE_256;
- break;
- case 8:
- field = DMFC_FIFO_SIZE_512;
- break;
- default:
- return -EINVAL;
- }
-
- switch (burstsize) {
- case 16:
- field |= DMFC_BURSTSIZE_16;
- break;
- case 32:
- field |= DMFC_BURSTSIZE_32;
- break;
- case 64:
- field |= DMFC_BURSTSIZE_64;
- break;
- case 128:
- field |= DMFC_BURSTSIZE_128;
- break;
- }
-
- field |= DMFC_SEGMENT(segment);
-
- val = readl(priv->base + dmfc->data->channel_reg);
-
- val &= ~(0xff << dmfc->data->shift);
- val |= field << dmfc->data->shift;
-
- writel(val, priv->base + dmfc->data->channel_reg);
-
- dmfc->slots = slots;
- dmfc->segment = segment;
- dmfc->burstsize = burstsize;
- dmfc->slotmask = ((1 << slots) - 1) << segment;
-
- return 0;
-}
-
-static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
- unsigned long bandwidth)
-{
- int slots = 1;
-
- while (slots * priv->bandwidth_per_slot < bandwidth)
- slots *= 2;
-
- return slots;
-}
-
-static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
-{
- unsigned slotmask_need, slotmask_used = 0;
- int i, segment = 0;
-
- slotmask_need = (1 << slots) - 1;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++)
- slotmask_used |= priv->channels[i].slotmask;
-
- while (slotmask_need <= 0xff) {
- if (!(slotmask_used & slotmask_need))
- return segment;
-
- slotmask_need <<= 1;
- segment++;
- }
-
- return -EBUSY;
-}
-
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- int i;
-
- dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
- dmfc->slots, dmfc->segment);
-
- mutex_lock(&priv->mutex);
-
- if (!dmfc->slots)
- goto out;
-
- dmfc->slotmask = 0;
- dmfc->slots = 0;
- dmfc->segment = 0;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++)
- priv->channels[i].slotmask = 0;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
- if (priv->channels[i].slots > 0) {
- priv->channels[i].segment =
- dmfc_find_slots(priv, priv->channels[i].slots);
- priv->channels[i].slotmask =
- ((1 << priv->channels[i].slots) - 1) <<
- priv->channels[i].segment;
- }
- }
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
- if (priv->channels[i].slots > 0)
- ipu_dmfc_setup_channel(&priv->channels[i],
- priv->channels[i].slots,
- priv->channels[i].segment,
- priv->channels[i].burstsize);
- }
-out:
- mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
-
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
- unsigned long bandwidth_pixel_per_second, int burstsize)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
- int segment = -1, ret = 0;
-
- dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
- bandwidth_pixel_per_second / 1000000,
- dmfc->data->ipu_channel);
-
- ipu_dmfc_free_bandwidth(dmfc);
-
- mutex_lock(&priv->mutex);
-
- if (slots > 8) {
- ret = -EBUSY;
- goto out;
- }
-
- /* For the MEM_BG channel, first try to allocate twice the slots */
- if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
- segment = dmfc_find_slots(priv, slots * 2);
- else if (slots < 2)
- /* Always allocate at least 128*4 bytes (2 slots) */
- slots = 2;
-
- if (segment >= 0)
- slots *= 2;
- else
- segment = dmfc_find_slots(priv, slots);
- if (segment < 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
-
-out:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-
void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -384,7 +191,6 @@ EXPORT_SYMBOL_GPL(ipu_dmfc_get);
void ipu_dmfc_put(struct dmfc_channel *dmfc)
{
- ipu_dmfc_free_bandwidth(dmfc);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_put);
@@ -412,20 +218,15 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
priv->channels[i].priv = priv;
priv->channels[i].ipu = ipu;
priv->channels[i].data = &dmfcdata[i];
- }
-
- writel(0x0, priv->base + DMFC_WR_CHAN);
- writel(0x0, priv->base + DMFC_DP_CHAN);
- /*
- * We have a total bandwidth of clkrate * 4pixel divided
- * into 8 slots.
- */
- priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
-
- dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
- priv->bandwidth_per_slot / 1000000);
+ if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
+ dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
+ dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
+ priv->channels[i].slots = 2;
+ }
+ writel(0x00000050, priv->base + DMFC_WR_CHAN);
+ writel(0x00005654, priv->base + DMFC_DP_CHAN);
writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
writel(0x00000003, priv->base + DMFC_GENERAL1);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cbd7c986d926..5f962bfcb43c 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) "vga_switcheroo: " fmt
+#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
@@ -51,9 +52,9 @@
*
* * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
* * muxless: Dual GPUs but only one of them is connected to outputs.
- * The other one is merely used to offload rendering, its results
- * are copied over PCIe into the framebuffer. On Linux this is
- * supported with DRI PRIME.
+ * The other one is merely used to offload rendering, its results
+ * are copied over PCIe into the framebuffer. On Linux this is
+ * supported with DRI PRIME.
*
* Hybrid graphics started to appear in the late Naughties and were initially
* all muxed. Newer laptops moved to a muxless architecture for cost reasons.
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
*
* Register vga client (GPU). Enable vga_switcheroo if another GPU and a
* handler have already registered. The power state of the client is assumed
- * to be ON.
+ * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
+ * to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* @id: client identifier
*
* Register audio client (audio device on a GPU). The power state of the
- * client is assumed to be ON.
+ * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
+ * shall be called to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -376,6 +379,33 @@ find_active_client(struct list_head *head)
}
/**
+ * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
+ * @pdev: client pci device
+ *
+ * Determine whether any prerequisites are not fulfilled to probe a given
+ * client. Drivers shall invoke this early on in their ->probe callback
+ * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
+ * register the client ere thou hast called this.
+ *
+ * Return: %true if probing should be deferred, otherwise %false.
+ */
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ /*
+ * apple-gmux is needed on pre-retina MacBook Pro
+ * to probe the panel if pdev is the inactive GPU.
+ */
+ if (apple_gmux_present() && pdev != vga_default_device() &&
+ !vgasr_priv.handler_flags)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
+
+/**
* vga_switcheroo_get_client_state() - obtain power state of a given client
* @pdev: client pci device
*
@@ -530,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
* * OFF: Power off the device not in use.
* * ON: Power on the device not in use.
* * IGD: Switch to the integrated graphics device.
- * Power on the integrated GPU if necessary, power off the discrete GPU.
- * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
- * have opened device files of the GPUs or the audio client. If the
- * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
- * and /dev/snd/controlC1 to identify processes blocking the switch.
+ * Power on the integrated GPU if necessary, power off the discrete GPU.
+ * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
+ * have opened device files of the GPUs or the audio client. If the
+ * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
+ * and /dev/snd/controlC1 to identify processes blocking the switch.
* * DIS: Switch to the discrete graphics device.
* * DIGD: Delayed switch to the integrated graphics device.
- * This will perform the switch once the last user space process has
- * closed the device files of the GPUs and the audio client.
+ * This will perform the switch once the last user space process has
+ * closed the device files of the GPUs and the audio client.
* * DDIS: Delayed switch to the discrete graphics device.
* * MIGD: Mux-only switch to the integrated graphics device.
- * Does not remap console or change the power state of either gpu.
- * If the integrated GPU is currently off, the screen will turn black.
- * If it is on, the screen will show whatever happens to be in VRAM.
- * Either way, the user has to blindly enter the command to switch back.
+ * Does not remap console or change the power state of either gpu.
+ * If the integrated GPU is currently off, the screen will turn black.
+ * If it is on, the screen will show whatever happens to be in VRAM.
+ * Either way, the user has to blindly enter the command to switch back.
* * MDIS: Mux-only switch to the discrete graphics device.
*
* For GPUs whose power state is controlled by the driver's runtime pm,
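
A driver would typically call the new vga_switcheroo_client_probe_defer() helper at the very top of its probe routine; roughly (foo_gpu_* names assumed):

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static int foo_gpu_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	/* defer until apple-gmux (or other prerequisites) have shown up */
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* normal GPU bring-up continues here */
	return 0;
}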
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index a7f886961830..fc1e65a263a4 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -30,6 +30,7 @@
#define ADT7411_REG_CFG1 0x18
#define ADT7411_CFG1_START_MONITOR (1 << 0)
+#define ADT7411_CFG1_RESERVED_BIT1 (1 << 1)
#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3)
#define ADT7411_REG_CFG2 0x19
@@ -37,6 +38,9 @@
#define ADT7411_REG_CFG3 0x1a
#define ADT7411_CFG3_ADC_CLK_225 (1 << 0)
+#define ADT7411_CFG3_RESERVED_BIT1 (1 << 1)
+#define ADT7411_CFG3_RESERVED_BIT2 (1 << 2)
+#define ADT7411_CFG3_RESERVED_BIT3 (1 << 3)
#define ADT7411_CFG3_REF_VDD (1 << 4)
#define ADT7411_REG_DEVICE_ID 0x4d
@@ -280,6 +284,45 @@ static int adt7411_detect(struct i2c_client *client,
return 0;
}
+static int adt7411_init_device(struct adt7411_data *data)
+{
+ int ret;
+ u8 val;
+
+ ret = i2c_smbus_read_byte_data(data->client, ADT7411_REG_CFG3);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We must only write zero to bit 1 and bit 2 and only one to bit 3
+ * according to the datasheet.
+ */
+ val = ret;
+ val &= ~(ADT7411_CFG3_RESERVED_BIT1 | ADT7411_CFG3_RESERVED_BIT2);
+ val |= ADT7411_CFG3_RESERVED_BIT3;
+
+ ret = i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG3, val);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, ADT7411_REG_CFG1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We must only write zero to bit 1 and only one to bit 3 according to
+ * the datasheet.
+ */
+ val = ret;
+ val &= ~ADT7411_CFG1_RESERVED_BIT1;
+ val |= ADT7411_CFG1_RESERVED_BIT3;
+
+ /* enable monitoring */
+ val |= ADT7411_CFG1_START_MONITOR;
+
+ return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
+}
+
static int adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -297,10 +340,7 @@ static int adt7411_probe(struct i2c_client *client,
mutex_init(&data->device_lock);
mutex_init(&data->update_lock);
- /* According to the datasheet, we must only write 1 to bit 3 */
- ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
- ADT7411_CFG1_RESERVED_BIT3
- | ADT7411_CFG1_START_MONITOR, 1);
+ ret = adt7411_init_device(data);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index 2b2ff67026be..48633e541dc3 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -242,7 +242,7 @@ static int fts_wd_set_resolution(struct fts_data *data,
}
if (resolution == seconds)
- set_bit(1, (unsigned long *)&ret);
+ ret |= BIT(1);
else
ret &= ~BIT(1);
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index b550ba5fa58a..89449871bca7 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -110,24 +110,24 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "in%d_input",
- in_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "in%d_input",
+ in_i++);
break;
case IIO_TEMP:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "temp%d_input",
+ temp_i++);
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "curr%d_input",
+ curr_i++);
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "humidity%d_input",
+ humidity_i++);
break;
default:
ret = -EINVAL;
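
devm_kasprintf(), used above, allocates the formatted string with device-managed lifetime; a tiny sketch (hypothetical helper) of the same idea:

#include <linux/device.h>
#include <linux/slab.h>

/* name string is freed automatically when the device goes away */
static const char *foo_make_attr_name(struct device *dev, int index)
{
	return devm_kasprintf(dev, GFP_KERNEL, "in%d_input", index);
}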
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 547a9c87c68c..92f9d4bbf597 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -220,7 +220,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm75_data *data;
- int status;
+ int status, err;
u8 set_mask, clr_mask;
int new;
enum lm75_type kind = id->driver_data;
@@ -331,7 +331,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (status != new)
i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
- devm_add_action(dev, lm75_remove, data);
+ err = devm_add_action_or_reset(dev, lm75_remove, data);
+ if (err)
+ return err;
dev_dbg(dev, "Config %02x\n", new);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 1e8237478b2f..496e771b363f 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -529,7 +529,7 @@ static int lm90_update_limits(struct device *dev)
return val;
data->temp_hyst = val;
- lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
if (val < 0)
return val;
data->temp11[REMOTE_LOW] = val << 8;
@@ -1551,9 +1551,7 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
if (config != data->config_orig) /* Only write if changed */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
- devm_add_action(&client->dev, lm90_restore_conf, data);
-
- return 0;
+ return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data);
}
static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
@@ -1640,7 +1638,9 @@ static int lm90_probe(struct i2c_client *client,
return err;
}
- devm_add_action(dev, lm90_regulator_disable, regulator);
+ err = devm_add_action_or_reset(dev, lm90_regulator_disable, regulator);
+ if (err)
+ return err;
data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data)
@@ -1696,7 +1696,9 @@ static int lm90_probe(struct i2c_client *client,
err = device_create_file(dev, &dev_attr_pec);
if (err)
return err;
- devm_add_action(dev, lm90_remove_pec, dev);
+ err = devm_add_action_or_reset(dev, lm90_remove_pec, dev);
+ if (err)
+ return err;
}
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index b73a48832732..6ea99cd6ae79 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -720,7 +720,7 @@ static int sht3x_probe(struct i2c_client *client,
data->setup.blocking_io = false;
data->setup.high_precision = true;
data->mode = 0;
- data->last_update = 0;
+ data->last_update = jiffies - msecs_to_jiffies(3000);
data->client = client;
crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index a942a2574a4d..8479ac5eb853 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -227,7 +227,9 @@ static int tmp102_probe(struct i2c_client *client,
tmp102->config_orig = regval;
- devm_add_action(dev, tmp102_restore_config, tmp102);
+ err = devm_add_action_or_reset(dev, tmp102_restore_config, tmp102);
+ if (err)
+ return err;
regval &= ~TMP102_CONFIG_CLEAR;
regval |= TMP102_CONFIG_SET;
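
The conversions above all follow the same devm_add_action_or_reset() idiom; in outline (foo_* names and the restore step are placeholders):

#include <linux/device.h>
#include <linux/types.h>

struct foo_data {
	u8 config_orig;
};

/* cleanup action run automatically when the device is unbound */
static void foo_restore_config(void *data)
{
	struct foo_data *foo = data;

	/* write foo->config_orig back to the hardware here */
	(void)foo;
}

static int foo_setup(struct device *dev, struct foo_data *foo)
{
	/*
	 * Unlike devm_add_action(), the _or_reset() variant runs the
	 * action immediately and returns the error if registration
	 * fails, so the caller only has to propagate the return value.
	 */
	return devm_add_action_or_reset(dev, foo_restore_config, foo);
}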
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 75dd6d041241..11e2a1fc10e9 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -71,7 +71,7 @@ static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req)
if (rc)
goto exit;
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS) {
rc = i2c_opal_translate_error(rc);
goto exit;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 2137adfbd8c3..e9b7dc037ff8 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -84,6 +84,7 @@ source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ad1b1adcf6f0..e6dfa1bd3def 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
+#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
@@ -162,6 +163,14 @@ struct rdma_bind_list {
unsigned short port;
};
+struct class_port_info_context {
+ struct ib_class_port_info *class_port_info;
+ struct ib_device *device;
+ struct completion done;
+ struct ib_sa_query *sa_query;
+ u8 port_num;
+};
+
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
@@ -306,6 +315,7 @@ struct cma_multicast {
struct sockaddr_storage addr;
struct kref mcref;
bool igmp_joined;
+ u8 join_state;
};
struct cma_work {
@@ -3752,10 +3762,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
}
}
+static void cma_query_sa_classport_info_cb(int status,
+ struct ib_class_port_info *rec,
+ void *context)
+{
+ struct class_port_info_context *cb_ctx = context;
+
+ WARN_ON(!context);
+
+ if (status || !rec) {
+ pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
+ cb_ctx->device->name, cb_ctx->port_num, status);
+ goto out;
+ }
+
+ memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
+
+out:
+ complete(&cb_ctx->done);
+}
+
+static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
+ struct ib_class_port_info *class_port_info)
+{
+ struct class_port_info_context *cb_ctx;
+ int ret;
+
+ cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
+ if (!cb_ctx)
+ return -ENOMEM;
+
+ cb_ctx->device = device;
+ cb_ctx->class_port_info = class_port_info;
+ cb_ctx->port_num = port_num;
+ init_completion(&cb_ctx->done);
+
+ ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
+ CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
+ GFP_KERNEL, cma_query_sa_classport_info_cb,
+ cb_ctx, &cb_ctx->sa_query);
+ if (ret < 0) {
+ pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
+ device->name, port_num, ret);
+ goto out;
+ }
+
+ wait_for_completion(&cb_ctx->done);
+
+out:
+ kfree(cb_ctx);
+ return ret;
+}
+
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
struct ib_sa_mcmember_rec rec;
+ struct ib_class_port_info class_port_info;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
ib_sa_comp_mask comp_mask;
int ret;
@@ -3774,7 +3837,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
rec.qkey = cpu_to_be32(id_priv->qkey);
rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
- rec.join_state = 1;
+ rec.join_state = mc->join_state;
+
+ if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
+ ret = cma_query_sa_classport_info(id_priv->id.device,
+ id_priv->id.port_num,
+ &class_port_info);
+
+ if (ret)
+ return ret;
+
+ if (!(ib_get_cpi_capmask2(&class_port_info) &
+ IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
+ pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
+ "RDMA CM: SM doesn't support Send Only Full Member option\n",
+ id_priv->id.device->name, id_priv->id.port_num);
+ return -EOPNOTSUPP;
+ }
+ }
comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@ -3843,6 +3923,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
enum ib_gid_type gid_type;
+ bool send_only;
+
+ send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
if (cma_zero_addr((struct sockaddr *)&mc->addr))
return -EINVAL;
@@ -3878,10 +3961,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (addr->sa_family == AF_INET) {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
- true);
- if (!err)
- mc->igmp_joined = true;
+ if (!send_only) {
+ err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+ true);
+ if (!err)
+ mc->igmp_joined = true;
+ }
}
} else {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -3911,7 +3996,7 @@ out1:
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
- void *context)
+ u8 join_state, void *context)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc;
@@ -3930,6 +4015,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
mc->context = context;
mc->id_priv = id_priv;
mc->igmp_joined = false;
+ mc->join_state = join_state;
spin_lock(&id_priv->lock);
list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock);
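
cma_query_sa_classport_info() above turns an asynchronous SA query into a blocking call with a completion; stripped of the IB specifics, the pattern (foo_* names and the stubbed async call are assumptions) looks like:

#include <linux/completion.h>
#include <linux/slab.h>

struct foo_query_ctx {
	struct completion done;
	int status;
};

/* asynchronous callback: record the result and wake the waiter */
static void foo_query_cb(int status, void *context)
{
	struct foo_query_ctx *ctx = context;

	ctx->status = status;
	complete(&ctx->done);
}

/* stand-in for the real asynchronous query API */
static void foo_start_query(void (*cb)(int, void *), void *context)
{
	cb(0, context);
}

static int foo_query_sync(void)
{
	struct foo_query_ctx *ctx;
	int ret;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	init_completion(&ctx->done);
	foo_start_query(foo_query_cb, ctx);
	wait_for_completion(&ctx->done);

	ret = ctx->status;
	kfree(ctx);
	return ret;
}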
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5c155fa91eec..760ef603a468 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -311,6 +311,15 @@ static int read_port_immutable(struct ib_device *device)
return 0;
}
+void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
+{
+ if (dev->get_dev_fw_str)
+ dev->get_dev_fw_str(dev, str, str_len);
+ else
+ str[0] = '\0';
+}
+EXPORT_SYMBOL(ib_get_device_fw_str);
+
/**
* ib_register_device - Register an IB device with IB core
* @device:Device to register
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f0572049d291..357624f8b9d3 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -183,15 +183,14 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
/*
* Release a reference on cm_id. If the last reference is being
- * released, enable the waiting thread (in iw_destroy_cm_id) to
- * get woken up, and return 1 if a thread is already waiting.
+ * released, free the cm_id and return 1.
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
if (atomic_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
- complete(&cm_id_priv->destroy_comp);
+ free_cm_id(cm_id_priv);
return 1;
}
@@ -208,19 +207,10 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
- int cb_destroy;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- /*
- * Test bit before deref in case the cm_id gets freed on another
- * thread.
- */
- cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
- BUG_ON(!list_empty(&cm_id_priv->work_list));
- free_cm_id(cm_id_priv);
- }
+ (void)iwcm_deref_id(cm_id_priv);
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
@@ -370,6 +360,12 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
wait_event(cm_id_priv->connect_wait,
!test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
+ /*
+ * Since we're deleting the cm_id, drop any events that
+ * might arrive before the last dereference.
+ */
+ set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->state) {
case IW_CM_STATE_LISTEN:
@@ -433,13 +429,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
-
destroy_cm_id(cm_id);
-
- wait_for_completion(&cm_id_priv->destroy_comp);
-
- free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -809,10 +799,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
ret = cm_id->cm_handler(cm_id, iw_event);
if (ret) {
iw_cm_reject(cm_id, NULL, 0);
- set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- destroy_cm_id(cm_id);
- if (atomic_read(&cm_id_priv->refcount)==0)
- free_cm_id(cm_id_priv);
+ iw_destroy_cm_id(cm_id);
}
out:
@@ -1000,7 +987,6 @@ static void cm_work_handler(struct work_struct *_work)
unsigned long flags;
int empty;
int ret = 0;
- int destroy_id;
spin_lock_irqsave(&cm_id_priv->lock, flags);
empty = list_empty(&cm_id_priv->work_list);
@@ -1013,20 +999,14 @@ static void cm_work_handler(struct work_struct *_work)
put_work(work);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = process_event(cm_id_priv, &levent);
- if (ret) {
- set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- destroy_cm_id(&cm_id_priv->id);
- }
- BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
- destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- if (iwcm_deref_id(cm_id_priv)) {
- if (destroy_id) {
- BUG_ON(!list_empty(&cm_id_priv->work_list));
- free_cm_id(cm_id_priv);
- }
+ if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+ ret = process_event(cm_id_priv, &levent);
+ if (ret)
+ destroy_cm_id(&cm_id_priv->id);
+ } else
+ pr_debug("dropping event %d\n", levent.event);
+ if (iwcm_deref_id(cm_id_priv))
return;
- }
if (empty)
return;
spin_lock_irqsave(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
index 3f6cc82564c8..82c2cd1b0a80 100644
--- a/drivers/infiniband/core/iwcm.h
+++ b/drivers/infiniband/core/iwcm.h
@@ -56,7 +56,7 @@ struct iwcm_id_private {
struct list_head work_free_list;
};
-#define IWCM_F_CALLBACK_DESTROY 1
+#define IWCM_F_DROP_EVENTS 1
#define IWCM_F_CONNECT_WAIT 2
#endif /* IWCM_H */
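The iwcm changes above replace the IWCM_F_CALLBACK_DESTROY/completion dance with a simpler rule: whoever drops the last reference frees the cm_id, and the destroy path sets IWCM_F_DROP_EVENTS so late work items are discarded rather than processed. A minimal user-space sketch of that pattern (C11 atomics, invented names, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cm_obj {
	atomic_int refcount;
	atomic_bool drop_events;
};

static void obj_put(struct cm_obj *obj)
{
	/* whoever drops the last reference frees the object */
	if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
		printf("freeing object\n");
		free(obj);
	}
}

static void work_handler(struct cm_obj *obj, int event)
{
	if (!atomic_load(&obj->drop_events))
		printf("processing event %d\n", event);
	else
		printf("dropping event %d\n", event);
	obj_put(obj);	/* drop the reference taken when the work was queued */
}

int main(void)
{
	struct cm_obj *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	atomic_init(&obj->refcount, 2);		/* one for the owner, one for queued work */
	atomic_init(&obj->drop_events, false);

	atomic_store(&obj->drop_events, true);	/* destroy path: discard late events */
	work_handler(obj, 42);			/* queued work runs, then drops its ref */
	obj_put(obj);				/* owner drops the final reference */
	return 0;
}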
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index b65e06c560d7..ade71e7f0131 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -37,6 +37,7 @@
#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
#define IWPM_REMINFO_HASH_SIZE 64
#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
+#define IWPM_MSG_SIZE 512
static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -452,7 +453,7 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
{
struct sk_buff *skb = NULL;
- skb = dev_alloc_skb(NLMSG_GOODSIZE);
+ skb = dev_alloc_skb(IWPM_MSG_SIZE);
if (!skb) {
pr_err("%s Unable to allocate skb\n", __func__);
goto create_nlmsg_exit;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a83ec28a147b..3a3c5d73bbfc 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,18 +93,6 @@ enum {
struct mcast_member;
-/*
-* There are 4 types of join states:
-* FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
-*/
-enum {
- FULLMEMBER_JOIN,
- NONMEMBER_JOIN,
- SENDONLY_NONMEBER_JOIN,
- SENDONLY_FULLMEMBER_JOIN,
- NUM_JOIN_MEMBERSHIP_TYPES,
-};
-
struct mcast_group {
struct ib_sa_mcmember_rec rec;
struct rb_node node;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 9b8c20c8209b..10469b0088b5 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -229,7 +229,10 @@ static void ibnl_rcv(struct sk_buff *skb)
int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
__u32 pid)
{
- return nlmsg_unicast(nls, skb, pid);
+ int err;
+
+ err = netlink_unicast(nls, skb, pid, 0);
+ return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(ibnl_unicast);
@@ -252,6 +255,7 @@ int __init ibnl_init(void)
return -ENOMEM;
}
+ nls->sk_sndtimeo = 10 * HZ;
return 0;
}
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 1eb9b1294a63..dbfd854c32c9 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -58,19 +58,13 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
return false;
}
-static inline u32 rdma_rw_max_sge(struct ib_device *dev,
- enum dma_data_direction dir)
-{
- return dir == DMA_TO_DEVICE ?
- dev->attrs.max_sge : dev->attrs.max_sge_rd;
-}
-
static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
/* arbitrary limit to avoid allocating gigantic resources */
return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}
+/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
u32 sg_cnt, u32 offset)
@@ -114,6 +108,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
+ struct rdma_rw_reg_ctx *prev = NULL;
u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
int i, j, ret = 0, count = 0;
@@ -125,7 +120,6 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
}
for (i = 0; i < ctx->nr_ops; i++) {
- struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
u32 nents = min(sg_cnt, pages_per_mr);
@@ -162,9 +156,13 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
sg_cnt -= nents;
for (j = 0; j < nents; j++)
sg = sg_next(sg);
+ prev = reg;
offset = 0;
}
+ if (prev)
+ prev->wr.wr.next = NULL;
+
ctx->type = RDMA_RW_MR;
return count;
@@ -181,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct ib_device *dev = qp->pd->device;
- u32 max_sge = rdma_rw_max_sge(dev, dir);
+ u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
+ qp->max_read_sge;
struct ib_sge *sge;
u32 total_len = 0, i, j;
@@ -205,11 +204,10 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
rdma_wr->wr.opcode = IB_WR_RDMA_READ;
rdma_wr->remote_addr = remote_addr + total_len;
rdma_wr->rkey = rkey;
+ rdma_wr->wr.num_sge = nr_sge;
rdma_wr->wr.sg_list = sge;
for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
- rdma_wr->wr.num_sge++;
-
sge->addr = ib_sg_dma_address(dev, sg) + offset;
sge->length = ib_sg_dma_len(dev, sg) - offset;
sge->lkey = qp->pd->local_dma_lkey;
@@ -220,8 +218,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
offset = 0;
}
- if (i + 1 < ctx->nr_ops)
- rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
+ rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
+ &ctx->map.wrs[i + 1].wr : NULL;
}
ctx->type = RDMA_RW_MULTI_WR;
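The rw.c hunks above track the previously built registration context and explicitly NULL-terminate the work-request chain after the loop, instead of reaching back into ctx->reg[i - 1] on every iteration. The idea in isolation, as a small C program with plain structs standing in for the ib_* work requests:

#include <stdio.h>
#include <stdlib.h>

struct demo_wr {
	int id;
	struct demo_wr *next;
};

int main(void)
{
	enum { NR_OPS = 4 };
	struct demo_wr *wrs = calloc(NR_OPS, sizeof(*wrs));
	struct demo_wr *prev = NULL;
	int i;

	if (!wrs)
		return 1;
	for (i = 0; i < NR_OPS; i++) {
		wrs[i].id = i;
		if (prev)
			prev->next = &wrs[i];	/* link the previous entry to this one */
		prev = &wrs[i];
	}
	if (prev)
		prev->next = NULL;		/* terminate the chain explicitly */

	for (prev = &wrs[0]; prev; prev = prev->next)
		printf("wr %d\n", prev->id);
	free(wrs);
	return 0;
}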
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e95538650dc6..b9bf7aa055e7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -65,10 +65,17 @@ struct ib_sa_sm_ah {
u8 src_path_mask;
};
+struct ib_sa_classport_cache {
+ bool valid;
+ struct ib_class_port_info data;
+};
+
struct ib_sa_port {
struct ib_mad_agent *agent;
struct ib_sa_sm_ah *sm_ah;
struct work_struct update_task;
+ struct ib_sa_classport_cache classport_info;
+ spinlock_t classport_lock; /* protects class port info set */
spinlock_t ah_lock;
u8 port_num;
};
@@ -998,6 +1005,13 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
port->sm_ah = NULL;
spin_unlock_irqrestore(&port->ah_lock, flags);
+ if (event->event == IB_EVENT_SM_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_LID_CHANGE) {
+ spin_lock_irqsave(&port->classport_lock, flags);
+ port->classport_info.valid = false;
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ }
queue_work(ib_wq, &sa_dev->port[event->element.port_num -
sa_dev->start_port].update_task);
}
@@ -1719,6 +1733,7 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
{
+ unsigned long flags;
struct ib_sa_classport_info_query *query =
container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
@@ -1728,6 +1743,16 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
ib_unpack(classport_info_rec_table,
ARRAY_SIZE(classport_info_rec_table),
mad->data, &rec);
+
+ spin_lock_irqsave(&sa_query->port->classport_lock, flags);
+ if (!status && !sa_query->port->classport_info.valid) {
+ memcpy(&sa_query->port->classport_info.data, &rec,
+ sizeof(sa_query->port->classport_info.data));
+
+ sa_query->port->classport_info.valid = true;
+ }
+ spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);
+
query->callback(status, &rec, query->context);
} else {
query->callback(status, NULL, query->context);
@@ -1754,7 +1779,9 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
struct ib_sa_port *port;
struct ib_mad_agent *agent;
struct ib_sa_mad *mad;
+ struct ib_class_port_info cached_class_port_info;
int ret;
+ unsigned long flags;
if (!sa_dev)
return -ENODEV;
@@ -1762,6 +1789,17 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
+ /* Use the cached ClassPortInfo attribute, if valid, instead of sending a MAD */
+ spin_lock_irqsave(&port->classport_lock, flags);
+ if (port->classport_info.valid && callback) {
+ memcpy(&cached_class_port_info, &port->classport_info.data,
+ sizeof(cached_class_port_info));
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ callback(0, &cached_class_port_info, context);
+ return 0;
+ }
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+
query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -1885,6 +1923,9 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
+ spin_lock_init(&sa_dev->port[i].classport_lock);
+ sa_dev->port[i].classport_info.valid = false;
+
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
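The sa_query.c changes cache the ClassPortInfo record per port: a spinlock-protected validity flag lets queries be answered from the cache, and SM change, client re-register, and LID change events simply clear the flag so the next query refetches. A minimal user-space sketch of that cache-with-invalidation pattern (pthread mutex in place of the spinlock, hypothetical names; compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct classport_cache {
	pthread_mutex_t lock;
	bool valid;
	int data;			/* stands in for the cached record */
};

static int cache_query(struct classport_cache *c)
{
	int val;

	pthread_mutex_lock(&c->lock);
	if (c->valid) {
		val = c->data;		/* fast path: serve from the cache */
		pthread_mutex_unlock(&c->lock);
		return val;
	}
	pthread_mutex_unlock(&c->lock);

	val = 123;			/* slow path: pretend we queried the fabric */

	pthread_mutex_lock(&c->lock);
	if (!c->valid) {		/* only the first completion populates it */
		c->data = val;
		c->valid = true;
	}
	pthread_mutex_unlock(&c->lock);
	return val;
}

static void cache_invalidate(struct classport_cache *c)
{
	pthread_mutex_lock(&c->lock);
	c->valid = false;		/* e.g. on an SM change or LID change event */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	static struct classport_cache c = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	printf("first query: %d\n", cache_query(&c));	/* slow path, then cached */
	printf("second query: %d\n", cache_query(&c));	/* served from the cache */
	cache_invalidate(&c);
	printf("after invalidate: %d\n", cache_query(&c));
	return 0;
}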
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 60df4f8e81be..15defefecb4f 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -38,6 +38,7 @@
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>
@@ -1200,16 +1201,28 @@ static ssize_t set_node_desc(struct device *device,
return count;
}
+static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ib_device *dev = container_of(device, struct ib_device, dev);
+
+ ib_get_device_fw_str(dev, buf, PAGE_SIZE);
+ strlcat(buf, "\n", PAGE_SIZE);
+ return strlen(buf);
+}
+
static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static struct device_attribute *ib_class_attributes[] = {
&dev_attr_node_type,
&dev_attr_sys_image_guid,
&dev_attr_node_guid,
- &dev_attr_node_desc
+ &dev_attr_node_desc,
+ &dev_attr_fw_ver,
};
static void free_port_list_attributes(struct ib_device *device)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index c0f3826abb30..2825ece91d3c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -106,6 +106,7 @@ struct ucma_multicast {
int events_reported;
u64 uid;
+ u8 join_state;
struct list_head list;
struct sockaddr_storage addr;
};
@@ -1317,12 +1318,20 @@ static ssize_t ucma_process_join(struct ucma_file *file,
struct ucma_multicast *mc;
struct sockaddr *addr;
int ret;
+ u8 join_state;
if (out_len < sizeof(resp))
return -ENOSPC;
addr = (struct sockaddr *) &cmd->addr;
- if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ return -EINVAL;
+
+ if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
+ join_state = BIT(FULLMEMBER_JOIN);
+ else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
+ join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
+ else
return -EINVAL;
ctx = ucma_get_ctx(file, cmd->id);
@@ -1335,10 +1344,11 @@ static ssize_t ucma_process_join(struct ucma_file *file,
ret = -ENOMEM;
goto err1;
}
-
+ mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
+ ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+ join_state, mc);
if (ret)
goto err2;
@@ -1382,7 +1392,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
- join_cmd.reserved = 0;
+ join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
return ucma_process_join(file, &join_cmd, out_len);
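The ucma_process_join() change above replaces the old reserved field with join_flags and maps each accepted flag onto a single join-state bit, rejecting anything else. The same validation, reduced to a stand-alone C function (the enum values and flag names here are local to the sketch, not the kernel ABI):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { FULLMEMBER_JOIN, NONMEMBER_JOIN, SENDONLY_NONMEMBER_JOIN,
       SENDONLY_FULLMEMBER_JOIN };

#define JOIN_FLAG_FULLMEMBER		0u
#define JOIN_FLAG_SENDONLY_FULLMEMBER	1u
#define BIT(n)				(1u << (n))

static int flags_to_join_state(uint32_t flags, uint8_t *join_state)
{
	if (flags == JOIN_FLAG_FULLMEMBER)
		*join_state = BIT(FULLMEMBER_JOIN);
	else if (flags == JOIN_FLAG_SENDONLY_FULLMEMBER)
		*join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;		/* reject every other combination */
	return 0;
}

int main(void)
{
	uint8_t state;

	if (!flags_to_join_state(JOIN_FLAG_SENDONLY_FULLMEMBER, &state))
		printf("send-only full member -> join_state 0x%x\n", state);
	if (flags_to_join_state(0x7, &state))
		printf("unknown flags rejected\n");
	return 0;
}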
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index fe4d2e1a8b58..c68746ce6624 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,7 +37,6 @@
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>
@@ -92,12 +91,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long npages;
int ret;
int i;
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ dma_attrs |= DMA_ATTR_WRITE_BARRIER;
if (!size)
return ERR_PTR(-EINVAL);
@@ -215,7 +214,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->sg_head.sgl,
umem->npages,
DMA_BIDIRECTIONAL,
- &attrs);
+ dma_attrs);
if (umem->nmap <= 0) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 612ccfd39bf9..df26a741cda6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -116,6 +116,7 @@ struct ib_uverbs_event_file {
struct ib_uverbs_file {
struct kref ref;
struct mutex mutex;
+ struct mutex cleanup_mutex; /* protect cleanup */
struct ib_uverbs_device *device;
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
@@ -162,6 +163,10 @@ struct ib_uqp_object {
struct ib_uxrcd_object *uxrcd;
};
+struct ib_uwq_object {
+ struct ib_uevent_object uevent;
+};
+
struct ib_ucq_object {
struct ib_uobject uobject;
struct ib_uverbs_file *uverbs_file;
@@ -181,6 +186,8 @@ extern struct idr ib_uverbs_qp_idr;
extern struct idr ib_uverbs_srq_idr;
extern struct idr ib_uverbs_xrcd_idr;
extern struct idr ib_uverbs_rule_idr;
+extern struct idr ib_uverbs_wq_idr;
+extern struct idr ib_uverbs_rwq_ind_tbl_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
@@ -199,6 +206,7 @@ void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
@@ -219,6 +227,7 @@ struct ib_uverbs_flow_spec {
struct ib_uverbs_flow_spec_eth eth;
struct ib_uverbs_flow_spec_ipv4 ipv4;
struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
+ struct ib_uverbs_flow_spec_ipv6 ipv6;
};
};
@@ -275,5 +284,10 @@ IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
IB_UVERBS_DECLARE_EX_CMD(query_device);
IB_UVERBS_DECLARE_EX_CMD(create_cq);
IB_UVERBS_DECLARE_EX_CMD(create_qp);
+IB_UVERBS_DECLARE_EX_CMD(create_wq);
+IB_UVERBS_DECLARE_EX_CMD(modify_wq);
+IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
+IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 825021d1008b..f6647318138d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -57,6 +57,8 @@ static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
+static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };
/*
* The ib_uobject locking scheme is as follows:
@@ -243,6 +245,27 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
+static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
+{
+ return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
+}
+
+static void put_wq_read(struct ib_wq *wq)
+{
+ put_uobj_read(wq->uobject);
+}
+
+static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
+ struct ib_ucontext *context)
+{
+ return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
+}
+
+static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
+{
+ put_uobj_read(ind_table->uobject);
+}
+
static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
struct ib_uobject *uobj;
@@ -326,6 +349,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->qp_list);
INIT_LIST_HEAD(&ucontext->srq_list);
INIT_LIST_HEAD(&ucontext->ah_list);
+ INIT_LIST_HEAD(&ucontext->wq_list);
+ INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
INIT_LIST_HEAD(&ucontext->xrcd_list);
INIT_LIST_HEAD(&ucontext->rule_list);
rcu_read_lock();
@@ -1750,6 +1775,8 @@ static int create_qp(struct ib_uverbs_file *file,
struct ib_qp_init_attr attr = {};
struct ib_uverbs_ex_create_qp_resp resp;
int ret;
+ struct ib_rwq_ind_table *ind_tbl = NULL;
+ bool has_sq = true;
if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
return -EPERM;
@@ -1761,6 +1788,32 @@ static int create_qp(struct ib_uverbs_file *file,
init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
&qp_lock_class);
down_write(&obj->uevent.uobject.mutex);
+ if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
+ sizeof(cmd->rwq_ind_tbl_handle) &&
+ (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
+ ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
+ file->ucontext);
+ if (!ind_tbl) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ attr.rwq_ind_tbl = ind_tbl;
+ }
+
+ if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
+ sizeof(cmd->reserved1)) && cmd->reserved1) {
+ ret = -EOPNOTSUPP;
+ goto err_put;
+ }
+
+ if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ if (ind_tbl && !cmd->max_send_wr)
+ has_sq = false;
if (cmd->qp_type == IB_QPT_XRC_TGT) {
xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
@@ -1784,20 +1837,24 @@ static int create_qp(struct ib_uverbs_file *file,
}
}
- if (cmd->recv_cq_handle != cmd->send_cq_handle) {
- rcq = idr_read_cq(cmd->recv_cq_handle,
- file->ucontext, 0);
- if (!rcq) {
- ret = -EINVAL;
- goto err_put;
+ if (!ind_tbl) {
+ if (cmd->recv_cq_handle != cmd->send_cq_handle) {
+ rcq = idr_read_cq(cmd->recv_cq_handle,
+ file->ucontext, 0);
+ if (!rcq) {
+ ret = -EINVAL;
+ goto err_put;
+ }
}
}
}
- scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
- rcq = rcq ?: scq;
+ if (has_sq)
+ scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
+ if (!ind_tbl)
+ rcq = rcq ?: scq;
pd = idr_read_pd(cmd->pd_handle, file->ucontext);
- if (!pd || !scq) {
+ if (!pd || (!scq && has_sq)) {
ret = -EINVAL;
goto err_put;
}
@@ -1864,16 +1921,20 @@ static int create_qp(struct ib_uverbs_file *file,
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
qp->srq = attr.srq;
+ qp->rwq_ind_tbl = ind_tbl;
qp->event_handler = attr.event_handler;
qp->qp_context = attr.qp_context;
qp->qp_type = attr.qp_type;
atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
- atomic_inc(&attr.send_cq->usecnt);
+ if (attr.send_cq)
+ atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
atomic_inc(&attr.recv_cq->usecnt);
if (attr.srq)
atomic_inc(&attr.srq->usecnt);
+ if (ind_tbl)
+ atomic_inc(&ind_tbl->usecnt);
}
qp->uobject = &obj->uevent.uobject;
@@ -1913,6 +1974,8 @@ static int create_qp(struct ib_uverbs_file *file,
put_cq_read(rcq);
if (srq)
put_srq_read(srq);
+ if (ind_tbl)
+ put_rwq_indirection_table_read(ind_tbl);
mutex_lock(&file->mutex);
list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
@@ -1940,6 +2003,8 @@ err_put:
put_cq_read(rcq);
if (srq)
put_srq_read(srq);
+ if (ind_tbl)
+ put_rwq_indirection_table_read(ind_tbl);
put_uobj_write(&obj->uevent.uobject);
return ret;
@@ -2033,7 +2098,7 @@ int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
if (err)
return err;
- if (cmd.comp_mask)
+ if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
return -EINVAL;
if (cmd.reserved)
@@ -3040,6 +3105,15 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
sizeof(struct ib_flow_ipv4_filter));
break;
+ case IB_FLOW_SPEC_IPV6:
+ ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6);
+ if (ib_spec->ipv6.size != kern_spec->ipv6.size)
+ return -EINVAL;
+ memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val,
+ sizeof(struct ib_flow_ipv6_filter));
+ memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask,
+ sizeof(struct ib_flow_ipv6_filter));
+ break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
@@ -3056,6 +3130,445 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
return 0;
}
+int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_create_wq cmd = {};
+ struct ib_uverbs_ex_create_wq_resp resp = {};
+ struct ib_uwq_object *obj;
+ int err = 0;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ struct ib_wq *wq;
+ struct ib_wq_init_attr wq_init_attr = {};
+ size_t required_cmd_sz;
+ size_t required_resp_len;
+
+ required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
+ required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
+
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->outlen < required_resp_len)
+ return -ENOSPC;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (err)
+ return err;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext,
+ &wq_lock_class);
+ down_write(&obj->uevent.uobject.mutex);
+ pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+ if (!pd) {
+ err = -EINVAL;
+ goto err_uobj;
+ }
+
+ cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+ if (!cq) {
+ err = -EINVAL;
+ goto err_put_pd;
+ }
+
+ wq_init_attr.cq = cq;
+ wq_init_attr.max_sge = cmd.max_sge;
+ wq_init_attr.max_wr = cmd.max_wr;
+ wq_init_attr.wq_context = file;
+ wq_init_attr.wq_type = cmd.wq_type;
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ obj->uevent.events_reported = 0;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
+ if (IS_ERR(wq)) {
+ err = PTR_ERR(wq);
+ goto err_put_cq;
+ }
+
+ wq->uobject = &obj->uevent.uobject;
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ wq->wq_context = wq_init_attr.wq_context;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ wq->uobject = &obj->uevent.uobject;
+ obj->uevent.uobject.object = wq;
+ err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
+ if (err)
+ goto destroy_wq;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.wq_handle = obj->uevent.uobject.id;
+ resp.max_sge = wq_init_attr.max_sge;
+ resp.max_wr = wq_init_attr.max_wr;
+ resp.wqn = wq->wq_num;
+ resp.response_length = required_resp_len;
+ err = ib_copy_to_udata(ucore,
+ &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+
+ put_pd_read(pd);
+ put_cq_read(cq);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list);
+ mutex_unlock(&file->mutex);
+
+ obj->uevent.uobject.live = 1;
+ up_write(&obj->uevent.uobject.mutex);
+ return 0;
+
+err_copy:
+ idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
+destroy_wq:
+ ib_destroy_wq(wq);
+err_put_cq:
+ put_cq_read(cq);
+err_put_pd:
+ put_pd_read(pd);
+err_uobj:
+ put_uobj_write(&obj->uevent.uobject);
+
+ return err;
+}
+
+int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_destroy_wq cmd = {};
+ struct ib_uverbs_ex_destroy_wq_resp resp = {};
+ struct ib_wq *wq;
+ struct ib_uobject *uobj;
+ struct ib_uwq_object *obj;
+ size_t required_cmd_sz;
+ size_t required_resp_len;
+ int ret;
+
+ required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
+ required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->outlen < required_resp_len)
+ return -ENOSPC;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ resp.response_length = required_resp_len;
+ uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle,
+ file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+
+ wq = uobj->object;
+ obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
+ ret = ib_destroy_wq(wq);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+ if (ret)
+ return ret;
+
+ idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ ib_uverbs_release_uevent(file, &obj->uevent);
+ resp.events_reported = obj->uevent.events_reported;
+ put_uobj(uobj);
+
+ ret = ib_copy_to_udata(ucore, &resp, resp.response_length);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_modify_wq cmd = {};
+ struct ib_wq *wq;
+ struct ib_wq_attr wq_attr = {};
+ size_t required_cmd_sz;
+ int ret;
+
+ required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (!cmd.attr_mask)
+ return -EINVAL;
+
+ if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE))
+ return -EINVAL;
+
+ wq = idr_read_wq(cmd.wq_handle, file->ucontext);
+ if (!wq)
+ return -EINVAL;
+
+ wq_attr.curr_wq_state = cmd.curr_wq_state;
+ wq_attr.wq_state = cmd.wq_state;
+ ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+ put_wq_read(wq);
+ return ret;
+}
+
+int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
+ struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
+ struct ib_uobject *uobj;
+ int err = 0;
+ struct ib_rwq_ind_table_init_attr init_attr = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ struct ib_wq **wqs = NULL;
+ u32 *wqs_handles = NULL;
+ struct ib_wq *wq = NULL;
+ int i, j, num_read_wqs;
+ u32 num_wq_handles;
+ u32 expected_in_size;
+ size_t required_cmd_sz_header;
+ size_t required_resp_len;
+
+ required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
+ required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
+
+ if (ucore->inlen < required_cmd_sz_header)
+ return -EINVAL;
+
+ if (ucore->outlen < required_resp_len)
+ return -ENOSPC;
+
+ err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
+ if (err)
+ return err;
+
+ ucore->inbuf += required_cmd_sz_header;
+ ucore->inlen -= required_cmd_sz_header;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
+ return -EINVAL;
+
+ num_wq_handles = 1 << cmd.log_ind_tbl_size;
+ expected_in_size = num_wq_handles * sizeof(__u32);
+ if (num_wq_handles == 1)
+ /* input size for wq handles is u64 aligned */
+ expected_in_size += sizeof(__u32);
+
+ if (ucore->inlen < expected_in_size)
+ return -EINVAL;
+
+ if (ucore->inlen > expected_in_size &&
+ !ib_is_udata_cleared(ucore, expected_in_size,
+ ucore->inlen - expected_in_size))
+ return -EOPNOTSUPP;
+
+ wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
+ GFP_KERNEL);
+ if (!wqs_handles)
+ return -ENOMEM;
+
+ err = ib_copy_from_udata(wqs_handles, ucore,
+ num_wq_handles * sizeof(__u32));
+ if (err)
+ goto err_free;
+
+ wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
+ if (!wqs) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
+ num_read_wqs++) {
+ wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext);
+ if (!wq) {
+ err = -EINVAL;
+ goto put_wqs;
+ }
+
+ wqs[num_read_wqs] = wq;
+ }
+
+ uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+ if (!uobj) {
+ err = -ENOMEM;
+ goto put_wqs;
+ }
+
+ init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class);
+ down_write(&uobj->mutex);
+ init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+ init_attr.ind_tbl = wqs;
+ rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
+
+ if (IS_ERR(rwq_ind_tbl)) {
+ err = PTR_ERR(rwq_ind_tbl);
+ goto err_uobj;
+ }
+
+ rwq_ind_tbl->ind_tbl = wqs;
+ rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
+ rwq_ind_tbl->uobject = uobj;
+ uobj->object = rwq_ind_tbl;
+ rwq_ind_tbl->device = ib_dev;
+ atomic_set(&rwq_ind_tbl->usecnt, 0);
+
+ for (i = 0; i < num_wq_handles; i++)
+ atomic_inc(&wqs[i]->usecnt);
+
+ err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+ if (err)
+ goto destroy_ind_tbl;
+
+ resp.ind_tbl_handle = uobj->id;
+ resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
+ resp.response_length = required_resp_len;
+
+ err = ib_copy_to_udata(ucore,
+ &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+
+ kfree(wqs_handles);
+
+ for (j = 0; j < num_read_wqs; j++)
+ put_wq_read(wqs[j]);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list);
+ mutex_unlock(&file->mutex);
+
+ uobj->live = 1;
+
+ up_write(&uobj->mutex);
+ return 0;
+
+err_copy:
+ idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+destroy_ind_tbl:
+ ib_destroy_rwq_ind_table(rwq_ind_tbl);
+err_uobj:
+ put_uobj_write(uobj);
+put_wqs:
+ for (j = 0; j < num_read_wqs; j++)
+ put_wq_read(wqs[j]);
+err_free:
+ kfree(wqs_handles);
+ kfree(wqs);
+ return err;
+}
+
+int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ struct ib_uobject *uobj;
+ int ret;
+ struct ib_wq **ind_tbl;
+ size_t required_cmd_sz;
+
+ required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
+
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle,
+ file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+ rwq_ind_tbl = uobj->object;
+ ind_tbl = rwq_ind_tbl->ind_tbl;
+
+ ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+
+ if (ret)
+ return ret;
+
+ idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ put_uobj(uobj);
+ kfree(ind_tbl);
+ return ret;
+}
+
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
struct ib_udata *ucore,
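Every new extended verb above performs the same ABI hygiene: compute the minimum command size this kernel understands with offsetof()+sizeof() of the last known field, reject shorter inputs, and reject longer inputs unless every byte beyond the known structure is zero, so future fields can be added safely. The check in isolation, against a fictional command struct:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_cmd {
	unsigned int comp_mask;
	unsigned int wq_handle;
	unsigned int max_wr;
	unsigned int max_sge;	/* last field this version understands */
};

static int check_cmd(const void *buf, size_t inlen)
{
	const size_t required = offsetof(struct demo_cmd, max_sge) +
				sizeof(((struct demo_cmd *)0)->max_sge);
	const unsigned char *p = buf;
	size_t i;

	if (inlen < required)
		return -EINVAL;			/* too short for this ABI level */
	for (i = sizeof(struct demo_cmd); i < inlen; i++)
		if (p[i])
			return -EOPNOTSUPP;	/* unknown trailing data is set */
	return 0;
}

int main(void)
{
	unsigned char buf[sizeof(struct demo_cmd) + 8];

	memset(buf, 0, sizeof(buf));
	printf("well-formed: %d\n", check_cmd(buf, sizeof(buf)));
	buf[sizeof(struct demo_cmd) + 2] = 1;
	printf("trailing garbage: %d\n", check_cmd(buf, sizeof(buf)));
	printf("too short: %d\n", check_cmd(buf, 4));
	return 0;
}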
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 31f422a70623..0012fa58c105 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -76,6 +76,8 @@ DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);
DEFINE_IDR(ib_uverbs_rule_idr);
+DEFINE_IDR(ib_uverbs_wq_idr);
+DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);
static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -130,6 +132,11 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
[IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq,
[IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp,
+ [IB_USER_VERBS_EX_CMD_CREATE_WQ] = ib_uverbs_ex_create_wq,
+ [IB_USER_VERBS_EX_CMD_MODIFY_WQ] = ib_uverbs_ex_modify_wq,
+ [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq,
+ [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
+ [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -265,6 +272,27 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uqp);
}
+ list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
+ struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
+ struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
+
+ idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+ ib_destroy_rwq_ind_table(rwq_ind_tbl);
+ kfree(ind_tbl);
+ kfree(uobj);
+ }
+
+ list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
+ struct ib_wq *wq = uobj->object;
+ struct ib_uwq_object *uwq =
+ container_of(uobj, struct ib_uwq_object, uevent.uobject);
+
+ idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
+ ib_destroy_wq(wq);
+ ib_uverbs_release_uevent(file, &uwq->uevent);
+ kfree(uwq);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
struct ib_srq *srq = uobj->object;
struct ib_uevent_object *uevent =
@@ -568,6 +596,16 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
&uobj->events_reported);
}
+void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
+{
+ struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
+ struct ib_uevent_object, uobject);
+
+ ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+ event->event, &uobj->event_list,
+ &uobj->events_reported);
+}
+
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
struct ib_uevent_object *uobj;
@@ -931,6 +969,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
file->async_file = NULL;
kref_init(&file->ref);
mutex_init(&file->mutex);
+ mutex_init(&file->cleanup_mutex);
filp->private_data = file;
kobject_get(&dev->kobj);
@@ -956,18 +995,20 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_device *dev = file->device;
- struct ib_ucontext *ucontext = NULL;
+
+ mutex_lock(&file->cleanup_mutex);
+ if (file->ucontext) {
+ ib_uverbs_cleanup_ucontext(file, file->ucontext);
+ file->ucontext = NULL;
+ }
+ mutex_unlock(&file->cleanup_mutex);
mutex_lock(&file->device->lists_mutex);
- ucontext = file->ucontext;
- file->ucontext = NULL;
if (!file->is_closed) {
list_del(&file->list);
file->is_closed = 1;
}
mutex_unlock(&file->device->lists_mutex);
- if (ucontext)
- ib_uverbs_cleanup_ucontext(file, ucontext);
if (file->async_file)
kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
@@ -1181,22 +1222,30 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
mutex_lock(&uverbs_dev->lists_mutex);
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
struct ib_ucontext *ucontext;
-
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
file->is_closed = 1;
- ucontext = file->ucontext;
list_del(&file->list);
- file->ucontext = NULL;
kref_get(&file->ref);
mutex_unlock(&uverbs_dev->lists_mutex);
- /* We must release the mutex before going ahead and calling
- * disassociate_ucontext. disassociate_ucontext might end up
- * indirectly calling uverbs_close, for example due to freeing
- * the resources (e.g mmput).
- */
+
ib_uverbs_event_handler(&file->event_handler, &event);
+
+ mutex_lock(&file->cleanup_mutex);
+ ucontext = file->ucontext;
+ file->ucontext = NULL;
+ mutex_unlock(&file->cleanup_mutex);
+
+ /* At this point ib_uverbs_close cannot be running
+ * ib_uverbs_cleanup_ucontext
+ */
if (ucontext) {
+ /* We must release the mutex before going ahead and
+ * calling disassociate_ucontext. disassociate_ucontext
+ * might end up indirectly calling uverbs_close,
+ * for example due to freeing the resources
+ * (e.g mmput).
+ */
ib_dev->disassociate_ucontext(ucontext);
ib_uverbs_cleanup_ucontext(file, ucontext);
}
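The uverbs_main.c changes introduce cleanup_mutex so that the normal close() path and the hot-unplug path cannot both run ib_uverbs_cleanup_ucontext(): each path takes the mutex, claims the ucontext pointer, and only the caller that saw it non-NULL performs the teardown. The same idea as a tiny user-space program (pthread mutex, hypothetical names; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct file_state {
	pthread_mutex_t cleanup_mutex;
	void *ucontext;
};

static void try_cleanup(struct file_state *f, const char *who)
{
	void *ctx;

	pthread_mutex_lock(&f->cleanup_mutex);
	ctx = f->ucontext;
	f->ucontext = NULL;	/* claim the context, if any, under the lock */
	pthread_mutex_unlock(&f->cleanup_mutex);

	if (ctx) {
		printf("%s: cleaning up context\n", who);
		free(ctx);
	} else {
		printf("%s: nothing left to do\n", who);
	}
}

int main(void)
{
	static struct file_state f = { PTHREAD_MUTEX_INITIALIZER, NULL };

	f.ucontext = malloc(16);
	try_cleanup(&f, "device removal");	/* first caller wins */
	try_cleanup(&f, "close()");		/* second caller is a no-op */
	return 0;
}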
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6298f54b4137..f2b776efab3a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -758,6 +758,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp *qp;
int ret;
+ if (qp_init_attr->rwq_ind_tbl &&
+ (qp_init_attr->recv_cq ||
+ qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
+ qp_init_attr->cap.max_recv_sge))
+ return ERR_PTR(-EINVAL);
+
/*
* If the callers is using the RDMA API calculate the resources
* needed for the RDMA READ/WRITE operations.
@@ -775,6 +781,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
+ qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
atomic_set(&qp->usecnt, 0);
qp->mrs_used = 0;
@@ -792,7 +799,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->srq = NULL;
} else {
qp->recv_cq = qp_init_attr->recv_cq;
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
+ if (qp_init_attr->recv_cq)
+ atomic_inc(&qp_init_attr->recv_cq->usecnt);
qp->srq = qp_init_attr->srq;
if (qp->srq)
atomic_inc(&qp_init_attr->srq->usecnt);
@@ -803,7 +811,10 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->xrcd = NULL;
atomic_inc(&pd->usecnt);
- atomic_inc(&qp_init_attr->send_cq->usecnt);
+ if (qp_init_attr->send_cq)
+ atomic_inc(&qp_init_attr->send_cq->usecnt);
+ if (qp_init_attr->rwq_ind_tbl)
+ atomic_inc(&qp->rwq_ind_tbl->usecnt);
if (qp_init_attr->cap.max_rdma_ctxs) {
ret = rdma_rw_init_mrs(qp, qp_init_attr);
@@ -814,6 +825,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
}
}
+ /*
+ * Note: all hw drivers guarantee that max_send_sge is lower than
+ * the device RDMA WRITE SGE limit but not all hw drivers ensure that
+ * max_send_sge <= max_sge_rd.
+ */
+ qp->max_write_sge = qp_init_attr->cap.max_send_sge;
+ qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+ device->attrs.max_sge_rd);
+
return qp;
}
EXPORT_SYMBOL(ib_create_qp);
@@ -1283,6 +1303,7 @@ int ib_destroy_qp(struct ib_qp *qp)
struct ib_pd *pd;
struct ib_cq *scq, *rcq;
struct ib_srq *srq;
+ struct ib_rwq_ind_table *ind_tbl;
int ret;
WARN_ON_ONCE(qp->mrs_used > 0);
@@ -1297,6 +1318,7 @@ int ib_destroy_qp(struct ib_qp *qp)
scq = qp->send_cq;
rcq = qp->recv_cq;
srq = qp->srq;
+ ind_tbl = qp->rwq_ind_tbl;
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
@@ -1311,6 +1333,8 @@ int ib_destroy_qp(struct ib_qp *qp)
atomic_dec(&rcq->usecnt);
if (srq)
atomic_dec(&srq->usecnt);
+ if (ind_tbl)
+ atomic_dec(&ind_tbl->usecnt);
}
return ret;
@@ -1558,6 +1582,150 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
+/**
+ * ib_create_wq - Creates a WQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the WQ.
+ * @wq_attr: A list of initial attributes required to create the
+ * WQ. If WQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created WQ.
+ *
+ * wq_attr->max_wr and wq_attr->max_sge determine
+ * the requested size of the WQ, and are set to the actual values allocated
+ * on return.
+ * If ib_create_wq() succeeds, then max_wr and max_sge will always be
+ * at least as large as the requested values.
+ */
+struct ib_wq *ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *wq_attr)
+{
+ struct ib_wq *wq;
+
+ if (!pd->device->create_wq)
+ return ERR_PTR(-ENOSYS);
+
+ wq = pd->device->create_wq(pd, wq_attr, NULL);
+ if (!IS_ERR(wq)) {
+ wq->event_handler = wq_attr->event_handler;
+ wq->wq_context = wq_attr->wq_context;
+ wq->wq_type = wq_attr->wq_type;
+ wq->cq = wq_attr->cq;
+ wq->device = pd->device;
+ wq->pd = pd;
+ wq->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&wq_attr->cq->usecnt);
+ atomic_set(&wq->usecnt, 0);
+ }
+ return wq;
+}
+EXPORT_SYMBOL(ib_create_wq);
+
+/**
+ * ib_destroy_wq - Destroys the specified WQ.
+ * @wq: The WQ to destroy.
+ */
+int ib_destroy_wq(struct ib_wq *wq)
+{
+ int err;
+ struct ib_cq *cq = wq->cq;
+ struct ib_pd *pd = wq->pd;
+
+ if (atomic_read(&wq->usecnt))
+ return -EBUSY;
+
+ err = wq->device->destroy_wq(wq);
+ if (!err) {
+ atomic_dec(&pd->usecnt);
+ atomic_dec(&cq->usecnt);
+ }
+ return err;
+}
+EXPORT_SYMBOL(ib_destroy_wq);
+
+/**
+ * ib_modify_wq - Modifies the specified WQ.
+ * @wq: The WQ to modify.
+ * @wq_attr: On input, specifies the WQ attributes to modify.
+ * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
+ * are being modified.
+ * On output, the current values of selected WQ attributes are returned.
+ */
+int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask)
+{
+ int err;
+
+ if (!wq->device->modify_wq)
+ return -ENOSYS;
+
+ err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
+ return err;
+}
+EXPORT_SYMBOL(ib_modify_wq);
+
+/*
+ * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
+ * @device: The device on which to create the rwq indirection table.
+ * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
+ * create the Indirection Table.
+ *
+ * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be
+ * shorter than that of the created ib_rwq_ind_table object, and the caller
+ * is responsible for allocating and freeing its memory.
+ */
+struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr)
+{
+ struct ib_rwq_ind_table *rwq_ind_table;
+ int i;
+ u32 table_size;
+
+ if (!device->create_rwq_ind_table)
+ return ERR_PTR(-ENOSYS);
+
+ table_size = (1 << init_attr->log_ind_tbl_size);
+ rwq_ind_table = device->create_rwq_ind_table(device,
+ init_attr, NULL);
+ if (IS_ERR(rwq_ind_table))
+ return rwq_ind_table;
+
+ rwq_ind_table->ind_tbl = init_attr->ind_tbl;
+ rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
+ rwq_ind_table->device = device;
+ rwq_ind_table->uobject = NULL;
+ atomic_set(&rwq_ind_table->usecnt, 0);
+
+ for (i = 0; i < table_size; i++)
+ atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
+
+ return rwq_ind_table;
+}
+EXPORT_SYMBOL(ib_create_rwq_ind_table);
+
+/*
+ * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
+ * @rwq_ind_table: The Indirection Table to destroy.
+ */
+int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
+{
+ int err, i;
+ u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
+ struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
+
+ if (atomic_read(&rwq_ind_table->usecnt))
+ return -EBUSY;
+
+ err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
+ if (!err) {
+ for (i = 0; i < table_size; i++)
+ atomic_dec(&ind_tbl[i]->usecnt);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
+
struct ib_flow *ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
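ib_create_rwq_ind_table() above sizes the table as 1 << log_ind_tbl_size and takes a reference on every WQ it points at, so none of those WQs can be destroyed while the table exists; destroy drops the references again. The bookkeeping alone, with plain structs standing in for the ib_* types:

#include <stdio.h>
#include <stdlib.h>

struct demo_wq { int usecnt; };

struct demo_ind_table {
	unsigned int log_size;
	struct demo_wq **tbl;
};

static struct demo_ind_table *create_ind_table(struct demo_wq **wqs,
					       unsigned int log_size)
{
	unsigned int i, n = 1u << log_size;
	struct demo_ind_table *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->log_size = log_size;
	t->tbl = wqs;			/* caller keeps ownership of the array */
	for (i = 0; i < n; i++)
		wqs[i]->usecnt++;	/* pin every referenced WQ */
	return t;
}

static void destroy_ind_table(struct demo_ind_table *t)
{
	unsigned int i, n = 1u << t->log_size;

	for (i = 0; i < n; i++)
		t->tbl[i]->usecnt--;	/* drop the per-entry references */
	free(t);
}

int main(void)
{
	struct demo_wq a = { 0 }, b = { 0 };
	struct demo_wq *wqs[2] = { &a, &b };	/* log_size = 1 -> 2 entries */
	struct demo_ind_table *t = create_ind_table(wqs, 1);

	if (!t)
		return 1;
	printf("after create: a=%d b=%d\n", a.usecnt, b.usecnt);
	destroy_ind_table(t);
	printf("after destroy: a=%d b=%d\n", a.usecnt, b.usecnt);
	return 0;
}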
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3e8431b5cad7..04bbf172abde 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1396,10 +1396,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
state_set(&child_ep->com, CONNECTING);
child_ep->com.tdev = tdev;
child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = PF_INET;
+ child_ep->com.local_addr.sin_family = AF_INET;
child_ep->com.local_addr.sin_port = req->local_port;
child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = PF_INET;
+ child_ep->com.remote_addr.sin_family = AF_INET;
child_ep->com.remote_addr.sin_port = req->peer_port;
child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
get_ep(&parent_ep->com);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index bb1a839d4d6d..3edb80644b53 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1183,18 +1183,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- PDBG("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.fw_version);
-}
-
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1334,13 +1322,11 @@ static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *iwch_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
};
@@ -1362,6 +1348,18 @@ static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
+ struct ethtool_drvinfo info;
+ struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+
+ PDBG("%s dev 0x%p\n", __func__, iwch_dev);
+ lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ snprintf(str, str_len, "%s", info.fw_version);
+}
+
int iwch_register_device(struct iwch_dev *dev)
{
int ret;
@@ -1437,6 +1435,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.get_hw_stats = iwch_get_mib;
dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = iwch_port_immutable;
+ dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a3a67216bce6..3aca7f6171b4 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -294,6 +294,25 @@ static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
return;
}
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+ struct sk_buff *skb;
+ unsigned int i;
+ size_t len;
+
+ len = roundup(sizeof(union cpl_wr_size), 16);
+ for (i = 0; i < size; i++) {
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto fail;
+ skb_queue_tail(ep_skb_list, skb);
+ }
+ return 0;
+fail:
+ skb_queue_purge(ep_skb_list);
+ return -ENOMEM;
+}
+
static void *alloc_ep(int size, gfp_t gfp)
{
struct c4iw_ep_common *epc;
@@ -384,6 +403,8 @@ void _c4iw_free_ep(struct kref *kref)
if (ep->mpa_skb)
kfree_skb(ep->mpa_skb);
}
+ if (!skb_queue_empty(&ep->com.ep_skb_list))
+ skb_queue_purge(&ep->com.ep_skb_list);
kfree(ep);
}
@@ -620,25 +641,27 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
}
}
-static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
{
- unsigned int flowclen = 80;
struct fw_flowc_wr *flowc;
+ struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
int i;
u16 vlan = ep->l2t->vlan;
int nparams;
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
if (vlan == CPL_L2T_VLAN_NONE)
nparams = 8;
else
nparams = 9;
- skb = get_skb(skb, flowclen, GFP_KERNEL);
- flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
16)) | FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
@@ -679,18 +702,16 @@ static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
{
struct cpl_close_con_req *req;
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
int wrlen = roundup(sizeof *req, 16);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb = get_skb(NULL, wrlen, gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ if (WARN_ON(!skb))
return -ENOMEM;
- }
+
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
@@ -701,26 +722,24 @@ static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+static int send_abort(struct c4iw_ep *ep)
{
struct cpl_abort_req *req;
int wrlen = roundup(sizeof *req, 16);
+ struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb = get_skb(skb, wrlen, gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
+ if (WARN_ON(!req_skb))
return -ENOMEM;
- }
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, ep, abort_arp_failure);
- req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+
+ set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
+ req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
req->cmd = CPL_ABORT_SEND_RST;
- return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
@@ -992,9 +1011,19 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0) |
- (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
+
+ mpa->flags = 0;
+ if (crc_enabled)
+ mpa->flags |= MPA_CRC;
+ if (markers_enabled) {
+ mpa->flags |= MPA_MARKERS;
+ ep->mpa_attr.recv_marker_enabled = 1;
+ } else {
+ ep->mpa_attr.recv_marker_enabled = 0;
+ }
+ if (mpa_rev_to_use == 2)
+ mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
if (mpa_rev_to_use == 1) {
@@ -1169,8 +1198,11 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
+ mpa->flags = 0;
+ if (ep->mpa_attr.crc_enabled)
+ mpa->flags |= MPA_CRC;
+ if (ep->mpa_attr.recv_marker_enabled)
+ mpa->flags |= MPA_MARKERS;
mpa->revision = ep->mpa_attr.version;
mpa->private_data_size = htons(plen);
@@ -1248,7 +1280,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
- ret = send_flowc(ep, NULL);
+ ret = send_flowc(ep);
if (ret)
goto err;
if (ep->retry_with_mpa_v1)
@@ -1555,7 +1587,6 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
__state_set(&ep->com, FPDU_MODE);
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
ep->mpa_attr.version = mpa->revision;
ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
@@ -2004,12 +2035,17 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
}
/*
- * Return whether a failed active open has allocated a TID
+ * Some of the error codes above implicitly indicate that there is no TID
+ * allocated with the result of an ACT_OPEN. We use this predicate to make
+ * that explicit.
*/
static inline int act_open_has_tid(int status)
{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
+ return (status != CPL_ERR_TCAM_PARITY &&
+ status != CPL_ERR_TCAM_MISS &&
+ status != CPL_ERR_TCAM_FULL &&
+ status != CPL_ERR_CONN_EXIST_SYNRECV &&
+ status != CPL_ERR_CONN_EXIST);
}
/* Returns whether a CPL status conveys negative advice.
@@ -2130,6 +2166,7 @@ out:
static int c4iw_reconnect(struct c4iw_ep *ep)
{
int err = 0;
+ int size = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)
&ep->com.cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)
@@ -2145,6 +2182,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
init_timer(&ep->timer);
c4iw_init_wr_wait(&ep->com.wr_wait);
+ /* When the MPA revision differs between the two nodes, the node with
+ * MPA_rev=2 retries the connection with MPA_rev 1 through
+ * c4iw_reconnect(), reusing the same EP with a newly allocated tid.
+ * Because the same EP pointer is reused, some skbs were already
+ * consumed by the previous c4iw_connect(), leaving the EP with too few
+ * skbs for the reconnect and triggering a BUG_ON() on an empty
+ * ep_skb_list during peer_abort(). Re-allocate the skbs that were
+ * already consumed.
+ */
+ size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+ if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
/*
* Allocate an active TID to initiate a TCP connection.
*/
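The comment above describes why the endpoint pre-allocates a list of skbs at connect time and tops it up on reconnect: teardown and abort paths must be able to dequeue a buffer without risking an allocation failure. A minimal sketch of that reserve-up-front pattern (an array-backed pool in user space, not the kernel's sk_buff list):

#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4

struct buf_pool {
	void *bufs[POOL_SIZE];
	int count;
};

static int pool_fill(struct buf_pool *p, int target)
{
	while (p->count < target) {
		void *b = malloc(64);

		if (!b)
			return -1;	/* fail now, while failure is still easy to handle */
		p->bufs[p->count++] = b;
	}
	return 0;
}

static void *pool_take(struct buf_pool *p)
{
	return p->count ? p->bufs[--p->count] : NULL;
}

int main(void)
{
	struct buf_pool pool = { .count = 0 };
	void *b;

	if (pool_fill(&pool, POOL_SIZE))	/* connect time: reserve what the error paths need */
		return 1;

	b = pool_take(&pool);			/* abort path: guaranteed to succeed */
	printf("got buffer %p, %d left\n", b, pool.count);
	free(b);

	if (pool_fill(&pool, POOL_SIZE))	/* reconnect: top the pool back up */
		return 1;
	printf("refilled to %d\n", pool.count);

	while ((b = pool_take(&pool)))
		free(b);
	return 0;
}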
@@ -2210,6 +2262,7 @@ fail2:
* response of 1st connect request.
*/
connect_reply_upcall(ep, -ECONNRESET);
+fail1:
c4iw_put_ep(&ep->com);
out:
return err;
@@ -2576,6 +2629,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
child_ep->mtu = peer_mss + hdrs;
+ skb_queue_head_init(&child_ep->com.ep_skb_list);
+ if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+ goto fail;
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -2640,6 +2697,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
goto out;
+fail:
+ c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
if (parent_ep)
@@ -2670,7 +2729,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->com.state = MPA_REQ_WAIT;
start_ep_timer(ep);
set_bit(PASS_ESTAB, &ep->com.history);
- ret = send_flowc(ep, skb);
+ ret = send_flowc(ep);
mutex_unlock(&ep->com.mutex);
if (ret)
c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
@@ -2871,10 +2930,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
- __func__);
+ rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+ if (WARN_ON(!rpl_skb)) {
release = 1;
goto out;
}
@@ -3011,9 +3068,9 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s last streaming msg ack ep %p tid %u state %u "
"initiator %u freeing skb\n", __func__, ep, ep->hwtid,
state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ mutex_lock(&ep->com.mutex);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
- mutex_lock(&ep->com.mutex);
if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
stop_ep_timer(ep);
mutex_unlock(&ep->com.mutex);
@@ -3025,9 +3082,9 @@ out:
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err = 0;
- int disconnect = 0;
+ int abort;
struct c4iw_ep *ep = to_ep(cm_id);
+
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
@@ -3038,16 +3095,13 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
}
set_bit(ULP_REJECT, &ep->com.history);
if (mpa_rev == 0)
- disconnect = 2;
- else {
- err = send_mpa_reject(ep, pdata, pdata_len);
- disconnect = 1;
- }
+ abort = 1;
+ else
+ abort = send_mpa_reject(ep, pdata, pdata_len);
mutex_unlock(&ep->com.mutex);
- if (disconnect) {
- stop_ep_timer(ep);
- err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
- }
+
+ stop_ep_timer(ep);
+ c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
c4iw_put_ep(&ep->com);
return 0;
}
@@ -3248,6 +3302,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto out;
}
+
+ skb_queue_head_init(&ep->com.ep_skb_list);
+ if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
init_timer(&ep->timer);
ep->plen = conn_param->private_data_len;
if (ep->plen)
@@ -3266,7 +3327,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!ep->com.qp) {
PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
- goto fail1;
+ goto fail2;
}
ref_qp(ep);
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
@@ -3279,7 +3340,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->atid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
err = -ENOMEM;
- goto fail1;
+ goto fail2;
}
insert_handle(dev, &dev->atid_idr, ep, ep->atid);
@@ -3303,7 +3364,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail1;
+ goto fail2;
}
/* find a route */
@@ -3323,7 +3384,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail1;
+ goto fail2;
}
/* find a route */
@@ -3339,14 +3400,14 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
- goto fail2;
+ goto fail3;
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail3;
+ goto fail4;
}
PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
@@ -3362,13 +3423,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto out;
cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
dst_release(ep->dst);
-fail2:
+fail3:
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail1:
+fail2:
+ skb_queue_purge(&ep->com.ep_skb_list);
deref_cm_id(&ep->com);
+fail1:
c4iw_put_ep(&ep->com);
out:
return err;
@@ -3461,6 +3524,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
err = -ENOMEM;
goto fail1;
}
+ skb_queue_head_init(&ep->com.ep_skb_list);
PDBG("%s ep %p\n", __func__, ep);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
@@ -3577,11 +3641,22 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MPA_REQ_RCVD:
case MPA_REP_SENT:
case FPDU_MODE:
+ case CONNECTING:
close = 1;
if (abrupt)
ep->com.state = ABORTING;
else {
ep->com.state = CLOSING;
+
+ /*
+ * if we close before we see the fw4_ack() then we fix
+ * up the timer state since we're reusing it.
+ */
+ if (ep->mpa_skb &&
+ test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
+ clear_bit(STOP_MPA_TIMER, &ep->com.flags);
+ stop_ep_timer(ep);
+ }
start_ep_timer(ep);
}
set_bit(CLOSE_SENT, &ep->com.flags);
@@ -3611,10 +3686,10 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
close_complete_upcall(ep, -ECONNRESET);
- ret = send_abort(ep, NULL, gfp);
+ ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
- ret = send_halfclose(ep, gfp);
+ ret = send_halfclose(ep);
}
if (ret) {
set_bit(EP_DISC_FAIL, &ep->com.history);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b0b955724458..812ab7278b8e 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,19 +33,15 @@
#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
struct c4iw_wr_wait wr_wait;
- struct sk_buff *skb;
int ret;
wr_len = sizeof *res_wr + sizeof *res;
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
@@ -863,7 +859,9 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
: NULL;
destroy_cq(&chp->rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+ ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
+ chp->destroy_skb);
+ chp->destroy_skb = NULL;
kfree(chp);
return 0;
}
@@ -879,7 +877,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
struct c4iw_cq *chp;
struct c4iw_create_cq_resp uresp;
struct c4iw_ucontext *ucontext = NULL;
- int ret;
+ int ret, wr_len;
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
@@ -896,6 +894,13 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (!chp)
return ERR_PTR(-ENOMEM);
+ wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+ chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!chp->destroy_skb) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
if (ib_context)
ucontext = to_c4iw_ucontext(ib_context);
@@ -936,7 +941,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ret = create_cq(&rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
if (ret)
- goto err1;
+ goto err2;
chp->rhp = rhp;
chp->cq.size--; /* status page */
@@ -947,15 +952,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
init_waitqueue_head(&chp->wait);
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
if (ret)
- goto err2;
+ goto err3;
if (ucontext) {
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
- goto err3;
+ goto err4;
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2)
- goto err4;
+ goto err5;
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
@@ -970,7 +975,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ret = ib_copy_to_udata(udata, &uresp,
sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
- goto err5;
+ goto err6;
mm->key = uresp.key;
mm->addr = virt_to_phys(chp->cq.queue);
@@ -986,15 +991,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
__func__, chp->cq.cqid, chp, chp->cq.size,
chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
return &chp->ibcq;
-err5:
+err6:
kfree(mm2);
-err4:
+err5:
kfree(mm);
-err3:
+err4:
remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err2:
+err3:
destroy_cq(&chp->rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ chp->destroy_skb);
+err2:
+ kfree_skb(chp->destroy_skb);
err1:
kfree(chp);
return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ae2e8b23d2dd..071d7332ec06 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -317,7 +317,7 @@ static int qp_open(struct inode *inode, struct file *file)
idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
spin_unlock_irq(&qpd->devp->lock);
- qpd->bufsize = count * 128;
+ qpd->bufsize = count * 180;
qpd->buf = vmalloc(qpd->bufsize);
if (!qpd->buf) {
kfree(qpd);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f6f34a75af27..aa47e0ae80bc 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -384,6 +384,7 @@ struct c4iw_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct c4iw_dev *rhp;
+ struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
u64 *mpl;
@@ -400,6 +401,7 @@ static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
struct c4iw_mw {
struct ib_mw ibmw;
struct c4iw_dev *rhp;
+ struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
};
@@ -412,6 +414,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
struct c4iw_cq {
struct ib_cq ibcq;
struct c4iw_dev *rhp;
+ struct sk_buff *destroy_skb;
struct t4_cq cq;
spinlock_t lock;
spinlock_t comp_handler_lock;
@@ -472,7 +475,7 @@ struct c4iw_qp {
struct t4_wq wq;
spinlock_t lock;
struct mutex mutex;
- atomic_t refcnt;
+ struct kref kref;
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
@@ -789,10 +792,29 @@ enum c4iw_ep_history {
CM_ID_DEREFED = 28,
};
+enum conn_pre_alloc_buffers {
+ CN_ABORT_REQ_BUF,
+ CN_ABORT_RPL_BUF,
+ CN_CLOSE_CON_REQ_BUF,
+ CN_DESTROY_BUF,
+ CN_FLOWC_BUF,
+ CN_MAX_CON_BUF
+};
+
+#define FLOWC_LEN 80
+union cpl_wr_size {
+ struct cpl_abort_req abrt_req;
+ struct cpl_abort_rpl abrt_rpl;
+ struct fw_ri_wr ri_req;
+ struct cpl_close_con_req close_req;
+ char flowc_buf[FLOWC_LEN];
+};
+
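The union above exists so that each pre-allocated skb is large enough for whichever control message is eventually sent on the teardown path, with FLOWC_LEN covering the flowc work request. The helper that fills ep_skb_list is not part of this excerpt; a minimal sketch of what it could look like, assuming each entry only needs room for sizeof(union cpl_wr_size) and relying on the callers' error paths (see the skb_queue_purge() in c4iw_connect()) to clean up on failure:

	static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
	{
		struct sk_buff *skb;
		int i;

		for (i = 0; i < size; i++) {
			skb = alloc_skb(sizeof(union cpl_wr_size), GFP_KERNEL);
			if (!skb)
				return -ENOMEM;	/* callers purge ep_skb_list */
			skb_queue_tail(ep_skb_list, skb);
		}
		return 0;
	}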
struct c4iw_ep_common {
struct iw_cm_id *cm_id;
struct c4iw_qp *qp;
struct c4iw_dev *dev;
+ struct sk_buff_head ep_skb_list;
enum c4iw_ep_state state;
struct kref kref;
struct mutex mutex;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 55d0651ee4de..0b91b0f4df71 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,9 +59,9 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
}
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
- u32 len, dma_addr_t data, int wait)
+ u32 len, dma_addr_t data,
+ int wait, struct sk_buff *skb)
{
- struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_sgl *sgl;
u8 wr_len;
@@ -74,9 +74,11 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
c4iw_init_wr_wait(&wr_wait);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ }
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -108,9 +110,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
}
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+ void *data, struct sk_buff *skb)
{
- struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_idata *sc;
u8 wr_len, *to_dp, *from_dp;
@@ -134,9 +135,11 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
wr_len = roundup(sizeof *req + sizeof *sc +
roundup(copy_len, T4_ULPTX_MIN_IO), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ }
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -173,6 +176,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
ret = c4iw_ofld_send(rdev, skb);
+ skb = NULL;
if (ret)
return ret;
len -= C4IW_MAX_INLINE_SIZE;
@@ -182,7 +186,8 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
-static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data, struct sk_buff *skb)
{
u32 remain = len;
u32 dmalen;
@@ -205,7 +210,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
- !remain);
+ !remain, skb);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -213,7 +218,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
daddr += dmalen;
}
if (remain)
- ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
out:
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
return ret;
@@ -224,23 +229,25 @@ out:
* If data is NULL, clear len byte of memory to zero.
*/
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+ void *data, struct sk_buff *skb)
{
if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
if (len > inline_threshold) {
- if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+ if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
printk_ratelimited(KERN_WARNING
"%s: dma map"
" failure (non fatal)\n",
pci_name(rdev->lldi.pdev));
return _c4iw_write_mem_inline(rdev, addr, len,
- data);
- } else
+ data, skb);
+ } else {
return 0;
+ }
} else
- return _c4iw_write_mem_inline(rdev, addr, len, data);
+ return _c4iw_write_mem_inline(rdev, addr,
+ len, data, skb);
} else
- return _c4iw_write_mem_inline(rdev, addr, len, data);
+ return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
}
/*
@@ -253,7 +260,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
int bind_enabled, u32 zbva, u64 to,
- u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+ u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
+ struct sk_buff *skb)
{
int err;
struct fw_ri_tpte tpt;
@@ -307,7 +315,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt);
+ sizeof(tpt), &tpt, skb);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -327,28 +335,29 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
__func__, pbl_addr, rdev->lldi.vr->pbl.start,
pbl_size);
- err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr)
+ u32 pbl_addr, struct sk_buff *skb)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
- pbl_size, pbl_addr);
+ pbl_size, pbl_addr, skb);
}
static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
- 0UL, 0, 0, 0, 0);
+ 0UL, 0, 0, 0, 0, NULL);
}
-static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
+ struct sk_buff *skb)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
- 0);
+ 0, skb);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
@@ -356,7 +365,7 @@ static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
- 0UL, 0, 0, pbl_size, pbl_addr);
+ 0UL, 0, 0, pbl_size, pbl_addr, NULL);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -383,14 +392,16 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len ?
mhp->attr.len : -1, shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
- if (ret)
+ if (ret) {
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
+ mhp->dereg_skb = NULL;
+ }
return ret;
}
@@ -423,6 +434,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
@@ -435,7 +452,8 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
- mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
+ mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
+ NULL);
if (ret)
goto err1;
@@ -445,8 +463,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
return &mhp->ibmr;
err2:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
err1:
+ kfree_skb(mhp->dereg_skb);
+err0:
kfree(mhp);
return ERR_PTR(ret);
}
@@ -481,11 +501,18 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
+
mhp->rhp = rhp;
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
+ kfree_skb(mhp->dereg_skb);
kfree(mhp);
return ERR_PTR(err);
}
@@ -550,6 +577,7 @@ err_pbl:
err:
ib_umem_release(mhp->umem);
+ kfree_skb(mhp->dereg_skb);
kfree(mhp);
return ERR_PTR(err);
}
@@ -572,11 +600,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
- ret = allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
+
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ ret = -ENOMEM;
+ goto free_mhp;
}
+
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ if (ret)
+ goto free_skb;
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_MW;
@@ -584,12 +617,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
- deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto dealloc_win;
}
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
+
+dealloc_win:
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+free_skb:
+ kfree_skb(mhp->dereg_skb);
+free_mhp:
+ kfree(mhp);
+ return ERR_PTR(ret);
}
int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -602,7 +642,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
remove_handle(rhp, &rhp->mmidr, mmid);
- deallocate_window(&rhp->rdev, mhp->attr.stag);
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+ kfree_skb(mhp->dereg_skb);
kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
return 0;
@@ -666,7 +707,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
return &(mhp->ibmr);
err3:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
err2:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
@@ -717,7 +758,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
if (mhp->attr.pbl_size)
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index dd8a86b726d2..df127ce6b6ec 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -409,20 +409,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
-
- return sprintf(buf, "%u.%u.%u.%u\n",
- FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
-}
-
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -502,13 +488,11 @@ static int c4iw_get_mib(struct ib_device *ibdev,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *c4iw_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
};
@@ -530,6 +514,20 @@ static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *dev, char *str,
+ size_t str_len)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+
+ snprintf(str, str_len, "%u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
+}
+
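The firmware version string itself is unchanged: the same four FW_HDR_FW_VER_*_G fields are formatted, only snprintf() with an explicit length replaces sprintf(), and the IB core now owns the sysfs plumbing through the get_dev_fw_str callback registered below. A hypothetical caller (buffer size chosen arbitrarily for the example) would only need:

	char fw[64];

	if (ibdev->get_dev_fw_str)
		ibdev->get_dev_fw_str(ibdev, fw, sizeof(fw));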
int c4iw_register_device(struct c4iw_dev *dev)
{
int ret;
@@ -605,6 +603,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
+ dev->ibdev.get_dev_fw_str = get_dev_fw_str;
dev->ibdev.drain_sq = c4iw_drain_sq;
dev->ibdev.drain_rq = c4iw_drain_rq;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e8993e49b8b3..edb1172b6f54 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,17 +683,25 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
return 0;
}
+void _free_qp(struct kref *kref)
+{
+ struct c4iw_qp *qhp;
+
+ qhp = container_of(kref, struct c4iw_qp, kref);
+ PDBG("%s qhp %p\n", __func__, qhp);
+ kfree(qhp);
+}
+
void c4iw_qp_add_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+ kref_get(&to_c4iw_qp(qp)->kref);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
- wake_up(&(to_c4iw_qp(qp)->wait));
+ kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
}
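The refcount-to-kref conversion follows the standard kernel pattern: the final kref_put() runs the release callback, which frees the object, so c4iw_destroy_qp() below no longer needs its own atomic_dec()/wait_event() pair. A generic sketch of the pattern, independent of this driver:

	struct foo {
		struct kref kref;
		/* ... payload ... */
	};

	static void foo_release(struct kref *kref)
	{
		kfree(container_of(kref, struct foo, kref));
	}

	/* kref_init(&f->kref) at creation, kref_get(&f->kref) per user,
	 * kref_put(&f->kref, foo_release) per drop; the last put frees f.
	 */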
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -1081,9 +1089,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
- skb = alloc_skb(sizeof *wqe, gfp);
- if (!skb)
+ skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
+ if (WARN_ON(!skb))
return;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1202,9 +1211,10 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
ep->hwtid);
- skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
- if (!skb)
+ skb = skb_dequeue(&ep->com.ep_skb_list);
+ if (WARN_ON(!skb))
return -ENOMEM;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1592,8 +1602,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
wait_event(qhp->wait, !qhp->ep);
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
spin_lock_irq(&rhp->lock);
if (!list_empty(&qhp->db_fc_entry))
@@ -1606,8 +1614,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ c4iw_qp_rem_ref(ib_qp);
+
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
- kfree(qhp);
return 0;
}
@@ -1704,7 +1713,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
+ kref_init(&qhp->kref);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
@@ -1896,12 +1905,20 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
}
+static void move_qp_to_err(struct c4iw_qp *qp)
+{
+ struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
+
+ (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+}
+
void c4iw_drain_sq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
unsigned long flag;
bool need_to_wait;
+ move_qp_to_err(qp);
spin_lock_irqsave(&qp->lock, flag);
need_to_wait = !t4_sq_empty(&qp->wq);
spin_unlock_irqrestore(&qp->lock, flag);
@@ -1916,6 +1933,7 @@ void c4iw_drain_rq(struct ib_qp *ibqp)
unsigned long flag;
bool need_to_wait;
+ move_qp_to_err(qp);
spin_lock_irqsave(&qp->lock, flag);
need_to_wait = !t4_rq_empty(&qp->wq);
spin_unlock_irqrestore(&qp->lock, flag);
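The move_qp_to_err() calls added above force the QP into ERROR before the drain check, so outstanding work requests are flushed and their completions arrive instead of the wait stalling on a queue that would never empty on its own. Consumers of the verbs API are unaffected; the usual drain sequence (a generic usage reminder, not something introduced by this patch) remains:

	ib_drain_qp(qp);	/* waits for SQ and RQ flush completions */
	ib_destroy_qp(qp);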
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index a925fb0db706..f6ea0881765a 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,9 +1,9 @@
config INFINIBAND_HFI1
tristate "Intel OPA Gen1 support"
- depends on X86_64 && INFINIBAND_RDMAVT
+ depends on X86_64 && INFINIBAND_RDMAVT && I2C
select MMU_NOTIFIER
select CRC32
- default m
+ select I2C_ALGOBIT
---help---
This is a low-level driver for Intel OPA Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 9b5382c94b0c..0cf97a09b64b 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
eprom.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
- qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
+ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
verbs_txreq.o
hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 14d7eeb09be6..79575ee873f2 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -47,12 +47,18 @@
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
+#include <linux/cpumask.h>
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"
+struct hfi1_affinity_node_list node_affinity = {
+ .list = LIST_HEAD_INIT(node_affinity.list),
+ .lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
+};
+
/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
"SDMA",
@@ -61,6 +67,9 @@ static const char * const irq_type_names[] = {
"OTHER",
};
+/* Per NUMA node count of HFI devices */
+static unsigned int *hfi1_per_node_cntr;
+
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
cpumask_clear(&set->mask);
@@ -69,47 +78,136 @@ static inline void init_cpu_mask_set(struct cpu_mask_set *set)
}
/* Initialize non-HT cpu cores mask */
-int init_real_cpu_mask(struct hfi1_devdata *dd)
+void init_real_cpu_mask(void)
{
- struct hfi1_affinity *info;
int possible, curr_cpu, i, ht;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- cpumask_clear(&info->real_cpu_mask);
+ cpumask_clear(&node_affinity.real_cpu_mask);
/* Start with cpu online mask as the real cpu mask */
- cpumask_copy(&info->real_cpu_mask, cpu_online_mask);
+ cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
/*
* Remove HT cores from the real cpu mask. Do this in two steps below.
*/
- possible = cpumask_weight(&info->real_cpu_mask);
+ possible = cpumask_weight(&node_affinity.real_cpu_mask);
ht = cpumask_weight(topology_sibling_cpumask(
- cpumask_first(&info->real_cpu_mask)));
+ cpumask_first(&node_affinity.real_cpu_mask)));
/*
* Step 1. Skip over the first N HT siblings and use them as the
* "real" cores. Assumes that HT cores are not enumerated in
* succession (except in the single core case).
*/
- curr_cpu = cpumask_first(&info->real_cpu_mask);
+ curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
for (i = 0; i < possible / ht; i++)
- curr_cpu = cpumask_next(curr_cpu, &info->real_cpu_mask);
+ curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
/*
* Step 2. Remove the remaining HT siblings. Use cpumask_next() to
* skip any gaps.
*/
for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, &info->real_cpu_mask);
- curr_cpu = cpumask_next(curr_cpu, &info->real_cpu_mask);
+ cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
+ curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
+ }
+}
+
+int node_affinity_init(void)
+{
+ int node;
+ struct pci_dev *dev = NULL;
+ const struct pci_device_id *ids = hfi1_pci_tbl;
+
+ cpumask_clear(&node_affinity.proc.used);
+ cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
+
+ node_affinity.proc.gen = 0;
+ node_affinity.num_core_siblings =
+ cpumask_weight(topology_sibling_cpumask(
+ cpumask_first(&node_affinity.proc.mask)
+ ));
+ node_affinity.num_online_nodes = num_online_nodes();
+ node_affinity.num_online_cpus = num_online_cpus();
+
+ /*
+ * The real cpu mask is part of the affinity struct but it has to be
+ * initialized early. It is needed to calculate the number of user
+ * contexts in set_up_context_variables().
+ */
+ init_real_cpu_mask();
+
+ hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
+ sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
+ if (!hfi1_per_node_cntr)
+ return -ENOMEM;
+
+ while (ids->vendor) {
+ dev = NULL;
+ while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
+ node = pcibus_to_node(dev->bus);
+ if (node < 0)
+ node = numa_node_id();
+
+ hfi1_per_node_cntr[node]++;
+ }
+ ids++;
}
- dd->affinity = info;
return 0;
}
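As an illustration of what the PCI walk above produces: on a machine with two HFIs behind node 0 and none behind node 1, hfi1_per_node_cntr[0] ends up as 2, and later in hfi1_dev_affinity_init() the receive-interrupt set on node 0 is sized as (n_krcv_queues - 1) * 2 CPUs, i.e. the kernel receive queues of both devices on that node are accounted for when CPUs are moved out of the default (SDMA) mask.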
+void node_affinity_destroy(void)
+{
+ struct list_head *pos, *q;
+ struct hfi1_affinity_node *entry;
+
+ spin_lock(&node_affinity.lock);
+ list_for_each_safe(pos, q, &node_affinity.list) {
+ entry = list_entry(pos, struct hfi1_affinity_node,
+ list);
+ list_del(pos);
+ kfree(entry);
+ }
+ spin_unlock(&node_affinity.lock);
+ kfree(hfi1_per_node_cntr);
+}
+
+static struct hfi1_affinity_node *node_affinity_allocate(int node)
+{
+ struct hfi1_affinity_node *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+ entry->node = node;
+ INIT_LIST_HEAD(&entry->list);
+
+ return entry;
+}
+
+/*
+ * It appends an entry to the list.
+ * It *must* be called with node_affinity.lock held.
+ */
+static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
+{
+ list_add_tail(&entry->list, &node_affinity.list);
+}
+
+/* It must be called with node_affinity.lock held */
+static struct hfi1_affinity_node *node_affinity_lookup(int node)
+{
+ struct list_head *pos;
+ struct hfi1_affinity_node *entry;
+
+ list_for_each(pos, &node_affinity.list) {
+ entry = list_entry(pos, struct hfi1_affinity_node, list);
+ if (entry->node == node)
+ return entry;
+ }
+
+ return NULL;
+}
+
/*
* Interrupt affinity.
*
@@ -121,10 +219,10 @@ int init_real_cpu_mask(struct hfi1_devdata *dd)
* to the node relative 1 as necessary.
*
*/
-void hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
int node = pcibus_to_node(dd->pcidev->bus);
- struct hfi1_affinity *info = dd->affinity;
+ struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
int curr_cpu, possible, i;
@@ -132,56 +230,93 @@ void hfi1_dev_affinity_init(struct hfi1_devdata *dd)
node = numa_node_id();
dd->node = node;
- spin_lock_init(&info->lock);
-
- init_cpu_mask_set(&info->def_intr);
- init_cpu_mask_set(&info->rcv_intr);
- init_cpu_mask_set(&info->proc);
-
local_mask = cpumask_of_node(dd->node);
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
- /* Use the "real" cpu mask of this node as the default */
- cpumask_and(&info->def_intr.mask, &info->real_cpu_mask, local_mask);
-
- /* fill in the receive list */
- possible = cpumask_weight(&info->def_intr.mask);
- curr_cpu = cpumask_first(&info->def_intr.mask);
- if (possible == 1) {
- /* only one CPU, everyone will use it */
- cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
- } else {
- /*
- * Retain the first CPU in the default list for the control
- * context.
- */
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- /*
- * Remove the remaining kernel receive queues from
- * the default list and add them to the receive list.
- */
- for (i = 0; i < dd->n_krcv_queues - 1; i++) {
- cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
- cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- if (curr_cpu >= nr_cpu_ids)
- break;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
+ /*
+ * If this is the first time this NUMA node's affinity is used,
+ * create an entry in the global affinity structure and initialize it.
+ */
+ if (!entry) {
+ entry = node_affinity_allocate(node);
+ if (!entry) {
+ dd_dev_err(dd,
+ "Unable to allocate global affinity node\n");
+ return -ENOMEM;
}
- }
+ init_cpu_mask_set(&entry->def_intr);
+ init_cpu_mask_set(&entry->rcv_intr);
+ cpumask_clear(&entry->general_intr_mask);
+ /* Use the "real" cpu mask of this node as the default */
+ cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
+ local_mask);
+
+ /* fill in the receive list */
+ possible = cpumask_weight(&entry->def_intr.mask);
+ curr_cpu = cpumask_first(&entry->def_intr.mask);
+
+ if (possible == 1) {
+ /* only one CPU, everyone will use it */
+ cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
+ cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
+ } else {
+ /*
+ * The general/control context will be the first CPU in
+ * the default list, so it is removed from the default
+ * list and added to the general interrupt list.
+ */
+ cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
+ cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
+ curr_cpu = cpumask_next(curr_cpu,
+ &entry->def_intr.mask);
- cpumask_copy(&info->proc.mask, cpu_online_mask);
-}
+ /*
+ * Remove the remaining kernel receive queues from
+ * the default list and add them to the receive list.
+ */
+ for (i = 0;
+ i < (dd->n_krcv_queues - 1) *
+ hfi1_per_node_cntr[dd->node];
+ i++) {
+ cpumask_clear_cpu(curr_cpu,
+ &entry->def_intr.mask);
+ cpumask_set_cpu(curr_cpu,
+ &entry->rcv_intr.mask);
+ curr_cpu = cpumask_next(curr_cpu,
+ &entry->def_intr.mask);
+ if (curr_cpu >= nr_cpu_ids)
+ break;
+ }
-void hfi1_dev_affinity_free(struct hfi1_devdata *dd)
-{
- kfree(dd->affinity);
+ /*
+ * If there ends up being 0 CPU cores leftover for SDMA
+ * engines, use the same CPU cores as general/control
+ * context.
+ */
+ if (cpumask_weight(&entry->def_intr.mask) == 0)
+ cpumask_copy(&entry->def_intr.mask,
+ &entry->general_intr_mask);
+ }
+
+ spin_lock(&node_affinity.lock);
+ node_affinity_add_tail(entry);
+ spin_unlock(&node_affinity.lock);
+ }
+
+ return 0;
}
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
int ret;
cpumask_var_t diff;
- struct cpu_mask_set *set;
+ struct hfi1_affinity_node *entry;
+ struct cpu_mask_set *set = NULL;
struct sdma_engine *sde = NULL;
struct hfi1_ctxtdata *rcd = NULL;
char extra[64];
@@ -194,22 +329,25 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
if (!ret)
return -ENOMEM;
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
switch (msix->type) {
case IRQ_SDMA:
sde = (struct sdma_engine *)msix->arg;
scnprintf(extra, 64, "engine %u", sde->this_idx);
- /* fall through */
+ set = &entry->def_intr;
+ break;
case IRQ_GENERAL:
- set = &dd->affinity->def_intr;
+ cpu = cpumask_first(&entry->general_intr_mask);
break;
case IRQ_RCVCTXT:
rcd = (struct hfi1_ctxtdata *)msix->arg;
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- set = &dd->affinity->def_intr;
- cpu = cpumask_first(&set->mask);
- } else {
- set = &dd->affinity->rcv_intr;
- }
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ cpu = cpumask_first(&entry->general_intr_mask);
+ else
+ set = &entry->rcv_intr;
scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
break;
default:
@@ -218,12 +356,12 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
}
/*
- * The control receive context is placed on a particular CPU, which
- * is set above. Skip accounting for it. Everything else finds its
- * CPU here.
+ * The general and control contexts are placed on a particular
+ * CPU, which is set above. Skip accounting for it. Everything else
+ * finds its CPU here.
*/
- if (cpu == -1) {
- spin_lock(&dd->affinity->lock);
+ if (cpu == -1 && set) {
+ spin_lock(&node_affinity.lock);
if (cpumask_equal(&set->mask, &set->used)) {
/*
* We've used up all the CPUs, bump up the generation
@@ -235,7 +373,7 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
cpumask_andnot(diff, &set->mask, &set->used);
cpu = cpumask_first(diff);
cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&dd->affinity->lock);
+ spin_unlock(&node_affinity.lock);
}
switch (msix->type) {
@@ -263,43 +401,84 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
{
struct cpu_mask_set *set = NULL;
struct hfi1_ctxtdata *rcd;
+ struct hfi1_affinity_node *entry;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
switch (msix->type) {
case IRQ_SDMA:
+ set = &entry->def_intr;
+ break;
case IRQ_GENERAL:
- set = &dd->affinity->def_intr;
+ /* Don't do accounting for general contexts */
break;
case IRQ_RCVCTXT:
rcd = (struct hfi1_ctxtdata *)msix->arg;
- /* only do accounting for non control contexts */
+ /* Don't do accounting for control contexts */
if (rcd->ctxt != HFI1_CTRL_CTXT)
- set = &dd->affinity->rcv_intr;
+ set = &entry->rcv_intr;
break;
default:
return;
}
if (set) {
- spin_lock(&dd->affinity->lock);
+ spin_lock(&node_affinity.lock);
cpumask_andnot(&set->used, &set->used, &msix->mask);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
- spin_unlock(&dd->affinity->lock);
+ spin_unlock(&node_affinity.lock);
}
irq_set_affinity_hint(msix->msix.vector, NULL);
cpumask_clear(&msix->mask);
}
-int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
+/* This should be called with node_affinity.lock held */
+static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
+ struct hfi1_affinity_node_list *affinity)
{
- int cpu = -1, ret;
- cpumask_var_t diff, mask, intrs;
+ int possible, curr_cpu, i;
+ uint num_cores_per_socket = node_affinity.num_online_cpus /
+ affinity->num_core_siblings /
+ node_affinity.num_online_nodes;
+
+ cpumask_copy(hw_thread_mask, &affinity->proc.mask);
+ if (affinity->num_core_siblings > 0) {
+ /* Removing other siblings not needed for now */
+ possible = cpumask_weight(hw_thread_mask);
+ curr_cpu = cpumask_first(hw_thread_mask);
+ for (i = 0;
+ i < num_cores_per_socket * node_affinity.num_online_nodes;
+ i++)
+ curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+
+ for (; i < possible; i++) {
+ cpumask_clear_cpu(curr_cpu, hw_thread_mask);
+ curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+ }
+
+ /* Identifying correct HW threads within physical cores */
+ cpumask_shift_left(hw_thread_mask, hw_thread_mask,
+ num_cores_per_socket *
+ node_affinity.num_online_nodes *
+ hw_thread_no);
+ }
+}
+
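A worked example of the mask arithmetic, for an assumed topology not taken from the patch: with 56 online CPUs, 2 HT siblings per core and 2 online NUMA nodes, num_cores_per_socket = 56 / 2 / 2 = 14. For hw_thread_no = 0 the loop trims the mask down to the first 28 CPUs (one thread per physical core across both sockets); for hw_thread_no = 1 the subsequent cpumask_shift_left() moves the mask up by 14 * 2 * 1 = 28 bits, selecting CPUs 28-55, i.e. the second hardware thread of every core, assuming siblings are enumerated in the upper half of the CPU range.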
+int hfi1_get_proc_affinity(int node)
+{
+ int cpu = -1, ret, i;
+ struct hfi1_affinity_node *entry;
+ cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
- struct cpu_mask_set *set = &dd->affinity->proc;
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
/*
* check whether process/context affinity has already
@@ -325,22 +504,41 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
/*
* The process does not have a preset CPU affinity so find one to
- * recommend. We prefer CPUs on the same NUMA as the device.
+ * recommend using the following algorithm:
+ *
+ * For each user process that is opening a context on HFI Y:
+ * a) If all cores are filled, reinitialize the bitmask
+ * b) Fill real cores first, then HT cores (First set of HT
+ * cores on all physical cores, then second set of HT core,
+ * and, so on) in the following order:
+ *
+ * 1. Same NUMA node as HFI Y and not running an IRQ
+ * handler
+ * 2. Same NUMA node as HFI Y and running an IRQ handler
+ * 3. Different NUMA node to HFI Y and not running an IRQ
+ * handler
+ * 4. Different NUMA node to HFI Y and running an IRQ
+ * handler
+ * c) Mark core as filled in the bitmask. As user processes are
+ * done, clear cores from the bitmask.
*/
ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
if (!ret)
goto done;
- ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
if (!ret)
goto free_diff;
- ret = zalloc_cpumask_var(&intrs, GFP_KERNEL);
+ ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
+ if (!ret)
+ goto free_hw_thread_mask;
+ ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
if (!ret)
- goto free_mask;
+ goto free_available_mask;
- spin_lock(&dd->affinity->lock);
+ spin_lock(&affinity->lock);
/*
- * If we've used all available CPUs, clear the mask and start
+ * If we've used all available HW threads, clear the mask and start
* overloading.
*/
if (cpumask_equal(&set->mask, &set->used)) {
@@ -348,81 +546,198 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_clear(&set->used);
}
- /* CPUs used by interrupt handlers */
- cpumask_copy(intrs, (dd->affinity->def_intr.gen ?
- &dd->affinity->def_intr.mask :
- &dd->affinity->def_intr.used));
- cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
- &dd->affinity->rcv_intr.mask :
- &dd->affinity->rcv_intr.used));
+ /*
+ * If NUMA node has CPUs used by interrupt handlers, include them in the
+ * interrupt handler mask.
+ */
+ entry = node_affinity_lookup(node);
+ if (entry) {
+ cpumask_copy(intrs_mask, (entry->def_intr.gen ?
+ &entry->def_intr.mask :
+ &entry->def_intr.used));
+ cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
+ &entry->rcv_intr.mask :
+ &entry->rcv_intr.used));
+ cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
+ }
hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
- cpumask_pr_args(intrs));
+ cpumask_pr_args(intrs_mask));
+
+ cpumask_copy(hw_thread_mask, &set->mask);
/*
- * If we don't have a NUMA node requested, preference is towards
- * device NUMA node
+ * If HT cores are enabled, identify which HW threads within the
+ * physical cores should be used.
*/
- if (node == -1)
- node = dd->node;
+ if (affinity->num_core_siblings > 0) {
+ for (i = 0; i < affinity->num_core_siblings; i++) {
+ find_hw_thread_mask(i, hw_thread_mask, affinity);
+
+ /*
+ * If there's at least one available core for this HW
+ * thread number, stop looking for a core.
+ *
+ * diff will always be not empty at least once in this
+ * loop as the used mask gets reset when
+ * (set->mask == set->used) before this loop.
+ */
+ cpumask_andnot(diff, hw_thread_mask, &set->used);
+ if (!cpumask_empty(diff))
+ break;
+ }
+ }
+ hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
+ cpumask_pr_args(hw_thread_mask));
+
node_mask = cpumask_of_node(node);
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+ hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
cpumask_pr_args(node_mask));
- /* diff will hold all unused cpus */
- cpumask_andnot(diff, &set->mask, &set->used);
- hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
-
- /* get cpumask of available CPUs on preferred NUMA */
- cpumask_and(mask, diff, node_mask);
- hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
+ /* Get cpumask of available CPUs on preferred NUMA */
+ cpumask_and(available_mask, hw_thread_mask, node_mask);
+ cpumask_andnot(available_mask, available_mask, &set->used);
+ hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
+ cpumask_pr_args(available_mask));
/*
* At first, we don't want to place processes on the same
- * CPUs as interrupt handlers.
+ * CPUs as interrupt handlers. Then, CPUs running interrupt
+ * handlers are used.
+ *
+ * 1) If diff is not empty, then there are CPUs not running
+ * non-interrupt handlers available, so diff gets copied
+ * over to available_mask.
+ * 2) If diff is empty, then all CPUs not running interrupt
+ * handlers are taken, so available_mask contains all
+ * available CPUs running interrupt handlers.
+ * 3) If available_mask is empty, then all CPUs on the
+ * preferred NUMA node are taken, so other NUMA nodes are
+ * used for process assignments using the same method as
+ * the preferred NUMA node.
*/
- cpumask_andnot(diff, mask, intrs);
+ cpumask_andnot(diff, available_mask, intrs_mask);
if (!cpumask_empty(diff))
- cpumask_copy(mask, diff);
+ cpumask_copy(available_mask, diff);
- /*
- * if we don't have a cpu on the preferred NUMA, get
- * the list of the remaining available CPUs
- */
- if (cpumask_empty(mask)) {
- cpumask_andnot(diff, &set->mask, &set->used);
- cpumask_andnot(mask, diff, node_mask);
+ /* If we don't have CPUs on the preferred node, use other NUMA nodes */
+ if (cpumask_empty(available_mask)) {
+ cpumask_andnot(available_mask, hw_thread_mask, &set->used);
+ /* Excluding preferred NUMA cores */
+ cpumask_andnot(available_mask, available_mask, node_mask);
+ hfi1_cdbg(PROC,
+ "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
+ cpumask_pr_args(available_mask));
+
+ /*
+ * At first, we don't want to place processes on the same
+ * CPUs as interrupt handlers.
+ */
+ cpumask_andnot(diff, available_mask, intrs_mask);
+ if (!cpumask_empty(diff))
+ cpumask_copy(available_mask, diff);
}
- hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
- cpumask_pr_args(mask));
+ hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
+ cpumask_pr_args(available_mask));
- cpu = cpumask_first(mask);
+ cpu = cpumask_first(available_mask);
if (cpu >= nr_cpu_ids) /* empty */
cpu = -1;
else
cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&dd->affinity->lock);
-
- free_cpumask_var(intrs);
-free_mask:
- free_cpumask_var(mask);
+ spin_unlock(&affinity->lock);
+ hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
+
+ free_cpumask_var(intrs_mask);
+free_available_mask:
+ free_cpumask_var(available_mask);
+free_hw_thread_mask:
+ free_cpumask_var(hw_thread_mask);
free_diff:
free_cpumask_var(diff);
done:
return cpu;
}
-void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu)
+void hfi1_put_proc_affinity(int cpu)
{
- struct cpu_mask_set *set = &dd->affinity->proc;
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
if (cpu < 0)
return;
- spin_lock(&dd->affinity->lock);
+ spin_lock(&affinity->lock);
cpumask_clear_cpu(cpu, &set->used);
+ hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
- spin_unlock(&dd->affinity->lock);
+ spin_unlock(&affinity->lock);
}
+/* Prevents concurrent reads and writes of the sdma_affinity attrib */
+static DEFINE_MUTEX(sdma_affinity_mutex);
+
+int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
+ size_t count)
+{
+ struct hfi1_affinity_node *entry;
+ struct cpumask mask;
+ int ret, i;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
+ if (!entry)
+ return -EINVAL;
+
+ ret = cpulist_parse(buf, &mask);
+ if (ret)
+ return ret;
+
+ if (!cpumask_subset(&mask, cpu_online_mask) || cpumask_empty(&mask)) {
+ dd_dev_warn(dd, "Invalid CPU mask\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&sdma_affinity_mutex);
+ /* reset the SDMA interrupt affinity details */
+ init_cpu_mask_set(&entry->def_intr);
+ cpumask_copy(&entry->def_intr.mask, &mask);
+ /*
+ * Reassign the affinity for each SDMA interrupt.
+ */
+ for (i = 0; i < dd->num_msix_entries; i++) {
+ struct hfi1_msix_entry *msix;
+
+ msix = &dd->msix_entries[i];
+ if (msix->type != IRQ_SDMA)
+ continue;
+
+ ret = hfi1_get_irq_affinity(dd, msix);
+
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&sdma_affinity_mutex);
+ return ret ? ret : strnlen(buf, PAGE_SIZE);
+}
+
+int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
+{
+ struct hfi1_affinity_node *entry;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
+ if (!entry)
+ return -EINVAL;
+
+ mutex_lock(&sdma_affinity_mutex);
+ cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
+ mutex_unlock(&sdma_affinity_mutex);
+ return strnlen(buf, PAGE_SIZE);
+}
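Together these two helpers implement a standard sysfs cpulist attribute for SDMA interrupt placement: a written string such as "0-3,8" is parsed with cpulist_parse(), rejected unless it is a non-empty subset of cpu_online_mask, copied into the node's def_intr mask and then re-applied to every IRQ_SDMA MSI-X vector, while a read dumps the current mask via cpumap_print_to_pagebuf(). The sysfs file that wires the pair up is not part of this excerpt, so the exact attribute path is not shown here.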
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 20f52fe74091..8879cf7a8cac 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -73,7 +73,6 @@ struct cpu_mask_set {
struct hfi1_affinity {
struct cpu_mask_set def_intr;
struct cpu_mask_set rcv_intr;
- struct cpu_mask_set proc;
struct cpumask real_cpu_mask;
/* spin lock to protect affinity struct */
spinlock_t lock;
@@ -82,11 +81,9 @@ struct hfi1_affinity {
struct hfi1_msix_entry;
/* Initialize non-HT cpu cores mask */
-int init_real_cpu_mask(struct hfi1_devdata *);
+void init_real_cpu_mask(void);
/* Initialize driver affinity data */
-void hfi1_dev_affinity_init(struct hfi1_devdata *);
-/* Free driver affinity data */
-void hfi1_dev_affinity_free(struct hfi1_devdata *);
+int hfi1_dev_affinity_init(struct hfi1_devdata *);
/*
* Set IRQ affinity to a CPU. The function will determine the
* CPU and set the affinity to it.
@@ -101,8 +98,35 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
* Determine a CPU affinity for a user process, if the process does not
* have an affinity set yet.
*/
-int hfi1_get_proc_affinity(struct hfi1_devdata *, int);
+int hfi1_get_proc_affinity(int);
/* Release a CPU used by a user process. */
-void hfi1_put_proc_affinity(struct hfi1_devdata *, int);
+void hfi1_put_proc_affinity(int);
+
+int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
+int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
+ size_t count);
+
+struct hfi1_affinity_node {
+ int node;
+ struct cpu_mask_set def_intr;
+ struct cpu_mask_set rcv_intr;
+ struct cpumask general_intr_mask;
+ struct list_head list;
+};
+
+struct hfi1_affinity_node_list {
+ struct list_head list;
+ struct cpumask real_cpu_mask;
+ struct cpu_mask_set proc;
+ int num_core_siblings;
+ int num_online_nodes;
+ int num_online_cpus;
+ /* protect affinity node list */
+ spinlock_t lock;
+};
+
+int node_affinity_init(void);
+void node_affinity_destroy(void);
+extern struct hfi1_affinity_node_list node_affinity;
#endif /* _HFI1_AFFINITY_H */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index dad4d0ebbdff..b32638d58ae8 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -63,6 +63,7 @@
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
+#include "affinity.h"
#define NUM_IB_PORTS 1
@@ -121,6 +122,7 @@ struct flag_table {
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
+#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
@@ -238,6 +240,9 @@ struct flag_table {
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
+#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
+#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
+
/*
* CCE Error flags.
*/
@@ -3947,6 +3952,28 @@ static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
return dd->sw_send_dma_eng_err_status_cnt[0];
}
+static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ u64 val = 0;
+ u64 csr = entry->csr;
+
+ val = read_write_csr(dd, csr, mode, data);
+ if (mode == CNTR_MODE_R) {
+ val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
+ CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
+ } else if (mode == CNTR_MODE_W) {
+ dd->sw_rcv_bypass_packet_errors = 0;
+ } else {
+ dd_dev_err(dd, "Invalid cntr register access mode");
+ return 0;
+ }
+ return val;
+}
+
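The clamp in the read path above is the usual overflow-safe addition: the software bypass-packet error count is folded into the hardware CSR value without letting the 64-bit total wrap past CNTR_MAX. As a standalone sketch of the same idiom:

	static inline u64 saturating_add_u64(u64 a, u64 b)
	{
		return (a > CNTR_MAX - b) ? CNTR_MAX : a + b;
	}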
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
void *context, int vl, int mode, u64 data) \
@@ -4020,7 +4047,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
CNTR_SYNTH),
-[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
+[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
+ access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
@@ -8798,30 +8826,6 @@ static int write_tx_settings(struct hfi1_devdata *dd,
return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}
-static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
-{
- u32 frame, version, prod_id;
- int ret, lane;
-
- /* 4 lanes */
- for (lane = 0; lane < 4; lane++) {
- ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
- if (ret) {
- dd_dev_err(dd,
- "Unable to read lane %d firmware details\n",
- lane);
- continue;
- }
- version = (frame >> SPICO_ROM_VERSION_SHIFT)
- & SPICO_ROM_VERSION_MASK;
- prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
- & SPICO_ROM_PROD_ID_MASK;
- dd_dev_info(dd,
- "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
- lane, version, prod_id);
- }
-}
-
/*
* Read an idle LCB message.
*
@@ -9187,17 +9191,24 @@ static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
unsigned long timeout;
/*
- * Check for QSFP interrupt for t_init (SFF 8679)
+ * Some QSFP cables have a quirk that asserts the IntN line as a side
+ * effect of power up on plug-in. We ignore this false positive
+ * interrupt until the module has finished powering up by waiting for
+ * a minimum timeout of the module inrush initialization time of
+ * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
+ * module have stabilized.
+ */
+ msleep(500);
+
+ /*
+ * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
*/
timeout = jiffies + msecs_to_jiffies(2000);
while (1) {
mask = read_csr(dd, dd->hfi1_id ?
ASIC_QSFP2_IN : ASIC_QSFP1_IN);
- if (!(mask & QSFP_HFI0_INT_N)) {
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
- ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
+ if (!(mask & QSFP_HFI0_INT_N))
break;
- }
if (time_after(jiffies, timeout)) {
dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
__func__);
@@ -9213,10 +9224,17 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
u64 mask;
mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
- if (enable)
+ if (enable) {
+ /*
+ * Clear the status register to avoid an immediate interrupt
+ * when we re-enable the IntN pin
+ */
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
+ QSFP_HFI0_INT_N);
mask |= (u64)QSFP_HFI0_INT_N;
- else
+ } else {
mask &= ~(u64)QSFP_HFI0_INT_N;
+ }
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
@@ -9630,14 +9648,6 @@ void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
-int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
- struct hfi1_ctxt_info *kinfo)
-{
- kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
- HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
- return 0;
-}
-
struct hfi1_message_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr)
{
@@ -9890,6 +9900,131 @@ static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
return 0;
}
+static const char *state_completed_string(u32 completed)
+{
+ static const char * const state_completed[] = {
+ "EstablishComm",
+ "OptimizeEQ",
+ "VerifyCap"
+ };
+
+ if (completed < ARRAY_SIZE(state_completed))
+ return state_completed[completed];
+
+ return "unknown";
+}
+
+static const char all_lanes_dead_timeout_expired[] =
+ "All lanes were inactive – was the interconnect media removed?";
+static const char tx_out_of_policy[] =
+ "Passing lanes on local port do not meet the local link width policy";
+static const char no_state_complete[] =
+ "State timeout occurred before link partner completed the state";
+static const char * const state_complete_reasons[] = {
+ [0x00] = "Reason unknown",
+ [0x01] = "Link was halted by driver, refer to LinkDownReason",
+ [0x02] = "Link partner reported failure",
+ [0x10] = "Unable to achieve frame sync on any lane",
+ [0x11] =
+ "Unable to find a common bit rate with the link partner",
+ [0x12] =
+ "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
+ [0x13] =
+ "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
+ [0x14] = no_state_complete,
+ [0x15] =
+ "State timeout occurred before link partner identified equalization presets",
+ [0x16] =
+ "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
+ [0x17] = tx_out_of_policy,
+ [0x20] = all_lanes_dead_timeout_expired,
+ [0x21] =
+ "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
+ [0x22] = no_state_complete,
+ [0x23] =
+ "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
+ [0x24] = tx_out_of_policy,
+ [0x30] = all_lanes_dead_timeout_expired,
+ [0x31] =
+ "State timeout occurred waiting for host to process received frames",
+ [0x32] = no_state_complete,
+ [0x33] =
+ "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
+ [0x34] = tx_out_of_policy,
+};
+
+static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
+ u32 code)
+{
+ const char *str = NULL;
+
+ if (code < ARRAY_SIZE(state_complete_reasons))
+ str = state_complete_reasons[code];
+
+ if (str)
+ return str;
+ return "Reserved";
+}
+
+/* describe the given last state complete frame */
+static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
+ const char *prefix)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 success;
+ u32 state;
+ u32 reason;
+ u32 lanes;
+
+ /*
+ * Decode frame:
+ * [ 0: 0] - success
+ * [ 3: 1] - state
+ * [ 7: 4] - next state timeout
+ * [15: 8] - reason code
+ * [31:16] - lanes
+ */
+ success = frame & 0x1;
+ state = (frame >> 1) & 0x7;
+ reason = (frame >> 8) & 0xff;
+ lanes = (frame >> 16) & 0xffff;
+
+ dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
+ prefix, frame);
+ dd_dev_err(dd, " last reported state state: %s (0x%x)\n",
+ state_completed_string(state), state);
+ dd_dev_err(dd, " state successfully completed: %s\n",
+ success ? "yes" : "no");
+ dd_dev_err(dd, " fail reason 0x%x: %s\n",
+ reason, state_complete_reason_code_string(ppd, reason));
+ dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
+}
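
The comment inside decode_state_complete() spells out the LNI state-complete frame layout. A stand-alone sketch that unpacks the same bit fields (the struct and function names are hypothetical; only the shifts and masks mirror the code above):

#include <stdint.h>
#include <stdio.h>

/* Decoded view of a 32-bit LNI state-complete frame:
 *   [ 0: 0] success, [ 3: 1] state, [ 7: 4] next state timeout,
 *   [15: 8] reason code, [31:16] passing lane mask */
struct lni_frame {
	uint32_t success;
	uint32_t state;
	uint32_t timeout;
	uint32_t reason;
	uint32_t lanes;
};

static struct lni_frame decode_lni_frame(uint32_t frame)
{
	struct lni_frame f = {
		.success = frame & 0x1,
		.state   = (frame >> 1) & 0x7,
		.timeout = (frame >> 4) & 0xf,
		.reason  = (frame >> 8) & 0xff,
		.lanes   = (frame >> 16) & 0xffff,
	};
	return f;
}

int main(void)
{
	struct lni_frame f = decode_lni_frame(0x000f1701);

	printf("success=%u state=%u reason=0x%02x lanes=0x%04x\n",
	       f.success, f.state, f.reason, f.lanes);
	return 0;
}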
+
+/*
+ * Read the last state complete frames and explain them. This routine
+ * expects to be called if the link went down during link negotiation
+ * and initialization (LNI). That is, anywhere between polling and link up.
+ */
+static void check_lni_states(struct hfi1_pportdata *ppd)
+{
+ u32 last_local_state;
+ u32 last_remote_state;
+
+ read_last_local_state(ppd->dd, &last_local_state);
+ read_last_remote_state(ppd->dd, &last_remote_state);
+
+ /*
+ * Don't report anything if there is nothing to report. A value of
+ * 0 means the link was taken down while polling and there was no
+ * training in-process.
+ */
+ if (last_local_state == 0 && last_remote_state == 0)
+ return;
+
+ decode_state_complete(ppd, last_local_state, "transmitted");
+ decode_state_complete(ppd, last_remote_state, "received");
+}
+
/*
* Helper for set_link_state(). Do not call except from that routine.
* Expects ppd->hls_mutex to be held.
@@ -9902,8 +10037,6 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
struct hfi1_devdata *dd = ppd->dd;
u32 pstate, previous_state;
- u32 last_local_state;
- u32 last_remote_state;
int ret;
int do_transition;
int do_wait;
@@ -10003,12 +10136,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
} else if (previous_state
& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
/* went down while attempting link up */
- /* byte 1 of last_*_state is the failure reason */
- read_last_local_state(dd, &last_local_state);
- read_last_remote_state(dd, &last_remote_state);
- dd_dev_err(dd,
- "LNI failure last states: local 0x%08x, remote 0x%08x\n",
- last_local_state, last_remote_state);
+ check_lni_states(ppd);
}
/* the active link width (downgrade) is 0 on link down */
@@ -11668,9 +11796,6 @@ static void free_cntrs(struct hfi1_devdata *dd)
dd->cntrnames = NULL;
}
-#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
-#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
-
static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
u64 *psval, void *context, int vl)
{
@@ -12325,37 +12450,6 @@ u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
return ib_pstate;
}
-/*
- * Read/modify/write ASIC_QSFP register bits as selected by mask
- * data: 0 or 1 in the positions depending on what needs to be written
- * dir: 0 for read, 1 for write
- * mask: select by setting
- * I2CCLK (bit 0)
- * I2CDATA (bit 1)
- */
-u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
- u32 mask)
-{
- u64 qsfp_oe, target_oe;
-
- target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
- if (mask) {
- /* We are writing register bits, so lock access */
- dir &= mask;
- data &= mask;
-
- qsfp_oe = read_csr(dd, target_oe);
- qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
- write_csr(dd, target_oe, qsfp_oe);
- }
- /* We are exclusively reading bits here, but it is unlikely
- * we'll get valid data when we set the direction of the pin
- * in the same call, so read should call this function again
- * to get valid data
- */
- return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
-}
-
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
@@ -12780,7 +12874,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Kernel receive contexts:
- * - min of 2 or 1 context/numa (excluding control context)
* - Context 0 - control context (VL15/multicast/error)
* - Context 1 - first kernel context
* - Context 2 - second kernel context
@@ -12794,9 +12887,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
*/
num_kernel_contexts = n_krcvqs + 1;
else
- num_kernel_contexts = num_online_nodes() + 1;
- num_kernel_contexts =
- max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
+ num_kernel_contexts = DEFAULT_KRCVQS + 1;
/*
* Every kernel receive context needs an ACK send context.
* one send context is allocated for each VL{0-7} and VL15
@@ -12815,7 +12906,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
*/
if (num_user_contexts < 0)
num_user_contexts =
- cpumask_weight(&dd->affinity->real_cpu_mask);
+ cpumask_weight(&node_affinity.real_cpu_mask);
total_contexts = num_kernel_contexts + num_user_contexts;
@@ -14141,6 +14232,11 @@ static int init_asic_data(struct hfi1_devdata *dd)
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+
+ /* first one through - set up i2c devices */
+ if (!peer)
+ ret = set_up_i2c(dd, dd->asic_data);
+
return ret;
}
@@ -14445,19 +14541,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
- /*
- * The real cpu mask is part of the affinity struct but has to be
- * initialized earlier than the rest of the affinity struct because it
- * is needed to calculate the number of user contexts in
- * set_up_context_variables(). However, hfi1_dev_affinity_init(),
- * which initializes the rest of the affinity struct members,
- * depends on set_up_context_variables() for the number of kernel
- * contexts, so it cannot be called before set_up_context_variables().
- */
- ret = init_real_cpu_mask(dd);
- if (ret)
- goto bail_cleanup;
-
ret = set_up_context_variables(dd);
if (ret)
goto bail_cleanup;
@@ -14471,7 +14554,9 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* set up KDETH QP prefix in both RX and TX CSRs */
init_kdeth_qp(dd);
- hfi1_dev_affinity_init(dd);
+ ret = hfi1_dev_affinity_init(dd);
+ if (ret)
+ goto bail_cleanup;
/* send contexts must be set up before receive contexts */
ret = init_send_contexts(dd);
@@ -14508,8 +14593,14 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* set up LCB access - must be after set_up_interrupts() */
init_lcb_access(dd);
+ /*
+ * Serial number is created from the base guid:
+ * [27:24] = base guid [38:35]
+ * [23: 0] = base guid [23: 0]
+ */
snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
- dd->base_guid & 0xFFFFFF);
+ (dd->base_guid & 0xFFFFFF) |
+ ((dd->base_guid >> 11) & 0xF000000));
dd->oui1 = dd->base_guid >> 56 & 0xFF;
dd->oui2 = dd->base_guid >> 48 & 0xFF;
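
Per the comment above, the serial number packs base-GUID bits [38:35] into serial bits [27:24] on top of the low 24 GUID bits, which is what the shift by 11 and the 0xF000000 mask accomplish. A quick stand-alone sketch of that packing (the GUID value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Build the serial from a base GUID:
 * serial[27:24] = guid[38:35], serial[23:0] = guid[23:0] */
static uint32_t serial_from_guid(uint64_t base_guid)
{
	return (uint32_t)((base_guid & 0xFFFFFF) |
			  ((base_guid >> 11) & 0xF000000));
}

int main(void)
{
	/* hypothetical GUID, for illustration only */
	uint64_t guid = 0x001175000000ABCDULL;

	printf("serial 0x%08x\n", serial_from_guid(guid));
	return 0;
}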
@@ -14518,7 +14609,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
if (ret)
goto bail_clear_intr;
- check_fabric_firmware_versions(dd);
thermal_init(dd);
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 66a327978739..ed11107c50fe 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -640,6 +640,7 @@ extern uint platform_config_load;
/* SBus commands */
#define RESET_SBUS_RECEIVER 0x20
#define WRITE_SBUS_RECEIVER 0x21
+#define READ_SBUS_RECEIVER 0x22
void sbus_request(struct hfi1_devdata *dd,
u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
int sbus_request_slow(struct hfi1_devdata *dd,
@@ -1336,10 +1337,6 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
struct hfi1_message_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr);
-int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
- struct hfi1_ctxt_info *kinfo);
-u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
- u32 mask);
int hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 8744de6667c2..5b9993899789 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -471,6 +471,10 @@
#define ASIC_STS_SBUS_RESULT (ASIC + 0x000000000010)
#define ASIC_STS_SBUS_RESULT_DONE_SMASK 0x1ull
#define ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK 0x2ull
+#define ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT 2
+#define ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK 0x7ull
+#define ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT 32
+#define ASIC_STS_SBUS_RESULT_DATA_OUT_MASK 0xFFFFFFFFull
#define ASIC_STS_THERM (ASIC + 0x000000000058)
#define ASIC_STS_THERM_CRIT_TEMP_MASK 0x7FFull
#define ASIC_STS_THERM_CRIT_TEMP_SHIFT 18
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index c75b0ae688f8..8246dc7d0573 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -392,9 +392,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
u16 rlid;
u8 svc_type, sl, sc5;
- sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
- if (rhf_dc_info(packet->rhf))
- sc5 |= 0x10;
+ sc5 = hdr2sc(rhdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
@@ -450,14 +448,20 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->rcv_flags = 0;
}
-static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
- struct hfi1_other_headers *ohdr,
- u64 rhf, u32 bth1, struct ib_grh *grh)
+void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool do_cnp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u32 rqpn = 0;
- u16 rlid;
- u8 sc5, svc_type;
+ struct hfi1_ib_header *hdr = pkt->hdr;
+ struct hfi1_other_headers *ohdr = pkt->ohdr;
+ struct ib_grh *grh = NULL;
+ u32 rqpn = 0, bth1;
+ u16 rlid, dlid = be16_to_cpu(hdr->lrh[1]);
+ u8 sc, svc_type;
+ bool is_mcast = false;
+
+ if (pkt->rcv_flags & HFI1_HAS_GRH)
+ grh = &hdr->u.l.grh;
switch (qp->ibqp.qp_type) {
case IB_QPT_SMI:
@@ -466,6 +470,8 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
rlid = be16_to_cpu(hdr->lrh[3]);
rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
svc_type = IB_CC_SVCTYPE_UD;
+ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
break;
case IB_QPT_UC:
rlid = qp->remote_ah_attr.dlid;
@@ -481,24 +487,23 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
return;
}
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- if (rhf_dc_info(rhf))
- sc5 |= 0x10;
+ sc = hdr2sc((struct hfi1_message_header *)hdr, pkt->rhf);
- if (bth1 & HFI1_FECN_SMASK) {
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (do_cnp && (bth1 & HFI1_FECN_SMASK)) {
u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
- u16 dlid = be16_to_cpu(hdr->lrh[1]);
- return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
+ return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh);
}
- if (bth1 & HFI1_BECN_SMASK) {
+ if (!is_mcast && (bth1 & HFI1_BECN_SMASK)) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = bth1 & RVT_QPN_MASK;
- u8 sl = ibp->sc_to_sl[sc5];
+ u8 sl = ibp->sc_to_sl[sc];
process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
+
}
struct ps_mdata {
@@ -596,7 +601,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
struct rvt_qp *qp;
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr;
- struct ib_grh *grh = NULL;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -616,14 +620,13 @@ static void __prescan_rxq(struct hfi1_packet *packet)
hfi1_get_msgheader(dd, rhf_addr);
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH) {
+ if (lnh == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
+ else if (lnh == HFI1_LRH_GRH)
ohdr = &hdr->u.l.oth;
- grh = &hdr->u.l.grh;
- } else {
+ else
goto next; /* just in case */
- }
+
bth1 = be32_to_cpu(ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
@@ -639,7 +642,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
goto next;
}
- process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
+ process_ecn(qp, packet, true);
rcu_read_unlock();
/* turn off BECN, FECN */
@@ -1362,6 +1365,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
dd_dev_err(packet->rcd->dd,
"Bypass packets are not supported in normal operation. Dropping\n");
+ incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c702a009608f..1ecbec192358 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -168,6 +168,7 @@ static inline int is_valid_mmap(u64 token)
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
+ struct hfi1_filedata *fd;
struct hfi1_devdata *dd = container_of(inode->i_cdev,
struct hfi1_devdata,
user_cdev);
@@ -176,10 +177,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
kobject_get(&dd->kobj);
/* The real work is performed later in assign_ctxt() */
- fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
- if (fp->private_data) /* no cpu affinity by default */
- ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
- return fp->private_data ? 0 : -ENOMEM;
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+
+ if (fd) {
+ fd->rec_cpu_num = -1; /* no cpu affinity by default */
+ fd->mm = current->mm;
+ }
+
+ fp->private_data = fd;
+
+ return fd ? 0 : -ENOMEM;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -228,7 +236,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(struct hfi1_base_info));
break;
case HFI1_IOCTL_CREDIT_UPD:
- if (uctxt && uctxt->sc)
+ if (uctxt)
sc_return_credits(uctxt->sc);
break;
@@ -392,41 +400,38 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
- int ret = 0, done = 0, reqs = 0;
+ int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
- if (!cq || !pq) {
- ret = -EIO;
- goto done;
- }
+ if (!cq || !pq)
+ return -EIO;
- if (!iter_is_iovec(from) || !dim) {
- ret = -EINVAL;
- goto done;
- }
+ if (!iter_is_iovec(from) || !dim)
+ return -EINVAL;
hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
fd->uctxt->ctxt, fd->subctxt, dim);
- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
- ret = -ENOSPC;
- goto done;
- }
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
+ return -ENOSPC;
while (dim) {
+ int ret;
unsigned long count = 0;
ret = hfi1_user_sdma_process_request(
kiocb->ki_filp, (struct iovec *)(from->iov + done),
dim, &count);
- if (ret)
- goto done;
+ if (ret) {
+ reqs = ret;
+ break;
+ }
dim -= count;
done += count;
reqs++;
}
-done:
- return ret ? ret : reqs;
+
+ return reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
@@ -718,7 +723,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
hfi1_user_sdma_free_queues(fdata);
/* release the cpu */
- hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);
+ hfi1_put_proc_affinity(fdata->rec_cpu_num);
/*
* Clear any left over, unhandled events so the next process that
@@ -730,7 +735,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
if (--uctxt->cnt) {
uctxt->active_slaves &= ~(1 << fdata->subctxt);
- uctxt->subpid[fdata->subctxt] = 0;
mutex_unlock(&hfi1_mutex);
goto done;
}
@@ -756,7 +760,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
sc_disable(uctxt->sc);
- uctxt->pid = 0;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
@@ -818,9 +821,10 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
ret = find_shared_ctxt(fp, uinfo);
if (ret < 0)
goto done_unlock;
- if (ret)
- fd->rec_cpu_num = hfi1_get_proc_affinity(
- fd->uctxt->dd, fd->uctxt->numa_id);
+ if (ret) {
+ fd->rec_cpu_num =
+ hfi1_get_proc_affinity(fd->uctxt->numa_id);
+ }
}
/*
@@ -895,7 +899,6 @@ static int find_shared_ctxt(struct file *fp,
}
fd->uctxt = uctxt;
fd->subctxt = uctxt->cnt++;
- uctxt->subpid[fd->subctxt] = current->pid;
uctxt->active_slaves |= 1 << fd->subctxt;
ret = 1;
goto done;
@@ -932,7 +935,11 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
if (ctxt == dd->num_rcv_contexts)
return -EBUSY;
- fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
+ /*
+ * If we don't have a NUMA node requested, preference is towards
+ * device NUMA node.
+ */
+ fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
if (fd->rec_cpu_num != -1)
numa = cpu_to_node(fd->rec_cpu_num);
else
@@ -976,8 +983,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
return ret;
}
uctxt->userversion = uinfo->userversion;
- uctxt->pid = current->pid;
- uctxt->flags = HFI1_CAP_UGET(MASK);
+ uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
@@ -1080,18 +1086,18 @@ static int user_init(struct file *fp)
hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
/*
* Ignore the bit in the flags for now until proper
* support for multiple packet per rcv array entry is
* added.
*/
- if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
/*
* The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
@@ -1099,7 +1105,7 @@ static int user_init(struct file *fp)
* uses of the chip or ctxt. Therefore, add the rcvctrl op
* for both cases.
*/
- if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
else
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
@@ -1122,9 +1128,14 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
int ret = 0;
memset(&cinfo, 0, sizeof(cinfo));
- ret = hfi1_get_base_kinfo(uctxt, &cinfo);
- if (ret < 0)
- goto done;
+ cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
+ HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
+ HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
+ HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
+ /* adjust flag if this fd is not able to cache */
+ if (!fd->handler)
+ cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
+
cinfo.num_active = hfi1_count_active_units();
cinfo.unit = uctxt->dd->unit;
cinfo.ctxt = uctxt->ctxt;
@@ -1146,7 +1157,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
ret = -EFAULT;
-done:
+
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index ed680fda611d..13db8eb4f4ec 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -206,6 +206,9 @@ static const struct firmware *platform_config;
/* the number of fabric SerDes on the SBus */
#define NUM_FABRIC_SERDES 4
+/* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
+#define SBUS_READ_COMPLETE 0x4
+
/* SBus fabric SerDes addresses, one set per HFI */
static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
{ 0x01, 0x02, 0x03, 0x04 },
@@ -240,6 +243,7 @@ static const u8 all_pcie_serdes_broadcast = 0xe0;
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
struct firmware_details *fdet);
+static void dump_fw_version(struct hfi1_devdata *dd);
/*
* Read a single 64-bit value from 8051 data memory.
@@ -1079,6 +1083,44 @@ void sbus_request(struct hfi1_devdata *dd,
}
/*
+ * Read a value from the SBus.
+ *
+ * Requires the caller to be in fast mode
+ */
+static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
+ u32 data_in)
+{
+ u64 reg;
+ int retries;
+ int success = 0;
+ u32 result = 0;
+ u32 result_code = 0;
+
+ sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
+
+ for (retries = 0; retries < 100; retries++) {
+ usleep_range(1000, 1200); /* arbitrary */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
+ & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
+ if (result_code != SBUS_READ_COMPLETE)
+ continue;
+
+ success = 1;
+ result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
+ & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
+ break;
+ }
+
+ if (!success) {
+ dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
+ result_code);
+ }
+
+ return result;
+}
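
sbus_read() above is a bounded polling loop: issue the read request, then re-check the result CSR roughly every millisecond until RESULT_CODE reports a completed read, giving up after 100 tries. A generic user-space sketch of that poll-until-done shape (device_ready() and read_result() are made-up stand-ins for the CSR accesses):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

/* Stand-in for checking the hardware result code. */
static int device_ready(void)
{
	return ++attempts >= 3;	/* pretend the device is ready on try 3 */
}

/* Stand-in for extracting the data-out field. */
static uint32_t read_result(void)
{
	return 0xDEADBEEF;
}

/* Poll until the device reports completion, sleeping ~1 ms between
 * tries and bailing out after a bounded number of retries. */
static int poll_read(uint32_t *result, int max_retries)
{
	int i;

	for (i = 0; i < max_retries; i++) {
		usleep(1000);		/* arbitrary 1 ms backoff */
		if (!device_ready())
			continue;
		*result = read_result();
		return 0;
	}
	return -1;			/* timed out */
}

int main(void)
{
	uint32_t val;

	if (poll_read(&val, 100) == 0)
		printf("read 0x%08x after %d attempts\n", val, attempts);
	else
		printf("read failed\n");
	return 0;
}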
+
+/*
* Turn off the SBus and fabric serdes spicos.
*
* + Must be called with Sbus fast mode turned on.
@@ -1636,6 +1678,7 @@ int load_firmware(struct hfi1_devdata *dd)
return ret;
}
+ dump_fw_version(dd);
return 0;
}
@@ -2054,3 +2097,85 @@ void read_guid(struct hfi1_devdata *dd)
dd_dev_info(dd, "GUID %llx",
(unsigned long long)dd->base_guid);
}
+
+/* read and display firmware version info */
+static void dump_fw_version(struct hfi1_devdata *dd)
+{
+ u32 pcie_vers[NUM_PCIE_SERDES];
+ u32 fabric_vers[NUM_FABRIC_SERDES];
+ u32 sbus_vers;
+ int i;
+ int all_same;
+ int ret;
+ u8 rcv_addr;
+
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
+ return;
+ }
+
+ /* set fast mode */
+ set_sbus_fast_mode(dd);
+
+ /* read version for SBus Master */
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
+ dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
+
+ /* read version for PCIe SerDes */
+ all_same = 1;
+ pcie_vers[0] = 0;
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
+ sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
+ if (i > 0 && pcie_vers[0] != pcie_vers[i])
+ all_same = 0;
+ }
+
+ if (all_same) {
+ dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
+ pcie_vers[0]);
+ } else {
+ dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ dd_dev_info(dd,
+ "PCIe SerDes lane %d firmware version 0x%x\n",
+ i, pcie_vers[i]);
+ }
+ }
+
+ /* read version for fabric SerDes */
+ all_same = 1;
+ fabric_vers[0] = 0;
+ for (i = 0; i < NUM_FABRIC_SERDES; i++) {
+ rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
+ sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
+ if (i > 0 && fabric_vers[0] != fabric_vers[i])
+ all_same = 0;
+ }
+
+ if (all_same) {
+ dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
+ fabric_vers[0]);
+ } else {
+ dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
+ for (i = 0; i < NUM_FABRIC_SERDES; i++) {
+ dd_dev_info(dd,
+ "Fabric SerDes lane %d firmware version 0x%x\n",
+ i, fabric_vers[i]);
+ }
+ }
+
+ clear_sbus_fast_mode(dd);
+ release_chip_resource(dd, CR_SBUS);
+}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4417a0fd3ef9..1000e0fd96d9 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -62,6 +62,8 @@
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
@@ -253,7 +255,7 @@ struct hfi1_ctxtdata {
/* chip offset of PIO buffers for this ctxt */
u32 piobufs;
/* per-context configuration flags */
- u32 flags;
+ unsigned long flags;
/* per-context event flags for fileops/intr communication */
unsigned long event_flags;
/* WAIT_RCV that timed out, no interrupt */
@@ -268,9 +270,6 @@ struct hfi1_ctxtdata {
u32 urgent;
/* saved total number of polled urgent packets for poll edge trigger */
u32 urgent_poll;
- /* pid of process using this ctxt */
- pid_t pid;
- pid_t subpid[HFI1_MAX_SHARED_CTXTS];
/* same size as task_struct .comm[], command that opened context */
char comm[TASK_COMM_LEN];
/* so file ops can get at unit */
@@ -366,11 +365,6 @@ struct hfi1_packet {
u8 etype;
};
-static inline bool has_sc4_bit(struct hfi1_packet *p)
-{
- return !!rhf_dc_info(p->rhf);
-}
-
/*
* Private data for snoop/capture support.
*/
@@ -805,10 +799,19 @@ struct hfi1_temp {
u8 triggers; /* temperature triggers */
};
+struct hfi1_i2c_bus {
+ struct hfi1_devdata *controlling_dd; /* current controlling device */
+ struct i2c_adapter adapter; /* bus details */
+ struct i2c_algo_bit_data algo; /* bus algorithm details */
+ int num; /* bus number, 0 or 1 */
+};
+
/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
struct hfi1_devdata *dds[2]; /* back pointers */
struct mutex asic_resource_mutex;
+ struct hfi1_i2c_bus *i2c_bus0;
+ struct hfi1_i2c_bus *i2c_bus1;
};
/* device data struct now contains only "general per-device" info.
@@ -1128,7 +1131,8 @@ struct hfi1_devdata {
NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
/* Software counter that aggregates all cce_err_status errors */
u64 sw_cce_err_status_aggregate;
-
+ /* Software counter that aggregates all bypass packet rcv errors */
+ u64 sw_rcv_bypass_packet_errors;
/* receive interrupt functions */
rhf_rcv_function_ptr *rhf_rcv_function_map;
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
@@ -1174,6 +1178,8 @@ struct hfi1_devdata {
/* 8051 firmware version helper */
#define dc8051_ver(a, b) ((a) << 8 | (b))
+#define dc8051_ver_maj(a) ((a & 0xff00) >> 8)
+#define dc8051_ver_min(a) (a & 0x00ff)
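
The added dc8051_ver_maj()/dc8051_ver_min() helpers simply split the packed 8051 version word back into its major and minor bytes. A quick round-trip check, written as a stand-alone program (extra parentheses added for safety in this sketch):

#include <stdio.h>

#define dc8051_ver(a, b)   ((a) << 8 | (b))
#define dc8051_ver_maj(a)  (((a) & 0xff00) >> 8)
#define dc8051_ver_min(a)  ((a) & 0x00ff)

int main(void)
{
	unsigned int v = dc8051_ver(1, 35);	/* e.g. firmware 1.35 */

	/* prints "1.35": major and minor recovered from the packed value */
	printf("%u.%u\n", dc8051_ver_maj(v), dc8051_ver_min(v));
	return 0;
}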
/* f_put_tid types */
#define PT_EXPECTED 0
@@ -1182,6 +1188,7 @@ struct hfi1_devdata {
struct tid_rb_node;
struct mmu_rb_node;
+struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
@@ -1192,7 +1199,7 @@ struct hfi1_filedata {
/* for cpu affinity; -1 if none */
int rec_cpu_num;
u32 tid_n_pinned;
- struct rb_root tid_rb_root;
+ struct mmu_rb_handler *handler;
struct tid_rb_node **entry_to_rb;
spinlock_t tid_lock; /* protect tid_[limit,used] counters */
u32 tid_limit;
@@ -1201,6 +1208,7 @@ struct hfi1_filedata {
u32 invalid_tid_idx;
/* protect invalid_tids array and invalid_tid_idx */
spinlock_t invalid_lock;
+ struct mm_struct *mm;
};
extern struct list_head hfi1_dev_list;
@@ -1234,6 +1242,8 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
void set_all_slowpath(struct hfi1_devdata *dd);
+extern const struct pci_device_id hfi1_pci_tbl[];
+
/* receive packet handler dispositions */
#define RCV_PKT_OK 0x0 /* keep going */
#define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
@@ -1259,7 +1269,7 @@ void receive_interrupt_work(struct work_struct *work);
static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
{
return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
- ((!!(rhf & RHF_DC_INFO_SMASK)) << 4);
+ ((!!(rhf_dc_info(rhf))) << 4);
}
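
The reworked hdr2sc() builds the 5-bit service class from the 4-bit SC field in LRH word 0 (bits 15:12) plus the RHF DC-info bit as SC bit 4. A host-order sketch of the same combination (stand-alone names; the driver converts the LRH word from big-endian first):

#include <stdint.h>
#include <stdio.h>

/* Combine the 4-bit SC from LRH[0] bits 15:12 with a dc_info
 * flag that supplies SC bit 4, giving the full 5-bit SC value. */
static unsigned int sc_from_lrh0(uint16_t lrh0, int dc_info)
{
	return ((lrh0 >> 12) & 0xf) | ((!!dc_info) << 4);
}

int main(void)
{
	/* SC field of 0x3 plus dc_info set -> SC 0x13 */
	printf("sc5 = 0x%x\n", sc_from_lrh0(0x3ABC, 1));
	return 0;
}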
static inline u16 generate_jkey(kuid_t uid)
@@ -1569,6 +1579,22 @@ static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
return &dd->pport[pidx].ibport_data;
}
+void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool do_cnp);
+static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool do_cnp)
+{
+ struct hfi1_other_headers *ohdr = pkt->ohdr;
+ u32 bth1;
+
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
+ hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
+ return bth1 & HFI1_FECN_SMASK;
+ }
+ return false;
+}
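
The new inline process_ecn() keeps the hot receive path cheap: it calls hfi1_process_ecn_slowpath() only when the FECN or BECN bit is actually set in BTH word 1, and reports whether FECN was seen. A sketch of that gating logic with made-up mask values (the real HFI1_FECN_SMASK/HFI1_BECN_SMASK positions are not assumed here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit positions, for illustration only. */
#define FECN_SMASK (1u << 31)
#define BECN_SMASK (1u << 30)

static void slowpath(uint32_t bth1)
{
	printf("slow path: bth1=0x%08x\n", bth1);
}

/* Fast-path gate: enter the slow path only when a congestion
 * notification bit is set; report whether FECN was seen. */
static bool check_ecn(uint32_t bth1)
{
	if (bth1 & (FECN_SMASK | BECN_SMASK)) {
		slowpath(bth1);
		return bth1 & FECN_SMASK;
	}
	return false;
}

int main(void)
{
	printf("fecn=%d\n", check_ecn(0x80000000u));	/* FECN set */
	printf("fecn=%d\n", check_ecn(0x00000010u));	/* neither set */
	return 0;
}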
+
/*
* Return the indexed PKEY from the port PKEY table.
*/
@@ -1586,8 +1612,7 @@ static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
}
/*
- * Readers of cc_state must call get_cc_state() under rcu_read_lock().
- * Writers of cc_state must call get_cc_state() under cc_state_lock.
+ * Called by readers of cc_state only, must call under rcu_read_lock().
*/
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
@@ -1595,6 +1620,16 @@ static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
}
/*
+ * Called by writers of cc_state only, must call under cc_state_lock.
+ */
+static inline
+struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
+{
+ return rcu_dereference_protected(ppd->cc_state,
+ lockdep_is_held(&ppd->cc_state_lock));
+}
+
+/*
* values for dd->flags (_device_ related flags)
*/
#define HFI1_INITTED 0x1 /* chip and driver up and initted */
@@ -1669,9 +1704,12 @@ void shutdown_led_override(struct hfi1_pportdata *ppd);
*/
#define DEFAULT_RCVHDR_ENTSIZE 32
-bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
-int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ u32 nlocked, u32 npages);
+int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
+ size_t npages, bool writable, struct page **pages);
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+ size_t npages, bool dirty);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
@@ -1947,4 +1985,55 @@ static inline u32 qsfp_resource(struct hfi1_devdata *dd)
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
+#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
+#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
+
+#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
+#define show_ib_opcode(opcode) \
+__print_symbolic(opcode, \
+ ib_opcode_name(RC_SEND_FIRST), \
+ ib_opcode_name(RC_SEND_MIDDLE), \
+ ib_opcode_name(RC_SEND_LAST), \
+ ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_SEND_ONLY), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_READ_REQUEST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
+ ib_opcode_name(RC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_COMPARE_SWAP), \
+ ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(UC_SEND_FIRST), \
+ ib_opcode_name(UC_SEND_MIDDLE), \
+ ib_opcode_name(UC_SEND_LAST), \
+ ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_SEND_ONLY), \
+ ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UD_SEND_ONLY), \
+ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(CNP))
#endif /* _HFI1_KERNEL_H */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index eed971ccd2a1..a358d23ecd54 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -64,6 +64,7 @@
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
+#include "affinity.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -474,8 +475,9 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
- int i, size;
+ int i;
uint default_pkey_idx;
+ struct cc_state *cc_state;
ppd->dd = dd;
ppd->hw_pidx = hw_pidx;
@@ -526,9 +528,9 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cc_state_lock);
spin_lock_init(&ppd->cc_log_lock);
- size = sizeof(struct cc_state);
- RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
- if (!rcu_dereference(ppd->cc_state))
+ cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
+ RCU_INIT_POINTER(ppd->cc_state, cc_state);
+ if (!cc_state)
goto bail;
return;
@@ -972,39 +974,49 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
/*
* Release our hold on the shared asic data. If we are the last one,
- * free the structure. Must be holding hfi1_devs_lock.
+ * return the structure to be finalized outside the lock. Must be
+ * holding hfi1_devs_lock.
*/
-static void release_asic_data(struct hfi1_devdata *dd)
+static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
+ struct hfi1_asic_data *ad;
int other;
if (!dd->asic_data)
- return;
+ return NULL;
dd->asic_data->dds[dd->hfi1_id] = NULL;
other = dd->hfi1_id ? 0 : 1;
- if (!dd->asic_data->dds[other]) {
- /* we are the last holder, free it */
- kfree(dd->asic_data);
- }
+ ad = dd->asic_data;
dd->asic_data = NULL;
+ /* return NULL if the other dd still has a link */
+ return ad->dds[other] ? NULL : ad;
+}
+
+static void finalize_asic_data(struct hfi1_devdata *dd,
+ struct hfi1_asic_data *ad)
+{
+ clean_up_i2c(dd, ad);
+ kfree(ad);
}
static void __hfi1_free_devdata(struct kobject *kobj)
{
struct hfi1_devdata *dd =
container_of(kobj, struct hfi1_devdata, kobj);
+ struct hfi1_asic_data *ad;
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
idr_remove(&hfi1_unit_table, dd->unit);
list_del(&dd->list);
- release_asic_data(dd);
+ ad = release_asic_data(dd);
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ if (ad)
+ finalize_asic_data(dd, ad);
free_platform_config(dd);
rcu_barrier(); /* wait for rcu callbacks to complete */
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
- hfi1_dev_affinity_free(dd);
free_percpu(dd->send_schedule);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1162,7 +1174,7 @@ static int init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
-static const struct pci_device_id hfi1_pci_tbl[] = {
+const struct pci_device_id hfi1_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
{ 0, }
@@ -1198,6 +1210,10 @@ static int __init hfi1_mod_init(void)
if (ret)
goto bail;
+ ret = node_affinity_init();
+ if (ret)
+ goto bail;
+
/* validate max MTU before any devices start */
if (!valid_opa_max_mtu(hfi1_max_mtu)) {
pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
@@ -1278,6 +1294,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
+ node_affinity_destroy();
hfi1_wss_exit();
hfi1_dbg_exit();
hfi1_cpulist_count = 0;
@@ -1311,7 +1328,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
spin_lock(&ppd->cc_state_lock);
- cc_state = get_cc_state(ppd);
+ cc_state = get_cc_state_protected(ppd);
RCU_INIT_POINTER(ppd->cc_state, NULL);
spin_unlock(&ppd->cc_state_lock);
@@ -1760,8 +1777,8 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
hfi1_cdbg(PROC,
"ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
- rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
- rcd->egrbufs.size);
+ rcd->ctxt, rcd->egrbufs.alloced,
+ rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
/*
* Set the contexts rcv array head update threshold to the closest
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index fca07a1d6c28..1263abe01999 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -588,7 +588,6 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->port_phys_conf = (ppd->port_type & 0xf);
-#if PI_LED_ENABLE_SUP
pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
pi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
@@ -602,11 +601,6 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
pi->port_states.ledenable_offlinereason |=
ppd->offline_disabled_reason;
-#else
- pi->port_states.offline_reason = ppd->neighbor_normal << 4;
- pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- pi->port_states.offline_reason |= ppd->offline_disabled_reason;
-#endif /* PI_LED_ENABLE_SUP */
pi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | state;
@@ -1752,17 +1746,11 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
if (start_of_sm_config && (lstate == IB_PORT_INIT))
ppd->is_sm_config_started = 1;
-#if PI_LED_ENABLE_SUP
psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
psi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
psi->port_states.ledenable_offlinereason |=
ppd->offline_disabled_reason;
-#else
- psi->port_states.offline_reason = ppd->neighbor_normal << 4;
- psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- psi->port_states.offline_reason |= ppd->offline_disabled_reason;
-#endif /* PI_LED_ENABLE_SUP */
psi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
@@ -2430,14 +2418,9 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
rsp->port_rcv_remote_physical_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
+ rsp->local_link_integrity_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL));
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
CNTR_INVALID_VL);
@@ -2499,6 +2482,9 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
idx_from_vl(vl)));
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl)));
vlinfo++;
vfi++;
}
@@ -2529,9 +2515,8 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL);
/* local link integrity must be right-shifted by the lli resolution */
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- error_counter_summary += (tmp >> res_lli);
+ error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL) >> res_lli);
 /* link error recovery must be right-shifted by the ler resolution */
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
@@ -2800,14 +2785,9 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
rsp->port_rcv_constraint_errors =
cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
+ rsp->local_link_integrity_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL));
rsp->excessive_buffer_overruns =
cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
}
@@ -2883,14 +2863,17 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
-
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
vlinfo = &rsp->vls[0];
vfi = 0;
vl_select_mask = be32_to_cpu(req->vl_select_mask);
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
8 * sizeof(req->vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo));
- /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl)));
vlinfo += 1;
vfi++;
}
@@ -3162,10 +3145,8 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
- if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
- write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
+ if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
- }
if (counter_select & CS_LINK_ERROR_RECOVERY) {
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
@@ -3223,7 +3204,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
/* if (counter_select & CS_PORT_MARK_FECN)
* write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
*/
- /* port_vl_xmit_discards ??? */
+ if (counter_select & C_SW_XMIT_DSCD_VL)
+ write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl), 0);
}
if (resp_len)
@@ -3392,7 +3375,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
*/
spin_lock(&ppd->cc_state_lock);
- old_cc_state = get_cc_state(ppd);
+ old_cc_state = get_cc_state_protected(ppd);
if (!old_cc_state) {
/* never active, or shutting down */
spin_unlock(&ppd->cc_state_lock);
@@ -3960,7 +3943,6 @@ void clear_linkup_counters(struct hfi1_devdata *dd)
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
/* LocalLinkIntegrityErrors */
- write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
/* ExcessiveBufferOverruns */
write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 8b734aaae88a..5aa3fd1be653 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -48,15 +48,8 @@
#define _HFI1_MAD_H
#include <rdma/ib_pma.h>
-#define USE_PI_LED_ENABLE 1 /*
- * use led enabled bit in struct
- * opa_port_states, if available
- */
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>
-#ifndef PI_LED_ENABLE_SUP
-#define PI_LED_ENABLE_SUP 0
-#endif
#include "opa_compat.h"
/*
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index b7a80aa1ae30..7ad30898fc19 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -53,19 +53,20 @@
#include "trace.h"
struct mmu_rb_handler {
- struct list_head list;
struct mmu_notifier mn;
- struct rb_root *root;
+ struct rb_root root;
+ void *ops_arg;
spinlock_t lock; /* protect the RB tree */
struct mmu_rb_ops *ops;
+ struct mm_struct *mm;
+ struct list_head lru_list;
+ struct work_struct del_work;
+ struct list_head del_list;
+ struct workqueue_struct *wq;
};
-static LIST_HEAD(mmu_rb_handlers);
-static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
-
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
@@ -76,6 +77,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
+static void do_remove(struct mmu_rb_handler *handler,
+ struct list_head *del_list);
+static void handle_remove(struct work_struct *work);
static struct mmu_notifier_ops mn_opts = {
.invalidate_page = mmu_notifier_page,
@@ -95,73 +99,79 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
return PAGE_ALIGN(node->addr + node->len) - 1;
}
-int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
+int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+ struct mmu_rb_ops *ops,
+ struct workqueue_struct *wq,
+ struct mmu_rb_handler **handler)
{
struct mmu_rb_handler *handlr;
-
- if (!ops->invalidate)
- return -EINVAL;
+ int ret;
handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
if (!handlr)
return -ENOMEM;
- handlr->root = root;
+ handlr->root = RB_ROOT;
handlr->ops = ops;
+ handlr->ops_arg = ops_arg;
INIT_HLIST_NODE(&handlr->mn.hlist);
spin_lock_init(&handlr->lock);
handlr->mn.ops = &mn_opts;
- spin_lock(&mmu_rb_lock);
- list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
- spin_unlock(&mmu_rb_lock);
+ handlr->mm = mm;
+ INIT_WORK(&handlr->del_work, handle_remove);
+ INIT_LIST_HEAD(&handlr->del_list);
+ INIT_LIST_HEAD(&handlr->lru_list);
+ handlr->wq = wq;
+
+ ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+ if (ret) {
+ kfree(handlr);
+ return ret;
+ }
- return mmu_notifier_register(&handlr->mn, current->mm);
+ *handler = handlr;
+ return 0;
}
-void hfi1_mmu_rb_unregister(struct rb_root *root)
+void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
+ struct mmu_rb_node *rbnode;
+ struct rb_node *node;
unsigned long flags;
-
- if (!handler)
- return;
+ struct list_head del_list;
/* Unregister first so we don't get any more notifications. */
- if (current->mm)
- mmu_notifier_unregister(&handler->mn, current->mm);
+ mmu_notifier_unregister(&handler->mn, handler->mm);
- spin_lock(&mmu_rb_lock);
- list_del_rcu(&handler->list);
- spin_unlock(&mmu_rb_lock);
- synchronize_rcu();
+ /*
+ * Make sure the wq delete handler is finished running. It will not
+ * be triggered once the mmu notifiers are unregistered above.
+ */
+ flush_work(&handler->del_work);
+
+ INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
- if (!RB_EMPTY_ROOT(root)) {
- struct rb_node *node;
- struct mmu_rb_node *rbnode;
-
- while ((node = rb_first(root))) {
- rbnode = rb_entry(node, struct mmu_rb_node, node);
- rb_erase(node, root);
- if (handler->ops->remove)
- handler->ops->remove(root, rbnode, NULL);
- }
+ while ((node = rb_first(&handler->root))) {
+ rbnode = rb_entry(node, struct mmu_rb_node, node);
+ rb_erase(node, &handler->root);
+ /* move from LRU list to delete list */
+ list_move(&rbnode->list, &del_list);
}
spin_unlock_irqrestore(&handler->lock, flags);
+ do_remove(handler, &del_list);
+
kfree(handler);
}
-int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
+int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
struct mmu_rb_node *node;
unsigned long flags;
int ret = 0;
- if (!handler)
- return -EINVAL;
-
spin_lock_irqsave(&handler->lock, flags);
hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
mnode->len);
@@ -170,12 +180,13 @@ int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
ret = -EINVAL;
goto unlock;
}
- __mmu_int_rb_insert(mnode, root);
+ __mmu_int_rb_insert(mnode, &handler->root);
+ list_add(&mnode->list, &handler->lru_list);
- if (handler->ops->insert) {
- ret = handler->ops->insert(root, mnode);
- if (ret)
- __mmu_int_rb_remove(mnode, root);
+ ret = handler->ops->insert(handler->ops_arg, mnode);
+ if (ret) {
+ __mmu_int_rb_remove(mnode, &handler->root);
+ list_del(&mnode->list); /* remove from LRU list */
}
unlock:
spin_unlock_irqrestore(&handler->lock, flags);
@@ -191,10 +202,10 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len);
if (!handler->ops->filter) {
- node = __mmu_int_rb_iter_first(handler->root, addr,
+ node = __mmu_int_rb_iter_first(&handler->root, addr,
(addr + len) - 1);
} else {
- for (node = __mmu_int_rb_iter_first(handler->root, addr,
+ for (node = __mmu_int_rb_iter_first(&handler->root, addr,
(addr + len) - 1);
node;
node = __mmu_int_rb_iter_next(node, addr,
@@ -206,82 +217,72 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}
-/* Caller must *not* hold handler lock. */
-static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, struct mm_struct *mm)
-{
- unsigned long flags;
-
- /* Validity of handler and node pointers has been checked by caller. */
- hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
- node->len);
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_int_rb_remove(node, handler->root);
- spin_unlock_irqrestore(&handler->lock, flags);
-
- if (handler->ops->remove)
- handler->ops->remove(handler->root, node, mm);
-}
-
-struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
- unsigned long len)
+struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
struct mmu_rb_node *node;
unsigned long flags;
- if (!handler)
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
+ if (node) {
+ __mmu_int_rb_remove(node, &handler->root);
+ list_del(&node->list); /* remove from LRU list */
+ }
spin_unlock_irqrestore(&handler->lock, flags);
return node;
}
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *root,
- unsigned long addr, unsigned long len)
+void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
- struct mmu_rb_node *node;
+ struct mmu_rb_node *rbnode, *ptr;
+ struct list_head del_list;
unsigned long flags;
+ bool stop = false;
- if (!handler)
- return ERR_PTR(-EINVAL);
+ INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
- node = __mmu_rb_search(handler, addr, len);
- if (node)
- __mmu_int_rb_remove(node, handler->root);
+ list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
+ list) {
+ if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ &stop)) {
+ __mmu_int_rb_remove(rbnode, &handler->root);
+ /* move from LRU list to delete list */
+ list_move(&rbnode->list, &del_list);
+ }
+ if (stop)
+ break;
+ }
spin_unlock_irqrestore(&handler->lock, flags);
- return node;
+ while (!list_empty(&del_list)) {
+ rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
+ list_del(&rbnode->list);
+ handler->ops->remove(handler->ops_arg, rbnode);
+ }
}
-void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
+/*
+ * It is up to the caller to ensure that this function does not race with the
+ * mmu invalidate notifier which may be calling the user's remove callback on
+ * 'node'.
+ */
+void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *node)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
-
- if (!handler || !node)
- return;
-
- __mmu_rb_remove(handler, node, NULL);
-}
+ unsigned long flags;
-static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
-{
- struct mmu_rb_handler *handler;
+ /* Validity of handler and node pointers has been checked by caller. */
+ hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
+ node->len);
+ spin_lock_irqsave(&handler->lock, flags);
+ __mmu_int_rb_remove(node, &handler->root);
+ list_del(&node->list); /* remove from LRU list */
+ spin_unlock_irqrestore(&handler->lock, flags);
- rcu_read_lock();
- list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
- if (handler->root == root)
- goto unlock;
- }
- handler = NULL;
-unlock:
- rcu_read_unlock();
- return handler;
+ handler->ops->remove(handler->ops_arg, node);
}
static inline void mmu_notifier_page(struct mmu_notifier *mn,
@@ -304,9 +305,10 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
- struct rb_root *root = handler->root;
+ struct rb_root *root = &handler->root;
struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
+ bool added = false;
spin_lock_irqsave(&handler->lock, flags);
for (node = __mmu_int_rb_iter_first(root, start, end - 1);
@@ -315,11 +317,53 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
ptr = __mmu_int_rb_iter_next(node, start, end - 1);
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
- if (handler->ops->invalidate(root, node)) {
+ if (handler->ops->invalidate(handler->ops_arg, node)) {
__mmu_int_rb_remove(node, root);
- if (handler->ops->remove)
- handler->ops->remove(root, node, mm);
+ /* move from LRU list to delete list */
+ list_move(&node->list, &handler->del_list);
+ added = true;
}
}
spin_unlock_irqrestore(&handler->lock, flags);
+
+ if (added)
+ queue_work(handler->wq, &handler->del_work);
+}
+
+/*
+ * Call the remove function for the given handler and list. This is
+ * expected to be called with a delete list extracted from the handler.
+ * The caller should not be holding the handler lock.
+ */
+static void do_remove(struct mmu_rb_handler *handler,
+ struct list_head *del_list)
+{
+ struct mmu_rb_node *node;
+
+ while (!list_empty(del_list)) {
+ node = list_first_entry(del_list, struct mmu_rb_node, list);
+ list_del(&node->list);
+ handler->ops->remove(handler->ops_arg, node);
+ }
+}
+
+/*
+ * Work queue function to remove all nodes that have been queued up to
+ * be removed. The key feature is that mm->mmap_sem is not being held
+ * and the remove callback can sleep while taking it, if needed.
+ */
+static void handle_remove(struct work_struct *work)
+{
+ struct mmu_rb_handler *handler = container_of(work,
+ struct mmu_rb_handler,
+ del_work);
+ struct list_head del_list;
+ unsigned long flags;
+
+ /* remove anything that is queued to get removed */
+ spin_lock_irqsave(&handler->lock, flags);
+ list_replace_init(&handler->del_list, &del_list);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ do_remove(handler, &del_list);
}
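
The mmu_rb.c changes above move the sleepable remove callback out of the notifier path: nodes are unlinked from the tree and the LRU under the spinlock, parked on handler->del_list, and a workqueue item later walks a detached copy of that list with no lock held. A minimal, self-contained sketch of the same detach-then-process idiom, using hypothetical demo_* types rather than the driver's own:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_node {			/* hypothetical stand-in for mmu_rb_node */
	struct list_head list;
};

struct demo_handler {			/* hypothetical stand-in for mmu_rb_handler */
	spinlock_t lock;
	struct list_head del_list;	/* nodes queued for deferred removal */
	struct work_struct del_work;
};

/* Runs in process context from a workqueue, so it is allowed to sleep. */
static void demo_handle_remove(struct work_struct *work)
{
	struct demo_handler *h = container_of(work, struct demo_handler, del_work);
	struct list_head del_list;
	struct demo_node *node;
	unsigned long flags;

	/* Detach the whole pending list while holding the lock... */
	spin_lock_irqsave(&h->lock, flags);
	list_replace_init(&h->del_list, &del_list);
	spin_unlock_irqrestore(&h->lock, flags);

	/* ...then process the private copy with no lock held. */
	while (!list_empty(&del_list)) {
		node = list_first_entry(&del_list, struct demo_node, list);
		list_del(&node->list);
		kfree(node);		/* stands in for ops->remove(ops_arg, node) */
	}
}

The detach via list_replace_init() is what lets the notifier keep queueing new victims while an earlier batch is still being torn down.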
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 7a57b9c49d27..754f6ebf13fb 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -54,23 +54,34 @@ struct mmu_rb_node {
unsigned long len;
unsigned long __last;
struct rb_node node;
+ struct list_head list;
};
+/*
+ * NOTE: filter, insert, invalidate, and evict must not sleep. Only remove is
+ * allowed to sleep.
+ */
struct mmu_rb_ops {
- bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
- int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
- int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
+ bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+ int (*insert)(void *ops_arg, struct mmu_rb_node *mnode);
+ void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
+ int (*invalidate)(void *ops_arg, struct mmu_rb_node *node);
+ int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop);
};
-int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops);
-void hfi1_mmu_rb_unregister(struct rb_root *);
-int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *);
-struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long,
- unsigned long);
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *, unsigned long,
- unsigned long);
+int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+ struct mmu_rb_ops *ops,
+ struct workqueue_struct *wq,
+ struct mmu_rb_handler **handler);
+void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode);
+void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode);
+struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len);
#endif /* _HFI1_MMU_RB_H */
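
With the handler now opaque and every callback taking the ops_arg cookie, a caller wires things up roughly as below. This is only an illustrative sketch against the prototypes above; the demo_* callback bodies, demo_ctx and demo_wq are invented for the example, and the .filter callback is left out for brevity:

#include <linux/sched.h>	/* current */
#include <linux/workqueue.h>
#include "mmu_rb.h"

static int demo_insert(void *ops_arg, struct mmu_rb_node *mnode)
{
	return 0;			/* must not sleep */
}

static void demo_remove(void *ops_arg, struct mmu_rb_node *mnode)
{
	/* the only callback allowed to sleep (runs from the workqueue) */
}

static int demo_invalidate(void *ops_arg, struct mmu_rb_node *node)
{
	return 1;			/* non-zero: unlink node and queue it for remove */
}

static int demo_evict(void *ops_arg, struct mmu_rb_node *mnode,
		      void *evict_arg, bool *stop)
{
	*stop = true;			/* evict this one node, then stop the walk */
	return 1;
}

static struct mmu_rb_ops demo_ops = {	/* .filter not shown in this sketch */
	.insert     = demo_insert,
	.remove     = demo_remove,
	.invalidate = demo_invalidate,
	.evict      = demo_evict,
};

static int demo_register(void *demo_ctx, struct workqueue_struct *demo_wq,
			 struct mmu_rb_handler **handler)
{
	/* register against the calling task's address space */
	return hfi1_mmu_rb_register(demo_ctx, current->mm, &demo_ops,
				    demo_wq, handler);
}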
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 0bac21e6a658..89c68da1c273 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -679,6 +679,10 @@ static uint pcie_pset = UNSET_PSET;
module_param(pcie_pset, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
+static uint pcie_ctle = 1; /* discrete on, integrated off */
+module_param(pcie_ctle, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
+
/* equalization columns */
#define PREC 0
#define ATTN 1
@@ -716,6 +720,36 @@ static const u8 integrated_preliminary_eq[11][3] = {
{ 0x00, 0x1e, 0x0a }, /* p10 */
};
+static const u8 discrete_ctle_tunings[11][4] = {
+ /* DC LF HF BW */
+ { 0x48, 0x0b, 0x04, 0x04 }, /* p0 */
+ { 0x60, 0x05, 0x0f, 0x0a }, /* p1 */
+ { 0x50, 0x09, 0x06, 0x06 }, /* p2 */
+ { 0x68, 0x05, 0x0f, 0x0a }, /* p3 */
+ { 0x80, 0x05, 0x0f, 0x0a }, /* p4 */
+ { 0x70, 0x05, 0x0f, 0x0a }, /* p5 */
+ { 0x68, 0x05, 0x0f, 0x0a }, /* p6 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
+ { 0x48, 0x09, 0x06, 0x06 }, /* p8 */
+ { 0x60, 0x05, 0x0f, 0x0a }, /* p9 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p10 */
+};
+
+static const u8 integrated_ctle_tunings[11][4] = {
+ /* DC LF HF BW */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p0 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p1 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p2 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p3 */
+ { 0x58, 0x0a, 0x05, 0x05 }, /* p4 */
+ { 0x48, 0x0a, 0x05, 0x05 }, /* p5 */
+ { 0x40, 0x0a, 0x05, 0x05 }, /* p6 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p8 */
+ { 0x38, 0x09, 0x06, 0x06 }, /* p9 */
+ { 0x38, 0x0e, 0x01, 0x01 }, /* p10 */
+};
+
/* helper to format the value to write to hardware */
#define eq_value(pre, curr, post) \
((((u32)(pre)) << \
@@ -951,11 +985,14 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
u32 status, err;
int ret;
int do_retry, retry_count = 0;
+ int intnum = 0;
uint default_pset;
u16 target_vector, target_speed;
u16 lnkctl2, vendor;
u8 div;
const u8 (*eq)[3];
+ const u8 (*ctle_tunings)[4];
+ uint static_ctle_mode;
int return_error = 0;
/* PCIe Gen3 is for the ASIC only */
@@ -1089,6 +1126,9 @@ retry:
div = 3;
eq = discrete_preliminary_eq;
default_pset = DEFAULT_DISCRETE_PSET;
+ ctle_tunings = discrete_ctle_tunings;
+ /* bit 0 - discrete on/off */
+ static_ctle_mode = pcie_ctle & 0x1;
} else {
/* 400mV, FS=29, LF = 9 */
fs = 29;
@@ -1096,6 +1136,9 @@ retry:
div = 1;
eq = integrated_preliminary_eq;
default_pset = DEFAULT_MCP_PSET;
+ ctle_tunings = integrated_ctle_tunings;
+ /* bit 1 - integrated on/off */
+ static_ctle_mode = (pcie_ctle >> 1) & 0x1;
}
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
(fs <<
@@ -1135,16 +1178,33 @@ retry:
* step 5c: Program gasket interrupts
*/
/* set the Rx Bit Rate to REFCLK ratio */
- write_gasket_interrupt(dd, 0, 0x0006, 0x0050);
+ write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
/* disable pCal for PCIe Gen3 RX equalization */
- write_gasket_interrupt(dd, 1, 0x0026, 0x5b01);
+ /* select adaptive or static CTLE */
+ write_gasket_interrupt(dd, intnum++, 0x0026,
+ 0x5b01 | (static_ctle_mode << 3));
/*
* Enable iCal for PCIe Gen3 RX equalization, and set which
* evaluation of RX_EQ_EVAL will launch the iCal procedure.
*/
- write_gasket_interrupt(dd, 2, 0x0026, 0x5202);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
+
+ if (static_ctle_mode) {
+ /* apply static CTLE tunings */
+ u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw;
+
+ pcie_dc = ctle_tunings[pcie_pset][0];
+ pcie_lf = ctle_tunings[pcie_pset][1];
+ pcie_hf = ctle_tunings[pcie_pset][2];
+ pcie_bw = ctle_tunings[pcie_pset][3];
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
+ }
+
/* terminate list */
- write_gasket_interrupt(dd, 3, 0x0000, 0x0000);
+ write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
/*
* step 5d: program XMT margin
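
The new pcie_ctle module parameter packs two independent enables into one value: bit 0 controls static CTLE for discrete devices, bit 1 for integrated devices, which is exactly the decode done in the retry path above (the default of 1 means discrete on, integrated off). A standalone restatement of that decode, with an illustrative helper name:

/* pcie_ctle layout: bit 0 = discrete on/off, bit 1 = integrated on/off */
static unsigned int demo_static_ctle_enabled(unsigned int pcie_ctle,
					     int is_discrete)
{
	return is_discrete ? (pcie_ctle & 0x1) : ((pcie_ctle >> 1) & 0x1);
}

When the result is non-zero, the code above takes the DC/LF/HF/BW row of the matching *_ctle_tunings table for the selected pcie_pset and programs it through four extra gasket-interrupt writes.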
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d4022450b73f..ac1bf4a73571 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1952,13 +1952,17 @@ int init_pervl_scs(struct hfi1_devdata *dd)
dd->vld[15].sc = sc_alloc(dd, SC_VL15,
dd->rcd[0]->rcvhdrqentsize, dd->node);
if (!dd->vld[15].sc)
- goto nomem;
+ return -ENOMEM;
+
hfi1_init_ctxt(dd->vld[15].sc);
dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
- dd->kernel_send_context = kmalloc_node(dd->num_send_contexts *
+ dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
sizeof(struct send_context *),
GFP_KERNEL, dd->node);
+ if (!dd->kernel_send_context)
+ goto freesc15;
+
dd->kernel_send_context[0] = dd->vld[15].sc;
for (i = 0; i < num_vls; i++) {
@@ -2010,12 +2014,21 @@ int init_pervl_scs(struct hfi1_devdata *dd)
if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
goto nomem;
return 0;
+
nomem:
- sc_free(dd->vld[15].sc);
- for (i = 0; i < num_vls; i++)
+ for (i = 0; i < num_vls; i++) {
sc_free(dd->vld[i].sc);
+ dd->vld[i].sc = NULL;
+ }
+
for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
sc_free(dd->kernel_send_context[i + 1]);
+
+ kfree(dd->kernel_send_context);
+ dd->kernel_send_context = NULL;
+
+freesc15:
+ sc_free(dd->vld[15].sc);
return -ENOMEM;
}
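
The reworked init_pervl_scs() error path above frees only what was actually allocated, in reverse order, NULLs the freed pointers, and adds a dedicated freesc15 label so a failed kzalloc_node() no longer walks a NULL kernel_send_context. The same unwind shape, reduced to a self-contained two-step sketch around a hypothetical demo_ctx:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_ctx {			/* hypothetical container, not from the driver */
	void *first;
	void *second;
};

static int demo_alloc_two(struct demo_ctx *d)
{
	d->first = kzalloc(64, GFP_KERNEL);
	if (!d->first)
		return -ENOMEM;		/* nothing to unwind yet */

	d->second = kzalloc(64, GFP_KERNEL);
	if (!d->second)
		goto free_first;	/* unwind only what already succeeded */

	return 0;

free_first:
	kfree(d->first);
	d->first = NULL;		/* mirror the hunk's free-and-NULL style */
	return -ENOMEM;
}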
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 03df9322f862..965c8aef0c60 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -537,20 +537,6 @@ static void apply_tunings(
u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
u8 *cache = ppd->qsfp_info.cache;
- /* Enable external device config if channel is limiting active */
- read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
- GENERAL_CONFIG, &config_data);
- config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
- config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
- ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
- GENERAL_CONFIG, config_data);
- if (ret != HCMD_SUCCESS)
- dd_dev_err(
- ppd->dd,
- "%s: Failed to set enable external device config\n",
- __func__);
-
- config_data = 0; /* re-init */
/* Pass tuning method to 8051 */
read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
&config_data);
@@ -638,9 +624,13 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
if (ret)
return ret;
+ /*
+ * We'll change the QSFP memory contents from here on out, so we set a flag
+ * here to remind ourselves to reset the QSFP module. This prevents reuse of
+ * stale settings established in a previous pass through this code.
+ */
if (ppd->qsfp_info.reset_needed) {
reset_qsfp(ppd);
- ppd->qsfp_info.reset_needed = 0;
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
} else {
ppd->qsfp_info.reset_needed = 1;
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 1a942ffba4cb..a5aa3517e7d5 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -52,6 +52,7 @@
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
+#include <rdma/ib_verbs.h>
#include "hfi.h"
#include "qp.h"
@@ -115,6 +116,66 @@ static const u16 credit_table[31] = {
32768 /* 1E */
};
+const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
+[IB_WR_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_RDMA_READ] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC,
+},
+
+[IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND_WITH_IMM] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_REG_MR] = {
+ .length = sizeof(struct ib_reg_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_LOCAL,
+},
+
+[IB_WR_LOCAL_INV] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_LOCAL,
+},
+
+[IB_WR_SEND_WITH_INV] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+},
+
+};
+
static void flush_tx_list(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
@@ -745,8 +806,9 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
priv->owner = qp;
- priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
- if (!priv->s_hdr) {
+ priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+ rdi->dparms.node);
+ if (!priv->s_ahg) {
kfree(priv);
return ERR_PTR(-ENOMEM);
}
@@ -759,7 +821,7 @@ void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- kfree(priv->s_hdr);
+ kfree(priv->s_ahg);
kfree(priv);
}
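
hfi1_post_parms[], added earlier in this file, is a per-opcode table handed to rdmavt: for each IB_WR_* opcode it records the expected work-request structure length, the QP types that accept it (as a BIT() mask), and opcode flags such as RVT_OPERATION_LOCAL or RVT_OPERATION_ATOMIC. A rough, hypothetical illustration of how such a table can be consulted when a WR is posted (this is not rdmavt's actual code):

#include <linux/bitops.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_qp.h>

/* Hypothetical validity check modeled on the hfi1_post_parms table. */
static bool demo_opcode_ok(const struct rvt_operation_params *parms,
			   const struct ib_send_wr *wr,
			   enum ib_qp_type qpt)
{
	const struct rvt_operation_params *p;

	if (wr->opcode >= RVT_OPERATION_MAX)
		return false;
	p = &parms[wr->opcode];
	if (!p->length)				/* opcode not described at all */
		return false;
	if (!(p->qpt_support & BIT(qpt)))	/* QP type does not support it */
		return false;
	return true;
}

A caller would pass hfi1_post_parms, the posted WR, and qp->ibqp.qp_type; the real checks live in rdmavt and are not reproduced here.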
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index e7bc8d6cf681..587d84d65bb8 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -54,6 +54,8 @@
extern unsigned int hfi1_qp_table_size;
+extern const struct rvt_operation_params hfi1_post_parms[];
+
/*
* free_ahg - clear ahg from QP
*/
@@ -61,7 +63,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- priv->s_hdr->ahgcount = 0;
+ priv->s_ahg->ahgcount = 0;
qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 9fb561682c66..a207717ade2a 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -50,46 +50,285 @@
#include <linux/vmalloc.h>
#include "hfi.h"
-#include "twsi.h"
+
+/* for the given bus number, return the CSR for reading an i2c line */
+static inline u32 i2c_in_csr(u32 bus_num)
+{
+ return bus_num ? ASIC_QSFP2_IN : ASIC_QSFP1_IN;
+}
+
+/* for the given bus number, return the CSR for writing an i2c line */
+static inline u32 i2c_oe_csr(u32 bus_num)
+{
+ return bus_num ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
+}
+
+static void hfi1_setsda(void *data, int state)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ struct hfi1_devdata *dd = bus->controlling_dd;
+ u64 reg;
+ u32 target_oe;
+
+ target_oe = i2c_oe_csr(bus->num);
+ reg = read_csr(dd, target_oe);
+ /*
+ * The OE bit value is inverted and connected to the pin. When OE is 0
+ * the pin is left to be pulled up; when OE is 1 the pin is driven low.
+ * This matches the "open drain" or "open collector" convention.
+ */
+ if (state)
+ reg &= ~QSFP_HFI0_I2CDAT;
+ else
+ reg |= QSFP_HFI0_I2CDAT;
+ write_csr(dd, target_oe, reg);
+ /* do a read to force the write into the chip */
+ (void)read_csr(dd, target_oe);
+}
+
+static void hfi1_setscl(void *data, int state)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ struct hfi1_devdata *dd = bus->controlling_dd;
+ u64 reg;
+ u32 target_oe;
+
+ target_oe = i2c_oe_csr(bus->num);
+ reg = read_csr(dd, target_oe);
+ /*
+ * The OE bit value is inverted and connected to the pin. When OE is 0
+ * the pin is left to be pulled up; when OE is 1 the pin is driven low.
+ * This matches the "open drain" or "open collector" convention.
+ */
+ if (state)
+ reg &= ~QSFP_HFI0_I2CCLK;
+ else
+ reg |= QSFP_HFI0_I2CCLK;
+ write_csr(dd, target_oe, reg);
+ /* do a read to force the write into the chip */
+ (void)read_csr(dd, target_oe);
+}
+
+static int hfi1_getsda(void *data)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ u64 reg;
+ u32 target_in;
+
+ hfi1_setsda(data, 1); /* clear OE so we do not pull line down */
+ udelay(2); /* 1us pull up + 250ns hold */
+
+ target_in = i2c_in_csr(bus->num);
+ reg = read_csr(bus->controlling_dd, target_in);
+ return !!(reg & QSFP_HFI0_I2CDAT);
+}
+
+static int hfi1_getscl(void *data)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ u64 reg;
+ u32 target_in;
+
+ hfi1_setscl(data, 1); /* clear OE so we do not pull line down */
+ udelay(2); /* 1us pull up + 250ns hold */
+
+ target_in = i2c_in_csr(bus->num);
+ reg = read_csr(bus->controlling_dd, target_in);
+ return !!(reg & QSFP_HFI0_I2CCLK);
+}
/*
- * QSFP support for hfi driver, using "Two Wire Serial Interface" driver
- * in twsi.c
+ * Allocate and initialize the given i2c bus number.
+ * Returns NULL on failure.
*/
-#define I2C_MAX_RETRY 4
+static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
+ struct hfi1_asic_data *ad, int num)
+{
+ struct hfi1_i2c_bus *bus;
+ int ret;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus->controlling_dd = dd;
+ bus->num = num; /* our bus number */
+
+ bus->algo.setsda = hfi1_setsda;
+ bus->algo.setscl = hfi1_setscl;
+ bus->algo.getsda = hfi1_getsda;
+ bus->algo.getscl = hfi1_getscl;
+ bus->algo.udelay = 5;
+ bus->algo.timeout = usecs_to_jiffies(50);
+ bus->algo.data = bus;
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.algo_data = &bus->algo;
+ bus->adapter.dev.parent = &dd->pcidev->dev;
+ snprintf(bus->adapter.name, sizeof(bus->adapter.name),
+ "hfi1_i2c%d", num);
+
+ ret = i2c_bit_add_bus(&bus->adapter);
+ if (ret) {
+ dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n",
+ __func__, num, ret);
+ kfree(bus);
+ return NULL;
+ }
+
+ return bus;
+}
/*
- * Raw i2c write. No set-up or lock checking.
+ * Initialize i2c buses.
+ * Return 0 on success, -errno on error.
*/
-static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
- int offset, void *bp, int len)
+int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
- struct hfi1_devdata *dd = ppd->dd;
- int ret, cnt;
- u8 *buff = bp;
+ ad->i2c_bus0 = init_i2c_bus(dd, ad, 0);
+ ad->i2c_bus1 = init_i2c_bus(dd, ad, 1);
+ if (!ad->i2c_bus0 || !ad->i2c_bus1)
+ return -ENOMEM;
+ return 0;
+}
- cnt = 0;
- while (cnt < len) {
- int wlen = len - cnt;
+static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
+{
+ if (bus) {
+ i2c_del_adapter(&bus->adapter);
+ kfree(bus);
+ }
+}
- ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset,
- buff + cnt, wlen);
- if (ret) {
- /* hfi1_twsi_blk_wr() 1 for error, else 0 */
- return -EIO;
- }
- offset += wlen;
- cnt += wlen;
+void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
+{
+ clean_i2c_bus(ad->i2c_bus0);
+ ad->i2c_bus0 = NULL;
+ clean_i2c_bus(ad->i2c_bus1);
+ ad->i2c_bus1 = NULL;
+}
+
+static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
+ u8 slave_addr, int offset, int offset_size,
+ u8 *data, u16 len)
+{
+ int ret;
+ int num_msgs;
+ u8 offset_bytes[2];
+ struct i2c_msg msgs[2];
+
+ switch (offset_size) {
+ case 0:
+ num_msgs = 1;
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+ break;
+ case 2:
+ offset_bytes[1] = (offset >> 8) & 0xff;
+ /* fall through */
+ case 1:
+ num_msgs = 2;
+ offset_bytes[0] = offset & 0xff;
+
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = offset_size;
+ msgs[0].buf = offset_bytes;
+
+ msgs[1].addr = slave_addr;
+ msgs[1].flags = I2C_M_NOSTART;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ break;
+ default:
+ return -EINVAL;
}
- /* Must wait min 20us between qsfp i2c transactions */
- udelay(20);
+ i2c->controlling_dd = dd;
+ ret = i2c_transfer(&i2c->adapter, msgs, num_msgs);
+ if (ret != num_msgs) {
+ dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n",
+ __func__, i2c->num, slave_addr, offset, len, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
+ u8 slave_addr, int offset, int offset_size,
+ u8 *data, u16 len)
+{
+ int ret;
+ int num_msgs;
+ u8 offset_bytes[2];
+ struct i2c_msg msgs[2];
+
+ switch (offset_size) {
+ case 0:
+ num_msgs = 1;
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = I2C_M_RD;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+ break;
+ case 2:
+ offset_bytes[1] = (offset >> 8) & 0xff;
+ /* fall through */
+ case 1:
+ num_msgs = 2;
+ offset_bytes[0] = offset & 0xff;
+
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = offset_size;
+ msgs[0].buf = offset_bytes;
+
+ msgs[1].addr = slave_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ break;
+ default:
+ return -EINVAL;
+ }
- return cnt;
+ bus->controlling_dd = dd;
+ ret = i2c_transfer(&bus->adapter, msgs, num_msgs);
+ if (ret != num_msgs) {
+ dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n",
+ __func__, bus->num, slave_addr, offset, len, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+/*
+ * Raw i2c write. No set-up or lock checking.
+ *
+ * Return 0 on success, -errno on error.
+ */
+static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_i2c_bus *bus;
+ u8 slave_addr;
+ int offset_size;
+
+ bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
+ slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
+ offset_size = (i2c_addr >> 8) & 0x3;
+ return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len);
}
/*
* Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes written, or -errno.
*/
int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
void *bp, int len)
@@ -99,63 +338,36 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "I2C chain %d write interface reset failed\n",
- target);
+ ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
+ if (ret)
return ret;
- }
- return __i2c_write(ppd, target, i2c_addr, offset, bp, len);
+ return len;
}
/*
* Raw i2c read. No set-up or lock checking.
+ *
+ * Return 0 on success, -errno on error.
*/
static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
int offset, void *bp, int len)
{
struct hfi1_devdata *dd = ppd->dd;
- int ret, cnt, pass = 0;
- int orig_offset = offset;
-
- cnt = 0;
- while (cnt < len) {
- int rlen = len - cnt;
-
- ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
- bp + cnt, rlen);
- /* Some QSFP's fail first try. Retry as experiment */
- if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
- continue;
- if (ret) {
- /* hfi1_twsi_blk_rd() 1 for error, else 0 */
- ret = -EIO;
- goto exit;
- }
- offset += rlen;
- cnt += rlen;
- }
-
- ret = cnt;
-
-exit:
- if (ret < 0) {
- hfi1_dev_porterr(dd, ppd->port,
- "I2C chain %d read failed, addr 0x%x, offset 0x%x, len %d\n",
- target, i2c_addr, orig_offset, len);
- }
-
- /* Must wait min 20us between qsfp i2c transactions */
- udelay(20);
-
- return ret;
+ struct hfi1_i2c_bus *bus;
+ u8 slave_addr;
+ int offset_size;
+
+ bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
+ slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
+ offset_size = (i2c_addr >> 8) & 0x3;
+ return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len);
}
/*
* Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes read, or -errno.
*/
int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
void *bp, int len)
@@ -165,16 +377,11 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "I2C chain %d read interface reset failed\n",
- target);
+ ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
+ if (ret)
return ret;
- }
- return __i2c_read(ppd, target, i2c_addr, offset, bp, len);
+ return len;
}
/*
@@ -182,6 +389,8 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
* by writing @addr = ((256 * n) + m)
*
* Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes written or -errno.
*/
int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
@@ -189,21 +398,12 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int count = 0;
int offset;
int nwrite;
- int ret;
+ int ret = 0;
u8 page;
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d write interface reset failed\n",
- target);
- return ret;
- }
-
while (count < len) {
/*
* Set the qsfp page based on a zero-based address
@@ -213,11 +413,12 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
- if (ret != 1) {
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) {
hfi1_dev_porterr(ppd->dd, ppd->port,
"QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
target, ret);
- ret = -EIO;
break;
}
@@ -229,11 +430,13 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
offset, bp + count, nwrite);
- if (ret <= 0) /* stop on error or nothing written */
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) /* stop on error */
break;
- count += ret;
- addr += ret;
+ count += nwrite;
+ addr += nwrite;
}
if (ret < 0)
@@ -243,7 +446,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
/*
* Perform a stand-alone single QSFP write. Acquire the resource, do the
- * read, then release the resource.
+ * write, then release the resource.
*/
int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
@@ -266,6 +469,8 @@ int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
* by reading @addr = ((256 * n) + m)
*
* Caller must hold the i2c chain resource.
+ *
+ * Return the number of bytes read or -errno.
*/
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
@@ -273,21 +478,12 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int count = 0;
int offset;
int nread;
- int ret;
+ int ret = 0;
u8 page;
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d read interface reset failed\n",
- target);
- return ret;
- }
-
while (count < len) {
/*
* Set the qsfp page based on a zero-based address
@@ -296,11 +492,12 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
page = (u8)(addr / QSFP_PAGESIZE);
ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
- if (ret != 1) {
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) {
hfi1_dev_porterr(ppd->dd, ppd->port,
"QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
target, ret);
- ret = -EIO;
break;
}
@@ -310,15 +507,13 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
- /* QSFPs require a 5-10msec delay after write operations */
- mdelay(5);
ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
offset, bp + count, nread);
- if (ret <= 0) /* stop on error or nothing read */
+ if (ret) /* stop on error */
break;
- count += ret;
- addr += ret;
+ count += nread;
+ addr += nread;
}
if (ret < 0)
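
qsfp_read()/qsfp_write() still take a flat, zero-based cable address and split it into a page-select value plus an in-page position, per the "@addr = ((256 * n) + m)" convention documented above; page selection itself is a one-byte __i2c_write() to QSFP_PAGE_SELECT_BYTE_OFFS followed by the mandatory 5-10 ms settle. Purely as arithmetic (the helper name is illustrative; the driver's own QSFP_* macros do the real offset mapping):

#include <linux/types.h>

/* Split a flat QSFP address following the "@addr = ((256 * n) + m)" note. */
static void demo_qsfp_split(int addr, u8 *page, int *pos)
{
	*page = addr / 256;	/* n: value written to the page-select byte */
	*pos  = addr % 256;	/* m: position within that 256-byte page */
}

/* e.g. addr 384 -> page 1, position 128 */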
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index dadc66c442b9..69275ebd9597 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -238,3 +238,6 @@ int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
+struct hfi1_asic_data;
+int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
+void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 792f15eb8efe..5da190e6011b 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -477,6 +477,37 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
+ /*
+ * Local operations are processed immediately
+ * after all prior requests have completed
+ */
+ if (wqe->wr.opcode == IB_WR_REG_MR ||
+ wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ int local_ops = 0;
+ int err = 0;
+
+ if (qp->s_last != qp->s_cur)
+ goto bail;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ if (++qp->s_tail == qp->s_size)
+ qp->s_tail = 0;
+ if (!(wqe->wr.send_flags &
+ RVT_SEND_COMPLETION_ONLY)) {
+ err = rvt_invalidate_rkey(
+ qp,
+ wqe->wr.ex.invalidate_rkey);
+ local_ops = 1;
+ }
+ hfi1_send_complete(qp, wqe,
+ err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
+ if (local_ops)
+ atomic_dec(&qp->local_ops_pending);
+ qp->s_hdrwords = 0;
+ goto done_free_tx;
+ }
+
newreq = 1;
qp->s_psn = wqe->psn;
}
@@ -491,6 +522,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
switch (wqe->wr.opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
@@ -504,11 +536,17 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_ONLY);
- } else {
+ } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
+ } else {
+ qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
+ /* Invalidate rkey comes after the BTH */
+ ohdr->u.ieth = cpu_to_be32(
+ wqe->wr.ex.invalidate_rkey);
+ hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
@@ -671,11 +709,16 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_LAST);
- } else {
+ } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
+ } else {
+ qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
+ /* invalidate data comes after the BTH */
+ ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
+ hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
@@ -1047,7 +1090,7 @@ void hfi1_rc_timeout(unsigned long arg)
ibp->rvp.n_rc_timeouts++;
qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
- trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
+ trace_hfi1_timeout(qp, qp->s_last_psn + 1);
restart_rc(qp, qp->s_last_psn + 1, 1);
hfi1_schedule_send(qp);
}
@@ -1171,7 +1214,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
* If we were waiting for sends to complete before re-sending,
* and they are now complete, restart sending.
*/
- trace_hfi1_rc_sendcomplete(qp, psn);
+ trace_hfi1_sendcomplete(qp, psn);
if (qp->s_flags & RVT_S_WAIT_PSN &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
qp->s_flags &= ~RVT_S_WAIT_PSN;
@@ -1567,7 +1610,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
spin_lock_irqsave(&qp->s_lock, flags);
- trace_hfi1_rc_ack(qp, psn);
+ trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
smp_read_barrier_depends(); /* see post_one_send */
@@ -1782,7 +1825,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
u8 i, prev;
int old_req;
- trace_hfi1_rc_rcv_error(qp, psn);
+ trace_hfi1_rcv_error(qp, psn);
if (diff > 0) {
/*
* Packet sequence error.
@@ -2086,7 +2129,6 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
@@ -2097,30 +2139,15 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
int diff;
struct ib_reth *reth;
unsigned long flags;
- u32 bth1;
int ret, is_fecn = 0;
int copy_last = 0;
+ u32 rkey;
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
return;
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
- if (bth1 & HFI1_BECN_SMASK) {
- u16 rlid = qp->remote_ah_attr.dlid;
- u32 lqpn, rqpn;
-
- lqpn = qp->ibqp.qp_num;
- rqpn = qp->remote_qpn;
- process_becn(
- ppd,
- qp->remote_ah_attr.sl,
- rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_RC);
- }
- is_fecn = bth1 & HFI1_FECN_SMASK;
- }
+ is_fecn = process_ecn(qp, packet, false);
psn = be32_to_cpu(ohdr->bth[2]);
opcode = (bth0 >> 24) & 0xff;
@@ -2154,7 +2181,8 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
case OP(SEND_MIDDLE):
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(SEND_LAST_WITH_INVALIDATE))
break;
goto nack_inv;
@@ -2170,6 +2198,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
opcode == OP(RDMA_WRITE_MIDDLE) ||
opcode == OP(RDMA_WRITE_LAST) ||
opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
@@ -2218,6 +2247,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
+ case OP(SEND_ONLY_WITH_INVALIDATE):
ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto nack_op_err;
@@ -2226,12 +2256,22 @@ send_middle:
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
+ if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
+ goto send_last_inv;
/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
+ case OP(SEND_LAST_WITH_INVALIDATE):
+send_last_inv:
+ rkey = be32_to_cpu(ohdr->u.ieth);
+ if (rvt_invalidate_rkey(qp, rkey))
+ goto no_immediate_data;
+ wc.ex.invalidate_rkey = rkey;
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ goto send_last;
case OP(RDMA_WRITE_LAST):
copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
/* fall through */
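
On the receive side above, Send with Invalidate pulls the rkey out of the IETH that follows the BTH, tries rvt_invalidate_rkey(), and only on success reports IB_WC_WITH_INVALIDATE plus the rkey in the completion. A consumer polling the CQ could then do something like the hypothetical check below (the demo_* name is made up; the ib_wc fields are the standard verbs ones):

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/* Hypothetical ULP-side look at a completion for Send with Invalidate. */
static void demo_check_cqe(const struct ib_wc *wc)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
		pr_debug("peer invalidated our rkey 0x%x\n",
			 wc->ex.invalidate_rkey);
}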
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a659aec3c3c6..48d5094f98e2 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -372,6 +372,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
int ret;
int copy_last = 0;
u32 to;
+ int local_ops = 0;
rcu_read_lock();
@@ -440,11 +441,31 @@ again:
sqp->s_sge.num_sge = wqe->wr.num_sge;
sqp->s_len = wqe->length;
switch (wqe->wr.opcode) {
+ case IB_WR_REG_MR:
+ goto send_comp;
+
+ case IB_WR_LOCAL_INV:
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ if (rvt_invalidate_rkey(sqp,
+ wqe->wr.ex.invalidate_rkey))
+ send_status = IB_WC_LOC_PROT_ERR;
+ local_ops = 1;
+ }
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+ }
+ goto send;
+
case IB_WR_SEND_WITH_IMM:
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
+send:
ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto op_err;
@@ -583,6 +604,10 @@ send_comp:
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
hfi1_send_complete(sqp, wqe, send_status);
+ if (local_ops) {
+ atomic_dec(&sqp->local_ops_pending);
+ local_ops = 0;
+ }
goto again;
rnr_nak:
@@ -683,10 +708,10 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
-#define BTH2_OFFSET (offsetof(struct hfi1_pio_header, hdr.u.oth.bth[2]) / 4)
+#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)
/**
- * build_ahg - create ahg in s_hdr
+ * build_ahg - create ahg in s_ahg
* @qp: a pointer to QP
* @npsn: the next PSN for the request/response
*
@@ -708,19 +733,18 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
if (qp->s_ahgidx >= 0) {
qp->s_ahgpsn = npsn;
- priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
+ priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
/* save to protect a change in another thread */
- priv->s_hdr->sde = priv->s_sde;
- priv->s_hdr->ahgidx = qp->s_ahgidx;
+ priv->s_ahg->ahgidx = qp->s_ahgidx;
qp->s_flags |= RVT_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
if (qp->s_ahgidx >= 0) {
- priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
- priv->s_hdr->ahgidx = qp->s_ahgidx;
- priv->s_hdr->ahgcount++;
- priv->s_hdr->ahgdesc[0] =
+ priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
+ priv->s_ahg->ahgidx = qp->s_ahgidx;
+ priv->s_ahg->ahgcount++;
+ priv->s_ahg->ahgdesc[0] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16((u16)npsn),
BTH2_OFFSET,
@@ -728,8 +752,8 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
16);
if ((npsn & 0xffff0000) !=
(qp->s_ahgpsn & 0xffff0000)) {
- priv->s_hdr->ahgcount++;
- priv->s_hdr->ahgdesc[1] =
+ priv->s_ahg->ahgcount++;
+ priv->s_ahg->ahgdesc[1] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16(
(u16)(npsn >> 16)),
@@ -766,7 +790,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
}
lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
/*
- * reset s_hdr/AHG fields
+ * reset s_ahg/AHG fields
*
 * This ensures that the ahgentry/ahgcount
* are at a non-AHG default to protect
@@ -776,10 +800,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
* build_ahg() will modify as appropriate
* to use the AHG feature.
*/
- priv->s_hdr->tx_flags = 0;
- priv->s_hdr->ahgcount = 0;
- priv->s_hdr->ahgidx = 0;
- priv->s_hdr->sde = NULL;
+ priv->s_ahg->tx_flags = 0;
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
else
@@ -890,7 +913,7 @@ void hfi1_do_send(struct rvt_qp *qp)
*/
if (hfi1_verbs_send(qp, &ps))
return;
- /* Record that s_hdr is empty. */
+ /* Record that s_ahg is empty. */
qp->s_hdrwords = 0;
/* allow other tasks to run */
if (unlikely(time_after(jiffies, timeout))) {
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 91fc2aed6aed..74c84c655f7e 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,6 +49,7 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
+#include "affinity.h"
/*
* Start of per-port congestion control structures and support code
@@ -622,6 +623,27 @@ static ssize_t show_tempsense(struct device *device,
return ret;
}
+static ssize_t show_sdma_affinity(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ return hfi1_get_sdma_affinity(dd, buf);
+}
+
+static ssize_t store_sdma_affinity(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ return hfi1_set_sdma_affinity(dd, buf, count);
+}
+
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
@@ -636,6 +658,8 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
+ store_sdma_affinity);
static struct device_attribute *hfi1_attributes[] = {
&dev_attr_hw_rev,
@@ -646,6 +670,7 @@ static struct device_attribute *hfi1_attributes[] = {
&dev_attr_boardversion,
&dev_attr_tempsense,
&dev_attr_chip_reset,
+ &dev_attr_sdma_affinity,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 28c1d0832886..92dc88f013c9 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -44,1329 +44,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
-#undef TRACE_SYSTEM_VAR
-#define TRACE_SYSTEM_VAR hfi1
-
-#if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __HFI1_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/trace_seq.h>
-
-#include "hfi.h"
-#include "mad.h"
-#include "sdma.h"
-
-#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
-#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
-
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype) \
-__print_symbolic(etype, \
- packettype_name(EXPECTED), \
- packettype_name(EAGER), \
- packettype_name(IB), \
- packettype_name(ERROR), \
- packettype_name(BYPASS))
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_rx
-
-TRACE_EVENT(hfi1_rcvhdr,
- TP_PROTO(struct hfi1_devdata *dd,
- u32 ctxt,
- u64 eflags,
- u32 etype,
- u32 hlen,
- u32 tlen,
- u32 updegr,
- u32 etail
- ),
- TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u64, eflags)
- __field(u32, ctxt)
- __field(u32, etype)
- __field(u32, hlen)
- __field(u32, tlen)
- __field(u32, updegr)
- __field(u32, etail)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->eflags = eflags;
- __entry->ctxt = ctxt;
- __entry->etype = etype;
- __entry->hlen = hlen;
- __entry->tlen = tlen;
- __entry->updegr = updegr;
- __entry->etail = etail;
- ),
- TP_printk(
- "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->eflags,
- __entry->etype, show_packettype(__entry->etype),
- __entry->hlen,
- __entry->tlen,
- __entry->updegr,
- __entry->etail
- )
-);
-
-TRACE_EVENT(hfi1_receive_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
- TP_ARGS(dd, ctxt),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u32, ctxt)
- __field(u8, slow_path)
- __field(u8, dma_rtail)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt) {
- __entry->slow_path = 1;
- __entry->dma_rtail = 0xFF;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_dma_rtail){
- __entry->dma_rtail = 1;
- __entry->slow_path = 0;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_nodma_rtail) {
- __entry->dma_rtail = 0;
- __entry->slow_path = 0;
- }
- ),
- TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->slow_path,
- __entry->dma_rtail
- )
-);
-
-TRACE_EVENT(hfi1_exp_tid_reg,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr,
- u32 npages, unsigned long va, unsigned long pa,
- dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
- __entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_exp_tid_unreg,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, u32 npages,
- unsigned long va, unsigned long pa, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
- __entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_exp_tid_inval,
- TP_PROTO(unsigned ctxt, u16 subctxt, unsigned long va, u32 rarr,
- u32 npages, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(unsigned long, va)
- __field(u32, rarr)
- __field(u32, npages)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->va = va;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->va,
- __entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_mmu_invalidate,
- TP_PROTO(unsigned ctxt, u16 subctxt, const char *type,
- unsigned long start, unsigned long end),
- TP_ARGS(ctxt, subctxt, type, start, end),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __string(type, type)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __assign_str(type, type);
- __entry->start = start;
- __entry->end = end;
- ),
- TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
- __entry->ctxt,
- __entry->subctxt,
- __get_str(type),
- __entry->start,
- __entry->end
- )
- );
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_tx
-
-TRACE_EVENT(hfi1_piofree,
- TP_PROTO(struct send_context *sc, int extra),
- TP_ARGS(sc, extra),
- TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(int, extra)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->extra = extra;
- ),
- TP_printk("[%s] ctxt %u(%u) extra %d",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->extra
- )
-);
-
-TRACE_EVENT(hfi1_wantpiointr,
- TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
- TP_ARGS(sc, needint, credit_ctrl),
- TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(u32, needint)
- __field(u64, credit_ctrl)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->needint = needint;
- __entry->credit_ctrl = credit_ctrl;
- ),
- TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->needint,
- (unsigned long long)__entry->credit_ctrl
- )
-);
-
-DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, flags)
- __field(u32, s_flags)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->flags = flags;
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- ),
- TP_printk(
- "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->flags,
- __entry->s_flags
- )
-);
-
-DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags));
-
-DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_ibhdrs
-
-u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
-const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
-
-#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
-
-const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
-
-#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
-
-#define lrh_name(lrh) { HFI1_##lrh, #lrh }
-#define show_lnh(lrh) \
-__print_symbolic(lrh, \
- lrh_name(LRH_BTH), \
- lrh_name(LRH_GRH))
-
-#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
-#define show_ib_opcode(opcode) \
-__print_symbolic(opcode, \
- ib_opcode_name(RC_SEND_FIRST), \
- ib_opcode_name(RC_SEND_MIDDLE), \
- ib_opcode_name(RC_SEND_LAST), \
- ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_SEND_ONLY), \
- ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_FIRST), \
- ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(RC_RDMA_WRITE_LAST), \
- ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_READ_REQUEST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
- ib_opcode_name(RC_ACKNOWLEDGE), \
- ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
- ib_opcode_name(RC_COMPARE_SWAP), \
- ib_opcode_name(RC_FETCH_ADD), \
- ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
- ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
- ib_opcode_name(UC_SEND_FIRST), \
- ib_opcode_name(UC_SEND_MIDDLE), \
- ib_opcode_name(UC_SEND_LAST), \
- ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_SEND_ONLY), \
- ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_FIRST), \
- ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(UC_RDMA_WRITE_LAST), \
- ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UD_SEND_ONLY), \
- ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(CNP))
-
-#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
-#define BTH_PRN \
- "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
- "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
-#define EHDR_PRN "%s"
-
-DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- /* LRH */
- __field(u8, vl)
- __field(u8, lver)
- __field(u8, sl)
- __field(u8, lnh)
- __field(u16, dlid)
- __field(u16, len)
- __field(u16, slid)
- /* BTH */
- __field(u8, opcode)
- __field(u8, se)
- __field(u8, m)
- __field(u8, pad)
- __field(u8, tver)
- __field(u16, pkey)
- __field(u8, f)
- __field(u8, b)
- __field(u32, qpn)
- __field(u8, a)
- __field(u32, psn)
- /* extended headers */
- __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- DD_DEV_ASSIGN(dd);
- /* LRH */
- __entry->vl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
- __entry->lver =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
- __entry->sl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->lnh =
- (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- __entry->dlid =
- be16_to_cpu(hdr->lrh[1]);
- /* allow for larger len */
- __entry->len =
- be16_to_cpu(hdr->lrh[2]);
- __entry->slid =
- be16_to_cpu(hdr->lrh[3]);
- /* BTH */
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- __entry->opcode =
- (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->se =
- (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
- __entry->m =
- (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
- __entry->pad =
- (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- __entry->tver =
- (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
- __entry->pkey =
- be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->f =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
- HFI1_FECN_MASK;
- __entry->b =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
- HFI1_BECN_MASK;
- __entry->qpn =
- be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->a =
- (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
- /* allow for larger PSN */
- __entry->psn =
- be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
- /* extended headers */
- memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
- ibhdr_exhdr_len(hdr));
- ),
- TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
- __get_str(dev),
- /* LRH */
- __entry->vl,
- __entry->lver,
- __entry->sl,
- __entry->lnh, show_lnh(__entry->lnh),
- __entry->dlid,
- __entry->len,
- __entry->slid,
- /* BTH */
- __entry->opcode, show_ib_opcode(__entry->opcode),
- __entry->se,
- __entry->m,
- __entry->pad,
- __entry->tver,
- __entry->pkey,
- __entry->f,
- __entry->b,
- __entry->qpn,
- __entry->a,
- __entry->psn,
- /* extended headers */
- __parse_ib_ehdrs(
- __entry->opcode,
- (void *)__get_dynamic_array(ehdrs))
- )
-);
-
-DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-#define SNOOP_PRN \
- "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
- "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_snoop
-
-TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct hfi1_ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, slid)
- __field(u16, dlid)
- __field(u32, qpn)
- __field(u8, opcode)
- __field(u8, sl)
- __field(u16, pkey)
- __field(u32, hdr_len)
- __field(u32, data_len)
- __field(u8, lnh)
- __dynamic_array(u8, raw_hdr, hdr_len)
- __dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- DD_DEV_ASSIGN(dd);
- __entry->slid = be16_to_cpu(hdr->lrh[3]);
- __entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->hdr_len = hdr_len;
- __entry->data_len = data_len;
- memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
- memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk(
- "[%s] " SNOOP_PRN,
- __get_str(dev),
- __entry->slid,
- __entry->dlid,
- __entry->qpn,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->sl,
- __entry->pkey,
- __entry->hdr_len,
- __entry->data_len
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_ctxts
-
-#define UCTXT_FMT \
- "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
- "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
-TRACE_EVENT(hfi1_uctxtdata,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
- TP_ARGS(dd, uctxt),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(u32, credits)
- __field(u64, hw_free)
- __field(u64, piobase)
- __field(u16, rcvhdrq_cnt)
- __field(u64, rcvhdrq_phys)
- __field(u32, eager_cnt)
- __field(u64, rcvegr_phys)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = uctxt->ctxt;
- __entry->credits = uctxt->sc->credits;
- __entry->hw_free = (u64)uctxt->sc->hw_free;
- __entry->piobase = (u64)uctxt->sc->base_addr;
- __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
- __entry->eager_cnt = uctxt->egrbufs.alloced;
- __entry->rcvegr_phys =
- uctxt->egrbufs.rcvtids[0].phys;
- ),
- TP_printk("[%s] ctxt %u " UCTXT_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->credits,
- __entry->hw_free,
- __entry->piobase,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_phys,
- __entry->eager_cnt,
- __entry->rcvegr_phys
- )
-);
-
-#define CINFO_FMT \
- "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
-TRACE_EVENT(hfi1_ctxt_info,
- TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
- struct hfi1_ctxt_info cinfo),
- TP_ARGS(dd, ctxt, subctxt, cinfo),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(unsigned, subctxt)
- __field(u16, egrtids)
- __field(u16, rcvhdrq_cnt)
- __field(u16, rcvhdrq_size)
- __field(u16, sdma_ring_size)
- __field(u32, rcvegr_size)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->egrtids = cinfo.egrtids;
- __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
- __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
- __entry->sdma_ring_size = cinfo.sdma_ring_size;
- __entry->rcvegr_size = cinfo.rcvegr_size;
- ),
- TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->egrtids,
- __entry->rcvegr_size,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_size,
- __entry->sdma_ring_size
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_sma
-
-#define BCT_FORMAT \
- "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
-
-#define BCT(field) \
- be16_to_cpu( \
- ((struct buffer_control *)__get_dynamic_array(bct))->field \
- )
-
-DECLARE_EVENT_CLASS(hfi1_bct_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct buffer_control *bc),
- TP_ARGS(dd, bc),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __dynamic_array(u8, bct, sizeof(*bc))
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- memcpy(__get_dynamic_array(bct), bc,
- sizeof(*bc));
- ),
- TP_printk(BCT_FORMAT,
- BCT(overall_shared_limit),
-
- BCT(vl[0].dedicated),
- BCT(vl[0].shared),
-
- BCT(vl[1].dedicated),
- BCT(vl[1].shared),
-
- BCT(vl[2].dedicated),
- BCT(vl[2].shared),
-
- BCT(vl[3].dedicated),
- BCT(vl[3].shared),
-
- BCT(vl[4].dedicated),
- BCT(vl[4].shared),
-
- BCT(vl[5].dedicated),
- BCT(vl[5].shared),
-
- BCT(vl[6].dedicated),
- BCT(vl[6].shared),
-
- BCT(vl[7].dedicated),
- BCT(vl[7].shared),
-
- BCT(vl[15].dedicated),
- BCT(vl[15].shared)
- )
-);
-
-DEFINE_EVENT(hfi1_bct_template, bct_set,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc));
-
-DEFINE_EVENT(hfi1_bct_template, bct_get,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_sdma
-
-TRACE_EVENT(hfi1_sdma_descriptor,
- TP_PROTO(struct sdma_engine *sde,
- u64 desc0,
- u64 desc1,
- u16 e,
- void *descp),
- TP_ARGS(sde, desc0, desc1, e, descp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(void *, descp)
- __field(u64, desc0)
- __field(u64, desc1)
- __field(u16, e)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->desc0 = desc0;
- __entry->desc1 = desc1;
- __entry->idx = sde->this_idx;
- __entry->descp = descp;
- __entry->e = e;
- ),
- TP_printk(
- "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
- __get_str(dev),
- __entry->idx,
- __parse_sdma_flags(__entry->desc0, __entry->desc1),
- (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
- SDMA_DESC0_PHY_ADDR_MASK,
- (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
- SDMA_DESC1_GENERATION_MASK),
- (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
- SDMA_DESC0_BYTE_COUNT_MASK),
- __entry->desc0,
- __entry->desc1,
- __entry->descp,
- __entry->e
- )
-);
-
-TRACE_EVENT(hfi1_sdma_engine_select,
- TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
- TP_ARGS(dd, sel, vl, idx),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u32, sel)
- __field(u8, vl)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->sel = sel;
- __entry->vl = vl;
- __entry->idx = idx;
- ),
- TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
- __get_str(dev),
- __entry->idx,
- __entry->sel,
- __entry->vl
- )
-);
-
-DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, status)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->status = status;
- __entry->idx = sde->this_idx;
- ),
- TP_printk("[%s] SDE(%u) status %llx",
- __get_str(dev),
- __entry->idx,
- (unsigned long long)__entry->status
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status)
-);
-
-DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status)
-);
-
-DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(int, aidx)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->idx = sde->this_idx;
- __entry->aidx = aidx;
- ),
- TP_printk("[%s] SDE(%u) aidx %d",
- __get_str(dev),
- __entry->idx,
- __entry->aidx
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx));
-
-DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx));
-
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
-TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(struct sdma_engine *sde,
- u16 hwhead,
- u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- __entry->sn = txp ? txp->sn : ~0;
- ),
- TP_printk(
- "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->sn,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
-);
-#else
-TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(struct sdma_engine *sde,
- u16 hwhead, u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- ),
- TP_printk(
- "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
-);
-#endif
-
-DECLARE_EVENT_CLASS(hfi1_sdma_sn,
- TP_PROTO(struct sdma_engine *sde, u64 sn),
- TP_ARGS(sde, sn),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->sn = sn;
- __entry->idx = sde->this_idx;
- ),
- TP_printk("[%s] SDE(%u) sn %llu",
- __get_str(dev),
- __entry->idx,
- __entry->sn
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 sn
- ),
- TP_ARGS(sde, sn)
-);
-
-DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
- TP_PROTO(struct sdma_engine *sde, u64 sn),
- TP_ARGS(sde, sn)
-);
-
-#define USDMA_HDR_FORMAT \
- "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
-
-TRACE_EVENT(hfi1_sdma_user_header,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
- struct hfi1_pkt_header *hdr, u32 tidval),
- TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, req)
- __field(__le32, pbc0)
- __field(__le32, pbc1)
- __field(__be32, lrh0)
- __field(__be32, lrh1)
- __field(__be32, bth0)
- __field(__be32, bth1)
- __field(__be32, bth2)
- __field(__le32, kdeth0)
- __field(__le32, kdeth1)
- __field(__le32, kdeth2)
- __field(__le32, kdeth3)
- __field(__le32, kdeth4)
- __field(__le32, kdeth5)
- __field(__le32, kdeth6)
- __field(__le32, kdeth7)
- __field(__le32, kdeth8)
- __field(u32, tidval)
- ),
- TP_fast_assign(
- __le32 *pbc = (__le32 *)hdr->pbc;
- __be32 *lrh = (__be32 *)hdr->lrh;
- __be32 *bth = (__be32 *)hdr->bth;
- __le32 *kdeth = (__le32 *)&hdr->kdeth;
-
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->req = req;
- __entry->pbc0 = pbc[0];
- __entry->pbc1 = pbc[1];
- __entry->lrh0 = be32_to_cpu(lrh[0]);
- __entry->lrh1 = be32_to_cpu(lrh[1]);
- __entry->bth0 = be32_to_cpu(bth[0]);
- __entry->bth1 = be32_to_cpu(bth[1]);
- __entry->bth2 = be32_to_cpu(bth[2]);
- __entry->kdeth0 = kdeth[0];
- __entry->kdeth1 = kdeth[1];
- __entry->kdeth2 = kdeth[2];
- __entry->kdeth3 = kdeth[3];
- __entry->kdeth4 = kdeth[4];
- __entry->kdeth5 = kdeth[5];
- __entry->kdeth6 = kdeth[6];
- __entry->kdeth7 = kdeth[7];
- __entry->kdeth8 = kdeth[8];
- __entry->tidval = tidval;
- ),
- TP_printk(USDMA_HDR_FORMAT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->req,
- __entry->pbc1,
- __entry->pbc0,
- __entry->lrh0,
- __entry->lrh1,
- __entry->bth0,
- __entry->bth1,
- __entry->bth2,
- __entry->kdeth0,
- __entry->kdeth1,
- __entry->kdeth2,
- __entry->kdeth3,
- __entry->kdeth4,
- __entry->kdeth5,
- __entry->kdeth6,
- __entry->kdeth7,
- __entry->kdeth8,
- __entry->tidval
- )
- );
-
-#define SDMA_UREQ_FMT \
- "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
-TRACE_EVENT(hfi1_sdma_user_reqinfo,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
- TP_ARGS(dd, ctxt, subctxt, i),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd);
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u8, ver_opcode)
- __field(u8, iovcnt)
- __field(u16, npkts)
- __field(u16, fragsize)
- __field(u16, comp_idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->ver_opcode = i[0] & 0xff;
- __entry->iovcnt = (i[0] >> 8) & 0xff;
- __entry->npkts = i[1];
- __entry->fragsize = i[2];
- __entry->comp_idx = i[3];
- ),
- TP_printk(SDMA_UREQ_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->ver_opcode,
- __entry->iovcnt,
- __entry->npkts,
- __entry->fragsize,
- __entry->comp_idx
- )
- );
-
-#define usdma_complete_name(st) { st, #st }
-#define show_usdma_complete_state(st) \
- __print_symbolic(st, \
- usdma_complete_name(FREE), \
- usdma_complete_name(QUEUED), \
- usdma_complete_name(COMPLETE), \
- usdma_complete_name(ERROR))
-
-TRACE_EVENT(hfi1_sdma_user_completion,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
- u8 state, int code),
- TP_ARGS(dd, ctxt, subctxt, idx, state, code),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, idx)
- __field(u8, state)
- __field(int, code)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->idx = idx;
- __entry->state = state;
- __entry->code = code;
- ),
- TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
- __get_str(dev), __entry->ctxt, __entry->subctxt,
- __entry->idx, show_usdma_complete_state(__entry->state),
- __entry->code)
- );
-
-const char *print_u32_array(struct trace_seq *, u32 *, int);
-#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
-
-TRACE_EVENT(hfi1_sdma_user_header_ahg,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
- u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
- TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, req)
- __field(u8, sde)
- __field(u8, idx)
- __field(int, len)
- __field(u32, tidval)
- __array(u32, ahg, 10)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->req = req;
- __entry->sde = sde;
- __entry->idx = ahgidx;
- __entry->len = len;
- __entry->tidval = tidval;
- memcpy(__entry->ahg, ahg, len * sizeof(u32));
- ),
- TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->req,
- __entry->sde,
- __entry->idx,
- __entry->len - 1,
- __print_u32_hex(__entry->ahg, __entry->len),
- __entry->tidval
- )
- );
-
-TRACE_EVENT(hfi1_sdma_state,
- TP_PROTO(struct sdma_engine *sde,
- const char *cstate,
- const char *nstate
- ),
- TP_ARGS(sde, cstate, nstate),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __string(curstate, cstate)
- __string(newstate, nstate)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __assign_str(curstate, cstate);
- __assign_str(newstate, nstate);
- ),
- TP_printk("[%s] current state %s new state %s",
- __get_str(dev),
- __get_str(curstate),
- __get_str(newstate)
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_rc
-
-DECLARE_EVENT_CLASS(hfi1_rc_template,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, s_flags)
- __field(u32, psn)
- __field(u32, s_psn)
- __field(u32, s_next_psn)
- __field(u32, s_sending_psn)
- __field(u32, s_sending_hpsn)
- __field(u32, r_psn)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- __entry->psn = psn;
- __entry->s_psn = qp->s_psn;
- __entry->s_next_psn = qp->s_next_psn;
- __entry->s_sending_psn = qp->s_sending_psn;
- __entry->s_sending_hpsn = qp->s_sending_hpsn;
- __entry->r_psn = qp->r_psn;
- ),
- TP_printk(
- "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->s_flags,
- __entry->psn,
- __entry->s_psn,
- __entry->s_next_psn,
- __entry->s_sending_psn,
- __entry->s_sending_hpsn,
- __entry->r_psn
- )
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_misc
-
-TRACE_EVENT(hfi1_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
- int src),
- TP_ARGS(dd, is_entry, src),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __array(char, buf, 64)
- __field(int, src)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd)
- is_entry->is_name(__entry->buf, 64,
- src - is_entry->start);
- __entry->src = src;
- ),
- TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
- __entry->src)
-);
-
-/*
- * Note:
- * This produces a REALLY ugly trace in the console output when the string is
- * too long.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_trace
-
-#define MAX_MSG_LEN 512
-
-DECLARE_EVENT_CLASS(hfi1_trace_template,
- TP_PROTO(const char *function, struct va_format *vaf),
- TP_ARGS(function, vaf),
- TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
- ),
- TP_printk("(%s) %s",
- __get_str(function),
- __get_str(msg))
-);
-
-/*
- * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
- * actual function to work and can not be in a macro.
- */
-#define __hfi1_trace_def(lvl) \
-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
- \
-DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
- TP_PROTO(const char *function, struct va_format *vaf), \
- TP_ARGS(function, vaf))
-
-#define __hfi1_trace_fn(lvl) \
-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
-{ \
- struct va_format vaf = { \
- .fmt = fmt, \
- }; \
- va_list args; \
- \
- va_start(args, fmt); \
- vaf.va = &args; \
- trace_hfi1_ ##lvl(func, &vaf); \
- va_end(args); \
- return; \
-}
-
-/*
- * To create a new trace level simply define it below and as a __hfi1_trace_fn
- * in trace.c. This will create all the hooks for calling
- * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
- * the debugfs stuff.
- */
-__hfi1_trace_def(PKT);
-__hfi1_trace_def(PROC);
-__hfi1_trace_def(SDMA);
-__hfi1_trace_def(LINKVERB);
-__hfi1_trace_def(DEBUG);
-__hfi1_trace_def(SNOOP);
-__hfi1_trace_def(CNTR);
-__hfi1_trace_def(PIO);
-__hfi1_trace_def(DC8051);
-__hfi1_trace_def(FIRMWARE);
-__hfi1_trace_def(RCVCTRL);
-__hfi1_trace_def(TID);
-__hfi1_trace_def(MMU);
-__hfi1_trace_def(IOCTL);
-
-#define hfi1_cdbg(which, fmt, ...) \
- __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
-
-#define hfi1_dbg(fmt, ...) \
- hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
-
-/*
- * Define HFI1_EARLY_DBG at compile time or here to enable early trace
- * messages. Do not check in an enablement for this.
- */
-
-#ifdef HFI1_EARLY_DBG
-#define hfi1_dbg_early(fmt, ...) \
- trace_printk(fmt, ##__VA_ARGS__)
-#else
-#define hfi1_dbg_early(fmt, ...)
-#endif
-
-#endif /* __HFI1_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
+#include "trace_dbg.h"
+#include "trace_misc.h"
+#include "trace_ctxts.h"
+#include "trace_ibhdrs.h"
+#include "trace_rc.h"
+#include "trace_rx.h"
+#include "trace_tx.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
new file mode 100644
index 000000000000..31654bbac1cf
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -0,0 +1,141 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_CTXTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_CTXTS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ctxts
+
+#define UCTXT_FMT \
+ "cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \
+ "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
+TRACE_EVENT(hfi1_uctxtdata,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
+ TP_ARGS(dd, uctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned int, ctxt)
+ __field(u32, credits)
+ __field(u64, hw_free)
+ __field(void __iomem *, piobase)
+ __field(u16, rcvhdrq_cnt)
+ __field(u64, rcvhdrq_phys)
+ __field(u32, eager_cnt)
+ __field(u64, rcvegr_phys)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = uctxt->ctxt;
+ __entry->credits = uctxt->sc->credits;
+ __entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
+ __entry->piobase = uctxt->sc->base_addr;
+ __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+ __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
+ __entry->eager_cnt = uctxt->egrbufs.alloced;
+ __entry->rcvegr_phys =
+ uctxt->egrbufs.rcvtids[0].phys;
+ ),
+ TP_printk("[%s] ctxt %u " UCTXT_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->credits,
+ __entry->hw_free,
+ __entry->piobase,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_phys,
+ __entry->eager_cnt,
+ __entry->rcvegr_phys
+ )
+);
+
+#define CINFO_FMT \
+ "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
+TRACE_EVENT(hfi1_ctxt_info,
+ TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
+ unsigned int subctxt,
+ struct hfi1_ctxt_info cinfo),
+ TP_ARGS(dd, ctxt, subctxt, cinfo),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned int, ctxt)
+ __field(unsigned int, subctxt)
+ __field(u16, egrtids)
+ __field(u16, rcvhdrq_cnt)
+ __field(u16, rcvhdrq_size)
+ __field(u16, sdma_ring_size)
+ __field(u32, rcvegr_size)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->egrtids = cinfo.egrtids;
+ __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
+ __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
+ __entry->sdma_ring_size = cinfo.sdma_ring_size;
+ __entry->rcvegr_size = cinfo.rcvegr_size;
+ ),
+ TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->egrtids,
+ __entry->rcvegr_size,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_size,
+ __entry->sdma_ring_size
+ )
+);
+
+#endif /* __HFI1_TRACE_CTXTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_ctxts
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
new file mode 100644
index 000000000000..0e7d929530c5
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -0,0 +1,155 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_EXTRA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_EXTRA_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+/*
+ * Note:
+ * This produces a REALLY ugly trace in the console output when the string is
+ * too long.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_dbg
+
+#define MAX_MSG_LEN 512
+
+DECLARE_EVENT_CLASS(hfi1_trace_template,
+ TP_PROTO(const char *function, struct va_format *vaf),
+ TP_ARGS(function, vaf),
+ TP_STRUCT__entry(__string(function, function)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(__assign_str(function, function);
+ WARN_ON_ONCE(vsnprintf
+ (__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >=
+ MAX_MSG_LEN);
+ ),
+ TP_printk("(%s) %s",
+ __get_str(function),
+ __get_str(msg))
+);
+
+/*
+ * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
+ * actual function to work and can not be in a macro.
+ */
+#define __hfi1_trace_def(lvl) \
+void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+ \
+DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
+ TP_PROTO(const char *function, struct va_format *vaf), \
+ TP_ARGS(function, vaf))
+
+#define __hfi1_trace_fn(lvl) \
+void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
+{ \
+ struct va_format vaf = { \
+ .fmt = fmt, \
+ }; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ vaf.va = &args; \
+ trace_hfi1_ ##lvl(func, &vaf); \
+ va_end(args); \
+ return; \
+}
+
+/*
+ * To create a new trace level simply define it below and as a __hfi1_trace_fn
+ * in trace.c. This will create all the hooks for calling
+ * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
+ * the debugfs stuff.
+ */
+__hfi1_trace_def(PKT);
+__hfi1_trace_def(PROC);
+__hfi1_trace_def(SDMA);
+__hfi1_trace_def(LINKVERB);
+__hfi1_trace_def(DEBUG);
+__hfi1_trace_def(SNOOP);
+__hfi1_trace_def(CNTR);
+__hfi1_trace_def(PIO);
+__hfi1_trace_def(DC8051);
+__hfi1_trace_def(FIRMWARE);
+__hfi1_trace_def(RCVCTRL);
+__hfi1_trace_def(TID);
+__hfi1_trace_def(MMU);
+__hfi1_trace_def(IOCTL);
+
+#define hfi1_cdbg(which, fmt, ...) \
+ __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
+
+#define hfi1_dbg(fmt, ...) \
+ hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
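The comment above gives the recipe in words; as a sketch under those same assumptions (a matching __hfi1_trace_fn(SDMA) expansion in trace.c, and made-up values for the message), a caller would look like this:

    /*
     * Illustration only: using the SDMA level declared above.  The
     * out-of-line helper comes from __hfi1_trace_fn(SDMA) in trace.c;
     * idx, head and tail are hypothetical locals.
     */
    static void example_report_stall(u8 idx, u16 head, u16 tail)
    {
            hfi1_cdbg(SDMA, "engine %u stalled, head %u tail %u",
                      idx, head, tail);
            hfi1_dbg("same report via the DEBUG shortcut: engine %u", idx);
    }

Nothing is recorded unless the corresponding hfi1_SDMA (or hfi1_DEBUG) trace event is enabled, which is the point of routing these messages through tracepoints rather than printk.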
+
+/*
+ * Define HFI1_EARLY_DBG at compile time or here to enable early trace
+ * messages. Do not check in an enablement for this.
+ */
+
+#ifdef HFI1_EARLY_DBG
+#define hfi1_dbg_early(fmt, ...) \
+ trace_printk(fmt, ##__VA_ARGS__)
+#else
+#define hfi1_dbg_early(fmt, ...)
+#endif
+
+#endif /* __HFI1_TRACE_EXTRA_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_dbg
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
new file mode 100644
index 000000000000..c3e41aed0034
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_IBHDRS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IBHDRS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ibhdrs
+
+u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
+const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
+
+#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
+
+#define lrh_name(lrh) { HFI1_##lrh, #lrh }
+#define show_lnh(lrh) \
+__print_symbolic(lrh, \
+ lrh_name(LRH_BTH), \
+ lrh_name(LRH_GRH))
+
+#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
+#define BTH_PRN \
+ "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
+ "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
+#define EHDR_PRN "%s"
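Both helpers above defer the human-readable formatting to the moment the trace buffer is read: the event records only the raw LNH bits and the raw extended-header bytes, and show_lnh()/__parse_ib_ehdrs() (the latter through the implicit trace_seq pointer p that the TP_printk machinery provides) expand them to text on output. A hypothetical additional mapping would follow exactly the shape of show_lnh:

    /*
     * Hypothetical example of the same pattern: resolve a recorded
     * numeric state to a name at trace-read time.
     */
    #define show_example_state(st) \
    __print_symbolic(st, \
            { 0, "IDLE" }, \
            { 1, "ACTIVE" })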
+
+DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ /* LRH */
+ __field(u8, vl)
+ __field(u8, lver)
+ __field(u8, sl)
+ __field(u8, lnh)
+ __field(u16, dlid)
+ __field(u16, len)
+ __field(u16, slid)
+ /* BTH */
+ __field(u8, opcode)
+ __field(u8, se)
+ __field(u8, m)
+ __field(u8, pad)
+ __field(u8, tver)
+ __field(u16, pkey)
+ __field(u8, f)
+ __field(u8, b)
+ __field(u32, qpn)
+ __field(u8, a)
+ __field(u32, psn)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ DD_DEV_ASSIGN(dd);
+ /* LRH */
+ __entry->vl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
+ __entry->lver =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
+ __entry->sl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->lnh =
+ (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ __entry->dlid =
+ be16_to_cpu(hdr->lrh[1]);
+ /* allow for larger len */
+ __entry->len =
+ be16_to_cpu(hdr->lrh[2]);
+ __entry->slid =
+ be16_to_cpu(hdr->lrh[3]);
+ /* BTH */
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ __entry->opcode =
+ (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->se =
+ (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
+ __entry->m =
+ (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
+ __entry->pad =
+ (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ __entry->tver =
+ (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
+ __entry->pkey =
+ be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->f =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
+ HFI1_FECN_MASK;
+ __entry->b =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
+ HFI1_BECN_MASK;
+ __entry->qpn =
+ be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ __entry->a =
+ (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
+ /* allow for larger PSN */
+ __entry->psn =
+ be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
+ /* extended headers */
+ memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
+ ibhdr_exhdr_len(hdr));
+ ),
+ TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
+ __get_str(dev),
+ /* LRH */
+ __entry->vl,
+ __entry->lver,
+ __entry->sl,
+ __entry->lnh, show_lnh(__entry->lnh),
+ __entry->dlid,
+ __entry->len,
+ __entry->slid,
+ /* BTH */
+ __entry->opcode, show_ib_opcode(__entry->opcode),
+ __entry->se,
+ __entry->m,
+ __entry->pad,
+ __entry->tver,
+ __entry->pkey,
+ __entry->f,
+ __entry->b,
+ __entry->qpn,
+ __entry->a,
+ __entry->psn,
+ /* extended headers */
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ (void *)__get_dynamic_array(ehdrs))
+ )
+);
+
+DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+#endif /* __HFI1_TRACE_IBHDRS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_ibhdrs
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h
new file mode 100644
index 000000000000..d308454af7fd
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_misc.h
@@ -0,0 +1,81 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_MISC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_MISC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_misc
+
+TRACE_EVENT(hfi1_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
+ int src),
+ TP_ARGS(dd, is_entry, src),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __array(char, buf, 64)
+ __field(int, src)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd)
+ is_entry->is_name(__entry->buf, 64,
+ src - is_entry->start);
+ __entry->src = src;
+ ),
+ TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
+ __entry->src)
+);
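In contrast to the read-time helpers in the other headers, this event resolves the source name while the interrupt fires: is_name() formats directly into the 64-byte buffer stored in the ring entry. The callback below is a hypothetical stand-in that only mirrors the call shape visible here; the real interrupt-source tables and their naming helpers live elsewhere in the driver and are not part of this hunk.

    /*
     * Hypothetical is_name() callback matching the call above: write a
     * printable name for the relative source number into buf.
     */
    static char *example_is_name(char *buf, size_t size, unsigned int source)
    {
            snprintf(buf, size, "ExampleIntSrc%u", source);
            return buf;
    }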
+
+#endif /* __HFI1_TRACE_MISC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_misc
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h
new file mode 100644
index 000000000000..5ea5005f9f41
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_rc.h
@@ -0,0 +1,123 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_RC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rc
+
+DECLARE_EVENT_CLASS(hfi1_rc_template,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u32, psn)
+ __field(u32, s_psn)
+ __field(u32, s_next_psn)
+ __field(u32, s_sending_psn)
+ __field(u32, s_sending_hpsn)
+ __field(u32, r_psn)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->psn = psn;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_next_psn = qp->s_next_psn;
+ __entry->s_sending_psn = qp->s_sending_psn;
+ __entry->s_sending_hpsn = qp->s_sending_hpsn;
+ __entry->r_psn = qp->r_psn;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->psn,
+ __entry->s_psn,
+ __entry->s_next_psn,
+ __entry->s_sending_psn,
+ __entry->s_sending_hpsn,
+ __entry->r_psn
+ )
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_sendcomplete,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+#endif /* __HFI1_TRACE_RC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rc
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
new file mode 100644
index 000000000000..9ba1f615ec95
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_RX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_RX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rx
+
+TRACE_EVENT(hfi1_rcvhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ u32 ctxt,
+ u64 eflags,
+ u32 etype,
+ u32 hlen,
+ u32 tlen,
+ u32 updegr,
+ u32 etail
+ ),
+ TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u64, eflags)
+ __field(u32, ctxt)
+ __field(u32, etype)
+ __field(u32, hlen)
+ __field(u32, tlen)
+ __field(u32, updegr)
+ __field(u32, etail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->eflags = eflags;
+ __entry->ctxt = ctxt;
+ __entry->etype = etype;
+ __entry->hlen = hlen;
+ __entry->tlen = tlen;
+ __entry->updegr = updegr;
+ __entry->etail = etail;
+ ),
+ TP_printk(
+ "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->eflags,
+ __entry->etype, show_packettype(__entry->etype),
+ __entry->hlen,
+ __entry->tlen,
+ __entry->updegr,
+ __entry->etail
+ )
+);
+
+TRACE_EVENT(hfi1_receive_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
+ TP_ARGS(dd, ctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, ctxt)
+ __field(u8, slow_path)
+ __field(u8, dma_rtail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt) {
+ __entry->slow_path = 1;
+ __entry->dma_rtail = 0xFF;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_dma_rtail){
+ __entry->dma_rtail = 1;
+ __entry->slow_path = 0;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_nodma_rtail) {
+ __entry->dma_rtail = 0;
+ __entry->slow_path = 0;
+ }
+ ),
+ TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->slow_path,
+ __entry->dma_rtail
+ )
+);
+
+TRACE_EVENT(hfi1_exp_tid_reg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr,
+ u32 npages, unsigned long va, unsigned long pa,
+ dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+ );
+
+TRACE_EVENT(hfi1_exp_tid_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+ );
+
+TRACE_EVENT(hfi1_exp_tid_inval,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
+ u32 npages, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(unsigned long, va)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->va = va;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->va,
+ __entry->dma
+ )
+ );
+
+TRACE_EVENT(hfi1_mmu_invalidate,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
+ unsigned long start, unsigned long end),
+ TP_ARGS(ctxt, subctxt, type, start, end),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __string(type, type)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __assign_str(type, type);
+ __entry->start = start;
+ __entry->end = end;
+ ),
+ TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __get_str(type),
+ __entry->start,
+ __entry->end
+ )
+ );
+
+#define SNOOP_PRN \
+ "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
+ "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
+
+TRACE_EVENT(snoop_capture,
+ TP_PROTO(struct hfi1_devdata *dd,
+ int hdr_len,
+ struct hfi1_ib_header *hdr,
+ int data_len,
+ void *data),
+ TP_ARGS(dd, hdr_len, hdr, data_len, data),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, slid)
+ __field(u16, dlid)
+ __field(u32, qpn)
+ __field(u8, opcode)
+ __field(u8, sl)
+ __field(u16, pkey)
+ __field(u32, hdr_len)
+ __field(u32, data_len)
+ __field(u8, lnh)
+ __dynamic_array(u8, raw_hdr, hdr_len)
+ __dynamic_array(u8, raw_pkt, data_len)
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ DD_DEV_ASSIGN(dd);
+ __entry->slid = be16_to_cpu(hdr->lrh[3]);
+ __entry->dlid = be16_to_cpu(hdr->lrh[1]);
+ __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->hdr_len = hdr_len;
+ __entry->data_len = data_len;
+ memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
+ memcpy(__get_dynamic_array(raw_pkt), data, data_len);
+ ),
+ TP_printk(
+ "[%s] " SNOOP_PRN,
+ __get_str(dev),
+ __entry->slid,
+ __entry->dlid,
+ __entry->qpn,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->sl,
+ __entry->pkey,
+ __entry->hdr_len,
+ __entry->data_len
+ )
+);
+
+#endif /* __HFI1_TRACE_RX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rx
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
new file mode 100644
index 000000000000..415d6be42c5d
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "sdma.h"
+
+const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
+
+#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_tx
+
+TRACE_EVENT(hfi1_piofree,
+ TP_PROTO(struct send_context *sc, int extra),
+ TP_ARGS(sc, extra),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(int, extra)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->extra = extra;
+ ),
+ TP_printk("[%s] ctxt %u(%u) extra %d",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->extra
+ )
+);
+
+TRACE_EVENT(hfi1_wantpiointr,
+ TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
+ TP_ARGS(sc, needint, credit_ctrl),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(u32, needint)
+ __field(u64, credit_ctrl)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->needint = needint;
+ __entry->credit_ctrl = credit_ctrl;
+ ),
+ TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->needint,
+ (unsigned long long)__entry->credit_ctrl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, flags)
+ __field(u32, s_flags)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->flags = flags;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flags,
+ __entry->s_flags
+ )
+);
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
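DECLARE_EVENT_CLASS() factors the field layout, the fast-assign body and the format string out of the individual tracepoints, so each DEFINE_EVENT() that follows costs only a new name. A further event reusing this class (purely hypothetical, not part of this patch) would be one more short stanza:

    /* Hypothetical: a third event sharing hfi1_qpsleepwakeup_template. */
    DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpyield,
                 TP_PROTO(struct rvt_qp *qp, u32 flags),
                 TP_ARGS(qp, flags));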
+
+TRACE_EVENT(hfi1_sdma_descriptor,
+ TP_PROTO(struct sdma_engine *sde,
+ u64 desc0,
+ u64 desc1,
+ u16 e,
+ void *descp),
+ TP_ARGS(sde, desc0, desc1, e, descp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(void *, descp)
+ __field(u64, desc0)
+ __field(u64, desc1)
+ __field(u16, e)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->desc0 = desc0;
+ __entry->desc1 = desc1;
+ __entry->idx = sde->this_idx;
+ __entry->descp = descp;
+ __entry->e = e;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
+ __get_str(dev),
+ __entry->idx,
+ __parse_sdma_flags(__entry->desc0, __entry->desc1),
+ (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
+ SDMA_DESC0_PHY_ADDR_MASK,
+ (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
+ SDMA_DESC1_GENERATION_MASK),
+ (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
+ SDMA_DESC0_BYTE_COUNT_MASK),
+ __entry->desc0,
+ __entry->desc1,
+ __entry->descp,
+ __entry->e
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_engine_select,
+ TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
+ TP_ARGS(dd, sel, vl, idx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, sel)
+ __field(u8, vl)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->sel = sel;
+ __entry->vl = vl;
+ __entry->idx = idx;
+ ),
+ TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sel,
+ __entry->vl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, status)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->status = status;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) status %llx",
+ __get_str(dev),
+ __entry->idx,
+ (unsigned long long)__entry->status
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(int, aidx)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->idx = sde->this_idx;
+ __entry->aidx = aidx;
+ ),
+ TP_printk("[%s] SDE(%u) aidx %d",
+ __get_str(dev),
+ __entry->idx,
+ __entry->aidx
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx));
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx));
+
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead,
+ u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ __entry->sn = txp ? txp->sn : ~0;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#else
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead, u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#endif
+
+DECLARE_EVENT_CLASS(hfi1_sdma_sn,
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->sn = sn;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) sn %llu",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 sn
+ ),
+ TP_ARGS(sde, sn)
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn)
+);
+
+#define USDMA_HDR_FORMAT \
+ "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
+
+TRACE_EVENT(hfi1_sdma_user_header,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ struct hfi1_pkt_header *hdr, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u32, pbc0)
+ __field(u32, pbc1)
+ __field(u32, lrh0)
+ __field(u32, lrh1)
+ __field(u32, bth0)
+ __field(u32, bth1)
+ __field(u32, bth2)
+ __field(u32, kdeth0)
+ __field(u32, kdeth1)
+ __field(u32, kdeth2)
+ __field(u32, kdeth3)
+ __field(u32, kdeth4)
+ __field(u32, kdeth5)
+ __field(u32, kdeth6)
+ __field(u32, kdeth7)
+ __field(u32, kdeth8)
+ __field(u32, tidval)
+ ),
+ TP_fast_assign(
+ __le32 *pbc = (__le32 *)hdr->pbc;
+ __be32 *lrh = (__be32 *)hdr->lrh;
+ __be32 *bth = (__be32 *)hdr->bth;
+ __le32 *kdeth = (__le32 *)&hdr->kdeth;
+
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->pbc0 = le32_to_cpu(pbc[0]);
+ __entry->pbc1 = le32_to_cpu(pbc[1]);
+ __entry->lrh0 = be32_to_cpu(lrh[0]);
+ __entry->lrh1 = be32_to_cpu(lrh[1]);
+ __entry->bth0 = be32_to_cpu(bth[0]);
+ __entry->bth1 = be32_to_cpu(bth[1]);
+ __entry->bth2 = be32_to_cpu(bth[2]);
+ __entry->kdeth0 = le32_to_cpu(kdeth[0]);
+ __entry->kdeth1 = le32_to_cpu(kdeth[1]);
+ __entry->kdeth2 = le32_to_cpu(kdeth[2]);
+ __entry->kdeth3 = le32_to_cpu(kdeth[3]);
+ __entry->kdeth4 = le32_to_cpu(kdeth[4]);
+ __entry->kdeth5 = le32_to_cpu(kdeth[5]);
+ __entry->kdeth6 = le32_to_cpu(kdeth[6]);
+ __entry->kdeth7 = le32_to_cpu(kdeth[7]);
+ __entry->kdeth8 = le32_to_cpu(kdeth[8]);
+ __entry->tidval = tidval;
+ ),
+ TP_printk(USDMA_HDR_FORMAT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->pbc1,
+ __entry->pbc0,
+ __entry->lrh0,
+ __entry->lrh1,
+ __entry->bth0,
+ __entry->bth1,
+ __entry->bth2,
+ __entry->kdeth0,
+ __entry->kdeth1,
+ __entry->kdeth2,
+ __entry->kdeth3,
+ __entry->kdeth4,
+ __entry->kdeth5,
+ __entry->kdeth6,
+ __entry->kdeth7,
+ __entry->kdeth8,
+ __entry->tidval
+ )
+);
+
+#define SDMA_UREQ_FMT \
+ "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
+TRACE_EVENT(hfi1_sdma_user_reqinfo,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
+ TP_ARGS(dd, ctxt, subctxt, i),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd);
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u8, ver_opcode)
+ __field(u8, iovcnt)
+ __field(u16, npkts)
+ __field(u16, fragsize)
+ __field(u16, comp_idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->ver_opcode = i[0] & 0xff;
+ __entry->iovcnt = (i[0] >> 8) & 0xff;
+ __entry->npkts = i[1];
+ __entry->fragsize = i[2];
+ __entry->comp_idx = i[3];
+ ),
+ TP_printk(SDMA_UREQ_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->ver_opcode,
+ __entry->iovcnt,
+ __entry->npkts,
+ __entry->fragsize,
+ __entry->comp_idx
+ )
+);
+
+#define usdma_complete_name(st) { st, #st }
+#define show_usdma_complete_state(st) \
+ __print_symbolic(st, \
+ usdma_complete_name(FREE), \
+ usdma_complete_name(QUEUED), \
+ usdma_complete_name(COMPLETE), \
+ usdma_complete_name(ERROR))
+
+TRACE_EVENT(hfi1_sdma_user_completion,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
+ u8 state, int code),
+ TP_ARGS(dd, ctxt, subctxt, idx, state, code),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, idx)
+ __field(u8, state)
+ __field(int, code)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->idx = idx;
+ __entry->state = state;
+ __entry->code = code;
+ ),
+ TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
+ __get_str(dev), __entry->ctxt, __entry->subctxt,
+ __entry->idx, show_usdma_complete_state(__entry->state),
+ __entry->code)
+);
+
+const char *print_u32_array(struct trace_seq *, u32 *, int);
+#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
+
+TRACE_EVENT(hfi1_sdma_user_header_ahg,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u8, sde)
+ __field(u8, idx)
+ __field(int, len)
+ __field(u32, tidval)
+ __array(u32, ahg, 10)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->sde = sde;
+ __entry->idx = ahgidx;
+ __entry->len = len;
+ __entry->tidval = tidval;
+ memcpy(__entry->ahg, ahg, len * sizeof(u32));
+ ),
+ TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->sde,
+ __entry->idx,
+ __entry->len - 1,
+ __print_u32_hex(__entry->ahg, __entry->len),
+ __entry->tidval
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_state,
+ TP_PROTO(struct sdma_engine *sde,
+ const char *cstate,
+ const char *nstate
+ ),
+ TP_ARGS(sde, cstate, nstate),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __string(curstate, cstate)
+ __string(newstate, nstate)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __assign_str(curstate, cstate);
+ __assign_str(newstate, nstate);
+ ),
+ TP_printk("[%s] current state %s new state %s",
+ __get_str(dev),
+ __get_str(curstate),
+ __get_str(newstate)
+ )
+);
+
+#define BCT_FORMAT \
+ "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
+
+#define BCT(field) \
+ be16_to_cpu( \
+ ((struct buffer_control *)__get_dynamic_array(bct))->field \
+ )
+
+DECLARE_EVENT_CLASS(hfi1_bct_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct buffer_control *bc),
+ TP_ARGS(dd, bc),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __dynamic_array(u8, bct, sizeof(*bc))
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ memcpy(__get_dynamic_array(bct), bc,
+ sizeof(*bc));
+ ),
+ TP_printk(BCT_FORMAT,
+ BCT(overall_shared_limit),
+
+ BCT(vl[0].dedicated),
+ BCT(vl[0].shared),
+
+ BCT(vl[1].dedicated),
+ BCT(vl[1].shared),
+
+ BCT(vl[2].dedicated),
+ BCT(vl[2].shared),
+
+ BCT(vl[3].dedicated),
+ BCT(vl[3].shared),
+
+ BCT(vl[4].dedicated),
+ BCT(vl[4].shared),
+
+ BCT(vl[5].dedicated),
+ BCT(vl[5].shared),
+
+ BCT(vl[6].dedicated),
+ BCT(vl[6].shared),
+
+ BCT(vl[7].dedicated),
+ BCT(vl[7].shared),
+
+ BCT(vl[15].dedicated),
+ BCT(vl[15].shared)
+ )
+);
+
+DEFINE_EVENT(hfi1_bct_template, bct_set,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+DEFINE_EVENT(hfi1_bct_template, bct_get,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+#endif /* __HFI1_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
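
For orientation, a minimal usage sketch (not part of the patch; the wrapper function below is hypothetical): each TRACE_EVENT()/DEFINE_EVENT() in the header above generates a trace_<name>() helper whose arguments follow its TP_PROTO(), and the driver calls that helper at the instrumented point. The event body is only evaluated when the hfi1_tx tracepoint is enabled.

/* hypothetical caller, for illustration only */
static void example_credit_return(struct send_context *sc, int extra)
{
	trace_hfi1_piofree(sc, extra);	/* emits hfi1_tx:hfi1_piofree */
}
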
diff --git a/drivers/infiniband/hw/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c
deleted file mode 100644
index e82e52a63d35..000000000000
--- a/drivers/infiniband/hw/hfi1/twsi.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "hfi.h"
-#include "twsi.h"
-
-/*
- * "Two Wire Serial Interface" support.
- *
- * Originally written for a not-quite-i2c serial eeprom, which is
- * still used on some supported boards. Later boards have added a
- * variety of other uses, most board-specific, so the bit-boffing
- * part has been split off to this file, while the other parts
- * have been moved to chip-specific files.
- *
- * We have also dropped all pretense of fully generic (e.g. pretend
- * we don't know whether '1' is the higher voltage) interface, as
- * the restrictions of the generic i2c interface (e.g. no access from
- * driver itself) make it unsuitable for this use.
- */
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the hfi1_ib device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip. Since we are delaying anyway, the cost doesn't
- * hurt, and makes the bit twiddling more regular
- */
-static void i2c_wait_for_writes(struct hfi1_devdata *dd, u32 target)
-{
- /*
- * implicit read of EXTStatus is as good as explicit
- * read of scratch, if all we want to do is flush
- * writes.
- */
- hfi1_gpio_mod(dd, target, 0, 0, 0);
- rmb(); /* inlined, so prevent compiler reordering */
-}
-
-/*
- * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
- * for "almost compliant" modules
- */
-#define SCL_WAIT_USEC 1000
-
-/* BUF_WAIT is time bus must be free between STOP or ACK and to next START.
- * Should be 20, but some chips need more.
- */
-#define TWSI_BUF_WAIT_USEC 60
-
-static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit)
-{
- u32 mask;
-
- udelay(1);
-
- mask = QSFP_HFI0_I2CCLK;
-
- /* SCL is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask);
-
- /*
- * Allow for slow slaves by simple
- * delay for falling edge, sampling on rise.
- */
- if (!bit) {
- udelay(2);
- } else {
- int rise_usec;
-
- for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
- if (mask & hfi1_gpio_mod(dd, target, 0, 0, 0))
- break;
- udelay(2);
- }
- if (rise_usec <= 0)
- dd_dev_err(dd, "SCL interface stuck low > %d uSec\n",
- SCL_WAIT_USEC);
- }
- i2c_wait_for_writes(dd, target);
-}
-
-static u8 scl_in(struct hfi1_devdata *dd, u32 target, int wait)
-{
- u32 read_val, mask;
-
- mask = QSFP_HFI0_I2CCLK;
- /* SCL is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, 0, mask);
- read_val = hfi1_gpio_mod(dd, target, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd, target);
- return (read_val & mask) >> GPIO_SCL_NUM;
-}
-
-static void sda_out(struct hfi1_devdata *dd, u32 target, u8 bit)
-{
- u32 mask;
-
- mask = QSFP_HFI0_I2CDAT;
-
- /* SDA is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask);
-
- i2c_wait_for_writes(dd, target);
- udelay(2);
-}
-
-static u8 sda_in(struct hfi1_devdata *dd, u32 target, int wait)
-{
- u32 read_val, mask;
-
- mask = QSFP_HFI0_I2CDAT;
- /* SDA is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, 0, mask);
- read_val = hfi1_gpio_mod(dd, target, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd, target);
- return (read_val & mask) >> GPIO_SDA_NUM;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the hfi1_ib device
- */
-static int i2c_ackrcv(struct hfi1_devdata *dd, u32 target)
-{
- u8 ack_received;
-
- /* AT ENTRY SCL = LOW */
- /* change direction, ignore data */
- ack_received = sda_in(dd, target, 1);
- scl_out(dd, target, 1);
- ack_received = sda_in(dd, target, 1) == 0;
- scl_out(dd, target, 0);
- return ack_received;
-}
-
-static void stop_cmd(struct hfi1_devdata *dd, u32 target);
-
-/**
- * rd_byte - read a byte, sending STOP on last, else ACK
- * @dd: the hfi1_ib device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct hfi1_devdata *dd, u32 target, int last)
-{
- int bit_cntr, data;
-
- data = 0;
-
- for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
- data <<= 1;
- scl_out(dd, target, 1);
- data |= sda_in(dd, target, 0);
- scl_out(dd, target, 0);
- }
- if (last) {
- scl_out(dd, target, 1);
- stop_cmd(dd, target);
- } else {
- sda_out(dd, target, 0);
- scl_out(dd, target, 1);
- scl_out(dd, target, 0);
- sda_out(dd, target, 1);
- }
- return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the hfi1_ib device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct hfi1_devdata *dd, u32 target, u8 data)
-{
- int bit_cntr;
- u8 bit;
-
- for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
- bit = (data >> bit_cntr) & 1;
- sda_out(dd, target, bit);
- scl_out(dd, target, 1);
- scl_out(dd, target, 0);
- }
- return (!i2c_ackrcv(dd, target)) ? 1 : 0;
-}
-
-/*
- * issue TWSI start sequence:
- * (both clock/data high, clock high, data low while clock is high)
- */
-static void start_seq(struct hfi1_devdata *dd, u32 target)
-{
- sda_out(dd, target, 1);
- scl_out(dd, target, 1);
- sda_out(dd, target, 0);
- udelay(1);
- scl_out(dd, target, 0);
-}
-
-/**
- * stop_seq - transmit the stop sequence
- * @dd: the hfi1_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_seq(struct hfi1_devdata *dd, u32 target)
-{
- scl_out(dd, target, 0);
- sda_out(dd, target, 0);
- scl_out(dd, target, 1);
- sda_out(dd, target, 1);
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the hfi1_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct hfi1_devdata *dd, u32 target)
-{
- stop_seq(dd, target);
- udelay(TWSI_BUF_WAIT_USEC);
-}
-
-/**
- * hfi1_twsi_reset - reset I2C communication
- * @dd: the hfi1_ib device
- * returns 0 if ok, -EIO on error
- */
-int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target)
-{
- int clock_cycles_left = 9;
- u32 mask;
-
- /* Both SCL and SDA should be high. If not, there
- * is something wrong.
- */
- mask = QSFP_HFI0_I2CCLK | QSFP_HFI0_I2CDAT;
-
- /*
- * Force pins to desired innocuous state.
- * This is the default power-on state with out=0 and dir=0,
- * So tri-stated and should be floating high (barring HW problems)
- */
- hfi1_gpio_mod(dd, target, 0, 0, mask);
-
- /* Check if SCL is low, if it is low then we have a slave device
- * misbehaving and there is not much we can do.
- */
- if (!scl_in(dd, target, 0))
- return -EIO;
-
- /* Check if SDA is low, if it is low then we have to clock SDA
- * up to 9 times for the device to release the bus
- */
- while (clock_cycles_left--) {
- if (sda_in(dd, target, 0))
- return 0;
- scl_out(dd, target, 0);
- scl_out(dd, target, 1);
- }
-
- return -EIO;
-}
-
-#define HFI1_TWSI_START 0x100
-#define HFI1_TWSI_STOP 0x200
-
-/* Write byte to TWSI, optionally prefixed with START or suffixed with
- * STOP.
- * returns 0 if OK (ACK received), else != 0
- */
-static int twsi_wr(struct hfi1_devdata *dd, u32 target, int data, int flags)
-{
- int ret = 1;
-
- if (flags & HFI1_TWSI_START)
- start_seq(dd, target);
-
- /* Leaves SCL low (from i2c_ackrcv()) */
- ret = wr_byte(dd, target, data);
-
- if (flags & HFI1_TWSI_STOP)
- stop_cmd(dd, target);
- return ret;
-}
-
-/* Added functionality for IBA7220-based cards */
-#define HFI1_TEMP_DEV 0x98
-
-/*
- * hfi1_twsi_blk_rd
- * General interface for data transfer from twsi devices.
- * One vestige of its former role is that it recognizes a device
- * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * address within device. On all other devices found on board handled by
- * this driver, the device is followed by a N-byte "address" which selects
- * the "register" or "offset" within the device from which data should
- * be read.
- */
-int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- void *buffer, int len)
-{
- u8 *bp = buffer;
- int ret = 1;
- int i;
- int offset_size;
-
- /* obtain the offset size, strip it from the device address */
- offset_size = (dev >> 8) & 0xff;
- dev &= 0xff;
-
- /* allow at most a 2 byte offset */
- if (offset_size > 2)
- goto bail;
-
- if (dev == HFI1_TWSI_NO_DEV) {
- /* legacy not-really-I2C */
- addr = (addr << 1) | READ_CMD;
- ret = twsi_wr(dd, target, addr, HFI1_TWSI_START);
- } else {
- /* Actual I2C */
- if (offset_size) {
- ret = twsi_wr(dd, target,
- dev | WRITE_CMD, HFI1_TWSI_START);
- if (ret) {
- stop_cmd(dd, target);
- goto bail;
- }
-
- for (i = 0; i < offset_size; i++) {
- ret = twsi_wr(dd, target,
- (addr >> (i * 8)) & 0xff, 0);
- udelay(TWSI_BUF_WAIT_USEC);
- if (ret) {
- dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n",
- i, addr);
- goto bail;
- }
- }
- }
- ret = twsi_wr(dd, target, dev | READ_CMD, HFI1_TWSI_START);
- }
- if (ret) {
- stop_cmd(dd, target);
- goto bail;
- }
-
- /*
- * block devices keeps clocking data out as long as we ack,
- * automatically incrementing the address. Some have "pages"
- * whose boundaries will not be crossed, but the handling
- * of these is left to the caller, who is in a better
- * position to know.
- */
- while (len-- > 0) {
- /*
- * Get and store data, sending ACK if length remaining,
- * else STOP
- */
- *bp++ = rd_byte(dd, target, !len);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * hfi1_twsi_blk_wr
- * General interface for data transfer to twsi devices.
- * One vestige of its former role is that it recognizes a device
- * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * address within device. On all other devices found on board handled by
- * this driver, the device is followed by a N-byte "address" which selects
- * the "register" or "offset" within the device to which data should
- * be written.
- */
-int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- const void *buffer, int len)
-{
- const u8 *bp = buffer;
- int ret = 1;
- int i;
- int offset_size;
-
- /* obtain the offset size, strip it from the device address */
- offset_size = (dev >> 8) & 0xff;
- dev &= 0xff;
-
- /* allow at most a 2 byte offset */
- if (offset_size > 2)
- goto bail;
-
- if (dev == HFI1_TWSI_NO_DEV) {
- if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD,
- HFI1_TWSI_START)) {
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START))
- goto failed_write;
- }
-
- for (i = 0; i < offset_size; i++) {
- ret = twsi_wr(dd, target, (addr >> (i * 8)) & 0xff, 0);
- udelay(TWSI_BUF_WAIT_USEC);
- if (ret) {
- dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n",
- i, addr);
- goto bail;
- }
- }
-
- for (i = 0; i < len; i++)
- if (twsi_wr(dd, target, *bp++, 0))
- goto failed_write;
-
- ret = 0;
-
-failed_write:
- stop_cmd(dd, target);
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h
deleted file mode 100644
index 5b8a5b5e7eae..000000000000
--- a/drivers/infiniband/hw/hfi1/twsi.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef _TWSI_H
-#define _TWSI_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#define HFI1_TWSI_NO_DEV 0xFF
-
-struct hfi1_devdata;
-
-/* Bit position of SDA/SCL pins in ASIC_QSFP* registers */
-#define GPIO_SDA_NUM 1
-#define GPIO_SCL_NUM 0
-
-/* these functions must be called with qsfp_lock held */
-int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target);
-int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- void *buffer, int len);
-int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- const void *buffer, int len);
-
-#endif /* _TWSI_H */
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index df773d433297..a726d96d185f 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -119,6 +119,31 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
}
/*
+ * Local operations are processed immediately
+ * after all prior requests have completed.
+ */
+ if (wqe->wr.opcode == IB_WR_REG_MR ||
+ wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ int local_ops = 0;
+ int err = 0;
+
+ if (qp->s_last != qp->s_cur)
+ goto bail;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ err = rvt_invalidate_rkey(
+ qp, wqe->wr.ex.invalidate_rkey);
+ local_ops = 1;
+ }
+ hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
+ if (local_ops)
+ atomic_dec(&qp->local_ops_pending);
+ qp->s_hdrwords = 0;
+ goto done_free_tx;
+ }
+ /*
* Start a new request.
*/
qp->s_psn = wqe->psn;
@@ -294,46 +319,12 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
struct ib_reth *reth;
int has_grh = rcv_flags & HFI1_HAS_GRH;
int ret;
- u32 bth1;
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
return;
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
- if (bth1 & HFI1_BECN_SMASK) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 rqpn, lqpn;
- u16 rlid = be16_to_cpu(hdr->lrh[3]);
- u8 sl, sc5;
-
- lqpn = bth1 & RVT_QPN_MASK;
- rqpn = qp->remote_qpn;
-
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_UC);
- }
-
- if (bth1 & HFI1_FECN_SMASK) {
- struct ib_grh *grh = NULL;
- u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
- u16 slid = be16_to_cpu(hdr->lrh[3]);
- u16 dlid = be16_to_cpu(hdr->lrh[1]);
- u32 src_qp = qp->remote_qpn;
- u8 sc5;
-
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- if (has_grh)
- grh = &hdr->u.l.grh;
-
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5,
- grh);
- }
- }
+ process_ecn(qp, packet, true);
psn = be32_to_cpu(ohdr->bth[2]);
opcode = (bth0 >> 24) & 0xff;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index be91f6fa1c87..f01e8e1d62d3 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -184,8 +184,12 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
if (ah_attr->ah_flags & IB_AH_GRH) {
- hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1, 0);
+ struct ib_grh grh;
+ struct ib_global_route grd = ah_attr->grh;
+
+ hfi1_make_grh(ibp, &grh, &grd, 0, 0);
+ hfi1_copy_sge(&qp->r_sge, &grh,
+ sizeof(grh), 1, 0);
wc.wc_flags |= IB_WC_GRH;
} else {
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
@@ -430,10 +434,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->qkey : wqe->ud_wr.remote_qkey);
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
/* disarm any ahg */
- priv->s_hdr->ahgcount = 0;
- priv->s_hdr->ahgidx = 0;
- priv->s_hdr->tx_flags = 0;
- priv->s_hdr->sde = NULL;
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
+ priv->s_ahg->tx_flags = 0;
/* pbc */
ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
@@ -665,13 +668,13 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
struct hfi1_other_headers *ohdr = packet->ohdr;
int opcode;
u32 hdrsize = packet->hlen;
- u32 pad;
struct ib_wc wc;
u32 qkey;
u32 src_qp;
u16 dlid, pkey;
int mgmt_pkey_idx = -1;
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_ib_header *hdr = packet->hdr;
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
@@ -680,52 +683,33 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
bool has_grh = rcv_flags & HFI1_HAS_GRH;
u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
u32 bth1;
- int is_mcast;
- struct ib_grh *grh = NULL;
+ u8 sl_from_sc, sl;
+ u16 slid;
+ u8 extra_bytes;
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
dlid = be16_to_cpu(hdr->lrh[1]);
- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & HFI1_BECN_SMASK)) {
- /*
- * In pre-B0 h/w the CNP_OPCODE is handled via an
- * error path.
- */
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl;
-
- sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
- }
+ slid = be16_to_cpu(hdr->lrh[3]);
+ pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+ sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ extra_bytes = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ extra_bytes += (SIZE_OF_CRC << 2);
+ sl_from_sc = ibp->sc_to_sl[sc5];
- /*
- * The opcode is in the low byte when its in network order
- * (top byte when in host order).
- */
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
opcode &= 0xff;
- pkey = (u16)be32_to_cpu(ohdr->bth[0]);
-
- if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
- u16 slid = be16_to_cpu(hdr->lrh[3]);
-
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
- }
+ process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
/*
* Get the number of bytes the message was padded by
* and drop incomplete packets.
*/
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4)))
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
goto drop;
- tlen -= hdrsize + pad + 4;
+ tlen -= hdrsize + extra_bytes;
/*
* Check that the permissive LID is only used on QP0
@@ -736,10 +720,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
hdr->lrh[3] == IB_LID_PERMISSIVE))
goto drop;
if (qp->ibqp.qp_num > 1) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid;
-
- slid = be16_to_cpu(hdr->lrh[3]);
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
/*
* Traps will not be sent for packets dropped
@@ -748,12 +728,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* IB spec (release 1.3, section 10.9.4)
*/
hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- pkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
- 0xF,
+ pkey, sl,
src_qp, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
+ slid, dlid);
return;
}
} else {
@@ -763,22 +740,18 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
goto drop;
}
if (unlikely(qkey != qp->qkey)) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, sl,
src_qp, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
+ slid, dlid);
return;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
- (tlen > 2048 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+ (tlen > 2048 || (sc5 == 0xF))))
goto drop;
} else {
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp *smp = (struct opa_smp *)data;
- u16 slid = be16_to_cpu(hdr->lrh[3]);
if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
goto drop;
@@ -861,7 +834,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
qp->ibqp.qp_type == IB_QPT_SMI) {
if (mgmt_pkey_idx < 0) {
if (net_ratelimit()) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = ppd->dd;
dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
@@ -874,8 +846,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
wc.pkey_index = 0;
}
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- wc.sl = ibp->sc_to_sl[sc5];
+ wc.slid = slid;
+ wc.sl = sl_from_sc;
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
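
A worked sketch of the length accounting hfi1_ud_rcv() now performs up front (illustrative only; it assumes SIZE_OF_CRC is one 32-bit word, matching the (SIZE_OF_CRC << 2) term above): the BTH pad count and the 4-byte ICRC are folded into extra_bytes, and any packet whose total length cannot cover the header plus those extras is dropped.

/* hypothetical helper mirroring the extra_bytes check above */
static bool example_ud_len_ok(u32 tlen, u32 hdrsize, u32 bth0)
{
	u8 pad = (bth0 >> 20) & 3;	/* 0-3 pad bytes from BTH */
	u8 extra_bytes = pad + 4;	/* plus the 4-byte ICRC */

	return tlen >= hdrsize + extra_bytes;	/* false -> packet is dropped */
}
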
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 1b640a35b3fe..64d26525435a 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -82,24 +82,25 @@ struct tid_pageset {
((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
- struct rb_root *);
+ struct hfi1_filedata *);
static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
struct tid_group *, struct page **, unsigned);
-static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
-static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
+static int tid_rb_insert(void *, struct mmu_rb_node *);
+static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode);
+static void tid_rb_remove(void *, struct mmu_rb_node *);
+static int tid_rb_invalidate(void *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
struct tid_pageset *, unsigned, u16, struct page **,
u32 *, unsigned *, unsigned *);
static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
-static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *);
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
static struct mmu_rb_ops tid_rb_ops = {
- .insert = mmu_rb_insert,
- .remove = mmu_rb_remove,
- .invalidate = mmu_rb_invalidate
+ .insert = tid_rb_insert,
+ .remove = tid_rb_remove,
+ .invalidate = tid_rb_invalidate
};
static inline u32 rcventry2tidinfo(u32 rcventry)
@@ -162,7 +163,6 @@ int hfi1_user_exp_rcv_init(struct file *fp)
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
- fd->tid_rb_root = RB_ROOT;
if (!uctxt->subctxt_cnt || !fd->subctxt) {
exp_tid_group_init(&uctxt->tid_group_list);
@@ -197,7 +197,7 @@ int hfi1_user_exp_rcv_init(struct file *fp)
if (!fd->entry_to_rb)
return -ENOMEM;
- if (!HFI1_CAP_IS_USET(TID_UNMAP)) {
+ if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
fd->invalid_tid_idx = 0;
fd->invalid_tids = kzalloc(uctxt->expected_count *
sizeof(u32), GFP_KERNEL);
@@ -208,15 +208,15 @@ int hfi1_user_exp_rcv_init(struct file *fp)
/*
* Register MMU notifier callbacks. If the registration
- * fails, continue but turn off the TID caching for
- * all user contexts.
+ * fails, continue without TID caching for this context.
*/
- ret = hfi1_mmu_rb_register(&fd->tid_rb_root, &tid_rb_ops);
+ ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
+ dd->pport->hfi1_wq,
+ &fd->handler);
if (ret) {
dd_dev_info(dd,
"Failed MMU notifier registration %d\n",
ret);
- HFI1_CAP_USET(TID_UNMAP);
ret = 0;
}
}
@@ -235,7 +235,7 @@ int hfi1_user_exp_rcv_init(struct file *fp)
* init.
*/
spin_lock(&fd->tid_lock);
- if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) {
+ if (uctxt->subctxt_cnt && fd->handler) {
u16 remainder;
fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
@@ -261,18 +261,16 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
* The notifier would have been removed when the process'es mm
* was freed.
*/
- if (!HFI1_CAP_IS_USET(TID_UNMAP))
- hfi1_mmu_rb_unregister(&fd->tid_rb_root);
+ if (fd->handler)
+ hfi1_mmu_rb_unregister(fd->handler);
kfree(fd->invalid_tids);
if (!uctxt->cnt) {
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
- unlock_exp_tids(uctxt, &uctxt->tid_full_list,
- &fd->tid_rb_root);
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
- unlock_exp_tids(uctxt, &uctxt->tid_used_list,
- &fd->tid_rb_root);
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
list) {
list_del_init(&grp->list);
@@ -399,12 +397,12 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
* pages, accept the amount pinned so far and program only that.
* User space knows how to deal with partially programmed buffers.
*/
- if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) {
+ if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
ret = -ENOMEM;
goto bail;
}
- pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages);
+ pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
if (pinned <= 0) {
ret = pinned;
goto bail;
@@ -559,7 +557,7 @@ nomem:
 * for example), unpin all unmapped pages so we can pin them next time.
*/
if (mapped_pages != pinned) {
- hfi1_release_user_pages(current->mm, &pages[mapped_pages],
+ hfi1_release_user_pages(fd->mm, &pages[mapped_pages],
pinned - mapped_pages,
false);
fd->tid_n_pinned -= pinned - mapped_pages;
@@ -829,7 +827,6 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_rb_node *node;
struct hfi1_devdata *dd = uctxt->dd;
- struct rb_root *root = &fd->tid_rb_root;
dma_addr_t phys;
/*
@@ -861,10 +858,10 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
node->freed = false;
memcpy(node->pages, pages, sizeof(struct page *) * npages);
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- ret = mmu_rb_insert(root, &node->mmu);
+ if (!fd->handler)
+ ret = tid_rb_insert(fd, &node->mmu);
else
- ret = hfi1_mmu_rb_insert(root, &node->mmu);
+ ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);
if (ret) {
hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
@@ -904,19 +901,19 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
node = fd->entry_to_rb[rcventry];
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
- else
- hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
if (grp)
*grp = node->grp;
- clear_tid_node(fd, fd->subctxt, node);
+
+ if (!fd->handler)
+ cacheless_tid_rb_remove(fd, node);
+ else
+ hfi1_mmu_rb_remove(fd->handler, &node->mmu);
+
return 0;
}
-static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
- struct tid_rb_node *node)
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
@@ -934,7 +931,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
+ hfi1_release_user_pages(fd->mm, node->pages, node->npages, true);
fd->tid_n_pinned -= node->npages;
node->grp->used--;
@@ -949,12 +946,15 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
kfree(node);
}
+/*
+ * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
+ * clearing nodes in the non-cached case.
+ */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
- struct exp_tid_set *set, struct rb_root *root)
+ struct exp_tid_set *set,
+ struct hfi1_filedata *fd)
{
struct tid_group *grp, *ptr;
- struct hfi1_filedata *fd = container_of(root, struct hfi1_filedata,
- tid_rb_root);
int i;
list_for_each_entry_safe(grp, ptr, &set->list, list) {
@@ -969,22 +969,23 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
uctxt->expected_base];
if (!node || node->rcventry != rcventry)
continue;
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, NULL);
- else
- hfi1_mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu);
- clear_tid_node(fd, -1, node);
+
+ cacheless_tid_rb_remove(fd, node);
}
}
}
}
-static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
+/*
+ * Always return 0 from this function. A non-zero return indicates that the
+ * remove operation will be called and that memory should be unpinned.
+ * However, the driver cannot unpin out from under PSM. Instead, retain the
+ * memory (by returning 0) and inform PSM that the memory is going away. PSM
+ * will call back later when it has removed the memory from its list.
+ */
+static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct hfi1_filedata *fdata = arg;
struct hfi1_ctxtdata *uctxt = fdata->uctxt;
struct tid_rb_node *node =
container_of(mnode, struct tid_rb_node, mmu);
@@ -1025,10 +1026,9 @@ static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
return 0;
}
-static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
+static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct hfi1_filedata *fdata = arg;
struct tid_rb_node *tnode =
container_of(node, struct tid_rb_node, mmu);
u32 base = fdata->uctxt->expected_base;
@@ -1037,14 +1037,20 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
return 0;
}
-static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- struct mm_struct *mm)
+static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode)
{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
u32 base = fdata->uctxt->expected_base;
fdata->entry_to_rb[tnode->rcventry - base] = NULL;
+ clear_tid_node(fdata, tnode);
+}
+
+static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
+{
+ struct hfi1_filedata *fdata = arg;
+ struct tid_rb_node *tnode =
+ container_of(node, struct tid_rb_node, mmu);
+
+ cacheless_tid_rb_remove(fdata, tnode);
}
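
For orientation (illustrative sketch, not part of the patch): with the reworked interface, the mmu_rb callbacks receive the opaque pointer that was handed to hfi1_mmu_rb_register() rather than recovering their owner via container_of() on an embedded rb_root, so a callback simply casts the argument back, as tid_rb_insert() above does.

/* hypothetical callback following the new ops signature used above */
static int example_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fd = arg;	/* pointer given at registration time */
	struct tid_rb_node *tnode = container_of(node, struct tid_rb_node, mmu);

	fd->entry_to_rb[tnode->rcventry - fd->uctxt->expected_base] = tnode;
	return 0;
}
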
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 88e10b5f55f1..20f4ddcac3b0 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -68,7 +68,8 @@ MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
 * could keep caching buffers.
*
*/
-bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ u32 nlocked, u32 npages)
{
unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
size = (cache_size * (1UL << 20)); /* convert to bytes */
@@ -89,9 +90,9 @@ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
/* Convert to number of pages */
size = DIV_ROUND_UP(size, PAGE_SIZE);
- down_read(&current->mm->mmap_sem);
- pinned = current->mm->pinned_vm;
- up_read(&current->mm->mmap_sem);
+ down_read(&mm->mmap_sem);
+ pinned = mm->pinned_vm;
+ up_read(&mm->mmap_sem);
/* First, check the absolute limit against all pinned pages. */
if (pinned + npages >= ulimit && !can_lock)
@@ -100,8 +101,8 @@ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
return ((nlocked + npages) <= size) || can_lock;
}
-int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
- struct page **pages)
+int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
+ bool writable, struct page **pages)
{
int ret;
@@ -109,9 +110,9 @@ int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
if (ret < 0)
return ret;
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm += ret;
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm += ret;
+ up_write(&mm->mmap_sem);
return ret;
}
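
A condensed restatement of the checks in hfi1_can_pin_pages() above (illustrative only; the CAP_IPC_LOCK "can_lock" escape hatch is omitted): the per-mm pinned total is tested against RLIMIT_MEMLOCK, and the cache's own page count against the cache_size module parameter converted from megabytes to pages.

/* hypothetical condensed form of the check, for illustration */
static bool example_can_pin(unsigned long pinned, unsigned long ulimit,
			    u32 nlocked, u32 npages, unsigned long cache_pages)
{
	if (pinned + npages >= ulimit)
		return false;				/* absolute memlock cap */
	return (nlocked + npages) <= cache_pages;	/* per-cache soft cap */
}
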
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 47ffd273ecbd..0ecf27903dc2 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -145,7 +145,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
-#define SDMA_REQ_IN_USE 0
+/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE 2
#define SDMA_REQ_HAVE_AHG 3
@@ -183,16 +183,18 @@ struct user_sdma_iovec {
struct sdma_mmu_node *node;
};
-#define SDMA_CACHE_NODE_EVICT 0
-
struct sdma_mmu_node {
struct mmu_rb_node rb;
- struct list_head list;
struct hfi1_user_sdma_pkt_q *pq;
atomic_t refcount;
struct page **pages;
unsigned npages;
- unsigned long flags;
+};
+
+/* evict operation argument */
+struct evict_data {
+ u32 cleared; /* count evicted so far */
+ u32 target; /* target count to evict */
};
struct user_sdma_request {
@@ -305,14 +307,16 @@ static int defer_packet_queue(
unsigned seq);
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
-static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
-static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
+static int sdma_rb_insert(void *, struct mmu_rb_node *);
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *arg2, bool *stop);
+static void sdma_rb_remove(void *, struct mmu_rb_node *);
+static int sdma_rb_invalidate(void *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
.filter = sdma_rb_filter,
.insert = sdma_rb_insert,
+ .evict = sdma_rb_evict,
.remove = sdma_rb_remove,
.invalidate = sdma_rb_invalidate
};
@@ -397,6 +401,11 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
if (!pq->reqs)
goto pq_reqs_nomem;
+ memsize = BITS_TO_LONGS(hfi1_sdma_comp_ring_size) * sizeof(long);
+ pq->req_in_use = kzalloc(memsize, GFP_KERNEL);
+ if (!pq->req_in_use)
+ goto pq_reqs_no_in_use;
+
INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
@@ -405,9 +414,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
- pq->sdma_rb_root = RB_ROOT;
- INIT_LIST_HEAD(&pq->evict);
- spin_lock_init(&pq->evict_lock);
+ atomic_set(&pq->n_locked, 0);
+ pq->mm = fd->mm;
iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
activate_packet_queue, NULL);
@@ -437,7 +445,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
cq->nentries = hfi1_sdma_comp_ring_size;
fd->cq = cq;
- ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops);
+ ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+ &pq->handler);
if (ret) {
dd_dev_err(dd, "Failed to register with MMU %d", ret);
goto done;
@@ -453,6 +462,8 @@ cq_comps_nomem:
cq_nomem:
kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
+ kfree(pq->req_in_use);
+pq_reqs_no_in_use:
kfree(pq->reqs);
pq_reqs_nomem:
kfree(pq);
@@ -472,8 +483,9 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
uctxt->ctxt, fd->subctxt);
pq = fd->pq;
- hfi1_mmu_rb_unregister(&pq->sdma_rb_root);
if (pq) {
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
if (!list_empty(&pq->list))
list_del_init(&pq->list);
@@ -484,6 +496,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
pq->wait,
(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
+ kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
fd->pq = NULL;
@@ -496,10 +509,31 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
return 0;
}
+static u8 dlid_to_selector(u16 dlid)
+{
+ static u8 mapping[256];
+ static int initialized;
+ static u8 next;
+ int hash;
+
+ if (!initialized) {
+ memset(mapping, 0xFF, 256);
+ initialized = 1;
+ }
+
+ hash = ((dlid >> 8) ^ dlid) & 0xFF;
+ if (mapping[hash] == 0xFF) {
+ mapping[hash] = next;
+ next = (next + 1) & 0x7F;
+ }
+
+ return mapping[hash];
+}
+
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
unsigned long dim, unsigned long *count)
{
- int ret = 0, i = 0;
+ int ret = 0, i;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
@@ -511,6 +545,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
struct user_sdma_request *req;
u8 opcode, sc, vl;
int req_queued = 0;
+ u16 dlid;
+ u8 selector;
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg(
@@ -529,30 +565,48 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
(u16 *)&info);
- if (cq->comps[info.comp_idx].status == QUEUED ||
- test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) {
- hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
- dd->unit, uctxt->ctxt, fd->subctxt,
- info.comp_idx);
- return -EBADSLT;
+
+ if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Invalid comp index",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
+ return -EINVAL;
}
+
+ /*
+ * Sanity check the header io vector count. Need at least 1 vector
+ * (header) and cannot be larger than the actual io vector count.
+ */
+ if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
+ req_iovcnt(info.ctrl), dim);
+ return -EINVAL;
+ }
+
if (!info.fragsize) {
hfi1_cdbg(SDMA,
"[%u:%u:%u:%u] Request does not specify fragsize",
dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
return -EINVAL;
}
+
+ /* Try to claim the request. */
+ if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
+ dd->unit, uctxt->ctxt, fd->subctxt,
+ info.comp_idx);
+ return -EBADSLT;
+ }
/*
- * We've done all the safety checks that we can up to this point,
- * "allocate" the request entry.
+ * All safety checks have been done and this request has been claimed.
*/
hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
uctxt->ctxt, fd->subctxt, info.comp_idx);
req = pq->reqs + info.comp_idx;
memset(req, 0, sizeof(*req));
- /* Mark the request as IN_USE before we start filling it in. */
- set_bit(SDMA_REQ_IN_USE, &req->flags);
- req->data_iovs = req_iovcnt(info.ctrl) - 1;
+ req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
req->pq = pq;
req->cq = cq;
req->status = -1;
@@ -560,13 +614,22 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
memcpy(&req->info, &info, sizeof(info));
- if (req_opcode(info.ctrl) == EXPECTED)
+ if (req_opcode(info.ctrl) == EXPECTED) {
+ /* expected must have a TID info and at least one data vector */
+ if (req->data_iovs < 2) {
+ SDMA_DBG(req,
+ "Not enough vectors for expected request");
+ ret = -EINVAL;
+ goto free_req;
+ }
req->data_iovs--;
+ }
if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
MAX_VECTORS_PER_REQ);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_req;
}
/* Copy the header from the user buffer */
ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
@@ -634,7 +697,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
idx++;
/* Save all the IO vector structures */
- while (i < req->data_iovs) {
+ for (i = 0; i < req->data_iovs; i++) {
INIT_LIST_HEAD(&req->iovs[i].list);
memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
ret = pin_vector_pages(req, &req->iovs[i]);
@@ -642,7 +705,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
req->status = ret;
goto free_req;
}
- req->data_len += req->iovs[i++].iov.iov_len;
+ req->data_len += req->iovs[i].iov.iov_len;
}
SDMA_DBG(req, "total data length %u", req->data_len);
@@ -686,9 +749,13 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
idx++;
}
+ dlid = be16_to_cpu(req->hdr.lrh[1]);
+ selector = dlid_to_selector(dlid);
+
/* Have to select the engine */
req->sde = sdma_select_engine_vl(dd,
- (u32)(uctxt->ctxt + fd->subctxt),
+ (u32)(uctxt->ctxt + fd->subctxt +
+ selector),
vl);
if (!req->sde || !sdma_running(req->sde)) {
ret = -ECOMM;
@@ -766,14 +833,21 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
* The size of the data of the first packet is in the header
* template. However, it includes the header and ICRC, which need
* to be subtracted.
+ * The minimum packet data length representable in a header is 4 bytes;
+ * therefore, when the requested data length is less than 4 bytes, there
+ * is only one packet and its data length equals the request data length.
* The size of the remaining packets is the minimum of the frag
* size (MTU) or remaining data in the request.
*/
u32 len;
if (!req->seqnum) {
- len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
- (sizeof(tx->hdr) - 4));
+ if (req->data_len < sizeof(u32))
+ len = req->data_len;
+ else
+ len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
+ (sizeof(tx->hdr) - 4));
} else if (req_opcode(req->info.ctrl) == EXPECTED) {
u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
PAGE_SIZE;
@@ -803,6 +877,13 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
return len;
}
+static inline u32 pad_len(u32 len)
+{
+ if (len & (sizeof(u32) - 1))
+ len += sizeof(u32) - (len & (sizeof(u32) - 1));
+ return len;
+}
+
static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
/* (Size of complete header - size of PBC) + 4B ICRC + data length */
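pad_len() above rounds a payload length up to the next 4-byte boundary so the LRH length computed from it further down stays dword aligned. The open-coded form is equivalent to the usual (len + 3) & ~3 idiom, as this small stand-alone check demonstrates (sketch only, not driver code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same logic as the pad_len() added above: round up to a multiple of 4. */
    static uint32_t pad_len(uint32_t len)
    {
        if (len & (sizeof(uint32_t) - 1))
            len += sizeof(uint32_t) - (len & (sizeof(uint32_t) - 1));
        return len;
    }

    int main(void)
    {
        for (uint32_t len = 0; len < 64; len++)
            assert(pad_len(len) == ((len + 3u) & ~3u));
        printf("pad_len(5) = %u, pad_len(8) = %u\n", pad_len(5), pad_len(8));
        return 0;
    }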
@@ -894,7 +975,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
if (!req->seqnum) {
u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
- u32 lrhlen = get_lrh_len(req->hdr, datalen);
+ u32 lrhlen = get_lrh_len(req->hdr,
+ pad_len(datalen));
/*
* Copy the request header into the tx header
* because the HW needs a cacheline-aligned
@@ -1048,39 +1130,24 @@ static inline int num_user_pages(const struct iovec *iov)
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
- u32 cleared = 0;
- struct sdma_mmu_node *node, *ptr;
- struct list_head to_evict = LIST_HEAD_INIT(to_evict);
-
- spin_lock(&pq->evict_lock);
- list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) {
- /* Make sure that no one is still using the node. */
- if (!atomic_read(&node->refcount)) {
- set_bit(SDMA_CACHE_NODE_EVICT, &node->flags);
- list_del_init(&node->list);
- list_add(&node->list, &to_evict);
- cleared += node->npages;
- if (cleared >= npages)
- break;
- }
- }
- spin_unlock(&pq->evict_lock);
-
- list_for_each_entry_safe(node, ptr, &to_evict, list)
- hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);
+ struct evict_data evict_data;
- return cleared;
+ evict_data.cleared = 0;
+ evict_data.target = npages;
+ hfi1_mmu_rb_evict(pq->handler, &evict_data);
+ return evict_data.cleared;
}
static int pin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec) {
+ struct user_sdma_iovec *iovec)
+{
int ret = 0, pinned, npages, cleared;
struct page **pages;
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct sdma_mmu_node *node = NULL;
struct mmu_rb_node *rb_node;
- rb_node = hfi1_mmu_rb_extract(&pq->sdma_rb_root,
+ rb_node = hfi1_mmu_rb_extract(pq->handler,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
if (rb_node && !IS_ERR(rb_node))
@@ -1096,7 +1163,6 @@ static int pin_vector_pages(struct user_sdma_request *req,
node->rb.addr = (unsigned long)iovec->iov.iov_base;
node->pq = pq;
atomic_set(&node->refcount, 0);
- INIT_LIST_HEAD(&node->list);
}
npages = num_user_pages(&iovec->iov);
@@ -1111,28 +1177,14 @@ static int pin_vector_pages(struct user_sdma_request *req,
npages -= node->npages;
- /*
- * If rb_node is NULL, it means that this is brand new node
- * and, therefore not on the eviction list.
- * If, however, the rb_node is non-NULL, it means that the
- * node is already in RB tree and, therefore on the eviction
- * list (nodes are unconditionally inserted in the eviction
- * list). In that case, we have to remove the node prior to
- * calling the eviction function in order to prevent it from
- * freeing this node.
- */
- if (rb_node) {
- spin_lock(&pq->evict_lock);
- list_del_init(&node->list);
- spin_unlock(&pq->evict_lock);
- }
retry:
- if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) {
+ if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+ atomic_read(&pq->n_locked), npages)) {
cleared = sdma_cache_evict(pq, npages);
if (cleared >= npages)
goto retry;
}
- pinned = hfi1_acquire_user_pages(
+ pinned = hfi1_acquire_user_pages(pq->mm,
((unsigned long)iovec->iov.iov_base +
(node->npages * PAGE_SIZE)), npages, 0,
pages + node->npages);
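The retry loop above asks the cache to evict unused entries whenever the pin would push the pinned-page count over the allowed budget, and only re-attempts the pin check once enough pages have been cleared. A compact sketch of that shape, with hypothetical can_pin()/evict() helpers standing in for hfi1_can_pin_pages() and sdma_cache_evict() (the numbers are made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define PIN_LIMIT 1024u

    static unsigned int n_locked;          /* pages currently pinned */
    static unsigned int reclaimable = 600; /* idle pages sitting in the cache */

    static bool can_pin(unsigned int npages)
    {
        return n_locked + npages <= PIN_LIMIT;
    }

    /* Evict idle cache entries until roughly npages are freed; returns
     * how many pages were actually cleared. */
    static unsigned int evict(unsigned int npages)
    {
        unsigned int cleared = reclaimable < npages ? reclaimable : npages;
        reclaimable -= cleared;
        n_locked -= cleared;
        return cleared;
    }

    static int pin_pages(unsigned int npages)
    {
        while (!can_pin(npages)) {
            if (evict(npages) == 0)
                return -1;        /* nothing left to reclaim: fail */
        }
        n_locked += npages;
        return 0;
    }

    int main(void)
    {
        n_locked = 900;
        printf("pin 300: %s (n_locked=%u)\n",
               pin_pages(300) ? "failed" : "ok", n_locked);
        return 0;
    }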
@@ -1142,7 +1194,7 @@ retry:
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, node->npages,
+ unpin_vector_pages(pq->mm, pages, node->npages,
pinned);
ret = -EFAULT;
goto bail;
@@ -1152,28 +1204,22 @@ retry:
node->pages = pages;
node->npages += pinned;
npages = node->npages;
- spin_lock(&pq->evict_lock);
- list_add(&node->list, &pq->evict);
- pq->n_locked += pinned;
- spin_unlock(&pq->evict_lock);
+ atomic_add(pinned, &pq->n_locked);
}
iovec->pages = node->pages;
iovec->npages = npages;
iovec->node = node;
- ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
+ ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
if (ret) {
- spin_lock(&pq->evict_lock);
- if (!list_empty(&node->list))
- list_del(&node->list);
- pq->n_locked -= node->npages;
- spin_unlock(&pq->evict_lock);
+ atomic_sub(node->npages, &pq->n_locked);
+ iovec->node = NULL;
goto bail;
}
return 0;
bail:
if (rb_node)
- unpin_vector_pages(current->mm, node->pages, 0, node->npages);
+ unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
kfree(node);
return ret;
}
@@ -1181,7 +1227,7 @@ bail:
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
unsigned start, unsigned npages)
{
- hfi1_release_user_pages(mm, pages + start, npages, 0);
+ hfi1_release_user_pages(mm, pages + start, npages, false);
kfree(pages);
}
@@ -1192,16 +1238,14 @@ static int check_header_template(struct user_sdma_request *req,
/*
* Perform safety checks for any type of packet:
* - transfer size is multiple of 64bytes
- * - packet length is multiple of 4bytes
- * - entire request length is multiple of 4bytes
+ * - packet length is multiple of 4 bytes
* - packet length is not larger than MTU size
*
* These checks are only done for the first packet of the
* transfer since the header is "given" to us by user space.
* For the remainder of the packets we compute the values.
*/
- if (req->info.fragsize % PIO_BLOCK_SIZE ||
- lrhlen & 0x3 || req->data_len & 0x3 ||
+ if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
lrhlen > get_lrh_len(*hdr, req->info.fragsize))
return -EINVAL;
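The check above was relaxed to drop the requirement that the whole request length be 4-byte aligned (the new padding handles that), while still insisting that the fragment size is a multiple of the PIO block and that the header-derived packet length is dword aligned and no larger than one fragment. A stand-alone sketch of those three checks; the constant and names are chosen for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define PIO_BLOCK_SIZE 64u   /* illustrative value */

    /* lrh_len: packet length derived from the user-supplied header,
     * frag_max: largest packet length allowed for this fragsize. */
    static int check_first_packet(uint32_t fragsize, uint32_t lrh_len,
                                  uint32_t frag_max)
    {
        if (fragsize % PIO_BLOCK_SIZE) /* transfer size multiple of 64 bytes */
            return -1;
        if (lrh_len & 0x3)             /* packet length multiple of 4 bytes */
            return -1;
        if (lrh_len > frag_max)        /* packet no larger than one fragment */
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("ok:  %d\n", check_first_packet(8192, 4096, 8192 + 32));
        printf("bad: %d\n", check_first_packet(100, 4096, 8192));
        return 0;
    }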
@@ -1263,7 +1307,7 @@ static int set_txreq_header(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr = &tx->hdr;
u16 pbclen;
int ret;
- u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);
+ u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
/* Copy the header template to the request before modification */
memcpy(hdr, &req->hdr, sizeof(*hdr));
@@ -1374,7 +1418,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
- u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);
+ u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len));
if (PBC2LRH(pbclen) != lrhlen) {
/* PBC.PbcLengthDWs */
@@ -1534,14 +1578,14 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
continue;
if (unpin)
- hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
+ hfi1_mmu_rb_remove(req->pq->handler,
&node->rb);
else
atomic_dec(&node->refcount);
}
}
kfree(req->tids);
- clear_bit(SDMA_REQ_IN_USE, &req->flags);
+ clear_bit(req->info.comp_idx, req->pq->req_in_use);
}
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
@@ -1564,7 +1608,7 @@ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
return (bool)(node->addr == addr);
}
-static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
+static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
@@ -1573,48 +1617,45 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
return 0;
}
-static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- struct mm_struct *mm)
+/*
+ * Return 1 to remove the node from the rb tree and call the remove op.
+ *
+ * Called with the rb tree lock held.
+ */
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+ struct evict_data *evict_data = evict_arg;
+
+ /* is this node still being used? */
+ if (atomic_read(&node->refcount))
+ return 0; /* keep this node */
+
+ /* this node will be evicted, add its pages to our count */
+ evict_data->cleared += node->npages;
+
+ /* have enough pages been cleared? */
+ if (evict_data->cleared >= evict_data->target)
+ *stop = true;
+
+ return 1; /* remove this node */
+}
+
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
- spin_lock(&node->pq->evict_lock);
- /*
- * We've been called by the MMU notifier but this node has been
- * scheduled for eviction. The eviction function will take care
- * of freeing this node.
- * We have to take the above lock first because we are racing
- * against the setting of the bit in the eviction function.
- */
- if (mm && test_bit(SDMA_CACHE_NODE_EVICT, &node->flags)) {
- spin_unlock(&node->pq->evict_lock);
- return;
- }
+ atomic_sub(node->npages, &node->pq->n_locked);
- if (!list_empty(&node->list))
- list_del(&node->list);
- node->pq->n_locked -= node->npages;
- spin_unlock(&node->pq->evict_lock);
+ unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
- /*
- * If mm is set, we are being called by the MMU notifier and we
- * should not pass a mm_struct to unpin_vector_page(). This is to
- * prevent a deadlock when hfi1_release_user_pages() attempts to
- * take the mmap_sem, which the MMU notifier has already taken.
- */
- unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
- node->npages);
- /*
- * If called by the MMU notifier, we have to adjust the pinned
- * page count ourselves.
- */
- if (mm)
- mm->pinned_vm -= node->npages;
kfree(node);
}
-static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
+static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
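sdma_rb_evict() above turns the old hand-rolled eviction-list walk into a callback driven by the generic MMU rb-tree code: return 0 to keep a busy node, 1 to evict it, and set *stop once the requested number of pages has been cleared. A small sketch of that walk-with-callback shape over a plain array, using generic names rather than the driver's API:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        int refcount;
        unsigned int npages;
    };

    struct evict_data {
        unsigned int cleared;
        unsigned int target;
    };

    /* Mirror of the callback contract: 0 = keep, 1 = evict, *stop ends the walk. */
    static int evict_cb(struct node *n, struct evict_data *ed, bool *stop)
    {
        if (n->refcount)
            return 0;
        ed->cleared += n->npages;
        if (ed->cleared >= ed->target)
            *stop = true;
        return 1;
    }

    static unsigned int evict(struct node *nodes, int count, unsigned int target)
    {
        struct evict_data ed = { .cleared = 0, .target = target };
        bool stop = false;

        for (int i = 0; i < count && !stop; i++) {
            if (evict_cb(&nodes[i], &ed, &stop))
                nodes[i].npages = 0;    /* "remove" the node */
        }
        return ed.cleared;
    }

    int main(void)
    {
        struct node nodes[] = {
            { .refcount = 1, .npages = 8 },   /* busy: kept */
            { .refcount = 0, .npages = 16 },
            { .refcount = 0, .npages = 32 },
        };
        printf("cleared %u pages\n", evict(nodes, 3, 20));
        return 0;
    }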
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index b9240e351161..39001714f551 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -63,14 +63,14 @@ struct hfi1_user_sdma_pkt_q {
struct hfi1_devdata *dd;
struct kmem_cache *txreq_cache;
struct user_sdma_request *reqs;
+ unsigned long *req_in_use;
struct iowait busy;
unsigned state;
wait_queue_head_t wait;
unsigned long unpinned;
- struct rb_root sdma_rb_root;
- u32 n_locked;
- struct list_head evict;
- spinlock_t evict_lock; /* protect evict and n_locked */
+ struct mmu_rb_handler *handler;
+ atomic_t n_locked;
+ struct mm_struct *mm;
};
struct hfi1_user_sdma_comp_q {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 849c4b9399d4..2b359540901d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -306,7 +306,10 @@ const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+ [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+ [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [IB_WR_REG_MR] = IB_WC_REG_MR
};
/*
@@ -378,6 +381,8 @@ static const opcode_handler opcode_handler_tbl[256] = {
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
[IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
@@ -540,19 +545,15 @@ void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
-static inline int qp_ok(int opcode, struct hfi1_packet *packet)
+static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet)
{
- struct hfi1_ibport *ibp;
-
if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
- goto dropit;
+ return NULL;
if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
(opcode == IB_OPCODE_CNP))
- return 1;
-dropit:
- ibp = &packet->rcd->ppd->ibport_data;
- ibp->rvp.n_pkt_drops++;
- return 0;
+ return opcode_handler_tbl[opcode];
+
+ return NULL;
}
/**
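qp_ok() now returns the handler pointer itself (or NULL) instead of a boolean, so the caller looks the opcode table up exactly once and bumps the drop counter in a single place. A minimal sketch of that lookup-or-NULL dispatch pattern; the opcodes and handlers here are invented for illustration:

    #include <stdio.h>

    typedef void (*pkt_handler)(int opcode);

    static void handle_send(int opcode) { printf("send, opcode %d\n", opcode); }
    static void handle_read(int opcode) { printf("read, opcode %d\n", opcode); }

    /* Sparse table: unsupported opcodes stay NULL. */
    static const pkt_handler handlers[256] = {
        [0x04] = handle_send,
        [0x0c] = handle_read,
    };

    static unsigned long n_pkt_drops;

    /* Return the handler if the packet may be processed, NULL otherwise. */
    static pkt_handler qp_ok(int opcode, int qp_ready)
    {
        if (!qp_ready)
            return NULL;
        return handlers[opcode & 0xff];
    }

    static void receive(int opcode, int qp_ready)
    {
        pkt_handler h = qp_ok(opcode, qp_ready);

        if (h)
            h(opcode);
        else
            n_pkt_drops++;      /* single place to count drops */
    }

    int main(void)
    {
        receive(0x04, 1);
        receive(0x55, 1);       /* no handler: dropped */
        receive(0x0c, 0);       /* QP not ready: dropped */
        printf("drops: %lu\n", n_pkt_drops);
        return 0;
    }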
@@ -571,6 +572,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler packet_handler;
unsigned long flags;
u32 qp_num;
int lnh;
@@ -616,8 +618,11 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
list_for_each_entry_rcu(p, &mcast->qp_list, list) {
packet->qp = p->qp;
spin_lock_irqsave(&packet->qp->r_lock, flags);
- if (likely((qp_ok(opcode, packet))))
- opcode_handler_tbl[opcode](packet);
+ packet_handler = qp_ok(opcode, packet);
+ if (likely(packet_handler))
+ packet_handler(packet);
+ else
+ ibp->rvp.n_pkt_drops++;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
}
/*
@@ -634,8 +639,11 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
goto drop;
}
spin_lock_irqsave(&packet->qp->r_lock, flags);
- if (likely((qp_ok(opcode, packet))))
- opcode_handler_tbl[opcode](packet);
+ packet_handler = qp_ok(opcode, packet);
+ if (likely(packet_handler))
+ packet_handler(packet);
+ else
+ ibp->rvp.n_pkt_drops++;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
rcu_read_unlock();
}
@@ -808,19 +816,19 @@ static int build_verbs_tx_desc(
struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx,
- struct ahg_ib_header *ahdr,
+ struct hfi1_ahg_info *ahg_info,
u64 pbc)
{
int ret = 0;
- struct hfi1_pio_header *phdr = &tx->phdr;
+ struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
- if (!ahdr->ahgcount) {
+ if (!ahg_info->ahgcount) {
ret = sdma_txinit_ahg(
&tx->txreq,
- ahdr->tx_flags,
+ ahg_info->tx_flags,
hdrbytes + length,
- ahdr->ahgidx,
+ ahg_info->ahgidx,
0,
NULL,
0,
@@ -838,11 +846,11 @@ static int build_verbs_tx_desc(
} else {
ret = sdma_txinit_ahg(
&tx->txreq,
- ahdr->tx_flags,
+ ahg_info->tx_flags,
length,
- ahdr->ahgidx,
- ahdr->ahgcount,
- ahdr->ahgdesc,
+ ahg_info->ahgidx,
+ ahg_info->ahgcount,
+ ahg_info->ahgdesc,
hdrbytes,
verbs_sdma_complete);
if (ret)
@@ -860,7 +868,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct ahg_ib_header *ahdr = priv->s_hdr;
+ struct hfi1_ahg_info *ahg_info = priv->s_ahg;
u32 hdrwords = qp->s_hdrwords;
struct rvt_sge_state *ss = qp->s_cur_sge;
u32 len = qp->s_cur_size;
@@ -888,7 +896,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
plen);
}
tx->wqe = qp->s_wqe;
- ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
+ ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc);
if (unlikely(ret))
goto bail_build;
}
@@ -1291,19 +1299,24 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ u16 ver = dd->dc8051_ver;
memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
+ rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 16) |
+ (u64)dc8051_ver_min(ver);
rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device;
rdi->dparms.props.hw_ver = dd->minrev;
rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
- rdi->dparms.props.max_mr_size = ~0ULL;
+ rdi->dparms.props.max_mr_size = U64_MAX;
+ rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
rdi->dparms.props.max_qp = hfi1_max_qps;
rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
rdi->dparms.props.max_sge = hfi1_max_sges;
@@ -1567,6 +1580,17 @@ static void init_ibport(struct hfi1_pportdata *ppd)
RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
+static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct hfi1_ibdev *dev = dev_from_rdi(rdi);
+ u16 ver = dd_from_dev(dev)->dc8051_ver;
+
+ snprintf(str, str_len, "%u.%u", dc8051_ver_maj(ver),
+ dc8051_ver_min(ver));
+}
+
/**
* hfi1_register_ib_device - register our device with the infiniband core
* @dd: the device data structure
@@ -1613,6 +1637,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
/* keep process mad in the driver */
ibdev->process_mad = hfi1_process_mad;
+ ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
strncpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
@@ -1680,6 +1705,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+ /* post send table */
+ dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
+
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++)
rvt_init_port(&dd->verbs_dev.rdi,
@@ -1730,8 +1758,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
struct rvt_qp *qp = packet->qp;
u32 lqpn, rqpn = 0;
u16 rlid = 0;
- u8 sl, sc5, sc4_bit, svc_type;
- bool sc4_set = has_sc4_bit(packet);
+ u8 sl, sc5, svc_type;
switch (packet->qp->ibqp.qp_type) {
case IB_QPT_UC:
@@ -1754,9 +1781,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
return;
}
- sc4_bit = sc4_set << 4;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
+ sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
lqpn = qp->ibqp.qp_num;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 488356775627..d1b101c54828 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -178,16 +178,14 @@ struct hfi1_ib_header {
} u;
} __packed;
-struct ahg_ib_header {
- struct sdma_engine *sde;
+struct hfi1_ahg_info {
u32 ahgdesc[2];
u16 tx_flags;
u8 ahgcount;
u8 ahgidx;
- struct hfi1_ib_header ibh;
};
-struct hfi1_pio_header {
+struct hfi1_sdma_header {
__le64 pbc;
struct hfi1_ib_header hdr;
} __packed;
@@ -197,7 +195,7 @@ struct hfi1_pio_header {
* pair is made common
*/
struct hfi1_qp_priv {
- struct ahg_ib_header *s_hdr; /* next header to send */
+ struct hfi1_ahg_info *s_ahg; /* ahg info for next header */
struct sdma_engine *s_sde; /* current sde */
struct send_context *s_sendcontext; /* current sendcontext */
u8 s_sc; /* SC[0..4] for next packet */
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index a1d6e0807f97..5660897593ba 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -56,7 +56,7 @@
#include "iowait.h"
struct verbs_txreq {
- struct hfi1_pio_header phdr;
+ struct hfi1_sdma_header phdr;
struct sdma_txreq txreq;
struct rvt_qp *qp;
struct rvt_swqe *wqe;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index d2fa72516960..5026dc79978a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1567,12 +1567,12 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
ret = i40iw_manage_qhash(iwdev, cm_info,
I40IW_QHASH_TYPE_TCP_SYN,
I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
- kfree(child_listen_node);
- cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"freed pointer = %p\n",
child_listen_node);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
}
spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index bd942da91a27..2fac1db0e0a0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1557,6 +1557,9 @@ enum i40iw_alignment {
#define I40IW_RING_MOVE_TAIL(_ring) \
(_ring).tail = ((_ring).tail + 1) % (_ring).size
+#define I40IW_RING_MOVE_HEAD_NOCHECK(_ring) \
+ (_ring).head = ((_ring).head + 1) % (_ring).size
+
#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
(_ring).tail = ((_ring).tail + (_count)) % (_ring).size
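I40IW_RING_MOVE_HEAD_NOCHECK added above advances the head with a plain modulo and no fullness check; the checked I40IW_RING_MOVE_HEAD (not shown in this hunk) also guards against overrunning the ring, which the CQ polling rework further down no longer needs. A stand-alone sketch of the two flavours, with the struct and the fullness test invented for illustration:

    #include <stdio.h>

    struct ring {
        unsigned int head;
        unsigned int tail;
        unsigned int size;
    };

    /* Checked move: refuse to advance if the ring would overflow. */
    static int ring_move_head(struct ring *r)
    {
        if ((r->head + 1) % r->size == r->tail)
            return -1;                     /* ring full */
        r->head = (r->head + 1) % r->size;
        return 0;
    }

    /* Unchecked move: the caller already knows the advance is safe. */
    static void ring_move_head_nocheck(struct ring *r)
    {
        r->head = (r->head + 1) % r->size;
    }

    int main(void)
    {
        struct ring r = { .head = 6, .tail = 0, .size = 8 };

        printf("checked:   %d, head=%u\n", ring_move_head(&r), r.head);
        ring_move_head_nocheck(&r);
        printf("unchecked: head=%u\n", r.head);
        return 0;
    }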
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index e9c6e82af9c7..c62d354f7810 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1025,6 +1025,8 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
u16 txoffset, bufoffset;
buf = i40iw_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
nextseqnum = buf->seqnum + fpdu_len;
txbuf->totallen = buf->hdrlen + fpdu_len;
txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
@@ -1048,6 +1050,8 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
fpdu_len -= buf->datalen;
i40iw_puda_ret_bufpool(ieq, buf);
buf = i40iw_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
} while (1);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 16cc61720b53..2b1a04e9ca3c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -667,7 +667,7 @@ struct i40iw_tcp_offload_info {
bool time_stamp;
u8 cwnd_inc_limit;
bool drop_ooo_seg;
- bool dup_ack_thresh;
+ u8 dup_ack_thresh;
u8 ttl;
u8 src_mac_addr_idx;
bool avoid_stretch_ack;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index e35faea88c13..4d28c3cb03cc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -291,9 +291,9 @@ static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
- for (i = 1; i < op_info->num_lo_sges; i++) {
- byte_off = 32 + (i - 1) * 16;
+ for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
+ byte_off += 16;
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -401,9 +401,9 @@ static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
i40iw_set_fragment(wqe, 0, op_info->sg_list);
- for (i = 1; i < op_info->num_sges; i++) {
- byte_off = 32 + (i - 1) * 16;
+ for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
+ byte_off += 16;
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -685,9 +685,9 @@ static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
i40iw_set_fragment(wqe, 0, info->sg_list);
- for (i = 1; i < info->num_sges; i++) {
- byte_off = 32 + (i - 1) * 16;
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
+ byte_off += 16;
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -753,8 +753,7 @@ static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
* @post_cq: update cq tail
*/
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
- struct i40iw_cq_poll_info *info,
- bool post_cq)
+ struct i40iw_cq_poll_info *info)
{
u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
u64 *cqe, *sw_wqe;
@@ -762,7 +761,6 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
struct i40iw_ring *pring = NULL;
u32 wqe_idx, q_type, array_idx = 0;
enum i40iw_status_code ret_code = 0;
- enum i40iw_status_code ret_code2 = 0;
bool move_cq_head = true;
u8 polarity;
u8 addl_wqes = 0;
@@ -870,19 +868,14 @@ exit:
move_cq_head = false;
if (move_cq_head) {
- I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);
-
- if (ret_code2 && !ret_code)
- ret_code = ret_code2;
+ I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
cq->polarity ^= 1;
- if (post_cq) {
- I40IW_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, 0,
- I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
- }
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
} else {
if (info->is_srq)
return ret_code;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 4627646fe8cd..276bcefffd7e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -327,7 +327,7 @@ struct i40iw_cq_ops {
void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
enum i40iw_completion_notify);
enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
- struct i40iw_cq_poll_info *, bool);
+ struct i40iw_cq_poll_info *);
enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 283b64c942ee..2360338877bf 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -529,7 +529,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
if (status)
- return -ENOSYS;
+ return -ENOMEM;
sqdepth = sq_size << sqshift;
rqdepth = rq_size << rqshift;
@@ -671,7 +671,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
if (init_attr->qp_type != IB_QPT_RC) {
- err_code = -ENOSYS;
+ err_code = -EINVAL;
goto error;
}
if (iwdev->push_mode)
@@ -1840,6 +1840,7 @@ struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
iwmr->ibmr.lkey = stag;
iwmr->page_cnt = 1;
iwmr->pgaddrmem[0] = addr;
+ iwmr->length = size;
status = i40iw_hwreg_mr(iwdev, iwmr, access);
if (status) {
i40iw_free_stag(iwdev, stag);
@@ -1863,7 +1864,7 @@ static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
{
u64 kva = 0;
- return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+ return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
}
/**
@@ -1975,18 +1976,6 @@ static ssize_t i40iw_show_rev(struct device *dev,
}
/**
- * i40iw_show_fw_ver
- */
-static ssize_t i40iw_show_fw_ver(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u32 firmware_version = I40IW_FW_VERSION;
-
- return sprintf(buf, "%u.%u\n", firmware_version,
- (firmware_version & 0x000000ff));
-}
-
-/**
* i40iw_show_hca
*/
static ssize_t i40iw_show_hca(struct device *dev,
@@ -2006,13 +1995,11 @@ static ssize_t i40iw_show_board(struct device *dev,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
static struct device_attribute *i40iw_dev_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
@@ -2091,8 +2078,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
}
- if (ret)
- err = -EIO;
+ if (ret) {
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
break;
case IB_WR_RDMA_WRITE:
info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
@@ -2113,8 +2104,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
}
- if (ret)
- err = -EIO;
+ if (ret) {
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
break;
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
@@ -2132,15 +2127,19 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
- if (ret)
- err = -EIO;
+ if (ret) {
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
break;
case IB_WR_LOCAL_INV:
info.op_type = I40IW_OP_TYPE_INV_STAG;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
if (ret)
- err = -EIO;
+ err = -ENOMEM;
break;
case IB_WR_REG_MR:
{
@@ -2174,7 +2173,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
if (ret)
- err = -EIO;
+ err = -ENOMEM;
break;
}
default:
@@ -2214,6 +2213,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
enum i40iw_status_code ret = 0;
unsigned long flags;
+ int err = 0;
iwqp = (struct i40iw_qp *)ibqp;
ukqp = &iwqp->sc_qp.qp_uk;
@@ -2228,6 +2228,10 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
if (ret) {
i40iw_pr_err(" post_recv err %d\n", ret);
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
*bad_wr = ib_wr;
goto out;
}
@@ -2235,9 +2239,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
}
out:
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (ret)
- return -ENOSYS;
- return 0;
+ return err;
}
/**
@@ -2264,7 +2266,7 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
spin_lock_irqsave(&iwcq->lock, flags);
while (cqe_count < num_entries) {
- ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+ ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
if (ret == I40IW_ERR_QUEUE_EMPTY) {
break;
} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
@@ -2437,6 +2439,15 @@ static const char * const i40iw_hw_stat_names[] = {
"iwRdmaInv"
};
+static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
+ size_t str_len)
+{
+ u32 firmware_version = I40IW_FW_VERSION;
+
+ snprintf(str, str_len, "%u.%u", firmware_version,
+ (firmware_version & 0x000000ff));
+}
+
/**
* i40iw_alloc_hw_stats - Allocate a hw stats structure
* @ibdev: device pointer from stack
@@ -2528,7 +2539,7 @@ static int i40iw_modify_port(struct ib_device *ibdev,
int port_modify_mask,
struct ib_port_modify *props)
{
- return 0;
+ return -ENOSYS;
}
/**
@@ -2660,6 +2671,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
sizeof(iwibdev->ibdev.iwcm->ifname));
iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
+ iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
iwibdev->ibdev.poll_cq = i40iw_poll_cq;
iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
iwibdev->ibdev.post_send = i40iw_post_send;
@@ -2723,7 +2735,7 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
if (!iwdev->iwibdev)
- return -ENOSYS;
+ return -ENOMEM;
iwibdev = iwdev->iwibdev;
ret = ib_register_device(&iwibdev->ibdev, NULL);
@@ -2748,5 +2760,5 @@ error:
kfree(iwdev->iwibdev->ibdev.iwcm);
iwdev->iwibdev->ibdev.iwcm = NULL;
ib_dealloc_device(&iwdev->iwibdev->ibdev);
- return -ENOSYS;
+ return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9f8b516eb2b0..d6fc8a6e8c33 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -288,7 +288,7 @@ static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
if (cq->resize_buf)
return -EBUSY;
- cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
@@ -316,7 +316,7 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return -EFAULT;
- cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 42a46078d7d5..2af44c2de262 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2025,16 +2025,6 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
- (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
- (int) dev->dev->caps.fw_ver & 0xffff);
-}
-
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -2053,17 +2043,204 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
+struct diag_counter {
+ const char *name;
+ u32 offset;
+};
+
+#define DIAG_COUNTER(_name, _offset) \
+ { .name = #_name, .offset = _offset }
+
+static const struct diag_counter diag_basic[] = {
+ DIAG_COUNTER(rq_num_lle, 0x00),
+ DIAG_COUNTER(sq_num_lle, 0x04),
+ DIAG_COUNTER(rq_num_lqpoe, 0x08),
+ DIAG_COUNTER(sq_num_lqpoe, 0x0C),
+ DIAG_COUNTER(rq_num_lpe, 0x18),
+ DIAG_COUNTER(sq_num_lpe, 0x1C),
+ DIAG_COUNTER(rq_num_wrfe, 0x20),
+ DIAG_COUNTER(sq_num_wrfe, 0x24),
+ DIAG_COUNTER(sq_num_mwbe, 0x2C),
+ DIAG_COUNTER(sq_num_bre, 0x34),
+ DIAG_COUNTER(sq_num_rire, 0x44),
+ DIAG_COUNTER(rq_num_rire, 0x48),
+ DIAG_COUNTER(sq_num_rae, 0x4C),
+ DIAG_COUNTER(rq_num_rae, 0x50),
+ DIAG_COUNTER(sq_num_roe, 0x54),
+ DIAG_COUNTER(sq_num_tree, 0x5C),
+ DIAG_COUNTER(sq_num_rree, 0x64),
+ DIAG_COUNTER(rq_num_rnr, 0x68),
+ DIAG_COUNTER(sq_num_rnr, 0x6C),
+ DIAG_COUNTER(rq_num_oos, 0x100),
+ DIAG_COUNTER(sq_num_oos, 0x104),
+};
+
+static const struct diag_counter diag_ext[] = {
+ DIAG_COUNTER(rq_num_dup, 0x130),
+ DIAG_COUNTER(sq_num_to, 0x134),
+};
+
+static const struct diag_counter diag_device_only[] = {
+ DIAG_COUNTER(num_cqovf, 0x1A0),
+ DIAG_COUNTER(rq_num_udsdprd, 0x118),
+};
+
+static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
+ if (!diag[!!port_num].name)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
+ diag[!!port_num].num_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+ u32 hw_value[ARRAY_SIZE(diag_device_only) +
+ ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
+ int ret;
+ int i;
+
+ ret = mlx4_query_diag_counters(dev->dev,
+ MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
+ diag[!!port].offset, hw_value,
+ diag[!!port].num_counters, port);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < diag[!!port].num_counters; i++)
+ stats->value[i] = hw_value[i];
+
+ return diag[!!port].num_counters;
+}
+
+static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
+ const char ***name,
+ u32 **offset,
+ u32 *num,
+ bool port)
+{
+ u32 num_counters;
+
+ num_counters = ARRAY_SIZE(diag_basic);
+
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
+ num_counters += ARRAY_SIZE(diag_ext);
+
+ if (!port)
+ num_counters += ARRAY_SIZE(diag_device_only);
+
+ *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
+ if (!*name)
+ return -ENOMEM;
+
+ *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
+ if (!*offset)
+ goto err_name;
+
+ *num = num_counters;
+
+ return 0;
+
+err_name:
+ kfree(*name);
+ return -ENOMEM;
+}
+
+static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
+ const char **name,
+ u32 *offset,
+ bool port)
+{
+ int i;
+ int j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
+ name[i] = diag_basic[i].name;
+ offset[i] = diag_basic[i].offset;
+ }
+
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
+ for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
+ name[j] = diag_ext[i].name;
+ offset[j] = diag_ext[i].offset;
+ }
+ }
+
+ if (!port) {
+ for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
+ name[j] = diag_device_only[i].name;
+ offset[j] = diag_device_only[i].offset;
+ }
+ }
+}
+
+static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
+{
+ struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
+ int i;
+ int ret;
+ bool per_port = !!(ibdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+
+ for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+ /* i == 1 means we are building port counters */
+ if (i && !per_port)
+ continue;
+
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+ &diag[i].offset,
+ &diag[i].num_counters, i);
+ if (ret)
+ goto err_alloc;
+
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+ diag[i].offset, i);
+ }
+
+ ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
+ ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
+
+ return 0;
+
+err_alloc:
+ if (i) {
+ kfree(diag[i - 1].name);
+ kfree(diag[i - 1].offset);
+ }
+
+ return ret;
+}
+
+static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
+{
+ int i;
+
+ for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+ kfree(ibdev->diag_counters[i].offset);
+ kfree(ibdev->diag_counters[i].name);
+ }
+}
+
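The DIAG_COUNTER() macro above stringizes the counter name, so the same identifier serves both as the hw_stats label and as the table entry, and the basic/extended/device-only tables are then concatenated at allocation time. A tiny stand-alone sketch of that stringizing-table pattern (a few entries copied for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct diag_counter {
        const char *name;
        uint32_t offset;
    };

    /* #_name turns the bare identifier into its string form. */
    #define DIAG_COUNTER(_name, _offset) \
        { .name = #_name, .offset = _offset }

    static const struct diag_counter diag_basic[] = {
        DIAG_COUNTER(rq_num_lle, 0x00),
        DIAG_COUNTER(sq_num_lle, 0x04),
        DIAG_COUNTER(rq_num_oos, 0x100),
    };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        for (size_t i = 0; i < ARRAY_SIZE(diag_basic); i++)
            printf("%-12s at offset 0x%02x\n",
                   diag_basic[i].name, (unsigned int)diag_basic[i].offset);
        return 0;
    }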
#define MLX4_IB_INVALID_MAC ((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
struct net_device *dev,
@@ -2280,6 +2457,17 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_fw_ver_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct mlx4_ib_dev *dev =
+ container_of(device, struct mlx4_ib_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%d",
+ (int) (dev->dev->caps.fw_ver >> 32),
+ (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->dev->caps.fw_ver & 0xffff);
+}
+
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
struct mlx4_ib_dev *ibdev;
@@ -2413,6 +2601,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
+ ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
if (!mlx4_is_slave(ibdev->dev)) {
@@ -2555,9 +2744,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
+ if (ib_register_device(&ibdev->ib_dev, NULL))
+ goto err_diag_counters;
+
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -2623,6 +2815,9 @@ err_mad:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_diag_counters:
+ mlx4_ib_diag_cleanup(ibdev);
+
err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
@@ -2726,6 +2921,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
+ mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 29acda249612..7c5832ede4bd 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -549,6 +549,14 @@ struct mlx4_ib_counters {
u32 default_counter;
};
+#define MLX4_DIAG_COUNTERS_TYPES 2
+
+struct mlx4_ib_diag_counters {
+ const char **name;
+ u32 *offset;
+ u32 num_counters;
+};
+
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@@ -585,6 +593,7 @@ struct mlx4_ib_dev {
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
+ struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
};
struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 9c0e67bd2ba7..308a358e5b46 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -424,6 +424,83 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
item->key = be32_to_cpu(cqe->mkey);
}
+static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned int cur;
+ unsigned int idx;
+ int np;
+ int i;
+
+ wq = &qp->sq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ idx = wq->last_poll & (wq->wqe_cnt - 1);
+ wc->wr_id = wq->wrid[idx];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ wq->last_poll = wq->w_list[idx].next;
+ }
+ *npolled = np;
+}
+
+static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned int cur;
+ int np;
+ int i;
+
+ wq = &qp->rq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+ *npolled = np;
+}
+
+static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_qp *qp;
+
+ *npolled = 0;
+ /* Find uncompleted WQEs belonging to that CQ and return mimicked (flush) completions for them */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ sw_send_comp(qp, num_entries, wc + *npolled, npolled);
+ if (*npolled >= num_entries)
+ return;
+ }
+
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
+ if (*npolled >= num_entries)
+ return;
+ }
+}
+
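sw_send_comp()/sw_recv_comp() above synthesize IB_WC_WR_FLUSH_ERR completions for whatever is still outstanding between tail and head once the device has hit an internal error, so poll_cq keeps returning sensible work completions instead of stalling. A simplified sketch of draining such a head/tail work queue into a completion array; the structures and status value are invented for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_CNT 8u                    /* power of two, like wqe_cnt */
    #define WC_FLUSH_ERR 5                /* stand-in for IB_WC_WR_FLUSH_ERR */

    struct wq {
        unsigned int head, tail;          /* monotonically increasing */
        uint64_t wrid[WQE_CNT];
    };

    struct wc {
        uint64_t wr_id;
        int      status;
    };

    /* Flush up to num_entries outstanding WQEs into wc[], return how many. */
    static int sw_flush_comp(struct wq *wq, struct wc *wc, int num_entries)
    {
        unsigned int cur = wq->head - wq->tail;   /* outstanding WQEs */
        int np = 0;

        for (unsigned int i = 0; i < cur && np < num_entries; i++) {
            wc[np].wr_id  = wq->wrid[wq->tail & (WQE_CNT - 1)];
            wc[np].status = WC_FLUSH_ERR;
            wq->tail++;
            np++;
        }
        return np;
    }

    int main(void)
    {
        struct wq wq = { .head = 3, .tail = 0, .wrid = { 100, 101, 102 } };
        struct wc wc[4];
        int n = sw_flush_comp(&wq, wc, 4);

        for (int i = 0; i < n; i++)
            printf("wr_id %llu flushed (status %d)\n",
                   (unsigned long long)wc[i].wr_id, wc[i].status);
        return 0;
    }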
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -594,12 +671,18 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL;
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int soft_polled = 0;
int npolled;
int err = 0;
spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ goto out;
+ }
if (unlikely(!list_empty(&cq->wc_list)))
soft_polled = poll_soft_wc(cq, num_entries, wc);
@@ -612,7 +695,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
if (npolled)
mlx5_cq_set_ci(&cq->mcq);
-
+out:
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
@@ -843,6 +926,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->resize_buf = NULL;
cq->resize_umem = NULL;
cq->create_flags = attr->flags;
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
if (context) {
err = create_cq_user(dev, udata, context, cq, entries,
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 53e03c8ede79..79e6309460dc 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -69,15 +69,6 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
}
-static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
-{
- return ++index % gsi->cap.max_send_wr;
-}
-
-#define for_each_outstanding_wr(gsi, index) \
- for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
- index = next_outstanding(gsi, index))
-
/* Call with gsi->lock locked */
static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
{
@@ -85,8 +76,9 @@ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
struct mlx5_ib_gsi_wr *wr;
u32 index;
- for_each_outstanding_wr(gsi, index) {
- wr = &gsi->outstanding_wrs[index];
+ for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
+ index++) {
+ wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
if (!wr->completed)
break;
@@ -430,8 +422,9 @@ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
return -ENOMEM;
}
- gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
- gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
+ gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
+ gsi->cap.max_send_wr];
+ gsi->outstanding_pi++;
if (!wc) {
memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
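The GSI change above drops next_outstanding()'s wrap-at-insert scheme in favour of producer/consumer indices that only ever increase, with the modulo applied when a slot is actually addressed; that keeps "pi - ci" a true count of outstanding WRs. A minimal sketch of that indexing style (illustrative types, not the mlx5 structures):

    #include <stdio.h>

    #define MAX_SEND_WR 4u

    struct gsi {
        unsigned int pi, ci;              /* free-running producer/consumer */
        int wr[MAX_SEND_WR];
    };

    static int add_wr(struct gsi *g, int wr)
    {
        if (g->pi - g->ci >= MAX_SEND_WR) /* correct even across wrap-around */
            return -1;                    /* queue full */
        g->wr[g->pi % MAX_SEND_WR] = wr;
        g->pi++;
        return 0;
    }

    static void complete_all(struct gsi *g)
    {
        for (unsigned int i = g->ci; i != g->pi; i++)
            printf("completing wr %d (slot %u)\n",
                   g->wr[i % MAX_SEND_WR], i % MAX_SEND_WR);
        g->ci = g->pi;
    }

    int main(void)
    {
        struct gsi g = { 0 };

        for (int i = 1; i <= 5; i++)
            if (add_wr(&g, i))
                printf("wr %d rejected: ring full\n", i);
        complete_all(&g);
        return 0;
    }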
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index dad63f038bb8..a84bb766fc62 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -42,11 +42,13 @@
#include <asm/pat.h>
#endif
#include <linux/sched.h>
+#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
@@ -457,8 +459,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ struct mlx5_ib_query_device_resp resp = {};
+ size_t resp_len;
+ u64 max_tso;
- if (uhw->inlen || uhw->outlen)
+ resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+ if (uhw->outlen && uhw->outlen < resp_len)
+ return -EINVAL;
+ else
+ resp.response_length = resp_len;
+
+ if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
memset(props, 0, sizeof(*props));
@@ -511,10 +522,21 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- (MLX5_CAP_ETH(dev->mdev, csum_cap)))
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+ if (MLX5_CAP_ETH(mdev, csum_cap))
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+ if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+ if (max_tso) {
+ resp.tso_caps.max_tso = 1 << max_tso;
+ resp.tso_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ resp.response_length += sizeof(resp.tso_caps);
+ }
+ }
+ }
+
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
props->device_cap_flags |= IB_DEVICE_UD_TSO;
@@ -576,6 +598,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (!mlx5_core_is_pf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+ if (uhw->outlen) {
+ err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -983,6 +1012,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_uars;
}
+ INIT_LIST_HEAD(&context->vma_private_list);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -992,6 +1022,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
+ if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
+ resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+ resp.response_length += sizeof(resp.cmds_supp_uhw);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1006,8 +1041,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
offsetof(struct mlx5_init_seg, internal_timer_h) %
PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset) +
- sizeof(resp.reserved2) +
- sizeof(resp.reserved3);
+ sizeof(resp.reserved2);
}
err = ib_copy_to_udata(udata, &resp, resp.response_length);
@@ -1086,6 +1120,125 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+static void mlx5_ib_vma_open(struct vm_area_struct *area)
+{
+ /* vma_open is called when a new VMA is created on top of our VMA. This
+ * is done through either mremap flow or split_vma (usually due to
+ * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
+ * as this VMA is strongly hardware related. Therefore we set the
+ * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
+ * calling us again and trying to do incorrect actions. We assume that
+ * the original VMA is exactly a single page, and therefore no
+ * "splitting" operation will happen to it.
+ */
+ area->vm_ops = NULL;
+}
+
+static void mlx5_ib_vma_close(struct vm_area_struct *area)
+{
+ struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
+
+ /* It's guaranteed that all VMAs opened on a FD are closed before the
+ * file itself is closed, therefore no sync is needed with the regular
+ * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
+ * However, a sync is needed with accessing the vma as part of
+ * mlx5_ib_disassociate_ucontext.
+ * The close operation is usually called under mm->mmap_sem except when
+ * process is exiting.
+ * The exiting case is handled explicitly as part of
+ * mlx5_ib_disassociate_ucontext.
+ */
+ mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
+
+ /* setting the vma context pointer to null in the mlx5_ib driver's
+ * private data, to protect a race condition in
+ * mlx5_ib_disassociate_ucontext().
+ */
+ mlx5_ib_vma_priv_data->vma = NULL;
+ list_del(&mlx5_ib_vma_priv_data->list);
+ kfree(mlx5_ib_vma_priv_data);
+}
+
+static const struct vm_operations_struct mlx5_ib_vm_ops = {
+ .open = mlx5_ib_vma_open,
+ .close = mlx5_ib_vma_close
+};
+
+static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *ctx)
+{
+ struct mlx5_ib_vma_private_data *vma_prv;
+ struct list_head *vma_head = &ctx->vma_private_list;
+
+ vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
+ if (!vma_prv)
+ return -ENOMEM;
+
+ vma_prv->vma = vma;
+ vma->vm_private_data = vma_prv;
+ vma->vm_ops = &mlx5_ib_vm_ops;
+
+ list_add(&vma_prv->list, vma_head);
+
+ return 0;
+}
+
+static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ int ret;
+ struct vm_area_struct *vma;
+ struct mlx5_ib_vma_private_data *vma_private, *n;
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+ put_task_struct(owning_process);
+ usleep_range(1000, 2000);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+ /* in case the task was dead, we need to release the
+ * task struct.
+ */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
+
+ /* need to protect from a race on closing the vma as part of
+ * mlx5_ib_vma_close.
+ */
+ down_read(&owning_mm->mmap_sem);
+ list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
+ list) {
+ vma = vma_private->vma;
+ ret = zap_vma_ptes(vma, vma->vm_start,
+ PAGE_SIZE);
+ WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
+ /* the context is going to be destroyed, so do
+ * not access ops any more.
+ */
+ vma->vm_ops = NULL;
+ list_del(&vma_private->list);
+ kfree(vma_private);
+ }
+ up_read(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+}
+
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
switch (cmd) {
@@ -1101,8 +1254,10 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
- struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
{
+ struct mlx5_uuar_info *uuari = &context->uuari;
int err;
unsigned long idx;
phys_addr_t pfn, pa;
@@ -1152,14 +1307,13 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
- return 0;
+ return mlx5_ib_set_vma_data(vma, context);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
- struct mlx5_uuar_info *uuari = &context->uuari;
unsigned long command;
phys_addr_t pfn;
@@ -1168,7 +1322,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
- return uar_mmap(dev, command, vma, uuari);
+ return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
return -ENOSYS;
@@ -1331,6 +1485,32 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
break;
+ case IB_FLOW_SPEC_IPV6:
+ if (ib_spec->size != sizeof(ib_spec->ipv6))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ETH_P_IPV6);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.src_ip,
+ sizeof(ib_spec->ipv6.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.src_ip,
+ sizeof(ib_spec->ipv6.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.dst_ip,
+ sizeof(ib_spec->ipv6.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.dst_ip,
+ sizeof(ib_spec->ipv6.val.dst_ip));
+ break;
case IB_FLOW_SPEC_TCP:
if (ib_spec->size != sizeof(ib_spec->tcp_udp))
return -EINVAL;
@@ -1801,15 +1981,6 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
- fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
-}
-
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1828,7 +1999,6 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
@@ -1836,7 +2006,6 @@ static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
static struct device_attribute *mlx5_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
&dev_attr_fw_pages,
@@ -1854,6 +2023,65 @@ static void pkey_change_handler(struct work_struct *work)
mutex_unlock(&ports->devr->mutex);
}
+static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_qp *mqp;
+ struct mlx5_ib_cq *send_mcq, *recv_mcq;
+ struct mlx5_core_cq *mcq;
+ struct list_head cq_armed_list;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&cq_armed_list);
+
+ /* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+ /* At this point any in-flight post_send has finished executing, since
+ * we locked and unlocked the locks above; now arm all the involved CQs.
+ */
+ list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
+ mcq->comp(mcq);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+}
+
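mlx5_ib_handle_internal_error() above walks every QP under the reset-flow lock, marks each affected CQ exactly once via reset_notify_added, collects the CQs on a local list, and invokes the completion handlers only after the per-QP/CQ spinlocks have been dropped. A minimal userspace sketch of that collect-under-lock, invoke-afterwards pattern (names are hypothetical; a pthread mutex stands in for the spinlocks):

/*
 * Sketch only: gather the objects that need servicing while holding the
 * lock, then run their handlers after the lock is released.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORK 4

struct work {
        int pending;
        int queued;                     /* analogous to reset_notify_added */
        void (*handler)(struct work *);
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work table[NWORK];

static void fire(struct work *w)
{
        printf("handled entry %ld\n", (long)(w - table));
}

static void handle_all(void)
{
        struct work *armed[NWORK];
        int n = 0;

        pthread_mutex_lock(&table_lock);
        for (int i = 0; i < NWORK; i++) {
                if (table[i].pending && !table[i].queued) {
                        table[i].queued = 1;    /* queue each entry once */
                        armed[n++] = &table[i];
                }
        }
        pthread_mutex_unlock(&table_lock);

        /* Handlers run only after the fine-grained lock is dropped, just as
         * mcq->comp() runs after the CQ/QP spinlocks are released above. */
        for (int i = 0; i < n; i++)
                armed[i]->handler(armed[i]);
}

int main(void)
{
        for (int i = 0; i < NWORK; i++) {
                table[i].pending = i & 1;
                table[i].handler = fire;
        }
        handle_all();
        return 0;
}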
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param)
{
@@ -1866,6 +2094,7 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_SYS_ERROR:
ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
break;
case MLX5_DEV_EVENT_PORT_UP:
@@ -2272,6 +2501,15 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
+ fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+}
+
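Together with the removal of the per-driver fw_ver sysfs attributes elsewhere in this patch, get_dev_fw_str() moves firmware-version reporting behind a single ib_device callback, presumably consumed by one shared attribute in the IB core. A minimal sketch of that consolidation pattern (the types and revision numbers below are made up):

/*
 * Sketch only: one generic "show" routine calls into a per-device
 * callback instead of every driver carrying its own show_fw_ver().
 */
#include <stdio.h>

struct device_ops {
        void (*get_fw_str)(char *str, size_t len);
};

static void mlx5_like_get_fw_str(char *str, size_t len)
{
        snprintf(str, len, "%d.%d.%04d", 14, 12, 1100);  /* dummy revision */
}

static int show_fw_ver(const struct device_ops *ops, char *buf, size_t len)
{
        char ver[64];

        ops->get_fw_str(ver, sizeof(ver));
        return snprintf(buf, len, "%s\n", ver);
}

int main(void)
{
        struct device_ops ops = { .get_fw_str = mlx5_like_get_fw_str };
        char buf[80];

        show_fw_ver(&ops, buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
}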
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
int err;
@@ -2298,6 +2536,113 @@ static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
unregister_netdevice_notifier(&dev->roce.nb);
}
+static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_ports; i++)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].q_cnt_id);
+}
+
+static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ ret = mlx5_core_alloc_q_counter(dev->mdev,
+ &dev->port[i].q_cnt_id);
+ if (ret) {
+ mlx5_ib_warn(dev,
+ "couldn't allocate queue counter for port %d, err %d\n",
+ i + 1, ret);
+ goto dealloc_counters;
+ }
+ }
+
+ return 0;
+
+dealloc_counters:
+ while (--i >= 0)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].q_cnt_id);
+
+ return ret;
+}
+
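On a mid-loop allocation failure, mlx5_ib_alloc_q_counters() unwinds with while (--i >= 0) so that only the counters already allocated are released. A standalone sketch of that partial-unwind idiom (illustrative only; the simulated failure and the names are hypothetical):

/*
 * Sketch only: release exactly the resources acquired before the failure.
 */
#include <stdio.h>
#include <stdlib.h>

#define NPORTS 4

static int alloc_counter(int port, int *id)
{
        if (port == 2)                  /* simulate a mid-loop failure */
                return -1;
        *id = 100 + port;
        printf("alloc port %d -> id %d\n", port, *id);
        return 0;
}

static void dealloc_counter(int id)
{
        printf("dealloc id %d\n", id);
}

static int alloc_all(int ids[NPORTS])
{
        int i, ret;

        for (i = 0; i < NPORTS; i++) {
                ret = alloc_counter(i, &ids[i]);
                if (ret)
                        goto dealloc;
        }
        return 0;

dealloc:
        while (--i >= 0)                /* only the ports already set up */
                dealloc_counter(ids[i]);
        return ret;
}

int main(void)
{
        int ids[NPORTS];

        return alloc_all(ids) ? EXIT_FAILURE : EXIT_SUCCESS;
}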
+static const char * const names[] = {
+ "rx_write_requests",
+ "rx_read_requests",
+ "rx_atomic_requests",
+ "out_of_buffer",
+ "out_of_sequence",
+ "duplicate_request",
+ "rnr_nak_retry_err",
+ "packet_seq_err",
+ "implied_nak_seq_err",
+ "local_ack_timeout_err",
+};
+
+static const size_t stats_offsets[] = {
+ MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
+ MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
+ MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
+ MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
+ MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
+ MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
+ MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
+ MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
+ MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
+ MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
+};
+
+static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));
+
+ /* We support only per port stats */
+ if (port_num == 0)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+ void *out;
+ __be32 val;
+ int ret;
+ int i;
+
+ if (!port || !stats)
+ return -ENOSYS;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_core_query_q_counter(dev->mdev,
+ dev->port[port - 1].q_cnt_id, 0,
+ out, outlen);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ val = *(__be32 *)(out + stats_offsets[i]);
+ stats->value[i] = (u64)be32_to_cpu(val);
+ }
+free:
+ kvfree(out);
+ return ARRAY_SIZE(names);
+}
+
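mlx5_ib_get_hw_stats() reads each counter as a big-endian 32-bit value at a fixed byte offset inside the query_q_counter_out buffer and converts it with be32_to_cpu(). A userspace sketch of that extraction step (illustrative only; the offsets and the buffer contents are made up):

/*
 * Sketch only: pull big-endian u32 counters out of a response buffer.
 */
#include <arpa/inet.h>          /* htonl()/ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const size_t offsets[] = { 0, 8 };       /* hypothetical layout */

int main(void)
{
        uint8_t out[16] = { 0 };
        uint32_t be;
        uint64_t value[2];

        /* pretend the device wrote 0x12345678 at offset 8, big-endian */
        be = htonl(0x12345678);
        memcpy(out + 8, &be, sizeof(be));

        for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
                memcpy(&be, out + offsets[i], sizeof(be));
                value[i] = ntohl(be);           /* be32_to_cpu() equivalent */
        }

        printf("counter[1] = 0x%llx\n", (unsigned long long)value[1]);
        return 0;
}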
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
@@ -2320,10 +2665,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->mdev = mdev;
+ dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port)
+ goto err_dealloc;
+
rwlock_init(&dev->roce.netdev_lock);
err = get_port_caps(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
@@ -2418,6 +2768,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
@@ -2425,6 +2776,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
}
+ dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
+
mlx5_ib_internal_fill_odp_caps(dev);
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -2435,6 +2788,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
+ MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+ }
+
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -2447,9 +2806,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
IB_LINK_LAYER_ETHERNET) {
dev->ib_dev.create_flow = mlx5_ib_create_flow;
dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
+ dev->ib_dev.create_wq = mlx5_ib_create_wq;
+ dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
+ dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
+ dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+ dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
}
err = init_node_data(dev);
if (err)
@@ -2457,6 +2826,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_roce(dev);
@@ -2472,10 +2843,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_rsrc;
- err = ib_register_device(&dev->ib_dev, NULL);
+ err = mlx5_ib_alloc_q_counters(dev);
if (err)
goto err_odp;
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
+ goto err_q_cnt;
+
err = create_umr_res(dev);
if (err)
goto err_dev;
@@ -2497,6 +2872,9 @@ err_umrc:
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_q_cnt:
+ mlx5_ib_dealloc_q_counters(dev);
+
err_odp:
mlx5_ib_odp_remove_one(dev);
@@ -2507,6 +2885,9 @@ err_disable_roce:
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
+err_free_port:
+ kfree(dev->port);
+
err_dealloc:
ib_dealloc_device((struct ib_device *)dev);
@@ -2519,11 +2900,13 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
ib_unregister_device(&dev->ib_dev);
+ mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
+ kfree(dev->port);
ib_dealloc_device(&dev->ib_dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c4a9825828bc..372385d0f993 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -105,6 +105,11 @@ enum {
MLX5_CQE_VERSION_V1,
};
+struct mlx5_ib_vma_private_data {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -116,6 +121,7 @@ struct mlx5_ib_ucontext {
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
+ struct list_head vma_private_list;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -217,12 +223,41 @@ struct mlx5_ib_wq {
void *qend;
};
+struct mlx5_ib_rwq {
+ struct ib_wq ibwq;
+ u32 rqn;
+ u32 rq_num_pas;
+ u32 log_rq_stride;
+ u32 log_rq_size;
+ u32 rq_page_offset;
+ u32 log_page_size;
+ struct ib_umem *umem;
+ size_t buf_size;
+ unsigned int page_shift;
+ int create_type;
+ struct mlx5_db db;
+ u32 user_index;
+ u32 wqe_count;
+ u32 wqe_shift;
+ int wq_sig;
+};
+
enum {
MLX5_QP_USER,
MLX5_QP_KERNEL,
MLX5_QP_EMPTY
};
+enum {
+ MLX5_WQ_USER,
+ MLX5_WQ_KERNEL
+};
+
+struct mlx5_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+ u32 rqtn;
+};
+
/*
* Connect-IB can trigger up to four concurrent pagefaults
* per-QP.
@@ -266,6 +301,10 @@ struct mlx5_ib_qp_trans {
u8 resp_depth;
};
+struct mlx5_ib_rss_qp {
+ u32 tirn;
+};
+
struct mlx5_ib_rq {
struct mlx5_ib_qp_base base;
struct mlx5_ib_wq *rq;
@@ -294,6 +333,7 @@ struct mlx5_ib_qp {
union {
struct mlx5_ib_qp_trans trans_qp;
struct mlx5_ib_raw_packet_qp raw_packet_qp;
+ struct mlx5_ib_rss_qp rss_qp;
};
struct mlx5_buf buf;
@@ -340,6 +380,9 @@ struct mlx5_ib_qp {
spinlock_t disable_page_faults_lock;
struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
};
struct mlx5_ib_cq_buf {
@@ -401,6 +444,8 @@ struct mlx5_ib_cq {
struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
u32 create_flags;
struct list_head wc_list;
enum ib_cq_notify_flags notify_flags;
@@ -546,6 +591,10 @@ struct mlx5_ib_resources {
struct mutex mutex;
};
+struct mlx5_ib_port {
+ u16 q_cnt_id;
+};
+
struct mlx5_roce {
/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
* netdev pointer
@@ -581,6 +630,11 @@ struct mlx5_ib_dev {
struct srcu_struct mr_srcu;
#endif
struct mlx5_ib_flow_db flow_db;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
+ /* Array with num_ports elements */
+ struct mlx5_ib_port *port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -628,6 +682,16 @@ static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}
+static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
+{
+ return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
+}
+
+static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
+}
+
static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
return container_of(msrq, struct mlx5_ib_srq, msrq);
@@ -762,6 +826,16 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq);
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8cf2ce50511f..4b021305c321 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1193,12 +1193,16 @@ error:
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
+ struct mlx5_core_dev *mdev = dev->mdev;
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad;
int err;
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return 0;
+
mlx5_ib_init_umr_context(&umr_context);
umrwr.wr.wr_cqe = &umr_context.cqe;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ce0a7ab35a22..0dd7d93cac95 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -77,6 +77,10 @@ struct mlx5_wqe_eth_pad {
u8 rsvd0[16];
};
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+ struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
+
static int is_qp0(enum ib_qp_type qp_type)
{
return qp_type == IB_QPT_SMI;
@@ -609,6 +613,11 @@ static int to_mlx5_st(enum ib_qp_type type)
}
}
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
@@ -649,6 +658,71 @@ err_umem:
return err;
}
+static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq)
+{
+ struct mlx5_ib_ucontext *context;
+
+ context = to_mucontext(pd->uobject->context);
+ mlx5_ib_db_unmap_user(context, &rwq->db);
+ if (rwq->umem)
+ ib_umem_release(rwq->umem);
+}
+
+static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_rwq *rwq,
+ struct mlx5_ib_create_wq *ucmd)
+{
+ struct mlx5_ib_ucontext *context;
+ int page_shift = 0;
+ int npages;
+ u32 offset = 0;
+ int ncont = 0;
+ int err;
+
+ if (!ucmd->buf_addr)
+ return -EINVAL;
+
+ context = to_mucontext(pd->uobject->context);
+ rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
+ rwq->buf_size, 0, 0);
+ if (IS_ERR(rwq->umem)) {
+ mlx5_ib_dbg(dev, "umem_get failed\n");
+ err = PTR_ERR(rwq->umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+ &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
+ &rwq->rq_page_offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+
+ rwq->rq_num_pas = ncont;
+ rwq->page_shift = page_shift;
+ rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
+
+ mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
+ (unsigned long long)ucmd->buf_addr, rwq->buf_size,
+ npages, page_shift, ncont, offset);
+
+ err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map failed\n");
+ goto err_umem;
+ }
+
+ rwq->create_type = MLX5_WQ_USER;
+ return 0;
+
+err_umem:
+ ib_umem_release(rwq->umem);
+ return err;
+}
+
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,
@@ -1201,6 +1275,187 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
rq->doorbell = &qp->db;
}
+static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+ mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+}
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_uobject *uobj = pd->uobject;
+ struct ib_ucontext *ucontext = uobj->context;
+ struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
+ struct mlx5_ib_create_qp_resp resp = {};
+ int inlen;
+ int err;
+ u32 *in;
+ void *tirc;
+ void *hfso;
+ u32 selected_fields = 0;
+ size_t min_resp_len;
+ u32 tdn = mucontext->tdn;
+ struct mlx5_ib_create_qp_rss ucmd = {};
+ size_t required_cmd_sz;
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+ return -EOPNOTSUPP;
+
+ if (init_attr->create_flags || init_attr->send_cq)
+ return -EINVAL;
+
+ min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+ if (udata->inlen < required_cmd_sz) {
+ mlx5_ib_dbg(dev, "invalid inlen\n");
+ return -EINVAL;
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "inlen is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EFAULT;
+ }
+
+ if (ucmd.comp_mask) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
+ mlx5_ib_dbg(dev, "invalid reserved\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = ib_copy_to_udata(udata, &resp, min_resp_len);
+ if (err) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EINVAL;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ init_attr->rwq_ind_tbl->ind_tbl_num);
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ switch (ucmd.rx_hash_function) {
+ case MLX5_RX_HASH_FUNC_TOEPLITZ:
+ {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+
+ if (len != ucmd.rx_key_len) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, ucmd.rx_hash_key, len);
+ break;
+ }
+ default:
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!ucmd.rx_hash_fields_mask) {
+ /* Special case: this TIR serves as a steering entry without hashing */
+ if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
+ goto create_tir;
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of the IPV4 & IPV6 SRC/DST flags is set, this bit field is ignored */
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+
+ if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
+ ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of the TCP & UDP SRC/DST flags is set, this bit field is ignored */
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
+
+create_tir:
+ err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+
+ if (err)
+ goto err;
+
+ kvfree(in);
+ /* qpn is reserved for that QP */
+ qp->trans_qp.base.mqp.qpn = 0;
+ return 0;
+
+err:
+ kvfree(in);
+ return err;
+}
+
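create_rss_raw_qp_tir() rejects a hash-fields mask that mixes IPv4 with IPv6 selectors or TCP with UDP selectors, because the TIR can program only one L3 and one L4 protocol type. A compact sketch of that mutual-exclusion check (flag values mirror enum mlx5_rx_hash_fields added later in this patch; the helper itself is hypothetical):

/*
 * Sketch only: validate that the RX hash mask picks at most one L3 and
 * one L4 protocol family.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SRC_IPV4   (1u << 0)
#define HASH_DST_IPV4   (1u << 1)
#define HASH_SRC_IPV6   (1u << 2)
#define HASH_DST_IPV6   (1u << 3)
#define HASH_SRC_TCP    (1u << 4)
#define HASH_DST_TCP    (1u << 5)
#define HASH_SRC_UDP    (1u << 6)
#define HASH_DST_UDP    (1u << 7)

static bool hash_mask_valid(uint64_t mask)
{
        bool v4  = mask & (HASH_SRC_IPV4 | HASH_DST_IPV4);
        bool v6  = mask & (HASH_SRC_IPV6 | HASH_DST_IPV6);
        bool tcp = mask & (HASH_SRC_TCP | HASH_DST_TCP);
        bool udp = mask & (HASH_SRC_UDP | HASH_DST_UDP);

        return !(v4 && v6) && !(tcp && udp);
}

int main(void)
{
        printf("%d\n", hash_mask_valid(HASH_SRC_IPV4 | HASH_DST_TCP));  /* 1 */
        printf("%d\n", hash_mask_valid(HASH_SRC_IPV4 | HASH_SRC_IPV6)); /* 0 */
        return 0;
}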
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1211,6 +1466,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp_resp resp;
struct mlx5_create_qp_mbox_in *in;
struct mlx5_ib_create_qp ucmd;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
int inlen = sizeof(*in);
int err;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
@@ -1227,6 +1485,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
+ if (init_attr->rwq_ind_tbl) {
+ if (!udata)
+ return -ENOSYS;
+
+ err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
+ return err;
+ }
+
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
@@ -1460,6 +1726,23 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
+ get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain the device-to-QP mapping; it is needed later by the
+ * reset flow handling.
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain the CQ-to-QP mapping; it is needed later by the reset
+ * flow handling.
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
return 0;
err_create:
@@ -1478,23 +1761,23 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
if (send_cq) {
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock,
SINGLE_DEPTH_NESTING);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock,
SINGLE_DEPTH_NESTING);
}
} else {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
}
} else if (recv_cq) {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else {
__acquire(&send_cq->lock);
@@ -1509,21 +1792,21 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
} else {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
}
} else if (recv_cq) {
__release(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
} else {
__release(&recv_cq->lock);
__release(&send_cq->lock);
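With the outer reset_flow_resource_lock now taken with irqsave by the callers, mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() drop their own _irq variants but keep the fixed acquisition order by CQN, which is what prevents an ABBA deadlock between concurrent callers. A small pthread-based sketch of ordering two locks by a numeric id (illustrative only):

/*
 * Sketch only: always acquire the pair in a canonical order so that the
 * caller's argument order cannot cause a deadlock.
 */
#include <pthread.h>
#include <stdio.h>

struct cq {
        int cqn;
        pthread_mutex_t lock;
};

static void lock_pair(struct cq *a, struct cq *b)
{
        if (a->cqn < b->cqn) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else if (a->cqn > b->cqn) {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        } else {
                pthread_mutex_lock(&a->lock);   /* same CQ: lock it once */
        }
}

static void unlock_pair(struct cq *a, struct cq *b)
{
        if (a->cqn != b->cqn)
                pthread_mutex_unlock(&b->lock);
        pthread_mutex_unlock(&a->lock);
}

int main(void)
{
        struct cq x = { 1, PTHREAD_MUTEX_INITIALIZER };
        struct cq y = { 2, PTHREAD_MUTEX_INITIALIZER };

        lock_pair(&y, &x);      /* argument order does not matter */
        puts("both locked");
        unlock_pair(&y, &x);
        return 0;
}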
@@ -1535,17 +1818,18 @@ static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
return to_mpd(qp->ibqp.pd);
}
-static void get_cqs(struct mlx5_ib_qp *qp,
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
- switch (qp->ibqp.qp_type) {
+ switch (qp_type) {
case IB_QPT_XRC_TGT:
*send_cq = NULL;
*recv_cq = NULL;
break;
case MLX5_IB_QPT_REG_UMR:
case IB_QPT_XRC_INI:
- *send_cq = to_mcq(qp->ibqp.send_cq);
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = NULL;
break;
@@ -1557,8 +1841,8 @@ static void get_cqs(struct mlx5_ib_qp *qp,
case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = to_mcq(qp->ibqp.recv_cq);
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break;
case IB_QPT_MAX:
@@ -1577,8 +1861,14 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_modify_qp_mbox_in *in;
+ unsigned long flags;
int err;
+ if (qp->ibqp.rwq_ind_tbl) {
+ destroy_rss_raw_qp_tir(dev, qp);
+ return;
+ }
+
base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
@@ -1602,17 +1892,28 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
base->mqp.qpn);
}
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ if (send_cq)
+ list_del(&qp->cq_send_list);
+
+ if (recv_cq)
+ list_del(&qp->cq_recv_list);
if (qp->create_type == MLX5_QP_KERNEL) {
- mlx5_ib_lock_cqs(send_cq, recv_cq);
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
NULL);
- mlx5_ib_unlock_cqs(send_cq, recv_cq);
}
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
destroy_raw_packet_qp(dev, qp);
@@ -2300,7 +2601,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
pd = get_pd(qp);
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
@@ -2349,6 +2651,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
else
sqd_event = 0;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port) - 1;
+ struct mlx5_ib_port *mibport = &dev->port[port_num];
+
+ context->qp_counter_set_usr_page |=
+ cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
+ }
+
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->sq_crq_size |= cpu_to_be16(1 << 4);
@@ -2439,6 +2750,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int port;
enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
@@ -3397,6 +3711,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
@@ -3424,6 +3739,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
@@ -3725,6 +4047,8 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat;
struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int err = 0;
int nreq;
@@ -3736,6 +4060,13 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -4055,6 +4386,9 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int err = 0;
u8 raw_packet_qp_state;
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);
@@ -4164,3 +4498,322 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return 0;
}
+
+static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr)
+{
+ struct mlx5_ib_dev *dev;
+ __be64 *rq_pas0;
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ dev = to_mdev(pd->device);
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, mem_rq_type,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+ MLX5_SET(rqc, rqc, user_index, rwq->user_index);
+ MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
+ MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
+ MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
+ MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
+ MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
+ rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+ kvfree(in);
+ return err;
+}
+
+static int set_user_rq_size(struct mlx5_ib_dev *dev,
+ struct ib_wq_init_attr *wq_init_attr,
+ struct mlx5_ib_create_wq *ucmd,
+ struct mlx5_ib_rwq *rwq)
+{
+ /* Sanity check RQ size before proceeding */
+ if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
+ return -EINVAL;
+
+ if (!ucmd->rq_wqe_count)
+ return -EINVAL;
+
+ rwq->wqe_count = ucmd->rq_wqe_count;
+ rwq->wqe_shift = ucmd->rq_wqe_shift;
+ rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
+ rwq->log_rq_stride = rwq->wqe_shift;
+ rwq->log_rq_size = ilog2(rwq->wqe_count);
+ return 0;
+}
+
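set_user_rq_size() derives the WQ buffer size and the log sizes handed to firmware straight from the user command: buf_size = rq_wqe_count << rq_wqe_shift, log_rq_stride = rq_wqe_shift and log_rq_size = ilog2(rq_wqe_count). A tiny worked example of that arithmetic (illustrative values only):

/*
 * Sketch only: 256 WQEs with a 64-byte stride (shift 6) give a 16 KiB
 * buffer, log_rq_stride = 6 and log_rq_size = 8.
 */
#include <stdio.h>

int main(void)
{
        unsigned int wqe_count = 256, wqe_shift = 6;
        size_t buf_size = (size_t)wqe_count << wqe_shift;
        unsigned int log_rq_size = 0;

        while ((1u << log_rq_size) < wqe_count) /* ilog2 for a power of two */
                log_rq_size++;

        printf("buf_size=%zu log_rq_stride=%u log_rq_size=%u\n",
               buf_size, wqe_shift, log_rq_size);
        return 0;
}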
+static int prepare_user_rq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct mlx5_ib_rwq *rwq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_wq ucmd = {};
+ int err;
+ size_t required_cmd_sz;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ if (udata->inlen < required_cmd_sz) {
+ mlx5_ib_dbg(dev, "invalid inlen\n");
+ return -EINVAL;
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "inlen is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EFAULT;
+ }
+
+ if (ucmd.comp_mask) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.reserved) {
+ mlx5_ib_dbg(dev, "invalid reserved\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ err = create_user_rq(dev, pd, rwq, &ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ rwq->user_index = ucmd.user_index;
+ return 0;
+}
+
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_rwq *rwq;
+ struct mlx5_ib_create_wq_resp resp = {};
+ size_t min_resp_len;
+ int err;
+
+ if (!udata)
+ return ERR_PTR(-ENOSYS);
+
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ dev = to_mdev(pd->device);
+ switch (init_attr->wq_type) {
+ case IB_WQT_RQ:
+ rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
+ if (!rwq)
+ return ERR_PTR(-ENOMEM);
+ err = prepare_user_rq(pd, init_attr, udata, rwq);
+ if (err)
+ goto err;
+ err = create_rq(rwq, pd, init_attr);
+ if (err)
+ goto err_user_rq;
+ break;
+ default:
+ mlx5_ib_dbg(dev, "unsupported wq type %d\n",
+ init_attr->wq_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ rwq->ibwq.wq_num = rwq->rqn;
+ rwq->ibwq.state = IB_WQS_RESET;
+ if (udata->outlen) {
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
+
+ return &rwq->ibwq;
+
+err_copy:
+ mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+err_user_rq:
+ destroy_user_rq(pd, rwq);
+err:
+ kfree(rwq);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_wq(struct ib_wq *wq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+
+ mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ destroy_user_rq(wq->pd, rwq);
+ kfree(rwq);
+
+ return 0;
+}
+
+struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
+ int sz = 1 << init_attr->log_ind_tbl_size;
+ struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
+ size_t min_resp_len;
+ int inlen;
+ int err;
+ int i;
+ u32 *in;
+ void *rqtc;
+
+ if (udata->inlen > 0 &&
+ !ib_is_udata_cleared(udata, 0,
+ udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
+ if (!rwq_ind_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ for (i = 0; i < sz; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+
+ err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
+ kvfree(in);
+
+ if (err)
+ goto err;
+
+ rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
+ if (udata->outlen) {
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
+
+ return &rwq_ind_tbl->ib_rwq_ind_tbl;
+
+err_copy:
+ mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+err:
+ kfree(rwq_ind_tbl);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
+
+ mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+
+ kfree(rwq_ind_tbl);
+ return 0;
+}
+
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ struct mlx5_ib_modify_wq ucmd = {};
+ size_t required_cmd_sz;
+ int curr_wq_state;
+ int wq_state;
+ int inlen;
+ int err;
+ void *rqc;
+ void *in;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ if (udata->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+ return -EFAULT;
+
+ if (ucmd.comp_mask || ucmd.reserved)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
+ wq_attr->curr_wq_state : wq->state;
+ wq_state = (wq_attr_mask & IB_WQ_STATE) ?
+ wq_attr->wq_state : curr_wq_state;
+ if (curr_wq_state == IB_WQS_ERR)
+ curr_wq_state = MLX5_RQC_STATE_ERR;
+ if (wq_state == IB_WQS_ERR)
+ wq_state = MLX5_RQC_STATE_ERR;
+ MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(rqc, rqc, state, wq_state);
+
+ err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+ kvfree(in);
+ if (!err)
+ rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 3b2ddd64a371..ed6ac52355f1 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -74,14 +74,12 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
}
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
- struct mlx5_create_srq_mbox_in **in,
- struct ib_udata *udata, int buf_size, int *inlen,
- int is_xrc)
+ struct mlx5_srq_attr *in,
+ struct ib_udata *udata, int buf_size)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
size_t ucmdlen;
- void *xsrqc;
int err;
int npages;
int page_shift;
@@ -104,7 +102,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata->inlen - sizeof(ucmd)))
return -EINVAL;
- if (is_xrc) {
+ if (in->type == IB_SRQT_XRC) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
@@ -130,14 +128,13 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_umem;
}
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
- *in = mlx5_vzalloc(*inlen);
- if (!(*in)) {
+ in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
+ if (!in->pas) {
err = -ENOMEM;
goto err_umem;
}
- mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
+ mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &srq->db);
@@ -146,20 +143,16 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_in;
}
- (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
-
- if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
- is_xrc){
- xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
- xrc_srq_context_entry);
- MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
- }
+ in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->page_offset = offset;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type == IB_SRQT_XRC)
+ in->user_index = uidx;
return 0;
err_in:
- kvfree(*in);
+ kvfree(in->pas);
err_umem:
ib_umem_release(srq->umem);
@@ -168,15 +161,13 @@ err_umem:
}
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
- struct mlx5_create_srq_mbox_in **in, int buf_size,
- int *inlen, int is_xrc)
+ struct mlx5_srq_attr *in, int buf_size)
{
int err;
int i;
struct mlx5_wqe_srq_next_seg *next;
int page_shift;
int npages;
- void *xsrqc;
err = mlx5_db_alloc(dev->mdev, &srq->db);
if (err) {
@@ -204,13 +195,12 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
buf_size, page_shift, srq->buf.npages, npages);
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
- *in = mlx5_vzalloc(*inlen);
- if (!*in) {
+ in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
+ if (!in->pas) {
err = -ENOMEM;
goto err_buf;
}
- mlx5_fill_page_array(&srq->buf, (*in)->pas);
+ mlx5_fill_page_array(&srq->buf, in->pas);
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
@@ -221,20 +211,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
srq->wq_sig = !!srq_signature;
- (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-
- if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
- is_xrc){
- xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
- xrc_srq_context_entry);
- /* 0xffffff means we ask to work with cqe version 0 */
- MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
- }
+ in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type == IB_SRQT_XRC)
+ in->user_index = MLX5_IB_DEFAULT_UIDX;
return 0;
err_in:
- kvfree(*in);
+ kvfree(in->pas);
err_buf:
mlx5_buf_free(dev->mdev, &srq->buf);
@@ -267,10 +252,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
int desc_size;
int buf_size;
int err;
- struct mlx5_create_srq_mbox_in *uninitialized_var(in);
- int uninitialized_var(inlen);
- int is_xrc;
- u32 flgs, xrcdn;
+ struct mlx5_srq_attr in = {0};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
/* Sanity check SRQ size before proceeding */
@@ -302,14 +284,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
- is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
-
if (pd->uobject)
- err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
- is_xrc);
+ err = create_srq_user(pd, srq, &in, udata, buf_size);
else
- err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
- is_xrc);
+ err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -317,23 +295,23 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- in->ctx.state_log_sz = ilog2(srq->msrq.max);
- flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
- xrcdn = 0;
- if (is_xrc) {
- xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
- in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
+ in.type = init_attr->srq_type;
+ in.log_size = ilog2(srq->msrq.max);
+ in.wqe_shift = srq->msrq.wqe_shift - 4;
+ if (srq->wq_sig)
+ in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
+ if (init_attr->srq_type == IB_SRQT_XRC) {
+ in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
+ in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
} else if (init_attr->srq_type == IB_SRQT_BASIC) {
- xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
- in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
+ in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
+ in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
}
- in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
-
- in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->ctx.db_record = cpu_to_be64(srq->db.dma);
- err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
- kvfree(in);
+ in.pd = to_mpd(pd)->pdn;
+ in.db_record = srq->db.dma;
+ err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
+ kvfree(in.pas);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
@@ -401,7 +379,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
- struct mlx5_query_srq_mbox_out *out;
+ struct mlx5_srq_attr *out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
@@ -411,7 +389,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
if (ret)
goto out_box;
- srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
+ srq_attr->srq_limit = out->lwm;
srq_attr->max_wr = srq->msrq.max - 1;
srq_attr->max_sge = srq->msrq.max_gs;
@@ -458,6 +436,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;
struct mlx5_wqe_data_seg *scat;
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int err = 0;
int nreq;
@@ -465,6 +445,12 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ goto out;
+ }
+
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
err = -EINVAL;
@@ -507,7 +493,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
-
+out:
spin_unlock_irqrestore(&srq->lock, flags);
return err;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 61bc308bb802..188dac4301b5 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -46,6 +46,10 @@ enum {
MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
+enum {
+ MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
+};
+
/* Increment this value if any changes that break userspace ABI
* compatibility are made.
@@ -79,6 +83,10 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};
+enum mlx5_user_cmds_supp_uhw {
+ MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -94,8 +102,8 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
- __u8 reserved2;
- __u16 reserved3;
+ __u8 cmds_supp_uhw;
+ __u16 reserved2;
__u64 hca_core_clock_offset;
};
@@ -103,6 +111,22 @@ struct mlx5_ib_alloc_pd_resp {
__u32 pdn;
};
+struct mlx5_ib_tso_caps {
+ __u32 max_tso; /* Maximum tso payload size in bytes */
+
+ /* The corresponding bit is set if the qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ __u32 supported_qpts;
+};
+
+struct mlx5_ib_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ struct mlx5_ib_tso_caps tso_caps;
+};
+
struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
@@ -148,6 +172,40 @@ struct mlx5_ib_create_qp {
__u64 sq_buf_addr;
};
+/* RX Hash function flags */
+enum mlx5_rx_hash_function_flags {
+ MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+};
+
+/*
+ * RX Hash flags: these flags select which fields of an incoming packet
+ * participate in the RX hash. Each flag represents a certain packet field;
+ * when a flag is set, that field is included in the RX hash calculation.
+ * Note: the *IPV4 and *IPV6 flags can't be enabled together on the same QP,
+ * and the *TCP and *UDP flags can't be enabled together on the same QP.
+ */
+enum mlx5_rx_hash_fields {
+ MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
+ MLX5_RX_HASH_DST_IPV4 = 1 << 1,
+ MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
+ MLX5_RX_HASH_DST_IPV6 = 1 << 3,
+ MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
+ MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
+ MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
+ MLX5_RX_HASH_DST_PORT_UDP = 1 << 7
+};
+
+struct mlx5_ib_create_qp_rss {
+ __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
+ __u8 rx_key_len; /* valid only for Toeplitz */
+ __u8 reserved[6];
+ __u8 rx_hash_key[128]; /* valid only for Toeplitz */
+ __u32 comp_mask;
+ __u32 reserved1;
+};
+
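A user-space provider would fill struct mlx5_ib_create_qp_rss and pass it through the create-QP udata. A rough usage sketch for a Toeplitz hash over IPv4 addresses and TCP ports follows; this is not libmlx5 code, and the 40-byte key length and key bytes are assumptions for illustration:

/*
 * Sketch only: filling a local copy of the RSS create-QP command.
 */
#include <stdint.h>
#include <string.h>

struct create_qp_rss_sketch {            /* mirrors mlx5_ib_create_qp_rss */
        uint64_t rx_hash_fields_mask;
        uint8_t  rx_hash_function;
        uint8_t  rx_key_len;
        uint8_t  reserved[6];
        uint8_t  rx_hash_key[128];
        uint32_t comp_mask;
        uint32_t reserved1;
};

#define RX_HASH_FUNC_TOEPLITZ   (1 << 0)
#define RX_HASH_SRC_IPV4        (1 << 0)
#define RX_HASH_DST_IPV4        (1 << 1)
#define RX_HASH_SRC_PORT_TCP    (1 << 4)
#define RX_HASH_DST_PORT_TCP    (1 << 5)

int main(void)
{
        struct create_qp_rss_sketch cmd;
        static const uint8_t key[40] = { 0x6d, 0x5a };  /* dummy key bytes */

        memset(&cmd, 0, sizeof(cmd));   /* reserved fields must stay zero */
        cmd.rx_hash_function = RX_HASH_FUNC_TOEPLITZ;
        cmd.rx_hash_fields_mask = RX_HASH_SRC_IPV4 | RX_HASH_DST_IPV4 |
                                  RX_HASH_SRC_PORT_TCP | RX_HASH_DST_PORT_TCP;
        cmd.rx_key_len = sizeof(key);   /* must match the device key size */
        memcpy(cmd.rx_hash_key, key, sizeof(key));
        return cmd.rx_key_len == sizeof(key) ? 0 : 1;
}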
struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
};
@@ -159,6 +217,32 @@ struct mlx5_ib_alloc_mw {
__u16 reserved2;
};
+struct mlx5_ib_create_wq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 user_index;
+ __u32 flags;
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_wq_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_rwq_ind_tbl_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+struct mlx5_ib_modify_wq {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
struct mlx5_ib_create_qp *ucmd,
int inlen,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9866c35cc977..da2335f7f7c3 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1081,16 +1081,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return sprintf(buf, "%x\n", dev->rev_id);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
- (int) (dev->fw_ver >> 16) & 0xffff,
- (int) dev->fw_ver & 0xffff);
-}
-
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1120,13 +1110,11 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mthca_dev_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
@@ -1187,6 +1175,17 @@ static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct mthca_dev *dev =
+ container_of(device, struct mthca_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%d",
+ (int) (dev->fw_ver >> 32),
+ (int) (dev->fw_ver >> 16) & 0xffff,
+ (int) dev->fw_ver & 0xffff);
+}
+
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
@@ -1266,6 +1265,7 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
dev->ib_dev.dereg_mr = mthca_dereg_mr;
dev->ib_dev.get_port_immutable = mthca_port_immutable;
+ dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
if (dev->mthca_flags & MTHCA_FLAG_FMR) {
dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 74c6a9426047..6727af27c017 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -98,7 +98,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't allocate memory to save HCA "
"PCI header, aborting.\n");
- goto out;
+ goto put_dev;
}
for (i = 0; i < 64; ++i) {
@@ -108,7 +108,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA "
"PCI header, aborting.\n");
- goto out;
+ goto free_hca;
}
}
@@ -121,7 +121,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't allocate memory to save HCA "
"bridge PCI header, aborting.\n");
- goto out;
+ goto free_hca;
}
for (i = 0; i < 64; ++i) {
@@ -131,7 +131,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA bridge "
"PCI header, aborting.\n");
- goto out;
+ goto free_bh;
}
}
bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
@@ -139,7 +139,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't locate HCA bridge "
"PCI-X capability, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -152,7 +152,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't map HCA reset register, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
writel(MTHCA_RESET_VALUE, reset);
@@ -172,7 +172,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't access HCA after reset, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
if (v != 0xffffffff)
@@ -184,7 +184,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "PCI device did not come back after reset, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
good:
@@ -195,14 +195,14 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
"split transaction control, aborting.\n");
- goto out;
+ goto free_bh;
}
if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
"split transaction control, aborting.\n");
- goto out;
+ goto free_bh;
}
/*
* Bridge control register is at 0x3e, so we'll
@@ -216,7 +216,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
"aborting.\n", i);
- goto out;
+ goto free_bh;
}
}
@@ -225,7 +225,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -235,7 +235,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI-X "
"command register, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -246,7 +246,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
- goto out;
+ goto free_bh;
}
linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
@@ -254,7 +254,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -266,7 +266,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA reg %x, "
"aborting.\n", i);
- goto out;
+ goto free_bh;
}
}
@@ -275,14 +275,12 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA COMMAND, "
"aborting.\n");
- goto out;
}
-
-out:
- if (bridge)
- pci_dev_put(bridge);
+free_bh:
kfree(bridge_header);
+free_hca:
kfree(hca_header);
-
+put_dev:
+ pci_dev_put(bridge);
return err;
}
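The mthca_reset() change above replaces the single catch-all out: label with a ladder of labels, so each failure path jumps to the point that frees exactly what has been set up so far, and the success path falls through the same ladder. A standalone sketch of that goto-ladder cleanup idiom (illustrative only):

/*
 * Sketch only: layered cleanup labels; every failure frees only what
 * already exists, the success path falls through and frees everything.
 */
#include <stdio.h>
#include <stdlib.h>

static int reset_device(int fail_step)
{
        char *hca_header, *bridge_header;
        int err = 0;

        hca_header = malloc(256);
        if (!hca_header) {
                err = -1;
                goto out;
        }

        bridge_header = malloc(256);
        if (!bridge_header) {
                err = -1;
                goto free_hca;
        }

        if (fail_step == 3) {           /* e.g. "couldn't map reset register" */
                err = -1;
                goto free_bridge;
        }

free_bridge:
        free(bridge_header);
free_hca:
        free(hca_header);
out:
        return err;
}

int main(void)
{
        printf("ok: %d, failed: %d\n", reset_device(0), reset_device(3));
        return 0;
}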
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 464d6da5fe91..bd69125731c1 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2606,23 +2606,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
/**
- * show_fw_ver
- */
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct nes_ib_device *nesibdev =
- container_of(dev, struct nes_ib_device, ibdev.dev);
- struct nes_vnic *nesvnic = nesibdev->nesvnic;
-
- nes_debug(NES_DBG_INIT, "\n");
- return sprintf(buf, "%u.%u\n",
- (nesvnic->nesdev->nesadapter->firmware_version >> 16),
- (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));
-}
-
-
-/**
* show_hca
*/
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
@@ -2645,13 +2628,11 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *nes_dev_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
@@ -3703,6 +3684,19 @@ static int nes_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *dev, char *str,
+ size_t str_len)
+{
+ struct nes_ib_device *nesibdev =
+ container_of(dev, struct nes_ib_device, ibdev);
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+
+ nes_debug(NES_DBG_INIT, "\n");
+ snprintf(str, str_len, "%u.%u",
+ (nesvnic->nesdev->nesadapter->firmware_version >> 16),
+ (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));
+}
+
/**
* nes_init_ofa_device
*/
@@ -3802,6 +3796,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
nesibdev->ibdev.get_port_immutable = nes_port_immutable;
+ nesibdev->ibdev.get_dev_fw_str = get_dev_fw_str;
memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name,
sizeof(nesibdev->ibdev.iwcm->ifname));
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 3d75f65ce87e..07d0c6c5b046 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -107,6 +107,14 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct ocrdma_dev *dev = get_ocrdma_dev(device);
+
+ snprintf(str, str_len, "%s", &dev->attr.fw_ver[0]);
+}
+
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
@@ -193,6 +201,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.process_mad = ocrdma_process_mad;
dev->ibdev.get_port_immutable = ocrdma_port_immutable;
+ dev->ibdev.get_dev_fw_str = get_dev_fw_str;
if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
dev->ibdev.uverbs_cmd_mask |=
@@ -262,14 +271,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
-}
-
static ssize_t show_hca_type(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -279,12 +280,10 @@ static ssize_t show_hca_type(struct device *device,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static struct device_attribute *ocrdma_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type
};
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 575b737d9ef3..9cc0aae1d781 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -106,6 +106,49 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
+const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
+[IB_WR_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_RDMA_READ] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC,
+},
+
+[IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND_WITH_IMM] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+};
+
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
gfp_t gfp)
{
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 846e6c726df7..10d062561bd9 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -169,8 +169,12 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
if (ah_attr->ah_flags & IB_AH_GRH) {
- qib_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1);
+ struct ib_grh grh;
+ struct ib_global_route grd = ah_attr->grh;
+
+ qib_make_grh(ibp, &grh, &grd, 0, 0);
+ qib_copy_sge(&qp->r_sge, &grh,
+ sizeof(grh), 1);
wc.wc_flags |= IB_WC_GRH;
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cbf6200e6afc..fd1dfbce5539 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1582,6 +1582,8 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_total_mcast_qp_attach =
rdi->dparms.props.max_mcast_qp_attach *
rdi->dparms.props.max_mcast_grp;
+ /* post send table */
+ dd->verbs_dev.rdi.post_parms = qib_post_parms;
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 4f878151f81f..736ced684842 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -497,4 +497,6 @@ extern unsigned int ib_qib_max_srq_wrs;
extern const u32 ib_qib_rnr_table[];
+extern const struct rvt_operation_params qib_post_parms[];
+
#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 565c881a44ba..c229b9f4a52d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -331,6 +331,21 @@ static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void usnic_get_dev_fw_str(struct ib_device *device,
+ char *str,
+ size_t str_len)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev);
+ struct ethtool_drvinfo info;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ snprintf(str, str_len, "%s", info.fw_version);
+}
+
/* Start of PF discovery section */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
@@ -414,6 +429,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
+ us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str;
if (ib_register_device(&us_ibdev->ib_dev, NULL))
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 3412ea06116e..80ef3f8998c8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -45,21 +45,6 @@
#include "usnic_ib_verbs.h"
#include "usnic_log.h"
-static ssize_t usnic_ib_show_fw_ver(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct usnic_ib_dev *us_ibdev =
- container_of(device, struct usnic_ib_dev, ib_dev.dev);
- struct ethtool_drvinfo info;
-
- mutex_lock(&us_ibdev->usdev_lock);
- us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
- mutex_unlock(&us_ibdev->usdev_lock);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
-}
-
static ssize_t usnic_ib_show_board(struct device *device,
struct device_attribute *attr,
char *buf)
@@ -192,7 +177,6 @@ usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
-static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
@@ -201,7 +185,6 @@ static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
static struct device_attribute *usnic_class_attributes[] = {
- &dev_attr_fw_ver,
&dev_attr_board_id,
&dev_attr_config,
&dev_attr_iface,
diff --git a/drivers/infiniband/sw/Makefile b/drivers/infiniband/sw/Makefile
index 988b6a0101a4..8b095b27db87 100644
--- a/drivers/infiniband/sw/Makefile
+++ b/drivers/infiniband/sw/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt/
+obj-$(CONFIG_RDMA_RXE) += rxe/
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 11aa6a34bd71..1da8d01a6855 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,5 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
depends on 64BIT
- default m
---help---
This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6ca6fa80dd6e..f2f229efbe64 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -510,6 +510,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
if (rdi->worker)
return 0;
+ spin_lock_init(&rdi->n_cqs_lock);
rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
if (!rdi->worker)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 0f4d4500f45e..80c4b6b401b8 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -140,6 +140,7 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
init_completion(&mr->comp);
/* count returning the ptr to user */
atomic_set(&mr->refcount, 1);
+ atomic_set(&mr->lkey_invalid, 0);
mr->pd = pd;
mr->max_segs = count;
return 0;
@@ -480,6 +481,123 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
}
/**
+ * rvt_set_page - page assignment function called by ib_sg_to_pages
+ * @ibmr: memory region
+ * @addr: dma address of mapped page
+ *
+ * Return: 0 on success
+ */
+static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ u32 ps = 1 << mr->mr.page_shift;
+ u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
+ int m, n;
+
+ if (unlikely(mapped_segs == mr->mr.max_segs))
+ return -ENOMEM;
+
+ if (mr->mr.length == 0) {
+ mr->mr.user_base = addr;
+ mr->mr.iova = addr;
+ }
+
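+ /* split the linear segment index into a map block and a slot within it */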
+ m = mapped_segs / RVT_SEGSZ;
+ n = mapped_segs % RVT_SEGSZ;
+ mr->mr.map[m]->segs[n].vaddr = (void *)addr;
+ mr->mr.map[m]->segs[n].length = ps;
+ mr->mr.length += ps;
+
+ return 0;
+}
+
+/**
+ * rvt_map_mr_sg - map sg list and set it on the memory region
+ * @ibmr: memory region
+ * @sg: dma mapped scatterlist
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ *
+ * Return: number of sg elements mapped to the memory region
+ */
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+
+ mr->mr.length = 0;
+ mr->mr.page_shift = PAGE_SHIFT;
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+ rvt_set_page);
+}
+
+/**
+ * rvt_fast_reg_mr - fast register physical MR
+ * @qp: the queue pair where the work request comes from
+ * @ibmr: the memory region to be registered
+ * @key: updated key for this memory region
+ * @access: access flags for this memory region
+ *
+ * Returns 0 on success.
+ */
+int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+ int access)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+
+ if (qp->ibqp.pd != mr->mr.pd)
+ return -EACCES;
+
+ /* not applicable to dma MR or user MR */
+ if (!mr->mr.lkey || mr->umem)
+ return -EINVAL;
+
+ if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
+ return -EINVAL;
+
+ ibmr->lkey = key;
+ ibmr->rkey = key;
+ mr->mr.lkey = key;
+ mr->mr.access_flags = access;
+ atomic_set(&mr->mr.lkey_invalid, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(rvt_fast_reg_mr);
+
+/**
+ * rvt_invalidate_rkey - invalidate an MR rkey
+ * @qp: queue pair associated with the invalidate op
+ * @rkey: rkey to invalidate
+ *
+ * Returns 0 on success.
+ */
+int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ struct rvt_mregion *mr;
+
+ if (rkey == 0)
+ return -EINVAL;
+
+ rcu_read_lock();
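+ /* the lkey table is indexed by the top lkey_table_size bits of the key */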
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ atomic_set(&mr->lkey_invalid, 1);
+ rcu_read_unlock();
+ return 0;
+
+bail:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+EXPORT_SYMBOL(rvt_invalidate_rkey);
+
+/**
* rvt_alloc_fmr - allocate a fast memory region
* @pd: the protection domain for this memory region
* @mr_access_flags: access flags for this memory region
@@ -682,7 +800,8 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
}
mr = rcu_dereference(
rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
+ if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
+ mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
goto bail;
off = sge->addr - mr->user_base;
@@ -782,7 +901,8 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
mr = rcu_dereference(
rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
+ mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
off = vaddr - mr->iova;
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 69380512c6d1..132800ee0205 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -82,6 +82,8 @@ int rvt_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 41ba7e9cadaa..bdb540f25a88 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -435,8 +435,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
for (n = 0; n < rvt_max_atomic(rdi); n++) {
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
- if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
- e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -584,6 +583,7 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->r_rq.wq->tail = 0;
}
qp->r_sge.num_sge = 0;
+ atomic_set(&qp->s_reserved_used, 0);
}
/**
@@ -613,6 +613,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
gfp_t gfp;
+ size_t sqsize;
if (!rdi)
return ERR_PTR(-EINVAL);
@@ -643,7 +644,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
init_attr->cap.max_recv_wr == 0)
return ERR_PTR(-EINVAL);
}
-
+ sqsize =
+ init_attr->cap.max_send_wr + 1 +
+ rdi->dparms.reserved_operations;
switch (init_attr->qp_type) {
case IB_QPT_SMI:
case IB_QPT_GSI:
@@ -658,11 +661,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sizeof(struct rvt_swqe);
if (gfp == GFP_NOIO)
swq = __vmalloc(
- (init_attr->cap.max_send_wr + 1) * sz,
+ sqsize * sz,
gfp | __GFP_ZERO, PAGE_KERNEL);
else
swq = vzalloc_node(
- (init_attr->cap.max_send_wr + 1) * sz,
+ sqsize * sz,
rdi->dparms.node);
if (!swq)
return ERR_PTR(-ENOMEM);
@@ -741,13 +744,14 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_lock_init(&qp->s_lock);
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
+ atomic_set(&qp->local_ops_pending, 0);
init_waitqueue_head(&qp->wait);
init_timer(&qp->s_timer);
qp->s_timer.data = (unsigned long)qp;
INIT_LIST_HEAD(&qp->rspwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_size = sqsize;
qp->s_avail = init_attr->cap.max_send_wr;
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
@@ -1332,7 +1336,8 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
attr->dest_qp_num = qp->remote_qpn;
attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_send_wr = qp->s_size - 1 -
+ rdi->dparms.reserved_operations;
attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
attr->cap.max_send_sge = qp->s_max_sge;
attr->cap.max_recv_sge = qp->r_rq.max_sge;
@@ -1440,25 +1445,116 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
/**
- * qp_get_savail - return number of avail send entries
+ * rvt_qp_valid_operation - validate post send wr request
+ * @qp - the qp
+ * @post_parms - the post send table for the driver
+ * @wr - the work request
+ *
+ * The routine validates the operation based on the
+ * validation table and returns the length of the operation
+ * which can extend beyond the ib_send_wr. Operation
+ * dependent flags key atomic operation validation.
*
+ * There is an exception for UD qps that validates the pd and
+ * overrides the length to include the additional UD specific
+ * length.
+ *
+ * Returns a negative error or the length of the work request
+ * for building the swqe.
+ */
+static inline int rvt_qp_valid_operation(
+ struct rvt_qp *qp,
+ const struct rvt_operation_params *post_parms,
+ struct ib_send_wr *wr)
+{
+ int len;
+
+ if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
+ return -EINVAL;
+ if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
+ return -EINVAL;
+ if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
+ ibpd_to_rvtpd(qp->ibqp.pd)->user)
+ return -EINVAL;
+ if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ return -EINVAL;
+ if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
+ !qp->s_max_rd_atomic)
+ return -EINVAL;
+ len = post_parms[wr->opcode].length;
+ /* UD specific */
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) {
+ if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ return -EINVAL;
+ len = sizeof(struct ib_ud_wr);
+ }
+ return len;
+}
+
+/**
+ * rvt_qp_is_avail - determine queue capacity
* @qp - the qp
+ * @rdi - the rdmavt device
+ * @reserved_op - true if this is a reserved operation
*
* This assumes the s_hlock is held but the s_last
* qp variable is uncontrolled.
+ *
+ * For non reserved operations, the qp->s_avail
+ * may be changed.
+ *
+ * The return value is zero or -ENOMEM.
*/
-static inline u32 qp_get_savail(struct rvt_qp *qp)
+static inline int rvt_qp_is_avail(
+ struct rvt_qp *qp,
+ struct rvt_dev_info *rdi,
+ bool reserved_op)
{
u32 slast;
- u32 ret;
-
+ u32 avail;
+ u32 reserved_used;
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ if (unlikely(reserved_op)) {
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ if (reserved_used >= rdi->dparms.reserved_operations)
+ return -ENOMEM;
+ return 0;
+ }
+ /* non-reserved operations */
+ if (likely(qp->s_avail))
+ return 0;
smp_read_barrier_depends(); /* see rc.c */
slast = ACCESS_ONCE(qp->s_last);
if (qp->s_head >= slast)
- ret = qp->s_size - (qp->s_head - slast);
+ avail = qp->s_size - (qp->s_head - slast);
else
- ret = slast - qp->s_head;
- return ret - 1;
+ avail = slast - qp->s_head;
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ reserved_used = atomic_read(&qp->s_reserved_used);
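+ /* keep one slot as the ring full/empty gap plus any unused reserved entries */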
+ avail = avail - 1 -
+ (rdi->dparms.reserved_operations - reserved_used);
+ /* ensure we don't assign a negative s_avail */
+ if ((s32)avail <= 0)
+ return -ENOMEM;
+ qp->s_avail = avail;
+ if (WARN_ON(qp->s_avail >
+ (qp->s_size - 1 - rdi->dparms.reserved_operations)))
+ rvt_pr_err(rdi,
+ "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
+ qp->ibqp.qp_num, qp->s_size, qp->s_avail,
+ qp->s_head, qp->s_tail, qp->s_cur,
+ qp->s_acked, qp->s_last);
+ return 0;
}
/**
@@ -1480,49 +1576,64 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
u8 log_pmtu;
int ret;
+ size_t cplen;
+ bool reserved_op;
+ int local_ops_delayed = 0;
+
+ BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
/* IB spec says that num_sge == 0 is OK. */
if (unlikely(wr->num_sge > qp->s_max_sge))
return -EINVAL;
+ ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
+ if (ret < 0)
+ return ret;
+ cplen = ret;
+
/*
- * Don't allow RDMA reads or atomic operations on UC or
- * undefined operations.
- * Make sure buffer is large enough to hold the result for atomics.
+ * Local operations include fast register and local invalidate.
+ * Fast register needs to be processed immediately because the
+ * registered lkey may be used by following work requests and the
+ * lkey needs to be valid at the time those requests are posted.
+ * Local invalidate can be processed immediately if fencing is
+ * not required and no previous local invalidate ops are pending.
+ * Signaled local operations that have been processed immediately
+ * need to have requests with "completion only" flags set posted
+ * to the send queue in order to generate completions.
*/
- if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
- return -EINVAL;
- } else if (qp->ibqp.qp_type != IB_QPT_RC) {
- /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- return -EINVAL;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
+ switch (wr->opcode) {
+ case IB_WR_REG_MR:
+ ret = rvt_fast_reg_mr(qp,
+ reg_wr(wr)->mr,
+ reg_wr(wr)->key,
+ reg_wr(wr)->access);
+ if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
+ return ret;
+ break;
+ case IB_WR_LOCAL_INV:
+ if ((wr->send_flags & IB_SEND_FENCE) ||
+ atomic_read(&qp->local_ops_pending)) {
+ local_ops_delayed = 1;
+ } else {
+ ret = rvt_invalidate_rkey(
+ qp, wr->ex.invalidate_rkey);
+ if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
+ return ret;
+ }
+ break;
+ default:
return -EINVAL;
- } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
- return -EINVAL;
- } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1))) {
- return -EINVAL;
- } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
- return -EINVAL;
+ }
}
+
+ reserved_op = rdi->post_parms[wr->opcode].flags &
+ RVT_OPERATION_USE_RESERVE;
/* check for avail */
- if (unlikely(!qp->s_avail)) {
- qp->s_avail = qp_get_savail(qp);
- if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
- rvt_pr_err(rdi,
- "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
- qp->ibqp.qp_num, qp->s_size, qp->s_avail,
- qp->s_head, qp->s_tail, qp->s_cur,
- qp->s_acked, qp->s_last);
- if (!qp->s_avail)
- return -ENOMEM;
- }
+ ret = rvt_qp_is_avail(qp, rdi, reserved_op);
+ if (ret)
+ return ret;
next = qp->s_head + 1;
if (next >= qp->s_size)
next = 0;
@@ -1531,18 +1642,8 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
pd = ibpd_to_rvtpd(qp->ibqp.pd);
wqe = rvt_get_swqe_ptr(qp, qp->s_head);
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
- memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
- else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE ||
- wr->opcode == IB_WR_RDMA_READ)
- memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
- else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
- else
- memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+ /* cplen has length from above */
+ memcpy(&wqe->wr, wr, cplen);
wqe->length = 0;
j = 0;
@@ -1585,14 +1686,29 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
}
- wqe->ssn = qp->s_ssn++;
- wqe->psn = qp->s_next_psn;
- wqe->lpsn = wqe->psn +
- (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
- qp->s_next_psn = wqe->lpsn + 1;
+ if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
+ if (local_ops_delayed)
+ atomic_inc(&qp->local_ops_pending);
+ else
+ wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
+ wqe->ssn = 0;
+ wqe->psn = 0;
+ wqe->lpsn = 0;
+ } else {
+ wqe->ssn = qp->s_ssn++;
+ wqe->psn = qp->s_next_psn;
+ wqe->lpsn = wqe->psn +
+ (wqe->length ?
+ ((wqe->length - 1) >> log_pmtu) :
+ 0);
+ qp->s_next_psn = wqe->lpsn + 1;
+ }
trace_rvt_post_one_wr(qp, wqe);
+ if (unlikely(reserved_op))
+ rvt_qp_wqe_reserve(qp, wqe);
+ else
+ qp->s_avail--;
smp_wmb(); /* see request builders */
- qp->s_avail--;
qp->s_head = next;
return 0;
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 30c4fda7a05a..d430c2f7cec4 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -370,6 +370,7 @@ enum {
REG_USER_MR,
DEREG_MR,
ALLOC_MR,
+ MAP_MR_SG,
ALLOC_FMR,
MAP_PHYS_FMR,
UNMAP_FMR,
@@ -528,7 +529,8 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
post_send),
rvt_post_send))
if (!rdi->driver_f.schedule_send ||
- !rdi->driver_f.do_send)
+ !rdi->driver_f.do_send ||
+ !rdi->post_parms)
return -EINVAL;
break;
@@ -633,6 +635,12 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
rvt_alloc_mr);
break;
+ case MAP_MR_SG:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ map_mr_sg),
+ rvt_map_mr_sg);
+ break;
+
case MAP_PHYS_FMR:
check_driver_override(rdi, offsetof(struct ib_device,
map_phys_fmr),
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
new file mode 100644
index 000000000000..1e4e628fe7b0
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -0,0 +1,24 @@
+config RDMA_RXE
+ tristate "Software RDMA over Ethernet (RoCE) driver"
+ depends on INET && PCI && INFINIBAND
+ depends on NET_UDP_TUNNEL
+ ---help---
+ This driver implements the InfiniBand RDMA transport over
+ the Linux network stack. It enables a system with a
+ standard Ethernet adapter to interoperate with a RoCE
+ adapter or with another system running the RXE driver.
+ Documentation on InfiniBand and RoCE can be downloaded at
+ www.infinibandta.org and www.openfabrics.org. (See also
+ siw, which is a similar software driver for iWARP.)
+
+ The driver is split into two layers, one interfaces with the
+ Linux RDMA stack and implements a kernel or user space
+ verbs API. The user space verbs API requires a support
+ library named librxe which is loaded by the generic user
+ space verbs API, libibverbs. The other layer interfaces
+ with the Linux network stack at layer 3.
+
+ To configure and work with the soft-RoCE driver, please see the
+ "configure Soft-RoCE (RXE)" section of the following wiki page:
+
+ https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
new file mode 100644
index 000000000000..3b3fb9d1c470
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -0,0 +1,24 @@
+obj-$(CONFIG_RDMA_RXE) += rdma_rxe.o
+
+rdma_rxe-y := \
+ rxe.o \
+ rxe_comp.o \
+ rxe_req.o \
+ rxe_resp.o \
+ rxe_recv.o \
+ rxe_pool.o \
+ rxe_queue.o \
+ rxe_verbs.o \
+ rxe_av.o \
+ rxe_srq.o \
+ rxe_qp.o \
+ rxe_cq.o \
+ rxe_mr.o \
+ rxe_dma.o \
+ rxe_opcode.o \
+ rxe_mmap.o \
+ rxe_icrc.o \
+ rxe_mcast.o \
+ rxe_task.o \
+ rxe_net.o \
+ rxe_sysfs.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
new file mode 100644
index 000000000000..55f0e8f0ca79
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
+MODULE_DESCRIPTION("Soft RDMA transport");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.2");
+
+/* free resources for all ports on a device */
+static void rxe_cleanup_ports(struct rxe_dev *rxe)
+{
+ kfree(rxe->port.pkey_tbl);
+ rxe->port.pkey_tbl = NULL;
+
+}
+
+/* free resources for a rxe device; all objects created for this device must
+ * have been destroyed
+ */
+static void rxe_cleanup(struct rxe_dev *rxe)
+{
+ rxe_pool_cleanup(&rxe->uc_pool);
+ rxe_pool_cleanup(&rxe->pd_pool);
+ rxe_pool_cleanup(&rxe->ah_pool);
+ rxe_pool_cleanup(&rxe->srq_pool);
+ rxe_pool_cleanup(&rxe->qp_pool);
+ rxe_pool_cleanup(&rxe->cq_pool);
+ rxe_pool_cleanup(&rxe->mr_pool);
+ rxe_pool_cleanup(&rxe->mw_pool);
+ rxe_pool_cleanup(&rxe->mc_grp_pool);
+ rxe_pool_cleanup(&rxe->mc_elem_pool);
+
+ rxe_cleanup_ports(rxe);
+}
+
+/* called when all references have been dropped */
+void rxe_release(struct kref *kref)
+{
+ struct rxe_dev *rxe = container_of(kref, struct rxe_dev, ref_cnt);
+
+ rxe_cleanup(rxe);
+ ib_dealloc_device(&rxe->ib_dev);
+}
+
+void rxe_dev_put(struct rxe_dev *rxe)
+{
+ kref_put(&rxe->ref_cnt, rxe_release);
+}
+EXPORT_SYMBOL_GPL(rxe_dev_put);
+
+/* initialize rxe device parameters */
+static int rxe_init_device_param(struct rxe_dev *rxe)
+{
+ rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+
+ rxe->attr.fw_ver = RXE_FW_VER;
+ rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
+ rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
+ rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
+ rxe->attr.hw_ver = RXE_HW_VER;
+ rxe->attr.max_qp = RXE_MAX_QP;
+ rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
+ rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
+ rxe->attr.max_sge = RXE_MAX_SGE;
+ rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
+ rxe->attr.max_cq = RXE_MAX_CQ;
+ rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
+ rxe->attr.max_mr = RXE_MAX_MR;
+ rxe->attr.max_pd = RXE_MAX_PD;
+ rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
+ rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
+ rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
+ rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
+ rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
+ rxe->attr.atomic_cap = RXE_ATOMIC_CAP;
+ rxe->attr.max_ee = RXE_MAX_EE;
+ rxe->attr.max_rdd = RXE_MAX_RDD;
+ rxe->attr.max_mw = RXE_MAX_MW;
+ rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
+ rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
+ rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
+ rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
+ rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
+ rxe->attr.max_ah = RXE_MAX_AH;
+ rxe->attr.max_fmr = RXE_MAX_FMR;
+ rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
+ rxe->attr.max_srq = RXE_MAX_SRQ;
+ rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
+ rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
+ rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
+ rxe->attr.max_pkeys = RXE_MAX_PKEYS;
+ rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+
+ rxe->max_ucontext = RXE_MAX_UCONTEXT;
+
+ return 0;
+}
+
+/* initialize port attributes */
+static int rxe_init_port_param(struct rxe_port *port)
+{
+ port->attr.state = RXE_PORT_STATE;
+ port->attr.max_mtu = RXE_PORT_MAX_MTU;
+ port->attr.active_mtu = RXE_PORT_ACTIVE_MTU;
+ port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
+ port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
+ port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
+ port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
+ port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
+ port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
+ port->attr.lid = RXE_PORT_LID;
+ port->attr.sm_lid = RXE_PORT_SM_LID;
+ port->attr.lmc = RXE_PORT_LMC;
+ port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
+ port->attr.sm_sl = RXE_PORT_SM_SL;
+ port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
+ port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
+ port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
+ port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
+ port->attr.phys_state = RXE_PORT_PHYS_STATE;
+ port->mtu_cap =
+ ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
+ port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
+
+ return 0;
+}
+
+/* initialize port state, note IB convention that HCA ports are always
+ * numbered from 1
+ */
+static int rxe_init_ports(struct rxe_dev *rxe)
+{
+ struct rxe_port *port = &rxe->port;
+
+ rxe_init_port_param(port);
+
+ if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
+ return -EINVAL;
+
+ port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
+ sizeof(*port->pkey_tbl), GFP_KERNEL);
+
+ if (!port->pkey_tbl)
+ return -ENOMEM;
+
+ port->pkey_tbl[0] = 0xffff;
+ port->port_guid = rxe->ifc_ops->port_guid(rxe);
+
+ spin_lock_init(&port->port_lock);
+
+ return 0;
+}
+
+/* init pools of managed objects */
+static int rxe_init_pools(struct rxe_dev *rxe)
+{
+ int err;
+
+ err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
+ rxe->max_ucontext);
+ if (err)
+ goto err1;
+
+ err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
+ rxe->attr.max_pd);
+ if (err)
+ goto err2;
+
+ err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
+ rxe->attr.max_ah);
+ if (err)
+ goto err3;
+
+ err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
+ rxe->attr.max_srq);
+ if (err)
+ goto err4;
+
+ err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
+ rxe->attr.max_qp);
+ if (err)
+ goto err5;
+
+ err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
+ rxe->attr.max_cq);
+ if (err)
+ goto err6;
+
+ err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
+ rxe->attr.max_mr);
+ if (err)
+ goto err7;
+
+ err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
+ rxe->attr.max_mw);
+ if (err)
+ goto err8;
+
+ err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
+ rxe->attr.max_mcast_grp);
+ if (err)
+ goto err9;
+
+ err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
+ rxe->attr.max_total_mcast_qp_attach);
+ if (err)
+ goto err10;
+
+ return 0;
+
+err10:
+ rxe_pool_cleanup(&rxe->mc_grp_pool);
+err9:
+ rxe_pool_cleanup(&rxe->mw_pool);
+err8:
+ rxe_pool_cleanup(&rxe->mr_pool);
+err7:
+ rxe_pool_cleanup(&rxe->cq_pool);
+err6:
+ rxe_pool_cleanup(&rxe->qp_pool);
+err5:
+ rxe_pool_cleanup(&rxe->srq_pool);
+err4:
+ rxe_pool_cleanup(&rxe->ah_pool);
+err3:
+ rxe_pool_cleanup(&rxe->pd_pool);
+err2:
+ rxe_pool_cleanup(&rxe->uc_pool);
+err1:
+ return err;
+}
+
+/* initialize rxe device state */
+static int rxe_init(struct rxe_dev *rxe)
+{
+ int err;
+
+ /* init default device parameters */
+ rxe_init_device_param(rxe);
+
+ err = rxe_init_ports(rxe);
+ if (err)
+ goto err1;
+
+ err = rxe_init_pools(rxe);
+ if (err)
+ goto err2;
+
+ /* init pending mmap list */
+ spin_lock_init(&rxe->mmap_offset_lock);
+ spin_lock_init(&rxe->pending_lock);
+ INIT_LIST_HEAD(&rxe->pending_mmaps);
+ INIT_LIST_HEAD(&rxe->list);
+
+ mutex_init(&rxe->usdev_lock);
+
+ return 0;
+
+err2:
+ rxe_cleanup_ports(rxe);
+err1:
+ return err;
+}
+
+int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+{
+ struct rxe_port *port = &rxe->port;
+ enum ib_mtu mtu;
+
+ mtu = eth_mtu_int_to_enum(ndev_mtu);
+
+ /* Make sure that the new MTU is in range */
+ mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;
+
+ port->attr.active_mtu = mtu;
+ port->mtu_cap = ib_mtu_enum_to_int(mtu);
+
+ return 0;
+}
+EXPORT_SYMBOL(rxe_set_mtu);
+
+/* called by ifc layer to create new rxe device.
+ * The caller should allocate memory for rxe by calling ib_alloc_device.
+ */
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
+{
+ int err;
+
+ kref_init(&rxe->ref_cnt);
+
+ err = rxe_init(rxe);
+ if (err)
+ goto err1;
+
+ err = rxe_set_mtu(rxe, mtu);
+ if (err)
+ goto err1;
+
+ err = rxe_register_device(rxe);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ rxe_dev_put(rxe);
+ return err;
+}
+EXPORT_SYMBOL(rxe_add);
+
+/* called by the ifc layer to remove a device */
+void rxe_remove(struct rxe_dev *rxe)
+{
+ rxe_unregister_device(rxe);
+
+ rxe_dev_put(rxe);
+}
+EXPORT_SYMBOL(rxe_remove);
+
+static int __init rxe_module_init(void)
+{
+ int err;
+
+ /* initialize slab caches for managed objects */
+ err = rxe_cache_init();
+ if (err) {
+ pr_err("rxe: unable to init object pools\n");
+ return err;
+ }
+
+ err = rxe_net_init();
+ if (err) {
+ pr_err("rxe: unable to init\n");
+ rxe_cache_exit();
+ return err;
+ }
+ pr_info("rxe: loaded\n");
+
+ return 0;
+}
+
+static void __exit rxe_module_exit(void)
+{
+ rxe_remove_all();
+ rxe_net_exit();
+ rxe_cache_exit();
+
+ pr_info("rxe: unloaded\n");
+}
+
+module_init(rxe_module_init);
+module_exit(rxe_module_exit);
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
new file mode 100644
index 000000000000..12c71c549f97
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_H
+#define RXE_H
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "rxe_net.h"
+#include "rxe_opcode.h"
+#include "rxe_hdr.h"
+#include "rxe_param.h"
+#include "rxe_verbs.h"
+
+#define RXE_UVERBS_ABI_VERSION (1)
+
+#define IB_PHYS_STATE_LINK_UP (5)
+#define IB_PHYS_STATE_LINK_DOWN (3)
+
+#define RXE_ROCE_V2_SPORT (0xc000)
+
+int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
+void rxe_remove(struct rxe_dev *rxe);
+void rxe_remove_all(void);
+
+int rxe_rcv(struct sk_buff *skb);
+
+void rxe_dev_put(struct rxe_dev *rxe);
+struct rxe_dev *net_to_rxe(struct net_device *ndev);
+struct rxe_dev *get_rxe_by_name(const char *name);
+
+void rxe_port_up(struct rxe_dev *rxe);
+void rxe_port_down(struct rxe_dev *rxe);
+
+#endif /* RXE_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
new file mode 100644
index 000000000000..5c9474212d4e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
+{
+ struct rxe_port *port;
+
+ if (attr->port_num != 1) {
+ pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+ return -EINVAL;
+ }
+
+ port = &rxe->port;
+
+ if (attr->ah_flags & IB_AH_GRH) {
+ if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
+ pr_info("rxe: invalid sgid index = %d\n",
+ attr->grh.sgid_index);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
+ struct rxe_av *av, struct ib_ah_attr *attr)
+{
+ memset(av, 0, sizeof(*av));
+ memcpy(&av->grh, &attr->grh, sizeof(attr->grh));
+ av->port_num = port_num;
+ return 0;
+}
+
+int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
+ struct ib_ah_attr *attr)
+{
+ memcpy(&attr->grh, &av->grh, sizeof(av->grh));
+ attr->port_num = av->port_num;
+ return 0;
+}
+
+int rxe_av_fill_ip_info(struct rxe_dev *rxe,
+ struct rxe_av *av,
+ struct ib_ah_attr *attr,
+ struct ib_gid_attr *sgid_attr,
+ union ib_gid *sgid)
+{
+ rdma_gid2ip(&av->sgid_addr._sockaddr, sgid);
+ rdma_gid2ip(&av->dgid_addr._sockaddr, &attr->grh.dgid);
+ av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid);
+
+ return 0;
+}
+
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
+{
+ if (!pkt || !pkt->qp)
+ return NULL;
+
+ if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
+ return &pkt->qp->pri_av;
+
+ return (pkt->wqe) ? &pkt->wqe->av : NULL;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
new file mode 100644
index 000000000000..36f67de44095
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+#include "rxe_task.h"
+
+enum comp_state {
+ COMPST_GET_ACK,
+ COMPST_GET_WQE,
+ COMPST_COMP_WQE,
+ COMPST_COMP_ACK,
+ COMPST_CHECK_PSN,
+ COMPST_CHECK_ACK,
+ COMPST_READ,
+ COMPST_ATOMIC,
+ COMPST_WRITE_SEND,
+ COMPST_UPDATE_COMP,
+ COMPST_ERROR_RETRY,
+ COMPST_RNR_RETRY,
+ COMPST_ERROR,
+ COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
+ COMPST_DONE, /* The completer finished successfully */
+};
+
+static char *comp_state_name[] = {
+ [COMPST_GET_ACK] = "GET ACK",
+ [COMPST_GET_WQE] = "GET WQE",
+ [COMPST_COMP_WQE] = "COMP WQE",
+ [COMPST_COMP_ACK] = "COMP ACK",
+ [COMPST_CHECK_PSN] = "CHECK PSN",
+ [COMPST_CHECK_ACK] = "CHECK ACK",
+ [COMPST_READ] = "READ",
+ [COMPST_ATOMIC] = "ATOMIC",
+ [COMPST_WRITE_SEND] = "WRITE/SEND",
+ [COMPST_UPDATE_COMP] = "UPDATE COMP",
+ [COMPST_ERROR_RETRY] = "ERROR RETRY",
+ [COMPST_RNR_RETRY] = "RNR RETRY",
+ [COMPST_ERROR] = "ERROR",
+ [COMPST_EXIT] = "EXIT",
+ [COMPST_DONE] = "DONE",
+};
+
+static unsigned long rnrnak_usec[32] = {
+ [IB_RNR_TIMER_655_36] = 655360,
+ [IB_RNR_TIMER_000_01] = 10,
+ [IB_RNR_TIMER_000_02] = 20,
+ [IB_RNR_TIMER_000_03] = 30,
+ [IB_RNR_TIMER_000_04] = 40,
+ [IB_RNR_TIMER_000_06] = 60,
+ [IB_RNR_TIMER_000_08] = 80,
+ [IB_RNR_TIMER_000_12] = 120,
+ [IB_RNR_TIMER_000_16] = 160,
+ [IB_RNR_TIMER_000_24] = 240,
+ [IB_RNR_TIMER_000_32] = 320,
+ [IB_RNR_TIMER_000_48] = 480,
+ [IB_RNR_TIMER_000_64] = 640,
+ [IB_RNR_TIMER_000_96] = 960,
+ [IB_RNR_TIMER_001_28] = 1280,
+ [IB_RNR_TIMER_001_92] = 1920,
+ [IB_RNR_TIMER_002_56] = 2560,
+ [IB_RNR_TIMER_003_84] = 3840,
+ [IB_RNR_TIMER_005_12] = 5120,
+ [IB_RNR_TIMER_007_68] = 7680,
+ [IB_RNR_TIMER_010_24] = 10240,
+ [IB_RNR_TIMER_015_36] = 15360,
+ [IB_RNR_TIMER_020_48] = 20480,
+ [IB_RNR_TIMER_030_72] = 30720,
+ [IB_RNR_TIMER_040_96] = 40960,
+ [IB_RNR_TIMER_061_44] = 61440,
+ [IB_RNR_TIMER_081_92] = 81920,
+ [IB_RNR_TIMER_122_88] = 122880,
+ [IB_RNR_TIMER_163_84] = 163840,
+ [IB_RNR_TIMER_245_76] = 245760,
+ [IB_RNR_TIMER_327_68] = 327680,
+ [IB_RNR_TIMER_491_52] = 491520,
+};
+
+static inline unsigned long rnrnak_jiffies(u8 timeout)
+{
+ return max_t(unsigned long,
+ usecs_to_jiffies(rnrnak_usec[timeout]), 1);
+}
+
+static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE: return IB_WC_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND: return IB_WC_SEND;
+ case IB_WR_SEND_WITH_IMM: return IB_WC_SEND;
+ case IB_WR_RDMA_READ: return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD;
+ case IB_WR_LSO: return IB_WC_LSO;
+ case IB_WR_SEND_WITH_INV: return IB_WC_SEND;
+ case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ;
+ case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
+ case IB_WR_REG_MR: return IB_WC_REG_MR;
+
+ default:
+ return 0xff;
+ }
+}
+
+void retransmit_timer(unsigned long data)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)data;
+
+ if (qp->valid) {
+ qp->comp.timeout = 1;
+ rxe_run_task(&qp->comp.task, 1);
+ }
+}
+
+void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct sk_buff *skb)
+{
+ int must_sched;
+
+ skb_queue_tail(&qp->resp_pkts, skb);
+
+ must_sched = skb_queue_len(&qp->resp_pkts) > 1;
+ rxe_run_task(&qp->comp.task, must_sched);
+}
+
+static inline enum comp_state get_wqe(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe **wqe_p)
+{
+ struct rxe_send_wqe *wqe;
+
+ /* we come here whether or not we found a response packet to see if
+ * there are any posted WQEs
+ */
+ wqe = queue_head(qp->sq.queue);
+ *wqe_p = wqe;
+
+ /* no WQE or requester has not started it yet */
+ if (!wqe || wqe->state == wqe_state_posted)
+ return pkt ? COMPST_DONE : COMPST_EXIT;
+
+ /* WQE does not require an ack */
+ if (wqe->state == wqe_state_done)
+ return COMPST_COMP_WQE;
+
+ /* WQE caused an error */
+ if (wqe->state == wqe_state_error)
+ return COMPST_ERROR;
+
+ /* we have a WQE, if we also have an ack check its PSN */
+ return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
+}
+
+static inline void reset_retry_counters(struct rxe_qp *qp)
+{
+ qp->comp.retry_cnt = qp->attr.retry_cnt;
+ qp->comp.rnr_retry = qp->attr.rnr_retry;
+}
+
+static inline enum comp_state check_psn(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ s32 diff;
+
+ /* check to see if response is past the oldest WQE. if it is, complete
+ * send/write or error read/atomic
+ */
+ diff = psn_compare(pkt->psn, wqe->last_psn);
+ if (diff > 0) {
+ if (wqe->state == wqe_state_pending) {
+ if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
+ return COMPST_ERROR_RETRY;
+
+ reset_retry_counters(qp);
+ return COMPST_COMP_WQE;
+ } else {
+ return COMPST_DONE;
+ }
+ }
+
+ /* compare response packet to expected response */
+ diff = psn_compare(pkt->psn, qp->comp.psn);
+ if (diff < 0) {
+ /* response is most likely a retried packet; if it matches an
+ * uncompleted WQE, go complete it, else ignore it
+ */
+ if (pkt->psn == wqe->last_psn)
+ return COMPST_COMP_ACK;
+ else
+ return COMPST_DONE;
+ } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
+ return COMPST_ERROR_RETRY;
+ } else {
+ return COMPST_CHECK_ACK;
+ }
+}
+
+static inline enum comp_state check_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ unsigned int mask = pkt->mask;
+ u8 syn;
+
+ /* Check the sequence only */
+ switch (qp->comp.opcode) {
+ case -1:
+ /* Will catch all *_ONLY cases. */
+ if (!(mask & RXE_START_MASK))
+ return COMPST_ERROR;
+
+ break;
+
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
+ pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ return COMPST_ERROR;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ /* Check operation validity. */
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
+ syn = aeth_syn(pkt);
+
+ if ((syn & AETH_TYPE_MASK) != AETH_ACK)
+ return COMPST_ERROR;
+
+ /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
+ * doesn't have an AETH)
+ */
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (wqe->wr.opcode != IB_WR_RDMA_READ &&
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
+ return COMPST_ERROR;
+ }
+ reset_retry_counters(qp);
+ return COMPST_READ;
+
+ case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
+ syn = aeth_syn(pkt);
+
+ if ((syn & AETH_TYPE_MASK) != AETH_ACK)
+ return COMPST_ERROR;
+
+ if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
+ wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
+ return COMPST_ERROR;
+ reset_retry_counters(qp);
+ return COMPST_ATOMIC;
+
+ case IB_OPCODE_RC_ACKNOWLEDGE:
+ syn = aeth_syn(pkt);
+ switch (syn & AETH_TYPE_MASK) {
+ case AETH_ACK:
+ reset_retry_counters(qp);
+ return COMPST_WRITE_SEND;
+
+ case AETH_RNR_NAK:
+ return COMPST_RNR_RETRY;
+
+ case AETH_NAK:
+ switch (syn) {
+ case AETH_NAK_PSN_SEQ_ERROR:
+ /* a nak implicitly acks all packets with psns
+ * before
+ */
+ if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
+ qp->comp.psn = pkt->psn;
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+ return COMPST_ERROR_RETRY;
+
+ case AETH_NAK_INVALID_REQ:
+ wqe->status = IB_WC_REM_INV_REQ_ERR;
+ return COMPST_ERROR;
+
+ case AETH_NAK_REM_ACC_ERR:
+ wqe->status = IB_WC_REM_ACCESS_ERR;
+ return COMPST_ERROR;
+
+ case AETH_NAK_REM_OP_ERR:
+ wqe->status = IB_WC_REM_OP_ERR;
+ return COMPST_ERROR;
+
+ default:
+ pr_warn("unexpected nak %x\n", syn);
+ wqe->status = IB_WC_REM_OP_ERR;
+ return COMPST_ERROR;
+ }
+
+ default:
+ return COMPST_ERROR;
+ }
+ break;
+
+ default:
+ pr_warn("unexpected opcode\n");
+ }
+
+ return COMPST_ERROR;
+}
+
+static inline enum comp_state do_read(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int ret;
+
+ ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, payload_addr(pkt),
+ payload_size(pkt), to_mem_obj, NULL);
+ if (ret)
+ return COMPST_ERROR;
+
+ if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
+ return COMPST_COMP_ACK;
+ else
+ return COMPST_UPDATE_COMP;
+}
+
+static inline enum comp_state do_atomic(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int ret;
+
+ u64 atomic_orig = atmack_orig(pkt);
+
+ ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, &atomic_orig,
+ sizeof(u64), to_mem_obj, NULL);
+ if (ret)
+ return COMPST_ERROR;
+ else
+ return COMPST_COMP_ACK;
+}
+
+static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_cqe *cqe)
+{
+ memset(cqe, 0, sizeof(*cqe));
+
+ if (!qp->is_user) {
+ struct ib_wc *wc = &cqe->ibwc;
+
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = wqe->status;
+ wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->byte_len = wqe->dma.length;
+ wc->qp = &qp->ibqp;
+ } else {
+ struct ib_uverbs_wc *uwc = &cqe->uibwc;
+
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = wqe->status;
+ uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ uwc->wc_flags = IB_WC_WITH_IMM;
+ uwc->byte_len = wqe->dma.length;
+ uwc->qp_num = qp->ibqp.qp_num;
+ }
+}
+
+static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_cqe cqe;
+
+ if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ (qp->req.state == QP_STATE_ERROR)) {
+ make_send_cqe(qp, wqe, &cqe);
+ rxe_cq_post(qp->scq, &cqe, 0);
+ }
+
+ advance_consumer(qp->sq.queue);
+
+ /*
+ * we completed something so let req run again
+ * if it is trying to fence
+ */
+ if (qp->req.wait_fence) {
+ qp->req.wait_fence = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+}
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ unsigned long flags;
+
+ if (wqe->has_rd_atomic) {
+ wqe->has_rd_atomic = 0;
+ atomic_inc(&qp->req.rd_atomic);
+ if (qp->req.need_rd_atomic) {
+ qp->comp.timeout_retry = 0;
+ qp->req.need_rd_atomic = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+
+ if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+ /* state_lock used by requester & completer */
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if ((qp->req.state == QP_STATE_DRAIN) &&
+ (qp->comp.psn == qp->req.psn)) {
+ qp->req.state = QP_STATE_DRAINED;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_SQ_DRAINED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ } else {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ }
+ }
+
+ do_complete(qp, wqe);
+
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ return COMPST_UPDATE_COMP;
+ else
+ return COMPST_DONE;
+}
+
+static inline enum comp_state complete_wqe(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ qp->comp.opcode = -1;
+
+ if (pkt) {
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+
+ do_complete(qp, wqe);
+
+ return COMPST_GET_WQE;
+}
+
+int rxe_completer(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ struct rxe_send_wqe *wqe = NULL;
+ struct sk_buff *skb = NULL;
+ struct rxe_pkt_info *pkt = NULL;
+ enum comp_state state;
+
+ if (!qp->valid) {
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+ skb = NULL;
+ pkt = NULL;
+
+ while (queue_head(qp->sq.queue))
+ advance_consumer(qp->sq.queue);
+
+ goto exit;
+ }
+
+ if (qp->req.state == QP_STATE_ERROR) {
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+ skb = NULL;
+ pkt = NULL;
+
+ while ((wqe = queue_head(qp->sq.queue))) {
+ wqe->status = IB_WC_WR_FLUSH_ERR;
+ do_complete(qp, wqe);
+ }
+
+ goto exit;
+ }
+
+ if (qp->req.state == QP_STATE_RESET) {
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+ skb = NULL;
+ pkt = NULL;
+
+ while (queue_head(qp->sq.queue))
+ advance_consumer(qp->sq.queue);
+
+ goto exit;
+ }
+
+ if (qp->comp.timeout) {
+ qp->comp.timeout_retry = 1;
+ qp->comp.timeout = 0;
+ } else {
+ qp->comp.timeout_retry = 0;
+ }
+
+ if (qp->req.need_retry)
+ goto exit;
+
+ state = COMPST_GET_ACK;
+
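+ /* Completer state machine: dispatch on the current state until a
+ * packet has been fully processed (COMPST_DONE) or there is nothing
+ * more to do for now (COMPST_EXIT and the retry/error paths that
+ * jump to exit).
+ */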
+ while (1) {
+ pr_debug("state = %s\n", comp_state_name[state]);
+ switch (state) {
+ case COMPST_GET_ACK:
+ skb = skb_dequeue(&qp->resp_pkts);
+ if (skb) {
+ pkt = SKB_TO_PKT(skb);
+ qp->comp.timeout_retry = 0;
+ }
+ state = COMPST_GET_WQE;
+ break;
+
+ case COMPST_GET_WQE:
+ state = get_wqe(qp, pkt, &wqe);
+ break;
+
+ case COMPST_CHECK_PSN:
+ state = check_psn(qp, pkt, wqe);
+ break;
+
+ case COMPST_CHECK_ACK:
+ state = check_ack(qp, pkt, wqe);
+ break;
+
+ case COMPST_READ:
+ state = do_read(qp, pkt, wqe);
+ break;
+
+ case COMPST_ATOMIC:
+ state = do_atomic(qp, pkt, wqe);
+ break;
+
+ case COMPST_WRITE_SEND:
+ if (wqe->state == wqe_state_pending &&
+ wqe->last_psn == pkt->psn)
+ state = COMPST_COMP_ACK;
+ else
+ state = COMPST_UPDATE_COMP;
+ break;
+
+ case COMPST_COMP_ACK:
+ state = complete_ack(qp, pkt, wqe);
+ break;
+
+ case COMPST_COMP_WQE:
+ state = complete_wqe(qp, pkt, wqe);
+ break;
+
+ case COMPST_UPDATE_COMP:
+ if (pkt->mask & RXE_END_MASK)
+ qp->comp.opcode = -1;
+ else
+ qp->comp.opcode = pkt->opcode;
+
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+
+ state = COMPST_DONE;
+ break;
+
+ case COMPST_DONE:
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+ goto done;
+
+ case COMPST_EXIT:
+ if (qp->comp.timeout_retry && wqe) {
+ state = COMPST_ERROR_RETRY;
+ break;
+ }
+
+ /* re-arm the retransmit timer if
+ * (1) QP is type RC
+ * (2) the QP is alive
+ * (3) there is a packet sent by the requester that
+ * might be acked (we still might get spurious
+ * timeouts but try to keep them as few as possible)
+ * (4) the timeout parameter is set
+ */
+ if ((qp_type(qp) == IB_QPT_RC) &&
+ (qp->req.state == QP_STATE_READY) &&
+ (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
+ qp->qp_timeout_jiffies)
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+ goto exit;
+
+ case COMPST_ERROR_RETRY:
+ /* we come here if the retry timer fired and we did
+ * not receive a response packet. try to retry the send
+ * queue if that makes sense and the limits have not
+ * been exceeded. remember that some timeouts are
+ * spurious since we do not reset the timer but kick
+ * it down the road or let it expire
+ */
+
+ /* there is nothing to retry in this case */
+ if (!wqe || (wqe->state == wqe_state_posted))
+ goto exit;
+
+ if (qp->comp.retry_cnt > 0) {
+ if (qp->comp.retry_cnt != 7)
+ qp->comp.retry_cnt--;
+
+ /* no point in retrying if we have already
+ * seen the last ack that the requester could
+ * have caused
+ */
+ if (psn_compare(qp->req.psn,
+ qp->comp.psn) > 0) {
+ /* tell the requester to retry the
+ * send queue next time around
+ */
+ qp->req.need_retry = 1;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ goto exit;
+ } else {
+ wqe->status = IB_WC_RETRY_EXC_ERR;
+ state = COMPST_ERROR;
+ }
+ break;
+
+ case COMPST_RNR_RETRY:
+ if (qp->comp.rnr_retry > 0) {
+ if (qp->comp.rnr_retry != 7)
+ qp->comp.rnr_retry--;
+
+ qp->req.need_retry = 1;
+ pr_debug("set rnr nak timer\n");
+ mod_timer(&qp->rnr_nak_timer,
+ jiffies + rnrnak_jiffies(aeth_syn(pkt)
+ & ~AETH_TYPE_MASK));
+ goto exit;
+ } else {
+ wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
+ state = COMPST_ERROR;
+ }
+ break;
+
+ case COMPST_ERROR:
+ do_complete(qp, wqe);
+ rxe_qp_error(qp);
+ goto exit;
+ }
+ }
+
+exit:
+ /* we come here if we are done with processing and want the task to
+ * exit from the loop calling us
+ */
+ return -EAGAIN;
+
+done:
+ /* we come here if we have processed a packet and want the task to call
+ * us again to see if there is anything else to do
+ */
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
new file mode 100644
index 000000000000..e5e6a5e7dee9
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ int cqe, int comp_vector, struct ib_udata *udata)
+{
+ int count;
+
+ if (cqe <= 0) {
+ pr_warn("cqe(%d) <= 0\n", cqe);
+ goto err1;
+ }
+
+ if (cqe > rxe->attr.max_cqe) {
+ pr_warn("cqe(%d) > max_cqe(%d)\n",
+ cqe, rxe->attr.max_cqe);
+ goto err1;
+ }
+
+ if (cq) {
+ count = queue_count(cq->queue);
+ if (cqe < count) {
+ pr_warn("cqe(%d) < current # elements in queue (%d)",
+ cqe, count);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static void rxe_send_complete(unsigned long data)
+{
+ struct rxe_cq *cq = (struct rxe_cq *)data;
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ int err;
+
+ cq->queue = rxe_queue_init(rxe, &cqe,
+ sizeof(struct rxe_cqe));
+ if (!cq->queue) {
+ pr_warn("unable to create cq\n");
+ return -ENOMEM;
+ }
+
+ err = do_mmap_info(rxe, udata, false, context, cq->queue->buf,
+ cq->queue->buf_size, &cq->queue->ip);
+ if (err) {
+ kvfree(cq->queue->buf);
+ kfree(cq->queue);
+ return err;
+ }
+
+ if (udata)
+ cq->is_user = 1;
+
+ tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
+
+ spin_lock_init(&cq->cq_lock);
+ cq->ibcq.cqe = cqe;
+ return 0;
+}
+
+int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct ib_udata *udata)
+{
+ int err;
+
+ err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
+ sizeof(struct rxe_cqe),
+ cq->queue->ip ? cq->queue->ip->context : NULL,
+ udata, NULL, &cq->cq_lock);
+ if (!err)
+ cq->ibcq.cqe = cqe;
+
+ return err;
+}
+
+int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+{
+ struct ib_event ev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ if (unlikely(queue_full(cq->queue))) {
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ if (cq->ibcq.event_handler) {
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+
+ return -EBUSY;
+ }
+
+ memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
+
+ /* make sure all changes to the CQ are written before we update the
+ * producer pointer
+ */
+ smp_wmb();
+
+ advance_producer(cq->queue);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if ((cq->notify == IB_CQ_NEXT_COMP) ||
+ (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ cq->notify = 0;
+ tasklet_schedule(&cq->comp_task);
+ }
+
+ return 0;
+}
+
+void rxe_cq_cleanup(void *arg)
+{
+ struct rxe_cq *cq = arg;
+
+ if (cq->queue)
+ rxe_queue_cleanup(cq->queue);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c
new file mode 100644
index 000000000000..7634c1a81b2b
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_dma.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+#define DMA_BAD_ADDER ((u64)0)
+
+static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == DMA_BAD_ADDER;
+}
+
+static u64 rxe_dma_map_single(struct ib_device *dev,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+ return (uintptr_t)cpu_addr;
+}
+
+static void rxe_dma_unmap_single(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+}
+
+static u64 rxe_dma_map_page(struct ib_device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ u64 addr;
+
+ WARN_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = DMA_BAD_ADDER;
+ goto done;
+ }
+
+ addr = (uintptr_t)page_address(page);
+ if (addr)
+ addr += offset;
+
+done:
+ return addr;
+}
+
+static void rxe_dma_unmap_page(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+}
+
+static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ WARN_ON(!valid_dma_direction(direction));
+
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (uintptr_t)page_address(sg_page(sg));
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
+ }
+
+ return ret;
+}
+
+static void rxe_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+}
+
+static void rxe_sync_single_for_cpu(struct ib_device *dev,
+ u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void rxe_sync_single_for_device(struct ib_device *dev,
+ u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+
+ if (dma_handle)
+ *dma_handle = (uintptr_t)addr;
+
+ return addr;
+}
+
+static void rxe_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
+ .mapping_error = rxe_mapping_error,
+ .map_single = rxe_dma_map_single,
+ .unmap_single = rxe_dma_unmap_single,
+ .map_page = rxe_dma_map_page,
+ .unmap_page = rxe_dma_unmap_page,
+ .map_sg = rxe_map_sg,
+ .unmap_sg = rxe_unmap_sg,
+ .sync_single_for_cpu = rxe_sync_single_for_cpu,
+ .sync_single_for_device = rxe_sync_single_for_device,
+ .alloc_coherent = rxe_dma_alloc_coherent,
+ .free_coherent = rxe_dma_free_coherent
+};
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
new file mode 100644
index 000000000000..d57b5e956ceb
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -0,0 +1,952 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_HDR_H
+#define RXE_HDR_H
+
+/* Information extracted from a packet carried in an sk_buff. For received
+ * packets it is stored in the sk_buff control block (cb), so it must be at
+ * most 48 bytes.
+ */
+struct rxe_pkt_info {
+ struct rxe_dev *rxe; /* device that owns packet */
+ struct rxe_qp *qp; /* qp that owns packet */
+ struct rxe_send_wqe *wqe; /* send wqe */
+ u8 *hdr; /* points to bth */
+ u32 mask; /* useful info about pkt */
+ u32 psn; /* bth psn of packet */
+ u16 pkey_index; /* partition of pkt */
+ u16 paylen; /* length of bth - icrc */
+ u8 port_num; /* port pkt received on */
+ u8 opcode; /* bth opcode of packet */
+ u8 offset; /* bth offset from pkt->hdr */
+};
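+
+/* Illustrative compile-time check of the size constraint above (a sketch,
+ * assuming the usual 48-byte skb control block, not an accessor provided
+ * by this header):
+ *
+ *	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) >
+ *		     sizeof(((struct sk_buff *)0)->cb));
+ */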
+
+/* These macros should be used only for received skbs */
+#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
+#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)
+
+/*
+ * IBA header types and methods
+ *
+ * Some of these are for reference and completeness only, since
+ * rxe does not currently support the RD transport. Most of this could
+ * be moved into the IB core; ib_pack.h has part of this but is
+ * incomplete.
+ *
+ * Header-specific routines insert/extract values to/from headers.
+ * The routines named __hhh_(set_)fff() take a pointer to an hhh
+ * header and get (set) the fff field. The routines named
+ * hhh_(set_)fff() take a packet info struct and find the header
+ * and field based on the opcode in the packet.
+ * Conversion between network byte order and CPU order is also done.
+ */
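+
+/* For example (illustrative only), the PSN of a packet can be read either
+ * with the raw accessor, __bth_psn(pkt->hdr + pkt->offset), or with the
+ * packet-level wrapper, bth_psn(pkt), which locates the BTH itself.
+ */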
+
+#define RXE_ICRC_SIZE (4)
+#define RXE_MAX_HDR_LENGTH (80)
+
+/******************************************************************************
+ * Base Transport Header
+ ******************************************************************************/
+struct rxe_bth {
+ u8 opcode;
+ u8 flags;
+ __be16 pkey;
+ __be32 qpn;
+ __be32 apsn;
+};
+
+#define BTH_TVER (0)
+#define BTH_DEF_PKEY (0xffff)
+
+#define BTH_SE_MASK (0x80)
+#define BTH_MIG_MASK (0x40)
+#define BTH_PAD_MASK (0x30)
+#define BTH_TVER_MASK (0x0f)
+#define BTH_FECN_MASK (0x80000000)
+#define BTH_BECN_MASK (0x40000000)
+#define BTH_RESV6A_MASK (0x3f000000)
+#define BTH_QPN_MASK (0x00ffffff)
+#define BTH_ACK_MASK (0x80000000)
+#define BTH_RESV7_MASK (0x7f000000)
+#define BTH_PSN_MASK (0x00ffffff)
+
+static inline u8 __bth_opcode(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return bth->opcode;
+}
+
+static inline void __bth_set_opcode(void *arg, u8 opcode)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->opcode = opcode;
+}
+
+static inline u8 __bth_se(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (BTH_SE_MASK & bth->flags);
+}
+
+static inline void __bth_set_se(void *arg, int se)
+{
+ struct rxe_bth *bth = arg;
+
+ if (se)
+ bth->flags |= BTH_SE_MASK;
+ else
+ bth->flags &= ~BTH_SE_MASK;
+}
+
+static inline u8 __bth_mig(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (BTH_MIG_MASK & bth->flags);
+}
+
+static inline void __bth_set_mig(void *arg, u8 mig)
+{
+ struct rxe_bth *bth = arg;
+
+ if (mig)
+ bth->flags |= BTH_MIG_MASK;
+ else
+ bth->flags &= ~BTH_MIG_MASK;
+}
+
+static inline u8 __bth_pad(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return (BTH_PAD_MASK & bth->flags) >> 4;
+}
+
+static inline void __bth_set_pad(void *arg, u8 pad)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->flags = (BTH_PAD_MASK & (pad << 4)) |
+ (~BTH_PAD_MASK & bth->flags);
+}
+
+static inline u8 __bth_tver(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_TVER_MASK & bth->flags;
+}
+
+static inline void __bth_set_tver(void *arg, u8 tver)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->flags = (BTH_TVER_MASK & tver) |
+ (~BTH_TVER_MASK & bth->flags);
+}
+
+static inline u16 __bth_pkey(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return be16_to_cpu(bth->pkey);
+}
+
+static inline void __bth_set_pkey(void *arg, u16 pkey)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->pkey = cpu_to_be16(pkey);
+}
+
+static inline u32 __bth_qpn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
+}
+
+static inline void __bth_set_qpn(void *arg, u32 qpn)
+{
+ struct rxe_bth *bth = arg;
+ u32 resvqpn = be32_to_cpu(bth->qpn);
+
+ bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
+ (~BTH_QPN_MASK & resvqpn));
+}
+
+static inline int __bth_fecn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
+}
+
+static inline void __bth_set_fecn(void *arg, int fecn)
+{
+ struct rxe_bth *bth = arg;
+
+ if (fecn)
+ bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
+ else
+ bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
+}
+
+static inline int __bth_becn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
+}
+
+static inline void __bth_set_becn(void *arg, int becn)
+{
+ struct rxe_bth *bth = arg;
+
+ if (becn)
+ bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
+ else
+ bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
+}
+
+static inline u8 __bth_resv6a(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
+}
+
+static inline void __bth_set_resv6a(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
+}
+
+static inline int __bth_ack(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
+}
+
+static inline void __bth_set_ack(void *arg, int ack)
+{
+ struct rxe_bth *bth = arg;
+
+ if (ack)
+ bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
+ else
+ bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
+}
+
+static inline void __bth_set_resv7(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
+}
+
+static inline u32 __bth_psn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
+}
+
+static inline void __bth_set_psn(void *arg, u32 psn)
+{
+ struct rxe_bth *bth = arg;
+ u32 apsn = be32_to_cpu(bth->apsn);
+
+ bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
+ (~BTH_PSN_MASK & apsn));
+}
+
+static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
+{
+ return __bth_opcode(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
+{
+ __bth_set_opcode(pkt->hdr + pkt->offset, opcode);
+}
+
+static inline u8 bth_se(struct rxe_pkt_info *pkt)
+{
+ return __bth_se(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
+{
+ __bth_set_se(pkt->hdr + pkt->offset, se);
+}
+
+static inline u8 bth_mig(struct rxe_pkt_info *pkt)
+{
+ return __bth_mig(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
+{
+ __bth_set_mig(pkt->hdr + pkt->offset, mig);
+}
+
+static inline u8 bth_pad(struct rxe_pkt_info *pkt)
+{
+ return __bth_pad(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
+{
+ __bth_set_pad(pkt->hdr + pkt->offset, pad);
+}
+
+static inline u8 bth_tver(struct rxe_pkt_info *pkt)
+{
+ return __bth_tver(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
+{
+ __bth_set_tver(pkt->hdr + pkt->offset, tver);
+}
+
+static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
+{
+ return __bth_pkey(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
+{
+ __bth_set_pkey(pkt->hdr + pkt->offset, pkey);
+}
+
+static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
+{
+ return __bth_qpn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
+{
+ __bth_set_qpn(pkt->hdr + pkt->offset, qpn);
+}
+
+static inline int bth_fecn(struct rxe_pkt_info *pkt)
+{
+ return __bth_fecn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
+{
+ __bth_set_fecn(pkt->hdr + pkt->offset, fecn);
+}
+
+static inline int bth_becn(struct rxe_pkt_info *pkt)
+{
+ return __bth_becn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
+{
+ __bth_set_becn(pkt->hdr + pkt->offset, becn);
+}
+
+static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
+{
+ return __bth_resv6a(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
+{
+ __bth_set_resv6a(pkt->hdr + pkt->offset);
+}
+
+static inline int bth_ack(struct rxe_pkt_info *pkt)
+{
+ return __bth_ack(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
+{
+ __bth_set_ack(pkt->hdr + pkt->offset, ack);
+}
+
+static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
+{
+ __bth_set_resv7(pkt->hdr + pkt->offset);
+}
+
+static inline u32 bth_psn(struct rxe_pkt_info *pkt)
+{
+ return __bth_psn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
+{
+ __bth_set_psn(pkt->hdr + pkt->offset, psn);
+}
+
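+/* Fill in a BTH from scratch: opcode, the SE/M/pad flag bits, pkey, qpn,
+ * and the psn word with the optional ack-request bit.
+ */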
+static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
+ int mig, int pad, u16 pkey, u32 qpn, int ack_req,
+ u32 psn)
+{
+ struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
+
+ bth->opcode = opcode;
+ bth->flags = (pad << 4) & BTH_PAD_MASK;
+ if (se)
+ bth->flags |= BTH_SE_MASK;
+ if (mig)
+ bth->flags |= BTH_MIG_MASK;
+ bth->pkey = cpu_to_be16(pkey);
+ bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
+ psn &= BTH_PSN_MASK;
+ if (ack_req)
+ psn |= BTH_ACK_MASK;
+ bth->apsn = cpu_to_be32(psn);
+}
+
+/******************************************************************************
+ * Reliable Datagram Extended Transport Header
+ ******************************************************************************/
+struct rxe_rdeth {
+ __be32 een;
+};
+
+#define RDETH_EEN_MASK (0x00ffffff)
+
+static inline u8 __rdeth_een(void *arg)
+{
+ struct rxe_rdeth *rdeth = arg;
+
+ return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
+}
+
+static inline void __rdeth_set_een(void *arg, u32 een)
+{
+ struct rxe_rdeth *rdeth = arg;
+
+ rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
+}
+
+static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
+{
+ return __rdeth_een(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
+}
+
+static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
+{
+ __rdeth_set_een(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
+}
+
+/******************************************************************************
+ * Datagram Extended Transport Header
+ ******************************************************************************/
+struct rxe_deth {
+ __be32 qkey;
+ __be32 sqp;
+};
+
+#define GSI_QKEY (0x80010000)
+#define DETH_SQP_MASK (0x00ffffff)
+
+static inline u32 __deth_qkey(void *arg)
+{
+ struct rxe_deth *deth = arg;
+
+ return be32_to_cpu(deth->qkey);
+}
+
+static inline void __deth_set_qkey(void *arg, u32 qkey)
+{
+ struct rxe_deth *deth = arg;
+
+ deth->qkey = cpu_to_be32(qkey);
+}
+
+static inline u32 __deth_sqp(void *arg)
+{
+ struct rxe_deth *deth = arg;
+
+ return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
+}
+
+static inline void __deth_set_sqp(void *arg, u32 sqp)
+{
+ struct rxe_deth *deth = arg;
+
+ deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
+}
+
+static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
+{
+ return __deth_qkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+}
+
+static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
+{
+ __deth_set_qkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
+}
+
+static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
+{
+ return __deth_sqp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+}
+
+static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
+{
+ __deth_set_sqp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
+}
+
+/******************************************************************************
+ * RDMA Extended Transport Header
+ ******************************************************************************/
+struct rxe_reth {
+ __be64 va;
+ __be32 rkey;
+ __be32 len;
+};
+
+static inline u64 __reth_va(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be64_to_cpu(reth->va);
+}
+
+static inline void __reth_set_va(void *arg, u64 va)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->va = cpu_to_be64(va);
+}
+
+static inline u32 __reth_rkey(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be32_to_cpu(reth->rkey);
+}
+
+static inline void __reth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u32 __reth_len(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be32_to_cpu(reth->len);
+}
+
+static inline void __reth_set_len(void *arg, u32 len)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->len = cpu_to_be32(len);
+}
+
+static inline u64 reth_va(struct rxe_pkt_info *pkt)
+{
+ return __reth_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
+{
+ __reth_set_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
+}
+
+static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __reth_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __reth_set_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
+}
+
+static inline u32 reth_len(struct rxe_pkt_info *pkt)
+{
+ return __reth_len(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
+{
+ __reth_set_len(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
+}
+
+/******************************************************************************
+ * Atomic Extended Transport Header
+ ******************************************************************************/
+struct rxe_atmeth {
+ __be64 va;
+ __be32 rkey;
+ __be64 swap_add;
+ __be64 comp;
+} __attribute__((__packed__));
+
+static inline u64 __atmeth_va(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->va);
+}
+
+static inline void __atmeth_set_va(void *arg, u64 va)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->va = cpu_to_be64(va);
+}
+
+static inline u32 __atmeth_rkey(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be32_to_cpu(atmeth->rkey);
+}
+
+static inline void __atmeth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u64 __atmeth_swap_add(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->swap_add);
+}
+
+static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->swap_add = cpu_to_be64(swap_add);
+}
+
+static inline u64 __atmeth_comp(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->comp);
+}
+
+static inline void __atmeth_set_comp(void *arg, u64 comp)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->comp = cpu_to_be64(comp);
+}
+
+static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
+{
+ __atmeth_set_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
+}
+
+static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __atmeth_set_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
+}
+
+static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_swap_add(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
+{
+ __atmeth_set_swap_add(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
+}
+
+static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_comp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
+{
+ __atmeth_set_comp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
+}
+
+/******************************************************************************
+ * Ack Extended Transport Header
+ ******************************************************************************/
+struct rxe_aeth {
+ __be32 smsn;
+};
+
+#define AETH_SYN_MASK (0xff000000)
+#define AETH_MSN_MASK (0x00ffffff)
+
+enum aeth_syndrome {
+ AETH_TYPE_MASK = 0xe0,
+ AETH_ACK = 0x00,
+ AETH_RNR_NAK = 0x20,
+ AETH_RSVD = 0x40,
+ AETH_NAK = 0x60,
+ AETH_ACK_UNLIMITED = 0x1f,
+ AETH_NAK_PSN_SEQ_ERROR = 0x60,
+ AETH_NAK_INVALID_REQ = 0x61,
+ AETH_NAK_REM_ACC_ERR = 0x62,
+ AETH_NAK_REM_OP_ERR = 0x63,
+ AETH_NAK_INV_RD_REQ = 0x64,
+};
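+
+/* The top three bits of the syndrome select its type (AETH_TYPE_MASK);
+ * for NAKs the remaining bits carry the specific error, e.g.
+ * AETH_NAK_INVALID_REQ is AETH_NAK with error code 1.
+ */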
+
+static inline u8 __aeth_syn(void *arg)
+{
+ struct rxe_aeth *aeth = arg;
+
+ return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
+}
+
+static inline void __aeth_set_syn(void *arg, u8 syn)
+{
+ struct rxe_aeth *aeth = arg;
+ u32 smsn = be32_to_cpu(aeth->smsn);
+
+ aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
+ (~AETH_SYN_MASK & smsn));
+}
+
+static inline u32 __aeth_msn(void *arg)
+{
+ struct rxe_aeth *aeth = arg;
+
+ return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
+}
+
+static inline void __aeth_set_msn(void *arg, u32 msn)
+{
+ struct rxe_aeth *aeth = arg;
+ u32 smsn = be32_to_cpu(aeth->smsn);
+
+ aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
+ (~AETH_MSN_MASK & smsn));
+}
+
+static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
+{
+ return __aeth_syn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+}
+
+static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
+{
+ __aeth_set_syn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
+}
+
+static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
+{
+ return __aeth_msn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+}
+
+static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
+{
+ __aeth_set_msn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
+}
+
+/******************************************************************************
+ * Atomic Ack Extended Transport Header
+ ******************************************************************************/
+struct rxe_atmack {
+ __be64 orig;
+};
+
+static inline u64 __atmack_orig(void *arg)
+{
+ struct rxe_atmack *atmack = arg;
+
+ return be64_to_cpu(atmack->orig);
+}
+
+static inline void __atmack_set_orig(void *arg, u64 orig)
+{
+ struct rxe_atmack *atmack = arg;
+
+ atmack->orig = cpu_to_be64(orig);
+}
+
+static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
+{
+ return __atmack_orig(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
+}
+
+static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
+{
+ __atmack_set_orig(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
+}
+
+/******************************************************************************
+ * Immediate Extended Transport Header
+ ******************************************************************************/
+struct rxe_immdt {
+ __be32 imm;
+};
+
+static inline __be32 __immdt_imm(void *arg)
+{
+ struct rxe_immdt *immdt = arg;
+
+ return immdt->imm;
+}
+
+static inline void __immdt_set_imm(void *arg, __be32 imm)
+{
+ struct rxe_immdt *immdt = arg;
+
+ immdt->imm = imm;
+}
+
+static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
+{
+ return __immdt_imm(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
+}
+
+static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
+{
+ __immdt_set_imm(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
+}
+
+/******************************************************************************
+ * Invalidate Extended Transport Header
+ ******************************************************************************/
+struct rxe_ieth {
+ __be32 rkey;
+};
+
+static inline u32 __ieth_rkey(void *arg)
+{
+ struct rxe_ieth *ieth = arg;
+
+ return be32_to_cpu(ieth->rkey);
+}
+
+static inline void __ieth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_ieth *ieth = arg;
+
+ ieth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __ieth_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IETH]);
+}
+
+static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __ieth_set_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
+}
+
+enum rxe_hdr_length {
+ RXE_BTH_BYTES = sizeof(struct rxe_bth),
+ RXE_DETH_BYTES = sizeof(struct rxe_deth),
+ RXE_IMMDT_BYTES = sizeof(struct rxe_immdt),
+ RXE_RETH_BYTES = sizeof(struct rxe_reth),
+ RXE_AETH_BYTES = sizeof(struct rxe_aeth),
+ RXE_ATMACK_BYTES = sizeof(struct rxe_atmack),
+ RXE_ATMETH_BYTES = sizeof(struct rxe_atmeth),
+ RXE_IETH_BYTES = sizeof(struct rxe_ieth),
+ RXE_RDETH_BYTES = sizeof(struct rxe_rdeth),
+};
+
+static inline size_t header_size(struct rxe_pkt_info *pkt)
+{
+ return pkt->offset + rxe_opcode[pkt->opcode].length;
+}
+
+static inline void *payload_addr(struct rxe_pkt_info *pkt)
+{
+ return pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
+}
+
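+/* Payload length: everything in the packet after the transport headers,
+ * excluding the pad bytes and the trailing ICRC.
+ */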
+static inline size_t payload_size(struct rxe_pkt_info *pkt)
+{
+ return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
+ - bth_pad(pkt) - RXE_ICRC_SIZE;
+}
+
+#endif /* RXE_HDR_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
new file mode 100644
index 000000000000..413b56b23a06
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/* Compute a partial ICRC for all the IB transport headers. */
+u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+{
+ unsigned int bth_offset = 0;
+ struct iphdr *ip4h = NULL;
+ struct ipv6hdr *ip6h = NULL;
+ struct udphdr *udph;
+ struct rxe_bth *bth;
+ int crc;
+ int length;
+ int hdr_size = sizeof(struct udphdr) +
+ (skb->protocol == htons(ETH_P_IP) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr));
+ /* the pseudo header buffer size is calculated using the ipv6 header size
+ * since it is bigger than the ipv4 one
+ */
+ u8 pshdr[sizeof(struct udphdr) +
+ sizeof(struct ipv6hdr) +
+ RXE_BTH_BYTES];
+
+ /* This seed is the result of computing a CRC with a seed of
+ * 0xffffffff and 8 bytes of 0xff representing a masked LRH.
+ */
+ crc = 0xdebb20e3;
+
+ if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
+ memcpy(pshdr, ip_hdr(skb), hdr_size);
+ ip4h = (struct iphdr *)pshdr;
+ udph = (struct udphdr *)(ip4h + 1);
+
+ ip4h->ttl = 0xff;
+ ip4h->check = CSUM_MANGLED_0;
+ ip4h->tos = 0xff;
+ } else { /* IPv6 */
+ memcpy(pshdr, ipv6_hdr(skb), hdr_size);
+ ip6h = (struct ipv6hdr *)pshdr;
+ udph = (struct udphdr *)(ip6h + 1);
+
+ memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
+ ip6h->priority = 0xf;
+ ip6h->hop_limit = 0xff;
+ }
+ udph->check = CSUM_MANGLED_0;
+
+ bth_offset += hdr_size;
+
+ memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
+ bth = (struct rxe_bth *)&pshdr[bth_offset];
+
+ /* exclude bth.resv8a */
+ bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
+
+ length = hdr_size + RXE_BTH_BYTES;
+ crc = crc32_le(crc, pshdr, length);
+
+ /* Finish computing the CRC over the remainder of the headers. */
+ crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES,
+ rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
+ return crc;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
new file mode 100644
index 000000000000..4a5484ef604f
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_LOC_H
+#define RXE_LOC_H
+
+/* rxe_av.c */
+
+int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);
+
+int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
+ struct rxe_av *av, struct ib_ah_attr *attr);
+
+int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
+ struct ib_ah_attr *attr);
+
+int rxe_av_fill_ip_info(struct rxe_dev *rxe,
+ struct rxe_av *av,
+ struct ib_ah_attr *attr,
+ struct ib_gid_attr *sgid_attr,
+ union ib_gid *sgid);
+
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
+
+/* rxe_cq.c */
+int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ int cqe, int comp_vector, struct ib_udata *udata);
+
+int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
+
+int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
+
+void rxe_cq_cleanup(void *arg);
+
+/* rxe_mcast.c */
+int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mc_grp **grp_p);
+
+int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct rxe_mc_grp *grp);
+
+int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ union ib_gid *mgid);
+
+void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
+
+void rxe_mc_cleanup(void *arg);
+
+/* rxe_mmap.c */
+struct rxe_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ struct kref ref;
+ void *obj;
+
+ struct mminfo info;
+};
+
+void rxe_mmap_release(struct kref *ref);
+
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+
+int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+/* rxe_mr.c */
+enum copy_direction {
+ to_mem_obj,
+ from_mem_obj,
+};
+
+int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int access, struct rxe_mem *mem);
+
+int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+ u64 length, u64 iova, int access, struct ib_udata *udata,
+ struct rxe_mem *mr);
+
+int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int max_pages, struct rxe_mem *mem);
+
+int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
+ int length, enum copy_direction dir, u32 *crcp);
+
+int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+ struct rxe_dma_info *dma, void *addr, int length,
+ enum copy_direction dir, u32 *crcp);
+
+void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
+
+enum lookup_type {
+ lookup_local,
+ lookup_remote,
+};
+
+struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type);
+
+int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
+
+int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
+ u64 *page, int num_pages, u64 iova);
+
+void rxe_mem_cleanup(void *arg);
+
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+
+/* rxe_qp.c */
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
+
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ struct ib_qp_init_attr *init, struct ib_udata *udata,
+ struct ib_pd *ibpd);
+
+int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
+
+int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_attr *attr, int mask);
+
+int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
+ int mask, struct ib_udata *udata);
+
+int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
+
+void rxe_qp_error(struct rxe_qp *qp);
+
+void rxe_qp_destroy(struct rxe_qp *qp);
+
+void rxe_qp_cleanup(void *arg);
+
+static inline int qp_num(struct rxe_qp *qp)
+{
+ return qp->ibqp.qp_num;
+}
+
+static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
+{
+ return qp->ibqp.qp_type;
+}
+
+static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
+{
+ return qp->attr.qp_state;
+}
+
+static inline int qp_mtu(struct rxe_qp *qp)
+{
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ return qp->attr.path_mtu;
+ else
+ return RXE_PORT_MAX_MTU;
+}
+
+static inline int rcv_wqe_size(int max_sge)
+{
+ return sizeof(struct rxe_recv_wqe) +
+ max_sge * sizeof(struct ib_sge);
+}
+
+void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
+
+static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
+{
+ qp->resp.res_head++;
+ if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
+ qp->resp.res_head = 0;
+}
+
+void retransmit_timer(unsigned long data);
+void rnr_nak_timer(unsigned long data);
+
+void dump_qp(struct rxe_qp *qp);
+
+/* rxe_srq.c */
+#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
+
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
+
+int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata);
+
+int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ struct ib_udata *udata);
+
+extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;
+
+void rxe_release(struct kref *kref);
+
+int rxe_completer(void *arg);
+int rxe_requester(void *arg);
+int rxe_responder(void *arg);
+
+u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+
+void rxe_resp_queue_pkt(struct rxe_dev *rxe,
+ struct rxe_qp *qp, struct sk_buff *skb);
+
+void rxe_comp_queue_pkt(struct rxe_dev *rxe,
+ struct rxe_qp *qp, struct sk_buff *skb);
+
+static inline unsigned wr_opcode_mask(int opcode, struct rxe_qp *qp)
+{
+ return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
+}
+
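+/* Hand a fully built packet to the interface layer (or loop it back for
+ * local destinations). Packets are dropped if the QP is not in the ready
+ * state; for non-RC QPs the completer task is kicked once the last packet
+ * of a wqe has been handed off.
+ */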
+static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt, struct sk_buff *skb)
+{
+ int err;
+ int is_request = pkt->mask & RXE_REQ_MASK;
+
+ if ((is_request && (qp->req.state != QP_STATE_READY)) ||
+ (!is_request && (qp->resp.state != QP_STATE_READY))) {
+ pr_info("Packet dropped. QP is not in ready state\n");
+ goto drop;
+ }
+
+ if (pkt->mask & RXE_LOOPBACK_MASK) {
+ memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
+ err = rxe->ifc_ops->loopback(skb);
+ } else {
+ err = rxe->ifc_ops->send(rxe, pkt, skb);
+ }
+
+ if (err) {
+ rxe->xmit_errors++;
+ return err;
+ }
+
+ atomic_inc(&qp->skb_out);
+
+ if ((qp_type(qp) != IB_QPT_RC) &&
+ (pkt->mask & RXE_END_MASK)) {
+ pkt->wqe->state = wqe_state_done;
+ rxe_run_task(&qp->comp.task, 1);
+ }
+
+ goto done;
+
+drop:
+ kfree_skb(skb);
+ err = 0;
+done:
+ return err;
+}
+
+#endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
new file mode 100644
index 000000000000..fa95544ca7e0
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mc_grp **grp_p)
+{
+ int err;
+ struct rxe_mc_grp *grp;
+
+ if (rxe->attr.max_mcast_qp_attach == 0) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+ if (grp)
+ goto done;
+
+ grp = rxe_alloc(&rxe->mc_grp_pool);
+ if (!grp) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&grp->qp_list);
+ spin_lock_init(&grp->mcg_lock);
+ grp->rxe = rxe;
+
+ rxe_add_key(grp, mgid);
+
+ err = rxe->ifc_ops->mcast_add(rxe, mgid);
+ if (err)
+ goto err2;
+
+done:
+ *grp_p = grp;
+ return 0;
+
+err2:
+ rxe_drop_ref(grp);
+err1:
+ return err;
+}
+
+int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct rxe_mc_grp *grp)
+{
+ int err;
+ struct rxe_mc_elem *elem;
+
+ /* check to see if the qp is already a member of the group */
+ spin_lock_bh(&qp->grp_lock);
+ spin_lock_bh(&grp->mcg_lock);
+ list_for_each_entry(elem, &grp->qp_list, qp_list) {
+ if (elem->qp == qp) {
+ err = 0;
+ goto out;
+ }
+ }
+
+ if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ elem = rxe_alloc(&rxe->mc_elem_pool);
+ if (!elem) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* each qp holds a ref on the grp */
+ rxe_add_ref(grp);
+
+ grp->num_qp++;
+ elem->qp = qp;
+ elem->grp = grp;
+
+ list_add(&elem->qp_list, &grp->qp_list);
+ list_add(&elem->grp_list, &qp->grp_list);
+
+ err = 0;
+out:
+ spin_unlock_bh(&grp->mcg_lock);
+ spin_unlock_bh(&qp->grp_lock);
+ return err;
+}
+
+int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ union ib_gid *mgid)
+{
+ struct rxe_mc_grp *grp;
+ struct rxe_mc_elem *elem, *tmp;
+
+ grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+ if (!grp)
+ goto err1;
+
+ spin_lock_bh(&qp->grp_lock);
+ spin_lock_bh(&grp->mcg_lock);
+
+ list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
+ if (elem->qp == qp) {
+ list_del(&elem->qp_list);
+ list_del(&elem->grp_list);
+ grp->num_qp--;
+
+ spin_unlock_bh(&grp->mcg_lock);
+ spin_unlock_bh(&qp->grp_lock);
+ rxe_drop_ref(elem);
+ rxe_drop_ref(grp); /* ref held by QP */
+ rxe_drop_ref(grp); /* ref from get_key */
+ return 0;
+ }
+ }
+
+ spin_unlock_bh(&grp->mcg_lock);
+ spin_unlock_bh(&qp->grp_lock);
+ rxe_drop_ref(grp); /* ref from get_key */
+err1:
+ return -EINVAL;
+}
+
+void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
+{
+ struct rxe_mc_grp *grp;
+ struct rxe_mc_elem *elem;
+
+ while (1) {
+ spin_lock_bh(&qp->grp_lock);
+ if (list_empty(&qp->grp_list)) {
+ spin_unlock_bh(&qp->grp_lock);
+ break;
+ }
+ elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
+ grp_list);
+ list_del(&elem->grp_list);
+ spin_unlock_bh(&qp->grp_lock);
+
+ grp = elem->grp;
+ spin_lock_bh(&grp->mcg_lock);
+ list_del(&elem->qp_list);
+ grp->num_qp--;
+ spin_unlock_bh(&grp->mcg_lock);
+ rxe_drop_ref(grp);
+ rxe_drop_ref(elem);
+ }
+}
+
+void rxe_mc_cleanup(void *arg)
+{
+ struct rxe_mc_grp *grp = arg;
+ struct rxe_dev *rxe = grp->rxe;
+
+ rxe_drop_key(grp);
+ rxe->ifc_ops->mcast_delete(rxe, &grp->mgid);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
new file mode 100644
index 000000000000..54b3c7c99eff
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+void rxe_mmap_release(struct kref *ref)
+{
+ struct rxe_mmap_info *ip = container_of(ref,
+ struct rxe_mmap_info, ref);
+ struct rxe_dev *rxe = to_rdev(ip->context->device);
+
+ spin_lock_bh(&rxe->pending_lock);
+
+ if (!list_empty(&ip->pending_mmaps))
+ list_del(&ip->pending_mmaps);
+
+ spin_unlock_bh(&rxe->pending_lock);
+
+	vfree(ip->obj);		/* the vmalloc()ed object backing the mmap */
+ kfree(ip);
+}
+
+/*
+ * open and close keep track of how many times the memory region is
+ * mapped, so it is not released while a mapping still exists.
+ */
+static void rxe_vma_open(struct vm_area_struct *vma)
+{
+ struct rxe_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void rxe_vma_close(struct vm_area_struct *vma)
+{
+ struct rxe_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, rxe_mmap_release);
+}
+
+static const struct vm_operations_struct rxe_vm_ops = {
+ .open = rxe_vma_open,
+ .close = rxe_vma_close,
+};
+
+/**
+ * rxe_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rxe_dev *rxe = to_rdev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct rxe_mmap_info *ip, *pp;
+ int ret;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_bh(&rxe->pending_lock);
+ list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
+ if (context != ip->context || (__u64)offset != ip->info.offset)
+ continue;
+
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->info.size) {
+ pr_err("mmap region is larger than the object!\n");
+ spin_unlock_bh(&rxe->pending_lock);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ goto found_it;
+ }
+ pr_warn("unable to find pending mmap info\n");
+ spin_unlock_bh(&rxe->pending_lock);
+ ret = -EINVAL;
+ goto done;
+
+found_it:
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_bh(&rxe->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret) {
+ pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+ goto done;
+ }
+
+ vma->vm_ops = &rxe_vm_ops;
+ vma->vm_private_data = ip;
+ rxe_vma_open(vma);
+done:
+ return ret;
+}
+
+/*
+ * Allocate information for rxe_mmap
+ */
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj)
+{
+ struct rxe_mmap_info *ip;
+
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+
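+	/* hand out a unique, page-aligned offset; userspace passes it back
+	 * as the mmap() offset so rxe_mmap() can find this object again
+	 */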
+ spin_lock_bh(&rxe->mmap_offset_lock);
+
+ if (rxe->mmap_offset == 0)
+ rxe->mmap_offset = PAGE_SIZE;
+
+ ip->info.offset = rxe->mmap_offset;
+ rxe->mmap_offset += size;
+
+ spin_unlock_bh(&rxe->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->info.size = size;
+ ip->context = context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+ return ip;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
new file mode 100644
index 000000000000..f3dab6574504
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/*
+ * lfsr (linear feedback shift register) with period 255
+ */
+static u8 rxe_get_key(void)
+{
+ static unsigned key = 1;
+
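+	/* feedback comes from bits 8, 7, 6 and 4 of the shifted value,
+	 * producing the period-255 sequence noted above
+	 */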
+ key = key << 1;
+
+ key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
+ ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));
+
+ key &= 0xff;
+
+ return key;
+}
+
+int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+{
+ switch (mem->type) {
+ case RXE_MEM_TYPE_DMA:
+ return 0;
+
+ case RXE_MEM_TYPE_MR:
+ case RXE_MEM_TYPE_FMR:
+ return ((iova < mem->iova) ||
+ ((iova + length) > (mem->iova + mem->length))) ?
+ -EFAULT : 0;
+
+ default:
+ return -EFAULT;
+ }
+}
+
+#define IB_ACCESS_REMOTE (IB_ACCESS_REMOTE_READ \
+ | IB_ACCESS_REMOTE_WRITE \
+ | IB_ACCESS_REMOTE_ATOMIC)
+
+static void rxe_mem_init(int access, struct rxe_mem *mem)
+{
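+	/* the upper bits of the key index the object in its pool; the low
+	 * 8 bits come from the lfsr above and change each time a region is
+	 * initialized
+	 */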
+ u32 lkey = mem->pelem.index << 8 | rxe_get_key();
+ u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
+
+ if (mem->pelem.pool->type == RXE_TYPE_MR) {
+ mem->ibmr.lkey = lkey;
+ mem->ibmr.rkey = rkey;
+ }
+
+ mem->lkey = lkey;
+ mem->rkey = rkey;
+ mem->state = RXE_MEM_STATE_INVALID;
+ mem->type = RXE_MEM_TYPE_NONE;
+ mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+}
+
+void rxe_mem_cleanup(void *arg)
+{
+ struct rxe_mem *mem = arg;
+ int i;
+
+ if (mem->umem)
+ ib_umem_release(mem->umem);
+
+ if (mem->map) {
+ for (i = 0; i < mem->num_map; i++)
+ kfree(mem->map[i]);
+
+ kfree(mem->map);
+ }
+}
+
+static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+{
+ int i;
+ int num_map;
+ struct rxe_map **map = mem->map;
+
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+
+ mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mem->map)
+ goto err1;
+
+ for (i = 0; i < num_map; i++) {
+ mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mem->map[i])
+ goto err2;
+ }
+
+ WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+
+ mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+ mem->map_mask = RXE_BUF_PER_MAP - 1;
+
+ mem->num_buf = num_buf;
+ mem->num_map = num_map;
+ mem->max_buf = num_map * RXE_BUF_PER_MAP;
+
+ return 0;
+
+err2:
+ for (i--; i >= 0; i--)
+ kfree(mem->map[i]);
+
+ kfree(mem->map);
+err1:
+ return -ENOMEM;
+}
+
+int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int access, struct rxe_mem *mem)
+{
+ rxe_mem_init(access, mem);
+
+ mem->pd = pd;
+ mem->access = access;
+ mem->state = RXE_MEM_STATE_VALID;
+ mem->type = RXE_MEM_TYPE_DMA;
+
+ return 0;
+}
+
+int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+ u64 length, u64 iova, int access, struct ib_udata *udata,
+ struct rxe_mem *mem)
+{
+ int entry;
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf = NULL;
+ struct ib_umem *umem;
+ struct scatterlist *sg;
+ int num_buf;
+ void *vaddr;
+ int err;
+
+ umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
+ if (IS_ERR(umem)) {
+		pr_warn("err %d from ib_umem_get\n",
+ (int)PTR_ERR(umem));
+ err = -EINVAL;
+ goto err1;
+ }
+
+ mem->umem = umem;
+ num_buf = umem->nmap;
+
+ rxe_mem_init(access, mem);
+
+ err = rxe_mem_alloc(rxe, mem, num_buf);
+ if (err) {
+ pr_warn("err %d from rxe_mem_alloc\n", err);
+ ib_umem_release(umem);
+ goto err1;
+ }
+
+ WARN_ON(!is_power_of_2(umem->page_size));
+
+ mem->page_shift = ilog2(umem->page_size);
+ mem->page_mask = umem->page_size - 1;
+
+ num_buf = 0;
+ map = mem->map;
+ if (length > 0) {
+ buf = map[0]->buf;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ vaddr = page_address(sg_page(sg));
+ if (!vaddr) {
+ pr_warn("null vaddr\n");
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ buf->addr = (uintptr_t)vaddr;
+ buf->size = umem->page_size;
+ num_buf++;
+ buf++;
+
+ if (num_buf >= RXE_BUF_PER_MAP) {
+ map++;
+ buf = map[0]->buf;
+ num_buf = 0;
+ }
+ }
+ }
+
+ mem->pd = pd;
+ mem->umem = umem;
+ mem->access = access;
+ mem->length = length;
+ mem->iova = iova;
+ mem->va = start;
+ mem->offset = ib_umem_offset(umem);
+ mem->state = RXE_MEM_STATE_VALID;
+ mem->type = RXE_MEM_TYPE_MR;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int max_pages, struct rxe_mem *mem)
+{
+ int err;
+
+ rxe_mem_init(0, mem);
+
+ /* In fastreg, we also set the rkey */
+ mem->ibmr.rkey = mem->ibmr.lkey;
+
+ err = rxe_mem_alloc(rxe, mem, max_pages);
+ if (err)
+ goto err1;
+
+ mem->pd = pd;
+ mem->max_buf = max_pages;
+ mem->state = RXE_MEM_STATE_FREE;
+ mem->type = RXE_MEM_TYPE_MR;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static void lookup_iova(
+ struct rxe_mem *mem,
+ u64 iova,
+ int *m_out,
+ int *n_out,
+ size_t *offset_out)
+{
+ size_t offset = iova - mem->iova + mem->offset;
+ int map_index;
+ int buf_index;
+ u64 length;
+
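+	/* fast path: every buffer has the same power-of-two size, so the
+	 * map slot, buffer slot and byte offset fall out of shifts and masks
+	 */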
+ if (likely(mem->page_shift)) {
+ *offset_out = offset & mem->page_mask;
+ offset >>= mem->page_shift;
+ *n_out = offset & mem->map_mask;
+ *m_out = offset >> mem->map_shift;
+ } else {
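+		/* buffers may vary in size, so walk them and subtract each
+		 * buffer's length until the offset lands inside one
+		 */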
+ map_index = 0;
+ buf_index = 0;
+
+ length = mem->map[map_index]->buf[buf_index].size;
+
+ while (offset >= length) {
+ offset -= length;
+ buf_index++;
+
+ if (buf_index == RXE_BUF_PER_MAP) {
+ map_index++;
+ buf_index = 0;
+ }
+ length = mem->map[map_index]->buf[buf_index].size;
+ }
+
+ *m_out = map_index;
+ *n_out = buf_index;
+ *offset_out = offset;
+ }
+}
+
+void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
+{
+ size_t offset;
+ int m, n;
+ void *addr;
+
+ if (mem->state != RXE_MEM_STATE_VALID) {
+ pr_warn("mem not in valid state\n");
+ addr = NULL;
+ goto out;
+ }
+
+ if (!mem->map) {
+ addr = (void *)(uintptr_t)iova;
+ goto out;
+ }
+
+ if (mem_check_range(mem, iova, length)) {
+ pr_warn("range violation\n");
+ addr = NULL;
+ goto out;
+ }
+
+ lookup_iova(mem, iova, &m, &n, &offset);
+
+ if (offset + length > mem->map[m]->buf[n].size) {
+ pr_warn("crosses page boundary\n");
+ addr = NULL;
+ goto out;
+ }
+
+ addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
+
+out:
+ return addr;
+}
+
+/* copy data from a range (addr, addr+length-1) to or from
+ * a mem object starting at iova. Compute the incremental value of
+ * crc32 if crcp is not NULL. The caller must hold a reference to mem.
+ */
+int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
+ enum copy_direction dir, u32 *crcp)
+{
+ int err;
+ int bytes;
+ u8 *va;
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf;
+ int m;
+ int i;
+ size_t offset;
+ u32 crc = crcp ? (*crcp) : 0;
+
+ if (mem->type == RXE_MEM_TYPE_DMA) {
+ u8 *src, *dest;
+
+ src = (dir == to_mem_obj) ?
+ addr : ((void *)(uintptr_t)iova);
+
+ dest = (dir == to_mem_obj) ?
+ ((void *)(uintptr_t)iova) : addr;
+
+ if (crcp)
+ *crcp = crc32_le(*crcp, src, length);
+
+ memcpy(dest, src, length);
+
+ return 0;
+ }
+
+ WARN_ON(!mem->map);
+
+ err = mem_check_range(mem, iova, length);
+ if (err) {
+ err = -EFAULT;
+ goto err1;
+ }
+
+ lookup_iova(mem, iova, &m, &i, &offset);
+
+ map = mem->map + m;
+ buf = map[0]->buf + i;
+
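+	/* copy buffer by buffer, advancing to the next map when the current
+	 * one is exhausted, and fold each chunk into the running crc when
+	 * requested
+	 */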
+ while (length > 0) {
+ u8 *src, *dest;
+
+ va = (u8 *)(uintptr_t)buf->addr + offset;
+ src = (dir == to_mem_obj) ? addr : va;
+ dest = (dir == to_mem_obj) ? va : addr;
+
+ bytes = buf->size - offset;
+
+ if (bytes > length)
+ bytes = length;
+
+ if (crcp)
+ crc = crc32_le(crc, src, bytes);
+
+ memcpy(dest, src, bytes);
+
+ length -= bytes;
+ addr += bytes;
+
+ offset = 0;
+ buf++;
+ i++;
+
+ if (i == RXE_BUF_PER_MAP) {
+ i = 0;
+ map++;
+ buf = map[0]->buf;
+ }
+ }
+
+ if (crcp)
+ *crcp = crc;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+/* copy data in or out of a wqe, i.e. sg list
+ * under the control of a dma descriptor
+ */
+int copy_data(
+ struct rxe_dev *rxe,
+ struct rxe_pd *pd,
+ int access,
+ struct rxe_dma_info *dma,
+ void *addr,
+ int length,
+ enum copy_direction dir,
+ u32 *crcp)
+{
+ int bytes;
+ struct rxe_sge *sge = &dma->sge[dma->cur_sge];
+ int offset = dma->sge_offset;
+ int resid = dma->resid;
+ struct rxe_mem *mem = NULL;
+ u64 iova;
+ int err;
+
+ if (length == 0)
+ return 0;
+
+ if (length > resid) {
+ err = -EINVAL;
+ goto err2;
+ }
+
+ if (sge->length && (offset < sge->length)) {
+ mem = lookup_mem(pd, access, sge->lkey, lookup_local);
+ if (!mem) {
+ err = -EINVAL;
+ goto err1;
+ }
+ }
+
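+	/* walk the sge list, looking up each memory region as it is reached
+	 * and copying at most the remainder of the current sge per pass
+	 */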
+ while (length > 0) {
+ bytes = length;
+
+ if (offset >= sge->length) {
+ if (mem) {
+ rxe_drop_ref(mem);
+ mem = NULL;
+ }
+ sge++;
+ dma->cur_sge++;
+ offset = 0;
+
+ if (dma->cur_sge >= dma->num_sge) {
+ err = -ENOSPC;
+ goto err2;
+ }
+
+ if (sge->length) {
+ mem = lookup_mem(pd, access, sge->lkey,
+ lookup_local);
+ if (!mem) {
+ err = -EINVAL;
+ goto err1;
+ }
+ } else {
+ continue;
+ }
+ }
+
+ if (bytes > sge->length - offset)
+ bytes = sge->length - offset;
+
+ if (bytes > 0) {
+ iova = sge->addr + offset;
+
+ err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
+ if (err)
+ goto err2;
+
+ offset += bytes;
+ resid -= bytes;
+ length -= bytes;
+ addr += bytes;
+ }
+ }
+
+ dma->sge_offset = offset;
+ dma->resid = resid;
+
+ if (mem)
+ rxe_drop_ref(mem);
+
+ return 0;
+
+err2:
+ if (mem)
+ rxe_drop_ref(mem);
+err1:
+ return err;
+}
+
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
+{
+ struct rxe_sge *sge = &dma->sge[dma->cur_sge];
+ int offset = dma->sge_offset;
+ int resid = dma->resid;
+
+ while (length) {
+ unsigned int bytes;
+
+ if (offset >= sge->length) {
+ sge++;
+ dma->cur_sge++;
+ offset = 0;
+ if (dma->cur_sge >= dma->num_sge)
+ return -ENOSPC;
+ }
+
+ bytes = length;
+
+ if (bytes > sge->length - offset)
+ bytes = sge->length - offset;
+
+ offset += bytes;
+ resid -= bytes;
+ length -= bytes;
+ }
+
+ dma->sge_offset = offset;
+ dma->resid = resid;
+
+ return 0;
+}
+
+/* (1) find the mem (mr or mw) corresponding to lkey/rkey
+ * depending on lookup_type
+ * (2) verify that the (qp) pd matches the mem pd
+ * (3) verify that the mem can support the requested access
+ * (4) verify that mem state is valid
+ */
+struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type)
+{
+ struct rxe_mem *mem;
+ struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
+ int index = key >> 8;
+
+ if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
+ mem = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mem)
+ goto err1;
+ } else {
+ goto err1;
+ }
+
+ if ((type == lookup_local && mem->lkey != key) ||
+ (type == lookup_remote && mem->rkey != key))
+ goto err2;
+
+ if (mem->pd != pd)
+ goto err2;
+
+ if (access && !(access & mem->access))
+ goto err2;
+
+ if (mem->state != RXE_MEM_STATE_VALID)
+ goto err2;
+
+ return mem;
+
+err2:
+ rxe_drop_ref(mem);
+err1:
+ return NULL;
+}
+
+int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
+ u64 *page, int num_pages, u64 iova)
+{
+ int i;
+ int num_buf;
+ int err;
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf;
+ int page_size;
+
+ if (num_pages > mem->max_buf) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ num_buf = 0;
+ page_size = 1 << mem->page_shift;
+ map = mem->map;
+ buf = map[0]->buf;
+
+ for (i = 0; i < num_pages; i++) {
+ buf->addr = *page++;
+ buf->size = page_size;
+ buf++;
+ num_buf++;
+
+ if (num_buf == RXE_BUF_PER_MAP) {
+ map++;
+ buf = map[0]->buf;
+ num_buf = 0;
+ }
+ }
+
+ mem->iova = iova;
+ mem->va = iova;
+ mem->length = num_pages << mem->page_shift;
+ mem->state = RXE_MEM_STATE_VALID;
+
+ return 0;
+
+err1:
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
new file mode 100644
index 000000000000..0b8d2ea8b41d
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/udp_tunnel.h>
+#include <net/sch_generic.h>
+#include <linux/netfilter.h>
+#include <rdma/ib_addr.h>
+
+#include "rxe.h"
+#include "rxe_net.h"
+#include "rxe_loc.h"
+
+static LIST_HEAD(rxe_dev_list);
+static spinlock_t dev_list_lock; /* spinlock for device list */
+
+struct rxe_dev *net_to_rxe(struct net_device *ndev)
+{
+ struct rxe_dev *rxe;
+ struct rxe_dev *found = NULL;
+
+ spin_lock_bh(&dev_list_lock);
+ list_for_each_entry(rxe, &rxe_dev_list, list) {
+ if (rxe->ndev == ndev) {
+ found = rxe;
+ break;
+ }
+ }
+ spin_unlock_bh(&dev_list_lock);
+
+ return found;
+}
+
+struct rxe_dev *get_rxe_by_name(const char *name)
+{
+ struct rxe_dev *rxe;
+ struct rxe_dev *found = NULL;
+
+ spin_lock_bh(&dev_list_lock);
+ list_for_each_entry(rxe, &rxe_dev_list, list) {
+ if (!strcmp(name, rxe->ib_dev.name)) {
+ found = rxe;
+ break;
+ }
+ }
+ spin_unlock_bh(&dev_list_lock);
+ return found;
+}
+
+struct rxe_recv_sockets recv_sockets;
+
+static __be64 rxe_mac_to_eui64(struct net_device *ndev)
+{
+ unsigned char *mac_addr = ndev->dev_addr;
+ __be64 eui64;
+ unsigned char *dst = (unsigned char *)&eui64;
+
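+	/* modified EUI-64: flip the universal/local bit of the first octet
+	 * and insert 0xff, 0xfe between the two halves of the MAC address
+	 */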
+ dst[0] = mac_addr[0] ^ 2;
+ dst[1] = mac_addr[1];
+ dst[2] = mac_addr[2];
+ dst[3] = 0xff;
+ dst[4] = 0xfe;
+ dst[5] = mac_addr[3];
+ dst[6] = mac_addr[4];
+ dst[7] = mac_addr[5];
+
+ return eui64;
+}
+
+static __be64 node_guid(struct rxe_dev *rxe)
+{
+ return rxe_mac_to_eui64(rxe->ndev);
+}
+
+static __be64 port_guid(struct rxe_dev *rxe)
+{
+ return rxe_mac_to_eui64(rxe->ndev);
+}
+
+static struct device *dma_device(struct rxe_dev *rxe)
+{
+ struct net_device *ndev;
+
+ ndev = rxe->ndev;
+
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(ndev);
+
+ return ndev->dev.parent;
+}
+
+static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ int err;
+ unsigned char ll_addr[ETH_ALEN];
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+ err = dev_mc_add(rxe->ndev, ll_addr);
+
+ return err;
+}
+
+static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ int err;
+ unsigned char ll_addr[ETH_ALEN];
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+ err = dev_mc_del(rxe->ndev, ll_addr);
+
+ return err;
+}
+
+static struct dst_entry *rxe_find_route4(struct net_device *ndev,
+ struct in_addr *saddr,
+ struct in_addr *daddr)
+{
+ struct rtable *rt;
+ struct flowi4 fl = { { 0 } };
+
+ fl.flowi4_oif = ndev->ifindex;
+ memcpy(&fl.saddr, saddr, sizeof(*saddr));
+ memcpy(&fl.daddr, daddr, sizeof(*daddr));
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(&init_net, &fl);
+ if (IS_ERR(rt)) {
+ pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
+ return NULL;
+ }
+
+ return &rt->dst;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ struct dst_entry *ndst;
+ struct flowi6 fl6 = { { 0 } };
+
+ fl6.flowi6_oif = ndev->ifindex;
+ memcpy(&fl6.saddr, saddr, sizeof(*saddr));
+ memcpy(&fl6.daddr, daddr, sizeof(*daddr));
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
+ recv_sockets.sk6->sk, &ndst, &fl6))) {
+ pr_err_ratelimited("no route to %pI6\n", daddr);
+ goto put;
+ }
+
+ if (unlikely(ndst->error)) {
+ pr_err("no route to %pI6\n", daddr);
+ goto put;
+ }
+
+ return ndst;
+put:
+ dst_release(ndst);
+ return NULL;
+}
+
+#else
+
+static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ return NULL;
+}
+
+#endif
+
+static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct udphdr *udph;
+ struct net_device *ndev = skb->dev;
+ struct rxe_dev *rxe = net_to_rxe(ndev);
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ if (!rxe)
+ goto drop;
+
+ if (skb_linearize(skb)) {
+ pr_err("skb_linearize failed\n");
+ goto drop;
+ }
+
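+	/* the RoCEv2 base transport header starts right after the UDP header */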
+ udph = udp_hdr(skb);
+ pkt->rxe = rxe;
+ pkt->port_num = 1;
+ pkt->hdr = (u8 *)(udph + 1);
+ pkt->mask = RXE_GRH_MASK;
+ pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+
+ return rxe_rcv(skb);
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ bool ipv6)
+{
+ int err;
+ struct socket *sock;
+ struct udp_port_cfg udp_cfg;
+ struct udp_tunnel_sock_cfg tnl_cfg;
+
+ memset(&udp_cfg, 0, sizeof(udp_cfg));
+
+ if (ipv6) {
+ udp_cfg.family = AF_INET6;
+ udp_cfg.ipv6_v6only = 1;
+ } else {
+ udp_cfg.family = AF_INET;
+ }
+
+ udp_cfg.local_udp_port = port;
+
+ /* Create UDP socket */
+ err = udp_sock_create(net, &udp_cfg, &sock);
+ if (err < 0) {
+ pr_err("failed to create udp socket. err = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+	/* zero the whole config so any fields we do not set explicitly below
+	 * are not left holding stack garbage
+	 */
+	memset(&tnl_cfg, 0, sizeof(tnl_cfg));
+	tnl_cfg.sk_user_data = NULL;
+ tnl_cfg.encap_type = 1;
+ tnl_cfg.encap_rcv = rxe_udp_encap_recv;
+ tnl_cfg.encap_destroy = NULL;
+
+ /* Setup UDP tunnel */
+ setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+
+ return sock;
+}
+
+static void rxe_release_udp_tunnel(struct socket *sk)
+{
+ udp_tunnel_sock_release(sk);
+}
+
+static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
+ __be16 dst_port)
+{
+ struct udphdr *udph;
+
+ __skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+
+ udph->dest = dst_port;
+ udph->source = src_port;
+ udph->len = htons(skb->len);
+ udph->check = 0;
+}
+
+static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, __u8 proto,
+ __u8 tos, __u8 ttl, __be16 df, bool xnet)
+{
+ struct iphdr *iph;
+
+ skb_scrub_packet(skb, xnet);
+
+ skb_clear_hash(skb);
+ skb_dst_set(skb, dst);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb);
+
+ iph->version = IPVERSION;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->frag_off = df;
+ iph->protocol = proto;
+ iph->tos = tos;
+ iph->daddr = daddr;
+ iph->saddr = saddr;
+ iph->ttl = ttl;
+ __ip_select_ident(dev_net(dst->dev), iph,
+ skb_shinfo(skb)->gso_segs ?: 1);
+ iph->tot_len = htons(skb->len);
+ ip_send_check(iph);
+}
+
+static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 proto, __u8 prio, __u8 ttl)
+{
+ struct ipv6hdr *ip6h;
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
+ | IPSKB_REROUTED);
+ skb_dst_set(skb, dst);
+
+ __skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6_flow_hdr(ip6h, prio, htonl(0));
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+ ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+}
+
+static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+{
+ struct dst_entry *dst;
+ bool xnet = false;
+ __be16 df = htons(IP_DF);
+ struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
+ struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ dst = rxe_find_route4(rxe->ndev, saddr, daddr);
+ if (!dst) {
+ pr_err("Host not reachable\n");
+ return -EHOSTUNREACH;
+ }
+
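+	/* a packet whose source and destination addresses match never leaves
+	 * the host, so mark it for the internal loopback path
+	 */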
+ if (!memcmp(saddr, daddr, sizeof(*daddr)))
+ pkt->mask |= RXE_LOOPBACK_MASK;
+
+ prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
+ htons(ROCE_V2_UDP_DPORT));
+
+ prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
+ av->grh.traffic_class, av->grh.hop_limit, df, xnet);
+ return 0;
+}
+
+static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+{
+ struct dst_entry *dst;
+ struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
+ struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ dst = rxe_find_route6(rxe->ndev, saddr, daddr);
+ if (!dst) {
+ pr_err("Host not reachable\n");
+ return -EHOSTUNREACH;
+ }
+
+ if (!memcmp(saddr, daddr, sizeof(*daddr)))
+ pkt->mask |= RXE_LOOPBACK_MASK;
+
+ prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
+ htons(ROCE_V2_UDP_DPORT));
+
+ prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
+ av->grh.traffic_class,
+ av->grh.hop_limit);
+ return 0;
+}
+
+static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 *crc)
+{
+ int err = 0;
+ struct rxe_av *av = rxe_get_av(pkt);
+
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ err = prepare4(rxe, skb, av);
+ else if (av->network_type == RDMA_NETWORK_IPV6)
+ err = prepare6(rxe, skb, av);
+
+ *crc = rxe_icrc_hdr(pkt, skb);
+
+ return err;
+}
+
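+/* skb destructor for transmitted packets: when the number of skbs still in
+ * flight for this qp drops below the low watermark, restart the requester
+ * task if it was throttled waiting for buffers to drain
+ */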
+static void rxe_skb_tx_dtor(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct rxe_qp *qp = sk->sk_user_data;
+ int skb_out = atomic_dec_return(&qp->skb_out);
+
+ if (unlikely(qp->need_req_skb &&
+ skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
+ rxe_run_task(&qp->req.task, 1);
+}
+
+static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ struct rxe_av *av;
+ int err;
+
+ av = rxe_get_av(pkt);
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ nskb->destructor = rxe_skb_tx_dtor;
+ nskb->sk = pkt->qp->sk->sk;
+
+ if (av->network_type == RDMA_NETWORK_IPV4) {
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ } else if (av->network_type == RDMA_NETWORK_IPV6) {
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ } else {
+ pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
+ kfree_skb(nskb);
+ return -EINVAL;
+ }
+
+ if (unlikely(net_xmit_eval(err))) {
+ pr_debug("error sending packet: %d\n", err);
+ return -EAGAIN;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int loopback(struct sk_buff *skb)
+{
+ return rxe_rcv(skb);
+}
+
+static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
+{
+ return rxe->port.port_guid == av->grh.dgid.global.interface_id;
+}
+
+static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt)
+{
+ unsigned int hdr_len;
+ struct sk_buff *skb;
+
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) +
+ sizeof(struct iphdr);
+ else
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) +
+ sizeof(struct ipv6hdr);
+
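+	/* allocate the packet with enough headroom for the link, IP and UDP
+	 * headers that the prepare/send path pushes in front of the payload
+	 */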
+ skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+
+ skb->dev = rxe->ndev;
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
+
+ pkt->rxe = rxe;
+ pkt->port_num = 1;
+ pkt->hdr = skb_put(skb, paylen);
+ pkt->mask |= RXE_GRH_MASK;
+
+ memset(pkt->hdr, 0, paylen);
+
+ return skb;
+}
+
+/*
+ * this is required by rxe_cfg to match rxe devices in
+ * /sys/class/infiniband with their underlying ethernet devices
+ */
+static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
+{
+ return rxe->ndev->name;
+}
+
+static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
+ unsigned int port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static struct rxe_ifc_ops ifc_ops = {
+ .node_guid = node_guid,
+ .port_guid = port_guid,
+ .dma_device = dma_device,
+ .mcast_add = mcast_add,
+ .mcast_delete = mcast_delete,
+ .prepare = prepare,
+ .send = send,
+ .loopback = loopback,
+ .init_packet = init_packet,
+ .parent_name = parent_name,
+ .link_layer = link_layer,
+};
+
+struct rxe_dev *rxe_net_add(struct net_device *ndev)
+{
+ int err;
+ struct rxe_dev *rxe = NULL;
+
+ rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
+ if (!rxe)
+ return NULL;
+
+ rxe->ifc_ops = &ifc_ops;
+ rxe->ndev = ndev;
+
+ err = rxe_add(rxe, ndev->mtu);
+ if (err) {
+ ib_dealloc_device(&rxe->ib_dev);
+ return NULL;
+ }
+
+ spin_lock_bh(&dev_list_lock);
+	list_add_tail(&rxe->list, &rxe_dev_list);
+ spin_unlock_bh(&dev_list_lock);
+ return rxe;
+}
+
+void rxe_remove_all(void)
+{
+ spin_lock_bh(&dev_list_lock);
+ while (!list_empty(&rxe_dev_list)) {
+ struct rxe_dev *rxe =
+ list_first_entry(&rxe_dev_list, struct rxe_dev, list);
+
+ list_del(&rxe->list);
+ spin_unlock_bh(&dev_list_lock);
+ rxe_remove(rxe);
+ spin_lock_bh(&dev_list_lock);
+ }
+ spin_unlock_bh(&dev_list_lock);
+}
+EXPORT_SYMBOL(rxe_remove_all);
+
+static void rxe_port_event(struct rxe_dev *rxe,
+ enum ib_event_type event)
+{
+ struct ib_event ev;
+
+ ev.device = &rxe->ib_dev;
+ ev.element.port_num = 1;
+ ev.event = event;
+
+ ib_dispatch_event(&ev);
+}
+
+/* Caller must hold net_info_lock */
+void rxe_port_up(struct rxe_dev *rxe)
+{
+ struct rxe_port *port;
+
+ port = &rxe->port;
+ port->attr.state = IB_PORT_ACTIVE;
+ port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
+
+ rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
+ pr_info("rxe: set %s active\n", rxe->ib_dev.name);
+}
+
+/* Caller must hold net_info_lock */
+void rxe_port_down(struct rxe_dev *rxe)
+{
+ struct rxe_port *port;
+
+ port = &rxe->port;
+ port->attr.state = IB_PORT_DOWN;
+ port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
+
+ rxe_port_event(rxe, IB_EVENT_PORT_ERR);
+ pr_info("rxe: set %s down\n", rxe->ib_dev.name);
+}
+
+static int rxe_notify(struct notifier_block *not_blk,
+ unsigned long event,
+ void *arg)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(arg);
+ struct rxe_dev *rxe = net_to_rxe(ndev);
+
+ if (!rxe)
+ goto out;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ list_del(&rxe->list);
+ rxe_remove(rxe);
+ break;
+ case NETDEV_UP:
+ rxe_port_up(rxe);
+ break;
+ case NETDEV_DOWN:
+ rxe_port_down(rxe);
+ break;
+ case NETDEV_CHANGEMTU:
+ pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+ rxe_set_mtu(rxe, ndev->mtu);
+ break;
+ case NETDEV_REBOOT:
+ case NETDEV_CHANGE:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGENAME:
+ case NETDEV_FEAT_CHANGE:
+ default:
+ pr_info("rxe: ignoring netdev event = %ld for %s\n",
+ event, ndev->name);
+ break;
+ }
+out:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rxe_net_notifier = {
+ .notifier_call = rxe_notify,
+};
+
+int rxe_net_init(void)
+{
+ int err;
+
+ spin_lock_init(&dev_list_lock);
+
+ recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), true);
+ if (IS_ERR(recv_sockets.sk6)) {
+ recv_sockets.sk6 = NULL;
+ pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+ return -1;
+ }
+
+ recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), false);
+ if (IS_ERR(recv_sockets.sk4)) {
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ recv_sockets.sk4 = NULL;
+ recv_sockets.sk6 = NULL;
+ pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+ return -1;
+ }
+
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+		pr_err("rxe: Failed to register netdev notifier\n");
+ }
+
+ return err;
+}
+
+void rxe_net_exit(void)
+{
+ if (recv_sockets.sk6)
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+
+ if (recv_sockets.sk4)
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+
+ unregister_netdevice_notifier(&rxe_net_notifier);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
new file mode 100644
index 000000000000..7b06f76d16cc
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_NET_H
+#define RXE_NET_H
+
+#include <net/sock.h>
+#include <net/if_inet6.h>
+#include <linux/module.h>
+
+struct rxe_recv_sockets {
+ struct socket *sk4;
+ struct socket *sk6;
+};
+
+extern struct rxe_recv_sockets recv_sockets;
+
+struct rxe_dev *rxe_net_add(struct net_device *ndev);
+
+int rxe_net_init(void);
+void rxe_net_exit(void);
+
+#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
new file mode 100644
index 000000000000..61927c165b59
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_pack.h>
+#include "rxe_opcode.h"
+#include "rxe_hdr.h"
+
+/* useful information about work request opcodes and pkt opcodes in
+ * table form
+ */
+struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
+ [IB_WR_RDMA_WRITE] = {
+ .name = "IB_WR_RDMA_WRITE",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ },
+ },
+ [IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .name = "IB_WR_RDMA_WRITE_WITH_IMM",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ },
+ },
+ [IB_WR_SEND] = {
+ .name = "IB_WR_SEND",
+ .mask = {
+ [IB_QPT_SMI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_SEND_WITH_IMM] = {
+ .name = "IB_WR_SEND_WITH_IMM",
+ .mask = {
+ [IB_QPT_SMI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_RDMA_READ] = {
+ .name = "IB_WR_RDMA_READ",
+ .mask = {
+ [IB_QPT_RC] = WR_READ_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .name = "IB_WR_ATOMIC_CMP_AND_SWP",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .name = "IB_WR_ATOMIC_FETCH_AND_ADD",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_MASK,
+ },
+ },
+ [IB_WR_LSO] = {
+ .name = "IB_WR_LSO",
+ .mask = {
+ /* not supported */
+ },
+ },
+ [IB_WR_SEND_WITH_INV] = {
+ .name = "IB_WR_SEND_WITH_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_RDMA_READ_WITH_INV] = {
+ .name = "IB_WR_RDMA_READ_WITH_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_READ_MASK,
+ },
+ },
+ [IB_WR_LOCAL_INV] = {
+ .name = "IB_WR_LOCAL_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_REG_MASK,
+ },
+ },
+ [IB_WR_REG_MR] = {
+ .name = "IB_WR_REG_MR",
+ .mask = {
+ [IB_QPT_RC] = WR_REG_MASK,
+ },
+ },
+};
+
+struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
+ [IB_OPCODE_RC_SEND_FIRST] = {
+ .name = "IB_OPCODE_RC_SEND_FIRST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_MIDDLE] = {
+		.name	= "IB_OPCODE_RC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST] = {
+ .name = "IB_OPCODE_RC_SEND_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_ACK_MASK | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RC_ACKNOWLEDGE",
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_COMPARE_SWAP] = {
+ .name = "IB_OPCODE_RC_COMPARE_SWAP",
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_ATMETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_FETCH_ADD] = {
+ .name = "IB_OPCODE_RC_FETCH_ADD",
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_ATMETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
+ .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
+		.name	= "IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE",
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IETH_BYTES,
+ }
+ },
+
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = {
+ .name = "IB_OPCODE_UC_SEND_FIRST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_UC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_LAST] = {
+ .name = "IB_OPCODE_UC_SEND_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_ONLY] = {
+ .name = "IB_OPCODE_UC_SEND_ONLY",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+
+ /* RD */
+ [IB_OPCODE_RD_SEND_FIRST] = {
+ .name = "IB_OPCODE_RD_SEND_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_RD_SEND_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_SEND_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_LAST] = {
+ .name = "IB_OPCODE_RD_SEND_LAST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_ONLY] = {
+ .name = "IB_OPCODE_RD_SEND_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES
+ + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_REQUEST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_REQ_MASK | RXE_READ_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK
+ | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_ACK_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RD_ACKNOWLEDGE",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK
+ | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_COMPARE_SWAP] = {
+ .name = "RD_COMPARE_SWAP",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
+ | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_FETCH_ADD] = {
+ .name = "IB_OPCODE_RD_FETCH_ADD",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
+ | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = {
+ .name = "IB_OPCODE_UD_SEND_ONLY",
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_DETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_DETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+
+};
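A minimal sketch of how this table is meant to be consumed (the helper name and the void * packet-header argument below are illustrative, not part of the patch): a receiver indexes rxe_opcode[] by the BTH opcode and adds the precomputed RXE_PAYLOAD offset to find the payload without reparsing each extension header.

static inline void *example_payload_addr(u8 opcode, void *pkt_hdr)
{
	/* the table stores the byte offset of every header, payload included */
	struct rxe_opcode_info *info = &rxe_opcode[opcode];

	return (u8 *)pkt_hdr + info->offset[RXE_PAYLOAD];
}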
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
new file mode 100644
index 000000000000..307604e9c78d
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_OPCODE_H
+#define RXE_OPCODE_H
+
+/*
+ * contains header bit mask definitions and header lengths,
+ * plus declarations of the rxe_opcode_info and
+ * rxe_wr_opcode_info structs
+ */
+
+enum rxe_wr_mask {
+ WR_INLINE_MASK = BIT(0),
+ WR_ATOMIC_MASK = BIT(1),
+ WR_SEND_MASK = BIT(2),
+ WR_READ_MASK = BIT(3),
+ WR_WRITE_MASK = BIT(4),
+ WR_LOCAL_MASK = BIT(5),
+ WR_REG_MASK = BIT(6),
+
+ WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK,
+ WR_READ_WRITE_OR_SEND_MASK = WR_READ_OR_WRITE_MASK | WR_SEND_MASK,
+ WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK,
+ WR_ATOMIC_OR_READ_MASK = WR_ATOMIC_MASK | WR_READ_MASK,
+};
+
+#define WR_MAX_QPT (8)
+
+struct rxe_wr_opcode_info {
+ char *name;
+ enum rxe_wr_mask mask[WR_MAX_QPT];
+};
+
+extern struct rxe_wr_opcode_info rxe_wr_opcode_info[];
+
+enum rxe_hdr_type {
+ RXE_LRH,
+ RXE_GRH,
+ RXE_BTH,
+ RXE_RETH,
+ RXE_AETH,
+ RXE_ATMETH,
+ RXE_ATMACK,
+ RXE_IETH,
+ RXE_RDETH,
+ RXE_DETH,
+ RXE_IMMDT,
+ RXE_PAYLOAD,
+ NUM_HDR_TYPES
+};
+
+enum rxe_hdr_mask {
+ RXE_LRH_MASK = BIT(RXE_LRH),
+ RXE_GRH_MASK = BIT(RXE_GRH),
+ RXE_BTH_MASK = BIT(RXE_BTH),
+ RXE_IMMDT_MASK = BIT(RXE_IMMDT),
+ RXE_RETH_MASK = BIT(RXE_RETH),
+ RXE_AETH_MASK = BIT(RXE_AETH),
+ RXE_ATMETH_MASK = BIT(RXE_ATMETH),
+ RXE_ATMACK_MASK = BIT(RXE_ATMACK),
+ RXE_IETH_MASK = BIT(RXE_IETH),
+ RXE_RDETH_MASK = BIT(RXE_RDETH),
+ RXE_DETH_MASK = BIT(RXE_DETH),
+ RXE_PAYLOAD_MASK = BIT(RXE_PAYLOAD),
+
+ RXE_REQ_MASK = BIT(NUM_HDR_TYPES + 0),
+ RXE_ACK_MASK = BIT(NUM_HDR_TYPES + 1),
+ RXE_SEND_MASK = BIT(NUM_HDR_TYPES + 2),
+ RXE_WRITE_MASK = BIT(NUM_HDR_TYPES + 3),
+ RXE_READ_MASK = BIT(NUM_HDR_TYPES + 4),
+ RXE_ATOMIC_MASK = BIT(NUM_HDR_TYPES + 5),
+
+ RXE_RWR_MASK = BIT(NUM_HDR_TYPES + 6),
+ RXE_COMP_MASK = BIT(NUM_HDR_TYPES + 7),
+
+ RXE_START_MASK = BIT(NUM_HDR_TYPES + 8),
+ RXE_MIDDLE_MASK = BIT(NUM_HDR_TYPES + 9),
+ RXE_END_MASK = BIT(NUM_HDR_TYPES + 10),
+
+ RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12),
+
+ RXE_READ_OR_ATOMIC = (RXE_READ_MASK | RXE_ATOMIC_MASK),
+ RXE_WRITE_OR_SEND = (RXE_WRITE_MASK | RXE_SEND_MASK),
+};
+
+#define OPCODE_NONE (-1)
+#define RXE_NUM_OPCODE 256
+
+struct rxe_opcode_info {
+ char *name;
+ enum rxe_hdr_mask mask;
+ int length;
+ int offset[NUM_HDR_TYPES];
+};
+
+extern struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE];
+
+#endif /* RXE_OPCODE_H */
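A hedged usage sketch of the masks declared above (the helper names are invented for illustration, and the usual kernel types from linux/types.h are assumed): per-opcode behaviour flags are checked with plain bit tests against rxe_opcode[opcode].mask.

static inline bool example_is_request(u8 opcode)
{
	/* set for requester-side packets, clear for acks/responses */
	return rxe_opcode[opcode].mask & RXE_REQ_MASK;
}

static inline bool example_starts_message(u8 opcode)
{
	/* FIRST and ONLY opcodes carry RXE_START_MASK */
	return rxe_opcode[opcode].mask & RXE_START_MASK;
}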
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
new file mode 100644
index 000000000000..f459c43a77c8
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_PARAM_H
+#define RXE_PARAM_H
+
+static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
+{
+ if (mtu < 256)
+ return 0;
+ else if (mtu < 512)
+ return IB_MTU_256;
+ else if (mtu < 1024)
+ return IB_MTU_512;
+ else if (mtu < 2048)
+ return IB_MTU_1024;
+ else if (mtu < 4096)
+ return IB_MTU_2048;
+ else
+ return IB_MTU_4096;
+}
+
+/* Find the IB mtu for a given network MTU. */
+static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
+{
+ mtu -= RXE_MAX_HDR_LENGTH;
+
+ return rxe_mtu_int_to_enum(mtu);
+}
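A worked example, assuming only that RXE_MAX_HDR_LENGTH (defined elsewhere in this series) stays under 476 bytes: a standard 1500-byte Ethernet MTU leaves a payload budget in the [1024, 2048) range, so the helper reports IB_MTU_1024.

static inline enum ib_mtu example_mtu_for_standard_ethernet(void)
{
	/* 1500 - RXE_MAX_HDR_LENGTH falls in [1024, 2048) for any header
	 * budget under 476 bytes, so this is expected to return IB_MTU_1024
	 */
	return eth_mtu_int_to_enum(1500);
}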
+
+/* default/initial rxe device parameter settings */
+enum rxe_device_param {
+ RXE_FW_VER = 0,
+ RXE_MAX_MR_SIZE = -1ull,
+ RXE_PAGE_SIZE_CAP = 0xfffff000,
+ RXE_VENDOR_ID = 0,
+ RXE_VENDOR_PART_ID = 0,
+ RXE_HW_VER = 0,
+ RXE_MAX_QP = 0x10000,
+ RXE_MAX_QP_WR = 0x4000,
+ RXE_MAX_INLINE_DATA = 400,
+ RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
+ | IB_DEVICE_BAD_QKEY_CNTR
+ | IB_DEVICE_AUTO_PATH_MIG
+ | IB_DEVICE_CHANGE_PHY_PORT
+ | IB_DEVICE_UD_AV_PORT_ENFORCE
+ | IB_DEVICE_PORT_ACTIVE_EVENT
+ | IB_DEVICE_SYS_IMAGE_GUID
+ | IB_DEVICE_RC_RNR_NAK_GEN
+ | IB_DEVICE_SRQ_RESIZE
+ | IB_DEVICE_MEM_MGT_EXTENSIONS,
+ RXE_MAX_SGE = 32,
+ RXE_MAX_SGE_RD = 32,
+ RXE_MAX_CQ = 16384,
+ RXE_MAX_LOG_CQE = 13,
+ RXE_MAX_MR = 2 * 1024,
+ RXE_MAX_PD = 0x7ffc,
+ RXE_MAX_QP_RD_ATOM = 128,
+ RXE_MAX_EE_RD_ATOM = 0,
+ RXE_MAX_RES_RD_ATOM = 0x3f000,
+ RXE_MAX_QP_INIT_RD_ATOM = 128,
+ RXE_MAX_EE_INIT_RD_ATOM = 0,
+ RXE_ATOMIC_CAP = 1,
+ RXE_MAX_EE = 0,
+ RXE_MAX_RDD = 0,
+ RXE_MAX_MW = 0,
+ RXE_MAX_RAW_IPV6_QP = 0,
+ RXE_MAX_RAW_ETHY_QP = 0,
+ RXE_MAX_MCAST_GRP = 8192,
+ RXE_MAX_MCAST_QP_ATTACH = 56,
+ RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
+ RXE_MAX_AH = 100,
+ RXE_MAX_FMR = 0,
+ RXE_MAX_MAP_PER_FMR = 0,
+ RXE_MAX_SRQ = 960,
+ RXE_MAX_SRQ_WR = 0x4000,
+ RXE_MIN_SRQ_WR = 1,
+ RXE_MAX_SRQ_SGE = 27,
+ RXE_MIN_SRQ_SGE = 1,
+ RXE_MAX_FMR_PAGE_LIST_LEN = 512,
+ RXE_MAX_PKEYS = 64,
+ RXE_LOCAL_CA_ACK_DELAY = 15,
+
+ RXE_MAX_UCONTEXT = 512,
+
+ RXE_NUM_PORT = 1,
+ RXE_NUM_COMP_VECTORS = 1,
+
+ RXE_MIN_QP_INDEX = 16,
+ RXE_MAX_QP_INDEX = 0x00020000,
+
+ RXE_MIN_SRQ_INDEX = 0x00020001,
+ RXE_MAX_SRQ_INDEX = 0x00040000,
+
+ RXE_MIN_MR_INDEX = 0x00000001,
+ RXE_MAX_MR_INDEX = 0x00040000,
+ RXE_MIN_MW_INDEX = 0x00040001,
+ RXE_MAX_MW_INDEX = 0x00060000,
+ RXE_MAX_PKT_PER_ACK = 64,
+
+ RXE_MAX_UNACKED_PSNS = 128,
+
+ /* Max inflight SKBs per queue pair */
+ RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
+ RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+
+ /* Delay before calling arbiter timer */
+ RXE_NSEC_ARB_TIMER_DELAY = 200,
+};
+
+/* default/initial rxe port parameters */
+enum rxe_port_param {
+ RXE_PORT_STATE = IB_PORT_DOWN,
+ RXE_PORT_MAX_MTU = IB_MTU_4096,
+ RXE_PORT_ACTIVE_MTU = IB_MTU_256,
+ RXE_PORT_GID_TBL_LEN = 1024,
+ RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
+ RXE_PORT_MAX_MSG_SZ = 0x800000,
+ RXE_PORT_BAD_PKEY_CNTR = 0,
+ RXE_PORT_QKEY_VIOL_CNTR = 0,
+ RXE_PORT_LID = 0,
+ RXE_PORT_SM_LID = 0,
+ RXE_PORT_SM_SL = 0,
+ RXE_PORT_LMC = 0,
+ RXE_PORT_MAX_VL_NUM = 1,
+ RXE_PORT_SUBNET_TIMEOUT = 0,
+ RXE_PORT_INIT_TYPE_REPLY = 0,
+ RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X,
+ RXE_PORT_ACTIVE_SPEED = 1,
+ RXE_PORT_PKEY_TBL_LEN = 64,
+ RXE_PORT_PHYS_STATE = 2,
+ RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL,
+};
+
+/* default/initial port info parameters */
+enum rxe_port_info_param {
+ RXE_PORT_INFO_VL_CAP = 4, /* 1-8 */
+ RXE_PORT_INFO_MTU_CAP = 5, /* 4096 */
+ RXE_PORT_INFO_OPER_VL = 1, /* 1 */
+};
+
+#endif /* RXE_PARAM_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
new file mode 100644
index 000000000000..6bac0717c540
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/* info about object pools
+ * note that mr and mw share a single index space
+ * so that one can map an lkey to the correct type of object
+ */
+struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
+ [RXE_TYPE_UC] = {
+ .name = "rxe-uc",
+ .size = sizeof(struct rxe_ucontext),
+ },
+ [RXE_TYPE_PD] = {
+ .name = "rxe-pd",
+ .size = sizeof(struct rxe_pd),
+ },
+ [RXE_TYPE_AH] = {
+ .name = "rxe-ah",
+ .size = sizeof(struct rxe_ah),
+ .flags = RXE_POOL_ATOMIC,
+ },
+ [RXE_TYPE_SRQ] = {
+ .name = "rxe-srq",
+ .size = sizeof(struct rxe_srq),
+ .flags = RXE_POOL_INDEX,
+ .min_index = RXE_MIN_SRQ_INDEX,
+ .max_index = RXE_MAX_SRQ_INDEX,
+ },
+ [RXE_TYPE_QP] = {
+ .name = "rxe-qp",
+ .size = sizeof(struct rxe_qp),
+ .cleanup = rxe_qp_cleanup,
+ .flags = RXE_POOL_INDEX,
+ .min_index = RXE_MIN_QP_INDEX,
+ .max_index = RXE_MAX_QP_INDEX,
+ },
+ [RXE_TYPE_CQ] = {
+ .name = "rxe-cq",
+ .size = sizeof(struct rxe_cq),
+ .cleanup = rxe_cq_cleanup,
+ },
+ [RXE_TYPE_MR] = {
+ .name = "rxe-mr",
+ .size = sizeof(struct rxe_mem),
+ .cleanup = rxe_mem_cleanup,
+ .flags = RXE_POOL_INDEX,
+ .max_index = RXE_MAX_MR_INDEX,
+ .min_index = RXE_MIN_MR_INDEX,
+ },
+ [RXE_TYPE_MW] = {
+ .name = "rxe-mw",
+ .size = sizeof(struct rxe_mem),
+ .flags = RXE_POOL_INDEX,
+ .max_index = RXE_MAX_MW_INDEX,
+ .min_index = RXE_MIN_MW_INDEX,
+ },
+ [RXE_TYPE_MC_GRP] = {
+ .name = "rxe-mc_grp",
+ .size = sizeof(struct rxe_mc_grp),
+ .cleanup = rxe_mc_cleanup,
+ .flags = RXE_POOL_KEY,
+ .key_offset = offsetof(struct rxe_mc_grp, mgid),
+ .key_size = sizeof(union ib_gid),
+ },
+ [RXE_TYPE_MC_ELEM] = {
+ .name = "rxe-mc_elem",
+ .size = sizeof(struct rxe_mc_elem),
+ .flags = RXE_POOL_ATOMIC,
+ },
+};
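For context, one pool per entry of this table is expected to be created at device setup; a sketch of such a call site follows (the qp_pool and attr.max_qp field names are assumptions about rxe_verbs.h, which is added elsewhere in this series).

static int example_create_qp_pool(struct rxe_dev *rxe)
{
	/* the limit ultimately derives from RXE_MAX_QP in rxe_param.h */
	return rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
			     rxe->attr.max_qp);
}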
+
+static inline char *pool_name(struct rxe_pool *pool)
+{
+ return rxe_type_info[pool->type].name;
+}
+
+static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
+{
+ return rxe_type_info[pool->type].cache;
+}
+
+static inline enum rxe_elem_type rxe_type(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+
+ return elem->pool->type;
+}
+
+int rxe_cache_init(void)
+{
+ int err;
+ int i;
+ size_t size;
+ struct rxe_type_info *type;
+
+ for (i = 0; i < RXE_NUM_TYPES; i++) {
+ type = &rxe_type_info[i];
+ size = ALIGN(type->size, RXE_POOL_ALIGN);
+ type->cache = kmem_cache_create(type->name, size,
+ RXE_POOL_ALIGN,
+ RXE_POOL_CACHE_FLAGS, NULL);
+ if (!type->cache) {
+ pr_err("Unable to init kmem cache for %s\n",
+ type->name);
+ err = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ while (--i >= 0) {
+ type = &rxe_type_info[i];
+ kmem_cache_destroy(type->cache);
+ type->cache = NULL;
+ }
+
+ return err;
+}
+
+void rxe_cache_exit(void)
+{
+ int i;
+ struct rxe_type_info *type;
+
+ for (i = 0; i < RXE_NUM_TYPES; i++) {
+ type = &rxe_type_info[i];
+ kmem_cache_destroy(type->cache);
+ type->cache = NULL;
+ }
+}
+
+static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
+{
+ int err = 0;
+ size_t size;
+
+ if ((max - min + 1) < pool->max_elem) {
+ pr_warn("not enough indices for max_elem\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ pool->max_index = max;
+ pool->min_index = min;
+
+ size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
+ pool->table = kmalloc(size, GFP_KERNEL);
+ if (!pool->table) {
+ pr_warn("no memory for bit table\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ pool->table_size = size;
+ bitmap_zero(pool->table, max - min + 1);
+
+out:
+ return err;
+}
+
+int rxe_pool_init(
+ struct rxe_dev *rxe,
+ struct rxe_pool *pool,
+ enum rxe_elem_type type,
+ unsigned max_elem)
+{
+ int err = 0;
+ size_t size = rxe_type_info[type].size;
+
+ memset(pool, 0, sizeof(*pool));
+
+ pool->rxe = rxe;
+ pool->type = type;
+ pool->max_elem = max_elem;
+ pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
+ pool->flags = rxe_type_info[type].flags;
+ pool->tree = RB_ROOT;
+ pool->cleanup = rxe_type_info[type].cleanup;
+
+ atomic_set(&pool->num_elem, 0);
+
+ kref_init(&pool->ref_cnt);
+
+ spin_lock_init(&pool->pool_lock);
+
+ if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
+ err = rxe_pool_init_index(pool,
+ rxe_type_info[type].max_index,
+ rxe_type_info[type].min_index);
+ if (err)
+ goto out;
+ }
+
+ if (rxe_type_info[type].flags & RXE_POOL_KEY) {
+ pool->key_offset = rxe_type_info[type].key_offset;
+ pool->key_size = rxe_type_info[type].key_size;
+ }
+
+ pool->state = rxe_pool_valid;
+
+out:
+ return err;
+}
+
+static void rxe_pool_release(struct kref *kref)
+{
+ struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
+
+ pool->state = rxe_pool_invalid;
+ kfree(pool->table);
+}
+
+static void rxe_pool_put(struct rxe_pool *pool)
+{
+ kref_put(&pool->ref_cnt, rxe_pool_release);
+}
+
+int rxe_pool_cleanup(struct rxe_pool *pool)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ pool->state = rxe_pool_invalid;
+ if (atomic_read(&pool->num_elem) > 0)
+ pr_warn("%s pool destroyed with unfree'd elem\n",
+ pool_name(pool));
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ rxe_pool_put(pool);
+
+ return 0;
+}
+
+static u32 alloc_index(struct rxe_pool *pool)
+{
+ u32 index;
+ u32 range = pool->max_index - pool->min_index + 1;
+
+ index = find_next_zero_bit(pool->table, range, pool->last);
+ if (index >= range)
+ index = find_first_zero_bit(pool->table, range);
+
+ set_bit(index, pool->table);
+ pool->last = index;
+ return index + pool->min_index;
+}
+
+static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+{
+ struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct rxe_pool_entry *elem;
+
+ while (*link) {
+ parent = *link;
+ elem = rb_entry(parent, struct rxe_pool_entry, node);
+
+ if (elem->index == new->index) {
+ pr_warn("element already exists!\n");
+ goto out;
+ }
+
+ if (elem->index > new->index)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &pool->tree);
+out:
+ return;
+}
+
+static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+{
+ struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct rxe_pool_entry *elem;
+ int cmp;
+
+ while (*link) {
+ parent = *link;
+ elem = rb_entry(parent, struct rxe_pool_entry, node);
+
+ cmp = memcmp((u8 *)elem + pool->key_offset,
+ (u8 *)new + pool->key_offset, pool->key_size);
+
+ if (cmp == 0) {
+ pr_warn("key already exists!\n");
+ goto out;
+ }
+
+ if (cmp > 0)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &pool->tree);
+out:
+ return;
+}
+
+void rxe_add_key(void *arg, void *key)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+ insert_key(pool, elem);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void rxe_drop_key(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ rb_erase(&elem->node, &pool->tree);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void rxe_add_index(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ elem->index = alloc_index(pool);
+ insert_index(pool, elem);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void rxe_drop_index(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ clear_bit(elem->index - pool->min_index, pool->table);
+ rb_erase(&elem->node, &pool->tree);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void *rxe_alloc(struct rxe_pool *pool)
+{
+ struct rxe_pool_entry *elem;
+ unsigned long flags;
+
+ might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != rxe_pool_valid) {
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ return NULL;
+ }
+ kref_get(&pool->ref_cnt);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ kref_get(&pool->rxe->ref_cnt);
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+ return NULL;
+ }
+
+ elem = kmem_cache_zalloc(pool_cache(pool),
+ (pool->flags & RXE_POOL_ATOMIC) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (!elem) {
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+ return NULL;
+ }
+
+ elem->pool = pool;
+ kref_init(&elem->ref_cnt);
+
+ return elem;
+}
+
+void rxe_elem_release(struct kref *kref)
+{
+ struct rxe_pool_entry *elem =
+ container_of(kref, struct rxe_pool_entry, ref_cnt);
+ struct rxe_pool *pool = elem->pool;
+
+ if (pool->cleanup)
+ pool->cleanup(elem);
+
+ kmem_cache_free(pool_cache(pool), elem);
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+}
+
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
+{
+ struct rb_node *node = NULL;
+ struct rxe_pool_entry *elem = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+
+ if (pool->state != rxe_pool_valid)
+ goto out;
+
+ node = pool->tree.rb_node;
+
+ while (node) {
+ elem = rb_entry(node, struct rxe_pool_entry, node);
+
+ if (elem->index > index)
+ node = node->rb_left;
+ else if (elem->index < index)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (node)
+ kref_get(&elem->ref_cnt);
+
+out:
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ return node ? (void *)elem : NULL;
+}
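A hedged lookup sketch (the qp_pool field is an assumption about rxe_verbs.h; the pelem member used by rxe_drop_ref() is visible later in rxe_qp.c): rxe_pool_get_index() returns the object with a reference held, which the caller must drop when done.

static void example_lookup_qp(struct rxe_dev *rxe, u32 qpn)
{
	struct rxe_qp *qp = rxe_pool_get_index(&rxe->qp_pool, qpn);

	if (!qp)
		return;

	/* ... use the qp while the reference is held ... */

	rxe_drop_ref(qp);	/* release the kref taken by the lookup */
}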
+
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+{
+ struct rb_node *node = NULL;
+ struct rxe_pool_entry *elem = NULL;
+ int cmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+
+ if (pool->state != rxe_pool_valid)
+ goto out;
+
+ node = pool->tree.rb_node;
+
+ while (node) {
+ elem = rb_entry(node, struct rxe_pool_entry, node);
+
+ cmp = memcmp((u8 *)elem + pool->key_offset,
+ key, pool->key_size);
+
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (node)
+ kref_get(&elem->ref_cnt);
+
+out:
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ return node ? ((void *)elem) : NULL;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
new file mode 100644
index 000000000000..4d04830adcae
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_POOL_H
+#define RXE_POOL_H
+
+#define RXE_POOL_ALIGN (16)
+#define RXE_POOL_CACHE_FLAGS (0)
+
+enum rxe_pool_flags {
+ RXE_POOL_ATOMIC = BIT(0),
+ RXE_POOL_INDEX = BIT(1),
+ RXE_POOL_KEY = BIT(2),
+};
+
+enum rxe_elem_type {
+ RXE_TYPE_UC,
+ RXE_TYPE_PD,
+ RXE_TYPE_AH,
+ RXE_TYPE_SRQ,
+ RXE_TYPE_QP,
+ RXE_TYPE_CQ,
+ RXE_TYPE_MR,
+ RXE_TYPE_MW,
+ RXE_TYPE_MC_GRP,
+ RXE_TYPE_MC_ELEM,
+ RXE_NUM_TYPES, /* keep me last */
+};
+
+struct rxe_type_info {
+ char *name;
+ size_t size;
+ void (*cleanup)(void *obj);
+ enum rxe_pool_flags flags;
+ u32 max_index;
+ u32 min_index;
+ size_t key_offset;
+ size_t key_size;
+ struct kmem_cache *cache;
+};
+
+extern struct rxe_type_info rxe_type_info[];
+
+enum rxe_pool_state {
+ rxe_pool_invalid,
+ rxe_pool_valid,
+};
+
+struct rxe_pool_entry {
+ struct rxe_pool *pool;
+ struct kref ref_cnt;
+ struct list_head list;
+
+ /* only used if indexed or keyed */
+ struct rb_node node;
+ u32 index;
+};
+
+struct rxe_pool {
+ struct rxe_dev *rxe;
+ spinlock_t pool_lock; /* pool spinlock */
+ size_t elem_size;
+ struct kref ref_cnt;
+ void (*cleanup)(void *obj);
+ enum rxe_pool_state state;
+ enum rxe_pool_flags flags;
+ enum rxe_elem_type type;
+
+ unsigned int max_elem;
+ atomic_t num_elem;
+
+ /* only used if indexed or keyed */
+ struct rb_root tree;
+ unsigned long *table;
+ size_t table_size;
+ u32 max_index;
+ u32 min_index;
+ u32 last;
+ size_t key_offset;
+ size_t key_size;
+};
+
+/* initialize slab caches for managed objects */
+int rxe_cache_init(void);
+
+/* cleanup slab caches for managed objects */
+void rxe_cache_exit(void);
+
+/* initialize a pool of objects with given limit on
+ * number of elements. gets parameters from rxe_type_info
+ * pool elements will be allocated out of a slab cache
+ */
+int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ enum rxe_elem_type type, u32 max_elem);
+
+/* free resources from object pool */
+int rxe_pool_cleanup(struct rxe_pool *pool);
+
+/* allocate an object from pool */
+void *rxe_alloc(struct rxe_pool *pool);
+
+/* assign an index to an indexed object and insert object into
+ * pool's rb tree
+ */
+void rxe_add_index(void *elem);
+
+/* drop an index and remove object from rb tree */
+void rxe_drop_index(void *elem);
+
+/* assign a key to a keyed object and insert object into
+ * pool's rb tree
+ */
+void rxe_add_key(void *elem, void *key);
+
+/* remove elem from rb tree */
+void rxe_drop_key(void *elem);
+
+/* lookup an indexed object from index. takes a reference on object */
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
+
+/* lookup keyed object from key. takes a reference on the object */
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
+
+/* cleanup an object when all references are dropped */
+void rxe_elem_release(struct kref *kref);
+
+/* take a reference on an object */
+#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+
+/* drop a reference on an object */
+#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+
+#endif /* RXE_POOL_H */
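A lifecycle sketch for an indexed object, under two assumptions implied by the direct casts in rxe_pool.c: every managed object embeds its struct rxe_pool_entry as the first member, and the device keeps an srq_pool field (the helper names are illustrative).

static struct rxe_srq *example_alloc_srq(struct rxe_dev *rxe)
{
	struct rxe_srq *srq = rxe_alloc(&rxe->srq_pool);

	if (!srq)
		return NULL;

	rxe_add_index(srq);	/* assign an index and insert into the rb tree */
	return srq;
}

static void example_free_srq(struct rxe_srq *srq)
{
	rxe_drop_index(srq);
	rxe_drop_ref(srq);	/* rxe_elem_release() runs when the count hits zero */
}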
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
new file mode 100644
index 000000000000..22ba24f2a2c1
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+#include "rxe_task.h"
+
+char *rxe_qp_state_name[] = {
+ [QP_STATE_RESET] = "RESET",
+ [QP_STATE_INIT] = "INIT",
+ [QP_STATE_READY] = "READY",
+ [QP_STATE_DRAIN] = "DRAIN",
+ [QP_STATE_DRAINED] = "DRAINED",
+ [QP_STATE_ERROR] = "ERROR",
+};
+
+static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
+ int has_srq)
+{
+ if (cap->max_send_wr > rxe->attr.max_qp_wr) {
+ pr_warn("invalid send wr = %d > %d\n",
+ cap->max_send_wr, rxe->attr.max_qp_wr);
+ goto err1;
+ }
+
+ if (cap->max_send_sge > rxe->attr.max_sge) {
+ pr_warn("invalid send sge = %d > %d\n",
+ cap->max_send_sge, rxe->attr.max_sge);
+ goto err1;
+ }
+
+ if (!has_srq) {
+ if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
+ pr_warn("invalid recv wr = %d > %d\n",
+ cap->max_recv_wr, rxe->attr.max_qp_wr);
+ goto err1;
+ }
+
+ if (cap->max_recv_sge > rxe->attr.max_sge) {
+ pr_warn("invalid recv sge = %d > %d\n",
+ cap->max_recv_sge, rxe->attr.max_sge);
+ goto err1;
+ }
+ }
+
+ if (cap->max_inline_data > rxe->max_inline_data) {
+ pr_warn("invalid max inline data = %d > %d\n",
+ cap->max_inline_data, rxe->max_inline_data);
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
+{
+ struct ib_qp_cap *cap = &init->cap;
+ struct rxe_port *port;
+ int port_num = init->port_num;
+
+ if (!init->recv_cq || !init->send_cq) {
+ pr_warn("missing cq\n");
+ goto err1;
+ }
+
+ if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
+ goto err1;
+
+ if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
+ if (port_num != 1) {
+ pr_warn("invalid port = %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
+ pr_warn("SMI QP exists for port %d\n", port_num);
+ goto err1;
+ }
+
+ if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
+ pr_warn("GSI QP exists for port %d\n", port_num);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
+{
+ qp->resp.res_head = 0;
+ qp->resp.res_tail = 0;
+ qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
+
+ if (!qp->resp.resources)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_rd_atomic_resources(struct rxe_qp *qp)
+{
+ if (qp->resp.resources) {
+ int i;
+
+ for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ struct resp_res *res = &qp->resp.resources[i];
+
+ free_rd_atomic_resource(qp, res);
+ }
+ kfree(qp->resp.resources);
+ qp->resp.resources = NULL;
+ }
+}
+
+void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
+{
+ if (res->type == RXE_ATOMIC_MASK) {
+ rxe_drop_ref(qp);
+ kfree_skb(res->atomic.skb);
+ } else if (res->type == RXE_READ_MASK) {
+ if (res->read.mr)
+ rxe_drop_ref(res->read.mr);
+ }
+ res->type = 0;
+}
+
+static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
+{
+ int i;
+ struct resp_res *res;
+
+ if (qp->resp.resources) {
+ for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ res = &qp->resp.resources[i];
+ free_rd_atomic_resource(qp, res);
+ }
+ }
+}
+
+static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init)
+{
+ struct rxe_port *port;
+ u32 qpn;
+
+ qp->sq_sig_type = init->sq_sig_type;
+ qp->attr.path_mtu = 1;
+ qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
+
+ qpn = qp->pelem.index;
+ port = &rxe->port;
+
+ switch (init->qp_type) {
+ case IB_QPT_SMI:
+ qp->ibqp.qp_num = 0;
+ port->qp_smi_index = qpn;
+ qp->attr.port_num = init->port_num;
+ break;
+
+ case IB_QPT_GSI:
+ qp->ibqp.qp_num = 1;
+ port->qp_gsi_index = qpn;
+ qp->attr.port_num = init->port_num;
+ break;
+
+ default:
+ qp->ibqp.qp_num = qpn;
+ break;
+ }
+
+ INIT_LIST_HEAD(&qp->grp_list);
+
+ skb_queue_head_init(&qp->send_pkts);
+
+ spin_lock_init(&qp->grp_lock);
+ spin_lock_init(&qp->state_lock);
+
+ atomic_set(&qp->ssn, 0);
+ atomic_set(&qp->skb_out, 0);
+}
+
+static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ int err;
+ int wqe_size;
+
+ err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
+ if (err < 0)
+ return err;
+ qp->sk->sk->sk_user_data = qp;
+
+ qp->sq.max_wr = init->cap.max_send_wr;
+ qp->sq.max_sge = init->cap.max_send_sge;
+ qp->sq.max_inline = init->cap.max_inline_data;
+
+ wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
+ qp->sq.max_sge * sizeof(struct ib_sge),
+ sizeof(struct rxe_send_wqe) +
+ qp->sq.max_inline);
+
+ qp->sq.queue = rxe_queue_init(rxe,
+ &qp->sq.max_wr,
+ wqe_size);
+ if (!qp->sq.queue)
+ return -ENOMEM;
+
+ err = do_mmap_info(rxe, udata, true,
+ context, qp->sq.queue->buf,
+ qp->sq.queue->buf_size, &qp->sq.queue->ip);
+
+ if (err) {
+ kvfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ return err;
+ }
+
+ qp->req.wqe_index = producer_index(qp->sq.queue);
+ qp->req.state = QP_STATE_RESET;
+ qp->req.opcode = -1;
+ qp->comp.opcode = -1;
+
+ spin_lock_init(&qp->sq.sq_lock);
+ skb_queue_head_init(&qp->req_pkts);
+
+ rxe_init_task(rxe, &qp->req.task, qp,
+ rxe_requester, "req");
+ rxe_init_task(rxe, &qp->comp.task, qp,
+ rxe_completer, "comp");
+
+ init_timer(&qp->rnr_nak_timer);
+ qp->rnr_nak_timer.function = rnr_nak_timer;
+ qp->rnr_nak_timer.data = (unsigned long)qp;
+
+ init_timer(&qp->retrans_timer);
+ qp->retrans_timer.function = retransmit_timer;
+ qp->retrans_timer.data = (unsigned long)qp;
+ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
+
+ return 0;
+}
+
+static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ int err;
+ int wqe_size;
+
+ if (!qp->srq) {
+ qp->rq.max_wr = init->cap.max_recv_wr;
+ qp->rq.max_sge = init->cap.max_recv_sge;
+
+ wqe_size = rcv_wqe_size(qp->rq.max_sge);
+
+ pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
+ qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+
+ qp->rq.queue = rxe_queue_init(rxe,
+ &qp->rq.max_wr,
+ wqe_size);
+ if (!qp->rq.queue)
+ return -ENOMEM;
+
+ err = do_mmap_info(rxe, udata, false, context,
+ qp->rq.queue->buf,
+ qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+ kvfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ return err;
+ }
+ }
+
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
+ skb_queue_head_init(&qp->resp_pkts);
+
+ rxe_init_task(rxe, &qp->resp.task, qp,
+ rxe_responder, "resp");
+
+ qp->resp.opcode = OPCODE_NONE;
+ qp->resp.msn = 0;
+ qp->resp.state = QP_STATE_RESET;
+
+ return 0;
+}
+
+/* called by the create qp verb */
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ struct ib_qp_init_attr *init, struct ib_udata *udata,
+ struct ib_pd *ibpd)
+{
+ int err;
+ struct rxe_cq *rcq = to_rcq(init->recv_cq);
+ struct rxe_cq *scq = to_rcq(init->send_cq);
+ struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+ struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+
+ rxe_add_ref(pd);
+ rxe_add_ref(rcq);
+ rxe_add_ref(scq);
+ if (srq)
+ rxe_add_ref(srq);
+
+ qp->pd = pd;
+ qp->rcq = rcq;
+ qp->scq = scq;
+ qp->srq = srq;
+
+ rxe_qp_init_misc(rxe, qp, init);
+
+ err = rxe_qp_init_req(rxe, qp, init, context, udata);
+ if (err)
+ goto err1;
+
+ err = rxe_qp_init_resp(rxe, qp, init, context, udata);
+ if (err)
+ goto err2;
+
+ qp->attr.qp_state = IB_QPS_RESET;
+ qp->valid = 1;
+
+ return 0;
+
+err2:
+ rxe_queue_cleanup(qp->sq.queue);
+err1:
+ if (srq)
+ rxe_drop_ref(srq);
+ rxe_drop_ref(scq);
+ rxe_drop_ref(rcq);
+ rxe_drop_ref(pd);
+
+ return err;
+}
+
+/* called by the query qp verb */
+int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
+{
+ init->event_handler = qp->ibqp.event_handler;
+ init->qp_context = qp->ibqp.qp_context;
+ init->send_cq = qp->ibqp.send_cq;
+ init->recv_cq = qp->ibqp.recv_cq;
+ init->srq = qp->ibqp.srq;
+
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ if (!qp->srq) {
+ init->cap.max_recv_wr = qp->rq.max_wr;
+ init->cap.max_recv_sge = qp->rq.max_sge;
+ }
+
+ init->sq_sig_type = qp->sq_sig_type;
+
+ init->qp_type = qp->ibqp.qp_type;
+ init->port_num = 1;
+
+ return 0;
+}
+
+/* called by the modify qp verb, this routine checks all the parameters before
+ * making any changes
+ */
+int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_attr *attr, int mask)
+{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->attr.qp_state;
+ enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ pr_warn("invalid mask or state for qp\n");
+ goto err1;
+ }
+
+ if (mask & IB_QP_STATE) {
+ if (cur_state == IB_QPS_SQD) {
+ if (qp->req.state == QP_STATE_DRAIN &&
+ new_state != IB_QPS_ERR)
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_PORT) {
+ if (attr->port_num != 1) {
+ pr_warn("invalid port %d\n", attr->port_num);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
+ goto err1;
+
+ if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
+ goto err1;
+
+ if (mask & IB_QP_ALT_PATH) {
+ if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
+ goto err1;
+ if (attr->alt_port_num != 1) {
+ pr_warn("invalid alt port %d\n", attr->alt_port_num);
+ goto err1;
+ }
+ if (attr->alt_timeout > 31) {
+ pr_warn("invalid QP alt timeout %d > 31\n",
+ attr->alt_timeout);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_PATH_MTU) {
+ struct rxe_port *port = &rxe->port;
+
+ enum ib_mtu max_mtu = port->attr.max_mtu;
+ enum ib_mtu mtu = attr->path_mtu;
+
+ if (mtu > max_mtu) {
+ pr_debug("invalid mtu (%d) > (%d)\n",
+ ib_mtu_enum_to_int(mtu),
+ ib_mtu_enum_to_int(max_mtu));
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
+ pr_warn("invalid max_rd_atomic %d > %d\n",
+ attr->max_rd_atomic,
+ rxe->attr.max_qp_rd_atom);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_TIMEOUT) {
+ if (attr->timeout > 31) {
+ pr_warn("invalid QP timeout %d > 31\n",
+ attr->timeout);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+/* move the qp to the reset state */
+static void rxe_qp_reset(struct rxe_qp *qp)
+{
+ /* stop tasks from running */
+ rxe_disable_task(&qp->resp.task);
+
+ /* stop request/comp */
+ if (qp->sq.queue) {
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_disable_task(&qp->comp.task);
+ rxe_disable_task(&qp->req.task);
+ }
+
+ /* move qp to the reset state */
+ qp->req.state = QP_STATE_RESET;
+ qp->resp.state = QP_STATE_RESET;
+
+ /* let state machines reset themselves, drain work and packet
+ * queues, etc.
+ */
+ __rxe_do_task(&qp->resp.task);
+
+ if (qp->sq.queue) {
+ __rxe_do_task(&qp->comp.task);
+ __rxe_do_task(&qp->req.task);
+ }
+
+ /* cleanup attributes */
+ atomic_set(&qp->ssn, 0);
+ qp->req.opcode = -1;
+ qp->req.need_retry = 0;
+ qp->req.noack_pkts = 0;
+ qp->resp.msn = 0;
+ qp->resp.opcode = -1;
+ qp->resp.drop_msg = 0;
+ qp->resp.goto_error = 0;
+ qp->resp.sent_psn_nak = 0;
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ cleanup_rd_atomic_resources(qp);
+
+ /* reenable tasks */
+ rxe_enable_task(&qp->resp.task);
+
+ if (qp->sq.queue) {
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_enable_task(&qp->comp.task);
+
+ rxe_enable_task(&qp->req.task);
+ }
+}
+
+/* drain the send queue */
+static void rxe_qp_drain(struct rxe_qp *qp)
+{
+ if (qp->sq.queue) {
+ if (qp->req.state != QP_STATE_DRAINED) {
+ qp->req.state = QP_STATE_DRAIN;
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_run_task(&qp->comp.task, 1);
+ else
+ __rxe_do_task(&qp->comp.task);
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+}
+
+/* move the qp to the error state */
+void rxe_qp_error(struct rxe_qp *qp)
+{
+ qp->req.state = QP_STATE_ERROR;
+ qp->resp.state = QP_STATE_ERROR;
+
+ /* drain work and packet queues */
+ rxe_run_task(&qp->resp.task, 1);
+
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_run_task(&qp->comp.task, 1);
+ else
+ __rxe_do_task(&qp->comp.task);
+ rxe_run_task(&qp->req.task, 1);
+}
+
+/* called by the modify qp verb */
+int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+
+ free_rd_atomic_resources(qp);
+
+ err = alloc_rd_atomic_resources(qp, max_rd_atomic);
+ if (err)
+ return err;
+
+ qp->attr.max_rd_atomic = max_rd_atomic;
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ }
+
+ if (mask & IB_QP_CUR_STATE)
+ qp->attr.cur_qp_state = attr->qp_state;
+
+ if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
+
+ if (mask & IB_QP_ACCESS_FLAGS)
+ qp->attr.qp_access_flags = attr->qp_access_flags;
+
+ if (mask & IB_QP_PKEY_INDEX)
+ qp->attr.pkey_index = attr->pkey_index;
+
+ if (mask & IB_QP_PORT)
+ qp->attr.port_num = attr->port_num;
+
+ if (mask & IB_QP_QKEY)
+ qp->attr.qkey = attr->qkey;
+
+ if (mask & IB_QP_AV) {
+ ib_get_cached_gid(&rxe->ib_dev, 1,
+ attr->ah_attr.grh.sgid_index, &sgid,
+ &sgid_attr);
+ rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
+ &attr->ah_attr);
+ rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
+ &sgid_attr, &sgid);
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+ }
+
+ if (mask & IB_QP_ALT_PATH) {
+ ib_get_cached_gid(&rxe->ib_dev, 1,
+ attr->alt_ah_attr.grh.sgid_index, &sgid,
+ &sgid_attr);
+
+ rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
+ &attr->alt_ah_attr);
+ rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
+ &sgid_attr, &sgid);
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+
+ qp->attr.alt_port_num = attr->alt_port_num;
+ qp->attr.alt_pkey_index = attr->alt_pkey_index;
+ qp->attr.alt_timeout = attr->alt_timeout;
+ }
+
+ if (mask & IB_QP_PATH_MTU) {
+ qp->attr.path_mtu = attr->path_mtu;
+ qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ }
+
+ if (mask & IB_QP_TIMEOUT) {
+ qp->attr.timeout = attr->timeout;
+ if (attr->timeout == 0) {
+ qp->qp_timeout_jiffies = 0;
+ } else {
+ /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
+ int j = nsecs_to_jiffies(4096ULL << attr->timeout);
+
+ qp->qp_timeout_jiffies = j ? j : 1;
+ }
+ }
+
+ if (mask & IB_QP_RETRY_CNT) {
+ qp->attr.retry_cnt = attr->retry_cnt;
+ qp->comp.retry_cnt = attr->retry_cnt;
+ pr_debug("set retry count = %d\n", attr->retry_cnt);
+ }
+
+ if (mask & IB_QP_RNR_RETRY) {
+ qp->attr.rnr_retry = attr->rnr_retry;
+ qp->comp.rnr_retry = attr->rnr_retry;
+ pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+ }
+
+ if (mask & IB_QP_RQ_PSN) {
+ qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
+ qp->resp.psn = qp->attr.rq_psn;
+ pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+ }
+
+ if (mask & IB_QP_MIN_RNR_TIMER) {
+ qp->attr.min_rnr_timer = attr->min_rnr_timer;
+ pr_debug("set min rnr timer = 0x%x\n",
+ attr->min_rnr_timer);
+ }
+
+ if (mask & IB_QP_SQ_PSN) {
+ qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
+ qp->req.psn = qp->attr.sq_psn;
+ qp->comp.psn = qp->attr.sq_psn;
+ pr_debug("set req psn = 0x%x\n", qp->req.psn);
+ }
+
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ qp->attr.max_dest_rd_atomic =
+ __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ }
+
+ if (mask & IB_QP_PATH_MIG_STATE)
+ qp->attr.path_mig_state = attr->path_mig_state;
+
+ if (mask & IB_QP_DEST_QPN)
+ qp->attr.dest_qp_num = attr->dest_qp_num;
+
+ if (mask & IB_QP_STATE) {
+ qp->attr.qp_state = attr->qp_state;
+
+ switch (attr->qp_state) {
+ case IB_QPS_RESET:
+ pr_debug("qp state -> RESET\n");
+ rxe_qp_reset(qp);
+ break;
+
+ case IB_QPS_INIT:
+ pr_debug("qp state -> INIT\n");
+ qp->req.state = QP_STATE_INIT;
+ qp->resp.state = QP_STATE_INIT;
+ break;
+
+ case IB_QPS_RTR:
+ pr_debug("qp state -> RTR\n");
+ qp->resp.state = QP_STATE_READY;
+ break;
+
+ case IB_QPS_RTS:
+ pr_debug("qp state -> RTS\n");
+ qp->req.state = QP_STATE_READY;
+ break;
+
+ case IB_QPS_SQD:
+ pr_debug("qp state -> SQD\n");
+ rxe_qp_drain(qp);
+ break;
+
+ case IB_QPS_SQE:
+ pr_warn("qp state -> SQE !!?\n");
+ /* Not possible from modify_qp. */
+ break;
+
+ case IB_QPS_ERR:
+ pr_debug("qp state -> ERR\n");
+ rxe_qp_error(qp);
+ break;
+ }
+ }
+
+ return 0;
+}
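A worked example of the IB_QP_TIMEOUT conversion above, shown as a standalone helper (the helper name is illustrative): the attribute is an exponent, so attr->timeout == 14 means 4096 ns << 14 = 67,108,864 ns, roughly 67 ms of local ACK timeout.

static unsigned long example_timeout_to_jiffies(u8 timeout)
{
	unsigned long j;

	if (!timeout)
		return 0;	/* zero disables the retransmit timer */

	j = nsecs_to_jiffies(4096ULL << timeout);
	return j ? j : 1;	/* never round a non-zero timeout down to zero */
}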
+
+/* called by the query qp verb */
+int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ *attr = qp->attr;
+
+ attr->rq_psn = qp->resp.psn;
+ attr->sq_psn = qp->req.psn;
+
+ attr->cap.max_send_wr = qp->sq.max_wr;
+ attr->cap.max_send_sge = qp->sq.max_sge;
+ attr->cap.max_inline_data = qp->sq.max_inline;
+
+ if (!qp->srq) {
+ attr->cap.max_recv_wr = qp->rq.max_wr;
+ attr->cap.max_recv_sge = qp->rq.max_sge;
+ }
+
+ rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
+ rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);
+
+ if (qp->req.state == QP_STATE_DRAIN) {
+ attr->sq_draining = 1;
+ /* applications that get this state
+ * typically spin on it. yield the
+ * processor
+ */
+ cond_resched();
+ } else {
+ attr->sq_draining = 0;
+ }
+
+ pr_debug("attr->sq_draining = %d\n", attr->sq_draining);
+
+ return 0;
+}
+
+/* called by the destroy qp verb */
+void rxe_qp_destroy(struct rxe_qp *qp)
+{
+ qp->valid = 0;
+ qp->qp_timeout_jiffies = 0;
+ rxe_cleanup_task(&qp->resp.task);
+
+ del_timer_sync(&qp->retrans_timer);
+ del_timer_sync(&qp->rnr_nak_timer);
+
+ rxe_cleanup_task(&qp->req.task);
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_cleanup_task(&qp->comp.task);
+
+ /* flush out any receive wr's or pending requests */
+ __rxe_do_task(&qp->req.task);
+ if (qp->sq.queue) {
+ __rxe_do_task(&qp->comp.task);
+ __rxe_do_task(&qp->req.task);
+ }
+}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(void *arg)
+{
+ struct rxe_qp *qp = arg;
+
+ rxe_drop_all_mcast_groups(qp);
+
+ if (qp->sq.queue)
+ rxe_queue_cleanup(qp->sq.queue);
+
+ if (qp->srq)
+ rxe_drop_ref(qp->srq);
+
+ if (qp->rq.queue)
+ rxe_queue_cleanup(qp->rq.queue);
+
+ if (qp->scq)
+ rxe_drop_ref(qp->scq);
+ if (qp->rcq)
+ rxe_drop_ref(qp->rcq);
+ if (qp->pd)
+ rxe_drop_ref(qp->pd);
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ free_rd_atomic_resources(qp);
+
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
new file mode 100644
index 000000000000..08274254eb88
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int do_mmap_info(struct rxe_dev *rxe,
+ struct ib_udata *udata,
+ bool is_req,
+ struct ib_ucontext *context,
+ struct rxe_queue_buf *buf,
+ size_t buf_size,
+ struct rxe_mmap_info **ip_p)
+{
+ int err;
+ u32 len, offset;
+ struct rxe_mmap_info *ip = NULL;
+
+ if (udata) {
+ if (is_req) {
+ len = udata->outlen - sizeof(struct mminfo);
+ offset = sizeof(struct mminfo);
+ } else {
+ len = udata->outlen;
+ offset = 0;
+ }
+
+ if (len < sizeof(ip->info))
+ goto err1;
+
+ ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
+ if (!ip)
+ goto err1;
+
+ err = copy_to_user(udata->outbuf + offset, &ip->info,
+ sizeof(ip->info));
+ if (err)
+ goto err2;
+
+ spin_lock_bh(&rxe->pending_lock);
+ list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
+ spin_unlock_bh(&rxe->pending_lock);
+ }
+
+ *ip_p = ip;
+
+ return 0;
+
+err2:
+ kfree(ip);
+err1:
+ return -EINVAL;
+}
+
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size)
+{
+ struct rxe_queue *q;
+ size_t buf_size;
+ unsigned int num_slots;
+
+ /* num_elem == 0 is allowed, but uninteresting */
+ if (*num_elem < 0)
+ goto err1;
+
+ q = kmalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ goto err1;
+
+ q->rxe = rxe;
+
+ /* used in resize, only need to copy used part of queue */
+ q->elem_size = elem_size;
+
+ /* pad element up to at least a cacheline and always a power of 2 */
+ if (elem_size < cache_line_size())
+ elem_size = cache_line_size();
+ elem_size = roundup_pow_of_two(elem_size);
+
+ q->log2_elem_size = order_base_2(elem_size);
+
+ num_slots = *num_elem + 1;
+ num_slots = roundup_pow_of_two(num_slots);
+ q->index_mask = num_slots - 1;
+
+ buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;
+
+ q->buf = vmalloc_user(buf_size);
+ if (!q->buf)
+ goto err2;
+
+ q->buf->log2_elem_size = q->log2_elem_size;
+ q->buf->index_mask = q->index_mask;
+
+ q->buf_size = buf_size;
+
+ *num_elem = num_slots - 1;
+ return q;
+
+err2:
+ kfree(q);
+err1:
+ return NULL;
+}
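A sizing sketch for the function above: with *num_elem == 100 it allocates 128 slots (the next power of two above 101), hands 127 usable elements back to the caller, and sets index_mask to 127 so producer and consumer indices can wrap with a single AND. The helper below is illustrative, not the driver's own accessor.

static inline unsigned int example_next_index(struct rxe_queue *q,
					      unsigned int index)
{
	/* index_mask is num_slots - 1, so the AND implements the wrap */
	return (index + 1) & q->index_mask;
}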
+
+/* copies elements from original q to new q and then swaps the contents of the
+ * two q headers. This is so that if anyone is holding a pointer to q it will
+ * still work
+ */
+static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
+ unsigned int num_elem)
+{
+ if (!queue_empty(q) && (num_elem < queue_count(q)))
+ return -EINVAL;
+
+ while (!queue_empty(q)) {
+ memcpy(producer_addr(new_q), consumer_addr(q),
+ new_q->elem_size);
+ advance_producer(new_q);
+ advance_consumer(q);
+ }
+
+ swap(*q, *new_q);
+
+ return 0;
+}
+
+int rxe_queue_resize(struct rxe_queue *q,
+ unsigned int *num_elem_p,
+ unsigned int elem_size,
+ struct ib_ucontext *context,
+ struct ib_udata *udata,
+ spinlock_t *producer_lock,
+ spinlock_t *consumer_lock)
+{
+ struct rxe_queue *new_q;
+ unsigned int num_elem = *num_elem_p;
+ int err;
+ unsigned long flags = 0, flags1;
+
+ new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
+ if (!new_q)
+ return -ENOMEM;
+
+ err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
+ new_q->buf_size, &new_q->ip);
+ if (err) {
+ vfree(new_q->buf);
+ kfree(new_q);
+ goto err1;
+ }
+
+ spin_lock_irqsave(consumer_lock, flags1);
+
+ if (producer_lock) {
+ spin_lock_irqsave(producer_lock, flags);
+ err = resize_finish(q, new_q, num_elem);
+ spin_unlock_irqrestore(producer_lock, flags);
+ } else {
+ err = resize_finish(q, new_q, num_elem);
+ }
+
+ spin_unlock_irqrestore(consumer_lock, flags1);
+
+ rxe_queue_cleanup(new_q); /* frees the old buf on success, the new one on error */
+ if (err)
+ goto err1;
+
+ *num_elem_p = num_elem;
+ return 0;
+
+err1:
+ return err;
+}
+
+void rxe_queue_cleanup(struct rxe_queue *q)
+{
+ if (q->ip)
+ kref_put(&q->ip->ref, rxe_mmap_release);
+ else
+ vfree(q->buf);
+
+ kfree(q);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
new file mode 100644
index 000000000000..239fd609c31e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_QUEUE_H
+#define RXE_QUEUE_H
+
+/* implements a simple circular buffer that can optionally be
+ * shared between user space and the kernel and can be resized
+ *
+ * the requested element size is rounded up to a power of 2
+ * and the number of elements in the buffer is also rounded
+ * up to a power of 2. Since the queue is empty when the
+ * producer and consumer indices match, the maximum capacity
+ * of the queue is one less than the number of element slots
+ */
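+
+/* example, assuming a 64-byte cache line: a request for 10 elements of
+ * 24 bytes is padded up to a 64-byte element size, the slot count is
+ * rounded up to roundup_pow_of_two(10 + 1) = 16, index_mask becomes 15,
+ * and the usable capacity reported back to the caller is 15 elements
+ */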
+
+/* this data structure is shared between user space and kernel
+ * space for those cases where the queue is shared. It contains
+ * the producer and consumer indices. It also contains a copy
+ * of the queue size parameters for user space to use but the
+ * kernel must use the parameters in the rxe_queue struct
+ * this MUST MATCH the corresponding librxe struct
+ * for performance reasons arrange to have producer and consumer
+ * pointers in separate cache lines
+ * the kernel should always mask the indices to avoid accessing
+ * memory outside of the data area
+ */
+struct rxe_queue_buf {
+ __u32 log2_elem_size;
+ __u32 index_mask;
+ __u32 pad_1[30];
+ __u32 producer_index;
+ __u32 pad_2[31];
+ __u32 consumer_index;
+ __u32 pad_3[31];
+ __u8 data[0];
+};
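+
+/* with the layout above, and assuming 64-byte cache lines, the two
+ * size fields plus pad_1 fill the first 128 bytes, so producer_index
+ * sits at offset 128 and consumer_index at offset 256; the two indices
+ * therefore never share a cache line
+ */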
+
+struct rxe_queue {
+ struct rxe_dev *rxe;
+ struct rxe_queue_buf *buf;
+ struct rxe_mmap_info *ip;
+ size_t buf_size;
+ size_t elem_size;
+ unsigned int log2_elem_size;
+ unsigned int index_mask;
+};
+
+int do_mmap_info(struct rxe_dev *rxe,
+ struct ib_udata *udata,
+ bool is_req,
+ struct ib_ucontext *context,
+ struct rxe_queue_buf *buf,
+ size_t buf_size,
+ struct rxe_mmap_info **ip_p);
+
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size);
+
+int rxe_queue_resize(struct rxe_queue *q,
+ unsigned int *num_elem_p,
+ unsigned int elem_size,
+ struct ib_ucontext *context,
+ struct ib_udata *udata,
+ /* Protect producers while resizing queue */
+ spinlock_t *producer_lock,
+ /* Protect consumers while resizing queue */
+ spinlock_t *consumer_lock);
+
+void rxe_queue_cleanup(struct rxe_queue *queue);
+
+static inline int next_index(struct rxe_queue *q, int index)
+{
+ return (index + 1) & q->buf->index_mask;
+}
+
+static inline int queue_empty(struct rxe_queue *q)
+{
+ return ((q->buf->producer_index - q->buf->consumer_index)
+ & q->index_mask) == 0;
+}
+
+static inline int queue_full(struct rxe_queue *q)
+{
+ return ((q->buf->producer_index + 1 - q->buf->consumer_index)
+ & q->index_mask) == 0;
+}
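+
+/* queue_empty() and queue_full() use the usual one-open-slot
+ * convention: equal indices mean empty, and the queue counts as full
+ * once advancing the producer would make the indices equal again,
+ * which is why the capacity is one less than the number of slots
+ */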
+
+static inline void advance_producer(struct rxe_queue *q)
+{
+ q->buf->producer_index = (q->buf->producer_index + 1)
+ & q->index_mask;
+}
+
+static inline void advance_consumer(struct rxe_queue *q)
+{
+ q->buf->consumer_index = (q->buf->consumer_index + 1)
+ & q->index_mask;
+}
+
+static inline void *producer_addr(struct rxe_queue *q)
+{
+ return q->buf->data + ((q->buf->producer_index & q->index_mask)
+ << q->log2_elem_size);
+}
+
+static inline void *consumer_addr(struct rxe_queue *q)
+{
+ return q->buf->data + ((q->buf->consumer_index & q->index_mask)
+ << q->log2_elem_size);
+}
+
+static inline unsigned int producer_index(struct rxe_queue *q)
+{
+ return q->buf->producer_index;
+}
+
+static inline unsigned int consumer_index(struct rxe_queue *q)
+{
+ return q->buf->consumer_index;
+}
+
+static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
+{
+ return q->buf->data + ((index & q->index_mask)
+ << q->buf->log2_elem_size);
+}
+
+static inline unsigned int index_from_addr(const struct rxe_queue *q,
+ const void *addr)
+{
+ return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
+ & q->index_mask;
+}
+
+static inline unsigned int queue_count(const struct rxe_queue *q)
+{
+ return (q->buf->producer_index - q->buf->consumer_index)
+ & q->index_mask;
+}
+
+static inline void *queue_head(struct rxe_queue *q)
+{
+ return queue_empty(q) ? NULL : consumer_addr(q);
+}
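+
+/* illustrative producer-side usage (locking is left to the caller):
+ *
+ *   if (queue_full(q))
+ *           return -ENOMEM;
+ *   memcpy(producer_addr(q), elem, q->elem_size);
+ *   advance_producer(q);
+ */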
+
+#endif /* RXE_QUEUE_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
new file mode 100644
index 000000000000..3d464c23e08b
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+{
+ if (unlikely(!qp->valid))
+ goto err1;
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ case IB_QPT_UC:
+ if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ default:
+ pr_warn_ratelimited("unsupported qp type\n");
+ goto err1;
+ }
+
+ if (pkt->mask & RXE_REQ_MASK) {
+ if (unlikely(qp->resp.state != QP_STATE_READY))
+ goto err1;
+ } else if (unlikely(qp->req.state < QP_STATE_READY ||
+ qp->req.state > QP_STATE_DRAINED)) {
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static void set_bad_pkey_cntr(struct rxe_port *port)
+{
+ spin_lock_bh(&port->port_lock);
+ port->attr.bad_pkey_cntr = min((u32)0xffff,
+ port->attr.bad_pkey_cntr + 1);
+ spin_unlock_bh(&port->port_lock);
+}
+
+static void set_qkey_viol_cntr(struct rxe_port *port)
+{
+ spin_lock_bh(&port->port_lock);
+ port->attr.qkey_viol_cntr = min((u32)0xffff,
+ port->attr.qkey_viol_cntr + 1);
+ spin_unlock_bh(&port->port_lock);
+}
+
+static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ u32 qpn, struct rxe_qp *qp)
+{
+ int i;
+ int found_pkey = 0;
+ struct rxe_port *port = &rxe->port;
+ u16 pkey = bth_pkey(pkt);
+
+ pkt->pkey_index = 0;
+
+ if (qpn == 1) {
+ for (i = 0; i < port->attr.pkey_tbl_len; i++) {
+ if (pkey_match(pkey, port->pkey_tbl[i])) {
+ pkt->pkey_index = i;
+ found_pkey = 1;
+ break;
+ }
+ }
+
+ if (!found_pkey) {
+ pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
+ set_bad_pkey_cntr(port);
+ goto err1;
+ }
+ } else if (qpn != 0) {
+ if (unlikely(!pkey_match(pkey,
+ port->pkey_tbl[qp->attr.pkey_index]
+ ))) {
+ pr_warn_ratelimited("bad pkey = 0x%0x\n", pkey);
+ set_bad_pkey_cntr(port);
+ goto err1;
+ }
+ pkt->pkey_index = qp->attr.pkey_index;
+ }
+
+ if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
+ qpn != 0 && pkt->mask) {
+ u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
+
+ if (unlikely(deth_qkey(pkt) != qkey)) {
+ pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
+ deth_qkey(pkt), qkey, qpn);
+ set_qkey_viol_cntr(port);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
+ goto done;
+
+ if (unlikely(pkt->port_num != qp->attr.port_num)) {
+ pr_warn_ratelimited("port %d != qp port %d\n",
+ pkt->port_num, qp->attr.port_num);
+ goto err1;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct in_addr *saddr =
+ &qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
+ struct in_addr *daddr =
+ &qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
+
+ if (ip_hdr(skb)->daddr != saddr->s_addr) {
+ pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
+ &ip_hdr(skb)->daddr,
+ &saddr->s_addr);
+ goto err1;
+ }
+
+ if (ip_hdr(skb)->saddr != daddr->s_addr) {
+ pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
+ &ip_hdr(skb)->saddr,
+ &daddr->s_addr);
+ goto err1;
+ }
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct in6_addr *saddr =
+ &qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
+ struct in6_addr *daddr =
+ &qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
+
+ if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
+ pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
+ &ipv6_hdr(skb)->daddr, saddr);
+ goto err1;
+ }
+
+ if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
+ pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
+ &ipv6_hdr(skb)->saddr, daddr);
+ goto err1;
+ }
+ }
+
+done:
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int hdr_check(struct rxe_pkt_info *pkt)
+{
+ struct rxe_dev *rxe = pkt->rxe;
+ struct rxe_port *port = &rxe->port;
+ struct rxe_qp *qp = NULL;
+ u32 qpn = bth_qpn(pkt);
+ int index;
+ int err;
+
+ if (unlikely(bth_tver(pkt) != BTH_TVER)) {
+ pr_warn_ratelimited("bad tver\n");
+ goto err1;
+ }
+
+ if (qpn != IB_MULTICAST_QPN) {
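+ /* QP0 (SMI) and QP1 (GSI) are translated to the pool indices
+ * saved in the port (qp_smi_index/qp_gsi_index); any other
+ * QPN is used as the pool index directly
+ */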
+ index = (qpn == 0) ? port->qp_smi_index :
+ ((qpn == 1) ? port->qp_gsi_index : qpn);
+ qp = rxe_pool_get_index(&rxe->qp_pool, index);
+ if (unlikely(!qp)) {
+ pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
+ goto err1;
+ }
+
+ err = check_type_state(rxe, pkt, qp);
+ if (unlikely(err))
+ goto err2;
+
+ err = check_addr(rxe, pkt, qp);
+ if (unlikely(err))
+ goto err2;
+
+ err = check_keys(rxe, pkt, qpn, qp);
+ if (unlikely(err))
+ goto err2;
+ } else {
+ if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
+ pr_warn_ratelimited("no grh for mcast qpn\n");
+ goto err1;
+ }
+ }
+
+ pkt->qp = qp;
+ return 0;
+
+err2:
+ if (qp)
+ rxe_drop_ref(qp);
+err1:
+ return -EINVAL;
+}
+
+static inline void rxe_rcv_pkt(struct rxe_dev *rxe,
+ struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ if (pkt->mask & RXE_REQ_MASK)
+ rxe_resp_queue_pkt(rxe, pkt->qp, skb);
+ else
+ rxe_comp_queue_pkt(rxe, pkt->qp, skb);
+}
+
+static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+{
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct rxe_mc_grp *mcg;
+ struct sk_buff *skb_copy;
+ struct rxe_mc_elem *mce;
+ struct rxe_qp *qp;
+ union ib_gid dgid;
+ int err;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
+
+ /* lookup mcast group corresponding to mgid, takes a ref */
+ mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
+ if (!mcg)
+ goto err1; /* mcast group not registered */
+
+ spin_lock_bh(&mcg->mcg_lock);
+
+ list_for_each_entry(mce, &mcg->qp_list, qp_list) {
+ qp = mce->qp;
+ pkt = SKB_TO_PKT(skb);
+
+ /* validate qp for incoming packet */
+ err = check_type_state(rxe, pkt, qp);
+ if (err)
+ continue;
+
+ err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
+ if (err)
+ continue;
+
+ /* if *not* the last qp in the list
+ * make a copy of the skb to post to the next qp
+ */
+ skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
+ skb_clone(skb, GFP_KERNEL) : NULL;
+
+ pkt->qp = qp;
+ rxe_add_ref(qp);
+ rxe_rcv_pkt(rxe, pkt, skb);
+
+ skb = skb_copy;
+ if (!skb)
+ break;
+ }
+
+ spin_unlock_bh(&mcg->mcg_lock);
+
+ rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+
+err1:
+ if (skb)
+ kfree_skb(skb);
+}
+
+static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
+{
+ union ib_gid dgid;
+ union ib_gid *pdgid;
+ u16 index;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+ pdgid = &dgid;
+ } else {
+ pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
+ }
+
+ return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
+ IB_GID_TYPE_ROCE_UDP_ENCAP,
+ 1, rxe->ndev, &index);
+}
+
+/* rxe_rcv is called from the interface driver */
+int rxe_rcv(struct sk_buff *skb)
+{
+ int err;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct rxe_dev *rxe = pkt->rxe;
+ __be32 *icrcp;
+ u32 calc_icrc, pack_icrc;
+
+ pkt->offset = 0;
+
+ if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
+ goto drop;
+
+ if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
+ pr_warn_ratelimited("failed matching dgid\n");
+ goto drop;
+ }
+
+ pkt->opcode = bth_opcode(pkt);
+ pkt->psn = bth_psn(pkt);
+ pkt->qp = NULL;
+ pkt->mask |= rxe_opcode[pkt->opcode].mask;
+
+ if (unlikely(skb->len < header_size(pkt)))
+ goto drop;
+
+ err = hdr_check(pkt);
+ if (unlikely(err))
+ goto drop;
+
+ /* Verify ICRC */
+ icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+ pack_icrc = be32_to_cpu(*icrcp);
+
+ calc_icrc = rxe_icrc_hdr(pkt, skb);
+ calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+ calc_icrc = cpu_to_be32(~calc_icrc);
+ if (unlikely(calc_icrc != pack_icrc)) {
+ char saddr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx")];
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
+ else if (skb->protocol == htons(ETH_P_IP))
+ sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
+ else
+ sprintf(saddr, "unknown");
+
+ pr_warn_ratelimited("bad ICRC from %s\n", saddr);
+ goto drop;
+ }
+
+ if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
+ rxe_rcv_mcast_pkt(rxe, skb);
+ else
+ rxe_rcv_pkt(rxe, pkt, skb);
+
+ return 0;
+
+drop:
+ if (pkt->qp)
+ rxe_drop_ref(pkt->qp);
+
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL(rxe_rcv);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
new file mode 100644
index 000000000000..33b2d9d77021
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ unsigned opcode);
+
+static inline void retry_first_write_send(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ unsigned mask, int npsn)
+{
+ int i;
+
+ for (i = 0; i < npsn; i++) {
+ int to_send = (wqe->dma.resid > qp->mtu) ?
+ qp->mtu : wqe->dma.resid;
+
+ qp->req.opcode = next_opcode(qp, wqe,
+ wqe->wr.opcode);
+
+ if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ wqe->dma.resid -= to_send;
+ wqe->dma.sge_offset += to_send;
+ } else {
+ advance_dma_data(&wqe->dma, to_send);
+ }
+ if (mask & WR_WRITE_MASK)
+ wqe->iova += qp->mtu;
+ }
+}
+
+static void req_retry(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe;
+ unsigned int wqe_index;
+ unsigned int mask;
+ int npsn;
+ int first = 1;
+
+ wqe = queue_head(qp->sq.queue);
+ npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
+
+ qp->req.wqe_index = consumer_index(qp->sq.queue);
+ qp->req.psn = qp->comp.psn;
+ qp->req.opcode = -1;
+
+ for (wqe_index = consumer_index(qp->sq.queue);
+ wqe_index != producer_index(qp->sq.queue);
+ wqe_index = next_index(qp->sq.queue, wqe_index)) {
+ wqe = addr_from_index(qp->sq.queue, wqe_index);
+ mask = wr_opcode_mask(wqe->wr.opcode, qp);
+
+ if (wqe->state == wqe_state_posted)
+ break;
+
+ if (wqe->state == wqe_state_done)
+ continue;
+
+ wqe->iova = (mask & WR_ATOMIC_MASK) ?
+ wqe->wr.wr.atomic.remote_addr :
+ (mask & WR_READ_OR_WRITE_MASK) ?
+ wqe->wr.wr.rdma.remote_addr :
+ 0;
+
+ if (!first || (mask & WR_READ_MASK) == 0) {
+ wqe->dma.resid = wqe->dma.length;
+ wqe->dma.cur_sge = 0;
+ wqe->dma.sge_offset = 0;
+ }
+
+ if (first) {
+ first = 0;
+
+ if (mask & WR_WRITE_OR_SEND_MASK)
+ retry_first_write_send(qp, wqe, mask, npsn);
+
+ if (mask & WR_READ_MASK)
+ wqe->iova += npsn * qp->mtu;
+ }
+
+ wqe->state = wqe_state_posted;
+ }
+}
+
+void rnr_nak_timer(unsigned long data)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)data;
+
+ pr_debug("rnr nak timer fired\n");
+ rxe_run_task(&qp->req.task, 1);
+}
+
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
+ unsigned long flags;
+
+ if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+ /* check to see if we are drained;
+ * state_lock used by requester and completer
+ */
+ spin_lock_irqsave(&qp->state_lock, flags);
+ do {
+ if (qp->req.state != QP_STATE_DRAIN) {
+ /* comp just finished */
+ spin_unlock_irqrestore(&qp->state_lock,
+ flags);
+ break;
+ }
+
+ if (wqe && ((qp->req.wqe_index !=
+ consumer_index(qp->sq.queue)) ||
+ (wqe->state != wqe_state_posted))) {
+ /* comp not done yet */
+ spin_unlock_irqrestore(&qp->state_lock,
+ flags);
+ break;
+ }
+
+ qp->req.state = QP_STATE_DRAINED;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_SQ_DRAINED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ } while (0);
+ }
+
+ if (qp->req.wqe_index == producer_index(qp->sq.queue))
+ return NULL;
+
+ wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
+
+ if (unlikely((qp->req.state == QP_STATE_DRAIN ||
+ qp->req.state == QP_STATE_DRAINED) &&
+ (wqe->state != wqe_state_processing)))
+ return NULL;
+
+ if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
+ qp->req.wait_fence = 1;
+ return NULL;
+ }
+
+ wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
+ return wqe;
+}
+
+static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_LAST :
+ IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_ONLY :
+ IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+ case IB_WR_SEND:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_SEND_LAST :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_SEND_ONLY :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_RDMA_READ:
+ return IB_OPCODE_RC_RDMA_READ_REQUEST;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_OPCODE_RC_COMPARE_SWAP;
+
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_OPCODE_RC_FETCH_ADD;
+
+ case IB_WR_SEND_WITH_INV:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
+ IB_OPCODE_RC_SEND_FIRST;
+ case IB_WR_REG_MR:
+ case IB_WR_LOCAL_INV:
+ return opcode;
+ }
+
+ return -EINVAL;
+}
+
+static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_LAST :
+ IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_ONLY :
+ IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+ case IB_WR_SEND:
+ if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_SEND_LAST :
+ IB_OPCODE_UC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_SEND_ONLY :
+ IB_OPCODE_UC_SEND_FIRST;
+
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_UC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_UC_SEND_FIRST;
+ }
+
+ return -EINVAL;
+}
+
+static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ unsigned opcode)
+{
+ int fits = (wqe->dma.resid <= qp->mtu);
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ return next_opcode_rc(qp, opcode, fits);
+
+ case IB_QPT_UC:
+ return next_opcode_uc(qp, opcode, fits);
+
+ case IB_QPT_SMI:
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ switch (opcode) {
+ case IB_WR_SEND:
+ return IB_OPCODE_UD_SEND_ONLY;
+
+ case IB_WR_SEND_WITH_IMM:
+ return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ int depth;
+
+ if (wqe->has_rd_atomic)
+ return 0;
+
+ qp->req.need_rd_atomic = 1;
+ depth = atomic_dec_return(&qp->req.rd_atomic);
+
+ if (depth >= 0) {
+ qp->req.need_rd_atomic = 0;
+ wqe->has_rd_atomic = 1;
+ return 0;
+ }
+
+ atomic_inc(&qp->req.rd_atomic);
+ return -EAGAIN;
+}
+
+static inline int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_port *port;
+ struct rxe_av *av;
+
+ if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
+ return qp->mtu;
+
+ av = &wqe->av;
+ port = &rxe->port;
+
+ return port->mtu_cap;
+}
+
+static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ int opcode, int payload,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_port *port = &rxe->port;
+ struct sk_buff *skb;
+ struct rxe_send_wr *ibwr = &wqe->wr;
+ struct rxe_av *av;
+ int pad = (-payload) & 0x3;
+ int paylen;
+ int solicited;
+ u16 pkey;
+ u32 qp_num;
+ int ack_req;
+
+ /* length from start of bth to end of icrc */
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
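+ /* e.g. a hypothetical 13-byte payload gets pad = 3, so the payload
+ * region ends on a 4-byte boundary and paylen covers the
+ * opcode-specific headers, 16 bytes of payload plus pad, and the
+ * 4-byte ICRC
+ */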
+
+ /* pkt->hdr, rxe, port_num and mask are initialized in ifc
+ * layer
+ */
+ pkt->opcode = opcode;
+ pkt->qp = qp;
+ pkt->psn = qp->req.psn;
+ pkt->mask = rxe_opcode[opcode].mask;
+ pkt->paylen = paylen;
+ pkt->offset = 0;
+ pkt->wqe = wqe;
+
+ /* init skb */
+ av = rxe_get_av(pkt);
+ skb = rxe->ifc_ops->init_packet(rxe, av, paylen, pkt);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* init bth */
+ solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
+ (pkt->mask & RXE_END_MASK) &&
+ ((pkt->mask & (RXE_SEND_MASK)) ||
+ (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
+ (RXE_WRITE_MASK | RXE_IMMDT_MASK));
+
+ pkey = (qp_type(qp) == IB_QPT_GSI) ?
+ port->pkey_tbl[ibwr->wr.ud.pkey_index] :
+ port->pkey_tbl[qp->attr.pkey_index];
+
+ qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
+ qp->attr.dest_qp_num;
+
+ ack_req = ((pkt->mask & RXE_END_MASK) ||
+ (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ if (ack_req)
+ qp->req.noack_pkts = 0;
+
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+ ack_req, pkt->psn);
+
+ /* init optional headers */
+ if (pkt->mask & RXE_RETH_MASK) {
+ reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
+ reth_set_va(pkt, wqe->iova);
+ reth_set_len(pkt, wqe->dma.length);
+ }
+
+ if (pkt->mask & RXE_IMMDT_MASK)
+ immdt_set_imm(pkt, ibwr->ex.imm_data);
+
+ if (pkt->mask & RXE_IETH_MASK)
+ ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);
+
+ if (pkt->mask & RXE_ATMETH_MASK) {
+ atmeth_set_va(pkt, wqe->iova);
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
+ opcode == IB_OPCODE_RD_COMPARE_SWAP) {
+ atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
+ atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
+ } else {
+ atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
+ }
+ atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
+ }
+
+ if (pkt->mask & RXE_DETH_MASK) {
+ if (qp->ibqp.qp_num == 1)
+ deth_set_qkey(pkt, GSI_QKEY);
+ else
+ deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
+ deth_set_sqp(pkt, qp->ibqp.qp_num);
+ }
+
+ return skb;
+}
+
+static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ int paylen)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u32 crc = 0;
+ u32 *p;
+ int err;
+
+ err = rxe->ifc_ops->prepare(rxe, pkt, skb, &crc);
+ if (err)
+ return err;
+
+ if (pkt->mask & RXE_WRITE_OR_SEND) {
+ if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
+
+ crc = crc32_le(crc, tmp, paylen);
+
+ memcpy(payload_addr(pkt), tmp, paylen);
+
+ wqe->dma.resid -= paylen;
+ wqe->dma.sge_offset += paylen;
+ } else {
+ err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+ payload_addr(pkt), paylen,
+ from_mem_obj,
+ &crc);
+ if (err)
+ return err;
+ }
+ }
+ p = payload_addr(pkt) + paylen + bth_pad(pkt);
+
+ *p = ~crc;
+
+ return 0;
+}
+
+static void update_wqe_state(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt,
+ enum wqe_state *prev_state)
+{
+ enum wqe_state prev_state_ = wqe->state;
+
+ if (pkt->mask & RXE_END_MASK) {
+ if (qp_type(qp) == IB_QPT_RC)
+ wqe->state = wqe_state_pending;
+ } else {
+ wqe->state = wqe_state_processing;
+ }
+
+ *prev_state = prev_state_;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt, int payload)
+{
+ /* number of packets left to send including current one */
+ int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
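+ /* e.g. with a hypothetical 1024-byte MTU, resid = 1500 and
+ * payload = 1024 give num_pkt = (1500 + 1024 + 1023) / 1024 = 3
+ */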
+
+ /* handle zero length packet case */
+ if (num_pkt == 0)
+ num_pkt = 1;
+
+ if (pkt->mask & RXE_START_MASK) {
+ wqe->first_psn = qp->req.psn;
+ wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
+ }
+
+ if (pkt->mask & RXE_READ_MASK)
+ qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
+ else
+ qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+
+ qp->req.opcode = pkt->opcode;
+
+ if (pkt->mask & RXE_END_MASK)
+ qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+
+ qp->need_req_skb = 0;
+
+ if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+}
+
+int rxe_requester(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ struct rxe_pkt_info pkt;
+ struct sk_buff *skb;
+ struct rxe_send_wqe *wqe;
+ unsigned mask;
+ int payload;
+ int mtu;
+ int opcode;
+ int ret;
+ enum wqe_state prev_state;
+
+next_wqe:
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ goto exit;
+
+ if (unlikely(qp->req.state == QP_STATE_RESET)) {
+ qp->req.wqe_index = consumer_index(qp->sq.queue);
+ qp->req.opcode = -1;
+ qp->req.need_rd_atomic = 0;
+ qp->req.wait_psn = 0;
+ qp->req.need_retry = 0;
+ goto exit;
+ }
+
+ if (unlikely(qp->req.need_retry)) {
+ req_retry(qp);
+ qp->req.need_retry = 0;
+ }
+
+ wqe = req_next_wqe(qp);
+ if (unlikely(!wqe))
+ goto exit;
+
+ if (wqe->mask & WR_REG_MASK) {
+ if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mem *rmr;
+
+ rmr = rxe_pool_get_index(&rxe->mr_pool,
+ wqe->wr.ex.invalidate_rkey >> 8);
+ if (!rmr) {
+ pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+ wqe->state = wqe_state_error;
+ wqe->status = IB_WC_MW_BIND_ERR;
+ goto exit;
+ }
+ rmr->state = RXE_MEM_STATE_FREE;
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ } else if (wqe->wr.opcode == IB_WR_REG_MR) {
+ struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
+
+ rmr->state = RXE_MEM_STATE_VALID;
+ rmr->access = wqe->wr.wr.reg.access;
+ rmr->lkey = wqe->wr.wr.reg.key;
+ rmr->rkey = wqe->wr.wr.reg.key;
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ } else {
+ goto exit;
+ }
+ qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index);
+ goto next_wqe;
+ }
+
+ if (unlikely(qp_type(qp) == IB_QPT_RC &&
+ qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
+ qp->req.wait_psn = 1;
+ goto exit;
+ }
+
+ /* Limit the number of inflight SKBs per QP */
+ if (unlikely(atomic_read(&qp->skb_out) >
+ RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
+ qp->need_req_skb = 1;
+ goto exit;
+ }
+
+ opcode = next_opcode(qp, wqe, wqe->wr.opcode);
+ if (unlikely(opcode < 0)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto exit;
+ }
+
+ mask = rxe_opcode[opcode].mask;
+ if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
+ if (check_init_depth(qp, wqe))
+ goto exit;
+ }
+
+ mtu = get_mtu(qp, wqe);
+ payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
+ if (payload > mtu) {
+ if (qp_type(qp) == IB_QPT_UD) {
+ /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
+ * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
+ * shall not emit any packets for this message. Further, the CI shall not
+ * generate an error due to this condition.
+ */
+
+ /* fake a successful UD send */
+ wqe->first_psn = qp->req.psn;
+ wqe->last_psn = qp->req.psn;
+ qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+ qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
+ qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index);
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ goto complete;
+ }
+ payload = mtu;
+ }
+
+ skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
+ if (unlikely(!skb)) {
+ pr_err("Failed allocating skb\n");
+ goto err;
+ }
+
+ if (fill_packet(qp, wqe, &pkt, skb, payload)) {
+ pr_debug("Error during fill packet\n");
+ goto err;
+ }
+
+ update_wqe_state(qp, wqe, &pkt, &prev_state);
+ ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
+ if (ret) {
+ qp->need_req_skb = 1;
+ kfree_skb(skb);
+
+ wqe->state = prev_state;
+
+ if (ret == -EAGAIN) {
+ rxe_run_task(&qp->req.task, 1);
+ goto exit;
+ }
+
+ goto err;
+ }
+
+ update_state(qp, wqe, &pkt, payload);
+
+ goto next_wqe;
+
+err:
+ kfree_skb(skb);
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ wqe->state = wqe_state_error;
+
+complete:
+ if (qp_type(qp) != IB_QPT_RC) {
+ while (rxe_completer(qp) == 0)
+ ;
+ }
+
+ return 0;
+
+exit:
+ return -EAGAIN;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
new file mode 100644
index 000000000000..ebb03b46e2ad
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -0,0 +1,1380 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+enum resp_states {
+ RESPST_NONE,
+ RESPST_GET_REQ,
+ RESPST_CHK_PSN,
+ RESPST_CHK_OP_SEQ,
+ RESPST_CHK_OP_VALID,
+ RESPST_CHK_RESOURCE,
+ RESPST_CHK_LENGTH,
+ RESPST_CHK_RKEY,
+ RESPST_EXECUTE,
+ RESPST_READ_REPLY,
+ RESPST_COMPLETE,
+ RESPST_ACKNOWLEDGE,
+ RESPST_CLEANUP,
+ RESPST_DUPLICATE_REQUEST,
+ RESPST_ERR_MALFORMED_WQE,
+ RESPST_ERR_UNSUPPORTED_OPCODE,
+ RESPST_ERR_MISALIGNED_ATOMIC,
+ RESPST_ERR_PSN_OUT_OF_SEQ,
+ RESPST_ERR_MISSING_OPCODE_FIRST,
+ RESPST_ERR_MISSING_OPCODE_LAST_C,
+ RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+ RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+ RESPST_ERR_RNR,
+ RESPST_ERR_RKEY_VIOLATION,
+ RESPST_ERR_LENGTH,
+ RESPST_ERR_CQ_OVERFLOW,
+ RESPST_ERROR,
+ RESPST_RESET,
+ RESPST_DONE,
+ RESPST_EXIT,
+};
+
+static char *resp_state_name[] = {
+ [RESPST_NONE] = "NONE",
+ [RESPST_GET_REQ] = "GET_REQ",
+ [RESPST_CHK_PSN] = "CHK_PSN",
+ [RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
+ [RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
+ [RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
+ [RESPST_CHK_LENGTH] = "CHK_LENGTH",
+ [RESPST_CHK_RKEY] = "CHK_RKEY",
+ [RESPST_EXECUTE] = "EXECUTE",
+ [RESPST_READ_REPLY] = "READ_REPLY",
+ [RESPST_COMPLETE] = "COMPLETE",
+ [RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
+ [RESPST_CLEANUP] = "CLEANUP",
+ [RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
+ [RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
+ [RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
+ [RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
+ [RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
+ [RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
+ [RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
+ [RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
+ [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
+ [RESPST_ERR_RNR] = "ERR_RNR",
+ [RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
+ [RESPST_ERR_LENGTH] = "ERR_LENGTH",
+ [RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
+ [RESPST_ERROR] = "ERROR",
+ [RESPST_RESET] = "RESET",
+ [RESPST_DONE] = "DONE",
+ [RESPST_EXIT] = "EXIT",
+};
+
+/* rxe_recv calls here to add a request packet to the input queue */
+void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct sk_buff *skb)
+{
+ int must_sched;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ skb_queue_tail(&qp->req_pkts, skb);
+
+ must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
+ (skb_queue_len(&qp->req_pkts) > 1);
+
+ rxe_run_task(&qp->resp.task, must_sched);
+}
+
+static inline enum resp_states get_req(struct rxe_qp *qp,
+ struct rxe_pkt_info **pkt_p)
+{
+ struct sk_buff *skb;
+
+ if (qp->resp.state == QP_STATE_ERROR) {
+ skb = skb_dequeue(&qp->req_pkts);
+ if (skb) {
+ /* drain request packet queue */
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ return RESPST_GET_REQ;
+ }
+
+ /* go drain recv wr queue */
+ return RESPST_CHK_RESOURCE;
+ }
+
+ skb = skb_peek(&qp->req_pkts);
+ if (!skb)
+ return RESPST_EXIT;
+
+ *pkt_p = SKB_TO_PKT(skb);
+
+ return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
+}
+
+static enum resp_states check_psn(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ int diff = psn_compare(pkt->psn, qp->resp.psn);
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (diff > 0) {
+ if (qp->resp.sent_psn_nak)
+ return RESPST_CLEANUP;
+
+ qp->resp.sent_psn_nak = 1;
+ return RESPST_ERR_PSN_OUT_OF_SEQ;
+
+ } else if (diff < 0) {
+ return RESPST_DUPLICATE_REQUEST;
+ }
+
+ if (qp->resp.sent_psn_nak)
+ qp->resp.sent_psn_nak = 0;
+
+ break;
+
+ case IB_QPT_UC:
+ if (qp->resp.drop_msg || diff != 0) {
+ if (pkt->mask & RXE_START_MASK) {
+ qp->resp.drop_msg = 0;
+ return RESPST_CHK_OP_SEQ;
+ }
+
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return RESPST_CHK_OP_SEQ;
+}
+
+static enum resp_states check_op_seq(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ switch (qp->resp.opcode) {
+ case IB_OPCODE_RC_SEND_FIRST:
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ case IB_OPCODE_RC_SEND_LAST:
+ case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_C;
+ }
+
+ case IB_OPCODE_RC_RDMA_WRITE_FIRST:
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_C;
+ }
+
+ default:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ case IB_OPCODE_RC_SEND_LAST:
+ case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_ERR_MISSING_OPCODE_FIRST;
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+ }
+ break;
+
+ case IB_QPT_UC:
+ switch (qp->resp.opcode) {
+ case IB_OPCODE_UC_SEND_FIRST:
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ case IB_OPCODE_UC_SEND_LAST:
+ case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
+ }
+
+ case IB_OPCODE_UC_RDMA_WRITE_FIRST:
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
+ }
+
+ default:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ case IB_OPCODE_UC_SEND_LAST:
+ case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+ }
+ break;
+
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+}
+
+static enum resp_states check_op_valid(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (((pkt->mask & RXE_READ_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
+ ((pkt->mask & RXE_WRITE_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
+ ((pkt->mask & RXE_ATOMIC_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+ }
+
+ break;
+
+ case IB_QPT_UC:
+ if ((pkt->mask & RXE_WRITE_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ }
+
+ break;
+
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return RESPST_CHK_RESOURCE;
+}
+
+static enum resp_states get_srq_wqe(struct rxe_qp *qp)
+{
+ struct rxe_srq *srq = qp->srq;
+ struct rxe_queue *q = srq->rq.queue;
+ struct rxe_recv_wqe *wqe;
+ struct ib_event ev;
+
+ if (srq->error)
+ return RESPST_ERR_RNR;
+
+ spin_lock_bh(&srq->rq.consumer_lock);
+
+ wqe = queue_head(q);
+ if (!wqe) {
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ return RESPST_ERR_RNR;
+ }
+
+ /* note kernel and user space recv wqes have same size */
+ memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
+
+ qp->resp.wqe = &qp->resp.srq_wqe.wqe;
+ advance_consumer(q);
+
+ if (srq->limit && srq->ibsrq.event_handler &&
+ (queue_count(q) < srq->limit)) {
+ srq->limit = 0;
+ goto event;
+ }
+
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ return RESPST_CHK_LENGTH;
+
+event:
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
+ return RESPST_CHK_LENGTH;
+}
+
+static enum resp_states check_resource(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_srq *srq = qp->srq;
+
+ if (qp->resp.state == QP_STATE_ERROR) {
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_WR_FLUSH_ERR;
+ return RESPST_COMPLETE;
+ } else if (!srq) {
+ qp->resp.wqe = queue_head(qp->rq.queue);
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_WR_FLUSH_ERR;
+ return RESPST_COMPLETE;
+ } else {
+ return RESPST_EXIT;
+ }
+ } else {
+ return RESPST_EXIT;
+ }
+ }
+
+ if (pkt->mask & RXE_READ_OR_ATOMIC) {
+ /* it is the requester's job not to send
+ * too many read/atomic ops; we just
+ * recycle the responder resource queue
+ */
+ if (likely(qp->attr.max_rd_atomic > 0))
+ return RESPST_CHK_LENGTH;
+ else
+ return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
+ }
+
+ if (pkt->mask & RXE_RWR_MASK) {
+ if (srq)
+ return get_srq_wqe(qp);
+
+ qp->resp.wqe = queue_head(qp->rq.queue);
+ return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
+ }
+
+ return RESPST_CHK_LENGTH;
+}
+
+static enum resp_states check_length(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ return RESPST_CHK_RKEY;
+
+ case IB_QPT_UC:
+ return RESPST_CHK_RKEY;
+
+ default:
+ return RESPST_CHK_RKEY;
+ }
+}
+
+static enum resp_states check_rkey(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_mem *mem;
+ u64 va;
+ u32 rkey;
+ u32 resid;
+ u32 pktlen;
+ int mtu = qp->mtu;
+ enum resp_states state;
+ int access;
+
+ if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
+ if (pkt->mask & RXE_RETH_MASK) {
+ qp->resp.va = reth_va(pkt);
+ qp->resp.rkey = reth_rkey(pkt);
+ qp->resp.resid = reth_len(pkt);
+ }
+ access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
+ : IB_ACCESS_REMOTE_WRITE;
+ } else if (pkt->mask & RXE_ATOMIC_MASK) {
+ qp->resp.va = atmeth_va(pkt);
+ qp->resp.rkey = atmeth_rkey(pkt);
+ qp->resp.resid = sizeof(u64);
+ access = IB_ACCESS_REMOTE_ATOMIC;
+ } else {
+ return RESPST_EXECUTE;
+ }
+
+ va = qp->resp.va;
+ rkey = qp->resp.rkey;
+ resid = qp->resp.resid;
+ pktlen = payload_size(pkt);
+
+ mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
+ if (!mem) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err1;
+ }
+
+ if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err1;
+ }
+
+ if (mem_check_range(mem, va, resid)) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err2;
+ }
+
+ if (pkt->mask & RXE_WRITE_MASK) {
+ if (resid > mtu) {
+ if (pktlen != mtu || bth_pad(pkt)) {
+ state = RESPST_ERR_LENGTH;
+ goto err2;
+ }
+
+ resid = mtu;
+ } else {
+ if (pktlen != resid) {
+ state = RESPST_ERR_LENGTH;
+ goto err2;
+ }
+ if ((bth_pad(pkt) != (0x3 & (-resid)))) {
+ /* a wrong pad count is not exactly a
+ * length error, but nothing else fits
+ */
+ state = RESPST_ERR_LENGTH;
+ goto err2;
+ }
+ }
+ }
+
+ WARN_ON(qp->resp.mr);
+
+ qp->resp.mr = mem;
+ return RESPST_EXECUTE;
+
+err2:
+ rxe_drop_ref(mem);
+err1:
+ return state;
+}
+
+static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
+ int data_len)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+ data_addr, data_len, to_mem_obj, NULL);
+ if (unlikely(err))
+ return (err == -ENOSPC) ? RESPST_ERR_LENGTH
+ : RESPST_ERR_MALFORMED_WQE;
+
+ return RESPST_NONE;
+}
+
+static enum resp_states write_data_in(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ enum resp_states rc = RESPST_NONE;
+ int err;
+ int data_len = payload_size(pkt);
+
+ err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
+ data_len, to_mem_obj, NULL);
+ if (err) {
+ rc = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
+
+ qp->resp.va += data_len;
+ qp->resp.resid -= data_len;
+
+out:
+ return rc;
+}
+
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
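+/* note: this is a single file-scope lock, so atomic operations are
+ * serialized against each other across all QPs and MRs handled here
+ */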
+
+static enum resp_states process_atomic(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ u64 iova = atmeth_va(pkt);
+ u64 *vaddr;
+ enum resp_states ret;
+ struct rxe_mem *mr = qp->resp.mr;
+
+ if (mr->state != RXE_MEM_STATE_VALID) {
+ ret = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
+
+ vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
+
+ /* check vaddr is 8 bytes aligned. */
+ if (!vaddr || (uintptr_t)vaddr & 7) {
+ ret = RESPST_ERR_MISALIGNED_ATOMIC;
+ goto out;
+ }
+
+ spin_lock_bh(&atomic_ops_lock);
+
+ qp->resp.atomic_orig = *vaddr;
+
+ if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
+ pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
+ if (*vaddr == atmeth_comp(pkt))
+ *vaddr = atmeth_swap_add(pkt);
+ } else {
+ *vaddr += atmeth_swap_add(pkt);
+ }
+
+ spin_unlock_bh(&atomic_ops_lock);
+
+ ret = RESPST_NONE;
+out:
+ return ret;
+}
+
+static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_pkt_info *ack,
+ int opcode,
+ int payload,
+ u32 psn,
+ u8 syndrome,
+ u32 *crcp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct sk_buff *skb;
+ u32 crc = 0;
+ u32 *p;
+ int paylen;
+ int pad;
+ int err;
+
+ /*
+ * allocate packet
+ */
+ pad = (-payload) & 0x3;
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+
+ skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
+ if (!skb)
+ return NULL;
+
+ ack->qp = qp;
+ ack->opcode = opcode;
+ ack->mask = rxe_opcode[opcode].mask;
+ ack->offset = pkt->offset;
+ ack->paylen = paylen;
+
+ /* fill in bth using the request packet headers */
+ memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
+
+ bth_set_opcode(ack, opcode);
+ bth_set_qpn(ack, qp->attr.dest_qp_num);
+ bth_set_pad(ack, pad);
+ bth_set_se(ack, 0);
+ bth_set_psn(ack, psn);
+ bth_set_ack(ack, 0);
+ ack->psn = psn;
+
+ if (ack->mask & RXE_AETH_MASK) {
+ aeth_set_syn(ack, syndrome);
+ aeth_set_msn(ack, qp->resp.msn);
+ }
+
+ if (ack->mask & RXE_ATMACK_MASK)
+ atmack_set_orig(ack, qp->resp.atomic_orig);
+
+ err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
+ if (err) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (crcp) {
+ /* CRC computation will be continued by the caller */
+ *crcp = crc;
+ } else {
+ p = payload_addr(ack) + payload + bth_pad(ack);
+ *p = ~crc;
+ }
+
+ return skb;
+}
+
+/* RDMA read response. If res is not NULL, then we have a current RDMA request
+ * being processed or replayed.
+ */
+static enum resp_states read_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *req_pkt)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ int mtu = qp->mtu;
+ enum resp_states state;
+ int payload;
+ int opcode;
+ int err;
+ struct resp_res *res = qp->resp.res;
+ u32 icrc;
+ u32 *p;
+
+ if (!res) {
+ /* This is the first time we process that request. Get a
+ * resource
+ */
+ res = &qp->resp.resources[qp->resp.res_head];
+
+ free_rd_atomic_resource(qp, res);
+ rxe_advance_resp_resource(qp);
+
+ res->type = RXE_READ_MASK;
+
+ res->read.va = qp->resp.va;
+ res->read.va_org = qp->resp.va;
+
+ res->first_psn = req_pkt->psn;
+ res->last_psn = req_pkt->psn +
+ (reth_len(req_pkt) + mtu - 1) /
+ mtu - 1;
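+ /* e.g. a hypothetical 8192-byte read with a 4096-byte MTU needs
+ * ceil(8192 / 4096) = 2 response packets, so last_psn ends up as
+ * first_psn + 1
+ */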
+ res->cur_psn = req_pkt->psn;
+
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ /* note res inherits the reference to mr from qp */
+ res->read.mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+
+ qp->resp.res = res;
+ res->state = rdatm_res_state_new;
+ }
+
+ if (res->state == rdatm_res_state_new) {
+ if (res->read.resid <= mtu)
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
+ else
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
+ } else {
+ if (res->read.resid > mtu)
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
+ else
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
+ }
+
+ res->state = rdatm_res_state_next;
+
+ payload = min_t(int, res->read.resid, mtu);
+
+ skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
+ res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
+ if (!skb)
+ return RESPST_ERR_RNR;
+
+ err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+ payload, from_mem_obj, &icrc);
+ if (err)
+ pr_err("Failed copying memory\n");
+
+ p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
+ *p = ~icrc;
+
+ err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
+ if (err) {
+ pr_err("Failed sending RDMA reply.\n");
+ kfree_skb(skb);
+ return RESPST_ERR_RNR;
+ }
+
+ res->read.va += payload;
+ res->read.resid -= payload;
+ res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
+
+ if (res->read.resid > 0) {
+ state = RESPST_DONE;
+ } else {
+ qp->resp.res = NULL;
+ qp->resp.opcode = -1;
+ qp->resp.psn = res->cur_psn;
+ state = RESPST_CLEANUP;
+ }
+
+ return state;
+}
+
+/* Executes a new request. A retried request never reaches this function
+ * (sends and writes are discarded, and reads and atomics are retried
+ * elsewhere).
+ */
+static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+ enum resp_states err;
+
+ if (pkt->mask & RXE_SEND_MASK) {
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_SMI ||
+ qp_type(qp) == IB_QPT_GSI) {
+ union rdma_network_hdr hdr;
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ memset(&hdr, 0, sizeof(hdr));
+ if (skb->protocol == htons(ETH_P_IP))
+ memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+
+ err = send_data_in(qp, &hdr, sizeof(hdr));
+ if (err)
+ return err;
+ }
+ err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
+ if (err)
+ return err;
+ } else if (pkt->mask & RXE_WRITE_MASK) {
+ err = write_data_in(qp, pkt);
+ if (err)
+ return err;
+ } else if (pkt->mask & RXE_READ_MASK) {
+ /* For RDMA Read we can increment the msn now. See C9-148. */
+ qp->resp.msn++;
+ return RESPST_READ_REPLY;
+ } else if (pkt->mask & RXE_ATOMIC_MASK) {
+ err = process_atomic(qp, pkt);
+ if (err)
+ return err;
+ } else
+ /* Unreachable */
+ WARN_ON(1);
+
+ /* We successfully processed this new request. */
+ qp->resp.msn++;
+
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+
+ if (pkt->mask & RXE_COMP_MASK)
+ return RESPST_COMPLETE;
+ else if (qp_type(qp) == IB_QPT_RC)
+ return RESPST_ACKNOWLEDGE;
+ else
+ return RESPST_CLEANUP;
+}
+
+static enum resp_states do_complete(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_cqe cqe;
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ struct rxe_recv_wqe *wqe = qp->resp.wqe;
+
+ if (unlikely(!wqe))
+ return RESPST_CLEANUP;
+
+ memset(&cqe, 0, sizeof(cqe));
+
+ wc->wr_id = wqe->wr_id;
+ wc->status = qp->resp.status;
+ wc->qp = &qp->ibqp;
+
+ /* fields after status are not required for errors */
+ if (wc->status == IB_WC_SUCCESS) {
+ wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ wc->vendor_err = 0;
+ wc->byte_len = wqe->dma.length - wqe->dma.resid;
+
+ /* fields after byte_len are different between kernel and user
+ * space
+ */
+ if (qp->rcq->is_user) {
+ uwc->wc_flags = IB_WC_GRH;
+
+ if (pkt->mask & RXE_IMMDT_MASK) {
+ uwc->wc_flags |= IB_WC_WITH_IMM;
+ uwc->ex.imm_data =
+ (__u32 __force)immdt_imm(pkt);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ uwc->ex.invalidate_rkey = ieth_rkey(pkt);
+ }
+
+ uwc->qp_num = qp->ibqp.qp_num;
+
+ if (pkt->mask & RXE_DETH_MASK)
+ uwc->src_qp = deth_sqp(pkt);
+
+ uwc->port_num = qp->attr.port_num;
+ } else {
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (skb->protocol == htons(ETH_P_IP))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (pkt->mask & RXE_IMMDT_MASK) {
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = immdt_imm(pkt);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mem *rmr;
+
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = ieth_rkey(pkt);
+
+ rmr = rxe_pool_get_index(&rxe->mr_pool,
+ wc->ex.invalidate_rkey >> 8);
+ if (unlikely(!rmr)) {
+ pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+ return RESPST_ERROR;
+ }
+ rmr->state = RXE_MEM_STATE_FREE;
+ }
+
+ wc->qp = &qp->ibqp;
+
+ if (pkt->mask & RXE_DETH_MASK)
+ wc->src_qp = deth_sqp(pkt);
+
+ wc->port_num = qp->attr.port_num;
+ }
+ }
+
+	/* for srq we hold a copy of the wqe; for !srq we hold a reference into the rq */
+ if (!qp->srq)
+ advance_consumer(qp->rq.queue);
+
+ qp->resp.wqe = NULL;
+
+ if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
+ return RESPST_ERR_CQ_OVERFLOW;
+
+ if (qp->resp.state == QP_STATE_ERROR)
+ return RESPST_CHK_RESOURCE;
+
+ if (!pkt)
+ return RESPST_DONE;
+ else if (qp_type(qp) == IB_QPT_RC)
+ return RESPST_ACKNOWLEDGE;
+ else
+ return RESPST_CLEANUP;
+}
+
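+/* Build and send a bare RC acknowledge packet carrying the given AETH
+ * syndrome and PSN.
+ */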
+static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ u8 syndrome, u32 psn)
+{
+ int err = 0;
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
+ 0, psn, syndrome, NULL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
+ if (err) {
+ pr_err_ratelimited("Failed sending ack\n");
+ kfree_skb(skb);
+ }
+
+err1:
+ return err;
+}
+
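+/* Build an atomic acknowledge, save it in the responder resource array
+ * and transmit a clone of it, so a duplicate atomic request can later be
+ * answered by resending the saved reply.
+ */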
+static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ u8 syndrome)
+{
+ int rc = 0;
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ struct sk_buff *skb_copy;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct resp_res *res;
+
+ skb = prepare_ack_packet(qp, pkt, &ack_pkt,
+ IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
+ syndrome, NULL);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ skb_copy = skb_clone(skb, GFP_ATOMIC);
+ if (skb_copy)
+ rxe_add_ref(qp); /* for the new SKB */
+ else {
+ pr_warn("Could not clone atomic response\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ free_rd_atomic_resource(qp, res);
+ rxe_advance_resp_resource(qp);
+
+ res->type = RXE_ATOMIC_MASK;
+ res->atomic.skb = skb;
+ res->first_psn = qp->resp.psn;
+ res->last_psn = qp->resp.psn;
+ res->cur_psn = qp->resp.psn;
+
+ rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
+ if (rc) {
+ pr_err_ratelimited("Failed sending ack\n");
+ rxe_drop_ref(qp);
+ kfree_skb(skb_copy);
+ }
+
+out:
+ return rc;
+}
+
+static enum resp_states acknowledge(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ if (qp_type(qp) != IB_QPT_RC)
+ return RESPST_CLEANUP;
+
+ if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
+ send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
+ else if (pkt->mask & RXE_ATOMIC_MASK)
+ send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
+ else if (bth_ack(pkt))
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
+
+ return RESPST_CLEANUP;
+}
+
+static enum resp_states cleanup(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb;
+
+ if (pkt) {
+ skb = skb_dequeue(&qp->req_pkts);
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ return RESPST_DONE;
+}
+
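+/* Find the responder resource (read or atomic) whose PSN range contains
+ * the given PSN; returns NULL if there is no match.
+ */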
+static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
+{
+ int i;
+
+ for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ struct resp_res *res = &qp->resp.resources[i];
+
+ if (res->type == 0)
+ continue;
+
+ if (psn_compare(psn, res->first_psn) >= 0 &&
+ psn_compare(psn, res->last_psn) <= 0) {
+ return res;
+ }
+ }
+
+ return NULL;
+}
+
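+/* Handle a duplicate request: sends and writes are simply re-acked,
+ * duplicate reads are replayed from the saved responder resource and
+ * duplicate atomics are answered by resending the cached atomic reply.
+ */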
+static enum resp_states duplicate_request(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ enum resp_states rc;
+
+ if (pkt->mask & RXE_SEND_MASK ||
+ pkt->mask & RXE_WRITE_MASK) {
+		/* SEND or WRITE. Ack again and clean up. C9-105. */
+ if (bth_ack(pkt))
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+ rc = RESPST_CLEANUP;
+ goto out;
+ } else if (pkt->mask & RXE_READ_MASK) {
+ struct resp_res *res;
+
+ res = find_resource(qp, pkt->psn);
+ if (!res) {
+ /* Resource not found. Class D error. Drop the
+ * request.
+ */
+ rc = RESPST_CLEANUP;
+ goto out;
+ } else {
+ /* Ensure this new request is the same as the previous
+ * one or a subset of it.
+ */
+ u64 iova = reth_va(pkt);
+ u32 resid = reth_len(pkt);
+
+ if (iova < res->read.va_org ||
+ resid > res->read.length ||
+ (iova + resid) > (res->read.va_org +
+ res->read.length)) {
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ if (reth_rkey(pkt) != res->read.rkey) {
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ res->cur_psn = pkt->psn;
+ res->state = (pkt->psn == res->first_psn) ?
+ rdatm_res_state_new :
+ rdatm_res_state_replay;
+
+ /* Reset the resource, except length. */
+ res->read.va_org = iova;
+ res->read.va = iova;
+ res->read.resid = resid;
+
+ /* Replay the RDMA read reply. */
+ qp->resp.res = res;
+ rc = RESPST_READ_REPLY;
+ goto out;
+ }
+ } else {
+ struct resp_res *res;
+
+ /* Find the operation in our list of responder resources. */
+ res = find_resource(qp, pkt->psn);
+ if (res) {
+ struct sk_buff *skb_copy;
+
+ skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
+ if (skb_copy) {
+ rxe_add_ref(qp); /* for the new SKB */
+ } else {
+ pr_warn("Couldn't clone atomic resp\n");
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+ bth_set_psn(SKB_TO_PKT(skb_copy),
+ qp->resp.psn - 1);
+ /* Resend the result. */
+ rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
+ pkt, skb_copy);
+ if (rc) {
+ pr_err("Failed resending result. This flow is not handled - skb ignored\n");
+ kfree_skb(skb_copy);
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+ }
+
+ /* Resource not found. Class D error. Drop the request. */
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/* Process a Class A or C error. Both are treated the same in this implementation. */
+static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
+ enum ib_wc_status status)
+{
+ qp->resp.aeth_syndrome = syndrome;
+ qp->resp.status = status;
+
+ /* indicate that we should go through the ERROR state */
+ qp->resp.goto_error = 1;
+}
+
+static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
+{
+ /* UC */
+ if (qp->srq) {
+ /* Class E */
+ qp->resp.drop_msg = 1;
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ return RESPST_COMPLETE;
+ } else {
+ return RESPST_CLEANUP;
+ }
+ } else {
+		/* Class D1. This packet may be the start of a
+		 * new message and could be valid. The previous
+		 * message is invalid and ignored. Reset the
+		 * recv wr to its original state.
+		 */
+ if (qp->resp.wqe) {
+ qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
+ qp->resp.wqe->dma.cur_sge = 0;
+ qp->resp.wqe->dma.sge_offset = 0;
+ qp->resp.opcode = -1;
+ }
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ return RESPST_CLEANUP;
+ }
+}
+
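+/* Top-level responder work function: drives the RESPST_* state machine,
+ * handling one request packet per RESPST_GET_REQ iteration. Returns 0
+ * when it should be called again and -EAGAIN when there is nothing more
+ * to do, following the rxe_task convention of calling func until it
+ * returns non-zero.
+ */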
+int rxe_responder(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ enum resp_states state;
+ struct rxe_pkt_info *pkt = NULL;
+ int ret = 0;
+
+ qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
+
+ if (!qp->valid) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (qp->resp.state) {
+ case QP_STATE_RESET:
+ state = RESPST_RESET;
+ break;
+
+ default:
+ state = RESPST_GET_REQ;
+ break;
+ }
+
+ while (1) {
+ pr_debug("state = %s\n", resp_state_name[state]);
+ switch (state) {
+ case RESPST_GET_REQ:
+ state = get_req(qp, &pkt);
+ break;
+ case RESPST_CHK_PSN:
+ state = check_psn(qp, pkt);
+ break;
+ case RESPST_CHK_OP_SEQ:
+ state = check_op_seq(qp, pkt);
+ break;
+ case RESPST_CHK_OP_VALID:
+ state = check_op_valid(qp, pkt);
+ break;
+ case RESPST_CHK_RESOURCE:
+ state = check_resource(qp, pkt);
+ break;
+ case RESPST_CHK_LENGTH:
+ state = check_length(qp, pkt);
+ break;
+ case RESPST_CHK_RKEY:
+ state = check_rkey(qp, pkt);
+ break;
+ case RESPST_EXECUTE:
+ state = execute(qp, pkt);
+ break;
+ case RESPST_COMPLETE:
+ state = do_complete(qp, pkt);
+ break;
+ case RESPST_READ_REPLY:
+ state = read_reply(qp, pkt);
+ break;
+ case RESPST_ACKNOWLEDGE:
+ state = acknowledge(qp, pkt);
+ break;
+ case RESPST_CLEANUP:
+ state = cleanup(qp, pkt);
+ break;
+ case RESPST_DUPLICATE_REQUEST:
+ state = duplicate_request(qp, pkt);
+ break;
+ case RESPST_ERR_PSN_OUT_OF_SEQ:
+ /* RC only - Class B. Drop packet. */
+ send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
+ state = RESPST_CLEANUP;
+ break;
+
+ case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
+ case RESPST_ERR_MISSING_OPCODE_FIRST:
+ case RESPST_ERR_MISSING_OPCODE_LAST_C:
+ case RESPST_ERR_UNSUPPORTED_OPCODE:
+ case RESPST_ERR_MISALIGNED_ATOMIC:
+ /* RC Only - Class C. */
+ do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
+ IB_WC_REM_INV_REQ_ERR);
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
+ state = do_class_d1e_error(qp);
+ break;
+ case RESPST_ERR_RNR:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* RC - class B */
+ send_ack(qp, pkt, AETH_RNR_NAK |
+ (~AETH_TYPE_MASK &
+ qp->attr.min_rnr_timer),
+ pkt->psn);
+ } else {
+ /* UD/UC - class D */
+ qp->resp.drop_msg = 1;
+ }
+ state = RESPST_CLEANUP;
+ break;
+
+ case RESPST_ERR_RKEY_VIOLATION:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* Class C */
+ do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
+ IB_WC_REM_ACCESS_ERR);
+ state = RESPST_COMPLETE;
+ } else {
+ qp->resp.drop_msg = 1;
+ if (qp->srq) {
+ /* UC/SRQ Class D */
+ qp->resp.status = IB_WC_REM_ACCESS_ERR;
+ state = RESPST_COMPLETE;
+ } else {
+ /* UC/non-SRQ Class E. */
+ state = RESPST_CLEANUP;
+ }
+ }
+ break;
+
+ case RESPST_ERR_LENGTH:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* Class C */
+ do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
+ IB_WC_REM_INV_REQ_ERR);
+ state = RESPST_COMPLETE;
+ } else if (qp->srq) {
+ /* UC/UD - class E */
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ state = RESPST_COMPLETE;
+ } else {
+ /* UC/UD - class D */
+ qp->resp.drop_msg = 1;
+ state = RESPST_CLEANUP;
+ }
+ break;
+
+ case RESPST_ERR_MALFORMED_WQE:
+ /* All, Class A. */
+ do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
+ IB_WC_LOC_QP_OP_ERR);
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_CQ_OVERFLOW:
+ /* All - Class G */
+ state = RESPST_ERROR;
+ break;
+
+ case RESPST_DONE:
+ if (qp->resp.goto_error) {
+ state = RESPST_ERROR;
+ break;
+ }
+
+ goto done;
+
+ case RESPST_EXIT:
+ if (qp->resp.goto_error) {
+ state = RESPST_ERROR;
+ break;
+ }
+
+ goto exit;
+
+ case RESPST_RESET: {
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&qp->req_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+
+ while (!qp->srq && qp->rq.queue &&
+ queue_head(qp->rq.queue))
+ advance_consumer(qp->rq.queue);
+
+ qp->resp.wqe = NULL;
+ goto exit;
+ }
+
+ case RESPST_ERROR:
+ qp->resp.goto_error = 0;
+ pr_warn("qp#%d moved to error state\n", qp_num(qp));
+ rxe_qp_error(qp);
+ goto exit;
+
+ default:
+ WARN_ON(1);
+ }
+ }
+
+exit:
+ ret = -EAGAIN;
+done:
+ return ret;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
new file mode 100644
index 000000000000..2a6e3cd2d4e8
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
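+/* Validate SRQ attributes against the device limits, both at create time
+ * (IB_SRQ_INIT_MASK) and on modify (IB_SRQ_MAX_WR/IB_SRQ_LIMIT); max_wr
+ * and max_sge are rounded up to the driver minimums.
+ */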
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+{
+ if (srq && srq->error) {
+ pr_warn("srq in error state\n");
+ goto err1;
+ }
+
+ if (mask & IB_SRQ_MAX_WR) {
+ if (attr->max_wr > rxe->attr.max_srq_wr) {
+ pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
+ attr->max_wr, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->max_wr <= 0) {
+ pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
+ goto err1;
+ }
+
+ if (srq && srq->limit && (attr->max_wr < srq->limit)) {
+ pr_warn("max_wr (%d) < srq->limit (%d)\n",
+ attr->max_wr, srq->limit);
+ goto err1;
+ }
+
+ if (attr->max_wr < RXE_MIN_SRQ_WR)
+ attr->max_wr = RXE_MIN_SRQ_WR;
+ }
+
+ if (mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit > rxe->attr.max_srq_wr) {
+ pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
+ attr->srq_limit, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
+ pr_warn("srq_limit (%d) > cur limit(%d)\n",
+ attr->srq_limit,
+ srq->rq.queue->buf->index_mask);
+ goto err1;
+ }
+ }
+
+ if (mask == IB_SRQ_INIT_MASK) {
+ if (attr->max_sge > rxe->attr.max_srq_sge) {
+ pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
+ attr->max_sge, rxe->attr.max_srq_sge);
+ goto err1;
+ }
+
+ if (attr->max_sge < RXE_MIN_SRQ_SGE)
+ attr->max_sge = RXE_MIN_SRQ_SGE;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ int err;
+ int srq_wqe_size;
+ struct rxe_queue *q;
+
+ srq->ibsrq.event_handler = init->event_handler;
+ srq->ibsrq.srq_context = init->srq_context;
+ srq->limit = init->attr.srq_limit;
+ srq->srq_num = srq->pelem.index;
+ srq->rq.max_wr = init->attr.max_wr;
+ srq->rq.max_sge = init->attr.max_sge;
+
+ srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+
+ spin_lock_init(&srq->rq.producer_lock);
+ spin_lock_init(&srq->rq.consumer_lock);
+
+ q = rxe_queue_init(rxe, &srq->rq.max_wr,
+ srq_wqe_size);
+ if (!q) {
+ pr_warn("unable to allocate queue for srq\n");
+ return -ENOMEM;
+ }
+
+ srq->rq.queue = q;
+
+ err = do_mmap_info(rxe, udata, false, context, q->buf,
+ q->buf_size, &q->ip);
+ if (err)
+ return err;
+
+ if (udata && udata->outlen >= sizeof(struct mminfo) + sizeof(u32)) {
+ if (copy_to_user(udata->outbuf + sizeof(struct mminfo),
+ &srq->srq_num, sizeof(u32)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
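+/* Apply modified SRQ attributes. For a user-space resize the caller
+ * passes the address of its mminfo struct in udata so the new mmap
+ * information can be written back after the queue is resized.
+ */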
+int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_queue *q = srq->rq.queue;
+ struct mminfo mi = { .offset = 1, .size = 0};
+
+ if (mask & IB_SRQ_MAX_WR) {
+ /* Check that we can write the mminfo struct to user space */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 mi_addr;
+
+ /* Get address of user space mminfo struct */
+ err = ib_copy_from_udata(&mi_addr, udata,
+ sizeof(mi_addr));
+ if (err)
+ goto err1;
+
+ udata->outbuf = (void __user *)(unsigned long)mi_addr;
+ udata->outlen = sizeof(mi);
+
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *)udata->outbuf,
+ udata->outlen)) {
+ err = -EFAULT;
+ goto err1;
+ }
+ }
+
+ err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
+ rcv_wqe_size(srq->rq.max_sge),
+ srq->rq.queue->ip ?
+ srq->rq.queue->ip->context :
+ NULL,
+ udata, &srq->rq.producer_lock,
+ &srq->rq.consumer_lock);
+ if (err)
+ goto err2;
+ }
+
+ if (mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+
+ return 0;
+
+err2:
+ rxe_queue_cleanup(q);
+ srq->rq.queue = NULL;
+err1:
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
new file mode 100644
index 000000000000..cf8e77800046
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_net.h"
+
+/* Copy the argument and strip the trailing newline. Return the new length. */
+static int sanitize_arg(const char *val, char *intf, int intf_len)
+{
+ int len;
+
+ if (!val)
+ return 0;
+
+ /* Remove newline. */
+ for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
+ intf[len] = val[len];
+ intf[len] = 0;
+
+ if (len == 0 || (val[len] != 0 && val[len] != '\n'))
+ return 0;
+
+ return len;
+}
+
+static void rxe_set_port_state(struct net_device *ndev)
+{
+ struct rxe_dev *rxe = net_to_rxe(ndev);
+ bool is_up = netif_running(ndev) && netif_carrier_ok(ndev);
+
+ if (!rxe)
+ goto out;
+
+ if (is_up)
+ rxe_port_up(rxe);
+ else
+ rxe_port_down(rxe); /* down for unknown state */
+out:
+ return;
+}
+
+static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
+{
+ int len;
+ int err = 0;
+ char intf[32];
+ struct net_device *ndev = NULL;
+ struct rxe_dev *rxe;
+
+ len = sanitize_arg(val, intf, sizeof(intf));
+ if (!len) {
+ pr_err("rxe: add: invalid interface name\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ ndev = dev_get_by_name(&init_net, intf);
+ if (!ndev) {
+		pr_err("rxe: interface %s not found\n", intf);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (net_to_rxe(ndev)) {
+ pr_err("rxe: already configured on %s\n", intf);
+ err = -EINVAL;
+ goto err;
+ }
+
+ rxe = rxe_net_add(ndev);
+ if (!rxe) {
+ pr_err("rxe: failed to add %s\n", intf);
+ err = -EINVAL;
+ goto err;
+ }
+
+ rxe_set_port_state(ndev);
+ pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+err:
+ if (ndev)
+ dev_put(ndev);
+ return err;
+}
+
+static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
+{
+ int len;
+ char intf[32];
+ struct rxe_dev *rxe;
+
+ len = sanitize_arg(val, intf, sizeof(intf));
+ if (!len) {
+		pr_err("rxe: remove: invalid interface name\n");
+ return -EINVAL;
+ }
+
+ if (strncmp("all", intf, len) == 0) {
+		pr_info("rxe_sys: remove all\n");
+ rxe_remove_all();
+ return 0;
+ }
+
+ rxe = get_rxe_by_name(intf);
+
+ if (!rxe) {
+ pr_err("rxe: not configured on %s\n", intf);
+ return -EINVAL;
+ }
+
+ list_del(&rxe->list);
+ rxe_remove(rxe);
+
+ return 0;
+}
+
+static const struct kernel_param_ops rxe_add_ops = {
+ .set = rxe_param_set_add,
+};
+
+static const struct kernel_param_ops rxe_remove_ops = {
+ .set = rxe_param_set_remove,
+};
+
+module_param_cb(add, &rxe_add_ops, NULL, 0200);
+MODULE_PARM_DESC(add, "Create RXE device over network interface");
+module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
+MODULE_PARM_DESC(remove, "Remove RXE device over network interface");
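+/*
+ * Usage sketch (assuming the module is built as rdma_rxe): a soft RoCE
+ * device is bound to an Ethernet interface by writing the interface name
+ * to the "add" parameter and removed via "remove"; writing "all" removes
+ * every rxe device, e.g.
+ *
+ *   echo eth0 > /sys/module/rdma_rxe/parameters/add
+ *   echo eth0 > /sys/module/rdma_rxe/parameters/remove
+ *   echo all  > /sys/module/rdma_rxe/parameters/remove
+ */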
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
new file mode 100644
index 000000000000..1e19bf828a6e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+
+#include "rxe_task.h"
+
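+/* Call the task function directly in a loop until it returns non-zero,
+ * bypassing the tasklet state machine; only safe when the tasklet cannot
+ * run concurrently, e.g. while it is disabled.
+ */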
+int __rxe_do_task(struct rxe_task *task)
+{
+ int ret;
+
+ while ((ret = task->func(task->arg)) == 0)
+ ;
+
+ task->ret = ret;
+
+ return ret;
+}
+
+/*
+ * This locking handles a potential race where a second caller finds
+ * the task already running but arrives just after the last call to
+ * func, so its work would otherwise be missed.
+ */
+void rxe_do_task(unsigned long data)
+{
+ int cont;
+ int ret;
+ unsigned long flags;
+ struct rxe_task *task = (struct rxe_task *)data;
+
+ spin_lock_irqsave(&task->state_lock, flags);
+ switch (task->state) {
+ case TASK_STATE_START:
+ task->state = TASK_STATE_BUSY;
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ break;
+
+ case TASK_STATE_BUSY:
+ task->state = TASK_STATE_ARMED;
+		/* fall through */
+ case TASK_STATE_ARMED:
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ return;
+
+ default:
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ pr_warn("bad state = %d in rxe_do_task\n", task->state);
+ return;
+ }
+
+ do {
+ cont = 0;
+ ret = task->func(task->arg);
+
+ spin_lock_irqsave(&task->state_lock, flags);
+ switch (task->state) {
+ case TASK_STATE_BUSY:
+ if (ret)
+ task->state = TASK_STATE_START;
+ else
+ cont = 1;
+ break;
+
+		/* someone tried to run the task since the last time we
+		 * called func, so we will call it one more time regardless
+		 * of the return value
+		 */
+ case TASK_STATE_ARMED:
+ task->state = TASK_STATE_BUSY;
+ cont = 1;
+ break;
+
+ default:
+ pr_warn("bad state = %d in rxe_do_task\n",
+ task->state);
+ }
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ } while (cont);
+
+ task->ret = ret;
+}
+
+int rxe_init_task(void *obj, struct rxe_task *task,
+ void *arg, int (*func)(void *), char *name)
+{
+ task->obj = obj;
+ task->arg = arg;
+ task->func = func;
+ snprintf(task->name, sizeof(task->name), "%s", name);
+
+ tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
+
+ task->state = TASK_STATE_START;
+ spin_lock_init(&task->state_lock);
+
+ return 0;
+}
+
+void rxe_cleanup_task(struct rxe_task *task)
+{
+ tasklet_kill(&task->tasklet);
+}
+
+void rxe_run_task(struct rxe_task *task, int sched)
+{
+ if (sched)
+ tasklet_schedule(&task->tasklet);
+ else
+ rxe_do_task((unsigned long)task);
+}
+
+void rxe_disable_task(struct rxe_task *task)
+{
+ tasklet_disable(&task->tasklet);
+}
+
+void rxe_enable_task(struct rxe_task *task)
+{
+ tasklet_enable(&task->tasklet);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
new file mode 100644
index 000000000000..d14aa6daed05
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_TASK_H
+#define RXE_TASK_H
+
+enum {
+ TASK_STATE_START = 0,
+ TASK_STATE_BUSY = 1,
+ TASK_STATE_ARMED = 2,
+};
+
+/*
+ * data structure to describe a 'task' which is a short
+ * function that returns 0 as long as it needs to be
+ * called again.
+ */
+struct rxe_task {
+ void *obj;
+ struct tasklet_struct tasklet;
+ int state;
+ spinlock_t state_lock; /* spinlock for task state */
+ void *arg;
+ int (*func)(void *arg);
+ int ret;
+ char name[16];
+};
+
+/*
+ * init rxe_task structure
+ *	arg  => parameter to pass to func
+ *	func => function to call until it returns != 0
+ */
+int rxe_init_task(void *obj, struct rxe_task *task,
+ void *arg, int (*func)(void *), char *name);
+
+/* cleanup task */
+void rxe_cleanup_task(struct rxe_task *task);
+
+/*
+ * raw call to func in a loop without any state checking;
+ * may be called while tasklets are disabled
+ */
+int __rxe_do_task(struct rxe_task *task);
+
+/*
+ * common function called by any of the main tasklets.
+ * If there is any chance that there is additional
+ * work to do, someone must reschedule the task before
+ * leaving
+ */
+void rxe_do_task(unsigned long data);
+
+/* run a task directly or schedule it to run as a tasklet; the decision
+ * is based on the sched parameter.
+ */
+void rxe_run_task(struct rxe_task *task, int sched);
+
+/* keep a task from scheduling */
+void rxe_disable_task(struct rxe_task *task);
+
+/* allow task to run */
+void rxe_enable_task(struct rxe_task *task);
+
+#endif /* RXE_TASK_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
new file mode 100644
index 000000000000..4552be960c6a
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -0,0 +1,1330 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+static int rxe_query_device(struct ib_device *dev,
+ struct ib_device_attr *attr,
+ struct ib_udata *uhw)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+
+ *attr = rxe->attr;
+ return 0;
+}
+
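+/* Map the Ethernet link speed (in Mb/s) onto the closest InfiniBand
+ * active_width/active_speed pair reported through ib_port_attr.
+ */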
+static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
+ u8 *active_width)
+{
+ if (speed <= 1000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ } else if (speed <= 10000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_FDR10;
+ } else if (speed <= 20000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_DDR;
+ } else if (speed <= 30000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ } else if (speed <= 40000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR10;
+ } else {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ }
+}
+
+static int rxe_query_port(struct ib_device *dev,
+ u8 port_num, struct ib_port_attr *attr)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_port *port;
+ u32 speed;
+
+ if (unlikely(port_num != 1)) {
+ pr_warn("invalid port_number %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ *attr = port->attr;
+
+ mutex_lock(&rxe->usdev_lock);
+ if (rxe->ndev->ethtool_ops->get_link_ksettings) {
+ struct ethtool_link_ksettings ks;
+
+ rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
+ speed = ks.base.speed;
+ } else if (rxe->ndev->ethtool_ops->get_settings) {
+ struct ethtool_cmd cmd;
+
+ rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
+ speed = cmd.speed;
+ } else {
+ pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+ speed = 1000;
+ }
+ rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+ mutex_unlock(&rxe->usdev_lock);
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int rxe_query_gid(struct ib_device *device,
+ u8 port_num, int index, union ib_gid *gid)
+{
+ int ret;
+
+ if (index > RXE_PORT_GID_TBL_LEN)
+ return -EINVAL;
+
+ ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
+ if (ret == -EAGAIN) {
+ memcpy(gid, &zgid, sizeof(*gid));
+ return 0;
+ }
+
+ return ret;
+}
+
+static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
+ index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
+{
+ if (index >= RXE_PORT_GID_TBL_LEN)
+ return -EINVAL;
+ return 0;
+}
+
+static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
+ index, void **context)
+{
+ if (index >= RXE_PORT_GID_TBL_LEN)
+ return -EINVAL;
+ return 0;
+}
+
+static struct net_device *rxe_get_netdev(struct ib_device *device,
+ u8 port_num)
+{
+ struct rxe_dev *rxe = to_rdev(device);
+
+ if (rxe->ndev) {
+ dev_hold(rxe->ndev);
+ return rxe->ndev;
+ }
+
+ return NULL;
+}
+
+static int rxe_query_pkey(struct ib_device *device,
+ u8 port_num, u16 index, u16 *pkey)
+{
+ struct rxe_dev *rxe = to_rdev(device);
+ struct rxe_port *port;
+
+ if (unlikely(port_num != 1)) {
+ dev_warn(device->dma_device, "invalid port_num = %d\n",
+ port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ if (unlikely(index >= port->attr.pkey_tbl_len)) {
+ dev_warn(device->dma_device, "invalid index = %d\n",
+ index);
+ goto err1;
+ }
+
+ *pkey = port->pkey_tbl[index];
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int rxe_modify_device(struct ib_device *dev,
+ int mask, struct ib_device_modify *attr)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+
+ if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
+ rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(rxe->ib_dev.node_desc,
+ attr->node_desc, sizeof(rxe->ib_dev.node_desc));
+ }
+
+ return 0;
+}
+
+static int rxe_modify_port(struct ib_device *dev,
+ u8 port_num, int mask, struct ib_port_modify *attr)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_port *port;
+
+ if (unlikely(port_num != 1)) {
+ pr_warn("invalid port_num = %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ port->attr.port_cap_flags |= attr->set_port_cap_mask;
+ port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
+
+ if (mask & IB_PORT_RESET_QKEY_CNTR)
+ port->attr.qkey_viol_cntr = 0;
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
+ u8 port_num)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+
+ return rxe->ifc_ops->link_layer(rxe, port_num);
+}
+
+static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_ucontext *uc;
+
+ uc = rxe_alloc(&rxe->uc_pool);
+ return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
+}
+
+static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+{
+ struct rxe_ucontext *uc = to_ruc(ibuc);
+
+ rxe_drop_ref(uc);
+ return 0;
+}
+
+static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ int err;
+ struct ib_port_attr attr;
+
+ err = rxe_query_port(dev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_pd *pd;
+
+ pd = rxe_alloc(&rxe->pd_pool);
+ return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
+}
+
+static int rxe_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct rxe_pd *pd = to_rpd(ibpd);
+
+ rxe_drop_ref(pd);
+ return 0;
+}
+
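+/* Look up the source GID selected by the AH attributes and fill in the
+ * rxe_av address vector (GRH and IP addressing) from them.
+ */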
+static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
+ struct rxe_av *av)
+{
+ int err;
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+
+ err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
+ attr->grh.sgid_index, &sgid,
+ &sgid_attr);
+ if (err) {
+ pr_err("Failed to query sgid. err = %d\n", err);
+ return err;
+ }
+
+ err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
+ if (!err)
+ err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);
+
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+ return err;
+}
+
+static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_ah *ah;
+
+ err = rxe_av_chk_attr(rxe, attr);
+ if (err)
+ goto err1;
+
+ ah = rxe_alloc(&rxe->ah_pool);
+ if (!ah) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_ref(pd);
+ ah->pd = pd;
+
+ err = rxe_init_av(rxe, attr, &ah->av);
+ if (err)
+ goto err2;
+
+ return &ah->ibah;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_ref(ah);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibah->device);
+ struct rxe_ah *ah = to_rah(ibah);
+
+ err = rxe_av_chk_attr(rxe, attr);
+ if (err)
+ return err;
+
+ err = rxe_init_av(rxe, attr, &ah->av);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+ struct rxe_dev *rxe = to_rdev(ibah->device);
+ struct rxe_ah *ah = to_rah(ibah);
+
+ rxe_av_to_attr(rxe, &ah->av, attr);
+ return 0;
+}
+
+static int rxe_destroy_ah(struct ib_ah *ibah)
+{
+ struct rxe_ah *ah = to_rah(ibah);
+
+ rxe_drop_ref(ah->pd);
+ rxe_drop_ref(ah);
+ return 0;
+}
+
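+/* Copy one ib_recv_wr into the next free slot of a receive queue and
+ * publish it by advancing the producer index.
+ */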
+static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
+{
+ int err;
+ int i;
+ u32 length;
+ struct rxe_recv_wqe *recv_wqe;
+ int num_sge = ibwr->num_sge;
+
+ if (unlikely(queue_full(rq->queue))) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ if (unlikely(num_sge > rq->max_sge)) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ length = 0;
+ for (i = 0; i < num_sge; i++)
+ length += ibwr->sg_list[i].length;
+
+ recv_wqe = producer_addr(rq->queue);
+ recv_wqe->wr_id = ibwr->wr_id;
+ recv_wqe->num_sge = num_sge;
+
+ memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+ num_sge * sizeof(struct ib_sge));
+
+ recv_wqe->dma.length = length;
+ recv_wqe->dma.resid = length;
+ recv_wqe->dma.num_sge = num_sge;
+ recv_wqe->dma.cur_sge = 0;
+ recv_wqe->dma.sge_offset = 0;
+
+ /* make sure all changes to the work queue are written before we
+ * update the producer pointer
+ */
+ smp_wmb();
+
+ advance_producer(rq->queue);
+ return 0;
+
+err1:
+ return err;
+}
+
+static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *init,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_srq *srq;
+ struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+
+ err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+ if (err)
+ goto err1;
+
+ srq = rxe_alloc(&rxe->srq_pool);
+ if (!srq) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_index(srq);
+ rxe_add_ref(pd);
+ srq->pd = pd;
+
+ err = rxe_srq_from_init(rxe, srq, init, context, udata);
+ if (err)
+ goto err2;
+
+ return &srq->ibsrq;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_index(srq);
+ rxe_drop_ref(srq);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask mask,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ struct rxe_dev *rxe = to_rdev(ibsrq->device);
+
+ err = rxe_srq_chk_attr(rxe, srq, attr, mask);
+ if (err)
+ goto err1;
+
+ err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+
+ if (srq->error)
+ return -EINVAL;
+
+ attr->max_wr = srq->rq.queue->buf->index_mask;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+static int rxe_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+
+ if (srq->rq.queue)
+ rxe_queue_cleanup(srq->rq.queue);
+
+ rxe_drop_ref(srq->pd);
+ rxe_drop_index(srq);
+ rxe_drop_ref(srq);
+
+ return 0;
+}
+
+static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+
+ spin_lock_irqsave(&srq->rq.producer_lock, flags);
+
+ while (wr) {
+ err = post_one_recv(&srq->rq, wr);
+ if (unlikely(err))
+ break;
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+
+ if (err)
+ *bad_wr = wr;
+
+ return err;
+}
+
+static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_qp *qp;
+
+ err = rxe_qp_chk_init(rxe, init);
+ if (err)
+ goto err1;
+
+ qp = rxe_alloc(&rxe->qp_pool);
+ if (!qp) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ if (udata) {
+ if (udata->inlen) {
+ err = -EINVAL;
+ goto err1;
+ }
+ qp->is_user = 1;
+ }
+
+ rxe_add_index(qp);
+
+ err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
+ if (err)
+ goto err2;
+
+ return &qp->ibqp;
+
+err2:
+ rxe_drop_index(qp);
+ rxe_drop_ref(qp);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ err = rxe_qp_chk_attr(rxe, qp, attr, mask);
+ if (err)
+ goto err1;
+
+ err = rxe_qp_from_attr(qp, attr, mask, udata);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ rxe_qp_to_init(qp, init);
+ rxe_qp_to_attr(qp, attr, mask);
+
+ return 0;
+}
+
+static int rxe_destroy_qp(struct ib_qp *ibqp)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ rxe_qp_destroy(qp);
+ rxe_drop_index(qp);
+ rxe_drop_ref(qp);
+ return 0;
+}
+
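+/* Sanity-check a send WR against the send queue limits: SGE count,
+ * atomic length and alignment, and inline data size.
+ */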
+static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ unsigned int mask, unsigned int length)
+{
+ int num_sge = ibwr->num_sge;
+ struct rxe_sq *sq = &qp->sq;
+
+ if (unlikely(num_sge > sq->max_sge))
+ goto err1;
+
+ if (unlikely(mask & WR_ATOMIC_MASK)) {
+ if (length < 8)
+ goto err1;
+
+ if (atomic_wr(ibwr)->remote_addr & 0x7)
+ goto err1;
+ }
+
+ if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
+ (length > sq->max_inline)))
+ goto err1;
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+ struct ib_send_wr *ibwr)
+{
+ wr->wr_id = ibwr->wr_id;
+ wr->num_sge = ibwr->num_sge;
+ wr->opcode = ibwr->opcode;
+ wr->send_flags = ibwr->send_flags;
+
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_SMI ||
+ qp_type(qp) == IB_QPT_GSI) {
+ wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
+ wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+ if (qp_type(qp) == IB_QPT_GSI)
+ wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
+ if (wr->opcode == IB_WR_SEND_WITH_IMM)
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ } else {
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
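+			/* fall through */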
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
+ wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wr->wr.atomic.remote_addr =
+ atomic_wr(ibwr)->remote_addr;
+ wr->wr.atomic.compare_add =
+ atomic_wr(ibwr)->compare_add;
+ wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
+ wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
+ break;
+ case IB_WR_LOCAL_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ break;
+ case IB_WR_REG_MR:
+ wr->wr.reg.mr = reg_wr(ibwr)->mr;
+ wr->wr.reg.key = reg_wr(ibwr)->key;
+ wr->wr.reg.access = reg_wr(ibwr)->access;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ unsigned int mask, unsigned int length,
+ struct rxe_send_wqe *wqe)
+{
+ int num_sge = ibwr->num_sge;
+ struct ib_sge *sge;
+ int i;
+ u8 *p;
+
+ init_send_wr(qp, &wqe->wr, ibwr);
+
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_SMI ||
+ qp_type(qp) == IB_QPT_GSI)
+ memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
+
+ if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
+ p = wqe->dma.inline_data;
+
+ sge = ibwr->sg_list;
+ for (i = 0; i < num_sge; i++, sge++) {
+ if (qp->is_user && copy_from_user(p, (__user void *)
+ (uintptr_t)sge->addr, sge->length))
+ return -EFAULT;
+
+ else if (!qp->is_user)
+ memcpy(p, (void *)(uintptr_t)sge->addr,
+ sge->length);
+
+ p += sge->length;
+ }
+ } else if (mask & WR_REG_MASK) {
+ wqe->mask = mask;
+ wqe->state = wqe_state_posted;
+ return 0;
+ } else
+ memcpy(wqe->dma.sge, ibwr->sg_list,
+ num_sge * sizeof(struct ib_sge));
+
+ wqe->iova = (mask & WR_ATOMIC_MASK) ?
+ atomic_wr(ibwr)->remote_addr :
+ rdma_wr(ibwr)->remote_addr;
+ wqe->mask = mask;
+ wqe->dma.length = length;
+ wqe->dma.resid = length;
+ wqe->dma.num_sge = num_sge;
+ wqe->dma.cur_sge = 0;
+ wqe->dma.sge_offset = 0;
+ wqe->state = wqe_state_posted;
+ wqe->ssn = atomic_add_return(1, &qp->ssn);
+
+ return 0;
+}
+
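+/* Validate a send WR, copy it into the next send queue slot under the
+ * SQ lock and publish it by advancing the producer index.
+ */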
+static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ unsigned mask, u32 length)
+{
+ int err;
+ struct rxe_sq *sq = &qp->sq;
+ struct rxe_send_wqe *send_wqe;
+ unsigned long flags;
+
+ err = validate_send_wr(qp, ibwr, mask, length);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&qp->sq.sq_lock, flags);
+
+ if (unlikely(queue_full(sq->queue))) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ send_wqe = producer_addr(sq->queue);
+
+ err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
+ if (unlikely(err))
+ goto err1;
+
+ /*
+ * make sure all changes to the work queue are
+ * written before we update the producer pointer
+ */
+ smp_wmb();
+
+ advance_producer(sq->queue);
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+
+ return 0;
+
+err1:
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ return err;
+}
+
+static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int err = 0;
+ struct rxe_qp *qp = to_rqp(ibqp);
+ unsigned int mask;
+ unsigned int length = 0;
+ int i;
+ int must_sched;
+
+ if (unlikely(!qp->valid)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (unlikely(qp->req.state < QP_STATE_READY)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ mask = wr_opcode_mask(wr->opcode, qp);
+ if (unlikely(!mask)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
+ !(mask & WR_INLINE_MASK))) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ length = 0;
+ for (i = 0; i < wr->num_sge; i++)
+ length += wr->sg_list[i].length;
+
+ err = post_one_send(qp, wr, mask, length);
+
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+	/*
+	 * Must schedule in the GSI QP case because ib_send_mad() holds an
+	 * irq lock and the requester calls ip_local_out_sk(), which takes
+	 * spin_lock_bh.
+	 */
+ must_sched = (qp_type(qp) == IB_QPT_GSI) ||
+ (queue_count(qp->sq.queue) > 1);
+
+ rxe_run_task(&qp->req.task, must_sched);
+
+ return err;
+}
+
+static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_rq *rq = &qp->rq;
+ unsigned long flags;
+
+ if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
+ *bad_wr = wr;
+ err = -EINVAL;
+ goto err1;
+ }
+
+ if (unlikely(qp->srq)) {
+ *bad_wr = wr;
+ err = -EINVAL;
+ goto err1;
+ }
+
+ spin_lock_irqsave(&rq->producer_lock, flags);
+
+ while (wr) {
+ err = post_one_recv(rq, wr);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&rq->producer_lock, flags);
+
+err1:
+ return err;
+}
+
+static struct ib_cq *rxe_create_cq(struct ib_device *dev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_cq *cq;
+
+ if (attr->flags)
+ return ERR_PTR(-EINVAL);
+
+ err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
+ if (err)
+ goto err1;
+
+ cq = rxe_alloc(&rxe->cq_pool);
+ if (!cq) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
+ context, udata);
+ if (err)
+ goto err2;
+
+ return &cq->ibcq;
+
+err2:
+ rxe_drop_ref(cq);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_destroy_cq(struct ib_cq *ibcq)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+
+ rxe_drop_ref(cq);
+ return 0;
+}
+
+static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ int err;
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_dev *rxe = to_rdev(ibcq->device);
+
+ err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
+ if (err)
+ goto err1;
+
+ err = rxe_cq_resize_queue(cq, cqe, udata);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ int i;
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (i = 0; i < num_entries; i++) {
+ cqe = queue_head(cq->queue);
+ if (!cqe)
+ break;
+
+ memcpy(wc++, &cqe->ibwc, sizeof(*wc));
+ advance_consumer(cq->queue);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return i;
+}
+
+static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+ int count = queue_count(cq->queue);
+
+ return (count > wc_cnt) ? wc_cnt : count;
+}
+
+static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = flags & IB_CQ_SOLICITED_MASK;
+
+ return 0;
+}
+
+static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mem *mr;
+ int err;
+
+ mr = rxe_alloc(&rxe->mr_pool);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_index(mr);
+
+ rxe_add_ref(pd);
+
+ err = rxe_mem_init_dma(rxe, pd, access, mr);
+ if (err)
+ goto err2;
+
+ return &mr->ibmr;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+err1:
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ u64 start,
+ u64 length,
+ u64 iova,
+ int access, struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mem *mr;
+
+ mr = rxe_alloc(&rxe->mr_pool);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err2;
+ }
+
+ rxe_add_index(mr);
+
+ rxe_add_ref(pd);
+
+ err = rxe_mem_init_user(rxe, pd, start, length, iova,
+ access, udata, mr);
+ if (err)
+ goto err3;
+
+ return &mr->ibmr;
+
+err3:
+ rxe_drop_ref(pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+err2:
+ return ERR_PTR(err);
+}
+
+static int rxe_dereg_mr(struct ib_mr *ibmr)
+{
+ struct rxe_mem *mr = to_rmr(ibmr);
+
+ mr->state = RXE_MEM_STATE_ZOMBIE;
+ rxe_drop_ref(mr->pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+ return 0;
+}
+
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mem *mr;
+ int err;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = rxe_alloc(&rxe->mr_pool);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_index(mr);
+
+ rxe_add_ref(pd);
+
+ err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+ if (err)
+ goto err2;
+
+ return &mr->ibmr;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+err1:
+ return ERR_PTR(err);
+}
+
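+/* ib_sg_to_pages() callback used by rxe_map_mr_sg(): record one page
+ * address in the MR's map table.
+ */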
+static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ mr->nbuf++;
+
+ return 0;
+}
+
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct rxe_mem *mr = to_rmr(ibmr);
+ int n;
+
+ mr->nbuf = 0;
+
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+
+ mr->va = ibmr->iova;
+ mr->iova = ibmr->iova;
+ mr->length = ibmr->length;
+ mr->page_shift = ilog2(ibmr->page_size);
+ mr->page_mask = ibmr->page_size - 1;
+ mr->offset = mr->iova & mr->page_mask;
+
+ return n;
+}
+
+static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_mc_grp *grp;
+
+ /* takes a ref on grp if successful */
+ err = rxe_mcast_get_grp(rxe, mgid, &grp);
+ if (err)
+ return err;
+
+ err = rxe_mcast_add_grp_elem(rxe, qp, grp);
+
+ rxe_drop_ref(grp);
+ return err;
+}
+
+static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+{
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
+}
+
+static ssize_t rxe_show_parent(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct rxe_dev *rxe = container_of(device, struct rxe_dev,
+ ib_dev.dev);
+ char *name;
+
+ name = rxe->ifc_ops->parent_name(rxe, 1);
+ return snprintf(buf, 16, "%s\n", name);
+}
+
+static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);
+
+static struct device_attribute *rxe_dev_attributes[] = {
+ &dev_attr_parent,
+};
+
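+/* Fill in the ib_device callbacks and uverbs command mask for the soft
+ * RoCE device, register it with the IB core and create the "parent"
+ * sysfs attribute that reports the underlying network interface.
+ */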
+int rxe_register_device(struct rxe_dev *rxe)
+{
+ int err;
+ int i;
+ struct ib_device *dev = &rxe->ib_dev;
+
+ strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
+ strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+
+ dev->owner = THIS_MODULE;
+ dev->node_type = RDMA_NODE_IB_CA;
+ dev->phys_port_cnt = 1;
+ dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
+ dev->dma_device = rxe->ifc_ops->dma_device(rxe);
+ dev->local_dma_lkey = 0;
+ dev->node_guid = rxe->ifc_ops->node_guid(rxe);
+ dev->dma_ops = &rxe_dma_mapping_ops;
+
+ dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
+ dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
+ | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
+ | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
+ | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
+ | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
+ | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
+ ;
+
+ dev->query_device = rxe_query_device;
+ dev->modify_device = rxe_modify_device;
+ dev->query_port = rxe_query_port;
+ dev->modify_port = rxe_modify_port;
+ dev->get_link_layer = rxe_get_link_layer;
+ dev->query_gid = rxe_query_gid;
+ dev->get_netdev = rxe_get_netdev;
+ dev->add_gid = rxe_add_gid;
+ dev->del_gid = rxe_del_gid;
+ dev->query_pkey = rxe_query_pkey;
+ dev->alloc_ucontext = rxe_alloc_ucontext;
+ dev->dealloc_ucontext = rxe_dealloc_ucontext;
+ dev->mmap = rxe_mmap;
+ dev->get_port_immutable = rxe_port_immutable;
+ dev->alloc_pd = rxe_alloc_pd;
+ dev->dealloc_pd = rxe_dealloc_pd;
+ dev->create_ah = rxe_create_ah;
+ dev->modify_ah = rxe_modify_ah;
+ dev->query_ah = rxe_query_ah;
+ dev->destroy_ah = rxe_destroy_ah;
+ dev->create_srq = rxe_create_srq;
+ dev->modify_srq = rxe_modify_srq;
+ dev->query_srq = rxe_query_srq;
+ dev->destroy_srq = rxe_destroy_srq;
+ dev->post_srq_recv = rxe_post_srq_recv;
+ dev->create_qp = rxe_create_qp;
+ dev->modify_qp = rxe_modify_qp;
+ dev->query_qp = rxe_query_qp;
+ dev->destroy_qp = rxe_destroy_qp;
+ dev->post_send = rxe_post_send;
+ dev->post_recv = rxe_post_recv;
+ dev->create_cq = rxe_create_cq;
+ dev->destroy_cq = rxe_destroy_cq;
+ dev->resize_cq = rxe_resize_cq;
+ dev->poll_cq = rxe_poll_cq;
+ dev->peek_cq = rxe_peek_cq;
+ dev->req_notify_cq = rxe_req_notify_cq;
+ dev->get_dma_mr = rxe_get_dma_mr;
+ dev->reg_user_mr = rxe_reg_user_mr;
+ dev->dereg_mr = rxe_dereg_mr;
+ dev->alloc_mr = rxe_alloc_mr;
+ dev->map_mr_sg = rxe_map_mr_sg;
+ dev->attach_mcast = rxe_attach_mcast;
+ dev->detach_mcast = rxe_detach_mcast;
+
+ err = ib_register_device(dev, NULL);
+ if (err) {
+ pr_warn("rxe_register_device failed, err = %d\n", err);
+ goto err1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
+ err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
+ if (err) {
+ pr_warn("device_create_file failed, i = %d, err = %d\n",
+ i, err);
+ goto err2;
+ }
+ }
+
+ return 0;
+
+err2:
+ ib_unregister_device(dev);
+err1:
+ return err;
+}
+
+int rxe_unregister_device(struct rxe_dev *rxe)
+{
+ int i;
+ struct ib_device *dev = &rxe->ib_dev;
+
+ for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
+ device_remove_file(&dev->dev, rxe_dev_attributes[i]);
+
+ ib_unregister_device(dev);
+
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
new file mode 100644
index 000000000000..cac1d52a08f0
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_VERBS_H
+#define RXE_VERBS_H
+
+#include <linux/interrupt.h>
+#include <rdma/rdma_user_rxe.h>
+#include "rxe_pool.h"
+#include "rxe_task.h"
+
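+/* IB partition keys keep the key proper in the low 15 bits and use bit
+ * 15 as the full-membership flag; pkey_match() below treats two pkeys
+ * as matching when their 15-bit keys are equal and non-zero and at
+ * least one of them is a full member.
+ */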
+static inline int pkey_match(u16 key1, u16 key2)
+{
+ return (((key1 & 0x7fff) != 0) &&
+ ((key1 & 0x7fff) == (key2 & 0x7fff)) &&
+ ((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
+}
+
+/* Return >0 if psn_a > psn_b
+ * 0 if psn_a == psn_b
+ * <0 if psn_a < psn_b
+ */
+static inline int psn_compare(u32 psn_a, u32 psn_b)
+{
+ s32 diff;
+
+ diff = (psn_a - psn_b) << 8;
+ return diff;
+}
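+
+/* With 24-bit PSNs, shifting the u32 difference in psn_compare() left
+ * by 8 discards the high byte, so the sign of the resulting s32 follows
+ * the circular (mod 2^24) ordering; e.g. psn_compare(1, 0xffffff) is
+ * positive, i.e. PSN 1 is considered to follow PSN 0xffffff across the
+ * wrap.
+ */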
+
+struct rxe_ucontext {
+ struct rxe_pool_entry pelem;
+ struct ib_ucontext ibuc;
+};
+
+struct rxe_pd {
+ struct rxe_pool_entry pelem;
+ struct ib_pd ibpd;
+};
+
+struct rxe_ah {
+ struct rxe_pool_entry pelem;
+ struct ib_ah ibah;
+ struct rxe_pd *pd;
+ struct rxe_av av;
+};
+
+struct rxe_cqe {
+ union {
+ struct ib_wc ibwc;
+ struct ib_uverbs_wc uibwc;
+ };
+};
+
+struct rxe_cq {
+ struct rxe_pool_entry pelem;
+ struct ib_cq ibcq;
+ struct rxe_queue *queue;
+ spinlock_t cq_lock;
+ u8 notify;
+ int is_user;
+ struct tasklet_struct comp_task;
+};
+
+enum wqe_state {
+ wqe_state_posted,
+ wqe_state_processing,
+ wqe_state_pending,
+ wqe_state_done,
+ wqe_state_error,
+};
+
+struct rxe_sq {
+ int max_wr;
+ int max_sge;
+ int max_inline;
+ spinlock_t sq_lock; /* guard queue */
+ struct rxe_queue *queue;
+};
+
+struct rxe_rq {
+ int max_wr;
+ int max_sge;
+ spinlock_t producer_lock; /* guard queue producer */
+ spinlock_t consumer_lock; /* guard queue consumer */
+ struct rxe_queue *queue;
+};
+
+struct rxe_srq {
+ struct rxe_pool_entry pelem;
+ struct ib_srq ibsrq;
+ struct rxe_pd *pd;
+ struct rxe_rq rq;
+ u32 srq_num;
+
+ int limit;
+ int error;
+};
+
+enum rxe_qp_state {
+ QP_STATE_RESET,
+ QP_STATE_INIT,
+ QP_STATE_READY,
+ QP_STATE_DRAIN, /* req only */
+ QP_STATE_DRAINED, /* req only */
+ QP_STATE_ERROR
+};
+
+extern char *rxe_qp_state_name[];
+
+struct rxe_req_info {
+ enum rxe_qp_state state;
+ int wqe_index;
+ u32 psn;
+ int opcode;
+ atomic_t rd_atomic;
+ int wait_fence;
+ int need_rd_atomic;
+ int wait_psn;
+ int need_retry;
+ int noack_pkts;
+ struct rxe_task task;
+};
+
+struct rxe_comp_info {
+ u32 psn;
+ int opcode;
+ int timeout;
+ int timeout_retry;
+ u32 retry_cnt;
+ u32 rnr_retry;
+ struct rxe_task task;
+};
+
+enum rdatm_res_state {
+ rdatm_res_state_next,
+ rdatm_res_state_new,
+ rdatm_res_state_replay,
+};
+
+struct resp_res {
+ int type;
+ u32 first_psn;
+ u32 last_psn;
+ u32 cur_psn;
+ enum rdatm_res_state state;
+
+ union {
+ struct {
+ struct sk_buff *skb;
+ } atomic;
+ struct {
+ struct rxe_mem *mr;
+ u64 va_org;
+ u32 rkey;
+ u32 length;
+ u64 va;
+ u32 resid;
+ } read;
+ };
+};
+
+struct rxe_resp_info {
+ enum rxe_qp_state state;
+ u32 msn;
+ u32 psn;
+ int opcode;
+ int drop_msg;
+ int goto_error;
+ int sent_psn_nak;
+ enum ib_wc_status status;
+ u8 aeth_syndrome;
+
+ /* Receive only */
+ struct rxe_recv_wqe *wqe;
+
+ /* RDMA read / atomic only */
+ u64 va;
+ struct rxe_mem *mr;
+ u32 resid;
+ u32 rkey;
+ u64 atomic_orig;
+
+ /* SRQ only */
+ struct {
+ struct rxe_recv_wqe wqe;
+ struct ib_sge sge[RXE_MAX_SGE];
+ } srq_wqe;
+
+ /* Responder resources. It's a circular list where the oldest
+ * resource is dropped first.
+ */
+ struct resp_res *resources;
+ unsigned int res_head;
+ unsigned int res_tail;
+ struct resp_res *res;
+ struct rxe_task task;
+};
+
+struct rxe_qp {
+ struct rxe_pool_entry pelem;
+ struct ib_qp ibqp;
+ struct ib_qp_attr attr;
+ unsigned int valid;
+ unsigned int mtu;
+ int is_user;
+
+ struct rxe_pd *pd;
+ struct rxe_srq *srq;
+ struct rxe_cq *scq;
+ struct rxe_cq *rcq;
+
+ enum ib_sig_type sq_sig_type;
+
+ struct rxe_sq sq;
+ struct rxe_rq rq;
+
+ struct socket *sk;
+
+ struct rxe_av pri_av;
+ struct rxe_av alt_av;
+
+ /* list of mcast groups qp has joined (for cleanup) */
+ struct list_head grp_list;
+ spinlock_t grp_lock; /* guard grp_list */
+
+ struct sk_buff_head req_pkts;
+ struct sk_buff_head resp_pkts;
+ struct sk_buff_head send_pkts;
+
+ struct rxe_req_info req;
+ struct rxe_comp_info comp;
+ struct rxe_resp_info resp;
+
+ atomic_t ssn;
+ atomic_t skb_out;
+ int need_req_skb;
+
+ /* Timer for retransmitting packets when ACKs have been lost. RC
+ * only. The requester sets it when it is not already
+ * started. The responder resets it whenever an ack is
+ * received.
+ */
+ struct timer_list retrans_timer;
+ u64 qp_timeout_jiffies;
+
+ /* Timer for handling RNR NAKs. */
+ struct timer_list rnr_nak_timer;
+
+ spinlock_t state_lock; /* guard requester and completer */
+};
+
+enum rxe_mem_state {
+ RXE_MEM_STATE_ZOMBIE,
+ RXE_MEM_STATE_INVALID,
+ RXE_MEM_STATE_FREE,
+ RXE_MEM_STATE_VALID,
+};
+
+enum rxe_mem_type {
+ RXE_MEM_TYPE_NONE,
+ RXE_MEM_TYPE_DMA,
+ RXE_MEM_TYPE_MR,
+ RXE_MEM_TYPE_FMR,
+ RXE_MEM_TYPE_MW,
+};
+
+#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
+
+struct rxe_phys_buf {
+ u64 addr;
+ u64 size;
+};
+
+struct rxe_map {
+ struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
+};
+
+struct rxe_mem {
+ struct rxe_pool_entry pelem;
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+
+ struct rxe_pd *pd;
+ struct ib_umem *umem;
+
+ u32 lkey;
+ u32 rkey;
+
+ enum rxe_mem_state state;
+ enum rxe_mem_type type;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
+ int access;
+
+ int page_shift;
+ int page_mask;
+ int map_shift;
+ int map_mask;
+
+ u32 num_buf;
+ u32 nbuf;
+
+ u32 max_buf;
+ u32 num_map;
+
+ struct rxe_map **map;
+};
+
+struct rxe_mc_grp {
+ struct rxe_pool_entry pelem;
+ spinlock_t mcg_lock; /* guard group */
+ struct rxe_dev *rxe;
+ struct list_head qp_list;
+ union ib_gid mgid;
+ int num_qp;
+ u32 qkey;
+ u16 pkey;
+};
+
+struct rxe_mc_elem {
+ struct rxe_pool_entry pelem;
+ struct list_head qp_list;
+ struct list_head grp_list;
+ struct rxe_qp *qp;
+ struct rxe_mc_grp *grp;
+};
+
+struct rxe_port {
+ struct ib_port_attr attr;
+ u16 *pkey_tbl;
+ __be64 port_guid;
+ __be64 subnet_prefix;
+ spinlock_t port_lock; /* guard port */
+ unsigned int mtu_cap;
+ /* special QPs */
+ u32 qp_smi_index;
+ u32 qp_gsi_index;
+};
+
+/* callbacks from rdma_rxe to network interface layer */
+struct rxe_ifc_ops {
+ void (*release)(struct rxe_dev *rxe);
+ __be64 (*node_guid)(struct rxe_dev *rxe);
+ __be64 (*port_guid)(struct rxe_dev *rxe);
+ struct device *(*dma_device)(struct rxe_dev *rxe);
+ int (*mcast_add)(struct rxe_dev *rxe, union ib_gid *mgid);
+ int (*mcast_delete)(struct rxe_dev *rxe, union ib_gid *mgid);
+ int (*prepare)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 *crc);
+ int (*send)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
+ int (*loopback)(struct sk_buff *skb);
+ struct sk_buff *(*init_packet)(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt);
+ char *(*parent_name)(struct rxe_dev *rxe, unsigned int port_num);
+ enum rdma_link_layer (*link_layer)(struct rxe_dev *rxe,
+ unsigned int port_num);
+};
+
+struct rxe_dev {
+ struct ib_device ib_dev;
+ struct ib_device_attr attr;
+ int max_ucontext;
+ int max_inline_data;
+ struct kref ref_cnt;
+ struct mutex usdev_lock;
+
+ struct rxe_ifc_ops *ifc_ops;
+
+ struct net_device *ndev;
+
+ int xmit_errors;
+
+ struct rxe_pool uc_pool;
+ struct rxe_pool pd_pool;
+ struct rxe_pool ah_pool;
+ struct rxe_pool srq_pool;
+ struct rxe_pool qp_pool;
+ struct rxe_pool cq_pool;
+ struct rxe_pool mr_pool;
+ struct rxe_pool mw_pool;
+ struct rxe_pool mc_grp_pool;
+ struct rxe_pool mc_elem_pool;
+
+ spinlock_t pending_lock; /* guard pending_mmaps */
+ struct list_head pending_mmaps;
+
+ spinlock_t mmap_offset_lock; /* guard mmap_offset */
+ int mmap_offset;
+
+ struct rxe_port port;
+ struct list_head list;
+};
+
+static inline struct rxe_dev *to_rdev(struct ib_device *dev)
+{
+ return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
+}
+
+static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
+{
+ return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
+}
+
+static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
+{
+ return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
+}
+
+static inline struct rxe_ah *to_rah(struct ib_ah *ah)
+{
+ return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
+}
+
+static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
+{
+ return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
+}
+
+static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
+{
+ return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
+}
+
+static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
+{
+ return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
+}
+
+static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
+{
+ return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
+}
+
+static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
+{
+ return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
+}
+
+int rxe_register_device(struct rxe_dev *rxe);
+int rxe_unregister_device(struct rxe_dev *rxe);
+
+void rxe_mc_cleanup(void *arg);
+
+#endif /* RXE_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1502199c8e56..7b6d40ff1acf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -62,10 +62,8 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
{
struct ipoib_dev_priv *priv = netdev_priv(netdev);
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d", (int)(priv->ca->attrs.fw_ver >> 32),
- (int)(priv->ca->attrs.fw_ver >> 16) & 0xffff,
- (int)priv->ca->attrs.fw_ver & 0xffff);
+ ib_get_device_fw_str(priv->ca, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
sizeof(drvinfo->bus_info));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5f58c41ef787..74bcaa064226 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1967,8 +1967,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
priv->hca_caps = hca->attrs.device_cap_flags;
if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- priv->dev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ priv->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (priv->hca_caps & IB_DEVICE_UD_TSO)
priv->dev->hw_features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 1e7cbbaa15bd..c55ecb2c3736 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -135,7 +135,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.cap = {
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
- .max_send_sge = 1,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_sge,
+ MAX_SKB_FRAGS + 1),
.max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
@@ -205,10 +206,6 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
- if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge =
- min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
-
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
@@ -234,6 +231,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ if (init_attr.cap.max_send_sge > 1)
+ dev->features |= NETIF_F_SG;
+
priv->max_send_sge = init_attr.cap.max_send_sge;
return 0;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a990c04208c9..ba6be060a476 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -137,8 +137,6 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
- isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
- device->ib_device->attrs.max_sge_rd);
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index e512ba941f2f..fc791efe3a10 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -138,7 +138,6 @@ struct isert_conn {
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
- u32 max_sge;
struct iser_rx_desc *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 4a4155640d51..dfa23b075a88 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1601,6 +1601,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct ib_qp_init_attr *qp_init;
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
+ const struct ib_device_attr *attrs = &sdev->device->attrs;
u32 srp_sq_size = sport->port_attrib.srp_sq_size;
int ret;
@@ -1638,7 +1639,7 @@ retry:
*/
qp_init->cap.max_send_wr = srp_sq_size / 2;
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
- qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+ qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
ch->qp = ib_create_qp(sdev->pd, qp_init);
@@ -2261,7 +2262,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
container_of(cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_device *sdev = ch->sport->sdev;
- struct ib_send_wr send_wr, *first_wr = NULL, *bad_wr;
+ struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge;
enum srpt_command_state state;
unsigned long flags;
@@ -2302,11 +2303,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
- ch->sport->port, NULL,
- first_wr ? first_wr : &send_wr);
+ ch->sport->port, NULL, first_wr);
}
- } else {
- first_wr = &send_wr;
}
if (state != SRPT_STATE_MGMT)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 389030487da7..581878782854 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -106,7 +106,11 @@ enum {
SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
SRPT_DEF_SG_TABLESIZE = 128,
- SRPT_DEF_SG_PER_WQE = 16,
+ /*
+ * An experimentally determined value that avoids QP creation failures
+ * due to "swiotlb buffer is full" errors on systems using the swiotlb.
+ */
+ SRPT_MAX_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index a529a4535457..83af17ad0f1f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -115,6 +115,10 @@ static bool sticks_to_null;
module_param(sticks_to_null, bool, S_IRUGO);
MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
+static bool auto_poweroff = true;
+module_param(auto_poweroff, bool, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(auto_poweroff, "Power off wireless controllers on suspend");
+
static const struct xpad_device {
u16 idVendor;
u16 idProduct;
@@ -1248,6 +1252,36 @@ static void xpad_stop_input(struct usb_xpad *xpad)
usb_kill_urb(xpad->irq_in);
}
+static void xpad360w_poweroff_controller(struct usb_xpad *xpad)
+{
+ unsigned long flags;
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x08;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out poweroff now */
+ xpad->last_out_packet = -1;
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+}
+
static int xpad360w_start_input(struct usb_xpad *xpad)
{
int error;
@@ -1590,6 +1624,15 @@ static int xpad_suspend(struct usb_interface *intf, pm_message_t message)
* or goes away.
*/
xpad360w_stop_input(xpad);
+
+ /*
+ * The wireless adapter is going off now, so the
+ * gamepads are going to become disconnected.
+ * Unless explicitly disabled, power them down
+ * so they don't just sit there flashing.
+ */
+ if (auto_poweroff && xpad->pad_present)
+ xpad360w_poweroff_controller(xpad);
} else {
mutex_lock(&input->mutex);
if (input->users)
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index b637f1af842e..997e3e97f573 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -101,7 +101,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
priv->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1");
+ syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
@@ -181,7 +181,7 @@ static int clps711x_keypad_remove(struct platform_device *pdev)
}
static const struct of_device_id clps711x_keypad_of_match[] = {
- { .compatible = "cirrus,clps711x-keypad", },
+ { .compatible = "cirrus,ep7209-keypad", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_keypad_of_match);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index b01966dc7eb3..4b0878f35471 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -186,7 +186,7 @@ static irqreturn_t cros_ec_keyb_irq(int irq, void *data)
if (ret >= 0)
cros_ec_keyb_process(ckdev, kb_state, ret);
else
- dev_err(ec->dev, "failed to get keyboard state: %d\n", ret);
+ dev_err(ckdev->dev, "failed to get keyboard state: %d\n", ret);
return IRQ_HANDLED;
}
@@ -236,7 +236,7 @@ static void cros_ec_keyb_compute_valid_keys(struct cros_ec_keyb *ckdev)
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
- struct device *dev = ec->dev;
+ struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
struct input_dev *idev;
struct device_node *np;
@@ -246,23 +246,22 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- ckdev = devm_kzalloc(&pdev->dev, sizeof(*ckdev), GFP_KERNEL);
+ ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
return -ENOMEM;
- err = matrix_keypad_parse_of_params(&pdev->dev, &ckdev->rows,
- &ckdev->cols);
+ err = matrix_keypad_parse_of_params(dev, &ckdev->rows, &ckdev->cols);
if (err)
return err;
- ckdev->valid_keys = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ ckdev->valid_keys = devm_kzalloc(dev, ckdev->cols, GFP_KERNEL);
if (!ckdev->valid_keys)
return -ENOMEM;
- ckdev->old_kb_state = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ ckdev->old_kb_state = devm_kzalloc(dev, ckdev->cols, GFP_KERNEL);
if (!ckdev->old_kb_state)
return -ENOMEM;
- idev = devm_input_allocate_device(&pdev->dev);
+ idev = devm_input_allocate_device(dev);
if (!idev)
return -ENOMEM;
@@ -273,7 +272,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
ckdev->ec = ec;
ckdev->dev = dev;
- dev_set_drvdata(&pdev->dev, ckdev);
+ dev_set_drvdata(dev, ckdev);
idev->name = CROS_EC_DEV_NAME;
idev->phys = ec->phys_name;
@@ -282,7 +281,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
idev->id.bustype = BUS_VIRTUAL;
idev->id.version = 1;
idev->id.product = 0;
- idev->dev.parent = &pdev->dev;
+ idev->dev.parent = dev;
idev->open = cros_ec_keyb_open;
idev->close = cros_ec_keyb_close;
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index c7fc8d4fb080..1588aecafff7 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -28,6 +28,11 @@
#define DRV_NAME "rotary-encoder"
+enum rotary_encoder_encoding {
+ ROTENC_GRAY,
+ ROTENC_BINARY,
+};
+
struct rotary_encoder {
struct input_dev *input;
@@ -37,6 +42,7 @@ struct rotary_encoder {
u32 axis;
bool relative_axis;
bool rollover;
+ enum rotary_encoder_encoding encoding;
unsigned int pos;
@@ -57,8 +63,9 @@ static unsigned int rotary_encoder_get_state(struct rotary_encoder *encoder)
for (i = 0; i < encoder->gpios->ndescs; ++i) {
int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
+
/* convert from gray encoding to normal */
- if (ret & 1)
+ if (encoder->encoding == ROTENC_GRAY && ret & 1)
val = !val;
ret = ret << 1 | val;
@@ -213,6 +220,20 @@ static int rotary_encoder_probe(struct platform_device *pdev)
encoder->rollover =
device_property_read_bool(dev, "rotary-encoder,rollover");
+ if (!device_property_present(dev, "rotary-encoder,encoding") ||
+ !device_property_match_string(dev, "rotary-encoder,encoding",
+ "gray")) {
+ dev_info(dev, "gray");
+ encoder->encoding = ROTENC_GRAY;
+ } else if (!device_property_match_string(dev, "rotary-encoder,encoding",
+ "binary")) {
+ dev_info(dev, "binary");
+ encoder->encoding = ROTENC_BINARY;
+ } else {
+ dev_err(dev, "unknown encoding setting\n");
+ return -EINVAL;
+ }
+
device_property_read_u32(dev, "linux,axis", &encoder->axis);
encoder->relative_axis =
device_property_read_bool(dev, "rotary-encoder,relative-axis");
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 2f589857a039..d15b33813021 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,8 @@
* Copyright (c) 2013 ELAN Microelectronics Corp.
*
* Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.6.0
+ * Author: KT Liao <kt.liao@emc.com.tw>
+ * Version: 1.6.2
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +41,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.6.1"
+#define ELAN_DRIVER_VERSION "1.6.2"
#define ELAN_VENDOR_ID 0x04f3
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
@@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data)
return error;
}
+static int elan_query_product(struct elan_tp_data *data)
+{
+ int error;
+
+ error = data->ops->get_product_id(data->client, &data->product_id);
+ if (error)
+ return error;
+
+ error = data->ops->get_sm_version(data->client, &data->ic_type,
+ &data->sm_version);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
+{
+ if (data->ic_type != 0x0E)
+ return false;
+
+ switch (data->product_id) {
+ case 0x05 ... 0x07:
+ case 0x09:
+ case 0x13:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int __elan_initialize(struct elan_tp_data *data)
{
struct i2c_client *client = data->client;
+ bool woken_up = false;
int error;
error = data->ops->initialize(client);
@@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data)
return error;
}
+ error = elan_query_product(data);
+ if (error)
+ return error;
+
+ /*
+ * Some ASUS devices were shipped with firmware that requires
+ * touchpads to be woken up first, before attempting to switch
+ * them into absolute reporting mode.
+ */
+ if (elan_check_ASUS_special_fw(data)) {
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
+
+ msleep(200);
+ woken_up = true;
+ }
+
data->mode |= ETP_ENABLE_ABS;
error = data->ops->set_mode(client, data->mode);
if (error) {
@@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data)
return error;
}
- error = data->ops->sleep_control(client, false);
- if (error) {
- dev_err(&client->dev,
- "failed to wake device up: %d\n", error);
- return error;
+ if (!woken_up) {
+ error = data->ops->sleep_control(client, false);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to wake device up: %d\n", error);
+ return error;
+ }
}
return 0;
@@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
{
int error;
- error = data->ops->get_product_id(data->client, &data->product_id);
- if (error)
- return error;
-
error = data->ops->get_version(data->client, false, &data->fw_version);
if (error)
return error;
@@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = data->ops->get_sm_version(data->client, &data->ic_type,
- &data->sm_version);
- if (error)
- return error;
-
error = data->ops->get_version(data->client, true, &data->iap_version);
if (error)
return error;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 615d23ec0d8e..08e252a42480 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -222,12 +222,8 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
*/
static void elantech_packet_dump(struct psmouse *psmouse)
{
- int i;
-
- psmouse_printk(KERN_DEBUG, psmouse, "PS/2 packet [");
- for (i = 0; i < psmouse->pktsize; i++)
- printk("%s0x%02x ", i ? ", " : " ", psmouse->packet[i]);
- printk("]\n");
+ psmouse_printk(KERN_DEBUG, psmouse, "PS/2 packet [%*ph]\n",
+ psmouse->pktsize, psmouse->packet);
}
/*
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 253df96be427..a73580654c6b 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -232,10 +232,7 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
device_del(&fn->dev);
-
- if (fn->dev.of_node)
- of_node_put(fn->dev.of_node);
-
+ of_node_put(fn->dev.of_node);
put_device(&fn->dev);
}
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 454195709a82..b4d34086e73f 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1277,6 +1277,7 @@ static int __init i8042_create_kbd_port(void)
serio->start = i8042_start;
serio->stop = i8042_stop;
serio->close = i8042_port_close;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
@@ -1373,21 +1374,6 @@ static void i8042_unregister_ports(void)
}
}
-/*
- * Checks whether port belongs to i8042 controller.
- */
-bool i8042_check_port_owner(const struct serio *port)
-{
- int i;
-
- for (i = 0; i < I8042_NUM_PORTS; i++)
- if (i8042_ports[i].serio == port)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(i8042_check_port_owner);
-
static void i8042_free_irqs(void)
{
if (i8042_aux_irq_registered)
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 316f2c897101..83e9c663aa67 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte);
void ps2_begin_command(struct ps2dev *ps2dev)
{
- mutex_lock(&ps2dev->cmd_mutex);
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_lock_chip();
+ mutex_lock(m);
}
EXPORT_SYMBOL(ps2_begin_command);
void ps2_end_command(struct ps2dev *ps2dev)
{
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_unlock_chip();
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- mutex_unlock(&ps2dev->cmd_mutex);
+ mutex_unlock(m);
}
EXPORT_SYMBOL(ps2_end_command);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index ee02dc7422bd..2fb1f430a431 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1059,6 +1059,31 @@ config TOUCHSCREEN_RM_TS
To compile this driver as a module, choose M here: the
module will be called raydium_i2c_ts.
+config TOUCHSCREEN_SILEAD
+ tristate "Silead I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Silead touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called silead.
+
+config TOUCHSCREEN_SIS_I2C
+ tristate "SiS 9200 family I2C touchscreen"
+ depends on I2C
+ select CRC_ITU_T
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ This enables support for the SiS 9200 family of I2C-based touchscreens.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sis_i2c.
+
config TOUCHSCREEN_ST1232
tristate "Sitronix ST1232 touchscreen controllers"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 3315882905f7..b4373d6be402 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_RM_TS) += raydium_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o
+obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index ddf694b9fffc..fe4848bd1f4c 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -169,7 +169,7 @@ static ssize_t ili210x_calibrate(struct device *dev,
return count;
}
-static DEVICE_ATTR(calibrate, 0644, NULL, ili210x_calibrate);
+static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate);
static struct attribute *ili210x_attributes[] = {
&dev_attr_calibrate.attr,
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
new file mode 100644
index 000000000000..7379fe153cf9
--- /dev/null
+++ b/drivers/input/touchscreen/silead.c
@@ -0,0 +1,565 @@
+/* -------------------------------------------------------------------------
+ * Copyright (C) 2014-2015, Intel Corporation
+ *
+ * Derived from:
+ * gslX68X.c
+ * Copyright (C) 2010-2015, Shanghai Sileadinc Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * -------------------------------------------------------------------------
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/pm.h>
+#include <linux/irq.h>
+
+#include <asm/unaligned.h>
+
+#define SILEAD_TS_NAME "silead_ts"
+
+#define SILEAD_REG_RESET 0xE0
+#define SILEAD_REG_DATA 0x80
+#define SILEAD_REG_TOUCH_NR 0x80
+#define SILEAD_REG_POWER 0xBC
+#define SILEAD_REG_CLOCK 0xE4
+#define SILEAD_REG_STATUS 0xB0
+#define SILEAD_REG_ID 0xFC
+#define SILEAD_REG_MEM_CHECK 0xB0
+
+#define SILEAD_STATUS_OK 0x5A5A5A5A
+#define SILEAD_TS_DATA_LEN 44
+#define SILEAD_CLOCK 0x04
+
+#define SILEAD_CMD_RESET 0x88
+#define SILEAD_CMD_START 0x00
+
+#define SILEAD_POINT_DATA_LEN 0x04
+#define SILEAD_POINT_Y_OFF 0x00
+#define SILEAD_POINT_Y_MSB_OFF 0x01
+#define SILEAD_POINT_X_OFF 0x02
+#define SILEAD_POINT_X_MSB_OFF 0x03
+#define SILEAD_TOUCH_ID_MASK 0xF0
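+/* Each reported point is 4 bytes: a 12-bit little-endian Y value, a
+ * 12-bit little-endian X value, and the touch id in the high nibble of
+ * the X MSB (see silead_ts_read_data()).
+ */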
+
+#define SILEAD_CMD_SLEEP_MIN 10000
+#define SILEAD_CMD_SLEEP_MAX 20000
+#define SILEAD_POWER_SLEEP 20
+#define SILEAD_STARTUP_SLEEP 30
+
+#define SILEAD_MAX_FINGERS 10
+
+enum silead_ts_power {
+ SILEAD_POWER_ON = 1,
+ SILEAD_POWER_OFF = 0
+};
+
+struct silead_ts_data {
+ struct i2c_client *client;
+ struct gpio_desc *gpio_power;
+ struct input_dev *input;
+ char fw_name[64];
+ struct touchscreen_properties prop;
+ u32 max_fingers;
+ u32 chip_id;
+ struct input_mt_pos pos[SILEAD_MAX_FINGERS];
+ int slots[SILEAD_MAX_FINGERS];
+ int id[SILEAD_MAX_FINGERS];
+};
+
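+/* The firmware image is consumed as a flat array of these {offset,
+ * value} pairs; silead_ts_load_fw() writes each 32-bit value to the
+ * register selected by the corresponding offset.
+ */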
+struct silead_fw_data {
+ u32 offset;
+ u32 val;
+};
+
+static int silead_ts_request_input_dev(struct silead_ts_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+
+ data->input = devm_input_allocate_device(dev);
+ if (!data->input) {
+ dev_err(dev,
+ "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input_set_abs_params(data->input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+ input_set_abs_params(data->input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ touchscreen_parse_properties(data->input, true, &data->prop);
+
+ input_mt_init_slots(data->input, data->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
+
+ data->input->name = SILEAD_TS_NAME;
+ data->input->phys = "input/ts";
+ data->input->id.bustype = BUS_I2C;
+
+ error = input_register_device(data->input);
+ if (error) {
+ dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void silead_ts_set_power(struct i2c_client *client,
+ enum silead_ts_power state)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+
+ if (data->gpio_power) {
+ gpiod_set_value_cansleep(data->gpio_power, state);
+ msleep(SILEAD_POWER_SLEEP);
+ }
+}
+
+static void silead_ts_read_data(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ struct input_dev *input = data->input;
+ struct device *dev = &client->dev;
+ u8 *bufp, buf[SILEAD_TS_DATA_LEN];
+ int touch_nr, error, i;
+
+ error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_DATA,
+ SILEAD_TS_DATA_LEN, buf);
+ if (error < 0) {
+ dev_err(dev, "Data read error %d\n", error);
+ return;
+ }
+
+ touch_nr = buf[0];
+ if (touch_nr > data->max_fingers) {
+ dev_warn(dev, "More touches reported than supported %d > %d\n",
+ touch_nr, data->max_fingers);
+ touch_nr = data->max_fingers;
+ }
+
+ bufp = buf + SILEAD_POINT_DATA_LEN;
+ for (i = 0; i < touch_nr; i++, bufp += SILEAD_POINT_DATA_LEN) {
+ /* Bits 4-7 are the touch id */
+ data->id[i] = (bufp[SILEAD_POINT_X_MSB_OFF] &
+ SILEAD_TOUCH_ID_MASK) >> 4;
+ touchscreen_set_mt_pos(&data->pos[i], &data->prop,
+ get_unaligned_le16(&bufp[SILEAD_POINT_X_OFF]) & 0xfff,
+ get_unaligned_le16(&bufp[SILEAD_POINT_Y_OFF]) & 0xfff);
+ }
+
+ input_mt_assign_slots(input, data->slots, data->pos, touch_nr, 0);
+
+ for (i = 0; i < touch_nr; i++) {
+ input_mt_slot(input, data->slots[i]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, data->pos[i].x);
+ input_report_abs(input, ABS_MT_POSITION_Y, data->pos[i].y);
+
+ dev_dbg(dev, "x=%d y=%d hw_id=%d sw_id=%d\n", data->pos[i].x,
+ data->pos[i].y, data->id[i], data->slots[i]);
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int silead_ts_init(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+ SILEAD_CMD_RESET);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
+ data->max_fingers);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_CLOCK,
+ SILEAD_CLOCK);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+ SILEAD_CMD_START);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ return 0;
+
+i2c_write_err:
+ dev_err(&client->dev, "Registers clear error %d\n", error);
+ return error;
+}
+
+static int silead_ts_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+ SILEAD_CMD_RESET);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_CLOCK,
+ SILEAD_CLOCK);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_POWER,
+ SILEAD_CMD_START);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ return 0;
+
+i2c_write_err:
+ dev_err(&client->dev, "Chip reset error %d\n", error);
+ return error;
+}
+
+static int silead_ts_startup(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET, 0x00);
+ if (error) {
+ dev_err(&client->dev, "Startup error %d\n", error);
+ return error;
+ }
+
+ msleep(SILEAD_STARTUP_SLEEP);
+
+ return 0;
+}
+
+static int silead_ts_load_fw(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ unsigned int fw_size, i;
+ const struct firmware *fw;
+ struct silead_fw_data *fw_data;
+ int error;
+
+ dev_dbg(dev, "Firmware file name: %s", data->fw_name);
+
+ error = request_firmware(&fw, data->fw_name, dev);
+ if (error) {
+ dev_err(dev, "Firmware request error %d\n", error);
+ return error;
+ }
+
+ fw_size = fw->size / sizeof(*fw_data);
+ fw_data = (struct silead_fw_data *)fw->data;
+
+ for (i = 0; i < fw_size; i++) {
+ error = i2c_smbus_write_i2c_block_data(client,
+ fw_data[i].offset,
+ 4,
+ (u8 *)&fw_data[i].val);
+ if (error) {
+ dev_err(dev, "Firmware load error %d\n", error);
+ break;
+ }
+ }
+
+ release_firmware(fw);
+ return error ?: 0;
+}
+
+static u32 silead_ts_get_status(struct i2c_client *client)
+{
+ int error;
+ __le32 status;
+
+ error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_STATUS,
+ sizeof(status), (u8 *)&status);
+ if (error < 0) {
+ dev_err(&client->dev, "Status read error %d\n", error);
+ return error;
+ }
+
+ return le32_to_cpu(status);
+}
+
+static int silead_ts_get_id(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ __le32 chip_id;
+ int error;
+
+ error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
+ sizeof(chip_id), (u8 *)&chip_id);
+ if (error < 0) {
+ dev_err(&client->dev, "Chip ID read error %d\n", error);
+ return error;
+ }
+
+ data->chip_id = le32_to_cpu(chip_id);
+ dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
+
+ return 0;
+}
+
+static int silead_ts_setup(struct i2c_client *client)
+{
+ int error;
+ u32 status;
+
+ silead_ts_set_power(client, SILEAD_POWER_OFF);
+ silead_ts_set_power(client, SILEAD_POWER_ON);
+
+ error = silead_ts_get_id(client);
+ if (error)
+ return error;
+
+ error = silead_ts_init(client);
+ if (error)
+ return error;
+
+ error = silead_ts_reset(client);
+ if (error)
+ return error;
+
+ error = silead_ts_load_fw(client);
+ if (error)
+ return error;
+
+ error = silead_ts_startup(client);
+ if (error)
+ return error;
+
+ status = silead_ts_get_status(client);
+ if (status != SILEAD_STATUS_OK) {
+ dev_err(&client->dev,
+ "Initialization error, status: 0x%X\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static irqreturn_t silead_ts_threaded_irq_handler(int irq, void *id)
+{
+ struct silead_ts_data *data = id;
+ struct i2c_client *client = data->client;
+
+ silead_ts_read_data(client);
+
+ return IRQ_HANDLED;
+}
+
+static void silead_ts_read_props(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+ const char *str;
+ int error;
+
+ error = device_property_read_u32(dev, "silead,max-fingers",
+ &data->max_fingers);
+ if (error) {
+ dev_dbg(dev, "Max fingers read error %d\n", error);
+ data->max_fingers = 5; /* Most devices handle up to 5 fingers */
+ }
+
+ error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+ if (!error)
+ snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+ else
+ dev_dbg(dev, "Firmware file name read error. Using default.");
+}
+
+#ifdef CONFIG_ACPI
+static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
+ const struct i2c_device_id *id)
+{
+ const struct acpi_device_id *acpi_id;
+ struct device *dev = &data->client->dev;
+ int i;
+
+ if (ACPI_HANDLE(dev)) {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+
+ snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
+ acpi_id->id);
+
+ for (i = 0; i < strlen(data->fw_name); i++)
+ data->fw_name[i] = tolower(data->fw_name[i]);
+ } else {
+ snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
+ id->name);
+ }
+
+ return 0;
+}
+#else
+static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
+ const struct i2c_device_id *id)
+{
+ snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+ return 0;
+}
+#endif
+
+static int silead_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct silead_ts_data *data;
+ struct device *dev = &client->dev;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK |
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ dev_err(dev, "I2C functionality check failed\n");
+ return -ENXIO;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ error = silead_ts_set_default_fw_name(data, id);
+ if (error)
+ return error;
+
+ silead_ts_read_props(client);
+
+ /* We must have the IRQ provided by DT or ACPI subsystem */
+ if (client->irq <= 0)
+ return -ENODEV;
+
+ /* Power GPIO pin */
+ data->gpio_power = gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpio_power)) {
+ if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
+ dev_err(dev, "Shutdown GPIO request failed\n");
+ return PTR_ERR(data->gpio_power);
+ }
+
+ error = silead_ts_setup(client);
+ if (error)
+ return error;
+
+ error = silead_ts_request_input_dev(data);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, silead_ts_threaded_irq_handler,
+ IRQF_ONESHOT, client->name, data);
+ if (error) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "IRQ request failed %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused silead_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ silead_ts_set_power(client, SILEAD_POWER_OFF);
+ return 0;
+}
+
+static int __maybe_unused silead_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int error, status;
+
+ silead_ts_set_power(client, SILEAD_POWER_ON);
+
+ error = silead_ts_reset(client);
+ if (error)
+ return error;
+
+ error = silead_ts_startup(client);
+ if (error)
+ return error;
+
+ status = silead_ts_get_status(client);
+ if (status != SILEAD_STATUS_OK) {
+ dev_err(dev, "Resume error, status: 0x%02x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(silead_ts_pm, silead_ts_suspend, silead_ts_resume);
+
+static const struct i2c_device_id silead_ts_id[] = {
+ { "gsl1680", 0 },
+ { "gsl1688", 0 },
+ { "gsl3670", 0 },
+ { "gsl3675", 0 },
+ { "gsl3692", 0 },
+ { "mssl1680", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, silead_ts_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id silead_ts_acpi_match[] = {
+ { "GSL1680", 0 },
+ { "GSL1688", 0 },
+ { "GSL3670", 0 },
+ { "GSL3675", 0 },
+ { "GSL3692", 0 },
+ { "MSSL1680", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
+#endif
+
+static struct i2c_driver silead_ts_driver = {
+ .probe = silead_ts_probe,
+ .id_table = silead_ts_id,
+ .driver = {
+ .name = SILEAD_TS_NAME,
+ .acpi_match_table = ACPI_PTR(silead_ts_acpi_match),
+ .pm = &silead_ts_pm,
+ },
+};
+module_i2c_driver(silead_ts_driver);
+
+MODULE_AUTHOR("Robert Dolca <robert.dolca@intel.com>");
+MODULE_DESCRIPTION("Silead I2C touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/sis_i2c.c b/drivers/input/touchscreen/sis_i2c.c
new file mode 100644
index 000000000000..8d93f8c9a403
--- /dev/null
+++ b/drivers/input/touchscreen/sis_i2c.c
@@ -0,0 +1,413 @@
+/*
+ * Touch Screen driver for SiS 9200 family I2C Touch panels
+ *
+ * Copyright (C) 2015 SiS, Inc.
+ * Copyright (C) 2016 Nextfour Group
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/crc-itu-t.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define SIS_I2C_NAME "sis_i2c_ts"
+
+/*
+ * The I2C packet format:
+ * le16 byte count
+ * u8 Report ID
+ * <contact data - variable length>
+ * u8 Number of contacts
+ * le16 Scan Time (optional)
+ * le16 CRC
+ *
+ * One touch point information consists of 6+ bytes, the order is:
+ * u8 contact state
+ * u8 finger id
+ * le16 x axis
+ * le16 y axis
+ * u8 contact width (optional)
+ * u8 contact height (optional)
+ * u8 pressure (optional)
+ *
+ * The maximum amount of data transmitted in one shot is 64 bytes. If the
+ * controller needs to report more contacts than fit in one packet, it sends
+ * the true number of contacts in the first packet and 0 as the number of
+ * contacts in the second packet.
+ */
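+
+/*
+ * For illustration, a hypothetical single-contact touch report with none
+ * of the optional fields would occupy 12 bytes:
+ *   [len lo][len hi] [report id]
+ *   [state][finger id][x lo][x hi][y lo][y hi]
+ *   [contact count = 1] [crc lo][crc hi]
+ */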
+
+#define SIS_MAX_PACKET_SIZE 64
+
+#define SIS_PKT_LEN_OFFSET 0
+#define SIS_PKT_REPORT_OFFSET 2 /* Report ID/type */
+#define SIS_PKT_CONTACT_OFFSET 3 /* First contact */
+
+#define SIS_SCAN_TIME_LEN 2
+
+/* Supported report types */
+#define SIS_ALL_IN_ONE_PACKAGE 0x10
+#define SIS_PKT_IS_TOUCH(x) (((x) & 0x0f) == 0x01)
+#define SIS_PKT_IS_HIDI2C(x) (((x) & 0x0f) == 0x06)
+
+/* Contact properties within report */
+#define SIS_PKT_HAS_AREA(x) ((x) & BIT(4))
+#define SIS_PKT_HAS_PRESSURE(x) ((x) & BIT(5))
+#define SIS_PKT_HAS_SCANTIME(x) ((x) & BIT(6))
+
+/* Contact size */
+#define SIS_BASE_LEN_PER_CONTACT 6
+#define SIS_AREA_LEN_PER_CONTACT 2
+#define SIS_PRESSURE_LEN_PER_CONTACT 1
+
+/* Offsets within contact data */
+#define SIS_CONTACT_STATUS_OFFSET 0
+#define SIS_CONTACT_ID_OFFSET 1 /* Contact ID */
+#define SIS_CONTACT_X_OFFSET 2
+#define SIS_CONTACT_Y_OFFSET 4
+#define SIS_CONTACT_WIDTH_OFFSET 6
+#define SIS_CONTACT_HEIGHT_OFFSET 7
+#define SIS_CONTACT_PRESSURE_OFFSET(id) (SIS_PKT_HAS_AREA(id) ? 8 : 6)
+
+/* Individual contact state */
+#define SIS_STATUS_UP 0x0
+#define SIS_STATUS_DOWN 0x3
+
+/* Touchscreen parameters */
+#define SIS_MAX_FINGERS 10
+#define SIS_MAX_X 4095
+#define SIS_MAX_Y 4095
+#define SIS_MAX_PRESSURE 255
+
+/* Resolution diagonal */
+#define SIS_AREA_LENGTH_LONGER 5792
+/* ((SIS_MAX_X^2) + (SIS_MAX_Y^2))^0.5 */
+#define SIS_AREA_LENGTH_SHORT 5792
+#define SIS_AREA_UNIT (5792 / 32)
+
+struct sis_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ struct gpio_desc *attn_gpio;
+ struct gpio_desc *reset_gpio;
+
+ u8 packet[SIS_MAX_PACKET_SIZE];
+};
+
+static int sis_read_packet(struct i2c_client *client, u8 *buf,
+ unsigned int *num_contacts,
+ unsigned int *contact_size)
+{
+ int count_idx;
+ int ret;
+ u16 len;
+ u16 crc, pkg_crc;
+ u8 report_id;
+
+ ret = i2c_master_recv(client, buf, SIS_MAX_PACKET_SIZE);
+ if (ret <= 0)
+ return -EIO;
+
+ len = get_unaligned_le16(&buf[SIS_PKT_LEN_OFFSET]);
+ if (len > SIS_MAX_PACKET_SIZE) {
+ dev_err(&client->dev,
+ "%s: invalid packet length (%d vs %d)\n",
+ __func__, len, SIS_MAX_PACKET_SIZE);
+ return -E2BIG;
+ }
+
+ if (len < 10)
+ return -EINVAL;
+
+ report_id = buf[SIS_PKT_REPORT_OFFSET];
+ count_idx = len - 1;
+ *contact_size = SIS_BASE_LEN_PER_CONTACT;
+
+ if (report_id != SIS_ALL_IN_ONE_PACKAGE) {
+ if (SIS_PKT_IS_TOUCH(report_id)) {
+ /*
+ * Calculate CRC ignoring packet length
+ * in the beginning and CRC transmitted
+ * at the end of the packet.
+ */
+ crc = crc_itu_t(0, buf + 2, len - 2 - 2);
+ pkg_crc = get_unaligned_le16(&buf[len - 2]);
+
+ if (crc != pkg_crc) {
+ dev_err(&client->dev,
+ "%s: CRC Error (%d vs %d)\n",
+ __func__, crc, pkg_crc);
+ return -EINVAL;
+ }
+
+ count_idx -= 2;
+
+ } else if (!SIS_PKT_IS_HIDI2C(report_id)) {
+ dev_err(&client->dev,
+ "%s: invalid packet ID %#02x\n",
+ __func__, report_id);
+ return -EINVAL;
+ }
+
+ if (SIS_PKT_HAS_SCANTIME(report_id))
+ count_idx -= SIS_SCAN_TIME_LEN;
+
+ if (SIS_PKT_HAS_AREA(report_id))
+ *contact_size += SIS_AREA_LEN_PER_CONTACT;
+ if (SIS_PKT_HAS_PRESSURE(report_id))
+ *contact_size += SIS_PRESSURE_LEN_PER_CONTACT;
+ }
+
+ *num_contacts = buf[count_idx];
+ return 0;
+}
+
+static int sis_ts_report_contact(struct sis_ts_data *ts, const u8 *data, u8 id)
+{
+ struct input_dev *input = ts->input;
+ int slot;
+ u8 status = data[SIS_CONTACT_STATUS_OFFSET];
+ u8 pressure;
+ u8 height, width;
+ u16 x, y;
+
+ if (status != SIS_STATUS_DOWN && status != SIS_STATUS_UP) {
+ dev_err(&ts->client->dev, "Unexpected touch status: %#02x\n",
+ data[SIS_CONTACT_STATUS_OFFSET]);
+ return -EINVAL;
+ }
+
+ slot = input_mt_get_slot_by_key(input, data[SIS_CONTACT_ID_OFFSET]);
+ if (slot < 0)
+ return -ENOENT;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER,
+ status == SIS_STATUS_DOWN);
+
+ if (status == SIS_STATUS_DOWN) {
+ pressure = height = width = 1;
+ if (id != SIS_ALL_IN_ONE_PACKAGE) {
+ if (SIS_PKT_HAS_AREA(id)) {
+ width = data[SIS_CONTACT_WIDTH_OFFSET];
+ height = data[SIS_CONTACT_HEIGHT_OFFSET];
+ }
+
+ if (SIS_PKT_HAS_PRESSURE(id))
+ pressure =
+ data[SIS_CONTACT_PRESSURE_OFFSET(id)];
+ }
+
+ x = get_unaligned_le16(&data[SIS_CONTACT_X_OFFSET]);
+ y = get_unaligned_le16(&data[SIS_CONTACT_Y_OFFSET]);
+
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ width * SIS_AREA_UNIT);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ height * SIS_AREA_UNIT);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ }
+
+ return 0;
+}
+
+static void sis_ts_handle_packet(struct sis_ts_data *ts)
+{
+ const u8 *contact;
+ unsigned int num_to_report = 0;
+ unsigned int num_contacts;
+ unsigned int num_reported;
+ unsigned int contact_size;
+ int error;
+ u8 report_id;
+
+ do {
+ error = sis_read_packet(ts->client, ts->packet,
+ &num_contacts, &contact_size);
+ if (error)
+ break;
+
+ if (num_to_report == 0) {
+ num_to_report = num_contacts;
+ } else if (num_contacts != 0) {
+ dev_err(&ts->client->dev,
+ "%s: nonzero (%d) point count in tail packet\n",
+ __func__, num_contacts);
+ break;
+ }
+
+ report_id = ts->packet[SIS_PKT_REPORT_OFFSET];
+ contact = &ts->packet[SIS_PKT_CONTACT_OFFSET];
+ num_reported = 0;
+
+ while (num_to_report > 0) {
+ error = sis_ts_report_contact(ts, contact, report_id);
+ if (error)
+ break;
+
+ contact += contact_size;
+ num_to_report--;
+ num_reported++;
+
+ if (report_id != SIS_ALL_IN_ONE_PACKAGE &&
+ num_reported >= 5) {
+ /*
+ * The remainder of contacts is sent
+ * in the 2nd packet.
+ */
+ break;
+ }
+ }
+ } while (num_to_report > 0);
+
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
+}
+
+static irqreturn_t sis_ts_irq_handler(int irq, void *dev_id)
+{
+ struct sis_ts_data *ts = dev_id;
+
+ do {
+ sis_ts_handle_packet(ts);
+ } while (ts->attn_gpio && gpiod_get_value_cansleep(ts->attn_gpio));
+
+ return IRQ_HANDLED;
+}
+
+static void sis_ts_reset(struct sis_ts_data *ts)
+{
+ if (ts->reset_gpio) {
+ /* Get out of reset */
+ usleep_range(1000, 2000);
+ gpiod_set_value(ts->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ts->reset_gpio, 0);
+ msleep(100);
+ }
+}
+
+static int sis_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sis_ts_data *ts;
+ struct input_dev *input;
+ int error;
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ ts->attn_gpio = devm_gpiod_get_optional(&client->dev,
+ "attn", GPIOD_IN);
+ if (IS_ERR(ts->attn_gpio)) {
+ error = PTR_ERR(ts->attn_gpio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get attention GPIO: %d\n", error);
+ return error;
+ }
+
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get reset GPIO: %d\n", error);
+ return error;
+ }
+
+ sis_ts_reset(ts);
+
+ ts->input = input = devm_input_allocate_device(&client->dev);
+ if (!input) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input->name = "SiS Touchscreen";
+ input->id.bustype = BUS_I2C;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, SIS_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, SIS_MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, SIS_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ 0, SIS_AREA_LENGTH_LONGER, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+ 0, SIS_AREA_LENGTH_SHORT, 0, 0);
+
+ error = input_mt_init_slots(input, SIS_MAX_FINGERS, INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, sis_ts_irq_handler,
+ IRQF_ONESHOT,
+ client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(ts->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sis_ts_dt_ids[] = {
+ { .compatible = "sis,9200-ts" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sis_ts_dt_ids);
+#endif
+
+static const struct i2c_device_id sis_ts_id[] = {
+ { SIS_I2C_NAME, 0 },
+ { "9200-ts", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, sis_ts_id);
+
+static struct i2c_driver sis_ts_driver = {
+ .driver = {
+ .name = SIS_I2C_NAME,
+ .of_match_table = of_match_ptr(sis_ts_dt_ids),
+ },
+ .probe = sis_ts_probe,
+ .id_table = sis_ts_id,
+};
+module_i2c_driver(sis_ts_driver);
+
+MODULE_DESCRIPTION("SiS 9200 Family Touchscreen Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mika Penttilä <mika.penttila@nextfour.com>");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index ad0860383cb3..d432ca828472 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -89,8 +89,8 @@ config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
- depends on BROKEN
select IOMMU_API
+ select IOMMU_IO_PGTABLE_ARMV7S
help
Support for the IOMMUs found on certain Qualcomm SOCs.
These IOMMUs allow virtualization of the address space used by most
@@ -111,6 +111,7 @@ config AMD_IOMMU
select PCI_PRI
select PCI_PASID
select IOMMU_API
+ select IOMMU_IOVA
depends on X86_64 && PCI && ACPI
---help---
With this option you can enable support for AMD IOMMU hardware in
@@ -343,4 +344,22 @@ config MTK_IOMMU
If unsure, say N here.
+config MTK_IOMMU_V1
+ bool "MTK IOMMU Version 1 (M4U gen1) Support"
+ depends on ARM
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select MEMORY
+ select MTK_SMI
+ select COMMON_CLK_MT2701_MMSYS
+ select COMMON_CLK_MT2701_IMGSYS
+ select COMMON_CLK_MT2701_VDECSYS
+ help
+	  Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
+	  the Multimedia Memory Management Unit. This option enables remapping
+	  of DMA memory accesses for the multimedia subsystem.
+
+	  If unsure, say N here.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6edb31bf8c6..195f7b997d8e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
@@ -18,6 +18,7 @@ obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
+obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 634f636393d5..96de97a46079 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -38,6 +39,7 @@
#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
+#include <linux/iova.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -56,6 +58,17 @@
#define LOOP_TIMEOUT 100000
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
+
+/* Reserved IOVA ranges */
+#define MSI_RANGE_START (0xfee00000)
+#define MSI_RANGE_END (0xfeefffff)
+#define HT_RANGE_START (0xfd00000000ULL)
+#define HT_RANGE_END (0xffffffffffULL)
+
/*
* This bitmap is used to advertise the page sizes our hardware support
* to the IOMMU core, which will then use this information to split
@@ -76,6 +89,25 @@ LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
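+/*
+ * Deferred IOTLB flushing: freed IOVA ranges are queued per CPU and only
+ * released back to the allocator after the domain TLBs have been flushed,
+ * either when a queue fills up or when the flush timer fires.
+ */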
+#define FLUSH_QUEUE_SIZE 256
+
+struct flush_queue_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ struct dma_ops_domain *dma_dom;
+};
+
+struct flush_queue {
+ spinlock_t lock;
+ unsigned next;
+ struct flush_queue_entry *entries;
+};
+
+DEFINE_PER_CPU(struct flush_queue, flush_queue);
+
+static atomic_t queue_timer_on;
+static struct timer_list queue_timer;
+
/*
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
@@ -121,44 +153,19 @@ static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
/*
- * For dynamic growth the aperture size is split into ranges of 128MB of
- * DMA address space each. This struct represents one such range.
- */
-struct aperture_range {
-
- spinlock_t bitmap_lock;
-
- /* address allocation bitmap */
- unsigned long *bitmap;
- unsigned long offset;
- unsigned long next_bit;
-
- /*
- * Array of PTE pages for the aperture. In this array we save all the
- * leaf pages of the domain page table used for the aperture. This way
- * we don't need to walk the page table to find a specific PTE. We can
- * just calculate its address in constant time.
- */
- u64 *pte_pages[64];
-};
-
-/*
* Data container for a dma_ops specific protection domain
*/
struct dma_ops_domain {
/* generic protection domain information */
struct protection_domain domain;
- /* size of the aperture for the mappings */
- unsigned long aperture_size;
-
- /* aperture index we start searching for free addresses */
- u32 __percpu *next_index;
-
- /* address space relevant data */
- struct aperture_range *aperture[APERTURE_MAX_RANGES];
+ /* IOVA RB-Tree */
+ struct iova_domain iovad;
};
+static struct iova_domain reserved_iova_ranges;
+static struct lock_class_key reserved_rbtree_key;
+
/****************************************************************************
*
* Helper functions
@@ -224,6 +231,12 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
+{
+ BUG_ON(domain->flags != PD_DMA_OPS_MASK);
+ return container_of(domain, struct dma_ops_domain, domain);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -391,43 +404,6 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
}
/*
- * This function actually applies the mapping to the page table of the
- * dma_ops domain.
- */
-static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
- struct unity_map_entry *e)
-{
- u64 addr;
-
- for (addr = e->address_start; addr < e->address_end;
- addr += PAGE_SIZE) {
- if (addr < dma_dom->aperture_size)
- __set_bit(addr >> PAGE_SHIFT,
- dma_dom->aperture[0]->bitmap);
- }
-}
-
-/*
- * Inits the unity mappings required for a specific device
- */
-static void init_unity_mappings_for_device(struct device *dev,
- struct dma_ops_domain *dma_dom)
-{
- struct unity_map_entry *e;
- int devid;
-
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
- list_for_each_entry(e, &amd_iommu_unity_map, list) {
- if (!(devid >= e->devid_start && devid <= e->devid_end))
- continue;
- alloc_unity_mapping(dma_dom, e);
- }
-}
-
-/*
* This function checks if the driver got a valid device from the caller to
* avoid dereferencing invalid pointers.
*/
@@ -454,22 +430,12 @@ static bool check_device(struct device *dev)
static void init_iommu_group(struct device *dev)
{
- struct dma_ops_domain *dma_domain;
- struct iommu_domain *domain;
struct iommu_group *group;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return;
- domain = iommu_group_default_domain(group);
- if (!domain)
- goto out;
-
- dma_domain = to_pdomain(domain)->priv;
-
- init_unity_mappings_for_device(dev, dma_domain);
-out:
iommu_group_put(group);
}
@@ -1220,7 +1186,7 @@ static void domain_flush_complete(struct protection_domain *domain)
int i;
for (i = 0; i < amd_iommus_present; ++i) {
- if (!domain->dev_iommu[i])
+ if (domain && !domain->dev_iommu[i])
continue;
/*
@@ -1397,8 +1363,9 @@ static u64 *fetch_pte(struct protection_domain *domain,
static int iommu_map_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long phys_addr,
+ unsigned long page_size,
int prot,
- unsigned long page_size)
+ gfp_t gfp)
{
u64 __pte, *pte;
int i, count;
@@ -1410,7 +1377,7 @@ static int iommu_map_page(struct protection_domain *dom,
return -EINVAL;
count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
if (!pte)
return -ENOMEM;
@@ -1474,320 +1441,37 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
/****************************************************************************
*
* The next functions belong to the address allocator for the dma_ops
- * interface functions. They work like the allocators in the other IOMMU
- * drivers. Its basically a bitmap which marks the allocated pages in
- * the aperture. Maybe it could be enhanced in the future to a more
- * efficient allocator.
+ * interface functions.
*
****************************************************************************/
-/*
- * The address allocator core functions.
- *
- * called with domain->lock held
- */
-/*
- * Used to reserve address ranges in the aperture (e.g. for exclusion
- * ranges.
- */
-static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
- unsigned long start_page,
- unsigned int pages)
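+/*
+ * Allocate an IOVA range for a DMA mapping. Addresses below 4GB are tried
+ * first; if that fails the full DMA mask of the device is used.
+ */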
+static unsigned long dma_ops_alloc_iova(struct device *dev,
+ struct dma_ops_domain *dma_dom,
+ unsigned int pages, u64 dma_mask)
{
- unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
-
- if (start_page + pages > last_page)
- pages = last_page - start_page;
-
- for (i = start_page; i < start_page + pages; ++i) {
- int index = i / APERTURE_RANGE_PAGES;
- int page = i % APERTURE_RANGE_PAGES;
- __set_bit(page, dom->aperture[index]->bitmap);
- }
-}
+ unsigned long pfn = 0;
-/*
- * This function is used to add a new aperture range to an existing
- * aperture in case of dma_ops domain allocation or address allocation
- * failure.
- */
-static int alloc_new_range(struct dma_ops_domain *dma_dom,
- bool populate, gfp_t gfp)
-{
- int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
- unsigned long i, old_size, pte_pgsize;
- struct aperture_range *range;
- struct amd_iommu *iommu;
- unsigned long flags;
+ pages = __roundup_pow_of_two(pages);
-#ifdef CONFIG_IOMMU_STRESS
- populate = false;
-#endif
+ if (dma_mask > DMA_BIT_MASK(32))
+ pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+ IOVA_PFN(DMA_BIT_MASK(32)));
- if (index >= APERTURE_MAX_RANGES)
- return -ENOMEM;
-
- range = kzalloc(sizeof(struct aperture_range), gfp);
- if (!range)
- return -ENOMEM;
-
- range->bitmap = (void *)get_zeroed_page(gfp);
- if (!range->bitmap)
- goto out_free;
-
- range->offset = dma_dom->aperture_size;
-
- spin_lock_init(&range->bitmap_lock);
-
- if (populate) {
- unsigned long address = dma_dom->aperture_size;
- int i, num_ptes = APERTURE_RANGE_PAGES / 512;
- u64 *pte, *pte_page;
-
- for (i = 0; i < num_ptes; ++i) {
- pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
- &pte_page, gfp);
- if (!pte)
- goto out_free;
-
- range->pte_pages[i] = pte_page;
-
- address += APERTURE_RANGE_SIZE / 64;
- }
- }
+ if (!pfn)
+ pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
-
- /* First take the bitmap_lock and then publish the range */
- spin_lock(&range->bitmap_lock);
-
- old_size = dma_dom->aperture_size;
- dma_dom->aperture[index] = range;
- dma_dom->aperture_size += APERTURE_RANGE_SIZE;
-
- /* Reserve address range used for MSI messages */
- if (old_size < MSI_ADDR_BASE_LO &&
- dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
- unsigned long spage;
- int pages;
-
- pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
- spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
-
- dma_ops_reserve_addresses(dma_dom, spage, pages);
- }
-
- /* Initialize the exclusion range if necessary */
- for_each_iommu(iommu) {
- if (iommu->exclusion_start &&
- iommu->exclusion_start >= dma_dom->aperture[index]->offset
- && iommu->exclusion_start < dma_dom->aperture_size) {
- unsigned long startpage;
- int pages = iommu_num_pages(iommu->exclusion_start,
- iommu->exclusion_length,
- PAGE_SIZE);
- startpage = iommu->exclusion_start >> PAGE_SHIFT;
- dma_ops_reserve_addresses(dma_dom, startpage, pages);
- }
- }
-
- /*
- * Check for areas already mapped as present in the new aperture
- * range and mark those pages as reserved in the allocator. Such
- * mappings may already exist as a result of requested unity
- * mappings for devices.
- */
- for (i = dma_dom->aperture[index]->offset;
- i < dma_dom->aperture_size;
- i += pte_pgsize) {
- u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- continue;
-
- dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
- pte_pgsize >> 12);
- }
-
- update_domain(&dma_dom->domain);
-
- spin_unlock(&range->bitmap_lock);
-
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
-
- return 0;
-
-out_free:
- update_domain(&dma_dom->domain);
-
- free_page((unsigned long)range->bitmap);
-
- kfree(range);
-
- return -ENOMEM;
+ return (pfn << PAGE_SHIFT);
}
-static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
- struct aperture_range *range,
- unsigned long pages,
- unsigned long dma_mask,
- unsigned long boundary_size,
- unsigned long align_mask,
- bool trylock)
+static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
+ unsigned long address,
+ unsigned int pages)
{
- unsigned long offset, limit, flags;
- dma_addr_t address;
- bool flush = false;
-
- offset = range->offset >> PAGE_SHIFT;
- limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
- dma_mask >> PAGE_SHIFT);
-
- if (trylock) {
- if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
- return -1;
- } else {
- spin_lock_irqsave(&range->bitmap_lock, flags);
- }
-
- address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
- pages, offset, boundary_size, align_mask);
- if (address == -1) {
- /* Nothing found, retry one time */
- address = iommu_area_alloc(range->bitmap, limit,
- 0, pages, offset, boundary_size,
- align_mask);
- flush = true;
- }
-
- if (address != -1)
- range->next_bit = address + pages;
-
- spin_unlock_irqrestore(&range->bitmap_lock, flags);
-
- if (flush) {
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- }
-
- return address;
-}
-
-static unsigned long dma_ops_area_alloc(struct device *dev,
- struct dma_ops_domain *dom,
- unsigned int pages,
- unsigned long align_mask,
- u64 dma_mask)
-{
- unsigned long boundary_size, mask;
- unsigned long address = -1;
- bool first = true;
- u32 start, i;
-
- preempt_disable();
-
- mask = dma_get_seg_boundary(dev);
-
-again:
- start = this_cpu_read(*dom->next_index);
-
- /* Sanity check - is it really necessary? */
- if (unlikely(start > APERTURE_MAX_RANGES)) {
- start = 0;
- this_cpu_write(*dom->next_index, 0);
- }
-
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
- struct aperture_range *range;
- int index;
-
- index = (start + i) % APERTURE_MAX_RANGES;
-
- range = dom->aperture[index];
-
- if (!range || range->offset >= dma_mask)
- continue;
-
- address = dma_ops_aperture_alloc(dom, range, pages,
- dma_mask, boundary_size,
- align_mask, first);
- if (address != -1) {
- address = range->offset + (address << PAGE_SHIFT);
- this_cpu_write(*dom->next_index, index);
- break;
- }
- }
-
- if (address == -1 && first) {
- first = false;
- goto again;
- }
-
- preempt_enable();
-
- return address;
-}
-
-static unsigned long dma_ops_alloc_addresses(struct device *dev,
- struct dma_ops_domain *dom,
- unsigned int pages,
- unsigned long align_mask,
- u64 dma_mask)
-{
- unsigned long address = -1;
-
- while (address == -1) {
- address = dma_ops_area_alloc(dev, dom, pages,
- align_mask, dma_mask);
-
- if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
- break;
- }
-
- if (unlikely(address == -1))
- address = DMA_ERROR_CODE;
-
- WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
-
- return address;
-}
-
-/*
- * The address free function.
- *
- * called with domain->lock held
- */
-static void dma_ops_free_addresses(struct dma_ops_domain *dom,
- unsigned long address,
- unsigned int pages)
-{
- unsigned i = address >> APERTURE_RANGE_SHIFT;
- struct aperture_range *range = dom->aperture[i];
- unsigned long flags;
-
- BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
-
-#ifdef CONFIG_IOMMU_STRESS
- if (i < 4)
- return;
-#endif
-
- if (amd_iommu_unmap_flush) {
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- }
-
- address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
-
- spin_lock_irqsave(&range->bitmap_lock, flags);
- if (address + pages > range->next_bit)
- range->next_bit = address + pages;
- bitmap_clear(range->bitmap, address, pages);
- spin_unlock_irqrestore(&range->bitmap_lock, flags);
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+ free_iova_fast(&dma_dom->iovad, address, pages);
}
/****************************************************************************
@@ -1961,44 +1645,18 @@ static void free_gcr3_table(struct protection_domain *domain)
*/
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
- int i;
-
if (!dom)
return;
- free_percpu(dom->next_index);
-
del_domain_from_list(&dom->domain);
- free_pagetable(&dom->domain);
+ put_iova_domain(&dom->iovad);
- for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
- if (!dom->aperture[i])
- continue;
- free_page((unsigned long)dom->aperture[i]->bitmap);
- kfree(dom->aperture[i]);
- }
+ free_pagetable(&dom->domain);
kfree(dom);
}
-static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom,
- int max_apertures)
-{
- int ret, i, apertures;
-
- apertures = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
- ret = 0;
-
- for (i = apertures; i < max_apertures; ++i) {
- ret = alloc_new_range(dma_dom, false, GFP_KERNEL);
- if (ret)
- break;
- }
-
- return ret;
-}
-
/*
* Allocates a new protection domain usable for the dma_ops functions.
* It also initializes the page table and the address allocator data
@@ -2007,7 +1665,6 @@ static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom,
static struct dma_ops_domain *dma_ops_domain_alloc(void)
{
struct dma_ops_domain *dma_dom;
- int cpu;
dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
if (!dma_dom)
@@ -2016,30 +1673,19 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
if (protection_domain_init(&dma_dom->domain))
goto free_dma_dom;
- dma_dom->next_index = alloc_percpu(u32);
- if (!dma_dom->next_index)
- goto free_dma_dom;
-
- dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
+ dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
dma_dom->domain.flags = PD_DMA_OPS_MASK;
- dma_dom->domain.priv = dma_dom;
if (!dma_dom->domain.pt_root)
goto free_dma_dom;
- add_domain_to_list(&dma_dom->domain);
+ init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
+ IOVA_START_PFN, DMA_32BIT_PFN);
- if (alloc_new_range(dma_dom, true, GFP_KERNEL))
- goto free_dma_dom;
-
- /*
- * mark the first page as allocated so we never return 0 as
- * a valid dma-address. So we can use 0 as error value
- */
- dma_dom->aperture[0]->bitmap[0] = 1;
+ /* Initialize reserved ranges */
+ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(dma_dom->next_index, cpu) = 0;
+ add_domain_to_list(&dma_dom->domain);
return dma_dom;
@@ -2482,6 +2128,92 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
*
*****************************************************************************/
+static void __queue_flush(struct flush_queue *queue)
+{
+ struct protection_domain *domain;
+ unsigned long flags;
+ int idx;
+
+ /* First flush TLB of all known domains */
+ spin_lock_irqsave(&amd_iommu_pd_lock, flags);
+ list_for_each_entry(domain, &amd_iommu_pd_list, list)
+ domain_flush_tlb(domain);
+ spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+
+ /* Wait until flushes have completed */
+ domain_flush_complete(NULL);
+
+ for (idx = 0; idx < queue->next; ++idx) {
+ struct flush_queue_entry *entry;
+
+ entry = queue->entries + idx;
+
+ free_iova_fast(&entry->dma_dom->iovad,
+ entry->iova_pfn,
+ entry->pages);
+
+ /* Not really necessary, just to make sure we catch any bugs */
+ entry->dma_dom = NULL;
+ }
+
+ queue->next = 0;
+}
+
+static void queue_flush_all(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct flush_queue *queue;
+ unsigned long flags;
+
+ queue = per_cpu_ptr(&flush_queue, cpu);
+ spin_lock_irqsave(&queue->lock, flags);
+ if (queue->next > 0)
+ __queue_flush(queue);
+ spin_unlock_irqrestore(&queue->lock, flags);
+ }
+}
+
+static void queue_flush_timeout(unsigned long unused)
+{
+ atomic_set(&queue_timer_on, 0);
+ queue_flush_all();
+}
+
+static void queue_add(struct dma_ops_domain *dma_dom,
+ unsigned long address, unsigned long pages)
+{
+ struct flush_queue_entry *entry;
+ struct flush_queue *queue;
+ unsigned long flags;
+ int idx;
+
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+
+ queue = get_cpu_ptr(&flush_queue);
+ spin_lock_irqsave(&queue->lock, flags);
+
+ if (queue->next == FLUSH_QUEUE_SIZE)
+ __queue_flush(queue);
+
+ idx = queue->next++;
+ entry = queue->entries + idx;
+
+ entry->iova_pfn = address;
+ entry->pages = pages;
+ entry->dma_dom = dma_dom;
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
+ mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
+
+ put_cpu_ptr(&flush_queue);
+}
+
/*
* In the dma_ops path we only have the struct device. This function
* finds the corresponding IOMMU, the protection domain and the
@@ -2492,16 +2224,11 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
static struct protection_domain *get_domain(struct device *dev)
{
struct protection_domain *domain;
- struct iommu_domain *io_domain;
if (!check_device(dev))
return ERR_PTR(-EINVAL);
- io_domain = iommu_get_domain_for_dev(dev);
- if (!io_domain)
- return NULL;
-
- domain = to_pdomain(io_domain);
+ domain = get_dev_data(dev)->domain;
if (!dma_ops_domain(domain))
return ERR_PTR(-EBUSY);
@@ -2512,8 +2239,15 @@ static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
- list_for_each_entry(dev_data, &domain->dev_list, list)
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+
+ if (dev_data->devid == dev_data->alias)
+ continue;
+
+ /* There is an alias, update device table entry for it */
+ set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
+ }
}
static void update_domain(struct protection_domain *domain)
@@ -2529,94 +2263,17 @@ static void update_domain(struct protection_domain *domain)
domain->updated = false;
}
-/*
- * This function fetches the PTE for a given address in the aperture
- */
-static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
- unsigned long address)
-{
- struct aperture_range *aperture;
- u64 *pte, *pte_page;
-
- aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
- if (!aperture)
- return NULL;
-
- pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
- if (!pte) {
- pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
- GFP_ATOMIC);
- aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
- } else
- pte += PM_LEVEL_INDEX(0, address);
-
- update_domain(&dom->domain);
-
- return pte;
-}
-
-/*
- * This is the generic map function. It maps one 4kb page at paddr to
- * the given address in the DMA address space for the domain.
- */
-static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
- unsigned long address,
- phys_addr_t paddr,
- int direction)
+static int dir2prot(enum dma_data_direction direction)
{
- u64 *pte, __pte;
-
- WARN_ON(address > dom->aperture_size);
-
- paddr &= PAGE_MASK;
-
- pte = dma_ops_get_pte(dom, address);
- if (!pte)
- return DMA_ERROR_CODE;
-
- __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
-
if (direction == DMA_TO_DEVICE)
- __pte |= IOMMU_PTE_IR;
+ return IOMMU_PROT_IR;
else if (direction == DMA_FROM_DEVICE)
- __pte |= IOMMU_PTE_IW;
+ return IOMMU_PROT_IW;
else if (direction == DMA_BIDIRECTIONAL)
- __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
-
- WARN_ON_ONCE(*pte);
-
- *pte = __pte;
-
- return (dma_addr_t)address;
-}
-
-/*
- * The generic unmapping function for on page in the DMA address space.
- */
-static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
- unsigned long address)
-{
- struct aperture_range *aperture;
- u64 *pte;
-
- if (address >= dom->aperture_size)
- return;
-
- aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
- if (!aperture)
- return;
-
- pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
- if (!pte)
- return;
-
- pte += PM_LEVEL_INDEX(0, address);
-
- WARN_ON_ONCE(!*pte);
-
- *pte = 0ULL;
+ return IOMMU_PROT_IW | IOMMU_PROT_IR;
+ else
+ return 0;
}
-
/*
* This function contains common code for mapping of a physically
* contiguous memory region into DMA address space. It is used by all
@@ -2627,32 +2284,29 @@ static dma_addr_t __map_single(struct device *dev,
struct dma_ops_domain *dma_dom,
phys_addr_t paddr,
size_t size,
- int dir,
- bool align,
+ enum dma_data_direction direction,
u64 dma_mask)
{
dma_addr_t offset = paddr & ~PAGE_MASK;
dma_addr_t address, start, ret;
unsigned int pages;
- unsigned long align_mask = 0;
+ int prot = 0;
int i;
pages = iommu_num_pages(paddr, size, PAGE_SIZE);
paddr &= PAGE_MASK;
- if (align)
- align_mask = (1UL << get_order(size)) - 1;
-
- address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
- dma_mask);
-
+ address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
if (address == DMA_ERROR_CODE)
goto out;
+ prot = dir2prot(direction);
+
start = address;
for (i = 0; i < pages; ++i) {
- ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
- if (ret == DMA_ERROR_CODE)
+ ret = iommu_map_page(&dma_dom->domain, start, paddr,
+ PAGE_SIZE, prot, GFP_ATOMIC);
+ if (ret)
goto out_unmap;
paddr += PAGE_SIZE;
@@ -2672,10 +2326,13 @@ out_unmap:
for (--i; i >= 0; --i) {
start -= PAGE_SIZE;
- dma_ops_domain_unmap(dma_dom, start);
+ iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
}
- dma_ops_free_addresses(dma_dom, address, pages);
+ domain_flush_tlb(&dma_dom->domain);
+ domain_flush_complete(&dma_dom->domain);
+
+ dma_ops_free_iova(dma_dom, address, pages);
return DMA_ERROR_CODE;
}
@@ -2693,21 +2350,23 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
dma_addr_t i, start;
unsigned int pages;
- if ((dma_addr == DMA_ERROR_CODE) ||
- (dma_addr + size > dma_dom->aperture_size))
- return;
-
flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
for (i = 0; i < pages; ++i) {
- dma_ops_domain_unmap(dma_dom, start);
+ iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
start += PAGE_SIZE;
}
- dma_ops_free_addresses(dma_dom, dma_addr, pages);
+ if (amd_iommu_unmap_flush) {
+ dma_ops_free_iova(dma_dom, dma_addr, pages);
+ domain_flush_tlb(&dma_dom->domain);
+ domain_flush_complete(&dma_dom->domain);
+ } else {
+ queue_add(dma_dom, dma_addr, pages);
+ }
}
/*
@@ -2716,10 +2375,11 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
static dma_addr_t map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
u64 dma_mask;
domain = get_domain(dev);
@@ -2729,24 +2389,53 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
return DMA_ERROR_CODE;
dma_mask = *dev->dma_mask;
+ dma_dom = to_dma_ops_domain(domain);
- return __map_single(dev, domain->priv, paddr, size, dir, false,
- dma_mask);
+ return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
}
/*
* The exported unmap_single function for dma_ops.
*/
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
domain = get_domain(dev);
if (IS_ERR(domain))
return;
- __unmap_single(domain->priv, dma_addr, size, dir);
+ dma_dom = to_dma_ops_domain(domain);
+
+ __unmap_single(dma_dom, dma_addr, size, dir);
+}
+
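+/*
+ * Compute the number of IOVA pages needed to map a scatterlist, honouring
+ * the device's segment boundary mask. Each segment's page offset into the
+ * mapping is stashed in s->dma_address and fixed up later in map_sg().
+ */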
+static int sg_num_pages(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems)
+{
+ unsigned long mask, boundary_size;
+ struct scatterlist *s;
+ int i, npages = 0;
+
+ mask = dma_get_seg_boundary(dev);
+ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
+ 1UL << (BITS_PER_LONG - PAGE_SHIFT);
+
+ for_each_sg(sglist, s, nelems, i) {
+ int p, n;
+
+ s->dma_address = npages << PAGE_SHIFT;
+ p = npages % boundary_size;
+ n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+ if (p + n > boundary_size)
+ npages += boundary_size - p;
+ npages += n;
+ }
+
+ return npages;
}
/*
@@ -2754,46 +2443,79 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
* lists).
*/
static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ int nelems, enum dma_data_direction direction,
+ unsigned long attrs)
{
+ int mapped_pages = 0, npages = 0, prot = 0, i;
struct protection_domain *domain;
- int i;
+ struct dma_ops_domain *dma_dom;
struct scatterlist *s;
- phys_addr_t paddr;
- int mapped_elems = 0;
+ unsigned long address;
u64 dma_mask;
domain = get_domain(dev);
if (IS_ERR(domain))
return 0;
+ dma_dom = to_dma_ops_domain(domain);
dma_mask = *dev->dma_mask;
+ npages = sg_num_pages(dev, sglist, nelems);
+
+ address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
+ if (address == DMA_ERROR_CODE)
+ goto out_err;
+
+ prot = dir2prot(direction);
+
+ /* Map all sg entries */
for_each_sg(sglist, s, nelems, i) {
- paddr = sg_phys(s);
+ int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- s->dma_address = __map_single(dev, domain->priv,
- paddr, s->length, dir, false,
- dma_mask);
+ for (j = 0; j < pages; ++j) {
+ unsigned long bus_addr, phys_addr;
+ int ret;
- if (s->dma_address) {
- s->dma_length = s->length;
- mapped_elems++;
- } else
- goto unmap;
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+ ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+ if (ret)
+ goto out_unmap;
+
+ mapped_pages += 1;
+ }
}
- return mapped_elems;
+ /* Everything is mapped - write the right values into s->dma_address */
+ for_each_sg(sglist, s, nelems, i) {
+ s->dma_address += address + s->offset;
+ s->dma_length = s->length;
+ }
+
+ return nelems;
+
+out_unmap:
+ pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
+ dev_name(dev), npages);
-unmap:
- for_each_sg(sglist, s, mapped_elems, i) {
- if (s->dma_address)
- __unmap_single(domain->priv, s->dma_address,
- s->dma_length, dir);
- s->dma_address = s->dma_length = 0;
+ for_each_sg(sglist, s, nelems, i) {
+ int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+
+ for (j = 0; j < pages; ++j) {
+ unsigned long bus_addr;
+
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
+
+			if (--mapped_pages == 0)
+				goto out_free_iova;
+ }
}
+out_free_iova:
+ free_iova_fast(&dma_dom->iovad, address, npages);
+
+out_err:
return 0;
}
@@ -2803,21 +2525,22 @@ unmap:
*/
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct protection_domain *domain;
- struct scatterlist *s;
- int i;
+ struct dma_ops_domain *dma_dom;
+ unsigned long startaddr;
+ int npages = 2;
domain = get_domain(dev);
if (IS_ERR(domain))
return;
- for_each_sg(sglist, s, nelems, i) {
- __unmap_single(domain->priv, s->dma_address,
- s->dma_length, dir);
- s->dma_address = s->dma_length = 0;
- }
+ startaddr = sg_dma_address(sglist) & PAGE_MASK;
+ dma_dom = to_dma_ops_domain(domain);
+ npages = sg_num_pages(dev, sglist, nelems);
+
+ __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
}
/*
@@ -2825,10 +2548,11 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
*/
static void *alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 dma_mask = dev->coherent_dma_mask;
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
struct page *page;
domain = get_domain(dev);
@@ -2839,6 +2563,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
} else if (IS_ERR(domain))
return NULL;
+ dma_dom = to_dma_ops_domain(domain);
size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -2858,8 +2583,8 @@ static void *alloc_coherent(struct device *dev, size_t size,
if (!dma_mask)
dma_mask = *dev->dma_mask;
- *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
- size, DMA_BIDIRECTIONAL, true, dma_mask);
+ *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
+ size, DMA_BIDIRECTIONAL, dma_mask);
if (*dma_addr == DMA_ERROR_CODE)
goto out_free;
@@ -2879,9 +2604,10 @@ out_free:
*/
static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
struct page *page;
page = virt_to_page(virt_addr);
@@ -2891,7 +2617,9 @@ static void free_coherent(struct device *dev, size_t size,
if (IS_ERR(domain))
goto free_mem;
- __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
+ dma_dom = to_dma_ops_domain(domain);
+
+ __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
free_mem:
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
@@ -2907,48 +2635,92 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
return check_device(dev);
}
-static int set_dma_mask(struct device *dev, u64 mask)
+static struct dma_map_ops amd_iommu_dma_ops = {
+ .alloc = alloc_coherent,
+ .free = free_coherent,
+ .map_page = map_page,
+ .unmap_page = unmap_page,
+ .map_sg = map_sg,
+ .unmap_sg = unmap_sg,
+ .dma_supported = amd_iommu_dma_supported,
+};
+
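+/*
+ * Collect the IOVA ranges that must never be handed out as DMA addresses
+ * (MSI window, HyperTransport window, PCI MMIO resources). The set is
+ * copied into every newly allocated dma_ops domain.
+ */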
+static int init_reserved_iova_ranges(void)
{
- struct protection_domain *domain;
- int max_apertures = 1;
+ struct pci_dev *pdev = NULL;
+ struct iova *val;
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return PTR_ERR(domain);
+ init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
+ IOVA_START_PFN, DMA_32BIT_PFN);
- if (mask == DMA_BIT_MASK(64))
- max_apertures = 8;
- else if (mask > DMA_BIT_MASK(32))
- max_apertures = 4;
+ lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
+ &reserved_rbtree_key);
+
+ /* MSI memory range */
+ val = reserve_iova(&reserved_iova_ranges,
+ IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
+ if (!val) {
+ pr_err("Reserving MSI range failed\n");
+ return -ENOMEM;
+ }
+
+ /* HT memory range */
+ val = reserve_iova(&reserved_iova_ranges,
+ IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
+ if (!val) {
+ pr_err("Reserving HT range failed\n");
+ return -ENOMEM;
+ }
/*
- * To prevent lock contention it doesn't make sense to allocate more
- * apertures than online cpus
+ * Memory used for PCI resources
+	 * FIXME: Check whether we can reserve the PCI hole completely
*/
- if (max_apertures > num_online_cpus())
- max_apertures = num_online_cpus();
+ for_each_pci_dev(pdev) {
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
+ struct resource *r = &pdev->resource[i];
+
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
- if (dma_ops_domain_alloc_apertures(domain->priv, max_apertures))
- dev_err(dev, "Can't allocate %d iommu apertures\n",
- max_apertures);
+ val = reserve_iova(&reserved_iova_ranges,
+ IOVA_PFN(r->start),
+ IOVA_PFN(r->end));
+ if (!val) {
+ pr_err("Reserve pci-resource range failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
return 0;
}
-static struct dma_map_ops amd_iommu_dma_ops = {
- .alloc = alloc_coherent,
- .free = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
- .set_dma_mask = set_dma_mask,
-};
-
int __init amd_iommu_init_api(void)
{
- int err = 0;
+ int ret, cpu, err = 0;
+
+ ret = iova_cache_get();
+ if (ret)
+ return ret;
+
+ ret = init_reserved_iova_ranges();
+ if (ret)
+ return ret;
+
+ for_each_possible_cpu(cpu) {
+ struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+ queue->entries = kzalloc(FLUSH_QUEUE_SIZE *
+ sizeof(*queue->entries),
+ GFP_KERNEL);
+ if (!queue->entries)
+ goto out_put_iova;
+
+ spin_lock_init(&queue->lock);
+ }
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
@@ -2958,11 +2730,26 @@ int __init amd_iommu_init_api(void)
if (err)
return err;
#endif
+ err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
+ if (err)
+ return err;
return 0;
+
+out_put_iova:
+ for_each_possible_cpu(cpu) {
+ struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+ kfree(queue->entries);
+ }
+
+ return -ENOMEM;
}
int __init amd_iommu_init_dma_ops(void)
{
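+	/* Timer that drains the per-cpu flush queues; armed from queue_add() */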
+ setup_timer(&queue_timer, queue_flush_timeout, 0);
+ atomic_set(&queue_timer_on, 0);
+
swiotlb = iommu_pass_through ? 1 : 0;
iommu_detected = 1;
@@ -2981,6 +2768,7 @@ int __init amd_iommu_init_dma_ops(void)
pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
return 0;
+
}
/*****************************************************************************
@@ -3103,9 +2891,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
-
- if (!dom)
- return;
+ struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -3114,13 +2900,31 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
BUG_ON(domain->dev_cnt != 0);
- if (domain->mode != PAGE_MODE_NONE)
- free_pagetable(domain);
+ if (!dom)
+ return;
+
+ switch (dom->type) {
+ case IOMMU_DOMAIN_DMA:
+ /*
+ * First make sure the domain is no longer referenced from the
+ * flush queue
+ */
+ queue_flush_all();
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
+ /* Now release the domain */
+ dma_dom = to_dma_ops_domain(domain);
+ dma_ops_domain_free(dma_dom);
+ break;
+ default:
+ if (domain->mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
+
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
- protection_domain_free(domain);
+ protection_domain_free(domain);
+ break;
+ }
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
@@ -3190,7 +2994,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
prot |= IOMMU_PROT_IW;
mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, prot, page_size);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
mutex_unlock(&domain->api_lock);
return ret;
@@ -3292,6 +3096,19 @@ static void amd_iommu_put_dm_regions(struct device *dev,
kfree(entry);
}
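+/*
+ * Reserve a direct-mapped region in the domain's IOVA allocator so the
+ * DMA-API path never hands out addresses from it.
+ */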
+static void amd_iommu_apply_dm_region(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_dm_region *region)
+{
+ struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
+ unsigned long start, end;
+
+ start = IOVA_PFN(region->start);
+ end = IOVA_PFN(region->start + region->length);
+
+ WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
+}
+
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -3307,6 +3124,7 @@ static const struct iommu_ops amd_iommu_ops = {
.device_group = amd_iommu_device_group,
.get_dm_regions = amd_iommu_get_dm_regions,
.put_dm_regions = amd_iommu_put_dm_regions,
+ .apply_dm_region = amd_iommu_apply_dm_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 590956ac704e..caf5e3822715 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -421,7 +421,6 @@ struct protection_domain {
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
- void *priv; /* private data */
};
/*
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index fbdaf81ae925..594849a3a9be 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -960,7 +960,7 @@ static int __init amd_iommu_v2_init(void)
spin_lock_init(&state_lock);
ret = -ENOMEM;
- iommu_wq = create_workqueue("amd_iommu_v2");
+ iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
if (iommu_wq == NULL)
goto out;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5f6b3bcab078..ce801170d5f2 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2687,6 +2687,8 @@ static int __init arm_smmu_init(void)
if (ret)
return ret;
+ pci_request_acs();
+
return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9345a3fcb706..4f49fe29f202 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -987,8 +987,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
- "arm-smmu-context-fault", domain);
+ ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
+ IRQF_SHARED, "arm-smmu-context-fault", domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -1028,7 +1028,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- free_irq(irq, domain);
+ devm_free_irq(smmu->dev, irq, domain);
}
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -1986,15 +1986,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
- err = request_irq(smmu->irqs[i],
- arm_smmu_global_fault,
- IRQF_SHARED,
- "arm-smmu global fault",
- smmu);
+ err = devm_request_irq(smmu->dev, smmu->irqs[i],
+ arm_smmu_global_fault,
+ IRQF_SHARED,
+ "arm-smmu global fault",
+ smmu);
if (err) {
dev_err(dev, "failed to request global IRQ %d (%u)\n",
i, smmu->irqs[i]);
- goto out_free_irqs;
+ goto out_put_masters;
}
}
@@ -2006,10 +2006,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
arm_smmu_device_reset(smmu);
return 0;
-out_free_irqs:
- while (i--)
- free_irq(smmu->irqs[i], smmu);
-
out_put_masters:
for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
struct arm_smmu_master *master
@@ -2050,7 +2046,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
dev_err(dev, "removing device with active domains!\n");
for (i = 0; i < smmu->num_global_irqs; ++i)
- free_irq(smmu->irqs[i], smmu);
+ devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
/* Turn the thing off */
writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -2096,8 +2092,10 @@ static int __init arm_smmu_init(void)
#endif
#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type))
+ if (!iommu_present(&pci_bus_type)) {
+ pci_request_acs();
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ }
#endif
return 0;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ea5a9ebf0f78..08a1e2f3690f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -286,7 +286,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* or NULL on failure.
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+ unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -306,7 +306,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
} else {
size = ALIGN(size, min_size);
}
- if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
alloc_sizes = min_size;
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -400,7 +400,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
@@ -560,7 +560,7 @@ out_restore_sg:
}
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
/*
* The scatterlist segments are mapped into a single
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 7330a66e2b7e..58470f5ced04 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -241,8 +241,20 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
if (!dmar_match_pci_path(info, scope->bus, path, level))
continue;
- if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
- (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
+ /*
+ * We expect devices with endpoint scope to have normal PCI
+ * headers, and devices with bridge scope to have bridge PCI
+ * headers. However PCI NTB devices may be listed in the
+ * DMAR table with bridge scope, even though they have a
+	 * normal PCI header. NTB devices are identified by class
+	 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
+ * for this special case.
+ */
+ if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
+ info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
+ (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
+ (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
pr_warn("Device scope type does not match for %s\n",
pci_name(info->dev));
return -EINVAL;
@@ -1155,8 +1167,6 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
(unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
- __iommu_flush_cache(iommu, &qi->desc[index],
- sizeof(struct qi_desc));
writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
return -EINVAL;
}
@@ -1231,9 +1241,6 @@ restart:
hw[wait_index] = wait_desc;
- __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
- __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
-
qi->free_head = (qi->free_head + 2) % QI_LENGTH;
qi->free_cnt -= 2;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 5ecc86cb74c8..33dcc29ec200 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -54,6 +54,10 @@ typedef u32 sysmmu_pte_t;
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+#ifdef CONFIG_BIG_ENDIAN
+#warning "revisit driver if we can enable big-endian ptes"
+#endif
+
/*
* v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
* v5.0 introduced support for 36bit physical address space by shifting
@@ -322,14 +326,27 @@ static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
__sysmmu_tlb_invalidate(data);
}
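+/*
+ * Gate all SYSMMU clocks together. Optional clocks may be NULL, which the
+ * clk API treats as a no-op.
+ */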
+static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
+{
+ BUG_ON(clk_prepare_enable(data->clk_master));
+ BUG_ON(clk_prepare_enable(data->clk));
+ BUG_ON(clk_prepare_enable(data->pclk));
+ BUG_ON(clk_prepare_enable(data->aclk));
+}
+
+static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
+{
+ clk_disable_unprepare(data->aclk);
+ clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk_master);
+}
+
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
u32 ver;
- clk_enable(data->clk_master);
- clk_enable(data->clk);
- clk_enable(data->pclk);
- clk_enable(data->aclk);
+ __sysmmu_enable_clocks(data);
ver = readl(data->sfrbase + REG_MMU_VERSION);
@@ -342,10 +359,7 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
- clk_disable(data->aclk);
- clk_disable(data->pclk);
- clk_disable(data->clk);
- clk_disable(data->clk_master);
+ __sysmmu_disable_clocks(data);
}
static void show_fault_information(struct sysmmu_drvdata *data,
@@ -427,10 +441,7 @@ static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
writel(0, data->sfrbase + REG_MMU_CFG);
- clk_disable(data->aclk);
- clk_disable(data->pclk);
- clk_disable(data->clk);
- clk_disable(data->clk_master);
+ __sysmmu_disable_clocks(data);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
@@ -475,10 +486,7 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
- clk_enable(data->clk_master);
- clk_enable(data->clk);
- clk_enable(data->pclk);
- clk_enable(data->aclk);
+ __sysmmu_enable_clocks(data);
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
@@ -488,6 +496,12 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+ /*
+	 * The SYSMMU driver keeps the master's clock enabled only for the
+	 * short time it accesses the registers. For address translation
+	 * during DMA transactions it relies on the client driver to keep
+	 * the clock enabled.
+ */
clk_disable(data->clk_master);
}
@@ -524,16 +538,15 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
{
unsigned long flags;
- clk_enable(data->clk_master);
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data)) {
- if (data->version >= MAKE_MMU_VER(3, 3))
- __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
+ clk_enable(data->clk_master);
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
- clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -572,6 +585,8 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
spin_unlock_irqrestore(&data->lock, flags);
}
+static struct iommu_ops exynos_iommu_ops;
+
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -602,37 +617,22 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
}
data->clk = devm_clk_get(dev, "sysmmu");
- if (!IS_ERR(data->clk)) {
- ret = clk_prepare(data->clk);
- if (ret) {
- dev_err(dev, "Failed to prepare clk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->clk) == -ENOENT)
data->clk = NULL;
- }
+ else if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
data->aclk = devm_clk_get(dev, "aclk");
- if (!IS_ERR(data->aclk)) {
- ret = clk_prepare(data->aclk);
- if (ret) {
- dev_err(dev, "Failed to prepare aclk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->aclk) == -ENOENT)
data->aclk = NULL;
- }
+ else if (IS_ERR(data->aclk))
+ return PTR_ERR(data->aclk);
data->pclk = devm_clk_get(dev, "pclk");
- if (!IS_ERR(data->pclk)) {
- ret = clk_prepare(data->pclk);
- if (ret) {
- dev_err(dev, "Failed to prepare pclk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->pclk) == -ENOENT)
data->pclk = NULL;
- }
+ else if (IS_ERR(data->pclk))
+ return PTR_ERR(data->pclk);
if (!data->clk && (!data->aclk || !data->pclk)) {
dev_err(dev, "Failed to get device clock(s)!\n");
@@ -640,15 +640,10 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
}
data->clk_master = devm_clk_get(dev, "master");
- if (!IS_ERR(data->clk_master)) {
- ret = clk_prepare(data->clk_master);
- if (ret) {
- dev_err(dev, "Failed to prepare master's clk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->clk_master) == -ENOENT)
data->clk_master = NULL;
- }
+ else if (IS_ERR(data->clk_master))
+ return PTR_ERR(data->clk_master);
data->sysmmu = dev;
spin_lock_init(&data->lock);
@@ -665,6 +660,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
+
return 0;
}
@@ -709,6 +706,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
.name = "exynos-sysmmu",
.of_match_table = sysmmu_of_match,
.pm = &sysmmu_pm_ops,
+ .suppress_bind_attrs = true,
}
};
@@ -716,7 +714,7 @@ static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
- *ent = val;
+ *ent = cpu_to_le32(val);
dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
}
@@ -1357,7 +1355,6 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
if (!dma_dev)
dma_dev = &pdev->dev;
- of_iommu_set_ops(np, &exynos_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4b9040bb2f1c..ebb5bf3ddbd9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1672,7 +1672,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return -ENOMEM;
}
- size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+ size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
iommu->domains = kzalloc(size, GFP_KERNEL);
if (iommu->domains) {
@@ -1737,7 +1737,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
- int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+ int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
int i;
for (i = 0; i < elems; i++)
@@ -2076,7 +2076,7 @@ out_unlock:
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);
- return 0;
+ return ret;
}
struct domain_context_mapping_data {
@@ -3552,7 +3552,7 @@ error:
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return __intel_map_single(dev, page_to_phys(page) + offset, size,
dir, *dev->dma_mask);
@@ -3711,14 +3711,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page = NULL;
int order;
@@ -3764,7 +3764,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
struct page *page = virt_to_page(vaddr);
@@ -3779,7 +3779,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
unsigned long nrpages = 0;
@@ -3808,7 +3808,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct dmar_domain *domain;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a1ed1b73fed4..f5c90e1366ce 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -576,7 +576,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
return 0;
found_translation:
- iova &= (ARM_LPAE_GRANULE(data) - 1);
+ iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3000051f48b4..b06d93594436 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -34,8 +34,7 @@
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
-static struct ida iommu_group_ida;
-static struct mutex iommu_group_mutex;
+static DEFINE_IDA(iommu_group_ida);
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -144,9 +143,7 @@ static void iommu_group_release(struct kobject *kobj)
if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);
- mutex_lock(&iommu_group_mutex);
- ida_remove(&iommu_group_ida, group->id);
- mutex_unlock(&iommu_group_mutex);
+ ida_simple_remove(&iommu_group_ida, group->id);
if (group->default_domain)
iommu_domain_free(group->default_domain);
@@ -186,26 +183,17 @@ struct iommu_group *iommu_group_alloc(void)
INIT_LIST_HEAD(&group->devices);
BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
- mutex_lock(&iommu_group_mutex);
-
-again:
- if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+ ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
kfree(group);
- mutex_unlock(&iommu_group_mutex);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
-
- if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
- goto again;
-
- mutex_unlock(&iommu_group_mutex);
+ group->id = ret;
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- mutex_lock(&iommu_group_mutex);
- ida_remove(&iommu_group_ida, group->id);
- mutex_unlock(&iommu_group_mutex);
+ ida_simple_remove(&iommu_group_ida, group->id);
kfree(group);
return ERR_PTR(ret);
}
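The iommu.c hunks above replace the open-coded ida_pre_get()/ida_get_new() retry loop and its dedicated mutex with ida_simple_get()/ida_simple_remove(), which handle preloading, locking and retry internally. A minimal sketch of that allocation pattern; the names (my_ida, struct thing) are illustrative only:

/* Hypothetical user of the ida_simple_* pattern shown in the hunks above. */
#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(my_ida);

struct thing {
	int id;
};

static struct thing *thing_create(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
	int id;

	if (!t)
		return NULL;

	/* One call handles preload, locking and retry internally. */
	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		kfree(t);
		return NULL;
	}
	t->id = id;
	return t;
}

static void thing_destroy(struct thing *t)
{
	ida_simple_remove(&my_ida, t->id);
	kfree(t);
}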
@@ -348,6 +336,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
+ if (domain->ops->apply_dm_region)
+ domain->ops->apply_dm_region(dev, domain, entry);
+
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
@@ -1483,9 +1474,6 @@ static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
NULL, kernel_kobj);
- ida_init(&iommu_group_ida);
- mutex_init(&iommu_group_mutex);
-
BUG_ON(!iommu_group_kset);
return 0;
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e321fa517a45..b09692bb5b0a 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -27,32 +27,35 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
+#include "io-pgtable.h"
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))
-#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
-
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-static int msm_iommu_tex_class[4];
-
DEFINE_SPINLOCK(msm_iommu_lock);
+static LIST_HEAD(qcom_iommu_devices);
+static struct iommu_ops msm_iommu_ops;
struct msm_priv {
- unsigned long *pgtable;
struct list_head list_attached;
struct iommu_domain domain;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+ struct device *dev;
+ spinlock_t pgtlock; /* pagetable lock */
};
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
@@ -60,67 +63,183 @@ static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
return container_of(dom, struct msm_priv, domain);
}
-static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+static int __enable_clocks(struct msm_iommu_dev *iommu)
{
int ret;
- ret = clk_enable(drvdata->pclk);
+ ret = clk_enable(iommu->pclk);
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
+ if (iommu->clk) {
+ ret = clk_enable(iommu->clk);
if (ret)
- clk_disable(drvdata->pclk);
+ clk_disable(iommu->pclk);
}
fail:
return ret;
}
-static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+static void __disable_clocks(struct msm_iommu_dev *iommu)
{
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
+ if (iommu->clk)
+ clk_disable(iommu->clk);
+ clk_disable(iommu->pclk);
}
-static int __flush_iotlb(struct iommu_domain *domain)
+static void msm_iommu_reset(void __iomem *base, int ncb)
{
- struct msm_priv *priv = to_msm_priv(domain);
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int ret = 0;
-#ifndef CONFIG_IOMMU_PGTABLES_L2
- unsigned long *fl_table = priv->pgtable;
- int i;
-
- if (!list_empty(&priv->list_attached)) {
- dmac_flush_range(fl_table, fl_table + SZ_16K);
-
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
- void *sl_table = __va(fl_table[i] &
- FL_BASE_MASK);
- dmac_flush_range(sl_table, sl_table + SZ_4K);
- }
+ int ctx;
+
+ SET_RPUE(base, 0);
+ SET_RPUEIE(base, 0);
+ SET_ESRRESTORE(base, 0);
+ SET_TBE(base, 0);
+ SET_CR(base, 0);
+ SET_SPDMBE(base, 0);
+ SET_TESTBUSCR(base, 0);
+ SET_TLBRSW(base, 0);
+ SET_GLOBAL_TLBIALL(base, 0);
+ SET_RPU_ACR(base, 0);
+ SET_TLBLKCRWE(base, 1);
+
+ for (ctx = 0; ctx < ncb; ctx++) {
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_CONTEXTIDR(base, ctx, 0);
}
-#endif
+}
- list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+static void __flush_iotlb(void *cookie)
+{
+ struct msm_priv *priv = cookie;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
+ int ret = 0;
+
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
- BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+ list_for_each_entry(master, &iommu->ctx_list, list)
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
- BUG_ON(!iommu_drvdata);
+ __disable_clocks(iommu);
+ }
+fail:
+ return;
+}
- ret = __enable_clocks(iommu_drvdata);
+static void __flush_iotlb_range(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ struct msm_priv *priv = cookie;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
+ int ret = 0;
+ int temp_size;
+
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
- __disable_clocks(iommu_drvdata);
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ temp_size = size;
+ do {
+ iova &= TLBIVA_VA;
+ iova |= GET_CONTEXTIDR_ASID(iommu->base,
+ master->num);
+ SET_TLBIVA(iommu->base, master->num, iova);
+ iova += granule;
+ } while (temp_size -= granule);
+ }
+
+ __disable_clocks(iommu);
}
+
fail:
- return ret;
+ return;
+}
+
+static void __flush_iotlb_sync(void *cookie)
+{
+ /*
+	 * Nothing is needed here; the barrier that guarantees
+	 * completion of the TLB sync operation is implicitly
+	 * taken care of when the iommu client does a writel before
+	 * kick-starting the other master.
+ */
+}
+
+static const struct iommu_gather_ops msm_iommu_gather_ops = {
+ .tlb_flush_all = __flush_iotlb,
+ .tlb_add_flush = __flush_iotlb_range,
+ .tlb_sync = __flush_iotlb_sync,
+};
+
+static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
+static void msm_iommu_free_ctx(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+static void config_mids(struct msm_iommu_dev *iommu,
+ struct msm_iommu_ctx_dev *master)
+{
+ int mid, ctx, i;
+
+ for (i = 0; i < master->num_mids; i++) {
+ mid = master->mids[i];
+ ctx = master->num;
+
+ SET_M2VCBR_N(iommu->base, mid, 0);
+ SET_CBACR_N(iommu->base, ctx, 0);
+
+ /* Set VMID = 0 */
+ SET_VMID(iommu->base, mid, 0);
+
+ /* Set the context number for that MID to this context */
+ SET_CBNDX(iommu->base, mid, ctx);
+
+		/* Set MID associated with this context bank to 0 */
+ SET_CBVMID(iommu->base, ctx, 0);
+
+ /* Set the ASID for TLB tagging for this context */
+ SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);
+
+ /* Set security bit override to be Non-secure */
+ SET_NSCFG(iommu->base, mid, 3);
+ }
}
static void __reset_context(void __iomem *base, int ctx)
@@ -143,15 +262,17 @@ static void __reset_context(void __iomem *base, int ctx)
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
}
-static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
+static void __program_context(void __iomem *base, int ctx,
+ struct msm_priv *priv)
{
- unsigned int prrr, nmrr;
__reset_context(base, ctx);
+ /* Turn on TEX Remap */
+ SET_TRE(base, ctx, 1);
+ SET_AFE(base, ctx, 1);
+
/* Set up HTW mode */
/* TLB miss configuration: perform HTW on miss */
SET_TLBMCFG(base, ctx, 0x3);
@@ -159,8 +280,13 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
/* V2P configuration: HTW for access */
SET_V2PCFG(base, ctx, 0x3);
- SET_TTBCR(base, ctx, 0);
- SET_TTBR0_PA(base, ctx, (pgtable >> 14));
+ SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
+ SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
+ SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+
+ /* Set prrr and nmrr */
+ SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
+ SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);
/* Invalidate the TLB for this context */
SET_CTX_TLBIALL(base, ctx, 0);
@@ -179,38 +305,9 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_RCOSH(base, ctx, 1);
SET_RCNSH(base, ctx, 1);
- /* Turn on TEX Remap */
- SET_TRE(base, ctx, 1);
-
- /* Set TEX remap attributes */
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
- SET_PRRR(base, ctx, prrr);
- SET_NMRR(base, ctx, nmrr);
-
/* Turn on BFB prefetch */
SET_BFBDFE(base, ctx, 1);
-#ifdef CONFIG_IOMMU_PGTABLES_L2
- /* Configure page tables as inner-cacheable and shareable to reduce
- * the TLB miss penalty.
- */
- SET_TTBR0_SH(base, ctx, 1);
- SET_TTBR1_SH(base, ctx, 1);
-
- SET_TTBR0_NOS(base, ctx, 1);
- SET_TTBR1_NOS(base, ctx, 1);
-
- SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
- SET_TTBR0_IRGNL(base, ctx, 1);
-
- SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
- SET_TTBR1_IRGNL(base, ctx, 1);
-
- SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
- SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
-#endif
-
/* Enable the MMU */
SET_M(base, ctx, 1);
}
@@ -227,13 +324,6 @@ static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
goto fail_nomem;
INIT_LIST_HEAD(&priv->list_attached);
- priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
- get_order(SZ_16K));
-
- if (!priv->pgtable)
- goto fail_nomem;
-
- memset(priv->pgtable, 0, SZ_16K);
priv->domain.geometry.aperture_start = 0;
priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
@@ -250,304 +340,137 @@ static void msm_iommu_domain_free(struct iommu_domain *domain)
{
struct msm_priv *priv;
unsigned long flags;
- unsigned long *fl_table;
- int i;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
+ kfree(priv);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
- fl_table = priv->pgtable;
-
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
- free_page((unsigned long) __va(((fl_table[i]) &
- FL_BASE_MASK)));
+static int msm_iommu_domain_config(struct msm_priv *priv)
+{
+ spin_lock_init(&priv->pgtlock);
+
+ priv->cfg = (struct io_pgtable_cfg) {
+ .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+ .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
+ .ias = 32,
+ .oas = 32,
+ .tlb = &msm_iommu_gather_ops,
+ .iommu_dev = priv->dev,
+ };
+
+ priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
+ if (!priv->iop) {
+ dev_err(priv->dev, "Failed to allocate pgtable\n");
+ return -EINVAL;
+ }
- free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
- priv->pgtable = NULL;
+ msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
- kfree(priv);
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return 0;
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- struct msm_iommu_ctx_drvdata *tmp_drvdata;
int ret = 0;
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_dev *master;
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
-
- if (!dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!list_empty(&ctx_drvdata->attached_elm)) {
- ret = -EBUSY;
- goto fail;
- }
+ priv->dev = dev;
+ msm_iommu_domain_config(priv);
- list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
- if (tmp_drvdata == ctx_drvdata) {
- ret = -EBUSY;
- goto fail;
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ if (master->num) {
+ dev_err(dev, "domain already attached");
+ ret = -EEXIST;
+ goto fail;
+ }
+ master->num =
+ msm_iommu_alloc_ctx(iommu->context_map,
+ 0, iommu->ncb);
+ if (IS_ERR_VALUE(master->num)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ config_mids(iommu, master);
+ __program_context(iommu->base, master->num,
+ priv);
+ }
+ __disable_clocks(iommu);
+ list_add(&iommu->dom_node, &priv->list_attached);
}
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __program_context(iommu_drvdata->base, ctx_dev->num,
- __pa(priv->pgtable));
-
- __disable_clocks(iommu_drvdata);
- list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
- ret = __flush_iotlb(domain);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
int ret;
- spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = to_msm_priv(domain);
-
- if (!dev)
- goto fail;
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
- goto fail;
-
- ret = __flush_iotlb(domain);
- if (ret)
- goto fail;
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
+ free_io_pgtable_ops(priv->iop);
- __reset_context(iommu_drvdata->base, ctx_dev->num);
- __disable_clocks(iommu_drvdata);
- list_del_init(&ctx_drvdata->attached_elm);
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ msm_iommu_free_ctx(iommu->context_map, master->num);
+ __reset_context(iommu->base, master->num);
+ }
+ __disable_clocks(iommu);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
-static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t len, int prot)
{
- struct msm_priv *priv;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
- unsigned long *fl_table;
- unsigned long *fl_pte;
- unsigned long fl_offset;
- unsigned long *sl_table;
- unsigned long *sl_pte;
- unsigned long sl_offset;
- unsigned int pgprot;
- int ret = 0, tex, sh;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
- tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
-
- if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
- ret = -EINVAL;
- goto fail;
- }
-
- priv = to_msm_priv(domain);
-
- fl_table = priv->pgtable;
-
- if (len != SZ_16M && len != SZ_1M &&
- len != SZ_64K && len != SZ_4K) {
- pr_debug("Bad size: %d\n", len);
- ret = -EINVAL;
- goto fail;
- }
-
- if (!fl_table) {
- pr_debug("Null page table\n");
- ret = -EINVAL;
- goto fail;
- }
-
- if (len == SZ_16M || len == SZ_1M) {
- pgprot = sh ? FL_SHARED : 0;
- pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
- pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
- pgprot |= tex & 0x04 ? FL_TEX0 : 0;
- } else {
- pgprot = sh ? SL_SHARED : 0;
- pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
- pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
- pgprot |= tex & 0x04 ? SL_TEX0 : 0;
- }
-
- fl_offset = FL_OFFSET(va); /* Upper 12 bits */
- fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
-
- if (len == SZ_16M) {
- int i = 0;
- for (i = 0; i < 16; i++)
- *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
- FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
- FL_SHARED | FL_NG | pgprot;
- }
-
- if (len == SZ_1M)
- *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
- FL_TYPE_SECT | FL_SHARED | pgprot;
-
- /* Need a 2nd level table */
- if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
- unsigned long *sl;
- sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
- get_order(SZ_4K));
-
- if (!sl) {
- pr_debug("Could not allocate second level table\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- memset(sl, 0, SZ_4K);
- *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
- }
-
- sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
- sl_offset = SL_OFFSET(va);
- sl_pte = sl_table + sl_offset;
-
-
- if (len == SZ_4K)
- *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
- SL_SHARED | SL_TYPE_SMALL | pgprot;
-
- if (len == SZ_64K) {
- int i;
+ int ret;
- for (i = 0; i < 16; i++)
- *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
- SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
- }
+ spin_lock_irqsave(&priv->pgtlock, flags);
+ ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+ spin_unlock_irqrestore(&priv->pgtlock, flags);
- ret = __flush_iotlb(domain);
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
}
-static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- size_t len)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t len)
{
- struct msm_priv *priv;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
- unsigned long *fl_table;
- unsigned long *fl_pte;
- unsigned long fl_offset;
- unsigned long *sl_table;
- unsigned long *sl_pte;
- unsigned long sl_offset;
- int i, ret = 0;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
- fl_table = priv->pgtable;
+ spin_lock_irqsave(&priv->pgtlock, flags);
+ len = priv->iop->unmap(priv->iop, iova, len);
+ spin_unlock_irqrestore(&priv->pgtlock, flags);
- if (len != SZ_16M && len != SZ_1M &&
- len != SZ_64K && len != SZ_4K) {
- pr_debug("Bad length: %d\n", len);
- goto fail;
- }
-
- if (!fl_table) {
- pr_debug("Null page table\n");
- goto fail;
- }
-
- fl_offset = FL_OFFSET(va); /* Upper 12 bits */
- fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
-
- if (*fl_pte == 0) {
- pr_debug("First level PTE is 0\n");
- goto fail;
- }
-
- /* Unmap supersection */
- if (len == SZ_16M)
- for (i = 0; i < 16; i++)
- *(fl_pte+i) = 0;
-
- if (len == SZ_1M)
- *fl_pte = 0;
-
- sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
- sl_offset = SL_OFFSET(va);
- sl_pte = sl_table + sl_offset;
-
- if (len == SZ_64K) {
- for (i = 0; i < 16; i++)
- *(sl_pte+i) = 0;
- }
-
- if (len == SZ_4K)
- *sl_pte = 0;
-
- if (len == SZ_4K || len == SZ_64K) {
- int used = 0;
-
- for (i = 0; i < NUM_SL_PTE; i++)
- if (sl_table[i])
- used = 1;
- if (!used) {
- free_page((unsigned long)sl_table);
- *fl_pte = 0;
- }
- }
-
- ret = __flush_iotlb(domain);
-
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
- /* the IOMMU API requires us to return how many bytes were unmapped */
- len = ret ? 0 : len;
return len;
}
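The rewritten msm_iommu_map()/msm_iommu_unmap() above no longer walk a hand-rolled ARM short-descriptor table; they take a per-domain spinlock and delegate to the io_pgtable_ops allocated in msm_iommu_domain_config(), with TLB maintenance driven through the iommu_gather_ops registered in the io_pgtable_cfg. A generic sketch of that delegation pattern, with hypothetical type and function names:

/* Hypothetical wrapper showing the io-pgtable delegation used above. */
#include <linux/spinlock.h>
#include <linux/types.h>
#include "io-pgtable.h"

struct my_domain {
	spinlock_t lock;
	struct io_pgtable_ops *iop;	/* from alloc_io_pgtable_ops() */
};

static int my_map(struct my_domain *dom, unsigned long iova,
		  phys_addr_t pa, size_t len, int prot)
{
	unsigned long flags;
	int ret;

	/* Page-table updates are serialized here; TLB maintenance happens
	 * via the gather_ops callbacks registered in the io_pgtable_cfg. */
	spin_lock_irqsave(&dom->lock, flags);
	ret = dom->iop->map(dom->iop, iova, pa, len, prot);
	spin_unlock_irqrestore(&dom->lock, flags);

	return ret;
}

static size_t my_unmap(struct my_domain *dom, unsigned long iova, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&dom->lock, flags);
	len = dom->iop->unmap(dom->iop, iova, len);
	spin_unlock_irqrestore(&dom->lock, flags);

	return len;
}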
@@ -555,47 +478,46 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t va)
{
struct msm_priv *priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
unsigned int par;
unsigned long flags;
- void __iomem *base;
phys_addr_t ret = 0;
- int ctx;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
- if (list_empty(&priv->list_attached))
- goto fail;
+ iommu = list_first_entry(&priv->list_attached,
+ struct msm_iommu_dev, dom_node);
- ctx_drvdata = list_entry(priv->list_attached.next,
- struct msm_iommu_ctx_drvdata, attached_elm);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ if (list_empty(&iommu->ctx_list))
+ goto fail;
- base = iommu_drvdata->base;
- ctx = ctx_drvdata->num;
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev, list);
+ if (!master)
+ goto fail;
- ret = __enable_clocks(iommu_drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
/* Invalidate context TLB */
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_V2PPR(base, ctx, va & V2Pxx_VA);
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+ SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);
- par = GET_PAR(base, ctx);
+ par = GET_PAR(iommu->base, master->num);
/* We are dealing with a supersection */
- if (GET_NOFAULT_SS(base, ctx))
+ if (GET_NOFAULT_SS(iommu->base, master->num))
ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
else /* Upper 20 bits from PAR, lower 12 from VA */
ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
- if (GET_FAULT(base, ctx))
+ if (GET_FAULT(iommu->base, master->num))
ret = 0;
- __disable_clocks(iommu_drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
@@ -629,49 +551,92 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
pr_err("SCTLR = %08x ACTLR = %08x\n",
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
- pr_err("PRRR = %08x NMRR = %08x\n",
- GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+static void insert_iommu_master(struct device *dev,
+ struct msm_iommu_dev **iommu,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
+ int sid;
+
+ if (list_empty(&(*iommu)->ctx_list)) {
+ master = kzalloc(sizeof(*master), GFP_ATOMIC);
+ master->of_node = dev->of_node;
+ list_add(&master->list, &(*iommu)->ctx_list);
+ dev->archdata.iommu = master;
+ }
+
+ for (sid = 0; sid < master->num_mids; sid++)
+ if (master->mids[sid] == spec->args[0]) {
+ dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ sid);
+ return;
+ }
+
+ master->mids[master->num_mids++] = spec->args[0];
+}
+
+static int qcom_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
+ if (iommu->dev->of_node == spec->np)
+ break;
+
+ if (!iommu || iommu->dev->of_node != spec->np) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ insert_iommu_master(dev, &iommu, spec);
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return ret;
}
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
- struct msm_iommu_drvdata *drvdata = dev_id;
- void __iomem *base;
+ struct msm_iommu_dev *iommu = dev_id;
unsigned int fsr;
int i, ret;
spin_lock(&msm_iommu_lock);
- if (!drvdata) {
+ if (!iommu) {
pr_err("Invalid device ID in context interrupt handler\n");
goto fail;
}
- base = drvdata->base;
-
pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int) base);
+ pr_err("base = %08x\n", (unsigned int)iommu->base);
- ret = __enable_clocks(drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- for (i = 0; i < drvdata->ncb; i++) {
- fsr = GET_FSR(base, i);
+ for (i = 0; i < iommu->ncb; i++) {
+ fsr = GET_FSR(iommu->base, i);
if (fsr) {
pr_err("Fault occurred in context %d.\n", i);
pr_err("Interesting registers:\n");
- print_ctx_regs(base, i);
- SET_FSR(base, i, 0x4000000F);
+ print_ctx_regs(iommu->base, i);
+ SET_FSR(iommu->base, i, 0x4000000F);
}
}
- __disable_clocks(drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock(&msm_iommu_lock);
return 0;
}
-static const struct iommu_ops msm_iommu_ops = {
+static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
@@ -682,54 +647,163 @@ static const struct iommu_ops msm_iommu_ops = {
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .of_xlate = qcom_iommu_of_xlate,
};
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+static int msm_iommu_probe(struct platform_device *pdev)
{
- int i = 0;
- unsigned int prrr = 0;
- unsigned int nmrr = 0;
- int c_icp, c_ocp, c_mt, c_nos;
-
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
-
- for (i = 0; i < NUM_TEX_CLASS; i++) {
- c_nos = PRRR_NOS(prrr, i);
- c_mt = PRRR_MT(prrr, i);
- c_icp = NMRR_ICP(nmrr, i);
- c_ocp = NMRR_OCP(nmrr, i);
-
- if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
- return i;
+ struct resource *r;
+ struct msm_iommu_dev *iommu;
+ int ret, par, val;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENODEV;
+
+ iommu->dev = &pdev->dev;
+ INIT_LIST_HEAD(&iommu->ctx_list);
+
+ iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ if (IS_ERR(iommu->pclk)) {
+ dev_err(iommu->dev, "could not get smmu_pclk\n");
+ return PTR_ERR(iommu->pclk);
+ }
+
+ ret = clk_prepare(iommu->pclk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare smmu_pclk\n");
+ return ret;
+ }
+
+ iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk)) {
+ dev_err(iommu->dev, "could not get iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return PTR_ERR(iommu->clk);
+ }
+
+ ret = clk_prepare(iommu->clk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return ret;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(iommu->dev, r);
+ if (IS_ERR(iommu->base)) {
+ dev_err(iommu->dev, "could not get iommu base\n");
+ ret = PTR_ERR(iommu->base);
+ goto fail;
}
- return -ENODEV;
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(iommu->dev, "could not get iommu irq\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
+ if (ret) {
+ dev_err(iommu->dev, "could not get ncb\n");
+ goto fail;
+ }
+ iommu->ncb = val;
+
+ msm_iommu_reset(iommu->base, iommu->ncb);
+ SET_M(iommu->base, 0, 1);
+ SET_PAR(iommu->base, 0, 0);
+ SET_V2PCFG(iommu->base, 0, 1);
+ SET_V2PPR(iommu->base, 0, 0);
+ par = GET_PAR(iommu->base, 0);
+ SET_V2PCFG(iommu->base, 0, 0);
+ SET_M(iommu->base, 0, 0);
+
+ if (!par) {
+ pr_err("Invalid PAR value detected\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
+ msm_iommu_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irpt_handler",
+ iommu);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
+ goto fail;
+ }
+
+ list_add(&iommu->dev_node, &qcom_iommu_devices);
+ of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ pr_info("device mapped at %p, irq %d with %d ctx banks\n",
+ iommu->base, iommu->irq, iommu->ncb);
+
+ return ret;
+fail:
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return ret;
+}
+
+static const struct of_device_id msm_iommu_dt_match[] = {
+ { .compatible = "qcom,apq8064-iommu" },
+ {}
+};
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
+
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return 0;
}
-static void __init setup_iommu_tex_classes(void)
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_dt_match,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
{
- msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
- get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+ int ret;
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
- get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret != 0)
+ pr_err("Failed to register IOMMU driver\n");
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
- get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+ return ret;
+}
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
- get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_driver);
}
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
static int __init msm_iommu_init(void)
{
- setup_iommu_tex_classes();
bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
}
-subsys_initcall(msm_iommu_init);
+static int __init msm_iommu_of_setup(struct device_node *np)
+{
+ msm_iommu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
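Both the exynos and msm changes in this patch rely on the same OF registration scheme: IOMMU_OF_DECLARE() provides an early per-compatible setup hook, and of_iommu_set_ops() publishes the iommu_ops for a DT node once the hardware has actually probed, so of_iommu_configure() can later attach client devices. A minimal sketch of that scheme with hypothetical names (my_iommu_ops, "vendor,my-iommu"):

/* Hypothetical skeleton of the OF registration scheme used above. */
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

static struct iommu_ops my_iommu_ops;	/* filled in by the real driver */

static int __init my_iommu_of_setup(struct device_node *np)
{
	/* Early hook, run before the platform device probes; the probe
	 * routine later calls of_iommu_set_ops(np, &my_iommu_ops) so that
	 * of_iommu_configure() can find the ops for client devices. */
	return 0;
}
IOMMU_OF_DECLARE(my_iommu, "vendor,my-iommu", my_iommu_of_setup);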
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 5c7c955e6d25..4ca25d50d679 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -42,74 +42,53 @@
*/
#define MAX_NUM_MIDS 32
+/* Maximum number of context banks that can be present in IOMMU */
+#define IOMMU_MAX_CBS 128
+
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
- * name Human-readable name given to this IOMMU HW instance
* ncb Number of context banks present on this IOMMU HW instance
+ * dev: IOMMU device
+ * irq: Interrupt number
+ * clk: The bus clock for this IOMMU hardware instance
+ * pclk: The clock for the IOMMU bus interconnect
+ * dev_node: list head in qcom_iommu_device_list
+ * dom_node: list head for domain
+ * ctx_list: list of 'struct msm_iommu_ctx_dev'
+ * context_map: Bitmap to track allocated context banks
*/
struct msm_iommu_dev {
- const char *name;
+ void __iomem *base;
int ncb;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct list_head dev_node;
+ struct list_head dom_node;
+ struct list_head ctx_list;
+ DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
};
/**
* struct msm_iommu_ctx_dev - an IOMMU context bank instance
- * name Human-readable name given to this context bank
+ * of_node node ptr of client device
* num Index of this context bank within the hardware
* mids List of Machine IDs that are to be mapped into this context
* bank, terminated by -1. The MID is a set of signals on the
* AXI bus that identifies the function associated with a specific
* memory request. (See ARM spec).
+ * num_mids Total number of mids
+ * list	list head in ctx_list
*/
struct msm_iommu_ctx_dev {
- const char *name;
+ struct device_node *of_node;
int num;
int mids[MAX_NUM_MIDS];
+ int num_mids;
+ struct list_head list;
};
-
-/**
- * struct msm_iommu_drvdata - A single IOMMU hardware instance
- * @base: IOMMU config port base address (VA)
- * @ncb The number of contexts on this IOMMU
- * @irq: Interrupt number
- * @clk: The bus clock for this IOMMU hardware instance
- * @pclk: The clock for the IOMMU bus interconnect
- *
- * A msm_iommu_drvdata holds the global driver data about a single piece
- * of an IOMMU hardware instance.
- */
-struct msm_iommu_drvdata {
- void __iomem *base;
- int irq;
- int ncb;
- struct clk *clk;
- struct clk *pclk;
-};
-
-/**
- * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
- * @num: Hardware context number of this context
- * @pdev: Platform device associated wit this HW instance
- * @attached_elm: List element for domains to track which devices are
- * attached to them
- *
- * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
- * within each IOMMU hardware instance
- */
-struct msm_iommu_ctx_drvdata {
- int num;
- struct platform_device *pdev;
- struct list_head attached_elm;
-};
-
-/*
- * Look up an IOMMU context device by its context name. NULL if none found.
- * Useful for testing and drivers that do not yet fully have IOMMU stuff in
- * their platform devices.
- */
-struct device *msm_iommu_get_ctx(const char *ctx_name);
-
/*
* Interrupt handler for the IOMMU context fault interrupt. Hooking the
* interrupt is not supported in the API yet, but this will print an error
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
deleted file mode 100644
index 4b09e815accf..000000000000
--- a/drivers/iommu/msm_iommu_dev.c
+++ /dev/null
@@ -1,381 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include "msm_iommu_hw-8xxx.h"
-#include "msm_iommu.h"
-
-struct iommu_ctx_iter_data {
- /* input */
- const char *name;
-
- /* output */
- struct device *dev;
-};
-
-static struct platform_device *msm_iommu_root_dev;
-
-static int each_iommu_ctx(struct device *dev, void *data)
-{
- struct iommu_ctx_iter_data *res = data;
- struct msm_iommu_ctx_dev *c = dev->platform_data;
-
- if (!res || !c || !c->name || !res->name)
- return -EINVAL;
-
- if (!strcmp(res->name, c->name)) {
- res->dev = dev;
- return 1;
- }
- return 0;
-}
-
-static int each_iommu(struct device *dev, void *data)
-{
- return device_for_each_child(dev, data, each_iommu_ctx);
-}
-
-struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- struct iommu_ctx_iter_data r;
- int found;
-
- if (!msm_iommu_root_dev) {
- pr_err("No root IOMMU device.\n");
- goto fail;
- }
-
- r.name = ctx_name;
- found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
-
- if (!found) {
- pr_err("Could not find context <%s>\n", ctx_name);
- goto fail;
- }
-
- return r.dev;
-fail:
- return NULL;
-}
-EXPORT_SYMBOL(msm_iommu_get_ctx);
-
-static void msm_iommu_reset(void __iomem *base, int ncb)
-{
- int ctx;
-
- SET_RPUE(base, 0);
- SET_RPUEIE(base, 0);
- SET_ESRRESTORE(base, 0);
- SET_TBE(base, 0);
- SET_CR(base, 0);
- SET_SPDMBE(base, 0);
- SET_TESTBUSCR(base, 0);
- SET_TLBRSW(base, 0);
- SET_GLOBAL_TLBIALL(base, 0);
- SET_RPU_ACR(base, 0);
- SET_TLBLKCRWE(base, 1);
-
- for (ctx = 0; ctx < ncb; ctx++) {
- SET_BPRCOSH(base, ctx, 0);
- SET_BPRCISH(base, ctx, 0);
- SET_BPRCNSH(base, ctx, 0);
- SET_BPSHCFG(base, ctx, 0);
- SET_BPMTCFG(base, ctx, 0);
- SET_ACTLR(base, ctx, 0);
- SET_SCTLR(base, ctx, 0);
- SET_FSRRESTORE(base, ctx, 0);
- SET_TTBR0(base, ctx, 0);
- SET_TTBR1(base, ctx, 0);
- SET_TTBCR(base, ctx, 0);
- SET_BFBCR(base, ctx, 0);
- SET_PAR(base, ctx, 0);
- SET_FAR(base, ctx, 0);
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_TLBFLPTER(base, ctx, 0);
- SET_TLBSLPTER(base, ctx, 0);
- SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
- SET_CONTEXTIDR(base, ctx, 0);
- }
-}
-
-static int msm_iommu_probe(struct platform_device *pdev)
-{
- struct resource *r;
- struct clk *iommu_clk;
- struct clk *iommu_pclk;
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
- void __iomem *regs_base;
- int ret, irq, par;
-
- if (pdev->id == -1) {
- msm_iommu_root_dev = pdev;
- return 0;
- }
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-
- if (!drvdata) {
- ret = -ENOMEM;
- goto fail;
- }
-
- if (!iommu_dev) {
- ret = -ENODEV;
- goto fail;
- }
-
- iommu_pclk = clk_get(NULL, "smmu_pclk");
- if (IS_ERR(iommu_pclk)) {
- ret = -ENODEV;
- goto fail;
- }
-
- ret = clk_prepare_enable(iommu_pclk);
- if (ret)
- goto fail_enable;
-
- iommu_clk = clk_get(&pdev->dev, "iommu_clk");
-
- if (!IS_ERR(iommu_clk)) {
- if (clk_get_rate(iommu_clk) == 0)
- clk_set_rate(iommu_clk, 1);
-
- ret = clk_prepare_enable(iommu_clk);
- if (ret) {
- clk_put(iommu_clk);
- goto fail_pclk;
- }
- } else
- iommu_clk = NULL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
- regs_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(regs_base)) {
- ret = PTR_ERR(regs_base);
- goto fail_clk;
- }
-
- irq = platform_get_irq_byname(pdev, "secure_irq");
- if (irq < 0) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- msm_iommu_reset(regs_base, iommu_dev->ncb);
-
- SET_M(regs_base, 0, 1);
- SET_PAR(regs_base, 0, 0);
- SET_V2PCFG(regs_base, 0, 1);
- SET_V2PPR(regs_base, 0, 0);
- par = GET_PAR(regs_base, 0);
- SET_V2PCFG(regs_base, 0, 0);
- SET_M(regs_base, 0, 0);
-
- if (!par) {
- pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
- ret = -ENODEV;
- goto fail_clk;
- }
-
- ret = request_irq(irq, msm_iommu_fault_handler, 0,
- "msm_iommu_secure_irpt_handler", drvdata);
- if (ret) {
- pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_clk;
- }
-
-
- drvdata->pclk = iommu_pclk;
- drvdata->clk = iommu_clk;
- drvdata->base = regs_base;
- drvdata->irq = irq;
- drvdata->ncb = iommu_dev->ncb;
-
- pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
- iommu_dev->name, regs_base, irq, iommu_dev->ncb);
-
- platform_set_drvdata(pdev, drvdata);
-
- clk_disable(iommu_clk);
-
- clk_disable(iommu_pclk);
-
- return 0;
-fail_clk:
- if (iommu_clk) {
- clk_disable(iommu_clk);
- clk_put(iommu_clk);
- }
-fail_pclk:
- clk_disable_unprepare(iommu_pclk);
-fail_enable:
- clk_put(iommu_pclk);
-fail:
- kfree(drvdata);
- return ret;
-}
-
-static int msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_drvdata *drv = NULL;
-
- drv = platform_get_drvdata(pdev);
- if (drv) {
- if (drv->clk) {
- clk_unprepare(drv->clk);
- clk_put(drv->clk);
- }
- clk_unprepare(drv->pclk);
- clk_put(drv->pclk);
- memset(drv, 0, sizeof(*drv));
- kfree(drv);
- }
- return 0;
-}
-
-static int msm_iommu_ctx_probe(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int i, ret;
-
- if (!c || !pdev->dev.parent)
- return -EINVAL;
-
- drvdata = dev_get_drvdata(pdev->dev.parent);
- if (!drvdata)
- return -ENODEV;
-
- ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
- if (!ctx_drvdata)
- return -ENOMEM;
-
- ctx_drvdata->num = c->num;
- ctx_drvdata->pdev = pdev;
-
- INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
- platform_set_drvdata(pdev, ctx_drvdata);
-
- ret = clk_prepare_enable(drvdata->pclk);
- if (ret)
- goto fail;
-
- if (drvdata->clk) {
- ret = clk_prepare_enable(drvdata->clk);
- if (ret) {
- clk_disable_unprepare(drvdata->pclk);
- goto fail;
- }
- }
-
- /* Program the M2V tables for this context */
- for (i = 0; i < MAX_NUM_MIDS; i++) {
- int mid = c->mids[i];
- if (mid == -1)
- break;
-
- SET_M2VCBR_N(drvdata->base, mid, 0);
- SET_CBACR_N(drvdata->base, c->num, 0);
-
- /* Set VMID = 0 */
- SET_VMID(drvdata->base, mid, 0);
-
- /* Set the context number for that MID to this context */
- SET_CBNDX(drvdata->base, mid, c->num);
-
- /* Set MID associated with this context bank to 0*/
- SET_CBVMID(drvdata->base, c->num, 0);
-
- /* Set the ASID for TLB tagging for this context */
- SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
-
- /* Set security bit override to be Non-secure */
- SET_NSCFG(drvdata->base, mid, 3);
- }
-
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
-
- dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
- return 0;
-fail:
- kfree(ctx_drvdata);
- return ret;
-}
-
-static int msm_iommu_ctx_remove(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_drvdata *drv = NULL;
- drv = platform_get_drvdata(pdev);
- if (drv) {
- memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
- kfree(drv);
- }
- return 0;
-}
-
-static struct platform_driver msm_iommu_driver = {
- .driver = {
- .name = "msm_iommu",
- },
- .probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
- .driver = {
- .name = "msm_iommu_ctx",
- },
- .probe = msm_iommu_ctx_probe,
- .remove = msm_iommu_ctx_remove,
-};
-
-static struct platform_driver * const drivers[] = {
- &msm_iommu_driver,
- &msm_iommu_ctx_driver,
-};
-
-static int __init msm_iommu_driver_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index c3043d8754e3..b12c12d74c33 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -34,7 +34,7 @@
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>
-#include "io-pgtable.h"
+#include "mtk_iommu.h"
#define REG_MMU_PT_BASE_ADDR 0x000
@@ -93,20 +93,6 @@
#define MTK_PROTECT_PA_ALIGN 128
-struct mtk_iommu_suspend_reg {
- u32 standard_axi_mode;
- u32 dcm_dis;
- u32 ctrl_reg;
- u32 int_control0;
- u32 int_main_control;
-};
-
-struct mtk_iommu_client_priv {
- struct list_head client;
- unsigned int mtk_m4u_id;
- struct device *m4udev;
-};
-
struct mtk_iommu_domain {
spinlock_t pgtlock; /* lock for page table */
@@ -116,19 +102,6 @@ struct mtk_iommu_domain {
struct iommu_domain domain;
};
-struct mtk_iommu_data {
- void __iomem *base;
- int irq;
- struct device *dev;
- struct clk *bclk;
- phys_addr_t protect_base; /* protect memory base */
- struct mtk_iommu_suspend_reg reg;
- struct mtk_iommu_domain *m4u_dom;
- struct iommu_group *m4u_group;
- struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
- bool enable_4GB;
-};
-
static struct iommu_ops mtk_iommu_ops;
static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
@@ -455,7 +428,6 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
if (!dev->archdata.iommu) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
- of_node_put(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
@@ -552,25 +524,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return 0;
}
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int mtk_iommu_bind(struct device *dev)
-{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
- return component_bind_all(dev, &data->smi_imu);
-}
-
-static void mtk_iommu_unbind(struct device *dev)
-{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
- component_unbind_all(dev, &data->smi_imu);
-}
-
static const struct component_master_ops mtk_iommu_com_ops = {
.bind = mtk_iommu_bind,
.unbind = mtk_iommu_unbind,
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
new file mode 100644
index 000000000000..9ed0a8462ccf
--- /dev/null
+++ b/drivers/iommu/mtk_iommu.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_IOMMU_H_
+#define _MTK_IOMMU_H_
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <soc/mediatek/smi.h>
+
+#include "io-pgtable.h"
+
+struct mtk_iommu_suspend_reg {
+ u32 standard_axi_mode;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 int_control0;
+ u32 int_main_control;
+};
+
+struct mtk_iommu_client_priv {
+ struct list_head client;
+ unsigned int mtk_m4u_id;
+ struct device *m4udev;
+};
+
+struct mtk_iommu_domain;
+
+struct mtk_iommu_data {
+ void __iomem *base;
+ int irq;
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_suspend_reg reg;
+ struct mtk_iommu_domain *m4u_dom;
+ struct iommu_group *m4u_group;
+ struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
+ bool enable_4GB;
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int mtk_iommu_bind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->smi_imu);
+}
+
+static void mtk_iommu_unbind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->smi_imu);
+}
+
+#endif
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
new file mode 100644
index 000000000000..b8aeb0768483
--- /dev/null
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ *
+ * Based on driver/iommu/mtk_iommu.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bootmem.h>
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <asm/dma-iommu.h>
+#include <linux/module.h>
+#include <dt-bindings/memory/mt2701-larb-port.h>
+#include <soc/mediatek/smi.h>
+#include "mtk_iommu.h"
+
+#define REG_MMU_PT_BASE_ADDR 0x000
+
+#define F_ALL_INVLD 0x2
+#define F_MMU_INV_RANGE 0x1
+#define F_INVLD_EN0 BIT(0)
+#define F_INVLD_EN1 BIT(1)
+
+#define F_MMU_FAULT_VA_MSK 0xfffff000
+#define MTK_PROTECT_PA_ALIGN 128
+
+#define REG_MMU_CTRL_REG 0x210
+#define F_MMU_CTRL_COHERENT_EN BIT(8)
+#define REG_MMU_IVRP_PADDR 0x214
+#define REG_MMU_INT_CONTROL 0x220
+#define F_INT_TRANSLATION_FAULT BIT(0)
+#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
+#define F_INT_INVALID_PA_FAULT BIT(2)
+#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
+#define F_INT_TABLE_WALK_FAULT BIT(4)
+#define F_INT_TLB_MISS_FAULT BIT(5)
+#define F_INT_PFH_DMA_FIFO_OVERFLOW BIT(6)
+#define F_INT_MISS_DMA_FIFO_OVERFLOW BIT(7)
+
+#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
+#define F_INT_CLR_BIT BIT(12)
+
+#define REG_MMU_FAULT_ST 0x224
+#define REG_MMU_FAULT_VA 0x228
+#define REG_MMU_INVLD_PA 0x22C
+#define REG_MMU_INT_ID 0x388
+#define REG_MMU_INVALIDATE 0x5c0
+#define REG_MMU_INVLD_START_A 0x5c4
+#define REG_MMU_INVLD_END_A 0x5c8
+
+#define REG_MMU_INV_SEL 0x5d8
+#define REG_MMU_STANDARD_AXI_MODE 0x5e8
+
+#define REG_MMU_DCM 0x5f0
+#define F_MMU_DCM_ON BIT(1)
+#define REG_MMU_CPE_DONE 0x60c
+#define F_DESC_VALID 0x2
+#define F_DESC_NONSEC BIT(3)
+#define MT2701_M4U_TF_LARB(TF) (6 - (((TF) >> 13) & 0x7))
+#define MT2701_M4U_TF_PORT(TF) (((TF) >> 8) & 0xF)
+/* MTK generation one iommu HW only supports 4K mappings */
+#define MT2701_IOMMU_PAGE_SHIFT 12
+#define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT)
+
+/*
+ * The MTK m4u supports a 4GB iova address space and only 4K page
+ * mappings, so the pagetable size is exactly 4M.
+ */
+#define M2701_IOMMU_PGT_SIZE SZ_4M
+
+struct mtk_iommu_domain {
+ spinlock_t pgtlock; /* lock for page table */
+ struct iommu_domain domain;
+ u32 *pgt_va;
+ dma_addr_t pgt_pa;
+ struct mtk_iommu_data *data;
+};
+
+static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct mtk_iommu_domain, domain);
+}
+
+static const int mt2701_m4u_in_larb[] = {
+ LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
+ LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
+};
+
+static inline int mt2701_m4u_to_larb(int id)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
+ if ((id) >= mt2701_m4u_in_larb[i])
+ return i;
+
+ return 0;
+}
+
+static inline int mt2701_m4u_to_port(int id)
+{
+ int larb = mt2701_m4u_to_larb(id);
+
+ return id - mt2701_m4u_in_larb[larb];
+}
+
+static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
+{
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+ data->base + REG_MMU_INV_SEL);
+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+	wmb(); /* Make sure the TLB flush-all has completed */
+}
+
+static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
+ unsigned long iova, size_t size)
+{
+ int ret;
+ u32 tmp;
+
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+ data->base + REG_MMU_INV_SEL);
+ writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
+ data->base + REG_MMU_INVLD_START_A);
+ writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
+ data->base + REG_MMU_INVLD_END_A);
+ writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+
+ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
+ tmp, tmp != 0, 10, 100000);
+ if (ret) {
+ dev_warn(data->dev,
+ "Partial TLB flush timed out, falling back to full flush\n");
+ mtk_iommu_tlb_flush_all(data);
+ }
+ /* Clear the CPE status */
+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+}
+
+static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+{
+ struct mtk_iommu_data *data = dev_id;
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+ u32 int_state, regval, fault_iova, fault_pa;
+ unsigned int fault_larb, fault_port;
+
+ /* Read error information from registers */
+ int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
+ fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+
+ fault_iova &= F_MMU_FAULT_VA_MSK;
+ fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
+ regval = readl_relaxed(data->base + REG_MMU_INT_ID);
+ fault_larb = MT2701_M4U_TF_LARB(regval);
+ fault_port = MT2701_M4U_TF_PORT(regval);
+
+ /*
+	 * The MTK v1 iommu HW cannot determine whether a fault was caused by
+	 * a read or a write, so report it as a read fault.
+ */
+ if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
+ IOMMU_FAULT_READ))
+ dev_err_ratelimited(data->dev,
+ "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
+ int_state, fault_iova, fault_pa,
+ fault_larb, fault_port);
+
+ /* Interrupt clear */
+ regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
+ regval |= F_INT_CLR_BIT;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
+
+ mtk_iommu_tlb_flush_all(data);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_iommu_config(struct mtk_iommu_data *data,
+ struct device *dev, bool enable)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+ struct mtk_smi_larb_iommu *larb_mmu;
+ unsigned int larbid, portid;
+
+ head = dev->archdata.iommu;
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
+ portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
+ larb_mmu = &data->smi_imu.larb_imu[larbid];
+
+ dev_dbg(dev, "%s iommu port: %d\n",
+ enable ? "enable" : "disable", portid);
+
+ if (enable)
+ larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
+ else
+ larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+ }
+}
+
+static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+{
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+
+ spin_lock_init(&dom->pgtlock);
+
+ dom->pgt_va = dma_zalloc_coherent(data->dev,
+ M2701_IOMMU_PGT_SIZE,
+ &dom->pgt_pa, GFP_KERNEL);
+ if (!dom->pgt_va)
+ return -ENOMEM;
+
+ writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);
+
+ dom->data = data;
+
+ return 0;
+}
+
+static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+{
+ struct mtk_iommu_domain *dom;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ return &dom->domain;
+}
+
+static void mtk_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_data *data = dom->data;
+
+ dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+ dom->pgt_va, dom->pgt_pa);
+ kfree(to_mtk_domain(domain));
+}
+
+static int mtk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_dom) {
+ data->m4u_dom = dom;
+ ret = mtk_iommu_domain_finalise(data);
+ if (ret) {
+ data->m4u_dom = NULL;
+ return ret;
+ }
+ }
+
+ mtk_iommu_config(data, dev, true);
+ return 0;
+}
+
+static void mtk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+
+ if (!priv)
+ return;
+
+ data = dev_get_drvdata(priv->m4udev);
+ mtk_iommu_config(data, dev, false);
+}
+
+static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+ unsigned long flags;
+ unsigned int i;
+ u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
+ u32 pabase = (u32)paddr;
+ int map_size = 0;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ for (i = 0; i < page_num; i++) {
+ if (pgt_base_iova[i]) {
+ memset(pgt_base_iova, 0, i * sizeof(u32));
+ break;
+ }
+ pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
+ pabase += MT2701_IOMMU_PAGE_SIZE;
+ map_size += MT2701_IOMMU_PAGE_SIZE;
+ }
+
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ mtk_iommu_tlb_flush_range(dom->data, iova, size);
+
+ return map_size == size ? 0 : -EEXIST;
+}
+
+static size_t mtk_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
+ unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ mtk_iommu_tlb_flush_range(dom->data, iova, size);
+
+ return size;
+}
+
+static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ phys_addr_t pa;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
+ pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return pa;
+}
+
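The map, unmap and iova_to_phys callbacks above all drive the same flat, one-level page table: one u32 entry per 4 KiB page of IOVA space. A minimal sketch of the lookup they implement, assuming the MT2701_* constants and F_DESC_VALID defined earlier in this file (the helper name is illustrative, not part of the patch):

static phys_addr_t example_mt2701_lookup(struct mtk_iommu_domain *dom,
					  unsigned long iova)
{
	/* one u32 descriptor per 4 KiB page of IOVA space */
	u32 entry = dom->pgt_va[iova >> MT2701_IOMMU_PAGE_SHIFT];

	if (!(entry & F_DESC_VALID))
		return 0;	/* IOVA not mapped */

	/* upper bits hold the physical page, low bits come from the IOVA */
	return (entry & ~(MT2701_IOMMU_PAGE_SIZE - 1)) |
	       (iova & (MT2701_IOMMU_PAGE_SIZE - 1));
}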
+/*
+ * MTK generation one iommu HW only supports one iommu domain; all the clients
+ * share the same iova address space.
+ */
+static int mtk_iommu_create_mapping(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct mtk_iommu_client_priv *head, *priv, *next;
+ struct platform_device *m4updev;
+ struct dma_iommu_mapping *mtk_mapping;
+ struct device *m4udev;
+ int ret;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
+ args->args_count);
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.iommu) {
+ /* Get the m4u device */
+ m4updev = of_find_device_by_node(args->np);
+ if (WARN_ON(!m4updev))
+ return -EINVAL;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ dev->archdata.iommu = head;
+ INIT_LIST_HEAD(&head->client);
+ head->m4udev = &m4updev->dev;
+ } else {
+ head = dev->archdata.iommu;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+ priv->mtk_m4u_id = args->args[0];
+ list_add_tail(&priv->client, &head->client);
+
+ m4udev = head->m4udev;
+ mtk_mapping = m4udev->archdata.iommu;
+ if (!mtk_mapping) {
+		/* MTK iommu supports a 4GB iova address space. */
+ mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
+ 0, 1ULL << 32);
+ if (IS_ERR(mtk_mapping)) {
+ ret = PTR_ERR(mtk_mapping);
+ goto err_free_mem;
+ }
+ m4udev->archdata.iommu = mtk_mapping;
+ }
+
+ ret = arm_iommu_attach_device(dev, mtk_mapping);
+ if (ret)
+ goto err_release_mapping;
+
+ return 0;
+
+err_release_mapping:
+ arm_iommu_release_mapping(mtk_mapping);
+ m4udev->archdata.iommu = NULL;
+err_free_mem:
+ list_for_each_entry_safe(priv, next, &head->client, client)
+ kfree(priv);
+ kfree(head);
+ dev->archdata.iommu = NULL;
+ return ret;
+}
+
+static int mtk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ struct of_phandle_args iommu_spec;
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, dev->of_node, "iommus",
+ "#iommu-cells", 0) {
+ int count = of_phandle_iterator_args(&it, iommu_spec.args,
+ MAX_PHANDLE_ARGS);
+ iommu_spec.np = of_node_get(it.node);
+ iommu_spec.args_count = count;
+
+ mtk_iommu_create_mapping(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ }
+
+	if (!dev->archdata.iommu) /* Not an iommu client device */
+ return -ENODEV;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_put(group);
+ return 0;
+}
+
+static void mtk_iommu_remove_device(struct device *dev)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+
+ head = dev->archdata.iommu;
+ if (!head)
+ return;
+
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ list_del(&cur->client);
+ kfree(cur);
+ }
+ kfree(head);
+ dev->archdata.iommu = NULL;
+
+ iommu_group_remove_device(dev);
+}
+
+static struct iommu_group *mtk_iommu_device_group(struct device *dev)
+{
+ struct mtk_iommu_data *data;
+ struct mtk_iommu_client_priv *priv;
+
+ priv = dev->archdata.iommu;
+ if (!priv)
+ return ERR_PTR(-ENODEV);
+
+ /* All the client devices are in the same m4u iommu-group */
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_group) {
+ data->m4u_group = iommu_group_alloc();
+ if (IS_ERR(data->m4u_group))
+ dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ }
+ return data->m4u_group;
+}
+
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+{
+ u32 regval;
+ int ret;
+
+ ret = clk_prepare_enable(data->bclk);
+ if (ret) {
+ dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
+ return ret;
+ }
+
+ regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
+ writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
+
+ regval = F_INT_TRANSLATION_FAULT |
+ F_INT_MAIN_MULTI_HIT_FAULT |
+ F_INT_INVALID_PA_FAULT |
+ F_INT_ENTRY_REPLACEMENT_FAULT |
+ F_INT_TABLE_WALK_FAULT |
+ F_INT_TLB_MISS_FAULT |
+ F_INT_PFH_DMA_FIFO_OVERFLOW |
+ F_INT_MISS_DMA_FIFO_OVERFLOW;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
+
+	/* Protect memory. HW will write here on translation fault. */
+ writel_relaxed(data->protect_base,
+ data->base + REG_MMU_IVRP_PADDR);
+
+ writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);
+
+ if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ dev_name(data->dev), (void *)data)) {
+ writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
+ clk_disable_unprepare(data->bclk);
+ dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct iommu_ops mtk_iommu_ops = {
+ .domain_alloc = mtk_iommu_domain_alloc,
+ .domain_free = mtk_iommu_domain_free,
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .add_device = mtk_iommu_add_device,
+ .remove_device = mtk_iommu_remove_device,
+ .device_group = mtk_iommu_device_group,
+ .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
+};
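A note on the .pgsize_bitmap value above: assuming MT2701_IOMMU_PAGE_SHIFT is 12 (4 KiB pages, consistent with how map and unmap count entries), ~0UL << 12 sets every bit from 4 KiB upward, so the IOMMU core may pass mtk_iommu_map() any power-of-two size that is a multiple of 4 KiB and the driver fills size >> 12 consecutive entries.

	/* e.g. a 64 KiB map request: page_num = 0x10000 >> 12 = 16 entries written */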
+
+static const struct of_device_id mtk_iommu_of_ids[] = {
+ { .compatible = "mediatek,mt2701-m4u", },
+ {}
+};
+
+static const struct component_master_ops mtk_iommu_com_ops = {
+ .bind = mtk_iommu_bind,
+ .unbind = mtk_iommu_unbind,
+};
+
+static int mtk_iommu_probe(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct component_match *match = NULL;
+ struct of_phandle_args larb_spec;
+ struct of_phandle_iterator it;
+ void *protect;
+ int larb_nr, ret, err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+
+	/* Protect memory. HW will access it on translation fault. */
+ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
+ GFP_KERNEL | GFP_DMA);
+ if (!protect)
+ return -ENOMEM;
+ data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->bclk = devm_clk_get(dev, "bclk");
+ if (IS_ERR(data->bclk))
+ return PTR_ERR(data->bclk);
+
+ larb_nr = 0;
+ of_for_each_phandle(&it, err, dev->of_node,
+ "mediatek,larbs", NULL, 0) {
+ struct platform_device *plarbdev;
+ int count = of_phandle_iterator_args(&it, larb_spec.args,
+ MAX_PHANDLE_ARGS);
+
+ if (count)
+ continue;
+
+ larb_spec.np = of_node_get(it.node);
+ if (!of_device_is_available(larb_spec.np))
+ continue;
+
+ plarbdev = of_find_device_by_node(larb_spec.np);
+ of_node_put(larb_spec.np);
+ if (!plarbdev) {
+ plarbdev = of_platform_device_create(
+ larb_spec.np, NULL,
+ platform_bus_type.dev_root);
+ if (!plarbdev)
+ return -EPROBE_DEFER;
+ }
+
+ data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
+ component_match_add(dev, &match, compare_of, larb_spec.np);
+ larb_nr++;
+ }
+
+ data->smi_imu.larb_nr = larb_nr;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = mtk_iommu_hw_init(data);
+ if (ret)
+ return ret;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+
+ return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+}
+
+static int mtk_iommu_remove(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+
+ if (iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, NULL);
+
+ clk_disable_unprepare(data->bclk);
+ devm_free_irq(&pdev->dev, data->irq, data);
+ component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ reg->standard_axi_mode = readl_relaxed(base +
+ REG_MMU_STANDARD_AXI_MODE);
+ reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
+ reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
+ reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_resume(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
+ writel_relaxed(reg->standard_axi_mode,
+ base + REG_MMU_STANDARD_AXI_MODE);
+ writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
+ writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
+ writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
+ writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_iommu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+};
+
+static struct platform_driver mtk_iommu_driver = {
+ .probe = mtk_iommu_probe,
+ .remove = mtk_iommu_remove,
+ .driver = {
+ .name = "mtk-iommu",
+ .of_match_table = mtk_iommu_of_ids,
+ .pm = &mtk_iommu_pm_ops,
+ }
+};
+
+static int __init m4u_init(void)
+{
+ return platform_driver_register(&mtk_iommu_driver);
+}
+
+static void __exit m4u_exit(void)
+{
+ return platform_driver_unregister(&mtk_iommu_driver);
+}
+
+subsys_initcall(m4u_init);
+module_exit(m4u_exit);
+
+MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations");
+MODULE_AUTHOR("Honghui Zhang <honghui.zhang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 25b4627cb57f..9afcbf79f0b0 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -4,11 +4,10 @@
* published by the Free Software Foundation.
*/
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dma-iommu.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -77,7 +76,9 @@
struct rk_iommu_domain {
struct list_head iommus;
+ struct platform_device *pdev;
u32 *dt; /* page directory table */
+ dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
@@ -93,14 +94,12 @@ struct rk_iommu {
struct iommu_domain *domain; /* domain to which iommu is attached */
};
-static inline void rk_table_flush(u32 *va, unsigned int count)
+static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
+ unsigned int count)
{
- phys_addr_t pa_start = virt_to_phys(va);
- phys_addr_t pa_end = virt_to_phys(va + count);
- size_t size = pa_end - pa_start;
+ size_t size = count * sizeof(u32); /* count of u32 entry */
- __cpuc_flush_dcache_area(va, size);
- outer_flush_range(pa_start, pa_end);
+ dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -183,10 +182,9 @@ static inline bool rk_dte_is_pt_valid(u32 dte)
return dte & RK_DTE_PT_VALID;
}
-static u32 rk_mk_dte(u32 *pt)
+static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
- phys_addr_t pt_phys = virt_to_phys(pt);
- return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+ return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
/*
@@ -603,13 +601,16 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
dma_addr_t iova)
{
+ struct device *dev = &rk_domain->pdev->dev;
u32 *page_table, *dte_addr;
- u32 dte;
+ u32 dte_index, dte;
phys_addr_t pt_phys;
+ dma_addr_t pt_dma;
assert_spin_locked(&rk_domain->dt_lock);
- dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+ dte_index = rk_iova_dte_index(iova);
+ dte_addr = &rk_domain->dt[dte_index];
dte = *dte_addr;
if (rk_dte_is_pt_valid(dte))
goto done;
@@ -618,19 +619,27 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (!page_table)
return ERR_PTR(-ENOMEM);
- dte = rk_mk_dte(page_table);
- *dte_addr = dte;
+ pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pt_dma)) {
+ dev_err(dev, "DMA mapping error while allocating page table\n");
+ free_page((unsigned long)page_table);
+ return ERR_PTR(-ENOMEM);
+ }
- rk_table_flush(page_table, NUM_PT_ENTRIES);
- rk_table_flush(dte_addr, 1);
+ dte = rk_mk_dte(pt_dma);
+ *dte_addr = dte;
+ rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
+ rk_table_flush(rk_domain,
+ rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
pt_phys = rk_dte_pt_address(dte);
return (u32 *)phys_to_virt(pt_phys);
}
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
- u32 *pte_addr, dma_addr_t iova, size_t size)
+ u32 *pte_addr, dma_addr_t pte_dma,
+ size_t size)
{
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
@@ -645,14 +654,14 @@ static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
pte_addr[pte_count] = rk_mk_pte_invalid(pte);
}
- rk_table_flush(pte_addr, pte_count);
+ rk_table_flush(rk_domain, pte_dma, pte_count);
return pte_count * SPAGE_SIZE;
}
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
- dma_addr_t iova, phys_addr_t paddr, size_t size,
- int prot)
+ dma_addr_t pte_dma, dma_addr_t iova,
+ phys_addr_t paddr, size_t size, int prot)
{
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
@@ -671,7 +680,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
paddr += SPAGE_SIZE;
}
- rk_table_flush(pte_addr, pte_count);
+ rk_table_flush(rk_domain, pte_dma, pte_total);
/*
* Zap the first and last iova to evict from iotlb any previously
@@ -684,7 +693,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
return 0;
unwind:
/* Unmap the range of iovas that we just mapped */
- rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+ rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
+ pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
page_phys = rk_pte_page_address(pte_addr[pte_count]);
@@ -699,8 +709,9 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- dma_addr_t iova = (dma_addr_t)_iova;
+ dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
+ u32 dte_index, pte_index;
int ret;
spin_lock_irqsave(&rk_domain->dt_lock, flags);
@@ -718,8 +729,13 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
return PTR_ERR(page_table);
}
- pte_addr = &page_table[rk_iova_pte_index(iova)];
- ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+ dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
+ pte_index = rk_iova_pte_index(iova);
+ pte_addr = &page_table[pte_index];
+ pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
+ paddr, size, prot);
+
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
return ret;
@@ -730,7 +746,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- dma_addr_t iova = (dma_addr_t)_iova;
+ dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
u32 dte;
u32 *pte_addr;
@@ -754,7 +770,8 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
pt_phys = rk_dte_pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
- unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+ pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
@@ -787,7 +804,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
int ret, i;
- phys_addr_t dte_addr;
/*
* Allow 'virtual devices' (e.g., drm) to attach to domain.
@@ -807,14 +823,14 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
iommu->domain = domain;
- ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+ ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq,
IRQF_SHARED, dev_name(dev), iommu);
if (ret)
return ret;
- dte_addr = virt_to_phys(rk_domain->dt);
for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
+ rk_domain->dt_dma);
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -860,7 +876,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
}
rk_iommu_disable_stall(iommu);
- devm_free_irq(dev, iommu->irq, iommu);
+ devm_free_irq(iommu->dev, iommu->irq, iommu);
iommu->domain = NULL;
@@ -870,14 +886,30 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
+ struct platform_device *pdev;
+ struct device *iommu_dev;
- if (type != IOMMU_DOMAIN_UNMANAGED)
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
- rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
- if (!rk_domain)
+	/* Register a pdev per domain, so the DMA API can rely on this *dev
+	 * even when a virtual master doesn't have an iommu slave
+ */
+ pdev = platform_device_register_simple("rk_iommu_domain",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(pdev))
return NULL;
+ rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ goto err_unreg_pdev;
+
+ rk_domain->pdev = pdev;
+
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&rk_domain->domain))
+ goto err_unreg_pdev;
+
/*
* rk32xx iommus use a 2 level pagetable.
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
@@ -885,18 +917,36 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
*/
rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
if (!rk_domain->dt)
- goto err_dt;
+ goto err_put_cookie;
+
+ iommu_dev = &pdev->dev;
+ rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
+ SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
+ dev_err(iommu_dev, "DMA map error for DT\n");
+ goto err_free_dt;
+ }
- rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+ rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
spin_lock_init(&rk_domain->iommus_lock);
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
+ rk_domain->domain.geometry.aperture_start = 0;
+ rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ rk_domain->domain.geometry.force_aperture = true;
+
return &rk_domain->domain;
-err_dt:
- kfree(rk_domain);
+err_free_dt:
+ free_page((unsigned long)rk_domain->dt);
+err_put_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&rk_domain->domain);
+err_unreg_pdev:
+ platform_device_unregister(pdev);
+
return NULL;
}
@@ -912,12 +962,20 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_dte_pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
+ dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
+ SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)page_table);
}
}
+ dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
+ SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)rk_domain->dt);
- kfree(rk_domain);
+
+ if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&rk_domain->domain);
+
+ platform_device_unregister(rk_domain->pdev);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -1022,17 +1080,43 @@ static const struct iommu_ops rk_iommu_ops = {
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};
+static int rk_iommu_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
+ arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ return 0;
+}
+
+static struct platform_driver rk_iommu_domain_driver = {
+ .probe = rk_iommu_domain_probe,
+ .driver = {
+ .name = "rk_iommu_domain",
+ },
+};
+
static int rk_iommu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ int num_res = pdev->num_resources;
int i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
@@ -1042,12 +1126,13 @@ static int rk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iommu);
iommu->dev = dev;
iommu->num_mmu = 0;
- iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * iommu->num_mmu,
+
+ iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
GFP_KERNEL);
if (!iommu->bases)
return -ENOMEM;
- for (i = 0; i < pdev->num_resources; i++) {
+ for (i = 0; i < num_res; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
continue;
@@ -1103,11 +1188,19 @@ static int __init rk_iommu_init(void)
if (ret)
return ret;
- return platform_driver_register(&rk_iommu_driver);
+ ret = platform_driver_register(&rk_iommu_domain_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&rk_iommu_driver);
+ if (ret)
+ platform_driver_unregister(&rk_iommu_domain_driver);
+ return ret;
}
static void __exit rk_iommu_exit(void)
{
platform_driver_unregister(&rk_iommu_driver);
+ platform_driver_unregister(&rk_iommu_domain_driver);
}
subsys_initcall(rk_iommu_init);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 5495a5ba8039..7f8728984f44 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -21,9 +21,9 @@ config ARM_GIC_MAX_NR
config ARM_GIC_V2M
bool
- depends on ARM_GIC
- depends on PCI && PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI
+ select ARM_GIC
+ select PCI_MSI
config GIC_NON_BANKED
bool
@@ -37,7 +37,8 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI
+ depends on PCI_MSI
config ARM_NVIC
bool
@@ -62,13 +63,13 @@ config ARM_VIC_NR
config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
- select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+ select PCI_MSI if PCI
config ALPINE_MSI
bool
- depends on PCI && PCI_MSI
+ depends on PCI
+ select PCI_MSI
select GENERIC_IRQ_CHIP
- select PCI_MSI_IRQ_DOMAIN
config ATMEL_AIC_IRQ
bool
@@ -117,7 +118,6 @@ config HISILICON_IRQ_MBIGEN
bool
select ARM_GIC_V3
select ARM_GIC_V3_ITS
- select GENERIC_MSI_IRQ_DOMAIN
config IMGPDC_IRQ
bool
@@ -250,12 +250,10 @@ config IRQ_MXS
config MVEBU_ODMI
bool
- select GENERIC_MSI_IRQ_DOMAIN
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
config PARTITION_PERCPU
bool
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 2223b3f15d68..f913f4db7ae1 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -234,5 +234,5 @@ static int __init clps711x_intc_init_dt(struct device_node *np,
return _clps711x_intc_init(np, res.start, resource_size(&res));
}
-IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt);
+IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt);
#endif
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 3786d0f21972..c5f33c3bd228 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -359,7 +359,7 @@ static void gic_handle_shared_int(bool chained)
pending_reg += gic_reg_step;
intrmask_reg += gic_reg_step;
- if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+ if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
continue;
pending[i] |= (u64)gic_read(pending_reg) << 32;
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
index dfb8bd390125..b2a98c7b521b 100644
--- a/drivers/leds/leds-powernv.c
+++ b/drivers/leds/leds-powernv.c
@@ -118,7 +118,7 @@ static int powernv_led_set(struct powernv_led_data *powernv_led,
goto out_token;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS)
dev_err(dev, "%s : OAPL async call returned failed [rc=%d]\n",
__func__, rc);
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index d531f804455d..d6f72c826c1c 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -38,6 +38,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -99,6 +100,7 @@ static DEFINE_MUTEX(smu_mutex);
static struct smu_device *smu;
static DEFINE_MUTEX(smu_part_access);
static int smu_irq_inited;
+static unsigned long smu_cmdbuf_abs;
static void smu_i2c_retry(unsigned long data);
@@ -479,8 +481,13 @@ int __init smu_init (void)
printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR);
+ /*
+ * SMU based G5s need some memory below 2Gb. Thankfully this is
+ * called at a time where memblock is still available.
+ */
+ smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
if (smu_cmdbuf_abs == 0) {
- printk(KERN_ERR "SMU: Command buffer not allocated !\n");
+ printk(KERN_ERR "SMU: Command buffer allocation failed !\n");
ret = -EINVAL;
goto fail_np;
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 5305923752d2..97c372908e78 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -123,4 +123,13 @@ config XGENE_SLIMPRO_MBOX
It is used to send short messages between ARM64-bit cores and
the SLIMpro Management Engine, primarily for PM. Say Y here if you
want to use the APM X-Gene SLIMpro IPCM support.
+
+config BCM_PDC_MBOX
+ tristate "Broadcom PDC Mailbox"
+ depends on ARM64 || COMPILE_TEST
+ default ARCH_BCM_IPROC
+ help
+ Mailbox implementation for the Broadcom PDC ring manager,
+ which provides access to various offload engines on Broadcom
+ SoCs. Say Y here if you want to use the Broadcom PDC.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 0be3e742bb7d..66c38e300dfc 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
+
+obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
new file mode 100644
index 000000000000..cbe0c1ee4ba9
--- /dev/null
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -0,0 +1,1531 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+/*
+ * Broadcom PDC Mailbox Driver
+ * The PDC provides a ring based programming interface to one or more hardware
+ * offload engines. For example, the PDC driver works with both SPU-M and SPU2
+ * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
+ *
+ * The PDC driver registers with the Linux mailbox framework as a mailbox
+ * controller, once for each PDC instance. Ring 0 for each PDC is registered as
+ * a mailbox channel. The PDC driver uses interrupts to determine when data
+ * transfers to and from an offload engine are complete. The PDC driver uses
+ * threaded IRQs so that response messages are handled outside of interrupt
+ * context.
+ *
+ * The PDC driver allows multiple messages to be pending in the descriptor
+ * rings. The tx_msg_start descriptor index indicates where the last message
+ * starts. The txin_numd value at this index indicates how many descriptor
+ * indexes make up the message. Similar state is kept on the receive side. When
+ * an rx interrupt indicates a response is ready, the PDC driver processes numd
+ * descriptors from the tx and rx ring, thus processing one response at a time.
+ */
+
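A minimal sketch, assuming the standard mailbox client API and the brcm_message layout used by this driver, of how a client (for example a SPU crypto driver) hands one request to this controller. The function name, scatterlists and channel handling are illustrative only; a real client would keep the channel open until its rx_callback delivers the response.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>

/* Hypothetical client: submit one SPU request through PDC ring 0. */
static int example_send_spu_request(struct device *dev,
				    struct scatterlist *src,
				    struct scatterlist *dst)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = false,	/* PDC signals tx done via mbox_chan_txdone() */
	};
	struct brcm_message msg = {
		.type = BRCM_MESSAGE_SPU,
	};
	struct mbox_chan *chan;
	int err;

	msg.spu.src = src;	/* fragments of the SPU request */
	msg.spu.dst = dst;	/* buffers that will receive the SPU response */
	msg.ctx = NULL;		/* opaque cookie handed back with the reply */

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	err = mbox_send_message(chan, &msg);
	mbox_free_channel(chan);
	return err < 0 ? err : 0;
}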
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#define PDC_SUCCESS 0
+
+#define RING_ENTRY_SIZE sizeof(struct dma64dd)
+
+/* # entries in PDC dma ring */
+#define PDC_RING_ENTRIES 128
+#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
+/* Rings are 8k aligned */
+#define RING_ALIGN_ORDER 13
+#define RING_ALIGN BIT(RING_ALIGN_ORDER)
+
+#define RX_BUF_ALIGN_ORDER 5
+#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)
+
+/* descriptor bumping macros */
+#define XXD(x, max_mask) ((x) & (max_mask))
+#define TXD(x, max_mask) XXD((x), (max_mask))
+#define RXD(x, max_mask) XXD((x), (max_mask))
+#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
+#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
+#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
+#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
+#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
+#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
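A worked example of the wraparound arithmetic above, assuming ntxpost has been initialized to PDC_RING_ENTRIES - 1 so it can double as a mask (the initialization is outside the lines shown here):

	/* 128-entry ring, mask = 127 */
	unsigned int txin = 120, txout = 5, mask = 127;
	unsigned int active = NTXDACTIVE(txin, txout, mask);	/* (5 - 120) & 127 = 13 */
	unsigned int avail  = mask - active;			/* 114 descriptors still free */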
+
+/* Length of BCM header at start of SPU msg, in bytes */
+#define BCM_HDR_LEN 8
+
+/*
+ * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
+ * not currently support use of multiple ringsets on a single PDC engine.
+ */
+#define PDC_RINGSET 0
+
+/*
+ * Interrupt mask and status definitions. Enable interrupts for tx and rx on
+ * ring 0
+ */
+#define PDC_XMTINT_0 (24 + PDC_RINGSET)
+#define PDC_RCVINT_0 (16 + PDC_RINGSET)
+#define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0)
+#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
+#define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
+#define PDC_LAZY_FRAMECOUNT 1
+#define PDC_LAZY_TIMEOUT 10000
+#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
+#define PDC_INTMASK_OFFSET 0x24
+#define PDC_INTSTATUS_OFFSET 0x20
+#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)
+
+/*
+ * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
+ * before frame
+ */
+#define PDC_SPU2_RESP_HDR_LEN 17
+#define PDC_CKSUM_CTRL BIT(27)
+#define PDC_CKSUM_CTRL_OFFSET 0x400
+
+#define PDC_SPUM_RESP_HDR_LEN 32
+
+/*
+ * Sets the following bits for write to transmit control reg:
+ * 0 - XmtEn - enable activity on the tx channel
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_TX_CTL 0x000C0801
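A quick check of the constant above against the bit list in the comment:

	/* BIT(0) XmtEn | BIT(11) PtyChkDisable | (3 << 18) BurstLen
	 *   = 0x1 | 0x800 | 0xC0000 = 0x000C0801
	 */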
+
+/*
+ * Sets the following bits for write to receive control reg:
+ * 0 - RcvEn - enable activity on the rx channel
+ * 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
+ * 9 - SepRxHdrDescEn - place start of new frames only in descriptors
+ * that have StartOfFrame set
+ * 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
+ * remaining bytes in current frame, report error
+ * in rx frame status for current frame
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_RX_CTL 0x000C0E01
+
+#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
+
+/* descriptor flags */
+#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */
+#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */
+#define D64_CTRL1_EOF BIT(30) /* end of frame */
+#define D64_CTRL1_SOF BIT(31) /* start of frame */
+
+#define RX_STATUS_OVERFLOW 0x00800000
+#define RX_STATUS_LEN 0x0000FFFF
+
+#define PDC_TXREGS_OFFSET 0x200
+#define PDC_RXREGS_OFFSET 0x220
+
+/* Maximum size buffer the DMA engine can handle */
+#define PDC_DMA_BUF_MAX 16384
+
+struct pdc_dma_map {
+ void *ctx; /* opaque context associated with frame */
+};
+
+/* dma descriptor */
+struct dma64dd {
+ u32 ctrl1; /* misc control bits */
+ u32 ctrl2; /* buffer count and address extension */
+	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
+	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
+};
+
+/* dma registers per channel(xmt or rcv) */
+struct dma64_regs {
+ u32 control; /* enable, et al */
+ u32 ptr; /* last descriptor posted to chip */
+ u32 addrlow; /* descriptor ring base address low 32-bits */
+ u32 addrhigh; /* descriptor ring base address bits 63:32 */
+ u32 status0; /* last rx descriptor written by hw */
+ u32 status1; /* driver does not use */
+};
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
+
+/* dma registers. matches hw layout. */
+struct dma64 {
+ struct dma64_regs dmaxmt; /* dma tx */
+ u32 PAD[2];
+ struct dma64_regs dmarcv; /* dma rx */
+ u32 PAD[2];
+};
+
+/* PDC registers */
+struct pdc_regs {
+ u32 devcontrol; /* 0x000 */
+ u32 devstatus; /* 0x004 */
+ u32 PAD;
+ u32 biststatus; /* 0x00c */
+ u32 PAD[4];
+ u32 intstatus; /* 0x020 */
+ u32 intmask; /* 0x024 */
+ u32 gptimer; /* 0x028 */
+
+ u32 PAD;
+ u32 intrcvlazy_0; /* 0x030 */
+ u32 intrcvlazy_1; /* 0x034 */
+ u32 intrcvlazy_2; /* 0x038 */
+ u32 intrcvlazy_3; /* 0x03c */
+
+ u32 PAD[48];
+ u32 removed_intrecvlazy; /* 0x100 */
+ u32 flowctlthresh; /* 0x104 */
+ u32 wrrthresh; /* 0x108 */
+ u32 gmac_idle_cnt_thresh; /* 0x10c */
+
+ u32 PAD[4];
+ u32 ifioaccessaddr; /* 0x120 */
+ u32 ifioaccessbyte; /* 0x124 */
+ u32 ifioaccessdata; /* 0x128 */
+
+ u32 PAD[21];
+ u32 phyaccess; /* 0x180 */
+ u32 PAD;
+ u32 phycontrol; /* 0x188 */
+ u32 txqctl; /* 0x18c */
+ u32 rxqctl; /* 0x190 */
+ u32 gpioselect; /* 0x194 */
+ u32 gpio_output_en; /* 0x198 */
+ u32 PAD; /* 0x19c */
+ u32 txq_rxq_mem_ctl; /* 0x1a0 */
+ u32 memory_ecc_status; /* 0x1a4 */
+ u32 serdes_ctl; /* 0x1a8 */
+ u32 serdes_status0; /* 0x1ac */
+ u32 serdes_status1; /* 0x1b0 */
+ u32 PAD[11]; /* 0x1b4-1dc */
+ u32 clk_ctl_st; /* 0x1e0 */
+ u32 hw_war; /* 0x1e4 */
+ u32 pwrctl; /* 0x1e8 */
+ u32 PAD[5];
+
+#define PDC_NUM_DMA_RINGS 4
+ struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */
+
+ /* more registers follow, but we don't use them */
+};
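A quick layout check: struct dma64 is 0x40 bytes per ring (six u32 registers plus two pad words per direction), so ring 0's tx engine sits at 0x200 and its rx engine at 0x200 + 0x20 = 0x220, matching PDC_TXREGS_OFFSET and PDC_RXREGS_OFFSET, and four rings span 0x200-0x2fc as the comment says. Expressed as hypothetical compile-time assertions:

	/* BUILD_BUG_ON(offsetof(struct pdc_regs, dmaregs) != PDC_TXREGS_OFFSET);
	 * BUILD_BUG_ON(offsetof(struct pdc_regs, dmaregs[0].dmarcv) != PDC_RXREGS_OFFSET);
	 */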
+
+/* structure for allocating/freeing DMA rings */
+struct pdc_ring_alloc {
+ dma_addr_t dmabase; /* DMA address of start of ring */
+ void *vbase; /* base kernel virtual address of ring */
+ u32 size; /* ring allocation size in bytes */
+};
+
+/* PDC state structure */
+struct pdc_state {
+ /* synchronize access to this PDC state structure */
+ spinlock_t pdc_lock;
+
+ /* Index of the PDC whose state is in this structure instance */
+ u8 pdc_idx;
+
+ /* Platform device for this PDC instance */
+ struct platform_device *pdev;
+
+ /*
+ * Each PDC instance has a mailbox controller. PDC receives request
+ * messages through mailboxes, and sends response messages through the
+ * mailbox framework.
+ */
+ struct mbox_controller mbc;
+
+ unsigned int pdc_irq;
+
+ /*
+ * Last interrupt status read from PDC device. Saved in interrupt
+ * handler so the handler can clear the interrupt in the device,
+ * and the interrupt thread called later can know which interrupt
+ * bits are active.
+ */
+ unsigned long intstatus;
+
+ /* Number of bytes of receive status prior to each rx frame */
+ u32 rx_status_len;
+ /* Whether a BCM header is prepended to each frame */
+ bool use_bcm_hdr;
+ /* Sum of length of BCM header and rx status header */
+ u32 pdc_resp_hdr_len;
+
+ /* The base virtual address of DMA hw registers */
+ void __iomem *pdc_reg_vbase;
+
+ /* Pool for allocation of DMA rings */
+ struct dma_pool *ring_pool;
+
+ /* Pool for allocation of metadata buffers for response messages */
+ struct dma_pool *rx_buf_pool;
+
+ /*
+ * The base virtual address of DMA tx/rx descriptor rings. Corresponding
+ * DMA address and size of ring allocation.
+ */
+ struct pdc_ring_alloc tx_ring_alloc;
+ struct pdc_ring_alloc rx_ring_alloc;
+
+ struct pdc_regs *regs; /* start of PDC registers */
+
+ struct dma64_regs *txregs_64; /* dma tx engine registers */
+ struct dma64_regs *rxregs_64; /* dma rx engine registers */
+
+ /*
+	 * Arrays of PDC_RING_ENTRIES descriptors.
+	 * To use multiple ringsets, this needs to be extended.
+ */
+ struct dma64dd *txd_64; /* tx descriptor ring */
+ struct dma64dd *rxd_64; /* rx descriptor ring */
+
+ /* descriptor ring sizes */
+ u32 ntxd; /* # tx descriptors */
+ u32 nrxd; /* # rx descriptors */
+ u32 nrxpost; /* # rx buffers to keep posted */
+ u32 ntxpost; /* max number of tx buffers that can be posted */
+
+ /*
+ * Index of next tx descriptor to reclaim. That is, the descriptor
+ * index of the oldest tx buffer for which the host has yet to process
+ * the corresponding response.
+ */
+ u32 txin;
+
+ /*
+	 * Index of the first transmit descriptor for the sequence of
+	 * message fragments currently under construction. Used to build up
+	 * the txin_numd count for a message. Updated to txout when the host
+	 * starts a new sequence of tx buffers for a new message.
+ */
+ u32 tx_msg_start;
+
+ /* Index of next tx descriptor to post. */
+ u32 txout;
+
+ /*
+ * Number of tx descriptors associated with the message that starts
+ * at this tx descriptor index.
+ */
+ u32 txin_numd[PDC_RING_ENTRIES];
+
+ /*
+ * Index of next rx descriptor to reclaim. This is the index of
+ * the next descriptor whose data has yet to be processed by the host.
+ */
+ u32 rxin;
+
+ /*
+ * Index of the first receive descriptor for the sequence of
+ * message fragments currently under construction. Used to build up
+ * the rxin_numd count for a message. Updated to rxout when the host
+ * starts a new sequence of rx buffers for a new message.
+ */
+ u32 rx_msg_start;
+
+ /*
+ * Saved value of current hardware rx descriptor index.
+ * The last rx buffer written by the hw is the index previous to
+ * this one.
+ */
+ u32 last_rx_curr;
+
+ /* Index of next rx descriptor to post. */
+ u32 rxout;
+
+ /*
+ * opaque context associated with frame that starts at each
+ * rx ring index.
+ */
+ void *rxp_ctx[PDC_RING_ENTRIES];
+
+ /*
+ * Scatterlists used to form request and reply frames beginning at a
+ * given ring index. Retained in order to unmap each sg after reply
+ * is processed
+ */
+ struct scatterlist *src_sg[PDC_RING_ENTRIES];
+ struct scatterlist *dst_sg[PDC_RING_ENTRIES];
+
+ /*
+ * Number of rx descriptors associated with the message that starts
+ * at this descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries, then
+ * the next three descriptor indexes don't have a value set.
+ */
+ u32 rxin_numd[PDC_RING_ENTRIES];
+
+ void *resp_hdr[PDC_RING_ENTRIES];
+ dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];
+
+ struct dentry *debugfs_stats; /* debug FS stats file for this PDC */
+
+ /* counters */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 txnobuf; /* count of tx ring full */
+ u32 rxnobuf; /* count of rx ring full */
+ u32 rx_oflow; /* count of rx overflows */
+};
+
+/* Global variables */
+
+struct pdc_globals {
+ /* Actual number of SPUs in hardware, as reported by device tree */
+ u32 num_spu;
+};
+
+static struct pdc_globals pdcg;
+
+/* top level debug FS directory for PDC driver */
+static struct dentry *debugfs_dir;
+
+static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct pdc_state *pdcs;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 512;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pdcs = filp->private_data;
+ out_offset = 0;
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "SPU %u stats:\n", pdcs->pdc_idx);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "PDC requests............%u\n",
+ pdcs->pdc_requests);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "PDC responses...........%u\n",
+ pdcs->pdc_replies);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx err ring full........%u\n",
+ pdcs->txnobuf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Rx err ring full........%u\n",
+ pdcs->rxnobuf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Receive overflow........%u\n",
+ pdcs->rx_oflow);
+
+ if (out_offset > out_count)
+ out_offset = out_count;
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations pdc_debugfs_stats = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = pdc_debugfs_read,
+};
+
+/**
+ * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
+ * directory has not yet been created, create it now. Create a stats file in
+ * this directory for a SPU.
+ * @pdcs: PDC state structure
+ */
+void pdc_setup_debugfs(struct pdc_state *pdcs)
+{
+ char spu_stats_name[16];
+
+ if (!debugfs_initialized())
+ return;
+
+ snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
+ if (!debugfs_dir)
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
+ debugfs_dir, pdcs,
+ &pdc_debugfs_stats);
+}
+
+void pdc_free_debugfs(void)
+{
+ if (debugfs_dir && simple_empty(debugfs_dir)) {
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
+ }
+}
+
+/**
+ * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
+ * @pdcs: PDC state for SPU that will generate result
+ * @dma_addr: DMA address of buffer that descriptor is being built for
+ * @buf_len: Length of the receive buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
+ u32 buf_len, u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+
+ dev_dbg(dev,
+ "Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
+ pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
+
+ iowrite32(lower_32_bits(dma_addr),
+ (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
+ iowrite32(upper_32_bits(dma_addr),
+ (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
+ iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
+ iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
+ /* bump ring index and return */
+ pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
+}
+
+/**
+ * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
+ * hardware.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @dma_addr: DMA address of packet to be transmitted
+ * @buf_len: Length of tx buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
+ u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+
+ dev_dbg(dev,
+ "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
+ pdcs->pdc_idx, pdcs->txout, buf_len, flags);
+
+ iowrite32(lower_32_bits(dma_addr),
+ (void *)&pdcs->txd_64[pdcs->txout].addrlow);
+ iowrite32(upper_32_bits(dma_addr),
+ (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
+ iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
+ iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);
+
+ /* bump ring index and return */
+ pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
+}
+
+/**
+ * pdc_receive() - Receive a response message from a given SPU.
+ * @pdcs:  PDC state for the SPU to receive from
+ * @mssg:  mailbox message to be returned to client. This function sets the
+ *         context pointer on the message to help the client associate the
+ *         response with a request.
+ *
+ * When the return code indicates success, the response message is available in
+ * the receive buffers provided prior to submission of the request.
+ *
+ * Return: PDC_SUCCESS if one or more receive descriptors were processed
+ * -EAGAIN indicates that no response message is available
+ * -EIO an error occurred
+ */
+static int
+pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ u32 len, rx_status;
+ u32 num_frags;
+ int i;
+ u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
+ u32 frags_rdy; /* number of fragments ready to read */
+ u32 rx_idx; /* ring index of start of receive frame */
+ dma_addr_t resp_hdr_daddr;
+
+ spin_lock(&pdcs->pdc_lock);
+
+ /*
+ * return if a complete response message is not yet ready.
+ * rxin_numd[rxin] is the number of fragments in the next msg
+ * to read.
+ */
+ frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
+ if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
+ /* See if the hw has written more fragments than we know */
+ pdcs->last_rx_curr =
+ (ioread32((void *)&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+ frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost);
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
+ /* No response ready */
+ spin_unlock(&pdcs->pdc_lock);
+ return -EAGAIN;
+ }
+ /* can't read descriptors/data until write index is read */
+ rmb();
+ }
+
+ num_frags = pdcs->txin_numd[pdcs->txin];
+ dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
+ sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
+
+ for (i = 0; i < num_frags; i++)
+ pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);
+
+ dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ rx_idx = pdcs->rxin;
+ num_frags = pdcs->rxin_numd[rx_idx];
+ /* Return opaque context with result */
+ mssg->ctx = pdcs->rxp_ctx[rx_idx];
+ pdcs->rxp_ctx[rx_idx] = NULL;
+ resp_hdr = pdcs->resp_hdr[rx_idx];
+ resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
+ dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
+ sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);
+
+ for (i = 0; i < num_frags; i++)
+ pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);
+
+ spin_unlock(&pdcs->pdc_lock);
+
+ dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ dev_dbg(dev,
+ "PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
+ pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
+ pdcs->rxout, pdcs->last_rx_curr);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
+ /*
+ * For SPU-M, get length of response msg and rx overflow status.
+ */
+ rx_status = *((u32 *)resp_hdr);
+ len = rx_status & RX_STATUS_LEN;
+ dev_dbg(dev,
+ "SPU response length %u bytes", len);
+ if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
+ if (rx_status & RX_STATUS_OVERFLOW) {
+ dev_err_ratelimited(dev,
+ "crypto receive overflow");
+ pdcs->rx_oflow++;
+ } else {
+ dev_info_ratelimited(dev, "crypto rx len = 0");
+ }
+ return -EIO;
+ }
+ }
+
+ dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+
+ pdcs->pdc_replies++;
+ /* if we read one or more rx descriptors, claim success */
+ if (num_frags > 0)
+ return PDC_SUCCESS;
+ else
+ return -EIO;
+}
+
+/**
+ * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
+ * descriptors for a given SPU. The scatterlist buffers contain the data for a
+ * SPU request message.
+ * @pdcs:      PDC state for the SPU that will process this request
+ * @sg:        Scatterlist whose buffers contain part of the SPU request
+ *
+ * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
+ * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise
+ */
+static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 eot;
+ u32 tx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry in sg.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of tx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ /* check whether enough tx descriptors are available */
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(num_desc > tx_avail)) {
+ pdcs->txnobuf++;
+ return -ENOSPC;
+ }
+
+ /* build tx descriptors */
+ if (pdcs->tx_msg_start == pdcs->txout) {
+ /* Start of frame */
+ pdcs->txin_numd[pdcs->tx_msg_start] = 0;
+ pdcs->src_sg[pdcs->txout] = sg;
+ flags = D64_CTRL1_SOF;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
+ flags | eot);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+ }
+ sg = sg_next(sg);
+ if (!sg)
+ /* Writing last descriptor for frame */
+ flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
+ pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
+ desc_w++;
+ /* Clear start of frame after first descriptor */
+ flags &= ~D64_CTRL1_SOF;
+ }
+ pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;
+
+ return PDC_SUCCESS;
+}
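A worked example of the splitting loop above (the sizes are made up): one 40000-byte scatterlist entry, with PDC_DMA_BUF_MAX = 16384, becomes three tx descriptors.

	/* descriptor 1: 16384 bytes
	 * descriptor 2: 16384 bytes
	 * descriptor 3:  7232 bytes  (carries D64_CTRL1_EOF | D64_CTRL1_IOC when
	 *                             this is also the frame's last sg entry)
	 */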
+
+/**
+ * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
+ * ring.
+ * @pdcs: PDC state for SPU to process the request
+ *
+ * Sets the index of the last descriptor written in both the rx and tx ring.
+ *
+ * Return: PDC_SUCCESS
+ */
+static int pdc_tx_list_final(struct pdc_state *pdcs)
+{
+ /*
+ * write barrier to ensure all register writes are complete
+ * before chip starts to process new request
+ */
+ wmb();
+ iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
+ pdcs->pdc_requests++;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
+ * @pdcs: PDC state for SPU handling request
+ * @dst_sg: scatterlist providing rx buffers for response to be returned to
+ * mailbox client
+ * @ctx: Opaque context for this request
+ *
+ * Posts a single receive descriptor to hold the metadata that precedes a
+ * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
+ * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
+ * rx to indicate the start of a new message.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 if an error (e.g., rx ring is full)
+ */
+static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
+ void *ctx)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+ u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
+ dma_addr_t daddr;
+ void *vaddr;
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_pkt_cnt > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ /* allocate a buffer for the dma rx status */
+ vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /*
+ * Update msg_start indexes for both tx and rx to indicate the start
+ * of a new sequence of descriptor indexes that contain the fragments
+ * of the same message.
+ */
+ pdcs->rx_msg_start = pdcs->rxout;
+ pdcs->tx_msg_start = pdcs->txout;
+
+ /* This is always the first descriptor in the receive sequence */
+ flags = D64_CTRL1_SOF;
+ pdcs->rxin_numd[pdcs->rx_msg_start] = 1;
+
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags |= D64_CTRL1_EOT;
+
+ pdcs->rxp_ctx[pdcs->rxout] = ctx;
+ pdcs->dst_sg[pdcs->rxout] = dst_sg;
+ pdcs->resp_hdr[pdcs->rxout] = vaddr;
+ pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
+ pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
+ * descriptors for a given SPU. The caller must have already DMA mapped the
+ * scatterlist.
+ * @pdcs:       PDC state for the SPU whose receive ring gets the buffers
+ * @sg: Scatterlist whose buffers are added to the receive ring
+ *
+ * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
+ * multiple receive descriptors are written, each with a buffer <=
+ * PDC_DMA_BUF_MAX.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise (e.g., receive ring is full)
+ */
+static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry from our starting point in the scatterlist.
+ */
+ u32 num_desc;
+	u32 desc_w = 0;	/* Number of rx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(num_desc > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+ }
+ pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
+ desc_w++;
+ sg = sg_next(sg);
+ }
+ pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_irq_handler() - Interrupt handler called in interrupt context.
+ * @irq: Interrupt number that has fired
+ * @cookie: PDC state for DMA engine that generated the interrupt
+ *
+ * We have to clear the device interrupt status flags here. So cache the
+ * status for later use in the thread function. Other than that, just return
+ * WAKE_THREAD to invoke the thread function.
+ *
+ * Return: IRQ_WAKE_THREAD if interrupt is ours
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t pdc_irq_handler(int irq, void *cookie)
+{
+ struct pdc_state *pdcs = cookie;
+ u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ if (intstatus & PDC_XMTINTEN_0)
+ set_bit(PDC_XMTINT_0, &pdcs->intstatus);
+ if (intstatus & PDC_RCVINTEN_0)
+ set_bit(PDC_RCVINT_0, &pdcs->intstatus);
+
+ /* Clear interrupt flags in device */
+ iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ /* Wakeup IRQ thread */
+ if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+/**
+ * pdc_irq_thread() - Threaded IRQ handler invoked when a DMA tx has completed
+ * or data is available to receive.
+ * @irq: Interrupt number
+ * @cookie: PDC state for PDC that generated the interrupt
+ *
+ * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
+ * as many SPU response messages as are available and send each to the mailbox
+ * client.
+ *
+ * Return: IRQ_HANDLED if we recognized and handled the interrupt
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t pdc_irq_thread(int irq, void *cookie)
+{
+ struct pdc_state *pdcs = cookie;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ bool tx_int;
+ bool rx_int;
+ int rx_status;
+ struct brcm_message mssg;
+
+ tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
+ rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
+
+ if (pdcs && (tx_int || rx_int)) {
+ dev_dbg(&pdcs->pdev->dev,
+ "%s() got irq %d with tx_int %s, rx_int %s",
+ __func__, irq,
+ tx_int ? "set" : "clear", rx_int ? "set" : "clear");
+
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+
+ if (tx_int) {
+ dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
+ /* only one frame in flight at a time */
+ mbox_chan_txdone(chan, PDC_SUCCESS);
+ }
+ if (rx_int) {
+ while (1) {
+ /* Could be many frames ready */
+ memset(&mssg, 0, sizeof(mssg));
+ mssg.type = BRCM_MESSAGE_SPU;
+ rx_status = pdc_receive(pdcs, &mssg);
+ if (rx_status >= 0) {
+ dev_dbg(&pdcs->pdev->dev,
+ "%s(): invoking client rx cb",
+ __func__);
+ mbox_chan_received_data(chan, &mssg);
+ } else {
+ dev_dbg(&pdcs->pdev->dev,
+ "%s(): no SPU response available",
+ __func__);
+ break;
+ }
+ }
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
+ * descriptors in one ringset.
+ * @pdcs: PDC instance state
+ * @ringset: index of ringset being used
+ *
+ * Return: PDC_SUCCESS if ring initialized
+ * < 0 otherwise
+ */
+static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
+{
+ int i;
+ int err = PDC_SUCCESS;
+ struct dma64 *dma_reg;
+ struct device *dev = &pdcs->pdev->dev;
+ struct pdc_ring_alloc tx;
+ struct pdc_ring_alloc rx;
+
+ /* Allocate tx ring */
+ tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
+ if (!tx.vbase) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* Allocate rx ring */
+ rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
+ if (!rx.vbase) {
+ err = -ENOMEM;
+ goto fail_dealloc;
+ }
+
+ dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
+ dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
+ dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
+ dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
+
+ /* lock after ring allocation to avoid scheduling while atomic */
+ spin_lock(&pdcs->pdc_lock);
+
+ memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
+ memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
+
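+ /* Reset the software ring indexes for the newly allocated rings */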
+ pdcs->rxin = 0;
+ pdcs->rx_msg_start = 0;
+ pdcs->last_rx_curr = 0;
+ pdcs->rxout = 0;
+ pdcs->txin = 0;
+ pdcs->tx_msg_start = 0;
+ pdcs->txout = 0;
+
+ /* Set descriptor array base addresses */
+ pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
+ pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;
+
+ /* Tell device the base DMA address of each ring */
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+ iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmaxmt.addrlow);
+ iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmaxmt.addrhigh);
+
+ iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmarcv.addrlow);
+ iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmarcv.addrhigh);
+
+ /* Initialize descriptors */
+ for (i = 0; i < PDC_RING_ENTRIES; i++) {
+ /* Every tx descriptor can be used for start of frame. */
+ if (i != pdcs->ntxpost) {
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
+ (void *)&pdcs->txd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
+ D64_CTRL1_EOT,
+ (void *)&pdcs->txd_64[i].ctrl1);
+ }
+
+ /* Every rx descriptor can be used for start of frame */
+ if (i != pdcs->nrxpost) {
+ iowrite32(D64_CTRL1_SOF,
+ (void *)&pdcs->rxd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
+ (void *)&pdcs->rxd_64[i].ctrl1);
+ }
+ }
+ spin_unlock(&pdcs->pdc_lock);
+ return PDC_SUCCESS;
+
+fail_dealloc:
+ dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
+done:
+ return err;
+}
+
+static void pdc_ring_free(struct pdc_state *pdcs)
+{
+ if (pdcs->tx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
+ pdcs->tx_ring_alloc.dmabase);
+ pdcs->tx_ring_alloc.vbase = NULL;
+ }
+
+ if (pdcs->rx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
+ pdcs->rx_ring_alloc.dmabase);
+ pdcs->rx_ring_alloc.vbase = NULL;
+ }
+}
+
+/**
+ * pdc_send_data() - mailbox send_data function
+ * @chan: The mailbox channel on which the data is sent. The channel
+ * corresponds to a DMA ringset.
+ * @data: The mailbox message to be sent. The message must be a
+ * brcm_message structure.
+ *
+ * This function is registered as the send_data function for the mailbox
+ * controller. From the destination scatterlist in the mailbox message, it
+ * creates a sequence of receive descriptors in the rx ring. From the source
+ * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
+ * After creating the descriptors, it writes the rx ptr and tx ptr registers to
+ * initiate the DMA transfer.
+ *
+ * This function does the DMA map and unmap of the src and dst scatterlists in
+ * the mailbox message.
+ *
+ * Return: 0 if successful
+ * -ENOTSUPP if the mailbox message is a type this driver does not
+ * support
+ * < 0 if an error
+ */
+static int pdc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ struct device *dev = &pdcs->pdev->dev;
+ struct brcm_message *mssg = data;
+ int err = PDC_SUCCESS;
+ int src_nent;
+ int dst_nent;
+ int nent;
+
+ if (mssg->type != BRCM_MESSAGE_SPU)
+ return -ENOTSUPP;
+
+ src_nent = sg_nents(mssg->spu.src);
+ if (src_nent) {
+ nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
+ if (nent == 0)
+ return -EIO;
+ }
+
+ dst_nent = sg_nents(mssg->spu.dst);
+ if (dst_nent) {
+ nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
+ DMA_FROM_DEVICE);
+ if (nent == 0) {
+ dma_unmap_sg(dev, mssg->spu.src, src_nent,
+ DMA_TO_DEVICE);
+ return -EIO;
+ }
+ }
+
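+ /* Serialize updates to the tx and rx descriptor rings */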
+ spin_lock(&pdcs->pdc_lock);
+
+ /* Create rx descriptors to catch the SPU response */
+ err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
+ err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);
+
+ /* Create tx descriptors to submit SPU request */
+ err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
+ err |= pdc_tx_list_final(pdcs); /* initiate transfer */
+
+ spin_unlock(&pdcs->pdc_lock);
+
+ if (err)
+ dev_err(&pdcs->pdev->dev,
+ "%s failed with error %d", __func__, err);
+
+ return err;
+}
+
+static int pdc_startup(struct mbox_chan *chan)
+{
+ return pdc_ring_init(chan->con_priv, PDC_RINGSET);
+}
+
+static void pdc_shutdown(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+
+ if (pdcs)
+ dev_dbg(&pdcs->pdev->dev,
+ "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+
+ pdc_ring_free(pdcs);
+}
+
+/**
+ * pdc_hw_init() - Use the given initialization parameters to initialize the
+ * state for one of the PDCs.
+ * @pdcs: state of the PDC
+ */
+static
+void pdc_hw_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct dma64 *dma_reg;
+ int ringset = PDC_RINGSET;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
+ dev_dbg(dev, "state structure: %p",
+ pdcs);
+ dev_dbg(dev, " - base virtual addr of hw regs %p",
+ pdcs->pdc_reg_vbase);
+
+ /* initialize data structures */
+ pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
+ pdcs->txregs_64 = (struct dma64_regs *)
+ (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+ pdcs->rxregs_64 = (struct dma64_regs *)
+ (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+
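+ /* Each ring holds PDC_RING_ENTRIES descriptors; at most one less may be posted */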
+ pdcs->ntxd = PDC_RING_ENTRIES;
+ pdcs->nrxd = PDC_RING_ENTRIES;
+ pdcs->ntxpost = PDC_RING_ENTRIES - 1;
+ pdcs->nrxpost = PDC_RING_ENTRIES - 1;
+ pdcs->regs->intmask = 0;
+
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+ iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
+ iowrite32(0, (void *)&dma_reg->dmarcv.ptr);
+
+ iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);
+
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ (void *)&dma_reg->dmarcv.control);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
+ iowrite32(PDC_CKSUM_CTRL,
+ pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
+}
+
+/**
+ * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
+ * the metadata header returned with each response message.
+ * @pdcs: PDC state structure
+ *
+ * The metadata is not returned to the mailbox client. So the PDC driver
+ * manages these buffers.
+ *
+ * Return: PDC_SUCCESS
+ * -ENOMEM if pool creation fails
+ */
+static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
+ if (pdcs->use_bcm_hdr)
+ pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;
+
+ pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
+ pdcs->pdc_resp_hdr_len,
+ RX_BUF_ALIGN, 0);
+ if (!pdcs->rx_buf_pool)
+ return -ENOMEM;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
+ * specify a threaded IRQ handler for deferred handling of interrupts outside of
+ * interrupt context.
+ * @pdcs: PDC state
+ *
+ * Set the interrupt mask for transmit and receive done.
+ * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
+ *
+ * Return: PDC_SUCCESS
+ * <0 if threaded irq request fails
+ */
+static int pdc_interrupts_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev = pdcs->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+
+ pdcs->intstatus = 0;
+
+ /* interrupt configuration */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+ iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);
+
+ /* read irq from device tree */
+ pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
+ dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
+ dev_name(dev), pdcs->pdc_irq, pdcs);
+ err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
+ pdc_irq_handler,
+ pdc_irq_thread, 0, dev_name(dev), pdcs);
+ if (err) {
+ dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
+ pdcs->pdc_irq, err);
+ return err;
+ }
+ return PDC_SUCCESS;
+}
+
+static const struct mbox_chan_ops pdc_mbox_chan_ops = {
+ .send_data = pdc_send_data,
+ .startup = pdc_startup,
+ .shutdown = pdc_shutdown
+};
+
+/**
+ * pdc_mb_init() - Initialize the mailbox controller.
+ * @pdcs: PDC state
+ *
+ * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
+ * driver only uses one ringset and thus one mb channel. PDC uses the transmit
+ * complete interrupt to determine when a mailbox message has successfully been
+ * transmitted.
+ *
+ * Return: 0 on success
+ * < 0 if there is an allocation or registration failure
+ */
+static int pdc_mb_init(struct pdc_state *pdcs)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ int chan_index;
+ int err;
+
+ mbc = &pdcs->mbc;
+ mbc->dev = dev;
+ mbc->ops = &pdc_mbox_chan_ops;
+ mbc->num_chans = 1;
+ mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
+ GFP_KERNEL);
+ if (!mbc->chans)
+ return -ENOMEM;
+
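+ /* Tx completion is signaled by the tx-done interrupt, not by polling */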
+ mbc->txdone_irq = true;
+ mbc->txdone_poll = false;
+ for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
+ mbc->chans[chan_index].con_priv = pdcs;
+
+ /* Register mailbox controller */
+ err = mbox_controller_register(mbc);
+ if (err) {
+ dev_crit(dev,
+ "Failed to register PDC mailbox controller. Error %d.",
+ err);
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * pdc_dt_read() - Read application-specific data from device tree.
+ * @pdev: Platform device
+ * @pdcs: PDC state
+ *
+ * Reads the number of bytes of receive status that precede each received frame.
+ * Reads whether transmit and received frames should be preceded by an 8-byte
+ * BCM header.
+ *
+ * Return: 0 if successful
+ * -ENODEV if device not available
+ */
+static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+
+ err = of_property_read_u32(dn, "brcm,rx-status-len",
+ &pdcs->rx_status_len);
+ if (err < 0)
+ dev_err(dev,
+ "%s failed to get DMA receive status length from device tree",
+ __func__);
+
+ pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");
+
+ return 0;
+}
+
+/**
+ * pdc_probe() - Probe function for PDC driver.
+ * @pdev: PDC platform device
+ *
+ * Reserve and map register regions defined in device tree.
+ * Allocate and initialize tx and rx DMA rings.
+ * Initialize a mailbox controller for each PDC.
+ *
+ * Return: 0 if successful
+ * < 0 if an error
+ */
+static int pdc_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *pdc_regs;
+ struct pdc_state *pdcs;
+
+ /* PDC state for one SPU */
+ pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
+ if (!pdcs) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ spin_lock_init(&pdcs->pdc_lock);
+ pdcs->pdev = pdev;
+ platform_set_drvdata(pdev, pdcs);
+ pdcs->pdc_idx = pdcg.num_spu;
+ pdcg.num_spu++;
+
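+ /* PDC DMA is limited to 32-bit addresses */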
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
+ goto cleanup;
+ }
+
+ /* Create DMA pool for tx ring */
+ pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
+ RING_ALIGN, 0);
+ if (!pdcs->ring_pool) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ err = pdc_dt_read(pdev, pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pdc_regs) {
+ err = -ENODEV;
+ goto cleanup_ring_pool;
+ }
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
+
+ pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+ if (IS_ERR(pdcs->pdc_reg_vbase)) {
+ err = PTR_ERR(pdcs->pdc_reg_vbase);
+ dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
+ goto cleanup_ring_pool;
+ }
+
+ /* create rx buffer pool after dt read to know how big buffers are */
+ err = pdc_rx_buf_pool_create(pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_hw_init(pdcs);
+
+ err = pdc_interrupts_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ /* Initialize mailbox controller */
+ err = pdc_mb_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ pdcs->debugfs_stats = NULL;
+ pdc_setup_debugfs(pdcs);
+
+ dev_dbg(dev, "pdc_probe() successful");
+ return PDC_SUCCESS;
+
+cleanup_buf_pool:
+ dma_pool_destroy(pdcs->rx_buf_pool);
+
+cleanup_ring_pool:
+ dma_pool_destroy(pdcs->ring_pool);
+
+cleanup:
+ return err;
+}
+
+static int pdc_remove(struct platform_device *pdev)
+{
+ struct pdc_state *pdcs = platform_get_drvdata(pdev);
+
+ pdc_free_debugfs();
+
+ mbox_controller_unregister(&pdcs->mbc);
+
+ dma_pool_destroy(pdcs->rx_buf_pool);
+ dma_pool_destroy(pdcs->ring_pool);
+ return 0;
+}
+
+static const struct of_device_id pdc_mbox_of_match[] = {
+ {.compatible = "brcm,iproc-pdc-mbox"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
+
+static struct platform_driver pdc_mbox_driver = {
+ .probe = pdc_probe,
+ .remove = pdc_remove,
+ .driver = {
+ .name = "brcm-iproc-pdc-mbox",
+ .of_match_table = of_match_ptr(pdc_mbox_of_match),
+ },
+};
+module_platform_driver(pdc_mbox_driver);
+
+MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 58d04726cdd7..9ca96e9db6bf 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -133,6 +133,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
out:
kfree(tdev->signal);
kfree(tdev->message);
+ tdev->signal = NULL;
return ret < 0 ? ret : count;
}
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index f80acb36ff07..2dbed87094d7 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -58,29 +58,29 @@ static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
static inline void set_destination(int source, int mbox)
{
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
}
static inline void clear_destination(int source, int mbox)
{
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
}
static void __ipc_send(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
- __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
- __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
+ writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i));
+ writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox));
}
static u32 __ipc_rcv(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
- data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
+ data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i));
return data[1];
}
@@ -112,15 +112,15 @@ static irqreturn_t ipc_handler(int irq, void *dev)
u32 irq_stat;
u32 data[7];
- irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
+ irq_stat = readl_relaxed(ipc_base + IPCMMIS(1));
if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
- __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
complete(&ipc_completion);
}
if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
__ipc_rcv(IPC_RX_MBOX, data);
atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
- __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
}
return IRQ_HANDLED;
@@ -146,7 +146,7 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
if (ipc_base == NULL)
return -ENOMEM;
- __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
ipc_irq = adev->irq[0];
ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
@@ -154,20 +154,20 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
goto err;
/* Init slow mailbox */
- __raw_writel(CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE),
- ipc_base + IPCMxDSET(IPC_TX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxMSET(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
/* Init receive mailbox */
- __raw_writel(CHAN_MASK(M3_SOURCE),
- ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
- __raw_writel(CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxDSET(IPC_RX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxMSET(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
return 0;
err:
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 69f16f43f8ab..4b177fe11ebb 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
* Journal writes are marked REQ_PREFLUSH; if the original write was a
* flush, it'll wait on the journal write.
*/
- bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+ bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do {
unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
op_is_write(bio_op(bio)) &&
- (bio->bi_rw & REQ_SYNC))
+ (bio->bi_opf & REQ_SYNC))
goto rescale;
spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
s->iop.wq = bcache_wq;
return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_rw & REQ_RAHEAD) &&
- !(bio->bi_rw & REQ_META) &&
+ if (!(bio->bi_opf & REQ_RAHEAD) &&
+ !(bio->bi_opf & REQ_META) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
bch_writeback_add(dc);
s->iop.bio = bio;
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
/* Also need to send a flush to the backing device */
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 88ef6d14cce3..95a4ca6ce6ff 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
- bio->bi_rw = REQ_SYNC|REQ_META|op_flags;
+ bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..301eaf565167 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return bio->bi_rw & REQ_SYNC ||
+ return bio->bi_opf & REQ_SYNC ||
in_use <= CUTOFF_WRITEBACK;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 718744db62df..59b2c50562e4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
- !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+ !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
- return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}
/*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
- bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+ bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
bio = bio_list_pop(&bios);
- if (bio->bi_rw & REQ_PREFLUSH)
+ if (bio->bi_opf & REQ_PREFLUSH)
process_flush_bio(cache, bio);
else if (bio_op(bio) == REQ_OP_DISCARD)
process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8f2e3e2ffd26..4e9784b4e0ac 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
+ bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1915,7 +1915,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
- if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2faf49d8f4d7..bf2b2676cb8a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1542,7 +1542,7 @@ static int era_map(struct dm_target *ti, struct bio *bio)
/*
* REQ_PREFLUSH bios carry no data, so we're not interested in them.
*/
- if (!(bio->bi_rw & REQ_PREFLUSH) &&
+ if (!(bio->bi_opf & REQ_PREFLUSH) &&
(bio_data_dir(bio) == WRITE) &&
!metadata_current_marked(era->md, block)) {
defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 29b99fb6a16a..97e446d54a15 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -16,7 +16,7 @@
#define DM_MSG_PREFIX "flakey"
#define all_corrupt_bio_flags_match(bio, fc) \
- (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+ (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
/*
* Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
- "(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
+ "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
@@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;
/*
- * Map reads as normal.
+ * Map reads as normal only if corrupt_bio_byte set.
*/
- if (bio_data_dir(bio) == READ)
- goto map_bio;
+ if (bio_data_dir(bio) == READ) {
+ /* If flags were specified, only corrupt those that match. */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ goto map_bio;
+ else
+ return -EIO;
+ }
/*
* Drop writes?
@@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
/*
* Corrupt successful READs while in down state.
- * If flags were specified, only corrupt those that match.
*/
- if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
- (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+ if (fc->corrupt_bio_byte)
+ corrupt_bio_data(bio, fc);
+ else
+ return -EIO;
+ }
return error;
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index daa03e41654a..0bf1a12e35fe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -505,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index b5dbf7a0515e..4ab68033f9d1 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -555,8 +555,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
struct bio_vec bv;
size_t alloc_size;
int i = 0;
- bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
- bool fua_bio = (bio->bi_rw & REQ_FUA);
+ bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
+ bool fua_bio = (bio->bi_opf & REQ_FUA);
bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
pb->block = NULL;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7eac080fcb18..ac734e5bbe48 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -507,13 +507,27 @@ static bool __must_push_back(struct multipath *m)
static bool must_push_back_rq(struct multipath *m)
{
- return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
- __must_push_back(m));
+ bool r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
+ __must_push_back(m));
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return r;
}
static bool must_push_back_bio(struct multipath *m)
{
- return __must_push_back(m);
+ bool r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ r = __must_push_back(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return r;
}
/*
@@ -647,7 +661,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bio->bi_error = 0;
bio->bi_bdev = pgpath->path.dev->bdev;
- bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+ bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -1680,12 +1694,14 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ unsigned long flags;
+ spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
else
clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
- smp_mb__after_atomic();
+ spin_unlock_irqrestore(&m->lock, flags);
}
/*
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 84983549b5e1..1b9795d75ef8 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1270,7 +1270,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
}
rs->md.sync_speed_min = (int)value;
} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
- if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+ if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
rs->ti->error = "Only one max_recovery_rate argument pair allowed";
return -EINVAL;
}
@@ -1960,6 +1960,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->data_offset = cpu_to_le64(rdev->data_offset);
sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
sb->sectors = cpu_to_le64(rdev->sectors);
+ sb->incompat_features = cpu_to_le32(0);
/* Zero out the rest of the payload after the size of the superblock */
memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
@@ -3577,7 +3578,6 @@ static int raid_preresume(struct dm_target *ti)
/* Be prepared for mddev_resume() in raid_resume() */
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dac55b254a09..bdf1606f67bc 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct mirror *m;
struct dm_io_request io_req = {
.bi_op = REQ_OP_WRITE,
- .bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
+ .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
- if ((bio->bi_rw & REQ_PREFLUSH) ||
+ if ((bio->bi_opf & REQ_PREFLUSH) ||
(bio_op(bio) == REQ_OP_DISCARD)) {
bio_list_add(&sync, bio);
continue;
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* If region is not in-sync queue the bio.
*/
if (!r || (r == -EWOULDBLOCK)) {
- if (bio->bi_rw & REQ_RAHEAD)
+ if (bio->bi_opf & REQ_RAHEAD)
return -EWOULDBLOCK;
queue_bio(ms, bio, rw);
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & REQ_PREFLUSH) &&
+ if (!(bio->bi_opf & REQ_PREFLUSH) &&
bio_op(bio) != REQ_OP_DISCARD)
dm_rh_dec(ms->rh, bio_record->write_region);
return error;
@@ -1262,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index b11813431f31..85c32b22a420 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
region_t region = dm_rh_bio_to_region(rh, bio);
int recovering = 0;
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
rh->flush_failure = 1;
return;
}
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
+ if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 7a9661868496..1ca7463e8bb2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -78,6 +78,7 @@ void dm_start_queue(struct request_queue *q)
if (!q->mq_ops)
dm_old_start_queue(q);
else {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
blk_mq_start_stopped_hw_queues(q, true);
blk_mq_kick_requeue_list(q);
}
@@ -101,8 +102,14 @@ void dm_stop_queue(struct request_queue *q)
{
if (!q->mq_ops)
dm_old_stop_queue(q);
- else
+ else {
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_mq_cancel_requeue_work(q);
blk_mq_stop_hw_queues(q);
+ }
}
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -864,6 +871,17 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
dm_put_live_table(md, srcu_idx);
}
+ /*
+ * On suspend dm_stop_queue() handles stopping the blk-mq
+ * request_queue BUT: even though the hw_queues are marked
+ * BLK_MQ_S_STOPPED at that point there is still a race that
+ * is allowing block/blk-mq.c to call ->queue_rq against a
+ * hctx that it really shouldn't. The following check guards
+ * against this rarity (albeit _not_ race-free).
+ */
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
if (ti->type->busy && ti->type->busy(ti))
return BLK_MQ_RQ_QUEUE_BUSY;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ce2a910709f7..c65feeada864 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
}
@@ -1800,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
@@ -2286,7 +2286,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = o->dev->bdev;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH))
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED;
if (bio_data_dir(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 83f1d4667195..28193a57bf47 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
uint32_t stripe;
unsigned target_bio_nr;
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
@@ -383,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
return error;
if (error == -EOPNOTSUPP)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 197ea2003400..d1c05c12a9db 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+ return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -870,7 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
@@ -1717,7 +1717,7 @@ static void __remap_and_issue_shared_cell(void *context,
while ((bio = bio_list_pop(&cell->bios))) {
if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
bio_op(bio) == REQ_OP_DISCARD))
bio_list_add(&info->defer_bios, bio);
else {
@@ -2635,7 +2635,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 618b8752dcf1..b616f11d8473 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -37,7 +37,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
{
switch (bio_op(bio)) {
case REQ_OP_READ:
- if (bio->bi_rw & REQ_RAHEAD) {
+ if (bio->bi_opf & REQ_RAHEAD) {
/* readahead of null bytes only wastes buffer cache */
return -EIO;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 25d1d97154a8..fa9b1cb4438a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -798,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+ if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
*/
- bio->bi_rw &= ~REQ_PREFLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
@@ -964,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
- BUG_ON(bio->bi_rw & REQ_PREFLUSH);
+ BUG_ON(bio->bi_opf & REQ_PREFLUSH);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
@@ -1252,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
start_io_acct(ci.io);
- if (bio->bi_rw & REQ_PREFLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
@@ -1290,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
- if (!(bio->bi_rw & REQ_RAHEAD))
+ if (!(bio->bi_opf & REQ_RAHEAD))
queue_io(md, bio);
else
bio_io_error(bio);
@@ -2082,7 +2082,8 @@ static void unlock_fs(struct mapped_device *md)
* Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, int interruptible)
+ unsigned suspend_flags, int interruptible,
+ int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
@@ -2149,6 +2150,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* to finish.
*/
r = dm_wait_for_completion(md, interruptible);
+ if (!r)
+ set_bit(dmf_suspended_flag, &md->flags);
if (noflush)
clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -2210,12 +2213,10 @@ retry:
map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
- r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
+ r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
if (r)
goto out_unlock;
- set_bit(DMF_SUSPENDED, &md->flags);
-
dm_table_postsuspend_targets(map);
out_unlock:
@@ -2309,9 +2310,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
* would require changing .presuspend to return an error -- avoid this
* until there is a need for more elaborate variants of internal suspend.
*/
- (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
-
- set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
+ (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
+ DMF_SUSPENDED_INTERNALLY);
dm_table_postsuspend_targets(map);
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 70ff888d25d0..86f5d435901d 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
sector_t start_sector, end_sector, data_offset;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2c3ab6f5e6be..d646f6e444f0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
*/
sectors = bio_sectors(bio);
/* bio could be mergeable after passing to underlayer */
- bio->bi_rw &= ~REQ_NOMERGE;
+ bio->bi_opf &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio);
else {
- bio->bi_rw &= ~REQ_PREFLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
mddev->pers->make_request(mddev, bio);
}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4974682842ae..673efbd6fc47 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio)
if (!bio->bi_error)
multipath_end_bh_io(mp_bh, 0);
- else if (!(bio->bi_rw & REQ_RAHEAD)) {
+ else if (!(bio->bi_opf & REQ_RAHEAD)) {
/*
* oops, IO error:
*/
@@ -112,7 +112,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -135,7 +135,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+ mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
@@ -360,7 +360,7 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+ bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c3d439083212..258986a2699d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
struct md_rdev *tmp_dev;
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46168ef2e279..21dc00eb1989 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1043,8 +1043,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
unsigned long flags;
const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_rw &
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const unsigned long do_flush_fua = (bio->bi_opf &
(REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -2318,7 +2318,7 @@ read_more:
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
- = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ = r1_bio->master_bio->bi_opf & REQ_SYNC;
if (bio) {
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ed29fc899f06..0e4efcd10795 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1054,8 +1054,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
int i;
const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -1440,7 +1440,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -2533,7 +2533,7 @@ read_more:
return;
}
- do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 5504ce2bac06..51f76ddbe265 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
bio_endio(bio);
return 0;
}
- bio->bi_rw &= ~REQ_PREFLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d189e894b921..8912407a4dd0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
dd_idx = 0;
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
dd_idx++;
- if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+ if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
goto unlock_out;
@@ -1003,7 +1003,7 @@ again:
pr_debug("%s: for %llu schedule op %d on disc %d\n",
__func__, (unsigned long long)sh->sector,
- bi->bi_rw, i);
+ bi->bi_opf, i);
atomic_inc(&sh->count);
if (sh != head_sh)
atomic_inc(&head_sh->count);
@@ -1014,7 +1014,7 @@ again:
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
- bi->bi_rw |= REQ_NOMERGE;
+ bi->bi_opf |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1055,7 +1055,7 @@ again:
pr_debug("%s: for %llu schedule op %d on "
"replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
- rbi->bi_rw, i);
+ rbi->bi_opf, i);
atomic_inc(&sh->count);
if (sh != head_sh)
atomic_inc(&head_sh->count);
@@ -1088,7 +1088,7 @@ again:
if (op_is_write(op))
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %d on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
+ bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -1619,9 +1619,9 @@ again:
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
- if (wbi->bi_rw & REQ_FUA)
+ if (wbi->bi_opf & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
- if (wbi->bi_rw & REQ_SYNC)
+ if (wbi->bi_opf & REQ_SYNC)
set_bit(R5_SyncIO, &dev->flags);
if (bio_op(wbi) == REQ_OP_DISCARD)
set_bit(R5_Discard, &dev->flags);
@@ -5154,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
DEFINE_WAIT(w);
bool do_prepare;
- if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
+ if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
if (ret == 0)
@@ -5237,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
(unsigned long long)logical_sector);
sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_rw & REQ_RAHEAD), 0);
+ (bi->bi_opf & REQ_RAHEAD), 0);
if (sh) {
if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
@@ -5305,7 +5305,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
- (bi->bi_rw & REQ_SYNC) &&
+ (bi->bi_opf & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
release_stripe_plug(mddev, sh);
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 09c39346167f..ffe88bc6b813 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3378,20 +3378,28 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe,
ret = cxd2841er_get_carrier_offset_i(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBT:
ret = cxd2841er_get_carrier_offset_t(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBT2:
ret = cxd2841er_get_carrier_offset_t2(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBC_ANNEX_A:
ret = cxd2841er_get_carrier_offset_c(
priv, &carrier_offset);
+ if (ret)
+ return ret;
break;
default:
dev_dbg(&priv->i2c->dev,
@@ -3399,8 +3407,6 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe,
__func__, priv->system);
return -EINVAL;
}
- if (ret)
- return ret;
dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n",
__func__, carrier_offset);
p->frequency += carrier_offset;
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index b77b0a4dbf68..95cbc857f36e 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -100,7 +100,7 @@
#define ADV7180_REG_IDENT 0x0011
#define ADV7180_ID_7180 0x18
-#define ADV7180_REG_ICONF1 0x0040
+#define ADV7180_REG_ICONF1 0x2040
#define ADV7180_ICONF1_ACTIVE_LOW 0x01
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
@@ -113,15 +113,15 @@
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
-#define ADV7180_REG_ISR1 0x0042
-#define ADV7180_REG_ICR1 0x0043
-#define ADV7180_REG_IMR1 0x0044
-#define ADV7180_REG_IMR2 0x0048
+#define ADV7180_REG_ISR1 0x2042
+#define ADV7180_REG_ICR1 0x2043
+#define ADV7180_REG_IMR1 0x2044
+#define ADV7180_REG_IMR2 0x2048
#define ADV7180_IRQ3_AD_CHANGE 0x08
-#define ADV7180_REG_ISR3 0x004A
-#define ADV7180_REG_ICR3 0x004B
-#define ADV7180_REG_IMR3 0x004C
-#define ADV7180_REG_IMR4 0x50
+#define ADV7180_REG_ISR3 0x204A
+#define ADV7180_REG_ICR3 0x204B
+#define ADV7180_REG_IMR3 0x204C
+#define ADV7180_REG_IMR4 0x2050
#define ADV7180_REG_NTSC_V_BIT_END 0x00E6
#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 6d7cad54a65d..53030d631653 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1041,6 +1041,8 @@ static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 fps;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -1052,15 +1054,29 @@ static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
if the format is one of the CEA or DMT timings. */
v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
- timings->bt.flags &= ~V4L2_DV_FL_REDUCED_FPS;
-
/* save timings */
state->dv_timings = *timings;
/* set h/vsync polarities */
adv7511_wr_and_or(sd, 0x17, 0x9f,
- ((timings->bt.polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
- ((timings->bt.polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
+ ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
+ ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
+
+ fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
+ switch (fps) {
+ case 24:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
+ break;
+ case 25:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
+ break;
+ case 30:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
+ break;
+ default:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
+ break;
+ }
/* update quantization range based on new dv_timings */
adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index e277b7c23516..c7806ecda2dd 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -246,7 +246,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
struct video_device *vfd_enc;
struct resource *res;
int i, j, ret;
- DEFINE_DMA_ATTRS(attrs);
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -378,9 +377,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_enc_reg;
}
- /* Avoid the iommu eat big hunks */
- dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs);
-
mtk_v4l2_debug(0, "encoder registered as /dev/video%d",
vfd_enc->num);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 4afc999c0780..6b01e126fe73 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -45,7 +45,7 @@
#include <media/v4l2-ioctl.h>
#include <video/omapvrfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "omap_voutlib.h"
#include "omap_voutdef.h"
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 49de1475e473..80c79fabdf95 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -12,7 +12,7 @@
#define OMAP_VOUTDEF_H
#include <media/v4l2-ctrls.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#define YUYV_BPP 2
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 80b0d88f125c..58a25fdf0cce 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "omap_voutlib.h"
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 3df66d11c795..b7892f3efd98 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -430,14 +430,11 @@ int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp)
*/
void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
{
- if (ctx && ctx->node[0]) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ if (ctx && ctx->node[0])
dma_free_attrs(ctx->bdisp_dev->dev,
sizeof(struct bdisp_node) * MAX_NB_NODE,
- ctx->node[0], ctx->node_paddr[0], &attrs);
- }
+ ctx->node[0], ctx->node_paddr[0],
+ DMA_ATTR_WRITE_COMBINE);
}
/**
@@ -455,12 +452,10 @@ int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
unsigned int i, node_size = sizeof(struct bdisp_node);
void *base;
dma_addr_t paddr;
- DEFINE_DMA_ATTRS(attrs);
/* Allocate all the nodes within a single memory page */
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
- GFP_KERNEL | GFP_DMA, &attrs);
+ GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE);
if (!base) {
dev_err(dev, "%s no mem\n", __func__);
return -ENOMEM;
@@ -493,13 +488,9 @@ void bdisp_hw_free_filters(struct device *dev)
{
int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- if (bdisp_h_filter[0].virt) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ if (bdisp_h_filter[0].virt)
dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
- bdisp_h_filter[0].paddr, &attrs);
- }
+ bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
}
/**
@@ -516,12 +507,11 @@ int bdisp_hw_alloc_filters(struct device *dev)
unsigned int i, size;
void *base;
dma_addr_t paddr;
- DEFINE_DMA_ATTRS(attrs);
/* Allocate all the filters within a single memory page */
size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+ DMA_ATTR_WRITE_COMBINE);
if (!base)
return -ENOMEM;
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 6b17015048ae..cd0ff4a66fdc 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -171,6 +171,9 @@ struct vim2m_ctx {
int mode;
enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
/* Source and destination queue data */
struct vim2m_q_data q_data[2];
@@ -493,6 +496,9 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
f->fmt.pix.sizeimage = q_data->sizeimage;
f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
return 0;
}
@@ -549,6 +555,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
return vidioc_try_fmt(f, fmt);
}
@@ -630,8 +639,12 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
ret = vidioc_s_fmt(file2ctx(file), f);
- if (!ret)
+ if (!ret) {
ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quant = f->fmt.pix.quantization;
+ }
return ret;
}
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index 66aa7292076b..f9f878b8e0a7 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -169,7 +169,6 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
struct vivid_dev *dev = adap->priv;
struct cec_msg reply;
u8 dest = cec_msg_destination(msg);
- u16 pa;
u8 disp_ctl;
char osd[14];
@@ -178,15 +177,6 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
cec_msg_init(&reply, dest, cec_msg_initiator(msg));
switch (cec_msg_opcode(msg)) {
- case CEC_MSG_SET_STREAM_PATH:
- if (cec_is_sink(adap))
- return -ENOMSG;
- cec_ops_set_stream_path(msg, &pa);
- if (pa != adap->phys_addr)
- return -ENOMSG;
- cec_msg_active_source(&reply, adap->phys_addr);
- cec_transmit_msg(adap, &reply, false);
- break;
case CEC_MSG_SET_OSD_STRING:
if (!cec_is_sink(adap))
return -ENOMSG;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index bd4d68500085..370e16e07867 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -336,7 +336,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
+ depends on OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS && LIRC
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built into the Nokia N900 (RX51) device.
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 4e1711a40466..82fb6f2ca011 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -12,22 +12,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/wait.h>
-
-#include <plat/dmtimer.h>
-#include <plat/clock.h>
+#include <linux/pwm.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
@@ -41,100 +36,51 @@
#define WBUF_LEN 256
-#define TIMER_MAX_VALUE 0xffffffff
-
struct lirc_rx51 {
- struct omap_dm_timer *pwm_timer;
- struct omap_dm_timer *pulse_timer;
+ struct pwm_device *pwm;
+ struct hrtimer timer;
struct device *dev;
struct lirc_rx51_platform_data *pdata;
wait_queue_head_t wqueue;
- unsigned long fclk_khz;
unsigned int freq; /* carrier frequency */
unsigned int duty_cycle; /* carrier duty cycle */
- unsigned int irq_num;
- unsigned int match;
int wbuf[WBUF_LEN];
int wbuf_index;
unsigned long device_is_open;
- int pwm_timer_num;
};
-static void lirc_rx51_on(struct lirc_rx51 *lirc_rx51)
+static inline void lirc_rx51_on(struct lirc_rx51 *lirc_rx51)
{
- omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1,
- OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
+ pwm_enable(lirc_rx51->pwm);
}
-static void lirc_rx51_off(struct lirc_rx51 *lirc_rx51)
+static inline void lirc_rx51_off(struct lirc_rx51 *lirc_rx51)
{
- omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1,
- OMAP_TIMER_TRIGGER_NONE);
+ pwm_disable(lirc_rx51->pwm);
}
static int init_timing_params(struct lirc_rx51 *lirc_rx51)
{
- u32 load, match;
+ struct pwm_device *pwm = lirc_rx51->pwm;
+ int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, lirc_rx51->freq);
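+ /* period and duty are in nanoseconds, as expected by pwm_config() */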
- load = -(lirc_rx51->fclk_khz * 1000 / lirc_rx51->freq);
- match = -(lirc_rx51->duty_cycle * -load / 100);
- omap_dm_timer_set_load(lirc_rx51->pwm_timer, 1, load);
- omap_dm_timer_set_match(lirc_rx51->pwm_timer, 1, match);
- omap_dm_timer_write_counter(lirc_rx51->pwm_timer, TIMER_MAX_VALUE - 2);
- omap_dm_timer_start(lirc_rx51->pwm_timer);
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0);
- omap_dm_timer_start(lirc_rx51->pulse_timer);
+ duty = DIV_ROUND_CLOSEST(lirc_rx51->duty_cycle * period, 100);
- lirc_rx51->match = 0;
+ pwm_config(pwm, duty, period);
return 0;
}
-#define tics_after(a, b) ((long)(b) - (long)(a) < 0)
-
-static int pulse_timer_set_timeout(struct lirc_rx51 *lirc_rx51, int usec)
+static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
{
- int counter;
-
- BUG_ON(usec < 0);
-
- if (lirc_rx51->match == 0)
- counter = omap_dm_timer_read_counter(lirc_rx51->pulse_timer);
- else
- counter = lirc_rx51->match;
-
- counter += (u32)(lirc_rx51->fclk_khz * usec / (1000));
- omap_dm_timer_set_match(lirc_rx51->pulse_timer, 1, counter);
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer,
- OMAP_TIMER_INT_MATCH);
- if (tics_after(omap_dm_timer_read_counter(lirc_rx51->pulse_timer),
- counter)) {
- return 1;
- }
- return 0;
-}
-
-static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr)
-{
- unsigned int retval;
- struct lirc_rx51 *lirc_rx51 = ptr;
-
- retval = omap_dm_timer_read_status(lirc_rx51->pulse_timer);
- if (!retval)
- return IRQ_NONE;
+ struct lirc_rx51 *lirc_rx51 =
+ container_of(timer, struct lirc_rx51, timer);
+ ktime_t now;
- if (retval & ~OMAP_TIMER_INT_MATCH)
- dev_err_ratelimited(lirc_rx51->dev,
- ": Unexpected interrupt source: %x\n", retval);
-
- omap_dm_timer_write_status(lirc_rx51->pulse_timer,
- OMAP_TIMER_INT_MATCH |
- OMAP_TIMER_INT_OVERFLOW |
- OMAP_TIMER_INT_CAPTURE);
if (lirc_rx51->wbuf_index < 0) {
dev_err_ratelimited(lirc_rx51->dev,
- ": BUG wbuf_index has value of %i\n",
+ "BUG wbuf_index has value of %i\n",
lirc_rx51->wbuf_index);
goto end;
}
@@ -144,6 +90,8 @@ static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr)
* pulses until we catch up.
*/
do {
+ u64 ns;
+
if (lirc_rx51->wbuf_index >= WBUF_LEN)
goto end;
if (lirc_rx51->wbuf[lirc_rx51->wbuf_index] == -1)
@@ -154,84 +102,24 @@ static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr)
else
lirc_rx51_on(lirc_rx51);
- retval = pulse_timer_set_timeout(lirc_rx51,
- lirc_rx51->wbuf[lirc_rx51->wbuf_index]);
+ ns = 1000 * lirc_rx51->wbuf[lirc_rx51->wbuf_index];
+ hrtimer_add_expires_ns(timer, ns);
+
lirc_rx51->wbuf_index++;
- } while (retval);
+ now = timer->base->get_time();
+
+ } while (hrtimer_get_expires_tv64(timer) < now.tv64);
- return IRQ_HANDLED;
+ return HRTIMER_RESTART;
end:
/* Stop TX here */
lirc_rx51_off(lirc_rx51);
lirc_rx51->wbuf_index = -1;
- omap_dm_timer_stop(lirc_rx51->pwm_timer);
- omap_dm_timer_stop(lirc_rx51->pulse_timer);
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0);
- wake_up_interruptible(&lirc_rx51->wqueue);
-
- return IRQ_HANDLED;
-}
-
-static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51)
-{
- struct clk *clk_fclk;
- int retval, pwm_timer = lirc_rx51->pwm_timer_num;
-
- lirc_rx51->pwm_timer = omap_dm_timer_request_specific(pwm_timer);
- if (lirc_rx51->pwm_timer == NULL) {
- dev_err(lirc_rx51->dev, ": Error requesting GPT%d timer\n",
- pwm_timer);
- return -EBUSY;
- }
-
- lirc_rx51->pulse_timer = omap_dm_timer_request();
- if (lirc_rx51->pulse_timer == NULL) {
- dev_err(lirc_rx51->dev, ": Error requesting pulse timer\n");
- retval = -EBUSY;
- goto err1;
- }
-
- omap_dm_timer_set_source(lirc_rx51->pwm_timer, OMAP_TIMER_SRC_SYS_CLK);
- omap_dm_timer_set_source(lirc_rx51->pulse_timer,
- OMAP_TIMER_SRC_SYS_CLK);
-
- omap_dm_timer_enable(lirc_rx51->pwm_timer);
- omap_dm_timer_enable(lirc_rx51->pulse_timer);
-
- lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer);
- retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler,
- IRQF_SHARED, "lirc_pulse_timer", lirc_rx51);
- if (retval) {
- dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n");
- goto err2;
- }
-
- clk_fclk = omap_dm_timer_get_fclk(lirc_rx51->pwm_timer);
- lirc_rx51->fclk_khz = clk_fclk->rate / 1000;
-
- return 0;
-err2:
- omap_dm_timer_free(lirc_rx51->pulse_timer);
-err1:
- omap_dm_timer_free(lirc_rx51->pwm_timer);
-
- return retval;
-}
-
-static int lirc_rx51_free_port(struct lirc_rx51 *lirc_rx51)
-{
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0);
- free_irq(lirc_rx51->irq_num, lirc_rx51);
- lirc_rx51_off(lirc_rx51);
- omap_dm_timer_disable(lirc_rx51->pwm_timer);
- omap_dm_timer_disable(lirc_rx51->pulse_timer);
- omap_dm_timer_free(lirc_rx51->pwm_timer);
- omap_dm_timer_free(lirc_rx51->pulse_timer);
- lirc_rx51->wbuf_index = -1;
+ wake_up_interruptible(&lirc_rx51->wqueue);
- return 0;
+ return HRTIMER_NORESTART;
}
static ssize_t lirc_rx51_write(struct file *file, const char *buf,
@@ -270,8 +158,9 @@ static ssize_t lirc_rx51_write(struct file *file, const char *buf,
lirc_rx51_on(lirc_rx51);
lirc_rx51->wbuf_index = 1;
- pulse_timer_set_timeout(lirc_rx51, lirc_rx51->wbuf[0]);
-
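+ /* wbuf entries are in microseconds; scale to nanoseconds for the hrtimer */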
+ hrtimer_start(&lirc_rx51->timer,
+ ns_to_ktime(1000 * lirc_rx51->wbuf[0]),
+ HRTIMER_MODE_REL);
/*
* Don't return back to the userspace until the transfer has
* finished
@@ -371,14 +260,24 @@ static int lirc_rx51_open(struct inode *inode, struct file *file)
if (test_and_set_bit(1, &lirc_rx51->device_is_open))
return -EBUSY;
- return lirc_rx51_init_port(lirc_rx51);
+ lirc_rx51->pwm = pwm_get(lirc_rx51->dev, NULL);
+ if (IS_ERR(lirc_rx51->pwm)) {
+ int res = PTR_ERR(lirc_rx51->pwm);
+
+ dev_err(lirc_rx51->dev, "pwm_get failed: %d\n", res);
+ return res;
+ }
+
+ return 0;
}
static int lirc_rx51_release(struct inode *inode, struct file *file)
{
struct lirc_rx51 *lirc_rx51 = file->private_data;
- lirc_rx51_free_port(lirc_rx51);
+ hrtimer_cancel(&lirc_rx51->timer);
+ lirc_rx51_off(lirc_rx51);
+ pwm_put(lirc_rx51->pwm);
clear_bit(1, &lirc_rx51->device_is_open);
@@ -386,7 +285,6 @@ static int lirc_rx51_release(struct inode *inode, struct file *file)
}
static struct lirc_rx51 lirc_rx51 = {
- .freq = 38000,
.duty_cycle = 50,
.wbuf_index = -1,
};
@@ -444,9 +342,32 @@ static int lirc_rx51_resume(struct platform_device *dev)
static int lirc_rx51_probe(struct platform_device *dev)
{
+ struct pwm_device *pwm;
+
lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES;
lirc_rx51.pdata = dev->dev.platform_data;
- lirc_rx51.pwm_timer_num = lirc_rx51.pdata->pwm_timer;
+
+ if (!lirc_rx51.pdata) {
+ dev_err(&dev->dev, "Platform Data is missing\n");
+ return -ENXIO;
+ }
+
+ pwm = pwm_get(&dev->dev, NULL);
+ if (IS_ERR(pwm)) {
+ int err = PTR_ERR(pwm);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(&dev->dev, "pwm_get failed: %d\n", err);
+ return err;
+ }
+
+ /* Use default, in case userspace does not set the carrier */
+ lirc_rx51.freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_get_period(pwm));
+ pwm_put(pwm);
+
+ hrtimer_init(&lirc_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lirc_rx51.timer.function = lirc_rx51_timer_cb;
+
lirc_rx51.dev = &dev->dev;
lirc_rx51_driver.dev = &dev->dev;
lirc_rx51_driver.minor = lirc_register_driver(&lirc_rx51_driver);
@@ -457,8 +378,6 @@ static int lirc_rx51_probe(struct platform_device *dev)
lirc_rx51_driver.minor);
return lirc_rx51_driver.minor;
}
- dev_info(lirc_rx51.dev, "registration ok, minor: %d, pwm: %d\n",
- lirc_rx51_driver.minor, lirc_rx51.pwm_timer_num);
return 0;
}
@@ -468,6 +387,14 @@ static int lirc_rx51_remove(struct platform_device *dev)
return lirc_unregister_driver(lirc_rx51_driver.minor);
}
+static const struct of_device_id lirc_rx51_match[] = {
+ {
+ .compatible = "nokia,n900-ir",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lirc_rx51_match);
+
struct platform_driver lirc_rx51_platform_driver = {
.probe = lirc_rx51_probe,
.remove = lirc_rx51_remove,
@@ -475,7 +402,7 @@ struct platform_driver lirc_rx51_platform_driver = {
.resume = lirc_rx51_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lirc_rx51_match),
},
};
module_platform_driver(lirc_rx51_platform_driver);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 863f658a3fa1..59fa204b15f3 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -27,7 +27,7 @@ struct vb2_dc_buf {
unsigned long size;
void *cookie;
dma_addr_t dma_addr;
- struct dma_attrs attrs;
+ unsigned long attrs;
enum dma_data_direction dma_dir;
struct sg_table *dma_sgt;
struct frame_vector *vec;
@@ -130,12 +130,12 @@ static void vb2_dc_put(void *buf_priv)
kfree(buf->sgt_base);
}
dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- &buf->attrs);
+ buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
{
@@ -146,16 +146,16 @@ static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs,
return ERR_PTR(-ENOMEM);
if (attrs)
- buf->attrs = *attrs;
+ buf->attrs = attrs;
buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, &buf->attrs);
+ GFP_KERNEL | gfp_flags, buf->attrs);
if (!buf->cookie) {
dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
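+ /* Without DMA_ATTR_NO_KERNEL_MAPPING the cookie is a kernel virtual address */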
+ if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
buf->vaddr = buf->cookie;
/* Prevent the device from being released while the buffer is used */
@@ -189,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, &buf->attrs);
+ buf->dma_addr, buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
@@ -372,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
}
ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
- buf->size, &buf->attrs);
+ buf->size, buf->attrs);
if (ret < 0) {
dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
kfree(sgt);
@@ -421,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv)
struct page **pages;
if (sgt) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/*
* No need to sync to CPU, it's already synced to the CPU
* since the finish() memop will have been called before this.
*/
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
pages = frame_vector_pages(buf->vec);
/* sgt should exist only if vector contains pages... */
BUG_ON(IS_ERR(pages));
@@ -484,9 +481,6 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
struct sg_table *sgt;
unsigned long contig_size;
unsigned long dma_align = dma_get_cache_alignment();
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/* Only cache aligned DMA transfers are reliable */
if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -548,7 +542,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (sgt->nents <= 0) {
pr_err("failed to map scatterlist\n");
ret = -EIO;
@@ -572,7 +566,7 @@ out:
fail_map_sg:
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
fail_sgt_init:
sg_free_table(sgt);
@@ -749,7 +743,7 @@ EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
if (!dev->dma_parms) {
- dev->dma_parms = kzalloc(sizeof(dev->dma_parms), GFP_KERNEL);
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
return -ENOMEM;
}
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index a39db8a6db7a..bd82d709ee82 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -95,7 +95,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}
-static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs,
+static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
{
@@ -103,9 +103,6 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at
struct sg_table *sgt;
int ret;
int num_pages;
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
if (WARN_ON(dev == NULL))
return NULL;
@@ -144,7 +141,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents)
goto fail_map;
@@ -179,13 +176,10 @@ static void vb2_dma_sg_put(void *buf_priv)
int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
@@ -228,10 +222,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
- DEFINE_DMA_ATTRS(attrs);
struct frame_vector *vec;
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
@@ -262,7 +254,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents)
goto userptr_fail_map;
@@ -286,14 +278,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
- &attrs);
+ DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 7e8a07ed8d82..c2820a6e164d 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -33,7 +33,7 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
{
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 81ddb17575a9..4b4c0c3c3d2f 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -25,6 +25,17 @@ config ATMEL_SDRAMC
Starting with the at91sam9g45, this controller supports SDR, DDR and
LP-DDR memories.
+config ATMEL_EBI
+ bool "Atmel EBI driver"
+ default y
+ depends on ARCH_AT91 && OF
+ select MFD_SYSCON
+ help
+ Driver for the Atmel EBI controller.
+ Used to configure the EBI (external bus interface) when a device
+ tree is used. This bus supports NAND flash, external Ethernet
+ controllers, SRAMs, ATA devices, etc.
+
config TI_AEMIF
tristate "Texas Instruments AEMIF driver"
depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
@@ -104,7 +115,7 @@ config FSL_CORENET_CF
config FSL_IFC
bool
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_LAYERSCAPE
config JZ4780_NEMC
bool "Ingenic JZ4780 SoC NEMC driver"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index cb0b7a1df11a..b20ae38b5bfb 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_OF) += of_memory.o
endif
obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
+obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
new file mode 100644
index 000000000000..f87ad6f5d2dc
--- /dev/null
+++ b/drivers/memory/atmel-ebi.c
@@ -0,0 +1,766 @@
+/*
+ * EBI driver for Atmel chips
+ * inspired by the fsl weim bus driver
+ *
+ * Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-matrix.h>
+#include <linux/mfd/syscon/atmel-smc.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+struct at91sam9_smc_timings {
+ u32 ncs_rd_setup_ns;
+ u32 nrd_setup_ns;
+ u32 ncs_wr_setup_ns;
+ u32 nwe_setup_ns;
+ u32 ncs_rd_pulse_ns;
+ u32 nrd_pulse_ns;
+ u32 ncs_wr_pulse_ns;
+ u32 nwe_pulse_ns;
+ u32 nrd_cycle_ns;
+ u32 nwe_cycle_ns;
+ u32 tdf_ns;
+};
+
+struct at91sam9_smc_generic_fields {
+ struct regmap_field *setup;
+ struct regmap_field *pulse;
+ struct regmap_field *cycle;
+ struct regmap_field *mode;
+};
+
+struct at91sam9_ebi_dev_config {
+ struct at91sam9_smc_timings timings;
+ u32 mode;
+};
+
+struct at91_ebi_dev_config {
+ int cs;
+ union {
+ struct at91sam9_ebi_dev_config sam9;
+ };
+};
+
+struct at91_ebi;
+
+struct at91_ebi_dev {
+ struct list_head node;
+ struct at91_ebi *ebi;
+ u32 mode;
+ int numcs;
+ struct at91_ebi_dev_config configs[];
+};
+
+struct at91_ebi_caps {
+ unsigned int available_cs;
+ const struct reg_field *ebi_csa;
+ void (*get_config)(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf);
+ int (*xlate_config)(struct at91_ebi_dev *ebid,
+ struct device_node *configs_np,
+ struct at91_ebi_dev_config *conf);
+ int (*apply_config)(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf);
+ int (*init)(struct at91_ebi *ebi);
+};
+
+struct at91_ebi {
+ struct clk *clk;
+ struct regmap *smc;
+ struct regmap *matrix;
+
+ struct regmap_field *ebi_csa;
+
+ struct device *dev;
+ const struct at91_ebi_caps *caps;
+ struct list_head devs;
+ union {
+ struct at91sam9_smc_generic_fields sam9;
+ };
+};
+
+static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf)
+{
+ struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
+ unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
+ struct at91sam9_ebi_dev_config *config = &conf->sam9;
+ struct at91sam9_smc_timings *timings = &config->timings;
+ unsigned int val;
+
+ regmap_fields_read(fields->mode, conf->cs, &val);
+ config->mode = val & ~AT91_SMC_TDF;
+
+ val = (val & AT91_SMC_TDF) >> 16;
+ timings->tdf_ns = clk_rate * val;
+
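+ /*
+ * SETUP fields are 5 bits wide with an extra bit that adds 128 cycles;
+ * PULSE fields are 6 bits plus an extra bit worth 256 cycles; CYCLE
+ * fields are 7 bits plus a 2-bit extension in units of 256 cycles.
+ */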
+ regmap_fields_read(fields->setup, conf->cs, &val);
+ timings->ncs_rd_setup_ns = (val >> 24) & 0x1f;
+ timings->ncs_rd_setup_ns += ((val >> 29) & 0x1) * 128;
+ timings->ncs_rd_setup_ns *= clk_rate;
+ timings->nrd_setup_ns = (val >> 16) & 0x1f;
+ timings->nrd_setup_ns += ((val >> 21) & 0x1) * 128;
+ timings->nrd_setup_ns *= clk_rate;
+ timings->ncs_wr_setup_ns = (val >> 8) & 0x1f;
+ timings->ncs_wr_setup_ns += ((val >> 13) & 0x1) * 128;
+ timings->ncs_wr_setup_ns *= clk_rate;
+ timings->nwe_setup_ns = val & 0x1f;
+ timings->nwe_setup_ns += ((val >> 5) & 0x1) * 128;
+ timings->nwe_setup_ns *= clk_rate;
+
+ regmap_fields_read(fields->pulse, conf->cs, &val);
+ timings->ncs_rd_pulse_ns = (val >> 24) & 0x3f;
+ timings->ncs_rd_pulse_ns += ((val >> 30) & 0x1) * 256;
+ timings->ncs_rd_pulse_ns *= clk_rate;
+ timings->nrd_pulse_ns = (val >> 16) & 0x3f;
+ timings->nrd_pulse_ns += ((val >> 22) & 0x1) * 256;
+ timings->nrd_pulse_ns *= clk_rate;
+ timings->ncs_wr_pulse_ns = (val >> 8) & 0x3f;
+ timings->ncs_wr_pulse_ns += ((val >> 14) & 0x1) * 256;
+ timings->ncs_wr_pulse_ns *= clk_rate;
+ timings->nwe_pulse_ns = val & 0x3f;
+ timings->nwe_pulse_ns += ((val >> 6) & 0x1) * 256;
+ timings->nwe_pulse_ns *= clk_rate;
+
+ regmap_fields_read(fields->cycle, conf->cs, &val);
+ timings->nrd_cycle_ns = (val >> 16) & 0x7f;
+ timings->nrd_cycle_ns += ((val >> 23) & 0x3) * 256;
+ timings->nrd_cycle_ns *= clk_rate;
+ timings->nwe_cycle_ns = val & 0x7f;
+ timings->nwe_cycle_ns += ((val >> 7) & 0x3) * 256;
+ timings->nwe_cycle_ns *= clk_rate;
+}
+
+static int at91_xlate_timing(struct device_node *np, const char *prop,
+ u32 *val, bool *required)
+{
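+ /*
+ * Timing properties are all-or-nothing: once one atmel,smc-* timing
+ * has been provided (*required is set), every remaining property
+ * becomes mandatory.
+ */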
+ if (!of_property_read_u32(np, prop, val)) {
+ *required = true;
+ return 0;
+ }
+
+ if (*required)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int at91sam9_smc_xslate_timings(struct at91_ebi_dev *ebid,
+ struct device_node *np,
+ struct at91sam9_smc_timings *timings,
+ bool *required)
+{
+ int ret;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-rd-setup-ns",
+ &timings->ncs_rd_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nrd-setup-ns",
+ &timings->nrd_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-wr-setup-ns",
+ &timings->ncs_wr_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nwe-setup-ns",
+ &timings->nwe_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-rd-pulse-ns",
+ &timings->ncs_rd_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nrd-pulse-ns",
+ &timings->nrd_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-wr-pulse-ns",
+ &timings->ncs_wr_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nwe-pulse-ns",
+ &timings->nwe_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nwe-cycle-ns",
+ &timings->nwe_cycle_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nrd-cycle-ns",
+ &timings->nrd_cycle_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-tdf-ns",
+ &timings->tdf_ns, required);
+
+out:
+ if (ret)
+ dev_err(ebid->ebi->dev,
+ "missing or invalid timings definition in %s",
+ np->full_name);
+
+ return ret;
+}
+
+static int at91sam9_ebi_xslate_config(struct at91_ebi_dev *ebid,
+ struct device_node *np,
+ struct at91_ebi_dev_config *conf)
+{
+ struct at91sam9_ebi_dev_config *config = &conf->sam9;
+ bool required = false;
+ const char *tmp_str;
+ u32 tmp;
+ int ret;
+
+ ret = of_property_read_u32(np, "atmel,smc-bus-width", &tmp);
+ if (!ret) {
+ switch (tmp) {
+ case 8:
+ config->mode |= AT91_SMC_DBW_8;
+ break;
+
+ case 16:
+ config->mode |= AT91_SMC_DBW_16;
+ break;
+
+ case 32:
+ config->mode |= AT91_SMC_DBW_32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ required = true;
+ }
+
+ if (of_property_read_bool(np, "atmel,smc-tdf-optimized")) {
+ config->mode |= AT91_SMC_TDFMODE_OPTIMIZED;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-byte-access-type", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "write")) {
+ config->mode |= AT91_SMC_BAT_WRITE;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-read-mode", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "nrd")) {
+ config->mode |= AT91_SMC_READMODE_NRD;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-write-mode", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "nwe")) {
+ config->mode |= AT91_SMC_WRITEMODE_NWE;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-exnw-mode", &tmp_str);
+ if (tmp_str) {
+ if (!strcmp(tmp_str, "frozen"))
+ config->mode |= AT91_SMC_EXNWMODE_FROZEN;
+ else if (!strcmp(tmp_str, "ready"))
+ config->mode |= AT91_SMC_EXNWMODE_READY;
+ else if (strcmp(tmp_str, "disabled"))
+ return -EINVAL;
+
+ required = true;
+ }
+
+ ret = of_property_read_u32(np, "atmel,smc-page-mode", &tmp);
+ if (!ret) {
+ switch (tmp) {
+ case 4:
+ config->mode |= AT91_SMC_PS_4;
+ break;
+
+ case 8:
+ config->mode |= AT91_SMC_PS_8;
+ break;
+
+ case 16:
+ config->mode |= AT91_SMC_PS_16;
+ break;
+
+ case 32:
+ config->mode |= AT91_SMC_PS_32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ config->mode |= AT91_SMC_PMEN;
+ required = true;
+ }
+
+ ret = at91sam9_smc_xslate_timings(ebid, np, &config->timings,
+ &required);
+ if (ret)
+ return ret;
+
+ return required;
+}
+
+static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf)
+{
+ unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
+ struct at91sam9_ebi_dev_config *config = &conf->sam9;
+ struct at91sam9_smc_timings *timings = &config->timings;
+ struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
+ u32 coded_val;
+ u32 val;
+
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->ncs_rd_setup_ns);
+ val = AT91SAM9_SMC_NCS_NRDSETUP(coded_val);
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->nrd_setup_ns);
+ val |= AT91SAM9_SMC_NRDSETUP(coded_val);
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->ncs_wr_setup_ns);
+ val |= AT91SAM9_SMC_NCS_WRSETUP(coded_val);
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->nwe_setup_ns);
+ val |= AT91SAM9_SMC_NWESETUP(coded_val);
+ regmap_fields_write(fields->setup, conf->cs, val);
+
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->ncs_rd_pulse_ns);
+ val = AT91SAM9_SMC_NCS_NRDPULSE(coded_val);
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->nrd_pulse_ns);
+ val |= AT91SAM9_SMC_NRDPULSE(coded_val);
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->ncs_wr_pulse_ns);
+ val |= AT91SAM9_SMC_NCS_WRPULSE(coded_val);
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->nwe_pulse_ns);
+ val |= AT91SAM9_SMC_NWEPULSE(coded_val);
+ regmap_fields_write(fields->pulse, conf->cs, val);
+
+ coded_val = at91sam9_smc_cycle_ns_to_cycles(clk_rate,
+ timings->nrd_cycle_ns);
+ val = AT91SAM9_SMC_NRDCYCLE(coded_val);
+ coded_val = at91sam9_smc_cycle_ns_to_cycles(clk_rate,
+ timings->nwe_cycle_ns);
+ val |= AT91SAM9_SMC_NWECYCLE(coded_val);
+ regmap_fields_write(fields->cycle, conf->cs, val);
+
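+ /* Clamp the data float time to the maximum the TDF field can encode */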
+ val = DIV_ROUND_UP(timings->tdf_ns, clk_rate);
+ if (val > AT91_SMC_TDF_MAX)
+ val = AT91_SMC_TDF_MAX;
+ regmap_fields_write(fields->mode, conf->cs,
+ config->mode | AT91_SMC_TDF_(val));
+
+ return 0;
+}
+
+static int at91sam9_ebi_init(struct at91_ebi *ebi)
+{
+ struct at91sam9_smc_generic_fields *fields = &ebi->sam9;
+ struct reg_field field = REG_FIELD(0, 0, 31);
+
+ field.id_size = fls(ebi->caps->available_cs);
+ field.id_offset = AT91SAM9_SMC_GENERIC_BLK_SZ;
+
+ field.reg = AT91SAM9_SMC_SETUP(AT91SAM9_SMC_GENERIC);
+ fields->setup = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->setup))
+ return PTR_ERR(fields->setup);
+
+ field.reg = AT91SAM9_SMC_PULSE(AT91SAM9_SMC_GENERIC);
+ fields->pulse = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->pulse))
+ return PTR_ERR(fields->pulse);
+
+ field.reg = AT91SAM9_SMC_CYCLE(AT91SAM9_SMC_GENERIC);
+ fields->cycle = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->cycle))
+ return PTR_ERR(fields->cycle);
+
+ field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
+ fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->mode))
+ return PTR_ERR(fields->mode);
+
+ return 0;
+}
+
+static int sama5d3_ebi_init(struct at91_ebi *ebi)
+{
+ struct at91sam9_smc_generic_fields *fields = &ebi->sam9;
+ struct reg_field field = REG_FIELD(0, 0, 31);
+
+ field.id_size = fls(ebi->caps->available_cs);
+ field.id_offset = SAMA5_SMC_GENERIC_BLK_SZ;
+
+ field.reg = AT91SAM9_SMC_SETUP(SAMA5_SMC_GENERIC);
+ fields->setup = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->setup))
+ return PTR_ERR(fields->setup);
+
+ field.reg = AT91SAM9_SMC_PULSE(SAMA5_SMC_GENERIC);
+ fields->pulse = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->pulse))
+ return PTR_ERR(fields->pulse);
+
+ field.reg = AT91SAM9_SMC_CYCLE(SAMA5_SMC_GENERIC);
+ fields->cycle = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->cycle))
+ return PTR_ERR(fields->cycle);
+
+ field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC);
+ fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->mode))
+ return PTR_ERR(fields->mode);
+
+ return 0;
+}
+
+static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np,
+ int reg_cells)
+{
+ const struct at91_ebi_caps *caps = ebi->caps;
+ struct at91_ebi_dev_config conf = { };
+ struct device *dev = ebi->dev;
+ struct at91_ebi_dev *ebid;
+ int ret, numcs = 0, i;
+ bool apply = false;
+
+ numcs = of_property_count_elems_of_size(np, "reg",
+ reg_cells * sizeof(u32));
+ if (numcs <= 0) {
+ dev_err(dev, "invalid reg property in %s\n", np->full_name);
+ return -EINVAL;
+ }
+
+ ebid = devm_kzalloc(ebi->dev,
+ sizeof(*ebid) + (numcs * sizeof(*ebid->configs)),
+ GFP_KERNEL);
+ if (!ebid)
+ return -ENOMEM;
+
+ ebid->ebi = ebi;
+
+ ret = caps->xlate_config(ebid, np, &conf);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ apply = true;
+
+ for (i = 0; i < numcs; i++) {
+ u32 cs;
+
+ ret = of_property_read_u32_index(np, "reg", i * reg_cells,
+ &cs);
+ if (ret)
+ return ret;
+
+ if (cs > AT91_MATRIX_EBI_NUM_CS ||
+ !(ebi->caps->available_cs & BIT(cs))) {
+ dev_err(dev, "invalid reg property in %s\n",
+ np->full_name);
+ return -EINVAL;
+ }
+
+ ebid->configs[i].cs = cs;
+
+ if (apply) {
+ conf.cs = cs;
+ ret = caps->apply_config(ebid, &conf);
+ if (ret)
+ return ret;
+ }
+
+ caps->get_config(ebid, &ebid->configs[i]);
+
+ /*
+ * Attach the EBI device to the generic SMC logic if at least
+ * one "atmel,smc-" property is present.
+ */
+ if (ebi->ebi_csa && apply)
+ regmap_field_update_bits(ebi->ebi_csa,
+ BIT(cs), 0);
+ }
+
+ list_add_tail(&ebid->node, &ebi->devs);
+
+ return 0;
+}
+
+static const struct reg_field at91sam9260_ebi_csa =
+ REG_FIELD(AT91SAM9260_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9260_ebi_caps = {
+ .available_cs = 0xff,
+ .ebi_csa = &at91sam9260_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9261_ebi_csa =
+ REG_FIELD(AT91SAM9261_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9261_ebi_caps = {
+ .available_cs = 0xff,
+ .ebi_csa = &at91sam9261_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9263_ebi0_csa =
+ REG_FIELD(AT91SAM9263_MATRIX_EBI0CSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9263_ebi0_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9263_ebi0_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9263_ebi1_csa =
+ REG_FIELD(AT91SAM9263_MATRIX_EBI1CSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9263_ebi1_caps = {
+ .available_cs = 0x7,
+ .ebi_csa = &at91sam9263_ebi1_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9rl_ebi_csa =
+ REG_FIELD(AT91SAM9RL_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9rl_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9rl_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9g45_ebi_csa =
+ REG_FIELD(AT91SAM9G45_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9g45_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9g45_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct at91_ebi_caps at91sam9x5_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9263_ebi0_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct at91_ebi_caps sama5d3_ebi_caps = {
+ .available_cs = 0xf,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = sama5d3_ebi_init,
+};
+
+static const struct of_device_id at91_ebi_id_table[] = {
+ {
+ .compatible = "atmel,at91sam9260-ebi",
+ .data = &at91sam9260_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9261-ebi",
+ .data = &at91sam9261_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-ebi0",
+ .data = &at91sam9263_ebi0_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-ebi1",
+ .data = &at91sam9263_ebi1_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9rl-ebi",
+ .data = &at91sam9rl_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-ebi",
+ .data = &at91sam9g45_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-ebi",
+ .data = &at91sam9x5_ebi_caps,
+ },
+ {
+ .compatible = "atmel,sama5d3-ebi",
+ .data = &sama5d3_ebi_caps,
+ },
+ { /* sentinel */ }
+};
+
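+/*
+ * Mark the child node "disabled" in the live device tree so that the
+ * device is not probed when the bus is populated.
+ */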
+static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
+{
+ struct device *dev = ebi->dev;
+ struct property *newprop;
+
+ newprop = devm_kzalloc(dev, sizeof(*newprop), GFP_KERNEL);
+ if (!newprop)
+ return -ENOMEM;
+
+ newprop->name = devm_kstrdup(dev, "status", GFP_KERNEL);
+ if (!newprop->name)
+ return -ENOMEM;
+
+ newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
+ if (!newprop->value)
+ return -ENOMEM;
+
+ newprop->length = sizeof("disabled");
+
+ return of_update_property(np, newprop);
+}
+
+static int at91_ebi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+ const struct of_device_id *match;
+ struct at91_ebi *ebi;
+ int ret, reg_cells;
+ struct clk *clk;
+ u32 val;
+
+ match = of_match_device(at91_ebi_id_table, dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ ebi = devm_kzalloc(dev, sizeof(*ebi), GFP_KERNEL);
+ if (!ebi)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ebi->devs);
+ ebi->caps = match->data;
+ ebi->dev = dev;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ebi->clk = clk;
+
+ ebi->smc = syscon_regmap_lookup_by_phandle(np, "atmel,smc");
+ if (IS_ERR(ebi->smc))
+ return PTR_ERR(ebi->smc);
+
+ /*
+ * The sama5d3 does not provide an EBICSA register and thus does not
+ * need to access the matrix registers.
+ */
+ if (ebi->caps->ebi_csa) {
+ ebi->matrix =
+ syscon_regmap_lookup_by_phandle(np, "atmel,matrix");
+ if (IS_ERR(ebi->matrix))
+ return PTR_ERR(ebi->matrix);
+
+ ebi->ebi_csa = regmap_field_alloc(ebi->matrix,
+ *ebi->caps->ebi_csa);
+ if (IS_ERR(ebi->ebi_csa))
+ return PTR_ERR(ebi->ebi_csa);
+ }
+
+ ret = ebi->caps->init(ebi);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "#address-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells = val;
+
+ ret = of_property_read_u32(np, "#size-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells += val;
+
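+ /* Each child "reg" entry spans #address-cells + #size-cells u32 cells */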
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "reg", NULL))
+ continue;
+
+ ret = at91_ebi_dev_setup(ebi, child, reg_cells);
+ if (ret) {
+ dev_err(dev, "failed to configure EBI bus for %s, disabling the device",
+ child->full_name);
+
+ ret = at91_ebi_dev_disable(ebi, child);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static struct platform_driver at91_ebi_driver = {
+ .driver = {
+ .name = "atmel-ebi",
+ .of_match_table = at91_ebi_id_table,
+ },
+};
+builtin_platform_driver_probe(at91_ebi_driver, at91_ebi_probe);
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index a3ebc8a87479..53a341f3b305 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -1,6 +1,8 @@
/*
* Atmel (Multi-port DDR-)SDRAM Controller driver
*
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
* Copyright (C) 2014 Atmel
*
* This program is free software: you can redistribute it and/or modify
@@ -20,7 +22,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -48,7 +50,6 @@ static const struct of_device_id atmel_ramc_of_match[] = {
{ .compatible = "atmel,sama5d3-ddramc", .data = &sama5d3_caps, },
{},
};
-MODULE_DEVICE_TABLE(of, atmel_ramc_of_match);
static int atmel_ramc_probe(struct platform_device *pdev)
{
@@ -90,8 +91,4 @@ static int __init atmel_ramc_init(void)
{
return platform_driver_register(&atmel_ramc_driver);
}
-module_init(atmel_ramc_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
-MODULE_DESCRIPTION("Atmel (Multi-port DDR-)SDRAM Controller");
+device_initcall(atmel_ramc_init);
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 904b4af5f142..1b182b117f9c 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -31,7 +31,9 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/fsl_ifc.h>
-#include <asm/prom.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index f6b57579185a..4afbc412f959 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -21,19 +21,50 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <soc/mediatek/smi.h>
+#include <dt-bindings/memory/mt2701-larb-port.h>
#define SMI_LARB_MMU_EN 0xf00
+#define REG_SMI_SECUR_CON_BASE 0x5c0
+
+/* every register controls 8 ports, at a register offset of 0x4 */
+#define REG_SMI_SECUR_CON_OFFSET(id) (((id) >> 3) << 2)
+#define REG_SMI_SECUR_CON_ADDR(id) \
+ (REG_SMI_SECUR_CON_BASE + REG_SMI_SECUR_CON_OFFSET(id))
+
+/*
+ * every port has 4 bits of control: bit[port + 3] selects virtual or physical,
+ * bit[port + 2 : port + 1] select the domain, and bit[port] selects secure
+ * or non-secure.
+ */
+#define SMI_SECUR_CON_VAL_MSK(id) (~(0xf << (((id) & 0x7) << 2)))
+#define SMI_SECUR_CON_VAL_VIRT(id) BIT((((id) & 0x7) << 2) + 3)
+/* mt2701 domain should be set to 3 */
+#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
+
+struct mtk_smi_larb_gen {
+ int port_in_larb[MTK_LARB_NR_MAX + 1];
+ void (*config_port)(struct device *);
+};
struct mtk_smi {
- struct device *dev;
- struct clk *clk_apb, *clk_smi;
+ struct device *dev;
+ struct clk *clk_apb, *clk_smi;
+ struct clk *clk_async; /* only needed by mt2701 */
+ void __iomem *smi_ao_base;
};
struct mtk_smi_larb { /* larb: local arbiter */
- struct mtk_smi smi;
- void __iomem *base;
- struct device *smi_common_dev;
- u32 *mmu;
+ struct mtk_smi smi;
+ void __iomem *base;
+ struct device *smi_common_dev;
+ const struct mtk_smi_larb_gen *larb_gen;
+ int larbid;
+ u32 *mmu;
+};
+
+enum mtk_smi_gen {
+ MTK_SMI_GEN1,
+ MTK_SMI_GEN2
};
static int mtk_smi_enable(const struct mtk_smi *smi)
@@ -71,6 +102,7 @@ static void mtk_smi_disable(const struct mtk_smi *smi)
int mtk_smi_larb_get(struct device *larbdev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
int ret;
@@ -87,7 +119,7 @@ int mtk_smi_larb_get(struct device *larbdev)
}
/* Configure the iommu info for this larb */
- writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+ larb_gen->config_port(larbdev);
return 0;
}
@@ -126,6 +158,45 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
+static void mtk_smi_larb_config_port(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+}
+
+
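+/*
+ * Gen1 larbs (e.g. mt2701) have no per-larb MMU_EN register; the iommu
+ * enable bits live in the SMI common "security control" registers, one
+ * 4-bit field per m4u port.
+ */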
+static void mtk_smi_larb_config_port_gen1(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+ int i, m4u_port_id, larb_port_num;
+ u32 sec_con_val, reg_val;
+
+ m4u_port_id = larb_gen->port_in_larb[larb->larbid];
+ larb_port_num = larb_gen->port_in_larb[larb->larbid + 1]
+ - larb_gen->port_in_larb[larb->larbid];
+
+ for (i = 0; i < larb_port_num; i++, m4u_port_id++) {
+ if (*larb->mmu & BIT(i)) {
+ /* bit[port + 3] controls the virtual or physical */
+ sec_con_val = SMI_SECUR_CON_VAL_VIRT(m4u_port_id);
+ } else {
+ /* do not need to enable m4u for this port */
+ continue;
+ }
+ reg_val = readl(common->smi_ao_base
+ + REG_SMI_SECUR_CON_ADDR(m4u_port_id));
+ reg_val &= SMI_SECUR_CON_VAL_MSK(m4u_port_id);
+ reg_val |= sec_con_val;
+ reg_val |= SMI_SECUR_CON_VAL_DOMAIN(m4u_port_id);
+ writel(reg_val,
+ common->smi_ao_base
+ + REG_SMI_SECUR_CON_ADDR(m4u_port_id));
+ }
+}
+
static void
mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
{
@@ -137,6 +208,31 @@ static const struct component_ops mtk_smi_larb_component_ops = {
.unbind = mtk_smi_larb_unbind,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
+ /* mt8173 does not need the port_in_larb table */
+ .config_port = mtk_smi_larb_config_port,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
+ .port_in_larb = {
+ LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
+ LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
+ },
+ .config_port = mtk_smi_larb_config_port_gen1,
+};
+
+static const struct of_device_id mtk_smi_larb_of_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-smi-larb",
+ .data = &mtk_smi_larb_mt8173
+ },
+ {
+ .compatible = "mediatek,mt2701-smi-larb",
+ .data = &mtk_smi_larb_mt2701
+ },
+ {}
+};
+
static int mtk_smi_larb_probe(struct platform_device *pdev)
{
struct mtk_smi_larb *larb;
@@ -144,14 +240,20 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *smi_node;
struct platform_device *smi_pdev;
+ const struct of_device_id *of_id;
if (!dev->pm_domain)
return -EPROBE_DEFER;
+ of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
if (!larb)
return -ENOMEM;
+ larb->larb_gen = of_id->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
larb->base = devm_ioremap_resource(dev, res);
if (IS_ERR(larb->base))
@@ -191,24 +293,34 @@ static int mtk_smi_larb_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mtk_smi_larb_of_ids[] = {
- { .compatible = "mediatek,mt8173-smi-larb",},
- {}
-};
-
static struct platform_driver mtk_smi_larb_driver = {
.probe = mtk_smi_larb_probe,
- .remove = mtk_smi_larb_remove,
+ .remove = mtk_smi_larb_remove,
.driver = {
.name = "mtk-smi-larb",
.of_match_table = mtk_smi_larb_of_ids,
}
};
+static const struct of_device_id mtk_smi_common_of_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-smi-common",
+ .data = (void *)MTK_SMI_GEN2
+ },
+ {
+ .compatible = "mediatek,mt2701-smi-common",
+ .data = (void *)MTK_SMI_GEN1
+ },
+ {}
+};
+
static int mtk_smi_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_smi *common;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ enum mtk_smi_gen smi_gen;
if (!dev->pm_domain)
return -EPROBE_DEFER;
@@ -226,6 +338,29 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (IS_ERR(common->clk_smi))
return PTR_ERR(common->clk_smi);
+ of_id = of_match_node(mtk_smi_common_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ /*
+ * For MTK SMI gen 1 we need the AO (always-on) register base to
+ * configure the m4u ports, and the async clock must be enabled to
+ * bridge the SMI clock into the EMI clock domain. MTK SMI gen 2 has
+ * no SMI AO base.
+ */
+ smi_gen = (enum mtk_smi_gen)of_id->data;
+ if (smi_gen == MTK_SMI_GEN1) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ common->smi_ao_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(common->smi_ao_base))
+ return PTR_ERR(common->smi_ao_base);
+
+ common->clk_async = devm_clk_get(dev, "async");
+ if (IS_ERR(common->clk_async))
+ return PTR_ERR(common->clk_async);
+
+ clk_prepare_enable(common->clk_async);
+ }
pm_runtime_enable(dev);
platform_set_drvdata(pdev, common);
return 0;
@@ -237,11 +372,6 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mtk_smi_common_of_ids[] = {
- { .compatible = "mediatek,mt8173-smi-common", },
- {}
-};
-
static struct platform_driver mtk_smi_common_driver = {
.probe = mtk_smi_common_probe,
.remove = mtk_smi_common_remove,
@@ -272,4 +402,5 @@ err_unreg_smi:
platform_driver_unregister(&mtk_smi_common_driver);
return ret;
}
+
subsys_initcall(mtk_smi_init);
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 4721b591994f..869c83fb3c5d 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -20,7 +20,6 @@
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -1807,7 +1806,6 @@ static const struct of_device_id gpmc_dt_ids[] = {
{ .compatible = "ti,am3352-gpmc" }, /* am335x devices */
{ }
};
-MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
/**
* gpmc_read_settings_dt - read gpmc settings from device-tree
@@ -2154,68 +2152,6 @@ err:
return ret;
}
-static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- return 1; /* we're input only */
-}
-
-static int gpmc_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return 0; /* we're input only */
-}
-
-static int gpmc_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- return -EINVAL; /* we're input only */
-}
-
-static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
-}
-
-static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- u32 reg;
-
- offset += 8;
-
- reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
-
- return !!reg;
-}
-
-static int gpmc_gpio_init(struct gpmc_device *gpmc)
-{
- int ret;
-
- gpmc->gpio_chip.parent = gpmc->dev;
- gpmc->gpio_chip.owner = THIS_MODULE;
- gpmc->gpio_chip.label = DEVICE_NAME;
- gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
- gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
- gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
- gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
- gpmc->gpio_chip.set = gpmc_gpio_set;
- gpmc->gpio_chip.get = gpmc_gpio_get;
- gpmc->gpio_chip.base = -1;
-
- ret = gpiochip_add(&gpmc->gpio_chip);
- if (ret < 0) {
- dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void gpmc_gpio_exit(struct gpmc_device *gpmc)
-{
- gpiochip_remove(&gpmc->gpio_chip);
-}
-
static int gpmc_probe_dt(struct platform_device *pdev)
{
int ret;
@@ -2280,7 +2216,69 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
{
return 0;
}
-#endif
+#endif /* CONFIG_OF */
+
+static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ return 1; /* we're input only */
+}
+
+static int gpmc_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return 0; /* we're input only */
+}
+
+static int gpmc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return -EINVAL; /* we're input only */
+}
+
+static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+}
+
+static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg;
+
+ offset += 8;
+
+ reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
+
+ return !!reg;
+}
+
+static int gpmc_gpio_init(struct gpmc_device *gpmc)
+{
+ int ret;
+
+ gpmc->gpio_chip.parent = gpmc->dev;
+ gpmc->gpio_chip.owner = THIS_MODULE;
+ gpmc->gpio_chip.label = DEVICE_NAME;
+ gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
+ gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
+ gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
+ gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
+ gpmc->gpio_chip.set = gpmc_gpio_set;
+ gpmc->gpio_chip.get = gpmc_gpio_get;
+ gpmc->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&gpmc->gpio_chip);
+ if (ret < 0) {
+ dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gpmc_gpio_exit(struct gpmc_device *gpmc)
+{
+ gpiochip_remove(&gpmc->gpio_chip);
+}
static int gpmc_probe(struct platform_device *pdev)
{
@@ -2436,15 +2434,7 @@ static __init int gpmc_init(void)
{
return platform_driver_register(&gpmc_driver);
}
-
-static __exit void gpmc_exit(void)
-{
- platform_driver_unregister(&gpmc_driver);
-
-}
-
postcore_initcall(gpmc_init);
-module_exit(gpmc_exit);
static struct omap3_gpmc_regs gpmc_context;
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index 96756fb4d6bd..bf827a666694 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -11,7 +11,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -91,17 +91,17 @@ static int exynos_srom_configure_bank(struct exynos_srom *srom,
if (width == 2)
cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
- bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW);
+ bw = readl_relaxed(srom->reg_base + EXYNOS_SROM_BW);
bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
- __raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW);
+ writel_relaxed(bw, srom->reg_base + EXYNOS_SROM_BW);
- __raw_writel(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
- (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
- (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
- (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
- (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
- (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
- srom->reg_base + EXYNOS_SROM_BC0 + bank);
+ writel_relaxed(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+ (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+ (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+ (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+ (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+ (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+ srom->reg_base + EXYNOS_SROM_BC0 + bank);
return 0;
}
@@ -134,7 +134,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
- sizeof(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
if (!srom->reg_offset) {
iounmap(srom->reg_base);
return -ENOMEM;
@@ -159,16 +159,6 @@ static int exynos_srom_probe(struct platform_device *pdev)
return of_platform_populate(np, NULL, NULL, dev);
}
-static int exynos_srom_remove(struct platform_device *pdev)
-{
- struct exynos_srom *srom = platform_get_drvdata(pdev);
-
- kfree(srom->reg_offset);
- iounmap(srom->reg_base);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static void exynos_srom_save(void __iomem *base,
struct exynos_srom_reg_dump *rd,
@@ -211,21 +201,16 @@ static const struct of_device_id of_exynos_srom_ids[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, of_exynos_srom_ids);
static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
static struct platform_driver exynos_srom_driver = {
.probe = exynos_srom_probe,
- .remove = exynos_srom_remove,
.driver = {
.name = "exynos-srom",
.of_match_table = of_exynos_srom_ids,
.pm = &exynos_srom_pm_ops,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(exynos_srom_driver);
-
-MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
-MODULE_DESCRIPTION("Exynos SROM Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(exynos_srom_driver);
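
The exynos-srom hunk above turns a modular driver into builtin-only code: the remove() path and MODULE_* boilerplate go away, and userspace unbind is blocked instead. A minimal sketch of that shape, using a hypothetical "foo" platform driver (not part of this patch):

#include <linux/init.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* claim resources with devm_* helpers so no remove() is needed */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name			= "foo",
		.suppress_bind_attrs	= true,	/* no remove(), so block manual unbind */
	},
};
builtin_platform_driver(foo_driver);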
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a1ae0cc2b86d..a4803ac192bb 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -186,8 +186,10 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node)
timing = &mc->timings[i++];
err = load_one_timing(mc, timing, child);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
@@ -206,15 +208,13 @@ static int tegra_mc_setup_timings(struct tegra_mc *mc)
for_each_child_of_node(mc->dev->of_node, node) {
err = of_property_read_u32(node, "nvidia,ram-code",
&node_ram_code);
- if (err || (node_ram_code != ram_code)) {
- of_node_put(node);
+ if (err || (node_ram_code != ram_code))
continue;
- }
err = load_timings(mc, node);
+ of_node_put(node);
if (err)
return err;
- of_node_put(node);
break;
}
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 3dac7be39654..06cc781ebac1 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -970,8 +970,10 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
timing = &emc->timings[i++];
err = load_one_timing_from_dt(emc, timing, child);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
@@ -995,10 +997,8 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
u32 value;
err = of_property_read_u32(np, "nvidia,ram-code", &value);
- if (err || (value != ram_code)) {
- of_node_put(np);
+ if (err || (value != ram_code))
continue;
- }
return np;
}
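
The two Tegra hunks above fix child-node reference counting around for_each_child_of_node(): the iterator itself drops the previous child's reference on each step, so of_node_put() is only needed when leaving the loop early. A minimal sketch of that rule, with a hypothetical configure_child() helper (not part of this patch):

#include <linux/of.h>

static int configure_child(struct device_node *child)
{
	return 0;	/* placeholder for per-child setup */
}

static int configure_children(struct device_node *parent)
{
	struct device_node *child;
	int err;

	for_each_child_of_node(parent, child) {
		err = configure_child(child);
		if (err) {
			of_node_put(child);	/* leaving the loop early: drop its reference */
			return err;
		}
		/* no of_node_put() here: the next iteration drops it */
	}

	return 0;
}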
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 40bb8ae5853c..aacf584f2a42 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2338,23 +2338,11 @@ static struct memstick_driver msb_driver = {
.resume = msb_resume
};
-static int major;
-
static int __init msb_init(void)
{
- int rc = register_blkdev(0, DRIVER_NAME);
-
- if (rc < 0) {
- pr_err("failed to register major (error %d)\n", rc);
- return rc;
- }
-
- major = rc;
- rc = memstick_register_driver(&msb_driver);
- if (rc) {
- unregister_blkdev(major, DRIVER_NAME);
+ int rc = memstick_register_driver(&msb_driver);
+ if (rc)
pr_err("failed to register memstick driver (error %d)\n", rc);
- }
return rc;
}
@@ -2362,7 +2350,6 @@ static int __init msb_init(void)
static void __exit msb_exit(void)
{
memstick_unregister_driver(&msb_driver);
- unregister_blkdev(major, DRIVER_NAME);
idr_destroy(&msb_disk_idr);
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ff031a7735a5..2d1fb6420592 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -18,6 +18,17 @@ config MFD_CS5535
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
+config MFD_ALTERA_A10SR
+ bool "Altera Arria10 DevKit System Resource chip"
+ depends on ARCH_SOCFPGA && SPI_MASTER=y && OF
+ select REGMAP_SPI
+ select MFD_CORE
+ help
+ Support for the Altera Arria10 DevKit MAX5 System Resource chip
+ using the SPI interface. This driver provides common support for
+ accessing the external gpio extender (LEDs & buttons) and
+ power supply alarms (hwmon).
+
config MFD_ACT8945A
tristate "Active-semi ACT8945A"
select MFD_CORE
@@ -480,6 +491,8 @@ config MFD_KEMPLD
* COMe-cDC2 (microETXexpress-DC)
* COMe-cHL6
* COMe-cPC2 (microETXexpress-PC)
+ * COMe-cSL6
+ * COMe-mAL10
* COMe-mBT10
* COMe-mCT10
* COMe-mTT10 (nanoETXexpress-TT)
@@ -524,8 +537,8 @@ config MFD_88PM860X
battery-charger under the corresponding menus.
config MFD_MAX14577
- bool "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
- depends on I2C=y
+ tristate "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 42a66e19e191..2ba3ba35f745 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -205,3 +205,5 @@ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
+
+obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index f3d689176fc2..589eebfc13df 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -1087,7 +1087,6 @@ static int ab8500_probe(struct platform_device *pdev)
"Vbus Detect (USB)",
"USB ID Detect",
"UART Factory Mode Detect"};
- struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
const struct platform_device_id *platid = platform_get_device_id(pdev);
enum ab8500_version version = AB8500_VERSION_UNDEFINED;
struct device_node *np = pdev->dev.of_node;
@@ -1219,9 +1218,6 @@ static int ab8500_probe(struct platform_device *pdev)
pr_cont("None\n");
}
- if (plat && plat->init)
- plat->init(ab8500);
-
if (is_ab9540(ab8500)) {
ret = get_register_interruptible(ab8500, AB8500_CHARGER,
AB8500_CH_USBCH_STAT1_REG, &value);
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index b9f0010309f9..207cc497958a 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -127,45 +127,11 @@ EXPORT_SYMBOL(ab8500_sysctrl_write);
static int ab8500_sysctrl_probe(struct platform_device *pdev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *plat;
- struct ab8500_sysctrl_platform_data *pdata;
-
- plat = dev_get_platdata(pdev->dev.parent);
-
- if (!plat)
- return -EINVAL;
-
sysctrl_dev = &pdev->dev;
if (!pm_power_off)
pm_power_off = ab8500_power_off;
- pdata = plat->sysctrl;
- if (pdata) {
- int last, ret, i, j;
-
- if (is_ab8505(ab8500))
- last = AB8500_SYSCLKREQ4RFCLKBUF;
- else
- last = AB8500_SYSCLKREQ8RFCLKBUF;
-
- for (i = AB8500_SYSCLKREQ1RFCLKBUF; i <= last; i++) {
- j = i - AB8500_SYSCLKREQ1RFCLKBUF;
- ret = ab8500_sysctrl_write(i, 0xff,
- pdata->initial_req_buf_config[j]);
- dev_dbg(&pdev->dev,
- "Setting SysClkReq%dRfClkBuf 0x%X\n",
- j + 1,
- pdata->initial_req_buf_config[j]);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Can't set sysClkReq%dRfClkBuf: %d\n",
- j + 1, ret);
- }
- }
- }
-
return 0;
}
diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c
new file mode 100644
index 000000000000..c05aa4ff57fd
--- /dev/null
+++ b/drivers/mfd/altera-a10sr.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * SPI access for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from DA9052
+ */
+
+#include <linux/mfd/altera-a10sr.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+static const struct mfd_cell altr_a10sr_subdev_info[] = {
+ {
+ .name = "altr_a10sr_gpio",
+ .of_compatible = "altr,a10sr-gpio",
+ },
+};
+
+static bool altr_a10sr_reg_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ALTR_A10SR_VERSION_READ:
+ case ALTR_A10SR_LED_REG:
+ case ALTR_A10SR_PBDSW_REG:
+ case ALTR_A10SR_PBDSW_IRQ_REG:
+ case ALTR_A10SR_PWR_GOOD1_REG:
+ case ALTR_A10SR_PWR_GOOD2_REG:
+ case ALTR_A10SR_PWR_GOOD3_REG:
+ case ALTR_A10SR_FMCAB_REG:
+ case ALTR_A10SR_HPS_RST_REG:
+ case ALTR_A10SR_USB_QSPI_REG:
+ case ALTR_A10SR_SFPA_REG:
+ case ALTR_A10SR_SFPB_REG:
+ case ALTR_A10SR_I2C_M_REG:
+ case ALTR_A10SR_WARM_RST_REG:
+ case ALTR_A10SR_WR_KEY_REG:
+ case ALTR_A10SR_PMBUS_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool altr_a10sr_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ALTR_A10SR_LED_REG:
+ case ALTR_A10SR_PBDSW_IRQ_REG:
+ case ALTR_A10SR_FMCAB_REG:
+ case ALTR_A10SR_HPS_RST_REG:
+ case ALTR_A10SR_USB_QSPI_REG:
+ case ALTR_A10SR_SFPA_REG:
+ case ALTR_A10SR_SFPB_REG:
+ case ALTR_A10SR_WARM_RST_REG:
+ case ALTR_A10SR_WR_KEY_REG:
+ case ALTR_A10SR_PMBUS_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool altr_a10sr_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ALTR_A10SR_PBDSW_REG:
+ case ALTR_A10SR_PBDSW_IRQ_REG:
+ case ALTR_A10SR_PWR_GOOD1_REG:
+ case ALTR_A10SR_PWR_GOOD2_REG:
+ case ALTR_A10SR_PWR_GOOD3_REG:
+ case ALTR_A10SR_HPS_RST_REG:
+ case ALTR_A10SR_I2C_M_REG:
+ case ALTR_A10SR_WARM_RST_REG:
+ case ALTR_A10SR_WR_KEY_REG:
+ case ALTR_A10SR_PMBUS_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config altr_a10sr_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .cache_type = REGCACHE_NONE,
+
+ .use_single_rw = true,
+ .read_flag_mask = 1,
+ .write_flag_mask = 0,
+
+ .max_register = ALTR_A10SR_WR_KEY_REG,
+ .readable_reg = altr_a10sr_reg_readable,
+ .writeable_reg = altr_a10sr_reg_writeable,
+ .volatile_reg = altr_a10sr_reg_volatile,
+
+};
+
+static int altr_a10sr_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ struct altr_a10sr *a10sr;
+
+ a10sr = devm_kzalloc(&spi->dev, sizeof(*a10sr),
+ GFP_KERNEL);
+ if (!a10sr)
+ return -ENOMEM;
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ a10sr->dev = &spi->dev;
+
+ spi_set_drvdata(spi, a10sr);
+
+ a10sr->regmap = devm_regmap_init_spi(spi, &altr_a10sr_regmap_config);
+ if (IS_ERR(a10sr->regmap)) {
+ ret = PTR_ERR(a10sr->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(a10sr->dev, PLATFORM_DEVID_AUTO,
+ altr_a10sr_subdev_info,
+ ARRAY_SIZE(altr_a10sr_subdev_info),
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(a10sr->dev, "Failed to register sub-devices: %d\n",
+ ret);
+
+ return ret;
+}
+
+static const struct of_device_id altr_a10sr_spi_of_match[] = {
+ { .compatible = "altr,a10sr" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, altr_a10sr_spi_of_match);
+
+static struct spi_driver altr_a10sr_spi_driver = {
+ .probe = altr_a10sr_spi_probe,
+ .driver = {
+ .name = "altr_a10sr",
+ .of_match_table = of_match_ptr(altr_a10sr_spi_of_match),
+ },
+};
+
+module_spi_driver(altr_a10sr_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thor Thayer <tthayer@opensource.altera.com>");
+MODULE_DESCRIPTION("Altera Arria10 DevKit System Resource MFD Driver");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bf2717967597..e4f97b3c824b 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1462,7 +1462,7 @@ int arizona_dev_init(struct arizona *arizona)
/* Set up for interrupts */
ret = arizona_irq_init(arizona);
if (ret != 0)
- goto err_reset;
+ goto err_pm;
pm_runtime_set_autosuspend_delay(arizona->dev, 100);
pm_runtime_use_autosuspend(arizona->dev);
@@ -1486,6 +1486,8 @@ int arizona_dev_init(struct arizona *arizona)
err_irq:
arizona_irq_exit(arizona);
+err_pm:
+ pm_runtime_disable(arizona->dev);
err_reset:
arizona_enable_reset(arizona);
regulator_disable(arizona->dcvdd);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index edeb4951366a..5e18d3c77582 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -109,8 +109,20 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
do {
poll = false;
- if (arizona->aod_irq_chip)
- handle_nested_irq(irq_find_mapping(arizona->virq, 0));
+ if (arizona->aod_irq_chip) {
+ /*
+ * Check the AOD status register to determine whether
+ * the nested IRQ handler should be called.
+ */
+ ret = regmap_read(arizona->regmap,
+ ARIZONA_AOD_IRQ1, &val);
+ if (ret)
+ dev_warn(arizona->dev,
+ "Failed to read AOD IRQ1 %d\n", ret);
+ else if (val)
+ handle_nested_irq(
+ irq_find_mapping(arizona->virq, 0));
+ }
/*
* Check if one of the main interrupts is asserted and only
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e4e32978c377..fd80b0981f0f 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -17,6 +17,7 @@
*/
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -93,7 +94,10 @@ static const struct regmap_range axp22x_writeable_ranges[] = {
};
static const struct regmap_range axp22x_volatile_ranges[] = {
+ regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_PWR_OP_MODE),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
+ regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+ regmap_reg_range(AXP20X_FG_RES, AXP20X_FG_RES),
};
static const struct regmap_access_table axp22x_writeable_table = {
@@ -157,6 +161,11 @@ static struct resource axp20x_usb_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
};
+static struct resource axp22x_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+};
+
static struct resource axp22x_pek_resources[] = {
{
.name = "PEK_DBR",
@@ -524,6 +533,11 @@ static struct mfd_cell axp22x_cells[] = {
.resources = axp22x_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .of_compatible = "x-powers,axp221-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp22x_usb_power_supply_resources),
+ .resources = axp22x_usb_power_supply_resources,
},
};
@@ -664,6 +678,9 @@ static void axp20x_power_off(void)
regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL,
AXP20X_OFF);
+
+ /* Give capacitors etc. time to drain to avoid kernel panic msg. */
+ msleep(500);
}
int axp20x_match_device(struct axp20x_dev *axp20x)
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index c0a86aeb1733..388e268b9bcf 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -3094,8 +3094,7 @@ static void db8500_prcmu_update_cpufreq(void)
}
}
-static int db8500_prcmu_register_ab8500(struct device *parent,
- struct ab8500_platform_data *pdata)
+static int db8500_prcmu_register_ab8500(struct device *parent)
{
struct device_node *np;
struct resource ab8500_resource;
@@ -3103,8 +3102,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent,
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
.id = AB8500_VERSION_AB8500,
- .platform_data = pdata,
- .pdata_size = sizeof(struct ab8500_platform_data),
.resources = &ab8500_resource,
.num_resources = 1,
};
@@ -3133,7 +3130,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent,
static int db8500_prcmu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq = 0, err = 0;
struct resource *res;
@@ -3149,7 +3145,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
return -ENOMEM;
}
init_prcm_registers();
- dbx500_fw_version_init(pdev, pdata->version_offset);
+ dbx500_fw_version_init(pdev, DB8500_PRCMU_FW_VERSION_OFFSET);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
if (!res) {
dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
@@ -3204,7 +3200,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
}
}
- err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata);
+ err = db8500_prcmu_register_ab8500(&pdev->dev);
if (err) {
mfd_remove_devices(&pdev->dev);
pr_err("prcmu: Failed to add ab8500 subdevice\n");
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 14661ec5ef7f..270e19c0bba1 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -199,11 +199,8 @@ static struct device *add_child(struct i2c_client *client, const char *name,
int status;
pdev = platform_device_alloc(name, -1);
- if (!pdev) {
- dev_dbg(&client->dev, "can't alloc dev\n");
- status = -ENOMEM;
- goto err;
- }
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
device_init_wakeup(&pdev->dev, can_wakeup);
pdev->dev.parent = &client->dev;
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 05ddc7882362..0fc62995695b 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -24,19 +24,15 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-static const struct mfd_cell hi655x_pmic_devs[] = {
- { .name = "hi655x-regulator", },
-};
-
static const struct regmap_irq hi655x_irqs[] = {
- { .reg_offset = 0, .mask = OTMP_D1R_INT },
- { .reg_offset = 0, .mask = VSYS_2P5_R_INT },
- { .reg_offset = 0, .mask = VSYS_UV_D3R_INT },
- { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT },
- { .reg_offset = 0, .mask = PWRON_D4SR_INT },
- { .reg_offset = 0, .mask = PWRON_D20F_INT },
- { .reg_offset = 0, .mask = PWRON_D20R_INT },
- { .reg_offset = 0, .mask = RESERVE_INT },
+ { .reg_offset = 0, .mask = OTMP_D1R_INT_MASK },
+ { .reg_offset = 0, .mask = VSYS_2P5_R_INT_MASK },
+ { .reg_offset = 0, .mask = VSYS_UV_D3R_INT_MASK },
+ { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT_MASK },
+ { .reg_offset = 0, .mask = PWRON_D4SR_INT_MASK },
+ { .reg_offset = 0, .mask = PWRON_D20F_INT_MASK },
+ { .reg_offset = 0, .mask = PWRON_D20R_INT_MASK },
+ { .reg_offset = 0, .mask = RESERVE_INT_MASK },
};
static const struct regmap_irq_chip hi655x_irq_chip = {
@@ -45,6 +41,7 @@ static const struct regmap_irq_chip hi655x_irq_chip = {
.num_regs = 1,
.num_irqs = ARRAY_SIZE(hi655x_irqs),
.status_base = HI655X_IRQ_STAT_BASE,
+ .ack_base = HI655X_IRQ_STAT_BASE,
.mask_base = HI655X_IRQ_MASK_BASE,
};
@@ -55,6 +52,34 @@ static struct regmap_config hi655x_regmap_config = {
.max_register = HI655X_BUS_ADDR(0xFFF),
};
+static struct resource pwrkey_resources[] = {
+ {
+ .name = "down",
+ .start = PWRON_D20R_INT,
+ .end = PWRON_D20R_INT,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "up",
+ .start = PWRON_D20F_INT,
+ .end = PWRON_D20F_INT,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "hold 4s",
+ .start = PWRON_D4SR_INT,
+ .end = PWRON_D4SR_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const struct mfd_cell hi655x_pmic_devs[] = {
+ {
+ .name = "hi65xx-powerkey",
+ .num_resources = ARRAY_SIZE(pwrkey_resources),
+ .resources = &pwrkey_resources[0],
+ },
+ { .name = "hi655x-regulator", },
+};
+
static void hi655x_local_irq_clear(struct regmap *map)
{
int i;
@@ -80,12 +105,9 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
pmic->dev = dev;
pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!pmic->res)
- return -ENOENT;
-
base = devm_ioremap_resource(dev, pmic->res);
- if (!base)
- return -ENOMEM;
+ if (IS_ERR(base))
+ return PTR_ERR(base);
pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
&hi655x_regmap_config);
@@ -123,7 +145,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, hi655x_pmic_devs,
- ARRAY_SIZE(hi655x_pmic_devs), NULL, 0, NULL);
+ ARRAY_SIZE(hi655x_pmic_devs), NULL, 0,
+ regmap_irq_get_domain(pmic->irq_data));
if (ret) {
dev_err(dev, "Failed to register device %d\n", ret);
regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 05b924542ee2..da5722d7c540 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -624,6 +624,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "CSL6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cSL6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "CVV6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -647,6 +655,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "MAL1",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-mAL10"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "MBR1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 2280b3fdcf68..6c245128ab2e 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -561,7 +561,7 @@ static int __init max14577_i2c_init(void)
return i2c_add_driver(&max14577_i2c_driver);
}
-subsys_initcall(max14577_i2c_init);
+module_init(max14577_i2c_init);
static void __exit max14577_i2c_exit(void)
{
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index f32fbb8e8129..258757e216c4 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -31,25 +31,25 @@
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77620.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct resource gpio_resources[] = {
+static const struct resource gpio_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO),
};
-static struct resource power_resources[] = {
+static const struct resource power_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_LBT_MBATLOW),
};
-static struct resource rtc_resources[] = {
+static const struct resource rtc_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_TOP_RTC),
};
-static struct resource thermal_resources[] = {
+static const struct resource thermal_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM1),
DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM2),
};
@@ -111,15 +111,6 @@ static const struct mfd_cell max20024_children[] = {
},
};
-static struct regmap_irq_chip max77620_top_irq_chip = {
- .name = "max77620-top",
- .irqs = max77620_top_irqs,
- .num_irqs = ARRAY_SIZE(max77620_top_irqs),
- .num_regs = 2,
- .status_base = MAX77620_REG_IRQTOP,
- .mask_base = MAX77620_REG_IRQTOPM,
-};
-
static const struct regmap_range max77620_readable_ranges[] = {
regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
};
@@ -180,6 +171,51 @@ static const struct regmap_config max20024_regmap_config = {
.volatile_table = &max77620_volatile_table,
};
+/*
+ * MAX77620 and MAX20024 have the following steps for handling the TOP
+ * interrupts:
+ * 1. When an interrupt occurs from the PMIC, mask the PMIC interrupt by
+ *    setting GLBLM.
+ * 2. Read IRQTOP and service the interrupt.
+ * 3. Once all interrupts have been checked and serviced, the interrupt
+ *    service routine un-masks the hardware interrupt line by clearing GLBLM.
+ */
+static int max77620_irq_global_mask(void *irq_drv_data)
+{
+ struct max77620_chip *chip = irq_drv_data;
+ int ret;
+
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT,
+ MAX77620_GLBLM_MASK, MAX77620_GLBLM_MASK);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to set GLBLM: %d\n", ret);
+
+ return ret;
+}
+
+static int max77620_irq_global_unmask(void *irq_drv_data)
+{
+ struct max77620_chip *chip = irq_drv_data;
+ int ret;
+
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT,
+ MAX77620_GLBLM_MASK, 0);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to reset GLBLM: %d\n", ret);
+
+ return ret;
+}
+
+static struct regmap_irq_chip max77620_top_irq_chip = {
+ .name = "max77620-top",
+ .irqs = max77620_top_irqs,
+ .num_irqs = ARRAY_SIZE(max77620_top_irqs),
+ .num_regs = 2,
+ .status_base = MAX77620_REG_IRQTOP,
+ .mask_base = MAX77620_REG_IRQTOPM,
+ .handle_pre_irq = max77620_irq_global_mask,
+ .handle_post_irq = max77620_irq_global_unmask,
+};
+
/* max77620_get_fps_period_reg_value: Get FPS bit field value from
* requested periods.
* MAX77620 supports the FPS period of 40, 80, 160, 320, 540, 1280, 2560
@@ -433,6 +469,7 @@ static int max77620_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ max77620_top_irq_chip.irq_drv_data = chip;
ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq,
IRQF_ONESHOT | IRQF_SHARED,
chip->irq_base, &max77620_top_irq_chip,
@@ -568,7 +605,6 @@ static const struct i2c_device_id max77620_id[] = {
{"max20024", MAX20024},
{},
};
-MODULE_DEVICE_TABLE(i2c, max77620_id);
static const struct dev_pm_ops max77620_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(max77620_i2c_suspend, max77620_i2c_resume)
@@ -582,11 +618,4 @@ static struct i2c_driver max77620_driver = {
.probe = max77620_probe,
.id_table = max77620_id,
};
-
-module_i2c_driver(max77620_driver);
-
-MODULE_DESCRIPTION("MAX77620/MAX20024 Multi Function Device Core Driver");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_AUTHOR("Chaitanya Bandi <bandik@nvidia.com>");
-MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
-MODULE_LICENSE("GPL v2");
+builtin_i2c_driver(max77620_driver);
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index 7cfc95b49c5d..dc5caeaaa6a1 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
@@ -171,19 +171,6 @@ err_pmic_id:
return ret;
}
-static int max77843_remove(struct i2c_client *i2c)
-{
- struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max77843->dev);
-
- regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
-
- i2c_unregister_device(max77843->i2c_chg);
-
- return 0;
-}
-
static const struct of_device_id max77843_dt_match[] = {
{ .compatible = "maxim,max77843", },
{ },
@@ -193,7 +180,6 @@ static const struct i2c_device_id max77843_id[] = {
{ "max77843", TYPE_MAX77843, },
{ },
};
-MODULE_DEVICE_TABLE(i2c, max77843_id);
static int __maybe_unused max77843_suspend(struct device *dev)
{
@@ -226,9 +212,9 @@ static struct i2c_driver max77843_i2c_driver = {
.name = "max77843",
.pm = &max77843_pm,
.of_match_table = max77843_dt_match,
+ .suppress_bind_attrs = true,
},
.probe = max77843_probe,
- .remove = max77843_remove,
.id_table = max77843_id,
};
@@ -237,9 +223,3 @@ static int __init max77843_i2c_init(void)
return i2c_add_driver(&max77843_i2c_driver);
}
subsys_initcall(max77843_i2c_init);
-
-static void __exit max77843_i2c_exit(void)
-{
- i2c_del_driver(&max77843_i2c_driver);
-}
-module_exit(max77843_i2c_exit);
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 70443b161a5b..5c80aea3211f 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/mfd/max8925.h>
@@ -133,7 +133,6 @@ static const struct i2c_device_id max8925_id_table[] = {
{ "max8925", 0 },
{ },
};
-MODULE_DEVICE_TABLE(i2c, max8925_id_table);
static int max8925_dt_init(struct device_node *np, struct device *dev,
struct max8925_platform_data *pdata)
@@ -240,7 +239,6 @@ static const struct of_device_id max8925_dt_ids[] = {
{ .compatible = "maxim,max8925", },
{},
};
-MODULE_DEVICE_TABLE(of, max8925_dt_ids);
static struct i2c_driver max8925_driver = {
.driver = {
@@ -264,13 +262,3 @@ static int __init max8925_i2c_init(void)
return ret;
}
subsys_initcall(max8925_i2c_init);
-
-static void __exit max8925_i2c_exit(void)
-{
- i2c_del_driver(&max8925_driver);
-}
-module_exit(max8925_i2c_exit);
-
-MODULE_DESCRIPTION("I2C Driver for Maxim 8925");
-MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index f316348e3d98..2d6e2c392786 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -2,7 +2,7 @@
* max8997.c - mfd core driver for the Maxim 8966 and 8997
*
* Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@smasung.com>
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
@@ -55,7 +55,6 @@ static const struct of_device_id max8997_pmic_dt_match[] = {
{ .compatible = "maxim,max8997-pmic", .data = (void *)TYPE_MAX8997 },
{},
};
-MODULE_DEVICE_TABLE(of, max8997_pmic_dt_match);
#endif
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
@@ -263,24 +262,11 @@ err_i2c_haptic:
return ret;
}
-static int max8997_i2c_remove(struct i2c_client *i2c)
-{
- struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max8997->dev);
- i2c_unregister_device(max8997->muic);
- i2c_unregister_device(max8997->haptic);
- i2c_unregister_device(max8997->rtc);
-
- return 0;
-}
-
static const struct i2c_device_id max8997_i2c_id[] = {
{ "max8997", TYPE_MAX8997 },
{ "max8966", TYPE_MAX8966 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
static u8 max8997_dumpaddr_pmic[] = {
MAX8997_REG_INT1MSK,
@@ -510,10 +496,10 @@ static struct i2c_driver max8997_i2c_driver = {
.driver = {
.name = "max8997",
.pm = &max8997_pm,
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(max8997_pmic_dt_match),
},
.probe = max8997_i2c_probe,
- .remove = max8997_i2c_remove,
.id_table = max8997_i2c_id,
};
@@ -523,13 +509,3 @@ static int __init max8997_i2c_init(void)
}
/* init early so consumer devices can complete system boot */
subsys_initcall(max8997_i2c_init);
-
-static void __exit max8997_i2c_exit(void)
-{
- i2c_del_driver(&max8997_i2c_driver);
-}
-module_exit(max8997_i2c_exit);
-
-MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver");
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index ab28b29400f6..4c33b8063bc3 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -21,8 +21,6 @@
*/
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -138,7 +136,6 @@ static const struct of_device_id max8998_dt_match[] = {
{ .compatible = "ti,lp3974", .data = (void *)TYPE_LP3974 },
{},
};
-MODULE_DEVICE_TABLE(of, max8998_dt_match);
#endif
/*
@@ -254,23 +251,11 @@ err:
return ret;
}
-static int max8998_i2c_remove(struct i2c_client *i2c)
-{
- struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max8998->dev);
- max8998_irq_exit(max8998);
- i2c_unregister_device(max8998->rtc);
-
- return 0;
-}
-
static const struct i2c_device_id max8998_i2c_id[] = {
{ "max8998", TYPE_MAX8998 },
{ "lp3974", TYPE_LP3974},
{ }
};
-MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
static int max8998_suspend(struct device *dev)
{
@@ -378,10 +363,10 @@ static struct i2c_driver max8998_i2c_driver = {
.driver = {
.name = "max8998",
.pm = &max8998_pm,
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(max8998_dt_match),
},
.probe = max8998_i2c_probe,
- .remove = max8998_i2c_remove,
.id_table = max8998_i2c_id,
};
@@ -391,13 +376,3 @@ static int __init max8998_i2c_init(void)
}
/* init early so consumer devices can complete system boot */
subsys_initcall(max8998_i2c_init);
-
-static void __exit max8998_i2c_exit(void)
-{
- i2c_del_driver(&max8998_i2c_driver);
-}
-module_exit(max8998_i2c_exit);
-
-MODULE_DESCRIPTION("MAXIM 8998 multi-function core driver");
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index c30290f33430..1aa74c4c3ced 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -30,6 +30,8 @@
#include <linux/platform_data/usb-omap.h>
#include <linux/of.h>
+#include "omap-usb.h"
+
#define USBTLL_DRIVER_NAME "usbhs_tll"
/* TLL Register Set */
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 1be47ad6441b..2e44323455dd 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -34,7 +34,13 @@ struct qcom_rpm_resource {
struct qcom_rpm_data {
u32 version;
const struct qcom_rpm_resource *resource_table;
- unsigned n_resources;
+ unsigned int n_resources;
+ unsigned int req_ctx_off;
+ unsigned int req_sel_off;
+ unsigned int ack_ctx_off;
+ unsigned int ack_sel_off;
+ unsigned int req_sel_size;
+ unsigned int ack_sel_size;
};
struct qcom_rpm {
@@ -61,17 +67,11 @@ struct qcom_rpm {
#define RPM_REQUEST_TIMEOUT (5 * HZ)
-#define RPM_REQUEST_CONTEXT 3
-#define RPM_REQ_SELECT 11
-#define RPM_ACK_CONTEXT 15
-#define RPM_ACK_SELECTOR 23
-#define RPM_SELECT_SIZE 7
+#define RPM_MAX_SEL_SIZE 7
#define RPM_NOTIFICATION BIT(30)
#define RPM_REJECTED BIT(31)
-#define RPM_SIGNAL BIT(2)
-
static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
[QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
[QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
@@ -157,6 +157,12 @@ static const struct qcom_rpm_data apq8064_template = {
.version = 3,
.resource_table = apq8064_rpm_resource_table,
.n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
@@ -240,6 +246,12 @@ static const struct qcom_rpm_data msm8660_template = {
.version = 2,
.resource_table = msm8660_rpm_resource_table,
.n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 19,
+ .ack_sel_off = 27,
+ .req_sel_size = 7,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
@@ -322,6 +334,12 @@ static const struct qcom_rpm_data msm8960_template = {
.version = 3,
.resource_table = msm8960_rpm_resource_table,
.n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
@@ -362,6 +380,12 @@ static const struct qcom_rpm_data ipq806x_template = {
.version = 3,
.resource_table = ipq806x_rpm_resource_table,
.n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
+ .req_ctx_off = 3,
+ .req_sel_off = 11,
+ .ack_ctx_off = 15,
+ .ack_sel_off = 23,
+ .req_sel_size = 4,
+ .ack_sel_size = 7,
};
static const struct of_device_id qcom_rpm_of_match[] = {
@@ -380,7 +404,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
{
const struct qcom_rpm_resource *res;
const struct qcom_rpm_data *data = rpm->data;
- u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
+ u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
int left;
int ret = 0;
int i;
@@ -398,12 +422,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
- for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
+ for (i = 0; i < rpm->data->req_sel_size; i++) {
writel_relaxed(sel_mask[i],
- RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
+ RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
}
- writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
+ writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
reinit_completion(&rpm->ack);
regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
@@ -426,10 +450,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
u32 ack;
int i;
- ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
- for (i = 0; i < RPM_SELECT_SIZE; i++)
- writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
- writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+ ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+ for (i = 0; i < rpm->data->ack_sel_size; i++)
+ writel_relaxed(0,
+ RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
+ writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
if (ack & RPM_NOTIFICATION) {
dev_warn(rpm->dev, "ignoring notification!\n");
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index fb4ce6d04c30..c180b7533bba 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -600,7 +600,7 @@ static int si476x_core_fwver_to_revision(struct si476x_core *core,
unknown_revision:
dev_err(&core->client->dev,
"Unsupported version of the firmware: %d.%d.%d, "
- "reverting to A10 comptible functions\n",
+ "reverting to A10 compatible functions\n",
major, minor1, minor2);
return SI476X_REVISION_A10;
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 7f89e89b8a5e..cd18c09827ef 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -36,7 +36,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
{
struct smsc *smsc;
int devid, rev, venid_l, venid_h;
- int ret = 0;
+ int ret;
smsc = devm_kzalloc(&i2c->dev, sizeof(struct smsc),
GFP_KERNEL);
@@ -46,10 +46,8 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
}
smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config);
- if (IS_ERR(smsc->regmap)) {
- ret = PTR_ERR(smsc->regmap);
- goto err;
- }
+ if (IS_ERR(smsc->regmap))
+ return PTR_ERR(smsc->regmap);
i2c_set_clientdata(i2c, smsc);
smsc->dev = &i2c->dev;
@@ -68,7 +66,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk);
if (ret)
- goto err;
+ return ret;
#ifdef CONFIG_OF
if (i2c->dev.of_node)
@@ -76,7 +74,6 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
NULL, NULL, &i2c->dev);
#endif
-err:
return ret;
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index fb8f9e8b75df..94c7cc02fdab 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -23,6 +23,27 @@
#include <linux/regulator/consumer.h>
#include "stmpe.h"
+/**
+ * struct stmpe_platform_data - STMPE platform data
+ * @id: device id to distinguish between multiple STMPEs on the same board
+ * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
+ * @irq_trigger: IRQ trigger to use for the interrupt to the host
+ * @autosleep: bool to enable/disable stmpe autosleep
+ * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
+ * @irq_over_gpio: true if gpio is used to get irq
+ * @irq_gpio: gpio number over which irq will be requested (significant only if
+ * irq_over_gpio is true)
+ */
+struct stmpe_platform_data {
+ int id;
+ unsigned int blocks;
+ unsigned int irq_trigger;
+ bool autosleep;
+ bool irq_over_gpio;
+ int irq_gpio;
+ int autosleep_timeout;
+};
+
static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
{
return stmpe->variant->enable(stmpe, blocks, true);
@@ -1187,24 +1208,19 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
/* Called from client specific probe routines */
int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
{
- struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
+ struct stmpe_platform_data *pdata;
struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
int ret;
- if (!pdata) {
- if (!np)
- return -EINVAL;
-
- pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- stmpe_of_probe(pdata, np);
+ stmpe_of_probe(pdata, np);
- if (of_find_property(np, "interrupts", NULL) == NULL)
- ci->irq = -1;
- }
+ if (of_find_property(np, "interrupts", NULL) == NULL)
+ ci->irq = -1;
stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
if (!stmpe)
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index e4e4b22eebc9..c8f027b4ea4c 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -27,20 +27,6 @@
#include <linux/mfd/ti_am335x_tscadc.h>
-static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg)
-{
- unsigned int val;
-
- regmap_read(tsadc->regmap_tscadc, reg, &val);
- return val;
-}
-
-static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg,
- unsigned int val)
-{
- regmap_write(tsadc->regmap_tscadc, reg, val);
-}
-
static const struct regmap_config tscadc_regmap_config = {
.name = "ti_tscadc",
.reg_bits = 32,
@@ -48,89 +34,89 @@ static const struct regmap_config tscadc_regmap_config = {
.val_bits = 32,
};
-void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tscadc, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->reg_se_cache |= val;
- if (tsadc->adc_waiting)
- wake_up(&tsadc->reg_se_wait);
- else if (!tsadc->adc_in_use)
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
+ spin_lock_irqsave(&tscadc->reg_lock, flags);
+ tscadc->reg_se_cache |= val;
+ if (tscadc->adc_waiting)
+ wake_up(&tscadc->reg_se_wait);
+ else if (!tscadc->adc_in_use)
+ regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache);
- spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ spin_unlock_irqrestore(&tscadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set_cache);
-static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
+static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tscadc)
{
DEFINE_WAIT(wait);
u32 reg;
- reg = tscadc_readl(tsadc, REG_ADCFSM);
+ regmap_read(tscadc->regmap, REG_ADCFSM, &reg);
if (reg & SEQ_STATUS) {
- tsadc->adc_waiting = true;
- prepare_to_wait(&tsadc->reg_se_wait, &wait,
+ tscadc->adc_waiting = true;
+ prepare_to_wait(&tscadc->reg_se_wait, &wait,
TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&tsadc->reg_lock);
+ spin_unlock_irq(&tscadc->reg_lock);
schedule();
- spin_lock_irq(&tsadc->reg_lock);
- finish_wait(&tsadc->reg_se_wait, &wait);
+ spin_lock_irq(&tscadc->reg_lock);
+ finish_wait(&tscadc->reg_se_wait, &wait);
/*
* Sequencer should either be idle or
* busy applying the charge step.
*/
- reg = tscadc_readl(tsadc, REG_ADCFSM);
+ regmap_read(tscadc->regmap, REG_ADCFSM, &reg);
WARN_ON((reg & SEQ_STATUS) && !(reg & CHARGE_STEP));
- tsadc->adc_waiting = false;
+ tscadc->adc_waiting = false;
}
- tsadc->adc_in_use = true;
+ tscadc->adc_in_use = true;
}
-void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_set_once(struct ti_tscadc_dev *tscadc, u32 val)
{
- spin_lock_irq(&tsadc->reg_lock);
- am335x_tscadc_need_adc(tsadc);
+ spin_lock_irq(&tscadc->reg_lock);
+ am335x_tscadc_need_adc(tscadc);
- tscadc_writel(tsadc, REG_SE, val);
- spin_unlock_irq(&tsadc->reg_lock);
+ regmap_write(tscadc->regmap, REG_SE, val);
+ spin_unlock_irq(&tscadc->reg_lock);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set_once);
-void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tsadc)
+void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tscadc)
{
unsigned long flags;
- spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->adc_in_use = false;
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
- spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ spin_lock_irqsave(&tscadc->reg_lock, flags);
+ tscadc->adc_in_use = false;
+ regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache);
+ spin_unlock_irqrestore(&tscadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_adc_done);
-void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_clr(struct ti_tscadc_dev *tscadc, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->reg_se_cache &= ~val;
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
- spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ spin_lock_irqsave(&tscadc->reg_lock, flags);
+ tscadc->reg_se_cache &= ~val;
+ regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache);
+ spin_unlock_irqrestore(&tscadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
-static void tscadc_idle_config(struct ti_tscadc_dev *config)
+static void tscadc_idle_config(struct ti_tscadc_dev *tscadc)
{
unsigned int idleconfig;
idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM |
STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN;
- tscadc_writel(config, REG_IDLECONFIG, idleconfig);
+ regmap_write(tscadc->regmap, REG_IDLECONFIG, idleconfig);
}
static int ti_tscadc_probe(struct platform_device *pdev)
@@ -182,8 +168,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
}
/* Allocate memory for device */
- tscadc = devm_kzalloc(&pdev->dev,
- sizeof(struct ti_tscadc_dev), GFP_KERNEL);
+ tscadc = devm_kzalloc(&pdev->dev, sizeof(*tscadc), GFP_KERNEL);
if (!tscadc) {
dev_err(&pdev->dev, "failed to allocate memory.\n");
return -ENOMEM;
@@ -202,11 +187,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
- tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
+ tscadc->regmap = devm_regmap_init_mmio(&pdev->dev,
tscadc->tscadc_base, &tscadc_regmap_config);
- if (IS_ERR(tscadc->regmap_tscadc)) {
+ if (IS_ERR(tscadc->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
- err = PTR_ERR(tscadc->regmap_tscadc);
+ err = PTR_ERR(tscadc->regmap);
goto ret;
}
@@ -236,11 +221,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
tscadc->clk_div--;
- tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div);
+ regmap_write(tscadc->regmap, REG_CLKDIV, tscadc->clk_div);
/* Set the control register bits */
ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID;
- tscadc_writel(tscadc, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
/* Set register bits for Idle Config Mode */
if (tsc_wires > 0) {
@@ -254,7 +239,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
/* Enable the TSC module enable bit */
ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(tscadc, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
tscadc->used_cells = 0;
tscadc->tsc_cell = -1;
@@ -300,7 +285,7 @@ static int ti_tscadc_remove(struct platform_device *pdev)
{
struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
- tscadc_writel(tscadc, REG_SE, 0x00);
+ regmap_write(tscadc->regmap, REG_SE, 0x00);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -310,51 +295,43 @@ static int ti_tscadc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tscadc_suspend(struct device *dev)
+static int __maybe_unused tscadc_suspend(struct device *dev)
{
- struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
- tscadc_writel(tscadc_dev, REG_SE, 0x00);
+ regmap_write(tscadc->regmap, REG_SE, 0x00);
pm_runtime_put_sync(dev);
return 0;
}
-static int tscadc_resume(struct device *dev)
+static int __maybe_unused tscadc_resume(struct device *dev)
{
- struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
u32 ctrl;
pm_runtime_get_sync(dev);
/* context restore */
ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID;
- tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
- if (tscadc_dev->tsc_cell != -1) {
- if (tscadc_dev->tsc_wires == 5)
+ if (tscadc->tsc_cell != -1) {
+ if (tscadc->tsc_wires == 5)
ctrl |= CNTRLREG_5WIRE | CNTRLREG_TSCENB;
else
ctrl |= CNTRLREG_4WIRE | CNTRLREG_TSCENB;
- tscadc_idle_config(tscadc_dev);
+ tscadc_idle_config(tscadc);
}
ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
- tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div);
+ regmap_write(tscadc->regmap, REG_CLKDIV, tscadc->clk_div);
return 0;
}
-static const struct dev_pm_ops tscadc_pm_ops = {
- .suspend = tscadc_suspend,
- .resume = tscadc_resume,
-};
-#define TSCADC_PM_OPS (&tscadc_pm_ops)
-#else
-#define TSCADC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(tscadc_pm_ops, tscadc_suspend, tscadc_resume);
static const struct of_device_id ti_tscadc_dt_ids[] = {
{ .compatible = "ti,am3359-tscadc", },
@@ -365,7 +342,7 @@ MODULE_DEVICE_TABLE(of, ti_tscadc_dt_ids);
static struct platform_driver ti_tscadc_driver = {
.driver = {
.name = "ti_am3359-tscadc",
- .pm = TSCADC_PM_OPS,
+ .pm = &tscadc_pm_ops,
.of_match_table = ti_tscadc_dt_ids,
},
.probe = ti_tscadc_probe,
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 40beb2f4350c..1f308c4e3694 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -105,8 +105,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
}
static const struct i2c_device_id tps6507x_i2c_id[] = {
- { "tps6507x", 0 },
- { }
+ { "tps6507x", 0 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 831696ee2472..a49d3db6d936 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -622,11 +622,8 @@ add_numbered_child(unsigned mod_no, const char *name, int num,
twl = &twl_priv->twl_modules[sid];
pdev = platform_device_alloc(name, num);
- if (!pdev) {
- dev_dbg(&twl->client->dev, "can't alloc dev\n");
- status = -ENOMEM;
- goto err;
- }
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
pdev->dev.parent = &twl->client->dev;
@@ -634,7 +631,7 @@ add_numbered_child(unsigned mod_no, const char *name, int num,
status = platform_device_add_data(pdev, pdata, pdata_len);
if (status < 0) {
dev_dbg(&pdev->dev, "can't add platform_data\n");
- goto err;
+ goto put_device;
}
}
@@ -647,21 +644,22 @@ add_numbered_child(unsigned mod_no, const char *name, int num,
status = platform_device_add_resources(pdev, r, irq1 ? 2 : 1);
if (status < 0) {
dev_dbg(&pdev->dev, "can't add irqs\n");
- goto err;
+ goto put_device;
}
}
status = platform_device_add(pdev);
- if (status == 0)
- device_init_wakeup(&pdev->dev, can_wakeup);
+ if (status)
+ goto put_device;
+
+ device_init_wakeup(&pdev->dev, can_wakeup);
-err:
- if (status < 0) {
- platform_device_put(pdev);
- dev_err(&twl->client->dev, "can't add %s dev\n", name);
- return ERR_PTR(status);
- }
return &pdev->dev;
+
+put_device:
+ platform_device_put(pdev);
+ dev_err(&twl->client->dev, "failed to add device %s\n", name);
+ return ERR_PTR(status);
}
static inline struct device *add_child(unsigned mod_no, const char *name,
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 852d5874aabb..ab328ec49353 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -323,8 +323,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
- twl6040->sysclk = 19200000;
- twl6040->mclk = 32768;
+ twl6040->sysclk_rate = 19200000;
} else {
/* already powered-down */
if (!twl6040->power_count) {
@@ -352,8 +351,12 @@ int twl6040_power(struct twl6040 *twl6040, int on)
regcache_cache_only(twl6040->regmap, true);
regcache_mark_dirty(twl6040->regmap);
- twl6040->sysclk = 0;
- twl6040->mclk = 0;
+ twl6040->sysclk_rate = 0;
+
+ if (twl6040->pll == TWL6040_SYSCLK_SEL_HPPLL) {
+ clk_disable_unprepare(twl6040->mclk);
+ twl6040->mclk_rate = 0;
+ }
clk_disable_unprepare(twl6040->clk32k);
}
@@ -377,15 +380,15 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
/* Force full reconfiguration when switching between PLL */
if (pll_id != twl6040->pll) {
- twl6040->sysclk = 0;
- twl6040->mclk = 0;
+ twl6040->sysclk_rate = 0;
+ twl6040->mclk_rate = 0;
}
switch (pll_id) {
case TWL6040_SYSCLK_SEL_LPPLL:
/* low-power PLL divider */
/* Change the sysclk configuration only if it has been changed */
- if (twl6040->sysclk != freq_out) {
+ if (twl6040->sysclk_rate != freq_out) {
switch (freq_out) {
case 17640000:
lppllctl |= TWL6040_LPLLFIN;
@@ -427,6 +430,8 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
ret = -EINVAL;
goto pll_out;
}
+
+ clk_disable_unprepare(twl6040->mclk);
break;
case TWL6040_SYSCLK_SEL_HPPLL:
/* high-performance PLL can provide only 19.2 MHz */
@@ -437,7 +442,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
- if (twl6040->mclk != freq_in) {
+ if (twl6040->mclk_rate != freq_in) {
hppllctl &= ~TWL6040_MCLK_MSK;
switch (freq_in) {
@@ -468,6 +473,9 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
+ /* When switching to HPPLL, enable the mclk first */
+ if (pll_id != twl6040->pll)
+ clk_prepare_enable(twl6040->mclk);
/*
* enable clock slicer to ensure input waveform is
* square
@@ -483,6 +491,8 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
lppllctl &= ~TWL6040_LPLLENA;
twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
lppllctl);
+
+ twl6040->mclk_rate = freq_in;
}
break;
default:
@@ -491,8 +501,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
- twl6040->sysclk = freq_out;
- twl6040->mclk = freq_in;
+ twl6040->sysclk_rate = freq_out;
twl6040->pll = pll_id;
pll_out:
@@ -512,7 +521,7 @@ EXPORT_SYMBOL(twl6040_get_pll);
unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
{
- return twl6040->sysclk;
+ return twl6040->sysclk_rate;
}
EXPORT_SYMBOL(twl6040_get_sysclk);
@@ -655,10 +664,18 @@ static int twl6040_probe(struct i2c_client *client,
if (IS_ERR(twl6040->clk32k)) {
if (PTR_ERR(twl6040->clk32k) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- dev_info(&client->dev, "clk32k is not handled\n");
+ dev_dbg(&client->dev, "clk32k is not handled\n");
twl6040->clk32k = NULL;
}
+ twl6040->mclk = devm_clk_get(&client->dev, "mclk");
+ if (IS_ERR(twl6040->mclk)) {
+ if (PTR_ERR(twl6040->mclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(&client->dev, "mclk is not handled\n");
+ twl6040->mclk = NULL;
+ }
+
twl6040->supplies[0].supply = "vio";
twl6040->supplies[1].supply = "v2v1";
ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
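For reference, the mclk lookup above follows the usual optional-clock idiom: pass -EPROBE_DEFER back to the driver core, otherwise treat a missing clock as absent and fall back to NULL, which clk_prepare_enable()/clk_disable_unprepare() accept as a no-op. A condensed sketch of the idiom with a hypothetical helper name:

/*
 * Editor's sketch of the optional-clock idiom used above; the function
 * name is illustrative and not part of the patch.
 */
static struct clk *example_get_optional_mclk(struct i2c_client *client)
{
	struct clk *mclk = devm_clk_get(&client->dev, "mclk");

	if (IS_ERR(mclk)) {
		if (PTR_ERR(mclk) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);	/* provider not ready yet */
		dev_dbg(&client->dev, "mclk is not handled\n");
		mclk = NULL;	/* clk_prepare_enable(NULL) is a harmless no-op */
	}

	return mclk;
}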
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 4387ccb79e64..7410c6d9a34d 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -69,5 +69,6 @@ OBJCOPYFLAGS :=
OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
--set-section-flags .text=alloc,readonly \
--rename-section .text=.rodata
-$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
$(call if_changed,objcopy)
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 8756d06e2bb8..b75cf830d08a 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -7,11 +7,7 @@ config CXL_BASE
default n
select PPC_COPRO_BASE
-config CXL_KERNEL_API
- bool
- default n
-
-config CXL_EEH
+config CXL_AFU_DRIVER_OPS
bool
default n
@@ -19,8 +15,7 @@ config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- select CXL_KERNEL_API
- select CXL_EEH
+ select CXL_AFU_DRIVER_OPS
default m
help
Select this option to enable driver support for IBM Coherent
@@ -33,3 +28,11 @@ config CXL
CAPI adapters are found in POWER8 based systems.
If unsure, say N.
+
+config CXL_BIMODAL
+ bool "Support for bi-modal CAPI cards"
+ depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m
+ default y
+ help
+ Select this option to enable support for bi-modal CAPI cards, such as
+ the Mellanox CX-4.
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 8a55c1aa11aa..56e9a4732ef0 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
-cxl-y += vphb.o api.o
+cxl-y += vphb.o phb.o api.o
cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
obj-$(CONFIG_CXL) += cxl.o
obj-$(CONFIG_CXL_BASE) += base.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 6d228ccd884d..f3d34b941f85 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -13,6 +13,8 @@
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>
+#include <asm/pnv-pci.h>
+#include <linux/msi.h>
#include "cxl.h"
@@ -24,6 +26,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
int rc;
afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return ERR_CAST(afu);
ctx = cxl_context_alloc();
if (IS_ERR(ctx)) {
@@ -94,6 +98,42 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
return 0;
}
+int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
+{
+ if (*ctx == NULL || *afu_irq == 0) {
+ *afu_irq = 1;
+ *ctx = cxl_get_context(pdev);
+ } else {
+ (*afu_irq)++;
+ if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
+ *ctx = list_next_entry(*ctx, extra_irq_contexts);
+ *afu_irq = 1;
+ }
+ }
+ return cxl_find_afu_irq(*ctx, *afu_irq);
+}
+/* Exported via cxl_base */
+
+int cxl_set_priv(struct cxl_context *ctx, void *priv)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ ctx->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_priv);
+
+void *cxl_get_priv(struct cxl_context *ctx)
+{
+ if (!ctx)
+ return ERR_PTR(-EINVAL);
+
+ return ctx->priv;
+}
+EXPORT_SYMBOL_GPL(cxl_get_priv);
+
int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
int res;
@@ -102,7 +142,10 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
if (num == 0)
num = ctx->afu->pp_irqs;
res = afu_allocate_irqs(ctx, num);
- if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (res)
+ return res;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE)) {
/* In a guest, the PSL interrupt is not multiplexed. It was
* allocated above, and we need to set its handler
*/
@@ -110,6 +153,13 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
if (hwirq)
cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
}
+
+ if (ctx->status == STARTED) {
+ if (cxl_ops->update_ivtes)
+ cxl_ops->update_ivtes(ctx);
+ else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
+ }
+
return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
@@ -323,6 +373,23 @@ struct cxl_context *cxl_fops_get_context(struct file *file)
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);
+void cxl_set_driver_ops(struct cxl_context *ctx,
+ struct cxl_afu_driver_ops *ops)
+{
+ WARN_ON(!ops->fetch_event || !ops->event_delivered);
+ atomic_set(&ctx->afu_driver_events, 0);
+ ctx->afu_driver_ops = ops;
+}
+EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
+
+void cxl_context_events_pending(struct cxl_context *ctx,
+ unsigned int new_events)
+{
+ atomic_add(new_events, &ctx->afu_driver_events);
+ wake_up_all(&ctx->wq);
+}
+EXPORT_SYMBOL_GPL(cxl_context_events_pending);
+
int cxl_start_work(struct cxl_context *ctx,
struct cxl_ioctl_start_work *work)
{
@@ -390,7 +457,106 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
struct cxl_afu *afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return -ENODEV;
return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
+
+int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
+{
+ struct cxl_afu *afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return -ENODEV;
+
+ if (irqs > afu->adapter->user_irqs)
+ return -EINVAL;
+
+ /* Limit user_irqs to prevent the user increasing this via sysfs */
+ afu->adapter->user_irqs = irqs;
+ afu->irqs_max = irqs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);
+
+int cxl_get_max_irqs_per_process(struct pci_dev *dev)
+{
+ struct cxl_afu *afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return -ENODEV;
+
+ return afu->irqs_max;
+}
+EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
+
+/*
+ * This is a special interrupt allocation routine called from the PHB's MSI
+ * setup function. When capi interrupts are allocated in this manner they must
+ * still be associated with a running context, but since the MSI APIs have no
+ * way to specify this we use the default context associated with the device.
+ *
+ * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
+ * interrupt number, so in order to overcome this their driver informs us of
+ * the restriction by setting the maximum interrupts per context, and we
+ * allocate additional contexts as necessary so that we can keep the AFU
+ * interrupt number within the supported range.
+ */
+int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct cxl_context *ctx, *new_ctx, *default_ctx;
+ int remaining;
+ int rc;
+
+ ctx = default_ctx = cxl_get_context(pdev);
+ if (WARN_ON(!default_ctx))
+ return -ENODEV;
+
+ remaining = nvec;
+ while (remaining > 0) {
+ rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
+ if (rc) {
+ pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
+ return rc;
+ }
+ remaining -= ctx->afu->irqs_max;
+
+ if (ctx != default_ctx && default_ctx->status == STARTED) {
+ WARN_ON(cxl_start_context(ctx,
+ be64_to_cpu(default_ctx->elem->common.wed),
+ NULL));
+ }
+
+ if (remaining > 0) {
+ new_ctx = cxl_dev_context_init(pdev);
+ if (!new_ctx) {
+ pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
+ return -ENOSPC;
+ }
+ list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
+ ctx = new_ctx;
+ }
+ }
+
+ return 0;
+}
+/* Exported via cxl_base */
+
+void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct cxl_context *ctx, *pos, *tmp;
+
+ ctx = cxl_get_context(pdev);
+ if (WARN_ON(!ctx))
+ return;
+
+ cxl_free_afu_irqs(ctx);
+ list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
+ cxl_stop_context(pos);
+ cxl_free_afu_irqs(pos);
+ list_del(&pos->extra_irq_contexts);
+ cxl_release_context(pos);
+ }
+}
+/* Exported via cxl_base */
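For reference, the new cxl_set_driver_ops()/cxl_context_events_pending() pair is meant to be used by an AFU driver that wants to deliver its own events through the cxl file descriptor; afu_read() then calls fetch_event() to obtain the payload and event_delivered() to report the outcome. A hedged sketch of such a consumer follows — everything named example_*, including the single-slot payload pointer, is hypothetical; only the two exported calls and the two mandatory callbacks come from this patch:

/*
 * Editor's sketch of a hypothetical AFU driver using the new event
 * delivery hooks; assumes #include <misc/cxl.h>.
 */
static struct cxl_event_afu_driver_reserved *example_pending_pl;

static struct cxl_event_afu_driver_reserved *
example_fetch_event(struct cxl_context *ctx)
{
	/* Called from afu_read() once per pending event */
	return example_pending_pl;
}

static void example_event_delivered(struct cxl_context *ctx,
				    struct cxl_event_afu_driver_reserved *pl,
				    int rc)
{
	if (rc)
		pr_debug("example: event not delivered to userspace: %d\n", rc);
	/* free or recycle pl here */
}

static struct cxl_afu_driver_ops example_afu_driver_ops = {
	.fetch_event	 = example_fetch_event,
	.event_delivered = example_event_delivered,
};

static void example_post_event(struct cxl_context *ctx,
			       struct cxl_event_afu_driver_reserved *pl)
{
	/* cxl_set_driver_ops(ctx, &example_afu_driver_ops) was called earlier */
	example_pending_pl = pl;
	cxl_context_events_pending(ctx, 1);
}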
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index 9b90ec6c07cd..cd54ce6f6230 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -54,6 +54,19 @@ static inline void cxl_calls_put(struct cxl_calls *calls) { }
#endif /* CONFIG_CXL_MODULE */
+/* AFU refcount management */
+struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+ return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+EXPORT_SYMBOL_GPL(cxl_afu_get);
+
+void cxl_afu_put(struct cxl_afu *afu)
+{
+ put_device(&afu->dev);
+}
+EXPORT_SYMBOL_GPL(cxl_afu_put);
+
void cxl_slbia(struct mm_struct *mm)
{
struct cxl_calls *calls;
@@ -93,9 +106,92 @@ int cxl_update_properties(struct device_node *dn,
}
EXPORT_SYMBOL_GPL(cxl_update_properties);
+/*
+ * API calls into the driver that may be called from the PHB code and must be
+ * built in.
+ */
+bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
+{
+ bool ret;
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return false;
+
+ ret = calls->cxl_pci_associate_default_context(dev, afu);
+
+ cxl_calls_put(calls);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context);
+
+void cxl_pci_disable_device(struct pci_dev *dev)
+{
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return;
+
+ calls->cxl_pci_disable_device(dev);
+
+ cxl_calls_put(calls);
+}
+EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
+
+int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
+{
+ int ret;
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return -EBUSY;
+
+ ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
+
+ cxl_calls_put(calls);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
+
+int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ int ret;
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return false;
+
+ ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type);
+
+ cxl_calls_put(calls);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs);
+
+void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return;
+
+ calls->cxl_cx4_teardown_msi_irqs(pdev);
+
+ cxl_calls_put(calls);
+}
+EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs);
+
static int __init cxl_base_init(void)
{
- struct device_node *np = NULL;
+ struct device_node *np;
struct platform_device *dev;
int count = 0;
@@ -105,8 +201,7 @@ static int __init cxl_base_init(void)
if (cpu_has_feature(CPU_FTR_HVMODE))
return 0;
- while ((np = of_find_compatible_node(np, NULL,
- "ibm,coherent-platform-facility"))) {
+ for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
dev = of_platform_device_create(np, NULL, NULL);
if (dev)
count++;
@@ -114,5 +209,4 @@ static int __init cxl_base_init(void)
pr_devel("Found %d cxl device(s)\n", count);
return 0;
}
-
-module_init(cxl_base_init);
+device_initcall(cxl_base_init);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 26d206b1d08c..bdee9a01ef35 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -67,6 +67,9 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->pending_fault = false;
ctx->pending_afu_err = false;
+ INIT_LIST_HEAD(&ctx->irq_names);
+ INIT_LIST_HEAD(&ctx->extra_irq_contexts);
+
/*
* When we have to destroy all contexts in cxl_context_detach_all() we
* end up with afu_release_irqs() called from inside a
@@ -87,7 +90,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
*/
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
+ i = idr_alloc(&ctx->afu->contexts_idr, ctx,
+ ctx->afu->adapter->native->sl_ops->min_pe,
ctx->afu->num_procs, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 4fe50788ff45..de090533f18c 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -24,6 +24,7 @@
#include <asm/reg.h>
#include <misc/cxl-base.h>
+#include <misc/cxl.h>
#include <uapi/misc/cxl.h>
extern uint cxl_verbose;
@@ -34,7 +35,7 @@ extern uint cxl_verbose;
* Bump version each time a user API change is made, whether it is
* backwards compatible or not.
*/
-#define CXL_API_VERSION 2
+#define CXL_API_VERSION 3
#define CXL_API_VERSION_COMPATIBLE 1
/*
@@ -81,6 +82,7 @@ static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
/* 0x00C0:7EFF Implementation dependent area */
+/* PSL registers */
static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
@@ -91,6 +93,11 @@ static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
+/* XSL registers (Mellanox CX4) */
+static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100};
+static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108};
+static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158};
+static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168};
/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */
/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */
@@ -182,6 +189,18 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_ID_An_F (1ull << (63-31))
#define CXL_PSL_ID_An_L (1ull << (63-30))
+/****** CXL_PSL_SERR_An ****************************************************/
+#define CXL_PSL_SERR_An_afuto (1ull << (63-0))
+#define CXL_PSL_SERR_An_afudis (1ull << (63-1))
+#define CXL_PSL_SERR_An_afuov (1ull << (63-2))
+#define CXL_PSL_SERR_An_badsrc (1ull << (63-3))
+#define CXL_PSL_SERR_An_badctx (1ull << (63-4))
+#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5))
+#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6))
+#define CXL_PSL_SERR_An_afupar (1ull << (63-7))
+#define CXL_PSL_SERR_An_afudup (1ull << (63-8))
+#define CXL_PSL_SERR_An_AE (1ull << (63-30))
+
/****** CXL_PSL_SCNTL_An ****************************************************/
#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15))
/* Programming Modes: */
@@ -421,18 +440,6 @@ struct cxl_afu {
bool enabled;
};
-/* AFU refcount management */
-static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
-{
-
- return (get_device(&afu->dev) == NULL) ? NULL : afu;
-}
-
-static inline void cxl_afu_put(struct cxl_afu *afu)
-{
- put_device(&afu->dev);
-}
-
struct cxl_irq_name {
struct list_head list;
@@ -477,6 +484,9 @@ struct cxl_context {
/* Only used in PR mode */
u64 process_token;
+ /* driver private data */
+ void *priv;
+
unsigned long *irq_bitmap; /* Accessed from IRQ context */
struct cxl_irq_ranges irqs;
struct list_head irq_names;
@@ -522,7 +532,36 @@ struct cxl_context {
bool pending_fault;
bool pending_afu_err;
+ /* Used by AFU drivers for driver specific event delivery */
+ struct cxl_afu_driver_ops *afu_driver_ops;
+ atomic_t afu_driver_events;
+
struct rcu_head rcu;
+
+ /*
+ * Only used when more interrupts are allocated via
+ * pci_enable_msix_range than are supported in the default context, to
+ * use additional contexts to overcome the limitation. i.e. Mellanox
+ * CX4 only:
+ */
+ struct list_head extra_irq_contexts;
+};
+
+struct cxl_service_layer_ops {
+ int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev);
+ int (*afu_regs_init)(struct cxl_afu *afu);
+ int (*register_serr_irq)(struct cxl_afu *afu);
+ void (*release_serr_irq)(struct cxl_afu *afu);
+ void (*debugfs_add_adapter_sl_regs)(struct cxl *adapter, struct dentry *dir);
+ void (*debugfs_add_afu_sl_regs)(struct cxl_afu *afu, struct dentry *dir);
+ void (*psl_irq_dump_registers)(struct cxl_context *ctx);
+ void (*err_irq_dump_registers)(struct cxl *adapter);
+ void (*debugfs_stop_trace)(struct cxl *adapter);
+ void (*write_timebase_ctrl)(struct cxl *adapter);
+ u64 (*timebase_read)(struct cxl *adapter);
+ int capi_mode;
+ bool needs_reset_before_disable;
+ int min_pe;
};
struct cxl_native {
@@ -533,6 +572,7 @@ struct cxl_native {
irq_hw_number_t err_hwirq;
unsigned int err_virq;
u64 ps_off;
+ const struct cxl_service_layer_ops *sl_ops;
};
struct cxl_guest {
@@ -688,9 +728,21 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count);
+/* Internal functions wrapped in cxl_base to allow PHB to call them */
+bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
+void _cxl_pci_disable_device(struct pci_dev *dev);
+int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
+int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
+void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
struct cxl_calls {
void (*cxl_slbia)(struct mm_struct *mm);
+ bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu);
+ void (*cxl_pci_disable_device)(struct pci_dev *dev);
+ int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
+ int (*cxl_cx4_setup_msi_irqs)(struct pci_dev *pdev, int nvec, int type);
+ void (*cxl_cx4_teardown_msi_irqs)(struct pci_dev *pdev);
+
struct module *owner;
};
int register_cxl_calls(struct cxl_calls *calls);
@@ -805,6 +857,11 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
+void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir);
+void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx);
+void cxl_native_err_irq_dump_regs(struct cxl *adapter);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
@@ -855,6 +912,7 @@ struct cxl_backend_ops {
int (*attach_process)(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr);
int (*detach_process)(struct cxl_context *ctx);
+ void (*update_ivtes)(struct cxl_context *ctx);
bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
void (*release_afu)(struct device *dev);
@@ -879,4 +937,7 @@ extern const struct cxl_backend_ops *cxl_ops;
/* check if the given pci_dev is on the cxl vphb bus */
bool cxl_pci_is_vphb_device(struct pci_dev *dev);
+
+/* decode AFU error bits in the PSL register PSL_SERR_An */
+void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
#endif
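For reference, struct cxl_service_layer_ops is what lets the generic code stay agnostic of whether a full PSL or the Mellanox CX4 XSL sits on the card. A plausible PSL ops table assembled only from hooks and helpers visible in this series is sketched below; the actual table is defined in pci.c and may differ in detail:

/*
 * Editor's sketch: a PSL service-layer ops table built from callbacks
 * declared in this series; the real definition in pci.c may differ.
 */
static const struct cxl_service_layer_ops example_psl_ops = {
	.adapter_regs_init		= init_implementation_adapter_psl_regs,
	.afu_regs_init			= init_implementation_afu_psl_regs,
	.register_serr_irq		= cxl_native_register_serr_irq,
	.release_serr_irq		= cxl_native_release_serr_irq,
	.debugfs_add_adapter_sl_regs	= cxl_debugfs_add_adapter_psl_regs,
	.debugfs_add_afu_sl_regs	= cxl_debugfs_add_afu_psl_regs,
	.psl_irq_dump_registers		= cxl_native_psl_irq_dump_regs,
	.err_irq_dump_registers		= cxl_native_err_irq_dump_regs,
	.debugfs_stop_trace		= cxl_stop_trace,
	.write_timebase_ctrl		= write_timebase_ctrl_psl,
	.timebase_read			= timebase_read_psl,
	.capi_mode			= OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable	= true,
};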
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 5751899e0c17..ec7b8a017439 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -51,6 +51,19 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
}
+void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
+{
+ debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
+ debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
+ debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
+}
+
+void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir)
+{
+ debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC));
+}
+
int cxl_debugfs_adapter_add(struct cxl *adapter)
{
struct dentry *dir;
@@ -65,13 +78,10 @@ int cxl_debugfs_adapter_add(struct cxl *adapter)
return PTR_ERR(dir);
adapter->debugfs = dir;
- debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
- debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
- debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
-
+ if (adapter->native->sl_ops->debugfs_add_adapter_sl_regs)
+ adapter->native->sl_ops->debugfs_add_adapter_sl_regs(adapter, dir);
return 0;
}
@@ -80,6 +90,14 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter)
debugfs_remove_recursive(adapter->debugfs);
}
+void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir)
+{
+ debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
+ debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
+ debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
+}
+
int cxl_debugfs_afu_add(struct cxl_afu *afu)
{
struct dentry *dir;
@@ -94,18 +112,15 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu)
return PTR_ERR(dir);
afu->debugfs = dir;
- debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
- debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
- debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
-
debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
+ if (afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs)
+ afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs(afu, dir);
return 0;
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index eec468f1612f..5fb9894b157f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -293,6 +293,17 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm)
return cxl_context_iomap(ctx, vm);
}
+static inline bool ctx_event_pending(struct cxl_context *ctx)
+{
+ if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
+ return true;
+
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
+ return true;
+
+ return false;
+}
+
unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
@@ -305,8 +316,7 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
spin_lock_irqsave(&ctx->lock, flags);
- if (ctx->pending_irq || ctx->pending_fault ||
- ctx->pending_afu_err)
+ if (ctx_event_pending(ctx))
mask |= POLLIN | POLLRDNORM;
else if (ctx->status == CLOSED)
/* Only error on closed when there are no further events pending
@@ -319,16 +329,46 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
return mask;
}
-static inline int ctx_event_pending(struct cxl_context *ctx)
+static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
+ char __user *buf,
+ struct cxl_event *event,
+ struct cxl_event_afu_driver_reserved *pl)
{
- return (ctx->pending_irq || ctx->pending_fault ||
- ctx->pending_afu_err || (ctx->status == CLOSED));
+ /* Check event */
+ if (!pl) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
+ return -EFAULT;
+ }
+
+ /* Check event size */
+ event->header.size += pl->data_size;
+ if (event->header.size > CXL_READ_MIN_SIZE) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
+ return -EFAULT;
+ }
+
+ /* Copy event header */
+ if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
+ return -EFAULT;
+ }
+
+ /* Copy event data */
+ buf += sizeof(struct cxl_event_header);
+ if (copy_to_user(buf, &pl->data, pl->data_size)) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
+ return -EFAULT;
+ }
+
+ ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
+ return event->header.size;
}
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct cxl_context *ctx = file->private_data;
+ struct cxl_event_afu_driver_reserved *pl = NULL;
struct cxl_event event;
unsigned long flags;
int rc;
@@ -344,7 +384,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
for (;;) {
prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
- if (ctx_event_pending(ctx))
+ if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
break;
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
@@ -374,7 +414,12 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
memset(&event, 0, sizeof(event));
event.header.process_element = ctx->pe;
event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
+ pr_devel("afu_read delivering AFU driver specific event\n");
+ pl = ctx->afu_driver_ops->fetch_event(ctx);
+ atomic_dec(&ctx->afu_driver_events);
+ event.header.type = CXL_EVENT_AFU_DRIVER;
+ } else if (ctx->pending_irq) {
pr_devel("afu_read delivering AFU interrupt\n");
event.header.size += sizeof(struct cxl_event_afu_interrupt);
event.header.type = CXL_EVENT_AFU_INTERRUPT;
@@ -404,6 +449,9 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
spin_unlock_irqrestore(&ctx->lock, flags);
+ if (event.header.type == CXL_EVENT_AFU_DRIVER)
+ return afu_driver_event_copy(ctx, buf, &event, pl);
+
if (copy_to_user(buf, &event, event.header.size))
return -EFAULT;
return event.header.size;
@@ -558,7 +606,7 @@ int __init cxl_file_init(void)
* If these change we really need to update API. Either change some
* flags or update API version number CXL_API_VERSION.
*/
- BUILD_BUG_ON(CXL_API_VERSION != 2);
+ BUILD_BUG_ON(CXL_API_VERSION != 3);
BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
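For reference, the new CXL_EVENT_AFU_DRIVER read path copies the 8-byte event header followed by the driver-specific payload, with header.size giving the total length. A hedged userspace sketch of consuming it — the include path and buffer size are assumptions based on the installed uapi header and CXL_READ_MIN_SIZE, and error handling is trimmed:

/*
 * Editor's sketch of a hypothetical userspace reader; assumes the
 * installed uapi header is <misc/cxl.h> and that a 4096-byte buffer
 * satisfies CXL_READ_MIN_SIZE.
 */
#include <stdio.h>
#include <unistd.h>
#include <misc/cxl.h>

static void example_drain_events(int cxl_fd)
{
	char buf[4096] __attribute__((aligned(8)));
	ssize_t len;

	while ((len = read(cxl_fd, buf, sizeof(buf))) > 0) {
		struct cxl_event *ev = (struct cxl_event *)buf;

		if (ev->header.type == CXL_EVENT_AFU_DRIVER) {
			/* driver-specific payload follows the event header */
			size_t payload = ev->header.size - sizeof(ev->header);

			printf("AFU driver event: pe %u, %zu payload bytes\n",
			       (unsigned int)ev->header.process_element,
			       payload);
		}
	}
}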
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 68dd0b7da471..c63d61e17d56 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -24,8 +24,8 @@ struct ai_header {
};
static struct semaphore sem;
-unsigned long *buffer[CXL_AI_MAX_ENTRIES];
-struct sg_list *le;
+static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
+static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index bc8d0b9870eb..9aa58a77a24d 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -196,15 +196,18 @@ static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
int rc;
- u64 serr;
+ u64 serr, afu_error, dsisr;
- WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
if (rc) {
dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
return IRQ_HANDLED;
}
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
+ afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+ dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ cxl_afu_decode_psl_serr(afu, serr);
+ dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
+ dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
if (rc)
@@ -1052,16 +1055,18 @@ static void free_adapter(struct cxl *adapter)
struct irq_avail *cur;
int i;
- if (adapter->guest->irq_avail) {
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- kfree(cur->bitmap);
+ if (adapter->guest) {
+ if (adapter->guest->irq_avail) {
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ kfree(cur->bitmap);
+ }
+ kfree(adapter->guest->irq_avail);
}
- kfree(adapter->guest->irq_avail);
+ kfree(adapter->guest->status);
+ kfree(adapter->guest);
}
- kfree(adapter->guest->status);
cxl_remove_adapter_nr(adapter);
- kfree(adapter->guest);
kfree(adapter);
}
@@ -1182,6 +1187,7 @@ const struct cxl_backend_ops cxl_guest_ops = {
.ack_irq = guest_ack_irq,
.attach_process = guest_attach_process,
.detach_process = guest_detach_process,
+ .update_ivtes = NULL,
.support_attributes = guest_support_attributes,
.link_ok = guest_link_ok,
.release_afu = guest_release_afu,
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 8def4553acba..dec60f58a767 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -260,9 +260,6 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
else
alloc_count = count + 1;
- /* Initialize the list head to hold irq names */
- INIT_LIST_HEAD(&ctx->irq_names);
-
if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
alloc_count)))
return rc;
@@ -374,3 +371,32 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
ctx->irq_count = 0;
}
+
+void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
+{
+ dev_crit(&afu->dev,
+ "PSL Slice error received. Check AFU for root cause.\n");
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ if (serr & CXL_PSL_SERR_An_afuto)
+ dev_crit(&afu->dev, "AFU MMIO Timeout\n");
+ if (serr & CXL_PSL_SERR_An_afudis)
+ dev_crit(&afu->dev,
+ "MMIO targeted Accelerator that was not enabled\n");
+ if (serr & CXL_PSL_SERR_An_afuov)
+ dev_crit(&afu->dev, "AFU CTAG Overflow\n");
+ if (serr & CXL_PSL_SERR_An_badsrc)
+ dev_crit(&afu->dev, "Bad Interrupt Source\n");
+ if (serr & CXL_PSL_SERR_An_badctx)
+ dev_crit(&afu->dev, "Bad Context Handle\n");
+ if (serr & CXL_PSL_SERR_An_llcmdis)
+ dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
+ if (serr & CXL_PSL_SERR_An_llcmdto)
+ dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
+ if (serr & CXL_PSL_SERR_An_afupar)
+ dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
+ if (serr & CXL_PSL_SERR_An_afudup)
+ dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
+ if (serr & CXL_PSL_SERR_An_AE)
+ dev_crit(&afu->dev,
+ "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
+}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index ae68c3201156..d9be23b24aa3 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -110,6 +110,11 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
static struct cxl_calls cxl_calls = {
.cxl_slbia = cxl_slbia_core,
+ .cxl_pci_associate_default_context = _cxl_pci_associate_default_context,
+ .cxl_pci_disable_device = _cxl_pci_disable_device,
+ .cxl_next_msi_hwirq = _cxl_next_msi_hwirq,
+ .cxl_cx4_setup_msi_irqs = _cxl_cx4_setup_msi_irqs,
+ .cxl_cx4_teardown_msi_irqs = _cxl_cx4_teardown_msi_irqs,
.owner = THIS_MODULE,
};
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 55d8a1459f28..3bcdaee11ba1 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -21,10 +21,10 @@
#include "cxl.h"
#include "trace.h"
-static int afu_control(struct cxl_afu *afu, u64 command,
+static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
u64 result, u64 mask, bool enabled)
{
- u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ u64 AFU_Cntl;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0;
@@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command,
trace_cxl_afu_ctrl(afu, command);
- cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
+ AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
while ((AFU_Cntl & mask) != result) {
@@ -54,6 +55,16 @@ static int afu_control(struct cxl_afu *afu, u64 command,
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
};
+
+ if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
+ /*
+ * Workaround for a bug in the XSL used in the Mellanox CX4
+ * that fails to clear the RA bit after an AFU reset,
+ * preventing subsequent AFU resets from working.
+ */
+ cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
+ }
+
pr_devel("AFU command complete: %llx\n", command);
afu->enabled = enabled;
out:
@@ -67,7 +78,7 @@ static int afu_enable(struct cxl_afu *afu)
{
pr_devel("AFU enable request\n");
- return afu_control(afu, CXL_AFU_Cntl_An_E,
+ return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
CXL_AFU_Cntl_An_ES_Enabled,
CXL_AFU_Cntl_An_ES_MASK, true);
}
@@ -76,7 +87,8 @@ int cxl_afu_disable(struct cxl_afu *afu)
{
pr_devel("AFU disable request\n");
- return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
+ return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
+ CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_ES_MASK, false);
}
@@ -85,7 +97,7 @@ static int native_afu_reset(struct cxl_afu *afu)
{
pr_devel("AFU reset request\n");
- return afu_control(afu, CXL_AFU_Cntl_An_RA,
+ return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
false);
@@ -189,7 +201,7 @@ int cxl_alloc_spa(struct cxl_afu *afu)
unsigned spa_size;
/* Work out how many pages to allocate */
- afu->native->spa_order = 0;
+ afu->native->spa_order = -1;
do {
afu->native->spa_order++;
spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
@@ -430,7 +442,6 @@ static int remove_process_element(struct cxl_context *ctx)
return rc;
}
-
void cxl_assign_psn_space(struct cxl_context *ctx)
{
if (!ctx->afu->pp_size || ctx->master) {
@@ -507,10 +518,39 @@ static u64 calculate_sr(struct cxl_context *ctx)
return sr;
}
+static void update_ivtes_directed(struct cxl_context *ctx)
+{
+ bool need_update = (ctx->status == STARTED);
+ int r;
+
+ if (need_update) {
+ WARN_ON(terminate_process_element(ctx));
+ WARN_ON(remove_process_element(ctx));
+ }
+
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
+ ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
+ }
+
+ /*
+ * Theoretically we could use the update llcmd, instead of a
+ * terminate/remove/add (or if an atomic update was required we could
+ * do a suspend/update/resume), however it seems there might be issues
+ * with the update llcmd on some cards (including those using an XSL on
+ * an ASIC) so for now it's safest to go with the commands that are
+ * known to work. In the future if we come across a situation where the
+ * card may be performing transactions using the same PE while we are
+ * doing this update we might need to revisit this.
+ */
+ if (need_update)
+ WARN_ON(add_process_element(ctx));
+}
+
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
u32 pid;
- int r, result;
+ int result;
cxl_assign_psn_space(ctx);
@@ -545,10 +585,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->irqs.range[0] = 1;
}
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
- ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
- }
+ update_ivtes_directed(ctx);
ctx->elem->common.amr = cpu_to_be64(amr);
ctx->elem->common.wed = cpu_to_be64(wed);
@@ -570,7 +607,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu);
- cxl_ops->afu_reset(afu);
+ /*
+ * The CAIA section 2.2.1 indicates that the procedure for starting and
+ * stopping an AFU in AFU directed mode is AFU specific, which is not
+ * ideal since this code is generic and with one exception has no
+ * knowledge of the AFU. This is in contrast to the procedure for
+ * disabling a dedicated process AFU, which is documented to just
+ * require a reset. The architecture does indicate that both an AFU
+ * reset and an AFU disable should result in the AFU being disabled and
+ * we do both followed by a PSL purge for safety.
+ *
+ * Notably we used to have some issues with the disable sequence on PSL
+ * cards, which is why we ended up using this heavy weight procedure in
+ * the first place, however a bug was discovered that had rendered the
+ * disable operation ineffective, so it is conceivable that was the
+ * sole explanation for those difficulties. Careful regression testing
+ * is recommended if anyone attempts to remove or reorder these
+ * operations.
+ *
+ * The XSL on the Mellanox CX4 behaves a little differently from the
+ * PSL based cards and will time out an AFU reset if the AFU is still
+ * enabled. That card is special in that we do have a means to identify
+ * it from this code, so in that case we skip the reset and just use a
+ * disable/purge to avoid the timeout and corresponding noise in the
+ * kernel log.
+ */
+ if (afu->adapter->native->sl_ops->needs_reset_before_disable)
+ cxl_ops->afu_reset(afu);
cxl_afu_disable(afu);
cxl_psl_purge(afu);
@@ -600,6 +663,22 @@ static int activate_dedicated_process(struct cxl_afu *afu)
return cxl_chardev_d_afu_add(afu);
}
+static void update_ivtes_dedicated(struct cxl_context *ctx)
+{
+ struct cxl_afu *afu = ctx->afu;
+
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
+ (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
+ (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
+ (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
+ ((u64)ctx->irqs.offset[3] & 0xffff));
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
+ (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
+ (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
+ (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
+ ((u64)ctx->irqs.range[3] & 0xffff));
+}
+
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
struct cxl_afu *afu = ctx->afu;
@@ -618,16 +697,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
cxl_prefault(ctx, wed);
- cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
- (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.offset[3] & 0xffff));
- cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
- (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.range[3] & 0xffff));
+ update_ivtes_dedicated(ctx);
cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
@@ -703,12 +773,37 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel,
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
+ /*
+ * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
+ * stop the AFU in dedicated mode (we therefore do not make that
+ * optional like we do in the afu directed path). It does not indicate
+ * that we need to do an explicit disable (which should occur
+ * implicitly as part of the reset) or purge, but we do these as well
+ * to be on the safe side.
+ *
+ * Notably we used to have some issues with the disable sequence
+ * (before the sequence was spelled out in the architecture) which is
+ * why we were so heavy weight in the first place, however a bug was
+ * discovered that had rendered the disable operation ineffective, so
+ * it is conceivable that was the sole explanation for those
+ * difficulties. Point is, we should be careful and do some regression
+ * testing if we ever attempt to remove any part of this procedure.
+ */
cxl_ops->afu_reset(ctx->afu);
cxl_afu_disable(ctx->afu);
cxl_psl_purge(ctx->afu);
return 0;
}
+static void native_update_ivtes(struct cxl_context *ctx)
+{
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return update_ivtes_directed(ctx);
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
+ return update_ivtes_dedicated(ctx);
+ WARN(1, "native_update_ivtes: Bad mode\n");
+}
+
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
if (!ctx->pe_inserted)
@@ -754,26 +849,38 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
return 0;
}
-static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
- u64 dsisr, u64 errstat)
+void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
{
u64 fir1, fir2, fir_slice, serr, afu_debug;
fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
- dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
+ serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+ cxl_afu_decode_psl_serr(ctx->afu, serr);
+ }
dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+}
+
+static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
+ u64 dsisr, u64 errstat)
+{
+
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
- dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(ctx->afu->adapter);
+ if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
+ ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
+
+ if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
+ dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+ ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
+ }
return cxl_ops->ack_irq(ctx, 0, errstat);
}
@@ -849,41 +956,56 @@ void native_irq_wait(struct cxl_context *ctx)
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
- u64 fir_slice, errstat, serr, afu_debug;
-
- WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+ u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;
+ /*
+ * slice err interrupt is only used with full PSL (no XSL)
+ */
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+ dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ cxl_afu_decode_psl_serr(afu, serr);
dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+ dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
+ dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
return IRQ_HANDLED;
}
+void cxl_native_err_irq_dump_regs(struct cxl *adapter)
+{
+ u64 fir1, fir2;
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+ dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+}
+
static irqreturn_t native_irq_err(int irq, void *data)
{
struct cxl *adapter = data;
- u64 fir1, fir2, err_ivte;
+ u64 err_ivte;
WARN(1, "CXL ERROR interrupt %i\n", irq);
err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
- dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(adapter);
-
- fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+ if (adapter->native->sl_ops->debugfs_stop_trace) {
+ dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+ adapter->native->sl_ops->debugfs_stop_trace(adapter);
+ }
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+ if (adapter->native->sl_ops->err_irq_dump_registers)
+ adapter->native->sl_ops->err_irq_dump_registers(adapter);
return IRQ_HANDLED;
}
@@ -1128,6 +1250,7 @@ const struct cxl_backend_ops cxl_native_ops = {
.irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
+ .update_ivtes = native_update_ivtes,
.support_attributes = native_support_attributes,
.link_ok = cxl_adapter_link_ok,
.release_afu = cxl_pci_release_afu,
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index a08fcc888a71..d152e2de8c93 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -55,6 +55,8 @@
pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
pci_write_config_byte(dev, vsec + 0xa, val)
+#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
+ pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK 0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB 0x40
@@ -352,13 +354,10 @@ static u64 get_capp_unit_id(struct device_node *np)
return 0;
}
-static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
+static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id)
{
struct device_node *np;
const __be32 *prop;
- u64 psl_dsnctl;
- u64 chipid;
- u64 capp_unit_id;
if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
@@ -367,14 +366,28 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
np = of_get_next_parent(np);
if (!np)
return -ENODEV;
- chipid = be32_to_cpup(prop);
- capp_unit_id = get_capp_unit_id(np);
+ *chipid = be32_to_cpup(prop);
+ *capp_unit_id = get_capp_unit_id(np);
of_node_put(np);
- if (!capp_unit_id) {
+ if (!*capp_unit_id) {
pr_err("cxl: invalid capp unit id\n");
return -ENODEV;
}
+ return 0;
+}
+
+static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 psl_dsnctl;
+ u64 chipid;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+ if (rc)
+ return rc;
+
psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
/* Tell PSL where to route data to */
@@ -393,8 +406,61 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
return 0;
}
+static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 xsl_dsnctl;
+ u64 chipid;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+ if (rc)
+ return rc;
+
+ /* Tell XSL where to route data to */
+ xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
+ xsl_dsnctl |= (capp_unit_id << (63-13));
+ cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);
+
+ return 0;
+}
+
+/* PSL & XSL */
+#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
-#define _2048_250MHZ_CYCLES 1
+/* For the PSL this is a multiple for 0 < n <= 7: */
+#define PSL_2048_250MHZ_CYCLES 1
+
+static void write_timebase_ctrl_psl(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+ TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
+}
+
+/* XSL */
+#define TBSYNC_ENA (1ULL << 63)
+/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */
+#define XSL_2000_CLOCKS 1
+#define XSL_4000_CLOCKS 2
+#define XSL_8000_CLOCKS 3
+
+static void write_timebase_ctrl_xsl(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
+ TBSYNC_ENA |
+ TBSYNC_CAL(3) |
+ TBSYNC_CNT(XSL_4000_CLOCKS));
+}
+
+static u64 timebase_read_psl(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_PSL_Timebase);
+}
+
+static u64 timebase_read_xsl(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_XSL_Timebase);
+}
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
@@ -421,8 +487,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
* Setup PSL Timebase Control and Status register
* with the recommended Timebase Sync Count value
*/
- cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
- TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+ adapter->native->sl_ops->write_timebase_ctrl(adapter);
/* Enable PSL Timebase */
cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
@@ -435,7 +500,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
dev_info(&dev->dev, "PSL timebase can't synchronize\n");
return;
}
- psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+ psl_tb = adapter->native->sl_ops->timebase_read(adapter);
delta = mftb() - psl_tb;
if (delta < 0)
delta = -delta;
@@ -445,7 +510,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
return;
}
-static int init_implementation_afu_regs(struct cxl_afu *afu)
+static int init_implementation_afu_psl_regs(struct cxl_afu *afu)
{
/* read/write masks for this slice */
cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
@@ -551,36 +616,234 @@ static int setup_cxl_bars(struct pci_dev *dev)
return 0;
}
-/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
-static int switch_card_to_cxl(struct pci_dev *dev)
-{
+#ifdef CONFIG_CXL_BIMODAL
+
+struct cxl_switch_work {
+ struct pci_dev *dev;
+ struct work_struct work;
int vsec;
+ int mode;
+};
+
+static void switch_card_to_cxl(struct work_struct *work)
+{
+ struct cxl_switch_work *switch_work =
+ container_of(work, struct cxl_switch_work, work);
+ struct pci_dev *dev = switch_work->dev;
+ struct pci_bus *bus = dev->bus;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pci_dev *bridge;
+ struct pnv_php_slot *php_slot;
+ unsigned int devfn;
u8 val;
int rc;
- dev_info(&dev->dev, "switch card to CXL\n");
+ dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
+ bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
+ bus_list);
+ if (!bridge) {
+ dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
+ goto err_dev_put;
+ }
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
+ php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
+ if (!php_slot) {
+ dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
+ "information. You may need to upgrade "
+ "skiboot. Aborting.\n");
+ goto err_dev_put;
+ }
+
+ rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
+ goto err_dev_put;
+ }
+ devfn = dev->devfn;
+
+ /* Release the reference obtained in cxl_check_and_switch_mode() */
+ pci_dev_put(dev);
+
+ dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(bridge->subordinate);
+ pci_unlock_rescan_remove();
+
+ /* Switch the CXL protocol on the card */
+ if (switch_work->mode == CXL_BIMODE_CXL) {
+ dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
+ val &= ~CXL_VSEC_PROTOCOL_MASK;
+ val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
+ rc = pnv_cxl_enable_phb_kernel_api(hose, true);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: Failed to enable kernel API"
+ " on real PHB, aborting\n");
+ goto err_free_work;
+ }
+ } else {
+ dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
+ goto err_free_work;
+ }
+
+ rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
+ goto err_free_work;
+ }
+
+ /*
+ * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states
+ * we must wait 100ms after this mode switch before touching PCIe config
+ * space.
+ */
+ msleep(100);
+
+ /*
+ * Hot reset to cause the card to come back in cxl mode. An
+ * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support
+ * in skiboot, so we use a hot reset instead.
+ *
+ * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is
+ * guaranteed to sit directly under the root port, and setting the reset
+ * state on a device directly under the root port is equivalent to doing
+ * it on the root port itself.
+ */
+ dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
+ pci_set_pcie_reset_state(bridge, pcie_hot_reset);
+ pci_set_pcie_reset_state(bridge, pcie_deassert_reset);
+
+ dev_dbg(&bus->dev, "cxl: Offlining slot\n");
+ rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
+ goto err_free_work;
+ }
+
+ dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
+ rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
+ goto err_free_work;
+ }
+
+ pci_lock_rescan_remove();
+ pci_hp_add_devices(bridge->subordinate);
+ pci_unlock_rescan_remove();
+
+ dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
+ kfree(switch_work);
+ return;
+
+err_dev_put:
+ /* Release the reference obtained in cxl_check_and_switch_mode() */
+ pci_dev_put(dev);
+err_free_work:
+ kfree(switch_work);
+}
+
+int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
+{
+ struct cxl_switch_work *work;
+ u8 val;
+ int rc;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
return -ENODEV;
+
+ if (!vsec) {
+ vsec = find_cxl_vsec(dev);
+ if (!vsec) {
+ dev_info(&dev->dev, "CXL VSEC not found\n");
+ return -ENODEV;
+ }
}
- if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
- dev_err(&dev->dev, "failed to read current mode control: %i", rc);
+ rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to read current mode control: %i", rc);
return rc;
}
- val &= ~CXL_VSEC_PROTOCOL_MASK;
- val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
- if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
- dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
- return rc;
+
+ if (mode == CXL_BIMODE_PCI) {
+ if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
+ dev_info(&dev->dev, "Card is already in PCI mode\n");
+ return 0;
+ }
+ /*
+ * TODO: Before it's safe to switch the card back to PCI mode
+ * we need to disable the CAPP and make sure any cachelines the
+ * card holds have been flushed out. Needs skiboot support.
+ */
+ dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
+ return -EIO;
}
+
+ if (val & CXL_VSEC_PROTOCOL_ENABLE) {
+ dev_info(&dev->dev, "Card is already in CXL mode\n");
+ return 0;
+ }
+
+ dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
+ "to switch to CXL mode\n");
+
+ work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ pci_dev_get(dev);
+ work->dev = dev;
+ work->vsec = vsec;
+ work->mode = mode;
+ INIT_WORK(&work->work, switch_card_to_cxl);
+
+ schedule_work(&work->work);
+
/*
- * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
- * we must wait 100ms after this mode switch before touching
- * PCIe config space.
+ * We return a failure now to abort the driver init. Once the
+ * link has been cycled and the card is in cxl mode we will
+ * come back (possibly using the generic cxl driver), but
+ * return success as the card should then be in cxl mode.
+ *
+ * TODO: What if the card comes back in PCI mode even after
+ * the switch? Don't want to spin endlessly.
*/
- msleep(100);
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
+
+#endif /* CONFIG_CXL_BIMODAL */
+
+static int setup_cxl_protocol_area(struct pci_dev *dev)
+{
+ u8 val;
+ int rc;
+ int vsec = find_cxl_vsec(dev);
+
+ if (!vsec) {
+ dev_info(&dev->dev, "CXL VSEC not found\n");
+ return -ENODEV;
+ }
+
+ rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
+ return rc;
+ }
+
+ if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
+ dev_err(&dev->dev, "Card not in CAPI mode!\n");
+ return -EIO;
+ }
+
+ if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
+ val &= ~CXL_VSEC_PROTOCOL_MASK;
+ val |= CXL_VSEC_PROTOCOL_256TB;
+ rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
+ return rc;
+ }
+ }
return 0;
}
@@ -712,6 +975,21 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
}
}
+ if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
+ /*
+ * We could also check this for the dedicated process model
+ * since the architecture indicates it should be set to 1, but
+ * in that case we ignore the value and I'd rather not risk
+ * breaking any existing dedicated process AFUs that left it as
+ * 0 (not that I'm aware of any). It is clearly an error for an
+ * AFU directed AFU to set this to 0, and would have previously
+ * triggered a bug resulting in the maximum not being enforced
+ * at all since idr_alloc treats 0 as no maximum.
+ */
+ dev_err(&afu->dev, "AFU does not support any processes\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -753,11 +1031,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
else
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
}
- reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (reg) {
- if (reg & ~0xffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ if (afu->adapter->native->sl_ops->register_serr_irq) {
+ reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ if (reg) {
+ if (reg & ~0xffff)
+ dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ }
}
reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
if (reg) {
@@ -835,11 +1115,13 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
if ((rc = cxl_afu_descriptor_looks_ok(afu)))
goto err1;
- if ((rc = init_implementation_afu_regs(afu)))
- goto err1;
+ if (adapter->native->sl_ops->afu_regs_init)
+ if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
+ goto err1;
- if ((rc = cxl_native_register_serr_irq(afu)))
- goto err1;
+ if (adapter->native->sl_ops->register_serr_irq)
+ if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
+ goto err1;
if ((rc = cxl_native_register_psl_irq(afu)))
goto err2;
@@ -847,7 +1129,8 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
return 0;
err2:
- cxl_native_release_serr_irq(afu);
+ if (adapter->native->sl_ops->release_serr_irq)
+ adapter->native->sl_ops->release_serr_irq(afu);
err1:
pci_unmap_slice_regs(afu);
return rc;
@@ -856,7 +1139,8 @@ err1:
static void pci_deconfigure_afu(struct cxl_afu *afu)
{
cxl_native_release_psl_irq(afu);
- cxl_native_release_serr_irq(afu);
+ if (afu->adapter->native->sl_ops->release_serr_irq)
+ afu->adapter->native->sl_ops->release_serr_irq(afu);
pci_unmap_slice_regs(afu);
}
@@ -1165,7 +1449,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = setup_cxl_bars(dev)))
return rc;
- if ((rc = switch_card_to_cxl(dev)))
+ if ((rc = setup_cxl_protocol_area(dev)))
return rc;
if ((rc = cxl_update_image_control(adapter)))
@@ -1177,10 +1461,13 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = sanitise_adapter_regs(adapter)))
goto err;
- if ((rc = init_implementation_adapter_regs(adapter, dev)))
+ if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
goto err;
- if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
+ /* Required for devices using CAPP DMA mode, harmless for others */
+ pci_set_master(dev);
+
+ if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
goto err;
/* If recovery happened, the last step is to turn on snooping.
@@ -1212,6 +1499,43 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
pci_disable_device(pdev);
}
+static const struct cxl_service_layer_ops psl_ops = {
+ .adapter_regs_init = init_implementation_adapter_psl_regs,
+ .afu_regs_init = init_implementation_afu_psl_regs,
+ .register_serr_irq = cxl_native_register_serr_irq,
+ .release_serr_irq = cxl_native_release_serr_irq,
+ .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
+ .debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
+ .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
+ .debugfs_stop_trace = cxl_stop_trace,
+ .write_timebase_ctrl = write_timebase_ctrl_psl,
+ .timebase_read = timebase_read_psl,
+ .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
+ .needs_reset_before_disable = true,
+};
+
+static const struct cxl_service_layer_ops xsl_ops = {
+ .adapter_regs_init = init_implementation_adapter_xsl_regs,
+ .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs,
+ .write_timebase_ctrl = write_timebase_ctrl_xsl,
+ .timebase_read = timebase_read_xsl,
+ .capi_mode = OPAL_PHB_CAPI_MODE_DMA,
+ .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
+};
+
+static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
+{
+ if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+ dev_info(&adapter->dev, "Device uses an XSL\n");
+ adapter->native->sl_ops = &xsl_ops;
+ } else {
+ dev_info(&adapter->dev, "Device uses a PSL\n");
+ adapter->native->sl_ops = &psl_ops;
+ }
+}
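The psl_ops/xsl_ops tables above leave several service-layer hooks unset for the XSL, so callers such as pci_configure_afu() guard every optional callback with a NULL check before invoking it. A stripped-down sketch of that dispatch pattern, with made-up my_* types standing in for the cxl structures:

    struct my_afu;

    struct my_sl_ops {
            int (*afu_regs_init)(struct my_afu *afu);
            int (*register_serr_irq)(struct my_afu *afu);
            void (*release_serr_irq)(struct my_afu *afu);
    };

    struct my_adapter {
            const struct my_sl_ops *sl_ops;
    };

    static int my_configure_afu(struct my_adapter *adapter, struct my_afu *afu)
    {
            int rc;

            /* the PSL table fills in these hooks; the XSL table leaves them NULL */
            if (adapter->sl_ops->afu_regs_init) {
                    rc = adapter->sl_ops->afu_regs_init(afu);
                    if (rc)
                            return rc;
            }

            if (adapter->sl_ops->register_serr_irq) {
                    rc = adapter->sl_ops->register_serr_irq(afu);
                    if (rc)
                            return rc;
            }

            return 0;
    }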
+
+
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
struct cxl *adapter;
@@ -1227,6 +1551,8 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
goto err_release;
}
+ set_sl_ops(adapter, dev);
+
/* Set defaults for parameters which need to persist over
* configure/reconfigure
*/
@@ -1280,6 +1606,67 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
device_unregister(&adapter->dev);
}
+#define CXL_MAX_PCIEX_PARENT 2
+
+static int cxl_slot_is_switched(struct pci_dev *dev)
+{
+ struct device_node *np;
+ int depth = 0;
+ const __be32 *prop;
+
+ if (!(np = pci_device_to_OF_node(dev))) {
+ pr_err("cxl: np = NULL\n");
+ return -ENODEV;
+ }
+ of_node_get(np);
+ while (np) {
+ np = of_get_next_parent(np);
+ prop = of_get_property(np, "device_type", NULL);
+ if (!prop || strcmp((char *)prop, "pciex"))
+ break;
+ depth++;
+ }
+ of_node_put(np);
+ return (depth > CXL_MAX_PCIEX_PARENT);
+}
+
+bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
+{
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return false;
+
+ if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
+ /*
+ * CAPP DMA mode is technically supported on regular P8, but
+ * will EEH if the card attempts to access memory < 4GB, which
+ * we cannot realistically avoid. We might be able to work
+ * around the issue, but until then return unsupported:
+ */
+ return false;
+ }
+
+ if (cxl_slot_is_switched(dev))
+ return false;
+
+ /*
+ * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since
+ * the CAPP can be connected to PHB 0, 1 or 2 on a first come first
+ * served basis, which is racy to check from here. If we need to
+ * support this in future we might need to consider having this
+ * function effectively reserve it ahead of time.
+ *
+ * Currently, the only user of this API is the Mellanox CX4, which is
+ * only supported on P8NVL due to the above mentioned limitation of
+ * CAPP DMA mode and therefore does not need to worry about this. If the
+ * issue with CAPP DMA mode is later worked around on P8 we might need
+ * to revisit this.
+ */
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(cxl_slot_is_supported);
+
+
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct cxl *adapter;
@@ -1291,6 +1678,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ if (cxl_slot_is_switched(dev)) {
+ dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
+ return -ENODEV;
+ }
+
if (cxl_verbose)
dump_cxl_config_space(dev);
@@ -1311,6 +1703,9 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
}
+ if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
+ pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);
+
return 0;
}
@@ -1381,6 +1776,9 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
*/
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
+ /* Only participate in EEH if we are on a virtual PHB */
+ if (afu->phb == NULL)
+ return PCI_ERS_RESULT_NONE;
cxl_vphb_error_detected(afu, state);
}
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
new file mode 100644
index 000000000000..0935d44c1770
--- /dev/null
+++ b/drivers/misc/cxl/phb.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014-2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/pci.h>
+#include "cxl.h"
+
+bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
+{
+ struct cxl_context *ctx;
+
+ /*
+ * Allocate a context to do cxl things with. This is used for interrupts
+ * in the peer model using a real phb, and if we eventually do DMA ops
+ * in the virtual phb, we'll need a default context to attach them to.
+ */
+ ctx = cxl_dev_context_init(dev);
+ if (!ctx)
+ return false;
+ dev->dev.archdata.cxl_ctx = ctx;
+
+ return (cxl_ops->afu_check_and_enable(afu) == 0);
+}
+/* exported via cxl_base */
+
+void _cxl_pci_disable_device(struct pci_dev *dev)
+{
+ struct cxl_context *ctx = cxl_get_context(dev);
+
+ if (ctx) {
+ if (ctx->status == STARTED) {
+ dev_err(&dev->dev, "Default context started\n");
+ return;
+ }
+ dev->dev.archdata.cxl_ctx = NULL;
+ cxl_release_context(ctx);
+ }
+}
+/* exported via cxl_base */
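The default context created in _cxl_pci_associate_default_context() is what a driver bound to a function behind the cxl PHB later retrieves with cxl_get_context(). A minimal, hypothetical consumer (my_cxl_client_probe is illustrative, not part of this patch):

    #include <linux/pci.h>
    #include <misc/cxl.h>

    static int my_cxl_client_probe(struct pci_dev *pdev)
    {
            struct cxl_context *ctx = cxl_get_context(pdev);

            if (!ctx)
                    return -ENODEV; /* no default context: not behind a cxl PHB */

            /* the default context can now back AFU interrupts and the like */
            return 0;
    }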
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index cdc7723b845d..dee8def1c193 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <misc/cxl.h>
+#include <asm/pnv-pci.h>
#include "cxl.h"
static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -44,7 +45,6 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *phb;
struct cxl_afu *afu;
- struct cxl_context *ctx;
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
@@ -57,30 +57,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
set_dma_ops(&dev->dev, &dma_direct_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
- /*
- * Allocate a context to do cxl things too. If we eventually do real
- * DMA ops, we'll need a default context to attach them to
- */
- ctx = cxl_dev_context_init(dev);
- if (!ctx)
- return false;
- dev->dev.archdata.cxl_ctx = ctx;
-
- return (cxl_ops->afu_check_and_enable(afu) == 0);
-}
-
-static void cxl_pci_disable_device(struct pci_dev *dev)
-{
- struct cxl_context *ctx = cxl_get_context(dev);
-
- if (ctx) {
- if (ctx->status == STARTED) {
- dev_err(&dev->dev, "Default context started\n");
- return;
- }
- dev->dev.archdata.cxl_ctx = NULL;
- cxl_release_context(ctx);
- }
+ return _cxl_pci_associate_default_context(dev, afu);
}
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
@@ -197,8 +174,8 @@ static struct pci_controller_ops cxl_pci_controller_ops =
{
.probe_mode = cxl_pci_probe_mode,
.enable_device_hook = cxl_pci_enable_device_hook,
- .disable_device = cxl_pci_disable_device,
- .release_device = cxl_pci_disable_device,
+ .disable_device = _cxl_pci_disable_device,
+ .release_device = _cxl_pci_disable_device,
.window_alignment = cxl_pci_window_alignment,
.reset_secondary_bus = cxl_pci_reset_secondary_bus,
.setup_msi_irqs = cxl_setup_msi_irqs,
@@ -208,20 +185,30 @@ static struct pci_controller_ops cxl_pci_controller_ops =
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
- struct pci_dev *phys_dev;
- struct pci_controller *phb, *phys_phb;
+ struct pci_controller *phb;
struct device_node *vphb_dn;
struct device *parent;
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- phys_dev = to_pci_dev(afu->adapter->dev.parent);
- phys_phb = pci_bus_to_host(phys_dev->bus);
- vphb_dn = phys_phb->dn;
- parent = &phys_dev->dev;
- } else {
- vphb_dn = afu->adapter->dev.parent->of_node;
- parent = afu->adapter->dev.parent;
- }
+ /*
+ * If there are no AFU configuration records we won't have anything to
+ * expose under the vPHB, so skip creating one, returning success since
+ * this is still a valid case. This will also opt us out of EEH
+ * handling since we won't have anything special to do if there are no
+ * kernel drivers attached to the vPHB, and EEH handling is not yet
+ * supported in the peer model.
+ */
+ if (!afu->crs_num)
+ return 0;
+
+ /* The parent device is the adapter. Reuse the device node of
+ * the adapter.
+ * We don't seem to care what device node is used for the vPHB,
+ * but tools such as lsvpd walk up the device parents looking
+ * for a valid location code, so we might as well show devices
+ * attached to the adapter as being located on that adapter.
+ */
+ parent = afu->adapter->dev.parent;
+ vphb_dn = parent->of_node;
/* Alloc and setup PHB data structure */
phb = pcibios_alloc_controller(vphb_dn);
@@ -272,13 +259,18 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
pcibios_free_controller(phb);
}
+static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
+{
+ return (phb->ops == &cxl_pcie_pci_ops);
+}
+
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
struct pci_controller *phb;
phb = pci_bus_to_host(dev->bus);
- return (phb->ops == &cxl_pcie_pci_ops);
+ return _cxl_pci_is_vphb_device(phb);
}
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
@@ -287,7 +279,13 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
- return (struct cxl_afu *)phb->private_data;
+ if (_cxl_pci_is_vphb_device(phb))
+ return (struct cxl_afu *)phb->private_data;
+
+ if (pnv_pci_on_cxl_phb(dev))
+ return pnv_cxl_phb_to_afu(phb);
+
+ return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
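With this hunk cxl_pci_to_afu() handles both the virtual PHB and the peer-model real PHB, and returns an ERR_PTR for anything else, so callers need an IS_ERR() check rather than assuming a valid pointer. A hedged usage sketch (my_get_afu is a hypothetical caller):

    #include <linux/err.h>
    #include <linux/pci.h>
    #include <misc/cxl.h>

    static int my_get_afu(struct pci_dev *pdev)
    {
            struct cxl_afu *afu = cxl_pci_to_afu(pdev);

            if (IS_ERR(afu))
                    return PTR_ERR(afu);    /* device is not on a cxl-capable PHB */

            /* ... use the AFU ... */
            return 0;
    }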
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 4cf8f82cfca2..a70b853fa2c9 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -182,7 +182,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd)
*/
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
- int bars, rc = 0;
+ int rc = 0;
struct pci_dev *pci_dev = cd->pci_dev;
void __iomem *mmio;
@@ -193,8 +193,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd)
cd->mmio = NULL;
pci_iounmap(pci_dev, mmio);
- bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
- pci_release_selected_regions(pci_dev, bars);
+ pci_release_mem_regions(pci_dev);
/*
* Firmware/BIOS might change memory mapping during bus reset.
@@ -218,7 +217,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd)
GENWQE_INJECT_GFIR_FATAL |
GENWQE_INJECT_GFIR_INFO);
- rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ rc = pci_request_mem_regions(pci_dev, genwqe_driver_name);
if (rc) {
dev_err(&pci_dev->dev,
"[%s] err: request bars failed (%d)\n", __func__, rc);
@@ -1068,10 +1067,9 @@ static int genwqe_health_check_stop(struct genwqe_dev *cd)
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
- int err, bars;
+ int err;
struct pci_dev *pci_dev = cd->pci_dev;
- bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
err = pci_enable_device_mem(pci_dev);
if (err) {
dev_err(&pci_dev->dev,
@@ -1080,7 +1078,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
}
/* Reserve PCI I/O and memory resources */
- err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ err = pci_request_mem_regions(pci_dev, genwqe_driver_name);
if (err) {
dev_err(&pci_dev->dev,
"[%s] err: request bars failed (%d)\n", __func__, err);
@@ -1142,7 +1140,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
out_iounmap:
pci_iounmap(pci_dev, cd->mmio);
out_release_resources:
- pci_release_selected_regions(pci_dev, bars);
+ pci_release_mem_regions(pci_dev);
err_disable_device:
pci_disable_device(pci_dev);
err_out:
@@ -1154,14 +1152,12 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
- int bars;
struct pci_dev *pci_dev = cd->pci_dev;
if (cd->mmio)
pci_iounmap(pci_dev, cd->mmio);
- bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
- pci_release_selected_regions(pci_dev, bars);
+ pci_release_mem_regions(pci_dev);
pci_disable_device(pci_dev);
}
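The genwqe conversion above swaps the pci_select_bars() + pci_request_selected_regions() pair for the newer pci_request_mem_regions()/pci_release_mem_regions() helpers, which request and release every memory BAR in one call. A small sketch of the paired setup and teardown, assuming hypothetical my_pci_setup()/my_pci_teardown() wrappers:

    #include <linux/pci.h>

    static int my_pci_setup(struct pci_dev *pdev, const char *name)
    {
            int rc;

            rc = pci_enable_device_mem(pdev);
            if (rc)
                    return rc;

            /* no pci_select_bars() mask to compute any more */
            rc = pci_request_mem_regions(pdev, name);
            if (rc)
                    pci_disable_device(pdev);

            return rc;
    }

    static void my_pci_teardown(struct pci_dev *pdev)
    {
            pci_release_mem_regions(pdev);  /* paired release */
            pci_disable_device(pdev);
    }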
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
index 5a3fd76eec27..5525a204db93 100644
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -49,7 +49,7 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
/* This is a pointer to outside our current stack frame. */
if (bad_frame) {
- bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
} else {
/* Put start address just inside stack. */
bad_stack = task_stack_page(current) + THREAD_SIZE;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 89e5917e1c33..6fd9d367dea7 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -146,3 +146,7 @@ config VOP
More information about the Intel MIC family as well as the Linux
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+
+if VOP
+source "drivers/vhost/Kconfig.vringh"
+endif
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index e047efd83f57..9599d732aff3 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -38,7 +38,7 @@ static inline struct mic_device *vpdev_to_mdev(struct device *dev)
static dma_addr_t
_mic_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct mic_device *mdev = vpdev_to_mdev(dev);
@@ -48,7 +48,7 @@ _mic_dma_map_page(struct device *dev, struct page *page,
static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct mic_device *mdev = vpdev_to_mdev(dev);
@@ -144,7 +144,7 @@ static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
static void *__mic_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -164,7 +164,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
}
static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -176,7 +176,7 @@ static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
static dma_addr_t
__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
@@ -188,7 +188,7 @@ __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
static void
__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -198,7 +198,7 @@ __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -229,7 +229,7 @@ err:
static void __mic_dma_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -327,7 +327,7 @@ static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
static dma_addr_t
mic_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct mic_device *mdev = dev_get_drvdata(dev->parent);
@@ -338,7 +338,7 @@ mic_dma_map_page(struct device *dev, struct page *page,
static void
mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct mic_device *mdev = dev_get_drvdata(dev->parent);
mic_unmap_single(mdev, dma_addr, size);
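The mic changes above follow the tree-wide conversion of DMA attributes from a struct dma_attrs pointer to a plain unsigned long bitmask of DMA_ATTR_* flags. A sketch of a dma_map_ops-style callback after the conversion; the 1:1 address translation is purely illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/io.h>
    #include <linux/mm.h>

    static dma_addr_t my_dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
                                      unsigned long attrs)
    {
            /* attributes are now tested as plain bit flags */
            if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                    dev_dbg(dev, "caller handles cache maintenance itself\n");

            /* illustrative identity translation; a real driver programs its IOMMU here */
            return (dma_addr_t)(page_to_phys(page) + offset);
    }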
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 10b553765ee7..48a5dd740f3b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1801,8 +1801,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
do_data_tag = (card->ext_csd.data_tag_unit_size) &&
(prq->cmd_flags & REQ_META) &&
(rq_data_dir(prq) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
/* Argument of CMD23 */
packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
@@ -1977,8 +1976,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* When 4KB native sector is enabled, only 8 blocks
* multiple read or write is allowed
*/
- if ((brq->data.blocks & 0x07) &&
- (card->ext_csd.data_sector_size == 4096)) {
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
req->rq_disk->disk_name);
mq_rq = mq->mqrq_cur;
@@ -2501,12 +2500,6 @@ force_ro_fail:
return ret;
}
-#define CID_MANFID_SANDISK 0x2
-#define CID_MANFID_TOSHIBA 0x11
-#define CID_MANFID_MICRON 0x13
-#define CID_MANFID_SAMSUNG 0x15
-#define CID_MANFID_KINGSTON 0x70
-
static const struct mmc_fixup blk_fixups[] =
{
MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 4bc48f10452f..c64266f5a399 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -332,12 +332,13 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_ddr52(card) ? "DDR " : "",
type);
} else {
- pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_hs(card) ? "high speed " : ""),
mmc_card_hs400(card) ? "HS400 " :
(mmc_card_hs200(card) ? "HS200 " : ""),
+ mmc_card_hs400es(card) ? "Enhanced strobe " : "",
mmc_card_ddr52(card) ? "DDR " : "",
uhs_bus_speed_mode, type, card->rca);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8b4dfd45433b..e55cde6d436d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1127,6 +1127,15 @@ void mmc_set_initial_state(struct mmc_host *host)
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
host->ios.drv_type = 0;
+ host->ios.enhanced_strobe = false;
+
+ /*
+ * Make sure we are in non-enhanced strobe mode before we
+ * actually enable it in ext_csd.
+ */
+ if ((host->caps2 & MMC_CAP2_HS400_ES) &&
+ host->ops->hs400_enhanced_strobe)
+ host->ops->hs400_enhanced_strobe(host, &host->ios);
mmc_set_ios(host);
}
@@ -1925,17 +1934,15 @@ void mmc_init_erase(struct mmc_card *card)
* to that size and alignment.
*
* For SD cards that define Allocation Unit size, limit erases to one
- * Allocation Unit at a time. For MMC cards that define High Capacity
- * Erase Size, whether it is switched on or not, limit to that size.
- * Otherwise just have a stab at a good value. For modern cards it
- * will end up being 4MiB. Note that if the value is too small, it
- * can end up taking longer to erase.
+ * Allocation Unit at a time.
+ * For MMC, have a stab at a good value; for modern cards it will
+ * end up being 4MiB. Note that if the value is too small, it can end
+ * up taking longer to erase. Also note, erase_size is already set to
+ * High Capacity Erase Size if available when this function is called.
*/
if (mmc_card_sd(card) && card->ssr.au) {
card->pref_erase = card->ssr.au;
card->erase_shift = ffs(card->ssr.au) - 1;
- } else if (card->ext_csd.hc_erase_size) {
- card->pref_erase = card->ext_csd.hc_erase_size;
} else if (card->erase_size) {
sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
if (sz < 128)
@@ -2060,7 +2067,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
struct mmc_command cmd = {0};
- unsigned int qty = 0;
+ unsigned int qty = 0, busy_timeout = 0;
+ bool use_r1b_resp = false;
unsigned long timeout;
int err;
@@ -2128,8 +2136,22 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
+ busy_timeout = mmc_erase_timeout(card, arg, qty);
+ /*
+ * If the host controller supports busy signalling and the timeout for
+ * the erase operation does not exceed the max_busy_timeout, we should
+ * use an R1B response. Otherwise we need to prevent the host from
+ * doing hw busy detection, which is done by converting to an R1
+ * response instead.
+ */
+ if (card->host->max_busy_timeout &&
+ busy_timeout > card->host->max_busy_timeout) {
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = busy_timeout;
+ use_r1b_resp = true;
+ }
+
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: erase error %d, status %#x\n",
@@ -2141,7 +2163,14 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
if (mmc_host_is_spi(card->host))
goto out;
- timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+ /*
+ * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
+ * shall be avoided.
+ */
+ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+ goto out;
+
+ timeout = jiffies + msecs_to_jiffies(busy_timeout);
do {
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
@@ -2321,23 +2350,41 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
unsigned int arg)
{
struct mmc_host *host = card->host;
- unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
+ unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
unsigned int last_timeout = 0;
- if (card->erase_shift)
+ if (card->erase_shift) {
max_qty = UINT_MAX >> card->erase_shift;
- else if (mmc_card_sd(card))
+ min_qty = card->pref_erase >> card->erase_shift;
+ } else if (mmc_card_sd(card)) {
max_qty = UINT_MAX;
- else
+ min_qty = card->pref_erase;
+ } else {
max_qty = UINT_MAX / card->erase_size;
+ min_qty = card->pref_erase / card->erase_size;
+ }
- /* Find the largest qty with an OK timeout */
+ /*
+ * We should not use 'host->max_busy_timeout' as the only limit when
+ * deciding the max discard sectors. We want a balanced value that
+ * improves erase speed without allowing an excessively long timeout.
+ *
+ * Here we use 'card->pref_erase' as the minimum number of discard
+ * sectors regardless of 'host->max_busy_timeout', but if
+ * 'host->max_busy_timeout' is large enough to allow more discard
+ * sectors, we keep increasing the maximum until a balanced value is
+ * reached.
+ */
do {
y = 0;
for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
timeout = mmc_erase_timeout(card, arg, qty + x);
- if (timeout > host->max_busy_timeout)
+
+ if (qty + x > min_qty &&
+ timeout > host->max_busy_timeout)
break;
+
if (timeout < last_timeout)
break;
last_timeout = timeout;
@@ -2491,17 +2538,21 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
+ if (!(host->caps2 & MMC_CAP2_NO_SD))
+ mmc_send_if_cond(host, host->ocr_avail);
/* Order's important: probe SDIO, then SD, then MMC */
if (!(host->caps2 & MMC_CAP2_NO_SDIO))
if (!mmc_attach_sdio(host))
return 0;
- if (!mmc_attach_sd(host))
- return 0;
- if (!mmc_attach_mmc(host))
- return 0;
+ if (!(host->caps2 & MMC_CAP2_NO_SD))
+ if (!mmc_attach_sd(host))
+ return 0;
+
+ if (!(host->caps2 & MMC_CAP2_NO_MMC))
+ if (!mmc_attach_mmc(host))
+ return 0;
mmc_power_off(host);
return -EIO;
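The erase path above now derives the response type from the host's busy-signalling limits: R1B with a hardware busy timeout when the erase fits within max_busy_timeout, otherwise a plain R1 response plus software polling. Condensed into one hypothetical helper:

    #include <linux/mmc/core.h>
    #include <linux/mmc/host.h>
    #include <linux/mmc/mmc.h>

    static bool my_fill_erase_cmd(struct mmc_command *cmd, struct mmc_host *host,
                                  unsigned int busy_timeout_ms)
    {
            cmd->opcode = MMC_ERASE;

            if (host->max_busy_timeout && busy_timeout_ms > host->max_busy_timeout) {
                    /* too long for HW busy detection: use R1 and poll in software */
                    cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
                    return false;   /* caller must poll CMD13 or ->card_busy */
            }

            cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
            cmd->busy_timeout = busy_timeout_ms;
            return true;            /* the host signals busy completion itself */
    }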
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9382a57a5aa4..c8451ce557ae 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -148,7 +148,8 @@ static int mmc_ios_show(struct seq_file *s, void *data)
str = "mmc HS200";
break;
case MMC_TIMING_MMC_HS400:
- str = "mmc HS400";
+ str = mmc_card_hs400es(host->card) ?
+ "mmc HS400 enhanced strobe" : "mmc HS400";
break;
default:
str = "invalid";
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1be42fab1a30..98f25ffb4258 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -313,6 +313,14 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
if (of_property_read_bool(np, "mmc-hs400-1_2v"))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+ if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+ host->caps2 |= MMC_CAP2_HS400_ES;
+ if (of_property_read_bool(np, "no-sdio"))
+ host->caps2 |= MMC_CAP2_NO_SDIO;
+ if (of_property_read_bool(np, "no-sd"))
+ host->caps2 |= MMC_CAP2_NO_SD;
+ if (of_property_read_bool(np, "no-mmc"))
+ host->caps2 |= MMC_CAP2_NO_MMC;
host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5d438ad3ee32..f2d185cf8a8b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -45,6 +45,17 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
+static const struct mmc_fixup mmc_ext_csd_fixups[] = {
+ /*
+ * Certain Hynix eMMC 4.41 cards might get broken when HPI feature
+ * is used so disable the HPI feature for such buggy cards.
+ */
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
+ 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+
+ END_FIXUP
+};
+
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
@@ -235,6 +246,11 @@ static void mmc_select_card_type(struct mmc_card *card)
avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
}
+ if ((caps2 & MMC_CAP2_HS400_ES) &&
+ card->ext_csd.strobe_support &&
+ (avail_type & EXT_CSD_CARD_TYPE_HS400))
+ avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
+
card->ext_csd.hs_max_dtr = hs_max_dtr;
card->ext_csd.hs200_max_dtr = hs200_max_dtr;
card->mmc_avail_type = avail_type;
@@ -370,6 +386,9 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
*/
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
+ /* fixup device after ext_csd revision field is updated */
+ mmc_fixup_device(card, mmc_ext_csd_fixups);
+
card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
@@ -386,6 +405,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_card_set_blockaddr(card);
}
+ card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
mmc_select_card_type(card);
@@ -500,7 +520,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->cid.year += 16;
/* check whether the eMMC card supports BKOPS */
- if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ if (!mmc_card_broken_hpi(card) &&
+ ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
card->ext_csd.bkops = 1;
card->ext_csd.man_bkops_en =
(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -513,7 +534,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
/* check whether the eMMC card supports HPI */
- if (!broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
+ if (!mmc_card_broken_hpi(card) &&
+ !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
card->ext_csd.hpi = 1;
if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
@@ -727,6 +749,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
static ssize_t mmc_fwrev_show(struct device *dev,
struct device_attribute *attr,
@@ -744,6 +767,22 @@ static ssize_t mmc_fwrev_show(struct device *dev,
static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
+static ssize_t mmc_dsr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sprintf(buf, "0x%x\n", host->dsr);
+ else
+ /* return default DSR value */
+ return sprintf(buf, "0x%x\n", 0x404);
+}
+
+static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
@@ -762,6 +801,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
&dev_attr_rel_sectors.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_dsr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mmc_std);
@@ -959,6 +1000,19 @@ static int mmc_select_bus_width(struct mmc_card *card)
return err;
}
+/* Caller must hold re-tuning */
+static int mmc_switch_status(struct mmc_card *card)
+{
+ u32 status;
+ int err;
+
+ err = mmc_send_status(card, &status);
+ if (err)
+ return err;
+
+ return mmc_switch_status_error(card->host, status);
+}
+
/*
* Switch to the high-speed mode
*/
@@ -969,9 +1023,11 @@ static int mmc_select_hs(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time,
- true, true, true);
- if (!err)
+ true, false, true);
+ if (!err) {
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ err = mmc_switch_status(card);
+ }
return err;
}
@@ -1047,23 +1103,9 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
return err;
}
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
- u32 status;
- int err;
-
- err = mmc_send_status(card, &status);
- if (err)
- return err;
-
- return mmc_switch_status_error(card->host, status);
-}
-
static int mmc_select_hs400(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- bool send_status = true;
unsigned int max_dtr;
int err = 0;
u8 val;
@@ -1075,19 +1117,12 @@ static int mmc_select_hs400(struct mmc_card *card)
host->ios.bus_width == MMC_BUS_WIDTH_8))
return 0;
- if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- send_status = false;
-
- /* Reduce frequency to HS frequency */
- max_dtr = card->ext_csd.hs_max_dtr;
- mmc_set_clock(host, max_dtr);
-
/* Switch card to HS mode */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
mmc_hostname(host), err);
@@ -1097,11 +1132,13 @@ static int mmc_select_hs400(struct mmc_card *card)
/* Set host controller to HS timing */
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
/* Switch card to DDR */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1120,7 +1157,7 @@ static int mmc_select_hs400(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
mmc_hostname(host), err);
@@ -1131,11 +1168,9 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
return 0;
@@ -1153,14 +1188,10 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
int mmc_hs400_to_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- bool send_status = true;
unsigned int max_dtr;
int err;
u8 val;
- if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- send_status = false;
-
/* Reduce frequency to HS */
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
@@ -1169,49 +1200,43 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
/* Switch HS DDR to HS */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
/* Switch HS to HS200 */
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
- val, card->ext_csd.generic_cmd6_time, true,
- send_status, true);
+ val, card->ext_csd.generic_cmd6_time,
+ true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
mmc_set_bus_speed(card);
@@ -1223,6 +1248,78 @@ out_err:
return err;
}
+static int mmc_select_hs400es(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+ u8 val;
+
+ if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+ err = -ENOTSUPP;
+ goto out_err;
+ }
+
+ err = mmc_select_bus_width(card);
+ if (err < 0)
+ goto out_err;
+
+ /* Switch card to HS mode */
+ err = mmc_select_hs(card);
+ if (err) {
+ pr_err("%s: switch to high-speed failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+
+ /* Switch card to DDR with strobe bit */
+ val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ val,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ /* Switch card to HS400 */
+ val = EXT_CSD_TIMING_HS400 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time,
+ true, false, true);
+ if (err) {
+ pr_err("%s: switch to hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ /* Set host controller to HS400 timing and frequency */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+
+ /* Have the controller enable the enhanced strobe function */
+ host->ios.enhanced_strobe = true;
+ if (host->ops->hs400_enhanced_strobe)
+ host->ops->hs400_enhanced_strobe(host, &host->ios);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
+
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
@@ -1250,7 +1347,6 @@ static void mmc_select_driver_type(struct mmc_card *card)
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- bool send_status = true;
unsigned int old_timing, old_signal_voltage;
int err = -EINVAL;
u8 val;
@@ -1268,34 +1364,30 @@ static int mmc_select_hs200(struct mmc_card *card)
mmc_select_driver_type(card);
- if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- send_status = false;
-
/*
* Set the bus width(4 or 8) with host's support and
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (err >= 0) {
+ if (err > 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err)
goto err;
old_timing = host->ios.timing;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- if (!send_status) {
- err = mmc_switch_status(card);
- /*
- * mmc_select_timing() assumes timing has not changed if
- * it is a switch error.
- */
- if (err == -EBADMSG)
- mmc_set_timing(host, old_timing);
- }
+
+ err = mmc_switch_status(card);
+ /*
+ * mmc_select_timing() assumes timing has not changed if
+ * it is a switch error.
+ */
+ if (err == -EBADMSG)
+ mmc_set_timing(host, old_timing);
}
err:
if (err) {
@@ -1310,7 +1402,7 @@ err:
}
/*
- * Activate High Speed or HS200 mode if supported.
+ * Activate High Speed, HS200 or HS400ES mode if supported.
*/
static int mmc_select_timing(struct mmc_card *card)
{
@@ -1319,7 +1411,9 @@ static int mmc_select_timing(struct mmc_card *card)
if (!mmc_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ err = mmc_select_hs400es(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
err = mmc_select_hs200(card);
else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
@@ -1583,7 +1677,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (err >= 0) {
+ if (err > 0) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
@@ -1616,7 +1710,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* If cache size is higher than 0, this indicates
* the existence of cache and it can be turned on.
*/
- if (card->ext_csd.cache_size > 0) {
+ if (!mmc_card_broken_hpi(card) &&
+ card->ext_csd.cache_size > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_CACHE_CTRL, 1,
card->ext_csd.generic_cmd6_time);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 62355bda608f..ad6e9798e949 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -480,6 +480,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
bool expired = false;
+ bool busy = false;
mmc_retune_hold(host);
@@ -533,21 +534,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
timeout_ms = MMC_OPS_TIMEOUT_MS;
/* Must check status to be sure of no errors. */
- timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
+ /*
+ * Due to the possibility of being preempted after
+ * sending the status command, check the expiration
+ * time first.
+ */
+ expired = time_after(jiffies, timeout);
if (send_status) {
- /*
- * Due to the possibility of being preempted after
- * sending the status command, check the expiration
- * time first.
- */
- expired = time_after(jiffies, timeout);
err = __mmc_send_status(card, &status, ignore_crc);
if (err)
goto out;
}
if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
break;
+ if (host->ops->card_busy) {
+ if (!host->ops->card_busy(host))
+ break;
+ busy = true;
+ }
if (mmc_host_is_spi(host))
break;
@@ -556,19 +562,20 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
* doesn't support MMC_CAP_WAIT_WHILE_BUSY, then we can only
* rely on waiting for the stated timeout to be sufficient.
*/
- if (!send_status) {
+ if (!send_status && !host->ops->card_busy) {
mmc_delay(timeout_ms);
goto out;
}
/* Timeout if the device never leaves the program state. */
- if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) {
+ if (expired &&
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(host), __func__);
err = -ETIMEDOUT;
goto out;
}
- } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);
err = mmc_switch_status_error(host, status);
out:
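__mmc_switch() above gains a second busy indicator besides CMD13: a host driver may implement ->card_busy() to read DAT0 directly. The combined test reduces to something like this hypothetical helper:

    #include <linux/mmc/host.h>
    #include <linux/mmc/mmc.h>

    static bool my_card_still_busy(struct mmc_host *host, u32 status)
    {
            /* if the host can read DAT0 directly, trust that */
            if (host->ops->card_busy)
                    return host->ops->card_busy(host);

            /* otherwise infer busy from the CMD13 response */
            return R1_CURRENT_STATE(status) == R1_STATE_PRG;
    }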
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index fad660b95809..ca9cade317c7 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -72,6 +72,8 @@ void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
f->cis_vendor == (u16) SDIO_ANY_ID) &&
(f->cis_device == card->cis.device ||
f->cis_device == (u16) SDIO_ANY_ID) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
rev >= f->rev_start && rev <= f->rev_end) {
dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
f->vendor_fixup(card, f->data);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index b95bd24d92f4..0123936241b0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -675,8 +675,25 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+static ssize_t mmc_dsr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sprintf(buf, "0x%x\n", host->dsr);
+ else
+ /* return default DSR value */
+ return sprintf(buf, "0x%x\n", 0x404);
+}
+
+static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+
static struct attribute *sd_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
@@ -690,6 +707,8 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_serial.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_dsr.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_std);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 0aa484c10c0a..5274f503a39a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -122,6 +122,7 @@ config MMC_SDHCI_OF_ARASAN
tristate "SDHCI OF support for the Arasan SDHCI controllers"
depends on MMC_SDHCI_PLTFM
depends on OF
+ depends on COMMON_CLK
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -296,17 +297,6 @@ config MMC_SDHCI_BCM_KONA
If you have a controller with this interface, say Y or M here.
-config MMC_SDHCI_BCM2835
- tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
- depends on ARCH_BCM2835
- depends on MMC_SDHCI_PLTFM
- select MMC_SDHCI_IO_ACCESSORS
- help
- This selects the BCM2835 SD/MMC controller. If you have a BCM2835
- platform with SD or MMC devices, say Y or M here.
-
- If unsure, say N.
-
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
@@ -798,3 +788,13 @@ config MMC_SDHCI_MICROCHIP_PIC32
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_BRCMSTB
+ tristate "Broadcom SDIO/SD/MMC support"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on MMC_SDHCI_PLTFM
+ default y
+ help
+ This selects support for the SDIO/SD/MMC Host Controller on
+ Broadcom STB SoCs.
+
+ If unsure, say Y.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index af918d261ff9..e2bdaaf43184 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -71,11 +71,11 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
-obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
+obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 7e3a3247b852..da0ef1765735 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -157,7 +157,7 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
* HOLD register should be bypassed in case there is no phase shift
* applied on CMD/DATA that is sent to the card.
*/
- if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel))
+ if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->cur_slot)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
}
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 63c2e2ed1288..8e9d886bfcda 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -32,6 +32,12 @@ struct k3_priv {
struct regmap *reg;
};
+static unsigned long dw_mci_hi6220_caps[] = {
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ 0
+};
+
static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
int ret;
@@ -126,6 +132,7 @@ static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios)
}
static const struct dw_mci_drv_data hi6220_data = {
+ .caps = dw_mci_hi6220_caps,
.switch_voltage = dw_mci_hi6220_switch_voltage,
.set_ios = dw_mci_hi6220_set_ios,
.parse_dt = dw_mci_hi6220_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 358b0dc853b0..25eae359a5ea 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -285,9 +285,6 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
- /* It needs this quirk on all Rockchip SoCs */
- host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
-
if (of_device_is_compatible(host->dev->of_node,
"rockchip,rk3288-dw-mshc"))
host->bus_hz /= RK3288_CLKGEN_DIV;
@@ -297,10 +294,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* Common capabilities of RK3288 SoC */
static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
- MMC_CAP_ERASE | MMC_CAP_CMD23,
- MMC_CAP_ERASE | MMC_CAP_CMD23,
- MMC_CAP_ERASE | MMC_CAP_CMD23,
- MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
};
static const struct dw_mci_drv_data rk2928_drv_data = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 2cc6123b1df9..32380d5d4f6b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -44,11 +44,11 @@
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
SDMMC_INT_HTO | SDMMC_INT_SBE | \
- SDMMC_INT_EBE)
+ SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
- SDMMC_INT_RESP_ERR)
+ SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
- DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
+ DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
@@ -92,7 +92,7 @@ struct idmac_desc {
__le32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
- ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
+ ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
__le32 des2; /* buffer 1 physical address */
@@ -105,6 +105,7 @@ struct idmac_desc {
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
+static int dw_mci_get_cd(struct mmc_host *mmc);
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
@@ -898,23 +899,35 @@ done:
mci_writel(host, FIFOTH, fifoth_val);
}
-static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
+static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
u32 blksz_depth, fifo_depth;
u16 thld_size;
-
- WARN_ON(!(data->flags & MMC_DATA_READ));
+ u8 enable;
/*
* CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
* in the FIFO region, so we really shouldn't access it).
*/
- if (host->verid < DW_MMC_240A)
+ if (host->verid < DW_MMC_240A ||
+ (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
+ return;
+
+ /*
+ * The card write threshold was introduced in version 2.80a.
+ * It is used when HS400 mode is enabled.
+ */
+ if (data->flags & MMC_DATA_WRITE &&
+ !(host->timing != MMC_TIMING_MMC_HS400))
return;
+ if (data->flags & MMC_DATA_WRITE)
+ enable = SDMMC_CARD_WR_THR_EN;
+ else
+ enable = SDMMC_CARD_RD_THR_EN;
+
if (host->timing != MMC_TIMING_MMC_HS200 &&
- host->timing != MMC_TIMING_MMC_HS400 &&
host->timing != MMC_TIMING_UHS_SDR104)
goto disable;
@@ -930,11 +943,11 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
* Currently just choose blksz.
*/
thld_size = blksz;
- mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
+ mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
return;
disable:
- mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
+ mci_writel(host, CDTHRCTL, 0);
}
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
@@ -1005,12 +1018,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->sg = NULL;
host->data = data;
- if (data->flags & MMC_DATA_READ) {
+ if (data->flags & MMC_DATA_READ)
host->dir_status = DW_MCI_RECV_STATUS;
- dw_mci_ctrl_rd_thld(host, data);
- } else {
+ else
host->dir_status = DW_MCI_SEND_STATUS;
- }
+
+ dw_mci_ctrl_thld(host, data);
if (dw_mci_submit_data_dma(host, data)) {
if (host->data->flags & MMC_DATA_READ)
@@ -1099,12 +1112,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- if ((clock << div) != slot->__clk_old || force_clkinit)
- dev_info(&slot->mmc->class_dev,
- "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
- slot->id, host->bus_hz, clock,
- div ? ((host->bus_hz / div) >> 1) :
- host->bus_hz, div);
+ dev_info(&slot->mmc->class_dev,
+ "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+ slot->id, host->bus_hz, clock,
+ div ? ((host->bus_hz / div) >> 1) :
+ host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
@@ -1127,9 +1139,6 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
-
- /* keep the clock with reflecting clock dividor */
- slot->__clk_old = clock << div;
}
host->current_speed = clock;
@@ -1253,15 +1262,15 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
* atomic, otherwise the card could be removed in between and the
* request wouldn't fail until another card was inserted.
*/
- spin_lock_bh(&host->lock);
- if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
- spin_unlock_bh(&host->lock);
+ if (!dw_mci_get_cd(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
+ spin_lock_bh(&host->lock);
+
dw_mci_queue_request(host, slot, mrq);
spin_unlock_bh(&host->lock);
@@ -1451,8 +1460,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
- (mmc->caps & MMC_CAP_NONREMOVABLE))
+ if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
present = 1;
else if (gpio_cd >= 0)
present = gpio_cd;
@@ -1761,6 +1769,33 @@ static void dw_mci_tasklet_func(unsigned long priv)
}
if (cmd->data && err) {
+ /*
+ * During UHS tuning sequence, sending the stop
+ * command after the response CRC error would
+ * throw the system into a confused state
+ * causing all future tuning phases to report
+ * failure.
+ *
+ * In such a case the controller will move into a data
+ * transfer state after a response error or
+ * response CRC error. Let's let that finish
+ * before trying to send a stop, so we'll go to
+ * STATE_SENDING_DATA.
+ *
+ * Although letting the data transfer take place
+ * will waste a bit of time (we already know
+ * the command was bad), it can't cause any
+ * errors since it's possible it would have
+ * taken place anyway if this tasklet got
+ * delayed. Allowing the transfer to take place
+ * avoids races and keeps things simple.
+ */
+ if ((err != -ETIMEDOUT) &&
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ state = STATE_SENDING_DATA;
+ continue;
+ }
+
dw_mci_stop_dma(host);
send_stop_abort(host, data);
state = STATE_SENDING_STOP;
@@ -1801,8 +1836,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* If all data-related interrupts don't come
* within the given time in reading data state.
*/
- if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
- (host->dir_status == DW_MCI_RECV_STATUS))
+ if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host);
break;
}
@@ -1844,8 +1878,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* interrupt doesn't come within the given time.
* in reading data state.
*/
- if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
- (host->dir_status == DW_MCI_RECV_STATUS))
+ if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host);
break;
}
@@ -2411,8 +2444,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
- if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
- del_timer(&host->dto_timer);
+ del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
@@ -2474,7 +2506,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
- host->dma_ops->complete((void *)host);
+ if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
+ host->dma_ops->complete((void *)host);
}
} else {
pending = mci_readl(host, IDSTS);
@@ -2482,7 +2515,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
- host->dma_ops->complete((void *)host);
+ if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
+ host->dma_ops->complete((void *)host);
}
}
@@ -2570,6 +2604,12 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
+ /*
+ * Support MMC_CAP_ERASE by default.
+ * It is needed to use the trim/discard/erase commands.
+ */
+ mmc->caps |= MMC_CAP_ERASE;
+
if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;
@@ -2616,10 +2656,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->max_seg_size = mmc->max_req_size;
}
- if (dw_mci_get_cd(mmc))
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- else
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ dw_mci_get_cd(mmc);
ret = mmc_add_host(mmc);
if (ret)
@@ -3006,11 +3043,8 @@ int dw_mci_probe(struct dw_mci *host)
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
- host->quirks = host->pdata->quirks;
-
- if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ setup_timer(&host->dto_timer,
+ dw_mci_dto_timer, (unsigned long)host);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 1e8d8380f9cf..9e740bc232a8 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -15,6 +15,7 @@
#define _DW_MMC_H_
#define DW_MMC_240A 0x240a
+#define DW_MMC_280A 0x280a
#define SDMMC_CTRL 0x000
#define SDMMC_PWREN 0x004
@@ -175,7 +176,10 @@
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Card read threshold */
-#define SDMMC_SET_RD_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
+#define SDMMC_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
+#define SDMMC_CARD_WR_THR_EN BIT(2)
+#define SDMMC_CARD_RD_THR_EN BIT(0)
+/* UHS-1 register defines */
#define SDMMC_UHS_18V BIT(0)
/* All ctrl reset bits */
#define SDMMC_CTRL_ALL_RESET_FLAGS \
@@ -245,9 +249,6 @@ extern int dw_mci_resume(struct dw_mci *host);
* @queue_node: List node for placing this node in the @queue list of
* &struct dw_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
- * @__clk_old: The last updated clock with reflecting clock divider.
- * Keeping track of this helps us to avoid spamming the console
- * with CONFIG_MMC_CLKGATE.
* @flags: Random state bits associated with the slot.
* @id: Number of this slot.
* @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -262,7 +263,6 @@ struct dw_mci_slot {
struct list_head queue_node;
unsigned int clock;
- unsigned int __clk_old;
unsigned long flags;
#define DW_MMC_CARD_PRESENT 0
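To illustrate the renamed SDMMC_SET_THLD() helper and the new read/write enable bits, here is a minimal userspace sketch of how the threshold size and enable flag get packed into the CDTHRCTL value written by dw_mci_ctrl_thld() above; the bit layout is taken from the definitions in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the definitions added to dw_mmc.h above. */
#define SDMMC_SET_THLD(v, x)	(((v) & 0xFFF) << 16 | (x))
#define SDMMC_CARD_WR_THR_EN	(1u << 2)
#define SDMMC_CARD_RD_THR_EN	(1u << 0)

int main(void)
{
	uint32_t blksz = 512;	/* thld_size is simply the block size */

	/* Read threshold, as used for HS200/SDR104 reads. */
	printf("read  CDTHRCTL = 0x%08x\n", SDMMC_SET_THLD(blksz, SDMMC_CARD_RD_THR_EN));
	/* Write threshold, new in 2.80a and used for HS400 writes. */
	printf("write CDTHRCTL = 0x%08x\n", SDMMC_SET_THLD(blksz, SDMMC_CARD_WR_THR_EN));
	return 0;
}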
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5642f71f8bf0..84e9afcb5c09 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -287,6 +287,11 @@ struct msdc_save_para {
u32 emmc50_cfg0;
};
+struct msdc_tune_para {
+ u32 iocon;
+ u32 pad_tune;
+};
+
struct msdc_delay_phase {
u8 maxlen;
u8 start;
@@ -326,7 +331,10 @@ struct msdc_host {
unsigned char timing;
bool vqmmc_enabled;
u32 hs400_ds_delay;
+ bool hs400_mode; /* current eMMC will run at hs400 mode */
struct msdc_save_para save_para; /* used when gate HCLK */
+ struct msdc_tune_para def_tune_para; /* default tune setting */
+ struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
static void sdr_set_bits(void __iomem *reg, u32 bs)
@@ -582,6 +590,18 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
sdr_set_bits(host->base + MSDC_INTEN, flags);
+ /*
+ * mmc_select_hs400() will drop to 50MHz and High Speed mode;
+ * the HS200/200MHz tuning result is not suitable for 50MHz.
+ */
+ if (host->sclk <= 52000000) {
+ writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
+ writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ } else {
+ writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
+ writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ }
+
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
}
@@ -781,7 +801,13 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
}
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
- msdc_reset_hw(host);
+ if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ /*
+ * should not clear fifo/interrupt as the tune data
+ * may have already come.
+ */
+ msdc_reset_hw(host);
if (events & MSDC_INT_RSPCRCERR) {
cmd->error = -EILSEQ;
host->error |= REQ_CMD_EIO;
@@ -865,7 +891,11 @@ static void msdc_start_command(struct msdc_host *host,
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
- if (cmd->error || (mrq->sbc && mrq->sbc->error))
+ if ((cmd->error &&
+ !(cmd->error == -EILSEQ &&
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
+ (mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
msdc_start_command(host, mrq, mrq->cmd);
@@ -1158,6 +1188,8 @@ static void msdc_init_hw(struct msdc_host *host)
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
+ host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1296,7 +1328,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
- struct msdc_delay_phase final_rise_delay, final_fall_delay;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int cmd_err;
int i;
@@ -1309,6 +1341,11 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
if (!cmd_err)
rise_delay |= (1 << i);
}
+ final_rise_delay = get_best_delay(host, rise_delay);
+ /* if the rising edge has enough margin, do not scan the falling edge */
+ if (final_rise_delay.maxlen >= 10 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
+ goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
@@ -1318,10 +1355,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
if (!cmd_err)
fall_delay |= (1 << i);
}
-
- final_rise_delay = get_best_delay(host, rise_delay);
final_fall_delay = get_best_delay(host, fall_delay);
+skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
@@ -1342,7 +1378,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
- struct msdc_delay_phase final_rise_delay, final_fall_delay;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
@@ -1355,6 +1391,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
if (!ret)
rise_delay |= (1 << i);
}
+ final_rise_delay = get_best_delay(host, rise_delay);
+ /* if the rising edge has enough margin, do not scan the falling edge */
+ if (final_rise_delay.maxlen >= 10 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
+ goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
@@ -1365,14 +1406,10 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
if (!ret)
fall_delay |= (1 << i);
}
-
- final_rise_delay = get_best_delay(host, rise_delay);
final_fall_delay = get_best_delay(host, fall_delay);
+skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
- /* Rising edge is more stable, prefer to use it */
- if (final_rise_delay.maxlen >= 10)
- final_maxlen = final_rise_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
@@ -1402,16 +1439,21 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
dev_err(host->dev, "Tune response fail!\n");
return ret;
}
- ret = msdc_tune_data(mmc, opcode);
- if (ret == -EIO)
- dev_err(host->dev, "Tune data fail!\n");
+ if (!host->hs400_mode) {
+ ret = msdc_tune_data(mmc, opcode);
+ if (ret == -EIO)
+ dev_err(host->dev, "Tune data fail!\n");
+ }
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
return ret;
}
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
+ host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
return 0;
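The skip_fall shortcut added above relies on get_best_delay() (defined elsewhere in mtk-sd.c and not shown in these hunks) returning the start and length of the longest run of passing delay taps in the pass/fail bitmask. A standalone sketch of that selection logic under that assumption, ignoring any wrap-around handling the real helper may do:

#include <stdint.h>
#include <stdio.h>

#define PAD_DELAY_MAX	32	/* one pass/fail bit per delay tap */

struct delay_phase {
	int start;
	int maxlen;
};

/* Longest contiguous run of set (passing) bits in mask. */
static struct delay_phase get_best_delay(uint32_t mask)
{
	struct delay_phase best = { 0, 0 };
	int start = -1, len = 0;

	for (int i = 0; i < PAD_DELAY_MAX; i++) {
		if (mask & (1u << i)) {
			if (start < 0)
				start = i;
			len++;
			if (len > best.maxlen) {
				best.start = start;
				best.maxlen = len;
			}
		} else {
			start = -1;
			len = 0;
		}
	}
	return best;
}

int main(void)
{
	struct delay_phase rise = get_best_delay(0x0000fff0);	/* taps 4..15 pass */

	/* Mirrors the new early-out: a wide enough rising window skips the falling scan. */
	if (rise.maxlen >= 10 || (rise.start == 0 && rise.maxlen >= 4))
		printf("rising window %d..%d is enough, skip falling-edge scan\n",
		       rise.start, rise.start + rise.maxlen - 1);
	return 0;
}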
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 3d1ea5e0e549..fb3ca8296273 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1065,7 +1065,7 @@ static int mxcmci_probe(struct platform_device *pdev)
if (pdata)
dat3_card_detect = pdata->dat3_card_detect;
- else if (!(mmc->caps & MMC_CAP_NONREMOVABLE)
+ else if (mmc_card_is_removable(mmc)
&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
dat3_card_detect = true;
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 93137483ecde..396c9b7e4121 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -38,7 +38,6 @@ struct realtek_pci_sdmmc {
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
struct mmc_request *mrq;
- struct workqueue_struct *workq;
#define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq"
struct work_struct work;
@@ -244,7 +243,7 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
stat_idx = sd_status_index(rsp_type);
if (rsp_type == SD_RSP_TYPE_R1b)
- timeout = 3000;
+ timeout = cmd->busy_timeout ? cmd->busy_timeout : 3000;
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
@@ -885,7 +884,7 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (sd_rw_cmd(mrq->cmd) || sdio_extblock_cmd(mrq->cmd, data))
host->using_cookie = sd_pre_dma_transfer(host, data, false);
- queue_work(host->workq, &host->work);
+ schedule_work(&host->work);
}
static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1360,7 +1359,7 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
- MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+ MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_ERASE;
mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
mmc->max_current_330 = 400;
mmc->max_current_180 = 800;
@@ -1404,11 +1403,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
return -ENOMEM;
host = mmc_priv(mmc);
- host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME);
- if (!host->workq) {
- mmc_free_host(mmc);
- return -ENOMEM;
- }
host->pcr = pcr;
host->mmc = mmc;
host->pdev = pdev;
@@ -1462,9 +1456,7 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
host->eject = true;
- flush_workqueue(host->workq);
- destroy_workqueue(host->workq);
- host->workq = NULL;
+ flush_work(&host->work);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 39814f3dc96f..c531deef3258 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1365,7 +1365,7 @@ static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
.no_detect = 1,
};
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3cmci_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index cc2e46cb5c64..30c2c0dd1bc8 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -74,7 +74,7 @@ struct s3cmci_host {
struct dentry *debug_regs;
#endif
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
struct notifier_block freq_transition;
#endif
};
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 008709c5cb09..8fe0756c8e1e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -532,11 +532,6 @@ static int sdhci_acpi_resume(struct device *dev)
return sdhci_resume_host(c->host);
}
-#else
-
-#define sdhci_acpi_suspend NULL
-#define sdhci_acpi_resume NULL
-
#endif
#ifdef CONFIG_PM
@@ -560,8 +555,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
- .suspend = sdhci_acpi_suspend,
- .resume = sdhci_acpi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
sdhci_acpi_runtime_resume, NULL)
};
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 00a8a40a3729..e5c634bdfdd9 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -264,12 +264,12 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
}
dev_dbg(dev, "non-removable=%c\n",
- (host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N');
+ mmc_card_is_removable(host->mmc) ? 'N' : 'Y');
dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n",
(mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 'Y' : 'N',
(mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N');
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(host->mmc))
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
dev_dbg(dev, "is_8bit=%c\n",
@@ -288,7 +288,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
}
/* if device is eMMC, emulate card insert right here */
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE) {
+ if (!mmc_card_is_removable(host->mmc)) {
ret = sdhci_bcm_kona_sd_card_emulate(host, 1);
if (ret) {
dev_err(dev,
@@ -326,7 +326,7 @@ err_pltfm_free:
static struct platform_driver sdhci_bcm_kona_driver = {
.driver = {
.name = "sdhci-kona",
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_bcm_kona_of_match,
},
.probe = sdhci_bcm_kona_probe,
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
deleted file mode 100644
index 4a6a1d1386cb..000000000000
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * BCM2835 SDHCI
- * Copyright (C) 2012 Stephen Warren
- * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me
- * Portions of the code there were obviously based on the Linux kernel at:
- * git://github.com/raspberrypi/linux.git rpi-3.6.y
- * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/mmc/host.h>
-#include "sdhci-pltfm.h"
-
-/*
- * 400KHz is max freq for card ID etc. Use that as min card clock. We need to
- * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY.
- */
-#define MIN_FREQ 400000
-
-/*
- * The Arasan has a bugette whereby it may lose the content of successive
- * writes to registers that are within two SD-card clock cycles of each other
- * (a clock domain crossing problem). It seems, however, that the data
- * register does not have this problem, which is just as well - otherwise we'd
- * have to nobble the DMA engine too.
- *
- * This should probably be dynamically calculated based on the actual card
- * frequency. However, this is the longest we'll have to wait, and doesn't
- * seem to slow access down too much, so the added complexity doesn't seem
- * worth it for now.
- *
- * 1/MIN_FREQ is (max) time per tick of eMMC clock.
- * 2/MIN_FREQ is time for two ticks.
- * Multiply by 1000000 to get uS per two ticks.
- * *1000000 for uSecs.
- * +1 for hack rounding.
- */
-#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1)
-
-struct bcm2835_sdhci {
- u32 shadow;
-};
-
-static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
-{
- writel(val, host->ioaddr + reg);
-
- udelay(BCM2835_SDHCI_WRITE_DELAY);
-}
-
-static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
-{
- u32 val = readl(host->ioaddr + reg);
-
- if (reg == SDHCI_CAPABILITIES)
- val |= SDHCI_CAN_VDD_330;
-
- return val;
-}
-
-static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct bcm2835_sdhci *bcm2835_host = sdhci_pltfm_priv(pltfm_host);
- u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
- bcm2835_sdhci_readl(host, reg & ~3);
- u32 word_num = (reg >> 1) & 1;
- u32 word_shift = word_num * 16;
- u32 mask = 0xffff << word_shift;
- u32 newval = (oldval & ~mask) | (val << word_shift);
-
- if (reg == SDHCI_TRANSFER_MODE)
- bcm2835_host->shadow = newval;
- else
- bcm2835_sdhci_writel(host, newval, reg & ~3);
-}
-
-static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg)
-{
- u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
- u32 word_num = (reg >> 1) & 1;
- u32 word_shift = word_num * 16;
- u32 word = (val >> word_shift) & 0xffff;
-
- return word;
-}
-
-static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
-{
- u32 oldval = bcm2835_sdhci_readl(host, reg & ~3);
- u32 byte_num = reg & 3;
- u32 byte_shift = byte_num * 8;
- u32 mask = 0xff << byte_shift;
- u32 newval = (oldval & ~mask) | (val << byte_shift);
-
- bcm2835_sdhci_writel(host, newval, reg & ~3);
-}
-
-static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg)
-{
- u32 val = bcm2835_sdhci_readl(host, (reg & ~3));
- u32 byte_num = reg & 3;
- u32 byte_shift = byte_num * 8;
- u32 byte = (val >> byte_shift) & 0xff;
-
- return byte;
-}
-
-static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host)
-{
- return MIN_FREQ;
-}
-
-static const struct sdhci_ops bcm2835_sdhci_ops = {
- .write_l = bcm2835_sdhci_writel,
- .write_w = bcm2835_sdhci_writew,
- .write_b = bcm2835_sdhci_writeb,
- .read_l = bcm2835_sdhci_readl,
- .read_w = bcm2835_sdhci_readw,
- .read_b = bcm2835_sdhci_readb,
- .set_clock = sdhci_set_clock,
- .get_max_clock = sdhci_pltfm_clk_get_max_clock,
- .get_min_clock = bcm2835_sdhci_get_min_clock,
- .set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
-};
-
-static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = {
- .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
- .ops = &bcm2835_sdhci_ops,
-};
-
-static int bcm2835_sdhci_probe(struct platform_device *pdev)
-{
- struct sdhci_host *host;
- struct bcm2835_sdhci *bcm2835_host;
- struct sdhci_pltfm_host *pltfm_host;
- int ret;
-
- host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata,
- sizeof(*bcm2835_host));
- if (IS_ERR(host))
- return PTR_ERR(host);
-
- pltfm_host = sdhci_priv(host);
-
- pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pltfm_host->clk)) {
- ret = PTR_ERR(pltfm_host->clk);
- goto err;
- }
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable host clk\n");
- goto err;
- }
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_clk;
-
- return 0;
-err_clk:
- clk_disable_unprepare(pltfm_host->clk);
-err:
- sdhci_pltfm_free(pdev);
- return ret;
-}
-
-static const struct of_device_id bcm2835_sdhci_of_match[] = {
- { .compatible = "brcm,bcm2835-sdhci" },
- { }
-};
-MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match);
-
-static struct platform_driver bcm2835_sdhci_driver = {
- .driver = {
- .name = "sdhci-bcm2835",
- .of_match_table = bcm2835_sdhci_of_match,
- .pm = SDHCI_PLTFM_PMOPS,
- },
- .probe = bcm2835_sdhci_probe,
- .remove = sdhci_pltfm_unregister,
-};
-module_platform_driver(bcm2835_sdhci_driver);
-
-MODULE_DESCRIPTION("BCM2835 SDHCI driver");
-MODULE_AUTHOR("Stephen Warren");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
new file mode 100644
index 000000000000..cce10fe3e19e
--- /dev/null
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -0,0 +1,143 @@
+/*
+ * sdhci-brcmstb.c Support for SDHCI on Broadcom BRCMSTB SoCs
+ *
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "sdhci-pltfm.h"
+
+#ifdef CONFIG_PM_SLEEP
+
+static int sdhci_brcmstb_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int res;
+
+ res = sdhci_suspend_host(host);
+ if (res)
+ return res;
+ clk_disable_unprepare(pltfm_host->clk);
+ return res;
+}
+
+static int sdhci_brcmstb_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int err;
+
+ err = clk_prepare_enable(pltfm_host->clk);
+ if (err)
+ return err;
+ return sdhci_resume_host(host);
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend,
+ sdhci_brcmstb_resume);
+
+static const struct sdhci_ops sdhci_brcmstb_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static int sdhci_brcmstb_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct clk *clk;
+ int res;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Clock not found in Device Tree\n");
+ clk = NULL;
+ }
+ res = clk_prepare_enable(clk);
+ if (res)
+ return res;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+ if (IS_ERR(host)) {
+ res = PTR_ERR(host);
+ goto err_clk;
+ }
+
+ /* Enable MMC_CAP2_HC_ERASE_SZ for better max discard calculations */
+ host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+
+ sdhci_get_of_property(pdev);
+ mmc_of_parse(host->mmc);
+
+ /*
+ * Supply the existing CAPS, but clear the UHS modes. This
+ * will allow these modes to be specified by device tree
+ * properties through mmc_of_parse().
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ res = sdhci_add_host(host);
+ if (res)
+ goto err;
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->clk = clk;
+ return res;
+
+err:
+ sdhci_pltfm_free(pdev);
+err_clk:
+ clk_disable_unprepare(clk);
+ return res;
+}
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm7425-sdhci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+
+static struct platform_driver sdhci_brcmstb_driver = {
+ .driver = {
+ .name = "sdhci-brcmstb",
+ .owner = THIS_MODULE,
+ .pm = &sdhci_brcmstb_pmops,
+ .of_match_table = of_match_ptr(sdhci_brcm_of_match),
+ },
+ .probe = sdhci_brcmstb_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_brcmstb_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Broadcom BRCMSTB SoCs");
+MODULE_AUTHOR("Broadcom");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 59f2923f8054..bd286db7f9af 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -101,7 +101,7 @@ static int sdhci_cns3xxx_probe(struct platform_device *pdev)
static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_cns3xxx_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 407c21f152b2..de9f9603dbdc 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_dove_of_match_table,
},
.probe = sdhci_dove_probe,
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9d3ae1f4bd3c..99e0b334f9df 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -39,11 +39,13 @@
#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1)
#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8)
#define ESDHC_WTMK_LVL 0x44
+#define ESDHC_WTMK_DEFAULT_VAL 0x10401040
#define ESDHC_MIX_CTRL 0x48
#define ESDHC_MIX_CTRL_DDREN (1 << 3)
#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
+#define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24)
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
#define ESDHC_MIX_CTRL_HS400_EN (1 << 26)
/* Bits 3 and 6 are not SDHCI standard definitions */
@@ -75,7 +77,8 @@
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
-#define ESDHC_TUNING_START_TAP 0x1
+#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
+#define ESDHC_TUNING_START_TAP_MASK 0xff
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -299,7 +302,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
/* imx6q/dl does not have cap_1 register, fake one */
val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
- | SDHCI_USE_SDR50_TUNING;
+ | SDHCI_USE_SDR50_TUNING
+ | (SDHCI_TUNING_MODE_3 << SDHCI_RETUNING_MODE_SHIFT);
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
val |= SDHCI_SUPPORT_HS400;
@@ -469,32 +473,29 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
- if (val & SDHCI_CTRL_TUNED_CLK)
+ if (val & SDHCI_CTRL_TUNED_CLK) {
new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
- else
+ new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+ } else {
new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+ }
writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
- u32 tuning_ctrl;
if (val & SDHCI_CTRL_TUNED_CLK) {
v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
} else {
v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
}
if (val & SDHCI_CTRL_EXEC_TUNING) {
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
- tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
- tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP;
- if (imx_data->boarddata.tuning_step) {
- tuning_ctrl &= ~ESDHC_TUNING_STEP_MASK;
- tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT;
- }
- writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
+ m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
} else {
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
@@ -751,6 +752,7 @@ static void esdhc_post_tuning(struct sdhci_host *host)
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
}
@@ -838,6 +840,11 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
u32 v;
if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
+ /* disable clock before enabling strobe dll */
+ writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
+ ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+
/* force a reset on strobe dll */
writel(ESDHC_STROBE_DLL_CTRL_RESET,
host->ioaddr + ESDHC_STROBE_DLL_CTRL);
@@ -899,6 +906,8 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
imx_data->is_ddr = 1;
+ /* update the clock after enabling DDR so the strobe DLL can lock */
+ host->ops->set_clock(host, host->clock);
esdhc_set_strobe_dll(host);
break;
}
@@ -957,6 +966,62 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.ops = &sdhci_esdhc_ops,
};
+static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ int tmp;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ /*
+ * The imx6q ROM code will change the default watermark
+ * level setting to something insane. Change it back here.
+ */
+ writel(ESDHC_WTMK_DEFAULT_VAL, host->ioaddr + ESDHC_WTMK_LVL);
+
+ /*
+ * The ROM code will clear the burst_length_enable bit if this
+ * usdhc is chosen to boot the system. Change it back here,
+ * otherwise it will hurt performance badly. This bit is used to
+ * enable/disable the burst length for the external AHB2AXI
+ * bridge; it's especially useful for INCR transfers because,
+ * without a burst length indicator, the AHB2AXI bridge does not
+ * know the burst length in advance, and AHB INCR transfers can
+ * only be converted to singles on the AXI side.
+ */
+ writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
+ | ESDHC_BURST_LEN_EN_INCR,
+ host->ioaddr + SDHCI_HOST_CONTROL);
+ /*
+ * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+ * TO1.1, it's harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ host->ioaddr + 0x6c);
+
+ /* disable DLL_CTRL delay line settings */
+ writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ tmp |= ESDHC_STD_TUNING_EN |
+ ESDHC_TUNING_START_TAP_DEFAULT;
+ if (imx_data->boarddata.tuning_start_tap) {
+ tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+ tmp |= imx_data->boarddata.tuning_start_tap;
+ }
+
+ if (imx_data->boarddata.tuning_step) {
+ tmp &= ~ESDHC_TUNING_STEP_MASK;
+ tmp |= imx_data->boarddata.tuning_step
+ << ESDHC_TUNING_STEP_SHIFT;
+ }
+ writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+ }
+ }
+}
+
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -975,6 +1040,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
boarddata->wp_type = ESDHC_WP_GPIO;
of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
+ of_property_read_u32(np, "fsl,tuning-start-tap",
+ &boarddata->tuning_start_tap);
if (of_find_property(np, "no-1-8-v", NULL))
boarddata->support_vsel = false;
@@ -1147,58 +1214,27 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (IS_ERR(imx_data->pins_default))
dev_warn(mmc_dev(host->mmc), "could not get default state\n");
- host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
-
if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
| SDHCI_QUIRK_BROKEN_ADMA;
- /*
- * The imx6q ROM code will change the default watermark level setting
- * to something insane. Change it back here.
- */
if (esdhc_is_usdhc(imx_data)) {
- writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL);
-
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR;
-
- /*
- * ROM code will change the bit burst_length_enable setting
- * to zero if this usdhc is choosed to boot system. Change
- * it back here, otherwise it will impact the performance a
- * lot. This bit is used to enable/disable the burst length
- * for the external AHB2AXI bridge, it's usefully especially
- * for INCR transfer because without burst length indicator,
- * the AHB2AXI bridge does not know the burst length in
- * advance. And without burst length indicator, AHB INCR
- * transfer can only be converted to singles on the AXI side.
- */
- writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
- | ESDHC_BURST_LEN_EN_INCR,
- host->ioaddr + SDHCI_HOST_CONTROL);
-
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
- /*
- * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
- * TO1.1, it's harmless for MX6SL
- */
- writel(readl(host->ioaddr + 0x6c) | BIT(7),
- host->ioaddr + 0x6c);
+ /* clear tuning bits in case the ROM has already set them */
+ writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
+ writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
- if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
- writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
- ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
- host->ioaddr + ESDHC_TUNING_CTRL);
-
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
@@ -1212,6 +1248,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_clk;
+ sdhci_esdhc_imx_hwinit(host);
+
err = sdhci_add_host(host);
if (err)
goto disable_clk;
@@ -1255,6 +1293,25 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_esdhc_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(host);
+}
+
+static int sdhci_esdhc_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ /* re-initialize hw state in case it's lost in low power mode */
+ sdhci_esdhc_imx_hwinit(host);
+
+ return sdhci_resume_host(host);
+}
+#endif
+
#ifdef CONFIG_PM
static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
@@ -1291,7 +1348,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_esdhc_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
sdhci_esdhc_runtime_resume, NULL)
};
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 1110f73b08aa..726246665850 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -164,8 +164,17 @@ static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
static const struct sdhci_iproc_data iproc_data = {
.pdata = &sdhci_iproc_pltfm_data,
- .caps = 0x05E90000,
- .caps1 = 0x00000064,
+ .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_VDD_180 |
+ SDHCI_CAN_DO_SUSPEND |
+ SDHCI_CAN_DO_HISPD |
+ SDHCI_CAN_DO_ADMA2 |
+ SDHCI_CAN_DO_SDMA,
+ .caps1 = SDHCI_DRIVER_TYPE_C |
+ SDHCI_DRIVER_TYPE_D |
+ SDHCI_SUPPORT_DDR50,
.mmc_caps = MMC_CAP_1_8V_DDR,
};
@@ -251,7 +260,7 @@ static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
.of_match_table = sdhci_iproc_of_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_iproc_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 0653fe730150..8ef44a2a2fd9 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -32,6 +32,21 @@
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
+#define CORE_PWRCTL_STATUS 0xdc
+#define CORE_PWRCTL_MASK 0xe0
+#define CORE_PWRCTL_CLEAR 0xe4
+#define CORE_PWRCTL_CTL 0xe8
+#define CORE_PWRCTL_BUS_OFF BIT(0)
+#define CORE_PWRCTL_BUS_ON BIT(1)
+#define CORE_PWRCTL_IO_LOW BIT(2)
+#define CORE_PWRCTL_IO_HIGH BIT(3)
+#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
+#define CORE_PWRCTL_IO_SUCCESS BIT(2)
+#define REQ_BUS_OFF BIT(0)
+#define REQ_BUS_ON BIT(1)
+#define REQ_IO_LOW BIT(2)
+#define REQ_IO_HIGH BIT(3)
+#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
#define CORE_DLL_EN BIT(16)
@@ -56,6 +71,7 @@
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
+ int pwr_irq; /* power irq */
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
@@ -410,6 +426,85 @@ retry:
return rc;
}
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct mmc_host *mmc = host->mmc;
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ break;
+ }
+
+ /*
+ * When clock frequency is less than 100MHz, the feedback clock must be
+ * provided and DLL must not be used so that tuning can be skipped. To
+ * provide feedback clock, the mode selection can be any value less
+ * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
+ */
+ if (host->clock <= 100000000 &&
+ (uhs == MMC_TIMING_MMC_HS400 ||
+ uhs == MMC_TIMING_MMC_HS200 ||
+ uhs == MMC_TIMING_UHS_SDR104))
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+ dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
+ mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 irq_status, irq_ack = 0;
+
+ irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+ irq_status &= INT_MASK;
+
+ writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
+
+ if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+ /*
+ * The driver has to acknowledge the interrupt, switch voltages and
+ * report back to this register whether it succeeded. The voltage
+ * switches are handled by the sdhci core, so just report success.
+ */
+ writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+}
+
+static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+
+ sdhci_msm_voltage_switch(host);
+
+ return IRQ_HANDLED;
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -422,11 +517,13 @@ static const struct sdhci_ops sdhci_msm_ops = {
.reset = sdhci_reset,
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+ .voltage_switch = sdhci_msm_voltage_switch,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
SDHCI_QUIRK_SINGLE_POWER_WRITE,
.ops = &sdhci_msm_ops,
};
@@ -473,7 +570,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(msm_host->pclk)) {
ret = PTR_ERR(msm_host->pclk);
- dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret);
+ dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
@@ -545,6 +642,22 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /* Setup IRQ for handling power/voltage tasks with PMIC */
+ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+ if (msm_host->pwr_irq < 0) {
+ dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
+ msm_host->pwr_irq);
+ goto clk_disable;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+ sdhci_msm_pwr_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
+ goto clk_disable;
+ }
+
ret = sdhci_add_host(host);
if (ret)
goto clk_disable;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index b6f4c1d41636..e0f193f7e3e5 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -19,27 +19,136 @@
* your option) any later version.
*/
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
+#include <linux/regmap.h>
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define VENDOR_ENHANCED_STROBE BIT(0)
#define CLK_CTRL_TIMEOUT_SHIFT 16
#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+/*
+ * On some SoCs the syscon area has a feature where the upper 16-bits of
+ * each 32-bit register act as a write mask for the lower 16-bits. This allows
+ * atomic updates of the register without locking. This macro is used on SoCs
+ * that have that feature.
+ */
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+/**
+ * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
+ *
+ * @reg: Offset within the syscon of the register containing this field
+ * @width: Number of bits for this field
+ * @shift: Bit offset within @reg of this field (or -1 if not avail)
+ */
+struct sdhci_arasan_soc_ctl_field {
+ u32 reg;
+ u16 width;
+ s16 shift;
+};
+
+/**
+ * struct sdhci_arasan_soc_ctl_map - Map in syscon to corecfg registers
+ *
+ * It's up to the licensee of the Arasan IP block to make these available
+ * somewhere if needed. Presumably these will be scattered somewhere that's
+ * accessible via the syscon API.
+ *
+ * @baseclkfreq: Where to find corecfg_baseclkfreq
+ * @hiword_update: If true, use HIWORD_UPDATE to access the syscon
+ */
+struct sdhci_arasan_soc_ctl_map {
+ struct sdhci_arasan_soc_ctl_field baseclkfreq;
+ bool hiword_update;
+};
+
/**
* struct sdhci_arasan_data
- * @clk_ahb: Pointer to the AHB clock
- * @phy: Pointer to the generic phy
+ * @host: Pointer to the main SDHCI host structure.
+ * @clk_ahb: Pointer to the AHB clock
+ * @phy: Pointer to the generic phy
+ * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
+ * @sdcardclk: Pointer to a normal 'struct clk' for sdcardclk_hw.
+ * @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
+ * @soc_ctl_map: Map to get offsets into soc_ctl registers.
*/
struct sdhci_arasan_data {
+ struct sdhci_host *host;
struct clk *clk_ahb;
struct phy *phy;
+
+ struct clk_hw sdcardclk_hw;
+ struct clk *sdcardclk;
+
+ struct regmap *soc_ctl_base;
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
+};
+
+static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 },
+ .hiword_update = true,
};
+/**
+ * sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
+ *
+ * This function allows writing to fields in sdhci_arasan_soc_ctl_map.
+ * Note that if a field is specified as not available (shift < 0) then
+ * this function will silently return an error code. It will be noisy
+ * and print errors for any other (unexpected) errors.
+ *
+ * @host: The sdhci_host
+ * @fld: The field to write to
+ * @val: The value to write
+ */
+static int sdhci_arasan_syscon_write(struct sdhci_host *host,
+ const struct sdhci_arasan_soc_ctl_field *fld,
+ u32 val)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct regmap *soc_ctl_base = sdhci_arasan->soc_ctl_base;
+ u32 reg = fld->reg;
+ u16 width = fld->width;
+ s16 shift = fld->shift;
+ int ret;
+
+ /*
+ * Silently return errors for shift < 0 so caller doesn't have
+ * to check for fields which are optional. For required
+ * fields the caller needs to do something special
+ * anyway.
+ */
+ if (shift < 0)
+ return -EINVAL;
+
+ if (sdhci_arasan->soc_ctl_map->hiword_update)
+ ret = regmap_write(soc_ctl_base, reg,
+ HIWORD_UPDATE(val, GENMASK(width, 0),
+ shift));
+ else
+ ret = regmap_update_bits(soc_ctl_base, reg,
+ GENMASK(shift + width, shift),
+ val << shift);
+
+ /* Yell about (unexpected) regmap errors */
+ if (ret)
+ pr_warn("%s: Regmap write fail: %d\n",
+ mmc_hostname(host->mmc), ret);
+
+ return ret;
+}
+
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
{
u32 div;
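The HIWORD_UPDATE() comment above describes syscon registers whose upper 16 bits act as a write mask for the lower 16 bits. A small sketch that simulates such a register shows why a single masked write can update one field (here an 8-bit field at bit 8, like rk3399's corecfg_baseclkfreq) without a read-modify-write:

#include <stdint.h>
#include <stdio.h>

#define HIWORD_UPDATE(val, mask, shift) \
	((val) << (shift) | (mask) << ((shift) + 16))

/*
 * Simulate a register where bits [31:16] of the written value select
 * which of bits [15:0] actually get updated.
 */
static void hiword_reg_write(uint16_t *reg, uint32_t val)
{
	uint16_t mask = val >> 16;

	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t corecfg = 0x1234;

	/* Update the 8-bit field at bit 8 to 200 (MHz); the low byte stays 0x34. */
	hiword_reg_write(&corecfg, HIWORD_UPDATE(200, 0xffu, 8));
	printf("corecfg = 0x%04x\n", corecfg);
	return 0;
}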
@@ -79,6 +188,21 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 vendor;
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ vendor = readl(host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
+ if (ios->enhanced_strobe)
+ vendor |= VENDOR_ENHANCED_STROBE;
+ else
+ vendor &= ~VENDOR_ENHANCED_STROBE;
+
+ writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -172,9 +296,168 @@ static int sdhci_arasan_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
sdhci_arasan_resume);
+static const struct of_device_id sdhci_arasan_of_match[] = {
+ /* SoC-specific compatible strings w/ soc_ctl_map */
+ {
+ .compatible = "rockchip,rk3399-sdhci-5.1",
+ .data = &rk3399_soc_ctl_map,
+ },
+
+ /* Generic compatible below here */
+ { .compatible = "arasan,sdhci-8.9a" },
+ { .compatible = "arasan,sdhci-5.1" },
+ { .compatible = "arasan,sdhci-4.9a" },
+
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
+
+/**
+ * sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
+ *
+ * Return the current actual rate of the SD card clock. This can be used
+ * to communicate with our PHY.
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be the rate of clk_xin).
+ * Returns the card clock rate.
+ */
+static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+
+{
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(hw, struct sdhci_arasan_data, sdcardclk_hw);
+ struct sdhci_host *host = sdhci_arasan->host;
+
+ return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+};
+
+/**
+ * sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq
+ *
+ * The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This
+ * function can be used to make that happen.
+ *
+ * NOTES:
+ * - Many existing devices don't seem to do this and work fine. To keep
+ * compatibility for old hardware where the device tree doesn't provide a
+ * register map, this function is a noop if a soc_ctl_map hasn't been provided
+ * for this platform.
+ * - It's assumed that clk_xin is not dynamic and that we use the SDHCI divider
+ * to achieve lower clock rates. That means that this function is called once
+ * at probe time and never called again.
+ *
+ * @host: The sdhci_host
+ */
+static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
+ sdhci_arasan->soc_ctl_map;
+ u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000);
+
+ /* Having a map is optional */
+ if (!soc_ctl_map)
+ return;
+
+ /* If we have a map, we expect to have a syscon */
+ if (!sdhci_arasan->soc_ctl_base) {
+ pr_warn("%s: Have regmap, but no soc-ctl-syscon\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz);
+}
+
+/**
+ * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * Note: without seriously re-architecting SDHCI's clock code and testing on
+ * all platforms, there's no way to create a totally beautiful clock here
+ * with all clock ops implemented. Instead, we'll just create a clock that can
+ * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
+ * framework that we're doing things behind its back. This should be sufficient
+ * to create nice clean device tree bindings and later (if needed) we can try
+ * re-architecting SDHCI if we see some benefit to it.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct clk_init_data sdcardclk_init;
+ const char *parent_clk_name;
+ int ret;
+
+ /* Providing a clock to the PHY is optional; no error if missing */
+ if (!of_find_property(np, "#clock-cells", NULL))
+ return 0;
+
+ ret = of_property_read_string_index(np, "clock-output-names", 0,
+ &sdcardclk_init.name);
+ if (ret) {
+ dev_err(dev, "DT has #clock-cells but no clock-output-names\n");
+ return ret;
+ }
+
+ parent_clk_name = __clk_get_name(clk_xin);
+ sdcardclk_init.parent_names = &parent_clk_name;
+ sdcardclk_init.num_parents = 1;
+ sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
+ sdcardclk_init.ops = &arasan_sdcardclk_ops;
+
+ sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init;
+ sdhci_arasan->sdcardclk =
+ devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw);
+ sdhci_arasan->sdcardclk_hw.init = NULL;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get,
+ sdhci_arasan->sdcardclk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider\n");
+
+ return ret;
+}
+
+/**
+ * sdhci_arasan_unregister_sdclk - Undoes sdhci_arasan_register_sdclk()
+ *
+ * Should be called any time we're exiting and sdhci_arasan_register_sdclk()
+ * returned success.
+ *
+ * @dev: Pointer to our struct device.
+ */
+static void sdhci_arasan_unregister_sdclk(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!of_find_property(np, "#clock-cells", NULL))
+ return;
+
+ of_clk_del_provider(dev->of_node);
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
+ const struct of_device_id *match;
+ struct device_node *node;
struct clk *clk_xin;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -187,6 +470,24 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ sdhci_arasan->host = host;
+
+ match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
+ sdhci_arasan->soc_ctl_map = match->data;
+
+ node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
+ if (node) {
+ sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
+ of_node_put(node);
+
+ if (IS_ERR(sdhci_arasan->soc_ctl_base)) {
+ ret = PTR_ERR(sdhci_arasan->soc_ctl_base);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get syscon: %d\n",
+ ret);
+ goto err_pltfm_free;
+ }
+ }
sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
if (IS_ERR(sdhci_arasan->clk_ahb)) {
@@ -217,10 +518,16 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
pltfm_host->clk = clk_xin;
+ sdhci_arasan_update_baseclkfreq(host);
+
+ ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev);
+ if (ret)
+ goto clk_disable_all;
+
ret = mmc_of_parse(host->mmc);
if (ret) {
dev_err(&pdev->dev, "parsing dt failed (%u)\n", ret);
- goto clk_disable_all;
+ goto unreg_clk;
}
sdhci_arasan->phy = ERR_PTR(-ENODEV);
@@ -231,13 +538,13 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (IS_ERR(sdhci_arasan->phy)) {
ret = PTR_ERR(sdhci_arasan->phy);
dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n");
- goto clk_disable_all;
+ goto unreg_clk;
}
ret = phy_init(sdhci_arasan->phy);
if (ret < 0) {
dev_err(&pdev->dev, "phy_init err.\n");
- goto clk_disable_all;
+ goto unreg_clk;
}
ret = phy_power_on(sdhci_arasan->phy);
@@ -245,6 +552,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "phy_power_on err.\n");
goto err_phy_power;
}
+
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ sdhci_arasan_hs400_enhanced_strobe;
}
ret = sdhci_add_host(host);
@@ -259,6 +569,8 @@ err_add_host:
err_phy_power:
if (!IS_ERR(sdhci_arasan->phy))
phy_exit(sdhci_arasan->phy);
+unreg_clk:
+ sdhci_arasan_unregister_sdclk(&pdev->dev);
clk_disable_all:
clk_disable_unprepare(clk_xin);
clk_dis_ahb:
@@ -281,6 +593,8 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
phy_exit(sdhci_arasan->phy);
}
+ sdhci_arasan_unregister_sdclk(&pdev->dev);
+
ret = sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(clk_ahb);
@@ -288,14 +602,6 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id sdhci_arasan_of_match[] = {
- { .compatible = "arasan,sdhci-8.9a" },
- { .compatible = "arasan,sdhci-5.1" },
- { .compatible = "arasan,sdhci-4.9a" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
-
static struct platform_driver sdhci_arasan_driver = {
.driver = {
.name = "sdhci-arasan",
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d4cef713d246..a9b7fc06c434 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -288,7 +288,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* Disable SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries
* to enable polling via device tree with broken-cd property.
*/
- if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ if (mmc_card_is_removable(host->mmc) &&
mmc_gpio_get_cd(host->mmc) < 0) {
host->mmc->caps |= MMC_CAP_NEEDS_POLL;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 3f34d354f1fc..239be2fde242 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -481,7 +481,7 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
@@ -504,16 +504,12 @@ static int esdhc_of_resume(struct device *dev)
}
return ret;
}
-
-static const struct dev_pm_ops esdhc_pmops = {
- .suspend = esdhc_of_suspend,
- .resume = esdhc_of_resume,
-};
-#define ESDHC_PMOPS (&esdhc_pmops)
-#else
-#define ESDHC_PMOPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
+ esdhc_of_suspend,
+ esdhc_of_resume);
+
static const struct sdhci_ops sdhci_esdhc_be_ops = {
.read_l = esdhc_be_readl,
.read_w = esdhc_be_readw,
@@ -657,7 +653,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
.of_match_table = sdhci_esdhc_of_match,
- .pm = ESDHC_PMOPS,
+ .pm = &esdhc_of_dev_pm_ops,
},
.probe = sdhci_esdhc_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 4079a96ad37e..ac00c5efb2a3 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -85,7 +85,7 @@ static struct platform_driver sdhci_hlwd_driver = {
.driver = {
.name = "sdhci-hlwd",
.of_match_table = sdhci_hlwd_of_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_hlwd_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index a4dbf7421edc..897cfd24ca2e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -419,13 +419,13 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
};
/* Define Host controllers for Intel Merrifield platform */
-#define INTEL_MRFL_EMMC_0 0
-#define INTEL_MRFL_EMMC_1 1
+#define INTEL_MRFLD_EMMC_0 0
+#define INTEL_MRFLD_EMMC_1 1
-static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
- if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) &&
- (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1))
+ if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_0) &&
+ (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_1))
/* SD support is not ready yet */
return -ENODEV;
@@ -435,12 +435,12 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
- .probe_slot = intel_mrfl_mmc_probe_slot,
+ .probe_slot = intel_mrfld_mmc_probe_slot,
};
/* O2Micro extra registers */
@@ -1104,10 +1104,10 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_MRFL_MMC,
+ .device = PCI_DEVICE_ID_INTEL_MRFLD_MMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfld_mmc,
},
{
@@ -1413,8 +1413,7 @@ static const struct sdhci_ops sdhci_pci_ops = {
* *
\*****************************************************************************/
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1496,7 +1495,9 @@ static int sdhci_pci_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1562,17 +1563,10 @@ static int sdhci_pci_runtime_resume(struct device *dev)
return 0;
}
-
-#else /* CONFIG_PM */
-
-#define sdhci_pci_suspend NULL
-#define sdhci_pci_resume NULL
-
-#endif /* CONFIG_PM */
+#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
sdhci_pci_runtime_resume, NULL)
};
@@ -1760,11 +1754,12 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
- pm_runtime_put_noidle(dev);
- pm_runtime_allow(dev);
+ pm_suspend_ignore_children(dev, 1);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_use_autosuspend(dev);
- pm_suspend_ignore_children(dev, 1);
+ pm_runtime_allow(dev);
+ /* Stay active until mmc core scans for a card */
+ pm_runtime_put_noidle(dev);
}
static void sdhci_pci_runtime_pm_forbid(struct device *dev)
@@ -1810,15 +1805,13 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL);
- if (!chip) {
- ret = -ENOMEM;
- goto err;
- }
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
chip->pdev = pdev;
chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
@@ -1834,7 +1827,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
if (chip->fixes && chip->fixes->probe) {
ret = chip->fixes->probe(chip);
if (ret)
- goto free;
+ return ret;
}
slots = chip->num_slots; /* Quirk may have changed this */
@@ -1844,8 +1837,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
if (IS_ERR(slot)) {
for (i--; i >= 0; i--)
sdhci_pci_remove_slot(chip->slots[i]);
- ret = PTR_ERR(slot);
- goto free;
+ return PTR_ERR(slot);
}
chip->slots[i] = slot;
@@ -1855,35 +1847,18 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
sdhci_pci_runtime_pm_allow(&pdev->dev);
return 0;
-
-free:
- pci_set_drvdata(pdev, NULL);
- kfree(chip);
-
-err:
- pci_disable_device(pdev);
- return ret;
}
static void sdhci_pci_remove(struct pci_dev *pdev)
{
int i;
- struct sdhci_pci_chip *chip;
+ struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
- chip = pci_get_drvdata(pdev);
-
- if (chip) {
- if (chip->allow_runtime_pm)
- sdhci_pci_runtime_pm_forbid(&pdev->dev);
-
- for (i = 0; i < chip->num_slots; i++)
- sdhci_pci_remove_slot(chip->slots[i]);
-
- pci_set_drvdata(pdev, NULL);
- kfree(chip);
- }
+ if (chip->allow_runtime_pm)
+ sdhci_pci_runtime_pm_forbid(&pdev->dev);
- pci_disable_device(pdev);
+ for (i = 0; i < chip->num_slots; i++)
+ sdhci_pci_remove_slot(chip->slots[i]);
}
static struct pci_driver sdhci_driver = {
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 89e7151684a1..7e0788712e1a 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -14,7 +14,7 @@
#define PCI_DEVICE_ID_INTEL_BSW_EMMC 0x2294
#define PCI_DEVICE_ID_INTEL_BSW_SDIO 0x2295
#define PCI_DEVICE_ID_INTEL_BSW_SD 0x2296
-#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFLD_MMC 0x1190
#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 64f287a03cd3..1d17dcfc3ffb 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -215,29 +215,26 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
-#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pltfm_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+#endif
const struct dev_pm_ops sdhci_pltfm_pmops = {
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
};
EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
-#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
{
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index d38053bf9e4d..3280f2077959 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -109,13 +109,6 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
return (void *)host->private;
}
-#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct device *dev);
-extern int sdhci_pltfm_resume(struct device *dev);
extern const struct dev_pm_ops sdhci_pltfm_pmops;
-#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
-#else
-#define SDHCI_PLTFM_PMOPS NULL
-#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 1d8dd3540636..347eae2d7b6a 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -252,7 +252,7 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.of_match_table = of_match_ptr(sdhci_pxav2_of_match),
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_pxav2_probe,
.remove = sdhci_pxav2_remove,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 30132500aa1c..dd1938d341f7 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -583,24 +583,17 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops sdhci_pxav3_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
sdhci_pxav3_runtime_resume, NULL)
};
-#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops)
-
-#else
-#define SDHCI_PXAV3_PMOPS NULL
-#endif
-
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.of_match_table = of_match_ptr(sdhci_pxav3_of_match),
- .pm = SDHCI_PXAV3_PMOPS,
+ .pm = &sdhci_pxav3_pmops,
},
.probe = sdhci_pxav3_probe,
.remove = sdhci_pxav3_remove,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 70c724bc6fc7..784c5a848fb4 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -714,19 +714,12 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops sdhci_s3c_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
NULL)
};
-#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
-
-#else
-#define SDHCI_S3C_PMOPS NULL
-#endif
-
#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
.no_divider = true,
@@ -765,7 +758,7 @@ static struct platform_driver sdhci_s3c_driver = {
.driver = {
.name = "s3c-sdhci",
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
- .pm = SDHCI_S3C_PMOPS,
+ .pm = &sdhci_s3c_pmops,
},
};
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 34866f668dd7..5d068639dd3f 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -260,9 +260,9 @@ static int sdhci_sirf_resume(struct device *dev)
return sdhci_resume_host(host);
}
+#endif
static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume);
-#endif
static const struct of_device_id sdhci_sirf_of_match[] = {
{ .compatible = "sirf,prima2-sdhc" },
@@ -274,9 +274,7 @@ static struct platform_driver sdhci_sirf_driver = {
.driver = {
.name = "sdhci-sirf",
.of_match_table = sdhci_sirf_of_match,
-#ifdef CONFIG_PM_SLEEP
.pm = &sdhci_sirf_pm_ops,
-#endif
},
.probe = sdhci_sirf_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 320e1c2f8853..c95ba83366a0 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -183,7 +183,7 @@ static void st_mmcss_cconfig(struct device_node *np, struct sdhci_host *host)
writel_relaxed(cconf2, host->ioaddr + ST_MMC_CCONFIG_REG_2);
- if (mhost->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(mhost))
cconf3 |= ST_MMC_CCONFIG_EMMC_SLOT_TYPE;
else
/* CARD_DET_CTRL */
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index bcc0de47fe7e..1e93dc4e303e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -148,28 +148,37 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
return;
misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
- /* Erratum: Enable SDHCI spec v3.00 support */
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
- /* Advertise UHS modes as supported by host */
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
- else
- misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
- if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
- else
- misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- else
- misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
- sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
-
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+
+ misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
+ SDHCI_MISC_CTRL_ENABLE_SDR50 |
+ SDHCI_MISC_CTRL_ENABLE_DDR50 |
+ SDHCI_MISC_CTRL_ENABLE_SDR104);
+
clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
- clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
+
+ /*
+ * If the board does not define a regulator for the SDHCI
+ * IO voltage, then don't advertise support for UHS modes
+ * even if the device supports it because the IO voltage
+ * cannot be configured.
+ */
+ if (!IS_ERR(host->mmc->supply.vqmmc)) {
+ /* Erratum: Enable SDHCI spec v3.00 support */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+ /* Advertise UHS modes as supported by host */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
+ if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
+ }
+
+ sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
@@ -474,7 +483,7 @@ static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
.of_match_table = sdhci_tegra_dt_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_tegra_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0e3d7c056cb1..cd65d474afa2 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -45,65 +45,62 @@ static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
-static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_get_cd(struct mmc_host *mmc);
static void sdhci_dumpregs(struct sdhci_host *host)
{
- pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
- mmc_hostname(host->mmc));
-
- pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
- sdhci_readl(host, SDHCI_DMA_ADDRESS),
- sdhci_readw(host, SDHCI_HOST_VERSION));
- pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
- sdhci_readw(host, SDHCI_BLOCK_SIZE),
- sdhci_readw(host, SDHCI_BLOCK_COUNT));
- pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
- sdhci_readl(host, SDHCI_ARGUMENT),
- sdhci_readw(host, SDHCI_TRANSFER_MODE));
- pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
- sdhci_readl(host, SDHCI_PRESENT_STATE),
- sdhci_readb(host, SDHCI_HOST_CONTROL));
- pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
- sdhci_readb(host, SDHCI_POWER_CONTROL),
- sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
- sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
- sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
- sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
- sdhci_readl(host, SDHCI_INT_STATUS));
- pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
- sdhci_readl(host, SDHCI_INT_ENABLE),
- sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
- sdhci_readw(host, SDHCI_ACMD12_ERR),
- sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
- sdhci_readl(host, SDHCI_CAPABILITIES),
- sdhci_readl(host, SDHCI_CAPABILITIES_1));
- pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
- sdhci_readw(host, SDHCI_COMMAND),
- sdhci_readl(host, SDHCI_MAX_CURRENT));
- pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
- sdhci_readw(host, SDHCI_HOST_CONTROL2));
+ pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ mmc_hostname(host->mmc));
+
+ pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ sdhci_readl(host, SDHCI_DMA_ADDRESS),
+ sdhci_readw(host, SDHCI_HOST_VERSION));
+ pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ sdhci_readw(host, SDHCI_BLOCK_SIZE),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
+ pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ sdhci_readl(host, SDHCI_ARGUMENT),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE));
+ pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ sdhci_readl(host, SDHCI_PRESENT_STATE),
+ sdhci_readb(host, SDHCI_HOST_CONTROL));
+ pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ sdhci_readb(host, SDHCI_POWER_CONTROL),
+ sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
+ pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
+ sdhci_readw(host, SDHCI_CLOCK_CONTROL));
+ pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
+ sdhci_readl(host, SDHCI_INT_STATUS));
+ pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+ pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ sdhci_readw(host, SDHCI_ACMD12_ERR),
+ sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
+ pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ sdhci_readl(host, SDHCI_CAPABILITIES),
+ sdhci_readl(host, SDHCI_CAPABILITIES_1));
+ pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ sdhci_readw(host, SDHCI_COMMAND),
+ sdhci_readl(host, SDHCI_MAX_CURRENT));
+ pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ sdhci_readw(host, SDHCI_HOST_CONTROL2));
if (host->flags & SDHCI_USE_ADMA) {
if (host->flags & SDHCI_USE_64_BIT_DMA)
- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
- readl(host->ioaddr + SDHCI_ADMA_ERROR),
- readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
- readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+ pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
else
- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
- readl(host->ioaddr + SDHCI_ADMA_ERROR),
- readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+ pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
}
- pr_debug(DRIVER_NAME ": ===========================================\n");
+ pr_err(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
@@ -112,12 +109,17 @@ static void sdhci_dumpregs(struct sdhci_host *host)
* *
\*****************************************************************************/
+static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
+{
+ return cmd->data || cmd->flags & MMC_RSP_BUSY;
+}
+
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ !mmc_card_is_removable(host->mmc))
return;
if (enable) {
@@ -193,7 +195,9 @@ EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!sdhci_get_cd(host->mmc))
+ struct mmc_host *mmc = host->mmc;
+
+ if (!mmc->ops->get_cd(mmc))
return;
}
@@ -210,10 +214,10 @@ static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
}
}
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
-
static void sdhci_init(struct sdhci_host *host, int soft)
{
+ struct mmc_host *mmc = host->mmc;
+
if (soft)
sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
else
@@ -225,13 +229,17 @@ static void sdhci_init(struct sdhci_host *host, int soft)
SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
SDHCI_INT_RESPONSE;
+ if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
+ host->tuning_mode == SDHCI_TUNING_MODE_3)
+ host->ier |= SDHCI_INT_RETUNE;
+
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
if (soft) {
/* force clock reconfiguration */
host->clock = 0;
- sdhci_set_ios(host->mmc, &host->mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
}
}
@@ -429,8 +437,6 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
{
u32 mask;
- BUG_ON(!host->data);
-
if (host->blocks == 0)
return;
@@ -747,14 +753,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
u8 ctrl;
struct mmc_data *data = cmd->data;
- WARN_ON(host->data);
-
- if (data || (cmd->flags & MMC_RSP_BUSY))
+ if (sdhci_data_line_cmd(cmd))
sdhci_set_timeout(host, cmd);
if (!data)
return;
+ WARN_ON(host->data);
+
/* Sanity checks */
BUG_ON(data->blksz * data->blocks > 524288);
BUG_ON(data->blksz > host->mmc->max_blk_size);
@@ -879,6 +885,12 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
+static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12);
+}
+
static void sdhci_set_transfer_mode(struct sdhci_host *host,
struct mmc_command *cmd)
{
@@ -909,12 +921,12 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
* If we are sending CMD23, CMD12 never gets sent
* on successful completion (so no Auto-CMD12).
*/
- if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
+ if (sdhci_auto_cmd12(host, cmd->mrq) &&
(cmd->opcode != SD_IO_RW_EXTENDED))
mode |= SDHCI_TRNS_AUTO_CMD12;
- else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
mode |= SDHCI_TRNS_AUTO_CMD23;
- sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
+ sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
}
}
@@ -926,14 +938,63 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
-static void sdhci_finish_data(struct sdhci_host *host)
+static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ return (!(host->flags & SDHCI_DEVICE_DEAD) &&
+ ((mrq->cmd && mrq->cmd->error) ||
+ (mrq->sbc && mrq->sbc->error) ||
+ (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
+ (mrq->data->stop && mrq->data->stop->error))) ||
+ (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ int i;
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ if (host->mrqs_done[i] == mrq) {
+ WARN_ON(1);
+ return;
+ }
+ }
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ if (!host->mrqs_done[i]) {
+ host->mrqs_done[i] = mrq;
+ break;
+ }
+ }
+
+ WARN_ON(i >= SDHCI_MAX_MRQS);
+
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
- struct mmc_data *data;
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
- BUG_ON(!host->data);
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
+ __sdhci_finish_mrq(host, mrq);
+}
+
+static void sdhci_finish_data(struct sdhci_host *host)
+{
+ struct mmc_command *data_cmd = host->data_cmd;
+ struct mmc_data *data = host->data;
- data = host->data;
host->data = NULL;
+ host->data_cmd = NULL;
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
(SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
@@ -958,20 +1019,41 @@ static void sdhci_finish_data(struct sdhci_host *host)
*/
if (data->stop &&
(data->error ||
- !host->mrq->sbc)) {
+ !data->mrq->sbc)) {
/*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
if (data->error) {
- sdhci_do_reset(host, SDHCI_RESET_CMD);
+ if (!host->cmd || host->cmd == data_cmd)
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
}
+ /* Avoid triggering warning in sdhci_send_command() */
+ host->cmd = NULL;
sdhci_send_command(host, data->stop);
- } else
- tasklet_schedule(&host->finish_tasklet);
+ } else {
+ sdhci_finish_mrq(host, data->mrq);
+ }
+}
+
+static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
+ unsigned long timeout)
+{
+ if (sdhci_data_line_cmd(mrq->cmd))
+ mod_timer(&host->data_timer, timeout);
+ else
+ mod_timer(&host->timer, timeout);
+}
+
+static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (sdhci_data_line_cmd(mrq->cmd))
+ del_timer(&host->data_timer);
+ else
+ del_timer(&host->timer);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
@@ -989,12 +1071,12 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout = 10;
mask = SDHCI_CMD_INHIBIT;
- if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
+ if (sdhci_data_line_cmd(cmd))
mask |= SDHCI_DATA_INHIBIT;
/* We shouldn't wait for data inhibit for stop commands, even
though they might use busy signaling */
- if (host->mrq->data && (cmd == host->mrq->data->stop))
+ if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
mask &= ~SDHCI_DATA_INHIBIT;
while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
@@ -1003,7 +1085,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
cmd->error = -EIO;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_finish_mrq(host, cmd->mrq);
return;
}
timeout--;
@@ -1015,10 +1097,13 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
else
timeout += 10 * HZ;
- mod_timer(&host->timer, timeout);
+ sdhci_mod_timer(host, cmd->mrq, timeout);
host->cmd = cmd;
- host->busy_handle = 0;
+ if (sdhci_data_line_cmd(cmd)) {
+ WARN_ON(host->data_cmd);
+ host->data_cmd = cmd;
+ }
sdhci_prepare_data(host, cmd);
@@ -1030,7 +1115,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
pr_err("%s: Unsupported response type!\n",
mmc_hostname(host->mmc));
cmd->error = -EINVAL;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_finish_mrq(host, cmd->mrq);
return;
}
@@ -1059,40 +1144,58 @@ EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
+ struct mmc_command *cmd = host->cmd;
int i;
- BUG_ON(host->cmd == NULL);
+ host->cmd = NULL;
- if (host->cmd->flags & MMC_RSP_PRESENT) {
- if (host->cmd->flags & MMC_RSP_136) {
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
/* CRC is stripped so we need to do some shifting. */
for (i = 0;i < 4;i++) {
- host->cmd->resp[i] = sdhci_readl(host,
+ cmd->resp[i] = sdhci_readl(host,
SDHCI_RESPONSE + (3-i)*4) << 8;
if (i != 3)
- host->cmd->resp[i] |=
+ cmd->resp[i] |=
sdhci_readb(host,
SDHCI_RESPONSE + (3-i)*4-1);
}
} else {
- host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ }
+ }
+
+ /*
+ * The host can send an interrupt when the busy state has
+ * ended, allowing us to wait without wasting CPU cycles.
+ * The busy signal uses DAT0 so this is similar to waiting
+ * for data to complete.
+ *
+ * Note: The 1.0 specification is a bit ambiguous about this
+ * feature so there might be some problems with older
+ * controllers.
+ */
+ if (cmd->flags & MMC_RSP_BUSY) {
+ if (cmd->data) {
+ DBG("Cannot wait for busy signal when also doing a data transfer");
+ } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
+ cmd == host->data_cmd) {
+ /* Command complete before busy is ended */
+ return;
}
}
/* Finished CMD23, now send actual command. */
- if (host->cmd == host->mrq->sbc) {
- host->cmd = NULL;
- sdhci_send_command(host, host->mrq->cmd);
+ if (cmd == cmd->mrq->sbc) {
+ sdhci_send_command(host, cmd->mrq->cmd);
} else {
/* Processed actual command. */
if (host->data && host->data_early)
sdhci_finish_data(host);
- if (!host->cmd->data)
- tasklet_schedule(&host->finish_tasklet);
-
- host->cmd = NULL;
+ if (!cmd->data)
+ sdhci_finish_mrq(host, cmd->mrq);
}
}
@@ -1373,26 +1476,22 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_lock_irqsave(&host->lock, flags);
- WARN_ON(host->mrq != NULL);
-
sdhci_led_activate(host);
/*
* Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
* requests if Auto-CMD12 is enabled.
*/
- if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
+ if (sdhci_auto_cmd12(host, mrq)) {
if (mrq->stop) {
mrq->data->stop = NULL;
mrq->stop = NULL;
}
}
- host->mrq = mrq;
-
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ mrq->cmd->error = -ENOMEDIUM;
+ sdhci_finish_mrq(host, mrq);
} else {
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
@@ -1617,7 +1716,7 @@ static int sdhci_get_cd(struct mmc_host *mmc)
return 0;
/* If nonremovable, assume that the card is always present. */
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(host->mmc))
return 1;
/*
@@ -1733,13 +1832,14 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
+ if (!(host->flags & SDHCI_SIGNALING_330))
+ return -EINVAL;
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
ctrl &= ~SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
- 3600000);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
mmc_hostname(mmc));
@@ -1759,9 +1859,10 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return -EAGAIN;
case MMC_SIGNAL_VOLTAGE_180:
+ if (!(host->flags & SDHCI_SIGNALING_180))
+ return -EINVAL;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1700000, 1950000);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
mmc_hostname(mmc));
@@ -1790,9 +1891,10 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return -EAGAIN;
case MMC_SIGNAL_VOLTAGE_120:
+ if (!(host->flags & SDHCI_SIGNALING_120))
+ return -EINVAL;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
- 1300000);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
mmc_hostname(mmc));
@@ -1811,10 +1913,10 @@ static int sdhci_card_busy(struct mmc_host *mmc)
struct sdhci_host *host = mmc_priv(mmc);
u32 present_state;
- /* Check whether DAT[3:0] is 0000 */
+ /* Check whether DAT[0] is 0 */
present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- return !(present_state & SDHCI_DATA_LVL_MASK);
+ return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1909,7 +2011,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
/*
* Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
- * of loops reaches 40 times or a timeout of 150ms occurs.
+ * of loops reaches 40 times.
*/
do {
struct mmc_command cmd = {0};
@@ -1920,13 +2022,13 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
cmd.retries = 0;
cmd.data = NULL;
+ cmd.mrq = &mrq;
cmd.error = 0;
if (tuning_loop_counter-- == 0)
break;
mrq.cmd = &cmd;
- host->mrq = &mrq;
/*
* In response to CMD19, the card sends 64 bytes of tuning
@@ -1956,7 +2058,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_send_command(host, &cmd);
host->cmd = NULL;
- host->mrq = NULL;
+ sdhci_del_timer(host, &mrq);
spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
@@ -2086,6 +2188,24 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
+static inline bool sdhci_has_requests(struct sdhci_host *host)
+{
+ return host->cmd || host->data_cmd;
+}
+
+static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
+{
+ if (host->data_cmd) {
+ host->data_cmd->error = err;
+ sdhci_finish_mrq(host, host->data_cmd->mrq);
+ }
+
+ if (host->cmd) {
+ host->cmd->error = err;
+ sdhci_finish_mrq(host, host->cmd->mrq);
+ }
+}
+
static void sdhci_card_event(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -2096,12 +2216,12 @@ static void sdhci_card_event(struct mmc_host *mmc)
if (host->ops->card_event)
host->ops->card_event(host);
- present = sdhci_get_cd(host->mmc);
+ present = mmc->ops->get_cd(mmc);
spin_lock_irqsave(&host->lock, flags);
- /* Check host->mrq first in case we are runtime suspended */
- if (host->mrq && !present) {
+ /* Check sdhci_has_requests() first in case we are runtime suspended */
+ if (sdhci_has_requests(host) && !present) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
@@ -2110,8 +2230,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -2140,28 +2259,28 @@ static const struct mmc_host_ops sdhci_ops = {
* *
\*****************************************************************************/
-static void sdhci_tasklet_finish(unsigned long param)
+static bool sdhci_request_done(struct sdhci_host *host)
{
- struct sdhci_host *host;
unsigned long flags;
struct mmc_request *mrq;
-
- host = (struct sdhci_host*)param;
+ int i;
spin_lock_irqsave(&host->lock, flags);
- /*
- * If this tasklet gets rescheduled while running, it will
- * be run again afterwards but without any active request.
- */
- if (!host->mrq) {
- spin_unlock_irqrestore(&host->lock, flags);
- return;
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+ if (mrq) {
+ host->mrqs_done[i] = NULL;
+ break;
+ }
}
- del_timer(&host->timer);
+ if (!mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
- mrq = host->mrq;
+ sdhci_del_timer(host, mrq);
/*
* Always unmap the data buffers if they were mapped by
@@ -2183,13 +2302,7 @@ static void sdhci_tasklet_finish(unsigned long param)
* The controller needs a reset of internal state machines
* upon error conditions.
*/
- if (!(host->flags & SDHCI_DEVICE_DEAD) &&
- ((mrq->cmd && mrq->cmd->error) ||
- (mrq->sbc && mrq->sbc->error) ||
- (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
- (mrq->data->stop && mrq->data->stop->error))) ||
- (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
-
+ if (sdhci_needs_reset(host, mrq)) {
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
@@ -2197,20 +2310,31 @@ static void sdhci_tasklet_finish(unsigned long param)
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
+ if (!host->cmd)
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ if (!host->data_cmd)
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
- host->mrq = NULL;
- host->cmd = NULL;
- host->data = NULL;
+ host->pending_reset = false;
+ }
- sdhci_led_deactivate(host);
+ if (!sdhci_has_requests(host))
+ sdhci_led_deactivate(host);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
+
+ return false;
+}
+
+static void sdhci_tasklet_finish(unsigned long param)
+{
+ struct sdhci_host *host = (struct sdhci_host *)param;
+
+ while (!sdhci_request_done(host))
+ ;
}
static void sdhci_timeout_timer(unsigned long data)
@@ -2222,7 +2346,30 @@ static void sdhci_timeout_timer(unsigned long data)
spin_lock_irqsave(&host->lock, flags);
- if (host->mrq) {
+ if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
+ pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+
+ host->cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, host->cmd->mrq);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdhci_timeout_data_timer(unsigned long data)
+{
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ host = (struct sdhci_host *)data;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->data || host->data_cmd ||
+ (host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
@@ -2230,13 +2377,12 @@ static void sdhci_timeout_timer(unsigned long data)
if (host->data) {
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
+ } else if (host->data_cmd) {
+ host->data_cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, host->data_cmd->mrq);
} else {
- if (host->cmd)
- host->cmd->error = -ETIMEDOUT;
- else
- host->mrq->cmd->error = -ETIMEDOUT;
-
- tasklet_schedule(&host->finish_tasklet);
+ host->cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, host->cmd->mrq);
}
}
@@ -2252,9 +2398,14 @@ static void sdhci_timeout_timer(unsigned long data)
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
- BUG_ON(intmask == 0);
-
if (!host->cmd) {
+ /*
+ * SDHCI recovers from errors by resetting the cmd and data
+ * circuits. Until that is done, there very well might be more
+ * interrupts, so ignore them in that case.
+ */
+ if (host->pending_reset)
+ return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2285,37 +2436,14 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_finish_mrq(host, host->cmd->mrq);
return;
}
- /*
- * The host can send and interrupt when the busy state has
- * ended, allowing us to wait without wasting CPU cycles.
- * Unfortunately this is overloaded on the "data complete"
- * interrupt, so we need to take some care when handling
- * it.
- *
- * Note: The 1.0 specification is a bit ambiguous about this
- * feature so there might be some problems with older
- * controllers.
- */
- if (host->cmd->flags & MMC_RSP_BUSY) {
- if (host->cmd->data)
- DBG("Cannot wait for busy signal when also doing a data transfer");
- else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
- && !host->busy_handle) {
- /* Mark that command complete before busy is ended */
- host->busy_handle = 1;
- return;
- }
-
- /* The controller does not support the end-of-busy IRQ,
- * fall through and take the SDHCI_INT_RESPONSE */
- } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
- host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
+ if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
+ host->cmd->opcode == MMC_STOP_TRANSMISSION)
*mask &= ~SDHCI_INT_DATA_END;
- }
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
@@ -2357,7 +2485,6 @@ static void sdhci_adma_show_error(struct sdhci_host *host) { }
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
u32 command;
- BUG_ON(intmask == 0);
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
@@ -2371,15 +2498,20 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
if (!host->data) {
+ struct mmc_command *data_cmd = host->data_cmd;
+
+ if (data_cmd)
+ host->data_cmd = NULL;
+
/*
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
* above in sdhci_cmd_irq().
*/
- if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+ if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
- host->cmd->error = -ETIMEDOUT;
- tasklet_schedule(&host->finish_tasklet);
+ data_cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
if (intmask & SDHCI_INT_DATA_END) {
@@ -2388,14 +2520,22 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
* before the command completed, so make
* sure we do things in the proper order.
*/
- if (host->busy_handle)
- sdhci_finish_command(host);
- else
- host->busy_handle = 1;
+ if (host->cmd == data_cmd)
+ return;
+
+ sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
}
+ /*
+ * SDHCI recovers from errors by resetting the cmd and data
+ * circuits. Until that is done, there very well might be more
+ * interrupts, so ignore them in that case.
+ */
+ if (host->pending_reset)
+ return;
+
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2453,7 +2593,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
if (intmask & SDHCI_INT_DATA_END) {
- if (host->cmd) {
+ if (host->cmd == host->data_cmd) {
/*
* Data managed to finish before the
* command completed. Make sure we do
@@ -2537,6 +2677,9 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
pr_err("%s: Card is consuming too much power!\n",
mmc_hostname(host->mmc));
+ if (intmask & SDHCI_INT_RETUNE)
+ mmc_retune_needed(host->mmc);
+
if (intmask & SDHCI_INT_CARD_INT) {
sdhci_enable_sdio_irq_nolock(host, false);
host->thread_isr |= SDHCI_INT_CARD_INT;
@@ -2546,7 +2689,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
- SDHCI_INT_CARD_INT);
+ SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
if (intmask) {
unexpected |= intmask;
@@ -2582,8 +2725,10 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
spin_unlock_irqrestore(&host->lock, flags);
if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
- sdhci_card_event(host->mmc);
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->ops->card_event(mmc);
+ mmc_detect_change(mmc, msecs_to_jiffies(200));
}
if (isr & SDHCI_INT_CARD_INT) {
@@ -2605,18 +2750,31 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
\*****************************************************************************/
#ifdef CONFIG_PM
+/*
+ * To enable wakeup events, the corresponding events have to be enabled in
+ * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
+ * Table' in the SD Host Controller Standard Specification.
+ * It is useless to restore SDHCI_INT_ENABLE state in
+ * sdhci_disable_irq_wakeups() since it will be set by
+ * sdhci_enable_card_detection() or sdhci_init().
+ */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
| SDHCI_WAKE_ON_INT;
+ u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
+ SDHCI_INT_CARD_INT;
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val |= mask;
/* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ }
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+ sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
@@ -2636,7 +2794,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
sdhci_disable_card_detection(host);
mmc_retune_timer_stop(host->mmc);
- mmc_retune_needed(host->mmc);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
if (!device_may_wakeup(mmc_dev(host->mmc))) {
host->ier = 0;
@@ -2654,6 +2813,7 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
+ struct mmc_host *mmc = host->mmc;
int ret = 0;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -2667,7 +2827,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
- sdhci_set_ios(host->mmc, &host->mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
@@ -2696,7 +2856,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
unsigned long flags;
mmc_retune_timer_stop(host->mmc);
- mmc_retune_needed(host->mmc);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
spin_lock_irqsave(&host->lock, flags);
host->ier &= SDHCI_INT_CARD_INT;
@@ -2716,6 +2877,7 @@ EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
+ struct mmc_host *mmc = host->mmc;
unsigned long flags;
int host_flags = host->flags;
@@ -2729,8 +2891,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
- sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios);
- sdhci_set_ios(host->mmc, &host->mmc->ios);
+ mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
if ((host_flags & SDHCI_PV_ENABLED) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
@@ -2781,6 +2943,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host->mmc_host_ops = sdhci_ops;
mmc->ops = &host->mmc_host_ops;
+ host->flags = SDHCI_SIGNALING_330;
+
return host;
}
@@ -2816,10 +2980,41 @@ static int sdhci_set_dma_mask(struct sdhci_host *host)
return ret;
}
-int sdhci_add_host(struct sdhci_host *host)
+void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+{
+ u16 v;
+
+ if (host->read_caps)
+ return;
+
+ host->read_caps = true;
+
+ if (debug_quirks)
+ host->quirks = debug_quirks;
+
+ if (debug_quirks2)
+ host->quirks2 = debug_quirks2;
+
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
+
+ if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
+ return;
+
+ host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);
+
+ if (host->version < SDHCI_SPEC_300)
+ return;
+
+ host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
+}
+EXPORT_SYMBOL_GPL(__sdhci_read_caps);
+
+int sdhci_setup_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
- u32 caps[2] = {0, 0};
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
@@ -2832,34 +3027,28 @@ int sdhci_add_host(struct sdhci_host *host)
mmc = host->mmc;
- if (debug_quirks)
- host->quirks = debug_quirks;
- if (debug_quirks2)
- host->quirks2 = debug_quirks2;
+ /*
+ * If there are external regulators, get them. Note this must be done
+ * early before resetting the host and reading the capabilities so that
+ * the host can take the appropriate action if regulators are not
+ * available.
+ */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ return ret;
- override_timeout_clk = host->timeout_clk;
+ sdhci_read_caps(host);
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ override_timeout_clk = host->timeout_clk;
- host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
- host->version = (host->version & SDHCI_SPEC_VER_MASK)
- >> SDHCI_SPEC_VER_SHIFT;
if (host->version > SDHCI_SPEC_300) {
pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
mmc_hostname(mmc), host->version);
}
- caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
- sdhci_readl(host, SDHCI_CAPABILITIES);
-
- if (host->version >= SDHCI_SPEC_300)
- caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
- host->caps1 :
- sdhci_readl(host, SDHCI_CAPABILITIES_1);
-
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
- else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
+ else if (!(host->caps & SDHCI_CAN_DO_SDMA))
DBG("Controller doesn't have SDMA capability\n");
else
host->flags |= SDHCI_USE_SDMA;
@@ -2871,7 +3060,7 @@ int sdhci_add_host(struct sdhci_host *host)
}
if ((host->version >= SDHCI_SPEC_200) &&
- (caps[0] & SDHCI_CAN_DO_ADMA2))
+ (host->caps & SDHCI_CAN_DO_ADMA2))
host->flags |= SDHCI_USE_ADMA;
if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -2887,7 +3076,7 @@ int sdhci_add_host(struct sdhci_host *host)
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (caps[0] & SDHCI_CAN_64BIT)
+ if (host->caps & SDHCI_CAN_64BIT)
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -2963,10 +3152,10 @@ int sdhci_add_host(struct sdhci_host *host)
}
if (host->version >= SDHCI_SPEC_300)
- host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
+ host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
>> SDHCI_CLOCK_BASE_SHIFT;
else
- host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
+ host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
>> SDHCI_CLOCK_BASE_SHIFT;
host->max_clk *= 1000000;
@@ -2985,7 +3174,7 @@ int sdhci_add_host(struct sdhci_host *host)
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
*/
- host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
+ host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
SDHCI_CLOCK_MUL_SHIFT;
/*
@@ -3017,7 +3206,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->f_max = max_clk;
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
+ host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
SDHCI_TIMEOUT_CLK_SHIFT;
if (host->timeout_clk == 0) {
if (host->ops->get_timeout_clock) {
@@ -3031,7 +3220,7 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
if (override_timeout_clk)
@@ -3072,27 +3261,22 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
mmc->caps &= ~MMC_CAP_CMD23;
- if (caps[0] & SDHCI_CAN_DO_HISPD)
+ if (host->caps & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ mmc_card_is_removable(mmc) &&
mmc_gpio_get_cd(host->mmc) < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- /* If there are external regulators, get them */
- ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
- goto undma;
-
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = regulator_enable(mmc->supply.vqmmc);
if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
1950000))
- caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_SDR50 |
- SDHCI_SUPPORT_DDR50);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
if (ret) {
pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
mmc_hostname(mmc), ret);
@@ -3100,28 +3284,30 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
- caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
- SDHCI_SUPPORT_DDR50);
+ if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
+ }
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
- if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
- SDHCI_SUPPORT_DDR50))
+ if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50))
mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
/* SDR104 supports also implies SDR50 support */
- if (caps[1] & SDHCI_SUPPORT_SDR104) {
+ if (host->caps1 & SDHCI_SUPPORT_SDR104) {
mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
/* SD3.0: SDR104 is supported so (for eMMC) the caps2
* field can be promoted to support HS200.
*/
if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
mmc->caps2 |= MMC_CAP2_HS200;
- } else if (caps[1] & SDHCI_SUPPORT_SDR50)
+ } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
mmc->caps |= MMC_CAP_UHS_SDR50;
+ }
if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
- (caps[1] & SDHCI_SUPPORT_HS400))
+ (host->caps1 & SDHCI_SUPPORT_HS400))
mmc->caps2 |= MMC_CAP2_HS400;
if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
@@ -3130,25 +3316,25 @@ int sdhci_add_host(struct sdhci_host *host)
1300000)))
mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
- if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
- !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
+ if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
+ !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
mmc->caps |= MMC_CAP_UHS_DDR50;
/* Does the host need tuning for SDR50? */
- if (caps[1] & SDHCI_USE_SDR50_TUNING)
+ if (host->caps1 & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
/* Driver Type(s) (A, C, D) supported by the host */
- if (caps[1] & SDHCI_DRIVER_TYPE_A)
+ if (host->caps1 & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
- if (caps[1] & SDHCI_DRIVER_TYPE_C)
+ if (host->caps1 & SDHCI_DRIVER_TYPE_C)
mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
- if (caps[1] & SDHCI_DRIVER_TYPE_D)
+ if (host->caps1 & SDHCI_DRIVER_TYPE_D)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
/* Initial value for re-tuning timer count */
- host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
- SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+ host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
+ SDHCI_RETUNING_TIMER_COUNT_SHIFT;
/*
* In case Re-tuning Timer is not disabled, the actual value of
@@ -3158,7 +3344,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->tuning_count = 1 << (host->tuning_count - 1);
/* Re-tuning mode supported by the Host Controller */
- host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
+ host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
SDHCI_RETUNING_MODE_SHIFT;
ocr_avail = 0;
@@ -3187,7 +3373,7 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- if (caps[0] & SDHCI_CAN_VDD_330) {
+ if (host->caps & SDHCI_CAN_VDD_330) {
ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->max_current_330 = ((max_current_caps &
@@ -3195,7 +3381,7 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_330_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
}
- if (caps[0] & SDHCI_CAN_VDD_300) {
+ if (host->caps & SDHCI_CAN_VDD_300) {
ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
mmc->max_current_300 = ((max_current_caps &
@@ -3203,7 +3389,7 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_300_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
}
- if (caps[0] & SDHCI_CAN_VDD_180) {
+ if (host->caps & SDHCI_CAN_VDD_180) {
ocr_avail |= MMC_VDD_165_195;
mmc->max_current_180 = ((max_current_caps &
@@ -3240,6 +3426,15 @@ int sdhci_add_host(struct sdhci_host *host)
goto unreg;
}
+ if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
+ (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
+ host->flags |= SDHCI_SIGNALING_180;
+
+ if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
+ host->flags |= SDHCI_SIGNALING_120;
+
spin_lock_init(&host->lock);
/*
@@ -3281,7 +3476,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
mmc->max_blk_size = 2;
} else {
- mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
+ mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
@@ -3297,6 +3492,28 @@ int sdhci_add_host(struct sdhci_host *host)
*/
mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
+ return 0;
+
+unreg:
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+undma:
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, host->align_buffer,
+ host->align_addr);
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_setup_host);
+
+int __sdhci_add_host(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
/*
* Init tasklets.
*/
@@ -3304,6 +3521,8 @@ int sdhci_add_host(struct sdhci_host *host)
sdhci_tasklet_finish, (unsigned long)host);
setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
+ setup_timer(&host->data_timer, sdhci_timeout_data_timer,
+ (unsigned long)host);
init_waitqueue_head(&host->buf_ready_int);
@@ -3353,10 +3572,10 @@ unirq:
free_irq(host->irq, host);
untasklet:
tasklet_kill(&host->finish_tasklet);
-unreg:
+
if (!IS_ERR(mmc->supply.vqmmc))
regulator_disable(mmc->supply.vqmmc);
-undma:
+
if (host->align_buffer)
dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, host->align_buffer,
@@ -3366,7 +3585,18 @@ undma:
return ret;
}
+EXPORT_SYMBOL_GPL(__sdhci_add_host);
+
+int sdhci_add_host(struct sdhci_host *host)
+{
+ int ret;
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ return __sdhci_add_host(host);
+}
EXPORT_SYMBOL_GPL(sdhci_add_host);
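With the split above, a glue driver can run code between capability setup and host registration. A minimal usage sketch, assuming a hypothetical driver; the function name and the MMC_CAP2_NO_SDIO tweak are illustrative only, and error unwinding is abbreviated:

#include "sdhci.h"

static int my_sdhci_register(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);	/* parse caps, set up DMA and clocks */
	if (ret)
		return ret;

	/* adjust the mmc host before it is registered with the core */
	host->mmc->caps2 |= MMC_CAP2_NO_SDIO;

	return __sdhci_add_host(host);	/* request IRQ, add the mmc host */
}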
void sdhci_remove_host(struct sdhci_host *host, int dead)
@@ -3379,12 +3609,10 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
host->flags |= SDHCI_DEVICE_DEAD;
- if (host->mrq) {
+ if (sdhci_has_requests(host)) {
pr_err("%s: Controller removed during "
" transfer!\n", mmc_hostname(mmc));
-
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -3404,6 +3632,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
free_irq(host->irq, host);
del_timer_sync(&host->timer);
+ del_timer_sync(&host->data_timer);
tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 609f87ca536b..0411c9f36461 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -128,6 +128,7 @@
#define SDHCI_INT_CARD_INSERT 0x00000040
#define SDHCI_INT_CARD_REMOVE 0x00000080
#define SDHCI_INT_CARD_INT 0x00000100
+#define SDHCI_INT_RETUNE 0x00001000
#define SDHCI_INT_ERROR 0x00008000
#define SDHCI_INT_TIMEOUT 0x00010000
#define SDHCI_INT_CRC 0x00020000
@@ -186,6 +187,7 @@
#define SDHCI_CAN_DO_ADMA1 0x00100000
#define SDHCI_CAN_DO_HISPD 0x00200000
#define SDHCI_CAN_DO_SDMA 0x00400000
+#define SDHCI_CAN_DO_SUSPEND 0x00800000
#define SDHCI_CAN_VDD_330 0x01000000
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
@@ -314,6 +316,9 @@ struct sdhci_adma2_64_desc {
*/
#define SDHCI_MAX_SEGS 128
+/* Allow for a command request and a data request at the same time */
+#define SDHCI_MAX_MRQS 2
+
enum sdhci_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
@@ -447,6 +452,9 @@ struct sdhci_host {
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
+#define SDHCI_SIGNALING_330 (1<<14) /* Host is capable of 3.3V signaling */
+#define SDHCI_SIGNALING_180 (1<<15) /* Host is capable of 1.8V signaling */
+#define SDHCI_SIGNALING_120 (1<<16) /* Host is capable of 1.2V signaling */
unsigned int version; /* SDHCI spec. version */
@@ -460,12 +468,13 @@ struct sdhci_host {
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
+ bool pending_reset; /* Cmd/data reset is pending */
- struct mmc_request *mrq; /* Current request */
+ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
+ struct mmc_command *data_cmd; /* Current data command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
- unsigned int busy_handle:1; /* Handling the order of Busy-end */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
@@ -486,9 +495,11 @@ struct sdhci_host {
struct tasklet_struct finish_tasklet; /* Tasklet structures */
struct timer_list timer; /* Timer for timeouts */
+ struct timer_list data_timer; /* Timer for data timeouts */
- u32 caps; /* Alternative CAPABILITY_0 */
- u32 caps1; /* Alternative CAPABILITY_1 */
+ u32 caps; /* CAPABILITY_0 */
+ u32 caps1; /* CAPABILITY_1 */
+ bool read_caps; /* Capability flags have been read */
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
@@ -508,6 +519,8 @@ struct sdhci_host {
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
#define SDHCI_TUNING_MODE_1 0
+#define SDHCI_TUNING_MODE_2 1
+#define SDHCI_TUNING_MODE_3 2
unsigned long private[0] ____cacheline_aligned;
};
@@ -645,11 +658,20 @@ static inline void *sdhci_priv(struct sdhci_host *host)
}
extern void sdhci_card_detect(struct sdhci_host *host);
+extern void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps,
+ u32 *caps1);
+extern int sdhci_setup_host(struct sdhci_host *host);
+extern int __sdhci_add_host(struct sdhci_host *host);
extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
extern void sdhci_send_command(struct sdhci_host *host,
struct mmc_command *cmd);
+static inline void sdhci_read_caps(struct sdhci_host *host)
+{
+ __sdhci_read_caps(host, NULL, NULL, NULL);
+}
+
static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
{
return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 983b8b32ef96..111b66f5439b 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -222,7 +222,7 @@ static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
.of_match_table = f_sdh30_dt_ids,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
.remove = sdhci_f_sdh30_remove,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index dd64b8663984..900778421be6 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -574,7 +574,7 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
if (state1 & STS1_CMDSEQ) {
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
- for (timeout = 10000000; timeout; timeout--) {
+ for (timeout = 10000; timeout; timeout--) {
if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
& STS1_CMDSEQ))
break;
@@ -819,10 +819,12 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
tmp |= CMD_SET_RTYP_NO;
break;
case MMC_RSP_R1:
- case MMC_RSP_R1B:
case MMC_RSP_R3:
tmp |= CMD_SET_RTYP_6B;
break;
+ case MMC_RSP_R1B:
+ tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
+ break;
case MMC_RSP_R2:
tmp |= CMD_SET_RTYP_17B;
break;
@@ -830,17 +832,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
dev_err(dev, "Unsupported response type.\n");
break;
}
- switch (opc) {
- /* RBSY */
- case MMC_SLEEP_AWAKE:
- case MMC_SWITCH:
- case MMC_STOP_TRANSMISSION:
- case MMC_SET_WRITE_PROT:
- case MMC_CLR_WRITE_PROT:
- case MMC_ERASE:
- tmp |= CMD_SET_RBSY;
- break;
- }
+
/* WDAT / DATW */
if (data) {
tmp |= CMD_SET_WDAT;
@@ -925,23 +917,13 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
{
struct mmc_command *cmd = mrq->cmd;
u32 opc = cmd->opcode;
- u32 mask;
+ u32 mask = 0;
unsigned long flags;
- switch (opc) {
- /* response busy check */
- case MMC_SLEEP_AWAKE:
- case MMC_SWITCH:
- case MMC_STOP_TRANSMISSION:
- case MMC_SET_WRITE_PROT:
- case MMC_CLR_WRITE_PROT:
- case MMC_ERASE:
+ if (cmd->flags & MMC_RSP_BUSY)
mask = MASK_START_CMD | MASK_MRBSYE;
- break;
- default:
+ else
mask = MASK_START_CMD | MASK_MCRSPE;
- break;
- }
if (host->ccs_enable)
mask |= MASK_MCCSTO;
@@ -1009,22 +991,6 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->state = STATE_REQUEST;
spin_unlock_irqrestore(&host->lock, flags);
- switch (mrq->cmd->opcode) {
- /* MMCIF does not support SD/SDIO command */
- case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
- case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
- if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
- break;
- case MMC_APP_CMD:
- case SD_IO_RW_DIRECT:
- host->state = STATE_IDLE;
- mrq->cmd->error = -ETIMEDOUT;
- mmc_request_done(mmc, mrq);
- return;
- default:
- break;
- }
-
host->mrq = mrq;
sh_mmcif_start_cmd(host, mrq);
@@ -1488,6 +1454,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_init_ocr(host);
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
+ mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ mmc->max_busy_timeout = 10000;
+
if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index f750f9494410..c3b651bf89cb 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -39,6 +39,12 @@
#define EXT_ACC 0xe4
+#define SDHI_VER_GEN2_SDR50 0x490c
+/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
+#define SDHI_VER_GEN2_SDR104 0xcb0d
+#define SDHI_VER_GEN3_SD 0xcc10
+#define SDHI_VER_GEN3_SDMMC 0xcd10
+
#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
struct sh_mobile_sdhi_of_data {
@@ -109,14 +115,14 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
* sh_mobile_sdhi_of_data :: dma_buswidth
*/
switch (sd_ctrl_read16(host, CTL_VERSION)) {
- case 0x490C:
+ case SDHI_VER_GEN2_SDR50:
val = (width == 32) ? 0x0001 : 0x0000;
break;
- case 0xCB0D:
+ case SDHI_VER_GEN2_SDR104:
val = (width == 32) ? 0x0000 : 0x0001;
break;
- case 0xCC10: /* Gen3, SD only */
- case 0xCD10: /* Gen3, SD + MMC */
+ case SDHI_VER_GEN3_SD:
+ case SDHI_VER_GEN3_SDMMC:
if (width == 64)
val = 0x0000;
else if (width == 32)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 1aac2ad8edf2..7f63ec05bdf4 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -259,7 +259,7 @@ static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
{
- writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index f44e2ab7aea2..92467efc4e2c 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1086,7 +1086,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
- mmc->caps & MMC_CAP_NONREMOVABLE ||
+ !mmc_card_is_removable(mmc) ||
mmc->slot.cd_irq >= 0);
if (tmio_mmc_clk_enable(_host) < 0) {
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 845dd27d9f41..377947580203 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -122,7 +122,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
/* Nothing more in higher memory on BCM47XX (MIPS) */
- if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
+ if (IS_ENABLED(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 9a1a6ffd16b8..94d3eb42c4d5 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -416,7 +416,7 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
return ret;
}
-static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const u_char *buf, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 64a248556d29..58329d2dacd1 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -113,12 +113,12 @@ config MTD_SST25L
if you want to specify device partitioning.
config MTD_BCM47XXSFLASH
- tristate "R/O support for serial flash on BCMA bus"
+ tristate "Support for serial flash on BCMA bus"
depends on BCMA_SFLASH && (MIPS || ARM)
help
BCMA bus can have various flash memories attached, they are
registered by bcma as platform devices. This enables driver for
- serial flash memories (only read-only mode is implemented).
+ serial flash memories.
config MTD_SLRAM
tristate "Uncached system RAM"
@@ -171,18 +171,6 @@ config MTDRAM_ERASE_SIZE
as a module, it is also possible to specify this as a parameter when
loading the module.
-#If not a module (I don't want to test it as a module)
-config MTDRAM_ABS_POS
- hex "SRAM Hexadecimal Absolute position or 0"
- depends on MTD_MTDRAM=y
- default "0"
- help
- If you have system RAM accessible by the CPU but not used by Linux
- in normal operation, you can give the physical address at which the
- available RAM starts, and the MTDRAM driver will use it instead of
- allocating space from Linux's available memory. Otherwise, leave
- this set to zero. Most people will want to leave this as zero.
-
config MTD_BLOCK2MTD
tristate "MTD using block device"
depends on BLOCK
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9d6854467651..9cf7fcd28034 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,15 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return spi_write(spi, flash->command, len + 1);
}
-static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
struct spi_transfer t[2] = {};
struct spi_message m;
int cmd_sz = m25p_cmdsz(nor);
+ ssize_t ret;
spi_message_init(&m);
@@ -98,9 +99,14 @@ static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- spi_sync(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
- *retlen += m.actual_length - cmd_sz;
+ ret = m.actual_length - cmd_sz;
+ if (ret < 0)
+ return -EIO;
+ return ret;
}
static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
@@ -119,21 +125,21 @@ static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
* Read an address range from the nor chip. The address range
* may be any size provided it is within the physical boundaries.
*/
-static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
struct spi_transfer t[2];
struct spi_message m;
unsigned int dummy = nor->read_dummy;
+ ssize_t ret;
/* convert the dummy cycles to the number of bytes */
dummy /= 8;
if (spi_flash_read_supported(spi)) {
struct spi_flash_read_message msg;
- int ret;
memset(&msg, 0, sizeof(msg));
@@ -149,8 +155,9 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
msg.data_nbits = m25p80_rx_nbits(nor);
ret = spi_flash_read(spi, &msg);
- *retlen = msg.retlen;
- return ret;
+ if (ret < 0)
+ return ret;
+ return msg.retlen;
}
spi_message_init(&m);
@@ -165,13 +172,17 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
t[1].rx_buf = buf;
t[1].rx_nbits = m25p80_rx_nbits(nor);
- t[1].len = len;
+ t[1].len = min(len, spi_max_transfer_size(spi));
spi_message_add_tail(&t[1], &m);
- spi_sync(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
- *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
- return 0;
+ ret = m.actual_length - m25p_cmdsz(nor) - dummy;
+ if (ret < 0)
+ return -EIO;
+ return ret;
}
/*
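Since m25p80_read() above now clamps each transfer to spi_max_transfer_size() and returns the byte count actually read (or a negative error), callers have to be prepared to loop. A minimal sketch of such a loop, assuming a hypothetical helper name and a hook signature matching the new return convention:

#include <linux/kernel.h>
#include <linux/mtd/spi-nor.h>

static ssize_t read_all(struct spi_nor *nor, loff_t from, size_t len,
			u_char *buf,
			ssize_t (*read_fn)(struct spi_nor *, loff_t, size_t,
					   u_char *))
{
	size_t done = 0;

	while (done < len) {
		ssize_t ret = read_fn(nor, from + done, len - done, buf + done);

		if (ret < 0)
			return ret;	/* propagate the transfer error */
		if (ret == 0)
			return -EIO;	/* no progress, avoid looping forever */
		done += ret;
	}

	return done;
}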
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index d5b870b3fd4e..f5396f26ddb4 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -95,7 +95,7 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
return -EIO;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc == OPAL_SUCCESS) {
rc = 0;
if (retlen)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 22f3858c0364..3fad35942895 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -186,7 +186,7 @@ static int of_flash_probe(struct platform_device *dev)
* consists internally of 2 non-identical NOR chips on one die.
*/
p = of_get_property(dp, "reg", &count);
- if (count % reg_tuple_size != 0) {
+ if (!p || count % reg_tuple_size != 0) {
dev_err(&dev->dev, "Malformed reg property on %s\n",
dev->dev.of_node->full_name);
err = -EINVAL;
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 744ca5cacc9b..f9fa3fad728e 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -75,15 +75,15 @@ static int __init init_msp_flash(void)
printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);
- msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL);
+ msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL);
if (!msp_flash)
return -ENOMEM;
- msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL);
+ msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL);
if (!msp_parts)
goto free_msp_flash;
- msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL);
+ msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
if (!msp_maps)
goto free_msp_parts;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 142fc3d79463..784c6e1a0391 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
- if (info->mtd == NULL)
+ if (info->mtd == NULL) {
ret = -ENXIO;
+ goto err;
+ }
}
info->mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f05e0e9eb2f7..21ff58099f3b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC
config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
- depends on MTD_NAND && FSL_SOC
+ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE)
select FSL_IFC
select MEMORY
help
@@ -539,7 +539,6 @@ config MTD_NAND_FSMC
config MTD_NAND_XWAY
tristate "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
- select MTD_NAND_PLATFORM
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
to the External Bus Unit (EBU).
@@ -563,4 +562,11 @@ config MTD_NAND_QCOM
Enables support for NAND flash chips on SoCs containing the EBI2 NAND
controller. This controller is found on IPQ806x SoC.
+config MTD_NAND_MTK
+ tristate "Support for NAND controller on MTK SoCs"
+ depends on HAS_DMA
+ help
+ Enables support for NAND controller on MTK SoCs.
+ This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index f55335373f7c..cafde6f3d957 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index b76ad7c0144f..8eb2c64df38c 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -340,6 +340,36 @@ static const u16 brcmnand_regs_v71[] = {
[BRCMNAND_FC_BASE] = 0x400,
};
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x400,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x600,
+};
+
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
@@ -435,7 +465,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Register offsets */
- if (ctrl->nand_version >= 0x0701)
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->reg_offsets = brcmnand_regs_v72;
+ else if (ctrl->nand_version >= 0x0701)
ctrl->reg_offsets = brcmnand_regs_v71;
else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
@@ -480,7 +512,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Maximum spare area sector size (per 512B) */
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->max_oob = 128;
+ else if (ctrl->nand_version >= 0x0600)
ctrl->max_oob = 64;
else if (ctrl->nand_version >= 0x0500)
ctrl->max_oob = 32;
@@ -583,14 +617,20 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
int cs = host->cs;
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ bits = 7;
+ else if (ctrl->nand_version >= 0x0600)
bits = 6;
else if (ctrl->nand_version >= 0x0500)
bits = 5;
else
bits = 4;
- if (ctrl->nand_version >= 0x0600) {
+ if (ctrl->nand_version >= 0x0702) {
+ if (cs >= 4)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 4) * bits;
+ } else if (ctrl->nand_version >= 0x0600) {
if (cs >= 5)
reg = BRCMNAND_CORR_THRESHOLD_EXT;
shift = (cs % 5) * bits;
@@ -631,19 +671,28 @@ enum {
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ return GENMASK(7, 0);
+ else if (ctrl->nand_version >= 0x0600)
return GENMASK(6, 0);
else
return GENMASK(5, 0);
}
#define NAND_ACC_CONTROL_ECC_SHIFT 16
+#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
- return mask << NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+
+ /* v7.2 includes additional ECC levels */
+ if (ctrl->nand_version >= 0x0702)
+ mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+
+ return mask;
}
static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
@@ -667,7 +716,9 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ return 9;
+ else if (ctrl->nand_version >= 0x0600)
return 7;
else if (ctrl->nand_version >= 0x0500)
return 6;
@@ -773,10 +824,16 @@ enum brcmnand_llop_type {
* Internal support functions
***********************************************************************/
-static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+ struct brcmnand_cfg *cfg)
{
- return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
- cfg->ecc_level == 15;
+ if (ctrl->nand_version <= 0x0701)
+ return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15;
+ else
+ return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15) ||
+ (cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}
/*
@@ -931,7 +988,7 @@ static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
if (p->sector_size_1k)
ecc_level <<= 1;
- if (is_hamming_ecc(p)) {
+ if (is_hamming_ecc(host->ctrl, p)) {
ecc->bytes = 3 * sectors;
mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
return 0;
@@ -1108,7 +1165,7 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
ctrl->cmd_pending = cmd;
intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
- BUG_ON(!(intfc & INTFC_CTLR_READY));
+ WARN_ON(!(intfc & INTFC_CTLR_READY));
mb(); /* flush previous writes */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
@@ -1545,6 +1602,56 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a single
+ * bitflip, we must check each ECC error to see if it is actually an erased
+ * page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for ECC error), and
+ * buf will contain raw data.
+ * Otherwise, buf is filled with 0xffs and the maximum number of bitflips per
+ * ECC sector is returned to the caller.
+ *
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+ struct nand_chip *chip, void *buf, u64 addr)
+{
+ int i, sas;
+ void *oob = chip->oob_poi;
+ int bitflips = 0;
+ int page = addr >> chip->page_shift;
+ int ret;
+
+ if (!buf) {
+ buf = chip->buffers->databuf;
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
+
+ sas = mtd->oobsize / chip->ecc.steps;
+
+ /* read without ecc for verification */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+ ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
+ oob, sas, NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max(bitflips, ret);
+ }
+
+ return bitflips;
+}
+
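The helper above delegates the per-sector decision to nand_check_erased_ecc_chunk(). A standalone model of the underlying idea: an erased sector should be all 0xff, so accept it (and report the flips) only while the number of 0 bits stays within the ECC strength. The kernel helper additionally rewrites the buffers to 0xff; this sketch only models the decision.

#include <stddef.h>
#include <stdint.h>

static int count_zero_bits(const uint8_t *buf, size_t len)
{
	int zeros = 0;
	size_t i;

	for (i = 0; i < len; i++)
		zeros += 8 - __builtin_popcount(buf[i]);

	return zeros;
}

/* Returns the bitflip count if the chunk still looks erased, -1 otherwise. */
static int check_erased_chunk(const uint8_t *data, size_t data_len,
			      const uint8_t *oob, size_t oob_len, int strength)
{
	int flips = count_zero_bits(data, data_len) +
		    count_zero_bits(oob, oob_len);

	return flips <= strength ? flips : -1;
}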
static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
@@ -1552,9 +1659,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_controller *ctrl = host->ctrl;
u64 err_addr = 0;
int err;
+ bool retry = true;
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+try_dmaread:
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
@@ -1575,6 +1684,34 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
}
if (mtd_is_eccerr(err)) {
+ /*
+ * On controller versions 7.0 and 7.1, a DMA read issued after a
+ * prior PIO read that reported an uncorrectable error latches that
+ * error in the DMA engine, and it is cleared only by a subsequent
+ * DMA read; so just retry once to clear a possible false error
+ * reported for the current DMA read
+ */
+ if ((ctrl->nand_version == 0x0700) ||
+ (ctrl->nand_version == 0x0701)) {
+ if (retry) {
+ retry = false;
+ goto try_dmaread;
+ }
+ }
+
+ /*
+ * Controller version 7.2 has a hw encoder to detect erased page
+ * bitflips; apply sw verification for older controllers only
+ */
+ if (ctrl->nand_version < 0x0702) {
+ err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+ addr);
+ /* erased page bitflips corrected */
+ if (err > 0)
+ return err;
+ }
+
dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.failed++;
@@ -1857,7 +1994,8 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
return 0;
}
-static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+ char *buf, struct brcmnand_cfg *cfg)
{
buf += sprintf(buf,
"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
@@ -1868,7 +2006,7 @@ static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
cfg->spare_area_size, cfg->device_width);
/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
- if (is_hamming_ecc(cfg))
+ if (is_hamming_ecc(host->ctrl, cfg))
sprintf(buf, ", Hamming ECC");
else if (cfg->sector_size_1k)
sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
@@ -1987,7 +2125,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
brcmnand_set_ecc_enabled(host, 1);
- brcmnand_print_cfg(msg, cfg);
+ brcmnand_print_cfg(host, msg, cfg);
dev_info(ctrl->dev, "detected %s\n", msg);
/* Configure ACC_CONTROL */
@@ -1995,6 +2133,10 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
tmp = nand_readreg(ctrl, offs);
tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
tmp &= ~ACC_CONTROL_RD_ERASED;
+
+ /* We need to turn on Read from erased pages protected by ECC */
+ if (ctrl->nand_version >= 0x0702)
+ tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
/*
@@ -2195,6 +2337,7 @@ static const struct of_device_id brcmnand_of_match[] = {
{ .compatible = "brcm,brcmnand-v6.2" },
{ .compatible = "brcm,brcmnand-v7.0" },
{ .compatible = "brcm,brcmnand-v7.1" },
+ { .compatible = "brcm,brcmnand-v7.2" },
{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
index d74f4ba4a6f4..731c6051d91e 100644
--- a/drivers/mtd/nand/jz4780_bch.c
+++ b/drivers/mtd/nand/jz4780_bch.c
@@ -375,6 +375,6 @@ static struct platform_driver jz4780_bch_driver = {
module_platform_driver(jz4780_bch_driver);
MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index daf3c4217f4d..175f67da25af 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -412,6 +412,6 @@ static struct platform_driver jz4780_nand_driver = {
module_platform_driver(jz4780_nand_driver);
MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
new file mode 100644
index 000000000000..25a4fbd4d24a
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -0,0 +1,530 @@
+/*
+ * MTK ECC controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+
+#include "mtk_ecc.h"
+
+#define ECC_IDLE_MASK BIT(0)
+#define ECC_IRQ_EN BIT(0)
+#define ECC_OP_ENABLE (1)
+#define ECC_OP_DISABLE (0)
+
+#define ECC_ENCCON (0x00)
+#define ECC_ENCCNFG (0x04)
+#define ECC_CNFG_4BIT (0)
+#define ECC_CNFG_6BIT (1)
+#define ECC_CNFG_8BIT (2)
+#define ECC_CNFG_10BIT (3)
+#define ECC_CNFG_12BIT (4)
+#define ECC_CNFG_14BIT (5)
+#define ECC_CNFG_16BIT (6)
+#define ECC_CNFG_18BIT (7)
+#define ECC_CNFG_20BIT (8)
+#define ECC_CNFG_22BIT (9)
+#define ECC_CNFG_24BIT (0xa)
+#define ECC_CNFG_28BIT (0xb)
+#define ECC_CNFG_32BIT (0xc)
+#define ECC_CNFG_36BIT (0xd)
+#define ECC_CNFG_40BIT (0xe)
+#define ECC_CNFG_44BIT (0xf)
+#define ECC_CNFG_48BIT (0x10)
+#define ECC_CNFG_52BIT (0x11)
+#define ECC_CNFG_56BIT (0x12)
+#define ECC_CNFG_60BIT (0x13)
+#define ECC_MODE_SHIFT (5)
+#define ECC_MS_SHIFT (16)
+#define ECC_ENCDIADDR (0x08)
+#define ECC_ENCIDLE (0x0C)
+#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32))
+#define ECC_ENCIRQ_EN (0x80)
+#define ECC_ENCIRQ_STA (0x84)
+#define ECC_DECCON (0x100)
+#define ECC_DECCNFG (0x104)
+#define DEC_EMPTY_EN BIT(31)
+#define DEC_CNFG_CORRECT (0x3 << 12)
+#define ECC_DECIDLE (0x10C)
+#define ECC_DECENUM0 (0x114)
+#define ERR_MASK (0x3f)
+#define ECC_DECDONE (0x124)
+#define ECC_DECIRQ_EN (0x200)
+#define ECC_DECIRQ_STA (0x204)
+
+#define ECC_TIMEOUT (500000)
+
+#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
+ ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
+
+struct mtk_ecc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+
+ struct completion done;
+ struct mutex lock;
+ u32 sectors;
+};
+
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+ enum mtk_ecc_operation op)
+{
+ struct device *dev = ecc->dev;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
+ val & ECC_IDLE_MASK,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "%s NOT idle\n",
+ op == ECC_ENCODE ? "encoder" : "decoder");
+}
+
+static irqreturn_t mtk_ecc_irq(int irq, void *id)
+{
+ struct mtk_ecc *ecc = id;
+ enum mtk_ecc_operation op;
+ u32 dec, enc;
+
+ dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ if (dec) {
+ op = ECC_DECODE;
+ dec = readw(ecc->regs + ECC_DECDONE);
+ if (dec & ecc->sectors) {
+ ecc->sectors = 0;
+ complete(&ecc->done);
+ } else {
+ return IRQ_HANDLED;
+ }
+ } else {
+ enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
+ if (enc) {
+ op = ECC_ENCODE;
+ complete(&ecc->done);
+ } else {
+ return IRQ_NONE;
+ }
+ }
+
+ writel(0, ecc->regs + ECC_IRQ_REG(op));
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
+ u32 reg;
+
+ switch (config->strength) {
+ case 4:
+ ecc_bit = ECC_CNFG_4BIT;
+ break;
+ case 6:
+ ecc_bit = ECC_CNFG_6BIT;
+ break;
+ case 8:
+ ecc_bit = ECC_CNFG_8BIT;
+ break;
+ case 10:
+ ecc_bit = ECC_CNFG_10BIT;
+ break;
+ case 12:
+ ecc_bit = ECC_CNFG_12BIT;
+ break;
+ case 14:
+ ecc_bit = ECC_CNFG_14BIT;
+ break;
+ case 16:
+ ecc_bit = ECC_CNFG_16BIT;
+ break;
+ case 18:
+ ecc_bit = ECC_CNFG_18BIT;
+ break;
+ case 20:
+ ecc_bit = ECC_CNFG_20BIT;
+ break;
+ case 22:
+ ecc_bit = ECC_CNFG_22BIT;
+ break;
+ case 24:
+ ecc_bit = ECC_CNFG_24BIT;
+ break;
+ case 28:
+ ecc_bit = ECC_CNFG_28BIT;
+ break;
+ case 32:
+ ecc_bit = ECC_CNFG_32BIT;
+ break;
+ case 36:
+ ecc_bit = ECC_CNFG_36BIT;
+ break;
+ case 40:
+ ecc_bit = ECC_CNFG_40BIT;
+ break;
+ case 44:
+ ecc_bit = ECC_CNFG_44BIT;
+ break;
+ case 48:
+ ecc_bit = ECC_CNFG_48BIT;
+ break;
+ case 52:
+ ecc_bit = ECC_CNFG_52BIT;
+ break;
+ case 56:
+ ecc_bit = ECC_CNFG_56BIT;
+ break;
+ case 60:
+ ecc_bit = ECC_CNFG_60BIT;
+ break;
+ default:
+ dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
+ config->strength);
+ }
+
+ if (config->op == ECC_ENCODE) {
+ /* configure ECC encoder (in bits) */
+ enc_sz = config->len << 3;
+
+ reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg |= (enc_sz << ECC_MS_SHIFT);
+ writel(reg, ecc->regs + ECC_ENCCNFG);
+
+ if (config->mode != ECC_NFI_MODE)
+ writel(lower_32_bits(config->addr),
+ ecc->regs + ECC_ENCDIADDR);
+
+ } else {
+ /* configure ECC decoder (in bits) */
+ dec_sz = (config->len << 3) +
+ config->strength * ECC_PARITY_BITS;
+
+ reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
+ reg |= DEC_EMPTY_EN;
+ writel(reg, ecc->regs + ECC_DECCNFG);
+
+ if (config->sectors)
+ ecc->sectors = 1 << (config->sectors - 1);
+ }
+}
+
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+ int sectors)
+{
+ u32 offset, i, err;
+ u32 bitflips = 0;
+
+ stats->corrected = 0;
+ stats->failed = 0;
+
+ for (i = 0; i < sectors; i++) {
+ offset = (i >> 2) << 2;
+ err = readl(ecc->regs + ECC_DECENUM0 + offset);
+ err = err >> ((i % 4) * 8);
+ err &= ERR_MASK;
+ if (err == ERR_MASK) {
+ /* uncorrectable errors */
+ stats->failed++;
+ continue;
+ }
+
+ stats->corrected += err;
+ bitflips = max_t(u32, bitflips, err);
+ }
+
+ stats->bitflips = bitflips;
+}
+EXPORT_SYMBOL(mtk_ecc_get_stats);
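mtk_ecc_get_stats() above walks per-sector error counts packed four to a 32-bit ECC_DECENUMx word, one byte per sector with the count in the low six bits (0x3f marking an uncorrectable sector). A standalone sketch of that unpacking, with regs[] standing in for the register block:

#include <stdint.h>

#define ERR_MASK	0x3f	/* 6-bit count; 0x3f flags an uncorrectable sector */

static unsigned int sector_err_count(const uint32_t *regs, unsigned int sector)
{
	uint32_t word = regs[sector / 4];	/* four sectors per register */

	return (word >> ((sector % 4) * 8)) & ERR_MASK;
}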
+
+void mtk_ecc_release(struct mtk_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+EXPORT_SYMBOL(mtk_ecc_release);
+
+static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
+{
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+ mtk_ecc_wait_idle(ecc, ECC_DECODE);
+ writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+}
+
+static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct mtk_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&pdev->dev);
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+ mtk_ecc_hw_init(ecc);
+
+ return ecc;
+}
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
+{
+ struct mtk_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+ if (np) {
+ ecc = mtk_ecc_get(np);
+ of_node_put(np);
+ }
+
+ return ecc;
+}
+EXPORT_SYMBOL(of_mtk_ecc_get);
+
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ enum mtk_ecc_operation op = config->op;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ecc->lock);
+ if (ret) {
+ dev_err(ecc->dev, "interrupted when attempting to lock\n");
+ return ret;
+ }
+
+ mtk_ecc_wait_idle(ecc, op);
+ mtk_ecc_config(ecc, config);
+ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+ init_completion(&ecc->done);
+ writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_enable);
+
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+ enum mtk_ecc_operation op = ECC_ENCODE;
+
+ /* find out the running operation */
+ if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
+ op = ECC_DECODE;
+
+ /* disable it */
+ mtk_ecc_wait_idle(ecc, op);
+ writew(0, ecc->regs + ECC_IRQ_REG(op));
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+ mutex_unlock(&ecc->lock);
+}
+EXPORT_SYMBOL(mtk_ecc_disable);
+
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
+ (op == ECC_ENCODE) ? "encoder" : "decoder");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_wait_done);
+
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 bytes)
+{
+ dma_addr_t addr;
+ u32 *p, len, i;
+ int ret = 0;
+
+ addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ecc->dev, addr);
+ if (ret) {
+ dev_err(ecc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ config->op = ECC_ENCODE;
+ config->addr = addr;
+ ret = mtk_ecc_enable(ecc, config);
+ if (ret) {
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
+ if (ret)
+ goto timeout;
+
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+ len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ p = (u32 *)(data + bytes);
+
+ /* write the parity bytes generated by the ECC back to the OOB region */
+ for (i = 0; i < len; i++)
+ p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+timeout:
+
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ mtk_ecc_disable(ecc);
+
+ return ret;
+}
+EXPORT_SYMBOL(mtk_ecc_encode);
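The parity copy-out in mtk_ecc_encode() rounds strength * ECC_PARITY_BITS up to whole bytes before reading the ECC_ENCPAR registers. A tiny standalone check of that arithmetic:

#include <stdio.h>

#define ECC_PARITY_BITS	14	/* parity bits per correction step (mtk_ecc.h) */

/* Bytes of parity produced for a given strength, rounded up to whole bytes. */
static unsigned int parity_bytes(unsigned int strength)
{
	return (strength * ECC_PARITY_BITS + 7) / 8;
}

int main(void)
{
	printf("BCH-4:  %u bytes\n", parity_bytes(4));	/* 56 bits  -> 7  */
	printf("BCH-12: %u bytes\n", parity_bytes(12));	/* 168 bits -> 21 */
	return 0;
}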
+
+void mtk_ecc_adjust_strength(u32 *p)
+{
+ u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecc); i++) {
+ if (*p <= ecc[i]) {
+ if (!i)
+ *p = ecc[i];
+ else if (*p != ecc[i])
+ *p = ecc[i - 1];
+ return;
+ }
+ }
+
+ *p = ecc[ARRAY_SIZE(ecc) - 1];
+}
+EXPORT_SYMBOL(mtk_ecc_adjust_strength);
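mtk_ecc_adjust_strength() snaps a requested strength onto the engine's supported ladder: values between steps are rounded down, values below the minimum come up to 4, and anything above the top is clamped to 60. A short usage sketch (the wrapper name is illustrative):

#include "mtk_ecc.h"

static u32 pick_supported_strength(u32 wanted)
{
	u32 strength = wanted;

	mtk_ecc_adjust_strength(&strength);	/* e.g. 30 -> 28, 2 -> 4, 100 -> 60 */
	return strength;
}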
+
+static int mtk_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ecc *ecc;
+ struct resource *res;
+ int irq, ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ecc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ecc->regs)) {
+ dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
+ return PTR_ERR(ecc->regs);
+ }
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return -EINVAL;
+ }
+
+ ecc->dev = dev;
+ mutex_init(&ecc->lock);
+ platform_set_drvdata(pdev, ecc);
+ dev_info(dev, "probed\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_ecc_suspend(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ecc->clk);
+
+ return 0;
+}
+
+static int mtk_ecc_resume(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ecc->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ mtk_ecc_hw_init(ecc);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
+#endif
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+ { .compatible = "mediatek,mt2701-ecc" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
+
+static struct platform_driver mtk_ecc_driver = {
+ .probe = mtk_ecc_probe,
+ .driver = {
+ .name = "mtk-ecc",
+ .of_match_table = of_match_ptr(mtk_ecc_dt_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_ecc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_ecc_driver);
+
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand ECC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
new file mode 100644
index 000000000000..cbeba5cd1c13
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -0,0 +1,50 @@
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+#define ECC_PARITY_BITS (14)
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(u32 *);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
new file mode 100644
index 000000000000..ddaa2acb9dd7
--- /dev/null
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -0,0 +1,1526 @@
+/*
+ * MTK NAND Flash controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include "mtk_ecc.h"
+
+/* NAND controller register definition */
+#define NFI_CNFG (0x00)
+#define CNFG_AHB BIT(0)
+#define CNFG_READ_EN BIT(1)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_BYTE_RW BIT(6)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_OP_CUST (6 << 12)
+#define NFI_PAGEFMT (0x04)
+#define PAGEFMT_FDM_ECC_SHIFT (12)
+#define PAGEFMT_FDM_SHIFT (8)
+#define PAGEFMT_SPARE_16 (0)
+#define PAGEFMT_SPARE_26 (1)
+#define PAGEFMT_SPARE_27 (2)
+#define PAGEFMT_SPARE_28 (3)
+#define PAGEFMT_SPARE_32 (4)
+#define PAGEFMT_SPARE_36 (5)
+#define PAGEFMT_SPARE_40 (6)
+#define PAGEFMT_SPARE_44 (7)
+#define PAGEFMT_SPARE_48 (8)
+#define PAGEFMT_SPARE_49 (9)
+#define PAGEFMT_SPARE_50 (0xa)
+#define PAGEFMT_SPARE_51 (0xb)
+#define PAGEFMT_SPARE_52 (0xc)
+#define PAGEFMT_SPARE_62 (0xd)
+#define PAGEFMT_SPARE_63 (0xe)
+#define PAGEFMT_SPARE_64 (0xf)
+#define PAGEFMT_SPARE_SHIFT (4)
+#define PAGEFMT_SEC_SEL_512 BIT(2)
+#define PAGEFMT_512_2K (0)
+#define PAGEFMT_2K_4K (1)
+#define PAGEFMT_4K_8K (2)
+#define PAGEFMT_8K_16K (3)
+/* NFI control */
+#define NFI_CON (0x08)
+#define CON_FIFO_FLUSH BIT(0)
+#define CON_NFI_RST BIT(1)
+#define CON_BRD BIT(8) /* burst read */
+#define CON_BWR BIT(9) /* burst write */
+#define CON_SEC_SHIFT (12)
+/* Timing control register */
+#define NFI_ACCCON (0x0C)
+#define NFI_INTR_EN (0x10)
+#define INTR_AHB_DONE_EN BIT(6)
+#define NFI_INTR_STA (0x14)
+#define NFI_CMD (0x20)
+#define NFI_ADDRNOB (0x30)
+#define NFI_COLADDR (0x34)
+#define NFI_ROWADDR (0x38)
+#define NFI_STRDATA (0x40)
+#define STAR_EN (1)
+#define STAR_DE (0)
+#define NFI_CNRNB (0x44)
+#define NFI_DATAW (0x50)
+#define NFI_DATAR (0x54)
+#define NFI_PIO_DIRDY (0x58)
+#define PIO_DI_RDY (0x01)
+#define NFI_STA (0x60)
+#define STA_CMD BIT(0)
+#define STA_ADDR BIT(1)
+#define STA_BUSY BIT(8)
+#define STA_EMP_PAGE BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK (0xf << 16)
+#define NFI_ADDRCNTR (0x70)
+#define CNTR_MASK GENMASK(16, 12)
+#define NFI_STRADDR (0x80)
+#define NFI_BYTELEN (0x84)
+#define NFI_CSEL (0x90)
+#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE (8)
+#define NFI_FDM_MIN_SIZE (1)
+#define NFI_MASTER_STA (0x224)
+#define MASTER_STA_MASK (0x0FFF)
+#define NFI_EMPTY_THRESH (0x23C)
+
+#define MTK_NAME "mtk-nand"
+#define KB(x) ((x) * 1024UL)
+#define MB(x) (KB(x) * 1024UL)
+
+#define MTK_TIMEOUT (500000)
+#define MTK_RESET_TIMEOUT (1000000)
+#define MTK_MAX_SECTOR (16)
+#define MTK_NAND_MAX_NSELS (2)
+
+struct mtk_nfc_bad_mark_ctl {
+ void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
+ u32 sec;
+ u32 pos;
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+ u32 reg_size;
+ u32 ecc_size;
+};
+
+struct mtk_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+
+ struct mtk_nfc_bad_mark_ctl bad_mark;
+ struct mtk_nfc_fdm fdm;
+ u32 spare_per_sector;
+
+ int nsels;
+ u8 sels[0];
+ /* nothing after this field */
+};
+
+struct mtk_nfc_clk {
+ struct clk *nfi_clk;
+ struct clk *pad_clk;
+};
+
+struct mtk_nfc {
+ struct nand_hw_control controller;
+ struct mtk_ecc_config ecc_cfg;
+ struct mtk_nfc_clk clk;
+ struct mtk_ecc *ecc;
+
+ struct device *dev;
+ void __iomem *regs;
+
+ struct completion done;
+ struct list_head chips;
+
+ u8 *buffer;
+};
+
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct mtk_nfc_nand_chip, nand);
+}
+
+static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u8 *poi;
+
+ /* map the sector's FDM data to free oob:
+ * the beginning of the oob area stores the FDM data of the bad mark sector
+ */
+
+ if (i < mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+ else if (i == mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi;
+ else
+ poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+ return poi;
+}
+
+static inline int mtk_data_len(struct nand_chip *chip)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+ return chip->ecc.size + mtk_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip);
+}
+
+static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
+}
+
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+ writew(val, nfc->regs + reg);
+}
+
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+ writeb(val, nfc->regs + reg);
+}
+
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+ return readl_relaxed(nfc->regs + reg);
+}
+
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+ return readw_relaxed(nfc->regs + reg);
+}
+
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+ return readb_relaxed(nfc->regs + reg);
+}
+
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ /* reset all registers and force the NFI master to terminate */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+ /* wait for the master to finish the last transaction */
+ ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
+ !(val & MASTER_STA_MASK), 50,
+ MTK_RESET_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
+ NFI_MASTER_STA, val);
+
+ /* ensure any status register affected by the NFI master is reset */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+ nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+}
+
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, command, NFI_CMD);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_CMD), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering command mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, addr, NFI_COLADDR);
+ nfi_writel(nfc, 0, NFI_ROWADDR);
+ nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_ADDR), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering address mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 fmt, spare;
+
+ if (!mtd->writesize)
+ return 0;
+
+ spare = mtk_nand->spare_per_sector;
+
+ switch (mtd->writesize) {
+ case 512:
+ fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+ case KB(2):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_512_2K;
+ break;
+ case KB(4):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_2K_4K;
+ break;
+ case KB(8):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_4K_8K;
+ break;
+ case KB(16):
+ fmt = PAGEFMT_8K_16K;
+ break;
+ default:
+ dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+ /*
+ * the hardware will double the value for this eccsize, so we need to
+ * halve it
+ */
+ if (chip->ecc.size == 1024)
+ spare >>= 1;
+
+ switch (spare) {
+ case 16:
+ fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 26:
+ fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 27:
+ fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 28:
+ fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 32:
+ fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 36:
+ fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 40:
+ fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 44:
+ fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 48:
+ fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 49:
+ fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 50:
+ fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 51:
+ fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 52:
+ fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 62:
+ fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 63:
+ fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 64:
+ fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
+ break;
+ default:
+ dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
+ return -EINVAL;
+ }
+
+ fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+ fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ nfi_writew(nfc, fmt, NFI_PAGEFMT);
+
+ nfc->ecc_cfg.strength = chip->ecc.strength;
+ nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
+
+ return 0;
+}
+
+static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+ if (chip < 0)
+ return;
+
+ mtk_nfc_hw_runtime_config(mtd);
+
+ nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
+}
+
+static int mtk_nfc_dev_ready(struct mtd_info *mtd)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+ if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
+ return 0;
+
+ return 1;
+}
+
+static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+ if (ctrl & NAND_ALE) {
+ mtk_nfc_send_address(nfc, dat);
+ } else if (ctrl & NAND_CLE) {
+ mtk_nfc_hw_reset(nfc);
+
+ nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+ mtk_nfc_send_command(nfc, dat);
+ }
+}
+
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+ int rc;
+ u8 val;
+
+ rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
+ val & PIO_DI_RDY, 10, MTK_TIMEOUT);
+ if (rc < 0)
+ dev_err(nfc->dev, "data not ready\n");
+}
+
+static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /* after each byte read, the NFI_STA reg is reset by the hardware */
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ /*
+ * set to max sector to allow the HW to continue reading over
+ * unaligned accesses
+ */
+ reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ /* trigger to fetch data */
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+
+ return nfi_readb(nfc, NFI_DATAR);
+}
+
+static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = mtk_nfc_read_byte(mtd);
+}
+
+static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ u32 reg;
+
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+ nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ mtk_nfc_write_byte(mtd, buf[i]);
+}
+
+static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int size = chip->ecc.size + mtk_nand->fdm.reg_size;
+
+ nfc->ecc_cfg.mode = ECC_DMA_MODE;
+ nfc->ecc_cfg.op = ECC_ENCODE;
+
+ return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
+}
+
+static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
+{
+ /* nop */
+}
+
+static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+ u32 bad_pos = nand->bad_mark.pos;
+
+ if (raw)
+ bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
+ else
+ bad_pos += nand->bad_mark.sec * chip->ecc.size;
+
+ swap(chip->oob_poi[0], buf[bad_pos]);
+}
+
+static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 start, end;
+ int i, ret;
+
+ start = offset / chip->ecc.size;
+ end = DIV_ROUND_UP(offset + len, chip->ecc.size);
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
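+ /* only regenerate FDM data and ECC parity for sectors inside the subpage range */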
+ if (start > i || i >= end)
+ continue;
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+ /* regenerate the ECC parity for this sector (stored in the OOB area) */
+ ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 i;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (buf)
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+ }
+}
+
+static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
+ u32 sectors)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
+ for (i = 0; i < sectors; i++) {
+ oobptr = oob_ptr(chip, start + i);
+ vall = nfi_readl(nfc, NFI_FDML(i));
+ valm = nfi_readl(nfc, NFI_FDMM(i));
+
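+ /* bytes 0-3 of each sector's FDM come from FDML, bytes 4-7 from FDMM */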
+ for (j = 0; j < fdm->reg_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+ }
+}
+
+static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ oobptr = oob_ptr(chip, i);
+ vall = 0;
+ valm = 0;
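+ /* pack the OOB bytes into FDML/FDMM, padding unused bytes with 0xff */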
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+ nfi_writel(nfc, vall, NFI_FDML(i));
+ nfi_writel(nfc, valm, NFI_FDMM(i));
+ }
+}
+
+static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int len)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct device *dev = nfc->dev;
+ dma_addr_t addr;
+ u32 reg;
+ int ret;
+
+ addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nfc->dev, addr);
+ if (ret) {
+ dev_err(nfc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+
+ init_completion(&nfc->done);
+
+ reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "program ahb done timeout\n");
+ nfi_writew(nfc, 0, NFI_INTR_EN);
+ ret = -ETIMEDOUT;
+ goto timeout;
+ }
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
+ (reg & CNTR_MASK) >= chip->ecc.steps,
+ 10, MTK_TIMEOUT);
+ if (ret)
+ dev_err(dev, "hwecc write timeout\n");
+
+timeout:
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return ret;
+}
+
+static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ size_t len;
+ const u8 *bufpoi;
+ u32 reg;
+ int ret;
+
+ if (!raw) {
+ /* OOB => FDM: from register, ECC: from HW */
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+ nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+ nfc->ecc_cfg.op = ECC_ENCODE;
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (ret) {
+ /* clear NFI config */
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ return ret;
+ }
+
+ memcpy(nfc->buffer, buf, mtd->writesize);
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
+ bufpoi = nfc->buffer;
+
+ /* write OOB into the FDM registers (OOB area in MTK NAND) */
+ mtk_nfc_write_fdm(chip);
+ } else {
+ bufpoi = buf;
+ }
+
+ len = mtd->writesize + (raw ? mtd->oobsize : 0);
+ ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
+
+ if (!raw)
+ mtk_ecc_disable(nfc->ecc);
+
+ return ret;
+}
+
+static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ return mtk_nfc_write_page(mtd, chip, buf, page, 0);
+}
+
+static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_on, int pg)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(mtd, buf);
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
+}
+
+static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_on, int page)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
+ if (ret < 0)
+ return ret;
+
+ /* use the data in the private buffer (now with FDM and ECC parity) */
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
+}
+
+static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int ret;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
+ if (ret < 0)
+ return -EIO;
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+
+ return ret & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
+ int rc, i;
+
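+ /* the controller flags an empty (all 0xff) page; report it as clean data */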
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ memset(buf, 0xff, sectors * chip->ecc.size);
+ for (i = 0; i < sectors; i++)
+ memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+ return 0;
+ }
+
+ mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+ mtd->ecc_stats.corrected += stats.corrected;
+ mtd->ecc_stats.failed += stats.failed;
+
+ return stats.bitflips;
+}
+
+static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 spare = mtk_nand->spare_per_sector;
+ u32 column, sectors, start, end, reg;
+ dma_addr_t addr;
+ int bitflips;
+ size_t len;
+ u8 *buf;
+ int rc;
+
+ start = data_offs / chip->ecc.size;
+ end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+
+ sectors = end - start;
+ column = start * (chip->ecc.size + spare);
+
+ len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
+ buf = bufpoi + start * chip->ecc.size;
+
+ if (column != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+
+ addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(nfc->dev, addr);
+ if (rc) {
+ dev_err(nfc->dev, "dma mapping error\n");
+
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+ if (!raw) {
+ reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ nfc->ecc_cfg.sectors = sectors;
+ nfc->ecc_cfg.op = ECC_DECODE;
+ rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (rc) {
+ dev_err(nfc->dev, "ecc enable\n");
+ /* clear NFI_CNFG */
+ reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ return rc;
+ }
+ } else {
+ nfi_writew(nfc, reg, NFI_CNFG);
+ }
+
+ nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+ init_completion(&nfc->done);
+ reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!rc)
+ dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+ rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+ (reg & CNTR_MASK) >= sectors, 10,
+ MTK_TIMEOUT);
+ if (rc < 0) {
+ dev_err(nfc->dev, "subpage done timeout\n");
+ bitflips = -EIO;
+ } else {
+ bitflips = 0;
+ if (!raw) {
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+ mtk_nfc_read_fdm(chip, start, sectors);
+ }
+ }
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ if (raw)
+ goto done;
+
+ mtk_ecc_disable(nfc->ecc);
+
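+ /* undo the bad block marker swap if its sector was covered by this read */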
+ if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return bitflips;
+}
+
+static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *p,
+ int oob_on, int pg)
+{
+ return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_on, int page)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+ page, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ if (buf)
+ memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc.size);
+ }
+
+ return ret;
+}
+
+static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: minimum required time for CS post pulling down after accessing
+ * the device
+ * 27:22: minimum required time for CS pre pulling down before accessing
+ * the device
+ * 21:16: minimum required time from NCEB low to NREB low
+ * 15:12: minimum required time from NWEB high to NREB low.
+ * 11:08: write enable hold time
+ * 07:04: write wait states
+ * 03:00: read wait states
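+ *
+ * the value 0x10804211 programmed below decodes (from bit 31 down to
+ * bit 0) to 1, 2, 0, 4, 2, 1 and 1 for the fields listed above.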
+ */
+ nfi_writel(nfc, 0x10804211, NFI_ACCCON);
+
+ /*
+ * CNRNB: nand ready/busy register
+ * -------------------------------
+ * 7:4: timeout register for polling the NAND busy/ready signal
+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
+ */
+ nfi_writew(nfc, 0xf1, NFI_CNRNB);
+ nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+ mtk_nfc_hw_reset(nfc);
+
+ nfi_readl(nfc, NFI_INTR_STA);
+ nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+ struct mtk_nfc *nfc = id;
+ u16 sta, ien;
+
+ sta = nfi_readw(nfc, NFI_INTR_STA);
+ ien = nfi_readw(nfc, NFI_INTR_EN);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
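+ /* disable only the interrupt sources that have fired */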
+ nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+ complete(&nfc->done);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk->nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk->pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(clk->nfi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
+{
+ clk_disable_unprepare(clk->nfi_clk);
+ clk_disable_unprepare(clk->pad_clk);
+}
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 eccsteps;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+
+ if (section >= eccsteps)
+ return -ERANGE;
+
+ oob_region->length = fdm->reg_size - fdm->ecc_size;
+ oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+ return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 eccsteps;
+
+ if (section)
+ return -ERANGE;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+ oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+ oob_region->length = mtd->oobsize - oob_region->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+ .free = mtk_nfc_ooblayout_free,
+ .ecc = mtk_nfc_ooblayout_ecc,
+};
+
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ u32 ecc_bytes;
+
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+
+ fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+ if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+ fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+ /* bad block mark storage */
+ fdm->ecc_size = 1;
+}
+
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+ struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (mtd->writesize == 512) {
+ bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+ } else {
+ bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+ bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+ bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+ }
+}
+
+static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
+ 48, 49, 50, 51, 52, 62, 63, 64};
+ u32 eccsteps, i;
+
+ eccsteps = mtd->writesize / nand->ecc.size;
+ *sps = mtd->oobsize / eccsteps;
+
+ if (nand->ecc.size == 1024)
+ *sps >>= 1;
+
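+ /* pick the largest supported spare size that fits (at least 16 bytes) */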
+ for (i = 0; i < ARRAY_SIZE(spare); i++) {
+ if (*sps <= spare[i]) {
+ if (!i)
+ *sps = spare[i];
+ else if (*sps != spare[i])
+ *sps = spare[i - 1];
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(spare))
+ *sps = spare[ARRAY_SIZE(spare) - 1];
+
+ if (nand->ecc.size == 1024)
+ *sps <<= 1;
+}
+
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ u32 spare;
+ int free;
+
+ /* support only ecc hw mode */
+ if (nand->ecc.mode != NAND_ECC_HW) {
+ dev_err(dev, "ecc.mode not supported\n");
+ return -EINVAL;
+ }
+
+ /* if optional dt settings not present */
+ if (!nand->ecc.size || !nand->ecc.strength) {
+ /* use datasheet requirements */
+ nand->ecc.strength = nand->ecc_strength_ds;
+ nand->ecc.size = nand->ecc_step_ds;
+
+ /*
+ * align eccstrength and eccsize
+ * this controller only supports 512 and 1024 sizes
+ */
+ if (nand->ecc.size < 1024) {
+ if (mtd->writesize > 512) {
+ nand->ecc.size = 1024;
+ nand->ecc.strength <<= 1;
+ } else {
+ nand->ecc.size = 512;
+ }
+ } else {
+ nand->ecc.size = 1024;
+ }
+
+ mtk_nfc_set_spare_per_sector(&spare, mtd);
+
+ /* calculate oob bytes except ecc parity data */
+ free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = spare - free;
+
+ /*
+ * increase the ECC strength if the remaining OOB is larger than the
+ * maximum FDM size, or reduce it if the OOB is too small to hold the
+ * ECC parity data.
+ */
+ if (free > NFI_FDM_MAX_SIZE) {
+ spare -= NFI_FDM_MAX_SIZE;
+ nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ } else if (free < 0) {
+ spare -= NFI_FDM_MIN_SIZE;
+ nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ }
+ }
+
+ mtk_ecc_adjust_strength(&nand->ecc.strength);
+
+ dev_info(dev, "eccsize %d eccstrength %d\n",
+ nand->ecc.size, nand->ecc.strength);
+
+ return 0;
+}
+
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+ struct device_node *np)
+{
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int nsels, len;
+ u32 tmp;
+ int ret;
+ int i;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -ENODEV;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+ chip->sels[i] = tmp;
+ }
+
+ nand = &chip->nand;
+ nand->controller = &nfc->controller;
+
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+ nand->dev_ready = mtk_nfc_dev_ready;
+ nand->select_chip = mtk_nfc_select_chip;
+ nand->write_byte = mtk_nfc_write_byte;
+ nand->write_buf = mtk_nfc_write_buf;
+ nand->read_byte = mtk_nfc_read_byte;
+ nand->read_buf = mtk_nfc_read_buf;
+ nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+ /* set default mode in case dt entry is missing */
+ nand->ecc.mode = NAND_ECC_HW;
+
+ nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+ nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+ nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+ nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+ nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+ nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+ nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+ nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+ mtd->name = MTK_NAME;
+ mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+ mtk_nfc_hw_init(nfc);
+
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return -ENODEV;
+
+ /* store the BBT magic in the page data, because the OOB area is not ECC protected */
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ ret = mtk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return -EINVAL;
+
+ if (nand->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16bits buswidth not supported");
+ return -EINVAL;
+ }
+
+ mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+ mtk_nfc_set_fdm(&chip->fdm, mtd);
+ mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
+
+ len = mtd->writesize + mtd->oobsize;
+ nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!nfc->buffer)
+ return -ENOMEM;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return -ENODEV;
+
+ ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ if (ret) {
+ dev_err(dev, "mtd parse partition error\n");
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_nfc *nfc;
+ struct resource *res;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ /* defer probing if the ECC engine is not ready yet */
+ nfc->ecc = of_mtk_ecc_get(np);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+ else if (!nfc->ecc)
+ return -ENODEV;
+
+ nfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ dev_err(dev, "no nfi base\n");
+ goto release_ecc;
+ }
+
+ nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+ if (IS_ERR(nfc->clk.nfi_clk)) {
+ dev_err(dev, "no clk\n");
+ ret = PTR_ERR(nfc->clk.nfi_clk);
+ goto release_ecc;
+ }
+
+ nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+ if (IS_ERR(nfc->clk.pad_clk)) {
+ dev_err(dev, "no pad clk\n");
+ ret = PTR_ERR(nfc->clk.pad_clk);
+ goto release_ecc;
+ }
+
+ ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+ if (ret)
+ goto release_ecc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no nfi irq resource\n");
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+ if (ret) {
+ dev_err(dev, "failed to request nfi irq\n");
+ goto clk_disable;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set dma mask\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = mtk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto clk_disable;
+ }
+
+ return 0;
+
+clk_disable:
+ mtk_nfc_disable_clk(&nfc->clk);
+
+release_ecc:
+ mtk_ecc_release(nfc->ecc);
+
+ return ret;
+}
+
+static int mtk_nfc_remove(struct platform_device *pdev)
+{
+ struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+ struct mtk_nfc_nand_chip *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
+ node);
+ nand_release(nand_to_mtd(&chip->nand));
+ list_del(&chip->node);
+ }
+
+ mtk_ecc_release(nfc->ecc);
+ mtk_nfc_disable_clk(&nfc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nfc_suspend(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+ mtk_nfc_disable_clk(&nfc->clk);
+
+ return 0;
+}
+
+static int mtk_nfc_resume(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret;
+ u32 i;
+
+ udelay(200);
+
+ ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+ if (ret)
+ return ret;
+
+ mtk_nfc_hw_init(nfc);
+
+ /* reset NAND chip if VCC was powered off */
+ list_for_each_entry(chip, &nfc->chips, node) {
+ nand = &chip->nand;
+ mtd = nand_to_mtd(nand);
+ for (i = 0; i < chip->nsels; i++) {
+ nand->select_chip(mtd, i);
+ nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+ { .compatible = "mediatek,mt2701-nfc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static struct platform_driver mtk_nfc_driver = {
+ .probe = mtk_nfc_probe,
+ .remove = mtk_nfc_remove,
+ .driver = {
+ .name = MTK_NAME,
+ .of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_nfc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 0b0dc29d2af7..77533f7f2429 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2610,7 +2610,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
int cached = writelen > bytes && page != blockmask;
uint8_t *wbuf = buf;
int use_bufpoi;
- int part_pagewr = (column || writelen < (mtd->writesize - 1));
+ int part_pagewr = (column || writelen < mtd->writesize);
if (part_pagewr)
use_bufpoi = 1;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index ccc05f5b2695..2af9869a115e 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -168,6 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = {
/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_ESMT, "ESMT"},
{NAND_MFR_SAMSUNG, "Samsung"},
{NAND_MFR_FUJITSU, "Fujitsu"},
{NAND_MFR_NATIONAL, "National"},
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index a136da8df6fe..a59361c36f40 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -118,8 +118,6 @@
#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define STATUS_BUFF_EMPTY 0x00000001
-#define OMAP24XX_DMA_GPMC 4
-
#define SECTOR_BYTES 512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD 4
@@ -1811,7 +1809,6 @@ static int omap_nand_probe(struct platform_device *pdev)
struct nand_chip *nand_chip;
int err;
dma_cap_mask_t mask;
- unsigned sig;
struct resource *res;
struct device *dev = &pdev->dev;
int min_oobbytes = BADBLOCK_MARKER_LENGTH;
@@ -1924,11 +1921,11 @@ static int omap_nand_probe(struct platform_device *pdev)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- sig = OMAP24XX_DMA_GPMC;
- info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
- if (!info->dma) {
+ info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
+
+ if (IS_ERR(info->dma)) {
dev_err(&pdev->dev, "DMA engine request failed\n");
- err = -ENXIO;
+ err = PTR_ERR(info->dma);
goto return_error;
} else {
struct dma_slave_config cfg;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index a83a690688b4..e414b31b71c1 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -39,6 +39,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/reset.h>
#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
@@ -153,6 +154,7 @@
/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
+#define NFC_ECC_ERR_MSK GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
@@ -269,10 +271,12 @@ struct sunxi_nfc {
void __iomem *regs;
struct clk *ahb_clk;
struct clk *mod_clk;
+ struct reset_control *reset;
unsigned long assigned_cs;
unsigned long clk_rate;
struct list_head chips;
struct completion complete;
+ struct dma_chan *dmac;
};
static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
@@ -365,6 +369,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
return ret;
}
+static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+ int chunksize, int nchunks,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct dma_async_tx_descriptor *dmad;
+ enum dma_transfer_direction tdir;
+ dma_cookie_t dmat;
+ int ret;
+
+ if (ddir == DMA_FROM_DEVICE)
+ tdir = DMA_DEV_TO_MEM;
+ else
+ tdir = DMA_MEM_TO_DEV;
+
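+ /* describe the whole transfer (nchunks * chunksize bytes) with one scatterlist entry */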
+ sg_init_one(sg, buf, nchunks * chunksize);
+ ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+ if (!ret)
+ return -ENOMEM;
+
+ dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+ if (!dmad) {
+ ret = -EINVAL;
+ goto err_unmap_buf;
+ }
+
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+ writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+ writel(chunksize, nfc->regs + NFC_REG_CNT);
+ dmat = dmaengine_submit(dmad);
+
+ ret = dma_submit_error(dmat);
+ if (ret)
+ goto err_clr_dma_flag;
+
+ return 0;
+
+err_clr_dma_flag:
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ return ret;
+}
+
+static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+}
+
static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
@@ -822,17 +887,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
}
static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
- int step, bool *erased)
+ int step, u32 status, bool *erased)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
- u32 status, tmp;
+ u32 tmp;
*erased = false;
- status = readl(nfc->regs + NFC_REG_ECC_ST);
-
if (status & NFC_ECC_ERR(step))
return -EBADMSG;
@@ -898,6 +961,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
*cur_off = oob_off + ecc->bytes + 4;
ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+ readl(nfc->regs + NFC_REG_ECC_ST),
&erased);
if (erased)
return 1;
@@ -967,6 +1031,130 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
*cur_off = mtd->oobsize + mtd->writesize;
}
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+ int oob_required, int page,
+ int nchunks)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, raw_mode = 0;
+ struct scatterlist sg;
+ u32 status;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+ DMA_FROM_DEVICE, &sg);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_randomizer_config(mtd, page, false);
+ sunxi_nfc_randomizer_enable(mtd);
+
+ writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+ NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ if (ret)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+ bool erased;
+
+ ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+ oob_required ? oob : NULL,
+ i, status, &erased);
+
+ /* ECC errors are handled in the second loop. */
+ if (ret < 0)
+ continue;
+
+ if (oob_required && !erased) {
+ /* TODO: use DMA to retrieve OOB */
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+ !i, page);
+ }
+
+ if (erased)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ }
+
+ if (status & NFC_ECC_ERR_MSK) {
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ if (!(status & NFC_ECC_ERR(i)))
+ continue;
+
+ /*
+ * Re-read the data with the randomizer disabled to
+ * identify bitflips in erased pages.
+ */
+ if (randomized) {
+ /* TODO: use DMA to read page in raw mode */
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ data_off, -1);
+ nand->read_buf(mtd, data, ecc->size);
+ }
+
+ /* TODO: use DMA to retrieve OOB */
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0,
+ ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ }
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+ NULL, !raw_mode,
+ page);
+
+ return max_bitflips;
+}
+
static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
const u8 *data, int data_off,
const u8 *oob, int oob_off,
@@ -1065,6 +1253,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
return max_bitflips;
}
+static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
+ chip->ecc.steps);
+ if (ret >= 0)
+ return ret;
+
+ /* Fallback to PIO mode */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+ return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
+}
+
static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
struct nand_chip *chip,
u32 data_offs, u32 readlen,
@@ -1098,6 +1303,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
return max_bitflips;
}
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *buf, int page)
+{
+ int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+ int ret;
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+ if (ret >= 0)
+ return ret;
+
+ /* Fallback to PIO mode */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+ return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
+ buf, page);
+}
+
static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf, int oob_required,
@@ -1130,6 +1354,99 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
return 0;
}
+static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u32 data_offs, u32 data_len,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct scatterlist sg;
+ int ret, i;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+ DMA_TO_DEVICE, &sg);
+ if (ret)
+ goto pio_fallback;
+
+ for (i = 0; i < ecc->steps; i++) {
+ const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+ }
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_randomizer_config(mtd, page, false);
+ sunxi_nfc_randomizer_enable(mtd);
+
+ writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+ nfc->regs + NFC_REG_RCMD_SET);
+
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+ NFC_DATA_TRANS | NFC_ACCESS_DIR,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ if (ret)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ /* TODO: use DMA to transfer extra OOB bytes ? */
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ NULL, page);
+
+ return 0;
+
+pio_fallback:
+ return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
+}
+
static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
struct nand_chip *chip,
uint8_t *buf, int oob_required,
@@ -1497,10 +1814,19 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
int ret;
int i;
+ if (ecc->size != 512 && ecc->size != 1024)
+ return -EINVAL;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ /* Prefer 1k ECC chunk over 512 ones */
+ if (ecc->size == 512 && mtd->writesize > 512) {
+ ecc->size = 1024;
+ ecc->strength *= 2;
+ }
+
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
if (ecc->strength <= strengths[i])
@@ -1550,14 +1876,28 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
if (ret)
return ret;
- ecc->read_page = sunxi_nfc_hw_ecc_read_page;
- ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ if (nfc->dmac) {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+ nand->options |= NAND_USE_BOUNCE_BUFFER;
+ } else {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ }
+
+ /* TODO: support DMA for raw accesses and subpage write */
+ ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
ecc->read_oob_raw = nand_read_oob_std;
ecc->write_oob_raw = nand_write_oob_std;
ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1871,26 +2211,59 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (ret)
goto out_ahb_clk_unprepare;
+ nfc->reset = devm_reset_control_get_optional(dev, "ahb");
+ if (!IS_ERR(nfc->reset)) {
+ ret = reset_control_deassert(nfc->reset);
+ if (ret) {
+ dev_err(dev, "reset err %d\n", ret);
+ goto out_mod_clk_unprepare;
+ }
+ } else if (PTR_ERR(nfc->reset) != -ENOENT) {
+ ret = PTR_ERR(nfc->reset);
+ goto out_mod_clk_unprepare;
+ }
+
ret = sunxi_nfc_rst(nfc);
if (ret)
- goto out_mod_clk_unprepare;
+ goto out_ahb_reset_reassert;
writel(0, nfc->regs + NFC_REG_INT);
ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
0, "sunxi-nand", nfc);
if (ret)
- goto out_mod_clk_unprepare;
+ goto out_ahb_reset_reassert;
+
+ nfc->dmac = dma_request_slave_channel(dev, "rxtx");
+ if (nfc->dmac) {
+ struct dma_slave_config dmac_cfg = { };
+
+ dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+ dmac_cfg.dst_addr = dmac_cfg.src_addr;
+ dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+ dmac_cfg.src_maxburst = 4;
+ dmac_cfg.dst_maxburst = 4;
+ dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+ } else {
+ dev_warn(dev, "failed to request rxtx DMA channel\n");
+ }
platform_set_drvdata(pdev, nfc);
ret = sunxi_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
- goto out_mod_clk_unprepare;
+ goto out_release_dmac;
}
return 0;
+out_release_dmac:
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
+out_ahb_reset_reassert:
+ if (!IS_ERR(nfc->reset))
+ reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
@@ -1904,6 +2277,12 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
sunxi_nand_chips_cleanup(nfc);
+
+ if (!IS_ERR(nfc->reset))
+ reset_control_assert(nfc->reset);
+
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
clk_disable_unprepare(nfc->mod_clk);
clk_disable_unprepare(nfc->ahb_clk);
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 0cf0ac07a8c2..1f2948c0c458 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -4,6 +4,7 @@
* by the Free Software Foundation.
*
* Copyright © 2012 John Crispin <blogic@openwrt.org>
+ * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
*/
#include <linux/mtd/nand.h>
@@ -16,20 +17,28 @@
#define EBU_ADDSEL1 0x24
#define EBU_NAND_CON 0xB0
#define EBU_NAND_WAIT 0xB4
+#define NAND_WAIT_RD BIT(0) /* NAND flash status output */
+#define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */
#define EBU_NAND_ECC0 0xB8
#define EBU_NAND_ECC_AC 0xBC
-/* nand commands */
-#define NAND_CMD_ALE (1 << 2)
-#define NAND_CMD_CLE (1 << 3)
-#define NAND_CMD_CS (1 << 4)
-#define NAND_WRITE_CMD_RESET 0xff
+/*
+ * nand commands
+ * The pins of the NAND chip are selected based on the address bits of the
+ * "register" read and write. There are no special registers, but an
+ * address range and the lower address bits are used to activate the
+ * correct line. For example, when bit (1 << 2) is set in the address,
+ * the ALE pin is activated.
+ */
+#define NAND_CMD_ALE BIT(2) /* address latch enable */
+#define NAND_CMD_CLE BIT(3) /* command latch enable */
+#define NAND_CMD_CS BIT(4) /* chip select */
+#define NAND_CMD_SE BIT(5) /* spare area access latch */
+#define NAND_CMD_WP BIT(6) /* write protect */
#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
#define NAND_WRITE_DATA (NAND_CMD_CS)
#define NAND_READ_DATA (NAND_CMD_CS)
-#define NAND_WAIT_WR_C (1 << 3)
-#define NAND_WAIT_RD (0x1)
/* we need to tel the ebu which addr we mapped the nand to */
#define ADDSEL1_MASK(x) (x << 4)
@@ -54,31 +63,41 @@
#define NAND_CON_CSMUX (1 << 1)
#define NAND_CON_NANDM 1
-static void xway_reset_chip(struct nand_chip *chip)
+struct xway_nand_data {
+ struct nand_chip chip;
+ unsigned long csflags;
+ void __iomem *nandaddr;
+};
+
+static u8 xway_readb(struct mtd_info *mtd, int op)
{
- unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
- unsigned long flags;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
- nandaddr &= ~NAND_WRITE_ADDR;
- nandaddr |= NAND_WRITE_CMD;
+ return readb(data->nandaddr + op);
+}
- /* finish with a reset */
- spin_lock_irqsave(&ebu_lock, flags);
- writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
- while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
- ;
- spin_unlock_irqrestore(&ebu_lock, flags);
+static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ writeb(value, data->nandaddr + op);
}
-static void xway_select_chip(struct mtd_info *mtd, int chip)
+static void xway_select_chip(struct mtd_info *mtd, int select)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
- switch (chip) {
+ switch (select) {
case -1:
ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ spin_unlock_irqrestore(&ebu_lock, data->csflags);
break;
case 0:
+ spin_lock_irqsave(&ebu_lock, data->csflags);
ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
break;
@@ -89,26 +108,16 @@ static void xway_select_chip(struct mtd_info *mtd, int chip)
static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
- unsigned long flags;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
- if (ctrl & NAND_CLE)
- nandaddr |= NAND_WRITE_CMD;
- else
- nandaddr |= NAND_WRITE_ADDR;
- this->IO_ADDR_W = (void __iomem *) nandaddr;
- }
+ if (cmd == NAND_CMD_NONE)
+ return;
- if (cmd != NAND_CMD_NONE) {
- spin_lock_irqsave(&ebu_lock, flags);
- writeb(cmd, this->IO_ADDR_W);
- while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
- ;
- spin_unlock_irqrestore(&ebu_lock, flags);
- }
+ if (ctrl & NAND_CLE)
+ xway_writeb(mtd, NAND_WRITE_CMD, cmd);
+ else if (ctrl & NAND_ALE)
+ xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
+
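+ /* busy-wait until the EBU reports the write/read cycle as complete */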
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
}
static int xway_dev_ready(struct mtd_info *mtd)
@@ -118,80 +127,122 @@ static int xway_dev_ready(struct mtd_info *mtd)
static unsigned char xway_read_byte(struct mtd_info *mtd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
- unsigned long flags;
- int ret;
+ return xway_readb(mtd, NAND_READ_DATA);
+}
+
+static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
- spin_lock_irqsave(&ebu_lock, flags);
- ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
- spin_unlock_irqrestore(&ebu_lock, flags);
+ for (i = 0; i < len; i++)
+ buf[i] = xway_readb(mtd, NAND_WRITE_DATA);
+}
- return ret;
+static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ xway_writeb(mtd, NAND_WRITE_DATA, buf[i]);
}
+/*
+ * Probe for the NAND device.
+ */
static int xway_nand_probe(struct platform_device *pdev)
{
- struct nand_chip *this = platform_get_drvdata(pdev);
- unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
- const __be32 *cs = of_get_property(pdev->dev.of_node,
- "lantiq,cs", NULL);
+ struct xway_nand_data *data;
+ struct mtd_info *mtd;
+ struct resource *res;
+ int err;
+ u32 cs;
u32 cs_flag = 0;
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->nandaddr))
+ return PTR_ERR(data->nandaddr);
+
+ nand_set_flash_node(&data->chip, pdev->dev.of_node);
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+
+ data->chip.cmd_ctrl = xway_cmd_ctrl;
+ data->chip.dev_ready = xway_dev_ready;
+ data->chip.select_chip = xway_select_chip;
+ data->chip.write_buf = xway_write_buf;
+ data->chip.read_buf = xway_read_buf;
+ data->chip.read_byte = xway_read_byte;
+ data->chip.chip_delay = 30;
+
+ data->chip.ecc.mode = NAND_ECC_SOFT;
+ data->chip.ecc.algo = NAND_ECC_HAMMING;
+
+ platform_set_drvdata(pdev, data);
+ nand_set_controller_data(&data->chip, data);
+
/* load our CS from the DT. Either we find a valid 1 or default to 0 */
- if (cs && (*cs == 1))
+ err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
+ if (!err && cs == 1)
cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
/* setup the EBU to run in NAND mode on our base addr */
- ltq_ebu_w32(CPHYSADDR(nandaddr)
- | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+ ltq_ebu_w32(CPHYSADDR(data->nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
- | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
- | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
- | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
- | cs_flag, EBU_NAND_CON);
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
- /* finish with a reset */
- xway_reset_chip(this);
+ /* Scan to find existence of the device */
+ err = nand_scan(mtd, 1);
+ if (err)
+ return err;
- return 0;
-}
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ nand_release(mtd);
-static struct platform_nand_data xway_nand_data = {
- .chip = {
- .nr_chips = 1,
- .chip_delay = 30,
- },
- .ctrl = {
- .probe = xway_nand_probe,
- .cmd_ctrl = xway_cmd_ctrl,
- .dev_ready = xway_dev_ready,
- .select_chip = xway_select_chip,
- .read_byte = xway_read_byte,
- }
-};
+ return err;
+}
/*
- * Try to find the node inside the DT. If it is available attach out
- * platform_nand_data
+ * Remove a NAND device.
*/
-static int __init xway_register_nand(void)
+static int xway_nand_remove(struct platform_device *pdev)
{
- struct device_node *node;
- struct platform_device *pdev;
-
- node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
- if (!node)
- return -ENOENT;
- pdev = of_find_device_by_node(node);
- if (!pdev)
- return -EINVAL;
- pdev->dev.platform_data = &xway_nand_data;
- of_node_put(node);
+ struct xway_nand_data *data = platform_get_drvdata(pdev);
+
+ nand_release(nand_to_mtd(&data->chip));
+
return 0;
}
-subsys_initcall(xway_register_nand);
+static const struct of_device_id xway_nand_match[] = {
+ { .compatible = "lantiq,nand-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_nand_match);
+
+static struct platform_driver xway_nand_driver = {
+ .probe = xway_nand_probe,
+ .remove = xway_nand_remove,
+ .driver = {
+ .name = "lantiq,nand-xway",
+ .of_match_table = xway_nand_match,
+ },
+};
+
+module_platform_driver(xway_nand_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a4b029a417f0..1a6d0e367b89 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -3188,13 +3188,13 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t tmp_retlen;
ret = action(mtd, from, len, &tmp_retlen, buf);
+ if (ret)
+ break;
buf += tmp_retlen;
len -= tmp_retlen;
*retlen += tmp_retlen;
- if (ret)
- break;
}
otp_pages--;
}
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index d42c98e1f581..4a682ee0f632 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -29,6 +29,26 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && ARM
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+ Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
config SPI_FSL_QUADSPI
tristate "Freescale Quad SPI controller"
depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
@@ -38,6 +58,13 @@ config SPI_FSL_QUADSPI
This controller does not support generic SPI. It only supports
SPI NOR.
+config SPI_HISI_SFC
+ tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ This enables support for the Hisilicon SPI-NOR flash controller.
+
config SPI_NXP_SPIFI
tristate "NXP SPI Flash Interface (SPIFI)"
depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 0bf3a7f81675..121695e83542 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
new file mode 100644
index 000000000000..47937d9beec6
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel-quadspi.c
@@ -0,0 +1,732 @@
+/*
+ * Driver for Atmel QSPI Controller
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+
+/* QSPI register offsets */
+#define QSPI_CR 0x0000 /* Control Register */
+#define QSPI_MR 0x0004 /* Mode Register */
+#define QSPI_RD 0x0008 /* Receive Data Register */
+#define QSPI_TD 0x000c /* Transmit Data Register */
+#define QSPI_SR 0x0010 /* Status Register */
+#define QSPI_IER 0x0014 /* Interrupt Enable Register */
+#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
+#define QSPI_IMR 0x001c /* Interrupt Mask Register */
+#define QSPI_SCR 0x0020 /* Serial Clock Register */
+
+#define QSPI_IAR 0x0030 /* Instruction Address Register */
+#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+
+#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
+#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+
+#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
+#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
+
+#define QSPI_VERSION 0x00FC /* Version Register */
+
+
+/* Bitfields in QSPI_CR (Control Register) */
+#define QSPI_CR_QSPIEN BIT(0)
+#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_LASTXFER BIT(24)
+
+/* Bitfields in QSPI_MR (Mode Register) */
+#define QSPI_MR_SSM BIT(0)
+#define QSPI_MR_LLB BIT(1)
+#define QSPI_MR_WDRBT BIT(2)
+#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
+#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
+#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
+#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
+#define QSPI_SR_RDRF BIT(0)
+#define QSPI_SR_TDRE BIT(1)
+#define QSPI_SR_TXEMPTY BIT(2)
+#define QSPI_SR_OVRES BIT(3)
+#define QSPI_SR_CSR BIT(8)
+#define QSPI_SR_CSS BIT(9)
+#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_QSPIENS BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL BIT(0)
+#define QSPI_SCR_CPHA BIT(1)
+#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Instruction Code Register) */
+#define QSPI_ICR_INST_MASK GENMASK(7, 0)
+#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_INSTEN BIT(4)
+#define QSPI_IFR_ADDREN BIT(5)
+#define QSPI_IFR_OPTEN BIT(6)
+#define QSPI_IFR_DATAEN BIT(7)
+#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT (0 << 8)
+#define QSPI_IFR_OPTL_2BIT (1 << 8)
+#define QSPI_IFR_OPTL_4BIT (2 << 8)
+#define QSPI_IFR_OPTL_8BIT (3 << 8)
+#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 12)
+#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN BIT(0)
+#define QSPI_SMR_RVDIS BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
+#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
+
+
+struct atmel_qspi {
+ void __iomem *regs;
+ void __iomem *mem;
+ struct clk *clk;
+ struct platform_device *pdev;
+ u32 pending;
+
+ struct spi_nor nor;
+ u32 clk_rate;
+ struct completion cmd_completion;
+};
+
+struct atmel_qspi_command {
+ union {
+ struct {
+ u32 instruction:1;
+ u32 address:3;
+ u32 mode:1;
+ u32 dummy:1;
+ u32 data:1;
+ u32 reserved:25;
+ } bits;
+ u32 word;
+ } enable;
+ u8 instruction;
+ u8 mode;
+ u8 num_mode_cycles;
+ u8 num_dummy_cycles;
+ u32 address;
+
+ size_t buf_len;
+ const void *tx_buf;
+ void *rx_buf;
+};
+
+/* Register access functions */
+static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
+{
+ return readl_relaxed(aq->regs + reg);
+}
+
+static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
+{
+ writel_relaxed(value, aq->regs + reg);
+}
+
+static int atmel_qspi_run_transfer(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd)
+{
+ void __iomem *ahb_mem;
+
+ /* Fall back to a PIO transfer (memcpy() DOES NOT work!) */
+ ahb_mem = aq->mem;
+ if (cmd->enable.bits.address)
+ ahb_mem += cmd->address;
+ if (cmd->tx_buf)
+ _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len);
+ else
+ _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len);
+
+ return 0;
+}
+
+#ifdef DEBUG
+static void atmel_qspi_debug_command(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd,
+ u32 ifr)
+{
+ u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+ size_t len = 0;
+ int i;
+
+ if (cmd->enable.bits.instruction)
+ cmd_buf[len++] = cmd->instruction;
+
+ for (i = cmd->enable.bits.address-1; i >= 0; --i)
+ cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff;
+
+ if (cmd->enable.bits.mode)
+ cmd_buf[len++] = cmd->mode;
+
+ if (cmd->enable.bits.dummy) {
+ int num = cmd->num_dummy_cycles;
+
+ switch (ifr & QSPI_IFR_WIDTH_MASK) {
+ case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+ case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+ case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+ num >>= 3;
+ break;
+ case QSPI_IFR_WIDTH_DUAL_IO:
+ case QSPI_IFR_WIDTH_DUAL_CMD:
+ num >>= 2;
+ break;
+ case QSPI_IFR_WIDTH_QUAD_IO:
+ case QSPI_IFR_WIDTH_QUAD_CMD:
+ num >>= 1;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < num; ++i)
+ cmd_buf[len++] = 0;
+ }
+
+ /* Dump the SPI command */
+ print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE,
+ 32, 1, cmd_buf, len, false);
+
+#ifdef VERBOSE_DEBUG
+ /* If verbose debug is enabled, also dump the TX data */
+ if (cmd->enable.bits.data && cmd->tx_buf)
+ print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE,
+ 32, 1, cmd->tx_buf, cmd->buf_len, false);
+#endif
+}
+#else
+#define atmel_qspi_debug_command(aq, cmd, ifr)
+#endif
+
+static int atmel_qspi_run_command(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd,
+ u32 ifr_tfrtyp, u32 ifr_width)
+{
+ u32 iar, icr, ifr, sr;
+ int err = 0;
+
+ iar = 0;
+ icr = 0;
+ ifr = ifr_tfrtyp | ifr_width;
+
+ /* Compute instruction parameters */
+ if (cmd->enable.bits.instruction) {
+ icr |= QSPI_ICR_INST(cmd->instruction);
+ ifr |= QSPI_IFR_INSTEN;
+ }
+
+ /* Compute address parameters */
+ switch (cmd->enable.bits.address) {
+ case 4:
+ ifr |= QSPI_IFR_ADDRL;
+ /* fall through to the 24-bit (3-byte) address case. */
+ case 3:
+ iar = (cmd->enable.bits.data) ? 0 : cmd->address;
+ ifr |= QSPI_IFR_ADDREN;
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Compute option parameters */
+ if (cmd->enable.bits.mode && cmd->num_mode_cycles) {
+ u32 mode_cycle_bits, mode_bits;
+
+ icr |= QSPI_ICR_OPT(cmd->mode);
+ ifr |= QSPI_IFR_OPTEN;
+
+ switch (ifr & QSPI_IFR_WIDTH_MASK) {
+ case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+ case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+ case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+ mode_cycle_bits = 1;
+ break;
+ case QSPI_IFR_WIDTH_DUAL_IO:
+ case QSPI_IFR_WIDTH_DUAL_CMD:
+ mode_cycle_bits = 2;
+ break;
+ case QSPI_IFR_WIDTH_QUAD_IO:
+ case QSPI_IFR_WIDTH_QUAD_CMD:
+ mode_cycle_bits = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mode_bits = cmd->num_mode_cycles * mode_cycle_bits;
+ switch (mode_bits) {
+ case 1:
+ ifr |= QSPI_IFR_OPTL_1BIT;
+ break;
+
+ case 2:
+ ifr |= QSPI_IFR_OPTL_2BIT;
+ break;
+
+ case 4:
+ ifr |= QSPI_IFR_OPTL_4BIT;
+ break;
+
+ case 8:
+ ifr |= QSPI_IFR_OPTL_8BIT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Set number of dummy cycles */
+ if (cmd->enable.bits.dummy)
+ ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles);
+
+ /* Set data enable */
+ if (cmd->enable.bits.data) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ /* Special case for Continuous Read Mode */
+ if (!cmd->tx_buf && !cmd->rx_buf)
+ ifr |= QSPI_IFR_CRM;
+ }
+
+ /* Clear pending interrupts */
+ (void)qspi_readl(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers */
+ atmel_qspi_debug_command(aq, cmd, ifr);
+ qspi_writel(aq, QSPI_IAR, iar);
+ qspi_writel(aq, QSPI_ICR, icr);
+ qspi_writel(aq, QSPI_IFR, ifr);
+
+ /* Skip to the final steps if there is no data */
+ if (!cmd->enable.bits.data)
+ goto no_data;
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)qspi_readl(aq, QSPI_IFR);
+
+ /* Stop here for continuous read */
+ if (!cmd->tx_buf && !cmd->rx_buf)
+ return 0;
+ /* Send/Receive data */
+ err = atmel_qspi_run_transfer(aq, cmd);
+
+ /* Release the chip-select */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+
+ if (err)
+ return err;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ /*
+ * If verbose debug is enabled, also dump the RX data in addition to
+ * the SPI command previously dumped by atmel_qspi_debug_command()
+ */
+ if (cmd->rx_buf)
+ print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE,
+ 32, 1, cmd->rx_buf, cmd->buf_len, false);
+#endif
+no_data:
+ /* Poll INSTRuction End status */
+ sr = qspi_readl(aq, QSPI_SR);
+ if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ return err;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+ qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
+
+ return err;
+}
+
+static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.data = 1;
+ cmd.instruction = opcode;
+ cmd.rx_buf = buf;
+ cmd.buf_len = len;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.data = (buf != NULL && len > 0);
+ cmd.instruction = opcode;
+ cmd.tx_buf = buf;
+ cmd.buf_len = len;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *write_buf)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+ ssize_t ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.enable.bits.data = 1;
+ cmd.instruction = nor->program_opcode;
+ cmd.address = (u32)to;
+ cmd.tx_buf = write_buf;
+ cmd.buf_len = len;
+ ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+ return (ret < 0) ? ret : len;
+}
+
+static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.instruction = nor->erase_opcode;
+ cmd.address = (u32)offs;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+ u8 num_mode_cycles, num_dummy_cycles;
+ u32 ifr_width;
+ ssize_t ret;
+
+ switch (nor->flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
+ break;
+
+ case SPI_NOR_DUAL:
+ ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT;
+ break;
+
+ case SPI_NOR_QUAD:
+ ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (nor->read_dummy >= 2) {
+ num_mode_cycles = 2;
+ num_dummy_cycles = nor->read_dummy - 2;
+ } else {
+ num_mode_cycles = nor->read_dummy;
+ num_dummy_cycles = 0;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.enable.bits.mode = (num_mode_cycles > 0);
+ cmd.enable.bits.dummy = (num_dummy_cycles > 0);
+ cmd.enable.bits.data = 1;
+ cmd.instruction = nor->read_opcode;
+ cmd.address = (u32)from;
+ cmd.mode = 0xff; /* This value prevents the memory from entering the 0-4-4 mode */
+ cmd.num_mode_cycles = num_mode_cycles;
+ cmd.num_dummy_cycles = num_dummy_cycles;
+ cmd.rx_buf = read_buf;
+ cmd.buf_len = len;
+ ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
+ ifr_width);
+ return (ret < 0) ? ret : len;
+}
+
+static int atmel_qspi_init(struct atmel_qspi *aq)
+{
+ unsigned long src_rate;
+ u32 mr, scr, scbr;
+
+ /* Reset the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+
+ /* Set the QSPI controller in Serial Memory Mode */
+ mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM;
+ qspi_writel(aq, QSPI_MR, mr);
+
+ src_rate = clk_get_rate(aq->clk);
+ if (!src_rate)
+ return -EINVAL;
+
+ /* Compute the QSPI baudrate */
+ scbr = DIV_ROUND_UP(src_rate, aq->clk_rate);
+ if (scbr > 0)
+ scbr--;
+ scr = QSPI_SCR_SCBR(scbr);
+ qspi_writel(aq, QSPI_SCR, scr);
+
+ /* Enable the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
+
+ return 0;
+}
+
+static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
+ u32 status, mask, pending;
+
+ status = qspi_readl(aq, QSPI_SR);
+ mask = qspi_readl(aq, QSPI_IMR);
+ pending = status & mask;
+
+ if (!pending)
+ return IRQ_NONE;
+
+ aq->pending |= pending;
+ if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ complete(&aq->cmd_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int atmel_qspi_probe(struct platform_device *pdev)
+{
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct atmel_qspi *aq;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int irq, err = 0;
+
+ if (of_get_child_count(np) != 1)
+ return -ENODEV;
+ child = of_get_next_child(np, NULL);
+
+ aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL);
+ if (!aq) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ platform_set_drvdata(pdev, aq);
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
+ /* Map the registers */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ aq->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->regs)) {
+ dev_err(&pdev->dev, "missing registers\n");
+ err = PTR_ERR(aq->regs);
+ goto exit;
+ }
+
+ /* Map the AHB memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
+ aq->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->mem)) {
+ dev_err(&pdev->dev, "missing AHB memory\n");
+ err = PTR_ERR(aq->mem);
+ goto exit;
+ }
+
+ /* Get the peripheral clock */
+ aq->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(aq->clk)) {
+ dev_err(&pdev->dev, "missing peripheral clock\n");
+ err = PTR_ERR(aq->clk);
+ goto exit;
+ }
+
+ /* Enable the peripheral clock */
+ err = clk_prepare_enable(aq->clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
+ goto exit;
+ }
+
+ /* Request the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ err = irq;
+ goto disable_clk;
+ }
+ err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
+ 0, dev_name(&pdev->dev), aq);
+ if (err)
+ goto disable_clk;
+
+ /* Setup the spi-nor */
+ nor = &aq->nor;
+ mtd = &nor->mtd;
+
+ nor->dev = &pdev->dev;
+ spi_nor_set_flash_node(nor, child);
+ nor->priv = aq;
+ mtd->priv = nor;
+
+ nor->read_reg = atmel_qspi_read_reg;
+ nor->write_reg = atmel_qspi_write_reg;
+ nor->read = atmel_qspi_read;
+ nor->write = atmel_qspi_write;
+ nor->erase = atmel_qspi_erase;
+
+ err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate);
+ if (err < 0)
+ goto disable_clk;
+
+ err = atmel_qspi_init(aq);
+ if (err)
+ goto disable_clk;
+
+ err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ if (err)
+ goto disable_clk;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto disable_clk;
+
+ of_node_put(child);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(aq->clk);
+exit:
+ of_node_put(child);
+
+ return err;
+}
+
+static int atmel_qspi_remove(struct platform_device *pdev)
+{
+ struct atmel_qspi *aq = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&aq->nor.mtd);
+ qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
+ clk_disable_unprepare(aq->clk);
+ return 0;
+}
+
+
+static const struct of_device_id atmel_qspi_dt_ids[] = {
+ { .compatible = "atmel,sama5d2-qspi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
+
+static struct platform_driver atmel_qspi_driver = {
+ .driver = {
+ .name = "atmel_qspi",
+ .of_match_table = atmel_qspi_dt_ids,
+ },
+ .probe = atmel_qspi_probe,
+ .remove = atmel_qspi_remove,
+};
+module_platform_driver(atmel_qspi_driver);
+
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
+MODULE_DESCRIPTION("Atmel QSPI Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
new file mode 100644
index 000000000000..d403ba7b8f43
--- /dev/null
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -0,0 +1,1299 @@
+/*
+ * Driver for Cadence QSPI Controller
+ *
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/timer.h>
+
+#define CQSPI_NAME "cadence-qspi"
+#define CQSPI_MAX_CHIPSELECT 16
+
+struct cqspi_st;
+
+struct cqspi_flash_pdata {
+ struct spi_nor nor;
+ struct cqspi_st *cqspi;
+ u32 clk_rate;
+ u32 read_delay;
+ u32 tshsl_ns;
+ u32 tsd2d_ns;
+ u32 tchsh_ns;
+ u32 tslch_ns;
+ u8 inst_width;
+ u8 addr_width;
+ u8 data_width;
+ u8 cs;
+ bool registered;
+};
+
+struct cqspi_st {
+ struct platform_device *pdev;
+
+ struct clk *clk;
+ unsigned int sclk;
+
+ void __iomem *iobase;
+ void __iomem *ahb_base;
+ struct completion transfer_complete;
+ struct mutex bus_mutex;
+
+ int current_cs;
+ int current_page_size;
+ int current_erase_size;
+ int current_addr_width;
+ unsigned long master_ref_clk_hz;
+ bool is_decoded_cs;
+ u32 fifo_depth;
+ u32 fifo_width;
+ u32 trigger_address;
+ struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+};
+
+/* Operation timeout value */
+#define CQSPI_TIMEOUT_MS 500
+#define CQSPI_READ_TIMEOUT_MS 10
+
+/* Instruction type */
+#define CQSPI_INST_TYPE_SINGLE 0
+#define CQSPI_INST_TYPE_DUAL 1
+#define CQSPI_INST_TYPE_QUAD 2
+
+#define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_DUMMY_BYTES_MAX 4
+#define CQSPI_DUMMY_CLKS_MAX 31
+
+#define CQSPI_STIG_DATA_LEN_MAX 8
+
+/* Register map */
+#define CQSPI_REG_CONFIG 0x00
+#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
+#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
+#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_IDLE_LSB 31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+
+#define CQSPI_REG_RD_INSTR 0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_WR_INSTR 0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
+
+#define CQSPI_REG_DELAY 0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB 0
+#define CQSPI_REG_DELAY_TCHSH_LSB 8
+#define CQSPI_REG_DELAY_TSD2D_LSB 16
+#define CQSPI_REG_DELAY_TSHSL_LSB 24
+#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
+
+#define CQSPI_REG_READCAPTURE 0x10
+#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
+#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
+#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
+
+#define CQSPI_REG_SIZE 0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB 0
+#define CQSPI_REG_SIZE_PAGE_LSB 4
+#define CQSPI_REG_SIZE_BLOCK_LSB 16
+#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
+#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
+
+#define CQSPI_REG_SRAMPARTITION 0x18
+#define CQSPI_REG_INDIRECTTRIGGER 0x1C
+
+#define CQSPI_REG_DMA 0x20
+#define CQSPI_REG_DMA_SINGLE_LSB 0
+#define CQSPI_REG_DMA_BURST_LSB 8
+#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
+#define CQSPI_REG_DMA_BURST_MASK 0xFF
+
+#define CQSPI_REG_REMAP 0x24
+#define CQSPI_REG_MODE_BIT 0x28
+
+#define CQSPI_REG_SDRAMLEVEL 0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+
+#define CQSPI_REG_IRQSTATUS 0x40
+#define CQSPI_REG_IRQMASK 0x44
+
+#define CQSPI_REG_INDIRECTRD 0x60
+#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
+#define CQSPI_REG_INDIRECTRDBYTES 0x6C
+
+#define CQSPI_REG_CMDCTRL 0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+
+#define CQSPI_REG_INDIRECTWR 0x70
+#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
+#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+
+#define CQSPI_REG_CMDADDRESS 0x94
+#define CQSPI_REG_CMDREADDATALOWER 0xA0
+#define CQSPI_REG_CMDREADDATAUPPER 0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+
+/* Interrupt status bits */
+#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
+#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
+#define CQSPI_REG_IRQ_IND_COMP BIT(2)
+#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
+#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
+#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
+#define CQSPI_REG_IRQ_WATERMARK BIT(6)
+#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
+
+#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_IND_SRAM_FULL | \
+ CQSPI_REG_IRQ_IND_COMP)
+
+#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
+ CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_UNDERFLOW)
+
+#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+
+static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ u32 val;
+
+ while (1) {
+ val = readl(reg);
+ if (clear)
+ val = ~val;
+ val &= mask;
+
+ if (val == mask)
+ return 0;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+ }
+}
+
+static bool cqspi_is_idle(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+
+ return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
+}
+
+static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
+
+ reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
+ return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
+}
+
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+ struct cqspi_st *cqspi = dev;
+ unsigned int irq_status;
+
+ /* Read interrupt status */
+ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ /* Clear interrupt */
+ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+ if (irq_status)
+ complete(&cqspi->transfer_complete);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ u32 rdreg = 0;
+
+ rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+ rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+ rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+
+ return rdreg;
+}
+
+static int cqspi_wait_idle(struct cqspi_st *cqspi)
+{
+ const unsigned int poll_idle_retry = 3;
+ unsigned int count = 0;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ while (1) {
+ /*
+ * Read a few times in succession to ensure the controller
+ * is indeed idle, that is, the bit does not transition
+ * low again.
+ */
+ if (cqspi_is_idle(cqspi))
+ count++;
+ else
+ count = 0;
+
+ if (count >= poll_idle_retry)
+ return 0;
+
+ if (time_after(jiffies, timeout)) {
+ /* Timeout, in busy mode. */
+ dev_err(&cqspi->pdev->dev,
+ "QSPI is still busy after %dms timeout.\n",
+ CQSPI_TIMEOUT_MS);
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+}
+
+static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ int ret;
+
+ /* Write the CMDCTRL without start execution. */
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+ /* Start execute */
+ reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+ /* Polling for completion. */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
+ CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
+ if (ret) {
+ dev_err(&cqspi->pdev->dev,
+ "Flash command execution timed out.\n");
+ return ret;
+ }
+
+ /* Polling QSPI idle status. */
+ return cqspi_wait_idle(cqspi);
+}
+
+static int cqspi_command_read(struct spi_nor *nor,
+ const u8 *txbuf, const unsigned n_tx,
+ u8 *rxbuf, const unsigned n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int rdreg;
+ unsigned int reg;
+ unsigned int read_len;
+ int status;
+
+ if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+ dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+ n_rx, rxbuf);
+ return -EINVAL;
+ }
+
+ reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+ writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+ /* 0 means 1 byte. */
+ reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ status = cqspi_exec_flash_cmd(cqspi, reg);
+ if (status)
+ return status;
+
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ read_len = (n_rx > 4) ? 4 : n_rx;
+ memcpy(rxbuf, &reg, read_len);
+ rxbuf += read_len;
+
+ if (n_rx > 4) {
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+ read_len = n_rx - read_len;
+ memcpy(rxbuf, &reg, read_len);
+ }
+
+ return 0;
+}
+
+static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
+ const u8 *txbuf, const unsigned n_tx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ unsigned int data;
+ int ret;
+
+ if (n_tx > 4 || (n_tx && !txbuf)) {
+ dev_err(nor->dev,
+ "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ n_tx, txbuf);
+ return -EINVAL;
+ }
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ if (n_tx) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+ data = 0;
+ memcpy(&data, txbuf, n_tx);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+ }
+
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+ return ret;
+}
+
+static int cqspi_command_write_addr(struct spi_nor *nor,
+ const u8 opcode, const unsigned int addr)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
+
+ return cqspi_exec_flash_cmd(cqspi, reg);
+}
+
+static int cqspi_indirect_read_setup(struct spi_nor *nor,
+ const unsigned int from_addr)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int dummy_clk = 0;
+ unsigned int reg;
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+
+ reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+
+ /* Setup dummy clock cycles */
+ dummy_clk = nor->read_dummy;
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ dummy_clk = CQSPI_DUMMY_CLKS_MAX;
+
+ if (dummy_clk / 8) {
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+ /* Set mode bits high to ensure chip doesn't enter XIP */
+ writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+ /* Need to subtract the mode byte (8 clocks). */
+ if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+ dummy_clk -= 8;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ }
+
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ /* Set address width */
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->addr_width - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static int cqspi_indirect_read_execute(struct spi_nor *nor,
+ u8 *rxbuf, const unsigned n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ void __iomem *ahb_base = cqspi->ahb_base;
+ unsigned int remaining = n_rx;
+ unsigned int bytes_to_read = 0;
+ int ret = 0;
+
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ while (remaining > 0) {
+ ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies
+ (CQSPI_READ_TIMEOUT_MS));
+
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+
+ if (!ret && bytes_to_read == 0) {
+ dev_err(nor->dev, "Indirect read timeout, no bytes\n");
+ ret = -ETIMEDOUT;
+ goto failrd;
+ }
+
+ while (bytes_to_read != 0) {
+ bytes_to_read *= cqspi->fifo_width;
+ bytes_to_read = bytes_to_read > remaining ?
+ remaining : bytes_to_read;
+ readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
+ rxbuf += bytes_to_read;
+ remaining -= bytes_to_read;
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+ }
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
+ CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
+ if (ret) {
+ dev_err(nor->dev,
+ "Indirect read completion error (%i)\n", ret);
+ goto failrd;
+ }
+
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
+
+ return 0;
+
+failrd:
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+ return ret;
+}
+
+static int cqspi_indirect_write_setup(struct spi_nor *nor,
+ const unsigned int to_addr)
+{
+ unsigned int reg;
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+
+ /* Set opcode. */
+ reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+ reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
+
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->addr_width - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static int cqspi_indirect_write_execute(struct spi_nor *nor,
+ const u8 *txbuf, const unsigned n_tx)
+{
+ const unsigned int page_size = nor->page_size;
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int remaining = n_tx;
+ unsigned int write_bytes;
+ int ret;
+
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+
+ while (remaining > 0) {
+ write_bytes = remaining > page_size ? page_size : remaining;
+ writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));
+
+ ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies
+ (CQSPI_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(nor->dev, "Indirect write timeout\n");
+ ret = -ETIMEDOUT;
+ goto failwr;
+ }
+
+ txbuf += write_bytes;
+ remaining -= write_bytes;
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
+ CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
+ if (ret) {
+ dev_err(nor->dev,
+ "Indirect write completion error (%i)\n", ret);
+ goto failwr;
+ }
+
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
+
+ cqspi_wait_idle(cqspi);
+
+ return 0;
+
+failwr:
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect write */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+ return ret;
+}
+
+static void cqspi_chipselect(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int chip_select = f_pdata->cs;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ if (cqspi->is_decoded_cs) {
+ reg |= CQSPI_REG_CONFIG_DECODE_MASK;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
+
+ /* Convert CS when no external decoder is used.
+ * CS0 to 4b'1110
+ * CS1 to 4b'1101
+ * CS2 to 4b'1011
+ * CS3 to 4b'0111
+ */
+ chip_select = 0xF & ~(1 << chip_select);
+ }
+
+ reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
+ reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ unsigned int reg;
+
+ /* configure page size and block size. */
+ reg = readl(iobase + CQSPI_REG_SIZE);
+ reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
+ reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
+ reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg |= (nor->addr_width - 1);
+ writel(reg, iobase + CQSPI_REG_SIZE);
+
+ /* configure the chip select */
+ cqspi_chipselect(nor);
+
+ /* Store the new configuration of the controller */
+ cqspi->current_page_size = nor->page_size;
+ cqspi->current_erase_size = nor->mtd.erasesize;
+ cqspi->current_addr_width = nor->addr_width;
+}
+
+static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
+ const unsigned int ns_val)
+{
+ unsigned int ticks;
+
+ ticks = ref_clk_hz / 1000; /* kHz */
+ ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
+
+ return ticks;
+}
+
+static void cqspi_delay(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ unsigned int tshsl, tchsh, tslch, tsd2d;
+ unsigned int reg;
+ unsigned int tsclk;
+
+ /* calculate the number of ref ticks for one sclk tick */
+ tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
+
+ tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
+ /* this particular value must be at least one sclk */
+ if (tshsl < tsclk)
+ tshsl = tsclk;
+
+ tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
+ tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
+ tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
+
+ reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
+ << CQSPI_REG_DELAY_TSHSL_LSB;
+ reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
+ << CQSPI_REG_DELAY_TCHSH_LSB;
+ reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
+ << CQSPI_REG_DELAY_TSLCH_LSB;
+ reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
+ << CQSPI_REG_DELAY_TSD2D_LSB;
+ writel(reg, iobase + CQSPI_REG_DELAY);
+}
+
+static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
+{
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg, div;
+
+ /* Recalculate the baudrate divisor based on QSPI specification. */
+ div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
+ reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_readdata_capture(struct cqspi_st *cqspi,
+ const unsigned int bypass,
+ const unsigned int delay)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_READCAPTURE);
+
+ if (bypass)
+ reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+ else
+ reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+
+ reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
+ << CQSPI_REG_READCAPTURE_DELAY_LSB);
+
+ reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
+ << CQSPI_REG_READCAPTURE_DELAY_LSB;
+
+ writel(reg, reg_base + CQSPI_REG_READCAPTURE);
+}
+
+static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ if (enable)
+ reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
+ else
+ reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_configure(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ const unsigned int sclk = f_pdata->clk_rate;
+ int switch_cs = (cqspi->current_cs != f_pdata->cs);
+ int switch_ck = (cqspi->sclk != sclk);
+
+ if ((cqspi->current_page_size != nor->page_size) ||
+ (cqspi->current_erase_size != nor->mtd.erasesize) ||
+ (cqspi->current_addr_width != nor->addr_width))
+ switch_cs = 1;
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Switch chip select. */
+ if (switch_cs) {
+ cqspi->current_cs = f_pdata->cs;
+ cqspi_configure_cs_and_sizes(nor);
+ }
+
+ /* Setup baudrate divisor and delays */
+ if (switch_ck) {
+ cqspi->sclk = sclk;
+ cqspi_config_baudrate_div(cqspi);
+ cqspi_delay(nor);
+ cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+ }
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static int cqspi_set_protocol(struct spi_nor *nor, const int read)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+
+ f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+
+ if (read) {
+ switch (nor->flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case SPI_NOR_DUAL:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case SPI_NOR_QUAD:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ cqspi_configure(nor);
+
+ return 0;
+}
+
+static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *buf)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_write_setup(nor, to);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (ret)
+ return ret;
+
+ return (ret < 0) ? ret : len;
+}
+
+static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *buf)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 1);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_read_setup(nor, from);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (ret)
+ return ret;
+
+ return (ret < 0) ? ret : len;
+}
+
+static int cqspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ return ret;
+
+ /* Set up command buffer. */
+ ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ mutex_lock(&cqspi->bus_mutex);
+
+ return 0;
+}
+
+static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ mutex_unlock(&cqspi->bus_mutex);
+}
+
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (!ret)
+ ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+
+ return ret;
+}
+
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (!ret)
+ ret = cqspi_command_write(nor, opcode, buf, len);
+
+ return ret;
+}
+
+static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
+ struct cqspi_flash_pdata *f_pdata,
+ struct device_node *np)
+{
+ if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
+ dev_err(&pdev->dev, "couldn't determine read-delay\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
+ dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cqspi_of_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+
+ cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
+
+ if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
+ dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
+ dev_err(&pdev->dev, "couldn't determine fifo-width\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,trigger-address",
+ &cqspi->trigger_address)) {
+ dev_err(&pdev->dev, "couldn't determine trigger-address\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void cqspi_controller_init(struct cqspi_st *cqspi)
+{
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Configure the remap address register, no remap */
+ writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+
+ /* Disable all interrupts. */
+ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
+
+ /* Configure the SRAM split to 1:1 . */
+ writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
+
+ /* Load indirect trigger address. */
+ writel(cqspi->trigger_address,
+ cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
+
+ /* Program read watermark -- 1/2 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
+ cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
+ /* Program write watermark -- 1/8 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
+ cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ struct cqspi_flash_pdata *f_pdata;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ unsigned int cs;
+ int i, ret;
+
+ /* Get flash device data */
+ for_each_available_child_of_node(dev->of_node, np) {
+ if (of_property_read_u32(np, "reg", &cs)) {
+ dev_err(dev, "Couldn't determine chip select.\n");
+ goto err;
+ }
+
+ if (cs >= CQSPI_MAX_CHIPSELECT) {
+ dev_err(dev, "Chip select %d out of range.\n", cs);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ f_pdata = &cqspi->f_pdata[cs];
+ f_pdata->cqspi = cqspi;
+ f_pdata->cs = cs;
+
+ ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
+ if (ret)
+ goto err;
+
+ nor = &f_pdata->nor;
+ mtd = &nor->mtd;
+
+ mtd->priv = nor;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+ nor->priv = f_pdata;
+
+ nor->read_reg = cqspi_read_reg;
+ nor->write_reg = cqspi_write_reg;
+ nor->read = cqspi_read;
+ nor->write = cqspi_write;
+ nor->erase = cqspi_erase;
+ nor->prepare = cqspi_prep;
+ nor->unprepare = cqspi_unprep;
+
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
+ dev_name(dev), cs);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err;
+
+ f_pdata->registered = true;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+ if (cqspi->f_pdata[i].registered)
+ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+ return ret;
+}
+
+static int cqspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct cqspi_st *cqspi;
+ struct resource *res;
+ struct resource *res_ahb;
+ int ret;
+ int irq;
+
+ cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
+ if (!cqspi)
+ return -ENOMEM;
+
+ mutex_init(&cqspi->bus_mutex);
+ cqspi->pdev = pdev;
+ platform_set_drvdata(pdev, cqspi);
+
+ /* Obtain configuration from OF. */
+ ret = cqspi_of_get_pdata(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot get mandatory OF data.\n");
+ return -ENODEV;
+ }
+
+ /* Obtain QSPI clock. */
+ cqspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cqspi->clk)) {
+ dev_err(dev, "Cannot claim QSPI clock.\n");
+ return PTR_ERR(cqspi->clk);
+ }
+
+ /* Obtain and remap controller address. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cqspi->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cqspi->iobase)) {
+ dev_err(dev, "Cannot remap controller address.\n");
+ return PTR_ERR(cqspi->iobase);
+ }
+
+ /* Obtain and remap AHB address. */
+ res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
+ if (IS_ERR(cqspi->ahb_base)) {
+ dev_err(dev, "Cannot remap AHB address.\n");
+ return PTR_ERR(cqspi->ahb_base);
+ }
+
+ init_completion(&cqspi->transfer_complete);
+
+ /* Obtain IRQ line. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot obtain IRQ.\n");
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cqspi->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable QSPI clock.\n");
+ return ret;
+ }
+
+ cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+
+ ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+ pdev->name, cqspi);
+ if (ret) {
+ dev_err(dev, "Cannot request IRQ.\n");
+ goto probe_irq_failed;
+ }
+
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ ret = cqspi_setup_flash(cqspi, np);
+ if (ret) {
+ dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
+ goto probe_setup_failed;
+ }
+
+ return ret;
+probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
+ clk_disable_unprepare(cqspi->clk);
+ return ret;
+}
+
+static int cqspi_remove(struct platform_device *pdev)
+{
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+ if (cqspi->f_pdata[i].registered)
+ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+
+ cqspi_controller_enable(cqspi, 0);
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ cqspi_controller_enable(cqspi, 0);
+ return 0;
+}
+
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ cqspi_controller_enable(cqspi, 1);
+ return 0;
+}
+
+static const struct dev_pm_ops cqspi__dev_pm_ops = {
+ .suspend = cqspi_suspend,
+ .resume = cqspi_resume,
+};
+
+#define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops)
+#else
+#define CQSPI_DEV_PM_OPS NULL
+#endif
+
+static const struct of_device_id cqspi_dt_ids[] = {
+ {.compatible = "cdns,qspi-nor",},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
+
+static struct platform_driver cqspi_platform_driver = {
+ .probe = cqspi_probe,
+ .remove = cqspi_remove,
+ .driver = {
+ .name = CQSPI_NAME,
+ .pm = CQSPI_DEV_PM_OPS,
+ .of_match_table = cqspi_dt_ids,
+ },
+};
+
+module_platform_driver(cqspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CQSPI_NAME);
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 9ab2b51d54b8..5c82e4ef1904 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -618,9 +618,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
-static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
u8 opcode, unsigned int to, u32 *txbuf,
- unsigned count, size_t *retlen)
+ unsigned count)
{
int ret, i, j;
u32 tmp;
@@ -647,8 +647,8 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
/* Trigger it */
ret = fsl_qspi_runcmd(q, opcode, to, count);
- if (ret == 0 && retlen)
- *retlen += count;
+ if (ret == 0)
+ return count;
return ret;
}
@@ -859,7 +859,9 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
} else if (len > 0) {
ret = fsl_qspi_nor_write(q, nor, opcode, 0,
- (u32 *)buf, len, NULL);
+ (u32 *)buf, len);
+ if (ret > 0)
+ return 0;
} else {
dev_err(q->dev, "invalid cmd %d\n", opcode);
ret = -EINVAL;
@@ -868,20 +870,20 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return ret;
}
-static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
- size_t len, size_t *retlen, const u_char *buf)
+static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *buf)
{
struct fsl_qspi *q = nor->priv;
-
- fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
- (u32 *)buf, len, retlen);
+ ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+ (u32 *)buf, len);
 /* invalidate the data in the AHB buffer. */
fsl_qspi_invalid(q);
+ return ret;
}
-static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
+static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *buf)
{
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
@@ -923,8 +925,7 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
len);
- *retlen += len;
- return 0;
+ return len;
}
static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
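
The fsl-quadspi hunks above track the spi-nor interface change used throughout this series: the ->read()/->write() hooks now return a ssize_t byte count (or a negative errno) instead of filling in a size_t *retlen argument. A stand-alone sketch of how a caller consumes that contract (mock_write() and its 256-byte limit are invented for illustration):

    #include <stdio.h>
    #include <sys/types.h>  /* ssize_t */

    /* Mock ->write() following the new contract: bytes written or -errno. */
    static ssize_t mock_write(size_t len)
    {
            return len > 256 ? 256 : (ssize_t)len;  /* at most one "page" per call */
    }

    int main(void)
    {
            size_t len = 600, retlen = 0;

            while (len) {
                    ssize_t ret = mock_write(len);

                    if (ret < 0)
                            return 1;       /* propagate the error */
                    retlen += ret;
                    len -= ret;
            }
            printf("retlen=%zu\n", retlen); /* retlen=600 */
            return 0;
    }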
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
new file mode 100644
index 000000000000..20378b0d55e9
--- /dev/null
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -0,0 +1,489 @@
+/*
+ * HiSilicon SPI NOR Flash Controller Driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hardware register offsets and field definitions */
+#define FMC_CFG 0x00
+#define FMC_CFG_OP_MODE_MASK BIT_MASK(0)
+#define FMC_CFG_OP_MODE_BOOT 0
+#define FMC_CFG_OP_MODE_NORMAL 1
+#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1)
+#define FMC_CFG_FLASH_SEL_MASK 0x6
+#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5)
+#define FMC_ECC_TYPE_MASK GENMASK(7, 5)
+#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10)
+#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10)
+#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10)
+#define FMC_GLOBAL_CFG 0x04
+#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6)
+#define FMC_SPI_TIMING_CFG 0x08
+#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8)
+#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4)
+#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf)
+#define CS_HOLD_TIME 0x6
+#define CS_SETUP_TIME 0x6
+#define CS_DESELECT_TIME 0xf
+#define FMC_INT 0x18
+#define FMC_INT_OP_DONE BIT(0)
+#define FMC_INT_CLR 0x20
+#define FMC_CMD 0x24
+#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff)
+#define FMC_ADDRL 0x2c
+#define FMC_OP_CFG 0x30
+#define OP_CFG_FM_CS(cs) ((cs) << 11)
+#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7)
+#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4)
+#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf)
+#define FMC_DATA_NUM 0x38
+#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0))
+#define FMC_OP 0x3c
+#define FMC_OP_DUMMY_EN BIT(8)
+#define FMC_OP_CMD1_EN BIT(7)
+#define FMC_OP_ADDR_EN BIT(6)
+#define FMC_OP_WRITE_DATA_EN BIT(5)
+#define FMC_OP_READ_DATA_EN BIT(2)
+#define FMC_OP_READ_STATUS_EN BIT(1)
+#define FMC_OP_REG_OP_START BIT(0)
+#define FMC_DMA_LEN 0x40
+#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0))
+#define FMC_DMA_SADDR_D0 0x4c
+#define HIFMC_DMA_MAX_LEN (4096)
+#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1)
+#define FMC_OP_DMA 0x68
+#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16)
+#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8)
+#define OP_CTRL_RW_OP(op) ((op) << 1)
+#define OP_CTRL_DMA_OP_READY BIT(0)
+#define FMC_OP_READ 0x0
+#define FMC_OP_WRITE 0x1
+#define FMC_WAIT_TIMEOUT 1000000
+
+enum hifmc_iftype {
+ IF_TYPE_STD,
+ IF_TYPE_DUAL,
+ IF_TYPE_DIO,
+ IF_TYPE_QUAD,
+ IF_TYPE_QIO,
+};
+
+struct hifmc_priv {
+ u32 chipselect;
+ u32 clkrate;
+ struct hifmc_host *host;
+};
+
+#define HIFMC_MAX_CHIP_NUM 2
+struct hifmc_host {
+ struct device *dev;
+ struct mutex lock;
+
+ void __iomem *regbase;
+ void __iomem *iobase;
+ struct clk *clk;
+ void *buffer;
+ dma_addr_t dma_buffer;
+
+ struct spi_nor *nor[HIFMC_MAX_CHIP_NUM];
+ u32 num_chip;
+};
+
+static inline int wait_op_finish(struct hifmc_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + FMC_INT, reg,
+ (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
+}
+
+static int get_if_type(enum read_mode flash_read)
+{
+ enum hifmc_iftype if_type;
+
+ switch (flash_read) {
+ case SPI_NOR_DUAL:
+ if_type = IF_TYPE_DUAL;
+ break;
+ case SPI_NOR_QUAD:
+ if_type = IF_TYPE_QUAD;
+ break;
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ default:
+ if_type = IF_TYPE_STD;
+ break;
+ }
+
+ return if_type;
+}
+
+static void hisi_spi_nor_init(struct hifmc_host *host)
+{
+ u32 reg;
+
+ reg = TIMING_CFG_TCSH(CS_HOLD_TIME)
+ | TIMING_CFG_TCSS(CS_SETUP_TIME)
+ | TIMING_CFG_TSHSL(CS_DESELECT_TIME);
+ writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
+}
+
+static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ mutex_lock(&host->lock);
+
+ ret = clk_set_rate(host->clk, priv->clkrate);
+ if (ret)
+ goto out;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mutex_unlock(&host->lock);
+ return ret;
+}
+
+static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ clk_disable_unprepare(host->clk);
+ mutex_unlock(&host->lock);
+}
+
+static int hisi_spi_nor_op_reg(struct spi_nor *nor,
+ u8 opcode, int len, u8 optype)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u32 reg;
+
+ reg = FMC_CMD_CMD1(opcode);
+ writel(reg, host->regbase + FMC_CMD);
+
+ reg = FMC_DATA_NUM_CNT(len);
+ writel(reg, host->regbase + FMC_DATA_NUM);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
+ writel(reg, host->regbase + FMC_OP);
+
+ return wait_op_finish(host);
+}
+
+static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, host->iobase, len);
+ return 0;
+}
+
+static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ if (len)
+ memcpy_toio(host->iobase, buf, len);
+
+ return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN);
+}
+
+static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
+ dma_addr_t dma_buf, size_t len, u8 op_type)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u8 if_type = 0;
+ u32 reg;
+
+ reg = readl(host->regbase + FMC_CFG);
+ reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
+ reg |= FMC_CFG_OP_MODE_NORMAL;
+ reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ : SPI_NOR_ADDR_MODE_3BYTES;
+ writel(reg, host->regbase + FMC_CFG);
+
+ writel(start_off, host->regbase + FMC_ADDRL);
+ writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0);
+ writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ if_type = get_if_type(nor->flash_read);
+ reg |= OP_CFG_MEM_IF_TYPE(if_type);
+ if (op_type == FMC_OP_READ)
+ reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY;
+ reg |= (op_type == FMC_OP_READ)
+ ? OP_CTRL_RD_OPCODE(nor->read_opcode)
+ : OP_CTRL_WR_OPCODE(nor->program_opcode);
+ writel(reg, host->regbase + FMC_OP_DMA);
+
+ return wait_op_finish(host);
+}
+
+static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ ret = hisi_spi_nor_dma_transfer(nor,
+ from + offset, host->dma_buffer, trans, FMC_OP_READ);
+ if (ret) {
+ dev_warn(nor->dev, "DMA read timeout\n");
+ return ret;
+ }
+ memcpy(read_buf + offset, host->buffer, trans);
+ }
+
+ return len;
+}
+
+static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ memcpy(host->buffer, write_buf + offset, trans);
+ ret = hisi_spi_nor_dma_transfer(nor,
+ to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
+ if (ret) {
+ dev_warn(nor->dev, "DMA write timeout\n");
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+/**
+ * Get SPI flash device information and register it as an MTD device.
+ */
+static int hisi_spi_nor_register(struct device_node *np,
+ struct hifmc_host *host)
+{
+ struct device *dev = host->dev;
+ struct spi_nor *nor;
+ struct hifmc_priv *priv;
+ struct mtd_info *mtd;
+ int ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "reg", &priv->chipselect);
+ if (ret) {
+ dev_err(dev, "There's no reg property for %s\n",
+ np->full_name);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &priv->clkrate);
+ if (ret) {
+ dev_err(dev, "There's no spi-max-frequency property for %s\n",
+ np->full_name);
+ return ret;
+ }
+ priv->host = host;
+ nor->priv = priv;
+
+ nor->prepare = hisi_spi_nor_prep;
+ nor->unprepare = hisi_spi_nor_unprep;
+ nor->read_reg = hisi_spi_nor_read_reg;
+ nor->write_reg = hisi_spi_nor_write_reg;
+ nor->read = hisi_spi_nor_read;
+ nor->write = hisi_spi_nor_write;
+ nor->erase = NULL;
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ if (ret)
+ return ret;
+
+ mtd = &nor->mtd;
+ mtd->name = np->name;
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ return ret;
+
+ host->nor[host->num_chip] = nor;
+ host->num_chip++;
+ return 0;
+}
+
+static void hisi_spi_nor_unregister_all(struct hifmc_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->num_chip; i++)
+ mtd_device_unregister(&host->nor[i]->mtd);
+}
+
+static int hisi_spi_nor_register_all(struct hifmc_host *host)
+{
+ struct device *dev = host->dev;
+ struct device_node *np;
+ int ret;
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = hisi_spi_nor_register(np, host);
+ if (ret)
+ goto fail;
+
+ if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+ break;
+ }
+ }
+
+ return 0;
+
+fail:
+ hisi_spi_nor_unregister_all(host);
+ return ret;
+}
+
+static int hisi_spi_nor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct hifmc_host *host;
+ int ret;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, host);
+ host->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ host->regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->regbase))
+ return PTR_ERR(host->regbase);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
+ host->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
+
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Unable to set dma mask\n");
+ return ret;
+ }
+
+ host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ mutex_init(&host->lock);
+ clk_prepare_enable(host->clk);
+ hisi_spi_nor_init(host);
+ ret = hisi_spi_nor_register_all(host);
+ if (ret)
+ mutex_destroy(&host->lock);
+
+ clk_disable_unprepare(host->clk);
+ return ret;
+}
+
+static int hisi_spi_nor_remove(struct platform_device *pdev)
+{
+ struct hifmc_host *host = platform_get_drvdata(pdev);
+
+ hisi_spi_nor_unregister_all(host);
+ mutex_destroy(&host->lock);
+ clk_disable_unprepare(host->clk);
+ return 0;
+}
+
+static const struct of_device_id hisi_spi_nor_dt_ids[] = {
+ { .compatible = "hisilicon,fmc-spi-nor"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids);
+
+static struct platform_driver hisi_spi_nor_driver = {
+ .driver = {
+ .name = "hisi-sfc",
+ .of_match_table = hisi_spi_nor_dt_ids,
+ },
+ .probe = hisi_spi_nor_probe,
+ .remove = hisi_spi_nor_remove,
+};
+module_platform_driver(hisi_spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver");
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 8bed1a4cb79c..e661877c23de 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -243,8 +242,8 @@ static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
}
-static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- size_t *retlen, u_char *buffer)
+static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
@@ -255,13 +254,13 @@ static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
mt8173_nor_set_read_mode(mt8173_nor);
mt8173_nor_set_addr(mt8173_nor, addr);
- for (i = 0; i < length; i++, (*retlen)++) {
+ for (i = 0; i < length; i++) {
ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
}
- return 0;
+ return length;
}
static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
@@ -297,36 +296,44 @@ static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
}
-static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
struct mt8173_nor *mt8173_nor = nor->priv;
+ size_t i;
ret = mt8173_nor_write_buffer_enable(mt8173_nor);
- if (ret < 0)
+ if (ret < 0) {
dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ return ret;
+ }
- while (len >= SFLASH_WRBUF_SIZE) {
+ for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
- if (ret < 0)
+ if (ret < 0) {
dev_err(mt8173_nor->dev, "write buffer failed!\n");
- len -= SFLASH_WRBUF_SIZE;
+ return ret;
+ }
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
- (*retlen) += SFLASH_WRBUF_SIZE;
}
ret = mt8173_nor_write_buffer_disable(mt8173_nor);
- if (ret < 0)
+ if (ret < 0) {
dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ return ret;
+ }
- if (len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
- (u8 *)buf);
- if (ret < 0)
+ if (i < len) {
+ ret = mt8173_nor_write_single_byte(mt8173_nor, to,
+ (int)(len - i), (u8 *)buf);
+ if (ret < 0) {
dev_err(mt8173_nor->dev, "write single byte failed!\n");
- (*retlen) += len;
+ return ret;
+ }
}
+
+ return len;
}
static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
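Editorial note: for illustration, assume a hypothetical SFLASH_WRBUF_SIZE of 128 (the constant is defined elsewhere in this driver); the reworked loop above then behaves as sketched below.

/* Worked example (SFLASH_WRBUF_SIZE assumed to be 128): a 300-byte
 * write is split into two 128-byte buffer programs via
 * mt8173_nor_write_buffer(), after which i == 256 and the remaining
 * 44 bytes fall through to mt8173_nor_write_single_byte().  On
 * success the function now returns len (300); any failure returns the
 * negative errno immediately instead of being silently ignored.
 */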
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index ae428cb0e04b..73a14f40928b 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -172,8 +172,8 @@ static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return nxp_spifi_wait_for_cmd(spifi);
}
-static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
{
struct nxp_spifi *spifi = nor->priv;
int ret;
@@ -183,24 +183,23 @@ static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
return ret;
memcpy_fromio(buf, spifi->flash_base + from, len);
- *retlen += len;
- return 0;
+ return len;
}
-static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
int ret;
+ size_t i;
ret = nxp_spifi_set_memory_mode_off(spifi);
if (ret)
- return;
+ return ret;
writel(to, spifi->io_base + SPIFI_ADDR);
- *retlen += len;
cmd = SPIFI_CMD_DOUT |
SPIFI_CMD_DATALEN(len) |
@@ -209,10 +208,14 @@ static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
- while (len--)
- writeb(*buf++, spifi->io_base + SPIFI_DATA);
+ for (i = 0; i < len; i++)
+ writeb(buf[i], spifi->io_base + SPIFI_DATA);
+
+ ret = nxp_spifi_wait_for_cmd(spifi);
+ if (ret)
+ return ret;
- nxp_spifi_wait_for_cmd(spifi);
+ return len;
}
static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index c52e45594bfd..d0fc165d7d66 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -661,7 +661,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
status_new = (status_old & ~mask & ~SR_TB) | val;
/* Don't protect status register if we're fully unlocked */
- if (lock_len == mtd->size)
+ if (lock_len == 0)
status_new &= ~SR_SRWD;
if (!use_top)
@@ -830,10 +830,26 @@ static const struct flash_info spi_nor_ids[] = {
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
/* GigaDevice */
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
- { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
+ {
+ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -871,6 +887,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -1031,8 +1048,25 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
if (ret)
return ret;
- ret = nor->read(nor, from, len, retlen, buf);
+ while (len) {
+ ret = nor->read(nor, from, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+read_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
return ret;
}
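Editorial note: the loop above depends on the reworked driver contract introduced by this series, in which a controller's ->read() hook returns the number of bytes actually transferred (or a negative errno) instead of updating *retlen. A minimal sketch of a conforming hook follows; EXAMPLE_MAX_XFER and example_do_transfer() are hypothetical placeholders, not real symbols.

/* Sketch only: the shape of a driver ->read() under the new API.  The
 * core loop above keeps calling it, advancing 'from', 'buf' and
 * *retlen by the returned byte count, until 'len' is exhausted.
 */
static ssize_t example_nor_read(struct spi_nor *nor, loff_t from,
				size_t len, u_char *buf)
{
	size_t chunk = min_t(size_t, len, EXAMPLE_MAX_XFER);
	int ret;

	ret = example_do_transfer(nor->priv, from, buf, chunk);
	if (ret)
		return ret;	/* negative errno on failure */

	return chunk;		/* bytes actually read; may be < len */
}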
@@ -1060,10 +1094,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
- nor->write(nor, to, 1, retlen, buf);
+ ret = nor->write(nor, to, 1, buf);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+ (int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
}
to += actual;
@@ -1072,10 +1110,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->program_opcode = SPINOR_OP_AAI_WP;
/* write two bytes. */
- nor->write(nor, to, 2, retlen, buf + actual);
+ ret = nor->write(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
+ (int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
to += 2;
nor->sst_write_second = true;
}
@@ -1084,21 +1126,26 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
write_disable(nor);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
/* Write out trailing byte if it exists. */
if (actual != len) {
write_enable(nor);
nor->program_opcode = SPINOR_OP_BP;
- nor->write(nor, to, 1, retlen, buf + actual);
-
+ ret = nor->write(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+ (int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
write_disable(nor);
+ actual += 1;
}
-time_out:
+sst_write_err:
+ *retlen += actual;
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
}
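Editorial note: a worked example of the SST byte/word programming sequence these hunks harden. The initial "actual = to % 2" alignment step belongs to sst_write() but is not visible in the hunks above, so it is assumed here.

/* Worked example: writing 5 bytes to an even address.  No leading
 * SPINOR_OP_BP byte is needed (actual starts at 0), the AAI loop
 * programs bytes 0-1 and 2-3 with SPINOR_OP_AAI_WP, and the trailing
 * byte 4 is programmed with a final SPINOR_OP_BP, bumping actual to 5.
 * Wherever the sequence fails, the code now jumps to sst_write_err and
 * reports the bytes actually written via *retlen += actual.
 */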
@@ -1112,8 +1159,8 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 page_offset, page_size, i;
- int ret;
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -1121,35 +1168,37 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- write_enable(nor);
-
- page_offset = to & (nor->page_size - 1);
+ for (i = 0; i < len; ) {
+ ssize_t written;
- /* do all the bytes fit onto one page? */
- if (page_offset + len <= nor->page_size) {
- nor->write(nor, to, len, retlen, buf);
- } else {
+ page_offset = (to + i) & (nor->page_size - 1);
+ WARN_ONCE(page_offset,
+ "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
+ page_offset);
/* the size of data remaining on the first page */
- page_size = nor->page_size - page_offset;
- nor->write(nor, to, page_size, retlen, buf);
-
- /* write everything in nor->page_size chunks */
- for (i = page_size; i < len; i += page_size) {
- page_size = len - i;
- if (page_size > nor->page_size)
- page_size = nor->page_size;
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto write_err;
-
- write_enable(nor);
+ write_enable(nor);
+ ret = nor->write(nor, to + i, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
- nor->write(nor, to + i, page_size, retlen, buf + i);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ if (written != page_remain) {
+ dev_err(nor->dev,
+ "While writing %zu bytes written %zd bytes\n",
+ page_remain, written);
+ ret = -EIO;
+ goto write_err;
}
}
- ret = spi_nor_wait_till_ready(nor);
write_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
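Editorial note: to make the page chunking above concrete, here is a worked example for a hypothetical part with nor->page_size == 256; per-iteration writes never cross a page boundary.

/* Worked example, assuming nor->page_size == 256: a 600-byte write
 * starting at to == 0x1f0 is issued by the loop above as:
 *
 *   i = 0:    page_offset = 0x1f0 & 0xff = 0xf0, page_remain = 16
 *   i = 16:   page_offset = 0,                   page_remain = 256
 *   i = 272:  page_offset = 0,                   page_remain = 256
 *   i = 528:  page_offset = 0,                   page_remain = 72
 *
 * Each chunk is preceded by write_enable() and followed by
 * spi_nor_wait_till_ready(); *retlen is advanced only by the bytes
 * the driver reports as written.
 */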
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index daf82ba7aba0..41b13d1cdcc4 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -380,8 +380,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
block_address);
- if (block_address >= ssfdc->map_len)
- BUG();
+ BUG_ON(block_address >= ssfdc->map_len);
block_address = ssfdc->logic_block_map[block_address];
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 09a4ccac53a2..f26dec896afa 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -290,7 +290,7 @@ static int overwrite_test(void)
while (opno < max_overwrite) {
- err = rewrite_page(0);
+ err = write_page(0);
if (err)
break;
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c1aaf0336cf2..903becd31410 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -175,6 +175,40 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
}
/**
+ * add_fastmap - add a Fastmap related physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number the VID header came from
+ * @vid_hdr: the volume identifier header
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a Fastmap
+ * physical eraseblock @pnum and adds it to the 'fastmap' list.
+ * Such blocks can be Fastmap super and data blocks, either from the most
+ * recent Fastmap we're attaching from or from old Fastmaps which will
+ * be erased.
+ */
+static int add_fastmap(struct ubi_attach_info *ai, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = be32_to_cpu(vidh->vol_id);
+ aeb->sqnum = be64_to_cpu(vidh->sqnum);
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->fastmap);
+
+ dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
+ aeb->vol_id, aeb->sqnum);
+
+ return 0;
+}
+
+/**
* validate_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
@@ -803,13 +837,26 @@ out_unlock:
return err;
}
+static bool vol_ignored(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_LAYOUT_VOLUME_ID:
+ return true;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ return ubi_is_fm_vol(vol_id);
+#else
+ return false;
+#endif
+}
+
/**
* scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
- * @vid: The volume ID of the found volume will be stored in this pointer
- * @sqnum: The sqnum of the found volume will be stored in this pointer
+ * @fast: true if we're scanning for a Fastmap
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
@@ -817,9 +864,9 @@ out_unlock:
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int pnum, int *vid, unsigned long long *sqnum)
+ int pnum, bool fast)
{
- long long uninitialized_var(ec);
+ long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
@@ -935,6 +982,20 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
ai->maybe_bad_peb_count += 1;
case UBI_IO_BAD_HDR:
+ /*
+ * If we're facing a bad VID header we have to drop *all*
+ * Fastmap data structures we find. The most recent Fastmap
+ * could be bad, and then there is a chance that we attach
+ * from an old one. On a healthy MTD stack a PEB must not go
+ * bad all of a sudden, but in reality it happens.
+ * So let's be paranoid and help find the root cause by
+ * falling back to scanning mode instead of attaching with a
+ * bad EBA table and causing data corruption which is hard to
+ * analyze.
+ */
+ if (fast)
+ ai->force_full_scan = 1;
+
if (ec_err)
/*
* Both headers are corrupted. There is a possibility
@@ -991,21 +1052,15 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
vol_id = be32_to_cpu(vidh->vol_id);
- if (vid)
- *vid = vol_id;
- if (sqnum)
- *sqnum = be64_to_cpu(vidh->sqnum);
- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- if (vol_id != UBI_FM_SB_VOLUME_ID
- && vol_id != UBI_FM_DATA_VOLUME_ID) {
- ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
- vol_id, lnum);
- }
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
@@ -1037,7 +1092,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (ec_err)
ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
pnum);
- err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
+ if (ubi_is_fm_vol(vol_id))
+ err = add_fastmap(ai, pnum, vidh, ec);
+ else
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
if (err)
return err;
@@ -1186,6 +1246,10 @@ static void destroy_ai(struct ubi_attach_info *ai)
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
@@ -1245,7 +1309,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, ai, pnum, NULL, NULL);
+ err = scan_peb(ubi, ai, pnum, false);
if (err < 0)
goto out_vidh;
}
@@ -1311,6 +1375,7 @@ static struct ubi_attach_info *alloc_ai(void)
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
+ INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
sizeof(struct ubi_ainf_peb),
@@ -1326,7 +1391,7 @@ static struct ubi_attach_info *alloc_ai(void)
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
- * scan_fastmap - try to find a fastmap and attach from it.
+ * scan_fast - try to find a fastmap and attach from it.
* @ubi: UBI device description object
* @ai: attach info object
*
@@ -1337,52 +1402,58 @@ static struct ubi_attach_info *alloc_ai(void)
*/
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
- int err, pnum, fm_anchor = -1;
- unsigned long long max_sqnum = 0;
+ int err, pnum;
+ struct ubi_attach_info *scan_ai;
err = -ENOMEM;
+ scan_ai = alloc_ai();
+ if (!scan_ai)
+ goto out;
+
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out;
+ goto out_ai;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
- int vol_id = -1;
- unsigned long long sqnum = -1;
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
+ err = scan_peb(ubi, scan_ai, pnum, true);
if (err < 0)
goto out_vidh;
-
- if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
- max_sqnum = sqnum;
- fm_anchor = pnum;
- }
}
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- if (fm_anchor < 0)
- return UBI_NO_FASTMAP;
+ if (scan_ai->force_full_scan)
+ err = UBI_NO_FASTMAP;
+ else
+ err = ubi_scan_fastmap(ubi, *ai, scan_ai);
- destroy_ai(*ai);
- *ai = alloc_ai();
- if (!*ai)
- return -ENOMEM;
+ if (err) {
+ /*
+ * Didn't attach via fastmap, do a full scan but reuse what
+ * we've already scanned.
+ */
+ destroy_ai(*ai);
+ *ai = scan_ai;
+ } else
+ destroy_ai(scan_ai);
- return ubi_scan_fastmap(ubi, *ai, fm_anchor);
+ return err;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
+out_ai:
+ destroy_ai(scan_ai);
out:
return err;
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index ef3618299494..0680516bb472 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -874,7 +874,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
if (ubi && mtd->index == ubi->mtd->index) {
- ubi_err(ubi, "mtd%d is already attached to ubi%d",
+ pr_err("ubi: mtd%d is already attached to ubi%d",
mtd->index, i);
return -EEXIST;
}
@@ -889,7 +889,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
+ pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
mtd->index);
return -EINVAL;
}
@@ -900,7 +900,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi_devices[ubi_num])
break;
if (ubi_num == UBI_MAX_DEVICES) {
- ubi_err(ubi, "only %d UBI devices may be created",
+ pr_err("ubi: only %d UBI devices may be created",
UBI_MAX_DEVICES);
return -ENFILE;
}
@@ -910,7 +910,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
/* Make sure ubi_num is not busy */
if (ubi_devices[ubi_num]) {
- ubi_err(ubi, "already exists");
+ pr_err("ubi: ubi%i already exists", ubi_num);
return -EEXIST;
}
}
@@ -992,6 +992,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
goto out_detach;
}
+ /* Make device "available" before it becomes accessible via sysfs */
+ ubi_devices[ubi_num] = ubi;
+
err = uif_init(ubi, &ref);
if (err)
goto out_detach;
@@ -1036,7 +1039,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
- ubi_devices[ubi_num] = ubi;
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
@@ -1047,6 +1049,7 @@ out_uif:
ubi_assert(ref);
uif_close(ubi);
out_detach:
+ ubi_devices[ubi_num] = NULL;
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 990898b9dc72..48eb55f344eb 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -15,20 +15,22 @@
*/
#include <linux/crc32.h>
+#include <linux/bitmap.h>
#include "ubi.h"
/**
* init_seen - allocate memory used for debugging.
* @ubi: UBI device description object
*/
-static inline int *init_seen(struct ubi_device *ubi)
+static inline unsigned long *init_seen(struct ubi_device *ubi)
{
- int *ret;
+ unsigned long *ret;
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+ ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
+ GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -39,7 +41,7 @@ static inline int *init_seen(struct ubi_device *ubi)
* free_seen - free the seen logic integer array.
* @seen: integer array of @ubi->peb_count size
*/
-static inline void free_seen(int *seen)
+static inline void free_seen(unsigned long *seen)
{
kfree(seen);
}
@@ -50,12 +52,12 @@ static inline void free_seen(int *seen)
* @pnum: The PEB to be marked as seen
* @seen: integer array of @ubi->peb_count size
*/
-static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
if (!ubi_dbg_chk_fastmap(ubi) || !seen)
return;
- seen[pnum] = 1;
+ set_bit(pnum, seen);
}
/**
@@ -63,7 +65,7 @@ static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
* @ubi: UBI device description object
* @seen: integer array of @ubi->peb_count size
*/
-static int self_check_seen(struct ubi_device *ubi, int *seen)
+static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
int pnum, ret = 0;
@@ -71,7 +73,7 @@ static int self_check_seen(struct ubi_device *ubi, int *seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+ if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
@@ -578,7 +580,7 @@ static int count_fastmap_pebs(struct ubi_attach_info *ai)
list_for_each_entry(aeb, &ai->free, u.list)
n++;
- ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
n++;
@@ -850,27 +852,57 @@ fail:
}
/**
+ * find_fm_anchor - find the most recent Fastmap superblock (anchor)
+ * @ai: UBI attach info to be filled
+ */
+static int find_fm_anchor(struct ubi_attach_info *ai)
+{
+ int ret = -1;
+ struct ubi_ainf_peb *aeb;
+ unsigned long long max_sqnum = 0;
+
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
+ max_sqnum = aeb->sqnum;
+ ret = aeb->pnum;
+ }
+ }
+
+ return ret;
+}
+
+/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
* @ai: UBI attach info to be filled
- * @fm_anchor: The fastmap starts at this PEB
+ * @scan_ai: UBI attach info from the first 64 PEBs,
+ * used to find the most recent Fastmap data structure
*
* Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
* UBI_BAD_FASTMAP if one was found but is not usable.
* < 0 indicates an internal error.
*/
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int fm_anchor)
+ struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
- int i, used_blocks, pnum, ret = 0;
+ struct ubi_ainf_peb *tmp_aeb, *aeb;
+ int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
+ fm_anchor = find_fm_anchor(scan_ai);
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ /* Move all (possible) fastmap blocks into our new attach structure. */
+ list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
+ list_move_tail(&aeb->u.list, &ai->fastmap);
+
down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -945,6 +977,13 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto free_hdr;
}
+ if (i == 0 && pnum != fm_anchor) {
+ ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
+ pnum, fm_anchor);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
@@ -1102,7 +1141,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
- int *seen_pebs = NULL;
+ unsigned long *seen_pebs = NULL;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
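Editorial note: the init_seen()/set_seen()/self_check_seen() hunks above replace the per-PEB int array with a bitmap. A minimal, self-contained sketch of the same idiom follows; the helper names are made up for illustration.

#include <linux/bitmap.h>
#include <linux/slab.h>

/* Allocate one bit per PEB instead of one int per PEB. */
static unsigned long *seen_alloc(int peb_count)
{
	return kcalloc(BITS_TO_LONGS(peb_count), sizeof(unsigned long),
		       GFP_KERNEL);
}

/* Record that the fastmap referenced this PEB. */
static void seen_mark(unsigned long *seen, int pnum)
{
	set_bit(pnum, seen);
}

/* True if the fastmap never saw this PEB. */
static bool seen_missing(unsigned long *seen, int pnum)
{
	return !test_bit(pnum, seen);
}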
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index cb7c075f2144..1cb287ec32ad 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -99,9 +99,6 @@ static int gluebi_get_device(struct mtd_info *mtd)
struct gluebi_device *gluebi;
int ubi_mode = UBI_READONLY;
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
if (mtd->flags & MTD_WRITEABLE)
ubi_mode = UBI_READWRITE;
@@ -129,7 +126,6 @@ static int gluebi_get_device(struct mtd_info *mtd)
ubi_mode);
if (IS_ERR(gluebi->desc)) {
mutex_unlock(&devices_mutex);
- module_put(THIS_MODULE);
return PTR_ERR(gluebi->desc);
}
gluebi->refcnt += 1;
@@ -153,7 +149,6 @@ static void gluebi_put_device(struct mtd_info *mtd)
gluebi->refcnt -= 1;
if (gluebi->refcnt == 0)
ubi_close_volume(gluebi->desc);
- module_put(THIS_MODULE);
mutex_unlock(&devices_mutex);
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 10cf3b549959..ff8cafe1e5cd 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1019,7 +1019,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
p = (char *)vid_hdr - ubi->vid_hdr_shift;
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
- ubi->vid_hdr_alsize);
+ ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 61d4e99755a4..b616a115c9d3 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -703,6 +703,8 @@ struct ubi_ainf_volume {
* @erase: list of physical eraseblocks which have to be erased
* @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
* those belonging to "preserve"-compatible internal volumes)
+ * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
+ * eraseblocks of the current and not yet erased old fastmap blocks)
* @corr_peb_count: count of PEBs in the @corr list
* @empty_peb_count: count of PEBs which are presumably empty (contain only
* 0xFF bytes)
@@ -713,6 +715,8 @@ struct ubi_ainf_volume {
* @vols_found: number of volumes found
* @highest_vol_id: highest volume ID
* @is_empty: flag indicating whether the MTD device is empty or not
+ * @force_full_scan: flag indicating whether we need to do a full scan and drop
+ * all existing Fastmap data structures
* @min_ec: lowest erase counter value
* @max_ec: highest erase counter value
* @max_sqnum: highest sequence number value
@@ -731,6 +735,7 @@ struct ubi_attach_info {
struct list_head free;
struct list_head erase;
struct list_head alien;
+ struct list_head fastmap;
int corr_peb_count;
int empty_peb_count;
int alien_peb_count;
@@ -739,6 +744,7 @@ struct ubi_attach_info {
int vols_found;
int highest_vol_id;
int is_empty;
+ int force_full_scan;
int min_ec;
int max_ec;
unsigned long long max_sqnum;
@@ -911,7 +917,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int fm_anchor);
+ struct ubi_attach_info *scan_ai);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
#endif
@@ -1105,4 +1111,42 @@ static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
return idx;
}
+/**
+ * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
+ * @vol_id: volume ID
+ */
+static inline bool ubi_is_fm_vol(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_FM_SB_VOLUME_ID:
+ case UBI_FM_DATA_VOLUME_ID:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ubi_find_fm_block - check whether a PEB is part of the current Fastmap.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to look for
+ *
+ * This function returns a wear leveling object if @pnum relates to the current
+ * fastmap, @NULL otherwise.
+ */
+static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
+ int pnum)
+{
+ int i;
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ if (ubi->fm->e[i]->pnum == pnum)
+ return ubi->fm->e[i];
+ }
+ }
+
+ return NULL;
+}
+
#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 10059dfdc1b6..0138f526474a 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}
- /* Change volume table record */
- vtbl_rec = ubi->vtbl[vol_id];
- vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
- if (err)
- goto out_acc;
-
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
@@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_unlock(&ubi->volumes_lock);
}
+ /*
+ * When we shrink a volume we have to flush all pending (erase) work.
+ * Otherwise it can happen that upon next attach UBI finds a LEB with
+ * lnum > highest_lnum and refuses to attach.
+ */
+ if (pebs < 0) {
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 959c7b12e0b1..f4533266d7b2 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1598,19 +1598,44 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
}
}
- dbg_wl("found %i PEBs", found_pebs);
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ cond_resched();
+
+ e = ubi_find_fm_block(ubi, aeb->pnum);
- if (ubi->fm) {
- ubi_assert(ubi->good_peb_count ==
- found_pebs + ubi->fm->used_blocks);
+ if (e) {
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
+ ubi->lookuptbl[e->pnum] = e;
+ } else {
+ /*
+ * Usually old Fastmap PEBs are scheduled for erasure
+ * and we don't have to care about them but if we face
+ * a power cut before scheduling them we need to
+ * take care of them here.
+ */
+ if (ubi->lookuptbl[aeb->pnum])
+ continue;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
- for (i = 0; i < ubi->fm->used_blocks; i++) {
- e = ubi->fm->e[i];
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ wl_entry_destroy(ubi, e);
+ goto out_free;
+ }
}
+
+ found_pebs++;
}
- else
- ubi_assert(ubi->good_peb_count == found_pebs);
+
+ dbg_wl("found %i PEBs", found_pebs);
+
+ ubi_assert(ubi->good_peb_count == found_pebs);
reserved_pebs = WL_RESERVED_PEBS;
ubi_fastmap_init(ubi, &reserved_pebs);
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 547098086773..f81df91a9ce1 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -52,5 +52,5 @@ config CAIF_VIRTIO
The caif driver for CAIF over Virtio.
if CAIF_VIRTIO
-source "drivers/vhost/Kconfig"
+source "drivers/vhost/Kconfig.vringh"
endif
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 4721948a92f6..3a529fbe539f 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -185,8 +185,8 @@ static ssize_t print_frame(char *buf, size_t size, char *frm,
/* Fast forward. */
i = count - cut;
len += snprintf((buf + len), (size - len),
- "--- %u bytes skipped ---\n",
- (int)(count - (cut * 2)));
+ "--- %zu bytes skipped ---\n",
+ count - (cut * 2));
}
if ((!(i % 10)) && i) {
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 21f1068b0804..77ffc4312808 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -233,8 +233,7 @@ static int b53_mmap_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- if (pdata)
- dev->pdata = pdata;
+ dev->pdata = pdata;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cd1d630ae3a9..b2b838724a9b 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1622,7 +1622,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
"switch_0", priv);
if (ret < 0) {
pr_err("failed to request switch_0 IRQ\n");
- goto out_unmap;
+ goto out_mdio;
}
ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
@@ -1679,6 +1679,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
out_free_irq0:
free_irq(priv->irq0, priv);
+out_mdio:
+ bcm_sf2_mdio_unregister(priv);
out_unmap:
base = &priv->core;
for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
@@ -1686,7 +1688,6 @@ out_unmap:
iounmap(*base);
base++;
}
- bcm_sf2_mdio_unregister(priv);
return ret;
}
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 5698f5354c0b..39ca9350d1b2 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -910,7 +910,8 @@ static int ax_probe(struct platform_device *pdev)
iounmap(ax->map2);
exit_mem2:
- release_mem_region(mem2->start, mem2_size);
+ if (mem2)
+ release_mem_region(mem2->start, mem2_size);
exit_mem1:
iounmap(ei_local->mem);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 49025e99fb0e..bda31f308cc2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -815,6 +815,7 @@ static int init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, phynode,
&altera_tse_adjust_link, 0, priv->phy_iface);
}
+ of_node_put(phynode);
if (!phydev) {
netdev_err(dev, "Could not find the PHY\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ebf9224b2d31..a9b2709567ec 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -154,7 +154,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
goto err_rx_ring;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
- snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 7714b7d4026a..37a0f463b8de 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -772,6 +772,7 @@ int xgene_enet_phy_connect(struct net_device *ndev)
phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link,
0, pdata->phy_mode);
+ of_node_put(np);
if (!phy_dev) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 586bedac457d..4bff0f3040df 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -749,14 +749,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
err = of_address_to_resource(dev->of_node, 0, &res_regs);
if (err) {
dev_err(dev, "failed to retrieve registers base from device tree\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_node;
}
/* Get IRQ from device tree */
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "failed to retrieve <irq> value from device tree\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_node;
}
ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -778,7 +780,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(dev, "failed to enable clock\n");
- return err;
+ goto out_put_node;
}
clock_frequency = clk_get_rate(priv->clk);
@@ -787,7 +789,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
if (of_property_read_u32(dev->of_node, "clock-frequency",
&clock_frequency)) {
dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
}
}
@@ -867,6 +870,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
goto out_netif_api;
}
+ of_node_put(phy_node);
return 0;
out_netif_api:
@@ -877,6 +881,9 @@ out_mdio:
out_clken:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+out_put_node:
+ of_node_put(phy_node);
+
return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);
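Editorial note: the of_node_put() additions here and in the neighbouring hunks (altera_tse, xgene, nb8800, octeon_mgmt) all enforce the same rule: a device_node obtained from of_parse_phandle() or a similar lookup holds a reference that must be dropped on every exit path. A minimal sketch of the pattern; the function and property names are illustrative only.

#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *ndev) { }

static int example_connect_phy(struct device *dev, struct net_device *ndev)
{
	struct device_node *phy_np;
	struct phy_device *phydev;

	phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (!phy_np)
		return -ENODEV;

	phydev = of_phy_connect(ndev, phy_np, example_adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	/* Drop the reference taken by of_parse_phandle() in all cases. */
	of_node_put(phy_np);

	return phydev ? 0 : -ENODEV;
}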
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e708e360a9e3..6453148d066a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1251,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct alx_priv *alx;
struct alx_hw *hw;
bool phy_configured;
- int bars, err;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
@@ -1271,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+ err = pci_request_mem_regions(pdev, alx_drv_name);
if (err) {
dev_err(&pdev->dev,
- "pci_request_selected_regions failed(bars:%d)\n", bars);
+ "pci_request_mem_regions failed\n");
goto out_pci_disable;
}
@@ -1401,7 +1400,7 @@ out_unmap:
out_free_netdev:
free_netdev(netdev);
out_pci_release:
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1420,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev)
unregister_netdev(alx->dev);
iounmap(hw->hw_addr);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 0d4ea92a0d37..b047fd607b83 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1504,6 +1504,7 @@ static int nb8800_probe(struct platform_device *pdev)
err_free_dma:
nb8800_dma_free(dev);
err_free_bus:
+ of_node_put(priv->phy_node);
mdiobus_unregister(bus);
err_disable_clk:
clk_disable_unprepare(priv->clk);
@@ -1519,6 +1520,7 @@ static int nb8800_remove(struct platform_device *pdev)
struct nb8800_priv *priv = netdev_priv(ndev);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 87c6b5bdd616..6c8bc5fadac7 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
} else {
/* run platform code to initialize PHY device */
- if (pd->mii_config &&
+ if (pd && pd->mii_config &&
pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii)) {
dev_err(&pdev->dev, "unable to configure mdio bus\n");
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 8fc246ea1fb8..05c1c1dd7751 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
struct bnad_debug_info *regrd_debug = file->private_data;
struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
- int addr, len, rc, i;
+ int rc, i;
+ u32 addr, len;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
@@ -372,7 +373,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
struct bnad_debug_info *debug = file->private_data;
struct bnad *bnad = (struct bnad *)debug->i_private;
struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
- int addr, val, rc;
+ int rc;
+ u32 addr, val;
void __iomem *reg_addr;
unsigned long flags;
void *kern_buf;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index e8bc15bcde70..4ab404f45b21 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1513,6 +1513,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
return 0;
err:
+ of_node_put(p->phy_np);
free_netdev(netdev);
return result;
}
@@ -1520,8 +1521,10 @@ err:
static int octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
+ struct octeon_mgmt *p = netdev_priv(netdev);
unregister_netdev(netdev);
+ of_node_put(p->phy_np);
free_netdev(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index bad253beb8c8..ad3552df0545 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1192,7 +1192,7 @@ out_free: dev_kfree_skb_any(skb);
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
goto out_free;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 4705e2dea423..e0ebe1378cb2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -104,6 +104,8 @@ enum {
enum CPL_error {
CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_MISS = 2,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_BAD_LENGTH = 15,
CPL_ERR_BAD_ROUTE = 18,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1bb57d3fbbe8..c8fd4f8fe1fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
goto out_free;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f15560a06718..48f82ab6c25b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1566,7 +1566,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_rq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-rx-%d", netdev->name, i);
+ "%.11s-rx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[i];
}
@@ -1577,7 +1577,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_wq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-tx-%d", netdev->name, i);
+ "%.11s-tx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[wq];
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index cbe84972ff7a..f0e9e2ef62a0 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1319,7 +1319,7 @@ de4x5_open(struct net_device *dev)
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+ printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 92fd5c0bf4df..c865135f3cb9 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -444,6 +444,8 @@ struct bufdesc_ex {
#define FEC_QUIRK_HAS_RACC (1 << 12)
/* Controller supports interrupt coalesc */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
+/* Interrupt doesn't wake CPU from deep idle */
+#define FEC_QUIRK_ERR006687 (1 << 14)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4040003a74f9..01f7e811739b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -60,6 +60,7 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -2825,6 +2826,9 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_mii_probe;
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -2860,6 +2864,9 @@ fec_enet_close(struct net_device *ndev)
phy_disconnect(ndev->phydev);
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_unused();
+
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
@@ -3304,6 +3311,11 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ if ((of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl")) &&
+ !of_property_read_bool(np, "fsl,err006687-workaround-present"))
+ fep->quirks |= FEC_QUIRK_ERR006687;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3fb87e233c49..5c8afe1a5ccb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -795,6 +795,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
mac_cb->mac_id, np->name);
}
+ of_node_put(np);
return 0;
}
@@ -812,10 +813,12 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
mac_cb->mac_id, np->name);
}
+ of_node_put(np);
- syscon = syscon_node_to_regmap(
- of_parse_phandle(to_of_node(mac_cb->fw_port),
- "serdes-syscon", 0));
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0);
+ syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR_OR_NULL(syscon)) {
dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 2ef4277d00b3..afb5daa3721d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -51,7 +51,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
const char *mode_str;
struct regmap *syscon;
struct resource *res;
- struct device_node *np = dsaf_dev->dev->of_node;
+ struct device_node *np = dsaf_dev->dev->of_node, *np_temp;
struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
if (dev_of_node(dsaf_dev->dev)) {
@@ -102,8 +102,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
if (dev_of_node(dsaf_dev->dev)) {
- syscon = syscon_node_to_regmap(
- of_parse_phandle(np, "subctrl-syscon", 0));
+ np_temp = of_parse_phandle(np, "subctrl-syscon", 0);
+ syscon = syscon_node_to_regmap(np_temp);
+ of_node_put(np_temp);
if (IS_ERR_OR_NULL(syscon)) {
res = platform_get_resource(pdev, IORESOURCE_MEM,
res_idx++);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 41f32c0b341e..02f443958f31 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7330,8 +7330,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -7398,8 +7397,7 @@ static void e1000_remove(struct pci_dev *pdev)
if ((adapter->hw.flash_address) &&
(adapter->hw.mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b8245c734c96..774a5654bf42 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1963,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_dma;
}
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- fm10k_driver_name);
+ err = pci_request_mem_regions(pdev, fm10k_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed: %d\n", err);
@@ -2070,8 +2067,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -2119,8 +2115,7 @@ static void fm10k_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 339d99be4702..81c99e1be708 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10710,8 +10710,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set up pci connections */
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), i40e_driver_name);
+ err = pci_request_mem_regions(pdev, i40e_driver_name);
if (err) {
dev_info(&pdev->dev,
"pci_request_selected_regions failed %d\n", err);
@@ -11208,8 +11207,7 @@ err_ioremap:
kfree(pf);
err_pf_alloc:
pci_disable_pcie_error_reporting(pdev);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -11320,8 +11318,7 @@ static void i40e_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
kfree(pf);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9bcba42abb91..942a89fb0090 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2324,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM),
- igb_driver_name);
+ err = pci_request_mem_regions(pdev, igb_driver_name);
if (err)
goto err_pci_reg;
@@ -2750,8 +2748,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -2916,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adapter->io_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
kfree(adapter->shadow_vfta);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7871f538f0ad..5418c69a7463 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9353,8 +9353,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_using_dac = 0;
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), ixgbe_driver_name);
+ err = pci_request_mem_regions(pdev, ixgbe_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
@@ -9740,8 +9739,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -9808,8 +9806,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
#endif
iounmap(adapter->io_addr);
- pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
e_dev_info("complete\n");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f92018b13d28..d41c28d00b57 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4118,6 +4118,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->bm_priv = NULL;
}
}
+ of_node_put(bm_node);
err = mvneta_init(&pdev->dev, pp);
if (err < 0)
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 0b047178cda1..60227a3452a4 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6234,6 +6234,7 @@ err_free_stats:
err_free_irq:
irq_dispose_mapping(port->irq);
err_free_netdev:
+ of_node_put(phy_node);
free_netdev(dev);
return err;
}
@@ -6244,6 +6245,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ of_node_put(port->phy_node);
free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index aeeb2e79a91a..5d5000c8edf1 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1506,6 +1506,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+ of_node_put(np);
}
/* Hardware supports only 3 ports */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f4497cf4d06d..d728704d0c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -721,6 +721,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
+#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
@@ -935,6 +936,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
+ MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
+ if (field32 & (1 << 17))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -2457,6 +2461,42 @@ int mlx4_NOP(struct mlx4_dev *dev)
MLX4_CMD_NATIVE);
}
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+ const u32 offset[],
+ u32 value[], size_t array_len, u8 port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ size_t i;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ outbox = mailbox->buf;
+
+ ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
+ MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < array_len; i++) {
+ if (offset[i] > MLX4_MAILBOX_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ MLX4_GET(value[i], outbox, offset[i]);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+EXPORT_SYMBOL(mlx4_query_diag_counters);
+
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
u8 port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
goto free_uar;
}
- uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+ uar->index << PAGE_SHIFT,
+ PAGE_SIZE);
if (!uar->bf_map) {
err = -ENOMEM;
goto unamp_uar;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 04bc522605a0..c07f4d01b70e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -63,12 +63,12 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
-static int get_pas_size(void *srqc)
+static int get_pas_size(struct mlx5_srq_attr *in)
{
- u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
- u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
- u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
- u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 log_page_size = in->log_page_size + 12;
+ u32 log_srq_size = in->log_size;
+ u32 log_rq_stride = in->wqe_shift;
+ u32 page_offset = in->page_offset;
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
@@ -78,57 +78,58 @@ static int get_pas_size(void *srqc)
return rq_num_pas * sizeof(u64);
}
-static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
- void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- if (srqc_to_rmpc) {
- switch (MLX5_GET(srqc, srqc, state)) {
- case MLX5_SRQC_STATE_GOOD:
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
- break;
- case MLX5_SRQC_STATE_ERROR:
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
- break;
- default:
- pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
- __LINE__, MLX5_GET(srqc, srqc, state));
- MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
- }
-
- MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
- MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
- MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
- MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
- MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
- MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
- MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
- MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
- } else {
- switch (MLX5_GET(rmpc, rmpc, state)) {
- case MLX5_RMPC_STATE_RDY:
- MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
- break;
- case MLX5_RMPC_STATE_ERR:
- MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
- break;
- default:
- pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
- __func__, __LINE__,
- MLX5_GET(rmpc, rmpc, state));
- MLX5_SET(srqc, srqc, state,
- MLX5_GET(rmpc, rmpc, state));
- }
-
- MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
- MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
- MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
- MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
- MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
- MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
- MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
- MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
- }
+ MLX5_SET(wq, wq, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
+ MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
+ MLX5_SET(wq, wq, log_wq_sz, in->log_size);
+ MLX5_SET(wq, wq, page_offset, in->page_offset);
+ MLX5_SET(wq, wq, lwm, in->lwm);
+ MLX5_SET(wq, wq, pd, in->pd);
+ MLX5_SET64(wq, wq, dbr_addr, in->db_record);
+}
+
+static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
+ MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
+ MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
+ MLX5_SET(srqc, srqc, page_offset, in->page_offset);
+ MLX5_SET(srqc, srqc, lwm, in->lwm);
+ MLX5_SET(srqc, srqc, pd, in->pd);
+ MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
+ MLX5_SET(srqc, srqc, xrcd, in->xrcd);
+ MLX5_SET(srqc, srqc, cqn, in->cqn);
+}
+
+static void get_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(wq, wq, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
+ in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
+ in->log_size = MLX5_GET(wq, wq, log_wq_sz);
+ in->page_offset = MLX5_GET(wq, wq, page_offset);
+ in->lwm = MLX5_GET(wq, wq, lwm);
+ in->pd = MLX5_GET(wq, wq, pd);
+ in->db_record = MLX5_GET64(wq, wq, dbr_addr);
+}
+
+static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(srqc, srqc, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
+ in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
+ in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
+ in->page_offset = MLX5_GET(srqc, srqc, page_offset);
+ in->lwm = MLX5_GET(srqc, srqc, lwm);
+ in->pd = MLX5_GET(srqc, srqc, pd);
+ in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
@@ -149,19 +150,36 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
EXPORT_SYMBOL(mlx5_core_get_srq);
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+ struct mlx5_srq_attr *in)
{
- struct mlx5_create_srq_mbox_out out;
+ u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
+ void *create_in;
+ void *srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
int err;
- memset(&out, 0, sizeof(out));
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
+ pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
+ set_srqc(srqc, in);
+ memcpy(pas, in->pas, pas_size);
- err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
- sizeof(out));
+ MLX5_SET(create_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_SRQ);
- srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err)
+ srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
return err;
}
@@ -169,67 +187,75 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
+ u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
+ MLX5_SET(destroy_srq_in, srq_in, opcode,
+ MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
- (u32 *)(&out), sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ /* arm_srq structs missing using identical xrc ones */
+ u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
+ MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
- sizeof(in), (u32 *)(&out),
- sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
- struct mlx5_query_srq_mbox_in in;
+ u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+ u32 *srq_out;
+ void *srqc;
+ int err;
- memset(&in, 0, sizeof(in));
+ srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+ if (!srq_out)
+ return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
+ MLX5_SET(query_srq_in, srq_in, opcode,
+ MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out,
+ MLX5_ST_SZ_BYTES(query_srq_out));
+ if (err)
+ goto out;
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
- (u32 *)out, sizeof(*out));
+ srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
+ get_srqc(srqc, out);
+ if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+out:
+ kvfree(srq_out);
+ return err;
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in,
- int srq_inlen)
+ struct mlx5_srq_attr *in)
{
u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
void *create_in;
- void *srqc;
void *xrc_srqc;
void *pas;
int pas_size;
int inlen;
int err;
- srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
- pas_size = get_pas_size(srqc);
+ pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
@@ -239,7 +265,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
- memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ set_srqc(xrc_srqc, in);
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
memcpy(pas, in->pas, pas_size);
MLX5_SET(create_xrc_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_XRC_SRQ);
@@ -293,11 +320,10 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
u32 *xrcsrq_out;
- void *srqc;
void *xrc_srqc;
int err;
@@ -317,8 +343,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
xrc_srq_context_entry);
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
- memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ get_srqc(xrc_srqc, out);
+ if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(xrcsrq_out);
@@ -326,26 +353,27 @@ out:
}
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+ struct mlx5_srq_attr *in)
{
void *create_in;
void *rmpc;
- void *srqc;
+ void *wq;
int pas_size;
int inlen;
int err;
- srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
- pas_size = get_pas_size(srqc);
+ pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
- rmpc_srqc_reformat(srqc, rmpc, true);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
@@ -390,11 +418,10 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
u32 *rmp_out;
void *rmpc;
- void *srqc;
int err;
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
@@ -405,9 +432,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (err)
goto out;
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
- rmpc_srqc_reformat(srqc, rmpc, false);
+ get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
+ if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(rmp_out);
@@ -416,15 +444,14 @@ out:
static int create_srq_split(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in,
- int inlen, int is_xrc)
+ struct mlx5_srq_attr *in)
{
if (!dev->issi)
- return create_srq_cmd(dev, srq, in, inlen);
+ return create_srq_cmd(dev, srq, in);
else if (srq->common.res == MLX5_RES_XSRQ)
- return create_xrc_srq_cmd(dev, srq, in, inlen);
+ return create_xrc_srq_cmd(dev, srq, in);
else
- return create_rmp_cmd(dev, srq, in, inlen);
+ return create_rmp_cmd(dev, srq, in);
}
static int destroy_srq_split(struct mlx5_core_dev *dev,
@@ -439,15 +466,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen,
- int is_xrc)
+ struct mlx5_srq_attr *in)
{
int err;
struct mlx5_srq_table *table = &dev->priv.srq_table;
- srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+ if (in->type == IB_SRQT_XRC)
+ srq->common.res = MLX5_RES_XSRQ;
+ else
+ srq->common.res = MLX5_RES_SRQ;
- err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ err = create_srq_split(dev, srq, in);
if (err)
return err;
@@ -502,7 +531,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
if (!dev->issi)
return query_srq_cmd(dev, srq, out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 03a5093ffeb7..28274a6fbafe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -85,6 +85,7 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_rq);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
{
@@ -110,6 +111,7 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
@@ -430,6 +432,7 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
@@ -455,3 +458,4 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_core_destroy_rqt);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 2874dffe77de..eaa37c079a7c 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7412,7 +7412,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
((!ring_data->lro) ||
- (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
+ (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
(dev->features & NETIF_F_RXCSUM)) {
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b26fe267a150..0e4f4a9306b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -509,6 +509,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
DP_ERR(p_hwfn,
"Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
+ rc = -EINVAL;
goto alloc_err;
}
@@ -888,7 +889,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
if (hw_mode & (1 << MODE_MF_SI)) {
u8 pf_id = 0;
- u32 val;
+ u32 val = 0;
if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
if (p_hwfn->rel_pf_id == pf_id) {
@@ -2539,7 +2540,7 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
- if (!rc) {
+ if (rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index a12c6caa6c66..401e738543b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -590,7 +590,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
- u64 init_prod_val = 0;
+ u32 init_prod_val = 0;
u16 abs_l2_queue = 0;
u8 abs_stats_id = 0;
int rc;
@@ -618,7 +618,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
@@ -1664,6 +1664,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
if (IS_PF(cdev)) {
+ int max_vf_vlan_filters = 0;
+
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i)
info->num_queues +=
@@ -1676,7 +1678,12 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_queues = cdev->num_hwfns;
}
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ if (IS_QED_SRIOV(cdev))
+ max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
+ QED_ETH_VF_NUM_VLAN_FILTERS;
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+ max_vf_vlan_filters;
+
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 1f13abb5c316..c7dc34bfdd0a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -659,8 +659,13 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
struct qed_sb_cnt_info sb_cnt_info;
int rc;
int i;
- memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 4d161c751c12..15399da268d9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1404,7 +1404,7 @@ static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
params.anti_spoofing_en = val;
rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
- if (rc) {
+ if (!rc) {
p_vf->spoof_chk = val;
p_vf->req_spoofchk_val = p_vf->spoof_chk;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 9819230947bf..9b780b31b15c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -388,7 +388,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
/* Learn the address of the producer from the response */
if (pp_prod) {
- u64 init_prod_val = 0;
+ u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -396,7 +396,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
rx_qid, *pp_prod, resp->offset);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)&init_prod_val);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 91e7bb0b85c8..e4bd02e46e57 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2064,10 +2064,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
}
/* Remove vlan */
- rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
- if (rc) {
- DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- return -EINVAL;
+ if (vlan->configured) {
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+ vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
}
qede_del_vlan_from_list(edev, vlan);
@@ -3268,6 +3271,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
start.vport_id = 0;
start.drop_ttl0 = true;
start.remove_inner_vlan = vlan_removal_en;
+ start.clear_stats = clear_stats;
rc = edev->ops->vport_start(cdev, &start);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 9777e5713525..f4aa6331b367 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -45,7 +45,6 @@ struct qlcnic_dcb {
static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
{
kfree(dcb);
- dcb = NULL;
}
static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index fd5d1c93b55b..fd4a8e473f11 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1892,7 +1892,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->len += length;
skb->data_len += length;
skb->truesize += length;
- length -= length;
ql_update_mac_hdr_len(qdev, ib_mac_rsp,
lbq_desc->p.pg_chunk.va,
&hlen);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ef668d300800..da4c2d8a4173 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1667,6 +1667,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
int i;
u8 tmp8;
+ napi_disable(&tp->napi);
+ netif_stop_queue(dev);
+ synchronize_sched();
+
netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
RTL_R8(ChipCmd), RTL_R16(IntrStatus),
RTL_R16(IntrMask), RTL_R8(MediaStatus));
@@ -1696,10 +1700,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- if (netif_running(dev)) {
- rtl8139_hw_start (dev);
- netif_wake_queue (dev);
- }
+ napi_enable(&tp->napi);
+ rtl8139_hw_start(dev);
+ netif_wake_queue(dev);
+
spin_unlock_bh(&tp->rx_lock);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0e62d74b09b3..e55638c7505a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1749,13 +1749,21 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
+
+ pm_runtime_get_noresume(d);
rtl_lock_work(tp);
wol->supported = WAKE_ANY;
- wol->wolopts = __rtl8169_get_wol(tp);
+ if (pm_runtime_active(d))
+ wol->wolopts = __rtl8169_get_wol(tp);
+ else
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
+
+ pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1845,6 +1853,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
+
+ pm_runtime_get_noresume(d);
rtl_lock_work(tp);
@@ -1852,12 +1863,17 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
- __rtl8169_set_wol(tp, wol->wolopts);
+ if (pm_runtime_active(d))
+ __rtl8169_set_wol(tp, wol->wolopts);
+ else
+ tp->saved_wolopts = wol->wolopts;
rtl_unlock_work(tp);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+ pm_runtime_put_noidle(d);
+
return 0;
}
@@ -2292,11 +2308,17 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
- rtl8169_update_counters(dev);
+ pm_runtime_get_noresume(d);
+
+ if (pm_runtime_active(d))
+ rtl8169_update_counters(dev);
+
+ pm_runtime_put_noidle(d);
data[0] = le64_to_cpu(counters->tx_packets);
data[1] = le64_to_cpu(counters->rx_packets);
@@ -4458,6 +4480,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -4465,7 +4488,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- rtl_rar_set(tp, dev->dev_addr);
+ pm_runtime_get_noresume(d);
+
+ if (pm_runtime_active(d))
+ rtl_rar_set(tp, dev->dev_addr);
+
+ pm_runtime_put_noidle(d);
return 0;
}
@@ -7868,6 +7896,7 @@ static int rtl8169_runtime_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ rtl_rar_set(tp, dev->dev_addr);
if (!tp->TxDescArray)
return 0;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8377d0220fa8..1e1cc0fad17f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1005,6 +1005,7 @@ static int ravb_phy_init(struct net_device *ndev)
}
phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
priv->phy_interface);
+ of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
return -ENOENT;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7bd910ce8b34..799d58d86e6d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1780,6 +1780,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
sh_eth_adjust_link, 0,
mdp->phy_interface);
+ of_node_put(pn);
if (!phydev)
phydev = ERR_PTR(-ENOENT);
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index edd20c3b2b3d..bec6963ac71e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -135,7 +135,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
if (np_splitter) {
- if (of_address_to_resource(np_splitter, 0, &res_splitter)) {
+ ret = of_address_to_resource(np_splitter, 0, &res_splitter);
+ of_node_put(np_splitter);
+ if (ret) {
dev_info(dev, "Missing emac splitter address\n");
return -EINVAL;
}
@@ -159,14 +161,17 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
dev_err(dev,
"%s: ERROR: missing emac splitter address\n",
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_node_put;
}
dwmac->splitter_base =
devm_ioremap_resource(dev, &res_splitter);
- if (IS_ERR(dwmac->splitter_base))
- return PTR_ERR(dwmac->splitter_base);
+ if (IS_ERR(dwmac->splitter_base)) {
+ ret = PTR_ERR(dwmac->splitter_base);
+ goto err_node_put;
+ }
}
index = of_property_match_string(np_sgmii_adapter, "reg-names",
@@ -178,14 +183,17 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
dev_err(dev,
"%s: ERROR: failed mapping adapter\n",
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_node_put;
}
dwmac->pcs.sgmii_adapter_base =
devm_ioremap_resource(dev, &res_sgmii_adapter);
- if (IS_ERR(dwmac->pcs.sgmii_adapter_base))
- return PTR_ERR(dwmac->pcs.sgmii_adapter_base);
+ if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) {
+ ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base);
+ goto err_node_put;
+ }
}
index = of_property_match_string(np_sgmii_adapter, "reg-names",
@@ -197,22 +205,30 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
dev_err(dev,
"%s: ERROR: failed mapping tse control port\n",
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_node_put;
}
dwmac->pcs.tse_pcs_base =
devm_ioremap_resource(dev, &res_tse_pcs);
- if (IS_ERR(dwmac->pcs.tse_pcs_base))
- return PTR_ERR(dwmac->pcs.tse_pcs_base);
+ if (IS_ERR(dwmac->pcs.tse_pcs_base)) {
+ ret = PTR_ERR(dwmac->pcs.tse_pcs_base);
+ goto err_node_put;
+ }
}
}
dwmac->reg_offset = reg_offset;
dwmac->reg_shift = reg_shift;
dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
dwmac->dev = dev;
+ of_node_put(np_sgmii_adapter);
return 0;
+
+err_node_put:
+ of_node_put(np_sgmii_adapter);
+ return ret;
}
static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c23ccabc2d8a..4c8c60af7985 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3397,6 +3397,7 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ of_node_put(priv->plat->phy_node);
if (priv->stmmac_rst)
reset_control_assert(priv->stmmac_rst);
clk_disable_unprepare(priv->pclk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f7dfc0ae8e9c..756bb548e81a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -113,8 +113,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
return NULL;
axi = kzalloc(sizeof(*axi), GFP_KERNEL);
- if (!axi)
+ if (!axi) {
+ of_node_put(np);
return ERR_PTR(-ENOMEM);
+ }
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
@@ -127,6 +129,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_node_put(np);
return axi;
}
@@ -302,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- of_node_put(np);
+ of_node_put(plat->phy_node);
return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1a93a1f28433..c51f34693eae 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2564,19 +2564,17 @@ clean_ndev_ret:
return ret;
}
-static int cpsw_remove_child_device(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- of_device_unregister(pdev);
-
- return 0;
-}
-
static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
if (priv->data.dual_emac)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
@@ -2584,8 +2582,9 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_ale_destroy(priv->ale);
cpdma_ctlr_destroy(priv->dma);
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
if (priv->data.dual_emac)
free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 73638f7a55d4..19e5f32a8a64 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -357,13 +357,11 @@ EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
- unsigned long flags;
int ret = 0, i;
if (!ctlr)
return -EINVAL;
- spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
@@ -371,7 +369,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
- spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 6e305a82ed43..727a79f3c7dd 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1964,6 +1964,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 2d0beb1b801c..d13e6e15d7b5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -344,7 +344,6 @@ static void free_rxsa(struct rcu_head *head)
crypto_free_aead(sa->key.tfm);
free_percpu(sa->stats);
- macsec_rxsc_put(sa->sc);
kfree(sa);
}
@@ -863,6 +862,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
struct net_device *dev = skb->dev;
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
+ struct macsec_rx_sc *rx_sc = rx_sa->sc;
int len, ret;
u32 pn;
@@ -891,6 +891,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
out:
macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
dev_put(dev);
}
@@ -1106,6 +1107,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
+ sc = sc ? macsec_rxsc_get(sc) : NULL;
if (sc) {
secy = &macsec->secy;
@@ -1180,8 +1182,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
if (IS_ERR(skb)) {
/* the decrypt callback needs the reference */
- if (PTR_ERR(skb) != -EINPROGRESS)
+ if (PTR_ERR(skb) != -EINPROGRESS) {
macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
+ }
rcu_read_unlock();
*pskb = NULL;
return RX_HANDLER_CONSUMED;
@@ -1197,6 +1201,7 @@ deliver:
if (rx_sa)
macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
@@ -1212,6 +1217,7 @@ deliver:
drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
+ macsec_rxsc_put(rx_sc);
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
@@ -1646,7 +1652,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
- if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) {
+ if (IS_ERR(rx_sc)) {
rtnl_unlock();
return PTR_ERR(rx_sc);
}
@@ -3173,6 +3179,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
return err;
+ dev_hold(real_dev);
+
/* need to be already registered so that ->init has run and
* the MAC addr is set
*/
@@ -3201,8 +3209,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec_generation++;
- dev_hold(real_dev);
-
return 0;
del_dev:
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index d94a978024d9..775674808249 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -345,10 +345,8 @@ static int xgene_mdio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
csr_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(csr_base)) {
- dev_err(dev, "Unable to retrieve mac CSR region\n");
+ if (IS_ERR(csr_base))
return PTR_ERR(csr_base);
- }
pdata->mac_csr_addr = csr_base;
pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET;
pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 059f13b60fe0..1882d9828c99 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -869,7 +869,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
- .phy_id_mask = 0x00ffffff,
+ .phy_id_mask = 0x00fffffc,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
@@ -993,7 +993,7 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8001, 0x00ffffff },
+ { PHY_ID_KSZ8001, 0x00fffffc },
{ PHY_ID_KS8737, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 2fc50ec453d0..6f044450b702 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -862,7 +862,7 @@ static int uhdlc_suspend(struct device *dev)
static int uhdlc_resume(struct device *dev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
- struct ucc_tdm *utdm = priv->utdm;
+ struct ucc_tdm *utdm;
struct ucc_tdm_info *ut_info;
struct ucc_fast __iomem *uf_regs;
struct ucc_fast_private *uccf;
@@ -877,6 +877,7 @@ static int uhdlc_resume(struct device *dev)
if (!netif_running(priv->ndev))
return 0;
+ utdm = priv->utdm;
ut_info = priv->ut_info;
uf_info = &ut_info->uf_info;
uf_regs = priv->uf_regs;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 355e1ae665f9..8f0fd41dfd4b 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -139,11 +139,11 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
ar->id.subsystem_vendor, ar->id.subsystem_device);
ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
- config_enabled(CONFIG_ATH10K_DEBUG),
- config_enabled(CONFIG_ATH10K_DEBUGFS),
- config_enabled(CONFIG_ATH10K_TRACING),
- config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
- config_enabled(CONFIG_NL80211_TESTMODE));
+ IS_ENABLED(CONFIG_ATH10K_DEBUG),
+ IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+ IS_ENABLED(CONFIG_ATH10K_TRACING),
+ IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+ IS_ENABLED(CONFIG_NL80211_TESTMODE));
firmware = ar->normal_mode_fw.fw_file.firmware;
if (firmware)
@@ -2424,7 +2424,7 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_simulate_radar);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index fb8e38df9446..0bbd0a00edcc 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3039,7 +3039,7 @@ static void ath10k_regd_update(struct ath10k *ar)
regpair = ar->ath_common.regulatory.regpair;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
nl_dfs_reg = ar->dfs_detector->region;
wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
} else {
@@ -3068,7 +3068,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
request->dfs_region);
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
@@ -7955,7 +7955,7 @@ int ath10k_mac_register(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
ar->ath_common.debug_mask = ATH_DBG_DFS;
ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
@@ -8003,7 +8003,7 @@ err_unregister:
ieee80211_unregister_hw(ar->hw);
err_dfs_detector_exit:
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
err_free:
@@ -8018,7 +8018,7 @@ void ath10k_mac_unregister(struct ath10k *ar)
{
ieee80211_unregister_hw(ar->hw);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 169cd2e783eb..d2462886b75c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3704,7 +3704,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar,
phyerr->tsf_timestamp, tsf, buf_len);
/* Skip event if DFS disabled */
- if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
return;
ATH10K_DFS_STAT_INC(ar, pulses_total);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 4ad6284fc37d..72e2ec67768d 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3881,7 +3881,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
BIT(NL80211_IFTYPE_P2P_CLIENT);
}
- if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
+ if (IS_ENABLED(CONFIG_ATH6KL_REGDOMAIN) &&
test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index a8762711ad74..e2512d5bc0e1 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -731,7 +731,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
struct ath_hw *ah = spec_priv->ah;
u32 rxfilter;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
@@ -806,7 +806,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
char buf[32];
ssize_t len;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
len = min(count, sizeof(buf) - 1);
@@ -1072,7 +1072,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
+ if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index edc74fca60aa..cfa3fe82ade3 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -843,7 +843,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_P2P_GO_CTWIN;
- if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ if (!IS_ENABLED(CONFIG_ATH9K_TX99)) {
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 7594650f214f..a394622c9022 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1250,7 +1250,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
- if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
@@ -1300,7 +1300,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
- if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
}
@@ -1360,7 +1360,7 @@ static void ath9k_enable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
sc->ps_enabled = true;
@@ -1379,7 +1379,7 @@ static void ath9k_disable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
sc->ps_enabled = false;
@@ -1953,7 +1953,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct ieee80211_channel *chan;
int pos;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
spin_lock_bh(&common->cc_lock);
@@ -2003,7 +2003,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
mutex_lock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 32160fca876a..669734252664 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -377,7 +377,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u32 rfilt;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return 0;
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 2303ef96299d..2f8136d50f78 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -352,7 +352,7 @@ dfs_pattern_detector_init(struct ath_common *common,
{
struct dfs_pattern_detector *dpd;
- if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ if (!IS_ENABLED(CONFIG_CFG80211_CERTIFICATION_ONUS))
return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7e15ed9ed31f..f8506037736f 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -116,7 +116,7 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
static bool dynamic_country_user_possible(struct ath_regulatory *reg)
{
- if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ if (IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
return true;
switch (reg->country_code) {
@@ -188,7 +188,7 @@ static bool dynamic_country_user_possible(struct ath_regulatory *reg)
static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
{
- if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
return false;
if (!dynamic_country_user_possible(reg))
return false;
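
The config_enabled() to IS_ENABLED() conversions above are mechanical: IS_ENABLED(option) expands to 1 when the Kconfig option is built in or modular and to 0 otherwise, so the unused branch is discarded at compile time; for the boolean options touched here the result matches the old helper. A minimal sketch of the pattern, where CONFIG_FOO_DFS, struct foo_priv and foo_init_dfs() are hypothetical names:

        #include <linux/kconfig.h>      /* IS_ENABLED() */

        /* The branch is compiled out when the option is disabled,
         * without needing an #ifdef around the call site. */
        static int foo_start_dfs(struct foo_priv *priv)
        {
                if (!IS_ENABLED(CONFIG_FOO_DFS))
                        return 0;

                return foo_init_dfs(priv);
        }
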
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 40d04ef5da9e..0d5c29ae51de 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -551,13 +551,15 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
+ struct pci_dev *pdev;
void __iomem *mmio;
char *buf;
size_t buf_size;
ssize_t ret, off;
- union { u64 v64; u32 v32; u16 v16; } u;
+ union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
ndev = filp->private_data;
+ pdev = ndev_pdev(ndev);
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
@@ -632,6 +634,41 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
"Doorbell Bell -\t\t%#llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Window Size:\n");
+
+ pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR23SZ %hhu\n", u.v8);
+ if (!ndev->bar4_split) {
+ pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR45SZ %hhu\n", u.v8);
+ } else {
+ pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR4SZ %hhu\n", u.v8);
+ pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR5SZ %hhu\n", u.v8);
+ }
+
+ pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR23SZ %hhu\n", u.v8);
+ if (!ndev->bar4_split) {
+ pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR45SZ %hhu\n", u.v8);
+ } else {
+ pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR4SZ %hhu\n", u.v8);
+ pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR5SZ %hhu\n", u.v8);
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
"\nNTB Incoming XLAT:\n");
u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
@@ -669,7 +706,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
"LMT45 -\t\t\t%#018llx\n", u.v64);
}
- if (pdev_is_xeon(ndev->ntb.pdev)) {
+ if (pdev_is_xeon(pdev)) {
if (ntb_topo_is_b2b(ndev->ntb.topo)) {
off += scnprintf(buf + off, buf_size - off,
"\nNTB Outgoing B2B XLAT:\n");
@@ -750,22 +787,22 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
off += scnprintf(buf + off, buf_size - off,
"\nXEON NTB Hardware Errors:\n");
- if (!pci_read_config_word(ndev->ntb.pdev,
+ if (!pci_read_config_word(pdev,
XEON_DEVSTS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"DEVSTS -\t\t%#06x\n", u.v16);
- if (!pci_read_config_word(ndev->ntb.pdev,
+ if (!pci_read_config_word(pdev,
XEON_LINK_STATUS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"LNKSTS -\t\t%#06x\n", u.v16);
- if (!pci_read_config_dword(ndev->ntb.pdev,
+ if (!pci_read_config_dword(pdev,
XEON_UNCERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"UNCERRSTS -\t\t%#06x\n", u.v32);
- if (!pci_read_config_dword(ndev->ntb.pdev,
+ if (!pci_read_config_dword(pdev,
XEON_CORERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"CORERRSTS -\t\t%#06x\n", u.v32);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 2ef9d9130864..d5c5894f252e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -153,6 +153,7 @@ struct ntb_transport_qp {
unsigned int rx_index;
unsigned int rx_max_entry;
unsigned int rx_max_frame;
+ unsigned int rx_alloc_entry;
dma_cookie_t last_cookie;
struct tasklet_struct rxc_db_work;
@@ -480,7 +481,9 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_index - \t%u\n", qp->rx_index);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
+ "rx_max_entry - \t%u\n", qp->rx_max_entry);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -597,9 +600,12 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
{
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
struct ntb_transport_mw *mw;
+ struct ntb_dev *ndev = nt->ndev;
+ struct ntb_queue_entry *entry;
unsigned int rx_size, num_qps_mw;
unsigned int mw_num, mw_count, qp_count;
unsigned int i;
+ int node;
mw_count = nt->mw_count;
qp_count = nt->qp_count;
@@ -626,6 +632,23 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;
+ /*
+  * If the memory window provides more entries than the default,
+  * allocate the extra entries now so the receive free list stays
+  * in sync with the transport frames.
+  */
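dummy-marker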
+ node = dev_to_node(&ndev->dev);
+ for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
+ entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->qp = qp;
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
+ &qp->rx_free_q);
+ qp->rx_alloc_entry++;
+ }
+
qp->remote_rx_info->entry = qp->rx_max_entry - 1;
/* setup the hdr offsets with 0's */
@@ -1037,6 +1060,13 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
int node;
int rc, i;
+ mw_count = ntb_mw_count(ndev);
+ if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
+ dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
+ NTB_TRANSPORT_NAME);
+ return -EIO;
+ }
+
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
"doorbell is unsafe, proceed anyway...\n");
@@ -1052,8 +1082,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
nt->ndev = ndev;
- mw_count = ntb_mw_count(ndev);
-
nt->mw_count = mw_count;
nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
@@ -1722,8 +1750,9 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
+ qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
- for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+ for (i = 0; i < qp->tx_max_entry; i++) {
entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
if (!entry)
goto err2;
@@ -1744,6 +1773,7 @@ err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
+ qp->rx_alloc_entry = 0;
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->tx_dma_chan)
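
The setup_qp_mw change above tops up the receive free list when the negotiated memory window allows more entries than were allocated at queue-creation time, keeping the allocations on the device's NUMA node. A reduced sketch of that top-up loop, where struct foo_entry and the list head are placeholders:

        #include <linux/device.h>
        #include <linux/list.h>
        #include <linux/slab.h>

        struct foo_entry {
                struct list_head node;
        };

        /* Grow a free list from 'have' to 'want' entries on the device's node. */
        static int foo_topup(struct device *dev, struct list_head *free_q,
                             unsigned int have, unsigned int want)
        {
                int node = dev_to_node(dev);
                unsigned int i;

                for (i = have; i < want; i++) {
                        struct foo_entry *e = kzalloc_node(sizeof(*e), GFP_ATOMIC, node);

                        if (!e)
                                return -ENOMEM;
                        list_add_tail(&e->node, free_q);
                }

                return 0;
        }
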
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 8dfce9c9aad0..6a50f20bf1cd 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -58,6 +58,7 @@
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>
+#include <linux/mutex.h>
#define DRIVER_NAME "ntb_perf"
#define DRIVER_DESCRIPTION "PCIe NTB Performance Measurement Tool"
@@ -83,6 +84,10 @@ MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
static struct dentry *perf_debugfs_dir;
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");
@@ -117,6 +122,10 @@ struct pthr_ctx {
int dma_prep_err;
int src_idx;
void *srcs[MAX_SRCS];
+ wait_queue_head_t *wq;
+ int status;
+ u64 copied;
+ u64 diff_us;
};
struct perf_ctx {
@@ -124,23 +133,23 @@ struct perf_ctx {
spinlock_t db_lock;
struct perf_mw mw;
bool link_is_up;
- struct work_struct link_cleanup;
struct delayed_work link_work;
+ wait_queue_head_t link_wq;
struct dentry *debugfs_node_dir;
struct dentry *debugfs_run;
struct dentry *debugfs_threads;
u8 perf_threads;
- bool run;
+ /* mutex ensures only one set of threads run at once */
+ struct mutex run_mutex;
struct pthr_ctx pthr_ctx[MAX_THREADS];
atomic_t tsync;
+ atomic_t tdone;
};
enum {
VERSION = 0,
MW_SZ_HIGH,
MW_SZ_LOW,
- SPAD_MSG,
- SPAD_ACK,
MAX_SPAD
};
@@ -148,10 +157,16 @@ static void perf_link_event(void *ctx)
{
struct perf_ctx *perf = ctx;
- if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1)
+ if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1) {
schedule_delayed_work(&perf->link_work, 2*HZ);
- else
- schedule_work(&perf->link_cleanup);
+ } else {
+ dev_dbg(&perf->ntb->pdev->dev, "link down\n");
+
+ if (!perf->link_is_up)
+ cancel_delayed_work_sync(&perf->link_work);
+
+ perf->link_is_up = false;
+ }
}
static void perf_db_event(void *ctx, int vec)
@@ -271,6 +286,7 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
char __iomem *tmp = dst;
u64 perf, diff_us;
ktime_t kstart, kstop, kdiff;
+ unsigned long last_sleep = jiffies;
chunks = div64_u64(win_size, buf_size);
total_chunks = div64_u64(total, buf_size);
@@ -286,30 +302,40 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
} else
tmp += buf_size;
- /* Probably should schedule every 4GB to prevent soft hang. */
- if (((copied % SZ_4G) == 0) && !use_dma) {
+ /* Probably should schedule every 5s to prevent soft hang. */
+ if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
+ last_sleep = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
+
+ if (unlikely(kthread_should_stop()))
+ break;
}
if (use_dma) {
- pr_info("%s: All DMA descriptors submitted\n", current->comm);
- while (atomic_read(&pctx->dma_sync) != 0)
+ pr_debug("%s: All DMA descriptors submitted\n", current->comm);
+ while (atomic_read(&pctx->dma_sync) != 0) {
+ if (kthread_should_stop())
+ break;
msleep(20);
+ }
}
kstop = ktime_get();
kdiff = ktime_sub(kstop, kstart);
diff_us = ktime_to_us(kdiff);
- pr_info("%s: copied %llu bytes\n", current->comm, copied);
+ pr_debug("%s: copied %llu bytes\n", current->comm, copied);
- pr_info("%s: lasted %llu usecs\n", current->comm, diff_us);
+ pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);
perf = div64_u64(copied, diff_us);
- pr_info("%s: MBytes/s: %llu\n", current->comm, perf);
+ pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);
+
+ pctx->copied = copied;
+ pctx->diff_us = diff_us;
return 0;
}
@@ -331,7 +357,7 @@ static int ntb_perf_thread(void *data)
int rc, node, i;
struct dma_chan *dma_chan = NULL;
- pr_info("kthread %s starting...\n", current->comm);
+ pr_debug("kthread %s starting...\n", current->comm);
node = dev_to_node(&pdev->dev);
@@ -389,7 +415,10 @@ static int ntb_perf_thread(void *data)
pctx->srcs[i] = NULL;
}
- return 0;
+ atomic_inc(&perf->tdone);
+ wake_up(pctx->wq);
+ rc = 0;
+ goto done;
err:
for (i = 0; i < MAX_SRCS; i++) {
@@ -402,6 +431,16 @@ err:
pctx->dma_chan = NULL;
}
+done:
+ /* Wait until we are told to stop */
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
return rc;
}
@@ -472,6 +511,10 @@ static void perf_link_work(struct work_struct *work)
dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
size = perf->mw.phys_size;
+
+ if (max_mw_size && size > max_mw_size)
+ size = max_mw_size;
+
ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);
@@ -496,6 +539,7 @@ static void perf_link_work(struct work_struct *work)
goto out1;
perf->link_is_up = true;
+ wake_up(&perf->link_wq);
return;
@@ -508,18 +552,6 @@ out:
msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}
-static void perf_link_cleanup(struct work_struct *work)
-{
- struct perf_ctx *perf = container_of(work,
- struct perf_ctx,
- link_cleanup);
-
- dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
-
- if (!perf->link_is_up)
- cancel_delayed_work_sync(&perf->link_work);
-}
-
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
struct perf_mw *mw;
@@ -544,16 +576,44 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
{
struct perf_ctx *perf = filp->private_data;
char *buf;
- ssize_t ret, out_offset;
+ ssize_t ret, out_off = 0;
+ struct pthr_ctx *pctx;
+ int i;
+ u64 rate;
if (!perf)
return 0;
- buf = kmalloc(64, GFP_KERNEL);
+ buf = kmalloc(1024, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- out_offset = snprintf(buf, 64, "%d\n", perf->run);
- ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+
+ if (mutex_is_locked(&perf->run_mutex)) {
+ out_off = snprintf(buf, 64, "running\n");
+ goto read_from_buf;
+ }
+
+ for (i = 0; i < MAX_THREADS; i++) {
+ pctx = &perf->pthr_ctx[i];
+
+ if (pctx->status == -ENODATA)
+ break;
+
+ if (pctx->status) {
+ out_off += snprintf(buf + out_off, 1024 - out_off,
+ "%d: error %d\n", i,
+ pctx->status);
+ continue;
+ }
+
+ rate = div64_u64(pctx->copied, pctx->diff_us);
+ out_off += snprintf(buf + out_off, 1024 - out_off,
+ "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
+ i, pctx->copied, pctx->diff_us, rate);
+ }
+
+read_from_buf:
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
kfree(buf);
return ret;
@@ -564,80 +624,90 @@ static void threads_cleanup(struct perf_ctx *perf)
struct pthr_ctx *pctx;
int i;
- perf->run = false;
for (i = 0; i < MAX_THREADS; i++) {
pctx = &perf->pthr_ctx[i];
if (pctx->thread) {
- kthread_stop(pctx->thread);
+ pctx->status = kthread_stop(pctx->thread);
pctx->thread = NULL;
}
}
}
+static void perf_clear_thread_status(struct perf_ctx *perf)
+{
+ int i;
+
+ for (i = 0; i < MAX_THREADS; i++)
+ perf->pthr_ctx[i].status = -ENODATA;
+}
+
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
struct perf_ctx *perf = filp->private_data;
int node, i;
+ DECLARE_WAIT_QUEUE_HEAD(wq);
- if (!perf->link_is_up)
- return 0;
+ if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
+ return -ENOLINK;
if (perf->perf_threads == 0)
- return 0;
+ return -EINVAL;
- if (atomic_read(&perf->tsync) == 0)
- perf->run = false;
+ if (!mutex_trylock(&perf->run_mutex))
+ return -EBUSY;
- if (perf->run)
- threads_cleanup(perf);
- else {
- perf->run = true;
+ perf_clear_thread_status(perf);
- if (perf->perf_threads > MAX_THREADS) {
- perf->perf_threads = MAX_THREADS;
- pr_info("Reset total threads to: %u\n", MAX_THREADS);
- }
+ if (perf->perf_threads > MAX_THREADS) {
+ perf->perf_threads = MAX_THREADS;
+ pr_info("Reset total threads to: %u\n", MAX_THREADS);
+ }
- /* no greater than 1M */
- if (seg_order > MAX_SEG_ORDER) {
- seg_order = MAX_SEG_ORDER;
- pr_info("Fix seg_order to %u\n", seg_order);
- }
+ /* no greater than 1M */
+ if (seg_order > MAX_SEG_ORDER) {
+ seg_order = MAX_SEG_ORDER;
+ pr_info("Fix seg_order to %u\n", seg_order);
+ }
- if (run_order < seg_order) {
- run_order = seg_order;
- pr_info("Fix run_order to %u\n", run_order);
- }
+ if (run_order < seg_order) {
+ run_order = seg_order;
+ pr_info("Fix run_order to %u\n", run_order);
+ }
- node = dev_to_node(&perf->ntb->pdev->dev);
- /* launch kernel thread */
- for (i = 0; i < perf->perf_threads; i++) {
- struct pthr_ctx *pctx;
-
- pctx = &perf->pthr_ctx[i];
- atomic_set(&pctx->dma_sync, 0);
- pctx->perf = perf;
- pctx->thread =
- kthread_create_on_node(ntb_perf_thread,
- (void *)pctx,
- node, "ntb_perf %d", i);
- if (IS_ERR(pctx->thread)) {
- pctx->thread = NULL;
- goto err;
- } else
- wake_up_process(pctx->thread);
-
- if (perf->run == false)
- return -ENXIO;
- }
+ node = dev_to_node(&perf->ntb->pdev->dev);
+ atomic_set(&perf->tdone, 0);
+ /* launch kernel thread */
+ for (i = 0; i < perf->perf_threads; i++) {
+ struct pthr_ctx *pctx;
+
+ pctx = &perf->pthr_ctx[i];
+ atomic_set(&pctx->dma_sync, 0);
+ pctx->perf = perf;
+ pctx->wq = &wq;
+ pctx->thread =
+ kthread_create_on_node(ntb_perf_thread,
+ (void *)pctx,
+ node, "ntb_perf %d", i);
+ if (IS_ERR(pctx->thread)) {
+ pctx->thread = NULL;
+ goto err;
+ } else {
+ wake_up_process(pctx->thread);
+ }
}
+ wait_event_interruptible(wq,
+ atomic_read(&perf->tdone) == perf->perf_threads);
+
+ threads_cleanup(perf);
+ mutex_unlock(&perf->run_mutex);
return count;
err:
threads_cleanup(perf);
+ mutex_unlock(&perf->run_mutex);
return -ENXIO;
}
@@ -688,6 +758,12 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
int node;
int rc = 0;
+ if (ntb_spad_count(ntb) < MAX_SPAD) {
+ dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
+ DRIVER_NAME);
+ return -EIO;
+ }
+
node = dev_to_node(&pdev->dev);
perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
@@ -699,11 +775,11 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
perf->ntb = ntb;
perf->perf_threads = 1;
atomic_set(&perf->tsync, 0);
- perf->run = false;
+ mutex_init(&perf->run_mutex);
spin_lock_init(&perf->db_lock);
perf_setup_mw(ntb, perf);
+ init_waitqueue_head(&perf->link_wq);
INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
- INIT_WORK(&perf->link_cleanup, perf_link_cleanup);
rc = ntb_set_ctx(ntb, perf, &perf_ops);
if (rc)
@@ -717,11 +793,12 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
if (rc)
goto err_ctx;
+ perf_clear_thread_status(perf);
+
return 0;
err_ctx:
cancel_delayed_work_sync(&perf->link_work);
- cancel_work_sync(&perf->link_cleanup);
kfree(perf);
err_perf:
return rc;
@@ -734,8 +811,9 @@ static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
dev_dbg(&perf->ntb->dev, "%s called\n", __func__);
+ mutex_lock(&perf->run_mutex);
+
cancel_delayed_work_sync(&perf->link_work);
- cancel_work_sync(&perf->link_cleanup);
ntb_clear_ctx(ntb);
ntb_link_disable(ntb);
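
The ntb_perf rework above replaces the old run flag with a mutex: the debugfs writer takes it with mutex_trylock() and returns -EBUSY while a measurement is in flight, the reader reports "running" when mutex_is_locked() is true, and remove() takes the mutex to wait for any run to finish. A minimal sketch of the writer-side guard, where struct foo_ctx and foo_run_threads() stand in for the thread launch and wait logic:

        #include <linux/mutex.h>

        static DEFINE_MUTEX(foo_run_mutex);

        static int foo_trigger_run(struct foo_ctx *ctx)
        {
                int rc;

                if (!mutex_trylock(&foo_run_mutex))
                        return -EBUSY;          /* a run is already in progress */

                rc = foo_run_threads(ctx);      /* placeholder for the real work */

                mutex_unlock(&foo_run_mutex);
                return rc;
        }
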
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index fe1600566981..7d311799fca1 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -61,6 +61,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/debugfs.h>
#include <linux/ntb.h>
@@ -96,8 +97,13 @@ struct pp_ctx {
spinlock_t db_lock;
struct timer_list db_timer;
unsigned long db_delay;
+ struct dentry *debugfs_node_dir;
+ struct dentry *debugfs_count;
+ atomic_t count;
};
+static struct dentry *pp_debugfs_dir;
+
static void pp_ping(unsigned long ctx)
{
struct pp_ctx *pp = (void *)ctx;
@@ -171,10 +177,32 @@ static void pp_db_event(void *ctx, int vec)
dev_dbg(&pp->ntb->dev,
"Pong vec %d bits %#llx\n",
vec, db_bits);
+ atomic_inc(&pp->count);
}
spin_unlock_irqrestore(&pp->db_lock, irqflags);
}
+static int pp_debugfs_setup(struct pp_ctx *pp)
+{
+ struct pci_dev *pdev = pp->ntb->pdev;
+
+ if (!pp_debugfs_dir)
+ return -ENODEV;
+
+ pp->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
+ pp_debugfs_dir);
+ if (!pp->debugfs_node_dir)
+ return -ENODEV;
+
+ pp->debugfs_count = debugfs_create_atomic_t("count", S_IRUSR | S_IWUSR,
+ pp->debugfs_node_dir,
+ &pp->count);
+ if (!pp->debugfs_count)
+ return -ENODEV;
+
+ return 0;
+}
+
static const struct ntb_ctx_ops pp_ops = {
.link_event = pp_link_event,
.db_event = pp_db_event,
@@ -210,6 +238,7 @@ static int pp_probe(struct ntb_client *client,
pp->ntb = ntb;
pp->db_bits = 0;
+ atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
pp->db_delay = msecs_to_jiffies(delay_ms);
@@ -218,6 +247,10 @@ static int pp_probe(struct ntb_client *client,
if (rc)
goto err_ctx;
+ rc = pp_debugfs_setup(pp);
+ if (rc)
+ goto err_ctx;
+
ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
ntb_link_event(ntb);
@@ -234,6 +267,8 @@ static void pp_remove(struct ntb_client *client,
{
struct pp_ctx *pp = ntb->ctx;
+ debugfs_remove_recursive(pp->debugfs_node_dir);
+
ntb_clear_ctx(ntb);
del_timer_sync(&pp->db_timer);
ntb_link_disable(ntb);
@@ -247,4 +282,29 @@ static struct ntb_client pp_client = {
.remove = pp_remove,
},
};
-module_ntb_client(pp_client);
+
+static int __init pp_init(void)
+{
+ int rc;
+
+ if (debugfs_initialized())
+ pp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ rc = ntb_register_client(&pp_client);
+ if (rc)
+ goto err_client;
+
+ return 0;
+
+err_client:
+ debugfs_remove_recursive(pp_debugfs_dir);
+ return rc;
+}
+module_init(pp_init);
+
+static void __exit pp_exit(void)
+{
+ ntb_unregister_client(&pp_client);
+ debugfs_remove_recursive(pp_debugfs_dir);
+}
+module_exit(pp_exit);
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 6f5dc6ca673d..61bf2ef87e0e 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -59,6 +59,12 @@
*
* Eg: check if clearing the doorbell mask generates an interrupt.
*
+ * # Check the link status
+ * root@self# cat $DBG_DIR/link
+ *
+ * # Block until the link is up
+ * root@self# echo Y > $DBG_DIR/link_event
+ *
* # Set the doorbell mask
* root@self# echo 's 1' > $DBG_DIR/mask
*
@@ -79,6 +85,13 @@
* root@self# cat $DBG_DIR/spad
*
* Observe that spad 0 and 1 have the values set by the peer.
+ *
+ * # Check the memory window translation info
+ * cat $DBG_DIR/peer_trans0
+ *
+ * # Setup a 16k memory window buffer
+ * echo 16384 > $DBG_DIR/peer_trans0
+ *
*/
#include <linux/init.h>
@@ -89,6 +102,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/ntb.h>
@@ -105,11 +119,27 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+#define MAX_MWS 16
+
static struct dentry *tool_dbgfs;
+struct tool_mw {
+ int idx;
+ struct tool_ctx *tc;
+ resource_size_t win_size;
+ resource_size_t size;
+ u8 __iomem *local;
+ u8 *peer;
+ dma_addr_t peer_dma;
+ struct dentry *peer_dbg_file;
+};
+
struct tool_ctx {
struct ntb_dev *ntb;
struct dentry *dbgfs;
+ wait_queue_head_t link_wq;
+ int mw_count;
+ struct tool_mw mws[MAX_MWS];
};
#define SPAD_FNAME_SIZE 0x10
@@ -135,6 +165,8 @@ static void tool_link_event(void *ctx)
dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n",
up ? "up" : "down", speed, width);
+
+ wake_up(&tc->link_wq);
}
static void tool_db_event(void *ctx, int vec)
@@ -239,7 +271,14 @@ static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
if (!spad_read_fn)
return -EINVAL;
- buf_size = min_t(size_t, size, 0x100);
+ spad_count = ntb_spad_count(tc->ntb);
+
+ /*
+  * We multiply the number of spads by 15 to get the buffer size:
+  * 3 characters for the index (%d), 10 for the largest hex value
+  * (0x00000000) and 2 for the tab and line feed.
+  */
+ buf_size = min_t(size_t, size, spad_count * 15);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
@@ -247,7 +286,6 @@ static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
pos = 0;
- spad_count = ntb_spad_count(tc->ntb);
for (i = 0; i < spad_count; ++i) {
pos += scnprintf(buf + pos, buf_size - pos, "%d\t%#x\n",
i, spad_read_fn(tc->ntb, i));
@@ -268,7 +306,7 @@ static ssize_t tool_spadfn_write(struct tool_ctx *tc,
{
int spad_idx;
u32 spad_val;
- char *buf;
+ char *buf, *buf_ptr;
int pos, n;
ssize_t rc;
@@ -288,14 +326,15 @@ static ssize_t tool_spadfn_write(struct tool_ctx *tc,
}
buf[size] = 0;
-
- n = sscanf(buf, "%d %i%n", &spad_idx, &spad_val, &pos);
+ buf_ptr = buf;
+ n = sscanf(buf_ptr, "%d %i%n", &spad_idx, &spad_val, &pos);
while (n == 2) {
+ buf_ptr += pos;
rc = spad_write_fn(tc->ntb, spad_idx, spad_val);
if (rc)
break;
- n = sscanf(buf + pos, "%d %i%n", &spad_idx, &spad_val, &pos);
+ n = sscanf(buf_ptr, "%d %i%n", &spad_idx, &spad_val, &pos);
}
if (n < 0)
@@ -442,8 +481,384 @@ static TOOL_FOPS_RDWR(tool_peer_spad_fops,
tool_peer_spad_read,
tool_peer_spad_write);
+static ssize_t tool_link_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+ char buf[3];
+
+ buf[0] = ntb_link_is_up(tc->ntb, NULL, NULL) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+
+ return simple_read_from_buffer(ubuf, size, offp, buf, 2);
+}
+
+static ssize_t tool_link_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+ char buf[32];
+ size_t buf_size;
+ bool val;
+ int rc;
+
+ buf_size = min(size, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ rc = strtobool(buf, &val);
+ if (rc)
+ return rc;
+
+ if (val)
+ rc = ntb_link_enable(tc->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ else
+ rc = ntb_link_disable(tc->ntb);
+
+ if (rc)
+ return rc;
+
+ return size;
+}
+
+static TOOL_FOPS_RDWR(tool_link_fops,
+ tool_link_read,
+ tool_link_write);
+
+static ssize_t tool_link_event_write(struct file *filep,
+ const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+ char buf[32];
+ size_t buf_size;
+ bool val;
+ int rc;
+
+ buf_size = min(size, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ rc = strtobool(buf, &val);
+ if (rc)
+ return rc;
+
+ if (wait_event_interruptible(tc->link_wq,
+ ntb_link_is_up(tc->ntb, NULL, NULL) == val))
+ return -ERESTART;
+
+ return size;
+}
+
+static TOOL_FOPS_RDWR(tool_link_event_fops,
+ NULL,
+ tool_link_event_write);
+
+static ssize_t tool_mw_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+ ssize_t rc;
+ loff_t pos = *offp;
+ void *buf;
+
+ if (mw->local == NULL)
+ return -EIO;
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= mw->win_size || !size)
+ return 0;
+ if (size > mw->win_size - pos)
+ size = mw->win_size - pos;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy_fromio(buf, mw->local + pos, size);
+ rc = copy_to_user(ubuf, buf, size);
+ if (rc == size) {
+ rc = -EFAULT;
+ goto err_free;
+ }
+
+ size -= rc;
+ *offp = pos + size;
+ rc = size;
+
+err_free:
+ kfree(buf);
+
+ return rc;
+}
+
+static ssize_t tool_mw_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+ ssize_t rc;
+ loff_t pos = *offp;
+ void *buf;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= mw->win_size || !size)
+ return 0;
+ if (size > mw->win_size - pos)
+ size = mw->win_size - pos;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = copy_from_user(buf, ubuf, size);
+ if (rc == size) {
+ rc = -EFAULT;
+ goto err_free;
+ }
+
+ size -= rc;
+ *offp = pos + size;
+ rc = size;
+
+ memcpy_toio(mw->local + pos, buf, size);
+
+err_free:
+ kfree(buf);
+
+ return rc;
+}
+
+static TOOL_FOPS_RDWR(tool_mw_fops,
+ tool_mw_read,
+ tool_mw_write);
+
+static ssize_t tool_peer_mw_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ if (!mw->peer)
+ return -ENXIO;
+
+ return simple_read_from_buffer(ubuf, size, offp, mw->peer, mw->size);
+}
+
+static ssize_t tool_peer_mw_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ if (!mw->peer)
+ return -ENXIO;
+
+ return simple_write_to_buffer(mw->peer, mw->size, offp, ubuf, size);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mw_fops,
+ tool_peer_mw_read,
+ tool_peer_mw_write);
+
+static int tool_setup_mw(struct tool_ctx *tc, int idx, size_t req_size)
+{
+ int rc;
+ struct tool_mw *mw = &tc->mws[idx];
+ phys_addr_t base;
+ resource_size_t size, align, align_size;
+ char buf[16];
+
+ if (mw->peer)
+ return 0;
+
+ rc = ntb_mw_get_range(tc->ntb, idx, &base, &size, &align,
+ &align_size);
+ if (rc)
+ return rc;
+
+ mw->size = min_t(resource_size_t, req_size, size);
+ mw->size = round_up(mw->size, align);
+ mw->size = round_up(mw->size, align_size);
+ mw->peer = dma_alloc_coherent(&tc->ntb->pdev->dev, mw->size,
+ &mw->peer_dma, GFP_KERNEL);
+
+ if (!mw->peer)
+ return -ENOMEM;
+
+ rc = ntb_mw_set_trans(tc->ntb, idx, mw->peer_dma, mw->size);
+ if (rc)
+ goto err_free_dma;
+
+ snprintf(buf, sizeof(buf), "peer_mw%d", idx);
+ mw->peer_dbg_file = debugfs_create_file(buf, S_IRUSR | S_IWUSR,
+ mw->tc->dbgfs, mw,
+ &tool_peer_mw_fops);
+
+ return 0;
+
+err_free_dma:
+ dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
+ mw->peer,
+ mw->peer_dma);
+ mw->peer = NULL;
+ mw->peer_dma = 0;
+ mw->size = 0;
+
+ return rc;
+}
+
+static void tool_free_mw(struct tool_ctx *tc, int idx)
+{
+ struct tool_mw *mw = &tc->mws[idx];
+
+ if (mw->peer) {
+ ntb_mw_clear_trans(tc->ntb, idx);
+ dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
+ mw->peer,
+ mw->peer_dma);
+ }
+
+ mw->peer = NULL;
+ mw->peer_dma = 0;
+
+ debugfs_remove(mw->peer_dbg_file);
+
+ mw->peer_dbg_file = NULL;
+}
+
+static ssize_t tool_peer_mw_trans_read(struct file *filep,
+ char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off = 0;
+
+ phys_addr_t base;
+ resource_size_t mw_size;
+ resource_size_t align;
+ resource_size_t align_size;
+
+ buf_size = min_t(size_t, size, 512);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ntb_mw_get_range(mw->tc->ntb, mw->idx,
+ &base, &mw_size, &align, &align_size);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Peer MW %d Information:\n", mw->idx);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Physical Address \t%pa[p]\n",
+ &base);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Window Size \t%lld\n",
+ (unsigned long long)mw_size);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Alignment \t%lld\n",
+ (unsigned long long)align);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Size Alignment \t%lld\n",
+ (unsigned long long)align_size);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Ready \t%c\n",
+ (mw->peer) ? 'Y' : 'N');
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Allocated Size \t%zd\n",
+ (mw->peer) ? (size_t)mw->size : 0);
+
+ ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t tool_peer_mw_trans_write(struct file *filep,
+ const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ char buf[32];
+ size_t buf_size;
+ unsigned long long val;
+ int rc;
+
+ buf_size = min(size, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ rc = kstrtoull(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ tool_free_mw(mw->tc, mw->idx);
+ if (val)
+ rc = tool_setup_mw(mw->tc, mw->idx, val);
+
+ if (rc)
+ return rc;
+
+ return size;
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mw_trans_fops,
+ tool_peer_mw_trans_read,
+ tool_peer_mw_trans_write);
+
+static int tool_init_mw(struct tool_ctx *tc, int idx)
+{
+ struct tool_mw *mw = &tc->mws[idx];
+ phys_addr_t base;
+ int rc;
+
+ rc = ntb_mw_get_range(tc->ntb, idx, &base, &mw->win_size,
+ NULL, NULL);
+ if (rc)
+ return rc;
+
+ mw->tc = tc;
+ mw->idx = idx;
+ mw->local = ioremap_wc(base, mw->win_size);
+ if (!mw->local)
+ return -EFAULT;
+
+ return 0;
+}
+
+static void tool_free_mws(struct tool_ctx *tc)
+{
+ int i;
+
+ for (i = 0; i < tc->mw_count; i++) {
+ tool_free_mw(tc, i);
+
+ if (tc->mws[i].local)
+ iounmap(tc->mws[i].local);
+
+ tc->mws[i].local = NULL;
+ }
+}
+
static void tool_setup_dbgfs(struct tool_ctx *tc)
{
+ int i;
+
/* This module is useless without dbgfs... */
if (!tool_dbgfs) {
tc->dbgfs = NULL;
@@ -472,12 +887,31 @@ static void tool_setup_dbgfs(struct tool_ctx *tc)
debugfs_create_file("peer_spad", S_IRUSR | S_IWUSR, tc->dbgfs,
tc, &tool_peer_spad_fops);
+
+ debugfs_create_file("link", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_link_fops);
+
+ debugfs_create_file("link_event", S_IWUSR, tc->dbgfs,
+ tc, &tool_link_event_fops);
+
+ for (i = 0; i < tc->mw_count; i++) {
+ char buf[30];
+
+ snprintf(buf, sizeof(buf), "mw%d", i);
+ debugfs_create_file(buf, S_IRUSR | S_IWUSR, tc->dbgfs,
+ &tc->mws[i], &tool_mw_fops);
+
+ snprintf(buf, sizeof(buf), "peer_trans%d", i);
+ debugfs_create_file(buf, S_IRUSR | S_IWUSR, tc->dbgfs,
+ &tc->mws[i], &tool_peer_mw_trans_fops);
+ }
}
static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
{
struct tool_ctx *tc;
int rc;
+ int i;
if (ntb_db_is_unsafe(ntb))
dev_dbg(&ntb->dev, "doorbell is unsafe\n");
@@ -485,13 +919,21 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
if (ntb_spad_is_unsafe(ntb))
dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
- tc = kmalloc(sizeof(*tc), GFP_KERNEL);
+ tc = kzalloc(sizeof(*tc), GFP_KERNEL);
if (!tc) {
rc = -ENOMEM;
goto err_tc;
}
tc->ntb = ntb;
+ init_waitqueue_head(&tc->link_wq);
+
+ tc->mw_count = min(ntb_mw_count(tc->ntb), MAX_MWS);
+ for (i = 0; i < tc->mw_count; i++) {
+ rc = tool_init_mw(tc, i);
+ if (rc)
+ goto err_ctx;
+ }
tool_setup_dbgfs(tc);
@@ -505,6 +947,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
return 0;
err_ctx:
+ tool_free_mws(tc);
debugfs_remove_recursive(tc->dbgfs);
kfree(tc);
err_tc:
@@ -515,6 +958,8 @@ static void tool_remove(struct ntb_client *self, struct ntb_dev *ntb)
{
struct tool_ctx *tc = ntb->ctx;
+ tool_free_mws(tc);
+
ntb_clear_ctx(ntb);
ntb_link_disable(ntb);
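
The new link and link_event files share one parse pattern: copy a bounded chunk from userspace, NUL-terminate it, and hand it to strtobool(). That step on its own might look like the sketch below; the action taken on the parsed value is elided:

        #include <linux/kernel.h>
        #include <linux/string.h>
        #include <linux/uaccess.h>

        /* Parse a boolean written to a debugfs file; returns 0 on success. */
        static int foo_parse_bool(const char __user *ubuf, size_t size, bool *val)
        {
                char buf[32];
                size_t buf_size = min(size, sizeof(buf) - 1);

                if (copy_from_user(buf, ubuf, buf_size))
                        return -EFAULT;
                buf[buf_size] = '\0';

                return strtobool(buf, val);
        }
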
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9dce03f420eb..88e91666f145 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- int rw, sector_t sector)
+ bool is_write, sector_t sector)
{
int ret;
- if (rw == READ) {
+ if (!is_write) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
- int err = 0, rw;
+ int err = 0;
bool do_acct;
/*
@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
do_acct = nd_iostat_start(bio, &start);
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -1181,11 +1180,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
BUG_ON(len % btt->sector_size);
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- rw, iter.bi_sector);
+ op_is_write(bio_op(bio)), iter.bi_sector);
if (err) {
dev_info(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
- (rw == READ) ? "READ" : "WRITE",
+ (op_is_write(bio_op(bio))) ? "WRITE" :
+ "READ",
(unsigned long long) iter.bi_sector, len);
bio->bi_error = err;
break;
@@ -1200,12 +1200,12 @@ out:
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, 0);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, 0);
return 0;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b511099457db..571a6c7ee2fc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (rw == READ) {
+ if (!is_write) {
if (unlikely(bad_pmem))
rc = -EIO;
else {
@@ -128,13 +128,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
struct pmem_device *pmem = q->queuedata;
struct nd_region *nd_region = to_region(pmem);
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_opf & REQ_FLUSH)
nvdimm_flush(nd_region);
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_data_dir(bio),
+ bvec.bv_offset, op_is_write(bio_op(bio)),
iter.bi_sector);
if (rc) {
bio->bi_error = rc;
@@ -144,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
if (do_acct)
nd_iostat_end(bio, start);
- if (bio->bi_rw & REQ_FUA)
+ if (bio->bi_opf & REQ_FUA)
nvdimm_flush(nd_region);
bio_endio(bio);
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, rw & WRITE, 0);
+ page_endio(page, is_write, 0);
return rc;
}
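
Both nvdimm conversions above drop the old int rw / READ / WRITE convention in favour of a bool derived once per bio from its operation. A caller-side sketch, where struct foo_dev, foo_read_pg() and foo_write_pg() are placeholders:

        #include <linux/bio.h>

        static int foo_do_bvec(struct foo_dev *fd, struct page *page,
                               unsigned int len, unsigned int off,
                               bool is_write, sector_t sector)
        {
                if (!is_write)
                        return foo_read_pg(fd, page, off, sector, len);

                return foo_write_pg(fd, page, off, sector, len);
        }

        /* caller derives the flag once per bio:
         *      bool is_write = op_is_write(bio_op(bio));
         */
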
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4cb9b156cab7..d7c33f9361aa 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1661,14 +1661,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
static void nvme_dev_unmap(struct nvme_dev *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int bars;
-
if (dev->bar)
iounmap(dev->bar);
-
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(to_pci_dev(dev->dev));
}
static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1897,13 +1892,9 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
static int nvme_dev_map(struct nvme_dev *dev)
{
- int bars;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- return -ENODEV;
- if (pci_request_selected_regions(pdev, bars, "nvme"))
+ if (pci_request_mem_regions(pdev, "nvme"))
return -ENODEV;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -1912,7 +1903,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
return 0;
release:
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
return -ENODEV;
}
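
pci_request_mem_regions() and pci_release_mem_regions() wrap the pci_select_bars() plus pci_request_selected_regions() sequence that the nvme change above deletes. A sketch of mapping BAR 0 with the helpers; the "foo" name and BAR choice are illustrative:

        #include <linux/io.h>
        #include <linux/pci.h>

        static int foo_map_bar0(struct pci_dev *pdev, void __iomem **bar)
        {
                /* Claims every memory BAR of the device under the driver name. */
                if (pci_request_mem_regions(pdev, "foo"))
                        return -ENODEV;

                *bar = ioremap(pci_resource_start(pdev, 0),
                               pci_resource_len(pdev, 0));
                if (!*bar) {
                        pci_release_mem_regions(pdev);
                        return -ENOMEM;
                }

                return 0;
        }
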
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a4b608776c73..7792266db259 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -502,6 +502,28 @@ int of_device_is_compatible(const struct device_node *device,
}
EXPORT_SYMBOL(of_device_is_compatible);
+/**
+ * of_device_compatible_match - check a node against a NULL-terminated array
+ * of compatible strings. Returns the best match score or 0.
+ */
+int of_device_compatible_match(struct device_node *device,
+ const char *const *compat)
+{
+ unsigned int tmp, score = 0;
+
+ if (!compat)
+ return 0;
+
+ while (*compat) {
+ tmp = of_device_is_compatible(device, *compat);
+ if (tmp > score)
+ score = tmp;
+ compat++;
+ }
+
+ return score;
+}
+
/**
* of_machine_is_compatible - Test root of device tree for a given compatible value
* @compat: compatible string to look for in root node's compatible property.
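
A short usage sketch for the new of_device_compatible_match() helper; the compatible strings are made up:

        #include <linux/of.h>

        static const char * const foo_compat[] = {
                "acme,foo-v2",
                "acme,foo",
                NULL            /* the array must be NULL terminated */
        };

        static bool foo_node_is_supported(struct device_node *np)
        {
                return of_device_compatible_match(np, foo_compat) > 0;
        }
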
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 765390e3ed8d..8aa197691074 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -499,8 +499,24 @@ EXPORT_SYMBOL_GPL(of_platform_default_populate);
static int __init of_platform_default_populate_init(void)
{
- if (of_have_populated_dt())
- of_platform_default_populate(NULL, NULL, NULL);
+ struct device_node *node;
+
+ if (!of_have_populated_dt())
+ return -ENODEV;
+
+ /*
+ * Handle ramoops explicitly, since it is inside /reserved-memory,
+ * which lacks a "compatible" property.
+ */
+ node = of_find_node_by_path("/reserved-memory");
+ if (node) {
+ node = of_find_compatible_node(node, NULL, "ramoops");
+ if (node)
+ of_platform_device_create(node, NULL, NULL);
+ }
+
+ /* Populate everything else. */
+ of_platform_default_populate(NULL, NULL, NULL);
return 0;
}
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index e24b05996a1b..3ed6238f8f6e 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -790,7 +790,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return ccio_map_single(dev, page_address(page) + offset, size,
direction);
@@ -806,7 +806,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
*/
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
unsigned long flags;
@@ -844,7 +844,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
*/
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
#if 0
@@ -878,9 +878,9 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
*/
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- ccio_unmap_page(dev, dma_handle, size, 0, NULL);
+ ccio_unmap_page(dev, dma_handle, size, 0, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -907,7 +907,7 @@ ccio_free(struct device *dev, size_t size, void *cpu_addr,
*/
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -984,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
*/
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -1004,7 +1004,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
ccio_unmap_page(dev, sg_dma_address(sglist),
- sg_dma_len(sglist), direction, NULL);
+ sg_dma_len(sglist), direction, 0);
++sglist;
}
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 42ec4600b7e4..151b86b6d2e2 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -783,7 +783,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return sba_map_single(dev, page_address(page) + offset, size,
direction);
@@ -801,7 +801,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
*/
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
@@ -876,7 +876,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
* See Documentation/DMA-API-HOWTO.txt
*/
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -908,9 +908,9 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
*/
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
+ sba_unmap_page(hwdev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -943,7 +943,7 @@ int dump_run_sg = 0;
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -1026,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
@@ -1051,7 +1051,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
while (sg_dma_len(sglist) && nents--) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
- direction, NULL);
+ direction, 0);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
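
The DMA-mapping attrs argument is now a plain unsigned long bitmask of DMA_ATTR_* flags instead of a struct dma_attrs pointer, which is why the parisc callers above pass 0 where they used to pass NULL. A caller-side sketch; dev, size and handle are illustrative:

        #include <linux/dma-mapping.h>

        static void *foo_alloc_buf(struct device *dev, size_t size,
                                   dma_addr_t *handle)
        {
                /* 0 means "no special attributes"; DMA_ATTR_* flags may be
                 * OR'ed in where the platform supports them. */
                return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, 0);
        }
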
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 56389be5d08b..67f9916ff14d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -25,7 +25,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- bool
+ def_bool ARM || ARM64 || X86
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index dd7cdbee8029..c288e5a52575 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -91,6 +91,35 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
}
+int devm_request_pci_bus_resources(struct device *dev,
+ struct list_head *resources)
+{
+ struct resource_entry *win;
+ struct resource *parent, *res;
+ int err;
+
+ resource_list_for_each_entry(win, resources) {
+ res = win->res;
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ break;
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
+
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
@@ -291,6 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
+ pci_bridge_d3_device_changed(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
@@ -397,4 +427,3 @@ void pci_bus_put(struct pci_bus *bus)
put_device(&bus->dev);
}
EXPORT_SYMBOL(pci_bus_put);
-
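
The new devm_request_pci_bus_resources() helper lets a host-bridge driver claim its I/O and memory windows against ioport_resource and iomem_resource in one call, with automatic release on driver detach. A probe-side sketch; the resource list is assumed to have been filled from DT or ACPI already:

        #include <linux/list.h>
        #include <linux/pci.h>
        #include <linux/platform_device.h>

        static int foo_host_probe(struct platform_device *pdev)
        {
                LIST_HEAD(resources);
                int err;

                /* ... parse host bridge ranges into 'resources' ... */

                err = devm_request_pci_bus_resources(&pdev->dev, &resources);
                if (err)
                        return err;

                /* ... create and scan the root bus ... */
                return 0;
        }
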
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index f9832ad8efe2..43ed08dd8b01 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -19,16 +19,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/slab.h>
-#include "ecam.h"
-
/*
* On 64-bit systems, we do a single ioremap for the whole config space
* since we have enough virtual address range available. On 32-bit, we
* ioremap the config space for each bus individually.
*/
-static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT);
+static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT);
/*
* Create a PCI config space window
@@ -52,6 +51,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
if (!cfg)
return ERR_PTR(-ENOMEM);
+ cfg->parent = dev;
cfg->ops = ops;
cfg->busr.start = busr->start;
cfg->busr.end = busr->end;
@@ -95,7 +95,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
}
if (ops->init) {
- err = ops->init(dev, cfg);
+ err = ops->init(cfg);
if (err)
goto err_exit;
}
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 5d2374e4ee7f..9b485d873b0d 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -3,8 +3,9 @@ menu "PCI host controller drivers"
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
- select PCIE_DW
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of PCIe controller in DRA7xx. This controller can
@@ -16,11 +17,20 @@ config PCI_MVEBU
depends on ARM
depends on OF
+config PCI_AARDVARK
+ bool "Aardvark PCIe controller"
+ depends on ARCH_MVEBU && ARM64
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Add support for the Aardvark 64-bit PCIe Host Controller. This
+ controller is part of the South Bridge of the Marvell Armada
+ 3700 SoC.
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
depends on ARCH_ZYNQMP
- select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel support for Xilinx
NWL PCIe controller. The controller can act as Root Port
@@ -29,6 +39,7 @@ config PCIE_XILINX_NWL
config PCIE_DW_PLAT
bool "Platform bus based DesignWare PCIe Controller"
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
---help---
This selects the DesignWare PCIe controller support. Select this if
@@ -40,16 +51,19 @@ config PCIE_DW_PLAT
config PCIE_DW
bool
+ depends on PCI_MSI_IRQ_DOMAIN
config PCI_EXYNOS
bool "Samsung Exynos PCIe controller"
depends on SOC_EXYNOS5440
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
depends on SOC_IMX6Q
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
@@ -72,8 +86,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
- select PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -85,6 +98,7 @@ config PCI_HOST_GENERIC
bool "Generic PCI host controller"
depends on (ARM || ARM64) && OF
select PCI_HOST_COMMON
+ select IRQ_DOMAIN
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
@@ -92,6 +106,7 @@ config PCI_HOST_GENERIC
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
help
@@ -100,6 +115,7 @@ config PCIE_SPEAR13XX
config PCI_KEYSTONE
bool "TI Keystone PCIe controller"
depends on ARCH_KEYSTONE
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select PCIEPORTBUS
help
@@ -120,7 +136,6 @@ config PCI_XGENE
depends on ARCH_XGENE
depends on OF
select PCIEPORTBUS
- select PCI_MSI_IRQ_DOMAIN if PCI_MSI
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -128,7 +143,8 @@ config PCI_XGENE
config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
- depends on PCI_XGENE && PCI_MSI
+ depends on PCI_XGENE
+ depends on PCI_MSI_IRQ_DOMAIN
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -137,6 +153,7 @@ config PCI_XGENE_MSI
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
depends on OF && (ARM || ARCH_LAYERSCAPE)
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select MFD_SYSCON
help
@@ -177,8 +194,7 @@ config PCIE_IPROC_BCMA
config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
- depends on PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -195,8 +211,8 @@ config PCIE_ALTERA
config PCIE_ALTERA_MSI
bool "Altera PCIe MSI feature"
- depends on PCIE_ALTERA && PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCIE_ALTERA
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe MSI support for the Altera FPGA.
This MSI driver supports Altera MSI to GIC controller IP.
@@ -204,6 +220,7 @@ config PCIE_ALTERA_MSI
config PCI_HISI
depends on OF && ARM64
bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
help
@@ -213,6 +230,7 @@ config PCI_HISI
config PCIE_QCOM
bool "Qualcomm PCIe controller"
depends on ARCH_QCOM && OF
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select PCIEPORTBUS
help
@@ -237,6 +255,7 @@ config PCI_HOST_THUNDER_ECAM
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select PCIEPORTBUS
help
@@ -245,4 +264,14 @@ config PCIE_ARMADA_8K
Designware hardware and therefore the driver re-uses the
Designware core functions to implement the driver.
+config PCIE_ARTPEC6
+ bool "Axis ARTPEC-6 PCIe controller"
+ depends on MACH_ARTPEC6
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW
+ select PCIEPORTBUS
+ help
+ Say Y here to enable PCIe controller support on Axis ARTPEC-6
+ SoCs. This PCIe controller uses the DesignWare core.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 9c8698e89e96..88434101e4c4 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
@@ -29,3 +30,4 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
new file mode 100644
index 000000000000..ef9893fa3176
--- /dev/null
+++ b/drivers/pci/host/pci-aardvark.c
@@ -0,0 +1,1001 @@
+/*
+ * Driver for the Aardvark PCIe controller, used on Marvell Armada
+ * 3700.
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+
+/* PCIe core registers */
+#define PCIE_CORE_CMD_STATUS_REG 0x4
+#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
+#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
+#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
+#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
+#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
+#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
+#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
+#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
+#define PCIE_CORE_LINK_TRAINING BIT(5)
+#define PCIE_CORE_LINK_WIDTH_SHIFT 20
+#define PCIE_CORE_ERR_CAPCTL_REG 0x118
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
+
+/* PIO registers base address and register offsets */
+#define PIO_BASE_ADDR 0x4000
+#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
+#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
+#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
+#define PIO_STAT (PIO_BASE_ADDR + 0x4)
+#define PIO_COMPLETION_STATUS_SHIFT 7
+#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
+#define PIO_COMPLETION_STATUS_OK 0
+#define PIO_COMPLETION_STATUS_UR 1
+#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_CA 4
+#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
+#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
+#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
+#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
+#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
+#define PIO_START (PIO_BASE_ADDR + 0x1c)
+#define PIO_ISR (PIO_BASE_ADDR + 0x20)
+#define PIO_ISRM (PIO_BASE_ADDR + 0x24)
+
+/* Aardvark Control registers */
+#define CONTROL_BASE_ADDR 0x4800
+#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
+#define PCIE_GEN_SEL_MSK 0x3
+#define PCIE_GEN_SEL_SHIFT 0x0
+#define SPEED_GEN_1 0
+#define SPEED_GEN_2 1
+#define SPEED_GEN_3 2
+#define IS_RC_MSK 1
+#define IS_RC_SHIFT 2
+#define LANE_CNT_MSK 0x18
+#define LANE_CNT_SHIFT 0x3
+#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
+#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
+#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
+#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
+#define LINK_TRAINING_EN BIT(6)
+#define LEGACY_INTA BIT(28)
+#define LEGACY_INTB BIT(29)
+#define LEGACY_INTC BIT(30)
+#define LEGACY_INTD BIT(31)
+#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
+#define HOT_RESET_GEN BIT(0)
+#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
+#define PCIE_CORE_CTRL2_RESERVED 0x7
+#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
+#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
+#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
+#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
+#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
+#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
+#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
+#define PCIE_ISR0_ALL_MASK GENMASK(26, 0)
+#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
+#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
+#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
+#define PCIE_ISR1_FLUSH BIT(5)
+#define PCIE_ISR1_ALL_MASK GENMASK(5, 4)
+#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
+#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
+#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
+#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR 0x4c00
+#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
+ OB_WIN_BLOCK_SIZE * (win) + \
+ (offset))
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
+
+/* PCIe window types */
+#define OB_PCIE_MEM 0x0
+#define OB_PCIE_IO 0x4
+
+/* LMI registers base address and register offsets */
+#define LMI_BASE_ADDR 0x6000
+#define CFG_REG (LMI_BASE_ADDR + 0x0)
+#define LTSSM_SHIFT 24
+#define LTSSM_MASK 0x3f
+#define LTSSM_L0 0x10
+#define RC_BAR_CONFIG 0x300
+
+/* PCIe core controller registers */
+#define CTRL_CORE_BASE_ADDR 0x18000
+#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
+#define CTRL_MODE_SHIFT 0x0
+#define CTRL_MODE_MASK 0x1
+#define PCIE_CORE_MODE_DIRECT 0x0
+#define PCIE_CORE_MODE_COMMAND 0x1
+
+/* PCIe Central Interrupts Registers */
+#define CENTRAL_INT_BASE_ADDR 0x1b000
+#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
+#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
+#define PCIE_IRQ_CMDQ_INT BIT(0)
+#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
+#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
+#define PCIE_IRQ_DMA_INT BIT(4)
+#define PCIE_IRQ_IB_DXFERDONE BIT(5)
+#define PCIE_IRQ_OB_DXFERDONE BIT(6)
+#define PCIE_IRQ_OB_RXFERDONE BIT(7)
+#define PCIE_IRQ_COMPQ_INT BIT(12)
+#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
+#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
+#define PCIE_IRQ_CORE_INT BIT(16)
+#define PCIE_IRQ_CORE_INT_PIO BIT(17)
+#define PCIE_IRQ_DPMU_INT BIT(18)
+#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
+#define PCIE_IRQ_MSI_INT1_DET BIT(20)
+#define PCIE_IRQ_MSI_INT2_DET BIT(21)
+#define PCIE_IRQ_RC_DBELL_DET BIT(22)
+#define PCIE_IRQ_EP_STATUS BIT(23)
+#define PCIE_IRQ_ALL_MASK 0xfff0fb
+#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT
+
+/* Transaction types */
+#define PCIE_CONFIG_RD_TYPE0 0x8
+#define PCIE_CONFIG_RD_TYPE1 0x9
+#define PCIE_CONFIG_WR_TYPE0 0xa
+#define PCIE_CONFIG_WR_TYPE1 0xb
+
+/* PCI_BDF shifts by 8 bits, so we need an extra 4-bit shift */
+#define PCIE_BDF(dev) (dev << 4)
+#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
+#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
+#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
+#define PCIE_CONF_REG(reg) ((reg) & 0xffc)
+#define PCIE_CONF_ADDR(bus, devfn, where) \
+ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
+
+#define PIO_TIMEOUT_MS 1
+
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+#define LEGACY_IRQ_NUM 4
+#define MSI_IRQ_NUM 32
+
+struct advk_pcie {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct list_head resources;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
+ struct msi_controller msi;
+ struct irq_domain *msi_domain;
+ struct irq_chip msi_irq_chip;
+ DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);
+ struct mutex msi_used_lock;
+ u16 msi_msg;
+ int root_bus_nr;
+};
+
+static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
+{
+ return readl(pcie->base + reg);
+}
+
+static int advk_pcie_link_up(struct advk_pcie *pcie)
+{
+ u32 val, ltssm_state;
+
+ val = advk_readl(pcie, CFG_REG);
+ ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+ return ltssm_state >= LTSSM_L0;
+}
+
+static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+{
+ int retries;
+
+ /* Check whether the link is up */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (advk_pcie_link_up(pcie)) {
+ dev_info(&pcie->pdev->dev, "link up\n");
+ return 0;
+ }
+
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Set a PCIe address window register, which can be used for memory
+ * mapping.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
+ u32 win_num, u32 match_ms,
+ u32 match_ls, u32 mask_ms,
+ u32 mask_ls, u32 remap_ms,
+ u32 remap_ls, u32 action)
+{
+ advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
+ advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
+}
+
+static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+{
+ u32 reg;
+ int i;
+
+ /* Point PCIe unit MBUS decode windows to DRAM space */
+ for (i = 0; i < 8; i++)
+ advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
+
+ /* Set to Direct mode */
+ reg = advk_readl(pcie, CTRL_CONFIG_REG);
+ reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
+ reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
+ advk_writel(pcie, reg, CTRL_CONFIG_REG);
+
+ /* Set PCI global control register to RC mode */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= (IS_RC_MSK << IS_RC_SHIFT);
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Set Advanced Error Capabilities and Control PF0 register */
+ reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
+ advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
+
+ /* Set PCIe Device Control and Status 1 PF0 register */
+ reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
+ (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
+ PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
+ PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+ advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+
+ /* Program PCIe Control 2 to disable strict ordering */
+ reg = PCIE_CORE_CTRL2_RESERVED |
+ PCIE_CORE_CTRL2_TD_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Set GEN2 */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~PCIE_GEN_SEL_MSK;
+ reg |= SPEED_GEN_2;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Set lane X1 */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LANE_CNT_MSK;
+ reg |= LANE_COUNT_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Enable link training */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Enable MSI */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Clear all interrupts */
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+ /* Disable All ISR0/1 Sources */
+ reg = PCIE_ISR0_ALL_MASK;
+ reg &= ~PCIE_ISR0_MSI_INT_PENDING;
+ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
+
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+
+ /* Unmask all MSIs */
+ advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+
+ /* Enable summary interrupt for GIC SPI source */
+ reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
+ advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+
+ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Bypass the address window mapping for PIO */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg |= PIO_CTRL_ADDR_WIN_DISABLE;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Start link training */
+ reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
+ reg |= PCIE_CORE_LINK_TRAINING;
+ advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+
+ advk_pcie_wait_for_link(pcie);
+
+ reg = PCIE_CORE_LINK_L0S_ENTRY |
+ (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
+ advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+
+ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
+ PCIE_CORE_CMD_IO_ACCESS_EN |
+ PCIE_CORE_CMD_MEM_IO_REQ_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+}
+
+static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+{
+ u32 reg;
+ unsigned int status;
+ char *strcomp_status, *str_posted;
+
+ reg = advk_readl(pcie, PIO_STAT);
+ status = (reg & PIO_COMPLETION_STATUS_MASK) >>
+ PIO_COMPLETION_STATUS_SHIFT;
+
+ if (!status)
+ return;
+
+ switch (status) {
+ case PIO_COMPLETION_STATUS_UR:
+ strcomp_status = "UR";
+ break;
+ case PIO_COMPLETION_STATUS_CRS:
+ strcomp_status = "CRS";
+ break;
+ case PIO_COMPLETION_STATUS_CA:
+ strcomp_status = "CA";
+ break;
+ default:
+ strcomp_status = "Unknown";
+ break;
+ }
+
+ if (reg & PIO_NON_POSTED_REQ)
+ str_posted = "Non-posted";
+ else
+ str_posted = "Posted";
+
+ dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+}
+
+static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+
+ while (time_before(jiffies, timeout)) {
+ u32 start, isr;
+
+ start = advk_readl(pcie, PIO_START);
+ isr = advk_readl(pcie, PIO_ISR);
+ if (!start && isr)
+ return 0;
+ }
+
+ dev_err(&pcie->pdev->dev, "config read/write timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ struct advk_pcie *pcie = bus->sysdata;
+ u32 reg;
+ int ret;
+
+ if (PCI_SLOT(devfn) != 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Start PIO */
+ advk_writel(pcie, 0, PIO_START);
+ advk_writel(pcie, 1, PIO_ISR);
+
+ /* Program the control register */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg &= ~PIO_CTRL_TYPE_MASK;
+ if (bus->number == pcie->root_bus_nr)
+ reg |= PCIE_CONFIG_RD_TYPE0;
+ else
+ reg |= PCIE_CONFIG_RD_TYPE1;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Program the address registers */
+ reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+ advk_writel(pcie, reg, PIO_ADDR_LS);
+ advk_writel(pcie, 0, PIO_ADDR_MS);
+
+ /* Program the data strobe */
+ advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+
+ /* Start the transfer */
+ advk_writel(pcie, 1, PIO_START);
+
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
+
+ advk_pcie_check_pio_status(pcie);
+
+ /* Get the read result */
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct advk_pcie *pcie = bus->sysdata;
+ u32 reg;
+ u32 data_strobe = 0x0;
+ int offset;
+ int ret;
+
+ if (PCI_SLOT(devfn) != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (where % size)
+ return PCIBIOS_SET_FAILED;
+
+ /* Start PIO */
+ advk_writel(pcie, 0, PIO_START);
+ advk_writel(pcie, 1, PIO_ISR);
+
+ /* Program the control register */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg &= ~PIO_CTRL_TYPE_MASK;
+ if (bus->number == pcie->root_bus_nr)
+ reg |= PCIE_CONFIG_WR_TYPE0;
+ else
+ reg |= PCIE_CONFIG_WR_TYPE1;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Program the address registers */
+ reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+ advk_writel(pcie, reg, PIO_ADDR_LS);
+ advk_writel(pcie, 0, PIO_ADDR_MS);
+
+ /* Calculate the write strobe */
+ offset = where & 0x3;
+ reg = val << (8 * offset);
+ data_strobe = GENMASK(size - 1, 0) << offset;
+
+ /* Program the data register */
+ advk_writel(pcie, reg, PIO_WR_DATA);
+
+ /* Program the data strobe */
+ advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+
+ /* Start the transfer */
+ advk_writel(pcie, 1, PIO_START);
+
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
+
+ advk_pcie_check_pio_status(pcie);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops advk_pcie_ops = {
+ .read = advk_pcie_rd_conf,
+ .write = advk_pcie_wr_conf,
+};
+
+static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
+{
+ int hwirq;
+
+ mutex_lock(&pcie->msi_used_lock);
+ hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
+ if (hwirq >= MSI_IRQ_NUM)
+ hwirq = -ENOSPC;
+ else
+ set_bit(hwirq, pcie->msi_irq_in_use);
+ mutex_unlock(&pcie->msi_used_lock);
+
+ return hwirq;
+}
+
+static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
+{
+ mutex_lock(&pcie->msi_used_lock);
+ if (!test_bit(hwirq, pcie->msi_irq_in_use))
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
+ hwirq);
+ else
+ clear_bit(hwirq, pcie->msi_irq_in_use);
+ mutex_unlock(&pcie->msi_used_lock);
+}
+
+static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct advk_pcie *pcie = pdev->bus->sysdata;
+ struct msi_msg msg;
+ int virq, hwirq;
+ phys_addr_t msi_msg_phys;
+
+ /* We support MSI, but not MSI-X */
+ if (desc->msi_attrib.is_msix)
+ return -EINVAL;
+
+ hwirq = advk_pcie_alloc_msi(pcie);
+ if (hwirq < 0)
+ return hwirq;
+
+ virq = irq_create_mapping(pcie->msi_domain, hwirq);
+ if (!virq) {
+ advk_pcie_free_msi(pcie, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(virq, desc);
+
+ msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+ msg.address_lo = lower_32_bits(msi_msg_phys);
+ msg.address_hi = upper_32_bits(msi_msg_phys);
+ msg.data = virq;
+
+ pci_write_msi_msg(virq, &msg);
+
+ return 0;
+}
+
+static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
+ unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ struct msi_desc *msi = irq_data_get_msi_desc(d);
+ struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
+ unsigned long hwirq = d->hwirq;
+
+ irq_dispose_mapping(irq);
+ advk_pcie_free_msi(pcie, hwirq);
+}
+
+static int advk_pcie_msi_map(struct irq_domain *domain,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct advk_pcie *pcie = domain->host_data;
+
+ irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
+ handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
+ .map = advk_pcie_msi_map,
+};
+
+static void advk_pcie_irq_mask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 mask;
+
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+}
+
+static void advk_pcie_irq_unmask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 mask;
+
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+}
+
+static int advk_pcie_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct advk_pcie *pcie = h->host_data;
+
+ advk_pcie_irq_mask(irq_get_irq_data(virq));
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pcie->irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, pcie);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
+ .map = advk_pcie_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct irq_chip *msi_irq_chip;
+ struct msi_controller *msi;
+ phys_addr_t msi_msg_phys;
+ int ret;
+
+ msi_irq_chip = &pcie->msi_irq_chip;
+
+ msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
+ dev_name(dev));
+ if (!msi_irq_chip->name)
+ return -ENOMEM;
+
+ msi_irq_chip->irq_enable = pci_msi_unmask_irq;
+ msi_irq_chip->irq_disable = pci_msi_mask_irq;
+ msi_irq_chip->irq_mask = pci_msi_mask_irq;
+ msi_irq_chip->irq_unmask = pci_msi_unmask_irq;
+
+ msi = &pcie->msi;
+
+ msi->setup_irq = advk_pcie_setup_msi_irq;
+ msi->teardown_irq = advk_pcie_teardown_msi_irq;
+ msi->of_node = node;
+
+ mutex_init(&pcie->msi_used_lock);
+
+ msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+ advk_writel(pcie, lower_32_bits(msi_msg_phys),
+ PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, upper_32_bits(msi_msg_phys),
+ PCIE_MSI_ADDR_HIGH_REG);
+
+ pcie->msi_domain =
+ irq_domain_add_linear(NULL, MSI_IRQ_NUM,
+ &advk_pcie_msi_irq_ops, pcie);
+ if (!pcie->msi_domain)
+ return -ENOMEM;
+
+ ret = of_pci_msi_chip_add(msi);
+ if (ret < 0) {
+ irq_domain_remove(pcie->msi_domain);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
+{
+ of_pci_msi_chip_remove(&pcie->msi);
+ irq_domain_remove(pcie->msi_domain);
+}
+
+static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ struct irq_chip *irq_chip;
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_chip = &pcie->irq_chip;
+
+ irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
+ dev_name(dev));
+ if (!irq_chip->name) {
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_chip->irq_mask = advk_pcie_irq_mask;
+ irq_chip->irq_mask_ack = advk_pcie_irq_mask;
+ irq_chip->irq_unmask = advk_pcie_irq_unmask;
+
+ pcie->irq_domain =
+ irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM,
+ &advk_pcie_irq_domain_ops, pcie);
+ if (!pcie->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->irq_domain);
+}
+
+static void advk_pcie_handle_msi(struct advk_pcie *pcie)
+{
+ u32 msi_val, msi_mask, msi_status, msi_idx;
+ u16 msi_data;
+
+ msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
+ msi_status = msi_val & ~msi_mask;
+
+ for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
+ if (!(BIT(msi_idx) & msi_status))
+ continue;
+
+ advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
+ msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
+ generic_handle_irq(msi_data);
+ }
+
+ advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
+ PCIE_ISR0_REG);
+}
+
+static void advk_pcie_handle_int(struct advk_pcie *pcie)
+{
+ u32 val, mask, status;
+ int i, virq;
+
+ val = advk_readl(pcie, PCIE_ISR0_REG);
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
+
+ if (!status) {
+ advk_writel(pcie, val, PCIE_ISR0_REG);
+ return;
+ }
+
+ /* Process MSI interrupts */
+ if (status & PCIE_ISR0_MSI_INT_PENDING)
+ advk_pcie_handle_msi(pcie);
+
+ /* Process legacy interrupts */
+ for (i = 0; i < LEGACY_IRQ_NUM; i++) {
+ if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
+ continue;
+
+ advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
+ PCIE_ISR0_REG);
+
+ virq = irq_find_mapping(pcie->irq_domain, i);
+ generic_handle_irq(virq);
+ }
+}
+
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+{
+ struct advk_pcie *pcie = arg;
+ u32 status;
+
+ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ if (!(status & PCIE_IRQ_CORE_INT))
+ return IRQ_NONE;
+
+ advk_pcie_handle_int(pcie);
+
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
+{
+ int err, res_valid = 0;
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource_entry *win;
+ resource_size_t iobase;
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
+ &iobase);
+ if (err)
+ return err;
+
+ err = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (err)
+ goto out_release_res;
+
+ resource_list_for_each_entry(win, &pcie->resources) {
+ struct resource *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ advk_pcie_set_ob_win(pcie, 1,
+ upper_32_bits(res->start),
+ lower_32_bits(res->start),
+ 0, 0xF8000000, 0,
+ lower_32_bits(res->start),
+ OB_PCIE_IO);
+ err = pci_remap_iospace(res, iobase);
+ if (err)
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+ break;
+ case IORESOURCE_MEM:
+ advk_pcie_set_ob_win(pcie, 0,
+ upper_32_bits(res->start),
+ lower_32_bits(res->start),
+ 0x0, 0xF8000000, 0,
+ lower_32_bits(res->start),
+ (2 << 20) | OB_PCIE_MEM);
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+ break;
+ case IORESOURCE_BUS:
+ pcie->root_bus_nr = res->start;
+ break;
+ }
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ pci_free_resource_list(&pcie->resources);
+ return err;
+}
+
+static int advk_pcie_probe(struct platform_device *pdev)
+{
+ struct advk_pcie *pcie;
+ struct resource *res;
+ struct pci_bus *bus, *child;
+ struct msi_controller *msi;
+ struct device_node *msi_node;
+ int ret, irq;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
+ GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcie->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pcie->base)) {
+ dev_err(&pdev->dev, "Failed to map registers\n");
+ return PTR_ERR(pcie->base);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+ pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse resources\n");
+ return ret;
+ }
+
+ advk_pcie_setup_hw(pcie);
+
+ ret = advk_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize irq\n");
+ return ret;
+ }
+
+ ret = advk_pcie_init_msi_irq_domain(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize irq\n");
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
+ msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
+ if (msi_node)
+ msi = of_pci_find_msi_chip_by_node(msi_node);
+ else
+ msi = NULL;
+
+ bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
+ pcie, &pcie->resources, &pcie->msi);
+ if (!bus) {
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+ return -ENOMEM;
+ }
+
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ return 0;
+}
+
+static const struct of_device_id advk_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-3700-pcie", },
+ {},
+};
+
+static struct platform_driver advk_pcie_driver = {
+ .driver = {
+ .name = "advk-pcie",
+ .of_match_table = advk_pcie_of_match_table,
+ /* Driver unloading/unbinding currently not supported */
+ .suppress_bind_attrs = true,
+ },
+ .probe = advk_pcie_probe,
+};
+builtin_platform_driver(advk_pcie_driver);
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index f441130407e7..81b3949a26db 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -181,14 +181,14 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
- return PTR_ERR(pcie_intc_node);
+ return -ENODEV;
}
pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
&intx_domain_ops, pp);
if (!pp->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return PTR_ERR(pp->irq_domain);
+ return -ENODEV;
}
return 0;
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 8cba7ab73df9..9d9d34e959b6 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -20,10 +20,9 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources, struct resource **bus_range)
{
@@ -36,44 +35,34 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, resources);
+ if (err)
+ return err;
+
resource_list_for_each_entry(win, resources) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
- parent = &ioport_resource;
err = pci_remap_iospace(res, iobase);
- if (err) {
+ if (err)
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
- continue;
- }
break;
case IORESOURCE_MEM:
- parent = &iomem_resource;
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
*bus_range = res;
- default:
- continue;
+ break;
}
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
- }
-
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
}
- return 0;
+ if (res_valid)
+ return 0;
-out_release_res:
- return err;
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ return -EINVAL;
}
static void gen_pci_unmap_cfg(void *ptr)
@@ -155,7 +144,14 @@ int pci_host_common_probe(struct platform_device *pdev,
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 6eaceab1bf04..c05ea9d72f69 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -20,13 +20,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
@@ -46,8 +45,6 @@ static const struct of_device_id gen_pci_of_match[] = {
{ },
};
-MODULE_DEVICE_TABLE(of, gen_pci_of_match);
-
static int gen_pci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
@@ -66,8 +63,4 @@ static struct platform_driver gen_pci_driver = {
},
.probe = gen_pci_probe,
};
-module_platform_driver(gen_pci_driver);
-
-MODULE_DESCRIPTION("Generic PCI host driver");
-MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 7e9b2de2aa24..6955ffdb89f3 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -732,16 +732,18 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
pdev = msi_desc_to_pci_dev(msi);
hbus = info->data;
- hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
- if (!hpdev)
+ int_desc = irq_data_get_irq_chip_data(irq_data);
+ if (!int_desc)
return;
- int_desc = irq_data_get_irq_chip_data(irq_data);
- if (int_desc) {
- irq_data->chip_data = NULL;
- hv_int_desc_free(hpdev, int_desc);
+ irq_data->chip_data = NULL;
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev) {
+ kfree(int_desc);
+ return;
}
+ hv_int_desc_free(hpdev, int_desc);
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
}
@@ -1657,14 +1659,16 @@ static void hv_pci_onchannelcallback(void *context)
continue;
}
+ /* Zero length indicates there are no more packets. */
+ if (ret || !bytes_recvd)
+ break;
+
/*
* All incoming packets must be at least as large as a
* response.
*/
- if (bytes_recvd <= sizeof(struct pci_response)) {
- kfree(buffer);
- return;
- }
+ if (bytes_recvd <= sizeof(struct pci_response))
+ continue;
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
@@ -1679,8 +1683,7 @@ static void hv_pci_onchannelcallback(void *context)
comp_packet->completion_func(comp_packet->compl_ctxt,
response,
bytes_recvd);
- kfree(buffer);
- return;
+ break;
case VM_PKT_DATA_INBAND:
@@ -1727,8 +1730,9 @@ static void hv_pci_onchannelcallback(void *context)
desc->type, req_id, bytes_recvd);
break;
}
- break;
}
+
+ kfree(buffer);
}
/**
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 6b8301ef21ca..8ba28834d470 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -17,7 +17,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of.h>
@@ -360,7 +360,6 @@ static const struct of_device_id ks_pcie_of_match[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
static int __exit ks_pcie_remove(struct platform_device *pdev)
{
@@ -439,9 +438,4 @@ static struct platform_driver ks_pcie_driver __refdata = {
.of_match_table = of_match_ptr(ks_pcie_of_match),
},
};
-
-module_platform_driver(ks_pcie_driver);
-
-MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
-MODULE_DESCRIPTION("Keystone PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(ks_pcie_driver);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index a21e229d95e0..114ba819277a 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
@@ -211,7 +211,6 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
{ },
};
-MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
static int __init ls_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
@@ -275,9 +274,4 @@ static struct platform_driver ls_pcie_driver = {
.of_match_table = ls_pcie_of_match,
},
};
-
-module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
-
-MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
-MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 6b451df6502c..307f81d6b479 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1,6 +1,8 @@
/*
* PCIe driver for Marvell Armada 370 and Armada XP SoCs
*
+ * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
@@ -11,7 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/msi.h>
#include <linux/slab.h>
@@ -839,25 +841,22 @@ static struct pci_ops mvebu_pcie_ops = {
static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
- int i;
+ int err, i;
pcie->mem.name = "PCI MEM";
pcie->realio.name = "PCI I/O";
- if (request_resource(&iomem_resource, &pcie->mem))
- return 0;
-
- if (resource_size(&pcie->realio) != 0) {
- if (request_resource(&ioport_resource, &pcie->realio)) {
- release_resource(&pcie->mem);
- return 0;
- }
+ if (resource_size(&pcie->realio) != 0)
pci_add_resource_offset(&sys->resources, &pcie->realio,
sys->io_offset);
- }
+
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
+ err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
+ if (err)
+ return 0;
+
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
@@ -1298,7 +1297,6 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
@@ -1314,8 +1312,4 @@ static struct platform_driver mvebu_pcie_driver = {
},
.probe = mvebu_pcie_probe,
};
-module_platform_driver(mvebu_pcie_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell EBU PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(mvebu_pcie_driver);
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 9980a4bdae7e..597566f96f5e 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -4,6 +4,8 @@
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc.
*
+ * Author: Valentine Barshak <valentine.barshak@cogentembedded.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -14,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -97,7 +98,6 @@
struct rcar_pci_priv {
struct device *dev;
void __iomem *reg;
- struct resource io_res;
struct resource mem_res;
struct resource *cfg_res;
unsigned busnr;
@@ -194,6 +194,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
struct rcar_pci_priv *priv = sys->private_data;
void __iomem *reg = priv->reg;
u32 val;
+ int ret;
pm_runtime_enable(priv->dev);
pm_runtime_get_sync(priv->dev);
@@ -273,8 +274,10 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
rcar_pci_setup_errirq(priv);
/* Add PCI resources */
- pci_add_resource(&sys->resources, &priv->io_res);
pci_add_resource(&sys->resources, &priv->mem_res);
+ ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
+ if (ret < 0)
+ return ret;
/* Setup bus number based on platform device id / of bus-range */
sys->busnr = priv->busnr;
@@ -371,14 +374,6 @@ static int rcar_pci_probe(struct platform_device *pdev)
return -ENOMEM;
priv->mem_res = *mem_res;
- /*
- * The controller does not support/use port I/O,
- * so setup a dummy port I/O region here.
- */
- priv->io_res.start = priv->mem_res.start;
- priv->io_res.end = priv->mem_res.end;
- priv->io_res.flags = IORESOURCE_IO;
-
priv->cfg_res = cfg_res;
priv->irq = platform_get_irq(pdev, 0);
@@ -421,6 +416,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw_private[0] = priv;
memset(&hw, 0, sizeof(hw));
hw.nr_controllers = ARRAY_SIZE(hw_private);
+ hw.io_optional = 1;
hw.private_data = hw_private;
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
@@ -437,8 +433,6 @@ static struct of_device_id rcar_pci_of_match[] = {
{ },
};
-MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
-
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
@@ -447,9 +441,4 @@ static struct platform_driver rcar_pci_driver = {
},
.probe = rcar_pci_probe,
};
-
-module_platform_driver(rcar_pci_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
-MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
+builtin_platform_driver(rcar_pci_driver);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index c388468c202a..6de0757b11e4 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -9,6 +9,8 @@
*
* Bits taken from arch/arm/mach-dove/pcie.c
*
+ * Author: Thierry Reding <treding@nvidia.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -32,7 +34,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -183,26 +185,26 @@
#define AFI_PEXBIAS_CTRL_0 0x168
-#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP 0x00000f00
#define RP_VEND_XP_DL_UP (1 << 30)
-#define RP_PRIV_MISC 0x00000FE0
-#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
-#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+#define RP_PRIV_MISC 0x00000fe0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
-#define PADS_CTL_SEL 0x0000009C
+#define PADS_CTL_SEL 0x0000009c
-#define PADS_CTL 0x000000A0
+#define PADS_CTL 0x000000a0
#define PADS_CTL_IDDQ_1L (1 << 0)
#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
-#define PADS_PLL_CTL_TEGRA20 0x000000B8
-#define PADS_PLL_CTL_TEGRA30 0x000000B4
+#define PADS_PLL_CTL_TEGRA20 0x000000b8
+#define PADS_PLL_CTL_TEGRA30 0x000000b4
#define PADS_PLL_CTL_RST_B4SM (1 << 1)
#define PADS_PLL_CTL_LOCKDET (1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
@@ -214,9 +216,9 @@
#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
-#define PADS_REFCLK_CFG0 0x000000C8
-#define PADS_REFCLK_CFG1 0x000000CC
-#define PADS_REFCLK_BIAS 0x000000D0
+#define PADS_REFCLK_CFG0 0x000000c8
+#define PADS_REFCLK_CFG1 0x000000cc
+#define PADS_REFCLK_BIAS 0x000000d0
/*
* Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -228,15 +230,6 @@
#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
-/* Default value provided by HW engineering is 0xfa5c */
-#define PADS_REFCLK_CFG_VALUE \
- ( \
- (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
- (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
- (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
- (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
- )
-
struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -252,6 +245,8 @@ struct tegra_pcie_soc_data {
unsigned int msi_base_shift;
u32 pads_pll_ctl;
u32 tx_ref_sel;
+ u32 pads_refclk_cfg0;
+ u32 pads_refclk_cfg1;
bool has_pex_clkreq_en;
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
@@ -274,7 +269,6 @@ struct tegra_pcie {
struct list_head buses;
struct resource *cs;
- struct resource all;
struct resource io;
struct resource pio;
struct resource mem;
@@ -623,30 +617,21 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
- err = devm_request_resource(pcie->dev, &pcie->all, &pcie->io);
- if (err < 0)
- return err;
-
- err = devm_request_resource(pcie->dev, &ioport_resource, &pcie->pio);
- if (err < 0)
- return err;
-
- err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+ err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
- err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
- if (err)
- return err;
-
pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- pci_ioremap_io(pcie->pio.start, pcie->io.start);
+ err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
+ if (err < 0)
+ return err;
+ pci_remap_iospace(&pcie->pio, pcie->io.start);
return 1;
}
@@ -838,12 +823,6 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
value |= PADS_PLL_CTL_RST_B4SM;
pads_writel(pcie, value, soc->pads_pll_ctl);
- /* Configure the reference clock driver */
- value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
- pads_writel(pcie, value, PADS_REFCLK_CFG0);
- if (soc->num_ports > 2)
- pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
-
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
@@ -927,6 +906,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct tegra_pcie_port *port;
int err;
@@ -952,6 +932,12 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
}
}
+ /* Configure the reference clock driver */
+ pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+ if (soc->num_ports > 2)
+ pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
+
return 0;
}
@@ -1822,12 +1808,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct resource res;
int err;
- memset(&pcie->all, 0, sizeof(pcie->all));
- pcie->all.flags = IORESOURCE_MEM;
- pcie->all.name = np->full_name;
- pcie->all.start = ~0;
- pcie->all.end = 0;
-
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pcie->dev, "missing \"ranges\" property\n");
return -EINVAL;
@@ -1880,18 +1860,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
}
break;
}
-
- if (res.start <= pcie->all.start)
- pcie->all.start = res.start;
-
- if (res.end >= pcie->all.end)
- pcie->all.end = res.end;
}
- err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
- if (err < 0)
- return err;
-
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -2078,6 +2048,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.msi_base_shift = 0,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
.has_pex_clkreq_en = false,
.has_pex_bias_ctrl = false,
.has_intr_prsnt_sense = false,
@@ -2090,6 +2061,8 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
+ .pads_refclk_cfg1 = 0xfa5cfa5c,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2102,6 +2075,7 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = {
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x44ac44ac,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2115,7 +2089,6 @@ static const struct of_device_id tegra_pcie_of_match[] = {
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
@@ -2249,8 +2222,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (err < 0)
return err;
- pcibios_min_mem = 0;
-
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
@@ -2306,8 +2277,4 @@ static struct platform_driver tegra_pcie_driver = {
},
.probe = tegra_pcie_probe,
};
-module_platform_driver(tegra_pcie_driver);
-
-MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra_pcie_driver);
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index 540d030613eb..d50a3dc2d8db 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -7,14 +7,13 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of_pci.h>
#include <linux/of.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
static void set_val(u32 v, int where, int size, u32 *val)
{
int shift = (where & 3) * 8;
@@ -360,7 +359,6 @@ static const struct of_device_id thunder_ecam_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-ecam" },
{ },
};
-MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
static int thunder_ecam_probe(struct platform_device *pdev)
{
@@ -374,7 +372,4 @@ static struct platform_driver thunder_ecam_driver = {
},
.probe = thunder_ecam_probe,
};
-module_platform_driver(thunder_ecam_driver);
-
-MODULE_DESCRIPTION("Thunder ECAM PCI host driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(thunder_ecam_driver);
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 9b8ab94f3c8c..6abaf80ffb39 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -15,13 +15,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
@@ -285,8 +284,9 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg)
+static int thunder_pem_init(struct pci_config_window *cfg)
{
+ struct device *dev = cfg->parent;
resource_size_t bar4_start;
struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
@@ -346,7 +346,6 @@ static const struct of_device_id thunder_pem_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-pem" },
{ },
};
-MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
static int thunder_pem_probe(struct platform_device *pdev)
{
@@ -360,7 +359,4 @@ static struct platform_driver thunder_pem_driver = {
},
.probe = thunder_pem_probe,
};
-module_platform_driver(thunder_pem_driver);
-
-MODULE_DESCRIPTION("Thunder PEM PCIe host driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(thunder_pem_driver);
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index f843a72dc51c..f234405770ab 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -80,21 +80,21 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, res);
+ if (err)
+ goto out_release_res;
+
resource_list_for_each_entry(win, res) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
- parent = &ioport_resource;
err = pci_remap_iospace(res, iobase);
- if (err) {
+ if (err)
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
- continue;
- }
break;
case IORESOURCE_MEM:
- parent = &iomem_resource;
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
writel(res->start >> 28, PCI_IMAP(mem));
@@ -102,23 +102,14 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
mem++;
break;
- case IORESOURCE_BUS:
- default:
- continue;
}
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
}
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
+ if (res_valid)
+ return 0;
- return 0;
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
out_release_res:
pci_free_resource_list(res);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index ae00ce22d5a6..a81273c23341 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -21,7 +21,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -540,14 +540,20 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ if (ret)
+ goto error;
+
ret = xgene_pcie_setup(port, &res, iobase);
if (ret)
- return ret;
+ goto error;
bus = pci_create_root_bus(&pdev->dev, 0,
&xgene_pcie_ops, port, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto error;
+ }
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
@@ -555,6 +561,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return ret;
}
static const struct of_device_id xgene_pcie_match_table[] = {
@@ -569,8 +579,4 @@ static struct platform_driver xgene_pcie_driver = {
},
.probe = xgene_pcie_probe_bridge,
};
-module_platform_driver(xgene_pcie_driver);
-
-MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
-MODULE_DESCRIPTION("APM X-Gene PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(xgene_pcie_driver);
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index dbac6fb3f0bd..2b7837650db8 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -61,6 +61,8 @@
#define TLP_LOOP 500
#define RP_DEVFN 0
+#define LINK_UP_TIMEOUT 5000
+
#define INTX_NUM 4
#define DWORD_MASK 3
@@ -81,9 +83,30 @@ struct tlp_rp_regpair_t {
u32 reg1;
};
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
+
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+ return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
+}
+
static void altera_pcie_retrain(struct pci_dev *dev)
{
u16 linkcap, linkstat;
+ struct altera_pcie *pcie = dev->bus->sysdata;
+ int timeout = 0;
+
+ if (!altera_pcie_link_is_up(pcie))
+ return;
/*
* Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
@@ -95,9 +118,16 @@ static void altera_pcie_retrain(struct pci_dev *dev)
return;
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_RL);
+ while (!altera_pcie_link_is_up(pcie)) {
+ timeout++;
+ if (timeout > LINK_UP_TIMEOUT)
+ break;
+ udelay(5);
+ }
+ }
}
DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
@@ -120,17 +150,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
return false;
}
-static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
- const u32 reg)
-{
- writel_relaxed(value, pcie->cra_base + reg);
-}
-
-static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
-{
- return readl_relaxed(pcie->cra_base + reg);
-}
-
static void tlp_write_tx(struct altera_pcie *pcie,
struct tlp_rp_regpair_t *tlp_rp_regdata)
{
@@ -139,11 +158,6 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
-{
- return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
-}
-
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
@@ -415,11 +429,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie)
-{
- pci_free_resource_list(&pcie->resources);
-}
-
static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
@@ -432,33 +441,25 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (err)
+ goto out_release_res;
+
resource_list_for_each_entry(win, &pcie->resources) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
- switch (resource_type(res)) {
- case IORESOURCE_MEM:
- parent = &iomem_resource;
+ if (resource_type(res) == IORESOURCE_MEM)
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- break;
- default:
- continue;
- }
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
}
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
+ if (res_valid)
+ return 0;
- return 0;
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
out_release_res:
- altera_pcie_release_of_pci_ranges(pcie);
+ pci_free_resource_list(&pcie->resources);
return err;
}
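The reworked retrain path above waits for the LTSSM to report L0 with a bounded busy-wait: up to LINK_UP_TIMEOUT (5000) iterations of udelay(5), roughly 25 ms worst case. A generic sketch of that pattern, with link_is_up() standing in for altera_pcie_link_is_up():

#include <linux/delay.h>

/* Poll a condition with a bounded busy-wait; mirrors the retrain loop above. */
static bool wait_for_link_sketch(bool (*link_is_up)(void *ctx), void *ctx)
{
	int timeout = 0;

	while (!link_is_up(ctx)) {
		if (++timeout > 5000)	/* LINK_UP_TIMEOUT */
			return false;
		udelay(5);		/* 5000 * 5 us ~= 25 ms total */
	}

	return true;
}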
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
index 55723567b5d4..0f4f570068e3 100644
--- a/drivers/pci/host/pcie-armada8k.c
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -5,6 +5,9 @@
*
* Copyright (C) 2016 Marvell Technology Group Ltd.
*
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
@@ -14,7 +17,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -244,7 +247,6 @@ static const struct of_device_id armada8k_pcie_of_match[] = {
{ .compatible = "marvell,armada8k-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
@@ -253,10 +255,4 @@ static struct platform_driver armada8k_pcie_driver = {
.of_match_table = of_match_ptr(armada8k_pcie_of_match),
},
};
-
-module_platform_driver(armada8k_pcie_driver);
-
-MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
-MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
-MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
new file mode 100644
index 000000000000..16ba70b7ec65
--- /dev/null
+++ b/drivers/pci/host/pcie-artpec6.c
@@ -0,0 +1,280 @@
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp)
+
+struct artpec6_pcie {
+ struct pcie_port pp;
+ struct regmap *regmap;
+ void __iomem *phy_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define MISC_CONTROL_1_OFF (PL_OFFSET + 0x1bc)
+#define DBI_RO_WR_EN 1
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG 0x18
+#define PCIECFG_DBG_OEN (1 << 24)
+#define PCIECFG_CORE_RESET_REQ (1 << 21)
+#define PCIECFG_LTSSM_ENABLE (1 << 20)
+#define PCIECFG_CLKREQ_B (1 << 11)
+#define PCIECFG_REFCLK_ENABLE (1 << 10)
+#define PCIECFG_PLL_ENABLE (1 << 9)
+#define PCIECFG_PCLK_ENABLE (1 << 8)
+#define PCIECFG_RISRCREN (1 << 4)
+#define PCIECFG_MODE_TX_DRV_EN (1 << 3)
+#define PCIECFG_CISRREN (1 << 2)
+#define PCIECFG_MACRO_ENABLE (1 << 0)
+
+#define NOCCFG 0x40
+#define NOCCFG_ENABLE_CLK_PCIE (1 << 4)
+#define NOCCFG_POWER_PCIE_IDLEACK (1 << 3)
+#define NOCCFG_POWER_PCIE_IDLE (1 << 2)
+#define NOCCFG_POWER_PCIE_IDLEREQ (1 << 1)
+
+#define PHY_STATUS 0x118
+#define PHY_COSPLLLOCK (1 << 0)
+
+#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
+
+static int artpec6_pcie_establish_link(struct pcie_port *pp)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
+ u32 val;
+ unsigned int retries;
+
+ /* Hold DW core in reset */
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_CORE_RESET_REQ;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
+ PCIECFG_MODE_TX_DRV_EN |
+ PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
+ PCIECFG_MACRO_ENABLE;
+ val |= PCIECFG_REFCLK_ENABLE;
+ val &= ~PCIECFG_DBG_OEN;
+ val &= ~PCIECFG_CLKREQ_B;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ usleep_range(5000, 6000);
+
+ regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val |= NOCCFG_ENABLE_CLK_PCIE;
+ regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+ usleep_range(20, 30);
+
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ usleep_range(6000, 7000);
+
+ regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+ regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ retries--;
+ } while (retries &&
+ (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+ retries--;
+ } while (retries && !(val & PHY_COSPLLLOCK));
+
+ /* Take DW core out of reset */
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val &= ~PCIECFG_CORE_RESET_REQ;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ usleep_range(100, 200);
+
+ /*
+ * Enable writing to config regs. This is required as the Synopsys
+ * driver changes the class code. That register needs DBI write enable.
+ */
+ writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
+
+ pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+ pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+ pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+ pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_LTSSM_ENABLE;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+
+ /* check if the link is up or not */
+ if (!dw_pcie_wait_for_link(pp))
+ return 0;
+
+ dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+
+ return -ETIMEDOUT;
+}
+
+static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+}
+
+static void artpec6_pcie_host_init(struct pcie_port *pp)
+{
+ artpec6_pcie_establish_link(pp);
+ artpec6_pcie_enable_interrupts(pp);
+}
+
+static int artpec6_pcie_link_up(struct pcie_port *pp)
+{
+ u32 rc;
+
+ /*
+ * Get status from Synopsys IP
+ * link is debug bit 36, debug register 1 starts at bit 32
+ */
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+ if (rc)
+ return 1;
+
+ return 0;
+}
+
+static struct pcie_host_ops artpec6_pcie_host_ops = {
+ .link_up = artpec6_pcie_link_up,
+ .host_init = artpec6_pcie_host_init,
+};
+
+static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static int __init artpec6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ artpec6_pcie_msi_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "artpec6-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI irq\n");
+ return ret;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int artpec6_pcie_probe(struct platform_device *pdev)
+{
+ struct artpec6_pcie *artpec6_pcie;
+ struct pcie_port *pp;
+ struct resource *dbi_base;
+ struct resource *phy_base;
+ int ret;
+
+ artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
+ GFP_KERNEL);
+ if (!artpec6_pcie)
+ return -ENOMEM;
+
+ pp = &artpec6_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ if (IS_ERR(artpec6_pcie->phy_base))
+ return PTR_ERR(artpec6_pcie->phy_base);
+
+ artpec6_pcie->regmap =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "axis,syscon-pcie");
+ if (IS_ERR(artpec6_pcie->regmap))
+ return PTR_ERR(artpec6_pcie->regmap);
+
+ ret = artpec6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, artpec6_pcie);
+ return 0;
+}
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+ { .compatible = "axis,artpec6-pcie", },
+ {},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+ .probe = artpec6_pcie_probe,
+ .driver = {
+ .name = "artpec6-pcie",
+ .of_match_table = artpec6_pcie_of_match,
+ },
+};
+builtin_platform_driver(artpec6_pcie_driver);
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index b3500994d08a..c8079dc81c10 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -14,7 +14,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -121,7 +121,6 @@ static const struct of_device_id dw_plat_pcie_of_match[] = {
{ .compatible = "snps,dw-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, dw_plat_pcie_of_match);
static struct platform_driver dw_plat_pcie_driver = {
.driver = {
@@ -130,9 +129,4 @@ static struct platform_driver dw_plat_pcie_driver = {
},
.probe = dw_plat_pcie_probe,
};
-
-module_platform_driver(dw_plat_pcie_driver);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys PCIe host controller glue platform driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(dw_plat_pcie_driver);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index aafd766546f3..12afce19890b 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -452,6 +452,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
+ ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ if (ret)
+ goto error;
+
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
@@ -461,11 +465,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->io_size = resource_size(pp->io);
pp->io_bus_addr = pp->io->start - win->offset;
ret = pci_remap_iospace(pp->io, pp->io_base);
- if (ret) {
+ if (ret)
dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
ret, pp->io);
- continue;
- }
break;
case IORESOURCE_MEM:
pp->mem = win->res;
@@ -483,8 +485,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
case IORESOURCE_BUS:
pp->busn = win->res;
break;
- default:
- continue;
}
}
@@ -493,7 +493,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_size(pp->cfg));
if (!pp->dbi_base) {
dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
}
@@ -504,7 +505,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(pp->dev, "error with ioremap in function\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
}
@@ -513,7 +515,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
}
@@ -528,7 +531,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
&dw_pcie_msi_chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto error;
}
for (i = 0; i < MAX_MSI_IRQS; i++)
@@ -536,7 +540,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else {
ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
if (ret < 0)
- return ret;
+ goto error;
}
}
@@ -552,8 +556,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else
bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
pp, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto error;
+ }
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
@@ -571,6 +577,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
pci_bus_add_devices(bus);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return ret;
}
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 3e98d4edae2d..7ee9dfcc45fb 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -12,7 +12,7 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -235,9 +235,6 @@ static const struct of_device_id hisi_pcie_of_match[] = {
{},
};
-
-MODULE_DEVICE_TABLE(of, hisi_pcie_of_match);
-
static struct platform_driver hisi_pcie_driver = {
.probe = hisi_pcie_probe,
.driver = {
@@ -245,10 +242,4 @@ static struct platform_driver hisi_pcie_driver = {
.of_match_table = hisi_pcie_of_match,
},
};
-
-module_platform_driver(hisi_pcie_driver);
-
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_AUTHOR("Dacai Zhu <zhudacai@hisilicon.com>");
-MODULE_AUTHOR("Gabriele Paoloni <gabriele.paoloni@huawei.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(hisi_pcie_driver);
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index a576aeeb22da..e167b2f0098d 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -462,6 +462,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
+ ret = devm_request_pci_bus_resources(pcie->dev, res);
+ if (ret)
+ return ret;
+
ret = phy_init(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 35092188039b..65db7a221509 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -7,6 +7,8 @@
* arch/sh/drivers/pci/ops-sh7786.c
* Copyright (C) 2009 - 2011 Paul Mundt
*
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
@@ -18,7 +20,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -936,12 +938,6 @@ static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
{},
};
-MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
-
-static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci)
-{
- pci_free_resource_list(&pci->resources);
-}
static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
{
@@ -955,37 +951,25 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, &pci->resources);
+ if (err)
+ goto out_release_res;
+
resource_list_for_each_entry(win, &pci->resources) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- parent = &ioport_resource;
+ if (resource_type(res) == IORESOURCE_IO) {
err = pci_remap_iospace(res, iobase);
- if (err) {
+ if (err)
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
- continue;
- }
- break;
- case IORESOURCE_MEM:
- parent = &iomem_resource;
- break;
-
- case IORESOURCE_BUS:
- default:
- continue;
}
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
}
return 0;
out_release_res:
- rcar_pcie_release_of_pci_ranges(pci);
+ pci_free_resource_list(&pci->resources);
return err;
}
@@ -1073,8 +1057,4 @@ static struct platform_driver rcar_pcie_driver = {
},
.probe = rcar_pcie_probe,
};
-module_platform_driver(rcar_pcie_driver);
-
-MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
-MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(rcar_pcie_driver);
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 3479d30e2be8..0b597d9190b4 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -825,27 +825,33 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
if (err) {
- pr_err("Getting bridge resources failed\n");
+ dev_err(pcie->dev, "Getting bridge resources failed\n");
return err;
}
+ err = devm_request_pci_bus_resources(pcie->dev, &res);
+ if (err)
+ goto error;
+
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(pcie->dev, "Failed creating IRQ Domain\n");
- return err;
+ goto error;
}
bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
&nwl_pcie_ops, pcie, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto error;
+ }
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = nwl_pcie_enable_msi(pcie, bus);
if (err < 0) {
dev_err(&pdev->dev,
"failed to enable MSI support: %d\n", err);
- return err;
+ goto error;
}
}
pci_scan_child_bus(bus);
@@ -855,6 +861,10 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, pcie);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return err;
}
static int nwl_pcie_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 65f0fe0c2eaf..a30e01639557 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -550,7 +550,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
- return PTR_ERR(pcie_intc_node);
+ return -ENODEV;
}
port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
@@ -558,7 +558,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
port);
if (!port->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return PTR_ERR(port->irq_domain);
+ return -ENODEV;
}
/* Setup MSI */
@@ -569,7 +569,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
&xilinx_pcie_msi_chip);
if (!port->irq_domain) {
dev_err(dev, "Failed to get a MSI IRQ domain\n");
- return PTR_ERR(port->irq_domain);
+ return -ENODEV;
}
xilinx_pcie_enable_msi(port);
@@ -660,7 +660,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct xilinx_pcie_port *port;
struct device *dev = &pdev->dev;
struct pci_bus *bus;
-
int err;
resource_size_t iobase = 0;
LIST_HEAD(res);
@@ -694,10 +693,17 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto error;
+
bus = pci_create_root_bus(&pdev->dev, 0,
&xilinx_pcie_ops, port, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto error;
+ }
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = port->dev;
@@ -712,6 +718,10 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return err;
}
/**
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index df8caec59789..aadce45a9b4a 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -113,6 +113,19 @@ config HOTPLUG_PCI_SHPC
When in doubt, say N.
+config HOTPLUG_PCI_POWERNV
+ tristate "PowerPC PowerNV PCI Hotplug driver"
+ depends on PPC_POWERNV && EEH
+ select OF_DYNAMIC
+ help
+	  Say Y here if you run a PowerPC PowerNV platform that supports
+	  PCI hotplug.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pnv-php.
+
+ When in doubt, say N.
+
config HOTPLUG_PCI_RPA
tristate "RPA PCI Hotplug driver"
depends on PPC_PSERIES && EEH
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index b616e7588ff4..e33cdda45a4d 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o
obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o
obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o
obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o
+obj-$(CONFIG_HOTPLUG_PCI_POWERNV) += pnv-php.o
obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
@@ -50,6 +51,8 @@ ibmphp-objs := ibmphp_core.o \
acpiphp-objs := acpiphp_core.o \
acpiphp_glue.o
+pnv-php-objs := pnv_php.o
+
rpaphp-objs := rpaphp_core.o \
rpaphp_pci.o \
rpaphp_slot.o
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fa49f9143b80..a46b585fae31 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -675,6 +675,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
if (bridge->is_going_away)
return;
+ if (bridge->pci_dev)
+ pm_runtime_get_sync(&bridge->pci_dev->dev);
+
list_for_each_entry(slot, &bridge->slots, node) {
struct pci_bus *bus = slot->bus;
struct pci_dev *dev, *tmp;
@@ -694,6 +697,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
disable_slot(slot);
}
}
+
+ if (bridge->pci_dev)
+ pm_runtime_put(&bridge->pci_dev->dev);
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 5c24e938042f..08e84d61874e 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -546,6 +546,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
u8 present;
bool link;
+ /* Interrupts cannot originate from a controller that's asleep */
+ if (pdev->current_state == PCI_D3cold)
+ return IRQ_NONE;
+
/*
* In order to guarantee that all interrupt events are
* serviced, we need to re-inspect Slot Status register after
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
new file mode 100644
index 000000000000..e6245b03f0a1
--- /dev/null
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -0,0 +1,711 @@
+/*
+ * PCI Hotplug Driver for PowerPC PowerNV platform.
+ *
+ * Copyright Gavin Shan, IBM Corporation 2016.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/libfdt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+
+#include <asm/opal.h>
+#include <asm/pnv-pci.h>
+#include <asm/ppc-pci.h>
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation"
+#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver"
+
+static LIST_HEAD(pnv_php_slot_list);
+static DEFINE_SPINLOCK(pnv_php_lock);
+
+static void pnv_php_register(struct device_node *dn);
+static void pnv_php_unregister_one(struct device_node *dn);
+static void pnv_php_unregister(struct device_node *dn);
+
+static void pnv_php_free_slot(struct kref *kref)
+{
+ struct pnv_php_slot *php_slot = container_of(kref,
+ struct pnv_php_slot, kref);
+
+ WARN_ON(!list_empty(&php_slot->children));
+ kfree(php_slot->name);
+ kfree(php_slot);
+}
+
+static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot)
+{
+ if (WARN_ON(!php_slot))
+ return;
+
+ kref_put(&php_slot->kref, pnv_php_free_slot);
+}
+
+static struct pnv_php_slot *pnv_php_match(struct device_node *dn,
+ struct pnv_php_slot *php_slot)
+{
+ struct pnv_php_slot *target, *tmp;
+
+ if (php_slot->dn == dn) {
+ kref_get(&php_slot->kref);
+ return php_slot;
+ }
+
+ list_for_each_entry(tmp, &php_slot->children, link) {
+ target = pnv_php_match(dn, tmp);
+ if (target)
+ return target;
+ }
+
+ return NULL;
+}
+
+struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pnv_php_lock, flags);
+ list_for_each_entry(tmp, &pnv_php_slot_list, link) {
+ php_slot = pnv_php_match(dn, tmp);
+ if (php_slot) {
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+ return php_slot;
+ }
+ }
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pnv_php_find_slot);
+
+/*
+ * Remove the pdn for all children of the indicated device node.
+ * The function removes the pdns in a depth-first manner.
+ */
+static void pnv_php_rmv_pdns(struct device_node *dn)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(dn, child) {
+ pnv_php_rmv_pdns(child);
+
+ pci_remove_device_node_info(child);
+ }
+}
+
+/*
+ * Detach all child nodes of the indicated device node. The
+ * function handles the device nodes in a depth-first manner.
+ *
+ * We should not invoke of_node_release(), as the memory for an
+ * individual device node is part of a larger memory block. The
+ * large block is allocated from memblock (at system boot) or by
+ * kmalloc() when the device tree is unflattened via an OF
+ * changeset. We cannot free a block allocated from memblock; in
+ * the latter case the block should be released all at once.
+ */
+static void pnv_php_detach_device_nodes(struct device_node *parent)
+{
+ struct device_node *dn;
+ int refcount;
+
+ for_each_child_of_node(parent, dn) {
+ pnv_php_detach_device_nodes(dn);
+
+ of_node_put(dn);
+ refcount = atomic_read(&dn->kobj.kref.refcount);
+ if (unlikely(refcount != 1))
+ pr_warn("Invalid refcount %d on <%s>\n",
+ refcount, of_node_full_name(dn));
+
+ of_detach_node(dn);
+ }
+}
+
+static void pnv_php_rmv_devtree(struct pnv_php_slot *php_slot)
+{
+ pnv_php_rmv_pdns(php_slot->dn);
+
+ /*
+	 * Before detaching the device nodes, decrease their refcounts
+	 * if they were created through an OF changeset.
+ */
+ if (php_slot->fdt)
+ of_changeset_destroy(&php_slot->ocs);
+ pnv_php_detach_device_nodes(php_slot->dn);
+
+ if (php_slot->fdt) {
+ kfree(php_slot->dt);
+ kfree(php_slot->fdt);
+ php_slot->dt = NULL;
+ php_slot->dn->child = NULL;
+ php_slot->fdt = NULL;
+ }
+}
+
+/*
+ * As the nodes in an OF changeset are applied in reverse order, we
+ * need to reverse the nodes in advance so that the node order is
+ * correct after the changeset has been applied.
+ */
+static void pnv_php_reverse_nodes(struct device_node *parent)
+{
+ struct device_node *child, *next;
+
+	/* Depth first: reverse the children before this level */
+ for_each_child_of_node(parent, child)
+ pnv_php_reverse_nodes(child);
+
+ /* Reverse the nodes in the child list */
+ child = parent->child;
+ parent->child = NULL;
+ while (child) {
+ next = child->sibling;
+
+ child->sibling = parent->child;
+ parent->child = child;
+ child = next;
+ }
+}
+
+static int pnv_php_populate_changeset(struct of_changeset *ocs,
+ struct device_node *dn)
+{
+ struct device_node *child;
+ int ret = 0;
+
+ for_each_child_of_node(dn, child) {
+ ret = of_changeset_attach_node(ocs, child);
+ if (unlikely(ret))
+ break;
+
+ ret = pnv_php_populate_changeset(ocs, child);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static void *pnv_php_add_one_pdn(struct device_node *dn, void *data)
+{
+ struct pci_controller *hose = (struct pci_controller *)data;
+ struct pci_dn *pdn;
+
+ pdn = pci_add_device_node_info(hose, dn);
+ if (unlikely(!pdn))
+ return ERR_PTR(-ENOMEM);
+
+ return NULL;
+}
+
+static void pnv_php_add_pdns(struct pnv_php_slot *slot)
+{
+ struct pci_controller *hose = pci_bus_to_host(slot->bus);
+
+ pci_traverse_device_nodes(slot->dn, pnv_php_add_one_pdn, hose);
+}
+
+static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
+{
+ void *fdt, *fdt1, *dt;
+ int ret;
+
+	/*
+	 * We don't know the FDT blob size, so we read it into a maximally
+	 * sized chunk and then copy it into another chunk that fits the
+	 * real size.
+	 */
+ fdt1 = kzalloc(0x10000, GFP_KERNEL);
+ if (unlikely(!fdt1)) {
+ ret = -ENOMEM;
+ dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n");
+ goto out;
+ }
+
+ ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n",
+ ret);
+ goto free_fdt1;
+ }
+
+ fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
+ if (unlikely(!fdt)) {
+ ret = -ENOMEM;
+		dev_warn(&php_slot->pdev->dev, "Cannot allocate %d bytes of memory\n",
+ fdt_totalsize(fdt1));
+ goto free_fdt1;
+ }
+
+ /* Unflatten device tree blob */
+ memcpy(fdt, fdt1, fdt_totalsize(fdt1));
+ dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
+ if (unlikely(!dt)) {
+ ret = -EINVAL;
+ dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n");
+ goto free_fdt;
+ }
+
+ /* Initialize and apply the changeset */
+ of_changeset_init(&php_slot->ocs);
+ pnv_php_reverse_nodes(php_slot->dn);
+ ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
+ if (unlikely(ret)) {
+ pnv_php_reverse_nodes(php_slot->dn);
+ dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n",
+ ret);
+ goto free_dt;
+ }
+
+ php_slot->dn->child = NULL;
+ ret = of_changeset_apply(&php_slot->ocs);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n",
+ ret);
+ goto destroy_changeset;
+ }
+
+ /* Add device node firmware data */
+ pnv_php_add_pdns(php_slot);
+ php_slot->fdt = fdt;
+ php_slot->dt = dt;
+ kfree(fdt1);
+ goto out;
+
+destroy_changeset:
+ of_changeset_destroy(&php_slot->ocs);
+free_dt:
+ kfree(dt);
+ php_slot->dn->child = NULL;
+free_fdt:
+ kfree(fdt);
+free_fdt1:
+ kfree(fdt1);
+out:
+ return ret;
+}
+
+int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
+ uint8_t state)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ struct opal_msg msg;
+ int ret;
+
+ ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
+ if (likely(ret > 0)) {
+ if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
+ be64_to_cpu(msg.params[2]) != state ||
+ be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
+ dev_warn(&php_slot->pdev->dev, "Wrong msg (%lld, %lld, %lld)\n",
+ be64_to_cpu(msg.params[1]),
+ be64_to_cpu(msg.params[2]),
+ be64_to_cpu(msg.params[3]));
+ return -ENOMSG;
+ }
+ } else if (unlikely(ret < 0)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n",
+ ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
+ return ret;
+ }
+
+ if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE)
+ pnv_php_rmv_devtree(php_slot);
+ else
+ ret = pnv_php_add_devtree(php_slot);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
+
+static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ uint8_t power_state = OPAL_PCI_SLOT_POWER_ON;
+ int ret;
+
+ /*
+	 * Retrieve the power status from firmware. If we fail to get
+	 * it, the power status falls back to on.
+ */
+ ret = pnv_pci_get_power_state(php_slot->id, &power_state);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n",
+ ret);
+ } else {
+ *state = power_state;
+ slot->info->power_status = power_state;
+ }
+
+ return 0;
+}
+
+static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ uint8_t presence = OPAL_PCI_SLOT_EMPTY;
+ int ret;
+
+ /*
+	 * Retrieve the presence status from firmware. If we can't
+	 * get it, the state falls back to empty.
+ */
+ ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+ if (likely(ret >= 0)) {
+ *state = presence;
+ slot->info->adapter_status = presence;
+ ret = 0;
+ } else {
+ dev_warn(&php_slot->pdev->dev, "Error %d getting presence\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
+{
+ /* FIXME: Make it real once firmware supports it */
+ slot->info->attention_status = state;
+
+ return 0;
+}
+
+static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
+{
+ struct hotplug_slot *slot = &php_slot->slot;
+ uint8_t presence = OPAL_PCI_SLOT_EMPTY;
+ uint8_t power_status = OPAL_PCI_SLOT_POWER_ON;
+ int ret;
+
+ /* Check if the slot has been configured */
+ if (php_slot->state != PNV_PHP_STATE_REGISTERED)
+ return 0;
+
+ /* Retrieve slot presence status */
+ ret = pnv_php_get_adapter_state(slot, &presence);
+ if (unlikely(ret))
+ return ret;
+
+	/* Proceed if there is nothing behind the slot */
+ if (presence == OPAL_PCI_SLOT_EMPTY)
+ goto scan;
+
+ /*
+	 * If the power supply to the slot is off, we can't detect the
+	 * adapter presence state. That means we have to turn the slot
+	 * on before probing the slot's presence state.
+	 *
+	 * The first time through, we don't change the power status, to
+	 * speed up system boot, on the assumption that the firmware
+	 * supplies a consistent slot power status: an empty slot always
+	 * has its power off and a non-empty slot has its power on.
+ */
+ if (!php_slot->power_state_check) {
+ php_slot->power_state_check = true;
+
+ ret = pnv_php_get_power_state(slot, &power_status);
+ if (unlikely(ret))
+ return ret;
+
+ if (power_status != OPAL_PCI_SLOT_POWER_ON)
+ return 0;
+ }
+
+ /* Check the power status. Scan the slot if it is already on */
+ ret = pnv_php_get_power_state(slot, &power_status);
+ if (unlikely(ret))
+ return ret;
+
+ if (power_status == OPAL_PCI_SLOT_POWER_ON)
+ goto scan;
+
+ /* Power is off, turn it on and then scan the slot */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ if (unlikely(ret))
+ return ret;
+
+scan:
+ if (presence == OPAL_PCI_SLOT_PRESENT) {
+ if (rescan) {
+ pci_lock_rescan_remove();
+ pci_hp_add_devices(php_slot->bus);
+ pci_unlock_rescan_remove();
+ }
+
+ /* Rescan for child hotpluggable slots */
+ php_slot->state = PNV_PHP_STATE_POPULATED;
+ if (rescan)
+ pnv_php_register(php_slot->dn);
+ } else {
+ php_slot->state = PNV_PHP_STATE_POPULATED;
+ }
+
+ return 0;
+}
+
+static int pnv_php_enable_slot(struct hotplug_slot *slot)
+{
+ struct pnv_php_slot *php_slot = container_of(slot,
+ struct pnv_php_slot, slot);
+
+ return pnv_php_enable(php_slot, true);
+}
+
+static int pnv_php_disable_slot(struct hotplug_slot *slot)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ int ret;
+
+ if (php_slot->state != PNV_PHP_STATE_POPULATED)
+ return 0;
+
+ /* Remove all devices behind the slot */
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(php_slot->bus);
+ pci_unlock_rescan_remove();
+
+ /* Detach the child hotpluggable slots */
+ pnv_php_unregister(php_slot->dn);
+
+ /* Notify firmware and remove device nodes */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_OFF);
+
+ php_slot->state = PNV_PHP_STATE_REGISTERED;
+ return ret;
+}
+
+static struct hotplug_slot_ops php_slot_ops = {
+ .get_power_status = pnv_php_get_power_state,
+ .get_adapter_status = pnv_php_get_adapter_state,
+ .set_attention_status = pnv_php_set_attention_state,
+ .enable_slot = pnv_php_enable_slot,
+ .disable_slot = pnv_php_disable_slot,
+};
+
+static void pnv_php_release(struct hotplug_slot *slot)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ unsigned long flags;
+
+ /* Remove from global or child list */
+ spin_lock_irqsave(&pnv_php_lock, flags);
+ list_del(&php_slot->link);
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+
+ /* Detach from parent */
+ pnv_php_put_slot(php_slot);
+ pnv_php_put_slot(php_slot->parent);
+}
+
+static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot;
+ struct pci_bus *bus;
+ const char *label;
+ uint64_t id;
+
+ label = of_get_property(dn, "ibm,slot-label", NULL);
+ if (unlikely(!label))
+ return NULL;
+
+ if (pnv_pci_get_slot_id(dn, &id))
+ return NULL;
+
+ bus = pci_find_bus_by_node(dn);
+ if (unlikely(!bus))
+ return NULL;
+
+ php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL);
+ if (unlikely(!php_slot))
+ return NULL;
+
+ php_slot->name = kstrdup(label, GFP_KERNEL);
+ if (unlikely(!php_slot->name)) {
+ kfree(php_slot);
+ return NULL;
+ }
+
+ if (likely(dn->child && PCI_DN(dn->child)))
+ php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
+ else
+ php_slot->slot_no = -1; /* Placeholder slot */
+
+ kref_init(&php_slot->kref);
+ php_slot->state = PNV_PHP_STATE_INITIALIZED;
+ php_slot->dn = dn;
+ php_slot->pdev = bus->self;
+ php_slot->bus = bus;
+ php_slot->id = id;
+ php_slot->power_state_check = false;
+ php_slot->slot.ops = &php_slot_ops;
+ php_slot->slot.info = &php_slot->slot_info;
+ php_slot->slot.release = pnv_php_release;
+ php_slot->slot.private = php_slot;
+
+ INIT_LIST_HEAD(&php_slot->children);
+ INIT_LIST_HEAD(&php_slot->link);
+
+ return php_slot;
+}
+
+static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
+{
+ struct pnv_php_slot *parent;
+ struct device_node *dn = php_slot->dn;
+ unsigned long flags;
+ int ret;
+
+ /* Check if the slot is registered or not */
+ parent = pnv_php_find_slot(php_slot->dn);
+ if (unlikely(parent)) {
+ pnv_php_put_slot(parent);
+ return -EEXIST;
+ }
+
+ /* Register PCI slot */
+ ret = pci_hp_register(&php_slot->slot, php_slot->bus,
+ php_slot->slot_no, php_slot->name);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n",
+ ret);
+ return ret;
+ }
+
+ /* Attach to the parent's child list or global list */
+ while ((dn = of_get_parent(dn))) {
+ if (!PCI_DN(dn)) {
+ of_node_put(dn);
+ break;
+ }
+
+ parent = pnv_php_find_slot(dn);
+ if (parent) {
+ of_node_put(dn);
+ break;
+ }
+
+ of_node_put(dn);
+ }
+
+ spin_lock_irqsave(&pnv_php_lock, flags);
+ php_slot->parent = parent;
+ if (parent)
+ list_add_tail(&php_slot->link, &parent->children);
+ else
+ list_add_tail(&php_slot->link, &pnv_php_slot_list);
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+
+ php_slot->state = PNV_PHP_STATE_REGISTERED;
+ return 0;
+}
+
+static int pnv_php_register_one(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot;
+ const __be32 *prop32;
+ int ret;
+
+	/* Check if it's a hotpluggable slot */
+ prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL);
+ if (!prop32 || !of_read_number(prop32, 1))
+ return -ENXIO;
+
+ prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL);
+ if (!prop32 || !of_read_number(prop32, 1))
+ return -ENXIO;
+
+ php_slot = pnv_php_alloc_slot(dn);
+ if (unlikely(!php_slot))
+ return -ENODEV;
+
+ ret = pnv_php_register_slot(php_slot);
+ if (unlikely(ret))
+ goto free_slot;
+
+ ret = pnv_php_enable(php_slot, false);
+ if (unlikely(ret))
+ goto unregister_slot;
+
+ return 0;
+
+unregister_slot:
+ pnv_php_unregister_one(php_slot->dn);
+free_slot:
+ pnv_php_put_slot(php_slot);
+ return ret;
+}
+
+static void pnv_php_register(struct device_node *dn)
+{
+ struct device_node *child;
+
+ /*
+ * The parent slots should be registered before their
+ * child slots.
+ */
+ for_each_child_of_node(dn, child) {
+ pnv_php_register_one(child);
+ pnv_php_register(child);
+ }
+}
+
+static void pnv_php_unregister_one(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot;
+
+ php_slot = pnv_php_find_slot(dn);
+ if (!php_slot)
+ return;
+
+ php_slot->state = PNV_PHP_STATE_OFFLINE;
+ pnv_php_put_slot(php_slot);
+ pci_hp_deregister(&php_slot->slot);
+}
+
+static void pnv_php_unregister(struct device_node *dn)
+{
+ struct device_node *child;
+
+ /* The child slots should go before their parent slots */
+ for_each_child_of_node(dn, child) {
+ pnv_php_unregister(child);
+ pnv_php_unregister_one(child);
+ }
+}
+
+static int __init pnv_php_init(void)
+{
+ struct device_node *dn;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ for_each_compatible_node(dn, NULL, "ibm,ioda2-phb")
+ pnv_php_register(dn);
+
+ return 0;
+}
+
+static void __exit pnv_php_exit(void)
+{
+ struct device_node *dn;
+
+ for_each_compatible_node(dn, NULL, "ibm,ioda2-phb")
+ pnv_php_unregister(dn);
+}
+
+module_init(pnv_php_init);
+module_exit(pnv_php_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
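pnv_php manages slot lifetimes with a kref: pnv_php_alloc_slot() does kref_init(), pnv_php_match() takes an extra reference with kref_get(), and pnv_php_put_slot() drops one, calling pnv_php_free_slot() once the count hits zero. A stripped-down sketch of that pattern, using the hypothetical name "foo" rather than the driver's own types:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* ... payload ... */
};

/* Called by kref_put() once the last reference is gone. */
static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	kfree(f);
}

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		kref_init(&f->kref);	/* refcount starts at 1 */
	return f;
}

static void foo_get(struct foo *f)
{
	kref_get(&f->kref);		/* as in pnv_php_match() */
}

static void foo_put(struct foo *f)
{
	kref_put(&f->kref, foo_release);	/* as in pnv_php_put_slot() */
}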
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 6937c725b00b..388c4d8fcdd1 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -117,8 +117,10 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
int rpaphp_register_slot(struct slot *slot)
{
struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct device_node *child;
+ u32 my_index;
int retval;
- int slotno;
+ int slotno = -1;
dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
__func__, slot->dn->full_name, slot->index, slot->name,
@@ -130,10 +132,15 @@ int rpaphp_register_slot(struct slot *slot)
return -EAGAIN;
}
- if (slot->dn->child)
- slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
- else
- slotno = -1;
+ for_each_child_of_node(slot->dn, child) {
+ retval = of_property_read_u32(child, "ibm,my-drc-index", &my_index);
+ if (my_index == slot->index) {
+ slotno = PCI_SLOT(PCI_DN(child)->devfn);
+ of_node_put(child);
+ break;
+ }
+ }
+
retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
if (retval) {
err("pci_hp_register failed with error %d\n", retval);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a080f4496fe2..a02981efdad5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2003-2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Copyright (C) 2016 Christoph Hellwig.
*/
#include <linux/err.h>
@@ -207,6 +208,12 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}
+static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+ return desc->mask_base +
+ desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+}
+
/*
* This internal function does not flush PCI writes to the device.
* All users must ensure that they read from the device before either
@@ -217,8 +224,6 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
- unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
if (pci_msi_ignore_mask)
return 0;
@@ -226,7 +231,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
if (flag)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
- writel(mask_bits, desc->mask_base + offset);
+ writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
return mask_bits;
}
@@ -284,8 +289,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
BUG_ON(dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
- void __iomem *base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ void __iomem *base = pci_msix_desc_addr(entry);
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -315,9 +319,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
if (dev->current_state != PCI_D0) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
- void __iomem *base;
- base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ void __iomem *base = pci_msix_desc_addr(entry);
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -567,6 +569,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
entry->nvec_used = nvec;
+ entry->affinity = dev->irq_affinity;
if (control & PCI_MSI_FLAGS_64BIT)
entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -678,10 +681,18 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec)
{
+ const struct cpumask *mask = NULL;
struct msi_desc *entry;
- int i;
+ int cpu = -1, i;
for (i = 0; i < nvec; i++) {
+ if (dev->irq_affinity) {
+ cpu = cpumask_next(cpu, dev->irq_affinity);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(dev->irq_affinity);
+ mask = cpumask_of(cpu);
+ }
+
entry = alloc_msi_entry(&dev->dev);
if (!entry) {
if (!i)
@@ -694,10 +705,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
- entry->msi_attrib.entry_nr = entries[i].entry;
+ if (entries)
+ entry->msi_attrib.entry_nr = entries[i].entry;
+ else
+ entry->msi_attrib.entry_nr = i;
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
entry->nvec_used = 1;
+ entry->affinity = mask;
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
}
@@ -712,13 +727,11 @@ static void msix_program_entries(struct pci_dev *dev,
int i = 0;
for_each_pci_msi_entry(entry, dev) {
- int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- entries[i].vector = entry->irq;
- entry->masked = readl(entry->mask_base + offset);
+ if (entries)
+ entries[i++].vector = entry->irq;
+ entry->masked = readl(pci_msix_desc_addr(entry) +
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
msix_mask_irq(entry, 1);
- i++;
}
}
@@ -931,7 +944,7 @@ EXPORT_SYMBOL(pci_msix_vec_count);
/**
* pci_enable_msix - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries
+ * @entries: pointer to an array of MSI-X entries (optional)
* @nvec: number of MSI-X irqs requested for allocation by device driver
*
* Setup the MSI-X capability structure of device function with the number
@@ -951,22 +964,21 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
if (!pci_msi_supported(dev, nvec))
return -EINVAL;
- if (!entries)
- return -EINVAL;
-
nr_entries = pci_msix_vec_count(dev);
if (nr_entries < 0)
return nr_entries;
if (nvec > nr_entries)
return nr_entries;
- /* Check for any invalid entries */
- for (i = 0; i < nvec; i++) {
- if (entries[i].entry >= nr_entries)
- return -EINVAL; /* invalid entry */
- for (j = i + 1; j < nvec; j++) {
- if (entries[i].entry == entries[j].entry)
- return -EINVAL; /* duplicate entry */
+ if (entries) {
+ /* Check for any invalid entries */
+ for (i = 0; i < nvec; i++) {
+ if (entries[i].entry >= nr_entries)
+ return -EINVAL; /* invalid entry */
+ for (j = i + 1; j < nvec; j++) {
+ if (entries[i].entry == entries[j].entry)
+ return -EINVAL; /* duplicate entry */
+ }
}
}
WARN_ON(!!dev->msix_enabled);
@@ -1026,19 +1038,8 @@ int pci_msi_enabled(void)
}
EXPORT_SYMBOL(pci_msi_enabled);
-/**
- * pci_enable_msi_range - configure device's MSI capability structure
- * @dev: device to configure
- * @minvec: minimal number of interrupts to configure
- * @maxvec: maximum number of interrupts to configure
- *
- * This function tries to allocate a maximum possible number of interrupts in a
- * range between @minvec and @maxvec. It returns a negative errno if an error
- * occurs. If it succeeds, it returns the actual number of interrupts allocated
- * and updates the @dev's irq member to the lowest new interrupt number;
- * the other interrupt numbers allocated to this device are consecutive.
- **/
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
+ unsigned int flags)
{
int nvec;
int rc;
@@ -1061,25 +1062,85 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
- else if (nvec < minvec)
+ if (nvec < minvec)
return -EINVAL;
- else if (nvec > maxvec)
+
+ if (nvec > maxvec)
nvec = maxvec;
- do {
+ for (;;) {
+ if (!(flags & PCI_IRQ_NOAFFINITY)) {
+ dev->irq_affinity = irq_create_affinity_mask(&nvec);
+ if (nvec < minvec)
+ return -ENOSPC;
+ }
+
rc = msi_capability_init(dev, nvec);
- if (rc < 0) {
+ if (rc == 0)
+ return nvec;
+
+ kfree(dev->irq_affinity);
+ dev->irq_affinity = NULL;
+
+ if (rc < 0)
return rc;
- } else if (rc > 0) {
- if (rc < minvec)
+ if (rc < minvec)
+ return -ENOSPC;
+
+ nvec = rc;
+ }
+}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate a maximum possible number of interrupts in a
+ * range between @minvec and @maxvec. It returns a negative errno if an error
+ * occurs. If it succeeds, it returns the actual number of interrupts allocated
+ * and updates the @dev's irq member to the lowest new interrupt number;
+ * the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+ return __pci_enable_msi_range(dev, minvec, maxvec, PCI_IRQ_NOAFFINITY);
+}
+EXPORT_SYMBOL(pci_enable_msi_range);
+
+static int __pci_enable_msix_range(struct pci_dev *dev,
+ struct msix_entry *entries, int minvec, int maxvec,
+ unsigned int flags)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ for (;;) {
+ if (!(flags & PCI_IRQ_NOAFFINITY)) {
+ dev->irq_affinity = irq_create_affinity_mask(&nvec);
+ if (nvec < minvec)
return -ENOSPC;
- nvec = rc;
}
- } while (rc);
- return nvec;
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc == 0)
+ return nvec;
+
+ kfree(dev->irq_affinity);
+ dev->irq_affinity = NULL;
+
+ if (rc < 0)
+ return rc;
+ if (rc < minvec)
+ return -ENOSPC;
+
+ nvec = rc;
+ }
}
-EXPORT_SYMBOL(pci_enable_msi_range);
/**
* pci_enable_msix_range - configure device's MSI-X capability structure
@@ -1097,28 +1158,101 @@ EXPORT_SYMBOL(pci_enable_msi_range);
* with new allocated MSI-X interrupts.
**/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
- int minvec, int maxvec)
+ int minvec, int maxvec)
{
- int nvec = maxvec;
- int rc;
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec,
+ PCI_IRQ_NOAFFINITY);
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
- if (maxvec < minvec)
- return -ERANGE;
+/**
+ * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * @dev: PCI device to operate on
+ * @min_vecs: minimum number of vectors required (must be >= 1)
+ * @max_vecs: maximum (desired) number of vectors
+ * @flags: flags or quirks for the allocation
+ *
+ * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
+ * vectors if available, and fall back to a single legacy vector
+ * if neither is available. Return the number of vectors allocated,
+ * (which might be smaller than @max_vecs) if successful, or a negative
+ * error code on error. If less than @min_vecs interrupt vectors are
+ * available for @dev the function will fail with -ENOSPC.
+ *
+ * To get the Linux IRQ number used for a vector that can be passed to
+ * request_irq() use the pci_irq_vector() helper.
+ */
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ int vecs = -ENOSPC;
- do {
- rc = pci_enable_msix(dev, entries, nvec);
- if (rc < 0) {
- return rc;
- } else if (rc > 0) {
- if (rc < minvec)
- return -ENOSPC;
- nvec = rc;
+ if (!(flags & PCI_IRQ_NOMSIX)) {
+ vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+ flags);
+ if (vecs > 0)
+ return vecs;
+ }
+
+ if (!(flags & PCI_IRQ_NOMSI)) {
+ vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+ if (vecs > 0)
+ return vecs;
+ }
+
+ /* use legacy irq if allowed */
+ if (!(flags & PCI_IRQ_NOLEGACY) && min_vecs == 1)
+ return 1;
+ return vecs;
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors);
+
+/**
+ * pci_free_irq_vectors - free previously allocated IRQs for a device
+ * @dev: PCI device to operate on
+ *
+ * Undoes the allocations and enabling in pci_alloc_irq_vectors().
+ */
+void pci_free_irq_vectors(struct pci_dev *dev)
+{
+ pci_disable_msix(dev);
+ pci_disable_msi(dev);
+}
+EXPORT_SYMBOL(pci_free_irq_vectors);
+
+/**
+ * pci_irq_vector - return Linux IRQ number of a device vector
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ */
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+ if (dev->msix_enabled) {
+ struct msi_desc *entry;
+ int i = 0;
+
+ for_each_pci_msi_entry(entry, dev) {
+ if (i == nr)
+ return entry->irq;
+ i++;
}
- } while (rc);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
- return nvec;
+ if (dev->msi_enabled) {
+ struct msi_desc *entry = first_pci_msi_entry(dev);
+
+ if (WARN_ON_ONCE(nr >= entry->nvec_used))
+ return -EINVAL;
+ } else {
+ if (WARN_ON_ONCE(nr > 0))
+ return -EINVAL;
+ }
+
+ return dev->irq + nr;
}
-EXPORT_SYMBOL(pci_enable_msix_range);
+EXPORT_SYMBOL(pci_irq_vector);
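As a quick illustration of the new trio (pci_alloc_irq_vectors(), pci_irq_vector(), pci_free_irq_vectors()), here is a minimal, hypothetical driver fragment; the foo_* names, the vector counts and the flags value 0 (all interrupt types allowed, affinity spreading on) are assumptions for the sketch, not part of this patch.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev, void *foo_priv)
{
	int i, err, nvec;

	/* Ask for up to 8 vectors, accept as few as 1; with flags == 0
	 * MSI-X, MSI and the legacy INTx fallback are all allowed. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, 0);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps the device-relative index to the
		 * Linux IRQ number, whatever mode was actually used. */
		err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
				  0, "foo", foo_priv);
		if (err)
			goto err_free;
	}

	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), foo_priv);
	pci_free_irq_vectors(pdev);
	return err;
}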
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d7ffd66814bb..e39a67c8ef39 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -777,7 +777,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
- if (!pci_has_subordinate(pci_dev))
+ if (pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
@@ -1144,7 +1144,6 @@ static int pci_pm_runtime_suspend(struct device *dev)
return -ENOSYS;
pci_dev->state_saved = false;
- pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
if (error) {
/*
@@ -1161,8 +1160,6 @@ static int pci_pm_runtime_suspend(struct device *dev)
return error;
}
- if (!pci_dev->d3cold_allowed)
- pci_dev->no_d3cold = true;
pci_fixup_device(pci_fixup_suspend, pci_dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index d319a9ca9b7b..bcd10c795284 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -406,6 +406,11 @@ static ssize_t d3cold_allowed_store(struct device *dev,
return -EINVAL;
pdev->d3cold_allowed = !!val;
+ if (pdev->d3cold_allowed)
+ pci_d3cold_enable(pdev);
+ else
+ pci_d3cold_disable(pdev);
+
pm_runtime_resume(dev);
return count;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index badbddc683f0..aab9d5115a5f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -7,8 +7,10 @@
* Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
@@ -25,7 +27,9 @@
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
+#include <linux/vmalloc.h>
#include <asm/setup.h>
+#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"
@@ -81,6 +85,9 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+#define DEFAULT_HOTPLUG_BUS_SIZE 1
+unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
+
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
/*
@@ -101,6 +108,21 @@ unsigned int pcibios_max_latency = 255;
/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
+/* Disable bridge_d3 for all PCIe ports */
+static bool pci_bridge_d3_disable;
+/* Force bridge_d3 for all PCIe ports */
+static bool pci_bridge_d3_force;
+
+static int __init pcie_port_pm_setup(char *str)
+{
+ if (!strcmp(str, "off"))
+ pci_bridge_d3_disable = true;
+ else if (!strcmp(str, "force"))
+ pci_bridge_d3_force = true;
+ return 1;
+}
+__setup("pcie_port_pm=", pcie_port_pm_setup);
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -2156,6 +2178,164 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
}
/**
+ * pci_bridge_d3_possible - Is it possible to put the bridge into D3
+ * @bridge: Bridge to check
+ *
+ * This function checks if it is possible to move the bridge to D3.
+ * Currently we only allow D3 for recent enough PCIe ports.
+ */
+static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+{
+ unsigned int year;
+
+ if (!pci_is_pcie(bridge))
+ return false;
+
+ switch (pci_pcie_type(bridge)) {
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ if (pci_bridge_d3_disable)
+ return false;
+ if (pci_bridge_d3_force)
+ return true;
+
+ /*
+ * It should be safe to put PCIe ports from 2015 or newer
+ * to D3.
+ */
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+ year >= 2015) {
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
+{
+ bool *d3cold_ok = data;
+ bool no_d3cold;
+
+ /*
+ * The device needs to be allowed to go to D3cold and, if it is wake
+ * capable, to be able to signal wakeup from D3cold.
+ */
+ no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
+ (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
+ !pci_power_manageable(dev);
+
+ *d3cold_ok = !no_d3cold;
+
+ return no_d3cold;
+}
+
+/*
+ * pci_bridge_d3_update - Update bridge D3 capabilities
+ * @dev: PCI device which is changed
+ * @remove: Is the device being removed
+ *
+ * Update upstream bridge PM capabilities according to whether the device's
+ * PM configuration was changed or the device is being removed. The
+ * change is also propagated upstream.
+ */
+static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+{
+ struct pci_dev *bridge;
+ bool d3cold_ok = true;
+
+ bridge = pci_upstream_bridge(dev);
+ if (!bridge || !pci_bridge_d3_possible(bridge))
+ return;
+
+ pci_dev_get(bridge);
+ /*
+ * If the device is removed we do not care about its D3cold
+ * capabilities.
+ */
+ if (!remove)
+ pci_dev_check_d3cold(dev, &d3cold_ok);
+
+ if (d3cold_ok) {
+ /*
+ * We need to go through all children to find out if all of
+ * them can still go to D3cold.
+ */
+ pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
+ &d3cold_ok);
+ }
+
+ if (bridge->bridge_d3 != d3cold_ok) {
+ bridge->bridge_d3 = d3cold_ok;
+ /* Propagate change to upstream bridges */
+ pci_bridge_d3_update(bridge, false);
+ }
+
+ pci_dev_put(bridge);
+}
+
+/**
+ * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
+ * @dev: PCI device that was changed
+ *
+ * If a device is added or its PM configuration, such as whether it is
+ * allowed to enter D3cold, is changed, this function updates upstream
+ * bridge PM capabilities accordingly.
+ */
+void pci_bridge_d3_device_changed(struct pci_dev *dev)
+{
+ pci_bridge_d3_update(dev, false);
+}
+
+/**
+ * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
+ * @dev: PCI device being removed
+ *
+ * Function updates upstream bridge PM capabilities based on other devices
+ * still left on the bus.
+ */
+void pci_bridge_d3_device_removed(struct pci_dev *dev)
+{
+ pci_bridge_d3_update(dev, true);
+}
+
+/**
+ * pci_d3cold_enable - Enable D3cold for device
+ * @dev: PCI device to handle
+ *
+ * This function can be used in drivers to enable D3cold from the device
+ * they handle. It also updates upstream PCI bridge PM capabilities
+ * accordingly.
+ */
+void pci_d3cold_enable(struct pci_dev *dev)
+{
+ if (dev->no_d3cold) {
+ dev->no_d3cold = false;
+ pci_bridge_d3_device_changed(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_d3cold_enable);
+
+/**
+ * pci_d3cold_disable - Disable D3cold for device
+ * @dev: PCI device to handle
+ *
+ * This function can be used in drivers to disable D3cold from the device
+ * they handle. It also updates upstream PCI bridge PM capabilities
+ * accordingly.
+ */
+void pci_d3cold_disable(struct pci_dev *dev)
+{
+ if (!dev->no_d3cold) {
+ dev->no_d3cold = true;
+ pci_bridge_d3_device_changed(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_d3cold_disable);
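The d3cold_allowed sysfs change in pci-sysfs.c above already routes userspace writes through these two helpers. The fragment below is only a hedged sketch of how a driver itself might use them around an operation during which D3cold must not happen; the foo_* name is hypothetical.

#include <linux/pci.h>

static int foo_update_firmware(struct pci_dev *pdev)
{
	pci_d3cold_disable(pdev);	/* also updates upstream bridge_d3 */

	/* ... firmware download, during which D3cold must be avoided ... */

	pci_d3cold_enable(pdev);	/* restore the default behaviour */
	return 0;
}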
+
+/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
*/
@@ -2189,6 +2369,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+ dev->bridge_d3 = pci_bridge_d3_possible(dev);
dev->d3cold_allowed = true;
dev->d1_support = false;
@@ -3165,6 +3346,23 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
#endif
}
+/**
+ * pci_unmap_iospace - Unmap the memory mapped I/O space
+ * @res: resource to be unmapped
+ *
+ * Unmap the CPU virtual address @res from virtual address space.
+ * Only architectures that have memory mapped IO functions defined
+ * (and the PCI_IOBASE value defined) should call this function.
+ */
+void pci_unmap_iospace(struct resource *res)
+{
+#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+ unmap_kernel_range(vaddr, resource_size(res));
+#endif
+}
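A hedged sketch of how a host bridge driver might pair the existing pci_remap_iospace() with the new pci_unmap_iospace(); the foo_* names are placeholders, and the resource and CPU physical address are assumed to come from the driver's own firmware parsing.

#include <linux/pci.h>

static int foo_host_map_io(struct resource *io_res, phys_addr_t io_phys)
{
	/* existing helper: map the bridge I/O window at PCI_IOBASE */
	return pci_remap_iospace(io_res, io_phys);
}

static void foo_host_unmap_io(struct resource *io_res)
{
	/* new counterpart, for the error and removal paths */
	pci_unmap_iospace(io_res);
}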
+
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4755,6 +4953,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
int seg, bus, slot, func, align_order, count;
+ unsigned short vendor, device, subsystem_vendor, subsystem_device;
resource_size_t align = 0;
char *p;
@@ -4768,28 +4967,55 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
} else {
align_order = -1;
}
- if (sscanf(p, "%x:%x:%x.%x%n",
- &seg, &bus, &slot, &func, &count) != 4) {
- seg = 0;
- if (sscanf(p, "%x:%x.%x%n",
- &bus, &slot, &func, &count) != 3) {
- /* Invalid format */
- printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
- p);
+ if (strncmp(p, "pci:", 4) == 0) {
+ /* PCI vendor/device (subvendor/subdevice) ids are specified */
+ p += 4;
+ if (sscanf(p, "%hx:%hx:%hx:%hx%n",
+ &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
+ if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
+ printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
+ p);
+ break;
+ }
+ subsystem_vendor = subsystem_device = 0;
+ }
+ p += count;
+ if ((!vendor || (vendor == dev->vendor)) &&
+ (!device || (device == dev->device)) &&
+ (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
+ (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ /* Found */
break;
}
}
- p += count;
- if (seg == pci_domain_nr(dev->bus) &&
- bus == dev->bus->number &&
- slot == PCI_SLOT(dev->devfn) &&
- func == PCI_FUNC(dev->devfn)) {
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
- /* Found */
- break;
+ else {
+ if (sscanf(p, "%x:%x:%x.%x%n",
+ &seg, &bus, &slot, &func, &count) != 4) {
+ seg = 0;
+ if (sscanf(p, "%x:%x.%x%n",
+ &bus, &slot, &func, &count) != 3) {
+ /* Invalid format */
+ printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
+ p);
+ break;
+ }
+ }
+ p += count;
+ if (seg == pci_domain_nr(dev->bus) &&
+ bus == dev->bus->number &&
+ slot == PCI_SLOT(dev->devfn) &&
+ func == PCI_FUNC(dev->devfn)) {
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ /* Found */
+ break;
+ }
}
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
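The new branch above accepts device-ID based specifications of the form pci:&lt;vendor&gt;:&lt;device&gt;[:&lt;subvendor&gt;:&lt;subdevice&gt;] (hex, a zero ID matching anything), optionally prefixed by &lt;order of align&gt;@ like the existing bus/slot/function form; the same syntax is accepted by the resource_alignment bus attribute further down. For example (the IDs here are arbitrary):

    pci=resource_alignment=12@pci:8086:0e10    4 KiB (2^12) alignment for every 8086:0e10 device
    pci=resource_alignment=pci:1234:5678       PAGE_SIZE alignment; subsystem IDs default to 0 (wildcard)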
@@ -4897,7 +5123,7 @@ static ssize_t pci_resource_alignment_store(struct bus_type *bus,
return pci_set_resource_alignment_param(buf, count);
}
-BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
+static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
pci_resource_alignment_store);
static int __init pci_resource_alignment_sysfs_init(void)
@@ -4923,7 +5149,7 @@ int pci_get_new_domain_nr(void)
}
#ifdef CONFIG_PCI_DOMAINS_GENERIC
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+static int of_pci_bus_find_domain_nr(struct device *parent)
{
static int use_dt_domains = -1;
int domain = -1;
@@ -4967,7 +5193,13 @@ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
domain = -1;
}
- bus->domain_nr = domain;
+ return domain;
+}
+
+int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+ return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
+ acpi_pci_bus_find_domain_nr(bus);
}
#endif
#endif
@@ -5021,6 +5253,11 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "hpbussize=", 10)) {
+ pci_hotplug_bus_size =
+ simple_strtoul(str + 10, &str, 0);
+ if (pci_hotplug_bus_size > 0xff)
+ pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
pcie_bus_config = PCIE_BUS_TUNE_OFF;
} else if (!strncmp(str, "pcie_bus_safe", 13)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a814bbb80fcb..9730c474b016 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -82,6 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
+void pci_bridge_d3_device_changed(struct pci_dev *dev);
+void pci_bridge_d3_device_removed(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -94,6 +96,15 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
return !!(pci_dev->subordinate);
}
+static inline bool pci_power_manageable(struct pci_dev *pci_dev)
+{
+ /*
+ * Currently we allow normal PCI devices to transition into D3, and
+ * PCI bridges only if their bridge_d3 is set.
+ */
+ return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3;
+}
+
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 22ca6412bd15..7fcea75afa4c 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -83,7 +83,7 @@ config PCIE_PME
depends on PCIEPORTBUS && PM
config PCIE_DPC
- tristate "PCIe Downstream Port Containment support"
+ bool "PCIe Downstream Port Containment support"
depends on PCIEPORTBUS
default n
help
@@ -92,6 +92,3 @@ config PCIE_DPC
will be handled by the DPC driver. If your system doesn't
have this capability or you do not want to use this feature,
it is safe to answer N.
-
- To compile this driver as a module, choose M here: the module
- will be called pcie-dpc.
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2dfe7fdb77e7..0ec649d961d7 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -139,7 +139,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
/* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable && enable)
+ if (!link->clkpm_capable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index ab552f1bc08f..250f87861786 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -15,8 +15,8 @@
struct dpc_dev {
struct pcie_device *dev;
- struct work_struct work;
- int cap_pos;
+ struct work_struct work;
+ int cap_pos;
};
static void dpc_wait_link_inactive(struct pci_dev *pdev)
@@ -89,7 +89,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- dpc = kzalloc(sizeof(*dpc), GFP_KERNEL);
+ dpc = devm_kzalloc(&dev->device, sizeof(*dpc), GFP_KERNEL);
if (!dpc)
return -ENOMEM;
@@ -98,11 +98,12 @@ static int dpc_probe(struct pcie_device *dev)
INIT_WORK(&dpc->work, interrupt_event_handler);
set_service_data(dev, dpc);
- status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc);
+ status = devm_request_irq(&dev->device, dev->irq, dpc_irq, IRQF_SHARED,
+ "pcie-dpc", dpc);
if (status) {
dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq,
status);
- goto out;
+ return status;
}
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
@@ -117,9 +118,6 @@ static int dpc_probe(struct pcie_device *dev)
FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
return status;
- out:
- kfree(dpc);
- return status;
}
static void dpc_remove(struct pcie_device *dev)
@@ -131,14 +129,11 @@ static void dpc_remove(struct pcie_device *dev)
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
-
- free_irq(dev->irq, dpc);
- kfree(dpc);
}
static struct pcie_port_service_driver dpcdriver = {
.name = "dpc",
- .port_type = PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM,
+ .port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 32d4d0a3d20e..e9270b4026f3 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pcieport_if.h>
@@ -342,6 +343,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
return retval;
}
+ pm_runtime_no_callbacks(device);
+
return 0;
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index be35da2e105e..70d7ad8c6d17 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -93,6 +93,26 @@ static int pcie_port_resume_noirq(struct device *dev)
return 0;
}
+static int pcie_port_runtime_suspend(struct device *dev)
+{
+ return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
+}
+
+static int pcie_port_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+ /*
+ * Assume the PCI core has set bridge_d3 whenever it thinks the port
+ * should be good to go to D3. Everything else, including moving
+ * the port to D3, is handled by the PCI core.
+ */
+ return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
+}
+
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
@@ -101,6 +121,9 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
.resume_noirq = pcie_port_resume_noirq,
+ .runtime_suspend = pcie_port_runtime_suspend,
+ .runtime_resume = pcie_port_runtime_resume,
+ .runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -134,16 +157,39 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
return status;
pci_save_state(dev);
+
/*
- * D3cold may not work properly on some PCIe port, so disable
- * it by default.
+ * Prevent runtime PM if the port is advertising support for PCIe
+ * hotplug. Otherwise the BIOS hotplug SMI code might not be able
+ * to enumerate devices behind this port properly (the port is
+ * powered down preventing all config space accesses to the
+ * subordinate devices). We can't be sure for native PCIe hotplug
+ * either so prevent that as well.
*/
- dev->d3cold_allowed = false;
+ if (!dev->is_hotplug_bridge) {
+ /*
+ * Keep the port resumed 100ms to make sure things like
+ * config space accesses from userspace (lspci) will not
+ * cause the port to repeatedly suspend and resume.
+ */
+ pm_runtime_set_autosuspend_delay(&dev->dev, 100);
+ pm_runtime_use_autosuspend(&dev->dev);
+ pm_runtime_mark_last_busy(&dev->dev);
+ pm_runtime_put_autosuspend(&dev->dev);
+ pm_runtime_allow(&dev->dev);
+ }
+
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
+ if (!dev->is_hotplug_bridge) {
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
+ pm_runtime_dont_use_autosuspend(&dev->dev);
+ }
+
pcie_port_device_remove(dev);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8e3ef720997d..93f280df3428 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -16,6 +16,7 @@
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -832,6 +833,12 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
u8 primary, secondary, subordinate;
int broken = 0;
+ /*
+ * Make sure the bridge is powered on to be able to access config
+ * space of devices below it.
+ */
+ pm_runtime_get_sync(&dev->dev);
+
pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
primary = buses & 0xFF;
secondary = (buses >> 8) & 0xFF;
@@ -1012,6 +1019,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
out:
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
+ pm_runtime_put(&dev->dev);
+
return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
@@ -2077,6 +2086,15 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
}
/*
+ * Make sure a hotplug bridge has at least the minimum requested
+ * number of buses.
+ */
+ if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
+ if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
+ max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+ }
+
+ /*
* We've scanned the bus and so we know all about what's on
* the other side of any bridges that may be on this bus plus
* any devices.
@@ -2127,7 +2145,9 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
b->sysdata = sysdata;
b->ops = ops;
b->number = b->busn_res.start = bus;
- pci_bus_assign_domain_nr(b, parent);
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ b->domain_nr = pci_bus_find_domain_nr(b, parent);
+#endif
b2 = pci_find_bus(pci_domain_nr(b), bus);
if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 3f155e78513f..2408abe4ee8c 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,7 +231,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
- int i, ret;
+ int i, ret, write_combine;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -245,9 +245,12 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
+ if (fpriv->mmap_state == pci_mmap_mem)
+ write_combine = fpriv->write_combine;
+ else
+ write_combine = 0;
ret = pci_mmap_page_range(dev, vma,
- fpriv->mmap_state,
- fpriv->write_combine);
+ fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ee72ebe18f4b..37ff0158e45f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3189,13 +3189,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
}
/*
- * Atheros AR93xx chips do not behave after a bus reset. The device will
- * throw a Link Down error on AER-capable systems and regardless of AER,
- * config space of the device is never accessible again and typically
- * causes the system to hang or reset when access is attempted.
+ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
+ * The device will throw a Link Down error on AER-capable systems and
+ * regardless of AER, config space of the device is never accessible again
+ * and typically causes the system to hang or reset when access is attempted.
* http://www.spinics.net/lists/linux-pci/msg34797.html
*/
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
@@ -3711,6 +3713,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
@@ -3747,6 +3752,9 @@ static const struct pci_device_id fixed_dma_alias_tbl[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
.driver_data = PCI_DEVFN(1, 0) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
+ PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */
+ .driver_data = PCI_DEVFN(1, 0) },
{ 0 }
};
@@ -4087,6 +4095,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 8982026637d5..d1ef7acf6930 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -96,6 +96,8 @@ static void pci_remove_bus_device(struct pci_dev *dev)
dev->subordinate = NULL;
}
+ pci_bridge_d3_device_removed(dev);
+
pci_destroy_dev(dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 55641a39a3e9..c74059e10a6d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -695,11 +695,16 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
+void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+}
+
void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
+ pcibios_setup_bridge(bus, type);
__pci_setup_bridge(bus, type);
}
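The weak pcibios_setup_bridge() hook lets an architecture adjust or observe bridge windows just before the generic code programs them. A hypothetical override could look like the sketch below; the check and message are illustrative only.

void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_dev *bridge = bus->self;

	/* e.g. apply a platform-specific quirk before the windows are set */
	if (bridge && (type & IORESOURCE_PREFETCH))
		dev_dbg(&bridge->dev, "setting up prefetchable window\n");
}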
@@ -1423,6 +1428,74 @@ void pci_bus_assign_resources(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_assign_resources);
+static void pci_claim_device_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (!r->flags || r->parent)
+ continue;
+
+ pci_claim_resource(dev, i);
+ }
+}
+
+static void pci_claim_bridge_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (!r->flags || r->parent)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
+ }
+}
+
+static void pci_bus_allocate_dev_resources(struct pci_bus *b)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+ list_for_each_entry(dev, &b->devices, bus_list) {
+ pci_claim_device_resources(dev);
+
+ child = dev->subordinate;
+ if (child)
+ pci_bus_allocate_dev_resources(child);
+ }
+}
+
+static void pci_bus_allocate_resources(struct pci_bus *b)
+{
+ struct pci_bus *child;
+
+ /*
+ * Carry out a depth-first search on the PCI bus
+ * tree to allocate bridge apertures. Read the
+ * programmed bridge bases and recursively claim
+ * the respective bridge resources.
+ */
+ if (b->self) {
+ pci_read_bridge_bases(b);
+ pci_claim_bridge_resources(b->self);
+ }
+
+ list_for_each_entry(child, &b->children, node)
+ pci_bus_allocate_resources(child);
+}
+
+void pci_bus_claim_resources(struct pci_bus *b)
+{
+ pci_bus_allocate_resources(b);
+ pci_bus_allocate_dev_resources(b);
+}
+EXPORT_SYMBOL(pci_bus_claim_resources);
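A hedged sketch of the intended caller: a host bridge driver that trusts firmware-assigned resources can claim them instead of reassigning. The foo_* wrapper and its arguments are placeholders.

#include <linux/pci.h>

static int foo_host_probe_tail(struct device *dev, int busnr,
			       struct pci_ops *ops, void *sysdata,
			       struct list_head *resources)
{
	struct pci_bus *bus;

	bus = pci_scan_root_bus(dev, busnr, ops, sysdata, resources);
	if (!bus)
		return -ENODEV;

	/* Claim what firmware already programmed rather than reassigning. */
	pci_bus_claim_resources(bus);
	pci_bus_add_devices(bus);

	return 0;
}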
+
static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
struct list_head *add_head,
struct list_head *fail_head)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 5f70fee59a94..d6ff5e82377d 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1086,7 +1086,7 @@ out:
return err;
}
-static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
+static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
enum xenbus_state be_state)
{
struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 6ebcf3e41c46..fd57345ffed2 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -14,6 +14,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -31,42 +32,64 @@
((val) << (shift) | (mask) << ((shift) + 16))
/* Register definition */
-#define GRF_EMMCPHY_CON0 0x0
-#define GRF_EMMCPHY_CON1 0x4
-#define GRF_EMMCPHY_CON2 0x8
-#define GRF_EMMCPHY_CON3 0xc
-#define GRF_EMMCPHY_CON4 0x10
-#define GRF_EMMCPHY_CON5 0x14
-#define GRF_EMMCPHY_CON6 0x18
-#define GRF_EMMCPHY_STATUS 0x20
-
-#define PHYCTRL_PDB_MASK 0x1
-#define PHYCTRL_PDB_SHIFT 0x0
-#define PHYCTRL_PDB_PWR_ON 0x1
-#define PHYCTRL_PDB_PWR_OFF 0x0
-#define PHYCTRL_ENDLL_MASK 0x1
-#define PHYCTRL_ENDLL_SHIFT 0x1
-#define PHYCTRL_ENDLL_ENABLE 0x1
-#define PHYCTRL_ENDLL_DISABLE 0x0
-#define PHYCTRL_CALDONE_MASK 0x1
-#define PHYCTRL_CALDONE_SHIFT 0x6
-#define PHYCTRL_CALDONE_DONE 0x1
-#define PHYCTRL_CALDONE_GOING 0x0
-#define PHYCTRL_DLLRDY_MASK 0x1
-#define PHYCTRL_DLLRDY_SHIFT 0x5
-#define PHYCTRL_DLLRDY_DONE 0x1
-#define PHYCTRL_DLLRDY_GOING 0x0
+#define GRF_EMMCPHY_CON0 0x0
+#define GRF_EMMCPHY_CON1 0x4
+#define GRF_EMMCPHY_CON2 0x8
+#define GRF_EMMCPHY_CON3 0xc
+#define GRF_EMMCPHY_CON4 0x10
+#define GRF_EMMCPHY_CON5 0x14
+#define GRF_EMMCPHY_CON6 0x18
+#define GRF_EMMCPHY_STATUS 0x20
+
+#define PHYCTRL_PDB_MASK 0x1
+#define PHYCTRL_PDB_SHIFT 0x0
+#define PHYCTRL_PDB_PWR_ON 0x1
+#define PHYCTRL_PDB_PWR_OFF 0x0
+#define PHYCTRL_ENDLL_MASK 0x1
+#define PHYCTRL_ENDLL_SHIFT 0x1
+#define PHYCTRL_ENDLL_ENABLE 0x1
+#define PHYCTRL_ENDLL_DISABLE 0x0
+#define PHYCTRL_CALDONE_MASK 0x1
+#define PHYCTRL_CALDONE_SHIFT 0x6
+#define PHYCTRL_CALDONE_DONE 0x1
+#define PHYCTRL_CALDONE_GOING 0x0
+#define PHYCTRL_DLLRDY_MASK 0x1
+#define PHYCTRL_DLLRDY_SHIFT 0x5
+#define PHYCTRL_DLLRDY_DONE 0x1
+#define PHYCTRL_DLLRDY_GOING 0x0
+#define PHYCTRL_FREQSEL_200M 0x0
+#define PHYCTRL_FREQSEL_50M 0x1
+#define PHYCTRL_FREQSEL_100M 0x2
+#define PHYCTRL_FREQSEL_150M 0x3
+#define PHYCTRL_FREQSEL_MASK 0x3
+#define PHYCTRL_FREQSEL_SHIFT 0xc
+#define PHYCTRL_DR_MASK 0x7
+#define PHYCTRL_DR_SHIFT 0x4
+#define PHYCTRL_DR_50OHM 0x0
+#define PHYCTRL_DR_33OHM 0x1
+#define PHYCTRL_DR_66OHM 0x2
+#define PHYCTRL_DR_100OHM 0x3
+#define PHYCTRL_DR_40OHM 0x4
+#define PHYCTRL_OTAPDLYENA 0x1
+#define PHYCTRL_OTAPDLYENA_MASK 0x1
+#define PHYCTRL_OTAPDLYENA_SHIFT 0xb
+#define PHYCTRL_OTAPDLYSEL_MASK 0xf
+#define PHYCTRL_OTAPDLYSEL_SHIFT 0x7
struct rockchip_emmc_phy {
unsigned int reg_offset;
struct regmap *reg_base;
+ struct clk *emmcclk;
};
-static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
- bool on_off)
+static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
{
+ struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
unsigned int caldone;
unsigned int dllrdy;
+ unsigned int freqsel = PHYCTRL_FREQSEL_200M;
+ unsigned long rate;
+ unsigned long timeout;
/*
* Keep phyctrl_pdb and phyctrl_endll low to allow
@@ -87,6 +110,43 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
if (on_off == PHYCTRL_PDB_PWR_OFF)
return 0;
+ rate = clk_get_rate(rk_phy->emmcclk);
+
+ if (rate != 0) {
+ unsigned long ideal_rate;
+ unsigned long diff;
+
+ switch (rate) {
+ case 1 ... 74999999:
+ ideal_rate = 50000000;
+ freqsel = PHYCTRL_FREQSEL_50M;
+ break;
+ case 75000000 ... 124999999:
+ ideal_rate = 100000000;
+ freqsel = PHYCTRL_FREQSEL_100M;
+ break;
+ case 125000000 ... 174999999:
+ ideal_rate = 150000000;
+ freqsel = PHYCTRL_FREQSEL_150M;
+ break;
+ default:
+ ideal_rate = 200000000;
+ break;
+ }
+
+ diff = (rate > ideal_rate) ?
+ rate - ideal_rate : ideal_rate - rate;
+
+ /*
+ * In order for tuning delays to be accurate we need to be
+ * pretty spot on for the DLL range, so warn if we're too
+ * far off. Also warn if we're above the 200 MHz max. Don't
+ * warn for really slow rates since we won't be tuning then.
+ */
+ if ((rate > 50000000 && diff > 15000000) || (rate > 200000000))
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+ }
+
/*
* According to the user manual, calpad calibration
* cycle takes more than 2us without the minimal recommended
@@ -113,20 +173,62 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
return -ETIMEDOUT;
}
+ /* Set the frequency of the DLL operation */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON0,
+ HIWORD_UPDATE(freqsel, PHYCTRL_FREQSEL_MASK,
+ PHYCTRL_FREQSEL_SHIFT));
+
+ /* Turn on the DLL */
regmap_write(rk_phy->reg_base,
rk_phy->reg_offset + GRF_EMMCPHY_CON6,
HIWORD_UPDATE(PHYCTRL_ENDLL_ENABLE,
PHYCTRL_ENDLL_MASK,
PHYCTRL_ENDLL_SHIFT));
+
/*
- * After enable analog DLL circuits, we need extra 10.2us
- * for dll to be ready for work.
+ * We turned on the DLL even though the rate was 0 because the
+ * clock might be turned on later. ...but we can't wait for the DLL
+ * to lock when the rate is 0 because it will never lock with no
+ * input clock.
+ *
+ * Technically we should be checking the lock later when the clock
+ * is turned on, but for now we won't.
*/
- udelay(11);
- regmap_read(rk_phy->reg_base,
- rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
- &dllrdy);
- dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK;
+ if (rate == 0)
+ return 0;
+
+ /*
+ * After enabling analog DLL circuits docs say that we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+ * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+ * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+ * Hopefully we won't be running at 100 kHz, but we should still make
+ * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+ * extreme cases we've seen it take over 10 ms (!). We'll be
+ * generous and give it 50ms. We still busy wait here because:
+ * - In most cases it should be super fast.
+ * - This is not called lots during normal operation so it shouldn't
+ * be a power or performance problem to busy wait. We expect it
+ * only at boot / resume. In both cases, eMMC is probably on the
+ * critical path so busy waiting a little extra time should be OK.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ do {
+ udelay(1);
+
+ regmap_read(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
+ &dllrdy);
+ dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK;
+ if (dllrdy == PHYCTRL_DLLRDY_DONE)
+ break;
+ } while (!time_after(jiffies, timeout));
+
if (dllrdy != PHYCTRL_DLLRDY_DONE) {
pr_err("rockchip_emmc_phy_power: dllrdy timeout.\n");
return -ETIMEDOUT;
@@ -135,33 +237,82 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
return 0;
}
-static int rockchip_emmc_phy_power_off(struct phy *phy)
+static int rockchip_emmc_phy_init(struct phy *phy)
{
struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
int ret = 0;
- /* Power down emmc phy analog blocks */
- ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_OFF);
- if (ret)
- return ret;
+ /*
+ * We purposely get the clock here and not in probe to avoid the
+ * circular dependency problem. We expect:
+ * - PHY driver to probe
+ * - SDHCI driver to start probe
+ * - SDHCI driver to register its clock
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+ * The clock is optional, so upon any error we just set to NULL.
+ *
+ * NOTE: we don't do anything special for EPROBE_DEFER here. Given the
+ * above expected use case, EPROBE_DEFER isn't sensible to expect, so
+ * it's just like any other error.
+ */
+ rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
+ if (IS_ERR(rk_phy->emmcclk)) {
+ dev_dbg(&phy->dev, "Error getting emmcclk: %ld\n", PTR_ERR(rk_phy->emmcclk));
+ rk_phy->emmcclk = NULL;
+ }
+
+ return ret;
+}
+
+static int rockchip_emmc_phy_exit(struct phy *phy)
+{
+ struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
+
+ clk_put(rk_phy->emmcclk);
return 0;
}
+static int rockchip_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down emmc phy analog blocks */
+ return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_OFF);
+}
+
static int rockchip_emmc_phy_power_on(struct phy *phy)
{
struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
- int ret = 0;
- /* Power up emmc phy analog blocks */
- ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_ON);
- if (ret)
- return ret;
+ /* Drive impedance: 50 Ohm */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON6,
+ HIWORD_UPDATE(PHYCTRL_DR_50OHM,
+ PHYCTRL_DR_MASK,
+ PHYCTRL_DR_SHIFT));
- return 0;
+ /* Output tap delay: enable */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON0,
+ HIWORD_UPDATE(PHYCTRL_OTAPDLYENA,
+ PHYCTRL_OTAPDLYENA_MASK,
+ PHYCTRL_OTAPDLYENA_SHIFT));
+
+ /* Output tap delay */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON0,
+ HIWORD_UPDATE(4,
+ PHYCTRL_OTAPDLYSEL_MASK,
+ PHYCTRL_OTAPDLYSEL_SHIFT));
+
+ /* Power up emmc phy analog blocks */
+ return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_ON);
}
static const struct phy_ops ops = {
+ .init = rockchip_emmc_phy_init,
+ .exit = rockchip_emmc_phy_exit,
.power_on = rockchip_emmc_phy_power_on,
.power_off = rockchip_emmc_phy_power_off,
.owner = THIS_MODULE,
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index b6e161f71b26..6c084b266651 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -380,3 +380,20 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int ret;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
+ } else if (msg->result != EC_RES_SUCCESS) {
+ dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
+ return -EPROTO;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
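A minimal sketch of a caller using the new helper; EC_CMD_GET_VERSION and its response structure follow the existing cros_ec command definitions and are used here only for illustration, and the foo_* name is hypothetical.

#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/slab.h>

static int foo_query_ec(struct cros_ec_device *ec)
{
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(struct ec_response_get_version),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_VERSION;
	msg->insize = sizeof(struct ec_response_get_version);

	/* Unlike cros_ec_cmd_xfer(), a non-success EC result is already
	 * folded into the return value as -EPROTO. */
	ret = cros_ec_cmd_xfer_status(ec, msg);

	kfree(msg);
	return ret < 0 ? ret : 0;
}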
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 4034d2d4c507..a66be137324c 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -31,19 +31,21 @@
/**
* DOC: Overview
*
- * :1: http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
- * :2: http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
- *
* gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
- * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas.
+ * A `Lattice XP2`_ on pre-retinas, a `Renesas R4F2113`_ on retinas.
*
* (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has
* dual GPUs but no built-in display.)
*
* gmux is connected to the LPC bus of the southbridge. Its I/O ports are
* accessed differently depending on the microcontroller: Driver functions
- * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux
- * are infixed `_index_`.
+ * to access a pre-retina gmux are infixed ``_pio_``, those for a retina gmux
+ * are infixed ``_index_``.
+ *
+ * .. _Lattice XP2:
+ * http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
+ * .. _Renesas R4F2113:
+ * http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
*/
struct apple_gmux_data {
@@ -272,15 +274,15 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
/**
* DOC: Backlight control
*
- * :3: http://www.ti.com/lit/ds/symlink/lp8543.pdf
- * :4: http://www.ti.com/lit/ds/symlink/lp8545.pdf
- *
* On single GPU MacBooks, the PWM signal for the backlight is generated by
* the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
* to conserve energy. Hence the PWM signal needs to be generated by a separate
* backlight driver which is controlled by gmux. The earliest generation
- * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models
- * use a {4}[TI LP8545].
+ * MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models
+ * use a `TI LP8545`_.
+ *
+ * .. _TI LP8543: http://www.ti.com/lit/ds/symlink/lp8543.pdf
+ * .. _TI LP8545: http://www.ti.com/lit/ds/symlink/lp8545.pdf
*/
static int gmux_get_brightness(struct backlight_device *bd)
@@ -312,28 +314,20 @@ static const struct backlight_ops gmux_bl_ops = {
/**
* DOC: Graphics mux
*
- * :5: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
- * :6: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
- * :7: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
- * :8: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
- * :9: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
- * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
- * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
- *
* On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
* either of them to the panel. One of the tricks gmux has up its sleeve is
* to lengthen the blanking interval of its output during a switch to
* synchronize it with the GPU switched to. This allows for a flicker-free
- * switch that is imperceptible by the user ({5}[US 8,687,007 B2]).
+ * switch that is imperceptible by the user (`US 8,687,007 B2`_).
*
* On retinas, muxing is no longer done by gmux itself, but by a separate
* chip which is controlled by gmux. The chip is triple sourced, it is
- * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412].
+ * either an `NXP CBTL06142`_, `TI HD3SS212`_ or `Pericom PI3VDP12412`_.
* The panel is driven with eDP instead of LVDS since the pixel clock
* required for retina resolution exceeds LVDS' limits.
*
* Pre-retinas are able to switch the panel's DDC pins separately.
- * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux.
+ * This is handled by a `TI SN74LV4066A`_ which is controlled by gmux.
* The inactive GPU can thus probe the panel's EDID without switching over
* the entire panel. Retinas lack this functionality as the chips used for
* eDP muxing are incapable of switching the AUX channel separately (see
@@ -344,15 +338,15 @@ static const struct backlight_ops gmux_bl_ops = {
*
* The external DP port is only fully switchable on the first two unibody
* MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
- * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the
+ * `NXP CBTL06141`_ which is controlled by gmux. It's the predecessor of the
* eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
*
* The following MacBook Pro generations replaced the external DP port with a
* combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
* connecting it either to the discrete GPU or the Thunderbolt controller.
* Oddly enough, while the full port is no longer switchable, AUX and HPD
- * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas
- * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the
+ * are still switchable by way of an `NXP CBTL03062`_ (on pre-retinas
+ * MBP8 2011 and MBP9 2012) or two `TI TS3DS10224`_ (on retinas) under the
* control of gmux. Since the integrated GPU is missing the main link,
* external displays appear to it as phantoms which fail to link-train.
*
@@ -365,10 +359,19 @@ static const struct backlight_ops gmux_bl_ops = {
* of this feature.
*
* gmux' initial switch state on bootup is user configurable via the EFI
- * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte,
+ * variable ``gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9`` (5th byte,
* 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
* switch the panel and the external DP connector and allocates a framebuffer
* for the selected GPU.
+ *
+ * .. _US 8,687,007 B2: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
+ * .. _NXP CBTL06141: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _NXP CBTL06142: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _TI HD3SS212: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
+ * .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
+ * .. _TI SN74LV4066A: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
+ * .. _NXP CBTL03062: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
+ * .. _TI TS3DS10224: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
*/
static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index d2bc092defd7..da2fe18162e1 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -110,8 +110,8 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
/* BIOS error detected */
{ KE_IGNORE, 0xe00d, { KEY_RESERVED } },
- /* Unknown, defined in ACPI DSDT */
- /* { KE_IGNORE, 0xe00e, { KEY_RESERVED } }, */
+ /* Battery was removed or inserted */
+ { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
/* Wifi Catcher */
{ KE_KEY, 0xe011, { KEY_PROG2 } },
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 6f497e80c9df..b86e1bcaa055 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -85,7 +85,7 @@
* platform device and to export resources for those functions.
*/
#define TCO_DEVICE_NAME "iTCO_wdt"
-#define SMI_EN_OFFSET 0x30
+#define SMI_EN_OFFSET 0x40
#define SMI_EN_SIZE 4
#define TCO_BASE_OFFSET 0x60
#define TCO_REGS_SIZE 16
@@ -94,6 +94,8 @@
#define TELEM_SSRAM_SIZE 240
#define TELEM_PMC_SSRAM_OFFSET 0x1B00
#define TELEM_PUNIT_SSRAM_OFFSET 0x1A00
+#define TCO_PMC_OFFSET 0x8
+#define TCO_PMC_SIZE 0x4
static const int iTCO_version = 3;
@@ -502,7 +504,7 @@ static struct resource tco_res[] = {
static struct itco_wdt_platform_data tco_info = {
.name = "Apollo Lake SoC",
- .version = 3,
+ .version = 5,
};
#define TELEMETRY_RESOURCE_PUNIT_SSRAM 0
@@ -572,8 +574,8 @@ static int ipc_create_tco_device(void)
res->end = res->start + SMI_EN_SIZE - 1;
res = tco_res + TCO_RESOURCE_GCR_MEM;
- res->start = ipcdev.gcr_base;
- res->end = res->start + ipcdev.gcr_size - 1;
+ res->start = ipcdev.gcr_base + TCO_PMC_OFFSET;
+ res->end = res->start + TCO_PMC_SIZE - 1;
ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
if (ret) {
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index bedb361746a0..c38a5b9733c8 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -60,6 +60,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <linux/kmod.h>
#include <linux/kthread.h>
#include <asm/page.h>
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 7053abced0bc..3bfac539334b 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -46,6 +46,16 @@ config POWER_RESET_AXXIA
Say Y if you have an Axxia family SoC.
+config POWER_RESET_BRCMKONA
+ bool "Broadcom Kona reset driver"
+ depends on ARM || COMPILE_TEST
+ default ARCH_BCM_MOBILE
+ help
+ This driver provides restart support for Broadcom Kona chips.
+
+ Say Y here if you have a Broadcom Kona-based board and you wish
+ to have restart support.
+
config POWER_RESET_BRCMSTB
bool "Broadcom STB reset driver"
depends on ARM || MIPS || COMPILE_TEST
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index d6b2560d5c4a..1be307c7fc25 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o
obj-$(CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC) += at91-sama5d2_shdwc.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
+obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c
new file mode 100644
index 000000000000..8eaa959d8be6
--- /dev/null
+++ b/drivers/power/reset/brcm-kona-reset.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/reboot.h>
+
+#define RSTMGR_REG_WR_ACCESS_OFFSET 0
+#define RSTMGR_REG_CHIP_SOFT_RST_OFFSET 4
+
+#define RSTMGR_WR_PASSWORD 0xa5a5
+#define RSTMGR_WR_PASSWORD_SHIFT 8
+#define RSTMGR_WR_ACCESS_ENABLE 1
+
+static void __iomem *kona_reset_base;
+
+static int kona_reset_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ /*
+ * A soft reset is triggered by writing a 0 to bit 0 of the soft reset
+ * register. To write to that register we must first write the password
+ * and the enable bit in the write access enable register.
+ */
+ writel((RSTMGR_WR_PASSWORD << RSTMGR_WR_PASSWORD_SHIFT) |
+ RSTMGR_WR_ACCESS_ENABLE,
+ kona_reset_base + RSTMGR_REG_WR_ACCESS_OFFSET);
+ writel(0, kona_reset_base + RSTMGR_REG_CHIP_SOFT_RST_OFFSET);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kona_reset_nb = {
+ .notifier_call = kona_reset_handler,
+ .priority = 128,
+};
+
+static int kona_reset_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ kona_reset_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kona_reset_base))
+ return PTR_ERR(kona_reset_base);
+
+ return register_restart_handler(&kona_reset_nb);
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "brcm,bcm21664-resetmgr" },
+ {},
+};
+
+static struct platform_driver bcm_kona_reset_driver = {
+ .probe = kona_reset_probe,
+ .driver = {
+ .name = "brcm-kona-reset",
+ .of_match_table = of_match,
+ },
+};
+
+builtin_platform_driver(bcm_kona_reset_driver);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 6a9bf7089373..102f95a09460 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -74,8 +74,8 @@ static ssize_t vexpress_reset_active_store(struct device *dev,
return err ? err : count;
}
-DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show,
- vexpress_reset_active_store);
+static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show,
+ vexpress_reset_active_store);
enum vexpress_reset_func { FUNC_RESET, FUNC_SHUTDOWN, FUNC_REBOOT };
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index c182efc62c7b..80a566a00d04 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -74,6 +74,16 @@ config PWM_ATMEL_TCB
To compile this driver as a module, choose M here: the module
will be called pwm-atmel-tcb.
+config PWM_BCM_IPROC
+ tristate "iProc PWM support"
+ depends on ARCH_BCM_IPROC
+ help
+ Generic PWM framework driver for Broadcom iProc PWM block. This
+ block is used in Broadcom iProc SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bcm-iproc.
+
config PWM_BCM_KONA
tristate "Kona PWM support"
depends on ARCH_BCM_MOBILE
@@ -137,6 +147,13 @@ config PWM_CRC
Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
control.
+config PWM_CROS_EC
+ tristate "ChromeOS EC PWM driver"
+ depends on MFD_CROS_EC
+ help
+ PWM driver for exposing a PWM attached to the ChromeOS Embedded
+ Controller.
+
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
depends on ARCH_EP93XX
@@ -305,7 +322,7 @@ config PWM_PXA
config PWM_RCAR
tristate "Renesas R-Car PWM support"
- depends on ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
help
This driver exposes the PWM Timer controller found in Renesas
@@ -362,6 +379,13 @@ config PWM_STI
To compile this driver as a module, choose M here: the module
will be called pwm-sti.
+config PWM_STMPE
+ bool "STMPE expander PWM export"
+ depends on MFD_STMPE
+ help
+ This enables support for the PWMs found in the STMPE I/O
+ expanders.
+
config PWM_SUN4I
tristate "Allwinner PWM support"
depends on ARCH_SUNXI || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index dd35bc121a18..feef1dd29f73 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
+obj-$(CONFIG_PWM_BCM_IPROC) += pwm-bcm-iproc.o
obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BERLIN) += pwm-berlin.o
@@ -11,6 +12,7 @@ obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_CRC) += pwm-crc.o
+obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
+obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o
obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ed337a8c34ab..0dbd29e287db 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -526,6 +526,33 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
EXPORT_SYMBOL_GPL(pwm_apply_state);
/**
+ * pwm_capture() - capture and report a PWM signal
+ * @pwm: PWM device
+ * @result: structure to fill with capture result
+ * @timeout: time to wait, in milliseconds, before giving up on capture
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout)
+{
+ int err;
+
+ if (!pwm || !pwm->chip->ops)
+ return -EINVAL;
+
+ if (!pwm->chip->ops->capture)
+ return -ENOSYS;
+
+ mutex_lock(&pwm_lock);
+ err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
+ mutex_unlock(&pwm_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pwm_capture);
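A hedged sketch of a consumer measuring an input signal with the new pwm_capture() call; the NULL consumer name and the 100 ms timeout are arbitrary choices for the example.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int foo_measure(struct device *dev)
{
	struct pwm_capture result;
	struct pwm_device *pwm;
	int err;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/* Blocks until the chip reports a capture or the timeout expires. */
	err = pwm_capture(pwm, &result, 100 /* ms */);
	if (err)
		return err;

	dev_info(dev, "period %u ns, duty %u ns\n",
		 result.period, result.duty_cycle);
	return 0;
}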
+
+/**
* pwm_adjust_config() - adjust the current PWM config to the PWM arguments
* @pwm: PWM device
*
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 0e4bd4e8e582..e6b8b1b7e6ba 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -64,7 +64,8 @@ struct atmel_pwm_chip {
void __iomem *base;
unsigned int updated_pwms;
- struct mutex isr_lock; /* ISR is cleared when read, ensure only one thread does that */
+ /* ISR is cleared when read, ensure only one thread does that */
+ struct mutex isr_lock;
void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long dty, unsigned long prd);
@@ -271,6 +272,16 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
+ /*
+ * Wait for the PWM channel disable operation to be effective before
+ * stopping the clock.
+ */
+ timeout = jiffies + 2 * HZ;
+
+ while ((atmel_pwm_readl(atmel_pwm, PWM_SR) & (1 << pwm->hwpwm)) &&
+ time_before(jiffies, timeout))
+ usleep_range(10, 100);
+
clk_disable(atmel_pwm->clk);
}
@@ -324,21 +335,14 @@ MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
static inline const struct atmel_pwm_data *
atmel_pwm_get_driver_data(struct platform_device *pdev)
{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_device(atmel_pwm_dt_ids, &pdev->dev);
- if (!match)
- return NULL;
+ const struct platform_device_id *id;
- return match->data;
- } else {
- const struct platform_device_id *id;
+ if (pdev->dev.of_node)
+ return of_device_get_match_data(&pdev->dev);
- id = platform_get_device_id(pdev);
+ id = platform_get_device_id(pdev);
- return (struct atmel_pwm_data *)id->driver_data;
- }
+ return (struct atmel_pwm_data *)id->driver_data;
}
static int atmel_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
new file mode 100644
index 000000000000..d961a8207b1c
--- /dev/null
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define IPROC_PWM_CTRL_OFFSET 0x00
+#define IPROC_PWM_CTRL_TYPE_SHIFT(ch) (15 + (ch))
+#define IPROC_PWM_CTRL_POLARITY_SHIFT(ch) (8 + (ch))
+#define IPROC_PWM_CTRL_EN_SHIFT(ch) (ch)
+
+#define IPROC_PWM_PERIOD_OFFSET(ch) (0x04 + ((ch) << 3))
+#define IPROC_PWM_PERIOD_MIN 0x02
+#define IPROC_PWM_PERIOD_MAX 0xffff
+
+#define IPROC_PWM_DUTY_CYCLE_OFFSET(ch) (0x08 + ((ch) << 3))
+#define IPROC_PWM_DUTY_CYCLE_MIN 0x00
+#define IPROC_PWM_DUTY_CYCLE_MAX 0xffff
+
+#define IPROC_PWM_PRESCALE_OFFSET 0x24
+#define IPROC_PWM_PRESCALE_BITS 0x06
+#define IPROC_PWM_PRESCALE_SHIFT(ch) ((3 - (ch)) * \
+ IPROC_PWM_PRESCALE_BITS)
+#define IPROC_PWM_PRESCALE_MASK(ch) (IPROC_PWM_PRESCALE_MAX << \
+ IPROC_PWM_PRESCALE_SHIFT(ch))
+#define IPROC_PWM_PRESCALE_MIN 0x00
+#define IPROC_PWM_PRESCALE_MAX 0x3f
+
+struct iproc_pwmc {
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline struct iproc_pwmc *to_iproc_pwmc(struct pwm_chip *chip)
+{
+ return container_of(chip, struct iproc_pwmc, chip);
+}
+
+static void iproc_pwmc_enable(struct iproc_pwmc *ip, unsigned int channel)
+{
+ u32 value;
+
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+ value |= 1 << IPROC_PWM_CTRL_EN_SHIFT(channel);
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+	/* a 400 ns delay is required between clearing and setting the enable bit */
+ ndelay(400);
+}
+
+static void iproc_pwmc_disable(struct iproc_pwmc *ip, unsigned int channel)
+{
+ u32 value;
+
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+ value &= ~(1 << IPROC_PWM_CTRL_EN_SHIFT(channel));
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+	/* a 400 ns delay is required between clearing and setting the enable bit */
+ ndelay(400);
+}
+
+static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct iproc_pwmc *ip = to_iproc_pwmc(chip);
+ u64 tmp, multi, rate;
+ u32 value, prescale;
+
+ rate = clk_get_rate(ip->clk);
+
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ if (value & BIT(IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm)))
+ state->polarity = PWM_POLARITY_NORMAL;
+ else
+ state->polarity = PWM_POLARITY_INVERSED;
+
+ value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
+ prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
+ prescale &= IPROC_PWM_PRESCALE_MAX;
+
+ multi = NSEC_PER_SEC * (prescale + 1);
+
+ value = readl(ip->base + IPROC_PWM_PERIOD_OFFSET(pwm->hwpwm));
+ tmp = (value & IPROC_PWM_PERIOD_MAX) * multi;
+ state->period = div64_u64(tmp, rate);
+
+ value = readl(ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
+	tmp = (value & IPROC_PWM_DUTY_CYCLE_MAX) * multi;
+ state->duty_cycle = div64_u64(tmp, rate);
+}
+
+static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ unsigned long prescale = IPROC_PWM_PRESCALE_MIN;
+ struct iproc_pwmc *ip = to_iproc_pwmc(chip);
+ u32 value, period, duty;
+ u64 rate;
+
+ rate = clk_get_rate(ip->clk);
+
+ /*
+ * Find period count, duty count and prescale to suit duty_cycle and
+ * period. This is done according to formulas described below:
+ *
+ * period_ns = 10^9 * (PRESCALE + 1) * PC / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ *
+ * PC = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
+ * DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
+ */
+ while (1) {
+ u64 value, div;
+
+ div = NSEC_PER_SEC * (prescale + 1);
+ value = rate * state->period;
+ period = div64_u64(value, div);
+ value = rate * state->duty_cycle;
+ duty = div64_u64(value, div);
+
+ if (period < IPROC_PWM_PERIOD_MIN ||
+ duty < IPROC_PWM_DUTY_CYCLE_MIN)
+ return -EINVAL;
+
+ if (period <= IPROC_PWM_PERIOD_MAX &&
+ duty <= IPROC_PWM_DUTY_CYCLE_MAX)
+ break;
+
+ /* Otherwise, increase prescale and recalculate counts */
+ if (++prescale > IPROC_PWM_PRESCALE_MAX)
+ return -EINVAL;
+ }
+
+ iproc_pwmc_disable(ip, pwm->hwpwm);
+
+ /* Set prescale */
+ value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
+ value &= ~IPROC_PWM_PRESCALE_MASK(pwm->hwpwm);
+ value |= prescale << IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
+ writel(value, ip->base + IPROC_PWM_PRESCALE_OFFSET);
+
+ /* set period and duty cycle */
+ writel(period, ip->base + IPROC_PWM_PERIOD_OFFSET(pwm->hwpwm));
+ writel(duty, ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
+
+ /* set polarity */
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm);
+ else
+ value &= ~(1 << IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm));
+
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (state->enabled)
+ iproc_pwmc_enable(ip, pwm->hwpwm);
+
+ return 0;
+}
+
+static const struct pwm_ops iproc_pwm_ops = {
+ .apply = iproc_pwmc_apply,
+ .get_state = iproc_pwmc_get_state,
+};
+
+static int iproc_pwmc_probe(struct platform_device *pdev)
+{
+ struct iproc_pwmc *ip;
+ struct resource *res;
+ unsigned int i;
+ u32 value;
+ int ret;
+
+ ip = devm_kzalloc(&pdev->dev, sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ip);
+
+ ip->chip.dev = &pdev->dev;
+ ip->chip.ops = &iproc_pwm_ops;
+ ip->chip.base = -1;
+ ip->chip.npwm = 4;
+ ip->chip.of_xlate = of_pwm_xlate_with_flags;
+ ip->chip.of_pwm_n_cells = 3;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ip->base))
+ return PTR_ERR(ip->base);
+
+ ip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ip->clk)) {
+ dev_err(&pdev->dev, "failed to get clock: %ld\n",
+ PTR_ERR(ip->clk));
+ return PTR_ERR(ip->clk);
+ }
+
+ ret = clk_prepare_enable(ip->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ /* Set full drive and normal polarity for all channels */
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ for (i = 0; i < ip->chip.npwm; i++) {
+ value &= ~(1 << IPROC_PWM_CTRL_TYPE_SHIFT(i));
+ value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(i);
+ }
+
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ ret = pwmchip_add(&ip->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(ip->clk);
+ }
+
+ return ret;
+}
+
+static int iproc_pwmc_remove(struct platform_device *pdev)
+{
+ struct iproc_pwmc *ip = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ip->clk);
+
+ return pwmchip_remove(&ip->chip);
+}
+
+static const struct of_device_id bcm_iproc_pwmc_dt[] = {
+ { .compatible = "brcm,iproc-pwm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_pwmc_dt);
+
+static struct platform_driver iproc_pwmc_driver = {
+ .driver = {
+ .name = "bcm-iproc-pwm",
+ .of_match_table = bcm_iproc_pwmc_dt,
+ },
+ .probe = iproc_pwmc_probe,
+ .remove = iproc_pwmc_remove,
+};
+module_platform_driver(iproc_pwmc_driver);
+
+MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iProc PWM driver");
+MODULE_LICENSE("GPL v2");
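
The formula comment in iproc_pwmc_apply() above maps directly onto a small standalone calculation. The sketch below mirrors the prescale search; the 1 MHz clock and the 10 ms / 2.5 ms request are assumed values for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 1000000;	/* assumed PWM_CLK_RATE in Hz */
	uint64_t period_ns = 10000000;	/* requested period */
	uint64_t duty_ns = 2500000;	/* requested duty cycle */
	uint64_t prescale, pc = 0, dc = 0;

	/* Increase the prescaler until PC and DC fit into 16 bits. */
	for (prescale = 0; prescale <= 0x3f; prescale++) {
		uint64_t div = 1000000000ULL * (prescale + 1);

		pc = rate * period_ns / div;
		dc = rate * duty_ns / div;
		if (pc <= 0xffff && dc <= 0xffff)
			break;
	}

	/* With these numbers: prescale = 0, PC = 10000, DC = 2500. */
	printf("prescale=%llu PC=%llu DC=%llu\n",
	       (unsigned long long)prescale, (unsigned long long)pc,
	       (unsigned long long)dc);
	return 0;
}
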
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 7d335422cfda..26ec24e457b1 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -155,7 +155,7 @@ static int clps711x_pwm_remove(struct platform_device *pdev)
}
static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
- { .compatible = "cirrus,clps711x-pwm", },
+ { .compatible = "cirrus,ep7209-pwm", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids);
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
new file mode 100644
index 000000000000..99b9acc1a420
--- /dev/null
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published by
+ * the Free Software Foundation.
+ *
+ * Expose a PWM controlled by the ChromeOS EC to the host processor.
+ */
+
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/**
+ * struct cros_ec_pwm_device - Driver data for EC PWM
+ *
+ * @dev: Device node
+ * @ec: Pointer to EC device
+ * @chip: PWM controller chip
+ */
+struct cros_ec_pwm_device {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct pwm_chip chip;
+};
+
+static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
+{
+ return container_of(c, struct cros_ec_pwm_device, chip);
+}
+
+static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_pwm_set_duty params;
+ } buf;
+ struct ec_params_pwm_set_duty *params = &buf.params;
+ struct cros_ec_command *msg = &buf.msg;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->version = 0;
+ msg->command = EC_CMD_PWM_SET_DUTY;
+ msg->insize = 0;
+ msg->outsize = sizeof(*params);
+
+ params->duty = duty;
+ params->pwm_type = EC_PWM_TYPE_GENERIC;
+ params->index = index;
+
+ return cros_ec_cmd_xfer_status(ec, msg);
+}
+
+static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index,
+ u32 *result)
+{
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_params_pwm_get_duty params;
+ struct ec_response_pwm_get_duty resp;
+ };
+ } buf;
+ struct ec_params_pwm_get_duty *params = &buf.params;
+ struct ec_response_pwm_get_duty *resp = &buf.resp;
+ struct cros_ec_command *msg = &buf.msg;
+ int ret;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->version = 0;
+ msg->command = EC_CMD_PWM_GET_DUTY;
+ msg->insize = sizeof(*params);
+ msg->outsize = sizeof(*resp);
+
+ params->pwm_type = EC_PWM_TYPE_GENERIC;
+ params->index = index;
+
+ ret = cros_ec_cmd_xfer_status(ec, msg);
+ if (result)
+ *result = msg->result;
+ if (ret < 0)
+ return ret;
+
+ return resp->duty;
+}
+
+static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
+{
+ return __cros_ec_pwm_get_duty(ec, index, NULL);
+}
+
+static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+ int duty_cycle;
+
+ /* The EC won't let us change the period */
+ if (state->period != EC_PWM_MAX_DUTY)
+ return -EINVAL;
+
+ /*
+	 * The EC doesn't separate the concepts of duty cycle and enabled,
+	 * but the kernel does, so translate between the two here.
+ */
+ duty_cycle = state->enabled ? state->duty_cycle : 0;
+
+ return cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+}
+
+static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+ int ret;
+
+ ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
+ if (ret < 0) {
+ dev_err(chip->dev, "error getting initial duty: %d\n", ret);
+ return;
+ }
+
+ state->enabled = (ret > 0);
+ state->period = EC_PWM_MAX_DUTY;
+
+ /* Note that "disabled" and "duty cycle == 0" are treated the same */
+ state->duty_cycle = ret;
+}
+
+static struct pwm_device *
+cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (args->args[0] >= pc->npwm)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ /* The EC won't let us change the period */
+ pwm->args.period = EC_PWM_MAX_DUTY;
+
+ return pwm;
+}
+
+static const struct pwm_ops cros_ec_pwm_ops = {
+ .get_state = cros_ec_pwm_get_state,
+ .apply = cros_ec_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static int cros_ec_num_pwms(struct cros_ec_device *ec)
+{
+ int i, ret;
+
+ /* The index field is only 8 bits */
+ for (i = 0; i <= U8_MAX; i++) {
+ u32 result = 0;
+
+ ret = __cros_ec_pwm_get_duty(ec, i, &result);
+ /* We want to parse EC protocol errors */
+ if (ret < 0 && !(ret == -EPROTO && result))
+ return ret;
+
+ /*
+ * We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
+ * responses; everything else is treated as an error.
+ */
+ if (result == EC_RES_INVALID_COMMAND)
+ return -ENODEV;
+ else if (result == EC_RES_INVALID_PARAM)
+ return i;
+ else if (result)
+ return -EPROTO;
+ }
+
+ return U8_MAX;
+}
+
+static int cros_ec_pwm_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct cros_ec_pwm_device *ec_pwm;
+ struct pwm_chip *chip;
+ int ret;
+
+ if (!ec) {
+ dev_err(dev, "no parent EC device\n");
+ return -EINVAL;
+ }
+
+ ec_pwm = devm_kzalloc(dev, sizeof(*ec_pwm), GFP_KERNEL);
+ if (!ec_pwm)
+ return -ENOMEM;
+ chip = &ec_pwm->chip;
+ ec_pwm->ec = ec;
+
+ /* PWM chip */
+ chip->dev = dev;
+ chip->ops = &cros_ec_pwm_ops;
+ chip->of_xlate = cros_ec_pwm_xlate;
+ chip->of_pwm_n_cells = 1;
+ chip->base = -1;
+ ret = cros_ec_num_pwms(ec);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't find PWMs: %d\n", ret);
+ return ret;
+ }
+ chip->npwm = ret;
+ dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
+
+ ret = pwmchip_add(chip);
+ if (ret < 0) {
+ dev_err(dev, "cannot register PWM: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ec_pwm);
+
+ return ret;
+}
+
+static int cros_ec_pwm_remove(struct platform_device *dev)
+{
+ struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
+ struct pwm_chip *chip = &ec_pwm->chip;
+
+ return pwmchip_remove(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_ec_pwm_of_match[] = {
+ { .compatible = "google,cros-ec-pwm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match);
+#endif
+
+static struct platform_driver cros_ec_pwm_driver = {
+ .probe = cros_ec_pwm_probe,
+ .remove = cros_ec_pwm_remove,
+ .driver = {
+ .name = "cros-ec-pwm",
+ .of_match_table = of_match_ptr(cros_ec_pwm_of_match),
+ },
+};
+module_platform_driver(cros_ec_pwm_driver);
+
+MODULE_ALIAS("platform:cros-ec-pwm");
+MODULE_DESCRIPTION("ChromeOS EC PWM driver");
+MODULE_LICENSE("GPL v2");
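
Because the EC fixes the period at EC_PWM_MAX_DUTY and folds "enabled" into the duty cycle, a consumer has to scale its requests into that range. A hedged sketch follows; the percent helper is hypothetical and not part of this driver.

#include <linux/mfd/cros_ec_commands.h>
#include <linux/pwm.h>

/* Hypothetical helper: express a 0-100 percent request against the fixed period. */
static int example_set_percent(struct pwm_device *pwm, unsigned int percent)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);
	state.period = EC_PWM_MAX_DUTY;			/* the EC fixes this */
	state.duty_cycle = EC_PWM_MAX_DUTY * percent / 100;
	state.enabled = percent > 0;			/* 0% means "off" */

	return pwm_apply_state(pwm, &state);
}
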
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 4d470c1a406a..a9b3cff96aac 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -25,6 +25,7 @@ struct lpc32xx_pwm_chip {
};
#define PWM_ENABLE BIT(31)
+#define PWM_PIN_LEVEL BIT(30)
#define to_lpc32xx_pwm_chip(_chip) \
container_of(_chip, struct lpc32xx_pwm_chip, chip)
@@ -103,6 +104,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
struct lpc32xx_pwm_chip *lpc32xx;
struct resource *res;
int ret;
+ u32 val;
lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL);
if (!lpc32xx)
@@ -128,6 +130,11 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
return ret;
}
+	/* When PWM is disabled, configure the output to the default value */
+ val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ val &= ~PWM_PIN_LEVEL;
+ writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+
platform_set_drvdata(pdev, lpc32xx);
return 0;
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 7160e8ab38a4..3622f093490e 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -76,6 +76,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
+ { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
{ PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 295b963dbddb..72c0bce5a75c 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -27,7 +27,6 @@
#define PWM_SW_UPDATE BIT(30)
#define PWM_BASE_UNIT_SHIFT 8
#define PWM_ON_TIME_DIV_MASK 0x000000ff
-#define PWM_DIVISION_CORRECTION 0x2
/* Size of each PWM register space if multiple */
#define PWM_SIZE 0x400
@@ -92,8 +91,8 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
- u8 on_time_div;
- unsigned long c, base_unit_range;
+ unsigned long long on_time_div;
+ unsigned long c = lpwm->info->clk_rate, base_unit_range;
unsigned long long base_unit, freq = NSEC_PER_SEC;
u32 ctrl;
@@ -101,21 +100,18 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* The equation is:
- * base_unit = ((freq / c) * base_unit_range) + correction
+ * base_unit = round(base_unit_range * freq / c)
*/
base_unit_range = BIT(lpwm->info->base_unit_bits);
- base_unit = freq * base_unit_range;
+ freq *= base_unit_range;
- c = lpwm->info->clk_rate;
- if (!c)
- return -EINVAL;
-
- do_div(base_unit, c);
- base_unit += PWM_DIVISION_CORRECTION;
+ base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
if (duty_ns <= 0)
duty_ns = 1;
- on_time_div = 255 - (255 * duty_ns / period_ns);
+ on_time_div = 255ULL * duty_ns;
+ do_div(on_time_div, period_ns);
+ on_time_div = 255ULL - on_time_div;
pm_runtime_get_sync(chip->dev);
@@ -169,6 +165,7 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
+ unsigned long c;
int ret;
lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
@@ -180,6 +177,11 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
return ERR_CAST(lpwm->regs);
lpwm->info = info;
+
+ c = lpwm->info->clk_rate;
+ if (!c)
+ return ERR_PTR(-EINVAL);
+
lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
lpwm->chip.base = -1;
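
The reworked pwm-lpss math (round(base_unit_range * freq / c) plus the 64-bit on_time_div) can be sanity-checked with a standalone computation. The 19.2 MHz clock, the 22-bit base unit width and the 100 Hz / 50% request below are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_rate = 19200000;			/* assumed LPSS clock */
	uint64_t base_unit_range = 1ULL << 22;		/* assumed field width */
	uint64_t period_ns = 10000000, duty_ns = 5000000;
	uint64_t freq, base_unit, on_time_div;

	freq = 1000000000ULL / period_ns;		/* 100 Hz */
	freq *= base_unit_range;
	base_unit = (freq + clk_rate / 2) / clk_rate;	/* DIV_ROUND_CLOSEST -> 22 */

	on_time_div = 255ULL * duty_ns / period_ns;	/* 64-bit, no overflow */
	on_time_div = 255ULL - on_time_div;		/* -> 128 */

	printf("base_unit=%llu on_time_div=%llu\n",
	       (unsigned long long)base_unit, (unsigned long long)on_time_div);
	return 0;
}
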
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 3e95090cd7cf..5ad42f33e70c 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -245,7 +245,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
struct pwm_omap_dmtimer_chip *omap;
struct pwm_omap_dmtimer_pdata *pdata;
pwm_omap_dmtimer *dm_timer;
- u32 prescaler;
+ u32 v;
int status;
pdata = dev_get_platdata(&pdev->dev);
@@ -306,10 +306,12 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
- /* setup dmtimer prescaler */
- if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler",
- &prescaler))
- omap->pdata->set_prescaler(omap->dm_timer, prescaler);
+ if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler", &v))
+ omap->pdata->set_prescaler(omap->dm_timer, v);
+
+ /* setup dmtimer clock source */
+ if (!of_property_read_u32(pdev->dev.of_node, "ti,clock-source", &v))
+ omap->pdata->set_source(omap->dm_timer, v);
omap->chip.dev = &pdev->dev;
omap->chip.ops = &pwm_omap_dmtimer_ops;
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 7d9cc9049522..ef89df1f7336 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -47,10 +47,14 @@ struct rockchip_pwm_regs {
struct rockchip_pwm_data {
struct rockchip_pwm_regs regs;
unsigned int prescaler;
+ bool supports_polarity;
const struct pwm_ops *ops;
void (*set_enable)(struct pwm_chip *chip,
- struct pwm_device *pwm, bool enable);
+ struct pwm_device *pwm, bool enable,
+ enum pwm_polarity polarity);
+ void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
};
static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
@@ -59,7 +63,8 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
}
static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip,
- struct pwm_device *pwm, bool enable)
+ struct pwm_device *pwm, bool enable,
+ enum pwm_polarity polarity)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
@@ -75,15 +80,29 @@ static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip,
writel_relaxed(val, pc->base + pc->data->regs.ctrl);
}
+static void rockchip_pwm_get_state_v1(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
+ u32 val;
+
+ val = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ if ((val & enable_conf) == enable_conf)
+ state->enabled = true;
+}
+
static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
- struct pwm_device *pwm, bool enable)
+ struct pwm_device *pwm, bool enable,
+ enum pwm_polarity polarity)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
PWM_CONTINUOUS;
u32 val;
- if (pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED)
+ if (polarity == PWM_POLARITY_INVERSED)
enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
else
enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
@@ -98,13 +117,59 @@ static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
writel_relaxed(val, pc->base + pc->data->regs.ctrl);
}
+static void rockchip_pwm_get_state_v2(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
+ PWM_CONTINUOUS;
+ u32 val;
+
+ val = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ if ((val & enable_conf) != enable_conf)
+ return;
+
+ state->enabled = true;
+
+ if (!(val & PWM_DUTY_POSITIVE))
+ state->polarity = PWM_POLARITY_INVERSED;
+}
+
+static void rockchip_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ unsigned long clk_rate;
+ u64 tmp;
+ int ret;
+
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return;
+
+ clk_rate = clk_get_rate(pc->clk);
+
+ tmp = readl_relaxed(pc->base + pc->data->regs.period);
+ tmp *= pc->data->prescaler * NSEC_PER_SEC;
+ state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
+
+ tmp = readl_relaxed(pc->base + pc->data->regs.duty);
+ tmp *= pc->data->prescaler * NSEC_PER_SEC;
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
+
+ pc->data->get_state(chip, pwm, state);
+
+ clk_disable(pc->clk);
+}
+
static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
unsigned long period, duty;
u64 clk_rate, div;
- int ret;
clk_rate = clk_get_rate(pc->clk);
@@ -114,74 +179,72 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* default prescaler value for all practical clock rate values.
*/
div = clk_rate * period_ns;
- do_div(div, pc->data->prescaler * NSEC_PER_SEC);
- period = div;
+ period = DIV_ROUND_CLOSEST_ULL(div,
+ pc->data->prescaler * NSEC_PER_SEC);
div = clk_rate * duty_ns;
- do_div(div, pc->data->prescaler * NSEC_PER_SEC);
- duty = div;
-
- ret = clk_enable(pc->clk);
- if (ret)
- return ret;
+ duty = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC);
writel(period, pc->base + pc->data->regs.period);
writel(duty, pc->base + pc->data->regs.duty);
- writel(0, pc->base + pc->data->regs.cntr);
-
- clk_disable(pc->clk);
-
- return 0;
-}
-
-static int rockchip_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- /*
- * No action needed here because pwm->polarity will be set by the core
- * and the core will only change polarity when the PWM is not enabled.
- * We'll handle things in set_enable().
- */
return 0;
}
-static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ struct pwm_state curstate;
+ bool enabled;
int ret;
+ pwm_get_state(pwm, &curstate);
+ enabled = curstate.enabled;
+
ret = clk_enable(pc->clk);
if (ret)
return ret;
- pc->data->set_enable(chip, pwm, true);
+ if (state->polarity != curstate.polarity && enabled) {
+ pc->data->set_enable(chip, pwm, false, state->polarity);
+ enabled = false;
+ }
- return 0;
-}
+ ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (ret) {
+ if (enabled != curstate.enabled)
+ pc->data->set_enable(chip, pwm, !enabled,
+ state->polarity);
-static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ goto out;
+ }
+
+ if (state->enabled != enabled)
+ pc->data->set_enable(chip, pwm, state->enabled,
+ state->polarity);
- pc->data->set_enable(chip, pwm, false);
+ /*
+	 * Update the state to reflect the real hardware configuration, which
+	 * can differ slightly because of the period/duty_cycle approximation.
+ */
+ rockchip_pwm_get_state(chip, pwm, state);
+out:
clk_disable(pc->clk);
+
+ return ret;
}
static const struct pwm_ops rockchip_pwm_ops_v1 = {
- .config = rockchip_pwm_config,
- .enable = rockchip_pwm_enable,
- .disable = rockchip_pwm_disable,
+ .get_state = rockchip_pwm_get_state,
+ .apply = rockchip_pwm_apply,
.owner = THIS_MODULE,
};
static const struct pwm_ops rockchip_pwm_ops_v2 = {
- .config = rockchip_pwm_config,
- .set_polarity = rockchip_pwm_set_polarity,
- .enable = rockchip_pwm_enable,
- .disable = rockchip_pwm_disable,
+ .get_state = rockchip_pwm_get_state,
+ .apply = rockchip_pwm_apply,
.owner = THIS_MODULE,
};
@@ -195,6 +258,7 @@ static const struct rockchip_pwm_data pwm_data_v1 = {
.prescaler = 2,
.ops = &rockchip_pwm_ops_v1,
.set_enable = rockchip_pwm_set_enable_v1,
+ .get_state = rockchip_pwm_get_state_v1,
};
static const struct rockchip_pwm_data pwm_data_v2 = {
@@ -205,8 +269,10 @@ static const struct rockchip_pwm_data pwm_data_v2 = {
.ctrl = 0x0c,
},
.prescaler = 1,
+ .supports_polarity = true,
.ops = &rockchip_pwm_ops_v2,
.set_enable = rockchip_pwm_set_enable_v2,
+ .get_state = rockchip_pwm_get_state_v2,
};
static const struct rockchip_pwm_data pwm_data_vop = {
@@ -217,8 +283,10 @@ static const struct rockchip_pwm_data pwm_data_vop = {
.ctrl = 0x00,
},
.prescaler = 1,
+ .supports_polarity = true,
.ops = &rockchip_pwm_ops_v2,
.set_enable = rockchip_pwm_set_enable_v2,
+ .get_state = rockchip_pwm_get_state_v2,
};
static const struct of_device_id rockchip_pwm_dt_ids[] = {
@@ -253,7 +321,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->clk))
return PTR_ERR(pc->clk);
- ret = clk_prepare(pc->clk);
+ ret = clk_prepare_enable(pc->clk);
if (ret)
return ret;
@@ -265,7 +333,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->chip.base = -1;
pc->chip.npwm = 1;
- if (pc->data->ops->set_polarity) {
+ if (pc->data->supports_polarity) {
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
}
@@ -276,6 +344,10 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
}
+ /* Keep the PWM clk enabled if the PWM appears to be up and running. */
+ if (!pwm_is_enabled(pc->chip.pwms))
+ clk_disable(pc->clk);
+
return ret;
}
@@ -283,6 +355,20 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
{
struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
+ /*
+ * Disable the PWM clk before unpreparing it if the PWM device is still
+ * running. This should only happen when the last PWM user left it
+ * enabled, or when nobody requested a PWM that was previously enabled
+ * by the bootloader.
+ *
+ * FIXME: Maybe the core should disable all PWM devices in
+ * pwmchip_remove(). In this case we'd only have to call
+ * clk_unprepare() after pwmchip_remove().
+ *
+ */
+ if (pwm_is_enabled(pc->chip.pwms))
+ clk_disable(pc->clk);
+
clk_unprepare(pc->clk);
return pwmchip_remove(&pc->chip);
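
The rockchip get_state()/config() pair above converts between nanoseconds and counter values with DIV_ROUND_CLOSEST. Below is a small round-trip check of that conversion, assuming a 24 MHz PWM clock and prescaler 1 (illustrative values only).

#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	uint64_t clk_rate = 24000000, prescaler = 1;
	uint64_t period_ns = 1000000;	/* 1 kHz */
	uint64_t counts, back_ns;

	/* ns -> counter, as in rockchip_pwm_config() */
	counts = div_round_closest(clk_rate * period_ns,
				   prescaler * 1000000000ULL);	/* 24000 */

	/* counter -> ns, as in rockchip_pwm_get_state() */
	back_ns = div_round_closest(counts * prescaler * 1000000000ULL,
				    clk_rate);			/* 1000000 */

	printf("counts=%llu back_ns=%llu\n",
	       (unsigned long long)counts, (unsigned long long)back_ns);
	return 0;
}
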
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
new file mode 100644
index 000000000000..e464582a390a
--- /dev/null
+++ b/drivers/pwm/pwm-stmpe.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mfd/stmpe.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define STMPE24XX_PWMCS 0x30
+#define PWMCS_EN_PWM0 BIT(0)
+#define PWMCS_EN_PWM1 BIT(1)
+#define PWMCS_EN_PWM2 BIT(2)
+#define STMPE24XX_PWMIC0 0x38
+#define STMPE24XX_PWMIC1 0x39
+#define STMPE24XX_PWMIC2 0x3a
+
+#define STMPE_PWM_24XX_PINBASE 21
+
+struct stmpe_pwm {
+ struct stmpe *stmpe;
+ struct pwm_chip chip;
+ u8 last_duty;
+};
+
+static inline struct stmpe_pwm *to_stmpe_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct stmpe_pwm, chip);
+}
+
+static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
+ u8 value;
+ int ret;
+
+ ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
+ if (ret < 0) {
+ dev_err(chip->dev, "error reading PWM#%u control\n",
+ pwm->hwpwm);
+ return ret;
+ }
+
+ value = ret | BIT(pwm->hwpwm);
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing PWM#%u control\n",
+ pwm->hwpwm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void stmpe_24xx_pwm_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
+ u8 value;
+ int ret;
+
+ ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
+ if (ret < 0) {
+ dev_err(chip->dev, "error reading PWM#%u control\n",
+ pwm->hwpwm);
+ return;
+ }
+
+ value = ret & ~BIT(pwm->hwpwm);
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing PWM#%u control\n",
+ pwm->hwpwm);
+ return;
+ }
+}
+
+/* STMPE 24xx PWM instructions */
+#define SMAX 0x007f
+#define SMIN 0x00ff
+#define GTS 0x0000
+#define LOAD BIT(14) /* Only available on 2403 */
+#define RAMPUP 0x0000
+#define RAMPDOWN BIT(7)
+#define PRESCALE_512 BIT(14)
+#define STEPTIME_1 BIT(8)
+#define BRANCH (BIT(15) | BIT(13))
+
+static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
+ unsigned int i, pin;
+ u16 program[3] = {
+ SMAX,
+ GTS,
+ GTS,
+ };
+ u8 offset;
+ int ret;
+
+ /* Make sure we are disabled */
+ if (pwm_is_enabled(pwm)) {
+ stmpe_24xx_pwm_disable(chip, pwm);
+ } else {
+ /* Connect the PWM to the pin */
+ pin = pwm->hwpwm;
+
+ /* On STMPE2401 and 2403 pins 21,22,23 are used */
+ if (stmpe_pwm->stmpe->partnum == STMPE2401 ||
+ stmpe_pwm->stmpe->partnum == STMPE2403)
+ pin += STMPE_PWM_24XX_PINBASE;
+
+ ret = stmpe_set_altfunc(stmpe_pwm->stmpe, BIT(pin),
+ STMPE_BLOCK_PWM);
+ if (ret) {
+ dev_err(chip->dev, "unable to connect PWM#%u to pin\n",
+ pwm->hwpwm);
+ return ret;
+ }
+ }
+
+ /* STMPE24XX */
+ switch (pwm->hwpwm) {
+ case 0:
+ offset = STMPE24XX_PWMIC0;
+ break;
+
+ case 1:
+ offset = STMPE24XX_PWMIC1;
+ break;
+
+ case 2:
+		offset = STMPE24XX_PWMIC2;
+ break;
+
+ default:
+ /* Should not happen as npwm is 3 */
+ return -ENODEV;
+ }
+
+ dev_dbg(chip->dev, "PWM#%u: config duty %d ns, period %d ns\n",
+ pwm->hwpwm, duty_ns, period_ns);
+
+ if (duty_ns == 0) {
+ if (stmpe_pwm->stmpe->partnum == STMPE2401)
+ program[0] = SMAX; /* off all the time */
+
+ if (stmpe_pwm->stmpe->partnum == STMPE2403)
+ program[0] = LOAD | 0xff; /* LOAD 0xff */
+
+ stmpe_pwm->last_duty = 0x00;
+ } else if (duty_ns == period_ns) {
+ if (stmpe_pwm->stmpe->partnum == STMPE2401)
+ program[0] = SMIN; /* on all the time */
+
+ if (stmpe_pwm->stmpe->partnum == STMPE2403)
+ program[0] = LOAD | 0x00; /* LOAD 0x00 */
+
+ stmpe_pwm->last_duty = 0xff;
+ } else {
+ u8 value, last = stmpe_pwm->last_duty;
+ unsigned long duty;
+
+ /*
+		 * The counter runs from 0x00 to 0xff repeatedly at 32768 Hz,
+		 * i.e. with a period of 30517 ns, and is compared to the
+		 * counter from the ramp; if the latter is >= the PWM counter,
+		 * the output is high. With LOAD we can define how much of the
+		 * cycle the output is on.
+ *
+ * Prescale = 0 -> 2 kHz -> T = 1/f = 488281.25 ns
+ */
+
+ /* Scale to 0..0xff */
+ duty = duty_ns * 256;
+ duty = DIV_ROUND_CLOSEST(duty, period_ns);
+ value = duty;
+
+ if (value == last) {
+ /* Run the old program */
+ if (pwm_is_enabled(pwm))
+ stmpe_24xx_pwm_enable(chip, pwm);
+
+ return 0;
+ } else if (stmpe_pwm->stmpe->partnum == STMPE2403) {
+ /* STMPE2403 can simply set the right PWM value */
+ program[0] = LOAD | value;
+ program[1] = 0x0000;
+ } else if (stmpe_pwm->stmpe->partnum == STMPE2401) {
+ /* STMPE2401 need a complex program */
+ u16 incdec = 0x0000;
+
+ if (last < value)
+ /* Count up */
+ incdec = RAMPUP | (value - last);
+ else
+ /* Count down */
+ incdec = RAMPDOWN | (last - value);
+
+ /* Step to desired value, smoothly */
+ program[0] = PRESCALE_512 | STEPTIME_1 | incdec;
+
+ /* Loop eternally to 0x00 */
+ program[1] = BRANCH;
+ }
+
+ dev_dbg(chip->dev,
+ "PWM#%u: value = %02x, last_duty = %02x, program=%04x,%04x,%04x\n",
+ pwm->hwpwm, value, last, program[0], program[1],
+ program[2]);
+ stmpe_pwm->last_duty = value;
+ }
+
+ /*
+ * We can write programs of up to 64 16-bit words into this channel.
+ */
+ for (i = 0; i < ARRAY_SIZE(program); i++) {
+ u8 value;
+
+ value = (program[i] >> 8) & 0xff;
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing register %02x: %d\n",
+ offset, ret);
+ return ret;
+ }
+
+ value = program[i] & 0xff;
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing register %02x: %d\n",
+ offset, ret);
+ return ret;
+ }
+ }
+
+ /* If we were enabled, re-enable this PWM */
+ if (pwm_is_enabled(pwm))
+ stmpe_24xx_pwm_enable(chip, pwm);
+
+ /* Sleep for 200ms so we're sure it will take effect */
+ msleep(200);
+
+ dev_dbg(chip->dev, "programmed PWM#%u, %u bytes\n", pwm->hwpwm, i);
+
+ return 0;
+}
+
+static const struct pwm_ops stmpe_24xx_pwm_ops = {
+ .config = stmpe_24xx_pwm_config,
+ .enable = stmpe_24xx_pwm_enable,
+ .disable = stmpe_24xx_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int __init stmpe_pwm_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct stmpe_pwm *pwm;
+ int ret;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return -ENOMEM;
+
+ pwm->stmpe = stmpe;
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.base = -1;
+
+ if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
+ pwm->chip.ops = &stmpe_24xx_pwm_ops;
+ pwm->chip.npwm = 3;
+ } else {
+ if (stmpe->partnum == STMPE1601)
+ dev_err(&pdev->dev, "STMPE1601 not yet supported\n");
+ else
+ dev_err(&pdev->dev, "Unknown STMPE PWM\n");
+
+ return -ENODEV;
+ }
+
+ ret = stmpe_enable(stmpe, STMPE_BLOCK_PWM);
+ if (ret)
+ return ret;
+
+ ret = pwmchip_add(&pwm->chip);
+ if (ret) {
+ stmpe_disable(stmpe, STMPE_BLOCK_PWM);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pwm);
+
+ return 0;
+}
+
+static struct platform_driver stmpe_pwm_driver = {
+ .driver = {
+ .name = "stmpe-pwm",
+ },
+};
+builtin_platform_driver_probe(stmpe_pwm_driver, stmpe_pwm_probe);
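
The STMPE duty handling above scales duty_ns to a 0..0xff value and, on the STMPE2401, ramps from the previously programmed value. A worked example with assumed numbers (10 ms period, 25% -> 37.5% transition); the RAMPUP/RAMPDOWN values simply mirror the driver's defines.

#include <stdint.h>
#include <stdio.h>

#define RAMPUP		0x0000
#define RAMPDOWN	(1u << 7)

int main(void)
{
	unsigned long period_ns = 10000000;
	unsigned long duty_ns = 3750000;	/* 37.5% */
	uint8_t last = 64;			/* previous 25% setting */
	unsigned long value;
	uint16_t incdec;

	/* Scale to 0..0xff, rounding to nearest as the driver does. */
	value = (duty_ns * 256 + period_ns / 2) / period_ns;	/* 96 */

	if (last < value)
		incdec = RAMPUP | (value - last);	/* count up by 32 */
	else
		incdec = RAMPDOWN | (last - value);

	printf("value=%lu incdec=0x%04x\n", value, incdec);	/* 96, 0x0020 */
	return 0;
}
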
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index d4de0607b502..e4647840cd6e 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -26,9 +26,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/reset.h>
#define PWM_ENABLE (1 << 31)
#define PWM_DUTY_WIDTH 8
@@ -36,15 +38,20 @@
#define PWM_SCALE_WIDTH 13
#define PWM_SCALE_SHIFT 0
-#define NUM_PWM 4
+struct tegra_pwm_soc {
+ unsigned int num_channels;
+};
struct tegra_pwm_chip {
- struct pwm_chip chip;
- struct device *dev;
+ struct pwm_chip chip;
+ struct device *dev;
+
+ struct clk *clk;
+	struct reset_control *rst;
- struct clk *clk;
+ void __iomem *regs;
- void __iomem *mmio_base;
+ const struct tegra_pwm_soc *soc;
};
static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
@@ -54,20 +61,20 @@ static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
static inline u32 pwm_readl(struct tegra_pwm_chip *chip, unsigned int num)
{
- return readl(chip->mmio_base + (num << 4));
+ return readl(chip->regs + (num << 4));
}
static inline void pwm_writel(struct tegra_pwm_chip *chip, unsigned int num,
unsigned long val)
{
- writel(val, chip->mmio_base + (num << 4));
+ writel(val, chip->regs + (num << 4));
}
static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
- unsigned long long c;
+ unsigned long long c = duty_ns;
unsigned long rate, hz;
u32 val = 0;
int err;
@@ -77,7 +84,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* per (1 << PWM_DUTY_WIDTH) cycles and make sure to round to the
* nearest integer during division.
*/
- c = duty_ns * ((1 << PWM_DUTY_WIDTH) - 1) + period_ns / 2;
+ c *= (1 << PWM_DUTY_WIDTH);
+ c += period_ns / 2;
do_div(c, period_ns);
val = (u32)c << PWM_DUTY_SHIFT;
@@ -176,12 +184,13 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (!pwm)
return -ENOMEM;
+ pwm->soc = of_device_get_match_data(&pdev->dev);
pwm->dev = &pdev->dev;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(pwm->mmio_base))
- return PTR_ERR(pwm->mmio_base);
+ pwm->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pwm->regs))
+ return PTR_ERR(pwm->regs);
platform_set_drvdata(pdev, pwm);
@@ -189,14 +198,24 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
+ pwm->rst = devm_reset_control_get(&pdev->dev, "pwm");
+ if (IS_ERR(pwm->rst)) {
+ ret = PTR_ERR(pwm->rst);
+		dev_err(&pdev->dev, "failed to get reset control: %d\n", ret);
+ return ret;
+ }
+
+ reset_control_deassert(pwm->rst);
+
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &tegra_pwm_ops;
pwm->chip.base = -1;
- pwm->chip.npwm = NUM_PWM;
+ pwm->chip.npwm = pwm->soc->num_channels;
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ reset_control_assert(pwm->rst);
return ret;
}
@@ -206,12 +225,17 @@ static int tegra_pwm_probe(struct platform_device *pdev)
static int tegra_pwm_remove(struct platform_device *pdev)
{
struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
- int i;
+ unsigned int i;
+ int err;
if (WARN_ON(!pc))
return -ENODEV;
- for (i = 0; i < NUM_PWM; i++) {
+ err = clk_prepare_enable(pc->clk);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < pc->chip.npwm; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
if (!pwm_is_enabled(pwm))
@@ -223,12 +247,23 @@ static int tegra_pwm_remove(struct platform_device *pdev)
clk_disable_unprepare(pc->clk);
}
+ reset_control_assert(pc->rst);
+ clk_disable_unprepare(pc->clk);
+
return pwmchip_remove(&pc->chip);
}
+static const struct tegra_pwm_soc tegra20_pwm_soc = {
+ .num_channels = 4,
+};
+
+static const struct tegra_pwm_soc tegra186_pwm_soc = {
+ .num_channels = 1,
+};
+
static const struct of_device_id tegra_pwm_of_match[] = {
- { .compatible = "nvidia,tegra20-pwm" },
- { .compatible = "nvidia,tegra30-pwm" },
+ { .compatible = "nvidia,tegra20-pwm", .data = &tegra20_pwm_soc },
+ { .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc },
{ }
};
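
The tegra_pwm_config() change above moves the duty calculation to 64-bit math and rounds to the nearest tick. In isolation, with assumed example values:

#include <stdint.h>
#include <stdio.h>

#define PWM_DUTY_WIDTH	8

int main(void)
{
	uint64_t duty_ns = 333333, period_ns = 1000000;	/* ~33.3% duty */
	uint64_t c = duty_ns;

	c *= 1 << PWM_DUTY_WIDTH;	/* 256 ticks per period, 64-bit safe */
	c += period_ns / 2;		/* round to nearest */
	c /= period_ns;

	printf("duty ticks = %llu of 256\n", (unsigned long long)c);	/* 85 */
	return 0;
}
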
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 616af764a276..6ec342dd3eea 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -27,8 +27,6 @@
#include <linux/pwm.h>
#include <linux/of_device.h>
-#include "pwm-tipwmss.h"
-
/* ECAP registers and bits definitions */
#define CAP1 0x08
#define CAP2 0x0C
@@ -195,6 +193,7 @@ static const struct pwm_ops ecap_pwm_ops = {
};
static const struct of_device_id ecap_of_match[] = {
+ { .compatible = "ti,am3352-ecap" },
{ .compatible = "ti,am33xx-ecap" },
{},
};
@@ -202,11 +201,11 @@ MODULE_DEVICE_TABLE(of, ecap_of_match);
static int ecap_pwm_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
int ret;
struct resource *r;
struct clk *clk;
struct ecap_pwm_chip *pc;
- u16 status;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -214,6 +213,13 @@ static int ecap_pwm_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
+ if (of_device_is_compatible(np, "ti,am33xx-ecap")) {
+ dev_warn(&pdev->dev, "Binding is obsolete.\n");
+ clk = devm_clk_get(pdev->dev.parent, "fck");
+ }
+ }
+
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
}
@@ -243,40 +249,15 @@ static int ecap_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- status = pwmss_submodule_state_change(pdev->dev.parent,
- PWMSS_ECAPCLK_EN);
- if (!(status & PWMSS_ECAPCLK_EN_ACK)) {
- dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
- ret = -EINVAL;
- goto pwmss_clk_failure;
- }
-
- pm_runtime_put_sync(&pdev->dev);
platform_set_drvdata(pdev, pc);
return 0;
-
-pwmss_clk_failure:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pwmchip_remove(&pc->chip);
- return ret;
}
static int ecap_pwm_remove(struct platform_device *pdev)
{
struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
- pm_runtime_get_sync(&pdev->dev);
- /*
- * Due to hardware misbehaviour, acknowledge of the stop_req
- * is missing. Hence checking of the status bit skipped.
- */
- pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
- pm_runtime_put_sync(&pdev->dev);
-
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 6a41e66015b6..b5c6b0636893 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -27,8 +27,6 @@
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include "pwm-tipwmss.h"
-
/* EHRPWM registers and bits definitions */
/* Time base module registers */
@@ -426,6 +424,7 @@ static const struct pwm_ops ehrpwm_pwm_ops = {
};
static const struct of_device_id ehrpwm_of_match[] = {
+ { .compatible = "ti,am3352-ehrpwm" },
{ .compatible = "ti,am33xx-ehrpwm" },
{},
};
@@ -433,11 +432,11 @@ MODULE_DEVICE_TABLE(of, ehrpwm_of_match);
static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
int ret;
struct resource *r;
struct clk *clk;
struct ehrpwm_pwm_chip *pc;
- u16 status;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -445,6 +444,13 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
+		if (of_device_is_compatible(np, "ti,am33xx-ehrpwm")) {
+ dev_warn(&pdev->dev, "Binding is obsolete.\n");
+ clk = devm_clk_get(pdev->dev.parent, "fck");
+ }
+ }
+
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
}
@@ -487,27 +493,9 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- status = pwmss_submodule_state_change(pdev->dev.parent,
- PWMSS_EPWMCLK_EN);
- if (!(status & PWMSS_EPWMCLK_EN_ACK)) {
- dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
- ret = -EINVAL;
- goto pwmss_clk_failure;
- }
-
- pm_runtime_put_sync(&pdev->dev);
platform_set_drvdata(pdev, pc);
return 0;
-
-pwmss_clk_failure:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pwmchip_remove(&pc->chip);
- clk_unprepare(pc->tbclk);
- return ret;
}
static int ehrpwm_pwm_remove(struct platform_device *pdev)
@@ -516,14 +504,6 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
clk_unprepare(pc->tbclk);
- pm_runtime_get_sync(&pdev->dev);
- /*
- * Due to hardware misbehaviour, acknowledge of the stop_req
- * is missing. Hence checking of the status bit skipped.
- */
- pwmss_submodule_state_change(pdev->dev.parent, PWMSS_EPWMCLK_STOP_REQ);
- pm_runtime_put_sync(&pdev->dev);
-
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 5cf65a15d021..829f4991c96f 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -22,32 +22,6 @@
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include "pwm-tipwmss.h"
-
-#define PWMSS_CLKCONFIG 0x8 /* Clock gating reg */
-#define PWMSS_CLKSTATUS 0xc /* Clock gating status reg */
-
-struct pwmss_info {
- void __iomem *mmio_base;
- struct mutex pwmss_lock;
- u16 pwmss_clkconfig;
-};
-
-u16 pwmss_submodule_state_change(struct device *dev, int set)
-{
- struct pwmss_info *info = dev_get_drvdata(dev);
- u16 val;
-
- mutex_lock(&info->pwmss_lock);
- val = readw(info->mmio_base + PWMSS_CLKCONFIG);
- val |= set;
- writew(val , info->mmio_base + PWMSS_CLKCONFIG);
- mutex_unlock(&info->pwmss_lock);
-
- return readw(info->mmio_base + PWMSS_CLKSTATUS);
-}
-EXPORT_SYMBOL(pwmss_submodule_state_change);
-
static const struct of_device_id pwmss_of_match[] = {
{ .compatible = "ti,am33xx-pwmss" },
{},
@@ -57,24 +31,10 @@ MODULE_DEVICE_TABLE(of, pwmss_of_match);
static int pwmss_probe(struct platform_device *pdev)
{
int ret;
- struct resource *r;
- struct pwmss_info *info;
struct device_node *node = pdev->dev.of_node;
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- mutex_init(&info->pwmss_lock);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(info->mmio_base))
- return PTR_ERR(info->mmio_base);
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- platform_set_drvdata(pdev, info);
/* Populate all the child nodes here... */
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -86,30 +46,21 @@ static int pwmss_probe(struct platform_device *pdev)
static int pwmss_remove(struct platform_device *pdev)
{
- struct pwmss_info *info = platform_get_drvdata(pdev);
-
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- mutex_destroy(&info->pwmss_lock);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pwmss_suspend(struct device *dev)
{
- struct pwmss_info *info = dev_get_drvdata(dev);
-
- info->pwmss_clkconfig = readw(info->mmio_base + PWMSS_CLKCONFIG);
pm_runtime_put_sync(dev);
return 0;
}
static int pwmss_resume(struct device *dev)
{
- struct pwmss_info *info = dev_get_drvdata(dev);
-
pm_runtime_get_sync(dev);
- writew(info->pwmss_clkconfig, info->mmio_base + PWMSS_CLKCONFIG);
return 0;
}
#endif
diff --git a/drivers/pwm/pwm-tipwmss.h b/drivers/pwm/pwm-tipwmss.h
deleted file mode 100644
index 10ad8040408b..000000000000
--- a/drivers/pwm/pwm-tipwmss.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * TI PWM Subsystem driver
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __TIPWMSS_H
-#define __TIPWMSS_H
-
-/* PWM substem clock gating */
-#define PWMSS_ECAPCLK_EN BIT(0)
-#define PWMSS_ECAPCLK_STOP_REQ BIT(1)
-#define PWMSS_EPWMCLK_EN BIT(8)
-#define PWMSS_EPWMCLK_STOP_REQ BIT(9)
-
-#define PWMSS_ECAPCLK_EN_ACK BIT(0)
-#define PWMSS_EPWMCLK_EN_ACK BIT(8)
-
-#ifdef CONFIG_PWM_TIPWMSS
-extern u16 pwmss_submodule_state_change(struct device *dev, int set);
-#else
-static inline u16 pwmss_submodule_state_change(struct device *dev, int set)
-{
- /* return success status value */
- return 0xFFFF;
-}
-#endif
-#endif /* __TIPWMSS_H */
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 01695d48dd54..18ed725594c3 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -208,16 +208,33 @@ static ssize_t polarity_store(struct device *child,
return ret ? : size;
}
+static ssize_t capture_show(struct device *child,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_capture result;
+ int ret;
+
+ ret = pwm_capture(pwm, &result, jiffies_to_msecs(HZ));
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
+}
+
static DEVICE_ATTR_RW(period);
static DEVICE_ATTR_RW(duty_cycle);
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(polarity);
+static DEVICE_ATTR_RO(capture);
static struct attribute *pwm_attrs[] = {
&dev_attr_period.attr,
&dev_attr_duty_cycle.attr,
&dev_attr_enable.attr,
&dev_attr_polarity.attr,
+ &dev_attr_capture.attr,
NULL
};
ATTRIBUTE_GROUPS(pwm);
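
With the new sysfs attribute, a capture can be read from user space. A minimal sketch follows, assuming pwmchip0/pwm0 has already been exported and that the underlying driver implements .capture; the path is illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int period, duty;
	FILE *f = fopen("/sys/class/pwm/pwmchip0/pwm0/capture", "r");

	if (!f)
		return 1;

	/* The attribute prints "<period> <duty_cycle>\n". */
	if (fscanf(f, "%u %u", &period, &duty) == 2)
		printf("period %u, duty cycle %u\n", period, duty);

	fclose(f);
	return 0;
}
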
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index b5a10d3c92c7..d6d2f20c4597 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -67,6 +67,15 @@ config RAPIDIO_ENUM_BASIC
endchoice
+config RAPIDIO_CHMAN
+ tristate "RapidIO Channelized Messaging driver"
+ depends on RAPIDIO
+ help
+	  This option includes the RapidIO channelized messaging driver,
+	  which provides a socket-like interface for sharing a single
+	  RapidIO messaging mailbox between multiple user-space
+	  applications. See "Documentation/rapidio/rio_cm.txt" for the
+	  driver description.
+
config RAPIDIO_MPORT_CDEV
tristate "RapidIO /dev mport device driver"
depends on RAPIDIO
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 6271ada6993f..74dcea45ad49 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_RAPIDIO) += rapidio.o
rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o
obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
+obj-$(CONFIG_RAPIDIO_CHMAN) += rio_cm.o
obj-$(CONFIG_RAPIDIO) += switches/
obj-$(CONFIG_RAPIDIO) += devices/
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index e165b7ce29d7..436dfe871d32 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1813,7 +1813,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (rdev->pef & RIO_PEF_EXT_FEATURES) {
rdev->efptr = rval & 0xffff;
rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
- hopcount);
+ hopcount, &rdev->phys_rmap);
rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
hopcount, RIO_EFB_ERR_MGMNT);
@@ -2242,7 +2242,7 @@ static void mport_mm_open(struct vm_area_struct *vma)
{
struct rio_mport_mapping *map = vma->vm_private_data;
-rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+ rmcd_debug(MMAP, "%pad", &map->phys_addr);
kref_get(&map->ref);
}
@@ -2250,7 +2250,7 @@ static void mport_mm_close(struct vm_area_struct *vma)
{
struct rio_mport_mapping *map = vma->vm_private_data;
-rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+ rmcd_debug(MMAP, "%pad", &map->phys_addr);
mutex_lock(&map->md->buf_mutex);
kref_put(&map->ref, mport_release_mapping);
mutex_unlock(&map->md->buf_mutex);
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index b5b455614f8a..32f0f014a067 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,11 +37,20 @@
#include "tsi721.h"
#ifdef DEBUG
-u32 dbg_level = DBG_INIT | DBG_EXIT;
+u32 dbg_level;
module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
+static int pcie_mrrs = -1;
+module_param(pcie_mrrs, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
+
+static u8 mbox_sel = 0x0f;
+module_param(mbox_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(mbox_sel,
+ "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
+
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
@@ -1081,7 +1090,7 @@ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
* from rstart to lstart.
*/
static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
- u64 rstart, u32 size, u32 flags)
+ u64 rstart, u64 size, u32 flags)
{
struct tsi721_device *priv = mport->priv;
int i, avail = -1;
@@ -1094,6 +1103,10 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
struct tsi721_ib_win_mapping *map = NULL;
int ret = -EBUSY;
+ /* Max IBW size supported by HW is 16GB */
+ if (size > 0x400000000UL)
+ return -EINVAL;
+
if (direct) {
/* Calculate minimal acceptable window size and base address */
@@ -1101,15 +1114,15 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
ibw_start = lstart & ~(ibw_size - 1);
tsi_debug(IBW, &priv->pdev->dev,
- "Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx",
+ "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
rstart, &lstart, size, ibw_start);
while ((lstart + size) > (ibw_start + ibw_size)) {
ibw_size *= 2;
ibw_start = lstart & ~(ibw_size - 1);
- if (ibw_size > 0x80000000) { /* Limit max size to 2GB */
+ /* Check for crossing IBW max size 16GB */
+ if (ibw_size > 0x400000000UL)
return -EBUSY;
- }
}
loc_start = ibw_start;
@@ -1120,7 +1133,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
} else {
tsi_debug(IBW, &priv->pdev->dev,
- "Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x",
+ "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
rstart, &lstart, size);
if (!is_power_of_2(size) || size < 0x1000 ||
@@ -1215,7 +1228,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
priv->ibwin_cnt--;
tsi_debug(IBW, &priv->pdev->dev,
- "Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx",
+ "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
i, ibw_start, &loc_start, ibw_size);
return 0;
@@ -1237,7 +1250,7 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
int i;
tsi_debug(IBW, &priv->pdev->dev,
- "Unmap IBW mapped to PCIe_0x%pad", &lstart);
+ "Unmap IBW mapped to PCIe_%pad", &lstart);
/* Search for matching active inbound translation window */
for (i = 0; i < TSI721_IBWIN_NUM; i++) {
@@ -1877,6 +1890,11 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out;
}
+ if ((mbox_sel & (1 << mbox)) == 0) {
+ rc = -ENODEV;
+ goto out;
+ }
+
priv->omsg_ring[mbox].dev_id = dev_id;
priv->omsg_ring[mbox].size = entries;
priv->omsg_ring[mbox].sts_rdptr = 0;
@@ -2161,6 +2179,11 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
goto out;
}
+ if ((mbox_sel & (1 << mbox)) == 0) {
+ rc = -ENODEV;
+ goto out;
+ }
+
/* Initialize IB Messaging Ring */
priv->imsg_ring[mbox].dev_id = dev_id;
priv->imsg_ring[mbox].size = entries;
@@ -2532,11 +2555,11 @@ static int tsi721_query_mport(struct rio_mport *mport,
struct tsi721_device *priv = mport->priv;
u32 rval;
- rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0)));
+ rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
- rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0)));
+ rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
- rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0)));
+ rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
} else
attr->link_speed = RIO_LINK_DOWN;
@@ -2650,9 +2673,9 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
mport->ops = &tsi721_rio_ops;
mport->index = 0;
mport->sys_size = 0; /* small system */
- mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ mport->phys_rmap = 1;
mport->dev.parent = &pdev->dev;
mport->dev.release = tsi721_mport_release;
@@ -2840,6 +2863,16 @@ static int tsi721_probe(struct pci_dev *pdev,
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+ /* Override PCIe Maximum Read Request Size setting if requested */
+ if (pcie_mrrs >= 0) {
+ if (pcie_mrrs <= 5)
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
+ else
+ tsi_info(&pdev->dev,
+ "Invalid MRRS override value %d", pcie_mrrs);
+ }
+
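
For context on the pcie_mrrs check above: the PCIe Max_Read_Request_Size field occupies DEVCTL bits 14:12 (hence the << 12 shift and the 0..5 limit), and an encoded value n selects 128 << n bytes. A minimal standalone sketch of that mapping (editorial illustration, not part of the patch):

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000	/* Max_Read_Request_Size, bits 14:12 */

/* Translate the 0..5 module-parameter value into the byte size it selects
 * and the corresponding DEVCTL field contents. */
static unsigned int mrrs_bytes(unsigned int mrrs)
{
	return 128U << mrrs;		/* 0 -> 128B ... 5 -> 4096B */
}

int main(void)
{
	unsigned int mrrs;

	for (mrrs = 0; mrrs <= 5; mrrs++)
		printf("pcie_mrrs=%u -> %u bytes, DEVCTL field 0x%04x\n",
		       mrrs, mrrs_bytes(mrrs),
		       (mrrs << 12) & PCI_EXP_DEVCTL_READRQ);
	return 0;
}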
/* Adjust PCIe completion timeout. */
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5456dbddc929..5941437cbdd1 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -661,7 +661,7 @@ enum dma_rtype {
*/
#define TSI721_DMA_CHNUM TSI721_DMA_MAXCH
-#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
+#define TSI721_DMACH_MAINT 7 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 155cae1e62de..e2a418598129 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -36,18 +36,26 @@
#include "tsi721.h"
-#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */
-
#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
static unsigned int dma_desc_per_channel = 128;
-module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
+module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
"Number of DMA descriptors per channel (default: 128)");
+static unsigned int dma_txqueue_sz = 16;
+module_param(dma_txqueue_sz, uint, S_IRUGO);
+MODULE_PARM_DESC(dma_txqueue_sz,
+ "DMA Transactions Queue Size (default: 16)");
+
+static u8 dma_sel = 0x7f;
+module_param(dma_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(dma_sel,
+ "DMA Channel Selection Mask (default: 0x7f = all)");
+
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
return container_of(chan, struct tsi721_bdma_chan, dchan);
@@ -718,6 +726,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
cookie = dma_cookie_assign(txd);
desc->status = DMA_IN_PROGRESS;
list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ tsi721_advance_work(bdma_chan, NULL);
spin_unlock_bh(&bdma_chan->lock);
return cookie;
@@ -732,7 +741,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
if (bdma_chan->bd_base)
- return TSI721_DMA_TX_QUEUE_SZ;
+ return dma_txqueue_sz;
/* Initialize BDMA channel */
if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
@@ -742,7 +751,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
}
/* Allocate queue of transaction descriptors */
- desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
+ desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
GFP_ATOMIC);
if (!desc) {
tsi_err(&dchan->dev->device,
@@ -754,7 +763,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
bdma_chan->tx_desc = desc;
- for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
+ for (i = 0; i < dma_txqueue_sz; i++) {
dma_async_tx_descriptor_init(&desc[i].txd, dchan);
desc[i].txd.tx_submit = tsi721_tx_submit;
desc[i].txd.flags = DMA_CTRL_ACK;
@@ -766,7 +775,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
- return TSI721_DMA_TX_QUEUE_SZ;
+ return dma_txqueue_sz;
}
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
@@ -962,7 +971,7 @@ void tsi721_dma_stop_all(struct tsi721_device *priv)
int i;
for (i = 0; i < TSI721_DMA_MAXCH; i++) {
- if (i != TSI721_DMACH_MAINT)
+ if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
tsi721_dma_stop(&priv->bdma[i]);
}
}
@@ -979,7 +988,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
for (i = 0; i < TSI721_DMA_MAXCH; i++) {
struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
- if (i == TSI721_DMACH_MAINT)
+ if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
continue;
bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
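
With the new dma_sel parameter, bit i of the mask enables BDMA channel i, and the maintenance channel is always excluded from the DMA engine. A small standalone illustration of that selection logic (editorial sketch assuming the Tsi721's eight BDMA channels; names are illustrative):

#include <stdio.h>

#define DMA_MAXCH	8	/* Tsi721 BDMA channel count (assumed here) */
#define DMACH_MAINT	7	/* maintenance channel, per the change above */

int main(void)
{
	unsigned char dma_sel = 0x7f;	/* module parameter default */
	int i;

	for (i = 0; i < DMA_MAXCH; i++) {
		if (i == DMACH_MAINT || !(dma_sel & (1 << i)))
			continue;	/* not registered with the DMA engine */
		printf("channel %d registered\n", i);
	}
	return 0;
}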
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index a63a380809d1..23429bdaca84 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -49,15 +49,6 @@ struct rio_id_table {
static int next_destid = 0;
static int next_comptag = 1;
-static int rio_mport_phys_table[] = {
- RIO_EFB_PAR_EP_ID,
- RIO_EFB_PAR_EP_REC_ID,
- RIO_EFB_SER_EP_ID,
- RIO_EFB_SER_EP_REC_ID,
- -1,
-};
-
-
/**
* rio_destid_alloc - Allocate next available destID for given network
* @net: RIO network
@@ -380,10 +371,15 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
if (rdev->pef & RIO_PEF_EXT_FEATURES) {
rdev->efptr = result & 0xffff;
rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
- hopcount);
+ hopcount, &rdev->phys_rmap);
+ pr_debug("RIO: %s Register Map %d device\n",
+ __func__, rdev->phys_rmap);
rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
hopcount, RIO_EFB_ERR_MGMNT);
+ if (!rdev->em_efptr)
+ rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
+ hopcount, RIO_EFB_ERR_MGMNT_HS);
}
rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
@@ -445,7 +441,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0);
} else {
if (do_enum)
- /*Enable Input Output Port (transmitter reviever)*/
+ /*Enable Input Output Port (transmitter receiver)*/
rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
@@ -481,10 +477,8 @@ cleanup:
/**
* rio_sport_is_active- Tests if a switch port has an active connection.
- * @port: Master port to send transaction
- * @destid: Associated destination ID for switch
- * @hopcount: Hopcount to reach switch
- * @sport: Switch port number
+ * @rdev: RapidIO device object
+ * @sp: Switch port number
*
* Reads the port error status CSR for a particular switch port to
* determine if the port has an active link. Returns
@@ -492,31 +486,12 @@ cleanup:
* inactive.
*/
static int
-rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
+rio_sport_is_active(struct rio_dev *rdev, int sp)
{
u32 result = 0;
- u32 ext_ftr_ptr;
- ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
-
- while (ext_ftr_ptr) {
- rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr, &result);
- result = RIO_GET_BLOCK_ID(result);
- if ((result == RIO_EFB_SER_EP_FREE_ID) ||
- (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
- (result == RIO_EFB_SER_EP_FREC_ID))
- break;
-
- ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
- ext_ftr_ptr);
- }
-
- if (ext_ftr_ptr)
- rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr +
- RIO_PORT_N_ERR_STS_CSR(sport),
- &result);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp),
+ &result);
return result & RIO_PORT_N_ERR_STS_PORT_OK;
}
@@ -655,9 +630,7 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
cur_destid = next_destid;
- if (rio_sport_is_active
- (port, RIO_ANY_DESTID(port->sys_size), hopcount,
- port_num)) {
+ if (rio_sport_is_active(rdev, port_num)) {
pr_debug(
"RIO: scanning device on port %d\n",
port_num);
@@ -785,8 +758,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
continue;
- if (rio_sport_is_active
- (port, destid, hopcount, port_num)) {
+ if (rio_sport_is_active(rdev, port_num)) {
pr_debug(
"RIO: scanning device on port %d\n",
port_num);
@@ -831,21 +803,11 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
static int rio_mport_is_active(struct rio_mport *port)
{
u32 result = 0;
- u32 ext_ftr_ptr;
- int *entry = rio_mport_phys_table;
-
- do {
- if ((ext_ftr_ptr =
- rio_mport_get_feature(port, 1, 0, 0, *entry)))
- break;
- } while (*++entry >= 0);
-
- if (ext_ftr_ptr)
- rio_local_read_config_32(port,
- ext_ftr_ptr +
- RIO_PORT_N_ERR_STS_CSR(port->index),
- &result);
+ rio_local_read_config_32(port,
+ port->phys_efptr +
+ RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap),
+ &result);
return result & RIO_PORT_N_ERR_STS_PORT_OK;
}
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 0dcaa660cba1..37042858c2db 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -268,6 +268,12 @@ int rio_request_inb_mbox(struct rio_mport *mport,
mport->inb_msg[mbox].mcback = minb;
rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
+ if (rc) {
+ mport->inb_msg[mbox].mcback = NULL;
+ mport->inb_msg[mbox].res = NULL;
+ release_resource(res);
+ kfree(res);
+ }
} else
rc = -ENOMEM;
@@ -285,13 +291,22 @@ int rio_request_inb_mbox(struct rio_mport *mport,
*/
int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
{
- if (mport->ops->close_inb_mbox) {
- mport->ops->close_inb_mbox(mport, mbox);
+ int rc;
- /* Release the mailbox resource */
- return release_resource(mport->inb_msg[mbox].res);
- } else
- return -ENOSYS;
+ if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res)
+ return -EINVAL;
+
+ mport->ops->close_inb_mbox(mport, mbox);
+ mport->inb_msg[mbox].mcback = NULL;
+
+ rc = release_resource(mport->inb_msg[mbox].res);
+ if (rc)
+ return rc;
+
+ kfree(mport->inb_msg[mbox].res);
+ mport->inb_msg[mbox].res = NULL;
+
+ return 0;
}
/**
@@ -336,6 +351,12 @@ int rio_request_outb_mbox(struct rio_mport *mport,
mport->outb_msg[mbox].mcback = moutb;
rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
+ if (rc) {
+ mport->outb_msg[mbox].mcback = NULL;
+ mport->outb_msg[mbox].res = NULL;
+ release_resource(res);
+ kfree(res);
+ }
} else
rc = -ENOMEM;
@@ -353,13 +374,22 @@ int rio_request_outb_mbox(struct rio_mport *mport,
*/
int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
{
- if (mport->ops->close_outb_mbox) {
- mport->ops->close_outb_mbox(mport, mbox);
+ int rc;
- /* Release the mailbox resource */
- return release_resource(mport->outb_msg[mbox].res);
- } else
- return -ENOSYS;
+ if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res)
+ return -EINVAL;
+
+ mport->ops->close_outb_mbox(mport, mbox);
+ mport->outb_msg[mbox].mcback = NULL;
+
+ rc = release_resource(mport->outb_msg[mbox].res);
+ if (rc)
+ return rc;
+
+ kfree(mport->outb_msg[mbox].res);
+ mport->outb_msg[mbox].res = NULL;
+
+ return 0;
}
/**
@@ -756,10 +786,11 @@ EXPORT_SYMBOL_GPL(rio_unmap_outb_region);
* @local: Indicate a local master port or remote device access
* @destid: Destination ID of the device
* @hopcount: Number of switch hops to the device
+ * @rmap: pointer to location to store register map type info
*/
u32
rio_mport_get_physefb(struct rio_mport *port, int local,
- u16 destid, u8 hopcount)
+ u16 destid, u8 hopcount, u32 *rmap)
{
u32 ext_ftr_ptr;
u32 ftr_header;
@@ -777,14 +808,21 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
ftr_header = RIO_GET_BLOCK_ID(ftr_header);
switch (ftr_header) {
- case RIO_EFB_SER_EP_ID_V13P:
- case RIO_EFB_SER_EP_REC_ID_V13P:
- case RIO_EFB_SER_EP_FREE_ID_V13P:
case RIO_EFB_SER_EP_ID:
case RIO_EFB_SER_EP_REC_ID:
case RIO_EFB_SER_EP_FREE_ID:
- case RIO_EFB_SER_EP_FREC_ID:
+ case RIO_EFB_SER_EP_M1_ID:
+ case RIO_EFB_SER_EP_SW_M1_ID:
+ case RIO_EFB_SER_EPF_M1_ID:
+ case RIO_EFB_SER_EPF_SW_M1_ID:
+ *rmap = 1;
+ return ext_ftr_ptr;
+ case RIO_EFB_SER_EP_M2_ID:
+ case RIO_EFB_SER_EP_SW_M2_ID:
+ case RIO_EFB_SER_EPF_M2_ID:
+ case RIO_EFB_SER_EPF_SW_M2_ID:
+ *rmap = 2;
return ext_ftr_ptr;
default:
@@ -843,16 +881,16 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
u32 regval;
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
- &regval);
+ RIO_DEV_PORT_N_CTL_CSR(rdev, pnum),
+ &regval);
if (lock)
regval |= RIO_PORT_N_CTL_LOCKOUT;
else
regval &= ~RIO_PORT_N_CTL_LOCKOUT;
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
- regval);
+ RIO_DEV_PORT_N_CTL_CSR(rdev, pnum),
+ regval);
return 0;
}
EXPORT_SYMBOL_GPL(rio_set_port_lockout);
@@ -876,6 +914,7 @@ int rio_enable_rx_tx_port(struct rio_mport *port,
#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
u32 regval;
u32 ext_ftr_ptr;
+ u32 rmap;
/*
* enable rx input tx output port
@@ -883,34 +922,29 @@ int rio_enable_rx_tx_port(struct rio_mport *port,
pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
"%d, port_num = %d)\n", local, destid, hopcount, port_num);
- ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+ ext_ftr_ptr = rio_mport_get_physefb(port, local, destid,
+ hopcount, &rmap);
if (local) {
- rio_local_read_config_32(port, ext_ftr_ptr +
- RIO_PORT_N_CTL_CSR(0),
+ rio_local_read_config_32(port,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap),
&regval);
} else {
if (rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap),
+ &regval) < 0)
return -EIO;
}
- if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
- /* serial */
- regval = regval | RIO_PORT_N_CTL_EN_RX_SER
- | RIO_PORT_N_CTL_EN_TX_SER;
- } else {
- /* parallel */
- regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
- | RIO_PORT_N_CTL_EN_TX_PAR;
- }
+ regval = regval | RIO_PORT_N_CTL_EN_RX | RIO_PORT_N_CTL_EN_TX;
if (local) {
- rio_local_write_config_32(port, ext_ftr_ptr +
- RIO_PORT_N_CTL_CSR(0), regval);
+ rio_local_write_config_32(port,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval);
} else {
if (rio_mport_write_config_32(port, destid, hopcount,
- ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap),
+ regval) < 0)
return -EIO;
}
#endif
@@ -1012,14 +1046,14 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
/* Read from link maintenance response register
* to clear valid bit */
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
&regval);
udelay(50);
}
/* Issue Input-status command */
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+ RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum),
RIO_MNT_REQ_CMD_IS);
/* Exit if the response is not expected */
@@ -1030,7 +1064,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
while (checkcount--) {
udelay(50);
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
&regval);
if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
*lnkresp = regval;
@@ -1046,6 +1080,13 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
* @rdev: Pointer to RIO device control structure
* @pnum: Switch port number to clear errors
* @err_status: port error status (if 0 reads register from device)
+ *
+ * TODO: Currently this routine is not compatible with the recovery process
+ * specified for idt_gen3 RapidIO switch devices. It has to be reviewed
+ * to implement a universal recovery process that is compatible with the
+ * full range of available devices.

+ * IDT gen3 switch driver now implements HW-specific error handler that
+ * issues soft port reset to the port to reset ERR_STOP bits and ackIDs.
*/
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
@@ -1055,10 +1096,10 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
if (err_status == 0)
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
&err_status);
- if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+ if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) {
pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
/*
* Send a Link-Request/Input-Status control symbol
@@ -1073,7 +1114,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
&regval);
pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
@@ -1091,43 +1132,43 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
* far inbound.
*/
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
(near_ackid << 24) |
(far_ackid << 8) | far_ackid);
/* Align far outstanding/outbound ackIDs with
* near inbound.
*/
far_ackid++;
- if (nextdev)
- rio_write_config_32(nextdev,
- nextdev->phys_efptr +
- RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
- (far_ackid << 24) |
- (near_ackid << 8) | near_ackid);
- else
- pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+ if (!nextdev) {
+ pr_debug("RIO_EM: nextdev pointer == NULL\n");
+ goto rd_err;
+ }
+
+ rio_write_config_32(nextdev,
+ RIO_DEV_PORT_N_ACK_STS_CSR(nextdev,
+ RIO_GET_PORT_NUM(nextdev->swpinfo)),
+ (far_ackid << 24) |
+ (near_ackid << 8) | near_ackid);
}
rd_err:
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
- &err_status);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
}
- if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+ if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) {
pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
rio_get_input_status(nextdev,
RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
udelay(50);
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
- &err_status);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
}
- return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
- RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+ return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0;
}
/**
@@ -1227,9 +1268,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle)
rdev->rswitch->ops->em_handle(rdev, portnum);
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- &err_status);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+ &err_status);
pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
@@ -1246,8 +1286,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
* Depending on the link partner state, two attempts
* may be needed for successful recovery.
*/
- if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
- RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) {
if (rio_clr_err_stopped(rdev, portnum, err_status))
rio_clr_err_stopped(rdev, portnum, 0);
}
@@ -1257,10 +1297,18 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
rdev->rswitch->port_ok &= ~(1 << portnum);
rio_set_port_lockout(rdev, portnum, 1);
+ if (rdev->phys_rmap == 1) {
rio_write_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_ACK_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum),
RIO_PORT_N_ACK_CLEAR);
+ } else {
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum),
+ RIO_PORT_N_OB_ACK_CLEAR);
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum),
+ 0);
+ }
/* Schedule Extraction Service */
pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
@@ -1289,9 +1337,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
}
/* Clear remaining error bits and Port-Write Pending bit */
- rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- err_status);
+ rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+ err_status);
return 0;
}
@@ -1342,20 +1389,7 @@ EXPORT_SYMBOL_GPL(rio_mport_get_efb);
* Tell if a device supports a given RapidIO capability.
* Returns the offset of the requested extended feature
* block within the device's RIO configuration space or
- * 0 in case the device does not support it. Possible
- * values for @ftr:
- *
- * %RIO_EFB_PAR_EP_ID LP/LVDS EP Devices
- *
- * %RIO_EFB_PAR_EP_REC_ID LP/LVDS EP Recovery Devices
- *
- * %RIO_EFB_PAR_EP_FREE_ID LP/LVDS EP Free Devices
- *
- * %RIO_EFB_SER_EP_ID LP/Serial EP Devices
- *
- * %RIO_EFB_SER_EP_REC_ID LP/Serial EP Recovery Devices
- *
- * %RIO_EFB_SER_EP_FREE_ID LP/Serial EP Free Devices
+ * 0 in case the device does not support it.
*/
u32
rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
@@ -1848,7 +1882,9 @@ EXPORT_SYMBOL_GPL(rio_release_dma);
* Initializes RapidIO capable DMA channel for the specified data transfer.
* Uses DMA channel private extension to pass information related to remote
* target RIO device.
- * Returns pointer to DMA transaction descriptor or NULL if failed.
+ *
+ * Returns: pointer to DMA transaction descriptor if successful,
+ * error-valued pointer or NULL if failed.
*/
struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
u16 destid, struct rio_dma_data *data,
@@ -1883,7 +1919,9 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
* Initializes RapidIO capable DMA channel for the specified data transfer.
* Uses DMA channel private extension to pass information related to remote
* target RIO device.
- * Returns pointer to DMA transaction descriptor or NULL if failed.
+ *
+ * Returns: pointer to DMA transaction descriptor if successful,
+ * error-valued pointer or NULL if failed.
*/
struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
struct dma_chan *dchan, struct rio_dma_data *data,
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 625d09add001..9796b3fee70d 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -22,7 +22,7 @@
extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
u8 hopcount, int ftr);
extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
- u16 destid, u8 hopcount);
+ u16 destid, u8 hopcount, u32 *rmap);
extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u32 from);
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
new file mode 100644
index 000000000000..cecc15a880de
--- /dev/null
+++ b/drivers/rapidio/rio_cm.c
@@ -0,0 +1,2366 @@
+/*
+ * rio_cm - RapidIO Channelized Messaging Driver
+ *
+ * Copyright 2013-2016 Integrated Device Technology, Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, RapidIO Trade Association
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL,
+ * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE
+ * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/reboot.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/rio_cm_cdev.h>
+
+#define DRV_NAME "rio_cm"
+#define DRV_VERSION "1.0.0"
+#define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
+#define DRV_DESC "RapidIO Channelized Messaging Driver"
+#define DEV_NAME "rio_cm"
+
+/* Debug output filtering masks */
+enum {
+ DBG_NONE = 0,
+ DBG_INIT = BIT(0), /* driver init */
+ DBG_EXIT = BIT(1), /* driver exit */
+ DBG_MPORT = BIT(2), /* mport add/remove */
+ DBG_RDEV = BIT(3), /* RapidIO device add/remove */
+ DBG_CHOP = BIT(4), /* channel operations */
+ DBG_WAIT = BIT(5), /* waiting for events */
+ DBG_TX = BIT(6), /* message TX */
+ DBG_TX_EVENT = BIT(7), /* message TX event */
+ DBG_RX_DATA = BIT(8), /* inbound data messages */
+ DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */
+ DBG_ALL = ~0,
+};
+
+#ifdef DEBUG
+#define riocm_debug(level, fmt, arg...) \
+ do { \
+ if (DBG_##level & dbg_level) \
+ pr_debug(DRV_NAME ": %s " fmt "\n", \
+ __func__, ##arg); \
+ } while (0)
+#else
+#define riocm_debug(level, fmt, arg...) \
+ no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
+#endif
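
The dbg_level module parameter is a bitwise OR of the DBG_* masks above, and riocm_debug() only emits output when the bit for its level is set. An illustrative userspace model of that filtering (editorial sketch; printf stands in for pr_debug and the GNU ##__VA_ARGS__ extension mirrors the kernel's ##arg usage):

#include <stdio.h>

enum {
	DBG_NONE    = 0,
	DBG_INIT    = 1 << 0,
	DBG_TX      = 1 << 6,
	DBG_RX_DATA = 1 << 8,
};

static unsigned int dbg_level = DBG_TX | DBG_RX_DATA;

#define riocm_debug(level, fmt, ...)				\
	do {							\
		if (DBG_##level & dbg_level)			\
			printf("rio_cm: %s " fmt "\n",		\
			       __func__, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	riocm_debug(TX, "slot %d", 3);	/* printed: DBG_TX bit is set */
	riocm_debug(INIT, "skipped");	/* filtered: DBG_INIT bit is clear */
	return 0;
}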
+
+#define riocm_warn(fmt, arg...) \
+ pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
+
+#define riocm_error(fmt, arg...) \
+ pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
+
+
+static int cmbox = 1;
+module_param(cmbox, int, S_IRUGO);
+MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");
+
+static int chstart = 256;
+module_param(chstart, int, S_IRUGO);
+MODULE_PARM_DESC(chstart,
+ "Start channel number for dynamic allocation (default 256)");
+
+#ifdef DEBUG
+static u32 dbg_level = DBG_NONE;
+module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
+#endif
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define RIOCM_TX_RING_SIZE 128
+#define RIOCM_RX_RING_SIZE 128
+#define RIOCM_CONNECT_TO 3 /* connect response TO (in sec) */
+
+#define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */
+#define RIOCM_CHNUM_AUTO 0
+#define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */
+
+enum rio_cm_state {
+ RIO_CM_IDLE,
+ RIO_CM_CONNECT,
+ RIO_CM_CONNECTED,
+ RIO_CM_DISCONNECT,
+ RIO_CM_CHAN_BOUND,
+ RIO_CM_LISTEN,
+ RIO_CM_DESTROYING,
+};
+
+enum rio_cm_pkt_type {
+ RIO_CM_SYS = 0xaa,
+ RIO_CM_CHAN = 0x55,
+};
+
+enum rio_cm_chop {
+ CM_CONN_REQ,
+ CM_CONN_ACK,
+ CM_CONN_CLOSE,
+ CM_DATA_MSG,
+};
+
+struct rio_ch_base_bhdr {
+ u32 src_id;
+ u32 dst_id;
+#define RIO_HDR_LETTER_MASK 0xffff0000
+#define RIO_HDR_MBOX_MASK 0x0000ffff
+ u8 src_mbox;
+ u8 dst_mbox;
+ u8 type;
+} __attribute__((__packed__));
+
+struct rio_ch_chan_hdr {
+ struct rio_ch_base_bhdr bhdr;
+ u8 ch_op;
+ u16 dst_ch;
+ u16 src_ch;
+ u16 msg_len;
+ u16 rsrvd;
+} __attribute__((__packed__));
+
+struct tx_req {
+ struct list_head node;
+ struct rio_dev *rdev;
+ void *buffer;
+ size_t len;
+};
+
+struct cm_dev {
+ struct list_head list;
+ struct rio_mport *mport;
+ void *rx_buf[RIOCM_RX_RING_SIZE];
+ int rx_slots;
+ struct mutex rx_lock;
+
+ void *tx_buf[RIOCM_TX_RING_SIZE];
+ int tx_slot;
+ int tx_cnt;
+ int tx_ack_slot;
+ struct list_head tx_reqs;
+ spinlock_t tx_lock;
+
+ struct list_head peers;
+ u32 npeers;
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_work;
+};
+
+struct chan_rx_ring {
+ void *buf[RIOCM_RX_RING_SIZE];
+ int head;
+ int tail;
+ int count;
+
+ /* Tracking RX buffers reported to upper level */
+ void *inuse[RIOCM_RX_RING_SIZE];
+ int inuse_cnt;
+};
+
+struct rio_channel {
+ u16 id; /* local channel ID */
+ struct kref ref; /* channel refcount */
+ struct file *filp;
+ struct cm_dev *cmdev; /* associated CM device object */
+ struct rio_dev *rdev; /* remote RapidIO device */
+ enum rio_cm_state state;
+ int error;
+ spinlock_t lock;
+ void *context;
+ u32 loc_destid; /* local destID */
+ u32 rem_destid; /* remote destID */
+ u16 rem_channel; /* remote channel ID */
+ struct list_head accept_queue;
+ struct list_head ch_node;
+ struct completion comp;
+ struct completion comp_close;
+ struct chan_rx_ring rx_ring;
+};
+
+struct cm_peer {
+ struct list_head node;
+ struct rio_dev *rdev;
+};
+
+struct rio_cm_work {
+ struct work_struct work;
+ struct cm_dev *cm;
+ void *data;
+};
+
+struct conn_req {
+ struct list_head node;
+ u32 destid; /* requester destID */
+ u16 chan; /* requester channel ID */
+ struct cm_dev *cmdev;
+};
+
+/*
+ * A channel_dev structure represents a CM_CDEV
+ * @cdev Character device
+ * @dev Associated device object
+ */
+struct channel_dev {
+ struct cdev cdev;
+ struct device *dev;
+};
+
+static struct rio_channel *riocm_ch_alloc(u16 ch_num);
+static void riocm_ch_free(struct kref *ref);
+static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
+ void *buffer, size_t len);
+static int riocm_ch_close(struct rio_channel *ch);
+
+static DEFINE_SPINLOCK(idr_lock);
+static DEFINE_IDR(ch_idr);
+
+static LIST_HEAD(cm_dev_list);
+static DECLARE_RWSEM(rdev_sem);
+
+static struct class *dev_class;
+static unsigned int dev_major;
+static unsigned int dev_minor_base;
+static dev_t dev_number;
+static struct channel_dev riocm_cdev;
+
+#define is_msg_capable(src_ops, dst_ops) \
+ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
+ (dst_ops & RIO_DST_OPS_DATA_MSG))
+#define dev_cm_capable(dev) \
+ is_msg_capable(dev->src_ops, dev->dst_ops)
+
+static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
+{
+ int ret;
+
+ spin_lock_bh(&ch->lock);
+ ret = (ch->state == cmp);
+ spin_unlock_bh(&ch->lock);
+ return ret;
+}
+
+static int riocm_cmp_exch(struct rio_channel *ch,
+ enum rio_cm_state cmp, enum rio_cm_state exch)
+{
+ int ret;
+
+ spin_lock_bh(&ch->lock);
+ ret = (ch->state == cmp);
+ if (ret)
+ ch->state = exch;
+ spin_unlock_bh(&ch->lock);
+ return ret;
+}
+
+static enum rio_cm_state riocm_exch(struct rio_channel *ch,
+ enum rio_cm_state exch)
+{
+ enum rio_cm_state old;
+
+ spin_lock_bh(&ch->lock);
+ old = ch->state;
+ ch->state = exch;
+ spin_unlock_bh(&ch->lock);
+ return old;
+}
+
+static struct rio_channel *riocm_get_channel(u16 nr)
+{
+ struct rio_channel *ch;
+
+ spin_lock_bh(&idr_lock);
+ ch = idr_find(&ch_idr, nr);
+ if (ch)
+ kref_get(&ch->ref);
+ spin_unlock_bh(&idr_lock);
+ return ch;
+}
+
+static void riocm_put_channel(struct rio_channel *ch)
+{
+ kref_put(&ch->ref, riocm_ch_free);
+}
+
+static void *riocm_rx_get_msg(struct cm_dev *cm)
+{
+ void *msg;
+ int i;
+
+ msg = rio_get_inb_message(cm->mport, cmbox);
+ if (msg) {
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (cm->rx_buf[i] == msg) {
+ cm->rx_buf[i] = NULL;
+ cm->rx_slots++;
+ break;
+ }
+ }
+
+ if (i == RIOCM_RX_RING_SIZE)
+ riocm_warn("no record for buffer 0x%p", msg);
+ }
+
+ return msg;
+}
+
+/*
+ * riocm_rx_fill - fills a ring of receive buffers for given cm device
+ * @cm: cm_dev object
+ * @nent: max number of entries to fill
+ *
+ * Returns: none
+ */
+static void riocm_rx_fill(struct cm_dev *cm, int nent)
+{
+ int i;
+
+ if (cm->rx_slots == 0)
+ return;
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
+ if (cm->rx_buf[i] == NULL) {
+ cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
+ if (cm->rx_buf[i] == NULL)
+ break;
+ rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
+ cm->rx_slots--;
+ nent--;
+ }
+ }
+}
+
+/*
+ * riocm_rx_free - frees all receive buffers associated with given cm device
+ * @cm: cm_dev object
+ *
+ * Returns: none
+ */
+static void riocm_rx_free(struct cm_dev *cm)
+{
+ int i;
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (cm->rx_buf[i] != NULL) {
+ kfree(cm->rx_buf[i]);
+ cm->rx_buf[i] = NULL;
+ }
+ }
+}
+
+/*
+ * riocm_req_handler - connection request handler
+ * @cm: cm_dev object
+ * @req_data: pointer to the request packet
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if channel is not in correct state,
+ * -ENODEV if cannot find a channel with specified ID,
+ * -ENOMEM if unable to allocate memory to store the request
+ */
+static int riocm_req_handler(struct cm_dev *cm, void *req_data)
+{
+ struct rio_channel *ch;
+ struct conn_req *req;
+ struct rio_ch_chan_hdr *hh = req_data;
+ u16 chnum;
+
+ chnum = ntohs(hh->dst_ch);
+
+ ch = riocm_get_channel(chnum);
+
+ if (!ch)
+ return -ENODEV;
+
+ if (ch->state != RIO_CM_LISTEN) {
+ riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
+ riocm_put_channel(ch);
+ return -EINVAL;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ riocm_put_channel(ch);
+ return -ENOMEM;
+ }
+
+ req->destid = ntohl(hh->bhdr.src_id);
+ req->chan = ntohs(hh->src_ch);
+ req->cmdev = cm;
+
+ spin_lock_bh(&ch->lock);
+ list_add_tail(&req->node, &ch->accept_queue);
+ spin_unlock_bh(&ch->lock);
+ complete(&ch->comp);
+ riocm_put_channel(ch);
+
+ return 0;
+}
+
+/*
+ * riocm_resp_handler - response to connection request handler
+ * @resp_data: pointer to the response packet
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if channel is not in correct state,
+ * -ENODEV if cannot find a channel with specified ID,
+ */
+static int riocm_resp_handler(void *resp_data)
+{
+ struct rio_channel *ch;
+ struct rio_ch_chan_hdr *hh = resp_data;
+ u16 chnum;
+
+ chnum = ntohs(hh->dst_ch);
+ ch = riocm_get_channel(chnum);
+ if (!ch)
+ return -ENODEV;
+
+ if (ch->state != RIO_CM_CONNECT) {
+ riocm_put_channel(ch);
+ return -EINVAL;
+ }
+
+ riocm_exch(ch, RIO_CM_CONNECTED);
+ ch->rem_channel = ntohs(hh->src_ch);
+ complete(&ch->comp);
+ riocm_put_channel(ch);
+
+ return 0;
+}
+
+/*
+ * riocm_close_handler - channel close request handler
+ * @req_data: pointer to the request packet
+ *
+ * Returns: 0 if success, or
+ * -ENODEV if cannot find a channel with specified ID,
+ * + error codes returned by riocm_ch_close.
+ */
+static int riocm_close_handler(void *data)
+{
+ struct rio_channel *ch;
+ struct rio_ch_chan_hdr *hh = data;
+ int ret;
+
+ riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));
+
+ spin_lock_bh(&idr_lock);
+ ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
+ if (!ch) {
+ spin_unlock_bh(&idr_lock);
+ return -ENODEV;
+ }
+ idr_remove(&ch_idr, ch->id);
+ spin_unlock_bh(&idr_lock);
+
+ riocm_exch(ch, RIO_CM_DISCONNECT);
+
+ ret = riocm_ch_close(ch);
+ if (ret)
+ riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);
+
+ return 0;
+}
+
+/*
+ * rio_cm_handler - function that services request (non-data) packets
+ * @cm: cm_dev object
+ * @data: pointer to the packet
+ */
+static void rio_cm_handler(struct cm_dev *cm, void *data)
+{
+ struct rio_ch_chan_hdr *hdr;
+
+ if (!rio_mport_is_running(cm->mport))
+ goto out;
+
+ hdr = data;
+
+ riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
+ hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));
+
+ switch (hdr->ch_op) {
+ case CM_CONN_REQ:
+ riocm_req_handler(cm, data);
+ break;
+ case CM_CONN_ACK:
+ riocm_resp_handler(data);
+ break;
+ case CM_CONN_CLOSE:
+ riocm_close_handler(data);
+ break;
+ default:
+ riocm_error("Invalid packet header");
+ break;
+ }
+out:
+ kfree(data);
+}
+
+/*
+ * rio_rx_data_handler - received data packet handler
+ * @cm: cm_dev object
+ * @buf: data packet
+ *
+ * Returns: 0 if success, or
+ * -ENODEV if cannot find a channel with specified ID,
+ * -EIO if channel is not in CONNECTED state,
+ * -ENOMEM if channel RX queue is full (packet discarded)
+ */
+static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
+{
+ struct rio_ch_chan_hdr *hdr;
+ struct rio_channel *ch;
+
+ hdr = buf;
+
+ riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));
+
+ ch = riocm_get_channel(ntohs(hdr->dst_ch));
+ if (!ch) {
+ /* Discard data message for non-existing channel */
+ kfree(buf);
+ return -ENODEV;
+ }
+
+ /* Place pointer to the buffer into channel's RX queue */
+ spin_lock(&ch->lock);
+
+ if (ch->state != RIO_CM_CONNECTED) {
+ /* Channel is not ready to receive data, discard a packet */
+ riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
+ ch->id, ch->state);
+ spin_unlock(&ch->lock);
+ kfree(buf);
+ riocm_put_channel(ch);
+ return -EIO;
+ }
+
+ if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
+ /* If RX ring is full, discard a packet */
+ riocm_debug(RX_DATA, "ch=%d is full", ch->id);
+ spin_unlock(&ch->lock);
+ kfree(buf);
+ riocm_put_channel(ch);
+ return -ENOMEM;
+ }
+
+ ch->rx_ring.buf[ch->rx_ring.head] = buf;
+ ch->rx_ring.head++;
+ ch->rx_ring.count++;
+ ch->rx_ring.head %= RIOCM_RX_RING_SIZE;
+
+ complete(&ch->comp);
+
+ spin_unlock(&ch->lock);
+ riocm_put_channel(ch);
+
+ return 0;
+}
+
+/*
+ * rio_ibmsg_handler - inbound message packet handler
+ */
+static void rio_ibmsg_handler(struct work_struct *work)
+{
+ struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
+ void *data;
+ struct rio_ch_chan_hdr *hdr;
+
+ if (!rio_mport_is_running(cm->mport))
+ return;
+
+ while (1) {
+ mutex_lock(&cm->rx_lock);
+ data = riocm_rx_get_msg(cm);
+ if (data)
+ riocm_rx_fill(cm, 1);
+ mutex_unlock(&cm->rx_lock);
+
+ if (data == NULL)
+ break;
+
+ hdr = data;
+
+ if (hdr->bhdr.type != RIO_CM_CHAN) {
+ /* For now simply discard packets other than channel */
+ riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
+ hdr->bhdr.type);
+ kfree(data);
+ continue;
+ }
+
+ /* Process a channel message */
+ if (hdr->ch_op == CM_DATA_MSG)
+ rio_rx_data_handler(cm, data);
+ else
+ rio_cm_handler(cm, data);
+ }
+}
+
+static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
+ int mbox, int slot)
+{
+ struct cm_dev *cm = dev_id;
+
+ if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
+ queue_work(cm->rx_wq, &cm->rx_work);
+}
+
+/*
+ * rio_txcq_handler - TX completion handler
+ * @cm: cm_dev object
+ * @slot: TX queue slot
+ *
+ * TX completion handler also ensures that pending request packets are placed
+ * into transmit queue as soon as a free slot becomes available. This is done
+ * to give higher priority to request packets during high intensity data flow.
+ */
+static void rio_txcq_handler(struct cm_dev *cm, int slot)
+{
+ int ack_slot;
+
+ /* ATTN: Add TX completion notification if/when direct buffer
+ * transfer is implemented. At this moment only correct tracking
+ * of tx_count is important.
+ */
+ riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
+ cm->mport->id, slot, cm->tx_cnt);
+
+ spin_lock(&cm->tx_lock);
+ ack_slot = cm->tx_ack_slot;
+
+ if (ack_slot == slot)
+ riocm_debug(TX_EVENT, "slot == ack_slot");
+
+ while (cm->tx_cnt && ((ack_slot != slot) ||
+ (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {
+
+ cm->tx_buf[ack_slot] = NULL;
+ ++ack_slot;
+ ack_slot &= (RIOCM_TX_RING_SIZE - 1);
+ cm->tx_cnt--;
+ }
+
+ if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
+ riocm_error("tx_cnt %d out of sync", cm->tx_cnt);
+
+ WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));
+
+ cm->tx_ack_slot = ack_slot;
+
+ /*
+ * If there are pending requests, insert them into transmit queue
+ */
+ if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
+ struct tx_req *req, *_req;
+ int rc;
+
+ list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
+ list_del(&req->node);
+ cm->tx_buf[cm->tx_slot] = req->buffer;
+ rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
+ req->buffer, req->len);
+ kfree(req->buffer);
+ kfree(req);
+
+ ++cm->tx_cnt;
+ ++cm->tx_slot;
+ cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
+ if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
+ break;
+ }
+ }
+
+ spin_unlock(&cm->tx_lock);
+}
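
rio_txcq_handler() retires every slot from tx_ack_slot up to the slot reported by the completion event, masking indices against the power-of-two ring size. A standalone model of that bookkeeping (editorial sketch; the ring size mirrors RIOCM_TX_RING_SIZE and the buffers are dummies):

#include <stdio.h>

#define TX_RING_SIZE 128	/* must stay a power of two for the mask */

static void *tx_buf[TX_RING_SIZE];
static int tx_cnt, tx_ack_slot;

/* Free every slot between the last acknowledged one and the slot
 * reported by the completion event. */
static void txcq_model(int slot)
{
	int ack_slot = tx_ack_slot;

	while (tx_cnt && (ack_slot != slot || tx_cnt == TX_RING_SIZE)) {
		tx_buf[ack_slot] = NULL;
		ack_slot = (ack_slot + 1) & (TX_RING_SIZE - 1);
		tx_cnt--;
	}
	tx_ack_slot = ack_slot;
}

int main(void)
{
	static int dummy;
	int i;

	for (i = 0; i < 5; i++) {	/* pretend 5 buffers were queued */
		tx_buf[i] = &dummy;
		tx_cnt++;
	}
	txcq_model(5);			/* completion reported for slot 5 */
	printf("tx_cnt=%d tx_ack_slot=%d\n", tx_cnt, tx_ack_slot);
	return 0;
}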
+
+static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
+ int mbox, int slot)
+{
+ struct cm_dev *cm = dev_id;
+
+ if (cm && rio_mport_is_running(cm->mport))
+ rio_txcq_handler(cm, slot);
+}
+
+static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
+ void *buffer, size_t len)
+{
+ unsigned long flags;
+ struct tx_req *treq;
+
+ treq = kzalloc(sizeof(*treq), GFP_KERNEL);
+ if (treq == NULL)
+ return -ENOMEM;
+
+ treq->rdev = rdev;
+ treq->buffer = buffer;
+ treq->len = len;
+
+ spin_lock_irqsave(&cm->tx_lock, flags);
+ list_add_tail(&treq->node, &cm->tx_reqs);
+ spin_unlock_irqrestore(&cm->tx_lock, flags);
+ return 0;
+}
+
+/*
+ * riocm_post_send - helper function that places packet into msg TX queue
+ * @cm: cm_dev object
+ * @rdev: target RapidIO device object (required by outbound msg interface)
+ * @buffer: pointer to a packet buffer to send
+ * @len: length of data to transfer
+ * @req: request priority flag
+ *
+ * Returns: 0 if success, or error code otherwise.
+ */
+static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
+ void *buffer, size_t len)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm->tx_lock, flags);
+
+ if (cm->mport == NULL) {
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
+ riocm_debug(TX, "Tx Queue is full");
+ rc = -EBUSY;
+ goto err_out;
+ }
+
+ cm->tx_buf[cm->tx_slot] = buffer;
+ rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);
+
+ riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
+ buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);
+
+ ++cm->tx_cnt;
+ ++cm->tx_slot;
+ cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
+
+err_out:
+ spin_unlock_irqrestore(&cm->tx_lock, flags);
+ return rc;
+}
+
+/*
+ * riocm_ch_send - sends a data packet to a remote device
+ * @ch_id: local channel ID
+ * @buf: pointer to a data buffer to send (including CM header)
+ * @len: length of data to transfer (including CM header)
+ *
+ * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if one or more input parameters is/are not valid,
+ * -ENODEV if cannot find a channel with specified ID,
+ * -EAGAIN if a channel is not in CONNECTED state,
+ * + error codes returned by HW send routine.
+ */
+static int riocm_ch_send(u16 ch_id, void *buf, int len)
+{
+ struct rio_channel *ch;
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+
+ if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch) {
+ riocm_error("%s(%d) ch_%d not found", current->comm,
+ task_pid_nr(current), ch_id);
+ return -ENODEV;
+ }
+
+ if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
+ ret = -EAGAIN;
+ goto err_out;
+ }
+
+ /*
+ * Fill buffer header section with corresponding channel data
+ */
+ hdr = buf;
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(ch->rem_destid);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_DATA_MSG;
+ hdr->dst_ch = htons(ch->rem_channel);
+ hdr->src_ch = htons(ch->id);
+ hdr->msg_len = htons((u16)len);
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * HW-specific add_outb_message() routine copies TX data into its own
+ * internal transfer buffer (true for all RIONET compatible mport
+ * drivers). Must be reviewed if mport driver uses the buffer directly.
+ */
+
+ ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
+ if (ret)
+ riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
+err_out:
+ riocm_put_channel(ch);
+ return ret;
+}
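
riocm_ch_send() expects the caller to reserve room for struct rio_ch_chan_hdr at the start of the buffer; the driver then fills that header in network byte order before posting the message. A caller-side sketch of that buffer layout (editorial standalone model that mirrors the wire structures declared earlier in this file; it is not code from the patch):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors the packed wire layout declared earlier in this file */
struct cm_base_hdr {
	uint32_t src_id;
	uint32_t dst_id;
	uint8_t  src_mbox;
	uint8_t  dst_mbox;
	uint8_t  type;
} __attribute__((__packed__));

struct cm_chan_hdr {
	struct cm_base_hdr bhdr;
	uint8_t  ch_op;
	uint16_t dst_ch;
	uint16_t src_ch;
	uint16_t msg_len;
	uint16_t rsrvd;
} __attribute__((__packed__));

int main(void)
{
	const char payload[] = "hello";
	/* Header space is a reserved part of the data packet: allocate it
	 * up front and copy user data in right after it. */
	size_t len = sizeof(struct cm_chan_hdr) + sizeof(payload);
	char *buf = malloc(len);

	if (!buf)
		return 1;
	memcpy(buf + sizeof(struct cm_chan_hdr), payload, sizeof(payload));
	/* The driver fills the header fields (htonl/htons) before sending. */
	printf("packet %zu bytes: %zu header + %zu payload\n",
	       len, sizeof(struct cm_chan_hdr), sizeof(payload));
	free(buf);
	return 0;
}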
+
+static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
+{
+ int i, ret = -EINVAL;
+
+ spin_lock_bh(&ch->lock);
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (ch->rx_ring.inuse[i] == buf) {
+ ch->rx_ring.inuse[i] = NULL;
+ ch->rx_ring.inuse_cnt--;
+ ret = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ch->lock);
+
+ if (!ret)
+ kfree(buf);
+
+ return ret;
+}
+
+/*
+ * riocm_ch_receive - fetch a data packet received for the specified channel
+ * @ch: local channel ID
+ * @buf: pointer to a packet buffer
+ * @timeout: timeout to wait for incoming packet (in jiffies)
+ *
+ * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
+ * -EAGAIN if a channel is not in CONNECTED state,
+ * -ENOMEM if in-use tracking queue is full,
+ * -ETIME if wait timeout expired,
+ * -EINTR if wait was interrupted.
+ */
+static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
+{
+ void *rxmsg = NULL;
+ int i, ret = 0;
+ long wret;
+
+ if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
+ /* If we do not have entries to track buffers given to upper
+ * layer, reject request.
+ */
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);
+
+ riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);
+
+ if (!wret)
+ ret = -ETIME;
+ else if (wret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;
+
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&ch->lock);
+
+ rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
+ ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
+ ch->rx_ring.count--;
+ ch->rx_ring.tail++;
+ ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
+ ret = -ENOMEM;
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (ch->rx_ring.inuse[i] == NULL) {
+ ch->rx_ring.inuse[i] = rxmsg;
+ ch->rx_ring.inuse_cnt++;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* We have no entry to store pending message: drop it */
+ kfree(rxmsg);
+ rxmsg = NULL;
+ }
+
+ spin_unlock_bh(&ch->lock);
+out:
+ *buf = rxmsg;
+ return ret;
+}
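
riocm_ch_receive() pops the oldest buffer from the per-channel RX ring and parks it in the inuse[] table until the caller hands it back through riocm_ch_free_rxbuf(). A minimal standalone model of that hand-off (editorial sketch; ring size mirrors RIOCM_RX_RING_SIZE, not part of the patch):

#include <stdio.h>

#define RX_RING_SIZE 128

static void *ring[RX_RING_SIZE];
static void *inuse[RX_RING_SIZE];
static int head, tail, count, inuse_cnt;

/* Take the oldest buffer off the ring and track it as "in use". */
static void *rx_fetch(void)
{
	void *msg;
	int i;

	if (!count)
		return NULL;
	msg = ring[tail];
	ring[tail] = NULL;
	tail = (tail + 1) % RX_RING_SIZE;
	count--;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (!inuse[i]) {
			inuse[i] = msg;
			inuse_cnt++;
			return msg;
		}
	}
	return NULL;	/* no tracking slot: the driver drops the buffer */
}

int main(void)
{
	static int pkt;

	ring[head] = &pkt;		/* model a received message */
	head = (head + 1) % RX_RING_SIZE;
	count++;

	printf("fetched=%p inuse_cnt=%d count=%d\n",
	       rx_fetch(), inuse_cnt, count);
	return 0;
}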
+
+/*
+ * riocm_ch_connect - sends a connect request to a remote device
+ * @loc_ch: local channel ID
+ * @cm: CM device to send connect request
+ * @peer: target RapidIO device
+ * @rem_ch: remote channel ID
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if the channel is not in IDLE state,
+ * -EAGAIN if no connection request available immediately,
+ * -ETIME if ACK response timeout expired,
+ * -EINTR if wait for response was interrupted.
+ */
+static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
+ struct cm_peer *peer, u16 rem_ch)
+{
+ struct rio_channel *ch = NULL;
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+ long wret;
+
+ ch = riocm_get_channel(loc_ch);
+ if (!ch)
+ return -ENODEV;
+
+ if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
+ ret = -EINVAL;
+ goto conn_done;
+ }
+
+ ch->cmdev = cm;
+ ch->rdev = peer->rdev;
+ ch->context = NULL;
+ ch->loc_destid = cm->mport->host_deviceid;
+ ch->rem_channel = rem_ch;
+
+ /*
+ * Send connect request to the remote RapidIO device
+ */
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL) {
+ ret = -ENOMEM;
+ goto conn_done;
+ }
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(peer->rdev->destid);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_CONN_REQ;
+ hdr->dst_ch = htons(rem_ch);
+ hdr->src_ch = htons(loc_ch);
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * HW-specific add_outb_message() routine copies TX data into its
+ * internal transfer buffer. Must be reviewed if mport driver uses
+ * this buffer directly.
+ */
+ ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
+
+ if (ret != -EBUSY) {
+ kfree(hdr);
+ } else {
+ ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
+ if (ret)
+ kfree(hdr);
+ }
+
+ if (ret) {
+ riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
+ goto conn_done;
+ }
+
+ /* Wait for connect response from the remote device */
+ wret = wait_for_completion_interruptible_timeout(&ch->comp,
+ RIOCM_CONNECT_TO * HZ);
+ riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
+
+ if (!wret)
+ ret = -ETIME;
+ else if (wret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;
+
+conn_done:
+ riocm_put_channel(ch);
+ return ret;
+}
+
+static int riocm_send_ack(struct rio_channel *ch)
+{
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL)
+ return -ENOMEM;
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(ch->rem_destid);
+ hdr->dst_ch = htons(ch->rem_channel);
+ hdr->src_ch = htons(ch->id);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_CONN_ACK;
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * add_outb_message() routine copies TX data into its internal transfer
+ * buffer. Review if switching to direct buffer version.
+ */
+ ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
+
+ if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
+ ch->rdev, hdr, sizeof(*hdr)))
+ return 0;
+ kfree(hdr);
+
+ if (ret)
+ riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
+ ch->id, rio_name(ch->rdev), ret);
+ return ret;
+}
+
+/*
+ * riocm_ch_accept - accept incoming connection request
+ * @ch_id: channel ID
+ * @new_ch_id: pointer to storage for the accepted (new) channel ID
+ * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
+ * request is not available).
+ *
+ * Returns: pointer to new channel struct if success, or error-valued pointer:
+ * -ENODEV - cannot find specified channel or mport,
+ * -EINVAL - the channel is not in IDLE state,
+ * -EAGAIN - no connection request available immediately (timeout=0),
+ * -ENOMEM - unable to allocate new channel,
+ * -ETIME - wait timeout expired,
+ * -EINTR - wait was interrupted.
+ */
+static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
+ long timeout)
+{
+ struct rio_channel *ch = NULL;
+ struct rio_channel *new_ch = NULL;
+ struct conn_req *req;
+ struct cm_peer *peer;
+ int found = 0;
+ int err = 0;
+ long wret;
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch)
+ return ERR_PTR(-EINVAL);
+
+ if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /* Don't sleep if this is a non blocking call */
+ if (!timeout) {
+ if (!try_wait_for_completion(&ch->comp)) {
+ err = -EAGAIN;
+ goto err_put;
+ }
+ } else {
+ riocm_debug(WAIT, "on %d", ch->id);
+
+ wret = wait_for_completion_interruptible_timeout(&ch->comp,
+ timeout);
+ if (!wret) {
+ err = -ETIME;
+ goto err_put;
+ } else if (wret == -ERESTARTSYS) {
+ err = -EINTR;
+ goto err_put;
+ }
+ }
+
+ spin_lock_bh(&ch->lock);
+
+ if (ch->state != RIO_CM_LISTEN) {
+ err = -ECANCELED;
+ } else if (list_empty(&ch->accept_queue)) {
+ riocm_debug(WAIT, "on %d accept_queue is empty on completion",
+ ch->id);
+ err = -EIO;
+ }
+
+ spin_unlock_bh(&ch->lock);
+
+ if (err) {
+ riocm_debug(WAIT, "on %d returns %d", ch->id, err);
+ goto err_put;
+ }
+
+ /* Create new channel for this connection */
+ new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);
+
+ if (IS_ERR(new_ch)) {
+ riocm_error("failed to get channel for new req (%ld)",
+ PTR_ERR(new_ch));
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ spin_lock_bh(&ch->lock);
+
+ req = list_first_entry(&ch->accept_queue, struct conn_req, node);
+ list_del(&req->node);
+ new_ch->cmdev = ch->cmdev;
+ new_ch->loc_destid = ch->loc_destid;
+ new_ch->rem_destid = req->destid;
+ new_ch->rem_channel = req->chan;
+
+ spin_unlock_bh(&ch->lock);
+ riocm_put_channel(ch);
+ kfree(req);
+
+ down_read(&rdev_sem);
+ /* Find requester's device object */
+ list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
+ if (peer->rdev->destid == new_ch->rem_destid) {
+ riocm_debug(RX_CMD, "found matching device(%s)",
+ rio_name(peer->rdev));
+ found = 1;
+ break;
+ }
+ }
+ up_read(&rdev_sem);
+
+ if (!found) {
+ /* If peer device object not found, simply ignore the request */
+ err = -ENODEV;
+ goto err_nodev;
+ }
+
+ new_ch->rdev = peer->rdev;
+ new_ch->state = RIO_CM_CONNECTED;
+ spin_lock_init(&new_ch->lock);
+
+ /* Acknowledge the connection request. */
+ riocm_send_ack(new_ch);
+
+ *new_ch_id = new_ch->id;
+ return new_ch;
+err_put:
+ riocm_put_channel(ch);
+err_nodev:
+ if (new_ch) {
+ spin_lock_bh(&idr_lock);
+ idr_remove(&ch_idr, new_ch->id);
+ spin_unlock_bh(&idr_lock);
+ riocm_put_channel(new_ch);
+ }
+ *new_ch_id = 0;
+ return ERR_PTR(err);
+}
+
+/*
+ * riocm_ch_listen - puts a channel into LISTEN state
+ * @ch_id: channel ID
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if the specified channel does not exist or
+ * is not in CHAN_BOUND state.
+ */
+static int riocm_ch_listen(u16 ch_id)
+{
+ struct rio_channel *ch = NULL;
+ int ret = 0;
+
+ riocm_debug(CHOP, "(ch_%d)", ch_id);
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
+ ret = -EINVAL;
+ riocm_put_channel(ch);
+ return ret;
+}
+
+/*
+ * riocm_ch_bind - associate a channel object and an mport device
+ * @ch_id: channel ID
+ * @mport_id: local mport device ID
+ * @context: pointer to the additional caller's context
+ *
+ * Returns: 0 if success, or
+ * -ENODEV if cannot find specified mport,
+ * -EINVAL if the specified channel does not exist or
+ * is not in IDLE state.
+ */
+static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
+{
+ struct rio_channel *ch = NULL;
+ struct cm_dev *cm;
+ int rc = -ENODEV;
+
+ riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);
+
+ /* Find matching cm_dev object */
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if ((cm->mport->id == mport_id) &&
+ rio_mport_is_running(cm->mport)) {
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc)
+ goto exit;
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&ch->lock);
+ if (ch->state != RIO_CM_IDLE) {
+ spin_unlock_bh(&ch->lock);
+ rc = -EINVAL;
+ goto err_put;
+ }
+
+ ch->cmdev = cm;
+ ch->loc_destid = cm->mport->host_deviceid;
+ ch->context = context;
+ ch->state = RIO_CM_CHAN_BOUND;
+ spin_unlock_bh(&ch->lock);
+err_put:
+ riocm_put_channel(ch);
+exit:
+ up_read(&rdev_sem);
+ return rc;
+}
+
+/*
+ * riocm_ch_alloc - channel object allocation helper routine
+ * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
+ *
+ * Return value: pointer to newly created channel object,
+ * or error-valued pointer
+ */
+static struct rio_channel *riocm_ch_alloc(u16 ch_num)
+{
+ int id;
+ int start, end;
+ struct rio_channel *ch;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return ERR_PTR(-ENOMEM);
+
+ if (ch_num) {
+ /* If requested, try to obtain the specified channel ID */
+ start = ch_num;
+ end = ch_num + 1;
+ } else {
+ /* Obtain channel ID from the dynamic allocation range */
+ start = chstart;
+ end = RIOCM_MAX_CHNUM + 1;
+ }
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_bh(&idr_lock);
+ id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
+ spin_unlock_bh(&idr_lock);
+ idr_preload_end();
+
+ if (id < 0) {
+ kfree(ch);
+ return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
+ }
+
+ ch->id = (u16)id;
+ ch->state = RIO_CM_IDLE;
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->accept_queue);
+ INIT_LIST_HEAD(&ch->ch_node);
+ init_completion(&ch->comp);
+ init_completion(&ch->comp_close);
+ kref_init(&ch->ref);
+ ch->rx_ring.head = 0;
+ ch->rx_ring.tail = 0;
+ ch->rx_ring.count = 0;
+ ch->rx_ring.inuse_cnt = 0;
+
+ return ch;
+}
+
+/*
+ * riocm_ch_create - creates a new channel object and allocates ID for it
+ * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
+ *
+ * Allocates and initializes a new channel object. If the parameter ch_num > 0
+ * and is within the valid range, riocm_ch_create tries to allocate the
+ * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
+ * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
+ * Module parameter 'chstart' defines start of an ID range available for dynamic
+ * allocation. Range below 'chstart' is reserved for pre-defined ID numbers.
+ * Available channel numbers are limited by 16-bit size of channel numbers used
+ * in the packet header.
+ *
+ * Return value: pointer to rio_channel structure if successful (with the
+ * channel number updated via the pointer) or error-valued pointer on error.
+ */
+static struct rio_channel *riocm_ch_create(u16 *ch_num)
+{
+ struct rio_channel *ch = NULL;
+
+ ch = riocm_ch_alloc(*ch_num);
+
+ if (IS_ERR(ch))
+ riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
+ *ch_num, PTR_ERR(ch));
+ else
+ *ch_num = ch->id;
+
+ return ch;
+}
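
As the kernel-doc above notes, a nonzero *ch_num requests that exact ID while zero allocates cyclically from [chstart, RIOCM_MAX_CHNUM]. A tiny model of how the allocation range passed to the IDR is chosen (editorial sketch; values mirror the defaults defined earlier):

#include <stdio.h>

#define RIOCM_MAX_CHNUM 0xffff
static int chstart = 256;	/* module parameter default */

static void alloc_range(unsigned short ch_num, int *start, int *end)
{
	if (ch_num) {		/* caller asked for a specific channel ID */
		*start = ch_num;
		*end = ch_num + 1;
	} else {		/* dynamic allocation above the reserved IDs */
		*start = chstart;
		*end = RIOCM_MAX_CHNUM + 1;
	}
}

int main(void)
{
	int start, end;

	alloc_range(0, &start, &end);
	printf("dynamic range: [%d, %d)\n", start, end);
	alloc_range(42, &start, &end);
	printf("fixed ID: [%d, %d)\n", start, end);
	return 0;
}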
+
+/*
+ * riocm_ch_free - channel object release routine
+ * @ref: pointer to a channel's kref structure
+ */
+static void riocm_ch_free(struct kref *ref)
+{
+ struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
+ int i;
+
+ riocm_debug(CHOP, "(ch_%d)", ch->id);
+
+ if (ch->rx_ring.inuse_cnt) {
+ for (i = 0;
+ i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
+ if (ch->rx_ring.inuse[i] != NULL) {
+ kfree(ch->rx_ring.inuse[i]);
+ ch->rx_ring.inuse_cnt--;
+ }
+ }
+ }
+
+ if (ch->rx_ring.count)
+ for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
+ if (ch->rx_ring.buf[i] != NULL) {
+ kfree(ch->rx_ring.buf[i]);
+ ch->rx_ring.count--;
+ }
+ }
+
+ complete(&ch->comp_close);
+}
+
+static int riocm_send_close(struct rio_channel *ch)
+{
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+
+ /*
+ * Send CH_CLOSE notification to the remote RapidIO device
+ */
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL)
+ return -ENOMEM;
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(ch->rem_destid);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_CONN_CLOSE;
+ hdr->dst_ch = htons(ch->rem_channel);
+ hdr->src_ch = htons(ch->id);
+
+	/* ATTN: the function call below relies on the fact that the underlying
+	 * add_outb_message() routine copies TX data into its internal transfer
+	 * buffer. This needs to be reviewed if switching to direct buffer mode.
+	 */
+ ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
+
+ if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
+ hdr, sizeof(*hdr)))
+ return 0;
+ kfree(hdr);
+
+ if (ret)
+ riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);
+
+ return ret;
+}
+
+/*
+ * riocm_ch_close - closes a channel object with specified ID (by local request)
+ * @ch: channel to be closed
+ */
+static int riocm_ch_close(struct rio_channel *ch)
+{
+ unsigned long tmo = msecs_to_jiffies(3000);
+ enum rio_cm_state state;
+ long wret;
+ int ret = 0;
+
+ riocm_debug(CHOP, "ch_%d by %s(%d)",
+ ch->id, current->comm, task_pid_nr(current));
+
+ state = riocm_exch(ch, RIO_CM_DESTROYING);
+ if (state == RIO_CM_CONNECTED)
+ riocm_send_close(ch);
+
+ complete_all(&ch->comp);
+
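+	/* Drop the local reference; the final put ends up in riocm_ch_free(),
+	 * which signals the comp_close completion waited on below.
+	 */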
+ riocm_put_channel(ch);
+ wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
+
+ riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
+
+ if (wret == 0) {
+ /* Timeout on wait occurred */
+ riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
+ current->comm, task_pid_nr(current), ch->id);
+ ret = -ETIMEDOUT;
+ } else if (wret == -ERESTARTSYS) {
+		/* wait_for_completion was interrupted by a signal */
+ riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
+ current->comm, task_pid_nr(current), ch->id);
+ ret = -EINTR;
+ }
+
+ if (!ret) {
+ riocm_debug(CHOP, "ch_%d resources released", ch->id);
+ kfree(ch);
+ } else {
+ riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
+ }
+
+ return ret;
+}
+
+/*
+ * riocm_cdev_open() - Open character device
+ */
+static int riocm_cdev_open(struct inode *inode, struct file *filp)
+{
+ riocm_debug(INIT, "by %s(%d) filp=%p ",
+ current->comm, task_pid_nr(current), filp);
+
+ if (list_empty(&cm_dev_list))
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * riocm_cdev_release() - Release character device
+ */
+static int riocm_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct rio_channel *ch, *_c;
+ unsigned int i;
+ LIST_HEAD(list);
+
+ riocm_debug(EXIT, "by %s(%d) filp=%p",
+ current->comm, task_pid_nr(current), filp);
+
+ /* Check if there are channels associated with this file descriptor */
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch && ch->filp == filp) {
+ riocm_debug(EXIT, "ch_%d not released by %s(%d)",
+ ch->id, current->comm,
+ task_pid_nr(current));
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
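+	/* Close collected channels outside of idr_lock; riocm_ch_close() sleeps */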
+ if (!list_empty(&list)) {
+ list_for_each_entry_safe(ch, _c, &list, ch_node) {
+ list_del(&ch->ch_node);
+ riocm_ch_close(ch);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * cm_ep_get_list_size() - Reports number of endpoints in the network
+ */
+static int cm_ep_get_list_size(void __user *arg)
+{
+ u32 __user *p = arg;
+ u32 mport_id;
+ u32 count = 0;
+ struct cm_dev *cm;
+
+ if (get_user(mport_id, p))
+ return -EFAULT;
+ if (mport_id >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ /* Find a matching cm_dev object */
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport->id == mport_id) {
+ count = cm->npeers;
+ up_read(&rdev_sem);
+ if (copy_to_user(arg, &count, sizeof(u32)))
+ return -EFAULT;
+ return 0;
+ }
+ }
+ up_read(&rdev_sem);
+
+ return -ENODEV;
+}
+
+/*
+ * cm_ep_get_list() - Returns list of attached endpoints
+ */
+static int cm_ep_get_list(void __user *arg)
+{
+ struct cm_dev *cm;
+ struct cm_peer *peer;
+ u32 info[2];
+ void *buf;
+ u32 nent;
+ u32 *entry_ptr;
+ u32 i = 0;
+ int ret = 0;
+
+ if (copy_from_user(&info, arg, sizeof(info)))
+ return -EFAULT;
+
+ if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
+ return -EINVAL;
+
+ /* Find a matching cm_dev object */
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list)
+ if (cm->mport->id == (u8)info[1])
+ goto found;
+
+ up_read(&rdev_sem);
+ return -ENODEV;
+
+found:
+ nent = min(info[0], cm->npeers);
+ buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
+ if (!buf) {
+ up_read(&rdev_sem);
+ return -ENOMEM;
+ }
+
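+	/* First two u32 slots hold the returned entry count and the mport ID */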
+ entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));
+
+ list_for_each_entry(peer, &cm->peers, node) {
+ *entry_ptr = (u32)peer->rdev->destid;
+ entry_ptr++;
+ if (++i == nent)
+ break;
+ }
+ up_read(&rdev_sem);
+
+ ((u32 *)buf)[0] = i; /* report an updated number of entries */
+ ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
+ if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2)))
+ ret = -EFAULT;
+
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * cm_mport_get_list() - Returns list of available local mport devices
+ */
+static int cm_mport_get_list(void __user *arg)
+{
+ int ret = 0;
+ u32 entries;
+ void *buf;
+ struct cm_dev *cm;
+ u32 *entry_ptr;
+ int count = 0;
+
+ if (copy_from_user(&entries, arg, sizeof(entries)))
+ return -EFAULT;
+ if (entries == 0 || entries > RIO_MAX_MPORTS)
+ return -EINVAL;
+ buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Scan all registered cm_dev objects */
+ entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (count++ < entries) {
+ *entry_ptr = (cm->mport->id << 16) |
+ cm->mport->host_deviceid;
+ entry_ptr++;
+ }
+ }
+ up_read(&rdev_sem);
+
+ *((u32 *)buf) = count; /* report a real number of entries */
+ if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
+ ret = -EFAULT;
+
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * cm_chan_create() - Create a message exchange channel
+ */
+static int cm_chan_create(struct file *filp, void __user *arg)
+{
+ u16 __user *p = arg;
+ u16 ch_num;
+ struct rio_channel *ch;
+
+ if (get_user(ch_num, p))
+ return -EFAULT;
+
+ riocm_debug(CHOP, "ch_%d requested by %s(%d)",
+ ch_num, current->comm, task_pid_nr(current));
+ ch = riocm_ch_create(&ch_num);
+ if (IS_ERR(ch))
+ return PTR_ERR(ch);
+
+ ch->filp = filp;
+ riocm_debug(CHOP, "ch_%d created by %s(%d)",
+ ch_num, current->comm, task_pid_nr(current));
+ return put_user(ch_num, p);
+}
+
+/*
+ * cm_chan_close() - Close channel
+ * @filp: Pointer to file object
+ * @arg: Channel to close
+ */
+static int cm_chan_close(struct file *filp, void __user *arg)
+{
+ u16 __user *p = arg;
+ u16 ch_num;
+ struct rio_channel *ch;
+
+ if (get_user(ch_num, p))
+ return -EFAULT;
+
+ riocm_debug(CHOP, "ch_%d by %s(%d)",
+ ch_num, current->comm, task_pid_nr(current));
+
+ spin_lock_bh(&idr_lock);
+ ch = idr_find(&ch_idr, ch_num);
+ if (!ch) {
+ spin_unlock_bh(&idr_lock);
+ return 0;
+ }
+ if (ch->filp != filp) {
+ spin_unlock_bh(&idr_lock);
+ return -EINVAL;
+ }
+ idr_remove(&ch_idr, ch->id);
+ spin_unlock_bh(&idr_lock);
+
+ return riocm_ch_close(ch);
+}
+
+/*
+ * cm_chan_bind() - Bind channel
+ * @arg: Channel number
+ */
+static int cm_chan_bind(void __user *arg)
+{
+ struct rio_cm_channel chan;
+
+ if (copy_from_user(&chan, arg, sizeof(chan)))
+ return -EFAULT;
+ if (chan.mport_id >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ return riocm_ch_bind(chan.id, chan.mport_id, NULL);
+}
+
+/*
+ * cm_chan_listen() - Listen on channel
+ * @arg: Channel number
+ */
+static int cm_chan_listen(void __user *arg)
+{
+ u16 __user *p = arg;
+ u16 ch_num;
+
+ if (get_user(ch_num, p))
+ return -EFAULT;
+
+ return riocm_ch_listen(ch_num);
+}
+
+/*
+ * cm_chan_accept() - Accept incoming connection
+ * @filp: Pointer to file object
+ * @arg: Channel number
+ */
+static int cm_chan_accept(struct file *filp, void __user *arg)
+{
+ struct rio_cm_accept param;
+ long accept_to;
+ struct rio_channel *ch;
+
+ if (copy_from_user(&param, arg, sizeof(param)))
+ return -EFAULT;
+
+ riocm_debug(CHOP, "on ch_%d by %s(%d)",
+ param.ch_num, current->comm, task_pid_nr(current));
+
+ accept_to = param.wait_to ?
+ msecs_to_jiffies(param.wait_to) : 0;
+
+ ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
+ if (IS_ERR(ch))
+ return PTR_ERR(ch);
+ ch->filp = filp;
+
+ riocm_debug(CHOP, "new ch_%d for %s(%d)",
+ ch->id, current->comm, task_pid_nr(current));
+
+ if (copy_to_user(arg, &param, sizeof(param)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * cm_chan_connect() - Connect on channel
+ * @arg: Channel information
+ */
+static int cm_chan_connect(void __user *arg)
+{
+ struct rio_cm_channel chan;
+ struct cm_dev *cm;
+ struct cm_peer *peer;
+ int ret = -ENODEV;
+
+ if (copy_from_user(&chan, arg, sizeof(chan)))
+ return -EFAULT;
+ if (chan.mport_id >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ down_read(&rdev_sem);
+
+ /* Find matching cm_dev object */
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport->id == chan.mport_id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ goto err_out;
+
+ if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Find corresponding RapidIO endpoint device object */
+ ret = -ENODEV;
+
+ list_for_each_entry(peer, &cm->peers, node) {
+ if (peer->rdev->destid == chan.remote_destid) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ goto err_out;
+
+ up_read(&rdev_sem);
+
+ return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
+err_out:
+ up_read(&rdev_sem);
+ return ret;
+}
+
+/*
+ * cm_chan_msg_send() - Send a message through channel
+ * @arg: Outbound message information
+ */
+static int cm_chan_msg_send(void __user *arg)
+{
+ struct rio_cm_msg msg;
+ void *buf;
+ int ret = 0;
+
+ if (copy_from_user(&msg, arg, sizeof(msg)))
+ return -EFAULT;
+ if (msg.size > RIO_MAX_MSG_SIZE)
+ return -EINVAL;
+
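+	/* Stage the user payload in a kernel buffer before posting the send */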
+ buf = kmalloc(msg.size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = riocm_ch_send(msg.ch_num, buf, msg.size);
+out:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * cm_chan_msg_rcv() - Receive a message through channel
+ * @arg: Inbound message information
+ */
+static int cm_chan_msg_rcv(void __user *arg)
+{
+ struct rio_cm_msg msg;
+ struct rio_channel *ch;
+ void *buf;
+ long rxto;
+ int ret = 0, msg_size;
+
+ if (copy_from_user(&msg, arg, sizeof(msg)))
+ return -EFAULT;
+
+ if (msg.ch_num == 0 || msg.size == 0)
+ return -EINVAL;
+
+ ch = riocm_get_channel(msg.ch_num);
+ if (!ch)
+ return -ENODEV;
+
+ rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;
+
+ ret = riocm_ch_receive(ch, &buf, rxto);
+ if (ret)
+ goto out;
+
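+	/* Clamp the copied length to the maximum CM message size */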
+ msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));
+
+ if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
+ ret = -EFAULT;
+
+ riocm_ch_free_rxbuf(ch, buf);
+out:
+ riocm_put_channel(ch);
+ return ret;
+}
+
+/*
+ * riocm_cdev_ioctl() - IOCTL requests handler
+ */
+static long
+riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case RIO_CM_EP_GET_LIST_SIZE:
+ return cm_ep_get_list_size((void __user *)arg);
+ case RIO_CM_EP_GET_LIST:
+ return cm_ep_get_list((void __user *)arg);
+ case RIO_CM_CHAN_CREATE:
+ return cm_chan_create(filp, (void __user *)arg);
+ case RIO_CM_CHAN_CLOSE:
+ return cm_chan_close(filp, (void __user *)arg);
+ case RIO_CM_CHAN_BIND:
+ return cm_chan_bind((void __user *)arg);
+ case RIO_CM_CHAN_LISTEN:
+ return cm_chan_listen((void __user *)arg);
+ case RIO_CM_CHAN_ACCEPT:
+ return cm_chan_accept(filp, (void __user *)arg);
+ case RIO_CM_CHAN_CONNECT:
+ return cm_chan_connect((void __user *)arg);
+ case RIO_CM_CHAN_SEND:
+ return cm_chan_msg_send((void __user *)arg);
+ case RIO_CM_CHAN_RECEIVE:
+ return cm_chan_msg_rcv((void __user *)arg);
+ case RIO_CM_MPORT_GET_LIST:
+ return cm_mport_get_list((void __user *)arg);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct file_operations riocm_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = riocm_cdev_open,
+ .release = riocm_cdev_release,
+ .unlocked_ioctl = riocm_cdev_ioctl,
+};
+
+/*
+ * riocm_add_dev - add new remote RapidIO device into channel management core
+ * @dev: device object associated with RapidIO device
+ * @sif: subsystem interface
+ *
+ * Adds the specified RapidIO device (if applicable) into the peers list of
+ * the corresponding channel management device (cm_dev).
+ */
+static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ struct cm_peer *peer;
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct cm_dev *cm;
+
+ /* Check if the remote device has capabilities required to support CM */
+ if (!dev_cm_capable(rdev))
+ return 0;
+
+ riocm_debug(RDEV, "(%s)", rio_name(rdev));
+
+ peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ /* Find a corresponding cm_dev object */
+ down_write(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport == rdev->net->hport)
+ goto found;
+ }
+
+ up_write(&rdev_sem);
+ kfree(peer);
+ return -ENODEV;
+
+found:
+ peer->rdev = rdev;
+ list_add_tail(&peer->node, &cm->peers);
+ cm->npeers++;
+
+ up_write(&rdev_sem);
+ return 0;
+}
+
+/*
+ * riocm_remove_dev - remove remote RapidIO device from channel management core
+ * @dev: device object associated with RapidIO device
+ * @sif: subsystem interface
+ *
+ * Removes the specified RapidIO device (if applicable) from the peers list
+ * of the corresponding channel management device (cm_dev).
+ */
+static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct cm_dev *cm;
+ struct cm_peer *peer;
+ struct rio_channel *ch, *_c;
+ unsigned int i;
+ bool found = false;
+ LIST_HEAD(list);
+
+ /* Check if the remote device has capabilities required to support CM */
+ if (!dev_cm_capable(rdev))
+ return;
+
+ riocm_debug(RDEV, "(%s)", rio_name(rdev));
+
+ /* Find matching cm_dev object */
+ down_write(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport == rdev->net->hport) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ up_write(&rdev_sem);
+ return;
+ }
+
+ /* Remove remote device from the list of peers */
+ found = false;
+ list_for_each_entry(peer, &cm->peers, node) {
+ if (peer->rdev == rdev) {
+ riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
+ found = true;
+ list_del(&peer->node);
+ cm->npeers--;
+ kfree(peer);
+ break;
+ }
+ }
+
+ up_write(&rdev_sem);
+
+ if (!found)
+ return;
+
+ /*
+ * Release channels associated with this peer
+ */
+
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch && ch->rdev == rdev) {
+ if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
+ riocm_exch(ch, RIO_CM_DISCONNECT);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
+ if (!list_empty(&list)) {
+ list_for_each_entry_safe(ch, _c, &list, ch_node) {
+ list_del(&ch->ch_node);
+ riocm_ch_close(ch);
+ }
+ }
+}
+
+/*
+ * riocm_cdev_add() - Create rio_cm char device
+ * @devno: device number assigned to device (MAJ + MIN)
+ */
+static int riocm_cdev_add(dev_t devno)
+{
+ int ret;
+
+ cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
+ riocm_cdev.cdev.owner = THIS_MODULE;
+ ret = cdev_add(&riocm_cdev.cdev, devno, 1);
+ if (ret < 0) {
+ riocm_error("Cannot register a device with error %d", ret);
+ return ret;
+ }
+
+ riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
+ if (IS_ERR(riocm_cdev.dev)) {
+ cdev_del(&riocm_cdev.cdev);
+ return PTR_ERR(riocm_cdev.dev);
+ }
+
+ riocm_debug(MPORT, "Added %s cdev(%d:%d)",
+ DEV_NAME, MAJOR(devno), MINOR(devno));
+
+ return 0;
+}
+
+/*
+ * riocm_add_mport - add new local mport device into channel management core
+ * @dev: device object associated with mport
+ * @class_intf: class interface
+ *
+ * When a new mport device is added, CM immediately reserves the inbound and
+ * outbound RapidIO mailboxes that it will use.
+ */
+static int riocm_add_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int rc;
+ int i;
+ struct cm_dev *cm;
+ struct rio_mport *mport = to_rio_mport(dev);
+
+ riocm_debug(MPORT, "add mport %s", mport->name);
+
+ cm = kzalloc(sizeof(*cm), GFP_KERNEL);
+ if (!cm)
+ return -ENOMEM;
+
+ cm->mport = mport;
+
+ rc = rio_request_outb_mbox(mport, cm, cmbox,
+ RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
+ if (rc) {
+ riocm_error("failed to allocate OBMBOX_%d on %s",
+ cmbox, mport->name);
+ kfree(cm);
+ return -ENODEV;
+ }
+
+ rc = rio_request_inb_mbox(mport, cm, cmbox,
+ RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
+ if (rc) {
+ riocm_error("failed to allocate IBMBOX_%d on %s",
+ cmbox, mport->name);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENODEV;
+ }
+
+ /*
+ * Allocate and register inbound messaging buffers to be ready
+ * to receive channel and system management requests
+ */
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
+ cm->rx_buf[i] = NULL;
+
+ cm->rx_slots = RIOCM_RX_RING_SIZE;
+ mutex_init(&cm->rx_lock);
+ riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
+ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
+
+ cm->tx_slot = 0;
+ cm->tx_cnt = 0;
+ cm->tx_ack_slot = 0;
+ spin_lock_init(&cm->tx_lock);
+
+ INIT_LIST_HEAD(&cm->peers);
+ cm->npeers = 0;
+ INIT_LIST_HEAD(&cm->tx_reqs);
+
+ down_write(&rdev_sem);
+ list_add_tail(&cm->list, &cm_dev_list);
+ up_write(&rdev_sem);
+
+ return 0;
+}
+
+/*
+ * riocm_remove_mport - remove local mport device from channel management core
+ * @dev: device object associated with mport
+ * @class_intf: class interface
+ *
+ * Removes a local mport device from the list of registered devices that
+ * provide channel management services. Does nothing if the specified mport
+ * is not registered with the CM core.
+ */
+static void riocm_remove_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+ struct cm_dev *cm;
+ struct cm_peer *peer, *temp;
+ struct rio_channel *ch, *_c;
+ unsigned int i;
+ bool found = false;
+ LIST_HEAD(list);
+
+ riocm_debug(MPORT, "%s", mport->name);
+
+ /* Find a matching cm_dev object */
+ down_write(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport == mport) {
+ list_del(&cm->list);
+ found = true;
+ break;
+ }
+ }
+ up_write(&rdev_sem);
+ if (!found)
+ return;
+
+ flush_workqueue(cm->rx_wq);
+ destroy_workqueue(cm->rx_wq);
+
+ /* Release channels bound to this mport */
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch->cmdev == cm) {
+ riocm_debug(RDEV, "%s drop ch_%d",
+ mport->name, ch->id);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
+ if (!list_empty(&list)) {
+ list_for_each_entry_safe(ch, _c, &list, ch_node) {
+ list_del(&ch->ch_node);
+ riocm_ch_close(ch);
+ }
+ }
+
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+
+ /* Remove and free peer entries */
+ if (!list_empty(&cm->peers))
+ riocm_debug(RDEV, "ATTN: peer list not empty");
+ list_for_each_entry_safe(peer, temp, &cm->peers, node) {
+ riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
+ list_del(&peer->node);
+ kfree(peer);
+ }
+
+ riocm_rx_free(cm);
+ kfree(cm);
+ riocm_debug(MPORT, "%s done", mport->name);
+}
+
+static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ struct rio_channel *ch;
+ unsigned int i;
+
+ riocm_debug(EXIT, ".");
+
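+	/* Send a close notification to the remote end of every connected channel */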
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ riocm_debug(EXIT, "close ch %d", ch->id);
+ if (ch->state == RIO_CM_CONNECTED)
+ riocm_send_close(ch);
+ }
+ spin_unlock_bh(&idr_lock);
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * riocm_interface handles addition/removal of remote RapidIO devices
+ */
+static struct subsys_interface riocm_interface = {
+ .name = "rio_cm",
+ .subsys = &rio_bus_type,
+ .add_dev = riocm_add_dev,
+ .remove_dev = riocm_remove_dev,
+};
+
+/*
+ * rio_mport_interface handles addition/removal of local mport devices
+ */
+static struct class_interface rio_mport_interface __refdata = {
+ .class = &rio_mport_class,
+ .add_dev = riocm_add_mport,
+ .remove_dev = riocm_remove_mport,
+};
+
+static struct notifier_block rio_cm_notifier = {
+ .notifier_call = rio_cm_shutdown,
+};
+
+static int __init riocm_init(void)
+{
+ int ret;
+
+ /* Create device class needed by udev */
+ dev_class = class_create(THIS_MODULE, DRV_NAME);
+ if (IS_ERR(dev_class)) {
+ riocm_error("Cannot create " DRV_NAME " class");
+ return PTR_ERR(dev_class);
+ }
+
+ ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
+ if (ret) {
+ class_destroy(dev_class);
+ return ret;
+ }
+
+ dev_major = MAJOR(dev_number);
+ dev_minor_base = MINOR(dev_number);
+ riocm_debug(INIT, "Registered class with %d major", dev_major);
+
+ /*
+ * Register as rapidio_port class interface to get notifications about
+ * mport additions and removals.
+ */
+ ret = class_interface_register(&rio_mport_interface);
+ if (ret) {
+ riocm_error("class_interface_register error: %d", ret);
+ goto err_reg;
+ }
+
+ /*
+ * Register as RapidIO bus interface to get notifications about
+ * addition/removal of remote RapidIO devices.
+ */
+ ret = subsys_interface_register(&riocm_interface);
+ if (ret) {
+ riocm_error("subsys_interface_register error: %d", ret);
+ goto err_cl;
+ }
+
+ ret = register_reboot_notifier(&rio_cm_notifier);
+ if (ret) {
+ riocm_error("failed to register reboot notifier (err=%d)", ret);
+ goto err_sif;
+ }
+
+ ret = riocm_cdev_add(dev_number);
+ if (ret) {
+ unregister_reboot_notifier(&rio_cm_notifier);
+ ret = -ENODEV;
+ goto err_sif;
+ }
+
+ return 0;
+err_sif:
+ subsys_interface_unregister(&riocm_interface);
+err_cl:
+ class_interface_unregister(&rio_mport_interface);
+err_reg:
+ unregister_chrdev_region(dev_number, 1);
+ class_destroy(dev_class);
+ return ret;
+}
+
+static void __exit riocm_exit(void)
+{
+ riocm_debug(EXIT, "enter");
+ unregister_reboot_notifier(&rio_cm_notifier);
+ subsys_interface_unregister(&riocm_interface);
+ class_interface_unregister(&rio_mport_interface);
+ idr_destroy(&ch_idr);
+
+ device_unregister(riocm_cdev.dev);
+ cdev_del(&(riocm_cdev.cdev));
+
+ class_destroy(dev_class);
+ unregister_chrdev_region(dev_number, 1);
+}
+
+late_initcall(riocm_init);
+module_exit(riocm_exit);
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index 345841562f95..92767fd3b541 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -22,3 +22,9 @@ config RAPIDIO_CPS_GEN2
default n
---help---
Includes support for ITD CPS Gen.2 serial RapidIO switches.
+
+config RAPIDIO_RXS_GEN3
+ tristate "IDT RXS Gen.3 SRIO switch support"
+ default n
+ ---help---
+	  Includes support for IDT RXS Gen.3 serial RapidIO switches.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 051cc6b38188..6bdd54c4e733 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
+obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 9f7fe21580bb..e67b923b1ca6 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -436,10 +436,11 @@ static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id)
RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
}
+ spin_unlock(&rdev->rswitch->lock);
+
/* Create device-specific sysfs attributes */
idtg2_sysfs(rdev, true);
- spin_unlock(&rdev->rswitch->lock);
return 0;
}
@@ -452,11 +453,9 @@ static void idtg2_remove(struct rio_dev *rdev)
return;
}
rdev->rswitch->ops = NULL;
-
+ spin_unlock(&rdev->rswitch->lock);
/* Remove device-specific sysfs attributes */
idtg2_sysfs(rdev, false);
-
- spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id idtg2_id_table[] = {
diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c
new file mode 100644
index 000000000000..c5923a547bed
--- /dev/null
+++ b/drivers/rapidio/switches/idt_gen3.c
@@ -0,0 +1,382 @@
+/*
+ * IDT RXS Gen.3 Serial RapidIO switch family support
+ *
+ * Copyright 2016 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+
+#include <asm/page.h>
+#include "../rio.h"
+
+#define RIO_EM_PW_STAT 0x40020
+#define RIO_PW_CTL 0x40204
+#define RIO_PW_CTL_PW_TMR 0xffffff00
+#define RIO_PW_ROUTE 0x40208
+
+#define RIO_EM_DEV_INT_EN 0x40030
+
+#define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100)
+#define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000
+
+#define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100)
+#define RIO_PLM_SPx_PW_EN_OK2U 0x40000000
+#define RIO_PLM_SPx_PW_EN_LINIT 0x10000000
+
+#define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4)
+#define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \
+ (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4)
+
+static int
+idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ u32 rval;
+ u32 entry = route_port;
+ int err = 0;
+
+ pr_debug("RIO: %s t=0x%x did_%x to p_%x\n",
+ __func__, table, route_destid, entry);
+
+ if (route_destid > 0xFF)
+ return -EINVAL;
+
+ if (route_port == RIO_INVALID_ROUTE)
+ entry = RIO_RT_ENTRY_DROP_PKT;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ /* Use broadcast register to update all per-port tables */
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid),
+ entry);
+ return err;
+ }
+
+ /*
+ * Verify that specified port/table number is valid
+ */
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rval);
+ if (err)
+ return err;
+
+ if (table >= RIO_GET_TOTAL_PORTS(rval))
+ return -EINVAL;
+
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
+ entry);
+ return err;
+}
+
+static int
+idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 rval;
+ int err;
+
+ if (route_destid > 0xFF)
+ return -EINVAL;
+
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rval);
+ if (err)
+ return err;
+
+ /*
+	 * This switch device does not have a dedicated global routing table.
+	 * Instead, the routing table of the port that received the maintenance
+	 * read request is used.
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = RIO_GET_PORT_NUM(rval);
+ else if (table >= RIO_GET_TOTAL_PORTS(rval))
+ return -EINVAL;
+
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
+ &rval);
+ if (err)
+ return err;
+
+ if (rval == RIO_RT_ENTRY_DROP_PKT)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)rval;
+
+ return 0;
+}
+
+static int
+idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+ u32 rval;
+ int err;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ for (i = 0; i <= 0xff; i++) {
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_BC_L2_Gn_ENTRYx_CSR(0, i),
+ RIO_RT_ENTRY_DROP_PKT);
+ if (err)
+ break;
+ }
+
+ return err;
+ }
+
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rval);
+ if (err)
+ return err;
+
+ if (table >= RIO_GET_TOTAL_PORTS(rval))
+ return -EINVAL;
+
+ for (i = 0; i <= 0xff; i++) {
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i),
+ RIO_RT_ENTRY_DROP_PKT);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * This routine performs device-specific initialization only.
+ * All standard EM configuration should be performed at the upper level.
+ */
+static int
+idtg3_em_init(struct rio_dev *rdev)
+{
+ int i, tmp;
+ u32 rval;
+
+ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
+
+ /* Disable assertion of interrupt signal */
+ rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0);
+
+ /* Disable port-write event notifications during initialization */
+ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL,
+ RIO_EM_PW_TX_CTRL_PW_DIS);
+
+ /* Configure Port-Write notifications for hot-swap events */
+ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ for (i = 0; i < tmp; i++) {
+
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i),
+ &rval);
+ if (rval & RIO_PORT_N_ERR_STS_PORT_UA)
+ continue;
+
+ /* Clear events signaled before enabling notification */
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0);
+
+ /* Enable event notifications */
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i),
+ RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK);
+ /* Enable port-write generation on events */
+ rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i),
+ RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT);
+
+ }
+
+ /* Set Port-Write destination port */
+ tmp = RIO_GET_PORT_NUM(rdev->swpinfo);
+ rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp);
+
+
+ /* Enable sending port-write event notifications */
+ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0);
+
+ /* set TVAL = ~50us */
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+ return 0;
+}
+
+
+/*
+ * idtg3_em_handler - device-specific error handler
+ *
+ * If the link is down (PORT_UNINIT), the handler does nothing - this is
+ * treated as removal of the link partner from the port.
+ *
+ * If the link is up (PORT_OK), the situation is handled as a *new* device
+ * insertion. In this case the ERR_STOP bits are cleared by issuing a soft
+ * reset command to the reporting port. Inbound and outbound ackIDs are
+ * cleared by the reset as well. This way the port is synchronized with the
+ * freshly inserted device (assuming it was reset/powered-up on insertion).
+ *
+ * TODO: This is not sufficient when a link between two devices goes down and
+ * comes up again (e.g. cable disconnect). For that situation a full ackID
+ * realignment process has to be implemented.
+ */
+static int
+idtg3_em_handler(struct rio_dev *rdev, u8 pnum)
+{
+ u32 err_status;
+ u32 rval;
+
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
+
+ /* Do nothing for device/link removal */
+ if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT)
+ return 0;
+
+	/* When the link is OK we have a device insertion.
+	 * Request a port soft reset to clear errors if they are present.
+	 * Inbound and outbound ackIDs will be 0 after the reset.
+ */
+ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) {
+ rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval);
+ rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum),
+ rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST);
+ udelay(10);
+ rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval);
+ msleep(500);
+ }
+
+ return 0;
+}
+
+static struct rio_switch_ops idtg3_switch_ops = {
+ .owner = THIS_MODULE,
+ .add_entry = idtg3_route_add_entry,
+ .get_entry = idtg3_route_get_entry,
+ .clr_table = idtg3_route_clr_table,
+ .em_init = idtg3_em_init,
+ .em_handle = idtg3_em_handler,
+};
+
+static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+
+ spin_lock(&rdev->rswitch->lock);
+
+ if (rdev->rswitch->ops) {
+ spin_unlock(&rdev->rswitch->lock);
+ return -EINVAL;
+ }
+
+ rdev->rswitch->ops = &idtg3_switch_ops;
+
+ if (rdev->do_enum) {
+ /* Disable hierarchical routing support: Existing fabric
+ * enumeration/discovery process (see rio-scan.c) uses 8-bit
+ * flat destination ID routing only.
+ */
+ rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0);
+ }
+
+ spin_unlock(&rdev->rswitch->lock);
+
+ return 0;
+}
+
+static void idtg3_remove(struct rio_dev *rdev)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ spin_lock(&rdev->rswitch->lock);
+ if (rdev->rswitch->ops == &idtg3_switch_ops)
+ rdev->rswitch->ops = NULL;
+ spin_unlock(&rdev->rswitch->lock);
+}
+
+/*
+ * Gen3 switches keep resending PW messages until the corresponding event flag
+ * is cleared. Use the shutdown notification to disable generation of
+ * port-write messages when their destination node is being shut down.
+ */
+static void idtg3_shutdown(struct rio_dev *rdev)
+{
+ int i;
+ u32 rval;
+ u16 destid;
+
+ /* Currently the enumerator node acts also as PW handler */
+ if (!rdev->do_enum)
+ return;
+
+ pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev));
+
+ rio_read_config_32(rdev, RIO_PW_ROUTE, &rval);
+ i = RIO_GET_PORT_NUM(rdev->swpinfo);
+
+ /* Check port-write destination port */
+ if (!((1 << i) & rval))
+ return;
+
+	/* Disable sending port-write event notifications if the PW destID
+	 * matches that of the enumerator node
+ */
+ rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval);
+
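+	/* Extract an 8- or 16-bit PW target destID depending on the DEV16 flag */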
+ if (rval & RIO_EM_PW_TGT_DEVID_DEV16)
+ destid = rval >> 16;
+ else
+ destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16);
+
+ if (rdev->net->hport->host_deviceid == destid) {
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0);
+ pr_debug("RIO: %s(%s) PW transmission disabled\n",
+ __func__, rio_name(rdev));
+ }
+}
+
+static struct rio_device_id idtg3_id_table[] = {
+ {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
+ {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
+ { 0, } /* terminate list */
+};
+
+static struct rio_driver idtg3_driver = {
+ .name = "idt_gen3",
+ .id_table = idtg3_id_table,
+ .probe = idtg3_probe,
+ .remove = idtg3_remove,
+ .shutdown = idtg3_shutdown,
+};
+
+static int __init idtg3_init(void)
+{
+ return rio_register_driver(&idtg3_driver);
+}
+
+static void __exit idtg3_exit(void)
+{
+ pr_debug("RIO: %s\n", __func__);
+ rio_unregister_driver(&idtg3_driver);
+ pr_debug("RIO: %s done\n", __func__);
+}
+
+device_initcall(idtg3_init);
+module_exit(idtg3_exit);
+
+MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver");
+MODULE_AUTHOR("Integrated Device Technology, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 42c8b014fe15..2700d15f7584 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -175,12 +175,10 @@ tsi57x_em_init(struct rio_dev *rdev)
/* Clear all pending interrupts */
rio_read_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
&regval);
rio_write_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
regval & 0x07120214);
rio_read_config_32(rdev,
@@ -198,7 +196,7 @@ tsi57x_em_init(struct rio_dev *rdev)
/* Skip next (odd) port if the current port is in x4 mode */
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
&regval);
if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
portnum++;
@@ -221,23 +219,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
u32 regval;
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
&err_status);
if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
- (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
- RIO_PORT_N_ERR_STS_PW_INP_ES))) {
+ (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES))) {
/* Remove any queued packets by locking/unlocking port */
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
&regval);
if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
regval | RIO_PORT_N_CTL_LOCKOUT);
udelay(50);
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
regval);
}
@@ -245,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
* valid bit
*/
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum),
&regval);
/* Send a Packet-Not-Accepted/Link-Request-Input-Status control
@@ -259,8 +257,8 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
while (checkcount--) {
udelay(50);
rio_read_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_MNT_RSP_CSR(portnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev,
+ portnum),
&regval);
if (regval & RIO_PORT_N_MNT_RSP_RVAL)
goto exit_es;
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 84c1ee39ddae..2ca00045eb99 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -25,6 +25,456 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/ab8500.h>
+static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
+ /* Main display, u8500 R3 uib */
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ /* Main display, u8500 uib and ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
+ /* Secondary display, ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
+ /* SFH7741 proximity sensor */
+ REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
+ /* BH1780GLS ambient light sensor */
+ REGULATOR_SUPPLY("vcc", "2-0029"),
+ /* lsm303dlh accelerometer */
+ REGULATOR_SUPPLY("vdd", "2-0018"),
+ /* lsm303dlhc accelerometer */
+ REGULATOR_SUPPLY("vdd", "2-0019"),
+ /* lsm303dlh magnetometer */
+ REGULATOR_SUPPLY("vdd", "2-001e"),
+ /* Rohm BU21013 Touchscreen devices */
+ REGULATOR_SUPPLY("avdd", "3-005c"),
+ REGULATOR_SUPPLY("avdd", "3-005d"),
+ /* Synaptics RMI4 Touchscreen device */
+ REGULATOR_SUPPLY("vdd", "3-004b"),
+ /* L3G4200D Gyroscope device */
+ REGULATOR_SUPPLY("vdd", "2-0068"),
+ /* Ambient light sensor device */
+ REGULATOR_SUPPLY("vdd", "3-0029"),
+ /* Pressure sensor device */
+ REGULATOR_SUPPLY("vdd", "2-005c"),
+ /* Cypress TrueTouch Touchscreen device */
+ REGULATOR_SUPPLY("vcpin", "spi8.0"),
+ /* Camera device */
+ REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
+ /* On-board eMMC power */
+ REGULATOR_SUPPLY("vmmc", "sdi4"),
+ /* AB8500 audio codec */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+ /* AB8500 accessory detect 1 */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
+ /* AV8100 HDMI device */
+ REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
+};
+
+static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
+ REGULATOR_SUPPLY("v-SD-STM", "stm"),
+ /* External MMC slot power */
+ REGULATOR_SUPPLY("vmmc", "sdi0"),
+};
+
+static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
+ /* TV-out DENC supply */
+ REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
+ /* Internal general-purpose ADC */
+ REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
+};
+
+static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
+ /* AB8500 audio-codec main supply */
+ REGULATOR_SUPPLY("vaud", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vamic1_consumers[] = {
+ /* AB8500 audio-codec Mic1 supply */
+ REGULATOR_SUPPLY("vamic1", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vamic2_consumers[] = {
+ /* AB8500 audio-codec Mic2 supply */
+ REGULATOR_SUPPLY("vamic2", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vdmic_consumers[] = {
+ /* AB8500 audio-codec DMic supply */
+ REGULATOR_SUPPLY("vdmic", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
+ /* SoC core supply, no device */
+ REGULATOR_SUPPLY("v-intcore", NULL),
+ /* USB Transceiver */
+ REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
+ /* Handled by abx500 clk driver */
+ REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vana_consumers[] = {
+ /* DB8500 DSI */
+ REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
+ /* DB8500 CSI */
+ REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
+};
+
+/* ab8500 regulator register initialization */
+static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
+ /*
+ * VanaRequestCtrl = HP/LP depending on VxRequest
+ * VextSupply1RequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
+ /*
+ * VextSupply2RequestCtrl = HP/LP depending on VxRequest
+ * VextSupply3RequestCtrl = HP/LP depending on VxRequest
+ * Vaux1RequestCtrl = HP/LP depending on VxRequest
+ * Vaux2RequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
+ /*
+ * Vaux3RequestCtrl = HP/LP depending on VxRequest
+ * SwHPReq = Control through SWValid disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
+ /*
+ * VanaSysClkReq1HPValid = disabled
+ * Vaux1SysClkReq1HPValid = disabled
+ * Vaux2SysClkReq1HPValid = disabled
+ * Vaux3SysClkReq1HPValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
+ /*
+ * VextSupply1SysClkReq1HPValid = disabled
+ * VextSupply2SysClkReq1HPValid = disabled
+ * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
+ /*
+ * VanaHwHPReq1Valid = disabled
+ * Vaux1HwHPreq1Valid = disabled
+ * Vaux2HwHPReq1Valid = disabled
+ * Vaux3HwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
+ /*
+ * VextSupply1HwHPReq1Valid = disabled
+ * VextSupply2HwHPReq1Valid = disabled
+ * VextSupply3HwHPReq1Valid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
+ /*
+ * VanaHwHPReq2Valid = disabled
+ * Vaux1HwHPReq2Valid = disabled
+ * Vaux2HwHPReq2Valid = disabled
+ * Vaux3HwHPReq2Valid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
+ /*
+ * VextSupply1HwHPReq2Valid = disabled
+ * VextSupply2HwHPReq2Valid = disabled
+ * VextSupply3HwHPReq2Valid = HWReq2 controlled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
+ /*
+ * VanaSwHPReqValid = disabled
+ * Vaux1SwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
+ /*
+ * Vaux2SwHPReqValid = disabled
+ * Vaux3SwHPReqValid = disabled
+ * VextSupply1SwHPReqValid = disabled
+ * VextSupply2SwHPReqValid = disabled
+ * VextSupply3SwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
+ /*
+ * SysClkReq2Valid1 = SysClkReq2 controlled
+ * SysClkReq3Valid1 = disabled
+ * SysClkReq4Valid1 = SysClkReq4 controlled
+ * SysClkReq5Valid1 = disabled
+ * SysClkReq6Valid1 = SysClkReq6 controlled
+ * SysClkReq7Valid1 = disabled
+ * SysClkReq8Valid1 = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
+ /*
+ * SysClkReq2Valid2 = disabled
+ * SysClkReq3Valid2 = disabled
+ * SysClkReq4Valid2 = disabled
+ * SysClkReq5Valid2 = disabled
+ * SysClkReq6Valid2 = SysClkReq6 controlled
+ * SysClkReq7Valid2 = disabled
+ * SysClkReq8Valid2 = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
+ /*
+ * VTVoutEna = disabled
+ * Vintcore12Ena = disabled
+ * Vintcore12Sel = 1.25 V
+ * Vintcore12LP = inactive (HP)
+ * VTVoutLP = inactive (HP)
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
+ /*
+ * VaudioEna = disabled
+ * VdmicEna = disabled
+ * Vamic1Ena = disabled
+ * Vamic2Ena = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
+ /*
+ * Vamic1_dzout = high-Z when Vamic1 is disabled
+ * Vamic2_dzout = high-Z when Vamic2 is disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
+ /*
+ * VPll = Hw controlled (NOTE! PRCMU bits)
+ * VanaRegu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
+ /*
+ * VrefDDREna = disabled
+ * VrefDDRSleepMode = inactive (no pulldown)
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
+ /*
+ * VextSupply1Regu = force LP
+ * VextSupply2Regu = force OFF
+ * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
+ * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
+ * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
+ */
+ INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
+ /*
+ * Vaux1Regu = force HP
+ * Vaux2Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
+ /*
+ * Vaux3Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
+ /*
+ * Vaux1Sel = 2.8 V
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
+ /*
+ * Vaux2Sel = 2.9 V
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
+ /*
+ * Vaux3Sel = 2.91 V
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
+ /*
+ * VextSupply12LP = disabled (no LP)
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
+ /*
+ * Vaux1Disch = short discharge time
+ * Vaux2Disch = short discharge time
+ * Vaux3Disch = short discharge time
+ * Vintcore12Disch = short discharge time
+ * VTVoutDisch = short discharge time
+ * VaudioDisch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
+ /*
+ * VanaDisch = short discharge time
+ * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
+ * VdmicDisch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
+};
+
+/* AB8500 regulators */
+static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+ /* supplies to the display/camera */
+ [AB8500_LDO_AUX1] = {
+ .supply_regulator = "ab8500-ext-supply3",
+ .constraints = {
+ .name = "V-DISPLAY",
+ .min_uV = 2800000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .boot_on = 1, /* display is on at boot */
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
+ .consumer_supplies = ab8500_vaux1_consumers,
+ },
+ /* supplies to the on-board eMMC */
+ [AB8500_LDO_AUX2] = {
+ .supply_regulator = "ab8500-ext-supply3",
+ .constraints = {
+ .name = "V-eMMC1",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
+ .consumer_supplies = ab8500_vaux2_consumers,
+ },
+ /* supply for VAUX3, supplies to SDcard slots */
+ [AB8500_LDO_AUX3] = {
+ .supply_regulator = "ab8500-ext-supply3",
+ .constraints = {
+ .name = "V-MMC-SD",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
+ .consumer_supplies = ab8500_vaux3_consumers,
+ },
+ /* supply for tvout, gpadc, TVOUT LDO */
+ [AB8500_LDO_TVOUT] = {
+ .constraints = {
+ .name = "V-TVOUT",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vtvout_consumers),
+ .consumer_supplies = ab8500_vtvout_consumers,
+ },
+ /* supply for ab8500-vaudio, VAUDIO LDO */
+ [AB8500_LDO_AUDIO] = {
+ .constraints = {
+ .name = "V-AUD",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
+ .consumer_supplies = ab8500_vaud_consumers,
+ },
+ /* supply for v-anamic1 VAMic1-LDO */
+ [AB8500_LDO_ANAMIC1] = {
+ .constraints = {
+ .name = "V-AMIC1",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
+ .consumer_supplies = ab8500_vamic1_consumers,
+ },
+ /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+ [AB8500_LDO_ANAMIC2] = {
+ .constraints = {
+ .name = "V-AMIC2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
+ .consumer_supplies = ab8500_vamic2_consumers,
+ },
+ /* supply for v-dmic, VDMIC LDO */
+ [AB8500_LDO_DMIC] = {
+ .constraints = {
+ .name = "V-DMIC",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers),
+ .consumer_supplies = ab8500_vdmic_consumers,
+ },
+ /* supply for v-intcore12, VINTCORE12 LDO */
+ [AB8500_LDO_INTCORE] = {
+ .constraints = {
+ .name = "V-INTCORE",
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
+ .consumer_supplies = ab8500_vintcore_consumers,
+ },
+ /* supply for U8500 CSI-DSI, VANA LDO */
+ [AB8500_LDO_ANA] = {
+ .constraints = {
+ .name = "V-CSI-DSI",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
+ .consumer_supplies = ab8500_vana_consumers,
+ },
+};
+
+/* supply for VextSupply3 */
+static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
+ /* SIM supply for 3 V SIM cards */
+ REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
+};
+
+/*
+ * AB8500 external regulators
+ */
+static struct regulator_init_data ab8500_ext_regulators[] = {
+ /* fixed Vbat supplies VSMPS1_EXT_1V8 */
+ [AB8500_EXT_SUPPLY1] = {
+ .constraints = {
+ .name = "ab8500-ext-supply1",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .initial_mode = REGULATOR_MODE_IDLE,
+ .boot_on = 1,
+ .always_on = 1,
+ },
+ },
+ /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
+ [AB8500_EXT_SUPPLY2] = {
+ .constraints = {
+ .name = "ab8500-ext-supply2",
+ .min_uV = 1360000,
+ .max_uV = 1360000,
+ },
+ },
+ /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
+ [AB8500_EXT_SUPPLY3] = {
+ .constraints = {
+ .name = "ab8500-ext-supply3",
+ .min_uV = 3400000,
+ .max_uV = 3400000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_ext_supply3_consumers),
+ .consumer_supplies = ab8500_ext_supply3_consumers,
+ },
+};
+
+static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
+ .reg_init = ab8500_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
+ .regulator = ab8500_regulators,
+ .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .ext_regulator = ab8500_ext_regulators,
+ .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
+};
+
/**
* struct ab8500_ext_regulator_info - ab8500 regulator information
* @dev: device pointer
@@ -344,8 +794,7 @@ static struct of_regulator_match ab8500_ext_regulator_match[] = {
static int ab8500_ext_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *ppdata;
- struct ab8500_regulator_platform_data *pdata;
+ struct ab8500_regulator_platform_data *pdata = &ab8500_regulator_plat_data;
struct device_node *np = pdev->dev.of_node;
struct regulator_config config = { };
int i, err;
@@ -366,18 +815,6 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- ppdata = dev_get_platdata(ab8500->dev);
- if (!ppdata) {
- dev_err(&pdev->dev, "null parent pdata\n");
- return -EINVAL;
- }
-
- pdata = ppdata->regulator;
- if (!pdata) {
- dev_err(&pdev->dev, "null pdata\n");
- return -EINVAL;
- }
-
/* make sure the platform data has the correct size */
if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 666bc3bb52ef..c24524242da2 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -22,6 +22,12 @@
#include <linux/pwm.h>
#include <linux/gpio/consumer.h>
+struct pwm_continuous_reg_data {
+ unsigned int min_uV_dutycycle;
+ unsigned int max_uV_dutycycle;
+ unsigned int dutycycle_unit;
+};
+
struct pwm_regulator_data {
/* Shared */
struct pwm_device *pwm;
@@ -29,6 +35,9 @@ struct pwm_regulator_data {
/* Voltage table */
struct pwm_voltages *duty_cycle_table;
+ /* Continuous mode info */
+ struct pwm_continuous_reg_data continuous;
+
/* regulator descriptor */
struct regulator_desc desc;
@@ -37,9 +46,6 @@ struct pwm_regulator_data {
int state;
- /* Continuous voltage */
- int volt_uV;
-
/* Enable GPIO */
struct gpio_desc *enb_gpio;
};
@@ -52,10 +58,31 @@ struct pwm_voltages {
/**
* Voltage table call-backs
*/
+static void pwm_regulator_init_state(struct regulator_dev *rdev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ struct pwm_state pwm_state;
+ unsigned int dutycycle;
+ int i;
+
+ pwm_get_state(drvdata->pwm, &pwm_state);
+ dutycycle = pwm_get_relative_duty_cycle(&pwm_state, 100);
+
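+	/* Find the voltage table entry matching the current hardware duty cycle */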
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ if (dutycycle == drvdata->duty_cycle_table[i].dutycycle) {
+ drvdata->state = i;
+ return;
+ }
+ }
+}
+
static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ if (drvdata->state < 0)
+ pwm_regulator_init_state(rdev);
+
return drvdata->state;
}
@@ -63,16 +90,14 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
- struct pwm_args pargs;
- int dutycycle;
+ struct pwm_state pstate;
int ret;
- pwm_get_args(drvdata->pwm, &pargs);
-
- dutycycle = (pargs.period *
- drvdata->duty_cycle_table[selector].dutycycle) / 100;
+ pwm_init_state(drvdata->pwm, &pstate);
+ pwm_set_relative_duty_cycle(&pstate,
+ drvdata->duty_cycle_table[selector].dutycycle, 100);
- ret = pwm_config(drvdata->pwm, dutycycle, pargs.period);
+ ret = pwm_apply_state(drvdata->pwm, &pstate);
if (ret) {
dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -129,57 +154,90 @@ static int pwm_regulator_is_enabled(struct regulator_dev *dev)
static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
+ unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
+ unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
+ int min_uV = rdev->constraints->min_uV;
+ int max_uV = rdev->constraints->max_uV;
+ int diff_uV = max_uV - min_uV;
+ struct pwm_state pstate;
+ unsigned int diff_duty;
+ unsigned int voltage;
+
+ pwm_get_state(drvdata->pwm, &pstate);
+
+ voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+
+ /*
+ * The dutycycle for min_uV might be greater than the one for max_uV.
+	 * This happens when the user needs an inverted polarity, but the
+	 * PWM device does not support inverting it in hardware.
+ */
+ if (max_uV_duty < min_uV_duty) {
+ voltage = min_uV_duty - voltage;
+ diff_duty = min_uV_duty - max_uV_duty;
+ } else {
+ voltage = voltage - min_uV_duty;
+ diff_duty = max_uV_duty - min_uV_duty;
+ }
- return drvdata->volt_uV;
+ voltage = DIV_ROUND_CLOSEST_ULL((u64)voltage * diff_uV, diff_duty);
+
+ return voltage + min_uV;
}
static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
+ int req_min_uV, int req_max_uV,
+ unsigned int *selector)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
+ unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
+ unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
unsigned int ramp_delay = rdev->constraints->ramp_delay;
- struct pwm_args pargs;
- unsigned int req_diff = min_uV - rdev->constraints->min_uV;
- unsigned int diff;
- unsigned int duty_pulse;
- u64 req_period;
- u32 rem;
+ int min_uV = rdev->constraints->min_uV;
+ int max_uV = rdev->constraints->max_uV;
+ int diff_uV = max_uV - min_uV;
+ struct pwm_state pstate;
int old_uV = pwm_regulator_get_voltage(rdev);
+ unsigned int diff_duty;
+ unsigned int dutycycle;
int ret;
- pwm_get_args(drvdata->pwm, &pargs);
- diff = rdev->constraints->max_uV - rdev->constraints->min_uV;
+ pwm_init_state(drvdata->pwm, &pstate);
- /* First try to find out if we get the iduty cycle time which is
- * factor of PWM period time. If (request_diff_to_min * pwm_period)
- * is perfect divided by voltage_range_diff then it is possible to
- * get duty cycle time which is factor of PWM period. This will help
- * to get output voltage nearer to requested value as there is no
- * calculation loss.
+ /*
+ * The dutycycle for min_uV might be greater than the one for max_uV.
+ * This happens when the user needs an inverted polarity, but the
+ * PWM device does not support inverting it in hardware.
*/
- req_period = req_diff * pargs.period;
- div_u64_rem(req_period, diff, &rem);
- if (!rem) {
- do_div(req_period, diff);
- duty_pulse = (unsigned int)req_period;
- } else {
- duty_pulse = (pargs.period / 100) * ((req_diff * 100) / diff);
- }
+ if (max_uV_duty < min_uV_duty)
+ diff_duty = min_uV_duty - max_uV_duty;
+ else
+ diff_duty = max_uV_duty - min_uV_duty;
+
+ dutycycle = DIV_ROUND_CLOSEST_ULL((u64)(req_min_uV - min_uV) *
+ diff_duty,
+ diff_uV);
+
+ if (max_uV_duty < min_uV_duty)
+ dutycycle = min_uV_duty - dutycycle;
+ else
+ dutycycle = min_uV_duty + dutycycle;
+
+ pwm_set_relative_duty_cycle(&pstate, dutycycle, duty_unit);
- ret = pwm_config(drvdata->pwm, duty_pulse, pargs.period);
+ ret = pwm_apply_state(drvdata->pwm, &pstate);
if (ret) {
dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
- drvdata->volt_uV = min_uV;
-
if ((ramp_delay == 0) || !pwm_regulator_is_enabled(rdev))
return 0;
/* Ramp delay is in uV/uS. Adjust to uS and delay */
- ramp_delay = DIV_ROUND_UP(abs(min_uV - old_uV), ramp_delay);
+ ramp_delay = DIV_ROUND_UP(abs(req_min_uV - old_uV), ramp_delay);
usleep_range(ramp_delay, ramp_delay + DIV_ROUND_UP(ramp_delay, 10));
return 0;
@@ -239,6 +297,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
return ret;
}
+ drvdata->state = -EINVAL;
drvdata->duty_cycle_table = duty_cycle_table;
memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
sizeof(drvdata->ops));
@@ -251,11 +310,28 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
static int pwm_regulator_init_continuous(struct platform_device *pdev,
struct pwm_regulator_data *drvdata)
{
+ u32 dutycycle_range[2] = { 0, 100 };
+ u32 dutycycle_unit = 100;
+
memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops,
sizeof(drvdata->ops));
drvdata->desc.ops = &drvdata->ops;
drvdata->desc.continuous_voltage_range = true;
+ of_property_read_u32_array(pdev->dev.of_node,
+ "pwm-dutycycle-range",
+ dutycycle_range, 2);
+ of_property_read_u32(pdev->dev.of_node, "pwm-dutycycle-unit",
+ &dutycycle_unit);
+
+ if (dutycycle_range[0] > dutycycle_unit ||
+ dutycycle_range[1] > dutycycle_unit)
+ return -EINVAL;
+
+ drvdata->continuous.dutycycle_unit = dutycycle_unit;
+ drvdata->continuous.min_uV_dutycycle = dutycycle_range[0];
+ drvdata->continuous.max_uV_dutycycle = dutycycle_range[1];
+
return 0;
}
@@ -316,11 +392,9 @@ static int pwm_regulator_probe(struct platform_device *pdev)
return ret;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(drvdata->pwm);
+ ret = pwm_adjust_config(drvdata->pwm);
+ if (ret)
+ return ret;
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
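For reference, the continuous mode introduced above maps duty cycle to voltage linearly between the constraint min_uV and max_uV over the range given by pwm-dutycycle-range, with the direction reversed when the min_uV duty cycle is larger than the max_uV one. Below is a minimal sketch of the same arithmetic, assuming a hypothetical regulator with min_uV = 1000000, max_uV = 1800000 and the default range <0 100> with unit 100:

#include <stdio.h>

/* Illustrative only: mirrors the math in pwm_regulator_get_voltage(). */
static int duty_to_uV(unsigned int duty, unsigned int min_duty,
                      unsigned int max_duty, int min_uV, int max_uV)
{
        unsigned long long rel;
        unsigned int diff_duty;
        int diff_uV = max_uV - min_uV;

        if (max_duty < min_duty) {              /* inverted mapping */
                rel = min_duty - duty;
                diff_duty = min_duty - max_duty;
        } else {
                rel = duty - min_duty;
                diff_duty = max_duty - min_duty;
        }

        /* rounded division, like DIV_ROUND_CLOSEST_ULL in the driver */
        return (int)((rel * diff_uV + diff_duty / 2) / diff_duty) + min_uV;
}

int main(void)
{
        /* 50% duty over 1.0V..1.8V -> prints 1400000 uV */
        printf("%d\n", duty_to_uV(50, 0, 100, 1000000, 1800000));
        return 0;
}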
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 24791886219a..2a1b2c7d8f2c 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -349,13 +349,12 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
void *ptr;
int ret;
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
- ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
if (!ptr) {
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
return -ENOMEM;
@@ -372,7 +371,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
else if (ret < 0)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
- dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);
+ dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
return ret < 0 ? ret : 0;
}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 0b2733db0e9e..4be1b8c21f6f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -12,8 +12,22 @@ menuconfig RESET_CONTROLLER
If unsure, say no.
+if RESET_CONTROLLER
+
config RESET_OXNAS
bool
+config TI_SYSCON_RESET
+ tristate "TI SYSCON Reset Driver"
+ depends on HAS_IOMEM
+ select MFD_SYSCON
+ help
+ This enables the reset driver support for TI devices with
+ memory-mapped reset registers as part of a syscon device node. If
+ you wish to use the reset framework for such memory-mapped devices,
+ say Y here. Otherwise, say N.
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
+
+endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index f173fc3847b4..5d65a93d3c43 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -3,9 +3,11 @@ obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_ARCH_MESON) += reset-meson.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
+obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 72b32bd15549..395dc9ce492e 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -93,6 +93,43 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev)
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);
+static void devm_reset_controller_release(struct device *dev, void *res)
+{
+ reset_controller_unregister(*(struct reset_controller_dev **)res);
+}
+
+/**
+ * devm_reset_controller_register - resource managed reset_controller_register()
+ * @dev: device that is registering this reset controller
+ * @rcdev: a pointer to the initialized reset controller device
+ *
+ * Managed reset_controller_register(). For reset controllers registered by
+ * this function, reset_controller_unregister() is automatically called on
+ * driver detach. See reset_controller_register() for more information.
+ */
+int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ struct reset_controller_dev **rcdevp;
+ int ret;
+
+ rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
+ GFP_KERNEL);
+ if (!rcdevp)
+ return -ENOMEM;
+
+ ret = reset_controller_register(rcdev);
+ if (!ret) {
+ *rcdevp = rcdev;
+ devres_add(dev, rcdevp);
+ } else {
+ devres_free(rcdevp);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_reset_controller_register);
+
/**
* reset_control_reset - reset the controlled device
* @rstc: reset controller
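The managed registration helper added here lets provider drivers drop their .remove callbacks, as the conversions further below do. A minimal sketch of a hypothetical provider probe using it (foo_reset and foo_reset_ops are made-up names):

static int foo_reset_probe(struct platform_device *pdev)
{
        struct foo_reset *data;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->rcdev.owner = THIS_MODULE;
        data->rcdev.ops = &foo_reset_ops;       /* assumed ops table */
        data->rcdev.of_node = pdev->dev.of_node;
        data->rcdev.nr_resets = 32;

        /* unregistered automatically on driver detach */
        return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}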
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 8f55fd4a2630..35ce53edabf9 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -1,7 +1,8 @@
/*
* Hisilicon Hi6220 reset controller driver
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2015-2016 Hisilicon Limited.
*
* Author: Feng Chen <puck.chen@hisilicon.com>
*
@@ -15,81 +16,130 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
-#define ASSERT_OFFSET 0x300
-#define DEASSERT_OFFSET 0x304
-#define MAX_INDEX 0x509
+#define PERIPH_ASSERT_OFFSET 0x300
+#define PERIPH_DEASSERT_OFFSET 0x304
+#define PERIPH_MAX_INDEX 0x509
+
+#define SC_MEDIA_RSTEN 0x052C
+#define SC_MEDIA_RSTDIS 0x0530
+#define MEDIA_MAX_INDEX 8
#define to_reset_data(x) container_of(x, struct hi6220_reset_data, rc_dev)
+enum hi6220_reset_ctrl_type {
+ PERIPHERAL,
+ MEDIA,
+};
+
struct hi6220_reset_data {
- void __iomem *assert_base;
- void __iomem *deassert_base;
- struct reset_controller_dev rc_dev;
+ struct reset_controller_dev rc_dev;
+ struct regmap *regmap;
};
-static int hi6220_reset_assert(struct reset_controller_dev *rc_dev,
- unsigned long idx)
+static int hi6220_peripheral_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ u32 bank = idx >> 8;
+ u32 offset = idx & 0xff;
+ u32 reg = PERIPH_ASSERT_OFFSET + bank * 0x10;
- int bank = idx >> 8;
- int offset = idx & 0xff;
+ return regmap_write(regmap, reg, BIT(offset));
+}
- writel(BIT(offset), data->assert_base + (bank * 0x10));
+static int hi6220_peripheral_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ u32 bank = idx >> 8;
+ u32 offset = idx & 0xff;
+ u32 reg = PERIPH_DEASSERT_OFFSET + bank * 0x10;
- return 0;
+ return regmap_write(regmap, reg, BIT(offset));
}
-static int hi6220_reset_deassert(struct reset_controller_dev *rc_dev,
- unsigned long idx)
+static const struct reset_control_ops hi6220_peripheral_reset_ops = {
+ .assert = hi6220_peripheral_assert,
+ .deassert = hi6220_peripheral_deassert,
+};
+
+static int hi6220_media_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
- int bank = idx >> 8;
- int offset = idx & 0xff;
+ return regmap_write(regmap, SC_MEDIA_RSTEN, BIT(idx));
+}
- writel(BIT(offset), data->deassert_base + (bank * 0x10));
+static int hi6220_media_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
- return 0;
+ return regmap_write(regmap, SC_MEDIA_RSTDIS, BIT(idx));
}
-static const struct reset_control_ops hi6220_reset_ops = {
- .assert = hi6220_reset_assert,
- .deassert = hi6220_reset_deassert,
+static const struct reset_control_ops hi6220_media_reset_ops = {
+ .assert = hi6220_media_assert,
+ .deassert = hi6220_media_deassert,
};
static int hi6220_reset_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ enum hi6220_reset_ctrl_type type;
struct hi6220_reset_data *data;
- struct resource *res;
- void __iomem *src_base;
+ struct regmap *regmap;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- src_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(src_base))
- return PTR_ERR(src_base);
+ type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get reset controller regmap\n");
+ return PTR_ERR(regmap);
+ }
- data->assert_base = src_base + ASSERT_OFFSET;
- data->deassert_base = src_base + DEASSERT_OFFSET;
- data->rc_dev.nr_resets = MAX_INDEX;
- data->rc_dev.ops = &hi6220_reset_ops;
- data->rc_dev.of_node = pdev->dev.of_node;
+ data->regmap = regmap;
+ data->rc_dev.of_node = np;
+ if (type == MEDIA) {
+ data->rc_dev.ops = &hi6220_media_reset_ops;
+ data->rc_dev.nr_resets = MEDIA_MAX_INDEX;
+ } else {
+ data->rc_dev.ops = &hi6220_peripheral_reset_ops;
+ data->rc_dev.nr_resets = PERIPH_MAX_INDEX;
+ }
return reset_controller_register(&data->rc_dev);
}
static const struct of_device_id hi6220_reset_match[] = {
- { .compatible = "hisilicon,hi6220-sysctrl" },
- { },
+ {
+ .compatible = "hisilicon,hi6220-sysctrl",
+ .data = (void *)PERIPHERAL,
+ },
+ {
+ .compatible = "hisilicon,hi6220-mediactrl",
+ .data = (void *)MEDIA,
+ },
+ { /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, hi6220_reset_match);
static struct platform_driver hi6220_reset_driver = {
.probe = hi6220_reset_probe,
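As a worked example of the specifier decode in hi6220_peripheral_assert()/hi6220_peripheral_deassert() above (0x509, the maximum peripheral index, is used purely for illustration):

#include <stdio.h>

#define PERIPH_ASSERT_OFFSET 0x300

int main(void)
{
        unsigned long idx = 0x509;              /* example reset specifier */
        unsigned int bank = idx >> 8;           /* 5 */
        unsigned int offset = idx & 0xff;       /* 9 */
        unsigned int reg = PERIPH_ASSERT_OFFSET + bank * 0x10;

        /* the driver then issues regmap_write(regmap, reg, BIT(offset)) */
        printf("assert: set bit %u in register 0x%03x\n", offset, reg);
        return 0;
}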
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index ccb940a8d9fb..16d410cd6146 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -112,7 +112,7 @@ static int ath79_reset_probe(struct platform_device *pdev)
ath79_reset->rcdev.of_reset_n_cells = 1;
ath79_reset->rcdev.nr_resets = 32;
- err = reset_controller_register(&ath79_reset->rcdev);
+ err = devm_reset_controller_register(&pdev->dev, &ath79_reset->rcdev);
if (err)
return err;
@@ -131,7 +131,6 @@ static int ath79_reset_remove(struct platform_device *pdev)
struct ath79_reset *ath79_reset = platform_get_drvdata(pdev);
unregister_restart_handler(&ath79_reset->restart_nb);
- reset_controller_unregister(&ath79_reset->rcdev);
return 0;
}
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
new file mode 100644
index 000000000000..c32f11a30c5f
--- /dev/null
+++ b/drivers/reset/reset-meson.c
@@ -0,0 +1,136 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define REG_COUNT 8
+#define BITS_PER_REG 32
+
+struct meson_reset {
+ void __iomem *reg_base;
+ struct reset_controller_dev rcdev;
+};
+
+static int meson_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct meson_reset *data =
+ container_of(rcdev, struct meson_reset, rcdev);
+ unsigned int bank = id / BITS_PER_REG;
+ unsigned int offset = id % BITS_PER_REG;
+ void __iomem *reg_addr = data->reg_base + (bank << 2);
+
+ if (bank >= REG_COUNT)
+ return -EINVAL;
+
+ writel(BIT(offset), reg_addr);
+
+ return 0;
+}
+
+static const struct reset_control_ops meson_reset_ops = {
+ .reset = meson_reset_reset,
+};
+
+static const struct of_device_id meson_reset_dt_ids[] = {
+ { .compatible = "amlogic,meson8b-reset", },
+ { .compatible = "amlogic,meson-gxbb-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
+
+static int meson_reset_probe(struct platform_device *pdev)
+{
+ struct meson_reset *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_base))
+ return PTR_ERR(data->reg_base);
+
+ platform_set_drvdata(pdev, data);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG;
+ data->rcdev.ops = &meson_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static struct platform_driver meson_reset_driver = {
+ .probe = meson_reset_probe,
+ .driver = {
+ .name = "meson_reset",
+ .of_match_table = meson_reset_dt_ids,
+ },
+};
+
+module_platform_driver(meson_reset_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index c60fb2dace3e..944980572f79 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -112,21 +112,11 @@ static int oxnas_reset_probe(struct platform_device *pdev)
data->rcdev.ops = &oxnas_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&data->rcdev);
-}
-
-static int oxnas_reset_remove(struct platform_device *pdev)
-{
- struct oxnas_reset *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static struct platform_driver oxnas_reset_driver = {
.probe = oxnas_reset_probe,
- .remove = oxnas_reset_remove,
.driver = {
.name = "oxnas-reset",
.of_match_table = oxnas_reset_dt_ids,
diff --git a/drivers/reset/reset-pistachio.c b/drivers/reset/reset-pistachio.c
index 72a97a15a4c8..bbc4c06dd33b 100644
--- a/drivers/reset/reset-pistachio.c
+++ b/drivers/reset/reset-pistachio.c
@@ -121,16 +121,7 @@ static int pistachio_reset_probe(struct platform_device *pdev)
rd->rcdev.ops = &pistachio_reset_ops;
rd->rcdev.of_node = np;
- return reset_controller_register(&rd->rcdev);
-}
-
-static int pistachio_reset_remove(struct platform_device *pdev)
-{
- struct pistachio_reset_data *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(dev, &rd->rcdev);
}
static const struct of_device_id pistachio_reset_dt_ids[] = {
@@ -141,7 +132,6 @@ MODULE_DEVICE_TABLE(of, pistachio_reset_dt_ids);
static struct platform_driver pistachio_reset_driver = {
.probe = pistachio_reset_probe,
- .remove = pistachio_reset_remove,
.driver = {
.name = "pistachio-reset",
.of_match_table = pistachio_reset_dt_ids,
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index cd05a7032b17..12add9b0fa49 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -134,16 +134,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
data->rcdev.ops = &socfpga_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&data->rcdev);
-}
-
-static int socfpga_reset_remove(struct platform_device *pdev)
-{
- struct socfpga_reset_data *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(dev, &data->rcdev);
}
static const struct of_device_id socfpga_reset_dt_ids[] = {
@@ -153,7 +144,6 @@ static const struct of_device_id socfpga_reset_dt_ids[] = {
static struct platform_driver socfpga_reset_driver = {
.probe = socfpga_reset_probe,
- .remove = socfpga_reset_remove,
.driver = {
.name = "socfpga-reset",
.of_match_table = socfpga_reset_dt_ids,
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 677f86555212..3080190b3f90 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -165,21 +165,11 @@ static int sunxi_reset_probe(struct platform_device *pdev)
data->rcdev.ops = &sunxi_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&data->rcdev);
-}
-
-static int sunxi_reset_remove(struct platform_device *pdev)
-{
- struct sunxi_reset_data *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static struct platform_driver sunxi_reset_driver = {
.probe = sunxi_reset_probe,
- .remove = sunxi_reset_remove,
.driver = {
.name = "sunxi-reset",
.of_match_table = sunxi_reset_dt_ids,
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
new file mode 100644
index 000000000000..47f0ffd3b013
--- /dev/null
+++ b/drivers/reset/reset-ti-syscon.c
@@ -0,0 +1,237 @@
+/*
+ * TI SYSCON regmap reset driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/reset/ti-syscon.h>
+
+/**
+ * struct ti_syscon_reset_control - reset control structure
+ * @assert_offset: reset assert control register offset from syscon base
+ * @assert_bit: reset assert bit in the reset assert control register
+ * @deassert_offset: reset deassert control register offset from syscon base
+ * @deassert_bit: reset deassert bit in the reset deassert control register
+ * @status_offset: reset status register offset from syscon base
+ * @status_bit: reset status bit in the reset status register
+ * @flags: reset flag indicating how the (de)assert and status are handled
+ */
+struct ti_syscon_reset_control {
+ unsigned int assert_offset;
+ unsigned int assert_bit;
+ unsigned int deassert_offset;
+ unsigned int deassert_bit;
+ unsigned int status_offset;
+ unsigned int status_bit;
+ u32 flags;
+};
+
+/**
+ * struct ti_syscon_reset_data - reset controller information structure
+ * @rcdev: reset controller entity
+ * @regmap: regmap handle containing the memory-mapped reset registers
+ * @controls: array of reset controls
+ * @nr_controls: number of controls in control array
+ */
+struct ti_syscon_reset_data {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+ struct ti_syscon_reset_control *controls;
+ unsigned int nr_controls;
+};
+
+#define to_ti_syscon_reset_data(rcdev) \
+ container_of(rcdev, struct ti_syscon_reset_data, rcdev)
+
+/**
+ * ti_syscon_reset_assert() - assert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be asserted
+ *
+ * This function implements the reset driver op to assert a device's reset.
+ * This asserts the reset in a manner prescribed by the reset flags.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
+ struct ti_syscon_reset_control *control;
+ unsigned int mask, value;
+
+ if (id >= data->nr_controls)
+ return -EINVAL;
+
+ control = &data->controls[id];
+
+ if (control->flags & ASSERT_NONE)
+ return -ENOTSUPP; /* assert not supported for this reset */
+
+ mask = BIT(control->assert_bit);
+ value = (control->flags & ASSERT_SET) ? mask : 0x0;
+
+ return regmap_update_bits(data->regmap, control->assert_offset, mask, value);
+}
+
+/**
+ * ti_syscon_reset_deassert() - deassert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of reset to be deasserted
+ *
+ * This function implements the reset driver op to deassert a device's reset.
+ * This deasserts the reset in a manner prescribed by the reset flags.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
+ struct ti_syscon_reset_control *control;
+ unsigned int mask, value;
+
+ if (id >= data->nr_controls)
+ return -EINVAL;
+
+ control = &data->controls[id];
+
+ if (control->flags & DEASSERT_NONE)
+ return -ENOTSUPP; /* deassert not supported for this reset */
+
+ mask = BIT(control->deassert_bit);
+ value = (control->flags & DEASSERT_SET) ? mask : 0x0;
+
+ return regmap_update_bits(data->regmap, control->deassert_offset, mask, value);
+}
+
+/**
+ * ti_syscon_reset_status() - check device reset status
+ * @rcdev: reset controller entity
+ * @id: ID of the reset for which the status is being requested
+ *
+ * This function implements the reset driver op to return the status of a
+ * device's reset.
+ *
+ * Return: 0 if reset is deasserted, true if reset is asserted, else a
+ * corresponding error value
+ */
+static int ti_syscon_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
+ struct ti_syscon_reset_control *control;
+ unsigned int reset_state;
+ int ret;
+
+ if (id >= data->nr_controls)
+ return -EINVAL;
+
+ control = &data->controls[id];
+
+ if (control->flags & STATUS_NONE)
+ return -ENOTSUPP; /* status not supported for this reset */
+
+ ret = regmap_read(data->regmap, control->status_offset, &reset_state);
+ if (ret)
+ return ret;
+
+ return (reset_state & BIT(control->status_bit)) &&
+ (control->flags & STATUS_SET);
+}
+
+static struct reset_control_ops ti_syscon_reset_ops = {
+ .assert = ti_syscon_reset_assert,
+ .deassert = ti_syscon_reset_deassert,
+ .status = ti_syscon_reset_status,
+};
+
+static int ti_syscon_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ti_syscon_reset_data *data;
+ struct regmap *regmap;
+ const __be32 *list;
+ struct ti_syscon_reset_control *controls;
+ int size, nr_controls, i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ list = of_get_property(np, "ti,reset-bits", &size);
+ if (!list || (size / sizeof(*list)) % 7 != 0) {
+ dev_err(dev, "invalid DT reset description\n");
+ return -EINVAL;
+ }
+
+ nr_controls = (size / sizeof(*list)) / 7;
+ controls = devm_kzalloc(dev, nr_controls * sizeof(*controls), GFP_KERNEL);
+ if (!controls)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_controls; i++) {
+ controls[i].assert_offset = be32_to_cpup(list++);
+ controls[i].assert_bit = be32_to_cpup(list++);
+ controls[i].deassert_offset = be32_to_cpup(list++);
+ controls[i].deassert_bit = be32_to_cpup(list++);
+ controls[i].status_offset = be32_to_cpup(list++);
+ controls[i].status_bit = be32_to_cpup(list++);
+ controls[i].flags = be32_to_cpup(list++);
+ }
+
+ data->rcdev.ops = &ti_syscon_reset_ops;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.of_node = np;
+ data->rcdev.nr_resets = nr_controls;
+ data->regmap = regmap;
+ data->controls = controls;
+ data->nr_controls = nr_controls;
+
+ platform_set_drvdata(pdev, data);
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static const struct of_device_id ti_syscon_reset_of_match[] = {
+ { .compatible = "ti,syscon-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_syscon_reset_of_match);
+
+static struct platform_driver ti_syscon_reset_driver = {
+ .probe = ti_syscon_reset_probe,
+ .driver = {
+ .name = "ti-syscon-reset",
+ .of_match_table = ti_syscon_reset_of_match,
+ },
+};
+module_platform_driver(ti_syscon_reset_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("TI SYSCON Regmap Reset Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index a7e87bc45885..138f2f205662 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -122,16 +122,7 @@ static int zynq_reset_probe(struct platform_device *pdev)
priv->rcdev.ops = &zynq_reset_ops;
priv->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&priv->rcdev);
-}
-
-static int zynq_reset_remove(struct platform_device *pdev)
-{
- struct zynq_reset_data *priv = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&priv->rcdev);
-
- return 0;
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id zynq_reset_dt_ids[] = {
@@ -141,7 +132,6 @@ static const struct of_device_id zynq_reset_dt_ids[] = {
static struct platform_driver zynq_reset_driver = {
.probe = zynq_reset_probe,
- .remove = zynq_reset_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = zynq_reset_dt_ids,
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index f8c15a37fb35..613178553612 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -2,7 +2,6 @@ if ARCH_STI
config STI_RESET_SYSCFG
bool
- select RESET_CONTROLLER
config STIH415_RESET
bool
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 18639e0cb6e2..e215f50794b6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -5,6 +5,10 @@
config RTC_LIB
bool
+config RTC_MC146818_LIB
+ bool
+ select RTC_LIB
+
menuconfig RTC_CLASS
bool "Real Time Clock"
default n
@@ -574,10 +578,10 @@ config RTC_DRV_EM3027
will be called rtc-em3027.
config RTC_DRV_RV8803
- tristate "Micro Crystal RV8803"
+ tristate "Micro Crystal RV8803, Epson RX8900"
help
- If you say yes here you get support for the Micro Crystal
- RV8803 RTC chips.
+ If you say yes here you get support for the Micro Crystal RV8803 and
+ Epson RX8900 RTC chips.
This driver can also be built as a module. If so, the module
will be called rtc-rv8803.
@@ -670,6 +674,18 @@ config RTC_DRV_DS1390
This driver can also be built as a module. If so, the module
will be called rtc-ds1390.
+config RTC_DRV_MAX6916
+ tristate "Maxim MAX6916"
+ help
+ If you say yes here you will get support for the
+ Maxim MAX6916 SPI RTC chip.
+
+ This driver only supports the RTC feature, and not other chip
+ features such as alarms.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max6916.
+
config RTC_DRV_R9701
tristate "Epson RTC-9701JE"
help
@@ -795,8 +811,9 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
- depends on X86 || ARM || M32R || PPC || MIPS || SPARC64
+ depends on X86 || ARM || M32R || PPC || MIPS || SPARC64 || MN10300
default y if X86
+ select RTC_MC146818_LIB
help
Say "yes" here to get direct support for the real time clock
found in every PC or ACPI-based system, and some other boards.
@@ -815,6 +832,7 @@ config RTC_DRV_CMOS
config RTC_DRV_ALPHA
bool "Alpha PC-style CMOS"
depends on ALPHA
+ select RTC_MC146818_LIB
default y
help
Direct support for the real-time clock found on every Alpha
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index ea2833723fa9..7cf7ad559c79 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_RTC_LIB) += rtc-lib.o
obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
+obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o
rtc-core-y := class.o interface.o
ifdef CONFIG_RTC_DRV_EFI
@@ -85,6 +86,7 @@ obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MAX6916) += rtc-max6916.o
obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
obj-$(CONFIG_RTC_DRV_MAX8907) += rtc-max8907.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 9ef5f6f89f98..84a52db9b05f 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -104,7 +104,17 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *al
else if (!rtc->ops->read_alarm)
err = -EINVAL;
else {
- memset(alarm, 0, sizeof(struct rtc_wkalrm));
+ alarm->enabled = 0;
+ alarm->pending = 0;
+ alarm->time.tm_sec = -1;
+ alarm->time.tm_min = -1;
+ alarm->time.tm_hour = -1;
+ alarm->time.tm_mday = -1;
+ alarm->time.tm_mon = -1;
+ alarm->time.tm_year = -1;
+ alarm->time.tm_wday = -1;
+ alarm->time.tm_yday = -1;
+ alarm->time.tm_isdst = -1;
err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
}
@@ -383,7 +393,7 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
rtc->aie_timer.period = ktime_set(0, 0);
- /* Alarm has to be enabled & in the futrure for us to enqueue it */
+ /* Alarm has to be enabled & in the future for us to enqueue it */
if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 <
rtc->aie_timer.node.expires.tv64)) {
@@ -395,8 +405,6 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
-
-
int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
int err = mutex_lock_interruptible(&rtc->ops_lock);
@@ -748,9 +756,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
*/
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
+ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+ struct rtc_time tm;
+ ktime_t now;
+
timer->enabled = 1;
+ __rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+
+ /* Skip over expired timers */
+ while (next) {
+ if (next->expires.tv64 >= now.tv64)
+ break;
+ next = timerqueue_iterate_next(next);
+ }
+
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+ if (!next) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index ba0d61934d35..fea9a60b06cf 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -643,17 +643,15 @@ static int abx80x_probe(struct i2c_client *client,
return err;
}
- err = devm_add_action(&client->dev, rtc_calib_remove_sysfs_group,
- &client->dev);
- if (err) {
- rtc_calib_remove_sysfs_group(&client->dev);
+ err = devm_add_action_or_reset(&client->dev,
+ rtc_calib_remove_sysfs_group,
+ &client->dev);
+ if (err)
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n",
err);
- return err;
- }
- return 0;
+ return err;
}
static int abx80x_remove(struct i2c_client *client)
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 355fdb97a006..5219916ce11d 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -343,7 +343,6 @@ static struct platform_driver asm9260_rtc_driver = {
.remove = asm9260_rtc_remove,
.driver = {
.name = "asm9260-rtc",
- .owner = THIS_MODULE,
.of_match_table = asm9260_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 99732e6f8c3b..7418a763ce52 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -375,6 +375,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
+ spin_lock_init(&rtc->lock);
rtc->irq = irq;
/* platform setup code should have handled this; sigh */
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index fbe9c72438e1..43745cac0141 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -43,7 +43,7 @@
#include <linux/of_platform.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
-#include <asm-generic/rtc.h>
+#include <linux/mc146818rtc.h>
struct cmos_rtc {
struct rtc_device *rtc;
@@ -190,10 +190,10 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
/* REVISIT: if the clock has a "century" register, use
- * that instead of the heuristic in get_rtc_time().
+ * that instead of the heuristic in mc146818_get_time().
* That'll make Y3K compatibility (year > 2070) easy!
*/
- get_rtc_time(t);
+ mc146818_get_time(t);
return 0;
}
@@ -205,7 +205,7 @@ static int cmos_set_time(struct device *dev, struct rtc_time *t)
* takes effect exactly 500ms after we write the register.
* (Also queueing and other delays before we get this far.)
*/
- return set_rtc_time(t);
+ return mc146818_set_time(t);
}
static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -220,8 +220,6 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
* Some also support day and month, for alarms up to a year in
* the future.
*/
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
spin_lock_irq(&rtc_lock);
t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
@@ -272,7 +270,6 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
}
}
}
- t->time.tm_year = -1;
t->enabled = !!(rtc_control & RTC_AIE);
t->pending = 0;
@@ -630,7 +627,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
address_space = 64;
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
|| defined(__sparc__) || defined(__mips__) \
- || defined(__powerpc__)
+ || defined(__powerpc__) || defined(CONFIG_MN10300)
address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -1142,14 +1139,14 @@ static __init void cmos_of_init(struct platform_device *pdev)
if (val)
CMOS_WRITE(be32_to_cpup(val), RTC_FREQ_SELECT);
- get_rtc_time(&time);
+ cmos_read_time(&pdev->dev, &time);
ret = rtc_valid_tm(&time);
if (ret) {
struct rtc_time def_time = {
.tm_year = 1,
.tm_mday = 1,
};
- set_rtc_time(&def_time);
+ cmos_set_time(&pdev->dev, &def_time);
}
}
#else
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index a20bcf0e33cd..4273377562ec 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -85,6 +85,7 @@ static int da9052_read_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
rtc_tm->tm_mday = v[0][2] & DA9052_RTC_DAY;
rtc_tm->tm_hour = v[0][1] & DA9052_RTC_HOUR;
rtc_tm->tm_min = v[0][0] & DA9052_RTC_MIN;
+ rtc_tm->tm_sec = 0;
ret = rtc_valid_tm(rtc_tm);
return ret;
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 7ec0872d5e3b..678af8648c45 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -74,6 +74,7 @@ static int da9055_read_alarm(struct da9055 *da9055, struct rtc_time *rtc_tm)
rtc_tm->tm_mday = v[2] & DA9055_RTC_ALM_DAY;
rtc_tm->tm_hour = v[1] & DA9055_RTC_ALM_HOUR;
rtc_tm->tm_min = v[0] & DA9055_RTC_ALM_MIN;
+ rtc_tm->tm_sec = 0;
return rtc_valid_tm(rtc_tm);
}
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index c5432bf64e1c..dba60c1dfce2 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -388,6 +388,8 @@ static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
u8 day0, day1;
unsigned long flags;
+ alm->time.tm_sec = 0;
+
spin_lock_irqsave(&davinci_rtc_lock, flags);
davinci_rtcss_calendar_wait(davinci_rtc);
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 756e509f6ed2..ef75c349dff9 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -16,7 +16,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
-#include <linux/ds1286.h>
+#include <linux/rtc/ds1286.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 8e41c4613e51..72b22935eb62 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -313,13 +313,6 @@ static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->time.tm_sec = bcd2bin(buf[DS1305_SEC]);
alm->time.tm_min = bcd2bin(buf[DS1305_MIN]);
alm->time.tm_hour = bcd2hour(buf[DS1305_HOUR]);
- alm->time.tm_mday = -1;
- alm->time.tm_mon = -1;
- alm->time.tm_year = -1;
- /* next three fields are unused by Linux */
- alm->time.tm_wday = -1;
- alm->time.tm_mday = -1;
- alm->time.tm_isdst = -1;
return 0;
}
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 821d9c089cdb..8e1c5cb6ece6 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -482,11 +482,6 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_min = bcd2bin(ds1307->regs[1] & 0x7f);
t->time.tm_hour = bcd2bin(ds1307->regs[2] & 0x3f);
t->time.tm_mday = bcd2bin(ds1307->regs[3] & 0x3f);
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
- t->time.tm_wday = -1;
- t->time.tm_yday = -1;
- t->time.tm_isdst = -1;
/* ... and status */
t->enabled = !!(ds1307->regs[7] & DS1337_BIT_A1IE);
@@ -602,6 +597,8 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
* Alarm support for mcp794xx devices.
*/
+#define MCP794XX_REG_WEEKDAY 0x3
+#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
# define MCP794XX_BIT_ALM1_EN 0x20
@@ -1231,13 +1228,16 @@ static int ds1307_probe(struct i2c_client *client,
{
struct ds1307 *ds1307;
int err = -ENODEV;
- int tmp;
+ int tmp, wday;
struct chip_desc *chip = &chips[id->driver_data];
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
bool want_irq = false;
bool ds1307_can_wakeup_device = false;
unsigned char *buf;
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct rtc_time tm;
+ unsigned long timestamp;
+
irq_handler_t irq_handler = ds1307_irq;
static const int bbsqi_bitpos[] = {
@@ -1526,6 +1526,27 @@ read_rtc:
bin2bcd(tmp));
}
+ /*
+ * Some IPs have weekday reset value = 0x1 which might not be correct,
+ * hence compute the wday using the current date/month/year values.
+ */
+ ds1307_get_time(&client->dev, &tm);
+ wday = tm.tm_wday;
+ timestamp = rtc_tm_to_time64(&tm);
+ rtc_time64_to_tm(timestamp, &tm);
+
+ /*
+ * Check if the reset wday differs from the computed wday.
+ * If it does, write back the wday computed from the
+ * timestamp.
+ */
+ if (wday != tm.tm_wday) {
+ wday = i2c_smbus_read_byte_data(client, MCP794XX_REG_WEEKDAY);
+ wday = wday & ~MCP794XX_REG_WEEKDAY_WDAY_MASK;
+ wday = wday | (tm.tm_wday + 1);
+ i2c_smbus_write_byte_data(client, MCP794XX_REG_WEEKDAY, wday);
+ }
+
if (want_irq) {
device_set_wakeup_capable(&client->dev, true);
set_bit(HAS_ALARM, &ds1307->flags);
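The weekday fix-up above relies on the rtc-lib helpers already used in this hunk: converting a struct rtc_time to a timestamp and back recomputes tm_wday from the date fields. A minimal kernel-context sketch of that round-trip (the date is assumed):

/* Illustrative only: normalize tm_wday from the date fields. */
static void example_fix_wday(struct rtc_time *tm)
{
        time64_t secs;

        /* e.g. tm holds 2016-08-10 with a bogus tm_wday read from the chip */
        secs = rtc_tm_to_time64(tm);    /* tm_wday is ignored on the way in */
        rtc_time64_to_tm(secs, tm);     /* tm_wday is rederived from the date */
}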
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index 23fa9f0cb5e3..895fbeeb47fe 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -504,12 +504,6 @@ static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = priv->alarm_hour < 0 ? 0 : priv->alarm_hour;
alarm->time.tm_mday = priv->alarm_mday < 0 ? 0 : priv->alarm_mday;
- alarm->time.tm_mon = -1;
- alarm->time.tm_year = -1;
- alarm->time.tm_wday = -1;
- alarm->time.tm_yday = -1;
- alarm->time.tm_isdst = -1;
-
out:
mutex_unlock(&priv->mutex);
return res;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index b3ce3c652fcd..ed43b4311660 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -103,6 +103,26 @@ ds1685_rtc_bin2bcd(struct ds1685_priv *rtc, u8 val, u8 bin_mask, u8 bcd_mask)
}
/**
+ * ds1685_rtc_check_mday - check validity of the day of month.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @mday: day of month.
+ *
+ * Returns -EDOM if the day of month is not within the 1..31 range.
+ */
+static inline int
+ds1685_rtc_check_mday(struct ds1685_priv *rtc, u8 mday)
+{
+ if (rtc->bcd_mode) {
+ if (mday < 0x01 || mday > 0x31 || (mday & 0x0f) > 0x09)
+ return -EDOM;
+ } else {
+ if (mday < 1 || mday > 31)
+ return -EDOM;
+ }
+ return 0;
+}
+
+/**
* ds1685_rtc_switch_to_bank0 - switch the rtc to bank 0.
* @rtc: pointer to the ds1685 rtc structure.
*/
@@ -377,6 +397,7 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct platform_device *pdev = to_platform_device(dev);
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
u8 seconds, minutes, hours, mday, ctrlb, ctrlc;
+ int ret;
/* Fetch the alarm info from the RTC alarm registers. */
ds1685_rtc_begin_data_access(rtc);
@@ -388,34 +409,29 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ctrlc = rtc->read(rtc, RTC_CTRL_C);
ds1685_rtc_end_data_access(rtc);
- /* Check month date. */
- if (!(mday >= 1) && (mday <= 31))
- return -EDOM;
+ /* Check the month date for validity. */
+ ret = ds1685_rtc_check_mday(rtc, mday);
+ if (ret)
+ return ret;
/*
* Check the three alarm bytes.
*
* The Linux RTC system doesn't support the "don't care" capability
* of this RTC chip. We check for it anyways in case support is
- * added in the future.
+ * added in the future and only assign when we care.
*/
- if (unlikely(seconds >= 0xc0))
- alrm->time.tm_sec = -1;
- else
+ if (likely(seconds < 0xc0))
alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
RTC_SECS_BCD_MASK,
RTC_SECS_BIN_MASK);
- if (unlikely(minutes >= 0xc0))
- alrm->time.tm_min = -1;
- else
+ if (likely(minutes < 0xc0))
alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
RTC_MINS_BCD_MASK,
RTC_MINS_BIN_MASK);
- if (unlikely(hours >= 0xc0))
- alrm->time.tm_hour = -1;
- else
+ if (likely(hours < 0xc0))
alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
RTC_HRS_24_BCD_MASK,
RTC_HRS_24_BIN_MASK);
@@ -423,11 +439,6 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Write the data to rtc_wkalrm. */
alrm->time.tm_mday = ds1685_rtc_bcd2bin(rtc, mday, RTC_MDAY_BCD_MASK,
RTC_MDAY_BIN_MASK);
- alrm->time.tm_mon = -1;
- alrm->time.tm_year = -1;
- alrm->time.tm_wday = -1;
- alrm->time.tm_yday = -1;
- alrm->time.tm_isdst = -1;
alrm->enabled = !!(ctrlb & RTC_CTRL_B_AIE);
alrm->pending = !!(ctrlc & RTC_CTRL_C_AF);
@@ -445,6 +456,7 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct platform_device *pdev = to_platform_device(dev);
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
u8 ctrlb, seconds, minutes, hours, mday;
+ int ret;
/* Fetch the alarm info and convert to BCD. */
seconds = ds1685_rtc_bin2bcd(rtc, alrm->time.tm_sec,
@@ -461,8 +473,9 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
RTC_MDAY_BCD_MASK);
/* Check the month date for validity. */
- if (!(mday >= 1) && (mday <= 31))
- return -EDOM;
+ ret = ds1685_rtc_check_mday(rtc, mday);
+ if (ret)
+ return ret;
/*
* Check the three alarm bytes.
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 16310fe79d76..9a1582ed7070 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -13,7 +13,7 @@
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
-#include <linux/rtc-ds2404.h>
+#include <linux/platform_data/rtc-ds2404.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 04fbd7fffd0d..b1f20d8c358f 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -197,12 +197,6 @@ static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F);
alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F);
- alarm->time.tm_mon = -1;
- alarm->time.tm_year = -1;
- alarm->time.tm_wday = -1;
- alarm->time.tm_yday = -1;
- alarm->time.tm_isdst = -1;
-
alarm->enabled = !!(control & DS3232_REG_CR_A1IE);
alarm->pending = !!(stat & DS3232_REG_SR_A1F);
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 96d38609d803..0130afd7fe88 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -259,6 +259,12 @@ static const struct rtc_class_ops efi_rtc_ops = {
static int __init efi_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
+ efi_time_t eft;
+ efi_time_cap_t cap;
+
+ /* First check if the RTC is usable */
+ if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
+ return -ENODEV;
rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops,
THIS_MODULE);
diff --git a/drivers/rtc/rtc-generic.c b/drivers/rtc/rtc-generic.c
index d726c6aa96a8..1bf5d2347928 100644
--- a/drivers/rtc/rtc-generic.c
+++ b/drivers/rtc/rtc-generic.c
@@ -9,44 +9,10 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
-#if defined(CONFIG_M68K) || defined(CONFIG_PARISC) || \
- defined(CONFIG_PPC) || defined(CONFIG_SUPERH32)
-#include <asm/rtc.h>
-
-static int generic_get_time(struct device *dev, struct rtc_time *tm)
-{
- unsigned int ret = get_rtc_time(tm);
-
- if (ret & RTC_BATT_BAD)
- return -EOPNOTSUPP;
-
- return rtc_valid_tm(tm);
-}
-
-static int generic_set_time(struct device *dev, struct rtc_time *tm)
-{
- if (set_rtc_time(tm) < 0)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static const struct rtc_class_ops generic_rtc_ops = {
- .read_time = generic_get_time,
- .set_time = generic_set_time,
-};
-#else
-#define generic_rtc_ops *(struct rtc_class_ops*)NULL
-#endif
-
static int __init generic_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
- const struct rtc_class_ops *ops;
-
- ops = dev_get_platdata(&dev->dev);
- if (!ops)
- ops = &generic_rtc_ops;
+ const struct rtc_class_ops *ops = dev_get_platdata(&dev->dev);
rtc = devm_rtc_device_register(&dev->dev, "rtc-generic",
ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 207270376b55..e5ad527cb75e 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -198,7 +198,7 @@ static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
return ret;
/* The alarm only has a minute accuracy */
- alm_tm->tm_sec = -1;
+ alm_tm->tm_sec = 0;
alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ?
-1 :
@@ -213,9 +213,6 @@ static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
-1 :
bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK);
- alm_tm->tm_mon = -1;
- alm_tm->tm_year = -1;
-
ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (ret < 0)
return ret;
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index 54328d4ac0d3..0e7f0f52bfe4 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -245,8 +245,7 @@ static int isl12057_rtc_update_alarm(struct device *dev, int enable)
static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time rtc_tm, *alarm_tm = &alarm->time;
- unsigned long rtc_secs, alarm_secs;
+ struct rtc_time *alarm_tm = &alarm->time;
u8 regs[ISL12057_A1_SEC_LEN];
unsigned int ir;
int ret;
@@ -264,36 +263,6 @@ static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f);
alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
- alarm_tm->tm_wday = -1;
-
- /*
- * The alarm section does not store year/month. We use the ones in rtc
- * section as a basis and increment month and then year if needed to get
- * alarm after current time.
- */
- ret = _isl12057_rtc_read_time(dev, &rtc_tm);
- if (ret)
- goto err_unlock;
-
- alarm_tm->tm_year = rtc_tm.tm_year;
- alarm_tm->tm_mon = rtc_tm.tm_mon;
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err_unlock;
-
- if (alarm_secs < rtc_secs) {
- if (alarm_tm->tm_mon == 11) {
- alarm_tm->tm_mon = 0;
- alarm_tm->tm_year += 1;
- } else {
- alarm_tm->tm_mon += 1;
- }
- }
ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
if (ret) {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index d1bf93a87200..58698d21c2c3 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -244,7 +244,7 @@ static int m41t80_alarm_irq_enable(struct device *dev, unsigned int enabled)
retval = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, flags);
if (retval < 0) {
- dev_info(dev, "Unable to enable alarm IRQ %d\n", retval);
+ dev_err(dev, "Unable to enable alarm IRQ %d\n", retval);
return retval;
}
return 0;
@@ -320,10 +320,8 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->time.tm_sec = bcd2bin(alarmvals[4] & 0x7f);
alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
- alrm->time.tm_wday = -1;
alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
- alrm->time.tm_year = -1;
alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
@@ -337,6 +335,30 @@ static struct rtc_class_ops m41t80_rtc_ops = {
.proc = m41t80_rtc_proc,
};
+#ifdef CONFIG_PM_SLEEP
+static int m41t80_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq >= 0 && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int m41t80_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq >= 0 && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
+
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -831,10 +853,9 @@ static int m41t80_probe(struct i2c_client *client,
return rc;
}
- rc = devm_add_action(&client->dev, m41t80_remove_sysfs_group,
- &client->dev);
+ rc = devm_add_action_or_reset(&client->dev, m41t80_remove_sysfs_group,
+ &client->dev);
if (rc) {
- m41t80_remove_sysfs_group(&client->dev);
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n", rc);
return rc;
@@ -873,6 +894,7 @@ static int m41t80_remove(struct i2c_client *client)
static struct i2c_driver m41t80_driver = {
.driver = {
.name = "rtc-m41t80",
+ .pm = &m41t80_pm,
},
.probe = m41t80_probe,
.remove = m41t80_remove,
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f72b91f2501f..0eeb5714c00f 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <linux/m48t86.h>
+#include <linux/platform_data/rtc-m48t86.h>
#include <linux/bcd.h>
#define M48T86_REG_SEC 0x00
diff --git a/drivers/rtc/rtc-max6916.c b/drivers/rtc/rtc-max6916.c
new file mode 100644
index 000000000000..623ab27b2757
--- /dev/null
+++ b/drivers/rtc/rtc-max6916.c
@@ -0,0 +1,164 @@
+/* rtc-max6916.c
+ *
+ * Driver for MAXIM max6916 Low Current, SPI Compatible
+ * Real Time Clock
+ *
+ * Author : Venkat Prashanth B U <venkat.prashanth2498@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+
+/* Registers in max6916 rtc */
+
+#define MAX6916_SECONDS_REG 0x01
+#define MAX6916_MINUTES_REG 0x02
+#define MAX6916_HOURS_REG 0x03
+#define MAX6916_DATE_REG 0x04
+#define MAX6916_MONTH_REG 0x05
+#define MAX6916_DAY_REG 0x06
+#define MAX6916_YEAR_REG 0x07
+#define MAX6916_CONTROL_REG 0x08
+#define MAX6916_STATUS_REG 0x0C
+#define MAX6916_CLOCK_BURST 0x3F
+
+static int max6916_read_reg(struct device *dev, unsigned char address,
+ unsigned char *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ *data = address | 0x80;
+
+ return spi_write_then_read(spi, data, 1, data, 1);
+}
+
+static int max6916_write_reg(struct device *dev, unsigned char address,
+ unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ buf[0] = address & 0x7F;
+ buf[1] = data;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int max6916_read_time(struct device *dev, struct rtc_time *dt)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int err;
+ unsigned char buf[8];
+
+ buf[0] = MAX6916_CLOCK_BURST | 0x80;
+
+ err = spi_write_then_read(spi, buf, 1, buf, 8);
+
+ if (err)
+ return err;
+
+ dt->tm_sec = bcd2bin(buf[0]);
+ dt->tm_min = bcd2bin(buf[1]);
+ dt->tm_hour = bcd2bin(buf[2] & 0x3F);
+ dt->tm_mday = bcd2bin(buf[3]);
+ dt->tm_mon = bcd2bin(buf[4]) - 1;
+ dt->tm_wday = bcd2bin(buf[5]) - 1;
+ dt->tm_year = bcd2bin(buf[6]) + 100;
+
+ return rtc_valid_tm(dt);
+}
+
+static int max6916_set_time(struct device *dev, struct rtc_time *dt)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[9];
+
+ if (dt->tm_year < 100 || dt->tm_year > 199) {
+ dev_err(&spi->dev, "Year must be between 2000 and 2099. It's %d.\n",
+ dt->tm_year + 1900);
+ return -EINVAL;
+ }
+
+ buf[0] = MAX6916_CLOCK_BURST & 0x7F;
+ buf[1] = bin2bcd(dt->tm_sec);
+ buf[2] = bin2bcd(dt->tm_min);
+ buf[3] = (bin2bcd(dt->tm_hour) & 0X3F);
+ buf[4] = bin2bcd(dt->tm_mday);
+ buf[5] = bin2bcd(dt->tm_mon + 1);
+ buf[6] = bin2bcd(dt->tm_wday + 1);
+ buf[7] = bin2bcd(dt->tm_year % 100);
+ buf[8] = bin2bcd(0x00);
+
+ /* write the rtc settings */
+ return spi_write_then_read(spi, buf, 9, NULL, 0);
+}
+
+static const struct rtc_class_ops max6916_rtc_ops = {
+ .read_time = max6916_read_time,
+ .set_time = max6916_set_time,
+};
+
+static int max6916_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char data;
+ int res;
+
+ /* spi setup with max6916 in mode 3 and bits per word as 8 */
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ /* RTC Settings */
+ res = max6916_read_reg(&spi->dev, MAX6916_SECONDS_REG, &data);
+ if (res)
+ return res;
+
+ /* Disable the write protect of rtc */
+ max6916_read_reg(&spi->dev, MAX6916_CONTROL_REG, &data);
+ data = data & ~(1 << 7);
+ max6916_write_reg(&spi->dev, MAX6916_CONTROL_REG, data);
+
+ /* Enable oscillator, disable oscillator stop flag, glitch filter */
+ max6916_read_reg(&spi->dev, MAX6916_STATUS_REG, &data);
+ data = data & 0x1B;
+ max6916_write_reg(&spi->dev, MAX6916_STATUS_REG, data);
+
+ /* display the settings */
+ max6916_read_reg(&spi->dev, MAX6916_CONTROL_REG, &data);
+ dev_info(&spi->dev, "MAX6916 RTC CTRL Reg = 0x%02x\n", data);
+
+ max6916_read_reg(&spi->dev, MAX6916_STATUS_REG, &data);
+ dev_info(&spi->dev, "MAX6916 RTC Status Reg = 0x%02x\n", data);
+
+ rtc = devm_rtc_device_register(&spi->dev, "max6916",
+ &max6916_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ spi_set_drvdata(spi, rtc);
+
+ return 0;
+}
+
+static struct spi_driver max6916_driver = {
+ .driver = {
+ .name = "max6916",
+ },
+ .probe = max6916_probe,
+};
+module_spi_driver(max6916_driver);
+
+MODULE_DESCRIPTION("MAX6916 SPI RTC DRIVER");
+MODULE_AUTHOR("Venkat Prashanth B U <venkat.prashanth2498@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/include/asm-generic/rtc.h b/drivers/rtc/rtc-mc146818-lib.c
index 4e3b6558331e..2f1772a358ca 100644
--- a/include/asm-generic/rtc.h
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -1,40 +1,16 @@
-/*
- * include/asm-generic/rtc.h
- *
- * Author: Tom Rini <trini@mvista.com>
- *
- * Based on:
- * drivers/char/rtc.c
- *
- * Please read the COPYING file for all license details.
- */
-
-#ifndef __ASM_RTC_H__
-#define __ASM_RTC_H__
-
-#include <linux/mc146818rtc.h>
-#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/mc146818rtc.h>
+
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
-#define RTC_PIE 0x40 /* periodic interrupt enable */
-#define RTC_AIE 0x20 /* alarm interrupt enable */
-#define RTC_UIE 0x10 /* update-finished interrupt enable */
-
-/* some dummy definitions */
-#define RTC_BATT_BAD 0x100 /* battery bad */
-#define RTC_SQWE 0x08 /* enable square-wave output */
-#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
-#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
-#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
-
/*
* Returns true if a clock update is in progress
*/
-static inline unsigned char rtc_is_updating(void)
+static inline unsigned char mc146818_is_updating(void)
{
unsigned char uip;
unsigned long flags;
@@ -45,7 +21,7 @@ static inline unsigned char rtc_is_updating(void)
return uip;
}
-static inline unsigned int __get_rtc_time(struct rtc_time *time)
+unsigned int mc146818_get_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
@@ -60,11 +36,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
* can take just over 2ms. We wait 20ms. There is no need
* to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
- * periodic update complete interrupts, (via ioctl) and then
+ * periodic update complete interrupts, (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
*/
- if (rtc_is_updating())
+ if (mc146818_is_updating())
mdelay(20);
/*
@@ -120,13 +96,10 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
return RTC_24H;
}
-
-#ifndef get_rtc_time
-#define get_rtc_time __get_rtc_time
-#endif
+EXPORT_SYMBOL_GPL(mc146818_get_time);
/* Set the current date and time in the real time clock. */
-static inline int __set_rtc_time(struct rtc_time *time)
+int mc146818_set_time(struct rtc_time *time)
{
unsigned long flags;
unsigned char mon, day, hrs, min, sec;
@@ -222,26 +195,4 @@ static inline int __set_rtc_time(struct rtc_time *time)
return 0;
}
-
-#ifndef set_rtc_time
-#define set_rtc_time __set_rtc_time
-#endif
-
-static inline unsigned int get_rtc_ss(void)
-{
- struct rtc_time h;
-
- get_rtc_time(&h);
- return h.tm_sec;
-}
-
-static inline int get_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-static inline int set_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-
-#endif /* __ASM_RTC_H__ */
+EXPORT_SYMBOL_GPL(mc146818_set_time);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 0094d9bdd1e6..7334c44fa7c3 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -32,11 +32,11 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sfi.h>
-#include <asm-generic/rtc.h>
#include <asm/intel_scu_ipc.h>
#include <asm/intel-mid.h>
#include <asm/intel_mid_vrtc.h>
@@ -149,14 +149,6 @@ static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
if (mrst->irq <= 0)
return -EIO;
- /* Basic alarms only support hour, minute, and seconds fields.
- * Some also support day and month, for alarms up to a year in
- * the future.
- */
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
-
/* vRTC only supports binary mode */
spin_lock_irq(&rtc_lock);
t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 9c18d6fd8107..ea20f627dabe 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -134,7 +134,7 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
goto exit;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS) {
rc = -EIO;
goto exit;
@@ -181,7 +181,7 @@ static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
goto exit;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS)
rc = -EIO;
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index f22e060709e5..b4478cc92b55 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -96,7 +96,7 @@
#define CD_TMR_TE BIT(3) /* Countdown timer enable */
/* PCF2123_REG_OFFSET BITS */
-#define OFFSET_SIGN_BIT BIT(6) /* 2's complement sign bit */
+#define OFFSET_SIGN_BIT 6 /* 2's complement sign bit */
#define OFFSET_COARSE BIT(7) /* Coarse mode offset */
#define OFFSET_STEP (2170) /* Offset step in parts per billion */
@@ -217,7 +217,7 @@ static int pcf2123_read_offset(struct device *dev, long *offset)
if (reg & OFFSET_COARSE)
reg <<= 1; /* multiply by 2 and sign extend */
else
- reg |= (reg & OFFSET_SIGN_BIT) << 1; /* sign extend only */
+ reg = sign_extend32(reg, OFFSET_SIGN_BIT);
*offset = ((long)reg) * OFFSET_STEP;
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index e8ddbb359d11..efb0a08ac117 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -16,6 +16,16 @@
#include <linux/rtc.h>
#include <linux/module.h>
+/*
+ * Information for this driver was pulled from the following datasheets.
+ *
+ * http://www.nxp.com/documents/data_sheet/PCF85063A.pdf
+ * http://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
+ *
+ * PCF85063A -- Rev. 6 — 18 November 2015
+ * PCF85063TP -- Rev. 4 — 6 May 2015
+*/
+
#define PCF85063_REG_CTRL1 0x00 /* status */
#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL2 0x01
@@ -55,10 +65,22 @@ static int pcf85063_stop_clock(struct i2c_client *client, u8 *ctrl1)
return 0;
}
-/*
- * In the routines that deal directly with the pcf85063 hardware, we use
- * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
- */
+static int pcf85063_start_clock(struct i2c_client *client, u8 ctrl1)
+{
+ s32 ret;
+
+ /* start the clock */
+ ctrl1 &= PCF85063_REG_CTRL1_STOP;
+
+ ret = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, ctrl1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failing to start the clock\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
int rc;
@@ -90,8 +112,7 @@ static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
tm->tm_wday = regs[4] & 0x07;
tm->tm_mon = bcd2bin(regs[5] & 0x1F) - 1; /* rtc mn 1-12 */
tm->tm_year = bcd2bin(regs[6]);
- if (tm->tm_year < 70)
- tm->tm_year += 100; /* assume we are in 1970...2069 */
+ tm->tm_year += 100;
return rtc_valid_tm(tm);
}
@@ -99,13 +120,17 @@ static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
int rc;
- u8 regs[8];
+ u8 regs[7];
+ u8 ctrl1;
+
+ if ((tm->tm_year < 100) || (tm->tm_year > 199))
+ return -EINVAL;
/*
* to accurately set the time, reset the divider chain and keep it in
* reset state until all time/date registers are written
*/
- rc = pcf85063_stop_clock(client, &regs[7]);
+ rc = pcf85063_stop_clock(client, &ctrl1);
if (rc != 0)
return rc;
@@ -125,14 +150,7 @@ static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
regs[5] = bin2bcd(tm->tm_mon + 1);
/* year and century */
- regs[6] = bin2bcd(tm->tm_year % 100);
-
- /*
- * after all time/date registers are written, let the 'address auto
- * increment' feature wrap around and write register CTRL1 to re-enable
- * the clock divider chain again
- */
- regs[7] &= ~PCF85063_REG_CTRL1_STOP;
+ regs[6] = bin2bcd(tm->tm_year - 100);
/* write all registers at once */
rc = i2c_smbus_write_i2c_block_data(client, PCF85063_REG_SC,
@@ -142,6 +160,15 @@ static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return rc;
}
+ /*
+ * Write the control register as a separate action since the size of
+ * the register space is different between the PCF85063TP and
+ * PCF85063A devices. The rollover point can not be used.
+ */
+ rc = pcf85063_start_clock(client, ctrl1);
+ if (rc != 0)
+ return rc;
+
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b9ddbb001283..1227ceab61ee 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -341,14 +341,11 @@ static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
"%s: raw data is min=%02x, hr=%02x, mday=%02x, wday=%02x\n",
__func__, buf[0], buf[1], buf[2], buf[3]);
+ tm->time.tm_sec = 0;
tm->time.tm_min = bcd2bin(buf[0] & 0x7F);
tm->time.tm_hour = bcd2bin(buf[1] & 0x3F);
tm->time.tm_mday = bcd2bin(buf[2] & 0x3F);
tm->time.tm_wday = bcd2bin(buf[3] & 0x7);
- tm->time.tm_mon = -1;
- tm->time.tm_year = -1;
- tm->time.tm_yday = -1;
- tm->time.tm_isdst = -1;
err = pcf8563_get_alarm_mode(client, &tm->enabled, &tm->pending);
if (err < 0)
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index f28d57788951..68ce77414bdc 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -128,6 +128,7 @@ static int rc5t583_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
return ret;
}
+ alm->time.tm_sec = 0;
alm->time.tm_min = bcd2bin(alarm_data[0]);
alm->time.tm_hour = bcd2bin(alarm_data[1]);
alm->time.tm_mday = bcd2bin(alarm_data[2]);
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index ef86229428fc..c8c757466783 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -341,12 +341,6 @@ static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
- t->time.tm_wday = -1;
- t->time.tm_yday = -1;
- t->time.tm_isdst = -1;
/* ... and status */
t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index f623038e586e..9a2f6a95d5a7 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -13,12 +13,15 @@
#include <linux/bcd.h>
#include <linux/bitops.h>
+#include <linux/log2.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtc.h>
+#define RV8803_I2C_TRY_COUNT 4
+
#define RV8803_SEC 0x00
#define RV8803_MIN 0x01
#define RV8803_HOUR 0x02
@@ -56,19 +59,85 @@ struct rv8803_data {
u8 ctrl;
};
+static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ /*
+ * There is a 61µs window during which the RTC does not acknowledge I2C
+ * transfers. In that case, ensure that there are multiple attempts.
+ */
+ do
+ ret = i2c_smbus_read_byte_data(client, reg);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret < 0)
+ dev_err(&client->dev, "Unable to read register 0x%02x\n", reg);
+
+ return ret;
+}
+
+static int rv8803_read_regs(const struct i2c_client *client,
+ u8 reg, u8 count, u8 *values)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ do
+ ret = i2c_smbus_read_i2c_block_data(client, reg, count, values);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret != count) {
+ dev_err(&client->dev,
+ "Unable to read registers 0x%02x..0x%02x\n",
+ reg, reg + count - 1);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+static int rv8803_write_reg(const struct i2c_client *client, u8 reg, u8 value)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ do
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret)
+ dev_err(&client->dev, "Unable to write register 0x%02x\n", reg);
+
+ return ret;
+}
+
+static int rv8803_write_regs(const struct i2c_client *client,
+ u8 reg, u8 count, const u8 *values)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ do
+ ret = i2c_smbus_write_i2c_block_data(client, reg, count,
+ values);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret)
+ dev_err(&client->dev,
+ "Unable to write registers 0x%02x..0x%02x\n",
+ reg, reg + count - 1);
+
+ return ret;
+}
+
static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rv8803_data *rv8803 = i2c_get_clientdata(client);
unsigned long events = 0;
- int flags, try = 0;
+ int flags;
mutex_lock(&rv8803->flags_lock);
- do {
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
- try++;
- } while ((flags == -ENXIO) && (try < 3));
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags <= 0) {
mutex_unlock(&rv8803->flags_lock);
return IRQ_NONE;
@@ -100,9 +169,8 @@ static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
if (events) {
rtc_update_irq(rv8803->rtc, 1, events);
- i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
- i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
- rv8803->ctrl);
+ rv8803_write_reg(client, RV8803_FLAG, flags);
+ rv8803_write_reg(rv8803->client, RV8803_CTRL, rv8803->ctrl);
}
mutex_unlock(&rv8803->flags_lock);
@@ -118,7 +186,7 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
u8 *date = date1;
int ret, flags;
- flags = i2c_smbus_read_byte_data(rv8803->client, RV8803_FLAG);
+ flags = rv8803_read_reg(rv8803->client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -127,16 +195,14 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- ret = i2c_smbus_read_i2c_block_data(rv8803->client, RV8803_SEC,
- 7, date);
- if (ret != 7)
- return ret < 0 ? ret : -EIO;
+ ret = rv8803_read_regs(rv8803->client, RV8803_SEC, 7, date);
+ if (ret)
+ return ret;
if ((date1[RV8803_SEC] & 0x7f) == bin2bcd(59)) {
- ret = i2c_smbus_read_i2c_block_data(rv8803->client, RV8803_SEC,
- 7, date2);
- if (ret != 7)
- return ret < 0 ? ret : -EIO;
+ ret = rv8803_read_regs(rv8803->client, RV8803_SEC, 7, date2);
+ if (ret)
+ return ret;
if ((date2[RV8803_SEC] & 0x7f) != bin2bcd(59))
date = date2;
@@ -145,23 +211,33 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec = bcd2bin(date[RV8803_SEC] & 0x7f);
tm->tm_min = bcd2bin(date[RV8803_MIN] & 0x7f);
tm->tm_hour = bcd2bin(date[RV8803_HOUR] & 0x3f);
- tm->tm_wday = ffs(date[RV8803_WEEK] & 0x7f);
+ tm->tm_wday = ilog2(date[RV8803_WEEK] & 0x7f);
tm->tm_mday = bcd2bin(date[RV8803_DAY] & 0x3f);
tm->tm_mon = bcd2bin(date[RV8803_MONTH] & 0x1f) - 1;
tm->tm_year = bcd2bin(date[RV8803_YEAR]) + 100;
- return rtc_valid_tm(tm);
+ return 0;
}
static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
{
struct rv8803_data *rv8803 = dev_get_drvdata(dev);
u8 date[7];
- int flags, ret;
+ int ctrl, flags, ret;
if ((tm->tm_year < 100) || (tm->tm_year > 199))
return -EINVAL;
+ ctrl = rv8803_read_reg(rv8803->client, RV8803_CTRL);
+ if (ctrl < 0)
+ return ctrl;
+
+ /* Stop the clock */
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ ctrl | RV8803_CTRL_RESET);
+ if (ret)
+ return ret;
+
date[RV8803_SEC] = bin2bcd(tm->tm_sec);
date[RV8803_MIN] = bin2bcd(tm->tm_min);
date[RV8803_HOUR] = bin2bcd(tm->tm_hour);
@@ -170,21 +246,26 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
date[RV8803_MONTH] = bin2bcd(tm->tm_mon + 1);
date[RV8803_YEAR] = bin2bcd(tm->tm_year - 100);
- ret = i2c_smbus_write_i2c_block_data(rv8803->client, RV8803_SEC,
- 7, date);
- if (ret < 0)
+ ret = rv8803_write_regs(rv8803->client, RV8803_SEC, 7, date);
+ if (ret)
+ return ret;
+
+ /* Restart the clock */
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ ctrl & ~RV8803_CTRL_RESET);
+ if (ret)
return ret;
mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(rv8803->client, RV8803_FLAG);
+ flags = rv8803_read_reg(rv8803->client, RV8803_FLAG);
if (flags < 0) {
mutex_unlock(&rv8803->flags_lock);
return flags;
}
- ret = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG,
- flags & ~RV8803_FLAG_V2F);
+ ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
+ flags & ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F));
mutex_unlock(&rv8803->flags_lock);
@@ -198,22 +279,18 @@ static int rv8803_get_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u8 alarmvals[3];
int flags, ret;
- ret = i2c_smbus_read_i2c_block_data(client, RV8803_ALARM_MIN,
- 3, alarmvals);
- if (ret != 3)
- return ret < 0 ? ret : -EIO;
+ ret = rv8803_read_regs(client, RV8803_ALARM_MIN, 3, alarmvals);
+ if (ret)
+ return ret;
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0)
return flags;
alrm->time.tm_sec = 0;
alrm->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
alrm->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
- alrm->time.tm_wday = -1;
alrm->time.tm_mday = bcd2bin(alarmvals[2] & 0x3f);
- alrm->time.tm_mon = -1;
- alrm->time.tm_year = -1;
alrm->enabled = !!(rv8803->ctrl & RV8803_CTRL_AIE);
alrm->pending = (flags & RV8803_FLAG_AF) && alrm->enabled;
@@ -239,10 +316,10 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
mutex_lock(&rv8803->flags_lock);
- ret = i2c_smbus_read_i2c_block_data(client, RV8803_FLAG, 2, ctrl);
- if (ret != 2) {
+ ret = rv8803_read_regs(client, RV8803_FLAG, 2, ctrl);
+ if (ret) {
mutex_unlock(&rv8803->flags_lock);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
alarmvals[0] = bin2bcd(alrm->time.tm_min);
@@ -251,8 +328,8 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (rv8803->ctrl & (RV8803_CTRL_AIE | RV8803_CTRL_UIE)) {
rv8803->ctrl &= ~(RV8803_CTRL_AIE | RV8803_CTRL_UIE);
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
- rv8803->ctrl);
+ err = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ rv8803->ctrl);
if (err) {
mutex_unlock(&rv8803->flags_lock);
return err;
@@ -260,13 +337,12 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
ctrl[1] &= ~RV8803_FLAG_AF;
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG, ctrl[1]);
+ err = rv8803_write_reg(rv8803->client, RV8803_FLAG, ctrl[1]);
mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
- err = i2c_smbus_write_i2c_block_data(rv8803->client, RV8803_ALARM_MIN,
- 3, alarmvals);
+ err = rv8803_write_regs(rv8803->client, RV8803_ALARM_MIN, 3, alarmvals);
if (err)
return err;
@@ -276,8 +352,8 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (rv8803->rtc->aie_timer.enabled)
rv8803->ctrl |= RV8803_CTRL_AIE;
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
- rv8803->ctrl);
+ err = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ rv8803->ctrl);
if (err)
return err;
}
@@ -306,21 +382,20 @@ static int rv8803_alarm_irq_enable(struct device *dev, unsigned int enabled)
}
mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0) {
mutex_unlock(&rv8803->flags_lock);
return flags;
}
flags &= ~(RV8803_FLAG_AF | RV8803_FLAG_UF);
- err = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
+ err = rv8803_write_reg(client, RV8803_FLAG, flags);
mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
if (ctrl != rv8803->ctrl) {
rv8803->ctrl = ctrl;
- err = i2c_smbus_write_byte_data(client, RV8803_CTRL,
- rv8803->ctrl);
+ err = rv8803_write_reg(client, RV8803_CTRL, rv8803->ctrl);
if (err)
return err;
}
@@ -336,7 +411,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTC_VL_READ:
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -355,16 +430,16 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
case RTC_VL_CLR:
mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0) {
mutex_unlock(&rv8803->flags_lock);
return flags;
}
flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
- ret = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
+ ret = rv8803_write_reg(client, RV8803_FLAG, flags);
mutex_unlock(&rv8803->flags_lock);
- if (ret < 0)
+ if (ret)
return ret;
return 0;
@@ -382,8 +457,8 @@ static ssize_t rv8803_nvram_write(struct file *filp, struct kobject *kobj,
struct i2c_client *client = to_i2c_client(dev);
int ret;
- ret = i2c_smbus_write_byte_data(client, RV8803_RAM, buf[0]);
- if (ret < 0)
+ ret = rv8803_write_reg(client, RV8803_RAM, buf[0]);
+ if (ret)
return ret;
return 1;
@@ -397,7 +472,7 @@ static ssize_t rv8803_nvram_read(struct file *filp, struct kobject *kobj,
struct i2c_client *client = to_i2c_client(dev);
int ret;
- ret = i2c_smbus_read_byte_data(client, RV8803_RAM);
+ ret = rv8803_read_reg(client, RV8803_RAM);
if (ret < 0)
return ret;
@@ -427,7 +502,7 @@ static int rv8803_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rv8803_data *rv8803;
- int err, flags, try = 0;
+ int err, flags;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK)) {
@@ -444,16 +519,7 @@ static int rv8803_probe(struct i2c_client *client,
rv8803->client = client;
i2c_set_clientdata(client, rv8803);
- /*
- * There is a 60µs window where the RTC may not reply on the i2c bus in
- * that case, the transfer is not ACKed. In that case, ensure there are
- * multiple attempts.
- */
- do {
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
- try++;
- } while ((flags == -ENXIO) && (try < 3));
-
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -488,12 +554,7 @@ static int rv8803_probe(struct i2c_client *client,
return PTR_ERR(rv8803->rtc);
}
- try = 0;
- do {
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT,
- RV8803_EXT_WADA);
- try++;
- } while ((err == -ENXIO) && (try < 3));
+ err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
if (err)
return err;
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 772d221ec2d9..7163b91bb773 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -272,15 +272,9 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
t->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
- if (alarmvals[2] & RX8010_ALARM_AE)
- t->time.tm_mday = -1;
- else
+ if (!(alarmvals[2] & RX8010_ALARM_AE))
t->time.tm_mday = bcd2bin(alarmvals[2] & 0x7f);
- t->time.tm_wday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
-
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 9f105efbc546..2b85cc7a24e7 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -319,11 +319,6 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
+ (ald[1] & 0x20 ? 12 : 0);
- t->time.tm_wday = -1;
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
-
dev_dbg(dev, "%s: date: %ds %dm %dh %dmd %dm %dy\n",
__func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index f40afdd0e5f5..5dab4665ca3b 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -15,6 +15,7 @@
#include <linux/bitrev.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#define S35390A_CMD_STATUS1 0
#define S35390A_CMD_STATUS2 1
@@ -34,10 +35,14 @@
#define S35390A_ALRM_BYTE_HOURS 1
#define S35390A_ALRM_BYTE_MINS 2
+/* flags for STATUS1 */
#define S35390A_FLAG_POC 0x01
#define S35390A_FLAG_BLD 0x02
+#define S35390A_FLAG_INT2 0x04
#define S35390A_FLAG_24H 0x40
#define S35390A_FLAG_RESET 0x80
+
+/* flag for STATUS2 */
#define S35390A_FLAG_TEST 0x01
#define S35390A_INT2_MODE_MASK 0xF0
@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
return 0;
}
-static int s35390a_reset(struct s35390a *s35390a)
+/*
+ * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset.
+ * To keep the information if an irq is pending, pass the value read from
+ * STATUS1 to the caller.
+ */
+static int s35390a_reset(struct s35390a *s35390a, char *status1)
{
- char buf[1];
-
- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
- return -EIO;
-
- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
+ char buf;
+ int ret;
+ unsigned initcount = 0;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
+ if (ret < 0)
+ return ret;
+
+ if (*status1 & S35390A_FLAG_POC)
+ /*
+ * Do not communicate for 0.5 seconds since the power-on
+ * detection circuit is in operation.
+ */
+ msleep(500);
+ else if (!(*status1 & S35390A_FLAG_BLD))
+ /*
+ * If both POC and BLD are unset everything is fine.
+ */
return 0;
- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
- buf[0] &= 0xf0;
- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+ /*
+ * At least one of POC and BLD are set, so reinitialise chip. Keeping
+ * this information in the hardware to know later that the time isn't
+ * valid is unfortunately not possible because POC and BLD are cleared
+ * on read. So the reset is best done now.
+ *
+ * The 24H bit is kept over reset, so set it already here.
+ */
+initialize:
+ *status1 = S35390A_FLAG_24H;
+ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
+ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+
+ if (ret < 0)
+ return ret;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
+ /* Try up to five times to reset the chip */
+ if (initcount < 5) {
+ ++initcount;
+ goto initialize;
+ } else
+ return -EIO;
+ }
+
+ return 1;
}
static int s35390a_disable_test_mode(struct s35390a *s35390a)
@@ -217,12 +266,12 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
alm->time.tm_min, alm->time.tm_hour, alm->time.tm_mday,
alm->time.tm_mon, alm->time.tm_year, alm->time.tm_wday);
- /* disable interrupt */
+ /* disable interrupt (which deasserts the irq line) */
err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
if (err < 0)
return err;
- /* clear pending interrupt, if any */
+ /* clear pending interrupt (in STATUS1 only), if any */
err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &sts, sizeof(sts));
if (err < 0)
return err;
@@ -242,6 +291,8 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
if (alm->time.tm_wday != -1)
buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80;
+ else
+ buf[S35390A_ALRM_BYTE_WDAY] = 0;
buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a,
alm->time.tm_hour) | 0x80;
@@ -269,23 +320,43 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
if (err < 0)
return err;
- if (bitrev8(sts) != S35390A_INT2_MODE_ALARM)
- return -EINVAL;
+ if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) {
+ /*
+ * When the alarm isn't enabled, the register to configure
+ * the alarm time isn't accessible.
+ */
+ alm->enabled = 0;
+ return 0;
+ } else {
+ alm->enabled = 1;
+ }
err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf));
if (err < 0)
return err;
/* This chip returns the bits of each byte in reverse order */
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < 3; ++i)
buf[i] = bitrev8(buf[i]);
- buf[i] &= ~0x80;
- }
- alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]);
- alm->time.tm_hour = s35390a_reg2hr(s35390a,
- buf[S35390A_ALRM_BYTE_HOURS]);
- alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]);
+ /*
+ * B0 of the three matching registers is an enable flag. Iff it is set
+ * the configured value is used for matching.
+ */
+ if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80)
+ alm->time.tm_wday =
+ bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80)
+ alm->time.tm_hour =
+ s35390a_reg2hr(s35390a,
+ buf[S35390A_ALRM_BYTE_HOURS] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_MINS] & 0x80)
+ alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80);
+
+ /* alarm triggers always at s=0 */
+ alm->time.tm_sec = 0;
dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n",
__func__, alm->time.tm_min, alm->time.tm_hour,
@@ -327,11 +398,11 @@ static struct i2c_driver s35390a_driver;
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int err;
+ int err, err_reset;
unsigned int i;
struct s35390a *s35390a;
struct rtc_time tm;
- char buf[1];
+ char buf, status1;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
@@ -360,29 +431,35 @@ static int s35390a_probe(struct i2c_client *client,
}
}
- err = s35390a_reset(s35390a);
- if (err < 0) {
+ err_reset = s35390a_reset(s35390a, &status1);
+ if (err_reset < 0) {
+ err = err_reset;
dev_err(&client->dev, "error resetting chip\n");
goto exit_dummy;
}
- err = s35390a_disable_test_mode(s35390a);
- if (err < 0) {
- dev_err(&client->dev, "error disabling test mode\n");
- goto exit_dummy;
- }
-
- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
- if (err < 0) {
- dev_err(&client->dev, "error checking 12/24 hour mode\n");
- goto exit_dummy;
- }
- if (buf[0] & S35390A_FLAG_24H)
+ if (status1 & S35390A_FLAG_24H)
s35390a->twentyfourhour = 1;
else
s35390a->twentyfourhour = 0;
- if (s35390a_get_datetime(client, &tm) < 0)
+ if (status1 & S35390A_FLAG_INT2) {
+ /* disable alarm (and maybe test mode) */
+ buf = 0;
+ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling alarm");
+ goto exit_dummy;
+ }
+ } else {
+ err = s35390a_disable_test_mode(s35390a);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling test mode\n");
+ goto exit_dummy;
+ }
+ }
+
+ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
dev_warn(&client->dev, "clock needs to be set\n");
device_set_wakeup_capable(&client->dev, 1);
@@ -395,6 +472,10 @@ static int s35390a_probe(struct i2c_client *client,
err = PTR_ERR(s35390a->rtc);
goto exit_dummy;
}
+
+ if (status1 & S35390A_FLAG_INT2)
+ rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+
return 0;
exit_dummy:
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index d01ad7e8078e..d44fb34df8fe 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
if (!is_power_of_2(freq))
return -EINVAL;
+ s3c_rtc_enable_clk(info);
spin_lock_irq(&info->pie_lock);
if (info->data->set_freq)
info->data->set_freq(info, freq);
spin_unlock_irq(&info->pie_lock);
+ s3c_rtc_disable_clk(info);
return 0;
}
@@ -264,35 +266,23 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
/* decode the alarm enable field */
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
- else
- alm_tm->tm_sec = -1;
if (alm_en & S3C2410_RTCALM_MINEN)
alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
- else
- alm_tm->tm_min = -1;
if (alm_en & S3C2410_RTCALM_HOUREN)
alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
- else
- alm_tm->tm_hour = -1;
if (alm_en & S3C2410_RTCALM_DAYEN)
alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
- else
- alm_tm->tm_mday = -1;
if (alm_en & S3C2410_RTCALM_MONEN) {
alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
alm_tm->tm_mon -= 1;
- } else {
- alm_tm->tm_mon = -1;
}
if (alm_en & S3C2410_RTCALM_YEAREN)
alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
- else
- alm_tm->tm_year = -1;
return 0;
}
@@ -577,8 +567,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(info, 1);
- s3c_rtc_disable_clk(info);
-
return 0;
err_nortc:
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index a45845a571e5..17b6235d67a5 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -481,7 +481,6 @@ static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
tm->tm_mon = sh_rtc_read_alarm_value(rtc, RMONAR);
if (tm->tm_mon > 0)
tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
- tm->tm_year = 0xffff;
wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;
@@ -500,52 +499,13 @@ static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off);
}
-static int sh_rtc_check_alarm(struct rtc_time *tm)
-{
- /*
- * The original rtc says anything > 0xc0 is "don't care" or "match
- * all" - most users use 0xff but rtc-dev uses -1 for the same thing.
- * The original rtc doesn't support years - some things use -1 and
- * some 0xffff. We use -1 to make out tests easier.
- */
- if (tm->tm_year == 0xffff)
- tm->tm_year = -1;
- if (tm->tm_mon >= 0xff)
- tm->tm_mon = -1;
- if (tm->tm_mday >= 0xff)
- tm->tm_mday = -1;
- if (tm->tm_wday >= 0xff)
- tm->tm_wday = -1;
- if (tm->tm_hour >= 0xff)
- tm->tm_hour = -1;
- if (tm->tm_min >= 0xff)
- tm->tm_min = -1;
- if (tm->tm_sec >= 0xff)
- tm->tm_sec = -1;
-
- if (tm->tm_year > 9999 ||
- tm->tm_mon >= 12 ||
- tm->tm_mday == 0 || tm->tm_mday >= 32 ||
- tm->tm_wday >= 7 ||
- tm->tm_hour >= 24 ||
- tm->tm_min >= 60 ||
- tm->tm_sec >= 60)
- return -EINVAL;
-
- return 0;
-}
-
static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct platform_device *pdev = to_platform_device(dev);
struct sh_rtc *rtc = platform_get_drvdata(pdev);
unsigned int rcr1;
struct rtc_time *tm = &wkalrm->time;
- int mon, err;
-
- err = sh_rtc_check_alarm(tm);
- if (unlikely(err < 0))
- return err;
+ int mon;
spin_lock_irq(&rtc->lock);
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 60232bd366ef..15ac597d54da 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -179,12 +179,6 @@ static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (sec == 0) {
/* alarm is disabled. */
alarm->enabled = 0;
- alarm->time.tm_mon = -1;
- alarm->time.tm_mday = -1;
- alarm->time.tm_year = -1;
- alarm->time.tm_hour = -1;
- alarm->time.tm_min = -1;
- alarm->time.tm_sec = -1;
} else {
/* alarm is enabled. */
alarm->enabled = 1;
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 7a0436329d6c..1f3117b5a83c 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -25,7 +25,7 @@
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
-#include <linux/rtc-v3020.h>
+#include <linux/platform_data/rtc-v3020.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 0ac520dd1b21..c71df0c7dedc 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -46,7 +46,8 @@ struct read_info_sccb {
u64 rnmax2; /* 104-111 */
u8 _pad_112[116 - 112]; /* 112-115 */
u8 fac116; /* 116 */
- u8 _pad_117[119 - 117]; /* 117-118 */
+ u8 fac117; /* 117 */
+ u8 _pad_118; /* 118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
@@ -114,7 +115,12 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp.facilities = sccb->facilities;
sclp.has_sprp = !!(sccb->fac84 & 0x02);
sclp.has_core_type = !!(sccb->fac84 & 0x01);
+ sclp.has_gsls = !!(sccb->fac85 & 0x80);
+ sclp.has_64bscao = !!(sccb->fac116 & 0x80);
+ sclp.has_cmma = !!(sccb->fac116 & 0x40);
sclp.has_esca = !!(sccb->fac116 & 0x08);
+ sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
+ sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
@@ -145,6 +151,10 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp.has_siif = cpue->siif;
sclp.has_sigpif = cpue->sigpif;
sclp.has_sief2 = cpue->sief2;
+ sclp.has_gpere = cpue->gpere;
+ sclp.has_ib = cpue->ib;
+ sclp.has_cei = cpue->cei;
+ sclp.has_skey = cpue->skey;
break;
}
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index 2553db0fdb52..f59b71776bbd 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -26,7 +26,7 @@
#define OCF_LENGTH_CPC_NAME 8UL
static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
-static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
+static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */
static DEFINE_SPINLOCK(sclp_ocf_lock);
static struct work_struct sclp_ocf_change_work;
@@ -72,9 +72,8 @@ static void sclp_ocf_handler(struct evbuf_header *evbuf)
}
if (cpc) {
size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
+ memset(cpc_name, 0, OCF_LENGTH_CPC_NAME);
memcpy(cpc_name, cpc + 1, size);
- EBCASC(cpc_name, size);
- cpc_name[size] = 0;
}
spin_unlock(&sclp_ocf_lock);
schedule_work(&sclp_ocf_change_work);
@@ -85,15 +84,23 @@ static struct sclp_register sclp_ocf_event = {
.receiver_fn = sclp_ocf_handler,
};
+void sclp_ocf_cpc_name_copy(char *dst)
+{
+ spin_lock_irq(&sclp_ocf_lock);
+ memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME);
+ spin_unlock_irq(&sclp_ocf_lock);
+}
+EXPORT_SYMBOL(sclp_ocf_cpc_name_copy);
+
static ssize_t cpc_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
- int rc;
+ char name[OCF_LENGTH_CPC_NAME + 1];
- spin_lock_irq(&sclp_ocf_lock);
- rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
- spin_unlock_irq(&sclp_ocf_lock);
- return rc;
+ sclp_ocf_cpc_name_copy(name);
+ name[OCF_LENGTH_CPC_NAME] = 0;
+ EBCASC(name, OCF_LENGTH_CPC_NAME);
+ return snprintf(page, PAGE_SIZE, "%s\n", name);
}
static struct kobj_attribute cpc_name_attr =
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index e96aced58627..46be25c7461e 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -37,8 +37,7 @@ enum cfg_task_t {
/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
-static DEFINE_MUTEX(cfg_lock);
-static int cfg_busy;
+static DEFINE_SPINLOCK(cfg_lock);
/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
@@ -666,6 +665,20 @@ static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
+/* Fetch the first configure task. Set chpid accordingly. */
+static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
+{
+ enum cfg_task_t t = cfg_none;
+
+ chp_id_for_each(chpid) {
+ t = cfg_get_task(*chpid);
+ if (t != cfg_none)
+ break;
+ }
+
+ return t;
+}
+
/* Perform one configure/deconfigure request. Reschedule work function until
* last request. */
static void cfg_func(struct work_struct *work)
@@ -674,16 +687,9 @@ static void cfg_func(struct work_struct *work)
enum cfg_task_t t;
int rc;
- mutex_lock(&cfg_lock);
- t = cfg_none;
- chp_id_for_each(&chpid) {
- t = cfg_get_task(chpid);
- if (t != cfg_none) {
- cfg_set_task(chpid, cfg_none);
- break;
- }
- }
- mutex_unlock(&cfg_lock);
+ spin_lock(&cfg_lock);
+ t = chp_cfg_fetch_task(&chpid);
+ spin_unlock(&cfg_lock);
switch (t) {
case cfg_configure:
@@ -709,12 +715,13 @@ static void cfg_func(struct work_struct *work)
case cfg_none:
/* Get updated information after last change. */
info_update();
- mutex_lock(&cfg_lock);
- cfg_busy = 0;
- mutex_unlock(&cfg_lock);
wake_up_interruptible(&cfg_wait_queue);
return;
}
+ spin_lock(&cfg_lock);
+ if (t == cfg_get_task(chpid))
+ cfg_set_task(chpid, cfg_none);
+ spin_unlock(&cfg_lock);
schedule_work(&cfg_work);
}
@@ -729,10 +736,9 @@ void chp_cfg_schedule(struct chp_id chpid, int configure)
{
CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
configure);
- mutex_lock(&cfg_lock);
+ spin_lock(&cfg_lock);
cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
- cfg_busy = 1;
- mutex_unlock(&cfg_lock);
+ spin_unlock(&cfg_lock);
schedule_work(&cfg_work);
}
@@ -746,15 +752,27 @@ void chp_cfg_schedule(struct chp_id chpid, int configure)
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
- mutex_lock(&cfg_lock);
+ spin_lock(&cfg_lock);
if (cfg_get_task(chpid) == cfg_deconfigure)
cfg_set_task(chpid, cfg_none);
- mutex_unlock(&cfg_lock);
+ spin_unlock(&cfg_lock);
+}
+
+static bool cfg_idle(void)
+{
+ struct chp_id chpid;
+ enum cfg_task_t t;
+
+ spin_lock(&cfg_lock);
+ t = chp_cfg_fetch_task(&chpid);
+ spin_unlock(&cfg_lock);
+
+ return t == cfg_none;
}
static int cfg_wait_idle(void)
{
- if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
+ if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
return -ERESTARTSYS;
return 0;
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 4feb27215ab6..ed92fb09fc8e 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -468,6 +468,8 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
struct ap_queue_status status;
+ if (msg == NULL)
+ return -EINVAL;
status = __ap_recv(qid, psmid, msg, length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -617,6 +619,8 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
{
struct ap_queue_status status;
+ if (!ap_dev->reply)
+ return AP_WAIT_NONE;
status = ap_sm_recv(ap_dev);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -638,6 +642,31 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
}
/**
+ * ap_sm_suspend_read(): Receive pending reply messages from an AP device
+ * without changing the device state in between. In suspend mode we don't
+ * allow sending new requests, therefore just fetch pending replies.
+ * @ap_dev: pointer to the AP device
+ *
+ * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
+ */
+static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
+{
+ struct ap_queue_status status;
+
+ if (!ap_dev->reply)
+ return AP_WAIT_NONE;
+ status = ap_sm_recv(ap_dev);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (ap_dev->queue_count > 0)
+ return AP_WAIT_AGAIN;
+ /* fall through */
+ default:
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
* ap_sm_write(): Send messages from the request queue to an AP device.
* @ap_dev: pointer to the AP device
*
@@ -738,7 +767,7 @@ static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
struct ap_queue_status status;
unsigned long info;
- if (ap_dev->queue_count > 0)
+ if (ap_dev->queue_count > 0 && ap_dev->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(ap_dev);
else
@@ -778,7 +807,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
struct ap_queue_status status;
unsigned long info;
- if (ap_dev->queue_count > 0)
+ if (ap_dev->queue_count > 0 && ap_dev->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(ap_dev);
else
@@ -834,7 +863,7 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_read,
+ [AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_BORKED] = {
@@ -1335,6 +1364,17 @@ static struct bus_type ap_bus_type = {
.resume = ap_dev_resume,
};
+void ap_device_init_reply(struct ap_device *ap_dev,
+ struct ap_message *reply)
+{
+ ap_dev->reply = reply;
+
+ spin_lock_bh(&ap_dev->lock);
+ ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
+ spin_unlock_bh(&ap_dev->lock);
+}
+EXPORT_SYMBOL(ap_device_init_reply);
+
static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1779,7 +1819,8 @@ int __init ap_module_init(void)
if (ap_domain_index < -1 || ap_domain_index > max_domain_id) {
pr_warn("%d is not a valid cryptographic domain\n",
ap_domain_index);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out_free;
}
/* In resume callback we need to know if the user had set the domain.
* If so, we can not just reset it.
@@ -1852,6 +1893,7 @@ out:
unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
+out_free:
kfree(ap_configuration);
return rc;
}
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6adcbdf225d1..d7fdf5c024d7 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -262,6 +262,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_device *ap_dev);
void ap_bus_force_rescan(void);
+void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 1e849d6e1dfe..15104aaa075a 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -126,7 +126,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
MSGTYPE50_VARIANT_DEFAULT);
zdev->ap_dev = ap_dev;
zdev->online = 1;
- ap_dev->reply = &zdev->reply;
+ ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc) {
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index bb3908818505..ccb2e78ebf0e 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -147,7 +147,7 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
return -ENODEV;
zdev->ap_dev = ap_dev;
zdev->online = 1;
- ap_dev->reply = &zdev->reply;
+ ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc) {
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index f41852768953..df8f0c4dacb7 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -327,7 +327,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
else
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
- ap_dev->reply = &zdev->reply;
+ ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 1918f5483b23..7d1b4317eccc 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -838,6 +838,23 @@ config SCSI_IBMVSCSI
To compile this driver as a module, choose M here: the
module will be called ibmvscsi.
+config SCSI_IBMVSCSIS
+ tristate "IBM Virtual SCSI Server support"
+ depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI
+ help
+ This is the IBM POWER Virtual SCSI Target Server.
+ This driver uses the SRP protocol for communication between guests
+ and/or the host that run on the same server.
+ More information on VSCSI protocol can be found at www.power.org
+
+ The userspace configuration needed to initialize the driver can be
+ found here:
+
+ https://github.com/powervm/ibmvscsis/wiki/Configuration
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmvscsis.
+
config SCSI_IBMVFC
tristate "IBM Virtual FC support"
depends on PPC_PSERIES && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 862ab4efad61..d5397987e731 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -128,6 +128,7 @@ obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi_tgt/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 860008d42466..661bb94e2548 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -778,7 +778,7 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct sisl_global_map __iomem *global = &afu->afu_map->global;
+ struct sisl_global_map __iomem *global;
struct dev_dependent_vals *ddv;
u64 reg, status;
int i, retry_cnt = 0;
@@ -787,6 +787,14 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
return;
+ if (!afu || !afu->afu_map) {
+ dev_dbg(dev, "%s: The problem state area is not mapped\n",
+ __func__);
+ return;
+ }
+
+ global = &afu->afu_map->global;
+
/* Notify AFU */
for (i = 0; i < NUM_FC_PORTS; i++) {
reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index f54bbd5a6062..e43545c86bcf 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -102,8 +102,4 @@ struct asyc_intr_info {
#define SCAN_HOST 0x04
};
-#ifndef CONFIG_CXL_EEH
-#define cxl_perst_reloads_same_image(_a, _b) do { } while (0)
-#endif
-
#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c8a4305c7662..9bd41a35a78a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -92,6 +92,8 @@ static struct fcoe_interface
static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
+static int fcoe_fip_vlan_recv(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
static void fcoe_update_src_mac(struct fc_lport *, u8 *);
@@ -363,6 +365,12 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
fcoe->fip_packet_type.dev = netdev;
dev_add_pack(&fcoe->fip_packet_type);
+ if (netdev != real_dev) {
+ fcoe->fip_vlan_packet_type.func = fcoe_fip_vlan_recv;
+ fcoe->fip_vlan_packet_type.type = htons(ETH_P_FIP);
+ fcoe->fip_vlan_packet_type.dev = real_dev;
+ dev_add_pack(&fcoe->fip_vlan_packet_type);
+ }
return 0;
}
@@ -450,6 +458,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
*/
__dev_remove_pack(&fcoe->fcoe_packet_type);
__dev_remove_pack(&fcoe->fip_packet_type);
+ if (netdev != fcoe->realdev)
+ __dev_remove_pack(&fcoe->fip_vlan_packet_type);
synchronize_net();
/* Delete secondary MAC addresses */
@@ -520,6 +530,29 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
}
/**
+ * fcoe_fip_vlan_recv() - Handler for received FIP VLAN discovery frames
+ * @skb: The receive skb
+ * @netdev: The associated net device
+ * @ptype: The packet_type structure which was used to register this handler
+ * @orig_dev: The original net_device that the skb was received on.
+ * (in case dev is a bond)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_fip_vlan_recv(struct sk_buff *skb, struct net_device *netdev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
+
+ fcoe = container_of(ptype, struct fcoe_interface, fip_vlan_packet_type);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_recv(ctlr, skb);
+ return 0;
+}
+
+/**
* fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
* @port: The FCoE port
* @skb: The FIP/FCoE packet to be sent
@@ -539,7 +572,21 @@ static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
*/
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
- skb->dev = fcoe_from_ctlr(fip)->netdev;
+ struct fcoe_interface *fcoe = fcoe_from_ctlr(fip);
+ struct fip_frame {
+ struct ethhdr eth;
+ struct fip_header fip;
+ } __packed *frame;
+
+ /*
+ * Use default VLAN for FIP VLAN discovery protocol
+ */
+ frame = (struct fip_frame *)skb->data;
+ if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+ fcoe->realdev != fcoe->netdev)
+ skb->dev = fcoe->realdev;
+ else
+ skb->dev = fcoe->netdev;
fcoe_port_send(lport_priv(fip->lp), skb);
}
@@ -2448,7 +2495,7 @@ static int __init fcoe_init(void)
if (rc) {
printk(KERN_ERR "failed to register an fcoe transport, check "
"if libfcoe is loaded\n");
- return rc;
+ goto out_destroy;
}
mutex_lock(&fcoe_config_mutex);
@@ -2471,6 +2518,7 @@ static int __init fcoe_init(void)
out_free:
mutex_unlock(&fcoe_config_mutex);
+out_destroy:
destroy_workqueue(fcoe_wq);
return rc;
}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 2b53672bf932..6aa4820f6cc0 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -80,6 +80,7 @@ struct fcoe_interface {
struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
+ struct packet_type fip_vlan_packet_type;
struct fc_exch_mgr *oem;
u8 removed;
u8 priority;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 8fae03215a85..5c70a52ad346 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -26,7 +26,7 @@
#include <linux/list.h>
#include <linux/types.h>
-#include "viosrp.h"
+#include <scsi/viosrp.h>
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.11"
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 1067367395cd..e0f6c3aeb4ee 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -33,7 +33,7 @@
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
-#include "viosrp.h"
+#include <scsi/viosrp.h>
struct scsi_cmnd;
struct Scsi_Host;
diff --git a/drivers/scsi/ibmvscsi_tgt/Makefile b/drivers/scsi/ibmvscsi_tgt/Makefile
new file mode 100644
index 000000000000..0c060ce64cb0
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsis.o
+
+ibmvscsis-y := libsrp.o ibmvscsi_tgt.o
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
new file mode 100644
index 000000000000..b29fef9d0f27
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -0,0 +1,4087 @@
+/*******************************************************************************
+ * IBM Virtual SCSI Target Driver
+ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
+ * Santiago Leon (santil@us.ibm.com) IBM Corp.
+ * Linda Xie (lxie@us.ibm.com) IBM Corp.
+ *
+ * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+ * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/string.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <asm/hvcall.h>
+#include <asm/vio.h>
+
+#include <scsi/viosrp.h>
+
+#include "ibmvscsi_tgt.h"
+
+#define IBMVSCSIS_VERSION "v0.2"
+
+#define INITIAL_SRP_LIMIT 800
+#define DEFAULT_MAX_SECTORS 256
+
+static uint max_vdma_size = MAX_H_COPY_RDMA;
+
+static char system_id[SYS_ID_NAME_LEN] = "";
+static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
+static uint partition_number = -1;
+
+/* Adapter list and lock to control it */
+static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
+static LIST_HEAD(ibmvscsis_dev_list);
+
+static long ibmvscsis_parse_command(struct scsi_info *vscsi,
+ struct viosrp_crq *crq);
+
+static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
+
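+/**
+ * ibmvscsis_determine_resid() - Fill in SRP residual count information
+ * @se_cmd: Command being completed by the target core
+ * @rsp: SRP response being built for the client
+ *
+ * Translates the target core's over/underflow accounting into the
+ * corresponding SRP response flags and residual counts.
+ */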
+static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
+ struct srp_rsp *rsp)
+{
+ u32 residual_count = se_cmd->residual_count;
+
+ if (!residual_count)
+ return;
+
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ rsp->data_out_res_cnt = cpu_to_be32(residual_count);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ rsp->data_in_res_cnt = cpu_to_be32(residual_count);
+ }
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ rsp->flags = SRP_RSP_FLAG_DOOVER;
+ rsp->data_out_res_cnt = cpu_to_be32(residual_count);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ rsp->flags = SRP_RSP_FLAG_DIOVER;
+ rsp->data_in_res_cnt = cpu_to_be32(residual_count);
+ }
+ }
+}
+
+/**
+ * connection_broken() - Determine if the connection to the client is good
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function attempts to send a ping MAD to the client. If the call to
+ * queue the request returns H_CLOSED then the connection has been broken
+ * and the function returns TRUE.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt or Process environment
+ */
+static bool connection_broken(struct scsi_info *vscsi)
+{
+ struct viosrp_crq *crq;
+ u64 buffer[2] = { 0, 0 };
+ long h_return_code;
+ bool rc = false;
+
+ /* create a PING crq */
+ crq = (struct viosrp_crq *)&buffer;
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = MESSAGE_IN_CRQ;
+ crq->status = PING;
+
+ h_return_code = h_send_crq(vscsi->dds.unit_id,
+ cpu_to_be64(buffer[MSG_HI]),
+ cpu_to_be64(buffer[MSG_LOW]));
+
+ pr_debug("connection_broken: rc %ld\n", h_return_code);
+
+ if (h_return_code == H_CLOSED)
+ rc = true;
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls h_free_crq and then frees the interrupt bit, etc.
+ * It must release the lock before doing so because of the time it can take
+ * for h_free_crq in PHYP.
+ * NOTE: the caller must make sure that state and/or flags will prevent the
+ * interrupt handler from scheduling work.
+ * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
+ * we can't do it here, because we don't have the lock.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level
+ */
+static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
+{
+ long qrc;
+ long rc = ADAPT_SUCCESS;
+ int ticks = 0;
+
+ do {
+ qrc = h_free_crq(vscsi->dds.unit_id);
+ switch (qrc) {
+ case H_SUCCESS:
+ break;
+
+ case H_HARDWARE:
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
+ qrc);
+ rc = ERROR;
+ break;
+
+ case H_BUSY:
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ /* msleep not good for small values */
+ usleep_range(1000, 2000);
+ ticks += 1;
+ break;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ usleep_range(10000, 20000);
+ ticks += 10;
+ break;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ msleep(100);
+ ticks += 100;
+ break;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ ssleep(1);
+ ticks += 1000;
+ break;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ ssleep(10);
+ ticks += 10000;
+ break;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ ssleep(100);
+ ticks += 100000;
+ break;
+ default:
+ dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
+ qrc);
+ rc = ERROR;
+ break;
+ }
+
+ /*
+ * don't wait more than 300 seconds;
+ * ticks are in milliseconds, more or less
+ */
+ if (ticks > 300000 && qrc != H_SUCCESS) {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
+ }
+ } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
+
+ pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
+ * @vscsi: Pointer to our adapter structure
+ * @client_closed: True if client closed its queue
+ *
+ * Deletes information specific to the client when the client goes away
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt or Process
+ */
+static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
+ bool client_closed)
+{
+ vscsi->client_cap = 0;
+
+ /*
+ * Some things we don't want to clear if we're closing the queue,
+ * because some clients don't resend the host handshake when they
+ * get a transport event.
+ */
+ if (client_closed)
+ vscsi->client_data.os_type = 0;
+}
+
+/**
+ * ibmvscsis_free_command_q() - Free Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls unregister_command_q, then clears interrupts and
+ * any pending interrupt acknowledgments associated with the command q.
+ * It also clears memory if there is no error.
+ *
+ * PHYP did not meet the PAPR architecture, so we must give up the
+ * lock. This causes a timing hole regarding state change. To close the
+ * hole this routine does accounting on any change that occurred during
+ * the time the lock is not held.
+ * NOTE: this routine must give up and then reacquire the interrupt lock;
+ * the caller must make sure that state and/or flags will prevent the
+ * interrupt handler from scheduling work.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level, interrupt lock is held
+ */
+static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
+{
+ int bytes;
+ u32 flags_under_lock;
+ u16 state_under_lock;
+ long rc = ADAPT_SUCCESS;
+
+ if (!(vscsi->flags & CRQ_CLOSED)) {
+ vio_disable_interrupts(vscsi->dma_dev);
+
+ state_under_lock = vscsi->new_state;
+ flags_under_lock = vscsi->flags;
+ vscsi->phyp_acr_state = 0;
+ vscsi->phyp_acr_flags = 0;
+
+ spin_unlock_bh(&vscsi->intr_lock);
+ rc = ibmvscsis_unregister_command_q(vscsi);
+ spin_lock_bh(&vscsi->intr_lock);
+
+ if (state_under_lock != vscsi->new_state)
+ vscsi->phyp_acr_state = vscsi->new_state;
+
+ vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
+
+ if (rc == ADAPT_SUCCESS) {
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ memset(vscsi->cmd_q.base_addr, 0, bytes);
+ vscsi->cmd_q.index = 0;
+ vscsi->flags |= CRQ_CLOSED;
+
+ ibmvscsis_delete_client_info(vscsi, false);
+ }
+
+ pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
+ }
+ return rc;
+}
+
+/**
+ * ibmvscsis_cmd_q_dequeue() - Get valid Command element
+ * @mask: Mask to use in case index wraps
+ * @current_index: Current index into command queue
+ * @base_addr: Pointer to start of command queue
+ *
+ * Returns a pointer to a valid command element or NULL, if the command
+ * queue is empty
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt environment, interrupt lock held
+ */
+static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
+ uint *current_index,
+ struct viosrp_crq *base_addr)
+{
+ struct viosrp_crq *ptr;
+
+ ptr = base_addr + *current_index;
+
+ if (ptr->valid) {
+ *current_index = (*current_index + 1) & mask;
+ dma_rmb();
+ } else {
+ ptr = NULL;
+ }
+
+ return ptr;
+}
+
+/**
+ * ibmvscsis_send_init_message() - send initialize message to the client
+ * @vscsi: Pointer to our adapter structure
+ * @format: Which Init Message format to send
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt environment interrupt lock held
+ */
+static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
+{
+ struct viosrp_crq *crq;
+ u64 buffer[2] = { 0, 0 };
+ long rc;
+
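+ /* build the INIT message CRQ element in a local two-word buffer */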
+ crq = (struct viosrp_crq *)&buffer;
+ crq->valid = VALID_INIT_MSG;
+ crq->format = format;
+ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
+ cpu_to_be64(buffer[MSG_LOW]));
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_check_init_msg() - Check init message valid
+ * @vscsi: Pointer to our adapter structure
+ * @format: Pointer to return format of Init Message, if any.
+ * Set to UNUSED_FORMAT if no Init Message in queue.
+ *
+ * Checks if an initialize message was queued by the initiator
+ * after the queue was created and before the interrupt was enabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only, interrupt lock held
+ */
+static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
+{
+ struct viosrp_crq *crq;
+ long rc = ADAPT_SUCCESS;
+
+ crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
+ vscsi->cmd_q.base_addr);
+ if (!crq) {
+ *format = (uint)UNUSED_FORMAT;
+ } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
+ *format = (uint)INIT_MSG;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+
+ /*
+ * the caller has ensured no initialize message was sent after
+ * the queue was created, so there should be no other message on
+ * the queue.
+ */
+ crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
+ &vscsi->cmd_q.index,
+ vscsi->cmd_q.base_addr);
+ if (crq) {
+ *format = (uint)(crq->format);
+ rc = ERROR;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+ }
+ } else {
+ *format = (uint)(crq->format);
+ rc = ERROR;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi: Pointer to our adapter structure
+ * @new_state: New state being established after resetting the queue
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
+{
+ long rc = ADAPT_SUCCESS;
+ uint format;
+
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+
+ rc = vio_enable_interrupts(vscsi->dma_dev);
+ if (rc) {
+ pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc) {
+ dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi: Pointer to our adapter structure
+ * @new_state: New state to establish after resetting the queue
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq. A problem was discovered
+ * where one partition would close and reopen its queue, which would
+ * cause its partner to get a transport event, which would cause it to
+ * close and reopen its queue, which would cause the original partition
+ * to get a transport event, etc., etc. To prevent this, we don't
+ * actually close our queue if the client initiated the reset (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone).
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
+{
+ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+ /* don't reset, the client did it for us */
+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ vscsi->state = new_state;
+ vio_enable_interrupts(vscsi->dma_dev);
+ } else {
+ rc = ibmvscsis_free_command_q(vscsi);
+ if (rc == ADAPT_SUCCESS) {
+ vscsi->state = new_state;
+
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id,
+ vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ rc = ibmvscsis_establish_new_q(vscsi,
+ new_state);
+ }
+
+ if (rc != ADAPT_SUCCESS) {
+ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_command_q(vscsi);
+ }
+ } else {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+ }
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+
+ switch (cmd->type) {
+ case TASK_MANAGEMENT:
+ case SCSI_CDB:
+ /*
+ * When the queue goes down this value is cleared, so it
+ * cannot be cleared in this general purpose function.
+ */
+ if (vscsi->debit)
+ vscsi->debit -= 1;
+ break;
+ case ADAPTER_MAD:
+ vscsi->flags &= ~PROCESSING_MAD;
+ break;
+ case UNSET_TYPE:
+ break;
+ default:
+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+ cmd->type);
+ break;
+ }
+
+ cmd->iue = NULL;
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ srp_iu_put(iue);
+
+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+}
+
+/**
+ * ibmvscsis_disconnect() - Helper function to disconnect
+ * @work: Pointer to work_struct, gives access to our adapter structure
+ *
+ * An error has occurred or the driver received a Transport event,
+ * and the driver is requesting that the command queue be de-registered
+ * in a safe manner. If there is no outstanding I/O then we can stop the
+ * queue. If we are restarting the queue it will be reflected in the
+ * state of the adapter.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment
+ */
+static void ibmvscsis_disconnect(struct work_struct *work)
+{
+ struct scsi_info *vscsi = container_of(work, struct scsi_info,
+ proc_work);
+ u16 new_state;
+ bool wait_idle = false;
+ long rc = ADAPT_SUCCESS;
+
+ spin_lock_bh(&vscsi->intr_lock);
+ new_state = vscsi->new_state;
+ vscsi->new_state = 0;
+
+ pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
+ vscsi->state);
+
+ /*
+ * check which state we are in and see if we
+ * should transition to the new state
+ */
+ switch (vscsi->state) {
+ /* Should never be called while in this state. */
+ case NO_QUEUE:
+ /*
+ * Can never transition from this state;
+ * ignore errors and log out.
+ */
+ case UNCONFIGURING:
+ break;
+
+ /* can transition from this state to UNCONFIGURING */
+ case ERR_DISCONNECT:
+ if (new_state == UNCONFIGURING)
+ vscsi->state = new_state;
+ break;
+
+ /*
+ * Can transition from this state to unconfiguring
+ * or err disconnect.
+ */
+ case ERR_DISCONNECT_RECONNECT:
+ switch (new_state) {
+ case UNCONFIGURING:
+ case ERR_DISCONNECT:
+ vscsi->state = new_state;
+ break;
+
+ case WAIT_IDLE:
+ break;
+ default:
+ break;
+ }
+ break;
+
+ /* can transition from this state to UNCONFIGURING */
+ case ERR_DISCONNECTED:
+ if (new_state == UNCONFIGURING)
+ vscsi->state = new_state;
+ break;
+
+ /*
+ * If this is a transition into an error state,
+ * a client is attempting to establish a connection
+ * and has violated the RPA protocol.
+ * There can be nothing pending on the adapter although
+ * there can be requests in the command queue.
+ */
+ case WAIT_ENABLED:
+ case PART_UP_WAIT_ENAB:
+ switch (new_state) {
+ case ERR_DISCONNECT:
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = new_state;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+ ibmvscsis_free_command_q(vscsi);
+ break;
+ case ERR_DISCONNECT_RECONNECT:
+ ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+ break;
+
+ /* should never happen */
+ case WAIT_IDLE:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
+ vscsi->state);
+ break;
+ }
+ break;
+
+ case WAIT_IDLE:
+ switch (new_state) {
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ vscsi->state = new_state;
+ break;
+ }
+ break;
+
+ /*
+ * The initiator has not done a successful SRP login
+ * or has done a successful SRP logout (the adapter was not
+ * busy). In the first case there can be responses queued
+ * waiting for space on the initiator's response queue (MAD).
+ * In the second case the adapter is idle. Assume the worst case,
+ * i.e. the second case.
+ */
+ case WAIT_CONNECTION:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ wait_idle = true;
+ vscsi->state = new_state;
+ break;
+
+ /* can transition from this state to UNCONFIGURING */
+ case UNDEFINED:
+ if (new_state == UNCONFIGURING)
+ vscsi->state = new_state;
+ break;
+ default:
+ break;
+ }
+
+ if (wait_idle) {
+ pr_debug("disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
+ if (!list_empty(&vscsi->active_q) ||
+ !list_empty(&vscsi->schedule_q)) {
+ vscsi->flags |= WAIT_FOR_IDLE;
+ pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ /*
+ * wait_for_completion() cannot be called with the
+ * interrupt lock held.
+ */
+ spin_unlock_bh(&vscsi->intr_lock);
+ wait_for_completion(&vscsi->wait_idle);
+ spin_lock_bh(&vscsi->intr_lock);
+ }
+ pr_debug("disconnect stop wait\n");
+
+ ibmvscsis_adapter_idle(vscsi);
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+/**
+ * ibmvscsis_post_disconnect() - Schedule the disconnect
+ * @vscsi: Pointer to our adapter structure
+ * @new_state: State to move to after disconnecting
+ * @flag_bits: Flags to turn on in adapter structure
+ *
+ * If it's already been scheduled, then see if we need to "upgrade"
+ * the new state (if the one passed in is more "severe" than the
+ * previous one).
+ *
+ * PRECONDITION:
+ * interrupt lock is held
+ */
+static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
+ uint flag_bits)
+{
+ uint state;
+
+ /* check the validity of the new state */
+ switch (new_state) {
+ case UNCONFIGURING:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case WAIT_IDLE:
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
+ new_state);
+ return;
+ }
+
+ vscsi->flags |= flag_bits;
+
+ pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
+
+ if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
+ vscsi->flags |= SCHEDULE_DISCONNECT;
+ vscsi->new_state = new_state;
+
+ INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
+ (void)queue_work(vscsi->work_q, &vscsi->proc_work);
+ } else {
+ if (vscsi->new_state)
+ state = vscsi->new_state;
+ else
+ state = vscsi->state;
+
+ switch (state) {
+ case NO_QUEUE:
+ case UNCONFIGURING:
+ break;
+
+ case ERR_DISCONNECTED:
+ case ERR_DISCONNECT:
+ case UNDEFINED:
+ if (new_state == UNCONFIGURING)
+ vscsi->new_state = new_state;
+ break;
+
+ case ERR_DISCONNECT_RECONNECT:
+ switch (new_state) {
+ case UNCONFIGURING:
+ case ERR_DISCONNECT:
+ vscsi->new_state = new_state;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case WAIT_ENABLED:
+ case PART_UP_WAIT_ENAB:
+ case WAIT_IDLE:
+ case WAIT_CONNECTION:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ vscsi->new_state = new_state;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
+}
+
+/**
+ * ibmvscsis_trans_event() - Handle a Transport Event
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ entry containing the Transport Event
+ *
+ * Do the logic to close the I_T nexus. This function may not
+ * behave to specification.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_trans_event(struct scsi_info *vscsi,
+ struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
+
+ switch (crq->format) {
+ case MIGRATED:
+ case PARTNER_FAILED:
+ case PARTNER_DEREGISTER:
+ ibmvscsis_delete_client_info(vscsi, true);
+ break;
+
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
+ RESPONSE_Q_DOWN);
+ break;
+ }
+
+ if (rc == ADAPT_SUCCESS) {
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECTED:
+ case UNDEFINED:
+ break;
+
+ case UNCONFIGURING:
+ vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
+ break;
+
+ case WAIT_ENABLED:
+ break;
+
+ case WAIT_CONNECTION:
+ break;
+
+ case CONNECTED:
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
+ (RESPONSE_Q_DOWN |
+ TRANS_EVENT));
+ break;
+
+ case PART_UP_WAIT_ENAB:
+ vscsi->state = WAIT_ENABLED;
+ break;
+
+ case SRP_PROCESSING:
+ if ((vscsi->debit > 0) ||
+ !list_empty(&vscsi->schedule_q) ||
+ !list_empty(&vscsi->waiting_rsp) ||
+ !list_empty(&vscsi->active_q)) {
+ pr_debug("debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ pr_warn("connection lost with outstanding work\n");
+ } else {
+ pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ }
+
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
+ (RESPONSE_Q_DOWN |
+ TRANS_EVENT));
+ break;
+
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case WAIT_IDLE:
+ vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
+ break;
+ }
+ }
+
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+
+ pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_poll_cmd_q() - Poll Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Called to handle command elements that may have arrived while
+ * interrupts were disabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ * intr_lock must be held
+ */
+static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
+{
+ struct viosrp_crq *crq;
+ long rc;
+ bool ack = true;
+ volatile u8 valid;
+
+ pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+
+ while (valid) {
+poll_work:
+ vscsi->cmd_q.index =
+ (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
+
+ if (!rc) {
+ rc = ibmvscsis_parse_command(vscsi, crq);
+ } else {
+ if ((uint)crq->valid == VALID_TRANS_EVENT) {
+ /*
+ * must service the transport layer events even
+ * in an error state; don't break out until all
+ * the consecutive transport events have been
+ * processed
+ */
+ rc = ibmvscsis_trans_event(vscsi, crq);
+ } else if (vscsi->flags & TRANS_EVENT) {
+ /*
+ * if a transport event has occurred, leave
+ * everything but transport events on the queue
+ */
+ pr_debug("poll_cmd_q, ignoring\n");
+
+ /*
+ * need to decrement the queue index so we can
+ * look at the element again
+ */
+ if (vscsi->cmd_q.index)
+ vscsi->cmd_q.index -= 1;
+ else
+ /*
+ * index is at 0, so it just wrapped;
+ * have it index the last element in the queue
+ */
+ vscsi->cmd_q.index = vscsi->cmd_q.mask;
+ break;
+ }
+ }
+
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+ }
+
+ if (!rc) {
+ if (ack) {
+ vio_enable_interrupts(vscsi->dma_dev);
+ ack = false;
+ pr_debug("poll_cmd_q, reenabling interrupts\n");
+ }
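+ /*
+ * re-check the current queue element in case one arrived while
+ * interrupts were being re-enabled; if so, go back and process it.
+ */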
+ valid = crq->valid;
+ dma_rmb();
+ if (valid)
+ goto poll_work;
+ }
+
+ pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+}
+
+/**
+ * ibmvscsis_free_cmd_qs() - Free elements in queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Free all of the elements on all queues that are waiting for
+ * whatever reason.
+ *
+ * PRECONDITION:
+ * Called with interrupt lock held
+ */
+static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
+{
+ struct ibmvscsis_cmd *cmd, *nxt;
+
+ pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
+
+ list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ }
+}
+
+/**
+ * ibmvscsis_get_free_cmd() - Get free command from list
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
+{
+ struct ibmvscsis_cmd *cmd = NULL;
+ struct iu_entry *iue;
+
+ iue = srp_iu_get(&vscsi->target);
+ if (iue) {
+ cmd = list_first_entry_or_null(&vscsi->free_cmd,
+ struct ibmvscsis_cmd, list);
+ if (cmd) {
+ list_del(&cmd->list);
+ cmd->iue = iue;
+ cmd->type = UNSET_TYPE;
+ memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
+ } else {
+ srp_iu_put(iue);
+ }
+ }
+
+ return cmd;
+}
+
+/**
+ * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function is called when the adapter is idle and the driver
+ * is attempting to clear an error condition.
+ * The adapter is considered busy if any of its cmd queues
+ * are non-empty. This function can be invoked
+ * from the off level disconnect function.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment called with interrupt lock held
+ */
+static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
+{
+ int free_qs = false;
+
+ pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
+ vscsi->state);
+
+ /* Only need to free qs if we're disconnecting from client */
+ if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
+ free_qs = true;
+
+ switch (vscsi->state) {
+ case ERR_DISCONNECT_RECONNECT:
+ ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+ pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ break;
+
+ case ERR_DISCONNECT:
+ ibmvscsis_free_command_q(vscsi);
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = ERR_DISCONNECTED;
+ pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ break;
+
+ case WAIT_IDLE:
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ if (vscsi->flags & TRANS_EVENT) {
+ vscsi->state = WAIT_CONNECTION;
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ } else {
+ vscsi->state = CONNECTED;
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ }
+
+ pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ ibmvscsis_poll_cmd_q(vscsi);
+ break;
+
+ case ERR_DISCONNECTED:
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
+ vscsi->state);
+ break;
+ }
+
+ if (free_qs)
+ ibmvscsis_free_cmd_qs(vscsi);
+
+ /*
+ * There is a timing window where we could lose a disconnect request.
+ * The known path to this window occurs during the DISCONNECT_RECONNECT
+ * case above: reset_queue calls free_command_q, which will release the
+ * interrupt lock. During that time, a new post_disconnect call can be
+ * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
+ * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
+ * will only set the new_state. Now free_command_q reacquires the intr
+ * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
+ * FIELDS), and the disconnect is lost. This is particularly bad when
+ * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
+ * forever.
+ * The fix is that free_command_q sets the acr state and acr flags if
+ * there is a change while the lock is dropped.
+ * Note that free_command_q writes to this state and clears it before
+ * releasing the lock; different callers invoke free_command_q at
+ * different times, so don't initialize it above.
+ */
+ if (vscsi->phyp_acr_state != 0) {
+ /*
+ * set any bits in flags that may have been cleared by
+ * a call to free command queue in switch statement
+ * or reset queue
+ */
+ vscsi->flags |= vscsi->phyp_acr_flags;
+ ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
+ vscsi->phyp_acr_state = 0;
+ vscsi->phyp_acr_flags = 0;
+
+ pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
+ }
+
+ pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
+}
+
+/**
+ * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command element to use to process the request
+ * @crq: Pointer to CRQ entry containing the request
+ *
+ * Copy the srp information unit from the hosted
+ * partition using remote dma
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ long rc = 0;
+ u16 len;
+
+ len = be16_to_cpu(crq->IU_length);
+ if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
+ dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return SRP_VIOLATION;
+ }
+
+ rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(crq->IU_data_ptr),
+ vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
+
+ switch (rc) {
+ case H_SUCCESS:
+ cmd->init_time = mftb();
+ iue->remote_token = crq->IU_data_ptr;
+ iue->iu_len = len;
+ pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ (RESPONSE_Q_DOWN |
+ CLIENT_FAILED));
+ else
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+
+ dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
+ rc);
+ break;
+ case H_DEST_PARM:
+ case H_SOURCE_PARM:
+ default:
+ dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Data gram
+ * @vscsi: Pointer to our adapter structure
+ * @iue: Information Unit containing the Adapter Info MAD request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, adapter lock is held
+ */
+static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
+ struct iu_entry *iue)
+{
+ struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
+ struct mad_adapter_info_data *info;
+ uint flag_bits = 0;
+ dma_addr_t token;
+ long rc;
+
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
+
+ if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+
+ info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
+ GFP_KERNEL);
+ if (!info) {
+ dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
+ iue->target);
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+
+ /* Get remote info */
+ rc = h_copy_rdma(be16_to_cpu(mad->common.length),
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer),
+ vscsi->dds.window[LOCAL].liobn, token);
+
+ if (rc != H_SUCCESS) {
+ if (rc == H_PERMISSION) {
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ }
+ pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ goto free_dma;
+ }
+
+ /*
+ * Copy client info, but ignore partition number, which we
+ * already got from phyp - unless we failed to get it from
+ * phyp (e.g. if we're running on a p5 system).
+ */
+ if (vscsi->client_data.partition_number == 0)
+ vscsi->client_data.partition_number =
+ be32_to_cpu(info->partition_number);
+ strncpy(vscsi->client_data.srp_version, info->srp_version,
+ sizeof(vscsi->client_data.srp_version));
+ strncpy(vscsi->client_data.partition_name, info->partition_name,
+ sizeof(vscsi->client_data.partition_name));
+ vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
+ vscsi->client_data.os_type = be32_to_cpu(info->os_type);
+
+ /* Copy our info */
+ strncpy(info->srp_version, SRP_VERSION,
+ sizeof(info->srp_version));
+ strncpy(info->partition_name, vscsi->dds.partition_name,
+ sizeof(info->partition_name));
+ info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
+ info->mad_version = cpu_to_be32(MAD_VERSION_1);
+ info->os_type = cpu_to_be32(LINUX);
+ memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
+ info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+
+ dma_wmb();
+ rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
+ token, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer));
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
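+ /* fall through */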
+ default:
+ dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ break;
+ }
+
+free_dma:
+ dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
+ pr_debug("Leaving adapter_info, rc %ld\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
+ * @vscsi: Pointer to our adapter structure
+ * @iue: Information Unit containing the Capabilities MAD request
+ *
+ * NOTE: if you return an error from this routine you must be
+ * disconnecting or you will cause a hang
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt called with adapter lock held
+ */
+static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
+{
+ struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
+ struct capabilities *cap;
+ struct mad_capability_common *common;
+ dma_addr_t token;
+ u16 olen, len, status, min_len, cap_len;
+ u32 flag;
+ uint flag_bits = 0;
+ long rc = 0;
+
+ olen = be16_to_cpu(mad->common.length);
+ /*
+ * struct capabilities hardcodes a couple capabilities after the
+ * header, but the capabilities can actually be in any order.
+ */
+ min_len = offsetof(struct capabilities, migration);
+ if ((olen < min_len) || (olen > PAGE_SIZE)) {
+ pr_warn("cap_mad: invalid len %d\n", olen);
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+
+ cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
+ GFP_KERNEL);
+ if (!cap) {
+ dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
+ iue->target);
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+ rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer),
+ vscsi->dds.window[LOCAL].liobn, token);
+ if (rc == H_SUCCESS) {
+ strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
+ SRP_MAX_LOC_LEN);
+
+ len = olen - min_len;
+ status = VIOSRP_MAD_SUCCESS;
+ common = (struct mad_capability_common *)&cap->migration;
+
+ while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
+ pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
+
+ cap_len = be16_to_cpu(common->length);
+ if (cap_len > len) {
+ dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
+ status = VIOSRP_MAD_FAILED;
+ break;
+ }
+
+ if (cap_len == 0) {
+ dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
+ status = VIOSRP_MAD_FAILED;
+ break;
+ }
+
+ switch (common->cap_type) {
+ default:
+ pr_debug("cap_mad: unsupported capability\n");
+ common->server_support = 0;
+ flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
+ cap->flags &= ~flag;
+ break;
+ }
+
+ len = len - cap_len;
+ common = (struct mad_capability_common *)
+ ((char *)common + cap_len);
+ }
+
+ mad->common.status = cpu_to_be16(status);
+
+ dma_wmb();
+ rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer));
+
+ if (rc != H_SUCCESS) {
+ pr_debug("cap_mad: failed to copy to client, rc %ld\n",
+ rc);
+
+ if (rc == H_PERMISSION) {
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN |
+ CLIENT_FAILED);
+ }
+
+ pr_warn("cap_mad: error copying data to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ }
+ }
+
+ dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
+
+ pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_process_mad() - Service a MAnagement Data gram
+ * @vscsi: Pointer to our adapter structure
+ * @iue: Information Unit containing the MAD request
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
+{
+ struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
+ struct viosrp_empty_iu *empty;
+ long rc = ADAPT_SUCCESS;
+
+ switch (be32_to_cpu(mad->type)) {
+ case VIOSRP_EMPTY_IU_TYPE:
+ empty = &vio_iu(iue)->mad.empty_iu;
+ vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
+ vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
+ mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
+ break;
+ case VIOSRP_ADAPTER_INFO_TYPE:
+ rc = ibmvscsis_adapter_info(vscsi, iue);
+ break;
+ case VIOSRP_CAPABILITIES_TYPE:
+ rc = ibmvscsis_cap_mad(vscsi, iue);
+ break;
+ case VIOSRP_ENABLE_FAST_FAIL:
+ if (vscsi->state == CONNECTED) {
+ vscsi->fast_fail = true;
+ mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
+ } else {
+ pr_warn("fast fail mad sent after login\n");
+ mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ }
+ break;
+ default:
+ mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * srp_snd_msg_failed() - Handle an error when sending a response
+ * @vscsi: Pointer to our adapter structure
+ * @rc: The return code from the h_send_crq command
+ *
+ * Must be called with interrupt lock held.
+ */
+static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
+{
+ ktime_t kt;
+
+ if (rc != H_DROPPED) {
+ ibmvscsis_free_cmd_qs(vscsi);
+
+ if (rc == H_CLOSED)
+ vscsi->flags |= CLIENT_FAILED;
+
+ /* don't flag the same problem multiple times */
+ if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ if (!(vscsi->state & (ERR_DISCONNECT |
+ ERR_DISCONNECT_RECONNECT |
+ ERR_DISCONNECTED | UNDEFINED))) {
+ dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
+ vscsi->state, vscsi->flags, rc);
+ }
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ }
+ return;
+ }
+
+ /*
+ * The response queue is full.
+ * If the server is processing SRP requests, i.e.
+ * the client has successfully done an
+ * SRP_LOGIN, then it will wait forever for room in
+ * the queue. However if the system admin
+ * is attempting to unconfigure the server then one
+ * or more children will be in a state where
+ * they are being removed. So if there is even one
+ * child being removed then the driver assumes
+ * the system admin is attempting to break the
+ * connection with the client and MAX_TIMER_POPS
+ * is honored.
+ */
+ if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
+ (vscsi->state == SRP_PROCESSING)) {
+ pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
+
+ /*
+ * Check if the timer is running; if it
+ * is not then start it up.
+ */
+ if (!vscsi->rsp_q_timer.started) {
+ if (vscsi->rsp_q_timer.timer_pops <
+ MAX_TIMER_POPS) {
+ kt = ktime_set(0, WAIT_NANO_SECONDS);
+ } else {
+ /*
+ * slide the timeslice if the maximum
+ * timer pops have already happened
+ */
+ kt = ktime_set(WAIT_SECONDS, 0);
+ }
+
+ vscsi->rsp_q_timer.started = true;
+ hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
+ HRTIMER_MODE_REL);
+ }
+ } else {
+ /*
+ * TBD: Do we need to worry about this? Need to get
+ * remove working.
+ */
+ /*
+ * waited a long time and it appears the system admin
+ * is bringing this driver down
+ */
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_cmd_qs(vscsi);
+ /*
+ * if the driver is already attempting to disconnect
+ * from the client and has already logged an error,
+ * trace this event but don't put it in the error log
+ */
+ if (!(vscsi->state & (ERR_DISCONNECT |
+ ERR_DISCONNECT_RECONNECT |
+ ERR_DISCONNECTED | UNDEFINED))) {
+ dev_err(&vscsi->dev, "client crq full too long\n");
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ 0);
+ }
+ }
+}
+
+/**
+ * ibmvscsis_send_messages() - Send a Response
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Send a response, first checking the waiting queue. Responses are
+ * sent in order they are received. If the response cannot be sent,
+ * because the client queue is full, it stays on the waiting queue.
+ *
+ * PRECONDITION:
+ * Called with interrupt lock held
+ */
+static void ibmvscsis_send_messages(struct scsi_info *vscsi)
+{
+ u64 msg_hi = 0;
+ /* note: do not attempt to access the IU_data_ptr with this pointer;
+ * it is not valid
+ */
+ struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
+ struct ibmvscsis_cmd *cmd, *nxt;
+ struct iu_entry *iue;
+ long rc = ADAPT_SUCCESS;
+
+ if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
+ list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
+ pr_debug("send_messages cmd %p\n", cmd);
+
+ iue = cmd->iue;
+
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = cmd->rsp.format;
+
+ if (cmd->flags & CMD_FAST_FAIL)
+ crq->status = VIOSRP_ADAPTER_FAIL;
+
+ crq->IU_length = cpu_to_be16(cmd->rsp.len);
+
+ rc = h_send_crq(vscsi->dma_dev->unit_address,
+ be64_to_cpu(msg_hi),
+ be64_to_cpu(cmd->rsp.tag));
+
+ pr_debug("send_messages: tag 0x%llx, rc %ld\n",
+ be64_to_cpu(cmd->rsp.tag), rc);
+
+ /* if all ok free up the command element resources */
+ if (rc == H_SUCCESS) {
+ /* some movement has occurred */
+ vscsi->rsp_q_timer.timer_pops = 0;
+ list_del(&cmd->list);
+
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ } else {
+ srp_snd_msg_failed(vscsi, rc);
+ break;
+ }
+ }
+
+ if (!rc) {
+ /*
+ * The timer could pop with the queue empty. If
+ * this happens, rc will always indicate a
+ * success; clear the pop count.
+ */
+ vscsi->rsp_q_timer.timer_pops = 0;
+ }
+ } else {
+ ibmvscsis_free_cmd_qs(vscsi);
+ }
+}
+
+/* Called with intr lock held */
+static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
+ uint flag_bits = 0;
+ long rc;
+
+ dma_wmb();
+ rc = h_copy_rdma(sizeof(struct mad_common),
+ vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(crq->IU_data_ptr));
+ if (!rc) {
+ cmd->rsp.format = VIOSRP_MAD_FORMAT;
+ cmd->rsp.len = sizeof(struct mad_common);
+ cmd->rsp.tag = mad->tag;
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+ } else {
+ pr_debug("Error sending mad response, rc %ld\n", rc);
+ if (rc == H_PERMISSION) {
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ }
+ dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
+ rc);
+
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ }
+}
+
+/**
+ * ibmvscsis_mad() - Service a MAnagement Data gram.
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to the CRQ entry containing the MAD request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt called with adapter lock held
+ */
+static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ struct iu_entry *iue;
+ struct ibmvscsis_cmd *cmd;
+ struct mad_common *mad;
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ /*
+ * We have not exchanged Init Msgs yet, so this MAD was sent
+ * before the last Transport Event; client will not be
+ * expecting a response.
+ */
+ case WAIT_CONNECTION:
+ pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
+ return ADAPT_SUCCESS;
+
+ case SRP_PROCESSING:
+ case CONNECTED:
+ break;
+
+ /*
+ * We should never get here while we're in these states.
+ * Just log an error and get out.
+ */
+ case UNCONFIGURING:
+ case WAIT_IDLE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ default:
+ dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
+ vscsi->state);
+ return ADAPT_SUCCESS;
+ }
+
+ cmd = ibmvscsis_get_free_cmd(vscsi);
+ if (!cmd) {
+ dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
+ vscsi->debit);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return ERROR;
+ }
+ iue = cmd->iue;
+ cmd->type = ADAPTER_MAD;
+
+ rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
+ if (!rc) {
+ mad = (struct mad_common *)&vio_iu(iue)->mad;
+
+ pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+
+ if (be16_to_cpu(mad->length) < 0) {
+ dev_err(&vscsi->dev, "mad: length is < 0\n");
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ rc = SRP_VIOLATION;
+ } else {
+ rc = ibmvscsis_process_mad(vscsi, iue);
+ }
+
+ pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
+ rc);
+
+ if (!rc)
+ ibmvscsis_send_mad_resp(vscsi, cmd, crq);
+ } else {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ }
+
+ pr_debug("Leaving mad, rc %ld\n", rc);
+ return rc;
+}
+
+/**
+ * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to the command for the SRP Login request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
+ struct format_code *fmt;
+ uint flag_bits = 0;
+ long rc = ADAPT_SUCCESS;
+
+ memset(rsp, 0, sizeof(struct srp_login_rsp));
+
+ rsp->opcode = SRP_LOGIN_RSP;
+ rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
+ rsp->tag = cmd->rsp.tag;
+ rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
+ rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
+ fmt = (struct format_code *)&rsp->buf_fmt;
+ fmt->buffers = SUPPORTED_FORMATS;
+ vscsi->credit = 0;
+
+ cmd->rsp.len = sizeof(struct srp_login_rsp);
+
+ dma_wmb();
+ rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
+ iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(iue->remote_token));
+
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ break;
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ default:
+ dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to the command for the SRP Login request
+ * @reason: The reason the SRP Login is being rejected, per SRP protocol
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd, u32 reason)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
+ struct format_code *fmt;
+ uint flag_bits = 0;
+ long rc = ADAPT_SUCCESS;
+
+ memset(rej, 0, sizeof(*rej));
+
+ rej->opcode = SRP_LOGIN_REJ;
+ rej->reason = cpu_to_be32(reason);
+ rej->tag = cmd->rsp.tag;
+ fmt = (struct format_code *)&rej->buf_fmt;
+ fmt->buffers = SUPPORTED_FORMATS;
+
+ cmd->rsp.len = sizeof(*rej);
+
+ dma_wmb();
+ rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
+ iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(iue->remote_token));
+
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ break;
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ default:
+ dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
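+/*
+ * Allocate the single I_T nexus for this tport, if one does not already
+ * exist, by creating a target core session for it.
+ */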
+static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
+{
+ char *name = tport->tport_name;
+ struct ibmvscsis_nexus *nexus;
+ int rc;
+
+ if (tport->ibmv_nexus) {
+ pr_debug("tport->ibmv_nexus already exists\n");
+ return 0;
+ }
+
+ nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!nexus) {
+ pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ return -ENOMEM;
+ }
+
+ nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
+ TARGET_PROT_NORMAL, name, nexus,
+ NULL);
+ if (IS_ERR(nexus->se_sess)) {
+ rc = PTR_ERR(nexus->se_sess);
+ goto transport_init_fail;
+ }
+
+ tport->ibmv_nexus = nexus;
+
+ return 0;
+
+transport_init_fail:
+ kfree(nexus);
+ return rc;
+}
+
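+/*
+ * Release the I_T nexus for this tport: deregister the target core
+ * session and free the nexus structure.
+ */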
+static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
+{
+ struct se_session *se_sess;
+ struct ibmvscsis_nexus *nexus;
+
+ nexus = tport->ibmv_nexus;
+ if (!nexus)
+ return -ENODEV;
+
+ se_sess = nexus->se_sess;
+ if (!se_sess)
+ return -ENODEV;
+
+ /*
+ * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
+ */
+ transport_deregister_session(se_sess);
+ tport->ibmv_nexus = NULL;
+ kfree(nexus);
+
+ return 0;
+}
+
+/**
+ * ibmvscsis_srp_login() - Process an SRP Login Request
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command element to use to process the SRP Login request
+ * @crq: Pointer to CRQ entry containing the SRP Login request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, called with interrupt lock held
+ */
+static long ibmvscsis_srp_login(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
+ struct port_id {
+ __be64 id_extension;
+ __be64 io_guid;
+ } *iport, *tport;
+ struct format_code *fmt;
+ u32 reason = 0x0;
+ long rc = ADAPT_SUCCESS;
+
+ iport = (struct port_id *)req->initiator_port_id;
+ tport = (struct port_id *)req->target_port_id;
+ fmt = (struct format_code *)&req->req_buf_fmt;
+ if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
+ reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
+ else if (be32_to_cpu(req->req_it_iu_len) < 64)
+ reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
+ else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
+ (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
+ reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
+ else if (req->req_flags & SRP_MULTICHAN_MULTI)
+ reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
+ else if (fmt->buffers & (~SUPPORTED_FORMATS))
+ reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
+ else if ((fmt->buffers | SUPPORTED_FORMATS) == 0)
+ reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
+
+ if (vscsi->state == SRP_PROCESSING)
+ reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
+
+ rc = ibmvscsis_make_nexus(&vscsi->tport);
+ if (rc)
+ reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
+
+ cmd->rsp.format = VIOSRP_SRP_FORMAT;
+ cmd->rsp.tag = req->tag;
+
+ pr_debug("srp_login: reason 0x%x\n", reason);
+
+ if (reason)
+ rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
+ else
+ rc = ibmvscsis_login_rsp(vscsi, cmd);
+
+ if (!rc) {
+ if (!reason)
+ vscsi->state = SRP_PROCESSING;
+
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+ } else {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ }
+
+ pr_debug("Leaving srp_login, rc %ld\n", rc);
+ return rc;
+}
+
+/**
+ * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command element to use to process the Implicit Logout request
+ * @crq: Pointer to CRQ entry containing the Implicit Logout request
+ *
+ * Do the logic to close the I_T nexus. This function may not
+ * behave to specification.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
+ long rc = ADAPT_SUCCESS;
+
+ if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
+ !list_empty(&vscsi->waiting_rsp)) {
+ dev_err(&vscsi->dev, "i_logout: outstanding work\n");
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ } else {
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.tag = log_out->tag;
+ cmd->rsp.len = sizeof(struct mad_common);
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
+ }
+
+ return rc;
+}
+
+/* Called with intr lock held */
+static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ struct ibmvscsis_cmd *cmd;
+ struct iu_entry *iue;
+ struct srp_cmd *srp;
+ struct srp_tsk_mgmt *tsk;
+ long rc;
+
+ if (vscsi->request_limit - vscsi->debit <= 0) {
+ /* Client has exceeded request limit */
+ dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
+ vscsi->request_limit, vscsi->debit);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return;
+ }
+
+ cmd = ibmvscsis_get_free_cmd(vscsi);
+ if (!cmd) {
+ dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
+ vscsi->debit);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return;
+ }
+ iue = cmd->iue;
+ srp = &vio_iu(iue)->srp.cmd;
+
+ rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
+ if (rc) {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ return;
+ }
+
+ if (vscsi->state == SRP_PROCESSING) {
+ switch (srp->opcode) {
+ case SRP_LOGIN_REQ:
+ rc = ibmvscsis_srp_login(vscsi, cmd, crq);
+ break;
+
+ case SRP_TSK_MGMT:
+ tsk = &vio_iu(iue)->srp.tsk_mgmt;
+ pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
+ tsk->tag);
+ cmd->rsp.tag = tsk->tag;
+ vscsi->debit += 1;
+ cmd->type = TASK_MANAGEMENT;
+ list_add_tail(&cmd->list, &vscsi->schedule_q);
+ queue_work(vscsi->work_q, &cmd->work);
+ break;
+
+ case SRP_CMD:
+ pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
+ srp->tag);
+ cmd->rsp.tag = srp->tag;
+ vscsi->debit += 1;
+ cmd->type = SCSI_CDB;
+ /*
+ * We want to keep track of work waiting for
+ * the workqueue.
+ */
+ list_add_tail(&cmd->list, &vscsi->schedule_q);
+ queue_work(vscsi->work_q, &cmd->work);
+ break;
+
+ case SRP_I_LOGOUT:
+ rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
+ break;
+
+ case SRP_CRED_RSP:
+ case SRP_AER_RSP:
+ default:
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
+ (uint)srp->opcode);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+ } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
+ rc = ibmvscsis_srp_login(vscsi, cmd, crq);
+ } else {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+}
+
+/**
+ * ibmvscsis_ping_response() - Respond to a ping request
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Let the client know that the server is alive and waiting on
+ * its native I/O stack.
+ * If any type of error occurs from the call to queue a ping
+ * response, then the client is either not accepting or not
+ * receiving interrupts. Disconnect with an error.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_ping_response(struct scsi_info *vscsi)
+{
+ struct viosrp_crq *crq;
+ u64 buffer[2] = { 0, 0 };
+ long rc;
+
+ crq = (struct viosrp_crq *)&buffer;
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = (u8)MESSAGE_IN_CRQ;
+ crq->status = PING_RESPONSE;
+
+ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
+ cpu_to_be64(buffer[MSG_LOW]));
+
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+	case H_CLOSED:
+		vscsi->flags |= CLIENT_FAILED;
+		/* Fall through */
+	case H_DROPPED:
+		vscsi->flags |= RESPONSE_Q_DOWN;
+		/* Fall through */
+	case H_REMOTE_PARM:
+ dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ default:
+ dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ case UNCONFIGURING:
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case WAIT_CONNECTION:
+ vscsi->state = CONNECTED;
+ break;
+
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ case WAIT_ENABLED:
+ case PART_UP_WAIT_ENAB:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case WAIT_ENABLED:
+ vscsi->state = PART_UP_WAIT_ENAB;
+ break;
+
+ case WAIT_CONNECTION:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ break;
+
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+
+ case H_DROPPED:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ rc = ERROR;
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+
+ case H_CLOSED:
+ pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ rc = 0;
+ break;
+ }
+ break;
+
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case UNCONFIGURING:
+ break;
+
+ case PART_UP_WAIT_ENAB:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ case WAIT_IDLE:
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (rc == H_SUCCESS) {
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ pr_debug("init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
+ } else {
+ pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ rc = ADAPT_SUCCESS;
+ }
+
+ if (crq->format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ } else if (crq->format == INIT_COMPLETE_MSG) {
+ rc = ibmvscsis_handle_init_compl_msg(vscsi);
+ } else {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the SRP request
+ *
+ * This function will return success if the command queue element is valid
+ * and the SRP IU or MAD request it pointed to was also valid. Success
+ * here does not mean that no error was returned to the client.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, intr lock held
+ */
+static long ibmvscsis_parse_command(struct scsi_info *vscsi,
+ struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (crq->valid) {
+ case VALID_CMD_RESP_EL:
+ switch (crq->format) {
+ case OS400_FORMAT:
+ case AIX_FORMAT:
+ case LINUX_FORMAT:
+ case MAD_FORMAT:
+ if (vscsi->flags & PROCESSING_MAD) {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "parse_command: already processing mad\n");
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ 0);
+ } else {
+ vscsi->flags |= PROCESSING_MAD;
+ rc = ibmvscsis_mad(vscsi, crq);
+ }
+ break;
+
+ case SRP_FORMAT:
+ ibmvscsis_srp_cmd(vscsi, crq);
+ break;
+
+ case MESSAGE_IN_CRQ:
+ if (crq->status == PING)
+ ibmvscsis_ping_response(vscsi);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+ break;
+
+ case VALID_TRANS_EVENT:
+ rc = ibmvscsis_trans_event(vscsi, crq);
+ break;
+
+ case VALID_INIT_MSG:
+ rc = ibmvscsis_init_msg(vscsi, crq);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
+ (uint)crq->valid);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ /*
+ * Return only what the interrupt handler cares
+ * about. Most errors we keep right on trucking.
+ */
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+
+ return rc;
+}
+
+static int read_dma_window(struct scsi_info *vscsi)
+{
+ struct vio_dev *vdev = vscsi->dma_dev;
+ const __be32 *dma_window;
+ const __be32 *prop;
+
+ /* TODO Using of_parse_dma_window would be better, but it doesn't give
+ * a way to read multiple windows without already knowing the size of
+ * a window or the number of windows.
+ */
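+	/*
+	 * A sketch of the layout the parsing below assumes: each window in
+	 * "ibm,my-dma-window" is <liobn, address, size>, where the address
+	 * and size widths come from "ibm,#dma-address-cells" and
+	 * "ibm,#dma-size-cells" (treated as one cell each if the properties
+	 * are missing).  Only the liobn of each window is kept.
+	 */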
+ dma_window = (const __be32 *)vio_get_attribute(vdev,
+ "ibm,my-dma-window",
+ NULL);
+ if (!dma_window) {
+ pr_err("Couldn't find ibm,my-dma-window property\n");
+ return -1;
+ }
+
+ vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
+ dma_window++;
+
+ prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
+ NULL);
+ if (!prop) {
+ pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dma_window++;
+ } else {
+ dma_window += be32_to_cpu(*prop);
+ }
+
+ prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
+ NULL);
+ if (!prop) {
+ pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dma_window++;
+ } else {
+ dma_window += be32_to_cpu(*prop);
+ }
+
+ /* dma_window should point to the second window now */
+ vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
+
+ return 0;
+}
+
+static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
+{
+ struct ibmvscsis_tport *tport = NULL;
+ struct vio_dev *vdev;
+ struct scsi_info *vscsi;
+
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
+ vdev = vscsi->dma_dev;
+ if (!strcmp(dev_name(&vdev->dev), name)) {
+ tport = &vscsi->tport;
+ break;
+ }
+ }
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+
+ return tport;
+}
+
+/**
+ * ibmvscsis_parse_cmd() - Parse SRP Command
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command element with SRP command
+ *
+ * Parse the srp command; if it is valid then submit it to tcm.
+ * Note: The return code does not reflect the status of the SCSI CDB.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level
+ */
+static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+ struct ibmvscsis_nexus *nexus;
+ u64 data_len = 0;
+ enum dma_data_direction dir;
+ int attr = 0;
+ int rc = 0;
+
+ nexus = vscsi->tport.ibmv_nexus;
+ /*
+	 * The add_cdb_len field holds the additional length. The SRP spec
+	 * says the additional length is in 4-byte words, but technically
+	 * the additional length field is only the upper 6 bits of the
+	 * byte; the lower 2 bits are reserved. If the lower 2 bits are 0
+	 * (as all reserved fields should be), then interpreting the byte
+	 * as an int yields the length in bytes.
+ */
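+	/*
+	 * Illustrative example: add_cdb_len = 0x08 encodes two additional
+	 * 4-byte words in the upper 6 bits (0x08 >> 2 == 2), i.e. 8 extra
+	 * bytes; with the reserved low bits clear, reading the byte
+	 * directly also yields 8.
+	 */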
+ if (srp->add_cdb_len & 0x03) {
+ dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
+ return;
+ }
+
+ if (srp_get_desc_table(srp, &dir, &data_len)) {
+ dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
+ srp->tag);
+		goto fail;
+ }
+
+ cmd->rsp.sol_not = srp->sol_not;
+
+ switch (srp->task_attr) {
+ case SRP_SIMPLE_TASK:
+ attr = TCM_SIMPLE_TAG;
+ break;
+ case SRP_ORDERED_TASK:
+ attr = TCM_ORDERED_TAG;
+ break;
+ case SRP_HEAD_TASK:
+ attr = TCM_HEAD_TAG;
+ break;
+ case SRP_ACA_TASK:
+ attr = TCM_ACA_TAG;
+ break;
+ default:
+ dev_err(&vscsi->dev, "Invalid task attribute %d\n",
+ srp->task_attr);
+ goto fail;
+ }
+
+ cmd->se_cmd.tag = be64_to_cpu(srp->tag);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ list_add_tail(&cmd->list, &vscsi->active_q);
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ srp->lun.scsi_lun[0] &= 0x3f;
+
+ pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
+ &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
+ attr);
+
+ rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+ cmd->sense_buf, scsilun_to_int(&srp->lun),
+ data_len, attr, dir, 0);
+ if (rc) {
+ dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+ goto fail;
+ }
+ return;
+
+fail:
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+/**
+ * ibmvscsis_parse_task() - Parse SRP Task Management Request
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command element with SRP task management request
+ *
+ * Parse the srp task management request; if it is valid then submit it to tcm.
+ * Note: The return code does not reflect the status of the task management
+ * request.
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process level
+ */
+static void ibmvscsis_parse_task(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+ int tcm_type;
+ u64 tag_to_abort = 0;
+ int rc = 0;
+ struct ibmvscsis_nexus *nexus;
+
+ nexus = vscsi->tport.ibmv_nexus;
+
+ cmd->rsp.sol_not = srp_tsk->sol_not;
+
+ switch (srp_tsk->tsk_mgmt_func) {
+ case SRP_TSK_ABORT_TASK:
+ tcm_type = TMR_ABORT_TASK;
+ tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
+ break;
+ case SRP_TSK_ABORT_TASK_SET:
+ tcm_type = TMR_ABORT_TASK_SET;
+ break;
+ case SRP_TSK_CLEAR_TASK_SET:
+ tcm_type = TMR_CLEAR_TASK_SET;
+ break;
+ case SRP_TSK_LUN_RESET:
+ tcm_type = TMR_LUN_RESET;
+ break;
+ case SRP_TSK_CLEAR_ACA:
+ tcm_type = TMR_CLEAR_ACA;
+ break;
+ default:
+ dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
+ srp_tsk->tsk_mgmt_func);
+ cmd->se_cmd.se_tmr_req->response =
+ TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ rc = -1;
+ break;
+ }
+
+ if (!rc) {
+ cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ list_add_tail(&cmd->list, &vscsi->active_q);
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ srp_tsk->lun.scsi_lun[0] &= 0x3f;
+
+ pr_debug("calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
+ rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
+ scsilun_to_int(&srp_tsk->lun), srp_tsk,
+ tcm_type, GFP_KERNEL, tag_to_abort, 0);
+ if (rc) {
+ dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
+ rc);
+ cmd->se_cmd.se_tmr_req->response =
+ TMR_FUNCTION_REJECTED;
+ }
+ }
+
+ if (rc)
+ transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
+}
+
+static void ibmvscsis_scheduler(struct work_struct *work)
+{
+ struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
+ work);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ spin_lock_bh(&vscsi->intr_lock);
+
+ /* Remove from schedule_q */
+ list_del(&cmd->list);
+
+ /* Don't submit cmd if we're disconnecting */
+ if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+
+ /* ibmvscsis_disconnect might be waiting for us */
+ if (list_empty(&vscsi->active_q) &&
+ list_empty(&vscsi->schedule_q) &&
+ (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+ return;
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ switch (cmd->type) {
+ case SCSI_CDB:
+ ibmvscsis_parse_cmd(vscsi, cmd);
+ break;
+ case TASK_MANAGEMENT:
+ ibmvscsis_parse_task(vscsi, cmd);
+ break;
+ default:
+ dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
+ cmd->type);
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
+ break;
+ }
+}
+
+static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
+{
+ struct ibmvscsis_cmd *cmd;
+ int i;
+
+ INIT_LIST_HEAD(&vscsi->free_cmd);
+ vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
+ GFP_KERNEL);
+ if (!vscsi->cmd_pool)
+ return -ENOMEM;
+
+ for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
+ i++, cmd++) {
+ cmd->adapter = vscsi;
+ INIT_WORK(&cmd->work, ibmvscsis_scheduler);
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ }
+
+ return 0;
+}
+
+static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
+{
+ kfree(vscsi->cmd_pool);
+ vscsi->cmd_pool = NULL;
+ INIT_LIST_HEAD(&vscsi->free_cmd);
+}
+
+/**
+ * ibmvscsis_service_wait_q() - Service Waiting Queue
+ * @timer: Pointer to timer which has expired
+ *
+ * This routine is called when the timer pops to service the waiting
+ * queue. Elements on the queue have completed, their responses have been
+ * copied to the client, but the client's response queue was full so
+ * the queue message could not be sent. The routine grabs the proper locks
+ * and calls ibmvscsis_send_messages().
+ *
+ * EXECUTION ENVIRONMENT:
+ * called at interrupt level
+ */
+static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
+{
+ struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
+ struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
+ rsp_q_timer);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ p_timer->timer_pops += 1;
+ p_timer->started = false;
+ ibmvscsis_send_messages(vscsi);
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ return HRTIMER_NORESTART;
+}
+
+static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
+{
+ struct timer_cb *p_timer;
+
+ p_timer = &vscsi->rsp_q_timer;
+ hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ p_timer->timer.function = ibmvscsis_service_wait_q;
+ p_timer->started = false;
+ p_timer->timer_pops = 0;
+
+ return ADAPT_SUCCESS;
+}
+
+static void ibmvscsis_freetimer(struct scsi_info *vscsi)
+{
+ struct timer_cb *p_timer;
+
+ p_timer = &vscsi->rsp_q_timer;
+
+ (void)hrtimer_cancel(&p_timer->timer);
+
+ p_timer->started = false;
+ p_timer->timer_pops = 0;
+}
+
+static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
+{
+ struct scsi_info *vscsi = data;
+
+ vio_disable_interrupts(vscsi->dma_dev);
+ tasklet_schedule(&vscsi->work_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ibmvscsis_check_q() - Helper function to Check Init Message Valid
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Checks if an initialize message was queued by the initiator
+ * while the timing window was open. This function is called from
+ * probe after the CRQ is created and interrupts are enabled.
+ * It would only be used by adapters that wait for some event before
+ * completing the init handshake with the client. For ibmvscsis, this
+ * event is waiting for the port to be enabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only, interrupt lock held
+ */
+static long ibmvscsis_check_q(struct scsi_info *vscsi)
+{
+ uint format;
+ long rc;
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc)
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ else if (format == UNUSED_FORMAT)
+ vscsi->state = WAIT_ENABLED;
+ else
+ vscsi->state = PART_UP_WAIT_ENAB;
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_enable_change_state() - Set new state based on enabled status
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function determines our new state now that we are enabled. This
+ * may involve sending an Init or Init Complete message to the client.
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+handle_state_change:
+ switch (vscsi->state) {
+ case WAIT_ENABLED:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ vscsi->state = WAIT_CONNECTION;
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ break;
+
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ break;
+ case PART_UP_WAIT_ENAB:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_DROPPED:
+ case H_CLOSED:
+ vscsi->state = WAIT_ENABLED;
+ goto handle_state_change;
+
+ case H_PARAMETER:
+ break;
+
+ case H_HARDWARE:
+ break;
+
+ default:
+ rc = H_HARDWARE;
+ break;
+ }
+ break;
+
+ case WAIT_CONNECTION:
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ rc = ADAPT_SUCCESS;
+ break;
+ /* should not be able to get here */
+ case UNCONFIGURING:
+ rc = ERROR;
+ vscsi->state = UNDEFINED;
+ break;
+
+ /* driver should never allow this to happen */
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ default:
+ dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
+ vscsi->state);
+ rc = ADAPT_SUCCESS;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_create_command_q() - Create Command Queue
+ * @vscsi: Pointer to our adapter structure
+ * @num_cmds: Currently unused. In the future, may be used to determine
+ * the size of the CRQ.
+ *
+ * Allocates memory for the command queue, maps it into an ioba, and
+ * registers the command/response queue with the hypervisor.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only
+ */
+static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
+{
+ long rc = 0;
+ int pages;
+ struct vio_dev *vdev = vscsi->dma_dev;
+
+ /* We might support multiple pages in the future, but just 1 for now */
+ pages = 1;
+
+ vscsi->cmd_q.size = pages;
+
+ vscsi->cmd_q.base_addr =
+ (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+ if (!vscsi->cmd_q.base_addr)
+ return -ENOMEM;
+
+ vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
+
+ vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
+ vscsi->cmd_q.base_addr,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
+ free_page((unsigned long)vscsi->cmd_q.base_addr);
+ return -ENOMEM;
+ }
+
+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
+ if (rc) {
+ if (rc == H_CLOSED) {
+ vscsi->state = WAIT_ENABLED;
+ rc = 0;
+ } else {
+ dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)vscsi->cmd_q.base_addr);
+ rc = -ENODEV;
+ }
+ } else {
+ vscsi->state = WAIT_ENABLED;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_destroy_command_q() - Destroy Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Releases memory for command queue and unmaps mapped remote memory.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only
+ */
+static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
+{
+ dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)vscsi->cmd_q.base_addr);
+ vscsi->cmd_q.base_addr = NULL;
+ vscsi->state = NO_QUEUE;
+}
+
+static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+ struct scsi_sense_hdr sshdr;
+ u8 rc = se_cmd->scsi_status;
+
+ if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
+ if (scsi_normalize_sense(se_cmd->sense_buffer,
+ se_cmd->scsi_sense_length, &sshdr))
+ if (sshdr.sense_key == HARDWARE_ERROR &&
+ (se_cmd->residual_count == 0 ||
+ se_cmd->residual_count == se_cmd->data_length)) {
+ rc = NO_SENSE;
+ cmd->flags |= CMD_FAST_FAIL;
+ }
+
+ return rc;
+}
+
+/**
+ * srp_build_response() - Build an SRP response buffer
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command for which to send the response
+ * @len_p: Where to return the length of the IU response sent. This
+ * is needed to construct the CRQ response.
+ *
+ * Build the SRP response buffer and copy it to the client's memory space.
+ */
+static long srp_build_response(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd, uint *len_p)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct srp_rsp *rsp;
+ uint len;
+ u32 rsp_code;
+ char *data;
+ u32 *tsk_status;
+ long rc = ADAPT_SUCCESS;
+
+ spin_lock_bh(&vscsi->intr_lock);
+
+ rsp = &vio_iu(iue)->srp.rsp;
+ len = sizeof(*rsp);
+ memset(rsp, 0, len);
+ data = rsp->data;
+
+ rsp->opcode = SRP_RSP;
+
+ if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
+ rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
+ else
+ rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+ rsp->tag = cmd->rsp.tag;
+ rsp->flags = 0;
+
+ if (cmd->type == SCSI_CDB) {
+ rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
+ if (rsp->status) {
+ pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
+ (int)rsp->status);
+ ibmvscsis_determine_resid(se_cmd, rsp);
+ if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
+ rsp->sense_data_len =
+ cpu_to_be32(se_cmd->scsi_sense_length);
+ rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+ len += se_cmd->scsi_sense_length;
+ memcpy(data, se_cmd->sense_buffer,
+ se_cmd->scsi_sense_length);
+ }
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ } else if (cmd->flags & CMD_FAST_FAIL) {
+ pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ } else {
+ rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
+ SCSOLNT_RESP_SHIFT;
+ }
+ } else {
+ /* this is task management */
+ rsp->status = 0;
+ rsp->resp_data_len = cpu_to_be32(4);
+ rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+
+ switch (se_cmd->se_tmr_req->response) {
+ case TMR_FUNCTION_COMPLETE:
+ case TMR_TASK_DOES_NOT_EXIST:
+ rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
+ rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
+ SCSOLNT_RESP_SHIFT;
+ break;
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ case TMR_LUN_DOES_NOT_EXIST:
+ rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ break;
+ case TMR_FUNCTION_FAILED:
+ case TMR_FUNCTION_REJECTED:
+ default:
+ rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ break;
+ }
+
+ tsk_status = (u32 *)data;
+ *tsk_status = cpu_to_be32(rsp_code);
+ data = (char *)(tsk_status + 1);
+ len += 4;
+ }
+
+ dma_wmb();
+ rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(iue->remote_token));
+
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->credit = 0;
+ *len_p = len;
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
+
+ dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
+ rc, vscsi->flags, vscsi->state);
+ break;
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ default:
+ dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
+ rc);
+ break;
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ return rc;
+}
+
+static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
+ int nsg, struct srp_direct_buf *md, int nmd,
+ enum dma_data_direction dir, unsigned int bytes)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_target *target = iue->target;
+ struct scsi_info *vscsi = target->ldata;
+ struct scatterlist *sgp;
+ dma_addr_t client_ioba, server_ioba;
+ ulong buf_len;
+ ulong client_len, server_len;
+ int md_idx;
+ long tx_len;
+ long rc = 0;
+
+ pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
+
+ if (bytes == 0)
+ return 0;
+
+ sgp = sg;
+ client_len = 0;
+ server_len = 0;
+ md_idx = 0;
+ tx_len = bytes;
+
+ do {
+ if (client_len == 0) {
+ if (md_idx >= nmd) {
+ dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
+ rc = -EIO;
+ break;
+ }
+ client_ioba = be64_to_cpu(md[md_idx].va);
+ client_len = be32_to_cpu(md[md_idx].len);
+ }
+ if (server_len == 0) {
+ if (!sgp) {
+ dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
+ rc = -EIO;
+ break;
+ }
+ server_ioba = sg_dma_address(sgp);
+ server_len = sg_dma_len(sgp);
+ }
+
+ buf_len = tx_len;
+
+ if (buf_len > client_len)
+ buf_len = client_len;
+
+ if (buf_len > server_len)
+ buf_len = server_len;
+
+ if (buf_len > max_vdma_size)
+ buf_len = max_vdma_size;
+
+ if (dir == DMA_TO_DEVICE) {
+ /* read from client */
+ rc = h_copy_rdma(buf_len,
+ vscsi->dds.window[REMOTE].liobn,
+ client_ioba,
+ vscsi->dds.window[LOCAL].liobn,
+ server_ioba);
+ } else {
+ /* write to client */
+ struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+
+ if (!READ_CMD(srp->cdb))
+ print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
+ sg_virt(sgp), buf_len);
+ /* The h_copy_rdma will cause phyp, running in another
+ * partition, to read memory, so we need to make sure
+ * the data has been written out, hence these syncs.
+ */
+ /* ensure that everything is in memory */
+ isync();
+ /* ensure that memory has been made visible */
+ dma_wmb();
+ rc = h_copy_rdma(buf_len,
+ vscsi->dds.window[LOCAL].liobn,
+ server_ioba,
+ vscsi->dds.window[REMOTE].liobn,
+ client_ioba);
+ }
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+ case H_PERMISSION:
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ if (connection_broken(vscsi)) {
+ spin_lock_bh(&vscsi->intr_lock);
+ vscsi->flags |=
+ (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ spin_unlock_bh(&vscsi->intr_lock);
+ }
+ dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
+ rc);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
+ rc);
+ break;
+ }
+
+ if (!rc) {
+ tx_len -= buf_len;
+ if (tx_len) {
+ client_len -= buf_len;
+ if (client_len == 0)
+ md_idx++;
+ else
+ client_ioba += buf_len;
+
+ server_len -= buf_len;
+ if (server_len == 0)
+ sgp = sg_next(sgp);
+ else
+ server_ioba += buf_len;
+ } else {
+ break;
+ }
+ }
+ } while (!rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_crq() - Handle CRQ
+ * @data: Pointer to our adapter structure
+ *
+ * Read the command elements from the command queue, copy the payloads
+ * associated with the command elements to local memory, and execute the
+ * SRP requests.
+ *
+ * Note: this is an edge triggered interrupt. It cannot be shared.
+ */
+static void ibmvscsis_handle_crq(unsigned long data)
+{
+ struct scsi_info *vscsi = (struct scsi_info *)data;
+ struct viosrp_crq *crq;
+ long rc;
+ bool ack = true;
+ volatile u8 valid;
+
+ spin_lock_bh(&vscsi->intr_lock);
+
+ pr_debug("got interrupt\n");
+
+	/*
+	 * If we are waiting for all pending commands to complete because
+	 * we received a transport event, then anything in the command
+	 * queue is for a new connection, so do nothing.
+	 */
+ if (TARGET_STOP(vscsi)) {
+ vio_enable_interrupts(vscsi->dma_dev);
+
+ pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ spin_unlock_bh(&vscsi->intr_lock);
+ return;
+ }
+
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+
+ while (valid) {
+ /*
+		 * These are edge triggered interrupts. After dropping out of
+		 * the while loop, the code must check for work since an
+		 * interrupt could be lost and an element left on the queue,
+ * hence the label.
+ */
+cmd_work:
+ vscsi->cmd_q.index =
+ (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
+
+ if (!rc) {
+ rc = ibmvscsis_parse_command(vscsi, crq);
+ } else {
+ if ((uint)crq->valid == VALID_TRANS_EVENT) {
+ /*
+ * must service the transport layer events even
+				 * in an error state; don't break out until all
+ * the consecutive transport events have been
+ * processed
+ */
+ rc = ibmvscsis_trans_event(vscsi, crq);
+ } else if (vscsi->flags & TRANS_EVENT) {
+ /*
+				 * if a transport event has occurred, leave
+				 * everything but transport events on the queue
+ */
+ pr_debug("handle_crq, ignoring\n");
+
+ /*
+ * need to decrement the queue index so we can
+				 * look at the element again
+ */
+ if (vscsi->cmd_q.index)
+ vscsi->cmd_q.index -= 1;
+ else
+ /*
+					 * index is at 0, so it just wrapped;
+					 * have it index the last element in q
+ */
+ vscsi->cmd_q.index = vscsi->cmd_q.mask;
+ break;
+ }
+ }
+
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+ }
+
+ if (!rc) {
+ if (ack) {
+ vio_enable_interrupts(vscsi->dma_dev);
+ ack = false;
+ pr_debug("handle_crq, reenabling interrupts\n");
+ }
+ valid = crq->valid;
+ dma_rmb();
+ if (valid)
+ goto cmd_work;
+ } else {
+ pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ }
+
+ pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
+
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+static int ibmvscsis_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct scsi_info *vscsi;
+ int rc = 0;
+ long hrc = 0;
+ char wq_name[24];
+
+ vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
+ if (!vscsi) {
+ rc = -ENOMEM;
+ pr_err("probe: allocation of adapter failed\n");
+ return rc;
+ }
+
+ vscsi->dma_dev = vdev;
+ vscsi->dev = vdev->dev;
+ INIT_LIST_HEAD(&vscsi->schedule_q);
+ INIT_LIST_HEAD(&vscsi->waiting_rsp);
+ INIT_LIST_HEAD(&vscsi->active_q);
+
+	snprintf(vscsi->tport.tport_name, sizeof(vscsi->tport.tport_name),
+		 "%s", dev_name(&vdev->dev));
+
+ pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+
+ rc = read_dma_window(vscsi);
+ if (rc)
+ goto free_adapter;
+ pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
+
+ strcpy(vscsi->eye, "VSCSI ");
+ strncat(vscsi->eye, vdev->name, MAX_EYE);
+
+ vscsi->dds.unit_id = vdev->unit_address;
+
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+
+ /*
+ * TBD: How do we determine # of cmds to request? Do we know how
+ * many "children" we have?
+ */
+ vscsi->request_limit = INITIAL_SRP_LIMIT;
+ rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
+ SRP_MAX_IU_LEN);
+ if (rc)
+ goto rem_list;
+
+ vscsi->target.ldata = vscsi;
+
+ rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
+ if (rc) {
+ dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
+ rc, vscsi->request_limit);
+ goto free_target;
+ }
+
+ /*
+ * Note: the lock is used in freeing timers, so must initialize
+ * first so that ordering in case of error is correct.
+ */
+ spin_lock_init(&vscsi->intr_lock);
+
+ rc = ibmvscsis_alloctimer(vscsi);
+ if (rc) {
+ dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
+ goto free_cmds;
+ }
+
+ rc = ibmvscsis_create_command_q(vscsi, 256);
+ if (rc) {
+ dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
+ rc);
+ goto free_timer;
+ }
+
+ vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vscsi->map_buf) {
+ rc = -ENOMEM;
+ dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
+ goto destroy_queue;
+ }
+
+ vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
+ dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
+ goto free_buf;
+ }
+
+ hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (hrc == H_SUCCESS)
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ /*
+ * We expect the VIOCTL to fail if we're configured as "any
+ * client can connect" and the client isn't activated yet.
+	 * We'll make the call again when the client sends an init msg.
+ */
+ pr_debug("probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
+
+ tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
+ (unsigned long)vscsi);
+
+ init_completion(&vscsi->wait_idle);
+
+ snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
+ vscsi->work_q = create_workqueue(wq_name);
+ if (!vscsi->work_q) {
+ rc = -ENOMEM;
+ dev_err(&vscsi->dev, "create_workqueue failed\n");
+ goto unmap_buf;
+ }
+
+ rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
+ if (rc) {
+ rc = -EPERM;
+ dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
+ goto destroy_WQ;
+ }
+
+ spin_lock_bh(&vscsi->intr_lock);
+	rc = vio_enable_interrupts(vdev);
+ if (rc) {
+ dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
+ rc = -ENODEV;
+ spin_unlock_bh(&vscsi->intr_lock);
+ goto free_irq;
+ }
+
+ if (ibmvscsis_check_q(vscsi)) {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
+ spin_unlock_bh(&vscsi->intr_lock);
+ goto disable_interrupt;
+ }
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ dev_set_drvdata(&vdev->dev, vscsi);
+
+ return 0;
+
+disable_interrupt:
+ vio_disable_interrupts(vdev);
+free_irq:
+ free_irq(vdev->irq, vscsi);
+destroy_WQ:
+ destroy_workqueue(vscsi->work_q);
+unmap_buf:
+ dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+free_buf:
+ kfree(vscsi->map_buf);
+destroy_queue:
+ tasklet_kill(&vscsi->work_task);
+ ibmvscsis_unregister_command_q(vscsi);
+ ibmvscsis_destroy_command_q(vscsi);
+free_timer:
+ ibmvscsis_freetimer(vscsi);
+free_cmds:
+ ibmvscsis_free_cmds(vscsi);
+free_target:
+ srp_target_free(&vscsi->target);
+rem_list:
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_del(&vscsi->list);
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+free_adapter:
+ kfree(vscsi);
+
+ return rc;
+}
+
+static int ibmvscsis_remove(struct vio_dev *vdev)
+{
+ struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
+
+ pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+
+ /*
+ * TBD: Need to handle if there are commands on the waiting_rsp q
+ * Actually, can there still be cmds outstanding to tcm?
+ */
+
+ vio_disable_interrupts(vdev);
+ free_irq(vdev->irq, vscsi);
+ destroy_workqueue(vscsi->work_q);
+ dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ kfree(vscsi->map_buf);
+ tasklet_kill(&vscsi->work_task);
+ ibmvscsis_unregister_command_q(vscsi);
+ ibmvscsis_destroy_command_q(vscsi);
+ ibmvscsis_freetimer(vscsi);
+ ibmvscsis_free_cmds(vscsi);
+ srp_target_free(&vscsi->target);
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_del(&vscsi->list);
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+ kfree(vscsi);
+
+ return 0;
+}
+
+static ssize_t system_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
+}
+
+static ssize_t partition_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
+}
+
+static ssize_t unit_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
+}
+
+static int ibmvscsis_get_system_info(void)
+{
+ struct device_node *rootdn, *vdevdn;
+ const char *id, *model, *name;
+ const uint *num;
+
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn)
+ return -ENOENT;
+
+ model = of_get_property(rootdn, "model", NULL);
+ id = of_get_property(rootdn, "system-id", NULL);
+ if (model && id)
+ snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
+
+ name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ if (name)
+ strncpy(partition_name, name, sizeof(partition_name));
+
+ num = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (num)
+ partition_number = *num;
+
+ of_node_put(rootdn);
+
+ vdevdn = of_find_node_by_path("/vdevice");
+ if (vdevdn) {
+ const uint *mvds;
+
+ mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
+ NULL);
+ if (mvds)
+ max_vdma_size = *mvds;
+ of_node_put(vdevdn);
+ }
+
+ return 0;
+}
+
+static char *ibmvscsis_get_fabric_name(void)
+{
+ return "ibmvscsis";
+}
+
+static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct ibmvscsis_tport *tport =
+ container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
+
+ return tport->tport_name;
+}
+
+static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
+{
+ struct ibmvscsis_tport *tport =
+ container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
+
+ return tport->tport_tpgt;
+}
+
+static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd);
+}
+
+static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ /* Remove from active_q */
+ list_del(&cmd->list);
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct iu_entry *iue = cmd->iue;
+ int rc;
+
+ pr_debug("write_pending, se_cmd %p, length 0x%x\n",
+ se_cmd, se_cmd->data_length);
+
+ rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
+ 1, 1);
+ if (rc) {
+ pr_err("srp_transfer_data() failed: %d\n", rc);
+ return -EAGAIN;
+ }
+ /*
+ * We now tell TCM to add this WRITE CDB directly into the TCM storage
+ * object execution queue.
+ */
+ target_execute_cmd(se_cmd);
+ return 0;
+}
+
+static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct iu_entry *iue = cmd->iue;
+ struct scsi_info *vscsi = cmd->adapter;
+ char *sd;
+ uint len = 0;
+ int rc;
+
+ pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
+ se_cmd, se_cmd->data_length);
+
+ rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
+ 1);
+ if (rc) {
+ pr_err("srp_transfer_data failed: %d\n", rc);
+ sd = se_cmd->sense_buffer;
+ se_cmd->scsi_sense_length = 18;
+ memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
+ /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
+ scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
+ 0x08, 0x01);
+ }
+
+ srp_build_response(vscsi, cmd, &len);
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.len = len;
+
+ return 0;
+}
+
+static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+ uint len;
+
+ pr_debug("queue_status %p\n", se_cmd);
+
+ srp_build_response(vscsi, cmd, &len);
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.len = len;
+
+ return 0;
+}
+
+static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+ uint len;
+
+ pr_debug("queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
+
+ srp_build_response(vscsi, cmd, &len);
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.len = len;
+}
+
+static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
+{
+ /* TBD: What (if anything) should we do here? */
+ pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+}
+
+static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct ibmvscsis_tport *tport;
+
+ tport = ibmvscsis_lookup_port(name);
+ if (tport) {
+ tport->tport_proto_id = SCSI_PROTOCOL_SRP;
+ pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
+ return &tport->tport_wwn;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void ibmvscsis_drop_tport(struct se_wwn *wwn)
+{
+ struct ibmvscsis_tport *tport = container_of(wwn,
+ struct ibmvscsis_tport,
+ tport_wwn);
+
+ pr_debug("drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+}
+
+static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct ibmvscsis_tport *tport =
+ container_of(wwn, struct ibmvscsis_tport, tport_wwn);
+ int rc;
+
+ tport->releasing = false;
+
+ rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
+ tport->tport_proto_id);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return &tport->se_tpg;
+}
+
+static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct ibmvscsis_tport *tport = container_of(se_tpg,
+ struct ibmvscsis_tport,
+ se_tpg);
+
+ tport->releasing = true;
+ tport->enabled = false;
+
+ /*
+ * Release the virtual I_T Nexus for this ibmvscsis TPG
+ */
+ ibmvscsis_drop_nexus(tport);
+ /*
+ * Deregister the se_tpg from TCM..
+ */
+ core_tpg_deregister(se_tpg);
+}
+
+static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
+}
+CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
+
+static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
+ &ibmvscsis_wwn_attr_version,
+ NULL,
+};
+
+static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct ibmvscsis_tport *tport = container_of(se_tpg,
+ struct ibmvscsis_tport,
+ se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
+}
+
+static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct ibmvscsis_tport *tport = container_of(se_tpg,
+ struct ibmvscsis_tport,
+ se_tpg);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
+ unsigned long tmp;
+ int rc;
+ long lrc;
+
+ rc = kstrtoul(page, 0, &tmp);
+ if (rc < 0) {
+ pr_err("Unable to extract srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ pr_err("Illegal value for srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if (tmp) {
+ tport->enabled = true;
+ spin_lock_bh(&vscsi->intr_lock);
+ lrc = ibmvscsis_enable_change_state(vscsi);
+ if (lrc)
+ pr_err("enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
+ spin_unlock_bh(&vscsi->intr_lock);
+ } else {
+ tport->enabled = false;
+ }
+
+ pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+
+ return count;
+}
+CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
+
+static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
+ &ibmvscsis_tpg_attr_enable,
+ NULL,
+};
+
+static const struct target_core_fabric_ops ibmvscsis_ops = {
+ .module = THIS_MODULE,
+ .name = "ibmvscsis",
+ .get_fabric_name = ibmvscsis_get_fabric_name,
+ .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
+ .tpg_get_tag = ibmvscsis_get_tag,
+ .tpg_get_default_depth = ibmvscsis_get_default_depth,
+ .tpg_check_demo_mode = ibmvscsis_check_true,
+ .tpg_check_demo_mode_cache = ibmvscsis_check_true,
+ .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
+ .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
+ .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index,
+ .check_stop_free = ibmvscsis_check_stop_free,
+ .release_cmd = ibmvscsis_release_cmd,
+ .sess_get_index = ibmvscsis_sess_get_index,
+ .write_pending = ibmvscsis_write_pending,
+ .write_pending_status = ibmvscsis_write_pending_status,
+ .set_default_node_attributes = ibmvscsis_set_default_node_attrs,
+ .get_cmd_state = ibmvscsis_get_cmd_state,
+ .queue_data_in = ibmvscsis_queue_data_in,
+ .queue_status = ibmvscsis_queue_status,
+ .queue_tm_rsp = ibmvscsis_queue_tm_rsp,
+ .aborted_task = ibmvscsis_aborted_task,
+ /*
+ * Setup function pointers for logic in target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = ibmvscsis_make_tport,
+ .fabric_drop_wwn = ibmvscsis_drop_tport,
+ .fabric_make_tpg = ibmvscsis_make_tpg,
+ .fabric_drop_tpg = ibmvscsis_drop_tpg,
+
+ .tfc_wwn_attrs = ibmvscsis_wwn_attrs,
+ .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs,
+};
+
+static void ibmvscsis_dev_release(struct device *dev) {}
+
+static struct class_attribute ibmvscsis_class_attrs[] = {
+ __ATTR_NULL,
+};
+
+static struct device_attribute dev_attr_system_id =
+ __ATTR(system_id, S_IRUGO, system_id_show, NULL);
+
+static struct device_attribute dev_attr_partition_number =
+ __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
+
+static struct device_attribute dev_attr_unit_address =
+ __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
+
+static struct attribute *ibmvscsis_dev_attrs[] = {
+	&dev_attr_system_id.attr,
+	&dev_attr_partition_number.attr,
+	&dev_attr_unit_address.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ibmvscsis_dev);
+
+static struct class ibmvscsis_class = {
+ .name = "ibmvscsis",
+ .dev_release = ibmvscsis_dev_release,
+ .class_attrs = ibmvscsis_class_attrs,
+ .dev_groups = ibmvscsis_dev_groups,
+};
+
+static struct vio_device_id ibmvscsis_device_table[] = {
+ { "v-scsi-host", "IBM,v-scsi-host" },
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
+
+static struct vio_driver ibmvscsis_driver = {
+ .name = "ibmvscsis",
+ .id_table = ibmvscsis_device_table,
+ .probe = ibmvscsis_probe,
+ .remove = ibmvscsis_remove,
+};
+
+/*
+ * ibmvscsis_init() - Kernel Module initialization
+ *
+ * Note: vio_register_driver() registers callback functions, and at least one
+ * of those callback functions calls TCM - Linux IO Target Subsystem, thus
+ * the SCSI Target template must be registered before vio_register_driver()
+ * is called.
+ */
+static int __init ibmvscsis_init(void)
+{
+ int rc = 0;
+
+ rc = ibmvscsis_get_system_info();
+ if (rc) {
+ pr_err("rc %d from get_system_info\n", rc);
+ goto out;
+ }
+
+ rc = class_register(&ibmvscsis_class);
+ if (rc) {
+ pr_err("failed class register\n");
+ goto out;
+ }
+
+ rc = target_register_template(&ibmvscsis_ops);
+ if (rc) {
+ pr_err("rc %d from target_register_template\n", rc);
+ goto unregister_class;
+ }
+
+ rc = vio_register_driver(&ibmvscsis_driver);
+ if (rc) {
+ pr_err("rc %d from vio_register_driver\n", rc);
+ goto unregister_target;
+ }
+
+ return 0;
+
+unregister_target:
+ target_unregister_template(&ibmvscsis_ops);
+unregister_class:
+ class_unregister(&ibmvscsis_class);
+out:
+ return rc;
+}
+
+static void __exit ibmvscsis_exit(void)
+{
+ pr_info("Unregister IBM virtual SCSI host driver\n");
+ vio_unregister_driver(&ibmvscsis_driver);
+ target_unregister_template(&ibmvscsis_ops);
+ class_unregister(&ibmvscsis_class);
+}
+
+MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
+MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVSCSIS_VERSION);
+module_init(ibmvscsis_init);
+module_exit(ibmvscsis_exit);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
new file mode 100644
index 000000000000..981a0c992b6c
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -0,0 +1,346 @@
+/*******************************************************************************
+ * IBM Virtual SCSI Target Driver
+ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
+ * Santiago Leon (santil@us.ibm.com) IBM Corp.
+ * Linda Xie (lxie@us.ibm.com) IBM Corp.
+ *
+ * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
+ * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
+ *
+ * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+ * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#ifndef __H_IBMVSCSI_TGT
+#define __H_IBMVSCSI_TGT
+
+#include "libsrp.h"
+
+#define SYS_ID_NAME_LEN 64
+#define PARTITION_NAMELEN 96
+#define IBMVSCSIS_NAMELEN 32
+
+#define MSG_HI 0
+#define MSG_LOW 1
+
+#define MAX_CMD_Q_PAGES 4
+#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq))
+/* in terms of number of elements */
+#define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE
+#define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES)
+
+#define SRP_VIOLATION 0x102 /* general error code */
+
+/*
+ * SRP buffer formats defined as of 16.a supported by this driver.
+ */
+#define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \
+ (SRP_DATA_DESC_INDIRECT << 1))
+
+#define SCSI_LUN_ADDR_METHOD_FLAT 1
+
+struct dma_window {
+ u32 liobn; /* Unique per vdevice */
+ u64 tce_base; /* Physical location of the TCE table */
+ u64 tce_size; /* Size of the TCE table in bytes */
+};
+
+struct target_dds {
+ u64 unit_id; /* 64 bit will force alignment */
+#define NUM_DMA_WINDOWS 2
+#define LOCAL 0
+#define REMOTE 1
+ struct dma_window window[NUM_DMA_WINDOWS];
+
+ /* root node property "ibm,partition-no" */
+ uint partition_num;
+ char partition_name[PARTITION_NAMELEN];
+};
+
+#define MAX_NUM_PORTS 1
+#define MAX_H_COPY_RDMA (128 * 1024)
+
+#define MAX_EYE 64
+
+/* Return codes */
+#define ADAPT_SUCCESS 0L
+/* choose error codes that do not conflict with PHYP */
+#define ERROR -40L
+
+struct format_code {
+ u8 reserved;
+ u8 buffers;
+};
+
+struct client_info {
+#define SRP_VERSION "16.a"
+ char srp_version[8];
+ /* root node property ibm,partition-name */
+ char partition_name[PARTITION_NAMELEN];
+ /* root node property ibm,partition-no */
+ u32 partition_number;
+ /* initially 1 */
+ u32 mad_version;
+ u32 os_type;
+};
+
+/*
+ * Changing this constant changes the number of seconds to wait before
+ * considering the client will never service its queue again.
+ */
+#define SECONDS_TO_CONSIDER_FAILED 30
+/*
+ * These constants set the polling period used to determine if the client
+ * has freed at least one element in the response queue.
+ */
+#define WAIT_SECONDS 1
+#define WAIT_NANO_SECONDS 5000
+#define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \
+ SECONDS_TO_CONSIDER_FAILED)
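+/*
+ * With the values above this works out to (1000000 / 5000) * 30, i.e.
+ * 6000 timer pops before the client is considered failed.
+ */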
+/*
+ * general purpose timer control block
+ * which can be used for multiple functions
+ */
+struct timer_cb {
+ struct hrtimer timer;
+ /*
+ * how long has it been since the client
+	 * serviced the queue. The variable is incremented
+	 * in the service_wait_q routine and cleared
+	 * in send_messages
+ */
+ int timer_pops;
+ /* the timer is started */
+ bool started;
+};
+
+struct cmd_queue {
+ /* kva */
+ struct viosrp_crq *base_addr;
+ dma_addr_t crq_token;
+ /* used to maintain index */
+ uint mask;
+ /* current element */
+ uint index;
+ int size;
+};
+
+#define SCSOLNT_RESP_SHIFT 1
+#define UCSOLNT_RESP_SHIFT 2
+
+#define SCSOLNT BIT(SCSOLNT_RESP_SHIFT)
+#define UCSOLNT BIT(UCSOLNT_RESP_SHIFT)
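+/*
+ * Example of how these are consumed (see srp_build_response()):
+ *	rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> SCSOLNT_RESP_SHIFT;
+ * which yields 1 when the corresponding solicited-notification bit was
+ * set in the request.
+ */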
+
+enum cmd_type {
+ SCSI_CDB = 0x01,
+ TASK_MANAGEMENT = 0x02,
+ /* MAD or addressed to port 0 */
+ ADAPTER_MAD = 0x04,
+ UNSET_TYPE = 0x08,
+};
+
+struct iu_rsp {
+ u8 format;
+ u8 sol_not;
+ u16 len;
+ /* tag is just to help client identify cmd, so don't translate be/le */
+ u64 tag;
+};
+
+struct ibmvscsis_cmd {
+ struct list_head list;
+ /* Used for TCM Core operations */
+ struct se_cmd se_cmd;
+ struct iu_entry *iue;
+ struct iu_rsp rsp;
+ struct work_struct work;
+ struct scsi_info *adapter;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+ u64 init_time;
+#define CMD_FAST_FAIL BIT(0)
+ u32 flags;
+ char type;
+};
+
+struct ibmvscsis_nexus {
+ struct se_session *se_sess;
+};
+
+struct ibmvscsis_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* ASCII formatted WWPN for SRP Target port */
+ char tport_name[IBMVSCSIS_NAMELEN];
+ /* Returned by ibmvscsis_make_tport() */
+ struct se_wwn tport_wwn;
+ /* Returned by ibmvscsis_make_tpg() */
+ struct se_portal_group se_tpg;
+ /* ibmvscsis port target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Pointer to TCM session for I_T Nexus */
+ struct ibmvscsis_nexus *ibmv_nexus;
+ bool enabled;
+ bool releasing;
+};
+
+struct scsi_info {
+ struct list_head list;
+ char eye[MAX_EYE];
+
+ /* commands waiting for space on the response queue */
+ struct list_head waiting_rsp;
+#define NO_QUEUE 0x00
+#define WAIT_ENABLED 0x01
+ /* driver has received an initialize command */
+#define PART_UP_WAIT_ENAB 0x02
+#define WAIT_CONNECTION 0x04
+ /* have established a connection */
+#define CONNECTED 0x08
+ /* at least one port is processing SRP IU */
+#define SRP_PROCESSING 0x10
+ /* remove request received */
+#define UNCONFIGURING 0x20
+ /* disconnect by letting adapter go idle, no error */
+#define WAIT_IDLE 0x40
+ /* disconnecting to clear an error */
+#define ERR_DISCONNECT 0x80
+ /* disconnect to clear error state, then come back up */
+#define ERR_DISCONNECT_RECONNECT 0x100
+ /* disconnected after clearing an error */
+#define ERR_DISCONNECTED 0x200
+ /* A series of errors left the adapter in an undefined state */
+#define UNDEFINED 0x400
+ u16 state;
+ int fast_fail;
+ struct target_dds dds;
+ char *cmd_pool;
+ /* list of free commands */
+ struct list_head free_cmd;
+ /* command elements ready for scheduler */
+ struct list_head schedule_q;
+ /* commands sent to TCM */
+ struct list_head active_q;
+ caddr_t *map_buf;
+ /* ioba of map buffer */
+ dma_addr_t map_ioba;
+ /* allowable number of outstanding SRP requests */
+ int request_limit;
+ /* extra credit */
+ int credit;
+ /* outstanding transactions against credit limit */
+ int debit;
+
+ /* allow only one outstanding mad request */
+#define PROCESSING_MAD 0x00002
+ /* Waiting to go idle */
+#define WAIT_FOR_IDLE 0x00004
+ /* H_REG_CRQ called */
+#define CRQ_CLOSED 0x00010
+ /* detected that client has failed */
+#define CLIENT_FAILED 0x00040
+ /* detected that transport event occurred */
+#define TRANS_EVENT 0x00080
+ /* don't attempt to send anything to the client */
+#define RESPONSE_Q_DOWN 0x00100
+ /* request made to schedule disconnect handler */
+#define SCHEDULE_DISCONNECT 0x00400
+ /* disconnect handler is scheduled */
+#define DISCONNECT_SCHEDULED 0x00800
+ u32 flags;
+ /* adapter lock */
+ spinlock_t intr_lock;
+ /* information needed to manage command queue */
+ struct cmd_queue cmd_q;
+ /* used in hcall to copy response back into srp buffer */
+ u64 empty_iu_id;
+ /* used in crq, to tag what iu the response is for */
+ u64 empty_iu_tag;
+ uint new_state;
+ /* control block for the response queue timer */
+ struct timer_cb rsp_q_timer;
+ /* keep last client to enable proper accounting */
+ struct client_info client_data;
+ /* what can this client do */
+ u32 client_cap;
+ /*
+ * The following two fields capture state and flag changes that
+ * can occur when the lock is given up. In the original design,
+ * the lock was held during calls into phyp;
+ * however, phyp did not meet the PAPR architecture. This is
+ * a workaround.
+ */
+ u16 phyp_acr_state;
+ u32 phyp_acr_flags;
+
+ struct workqueue_struct *work_q;
+ struct completion wait_idle;
+ struct device dev;
+ struct vio_dev *dma_dev;
+ struct srp_target target;
+ struct ibmvscsis_tport tport;
+ struct tasklet_struct work_task;
+ struct work_struct proc_work;
+};
+
+/*
+ * Provide a constant that allows software to detect the adapter is
+ * disconnecting from the client from one of several states.
+ */
+#define IS_DISCONNECTING (UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \
+ ERR_DISCONNECT)
+
+/*
+ * Provide a constant that can be used in interrupt handling to let the
+ * interrupt handler know that all requests should be thrown out.
+ */
+#define DONT_PROCESS_STATE (IS_DISCONNECTING | UNDEFINED | \
+ ERR_DISCONNECTED | WAIT_IDLE)
+
+/*
+ * If any of these flag bits are set then do not allow the interrupt
+ * handler to schedule the off level handler.
+ */
+#define BLOCK (DISCONNECT_SCHEDULED)
+
+/* State and transition events that stop the interrupt handler */
+#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
+ ((VSCSI)->flags & BLOCK))
+
+/* flag bits that are not reset during disconnect */
+#define PRESERVE_FLAG_FIELDS 0
+
+#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
+
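+/*
+ * Masking off the SCSI group code (top three bits of the opcode) leaves the
+ * command code, so READ_6/10/12/16 all compare as 0x08 and the corresponding
+ * WRITE opcodes as 0x0A.
+ */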
+#define READ_CMD(cdb) (((cdb)[0] & 0x1F) == 8)
+#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA)
+
+#ifndef H_GET_PARTNER_INFO
+#define H_GET_PARTNER_INFO 0x0000000000000008LL
+#endif
+
+#define h_copy_rdma(l, sa, sb, da, db) \
+ plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
+#define h_vioctl(u, o, a, u1, u2, u3, u4) \
+ plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2)
+#define h_reg_crq(ua, tok, sz) \
+ plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
+#define h_free_crq(ua) \
+ plpar_hcall_norets(H_FREE_CRQ, ua)
+#define h_send_crq(ua, d1, d2) \
+ plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2)
+
+#endif
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
new file mode 100644
index 000000000000..5a4cc28ca5ff
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
@@ -0,0 +1,427 @@
+/*******************************************************************************
+ * SCSI RDMA Protocol lib functions
+ *
+ * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ***********************************************************************/
+
+#define pr_fmt(fmt) "libsrp: " fmt
+
+#include <linux/printk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <scsi/srp.h>
+#include <target/target_core_base.h>
+#include "libsrp.h"
+#include "ibmvscsi_tgt.h"
+
+static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
+ struct srp_buf **ring)
+{
+ struct iu_entry *iue;
+ int i;
+
+ q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
+ if (!q->pool)
+ return -ENOMEM;
+ q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
+ if (!q->items)
+ goto free_pool;
+
+ spin_lock_init(&q->lock);
+ kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
+
+ for (i = 0, iue = q->items; i < max; i++) {
+ kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
+ iue->sbuf = ring[i];
+ iue++;
+ }
+ return 0;
+
+free_pool:
+ kfree(q->pool);
+ return -ENOMEM;
+}
+
+static void srp_iu_pool_free(struct srp_queue *q)
+{
+ kfree(q->items);
+ kfree(q->pool);
+}
+
+static struct srp_buf **srp_ring_alloc(struct device *dev,
+ size_t max, size_t size)
+{
+ struct srp_buf **ring;
+ int i;
+
+ ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ for (i = 0; i < max; i++) {
+ ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
+ if (!ring[i])
+ goto out;
+ ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
+ GFP_KERNEL);
+ if (!ring[i]->buf)
+ goto out;
+ }
+ return ring;
+
+out:
+ for (i = 0; i < max && ring[i]; i++) {
+ if (ring[i]->buf) {
+ dma_free_coherent(dev, size, ring[i]->buf,
+ ring[i]->dma);
+ }
+ kfree(ring[i]);
+ }
+ kfree(ring);
+
+ return NULL;
+}
+
+static void srp_ring_free(struct device *dev, struct srp_buf **ring,
+ size_t max, size_t size)
+{
+ int i;
+
+ for (i = 0; i < max; i++) {
+ dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+ kfree(ring[i]);
+ }
+ kfree(ring);
+}
+
+int srp_target_alloc(struct srp_target *target, struct device *dev,
+ size_t nr, size_t iu_size)
+{
+ int err;
+
+ spin_lock_init(&target->lock);
+
+ target->dev = dev;
+
+ target->srp_iu_size = iu_size;
+ target->rx_ring_size = nr;
+ target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
+ if (!target->rx_ring)
+ return -ENOMEM;
+ err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
+ if (err)
+ goto free_ring;
+
+ dev_set_drvdata(target->dev, target);
+ return 0;
+
+free_ring:
+ srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
+ return -ENOMEM;
+}
+
+void srp_target_free(struct srp_target *target)
+{
+ dev_set_drvdata(target->dev, NULL);
+ srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
+ target->srp_iu_size);
+ srp_iu_pool_free(&target->iu_queue);
+}
+
+struct iu_entry *srp_iu_get(struct srp_target *target)
+{
+ struct iu_entry *iue = NULL;
+
+ if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
+ sizeof(void *),
+ &target->iu_queue.lock) != sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ return NULL;
+ }
+ if (!iue)
+ return iue;
+ iue->target = target;
+ iue->flags = 0;
+ return iue;
+}
+
+void srp_iu_put(struct iu_entry *iue)
+{
+ kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
+ sizeof(void *), &iue->target->iu_queue.lock);
+}
+
+static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
+ enum dma_data_direction dir, srp_rdma_t rdma_io,
+ int dma_map, int ext_desc)
+{
+ struct iu_entry *iue = NULL;
+ struct scatterlist *sg = NULL;
+ int err, nsg = 0, len;
+
+ if (dma_map) {
+ iue = cmd->iue;
+ sg = cmd->se_cmd.t_data_sg;
+ nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
+ DMA_BIDIRECTIONAL);
+ if (!nsg) {
+ pr_err("fail to map %p %d\n", iue,
+ cmd->se_cmd.t_data_nents);
+ return 0;
+ }
+ len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
+ } else {
+ len = be32_to_cpu(md->len);
+ }
+
+ err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
+
+ if (dma_map)
+ dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+ return err;
+}
+
+static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
+ struct srp_indirect_buf *id,
+ enum dma_data_direction dir, srp_rdma_t rdma_io,
+ int dma_map, int ext_desc)
+{
+ struct iu_entry *iue = NULL;
+ struct srp_direct_buf *md = NULL;
+ struct scatterlist dummy, *sg = NULL;
+ dma_addr_t token = 0;
+ int err = 0;
+ int nmd, nsg = 0, len;
+
+ if (dma_map || ext_desc) {
+ iue = cmd->iue;
+ sg = cmd->se_cmd.t_data_sg;
+ }
+
+ nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);
+
+ if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
+ (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
+ md = &id->desc_list[0];
+ goto rdma;
+ }
+
+ if (ext_desc && dma_map) {
+ md = dma_alloc_coherent(iue->target->dev,
+ be32_to_cpu(id->table_desc.len),
+ &token, GFP_KERNEL);
+ if (!md) {
+ pr_err("Can't get dma memory %u\n",
+ be32_to_cpu(id->table_desc.len));
+ return -ENOMEM;
+ }
+
+ sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
+ sg_dma_address(&dummy) = token;
+ sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
+ err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
+ be32_to_cpu(id->table_desc.len));
+ if (err) {
+ pr_err("Error copying indirect table %d\n", err);
+ goto free_mem;
+ }
+ } else {
+ pr_err("This command uses external indirect buffer\n");
+ return -EINVAL;
+ }
+
+rdma:
+ if (dma_map) {
+ nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
+ DMA_BIDIRECTIONAL);
+ if (!nsg) {
+ pr_err("fail to map %p %d\n", iue,
+ cmd->se_cmd.t_data_nents);
+ err = -EIO;
+ goto free_mem;
+ }
+ len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
+ } else {
+ len = be32_to_cpu(id->len);
+ }
+
+ err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
+
+ if (dma_map)
+ dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+free_mem:
+ if (token && dma_map) {
+ dma_free_coherent(iue->target->dev,
+ be32_to_cpu(id->table_desc.len), md, token);
+ }
+ return err;
+}
+
+static int data_out_desc_size(struct srp_cmd *cmd)
+{
+ int size = 0;
+ u8 fmt = cmd->buf_fmt >> 4;
+
+ switch (fmt) {
+ case SRP_NO_DATA_DESC:
+ break;
+ case SRP_DATA_DESC_DIRECT:
+ size = sizeof(struct srp_direct_buf);
+ break;
+ case SRP_DATA_DESC_INDIRECT:
+ size = sizeof(struct srp_indirect_buf) +
+ sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
+ break;
+ default:
+ pr_err("client error. Invalid data_out_format %x\n", fmt);
+ break;
+ }
+ return size;
+}
+
+/*
+ * TODO: this can be called multiple times for a single command if it
+ * has very long data.
+ */
+int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
+ srp_rdma_t rdma_io, int dma_map, int ext_desc)
+{
+ struct srp_direct_buf *md;
+ struct srp_indirect_buf *id;
+ enum dma_data_direction dir;
+ int offset, err = 0;
+ u8 format;
+
+ if (!cmd->se_cmd.t_data_nents)
+ return 0;
+
+ offset = srp_cmd->add_cdb_len & ~3;
+
+ dir = srp_cmd_direction(srp_cmd);
+ if (dir == DMA_FROM_DEVICE)
+ offset += data_out_desc_size(srp_cmd);
+
+ if (dir == DMA_TO_DEVICE)
+ format = srp_cmd->buf_fmt >> 4;
+ else
+ format = srp_cmd->buf_fmt & ((1U << 4) - 1);
+
+ switch (format) {
+ case SRP_NO_DATA_DESC:
+ break;
+ case SRP_DATA_DESC_DIRECT:
+ md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
+ err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
+ break;
+ case SRP_DATA_DESC_INDIRECT:
+ id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
+ err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
+ ext_desc);
+ break;
+ default:
+ pr_err("Unknown format %d %x\n", dir, format);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
+{
+ struct srp_direct_buf *md;
+ struct srp_indirect_buf *id;
+ u64 len = 0;
+ uint offset = cmd->add_cdb_len & ~3;
+ u8 fmt;
+
+ if (dir == DMA_TO_DEVICE) {
+ fmt = cmd->buf_fmt >> 4;
+ } else {
+ fmt = cmd->buf_fmt & ((1U << 4) - 1);
+ offset += data_out_desc_size(cmd);
+ }
+
+ switch (fmt) {
+ case SRP_NO_DATA_DESC:
+ break;
+ case SRP_DATA_DESC_DIRECT:
+ md = (struct srp_direct_buf *)(cmd->add_data + offset);
+ len = be32_to_cpu(md->len);
+ break;
+ case SRP_DATA_DESC_INDIRECT:
+ id = (struct srp_indirect_buf *)(cmd->add_data + offset);
+ len = be32_to_cpu(id->len);
+ break;
+ default:
+ pr_err("invalid data format %x\n", fmt);
+ break;
+ }
+ return len;
+}
+
+int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ u64 *data_len)
+{
+ struct srp_indirect_buf *idb;
+ struct srp_direct_buf *db;
+ uint add_cdb_offset;
+ int rc;
+
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+ && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ BUG_ON(!dir);
+ BUG_ON(!data_len);
+
+ rc = 0;
+ *data_len = 0;
+
+ *dir = DMA_NONE;
+
+ if (srp_cmd->buf_fmt & 0xf)
+ *dir = DMA_FROM_DEVICE;
+ else if (srp_cmd->buf_fmt >> 4)
+ *dir = DMA_TO_DEVICE;
+
+ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+ db = (struct srp_direct_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+ *data_len = be32_to_cpu(db->len);
+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+
+ *data_len = be32_to_cpu(idb->len);
+ }
+ return rc;
+}
+
+MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
+MODULE_AUTHOR("FUJITA Tomonori");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h
new file mode 100644
index 000000000000..4696f331453e
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -0,0 +1,123 @@
+#ifndef __LIBSRP_H__
+#define __LIBSRP_H__
+
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <scsi/srp.h>
+
+enum srp_valid {
+ INVALIDATE_CMD_RESP_EL = 0,
+ VALID_CMD_RESP_EL = 0x80,
+ VALID_INIT_MSG = 0xC0,
+ VALID_TRANS_EVENT = 0xFF
+};
+
+enum srp_format {
+ SRP_FORMAT = 1,
+ MAD_FORMAT = 2,
+ OS400_FORMAT = 3,
+ AIX_FORMAT = 4,
+ LINUX_FORMAT = 5,
+ MESSAGE_IN_CRQ = 6
+};
+
+enum srp_init_msg {
+ INIT_MSG = 1,
+ INIT_COMPLETE_MSG = 2
+};
+
+enum srp_trans_event {
+ UNUSED_FORMAT = 0,
+ PARTNER_FAILED = 1,
+ PARTNER_DEREGISTER = 2,
+ MIGRATED = 6
+};
+
+enum srp_status {
+ HEADER_DESCRIPTOR = 0xF1,
+ PING = 0xF5,
+ PING_RESPONSE = 0xF6
+};
+
+enum srp_mad_version {
+ MAD_VERSION_1 = 1
+};
+
+enum srp_os_type {
+ OS400 = 1,
+ LINUX = 2,
+ AIX = 3,
+ OFW = 4
+};
+
+enum srp_task_attributes {
+ SRP_SIMPLE_TASK = 0,
+ SRP_HEAD_TASK = 1,
+ SRP_ORDERED_TASK = 2,
+ SRP_ACA_TASK = 4
+};
+
+enum {
+ SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE = 0,
+ SRP_REQUEST_FIELDS_INVALID = 2,
+ SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED = 4,
+ SRP_TASK_MANAGEMENT_FUNCTION_FAILED = 5
+};
+
+struct srp_buf {
+ dma_addr_t dma;
+ void *buf;
+};
+
+struct srp_queue {
+ void *pool;
+ void *items;
+ struct kfifo queue;
+ spinlock_t lock;
+};
+
+struct srp_target {
+ struct device *dev;
+
+ spinlock_t lock;
+ struct list_head cmd_queue;
+
+ size_t srp_iu_size;
+ struct srp_queue iu_queue;
+ size_t rx_ring_size;
+ struct srp_buf **rx_ring;
+
+ void *ldata;
+};
+
+struct iu_entry {
+ struct srp_target *target;
+
+ struct list_head ilist;
+ dma_addr_t remote_token;
+ unsigned long flags;
+
+ struct srp_buf *sbuf;
+ u16 iu_len;
+};
+
+struct ibmvscsis_cmd;
+
+typedef int (srp_rdma_t)(struct ibmvscsis_cmd *, struct scatterlist *, int,
+ struct srp_direct_buf *, int,
+ enum dma_data_direction, unsigned int);
+int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
+void srp_target_free(struct srp_target *);
+struct iu_entry *srp_iu_get(struct srp_target *);
+void srp_iu_put(struct iu_entry *);
+int srp_transfer_data(struct ibmvscsis_cmd *, struct srp_cmd *,
+ srp_rdma_t, int, int);
+u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir);
+int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ u64 *data_len);
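+/*
+ * The buf_fmt byte in the SRP command IU carries the data-out descriptor
+ * format in its upper nibble and the data-in format in the lower nibble,
+ * which is what the helper below keys off.
+ */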
+static inline int srp_cmd_direction(struct srp_cmd *cmd)
+{
+ return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
+#endif
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 1f539c288ae8..17d04c702e1b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3288,6 +3288,11 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
+ if (!ioa_cfg->scan_enabled) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
restart:
do {
did_work = 0;
@@ -10214,6 +10219,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (!ioa_cfg->reset_work_q) {
dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
+ rc = -ENOMEM;
goto out_free_irq;
}
} else
@@ -10362,6 +10368,7 @@ static void ipr_remove(struct pci_dev *pdev)
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long flags;
int rc, i;
rc = ipr_probe_ioa(pdev, dev_id);
@@ -10403,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
__ipr_remove(pdev);
return rc;
}
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ ioa_cfg->scan_enabled = 1;
+ schedule_work(&ioa_cfg->work_q);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- scsi_scan_host(ioa_cfg->host);
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
@@ -10414,7 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
}
}
- schedule_work(&ioa_cfg->work_q);
+ scsi_scan_host(ioa_cfg->host);
+
return 0;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 1d42c7464dfc..cdb51960b53c 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1478,6 +1478,7 @@ struct ipr_ioa_cfg {
u8 in_ioa_bringdown:1;
u8 ioa_unit_checked:1;
u8 dump_taken:1;
+ u8 scan_enabled:1;
u8 scan_done:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index adf61b43eb70..734a0428ef0e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4854,20 +4854,17 @@ static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
struct pci_dev *pdev;
- int bars = 0;
/* Obtain PCI device reference */
if (!phba->pcidev)
goto out_error;
else
pdev = phba->pcidev;
- /* Select PCI BARs */
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
/* Enable PCI device */
if (pci_enable_device_mem(pdev))
goto out_error;
/* Request PCI resource for the device */
- if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+ if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
goto out_disable_device;
/* Set up device as PCI master and save state for EEH */
pci_set_master(pdev);
@@ -4884,7 +4881,7 @@ out_disable_device:
pci_disable_device(pdev);
out_error:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1401 Failed to enable pci device, bars:x%x\n", bars);
+ "1401 Failed to enable pci device\n");
return -ENODEV;
}
@@ -4899,17 +4896,14 @@ static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
struct pci_dev *pdev;
- int bars;
/* Obtain PCI device reference */
if (!phba->pcidev)
return;
else
pdev = phba->pcidev;
- /* Select PCI BARs */
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
/* Release PCI resource and disable PCI device */
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
pci_disable_device(pdev);
return;
@@ -9811,7 +9805,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
struct lpfc_vport **vports;
struct lpfc_hba *phba = vport->phba;
int i;
- int bars = pci_select_bars(pdev, IORESOURCE_MEM);
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
@@ -9886,7 +9879,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_hba_free(phba);
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a5655d571906..d197aa176dee 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3877,7 +3877,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
uint32_t tag;
uint16_t hwq;
- if (shost_use_blk_mq(cmnd->device->host)) {
+ if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
tag = blk_mq_unique_tag(cmnd->request);
hwq = blk_mq_unique_tag_to_hwq(tag);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 351d08ace24a..7080ce2920fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1323,21 +1323,18 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
lockdep_assert_held(&phba->hbalock);
+ BUG_ON(!piocb || !piocb->vport);
+
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
- (!(piocb->vport->load_flag & FC_UNLOADING))) {
- if (!piocb->vport)
- BUG();
- else
- mod_timer(&piocb->vport->els_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
- }
-
+ (!(piocb->vport->load_flag & FC_UNLOADING)))
+ mod_timer(&piocb->vport->els_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
return 0;
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index cb58ef0d9b2c..fe42a2fdf351 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,7 +1,6 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/bcm/Kconfig"
-source "drivers/soc/brcmstb/Kconfig"
source "drivers/soc/fsl/qe/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
@@ -10,6 +9,7 @@ source "drivers/soc/samsung/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
+source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 380230f03874..50c23d0bd457 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -3,7 +3,6 @@
#
obj-y += bcm/
-obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
@@ -15,4 +14,5 @@ obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 3066edea184d..a39b0d58ddd0 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -1,9 +1,23 @@
+menu "Broadcom SoC drivers"
+
config RASPBERRYPI_POWER
bool "Raspberry Pi power domain driver"
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
depends on RASPBERRYPI_FIRMWARE=y
select PM_GENERIC_DOMAINS if PM
- select PM_GENERIC_DOMAINS_OF if PM
help
This enables support for the RPi power domains which can be enabled
or disabled via the RPi firmware.
+
+config SOC_BRCMSTB
+ bool "Broadcom STB SoC drivers"
+ depends on ARM
+ select SOC_BUS
+ help
+ Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
+ This option alone enables only some support code, while the drivers
+ can be enabled individually within this menu.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index 63aa3eb23087..dc4fced72d21 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
+obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile
index 9120b2715d3e..9120b2715d3e 100644
--- a/drivers/soc/brcmstb/Makefile
+++ b/drivers/soc/bcm/brcmstb/Makefile
diff --git a/drivers/soc/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 9049c076f9a1..3c39415d484f 100644
--- a/drivers/soc/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
+#include <linux/soc/brcmstb/brcmstb.h>
#define CPU_CREDIT_REG_OFFSET 0x184
#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
diff --git a/drivers/soc/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index 94e7335553f4..94e7335553f4 100644
--- a/drivers/soc/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
diff --git a/drivers/soc/brcmstb/Kconfig b/drivers/soc/brcmstb/Kconfig
deleted file mode 100644
index 7fec3b4c80a1..000000000000
--- a/drivers/soc/brcmstb/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-menuconfig SOC_BRCMSTB
- bool "Broadcom STB SoC drivers"
- depends on ARM
- select SOC_BUS
- help
- Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
- This option alone enables only some support code, while the drivers
- can be enabled individually within this menu.
-
- If unsure, say N.
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index 54261decb369..d5437ca76ed9 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -104,26 +104,26 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
if (con_id) {
index = of_property_match_string(dev->of_node,
- "qcom,state-names",
+ "qcom,smem-state-names",
con_id);
if (index < 0) {
- dev_err(dev, "missing qcom,state-names\n");
+ dev_err(dev, "missing qcom,smem-state-names\n");
return ERR_PTR(index);
}
}
ret = of_parse_phandle_with_args(dev->of_node,
- "qcom,state",
- "#qcom,state-cells",
+ "qcom,smem-states",
+ "#qcom,smem-state-cells",
index,
&args);
if (ret) {
- dev_err(dev, "failed to parse qcom,state property\n");
+ dev_err(dev, "failed to parse qcom,smem-states property\n");
return ERR_PTR(ret);
}
if (args.args_count != 1) {
- dev_err(dev, "invalid #qcom,state-cells\n");
+ dev_err(dev, "invalid #qcom,smem-state-cells\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index f1eed7f9dd67..f51fb2ea7200 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -196,7 +196,7 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
/* Match newly created entries */
for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
list_for_each_entry(entry, &smp2p->inbound, node) {
- memcpy_fromio(buf, in->entries[i].name, sizeof(buf));
+ memcpy(buf, in->entries[i].name, sizeof(buf));
if (!strcmp(buf, entry->name)) {
entry->value = &in->entries[i].value;
break;
@@ -343,12 +343,13 @@ static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
/* Allocate an entry from the smem item */
strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
- memcpy_toio(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
- out->valid_entries++;
+ memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
/* Make the logical entry reference the physical value */
entry->value = &out->entries[out->valid_entries].value;
+ out->valid_entries++;
+
entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
if (IS_ERR(entry->state)) {
dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 6b777af1bc19..d0337b2a71c8 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -495,7 +495,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
if (!smsm->hosts)
return -ENOMEM;
- local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,state-cells");
+ local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,smem-state-cells");
if (!local_node) {
dev_err(&pdev->dev, "no state entry\n");
return -EINVAL;
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index c544f3d2c6ee..520aedd29965 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016, Linaro Ltd.
* Copyright (c) 2015, Sony Mobile Communications Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -14,8 +15,16 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
#define WCNSS_REQUEST_TIMEOUT (5 * HZ)
+#define WCNSS_CBC_TIMEOUT (10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING 1
+#define WCNSS_ACK_COLD_BOOTING 2
#define NV_FRAGMENT_SIZE 3072
#define NVBIN_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin"
@@ -25,17 +34,19 @@
* @dev: device handle
* @channel: SMD channel handle
* @ack: completion for outstanding requests
+ * @cbc: completion for cbc complete indication
* @ack_status: status of the outstanding request
- * @download_nv_work: worker for uploading nv binary
+ * @probe_work: worker for asynchronous probe (version request and nv upload)
*/
struct wcnss_ctrl {
struct device *dev;
struct qcom_smd_channel *channel;
struct completion ack;
+ struct completion cbc;
int ack_status;
- struct work_struct download_nv_work;
+ struct work_struct probe_work;
};
/* message types */
@@ -48,6 +59,11 @@ enum {
WCNSS_UPLOAD_CAL_RESP,
WCNSS_DOWNLOAD_CAL_REQ,
WCNSS_DOWNLOAD_CAL_RESP,
+ WCNSS_VBAT_LEVEL_IND,
+ WCNSS_BUILD_VERSION_REQ,
+ WCNSS_BUILD_VERSION_RESP,
+ WCNSS_PM_CONFIG_REQ,
+ WCNSS_CBC_COMPLETE_IND,
};
/**
@@ -128,7 +144,7 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
version->major, version->minor,
version->version, version->revision);
- schedule_work(&wcnss->download_nv_work);
+ complete(&wcnss->ack);
break;
case WCNSS_DOWNLOAD_NV_RESP:
if (count != sizeof(*nvresp)) {
@@ -141,6 +157,10 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
wcnss->ack_status = nvresp->status;
complete(&wcnss->ack);
break;
+ case WCNSS_CBC_COMPLETE_IND:
+ dev_dbg(wcnss->dev, "cold boot complete\n");
+ complete(&wcnss->cbc);
+ break;
default:
dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
break;
@@ -156,20 +176,32 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
static int wcnss_request_version(struct wcnss_ctrl *wcnss)
{
struct wcnss_msg_hdr msg;
+ int ret;
msg.type = WCNSS_VERSION_REQ;
msg.len = sizeof(msg);
+ ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+ if (!ret) {
+ dev_err(wcnss->dev, "timeout waiting for version response\n");
+ return -ETIMEDOUT;
+ }
- return qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+ return 0;
}
/**
* wcnss_download_nv() - send nv binary to WCNSS
- * @work: work struct to acquire wcnss context
+ * @wcnss: wcnss_ctrl state handle
+ * @expect_cbc: indicator to caller that a cbc event is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
*/
-static void wcnss_download_nv(struct work_struct *work)
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
{
- struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, download_nv_work);
struct wcnss_download_nv_req *req;
const struct firmware *fw;
const void *data;
@@ -178,10 +210,10 @@ static void wcnss_download_nv(struct work_struct *work)
req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
if (!req)
- return;
+ return -ENOMEM;
ret = request_firmware(&fw, NVBIN_FILE, wcnss->dev);
- if (ret) {
+ if (ret < 0) {
dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
NVBIN_FILE, ret);
goto free_req;
@@ -207,7 +239,7 @@ static void wcnss_download_nv(struct work_struct *work)
memcpy(req->fragment, data, req->frag_size);
ret = qcom_smd_send(wcnss->channel, req, req->hdr.len);
- if (ret) {
+ if (ret < 0) {
dev_err(wcnss->dev, "failed to send smd packet\n");
goto release_fw;
}
@@ -220,16 +252,58 @@ static void wcnss_download_nv(struct work_struct *work)
} while (left > 0);
ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
- if (!ret)
+ if (!ret) {
dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
- else if (wcnss->ack_status != 1)
- dev_err(wcnss->dev, "nv upload response failed err: %d\n",
- wcnss->ack_status);
+ ret = -ETIMEDOUT;
+ } else {
+ *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+ ret = 0;
+ }
release_fw:
release_firmware(fw);
free_req:
kfree(req);
+
+ return ret;
+}
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss: wcnss handle, retrieved from drvdata
+ * @name: SMD channel name
+ * @cb: callback to handle incoming data on the channel
+ */
+struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+{
+ struct wcnss_ctrl *_wcnss = wcnss;
+
+ return qcom_smd_open_channel(_wcnss->channel, name, cb);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+ struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+ bool expect_cbc;
+ int ret;
+
+ ret = wcnss_request_version(wcnss);
+ if (ret < 0)
+ return;
+
+ ret = wcnss_download_nv(wcnss, &expect_cbc);
+ if (ret < 0)
+ return;
+
+ /* Wait for pending cold boot completion if indicated by the nv downloader */
+ if (expect_cbc) {
+ ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+ if (!ret)
+ dev_err(wcnss->dev, "expected cold boot completion\n");
+ }
+
+ of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
}
static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
@@ -244,25 +318,38 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
wcnss->channel = sdev->channel;
init_completion(&wcnss->ack);
- INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv);
+ init_completion(&wcnss->cbc);
+ INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
qcom_smd_set_drvdata(sdev->channel, wcnss);
+ dev_set_drvdata(&sdev->dev, wcnss);
+
+ schedule_work(&wcnss->probe_work);
+
+ return 0;
+}
+
+static void wcnss_ctrl_remove(struct qcom_smd_device *sdev)
+{
+ struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel);
- return wcnss_request_version(wcnss);
+ cancel_work_sync(&wcnss->probe_work);
+ of_platform_depopulate(&sdev->dev);
}
-static const struct qcom_smd_id wcnss_ctrl_smd_match[] = {
- { .name = "WCNSS_CTRL" },
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+ { .compatible = "qcom,wcnss", },
{}
};
static struct qcom_smd_driver wcnss_ctrl_driver = {
.probe = wcnss_ctrl_probe,
+ .remove = wcnss_ctrl_remove,
.callback = wcnss_ctrl_smd_callback,
- .smd_match_table = wcnss_ctrl_smd_match,
.driver = {
.name = "qcom_wcnss_ctrl",
.owner = THIS_MODULE,
+ .of_match_table = wcnss_ctrl_of_match,
},
};
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 151fcd3f025b..623039c3514c 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,7 +1,9 @@
obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
+obj-$(CONFIG_ARCH_R8A7792) += rcar-sysc.o r8a7792-sysc.o
# R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
obj-$(CONFIG_ARCH_R8A7793) += rcar-sysc.o r8a7791-sysc.o
obj-$(CONFIG_ARCH_R8A7794) += rcar-sysc.o r8a7794-sysc.o
obj-$(CONFIG_ARCH_R8A7795) += rcar-sysc.o r8a7795-sysc.o
+obj-$(CONFIG_ARCH_R8A7796) += rcar-sysc.o r8a7796-sysc.o
diff --git a/drivers/soc/renesas/r8a7792-sysc.c b/drivers/soc/renesas/r8a7792-sysc.c
new file mode 100644
index 000000000000..ca7467d7b7ec
--- /dev/null
+++ b/drivers/soc/renesas/r8a7792-sysc.c
@@ -0,0 +1,34 @@
+/*
+ * Renesas R-Car V2H (R8A7792) System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7792-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7792_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7792_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7792_PD_CA15_SCU, R8A7792_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7792_PD_CA15_CPU0, R8A7792_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7792_PD_CA15_CPU1, R8A7792_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7792_PD_SGX, R8A7792_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7792_PD_IMP, R8A7792_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7792_sysc_info __initconst = {
+ .areas = r8a7792_areas,
+ .num_areas = ARRAY_SIZE(r8a7792_areas),
+};
diff --git a/drivers/soc/renesas/r8a7796-sysc.c b/drivers/soc/renesas/r8a7796-sysc.c
new file mode 100644
index 000000000000..f700c842b9e1
--- /dev/null
+++ b/drivers/soc/renesas/r8a7796-sysc.c
@@ -0,0 +1,48 @@
+/*
+ * Renesas R-Car M3-W System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7796-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7796_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A7796_PD_CA57_CPU0, R8A7796_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A7796_PD_CA57_CPU1, R8A7796_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A7796_PD_CA53_SCU, R8A7796_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A7796_PD_CA53_CPU0, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A7796_PD_CA53_CPU1, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A7796_PD_CA53_CPU2, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A7796_PD_CA53_CPU3, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A7796_PD_CR7, R8A7796_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A7796_PD_A3VC, R8A7796_PD_ALWAYS_ON },
+ { "a2vc0", 0x3c0, 0, R8A7796_PD_A2VC0, R8A7796_PD_A3VC },
+ { "a2vc1", 0x3c0, 1, R8A7796_PD_A2VC1, R8A7796_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A7796_PD_3DG_A, R8A7796_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A7796_PD_3DG_B, R8A7796_PD_3DG_A },
+ { "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7796_sysc_info __initconst = {
+ .areas = r8a7796_areas,
+ .num_areas = ARRAY_SIZE(r8a7796_areas),
+};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 79dbc770895f..65c8e1eb90c0 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -164,15 +164,6 @@ static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
return false;
}
-void __iomem *rcar_sysc_init(phys_addr_t base)
-{
- rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
- if (!rcar_sysc_base)
- panic("unable to ioremap R-Car SYSC hardware block\n");
-
- return rcar_sysc_base;
-}
-
struct rcar_sysc_pd {
struct generic_pm_domain genpd;
struct rcar_sysc_ch ch;
@@ -293,6 +284,9 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_ARCH_R8A7791
{ .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info },
+#endif
#ifdef CONFIG_ARCH_R8A7793
/* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
{ .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
@@ -303,6 +297,9 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_ARCH_R8A7795
{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
#endif
+#ifdef CONFIG_ARCH_R8A7796
+ { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
+#endif
{ /* sentinel */ }
};
@@ -322,6 +319,9 @@ static int __init rcar_sysc_pd_init(void)
unsigned int i;
int error;
+ if (rcar_sysc_base)
+ return 0;
+
np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
if (!np)
return -ENODEV;
@@ -392,10 +392,35 @@ static int __init rcar_sysc_pd_init(void)
domains->domains[area->isr_bit] = &pd->genpd;
}
- of_genpd_add_provider_onecell(np, &domains->onecell_data);
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
of_node_put(np);
return error;
}
early_initcall(rcar_sysc_pd_init);
+
+void __init rcar_sysc_init(phys_addr_t base, u32 syscier)
+{
+ u32 syscimr;
+
+ if (!rcar_sysc_pd_init())
+ return;
+
+ rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
+
+ /*
+ * Mask all interrupt sources to prevent the CPU from receiving them.
+ * Make sure not to clear reserved bits that were set before.
+ */
+ syscimr = ioread32(rcar_sysc_base + SYSCIMR);
+ syscimr |= syscier;
+ pr_debug("%s: syscimr = 0x%08x\n", __func__, syscimr);
+ iowrite32(syscimr, rcar_sysc_base + SYSCIMR);
+
+ /*
+ * SYSC needs all interrupt sources enabled to control power.
+ */
+ pr_debug("%s: syscier = 0x%08x\n", __func__, syscier);
+ iowrite32(syscier, rcar_sysc_base + SYSCIER);
+}
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 5e766174c2f4..77dbe861473f 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -53,6 +53,8 @@ struct rcar_sysc_info {
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
+extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
extern const struct rcar_sysc_info r8a7795_sysc_info;
+extern const struct rcar_sysc_info r8a7796_sysc_info;
#endif /* __SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index d7fc123006a3..245533907d1b 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -10,4 +10,8 @@ config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on (ARM && ARCH_EXYNOS) || ((ARM || ARM64) && COMPILE_TEST)
+config EXYNOS_PM_DOMAINS
+ bool "Exynos PM domains" if COMPILE_TEST
+ depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+
endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index f64ac4d80564..3619f2ecddaa 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
+obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 20b3ab8aa790..af2f54e14b83 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -14,7 +14,7 @@
#include "exynos-pmu.h"
-static struct exynos_pmu_conf exynos3250_pmu_config[] = {
+static const struct exynos_pmu_conf exynos3250_pmu_config[] = {
/* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } */
{ EXYNOS3_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
{ EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index b962fb6a5d22..3f2c64180ef8 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -17,7 +17,7 @@
#include "exynos-pmu.h"
-static struct exynos_pmu_conf exynos5420_pmu_config[] = {
+static const struct exynos_pmu_conf exynos5420_pmu_config[] = {
/* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
{ EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
{ EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
diff --git a/arch/arm/mach-exynos/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index 875a2bab64f6..4822346aadc6 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -23,9 +23,13 @@
#include <linux/of_platform.h>
#include <linux/sched.h>
-#define INT_LOCAL_PWR_EN 0x7
#define MAX_CLK_PER_DOMAIN 4
+struct exynos_pm_domain_config {
+ /* Value for LOCAL_PWR_CFG and STATUS fields for each domain */
+ u32 local_pwr_cfg;
+};
+
/*
* Exynos specific wrapper around the generic power domain
*/
@@ -38,6 +42,7 @@ struct exynos_pm_domain {
struct clk *clk[MAX_CLK_PER_DOMAIN];
struct clk *pclk[MAX_CLK_PER_DOMAIN];
struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
+ u32 local_pwr_cfg;
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -69,13 +74,13 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
}
}
- pwr = power_on ? INT_LOCAL_PWR_EN : 0;
- __raw_writel(pwr, base);
+ pwr = power_on ? pd->local_pwr_cfg : 0;
+ writel_relaxed(pwr, base);
/* Wait max 1ms */
timeout = 10;
- while ((__raw_readl(base + 0x4) & INT_LOCAL_PWR_EN) != pwr) {
+ while ((readl_relaxed(base + 0x4) & pd->local_pwr_cfg) != pwr) {
if (!timeout) {
op = (power_on) ? "enable" : "disable";
pr_err("Power domain %s %s failed\n", domain->name, op);
@@ -119,14 +124,30 @@ static int exynos_pd_power_off(struct generic_pm_domain *domain)
return exynos_pd_power(domain, false);
}
+static const struct exynos_pm_domain_config exynos4210_cfg __initconst = {
+ .local_pwr_cfg = 0x7,
+};
+
+static const struct of_device_id exynos_pm_domain_of_match[] __initconst = {
+ {
+ .compatible = "samsung,exynos4210-pd",
+ .data = &exynos4210_cfg,
+ },
+ { },
+};
+
static __init int exynos4_pm_init_power_domain(void)
{
struct device_node *np;
+ const struct of_device_id *match;
- for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+ for_each_matching_node_and_match(np, exynos_pm_domain_of_match, &match) {
+ const struct exynos_pm_domain_config *pm_domain_cfg;
struct exynos_pm_domain *pd;
int on, i;
+ pm_domain_cfg = match->data;
+
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
pr_err("%s: failed to allocate memory for domain\n",
@@ -153,6 +174,7 @@ static __init int exynos4_pm_init_power_domain(void)
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
+ pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
char clk_name[8];
@@ -185,14 +207,14 @@ static __init int exynos4_pm_init_power_domain(void)
clk_put(pd->oscclk);
no_clk:
- on = __raw_readl(pd->base + 0x4) & INT_LOCAL_PWR_EN;
+ on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
pm_genpd_init(&pd->pd, NULL, !on);
of_genpd_add_provider_simple(np, &pd->pd);
}
/* Assign the child power domains to their parents */
- for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+ for_each_matching_node(np, exynos_pm_domain_of_match) {
struct generic_pm_domain *child_domain, *parent_domain;
struct of_phandle_args args;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index bb173456bbff..71c834f3847e 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -51,6 +51,7 @@
#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */
+#define PMC_CNTRL_MAIN_RST (1 << 4)
#define DPD_SAMPLE 0x020
#define DPD_SAMPLE_ENABLE (1 << 0)
@@ -80,6 +81,14 @@
#define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2)
#define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1)
+#define PMC_RST_STATUS 0x1b4
+#define PMC_RST_STATUS_POR 0
+#define PMC_RST_STATUS_WATCHDOG 1
+#define PMC_RST_STATUS_SENSOR 2
+#define PMC_RST_STATUS_SW_MAIN 3
+#define PMC_RST_STATUS_LP0 4
+#define PMC_RST_STATUS_AOTAG 5
+
#define IO_DPD_REQ 0x1b8
#define IO_DPD_REQ_CODE_IDLE (0 << 30)
#define IO_DPD_REQ_CODE_OFF (1 << 30)
@@ -399,6 +408,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
disable_clks:
tegra_powergate_disable_clocks(pg);
usleep_range(10, 20);
+
powergate_off:
tegra_powergate_set(pg->id, false);
@@ -436,6 +446,7 @@ assert_resets:
usleep_range(10, 20);
tegra_powergate_reset_deassert(pg);
usleep_range(10, 20);
+
disable_clks:
tegra_powergate_disable_clocks(pg);
@@ -540,6 +551,9 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct tegra_powergate pg;
int err;
+ if (!tegra_powergate_is_available(id))
+ return -EINVAL;
+
pg.id = id;
pg.clks = &clk;
pg.num_clks = 1;
@@ -638,9 +652,10 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
tegra_pmc_writel(value, PMC_SCRATCH0);
- value = tegra_pmc_readl(0);
- value |= 0x10;
- tegra_pmc_writel(value, 0);
+ /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_MAIN_RST;
+ tegra_pmc_writel(value, PMC_CNTRL);
return NOTIFY_DONE;
}
@@ -722,13 +737,14 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
err:
while (i--)
clk_put(pg->clks[i]);
+
kfree(pg->clks);
return err;
}
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
- struct device_node *np)
+ struct device_node *np, bool off)
{
struct reset_control *rst;
unsigned int i, count;
@@ -748,6 +764,16 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
err = PTR_ERR(pg->resets[i]);
goto error;
}
+
+ if (off)
+ err = reset_control_assert(pg->resets[i]);
+ else
+ err = reset_control_deassert(pg->resets[i]);
+
+ if (err) {
+ reset_control_put(pg->resets[i]);
+ goto error;
+ }
}
pg->num_resets = count;
@@ -757,6 +783,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
error:
while (i--)
reset_control_put(pg->resets[i]);
+
kfree(pg->resets);
return err;
@@ -765,16 +792,19 @@ error:
static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct tegra_powergate *pg;
+ int id, err;
bool off;
- int id;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
- goto error;
+ return;
id = tegra_powergate_lookup(pmc, np->name);
- if (id < 0)
+ if (id < 0) {
+ dev_err(pmc->dev, "powergate lookup failed for %s: %d\n",
+ np->name, id);
goto free_mem;
+ }
/*
* Clear the bit for this powergate so it cannot be managed
@@ -788,31 +818,64 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pg->genpd.power_on = tegra_genpd_power_on;
pg->pmc = pmc;
- if (tegra_powergate_of_get_clks(pg, np))
+ off = !tegra_powergate_is_powered(pg->id);
+
+ err = tegra_powergate_of_get_clks(pg, np);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to get clocks for %s: %d\n",
+ np->name, err);
goto set_available;
+ }
- if (tegra_powergate_of_get_resets(pg, np))
+ err = tegra_powergate_of_get_resets(pg, np, off);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to get resets for %s: %d\n",
+ np->name, err);
goto remove_clks;
+ }
- off = !tegra_powergate_is_powered(pg->id);
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
+ goto power_on_cleanup;
+
+ /*
+ * FIXME: If XHCI is enabled for Tegra, then power-up the XUSB
+ * host and super-speed partitions. Once the XHCI driver
+ * manages the partitions itself this code can be removed. Note
+ * that we don't register these partitions with the genpd core
+ * to avoid it from powering down the partitions as they appear
+ * to be unused.
+ */
+ if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
+ (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC))
+ goto power_on_cleanup;
pm_genpd_init(&pg->genpd, NULL, off);
- if (of_genpd_add_provider_simple(np, &pg->genpd))
+ err = of_genpd_add_provider_simple(np, &pg->genpd);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n",
+ np->name, err);
goto remove_resets;
+ }
dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
return;
+power_on_cleanup:
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
remove_resets:
while (pg->num_resets--)
reset_control_put(pg->resets[pg->num_resets]);
+
kfree(pg->resets);
remove_clks:
while (pg->num_clks--)
clk_put(pg->clks[pg->num_clks]);
+
kfree(pg->clks);
set_available:
@@ -820,16 +883,20 @@ set_available:
free_mem:
kfree(pg);
-
-error:
- dev_err(pmc->dev, "failed to create power domain for %s\n", np->name);
}
-static void tegra_powergate_init(struct tegra_pmc *pmc)
+static void tegra_powergate_init(struct tegra_pmc *pmc,
+ struct device_node *parent)
{
struct device_node *np, *child;
+ unsigned int i;
+
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
- np = of_get_child_by_name(pmc->dev->of_node, "powergates");
+ np = of_get_child_by_name(parent, "powergates");
if (!np)
return;
@@ -1205,6 +1272,14 @@ static int tegra_pmc_probe(struct platform_device *pdev)
struct resource *res;
int err;
+ /*
+ * Early initialisation should have configured an initial
+ * register mapping and set up the soc data pointer. If these
+ * are not valid then something went badly wrong!
+ */
+ if (WARN_ON(!pmc->base || !pmc->soc))
+ return -ENODEV;
+
err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node);
if (err < 0)
return err;
@@ -1242,8 +1317,6 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return err;
}
- tegra_powergate_init(pmc);
-
mutex_lock(&pmc->powergates_lock);
iounmap(pmc->base);
pmc->base = base;
@@ -1477,10 +1550,11 @@ static int __init tegra_pmc_early_init(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
- unsigned int i;
bool invert;
u32 value;
+ mutex_init(&pmc->powergates_lock);
+
np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
if (!np) {
/*
@@ -1515,39 +1589,40 @@ static int __init tegra_pmc_early_init(void)
*/
if (of_address_to_resource(np, 0, &regs) < 0) {
pr_err("failed to get PMC registers\n");
+ of_node_put(np);
return -ENXIO;
}
-
- pmc->soc = match->data;
}
pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
if (!pmc->base) {
pr_err("failed to map PMC registers\n");
+ of_node_put(np);
return -ENXIO;
}
- /* Create a bit-map of the available and valid partitions */
- for (i = 0; i < pmc->soc->num_powergates; i++)
- if (pmc->soc->powergates[i])
- set_bit(i, pmc->powergates_available);
+ if (np) {
+ pmc->soc = match->data;
- mutex_init(&pmc->powergates_lock);
+ tegra_powergate_init(pmc, np);
- /*
- * Invert the interrupt polarity if a PMC device tree node exists and
- * contains the nvidia,invert-interrupt property.
- */
- invert = of_property_read_bool(np, "nvidia,invert-interrupt");
+ /*
+ * Invert the interrupt polarity if a PMC device tree node
+ * exists and contains the nvidia,invert-interrupt property.
+ */
+ invert = of_property_read_bool(np, "nvidia,invert-interrupt");
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(PMC_CNTRL);
- if (invert)
- value |= PMC_CNTRL_INTR_POLARITY;
- else
- value &= ~PMC_CNTRL_INTR_POLARITY;
+ if (invert)
+ value |= PMC_CNTRL_INTR_POLARITY;
+ else
+ value &= ~PMC_CNTRL_INTR_POLARITY;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ of_node_put(np);
+ }
return 0;
}
diff --git a/drivers/soc/ux500/Kconfig b/drivers/soc/ux500/Kconfig
new file mode 100644
index 000000000000..025a44aef5db
--- /dev/null
+++ b/drivers/soc/ux500/Kconfig
@@ -0,0 +1,7 @@
+config UX500_SOC_ID
+ bool "SoC bus for ST-Ericsson ux500"
+ depends on ARCH_U8500 || COMPILE_TEST
+ default ARCH_U8500
+ help
+ Include support for the SoC bus on the ST-Ericsson ux500 platforms,
+ providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/ux500/Makefile b/drivers/soc/ux500/Makefile
new file mode 100644
index 000000000000..0b87ad04b018
--- /dev/null
+++ b/drivers/soc/ux500/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_UX500_SOC_ID) += ux500-soc-id.o
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
new file mode 100644
index 000000000000..6c1be74e5fcc
--- /dev/null
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/sys_soc.h>
+
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+/**
+ * struct dbx500_asic_id - fields of the ASIC ID
+ * @partnumber: hitherto 0x8500 for DB8500
+ * @revision: version code in the series
+ * @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard"
+ */
+struct dbx500_asic_id {
+ u16 partnumber;
+ u8 revision;
+ u8 process;
+};
+
+static struct dbx500_asic_id dbx500_id;
+
+static unsigned int __init ux500_read_asicid(phys_addr_t addr)
+{
+ void __iomem *virt = ioremap(addr, 4);
+ unsigned int asicid;
+
+ if (!virt)
+ return 0;
+
+ asicid = readl(virt);
+ iounmap(virt);
+
+ return asicid;
+}
+
+static void ux500_print_soc_info(unsigned int asicid)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ pr_info("DB%4x ", dbx500_id.partnumber);
+
+ if (rev == 0x01)
+ pr_cont("Early Drop");
+ else if (rev >= 0xA0)
+ pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf);
+ else
+ pr_cont("Unknown");
+
+ pr_cont(" [%#010x]\n", asicid);
+}
+
+static unsigned int partnumber(unsigned int asicid)
+{
+ return (asicid >> 8) & 0xffff;
+}
+
+/*
+ * SOC MIDR ASICID ADDRESS ASICID VALUE
+ * DB8500ed 0x410fc090 0x9001FFF4 0x00850001
+ * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
+ * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
+ * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
+ * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
+ * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
+ * DB9540 0x413fc090 0xFFFFDBF4 0x009540xx
+ */
+
+static void __init ux500_setup_id(void)
+{
+ unsigned int cpuid = read_cpuid_id();
+ unsigned int asicid = 0;
+ phys_addr_t addr = 0;
+
+ switch (cpuid) {
+ case 0x410fc090: /* DB8500ed */
+ case 0x411fc091: /* DB8500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
+ asicid = ux500_read_asicid(0x9001DBF4);
+ if (partnumber(asicid) == 0x8500 ||
+ partnumber(asicid) == 0x8520)
+ /* DB8500v2 */
+ break;
+
+ /* DB5500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x413fc090: /* DB9540 */
+ addr = 0xFFFFDBF4;
+ break;
+ }
+
+ if (addr)
+ asicid = ux500_read_asicid(addr);
+
+ if (!asicid) {
+ pr_err("Unable to identify SoC\n");
+ BUG();
+ }
+
+ dbx500_id.process = asicid >> 24;
+ dbx500_id.partnumber = partnumber(asicid);
+ dbx500_id.revision = asicid & 0xff;
+
+ ux500_print_soc_info(asicid);
+}
+
+static const char * __init ux500_get_machine(void)
+{
+ return kasprintf(GFP_KERNEL, "DB%4x", dbx500_id.partnumber);
+}
+
+static const char * __init ux500_get_family(void)
+{
+ return kasprintf(GFP_KERNEL, "ux500");
+}
+
+static const char * __init ux500_get_revision(void)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ if (rev == 0x01)
+ return kasprintf(GFP_KERNEL, "%s", "ED");
+ else if (rev >= 0xA0)
+ return kasprintf(GFP_KERNEL, "%d.%d",
+ (rev >> 4) - 0xA + 1, rev & 0xf);
+
+ return kasprintf(GFP_KERNEL, "%s", "Unknown");
+}
+
+static ssize_t ux500_get_process(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (dbx500_id.process == 0x00)
+ return sprintf(buf, "Standard\n");
+
+ return sprintf(buf, "%02xnm\n", dbx500_id.process);
+}
+
+static const char *db8500_read_soc_id(struct device_node *backupram)
+{
+ void __iomem *base;
+ void __iomem *uid;
+ const char *retstr;
+
+ base = of_iomap(backupram, 0);
+ if (!base)
+ return NULL;
+ uid = base + 0x1fc0;
+
+ /* Throw these device-specific numbers into the entropy pool */
+ add_device_randomness(uid, 0x14);
+ retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
+ readl((u32 *)uid+0),
+ readl((u32 *)uid+1), readl((u32 *)uid+2),
+ readl((u32 *)uid+3), readl((u32 *)uid+4));
+ iounmap(base);
+ return retstr;
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
+ struct device_node *backupram)
+{
+ soc_dev_attr->soc_id = db8500_read_soc_id(backupram);
+ soc_dev_attr->machine = ux500_get_machine();
+ soc_dev_attr->family = ux500_get_family();
+ soc_dev_attr->revision = ux500_get_revision();
+}
+
+static const struct device_attribute ux500_soc_attr =
+ __ATTR(process, S_IRUGO, ux500_get_process, NULL);
+
+static int __init ux500_soc_device_init(void)
+{
+ struct device *parent;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *backupram;
+
+ backupram = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
+ if (!backupram)
+ return 0;
+
+ ux500_setup_id();
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_info_populate(soc_dev_attr, backupram);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ parent = soc_device_to_device(soc_dev);
+ device_create_file(parent, &ux500_soc_attr);
+
+ return 0;
+}
+subsys_initcall(ux500_soc_device_init);
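A minimal standalone sketch (not part of the patch) of how the revision byte is decoded by ux500_print_soc_info() and ux500_get_revision() above; the sample bytes come from the ASICID table in this file (0x01 is the Early Drop, 0xA0 is v1.0, 0xB2 is v2.2), and the helper names below are illustrative only.

#include <stdio.h>

/* Decode a dbx500 revision byte the same way the driver above does. */
static void decode_revision(unsigned int rev)
{
	if (rev == 0x01)
		printf("0x%02x -> Early Drop\n", rev);
	else if (rev >= 0xA0)
		printf("0x%02x -> v%d.%d\n", rev,
		       (rev >> 4) - 0xA + 1, rev & 0xf);
	else
		printf("0x%02x -> Unknown\n", rev);
}

int main(void)
{
	/* Sample values from the ASICID table: ED, v1.0, v1.1, v2.0, v2.2 */
	unsigned int samples[] = { 0x01, 0xA0, 0xA1, 0xB0, 0xB2 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		decode_revision(samples[i]);

	return 0;
}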
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index d5adf9f31602..a56eca0e95a6 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -68,32 +68,6 @@ struct spi_st {
struct completion done;
};
-static int spi_st_clk_enable(struct spi_st *spi_st)
-{
- /*
- * Current platforms use one of the core clocks for SPI and I2C.
- * If we attempt to disable the clock, the system will hang.
- *
- * TODO: Remove this when platform supports power domains.
- */
- return 0;
-
- return clk_prepare_enable(spi_st->clk);
-}
-
-static void spi_st_clk_disable(struct spi_st *spi_st)
-{
- /*
- * Current platforms use one of the core clocks for SPI and I2C.
- * If we attempt to disable the clock, the system will hang.
- *
- * TODO: Remove this when platform supports power domains.
- */
- return;
-
- clk_disable_unprepare(spi_st->clk);
-}
-
/* Load the TX FIFO */
static void ssc_write_tx_fifo(struct spi_st *spi_st)
{
@@ -349,7 +323,7 @@ static int spi_st_probe(struct platform_device *pdev)
goto put_master;
}
- ret = spi_st_clk_enable(spi_st);
+ ret = clk_prepare_enable(spi_st->clk);
if (ret)
goto put_master;
@@ -408,7 +382,7 @@ static int spi_st_probe(struct platform_device *pdev)
return 0;
clk_disable:
- spi_st_clk_disable(spi_st);
+ clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
return ret;
@@ -419,7 +393,7 @@ static int spi_st_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
- spi_st_clk_disable(spi_st);
+ clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
@@ -435,7 +409,7 @@ static int spi_st_runtime_suspend(struct device *dev)
writel_relaxed(0, spi_st->base + SSC_IEN);
pinctrl_pm_select_sleep_state(dev);
- spi_st_clk_disable(spi_st);
+ clk_disable_unprepare(spi_st->clk);
return 0;
}
@@ -446,7 +420,7 @@ static int spi_st_runtime_resume(struct device *dev)
struct spi_st *spi_st = spi_master_get_devdata(master);
int ret;
- ret = spi_st_clk_enable(spi_st);
+ ret = clk_prepare_enable(spi_st->clk);
pinctrl_pm_select_default_state(dev);
return ret;
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 180e027b1c8a..796e22037bc4 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -23,7 +23,7 @@
**************************************************/
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
-static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -38,14 +38,14 @@ static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
* ChipCommon
**************************************************/
-static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
}
-static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
+static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned int gpio,
int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -54,7 +54,7 @@ static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
}
static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
- unsigned gpio)
+ unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -63,7 +63,7 @@ static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
}
static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
+ unsigned int gpio, int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -72,7 +72,7 @@ static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
return 0;
}
-static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -85,7 +85,7 @@ static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
return 0;
}
-static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
+static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -256,14 +256,14 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
#ifdef CONFIG_SSB_DRIVER_EXTIF
-static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
}
-static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
+static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned int gpio,
int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -272,7 +272,7 @@ static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
}
static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
- unsigned gpio)
+ unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -281,7 +281,7 @@ static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
}
static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
+ unsigned int gpio, int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h
index 425ebc5c32aa..fab66396d421 100644
--- a/drivers/staging/android/sync_debug.h
+++ b/drivers/staging/android/sync_debug.h
@@ -34,7 +34,8 @@ struct sync_timeline {
char name[32];
/* protected by child_list_lock */
- int context, value;
+ u64 context;
+ int value;
struct list_head child_list_head;
spinlock_t child_list_lock;
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index cc3402020487..d7577096fb25 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,5 +1,5 @@
config USB_EMXX
- bool "EMXX USB Function Device Controller"
+ tristate "EMXX USB Function Device Controller"
depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
help
The Emma Mobile series of SoCs from Renesas Electronics and
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 3bd91758b2da..3b56b2826263 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -15,7 +15,7 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -39,9 +39,11 @@
#include "emxx_udc.h"
+#define DRIVER_DESC "EMXX UDC driver"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static const char driver_name[] = "emxx_udc";
+static const char driver_desc[] = DRIVER_DESC;
/*===========================================================================*/
/* Prototype */
@@ -3296,6 +3298,28 @@ static void nbu2ss_drv_shutdown(struct platform_device *pdev)
}
/*-------------------------------------------------------------------------*/
+static int nbu2ss_drv_remove(struct platform_device *pdev)
+{
+ struct nbu2ss_udc *udc;
+ struct nbu2ss_ep *ep;
+ int i;
+
+ udc = &udc_controller;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ if (ep->virt_buf)
+ dma_free_coherent(NULL, PAGE_SIZE,
+ (void *)ep->virt_buf, ep->phys_buf);
+ }
+
+ /* Interrupt Handler - Release */
+ free_irq(INT_VBUS, udc);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
static int nbu2ss_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
struct nbu2ss_udc *udc;
@@ -3347,12 +3371,16 @@ static int nbu2ss_drv_resume(struct platform_device *pdev)
static struct platform_driver udc_driver = {
.probe = nbu2ss_drv_probe,
.shutdown = nbu2ss_drv_shutdown,
+ .remove = nbu2ss_drv_remove,
.suspend = nbu2ss_drv_suspend,
.resume = nbu2ss_drv_resume,
.driver = {
- .name = driver_name,
- .suppress_bind_attrs = true,
+ .name = driver_name,
},
};
-builtin_platform_driver(udc_driver);
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Renesas Electronics Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 581a63a0a63e..463b1a360733 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -78,7 +78,7 @@ static void ll_release(struct dentry *de)
* INVALID) so d_lookup() matches it, but we have no lock on it (so
* lock_match() fails) and we spin around real_lookup().
*/
-static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
+static int ll_dcompare(const struct dentry *dentry,
unsigned int len, const char *str,
const struct qstr *name)
{
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 8e52722266fe..c1cb6b19e724 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -781,7 +781,7 @@ static int sa_args_init(struct inode *dir, struct inode *child,
struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
struct ldlm_enqueue_info **pei)
{
- struct qstr *qstr = &entry->se_qstr;
+ const struct qstr *qstr = &entry->se_qstr;
struct ll_inode_info *lli = ll_i2info(dir);
struct md_enqueue_info *minfo;
struct ldlm_enqueue_info *einfo;
@@ -1340,7 +1340,7 @@ enum {
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
struct ll_dir_chain chain;
- struct qstr *target = &dentry->d_name;
+ const struct qstr *target = &dentry->d_name;
struct page *page;
__u64 pos = 0;
int dot_de;
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index cae42e56f270..7292f23954df 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -16,7 +16,7 @@ menuconfig STAGING_MEDIA
If in doubt, say N here.
-if STAGING_MEDIA
+if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/bcm2048/Kconfig"
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index 9fffddb7ac7e..b2393bbacb26 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -1252,7 +1252,7 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
return -EINVAL;
}
/* Zero unused part of the feature array */
- memset(features + i, 0, feature_sz - i);
+ memset(features + i + 1, 0, feature_sz - i - 1);
}
if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
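For clarity, a tiny standalone sketch (not part of the patch) of the off-by-one fixed above, assuming features[i] is the last byte that must survive: the old call zeroed from index i and so wiped that byte, while the new call starts at i + 1 and clears only the unused tail. The array contents and index here are made up for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char features[12] = { 0x81, 0x82, 0x03, 0xAA, 0xBB, 0xCC };
	size_t feature_sz = sizeof(features);
	size_t i = 2;	/* index of the last byte to preserve (illustrative) */

	/* Old: memset(features + i, 0, feature_sz - i) also cleared features[i]. */
	memset(features + i + 1, 0, feature_sz - i - 1);

	printf("features[%zu] preserved: 0x%02x\n", i, features[i]);
	return 0;
}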
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 50f3d3a0dd7b..39b928c2849d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -492,7 +492,8 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
spin_lock_bh(&conn->cmd_lock);
- if (!list_empty(&cmd->i_conn_node))
+ if (!list_empty(&cmd->i_conn_node) &&
+ !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
@@ -4034,6 +4035,7 @@ int iscsi_target_rx_thread(void *arg)
static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
+ LIST_HEAD(tmp_list);
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
/*
@@ -4042,18 +4044,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
* has been reset -> returned sleeping pre-handler state.
*/
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+ list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ list_for_each_entry(cmd, &tmp_list, i_conn_node) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (se_cmd->se_tfo != NULL) {
+ spin_lock(&se_cmd->t_state_lock);
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ spin_unlock(&se_cmd->t_state_lock);
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
- spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
-
iscsit_free_cmd(cmd, true);
- spin_lock_bh(&conn->cmd_lock);
}
- spin_unlock_bh(&conn->cmd_lock);
}
static void iscsit_stop_timers_for_cmds(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index b5212f0f9571..adf419fa4291 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1371,8 +1371,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
login->zero_tsih = zero_tsih;
- conn->sess->se_sess->sup_prot_ops =
- conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+ if (conn->sess)
+ conn->sess->se_sess->sup_prot_ops =
+ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
tpg = conn->tpg;
if (!tpg) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index a4046ca6e60d..6b423485c5d6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -821,13 +821,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
* in ATA and we need to set TPE=1
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q, int block_size)
+ struct request_queue *q)
{
+ int block_size = queue_logical_block_size(q);
+
if (!blk_queue_discard(q))
return false;
- attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
- block_size;
+ attrib->max_unmap_lba_count =
+ q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
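A small standalone check (not part of the patch) of the arithmetic above: for a power-of-two logical block size, shifting right by ilog2(block_size) - 9 matches the old (sectors << 9) / block_size, while presumably avoiding the 32-bit overflow the shift-left could hit for large max_discard_sectors. The ilog2_u32() helper is a local stand-in for the kernel's ilog2().

#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's ilog2() on exact powers of two. */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long long max_discard_sectors = 0xffffffffULL;
	unsigned int block_size = 4096;	/* e.g. queue_logical_block_size() result */

	unsigned long long old_way = (max_discard_sectors << 9) / block_size;
	unsigned long long new_way =
		max_discard_sectors >> (ilog2_u32(block_size) - 9);

	printf("old=%llu new=%llu\n", old_way, new_way);
	assert(old_way == new_way);
	return 0;
}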
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 75f0f08b2a34..d545993df18b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
- fd_dev->fd_block_size))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n");
/*
@@ -523,7 +522,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
*/
if (cmd->data_length > FD_MAX_BYTES) {
pr_err("FILEIO: Not able to process I/O of %u bytes due to"
- "FD_MAX_BYTES: %u iovec count limitiation\n",
+ "FD_MAX_BYTES: %u iovec count limitation\n",
cmd->data_length, FD_MAX_BYTES);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 22af12f8b8eb..372d744315f3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
- dev->dev_attrib.hw_block_size))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
@@ -389,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_rw = WRITE_FLUSH;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
if (!immed)
bio->bi_private = cmd;
submit_bio(bio);
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index fc91e85f54ba..e2c970a9d61c 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -146,6 +146,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
bool target_check_wce(struct se_device *dev);
bool target_check_fua(struct se_device *dev);
+void __target_execute_cmd(struct se_cmd *, bool);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index a9057aa07176..04f616b3ba0a 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -602,7 +602,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, false);
kfree(buf);
return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5ab3967dda43..6094a6beddde 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,7 +754,15 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if (scsi_status != SAM_STAT_GOOD) {
+ return;
+ }
+
+ /*
+ * Calculate new residual count based upon length of SCSI data
+ * transferred.
+ */
+ if (length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -763,6 +771,12 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
+ } else if (length > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = length - cmd->data_length;
+ } else {
+ cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
+ cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
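To make the residual bookkeeping above concrete, a standalone sketch (not part of the patch) with sample numbers, ignoring the pre-existing SCF_UNDERFLOW_BIT handling: a 512-byte request that only transfers 200 bytes reports an underflow with a residual of 312, and the reverse case reports an overflow.

#include <stdio.h>

int main(void)
{
	unsigned int data_length = 512;	/* bytes the initiator expected */
	unsigned int length = 200;	/* bytes actually transferred */
	unsigned int residual_count;

	if (length < data_length) {
		residual_count = data_length - length;
		printf("UNDERFLOW, residual=%u\n", residual_count);	/* 312 */
	} else if (length > data_length) {
		residual_count = length - data_length;
		printf("OVERFLOW, residual=%u\n", residual_count);
	} else {
		printf("exact transfer, residual=0\n");
	}
	return 0;
}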
@@ -1303,23 +1317,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
trace_target_sequencer_start(cmd);
- /*
- * Check for an existing UNIT ATTENTION condition
- */
- ret = target_scsi3_ua_check(cmd);
- if (ret)
- return ret;
-
- ret = target_alua_state_check(cmd);
- if (ret)
- return ret;
-
- ret = target_check_reservation(cmd);
- if (ret) {
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- return ret;
- }
-
ret = dev->transport->parse_cdb(cmd);
if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
@@ -1761,20 +1758,45 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
-void __target_execute_cmd(struct se_cmd *cmd)
+void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
sense_reason_t ret;
- if (cmd->execute_cmd) {
- ret = cmd->execute_cmd(cmd);
- if (ret) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
- spin_unlock_irq(&cmd->t_state_lock);
+ if (!cmd->execute_cmd) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto err;
+ }
+ if (do_checks) {
+ /*
+ * Check for an existing UNIT ATTENTION condition after
+ * target_handle_task_attr() has done SAM task attr
+ * checking, and may have already deferred execution
+ * out to target_restart_delayed_cmds() context.
+ */
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ goto err;
- transport_generic_request_failure(cmd, ret);
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ goto err;
+
+ ret = target_check_reservation(cmd);
+ if (ret) {
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ goto err;
}
}
+
+ ret = cmd->execute_cmd(cmd);
+ if (!ret)
+ return;
+err:
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ transport_generic_request_failure(cmd, ret);
}
static int target_write_prot_action(struct se_cmd *cmd)
@@ -1819,6 +1841,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
+ cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
+
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
@@ -1899,7 +1923,7 @@ void target_execute_cmd(struct se_cmd *cmd)
return;
}
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1923,7 +1947,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
break;
@@ -1941,6 +1965,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
+ if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
+ goto restart;
+
if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
atomic_dec_mb(&dev->simple_cmds);
dev->dev_cur_ordered_id++;
@@ -1957,7 +1984,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
-
+restart:
target_restart_delayed_cmds(dev);
}
@@ -2557,15 +2584,10 @@ static void target_release_cmd_kref(struct kref *kref)
bool fabric_stop;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_free_cmd_mem(se_cmd);
- se_cmd->se_tfo->release_cmd(se_cmd);
- return;
- }
spin_lock(&se_cmd->t_state_lock);
- fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+ (se_cmd->transport_state & CMD_T_ABORTED);
spin_unlock(&se_cmd->t_state_lock);
if (se_cmd->cmd_wait_set || fabric_stop) {
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index f5186a744399..6ffbb603d912 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -91,6 +91,7 @@ static void ft_tport_delete(struct ft_tport *tport)
ft_sess_delete_all(tport);
lport = tport->lport;
+ lport->service_params &= ~FCP_SPPF_TARG_FCN;
BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
@@ -110,6 +111,7 @@ void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
ft_tport_get(lport);
+ lport->service_params |= FCP_SPPF_TARG_FCN;
mutex_unlock(&ft_lport_lock);
}
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 913101980827..798c48d0d32c 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -60,6 +60,7 @@ struct hvc_struct {
struct winsize ws;
struct work_struct tty_resize;
struct list_head next;
+ unsigned long flags;
};
/* implemented by a low level driver */
diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index c9adb0559f61..bc7a96874637 100644
--- a/drivers/tty/hvc/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
@@ -14,6 +14,11 @@ static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance)
/* if hvc_poll request a repoll, then kick the hvcd thread */
if (hvc_poll(dev_instance))
hvc_kick();
+
+ /*
+ * We're safe to always return IRQ_HANDLED as the hvcd thread will
+ * iterate through each hvc_struct.
+ */
return IRQ_HANDLED;
}
@@ -28,8 +33,8 @@ int notifier_add_irq(struct hvc_struct *hp, int irq)
hp->irq_requested = 0;
return 0;
}
- rc = request_irq(irq, hvc_handle_interrupt, 0,
- "hvc_console", hp);
+ rc = request_irq(irq, hvc_handle_interrupt, hp->flags,
+ "hvc_console", hp);
if (!rc)
hp->irq_requested = 1;
return rc;
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 47b54c6aefd2..510799311099 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -214,7 +214,13 @@ static int hvc_opal_probe(struct platform_device *dev)
dev->dev.of_node->full_name,
boot ? " (boot console)" : "");
- irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT));
+ irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (!irq) {
+ pr_info("hvc%d: No interrupts property, using OPAL event\n",
+ termno);
+ irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT));
+ }
+
if (!irq) {
pr_err("hvc_opal: Unable to map interrupt for device %s\n",
dev->dev.of_node->full_name);
@@ -224,6 +230,9 @@ static int hvc_opal_probe(struct platform_device *dev)
hp = hvc_alloc(termno, irq, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
+
+ /* hvc consoles on powernv may need to share a single irq */
+ hp->flags = IRQF_SHARED;
dev_set_drvdata(&dev->dev, hp);
return 0;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 1519d2ca7705..73137f4aac20 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -54,7 +54,7 @@ struct ar933x_uart_port {
static inline bool ar933x_uart_console_enabled(void)
{
- return config_enabled(CONFIG_SERIAL_AR933X_CONSOLE);
+ return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
}
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
@@ -636,7 +636,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
int ret;
np = pdev->dev.of_node;
- if (config_enabled(CONFIG_OF) && np) {
+ if (IS_ENABLED(CONFIG_OF) && np) {
id = of_alias_get_id(np, "serial");
if (id < 0) {
dev_err(&pdev->dev, "unable to get alias id, err=%d\n",
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 5beafd2d2218..ac1328629baa 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -539,7 +539,7 @@ static int uart_clps711x_remove(struct platform_device *pdev)
}
static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
- { .compatible = "cirrus,clps711x-uart", },
+ { .compatible = "cirrus,ep7209-uart", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c10972fcc8e4..4fd041bec332 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -387,7 +387,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* need to have the registers polled during D3, so avoid D3cold.
*/
if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
- pdev->no_d3cold = true;
+ pci_d3cold_disable(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 96a70789b4c2..4d6a5c672a3d 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -496,12 +496,10 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
void *dmabuf;
int result;
- dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
if (!dmabuf)
return -ENOMEM;
- memcpy(dmabuf, buf, bufsize);
-
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
req, REQTYPE_HOST_TO_INTERFACE, 0,
port_priv->bInterfaceNumber, dmabuf, bufsize,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ae8c0365abd6..944de657a07a 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -350,6 +350,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
+ int status = urb->status;
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
@@ -360,22 +361,22 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
- __func__, urb->status);
+ __func__, status);
goto resubmit;
}
@@ -399,6 +400,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
+ int status = urb->status;
int i;
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
@@ -410,22 +412,22 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
set_bit(i, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
case -EPIPE:
dev_err_console(port, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
default:
dev_err_console(port, "%s - nonzero urb status: %d\n",
- __func__, urb->status);
+ __func__, status);
goto resubmit;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d96d423d00e6..8e07536c233a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
+#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -1198,6 +1199,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e7dbbef2af2a..07b4bf01061d 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1,5 +1,4 @@
-/* vi: ts=8 sw=8
- *
+/*
* TI 3410/5052 USB Serial Driver
*
* Copyright (C) 2004 Texas Instruments
@@ -35,9 +34,238 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include "ti_usb_3410_5052.h"
-
-/* Defines */
+/* Configuration ids */
+#define TI_BOOT_CONFIG 1
+#define TI_ACTIVE_CONFIG 2
+
+/* Vendor and product ids */
+#define TI_VENDOR_ID 0x0451
+#define IBM_VENDOR_ID 0x04b3
+#define TI_3410_PRODUCT_ID 0x3410
+#define IBM_4543_PRODUCT_ID 0x4543
+#define IBM_454B_PRODUCT_ID 0x454b
+#define IBM_454C_PRODUCT_ID 0x454c
+#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
+#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
+#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
+#define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */
+#define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */
+#define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */
+
+/* Multi-Tech vendor and product ids */
+#define MTS_VENDOR_ID 0x06E0
+#define MTS_GSM_NO_FW_PRODUCT_ID 0xF108
+#define MTS_CDMA_NO_FW_PRODUCT_ID 0xF109
+#define MTS_CDMA_PRODUCT_ID 0xF110
+#define MTS_GSM_PRODUCT_ID 0xF111
+#define MTS_EDGE_PRODUCT_ID 0xF112
+#define MTS_MT9234MU_PRODUCT_ID 0xF114
+#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
+#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
+
+/* Abbott Diabetics vendor and product ids */
+#define ABBOTT_VENDOR_ID 0x1a61
+#define ABBOTT_STEREO_PLUG_ID 0x3410
+#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
+#define ABBOTT_STRIP_PORT_ID 0x3420
+
+/* Honeywell vendor and product IDs */
+#define HONEYWELL_VENDOR_ID 0x10ac
+#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
+
+/* Moxa UPORT 11x0 vendor and product IDs */
+#define MXU1_VENDOR_ID 0x110a
+#define MXU1_1110_PRODUCT_ID 0x1110
+#define MXU1_1130_PRODUCT_ID 0x1130
+#define MXU1_1150_PRODUCT_ID 0x1150
+#define MXU1_1151_PRODUCT_ID 0x1151
+#define MXU1_1131_PRODUCT_ID 0x1131
+
+/* Commands */
+#define TI_GET_VERSION 0x01
+#define TI_GET_PORT_STATUS 0x02
+#define TI_GET_PORT_DEV_INFO 0x03
+#define TI_GET_CONFIG 0x04
+#define TI_SET_CONFIG 0x05
+#define TI_OPEN_PORT 0x06
+#define TI_CLOSE_PORT 0x07
+#define TI_START_PORT 0x08
+#define TI_STOP_PORT 0x09
+#define TI_TEST_PORT 0x0A
+#define TI_PURGE_PORT 0x0B
+#define TI_RESET_EXT_DEVICE 0x0C
+#define TI_WRITE_DATA 0x80
+#define TI_READ_DATA 0x81
+#define TI_REQ_TYPE_CLASS 0x82
+
+/* Module identifiers */
+#define TI_I2C_PORT 0x01
+#define TI_IEEE1284_PORT 0x02
+#define TI_UART1_PORT 0x03
+#define TI_UART2_PORT 0x04
+#define TI_RAM_PORT 0x05
+
+/* Modem status */
+#define TI_MSR_DELTA_CTS 0x01
+#define TI_MSR_DELTA_DSR 0x02
+#define TI_MSR_DELTA_RI 0x04
+#define TI_MSR_DELTA_CD 0x08
+#define TI_MSR_CTS 0x10
+#define TI_MSR_DSR 0x20
+#define TI_MSR_RI 0x40
+#define TI_MSR_CD 0x80
+#define TI_MSR_DELTA_MASK 0x0F
+#define TI_MSR_MASK 0xF0
+
+/* Line status */
+#define TI_LSR_OVERRUN_ERROR 0x01
+#define TI_LSR_PARITY_ERROR 0x02
+#define TI_LSR_FRAMING_ERROR 0x04
+#define TI_LSR_BREAK 0x08
+#define TI_LSR_ERROR 0x0F
+#define TI_LSR_RX_FULL 0x10
+#define TI_LSR_TX_EMPTY 0x20
+
+/* Line control */
+#define TI_LCR_BREAK 0x40
+
+/* Modem control */
+#define TI_MCR_LOOP 0x04
+#define TI_MCR_DTR 0x10
+#define TI_MCR_RTS 0x20
+
+/* Mask settings */
+#define TI_UART_ENABLE_RTS_IN 0x0001
+#define TI_UART_DISABLE_RTS 0x0002
+#define TI_UART_ENABLE_PARITY_CHECKING 0x0008
+#define TI_UART_ENABLE_DSR_OUT 0x0010
+#define TI_UART_ENABLE_CTS_OUT 0x0020
+#define TI_UART_ENABLE_X_OUT 0x0040
+#define TI_UART_ENABLE_XA_OUT 0x0080
+#define TI_UART_ENABLE_X_IN 0x0100
+#define TI_UART_ENABLE_DTR_IN 0x0800
+#define TI_UART_DISABLE_DTR 0x1000
+#define TI_UART_ENABLE_MS_INTS 0x2000
+#define TI_UART_ENABLE_AUTO_START_DMA 0x4000
+
+/* Parity */
+#define TI_UART_NO_PARITY 0x00
+#define TI_UART_ODD_PARITY 0x01
+#define TI_UART_EVEN_PARITY 0x02
+#define TI_UART_MARK_PARITY 0x03
+#define TI_UART_SPACE_PARITY 0x04
+
+/* Stop bits */
+#define TI_UART_1_STOP_BITS 0x00
+#define TI_UART_1_5_STOP_BITS 0x01
+#define TI_UART_2_STOP_BITS 0x02
+
+/* Bits per character */
+#define TI_UART_5_DATA_BITS 0x00
+#define TI_UART_6_DATA_BITS 0x01
+#define TI_UART_7_DATA_BITS 0x02
+#define TI_UART_8_DATA_BITS 0x03
+
+/* 232/485 modes */
+#define TI_UART_232 0x00
+#define TI_UART_485_RECEIVER_DISABLED 0x01
+#define TI_UART_485_RECEIVER_ENABLED 0x02
+
+/* Pipe transfer mode and timeout */
+#define TI_PIPE_MODE_CONTINUOUS 0x01
+#define TI_PIPE_MODE_MASK 0x03
+#define TI_PIPE_TIMEOUT_MASK 0x7C
+#define TI_PIPE_TIMEOUT_ENABLE 0x80
+
+/* Config struct */
+struct ti_uart_config {
+ __u16 wBaudRate;
+ __u16 wFlags;
+ __u8 bDataBits;
+ __u8 bParity;
+ __u8 bStopBits;
+ char cXon;
+ char cXoff;
+ __u8 bUartMode;
+} __packed;
+
+/* Get port status */
+struct ti_port_status {
+ __u8 bCmdCode;
+ __u8 bModuleId;
+ __u8 bErrorCode;
+ __u8 bMSR;
+ __u8 bLSR;
+} __packed;
+
+/* Purge modes */
+#define TI_PURGE_OUTPUT 0x00
+#define TI_PURGE_INPUT 0x80
+
+/* Read/Write data */
+#define TI_RW_DATA_ADDR_SFR 0x10
+#define TI_RW_DATA_ADDR_IDATA 0x20
+#define TI_RW_DATA_ADDR_XDATA 0x30
+#define TI_RW_DATA_ADDR_CODE 0x40
+#define TI_RW_DATA_ADDR_GPIO 0x50
+#define TI_RW_DATA_ADDR_I2C 0x60
+#define TI_RW_DATA_ADDR_FLASH 0x70
+#define TI_RW_DATA_ADDR_DSP 0x80
+
+#define TI_RW_DATA_UNSPECIFIED 0x00
+#define TI_RW_DATA_BYTE 0x01
+#define TI_RW_DATA_WORD 0x02
+#define TI_RW_DATA_DOUBLE_WORD 0x04
+
+struct ti_write_data_bytes {
+ __u8 bAddrType;
+ __u8 bDataType;
+ __u8 bDataCounter;
+ __be16 wBaseAddrHi;
+ __be16 wBaseAddrLo;
+ __u8 bData[0];
+} __packed;
+
+struct ti_read_data_request {
+ __u8 bAddrType;
+ __u8 bDataType;
+ __u8 bDataCounter;
+ __be16 wBaseAddrHi;
+ __be16 wBaseAddrLo;
+} __packed;
+
+struct ti_read_data_bytes {
+ __u8 bCmdCode;
+ __u8 bModuleId;
+ __u8 bErrorCode;
+ __u8 bData[0];
+} __packed;
+
+/* Interrupt struct */
+struct ti_interrupt {
+ __u8 bICode;
+ __u8 bIInfo;
+} __packed;
+
+/* Interrupt codes */
+#define TI_CODE_HARDWARE_ERROR 0xFF
+#define TI_CODE_DATA_ERROR 0x03
+#define TI_CODE_MODEM_STATUS 0x04
+
+/* Download firmware max packet size */
+#define TI_DOWNLOAD_MAX_PACKET_SIZE 64
+
+/* Firmware image header */
+struct ti_firmware_header {
+ __le16 wLength;
+ __u8 bCheckSum;
+} __packed;
+
+/* UART addresses */
+#define TI_UART1_BASE_ADDR 0xFFA0 /* UART 1 base address */
+#define TI_UART2_BASE_ADDR 0xFFB0 /* UART 2 base address */
+#define TI_UART_OFFSET_LCR 0x0002 /* UART LCR register offset */
+#define TI_UART_OFFSET_MCR 0x0004 /* UART MCR register offset */
#define TI_DRIVER_AUTHOR "Al Borchers <alborchers@steinerpoint.com>"
#define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver"
@@ -58,9 +286,6 @@
#define TI_EXTRA_VID_PID_COUNT 5
-
-/* Structures */
-
struct ti_port {
int tp_is_open;
__u8 tp_msr;
@@ -84,9 +309,6 @@ struct ti_device {
int td_urb_error;
};
-
-/* Function Declarations */
-
static int ti_startup(struct usb_serial *serial);
static void ti_release(struct usb_serial *serial);
static int ti_port_probe(struct usb_serial_port *port);
@@ -136,13 +358,8 @@ static int ti_write_byte(struct usb_serial_port *port, struct ti_device *tdev,
static int ti_download_firmware(struct ti_device *tdev);
-
-/* Data */
-
-/* module parameters */
static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
-/* supported devices */
static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
@@ -174,7 +391,7 @@ static const struct usb_device_id ti_id_table_5052[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
- { } /* terminator */
+ { }
};
static const struct usb_device_id ti_id_table_combined[] = {
@@ -275,8 +492,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
&ti_1port_device, &ti_2port_device, NULL
};
-/* Module */
-
MODULE_AUTHOR(TI_DRIVER_AUTHOR);
MODULE_DESCRIPTION(TI_DRIVER_DESC);
MODULE_LICENSE("GPL");
@@ -302,8 +517,6 @@ MODULE_DEVICE_TABLE(usb, ti_id_table_combined);
module_usb_serial_driver(serial_drivers, ti_id_table_combined);
-/* Functions */
-
static int ti_startup(struct usb_serial *serial)
{
struct ti_device *tdev;
@@ -319,7 +532,6 @@ static int ti_startup(struct usb_serial *serial)
dev->descriptor.bNumConfigurations,
dev->actconfig->desc.bConfigurationValue);
- /* create device structure */
tdev = kzalloc(sizeof(struct ti_device), GFP_KERNEL);
if (!tdev)
return -ENOMEM;
@@ -435,7 +647,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
struct urb *urb;
int port_number;
int status;
- __u16 open_settings = (__u8)(TI_PIPE_MODE_CONTINOUS |
+ __u16 open_settings = (__u8)(TI_PIPE_MODE_CONTINUOUS |
TI_PIPE_TIMEOUT_ENABLE |
(TI_TRANSFER_TIMEOUT << 2));
@@ -954,6 +1166,15 @@ static void ti_break(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "%s - error setting break, %d\n", __func__, status);
}
+static int ti_get_port_from_code(unsigned char code)
+{
+ return (code >> 4) - 3;
+}
+
+static int ti_get_func_from_code(unsigned char code)
+{
+ return code & 0x0f;
+}
static void ti_interrupt_callback(struct urb *urb)
{
@@ -995,8 +1216,8 @@ static void ti_interrupt_callback(struct urb *urb)
goto exit;
}
- port_number = TI_GET_PORT_FROM_CODE(data[0]);
- function = TI_GET_FUNC_FROM_CODE(data[0]);
+ port_number = ti_get_port_from_code(data[0]);
+ function = ti_get_func_from_code(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, data 0x%02X\n",
__func__, port_number, function, data[1]);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
deleted file mode 100644
index bbfd3a184600..000000000000
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/* vi: ts=8 sw=8
- *
- * TI 3410/5052 USB Serial Driver Header
- *
- * Copyright (C) 2004 Texas Instruments
- *
- * This driver is based on the Linux io_ti driver, which is
- * Copyright (C) 2000-2002 Inside Out Networks
- * Copyright (C) 2001-2002 Greg Kroah-Hartman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * For questions or problems with this driver, contact Texas Instruments
- * technical support, or Al Borchers <alborchers@steinerpoint.com>, or
- * Peter Berger <pberger@brimson.com>.
- */
-
-#ifndef _TI_3410_5052_H_
-#define _TI_3410_5052_H_
-
-/* Configuration ids */
-#define TI_BOOT_CONFIG 1
-#define TI_ACTIVE_CONFIG 2
-
-/* Vendor and product ids */
-#define TI_VENDOR_ID 0x0451
-#define IBM_VENDOR_ID 0x04b3
-#define TI_3410_PRODUCT_ID 0x3410
-#define IBM_4543_PRODUCT_ID 0x4543
-#define IBM_454B_PRODUCT_ID 0x454b
-#define IBM_454C_PRODUCT_ID 0x454c
-#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
-#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
-#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
-#define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */
-#define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */
-#define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */
-
-/* Multi-Tech vendor and product ids */
-#define MTS_VENDOR_ID 0x06E0
-#define MTS_GSM_NO_FW_PRODUCT_ID 0xF108
-#define MTS_CDMA_NO_FW_PRODUCT_ID 0xF109
-#define MTS_CDMA_PRODUCT_ID 0xF110
-#define MTS_GSM_PRODUCT_ID 0xF111
-#define MTS_EDGE_PRODUCT_ID 0xF112
-#define MTS_MT9234MU_PRODUCT_ID 0xF114
-#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
-#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
-
-/* Abbott Diabetics vendor and product ids */
-#define ABBOTT_VENDOR_ID 0x1a61
-#define ABBOTT_STEREO_PLUG_ID 0x3410
-#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
-#define ABBOTT_STRIP_PORT_ID 0x3420
-
-/* Honeywell vendor and product IDs */
-#define HONEYWELL_VENDOR_ID 0x10ac
-#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
-
-/* Moxa UPORT 11x0 vendor and product IDs */
-#define MXU1_VENDOR_ID 0x110a
-#define MXU1_1110_PRODUCT_ID 0x1110
-#define MXU1_1130_PRODUCT_ID 0x1130
-#define MXU1_1131_PRODUCT_ID 0x1131
-#define MXU1_1150_PRODUCT_ID 0x1150
-#define MXU1_1151_PRODUCT_ID 0x1151
-
-/* Commands */
-#define TI_GET_VERSION 0x01
-#define TI_GET_PORT_STATUS 0x02
-#define TI_GET_PORT_DEV_INFO 0x03
-#define TI_GET_CONFIG 0x04
-#define TI_SET_CONFIG 0x05
-#define TI_OPEN_PORT 0x06
-#define TI_CLOSE_PORT 0x07
-#define TI_START_PORT 0x08
-#define TI_STOP_PORT 0x09
-#define TI_TEST_PORT 0x0A
-#define TI_PURGE_PORT 0x0B
-#define TI_RESET_EXT_DEVICE 0x0C
-#define TI_WRITE_DATA 0x80
-#define TI_READ_DATA 0x81
-#define TI_REQ_TYPE_CLASS 0x82
-
-/* Module identifiers */
-#define TI_I2C_PORT 0x01
-#define TI_IEEE1284_PORT 0x02
-#define TI_UART1_PORT 0x03
-#define TI_UART2_PORT 0x04
-#define TI_RAM_PORT 0x05
-
-/* Modem status */
-#define TI_MSR_DELTA_CTS 0x01
-#define TI_MSR_DELTA_DSR 0x02
-#define TI_MSR_DELTA_RI 0x04
-#define TI_MSR_DELTA_CD 0x08
-#define TI_MSR_CTS 0x10
-#define TI_MSR_DSR 0x20
-#define TI_MSR_RI 0x40
-#define TI_MSR_CD 0x80
-#define TI_MSR_DELTA_MASK 0x0F
-#define TI_MSR_MASK 0xF0
-
-/* Line status */
-#define TI_LSR_OVERRUN_ERROR 0x01
-#define TI_LSR_PARITY_ERROR 0x02
-#define TI_LSR_FRAMING_ERROR 0x04
-#define TI_LSR_BREAK 0x08
-#define TI_LSR_ERROR 0x0F
-#define TI_LSR_RX_FULL 0x10
-#define TI_LSR_TX_EMPTY 0x20
-
-/* Line control */
-#define TI_LCR_BREAK 0x40
-
-/* Modem control */
-#define TI_MCR_LOOP 0x04
-#define TI_MCR_DTR 0x10
-#define TI_MCR_RTS 0x20
-
-/* Mask settings */
-#define TI_UART_ENABLE_RTS_IN 0x0001
-#define TI_UART_DISABLE_RTS 0x0002
-#define TI_UART_ENABLE_PARITY_CHECKING 0x0008
-#define TI_UART_ENABLE_DSR_OUT 0x0010
-#define TI_UART_ENABLE_CTS_OUT 0x0020
-#define TI_UART_ENABLE_X_OUT 0x0040
-#define TI_UART_ENABLE_XA_OUT 0x0080
-#define TI_UART_ENABLE_X_IN 0x0100
-#define TI_UART_ENABLE_DTR_IN 0x0800
-#define TI_UART_DISABLE_DTR 0x1000
-#define TI_UART_ENABLE_MS_INTS 0x2000
-#define TI_UART_ENABLE_AUTO_START_DMA 0x4000
-
-/* Parity */
-#define TI_UART_NO_PARITY 0x00
-#define TI_UART_ODD_PARITY 0x01
-#define TI_UART_EVEN_PARITY 0x02
-#define TI_UART_MARK_PARITY 0x03
-#define TI_UART_SPACE_PARITY 0x04
-
-/* Stop bits */
-#define TI_UART_1_STOP_BITS 0x00
-#define TI_UART_1_5_STOP_BITS 0x01
-#define TI_UART_2_STOP_BITS 0x02
-
-/* Bits per character */
-#define TI_UART_5_DATA_BITS 0x00
-#define TI_UART_6_DATA_BITS 0x01
-#define TI_UART_7_DATA_BITS 0x02
-#define TI_UART_8_DATA_BITS 0x03
-
-/* 232/485 modes */
-#define TI_UART_232 0x00
-#define TI_UART_485_RECEIVER_DISABLED 0x01
-#define TI_UART_485_RECEIVER_ENABLED 0x02
-
-/* Pipe transfer mode and timeout */
-#define TI_PIPE_MODE_CONTINOUS 0x01
-#define TI_PIPE_MODE_MASK 0x03
-#define TI_PIPE_TIMEOUT_MASK 0x7C
-#define TI_PIPE_TIMEOUT_ENABLE 0x80
-
-/* Config struct */
-struct ti_uart_config {
- __u16 wBaudRate;
- __u16 wFlags;
- __u8 bDataBits;
- __u8 bParity;
- __u8 bStopBits;
- char cXon;
- char cXoff;
- __u8 bUartMode;
-} __attribute__((packed));
-
-/* Get port status */
-struct ti_port_status {
- __u8 bCmdCode;
- __u8 bModuleId;
- __u8 bErrorCode;
- __u8 bMSR;
- __u8 bLSR;
-} __attribute__((packed));
-
-/* Purge modes */
-#define TI_PURGE_OUTPUT 0x00
-#define TI_PURGE_INPUT 0x80
-
-/* Read/Write data */
-#define TI_RW_DATA_ADDR_SFR 0x10
-#define TI_RW_DATA_ADDR_IDATA 0x20
-#define TI_RW_DATA_ADDR_XDATA 0x30
-#define TI_RW_DATA_ADDR_CODE 0x40
-#define TI_RW_DATA_ADDR_GPIO 0x50
-#define TI_RW_DATA_ADDR_I2C 0x60
-#define TI_RW_DATA_ADDR_FLASH 0x70
-#define TI_RW_DATA_ADDR_DSP 0x80
-
-#define TI_RW_DATA_UNSPECIFIED 0x00
-#define TI_RW_DATA_BYTE 0x01
-#define TI_RW_DATA_WORD 0x02
-#define TI_RW_DATA_DOUBLE_WORD 0x04
-
-struct ti_write_data_bytes {
- __u8 bAddrType;
- __u8 bDataType;
- __u8 bDataCounter;
- __be16 wBaseAddrHi;
- __be16 wBaseAddrLo;
- __u8 bData[0];
-} __attribute__((packed));
-
-struct ti_read_data_request {
- __u8 bAddrType;
- __u8 bDataType;
- __u8 bDataCounter;
- __be16 wBaseAddrHi;
- __be16 wBaseAddrLo;
-} __attribute__((packed));
-
-struct ti_read_data_bytes {
- __u8 bCmdCode;
- __u8 bModuleId;
- __u8 bErrorCode;
- __u8 bData[0];
-} __attribute__((packed));
-
-/* Interrupt struct */
-struct ti_interrupt {
- __u8 bICode;
- __u8 bIInfo;
-} __attribute__((packed));
-
-/* Interrupt codes */
-#define TI_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3)
-#define TI_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
-#define TI_CODE_HARDWARE_ERROR 0xFF
-#define TI_CODE_DATA_ERROR 0x03
-#define TI_CODE_MODEM_STATUS 0x04
-
-/* Download firmware max packet size */
-#define TI_DOWNLOAD_MAX_PACKET_SIZE 64
-
-/* Firmware image header */
-struct ti_firmware_header {
- __le16 wLength;
- __u8 bCheckSum;
-} __attribute__((packed));
-
-/* UART addresses */
-#define TI_UART1_BASE_ADDR 0xFFA0 /* UART 1 base address */
-#define TI_UART2_BASE_ADDR 0xFFB0 /* UART 2 base address */
-#define TI_UART_OFFSET_LCR 0x0002 /* UART MCR register offset */
-#define TI_UART_OFFSET_MCR 0x0004 /* UART MCR register offset */
-
-#endif /* _TI_3410_5052_H_ */
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 533eaf04f12f..40764ecad9ce 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -2,7 +2,6 @@ config VHOST_NET
tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
select VHOST
- select VHOST_RING
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
@@ -15,17 +14,24 @@ config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
depends on TARGET_CORE && EVENTFD && m
select VHOST
- select VHOST_RING
default n
---help---
Say M here to enable the vhost_scsi TCM fabric module
for use with virtio-scsi guests
-config VHOST_RING
- tristate
+config VHOST_VSOCK
+ tristate "vhost virtio-vsock driver"
+ depends on VSOCKETS && EVENTFD
+ select VIRTIO_VSOCKETS_COMMON
+ select VHOST
+ default n
---help---
- This option is selected by any driver which needs to access
- the host side of a virtio ring.
+ This kernel module can be loaded in the host kernel to provide AF_VSOCK
+ sockets for communicating with guests. The guests must have the
+ virtio_transport.ko driver loaded to use the virtio-vsock device.
+
+ To compile this driver as a module, choose M here: the module will be called
+ vhost_vsock.
config VHOST
tristate
diff --git a/drivers/vhost/Kconfig.vringh b/drivers/vhost/Kconfig.vringh
new file mode 100644
index 000000000000..6a4490c09d7f
--- /dev/null
+++ b/drivers/vhost/Kconfig.vringh
@@ -0,0 +1,5 @@
+config VHOST_RING
+ tristate
+ ---help---
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index e0441c34db1c..6b012b986b57 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -4,5 +4,9 @@ vhost_net-y := net.o
obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o
+obj-$(CONFIG_VHOST_VSOCK) += vhost_vsock.o
+vhost_vsock-y := vsock.o
+
obj-$(CONFIG_VHOST_RING) += vringh.o
+
obj-$(CONFIG_VHOST) += vhost.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e032ca397371..5dc128a8da83 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF)
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};
enum {
@@ -334,7 +335,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
{
unsigned long uninitialized_var(endtime);
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
- out_num, in_num, NULL, NULL);
+ out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
preempt_disable();
@@ -344,7 +345,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
cpu_relax_lowlatency();
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
- out_num, in_num, NULL, NULL);
+ out_num, in_num, NULL, NULL);
}
return r;
@@ -377,6 +378,9 @@ static void handle_tx(struct vhost_net *net)
if (!sock)
goto out;
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&net->dev, vq);
hdr_size = nvq->vhost_hlen;
@@ -652,6 +656,10 @@ static void handle_rx(struct vhost_net *net)
sock = vq->private_data;
if (!sock)
goto out;
+
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&net->dev, vq);
vhost_net_disable_vq(net, vq);
@@ -1052,20 +1060,20 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
mutex_unlock(&n->dev.mutex);
@@ -1096,10 +1104,14 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
}
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&n->dev)) {
- mutex_unlock(&n->dev.mutex);
- return -EFAULT;
+ !vhost_log_access_ok(&n->dev))
+ goto out_unlock;
+
+ if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+ if (vhost_init_device_iotlb(&n->dev, true))
+ goto out_unlock;
}
+
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
mutex_lock(&n->vqs[i].vq.mutex);
n->vqs[i].vq.acked_features = features;
@@ -1109,6 +1121,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
}
mutex_unlock(&n->dev.mutex);
return 0;
+
+out_unlock:
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
@@ -1182,9 +1198,40 @@ static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
}
#endif
+static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
+ .read_iter = vhost_net_chr_read_iter,
+ .write_iter = vhost_net_chr_write_iter,
+ .poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vhost_net_compat_ioctl,
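The new read_iter/write_iter/poll hooks turn the vhost-net fd into a message channel for the device IOTLB: the kernel queues VHOST_IOTLB_MISS messages for userspace to read, and userspace writes back VHOST_IOTLB_UPDATE (or INVALIDATE) replies. A minimal userspace service loop, assuming the vhost_msg/vhost_iotlb_msg layout added to <linux/vhost.h> by this series and a hypothetical lookup_uaddr() helper supplied by the device model, could look like:

#include <unistd.h>
#include <linux/vhost.h>

/* Hypothetical device-model helper: resolve a missed IOVA to a host
 * virtual address and clamp *size to the mapped range. */
unsigned long long lookup_uaddr(unsigned long long iova,
				unsigned long long *size);

static void serve_iotlb(int vhost_fd)
{
	struct vhost_msg msg;

	while (read(vhost_fd, &msg, sizeof(msg)) == sizeof(msg)) {
		if (msg.type != VHOST_IOTLB_MSG ||
		    msg.iotlb.type != VHOST_IOTLB_MISS)
			continue;

		/* Answer the miss; vhost_chr_write_iter() feeds this into
		 * vhost_process_iotlb_msg() above. */
		msg.iotlb.uaddr = lookup_uaddr(msg.iotlb.iova, &msg.iotlb.size);
		msg.iotlb.perm  = VHOST_ACCESS_RW;
		msg.iotlb.type  = VHOST_IOTLB_UPDATE;

		if (write(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
			break;
	}
}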
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 669fef1e2bb6..c6f2d89c0e97 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -27,6 +27,7 @@
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
+#include <linux/interval_tree_generic.h>
#include "vhost.h"
@@ -34,6 +35,10 @@ static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
"Maximum number of memory regions in memory map. (default: 64)");
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. (default: 2048)");
enum {
VHOST_MEMORY_F_LOG = 0x1,
@@ -42,6 +47,10 @@ enum {
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+INTERVAL_TREE_DEFINE(struct vhost_umem_node,
+ rb, __u64, __subtree_last,
+ START, LAST, , vhost_umem_interval_tree);
+
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
@@ -131,6 +140,19 @@ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
vq->is_le = virtio_legacy_is_little_endian();
}
+struct vhost_flush_struct {
+ struct vhost_work work;
+ struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+ struct vhost_flush_struct *s;
+
+ s = container_of(work, struct vhost_flush_struct, work);
+ complete(&s->wait_event);
+}
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -155,11 +177,9 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- INIT_LIST_HEAD(&work->node);
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -211,31 +231,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
- unsigned seq)
-{
- int left;
-
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq;
- spin_unlock_irq(&dev->work_lock);
- return left <= 0;
-}
-
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned seq;
- int flushing;
+ struct vhost_flush_struct flush;
- spin_lock_irq(&dev->work_lock);
- seq = work->queue_seq;
- work->flushing++;
- spin_unlock_irq(&dev->work_lock);
- wait_event(work->done, vhost_work_seq_done(dev, work, seq));
- spin_lock_irq(&dev->work_lock);
- flushing = --work->flushing;
- spin_unlock_irq(&dev->work_lock);
- BUG_ON(flushing < 0);
+ if (dev->worker) {
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+
+ vhost_work_queue(dev, &flush.work);
+ wait_for_completion(&flush.wait_event);
+ }
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
@@ -249,16 +255,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned long flags;
+ if (!dev->worker)
+ return;
- spin_lock_irqsave(&dev->work_lock, flags);
- if (list_empty(&work->node)) {
- list_add_tail(&work->node, &dev->work_list);
- work->queue_seq++;
- spin_unlock_irqrestore(&dev->work_lock, flags);
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ */
+ smp_mb();
+ llist_add(&work->node, &dev->work_list);
wake_up_process(dev->worker);
- } else {
- spin_unlock_irqrestore(&dev->work_lock, flags);
}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
@@ -266,7 +272,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
- return !list_empty(&dev->work_list);
+ return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
@@ -300,17 +306,18 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
- vq->memory = NULL;
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
vq->busyloop_timeout = 0;
+ vq->umem = NULL;
+ vq->iotlb = NULL;
}
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
- struct vhost_work *work = NULL;
- unsigned uninitialized_var(seq);
+ struct vhost_work *work, *work_next;
+ struct llist_node *node;
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
@@ -320,35 +327,25 @@ static int vhost_worker(void *data)
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&dev->work_lock);
- if (work) {
- work->done_seq = seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
-
if (kthread_should_stop()) {
- spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
- if (!list_empty(&dev->work_list)) {
- work = list_first_entry(&dev->work_list,
- struct vhost_work, node);
- list_del_init(&work->node);
- seq = work->queue_seq;
- } else
- work = NULL;
- spin_unlock_irq(&dev->work_lock);
- if (work) {
+ node = llist_del_all(&dev->work_list);
+ if (!node)
+ schedule();
+
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
- } else
- schedule();
-
+ }
}
unuse_mm(dev->mm);
set_fs(oldfs);
@@ -407,11 +404,16 @@ void vhost_dev_init(struct vhost_dev *dev,
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
dev->log_file = NULL;
- dev->memory = NULL;
+ dev->umem = NULL;
+ dev->iotlb = NULL;
dev->mm = NULL;
- spin_lock_init(&dev->work_lock);
- INIT_LIST_HEAD(&dev->work_list);
dev->worker = NULL;
+ init_llist_head(&dev->work_list);
+ init_waitqueue_head(&dev->wait);
+ INIT_LIST_HEAD(&dev->read_list);
+ INIT_LIST_HEAD(&dev->pending_list);
+ spin_lock_init(&dev->iotlb_lock);
+
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
@@ -512,27 +514,36 @@ err_mm:
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void)
+static void *vhost_kvzalloc(unsigned long size)
{
- return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n)
+ n = vzalloc(size);
+ return n;
+}
+
+struct vhost_umem *vhost_dev_reset_owner_prepare(void)
+{
+ return vhost_kvzalloc(sizeof(struct vhost_umem));
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
int i;
vhost_dev_cleanup(dev, true);
/* Restore memory to default empty mapping. */
- memory->nregions = 0;
- dev->memory = memory;
+ INIT_LIST_HEAD(&umem->umem_list);
+ dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
*/
for (i = 0; i < dev->nvqs; ++i)
- dev->vqs[i]->memory = memory;
+ dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
@@ -549,6 +560,47 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
+static void vhost_umem_free(struct vhost_umem *umem,
+ struct vhost_umem_node *node)
+{
+ vhost_umem_interval_tree_remove(node, &umem->umem_tree);
+ list_del(&node->link);
+ kfree(node);
+ umem->numem--;
+}
+
+static void vhost_umem_clean(struct vhost_umem *umem)
+{
+ struct vhost_umem_node *node, *tmp;
+
+ if (!umem)
+ return;
+
+ list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
+ vhost_umem_free(umem, node);
+
+ kvfree(umem);
+}
+
+static void vhost_clear_msg(struct vhost_dev *dev)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&dev->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &dev->read_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ list_for_each_entry_safe(node, n, &dev->pending_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ spin_unlock(&dev->iotlb_lock);
+}
+
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
@@ -575,9 +627,13 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kvfree(dev->memory);
- dev->memory = NULL;
- WARN_ON(!list_empty(&dev->work_list));
+ vhost_umem_clean(dev->umem);
+ dev->umem = NULL;
+ vhost_umem_clean(dev->iotlb);
+ dev->iotlb = NULL;
+ vhost_clear_msg(dev);
+ wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+ WARN_ON(!llist_empty(&dev->work_list));
if (dev->worker) {
kthread_stop(dev->worker);
dev->worker = NULL;
@@ -601,26 +657,34 @@ static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
+static bool vhost_overflow(u64 uaddr, u64 size)
+{
+ /* Make sure 64 bit math will not overflow. */
+ return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+}
+
/* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
+static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
int log_all)
{
- int i;
+ struct vhost_umem_node *node;
- if (!mem)
+ if (!umem)
return 0;
- for (i = 0; i < mem->nregions; ++i) {
- struct vhost_memory_region *m = mem->regions + i;
- unsigned long a = m->userspace_addr;
- if (m->memory_size > ULONG_MAX)
+ list_for_each_entry(node, &umem->umem_list, link) {
+ unsigned long a = node->userspace_addr;
+
+ if (vhost_overflow(node->userspace_addr, node->size))
return 0;
- else if (!access_ok(VERIFY_WRITE, (void __user *)a,
- m->memory_size))
+
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)a,
+ node->size))
return 0;
else if (log_all && !log_access_ok(log_base,
- m->guest_phys_addr,
- m->memory_size))
+ node->start,
+ node->size))
return 0;
}
return 1;
@@ -628,7 +692,7 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
+static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
int log_all)
{
int i;
@@ -641,7 +705,8 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
- ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
+ ok = vq_memory_access_ok(d->vqs[i]->log_base,
+ umem, log);
else
ok = 1;
mutex_unlock(&d->vqs[i]->mutex);
@@ -651,12 +716,385 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access);
+
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+ const void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_to_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that all vq
+ * memory can be accessed through the iotlb, so
+ * -EAGAIN should not happen in this case.
+ */
+ /* TODO: more fast path */
+ struct iov_iter t;
+ ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_WO);
+ if (ret < 0)
+ goto out;
+ iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
+ ret = copy_to_iter(from, size, &t);
+ if (ret == size)
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
+ void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_from_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that the vq
+ * memory can be accessed through the iotlb, so
+ * -EAGAIN should not happen in this case.
+ */
+ /* TODO: more fast path */
+ struct iov_iter f;
+ ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", from,
+ (unsigned long long) size);
+ goto out;
+ }
+ iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
+ ret = copy_from_iter(to, size, &f);
+ if (ret == size)
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
+ void *addr, unsigned size)
+{
+ int ret;
+
+ /* This function should be called after iotlb
+ * prefetch, which means we're sure that the vq
+ * memory can be accessed through the iotlb, so
+ * -EAGAIN should not happen in this case.
+ */
+ /* TODO: more fast path */
+ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
+ vq_err(vq, "Non atomic userspace memory access: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ return vq->iotlb_iov[0].iov_base;
+}
+
+#define vhost_put_user(vq, x, ptr) \
+({ \
+ int ret = -EFAULT; \
+ if (!vq->iotlb) { \
+ ret = __put_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) to = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
+ if (to != NULL) \
+ ret = __put_user(x, to); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+#define vhost_get_user(vq, x, ptr) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __get_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) from = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
+ if (from != NULL) \
+ ret = __get_user(x, from); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock(&d->vqs[i]->mutex);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
+static int vhost_new_umem_range(struct vhost_umem *umem,
+ u64 start, u64 size, u64 end,
+ u64 userspace_addr, int perm)
+{
+ struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+
+ if (!node)
+ return -ENOMEM;
+
+ if (umem->numem == max_iotlb_entries) {
+ tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
+ vhost_umem_free(umem, tmp);
+ }
+
+ node->start = start;
+ node->size = size;
+ node->last = end;
+ node->userspace_addr = userspace_addr;
+ node->perm = perm;
+ INIT_LIST_HEAD(&node->link);
+ list_add_tail(&node->link, &umem->umem_list);
+ vhost_umem_interval_tree_insert(node, &umem->umem_tree);
+ umem->numem++;
+
+ return 0;
+}
+
+static void vhost_del_umem_range(struct vhost_umem *umem,
+ u64 start, u64 end)
+{
+ struct vhost_umem_node *node;
+
+ while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ start, end)))
+ vhost_umem_free(umem, node);
+}
+
+static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&d->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ if (msg->iova <= vq_msg->iova &&
+ msg->iova + msg->size - 1 > vq_msg->iova &&
+ vq_msg->type == VHOST_IOTLB_MISS) {
+ vhost_poll_queue(&node->vq->poll);
+ list_del(&node->node);
+ kfree(node);
+ }
+ }
+
+ spin_unlock(&d->iotlb_lock);
+}
+
+static int umem_access_ok(u64 uaddr, u64 size, int access)
+{
+ unsigned long a = uaddr;
+
+ /* Make sure 64 bit math will not overflow. */
+ if (vhost_overflow(uaddr, size))
+ return -EFAULT;
+
+ if ((access & VHOST_ACCESS_RO) &&
+ !access_ok(VERIFY_READ, (void __user *)a, size))
+ return -EFAULT;
+ if ((access & VHOST_ACCESS_WO) &&
+ !access_ok(VERIFY_WRITE, (void __user *)a, size))
+ return -EFAULT;
+ return 0;
+}
+
+int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
+{
+ int ret = 0;
+
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
+ ret = -ENOMEM;
+ break;
+ }
+ vhost_iotlb_notify_vq(dev, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_del_umem_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ vhost_dev_unlock_vqs(dev);
+ return ret;
+}
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from)
+{
+ struct vhost_msg_node node;
+ unsigned size = sizeof(struct vhost_msg);
+ size_t ret;
+ int err;
+
+ if (iov_iter_count(from) < size)
+ return 0;
+ ret = copy_from_iter(&node.msg, size, from);
+ if (ret != size)
+ goto done;
+
+ switch (node.msg.type) {
+ case VHOST_IOTLB_MSG:
+ err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
+ if (err)
+ ret = err;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(vhost_chr_write_iter);
+
+unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(file, &dev->wait, wait);
+
+ if (!list_empty(&dev->read_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+EXPORT_SYMBOL(vhost_chr_poll);
+
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock)
+{
+ DEFINE_WAIT(wait);
+ struct vhost_msg_node *node;
+ ssize_t ret = 0;
+ unsigned size = sizeof(struct vhost_msg);
+
+ if (iov_iter_count(to) < size)
+ return 0;
+
+ while (1) {
+ if (!noblock)
+ prepare_to_wait(&dev->wait, &wait,
+ TASK_INTERRUPTIBLE);
+
+ node = vhost_dequeue_msg(dev, &dev->read_list);
+ if (node)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ if (!dev->iotlb) {
+ ret = -EBADFD;
+ break;
+ }
+
+ schedule();
+ }
+
+ if (!noblock)
+ finish_wait(&dev->wait, &wait);
+
+ if (node) {
+ ret = copy_to_iter(&node->msg, size, to);
+
+ if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+ kfree(node);
+ return ret;
+ }
+
+ vhost_enqueue_msg(dev, &dev->pending_list, node);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
+
+static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
+{
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_msg_node *node;
+ struct vhost_iotlb_msg *msg;
+
+ node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
+ if (!node)
+ return -ENOMEM;
+
+ msg = &node->msg.iotlb;
+ msg->type = VHOST_IOTLB_MISS;
+ msg->iova = iova;
+ msg->perm = access;
+
+ vhost_enqueue_msg(dev, &dev->read_list, node);
+
+ return 0;
+}
+
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
+
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -664,11 +1102,59 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
sizeof *used + num * sizeof *used->ring + s);
}
+static int iotlb_access_ok(struct vhost_virtqueue *vq,
+ int access, u64 addr, u64 len)
+{
+ const struct vhost_umem_node *node;
+ struct vhost_umem *umem = vq->iotlb;
+ u64 s = 0, size;
+
+ while (len > s) {
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ addr,
+ addr + len - 1);
+ if (node == NULL || node->start > addr) {
+ vhost_iotlb_miss(vq, addr, access);
+ return false;
+ } else if (!(node->perm & access)) {
+ /* Report the possible access violation by
+ * requesting another translation from userspace.
+ */
+ return false;
+ }
+
+ size = node->size - addr + node->start;
+ s += size;
+ addr += size;
+ }
+
+ return true;
+}
+
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
+{
+ size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ unsigned int num = vq->num;
+
+ if (!vq->iotlb)
+ return 1;
+
+ return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
+ num * sizeof *vq->desc) &&
+ iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
+ sizeof *vq->avail +
+ num * sizeof *vq->avail->ring + s) &&
+ iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
+ sizeof *vq->used +
+ num * sizeof *vq->used->ring + s);
+}
+EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
+
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
- return memory_access_ok(dev, dev->memory, 1);
+ return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
@@ -679,7 +1165,7 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
- return vq_memory_access_ok(log_base, vq->memory,
+ return vq_memory_access_ok(log_base, vq->umem,
vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
@@ -690,33 +1176,36 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
+ if (vq->iotlb) {
+ /* When a device IOTLB is used, access is
+ * validated during prefetching instead.
+ */
+ return 1;
+ }
return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
-static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+static struct vhost_umem *vhost_umem_alloc(void)
{
- const struct vhost_memory_region *r1 = p1, *r2 = p2;
- if (r1->guest_phys_addr < r2->guest_phys_addr)
- return 1;
- if (r1->guest_phys_addr > r2->guest_phys_addr)
- return -1;
- return 0;
-}
+ struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));
-static void *vhost_kvzalloc(unsigned long size)
-{
- void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!umem)
+ return NULL;
- if (!n)
- n = vzalloc(size);
- return n;
+ umem->umem_tree = RB_ROOT;
+ umem->numem = 0;
+ INIT_LIST_HEAD(&umem->umem_list);
+
+ return umem;
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
- struct vhost_memory mem, *newmem, *oldmem;
+ struct vhost_memory mem, *newmem;
+ struct vhost_memory_region *region;
+ struct vhost_umem *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
@@ -736,24 +1225,47 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
kvfree(newmem);
return -EFAULT;
}
- sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
- vhost_memory_reg_sort_cmp, NULL);
- if (!memory_access_ok(d, newmem, 0)) {
+ newumem = vhost_umem_alloc();
+ if (!newumem) {
kvfree(newmem);
- return -EFAULT;
+ return -ENOMEM;
}
- oldmem = d->memory;
- d->memory = newmem;
+
+ for (region = newmem->regions;
+ region < newmem->regions + mem.nregions;
+ region++) {
+ if (vhost_new_umem_range(newumem,
+ region->guest_phys_addr,
+ region->memory_size,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_ACCESS_RW))
+ goto err;
+ }
+
+ if (!memory_access_ok(d, newumem, 0))
+ goto err;
+
+ oldumem = d->umem;
+ d->umem = newumem;
/* All memory accesses are done under some VQ mutex. */
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
- d->vqs[i]->memory = newmem;
+ d->vqs[i]->umem = newumem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kvfree(oldmem);
+
+ kvfree(newmem);
+ vhost_umem_clean(oldumem);
return 0;
+
+err:
+ vhost_umem_clean(newumem);
+ kvfree(newmem);
+ return -EFAULT;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
@@ -974,6 +1486,30 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
+{
+ struct vhost_umem *niotlb, *oiotlb;
+ int i;
+
+ niotlb = vhost_umem_alloc();
+ if (!niotlb)
+ return -ENOMEM;
+
+ oiotlb = d->iotlb;
+ d->iotlb = niotlb;
+
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->iotlb = niotlb;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
+
+ vhost_umem_clean(oiotlb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
+
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
@@ -1056,28 +1592,6 @@ done:
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
-static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
- __u64 addr, __u32 len)
-{
- const struct vhost_memory_region *reg;
- int start = 0, end = mem->nregions;
-
- while (start < end) {
- int slot = start + (end - start) / 2;
- reg = mem->regions + slot;
- if (addr >= reg->guest_phys_addr)
- end = slot;
- else
- start = slot + 1;
- }
-
- reg = mem->regions + start;
- if (addr >= reg->guest_phys_addr &&
- reg->guest_phys_addr + reg->memory_size > addr)
- return reg;
- return NULL;
-}
-
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
* returning -EFAULT). See Documentation/x86/exception-tables.txt.
@@ -1156,7 +1670,8 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
- if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+ &vq->used->flags) < 0)
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
@@ -1174,7 +1689,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
- if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+ vhost_avail_event(vq)))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
@@ -1208,15 +1724,20 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
if (r)
goto err;
vq->signalled_used_valid = false;
- if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
+ if (!vq->iotlb &&
+ !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
r = -EFAULT;
goto err;
}
- r = __get_user(last_used_idx, &vq->used->idx);
- if (r)
+ r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
+ if (r) {
+ vq_err(vq, "Can't access used idx at %p\n",
+ &vq->used->idx);
goto err;
+ }
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
+
err:
vq->is_le = is_le;
return r;
@@ -1224,36 +1745,48 @@ err:
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+ struct iovec iov[], int iov_size, int access)
{
- const struct vhost_memory_region *reg;
- struct vhost_memory *mem;
+ const struct vhost_umem_node *node;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
- mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
- reg = find_region(mem, addr, len);
- if (unlikely(!reg)) {
- ret = -EFAULT;
+
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ addr, addr + len - 1);
+ if (node == NULL || node->start > addr) {
+ if (umem != dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = -EAGAIN;
+ break;
+ } else if (!(node->perm & access)) {
+ ret = -EPERM;
break;
}
+
_iov = iov + ret;
- size = reg->memory_size - addr + reg->guest_phys_addr;
+ size = node->size - addr + node->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
- (reg->userspace_addr + addr - reg->guest_phys_addr);
+ (node->userspace_addr + addr - node->start);
s += size;
addr += size;
++ret;
}
+ if (ret == -EAGAIN)
+ vhost_iotlb_miss(vq, addr, access);
return ret;
}
@@ -1288,7 +1821,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
unsigned int i = 0, count, found = 0;
u32 len = vhost32_to_cpu(vq, indirect->len);
struct iov_iter from;
- int ret;
+ int ret, access;
/* Sanity check */
if (unlikely(len % sizeof desc)) {
@@ -1300,9 +1833,10 @@ static int get_indirect(struct vhost_virtqueue *vq,
}
ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
- UIO_MAXIOV);
+ UIO_MAXIOV, VHOST_ACCESS_RO);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
iov_iter_init(&from, READ, vq->indirect, ret, len);
@@ -1340,16 +1874,22 @@ static int get_indirect(struct vhost_virtqueue *vq,
return -EINVAL;
}
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count);
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d indirect idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
return ret;
}
/* If this is an input descriptor, increment that count. */
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
+ if (access == VHOST_ACCESS_WO) {
*in_num += ret;
if (unlikely(log)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
@@ -1388,11 +1928,11 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
u16 last_avail_idx;
__virtio16 avail_idx;
__virtio16 ring_head;
- int ret;
+ int ret, access;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
+ if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
@@ -1414,8 +1954,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- if (unlikely(__get_user(ring_head,
- &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
+ if (unlikely(vhost_get_user(vq, ring_head,
+ &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
@@ -1450,7 +1990,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
i, vq->num, head);
return -EINVAL;
}
- ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
+ ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
+ sizeof desc);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
@@ -1461,22 +2002,28 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
- vq_err(vq, "Failure detected "
- "in indirect descriptor at idx %d\n", i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
return ret;
}
continue;
}
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count);
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d descriptor idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
return ret;
}
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
+ if (access == VHOST_ACCESS_WO) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
@@ -1538,15 +2085,15 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
start = vq->last_used_idx & (vq->num - 1);
used = vq->used->ring + start;
if (count == 1) {
- if (__put_user(heads[0].id, &used->id)) {
+ if (vhost_put_user(vq, heads[0].id, &used->id)) {
vq_err(vq, "Failed to write used id");
return -EFAULT;
}
- if (__put_user(heads[0].len, &used->len)) {
+ if (vhost_put_user(vq, heads[0].len, &used->len)) {
vq_err(vq, "Failed to write used len");
return -EFAULT;
}
- } else if (__copy_to_user(used, heads, count * sizeof *used)) {
+ } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
@@ -1590,7 +2137,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure buffer is written before we update index. */
smp_wmb();
- if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+ &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
@@ -1622,7 +2170,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
- if (__get_user(flags, &vq->avail->flags)) {
+ if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
@@ -1636,7 +2184,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- if (__get_user(event, vhost_used_event(vq))) {
+ if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
@@ -1678,7 +2226,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__virtio16 avail_idx;
int r;
- r = __get_user(avail_idx, &vq->avail->idx);
+ r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
if (r)
return false;
@@ -1713,7 +2261,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
- r = __get_user(avail_idx, &vq->avail->idx);
+ r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
if (r) {
vq_err(vq, "Failed to check avail idx at %p: %d\n",
&vq->avail->idx, r);
@@ -1741,6 +2289,47 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
+/* Create a new message. */
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+{
+ struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ if (!node)
+ return NULL;
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_new_msg);
+
+void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
+ struct vhost_msg_node *node)
+{
+ spin_lock(&dev->iotlb_lock);
+ list_add_tail(&node->node, head);
+ spin_unlock(&dev->iotlb_lock);
+
+ wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
+
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head)
+{
+ struct vhost_msg_node *node = NULL;
+
+ spin_lock(&dev->iotlb_lock);
+ if (!list_empty(head)) {
+ node = list_first_entry(head, struct vhost_msg_node,
+ node);
+ list_del(&node->node);
+ }
+ spin_unlock(&dev->iotlb_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+
+
static int __init vhost_init(void)
{
return 0;
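For reference, the umem side of the new code is fed from the existing VHOST_SET_MEM_TABLE ioctl: each vhost_memory_region becomes one vhost_umem_node spanning [guest_phys_addr, guest_phys_addr + memory_size - 1] in the interval tree. A minimal userspace sketch registering a single region (addresses and sizes are example values only):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_one_region(int vhost_fd, void *hva,
			  unsigned long long gpa, unsigned long long size)
{
	/* One region: vhost_set_memory() above walks it and calls
	 * vhost_new_umem_range() with VHOST_ACCESS_RW. */
	struct vhost_memory *mem;
	int ret;

	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
	if (!mem)
		return -1;

	mem->nregions = 1;
	mem->regions[0].guest_phys_addr = gpa;
	mem->regions[0].memory_size     = size;
	mem->regions[0].userspace_addr  = (unsigned long long)(uintptr_t)hva;

	ret = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	free(mem);
	return ret;
}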
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d36d8beb3351..78f3c5fc02e4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -15,13 +15,15 @@
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+#define VHOST_WORK_QUEUED 1
struct vhost_work {
- struct list_head node;
+ struct llist_node node;
vhost_work_fn_t fn;
wait_queue_head_t done;
int flushing;
unsigned queue_seq;
unsigned done_seq;
+ unsigned long flags;
};
/* Poll a file (eventfd or socket) */
@@ -53,6 +55,27 @@ struct vhost_log {
u64 len;
};
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+struct vhost_umem_node {
+ struct rb_node rb;
+ struct list_head link;
+ __u64 start;
+ __u64 last;
+ __u64 size;
+ __u64 userspace_addr;
+ __u32 perm;
+ __u32 flags_padding;
+ __u64 __subtree_last;
+};
+
+struct vhost_umem {
+ struct rb_root umem_tree;
+ struct list_head umem_list;
+ int numem;
+};
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -98,10 +121,12 @@ struct vhost_virtqueue {
u64 log_addr;
struct iovec iov[UIO_MAXIOV];
+ struct iovec iotlb_iov[64];
struct iovec *indirect;
struct vring_used_elem *heads;
/* Protected by virtqueue mutex. */
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
+ struct vhost_umem *iotlb;
void *private_data;
u64 acked_features;
/* Log write descriptors */
@@ -118,25 +143,35 @@ struct vhost_virtqueue {
u32 busyloop_timeout;
};
+struct vhost_msg_node {
+ struct vhost_msg msg;
+ struct vhost_virtqueue *vq;
+ struct list_head node;
+};
+
struct vhost_dev {
- struct vhost_memory *memory;
struct mm_struct *mm;
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;
struct file *log_file;
struct eventfd_ctx *log_ctx;
- spinlock_t work_lock;
- struct list_head work_list;
+ struct llist_head work_list;
struct task_struct *worker;
+ struct vhost_umem *umem;
+ struct vhost_umem *iotlb;
+ spinlock_t iotlb_lock;
+ struct list_head read_list;
+ struct list_head pending_list;
+ wait_queue_head_t wait;
};
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
+struct vhost_umem *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
@@ -165,6 +200,21 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
+
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+void vhost_enqueue_msg(struct vhost_dev *dev,
+ struct list_head *head,
+ struct vhost_msg_node *node);
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head);
+unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait);
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock);
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from);
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
new file mode 100644
index 000000000000..0ddf3a2dbfc4
--- /dev/null
+++ b/drivers/vhost/vsock.c
@@ -0,0 +1,719 @@
+/*
+ * vhost transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <net/sock.h>
+#include <linux/virtio_vsock.h>
+#include <linux/vhost.h>
+
+#include <net/af_vsock.h>
+#include "vhost.h"
+
+#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+
+enum {
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+};
+
+/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_SPINLOCK(vhost_vsock_lock);
+static LIST_HEAD(vhost_vsock_list);
+
+struct vhost_vsock {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+
+ /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+ struct list_head list;
+
+ struct vhost_work send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
+ u32 guest_cid;
+};
+
+static u32 vhost_transport_get_local_cid(void)
+{
+ return VHOST_VSOCK_DEFAULT_HOST_CID;
+}
+
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ list_for_each_entry(vsock, &vhost_vsock_list, list) {
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+ if (other_cid == 0)
+ continue;
+
+ if (other_cid == guest_cid) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return vsock;
+ }
+ }
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return NULL;
+}
+
+static void
+vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ bool added = false;
+ bool restart_tx = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ /* Avoid further vmexits, we're already processing the virtqueue */
+ vhost_disable_notify(&vsock->dev, vq);
+
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ struct iov_iter iov_iter;
+ unsigned out, in;
+ size_t nbytes;
+ size_t len;
+ int head;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ if (list_empty(&vsock->send_pkt_list)) {
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ vhost_enable_notify(&vsock->dev, vq);
+ break;
+ }
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ break;
+ }
+
+ if (head == vq->num) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ if (out) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ break;
+ }
+
+ len = iov_length(&vq->iov[out], in);
+ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
+
+ nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt hdr\n");
+ break;
+ }
+
+ nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
+ if (nbytes != pkt->len) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+ added = true;
+
+ if (pkt->reply) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we have resources to resume tx processing? */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
+
+ virtio_transport_free_pkt(pkt);
+ }
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+
+ if (restart_tx)
+ vhost_poll_queue(&tx_vq->poll);
+}
+
+static void vhost_transport_send_pkt_work(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_vsock *vsock;
+
+ vsock = container_of(work, struct vhost_vsock, send_pkt_work);
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int
+vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct vhost_vsock *vsock;
+ struct vhost_virtqueue *vq;
+ int len = pkt->len;
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ if (!vsock) {
+ virtio_transport_free_pkt(pkt);
+ return -ENODEV;
+ }
+
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add_tail(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+ return len;
+}
+
+static struct virtio_vsock_pkt *
+vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+ unsigned int out, unsigned int in)
+{
+ struct virtio_vsock_pkt *pkt;
+ struct iov_iter iov_iter;
+ size_t nbytes;
+ size_t len;
+
+ if (in != 0) {
+ vq_err(vq, "Expected 0 input buffers, got %u\n", in);
+ return NULL;
+ }
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return NULL;
+
+ len = iov_length(vq->iov, out);
+ iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);
+
+ nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+ vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+ sizeof(pkt->hdr), nbytes);
+ kfree(pkt);
+ return NULL;
+ }
+
+ if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
+ pkt->len = le32_to_cpu(pkt->hdr.len);
+
+ /* No payload */
+ if (!pkt->len)
+ return pkt;
+
+ /* The pkt is too big */
+ if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+ if (nbytes != pkt->len) {
+ vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+ pkt->len, nbytes);
+ virtio_transport_free_pkt(pkt);
+ return NULL;
+ }
+
+ return pkt;
+}
+
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < vq->num;
+}
+
+static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+ struct virtio_vsock_pkt *pkt;
+ int head;
+ unsigned int out, in;
+ bool added = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ vhost_disable_notify(&vsock->dev, vq);
+ for (;;) {
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+ * callbacks disabled.
+ */
+ goto no_more_replies;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0)
+ break;
+
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ pkt = vhost_vsock_alloc_pkt(vq, out, in);
+ if (!pkt) {
+ vq_err(vq, "Faulted on pkt\n");
+ continue;
+ }
+
+ /* Only accept correctly addressed packets */
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+ virtio_transport_recv_pkt(pkt);
+ else
+ virtio_transport_free_pkt(pkt);
+
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+ added = true;
+ }
+
+no_more_replies:
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_access_ok(vq)) {
+ ret = -EFAULT;
+ mutex_unlock(&vq->mutex);
+ goto err_vq;
+ }
+
+ if (!vq->private_data) {
+ vq->private_data = vsock;
+ vhost_vq_init_access(vq);
+ }
+
+ mutex_unlock(&vq->mutex);
+ }
+
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err_vq:
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static int vhost_vsock_stop(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+ kvfree(vsock);
+}
+
+static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+{
+ struct vhost_virtqueue **vqs;
+ struct vhost_vsock *vsock;
+ int ret;
+
+ /* This struct is large and allocation could fail, so fall back to
+ * vmalloc if there is no other way.
+ */
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!vsock) {
+ vsock = vmalloc(sizeof(*vsock));
+ if (!vsock)
+ return -ENOMEM;
+ }
+
+ vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ atomic_set(&vsock->queued_replies, 0);
+
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+ vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+ vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+
+ file->private_data = vsock;
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+
+ spin_lock_bh(&vhost_vsock_lock);
+ list_add_tail(&vsock->list, &vhost_vsock_list);
+ spin_unlock_bh(&vhost_vsock_lock);
+ return 0;
+
+out:
+ vhost_vsock_free(vsock);
+ return ret;
+}
+
+static void vhost_vsock_flush(struct vhost_vsock *vsock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
+ if (vsock->vqs[i].handle_kick)
+ vhost_poll_flush(&vsock->vqs[i].poll);
+ vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
+}
+
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
+ if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
+ }
+}
+
+static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+{
+ struct vhost_vsock *vsock = file->private_data;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ list_del(&vsock->list);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+
+ vhost_vsock_stop(vsock);
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ while (!list_empty(&vsock->send_pkt_list)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_dev_cleanup(&vsock->dev, false);
+ kfree(vsock->dev.vqs);
+ vhost_vsock_free(vsock);
+ return 0;
+}
+
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+{
+ struct vhost_vsock *other;
+
+ /* Refuse reserved CIDs */
+ if (guest_cid <= VMADDR_CID_HOST ||
+ guest_cid == U32_MAX)
+ return -EINVAL;
+
+ /* 64-bit CIDs are not yet supported */
+ if (guest_cid > U32_MAX)
+ return -EINVAL;
+
+ /* Refuse if CID is already in use */
+ other = vhost_vsock_get(guest_cid);
+ if (other && other != vsock)
+ return -EADDRINUSE;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ vsock->guest_cid = guest_cid;
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return 0;
+}
+
+static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ if (features & ~VHOST_VSOCK_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vsock->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vsock->dev)) {
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+}
+
+static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_vsock *vsock = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 guest_cid;
+ u64 features;
+ int start;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_VSOCK_SET_GUEST_CID:
+ if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
+ return -EFAULT;
+ return vhost_vsock_set_cid(vsock, guest_cid);
+ case VHOST_VSOCK_SET_RUNNING:
+ if (copy_from_user(&start, argp, sizeof(start)))
+ return -EFAULT;
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+ return vhost_vsock_stop(vsock);
+ case VHOST_GET_FEATURES:
+ features = VHOST_VSOCK_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ return vhost_vsock_set_features(vsock, features);
+ default:
+ mutex_lock(&vsock->dev.mutex);
+ r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
+ else
+ vhost_vsock_flush(vsock);
+ mutex_unlock(&vsock->dev.mutex);
+ return r;
+ }
+}
+
+static const struct file_operations vhost_vsock_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vsock_dev_open,
+ .release = vhost_vsock_dev_release,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = vhost_vsock_dev_ioctl,
+};
+
+static struct miscdevice vhost_vsock_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vhost-vsock",
+ .fops = &vhost_vsock_fops,
+};
+
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
+static int __init vhost_vsock_init(void)
+{
+ int ret;
+
+ ret = vsock_core_init(&vhost_transport.transport);
+ if (ret < 0)
+ return ret;
+ return misc_register(&vhost_vsock_misc);
+}
+
+static void __exit vhost_vsock_exit(void)
+{
+ misc_deregister(&vhost_vsock_misc);
+ vsock_core_exit();
+}
+
+module_init(vhost_vsock_init);
+module_exit(vhost_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("vhost transport for vsock");
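
For reference, a minimal userspace sketch of how a VMM might drive the ioctls handled above. This is only an illustration, not part of the patch: error handling and the mandatory virtqueue setup (VHOST_SET_VRING_NUM/ADDR/KICK/CALL plus the memory table) are omitted, and the helper name vhost_vsock_setup() is invented for the example.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Hypothetical helper: open the misc device and bring the vsock backend up. */
static int vhost_vsock_setup(uint64_t guest_cid)
{
	int fd = open("/dev/vhost-vsock", O_RDWR);
	int running = 1;

	if (fd < 0)
		return -1;

	ioctl(fd, VHOST_SET_OWNER);                       /* bind the device to this process */
	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid); /* must be > VMADDR_CID_HOST and < U32_MAX */
	/* ... VHOST_SET_VRING_* setup for the TX/RX queues goes here ... */
	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);     /* starts the kick handlers above */
	return fd;
}
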
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index e5b14f52628f..939f057836e1 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_data/lp855x.h>
@@ -74,6 +75,7 @@ struct lp855x {
struct lp855x_platform_data *pdata;
struct pwm_device *pwm;
struct regulator *supply; /* regulator for VDD input */
+ struct regulator *enable; /* regulator for EN/VDDIO input */
};
static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
@@ -433,6 +435,19 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp->supply = NULL;
}
+ lp->enable = devm_regulator_get_optional(lp->dev, "enable");
+ if (IS_ERR(lp->enable)) {
+ ret = PTR_ERR(lp->enable);
+ if (ret == -ENODEV) {
+ lp->enable = NULL;
+ } else {
+ if (ret != -EPROBE_DEFER)
+ dev_err(lp->dev, "error getting enable regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
if (lp->supply) {
ret = regulator_enable(lp->supply);
if (ret < 0) {
@@ -441,6 +456,20 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
}
}
+ if (lp->enable) {
+ ret = regulator_enable(lp->enable);
+ if (ret < 0) {
+ dev_err(lp->dev, "failed to enable vddio: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * LP8555 datasheet says t_RESPONSE (time between VDDIO and
+ * I2C) is 1ms.
+ */
+ usleep_range(1000, 2000);
+ }
+
i2c_set_clientdata(cl, lp);
ret = lp855x_configure(lp);
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index 8fe41caac38e..e2d7d039ce3b 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
@@ -10,6 +10,8 @@
* TODO: Code Cleanup
*/
+#define DRIVER_NAME "bfin-adv7393"
+
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/module.h>
diff --git a/drivers/video/fbdev/bfin_adv7393fb.h b/drivers/video/fbdev/bfin_adv7393fb.h
index cd591b5152a5..afd0380e19e1 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.h
+++ b/drivers/video/fbdev/bfin_adv7393fb.h
@@ -59,8 +59,6 @@ enum {
BLANK_OFF,
};
-#define DRIVER_NAME "bfin-adv7393"
-
struct adv7393fb_modes {
const s8 name[25]; /* Full name */
 u16 xres; /* Active Horizontal Pixels */
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 649b32f78c08..ff561073ee4e 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -273,7 +273,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
}
cfb->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1");
+ syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
if (IS_ERR(cfb->syscon)) {
ret = PTR_ERR(cfb->syscon);
goto out_fb_release;
@@ -376,7 +376,7 @@ static int clps711x_fb_remove(struct platform_device *pdev)
}
static const struct of_device_id clps711x_fb_dt_ids[] = {
- { .compatible = "cirrus,clps711x-fb", },
+ { .compatible = "cirrus,ep7209-fb", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_fb_dt_ids);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
index 8511c648a15c..9d78411a3bf7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
@@ -14,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omap-panel-data.h>
struct panel_drv_data {
@@ -25,7 +25,6 @@ struct panel_drv_data {
struct omap_video_timings timings;
- enum omap_dss_venc_type connector_type;
bool invert_polarity;
};
@@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = {
static const struct of_device_id tvc_of_match[];
-struct tvc_of_data {
- enum omap_dss_venc_type connector_type;
-};
-
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
in->ops.atv->set_timings(in, &ddata->timings);
if (!ddata->dev->of_node) {
- in->ops.atv->set_type(in, ddata->connector_type);
+ in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
in->ops.atv->invert_vid_out_polarity(in,
ddata->invert_polarity);
@@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
ddata->in = in;
- ddata->connector_type = pdata->connector_type;
ddata->invert_polarity = pdata->invert_polarity;
dssdev = &ddata->dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index d811e6dcaef7..06e1db34541e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -16,8 +16,7 @@
#include <drm/drm_edid.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
static const struct omap_video_timings dvic_default_timings = {
.x_res = 640,
@@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = {
.detect = dvic_detect,
};
-static int dvic_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_dvi_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
- int i2c_bus_num;
-
- pdata = dev_get_platdata(&pdev->dev);
- i2c_bus_num = pdata->i2c_bus_num;
-
- if (i2c_bus_num != -1) {
- struct i2c_adapter *adapter;
-
- adapter = i2c_get_adapter(i2c_bus_num);
- if (!adapter) {
- dev_err(&pdev->dev,
- "Failed to get I2C adapter, bus %d\n",
- i2c_bus_num);
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- i2c_put_adapter(ddata->i2c_adapter);
-
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int dvic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = dvic_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = dvic_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = dvic_probe_of(pdev);
+ if (r)
+ return r;
ddata->timings = dvic_default_timings;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
index 6ee4129bc0c0..58d5803ede67 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
@@ -17,8 +17,7 @@
#include <drm/drm_edid.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
static const struct omap_video_timings hdmic_default_timings = {
.x_res = 640,
@@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = {
.set_hdmi_infoframe = hdmic_set_infoframe,
};
-static int hdmic_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_hdmi_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->hpd_gpio = -ENODEV;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- if (dev_get_platdata(&pdev->dev)) {
- r = hdmic_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = hdmic_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = hdmic_probe_of(pdev);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->hpd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
index 8c246c213e06..a9a67167cc3d 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
index d9048b3df495..8c0953d069b7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
.get_timings = tfp410_get_timings,
};
-static int tfp410_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct encoder_tfp410_platform_data *pdata;
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->pd_gpio = pdata->power_down_gpio;
-
- ddata->data_lines = pdata->data_lines;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tfp410_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = tfp410_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = tfp410_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = tfp410_probe_of(pdev);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->pd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 677e2545fcbe..80dc47347e21 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
index e780fd4f8b46..ace3d818afe5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
@@ -16,7 +16,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omap-panel-data.h>
#include <video/of_display_timing.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 3414c2609320..b58012b82b6f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -25,8 +25,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
#include <video/mipi_display.h>
/* DSI Virtual channel. Hardcoded for now. */
@@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = {
.memory_read = dsicm_memory_read,
};
-static int dsicm_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_dsicm_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->reset_gpio = pdata->reset_gpio;
-
- if (pdata->use_ext_te)
- ddata->ext_te_gpio = pdata->ext_te_gpio;
- else
- ddata->ext_te_gpio = -1;
-
- ddata->ulps_timeout = pdata->ulps_timeout;
-
- ddata->use_dsi_backlight = pdata->use_dsi_backlight;
-
- ddata->pin_config = pdata->pin_config;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int dsicm_probe_of(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev)
dev_dbg(dev, "probe\n");
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->pdev = pdev;
- if (dev_get_platdata(dev)) {
- r = dsicm_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = dsicm_probe_of(pdev);
+ if (r)
+ return r;
ddata->timings.x_res = 864;
ddata->timings.y_res = 480;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 18eb60e9c9ec..f14691ce8d02 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -16,8 +16,7 @@
#include <linux/mutex.h>
#include <linux/gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
static struct omap_video_timings lb035q02_timings = {
.x_res = 320,
@@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-static int lb035q02_probe_pdata(struct spi_device *spi)
-{
- const struct panel_lb035q02_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&spi->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
- GPIOF_OUT_INIT_LOW, "panel enable");
- if (r)
- goto err_gpio;
-
- ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
-
- ddata->backlight_gpio = pdata->backlight_gpio;
-
- return 0;
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
-}
-
static int lb035q02_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
struct omap_dss_device *dssdev;
int r;
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
@@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = lb035q02_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = lb035q02_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = lb035q02_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->backlight_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index 8a928c9a2fc9..a2cbadd3eca3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -18,8 +18,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = {
};
-static int nec_8048_probe_pdata(struct spi_device *spi)
-{
- const struct panel_nec_nl8048hl11_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->qvga_gpio = pdata->qvga_gpio;
- ddata->res_gpio = pdata->res_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int nec_8048_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->mode = SPI_MODE_0;
spi->bits_per_word = 32;
@@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = nec_8048_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = nec_8048_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = nec_8048_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->qvga_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 1954ec913ce5..a8be18a87fa0 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -17,8 +17,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
- char *desc, struct gpio_desc **gpiod)
-{
- int r;
-
- r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r) {
- *gpiod = NULL;
- return r == -ENOENT ? 0 : r;
- }
-
- *gpiod = gpio_to_desc(gpio);
-
- return 0;
-}
-
-static int sharp_ls_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_sharp_ls037v7dw01_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
- "lcd MO", &ddata->mo_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd LR", &ddata->lr_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd UD", &ddata->ud_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
- "lcd RESB", &ddata->resb_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
- "lcd INI", &ddata->ini_gpio);
- if (r)
- return r;
-
- return 0;
-}
-
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
const char *desc, struct gpio_desc **gpiod)
{
@@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = sharp_ls_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = sharp_ls_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = sharp_ls_probe_of(pdev);
+ if (r)
+ return r;
ddata->videomode = sharp_ls_timings;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 31efcca801bd..468560a6daae 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -33,7 +33,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omap-panel-data.h>
#define MIPID_CMD_READ_DISP_ID 0x04
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 4d657f3ab679..b529a8c2b652 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -28,8 +28,7 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
.check_timings = td028ttec1_panel_check_timings,
};
-static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
-{
- const struct panel_tpo_td028ttec1_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int td028ttec1_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->bits_per_word = 9;
spi->mode = SPI_MODE_3;
@@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
ddata->spi_dev = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = td028ttec1_panel_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = td028ttec1_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = td028ttec1_probe_of(spi);
+ if (r)
+ return r;
ddata->videomode = td028ttec1_panel_timings;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 68e3b68a2920..51e628b85f4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -19,8 +19,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
#define TPO_R02_MODE(x) ((x) & 7)
#define TPO_R02_MODE_800x480 7
@@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = {
};
-static int tpo_td043_probe_pdata(struct spi_device *spi)
-{
- const struct panel_tpo_td043mtea1_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->nreset_gpio = pdata->nreset_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tpo_td043_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->bits_per_word = 16;
spi->mode = SPI_MODE_0;
@@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = tpo_td043_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = tpo_td043_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = tpo_td043_probe_of(spi);
+ if (r)
+ return r;
ddata->mode = TPO_R02_MODE_800x480;
memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
index 663ccc3bf4e5..2481f4871f66 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 5a87179b7312..29de4827589d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -35,7 +35,7 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
@@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
core.default_display_name = def_disp_name;
else if (pdata->default_display_name)
core.default_display_name = pdata->default_display_name;
- else if (pdata->default_device)
- core.default_display_name = pdata->default_device->name;
register_pm_notifier(&omap_dss_pm_notif_block);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 6607db37a5e4..3691bde4ce0a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -26,7 +26,7 @@
#include <linux/interrupt.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 5491e304f4fe..7a75dfda9845 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -41,7 +41,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
index 038c15b04215..59c9a5c47ca9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
@@ -18,7 +18,7 @@
*/
#include <linux/kernel.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dispc.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 75b5286029ee..b3fdbfd0b82d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -25,7 +25,7 @@
#include <linux/platform_device.h>
#include <linux/sysfs.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index ef5b9027985d..dd5468695c43 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 7953e6a52346..da09806b940c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -34,7 +34,7 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d63e59807707..9e4800a4e3d1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -42,7 +42,7 @@
#include <linux/of_platform.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/mipi_display.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index bf407b6ba15c..d356a252ab4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -18,7 +18,7 @@
#include <linux/of.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 0078c4d1fc31..47d7f69ad9ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -41,7 +41,7 @@
#include <linux/suspend.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 0184a8461df1..a3cc0ca8f9d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -73,6 +73,17 @@
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+enum omap_dss_clk_source {
+ OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
+ * OMAP4: DSS_FCLK */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
+ * OMAP4: PLL1_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
+ * OMAP4: PLL1_CLK2 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
+};
+
enum dss_io_pad_mode {
DSS_IO_PAD_MODE_RESET,
DSS_IO_PAD_MODE_RFBI,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index c886a2927f73..8fc843b56b26 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -23,7 +23,7 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 53616b02b613..f6de87e078b0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -23,7 +23,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
+#include <sound/omap-hdmi-audio.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 2e71aec838b1..926a6f20dbb2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -33,7 +33,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <sound/omap-hdmi-audio.h>
#include "hdmi4_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index aade6d99662a..0ee829a165c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -38,7 +38,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <sound/omap-hdmi-audio.h>
#include "hdmi5_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 1b8fcc6c4ba1..189a5ad125a3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 1f5d19c119ce..9a13c35fd6d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 06e23a7c432c..eac3665aba6c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 7c544bc56fb5..705373e4cf38 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index a7414fb12830..9e2a67fdf4d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 08a67f4f6a20..69f86d2cc274 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
break;
}
- mgr->caps = 0;
mgr->supported_displays =
dss_feat_get_supported_displays(mgr->id);
mgr->supported_outputs =
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 16072159bd24..bed9a978269d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 4cc5ddebfb34..f1f6c0aea752 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -26,7 +26,7 @@
#include <linux/kobject.h>
#include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 2f7cee985cdd..d6c5d75d2ef8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -30,7 +30,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
index f974ddcd3b6e..0564c5606cd0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
@@ -22,7 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
index aea6a1d0fb20..562b0c4ae0c6 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
@@ -38,7 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
struct rfbi_reg { u16 idx; };
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index d747cc6b59e1..c4be732a4714 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -29,7 +29,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
static struct {
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 26e0ee30adf8..392464da12e4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -37,7 +37,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index b1ec59e42940..a890540f2037 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b84c..ef69273074ba 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -30,7 +30,7 @@
#include <linux/export.h>
#include <linux/sizes.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index d3af01c94a58..1d7c012f09db 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
#include <linux/platform_device.h>
#include <linux/omapfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
@@ -1332,7 +1332,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
}
dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
- &rg->attrs);
+ rg->attrs);
rg->token = NULL;
rg->vaddr = NULL;
@@ -1370,7 +1370,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
void *token;
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long attrs;
dma_addr_t dma_handle;
int r;
@@ -1386,15 +1386,15 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
size = PAGE_ALIGN(size);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ attrs = DMA_ATTR_WRITE_COMBINE;
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
- GFP_KERNEL, &attrs);
+ GFP_KERNEL, attrs);
if (token == NULL) {
dev_err(fbdev->dev, "failed to allocate framebuffer\n");
@@ -1408,7 +1408,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
r = omap_vrfb_request_ctx(&rg->vrfb);
if (r) {
dma_free_attrs(fbdev->dev, size, token, dma_handle,
- &attrs);
+ attrs);
dev_err(fbdev->dev, "vrfb create ctx failed\n");
return r;
}
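
The two omapfb-main.c hunks above track the tree-wide switch from struct dma_attrs to plain unsigned long attribute bits. A condensed sketch of the resulting allocation pattern, with an invented helper name and parameter purely for illustration:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: DMA attributes are now bit flags that can simply be OR-ed together. */
static void *alloc_fb_buffer(struct device *dev, size_t size,
			     dma_addr_t *handle, bool no_kernel_mapping)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (no_kernel_mapping)
		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	/* The same attrs value is later passed to dma_free_attrs(). */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
}
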
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0033..8087a009c54f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
#include <linux/mm.h>
#include <linux/omapfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a367..555487d6dbea 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
@@ -28,10 +28,9 @@
#endif
#include <linux/rwsem.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#ifdef DEBUG
extern bool omapfb_debug;
@@ -51,7 +50,7 @@ extern bool omapfb_debug;
struct omapfb2_mem_region {
int id;
- struct dma_attrs attrs;
+ unsigned long attrs;
void *token;
dma_addr_t dma_handle;
u32 paddr;
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 10fbfd8ab963..b6bc4a0bda2a 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -36,11 +36,11 @@ static int __init fb_logo_late_init(void)
late_initcall(fb_logo_late_init);
-/* logo's are marked __initdata. Use __init_refok to tell
+/* logos are marked __initdata. Use __ref to tell
* modpost that it is intended that this function uses data
* marked __initdata.
*/
-const struct linux_logo * __init_refok fb_find_logo(int depth)
+const struct linux_logo * __ref fb_find_logo(int depth)
{
const struct linux_logo *logo = NULL;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 888d5f8322ce..4e7003db12c4 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -207,6 +207,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
num = min(num, ARRAY_SIZE(vb->pfns));
mutex_lock(&vb->balloon_lock);
+ /* We can't release more pages than taken */
+ num = min(num, (size_t)vb->num_pages);
for (vb->num_pfns = 0; vb->num_pfns < num;
vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
page = balloon_page_dequeue(vb_dev_info);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ca6bfddaacad..114a0c88afb8 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -117,7 +117,10 @@ struct vring_virtqueue {
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/*
- * The interaction between virtio and a possible IOMMU is a mess.
+ * Modern virtio devices have a feature bit to specify whether they need
+ * the quirk of bypassing the IOMMU. If the quirk is not needed, just use
+ * the DMA API.
+ *
+ * If the quirk is needed, the interaction between virtio and the DMA API
+ * is messy.
*
* On most systems with virtio, physical addresses match bus addresses,
* and it doesn't particularly matter whether we use the DMA API.
@@ -133,10 +136,18 @@ struct vring_virtqueue {
*
* For the time being, we preserve historic behavior and bypass the DMA
* API.
+ *
+ * TODO: install a per-device DMA ops structure that does the right thing
+ * taking into account all the above quirks, and use the DMA API
+ * unconditionally on data path.
*/
static bool vring_use_dma_api(struct virtio_device *vdev)
{
+ if (!virtio_has_iommu_quirk(vdev))
+ return true;
+
+ /* Otherwise, we are left to guess. */
/*
 * In theory, it's possible to have a buggy QEMU-supplied
* emulated Q35 IOMMU and Xen enabled at the same time. On
@@ -1099,6 +1110,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_VERSION_1:
break;
+ case VIRTIO_F_IOMMU_PLATFORM:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
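
The reworked comment above boils down to: when the device offers VIRTIO_F_IOMMU_PLATFORM it goes through the IOMMU, so the ring code can rely on the DMA API; only legacy devices that bypass the IOMMU fall back to the guessing below. The virtio_has_iommu_quirk() helper consulted by vring_use_dma_api() is assumed to be a thin wrapper along these lines (its real definition is added elsewhere in this merge):

/* Assumed shape of the helper used by vring_use_dma_api() above. */
static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
{
	/*
	 * A device that does not offer VIRTIO_F_IOMMU_PLATFORM accesses
	 * guest memory with physical addresses and bypasses any IOMMU.
	 */
	return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}
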
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index a2eec97d5064..bb09de633939 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
goto out;
}
- hdq_data->hdq_irqstatus = 0;
-
if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index d488961a8c90..51f2f66d6555 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -153,16 +153,4 @@ static struct w1_family w1_family_12 = {
.fid = W1_FAMILY_DS2406,
.fops = &w1_f12_fops,
};
-
-static int __init w1_f12_init(void)
-{
- return w1_register_family(&w1_family_12);
-}
-
-static void __exit w1_f12_exit(void)
-{
- w1_unregister_family(&w1_family_12);
-}
-
-module_init(w1_f12_init);
-module_exit(w1_f12_exit);
+module_w1_family(w1_family_12);
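
This and the following w1 slave conversions rely on the module_w1_family() helper, which is presumed to expand roughly as below (the real macro lives in drivers/w1/w1_family.h); the expansion generates the module_init/module_exit pair, which is why the per-driver boilerplate can simply be deleted:

/* Presumed expansion: register/unregister the family at module load/unload. */
#define module_w1_family(__w1_family) \
	module_driver(__w1_family, w1_register_family, w1_unregister_family)
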
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 7dfa0e11688a..aec5958e66e9 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -351,16 +351,4 @@ static struct w1_family w1_family_29 = {
.fid = W1_FAMILY_DS2408,
.fops = &w1_f29_fops,
};
-
-static int __init w1_f29_init(void)
-{
- return w1_register_family(&w1_family_29);
-}
-
-static void __exit w1_f29_exit(void)
-{
- w1_unregister_family(&w1_family_29);
-}
-
-module_init(w1_f29_init);
-module_exit(w1_f29_exit);
+module_w1_family(w1_family_29);
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index ee28fc1ff390..f2e1c51533b9 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -135,16 +135,4 @@ static struct w1_family w1_family_3a = {
.fid = W1_FAMILY_DS2413,
.fops = &w1_f3a_fops,
};
-
-static int __init w1_f3a_init(void)
-{
- return w1_register_family(&w1_family_3a);
-}
-
-static void __exit w1_f3a_exit(void)
-{
- w1_unregister_family(&w1_family_3a);
-}
-
-module_init(w1_f3a_init);
-module_exit(w1_f3a_exit);
+module_w1_family(w1_family_3a);
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
index 7e41b7d91fb5..4ab54fd9dde2 100644
--- a/drivers/w1/slaves/w1_ds2423.c
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -138,19 +138,7 @@ static struct w1_family w1_family_1d = {
.fid = W1_COUNTER_DS2423,
.fops = &w1_f1d_fops,
};
-
-static int __init w1_f1d_init(void)
-{
- return w1_register_family(&w1_family_1d);
-}
-
-static void __exit w1_f1d_exit(void)
-{
- w1_unregister_family(&w1_family_1d);
-}
-
-module_init(w1_f1d_init);
-module_exit(w1_f1d_exit);
+module_w1_family(w1_family_1d);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 9c4ff9d28adc..80572cb63ba8 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -288,19 +288,7 @@ static struct w1_family w1_family_2d = {
.fid = W1_EEPROM_DS2431,
.fops = &w1_f2d_fops,
};
-
-static int __init w1_f2d_init(void)
-{
- return w1_register_family(&w1_family_2d);
-}
-
-static void __exit w1_f2d_fini(void)
-{
- w1_unregister_family(&w1_family_2d);
-}
-
-module_init(w1_f2d_init);
-module_exit(w1_f2d_fini);
+module_w1_family(w1_family_2d);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>");
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 72319a968a9e..6cf378c89ecb 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -305,16 +305,4 @@ static struct w1_family w1_family_23 = {
.fid = W1_EEPROM_DS2433,
.fops = &w1_f23_fops,
};
-
-static int __init w1_f23_init(void)
-{
- return w1_register_family(&w1_family_23);
-}
-
-static void __exit w1_f23_fini(void)
-{
- w1_unregister_family(&w1_family_23);
-}
-
-module_init(w1_f23_init);
-module_exit(w1_f23_fini);
+module_w1_family(w1_family_23);
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index d9079d48d112..ffa37f773b3b 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -121,25 +121,14 @@ static const struct attribute_group *w1_ds2760_groups[] = {
NULL,
};
-static DEFINE_IDA(bat_ida);
-
static int w1_ds2760_add_slave(struct w1_slave *sl)
{
int ret;
- int id;
struct platform_device *pdev;
- id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto noid;
- }
-
- pdev = platform_device_alloc("ds2760-battery", id);
- if (!pdev) {
- ret = -ENOMEM;
- goto pdev_alloc_failed;
- }
+ pdev = platform_device_alloc("ds2760-battery", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
@@ -148,24 +137,19 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
dev_set_drvdata(&sl->dev, pdev);
- goto success;
+ return 0;
pdev_add_failed:
platform_device_put(pdev);
-pdev_alloc_failed:
- ida_simple_remove(&bat_ida, id);
-noid:
-success:
+
return ret;
}
static void w1_ds2760_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
- int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&bat_ida, id);
}
static struct w1_family_ops w1_ds2760_fops = {
@@ -178,28 +162,13 @@ static struct w1_family w1_ds2760_family = {
.fid = W1_FAMILY_DS2760,
.fops = &w1_ds2760_fops,
};
-
-static int __init w1_ds2760_init(void)
-{
- pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, Szabolcs Gyurko\n");
- ida_init(&bat_ida);
- return w1_register_family(&w1_ds2760_family);
-}
-
-static void __exit w1_ds2760_exit(void)
-{
- w1_unregister_family(&w1_ds2760_family);
- ida_destroy(&bat_ida);
-}
+module_w1_family(w1_ds2760_family);
EXPORT_SYMBOL(w1_ds2760_read);
EXPORT_SYMBOL(w1_ds2760_write);
EXPORT_SYMBOL(w1_ds2760_store_eeprom);
EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
-module_init(w1_ds2760_init);
-module_exit(w1_ds2760_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>");
MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 50e85f7929d4..f5c2aa429a92 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -113,25 +113,14 @@ static const struct attribute_group *w1_ds2780_groups[] = {
NULL,
};
-static DEFINE_IDA(bat_ida);
-
static int w1_ds2780_add_slave(struct w1_slave *sl)
{
int ret;
- int id;
struct platform_device *pdev;
- id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto noid;
- }
-
- pdev = platform_device_alloc("ds2780-battery", id);
- if (!pdev) {
- ret = -ENOMEM;
- goto pdev_alloc_failed;
- }
+ pdev = platform_device_alloc("ds2780-battery", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
@@ -144,19 +133,15 @@ static int w1_ds2780_add_slave(struct w1_slave *sl)
pdev_add_failed:
platform_device_put(pdev);
-pdev_alloc_failed:
- ida_simple_remove(&bat_ida, id);
-noid:
+
return ret;
}
static void w1_ds2780_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
- int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&bat_ida, id);
}
static struct w1_family_ops w1_ds2780_fops = {
@@ -169,21 +154,7 @@ static struct w1_family w1_ds2780_family = {
.fid = W1_FAMILY_DS2780,
.fops = &w1_ds2780_fops,
};
-
-static int __init w1_ds2780_init(void)
-{
- ida_init(&bat_ida);
- return w1_register_family(&w1_ds2780_family);
-}
-
-static void __exit w1_ds2780_exit(void)
-{
- w1_unregister_family(&w1_ds2780_family);
- ida_destroy(&bat_ida);
-}
-
-module_init(w1_ds2780_init);
-module_exit(w1_ds2780_exit);
+module_w1_family(w1_ds2780_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 1eb98fb1688d..9c03e014cf9e 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <linux/idr.h>
#include "../w1.h"
#include "../w1_int.h"
@@ -111,25 +110,14 @@ static const struct attribute_group *w1_ds2781_groups[] = {
NULL,
};
-static DEFINE_IDA(bat_ida);
-
static int w1_ds2781_add_slave(struct w1_slave *sl)
{
int ret;
- int id;
struct platform_device *pdev;
- id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto noid;
- }
-
- pdev = platform_device_alloc("ds2781-battery", id);
- if (!pdev) {
- ret = -ENOMEM;
- goto pdev_alloc_failed;
- }
+ pdev = platform_device_alloc("ds2781-battery", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
@@ -142,19 +130,15 @@ static int w1_ds2781_add_slave(struct w1_slave *sl)
pdev_add_failed:
platform_device_put(pdev);
-pdev_alloc_failed:
- ida_simple_remove(&bat_ida, id);
-noid:
+
return ret;
}
static void w1_ds2781_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
- int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&bat_ida, id);
}
static struct w1_family_ops w1_ds2781_fops = {
@@ -167,21 +151,7 @@ static struct w1_family w1_ds2781_family = {
.fid = W1_FAMILY_DS2781,
.fops = &w1_ds2781_fops,
};
-
-static int __init w1_ds2781_init(void)
-{
- ida_init(&bat_ida);
- return w1_register_family(&w1_ds2781_family);
-}
-
-static void __exit w1_ds2781_exit(void)
-{
- w1_unregister_family(&w1_ds2781_family);
- ida_destroy(&bat_ida);
-}
-
-module_init(w1_ds2781_init);
-module_exit(w1_ds2781_exit);
+module_w1_family(w1_ds2781_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 365d6dff21de..5e348d38ec5c 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -427,16 +427,4 @@ static struct w1_family w1_family_1C = {
.fid = W1_FAMILY_DS28E04,
.fops = &w1_f1C_fops,
};
-
-static int __init w1_f1C_init(void)
-{
- return w1_register_family(&w1_family_1C);
-}
-
-static void __exit w1_f1C_fini(void)
-{
- w1_unregister_family(&w1_family_1C);
-}
-
-module_init(w1_f1C_init);
-module_exit(w1_f1C_fini);
+module_w1_family(w1_family_1C);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index ed5dcb80a1f7..10a7a0767187 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -88,4 +88,16 @@ struct w1_family * w1_family_registered(u8);
void w1_unregister_family(struct w1_family *);
int w1_register_family(struct w1_family *);
+/**
+ * module_w1_family() - Helper macro for registering a 1-Wire family
+ * @__w1_family: w1_family struct
+ *
+ * Helper macro for 1-Wire families which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_w1_family(__w1_family) \
+ module_driver(__w1_family, w1_register_family, \
+ w1_unregister_family)
+
#endif /* __W1_FAMILY_H */
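
For reference, a slave driver converted to the helper documented above collapses to roughly the sketch below. This is not part of the patch: the family name, the fops table and the family code 0x42 are placeholders, and the include path assumes the layout used by the in-tree slaves under drivers/w1/slaves/.

#include <linux/module.h>

#include "../w1_family.h"

/* Callbacks and sysfs groups for the hypothetical slave would go here. */
static struct w1_family_ops w1_example_fops = {
	/* .add_slave = ..., .remove_slave = ..., .groups = ..., */
};

static struct w1_family w1_family_example = {
	.fid  = 0x42,			/* placeholder 1-Wire family code */
	.fops = &w1_example_fops,
};

/*
 * Expands (via module_driver()) to the module_init()/module_exit()
 * pair that each driver above used to open-code.
 */
module_w1_family(w1_family_example);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example 1-Wire slave using module_w1_family()");
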
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b4b3e256491b..1bffe006ca9a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -48,7 +48,6 @@ config WATCHDOG_NOWAYOUT
config WATCHDOG_SYSFS
bool "Read different watchdog information through sysfs"
- default n
help
Say Y here if you want to enable watchdog device status read through
sysfs attributes.
@@ -516,6 +515,15 @@ config MAX63XX_WATCHDOG
help
Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
+config MAX77620_WATCHDOG
+ tristate "Maxim Max77620 Watchdog Timer"
+ depends on MFD_MAX77620
+ help
+ This is the driver for the Max77620 watchdog timer.
+ Say 'Y' here to enable watchdog timer support for
+ MAX77620 chips. To compile this driver as a module,
+ choose M here: the module will be called max77620_wdt.
+
config IMX2_WDT
tristate "IMX2+ Watchdog"
depends on ARCH_MXC || ARCH_LAYERSCAPE
@@ -609,6 +617,16 @@ config QCOM_WDT
To compile this driver as a module, choose M here: the
module will be called qcom_wdt.
+config MESON_GXBB_WATCHDOG
+ tristate "Amlogic Meson GXBB SoCs watchdog support"
+ depends on ARCH_MESON
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in Amlogic Meson GXBB SoCs.
+ To compile this driver as a module, choose M here: the
+ module will be called meson_gxbb_wdt.
+
config MESON_WATCHDOG
tristate "Amlogic Meson SoCs watchdog support"
depends on ARCH_MESON
@@ -669,6 +687,19 @@ config RENESAS_WDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+config ASPEED_WATCHDOG
+ tristate "Aspeed 2400 watchdog support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in Aspeed BMC SoCs.
+
+ This driver is required to reboot the SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aspeed_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index a46e7c1380ac..c22ad3ea3539 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_ST_LPC_WATCHDOG) += st_lpc_wdt.o
obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
+obj-$(CONFIG_MESON_GXBB_WATCHDOG) += meson_gxbb_wdt.o
obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
+obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -203,6 +205,7 @@ obj-$(CONFIG_TANGOX_WATCHDOG) += tangox_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
+obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
new file mode 100644
index 000000000000..f5ad8023c2e6
--- /dev/null
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2016 IBM Corporation
+ *
+ * Joel Stanley <joel@jms.id.au>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+struct aspeed_wdt {
+ struct watchdog_device wdd;
+ void __iomem *base;
+ u32 ctrl;
+};
+
+static const struct of_device_id aspeed_wdt_of_table[] = {
+ { .compatible = "aspeed,ast2400-wdt" },
+ { .compatible = "aspeed,ast2500-wdt" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+
+#define WDT_STATUS 0x00
+#define WDT_RELOAD_VALUE 0x04
+#define WDT_RESTART 0x08
+#define WDT_CTRL 0x0C
+#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
+#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
+#define WDT_CTRL_1MHZ_CLK BIT(4)
+#define WDT_CTRL_WDT_EXT BIT(3)
+#define WDT_CTRL_WDT_INTR BIT(2)
+#define WDT_CTRL_RESET_SYSTEM BIT(1)
+#define WDT_CTRL_ENABLE BIT(0)
+
+#define WDT_RESTART_MAGIC 0x4755
+
+/* 32 bits at 1MHz, in milliseconds */
+#define WDT_MAX_TIMEOUT_MS 4294967
+#define WDT_DEFAULT_TIMEOUT 30
+#define WDT_RATE_1MHZ 1000000
+
+static struct aspeed_wdt *to_aspeed_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct aspeed_wdt, wdd);
+}
+
+static void aspeed_wdt_enable(struct aspeed_wdt *wdt, int count)
+{
+ wdt->ctrl |= WDT_CTRL_ENABLE;
+
+ writel(0, wdt->base + WDT_CTRL);
+ writel(count, wdt->base + WDT_RELOAD_VALUE);
+ writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+ writel(wdt->ctrl, wdt->base + WDT_CTRL);
+}
+
+static int aspeed_wdt_start(struct watchdog_device *wdd)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ aspeed_wdt_enable(wdt, wdd->timeout * WDT_RATE_1MHZ);
+
+ return 0;
+}
+
+static int aspeed_wdt_stop(struct watchdog_device *wdd)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ wdt->ctrl &= ~WDT_CTRL_ENABLE;
+ writel(wdt->ctrl, wdt->base + WDT_CTRL);
+
+ return 0;
+}
+
+static int aspeed_wdt_ping(struct watchdog_device *wdd)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+
+ return 0;
+}
+
+static int aspeed_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+ u32 actual;
+
+ wdd->timeout = timeout;
+
+ actual = min(timeout, wdd->max_hw_heartbeat_ms * 1000);
+
+ writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE);
+ writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+
+ return 0;
+}
+
+static int aspeed_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
+
+ mdelay(1000);
+
+ return 0;
+}
+
+static const struct watchdog_ops aspeed_wdt_ops = {
+ .start = aspeed_wdt_start,
+ .stop = aspeed_wdt_stop,
+ .ping = aspeed_wdt_ping,
+ .set_timeout = aspeed_wdt_set_timeout,
+ .restart = aspeed_wdt_restart,
+ .owner = THIS_MODULE,
+};
+
+static const struct watchdog_info aspeed_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE
+ | WDIOF_SETTIMEOUT,
+ .identity = KBUILD_MODNAME,
+};
+
+static int aspeed_wdt_remove(struct platform_device *pdev)
+{
+ struct aspeed_wdt *wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&wdt->wdd);
+
+ return 0;
+}
+
+static int aspeed_wdt_probe(struct platform_device *pdev)
+{
+ struct aspeed_wdt *wdt;
+ struct resource *res;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ /*
+ * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only
+ * runs at 1MHz. We chose to always run at 1MHz, as there's no
+ * good reason to have a faster watchdog counter.
+ */
+ wdt->wdd.info = &aspeed_wdt_info;
+ wdt->wdd.ops = &aspeed_wdt_ops;
+ wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
+ wdt->wdd.parent = &pdev->dev;
+
+ wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
+ watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+
+ /*
+ * Control reset on a per-device basis to ensure the
+ * host is not affected by a BMC reboot, so only reset
+ * the SOC and not the full chip
+ */
+ wdt->ctrl = WDT_CTRL_RESET_MODE_SOC |
+ WDT_CTRL_1MHZ_CLK |
+ WDT_CTRL_RESET_SYSTEM;
+
+ if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) {
+ aspeed_wdt_start(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ return 0;
+}
+
+static struct platform_driver aspeed_watchdog_driver = {
+ .probe = aspeed_wdt_probe,
+ .remove = aspeed_wdt_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(aspeed_wdt_of_table),
+ },
+};
+module_platform_driver(aspeed_watchdog_driver);
+
+MODULE_DESCRIPTION("Aspeed Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 2e6164c4abc0..4dddd8298a22 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -82,12 +82,6 @@ static int bcm2835_wdt_stop(struct watchdog_device *wdog)
return 0;
}
-static int bcm2835_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
-{
- wdog->timeout = t;
- return 0;
-}
-
static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog)
{
struct bcm2835_wdt *wdt = watchdog_get_drvdata(wdog);
@@ -96,15 +90,14 @@ static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog)
return WDOG_TICKS_TO_SECS(ret & PM_WDOG_TIME_SET);
}
-static struct watchdog_ops bcm2835_wdt_ops = {
+static const struct watchdog_ops bcm2835_wdt_ops = {
.owner = THIS_MODULE,
.start = bcm2835_wdt_start,
.stop = bcm2835_wdt_stop,
- .set_timeout = bcm2835_wdt_set_timeout,
.get_timeleft = bcm2835_wdt_get_timeleft,
};
-static struct watchdog_info bcm2835_wdt_info = {
+static const struct watchdog_info bcm2835_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
.identity = "Broadcom BCM2835 Watchdog timer",
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index a100f648880d..5d6b4e5f7989 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -34,6 +34,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
#define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN]
#define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX]
#define DA9063_WDG_TIMEOUT wdt_timeout[3]
+#define DA9063_RESET_PROTECTION_MS 256
struct da9063_watchdog {
struct da9063 *da9063;
@@ -171,6 +172,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.ops = &da9063_watchdog_ops;
wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
+ wdt->wdtdev.min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS;
wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
wdt->wdtdev.parent = &pdev->dev;
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index d4ba262da7ba..1b7e9169072f 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -45,9 +45,11 @@
#define SIO_REG_DEVREV 0x22 /* Device revision */
#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
#define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */
+#define SIO_F81866_REG_PORT_SEL 0x27 /* F81866 Multi-Function Register */
#define SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */
#define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */
#define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */
+#define SIO_F81866_REG_GPIO1 0x2c /* F81866 GPIO1 Enable Register */
#define SIO_REG_ENABLE 0x30 /* Logical device enable */
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
@@ -60,6 +62,7 @@
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
+#define SIO_F81866_ID 0x1010 /* Chipset ID */
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
@@ -116,7 +119,8 @@ module_param(start_withtimeout, uint, 0);
MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865 };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865,
+ f81866};
static const char *f71808e_names[] = {
"f71808fg",
@@ -126,6 +130,7 @@ static const char *f71808e_names[] = {
"f71882fg",
"f71889fg",
"f81865",
+ "f81866",
};
/* Super-I/O Function prototypes */
@@ -370,6 +375,22 @@ static int watchdog_start(void)
superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5);
break;
+ case f81866:
+ /* Set pin 70 to WDTRST# */
+ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
+ BIT(3) | BIT(0));
+ superio_set_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
+ BIT(2));
+ /*
+ * GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
+ * The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
+ * BIT5: 0 -> WDTRST#
+ * 1 -> GPIO15
+ */
+ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1,
+ BIT(5));
+ break;
+
default:
/*
* 'default' label to shut up the compiler and catch
@@ -382,7 +403,7 @@ static int watchdog_start(void)
superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0);
- if (watchdog.type == f81865)
+ if (watchdog.type == f81865 || watchdog.type == f81866)
superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF,
F81865_FLAG_WDOUT_EN);
else
@@ -788,6 +809,9 @@ static int __init f71808e_find(int sioaddr)
case SIO_F81865_ID:
watchdog.type = f81865;
break;
+ case SIO_F81866_ID:
+ watchdog.type = f81866;
+ break;
default:
pr_info("Unrecognized Fintek device: %04x\n",
(unsigned int)devid);
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index ba066e4a707b..93457cabc178 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -151,6 +151,8 @@ static int gpio_wdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
if (!gpio_is_valid(priv->gpio))
return priv->gpio;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 0acc6c5f729d..54cab189a763 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -150,6 +150,7 @@ static inline u32 no_reboot_bit(void)
u32 enable_bit;
switch (iTCO_wdt_private.iTCO_version) {
+ case 5:
case 3:
enable_bit = 0x00000010;
break;
@@ -512,6 +513,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
/* Clear out the (probably old) status */
switch (iTCO_wdt_private.iTCO_version) {
+ case 5:
case 4:
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
new file mode 100644
index 000000000000..48b84df2afda
--- /dev/null
+++ b/drivers/watchdog/max77620_wdt.c
@@ -0,0 +1,227 @@
+/*
+ * Maxim MAX77620 Watchdog Driver
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/max77620.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+struct max77620_wdt {
+ struct device *dev;
+ struct regmap *rmap;
+ struct watchdog_device wdt_dev;
+};
+
+static int max77620_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_WDTEN, MAX77620_WDTEN);
+}
+
+static int max77620_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_WDTEN, 0);
+}
+
+static int max77620_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL3,
+ MAX77620_WDTC_MASK, 0x1);
+}
+
+static int max77620_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+ unsigned int wdt_timeout;
+ u8 regval;
+ int ret;
+
+ switch (timeout) {
+ case 0 ... 2:
+ regval = MAX77620_TWD_2s;
+ wdt_timeout = 2;
+ break;
+
+ case 3 ... 16:
+ regval = MAX77620_TWD_16s;
+ wdt_timeout = 16;
+ break;
+
+ case 17 ... 64:
+ regval = MAX77620_TWD_64s;
+ wdt_timeout = 64;
+ break;
+
+ default:
+ regval = MAX77620_TWD_128s;
+ wdt_timeout = 128;
+ break;
+ }
+
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL3,
+ MAX77620_WDTC_MASK, 0x1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_TWD_MASK, regval);
+ if (ret < 0)
+ return ret;
+
+ wdt_dev->timeout = wdt_timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info max77620_wdt_info = {
+ .identity = "max77620-watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops max77620_wdt_ops = {
+ .start = max77620_wdt_start,
+ .stop = max77620_wdt_stop,
+ .ping = max77620_wdt_ping,
+ .set_timeout = max77620_wdt_set_timeout,
+};
+
+static int max77620_wdt_probe(struct platform_device *pdev)
+{
+ struct max77620_wdt *wdt;
+ struct watchdog_device *wdt_dev;
+ unsigned int regval;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->dev = &pdev->dev;
+ wdt->rmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!wdt->rmap) {
+ dev_err(wdt->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ wdt_dev = &wdt->wdt_dev;
+ wdt_dev->info = &max77620_wdt_info;
+ wdt_dev->ops = &max77620_wdt_ops;
+ wdt_dev->min_timeout = 2;
+ wdt_dev->max_timeout = 128;
+ wdt_dev->max_hw_heartbeat_ms = 128 * 1000;
+
+ platform_set_drvdata(pdev, wdt);
+
+ /* Enable WD_RST_WK - WDT expire results in a restart */
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_ONOFFCNFG2,
+ MAX77620_ONOFFCNFG2_WD_RST_WK,
+ MAX77620_ONOFFCNFG2_WD_RST_WK);
+ if (ret < 0) {
+ dev_err(wdt->dev, "Failed to set WD_RST_WK: %d\n", ret);
+ return ret;
+ }
+
+ /* Set WDT clear in OFF and sleep mode */
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_WDTOFFC | MAX77620_WDTSLPC,
+ MAX77620_WDTOFFC | MAX77620_WDTSLPC);
+ if (ret < 0) {
+ dev_err(wdt->dev, "Failed to set WDT OFF mode: %d\n", ret);
+ return ret;
+ }
+
+ /* Check if WDT running and if yes then set flags properly */
+ ret = regmap_read(wdt->rmap, MAX77620_REG_CNFGGLBL2, &regval);
+ if (ret < 0) {
+ dev_err(wdt->dev, "Failed to read WDT CFG register: %d\n", ret);
+ return ret;
+ }
+
+ switch (regval & MAX77620_TWD_MASK) {
+ case MAX77620_TWD_2s:
+ wdt_dev->timeout = 2;
+ break;
+ case MAX77620_TWD_16s:
+ wdt_dev->timeout = 16;
+ break;
+ case MAX77620_TWD_64s:
+ wdt_dev->timeout = 64;
+ break;
+ default:
+ wdt_dev->timeout = 128;
+ break;
+ }
+
+ if (regval & MAX77620_WDTEN)
+ set_bit(WDOG_HW_RUNNING, &wdt_dev->status);
+
+ watchdog_set_nowayout(wdt_dev, nowayout);
+ watchdog_set_drvdata(wdt_dev, wdt);
+
+ ret = watchdog_register_device(wdt_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77620_wdt_remove(struct platform_device *pdev)
+{
+ struct max77620_wdt *wdt = platform_get_drvdata(pdev);
+
+ max77620_wdt_stop(&wdt->wdt_dev);
+ watchdog_unregister_device(&wdt->wdt_dev);
+
+ return 0;
+}
+
+static struct platform_device_id max77620_wdt_devtype[] = {
+ { .name = "max77620-watchdog", },
+ { },
+};
+
+static struct platform_driver max77620_wdt_driver = {
+ .driver = {
+ .name = "max77620-watchdog",
+ },
+ .probe = max77620_wdt_probe,
+ .remove = max77620_wdt_remove,
+ .id_table = max77620_wdt_devtype,
+};
+
+module_platform_driver(max77620_wdt_driver);
+
+MODULE_DESCRIPTION("Max77620 watchdog timer driver");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
new file mode 100644
index 000000000000..44d180a2c5e5
--- /dev/null
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -0,0 +1,270 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_TIMEOUT 30 /* seconds */
+
+#define GXBB_WDT_CTRL_REG 0x0
+#define GXBB_WDT_TCNT_REG 0x8
+#define GXBB_WDT_RSET_REG 0xc
+
+#define GXBB_WDT_CTRL_CLKDIV_EN BIT(25)
+#define GXBB_WDT_CTRL_CLK_EN BIT(24)
+#define GXBB_WDT_CTRL_EE_RESET BIT(21)
+#define GXBB_WDT_CTRL_EN BIT(18)
+#define GXBB_WDT_CTRL_DIV_MASK (BIT(18) - 1)
+
+#define GXBB_WDT_TCNT_SETUP_MASK (BIT(16) - 1)
+#define GXBB_WDT_TCNT_CNT_SHIFT 16
+
+struct meson_gxbb_wdt {
+ void __iomem *reg_base;
+ struct watchdog_device wdt_dev;
+ struct clk *clk;
+};
+
+static int meson_gxbb_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+
+ writel(readl(data->reg_base + GXBB_WDT_CTRL_REG) | GXBB_WDT_CTRL_EN,
+ data->reg_base + GXBB_WDT_CTRL_REG);
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+
+ writel(readl(data->reg_base + GXBB_WDT_CTRL_REG) & ~GXBB_WDT_CTRL_EN,
+ data->reg_base + GXBB_WDT_CTRL_REG);
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+
+ writel(0, data->reg_base + GXBB_WDT_RSET_REG);
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+ unsigned long tcnt = timeout * 1000;
+
+ if (tcnt > GXBB_WDT_TCNT_SETUP_MASK)
+ tcnt = GXBB_WDT_TCNT_SETUP_MASK;
+
+ wdt_dev->timeout = timeout;
+
+ meson_gxbb_wdt_ping(wdt_dev);
+
+ writel(tcnt, data->reg_base + GXBB_WDT_TCNT_REG);
+
+ return 0;
+}
+
+static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+ unsigned long reg;
+
+ reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
+
+ return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) -
+ (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000;
+}
+
+static const struct watchdog_ops meson_gxbb_wdt_ops = {
+ .start = meson_gxbb_wdt_start,
+ .stop = meson_gxbb_wdt_stop,
+ .ping = meson_gxbb_wdt_ping,
+ .set_timeout = meson_gxbb_wdt_set_timeout,
+ .get_timeleft = meson_gxbb_wdt_get_timeleft,
+};
+
+static const struct watchdog_info meson_gxbb_wdt_info = {
+ .identity = "Meson GXBB Watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static int __maybe_unused meson_gxbb_wdt_resume(struct device *dev)
+{
+ struct meson_gxbb_wdt *data = dev_get_drvdata(dev);
+
+ if (watchdog_active(&data->wdt_dev))
+ meson_gxbb_wdt_start(&data->wdt_dev);
+
+ return 0;
+}
+
+static int __maybe_unused meson_gxbb_wdt_suspend(struct device *dev)
+{
+ struct meson_gxbb_wdt *data = dev_get_drvdata(dev);
+
+ if (watchdog_active(&data->wdt_dev))
+ meson_gxbb_wdt_stop(&data->wdt_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops meson_gxbb_wdt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_gxbb_wdt_suspend, meson_gxbb_wdt_resume)
+};
+
+static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-wdt", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
+
+static int meson_gxbb_wdt_probe(struct platform_device *pdev)
+{
+ struct meson_gxbb_wdt *data;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_base))
+ return PTR_ERR(data->reg_base);
+
+ data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ clk_prepare_enable(data->clk);
+
+ platform_set_drvdata(pdev, data);
+
+ data->wdt_dev.parent = &pdev->dev;
+ data->wdt_dev.info = &meson_gxbb_wdt_info;
+ data->wdt_dev.ops = &meson_gxbb_wdt_ops;
+ data->wdt_dev.max_hw_heartbeat_ms = GXBB_WDT_TCNT_SETUP_MASK;
+ data->wdt_dev.min_timeout = 1;
+ data->wdt_dev.timeout = DEFAULT_TIMEOUT;
+ watchdog_set_drvdata(&data->wdt_dev, data);
+
+ /* Setup with 1ms timebase */
+ writel(((clk_get_rate(data->clk) / 1000) & GXBB_WDT_CTRL_DIV_MASK) |
+ GXBB_WDT_CTRL_EE_RESET |
+ GXBB_WDT_CTRL_CLK_EN |
+ GXBB_WDT_CTRL_CLKDIV_EN,
+ data->reg_base + GXBB_WDT_CTRL_REG);
+
+ meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
+
+ ret = watchdog_register_device(&data->wdt_dev);
+ if (ret) {
+ clk_disable_unprepare(data->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_remove(struct platform_device *pdev)
+{
+ struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&data->wdt_dev);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static void meson_gxbb_wdt_shutdown(struct platform_device *pdev)
+{
+ struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
+
+ meson_gxbb_wdt_stop(&data->wdt_dev);
+}
+
+static struct platform_driver meson_gxbb_wdt_driver = {
+ .probe = meson_gxbb_wdt_probe,
+ .remove = meson_gxbb_wdt_remove,
+ .shutdown = meson_gxbb_wdt_shutdown,
+ .driver = {
+ .name = "meson-gxbb-wdt",
+ .pm = &meson_gxbb_wdt_pm_ops,
+ .of_match_table = meson_gxbb_wdt_dt_ids,
+ },
+};
+
+module_platform_driver(meson_gxbb_wdt_driver);
+
+MODULE_ALIAS("platform:meson-gxbb-wdt");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic Meson GXBB Watchdog timer driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index bd917bb757b8..a0fabf6f92b0 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -294,6 +294,8 @@ static const struct pci_device_id tco_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index e936f15dc7c7..3ad5206d7935 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -992,19 +992,7 @@ static struct isa_driver pcwd_isa_driver = {
},
};
-static int __init pcwd_init_module(void)
-{
- return isa_register_driver(&pcwd_isa_driver, PCWD_ISA_NR_CARDS);
-}
-
-static void __exit pcwd_cleanup_module(void)
-{
- isa_unregister_driver(&pcwd_isa_driver);
- pr_info("Watchdog Module Unloaded\n");
-}
-
-module_init(pcwd_init_module);
-module_exit(pcwd_cleanup_module);
+module_isa_driver(pcwd_isa_driver, PCWD_ISA_NR_CARDS);
MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, "
"Wim Van Sebroeck <wim@iguana.be>");
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index 962f58c03353..c797305f8338 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -176,8 +176,8 @@ static int pic32_dmt_probe(struct platform_device *pdev)
struct watchdog_device *wdd = &pic32_dmt_wdd;
dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL);
- if (IS_ERR(dmt))
- return PTR_ERR(dmt);
+ if (!dmt)
+ return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmt->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -245,7 +245,6 @@ static struct platform_driver pic32_dmt_driver = {
.remove = pic32_dmt_remove,
.driver = {
.name = "pic32-dmt",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_dmt_of_ids),
}
};
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index 6047aa89a4d3..e2761068dc6f 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -174,8 +174,8 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev)
struct resource *mem;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
- if (IS_ERR(wdt))
- return PTR_ERR(wdt);
+ if (!wdt)
+ return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -183,8 +183,8 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev)
return PTR_ERR(wdt->regs);
wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10);
- if (IS_ERR(wdt->rst_base))
- return PTR_ERR(wdt->rst_base);
+ if (!wdt->rst_base)
+ return -ENOMEM;
wdt->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt->clk)) {
@@ -251,7 +251,6 @@ static struct platform_driver pic32_wdt_driver = {
.remove = pic32_wdt_drv_remove,
.driver = {
.name = "pic32-wdt",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_wdt_dt_ids),
}
};
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index a043fa4f60e5..5796b5d1b3f2 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -18,19 +18,45 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
+#include <linux/of_device.h>
+
+enum wdt_reg {
+ WDT_RST,
+ WDT_EN,
+ WDT_STS,
+ WDT_BARK_TIME,
+ WDT_BITE_TIME,
+};
-#define WDT_RST 0x38
-#define WDT_EN 0x40
-#define WDT_STS 0x44
-#define WDT_BITE_TIME 0x5C
+static const u32 reg_offset_data_apcs_tmr[] = {
+ [WDT_RST] = 0x38,
+ [WDT_EN] = 0x40,
+ [WDT_STS] = 0x44,
+ [WDT_BARK_TIME] = 0x4C,
+ [WDT_BITE_TIME] = 0x5C,
+};
+
+static const u32 reg_offset_data_kpss[] = {
+ [WDT_RST] = 0x4,
+ [WDT_EN] = 0x8,
+ [WDT_STS] = 0xC,
+ [WDT_BARK_TIME] = 0x10,
+ [WDT_BITE_TIME] = 0x14,
+};
struct qcom_wdt {
struct watchdog_device wdd;
struct clk *clk;
unsigned long rate;
void __iomem *base;
+ const u32 *layout;
};
+static void __iomem *wdt_addr(struct qcom_wdt *wdt, enum wdt_reg reg)
+{
+ return wdt->base + wdt->layout[reg];
+}
+
static inline
struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
{
@@ -41,10 +67,11 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
- writel(0, wdt->base + WDT_EN);
- writel(1, wdt->base + WDT_RST);
- writel(wdd->timeout * wdt->rate, wdt->base + WDT_BITE_TIME);
- writel(1, wdt->base + WDT_EN);
+ writel(0, wdt_addr(wdt, WDT_EN));
+ writel(1, wdt_addr(wdt, WDT_RST));
+ writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
+ writel(1, wdt_addr(wdt, WDT_EN));
return 0;
}
@@ -52,7 +79,7 @@ static int qcom_wdt_stop(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
- writel(0, wdt->base + WDT_EN);
+ writel(0, wdt_addr(wdt, WDT_EN));
return 0;
}
@@ -60,7 +87,7 @@ static int qcom_wdt_ping(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
- writel(1, wdt->base + WDT_RST);
+ writel(1, wdt_addr(wdt, WDT_RST));
return 0;
}
@@ -83,10 +110,11 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
*/
timeout = 128 * wdt->rate / 1000;
- writel(0, wdt->base + WDT_EN);
- writel(1, wdt->base + WDT_RST);
- writel(timeout, wdt->base + WDT_BITE_TIME);
- writel(1, wdt->base + WDT_EN);
+ writel(0, wdt_addr(wdt, WDT_EN));
+ writel(1, wdt_addr(wdt, WDT_RST));
+ writel(timeout, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(timeout, wdt_addr(wdt, WDT_BITE_TIME));
+ writel(1, wdt_addr(wdt, WDT_EN));
/*
* Actually make sure the above sequence hits hardware before sleeping.
@@ -119,9 +147,16 @@ static int qcom_wdt_probe(struct platform_device *pdev)
struct qcom_wdt *wdt;
struct resource *res;
struct device_node *np = pdev->dev.of_node;
+ const u32 *regs;
u32 percpu_offset;
int ret;
+ regs = of_device_get_match_data(&pdev->dev);
+ if (!regs) {
+ dev_err(&pdev->dev, "Unsupported QCOM WDT module\n");
+ return -ENODEV;
+ }
+
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -172,6 +207,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
wdt->wdd.parent = &pdev->dev;
+ wdt->layout = regs;
if (readl(wdt->base + WDT_STS) & 1)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
@@ -208,8 +244,9 @@ static int qcom_wdt_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_wdt_of_table[] = {
- { .compatible = "qcom,kpss-timer" },
- { .compatible = "qcom,scss-timer" },
+ { .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr },
+ { .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr },
+ { .compatible = "qcom,kpss-wdt", .data = reg_offset_data_kpss },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index ad383f6f15fc..ce0c38bd0f00 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -180,15 +180,6 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
return 0;
}
-static unsigned int sbsa_gwdt_status(struct watchdog_device *wdd)
-{
- struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
- u32 status = readl(gwdt->control_base + SBSA_GWDT_WCS);
-
- /* is the watchdog timer running? */
- return (status & SBSA_GWDT_WCS_EN) << WDOG_ACTIVE;
-}
-
static int sbsa_gwdt_start(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
@@ -228,7 +219,6 @@ static struct watchdog_ops sbsa_gwdt_ops = {
.owner = THIS_MODULE,
.start = sbsa_gwdt_start,
.stop = sbsa_gwdt_stop,
- .status = sbsa_gwdt_status,
.ping = sbsa_gwdt_keepalive,
.set_timeout = sbsa_gwdt_set_timeout,
.get_timeleft = sbsa_gwdt_get_timeleft,
@@ -273,7 +263,7 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
wdd->info = &sbsa_gwdt_info;
wdd->ops = &sbsa_gwdt_ops;
wdd->min_timeout = 1;
- wdd->max_timeout = U32_MAX / gwdt->clk;
+ wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;
wdd->timeout = DEFAULT_TIMEOUT;
watchdog_set_drvdata(wdd, gwdt);
watchdog_set_nowayout(wdd, nowayout);
@@ -283,6 +273,8 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
dev_warn(dev, "System reset by WDT.\n");
wdd->bootstatus |= WDIOF_CARDRESET;
}
+ if (status & SBSA_GWDT_WCS_EN)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
if (action) {
irq = platform_get_irq(pdev, 0);
@@ -310,7 +302,7 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
* the timeout is (WOR * 2), so the maximum timeout should be doubled.
*/
if (!action)
- wdd->max_timeout *= 2;
+ wdd->max_hw_heartbeat_ms *= 2;
watchdog_init_timeout(wdd, timeout, dev);
/*
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index d0578ab2e636..3050a0031479 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -39,13 +39,18 @@ MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)");
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static void __iomem *sirfsoc_wdt_base(struct watchdog_device *wdd)
+{
+ return (void __iomem __force *)watchdog_get_drvdata(wdd);
+}
+
static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd)
{
u32 counter, match;
void __iomem *wdt_base;
int time_left;
- wdt_base = watchdog_get_drvdata(wdd);
+ wdt_base = sirfsoc_wdt_base(wdd);
counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO);
match = readl(wdt_base +
SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
@@ -61,7 +66,7 @@ static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
void __iomem *wdt_base;
timeout_ticks = wdd->timeout * CLOCK_FREQ;
- wdt_base = watchdog_get_drvdata(wdd);
+ wdt_base = sirfsoc_wdt_base(wdd);
/* Enable the latch before reading the LATCH_LO register */
writel(1, wdt_base + SIRFSOC_TIMER_LATCH);
@@ -79,7 +84,7 @@ static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
{
- void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+ void __iomem *wdt_base = sirfsoc_wdt_base(wdd);
sirfsoc_wdt_updatetimeout(wdd);
/*
@@ -96,7 +101,7 @@ static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
static int sirfsoc_wdt_disable(struct watchdog_device *wdd)
{
- void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+ void __iomem *wdt_base = sirfsoc_wdt_base(wdd);
writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
@@ -150,7 +155,7 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- watchdog_set_drvdata(&sirfsoc_wdd, base);
+ watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base);
watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 99a06f9e3930..b067edf246df 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -17,36 +17,19 @@
*
* Software only watchdog driver. Unlike its big brother the WDT501P
* driver this won't always recover a failed machine.
- *
- * 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> :
- * Modularised.
- * Added soft_margin; use upon insmod to change the timer delay.
- * NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate
- * minors.
- *
- * 19980911 Alan Cox
- * Made SMP safe for 2.3.x
- *
- * 20011127 Joel Becker (jlbec@evilplan.org>
- * Added soft_noboot; Allows testing the softdog trigger without
- * requiring a recompile.
- * Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT.
- *
- * 20020530 Joel Becker <joel.becker@oracle.com>
- * Added Matt Domsch's nowayout module option.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/types.h>
+#include <linux/reboot.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/watchdog.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
#define TIMER_MARGIN 60 /* Default is 60 seconds */
static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */
@@ -71,25 +54,12 @@ module_param(soft_panic, int, 0);
MODULE_PARM_DESC(soft_panic,
"Softdog action, set to 1 to panic, 0 to reboot (default=0)");
-/*
- * Our timer
- */
-
-static void watchdog_fire(unsigned long);
-
-static struct timer_list watchdog_ticktock =
- TIMER_INITIALIZER(watchdog_fire, 0, 0);
-
-/*
- * If the timer expires..
- */
-
-static void watchdog_fire(unsigned long data)
+static void softdog_fire(unsigned long data)
{
module_put(THIS_MODULE);
- if (soft_noboot)
+ if (soft_noboot) {
pr_crit("Triggered - Reboot ignored\n");
- else if (soft_panic) {
+ } else if (soft_panic) {
pr_crit("Initiating panic\n");
panic("Software Watchdog Timer expired");
} else {
@@ -99,35 +69,24 @@ static void watchdog_fire(unsigned long data)
}
}
-/*
- * Softdog operations
- */
+static struct timer_list softdog_ticktock =
+ TIMER_INITIALIZER(softdog_fire, 0, 0);
static int softdog_ping(struct watchdog_device *w)
{
- if (!mod_timer(&watchdog_ticktock, jiffies+(w->timeout*HZ)))
+ if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ)))
__module_get(THIS_MODULE);
return 0;
}
static int softdog_stop(struct watchdog_device *w)
{
- if (del_timer(&watchdog_ticktock))
+ if (del_timer(&softdog_ticktock))
module_put(THIS_MODULE);
return 0;
}
-static int softdog_set_timeout(struct watchdog_device *w, unsigned int t)
-{
- w->timeout = t;
- return 0;
-}
-
-/*
- * Kernel Interfaces
- */
-
static struct watchdog_info softdog_info = {
.identity = "Software Watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
@@ -137,29 +96,21 @@ static struct watchdog_ops softdog_ops = {
.owner = THIS_MODULE,
.start = softdog_ping,
.stop = softdog_stop,
- .set_timeout = softdog_set_timeout,
};
static struct watchdog_device softdog_dev = {
.info = &softdog_info,
.ops = &softdog_ops,
.min_timeout = 1,
- .max_timeout = 0xFFFF
+ .max_timeout = 65535,
+ .timeout = TIMER_MARGIN,
};
-static int __init watchdog_init(void)
+static int __init softdog_init(void)
{
int ret;
- /* Check that the soft_margin value is within it's range;
- if not reset to the default */
- if (soft_margin < 1 || soft_margin > 65535) {
- pr_info("soft_margin must be 0 < soft_margin < 65536, using %d\n",
- TIMER_MARGIN);
- return -EINVAL;
- }
- softdog_dev.timeout = soft_margin;
-
+ watchdog_init_timeout(&softdog_dev, soft_margin, NULL);
watchdog_set_nowayout(&softdog_dev, nowayout);
watchdog_stop_on_reboot(&softdog_dev);
@@ -167,19 +118,18 @@ static int __init watchdog_init(void)
if (ret)
return ret;
- pr_info("Software Watchdog Timer: 0.08 initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
- soft_noboot, soft_margin, soft_panic, nowayout);
+ pr_info("initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
+ soft_noboot, softdog_dev.timeout, soft_panic, nowayout);
return 0;
}
+module_init(softdog_init);
-static void __exit watchdog_exit(void)
+static void __exit softdog_exit(void)
{
watchdog_unregister_device(&softdog_dev);
}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
+module_exit(softdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Software Watchdog Device Driver");
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index cfbed7e051b6..202c4b9cc921 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -149,7 +149,7 @@ static int tangox_wdt_probe(struct platform_device *pdev)
dev->wdt.ops = &tangox_wdt_ops;
dev->wdt.timeout = DEFAULT_TIMEOUT;
dev->wdt.min_timeout = 1;
- dev->wdt.max_timeout = (U32_MAX - 1) / dev->clk_rate;
+ dev->wdt.max_hw_heartbeat_ms = (U32_MAX - 1) / dev->clk_rate;
watchdog_init_timeout(&dev->wdt, timeout, &pdev->dev);
watchdog_set_nowayout(&dev->wdt, nowayout);
@@ -170,7 +170,7 @@ static int tangox_wdt_probe(struct platform_device *pdev)
* already running.
*/
if (readl(dev->base + WD_COUNTER)) {
- set_bit(WDOG_ACTIVE, &dev->wdt.status);
+ set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
tangox_wdt_start(&dev->wdt);
}
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 7c3ba58ae1be..6abb83cd7681 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -88,7 +88,7 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
* Check that we have valid min and max timeout values, if
* not reset them both to 0 (=not used or unknown)
*/
- if (wdd->min_timeout > wdd->max_timeout) {
+ if (!wdd->max_hw_heartbeat_ms && wdd->min_timeout > wdd->max_timeout) {
pr_info("Invalid min and max timeout values, resetting to 0!\n");
wdd->min_timeout = 0;
wdd->max_timeout = 0;
@@ -329,6 +329,43 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+static void devm_watchdog_unregister_device(struct device *dev, void *res)
+{
+ watchdog_unregister_device(*(struct watchdog_device **)res);
+}
+
+/**
+ * devm_watchdog_register_device() - resource managed watchdog_register_device()
+ * @dev: device that is registering this watchdog device
+ * @wdd: watchdog device
+ *
+ * Managed watchdog_register_device(). For watchdog device registered by this
+ * function, watchdog_unregister_device() is automatically called on driver
+ * detach. See watchdog_register_device() for more information.
+ */
+int devm_watchdog_register_device(struct device *dev,
+ struct watchdog_device *wdd)
+{
+ struct watchdog_device **rcwdd;
+ int ret;
+
+ rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd),
+ GFP_KERNEL);
+ if (!rcwdd)
+ return -ENOMEM;
+
+ ret = watchdog_register_device(wdd);
+ if (!ret) {
+ *rcwdd = wdd;
+ devres_add(dev, rcwdd);
+ } else {
+ devres_free(rcwdd);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_watchdog_register_device);
+
static int __init watchdog_deferred_registration(void)
{
mutex_lock(&wtd_deferred_reg_mutex);
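
The managed helper added above is meant to drop the explicit unregister call from driver remove paths. A minimal probe built on it might look like the following sketch; the foo_* names, tables and timeouts are hypothetical and not taken from this patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd)
{
	return 0;	/* kick the hypothetical hardware here */
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	return 0;	/* stop the hypothetical hardware here */
}

static const struct watchdog_info foo_wdt_info = {
	.identity = "foo-wdt",
	.options  = WDIOF_KEEPALIVEPING,
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop  = foo_wdt_stop,
};

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;

	wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->info = &foo_wdt_info;
	wdd->ops = &foo_wdt_ops;
	wdd->parent = &pdev->dev;
	wdd->min_timeout = 1;
	wdd->max_timeout = 60;
	wdd->timeout = 30;

	/*
	 * No unregister call is needed in a remove path: the devres action
	 * added above calls watchdog_unregister_device() automatically when
	 * the driver detaches.
	 */
	return devm_watchdog_register_device(&pdev->dev, wdd);
}

static struct platform_driver foo_wdt_driver = {
	.probe = foo_wdt_probe,
	.driver = {
		.name = "foo-wdt",
	},
};
module_platform_driver(foo_wdt_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Example watchdog using devm_watchdog_register_device()");
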
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 3595cffa24ea..040bf8382f46 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -69,6 +69,7 @@ struct watchdog_core_data {
unsigned long status; /* Internal status bits */
#define _WDOG_DEV_OPEN 0 /* Opened ? */
#define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
+#define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */
};
/* the dev_t structure to store the dynamically allocated watchdog devices */
@@ -92,9 +93,13 @@ static inline bool watchdog_need_worker(struct watchdog_device *wdd)
* thus is aware that the framework supports generating heartbeat
* requests.
* - Userspace requests a longer timeout than the hardware can handle.
+ *
+ * Alternatively, if userspace has not opened the watchdog
+ * device, we take care of feeding the watchdog if it is
+ * running.
*/
- return hm && ((watchdog_active(wdd) && t > hm) ||
- (t && !watchdog_active(wdd) && watchdog_hw_running(wdd)));
+ return (hm && watchdog_active(wdd) && t > hm) ||
+ (t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}
static long watchdog_next_keepalive(struct watchdog_device *wdd)
@@ -107,7 +112,7 @@ static long watchdog_next_keepalive(struct watchdog_device *wdd)
unsigned int hw_heartbeat_ms;
virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
- hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms);
+ hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
if (!watchdog_active(wdd))
@@ -180,6 +185,8 @@ static int watchdog_ping(struct watchdog_device *wdd)
if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
return 0;
+ set_bit(_WDOG_KEEPALIVE, &wd_data->status);
+
wd_data->last_keepalive = jiffies;
return __watchdog_ping(wdd);
}
@@ -219,6 +226,8 @@ static int watchdog_start(struct watchdog_device *wdd)
if (watchdog_active(wdd))
return 0;
+ set_bit(_WDOG_KEEPALIVE, &wd_data->status);
+
started_at = jiffies;
if (watchdog_hw_running(wdd) && wdd->ops->ping)
err = wdd->ops->ping(wdd);
@@ -258,10 +267,12 @@ static int watchdog_stop(struct watchdog_device *wdd)
return -EBUSY;
}
- if (wdd->ops->stop)
+ if (wdd->ops->stop) {
+ clear_bit(WDOG_HW_RUNNING, &wdd->status);
err = wdd->ops->stop(wdd);
- else
+ } else {
set_bit(WDOG_HW_RUNNING, &wdd->status);
+ }
if (err == 0) {
clear_bit(WDOG_ACTIVE, &wdd->status);
@@ -282,10 +293,27 @@ static int watchdog_stop(struct watchdog_device *wdd)
static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
- if (!wdd->ops->status)
- return 0;
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ unsigned int status;
+
+ if (wdd->ops->status)
+ status = wdd->ops->status(wdd);
+ else
+ status = wdd->bootstatus & (WDIOF_CARDRESET |
+ WDIOF_OVERHEAT |
+ WDIOF_FANFAULT |
+ WDIOF_EXTERN1 |
+ WDIOF_EXTERN2 |
+ WDIOF_POWERUNDER |
+ WDIOF_POWEROVER);
- return wdd->ops->status(wdd);
+ if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
+ status |= WDIOF_MAGICCLOSE;
+
+ if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
+ status |= WDIOF_KEEPALIVEPING;
+
+ return status;
}
/*
@@ -361,7 +389,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
status = watchdog_get_status(wdd);
mutex_unlock(&wd_data->lock);
- return sprintf(buf, "%u\n", status);
+ return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);
@@ -429,9 +457,7 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
struct watchdog_device *wdd = dev_get_drvdata(dev);
umode_t mode = attr->mode;
- if (attr == &dev_attr_status.attr && !wdd->ops->status)
- mode = 0;
- else if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
+ if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
mode = 0;
return mode;
@@ -948,17 +974,22 @@ int __init watchdog_dev_init(void)
err = class_register(&watchdog_class);
if (err < 0) {
pr_err("couldn't register class\n");
- return err;
+ goto err_register;
}
err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
if (err < 0) {
pr_err("watchdog: unable to allocate char dev region\n");
- class_unregister(&watchdog_class);
- return err;
+ goto err_alloc;
}
return 0;
+
+err_alloc:
+ class_unregister(&watchdog_class);
+err_register:
+ destroy_workqueue(watchdog_wq);
+ return err;
}
/*
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index cbe373de3659..fa1efef3c96e 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -339,7 +339,7 @@ static int ziirave_wdt_remove(struct i2c_client *client)
}
static struct i2c_device_id ziirave_wdt_id[] = {
- { "ziirave-wdt", 0 },
+ { "rave-wdt", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ziirave_wdt_id);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 7399782c0998..87e6035c9e81 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -294,7 +294,7 @@ error:
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
int order = get_order(size);
@@ -346,7 +346,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr, struct dma_attrs *attrs)
+ dma_addr_t dev_addr, unsigned long attrs)
{
int order = get_order(size);
phys_addr_t phys;
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_bus(phys);
@@ -434,7 +434,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
*/
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
@@ -462,7 +462,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -599,7 +599,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 47db55aee7f2..60fb47469c86 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -257,36 +257,12 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
return v9fs_fid_lookup_with_uid(dentry, uid, any);
}
-struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
-{
- struct p9_fid *fid, *ret;
-
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return fid;
-
- ret = p9_client_walk(fid, 0, NULL, 1);
- return ret;
-}
-
-static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, kuid_t uid)
-{
- struct p9_fid *fid, *ret;
-
- fid = v9fs_fid_lookup_with_uid(dentry, uid, 0);
- if (IS_ERR(fid))
- return fid;
-
- ret = p9_client_walk(fid, 0, NULL, 1);
- return ret;
-}
-
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
int err;
struct p9_fid *fid;
- fid = v9fs_fid_clone_with_uid(dentry, GLOBAL_ROOT_UID);
+ fid = clone_fid(v9fs_fid_lookup_with_uid(dentry, GLOBAL_ROOT_UID, 0));
if (IS_ERR(fid))
goto error_out;
/*
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 12700df0bb51..4491bcaf42b8 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -28,7 +28,14 @@ static inline struct p9_fid *v9fs_parent_fid(struct dentry *dentry)
{
return v9fs_fid_lookup(dentry->d_parent);
}
-struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
+static inline struct p9_fid *clone_fid(struct p9_fid *fid)
+{
+ return IS_ERR(fid) ? fid : p9_client_walk(fid, 0, NULL, 1);
+}
+static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
+{
+ return clone_fid(v9fs_fid_lookup(dentry));
+}
#endif
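The fs/9p changes below replace open-coded p9_client_walk() clones with the clone_fid() inline added just above; its point is that an ERR_PTR result from a lookup can be passed straight through without a separate IS_ERR() check at every call site. A toy, user-space-only sketch of that error-propagation idiom (the ERR_PTR/IS_ERR stand-ins and walk_clone() are simplifications for illustration, not the 9p client API):

	#include <stdio.h>
	#include <errno.h>

	/* toy stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers */
	#define MAX_ERRNO 4095
	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	struct p9_fid { int id; };
	static struct p9_fid real_fid = { 42 };

	/* stand-in for p9_client_walk(fid, 0, NULL, 1) */
	static struct p9_fid *walk_clone(struct p9_fid *fid) { return fid; }

	/* the idiom introduced by the patch: errors pass straight through */
	static struct p9_fid *clone_fid(struct p9_fid *fid)
	{
		return IS_ERR(fid) ? fid : walk_clone(fid);
	}

	int main(void)
	{
		struct p9_fid *ok = clone_fid(&real_fid);
		struct p9_fid *bad = clone_fid(ERR_PTR(-ENOENT));

		printf("ok  -> id %d\n", ok->id);
		printf("bad -> err %ld\n", IS_ERR(bad) ? PTR_ERR(bad) : 0L);
		return 0;
	}
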
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 7da9a8354fad..8b1999b528e9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -661,7 +661,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
}
/* clone a fid to use for creation */
- ofid = p9_client_walk(dfid, 0, NULL, 1);
+ ofid = clone_fid(dfid);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
@@ -975,13 +975,13 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_ERR(oldfid))
return PTR_ERR(oldfid);
- olddirfid = v9fs_parent_fid(old_dentry);
+ olddirfid = clone_fid(v9fs_parent_fid(old_dentry));
if (IS_ERR(olddirfid)) {
retval = PTR_ERR(olddirfid);
goto done;
}
- newdirfid = v9fs_parent_fid(new_dentry);
+ newdirfid = clone_fid(v9fs_parent_fid(new_dentry));
if (IS_ERR(newdirfid)) {
retval = PTR_ERR(newdirfid);
goto clunk_olddir;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 2ed04c2fe7af..eeabcb0bad12 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -281,7 +281,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
/* clone a fid to use for creation */
- ofid = p9_client_walk(dfid, 0, NULL, 1);
+ ofid = clone_fid(dfid);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index a6bd349bab23..f329eee6dc93 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -97,8 +97,6 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t value_len, int flags)
{
struct p9_fid *fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
return v9fs_fid_xattr_set(fid, name, value, value_len, flags);
}
@@ -115,7 +113,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
name, value_len, flags);
/* Clone it */
- fid = p9_client_walk(fid, 0, NULL, 1);
+ fid = clone_fid(fid);
if (IS_ERR(fid))
return PTR_ERR(fid);
diff --git a/fs/Kconfig b/fs/Kconfig
index 4524916fa200..2bc7ad775842 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -70,6 +70,12 @@ config FS_POSIX_ACL
config EXPORTFS
tristate
+config EXPORTFS_BLOCK_OPS
+ bool "Enable filesystem export operations for block IO"
+ help
+ This option enables the export operations for a filesystem to support
+ external block IO.
+
config FILE_LOCKING
bool "Enable POSIX file locking API" if EXPERT
default y
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 72c03354c14b..c7efddf6e038 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -89,7 +89,8 @@ config BINFMT_SCRIPT
config BINFMT_FLAT
bool "Kernel support for flat binaries"
- depends on !MMU && (!FRV || BROKEN)
+ depends on !MMU || M68K
+ depends on !FRV || BROKEN
help
Support uClinux FLAT format binaries.
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index bec25f7017c0..29444c83da48 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -101,7 +101,7 @@ out:
}
static int
-adfs_match(struct qstr *name, struct object_info *obj)
+adfs_match(const struct qstr *name, struct object_info *obj)
{
int i;
@@ -126,7 +126,7 @@ adfs_match(struct qstr *name, struct object_info *obj)
}
static int
-adfs_dir_lookup_byname(struct inode *inode, struct qstr *name, struct object_info *obj)
+adfs_dir_lookup_byname(struct inode *inode, const struct qstr *name, struct object_info *obj)
{
struct super_block *sb = inode->i_sb;
const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
@@ -227,7 +227,7 @@ adfs_hash(const struct dentry *parent, struct qstr *qstr)
* requirements of the underlying filesystem.
*/
static int
-adfs_compare(const struct dentry *parent, const struct dentry *dentry,
+adfs_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
int i;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index d6c7a51c93e4..d8f217c711d3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -472,9 +472,7 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
bool
affs_nofilenametruncate(const struct dentry *dentry)
{
- struct inode *inode = d_inode(dentry);
-
- return affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_NO_TRUNCATE);
+ return affs_test_opt(AFFS_SB(dentry->d_sb)->s_flags, SF_NO_TRUNCATE);
}
/* Check if the name is valid for a affs object. */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index eb32029bc776..a2d68f828d53 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -14,11 +14,11 @@ typedef int (*toupper_t)(int);
static int affs_toupper(int ch);
static int affs_hash_dentry(const struct dentry *, struct qstr *);
-static int affs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+static int affs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
static int affs_intl_toupper(int ch);
static int affs_intl_hash_dentry(const struct dentry *, struct qstr *);
-static int affs_intl_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+static int affs_intl_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
const struct dentry_operations affs_dentry_operations = {
@@ -131,20 +131,20 @@ static inline int __affs_compare_dentry(unsigned int len,
}
static int
-affs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+affs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return __affs_compare_dentry(len, str, name, affs_toupper,
- affs_nofilenametruncate(parent));
+ affs_nofilenametruncate(dentry));
}
static int
-affs_intl_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+affs_intl_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return __affs_compare_dentry(len, str, name, affs_intl_toupper,
- affs_nofilenametruncate(parent));
+ affs_nofilenametruncate(dentry));
}
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 3767f6641af1..fa84bb8832e0 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -159,7 +159,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct dentry *parent = dentry->d_parent;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
@@ -172,7 +172,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
list_for_each(p, head) {
struct autofs_info *ino;
struct dentry *active;
- struct qstr *qstr;
+ const struct qstr *qstr;
ino = list_entry(p, struct autofs_info, active);
active = ino->dentry;
@@ -214,7 +214,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry,
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct dentry *parent = dentry->d_parent;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
@@ -227,7 +227,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry,
list_for_each(p, head) {
struct autofs_info *ino;
struct dentry *expiring;
- struct qstr *qstr;
+ const struct qstr *qstr;
if (rcu_walk) {
spin_unlock(&sbi->lookup_lock);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 708214457d16..431fd7ee3488 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -225,7 +225,7 @@ rename_retry:
}
static struct autofs_wait_queue *
-autofs4_find_wait(struct autofs_sb_info *sbi, struct qstr *qstr)
+autofs4_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
struct autofs_wait_queue *wq;
@@ -249,7 +249,7 @@ autofs4_find_wait(struct autofs_sb_info *sbi, struct qstr *qstr)
*/
static int validate_request(struct autofs_wait_queue **wait,
struct autofs_sb_info *sbi,
- struct qstr *qstr,
+ const struct qstr *qstr,
struct dentry *dentry, enum autofs_notify notify)
{
struct autofs_wait_queue *wq;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index a7a28110dc80..7f6aff3f72eb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -605,28 +605,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
* Do the same thing for the memory mapping - between
* elf_bss and last_bss is the bss section.
*/
- k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+ k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
if (k > last_bss)
last_bss = k;
}
}
+ /*
+ * Now fill out the bss section: first pad the last page from
+ * the file up to the page boundary, and zero it from elf_bss
+ * up to the end of the page.
+ */
+ if (padzero(elf_bss)) {
+ error = -EFAULT;
+ goto out;
+ }
+ /*
+ * Next, align both the file and mem bss up to the page size,
+ * since this is where elf_bss was just zeroed up to, and where
+ * last_bss will end after the vm_brk() below.
+ */
+ elf_bss = ELF_PAGEALIGN(elf_bss);
+ last_bss = ELF_PAGEALIGN(last_bss);
+ /* Finally, if there is still more bss to allocate, do it. */
if (last_bss > elf_bss) {
- /*
- * Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zero-mapped pages up to and including the
- * last bss page.
- */
- if (padzero(elf_bss)) {
- error = -EFAULT;
- goto out;
- }
-
- /* What we have mapped so far */
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
-
- /* Map the last of the bss segment */
error = vm_brk(elf_bss, last_bss - elf_bss);
if (error)
goto out;
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 203589311bf8..464a972e88c1 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -67,8 +67,6 @@ static int create_elf_fdpic_tables(struct linux_binprm *, struct mm_struct *,
struct elf_fdpic_params *);
#ifndef CONFIG_MMU
-static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *,
- unsigned long *);
static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
struct file *,
struct mm_struct *);
@@ -515,8 +513,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp = mm->start_stack;
/* stack the program arguments and environment */
- if (elf_fdpic_transfer_args_to_stack(bprm, &sp) < 0)
+ if (transfer_args_to_stack(bprm, &sp) < 0)
return -EFAULT;
+ sp &= ~15;
#endif
/*
@@ -711,39 +710,6 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/*****************************************************************************/
/*
- * transfer the program arguments and environment from the holding pages onto
- * the stack
- */
-#ifndef CONFIG_MMU
-static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm,
- unsigned long *_sp)
-{
- unsigned long index, stop, sp;
- char *src;
- int ret = 0;
-
- stop = bprm->p >> PAGE_SHIFT;
- sp = *_sp;
-
- for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
- src = kmap(bprm->page[index]);
- sp -= PAGE_SIZE;
- if (copy_to_user((void *) sp, src, PAGE_SIZE) != 0)
- ret = -EFAULT;
- kunmap(bprm->page[index]);
- if (ret < 0)
- goto out;
- }
-
- *_sp = (*_sp - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p)) & ~15;
-
-out:
- return ret;
-}
-#endif
-
-/*****************************************************************************/
-/*
* load the appropriate binary image (executable or interpreter) into memory
* - we assume no MMU is available
* - if no other PIC bits are set in params->hdr->e_flags
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index 490538536cb4..dd2d3f0cd55d 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -24,7 +24,8 @@
static int load_em86(struct linux_binprm *bprm)
{
- char *interp, *i_name, *i_arg;
+ const char *i_name, *i_arg;
+ char *interp;
struct file * file;
int retval;
struct elfhdr elf_ex;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index caf9e39bb82b..9b2917a30294 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -15,7 +15,8 @@
* JAN/99 -- coded full program relocation (gerg@snapgear.com)
*/
-#include <linux/export.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -25,8 +26,6 @@
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
@@ -34,26 +33,16 @@
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
-#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
/****************************************************************************/
-#if 0
-#define DEBUG 1
-#endif
-
-#ifdef DEBUG
-#define DBG_FLT(a...) printk(a)
-#else
-#define DBG_FLT(a...)
-#endif
-
/*
* User data (data section and bss) needs to be aligned.
* We pick 0x20 here because it is the max value elf2flt has always
@@ -80,7 +69,7 @@ struct lib_info {
unsigned long text_len; /* Length of text segment */
unsigned long entry; /* Start address for this module */
unsigned long build_date; /* When this one was compiled */
- short loaded; /* Has this library been loaded? */
+ bool loaded; /* Has this library been loaded? */
} lib_list[MAX_SHARED_LIBS];
};
@@ -106,59 +95,67 @@ static struct linux_binfmt flat_format = {
static int flat_core_dump(struct coredump_params *cprm)
{
- printk("Process %s:%d received signr %d and should have core dumped\n",
- current->comm, current->pid, (int) cprm->siginfo->si_signo);
- return(1);
+ pr_warn("Process %s:%d received signr %d and should have core dumped\n",
+ current->comm, current->pid, cprm->siginfo->si_signo);
+ return 1;
}
/****************************************************************************/
/*
* create_flat_tables() parses the env- and arg-strings in new user
* memory and creates the pointer tables from them, and puts their
- * addresses on the "stack", returning the new stack pointer value.
+ * addresses on the "stack", recording the new stack pointer value.
*/
-static unsigned long create_flat_tables(
- unsigned long pp,
- struct linux_binprm * bprm)
+static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start)
{
- unsigned long *argv,*envp;
- unsigned long * sp;
- char * p = (char*)pp;
- int argc = bprm->argc;
- int envc = bprm->envc;
- char uninitialized_var(dummy);
-
- sp = (unsigned long *)p;
- sp -= (envc + argc + 2) + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
- sp = (unsigned long *) ((unsigned long)sp & -FLAT_STACK_ALIGN);
- argv = sp + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
- envp = argv + (argc + 1);
+ char __user *p;
+ unsigned long __user *sp;
+ long i, len;
+
+ p = (char __user *)arg_start;
+ sp = (unsigned long __user *)current->mm->start_stack;
+
+ sp -= bprm->envc + 1;
+ sp -= bprm->argc + 1;
+ sp -= flat_argvp_envp_on_stack() ? 2 : 0;
+ sp -= 1; /* &argc */
+ current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
+ sp = (unsigned long __user *)current->mm->start_stack;
+
+ __put_user(bprm->argc, sp++);
if (flat_argvp_envp_on_stack()) {
- put_user((unsigned long) envp, sp + 2);
- put_user((unsigned long) argv, sp + 1);
- }
-
- put_user(argc, sp);
- current->mm->arg_start = (unsigned long) p;
- while (argc-->0) {
- put_user((unsigned long) p, argv++);
- do {
- get_user(dummy, p); p++;
- } while (dummy);
- }
- put_user((unsigned long) NULL, argv);
- current->mm->arg_end = current->mm->env_start = (unsigned long) p;
- while (envc-->0) {
- put_user((unsigned long)p, envp); envp++;
- do {
- get_user(dummy, p); p++;
- } while (dummy);
- }
- put_user((unsigned long) NULL, envp);
- current->mm->env_end = (unsigned long) p;
- return (unsigned long)sp;
+ unsigned long argv, envp;
+ argv = (unsigned long)(sp + 2);
+ envp = (unsigned long)(sp + 2 + bprm->argc + 1);
+ __put_user(argv, sp++);
+ __put_user(envp, sp++);
+ }
+
+ current->mm->arg_start = (unsigned long)p;
+ for (i = bprm->argc; i > 0; i--) {
+ __put_user((unsigned long)p, sp++);
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
+ return -EINVAL;
+ p += len;
+ }
+ __put_user(0, sp++);
+ current->mm->arg_end = (unsigned long)p;
+
+ current->mm->env_start = (unsigned long) p;
+ for (i = bprm->envc; i > 0; i--) {
+ __put_user((unsigned long)p, sp++);
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
+ return -EINVAL;
+ p += len;
+ }
+ __put_user(0, sp++);
+ current->mm->env_end = (unsigned long)p;
+
+ return 0;
}
/****************************************************************************/
@@ -190,17 +187,17 @@ static int decompress_exec(
loff_t fpos;
int ret, retval;
- DBG_FLT("decompress_exec(offset=%x,buf=%x,len=%x)\n",(int)offset, (int)dst, (int)len);
+ pr_debug("decompress_exec(offset=%lx,buf=%p,len=%lx)\n", offset, dst, len);
memset(&strm, 0, sizeof(strm));
strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (strm.workspace == NULL) {
- DBG_FLT("binfmt_flat: no memory for decompress workspace\n");
+ pr_debug("no memory for decompress workspace\n");
return -ENOMEM;
}
buf = kmalloc(LBUFSIZE, GFP_KERNEL);
if (buf == NULL) {
- DBG_FLT("binfmt_flat: no memory for read buffer\n");
+ pr_debug("no memory for read buffer\n");
retval = -ENOMEM;
goto out_free;
}
@@ -218,49 +215,49 @@ static int decompress_exec(
/* Check minimum size -- gzip header */
if (ret < 10) {
- DBG_FLT("binfmt_flat: file too small?\n");
+ pr_debug("file too small?\n");
goto out_free_buf;
}
/* Check gzip magic number */
if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
- DBG_FLT("binfmt_flat: unknown compression magic?\n");
+ pr_debug("unknown compression magic?\n");
goto out_free_buf;
}
/* Check gzip method */
if (buf[2] != 8) {
- DBG_FLT("binfmt_flat: unknown compression method?\n");
+ pr_debug("unknown compression method?\n");
goto out_free_buf;
}
/* Check gzip flags */
if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
(buf[3] & RESERVED)) {
- DBG_FLT("binfmt_flat: unknown flags?\n");
+ pr_debug("unknown flags?\n");
goto out_free_buf;
}
ret = 10;
if (buf[3] & EXTRA_FIELD) {
ret += 2 + buf[10] + (buf[11] << 8);
- if (unlikely(LBUFSIZE <= ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (EXTRA)?\n");
+ if (unlikely(ret >= LBUFSIZE)) {
+ pr_debug("buffer overflow (EXTRA)?\n");
goto out_free_buf;
}
}
if (buf[3] & ORIG_NAME) {
while (ret < LBUFSIZE && buf[ret++] != 0)
;
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (ORIG_NAME)?\n");
+ if (unlikely(ret == LBUFSIZE)) {
+ pr_debug("buffer overflow (ORIG_NAME)?\n");
goto out_free_buf;
}
}
if (buf[3] & COMMENT) {
while (ret < LBUFSIZE && buf[ret++] != 0)
;
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (COMMENT)?\n");
+ if (unlikely(ret == LBUFSIZE)) {
+ pr_debug("buffer overflow (COMMENT)?\n");
goto out_free_buf;
}
}
@@ -273,7 +270,7 @@ static int decompress_exec(
strm.total_out = 0;
if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
- DBG_FLT("binfmt_flat: zlib init failed?\n");
+ pr_debug("zlib init failed?\n");
goto out_free_buf;
}
@@ -290,7 +287,7 @@ static int decompress_exec(
}
if (ret < 0) {
- DBG_FLT("binfmt_flat: decompression failed (%d), %s\n",
+ pr_debug("decompression failed (%d), %s\n",
ret, strm.msg);
goto out_zlib;
}
@@ -327,24 +324,23 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
r &= 0x00ffffff; /* Trim ID off here */
}
if (id >= MAX_SHARED_LIBS) {
- printk("BINFMT_FLAT: reference 0x%x to shared library %d",
- (unsigned) r, id);
+ pr_err("reference 0x%lx to shared library %d", r, id);
goto failed;
}
if (curid != id) {
if (internalp) {
- printk("BINFMT_FLAT: reloc address 0x%x not in same module "
- "(%d != %d)", (unsigned) r, curid, id);
+ pr_err("reloc address 0x%lx not in same module "
+ "(%d != %d)", r, curid, id);
goto failed;
- } else if ( ! p->lib_list[id].loaded &&
- load_flat_shared_library(id, p) < 0) {
- printk("BINFMT_FLAT: failed to load library %d", id);
+ } else if (!p->lib_list[id].loaded &&
+ load_flat_shared_library(id, p) < 0) {
+ pr_err("failed to load library %d", id);
goto failed;
}
/* Check versioning information (i.e. time stamps) */
if (p->lib_list[id].build_date && p->lib_list[curid].build_date &&
p->lib_list[curid].build_date < p->lib_list[id].build_date) {
- printk("BINFMT_FLAT: library %d is younger than %d", id, curid);
+ pr_err("library %d is younger than %d", id, curid);
goto failed;
}
}
@@ -358,8 +354,8 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
text_len = p->lib_list[id].text_len;
if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
- printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
- (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
+ pr_err("reloc outside program 0x%lx (0 - 0x%lx/0x%lx)",
+ r, start_brk-start_data+text_len, text_len);
goto failed;
}
@@ -369,10 +365,10 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
addr = r - text_len + start_data;
/* Range checked already above so doing the range tests is redundant...*/
- return(addr);
+ return addr;
failed:
- printk(", killing %s!\n", current->comm);
+ pr_cont(", killing %s!\n", current->comm);
send_sig(SIGSEGV, current, 0);
return RELOC_FAILED;
@@ -382,62 +378,57 @@ failed:
static void old_reloc(unsigned long rl)
{
-#ifdef DEBUG
- char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
-#endif
+ static const char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
flat_v2_reloc_t r;
- unsigned long *ptr;
-
+ unsigned long __user *ptr;
+ unsigned long val;
+
r.value = rl;
#if defined(CONFIG_COLDFIRE)
- ptr = (unsigned long *) (current->mm->start_code + r.reloc.offset);
+ ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);
#else
- ptr = (unsigned long *) (current->mm->start_data + r.reloc.offset);
+ ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);
#endif
+ get_user(val, ptr);
+
+ pr_debug("Relocation of variable at DATASEG+%x "
+ "(address %p, currently %lx) into segment %s\n",
+ r.reloc.offset, ptr, val, segment[r.reloc.type]);
-#ifdef DEBUG
- printk("Relocation of variable at DATASEG+%x "
- "(address %p, currently %x) into segment %s\n",
- r.reloc.offset, ptr, (int)*ptr, segment[r.reloc.type]);
-#endif
-
switch (r.reloc.type) {
case OLD_FLAT_RELOC_TYPE_TEXT:
- *ptr += current->mm->start_code;
+ val += current->mm->start_code;
break;
case OLD_FLAT_RELOC_TYPE_DATA:
- *ptr += current->mm->start_data;
+ val += current->mm->start_data;
break;
case OLD_FLAT_RELOC_TYPE_BSS:
- *ptr += current->mm->end_data;
+ val += current->mm->end_data;
break;
default:
- printk("BINFMT_FLAT: Unknown relocation type=%x\n", r.reloc.type);
+ pr_err("Unknown relocation type=%x\n", r.reloc.type);
break;
}
+ put_user(val, ptr);
-#ifdef DEBUG
- printk("Relocation became %x\n", (int)*ptr);
-#endif
-}
+ pr_debug("Relocation became %lx\n", val);
+}
/****************************************************************************/
-static int load_flat_file(struct linux_binprm * bprm,
+static int load_flat_file(struct linux_binprm *bprm,
struct lib_info *libinfo, int id, unsigned long *extra_stack)
{
- struct flat_hdr * hdr;
- unsigned long textpos = 0, datapos = 0, result;
- unsigned long realdatastart = 0;
- unsigned long text_len, data_len, bss_len, stack_len, flags;
- unsigned long full_data;
- unsigned long len, memp = 0;
- unsigned long memp_size, extra, rlim;
- unsigned long *reloc = 0, *rp;
+ struct flat_hdr *hdr;
+ unsigned long textpos, datapos, realdatastart;
+ unsigned long text_len, data_len, bss_len, stack_len, full_data, flags;
+ unsigned long len, memp, memp_size, extra, rlim;
+ unsigned long __user *reloc, *rp;
struct inode *inode;
- int i, rev, relocs = 0;
+ int i, rev, relocs;
loff_t fpos;
unsigned long start_code, end_code;
+ ssize_t result;
int ret;
hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */
@@ -469,20 +460,30 @@ static int load_flat_file(struct linux_binprm * bprm,
}
if (flags & FLAT_FLAG_KTRACE)
- printk("BINFMT_FLAT: Loading file: %s\n", bprm->filename);
+ pr_info("Loading file: %s\n", bprm->filename);
if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) {
- printk("BINFMT_FLAT: bad flat file version 0x%x (supported "
- "0x%lx and 0x%lx)\n",
- rev, FLAT_VERSION, OLD_FLAT_VERSION);
+ pr_err("bad flat file version 0x%x (supported 0x%lx and 0x%lx)\n",
+ rev, FLAT_VERSION, OLD_FLAT_VERSION);
ret = -ENOEXEC;
goto err;
}
-
+
/* Don't allow old format executables to use shared libraries */
if (rev == OLD_FLAT_VERSION && id != 0) {
- printk("BINFMT_FLAT: shared libraries are not available before rev 0x%x\n",
- (int) FLAT_VERSION);
+ pr_err("shared libraries are not available before rev 0x%lx\n",
+ FLAT_VERSION);
+ ret = -ENOEXEC;
+ goto err;
+ }
+
+ /*
+ * Make sure the header params are sane.
+ * 28 bits (256 MB) is way more than reasonable in this case.
+ * If some top bits are set we have probable binary corruption.
+ */
+ if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
+ pr_err("bad header\n");
ret = -ENOEXEC;
goto err;
}
@@ -496,7 +497,7 @@ static int load_flat_file(struct linux_binprm * bprm,
#ifndef CONFIG_BINFMT_ZFLAT
if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
- printk("Support for ZFLAT executables is not enabled.\n");
+ pr_err("Support for ZFLAT executables is not enabled.\n");
ret = -ENOEXEC;
goto err;
}
@@ -517,11 +518,9 @@ static int load_flat_file(struct linux_binprm * bprm,
/* Flush all traces of the currently running executable */
if (id == 0) {
- result = flush_old_exec(bprm);
- if (result) {
- ret = result;
+ ret = flush_old_exec(bprm);
+ if (ret)
goto err;
- }
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
@@ -539,48 +538,48 @@ static int load_flat_file(struct linux_binprm * bprm,
* case, and then the fully copied to RAM case which lumps
* it all together.
*/
- if ((flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP)) == 0) {
+ if (!IS_ENABLED(CONFIG_MMU) && !(flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP))) {
/*
* this should give us a ROM ptr, but if it doesn't we don't
* really care
*/
- DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
+ pr_debug("ROM mapping of file (we hope)\n");
textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
MAP_PRIVATE|MAP_EXECUTABLE, 0);
if (!textpos || IS_ERR_VALUE(textpos)) {
- if (!textpos)
- textpos = (unsigned long) -ENOMEM;
- printk("Unable to mmap process text, errno %d\n", (int)-textpos);
ret = textpos;
+ if (!textpos)
+ ret = -ENOMEM;
+ pr_err("Unable to mmap process text, errno %d\n", ret);
goto err;
}
len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
- realdatastart = vm_mmap(0, 0, len,
+ realdatastart = vm_mmap(NULL, 0, len,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
+ ret = realdatastart;
if (!realdatastart)
- realdatastart = (unsigned long) -ENOMEM;
- printk("Unable to allocate RAM for process data, errno %d\n",
- (int)-realdatastart);
+ ret = -ENOMEM;
+ pr_err("Unable to allocate RAM for process data, "
+ "errno %d\n", ret);
vm_munmap(textpos, text_len);
- ret = realdatastart;
goto err;
}
datapos = ALIGN(realdatastart +
MAX_SHARED_LIBS * sizeof(unsigned long),
FLAT_DATA_ALIGN);
- DBG_FLT("BINFMT_FLAT: Allocated data+bss+stack (%d bytes): %x\n",
- (int)(data_len + bss_len + stack_len), (int)datapos);
+ pr_debug("Allocated data+bss+stack (%ld bytes): %lx\n",
+ data_len + bss_len + stack_len, datapos);
fpos = ntohl(hdr->data_start);
#ifdef CONFIG_BINFMT_ZFLAT
if (flags & FLAT_FLAG_GZDATA) {
- result = decompress_exec(bprm, fpos, (char *) datapos,
+ result = decompress_exec(bprm, fpos, (char *)datapos,
full_data, 0);
} else
#endif
@@ -589,29 +588,30 @@ static int load_flat_file(struct linux_binprm * bprm,
full_data);
}
if (IS_ERR_VALUE(result)) {
- printk("Unable to read data+bss, errno %d\n", (int)-result);
+ ret = result;
+ pr_err("Unable to read data+bss, errno %d\n", ret);
vm_munmap(textpos, text_len);
vm_munmap(realdatastart, len);
- ret = result;
goto err;
}
- reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
+ reloc = (unsigned long __user *)
+ (datapos + (ntohl(hdr->reloc_start) - text_len));
memp = realdatastart;
memp_size = len;
} else {
len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
- textpos = vm_mmap(0, 0, len,
+ textpos = vm_mmap(NULL, 0, len,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
if (!textpos || IS_ERR_VALUE(textpos)) {
- if (!textpos)
- textpos = (unsigned long) -ENOMEM;
- printk("Unable to allocate RAM for process text/data, errno %d\n",
- (int)-textpos);
ret = textpos;
+ if (!textpos)
+ ret = -ENOMEM;
+ pr_err("Unable to allocate RAM for process text/data, "
+ "errno %d\n", ret);
goto err;
}
@@ -620,7 +620,7 @@ static int load_flat_file(struct linux_binprm * bprm,
MAX_SHARED_LIBS * sizeof(unsigned long),
FLAT_DATA_ALIGN);
- reloc = (unsigned long *)
+ reloc = (unsigned long __user *)
(datapos + (ntohl(hdr->reloc_start) - text_len));
memp = textpos;
memp_size = len;
@@ -629,21 +629,59 @@ static int load_flat_file(struct linux_binprm * bprm,
* load it all in and treat it like a RAM load from now on
*/
if (flags & FLAT_FLAG_GZIP) {
- result = decompress_exec(bprm, sizeof (struct flat_hdr),
- (((char *) textpos) + sizeof (struct flat_hdr)),
+#ifndef CONFIG_MMU
+ result = decompress_exec(bprm, sizeof(struct flat_hdr),
+ (((char *)textpos) + sizeof(struct flat_hdr)),
(text_len + full_data
- - sizeof (struct flat_hdr)),
+ - sizeof(struct flat_hdr)),
0);
memmove((void *) datapos, (void *) realdatastart,
full_data);
+#else
+ /*
+ * This is used on MMU systems mainly for testing.
+ * Let's use a kernel buffer to simplify things.
+ */
+ long unz_text_len = text_len - sizeof(struct flat_hdr);
+ long unz_len = unz_text_len + full_data;
+ char *unz_data = vmalloc(unz_len);
+ if (!unz_data) {
+ result = -ENOMEM;
+ } else {
+ result = decompress_exec(bprm, sizeof(struct flat_hdr),
+ unz_data, unz_len, 0);
+ if (result == 0 &&
+ (copy_to_user((void __user *)textpos + sizeof(struct flat_hdr),
+ unz_data, unz_text_len) ||
+ copy_to_user((void __user *)datapos,
+ unz_data + unz_text_len, full_data)))
+ result = -EFAULT;
+ vfree(unz_data);
+ }
+#endif
} else if (flags & FLAT_FLAG_GZDATA) {
result = read_code(bprm->file, textpos, 0, text_len);
- if (!IS_ERR_VALUE(result))
+ if (!IS_ERR_VALUE(result)) {
+#ifndef CONFIG_MMU
result = decompress_exec(bprm, text_len, (char *) datapos,
full_data, 0);
- }
- else
+#else
+ char *unz_data = vmalloc(full_data);
+ if (!unz_data) {
+ result = -ENOMEM;
+ } else {
+ result = decompress_exec(bprm, text_len,
+ unz_data, full_data, 0);
+ if (result == 0 &&
+ copy_to_user((void __user *)datapos,
+ unz_data, full_data))
+ result = -EFAULT;
+ vfree(unz_data);
+ }
#endif
+ }
+ } else
+#endif /* CONFIG_BINFMT_ZFLAT */
{
result = read_code(bprm->file, textpos, 0, text_len);
if (!IS_ERR_VALUE(result))
@@ -652,21 +690,19 @@ static int load_flat_file(struct linux_binprm * bprm,
full_data);
}
if (IS_ERR_VALUE(result)) {
- printk("Unable to read code+data+bss, errno %d\n",(int)-result);
+ ret = result;
+ pr_err("Unable to read code+data+bss, errno %d\n", ret);
vm_munmap(textpos, text_len + data_len + extra +
MAX_SHARED_LIBS * sizeof(unsigned long));
- ret = result;
goto err;
}
}
- if (flags & FLAT_FLAG_KTRACE)
- printk("Mapping is %x, Entry point is %x, data_start is %x\n",
- (int)textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
+ start_code = textpos + sizeof(struct flat_hdr);
+ end_code = textpos + text_len;
+ text_len -= sizeof(struct flat_hdr); /* the real code len */
/* The main program needs a little extra setup in the task structure */
- start_code = textpos + sizeof (struct flat_hdr);
- end_code = textpos + text_len;
if (id == 0) {
current->mm->start_code = start_code;
current->mm->end_code = end_code;
@@ -681,19 +717,19 @@ static int load_flat_file(struct linux_binprm * bprm,
*/
current->mm->start_brk = datapos + data_len + bss_len;
current->mm->brk = (current->mm->start_brk + 3) & ~3;
+#ifndef CONFIG_MMU
current->mm->context.end_brk = memp + memp_size - stack_len;
+#endif
}
- if (flags & FLAT_FLAG_KTRACE)
- printk("%s %s: TEXT=%x-%x DATA=%x-%x BSS=%x-%x\n",
+ if (flags & FLAT_FLAG_KTRACE) {
+ pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n",
+ textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
+ pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n",
id ? "Lib" : "Load", bprm->filename,
- (int) start_code, (int) end_code,
- (int) datapos,
- (int) (datapos + data_len),
- (int) (datapos + data_len),
- (int) (((datapos + data_len + bss_len) + 3) & ~3));
-
- text_len -= sizeof(struct flat_hdr); /* the real code len */
+ start_code, end_code, datapos, datapos + data_len,
+ datapos + data_len, (datapos + data_len + bss_len + 3) & ~3);
+ }
/* Store the current module values into the global library structure */
libinfo->lib_list[id].start_code = start_code;
@@ -703,7 +739,7 @@ static int load_flat_file(struct linux_binprm * bprm,
libinfo->lib_list[id].loaded = 1;
libinfo->lib_list[id].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
libinfo->lib_list[id].build_date = ntohl(hdr->build_date);
-
+
/*
* We just load the allocations into some temporary memory to
* help simplify all this mumbo jumbo
@@ -717,15 +753,20 @@ static int load_flat_file(struct linux_binprm * bprm,
* image.
*/
if (flags & FLAT_FLAG_GOTPIC) {
- for (rp = (unsigned long *)datapos; *rp != 0xffffffff; rp++) {
- unsigned long addr;
- if (*rp) {
- addr = calc_reloc(*rp, libinfo, id, 0);
+ for (rp = (unsigned long __user *)datapos; ; rp++) {
+ unsigned long addr, rp_val;
+ if (get_user(rp_val, rp))
+ return -EFAULT;
+ if (rp_val == 0xffffffff)
+ break;
+ if (rp_val) {
+ addr = calc_reloc(rp_val, libinfo, id, 0);
if (addr == RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
}
- *rp = addr;
+ if (put_user(addr, rp))
+ return -EFAULT;
}
}
}
@@ -742,19 +783,23 @@ static int load_flat_file(struct linux_binprm * bprm,
* __start to address 4 so that is okay).
*/
if (rev > OLD_FLAT_VERSION) {
- unsigned long persistent = 0;
- for (i=0; i < relocs; i++) {
+ unsigned long __maybe_unused persistent = 0;
+ for (i = 0; i < relocs; i++) {
unsigned long addr, relval;
- /* Get the address of the pointer to be
- relocated (of course, the address has to be
- relocated first). */
- relval = ntohl(reloc[i]);
- if (flat_set_persistent (relval, &persistent))
+ /*
+ * Get the address of the pointer to be
+ * relocated (of course, the address has to be
+ * relocated first).
+ */
+ if (get_user(relval, reloc + i))
+ return -EFAULT;
+ relval = ntohl(relval);
+ if (flat_set_persistent(relval, &persistent))
continue;
addr = flat_get_relocate_addr(relval);
- rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
- if (rp == (unsigned long *)RELOC_FAILED) {
+ rp = (unsigned long __user *)calc_reloc(addr, libinfo, id, 1);
+ if (rp == (unsigned long __user *)RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
}
@@ -780,17 +825,23 @@ static int load_flat_file(struct linux_binprm * bprm,
}
}
} else {
- for (i=0; i < relocs; i++)
- old_reloc(ntohl(reloc[i]));
+ for (i = 0; i < relocs; i++) {
+ unsigned long relval;
+ if (get_user(relval, reloc + i))
+ return -EFAULT;
+ relval = ntohl(relval);
+ old_reloc(relval);
+ }
}
-
+
flush_icache_range(start_code, end_code);
/* zero the BSS, BRK and stack areas */
- memset((void*)(datapos + data_len), 0, bss_len +
- (memp + memp_size - stack_len - /* end brk */
- libinfo->lib_list[id].start_brk) + /* start brk */
- stack_len);
+ if (clear_user((void __user *)(datapos + data_len), bss_len +
+ (memp + memp_size - stack_len - /* end brk */
+ libinfo->lib_list[id].start_brk) + /* start brk */
+ stack_len))
+ return -EFAULT;
return 0;
err:
@@ -846,7 +897,7 @@ out:
allow_write_access(bprm.file);
fput(bprm.file);
- return(res);
+ return res;
}
#endif /* CONFIG_BINFMT_SHARED_FLAT */
@@ -857,18 +908,17 @@ out:
* libraries. There is no binary dependent code anywhere else.
*/
-static int load_flat_binary(struct linux_binprm * bprm)
+static int load_flat_binary(struct linux_binprm *bprm)
{
struct lib_info libinfo;
struct pt_regs *regs = current_pt_regs();
- unsigned long p = bprm->p;
- unsigned long stack_len;
+ unsigned long stack_len = 0;
unsigned long start_addr;
- unsigned long *sp;
int res;
int i, j;
memset(&libinfo, 0, sizeof(libinfo));
+
/*
* We have to add the size of our arguments to our stack size
* otherwise it's too easy for users to create stack overflows
@@ -876,38 +926,54 @@ static int load_flat_binary(struct linux_binprm * bprm)
* pedantic and include space for the argv/envp array as it may have
* a lot of entries.
*/
-#define TOP_OF_ARGS (PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *))
- stack_len = TOP_OF_ARGS - bprm->p; /* the strings */
- stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
- stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
- stack_len += FLAT_STACK_ALIGN - 1; /* reserve for upcoming alignment */
-
+#ifndef CONFIG_MMU
+ stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */
+#endif
+ stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
+ stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
+ stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN);
+
res = load_flat_file(bprm, &libinfo, 0, &stack_len);
if (res < 0)
return res;
-
+
/* Update data segment pointers for all libraries */
- for (i=0; i<MAX_SHARED_LIBS; i++)
- if (libinfo.lib_list[i].loaded)
- for (j=0; j<MAX_SHARED_LIBS; j++)
- (-(j+1))[(unsigned long *)(libinfo.lib_list[i].start_data)] =
- (libinfo.lib_list[j].loaded)?
- libinfo.lib_list[j].start_data:UNLOADED_LIB;
+ for (i = 0; i < MAX_SHARED_LIBS; i++) {
+ if (!libinfo.lib_list[i].loaded)
+ continue;
+ for (j = 0; j < MAX_SHARED_LIBS; j++) {
+ unsigned long val = libinfo.lib_list[j].loaded ?
+ libinfo.lib_list[j].start_data : UNLOADED_LIB;
+ unsigned long __user *p = (unsigned long __user *)
+ libinfo.lib_list[i].start_data;
+ p -= j + 1;
+ if (put_user(val, p))
+ return -EFAULT;
+ }
+ }
install_exec_creds(bprm);
set_binfmt(&flat_format);
- p = ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
- DBG_FLT("p=%x\n", (int)p);
+#ifdef CONFIG_MMU
+ res = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+ if (!res)
+ res = create_flat_tables(bprm, bprm->p);
+#else
+ /* Stash our initial stack pointer into the mm structure */
+ current->mm->start_stack =
+ ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
+ pr_debug("sp=%lx\n", current->mm->start_stack);
- /* copy the arg pages onto the stack, this could be more efficient :-) */
- for (i = TOP_OF_ARGS - 1; i >= bprm->p; i--)
- * (char *) --p =
- ((char *) page_address(bprm->page[i/PAGE_SIZE]))[i % PAGE_SIZE];
+ /* copy the arg pages onto the stack */
+ res = transfer_args_to_stack(bprm, &current->mm->start_stack);
+ if (!res)
+ res = create_flat_tables(bprm, current->mm->start_stack);
+#endif
+ if (res)
+ return res;
- sp = (unsigned long *) create_flat_tables(p, bprm);
-
/* Fake some return addresses to ensure the call chain will
* initialise library in order for us. We are required to call
* lib 1 first, then 2, ... and finally the main program (id 0).
@@ -915,24 +981,24 @@ static int load_flat_binary(struct linux_binprm * bprm)
start_addr = libinfo.lib_list[0].entry;
#ifdef CONFIG_BINFMT_SHARED_FLAT
- for (i = MAX_SHARED_LIBS-1; i>0; i--) {
+ for (i = MAX_SHARED_LIBS-1; i > 0; i--) {
if (libinfo.lib_list[i].loaded) {
/* Push previos first to call address */
- --sp; put_user(start_addr, sp);
+ unsigned long __user *sp;
+ current->mm->start_stack -= sizeof(unsigned long);
+ sp = (unsigned long __user *)current->mm->start_stack;
+ __put_user(start_addr, sp);
start_addr = libinfo.lib_list[i].entry;
}
}
#endif
-
- /* Stash our initial stack pointer into the mm structure */
- current->mm->start_stack = (unsigned long )sp;
#ifdef FLAT_PLAT_INIT
FLAT_PLAT_INIT(regs);
#endif
- DBG_FLT("start_thread(regs=0x%x, entry=0x%x, start_stack=0x%x)\n",
- (int)regs, (int)start_addr, (int)current->mm->start_stack);
-
+
+ pr_debug("start_thread(regs=0x%p, entry=0x%lx, start_stack=0x%lx)\n",
+ regs, start_addr, current->mm->start_stack);
start_thread(regs, start_addr, current->mm->start_stack);
return 0;
@@ -945,9 +1011,6 @@ static int __init init_flat_binfmt(void)
register_binfmt(&flat_format);
return 0;
}
-
-/****************************************************************************/
-
core_initcall(init_flat_binfmt);
/****************************************************************************/
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 5417516f6e59..6103a6362ccd 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -26,6 +26,8 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
#ifdef DEBUG
# define USE_DEBUG 1
#else
@@ -43,6 +45,7 @@ enum {Enabled, Magic};
#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
#define MISC_FMT_OPEN_BINARY (1 << 30)
#define MISC_FMT_CREDENTIALS (1 << 29)
+#define MISC_FMT_OPEN_FILE (1 << 28)
typedef struct {
struct list_head list;
@@ -54,6 +57,7 @@ typedef struct {
char *interpreter; /* filename of interpreter */
char *name;
struct dentry *dentry;
+ struct file *interp_file;
} Node;
static DEFINE_RWLOCK(entries_lock);
@@ -201,7 +205,13 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (retval < 0)
goto error;
- interp_file = open_exec(iname);
+ if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) {
+ interp_file = filp_clone_open(fmt->interp_file);
+ if (!IS_ERR(interp_file))
+ deny_write_access(interp_file);
+ } else {
+ interp_file = open_exec(iname);
+ }
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
goto error;
@@ -285,6 +295,11 @@ static char *check_special_flags(char *sfs, Node *e)
e->flags |= (MISC_FMT_CREDENTIALS |
MISC_FMT_OPEN_BINARY);
break;
+ case 'F':
+ pr_debug("register: flag: F: open interpreter file now\n");
+ p++;
+ e->flags |= MISC_FMT_OPEN_FILE;
+ break;
default:
cont = 0;
}
@@ -543,6 +558,8 @@ static void entry_status(Node *e, char *page)
*dp++ = 'O';
if (e->flags & MISC_FMT_CREDENTIALS)
*dp++ = 'C';
+ if (e->flags & MISC_FMT_OPEN_FILE)
+ *dp++ = 'F';
*dp++ = '\n';
if (!test_bit(Magic, &e->flags)) {
@@ -590,6 +607,11 @@ static void kill_node(Node *e)
}
write_unlock(&entries_lock);
+ if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) {
+ filp_close(e->interp_file, NULL);
+ e->interp_file = NULL;
+ }
+
if (dentry) {
drop_nlink(d_inode(dentry));
d_drop(dentry);
@@ -696,6 +718,21 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
goto out2;
}
+ if (e->flags & MISC_FMT_OPEN_FILE) {
+ struct file *f;
+
+ f = open_exec(e->interpreter);
+ if (IS_ERR(f)) {
+ err = PTR_ERR(f);
+ pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
+ simple_release_fs(&bm_mnt, &entry_count);
+ iput(inode);
+ inode = NULL;
+ goto out2;
+ }
+ e->interp_file = f;
+ }
+
e->dentry = dget(dentry);
inode->i_private = e;
inode->i_fop = &bm_entry_operations;
@@ -713,7 +750,7 @@ out:
if (err) {
kfree(e);
- return -EINVAL;
+ return err;
}
return count;
}
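The binfmt_misc hunks above add the 'F' ("open interpreter file now") flag, which pins the interpreter at registration time instead of resolving it at every exec. A hedged registration example: the entry name, extension and interpreter path are invented for illustration; only the ":name:type:offset:magic:mask:interpreter:flags" syntax and the new F flag come from the patch and the binfmt_misc documentation.

	#include <stdio.h>

	int main(void)
	{
		/* extension-matched entry (type E): run *.demo files through a
		 * hypothetical /usr/local/bin/demo-run, opened up front via 'F'
		 */
		const char *entry = ":demo:E::demo::/usr/local/bin/demo-run:F";
		FILE *f = fopen("/proc/sys/fs/binfmt_misc/register", "w");

		if (!f) {
			perror("open register");
			return 1;
		}
		if (fputs(entry, f) == EOF)
			perror("write entry");
		fclose(f);
		return 0;
	}
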
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ada42cf42d06..c3cdde87cc8c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -416,7 +416,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
result = blk_queue_enter(bdev->bd_queue, false);
if (result)
return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
blk_queue_exit(bdev->bd_queue);
return result;
}
@@ -445,7 +445,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
struct page *page, struct writeback_control *wbc)
{
int result;
- int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
const struct block_device_operations *ops = bdev->bd_disk->fops;
if (!ops->rw_page || bdev_get_integrity(bdev))
@@ -455,7 +454,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
return result;
set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
if (result)
end_page_writeback(page);
else
@@ -1275,11 +1274,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- if (IS_ENABLED(CONFIG_BLK_DEV_DAX) &&
- blk_queue_dax(disk->queue))
- bdev->bd_inode->i_flags = S_DAX;
- else
- bdev->bd_inode->i_flags = 0;
+ bdev->bd_inode->i_flags = 0;
if (!partno) {
ret = -ENXIO;
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 67a607709d4f..53bb7af4e5f0 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -55,8 +55,7 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
}
if (size > 0) {
acl = posix_acl_from_xattr(&init_user_ns, value, size);
- } else if (size == -ENOENT || size == -ENODATA || size == 0) {
- /* FIXME, who returns -ENOENT? I think nobody */
+ } else if (size == -ERANGE || size == -ENODATA || size == 0) {
acl = NULL;
} else {
acl = ERR_PTR(-EIO);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 5fb60ea7eee2..e0f071f6b5a7 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -34,6 +34,10 @@
struct __btrfs_workqueue {
struct workqueue_struct *normal_wq;
+
+ /* File system this workqueue services */
+ struct btrfs_fs_info *fs_info;
+
/* List head pointing to ordered work list */
struct list_head ordered_list;
@@ -70,6 +74,18 @@ void btrfs_##name(struct work_struct *arg) \
normal_work_helper(work); \
}
+struct btrfs_fs_info *
+btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
+{
+ return wq->fs_info;
+}
+
+struct btrfs_fs_info *
+btrfs_work_owner(struct btrfs_work *work)
+{
+ return work->wq->fs_info;
+}
+
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
@@ -94,14 +110,15 @@ BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);
static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
- int thresh)
+__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
+ unsigned int flags, int limit_active, int thresh)
{
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
+ ret->fs_info = fs_info;
ret->limit_active = limit_active;
atomic_set(&ret->pending, 0);
if (thresh == 0)
@@ -143,7 +160,8 @@ __btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
-struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
+struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
+ const char *name,
unsigned int flags,
int limit_active,
int thresh)
@@ -153,7 +171,8 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
if (!ret)
return NULL;
- ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
+ ret->normal = __btrfs_alloc_workqueue(fs_info, name,
+ flags & ~WQ_HIGHPRI,
limit_active, thresh);
if (!ret->normal) {
kfree(ret);
@@ -161,8 +180,8 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
}
if (flags & WQ_HIGHPRI) {
- ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
- thresh);
+ ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
+ limit_active, thresh);
if (!ret->high) {
__btrfs_destroy_workqueue(ret->normal);
kfree(ret);
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index ad4d0647d1a6..8e52484cd461 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -21,6 +21,7 @@
#define __BTRFS_ASYNC_THREAD_
#include <linux/workqueue.h>
+struct btrfs_fs_info;
struct btrfs_workqueue;
/* Internal use only */
struct __btrfs_workqueue;
@@ -67,7 +68,8 @@ BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
-struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
+struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
+ const char *name,
unsigned int flags,
int limit_active,
int thresh);
@@ -80,4 +82,6 @@ void btrfs_queue_work(struct btrfs_workqueue *wq,
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
+struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
+struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8bb3509099e8..2b88439c2ee8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -139,7 +139,7 @@ int __init btrfs_prelim_ref_init(void)
btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
sizeof(struct __prelim_ref),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_prelim_ref_cache)
return -ENOMEM;
@@ -361,7 +361,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
goto out;
}
- if (btrfs_test_is_dummy_root(root)) {
+ if (btrfs_is_testing(fs_info)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
ret = -ENOENT;
goto out;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 5d5cae05818d..66789471b49d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -2945,7 +2945,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
printk(KERN_INFO
"submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
" bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
- bio_op(bio), bio->bi_rw, bio->bi_vcnt,
+ bio_op(bio), bio->bi_opf, bio->bi_vcnt,
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);
@@ -2976,18 +2976,18 @@ static void __btrfsic_submit_bio(struct bio *bio)
btrfsic_process_written_block(dev_state, dev_bytenr,
mapped_datav, bio->bi_vcnt,
bio, &bio_is_patched,
- NULL, bio->bi_rw);
+ NULL, bio->bi_opf);
while (i > 0) {
i--;
kunmap(bio->bi_io_vec[i].bv_page);
}
kfree(mapped_datav);
- } else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
+ } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
printk(KERN_INFO
"submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
- bio_op(bio), bio->bi_rw, bio->bi_bdev);
+ bio_op(bio), bio->bi_opf, bio->bi_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3005,7 +3005,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
block->never_written = 0;
block->iodone_w_error = 0;
block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = bio->bi_rw;
+ block->submit_bio_bh_rw = bio->bi_opf;
block->orig_bio_bh_private = bio->bi_private;
block->orig_bio_bh_end_io.bio = bio->bi_end_io;
block->next_in_same_bio = NULL;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index cefedabf0a92..029db6e1105c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -403,7 +403,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
}
ret = btrfs_map_bio(root, bio, 0, 1);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ bio->bi_error = ret;
+ bio_endio(bio);
+ }
bio_put(bio);
@@ -434,7 +437,10 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
}
ret = btrfs_map_bio(root, bio, 0, 1);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ bio->bi_error = ret;
+ bio_endio(bio);
+ }
bio_put(bio);
return 0;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index a85cf7d23309..d1c56c94dd5a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1153,14 +1153,14 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -1198,7 +1198,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (last_ref) {
ret = tree_mod_log_free_eb(root->fs_info, buf);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -1505,7 +1505,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf)
{
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return 0;
/* ensure we can see the force_cow */
@@ -1771,6 +1771,14 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
unsigned long map_len = 0;
int err;
+ if (low > high) {
+ btrfs_err(eb->fs_info,
+ "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
+ __func__, low, high, eb->start,
+ btrfs_header_owner(eb), btrfs_header_level(eb));
+ return -EINVAL;
+ }
+
while (low < high) {
mid = (low + high) / 2;
offset = p + mid * item_size;
@@ -1858,7 +1866,6 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
/* given a node and slot number, this reads the blocks it points to. The
* extent buffer is returned with a reference taken (but unlocked).
- * NULL is returned on error.
*/
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
struct extent_buffer *parent, int slot)
@@ -1866,19 +1873,16 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
int level = btrfs_header_level(parent);
struct extent_buffer *eb;
- if (slot < 0)
- return NULL;
- if (slot >= btrfs_header_nritems(parent))
- return NULL;
+ if (slot < 0 || slot >= btrfs_header_nritems(parent))
+ return ERR_PTR(-ENOENT);
BUG_ON(level == 0);
eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
btrfs_node_ptr_generation(parent, slot));
- if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
- if (!IS_ERR(eb))
- free_extent_buffer(eb);
- eb = NULL;
+ if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ eb = ERR_PTR(-EIO);
}
return eb;
@@ -1931,8 +1935,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* promote the child to a root */
child = read_node_slot(root, mid, 0);
- if (!child) {
- ret = -EROFS;
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
btrfs_handle_fs_error(root->fs_info, ret, NULL);
goto enospc;
}
@@ -1970,6 +1974,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
left = read_node_slot(root, parent, pslot - 1);
+ if (IS_ERR(left))
+ left = NULL;
+
if (left) {
btrfs_tree_lock(left);
btrfs_set_lock_blocking(left);
@@ -1980,7 +1987,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto enospc;
}
}
+
right = read_node_slot(root, parent, pslot + 1);
+ if (IS_ERR(right))
+ right = NULL;
+
if (right) {
btrfs_tree_lock(right);
btrfs_set_lock_blocking(right);
@@ -2135,6 +2146,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
return 1;
left = read_node_slot(root, parent, pslot - 1);
+ if (IS_ERR(left))
+ left = NULL;
/* first, try to make some room in the middle buffer */
if (left) {
@@ -2185,6 +2198,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
free_extent_buffer(left);
}
right = read_node_slot(root, parent, pslot + 1);
+ if (IS_ERR(right))
+ right = NULL;
/*
* then try to empty the right most buffer into the middle
@@ -3240,7 +3255,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
push_items);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(dst, src,
@@ -3315,7 +3330,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
src_nritems - push_items, push_items);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(dst, src,
@@ -3519,7 +3534,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
mid, c_nritems - mid);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(split, c,
@@ -3773,7 +3788,11 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
right = read_node_slot(root, upper, slot + 1);
- if (right == NULL)
+ /*
+ * slot + 1 is not valid or we failed to read the right node;
+ * no big deal, just return.
+ */
+ if (IS_ERR(right))
return 1;
btrfs_tree_lock(right);
@@ -4003,7 +4022,11 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
left = read_node_slot(root, path->nodes[1], slot - 1);
- if (left == NULL)
+ /*
+ * slot - 1 is not valid or we failed to read the left node;
+ * no big deal, just return.
+ */
+ if (IS_ERR(left))
return 1;
btrfs_tree_lock(left);
@@ -5210,7 +5233,10 @@ find_next_key:
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
- BUG_ON(!cur); /* -ENOMEM */
+ if (IS_ERR(cur)) {
+ ret = PTR_ERR(cur);
+ goto out;
+ }
btrfs_tree_read_lock(cur);
@@ -5229,15 +5255,21 @@ out:
return ret;
}
-static void tree_move_down(struct btrfs_root *root,
+static int tree_move_down(struct btrfs_root *root,
struct btrfs_path *path,
int *level, int root_level)
{
+ struct extent_buffer *eb;
+
BUG_ON(*level == 0);
- path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
- path->slots[*level]);
+ eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+
+ path->nodes[*level - 1] = eb;
path->slots[*level - 1] = 0;
(*level)--;
+ return 0;
}
static int tree_move_next_or_upnext(struct btrfs_root *root,
@@ -5282,8 +5314,7 @@ static int tree_advance(struct btrfs_root *root,
if (*level == 0 || !allow_down) {
ret = tree_move_next_or_upnext(root, path, level, root_level);
} else {
- tree_move_down(root, path, level, root_level);
- ret = 0;
+ ret = tree_move_down(root, path, level, root_level);
}
if (ret >= 0) {
if (*level == 0)
@@ -5457,8 +5488,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
left_root_level,
advance_left != ADVANCE_ONLY_NEXT,
&left_key);
- if (ret < 0)
+ if (ret == -1)
left_end_reached = ADVANCE;
+ else if (ret < 0)
+ goto out;
advance_left = 0;
}
if (advance_right && !right_end_reached) {
@@ -5466,8 +5499,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
right_root_level,
advance_right != ADVANCE_ONLY_NEXT,
&right_key);
- if (ret < 0)
+ if (ret == -1)
right_end_reached = ADVANCE;
+ else if (ret < 0)
+ goto out;
advance_right = 0;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b2620d1f883f..2fe8f89091a3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -117,6 +117,7 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
#define BTRFS_FS_STATE_REMOUNTING 1
#define BTRFS_FS_STATE_TRANS_ABORTED 2
#define BTRFS_FS_STATE_DEV_REPLACING 3
+#define BTRFS_FS_STATE_DUMMY_FS_INFO 4
#define BTRFS_BACKREF_REV_MAX 256
#define BTRFS_BACKREF_REV_SHIFT 56
@@ -144,21 +145,6 @@ struct btrfs_header {
u8 level;
} __attribute__ ((__packed__));
-#define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \
- sizeof(struct btrfs_header)) / \
- sizeof(struct btrfs_key_ptr))
-#define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
-#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize))
-#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
- (offsetof(struct btrfs_file_extent_item, disk_bytenr))
-#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
- sizeof(struct btrfs_item) - \
- BTRFS_FILE_EXTENT_INLINE_DATA_START)
-#define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
- sizeof(struct btrfs_item) -\
- sizeof(struct btrfs_dir_item))
-
-
/*
* this is a very generous portion of the super block, giving us
* room to translate 14 chunks with 3 stripes each.
@@ -439,6 +425,8 @@ struct btrfs_space_info {
struct list_head list;
/* Protected by the spinlock 'lock'. */
struct list_head ro_bgs;
+ struct list_head priority_tickets;
+ struct list_head tickets;
struct rw_semaphore groups_sem;
/* for block groups in our same type */
@@ -1112,12 +1100,11 @@ struct btrfs_subvolume_writers {
#define BTRFS_ROOT_REF_COWS 1
#define BTRFS_ROOT_TRACK_DIRTY 2
#define BTRFS_ROOT_IN_RADIX 3
-#define BTRFS_ROOT_DUMMY_ROOT 4
-#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 5
-#define BTRFS_ROOT_DEFRAG_RUNNING 6
-#define BTRFS_ROOT_FORCE_COW 7
-#define BTRFS_ROOT_MULTI_LOG_TASKS 8
-#define BTRFS_ROOT_DIRTY 9
+#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 4
+#define BTRFS_ROOT_DEFRAG_RUNNING 5
+#define BTRFS_ROOT_FORCE_COW 6
+#define BTRFS_ROOT_MULTI_LOG_TASKS 7
+#define BTRFS_ROOT_DIRTY 8
/*
* in ram representation of the tree. extent_root is used for all allocations
@@ -1179,8 +1166,10 @@ struct btrfs_root {
u64 highest_objectid;
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
u64 alloc_bytenr;
+#endif
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
@@ -1257,6 +1246,39 @@ struct btrfs_root {
atomic_t qgroup_meta_rsv;
};
+static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
+{
+ return blocksize - sizeof(struct btrfs_header);
+}
+
+static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root)
+{
+ return __BTRFS_LEAF_DATA_SIZE(root->nodesize);
+}
+
+static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root)
+{
+ return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+}
+
+static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root)
+{
+ return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr);
+}
+
+#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
+ (offsetof(struct btrfs_file_extent_item, disk_bytenr))
+static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root)
+{
+ return BTRFS_MAX_ITEM_SIZE(root) -
+ BTRFS_FILE_EXTENT_INLINE_DATA_START;
+}
+
+static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
+{
+ return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item);
+}
+
/*
* Flags for mount options.
*
@@ -1297,21 +1319,21 @@ struct btrfs_root {
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
-#define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
+#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
BTRFS_MOUNT_##opt)
-#define btrfs_set_and_info(root, opt, fmt, args...) \
+#define btrfs_set_and_info(fs_info, opt, fmt, args...) \
{ \
- if (!btrfs_test_opt(root, opt)) \
- btrfs_info(root->fs_info, fmt, ##args); \
- btrfs_set_opt(root->fs_info->mount_opt, opt); \
+ if (!btrfs_test_opt(fs_info, opt)) \
+ btrfs_info(fs_info, fmt, ##args); \
+ btrfs_set_opt(fs_info->mount_opt, opt); \
}
-#define btrfs_clear_and_info(root, opt, fmt, args...) \
+#define btrfs_clear_and_info(fs_info, opt, fmt, args...) \
{ \
- if (btrfs_test_opt(root, opt)) \
- btrfs_info(root->fs_info, fmt, ##args); \
- btrfs_clear_opt(root->fs_info->mount_opt, opt); \
+ if (btrfs_test_opt(fs_info, opt)) \
+ btrfs_info(fs_info, fmt, ##args); \
+ btrfs_clear_opt(fs_info->mount_opt, opt); \
}
#ifdef CONFIG_BTRFS_DEBUG
@@ -1319,9 +1341,9 @@ static inline int
btrfs_should_fragment_free_space(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
- return (btrfs_test_opt(root, FRAGMENT_METADATA) &&
+ return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
- (btrfs_test_opt(root, FRAGMENT_DATA) &&
+ (btrfs_test_opt(root->fs_info, FRAGMENT_DATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif
@@ -2624,6 +2646,15 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_ALL,
};
+enum btrfs_flush_state {
+ FLUSH_DELAYED_ITEMS_NR = 1,
+ FLUSH_DELAYED_ITEMS = 2,
+ FLUSH_DELALLOC = 3,
+ FLUSH_DELALLOC_WAIT = 4,
+ ALLOC_CHUNK = 5,
+ COMMIT_TRANS = 6,
+};
+
int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
@@ -2661,8 +2692,8 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
- struct btrfs_block_rsv *dst_rsv,
- u64 num_bytes);
+ struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
+ int update_size);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
@@ -2875,9 +2906,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
-int btrfs_find_root_ref(struct btrfs_root *tree_root,
- struct btrfs_path *path,
- u64 root_id, u64 ref_id);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
@@ -3351,23 +3379,23 @@ const char *btrfs_decode_error(int errno);
__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *function,
+ const char *function,
unsigned int line, int errno);
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
* detected, that way the exact line number is reported.
*/
-#define btrfs_abort_transaction(trans, root, errno) \
+#define btrfs_abort_transaction(trans, errno) \
do { \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
- &((root)->fs_info->fs_state))) { \
+ &((trans)->fs_info->fs_state))) { \
WARN(1, KERN_DEBUG \
"BTRFS: Transaction aborted (error %d)\n", \
(errno)); \
} \
- __btrfs_abort_transaction((trans), (root), __func__, \
+ __btrfs_abort_transaction((trans), __func__, \
__LINE__, (errno)); \
} while (0)
@@ -3599,13 +3627,13 @@ static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
void btrfs_test_destroy_inode(struct inode *inode);
#endif
-static inline int btrfs_test_is_dummy_root(struct btrfs_root *root)
+static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
+ &fs_info->fs_state)))
return 1;
#endif
return 0;
}
-
#endif
diff --git a/fs/btrfs/dedupe.h b/fs/btrfs/dedupe.h
new file mode 100644
index 000000000000..83ebfe28da9e
--- /dev/null
+++ b/fs/btrfs/dedupe.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 Fujitsu. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_DEDUPE__
+#define __BTRFS_DEDUPE__
+
+/* later in-band dedupe will expand this struct */
+struct btrfs_dedupe_hash;
+#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index d3aaabbfada0..3eeb9cd8cfa5 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -34,7 +34,7 @@ int __init btrfs_delayed_inode_init(void)
delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
sizeof(struct btrfs_delayed_node),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!delayed_node_cache)
return -ENOMEM;
@@ -553,7 +553,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
dst_rsv = &root->fs_info->delayed_block_rsv;
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
if (!ret) {
trace_btrfs_space_reservation(root->fs_info, "delayed_item",
item->key.objectid,
@@ -598,6 +598,29 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
/*
+ * If our block_rsv is the delalloc block reserve then check and see if
+ * we have our extra reservation for updating the inode. If not fall
+ * through and try to reserve space quickly.
+ *
+ * We used to try and steal from the delalloc block rsv or the global
+ * reserve, but we'd steal a full reservation, which isn't kind. We are
+ * here through delalloc which means we've likely just cowed down close
+ * to the leaf that contains the inode, so we would steal less just
+ * doing the fallback inode update, so if we do end up having to steal
+ * from the global block rsv we hopefully only steal one or two blocks
+ * worth which is less likely to hurt us.
+ */
+ if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
+ spin_lock(&BTRFS_I(inode)->lock);
+ if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+ &BTRFS_I(inode)->runtime_flags))
+ release = true;
+ else
+ src_rsv = NULL;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ }
+
+ /*
* btrfs_dirty_inode will update the inode under btrfs_join_transaction
* which doesn't reserve space for speed. This is a problem since we
* still need to reserve space for this update, so try to reserve the
@@ -626,51 +649,10 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes, 1);
}
return ret;
- } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
- spin_lock(&BTRFS_I(inode)->lock);
- if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &BTRFS_I(inode)->runtime_flags)) {
- spin_unlock(&BTRFS_I(inode)->lock);
- release = true;
- goto migrate;
- }
- spin_unlock(&BTRFS_I(inode)->lock);
-
- /* Ok we didn't have space pre-reserved. This shouldn't happen
- * too often but it can happen if we do delalloc to an existing
- * inode which gets dirtied because of the time update, and then
- * isn't touched again until after the transaction commits and
- * then we try to write out the data. First try to be nice and
- * reserve something strictly for us. If not be a pain and try
- * to steal from the delalloc block rsv.
- */
- ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
- BTRFS_RESERVE_NO_FLUSH);
- if (!ret)
- goto out;
-
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
- if (!ret)
- goto out;
-
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
- btrfs_debug(root->fs_info,
- "block rsv migrate returned %d", ret);
- WARN_ON(1);
- }
- /*
- * Ok this is a problem, let's just steal from the global rsv
- * since this really shouldn't happen that often.
- */
- ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
- dst_rsv, num_bytes);
- goto out;
}
-migrate:
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
-out:
/*
* Migrate only takes a reservation, it doesn't touch the size of the
* block_rsv. This is to simplify people who don't normally have things
@@ -1188,7 +1170,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_release_delayed_node(curr_node);
curr_node = NULL;
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 430b3689b112..b6d210e7a993 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -606,7 +606,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
- qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
+ qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
+ delayed_refs,
qrecord);
if (qexisting)
kfree(qrecord);
@@ -615,7 +616,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
- trace_add_delayed_ref_head(ref, head_ref, action);
+ trace_add_delayed_ref_head(fs_info, ref, head_ref, action);
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
@@ -682,7 +683,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->type = BTRFS_TREE_BLOCK_REF_KEY;
full_ref->level = level;
- trace_add_delayed_tree_ref(ref, full_ref, action);
+ trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
@@ -739,7 +740,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
full_ref->objectid = owner;
full_ref->offset = offset;
- trace_add_delayed_data_ref(ref, full_ref, action);
+ trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
@@ -940,28 +941,28 @@ int btrfs_delayed_ref_init(void)
btrfs_delayed_ref_head_cachep = kmem_cache_create(
"btrfs_delayed_ref_head",
sizeof(struct btrfs_delayed_ref_head), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_ref_head_cachep)
goto fail;
btrfs_delayed_tree_ref_cachep = kmem_cache_create(
"btrfs_delayed_tree_ref",
sizeof(struct btrfs_delayed_tree_ref), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_tree_ref_cachep)
goto fail;
btrfs_delayed_data_ref_cachep = kmem_cache_create(
"btrfs_delayed_data_ref",
sizeof(struct btrfs_delayed_data_ref), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_data_ref_cachep)
goto fail;
btrfs_delayed_extent_op_cachep = kmem_cache_create(
"btrfs_delayed_extent_op",
sizeof(struct btrfs_delayed_extent_op), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_extent_op_cachep)
goto fail;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 63ef9cdf0144..e9bbff3c0029 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -142,7 +142,7 @@ no_valid_dev_replace_entry_found:
* missing
*/
if (!dev_replace->srcdev &&
- !btrfs_test_opt(dev_root, DEGRADED)) {
+ !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -151,7 +151,7 @@ no_valid_dev_replace_entry_found:
src_devid);
}
if (!dev_replace->tgtdev &&
- !btrfs_test_opt(dev_root, DEGRADED)) {
+ !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9a726ded2c6d..59febfb8d04a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -101,7 +101,7 @@ int __init btrfs_end_io_wq_init(void)
btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
sizeof(struct btrfs_end_io_wq),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_end_io_wq_cache)
return -ENOMEM;
@@ -870,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
atomic_inc(&fs_info->nr_async_submits);
- if (bio->bi_rw & REQ_SYNC)
+ if (bio->bi_opf & REQ_SYNC)
btrfs_set_work_high_priority(&async->work);
btrfs_queue_work(fs_info->workers, &async->work);
@@ -1140,7 +1140,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr)
{
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return alloc_test_extent_buffer(root->fs_info, bytenr,
root->nodesize);
return alloc_extent_buffer(root->fs_info, bytenr);
@@ -1227,6 +1227,7 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
struct btrfs_root *root, struct btrfs_fs_info *fs_info,
u64 objectid)
{
+ bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
root->node = NULL;
root->commit_root = NULL;
root->sectorsize = sectorsize;
@@ -1281,14 +1282,14 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
root->log_transid = 0;
root->log_transid_committed = -1;
root->last_log_commit = 0;
- if (fs_info)
+ if (!dummy)
extent_io_tree_init(&root->dirty_log_pages,
fs_info->btree_inode->i_mapping);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
- if (fs_info)
+ if (!dummy)
root->defrag_trans_start = fs_info->generation;
else
root->defrag_trans_start = 0;
@@ -1309,17 +1310,20 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root;
- root = btrfs_alloc_root(NULL, GFP_KERNEL);
+ if (!fs_info)
+ return ERR_PTR(-EINVAL);
+
+ root = btrfs_alloc_root(fs_info, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
/* We don't use the stripesize in selftest, set it as sectorsize */
- __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+ __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
BTRFS_ROOT_TREE_OBJECTID);
- set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
root->alloc_bytenr = 0;
return root;
@@ -1594,14 +1598,14 @@ int btrfs_init_fs_root(struct btrfs_root *root)
ret = get_anon_bdev(&root->anon_dev);
if (ret)
- goto free_writers;
+ goto fail;
mutex_lock(&root->objectid_mutex);
ret = btrfs_find_highest_objectid(root,
&root->highest_objectid);
if (ret) {
mutex_unlock(&root->objectid_mutex);
- goto free_root_dev;
+ goto fail;
}
ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
@@ -1609,14 +1613,8 @@ int btrfs_init_fs_root(struct btrfs_root *root)
mutex_unlock(&root->objectid_mutex);
return 0;
-
-free_root_dev:
- free_anon_bdev(root->anon_dev);
-free_writers:
- btrfs_free_subvolume_writers(root->subv_writers);
fail:
- kfree(root->free_ino_ctl);
- kfree(root->free_ino_pinned);
+ /* the caller is responsible for calling free_fs_root */
return ret;
}
@@ -2310,17 +2308,19 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
fs_info->workers =
- btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
- max_active, 16);
+ btrfs_alloc_workqueue(fs_info, "worker",
+ flags | WQ_HIGHPRI, max_active, 16);
fs_info->delalloc_workers =
- btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "delalloc",
+ flags, max_active, 2);
fs_info->flush_workers =
- btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "flush_delalloc",
+ flags, max_active, 0);
fs_info->caching_workers =
- btrfs_alloc_workqueue("cache", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
/*
* a higher idle thresh on the submit workers makes it much more
@@ -2328,41 +2328,48 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
* devices
*/
fs_info->submit_workers =
- btrfs_alloc_workqueue("submit", flags,
+ btrfs_alloc_workqueue(fs_info, "submit", flags,
min_t(u64, fs_devices->num_devices,
max_active), 64);
fs_info->fixup_workers =
- btrfs_alloc_workqueue("fixup", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
/*
* endios are largely parallel and should have a very
* low idle thresh
*/
fs_info->endio_workers =
- btrfs_alloc_workqueue("endio", flags, max_active, 4);
+ btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
fs_info->endio_meta_workers =
- btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
+ btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
+ max_active, 4);
fs_info->endio_meta_write_workers =
- btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
+ max_active, 2);
fs_info->endio_raid56_workers =
- btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
+ btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
+ max_active, 4);
fs_info->endio_repair_workers =
- btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
fs_info->rmw_workers =
- btrfs_alloc_workqueue("rmw", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
fs_info->endio_write_workers =
- btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "endio-write", flags,
+ max_active, 2);
fs_info->endio_freespace_worker =
- btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
+ max_active, 0);
fs_info->delayed_workers =
- btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
+ max_active, 0);
fs_info->readahead_workers =
- btrfs_alloc_workqueue("readahead", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "readahead", flags,
+ max_active, 2);
fs_info->qgroup_rescan_workers =
- btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
fs_info->extent_workers =
- btrfs_alloc_workqueue("extent-refs", flags,
+ btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
min_t(u64, fs_devices->num_devices,
max_active), 8);
@@ -3010,8 +3017,8 @@ retry_root_backup:
if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
- if (!btrfs_test_opt(tree_root, SSD) &&
- !btrfs_test_opt(tree_root, NOSSD) &&
+ if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
+ !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
!fs_info->fs_devices->rotating) {
btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
btrfs_set_opt(fs_info->mount_opt, SSD);
@@ -3024,9 +3031,9 @@ retry_root_backup:
btrfs_apply_pending_changes(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
+ if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
ret = btrfsic_mount(tree_root, fs_devices,
- btrfs_test_opt(tree_root,
+ btrfs_test_opt(tree_root->fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0,
fs_info->check_integrity_print_mask);
@@ -3042,7 +3049,7 @@ retry_root_backup:
/* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
- !btrfs_test_opt(tree_root, NOLOGREPLAY)) {
+ !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
err = ret;
@@ -3083,7 +3090,7 @@ retry_root_backup:
if (sb->s_flags & MS_RDONLY)
return 0;
- if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
+ if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "creating free space tree");
ret = btrfs_create_free_space_tree(fs_info);
@@ -3120,7 +3127,7 @@ retry_root_backup:
btrfs_qgroup_rescan_resume(fs_info);
- if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
+ if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) &&
btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "clearing free space tree");
ret = btrfs_clear_free_space_tree(fs_info);
@@ -3141,7 +3148,7 @@ retry_root_backup:
close_ctree(tree_root);
return ret;
}
- } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
+ } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
fs_info->generation !=
btrfs_super_uuid_tree_generation(disk_super)) {
btrfs_info(fs_info, "checking UUID tree");
@@ -3218,7 +3225,7 @@ fail:
return err;
recovery_tree_root:
- if (!btrfs_test_opt(tree_root, USEBACKUPROOT))
+ if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
goto fail_tree_roots;
free_root_pointers(fs_info, 0);
@@ -3634,7 +3641,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
int total_errors = 0;
u64 flags;
- do_barriers = !btrfs_test_opt(root, NOBARRIER);
+ do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
backup_super_roots(root->fs_info);
sb = root->fs_info->super_for_commit;
@@ -3918,7 +3925,7 @@ void close_ctree(struct btrfs_root *root)
iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(root, CHECK_INTEGRITY))
+ if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
btrfsic_unmount(root, fs_info->fs_devices);
#endif
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index dbf3e1aab69e..b3207a0e09f7 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -90,7 +90,8 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
void btrfs_free_fs_root(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
+ u32 sectorsize, u32 nodesize);
#endif
/*
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b480fd555774..61b494e8e604 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -111,6 +111,16 @@ static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int reserved);
+static int __reserve_metadata_bytes(struct btrfs_root *root,
+ struct btrfs_space_info *space_info,
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush);
+static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes);
+static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -2170,7 +2180,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
out:
btrfs_free_path(path);
return ret;
@@ -2194,7 +2204,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(node, ref, node->action);
+ trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
@@ -2349,7 +2359,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
SKINNY_METADATA);
ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(node, ref, node->action);
+ trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
parent = ref->parent;
@@ -2413,7 +2423,8 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
*/
BUG_ON(extent_op);
head = btrfs_delayed_node_to_head(node);
- trace_run_delayed_ref_head(node, head, node->action);
+ trace_run_delayed_ref_head(root->fs_info, node, head,
+ node->action);
if (insert_reserved) {
btrfs_pin_extent(root, node->bytenr,
@@ -2768,7 +2779,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
u64 num_csums_per_leaf;
u64 num_csums;
- csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+ csum_size = BTRFS_MAX_ITEM_SIZE(root);
num_csums_per_leaf = div64_u64(csum_size,
(u64)btrfs_super_csum_size(root->fs_info->super_copy));
num_csums = div64_u64(csum_bytes, root->sectorsize);
@@ -2960,7 +2971,7 @@ again:
trans->can_flush_pending_bgs = false;
ret = __btrfs_run_delayed_refs(trans, root, count);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3224,7 +3235,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
u64, u64, u64, u64, u64, u64);
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return 0;
ref_root = btrfs_header_owner(buf);
@@ -3419,7 +3430,7 @@ again:
* transaction, this only happens in really bad situations
* anyway.
*/
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_put;
}
WARN_ON(ret);
@@ -3437,7 +3448,7 @@ again:
spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED ||
- !btrfs_test_opt(root, SPACE_CACHE)) {
+ !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
/*
* don't bother trying to write stuff out _if_
* a) we're not cached,
@@ -3514,7 +3525,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
if (list_empty(&cur_trans->dirty_bgs) ||
- !btrfs_test_opt(root, SPACE_CACHE))
+ !btrfs_test_opt(root->fs_info, SPACE_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -3659,7 +3670,7 @@ again:
}
spin_unlock(&cur_trans->dirty_bgs_lock);
} else if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
}
@@ -3805,7 +3816,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
cache);
}
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
/* if its not on the io list, we need to put the block group */
@@ -3913,6 +3924,7 @@ static const char *alloc_name(u64 flags)
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
+ u64 bytes_readonly,
struct btrfs_space_info **space_info)
{
struct btrfs_space_info *found;
@@ -3933,8 +3945,11 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->disk_total += total_bytes * factor;
found->bytes_used += bytes_used;
found->disk_used += bytes_used * factor;
+ found->bytes_readonly += bytes_readonly;
if (total_bytes > 0)
found->full = 0;
+ space_info_add_new_bytes(info, found, total_bytes -
+ bytes_used - bytes_readonly);
spin_unlock(&found->lock);
*space_info = found;
return 0;
@@ -3960,7 +3975,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->disk_used = bytes_used * factor;
found->bytes_pinned = 0;
found->bytes_reserved = 0;
- found->bytes_readonly = 0;
+ found->bytes_readonly = bytes_readonly;
found->bytes_may_use = 0;
found->full = 0;
found->max_extent_size = 0;
@@ -3969,6 +3984,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->flush = 0;
init_waitqueue_head(&found->wait);
INIT_LIST_HEAD(&found->ro_bgs);
+ INIT_LIST_HEAD(&found->tickets);
+ INIT_LIST_HEAD(&found->priority_tickets);
ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
info->space_info_kobj, "%s",
@@ -4427,7 +4444,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
btrfs_calc_trans_metadata_size(root, 1);
- if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
left, thresh, type);
dump_space_info(info, 0, 0);
@@ -4470,7 +4487,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info = __find_space_info(extent_root->fs_info, flags);
if (!space_info) {
ret = update_space_info(extent_root->fs_info, flags,
- 0, 0, &space_info);
+ 0, 0, 0, &space_info);
BUG_ON(ret); /* -ENOMEM */
}
BUG_ON(!space_info); /* Logic error */
@@ -4572,7 +4589,7 @@ out:
*/
if (trans->can_flush_pending_bgs &&
trans->chunk_bytes_reserved >= (u64)SZ_2M) {
- btrfs_create_pending_block_groups(trans, trans->root);
+ btrfs_create_pending_block_groups(trans, extent_root);
btrfs_trans_release_chunk_metadata(trans);
}
return ret;
@@ -4582,12 +4599,19 @@ static int can_overcommit(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
- u64 profile = btrfs_get_alloc_profile(root, 0);
+ struct btrfs_block_rsv *global_rsv;
+ u64 profile;
u64 space_size;
u64 avail;
u64 used;
+ /* Don't overcommit when in mixed mode. */
+ if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
+ return 0;
+
+ BUG_ON(root->fs_info == NULL);
+ global_rsv = &root->fs_info->global_block_rsv;
+ profile = btrfs_get_alloc_profile(root, 0);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly;
@@ -4739,6 +4763,11 @@ skip_async:
spin_unlock(&space_info->lock);
break;
}
+ if (list_empty(&space_info->tickets) &&
+ list_empty(&space_info->priority_tickets)) {
+ spin_unlock(&space_info->lock);
+ break;
+ }
spin_unlock(&space_info->lock);
loops++;
@@ -4807,13 +4836,11 @@ commit:
return btrfs_commit_transaction(trans, root);
}
-enum flush_state {
- FLUSH_DELAYED_ITEMS_NR = 1,
- FLUSH_DELAYED_ITEMS = 2,
- FLUSH_DELALLOC = 3,
- FLUSH_DELALLOC_WAIT = 4,
- ALLOC_CHUNK = 5,
- COMMIT_TRANS = 6,
+struct reserve_ticket {
+ u64 bytes;
+ int error;
+ struct list_head list;
+ wait_queue_head_t wait;
};
static int flush_space(struct btrfs_root *root,
@@ -4866,6 +4893,8 @@ static int flush_space(struct btrfs_root *root,
break;
}
+ trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
+ orig_bytes, state, ret);
return ret;
}
@@ -4873,17 +4902,22 @@ static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
struct btrfs_space_info *space_info)
{
+ struct reserve_ticket *ticket;
u64 used;
u64 expected;
- u64 to_reclaim;
+ u64 to_reclaim = 0;
to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
- spin_lock(&space_info->lock);
if (can_overcommit(root, space_info, to_reclaim,
- BTRFS_RESERVE_FLUSH_ALL)) {
- to_reclaim = 0;
- goto out;
- }
+ BTRFS_RESERVE_FLUSH_ALL))
+ return 0;
+
+ list_for_each_entry(ticket, &space_info->tickets, list)
+ to_reclaim += ticket->bytes;
+ list_for_each_entry(ticket, &space_info->priority_tickets, list)
+ to_reclaim += ticket->bytes;
+ if (to_reclaim)
+ return to_reclaim;
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
@@ -4899,14 +4933,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
to_reclaim = 0;
to_reclaim = min(to_reclaim, space_info->bytes_may_use +
space_info->bytes_reserved);
-out:
- spin_unlock(&space_info->lock);
-
return to_reclaim;
}
static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
- struct btrfs_fs_info *fs_info, u64 used)
+ struct btrfs_root *root, u64 used)
{
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
@@ -4914,73 +4945,177 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
return 0;
- return (used >= thresh && !btrfs_fs_closing(fs_info) &&
- !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
+ if (!btrfs_calc_reclaim_metadata_size(root, space_info))
+ return 0;
+
+ return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
+ !test_bit(BTRFS_FS_STATE_REMOUNTING,
+ &root->fs_info->fs_state));
}
-static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
- struct btrfs_fs_info *fs_info,
- int flush_state)
+static void wake_all_tickets(struct list_head *head)
{
- u64 used;
-
- spin_lock(&space_info->lock);
- /*
- * We run out of space and have not got any free space via flush_space,
- * so don't bother doing async reclaim.
- */
- if (flush_state > COMMIT_TRANS && space_info->full) {
- spin_unlock(&space_info->lock);
- return 0;
- }
+ struct reserve_ticket *ticket;
- used = space_info->bytes_used + space_info->bytes_reserved +
- space_info->bytes_pinned + space_info->bytes_readonly +
- space_info->bytes_may_use;
- if (need_do_async_reclaim(space_info, fs_info, used)) {
- spin_unlock(&space_info->lock);
- return 1;
+ while (!list_empty(head)) {
+ ticket = list_first_entry(head, struct reserve_ticket, list);
+ list_del_init(&ticket->list);
+ ticket->error = -ENOSPC;
+ wake_up(&ticket->wait);
}
- spin_unlock(&space_info->lock);
-
- return 0;
}
+/*
+ * This is for normal flushers, we can wait all goddamned day if we want to. We
+ * will loop and continuously try to flush as long as we are making progress.
+ * We count progress as clearing off tickets each time we have to loop.
+ */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
+ struct reserve_ticket *last_ticket = NULL;
struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
u64 to_reclaim;
int flush_state;
+ int commit_cycles = 0;
fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ spin_lock(&space_info->lock);
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
space_info);
- if (!to_reclaim)
+ if (!to_reclaim) {
+ space_info->flush = 0;
+ spin_unlock(&space_info->lock);
return;
+ }
+ last_ticket = list_first_entry(&space_info->tickets,
+ struct reserve_ticket, list);
+ spin_unlock(&space_info->lock);
flush_state = FLUSH_DELAYED_ITEMS_NR;
do {
+ struct reserve_ticket *ticket;
+ int ret;
+
+ ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
+ to_reclaim, flush_state);
+ spin_lock(&space_info->lock);
+ if (list_empty(&space_info->tickets)) {
+ space_info->flush = 0;
+ spin_unlock(&space_info->lock);
+ return;
+ }
+ to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+ space_info);
+ ticket = list_first_entry(&space_info->tickets,
+ struct reserve_ticket, list);
+ if (last_ticket == ticket) {
+ flush_state++;
+ } else {
+ last_ticket = ticket;
+ flush_state = FLUSH_DELAYED_ITEMS_NR;
+ if (commit_cycles)
+ commit_cycles--;
+ }
+
+ if (flush_state > COMMIT_TRANS) {
+ commit_cycles++;
+ if (commit_cycles > 2) {
+ wake_all_tickets(&space_info->tickets);
+ space_info->flush = 0;
+ } else {
+ flush_state = FLUSH_DELAYED_ITEMS_NR;
+ }
+ }
+ spin_unlock(&space_info->lock);
+ } while (flush_state <= COMMIT_TRANS);
+}
+
+void btrfs_init_async_reclaim_work(struct work_struct *work)
+{
+ INIT_WORK(work, btrfs_async_reclaim_metadata_space);
+}
+
+static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket)
+{
+ u64 to_reclaim;
+ int flush_state = FLUSH_DELAYED_ITEMS_NR;
+
+ spin_lock(&space_info->lock);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+ space_info);
+ if (!to_reclaim) {
+ spin_unlock(&space_info->lock);
+ return;
+ }
+ spin_unlock(&space_info->lock);
+
+ do {
flush_space(fs_info->fs_root, space_info, to_reclaim,
to_reclaim, flush_state);
flush_state++;
- if (!btrfs_need_do_async_reclaim(space_info, fs_info,
- flush_state))
+ spin_lock(&space_info->lock);
+ if (ticket->bytes == 0) {
+ spin_unlock(&space_info->lock);
return;
+ }
+ spin_unlock(&space_info->lock);
+
+ /*
+ * Priority flushers can't wait on delalloc without
+ * deadlocking.
+ */
+ if (flush_state == FLUSH_DELALLOC ||
+ flush_state == FLUSH_DELALLOC_WAIT)
+ flush_state = ALLOC_CHUNK;
} while (flush_state < COMMIT_TRANS);
}
-void btrfs_init_async_reclaim_work(struct work_struct *work)
+static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket, u64 orig_bytes)
+
{
- INIT_WORK(work, btrfs_async_reclaim_metadata_space);
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
+ spin_lock(&space_info->lock);
+ while (ticket->bytes > 0 && ticket->error == 0) {
+ ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
+ if (ret) {
+ ret = -EINTR;
+ break;
+ }
+ spin_unlock(&space_info->lock);
+
+ schedule();
+
+ finish_wait(&ticket->wait, &wait);
+ spin_lock(&space_info->lock);
+ }
+ if (!ret)
+ ret = ticket->error;
+ if (!list_empty(&ticket->list))
+ list_del_init(&ticket->list);
+ if (ticket->bytes && ticket->bytes < orig_bytes) {
+ u64 num_bytes = orig_bytes - ticket->bytes;
+ space_info->bytes_may_use -= num_bytes;
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags, num_bytes, 0);
+ }
+ spin_unlock(&space_info->lock);
+
+ return ret;
}
/**
* reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
* @root - the root we're allocating for
- * @block_rsv - the block_rsv we're allocating for
+ * @space_info - the space info we want to allocate from
* @orig_bytes - the number of bytes we want
* @flush - whether or not we can flush to make our reservation
*
@@ -4991,81 +5126,36 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-static int reserve_metadata_bytes(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 orig_bytes,
- enum btrfs_reserve_flush_enum flush)
+static int __reserve_metadata_bytes(struct btrfs_root *root,
+ struct btrfs_space_info *space_info,
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_space_info *space_info = block_rsv->space_info;
+ struct reserve_ticket ticket;
u64 used;
- u64 num_bytes = orig_bytes;
- int flush_state = FLUSH_DELAYED_ITEMS_NR;
int ret = 0;
- bool flushing = false;
-
-again:
- ret = 0;
- spin_lock(&space_info->lock);
- /*
- * We only want to wait if somebody other than us is flushing and we
- * are actually allowed to flush all things.
- */
- while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
- space_info->flush) {
- spin_unlock(&space_info->lock);
- /*
- * If we have a trans handle we can't wait because the flusher
- * may have to commit the transaction, which would mean we would
- * deadlock since we are waiting for the flusher to finish, but
- * hold the current transaction open.
- */
- if (current->journal_info)
- return -EAGAIN;
- ret = wait_event_killable(space_info->wait, !space_info->flush);
- /* Must have been killed, return */
- if (ret)
- return -EINTR;
- spin_lock(&space_info->lock);
- }
+ ASSERT(orig_bytes);
+ ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
+ spin_lock(&space_info->lock);
ret = -ENOSPC;
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
space_info->bytes_may_use;
/*
- * The idea here is that we've not already over-reserved the block group
- * then we can go ahead and save our reservation first and then start
- * flushing if we need to. Otherwise if we've already overcommitted
- * lets start flushing stuff first and then come back and try to make
- * our reservation.
+ * If we have enough space then hooray, make our reservation and carry
+ * on. If not, see if we can overcommit, and if we can, hooray, carry on.
+ * If not, things get more complicated.
*/
- if (used <= space_info->total_bytes) {
- if (used + orig_bytes <= space_info->total_bytes) {
- space_info->bytes_may_use += orig_bytes;
- trace_btrfs_space_reservation(root->fs_info,
- "space_info", space_info->flags, orig_bytes, 1);
- ret = 0;
- } else {
- /*
- * Ok set num_bytes to orig_bytes since we aren't
- * overocmmitted, this way we only try and reclaim what
- * we need.
- */
- num_bytes = orig_bytes;
- }
- } else {
- /*
- * Ok we're over committed, set num_bytes to the overcommitted
- * amount plus the amount of bytes that we need for this
- * reservation.
- */
- num_bytes = used - space_info->total_bytes +
- (orig_bytes * 2);
- }
-
- if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
+ if (used + orig_bytes <= space_info->total_bytes) {
+ space_info->bytes_may_use += orig_bytes;
+ trace_btrfs_space_reservation(root->fs_info, "space_info",
+ space_info->flags, orig_bytes,
+ 1);
+ ret = 0;
+ } else if (can_overcommit(root, space_info, orig_bytes, flush)) {
space_info->bytes_may_use += orig_bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
space_info->flags, orig_bytes,
@@ -5074,16 +5164,31 @@ again:
}
/*
- * Couldn't make our reservation, save our place so while we're trying
- * to reclaim space we can actually use it instead of somebody else
- * stealing it from us.
+ * If we couldn't make a reservation then setup our reservation ticket
+ * and kick the async worker if it's not already running.
*
- * We make the other tasks wait for the flush only when we can flush
- * all things.
+ * If we are a priority flusher then we just need to add our ticket to
+ * the list and we will do our own flushing further down.
*/
if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
- flushing = true;
- space_info->flush = 1;
+ ticket.bytes = orig_bytes;
+ ticket.error = 0;
+ init_waitqueue_head(&ticket.wait);
+ if (flush == BTRFS_RESERVE_FLUSH_ALL) {
+ list_add_tail(&ticket.list, &space_info->tickets);
+ if (!space_info->flush) {
+ space_info->flush = 1;
+ trace_btrfs_trigger_flush(root->fs_info,
+ space_info->flags,
+ orig_bytes, flush,
+ "enospc");
+ queue_work(system_unbound_wq,
+ &root->fs_info->async_reclaim_work);
+ }
+ } else {
+ list_add_tail(&ticket.list,
+ &space_info->priority_tickets);
+ }
} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
used += orig_bytes;
/*
@@ -5092,39 +5197,67 @@ again:
* the async reclaim as we will panic.
*/
if (!root->fs_info->log_root_recovering &&
- need_do_async_reclaim(space_info, root->fs_info, used) &&
- !work_busy(&root->fs_info->async_reclaim_work))
+ need_do_async_reclaim(space_info, root, used) &&
+ !work_busy(&root->fs_info->async_reclaim_work)) {
+ trace_btrfs_trigger_flush(root->fs_info,
+ space_info->flags,
+ orig_bytes, flush,
+ "preempt");
queue_work(system_unbound_wq,
&root->fs_info->async_reclaim_work);
+ }
}
spin_unlock(&space_info->lock);
-
if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
- goto out;
+ return ret;
- ret = flush_space(root, space_info, num_bytes, orig_bytes,
- flush_state);
- flush_state++;
+ if (flush == BTRFS_RESERVE_FLUSH_ALL)
+ return wait_reserve_ticket(root->fs_info, space_info, &ticket,
+ orig_bytes);
- /*
- * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
- * would happen. So skip delalloc flush.
- */
- if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
- (flush_state == FLUSH_DELALLOC ||
- flush_state == FLUSH_DELALLOC_WAIT))
- flush_state = ALLOC_CHUNK;
+ ret = 0;
+ priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
+ spin_lock(&space_info->lock);
+ if (ticket.bytes) {
+ if (ticket.bytes < orig_bytes) {
+ u64 num_bytes = orig_bytes - ticket.bytes;
+ space_info->bytes_may_use -= num_bytes;
+ trace_btrfs_space_reservation(root->fs_info,
+ "space_info", space_info->flags,
+ num_bytes, 0);
- if (!ret)
- goto again;
- else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
- flush_state < COMMIT_TRANS)
- goto again;
- else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
- flush_state <= COMMIT_TRANS)
- goto again;
+ }
+ list_del_init(&ticket.list);
+ ret = -ENOSPC;
+ }
+ spin_unlock(&space_info->lock);
+ ASSERT(list_empty(&ticket.list));
+ return ret;
+}
-out:
+/**
+ * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
+ * @root - the root we're allocating for
+ * @block_rsv - the block_rsv we're allocating for
+ * @orig_bytes - the number of bytes we want
+ * @flush - whether or not we can flush to make our reservation
+ *
+ * This will reserve orig_bytes number of bytes from the space info associated
+ * with the block_rsv. If there is not enough space it will make an attempt to
+ * flush out space to make room. It will do this by flushing delalloc if
+ * possible or committing the transaction. If flush is 0 then no attempts to
+ * regain reservations will be made and this will fail if there is not enough
+ * space already.
+ */
+static int reserve_metadata_bytes(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ int ret;
+
+ ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
+ flush);
if (ret == -ENOSPC &&
unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
struct btrfs_block_rsv *global_rsv =
@@ -5137,13 +5270,8 @@ out:
if (ret == -ENOSPC)
trace_btrfs_space_reservation(root->fs_info,
"space_info:enospc",
- space_info->flags, orig_bytes, 1);
- if (flushing) {
- spin_lock(&space_info->lock);
- space_info->flush = 0;
- wake_up_all(&space_info->wait);
- spin_unlock(&space_info->lock);
- }
+ block_rsv->space_info->flags,
+ orig_bytes, 1);
return ret;
}
@@ -5219,6 +5347,108 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
return 0;
}
+/*
+ * This is for space we already have accounted in space_info->bytes_may_use, so
+ * basically when we're returning space from block_rsv's.
+ */
+static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ struct reserve_ticket *ticket;
+ struct list_head *head;
+ u64 used;
+ enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
+ bool check_overcommit = false;
+
+ spin_lock(&space_info->lock);
+ head = &space_info->priority_tickets;
+
+ /*
+ * If we are over our limit then we need to check and see if we can
+ * overcommit, and if we can't then we just need to free up our space
+ * and not satisfy any requests.
+ */
+ used = space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ space_info->bytes_may_use;
+ if (used - num_bytes >= space_info->total_bytes)
+ check_overcommit = true;
+again:
+ while (!list_empty(head) && num_bytes) {
+ ticket = list_first_entry(head, struct reserve_ticket,
+ list);
+ /*
+ * We use 0 bytes because this space is already reserved, so
+ * adding the ticket space would be a double count.
+ */
+ if (check_overcommit &&
+ !can_overcommit(fs_info->extent_root, space_info, 0,
+ flush))
+ break;
+ if (num_bytes >= ticket->bytes) {
+ list_del_init(&ticket->list);
+ num_bytes -= ticket->bytes;
+ ticket->bytes = 0;
+ wake_up(&ticket->wait);
+ } else {
+ ticket->bytes -= num_bytes;
+ num_bytes = 0;
+ }
+ }
+
+ if (num_bytes && head == &space_info->priority_tickets) {
+ head = &space_info->tickets;
+ flush = BTRFS_RESERVE_FLUSH_ALL;
+ goto again;
+ }
+ space_info->bytes_may_use -= num_bytes;
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags, num_bytes, 0);
+ spin_unlock(&space_info->lock);
+}
+
+/*
+ * This is for newly allocated space that isn't accounted in
+ * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
+ * we use this helper.
+ */
+static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ struct reserve_ticket *ticket;
+ struct list_head *head = &space_info->priority_tickets;
+
+again:
+ while (!list_empty(head) && num_bytes) {
+ ticket = list_first_entry(head, struct reserve_ticket,
+ list);
+ if (num_bytes >= ticket->bytes) {
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags,
+ ticket->bytes, 1);
+ list_del_init(&ticket->list);
+ num_bytes -= ticket->bytes;
+ space_info->bytes_may_use += ticket->bytes;
+ ticket->bytes = 0;
+ wake_up(&ticket->wait);
+ } else {
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags,
+ num_bytes, 1);
+ space_info->bytes_may_use += num_bytes;
+ ticket->bytes -= num_bytes;
+ num_bytes = 0;
+ }
+ }
+
+ if (num_bytes && head == &space_info->priority_tickets) {
+ head = &space_info->tickets;
+ goto again;
+ }
+}
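
Both helpers walk the priority ticket list first and then the ordinary list, satisfying waiters in FIFO order until the bytes run out. A minimal userspace model of that draining loop, using a plain array instead of the kernel's list_head and waitqueue machinery:

#include <stdint.h>
#include <stdio.h>

struct ticket {
	uint64_t bytes;		/* bytes still missing for this waiter */
	int satisfied;
};

/* Hand out avail bytes to tickets in FIFO order. */
static uint64_t drain_tickets(struct ticket *t, int nr, uint64_t avail)
{
	for (int i = 0; i < nr && avail; i++) {
		if (t[i].satisfied)
			continue;
		if (avail >= t[i].bytes) {
			avail -= t[i].bytes;
			t[i].bytes = 0;
			t[i].satisfied = 1;	/* would wake_up() the waiter */
		} else {
			t[i].bytes -= avail;	/* partially filled, keeps waiting */
			avail = 0;
		}
	}
	return avail;			/* leftover stays with the space_info */
}

int main(void)
{
	struct ticket tickets[3] = { { 4096, 0 }, { 8192, 0 }, { 4096, 0 } };
	uint64_t left = drain_tickets(tickets, 3, 10000);

	for (int i = 0; i < 3; i++)
		printf("ticket %d: missing %llu, satisfied %d\n", i,
		       (unsigned long long)tickets[i].bytes, tickets[i].satisfied);
	printf("leftover %llu\n", (unsigned long long)left);
	return 0;
}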
+
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
struct btrfs_block_rsv *dest, u64 num_bytes)
@@ -5253,18 +5483,15 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
}
spin_unlock(&dest->lock);
}
- if (num_bytes) {
- spin_lock(&space_info->lock);
- space_info->bytes_may_use -= num_bytes;
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags, num_bytes, 0);
- spin_unlock(&space_info->lock);
- }
+ if (num_bytes)
+ space_info_add_old_bytes(fs_info, space_info,
+ num_bytes);
}
}
-static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
- struct btrfs_block_rsv *dst, u64 num_bytes)
+int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
+ struct btrfs_block_rsv *dst, u64 num_bytes,
+ int update_size)
{
int ret;
@@ -5272,7 +5499,7 @@ static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
if (ret)
return ret;
- block_rsv_add_bytes(dst, num_bytes, 1);
+ block_rsv_add_bytes(dst, num_bytes, update_size);
return 0;
}
@@ -5379,13 +5606,6 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
- struct btrfs_block_rsv *dst_rsv,
- u64 num_bytes)
-{
- return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
-}
-
void btrfs_block_rsv_release(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes)
@@ -5398,48 +5618,21 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
num_bytes);
}
-/*
- * helper to calculate size of global block reservation.
- * the desired value is sum of space used by extent tree,
- * checksum tree and root tree
- */
-static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_space_info *sinfo;
- u64 num_bytes;
- u64 meta_used;
- u64 data_used;
- int csum_size = btrfs_super_csum_size(fs_info->super_copy);
-
- sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
- spin_lock(&sinfo->lock);
- data_used = sinfo->bytes_used;
- spin_unlock(&sinfo->lock);
-
- sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
- spin_lock(&sinfo->lock);
- if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
- data_used = 0;
- meta_used = sinfo->bytes_used;
- spin_unlock(&sinfo->lock);
-
- num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
- csum_size * 2;
- num_bytes += div_u64(data_used + meta_used, 50);
-
- if (num_bytes * 3 > meta_used)
- num_bytes = div_u64(meta_used, 3);
-
- return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
-}
-
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
struct btrfs_space_info *sinfo = block_rsv->space_info;
u64 num_bytes;
- num_bytes = calc_global_metadata_size(fs_info);
+ /*
+ * The global block rsv is based on the size of the extent tree, the
+ * checksum tree and the root tree. If the fs is empty we want to set
+ * it to a minimal amount for safety.
+ */
+ num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
+ btrfs_root_used(&fs_info->csum_root->root_item) +
+ btrfs_root_used(&fs_info->tree_root->root_item);
+ num_bytes = max_t(u64, num_bytes, SZ_16M);
spin_lock(&sinfo->lock);
spin_lock(&block_rsv->lock);
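
The new sizing rule is simply the sum of the bytes used by the extent, checksum and root trees with a 16MiB floor. A tiny standalone illustration (the root-usage numbers are made up):

#include <stdint.h>
#include <stdio.h>

#define SZ_16M (16ULL * 1024 * 1024)

/* Sum of the three tree sizes, clamped to at least 16MiB. */
static uint64_t global_rsv_size(uint64_t extent_root_used,
				uint64_t csum_root_used,
				uint64_t tree_root_used)
{
	uint64_t num_bytes = extent_root_used + csum_root_used +
			     tree_root_used;

	return num_bytes > SZ_16M ? num_bytes : SZ_16M;
}

int main(void)
{
	/* An almost-empty fs is still given the 16MiB floor. */
	printf("%llu\n", (unsigned long long)global_rsv_size(65536, 16384, 16384));
	/* A bigger fs gets the sum of the three trees. */
	printf("%llu\n", (unsigned long long)
	       global_rsv_size(64ULL << 20, 8ULL << 20, 1ULL << 20));
	return 0;
}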
@@ -5537,7 +5730,7 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
*/
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
- struct btrfs_fs_info *fs_info = trans->root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
if (!trans->chunk_bytes_reserved)
return;
@@ -5554,7 +5747,13 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
+ /*
+ * We always use trans->block_rsv here because we will have reserved
+ * space for our orphan when starting the transaction. Using
+ * get_block_rsv() here can pick the wrong block rsv, since we could be
+ * doing a reloc inode for a non-refcounted root.
+ */
+ struct btrfs_block_rsv *src_rsv = trans->block_rsv;
struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
/*
@@ -5565,7 +5764,7 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
trace_btrfs_space_reservation(root->fs_info, "orphan",
btrfs_ino(inode), num_bytes, 1);
- return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
+ return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}
void btrfs_orphan_release_metadata(struct inode *inode)
@@ -5620,7 +5819,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
BTRFS_RESERVE_FLUSH_ALL);
if (ret == -ENOSPC && use_global_rsv)
- ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
+ ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
if (ret && *qgroup_reserved)
btrfs_qgroup_free_meta(root, *qgroup_reserved);
@@ -5730,21 +5929,26 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
u64 to_reserve = 0;
u64 csum_bytes;
unsigned nr_extents = 0;
- int extra_reserve = 0;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
bool delalloc_lock = true;
u64 to_free = 0;
unsigned dropped;
+ bool release_extra = false;
/* If we are a free space inode we need to not flush since we will be in
* the middle of a transaction commit. We also don't need the delalloc
* mutex since we won't race with anybody. We need this mostly to make
* lockdep shut its filthy mouth.
+ *
+ * If we have a transaction open (can happen if we call truncate_block
+ * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
*/
if (btrfs_is_free_space_inode(inode)) {
flush = BTRFS_RESERVE_NO_FLUSH;
delalloc_lock = false;
+ } else if (current->journal_info) {
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
}
if (flush != BTRFS_RESERVE_NO_FLUSH &&
@@ -5761,24 +5965,15 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
BTRFS_I(inode)->outstanding_extents += nr_extents;
- nr_extents = 0;
+ nr_extents = 0;
if (BTRFS_I(inode)->outstanding_extents >
BTRFS_I(inode)->reserved_extents)
- nr_extents = BTRFS_I(inode)->outstanding_extents -
+ nr_extents += BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
- /*
- * Add an item to reserve for updating the inode when we complete the
- * delalloc io.
- */
- if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &BTRFS_I(inode)->runtime_flags)) {
- nr_extents++;
- extra_reserve = 1;
- }
-
- to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+ /* We always want to reserve a slot for updating the inode. */
+ to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
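
The reservation amount is driven by how many outstanding extents still lack a reservation, plus one item for the inode update. A simplified userspace sketch of that arithmetic; per_item stands in for btrfs_calc_trans_metadata_size(root, 1), whose real value depends on tree height and node size, and the csum contribution is left out:

#include <stdint.h>
#include <stdio.h>

/* Reserve for every outstanding extent without a reservation, plus the
 * one slot always kept for updating the inode. */
static uint64_t delalloc_to_reserve(uint64_t outstanding, uint64_t reserved,
				    uint64_t per_item)
{
	uint64_t nr_extents = 0;

	if (outstanding > reserved)
		nr_extents = outstanding - reserved;

	return (nr_extents + 1) * per_item;
}

int main(void)
{
	/* 5 outstanding extents, 3 already reserved -> 2 new + 1 inode slot. */
	printf("%llu\n",
	       (unsigned long long)delalloc_to_reserve(5, 3, 262144));
	return 0;
}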
@@ -5790,17 +5985,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
goto out_fail;
}
- ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+ ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
if (unlikely(ret)) {
btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
goto out_fail;
}
spin_lock(&BTRFS_I(inode)->lock);
- if (extra_reserve) {
- set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &BTRFS_I(inode)->runtime_flags);
- nr_extents--;
+ if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+ &BTRFS_I(inode)->runtime_flags)) {
+ to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
+ release_extra = true;
}
BTRFS_I(inode)->reserved_extents += nr_extents;
spin_unlock(&BTRFS_I(inode)->lock);
@@ -5811,8 +6006,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (to_reserve)
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_reserve, 1);
- block_rsv_add_bytes(block_rsv, to_reserve, 1);
-
+ if (release_extra)
+ btrfs_block_rsv_release(root, block_rsv,
+ btrfs_calc_trans_metadata_size(root,
+ 1));
return 0;
out_fail:
@@ -5904,7 +6101,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
if (dropped > 0)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return;
trace_btrfs_space_reservation(root->fs_info, "delalloc",
@@ -6019,7 +6216,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
- if (btrfs_test_opt(root, SPACE_CACHE) &&
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR;
@@ -6044,6 +6241,9 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ trace_btrfs_space_reservation(root->fs_info, "pinned",
+ cache->space_info->flags,
+ num_bytes, 1);
set_extent_dirty(info->pinned_extents,
bytenr, bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
@@ -6118,10 +6318,10 @@ static int pin_down_extent(struct btrfs_root *root,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ trace_btrfs_space_reservation(root->fs_info, "pinned",
+ cache->space_info->flags, num_bytes, 1);
set_extent_dirty(root->fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
- if (reserved)
- trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
return 0;
}
@@ -6398,7 +6598,7 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
u64 *empty_cluster)
{
struct btrfs_free_cluster *ret = NULL;
- bool ssd = btrfs_test_opt(root, SSD);
+ bool ssd = btrfs_test_opt(root->fs_info, SSD);
*empty_cluster = 0;
if (btrfs_mixed_space_info(space_info))
@@ -6476,6 +6676,9 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
spin_lock(&cache->lock);
cache->pinned -= len;
space_info->bytes_pinned -= len;
+
+ trace_btrfs_space_reservation(fs_info, "pinned",
+ space_info->flags, len, 0);
space_info->max_extent_size = 0;
percpu_counter_add(&space_info->total_bytes_pinned, -len);
if (cache->ro) {
@@ -6483,17 +6686,29 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
readonly = true;
}
spin_unlock(&cache->lock);
- if (!readonly && global_rsv->space_info == space_info) {
+ if (!readonly && return_free_space &&
+ global_rsv->space_info == space_info) {
+ u64 to_add = len;
+ WARN_ON(!return_free_space);
spin_lock(&global_rsv->lock);
if (!global_rsv->full) {
- len = min(len, global_rsv->size -
- global_rsv->reserved);
- global_rsv->reserved += len;
- space_info->bytes_may_use += len;
+ to_add = min(len, global_rsv->size -
+ global_rsv->reserved);
+ global_rsv->reserved += to_add;
+ space_info->bytes_may_use += to_add;
if (global_rsv->reserved >= global_rsv->size)
global_rsv->full = 1;
+ trace_btrfs_space_reservation(fs_info,
+ "space_info",
+ space_info->flags,
+ to_add, 1);
+ len -= to_add;
}
spin_unlock(&global_rsv->lock);
+ /* Add to any tickets we may have */
+ if (len)
+ space_info_add_new_bytes(fs_info, space_info,
+ len);
}
spin_unlock(&space_info->lock);
}
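
Unpinned space first tops up the global reserve until it is full, and only the remainder is handed to waiting tickets via space_info_add_new_bytes(). A standalone sketch of that split, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

struct rsv { uint64_t size, reserved; };

/* Fill the global reserve up to its size; return what is left over. */
static uint64_t refill_global_rsv(struct rsv *global, uint64_t len)
{
	if (global->reserved < global->size) {
		uint64_t room = global->size - global->reserved;
		uint64_t to_add = len > room ? room : len;

		global->reserved += to_add;
		len -= to_add;
	}
	return len;		/* remainder goes to the ticket lists */
}

int main(void)
{
	struct rsv global = { .size = 1 << 20, .reserved = (1 << 20) - 4096 };
	uint64_t for_tickets = refill_global_rsv(&global, 16384);

	printf("global now %llu/%llu, %llu left for tickets\n",
	       (unsigned long long)global.reserved,
	       (unsigned long long)global.size,
	       (unsigned long long)for_tickets);
	return 0;
}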
@@ -6528,7 +6743,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
break;
}
- if (btrfs_test_opt(root, DISCARD))
+ if (btrfs_test_opt(root->fs_info, DISCARD))
ret = btrfs_discard_extent(root, start,
end + 1 - start, NULL);
@@ -6666,7 +6881,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
NULL, refs_to_drop,
is_data, &last_ref);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
@@ -6715,7 +6930,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
path->nodes[0]);
}
if (ret < 0) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
extent_slot = path->slots[0];
@@ -6726,10 +6941,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
bytenr, parent, root_objectid, owner_objectid,
owner_offset);
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
} else {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -6741,7 +6956,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = convert_extent_item_v0(trans, extent_root, path,
owner_objectid, 0);
if (ret < 0) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -6760,7 +6975,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_print_leaf(extent_root, path->nodes[0]);
}
if (ret < 0) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -6785,7 +7000,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_err(info, "trying to drop %d refs but we only have %Lu "
"for bytenr %Lu", refs_to_drop, refs, bytenr);
ret = -EINVAL;
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
refs -= refs_to_drop;
@@ -6808,7 +7023,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
iref, refs_to_drop,
is_data, &last_ref);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -6831,7 +7046,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
@@ -6839,7 +7054,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -6847,13 +7062,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
num_bytes);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ret = update_block_group(trans, root, bytenr, num_bytes, 0);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -7002,7 +7217,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(fs_info))
return 0;
add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
@@ -7637,8 +7852,7 @@ loop:
* can do more things.
*/
if (ret < 0 && ret != -ENOSPC)
- btrfs_abort_transaction(trans,
- root, ret);
+ btrfs_abort_transaction(trans, ret);
else
ret = 0;
if (!exist)
@@ -7692,8 +7906,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
info->flags,
info->total_bytes - info->bytes_used - info->bytes_pinned -
- info->bytes_reserved - info->bytes_readonly,
- (info->full) ? "" : "not ");
+ info->bytes_reserved - info->bytes_readonly -
+ info->bytes_may_use, (info->full) ? "" : "not ");
printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
"reserved=%llu, may_use=%llu, readonly=%llu\n",
info->total_bytes, info->bytes_used, info->bytes_pinned,
@@ -7747,7 +7961,7 @@ again:
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
- } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ } else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, flags);
@@ -7778,16 +7992,14 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
if (pin)
pin_down_extent(root, cache, start, len, 1);
else {
- if (btrfs_test_opt(root, DISCARD))
+ if (btrfs_test_opt(root->fs_info, DISCARD))
ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+ trace_btrfs_reserved_extent_free(root, start, len);
}
btrfs_put_block_group(cache);
-
- trace_btrfs_reserved_extent_free(root, start, len);
-
return ret;
}
@@ -8088,7 +8300,7 @@ again:
goto again;
}
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
/*DEFAULT_RATELIMIT_BURST*/ 1);
@@ -8142,13 +8354,15 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
SKINNY_METADATA);
- if (btrfs_test_is_dummy_root(root)) {
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ if (btrfs_is_testing(root->fs_info)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
level);
if (!IS_ERR(buf))
root->alloc_bytenr += blocksize;
return buf;
}
+#endif
block_rsv = use_block_rsv(trans, root, blocksize);
if (IS_ERR(block_rsv))
@@ -8328,7 +8542,8 @@ static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
+ if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
+ delayed_refs, qrecord))
kfree(qrecord);
spin_unlock(&delayed_refs->lock);
@@ -9113,7 +9328,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
&root->root_key,
root_item);
if (ret) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -9140,7 +9355,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
ret = btrfs_del_root(trans, tree_root, &root->root_key);
if (ret) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -9148,7 +9363,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
ret = btrfs_find_root(tree_root, &root->root_key, path,
NULL, NULL);
if (ret < 0) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
} else if (ret > 0) {
@@ -9519,7 +9734,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
int full = 0;
int ret = 0;
- debug = btrfs_test_opt(root, ENOSPC_DEBUG);
+ debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
@@ -9675,7 +9890,22 @@ static int find_first_block_group(struct btrfs_root *root,
if (found_key.objectid >= key->objectid &&
found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- ret = 0;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
+ em_tree = &root->fs_info->mapping_tree.map_tree;
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, found_key.objectid,
+ found_key.offset);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ btrfs_err(root->fs_info,
+ "logical %llu len %llu found bg but no related chunk",
+ found_key.objectid, found_key.offset);
+ ret = -ENOENT;
+ } else {
+ ret = 0;
+ }
goto out;
}
path->slots[0]++;
@@ -9791,13 +10021,15 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
space_info = list_entry(info->space_info.next,
struct btrfs_space_info,
list);
- if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
- if (WARN_ON(space_info->bytes_pinned > 0 ||
+
+ /*
+ * Do not hide this behind enospc_debug; it is important and
+ * indicates a real bug if it happens.
+ */
+ if (WARN_ON(space_info->bytes_pinned > 0 ||
space_info->bytes_reserved > 0 ||
- space_info->bytes_may_use > 0)) {
- dump_space_info(space_info, 0, 0);
- }
- }
+ space_info->bytes_may_use > 0))
+ dump_space_info(space_info, 0, 0);
list_del(&space_info->list);
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
struct kobject *kobj;
@@ -9915,10 +10147,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
path->reada = READA_FORWARD;
cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
- if (btrfs_test_opt(root, SPACE_CACHE) &&
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
need_clear = 1;
- if (btrfs_test_opt(root, CLEAR_CACHE))
+ if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
need_clear = 1;
while (1) {
@@ -9949,7 +10181,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
* b) Setting 'dirty flag' makes sure that we flush
* the new space cache info onto disk.
*/
- if (btrfs_test_opt(root, SPACE_CACHE))
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
cache->disk_cache_state = BTRFS_DC_CLEAR;
}
@@ -10005,9 +10237,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
goto error;
}
+ trace_btrfs_add_block_group(root->fs_info, cache, 0);
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
- &space_info);
+ cache->bytes_super, &space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
spin_lock(&info->block_group_cache_lock);
@@ -10020,9 +10253,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
}
cache->space_info = space_info;
- spin_lock(&cache->space_info->lock);
- cache->space_info->bytes_readonly += cache->bytes_super;
- spin_unlock(&cache->space_info->lock);
__link_block_group(space_info, cache);
@@ -10093,11 +10323,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
ret = btrfs_insert_item(trans, extent_root, &key, &item,
sizeof(item));
if (ret)
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
ret = btrfs_finish_chunk_alloc(trans, extent_root,
key.objectid, key.offset);
if (ret)
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
add_block_group_free_space(trans, root->fs_info, block_group);
/* already aborted the transaction if it failed. */
next:
@@ -10114,7 +10344,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_root *extent_root;
struct btrfs_block_group_cache *cache;
-
extent_root = root->fs_info->extent_root;
btrfs_set_log_full_commit(root->fs_info, trans);
@@ -10160,7 +10389,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
* assigned to our block group, but don't update its counters just yet.
* We want our bg to be added to the rbtree with its ->space_info set.
*/
- ret = update_space_info(root->fs_info, cache->flags, 0, 0,
+ ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
&cache->space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
@@ -10179,8 +10408,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
* Now that our block group has its ->space_info set and is inserted in
* the rbtree, update the space info's counters.
*/
+ trace_btrfs_add_block_group(root->fs_info, cache, 1);
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
- &cache->space_info);
+ cache->bytes_super, &cache->space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
spin_lock(&root->fs_info->block_group_cache_lock);
@@ -10193,16 +10423,11 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
}
update_global_block_rsv(root->fs_info);
- spin_lock(&cache->space_info->lock);
- cache->space_info->bytes_readonly += cache->bytes_super;
- spin_unlock(&cache->space_info->lock);
-
__link_block_group(cache->space_info, cache);
list_add_tail(&cache->bg_list, &trans->new_bgs);
set_avail_alloc_bits(extent_root->fs_info, type);
-
return 0;
}
@@ -10415,7 +10640,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&block_group->space_info->lock);
list_del_init(&block_group->ro_list);
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
WARN_ON(block_group->space_info->total_bytes
< block_group->key.offset);
WARN_ON(block_group->space_info->bytes_readonly
@@ -10683,7 +10908,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_unlock(&space_info->lock);
/* DISCARD can flip during remount */
- trimming = btrfs_test_opt(root, DISCARD);
+ trimming = btrfs_test_opt(root->fs_info, DISCARD);
/* Implicit trim during transaction commit. */
if (trimming)
@@ -10747,21 +10972,21 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
mixed = 1;
flags = BTRFS_BLOCK_GROUP_SYSTEM;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
if (ret)
goto out;
if (mixed) {
flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
} else {
flags = BTRFS_BLOCK_GROUP_METADATA;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
if (ret)
goto out;
flags = BTRFS_BLOCK_GROUP_DATA;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
}
out:
return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cee4cb99b8ce..44fe66b53c8b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -163,13 +163,13 @@ int __init extent_io_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
goto free_state_cache;
@@ -2049,7 +2049,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
return -EIO;
}
bio->bi_bdev = dev->bdev;
- bio->bi_rw = WRITE_SYNC;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
bio_add_page(bio, page, length, pg_offset);
if (btrfsic_submit_bio_wait(bio)) {
@@ -2697,12 +2697,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
btrfs_bio->csum = NULL;
btrfs_bio->csum_allocated = NULL;
btrfs_bio->end_io = NULL;
-
-#ifdef CONFIG_BLK_CGROUP
- /* FIXME, put this into bio_clone_bioset */
- if (bio->bi_css)
- bio_associate_blkcg(new, bio->bi_css);
-#endif
}
return new;
}
@@ -2756,7 +2750,6 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page,
if (tree->ops && tree->ops->merge_bio_hook)
ret = tree->ops->merge_bio_hook(page, offset, size, bio,
bio_flags);
- BUG_ON(ret < 0);
return ret;
}
@@ -2879,6 +2872,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
* into the tree that are removed when the IO is done (by the end_io
* handlers)
* XXX JDM: This needs looking at to ensure proper page locking
+ * Return 0 on success, otherwise return an error.
*/
static int __do_readpage(struct extent_io_tree *tree,
struct page *page,
@@ -2900,7 +2894,7 @@ static int __do_readpage(struct extent_io_tree *tree,
sector_t sector;
struct extent_map *em;
struct block_device *bdev;
- int ret;
+ int ret = 0;
int nr = 0;
size_t pg_offset = 0;
size_t iosize;
@@ -3081,6 +3075,7 @@ static int __do_readpage(struct extent_io_tree *tree,
} else {
SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1);
+ goto out;
}
cur = cur + iosize;
pg_offset += iosize;
@@ -3091,7 +3086,7 @@ out:
SetPageUptodate(page);
unlock_page(page);
}
- return 0;
+ return ret;
}
static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
@@ -5230,14 +5225,31 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
atomic_set(&eb->io_pages, num_reads);
for (i = start_i; i < num_pages; i++) {
page = eb->pages[i];
+
if (!PageUptodate(page)) {
+ if (ret) {
+ atomic_dec(&eb->io_pages);
+ unlock_page(page);
+ continue;
+ }
+
ClearPageError(page);
err = __extent_read_full_page(tree, page,
get_extent, &bio,
mirror_num, &bio_flags,
REQ_META);
- if (err)
+ if (err) {
ret = err;
+ /*
+ * We passed &bio to __extent_read_full_page
+ * above, so if it returned an error the
+ * current page was not added to the bio and
+ * has already been unlocked.
+ *
+ * We must decrement io_pages ourselves.
+ */
+ atomic_dec(&eb->io_pages);
+ }
} else {
unlock_page(page);
}
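
The comment's point is an accounting rule: io_pages was pre-loaded with the number of reads expected to complete, so when a submission fails the submitter must drop that page's share itself or the count never reaches zero. A userspace model of that rule (the error value is a placeholder):

#include <stdio.h>

struct eb_model { int io_pages; };

/* If submission fails, nobody will ever complete this page, so drop
 * our share of the count here instead of waiting for a completion
 * that will never come. */
static int submit_page(struct eb_model *eb, int should_fail)
{
	if (should_fail) {
		eb->io_pages--;		/* nothing in flight for this page */
		return -5;		/* stands in for -EIO */
	}
	return 0;			/* completion path decrements later */
}

int main(void)
{
	struct eb_model eb = { .io_pages = 4 };
	int err = 0;

	for (int i = 0; i < 4; i++) {
		int ret = submit_page(&eb, i == 2);	/* third page "fails" */

		if (ret)
			err = ret;
	}
	printf("err=%d, io_pages left for completions=%d\n", err, eb.io_pages);
	return 0;
}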
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index e0715fcfb11e..26f9ac719d20 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -13,7 +13,7 @@ int __init extent_map_init(void)
{
extent_map_cache = kmem_cache_create("btrfs_extent_map",
sizeof(struct extent_map), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!extent_map_cache)
return -ENOMEM;
return 0;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 62a81ee13a5f..d0d571c47d33 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -250,7 +250,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
offset + root->sectorsize - 1,
EXTENT_NODATASUM);
} else {
- btrfs_info(BTRFS_I(inode)->root->fs_info,
+ btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
"no csum found for inode %llu start %llu",
btrfs_ino(inode), offset);
}
@@ -699,7 +699,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
*/
ret = btrfs_split_item(trans, root, path, &key, offset);
if (ret && ret != -EAGAIN) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 2234e88cf674..9404121fd5f7 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -132,7 +132,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
static inline int __need_auto_defrag(struct btrfs_root *root)
{
- if (!btrfs_test_opt(root, AUTO_DEFRAG))
+ if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
return 0;
if (btrfs_fs_closing(root->fs_info))
@@ -950,7 +950,7 @@ delete_extent_item:
ret = btrfs_del_items(trans, root, path, del_slot,
del_nr);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
@@ -974,7 +974,7 @@ delete_extent_item:
path->slots[0] = del_slot;
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
leaf = path->nodes[0];
@@ -1190,7 +1190,7 @@ again:
goto again;
}
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1278,7 +1278,7 @@ again:
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -1629,13 +1629,11 @@ again:
* managed to copy.
*/
if (num_sectors > dirty_sectors) {
- /*
- * we round down because we don't want to count
- * any partial blocks actually sent through the
- * IO machines
- */
- release_bytes = round_down(release_bytes - copied,
- root->sectorsize);
+
+ /* release everything except the sectors we dirtied */
+ release_bytes -= dirty_sectors <<
+ root->fs_info->sb->s_blocksize_bits;
+
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
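
The new calculation keeps the reservation for the sectors that were actually dirtied and releases the rest, rather than rounding the released amount down. A small worked example with assumed 4KiB sectors (the starting reservation is simplified):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* 4KiB sectors */
	uint64_t num_sectors = 8, dirty_sectors = 5;
	uint64_t release_bytes = num_sectors << blocksize_bits;

	/* release everything except the sectors we dirtied */
	release_bytes -= dirty_sectors << blocksize_bits;
	printf("release %llu bytes, keep %llu\n",
	       (unsigned long long)release_bytes,
	       (unsigned long long)(dirty_sectors << blocksize_bits));
	return 0;
}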
@@ -2479,7 +2477,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
- min_size);
+ min_size, 0);
BUG_ON(ret);
trans->block_rsv = rsv;
@@ -2522,7 +2520,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
- rsv, min_size);
+ rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
@@ -2977,7 +2975,7 @@ int btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_inode_defrag_cachep)
return -ENOMEM;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 69d270f6602c..d571bd2b697b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -280,7 +280,7 @@ fail:
if (locked)
mutex_unlock(&trans->transaction->cache_write_mutex);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3026,7 +3026,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
* For metadata, allow allocates with smaller extents. For
* data, keep it dense.
*/
- if (btrfs_test_opt(root, SSD_SPREAD)) {
+ if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
@@ -3470,7 +3470,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item);
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
/*
@@ -3514,7 +3514,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_io_ctl io_ctl;
bool release_metadata = true;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
memset(&io_ctl, 0, sizeof(io_ctl));
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 53dbeaf6ce94..87e7e3d3e676 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -305,7 +305,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
out:
kvfree(bitmap);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -454,7 +454,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
out:
kvfree(bitmap);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -851,7 +851,7 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret)
- btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1047,7 +1047,7 @@ int add_to_free_space_tree(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret)
- btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1193,7 +1193,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
abort:
fs_info->creating_free_space_tree = 0;
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
}
@@ -1280,7 +1280,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
return 0;
abort:
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
}
@@ -1333,7 +1333,7 @@ out:
btrfs_free_path(path);
mutex_unlock(&block_group->free_space_lock);
if (ret)
- btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1410,7 +1410,7 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 70107f7c9307..aa6fabaee72e 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,7 +38,7 @@ static int caching_kthread(void *data)
int slot;
int ret;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -141,7 +141,7 @@ static void start_caching(struct btrfs_root *root)
int ret;
u64 objectid;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return;
spin_lock(&root->ino_cache_lock);
@@ -185,7 +185,7 @@ static void start_caching(struct btrfs_root *root)
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return btrfs_find_free_objectid(root, objectid);
again:
@@ -211,7 +211,7 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return;
again:
if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
@@ -251,7 +251,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
struct rb_node *n;
u64 count;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return;
while (1) {
@@ -412,7 +412,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
if (btrfs_root_refs(&root->root_item) == 0)
return 0;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -458,7 +458,7 @@ again:
BTRFS_I(inode)->generation = 0;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_put;
}
@@ -466,7 +466,7 @@ again:
ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
if (ret) {
if (ret != -ENOSPC)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_put;
}
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index df731c0ebec7..2f5975954ccf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -60,6 +60,7 @@
#include "hash.h"
#include "props.h"
#include "qgroup.h"
+#include "dedupe.h"
struct btrfs_iget_args {
struct btrfs_key *location;
@@ -105,8 +106,9 @@ static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, int unlock);
+ u64 start, u64 end, u64 delalloc_end,
+ int *page_started, unsigned long *nr_written,
+ int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
@@ -294,7 +296,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
start, aligned_end, NULL,
1, 1, extent_item_size, &extent_inserted);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -305,7 +307,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
inline_len, compressed_size,
compress_type, compressed_pages);
if (ret && ret != -ENOSPC) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
ret = 1;
@@ -374,12 +376,12 @@ static inline int inode_need_compress(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
/* force compress */
- if (btrfs_test_opt(root, FORCE_COMPRESS))
+ if (btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
return 1;
/* bad compression ratios */
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
- if (btrfs_test_opt(root, COMPRESS) ||
+ if (btrfs_test_opt(root->fs_info, COMPRESS) ||
BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
BTRFS_I(inode)->force_compress)
return 1;
@@ -585,9 +587,27 @@ cont:
will_compress = 0;
} else {
num_bytes = total_in;
+ *num_added += 1;
+
+ /*
+ * The async work queues will take care of doing actual
+ * allocation on disk for these compressed pages, and
+ * will submit them to the elevator.
+ */
+ add_async_extent(async_cow, start, num_bytes,
+ total_compressed, pages, nr_pages_ret,
+ compress_type);
+
+ if (start + num_bytes < end) {
+ start += num_bytes;
+ pages = NULL;
+ cond_resched();
+ goto again;
+ }
+ return;
}
}
- if (!will_compress && pages) {
+ if (pages) {
/*
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
@@ -602,48 +622,28 @@ cont:
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
- if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
+ if (!btrfs_test_opt(root->fs_info, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
}
- if (will_compress) {
- *num_added += 1;
-
- /* the async work queues will take care of doing actual
- * allocation on disk for these compressed pages,
- * and will submit them to the elevator.
- */
- add_async_extent(async_cow, start, num_bytes,
- total_compressed, pages, nr_pages_ret,
- compress_type);
-
- if (start + num_bytes < end) {
- start += num_bytes;
- pages = NULL;
- cond_resched();
- goto again;
- }
- } else {
cleanup_and_bail_uncompressed:
- /*
- * No compression, but we still need to write the pages in
- * the file we've been given so far. redirty the locked
- * page if it corresponds to our extent and set things up
- * for the async work queue to run cow_file_range to do
- * the normal delalloc dance
- */
- if (page_offset(locked_page) >= start &&
- page_offset(locked_page) <= end) {
- __set_page_dirty_nobuffers(locked_page);
- /* unlocked later on in the async handlers */
- }
- if (redirty)
- extent_range_redirty_for_io(inode, start, end);
- add_async_extent(async_cow, start, end - start + 1,
- 0, NULL, 0, BTRFS_COMPRESS_NONE);
- *num_added += 1;
- }
+ /*
+ * No compression, but we still need to write the pages in the file
+ * we've been given so far. Redirty the locked page if it corresponds
+ * to our extent and set things up for the async work queue to run
+ * cow_file_range to do the normal delalloc dance.
+ */
+ if (page_offset(locked_page) >= start &&
+ page_offset(locked_page) <= end)
+ __set_page_dirty_nobuffers(locked_page);
+ /* unlocked later on in the async handlers */
+
+ if (redirty)
+ extent_range_redirty_for_io(inode, start, end);
+ add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
+ BTRFS_COMPRESS_NONE);
+ *num_added += 1;
return;
@@ -712,7 +712,10 @@ retry:
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- &page_started, &nr_written, 0);
+ async_extent->start +
+ async_extent->ram_size - 1,
+ &page_started, &nr_written, 0,
+ NULL);
/* JDM XXX */
@@ -925,9 +928,9 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
*/
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- int unlock)
+ u64 start, u64 end, u64 delalloc_end,
+ int *page_started, unsigned long *nr_written,
+ int unlock, struct btrfs_dedupe_hash *hash)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 alloc_hint = 0;
@@ -1156,7 +1159,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow->start = start;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
- !btrfs_test_opt(root, FORCE_COMPRESS))
+ !btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
cur_end = end;
else
cur_end = min(end, start + SZ_512K - 1);
@@ -1418,7 +1421,8 @@ out_check:
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page,
cow_start, found_key.offset - 1,
- page_started, nr_written, 1);
+ end, page_started, nr_written, 1,
+ NULL);
if (ret) {
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
@@ -1501,8 +1505,8 @@ out_check:
}
if (cow_start != (u64)-1) {
- ret = cow_file_range(inode, locked_page, cow_start, end,
- page_started, nr_written, 1);
+ ret = cow_file_range(inode, locked_page, cow_start, end, end,
+ page_started, nr_written, 1, NULL);
if (ret)
goto error;
}
@@ -1561,8 +1565,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
} else if (!inode_need_compress(inode)) {
- ret = cow_file_range(inode, locked_page, start, end,
- page_started, nr_written, 1);
+ ret = cow_file_range(inode, locked_page, start, end, end,
+ page_started, nr_written, 1, NULL);
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
@@ -1740,7 +1744,7 @@ static void btrfs_set_bit_hook(struct inode *inode,
}
/* For sanity tests */
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return;
__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
@@ -1799,7 +1803,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
btrfs_delalloc_release_metadata(inode, len);
/* For sanity tests. */
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return;
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
@@ -1822,6 +1826,10 @@ static void btrfs_clear_bit_hook(struct inode *inode,
/*
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
+ *
+ * return 1 if the page cannot be merged into the bio
+ * return 0 if the page can be merged into the bio
+ * return a negative errno otherwise
*/
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
@@ -1840,8 +1848,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
map_length = length;
ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
&map_length, NULL, 0);
- /* Will always return 0 with map_multi == NULL */
- BUG_ON(ret < 0);
+ if (ret < 0)
+ return ret;
if (map_length < length + size)
return 1;
return 0;
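
Callers now have to handle three outcomes instead of relying on the old BUG_ON(): negative means error, 1 means the page does not fit and a new bio is needed, 0 means it merges. A minimal sketch of that caller-side pattern (the printfs are illustrative only):

#include <stdio.h>

/* Dispatch on the 1 / 0 / negative contract documented above. */
static int handle_merge_result(int ret)
{
	if (ret < 0)
		return ret;		/* propagate the error */
	if (ret)
		printf("cannot merge: submit bio, open a new one\n");
	else
		printf("page merged into current bio\n");
	return 0;
}

int main(void)
{
	handle_merge_result(0);
	handle_merge_result(1);
	printf("error path: %d\n", handle_merge_result(-5));
	return 0;
}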
@@ -2594,7 +2602,7 @@ again:
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*extent));
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -2621,7 +2629,7 @@ again:
backref->root_id, backref->inum,
new->file_pos); /* start - extent_offset */
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -2890,7 +2898,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2950,7 +2958,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->file_offset, ordered_extent->len,
trans->transid);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_unlock;
}
@@ -2960,7 +2968,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_unlock;
}
ret = 0;
@@ -3204,7 +3212,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
else
clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state);
@@ -3295,7 +3303,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (ret != -EEXIST) {
clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -3307,7 +3315,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret && ret != -EEXIST) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -4006,20 +4014,20 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
btrfs_info(root->fs_info,
"failed to delete reference to %.*s, inode %llu parent %llu",
name_len, name, ino, dir_ino);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto err;
}
skip_backref:
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto err;
}
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir_ino);
if (ret != 0 && ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto err;
}
@@ -4028,7 +4036,7 @@ skip_backref:
if (ret == -ENOENT)
ret = 0;
else if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err:
btrfs_free_path(path);
if (ret)
@@ -4142,7 +4150,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
@@ -4152,7 +4160,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
di = btrfs_search_dir_index_item(root, path, dir_ino,
@@ -4162,7 +4170,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
ret = -ENOENT;
else
ret = PTR_ERR(di);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4175,7 +4183,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4184,7 +4192,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
ret = btrfs_update_inode_fallback(trans, root, dir);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
out:
btrfs_free_path(path);
return ret;
@@ -4505,7 +4513,6 @@ search_again:
pending_del_nr);
if (err) {
btrfs_abort_transaction(trans,
- root,
err);
goto error;
}
@@ -4517,8 +4524,7 @@ search_again:
item_end,
new_size);
if (err) {
- btrfs_abort_transaction(trans,
- root, err);
+ btrfs_abort_transaction(trans, err);
goto error;
}
} else if (test_bit(BTRFS_ROOT_REF_COWS,
@@ -4582,8 +4588,7 @@ delete:
pending_del_slot,
pending_del_nr);
if (ret) {
- btrfs_abort_transaction(trans,
- root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error;
}
pending_del_nr = 0;
@@ -4616,7 +4621,7 @@ out:
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
error:
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
@@ -4785,7 +4790,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
return ret;
}
@@ -4793,7 +4798,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
0, 0, len, 0, len, 0, 0, 0);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
else
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
@@ -5020,7 +5025,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
err = btrfs_orphan_del(trans, inode);
if (err)
- btrfs_abort_transaction(trans, root, err);
+ btrfs_abort_transaction(trans, err);
btrfs_end_transaction(trans, root);
}
}
@@ -5158,11 +5163,18 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
int steal_from_global = 0;
- u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+ u64 min_size;
int ret;
trace_btrfs_inode_evict(inode);
+ if (!root) {
+ kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+ return;
+ }
+
+ min_size = btrfs_calc_trunc_metadata_size(root, 1);
+
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
@@ -5263,7 +5275,7 @@ void btrfs_evict_inode(struct inode *inode)
if (steal_from_global) {
if (!btrfs_check_space_for_delayed_refs(trans, root))
ret = btrfs_block_rsv_migrate(global_rsv, rsv,
- min_size);
+ min_size, 0);
else
ret = -ENOSPC;
}
@@ -6239,9 +6251,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
- if (btrfs_test_opt(root, NODATASUM))
+ if (btrfs_test_opt(root->fs_info, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(root->fs_info, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
}
@@ -6319,7 +6331,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
else if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -6330,7 +6342,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
current_fs_time(parent_inode->i_sb);
ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
fail_dir_item:
@@ -8197,7 +8209,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
if (err)
btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
"direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
- btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
+ btrfs_ino(dip->inode), bio_op(bio), bio->bi_opf,
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, err);
@@ -8361,7 +8373,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
if (!bio)
return -ENOMEM;
- bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
+ bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
@@ -8399,7 +8411,7 @@ next_block:
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
- bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
+ bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
@@ -9116,7 +9128,7 @@ static int btrfs_truncate(struct inode *inode)
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
- min_size);
+ min_size, 0);
BUG_ON(ret);
/*
@@ -9156,7 +9168,7 @@ static int btrfs_truncate(struct inode *inode)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
- rsv, min_size);
+ rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
}
@@ -9177,7 +9189,6 @@ static int btrfs_truncate(struct inode *inode)
ret = btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
}
-
out:
btrfs_free_block_rsv(root, rsv);
@@ -9386,25 +9397,25 @@ int btrfs_init_cachep(void)
btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
sizeof(struct btrfs_trans_handle), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
if (!btrfs_trans_handle_cachep)
goto fail;
btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
sizeof(struct btrfs_transaction), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
if (!btrfs_transaction_cachep)
goto fail;
btrfs_path_cachep = kmem_cache_create("btrfs_path",
sizeof(struct btrfs_path), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_path_cachep)
goto fail;
btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
sizeof(struct btrfs_free_space), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_cachep)
goto fail;
@@ -9554,7 +9565,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9574,7 +9585,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
ret = btrfs_update_inode(trans, dest, new_inode);
}
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9582,7 +9593,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, old_idx);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9590,7 +9601,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
old_dentry->d_name.name,
old_dentry->d_name.len, 0, new_idx);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9829,7 +9840,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9853,7 +9864,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!ret && new_inode->i_nlink == 0)
ret = btrfs_orphan_add(trans, d_inode(new_dentry));
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
@@ -9862,7 +9873,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9882,7 +9893,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
@@ -10308,7 +10319,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid,
ins.offset, 0);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
@@ -10368,7 +10379,7 @@ next:
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 05173563e4a6..14ed1e9e6bc8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -561,7 +561,7 @@ static noinline int create_subvol(struct inode *dir,
new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -570,7 +570,7 @@ static noinline int create_subvol(struct inode *dir,
ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
if (ret) {
/* We potentially lose an unused inode item here */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -583,7 +583,7 @@ static noinline int create_subvol(struct inode *dir,
*/
ret = btrfs_set_inode_index(dir, &index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -591,7 +591,7 @@ static noinline int create_subvol(struct inode *dir,
name, namelen, dir, &key,
BTRFS_FT_DIR, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -608,7 +608,7 @@ static noinline int create_subvol(struct inode *dir,
root_item->uuid, BTRFS_UUID_KEY_SUBVOL,
objectid);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
fail:
kfree(root_item);
@@ -1948,8 +1948,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
return 1;
}
-static noinline int copy_to_sk(struct btrfs_root *root,
- struct btrfs_path *path,
+static noinline int copy_to_sk(struct btrfs_path *path,
struct btrfs_key *key,
struct btrfs_ioctl_search_key *sk,
size_t *buf_size,
@@ -2120,7 +2119,7 @@ static noinline int search_ioctl(struct inode *inode,
ret = 0;
goto err;
}
- ret = copy_to_sk(root, path, &key, sk, buf_size, ubuf,
+ ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
&sk_offset, &num_found);
btrfs_release_path(path);
if (ret)
@@ -2406,7 +2405,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* rmdir(2).
*/
err = -EPERM;
- if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+ if (!btrfs_test_opt(root->fs_info, USER_SUBVOL_RM_ALLOWED))
goto out_dput;
/*
@@ -2489,7 +2488,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
dentry->d_name.len);
if (ret) {
err = ret;
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -2505,7 +2504,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
root->fs_info->tree_root,
dest->root_key.objectid);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -2515,7 +2514,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -2525,7 +2524,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -3292,7 +3291,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -3694,7 +3693,7 @@ process_slot:
if (ret) {
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans,
- root, ret);
+ ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -3702,8 +3701,7 @@ process_slot:
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret) {
- btrfs_abort_transaction(trans, root,
- ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -3735,7 +3733,6 @@ process_slot:
new_key.offset - datao);
if (ret) {
btrfs_abort_transaction(trans,
- root,
ret);
btrfs_end_transaction(trans,
root);
@@ -3772,7 +3769,6 @@ process_slot:
if (ret) {
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans,
- root,
ret);
btrfs_end_transaction(trans, root);
goto out;
@@ -3828,7 +3824,7 @@ process_slot:
last_dest_end, destoff + len, 1);
if (ret) {
if (ret != -EOPNOTSUPP)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -5164,13 +5160,13 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
root->root_key.objectid);
if (ret < 0 && ret != -EEXIST) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
ret = btrfs_commit_transaction(trans, root);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index aca8264f4a49..3b78d38173b3 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1122,7 +1122,7 @@ int __init ordered_data_init(void)
{
btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
sizeof(struct btrfs_ordered_extent), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_ordered_extent_cache)
return -ENOMEM;
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 36992128c746..cf0b444ac4f3 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -350,6 +350,7 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_root *parent_root)
{
+ struct super_block *sb = root->fs_info->sb;
struct btrfs_key key;
struct inode *parent_inode, *child_inode;
int ret;
@@ -358,12 +359,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- parent_inode = btrfs_iget(parent_root->fs_info->sb, &key,
- parent_root, NULL);
+ parent_inode = btrfs_iget(sb, &key, parent_root, NULL);
if (IS_ERR(parent_inode))
return PTR_ERR(parent_inode);
- child_inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+ child_inode = btrfs_iget(sb, &key, root, NULL);
if (IS_ERR(child_inode)) {
iput(parent_inode);
return PTR_ERR(child_inode);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9d4c05b14f6e..93ee1c18ef9d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -571,7 +571,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
- if (btrfs_test_is_dummy_root(quota_root))
+ if (btrfs_is_testing(quota_root->fs_info))
return 0;
path = btrfs_alloc_path();
@@ -728,7 +728,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
int ret;
int slot;
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return 0;
key.objectid = 0;
@@ -1453,9 +1453,10 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
return ret;
}
-struct btrfs_qgroup_extent_record
-*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record)
+struct btrfs_qgroup_extent_record *
+btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record)
{
struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
struct rb_node *parent_node = NULL;
@@ -1463,7 +1464,7 @@ struct btrfs_qgroup_extent_record
u64 bytenr = record->bytenr;
assert_spin_locked(&delayed_refs->lock);
- trace_btrfs_qgroup_insert_dirty_extent(record);
+ trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
while (*p) {
parent_node = *p;
@@ -1595,8 +1596,8 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
- trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
- cur_new_count);
+ trace_qgroup_update_counters(fs_info, qg->qgroupid,
+ cur_old_count, cur_new_count);
/* Rfer update part */
if (cur_old_count == 0 && cur_new_count > 0) {
@@ -1687,8 +1688,8 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
goto out_free;
BUG_ON(!fs_info->quota_root);
- trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
- nr_new_roots);
+ trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
+ nr_old_roots, nr_new_roots);
qgroups = ulist_alloc(GFP_NOFS);
if (!qgroups) {
@@ -1759,7 +1760,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
record = rb_entry(node, struct btrfs_qgroup_extent_record,
node);
- trace_btrfs_qgroup_account_extents(record);
+ trace_btrfs_qgroup_account_extents(fs_info, record);
if (!ret) {
/*
@@ -2195,7 +2196,7 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
return;
- btrfs_err(trans->root->fs_info,
+ btrfs_err(trans->fs_info,
"qgroups not uptodate in trans handle %p: list is%s empty, "
"seq is %#x.%x",
trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index ecb2c143ef75..710887c06aaf 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -63,9 +63,10 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-struct btrfs_qgroup_extent_record
-*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record);
+struct btrfs_qgroup_extent_record *
+btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record);
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
@@ -88,7 +89,7 @@ static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes)
{
btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
- trace_btrfs_qgroup_free_delayed_ref(ref_root, num_bytes);
+ trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0477dca154ed..b26a5aea41b4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -235,12 +235,12 @@ static void backref_cache_cleanup(struct backref_cache *cache)
cache->last_trans = 0;
for (i = 0; i < BTRFS_MAX_LEVEL; i++)
- BUG_ON(!list_empty(&cache->pending[i]));
- BUG_ON(!list_empty(&cache->changed));
- BUG_ON(!list_empty(&cache->detached));
- BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
- BUG_ON(cache->nr_nodes);
- BUG_ON(cache->nr_edges);
+ ASSERT(list_empty(&cache->pending[i]));
+ ASSERT(list_empty(&cache->changed));
+ ASSERT(list_empty(&cache->detached));
+ ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
+ ASSERT(!cache->nr_nodes);
+ ASSERT(!cache->nr_edges);
}
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
@@ -1171,8 +1171,12 @@ out:
lower = list_entry(useless.next,
struct backref_node, list);
list_del_init(&lower->list);
+ if (lower == node)
+ node = NULL;
free_backref_node(cache, lower);
}
+
+ free_backref_node(cache, node);
return ERR_PTR(err);
}
ASSERT(!node || !node->detached);
@@ -1719,7 +1723,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
@@ -1727,7 +1731,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
}
@@ -2604,25 +2608,28 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
trans->block_rsv = rc->block_rsv;
rc->reserved_bytes += num_bytes;
+
+ /*
+ * We are under a transaction here so we can only do limited flushing.
+ * If we get an enospc just kick back -EAGAIN so we know to drop the
+ * transaction and try to refill when we can flush all the things.
+ */
ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
- BTRFS_RESERVE_FLUSH_ALL);
+ BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- if (ret == -EAGAIN) {
- tmp = rc->extent_root->nodesize *
- RELOCATION_RESERVED_NODES;
- while (tmp <= rc->reserved_bytes)
- tmp <<= 1;
- /*
- * only one thread can access block_rsv at this point,
- * so we don't need hold lock to protect block_rsv.
- * we expand more reservation size here to allow enough
- * space for relocation and we will return earlier in
- * enospc case.
- */
- rc->block_rsv->size = tmp + rc->extent_root->nodesize *
- RELOCATION_RESERVED_NODES;
- }
- return ret;
+ tmp = rc->extent_root->nodesize * RELOCATION_RESERVED_NODES;
+ while (tmp <= rc->reserved_bytes)
+ tmp <<= 1;
+ /*
+ * only one thread can access block_rsv at this point,
+ * so we don't need hold lock to protect block_rsv.
+ * we expand more reservation size here to allow enough
+ * space for relocation and we will return earlier in
+ * enospc case.
+ */
+ rc->block_rsv->size = tmp + rc->extent_root->nodesize *
+ RELOCATION_RESERVED_NODES;
+ return -EAGAIN;
}
return 0;
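
The hunk above switches reserve_metadata_space() to BTRFS_RESERVE_FLUSH_LIMIT and, on ENOSPC, grows the reservation target and hands -EAGAIN back to the caller instead of looping under the open transaction; the prepare_to_relocate() hunk that follows compensates by refilling the block reservation with BTRFS_RESERVE_FLUSH_ALL before any transaction is started. A minimal standalone sketch of that caller-side retry pattern, written as an illustrative userspace model with assumed names rather than the btrfs code:

/*
 * Model of "limited flushing only while a transaction is open":
 * the reservation step reports -EAGAIN, and the caller ends the
 * transaction, flushes everything, and retries.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool flushed_everything;	/* set once a full flush has run */

/* Stand-in for a reservation that may only do limited flushing. */
static int reserve_space_limited(void)
{
	return flushed_everything ? 0 : -EAGAIN;
}

/* Stand-in for dropping the transaction and flushing all dirty state. */
static void end_transaction_and_flush_all(void)
{
	flushed_everything = true;
}

int main(void)
{
	int ret;

	while ((ret = reserve_space_limited()) == -EAGAIN)
		end_transaction_and_flush_all();

	printf("reservation %s\n", ret ? "failed" : "succeeded");
	return 0;
}
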
@@ -3871,6 +3878,7 @@ static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
struct btrfs_trans_handle *trans;
+ int ret;
rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
BTRFS_BLOCK_RSV_TEMP);
@@ -3885,6 +3893,11 @@ int prepare_to_relocate(struct reloc_control *rc)
rc->reserved_bytes = 0;
rc->block_rsv->size = rc->extent_root->nodesize *
RELOCATION_RESERVED_NODES;
+ ret = btrfs_block_rsv_refill(rc->extent_root,
+ rc->block_rsv, rc->block_rsv->size,
+ BTRFS_RESERVE_FLUSH_ALL);
+ if (ret)
+ return ret;
rc->create_reloc_tree = 1;
set_reloc_control(rc);
@@ -4643,7 +4656,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
if (rc->merge_reloc_tree) {
ret = btrfs_block_rsv_migrate(&pending->block_rsv,
rc->block_rsv,
- rc->nodes_relocated);
+ rc->nodes_relocated, 1);
if (ret)
return ret;
}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f1c30861d062..7fd7e1830cfe 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -150,7 +150,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -176,20 +176,20 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
ret = btrfs_search_slot(trans, root, key, path,
-1, 1);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_del_item(trans, root, path);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path,
key, sizeof(*item));
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
l = path->nodes[0];
@@ -448,7 +448,7 @@ again:
ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
sizeof(*ref) + name_len);
if (ret) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e08b6bc676e3..1d195d2b32c6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3785,27 +3785,27 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
if (fs_info->scrub_workers_refcnt == 0) {
if (is_dev_replace)
fs_info->scrub_workers =
- btrfs_alloc_workqueue("scrub", flags,
+ btrfs_alloc_workqueue(fs_info, "scrub", flags,
1, 4);
else
fs_info->scrub_workers =
- btrfs_alloc_workqueue("scrub", flags,
+ btrfs_alloc_workqueue(fs_info, "scrub", flags,
max_active, 4);
if (!fs_info->scrub_workers)
goto fail_scrub_workers;
fs_info->scrub_wr_completion_workers =
- btrfs_alloc_workqueue("scrubwrc", flags,
+ btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
max_active, 2);
if (!fs_info->scrub_wr_completion_workers)
goto fail_scrub_wr_completion_workers;
fs_info->scrub_nocow_workers =
- btrfs_alloc_workqueue("scrubnc", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
if (!fs_info->scrub_nocow_workers)
goto fail_scrub_nocow_workers;
fs_info->scrub_parity_workers =
- btrfs_alloc_workqueue("scrubparity", flags,
+ btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
max_active, 2);
if (!fs_info->scrub_parity_workers)
goto fail_scrub_parity_workers;
@@ -3860,7 +3860,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
- btrfs_err(fs_info,
+ btrfs_err_rl(fs_info,
"scrub: size assumption sectorsize != PAGE_SIZE "
"(%d != %lu) fails",
fs_info->chunk_root->sectorsize, PAGE_SIZE);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60e7179ed4b7..864ce334f696 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -184,6 +184,22 @@ static const char * const logtypes[] = {
"debug",
};
+
+/*
+ * Use one ratelimit state per log level so that a flood of less important
+ * messages doesn't cause more important ones to be dropped.
+ */
+static struct ratelimit_state printk_limits[] = {
+ RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
+};
+
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
@@ -192,6 +208,7 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
va_list args;
const char *type = logtypes[4];
int kern_level;
+ struct ratelimit_state *ratelimit;
va_start(args, fmt);
@@ -202,13 +219,18 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
lvl[size] = '\0';
fmt += size;
type = logtypes[kern_level - '0'];
- } else
+ ratelimit = &printk_limits[kern_level - '0'];
+ } else {
*lvl = '\0';
+ /* Default to debug output */
+ ratelimit = &printk_limits[7];
+ }
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
+ if (__ratelimit(ratelimit))
+ printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
va_end(args);
}
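
The printk_limits[] array added above gives every log level its own ratelimit state, so a burst of low-priority messages can no longer exhaust a budget shared with rarer, more important ones; btrfs_printk() then picks the state matching the KERN_* level of the message and defaults to the debug slot. A minimal sketch of the same idea in isolation, assuming a kernel-module context and hypothetical names rather than the btrfs code:

#include <linux/printk.h>
#include <linux/ratelimit.h>

/* One independent state per level: up to 100 messages per interval. */
static struct ratelimit_state demo_limits[2] = {
	RATELIMIT_STATE_INIT(demo_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
	RATELIMIT_STATE_INIT(demo_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
};

static void demo_log_error(const char *msg)
{
	/* __ratelimit() returns true while this level is under budget. */
	if (__ratelimit(&demo_limits[0]))
		pr_err("demo: %s\n", msg);
}

static void demo_log_debug(const char *msg)
{
	/* Debug floods consume only their own budget, never the error one. */
	if (__ratelimit(&demo_limits[1]))
		pr_debug("demo: %s\n", msg);
}
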
@@ -229,9 +251,11 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
*/
__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *function,
+ const char *function,
unsigned int line, int errno)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
@@ -239,16 +263,16 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *errstr;
errstr = btrfs_decode_error(errno);
- btrfs_warn(root->fs_info,
+ btrfs_warn(fs_info,
"%s:%d: Aborting unused transaction(%s).",
function, line, errstr);
return;
}
ACCESS_ONCE(trans->transaction->aborted) = errno;
/* Wake up anybody who may be waiting on this transaction */
- wake_up(&root->fs_info->transaction_wait);
- wake_up(&root->fs_info->transaction_blocked_wait);
- __btrfs_handle_fs_error(root->fs_info, function, line, errno, NULL);
+ wake_up(&fs_info->transaction_wait);
+ wake_up(&fs_info->transaction_blocked_wait);
+ __btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
}
/*
* __btrfs_panic decodes unexpected, fatal errors from the caller,
@@ -432,12 +456,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
*/
break;
case Opt_nodatasum:
- btrfs_set_and_info(root, NODATASUM,
+ btrfs_set_and_info(info, NODATASUM,
"setting nodatasum");
break;
case Opt_datasum:
- if (btrfs_test_opt(root, NODATASUM)) {
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(info, NODATASUM)) {
+ if (btrfs_test_opt(info, NODATACOW))
btrfs_info(root->fs_info, "setting datasum, datacow enabled");
else
btrfs_info(root->fs_info, "setting datasum");
@@ -446,9 +470,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_clear_opt(info->mount_opt, NODATASUM);
break;
case Opt_nodatacow:
- if (!btrfs_test_opt(root, NODATACOW)) {
- if (!btrfs_test_opt(root, COMPRESS) ||
- !btrfs_test_opt(root, FORCE_COMPRESS)) {
+ if (!btrfs_test_opt(info, NODATACOW)) {
+ if (!btrfs_test_opt(info, COMPRESS) ||
+ !btrfs_test_opt(info, FORCE_COMPRESS)) {
btrfs_info(root->fs_info,
"setting nodatacow, compression disabled");
} else {
@@ -461,7 +485,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_set_opt(info->mount_opt, NODATASUM);
break;
case Opt_datacow:
- btrfs_clear_and_info(root, NODATACOW,
+ btrfs_clear_and_info(info, NODATACOW,
"setting datacow");
break;
case Opt_compress_force:
@@ -470,10 +494,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
/* Fallthrough */
case Opt_compress:
case Opt_compress_type:
- saved_compress_type = btrfs_test_opt(root, COMPRESS) ?
+ saved_compress_type = btrfs_test_opt(info,
+ COMPRESS) ?
info->compress_type : BTRFS_COMPRESS_NONE;
saved_compress_force =
- btrfs_test_opt(root, FORCE_COMPRESS);
+ btrfs_test_opt(info, FORCE_COMPRESS);
if (token == Opt_compress ||
token == Opt_compress_force ||
strcmp(args[0].from, "zlib") == 0) {
@@ -513,10 +538,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
*/
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
}
- if ((btrfs_test_opt(root, COMPRESS) &&
+ if ((btrfs_test_opt(info, COMPRESS) &&
(info->compress_type != saved_compress_type ||
compress_force != saved_compress_force)) ||
- (!btrfs_test_opt(root, COMPRESS) &&
+ (!btrfs_test_opt(info, COMPRESS) &&
no_compress == 1)) {
btrfs_info(root->fs_info,
"%s %s compression",
@@ -526,25 +551,25 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
compress_force = false;
break;
case Opt_ssd:
- btrfs_set_and_info(root, SSD,
+ btrfs_set_and_info(info, SSD,
"use ssd allocation scheme");
break;
case Opt_ssd_spread:
- btrfs_set_and_info(root, SSD_SPREAD,
+ btrfs_set_and_info(info, SSD_SPREAD,
"use spread ssd allocation scheme");
btrfs_set_opt(info->mount_opt, SSD);
break;
case Opt_nossd:
- btrfs_set_and_info(root, NOSSD,
+ btrfs_set_and_info(info, NOSSD,
"not using ssd allocation scheme");
btrfs_clear_opt(info->mount_opt, SSD);
break;
case Opt_barrier:
- btrfs_clear_and_info(root, NOBARRIER,
+ btrfs_clear_and_info(info, NOBARRIER,
"turning on barriers");
break;
case Opt_nobarrier:
- btrfs_set_and_info(root, NOBARRIER,
+ btrfs_set_and_info(info, NOBARRIER,
"turning off barriers");
break;
case Opt_thread_pool:
@@ -604,24 +629,24 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
root->fs_info->sb->s_flags &= ~MS_POSIXACL;
break;
case Opt_notreelog:
- btrfs_set_and_info(root, NOTREELOG,
+ btrfs_set_and_info(info, NOTREELOG,
"disabling tree log");
break;
case Opt_treelog:
- btrfs_clear_and_info(root, NOTREELOG,
+ btrfs_clear_and_info(info, NOTREELOG,
"enabling tree log");
break;
case Opt_norecovery:
case Opt_nologreplay:
- btrfs_set_and_info(root, NOLOGREPLAY,
+ btrfs_set_and_info(info, NOLOGREPLAY,
"disabling log replay at mount time");
break;
case Opt_flushoncommit:
- btrfs_set_and_info(root, FLUSHONCOMMIT,
+ btrfs_set_and_info(info, FLUSHONCOMMIT,
"turning on flush-on-commit");
break;
case Opt_noflushoncommit:
- btrfs_clear_and_info(root, FLUSHONCOMMIT,
+ btrfs_clear_and_info(info, FLUSHONCOMMIT,
"turning off flush-on-commit");
break;
case Opt_ratio:
@@ -638,11 +663,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
}
break;
case Opt_discard:
- btrfs_set_and_info(root, DISCARD,
+ btrfs_set_and_info(info, DISCARD,
"turning on discard");
break;
case Opt_nodiscard:
- btrfs_clear_and_info(root, DISCARD,
+ btrfs_clear_and_info(info, DISCARD,
"turning off discard");
break;
case Opt_space_cache:
@@ -651,12 +676,13 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
strcmp(args[0].from, "v1") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt,
FREE_SPACE_TREE);
- btrfs_set_and_info(root, SPACE_CACHE,
+ btrfs_set_and_info(info, SPACE_CACHE,
"enabling disk space caching");
} else if (strcmp(args[0].from, "v2") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt,
SPACE_CACHE);
- btrfs_set_and_info(root, FREE_SPACE_TREE,
+ btrfs_set_and_info(info,
+ FREE_SPACE_TREE,
"enabling free space tree");
} else {
ret = -EINVAL;
@@ -667,12 +693,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
break;
case Opt_no_space_cache:
- if (btrfs_test_opt(root, SPACE_CACHE)) {
- btrfs_clear_and_info(root, SPACE_CACHE,
+ if (btrfs_test_opt(info, SPACE_CACHE)) {
+ btrfs_clear_and_info(info,
+ SPACE_CACHE,
"disabling disk space caching");
}
- if (btrfs_test_opt(root, FREE_SPACE_TREE)) {
- btrfs_clear_and_info(root, FREE_SPACE_TREE,
+ if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
+ btrfs_clear_and_info(info,
+ FREE_SPACE_TREE,
"disabling free space tree");
}
break;
@@ -685,7 +713,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
"disabling inode map caching");
break;
case Opt_clear_cache:
- btrfs_set_and_info(root, CLEAR_CACHE,
+ btrfs_set_and_info(info, CLEAR_CACHE,
"force clearing of disk cache");
break;
case Opt_user_subvol_rm_allowed:
@@ -698,11 +726,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
break;
case Opt_defrag:
- btrfs_set_and_info(root, AUTO_DEFRAG,
+ btrfs_set_and_info(info, AUTO_DEFRAG,
"enabling auto defrag");
break;
case Opt_nodefrag:
- btrfs_clear_and_info(root, AUTO_DEFRAG,
+ btrfs_clear_and_info(info, AUTO_DEFRAG,
"disabling auto defrag");
break;
case Opt_recovery:
@@ -810,22 +838,22 @@ check:
/*
* Extra check for current option against current flag
*/
- if (btrfs_test_opt(root, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+ if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
btrfs_err(root->fs_info,
"nologreplay must be used with ro mount option");
ret = -EINVAL;
}
out:
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
- !btrfs_test_opt(root, FREE_SPACE_TREE) &&
- !btrfs_test_opt(root, CLEAR_CACHE)) {
+ !btrfs_test_opt(info, FREE_SPACE_TREE) &&
+ !btrfs_test_opt(info, CLEAR_CACHE)) {
btrfs_err(root->fs_info, "cannot disable free space tree");
ret = -EINVAL;
}
- if (!ret && btrfs_test_opt(root, SPACE_CACHE))
+ if (!ret && btrfs_test_opt(info, SPACE_CACHE))
btrfs_info(root->fs_info, "disk space caching is enabled");
- if (!ret && btrfs_test_opt(root, FREE_SPACE_TREE))
+ if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
btrfs_info(root->fs_info, "using free space tree");
kfree(orig);
return ret;
@@ -1149,7 +1177,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
- trace_btrfs_sync_fs(wait);
+ trace_btrfs_sync_fs(fs_info, wait);
if (!wait) {
filemap_flush(fs_info->btree_inode->i_mapping);
@@ -1192,13 +1220,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
struct btrfs_root *root = info->tree_root;
char *compress_type;
- if (btrfs_test_opt(root, DEGRADED))
+ if (btrfs_test_opt(info, DEGRADED))
seq_puts(seq, ",degraded");
- if (btrfs_test_opt(root, NODATASUM))
+ if (btrfs_test_opt(info, NODATASUM))
seq_puts(seq, ",nodatasum");
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(info, NODATACOW))
seq_puts(seq, ",nodatacow");
- if (btrfs_test_opt(root, NOBARRIER))
+ if (btrfs_test_opt(info, NOBARRIER))
seq_puts(seq, ",nobarrier");
if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
seq_printf(seq, ",max_inline=%llu", info->max_inline);
@@ -1207,56 +1235,56 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (info->thread_pool_size != min_t(unsigned long,
num_online_cpus() + 2, 8))
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
- if (btrfs_test_opt(root, COMPRESS)) {
+ if (btrfs_test_opt(info, COMPRESS)) {
if (info->compress_type == BTRFS_COMPRESS_ZLIB)
compress_type = "zlib";
else
compress_type = "lzo";
- if (btrfs_test_opt(root, FORCE_COMPRESS))
+ if (btrfs_test_opt(info, FORCE_COMPRESS))
seq_printf(seq, ",compress-force=%s", compress_type);
else
seq_printf(seq, ",compress=%s", compress_type);
}
- if (btrfs_test_opt(root, NOSSD))
+ if (btrfs_test_opt(info, NOSSD))
seq_puts(seq, ",nossd");
- if (btrfs_test_opt(root, SSD_SPREAD))
+ if (btrfs_test_opt(info, SSD_SPREAD))
seq_puts(seq, ",ssd_spread");
- else if (btrfs_test_opt(root, SSD))
+ else if (btrfs_test_opt(info, SSD))
seq_puts(seq, ",ssd");
- if (btrfs_test_opt(root, NOTREELOG))
+ if (btrfs_test_opt(info, NOTREELOG))
seq_puts(seq, ",notreelog");
- if (btrfs_test_opt(root, NOLOGREPLAY))
+ if (btrfs_test_opt(info, NOLOGREPLAY))
seq_puts(seq, ",nologreplay");
- if (btrfs_test_opt(root, FLUSHONCOMMIT))
+ if (btrfs_test_opt(info, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
- if (btrfs_test_opt(root, DISCARD))
+ if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
- if (btrfs_test_opt(root, SPACE_CACHE))
+ if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache");
- else if (btrfs_test_opt(root, FREE_SPACE_TREE))
+ else if (btrfs_test_opt(info, FREE_SPACE_TREE))
seq_puts(seq, ",space_cache=v2");
else
seq_puts(seq, ",nospace_cache");
- if (btrfs_test_opt(root, RESCAN_UUID_TREE))
+ if (btrfs_test_opt(info, RESCAN_UUID_TREE))
seq_puts(seq, ",rescan_uuid_tree");
- if (btrfs_test_opt(root, CLEAR_CACHE))
+ if (btrfs_test_opt(info, CLEAR_CACHE))
seq_puts(seq, ",clear_cache");
- if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+ if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
seq_puts(seq, ",user_subvol_rm_allowed");
- if (btrfs_test_opt(root, ENOSPC_DEBUG))
+ if (btrfs_test_opt(info, ENOSPC_DEBUG))
seq_puts(seq, ",enospc_debug");
- if (btrfs_test_opt(root, AUTO_DEFRAG))
+ if (btrfs_test_opt(info, AUTO_DEFRAG))
seq_puts(seq, ",autodefrag");
- if (btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (btrfs_test_opt(info, INODE_MAP_CACHE))
seq_puts(seq, ",inode_cache");
- if (btrfs_test_opt(root, SKIP_BALANCE))
+ if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(root, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
+ if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
seq_puts(seq, ",check_int_data");
- else if (btrfs_test_opt(root, CHECK_INTEGRITY))
+ else if (btrfs_test_opt(info, CHECK_INTEGRITY))
seq_puts(seq, ",check_int");
if (info->check_integrity_print_mask)
seq_printf(seq, ",check_int_print_mask=%d",
@@ -1265,14 +1293,14 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (info->metadata_ratio)
seq_printf(seq, ",metadata_ratio=%d",
info->metadata_ratio);
- if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
+ if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
seq_puts(seq, ",fatal_errors=panic");
if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
seq_printf(seq, ",commit=%d", info->commit_interval);
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_test_opt(root, FRAGMENT_DATA))
+ if (btrfs_test_opt(info, FRAGMENT_DATA))
seq_puts(seq, ",fragment=data");
- if (btrfs_test_opt(root, FRAGMENT_METADATA))
+ if (btrfs_test_opt(info, FRAGMENT_METADATA))
seq_puts(seq, ",fragment=metadata");
#endif
seq_printf(seq, ",subvolid=%llu",
@@ -2030,9 +2058,6 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
* chunk).
*
* If metadata is exhausted, f_bavail will be 0.
- *
- * FIXME: not accurate for mixed block groups, total and free/used are ok,
- * available appears slightly larger.
*/
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -2319,49 +2344,6 @@ static void btrfs_print_mod_info(void)
btrfs_crc32c_impl());
}
-static int btrfs_run_sanity_tests(void)
-{
- int ret, i;
- u32 sectorsize, nodesize;
- u32 test_sectorsize[] = {
- PAGE_SIZE,
- };
- ret = btrfs_init_test_fs();
- if (ret)
- return ret;
- for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
- sectorsize = test_sectorsize[i];
- for (nodesize = sectorsize;
- nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
- nodesize <<= 1) {
- pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
- sectorsize, nodesize);
- ret = btrfs_test_free_space_cache(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_extent_buffer_operations(sectorsize,
- nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_extent_io(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_inodes(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_qgroups(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_free_space_tree(sectorsize, nodesize);
- if (ret)
- goto out;
- }
- }
-out:
- btrfs_destroy_test_fs();
- return ret;
-}
-
static int __init init_btrfs_fs(void)
{
int err;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 4879656bda3c..c6569905d3d1 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -326,6 +326,7 @@ SPACE_INFO_ATTR(bytes_used);
SPACE_INFO_ATTR(bytes_pinned);
SPACE_INFO_ATTR(bytes_reserved);
SPACE_INFO_ATTR(bytes_may_use);
+SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
BTRFS_ATTR(total_bytes_pinned, btrfs_space_info_show_total_bytes_pinned);
@@ -337,6 +338,7 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(bytes_pinned),
BTRFS_ATTR_PTR(bytes_reserved),
BTRFS_ATTR_PTR(bytes_may_use),
+ BTRFS_ATTR_PTR(bytes_readonly),
BTRFS_ATTR_PTR(disk_used),
BTRFS_ATTR_PTR(disk_total),
BTRFS_ATTR_PTR(total_bytes_pinned),
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 02223f3f78f4..bf62ad919a95 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -54,7 +54,7 @@ struct inode *btrfs_new_test_inode(void)
return new_inode(test_mnt->mnt_sb);
}
-int btrfs_init_test_fs(void)
+static int btrfs_init_test_fs(void)
{
int ret;
@@ -73,7 +73,7 @@ int btrfs_init_test_fs(void)
return 0;
}
-void btrfs_destroy_test_fs(void)
+static void btrfs_destroy_test_fs(void)
{
kern_unmount(test_mnt);
unregister_filesystem(&test_type);
@@ -128,14 +128,27 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
extent_io_tree_init(&fs_info->freed_extents[0], NULL);
extent_io_tree_init(&fs_info->freed_extents[1], NULL);
fs_info->pinned_extents = &fs_info->freed_extents[0];
+ set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+
+ test_mnt->mnt_sb->s_fs_info = fs_info;
+
return fs_info;
}
-static void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
+void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
struct radix_tree_iter iter;
void **slot;
+ if (!fs_info)
+ return;
+
+ if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
+ &fs_info->fs_state)))
+ return;
+
+ test_mnt->mnt_sb->s_fs_info = NULL;
+
spin_lock(&fs_info->buffer_lock);
radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
struct extent_buffer *eb;
@@ -167,10 +180,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
{
if (!root)
return;
+ /* Will be freed by btrfs_free_fs_roots */
+ if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
+ return;
if (root->node)
free_extent_buffer(root->node);
- if (root->fs_info)
- btrfs_free_dummy_fs_info(root->fs_info);
kfree(root);
}
@@ -220,3 +234,46 @@ void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans)
INIT_LIST_HEAD(&trans->qgroup_ref_list);
trans->type = __TRANS_DUMMY;
}
+
+int btrfs_run_sanity_tests(void)
+{
+ int ret, i;
+ u32 sectorsize, nodesize;
+ u32 test_sectorsize[] = {
+ PAGE_SIZE,
+ };
+ ret = btrfs_init_test_fs();
+ if (ret)
+ return ret;
+ for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+ sectorsize = test_sectorsize[i];
+ for (nodesize = sectorsize;
+ nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+ nodesize <<= 1) {
+ pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
+ sectorsize, nodesize);
+ ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_buffer_operations(sectorsize,
+ nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_io(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_inodes(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_qgroups(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ btrfs_destroy_test_fs();
+ return ret;
+}
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 66fb6b701eb7..b17ffbe8f9f3 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -20,57 +20,29 @@
#define __BTRFS_TESTS
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+int btrfs_run_sanity_tests(void);
#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt, ##__VA_ARGS__)
struct btrfs_root;
struct btrfs_trans_handle;
-int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
-int btrfs_init_test_fs(void);
-void btrfs_destroy_test_fs(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
+void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else
-static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
- u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_init_test_fs(void)
-{
- return 0;
-}
-static inline void btrfs_destroy_test_fs(void)
-{
-}
-static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
+static inline int btrfs_run_sanity_tests(void)
{
return 0;
}
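
With the header changes above, btrfs_run_sanity_tests() is the only symbol callers need: when CONFIG_BTRFS_FS_RUN_SANITY_TESTS is disabled, a single inline stub returns 0 instead of stubbing out every individual test. The general shape of that pattern, shown with hypothetical names rather than the btrfs header itself:

#ifdef CONFIG_EXAMPLE_SELFTESTS
int example_run_sanity_tests(void);
#else
static inline int example_run_sanity_tests(void)
{
	return 0;	/* tests compiled out: nothing to run, report success */
}
#endif
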
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index 4f8cbd1ec5ee..199569174637 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -24,8 +24,9 @@
static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
{
- struct btrfs_path *path;
- struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_path *path = NULL;
+ struct btrfs_root *root = NULL;
struct extent_buffer *eb;
struct btrfs_item *item;
char *value = "mary had a little lamb";
@@ -40,17 +41,24 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
test_msg("Running btrfs_split_item tests\n");
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Could not allocate fs_info\n");
+ return -ENOMEM;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
- return PTR_ERR(root);
+ ret = PTR_ERR(root);
+ goto out;
}
path = btrfs_alloc_path();
if (!path) {
test_msg("Could not allocate path\n");
- kfree(root);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
@@ -219,7 +227,8 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
}
out:
btrfs_free_path(path);
- kfree(root);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 3956bb2ff84c..3221c8dee272 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -837,6 +837,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_block_group_cache *cache;
struct btrfs_root *root = NULL;
int ret = -ENOMEM;
@@ -855,15 +856,17 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
return 0;
}
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- ret = PTR_ERR(root);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ ret = -ENOMEM;
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info)
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
goto out;
+ }
root->fs_info->extent_root = root;
cache->fs_info = root->fs_info;
@@ -882,6 +885,7 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
out:
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
test_msg("Free space cache tests finished\n");
return ret;
}
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index aac507085ab0..7508d3b42780 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -443,23 +443,24 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
static int run_test(test_func_t test_func, int bitmaps,
u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_root *root = NULL;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_trans_handle trans;
struct btrfs_path *path = NULL;
int ret;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate dummy root\n");
- ret = PTR_ERR(root);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
+ ret = -ENOMEM;
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
- ret = -ENOMEM;
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate dummy root\n");
+ ret = PTR_ERR(root);
goto out;
}
@@ -534,6 +535,7 @@ out:
btrfs_free_path(path);
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 29648c0a39f1..9f72aeda9220 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -230,6 +230,7 @@ static unsigned long vacancy_only = 0;
static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
struct extent_map *em = NULL;
@@ -248,19 +249,15 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- /*
- * We do this since btrfs_get_extent wants to assign em->bdev to
- * root->fs_info->fs_devices->latest_bdev.
- */
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
goto out;
}
@@ -835,11 +832,13 @@ out:
free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
static int test_hole_first(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
struct extent_map *em = NULL;
@@ -855,15 +854,15 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
goto out;
}
@@ -934,11 +933,13 @@ out:
free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
static int test_extent_accounting(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
int ret = -ENOMEM;
@@ -949,15 +950,15 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
return ret;
}
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
goto out;
}
@@ -1132,6 +1133,7 @@ out:
NULL, GFP_KERNEL);
iput(inode);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 57a12c0d680b..4407fef7c16c 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -453,22 +453,24 @@ static int test_multiple_refs(struct btrfs_root *root,
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct btrfs_root *root;
struct btrfs_root *tmp_root;
int ret = 0;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
- return PTR_ERR(root);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
+ return -ENOMEM;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
- ret = -ENOMEM;
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
+ ret = PTR_ERR(root);
goto out;
}
+
/* We are using this root as our extent root */
root->fs_info->extent_root = root;
@@ -495,7 +497,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
- tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
+ tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
@@ -510,7 +512,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
goto out;
}
- tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
+ tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
@@ -531,5 +533,6 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
ret = test_multiple_refs(root, sectorsize, nodesize);
out:
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 948aa186b353..9cca0a721961 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -561,6 +561,7 @@ again:
h->transaction = cur_trans;
h->root = root;
h->use_count = 1;
+ h->fs_info = root->fs_info;
h->type = type;
h->can_flush_pending_bgs = true;
@@ -1491,7 +1492,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto dir_item_existed;
} else if (IS_ERR(dir_item)) {
ret = PTR_ERR(dir_item);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
btrfs_release_path(path);
@@ -1504,7 +1505,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
*/
ret = btrfs_run_delayed_items(trans, root);
if (ret) { /* Transaction aborted */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1543,7 +1544,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_tree_unlock(old);
free_extent_buffer(old);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1554,7 +1555,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(old);
free_extent_buffer(old);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
/* see comments in should_cow_block() */
@@ -1568,7 +1569,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1580,7 +1581,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1588,19 +1589,19 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_reloc_post_snapshot(trans, pending);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1622,7 +1623,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
/* We have checked the name at the beginning, so it is impossible. */
BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1632,13 +1633,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
current_fs_time(parent_inode->i_sb);
ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
BTRFS_UUID_KEY_SUBVOL, objectid);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
@@ -1647,14 +1648,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
if (ret && ret != -EEXIST) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1709,7 +1710,7 @@ static void update_super_roots(struct btrfs_root *root)
super->root = root_item->bytenr;
super->generation = root_item->generation;
super->root_level = root_item->level;
- if (btrfs_test_opt(root, SPACE_CACHE))
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
super->cache_generation = root_item->generation;
if (root->fs_info->update_uuid_tree_gen)
super->uuid_tree_generation = root_item->generation;
@@ -1850,7 +1851,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
WARN_ON(trans->use_count > 1);
- btrfs_abort_transaction(trans, root, err);
+ btrfs_abort_transaction(trans, err);
spin_lock(&root->fs_info->trans_lock);
@@ -1895,14 +1896,14 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
- if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
return btrfs_start_delalloc_roots(fs_info, 1, -1);
return 0;
}
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
- if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index c5abee4f01ad..efb122643380 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -128,6 +128,7 @@ struct btrfs_trans_handle {
* Subvolume quota depends on this
*/
struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
struct seq_list delayed_ref_elem;
struct list_head qgroup_ref_list;
struct list_head new_bgs;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c05f69a8ec42..d31a0c4f56be 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2757,7 +2757,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
while (1) {
int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
- if (!btrfs_test_opt(root, SSD) &&
+ if (!btrfs_test_opt(root->fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1);
@@ -2788,7 +2788,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
btrfs_set_log_full_commit(root->fs_info, trans);
mutex_unlock(&root->log_mutex);
@@ -2838,7 +2838,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_log_full_commit(root->fs_info, trans);
if (ret != -ENOSPC) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
goto out;
}
@@ -2898,7 +2898,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
blk_finish_plug(&plug);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
@@ -2934,7 +2934,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
}
@@ -2991,7 +2991,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
ret = walk_log_tree(trans, log, &wc);
/* I don't think this can happen but just in case */
if (ret)
- btrfs_abort_transaction(trans, log, ret);
+ btrfs_abort_transaction(trans, ret);
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -3160,7 +3160,7 @@ out_unlock:
btrfs_set_log_full_commit(root->fs_info, trans);
ret = 0;
} else if (ret < 0)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_log_trans(root);
@@ -3193,7 +3193,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
btrfs_set_log_full_commit(root->fs_info, trans);
ret = 0;
} else if (ret < 0 && ret != -ENOENT)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_log_trans(root);
return ret;
@@ -4703,6 +4703,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
ins_nr = 0;
ret = btrfs_search_forward(root, &min_key,
path, trans->transid);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ }
if (ret != 0)
break;
again:
@@ -5301,7 +5305,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
sb = inode->i_sb;
- if (btrfs_test_opt(root, NOTREELOG)) {
+ if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
ret = 1;
goto end_no_trans;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0fb4a959012e..51f125508771 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -140,7 +140,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
-static void btrfs_close_one_device(struct btrfs_device *device);
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
@@ -853,6 +852,46 @@ static void free_device(struct rcu_head *head)
schedule_work(&device->rcu_work);
}
+static void btrfs_close_one_device(struct btrfs_device *device)
+{
+ struct btrfs_fs_devices *fs_devices = device->fs_devices;
+ struct btrfs_device *new_device;
+ struct rcu_string *name;
+
+ if (device->bdev)
+ fs_devices->open_devices--;
+
+ if (device->writeable &&
+ device->devid != BTRFS_DEV_REPLACE_DEVID) {
+ list_del_init(&device->dev_alloc_list);
+ fs_devices->rw_devices--;
+ }
+
+ if (device->missing)
+ fs_devices->missing_devices--;
+
+ if (device->bdev && device->writeable) {
+ sync_blockdev(device->bdev);
+ invalidate_bdev(device->bdev);
+ }
+
+ new_device = btrfs_alloc_device(NULL, &device->devid,
+ device->uuid);
+ BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
+
+ /* Safe because we are under uuid_mutex */
+ if (device->name) {
+ name = rcu_string_strdup(device->name->str, GFP_NOFS);
+ BUG_ON(!name); /* -ENOMEM */
+ rcu_assign_pointer(new_device->name, name);
+ }
+
+ list_replace_rcu(&device->dev_list, &new_device->dev_list);
+ new_device->fs_devices = device->fs_devices;
+
+ call_rcu(&device->rcu, free_device);
+}
+
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *tmp;
@@ -2399,14 +2438,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
ret = init_first_rw_device(trans, root, device);
unlock_chunks(root);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error_trans;
}
}
ret = btrfs_add_device(trans, root, device);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error_trans;
}
@@ -2415,7 +2454,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
ret = btrfs_finish_sprout(trans, root);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error_trans;
}
@@ -2801,7 +2840,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
&dev_extent_len);
if (ret) {
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2820,7 +2859,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
ret = btrfs_update_device(trans, map->stripes[i].dev);
if (ret) {
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -2829,7 +2868,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2838,14 +2877,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2902,7 +2941,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
* chunk tree entries
*/
ret = btrfs_remove_chunk(trans, root, chunk_offset);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans, extent_root);
return ret;
}
@@ -3421,7 +3460,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
u64 size_to_free;
u64 chunk_type;
struct btrfs_chunk *chunk;
- struct btrfs_path *path;
+ struct btrfs_path *path = NULL;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_trans_handle *trans;
@@ -3455,13 +3494,33 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
ret = btrfs_shrink_device(device, old_size - size_to_free);
if (ret == -ENOSPC)
break;
- BUG_ON(ret);
+ if (ret) {
+ /* btrfs_shrink_device never returns ret > 0 */
+ WARN_ON(ret > 0);
+ goto error;
+ }
trans = btrfs_start_transaction(dev_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ btrfs_info_in_rcu(fs_info,
+ "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
+ rcu_str_deref(device->name), ret,
+ old_size, old_size - size_to_free);
+ goto error;
+ }
ret = btrfs_grow_device(trans, device, old_size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_end_transaction(trans, dev_root);
+ /* btrfs_grow_device never returns ret > 0 */
+ WARN_ON(ret > 0);
+ btrfs_info_in_rcu(fs_info,
+ "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
+ rcu_str_deref(device->name), ret,
+ old_size, old_size - size_to_free);
+ goto error;
+ }
btrfs_end_transaction(trans, dev_root);
}
@@ -3885,7 +3944,7 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
}
spin_unlock(&fs_info->balance_lock);
- if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+ if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
btrfs_info(fs_info, "force skipping balance");
return 0;
}
@@ -4240,7 +4299,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
BTRFS_UUID_TREE_OBJECTID);
if (IS_ERR(uuid_root)) {
ret = PTR_ERR(uuid_root);
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
}
@@ -4514,8 +4573,7 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
-#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r) \
- - sizeof(struct btrfs_item) \
+#define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r) \
- sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
@@ -5954,7 +6012,7 @@ static void btrfs_end_bio(struct bio *bio)
else
btrfs_dev_stat_inc(dev,
BTRFS_DEV_STAT_READ_ERRS);
- if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+ if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
btrfs_dev_stat_inc(dev,
BTRFS_DEV_STAT_FLUSH_ERRS);
btrfs_dev_stat_print_on_error(dev);
@@ -6031,7 +6089,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
bio->bi_next = NULL;
spin_lock(&device->io_lock);
- if (bio->bi_rw & REQ_SYNC)
+ if (bio->bi_opf & REQ_SYNC)
pending_bios = &device->pending_sync_bios;
else
pending_bios = &device->pending_bios;
@@ -6069,7 +6127,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
rcu_read_lock();
name = rcu_dereference(dev->name);
pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
- "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
+ "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf,
(u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
name->str, dev->devid, bio->bi_iter.bi_size);
rcu_read_unlock();
@@ -6401,7 +6459,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
uuid, NULL);
- if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
+ if (!map->stripes[i].dev &&
+ !btrfs_test_opt(root->fs_info, DEGRADED)) {
free_extent_map(em);
return -EIO;
}
@@ -6469,7 +6528,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
fs_devices = find_fsid(fsid);
if (!fs_devices) {
- if (!btrfs_test_opt(root, DEGRADED))
+ if (!btrfs_test_opt(root->fs_info, DEGRADED))
return ERR_PTR(-ENOENT);
fs_devices = alloc_fs_devices(fsid);
@@ -6531,7 +6590,7 @@ static int read_one_dev(struct btrfs_root *root,
device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
if (!device) {
- if (!btrfs_test_opt(root, DEGRADED))
+ if (!btrfs_test_opt(root->fs_info, DEGRADED))
return -EIO;
device = add_missing_dev(root, fs_devices, devid, dev_uuid);
@@ -6540,7 +6599,7 @@ static int read_one_dev(struct btrfs_root *root,
btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
devid, dev_uuid);
} else {
- if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
+ if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED))
return -EIO;
if(!device->bdev && !device->missing) {
@@ -7143,38 +7202,3 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
fs_devices = fs_devices->seed;
}
}
-
-static void btrfs_close_one_device(struct btrfs_device *device)
-{
- struct btrfs_fs_devices *fs_devices = device->fs_devices;
- struct btrfs_device *new_device;
- struct rcu_string *name;
-
- if (device->bdev)
- fs_devices->open_devices--;
-
- if (device->writeable &&
- device->devid != BTRFS_DEV_REPLACE_DEVID) {
- list_del_init(&device->dev_alloc_list);
- fs_devices->rw_devices--;
- }
-
- if (device->missing)
- fs_devices->missing_devices--;
-
- new_device = btrfs_alloc_device(NULL, &device->devid,
- device->uuid);
- BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
-
- /* Safe because we are under uuid_mutex */
- if (device->name) {
- name = rcu_string_strdup(device->name->str, GFP_NOFS);
- BUG_ON(!name); /* -ENOMEM */
- rcu_assign_pointer(new_device->name, name);
- }
-
- list_replace_rcu(&device->dev_list, &new_device->dev_list);
- new_device->fs_devices = device->fs_devices;
-
- call_rcu(&device->rcu, free_device);
-}
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ae75006e73b..3f7c2cd41f8f 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -263,6 +263,8 @@ requeue:
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
struct cachefiles_object *object)
{
+ blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
+
write_lock(&cache->active_lock);
rb_erase(&object->active_node, &cache->active_nodes);
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
@@ -273,8 +275,7 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
/* This object can now be culled, so we need to let the daemon know
* that there is something it can remove if it needs to.
*/
- atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
- &cache->b_released);
+ atomic_long_add(i_blocks, &cache->b_released);
if (atomic_inc_return(&cache->f_released))
cachefiles_state_changed(cache);
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 26a9d10d75e9..d5b6f959a3c3 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1730,7 +1730,8 @@ enum {
POOL_WRITE = 2,
};
-static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
+static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
+ s64 pool, struct ceph_string *pool_ns)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1738,6 +1739,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
struct rb_node **p, *parent;
struct ceph_pool_perm *perm;
struct page **pages;
+ size_t pool_ns_len;
int err = 0, err2 = 0, have = 0;
down_read(&mdsc->pool_perm_rwsem);
@@ -1749,17 +1751,31 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
else if (pool > perm->pool)
p = &(*p)->rb_right;
else {
- have = perm->perm;
- break;
+ int ret = ceph_compare_string(pool_ns,
+ perm->pool_ns,
+ perm->pool_ns_len);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else {
+ have = perm->perm;
+ break;
+ }
}
}
up_read(&mdsc->pool_perm_rwsem);
if (*p)
goto out;
- dout("__ceph_pool_perm_get pool %u no perm cached\n", pool);
+ if (pool_ns)
+ dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
+ pool, (int)pool_ns->len, pool_ns->str);
+ else
+ dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
down_write(&mdsc->pool_perm_rwsem);
+ p = &mdsc->pool_perm_tree.rb_node;
parent = NULL;
while (*p) {
parent = *p;
@@ -1769,8 +1785,17 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
else if (pool > perm->pool)
p = &(*p)->rb_right;
else {
- have = perm->perm;
- break;
+ int ret = ceph_compare_string(pool_ns,
+ perm->pool_ns,
+ perm->pool_ns_len);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else {
+ have = perm->perm;
+ break;
+ }
}
}
if (*p) {
@@ -1788,6 +1813,8 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
rd_req->r_flags = CEPH_OSD_FLAG_READ;
osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
rd_req->r_base_oloc.pool = pool;
+ if (pool_ns)
+ rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
@@ -1841,7 +1868,8 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
goto out_unlock;
}
- perm = kmalloc(sizeof(*perm), GFP_NOFS);
+ pool_ns_len = pool_ns ? pool_ns->len : 0;
+ perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
if (!perm) {
err = -ENOMEM;
goto out_unlock;
@@ -1849,6 +1877,11 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
perm->pool = pool;
perm->perm = have;
+ perm->pool_ns_len = pool_ns_len;
+ if (pool_ns_len > 0)
+ memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
+ perm->pool_ns[pool_ns_len] = 0;
+
rb_link_node(&perm->node, parent, p);
rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
err = 0;
@@ -1860,43 +1893,46 @@ out_unlock:
out:
if (!err)
err = have;
- dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err);
+ if (pool_ns)
+ dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
+ pool, (int)pool_ns->len, pool_ns->str, err);
+ else
+ dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
return err;
}
int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
- u32 pool;
+ s64 pool;
+ struct ceph_string *pool_ns;
int ret, flags;
- /* does not support pool namespace yet */
- if (ci->i_pool_ns_len)
- return -EIO;
-
if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
NOPOOLPERM))
return 0;
spin_lock(&ci->i_ceph_lock);
flags = ci->i_ceph_flags;
- pool = ceph_file_layout_pg_pool(ci->i_layout);
+ pool = ci->i_layout.pool_id;
spin_unlock(&ci->i_ceph_lock);
check:
if (flags & CEPH_I_POOL_PERM) {
if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
- dout("ceph_pool_perm_check pool %u no read perm\n",
+ dout("ceph_pool_perm_check pool %lld no read perm\n",
pool);
return -EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
- dout("ceph_pool_perm_check pool %u no write perm\n",
+ dout("ceph_pool_perm_check pool %lld no write perm\n",
pool);
return -EPERM;
}
return 0;
}
- ret = __ceph_pool_perm_get(ci, pool);
+ pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
+ ret = __ceph_pool_perm_get(ci, pool, pool_ns);
+ ceph_put_string(pool_ns);
if (ret < 0)
return ret;
@@ -1907,10 +1943,11 @@ check:
flags |= CEPH_I_POOL_WR;
spin_lock(&ci->i_ceph_lock);
- if (pool == ceph_file_layout_pg_pool(ci->i_layout)) {
- ci->i_ceph_flags = flags;
+ if (pool == ci->i_layout.pool_id &&
+ pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
+ ci->i_ceph_flags |= flags;
} else {
- pool = ceph_file_layout_pg_pool(ci->i_layout);
+ pool = ci->i_layout.pool_id;
flags = ci->i_ceph_flags;
}
spin_unlock(&ci->i_ceph_lock);
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 238c55b01723..5bc5d37b1217 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -71,7 +71,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
&ceph_fscache_fsid_object_def,
fsc, true);
if (!fsc->fscache)
- pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
+ pr_err("Unable to register fsid: %p fscache cookie\n", fsc);
return 0;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6f60d0a3d0f9..99115cae1652 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -40,6 +40,11 @@
* cluster to release server state.
*/
+static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
+static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_inode_info *ci,
+ u64 oldest_flush_tid);
/*
* Generate readable cap strings for debugging output.
@@ -849,12 +854,14 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
*/
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
- int want = 0;
- int mode;
- for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
- if (ci->i_nr_by_mode[mode])
- want |= ceph_caps_for_mode(mode);
- return want;
+ int i, bits = 0;
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
+ if (ci->i_nr_by_mode[i])
+ bits |= 1 << i;
+ }
+ if (bits == 0)
+ return 0;
+ return ceph_caps_for_mode(bits >> 1);
}
/*
@@ -991,7 +998,7 @@ static int send_cap_msg(struct ceph_mds_session *session,
u32 seq, u64 flush_tid, u64 oldest_flush_tid,
u32 issue_seq, u32 mseq, u64 size, u64 max_size,
struct timespec *mtime, struct timespec *atime,
- struct timespec *ctime, u64 time_warp_seq,
+ struct timespec *ctime, u32 time_warp_seq,
kuid_t uid, kgid_t gid, umode_t mode,
u64 xattr_version,
struct ceph_buffer *xattrs_buf,
@@ -1116,8 +1123,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
struct inode *inode = &ci->vfs_inode;
u64 cap_id = cap->cap_id;
int held, revoking, dropping, keep;
- u64 seq, issue_seq, mseq, time_warp_seq, follows;
- u64 size, max_size;
+ u64 follows, size, max_size;
+ u32 seq, issue_seq, mseq, time_warp_seq;
struct timespec mtime, atime, ctime;
int wake = 0;
umode_t mode;
@@ -1215,6 +1222,22 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
return delayed;
}
+static inline int __send_flush_snap(struct inode *inode,
+ struct ceph_mds_session *session,
+ struct ceph_cap_snap *capsnap,
+ u32 mseq, u64 oldest_flush_tid)
+{
+ return send_cap_msg(session, ceph_vino(inode).ino, 0,
+ CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
+ capsnap->dirty, 0, capsnap->cap_flush.tid,
+ oldest_flush_tid, 0, mseq, capsnap->size, 0,
+ &capsnap->mtime, &capsnap->atime,
+ &capsnap->ctime, capsnap->time_warp_seq,
+ capsnap->uid, capsnap->gid, capsnap->mode,
+ capsnap->xattr_version, capsnap->xattr_blob,
+ capsnap->follows, capsnap->inline_data);
+}
+
/*
* When a snapshot is taken, clients accumulate dirty metadata on
* inodes with capabilities in ceph_cap_snaps to describe the file
@@ -1222,37 +1245,22 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
* asynchronously back to the MDS once sync writes complete and dirty
* data is written out.
*
- * Unless @kick is true, skip cap_snaps that were already sent to
- * the MDS (i.e., during this session).
- *
* Called under i_ceph_lock. Takes s_mutex as needed.
*/
-void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession,
- int kick)
+static void __ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
- int mds;
+ struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_cap_snap *capsnap;
- u32 mseq;
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
- session->s_mutex */
- u64 next_follows = 0; /* keep track of how far we've gotten through the
- i_cap_snaps list, and skip these entries next time
- around to avoid an infinite loop */
+ u64 oldest_flush_tid = 0;
+ u64 first_tid = 1, last_tid = 0;
- if (psession)
- session = *psession;
+ dout("__flush_snaps %p session %p\n", inode, session);
- dout("__flush_snaps %p\n", inode);
-retry:
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- /* avoid an infiniute loop after retry */
- if (capsnap->follows < next_follows)
- continue;
/*
* we need to wait for sync writes to complete and for dirty
* pages to be written out.
@@ -1263,97 +1271,129 @@ retry:
/* should be removed by ceph_try_drop_cap_snap() */
BUG_ON(!capsnap->need_flush);
- /* pick mds, take s_mutex */
- if (ci->i_auth_cap == NULL) {
- dout("no auth cap (migrating?), doing nothing\n");
- goto out;
- }
-
/* only flush each capsnap once */
- if (!kick && !list_empty(&capsnap->flushing_item)) {
- dout("already flushed %p, skipping\n", capsnap);
+ if (capsnap->cap_flush.tid > 0) {
+ dout(" already flushed %p, skipping\n", capsnap);
continue;
}
- mds = ci->i_auth_cap->session->s_mds;
- mseq = ci->i_auth_cap->mseq;
+ spin_lock(&mdsc->cap_dirty_lock);
+ capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
+ list_add_tail(&capsnap->cap_flush.g_list,
+ &mdsc->cap_flush_list);
+ if (oldest_flush_tid == 0)
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ if (list_empty(&ci->i_flushing_item)) {
+ list_add_tail(&ci->i_flushing_item,
+ &session->s_cap_flushing);
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+
+ list_add_tail(&capsnap->cap_flush.i_list,
+ &ci->i_cap_flush_list);
- if (session && session->s_mds != mds) {
- dout("oops, wrong session %p mutex\n", session);
- if (kick)
- goto out;
+ if (first_tid == 1)
+ first_tid = capsnap->cap_flush.tid;
+ last_tid = capsnap->cap_flush.tid;
+ }
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- session = NULL;
+ ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;
+
+ while (first_tid <= last_tid) {
+ struct ceph_cap *cap = ci->i_auth_cap;
+ struct ceph_cap_flush *cf;
+ int ret;
+
+ if (!(cap && cap->session == session)) {
+ dout("__flush_snaps %p auth cap %p not mds%d, "
+ "stop\n", inode, cap, session->s_mds);
+ break;
}
- if (!session) {
- spin_unlock(&ci->i_ceph_lock);
- mutex_lock(&mdsc->mutex);
- session = __ceph_lookup_mds_session(mdsc, mds);
- mutex_unlock(&mdsc->mutex);
- if (session) {
- dout("inverting session/ino locks on %p\n",
- session);
- mutex_lock(&session->s_mutex);
+
+ ret = -ENOENT;
+ list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
+ if (cf->tid >= first_tid) {
+ ret = 0;
+ break;
}
- /*
- * if session == NULL, we raced against a cap
- * deletion or migration. retry, and we'll
- * get a better @mds value next time.
- */
- spin_lock(&ci->i_ceph_lock);
- goto retry;
}
+ if (ret < 0)
+ break;
- spin_lock(&mdsc->cap_dirty_lock);
- capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
- spin_unlock(&mdsc->cap_dirty_lock);
+ first_tid = cf->tid + 1;
+ capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
atomic_inc(&capsnap->nref);
- if (list_empty(&capsnap->flushing_item))
- list_add_tail(&capsnap->flushing_item,
- &session->s_cap_snaps_flushing);
spin_unlock(&ci->i_ceph_lock);
- dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
- inode, capsnap, capsnap->follows, capsnap->flush_tid);
- send_cap_msg(session, ceph_vino(inode).ino, 0,
- CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
- capsnap->dirty, 0, capsnap->flush_tid, 0,
- 0, mseq, capsnap->size, 0,
- &capsnap->mtime, &capsnap->atime,
- &capsnap->ctime, capsnap->time_warp_seq,
- capsnap->uid, capsnap->gid, capsnap->mode,
- capsnap->xattr_version, capsnap->xattr_blob,
- capsnap->follows, capsnap->inline_data);
-
- next_follows = capsnap->follows + 1;
- ceph_put_cap_snap(capsnap);
+ dout("__flush_snaps %p capsnap %p tid %llu %s\n",
+ inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));
+
+ ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
+ oldest_flush_tid);
+ if (ret < 0) {
+ pr_err("__flush_snaps: error sending cap flushsnap, "
+ "ino (%llx.%llx) tid %llu follows %llu\n",
+ ceph_vinop(inode), cf->tid, capsnap->follows);
+ }
+ ceph_put_cap_snap(capsnap);
spin_lock(&ci->i_ceph_lock);
- goto retry;
}
+}
- /* we flushed them all; remove this inode from the queue */
- spin_lock(&mdsc->snap_flush_lock);
- list_del_init(&ci->i_snap_flush_item);
- spin_unlock(&mdsc->snap_flush_lock);
+void ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession)
+{
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_session *session = *psession;
+ int mds;
+ dout("ceph_flush_snaps %p\n", inode);
+retry:
+ spin_lock(&ci->i_ceph_lock);
+ if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
+ dout(" no capsnap needs flush, doing nothing\n");
+ goto out;
+ }
+ if (!ci->i_auth_cap) {
+ dout(" no auth cap (migrating?), doing nothing\n");
+ goto out;
+ }
-out:
- if (psession)
- *psession = session;
- else if (session) {
+ mds = ci->i_auth_cap->session->s_mds;
+ if (session && session->s_mds != mds) {
+ dout(" oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
+ session = NULL;
+ }
+ if (!session) {
+ spin_unlock(&ci->i_ceph_lock);
+ mutex_lock(&mdsc->mutex);
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ mutex_unlock(&mdsc->mutex);
+ if (session) {
+ dout(" inverting session/ino locks on %p\n", session);
+ mutex_lock(&session->s_mutex);
+ }
+ goto retry;
}
-}
-static void ceph_flush_snaps(struct ceph_inode_info *ci)
-{
- spin_lock(&ci->i_ceph_lock);
- __ceph_flush_snaps(ci, NULL, 0);
+ __ceph_flush_snaps(ci, session);
+out:
spin_unlock(&ci->i_ceph_lock);
+
+ if (psession) {
+ *psession = session;
+ } else {
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ }
+ /* we flushed them all; remove this inode from the queue */
+ spin_lock(&mdsc->snap_flush_lock);
+ list_del_init(&ci->i_snap_flush_item);
+ spin_unlock(&mdsc->snap_flush_lock);
}
/*
@@ -1411,52 +1451,6 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
return dirty;
}
-static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
- struct ceph_cap_flush *cf)
-{
- struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_cap_flush *other = NULL;
-
- while (*p) {
- parent = *p;
- other = rb_entry(parent, struct ceph_cap_flush, i_node);
-
- if (cf->tid < other->tid)
- p = &(*p)->rb_left;
- else if (cf->tid > other->tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&cf->i_node, parent, p);
- rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
-}
-
-static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
- struct ceph_cap_flush *cf)
-{
- struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_cap_flush *other = NULL;
-
- while (*p) {
- parent = *p;
- other = rb_entry(parent, struct ceph_cap_flush, g_node);
-
- if (cf->tid < other->tid)
- p = &(*p)->rb_left;
- else if (cf->tid > other->tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&cf->g_node, parent, p);
- rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
-}
-
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
@@ -1470,23 +1464,54 @@ void ceph_free_cap_flush(struct ceph_cap_flush *cf)
static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
- struct rb_node *n = rb_first(&mdsc->cap_flush_tree);
- if (n) {
+ if (!list_empty(&mdsc->cap_flush_list)) {
struct ceph_cap_flush *cf =
- rb_entry(n, struct ceph_cap_flush, g_node);
+ list_first_entry(&mdsc->cap_flush_list,
+ struct ceph_cap_flush, g_list);
return cf->tid;
}
return 0;
}
/*
+ * Remove cap_flush from the mdsc's or inode's flushing cap list.
+ * Return true if caller needs to wake up flush waiters.
+ */
+static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
+ struct ceph_inode_info *ci,
+ struct ceph_cap_flush *cf)
+{
+ struct ceph_cap_flush *prev;
+ bool wake = cf->wake;
+ if (mdsc) {
+ /* are there older pending cap flushes? */
+ if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
+ prev = list_prev_entry(cf, g_list);
+ prev->wake = true;
+ wake = false;
+ }
+ list_del(&cf->g_list);
+ } else if (ci) {
+ if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
+ prev = list_prev_entry(cf, i_list);
+ prev->wake = true;
+ wake = false;
+ }
+ list_del(&cf->i_list);
+ } else {
+ BUG_ON(1);
+ }
+ return wake;
+}
+
+/*
* Add dirty inode to the flushing list. Assigned a seq number so we
* can wait for caps to flush without starving.
*
* Called under i_ceph_lock.
*/
static int __mark_caps_flushing(struct inode *inode,
- struct ceph_mds_session *session,
+ struct ceph_mds_session *session, bool wake,
u64 *flush_tid, u64 *oldest_flush_tid)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1509,26 +1534,22 @@ static int __mark_caps_flushing(struct inode *inode,
swap(cf, ci->i_prealloc_cap_flush);
cf->caps = flushing;
+ cf->wake = wake;
spin_lock(&mdsc->cap_dirty_lock);
list_del_init(&ci->i_dirty_item);
cf->tid = ++mdsc->last_cap_flush_tid;
- __add_cap_flushing_to_mdsc(mdsc, cf);
+ list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
*oldest_flush_tid = __get_oldest_flush_tid(mdsc);
if (list_empty(&ci->i_flushing_item)) {
list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
mdsc->num_cap_flushing++;
- dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
- } else {
- list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
- dout(" inode %p now flushing (more) tid %llu\n",
- inode, cf->tid);
}
spin_unlock(&mdsc->cap_dirty_lock);
- __add_cap_flushing_to_inode(ci, cf);
+ list_add_tail(&cf->i_list, &ci->i_cap_flush_list);
*flush_tid = cf->tid;
return flushing;
@@ -1583,10 +1604,11 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
int mds = -1; /* keep track of how far we've gone through i_caps list
to avoid an infinite loop on retry */
struct rb_node *p;
- int tried_invalidate = 0;
- int delayed = 0, sent = 0, force_requeue = 0, num;
- int queue_invalidate = 0;
- int is_delayed = flags & CHECK_CAPS_NODELAY;
+ int delayed = 0, sent = 0, num;
+ bool is_delayed = flags & CHECK_CAPS_NODELAY;
+ bool queue_invalidate = false;
+ bool force_requeue = false;
+ bool tried_invalidate = false;
/* if we are unmounting, flush any unused caps immediately. */
if (mdsc->stopping)
@@ -1597,9 +1619,6 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
- /* flush snaps first time around only */
- if (!list_empty(&ci->i_cap_snaps))
- __ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
spin_lock(&ci->i_ceph_lock);
@@ -1666,17 +1685,17 @@ retry_locked:
if (revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) {
dout("check_caps queuing invalidate\n");
- queue_invalidate = 1;
+ queue_invalidate = true;
ci->i_rdcache_revoking = ci->i_rdcache_gen;
} else {
dout("check_caps failed to invalidate pages\n");
/* we failed to invalidate pages. check these
caps again later. */
- force_requeue = 1;
+ force_requeue = true;
__cap_set_timeouts(mdsc, ci);
}
}
- tried_invalidate = 1;
+ tried_invalidate = true;
goto retry_locked;
}
@@ -1720,10 +1739,15 @@ retry_locked:
}
}
/* flush anything dirty? */
- if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
- ci->i_dirty_caps) {
- dout("flushing dirty caps\n");
- goto ack;
+ if (cap == ci->i_auth_cap) {
+ if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
+ dout("flushing dirty caps\n");
+ goto ack;
+ }
+ if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
+ dout("flushing snap caps\n");
+ goto ack;
+ }
}
/* completed revocation? going down and there are no caps? */
@@ -1782,6 +1806,26 @@ ack:
goto retry;
}
}
+
+ /* kick flushing and flush snaps before sending normal
+ * cap message */
+ if (cap == ci->i_auth_cap &&
+ (ci->i_ceph_flags &
+ (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
+ if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
+ spin_lock(&mdsc->cap_dirty_lock);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ spin_unlock(&mdsc->cap_dirty_lock);
+ __kick_flushing_caps(mdsc, session, ci,
+ oldest_flush_tid);
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ }
+ if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
+ __ceph_flush_snaps(ci, session);
+
+ goto retry_locked;
+ }
+
/* take snap_rwsem after session mutex */
if (!took_snap_rwsem) {
if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
@@ -1796,7 +1840,7 @@ ack:
}
if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
- flushing = __mark_caps_flushing(inode, session,
+ flushing = __mark_caps_flushing(inode, session, false,
&flush_tid,
&oldest_flush_tid);
} else {
@@ -1822,7 +1866,7 @@ ack:
* otherwise cancel.
*/
if (delayed && is_delayed)
- force_requeue = 1; /* __send_cap delayed release; requeue */
+ force_requeue = true; /* __send_cap delayed release; requeue */
if (!delayed && !is_delayed)
__cap_delay_cancel(mdsc, ci);
else if (!is_delayed || force_requeue)
@@ -1873,8 +1917,8 @@ retry:
if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
goto out;
- flushing = __mark_caps_flushing(inode, session, &flush_tid,
- &oldest_flush_tid);
+ flushing = __mark_caps_flushing(inode, session, true,
+ &flush_tid, &oldest_flush_tid);
/* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
@@ -1887,10 +1931,11 @@ retry:
spin_unlock(&ci->i_ceph_lock);
}
} else {
- struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
- if (n) {
+ if (!list_empty(&ci->i_cap_flush_list)) {
struct ceph_cap_flush *cf =
- rb_entry(n, struct ceph_cap_flush, i_node);
+ list_last_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ cf->wake = true;
flush_tid = cf->tid;
}
flushing = ci->i_flushing_caps;
@@ -1910,14 +1955,13 @@ out:
static int caps_are_flushed(struct inode *inode, u64 flush_tid)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_cap_flush *cf;
- struct rb_node *n;
int ret = 1;
spin_lock(&ci->i_ceph_lock);
- n = rb_first(&ci->i_cap_flush_tree);
- if (n) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
+ if (!list_empty(&ci->i_cap_flush_list)) {
+ struct ceph_cap_flush * cf =
+ list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
if (cf->tid <= flush_tid)
ret = 0;
}
@@ -1926,53 +1970,6 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
}
/*
- * Wait on any unsafe replies for the given inode. First wait on the
- * newest request, and make that the upper bound. Then, if there are
- * more requests, keep waiting on the oldest as long as it is still older
- * than the original request.
- */
-static void sync_write_wait(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct list_head *head = &ci->i_unsafe_writes;
- struct ceph_osd_request *req;
- u64 last_tid;
-
- if (!S_ISREG(inode->i_mode))
- return;
-
- spin_lock(&ci->i_unsafe_lock);
- if (list_empty(head))
- goto out;
-
- /* set upper bound as _last_ entry in chain */
- req = list_last_entry(head, struct ceph_osd_request,
- r_unsafe_item);
- last_tid = req->r_tid;
-
- do {
- ceph_osdc_get_request(req);
- spin_unlock(&ci->i_unsafe_lock);
- dout("sync_write_wait on tid %llu (until %llu)\n",
- req->r_tid, last_tid);
- wait_for_completion(&req->r_safe_completion);
- spin_lock(&ci->i_unsafe_lock);
- ceph_osdc_put_request(req);
-
- /*
- * from here on look at first entry in chain, since we
- * only want to wait for anything older than last_tid
- */
- if (list_empty(head))
- break;
- req = list_first_entry(head, struct ceph_osd_request,
- r_unsafe_item);
- } while (req->r_tid < last_tid);
-out:
- spin_unlock(&ci->i_unsafe_lock);
-}
-
-/*
* wait for any unsafe requests to complete.
*/
static int unsafe_request_wait(struct inode *inode)
@@ -2024,7 +2021,8 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int dirty;
dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
- sync_write_wait(inode);
+
+ ceph_sync_write_wait(inode);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret < 0)
@@ -2087,87 +2085,74 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
-/*
- * After a recovering MDS goes active, we need to resend any caps
- * we were flushing.
- *
- * Caller holds session->s_mutex.
- */
-static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
-{
- struct ceph_cap_snap *capsnap;
-
- dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
- list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
- flushing_item) {
- struct ceph_inode_info *ci = capsnap->ci;
- struct inode *inode = &ci->vfs_inode;
- struct ceph_cap *cap;
-
- spin_lock(&ci->i_ceph_lock);
- cap = ci->i_auth_cap;
- if (cap && cap->session == session) {
- dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
- cap, capsnap);
- __ceph_flush_snaps(ci, &session, 1);
- } else {
- pr_err("%p auth cap %p not mds%d ???\n", inode,
- cap, session->s_mds);
- }
- spin_unlock(&ci->i_ceph_lock);
- }
-}
-
-static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session,
- struct ceph_inode_info *ci)
+static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_inode_info *ci,
+ u64 oldest_flush_tid)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
struct ceph_cap_flush *cf;
- struct rb_node *n;
- int delayed = 0;
+ int ret;
u64 first_tid = 0;
- u64 oldest_flush_tid;
- spin_lock(&mdsc->cap_dirty_lock);
- oldest_flush_tid = __get_oldest_flush_tid(mdsc);
- spin_unlock(&mdsc->cap_dirty_lock);
+ list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
+ if (cf->tid < first_tid)
+ continue;
- while (true) {
- spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n", inode,
- cap, session->s_mds);
- spin_unlock(&ci->i_ceph_lock);
+ pr_err("%p auth cap %p not mds%d ???\n",
+ inode, cap, session->s_mds);
break;
}
- for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- if (cf->tid >= first_tid)
- break;
- }
- if (!n) {
+ first_tid = cf->tid + 1;
+
+ if (cf->caps) {
+ dout("kick_flushing_caps %p cap %p tid %llu %s\n",
+ inode, cap, cf->tid, ceph_cap_string(cf->caps));
+ ci->i_ceph_flags |= CEPH_I_NODELAY;
+ ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+ __ceph_caps_used(ci),
+ __ceph_caps_wanted(ci),
+ cap->issued | cap->implemented,
+ cf->caps, cf->tid, oldest_flush_tid);
+ if (ret) {
+ pr_err("kick_flushing_caps: error sending "
+ "cap flush, ino (%llx.%llx) "
+ "tid %llu flushing %s\n",
+ ceph_vinop(inode), cf->tid,
+ ceph_cap_string(cf->caps));
+ }
+ } else {
+ struct ceph_cap_snap *capsnap =
+ container_of(cf, struct ceph_cap_snap,
+ cap_flush);
+ dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
+ inode, capsnap, cf->tid,
+ ceph_cap_string(capsnap->dirty));
+
+ atomic_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
- break;
- }
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
+ ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
+ oldest_flush_tid);
+ if (ret < 0) {
+ pr_err("kick_flushing_caps: error sending "
+ "cap flushsnap, ino (%llx.%llx) "
+ "tid %llu follows %llu\n",
+ ceph_vinop(inode), cf->tid,
+ capsnap->follows);
+ }
- first_tid = cf->tid + 1;
+ ceph_put_cap_snap(capsnap);
+ }
- dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
- cap, cf->tid, ceph_cap_string(cf->caps));
- delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
- __ceph_caps_used(ci),
- __ceph_caps_wanted(ci),
- cap->issued | cap->implemented,
- cf->caps, cf->tid, oldest_flush_tid);
+ spin_lock(&ci->i_ceph_lock);
}
- return delayed;
}
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
@@ -2175,8 +2160,14 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
{
struct ceph_inode_info *ci;
struct ceph_cap *cap;
+ u64 oldest_flush_tid;
dout("early_kick_flushing_caps mds%d\n", session->s_mds);
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ spin_unlock(&mdsc->cap_dirty_lock);
+
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
@@ -2196,10 +2187,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
*/
if ((cap->issued & ci->i_flushing_caps) !=
ci->i_flushing_caps) {
- spin_unlock(&ci->i_ceph_lock);
- if (!__kick_flushing_caps(mdsc, session, ci))
- continue;
- spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ __kick_flushing_caps(mdsc, session, ci,
+ oldest_flush_tid);
+ } else {
+ ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
}
spin_unlock(&ci->i_ceph_lock);
@@ -2210,50 +2202,56 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_inode_info *ci;
-
- kick_flushing_capsnaps(mdsc, session);
+ struct ceph_cap *cap;
+ u64 oldest_flush_tid;
dout("kick_flushing_caps mds%d\n", session->s_mds);
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ spin_unlock(&mdsc->cap_dirty_lock);
+
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
- int delayed = __kick_flushing_caps(mdsc, session, ci);
- if (delayed) {
- spin_lock(&ci->i_ceph_lock);
- __cap_delay_requeue(mdsc, ci);
+ spin_lock(&ci->i_ceph_lock);
+ cap = ci->i_auth_cap;
+ if (!(cap && cap->session == session)) {
+ pr_err("%p auth cap %p not mds%d ???\n",
+ &ci->vfs_inode, cap, session->s_mds);
spin_unlock(&ci->i_ceph_lock);
+ continue;
+ }
+ if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ __kick_flushing_caps(mdsc, session, ci,
+ oldest_flush_tid);
}
+ spin_unlock(&ci->i_ceph_lock);
}
}
static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct inode *inode)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap;
- spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
dout("kick_flushing_inode_caps %p flushing %s\n", inode,
ceph_cap_string(ci->i_flushing_caps));
- __ceph_flush_snaps(ci, &session, 1);
-
- if (ci->i_flushing_caps) {
- int delayed;
-
+ if (!list_empty(&ci->i_cap_flush_list)) {
+ u64 oldest_flush_tid;
spin_lock(&mdsc->cap_dirty_lock);
list_move_tail(&ci->i_flushing_item,
&cap->session->s_cap_flushing);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
spin_unlock(&mdsc->cap_dirty_lock);
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
spin_unlock(&ci->i_ceph_lock);
-
- delayed = __kick_flushing_caps(mdsc, session, ci);
- if (delayed) {
- spin_lock(&ci->i_ceph_lock);
- __cap_delay_requeue(mdsc, ci);
- spin_unlock(&ci->i_ceph_lock);
- }
} else {
spin_unlock(&ci->i_ceph_lock);
}
@@ -2580,16 +2578,19 @@ void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
* drop cap_snap that is not associated with any snapshot.
* we don't need to send FLUSHSNAP message for it.
*/
-static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
+static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_cap_snap *capsnap)
{
if (!capsnap->need_flush &&
!capsnap->writing && !capsnap->dirty_pages) {
-
dout("dropping cap_snap %p follows %llu\n",
capsnap, capsnap->follows);
+ BUG_ON(capsnap->cap_flush.tid > 0);
ceph_put_snap_context(capsnap->context);
+ if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
+ ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
+
list_del(&capsnap->ci_item);
- list_del(&capsnap->flushing_item);
ceph_put_cap_snap(capsnap);
return 1;
}
@@ -2636,7 +2637,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
struct ceph_cap_snap,
ci_item);
capsnap->writing = 0;
- if (ceph_try_drop_cap_snap(capsnap))
+ if (ceph_try_drop_cap_snap(ci, capsnap))
put++;
else if (__ceph_finish_cap_snap(ci, capsnap))
flushsnaps = 1;
@@ -2661,7 +2662,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
if (last && !flushsnaps)
ceph_check_caps(ci, 0, NULL);
else if (flushsnaps)
- ceph_flush_snaps(ci);
+ ceph_flush_snaps(ci, NULL);
if (wake)
wake_up_all(&ci->i_cap_wq);
while (put-- > 0)
@@ -2679,15 +2680,19 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc)
{
struct inode *inode = &ci->vfs_inode;
- int last = 0;
- int complete_capsnap = 0;
- int drop_capsnap = 0;
- int found = 0;
struct ceph_cap_snap *capsnap = NULL;
+ int put = 0;
+ bool last = false;
+ bool found = false;
+ bool flush_snaps = false;
+ bool complete_capsnap = false;
spin_lock(&ci->i_ceph_lock);
ci->i_wrbuffer_ref -= nr;
- last = !ci->i_wrbuffer_ref;
+ if (ci->i_wrbuffer_ref == 0) {
+ last = true;
+ put++;
+ }
if (ci->i_head_snapc == snapc) {
ci->i_wrbuffer_ref_head -= nr;
@@ -2707,15 +2712,22 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
} else {
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->context == snapc) {
- found = 1;
+ found = true;
break;
}
}
BUG_ON(!found);
capsnap->dirty_pages -= nr;
if (capsnap->dirty_pages == 0) {
- complete_capsnap = 1;
- drop_capsnap = ceph_try_drop_cap_snap(capsnap);
+ complete_capsnap = true;
+ if (!capsnap->writing) {
+ if (ceph_try_drop_cap_snap(ci, capsnap)) {
+ put++;
+ } else {
+ ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
+ flush_snaps = true;
+ }
+ }
}
dout("put_wrbuffer_cap_refs on %p cap_snap %p "
" snap %lld %d/%d -> %d/%d %s%s\n",
@@ -2730,12 +2742,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
- iput(inode);
- } else if (complete_capsnap) {
- ceph_flush_snaps(ci);
- wake_up_all(&ci->i_cap_wq);
+ } else if (flush_snaps) {
+ ceph_flush_snaps(ci, NULL);
}
- if (drop_capsnap)
+ if (complete_capsnap)
+ wake_up_all(&ci->i_cap_wq);
+ while (put-- > 0)
iput(inode);
}
@@ -2779,12 +2791,11 @@ static void invalidate_aliases(struct inode *inode)
*/
static void handle_cap_grant(struct ceph_mds_client *mdsc,
struct inode *inode, struct ceph_mds_caps *grant,
- u64 inline_version,
- void *inline_data, int inline_len,
+ struct ceph_string **pns, u64 inline_version,
+ void *inline_data, u32 inline_len,
struct ceph_buffer *xattr_buf,
struct ceph_mds_session *session,
- struct ceph_cap *cap, int issued,
- u32 pool_ns_len)
+ struct ceph_cap *cap, int issued)
__releases(ci->i_ceph_lock)
__releases(mdsc->snap_rwsem)
{
@@ -2895,8 +2906,18 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
/* file layout may have changed */
- ci->i_layout = grant->layout;
- ci->i_pool_ns_len = pool_ns_len;
+ s64 old_pool = ci->i_layout.pool_id;
+ struct ceph_string *old_ns;
+
+ ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
+ old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
+ lockdep_is_held(&ci->i_ceph_lock));
+ rcu_assign_pointer(ci->i_layout.pool_ns, *pns);
+
+ if (ci->i_layout.pool_id != old_pool || *pns != old_ns)
+ ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
+
+ *pns = old_ns;
/* size/truncate_seq? */
queue_trunc = ceph_fill_file_size(inode, issued,
@@ -2979,13 +3000,13 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
fill_inline = true;
}
- spin_unlock(&ci->i_ceph_lock);
-
if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
- kick_flushing_inode_caps(mdsc, session, inode);
- up_read(&mdsc->snap_rwsem);
if (newcaps & ~issued)
wake = true;
+ kick_flushing_inode_caps(mdsc, session, inode);
+ up_read(&mdsc->snap_rwsem);
+ } else {
+ spin_unlock(&ci->i_ceph_lock);
}
if (fill_inline)
@@ -3029,23 +3050,24 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
- struct ceph_cap_flush *cf;
- struct rb_node *n;
+ struct ceph_cap_flush *cf, *tmp_cf;
LIST_HEAD(to_remove);
unsigned seq = le32_to_cpu(m->seq);
int dirty = le32_to_cpu(m->dirty);
int cleaned = 0;
- int drop = 0;
+ bool drop = false;
+ bool wake_ci = 0;
+ bool wake_mdsc = 0;
- n = rb_first(&ci->i_cap_flush_tree);
- while (n) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- n = rb_next(&cf->i_node);
+ list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
if (cf->tid == flush_tid)
cleaned = cf->caps;
+ if (cf->caps == 0) /* capsnap */
+ continue;
if (cf->tid <= flush_tid) {
- rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
- list_add_tail(&cf->list, &to_remove);
+ if (__finish_cap_flush(NULL, ci, cf))
+ wake_ci = true;
+ list_add_tail(&cf->i_list, &to_remove);
} else {
cleaned &= ~cf->caps;
if (!cleaned)
@@ -3066,31 +3088,29 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
spin_lock(&mdsc->cap_dirty_lock);
- if (!list_empty(&to_remove)) {
- list_for_each_entry(cf, &to_remove, list)
- rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
-
- n = rb_first(&mdsc->cap_flush_tree);
- cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
- if (!cf || cf->tid > flush_tid)
- wake_up_all(&mdsc->cap_flushing_wq);
+ list_for_each_entry(cf, &to_remove, i_list) {
+ if (__finish_cap_flush(mdsc, NULL, cf))
+ wake_mdsc = true;
}
if (ci->i_flushing_caps == 0) {
- list_del_init(&ci->i_flushing_item);
- if (!list_empty(&session->s_cap_flushing))
- dout(" mds%d still flushing cap on %p\n",
- session->s_mds,
- &list_entry(session->s_cap_flushing.next,
- struct ceph_inode_info,
- i_flushing_item)->vfs_inode);
+ if (list_empty(&ci->i_cap_flush_list)) {
+ list_del_init(&ci->i_flushing_item);
+ if (!list_empty(&session->s_cap_flushing)) {
+ dout(" mds%d still flushing cap on %p\n",
+ session->s_mds,
+ &list_first_entry(&session->s_cap_flushing,
+ struct ceph_inode_info,
+ i_flushing_item)->vfs_inode);
+ }
+ }
mdsc->num_cap_flushing--;
dout(" inode %p now !flushing\n", inode);
if (ci->i_dirty_caps == 0) {
dout(" inode %p now clean\n", inode);
BUG_ON(!list_empty(&ci->i_dirty_item));
- drop = 1;
+ drop = true;
if (ci->i_wr_ref == 0 &&
ci->i_wrbuffer_ref_head == 0) {
BUG_ON(!ci->i_head_snapc);
@@ -3102,17 +3122,21 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
}
}
spin_unlock(&mdsc->cap_dirty_lock);
- wake_up_all(&ci->i_cap_wq);
out:
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, list);
- list_del(&cf->list);
+ struct ceph_cap_flush, i_list);
+ list_del(&cf->i_list);
ceph_free_cap_flush(cf);
}
+
+ if (wake_ci)
+ wake_up_all(&ci->i_cap_wq);
+ if (wake_mdsc)
+ wake_up_all(&mdsc->cap_flushing_wq);
if (drop)
iput(inode);
}
@@ -3131,7 +3155,9 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
u64 follows = le64_to_cpu(m->snap_follows);
struct ceph_cap_snap *capsnap;
- int drop = 0;
+ bool flushed = false;
+ bool wake_ci = false;
+ bool wake_mdsc = false;
dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
inode, ci, session->s_mds, follows);
@@ -3139,30 +3165,47 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->follows == follows) {
- if (capsnap->flush_tid != flush_tid) {
+ if (capsnap->cap_flush.tid != flush_tid) {
dout(" cap_snap %p follows %lld tid %lld !="
" %lld\n", capsnap, follows,
- flush_tid, capsnap->flush_tid);
+ flush_tid, capsnap->cap_flush.tid);
break;
}
- WARN_ON(capsnap->dirty_pages || capsnap->writing);
- dout(" removing %p cap_snap %p follows %lld\n",
- inode, capsnap, follows);
- ceph_put_snap_context(capsnap->context);
- list_del(&capsnap->ci_item);
- list_del(&capsnap->flushing_item);
- ceph_put_cap_snap(capsnap);
- wake_up_all(&mdsc->cap_flushing_wq);
- drop = 1;
+ flushed = true;
break;
} else {
dout(" skipping cap_snap %p follows %lld\n",
capsnap, capsnap->follows);
}
}
+ if (flushed) {
+ WARN_ON(capsnap->dirty_pages || capsnap->writing);
+ dout(" removing %p cap_snap %p follows %lld\n",
+ inode, capsnap, follows);
+ list_del(&capsnap->ci_item);
+ if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
+ wake_ci = true;
+
+ spin_lock(&mdsc->cap_dirty_lock);
+
+ if (list_empty(&ci->i_cap_flush_list))
+ list_del_init(&ci->i_flushing_item);
+
+ if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
+ wake_mdsc = true;
+
+ spin_unlock(&mdsc->cap_dirty_lock);
+ }
spin_unlock(&ci->i_ceph_lock);
- if (drop)
+ if (flushed) {
+ ceph_put_snap_context(capsnap->context);
+ ceph_put_cap_snap(capsnap);
+ if (wake_ci)
+ wake_up_all(&ci->i_cap_wq);
+ if (wake_mdsc)
+ wake_up_all(&mdsc->cap_flushing_wq);
iput(inode);
+ }
}
/*
@@ -3267,7 +3310,8 @@ retry:
tcap->implemented |= issued;
if (cap == ci->i_auth_cap)
ci->i_auth_cap = tcap;
- if (ci->i_flushing_caps && ci->i_auth_cap == tcap) {
+ if (!list_empty(&ci->i_cap_flush_list) &&
+ ci->i_auth_cap == tcap) {
spin_lock(&mdsc->cap_dirty_lock);
list_move_tail(&ci->i_flushing_item,
&tcap->session->s_cap_flushing);
@@ -3420,20 +3464,18 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_cap *cap;
struct ceph_mds_caps *h;
struct ceph_mds_cap_peer *peer = NULL;
- struct ceph_snap_realm *realm;
+ struct ceph_snap_realm *realm = NULL;
+ struct ceph_string *pool_ns = NULL;
int mds = session->s_mds;
int op, issued;
u32 seq, mseq;
struct ceph_vino vino;
- u64 cap_id;
- u64 size, max_size;
u64 tid;
u64 inline_version = 0;
void *inline_data = NULL;
u32 inline_len = 0;
void *snaptrace;
size_t snaptrace_len;
- u32 pool_ns_len = 0;
void *p, *end;
dout("handle_caps from mds%d\n", mds);
@@ -3447,11 +3489,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
op = le32_to_cpu(h->op);
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
- cap_id = le64_to_cpu(h->cap_id);
seq = le32_to_cpu(h->seq);
mseq = le32_to_cpu(h->migrate_seq);
- size = le64_to_cpu(h->size);
- max_size = le64_to_cpu(h->max_size);
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
@@ -3490,6 +3529,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
u64 flush_tid;
u32 caller_uid, caller_gid;
u32 osd_epoch_barrier;
+ u32 pool_ns_len;
/* version >= 5 */
ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
/* version >= 6 */
@@ -3499,6 +3539,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
ceph_decode_32_safe(&p, end, caller_gid, bad);
/* version >= 8 */
ceph_decode_32_safe(&p, end, pool_ns_len, bad);
+ if (pool_ns_len > 0) {
+ ceph_decode_need(&p, end, pool_ns_len, bad);
+ pool_ns = ceph_find_or_create_string(p, pool_ns_len);
+ p += pool_ns_len;
+ }
}
/* lookup ino */
@@ -3519,7 +3564,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
cap = ceph_get_cap(mdsc, NULL);
cap->cap_ino = vino.ino;
cap->queue_release = 1;
- cap->cap_id = cap_id;
+ cap->cap_id = le64_to_cpu(h->cap_id);
cap->mseq = mseq;
cap->seq = seq;
spin_lock(&session->s_cap_lock);
@@ -3554,10 +3599,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
}
handle_cap_import(mdsc, inode, h, peer, session,
&cap, &issued);
- handle_cap_grant(mdsc, inode, h,
+ handle_cap_grant(mdsc, inode, h, &pool_ns,
inline_version, inline_data, inline_len,
- msg->middle, session, cap, issued,
- pool_ns_len);
+ msg->middle, session, cap, issued);
if (realm)
ceph_put_snap_realm(mdsc, realm);
goto done_unlocked;
@@ -3579,10 +3623,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
case CEPH_CAP_OP_GRANT:
__ceph_caps_issued(ci, &issued);
issued |= __ceph_caps_dirty(ci);
- handle_cap_grant(mdsc, inode, h,
+ handle_cap_grant(mdsc, inode, h, &pool_ns,
inline_version, inline_data, inline_len,
- msg->middle, session, cap, issued,
- pool_ns_len);
+ msg->middle, session, cap, issued);
goto done_unlocked;
case CEPH_CAP_OP_FLUSH_ACK:
@@ -3613,6 +3656,7 @@ done:
mutex_unlock(&session->s_mutex);
done_unlocked:
iput(inode);
+ ceph_put_string(pool_ns);
return;
bad:
@@ -3673,6 +3717,16 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
dout("flush_dirty_caps done\n");
}
+void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode)
+{
+ int i;
+ int bits = (fmode << 1) | 1;
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
+ if (bits & (1 << i))
+ ci->i_nr_by_mode[i]++;
+ }
+}
+
/*
* Drop open file reference. If we were the last open file,
* we may need to release capabilities to the MDS (or schedule
@@ -3680,15 +3734,20 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
*/
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
- struct inode *inode = &ci->vfs_inode;
- int last = 0;
-
+ int i, last = 0;
+ int bits = (fmode << 1) | 1;
spin_lock(&ci->i_ceph_lock);
- dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
- ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
- BUG_ON(ci->i_nr_by_mode[fmode] == 0);
- if (--ci->i_nr_by_mode[fmode] == 0)
- last++;
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
+ if (bits & (1 << i)) {
+ BUG_ON(ci->i_nr_by_mode[i] == 0);
+ if (--ci->i_nr_by_mode[i] == 0)
+ last++;
+ }
+ }
+ dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n",
+ &ci->vfs_inode, fmode,
+ ci->i_nr_by_mode[0], ci->i_nr_by_mode[1],
+ ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]);
spin_unlock(&ci->i_ceph_lock);
if (last && ci->i_vino.snap == CEPH_NOSNAP)
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6e0fedf6713b..c64a0b794d49 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -59,7 +59,7 @@ int ceph_init_dentry(struct dentry *dentry)
di->dentry = dentry;
di->lease_session = NULL;
- dentry->d_time = jiffies;
+ di->time = jiffies;
/* avoid reordering d_fsdata setup so that the check above is safe */
smp_mb();
dentry->d_fsdata = di;
@@ -1124,7 +1124,7 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
- dentry->d_time = jiffies;
+ ceph_dentry(dentry)->time = jiffies;
ceph_dentry(dentry)->lease_shared_gen = 0;
spin_unlock(&dentry->d_lock);
}
@@ -1133,7 +1133,8 @@ void ceph_invalidate_dentry_lease(struct dentry *dentry)
* Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
*/
-static int dentry_lease_is_valid(struct dentry *dentry)
+static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
+ struct inode *dir)
{
struct ceph_dentry_info *di;
struct ceph_mds_session *s;
@@ -1141,12 +1142,11 @@ static int dentry_lease_is_valid(struct dentry *dentry)
u32 gen;
unsigned long ttl;
struct ceph_mds_session *session = NULL;
- struct inode *dir = NULL;
u32 seq = 0;
spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
- if (di->lease_session) {
+ if (di && di->lease_session) {
s = di->lease_session;
spin_lock(&s->s_gen_ttl_lock);
gen = s->s_cap_gen;
@@ -1154,17 +1154,24 @@ static int dentry_lease_is_valid(struct dentry *dentry)
spin_unlock(&s->s_gen_ttl_lock);
if (di->lease_gen == gen &&
- time_before(jiffies, dentry->d_time) &&
+ time_before(jiffies, di->time) &&
time_before(jiffies, ttl)) {
valid = 1;
if (di->lease_renew_after &&
time_after(jiffies, di->lease_renew_after)) {
- /* we should renew */
- dir = d_inode(dentry->d_parent);
- session = ceph_get_mds_session(s);
- seq = di->lease_seq;
- di->lease_renew_after = 0;
- di->lease_renew_from = jiffies;
+ /*
+ * We should renew. If we're in RCU walk mode
+ * though, we can't do that so just return
+ * -ECHILD.
+ */
+ if (flags & LOOKUP_RCU) {
+ valid = -ECHILD;
+ } else {
+ session = ceph_get_mds_session(s);
+ seq = di->lease_seq;
+ di->lease_renew_after = 0;
+ di->lease_renew_from = jiffies;
+ }
}
}
}
@@ -1207,15 +1214,19 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct dentry *parent;
struct inode *dir;
- if (flags & LOOKUP_RCU)
- return -ECHILD;
+ if (flags & LOOKUP_RCU) {
+ parent = ACCESS_ONCE(dentry->d_parent);
+ dir = d_inode_rcu(parent);
+ if (!dir)
+ return -ECHILD;
+ } else {
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ }
dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
- parent = dget_parent(dentry);
- dir = d_inode(parent);
-
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
@@ -1224,12 +1235,16 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
} else if (d_really_is_positive(dentry) &&
ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
valid = 1;
- } else if (dentry_lease_is_valid(dentry) ||
- dir_lease_is_valid(dir, dentry)) {
- if (d_really_is_positive(dentry))
- valid = ceph_is_any_caps(d_inode(dentry));
- else
- valid = 1;
+ } else {
+ valid = dentry_lease_is_valid(dentry, flags, dir);
+ if (valid == -ECHILD)
+ return valid;
+ if (valid || dir_lease_is_valid(dir, dentry)) {
+ if (d_really_is_positive(dentry))
+ valid = ceph_is_any_caps(d_inode(dentry));
+ else
+ valid = 1;
+ }
}
if (!valid) {
@@ -1238,6 +1253,9 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct ceph_mds_request *req;
int op, mask, err;
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
op = ceph_snap(dir) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
@@ -1273,7 +1291,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
ceph_dir_clear_complete(dir);
}
- dput(parent);
+ if (!(flags & LOOKUP_RCU))
+ dput(parent);
return valid;
}
@@ -1286,10 +1305,14 @@ static void ceph_d_release(struct dentry *dentry)
dout("d_release %p\n", dentry);
ceph_dentry_lru_del(dentry);
+
+ spin_lock(&dentry->d_lock);
+ dentry->d_fsdata = NULL;
+ spin_unlock(&dentry->d_lock);
+
if (di->lease_session)
ceph_put_mds_session(di->lease_session);
kmem_cache_free(ceph_dentry_cachep, di);
- dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 0daaf7ceedc5..0f5375d8e030 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -708,7 +708,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
}
}
- ceph_put_page_vector(osd_data->pages, num_pages, false);
+ ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
ceph_osdc_put_request(req);
if (rc < 0)
@@ -821,6 +821,54 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
}
}
+/*
+ * Wait on any unsafe replies for the given inode. First wait on the
+ * newest request, and make that the upper bound. Then, if there are
+ * more requests, keep waiting on the oldest as long as it is still older
+ * than the original request.
+ */
+void ceph_sync_write_wait(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct list_head *head = &ci->i_unsafe_writes;
+ struct ceph_osd_request *req;
+ u64 last_tid;
+
+ if (!S_ISREG(inode->i_mode))
+ return;
+
+ spin_lock(&ci->i_unsafe_lock);
+ if (list_empty(head))
+ goto out;
+
+ /* set upper bound as _last_ entry in chain */
+
+ req = list_last_entry(head, struct ceph_osd_request,
+ r_unsafe_item);
+ last_tid = req->r_tid;
+
+ do {
+ ceph_osdc_get_request(req);
+ spin_unlock(&ci->i_unsafe_lock);
+
+ dout("sync_write_wait on tid %llu (until %llu)\n",
+ req->r_tid, last_tid);
+ wait_for_completion(&req->r_safe_completion);
+ ceph_osdc_put_request(req);
+
+ spin_lock(&ci->i_unsafe_lock);
+ /*
+ * from here on look at first entry in chain, since we
+ * only want to wait for anything older than last_tid
+ */
+ if (list_empty(head))
+ break;
+ req = list_first_entry(head, struct ceph_osd_request,
+ r_unsafe_item);
+ } while (req->r_tid < last_tid);
+out:
+ spin_unlock(&ci->i_unsafe_lock);
+}
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
@@ -964,7 +1012,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
len = ret;
}
- ceph_put_page_vector(pages, num_pages, false);
+ ceph_put_page_vector(pages, num_pages, !write);
ceph_osdc_put_request(req);
if (ret < 0)
@@ -985,6 +1033,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
}
if (aio_req) {
+ LIST_HEAD(osd_reqs);
+
if (aio_req->num_reqs == 0) {
kfree(aio_req);
return ret;
@@ -993,8 +1043,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
CEPH_CAP_FILE_RD);
- while (!list_empty(&aio_req->osd_reqs)) {
- req = list_first_entry(&aio_req->osd_reqs,
+ list_splice(&aio_req->osd_reqs, &osd_reqs);
+ while (!list_empty(&osd_reqs)) {
+ req = list_first_entry(&osd_reqs,
struct ceph_osd_request,
r_unsafe_item);
list_del_init(&req->r_unsafe_item);
@@ -1448,16 +1499,14 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
loff_t i_size;
- int ret;
+ loff_t ret;
inode_lock(inode);
if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
- if (ret < 0) {
- offset = ret;
+ if (ret < 0)
goto out;
- }
}
i_size = i_size_read(inode);
@@ -1473,7 +1522,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
* write() or lseek() might have altered it
*/
if (offset == 0) {
- offset = file->f_pos;
+ ret = file->f_pos;
goto out;
}
offset += file->f_pos;
@@ -1493,11 +1542,11 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
break;
}
- offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+ ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
inode_unlock(inode);
- return offset;
+ return ret;
}
static inline void ceph_zero_partial_page(
@@ -1583,9 +1632,9 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
int ret = 0;
struct ceph_inode_info *ci = ceph_inode(inode);
- s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
- s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
- s32 object_size = ceph_file_layout_object_size(ci->i_layout);
+ s32 stripe_unit = ci->i_layout.stripe_unit;
+ s32 stripe_count = ci->i_layout.stripe_count;
+ s32 object_size = ci->i_layout.object_size;
u64 object_set_size = object_size * stripe_count;
u64 nearly, t;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 99bdef66213a..dd3a6dbf71eb 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -446,7 +446,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_symlink = NULL;
memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
- ci->i_pool_ns_len = 0;
+ RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
ci->i_fragtree = RB_ROOT;
mutex_init(&ci->i_fragtree_mutex);
@@ -468,7 +468,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
INIT_LIST_HEAD(&ci->i_dirty_item);
INIT_LIST_HEAD(&ci->i_flushing_item);
ci->i_prealloc_cap_flush = NULL;
- ci->i_cap_flush_tree = RB_ROOT;
+ INIT_LIST_HEAD(&ci->i_cap_flush_list);
init_waitqueue_head(&ci->i_cap_wq);
ci->i_hold_caps_min = 0;
ci->i_hold_caps_max = 0;
@@ -477,7 +477,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_head_snapc = NULL;
ci->i_snap_caps = 0;
- for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
ci->i_nr_by_mode[i] = 0;
mutex_init(&ci->i_truncate_mutex);
@@ -570,6 +570,8 @@ void ceph_destroy_inode(struct inode *inode)
if (ci->i_xattrs.prealloc_blob)
ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
+
call_rcu(&inode->i_rcu, ceph_i_callback);
}
@@ -583,6 +585,14 @@ int ceph_drop_inode(struct inode *inode)
return 1;
}
+void ceph_evict_inode(struct inode *inode)
+{
+ /* wait unsafe sync writes */
+ ceph_sync_write_wait(inode);
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+}
+
static inline blkcnt_t calc_inode_blocks(u64 size)
{
return (size + (1<<9) - 1) >> 9;
@@ -733,6 +743,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
int issued = 0, implemented, new_issued;
struct timespec mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
int err = 0;
bool wake = false;
@@ -760,6 +771,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
iinfo->xattr_len);
}
+ if (iinfo->pool_ns_len > 0)
+ pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
+ iinfo->pool_ns_len);
+
spin_lock(&ci->i_ceph_lock);
/*
@@ -814,10 +829,18 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if (new_version ||
(new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
- if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
+ s64 old_pool = ci->i_layout.pool_id;
+ struct ceph_string *old_ns;
+
+ ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
+ old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
+ lockdep_is_held(&ci->i_ceph_lock));
+ rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
+
+ if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
- ci->i_layout = info->layout;
- ci->i_pool_ns_len = iinfo->pool_ns_len;
+
+ pool_ns = old_ns;
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(info->truncate_seq),
@@ -985,6 +1008,7 @@ out:
ceph_put_cap(mdsc, new_cap);
if (xattr_blob)
ceph_buffer_put(xattr_blob);
+ ceph_put_string(pool_ns);
return err;
}
@@ -1018,7 +1042,7 @@ static void update_dentry_lease(struct dentry *dentry,
goto out_unlock;
if (di->lease_gen == session->s_cap_gen &&
- time_before(ttl, dentry->d_time))
+ time_before(ttl, di->time))
goto out_unlock; /* we already have a newer lease. */
if (di->lease_session && di->lease_session != session)
@@ -1032,7 +1056,7 @@ static void update_dentry_lease(struct dentry *dentry,
di->lease_seq = le32_to_cpu(lease->seq);
di->lease_renew_after = half_ttl;
di->lease_renew_from = 0;
- dentry->d_time = ttl;
+ di->time = ttl;
out_unlock:
spin_unlock(&dentry->d_lock);
return;
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index be6b1657b1af..7d752d53353a 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -21,10 +21,10 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT, false);
if (!err) {
- l.stripe_unit = ceph_file_layout_su(ci->i_layout);
- l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
- l.object_size = ceph_file_layout_object_size(ci->i_layout);
- l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
+ l.stripe_unit = ci->i_layout.stripe_unit;
+ l.stripe_count = ci->i_layout.stripe_count;
+ l.object_size = ci->i_layout.object_size;
+ l.data_pool = ci->i_layout.pool_id;
l.preferred_osd = (s32)-1;
if (copy_to_user(arg, &l, sizeof(l)))
return -EFAULT;
@@ -82,19 +82,19 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
if (l.stripe_count)
nl.stripe_count = l.stripe_count;
else
- nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
+ nl.stripe_count = ci->i_layout.stripe_count;
if (l.stripe_unit)
nl.stripe_unit = l.stripe_unit;
else
- nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
+ nl.stripe_unit = ci->i_layout.stripe_unit;
if (l.object_size)
nl.object_size = l.object_size;
else
- nl.object_size = ceph_file_layout_object_size(ci->i_layout);
+ nl.object_size = ci->i_layout.object_size;
if (l.data_pool)
nl.data_pool = l.data_pool;
else
- nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
+ nl.data_pool = ci->i_layout.pool_id;
/* this is obsolete, and always -1 */
nl.preferred_osd = le64_to_cpu(-1);
@@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
struct ceph_object_locator oloc;
- struct ceph_object_id oid;
+ CEPH_DEFINE_OID_ONSTACK(oid);
u64 len = 1, olen;
u64 tmp;
struct ceph_pg pgid;
@@ -202,8 +202,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
return -EIO;
}
dl.file_offset -= dl.object_offset;
- dl.object_size = ceph_file_layout_object_size(ci->i_layout);
- dl.block_size = ceph_file_layout_su(ci->i_layout);
+ dl.object_size = ci->i_layout.object_size;
+ dl.block_size = ci->i_layout.stripe_unit;
/* block_offset = object_offset % block_size */
tmp = dl.object_offset;
@@ -212,10 +212,13 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
ceph_ino(inode), dl.object_no);
- oloc.pool = ceph_file_layout_pg_pool(ci->i_layout);
+ oloc.pool = ci->i_layout.pool_id;
+ oloc.pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
ceph_oid_printf(&oid, "%s", dl.object_name);
r = ceph_object_locator_to_pg(osdc->osdmap, &oid, &oloc, &pgid);
+
+ ceph_oloc_destroy(&oloc);
if (r < 0) {
up_read(&osdc->lock);
return r;
@@ -247,9 +250,8 @@ static long ceph_ioctl_lazyio(struct file *file)
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
spin_lock(&ci->i_ceph_lock);
- ci->i_nr_by_mode[fi->fmode]--;
fi->fmode |= CEPH_FILE_MODE_LAZY;
- ci->i_nr_by_mode[fi->fmode]++;
+ ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4e8678a612b6..fa59a85226b2 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -48,7 +48,7 @@
struct ceph_reconnect_state {
int nr_caps;
struct ceph_pagelist *pagelist;
- bool flock;
+ unsigned msg_version;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
@@ -100,12 +100,15 @@ static int parse_reply_info_in(void **p, void *end,
} else
info->inline_version = CEPH_INLINE_NONE;
+ info->pool_ns_len = 0;
+ info->pool_ns_data = NULL;
if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
- ceph_decode_need(p, end, info->pool_ns_len, bad);
- *p += info->pool_ns_len;
- } else {
- info->pool_ns_len = 0;
+ if (info->pool_ns_len > 0) {
+ ceph_decode_need(p, end, info->pool_ns_len, bad);
+ info->pool_ns_data = *p;
+ *p += info->pool_ns_len;
+ }
}
return 0;
@@ -469,7 +472,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_LIST_HEAD(&s->s_cap_flushing);
- INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
dout("register_session mds%d\n", mds);
if (mds >= mdsc->max_sessions) {
@@ -1145,19 +1147,17 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
invalidate = true;
- while (true) {
- struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
- if (!n)
- break;
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
- list_add(&cf->list, &to_remove);
+ while (!list_empty(&ci->i_cap_flush_list)) {
+ cf = list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ list_del(&cf->i_list);
+ list_add(&cf->i_list, &to_remove);
}
spin_lock(&mdsc->cap_dirty_lock);
- list_for_each_entry(cf, &to_remove, list)
- rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
+ list_for_each_entry(cf, &to_remove, i_list)
+ list_del(&cf->g_list);
if (!list_empty(&ci->i_dirty_item)) {
pr_warn_ratelimited(
@@ -1181,7 +1181,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_unlock(&mdsc->cap_dirty_lock);
if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
- list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
+ list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
}
@@ -1189,8 +1189,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
while (!list_empty(&to_remove)) {
struct ceph_cap_flush *cf;
cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, list);
- list_del(&cf->list);
+ struct ceph_cap_flush, i_list);
+ list_del(&cf->i_list);
ceph_free_cap_flush(cf);
}
@@ -1212,6 +1212,8 @@ static void remove_session_caps(struct ceph_mds_session *session)
dout("remove_session_caps on %p\n", session);
iterate_session_caps(session, remove_session_caps_cb, fsc);
+ wake_up_all(&fsc->mdsc->cap_flushing_wq);
+
spin_lock(&session->s_cap_lock);
if (session->s_nr_caps > 0) {
struct inode *inode;
@@ -1478,35 +1480,21 @@ static int trim_caps(struct ceph_mds_client *mdsc,
return 0;
}
-static int check_capsnap_flush(struct ceph_inode_info *ci,
- u64 want_snap_seq)
-{
- int ret = 1;
- spin_lock(&ci->i_ceph_lock);
- if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
- struct ceph_cap_snap *capsnap =
- list_first_entry(&ci->i_cap_snaps,
- struct ceph_cap_snap, ci_item);
- ret = capsnap->follows >= want_snap_seq;
- }
- spin_unlock(&ci->i_ceph_lock);
- return ret;
-}
-
static int check_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
- struct rb_node *n;
- struct ceph_cap_flush *cf;
int ret = 1;
spin_lock(&mdsc->cap_dirty_lock);
- n = rb_first(&mdsc->cap_flush_tree);
- cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
- if (cf && cf->tid <= want_flush_tid) {
- dout("check_caps_flush still flushing tid %llu <= %llu\n",
- cf->tid, want_flush_tid);
- ret = 0;
+ if (!list_empty(&mdsc->cap_flush_list)) {
+ struct ceph_cap_flush *cf =
+ list_first_entry(&mdsc->cap_flush_list,
+ struct ceph_cap_flush, g_list);
+ if (cf->tid <= want_flush_tid) {
+ dout("check_caps_flush still flushing tid "
+ "%llu <= %llu\n", cf->tid, want_flush_tid);
+ ret = 0;
+ }
}
spin_unlock(&mdsc->cap_dirty_lock);
return ret;
@@ -1518,54 +1506,9 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
* returns true if we've flushed through want_flush_tid
*/
static void wait_caps_flush(struct ceph_mds_client *mdsc,
- u64 want_flush_tid, u64 want_snap_seq)
+ u64 want_flush_tid)
{
- int mds;
-
- dout("check_caps_flush want %llu snap want %llu\n",
- want_flush_tid, want_snap_seq);
- mutex_lock(&mdsc->mutex);
- for (mds = 0; mds < mdsc->max_sessions; ) {
- struct ceph_mds_session *session = mdsc->sessions[mds];
- struct inode *inode = NULL;
-
- if (!session) {
- mds++;
- continue;
- }
- get_session(session);
- mutex_unlock(&mdsc->mutex);
-
- mutex_lock(&session->s_mutex);
- if (!list_empty(&session->s_cap_snaps_flushing)) {
- struct ceph_cap_snap *capsnap =
- list_first_entry(&session->s_cap_snaps_flushing,
- struct ceph_cap_snap,
- flushing_item);
- struct ceph_inode_info *ci = capsnap->ci;
- if (!check_capsnap_flush(ci, want_snap_seq)) {
- dout("check_cap_flush still flushing snap %p "
- "follows %lld <= %lld to mds%d\n",
- &ci->vfs_inode, capsnap->follows,
- want_snap_seq, mds);
- inode = igrab(&ci->vfs_inode);
- }
- }
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
-
- if (inode) {
- wait_event(mdsc->cap_flushing_wq,
- check_capsnap_flush(ceph_inode(inode),
- want_snap_seq));
- iput(inode);
- } else {
- mds++;
- }
-
- mutex_lock(&mdsc->mutex);
- }
- mutex_unlock(&mdsc->mutex);
+ dout("check_caps_flush want %llu\n", want_flush_tid);
wait_event(mdsc->cap_flushing_wq,
check_caps_flush(mdsc, want_flush_tid));
@@ -2163,6 +2106,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
mds = __choose_mds(mdsc, req);
if (mds < 0 ||
ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
+ if (mdsc->mdsmap_err) {
+ err = mdsc->mdsmap_err;
+ dout("do_request mdsmap err %d\n", err);
+ goto finish;
+ }
dout("do_request no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
goto out;
@@ -2292,14 +2240,6 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
- /* deny access to directories with pool_ns layouts */
- if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
- ceph_inode(req->r_inode)->i_pool_ns_len)
- return -EIO;
- if (req->r_locked_dir &&
- ceph_inode(req->r_locked_dir)->i_pool_ns_len)
- return -EIO;
-
/* issue */
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
@@ -2791,13 +2731,13 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
} rec;
- size_t reclen;
struct ceph_inode_info *ci;
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
char *path;
int pathlen, err;
u64 pathbase;
+ u64 snap_follows;
struct dentry *dentry;
ci = cap->ci;
@@ -2820,9 +2760,6 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
path = NULL;
pathlen = 0;
}
- err = ceph_pagelist_encode_string(pagelist, path, pathlen);
- if (err)
- goto out_free;
spin_lock(&ci->i_ceph_lock);
cap->seq = 0; /* reset cap seq */
@@ -2830,14 +2767,13 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
cap->mseq = 0; /* and migrate_seq */
cap->cap_gen = cap->session->s_cap_gen;
- if (recon_state->flock) {
+ if (recon_state->msg_version >= 2) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v2.pathbase = cpu_to_le64(pathbase);
rec.v2.flock_len = 0;
- reclen = sizeof(rec.v2);
} else {
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
@@ -2847,13 +2783,23 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
- reclen = sizeof(rec.v1);
+ }
+
+ if (list_empty(&ci->i_cap_snaps)) {
+ snap_follows = 0;
+ } else {
+ struct ceph_cap_snap *capsnap =
+ list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap, ci_item);
+ snap_follows = capsnap->follows;
}
spin_unlock(&ci->i_ceph_lock);
- if (recon_state->flock) {
+ if (recon_state->msg_version >= 2) {
int num_fcntl_locks, num_flock_locks;
struct ceph_filelock *flocks;
+ size_t struct_len, total_len = 0;
+ u8 struct_v = 0;
encode_again:
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
@@ -2872,20 +2818,51 @@ encode_again:
goto encode_again;
goto out_free;
}
+
+ if (recon_state->msg_version >= 3) {
+ /* version, compat_version and struct_len */
+ total_len = 2 * sizeof(u8) + sizeof(u32);
+ struct_v = 2;
+ }
/*
* number of encoded locks is stable, so copy to pagelist
*/
- rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
- (num_fcntl_locks+num_flock_locks) *
- sizeof(struct ceph_filelock));
- err = ceph_pagelist_append(pagelist, &rec, reclen);
- if (!err)
- err = ceph_locks_to_pagelist(flocks, pagelist,
- num_fcntl_locks,
- num_flock_locks);
+ struct_len = 2 * sizeof(u32) +
+ (num_fcntl_locks + num_flock_locks) *
+ sizeof(struct ceph_filelock);
+ rec.v2.flock_len = cpu_to_le32(struct_len);
+
+ struct_len += sizeof(rec.v2);
+ struct_len += sizeof(u32) + pathlen;
+
+ if (struct_v >= 2)
+ struct_len += sizeof(u64); /* snap_follows */
+
+ total_len += struct_len;
+ err = ceph_pagelist_reserve(pagelist, total_len);
+
+ if (!err) {
+ if (recon_state->msg_version >= 3) {
+ ceph_pagelist_encode_8(pagelist, struct_v);
+ ceph_pagelist_encode_8(pagelist, 1);
+ ceph_pagelist_encode_32(pagelist, struct_len);
+ }
+ ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
+ ceph_locks_to_pagelist(flocks, pagelist,
+ num_fcntl_locks,
+ num_flock_locks);
+ if (struct_v >= 2)
+ ceph_pagelist_encode_64(pagelist, snap_follows);
+ }
kfree(flocks);
} else {
- err = ceph_pagelist_append(pagelist, &rec, reclen);
+ size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
+ err = ceph_pagelist_reserve(pagelist, size);
+ if (!err) {
+ ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
+ }
}
recon_state->nr_caps++;
@@ -2976,7 +2953,12 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
recon_state.nr_caps = 0;
recon_state.pagelist = pagelist;
- recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
+ if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
+ recon_state.msg_version = 3;
+ else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
+ recon_state.msg_version = 2;
+ else
+ recon_state.msg_version = 1;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
@@ -3005,8 +2987,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
goto fail;
}
- if (recon_state.flock)
- reply->hdr.version = cpu_to_le16(2);
+ reply->hdr.version = cpu_to_le16(recon_state.msg_version);
/* raced with cap release? */
if (s_nr_caps != recon_state.nr_caps) {
@@ -3231,7 +3212,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
msecs_to_jiffies(le32_to_cpu(h->duration_ms));
di->lease_seq = seq;
- dentry->d_time = di->lease_renew_from + duration;
+ di->time = di->lease_renew_from + duration;
di->lease_renew_after = di->lease_renew_from +
(duration >> 1);
di->lease_renew_from = 0;
@@ -3297,47 +3278,6 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
}
/*
- * Preemptively release a lease we expect to invalidate anyway.
- * Pass @inode always, @dentry is optional.
- */
-void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
- struct dentry *dentry)
-{
- struct ceph_dentry_info *di;
- struct ceph_mds_session *session;
- u32 seq;
-
- BUG_ON(inode == NULL);
- BUG_ON(dentry == NULL);
-
- /* is dentry lease valid? */
- spin_lock(&dentry->d_lock);
- di = ceph_dentry(dentry);
- if (!di || !di->lease_session ||
- di->lease_session->s_mds < 0 ||
- di->lease_gen != di->lease_session->s_cap_gen ||
- !time_before(jiffies, dentry->d_time)) {
- dout("lease_release inode %p dentry %p -- "
- "no lease\n",
- inode, dentry);
- spin_unlock(&dentry->d_lock);
- return;
- }
-
- /* we do have a lease on this dentry; note mds and seq */
- session = ceph_get_mds_session(di->lease_session);
- seq = di->lease_seq;
- __ceph_mdsc_drop_dentry_lease(dentry);
- spin_unlock(&dentry->d_lock);
-
- dout("lease_release inode %p dentry %p to mds%d\n",
- inode, dentry, session->s_mds);
- ceph_mdsc_lease_send_msg(session, inode, dentry,
- CEPH_MDS_LEASE_RELEASE, seq);
- ceph_put_mds_session(session);
-}
-
-/*
* drop all leases (and dentry refs) in preparation for umount
*/
static void drop_leases(struct ceph_mds_client *mdsc)
@@ -3470,7 +3410,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
- mdsc->cap_flush_tree = RB_ROOT;
+ INIT_LIST_HEAD(&mdsc->cap_flush_list);
INIT_LIST_HEAD(&mdsc->cap_dirty);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
mdsc->num_cap_flushing = 0;
@@ -3585,7 +3525,7 @@ restart:
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
- u64 want_tid, want_flush, want_snap;
+ u64 want_tid, want_flush;
if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
return;
@@ -3598,17 +3538,19 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
ceph_flush_dirty_caps(mdsc);
spin_lock(&mdsc->cap_dirty_lock);
want_flush = mdsc->last_cap_flush_tid;
+ if (!list_empty(&mdsc->cap_flush_list)) {
+ struct ceph_cap_flush *cf =
+ list_last_entry(&mdsc->cap_flush_list,
+ struct ceph_cap_flush, g_list);
+ cf->wake = true;
+ }
spin_unlock(&mdsc->cap_dirty_lock);
- down_read(&mdsc->snap_rwsem);
- want_snap = mdsc->last_snap_seq;
- up_read(&mdsc->snap_rwsem);
-
- dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
- want_tid, want_flush, want_snap);
+ dout("sync want tid %lld flush_seq %lld\n",
+ want_tid, want_flush);
wait_unsafe_requests(mdsc, want_tid);
- wait_caps_flush(mdsc, want_flush, want_snap);
+ wait_caps_flush(mdsc, want_flush);
}
/*
@@ -3729,11 +3671,86 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
dout("mdsc_destroy %p done\n", mdsc);
}
+void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+{
+ struct ceph_fs_client *fsc = mdsc->fsc;
+ const char *mds_namespace = fsc->mount_options->mds_namespace;
+ void *p = msg->front.iov_base;
+ void *end = p + msg->front.iov_len;
+ u32 epoch;
+ u32 map_len;
+ u32 num_fs;
+ u32 mount_fscid = (u32)-1;
+ u8 struct_v, struct_cv;
+ int err = -EINVAL;
+
+ ceph_decode_need(&p, end, sizeof(u32), bad);
+ epoch = ceph_decode_32(&p);
+
+ dout("handle_fsmap epoch %u\n", epoch);
+
+ ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
+ struct_v = ceph_decode_8(&p);
+ struct_cv = ceph_decode_8(&p);
+ map_len = ceph_decode_32(&p);
+
+ ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
+ p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
+
+ num_fs = ceph_decode_32(&p);
+ while (num_fs-- > 0) {
+ void *info_p, *info_end;
+ u32 info_len;
+ u8 info_v, info_cv;
+ u32 fscid, namelen;
+
+ ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
+ info_v = ceph_decode_8(&p);
+ info_cv = ceph_decode_8(&p);
+ info_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, info_len, bad);
+ info_p = p;
+ info_end = p + info_len;
+ p = info_end;
+
+ ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
+ fscid = ceph_decode_32(&info_p);
+ namelen = ceph_decode_32(&info_p);
+ ceph_decode_need(&info_p, info_end, namelen, bad);
+
+ if (mds_namespace &&
+ strlen(mds_namespace) == namelen &&
+ !strncmp(mds_namespace, (char *)info_p, namelen)) {
+ mount_fscid = fscid;
+ break;
+ }
+ }
+
+ ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
+ if (mount_fscid != (u32)-1) {
+ fsc->client->monc.fs_cluster_id = mount_fscid;
+ ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
+ 0, true);
+ ceph_monc_renew_subs(&fsc->client->monc);
+ } else {
+ err = -ENOENT;
+ goto err_out;
+ }
+ return;
+bad:
+ pr_err("error decoding fsmap\n");
+err_out:
+ mutex_lock(&mdsc->mutex);
+ mdsc->mdsmap_err = -ENOENT;
+ __wake_requests(mdsc, &mdsc->waiting_for_map);
+ mutex_unlock(&mdsc->mutex);
+ return;
+}
/*
* handle mds map update.
*/
-void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
u32 epoch;
u32 maplen;
@@ -3840,7 +3857,10 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
switch (type) {
case CEPH_MSG_MDS_MAP:
- ceph_mdsc_handle_map(mdsc, msg);
+ ceph_mdsc_handle_mdsmap(mdsc, msg);
+ break;
+ case CEPH_MSG_FS_MAP_USER:
+ ceph_mdsc_handle_fsmap(mdsc, msg);
break;
case CEPH_MSG_CLIENT_SESSION:
handle_session(s, msg);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index e7d38aac7109..6b3679737d4a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -45,6 +45,7 @@ struct ceph_mds_reply_info_in {
u32 inline_len;
char *inline_data;
u32 pool_ns_len;
+ char *pool_ns_data;
};
struct ceph_mds_reply_dir_entry {
@@ -151,7 +152,6 @@ struct ceph_mds_session {
/* protected by mutex */
struct list_head s_cap_flushing; /* inodes w/ flushing caps */
- struct list_head s_cap_snaps_flushing;
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
@@ -275,8 +275,10 @@ struct ceph_mds_request {
struct ceph_pool_perm {
struct rb_node node;
- u32 pool;
int perm;
+ s64 pool;
+ size_t pool_ns_len;
+ char pool_ns[];
};
/*
@@ -290,6 +292,7 @@ struct ceph_mds_client {
struct completion safe_umount_waiters;
wait_queue_head_t session_close_wq;
struct list_head waiting_for_map;
+ int mdsmap_err;
struct ceph_mds_session **sessions; /* NULL for mds if no session */
atomic_t num_sessions;
@@ -321,7 +324,7 @@ struct ceph_mds_client {
spinlock_t snap_flush_lock;
u64 last_cap_flush_tid;
- struct rb_root cap_flush_tree;
+ struct list_head cap_flush_list;
struct list_head cap_dirty; /* inodes with dirty caps */
struct list_head cap_dirty_migrating; /* ...that are migrating... */
int num_cap_flushing; /* # caps we are flushing */
@@ -382,10 +385,6 @@ extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
-extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
- struct inode *inode,
- struct dentry *dn);
-
extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);
extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct inode *dir);
@@ -420,8 +419,10 @@ extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
u32 seq);
-extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
- struct ceph_msg *msg);
+extern void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg);
+extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg);
extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 9caaa7ffc93f..9ff5219d849e 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -520,9 +520,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
ihold(inode);
atomic_set(&capsnap->nref, 1);
- capsnap->ci = ci;
INIT_LIST_HEAD(&capsnap->ci_item);
- INIT_LIST_HEAD(&capsnap->flushing_item);
capsnap->follows = old_snapc->seq;
capsnap->issued = __ceph_caps_issued(ci, NULL);
@@ -551,7 +549,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
ci->i_wrbuffer_ref_head = 0;
capsnap->context = old_snapc;
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
- old_snapc = NULL;
if (used & CEPH_CAP_FILE_WR) {
dout("queue_cap_snap %p cap_snap %p snapc %p"
@@ -563,6 +560,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
__ceph_finish_cap_snap(ci, capsnap);
}
capsnap = NULL;
+ old_snapc = NULL;
update_snapc:
if (ci->i_head_snapc) {
@@ -603,6 +601,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->dirty_pages);
return 0;
}
+
+ ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
inode, capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
@@ -799,9 +799,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
inode = &ci->vfs_inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
- spin_lock(&ci->i_ceph_lock);
- __ceph_flush_snaps(ci, &session, 0);
- spin_unlock(&ci->i_ceph_lock);
+ ceph_flush_snaps(ci, &session);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 91e02481ce06..e247f6f0feb7 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -108,7 +108,6 @@ static int ceph_sync_fs(struct super_block *sb, int wait)
* mount options
*/
enum {
- Opt_mds_namespace,
Opt_wsize,
Opt_rsize,
Opt_rasize,
@@ -121,6 +120,7 @@ enum {
Opt_last_int,
/* int args above */
Opt_snapdirname,
+ Opt_mds_namespace,
Opt_last_string,
/* string args above */
Opt_dirstat,
@@ -144,7 +144,6 @@ enum {
};
static match_table_t fsopt_tokens = {
- {Opt_mds_namespace, "mds_namespace=%d"},
{Opt_wsize, "wsize=%d"},
{Opt_rsize, "rsize=%d"},
{Opt_rasize, "rasize=%d"},
@@ -156,6 +155,7 @@ static match_table_t fsopt_tokens = {
{Opt_congestion_kb, "write_congestion_kb=%d"},
/* int args above */
{Opt_snapdirname, "snapdirname=%s"},
+ {Opt_mds_namespace, "mds_namespace=%s"},
/* string args above */
{Opt_dirstat, "dirstat"},
{Opt_nodirstat, "nodirstat"},
@@ -212,11 +212,14 @@ static int parse_fsopt_token(char *c, void *private)
if (!fsopt->snapdir_name)
return -ENOMEM;
break;
-
- /* misc */
case Opt_mds_namespace:
- fsopt->mds_namespace = intval;
+ fsopt->mds_namespace = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ if (!fsopt->mds_namespace)
+ return -ENOMEM;
break;
+ /* misc */
case Opt_wsize:
fsopt->wsize = intval;
break;
@@ -302,6 +305,7 @@ static void destroy_mount_options(struct ceph_mount_options *args)
{
dout("destroy_mount_options %p\n", args);
kfree(args->snapdir_name);
+ kfree(args->mds_namespace);
kfree(args->server_path);
kfree(args);
}
@@ -333,6 +337,9 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
+ ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
+ if (ret)
+ return ret;
ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
if (ret)
@@ -376,7 +383,6 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
- fsopt->mds_namespace = CEPH_FS_CLUSTER_ID_NONE;
/*
* Distinguish the server list from the path in "dev_name".
@@ -469,8 +475,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noacl");
#endif
- if (fsopt->mds_namespace != CEPH_FS_CLUSTER_ID_NONE)
- seq_printf(m, ",mds_namespace=%d", fsopt->mds_namespace);
+ if (fsopt->mds_namespace)
+ seq_printf(m, ",mds_namespace=%s", fsopt->mds_namespace);
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -509,9 +515,11 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
switch (type) {
case CEPH_MSG_MDS_MAP:
- ceph_mdsc_handle_map(fsc->mdsc, msg);
+ ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
+ return 0;
+ case CEPH_MSG_FS_MAP_USER:
+ ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
return 0;
-
default:
return -1;
}
@@ -543,8 +551,14 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
goto fail;
}
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
- fsc->client->monc.fs_cluster_id = fsopt->mds_namespace;
- ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true);
+
+ if (fsopt->mds_namespace == NULL) {
+ ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
+ 0, true);
+ } else {
+ ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
+ 0, false);
+ }
fsc->mount_options = fsopt;
@@ -672,8 +686,8 @@ static int __init init_caches(void)
if (ceph_dentry_cachep == NULL)
goto bad_dentry;
- ceph_file_cachep = KMEM_CACHE(ceph_file_info,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
+
if (ceph_file_cachep == NULL)
goto bad_file;
@@ -731,6 +745,7 @@ static const struct super_operations ceph_super_ops = {
.destroy_inode = ceph_destroy_inode,
.write_inode = ceph_write_inode,
.drop_inode = ceph_drop_inode,
+ .evict_inode = ceph_evict_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
.show_options = ceph_show_options,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 0168b49fb6ad..3e3fa9163059 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -62,7 +62,6 @@ struct ceph_mount_options {
int cap_release_safety;
int max_readdir; /* max readdir result (entires) */
int max_readdir_bytes; /* max readdir result (bytes) */
- int mds_namespace;
/*
* everything above this point can be memcmp'd; everything below
@@ -70,6 +69,7 @@ struct ceph_mount_options {
*/
char *snapdir_name; /* default ".snap" */
+ char *mds_namespace; /* default NULL */
char *server_path; /* default "/" */
};
@@ -147,6 +147,14 @@ struct ceph_cap {
#define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */
#define CHECK_CAPS_FLUSH 4 /* flush any dirty caps */
+struct ceph_cap_flush {
+ u64 tid;
+ int caps; /* 0 means capsnap */
+ bool wake; /* wake up flush waiters when finished? */
+ struct list_head g_list; // global
+ struct list_head i_list; // per inode
+};
+
/*
* Snapped cap state that is pending flush to mds. When a snapshot occurs,
* we first complete any in-process sync writes and writeback any dirty
@@ -154,10 +162,11 @@ struct ceph_cap {
*/
struct ceph_cap_snap {
atomic_t nref;
- struct ceph_inode_info *ci;
- struct list_head ci_item, flushing_item;
+ struct list_head ci_item;
+
+ struct ceph_cap_flush cap_flush;
- u64 follows, flush_tid;
+ u64 follows;
int issued, dirty;
struct ceph_snap_context *context;
@@ -186,16 +195,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
}
}
-struct ceph_cap_flush {
- u64 tid;
- int caps;
- struct rb_node g_node; // global
- union {
- struct rb_node i_node; // inode
- struct list_head list;
- };
-};
-
/*
* The frag tree describes how a directory is fragmented, potentially across
* multiple metadata servers. It is also used to indicate points where
@@ -246,7 +245,7 @@ struct ceph_dentry_info {
unsigned long lease_renew_after, lease_renew_from;
struct list_head lru;
struct dentry *dentry;
- u64 time;
+ unsigned long time;
u64 offset;
};
@@ -287,7 +286,6 @@ struct ceph_inode_info {
struct ceph_dir_layout i_dir_layout;
struct ceph_file_layout i_layout;
- size_t i_pool_ns_len;
char *i_symlink;
/* for dirs */
@@ -311,7 +309,7 @@ struct ceph_inode_info {
* overlapping, pipelined cap flushes to the mds. we can probably
* reduce the tid to 8 bits if we're concerned about inode size. */
struct ceph_cap_flush *i_prealloc_cap_flush;
- struct rb_root i_cap_flush_tree;
+ struct list_head i_cap_flush_list;
wait_queue_head_t i_cap_wq; /* threads waiting on a capability */
unsigned long i_hold_caps_min; /* jiffies */
unsigned long i_hold_caps_max; /* jiffies */
@@ -322,7 +320,7 @@ struct ceph_inode_info {
dirty|flushing caps */
unsigned i_snap_caps; /* cap bits for snapped files */
- int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */
+ int i_nr_by_mode[CEPH_FILE_MODE_BITS]; /* open file counts */
struct mutex i_truncate_mutex;
u32 i_truncate_seq; /* last truncate to smaller size */
@@ -471,6 +469,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_POOL_WR (1 << 6) /* can write to pool */
#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */
#define CEPH_I_CAP_DROPPED (1 << 8) /* caps were forcibly dropped */
+#define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */
+#define CEPH_I_FLUSH_SNAPS (1 << 10) /* need flush snaps */
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count,
@@ -750,6 +750,7 @@ extern const struct inode_operations ceph_file_iops;
extern struct inode *ceph_alloc_inode(struct super_block *sb);
extern void ceph_destroy_inode(struct inode *inode);
extern int ceph_drop_inode(struct inode *inode);
+extern void ceph_evict_inode(struct inode *inode);
extern struct inode *ceph_get_inode(struct super_block *sb,
struct ceph_vino vino);
@@ -890,9 +891,8 @@ extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
-extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession,
- int again);
+extern void ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session);
extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
@@ -907,10 +907,7 @@ extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
loff_t endoff, int *got, struct page **pinned_page);
/* for counting open files by mode */
-static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode)
-{
- ci->i_nr_by_mode[mode]++;
-}
+extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
/* addr.c */
@@ -931,6 +928,7 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
+extern void ceph_sync_write_wait(struct inode *inode);
/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct file_operations ceph_snapdir_fops;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 4870b29df224..adc231892b0d 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -57,81 +57,88 @@ struct ceph_vxattr {
static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
- size_t s;
- char *p = (char *)&ci->i_layout;
-
- for (s = 0; s < sizeof(ci->i_layout); s++, p++)
- if (*p)
- return true;
- return false;
+ struct ceph_file_layout *fl = &ci->i_layout;
+ return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
+ fl->object_size > 0 || fl->pool_id >= 0 ||
+ rcu_dereference_raw(fl->pool_ns) != NULL);
}
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
- int ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
- s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+ struct ceph_string *pool_ns;
+ s64 pool = ci->i_layout.pool_id;
const char *pool_name;
+ const char *ns_field = " pool_namespace=";
char buf[128];
+ size_t len, total_len = 0;
+ int ret;
+
+ pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
- size_t len = strlen(pool_name);
- ret = snprintf(buf, sizeof(buf),
- "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=",
- (unsigned long long)ceph_file_layout_su(ci->i_layout),
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
- if (!size) {
- ret += len;
- } else if (ret + len > size) {
- ret = -ERANGE;
- } else {
- memcpy(val, buf, ret);
+ len = snprintf(buf, sizeof(buf),
+ "stripe_unit=%u stripe_count=%u object_size=%u pool=",
+ ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
+ ci->i_layout.object_size);
+ total_len = len + strlen(pool_name);
+ } else {
+ len = snprintf(buf, sizeof(buf),
+ "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
+ ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
+ ci->i_layout.object_size, (unsigned long long)pool);
+ total_len = len;
+ }
+
+ if (pool_ns)
+ total_len += strlen(ns_field) + pool_ns->len;
+
+ if (!size) {
+ ret = total_len;
+ } else if (total_len > size) {
+ ret = -ERANGE;
+ } else {
+ memcpy(val, buf, len);
+ ret = len;
+ if (pool_name) {
+ len = strlen(pool_name);
memcpy(val + ret, pool_name, len);
ret += len;
}
- } else {
- ret = snprintf(buf, sizeof(buf),
- "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
- (unsigned long long)ceph_file_layout_su(ci->i_layout),
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
- (unsigned long long)pool);
- if (size) {
- if (ret <= size)
- memcpy(val, buf, ret);
- else
- ret = -ERANGE;
+ if (pool_ns) {
+ len = strlen(ns_field);
+ memcpy(val + ret, ns_field, len);
+ ret += len;
+ memcpy(val + ret, pool_ns->str, pool_ns->len);
+ ret += pool_ns->len;
}
}
up_read(&osdc->lock);
+ ceph_put_string(pool_ns);
return ret;
}
static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
char *val, size_t size)
{
- return snprintf(val, size, "%lld",
- (unsigned long long)ceph_file_layout_su(ci->i_layout));
+ return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
}
static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
char *val, size_t size)
{
- return snprintf(val, size, "%lld",
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
+ return snprintf(val, size, "%u", ci->i_layout.stripe_count);
}
static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
char *val, size_t size)
{
- return snprintf(val, size, "%lld",
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
+ return snprintf(val, size, "%u", ci->i_layout.object_size);
}
static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
@@ -140,7 +147,7 @@ static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
int ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
- s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+ s64 pool = ci->i_layout.pool_id;
const char *pool_name;
down_read(&osdc->lock);
@@ -153,6 +160,18 @@ static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
return ret;
}
+static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ int ret = 0;
+ struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
+ if (ns) {
+ ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str);
+ ceph_put_string(ns);
+ }
+ return ret;
+}
+
/* directories */
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
@@ -241,6 +260,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
XATTR_LAYOUT_FIELD(dir, layout, object_size),
XATTR_LAYOUT_FIELD(dir, layout, pool),
+ XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
XATTR_NAME_CEPH(dir, entries),
XATTR_NAME_CEPH(dir, files),
XATTR_NAME_CEPH(dir, subdirs),
@@ -268,6 +288,7 @@ static struct ceph_vxattr ceph_file_vxattrs[] = {
XATTR_LAYOUT_FIELD(file, layout, stripe_count),
XATTR_LAYOUT_FIELD(file, layout, object_size),
XATTR_LAYOUT_FIELD(file, layout, pool),
+ XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
{ .name = NULL, 0 } /* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size; /* total size of all names */
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 4e532536cbc6..4716c54dbfc6 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -903,10 +903,10 @@ static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
return 0;
}
-static int cifs_ci_compare(const struct dentry *parent, const struct dentry *dentry,
+static int cifs_ci_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct nls_table *codepage = CIFS_SB(parent->d_sb)->local_nls;
+ struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
wchar_t c1, c2;
int i, l1, l2;
diff --git a/fs/dcache.c b/fs/dcache.c
index b90cf8e09d5b..5c7cc953ac81 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -316,20 +316,6 @@ static void dentry_free(struct dentry *dentry)
call_rcu(&dentry->d_u.d_rcu, __d_free);
}
-/**
- * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
- * @dentry: the target dentry
- * After this call, in-progress rcu-walk path lookup will fail. This
- * should be called after unhashing, and after changing d_inode (if
- * the dentry has not already been unhashed).
- */
-static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
-{
- lockdep_assert_held(&dentry->d_lock);
- /* Go through am invalidation barrier */
- write_seqcount_invalidate(&dentry->d_seq);
-}
-
/*
* Release the dentry's inode, using the filesystem
* d_iput() operation if defined.
@@ -468,7 +454,8 @@ void __d_drop(struct dentry *dentry)
__hlist_bl_del(&dentry->d_hash);
dentry->d_hash.pprev = NULL;
hlist_bl_unlock(b);
- dentry_rcuwalk_invalidate(dentry);
+ /* After this call, in-progress rcu-walk path lookup will fail. */
+ write_seqcount_invalidate(&dentry->d_seq);
}
}
EXPORT_SYMBOL(__d_drop);
@@ -2060,7 +2047,7 @@ static inline bool d_same_name(const struct dentry *dentry,
return false;
return dentry_cmp(dentry, name->name, name->len) == 0;
}
- return parent->d_op->d_compare(parent, dentry,
+ return parent->d_op->d_compare(dentry,
dentry->d_name.len, dentry->d_name.name,
name) == 0;
}
@@ -2163,7 +2150,7 @@ seqretry:
cpu_relax();
goto seqretry;
}
- if (parent->d_op->d_compare(parent, dentry,
+ if (parent->d_op->d_compare(dentry,
tlen, tname, name) != 0)
continue;
} else {
@@ -2352,19 +2339,15 @@ again:
}
EXPORT_SYMBOL(d_delete);
-static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
+static void __d_rehash(struct dentry *entry)
{
+ struct hlist_bl_head *b = d_hash(entry->d_name.hash);
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
-static void _d_rehash(struct dentry * entry)
-{
- __d_rehash(entry, d_hash(entry->d_name.hash));
-}
-
/**
* d_rehash - add an entry back to the hash
* @entry: dentry to add to the hash
@@ -2375,7 +2358,7 @@ static void _d_rehash(struct dentry * entry)
void d_rehash(struct dentry * entry)
{
spin_lock(&entry->d_lock);
- _d_rehash(entry);
+ __d_rehash(entry);
spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
@@ -2549,7 +2532,7 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
raw_write_seqcount_end(&dentry->d_seq);
fsnotify_update_flags(dentry);
}
- _d_rehash(dentry);
+ __d_rehash(dentry);
if (dir)
end_dir_add(dir, n);
spin_unlock(&dentry->d_lock);
@@ -2611,7 +2594,7 @@ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
alias = NULL;
} else {
__dget_dlock(alias);
- _d_rehash(alias);
+ __d_rehash(alias);
spin_unlock(&alias->d_lock);
}
spin_unlock(&inode->i_lock);
@@ -2636,7 +2619,7 @@ EXPORT_SYMBOL(d_exact_alias);
* Parent inode i_mutex must be held over d_lookup and into this call (to
* keep renames and concurrent inserts, and readdir(2) away).
*/
-void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
+void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
@@ -2795,23 +2778,10 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
write_seqcount_begin(&dentry->d_seq);
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
+ /* unhash both */
/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
-
- /*
- * Move the dentry to the target hash queue. Don't bother checking
- * for the same hash queue because of how unlikely it is.
- */
__d_drop(dentry);
- __d_rehash(dentry, d_hash(target->d_name.hash));
-
- /*
- * Unhash the target (d_delete() is not usable here). If exchanging
- * the two dentries, then rehash onto the other's hash queue.
- */
__d_drop(target);
- if (exchange) {
- __d_rehash(target, d_hash(dentry->d_name.hash));
- }
/* Switch the names.. */
if (exchange)
@@ -2819,6 +2789,11 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
else
copy_name(dentry, target);
+ /* rehash in new place(s) */
+ __d_rehash(dentry);
+ if (exchange)
+ __d_rehash(target);
+
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
/* splicing a tree */
@@ -3038,7 +3013,7 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* Data dependency barrier is needed to make sure that we see that terminating
* NUL. Alpha strikes again, film at 11...
*/
-static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
const char *dname = ACCESS_ONCE(name->name);
u32 dlen = ACCESS_ONCE(name->len);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index a5e607e8f056..688ccc16b702 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -45,8 +45,7 @@ static struct super_block *efivarfs_sb;
* So we need to perform a case-sensitive match on part 1 and a
* case-insensitive match on part 2.
*/
-static int efivarfs_d_compare(const struct dentry *parent,
- const struct dentry *dentry,
+static int efivarfs_d_compare(const struct dentry *dentry,
unsigned int len, const char *str,
const struct qstr *name)
{
diff --git a/fs/exec.c b/fs/exec.c
index ca239fc86d8d..6fcfb3f7b137 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -762,6 +762,39 @@ out_unlock:
}
EXPORT_SYMBOL(setup_arg_pages);
+#else
+
+/*
+ * Transfer the program arguments and environment from the holding pages
+ * onto the stack. The provided stack pointer is adjusted accordingly.
+ */
+int transfer_args_to_stack(struct linux_binprm *bprm,
+ unsigned long *sp_location)
+{
+ unsigned long index, stop, sp;
+ int ret = 0;
+
+ stop = bprm->p >> PAGE_SHIFT;
+ sp = *sp_location;
+
+ for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
+ unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
+ char *src = kmap(bprm->page[index]) + offset;
+ sp -= PAGE_SIZE - offset;
+ if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
+ ret = -EFAULT;
+ kunmap(bprm->page[index]);
+ if (ret)
+ goto out;
+ }
+
+ *sp_location = sp;
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(transfer_args_to_stack);
+
#endif /* CONFIG_MMU */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
@@ -866,7 +899,8 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
goto out;
}
- *buf = vmalloc(i_size);
+ if (id != READING_FIRMWARE_PREALLOC_BUFFER)
+ *buf = vmalloc(i_size);
if (!*buf) {
ret = -ENOMEM;
goto out;
@@ -897,8 +931,10 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
out_free:
if (ret < 0) {
- vfree(*buf);
- *buf = NULL;
+ if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
+ vfree(*buf);
+ *buf = NULL;
+ }
}
out:
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 19efd1197fa5..61ad490ed67b 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -358,8 +358,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
-struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
- struct qstr *child, struct page ** res_page)
+struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
+ const struct qstr *child, struct page **res_page)
{
const char *name = child->name;
int namelen = child->len;
@@ -435,7 +435,7 @@ struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
return de;
}
-ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child)
+ino_t ext2_inode_by_name(struct inode *dir, const struct qstr *child)
{
ino_t res = 0;
struct ext2_dir_entry_2 *de;
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 3fb93681bf7f..06af2f92226c 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -757,9 +757,9 @@ extern void ext2_rsv_window_add(struct super_block *sb, struct ext2_reserve_wind
/* dir.c */
extern int ext2_add_link (struct dentry *, struct inode *);
-extern ino_t ext2_inode_by_name(struct inode *, struct qstr *);
+extern ino_t ext2_inode_by_name(struct inode *, const struct qstr *);
extern int ext2_make_empty(struct inode *, struct inode *);
-extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,struct qstr *, struct page **);
+extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,const struct qstr *, struct page **);
extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
extern int ext2_empty_dir (struct inode *);
extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index e2624275d828..d64d2a515cb2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -245,7 +245,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio_put(bio);
return -EFAULT;
}
- bio->bi_rw = fio->op_flags;
bio_set_op_attrs(bio, fio->op, fio->op_flags);
__submit_bio(fio->sbi, bio, fio->type);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a485f68a76b1..9054aeac8015 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -219,7 +219,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- struct qstr *child, struct page **res_page)
+ const struct qstr *child, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
@@ -272,7 +272,7 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
return f2fs_find_entry(dir, &dotdot, p);
}
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr,
+ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
struct page **page)
{
ino_t res = 0;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 7890e9071499..675fa79d86f6 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1914,10 +1914,10 @@ struct page *init_inode_metadata(struct inode *, struct inode *,
void update_parent_metadata(struct inode *, struct inode *, unsigned int);
int room_for_filename(const void *, int, int);
void f2fs_drop_nlink(struct inode *, struct inode *);
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
-ino_t f2fs_inode_by_name(struct inode *, struct qstr *, struct page **);
+ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 1337c0c7527d..664655b2c55f 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -162,10 +162,10 @@ static int msdos_hash(const struct dentry *dentry, struct qstr *qstr)
* Compare two msdos names. If either of the names are invalid,
* we fall back to doing the standard name comparison.
*/
-static int msdos_cmp(const struct dentry *parent, const struct dentry *dentry,
+static int msdos_cmp(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct fat_mount_options *options = &MSDOS_SB(parent->d_sb)->options;
+ struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME];
int error;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6ccdf3f34f90..92b7363dafa9 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -138,10 +138,10 @@ static int vfat_hashi(const struct dentry *dentry, struct qstr *qstr)
/*
* Case insensitive compare of two vfat names.
*/
-static int vfat_cmpi(const struct dentry *parent, const struct dentry *dentry,
+static int vfat_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
+ struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
@@ -157,7 +157,7 @@ static int vfat_cmpi(const struct dentry *parent, const struct dentry *dentry,
/*
* Case sensitive compare of two vfat names.
*/
-static int vfat_cmp(const struct dentry *parent, const struct dentry *dentry,
+static int vfat_cmp(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
unsigned int alen, blen;
@@ -652,8 +652,8 @@ out_free:
return err;
}
-static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
- int cluster, struct timespec *ts,
+static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
+ int is_dir, int cluster, struct timespec *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
@@ -688,7 +688,7 @@ cleanup:
return err;
}
-static int vfat_find(struct inode *dir, struct qstr *qname,
+static int vfat_find(struct inode *dir, const struct qstr *qname,
struct fat_slot_info *sinfo)
{
unsigned int len = vfat_striptail_len(qname);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 56c8fda436c0..4d09d4441e3e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1327,6 +1327,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
dirty = inode->i_state & I_DIRTY;
if (inode->i_state & I_DIRTY_TIME) {
if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
+ wbc->sync_mode == WB_SYNC_ALL ||
unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
unlikely(time_after(jiffies,
(inode->dirtied_time_when +
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 5f1627725791..c47b7780ce37 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -146,7 +146,7 @@ static void fuse_invalidate_entry(struct dentry *entry)
}
static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
- u64 nodeid, struct qstr *name,
+ u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg)
{
memset(outarg, 0, sizeof(struct fuse_entry_out));
@@ -282,7 +282,7 @@ int fuse_valid_type(int m)
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}
-int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
+int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5db5d24f91a5..d98d8cc84def 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -704,7 +704,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int generation, struct fuse_attr *attr,
u64 attr_valid, u64 attr_version);
-int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
+int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode);
/**
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9b7cb37b4ba8..4e05b51120f4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -673,13 +673,11 @@ static struct dentry *fuse_get_dentry(struct super_block *sb,
inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
if (!inode) {
struct fuse_entry_out outarg;
- struct qstr name;
+ const struct qstr name = QSTR_INIT(".", 1);
if (!fc->export_support)
goto out_err;
- name.len = 1;
- name.name = ".";
err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
&inode);
if (err && err != -ENOENT)
@@ -775,14 +773,12 @@ static struct dentry *fuse_get_parent(struct dentry *child)
struct inode *inode;
struct dentry *parent;
struct fuse_entry_out outarg;
- struct qstr name;
+ const struct qstr name = QSTR_INIT("..", 2);
int err;
if (!fc->export_support)
return ERR_PTR(-ESTALE);
- name.len = 2;
- name.name = "..";
err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
&name, &outarg, &inode);
if (err) {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index e0621cacf134..e4da0ecd3285 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1800,7 +1800,7 @@ int gfs2_permission(struct inode *inode, int mask)
}
if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
- error = -EACCES;
+ error = -EPERM;
else
error = generic_permission(inode, mask);
if (gfs2_holder_initialized(&i_gh))
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 98cde8ba5dc2..8f4afd3f5108 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -20,7 +20,7 @@
*
* Given the ID of the parent and the name build a search key.
*/
-void hfs_cat_build_key(struct super_block *sb, btree_key *key, u32 parent, struct qstr *name)
+void hfs_cat_build_key(struct super_block *sb, btree_key *key, u32 parent, const struct qstr *name)
{
key->cat.reserved = 0;
key->cat.ParID = cpu_to_be32(parent);
@@ -64,7 +64,7 @@ static int hfs_cat_build_record(hfs_cat_rec *rec, u32 cnid, struct inode *inode)
static int hfs_cat_build_thread(struct super_block *sb,
hfs_cat_rec *rec, int type,
- u32 parentid, struct qstr *name)
+ u32 parentid, const struct qstr *name)
{
rec->type = type;
memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved));
@@ -79,7 +79,7 @@ static int hfs_cat_build_thread(struct super_block *sb,
* Add a new file or directory to the catalog B-tree and
* return a (struct hfs_cat_entry) for it in '*result'.
*/
-int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
+int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct inode *inode)
{
struct hfs_find_data fd;
struct super_block *sb;
@@ -210,7 +210,7 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
* Delete the indicated file or directory.
* The associated thread is also removed unless ('with_thread'==0).
*/
-int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
+int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
{
struct super_block *sb;
struct hfs_find_data fd;
@@ -277,8 +277,8 @@ out:
* If the destination exists it is removed and a
* (struct hfs_cat_entry) for it is returned in '*result'.
*/
-int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
- struct inode *dst_dir, struct qstr *dst_name)
+int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name)
{
struct super_block *sb;
struct hfs_find_data src_fd, dst_fd;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index ee2f385811c8..16f5172ee40d 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -178,11 +178,11 @@ extern int hfs_clear_vbm_bits(struct super_block *, u16, u16);
extern int hfs_cat_keycmp(const btree_key *, const btree_key *);
struct hfs_find_data;
extern int hfs_cat_find_brec(struct super_block *, u32, struct hfs_find_data *);
-extern int hfs_cat_create(u32, struct inode *, struct qstr *, struct inode *);
-extern int hfs_cat_delete(u32, struct inode *, struct qstr *);
-extern int hfs_cat_move(u32, struct inode *, struct qstr *,
- struct inode *, struct qstr *);
-extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *);
+extern int hfs_cat_create(u32, struct inode *, const struct qstr *, struct inode *);
+extern int hfs_cat_delete(u32, struct inode *, const struct qstr *);
+extern int hfs_cat_move(u32, struct inode *, const struct qstr *,
+ struct inode *, const struct qstr *);
+extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, const struct qstr *);
/* dir.c */
extern const struct file_operations hfs_dir_operations;
@@ -201,7 +201,7 @@ extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
-extern struct inode *hfs_new_inode(struct inode *, struct qstr *, umode_t);
+extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);
extern int hfs_inode_setattr(struct dentry *, struct iattr *);
@@ -233,11 +233,11 @@ extern const struct dentry_operations hfs_dentry_operations;
extern int hfs_hash_dentry(const struct dentry *, struct qstr *);
extern int hfs_strcmp(const unsigned char *, unsigned int,
const unsigned char *, unsigned int);
-extern int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+extern int hfs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
/* trans.c */
-extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *);
+extern void hfs_asc2mac(struct super_block *, struct hfs_name *, const struct qstr *);
extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *);
/* super.c */
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 02a3845363f7..c6a32415735b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -177,7 +177,7 @@ const struct address_space_operations hfs_aops = {
/*
* hfs_new_inode
*/
-struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
+struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = new_inode(sb);
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
index ec9f164c35a5..3912209153a8 100644
--- a/fs/hfs/string.c
+++ b/fs/hfs/string.c
@@ -92,7 +92,7 @@ int hfs_strcmp(const unsigned char *s1, unsigned int len1,
* Test for equality of two strings in the HFS filename character ordering.
* return 1 on failure and 0 on success
*/
-int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+int hfs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
const unsigned char *n1, *n2;
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index b1ce4c7ad3fb..39f5e343bf4d 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -94,7 +94,7 @@ out:
* This routine is a inverse to hfs_mac2triv().
* A ':' is replaced by a '/'.
*/
-void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, struct qstr *in)
+void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr *in)
{
struct nls_table *nls_disk = HFS_SB(sb)->nls_disk;
struct nls_table *nls_io = HFS_SB(sb)->nls_io;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index fb707e8f423a..142534d3c2d5 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -40,7 +40,7 @@ int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
/* Generates key for catalog file/folders record. */
int hfsplus_cat_build_key(struct super_block *sb,
- hfsplus_btree_key *key, u32 parent, struct qstr *str)
+ hfsplus_btree_key *key, u32 parent, const struct qstr *str)
{
int len, err;
@@ -174,7 +174,7 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
static int hfsplus_fill_cat_thread(struct super_block *sb,
hfsplus_cat_entry *entry, int type,
- u32 parentid, struct qstr *str)
+ u32 parentid, const struct qstr *str)
{
int err;
@@ -250,7 +250,7 @@ static void hfsplus_subfolders_dec(struct inode *dir)
}
int hfsplus_create_cat(u32 cnid, struct inode *dir,
- struct qstr *str, struct inode *inode)
+ const struct qstr *str, struct inode *inode)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
@@ -318,7 +318,7 @@ err2:
return err;
}
-int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
+int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
@@ -415,8 +415,8 @@ out:
}
int hfsplus_rename_cat(u32 cnid,
- struct inode *src_dir, struct qstr *src_name,
- struct inode *dst_dir, struct qstr *dst_name)
+ struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name)
{
struct super_block *sb = src_dir->i_sb;
struct hfs_find_data src_fd, dst_fd;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 047245bd2cd6..a3f03b247463 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -445,17 +445,17 @@ int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2);
int hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
- u32 parent, struct qstr *str);
+ u32 parent, const struct qstr *str);
void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
hfsplus_btree_key *key, u32 parent);
void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd);
-int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str,
+int hfsplus_create_cat(u32 cnid, struct inode *dir, const struct qstr *str,
struct inode *inode);
-int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str);
-int hfsplus_rename_cat(u32 cnid, struct inode *src_dir, struct qstr *src_name,
- struct inode *dst_dir, struct qstr *dst_name);
+int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str);
+int hfsplus_rename_cat(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name);
/* dir.c */
extern const struct inode_operations hfsplus_dir_inode_operations;
@@ -520,8 +520,7 @@ int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr,
int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
int max_unistr_len, const char *astr, int len);
int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str);
-int hfsplus_compare_dentry(const struct dentry *parent,
- const struct dentry *dentry, unsigned int len,
+int hfsplus_compare_dentry(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name);
/* wrapper.c */
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index c13c8a240be3..e563939882f3 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -385,10 +385,10 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
* Composed unicode characters are decomposed and case-folding is performed
* if the appropriate bits are (un)set on the superblock.
*/
-int hfsplus_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+int hfsplus_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct super_block *sb = parent->d_sb;
+ struct super_block *sb = dentry->d_sb;
int casefold, decompose, size;
int dsize1, dsize2, len1, len2;
const u16 *dstr1, *dstr2;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c57654927a6..90e46cd752fe 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
if (S_ISLNK(root_inode->i_mode)) {
char *name = follow_link(host_root_path);
- if (IS_ERR(name))
+ if (IS_ERR(name)) {
err = PTR_ERR(name);
- else
- err = read_name(root_inode, name);
+ goto out_put;
+ }
+ err = read_name(root_inode, name);
kfree(name);
if (err)
goto out_put;
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 60e6d334d79a..bb87d65f0d97 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -34,7 +34,7 @@ static int hpfs_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
return 0;
}
-static int hpfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+static int hpfs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
unsigned al = len;
@@ -50,7 +50,7 @@ static int hpfs_compare_dentry(const struct dentry *parent, const struct dentry
if (hpfs_chk_name(name->name, &bl))
return 1;
- if (hpfs_compare_names(parent->d_sb, str, al, name->name, bl, 0))
+ if (hpfs_compare_names(dentry->d_sb, str, al, name->name, bl, 0))
return 1;
return 0;
}
diff --git a/fs/inode.c b/fs/inode.c
index 9cef4e16aeda..7e3ef3af3db9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
void address_space_init_once(struct address_space *mapping)
{
memset(mapping, 0, sizeof(*mapping));
- INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
spin_lock_init(&mapping->tree_lock);
init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->private_list);
@@ -1729,7 +1729,6 @@ int dentry_needs_remove_privs(struct dentry *dentry)
mask |= ATTR_KILL_PRIV;
return mask;
}
-EXPORT_SYMBOL(dentry_needs_remove_privs);
static int __remove_privs(struct dentry *dentry, int kill)
{
@@ -1749,8 +1748,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
*/
int file_remove_privs(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = d_inode(dentry);
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
int kill;
int error = 0;
@@ -1758,7 +1757,7 @@ int file_remove_privs(struct file *file)
if (IS_NOSEC(inode))
return 0;
- kill = file_needs_remove_privs(file);
+ kill = dentry_needs_remove_privs(dentry);
if (kill < 0)
return kill;
if (kill)
diff --git a/fs/internal.h b/fs/internal.h
index cef0913e5d41..ba0737649d4a 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -111,12 +111,14 @@ extern long do_handle_open(int mountdirfd,
struct file_handle __user *ufh, int open_flag);
extern int open_check_o_direct(struct file *f);
extern int vfs_open(const struct path *, struct file *, const struct cred *);
+extern struct file *filp_clone_open(struct file *);
/*
* inode.c
*/
extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
extern void inode_add_lru(struct inode *inode);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
/*
* fs-writeback.c
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 761fade7680f..ad0c745ebad7 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -29,18 +29,15 @@
#define BEQUIET
static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
-static int isofs_dentry_cmpi(const struct dentry *parent,
- const struct dentry *dentry,
+static int isofs_dentry_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#ifdef CONFIG_JOLIET
static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr);
static int isofs_hash_ms(const struct dentry *parent, struct qstr *qstr);
-static int isofs_dentry_cmpi_ms(const struct dentry *parent,
- const struct dentry *dentry,
+static int isofs_dentry_cmpi_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
-static int isofs_dentry_cmp_ms(const struct dentry *parent,
- const struct dentry *dentry,
+static int isofs_dentry_cmp_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#endif
@@ -235,7 +232,7 @@ isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
}
static int
-isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
+isofs_dentry_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 0, 1);
@@ -276,14 +273,14 @@ isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr)
}
static int
-isofs_dentry_cmp_ms(const struct dentry *parent, const struct dentry *dentry,
+isofs_dentry_cmp_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 0);
}
static int
-isofs_dentry_cmpi_ms(const struct dentry *parent, const struct dentry *dentry,
+isofs_dentry_cmpi_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 1);
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 7b543e6b6526..aee592767f1d 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -22,7 +22,7 @@ isofs_cmp(struct dentry *dentry, const char *compare, int dlen)
qstr.len = dlen;
if (likely(!dentry->d_op))
return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen);
- return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
+ return dentry->d_op->d_compare(NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
}
/*
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 04baf0dfc40c..814b0c58016c 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1572,7 +1572,7 @@ static int jfs_ci_hash(const struct dentry *dir, struct qstr *this)
return 0;
}
-static int jfs_ci_compare(const struct dentry *parent, const struct dentry *dentry,
+static int jfs_ci_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
int i, result = 1;
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index bcd754d216bd..9568064ecadf 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -156,7 +156,7 @@ static pgoff_t hash_index(u32 hash, int round)
static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
{
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
struct page *page;
struct logfs_disk_dentry *dd;
u32 hash = logfs_hash_32(name->name, name->len, 0);
@@ -323,7 +323,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
return 0;
}
-static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
+static void logfs_set_name(struct logfs_disk_dentry *dd, const struct qstr *name)
{
dd->namelen = cpu_to_be16(name->len);
memcpy(dd->name, name->name, name->len);
diff --git a/fs/mpage.c b/fs/mpage.c
index 2ca1f39c8cba..d2413af0823a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -50,7 +50,7 @@ static void mpage_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- page_endio(page, bio_data_dir(bio), bio->bi_error);
+ page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
}
bio_put(bio);
diff --git a/fs/namei.c b/fs/namei.c
index c386a329ab20..adb04146df09 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -410,7 +410,7 @@ int __inode_permission(struct inode *inode, int mask)
* Nobody gets write access to an immutable file.
*/
if (IS_IMMUTABLE(inode))
- return -EACCES;
+ return -EPERM;
/*
* Updating mtime will likely cause i_uid and i_gid to be
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 9add7ab747a5..17de5c13dfae 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -74,7 +74,7 @@ const struct inode_operations ncp_dir_inode_operations =
*/
static int ncp_lookup_validate(struct dentry *, unsigned int);
static int ncp_hash_dentry(const struct dentry *, struct qstr *);
-static int ncp_compare_dentry(const struct dentry *, const struct dentry *,
+static int ncp_compare_dentry(const struct dentry *,
unsigned int, const char *, const struct qstr *);
static int ncp_delete_dentry(const struct dentry *);
static void ncp_d_prune(struct dentry *dentry);
@@ -154,7 +154,7 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
* the callers will handle races.
*/
static int
-ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+ncp_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct inode *pinode;
@@ -162,7 +162,7 @@ ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
if (len != name->len)
return 1;
- pinode = d_inode_rcu(parent);
+ pinode = d_inode_rcu(dentry->d_parent);
if (!pinode)
return 1;
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8664417955a2..6abdda209642 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_NFS_FS) += nfs.o
CFLAGS_nfstrace.o += -I$(src)
nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
- direct.o pagelist.o read.o symlink.o unlink.o \
+ io.o direct.o pagelist.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o nfstrace.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index e5b89675263e..a69ef4e9c24c 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -65,8 +65,8 @@ nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
if (!p)
return -EIO;
b->simple.nr_sigs = be32_to_cpup(p++);
- if (!b->simple.nr_sigs) {
- dprintk("no signature\n");
+ if (!b->simple.nr_sigs || b->simple.nr_sigs > PNFS_BLOCK_MAX_UUIDS) {
+ dprintk("Bad signature count: %d\n", b->simple.nr_sigs);
return -EIO;
}
@@ -89,7 +89,8 @@ nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
memcpy(&b->simple.sigs[i].sig, p,
b->simple.sigs[i].sig_len);
- b->simple.len += 8 + 4 + b->simple.sigs[i].sig_len;
+ b->simple.len += 8 + 4 + \
+ (XDR_QUADLEN(b->simple.sigs[i].sig_len) << 2);
}
break;
case PNFS_BLOCK_VOLUME_SLICE:
@@ -104,7 +105,12 @@ nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
p = xdr_inline_decode(xdr, 4);
if (!p)
return -EIO;
+
b->concat.volumes_count = be32_to_cpup(p++);
+ if (b->concat.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
+ dprintk("Too many volumes: %d\n", b->concat.volumes_count);
+ return -EIO;
+ }
p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
if (!p)
@@ -116,8 +122,13 @@ nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
p = xdr_inline_decode(xdr, 8 + 4);
if (!p)
return -EIO;
+
p = xdr_decode_hyper(p, &b->stripe.chunk_size);
b->stripe.volumes_count = be32_to_cpup(p++);
+ if (b->stripe.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
+ dprintk("Too many volumes: %d\n", b->stripe.volumes_count);
+ return -EIO;
+ }
p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
if (!p)
@@ -224,18 +235,20 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
+ struct block_device *bdev;
dev_t dev;
dev = bl_resolve_deviceid(server, v, gfp_mask);
if (!dev)
return -EIO;
- d->bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
- if (IS_ERR(d->bdev)) {
+ bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
+ if (IS_ERR(bdev)) {
printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
- MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev));
- return PTR_ERR(d->bdev);
+ MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
+ return PTR_ERR(bdev);
}
+ d->bdev = bdev;
d->len = i_size_read(d->bdev->bd_inode);
@@ -287,44 +300,71 @@ bl_validate_designator(struct pnfs_block_volume *v)
}
}
+/*
+ * Try to open the udev path for the WWN. At least on Debian the udev
+ * by-id path will always point to the dm-multipath device if one exists.
+ */
+static struct block_device *
+bl_open_udev_path(struct pnfs_block_volume *v)
+{
+ struct block_device *bdev;
+ const char *devname;
+
+ devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%*phN",
+ v->scsi.designator_len, v->scsi.designator);
+ if (!devname)
+ return ERR_PTR(-ENOMEM);
+
+ bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
+ if (IS_ERR(bdev)) {
+ pr_warn("pNFS: failed to open device %s (%ld)\n",
+ devname, PTR_ERR(bdev));
+ }
+
+ kfree(devname);
+ return bdev;
+}
+
+/*
+ * Try to open the RH/Fedora specific dm-mpath udev path for this WWN, as the
+ * wwn- links will only point to the first discovered SCSI device there.
+ */
+static struct block_device *
+bl_open_dm_mpath_udev_path(struct pnfs_block_volume *v)
+{
+ struct block_device *bdev;
+ const char *devname;
+
+ devname = kasprintf(GFP_KERNEL,
+ "/dev/disk/by-id/dm-uuid-mpath-%d%*phN",
+ v->scsi.designator_type,
+ v->scsi.designator_len, v->scsi.designator);
+ if (!devname)
+ return ERR_PTR(-ENOMEM);
+
+ bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
+ kfree(devname);
+ return bdev;
+}
+
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
+ struct block_device *bdev;
const struct pr_ops *ops;
- const char *devname;
int error;
if (!bl_validate_designator(v))
return -EINVAL;
- switch (v->scsi.designator_len) {
- case 8:
- devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%8phN",
- v->scsi.designator);
- break;
- case 12:
- devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%12phN",
- v->scsi.designator);
- break;
- case 16:
- devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/wwn-0x%16phN",
- v->scsi.designator);
- break;
- default:
- return -EINVAL;
- }
-
- d->bdev = blkdev_get_by_path(devname, FMODE_READ, NULL);
- if (IS_ERR(d->bdev)) {
- pr_warn("pNFS: failed to open device %s (%ld)\n",
- devname, PTR_ERR(d->bdev));
- kfree(devname);
- return PTR_ERR(d->bdev);
- }
-
- kfree(devname);
+ bdev = bl_open_dm_mpath_udev_path(v);
+ if (IS_ERR(bdev))
+ bdev = bl_open_udev_path(v);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ d->bdev = bdev;
d->len = i_size_read(d->bdev->bd_inode);
d->map = bl_map_simple;
@@ -352,7 +392,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
return 0;
out_blkdev_put:
- blkdev_put(d->bdev, FMODE_READ);
+ blkdev_put(d->bdev, FMODE_READ | FMODE_WRITE);
return error;
}
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index 720b3ff55fa9..992bcb19c11e 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -121,6 +121,16 @@ ext_try_to_merge_right(struct rb_root *root, struct pnfs_block_extent *be)
return be;
}
+static void __ext_put_deviceids(struct list_head *head)
+{
+ struct pnfs_block_extent *be, *tmp;
+
+ list_for_each_entry_safe(be, tmp, head, be_list) {
+ nfs4_put_deviceid_node(be->be_device);
+ kfree(be);
+ }
+}
+
static void
__ext_tree_insert(struct rb_root *root,
struct pnfs_block_extent *new, bool merge_ok)
@@ -163,7 +173,8 @@ free_new:
}
static int
-__ext_tree_remove(struct rb_root *root, sector_t start, sector_t end)
+__ext_tree_remove(struct rb_root *root,
+ sector_t start, sector_t end, struct list_head *tmp)
{
struct pnfs_block_extent *be;
sector_t len1 = 0, len2 = 0;
@@ -223,8 +234,7 @@ __ext_tree_remove(struct rb_root *root, sector_t start, sector_t end)
struct pnfs_block_extent *next = ext_tree_next(be);
rb_erase(&be->be_node, root);
- nfs4_put_deviceid_node(be->be_device);
- kfree(be);
+ list_add_tail(&be->be_list, tmp);
be = next;
}
@@ -350,16 +360,18 @@ int ext_tree_remove(struct pnfs_block_layout *bl, bool rw,
sector_t start, sector_t end)
{
int err, err2;
+ LIST_HEAD(tmp);
spin_lock(&bl->bl_ext_lock);
- err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
+ err = __ext_tree_remove(&bl->bl_ext_ro, start, end, &tmp);
if (rw) {
- err2 = __ext_tree_remove(&bl->bl_ext_rw, start, end);
+ err2 = __ext_tree_remove(&bl->bl_ext_rw, start, end, &tmp);
if (!err)
err = err2;
}
spin_unlock(&bl->bl_ext_lock);
+ __ext_put_deviceids(&tmp);
return err;
}
@@ -396,12 +408,13 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
sector_t end = start + len;
struct pnfs_block_extent *be;
int err = 0;
+ LIST_HEAD(tmp);
spin_lock(&bl->bl_ext_lock);
/*
* First remove all COW extents or holes from written to range.
*/
- err = __ext_tree_remove(&bl->bl_ext_ro, start, end);
+ err = __ext_tree_remove(&bl->bl_ext_ro, start, end, &tmp);
if (err)
goto out;
@@ -459,6 +472,8 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
}
out:
spin_unlock(&bl->bl_ext_lock);
+
+ __ext_put_deviceids(&tmp);
return err;
}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index aaa2e8d3df6f..c92a75e066a6 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -119,27 +119,30 @@ out:
* hashed by filehandle.
*/
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
- struct nfs_fh *fh, nfs4_stateid *stateid)
+ struct nfs_fh *fh)
{
struct nfs_server *server;
+ struct nfs_inode *nfsi;
struct inode *ino;
struct pnfs_layout_hdr *lo;
+restart:
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry(lo, &server->layouts, plh_layouts) {
- if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
+ nfsi = NFS_I(lo->plh_inode);
+ if (nfs_compare_fh(fh, &nfsi->fh))
continue;
- if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
+ if (nfsi->layout != lo)
continue;
ino = igrab(lo->plh_inode);
if (!ino)
break;
spin_lock(&ino->i_lock);
/* Is this layout in the process of being freed? */
- if (NFS_I(ino)->layout != lo) {
+ if (nfsi->layout != lo) {
spin_unlock(&ino->i_lock);
iput(ino);
- break;
+ goto restart;
}
pnfs_get_layout_hdr(lo);
spin_unlock(&ino->i_lock);
@@ -151,13 +154,13 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
}
static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
- struct nfs_fh *fh, nfs4_stateid *stateid)
+ struct nfs_fh *fh)
{
struct pnfs_layout_hdr *lo;
spin_lock(&clp->cl_lock);
rcu_read_lock();
- lo = get_layout_by_fh_locked(clp, fh, stateid);
+ lo = get_layout_by_fh_locked(clp, fh);
rcu_read_unlock();
spin_unlock(&clp->cl_lock);
@@ -167,17 +170,39 @@ static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
/*
* Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
*/
-static bool pnfs_check_stateid_sequence(struct pnfs_layout_hdr *lo,
+static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
const nfs4_stateid *new)
{
u32 oldseq, newseq;
- oldseq = be32_to_cpu(lo->plh_stateid.seqid);
+ /* Is the stateid still not initialised? */
+ if (!pnfs_layout_is_valid(lo))
+ return NFS4ERR_DELAY;
+
+ /* Mismatched stateid? */
+ if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
+ return NFS4ERR_BAD_STATEID;
+
newseq = be32_to_cpu(new->seqid);
+ /* Are we already in a layout recall situation? */
+ if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
+ lo->plh_return_seq != 0) {
+ if (newseq < lo->plh_return_seq)
+ return NFS4ERR_OLD_STATEID;
+ if (newseq > lo->plh_return_seq)
+ return NFS4ERR_DELAY;
+ goto out;
+ }
+ /* Check that the stateid matches what we think it should be. */
+ oldseq = be32_to_cpu(lo->plh_stateid.seqid);
if (newseq > oldseq + 1)
- return false;
- return true;
+ return NFS4ERR_DELAY;
+ /* Crazy server! */
+ if (newseq <= oldseq)
+ return NFS4ERR_OLD_STATEID;
+out:
+ return NFS_OK;
}
static u32 initiate_file_draining(struct nfs_client *clp,
@@ -188,7 +213,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
LIST_HEAD(free_me_list);
- lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
+ lo = get_layout_by_fh(clp, &args->cbl_fh);
if (!lo) {
trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
&args->cbl_stateid, -rv);
@@ -196,18 +221,15 @@ static u32 initiate_file_draining(struct nfs_client *clp,
}
ino = lo->plh_inode;
+ pnfs_layoutcommit_inode(ino, false);
+
spin_lock(&ino->i_lock);
- if (!pnfs_check_stateid_sequence(lo, &args->cbl_stateid)) {
- rv = NFS4ERR_DELAY;
+ rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
+ if (rv != NFS_OK)
goto unlock;
- }
pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
- spin_unlock(&ino->i_lock);
-
- pnfs_layoutcommit_inode(ino, false);
- spin_lock(&ino->i_lock);
/*
* Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
*/
@@ -223,11 +245,13 @@ static u32 initiate_file_draining(struct nfs_client *clp,
goto unlock;
}
+ /* Embrace your forgetfulness! */
+ rv = NFS4ERR_NOMATCHING_LAYOUT;
+
if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
&args->cbl_range);
}
- pnfs_mark_layout_returned_if_empty(lo);
unlock:
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&free_me_list);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index d81f96aacd51..656f68f7fe53 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -925,7 +925,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
if (hdr_arg.minorversion == 0) {
cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
- return rpc_drop_reply;
+ goto out_invalidcred;
}
cps.minorversion = hdr_arg.minorversion;
@@ -953,6 +953,10 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
nfs_put_client(cps.clp);
dprintk("%s: done, status = %u\n", __func__, ntohl(status));
return rpc_success;
+
+out_invalidcred:
+ pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n");
+ return rpc_autherr_badcred;
}
/*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 487c5607d52f..003ebce4bbc4 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -367,8 +367,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
*/
struct nfs_client *
nfs_get_client(const struct nfs_client_initdata *cl_init,
- const struct rpc_timeout *timeparms,
- const char *ip_addr,
rpc_authflavor_t authflavour)
{
struct nfs_client *clp, *new = NULL;
@@ -399,7 +397,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
new->cl_flags = cl_init->init_flags;
- return rpc_ops->init_client(new, timeparms, ip_addr);
+ return rpc_ops->init_client(new, cl_init);
}
spin_unlock(&nn->nfs_client_lock);
@@ -470,7 +468,7 @@ EXPORT_SYMBOL_GPL(nfs_init_timeout_values);
* Create an RPC client handle
*/
int nfs_create_rpc_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
+ const struct nfs_client_initdata *cl_init,
rpc_authflavor_t flavor)
{
struct rpc_clnt *clnt = NULL;
@@ -479,8 +477,9 @@ int nfs_create_rpc_client(struct nfs_client *clp,
.protocol = clp->cl_proto,
.address = (struct sockaddr *)&clp->cl_addr,
.addrsize = clp->cl_addrlen,
- .timeout = timeparms,
+ .timeout = cl_init->timeparms,
.servername = clp->cl_hostname,
+ .nodename = cl_init->nodename,
.program = &nfs_program,
.version = clp->rpc_ops->version,
.authflavor = flavor,
@@ -591,14 +590,12 @@ EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
* nfs_init_client - Initialise an NFS2 or NFS3 client
*
* @clp: nfs_client to initialise
- * @timeparms: timeout parameters for underlying RPC transport
- * @ip_addr: IP presentation address (not used)
+ * @cl_init: Initialisation parameters
*
* Returns pointer to an NFS client, or an ERR_PTR value.
*/
struct nfs_client *nfs_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr)
+ const struct nfs_client_initdata *cl_init)
{
int error;
@@ -612,7 +609,7 @@ struct nfs_client *nfs_init_client(struct nfs_client *clp,
* Create a client RPC handle for doing FSSTAT with UNIX auth only
* - RFC 2623, sec 2.3.2
*/
- error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
+ error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
if (error < 0)
goto error;
nfs_mark_client_ready(clp, NFS_CS_READY);
@@ -633,6 +630,7 @@ static int nfs_init_server(struct nfs_server *server,
const struct nfs_parsed_mount_data *data,
struct nfs_subversion *nfs_mod)
{
+ struct rpc_timeout timeparms;
struct nfs_client_initdata cl_init = {
.hostname = data->nfs_server.hostname,
.addr = (const struct sockaddr *)&data->nfs_server.address,
@@ -640,8 +638,8 @@ static int nfs_init_server(struct nfs_server *server,
.nfs_mod = nfs_mod,
.proto = data->nfs_server.protocol,
.net = data->net,
+ .timeparms = &timeparms,
};
- struct rpc_timeout timeparms;
struct nfs_client *clp;
int error;
@@ -653,7 +651,7 @@ static int nfs_init_server(struct nfs_server *server,
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
+ clp = nfs_get_client(&cl_init, RPC_AUTH_UNIX);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index baaa38859899..177fefb26c18 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -2252,21 +2252,37 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, st
return NULL;
}
-static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
+static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res, bool may_block)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_access_entry *cache;
- int err = -ENOENT;
+ bool retry = true;
+ int err;
spin_lock(&inode->i_lock);
- if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
- goto out_zap;
- cache = nfs_access_search_rbtree(inode, cred);
- if (cache == NULL)
- goto out;
- if (!nfs_have_delegated_attributes(inode) &&
- !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
- goto out_stale;
+ for(;;) {
+ if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
+ goto out_zap;
+ cache = nfs_access_search_rbtree(inode, cred);
+ err = -ENOENT;
+ if (cache == NULL)
+ goto out;
+ /* Found an entry, is our attribute cache valid? */
+ if (!nfs_attribute_cache_expired(inode) &&
+ !(nfsi->cache_validity & NFS_INO_INVALID_ATTR))
+ break;
+ err = -ECHILD;
+ if (!may_block)
+ goto out;
+ if (!retry)
+ goto out_zap;
+ spin_unlock(&inode->i_lock);
+ err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (err)
+ return err;
+ spin_lock(&inode->i_lock);
+ retry = false;
+ }
res->jiffies = cache->jiffies;
res->cred = cache->cred;
res->mask = cache->mask;
@@ -2275,12 +2291,6 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
out:
spin_unlock(&inode->i_lock);
return err;
-out_stale:
- rb_erase(&cache->rb_node, &nfsi->access_cache);
- list_del(&cache->lru);
- spin_unlock(&inode->i_lock);
- nfs_access_free_entry(cache);
- return -ENOENT;
out_zap:
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
@@ -2307,13 +2317,12 @@ static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred,
cache = NULL;
if (cache == NULL)
goto out;
- if (!nfs_have_delegated_attributes(inode) &&
- !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
+ err = nfs_revalidate_inode_rcu(NFS_SERVER(inode), inode);
+ if (err)
goto out;
res->jiffies = cache->jiffies;
res->cred = cache->cred;
res->mask = cache->mask;
- err = 0;
out:
rcu_read_unlock();
return err;
@@ -2402,18 +2411,19 @@ EXPORT_SYMBOL_GPL(nfs_access_set_mask);
static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
{
struct nfs_access_entry cache;
+ bool may_block = (mask & MAY_NOT_BLOCK) == 0;
int status;
trace_nfs_access_enter(inode);
status = nfs_access_get_cached_rcu(inode, cred, &cache);
if (status != 0)
- status = nfs_access_get_cached(inode, cred, &cache);
+ status = nfs_access_get_cached(inode, cred, &cache, may_block);
if (status == 0)
goto out_cached;
status = -ECHILD;
- if (mask & MAY_NOT_BLOCK)
+ if (!may_block)
goto out;
/* Be clever: ask server to check for all possible rights */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e6210ead71d0..72b7d13ee3c6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -196,6 +196,12 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
WARN_ON_ONCE(verfp->committed < 0);
}
+static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
+ const struct nfs_writeverf *v2)
+{
+ return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
+}
+
/*
* nfs_direct_cmp_hdr_verf - compare verifier for pgio header
* @dreq - direct request possibly spanning multiple servers
@@ -215,7 +221,7 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
nfs_direct_set_hdr_verf(dreq, hdr);
return 0;
}
- return memcmp(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
+ return nfs_direct_cmp_verf(verfp, &hdr->verf);
}
/*
@@ -238,7 +244,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
if (verfp->committed < 0)
return 1;
- return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf));
+ return nfs_direct_cmp_verf(verfp, &data->verf);
}
/**
@@ -366,22 +372,10 @@ out:
* Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
* the iocb is still valid here if this is a synchronous request.
*/
-static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
+static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
struct inode *inode = dreq->inode;
- if (dreq->iocb && write) {
- loff_t pos = dreq->iocb->ki_pos + dreq->count;
-
- spin_lock(&inode->i_lock);
- if (i_size_read(inode) < pos)
- i_size_write(inode, pos);
- spin_unlock(&inode->i_lock);
- }
-
- if (write)
- nfs_zap_mapping(inode, inode->i_mapping);
-
inode_dio_end(inode);
if (dreq->iocb) {
@@ -436,7 +430,7 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
}
out_put:
if (put_dreq(dreq))
- nfs_direct_complete(dreq, false);
+ nfs_direct_complete(dreq);
hdr->release(hdr);
}
@@ -542,7 +536,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
}
if (put_dreq(dreq))
- nfs_direct_complete(dreq, false);
+ nfs_direct_complete(dreq);
return 0;
}
@@ -583,17 +577,12 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
if (!count)
goto out;
- inode_lock(inode);
- result = nfs_sync_mapping(mapping);
- if (result)
- goto out_unlock;
-
task_io_account_read(count);
result = -ENOMEM;
dreq = nfs_direct_req_alloc();
if (dreq == NULL)
- goto out_unlock;
+ goto out;
dreq->inode = inode;
dreq->bytes_left = dreq->max_count = count;
@@ -608,10 +597,12 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
+ nfs_start_io_direct(inode);
+
NFS_I(inode)->read_io += count;
result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
- inode_unlock(inode);
+ nfs_end_io_direct(inode);
if (!result) {
result = nfs_direct_wait(dreq);
@@ -619,13 +610,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
iocb->ki_pos += result;
}
- nfs_direct_req_release(dreq);
- return result;
-
out_release:
nfs_direct_req_release(dreq);
-out_unlock:
- inode_unlock(inode);
out:
return result;
}
@@ -657,6 +643,8 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
dreq->count = 0;
+ dreq->verf.committed = NFS_INVALID_STABLE_HOW;
+ nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
for (i = 0; i < dreq->mirror_count; i++)
dreq->mirrors[i].count = 0;
get_dreq(dreq);
@@ -775,7 +763,8 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
nfs_direct_write_reschedule(dreq);
break;
default:
- nfs_direct_complete(dreq, true);
+ nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
+ nfs_direct_complete(dreq);
}
}
@@ -991,6 +980,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t result = -EINVAL;
+ size_t count;
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
@@ -1001,34 +991,24 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
file, iov_iter_count(iter), (long long) iocb->ki_pos);
- nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES,
- iov_iter_count(iter));
+ result = generic_write_checks(iocb, iter);
+ if (result <= 0)
+ return result;
+ count = result;
+ nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
pos = iocb->ki_pos;
end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
- inode_lock(inode);
-
- result = nfs_sync_mapping(mapping);
- if (result)
- goto out_unlock;
-
- if (mapping->nrpages) {
- result = invalidate_inode_pages2_range(mapping,
- pos >> PAGE_SHIFT, end);
- if (result)
- goto out_unlock;
- }
-
- task_io_account_write(iov_iter_count(iter));
+ task_io_account_write(count);
result = -ENOMEM;
dreq = nfs_direct_req_alloc();
if (!dreq)
- goto out_unlock;
+ goto out;
dreq->inode = inode;
- dreq->bytes_left = dreq->max_count = iov_iter_count(iter);
+ dreq->bytes_left = dreq->max_count = count;
dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -1040,6 +1020,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
+ nfs_start_io_direct(inode);
+
result = nfs_direct_write_schedule_iovec(dreq, iter, pos);
if (mapping->nrpages) {
@@ -1047,30 +1029,19 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
pos >> PAGE_SHIFT, end);
}
- inode_unlock(inode);
+ nfs_end_io_direct(inode);
if (!result) {
result = nfs_direct_wait(dreq);
if (result > 0) {
- struct inode *inode = mapping->host;
-
iocb->ki_pos = pos + result;
- spin_lock(&inode->i_lock);
- if (i_size_read(inode) < iocb->ki_pos)
- i_size_write(inode, iocb->ki_pos);
- spin_unlock(&inode->i_lock);
-
/* XXX: should check the generic_write_sync retval */
generic_write_sync(iocb, result);
}
}
- nfs_direct_req_release(dreq);
- return result;
-
out_release:
nfs_direct_req_release(dreq);
-out_unlock:
- inode_unlock(inode);
+out:
return result;
}
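The replacement of the whole-struct memcmp() with nfs_direct_cmp_verf()/nfs_write_verifier_cmp() above narrows the comparison to the opaque verifier bytes. The committed field of struct nfs_writeverf (and any structure padding) can legitimately differ between the replies being compared, so a full-struct memcmp() could report spurious mismatches and force needless resends. A minimal user-space sketch of the distinction, using stand-in types rather than the kernel's definitions:

    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel's nfs_write_verifier / nfs_writeverf. */
    struct write_verifier {
        char data[8];            /* opaque cookie chosen by the server */
    };

    struct writeverf {
        struct write_verifier verifier;
        int committed;           /* stability level; may differ between replies */
    };

    /* Compare only the opaque verifier bytes, as nfs_write_verifier_cmp() does. */
    static int verifier_cmp(const struct write_verifier *v1,
                            const struct write_verifier *v2)
    {
        return memcmp(v1->data, v2->data, sizeof(v1->data));
    }

    int main(void)
    {
        struct writeverf a = { .verifier = { "boot-01" }, .committed = 1 };
        struct writeverf b = { .verifier = { "boot-01" }, .committed = 2 };

        /* Whole-struct comparison sees the differing committed field... */
        printf("memcmp whole struct differs: %d\n", memcmp(&a, &b, sizeof(a)) != 0);
        /* ...while the verifier itself is unchanged. */
        printf("verifier bytes differ:       %d\n",
               verifier_cmp(&a.verifier, &b.verifier) != 0);
        return 0;
    }

Resetting dreq->verf.committed to NFS_INVALID_STABLE_HOW on reschedule, and clearing the pNFS commit-bucket verifiers, fits the same model: the stability level is tracked separately from the verifier cookie.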
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 717a8d6af52d..7d620970f2e1 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -170,12 +170,14 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
iocb->ki_filp,
iov_iter_count(to), (unsigned long) iocb->ki_pos);
- result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping);
+ nfs_start_io_read(inode);
+ result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
if (!result) {
result = generic_file_read_iter(iocb, to);
if (result > 0)
nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
}
+ nfs_end_io_read(inode);
return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);
@@ -191,12 +193,14 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
filp, (unsigned long) count, (unsigned long long) *ppos);
- res = nfs_revalidate_mapping_protected(inode, filp->f_mapping);
+ nfs_start_io_read(inode);
+ res = nfs_revalidate_mapping(inode, filp->f_mapping);
if (!res) {
res = generic_file_splice_read(filp, ppos, pipe, count, flags);
if (res > 0)
nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
}
+ nfs_end_io_read(inode);
return res;
}
EXPORT_SYMBOL_GPL(nfs_file_splice_read);
@@ -272,16 +276,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
trace_nfs_fsync_enter(inode);
- inode_dio_wait(inode);
do {
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret != 0)
break;
- inode_lock(inode);
ret = nfs_file_fsync_commit(file, start, end, datasync);
if (!ret)
ret = pnfs_sync_inode(inode, !!datasync);
- inode_unlock(inode);
/*
* If nfs_file_fsync_commit detected a server reboot, then
* resend all dirty pages that might have been covered by
@@ -359,19 +360,6 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
file, mapping->host->i_ino, len, (long long) pos);
start:
- /*
- * Prevent starvation issues if someone is doing a consistency
- * sync-to-disk
- */
- ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
- nfs_wait_bit_killable, TASK_KILLABLE);
- if (ret)
- return ret;
- /*
- * Wait for O_DIRECT to complete
- */
- inode_dio_wait(mapping->host);
-
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
@@ -432,7 +420,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
return status;
NFS_I(mapping->host)->write_io += copied;
- if (nfs_ctx_key_to_expire(ctx)) {
+ if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
status = nfs_wb_all(mapping->host);
if (status < 0)
return status;
@@ -470,31 +458,8 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
*/
static int nfs_release_page(struct page *page, gfp_t gfp)
{
- struct address_space *mapping = page->mapping;
-
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- /* Always try to initiate a 'commit' if relevant, but only
- * wait for it if the caller allows blocking. Even then,
- * only wait 1 second and only if the 'bdi' is not congested.
- * Waiting indefinitely can cause deadlocks when the NFS
- * server is on this machine, when a new TCP connection is
- * needed and in other rare cases. There is no particular
- * need to wait extensively here. A short wait has the
- * benefit that someone else can worry about the freezer.
- */
- if (mapping) {
- struct nfs_server *nfss = NFS_SERVER(mapping->host);
- nfs_commit_inode(mapping->host, 0);
- if (gfpflags_allow_blocking(gfp) &&
- !bdi_write_congested(&nfss->backing_dev_info)) {
- wait_on_page_bit_killable_timeout(page, PG_private,
- HZ);
- if (PagePrivate(page))
- set_bdi_congested(&nfss->backing_dev_info,
- BLK_RW_ASYNC);
- }
- }
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
@@ -604,6 +569,8 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
filp, filp->f_mapping->host->i_ino,
(long long)page_offset(page));
+ sb_start_pagefault(inode->i_sb);
+
/* make sure the cache has finished storing the page */
nfs_fscache_wait_on_page_write(NFS_I(inode), page);
@@ -630,6 +597,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
out_unlock:
unlock_page(page);
out:
+ sb_end_pagefault(inode->i_sb);
return ret;
}
@@ -645,7 +613,7 @@ static int nfs_need_check_write(struct file *filp, struct inode *inode)
ctx = nfs_file_open_context(filp);
if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags) ||
- nfs_ctx_key_to_expire(ctx))
+ nfs_ctx_key_to_expire(ctx, inode))
return 1;
return 0;
}
@@ -656,23 +624,17 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file_inode(file);
unsigned long written = 0;
ssize_t result;
- size_t count = iov_iter_count(from);
result = nfs_key_timeout_notify(file, inode);
if (result)
return result;
- if (iocb->ki_flags & IOCB_DIRECT) {
- result = generic_write_checks(iocb, from);
- if (result <= 0)
- return result;
+ if (iocb->ki_flags & IOCB_DIRECT)
return nfs_file_direct_write(iocb, from);
- }
dprintk("NFS: write(%pD2, %zu@%Ld)\n",
- file, count, (long long) iocb->ki_pos);
+ file, iov_iter_count(from), (long long) iocb->ki_pos);
- result = -EBUSY;
if (IS_SWAPFILE(inode))
goto out_swapfile;
/*
@@ -684,28 +646,33 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
- result = count;
- if (!count)
+ nfs_start_io_write(inode);
+ result = generic_write_checks(iocb, from);
+ if (result > 0) {
+ current->backing_dev_info = inode_to_bdi(inode);
+ result = generic_perform_write(file, from, iocb->ki_pos);
+ current->backing_dev_info = NULL;
+ }
+ nfs_end_io_write(inode);
+ if (result <= 0)
goto out;
- result = generic_file_write_iter(iocb, from);
- if (result > 0)
- written = result;
+ written = generic_write_sync(iocb, result);
+ iocb->ki_pos += written;
/* Return error values */
- if (result >= 0 && nfs_need_check_write(file, inode)) {
+ if (nfs_need_check_write(file, inode)) {
int err = vfs_fsync(file, 0);
if (err < 0)
result = err;
}
- if (result > 0)
- nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
return result;
out_swapfile:
printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
- goto out;
+ return -EBUSY;
}
EXPORT_SYMBOL_GPL(nfs_file_write);
@@ -780,11 +747,6 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
}
static int
-is_time_granular(struct timespec *ts) {
- return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000));
-}
-
-static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
struct inode *inode = filp->f_mapping->host;
@@ -817,12 +779,8 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
* This makes locking act as a cache coherency point.
*/
nfs_sync_mapping(filp->f_mapping);
- if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
- if (is_time_granular(&NFS_SERVER(inode)->time_delta))
- __nfs_revalidate_inode(NFS_SERVER(inode), inode);
- else
- nfs_zap_caches(inode);
- }
+ if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ nfs_zap_mapping(inode, filp->f_mapping);
out:
return status;
}
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index aa59757389dc..a3fc48ba4931 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -255,13 +255,16 @@ static int filelayout_read_done_cb(struct rpc_task *task,
static void
filelayout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
+ loff_t end_offs = 0;
if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
- hdr->res.verf->committed != NFS_DATA_SYNC)
+ hdr->res.verf->committed == NFS_FILE_SYNC)
return;
+ if (hdr->res.verf->committed == NFS_DATA_SYNC)
+ end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
- pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
- hdr->mds_offset + hdr->res.count);
+ /* Note: if the write is unstable, don't set end_offs until commit */
+ pnfs_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
@@ -354,6 +357,12 @@ static int filelayout_write_done_cb(struct rpc_task *task,
}
filelayout_set_layoutcommit(hdr);
+
+ /* zero out the fattr */
+ hdr->fattr.valid = 0;
+ if (task->tk_status >= 0)
+ nfs_writeback_update_inode(hdr);
+
return 0;
}
@@ -375,8 +384,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
return -EAGAIN;
}
- if (data->verf.committed == NFS_UNSTABLE)
- pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
+ pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
return 0;
}
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 0e8018bc9880..e6206eaf2bdf 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1325,15 +1325,16 @@ ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
* we always send layoutcommit after DS writes.
*/
static void
-ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
+ff_layout_set_layoutcommit(struct inode *inode,
+ struct pnfs_layout_segment *lseg,
+ loff_t end_offset)
{
- if (!ff_layout_need_layoutcommit(hdr->lseg))
+ if (!ff_layout_need_layoutcommit(lseg))
return;
- pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
- hdr->mds_offset + hdr->res.count);
- dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
- (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
+ pnfs_set_layoutcommit(inode, lseg, end_offset);
+ dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
+ (unsigned long long) NFS_I(inode)->layout->plh_lwb);
}
static bool
@@ -1469,6 +1470,7 @@ static void ff_layout_read_release(void *data)
static int ff_layout_write_done_cb(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
+ loff_t end_offs = 0;
int err;
trace_nfs4_pnfs_write(hdr, task->tk_status);
@@ -1494,7 +1496,10 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
if (hdr->res.verf->committed == NFS_FILE_SYNC ||
hdr->res.verf->committed == NFS_DATA_SYNC)
- ff_layout_set_layoutcommit(hdr);
+ end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
+
+ /* Note: if the write is unstable, don't set end_offs until commit */
+ ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
/* zero out fattr since we don't care about DS attrs at all */
hdr->fattr.valid = 0;
@@ -1530,9 +1535,7 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
return -EAGAIN;
}
- if (data->verf.committed == NFS_UNSTABLE
- && ff_layout_need_layoutcommit(data->lseg))
- pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);
+ ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
return 0;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index dda689d7a8a7..bf4ec5ecc97e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -662,9 +662,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
trace_nfs_getattr_enter(inode);
/* Flush out writes to the server in order to update c/mtime. */
if (S_ISREG(inode->i_mode)) {
- inode_lock(inode);
- err = nfs_sync_inode(inode);
- inode_unlock(inode);
+ err = filemap_write_and_wait(inode->i_mapping);
if (err)
goto out;
}
@@ -879,7 +877,10 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- list_add(&ctx->list, &nfsi->open_files);
+ if (ctx->mode & FMODE_WRITE)
+ list_add(&ctx->list, &nfsi->open_files);
+ else
+ list_add_tail(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
@@ -972,6 +973,13 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
if (NFS_STALE(inode))
goto out;
+ /* pNFS: Attributes aren't updated until we layoutcommit */
+ if (S_ISREG(inode->i_mode)) {
+ status = pnfs_sync_inode(inode, false);
+ if (status)
+ goto out;
+ }
+
status = -ENOMEM;
fattr = nfs_alloc_fattr();
if (fattr == NULL)
@@ -1122,14 +1130,12 @@ out:
}
/**
- * __nfs_revalidate_mapping - Revalidate the pagecache
+ * nfs_revalidate_mapping - Revalidate the pagecache
* @inode - pointer to host inode
* @mapping - pointer to mapping
- * @may_lock - take inode->i_mutex?
*/
-static int __nfs_revalidate_mapping(struct inode *inode,
- struct address_space *mapping,
- bool may_lock)
+int nfs_revalidate_mapping(struct inode *inode,
+ struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long *bitlock = &nfsi->flags;
@@ -1178,12 +1184,7 @@ static int __nfs_revalidate_mapping(struct inode *inode,
nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
trace_nfs_invalidate_mapping_enter(inode);
- if (may_lock) {
- inode_lock(inode);
- ret = nfs_invalidate_mapping(inode, mapping);
- inode_unlock(inode);
- } else
- ret = nfs_invalidate_mapping(inode, mapping);
+ ret = nfs_invalidate_mapping(inode, mapping);
trace_nfs_invalidate_mapping_exit(inode, ret);
clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
@@ -1193,27 +1194,28 @@ out:
return ret;
}
-/**
- * nfs_revalidate_mapping - Revalidate the pagecache
- * @inode - pointer to host inode
- * @mapping - pointer to mapping
- */
-int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+static bool nfs_file_has_writers(struct nfs_inode *nfsi)
{
- return __nfs_revalidate_mapping(inode, mapping, false);
+ struct inode *inode = &nfsi->vfs_inode;
+
+ assert_spin_locked(&inode->i_lock);
+
+ if (!S_ISREG(inode->i_mode))
+ return false;
+ if (list_empty(&nfsi->open_files))
+ return false;
+ /* Note: This relies on nfsi->open_files being ordered with writers
+ * being placed at the head of the list.
+ * See nfs_inode_attach_open_context()
+ */
+ return (list_first_entry(&nfsi->open_files,
+ struct nfs_open_context,
+ list)->mode & FMODE_WRITE) == FMODE_WRITE;
}
-/**
- * nfs_revalidate_mapping_protected - Revalidate the pagecache
- * @inode - pointer to host inode
- * @mapping - pointer to mapping
- *
- * Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex
- * while invalidating the mapping.
- */
-int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping)
+static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
{
- return __nfs_revalidate_mapping(inode, mapping, true);
+ return nfs_file_has_writers(nfsi) && nfs_file_io_is_buffered(nfsi);
}
static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
@@ -1280,22 +1282,24 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
return -EIO;
- if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
- inode->i_version != fattr->change_attr)
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ if (!nfs_file_has_buffered_writers(nfsi)) {
+ /* Verify a few of the more important attributes */
+ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode->i_version != fattr->change_attr)
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE;
- /* Verify a few of the more important attributes */
- if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
- invalid |= NFS_INO_INVALID_ATTR;
+ if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
+ invalid |= NFS_INO_INVALID_ATTR;
- if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
- cur_size = i_size_read(inode);
- new_isize = nfs_size_to_loff_t(fattr->size);
- if (cur_size != new_isize)
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec_equal(&inode->i_ctime, &fattr->ctime))
+ invalid |= NFS_INO_INVALID_ATTR;
+
+ if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+ cur_size = i_size_read(inode);
+ new_isize = nfs_size_to_loff_t(fattr->size);
+ if (cur_size != new_isize)
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ }
}
- if (nfsi->nrequests != 0)
- invalid &= ~NFS_INO_REVAL_PAGECACHE;
/* Have any file permissions changed? */
if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1470,28 +1474,12 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
}
-/*
- * Don't trust the change_attribute, mtime, ctime or size if
- * a pnfs LAYOUTCOMMIT is outstanding
- */
-static void nfs_inode_attrs_handle_layoutcommit(struct inode *inode,
- struct nfs_fattr *fattr)
-{
- if (pnfs_layoutcommit_outstanding(inode))
- fattr->valid &= ~(NFS_ATTR_FATTR_CHANGE |
- NFS_ATTR_FATTR_MTIME |
- NFS_ATTR_FATTR_CTIME |
- NFS_ATTR_FATTR_SIZE);
-}
-
static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
{
int ret;
trace_nfs_refresh_inode_enter(inode);
- nfs_inode_attrs_handle_layoutcommit(inode, fattr);
-
if (nfs_inode_attrs_need_update(inode, fattr))
ret = nfs_update_inode(inode, fattr);
else
@@ -1527,7 +1515,7 @@ EXPORT_SYMBOL_GPL(nfs_refresh_inode);
static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
{
- unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ unsigned long invalid = NFS_INO_INVALID_ATTR;
/*
* Don't revalidate the pagecache if we hold a delegation, but do
@@ -1676,6 +1664,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
unsigned long invalid = 0;
unsigned long now = jiffies;
unsigned long save_cache_validity;
+ bool have_writers = nfs_file_has_buffered_writers(nfsi);
bool cache_revalidated = true;
dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
@@ -1725,17 +1714,25 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* Do atomic weak cache consistency updates */
invalid |= nfs_wcc_update_inode(inode, fattr);
+ if (pnfs_layoutcommit_outstanding(inode)) {
+ nfsi->cache_validity |= save_cache_validity & NFS_INO_INVALID_ATTR;
+ cache_revalidated = false;
+ }
+
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
if (inode->i_version != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
- invalid |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
- if (S_ISDIR(inode->i_mode))
- nfs_force_lookup_revalidate(inode);
+ /* Could it be a race with writeback? */
+ if (!have_writers) {
+ invalid |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
+ if (S_ISDIR(inode->i_mode))
+ nfs_force_lookup_revalidate(inode);
+ }
inode->i_version = fattr->change_attr;
}
} else {
@@ -1768,9 +1765,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (new_isize != cur_isize) {
/* Do we perhaps have any outstanding writes, or has
* the file grown beyond our last write? */
- if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
+ if (nfsi->nrequests == 0 || new_isize > cur_isize) {
i_size_write(inode, new_isize);
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+ if (!have_writers)
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
}
dprintk("NFS: isize change on server for file %s/%ld "
"(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 5ea04d87fc65..7ce5e023c3c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -66,13 +66,16 @@ struct nfs_clone_mount {
struct nfs_client_initdata {
unsigned long init_flags;
- const char *hostname;
- const struct sockaddr *addr;
+ const char *hostname; /* Hostname of the server */
+ const struct sockaddr *addr; /* Address of the server */
+ const char *nodename; /* Hostname of the client */
+ const char *ip_addr; /* IP address of the client */
size_t addrlen;
struct nfs_subversion *nfs_mod;
int proto;
u32 minorversion;
struct net *net;
+ const struct rpc_timeout *timeparms;
};
/*
@@ -147,9 +150,8 @@ extern void nfs_umount(const struct nfs_mount_request *info);
extern const struct rpc_program nfs_program;
extern void nfs_clients_init(struct net *net);
extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
-int nfs_create_rpc_client(struct nfs_client *, const struct rpc_timeout *, rpc_authflavor_t);
+int nfs_create_rpc_client(struct nfs_client *, const struct nfs_client_initdata *, rpc_authflavor_t);
struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
- const struct rpc_timeout *, const char *,
rpc_authflavor_t);
int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
void nfs_server_insert_lists(struct nfs_server *);
@@ -184,7 +186,7 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
rpc_authflavor_t);
extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
-extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
+extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr,
int ds_addrlen, int ds_proto,
unsigned int ds_timeo,
@@ -193,7 +195,7 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
rpc_authflavor_t au_flavor);
extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
struct inode *);
-extern struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
+extern struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo,
unsigned int ds_retrans, rpc_authflavor_t au_flavor);
@@ -338,8 +340,7 @@ nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr);
+ const struct nfs_client_initdata *);
/* dir.c */
extern void nfs_force_use_readdirplus(struct inode *dir);
@@ -411,6 +412,19 @@ extern void __exit unregister_nfs_fs(void);
extern bool nfs_sb_active(struct super_block *sb);
extern void nfs_sb_deactive(struct super_block *sb);
+/* io.c */
+extern void nfs_start_io_read(struct inode *inode);
+extern void nfs_end_io_read(struct inode *inode);
+extern void nfs_start_io_write(struct inode *inode);
+extern void nfs_end_io_write(struct inode *inode);
+extern void nfs_start_io_direct(struct inode *inode);
+extern void nfs_end_io_direct(struct inode *inode);
+
+static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
+{
+ return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
+}
+
/* namespace.c */
#define NFS_PATH_CANONICAL 1
extern char *nfs_path(char **p, struct dentry *dentry,
@@ -496,9 +510,29 @@ void nfs_init_cinfo(struct nfs_commit_info *cinfo,
struct inode *inode,
struct nfs_direct_req *dreq);
int nfs_key_timeout_notify(struct file *filp, struct inode *inode);
-bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx);
+bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode);
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio);
+int nfs_filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+
+#ifdef CONFIG_NFS_V4_1
+static inline
+void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
+{
+ int i;
+
+ for (i = 0; i < cinfo->nbuckets; i++)
+ cinfo->buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
+}
+#else
+static inline
+void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
+{
+}
+#endif
+
+
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
@@ -506,6 +540,13 @@ extern int nfs_migrate_page(struct address_space *,
#define nfs_migrate_page NULL
#endif
+static inline int
+nfs_write_verifier_cmp(const struct nfs_write_verifier *v1,
+ const struct nfs_write_verifier *v2)
+{
+ return memcmp(v1->data, v2->data, sizeof(v1->data));
+}
+
/* unlink.c */
extern struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
@@ -521,8 +562,7 @@ extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_pgio_header *);
extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr);
+ const struct nfs_client_initdata *);
extern int nfs40_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
struct rpc_cred *cred);
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
new file mode 100644
index 000000000000..1fc5d1ce327e
--- /dev/null
+++ b/fs/nfs/io.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016 Trond Myklebust
+ *
+ * I/O and data path helper functionality.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/rwsem.h>
+#include <linux/fs.h>
+#include <linux/nfs_fs.h>
+
+#include "internal.h"
+
+/* Call with exclusively locked inode->i_rwsem */
+static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
+{
+ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+ clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+ inode_dio_wait(inode);
+ }
+}
+
+/**
+ * nfs_start_io_read - declare the file is being used for buffered reads
+ * @inode - file inode
+ *
+ * Declare that a buffered read operation is about to start, and ensure
+ * that we block all direct I/O.
+ * On exit, the function ensures that the NFS_INO_ODIRECT flag is unset,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that buffered read operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas direct I/O
+ * operations need to wait to grab an exclusive lock in order to set
+ * NFS_INO_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
+ */
+void
+nfs_start_io_read(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ /* Be an optimist! */
+ down_read(&inode->i_rwsem);
+ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0)
+ return;
+ up_read(&inode->i_rwsem);
+ /* Slow path.... */
+ down_write(&inode->i_rwsem);
+ nfs_block_o_direct(nfsi, inode);
+ downgrade_write(&inode->i_rwsem);
+}
+
+/**
+ * nfs_end_io_read - declare that the buffered read operation is done
+ * @inode - file inode
+ *
+ * Declare that a buffered read operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void
+nfs_end_io_read(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
+}
+
+/**
+ * nfs_start_io_write - declare the file is being used for buffered writes
+ * @inode - file inode
+ *
+ * Declare that a buffered write operation is about to start, and ensure
+ * that we block all direct I/O.
+ */
+void
+nfs_start_io_write(struct inode *inode)
+{
+ down_write(&inode->i_rwsem);
+ nfs_block_o_direct(NFS_I(inode), inode);
+}
+
+/**
+ * nfs_end_io_write - declare that the buffered write operation is done
+ * @inode - file inode
+ *
+ * Declare that a buffered write operation is done, and release the
+ * lock on inode->i_rwsem.
+ */
+void
+nfs_end_io_write(struct inode *inode)
+{
+ up_write(&inode->i_rwsem);
+}
+
+/* Call with exclusively locked inode->i_rwsem */
+static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
+{
+ if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+ set_bit(NFS_INO_ODIRECT, &nfsi->flags);
+ nfs_wb_all(inode);
+ }
+}
+
+/**
+ * nfs_start_io_direct - declare the file is being used for direct i/o
+ * @inode - file inode
+ *
+ * Declare that a direct I/O operation is about to start, and ensure
+ * that we block all buffered I/O.
+ * On exit, the function ensures that the NFS_INO_ODIRECT flag is set,
+ * and holds a shared lock on inode->i_rwsem to ensure that the flag
+ * cannot be changed.
+ * In practice, this means that direct I/O operations are allowed to
+ * execute in parallel, thanks to the shared lock, whereas buffered I/O
+ * operations need to wait to grab an exclusive lock in order to clear
+ * NFS_INO_ODIRECT.
+ * Note that buffered writes and truncates both take a write lock on
+ * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
+ */
+void
+nfs_start_io_direct(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ /* Be an optimist! */
+ down_read(&inode->i_rwsem);
+ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags) != 0)
+ return;
+ up_read(&inode->i_rwsem);
+ /* Slow path.... */
+ down_write(&inode->i_rwsem);
+ nfs_block_buffered(nfsi, inode);
+ downgrade_write(&inode->i_rwsem);
+}
+
+/**
+ * nfs_end_io_direct - declare that the direct i/o operation is done
+ * @inode - file inode
+ *
+ * Declare that a direct I/O operation is done, and release the shared
+ * lock on inode->i_rwsem.
+ */
+void
+nfs_end_io_direct(struct inode *inode)
+{
+ up_read(&inode->i_rwsem);
+}
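The new fs/nfs/io.c implements the exclusion described in its comments with a single rwsem plus the NFS_INO_ODIRECT flag: callers whose I/O style matches the current mode share inode->i_rwsem, while a mode switch briefly takes the semaphore exclusively, drains the other kind of I/O (inode_dio_wait() or nfs_wb_all()), flips the flag, and then downgrades back to shared. Below is a rough user-space sketch of the same gate for the nfs_start_io_read()/nfs_start_io_direct() paths, assuming POSIX rwlocks and C11 atomics; since pthreads has no atomic downgrade, the sketch re-acquires the shared lock and re-checks instead:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t io_lock = PTHREAD_RWLOCK_INITIALIZER;
    static atomic_bool odirect;     /* false: buffered mode, true: direct mode */

    static void start_io(bool want_direct)
    {
        for (;;) {
            pthread_rwlock_rdlock(&io_lock);        /* be an optimist */
            if (atomic_load(&odirect) == want_direct)
                return;                             /* fast path: mode already matches */
            pthread_rwlock_unlock(&io_lock);

            /* Slow path: switch modes under the exclusive lock. */
            pthread_rwlock_wrlock(&io_lock);
            if (atomic_load(&odirect) != want_direct) {
                /* Here the kernel drains the other mode:
                 * inode_dio_wait() or nfs_wb_all(). */
                atomic_store(&odirect, want_direct);
            }
            pthread_rwlock_unlock(&io_lock);
        }
    }

    static void end_io(void)
    {
        pthread_rwlock_unlock(&io_lock);
    }

    int main(void)
    {
        start_io(false);    /* buffered read: shares the lock with other readers */
        end_io();
        start_io(true);     /* O_DIRECT: flips the mode, then shares the lock */
        end_io();
        printf("final mode: %s\n", atomic_load(&odirect) ? "direct" : "buffered");
        return 0;
    }

Buffered writes (nfs_start_io_write()) simply take and hold the exclusive lock for the duration of the copy, so they need no flag check and serialise against both readers and O_DIRECT exactly as the comments above describe.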
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index 9e9fa347a948..ee753547fb0a 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -76,19 +76,23 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source,
* low timeout interval so that if a connection is lost, we retry through
* the MDS.
*/
-struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
+struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
rpc_authflavor_t au_flavor)
{
+ struct rpc_timeout ds_timeout;
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
.addrlen = ds_addrlen,
+ .nodename = mds_clp->cl_rpcclient->cl_nodename,
+ .ip_addr = mds_clp->cl_ipaddr,
.nfs_mod = &nfs_v3,
.proto = ds_proto,
.net = mds_clp->cl_net,
+ .timeparms = &ds_timeout,
};
- struct rpc_timeout ds_timeout;
struct nfs_client *clp;
char buf[INET6_ADDRSTRLEN + 1];
@@ -97,10 +101,12 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp,
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
+ if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+ set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
/* Use the MDS nfs_client cl_ipaddr. */
nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
- clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- au_flavor);
+ clp = nfs_get_client(&cl_init, au_flavor);
return clp;
}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index cb28cceefebe..698be9361280 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -144,7 +144,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -404,7 +404,7 @@ out:
}
static int
-nfs3_proc_remove(struct inode *dir, struct qstr *name)
+nfs3_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs_removeargs arg = {
.fh = NFS_FH(dir),
@@ -480,7 +480,7 @@ nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
}
static int
-nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+nfs3_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs3_linkargs arg = {
.fromfh = NFS_FH(inode),
@@ -582,7 +582,7 @@ out:
}
static int
-nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
+nfs3_proc_rmdir(struct inode *dir, const struct qstr *name)
{
struct nfs_fattr *dir_attr;
struct nfs3_diropargs arg = {
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index aa03ed09ba06..33da841a21bb 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -113,15 +113,17 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
return -EOPNOTSUPP;
- nfs_wb_all(inode);
inode_lock(inode);
+ err = nfs_sync_inode(inode);
+ if (err)
+ goto out_unlock;
err = nfs42_proc_fallocate(&msg, filep, offset, len);
if (err == 0)
truncate_pagecache_range(inode, offset, (offset + len) -1);
if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
-
+out_unlock:
inode_unlock(inode);
return err;
}
@@ -154,11 +156,20 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
if (status)
return status;
+ status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
+ pos_src, pos_src + (loff_t)count - 1);
+ if (status)
+ return status;
+
status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
dst_lock, FMODE_WRITE);
if (status)
return status;
+ status = nfs_sync_inode(dst_inode);
+ if (status)
+ return status;
+
status = nfs4_call_sync(server->client, server, &msg,
&args.seq_args, &res.seq_res, 0);
if (status == -ENOTSUPP)
@@ -258,7 +269,11 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
if (status)
return status;
- nfs_wb_all(inode);
+ status = nfs_filemap_write_and_wait_range(inode->i_mapping,
+ offset, LLONG_MAX);
+ if (status)
+ return status;
+
status = nfs4_call_sync(server->client, server, &msg,
&args.seq_args, &res.seq_res, 0);
if (status == -ENOTSUPP)
@@ -336,8 +351,7 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
* Mark the bad layout state as invalid, then retry
* with the current stateid.
*/
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
+ pnfs_mark_layout_stateid_invalid(lo, &head);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
} else
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 6dc6f2aea0d6..8b2605882a20 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -330,13 +330,21 @@ static int decode_write_response(struct xdr_stream *xdr,
struct nfs42_write_res *res)
{
__be32 *p;
- int stateids;
p = xdr_inline_decode(xdr, 4 + 8 + 4);
if (unlikely(!p))
goto out_overflow;
- stateids = be32_to_cpup(p++);
+ /*
+ * We never use asynchronous mode, so warn if a server returns
+ * a stateid.
+ */
+ if (unlikely(*p != 0)) {
+ pr_err_once("%s: server has set unrequested "
+ "asynchronous mode\n", __func__);
+ return -EREMOTEIO;
+ }
+ p++;
p = xdr_decode_hyper(p, &res->count);
res->verifier.committed = be32_to_cpup(p);
return decode_verifier(xdr, &res->verifier.verifier);
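The decode_write_response() change above makes the COPY reply parser strict: the client only ever requests synchronous copies, so a non-zero stateid count from the server is reported as a protocol error rather than skipped. A small user-space sketch of the same check, with a hand-rolled big-endian reader standing in for xdr_inline_decode()/xdr_decode_hyper():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Buffer layout (big-endian, as in XDR):
     *   u32 stateid_count | u64 bytes_copied | u32 committed */
    struct write_res {
        uint64_t count;
        uint32_t committed;
    };

    static uint32_t get_be32(const unsigned char *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return ntohl(v);
    }

    static int decode_write_response(const unsigned char *p, size_t len,
                                     struct write_res *res)
    {
        if (len < 16)
            return -1;                  /* short reply */
        if (get_be32(p) != 0)
            return -2;                  /* unrequested async mode (-EREMOTEIO) */
        res->count = ((uint64_t)get_be32(p + 4) << 32) | get_be32(p + 8);
        res->committed = get_be32(p + 12);
        return 0;
    }

    int main(void)
    {
        /* 0 stateids, 4096 bytes copied, committed = 2. */
        const unsigned char ok[16] = {
            0, 0, 0, 0,  0, 0, 0, 0,  0, 0, 0x10, 0,  0, 0, 0, 2
        };
        unsigned char bad[16];
        struct write_res res;
        int rc;

        memcpy(bad, ok, sizeof(bad));
        bad[3] = 1;                     /* server returned one stateid */

        rc = decode_write_response(ok, sizeof(ok), &res);
        printf("ok : rc=%d copied=%llu committed=%u\n",
               rc, (unsigned long long)res.count, (unsigned)res.committed);
        rc = decode_write_response(bad, sizeof(bad), &res);
        printf("bad: rc=%d\n", rc);
        return 0;
    }

In the kernel path the error is surfaced as -EREMOTEIO after a pr_err_once(), so a misbehaving server is reported once rather than silently tolerated.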
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 768456fa1b17..324bfdc21250 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -185,6 +185,7 @@ struct nfs4_state {
struct nfs4_exception {
struct nfs4_state *state;
struct inode *inode;
+ nfs4_stateid *stateid;
long timeout;
unsigned char delay : 1,
recovering : 1,
@@ -224,7 +225,8 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
extern struct file_system_type nfs4_fs_type;
/* nfs4namespace.c */
-struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, struct qstr *);
+struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
+ const struct qstr *);
struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *);
int nfs4_replace_transport(struct nfs_server *server,
@@ -251,7 +253,7 @@ extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struc
extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
struct page *page, struct rpc_cred *);
extern int nfs4_proc_fsid_present(struct inode *, struct rpc_cred *);
-extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
+extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
extern const struct xattr_handler *nfs4_xattr_handlers[];
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 10410e8b5853..8d7d08d4f95f 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -349,10 +349,10 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
* Returns pointer to an NFS client, or an ERR_PTR value.
*/
struct nfs_client *nfs4_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr)
+ const struct nfs_client_initdata *cl_init)
{
char buf[INET6_ADDRSTRLEN + 1];
+ const char *ip_addr = cl_init->ip_addr;
struct nfs_client *old;
int error;
@@ -370,9 +370,9 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
- error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
+ error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
if (error == -EINVAL)
- error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
+ error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
if (error < 0)
goto error;
@@ -793,10 +793,12 @@ static int nfs4_set_client(struct nfs_server *server,
.hostname = hostname,
.addr = addr,
.addrlen = addrlen,
+ .ip_addr = ip_addr,
.nfs_mod = &nfs_v4,
.proto = proto,
.minorversion = minorversion,
.net = net,
+ .timeparms = timeparms,
};
struct nfs_client *clp;
int error;
@@ -809,7 +811,7 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
+ clp = nfs_get_client(&cl_init, authflavour);
if (IS_ERR(clp)) {
error = PTR_ERR(clp);
goto error;
@@ -842,20 +844,24 @@ error:
* low timeout interval so that if a connection is lost, we retry through
* the MDS.
*/
-struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
+struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
u32 minor_version, rpc_authflavor_t au_flavor)
{
+ struct rpc_timeout ds_timeout;
+ struct nfs_client *mds_clp = mds_srv->nfs_client;
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
.addrlen = ds_addrlen,
+ .nodename = mds_clp->cl_rpcclient->cl_nodename,
+ .ip_addr = mds_clp->cl_ipaddr,
.nfs_mod = &nfs_v4,
.proto = ds_proto,
.minorversion = minor_version,
.net = mds_clp->cl_net,
+ .timeparms = &ds_timeout,
};
- struct rpc_timeout ds_timeout;
struct nfs_client *clp;
char buf[INET6_ADDRSTRLEN + 1];
@@ -863,14 +869,16 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
return ERR_PTR(-EINVAL);
cl_init.hostname = buf;
+ if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
+ __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
/*
* Set an authflavor equal to the MDS value. Use the MDS nfs_client
* cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
* (section 13.1 RFC 5661).
*/
nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
- clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- au_flavor);
+ clp = nfs_get_client(&cl_init, au_flavor);
dprintk("<-- %s %p\n", __func__, clp);
return clp;
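The client-setup hunks above (internal.h, nfs3client.c, nfs4client.c) fold the RPC timeout, client IP address and nodename into struct nfs_client_initdata, so nfs_get_client() and nfs_init_client()/nfs4_init_client() take one descriptor instead of a growing argument list. A trivial sketch of that parameter-object shape, with illustrative names rather than the kernel API:

    #include <stdio.h>

    struct timeouts {
        unsigned int to_initval;
        unsigned int to_retries;
    };

    struct client_initdata {
        const char *hostname;           /* server */
        const char *nodename;           /* client node name */
        const char *ip_addr;            /* client presentation address */
        const struct timeouts *timeparms;
    };

    struct client {
        char desc[128];
    };

    /* One argument instead of four: the callee reads what it needs. */
    static struct client *get_client(const struct client_initdata *init)
    {
        static struct client clp;

        snprintf(clp.desc, sizeof(clp.desc), "%s -> %s via %s (timeo=%u)",
                 init->nodename, init->hostname, init->ip_addr,
                 init->timeparms->to_initval);
        return &clp;
    }

    int main(void)
    {
        struct timeouts t = { .to_initval = 600, .to_retries = 2 };
        struct client_initdata init = {
            .hostname = "server.example",
            .nodename = "client1",
            .ip_addr = "192.0.2.10",
            .timeparms = &t,
        };

        printf("%s\n", get_client(&init)->desc);
        return 0;
    }

The same descriptor is what lets the DS paths above pass the MDS cl_ipaddr and cl_nodename without adding further parameters to nfs_get_client().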
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 014b0e41ace5..d085ad794884 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -66,7 +66,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (openflags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
- nfs_sync_inode(inode);
+ filemap_write_and_wait(inode->i_mapping);
}
inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
@@ -133,21 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
size_t count, unsigned int flags)
{
- struct inode *in_inode = file_inode(file_in);
- struct inode *out_inode = file_inode(file_out);
- int ret;
-
- if (in_inode == out_inode)
+ if (file_inode(file_in) == file_inode(file_out))
return -EINVAL;
- /* flush any pending writes */
- ret = nfs_sync_inode(in_inode);
- if (ret)
- return ret;
- ret = nfs_sync_inode(out_inode);
- if (ret)
- return ret;
-
return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
}
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index f592672373cb..d21104912676 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -208,7 +208,7 @@ static struct rpc_clnt *nfs_find_best_sec(struct rpc_clnt *clnt,
*/
struct rpc_clnt *
nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
- struct qstr *name)
+ const struct qstr *name)
{
struct page *page;
struct nfs4_secinfo_flavors *flavors;
@@ -397,7 +397,7 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
rpc_authflavor_t flavor = server->client->cl_auth->au_flavor;
struct dentry *parent = dget_parent(dentry);
struct inode *dir = d_inode(parent);
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
struct rpc_clnt *client;
struct vfsmount *mnt;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ff416d0e24bc..a036e93bdf96 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -363,6 +363,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
+ const nfs4_stateid *stateid = exception->stateid;
struct inode *inode = exception->inode;
int ret = errorcode;
@@ -376,9 +377,18 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
- if (inode && nfs_async_inode_return_delegation(inode,
- NULL) == 0)
- goto wait_on_recovery;
+ if (inode) {
+ int err;
+
+ err = nfs_async_inode_return_delegation(inode,
+ stateid);
+ if (err == 0)
+ goto wait_on_recovery;
+ if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
+ exception->retry = 1;
+ break;
+ }
+ }
if (state == NULL)
break;
ret = nfs4_schedule_stateid_recovery(server, state);
@@ -427,6 +437,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
+ case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
exception->delay = 1;
return 0;
@@ -2669,28 +2680,17 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
return res;
}
-static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
- struct nfs_fattr *fattr, struct iattr *sattr,
- struct nfs4_state *state, struct nfs4_label *ilabel,
- struct nfs4_label *olabel)
+static int _nfs4_do_setattr(struct inode *inode,
+ struct nfs_setattrargs *arg,
+ struct nfs_setattrres *res,
+ struct rpc_cred *cred,
+ struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_setattrargs arg = {
- .fh = NFS_FH(inode),
- .iap = sattr,
- .server = server,
- .bitmask = server->attr_bitmask,
- .label = ilabel,
- };
- struct nfs_setattrres res = {
- .fattr = fattr,
- .label = olabel,
- .server = server,
- };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
- .rpc_argp = &arg,
- .rpc_resp = &res,
+ .rpc_argp = arg,
+ .rpc_resp = res,
.rpc_cred = cred,
};
struct rpc_cred *delegation_cred = NULL;
@@ -2699,17 +2699,13 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
bool truncate;
int status;
- arg.bitmask = nfs4_bitmask(server, ilabel);
- if (ilabel)
- arg.bitmask = nfs4_bitmask(server, olabel);
-
- nfs_fattr_init(fattr);
+ nfs_fattr_init(res->fattr);
/* Servers should only apply open mode checks for file size changes */
- truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
+ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
fmode = truncate ? FMODE_WRITE : FMODE_READ;
- if (nfs4_copy_delegation_stateid(inode, fmode, &arg.stateid, &delegation_cred)) {
+ if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
} else if (truncate && state != NULL) {
struct nfs_lockowner lockowner = {
@@ -2719,19 +2715,19 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
if (!nfs4_valid_open_stateid(state))
return -EBADF;
if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner,
- &arg.stateid, &delegation_cred) == -EIO)
+ &arg->stateid, &delegation_cred) == -EIO)
return -EBADF;
} else
- nfs4_stateid_copy(&arg.stateid, &zero_stateid);
+ nfs4_stateid_copy(&arg->stateid, &zero_stateid);
if (delegation_cred)
msg.rpc_cred = delegation_cred;
- status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+ status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
put_rpccred(delegation_cred);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
- trace_nfs4_setattr(inode, &arg.stateid, status);
+ trace_nfs4_setattr(inode, &arg->stateid, status);
return status;
}
@@ -2741,13 +2737,31 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs4_label *olabel)
{
struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_setattrargs arg = {
+ .fh = NFS_FH(inode),
+ .iap = sattr,
+ .server = server,
+ .bitmask = server->attr_bitmask,
+ .label = ilabel,
+ };
+ struct nfs_setattrres res = {
+ .fattr = fattr,
+ .label = olabel,
+ .server = server,
+ };
struct nfs4_exception exception = {
.state = state,
.inode = inode,
+ .stateid = &arg.stateid,
};
int err;
+
+ arg.bitmask = nfs4_bitmask(server, ilabel);
+ if (ilabel)
+ arg.bitmask = nfs4_bitmask(server, olabel);
+
do {
- err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel);
+ err = _nfs4_do_setattr(inode, &arg, &res, cred, state);
switch (err) {
case -NFS4ERR_OPENMODE:
if (!(sattr->ia_valid & ATTR_SIZE)) {
@@ -3267,13 +3281,6 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
return status;
}
-static int nfs4_do_find_root_sec(struct nfs_server *server,
- struct nfs_fh *fhandle, struct nfs_fsinfo *info)
-{
- int mv = server->nfs_client->cl_minorversion;
- return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
-}
-
/**
* nfs4_proc_get_rootfh - get file handle for server's pseudoroot
* @server: initialized nfs_server handle
@@ -3293,7 +3300,8 @@ int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
status = nfs4_lookup_root(server, fhandle, info);
if (auth_probe || status == NFS4ERR_WRONGSEC)
- status = nfs4_do_find_root_sec(server, fhandle, info);
+ status = server->nfs_client->cl_mvops->find_root_sec(server,
+ fhandle, info);
if (status == 0)
status = nfs4_server_capabilities(server, fhandle);
@@ -3530,7 +3538,7 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
- struct qstr *name, struct nfs_fh *fhandle,
+ const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct nfs4_label *label)
{
struct nfs4_exception exception = { };
@@ -3572,7 +3580,7 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -3588,7 +3596,7 @@ static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
}
struct rpc_clnt *
-nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
+nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct rpc_clnt *client = NFS_CLIENT(dir);
@@ -3747,7 +3755,7 @@ out:
return status;
}
-static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
+static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs args = {
@@ -3770,7 +3778,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
return status;
}
-static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
+static int nfs4_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
@@ -3853,7 +3861,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 1;
}
-static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_link_arg arg = {
@@ -3900,7 +3908,7 @@ out:
return status;
}
-static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
@@ -3922,7 +3930,7 @@ struct nfs4_createdata {
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
- struct qstr *name, struct iattr *sattr, u32 ftype)
+ const struct qstr *name, struct iattr *sattr, u32 ftype)
{
struct nfs4_createdata *data;
@@ -4392,7 +4400,8 @@ static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
struct rpc_message *msg)
{
hdr->timestamp = jiffies;
- hdr->pgio_done_cb = nfs4_read_done_cb;
+ if (!hdr->pgio_done_cb)
+ hdr->pgio_done_cb = nfs4_read_done_cb;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
}
@@ -7869,11 +7878,13 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
struct inode *inode = lgp->args.inode;
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layout_hdr *lo;
- int status = task->tk_status;
+ int nfs4err = task->tk_status;
+ int err, status = 0;
+ LIST_HEAD(head);
dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
- switch (status) {
+ switch (nfs4err) {
case 0:
goto out;
@@ -7905,45 +7916,42 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
status = -EOVERFLOW;
goto out;
}
- /* Fallthrough */
+ status = -EBUSY;
+ break;
case -NFS4ERR_RECALLCONFLICT:
- nfs4_handle_exception(server, -NFS4ERR_RECALLCONFLICT,
- exception);
status = -ERECALLCONFLICT;
- goto out;
+ break;
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
exception->timeout = 0;
spin_lock(&inode->i_lock);
- if (nfs4_stateid_match(&lgp->args.stateid,
+ lo = NFS_I(inode)->layout;
+ /* If the open stateid was bad, then recover it. */
+ if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
+ nfs4_stateid_match_other(&lgp->args.stateid,
&lgp->args.ctx->state->stateid)) {
spin_unlock(&inode->i_lock);
- /* If the open stateid was bad, then recover it. */
exception->state = lgp->args.ctx->state;
break;
}
- lo = NFS_I(inode)->layout;
- if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
- nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
- LIST_HEAD(head);
-
- /*
- * Mark the bad layout state as invalid, then retry
- * with the current stateid.
- */
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
- spin_unlock(&inode->i_lock);
- pnfs_free_lseg_list(&head);
- status = -EAGAIN;
- goto out;
- } else
- spin_unlock(&inode->i_lock);
- }
- status = nfs4_handle_exception(server, status, exception);
- if (exception->retry)
+ /*
+ * Mark the bad layout state as invalid, then retry
+ */
+ pnfs_mark_layout_stateid_invalid(lo, &head);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&head);
status = -EAGAIN;
+ goto out;
+ }
+
+ err = nfs4_handle_exception(server, nfs4err, exception);
+ if (!status) {
+ if (exception->retry)
+ status = -EAGAIN;
+ else
+ status = err;
+ }
out:
dprintk("<-- %s\n", __func__);
return status;
@@ -8129,8 +8137,7 @@ static void nfs4_layoutreturn_release(void *calldata)
spin_lock(&lo->plh_inode->i_lock);
pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
be32_to_cpu(lrp->args.stateid.seqid));
- pnfs_mark_layout_returned_if_empty(lo);
- if (lrp->res.lrs_present)
+ if (lrp->res.lrs_present && pnfs_layout_is_valid(lo))
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
pnfs_clear_layoutreturn_waitbit(lo);
spin_unlock(&lo->plh_inode->i_lock);
@@ -8835,7 +8842,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
#endif
};
-ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
{
ssize_t error, error2;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 661e753fe1c9..7bd3a5c09d31 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1985,9 +1985,14 @@ encode_layoutcommit(struct xdr_stream *xdr,
p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */
*p = cpu_to_be32(0); /* reclaim */
encode_nfs4_stateid(xdr, &args->stateid);
- p = reserve_space(xdr, 20);
- *p++ = cpu_to_be32(1); /* newoffset = TRUE */
- p = xdr_encode_hyper(p, args->lastbytewritten);
+ if (args->lastbytewritten != U64_MAX) {
+ p = reserve_space(xdr, 20);
+ *p++ = cpu_to_be32(1); /* newoffset = TRUE */
+ p = xdr_encode_hyper(p, args->lastbytewritten);
+ } else {
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(0); /* newoffset = FALSE */
+ }
*p++ = cpu_to_be32(0); /* Never send time_modify_changed */
*p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
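
Together with the pnfs_layoutcommit_inode() change later in this patch (end_pos == 0 becomes lastbytewritten = U64_MAX), U64_MAX now acts as a sentinel for "nothing written", so the client encodes newoffset = FALSE instead of a bogus offset. A small stand-alone sketch of the on-the-wire choice, using plain big-endian writes instead of the kernel's xdr_stream helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static size_t encode_newoffset(uint8_t *buf, uint64_t lastbytewritten)
{
	uint32_t yes = htonl(1), no = htonl(0);

	if (lastbytewritten != UINT64_MAX) {
		uint32_t hi = htonl(lastbytewritten >> 32);
		uint32_t lo = htonl(lastbytewritten & 0xffffffffu);

		memcpy(buf, &yes, 4);		/* newoffset = TRUE */
		memcpy(buf + 4, &hi, 4);	/* offset4, high word */
		memcpy(buf + 8, &lo, 4);	/* offset4, low word */
		return 12;
	}
	memcpy(buf, &no, 4);			/* newoffset = FALSE, no offset follows */
	return 4;
}

int main(void)
{
	uint8_t buf[12];

	printf("%zu %zu\n", encode_newoffset(buf, 4095),
	       encode_newoffset(buf, UINT64_MAX));	/* 12 4 */
	return 0;
}
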
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 31c7763b94d5..2ca9167bc97d 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -37,7 +37,6 @@
{ 1 << NFS_INO_ADVISE_RDPLUS, "ADVISE_RDPLUS" }, \
{ 1 << NFS_INO_STALE, "STALE" }, \
{ 1 << NFS_INO_INVALIDATING, "INVALIDATING" }, \
- { 1 << NFS_INO_FLUSHING, "FLUSHING" }, \
{ 1 << NFS_INO_FSCACHE, "FSCACHE" }, \
{ 1 << NFS_INO_LAYOUTCOMMIT, "NEED_LAYOUTCOMMIT" }, \
{ 1 << NFS_INO_LAYOUTCOMMITTING, "LAYOUTCOMMIT" })
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0fbe734cc38c..70806cae0d36 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -259,7 +259,7 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
* is required.
* Note that caller must hold inode->i_lock.
*/
-static int
+int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
struct list_head *lseg_list)
{
@@ -334,14 +334,17 @@ pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
}
static void
-init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
+pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
+ const struct pnfs_layout_range *range,
+ const nfs4_stateid *stateid)
{
INIT_LIST_HEAD(&lseg->pls_list);
INIT_LIST_HEAD(&lseg->pls_lc_list);
atomic_set(&lseg->pls_refcount, 1);
- smp_mb();
set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
lseg->pls_layout = lo;
+ lseg->pls_range = *range;
+ lseg->pls_seq = be32_to_cpu(stateid->seqid);
}
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
@@ -486,15 +489,6 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
(end2 == NFS4_MAX_UINT64 || end2 > start1);
}
-static bool
-should_free_lseg(const struct pnfs_layout_range *lseg_range,
- const struct pnfs_layout_range *recall_range)
-{
- return (recall_range->iomode == IOMODE_ANY ||
- lseg_range->iomode == recall_range->iomode) &&
- pnfs_lseg_range_intersecting(lseg_range, recall_range);
-}
-
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list)
{
@@ -533,6 +527,27 @@ static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
return (s32)(s1 - s2) > 0;
}
+static bool
+pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
+ const struct pnfs_layout_range *recall_range)
+{
+ return (recall_range->iomode == IOMODE_ANY ||
+ lseg_range->iomode == recall_range->iomode) &&
+ pnfs_lseg_range_intersecting(lseg_range, recall_range);
+}
+
+static bool
+pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
+ const struct pnfs_layout_range *recall_range,
+ u32 seq)
+{
+ if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
+ return false;
+ if (recall_range == NULL)
+ return true;
+ return pnfs_should_free_range(&lseg->pls_range, recall_range);
+}
+
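
pnfs_match_lseg_recall() combines the iomode/range test with the existing pnfs_seqid_is_newer() check. The latter relies on serial-number arithmetic: casting the unsigned difference to a signed 32-bit value keeps the comparison correct across seqid wraparound. A stand-alone illustration of that property:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool seqid_is_newer(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;		/* wraparound-safe "s1 newer than s2" */
}

int main(void)
{
	printf("%d\n", seqid_is_newer(2, 1));			/* 1: 2 is newer */
	printf("%d\n", seqid_is_newer(5, 0xfffffffe));		/* 1: survives wrap */
	printf("%d\n", seqid_is_newer(0xfffffffe, 5));		/* 0 */
	return 0;
}
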
/**
* pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
* @lo: layout header containing the lsegs
@@ -562,10 +577,7 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
if (list_empty(&lo->plh_segs))
return 0;
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
- if (!recall_range ||
- should_free_lseg(&lseg->pls_range, recall_range)) {
- if (seq && pnfs_seqid_is_newer(lseg->pls_seq, seq))
- continue;
+ if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
dprintk("%s: freeing lseg %p iomode %d seq %u"
"offset %llu length %llu\n", __func__,
lseg, lseg->pls_range.iomode, lseg->pls_seq,
@@ -761,24 +773,25 @@ void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
bool update_barrier)
{
- u32 oldseq, newseq, new_barrier;
- int empty = list_empty(&lo->plh_segs);
+ u32 oldseq, newseq, new_barrier = 0;
+ bool invalid = !pnfs_layout_is_valid(lo);
oldseq = be32_to_cpu(lo->plh_stateid.seqid);
newseq = be32_to_cpu(new->seqid);
- if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
+ if (invalid || pnfs_seqid_is_newer(newseq, oldseq)) {
nfs4_stateid_copy(&lo->plh_stateid, new);
- if (update_barrier) {
- new_barrier = be32_to_cpu(new->seqid);
- } else {
- /* Because of wraparound, we want to keep the barrier
- * "close" to the current seqids.
- */
- new_barrier = newseq - atomic_read(&lo->plh_outstanding);
- }
- if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
- lo->plh_barrier = new_barrier;
+ /*
+ * Because of wraparound, we want to keep the barrier
+ * "close" to the current seqids.
+ */
+ new_barrier = newseq - atomic_read(&lo->plh_outstanding);
}
+ if (update_barrier)
+ new_barrier = be32_to_cpu(new->seqid);
+ else if (new_barrier == 0)
+ return;
+ if (invalid || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
+ lo->plh_barrier = new_barrier;
}
static bool
@@ -873,15 +886,37 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
+static void
+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
+{
+ lo->plh_return_iomode = 0;
+ lo->plh_return_seq = 0;
+ clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+}
+
static bool
-pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
+ nfs4_stateid *stateid,
+ enum pnfs_iomode *iomode)
{
if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
return false;
- lo->plh_return_iomode = 0;
- lo->plh_return_seq = 0;
pnfs_get_layout_hdr(lo);
- clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+ if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
+ if (stateid != NULL) {
+ nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ if (lo->plh_return_seq != 0)
+ stateid->seqid = cpu_to_be32(lo->plh_return_seq);
+ }
+ if (iomode != NULL)
+ *iomode = lo->plh_return_iomode;
+ pnfs_clear_layoutreturn_info(lo);
+ return true;
+ }
+ if (stateid != NULL)
+ nfs4_stateid_copy(stateid, &lo->plh_stateid);
+ if (iomode != NULL)
+ *iomode = IOMODE_ANY;
return true;
}
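
With the new signature, pnfs_prepare_layoutreturn() hands back a private copy of the stateid and iomode while the inode lock is still held, so callers no longer read lo->plh_stateid after dropping the lock. A rough user-space sketch of that caller pattern; the struct fields and values here are invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct layout {
	pthread_mutex_t lock;
	char stateid[16];
	int iomode;
	bool return_requested;
};

static bool prepare_layoutreturn(struct layout *lo, char *stateid, int *iomode)
{
	/* caller holds lo->lock */
	if (!lo->return_requested)
		return false;
	memcpy(stateid, lo->stateid, sizeof(lo->stateid));	/* private copy */
	*iomode = lo->iomode;
	lo->return_requested = false;
	return true;
}

int main(void)
{
	struct layout lo = { PTHREAD_MUTEX_INITIALIZER, "seq-7:abcdef", 1, true };
	char stateid[16];
	int iomode;
	bool send;

	pthread_mutex_lock(&lo.lock);
	send = prepare_layoutreturn(&lo, stateid, &iomode);
	pthread_mutex_unlock(&lo.lock);		/* the copies outlive the lock */

	if (send)
		printf("LAYOUTRETURN iomode %d stateid %s\n", iomode, stateid);
	return 0;
}
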
@@ -949,10 +984,7 @@ static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
enum pnfs_iomode iomode;
bool send;
- nfs4_stateid_copy(&stateid, &lo->plh_stateid);
- stateid.seqid = cpu_to_be32(lo->plh_return_seq);
- iomode = lo->plh_return_iomode;
- send = pnfs_prepare_layoutreturn(lo);
+ send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
spin_unlock(&inode->i_lock);
if (send) {
/* Send an async layoutreturn so we dont deadlock */
@@ -989,7 +1021,6 @@ _pnfs_return_layout(struct inode *ino)
dprintk("NFS: %s no layout to return\n", __func__);
goto out;
}
- nfs4_stateid_copy(&stateid, &nfsi->layout->plh_stateid);
/* Reference matched in nfs4_layoutreturn_release */
pnfs_get_layout_hdr(lo);
empty = list_empty(&lo->plh_segs);
@@ -1012,8 +1043,7 @@ _pnfs_return_layout(struct inode *ino)
goto out_put_layout_hdr;
}
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- send = pnfs_prepare_layoutreturn(lo);
+ send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
if (send)
@@ -1080,11 +1110,10 @@ bool pnfs_roc(struct inode *ino)
goto out_noroc;
}
- nfs4_stateid_copy(&stateid, &lo->plh_stateid);
/* always send layoutreturn if being marked so */
- if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED,
- &lo->plh_flags))
- layoutreturn = pnfs_prepare_layoutreturn(lo);
+ if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+ layoutreturn = pnfs_prepare_layoutreturn(lo,
+ &stateid, NULL);
list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
/* If we are sending layoutreturn, invalidate all valid lsegs */
@@ -1132,7 +1161,6 @@ void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
spin_lock(&ino->i_lock);
lo = NFS_I(ino)->layout;
- pnfs_mark_layout_returned_if_empty(lo);
if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
lo->plh_barrier = barrier;
spin_unlock(&ino->i_lock);
@@ -1505,7 +1533,7 @@ pnfs_update_layout(struct inode *ino,
struct pnfs_layout_segment *lseg = NULL;
nfs4_stateid stateid;
long timeout = 0;
- unsigned long giveup = jiffies + rpc_get_timeout(server->client);
+ unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
bool first;
if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
@@ -1645,33 +1673,44 @@ lookup_again:
lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
+ atomic_dec(&lo->plh_outstanding);
if (IS_ERR(lseg)) {
switch(PTR_ERR(lseg)) {
- case -ERECALLCONFLICT:
+ case -EBUSY:
if (time_after(jiffies, giveup))
lseg = NULL;
- /* Fallthrough */
- case -EAGAIN:
- pnfs_put_layout_hdr(lo);
- if (first)
- pnfs_clear_first_layoutget(lo);
- if (lseg) {
- trace_pnfs_update_layout(ino, pos, count,
- iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
- goto lookup_again;
+ break;
+ case -ERECALLCONFLICT:
+ /* Huh? We hold no layouts, how is there a recall? */
+ if (first) {
+ lseg = NULL;
+ break;
}
+ /* Destroy the existing layout and start over */
+ if (time_after(jiffies, giveup))
+ pnfs_destroy_layout(NFS_I(ino));
/* Fallthrough */
+ case -EAGAIN:
+ break;
default:
if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
lseg = NULL;
}
+ goto out_put_layout_hdr;
+ }
+ if (lseg) {
+ if (first)
+ pnfs_clear_first_layoutget(lo);
+ trace_pnfs_update_layout(ino, pos, count,
+ iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
+ pnfs_put_layout_hdr(lo);
+ goto lookup_again;
}
} else {
pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
}
- atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
if (first)
pnfs_clear_first_layoutget(lo);
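
The retry policy in pnfs_update_layout() now retries transient -EBUSY results until a deadline of twice the lease time, instead of the RPC timeout, before giving up. A compressed sketch of that loop, with time(2) standing in for jiffies and a fake layoutget that succeeds on the third attempt:

#include <errno.h>
#include <stdio.h>
#include <time.h>

static int try_layoutget(int *attempts)
{
	return ++(*attempts) < 3 ? -EBUSY : 0;	/* pretend the server is busy twice */
}

int main(void)
{
	int lease_time = 90, attempts = 0, err;
	time_t giveup = time(NULL) + 2 * lease_time;	/* cl_lease_time << 1 */

	do {
		err = try_layoutget(&attempts);
	} while (err == -EBUSY && time(NULL) <= giveup);

	printf("done after %d attempts, err = %d\n", attempts, err);
	return 0;
}
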
@@ -1735,9 +1774,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
return lseg;
}
- init_lseg(lo, lseg);
- lseg->pls_range = res->range;
- lseg->pls_seq = be32_to_cpu(res->stateid.seqid);
+ pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
spin_lock(&ino->i_lock);
if (pnfs_layoutgets_blocked(lo)) {
@@ -1758,16 +1795,19 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
* inode invalid, and don't bother validating the stateid
* sequence number.
*/
- pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL, 0);
+ pnfs_mark_layout_stateid_invalid(lo, &free_me);
nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
}
- clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-
pnfs_get_lseg(lseg);
pnfs_layout_insert_lseg(lo, lseg, &free_me);
+ if (!pnfs_layout_is_valid(lo)) {
+ pnfs_clear_layoutreturn_info(lo);
+ clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+ }
+
if (res->return_on_close)
set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
@@ -1787,14 +1827,14 @@ static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
u32 seq)
{
- if (lo->plh_return_iomode == iomode)
- return;
- if (lo->plh_return_iomode != 0)
+ if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
iomode = IOMODE_ANY;
lo->plh_return_iomode = iomode;
set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
- if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
+ if (seq != 0) {
+ WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
lo->plh_return_seq = seq;
+ }
}
/**
@@ -1824,7 +1864,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
assert_spin_locked(&lo->plh_inode->i_lock);
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
- if (should_free_lseg(&lseg->pls_range, return_range)) {
+ if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
dprintk("%s: marking lseg %p iomode %d "
"offset %llu length %llu\n", __func__,
lseg, lseg->pls_range.iomode,
@@ -1855,19 +1895,17 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
bool return_now = false;
spin_lock(&inode->i_lock);
- pnfs_set_plh_return_info(lo, range.iomode, lseg->pls_seq);
+ pnfs_set_plh_return_info(lo, range.iomode, 0);
/*
* mark all matching lsegs so that we are sure to have no live
* segments at hand when sending layoutreturn. See pnfs_put_lseg()
* for how it works.
*/
- if (!pnfs_mark_matching_lsegs_return(lo, &free_me,
- &range, lseg->pls_seq)) {
+ if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0)) {
nfs4_stateid stateid;
- enum pnfs_iomode iomode = lo->plh_return_iomode;
+ enum pnfs_iomode iomode;
- nfs4_stateid_copy(&stateid, &lo->plh_stateid);
- return_now = pnfs_prepare_layoutreturn(lo);
+ return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
spin_unlock(&inode->i_lock);
if (return_now)
pnfs_send_layoutreturn(lo, &stateid, iomode, false);
@@ -2382,7 +2420,10 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
nfs_fattr_init(&data->fattr);
data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
data->res.fattr = &data->fattr;
- data->args.lastbytewritten = end_pos - 1;
+ if (end_pos != 0)
+ data->args.lastbytewritten = end_pos - 1;
+ else
+ data->args.lastbytewritten = U64_MAX;
data->res.server = NFS_SERVER(inode);
if (ld->prepare_layoutcommit) {
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index b21bd0bee784..31d99b2927b0 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -268,6 +268,8 @@ int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
const struct pnfs_layout_range *recall_range,
u32 seq);
+int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *lseg_list);
bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
@@ -375,6 +377,11 @@ static inline bool nfs_have_layout(struct inode *inode)
return NFS_I(inode)->layout != NULL;
}
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) == 0;
+}
+
static inline struct nfs4_deviceid_node *
nfs4_get_deviceid(struct nfs4_deviceid_node *d)
{
@@ -545,19 +552,6 @@ pnfs_calc_offset_length(u64 offset, u64 end)
return 1 + end - offset;
}
-/**
- * pnfs_mark_layout_returned_if_empty - marks the layout as returned
- * @lo: layout header
- *
- * Note: Caller must hold inode->i_lock
- */
-static inline void
-pnfs_mark_layout_returned_if_empty(struct pnfs_layout_hdr *lo)
-{
- if (list_empty(&lo->plh_segs))
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-}
-
static inline void
pnfs_copy_range(struct pnfs_layout_range *dst,
const struct pnfs_layout_range *src)
@@ -629,6 +623,13 @@ pnfs_sync_inode(struct inode *inode, bool datasync)
}
static inline bool
+pnfs_layoutcommit_outstanding(struct inode *inode)
+{
+ return false;
+}
+
+
+static inline bool
pnfs_roc(struct inode *ino)
{
return false;
@@ -716,13 +717,6 @@ pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
return false;
}
-static inline bool
-pnfs_layoutcommit_outstanding(struct inode *inode)
-{
- return false;
-}
-
-
static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
return NULL;
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index b38e3c0dc790..f3468b57a32a 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -595,7 +595,7 @@ static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
}
static struct nfs_client *(*get_v3_ds_connect)(
- struct nfs_client *mds_clp,
+ struct nfs_server *mds_srv,
const struct sockaddr *ds_addr,
int ds_addrlen,
int ds_proto,
@@ -654,7 +654,7 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
rpc_clnt_test_and_add_xprt, NULL);
} else
- clp = get_v3_ds_connect(mds_srv->nfs_client,
+ clp = get_v3_ds_connect(mds_srv,
(struct sockaddr *)&da->da_addr,
da->da_addrlen, IPPROTO_TCP,
timeo, retrans, au_flavor);
@@ -690,7 +690,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
dprintk("%s: DS %s: trying address %s\n",
__func__, ds->ds_remotestr, da->da_remotestr);
- clp = nfs4_set_ds_client(mds_srv->nfs_client,
+ clp = nfs4_set_ds_client(mds_srv,
(struct sockaddr *)&da->da_addr,
da->da_addrlen, IPPROTO_TCP,
timeo, retrans, minor_version,
@@ -940,6 +940,13 @@ EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
+ int ret;
+
+ if (!pnfs_layoutcommit_outstanding(inode))
+ return 0;
+ ret = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (ret < 0)
+ return ret;
if (datasync)
return 0;
return pnfs_layoutcommit_inode(inode, true);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b417bbcd9704..b7bca8303989 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -145,7 +145,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -299,7 +299,7 @@ out:
}
static int
-nfs_proc_remove(struct inode *dir, struct qstr *name)
+nfs_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs_removeargs arg = {
.fh = NFS_FH(dir),
@@ -357,7 +357,7 @@ nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
}
static int
-nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+nfs_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs_linkargs arg = {
.fromfh = NFS_FH(inode),
@@ -456,7 +456,7 @@ out:
}
static int
-nfs_proc_rmdir(struct inode *dir, struct qstr *name)
+nfs_proc_rmdir(struct inode *dir, const struct qstr *name)
{
struct nfs_diropargs arg = {
.fh = NFS_FH(dir),
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2137e0202f25..18d446e1a82b 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1684,6 +1684,7 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
{
rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
unsigned int i;
+ int use_auth_null = false;
/*
* If the sec= mount option is used, the specified flavor or AUTH_NULL
@@ -1691,14 +1692,21 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
*
* AUTH_NULL has a special meaning when it's in the server list - it
* means that the server will ignore the rpc creds, so any flavor
- * can be used.
+ * can be used, while still using the sec= flavor that was specified.
*/
for (i = 0; i < count; i++) {
flavor = server_authlist[i];
- if (nfs_auth_info_match(&args->auth_info, flavor) ||
- flavor == RPC_AUTH_NULL)
+ if (nfs_auth_info_match(&args->auth_info, flavor))
goto out;
+
+ if (flavor == RPC_AUTH_NULL)
+ use_auth_null = true;
+ }
+
+ if (use_auth_null) {
+ flavor = RPC_AUTH_NULL;
+ goto out;
}
dfprintk(MOUNT,
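
The loop above now only records that AUTH_NULL was seen and falls back to it after the whole server list has been scanned, so an exact match with the requested sec= flavor always wins. The selection order, reduced to a user-space function (the flavor numbers are placeholders, not the RPC constants):

#include <stdio.h>

#define AUTH_NULL 0

static int pick_flavor(const int *server_list, int count, int wanted)
{
	int i, saw_null = 0;

	for (i = 0; i < count; i++) {
		if (server_list[i] == wanted)
			return wanted;		/* exact sec= match wins */
		if (server_list[i] == AUTH_NULL)
			saw_null = 1;		/* remember it, decide after the scan */
	}
	return saw_null ? AUTH_NULL : -1;	/* fall back, or no acceptable flavor */
}

int main(void)
{
	int list[] = { AUTH_NULL, 390003 /* a krb5 pseudoflavor, for illustration */ };

	printf("%d\n", pick_flavor(list, 2, 390003));	/* exact match, not AUTH_NULL */
	return 0;
}
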
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 1868246f56e6..191aa577dd1f 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -162,7 +162,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
* @dentry: dentry to unlink
*/
static int
-nfs_async_unlink(struct dentry *dentry, struct qstr *name)
+nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
{
struct nfs_unlinkdata *data;
int status = -ENOMEM;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 593fa21a02c0..3a6724c6eb5f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -625,7 +625,7 @@ static int nfs_writepage_locked(struct page *page,
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
- nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+ nfs_pageio_init_write(&pgio, inode, 0,
false, &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio, launder);
nfs_pageio_complete(&pgio);
@@ -657,16 +657,9 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- unsigned long *bitlock = &NFS_I(inode)->flags;
struct nfs_pageio_descriptor pgio;
int err;
- /* Stop dirtying of new pages while we sync */
- err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
- nfs_wait_bit_killable, TASK_KILLABLE);
- if (err)
- goto out_err;
-
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
@@ -674,10 +667,6 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
- clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
- smp_mb__after_atomic();
- wake_up_bit(bitlock, NFS_INO_FLUSHING);
-
if (err < 0)
goto out_err;
err = pgio.pg_error;
@@ -1195,9 +1184,11 @@ nfs_key_timeout_notify(struct file *filp, struct inode *inode)
/*
* Test if the open context credential key is marked to expire soon.
*/
-bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx)
+bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
- return rpcauth_cred_key_to_expire(ctx->cred);
+ struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
+
+ return rpcauth_cred_key_to_expire(auth, ctx->cred);
}
/*
@@ -1289,6 +1280,9 @@ int nfs_updatepage(struct file *file, struct page *page,
dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
file, count, (long long)(page_file_offset(page) + offset));
+ if (!count)
+ goto out;
+
if (nfs_can_extend_write(file, page, inode)) {
count = max(count + offset, nfs_page_length(page));
offset = 0;
@@ -1299,7 +1293,7 @@ int nfs_updatepage(struct file *file, struct page *page,
nfs_set_pageerror(page);
else
__set_page_dirty_nobuffers(page);
-
+out:
dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
status, (long long)i_size_read(inode));
return status;
@@ -1800,7 +1794,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */
- if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
+ if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
/* We have a match */
nfs_inode_remove_request(req);
dprintk(" OK\n");
@@ -1924,6 +1918,24 @@ out_mark_dirty:
EXPORT_SYMBOL_GPL(nfs_write_inode);
/*
+ * Wrapper for filemap_write_and_wait_range()
+ *
+ * Needed for pNFS in order to ensure data becomes visible to the
+ * client.
+ */
+int nfs_filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend)
+{
+ int ret;
+
+ ret = filemap_write_and_wait_range(mapping, lstart, lend);
+ if (ret == 0)
+ ret = pnfs_sync_inode(mapping->host, true);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
+
+/*
* flush the inode to disk.
*/
int nfs_wb_all(struct inode *inode)
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index c9f583d7bac8..47febcf99185 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -90,6 +90,7 @@ config NFSD_BLOCKLAYOUT
bool "NFSv4.1 server support for pNFS block layouts"
depends on NFSD_V4 && BLOCK
select NFSD_PNFS
+ select EXPORTFS_BLOCK_OPS
help
This option enables support for the exporting pNFS block layouts
in the kernel's NFS server. The pNFS block layout enables NFS
@@ -102,6 +103,7 @@ config NFSD_SCSILAYOUT
bool "NFSv4.1 server support for pNFS SCSI layouts"
depends on NFSD_V4 && BLOCK
select NFSD_PNFS
+ select EXPORTFS_BLOCK_OPS
help
This option enables support for the exporting pNFS SCSI layouts
in the kernel's NFS server. The pNFS SCSI layout enables NFS
@@ -111,6 +113,23 @@ config NFSD_SCSILAYOUT
If unsure, say N.
+config NFSD_FLEXFILELAYOUT
+ bool "NFSv4.1 server support for pNFS Flex File layouts"
+ depends on NFSD_V4
+ select NFSD_PNFS
+ help
+ This option enables support for the exporting pNFS Flex File
+ layouts in the kernel's NFS server. The pNFS Flex File layout
+ enables NFS clients to directly perform I/O to NFSv3 devices
+	  accessible to both the server and the clients.  See
+ draft-ietf-nfsv4-flex-files for more details.
+
+ Warning, this server implements the bare minimum functionality
+ to be a flex file server - it is for testing the client,
+ not for use in production.
+
+ If unsure, say N.
+
config NFSD_V4_SECURITY_LABEL
bool "Provide Security Label support for NFSv4 server"
depends on NFSD_V4 && SECURITY
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 3ae5f3c77e28..5f5d3a76980c 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -20,3 +20,4 @@ nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
+nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index ad2c05e80a83..5a1708441510 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -163,6 +163,7 @@ nfsd4_block_get_device_info_simple(struct super_block *sb,
static __be32
nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
+ struct svc_rqst *rqstp,
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdp)
{
@@ -355,6 +356,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
static __be32
nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
+ struct svc_rqst *rqstp,
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdp)
{
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 4ebaaf4b8d8a..ac6f54546fdd 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -44,7 +44,7 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
switch (b->type) {
case PNFS_BLOCK_VOLUME_SIMPLE:
- len = 4 + 4 + 8 + 4 + b->simple.sig_len;
+ len = 4 + 4 + 8 + 4 + (XDR_QUADLEN(b->simple.sig_len) << 2);
p = xdr_reserve_space(xdr, len);
if (!p)
return -ETOOSMALL;
@@ -55,7 +55,7 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len);
break;
case PNFS_BLOCK_VOLUME_SCSI:
- len = 4 + 4 + 4 + 4 + b->scsi.designator_len + 8;
+ len = 4 + 4 + 4 + 4 + (XDR_QUADLEN(b->scsi.designator_len) << 2) + 8;
p = xdr_reserve_space(xdr, len);
if (!p)
return -ETOOSMALL;
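
The two length fixes above account for XDR padding: opaque data is rounded up to a multiple of four bytes on the wire, so the space reserved for the volume signature or SCSI designator must use the rounded-up size. Illustration of the rounding:

#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* number of 4-byte XDR units */

int main(void)
{
	unsigned int sig_len = 5;		/* e.g. a 5-byte volume signature */

	printf("raw bytes %u, bytes on the wire %u\n",
	       sig_len, XDR_QUADLEN(sig_len) << 2);	/* 5 vs 8 */
	return 0;
}
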
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index b4d84b579f20..43e109cc0ccc 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -706,7 +706,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
- new->ex_layout_type = 0;
+ new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
}
@@ -731,7 +731,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
item->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = item->ex_fslocs.migrated;
item->ex_fslocs.migrated = 0;
- new->ex_layout_type = item->ex_layout_type;
+ new->ex_layout_types = item->ex_layout_types;
new->ex_nflavors = item->ex_nflavors;
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
@@ -954,6 +954,16 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
return 0;
}
+
+	/* If the compound op contains a spo_must_allow op,
+ * it will be sent with integrity/protection which
+ * will have to be expressly allowed on mounts that
+ * don't support it
+ */
+
+ if (nfsd4_spo_must_allow(rqstp))
+ return 0;
+
return nfserr_wrongsec;
}
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 2e315072bf3f..730f15eeb7ed 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -57,7 +57,7 @@ struct svc_export {
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
- enum pnfs_layouttype ex_layout_type;
+ u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
};
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
new file mode 100644
index 000000000000..df880e9fa71f
--- /dev/null
+++ b/fs/nfsd/flexfilelayout.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 Tom Haynes <loghyr@primarydata.com>
+ *
+ * The following implements a super-simple flex-file server
+ * where the NFSv4.1 mds is also the ds. And the storage is
+ * the same. I.e., writing to the mds via a NFSv4.1 WRITE
+ * goes to the same location as the NFSv3 WRITE.
+ */
+#include <linux/slab.h>
+
+#include <linux/nfsd/debug.h>
+
+#include <linux/sunrpc/addr.h>
+
+#include "flexfilelayoutxdr.h"
+#include "pnfs.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+static __be32
+nfsd4_ff_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
+ struct nfsd4_layoutget *args)
+{
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ u32 device_generation = 0;
+ int error;
+ uid_t u;
+
+ struct pnfs_ff_layout *fl;
+
+ /*
+ * The super simple flex file server has 1 mirror, 1 data server,
+ * and 1 file handle. So instead of 4 allocs, do 1 for now.
+ * Zero it out for the stateid - don't want junk in there!
+ */
+ error = -ENOMEM;
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ goto out_error;
+ args->lg_content = fl;
+
+ /*
+ * Avoid layout commit, try to force the I/O to the DS,
+ * and for fun, cause all IOMODE_RW layout segments to
+ * effectively be WRITE only.
+ */
+ fl->flags = FF_FLAGS_NO_LAYOUTCOMMIT | FF_FLAGS_NO_IO_THRU_MDS |
+ FF_FLAGS_NO_READ_IO;
+
+	/* Do not allow an IOMODE_READ segment to have write permissions */
+ if (seg->iomode == IOMODE_READ) {
+ u = from_kuid(&init_user_ns, inode->i_uid) + 1;
+ fl->uid = make_kuid(&init_user_ns, u);
+ } else
+ fl->uid = inode->i_uid;
+ fl->gid = inode->i_gid;
+
+ error = nfsd4_set_deviceid(&fl->deviceid, fhp, device_generation);
+ if (error)
+ goto out_error;
+
+ fl->fh.size = fhp->fh_handle.fh_size;
+ memcpy(fl->fh.data, &fhp->fh_handle.fh_base, fl->fh.size);
+
+ /* Give whole file layout segments */
+ seg->offset = 0;
+ seg->length = NFS4_MAX_UINT64;
+
+ dprintk("GET: 0x%llx:0x%llx %d\n", seg->offset, seg->length,
+ seg->iomode);
+ return 0;
+
+out_error:
+ seg->length = 0;
+ return nfserrno(error);
+}
+
+static __be32
+nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
+ struct nfs4_client *clp, struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_ff_device_addr *da;
+
+ u16 port;
+ char addr[INET6_ADDRSTRLEN];
+
+ da = kzalloc(sizeof(struct pnfs_ff_device_addr), GFP_KERNEL);
+ if (!da)
+ return nfserrno(-ENOMEM);
+
+ gdp->gd_device = da;
+
+ da->version = 3;
+ da->minor_version = 0;
+
+ da->rsize = svc_max_payload(rqstp);
+ da->wsize = da->rsize;
+
+ rpc_ntop((struct sockaddr *)&rqstp->rq_daddr,
+ addr, INET6_ADDRSTRLEN);
+ if (rqstp->rq_daddr.ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&rqstp->rq_daddr;
+ port = ntohs(sin->sin_port);
+ snprintf(da->netaddr.netid, FF_NETID_LEN + 1, "tcp");
+ da->netaddr.netid_len = 3;
+ } else {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&rqstp->rq_daddr;
+ port = ntohs(sin6->sin6_port);
+ snprintf(da->netaddr.netid, FF_NETID_LEN + 1, "tcp6");
+ da->netaddr.netid_len = 4;
+ }
+
+ da->netaddr.addr_len =
+ snprintf(da->netaddr.addr, FF_ADDR_LEN + 1,
+ "%s.%hhu.%hhu", addr, port >> 8, port & 0xff);
+
+ da->tightly_coupled = false;
+
+ return 0;
+}
+
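
The device address returned by nfsd4_ff_proc_getdeviceinfo() is the server's own address in universal address form: the netid ("tcp" or "tcp6") plus the presentation address with the port appended as two decimal octets, high byte first. The formatting step in isolation:

#include <stdio.h>

int main(void)
{
	const char *addr = "192.0.2.10";	/* example address */
	unsigned short port = 2049;
	char uaddr[64];

	snprintf(uaddr, sizeof(uaddr), "%s.%hhu.%hhu",
		 addr, port >> 8, port & 0xff);
	printf("netid tcp, uaddr %s\n", uaddr);	/* 192.0.2.10.8.1 */
	return 0;
}
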
+const struct nfsd4_layout_ops ff_layout_ops = {
+ .notify_types =
+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+ .proc_getdeviceinfo = nfsd4_ff_proc_getdeviceinfo,
+ .encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
+ .proc_layoutget = nfsd4_ff_proc_layoutget,
+ .encode_layoutget = nfsd4_ff_encode_layoutget,
+};
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
new file mode 100644
index 000000000000..5e3fd7fc1a9f
--- /dev/null
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2016 Tom Haynes <loghyr@primarydata.com>
+ */
+#include <linux/sunrpc/svc.h>
+#include <linux/nfs4.h>
+
+#include "nfsd.h"
+#include "flexfilelayoutxdr.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+struct ff_idmap {
+ char buf[11];
+ int len;
+};
+
+__be32
+nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp)
+{
+ struct pnfs_ff_layout *fl = lgp->lg_content;
+ int len, mirror_len, ds_len, fh_len;
+ __be32 *p;
+
+ /*
+ * Unlike nfsd4_encode_user, we know these will
+ * always be stringified.
+ */
+ struct ff_idmap uid;
+ struct ff_idmap gid;
+
+ fh_len = 4 + fl->fh.size;
+
+ uid.len = sprintf(uid.buf, "%u", from_kuid(&init_user_ns, fl->uid));
+ gid.len = sprintf(gid.buf, "%u", from_kgid(&init_user_ns, fl->gid));
+
+ /* 8 + len for recording the length, name, and padding */
+ ds_len = 20 + sizeof(stateid_opaque_t) + 4 + fh_len +
+ 8 + uid.len + 8 + gid.len;
+
+ mirror_len = 4 + ds_len;
+
+ /* The layout segment */
+ len = 20 + mirror_len;
+
+ p = xdr_reserve_space(xdr, sizeof(__be32) + len);
+ if (!p)
+ return nfserr_toosmall;
+
+ *p++ = cpu_to_be32(len);
+ p = xdr_encode_hyper(p, 0); /* stripe unit of 1 */
+
+ *p++ = cpu_to_be32(1); /* single mirror */
+ *p++ = cpu_to_be32(1); /* single data server */
+
+ p = xdr_encode_opaque_fixed(p, &fl->deviceid,
+ sizeof(struct nfsd4_deviceid));
+
+ *p++ = cpu_to_be32(1); /* efficiency */
+
+ *p++ = cpu_to_be32(fl->stateid.si_generation);
+ p = xdr_encode_opaque_fixed(p, &fl->stateid.si_opaque,
+ sizeof(stateid_opaque_t));
+
+ *p++ = cpu_to_be32(1); /* single file handle */
+ p = xdr_encode_opaque(p, fl->fh.data, fl->fh.size);
+
+ p = xdr_encode_opaque(p, uid.buf, uid.len);
+ p = xdr_encode_opaque(p, gid.buf, gid.len);
+
+ *p++ = cpu_to_be32(fl->flags);
+ *p++ = cpu_to_be32(0); /* No stats collect hint */
+
+ return 0;
+}
+
+__be32
+nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_ff_device_addr *da = gdp->gd_device;
+ int len;
+ int ver_len;
+ int addr_len;
+ __be32 *p;
+
+ /* len + padding for two strings */
+ addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
+ ver_len = 20;
+
+ len = 4 + ver_len + 4 + addr_len;
+
+ p = xdr_reserve_space(xdr, len + sizeof(__be32));
+ if (!p)
+ return nfserr_resource;
+
+ /*
+ * Fill in the overall length and number of volumes at the beginning
+ * of the layout.
+ */
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(1); /* 1 netaddr */
+ p = xdr_encode_opaque(p, da->netaddr.netid, da->netaddr.netid_len);
+ p = xdr_encode_opaque(p, da->netaddr.addr, da->netaddr.addr_len);
+
+	*p++ = cpu_to_be32(1);			/* 1 version */
+
+ *p++ = cpu_to_be32(da->version);
+ *p++ = cpu_to_be32(da->minor_version);
+ *p++ = cpu_to_be32(da->rsize);
+ *p++ = cpu_to_be32(da->wsize);
+ *p++ = cpu_to_be32(da->tightly_coupled);
+
+ return 0;
+}
diff --git a/fs/nfsd/flexfilelayoutxdr.h b/fs/nfsd/flexfilelayoutxdr.h
new file mode 100644
index 000000000000..467defd4e563
--- /dev/null
+++ b/fs/nfsd/flexfilelayoutxdr.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 Tom Haynes <loghyr@primarydata.com>
+ */
+#ifndef _NFSD_FLEXFILELAYOUTXDR_H
+#define _NFSD_FLEXFILELAYOUTXDR_H 1
+
+#include <linux/inet.h>
+#include "xdr4.h"
+
+#define FF_FLAGS_NO_LAYOUTCOMMIT 1
+#define FF_FLAGS_NO_IO_THRU_MDS 2
+#define FF_FLAGS_NO_READ_IO 4
+
+struct xdr_stream;
+
+#define FF_NETID_LEN (4)
+#define FF_ADDR_LEN (INET6_ADDRSTRLEN + 8)
+struct pnfs_ff_netaddr {
+ char netid[FF_NETID_LEN + 1];
+ char addr[FF_ADDR_LEN + 1];
+ u32 netid_len;
+ u32 addr_len;
+};
+
+struct pnfs_ff_device_addr {
+ struct pnfs_ff_netaddr netaddr;
+ u32 version;
+ u32 minor_version;
+ u32 rsize;
+ u32 wsize;
+ bool tightly_coupled;
+};
+
+struct pnfs_ff_layout {
+ u32 flags;
+ u32 stats_collect_hint;
+ kuid_t uid;
+ kgid_t gid;
+ struct nfsd4_deviceid deviceid;
+ stateid_t stateid;
+ struct nfs_fh fh;
+};
+
+__be32 nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp);
+__be32 nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp);
+
+#endif /* _NFSD_FLEXFILELAYOUTXDR_H */
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 953c0755cb37..2be9602b0221 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -27,6 +27,9 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;
const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
+#ifdef CONFIG_NFSD_FLEXFILELAYOUT
+ [LAYOUT_FLEX_FILES] = &ff_layout_ops,
+#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
[LAYOUT_BLOCK_VOLUME] = &bl_layout_ops,
#endif
@@ -122,28 +125,35 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
void nfsd4_setup_layout_type(struct svc_export *exp)
{
+#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
struct super_block *sb = exp->ex_path.mnt->mnt_sb;
+#endif
if (!(exp->ex_flags & NFSEXP_PNFS))
return;
/*
- * Check if the file system supports exporting a block-like layout.
+	 * If flex file is configured, advertise it by default. Also check
+	 * whether the file system supports exporting a block-like layout.
* If the block device supports reservations prefer the SCSI layout,
* otherwise advertise the block layout.
*/
+#ifdef CONFIG_NFSD_FLEXFILELAYOUT
+ exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
+#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
+	/* a block layout may be advertised in addition to flex file */
if (sb->s_export_op->get_uuid &&
sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks)
- exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
+ exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
/* overwrite block layout selection if needed */
if (sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks &&
sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
- exp->ex_layout_type = LAYOUT_SCSI;
+ exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}
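
Since ex_layout_type became the bitmask ex_layout_types, an export can advertise several pNFS layout types at once: the flex file bit is set by default when configured, and the block or SCSI bits are ORed in on top. A toy example of the bitmask handling; the enum values are assumed to match enum pnfs_layouttype:

#include <stdio.h>

enum { LAYOUT_NFSV4_1_FILES = 1, LAYOUT_BLOCK_VOLUME = 3, LAYOUT_FLEX_FILES = 4 };

int main(void)
{
	unsigned int types = 0;

	types |= 1 << LAYOUT_FLEX_FILES;	/* default when configured */
	types |= 1 << LAYOUT_BLOCK_VOLUME;	/* file system also supports block */

	printf("supports flex files: %d\n", !!(types & (1 << LAYOUT_FLEX_FILES)));
	printf("supports block:      %d\n", !!(types & (1 << LAYOUT_BLOCK_VOLUME)));
	return 0;
}
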
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index de1ff1d98bb1..1fb222752b2b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -605,8 +605,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fh_init(&resfh, NFS4_FHSIZE);
- status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR,
- NFSD_MAY_CREATE);
+ status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_NOP);
if (status)
return status;
@@ -1219,12 +1218,12 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
{
- if (!exp->ex_layout_type) {
+ if (!exp->ex_layout_types) {
dprintk("%s: export does not support pNFS\n", __func__);
return NULL;
}
- if (exp->ex_layout_type != layout_type) {
+ if (!(exp->ex_layout_types & (1 << layout_type))) {
dprintk("%s: layout type %d not supported\n",
__func__, layout_type);
return NULL;
@@ -1270,7 +1269,7 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
- cstate->session->se_client, gdp);
+ rqstp, cstate->session->se_client, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
@@ -2335,6 +2334,45 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
};
+/**
+ * nfsd4_spo_must_allow - Determine if the compound op contains an
+ * operation that is allowed to be sent with machine credentials
+ *
+ * @rqstp: a pointer to the struct svc_rqst
+ *
+ * Checks to see if the compound contains a spo_must_allow op
+ * and confirms that it was sent with the proper machine creds.
+ */
+
+bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
+{
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+ struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
+ struct nfsd4_compound_state *cstate = &resp->cstate;
+ struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
+ u32 opiter;
+
+ if (!cstate->minorversion)
+ return false;
+
+ if (cstate->spo_must_allowed == true)
+ return true;
+
+ opiter = resp->opcnt;
+ while (opiter < argp->opcnt) {
+ this = &argp->ops[opiter++];
+ if (test_bit(this->opnum, allow->u.longs) &&
+ cstate->clp->cl_mach_cred &&
+ nfsd4_mach_creds_match(cstate->clp, rqstp)) {
+ cstate->spo_must_allowed = true;
+ return true;
+ }
+ }
+ cstate->spo_must_allowed = false;
+ return false;
+}
+
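
nfsd4_spo_must_allow() walks the remaining operations of the compound and tests each opcode against the client's cl_spo_must_allow bitmap, an nfs4_op_map backed by unsigned longs. The bitmap test on its own, in user space; the opcode values are assumptions made for the example:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define OP_CLOSE	4	/* NFSv4 opcode, assumed for the example */
#define OP_DELEGRETURN	8

static int test_op(const unsigned long *map, unsigned int op)
{
	return (map[op / BITS_PER_LONG] >> (op % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long allow[2] = { 0, 0 };

	allow[OP_CLOSE / BITS_PER_LONG] |= 1UL << (OP_CLOSE % BITS_PER_LONG);

	printf("OP_CLOSE allowed: %d\n", test_op(allow, OP_CLOSE));		/* 1 */
	printf("OP_DELEGRETURN allowed: %d\n", test_op(allow, OP_DELEGRETURN));	/* 0 */
	return 0;
}
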
int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
struct nfsd4_operation *opdesc;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 70d0b9b33031..8410ca275db1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1200,27 +1200,6 @@ free_ol_stateid_reaplist(struct list_head *reaplist)
}
}
-static void release_lockowner(struct nfs4_lockowner *lo)
-{
- struct nfs4_client *clp = lo->lo_owner.so_client;
- struct nfs4_ol_stateid *stp;
- struct list_head reaplist;
-
- INIT_LIST_HEAD(&reaplist);
-
- spin_lock(&clp->cl_lock);
- unhash_lockowner_locked(lo);
- while (!list_empty(&lo->lo_owner.so_stateids)) {
- stp = list_first_entry(&lo->lo_owner.so_stateids,
- struct nfs4_ol_stateid, st_perstateowner);
- WARN_ON(!unhash_lock_stateid(stp));
- put_ol_stateid_locked(stp, &reaplist);
- }
- spin_unlock(&clp->cl_lock);
- free_ol_stateid_reaplist(&reaplist);
- nfs4_put_stateowner(&lo->lo_owner);
-}
-
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
struct list_head *reaplist)
{
@@ -1972,7 +1951,7 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
service == RPC_GSS_SVC_PRIVACY;
}
-static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
+bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
struct svc_cred *cr = &rqstp->rq_cred;
@@ -2388,6 +2367,22 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
switch (exid->spa_how) {
case SP4_MACH_CRED:
+ exid->spo_must_enforce[0] = 0;
+ exid->spo_must_enforce[1] = (
+ 1 << (OP_BIND_CONN_TO_SESSION - 32) |
+ 1 << (OP_EXCHANGE_ID - 32) |
+ 1 << (OP_CREATE_SESSION - 32) |
+ 1 << (OP_DESTROY_SESSION - 32) |
+ 1 << (OP_DESTROY_CLIENTID - 32));
+
+ exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
+ 1 << (OP_OPEN_DOWNGRADE) |
+ 1 << (OP_LOCKU) |
+ 1 << (OP_DELEGRETURN));
+
+ exid->spo_must_allow[1] &= (
+ 1 << (OP_TEST_STATEID - 32) |
+ 1 << (OP_FREE_STATEID - 32));
if (!svc_rqst_integrity_protected(rqstp)) {
status = nfserr_inval;
goto out_nolock;
@@ -2424,7 +2419,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
status = nfserr_inval;
goto out;
}
- if (!mach_creds_match(conf, rqstp)) {
+ if (!nfsd4_mach_creds_match(conf, rqstp)) {
status = nfserr_wrong_cred;
goto out;
}
@@ -2473,6 +2468,8 @@ out_new:
goto out;
}
new->cl_minorversion = cstate->minorversion;
+ new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
+ new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
gen_clid(new, nn);
add_to_unconfirmed(new);
@@ -2676,7 +2673,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
if (conf) {
status = nfserr_wrong_cred;
- if (!mach_creds_match(conf, rqstp))
+ if (!nfsd4_mach_creds_match(conf, rqstp))
goto out_free_conn;
cs_slot = &conf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
@@ -2692,7 +2689,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out_free_conn;
}
status = nfserr_wrong_cred;
- if (!mach_creds_match(unconf, rqstp))
+ if (!nfsd4_mach_creds_match(unconf, rqstp))
goto out_free_conn;
cs_slot = &unconf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
@@ -2801,7 +2798,7 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
if (!session)
goto out_no_session;
status = nfserr_wrong_cred;
- if (!mach_creds_match(session->se_client, rqstp))
+ if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
@@ -2848,7 +2845,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
if (!ses)
goto out_client_lock;
status = nfserr_wrong_cred;
- if (!mach_creds_match(ses->se_client, r))
+ if (!nfsd4_mach_creds_match(ses->se_client, r))
goto out_put_session;
status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
if (status)
@@ -3087,7 +3084,7 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
status = nfserr_stale_clientid;
goto out;
}
- if (!mach_creds_match(clp, rqstp)) {
+ if (!nfsd4_mach_creds_match(clp, rqstp)) {
clp = NULL;
status = nfserr_wrong_cred;
goto out;
@@ -3112,7 +3109,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
* We don't take advantage of the rca_one_fs case.
* That's OK, it's optional, we can safely ignore it.
*/
- return nfs_ok;
+ return nfs_ok;
}
status = nfserr_complete_already;
@@ -5945,6 +5942,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct nfs4_client *clp;
+ LIST_HEAD (reaplist);
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
@@ -5975,9 +5973,23 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
nfs4_get_stateowner(sop);
break;
}
+ if (!lo) {
+ spin_unlock(&clp->cl_lock);
+ return status;
+ }
+
+ unhash_lockowner_locked(lo);
+ while (!list_empty(&lo->lo_owner.so_stateids)) {
+ stp = list_first_entry(&lo->lo_owner.so_stateids,
+ struct nfs4_ol_stateid,
+ st_perstateowner);
+ WARN_ON(!unhash_lock_stateid(stp));
+ put_ol_stateid_locked(stp, &reaplist);
+ }
spin_unlock(&clp->cl_lock);
- if (lo)
- release_lockowner(lo);
+ free_ol_stateid_reaplist(&reaplist);
+ nfs4_put_stateowner(&lo->lo_owner);
+
return status;
}
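
Folding release_lockowner() into nfsd4_release_lockowner() keeps the whole unhash under a single hold of cl_lock; the lock stateids are collected on a local reaplist and only freed after the lock is dropped. The same collect-then-free pattern in a self-contained form (a plain mutex and list stand in for cl_lock and the stateid lists):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct stateid { struct stateid *next; int id; };

struct owner {
	pthread_mutex_t lock;
	struct stateid *stateids;
};

int main(void)
{
	struct owner lo = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct stateid *reaplist = NULL, *sp;
	int i;

	for (i = 0; i < 3; i++) {		/* set up some state to release */
		sp = malloc(sizeof(*sp));
		sp->id = i;
		sp->next = lo.stateids;
		lo.stateids = sp;
	}

	pthread_mutex_lock(&lo.lock);		/* unhash everything under the lock ... */
	reaplist = lo.stateids;
	lo.stateids = NULL;
	pthread_mutex_unlock(&lo.lock);

	while (reaplist) {			/* ... free it after dropping the lock */
		sp = reaplist;
		reaplist = sp->next;
		printf("freeing stateid %d\n", sp->id);
		free(sp);
	}
	return 0;
}
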
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 9df898ba648f..0aa0236a1429 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1299,16 +1299,14 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
break;
case SP4_MACH_CRED:
/* spo_must_enforce */
- READ_BUF(4);
- dummy = be32_to_cpup(p++);
- READ_BUF(dummy * 4);
- p += dummy;
-
+ status = nfsd4_decode_bitmap(argp,
+ exid->spo_must_enforce);
+ if (status)
+ goto out;
/* spo_must_allow */
- READ_BUF(4);
- dummy = be32_to_cpup(p++);
- READ_BUF(dummy * 4);
- p += dummy;
+ status = nfsd4_decode_bitmap(argp, exid->spo_must_allow);
+ if (status)
+ goto out;
break;
case SP4_SSV:
/* ssp_ops */
@@ -2164,22 +2162,20 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
}
static inline __be32
-nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
+nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
{
- __be32 *p;
+ __be32 *p;
+ unsigned long i = hweight_long(layout_types);
- if (layout_type) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1);
- *p++ = cpu_to_be32(layout_type);
- } else {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0);
- }
+ p = xdr_reserve_space(xdr, 4 + 4 * i);
+ if (!p)
+ return nfserr_resource;
+
+ *p++ = cpu_to_be32(i);
+
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (layout_types & (1 << i))
+ *p++ = cpu_to_be32(i);
return 0;
}
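
The encoder above derives the array length from the population count of the layout-type bitmask and then emits one uint32 per set bit, in ascending type order. A condensed version of the same loop, using the GCC popcount builtin in place of hweight_long() and assumed type numbers:

#include <stdio.h>

int main(void)
{
	unsigned int layout_types = (1 << 1) | (1 << 4);	/* files + flex files */
	unsigned int i, count = __builtin_popcount(layout_types);

	printf("count %u:", count);
	for (i = 1; i < 32; i++)
		if (layout_types & (1u << i))
			printf(" %u", i);	/* one uint32 per advertised type */
	printf("\n");
	return 0;
}
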
@@ -2754,13 +2750,13 @@ out_acl:
}
#ifdef CONFIG_NFSD_PNFS
if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+ status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+ status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
@@ -3867,14 +3863,6 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
return nfserr;
}
-static const u32 nfs4_minimal_spo_must_enforce[2] = {
- [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
- 1 << (OP_EXCHANGE_ID - 32) |
- 1 << (OP_CREATE_SESSION - 32) |
- 1 << (OP_DESTROY_SESSION - 32) |
- 1 << (OP_DESTROY_CLIENTID - 32)
-};
-
static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_exchange_id *exid)
@@ -3885,6 +3873,7 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
char *server_scope;
int major_id_sz;
int server_scope_sz;
+ int status = 0;
uint64_t minor_id = 0;
if (nfserr)
@@ -3913,18 +3902,20 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
case SP4_NONE:
break;
case SP4_MACH_CRED:
- /* spo_must_enforce, spo_must_allow */
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- return nfserr_resource;
-
/* spo_must_enforce bitmap: */
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(nfs4_minimal_spo_must_enforce[0]);
- *p++ = cpu_to_be32(nfs4_minimal_spo_must_enforce[1]);
- /* empty spo_must_allow bitmap: */
- *p++ = cpu_to_be32(0);
-
+ status = nfsd4_encode_bitmap(xdr,
+ exid->spo_must_enforce[0],
+ exid->spo_must_enforce[1],
+ exid->spo_must_enforce[2]);
+ if (status)
+ goto out;
+ /* spo_must_allow bitmap: */
+ status = nfsd4_encode_bitmap(xdr,
+ exid->spo_must_allow[0],
+ exid->spo_must_allow[1],
+ exid->spo_must_allow[2]);
+ if (status)
+ goto out;
break;
default:
WARN_ON_ONCE(1);
@@ -3951,6 +3942,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
/* Implementation id */
*p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
return 0;
+out:
+ return status;
}
static __be32
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index cf980523898b..9446849888d5 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -124,6 +124,7 @@ void nfs4_state_shutdown_net(struct net *net);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
char * nfs4_recoverydir(void);
+bool nfsd4_spo_must_allow(struct svc_rqst *rqstp);
#else
static inline int nfsd4_init_slabs(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
@@ -134,6 +135,10 @@ static inline void nfs4_state_shutdown_net(struct net *net) { }
static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
static inline char * nfs4_recoverydir(void) {return NULL; }
+static inline bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
+{
+ return false;
+}
#endif
/*
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index a8919444c460..cfe7500d5847 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -59,14 +59,20 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, umode_t requested)
+nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
+ umode_t requested)
{
- mode &= S_IFMT;
+ umode_t mode = d_inode(dentry)->i_mode & S_IFMT;
if (requested == 0) /* the caller doesn't care */
return nfs_ok;
- if (mode == requested)
+ if (mode == requested) {
+ if (mode == S_IFDIR && !d_can_lookup(dentry)) {
+ WARN_ON_ONCE(1);
+ return nfserr_notdir;
+ }
return nfs_ok;
+ }
/*
* v4 has an error more specific than err_notdir which we should
* return in preference to err_notdir:
@@ -298,7 +304,7 @@ out:
* that it expects something not of the given type.
*
* @access is formed from the NFSD_MAY_* constants defined in
- * include/linux/nfsd/nfsd.h.
+ * fs/nfsd/vfs.h.
*/
__be32
fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
@@ -340,7 +346,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
if (error)
goto out;
- error = nfsd_mode_check(rqstp, d_inode(dentry)->i_mode, type);
+ error = nfsd_mode_check(rqstp, dentry, type);
if (error)
goto out;
@@ -533,7 +539,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
* the reference filehandle (if it is in the same export)
* or the export options.
*/
- set_version_and_fsid_type(fhp, exp, ref_fh);
+ set_version_and_fsid_type(fhp, exp, ref_fh);
if (ref_fh == fhp)
fh_put(ref_fh);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 4cd78ef4c95c..e9214768cde9 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -251,9 +251,6 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
/* Check for NFSD_MAY_WRITE in nfsd_create if necessary */
- nfserr = nfserr_acces;
- if (!argp->len)
- goto done;
nfserr = nfserr_exist;
if (isdotent(argp->name, argp->len))
goto done;
@@ -362,8 +359,8 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
nfserr = 0;
if (!inode) {
/* File doesn't exist. Create it and set attrs */
- nfserr = nfsd_create(rqstp, dirfhp, argp->name, argp->len,
- attr, type, rdev, newfhp);
+ nfserr = nfsd_create_locked(rqstp, dirfhp, argp->name,
+ argp->len, attr, type, rdev, newfhp);
} else if (type == S_IFREG) {
dprintk("nfsd: existing %s, valid=%x, size=%ld\n",
argp->name, attr->ia_valid, (long) attr->ia_size);
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 79d964aa8079..41b468a6a90f 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -240,7 +240,7 @@ nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
- return xdr_argsize_check(rqstp, p);
+ return xdr_argsize_check(rqstp, p);
}
int
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 7d073b9b1553..0c2a716e8741 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -21,6 +21,7 @@ struct nfsd4_layout_ops {
u32 notify_types;
__be32 (*proc_getdeviceinfo)(struct super_block *sb,
+ struct svc_rqst *rqstp,
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdevp);
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
@@ -44,6 +45,9 @@ extern const struct nfsd4_layout_ops bl_layout_ops;
#ifdef CONFIG_NFSD_SCSILAYOUT
extern const struct nfsd4_layout_ops scsi_layout_ops;
#endif
+#ifdef CONFIG_NFSD_FLEXFILELAYOUT
+extern const struct nfsd4_layout_ops ff_layout_ops;
+#endif
__be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, stateid_t *stateid,
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 64053eadeb81..b95adf9a1595 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -345,6 +345,7 @@ struct nfs4_client {
u32 cl_exchange_flags;
/* number of rpc's in progress over an associated session: */
atomic_t cl_refcount;
+ struct nfs4_op_map cl_spo_must_allow;
/* for nfs41 callbacks */
/* We currently support a single back channel with a single slot */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6fbd81ecb410..ba944123167b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1135,96 +1135,37 @@ nfsd_check_ignore_resizing(struct iattr *iap)
iap->ia_valid &= ~ATTR_SIZE;
}
-/*
- * Create a file (regular, directory, device, fifo); UNIX sockets
- * not yet implemented.
- * If the response fh has been verified, the parent directory should
- * already be locked. Note that the parent directory is left locked.
- *
- * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
- */
+/* The parent directory should already be locked: */
__be32
-nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
int type, dev_t rdev, struct svc_fh *resfhp)
{
- struct dentry *dentry, *dchild = NULL;
+ struct dentry *dentry, *dchild;
struct inode *dirp;
__be32 err;
__be32 err2;
int host_err;
- err = nfserr_perm;
- if (!flen)
- goto out;
- err = nfserr_exist;
- if (isdotent(fname, flen))
- goto out;
-
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
- if (err)
- goto out;
-
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
- err = nfserr_notdir;
- if (!dirp->i_op->lookup)
- goto out;
- /*
- * Check whether the response file handle has been verified yet.
- * If it has, the parent directory should already be locked.
- */
- if (!resfhp->fh_dentry) {
- host_err = fh_want_write(fhp);
- if (host_err)
- goto out_nfserr;
-
- /* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
- fh_lock_nested(fhp, I_MUTEX_PARENT);
- dchild = lookup_one_len(fname, dentry, flen);
- host_err = PTR_ERR(dchild);
- if (IS_ERR(dchild))
- goto out_nfserr;
- err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
- if (err)
- goto out;
- } else {
- /* called from nfsd_proc_create */
- dchild = dget(resfhp->fh_dentry);
- if (!fhp->fh_locked) {
- /* not actually possible */
- printk(KERN_ERR
- "nfsd_create: parent %pd2 not locked!\n",
+ dchild = dget(resfhp->fh_dentry);
+ if (!fhp->fh_locked) {
+ WARN_ONCE(1, "nfsd_create: parent %pd2 not locked!\n",
dentry);
- err = nfserr_io;
- goto out;
- }
- }
- /*
- * Make sure the child dentry is still negative ...
- */
- err = nfserr_exist;
- if (d_really_is_positive(dchild)) {
- dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
- dentry, dchild);
- goto out;
+ err = nfserr_io;
+ goto out;
}
+ err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
+ if (err)
+ goto out;
+
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
- err = nfserr_inval;
- if (!S_ISREG(type) && !S_ISDIR(type) && !special_file(type)) {
- printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
- type);
- goto out;
- }
-
- /*
- * Get the dir op function pointer.
- */
err = 0;
host_err = 0;
switch (type) {
@@ -1242,6 +1183,10 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
case S_IFSOCK:
host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
break;
+ default:
+ printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
+ type);
+ host_err = -EINVAL;
}
if (host_err < 0)
goto out_nfserr;
@@ -1251,7 +1196,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
/*
* nfsd_create_setattr already committed the child. Transactional
* filesystems had a chance to commit changes for both parent and
- * child * simultaneously making the following commit_metadata a
+ * child simultaneously making the following commit_metadata a
* noop.
*/
err2 = nfserrno(commit_metadata(fhp));
@@ -1263,8 +1208,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!err)
err = fh_update(resfhp);
out:
- if (dchild && !IS_ERR(dchild))
- dput(dchild);
+ dput(dchild);
return err;
out_nfserr:
@@ -1272,6 +1216,50 @@ out_nfserr:
goto out;
}
+/*
+ * Create a filesystem object (regular, directory, special).
+ * Note that the parent directory is left locked.
+ *
+ * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
+ */
+__be32
+nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ char *fname, int flen, struct iattr *iap,
+ int type, dev_t rdev, struct svc_fh *resfhp)
+{
+ struct dentry *dentry, *dchild = NULL;
+ struct inode *dirp;
+ __be32 err;
+ int host_err;
+
+ if (isdotent(fname, flen))
+ return nfserr_exist;
+
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
+ if (err)
+ return err;
+
+ dentry = fhp->fh_dentry;
+ dirp = d_inode(dentry);
+
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ return nfserrno(host_err);
+
+ fh_lock_nested(fhp, I_MUTEX_PARENT);
+ dchild = lookup_one_len(fname, dentry, flen);
+ host_err = PTR_ERR(dchild);
+ if (IS_ERR(dchild))
+ return nfserrno(host_err);
+ err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
+ if (err) {
+ dput(dchild);
+ return err;
+ }
+ return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
+ rdev, resfhp);
+}
+
#ifdef CONFIG_NFSD_V3
/*
@@ -1304,12 +1292,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
- /* Get all the sanity checks out of the way before
- * we lock the parent. */
- err = nfserr_notdir;
- if (!dirp->i_op->lookup)
- goto out;
-
host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
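The hunks above split nfsd_create() into nfsd_create_locked(), which assumes the parent directory is already locked by the caller, and a new nfsd_create() wrapper that does fh_verify(), takes the parent lock, looks up the child and only then delegates. A minimal userspace sketch of that locked/unlocked split convention follows; the dir_create()/dir_create_locked() names and the pthread mutex are illustrative stand-ins, not nfsd code.

/* Minimal userspace sketch of the "_locked" split convention applied above.
 * All names here are hypothetical illustrations, not nfsd symbols. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

/* Assumes the caller already holds dir_lock (like nfsd_create_locked). */
static int dir_create_locked(const char *name)
{
	printf("creating %s with parent locked\n", name);
	return 0;
}

/* Takes the lock itself, then delegates (like the new nfsd_create wrapper). */
static int dir_create(const char *name)
{
	int err;

	pthread_mutex_lock(&dir_lock);
	err = dir_create_locked(name);
	/* The real nfsd_create leaves the parent locked for its callers;
	 * this demo simply releases it before returning. */
	pthread_mutex_unlock(&dir_lock);
	return err;
}

int main(void)
{
	return dir_create("example");
}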
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 2d573ec057f8..3cbb1b33777b 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -59,6 +59,9 @@ __be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
__be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
u64, u64);
#endif /* CONFIG_NFSD_V4 */
+__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, struct iattr *attrs,
+ int type, dev_t rdev, struct svc_fh *res);
__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
int type, dev_t rdev, struct svc_fh *res);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index d9554813e58a..beea0c5edc51 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -59,6 +59,7 @@ struct nfsd4_compound_state {
struct nfsd4_session *session;
struct nfsd4_slot *slot;
int data_offset;
+ bool spo_must_allowed;
size_t iovlen;
u32 minorversion;
__be32 status;
@@ -403,6 +404,8 @@ struct nfsd4_exchange_id {
clientid_t clientid;
u32 seqid;
int spa_how;
+ u32 spo_must_enforce[3];
+ u32 spo_must_allow[3];
};
struct nfsd4_sequence {
@@ -654,6 +657,8 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
}
+
+bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
struct nfsd4_compoundargs *);
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 1a85d94f5b25..2c90e285d7c6 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -622,10 +622,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
lock = nilfs_mdt_bgl_lock(inode, group);
if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
- nilfs_warning(inode->i_sb, __func__,
- "entry number %llu already freed: ino=%lu",
- (unsigned long long)req->pr_entry_nr,
- (unsigned long)inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): entry number %llu already freed",
+ __func__, inode->i_ino,
+ (unsigned long long)req->pr_entry_nr);
else
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
@@ -663,10 +663,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
lock = nilfs_mdt_bgl_lock(inode, group);
if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
- nilfs_warning(inode->i_sb, __func__,
- "entry number %llu already freed: ino=%lu",
- (unsigned long long)req->pr_entry_nr,
- (unsigned long)inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): entry number %llu already freed",
+ __func__, inode->i_ino,
+ (unsigned long long)req->pr_entry_nr);
else
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
@@ -772,10 +772,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
do {
if (!nilfs_clear_bit_atomic(lock, group_offset,
bitmap)) {
- nilfs_warning(inode->i_sb, __func__,
- "entry number %llu already freed: ino=%lu",
- (unsigned long long)entry_nrs[j],
- (unsigned long)inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): entry number %llu already freed",
+ __func__, inode->i_ino,
+ (unsigned long long)entry_nrs[j]);
} else {
n++;
}
@@ -816,12 +816,11 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
for (k = 0; k < nempties; k++) {
ret = nilfs_palloc_delete_entry_block(inode,
last_nrs[k]);
- if (ret && ret != -ENOENT) {
- nilfs_warning(inode->i_sb, __func__,
- "failed to delete block of entry %llu: ino=%lu, err=%d",
- (unsigned long long)last_nrs[k],
- (unsigned long)inode->i_ino, ret);
- }
+ if (ret && ret != -ENOENT)
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "error %d deleting block that object (entry=%llu, ino=%lu) belongs to",
+ ret, (unsigned long long)last_nrs[k],
+ inode->i_ino);
}
desc_kaddr = kmap_atomic(desc_bh->b_page);
@@ -835,12 +834,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
if (nfree == nilfs_palloc_entries_per_group(inode)) {
ret = nilfs_palloc_delete_bitmap_block(inode, group);
- if (ret && ret != -ENOENT) {
- nilfs_warning(inode->i_sb, __func__,
- "failed to delete bitmap block of group %lu: ino=%lu, err=%d",
- group,
- (unsigned long)inode->i_ino, ret);
- }
+ if (ret && ret != -ENOENT)
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "error %d deleting bitmap block of group=%lu, ino=%lu",
+ ret, group, inode->i_ino);
}
}
return 0;
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index f2a7877e0c8c..01fb1831ca25 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -41,8 +41,8 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
struct inode *inode = bmap->b_inode;
if (err == -EINVAL) {
- nilfs_error(inode->i_sb, fname,
- "broken bmap (inode number=%lu)", inode->i_ino);
+ __nilfs_error(inode->i_sb, fname,
+ "broken bmap (inode number=%lu)", inode->i_ino);
err = -EIO;
}
return err;
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index b6a4c8f93ac8..2b6ffbe5997a 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -22,7 +22,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_ondisk.h> /* nilfs_binfo, nilfs_inode, etc */
#include "alloc.h"
#include "dat.h"
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 4cca998ec7a0..d5c23da43513 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -41,7 +41,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
struct inode *inode = NILFS_BTNC_I(btnc);
struct buffer_head *bh;
- bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
if (unlikely(!bh))
return NULL;
@@ -70,7 +70,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
struct page *page;
int err;
- bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
if (unlikely(!bh))
return -ENOMEM;
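The two hunks above replace the open-coded "1 << BH_NILFS_Node" masks with BIT(BH_NILFS_Node); in the kernel BIT(nr) is defined in <linux/bits.h> as (1UL << (nr)), so the conversion is cosmetic apart from making the shift unsigned long. A small standalone check of that equivalence, using an illustrative bit number rather than the real BH_NILFS_Node value:

/* Userspace check that BIT(n) is just a readability wrapper for 1UL << n.
 * BIT() normally comes from <linux/bits.h>; it is defined locally here so
 * the snippet compiles outside the kernel tree. */
#include <assert.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

enum { BH_NILFS_Node = 7 };	/* illustrative value, not the real bit index */

int main(void)
{
	unsigned long old_style = 1 << BH_NILFS_Node;
	unsigned long new_style = BIT(BH_NILFS_Node);

	assert(old_style == new_style);
	printf("bit %d -> mask 0x%lx\n", BH_NILFS_Node, new_style);
	return 0;
}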
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 982d1e3df3a5..2e315f9f2e51 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -339,12 +339,14 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
* nilfs_btree_node_broken - verify consistency of btree node
* @node: btree node block to be examined
* @size: node size (in bytes)
+ * @inode: host inode of btree
* @blocknr: block number
*
* Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
*/
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
- size_t size, sector_t blocknr)
+ size_t size, struct inode *inode,
+ sector_t blocknr)
{
int level, flags, nchildren;
int ret = 0;
@@ -358,9 +360,10 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
(flags & NILFS_BTREE_NODE_ROOT) ||
nchildren < 0 ||
nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
- printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
- "level = %d, flags = 0x%x, nchildren = %d\n",
- (unsigned long long)blocknr, level, flags, nchildren);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
+ inode->i_ino, (unsigned long long)blocknr, level,
+ flags, nchildren);
ret = 1;
}
return ret;
@@ -369,12 +372,12 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
/**
* nilfs_btree_root_broken - verify consistency of btree root node
* @node: btree root node to be examined
- * @ino: inode number
+ * @inode: host inode of btree
*
* Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
*/
static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
- unsigned long ino)
+ struct inode *inode)
{
int level, flags, nchildren;
int ret = 0;
@@ -387,8 +390,9 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
- pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
- ino, level, flags, nchildren);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
+ inode->i_ino, level, flags, nchildren);
ret = 1;
}
return ret;
@@ -396,13 +400,15 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
+ struct inode *inode;
int ret;
if (buffer_nilfs_checked(bh))
return 0;
+ inode = bh->b_page->mapping->host;
ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
- bh->b_size, bh->b_blocknr);
+ bh->b_size, inode, bh->b_blocknr);
if (likely(!ret))
set_buffer_nilfs_checked(bh);
return ret;
@@ -448,13 +454,15 @@ nilfs_btree_get_node(const struct nilfs_bmap *btree,
return node;
}
-static int
-nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
+static int nilfs_btree_bad_node(const struct nilfs_bmap *btree,
+ struct nilfs_btree_node *node, int level)
{
if (unlikely(nilfs_btree_node_get_level(node) != level)) {
dump_stack();
- printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
- nilfs_btree_node_get_level(node), level);
+ nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
+ "btree level mismatch (ino=%lu): %d != %d",
+ btree->b_inode->i_ino,
+ nilfs_btree_node_get_level(node), level);
return 1;
}
return 0;
@@ -509,6 +517,9 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
out_no_wait:
if (!buffer_uptodate(bh)) {
+ nilfs_msg(btree->b_inode->i_sb, KERN_ERR,
+ "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)",
+ btree->b_inode->i_ino, (unsigned long long)ptr);
brelse(bh);
return -EIO;
}
@@ -568,7 +579,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
return ret;
node = nilfs_btree_get_nonroot_node(path, level);
- if (nilfs_btree_bad_node(node, level))
+ if (nilfs_btree_bad_node(btree, node, level))
return -EINVAL;
if (!found)
found = nilfs_btree_node_lookup(node, key, &index);
@@ -616,7 +627,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
if (ret < 0)
return ret;
node = nilfs_btree_get_nonroot_node(path, level);
- if (nilfs_btree_bad_node(node, level))
+ if (nilfs_btree_bad_node(btree, node, level))
return -EINVAL;
index = nilfs_btree_node_get_nchildren(node) - 1;
ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
@@ -2072,8 +2083,10 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
if (ret < 0) {
if (unlikely(ret == -ENOENT))
- printk(KERN_CRIT "%s: key = %llu, level == %d\n",
- __func__, (unsigned long long)key, level);
+ nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
+ "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
+ btree->b_inode->i_ino,
+ (unsigned long long)key, level);
goto out;
}
@@ -2110,12 +2123,11 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX) {
dump_stack();
- printk(KERN_WARNING
- "%s: invalid btree level: %d (key=%llu, ino=%lu, "
- "blocknr=%llu)\n",
- __func__, level, (unsigned long long)key,
- NILFS_BMAP_I(btree)->vfs_inode.i_ino,
- (unsigned long long)bh->b_blocknr);
+ nilfs_msg(btree->b_inode->i_sb, KERN_WARNING,
+ "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)",
+ level, (unsigned long long)key,
+ btree->b_inode->i_ino,
+ (unsigned long long)bh->b_blocknr);
return;
}
@@ -2394,8 +2406,7 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
__nilfs_btree_init(bmap);
- if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
- bmap->b_inode->i_ino))
+ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
ret = -EIO;
return ret;
}
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index df1a25faa83b..2184e47fa4bf 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -22,7 +22,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/list.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_ondisk.h> /* nilfs_btree_node */
#include "btnode.h"
#include "bmap.h"
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 8a3d3b65af3f..a15a1601e931 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -21,7 +21,6 @@
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"
@@ -332,9 +331,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
int ret, ncps, nicps, nss, count, i;
if (unlikely(start == 0 || start > end)) {
- printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
- "[%llu, %llu)\n", __func__,
- (unsigned long long)start, (unsigned long long)end);
+ nilfs_msg(cpfile->i_sb, KERN_ERR,
+ "cannot delete checkpoints: invalid range [%llu, %llu)",
+ (unsigned long long)start, (unsigned long long)end);
return -EINVAL;
}
@@ -386,9 +385,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
cpfile, cno);
if (ret == 0)
continue;
- printk(KERN_ERR
- "%s: cannot delete block\n",
- __func__);
+ nilfs_msg(cpfile->i_sb, KERN_ERR,
+ "error %d deleting checkpoint block",
+ ret);
break;
}
}
@@ -991,14 +990,12 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
int err;
if (cpsize > sb->s_blocksize) {
- printk(KERN_ERR
- "NILFS: too large checkpoint size: %zu bytes.\n",
- cpsize);
+ nilfs_msg(sb, KERN_ERR,
+ "too large checkpoint size: %zu bytes", cpsize);
return -EINVAL;
} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
- printk(KERN_ERR
- "NILFS: too small checkpoint size: %zu bytes.\n",
- cpsize);
+ nilfs_msg(sb, KERN_ERR,
+ "too small checkpoint size: %zu bytes", cpsize);
return -EINVAL;
}
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index 0249744ae234..6eca972f9673 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -21,7 +21,8 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_api.h> /* nilfs_cpstat */
+#include <linux/nilfs2_ondisk.h> /* nilfs_inode, nilfs_checkpoint */
int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int,
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 7367610ea807..dffedb2f8817 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -349,10 +349,11 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
- printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
- (unsigned long long)vblocknr,
- (unsigned long long)le64_to_cpu(entry->de_start),
- (unsigned long long)le64_to_cpu(entry->de_end));
+ nilfs_msg(dat->i_sb, KERN_CRIT,
+ "%s: invalid vblocknr = %llu, [%llu, %llu)",
+ __func__, (unsigned long long)vblocknr,
+ (unsigned long long)le64_to_cpu(entry->de_start),
+ (unsigned long long)le64_to_cpu(entry->de_end));
kunmap_atomic(kaddr);
brelse(entry_bh);
return -EINVAL;
@@ -479,14 +480,12 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
int err;
if (entry_size > sb->s_blocksize) {
- printk(KERN_ERR
- "NILFS: too large DAT entry size: %zu bytes.\n",
- entry_size);
+ nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
+ entry_size);
return -EINVAL;
} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
- printk(KERN_ERR
- "NILFS: too small DAT entry size: %zu bytes.\n",
- entry_size);
+ nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
+ entry_size);
return -EINVAL;
}
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index abbfdabcabea..57dc6cf466d0 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
+#include <linux/nilfs2_ondisk.h> /* nilfs_inode, nilfs_checkpoint */
struct nilfs_palloc_req;
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index e506f4f7120a..908ebbf0ac7e 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -42,6 +42,28 @@
#include "nilfs.h"
#include "page.h"
+static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
+{
+ unsigned int len = le16_to_cpu(dlen);
+
+#if (PAGE_SIZE >= 65536)
+ if (len == NILFS_MAX_REC_LEN)
+ return 1 << 16;
+#endif
+ return len;
+}
+
+static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
+{
+#if (PAGE_SIZE >= 65536)
+ if (len == (1 << 16))
+ return cpu_to_le16(NILFS_MAX_REC_LEN);
+
+ BUG_ON(len > (1 << 16));
+#endif
+ return cpu_to_le16(len);
+}
+
/*
* nilfs uses block-sized chunks. Arguably, sector-sized ones would be
* more robust, but we have what we have
@@ -140,10 +162,9 @@ out:
/* Too bad, we had an error */
Ebadsize:
- nilfs_error(sb, "nilfs_check_page",
+ nilfs_error(sb,
"size of directory #%lu is not a multiple of chunk size",
- dir->i_ino
- );
+ dir->i_ino);
goto fail;
Eshort:
error = "rec_len is smaller than minimal";
@@ -157,19 +178,18 @@ Enamelen:
Espan:
error = "directory entry across blocks";
bad_entry:
- nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
- "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
- (unsigned long) le64_to_cpu(p->inode),
+ nilfs_error(sb,
+ "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+ dir->i_ino, error, (page->index << PAGE_SHIFT) + offs,
+ (unsigned long)le64_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
Eend:
p = (struct nilfs_dir_entry *)(kaddr + offs);
- nilfs_error(sb, "nilfs_check_page",
- "entry in directory #%lu spans the page boundary"
- "offset=%lu, inode=%lu",
- dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
- (unsigned long) le64_to_cpu(p->inode));
+ nilfs_error(sb,
+ "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu",
+ dir->i_ino, (page->index << PAGE_SHIFT) + offs,
+ (unsigned long)le64_to_cpu(p->inode));
fail:
SetPageError(page);
return false;
@@ -267,8 +287,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
struct page *page = nilfs_get_page(inode, n);
if (IS_ERR(page)) {
- nilfs_error(sb, __func__, "bad page in #%lu",
- inode->i_ino);
+ nilfs_error(sb, "bad page in #%lu", inode->i_ino);
ctx->pos += PAGE_SIZE - offset;
return -EIO;
}
@@ -278,8 +297,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
NILFS_DIR_REC_LEN(1);
for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
if (de->rec_len == 0) {
- nilfs_error(sb, __func__,
- "zero-length directory entry");
+ nilfs_error(sb, "zero-length directory entry");
nilfs_put_page(page);
return -EIO;
}
@@ -345,7 +363,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
- nilfs_error(dir->i_sb, __func__,
+ nilfs_error(dir->i_sb,
"zero-length directory entry");
nilfs_put_page(page);
goto out;
@@ -360,7 +378,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
n = 0;
/* next page is past the blocks we've got */
if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
- nilfs_error(dir->i_sb, __func__,
+ nilfs_error(dir->i_sb,
"dir %lu size %lld exceeds block count %llu",
dir->i_ino, dir->i_size,
(unsigned long long)dir->i_blocks);
@@ -469,7 +487,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
goto got_it;
}
if (de->rec_len == 0) {
- nilfs_error(dir->i_sb, __func__,
+ nilfs_error(dir->i_sb,
"zero-length directory entry");
err = -EIO;
goto out_unlock;
@@ -541,7 +559,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
while ((char *)de < (char *)dir) {
if (de->rec_len == 0) {
- nilfs_error(inode->i_sb, __func__,
+ nilfs_error(inode->i_sb,
"zero-length directory entry");
err = -EIO;
goto out;
@@ -628,7 +646,7 @@ int nilfs_empty_dir(struct inode *inode)
while ((char *)de <= kaddr) {
if (de->rec_len == 0) {
- nilfs_error(inode->i_sb, __func__,
+ nilfs_error(inode->i_sb,
"zero-length directory entry (kaddr=%p, de=%p)",
kaddr, de);
goto not_empty;
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 251a44928405..96e3ed0d9652 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -337,14 +337,16 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap,
key = nilfs_bmap_data_get_key(bmap, *bh);
if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
- printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
- (unsigned long long)key);
+ nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
+ "%s (ino=%lu): invalid key: %llu", __func__,
+ bmap->b_inode->i_ino, (unsigned long long)key);
return -EINVAL;
}
ptr = nilfs_direct_get_ptr(bmap, key);
if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
- printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
- (unsigned long long)ptr);
+ nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
+ "%s (ino=%lu): invalid pointer: %llu", __func__,
+ bmap->b_inode->i_ino, (unsigned long long)ptr);
return -EINVAL;
}
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h
index 3015a6e78724..cfe85e848bba 100644
--- a/fs/nilfs2/direct.h
+++ b/fs/nilfs2/direct.h
@@ -24,16 +24,6 @@
#include "bmap.h"
-/**
- * struct nilfs_direct_node - direct node
- * @dn_flags: flags
- * @dn_pad: padding
- */
-struct nilfs_direct_node {
- __u8 dn_flags;
- __u8 pad[7];
-};
-
#define NILFS_DIRECT_NBLOCKS (NILFS_BMAP_SIZE / sizeof(__le64) - 1)
#define NILFS_DIRECT_KEY_MIN 0
#define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index e9148f94d696..853a831dcde0 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -148,8 +148,15 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
+ struct inode *inode = bh->b_page->mapping->host;
+
+ nilfs_msg(inode->i_sb, KERN_ERR,
+ "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)",
+ buffer_nilfs_node(bh) ? "node" : "data",
+ inode->i_ino, (unsigned long long)bh->b_blocknr);
return -EIO;
+ }
if (buffer_dirty(bh))
return -EEXIST;
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 1d2b1805327a..b8fa45c20c63 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -145,15 +145,14 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
int err;
if (unlikely(!NILFS_VALID_INODE(sb, ino))) {
- nilfs_error(sb, __func__, "bad inode number: %lu",
- (unsigned long) ino);
+ nilfs_error(sb, "bad inode number: %lu", (unsigned long)ino);
return -EINVAL;
}
err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh);
if (unlikely(err))
- nilfs_warning(sb, __func__, "unable to read inode: %lu",
- (unsigned long) ino);
+ nilfs_msg(sb, KERN_WARNING, "error %d reading inode: ino=%lu",
+ err, (unsigned long)ino);
return err;
}
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index 23ad2f091e76..188b94fe0ec5 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "alloc.h"
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index a0ebdb17e912..af04f553d7c9 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -112,13 +112,10 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
* However, the page having this block must
* be locked in this case.
*/
- printk(KERN_WARNING
- "nilfs_get_block: a race condition "
- "while inserting a data block. "
- "(inode number=%lu, file block "
- "offset=%llu)\n",
- inode->i_ino,
- (unsigned long long)blkoff);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+ __func__, inode->i_ino,
+ (unsigned long long)blkoff);
err = 0;
}
nilfs_transaction_abort(inode->i_sb);
@@ -359,7 +356,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
root = NILFS_I(dir)->i_root;
ii = NILFS_I(inode);
- ii->i_state = 1 << NILFS_I_NEW;
+ ii->i_state = BIT(NILFS_I_NEW);
ii->i_root = root;
err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
@@ -558,7 +555,7 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
inode->i_ino = args->ino;
if (args->for_gc) {
- NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
+ NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = NULL;
} else {
@@ -726,9 +723,9 @@ repeat:
goto repeat;
failed:
- nilfs_warning(ii->vfs_inode.i_sb, __func__,
- "failed to truncate bmap (ino=%lu, err=%d)",
- ii->vfs_inode.i_ino, ret);
+ nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
+ "error %d truncating bmap (ino=%lu)", ret,
+ ii->vfs_inode.i_ino);
}
void nilfs_truncate(struct inode *inode)
@@ -939,9 +936,9 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
* This will happen when somebody is freeing
* this inode.
*/
- nilfs_warning(inode->i_sb, __func__,
- "cannot get inode (ino=%lu)",
- inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "cannot set file dirty (ino=%lu): the file is being freed",
+ inode->i_ino);
spin_unlock(&nilfs->ns_inode_lock);
return -EINVAL; /*
* NILFS_I_DIRTY may remain for
@@ -962,8 +959,9 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
err = nilfs_load_inode_block(inode, &ibh);
if (unlikely(err)) {
- nilfs_warning(inode->i_sb, __func__,
- "failed to reget inode block.");
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "cannot mark inode dirty (ino=%lu): error %d loading inode block",
+ inode->i_ino, err);
return err;
}
nilfs_update_inode(inode, ibh, flags);
@@ -989,8 +987,8 @@ void nilfs_dirty_inode(struct inode *inode, int flags)
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
if (is_bad_inode(inode)) {
- nilfs_warning(inode->i_sb, __func__,
- "tried to mark bad_inode dirty. ignored.");
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "tried to mark bad_inode dirty. ignored.");
dump_stack();
return;
}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 358b57e2cdf9..f1d7989459fd 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -25,7 +25,6 @@
#include <linux/compat.h> /* compat_ptr() */
#include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
#include "bmap.h"
@@ -584,27 +583,25 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
if (unlikely(ret < 0)) {
if (ret == -ENOENT)
- printk(KERN_CRIT
- "%s: invalid virtual block address (%s): "
- "ino=%llu, cno=%llu, offset=%llu, "
- "blocknr=%llu, vblocknr=%llu\n",
- __func__, vdesc->vd_flags ? "node" : "data",
- (unsigned long long)vdesc->vd_ino,
- (unsigned long long)vdesc->vd_cno,
- (unsigned long long)vdesc->vd_offset,
- (unsigned long long)vdesc->vd_blocknr,
- (unsigned long long)vdesc->vd_vblocknr);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
+ __func__, vdesc->vd_flags ? "node" : "data",
+ (unsigned long long)vdesc->vd_ino,
+ (unsigned long long)vdesc->vd_cno,
+ (unsigned long long)vdesc->vd_offset,
+ (unsigned long long)vdesc->vd_blocknr,
+ (unsigned long long)vdesc->vd_vblocknr);
return ret;
}
if (unlikely(!list_empty(&bh->b_assoc_buffers))) {
- printk(KERN_CRIT "%s: conflicting %s buffer: ino=%llu, "
- "cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu\n",
- __func__, vdesc->vd_flags ? "node" : "data",
- (unsigned long long)vdesc->vd_ino,
- (unsigned long long)vdesc->vd_cno,
- (unsigned long long)vdesc->vd_offset,
- (unsigned long long)vdesc->vd_blocknr,
- (unsigned long long)vdesc->vd_vblocknr);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
+ __func__, vdesc->vd_flags ? "node" : "data",
+ (unsigned long long)vdesc->vd_ino,
+ (unsigned long long)vdesc->vd_cno,
+ (unsigned long long)vdesc->vd_offset,
+ (unsigned long long)vdesc->vd_blocknr,
+ (unsigned long long)vdesc->vd_vblocknr);
brelse(bh);
return -EEXIST;
}
@@ -854,8 +851,8 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
return 0;
failed:
- printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n",
- msg, ret);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR, "error %d preparing GC: %s", ret,
+ msg);
return ret;
}
@@ -963,10 +960,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
}
ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
- if (ret < 0)
- printk(KERN_ERR "NILFS: GC failed during preparation: "
- "cannot read source blocks: err=%d\n", ret);
- else {
+ if (ret < 0) {
+ nilfs_msg(inode->i_sb, KERN_ERR,
+ "error %d preparing GC: cannot read source blocks",
+ ret);
+ } else {
if (nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 0d7b71fbeff8..d56d3a5bea88 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -207,8 +207,12 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
out_no_wait:
err = -EIO;
- if (!buffer_uptodate(first_bh))
+ if (!buffer_uptodate(first_bh)) {
+ nilfs_msg(inode->i_sb, KERN_ERR,
+ "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
+ inode->i_ino, block);
goto failed_bh;
+ }
out:
*out_bh = first_bh;
return 0;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1ec8ae5995a5..dbcf1dc93a51 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -283,9 +283,9 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
goto out;
if (!inode->i_nlink) {
- nilfs_warning(inode->i_sb, __func__,
- "deleting nonexistent file (%lu), %d",
- inode->i_ino, inode->i_nlink);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "deleting nonexistent file (ino=%lu), %d",
+ inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
err = nilfs_delete_entry(de, page);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index b1d48bc0532d..33f8c8fc96e8 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -23,7 +23,8 @@
#include <linux/buffer_head.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_api.h>
+#include <linux/nilfs2_ondisk.h>
#include "the_nilfs.h"
#include "bmap.h"
@@ -119,20 +120,19 @@ enum {
/*
* Macros to check inode numbers
*/
-#define NILFS_MDT_INO_BITS \
- ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
- 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
- 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
+#define NILFS_MDT_INO_BITS \
+ (BIT(NILFS_DAT_INO) | BIT(NILFS_CPFILE_INO) | \
+ BIT(NILFS_SUFILE_INO) | BIT(NILFS_IFILE_INO) | \
+ BIT(NILFS_ATIME_INO) | BIT(NILFS_SKETCH_INO))
-#define NILFS_SYS_INO_BITS \
- ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
+#define NILFS_SYS_INO_BITS (BIT(NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
#define NILFS_MDT_INODE(sb, ino) \
- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
+ ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
#define NILFS_VALID_INODE(sb, ino) \
- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
+ ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
/**
* struct nilfs_transaction_info: context information for synchronization
@@ -299,10 +299,36 @@ static inline int nilfs_mark_inode_dirty_sync(struct inode *inode)
/* super.c */
extern struct inode *nilfs_alloc_inode(struct super_block *);
extern void nilfs_destroy_inode(struct inode *);
+
extern __printf(3, 4)
-void nilfs_error(struct super_block *, const char *, const char *, ...);
+void __nilfs_msg(struct super_block *sb, const char *level,
+ const char *fmt, ...);
extern __printf(3, 4)
-void nilfs_warning(struct super_block *, const char *, const char *, ...);
+void __nilfs_error(struct super_block *sb, const char *function,
+ const char *fmt, ...);
+
+#ifdef CONFIG_PRINTK
+
+#define nilfs_msg(sb, level, fmt, ...) \
+ __nilfs_msg(sb, level, fmt, ##__VA_ARGS__)
+#define nilfs_error(sb, fmt, ...) \
+ __nilfs_error(sb, __func__, fmt, ##__VA_ARGS__)
+
+#else
+
+#define nilfs_msg(sb, level, fmt, ...) \
+ do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ (void)(sb); \
+ } while (0)
+#define nilfs_error(sb, fmt, ...) \
+ do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ __nilfs_error(sb, "", " "); \
+ } while (0)
+
+#endif /* CONFIG_PRINTK */
+
extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
extern int nilfs_store_magic_and_option(struct super_block *,
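The nilfs.h hunk above drops the old nilfs_warning()/nilfs_error() declarations in favour of __nilfs_msg()/__nilfs_error() plus forwarding macros; when CONFIG_PRINTK is disabled the macros fall back to no_printk(), which keeps compile-time format checking but emits nothing (nilfs_error still calls __nilfs_error() so the error state is recorded). A standalone sketch of the ##__VA_ARGS__ forwarding that nilfs_msg() relies on; the my_msg()/__my_msg() names are hypothetical stand-ins, not nilfs symbols.

/* Userspace sketch of a nilfs_msg()-style forwarding macro. */
#include <stdarg.h>
#include <stdio.h>

static void __my_msg(const char *level, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	printf("%s: ", level);
	vprintf(fmt, args);
	putchar('\n');
	va_end(args);
}

/* ##__VA_ARGS__ swallows the trailing comma when no extra arguments are
 * passed, mirroring the CONFIG_PRINTK branch of nilfs_msg(). */
#define my_msg(level, fmt, ...) __my_msg(level, fmt, ##__VA_ARGS__)

int main(void)
{
	my_msg("WARNING", "entry number %llu already freed", 42ULL);
	my_msg("INFO", "no extra arguments here");
	return 0;
}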
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index d97ba5f11b77..f11a3ad2df0c 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -30,9 +30,9 @@
#include "mdt.h"
-#define NILFS_BUFFER_INHERENT_BITS \
- ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
- (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
+#define NILFS_BUFFER_INHERENT_BITS \
+ (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) | \
+ BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
@@ -85,9 +85,9 @@ void nilfs_forget_buffer(struct buffer_head *bh)
{
struct page *page = bh->b_page;
const unsigned long clear_bits =
- (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
- 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
- 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
+ (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+ BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
lock_buffer(bh);
set_mask_bits(&bh->b_state, clear_bits, 0);
@@ -124,17 +124,17 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
dbh->b_bdev = sbh->b_bdev;
bh = dbh;
- bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
+ bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
while ((bh = bh->b_this_page) != dbh) {
lock_buffer(bh);
bits &= bh->b_state;
unlock_buffer(bh);
}
- if (bits & (1UL << BH_Uptodate))
+ if (bits & BIT(BH_Uptodate))
SetPageUptodate(dpage);
else
ClearPageUptodate(dpage);
- if (bits & (1UL << BH_Mapped))
+ if (bits & BIT(BH_Mapped))
SetPageMappedToDisk(dpage);
else
ClearPageMappedToDisk(dpage);
@@ -215,7 +215,7 @@ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
create_empty_buffers(dst, sbh->b_size, 0);
if (copy_dirty)
- mask |= (1UL << BH_Dirty);
+ mask |= BIT(BH_Dirty);
dbh = dbufs = page_buffers(dst);
do {
@@ -403,11 +403,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
BUG_ON(!PageLocked(page));
- if (!silent) {
- nilfs_warning(sb, __func__,
- "discard page: offset %lld, ino %lu",
- page_offset(page), inode->i_ino);
- }
+ if (!silent)
+ nilfs_msg(sb, KERN_WARNING,
+ "discard dirty page: offset=%lld, ino=%lu",
+ page_offset(page), inode->i_ino);
ClearPageUptodate(page);
ClearPageMappedToDisk(page);
@@ -415,18 +414,18 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
const unsigned long clear_bits =
- (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
- 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
- 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
+ (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+ BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
bh = head = page_buffers(page);
do {
lock_buffer(bh);
- if (!silent) {
- nilfs_warning(sb, __func__,
- "discard block %llu, size %zu",
- (u64)bh->b_blocknr, bh->b_size);
- }
+ if (!silent)
+ nilfs_msg(sb, KERN_WARNING,
+ "discard dirty block: blocknr=%llu, size=%zu",
+ (u64)bh->b_blocknr, bh->b_size);
+
set_mask_bits(&bh->b_state, clear_bits, 0);
unlock_buffer(bh);
} while (bh = bh->b_this_page, bh != head);
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index d893dc912b62..5139efed1888 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -54,38 +54,37 @@ struct nilfs_recovery_block {
};
-static int nilfs_warn_segment_error(int err)
+static int nilfs_warn_segment_error(struct super_block *sb, int err)
{
+ const char *msg = NULL;
+
switch (err) {
case NILFS_SEG_FAIL_IO:
- printk(KERN_WARNING
- "NILFS warning: I/O error on loading last segment\n");
+ nilfs_msg(sb, KERN_ERR, "I/O error reading segment");
return -EIO;
case NILFS_SEG_FAIL_MAGIC:
- printk(KERN_WARNING
- "NILFS warning: Segment magic number invalid\n");
+ msg = "Magic number mismatch";
break;
case NILFS_SEG_FAIL_SEQ:
- printk(KERN_WARNING
- "NILFS warning: Sequence number mismatch\n");
+ msg = "Sequence number mismatch";
break;
case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in super root\n");
+ msg = "Checksum error in super root";
break;
case NILFS_SEG_FAIL_CHECKSUM_FULL:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in segment payload\n");
+ msg = "Checksum error in segment payload";
break;
case NILFS_SEG_FAIL_CONSISTENCY:
- printk(KERN_WARNING
- "NILFS warning: Inconsistent segment\n");
+ msg = "Inconsistency found";
break;
case NILFS_SEG_NO_SUPER_ROOT:
- printk(KERN_WARNING
- "NILFS warning: No super root in the last segment\n");
+ msg = "No super root in the last segment";
break;
+ default:
+ nilfs_msg(sb, KERN_ERR, "unrecognized segment error %d", err);
+ return -EINVAL;
}
+ nilfs_msg(sb, KERN_WARNING, "invalid segment: %s", msg);
return -EINVAL;
}
@@ -178,7 +177,7 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
brelse(bh_sr);
failed:
- return nilfs_warn_segment_error(ret);
+ return nilfs_warn_segment_error(nilfs->ns_sb, ret);
}
/**
@@ -553,11 +552,10 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
put_page(page);
failed_inode:
- printk(KERN_WARNING
- "NILFS warning: error recovering data block "
- "(err=%d, ino=%lu, block-offset=%llu)\n",
- err, (unsigned long)rb->ino,
- (unsigned long long)rb->blkoff);
+ nilfs_msg(sb, KERN_WARNING,
+ "error %d recovering data block (ino=%lu, block-offset=%llu)",
+ err, (unsigned long)rb->ino,
+ (unsigned long long)rb->blkoff);
if (!err2)
err2 = err;
next:
@@ -680,8 +678,8 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
}
if (nsalvaged_blocks) {
- printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
- sb->s_id, nsalvaged_blocks);
+ nilfs_msg(sb, KERN_INFO, "salvaged %lu blocks",
+ nsalvaged_blocks);
ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
}
out:
@@ -692,10 +690,9 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
confused:
err = -EINVAL;
failed:
- printk(KERN_ERR
- "NILFS (device %s): Error roll-forwarding "
- "(err=%d, pseg block=%llu). ",
- sb->s_id, err, (unsigned long long)pseg_start);
+ nilfs_msg(sb, KERN_ERR,
+ "error %d roll-forwarding partial segment at blocknr = %llu",
+ err, (unsigned long long)pseg_start);
goto out;
}
@@ -715,9 +712,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
set_buffer_dirty(bh);
err = sync_dirty_buffer(bh);
if (unlikely(err))
- printk(KERN_WARNING
- "NILFS warning: buffer sync write failed during "
- "post-cleaning of recovery.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_WARNING,
+ "buffer sync write failed during post-cleaning of recovery.");
brelse(bh);
}
@@ -752,8 +748,8 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
if (unlikely(err)) {
- printk(KERN_ERR
- "NILFS: error loading the latest checkpoint.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "error %d loading the latest checkpoint", err);
return err;
}
@@ -764,8 +760,9 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: Error preparing segments for "
- "recovery.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "error %d preparing segment for recovery",
+ err);
goto failed;
}
@@ -778,8 +775,9 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
nilfs_detach_log_writer(sb);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: Oops! recovery failed. "
- "(err=%d)\n", err);
+ nilfs_msg(sb, KERN_ERR,
+ "error %d writing segment for recovery",
+ err);
goto failed;
}
@@ -961,5 +959,5 @@ int nilfs_search_super_root(struct the_nilfs *nilfs,
failed:
brelse(bh_sum);
nilfs_dispose_segment_list(&segments);
- return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
+ return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret);
}
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index a962d7d83447..6f87b2ac1aeb 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -514,7 +514,11 @@ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
} while (--segbuf->sb_nbio > 0);
if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
- printk(KERN_ERR "NILFS: IO error writing segment\n");
+ nilfs_msg(segbuf->sb_super, KERN_ERR,
+ "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu",
+ (unsigned long long)segbuf->sb_pseg_start,
+ segbuf->sb_sum.nblocks,
+ (unsigned long long)segbuf->sb_segnum);
err = -EIO;
}
return err;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index e78b68a81aec..bedcae2c28e6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -150,7 +150,8 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
#define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
-static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
+static int nilfs_prepare_segment_lock(struct super_block *sb,
+ struct nilfs_transaction_info *ti)
{
struct nilfs_transaction_info *cur_ti = current->journal_info;
void *save = NULL;
@@ -164,8 +165,7 @@ static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
* it is saved and will be restored on
* nilfs_transaction_commit().
*/
- printk(KERN_WARNING
- "NILFS warning: journal info from a different FS\n");
+ nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
save = current->journal_info;
}
if (!ti) {
@@ -215,7 +215,7 @@ int nilfs_transaction_begin(struct super_block *sb,
int vacancy_check)
{
struct the_nilfs *nilfs;
- int ret = nilfs_prepare_segment_lock(ti);
+ int ret = nilfs_prepare_segment_lock(sb, ti);
struct nilfs_transaction_info *trace_ti;
if (unlikely(ret < 0))
@@ -373,7 +373,7 @@ static void nilfs_transaction_lock(struct super_block *sb,
nilfs_segctor_do_immediate_flush(sci);
up_write(&nilfs->ns_segctor_sem);
- yield();
+ cond_resched();
}
if (gcflag)
ti->ti_flags |= NILFS_TI_GC;
@@ -1858,11 +1858,11 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
*/
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- const unsigned long set_bits = (1 << BH_Uptodate);
+ const unsigned long set_bits = BIT(BH_Uptodate);
const unsigned long clear_bits =
- (1 << BH_Dirty | 1 << BH_Async_Write |
- 1 << BH_Delay | 1 << BH_NILFS_Volatile |
- 1 << BH_NILFS_Redirected);
+ (BIT(BH_Dirty) | BIT(BH_Async_Write) |
+ BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Redirected));
set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
@@ -1951,8 +1951,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
err = nilfs_ifile_get_inode_block(
ifile, ii->vfs_inode.i_ino, &ibh);
if (unlikely(err)) {
- nilfs_warning(sci->sc_super, __func__,
- "failed to get inode block.");
+ nilfs_msg(sci->sc_super, KERN_WARNING,
+ "log writer: error %d getting inode block (ino=%lu)",
+ err, ii->vfs_inode.i_ino);
return err;
}
mark_buffer_dirty(ibh);
@@ -2131,10 +2132,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
spin_lock(&sci->sc_state_lock);
- if (!(sci->sc_flush_request & (1 << bn))) {
+ if (!(sci->sc_flush_request & BIT(bn))) {
unsigned long prev_req = sci->sc_flush_request;
- sci->sc_flush_request |= (1 << bn);
+ sci->sc_flush_request |= BIT(bn);
if (!prev_req)
wake_up(&sci->sc_wait_daemon);
}
@@ -2318,7 +2319,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
}
#define FLUSH_FILE_BIT (0x1) /* data file only */
-#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
+#define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
/**
* nilfs_segctor_accept - record accepted sequence count of log-write requests
@@ -2458,8 +2459,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
if (likely(!err))
break;
- nilfs_warning(sb, __func__,
- "segment construction failed. (err=%d)", err);
+ nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sci->sc_interval);
}
@@ -2467,9 +2467,9 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
sci->sc_nfreesegs);
if (ret) {
- printk(KERN_WARNING
- "NILFS warning: error %d on discard request, "
- "turning discards off for the device\n", ret);
+ nilfs_msg(sb, KERN_WARNING,
+ "error %d on discard request, turning discards off for the device",
+ ret);
nilfs_clear_opt(nilfs, DISCARD);
}
}
@@ -2551,10 +2551,9 @@ static int nilfs_segctor_thread(void *arg)
/* start sync. */
sci->sc_task = current;
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
- printk(KERN_INFO
- "segctord starting. Construction interval = %lu seconds, "
- "CP frequency < %lu seconds\n",
- sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
+ nilfs_msg(sci->sc_super, KERN_INFO,
+ "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
+ sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
spin_lock(&sci->sc_state_lock);
loop:
@@ -2628,8 +2627,8 @@ static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
if (IS_ERR(t)) {
int err = PTR_ERR(t);
- printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
- err);
+ nilfs_msg(sci->sc_super, KERN_ERR,
+ "error %d creating segctord thread", err);
return err;
}
wait_event(sci->sc_wait_task, sci->sc_task != NULL);
@@ -2739,14 +2738,14 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
nilfs_segctor_write_out(sci);
if (!list_empty(&sci->sc_dirty_files)) {
- nilfs_warning(sci->sc_super, __func__,
- "dirty file(s) after the final construction");
+ nilfs_msg(sci->sc_super, KERN_WARNING,
+ "disposed unprocessed dirty file(s) when stopping log writer");
nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
}
if (!list_empty(&sci->sc_iput_queue)) {
- nilfs_warning(sci->sc_super, __func__,
- "iput queue is not empty");
+ nilfs_msg(sci->sc_super, KERN_WARNING,
+ "disposed unprocessed inode(s) in iput queue when stopping log writer");
nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
}
@@ -2822,8 +2821,8 @@ void nilfs_detach_log_writer(struct super_block *sb)
spin_lock(&nilfs->ns_inode_lock);
if (!list_empty(&nilfs->ns_dirty_files)) {
list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
- nilfs_warning(sb, __func__,
- "Hit dirty file after stopped log writer");
+ nilfs_msg(sb, KERN_WARNING,
+ "disposed unprocessed dirty file(s) when detaching log writer");
}
spin_unlock(&nilfs->ns_inode_lock);
up_write(&nilfs->ns_segctor_sem);
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 6565c10b7b76..1060949d7dd2 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
-#include <linux/nilfs2_fs.h>
#include "nilfs.h"
struct nilfs_root;
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 1963595a1580..1541a1e9221a 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -22,7 +22,6 @@
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
@@ -181,9 +180,9 @@ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
down_write(&NILFS_MDT(sufile)->mi_sem);
for (seg = segnumv; seg < segnumv + nsegs; seg++) {
if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
- printk(KERN_WARNING
- "%s: invalid segment number: %llu\n", __func__,
- (unsigned long long)*seg);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: invalid segment number: %llu",
+ __func__, (unsigned long long)*seg);
nerr++;
}
}
@@ -240,8 +239,9 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
int ret;
if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
- printk(KERN_WARNING "%s: invalid segment number: %llu\n",
- __func__, (unsigned long long)segnum);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: invalid segment number: %llu",
+ __func__, (unsigned long long)segnum);
return -EINVAL;
}
down_write(&NILFS_MDT(sufile)->mi_sem);
@@ -419,8 +419,9 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (unlikely(!nilfs_segment_usage_clean(su))) {
- printk(KERN_WARNING "%s: segment %llu must be clean\n",
- __func__, (unsigned long long)segnum);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: segment %llu must be clean", __func__,
+ (unsigned long long)segnum);
kunmap_atomic(kaddr);
return;
}
@@ -444,7 +445,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
- if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
+ if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
su->su_nblocks == cpu_to_le32(0)) {
kunmap_atomic(kaddr);
return;
@@ -455,7 +456,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
/* make the segment garbage */
su->su_lastmod = cpu_to_le64(0);
su->su_nblocks = cpu_to_le32(0);
- su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
+ su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
kunmap_atomic(kaddr);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
@@ -476,8 +477,9 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_clean(su)) {
- printk(KERN_WARNING "%s: segment %llu is already clean\n",
- __func__, (unsigned long long)segnum);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: segment %llu is already clean",
+ __func__, (unsigned long long)segnum);
kunmap_atomic(kaddr);
return;
}
@@ -692,7 +694,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
su2 = su;
for (j = 0; j < n; j++, su = (void *)su + susz) {
if ((le32_to_cpu(su->su_flags) &
- ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
+ ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY;
kunmap_atomic(kaddr);
@@ -859,10 +861,10 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
si->sui_lastmod = le64_to_cpu(su->su_lastmod);
si->sui_nblocks = le32_to_cpu(su->su_nblocks);
si->sui_flags = le32_to_cpu(su->su_flags) &
- ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+ ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
if (nilfs_segment_is_active(nilfs, segnum + j))
si->sui_flags |=
- (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+ BIT(NILFS_SEGMENT_USAGE_ACTIVE);
}
kunmap_atomic(kaddr);
brelse(su_bh);
@@ -950,7 +952,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
* disk.
*/
sup->sup_sui.sui_flags &=
- ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+ ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
cleansi = nilfs_suinfo_clean(&sup->sup_sui);
cleansu = nilfs_segment_usage_clean(su);
@@ -1175,14 +1177,12 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
int err;
if (susize > sb->s_blocksize) {
- printk(KERN_ERR
- "NILFS: too large segment usage size: %zu bytes.\n",
- susize);
+ nilfs_msg(sb, KERN_ERR,
+ "too large segment usage size: %zu bytes", susize);
return -EINVAL;
} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
- printk(KERN_ERR
- "NILFS: too small segment usage size: %zu bytes.\n",
- susize);
+ nilfs_msg(sb, KERN_ERR,
+ "too small segment usage size: %zu bytes", susize);
return -EINVAL;
}
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 46e89872294c..158a9190c8ec 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 666107a18a22..c95d369e90aa 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -71,6 +71,22 @@ struct kmem_cache *nilfs_btree_path_cache;
static int nilfs_setup_super(struct super_block *sb, int is_mount);
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
+void __nilfs_msg(struct super_block *sb, const char *level, const char *fmt,
+ ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ if (sb)
+ printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf);
+ else
+ printk("%sNILFS: %pV\n", level, &vaf);
+ va_end(args);
+}
+
static void nilfs_set_error(struct super_block *sb)
{
struct the_nilfs *nilfs = sb->s_fs_info;
@@ -91,19 +107,20 @@ static void nilfs_set_error(struct super_block *sb)
}
/**
- * nilfs_error() - report failure condition on a filesystem
+ * __nilfs_error() - report failure condition on a filesystem
+ *
+ * __nilfs_error() sets an ERROR_FS flag on the superblock as well as
+ * reporting an error message. This function should be called when
+ * NILFS detects incoherences or defects of meta data on disk.
*
- * nilfs_error() sets an ERROR_FS flag on the superblock as well as
- * reporting an error message. It should be called when NILFS detects
- * incoherences or defects of meta data on disk. As for sustainable
- * errors such as a single-shot I/O error, nilfs_warning() or the printk()
- * function should be used instead.
+ * This implements the body of nilfs_error() macro. Normally,
+ * nilfs_error() should be used. As for sustainable errors such as a
+ * single-shot I/O error, nilfs_msg() should be used instead.
*
- * The segment constructor must not call this function because it can
- * kill itself.
+ * Callers should not add a trailing newline since this will do it.
*/
-void nilfs_error(struct super_block *sb, const char *function,
- const char *fmt, ...)
+void __nilfs_error(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
struct the_nilfs *nilfs = sb->s_fs_info;
struct va_format vaf;
@@ -133,24 +150,6 @@ void nilfs_error(struct super_block *sb, const char *function,
sb->s_id);
}
-void nilfs_warning(struct super_block *sb, const char *function,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk(KERN_WARNING "NILFS warning (device %s): %s: %pV\n",
- sb->s_id, function, &vaf);
-
- va_end(args);
-}
-
-
struct inode *nilfs_alloc_inode(struct super_block *sb)
{
struct nilfs_inode_info *ii;
@@ -196,8 +195,8 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
}
if (unlikely(err)) {
- printk(KERN_ERR
- "NILFS: unable to write superblock (err=%d)\n", err);
+ nilfs_msg(sb, KERN_ERR, "unable to write superblock: err=%d",
+ err);
if (err == -EIO && nilfs->ns_sbh[1]) {
/*
* sbp[0] points to newer log than sbp[1],
@@ -267,8 +266,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
} else {
- printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
- sb->s_id);
+ nilfs_msg(sb, KERN_CRIT, "superblock broke");
return NULL;
}
} else if (sbp[1] &&
@@ -378,9 +376,9 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
offset = sb2off & (nilfs->ns_blocksize - 1);
nsbh = sb_getblk(sb, newblocknr);
if (!nsbh) {
- printk(KERN_WARNING
- "NILFS warning: unable to move secondary superblock "
- "to block %llu\n", (unsigned long long)newblocknr);
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to move secondary superblock to block %llu",
+ (unsigned long long)newblocknr);
ret = -EIO;
goto out;
}
@@ -543,10 +541,9 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
up_read(&nilfs->ns_segctor_sem);
if (unlikely(err)) {
if (err == -ENOENT || err == -EINVAL) {
- printk(KERN_ERR
- "NILFS: Invalid checkpoint "
- "(checkpoint number=%llu)\n",
- (unsigned long long)cno);
+ nilfs_msg(sb, KERN_ERR,
+ "Invalid checkpoint (checkpoint number=%llu)",
+ (unsigned long long)cno);
err = -EINVAL;
}
goto failed;
@@ -642,9 +639,8 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
err = nilfs_ifile_count_free_inodes(root->ifile,
&nmaxinodes, &nfreeinodes);
if (unlikely(err)) {
- printk(KERN_WARNING
- "NILFS warning: fail to count free inodes: err %d.\n",
- err);
+ nilfs_msg(sb, KERN_WARNING,
+ "failed to count free inodes: err=%d", err);
if (err == -ERANGE) {
/*
* If nilfs_palloc_count_max_entries() returns
@@ -776,9 +772,9 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
break;
case Opt_snapshot:
if (is_remount) {
- printk(KERN_ERR
- "NILFS: \"%s\" option is invalid "
- "for remount.\n", p);
+ nilfs_msg(sb, KERN_ERR,
+ "\"%s\" option is invalid for remount",
+ p);
return 0;
}
break;
@@ -792,8 +788,8 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
nilfs_clear_opt(nilfs, DISCARD);
break;
default:
- printk(KERN_ERR
- "NILFS: Unrecognized mount option \"%s\"\n", p);
+ nilfs_msg(sb, KERN_ERR,
+ "unrecognized mount option \"%s\"", p);
return 0;
}
}
@@ -829,12 +825,10 @@ static int nilfs_setup_super(struct super_block *sb, int is_mount)
mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
- printk(KERN_WARNING
- "NILFS warning: mounting fs with errors\n");
+ nilfs_msg(sb, KERN_WARNING, "mounting fs with errors");
#if 0
} else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
- printk(KERN_WARNING
- "NILFS warning: maximal mount count reached\n");
+ nilfs_msg(sb, KERN_WARNING, "maximal mount count reached");
#endif
}
if (!max_mnt_count)
@@ -897,17 +891,17 @@ int nilfs_check_feature_compatibility(struct super_block *sb,
features = le64_to_cpu(sbp->s_feature_incompat) &
~NILFS_FEATURE_INCOMPAT_SUPP;
if (features) {
- printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
- "optional features (%llx)\n",
- (unsigned long long)features);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't mount because of unsupported optional features (%llx)",
+ (unsigned long long)features);
return -EINVAL;
}
features = le64_to_cpu(sbp->s_feature_compat_ro) &
~NILFS_FEATURE_COMPAT_RO_SUPP;
if (!(sb->s_flags & MS_RDONLY) && features) {
- printk(KERN_ERR "NILFS: couldn't mount RDWR because of "
- "unsupported optional features (%llx)\n",
- (unsigned long long)features);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't mount RDWR because of unsupported optional features (%llx)",
+ (unsigned long long)features);
return -EINVAL;
}
return 0;
@@ -923,13 +917,13 @@ static int nilfs_get_root_dentry(struct super_block *sb,
inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
if (IS_ERR(inode)) {
- printk(KERN_ERR "NILFS: get root inode failed\n");
ret = PTR_ERR(inode);
+ nilfs_msg(sb, KERN_ERR, "error %d getting root inode", ret);
goto out;
}
if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
iput(inode);
- printk(KERN_ERR "NILFS: corrupt root inode.\n");
+ nilfs_msg(sb, KERN_ERR, "corrupt root inode");
ret = -EINVAL;
goto out;
}
@@ -957,7 +951,7 @@ static int nilfs_get_root_dentry(struct super_block *sb,
return ret;
failed_dentry:
- printk(KERN_ERR "NILFS: get root dentry failed\n");
+ nilfs_msg(sb, KERN_ERR, "error %d getting root dentry", ret);
goto out;
}
@@ -977,18 +971,18 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
ret = (ret == -ENOENT) ? -EINVAL : ret;
goto out;
} else if (!ret) {
- printk(KERN_ERR "NILFS: The specified checkpoint is "
- "not a snapshot (checkpoint number=%llu).\n",
- (unsigned long long)cno);
+ nilfs_msg(s, KERN_ERR,
+ "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
+ (unsigned long long)cno);
ret = -EINVAL;
goto out;
}
ret = nilfs_attach_checkpoint(s, cno, false, &root);
if (ret) {
- printk(KERN_ERR "NILFS: error loading snapshot "
- "(checkpoint number=%llu).\n",
- (unsigned long long)cno);
+ nilfs_msg(s, KERN_ERR,
+ "error %d while loading snapshot (checkpoint number=%llu)",
+ ret, (unsigned long long)cno);
goto out;
}
ret = nilfs_get_root_dentry(s, root, root_dentry);
@@ -1058,7 +1052,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
__u64 cno;
int err;
- nilfs = alloc_nilfs(sb->s_bdev);
+ nilfs = alloc_nilfs(sb);
if (!nilfs)
return -ENOMEM;
@@ -1083,8 +1077,9 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
cno = nilfs_last_cno(nilfs);
err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
if (err) {
- printk(KERN_ERR "NILFS: error loading last checkpoint "
- "(checkpoint number=%llu).\n", (unsigned long long)cno);
+ nilfs_msg(sb, KERN_ERR,
+ "error %d while loading last checkpoint (checkpoint number=%llu)",
+ err, (unsigned long long)cno);
goto failed_unload;
}
@@ -1144,9 +1139,8 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
if (!nilfs_valid_fs(nilfs)) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount because the filesystem is in an "
- "incomplete recovery state.\n", sb->s_id);
+ nilfs_msg(sb, KERN_WARNING,
+ "couldn't remount because the filesystem is in an incomplete recovery state");
goto restore_opts;
}
@@ -1178,10 +1172,9 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
~NILFS_FEATURE_COMPAT_RO_SUPP;
up_read(&nilfs->ns_sem);
if (features) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount RDWR because of unsupported optional "
- "features (%llx)\n",
- sb->s_id, (unsigned long long)features);
+ nilfs_msg(sb, KERN_WARNING,
+ "couldn't remount RDWR because of unsupported optional features (%llx)",
+ (unsigned long long)features);
err = -EROFS;
goto restore_opts;
}
@@ -1212,6 +1205,38 @@ struct nilfs_super_data {
int flags;
};
+static int nilfs_parse_snapshot_option(const char *option,
+ const substring_t *arg,
+ struct nilfs_super_data *sd)
+{
+ unsigned long long val;
+ const char *msg = NULL;
+ int err;
+
+ if (!(sd->flags & MS_RDONLY)) {
+ msg = "read-only option is not specified";
+ goto parse_error;
+ }
+
+ err = kstrtoull(arg->from, 0, &val);
+ if (err) {
+ if (err == -ERANGE)
+ msg = "too large checkpoint number";
+ else
+ msg = "malformed argument";
+ goto parse_error;
+ } else if (val == 0) {
+ msg = "invalid checkpoint number 0";
+ goto parse_error;
+ }
+ sd->cno = val;
+ return 0;
+
+parse_error:
+ nilfs_msg(NULL, KERN_ERR, "invalid option \"%s\": %s", option, msg);
+ return 1;
+}
+
/**
* nilfs_identify - pre-read mount options needed to identify mount instance
* @data: mount options
@@ -1228,24 +1253,9 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
p = strsep(&options, ",");
if (p != NULL && *p) {
token = match_token(p, tokens, args);
- if (token == Opt_snapshot) {
- if (!(sd->flags & MS_RDONLY)) {
- ret++;
- } else {
- sd->cno = simple_strtoull(args[0].from,
- NULL, 0);
- /*
- * No need to see the end pointer;
- * match_token() has done syntax
- * checking.
- */
- if (sd->cno == 0)
- ret++;
- }
- }
- if (ret)
- printk(KERN_ERR
- "NILFS: invalid mount option: %s\n", p);
+ if (token == Opt_snapshot)
+ ret = nilfs_parse_snapshot_option(p, &args[0],
+ sd);
}
if (!options)
break;
@@ -1326,10 +1336,10 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
} else if (!sd.cno) {
if (nilfs_tree_is_busy(s->s_root)) {
if ((flags ^ s->s_flags) & MS_RDONLY) {
- printk(KERN_ERR "NILFS: the device already "
- "has a %s mount.\n",
- (s->s_flags & MS_RDONLY) ?
- "read-only" : "read/write");
+ nilfs_msg(s, KERN_ERR,
+ "the device already has a %s mount.",
+ (s->s_flags & MS_RDONLY) ?
+ "read-only" : "read/write");
err = -EBUSY;
goto failed_super;
}
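
The super.c conversion above funnels every message through __nilfs_msg(), which prefixes the device name and level before handing the caller's format string to printk via the %pV/va_format mechanism. A rough user-space analogue of that wrapper (plain vsnprintf and a stand-in device id instead of printk and sb->s_id; not the kernel implementation) looks like this:

	#include <stdarg.h>
	#include <stdio.h>

	/* hypothetical analogue of __nilfs_msg(); sb_id stands in for sb->s_id
	 * and level is just an illustrative prefix string */
	static void nilfs_msg_demo(const char *sb_id, const char *level,
				   const char *fmt, ...)
	{
		char body[256];
		va_list args;

		va_start(args, fmt);
		vsnprintf(body, sizeof(body), fmt, args);
		va_end(args);

		if (sb_id)
			printf("%sNILFS (%s): %s\n", level, sb_id, body);
		else
			printf("%sNILFS: %s\n", level, body);
	}

	int main(void)
	{
		nilfs_msg_demo("sda1", "<3>", "unable to write superblock: err=%d", -5);
		nilfs_msg_demo(NULL, "<3>", "unable to create sysfs entry: err=%d", -12);
		return 0;
	}

Because the wrapper appends the newline itself, the converted call sites drop their trailing "\n", which is also why the kernel-doc above warns callers not to add one.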
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 8ffa42b704d8..490303e3d517 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -272,8 +272,8 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr,
err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
- printk(KERN_ERR "NILFS: unable to get checkpoint stat: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to get checkpoint stat: err=%d", err);
return err;
}
@@ -295,8 +295,8 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr,
err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
- printk(KERN_ERR "NILFS: unable to get checkpoint stat: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to get checkpoint stat: err=%d", err);
return err;
}
@@ -326,9 +326,9 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr,
{
__u64 cno;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
}
@@ -414,8 +414,8 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr,
err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat);
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
- printk(KERN_ERR "NILFS: unable to get segment stat: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to get segment stat: err=%d", err);
return err;
}
@@ -511,9 +511,9 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr,
{
u64 seg_seq;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
seg_seq = nilfs->ns_seg_seq;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq);
}
@@ -525,9 +525,9 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr,
{
__u64 segnum;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
segnum = nilfs->ns_segnum;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", segnum);
}
@@ -539,9 +539,9 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr,
{
__u64 nextnum;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
nextnum = nilfs->ns_nextnum;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum);
}
@@ -553,9 +553,9 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr,
{
unsigned long pseg_offset;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
pseg_offset = nilfs->ns_pseg_offset;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset);
}
@@ -567,9 +567,9 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr,
{
__u64 cno;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
}
@@ -581,9 +581,9 @@ nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr,
{
time_t ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
ctime = nilfs->ns_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return NILFS_SHOW_TIME(ctime, buf);
}
@@ -595,9 +595,9 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr,
{
time_t ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
ctime = nilfs->ns_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)ctime);
}
@@ -609,9 +609,9 @@ nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr,
{
time_t nongc_ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
nongc_ctime = nilfs->ns_nongc_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return NILFS_SHOW_TIME(nongc_ctime, buf);
}
@@ -623,9 +623,9 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr,
{
time_t nongc_ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
nongc_ctime = nilfs->ns_nongc_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)nongc_ctime);
@@ -638,9 +638,9 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr,
{
u32 ndirtyblks;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks);
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks);
}
@@ -789,14 +789,15 @@ nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr,
err = kstrtouint(skip_spaces(buf), 0, &val);
if (err) {
- printk(KERN_ERR "NILFS: unable to convert string: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to convert string: err=%d", err);
return err;
}
if (val < NILFS_SB_FREQ) {
val = NILFS_SB_FREQ;
- printk(KERN_WARNING "NILFS: superblock update frequency cannot be lesser than 10 seconds\n");
+ nilfs_msg(nilfs->ns_sb, KERN_WARNING,
+ "superblock update frequency cannot be lesser than 10 seconds");
}
down_write(&nilfs->ns_sem);
@@ -999,7 +1000,8 @@ int nilfs_sysfs_create_device_group(struct super_block *sb)
nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL);
if (unlikely(!nilfs->ns_dev_subgroups)) {
err = -ENOMEM;
- printk(KERN_ERR "NILFS: unable to allocate memory for device group\n");
+ nilfs_msg(sb, KERN_ERR,
+ "unable to allocate memory for device group");
goto failed_create_device_group;
}
@@ -1109,15 +1111,15 @@ int __init nilfs_sysfs_init(void)
nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj);
if (!nilfs_kset) {
err = -ENOMEM;
- printk(KERN_ERR "NILFS: unable to create sysfs entry: err %d\n",
- err);
+ nilfs_msg(NULL, KERN_ERR,
+ "unable to create sysfs entry: err=%d", err);
goto failed_sysfs_init;
}
err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: unable to create feature group: err %d\n",
- err);
+ nilfs_msg(NULL, KERN_ERR,
+ "unable to create feature group: err=%d", err);
goto cleanup_sysfs_init;
}
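
The sysfs.c hunks switch the reader side of the segment-constructor counters from ns_sem to ns_segctor_sem, matching the lock that actually protects those fields. The shape of each show callback is the classic "take the read side, copy the value, drop the lock" pattern; a small pthread rwlock analogue (names are illustrative, not the nilfs locks) shows the same idea:

	#include <pthread.h>
	#include <stdio.h>

	/* toy stand-ins for ns_segctor_sem and a segctor-updated counter */
	static pthread_rwlock_t segctor_sem = PTHREAD_RWLOCK_INITIALIZER;
	static unsigned long long seg_seq;

	static unsigned long long read_seg_seq(void)
	{
		unsigned long long val;

		pthread_rwlock_rdlock(&segctor_sem);   /* like down_read() */
		val = seg_seq;                         /* snapshot under the lock */
		pthread_rwlock_unlock(&segctor_sem);   /* like up_read() */
		return val;
	}

	int main(void)
	{
		printf("seg_seq=%llu\n", read_seg_seq());
		return 0;
	}

Copying the value out under the read lock keeps the critical section tiny, which is all these sysfs show handlers need.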
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index e9fd241b9a0a..2dd75bf619ad 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -56,12 +56,12 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
/**
* alloc_nilfs - allocate a nilfs object
- * @bdev: block device to which the_nilfs is related
+ * @sb: super block instance
*
* Return Value: On success, pointer to the_nilfs is returned.
* On error, NULL is returned.
*/
-struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+struct the_nilfs *alloc_nilfs(struct super_block *sb)
{
struct the_nilfs *nilfs;
@@ -69,7 +69,8 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
if (!nilfs)
return NULL;
- nilfs->ns_bdev = bdev;
+ nilfs->ns_sb = sb;
+ nilfs->ns_bdev = sb->s_bdev;
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
mutex_init(&nilfs->ns_snapshot_mount_mutex);
@@ -191,7 +192,10 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
nilfs->ns_cno = nilfs->ns_last_cno + 1;
if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
- printk(KERN_ERR "NILFS invalid last segment number.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "pointed segment number is out of range: segnum=%llu, nsegments=%lu",
+ (unsigned long long)nilfs->ns_segnum,
+ nilfs->ns_nsegments);
ret = -EINVAL;
}
return ret;
@@ -215,12 +219,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
int err;
if (!valid_fs) {
- printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
+ nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
if (s_flags & MS_RDONLY) {
- printk(KERN_INFO "NILFS: INFO: recovery "
- "required for readonly filesystem.\n");
- printk(KERN_INFO "NILFS: write access will "
- "be enabled during recovery.\n");
+ nilfs_msg(sb, KERN_INFO,
+ "recovery required for readonly filesystem");
+ nilfs_msg(sb, KERN_INFO,
+ "write access will be enabled during recovery");
}
}
@@ -235,13 +239,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto scan_error;
if (!nilfs_valid_sb(sbp[1])) {
- printk(KERN_WARNING
- "NILFS warning: unable to fall back to spare"
- "super block\n");
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to fall back to spare super block");
goto scan_error;
}
- printk(KERN_INFO
- "NILFS: try rollback from an earlier position\n");
+ nilfs_msg(sb, KERN_INFO,
+ "trying rollback from an earlier position");
/*
* restore super block with its spare and reconfigure
@@ -254,10 +257,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
/* verify consistency between two super blocks */
blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
if (blocksize != nilfs->ns_blocksize) {
- printk(KERN_WARNING
- "NILFS warning: blocksize differs between "
- "two super blocks (%d != %d)\n",
- blocksize, nilfs->ns_blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "blocksize differs between two super blocks (%d != %d)",
+ blocksize, nilfs->ns_blocksize);
goto scan_error;
}
@@ -276,7 +278,8 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: error loading super root.\n");
+ nilfs_msg(sb, KERN_ERR, "error %d while loading super root",
+ err);
goto failed;
}
@@ -287,30 +290,29 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
__u64 features;
if (nilfs_test_opt(nilfs, NORECOVERY)) {
- printk(KERN_INFO "NILFS: norecovery option specified. "
- "skipping roll-forward recovery\n");
+ nilfs_msg(sb, KERN_INFO,
+ "norecovery option specified, skipping roll-forward recovery");
goto skip_recovery;
}
features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
~NILFS_FEATURE_COMPAT_RO_SUPP;
if (features) {
- printk(KERN_ERR "NILFS: couldn't proceed with "
- "recovery because of unsupported optional "
- "features (%llx)\n",
- (unsigned long long)features);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't proceed with recovery because of unsupported optional features (%llx)",
+ (unsigned long long)features);
err = -EROFS;
goto failed_unload;
}
if (really_read_only) {
- printk(KERN_ERR "NILFS: write access "
- "unavailable, cannot proceed.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "write access unavailable, cannot proceed");
err = -EROFS;
goto failed_unload;
}
sb->s_flags &= ~MS_RDONLY;
} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
- printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
- "option was specified for a read/write mount\n");
+ nilfs_msg(sb, KERN_ERR,
+ "recovery cancelled because norecovery option was specified for a read/write mount");
err = -EINVAL;
goto failed_unload;
}
@@ -325,11 +327,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
up_write(&nilfs->ns_sem);
if (err) {
- printk(KERN_ERR "NILFS: failed to update super block. "
- "recovery unfinished.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "error %d updating super block. recovery unfinished.",
+ err);
goto failed_unload;
}
- printk(KERN_INFO "NILFS: recovery complete.\n");
+ nilfs_msg(sb, KERN_INFO, "recovery complete");
skip_recovery:
nilfs_clear_recovery_info(&ri);
@@ -337,7 +340,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
return 0;
scan_error:
- printk(KERN_ERR "NILFS: error searching super root.\n");
+ nilfs_msg(sb, KERN_ERR, "error %d while searching super root", err);
goto failed;
failed_unload:
@@ -384,12 +387,11 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
{
if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
- printk(KERN_ERR "NILFS: unsupported revision "
- "(superblock rev.=%d.%d, current rev.=%d.%d). "
- "Please check the version of mkfs.nilfs.\n",
- le32_to_cpu(sbp->s_rev_level),
- le16_to_cpu(sbp->s_minor_rev_level),
- NILFS_CURRENT_REV, NILFS_MINOR_REV);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
+ le32_to_cpu(sbp->s_rev_level),
+ le16_to_cpu(sbp->s_minor_rev_level),
+ NILFS_CURRENT_REV, NILFS_MINOR_REV);
return -EINVAL;
}
nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
@@ -398,12 +400,14 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
if (nilfs->ns_inode_size > nilfs->ns_blocksize) {
- printk(KERN_ERR "NILFS: too large inode size: %d bytes.\n",
- nilfs->ns_inode_size);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "too large inode size: %d bytes",
+ nilfs->ns_inode_size);
return -EINVAL;
} else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) {
- printk(KERN_ERR "NILFS: too small inode size: %d bytes.\n",
- nilfs->ns_inode_size);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "too small inode size: %d bytes",
+ nilfs->ns_inode_size);
return -EINVAL;
}
@@ -411,7 +415,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
- printk(KERN_ERR "NILFS: too short segment.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "too short segment: %lu blocks",
+ nilfs->ns_blocks_per_segment);
return -EINVAL;
}
@@ -420,7 +426,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
le32_to_cpu(sbp->s_r_segments_percentage);
if (nilfs->ns_r_segments_percentage < 1 ||
nilfs->ns_r_segments_percentage > 99) {
- printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "invalid reserved segments percentage: %lu",
+ nilfs->ns_r_segments_percentage);
return -EINVAL;
}
@@ -504,16 +512,16 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
if (!sbp[0]) {
if (!sbp[1]) {
- printk(KERN_ERR "NILFS: unable to read superblock\n");
+ nilfs_msg(sb, KERN_ERR, "unable to read superblock");
return -EIO;
}
- printk(KERN_WARNING
- "NILFS warning: unable to read primary superblock "
- "(blocksize = %d)\n", blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to read primary superblock (blocksize = %d)",
+ blocksize);
} else if (!sbp[1]) {
- printk(KERN_WARNING
- "NILFS warning: unable to read secondary superblock "
- "(blocksize = %d)\n", blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to read secondary superblock (blocksize = %d)",
+ blocksize);
}
/*
@@ -535,14 +543,14 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
}
if (!valid[swp]) {
nilfs_release_super_block(nilfs);
- printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
- sb->s_id);
+ nilfs_msg(sb, KERN_ERR, "couldn't find nilfs on the device");
return -EINVAL;
}
if (!valid[!swp])
- printk(KERN_WARNING "NILFS warning: broken superblock. "
- "using spare superblock (blocksize = %d).\n", blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "broken superblock, retrying with spare superblock (blocksize = %d)",
+ blocksize);
if (swp)
nilfs_swap_super_block(nilfs);
@@ -576,7 +584,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
- printk(KERN_ERR "NILFS: unable to set blocksize\n");
+ nilfs_msg(sb, KERN_ERR, "unable to set blocksize");
err = -EINVAL;
goto out;
}
@@ -595,8 +603,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
if (blocksize < NILFS_MIN_BLOCK_SIZE ||
blocksize > NILFS_MAX_BLOCK_SIZE) {
- printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
- "filesystem blocksize %d\n", blocksize);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't mount because of unsupported filesystem blocksize %d",
+ blocksize);
err = -EINVAL;
goto failed_sbh;
}
@@ -604,10 +613,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
if (blocksize < hw_blocksize) {
- printk(KERN_ERR
- "NILFS: blocksize %d too small for device "
- "(sector-size = %d).\n",
- blocksize, hw_blocksize);
+ nilfs_msg(sb, KERN_ERR,
+ "blocksize %d too small for device (sector-size = %d)",
+ blocksize, hw_blocksize);
err = -EINVAL;
goto failed_sbh;
}
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 79369fd6b13b..b305c6f033e7 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -43,6 +43,7 @@ enum {
* struct the_nilfs - struct to supervise multiple nilfs mount points
* @ns_flags: flags
* @ns_flushed_device: flag indicating if all volatile data was flushed
+ * @ns_sb: back pointer to super block instance
* @ns_bdev: block device
* @ns_sem: semaphore for shared states
* @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
@@ -102,6 +103,7 @@ struct the_nilfs {
unsigned long ns_flags;
int ns_flushed_device;
+ struct super_block *ns_sb;
struct block_device *ns_bdev;
struct rw_semaphore ns_sem;
struct mutex ns_snapshot_mount_mutex;
@@ -120,11 +122,8 @@ struct the_nilfs {
unsigned int ns_sb_update_freq;
/*
- * Following fields are dedicated to a writable FS-instance.
- * Except for the period seeking checkpoint, code outside the segment
- * constructor must lock a segment semaphore while accessing these
- * fields.
- * The writable FS-instance is sole during a lifetime of the_nilfs.
+ * The following fields are updated by a writable FS-instance.
+ * These fields are protected by ns_segctor_sem outside load_nilfs().
*/
u64 ns_seg_seq;
__u64 ns_segnum;
@@ -281,7 +280,7 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
}
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
-struct the_nilfs *alloc_nilfs(struct block_device *bdev);
+struct the_nilfs *alloc_nilfs(struct super_block *sb);
void destroy_nilfs(struct the_nilfs *nilfs);
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 460c0cedab3a..7dabbc31060e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6106,6 +6106,43 @@ void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
}
}
+/*
+ * Try to flush truncate logs if we can free enough clusters from it.
+ * As for return value, "< 0" means error, "0" no space and "1" means
+ * we have freed enough spaces and let the caller try to allocate again.
+ */
+int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+ unsigned int needed)
+{
+ tid_t target;
+ int ret = 0;
+ unsigned int truncated_clusters;
+
+ inode_lock(osb->osb_tl_inode);
+ truncated_clusters = osb->truncated_clusters;
+ inode_unlock(osb->osb_tl_inode);
+
+ /*
+ * Check whether we can succeed in allocating if we free
+ * the truncate log.
+ */
+ if (truncated_clusters < needed)
+ goto out;
+
+ ret = ocfs2_flush_truncate_log(osb);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
+ jbd2_log_wait_commit(osb->journal->j_journal, target);
+ ret = 1;
+ }
+out:
+ return ret;
+}
+
static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
int slot_num,
struct inode **tl_inode,
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index f3dc1b0dfffc..4a5152ec88a3 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -188,6 +188,8 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
u64 start_blk,
unsigned int num_clusters);
int __ocfs2_flush_truncate_log(struct ocfs2_super *osb);
+int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+ unsigned int needed);
/*
* Process local structure which describes the block unlinks done
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index af2adfcb0f6f..98d36548153d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1645,43 +1645,6 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
return ret;
}
-/*
- * Try to flush truncate logs if we can free enough clusters from it.
- * As for return value, "< 0" means error, "0" no space and "1" means
- * we have freed enough spaces and let the caller try to allocate again.
- */
-static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
- unsigned int needed)
-{
- tid_t target;
- int ret = 0;
- unsigned int truncated_clusters;
-
- inode_lock(osb->osb_tl_inode);
- truncated_clusters = osb->truncated_clusters;
- inode_unlock(osb->osb_tl_inode);
-
- /*
- * Check whether we can succeed in allocating if we free
- * the truncate log.
- */
- if (truncated_clusters < needed)
- goto out;
-
- ret = ocfs2_flush_truncate_log(osb);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
- jbd2_log_wait_commit(osb->journal->j_journal, target);
- ret = 1;
- }
-out:
- return ret;
-}
-
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct page **pagep, void **fsdata,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 8107d0d0c3f6..e9f3705c4c9f 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1004,6 +1004,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
u8 nodenum, u8 *real_master);
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 12e064b8be9a..533bd524e41e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -172,12 +172,10 @@ void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct hlist_head *bucket;
- struct qstr *q;
assert_spin_locked(&dlm->spinlock);
- q = &res->lockname;
- bucket = dlm_lockres_hash(dlm, q->hash);
+ bucket = dlm_lockres_hash(dlm, res->lockname.hash);
/* get a reference for our hashtable */
dlm_lockres_get(res);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 13719d3f35f8..6ea06f8a7d29 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2276,9 +2276,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
dlm->name, namelen, lockname, res->owner, r);
dlm_print_one_lock_resource(res);
- BUG();
- }
- return ret ? ret : r;
+ if (r == -ENOMEM)
+ BUG();
+ } else
+ ret = r;
+
+ return ret;
}
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -2416,48 +2419,26 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
}
spin_lock(&res->spinlock);
- BUG_ON(!(res->state & DLM_LOCK_RES_DROPPING_REF));
- if (!list_empty(&res->purge)) {
- mlog(0, "%s: Removing res %.*s from purgelist\n",
- dlm->name, res->lockname.len, res->lockname.name);
- list_del_init(&res->purge);
- dlm_lockres_put(res);
- dlm->purge_count--;
- }
-
- if (!__dlm_lockres_unused(res)) {
- mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
- dlm->name, res->lockname.len, res->lockname.name);
- __dlm_print_one_lock_resource(res);
- BUG();
- }
-
- __dlm_unhash_lockres(dlm, res);
-
- spin_lock(&dlm->track_lock);
- if (!list_empty(&res->tracking))
- list_del_init(&res->tracking);
- else {
- mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
- dlm->name, res->lockname.len, res->lockname.name);
- __dlm_print_one_lock_resource(res);
+ if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
+ "but it is already derefed!\n", dlm->name,
+ res->lockname.len, res->lockname.name, node);
+ ret = 0;
+ goto done;
}
- spin_unlock(&dlm->track_lock);
- /* lockres is not in the hash now. drop the flag and wake up
- * any processes waiting in dlm_get_lock_resource.
- */
- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ __dlm_do_purge_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
- dlm_lockres_put(res);
-
spin_unlock(&dlm->spinlock);
ret = 0;
-
done:
+ if (res)
+ dlm_lockres_put(res);
dlm_put(dlm);
return ret;
}
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index f6b313898763..dd5cb8bcefd1 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2343,6 +2343,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
struct dlm_lock_resource *res;
int i;
struct hlist_head *bucket;
+ struct hlist_node *tmp;
struct dlm_lock *lock;
@@ -2365,7 +2366,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
*/
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
- hlist_for_each_entry(res, bucket, hash_node) {
+ hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
/* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */
if (dlm_is_recovery_lock(res->lockname.name,
@@ -2386,8 +2387,17 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
break;
}
}
- dlm_lockres_clear_refmap_bit(dlm, res,
- dead_node);
+
+ if ((res->owner == dead_node) &&
+ (res->state & DLM_LOCK_RES_DROPPING_REF)) {
+ dlm_lockres_get(res);
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ continue;
+ } else if (res->owner == dlm->node_num)
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
spin_unlock(&res->spinlock);
continue;
}
@@ -2398,14 +2408,17 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
mlog(0, "%s:%.*s: owned by "
"dead node %u, this node was "
- "dropping its ref when it died. "
- "continue, dropping the flag.\n",
+ "dropping its ref when master died. "
+ "continue, purging the lockres.\n",
dlm->name, res->lockname.len,
res->lockname.name, dead_node);
+ dlm_lockres_get(res);
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ continue;
}
- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
- dlm_move_lockres_to_recovery_list(dlm,
- res);
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 68d239ba0c63..838a06d4066a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -160,6 +160,52 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
spin_unlock(&dlm->spinlock);
}
+/*
+ * Do the real purge work:
+ * unhash the lockres, and
+ * clear flag DLM_LOCK_RES_DROPPING_REF.
+ * It requires dlm and lockres spinlock to be taken.
+ */
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (!list_empty(&res->purge)) {
+ mlog(0, "%s: Removing res %.*s from purgelist\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ list_del_init(&res->purge);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+ }
+
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ __dlm_unhash_lockres(dlm, res);
+
+ spin_lock(&dlm->track_lock);
+ if (!list_empty(&res->tracking))
+ list_del_init(&res->tracking);
+ else {
+ mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ }
+ spin_unlock(&dlm->track_lock);
+
+ /*
+ * lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource.
+ */
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+}
+
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
@@ -175,6 +221,13 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
res->lockname.len, res->lockname.name, master);
if (!master) {
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ spin_unlock(&res->spinlock);
+ return;
+ }
+
res->state |= DLM_LOCK_RES_DROPPING_REF;
/* drop spinlock... retake below */
spin_unlock(&res->spinlock);
@@ -203,8 +256,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
dlm->purge_count--;
}
- if (!master && ret != 0) {
- mlog(0, "%s: deref %.*s in progress or master goes down\n",
+ if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
+ mlog(0, "%s: deref %.*s in progress\n",
dlm->name, res->lockname.len, res->lockname.name);
spin_unlock(&res->spinlock);
return;
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 47b3b2d4e775..ef474cdd6404 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -469,7 +469,7 @@ static int dlmfs_mkdir(struct inode * dir,
{
int status;
struct inode *inode = NULL;
- struct qstr *domain = &dentry->d_name;
+ const struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
struct ocfs2_cluster_connection *conn;
@@ -518,7 +518,7 @@ static int dlmfs_create(struct inode *dir,
{
int status = 0;
struct inode *inode;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
mlog(0, "create %.*s\n", name->len, name->name);
diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 0499e3fb7bdb..f70cda2f090d 100644
--- a/fs/ocfs2/dlmfs/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -667,7 +667,7 @@ void user_dlm_set_locking_protocol(void)
ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
}
-struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
+struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name)
{
int rc;
struct ocfs2_cluster_connection *conn;
diff --git a/fs/ocfs2/dlmfs/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
index 3b42d79531d7..ede94a6e7fd3 100644
--- a/fs/ocfs2/dlmfs/userdlm.h
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -83,7 +83,7 @@ void user_dlm_write_lvb(struct inode *inode,
ssize_t user_dlm_read_lvb(struct inode *inode,
char *val,
unsigned int len);
-struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
+struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name);
void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
void user_dlm_set_locking_protocol(void);
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index ced70c8139f7..c9e828ec3c8e 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -1007,10 +1007,17 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
lc->oc_type = NO_CONTROLD;
rc = dlm_new_lockspace(conn->cc_name, conn->cc_cluster_name,
- DLM_LSFL_FS, DLM_LVB_LEN,
+ DLM_LSFL_FS | DLM_LSFL_NEWEXCL, DLM_LVB_LEN,
&ocfs2_ls_ops, conn, &ops_rv, &fsdlm);
- if (rc)
+ if (rc) {
+ if (rc == -EEXIST || rc == -EPROTO)
+ printk(KERN_ERR "ocfs2: Unable to create the "
+ "lockspace %s (%d), because a ocfs2-tools "
+ "program is running on this file system "
+ "with the same name lockspace\n",
+ conn->cc_name, rc);
goto out;
+ }
if (ops_rv == -EOPNOTSUPP) {
lc->oc_type = WITH_CONTROLD;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 2f19aeec5482..ea47120a85ff 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1164,7 +1164,8 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
int flags,
struct ocfs2_alloc_context **ac)
{
- int status;
+ int status, ret = 0;
+ int retried = 0;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
@@ -1189,7 +1190,24 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
}
if (status == -ENOSPC) {
+retry:
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
+ /* Retry if there is sufficient space cached in truncate log */
+ if (status == -ENOSPC && !retried) {
+ retried = 1;
+ ocfs2_inode_unlock((*ac)->ac_inode, 1);
+ inode_unlock((*ac)->ac_inode);
+
+ ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
+ if (ret == 1)
+ goto retry;
+
+ if (ret < 0)
+ mlog_errno(ret);
+
+ inode_lock((*ac)->ac_inode);
+ ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+ }
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
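
The suballoc retry above has to drop the inode locks before flushing the truncate log and re-take them before retrying the reservation, since the flush itself needs those locks. A compressed user-space sketch of that unlock / try-to-free / relock / retry shape (a mutex and two counters stand in for the ocfs2 locks, the cluster bitmap, and the truncate log):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int cached_free;   /* stands in for the truncate log */
	static unsigned int pool_free;     /* stands in for the cluster bitmap */

	/* hypothetical stand-ins for the ocfs2 reservation and log-flush calls */
	static int reserve_clusters(unsigned int want)
	{
		if (pool_free < want)
			return -1;             /* -ENOSPC */
		pool_free -= want;
		return 0;
	}

	static int try_to_free_cached(unsigned int needed)
	{
		if (cached_free < needed)
			return 0;              /* nothing worth flushing */
		pool_free += cached_free;      /* "flush" the cached clusters */
		cached_free = 0;
		return 1;
	}

	int main(void)
	{
		unsigned int want = 8;
		int retried = 0, status;

		cached_free = 16;
		pool_free = 4;

		pthread_mutex_lock(&alloc_lock);
	retry:
		status = reserve_clusters(want);
		if (status && !retried) {
			retried = 1;
			pthread_mutex_unlock(&alloc_lock);   /* drop lock to flush */
			if (try_to_free_cached(want) == 1) {
				pthread_mutex_lock(&alloc_lock);
				goto retry;              /* space freed, try again */
			}
			pthread_mutex_lock(&alloc_lock);
		}
		pthread_mutex_unlock(&alloc_lock);

		printf("status=%d pool_free=%u\n", status, pool_free);
		return 0;
	}

The single "retried" flag mirrors the patch: the allocation is retried at most once after a successful flush, so a genuinely full volume still fails with -ENOSPC.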
diff --git a/fs/open.c b/fs/open.c
index bf66cf1a9f5c..4fd6e256f4f4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -998,6 +998,26 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(file_open_root);
+struct file *filp_clone_open(struct file *oldfile)
+{
+ struct file *file;
+ int retval;
+
+ file = get_empty_filp();
+ if (IS_ERR(file))
+ return file;
+
+ file->f_flags = oldfile->f_flags;
+ retval = vfs_open(&oldfile->f_path, file, oldfile->f_cred);
+ if (retval) {
+ put_filp(file);
+ return ERR_PTR(retval);
+ }
+
+ return file;
+}
+EXPORT_SYMBOL(filp_clone_open);
+
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
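
The new filp_clone_open() export above reopens the same path with the original file's flags and credentials and hands back a fresh struct file. A hypothetical in-kernel caller (sketch only, not a real user of the export; error handling follows the ERR_PTR convention the function itself uses):

	/* hypothetical caller: duplicate an open struct file; the result is
	 * an independent file that must eventually be released with fput() */
	struct file *reopen_file(struct file *oldfile)
	{
		struct file *newfile = filp_clone_open(oldfile);

		if (IS_ERR(newfile))
			return newfile;        /* propagate the ERR_PTR() */

		/* ... use newfile ... */
		return newfile;
	}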
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 5dfc4f3cfe68..00235bf644dc 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -73,6 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
}
}
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
ret = 1;
out_release_op:
op_release(new_op);
@@ -94,6 +95,9 @@ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int ret;
+ if (time_before(jiffies, dentry->d_time))
+ return 1;
+
if (flags & LOOKUP_RCU)
return -ECHILD;
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 2e63e6d0a68e..28a0557a69be 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -262,7 +262,7 @@ int orangefs_getattr(struct vfsmount *mnt,
"orangefs_getattr: called on %s\n",
dentry->d_name.name);
- ret = orangefs_inode_getattr(inode, 0, 1);
+ ret = orangefs_inode_getattr(inode, 0, 0);
if (ret == 0) {
generic_fillattr(inode, kstat);
@@ -384,7 +384,7 @@ struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref
if (!inode || !(inode->i_state & I_NEW))
return inode;
- error = orangefs_inode_getattr(inode, 1, 0);
+ error = orangefs_inode_getattr(inode, 1, 1);
if (error) {
iget_failed(inode);
return ERR_PTR(error);
@@ -429,7 +429,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
orangefs_set_inode(inode, ref);
inode->i_ino = hash; /* needed for stat etc */
- error = orangefs_inode_getattr(inode, 1, 0);
+ error = orangefs_inode_getattr(inode, 1, 1);
if (error)
goto out_iput;
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 7e8dfa97c44a..62c525936ee8 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -72,6 +72,8 @@ static int orangefs_create(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: dentry instantiated for %s\n",
@@ -181,6 +183,8 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
if (IS_ERR(inode)) {
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -189,6 +193,8 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+
gossip_debug(GOSSIP_NAME_DEBUG,
"%s:%s:%d "
"Found good inode [%lu] with count [%d]\n",
@@ -316,6 +322,8 @@ static int orangefs_symlink(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
"Inode (Symlink) %pU -> %s\n",
@@ -378,6 +386,8 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
"Inode (Directory) %pU -> %s\n",
@@ -408,6 +418,8 @@ static int orangefs_rename(struct inode *old_dir,
"orangefs_rename: called (%pd2 => %pd2) ct=%d\n",
old_dentry, new_dentry, d_count(new_dentry));
+ ORANGEFS_I(new_dentry->d_parent->d_inode)->getattr_time = jiffies - 1;
+
new_op = op_alloc(ORANGEFS_VFS_OP_RENAME);
if (!new_op)
return -EINVAL;
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 4b6e132d5a0f..633c07a6e3d8 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -246,6 +246,8 @@ struct orangefs_inode_s {
* with this object
*/
unsigned long pinode_flags;
+
+ unsigned long getattr_time;
};
#define P_ATIME_FLAG 0
@@ -527,7 +529,7 @@ int orangefs_inode_setxattr(struct inode *inode,
size_t size,
int flags);
-int orangefs_inode_getattr(struct inode *inode, int new, int size);
+int orangefs_inode_getattr(struct inode *inode, int new, int bypass);
int orangefs_inode_check_changed(struct inode *inode);
@@ -546,6 +548,8 @@ extern struct mutex request_mutex;
extern int debug;
extern int op_timeout_secs;
extern int slot_timeout_secs;
+extern int dcache_timeout_msecs;
+extern int getattr_timeout_msecs;
extern struct list_head orangefs_superblocks;
extern spinlock_t orangefs_superblocks_lock;
extern struct list_head orangefs_request_list;
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index 6f072a8c0de1..e9fd5755c05f 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -47,6 +47,8 @@ struct client_debug_mask client_debug_mask = { NULL, 0, 0 };
unsigned int kernel_mask_set_mod_init; /* implicitly false */
int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS;
int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS;
+int dcache_timeout_msecs = 50;
+int getattr_timeout_msecs = 50;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ORANGEFS Development Team");
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index 5c03113e3ad2..375708c2db87 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -61,10 +61,21 @@
* Slots are requested and waited for,
* the wait times out after slot_timeout_secs.
*
+ * What: /sys/fs/orangefs/dcache_timeout_msecs
+ * Date: Jul 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Time lookup is valid in milliseconds.
+ *
+ * What: /sys/fs/orangefs/getattr_timeout_msecs
+ * Date: Jul 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Time getattr is valid in milliseconds.
*
* What: /sys/fs/orangefs/acache/...
* Date: Jun 2015
- * Contact: Mike Marshall <hubcap@omnibond.com>
+ * Contact: Martin Brandenburg <martin@omnibond.com>
* Description:
* Attribute cache configurable settings.
*
@@ -117,6 +128,8 @@ struct orangefs_obj {
int perf_history_size;
int perf_time_interval_secs;
int slot_timeout_secs;
+ int dcache_timeout_msecs;
+ int getattr_timeout_msecs;
};
struct acache_orangefs_obj {
@@ -658,6 +671,20 @@ static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr)
"%d\n",
slot_timeout_secs);
goto out;
+ } else if (!strcmp(orangefs_attr->attr.name,
+ "dcache_timeout_msecs")) {
+ rc = scnprintf(buf,
+ PAGE_SIZE,
+ "%d\n",
+ dcache_timeout_msecs);
+ goto out;
+ } else if (!strcmp(orangefs_attr->attr.name,
+ "getattr_timeout_msecs")) {
+ rc = scnprintf(buf,
+ PAGE_SIZE,
+ "%d\n",
+ getattr_timeout_msecs);
+ goto out;
} else {
goto out;
}
@@ -734,6 +761,12 @@ static ssize_t int_store(struct orangefs_obj *orangefs_obj,
} else if (!strcmp(attr->attr.name, "slot_timeout_secs")) {
rc = kstrtoint(buf, 0, &slot_timeout_secs);
goto out;
+ } else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) {
+ rc = kstrtoint(buf, 0, &dcache_timeout_msecs);
+ goto out;
+ } else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) {
+ rc = kstrtoint(buf, 0, &getattr_timeout_msecs);
+ goto out;
} else {
goto out;
}
@@ -1361,6 +1394,12 @@ static struct orangefs_attribute op_timeout_secs_attribute =
static struct orangefs_attribute slot_timeout_secs_attribute =
__ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store);
+static struct orangefs_attribute dcache_timeout_msecs_attribute =
+ __ATTR(dcache_timeout_msecs, 0664, int_orangefs_show, int_store);
+
+static struct orangefs_attribute getattr_timeout_msecs_attribute =
+ __ATTR(getattr_timeout_msecs, 0664, int_orangefs_show, int_store);
+
static struct orangefs_attribute perf_counter_reset_attribute =
__ATTR(perf_counter_reset,
0664,
@@ -1382,6 +1421,8 @@ static struct orangefs_attribute perf_time_interval_secs_attribute =
static struct attribute *orangefs_default_attrs[] = {
&op_timeout_secs_attribute.attr,
&slot_timeout_secs_attribute.attr,
+ &dcache_timeout_msecs_attribute.attr,
+ &getattr_timeout_msecs_attribute.attr,
&perf_counter_reset_attribute.attr,
&perf_history_size_attribute.attr,
&perf_time_interval_secs_attribute.attr,
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index c5fbc62357c6..d13c7291fd05 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -251,7 +251,7 @@ static int orangefs_inode_is_stale(struct inode *inode, int new,
return 0;
}
-int orangefs_inode_getattr(struct inode *inode, int new, int size)
+int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct orangefs_kernel_op_s *new_op;
@@ -261,12 +261,16 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__,
get_khandle_from_ino(inode));
+ if (!new && !bypass) {
+ if (time_before(jiffies, orangefs_inode->getattr_time))
+ return 0;
+ }
+
new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
if (!new_op)
return -ENOMEM;
new_op->upcall.req.getattr.refn = orangefs_inode->refn;
- new_op->upcall.req.getattr.mask = size ?
- ORANGEFS_ATTR_SYS_ALL_NOHINT : ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE;
+ new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT;
ret = service_operation(new_op, __func__,
get_interruptible_flag(inode));
@@ -287,20 +291,18 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
case S_IFREG:
inode->i_flags = orangefs_inode_flags(&new_op->
downcall.resp.getattr.attributes);
- if (size) {
- inode_size = (loff_t)new_op->
- downcall.resp.getattr.attributes.size;
- rounded_up_size =
- (inode_size + (4096 - (inode_size % 4096)));
- inode->i_size = inode_size;
- orangefs_inode->blksize =
- new_op->downcall.resp.getattr.attributes.blksize;
- spin_lock(&inode->i_lock);
- inode->i_bytes = inode_size;
- inode->i_blocks =
- (unsigned long)(rounded_up_size / 512);
- spin_unlock(&inode->i_lock);
- }
+ inode_size = (loff_t)new_op->
+ downcall.resp.getattr.attributes.size;
+ rounded_up_size =
+ (inode_size + (4096 - (inode_size % 4096)));
+ inode->i_size = inode_size;
+ orangefs_inode->blksize =
+ new_op->downcall.resp.getattr.attributes.blksize;
+ spin_lock(&inode->i_lock);
+ inode->i_bytes = inode_size;
+ inode->i_blocks =
+ (unsigned long)(rounded_up_size / 512);
+ spin_unlock(&inode->i_lock);
break;
case S_IFDIR:
inode->i_size = PAGE_SIZE;
@@ -345,6 +347,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
inode->i_mode = type | (is_root_handle(inode) ? S_ISVTX : 0) |
orangefs_inode_perms(&new_op->downcall.resp.getattr.attributes);
+ orangefs_inode->getattr_time = jiffies + getattr_timeout_msecs*HZ/1000;
ret = 0;
out:
op_release(new_op);
@@ -418,6 +421,7 @@ int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
ClearMtimeFlag(orangefs_inode);
ClearCtimeFlag(orangefs_inode);
ClearModeFlag(orangefs_inode);
+ orangefs_inode->getattr_time = jiffies - 1;
}
return ret;
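
The orangefs changes above cache dentry and attribute validity by stamping a deadline of jiffies + timeout_msecs*HZ/1000 and short-circuiting lookups and getattrs while time_before(jiffies, deadline) still holds; setattr and rename invalidate the cache by setting the stamp to jiffies - 1. A user-space analogue of that expiry check using a monotonic clock (the names and 50 ms default are illustrative):

	#include <stdio.h>
	#include <time.h>

	static long long now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	int main(void)
	{
		long long timeout_msecs = 50;               /* like getattr_timeout_msecs */
		long long valid_until = now_ms() + timeout_msecs;

		if (now_ms() < valid_until)                 /* time_before() analogue */
			printf("cached attributes still valid, skip getattr\n");
		else
			printf("cache expired, issue a fresh getattr\n");
		return 0;
	}

Setting valid_until to a time already in the past is the analogue of the "jiffies - 1" invalidation the patch sprinkles after operations that change the inode.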
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 1efc6f8a5224..3d7418c728f5 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -207,14 +207,6 @@ typedef __s64 ORANGEFS_offset;
ORANGEFS_ATTR_SYS_DIRENT_COUNT | \
ORANGEFS_ATTR_SYS_BLKSIZE)
-#define ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE \
- (ORANGEFS_ATTR_SYS_COMMON_ALL | \
- ORANGEFS_ATTR_SYS_LNK_TARGET | \
- ORANGEFS_ATTR_SYS_DFILE_COUNT | \
- ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT | \
- ORANGEFS_ATTR_SYS_DIRENT_COUNT | \
- ORANGEFS_ATTR_SYS_BLKSIZE)
-
#define ORANGEFS_XATTR_REPLACE 0x2
#define ORANGEFS_XATTR_CREATE 0x1
#define ORANGEFS_MAX_SERVER_ADDR_LEN 256
diff --git a/fs/pipe.c b/fs/pipe.c
index 4b32928f5426..4ebe6b2e5217 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -144,10 +144,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
struct page *page = buf->page;
if (page_count(page) == 1) {
- if (memcg_kmem_enabled()) {
+ if (memcg_kmem_enabled())
memcg_kmem_uncharge(page, 0);
- __ClearPageKmemcg(page);
- }
__SetPageLocked(page);
return 0;
}
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 7151ea428041..12c6922c913c 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -4,6 +4,7 @@
obj-y += proc.o
+CFLAGS_task_mmu.o += $(call cc-option,-Wno-override-init,)
proc-y := nommu.o task_nommu.o
proc-$(CONFIG_MMU) := task_mmu.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 31370da2ee7c..54e270262979 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -579,11 +579,8 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
unsigned long totalpages = totalram_pages + total_swap_pages;
unsigned long points = 0;
- read_lock(&tasklist_lock);
- if (pid_alive(task))
- points = oom_badness(task, NULL, NULL, totalpages) *
- 1000 / totalpages;
- read_unlock(&tasklist_lock);
+ points = oom_badness(task, NULL, NULL, totalpages) *
+ 1000 / totalpages;
seq_printf(m, "%lu\n", points);
return 0;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b59db94d2ff4..1b93650dda2f 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -474,7 +474,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
{
struct ctl_table_header *head = grab_header(dir);
struct ctl_table_header *h = NULL;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
struct ctl_table *p;
struct inode *inode;
struct dentry *err = ERR_PTR(-ENOENT);
@@ -834,7 +834,7 @@ static int sysctl_is_seen(struct ctl_table_header *p)
return res;
}
-static int proc_sys_compare(const struct dentry *parent, const struct dentry *dentry,
+static int proc_sys_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct ctl_table_header *head;
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 510413eb25b8..7907e456ac4f 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -80,19 +80,17 @@ static u64 get_iowait_time(int cpu)
static int show_stat(struct seq_file *p, void *v)
{
int i, j;
- unsigned long jif;
u64 user, nice, system, idle, iowait, irq, softirq, steal;
u64 guest, guest_nice;
u64 sum = 0;
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
- struct timespec boottime;
+ struct timespec64 boottime;
user = nice = system = idle = iowait =
irq = softirq = steal = 0;
guest = guest_nice = 0;
- getboottime(&boottime);
- jif = boottime.tv_sec;
+ getboottime64(&boottime);
for_each_possible_cpu(i) {
user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
@@ -163,12 +161,12 @@ static int show_stat(struct seq_file *p, void *v)
seq_printf(p,
"\nctxt %llu\n"
- "btime %lu\n"
+ "btime %llu\n"
"processes %lu\n"
"procs_running %lu\n"
"procs_blocked %lu\n",
nr_context_switches(),
- (unsigned long)jif,
+ (unsigned long long)boottime.tv_sec,
total_forks,
nr_running(),
nr_iowait());
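
The /proc/stat conversion above follows the usual y2038-safe pattern: keep the boot time in a struct timespec64 and widen tv_sec explicitly when printing. Condensed, with the seq_file pointer name taken from the hunk:

	struct timespec64 boottime;

	getboottime64(&boottime);
	/* tv_sec is time64_t; cast so it matches the %llu conversion */
	seq_printf(p, "btime %llu\n", (unsigned long long)boottime.tv_sec);
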
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 47516a794011..7a034d62cf8c 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -486,30 +486,21 @@ static int ramoops_parse_dt(struct platform_device *pdev,
struct ramoops_platform_data *pdata)
{
struct device_node *of_node = pdev->dev.of_node;
- struct device_node *mem_region;
- struct resource res;
+ struct resource *res;
u32 value;
int ret;
dev_dbg(&pdev->dev, "using Device Tree\n");
- mem_region = of_parse_phandle(of_node, "memory-region", 0);
- if (!mem_region) {
- dev_err(&pdev->dev, "no memory-region phandle\n");
- return -ENODEV;
- }
-
- ret = of_address_to_resource(mem_region, 0, &res);
- of_node_put(mem_region);
- if (ret) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
dev_err(&pdev->dev,
- "failed to translate memory-region to resource: %d\n",
- ret);
- return ret;
+ "failed to locate DT /reserved-memory resource\n");
+ return -EINVAL;
}
- pdata->mem_size = resource_size(&res);
- pdata->mem_address = res.start;
+ pdata->mem_size = resource_size(res);
+ pdata->mem_address = res->start;
pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");
@@ -652,11 +643,11 @@ fail_buf:
kfree(cxt->pstore.buf);
fail_clear:
cxt->pstore.bufsize = 0;
- kfree(cxt->mprz);
+ persistent_ram_free(cxt->mprz);
fail_init_mprz:
- kfree(cxt->fprz);
+ persistent_ram_free(cxt->fprz);
fail_init_fprz:
- kfree(cxt->cprz);
+ persistent_ram_free(cxt->cprz);
fail_init_cprz:
ramoops_free_przs(cxt);
fail_out:
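
The ramoops_parse_dt() rewrite above stops resolving the "memory-region" phandle by hand and instead consumes the struct resource that the platform/OF core attaches to the device for its reserved-memory carveout. A condensed sketch of that consumer pattern (error handling as in the hunk):

	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;	/* no reserved-memory region bound to this device */

	pdata->mem_address = res->start;
	pdata->mem_size = resource_size(res);
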
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index b751eea32e20..5db6f45b3fed 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
insert_ptr);
}
- memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
insert_ptr[0] = new_insert_ptr;
+ if (new_insert_ptr)
+ memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
return order;
}
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 9718da86ad01..821b34816976 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -100,10 +100,6 @@ static int switch_gc_head(struct ubifs_info *c)
if (err)
return err;
- err = ubifs_wbuf_sync_nolock(wbuf);
- if (err)
- return err;
-
err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
if (err)
return err;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 70349954e78b..4ec051089186 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -520,19 +520,19 @@ static int init_constants_early(struct ubifs_info *c)
c->max_write_shift = fls(c->max_write_size) - 1;
if (c->leb_size < UBIFS_MIN_LEB_SZ) {
- ubifs_err(c, "too small LEBs (%d bytes), min. is %d bytes",
- c->leb_size, UBIFS_MIN_LEB_SZ);
+ ubifs_errc(c, "too small LEBs (%d bytes), min. is %d bytes",
+ c->leb_size, UBIFS_MIN_LEB_SZ);
return -EINVAL;
}
if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
- ubifs_err(c, "too few LEBs (%d), min. is %d",
- c->leb_cnt, UBIFS_MIN_LEB_CNT);
+ ubifs_errc(c, "too few LEBs (%d), min. is %d",
+ c->leb_cnt, UBIFS_MIN_LEB_CNT);
return -EINVAL;
}
if (!is_power_of_2(c->min_io_size)) {
- ubifs_err(c, "bad min. I/O size %d", c->min_io_size);
+ ubifs_errc(c, "bad min. I/O size %d", c->min_io_size);
return -EINVAL;
}
@@ -543,8 +543,8 @@ static int init_constants_early(struct ubifs_info *c)
if (c->max_write_size < c->min_io_size ||
c->max_write_size % c->min_io_size ||
!is_power_of_2(c->max_write_size)) {
- ubifs_err(c, "bad write buffer size %d for %d min. I/O unit",
- c->max_write_size, c->min_io_size);
+ ubifs_errc(c, "bad write buffer size %d for %d min. I/O unit",
+ c->max_write_size, c->min_io_size);
return -EINVAL;
}
@@ -2108,8 +2108,9 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
- pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
- current->pid, name, (int)PTR_ERR(ubi));
+ if (!(flags & MS_SILENT))
+ pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
+ current->pid, name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index ddf9f6b9eee2..4617d459022a 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1783,8 +1783,8 @@ void ubifs_err(const struct ubifs_info *c, const char *fmt, ...);
__printf(2, 3)
void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
/*
- * A variant of 'ubifs_err()' which takes the UBIFS file-sytem description
- * object as an argument.
+ * A conditional variant of 'ubifs_err()' which doesn't output anything
+ * if probing (i.e. MS_SILENT is set).
*/
#define ubifs_errc(c, fmt, ...) \
do { \
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index b5fc27969e9d..e237811f09ce 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -592,19 +592,19 @@ static int ubifs_xattr_set(const struct xattr_handler *handler,
return __ubifs_removexattr(inode, name);
}
-const struct xattr_handler ubifs_user_xattr_handler = {
+static const struct xattr_handler ubifs_user_xattr_handler = {
.prefix = XATTR_USER_PREFIX,
.get = ubifs_xattr_get,
.set = ubifs_xattr_set,
};
-const struct xattr_handler ubifs_trusted_xattr_handler = {
+static const struct xattr_handler ubifs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = ubifs_xattr_get,
.set = ubifs_xattr_set,
};
-const struct xattr_handler ubifs_security_xattr_handler = {
+static const struct xattr_handler ubifs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = ubifs_xattr_get,
.set = ubifs_xattr_set,
diff --git a/fs/utimes.c b/fs/utimes.c
index 85c40f4f373d..794f5f5b1fb5 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -92,10 +92,11 @@ static int utimes_common(struct path *path, struct timespec *times)
* then we need to check permissions, because
* inode_change_ok() won't do it.
*/
- error = -EACCES;
+ error = -EPERM;
if (IS_IMMUTABLE(inode))
goto mnt_drop_write_and_out;
+ error = -EACCES;
if (!inode_owner_or_capable(inode)) {
error = inode_permission(inode, MAY_WRITE);
if (error)
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 3542d94fddce..fc593c869493 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -39,6 +39,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_btree.o \
xfs_da_btree.o \
xfs_da_format.o \
+ xfs_defer.o \
xfs_dir2.o \
xfs_dir2_block.o \
xfs_dir2_data.o \
@@ -51,6 +52,8 @@ xfs-y += $(addprefix libxfs/, \
xfs_inode_fork.o \
xfs_inode_buf.o \
xfs_log_rlimit.o \
+ xfs_rmap.o \
+ xfs_rmap_btree.o \
xfs_sb.o \
xfs_symlink_remote.o \
xfs_trans_resv.o \
@@ -100,11 +103,13 @@ xfs-y += xfs_log.o \
xfs_extfree_item.o \
xfs_icreate_item.o \
xfs_inode_item.o \
+ xfs_rmap_item.o \
xfs_log_recover.o \
xfs_trans_ail.o \
xfs_trans_buf.o \
xfs_trans_extfree.o \
xfs_trans_inode.o \
+ xfs_trans_rmap.o \
# optional features
xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
@@ -121,5 +126,4 @@ xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o
-xfs-$(CONFIG_NFSD_BLOCKLAYOUT) += xfs_pnfs.o
-xfs-$(CONFIG_NFSD_SCSILAYOUT) += xfs_pnfs.o
+xfs-$(CONFIG_EXPORTFS_BLOCK_OPS) += xfs_pnfs.o
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 88c26b827a2d..776ae2f325d1 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -24,8 +24,10 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
+#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
@@ -49,6 +51,81 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+xfs_extlen_t
+xfs_prealloc_blocks(
+ struct xfs_mount *mp)
+{
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return XFS_RMAP_BLOCK(mp) + 1;
+ if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ return XFS_FIBT_BLOCK(mp) + 1;
+ return XFS_IBT_BLOCK(mp) + 1;
+}
+
+/*
+ * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
+ * AGF buffer (PV 947395), we place constraints on the relationship among
+ * actual allocations for data blocks, freelist blocks, and potential file data
+ * bmap btree blocks. However, these restrictions may result in no actual space
+ * allocated for a delayed extent, for example, a data block in a certain AG is
+ * allocated but there is no additional block for the additional bmap btree
+ * block due to a split of the bmap btree of the file. The result of this may
+ * lead to an infinite loop when the file gets flushed to disk and all delayed
+ * extents need to be actually allocated. To get around this, we explicitly set
+ * aside a few blocks which will not be reserved in delayed allocation.
+ *
+ * When rmap is disabled, we need to reserve 4 fsbs _per AG_ for the freelist
+ * and 4 more to handle a potential split of the file's bmap btree.
+ *
+ * When rmap is enabled, we must also be able to handle two rmap btree inserts
+ * to record both the file data extent and a new bmbt block. The bmbt block
+ * might not be in the same AG as the file data extent. In the worst case
+ * the bmap btree splits multiple levels and all the new blocks come from
+ * different AGs, so set aside enough to handle rmap btree splits in all AGs.
+ */
+unsigned int
+xfs_alloc_set_aside(
+ struct xfs_mount *mp)
+{
+ unsigned int blocks;
+
+ blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ blocks += mp->m_sb.sb_agcount * mp->m_rmap_maxlevels;
+ return blocks;
+}
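+
+As a worked example (geometry values are hypothetical): with sb_agcount = 4 and the rmapbt feature disabled, the set-aside is 4 + 4 * XFS_ALLOC_AGFL_RESERVE = 4 + 16 = 20 blocks, the same figure the old XFS_ALLOC_SET_ASIDE() macro (4 + 4 * agcount) produced. Enabling rmapbt with m_rmap_maxlevels = 5 withholds a further 4 * 5 = 20 blocks, for 40 in total.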
+
+/*
+ * When deciding how much space to allocate out of an AG, we limit the
+ * allocation maximum size to the size of the AG. However, we cannot use all the
+ * blocks in the AG - some are permanently used by metadata. These
+ * blocks are generally:
+ * - the AG superblock, AGF, AGI and AGFL
+ * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
+ * the AGI free inode and rmap btree root blocks.
+ * - blocks on the AGFL according to xfs_alloc_set_aside() limits
+ * - the rmapbt root block
+ *
+ * The AG headers are sector sized, so the amount of space they take up is
+ * dependent on filesystem geometry. The others are all single blocks.
+ */
+unsigned int
+xfs_alloc_ag_max_usable(
+ struct xfs_mount *mp)
+{
+ unsigned int blocks;
+
+ blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
+ blocks += XFS_ALLOC_AGFL_RESERVE;
+ blocks += 3; /* AGF, AGI btree root blocks */
+ if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ blocks++; /* finobt root block */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ blocks++; /* rmap root block */
+
+ return mp->m_sb.sb_agblocks - blocks;
+}
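+
+A worked example under assumed geometry (4096-byte blocks, 512-byte sectors): the four sector-sized AG headers round up to one filesystem block, so the deduction is 1 + XFS_ALLOC_AGFL_RESERVE (4) + 3 root blocks = 8, or 10 with both the finobt and rmapbt features enabled; an AG of 1,000,000 blocks then advertises 999,990 usable blocks.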
+
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
*/
@@ -636,6 +713,14 @@ xfs_alloc_ag_vextent(
ASSERT(!args->wasfromfl || !args->isfl);
ASSERT(args->agbno % args->alignment == 0);
+ /* if not file data, insert new block into the reverse map btree */
+ if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+ error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
+ args->agbno, args->len, &args->oinfo);
+ if (error)
+ return error;
+ }
+
if (!args->wasfromfl) {
error = xfs_alloc_update_counters(args->tp, args->pag,
args->agbp,
@@ -1577,14 +1662,15 @@ error0:
/*
* Free the extent starting at agno/bno for length.
*/
-STATIC int /* error */
+STATIC int
xfs_free_ag_extent(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_buf_t *agbp, /* buffer for a.g. freelist header */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t bno, /* starting block number */
- xfs_extlen_t len, /* length of extent */
- int isfl) /* set if is freelist blocks - no sb acctg */
+ xfs_trans_t *tp,
+ xfs_buf_t *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ int isfl)
{
xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
@@ -1601,12 +1687,19 @@ xfs_free_ag_extent(
xfs_extlen_t nlen; /* new length of freespace */
xfs_perag_t *pag; /* per allocation group data */
+ bno_cur = cnt_cur = NULL;
mp = tp->t_mountp;
+
+ if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+ error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
+ if (error)
+ goto error0;
+ }
+
/*
* Allocate and initialize a cursor for the by-block btree.
*/
bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
- cnt_cur = NULL;
/*
* Look for a neighboring block on the left (lower block numbers)
* that is contiguous with this space.
@@ -1875,6 +1968,11 @@ xfs_alloc_min_freelist(
/* space needed by-size freespace btree */
min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
mp->m_ag_maxlevels);
+ /* space needed by the reverse mapping btree */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ min_free += min_t(unsigned int,
+ pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
+ mp->m_rmap_maxlevels);
return min_free;
}
@@ -1992,21 +2090,34 @@ xfs_alloc_fix_freelist(
* anything other than extra overhead when we need to put more blocks
* back on the free list? Maybe we should only do this when space is
* getting low or the AGFL is more than half full?
+ *
+ * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
+ * big; the NORMAP flag prevents AGFL expand/shrink operations from
+ * updating the rmapbt. Both flags are used in xfs_repair while we're
+ * rebuilding the rmapbt, and neither are used by the kernel. They're
+ * both required to ensure that rmaps are correctly recorded for the
+ * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
+ * repair/rmap.c in xfsprogs for details.
*/
- while (pag->pagf_flcount > need) {
+ memset(&targs, 0, sizeof(targs));
+ if (flags & XFS_ALLOC_FLAG_NORMAP)
+ xfs_rmap_skip_owner_update(&targs.oinfo);
+ else
+ xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
+ while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
struct xfs_buf *bp;
error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
if (error)
goto out_agbp_relse;
- error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1);
+ error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
+ &targs.oinfo, 1);
if (error)
goto out_agbp_relse;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
xfs_trans_binval(tp, bp);
}
- memset(&targs, 0, sizeof(targs));
targs.tp = tp;
targs.mp = mp;
targs.agbp = agbp;
@@ -2271,6 +2382,10 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
return false;
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)
+ return false;
+
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
@@ -2402,6 +2517,8 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
+ pag->pagf_levels[XFS_BTNUM_RMAPi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
spin_lock_init(&pag->pagb_lock);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
@@ -2691,7 +2808,8 @@ int /* error */
xfs_free_extent(
struct xfs_trans *tp, /* transaction pointer */
xfs_fsblock_t bno, /* starting block number of extent */
- xfs_extlen_t len) /* length of extent */
+ xfs_extlen_t len, /* length of extent */
+ struct xfs_owner_info *oinfo) /* extent owner */
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_buf *agbp;
@@ -2701,6 +2819,11 @@ xfs_free_extent(
ASSERT(len != 0);
+ if (XFS_TEST_ERROR(false, mp,
+ XFS_ERRTAG_FREE_EXTENT,
+ XFS_RANDOM_FREE_EXTENT))
+ return -EIO;
+
error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
if (error)
return error;
@@ -2712,7 +2835,7 @@ xfs_free_extent(
agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
err);
- error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, 0);
+ error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, 0);
if (error)
goto err;
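
With the owner argument added above, callers of xfs_free_extent() now describe whose blocks are being released so the reverse-mapping btree can be updated (or skipped for XFS_RMAP_OWN_UNKNOWN). A minimal sketch of a per-AG-metadata caller; tp, fsbno and len are placeholders:

	struct xfs_owner_info	oinfo;
	int			error;

	/* the blocks belonged to per-AG metadata, e.g. free-space btree blocks */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	error = xfs_free_extent(tp, fsbno, len, &oinfo);
	if (error)
		return error;
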
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index cf268b2d0b6c..6fe2d6b7cfe9 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -54,41 +54,8 @@ typedef unsigned int xfs_alloctype_t;
*/
#define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */
#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
-
-/*
- * In order to avoid ENOSPC-related deadlock caused by
- * out-of-order locking of AGF buffer (PV 947395), we place
- * constraints on the relationship among actual allocations for
- * data blocks, freelist blocks, and potential file data bmap
- * btree blocks. However, these restrictions may result in no
- * actual space allocated for a delayed extent, for example, a data
- * block in a certain AG is allocated but there is no additional
- * block for the additional bmap btree block due to a split of the
- * bmap btree of the file. The result of this may lead to an
- * infinite loop in xfssyncd when the file gets flushed to disk and
- * all delayed extents need to be actually allocated. To get around
- * this, we explicitly set aside a few blocks which will not be
- * reserved in delayed allocation. Considering the minimum number of
- * needed freelist blocks is 4 fsbs _per AG_, a potential split of file's bmap
- * btree requires 1 fsb, so we set the number of set-aside blocks
- * to 4 + 4*agcount.
- */
-#define XFS_ALLOC_SET_ASIDE(mp) (4 + ((mp)->m_sb.sb_agcount * 4))
-
-/*
- * When deciding how much space to allocate out of an AG, we limit the
- * allocation maximum size to the size the AG. However, we cannot use all the
- * blocks in the AG - some are permanently used by metadata. These
- * blocks are generally:
- * - the AG superblock, AGF, AGI and AGFL
- * - the AGF (bno and cnt) and AGI btree root blocks
- * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
- *
- * The AG headers are sector sized, so the amount of space they take up is
- * dependent on filesystem geometry. The others are all single blocks.
- */
-#define XFS_ALLOC_AG_MAX_USABLE(mp) \
- ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
+#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
+#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
/*
@@ -123,6 +90,7 @@ typedef struct xfs_alloc_arg {
char isfl; /* set if is freelist blocks - !acctg */
char userdata; /* mask defining userdata treatment */
xfs_fsblock_t firstblock; /* io first block allocated */
+ struct xfs_owner_info oinfo; /* owner of blocks being allocated */
} xfs_alloc_arg_t;
/*
@@ -132,6 +100,11 @@ typedef struct xfs_alloc_arg {
#define XFS_ALLOC_INITIAL_USER_DATA (1 << 1)/* special case start of file */
#define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
+/* freespace limit calculations */
+#define XFS_ALLOC_AGFL_RESERVE 4
+unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
+unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
+
xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
struct xfs_perag *pag, xfs_extlen_t need);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
@@ -208,9 +181,10 @@ xfs_alloc_vextent(
*/
int /* error */
xfs_free_extent(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_fsblock_t bno, /* starting block number of extent */
- xfs_extlen_t len); /* length of extent */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_fsblock_t bno, /* starting block number of extent */
+ xfs_extlen_t len, /* length of extent */
+ struct xfs_owner_info *oinfo);/* extent owner */
int /* error */
xfs_alloc_lookup_ge(
@@ -232,4 +206,6 @@ int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
struct xfs_buf **agbp);
+xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);
+
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index d9b42425291e..5ba2dac5e67c 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -212,17 +212,6 @@ xfs_allocbt_init_key_from_rec(
}
STATIC void
-xfs_allocbt_init_rec_from_key(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
-{
- ASSERT(key->alloc.ar_startblock != 0);
-
- rec->alloc.ar_startblock = key->alloc.ar_startblock;
- rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
-}
-
-STATIC void
xfs_allocbt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
@@ -406,7 +395,6 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
.get_minrecs = xfs_allocbt_get_minrecs,
.get_maxrecs = xfs_allocbt_get_maxrecs,
.init_key_from_rec = xfs_allocbt_init_key_from_rec,
- .init_rec_from_key = xfs_allocbt_init_rec_from_key,
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_allocbt_key_diff,
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 4e126f41a0aa..af1ecb19121e 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr_sf.h"
@@ -203,7 +204,7 @@ xfs_attr_set(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
- struct xfs_bmap_free flist;
+ struct xfs_defer_ops dfops;
struct xfs_trans_res tres;
xfs_fsblock_t firstblock;
int rsvd = (flags & ATTR_ROOT) != 0;
@@ -221,7 +222,7 @@ xfs_attr_set(
args.value = value;
args.valuelen = valuelen;
args.firstblock = &firstblock;
- args.flist = &flist;
+ args.dfops = &dfops;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
args.total = xfs_attr_calc_size(&args, &local);
@@ -316,13 +317,13 @@ xfs_attr_set(
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- xfs_bmap_init(args.flist, args.firstblock);
+ xfs_defer_init(args.dfops, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args);
if (!error)
- error = xfs_bmap_finish(&args.trans, args.flist, dp);
+ error = xfs_defer_finish(&args.trans, args.dfops, dp);
if (error) {
args.trans = NULL;
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
goto out;
}
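
Every xfs_bmap_init()/xfs_bmap_finish()/xfs_bmap_cancel() triple in this file is converted the same way as the hunk above; condensed, with the metadata operation in the middle left as a placeholder:

	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstblock;
	int			error;

	xfs_defer_init(&dfops, &firstblock);
	error = do_metadata_change(&args);	/* placeholder for the real work */
	if (!error)
		error = xfs_defer_finish(&args.trans, &dfops, dp);
	if (error) {
		args.trans = NULL;
		xfs_defer_cancel(&dfops);
		goto out;
	}
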
@@ -382,7 +383,7 @@ xfs_attr_remove(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
- struct xfs_bmap_free flist;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t firstblock;
int error;
@@ -399,7 +400,7 @@ xfs_attr_remove(
return error;
args.firstblock = &firstblock;
- args.flist = &flist;
+ args.dfops = &dfops;
/*
* we have no control over the attribute names that userspace passes us
@@ -584,13 +585,13 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
@@ -674,15 +675,15 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
}
@@ -737,14 +738,14 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
}
@@ -863,14 +864,14 @@ restart:
*/
xfs_da_state_free(state);
state = NULL;
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
@@ -891,13 +892,13 @@ restart:
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_split(state);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
} else {
@@ -990,14 +991,14 @@ restart:
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
}
@@ -1113,13 +1114,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
/*
@@ -1146,15 +1147,15 @@ xfs_attr_node_removename(xfs_da_args_t *args)
goto out;
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
} else
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 01a5ecfedfcf..8ea91f363093 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -792,7 +792,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
nargs.dp = dp;
nargs.geo = args->geo;
nargs.firstblock = args->firstblock;
- nargs.flist = args->flist;
+ nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
@@ -922,7 +922,7 @@ xfs_attr3_leaf_to_shortform(
nargs.geo = args->geo;
nargs.dp = dp;
nargs.firstblock = args->firstblock;
- nargs.flist = args->flist;
+ nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index a572532a55cd..d52f525f5b2d 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -24,6 +24,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -460,16 +461,16 @@ xfs_attr_rmtval_set(
* extent and then crash then the block may not contain the
* correct metadata after log recovery occurs.
*/
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
- args->total, &map, &nmap, args->flist);
+ args->total, &map, &nmap, args->dfops);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
@@ -503,7 +504,7 @@ xfs_attr_rmtval_set(
ASSERT(blkcnt > 0);
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
blkcnt, &map, &nmap,
@@ -603,16 +604,16 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK, 1, args->firstblock,
- args->flist, &done);
+ args->dfops, &done);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist,
+ error = xfs_defer_finish(&args->trans, args->dfops,
args->dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2f2c85cc8117..b060bca93402 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
@@ -45,6 +46,7 @@
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
+#include "xfs_rmap.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -570,12 +572,13 @@ xfs_bmap_validate_ret(
*/
void
xfs_bmap_add_free(
- struct xfs_mount *mp, /* mount point structure */
- struct xfs_bmap_free *flist, /* list of extents */
- xfs_fsblock_t bno, /* fs block number of extent */
- xfs_filblks_t len) /* length of extent */
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ xfs_fsblock_t bno,
+ xfs_filblks_t len,
+ struct xfs_owner_info *oinfo)
{
- struct xfs_bmap_free_item *new; /* new element */
+ struct xfs_extent_free_item *new; /* new element */
#ifdef DEBUG
xfs_agnumber_t agno;
xfs_agblock_t agbno;
@@ -592,44 +595,17 @@ xfs_bmap_add_free(
ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
ASSERT(xfs_bmap_free_item_zone != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
- new->xbfi_startblock = bno;
- new->xbfi_blockcount = (xfs_extlen_t)len;
- list_add(&new->xbfi_list, &flist->xbf_flist);
- flist->xbf_count++;
-}
-
-/*
- * Remove the entry "free" from the free item list. Prev points to the
- * previous entry, unless "free" is the head of the list.
- */
-void
-xfs_bmap_del_free(
- struct xfs_bmap_free *flist, /* free item list header */
- struct xfs_bmap_free_item *free) /* list item to be freed */
-{
- list_del(&free->xbfi_list);
- flist->xbf_count--;
- kmem_zone_free(xfs_bmap_free_item_zone, free);
-}
-
-/*
- * Free up any items left in the list.
- */
-void
-xfs_bmap_cancel(
- struct xfs_bmap_free *flist) /* list of bmap_free_items */
-{
- struct xfs_bmap_free_item *free; /* free list item */
- if (flist->xbf_count == 0)
- return;
- while (!list_empty(&flist->xbf_flist)) {
- free = list_first_entry(&flist->xbf_flist,
- struct xfs_bmap_free_item, xbfi_list);
- xfs_bmap_del_free(flist, free);
- }
- ASSERT(flist->xbf_count == 0);
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new->xefi_startblock = bno;
+ new->xefi_blockcount = (xfs_extlen_t)len;
+ if (oinfo)
+ new->xefi_oinfo = *oinfo;
+ else
+ xfs_rmap_skip_owner_update(&new->xefi_oinfo);
+ trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
+ XFS_FSB_TO_AGBNO(mp, bno), len);
+ xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
/*
@@ -659,6 +635,7 @@ xfs_bmap_btree_to_extents(
xfs_mount_t *mp; /* mount point structure */
__be64 *pp; /* ptr to block address */
struct xfs_btree_block *rblock;/* root btree block */
+ struct xfs_owner_info oinfo;
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -682,7 +659,8 @@ xfs_bmap_btree_to_extents(
cblock = XFS_BUF_TO_BLOCK(cbp);
if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
return error;
- xfs_bmap_add_free(mp, cur->bc_private.b.flist, cbno, 1);
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
+ xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(tp, cbp);
@@ -705,7 +683,7 @@ xfs_bmap_extents_to_btree(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first-block-allocated */
- xfs_bmap_free_t *flist, /* blocks freed in xaction */
+ struct xfs_defer_ops *dfops, /* blocks freed in xaction */
xfs_btree_cur_t **curp, /* cursor returned to caller */
int wasdel, /* converting a delayed alloc */
int *logflagsp, /* inode logging flags */
@@ -754,7 +732,7 @@ xfs_bmap_extents_to_btree(
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
/*
* Convert to a btree with two levels, one record in root.
@@ -763,11 +741,12 @@ xfs_bmap_extents_to_btree(
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = mp;
+ xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
args.firstblock = *firstblock;
if (*firstblock == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
- } else if (flist->xbf_low) {
+ } else if (dfops->dop_low) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = *firstblock;
} else {
@@ -788,7 +767,7 @@ xfs_bmap_extents_to_btree(
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(*firstblock == NULLFSBLOCK ||
args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
- (flist->xbf_low &&
+ (dfops->dop_low &&
args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
@@ -909,6 +888,7 @@ xfs_bmap_local_to_extents(
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
+ xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
args.firstblock = *firstblock;
/*
* Allocate a block. We know we need only one, since the
@@ -973,7 +953,7 @@ xfs_bmap_add_attrfork_btree(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
+ struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -986,7 +966,7 @@ xfs_bmap_add_attrfork_btree(
*flags |= XFS_ILOG_DBROOT;
else {
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.firstblock = *firstblock;
if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
goto error0;
@@ -1016,7 +996,7 @@ xfs_bmap_add_attrfork_extents(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
+ struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -1025,7 +1005,7 @@ xfs_bmap_add_attrfork_extents(
if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
return 0;
cur = NULL;
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
+ error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
flags, XFS_DATA_FORK);
if (cur) {
cur->bc_private.b.allocated = 0;
@@ -1051,7 +1031,7 @@ xfs_bmap_add_attrfork_local(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
+ struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_da_args_t dargs; /* args for dir/attr code */
@@ -1064,7 +1044,7 @@ xfs_bmap_add_attrfork_local(
dargs.geo = ip->i_mount->m_dir_geo;
dargs.dp = ip;
dargs.firstblock = firstblock;
- dargs.flist = flist;
+ dargs.dfops = dfops;
dargs.total = dargs.geo->fsbcount;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
@@ -1092,7 +1072,7 @@ xfs_bmap_add_attrfork(
int rsvd) /* xact may use reserved blks */
{
xfs_fsblock_t firstblock; /* 1st block/ag allocated */
- xfs_bmap_free_t flist; /* freed extent records */
+ struct xfs_defer_ops dfops; /* freed extent records */
xfs_mount_t *mp; /* mount structure */
xfs_trans_t *tp; /* transaction pointer */
int blks; /* space reservation */
@@ -1158,18 +1138,18 @@ xfs_bmap_add_attrfork(
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_LOCAL:
- error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
+ error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
&logflags);
break;
case XFS_DINODE_FMT_EXTENTS:
error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
- &flist, &logflags);
+ &dfops, &logflags);
break;
case XFS_DINODE_FMT_BTREE:
- error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
+ error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
&logflags);
break;
default:
@@ -1198,7 +1178,7 @@ xfs_bmap_add_attrfork(
xfs_log_sb(tp);
}
- error = xfs_bmap_finish(&tp, &flist, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto bmap_cancel;
error = xfs_trans_commit(tp);
@@ -1206,7 +1186,7 @@ xfs_bmap_add_attrfork(
return error;
bmap_cancel:
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -2003,7 +1983,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist,
+ bma->firstblock, bma->dfops,
&bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -2087,7 +2067,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur, 1,
+ bma->firstblock, bma->dfops, &bma->cur, 1,
&tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -2156,7 +2136,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur,
+ bma->firstblock, bma->dfops, &bma->cur,
1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -2199,13 +2179,18 @@ xfs_bmap_add_extent_delay_real(
ASSERT(0);
}
+ /* add reverse mapping */
+ error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
+ if (error)
+ goto done;
+
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(bma->cur == NULL);
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur,
+ bma->firstblock, bma->dfops, &bma->cur,
da_old > 0, &tmp_logflags, whichfork);
bma->logflags |= tmp_logflags;
if (error)
@@ -2247,7 +2232,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
xfs_fsblock_t *first, /* pointer to firstblock variable */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
+ struct xfs_defer_ops *dfops, /* list of extents to be freed */
int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -2735,12 +2720,17 @@ xfs_bmap_add_extent_unwritten_real(
ASSERT(0);
}
+ /* update reverse mappings */
+ error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new);
+ if (error)
+ goto done;
+
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
+ error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
0, &tmp_logflags, XFS_DATA_FORK);
*logflagsp |= tmp_logflags;
if (error)
@@ -3127,13 +3117,18 @@ xfs_bmap_add_extent_hole_real(
break;
}
+ /* add reverse mapping */
+ error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
+ if (error)
+ goto done;
+
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(bma->cur == NULL);
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur,
+ bma->firstblock, bma->dfops, &bma->cur,
0, &tmp_logflags, whichfork);
bma->logflags |= tmp_logflags;
if (error)
@@ -3691,9 +3686,10 @@ xfs_bmap_btalloc(
args.tp = ap->tp;
args.mp = mp;
args.fsbno = ap->blkno;
+ xfs_rmap_skip_owner_update(&args.oinfo);
/* Trim the allocation back to the maximum an AG can fit. */
- args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
+ args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
args.firstblock = *ap->firstblock;
blen = 0;
if (nullfb) {
@@ -3708,7 +3704,7 @@ xfs_bmap_btalloc(
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
if (error)
return error;
- } else if (ap->flist->xbf_low) {
+ } else if (ap->dfops->dop_low) {
if (xfs_inode_is_filestream(ap->ip))
args.type = XFS_ALLOCTYPE_FIRST_AG;
else
@@ -3741,7 +3737,7 @@ xfs_bmap_btalloc(
* is >= the stripe unit and the allocation offset is
* at the end of file.
*/
- if (!ap->flist->xbf_low && ap->aeof) {
+ if (!ap->dfops->dop_low && ap->aeof) {
if (!ap->offset) {
args.alignment = stripe_align;
atype = args.type;
@@ -3834,7 +3830,7 @@ xfs_bmap_btalloc(
args.minleft = 0;
if ((error = xfs_alloc_vextent(&args)))
return error;
- ap->flist->xbf_low = 1;
+ ap->dfops->dop_low = true;
}
if (args.fsbno != NULLFSBLOCK) {
/*
@@ -3844,7 +3840,7 @@ xfs_bmap_btalloc(
ASSERT(*ap->firstblock == NULLFSBLOCK ||
XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
XFS_FSB_TO_AGNO(mp, args.fsbno) ||
- (ap->flist->xbf_low &&
+ (ap->dfops->dop_low &&
XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
XFS_FSB_TO_AGNO(mp, args.fsbno)));
@@ -3852,7 +3848,7 @@ xfs_bmap_btalloc(
if (*ap->firstblock == NULLFSBLOCK)
*ap->firstblock = args.fsbno;
ASSERT(nullfb || fb_agno == args.agno ||
- (ap->flist->xbf_low && fb_agno < args.agno));
+ (ap->dfops->dop_low && fb_agno < args.agno));
ap->length = args.len;
ap->ip->i_d.di_nblocks += args.len;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
@@ -4319,7 +4315,7 @@ xfs_bmapi_allocate(
if (error)
return error;
- if (bma->flist->xbf_low)
+ if (bma->dfops->dop_low)
bma->minleft = 0;
if (bma->cur)
bma->cur->bc_private.b.firstblock = *bma->firstblock;
@@ -4328,7 +4324,7 @@ xfs_bmapi_allocate(
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.flist = bma->flist;
+ bma->cur->bc_private.b.dfops = bma->dfops;
}
/*
* Bump the number of extents we've allocated
@@ -4409,7 +4405,7 @@ xfs_bmapi_convert_unwritten(
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
bma->ip, whichfork);
bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.flist = bma->flist;
+ bma->cur->bc_private.b.dfops = bma->dfops;
}
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
@@ -4426,7 +4422,7 @@ xfs_bmapi_convert_unwritten(
}
error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
- &bma->cur, mval, bma->firstblock, bma->flist,
+ &bma->cur, mval, bma->firstblock, bma->dfops,
&tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
@@ -4480,7 +4476,7 @@ xfs_bmapi_write(
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
- struct xfs_bmap_free *flist) /* i/o: list extents to free */
+ struct xfs_defer_ops *dfops) /* i/o: list extents to free */
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
@@ -4570,7 +4566,7 @@ xfs_bmapi_write(
bma.ip = ip;
bma.total = total;
bma.userdata = 0;
- bma.flist = flist;
+ bma.dfops = dfops;
bma.firstblock = firstblock;
while (bno < end && n < *nmap) {
@@ -4684,7 +4680,7 @@ error0:
XFS_FSB_TO_AGNO(mp, *firstblock) ==
XFS_FSB_TO_AGNO(mp,
bma.cur->bc_private.b.firstblock) ||
- (flist->xbf_low &&
+ (dfops->dop_low &&
XFS_FSB_TO_AGNO(mp, *firstblock) <
XFS_FSB_TO_AGNO(mp,
bma.cur->bc_private.b.firstblock)));
@@ -4768,7 +4764,7 @@ xfs_bmap_del_extent(
xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */
xfs_extnum_t *idx, /* extent number to update/delete */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
+ struct xfs_defer_ops *dfops, /* list of extents to be freed */
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
@@ -4870,6 +4866,7 @@ xfs_bmap_del_extent(
nblks = 0;
do_fx = 0;
}
+
/*
* Set flag value to use in switch statement.
* Left-contig is 2, right-contig is 1.
@@ -5052,12 +5049,20 @@ xfs_bmap_del_extent(
++*idx;
break;
}
+
+ /* remove reverse mapping */
+ if (!delay) {
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
+ if (error)
+ goto done;
+ }
+
/*
* If we need to, add to list of extents to delete.
*/
if (do_fx)
- xfs_bmap_add_free(mp, flist, del->br_startblock,
- del->br_blockcount);
+ xfs_bmap_add_free(mp, dfops, del->br_startblock,
+ del->br_blockcount, NULL);
/*
* Adjust inode # blocks in the file.
*/
@@ -5097,7 +5102,7 @@ xfs_bunmapi(
xfs_extnum_t nexts, /* number of extents max */
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ struct xfs_defer_ops *dfops, /* i/o: list extents to free */
int *done) /* set if not done yet */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -5170,7 +5175,7 @@ xfs_bunmapi(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
} else
cur = NULL;
@@ -5179,8 +5184,10 @@ xfs_bunmapi(
/*
* Synchronize by locking the bitmap inode.
*/
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
+ xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
}
extno = 0;
@@ -5262,7 +5269,7 @@ xfs_bunmapi(
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
- &lastx, &cur, &del, firstblock, flist,
+ &lastx, &cur, &del, firstblock, dfops,
&logflags);
if (error)
goto error0;
@@ -5321,7 +5328,7 @@ xfs_bunmapi(
lastx--;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, &lastx, &cur, &prev,
- firstblock, flist, &logflags);
+ firstblock, dfops, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5330,7 +5337,7 @@ xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, &lastx, &cur, &del,
- firstblock, flist, &logflags);
+ firstblock, dfops, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5388,7 +5395,7 @@ xfs_bunmapi(
} else if (cur)
cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
- error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
+ error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
&tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
@@ -5422,7 +5429,7 @@ nodelete:
*/
if (xfs_bmap_needs_btree(ip, whichfork)) {
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
+ error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
&cur, 0, &tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
@@ -5589,7 +5596,8 @@ xfs_bmse_shift_one(
struct xfs_bmbt_rec_host *gotp,
struct xfs_btree_cur *cur,
int *logflags,
- enum shift_direction direction)
+ enum shift_direction direction,
+ struct xfs_defer_ops *dfops)
{
struct xfs_ifork *ifp;
struct xfs_mount *mp;
@@ -5637,9 +5645,13 @@ xfs_bmse_shift_one(
/* check whether to merge the extent or shift it down */
if (xfs_bmse_can_merge(&adj_irec, &got,
offset_shift_fsb)) {
- return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- *current_ext, gotp, adj_irecp,
- cur, logflags);
+ error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
+ *current_ext, gotp, adj_irecp,
+ cur, logflags);
+ if (error)
+ return error;
+ adj_irec = got;
+ goto update_rmap;
}
} else {
startoff = got.br_startoff + offset_shift_fsb;
@@ -5676,9 +5688,10 @@ update_current_ext:
(*current_ext)--;
xfs_bmbt_set_startoff(gotp, startoff);
*logflags |= XFS_ILOG_CORE;
+ adj_irec = got;
if (!cur) {
*logflags |= XFS_ILOG_DEXT;
- return 0;
+ goto update_rmap;
}
error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
@@ -5688,8 +5701,18 @@ update_current_ext:
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
got.br_startoff = startoff;
- return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
- got.br_blockcount, got.br_state);
+ error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
+ got.br_blockcount, got.br_state);
+ if (error)
+ return error;
+
+update_rmap:
+ /* update reverse mapping */
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
+ if (error)
+ return error;
+ adj_irec.br_startoff = startoff;
+ return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
}
/*
@@ -5711,7 +5734,7 @@ xfs_bmap_shift_extents(
int *done,
xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist,
+ struct xfs_defer_ops *dfops,
enum shift_direction direction,
int num_exts)
{
@@ -5756,7 +5779,7 @@ xfs_bmap_shift_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -5817,7 +5840,7 @@ xfs_bmap_shift_extents(
while (nexts++ < num_exts) {
error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
&current_ext, gotp, cur, &logflags,
- direction);
+ direction, dfops);
if (error)
goto del_cursor;
/*
@@ -5865,7 +5888,7 @@ xfs_bmap_split_extent_at(
struct xfs_inode *ip,
xfs_fileoff_t split_fsb,
xfs_fsblock_t *firstfsb,
- struct xfs_bmap_free *free_list)
+ struct xfs_defer_ops *dfops)
{
int whichfork = XFS_DATA_FORK;
struct xfs_btree_cur *cur = NULL;
@@ -5927,7 +5950,7 @@ xfs_bmap_split_extent_at(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstfsb;
- cur->bc_private.b.flist = free_list;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
got.br_startblock,
@@ -5980,7 +6003,7 @@ xfs_bmap_split_extent_at(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list,
+ error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
&cur, 0, &tmp_logflags, whichfork);
logflags |= tmp_logflags;
}
@@ -6004,7 +6027,7 @@ xfs_bmap_split_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t firstfsb;
int error;
@@ -6016,21 +6039,21 @@ xfs_bmap_split_extent(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
- &firstfsb, &free_list);
+ &firstfsb, &dfops);
if (error)
goto out;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out;
return xfs_trans_commit(tp);
out:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f1f3ae6c0a3f..254034f96941 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -32,7 +32,7 @@ extern kmem_zone_t *xfs_bmap_free_item_zone;
*/
struct xfs_bmalloca {
xfs_fsblock_t *firstblock; /* i/o first block allocated */
- struct xfs_bmap_free *flist; /* bmap freelist */
+ struct xfs_defer_ops *dfops; /* bmap freelist */
struct xfs_trans *tp; /* transaction pointer */
struct xfs_inode *ip; /* incore inode pointer */
struct xfs_bmbt_irec prev; /* extent before the new one */
@@ -62,34 +62,14 @@ struct xfs_bmalloca {
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
*/
-struct xfs_bmap_free_item
+struct xfs_extent_free_item
{
- xfs_fsblock_t xbfi_startblock;/* starting fs block number */
- xfs_extlen_t xbfi_blockcount;/* number of blocks in extent */
- struct list_head xbfi_list;
+ xfs_fsblock_t xefi_startblock;/* starting fs block number */
+ xfs_extlen_t xefi_blockcount;/* number of blocks in extent */
+ struct list_head xefi_list;
+ struct xfs_owner_info xefi_oinfo; /* extent owner */
};
-/*
- * Header for free extent list.
- *
- * xbf_low is used by the allocator to activate the lowspace algorithm -
- * when free space is running low the extent allocator may choose to
- * allocate an extent from an AG without leaving sufficient space for
- * a btree split when inserting the new extent. In this case the allocator
- * will enable the lowspace algorithm which is supposed to allow further
- * allocations (such as btree splits and newroots) to allocate from
- * sequential AGs. In order to avoid locking AGs out of order the lowspace
- * algorithm will start searching for free space from AG 0. If the correct
- * transaction reservations have been made then this algorithm will eventually
- * find all the space it needs.
- */
-typedef struct xfs_bmap_free
-{
- struct list_head xbf_flist; /* list of to-be-free extents */
- int xbf_count; /* count of items on list */
- int xbf_low; /* alloc in low mode */
-} xfs_bmap_free_t;
-
#define XFS_BMAP_MAX_NMAP 4
/*
@@ -139,14 +119,6 @@ static inline int xfs_bmapi_aflag(int w)
#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL)
#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL)
-static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
-{
- INIT_LIST_HEAD(&flp->xbf_flist);
- flp->xbf_count = 0;
- flp->xbf_low = 0;
- *fbp = NULLFSBLOCK;
-}
-
/*
* Flags for xfs_bmap_add_extent*.
*/
@@ -193,11 +165,9 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
-void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_bmap_free *flist,
- xfs_fsblock_t bno, xfs_filblks_t len);
-void xfs_bmap_cancel(struct xfs_bmap_free *flist);
-int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
- struct xfs_inode *ip);
+void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ xfs_fsblock_t bno, xfs_filblks_t len,
+ struct xfs_owner_info *oinfo);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
@@ -218,18 +188,18 @@ int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_fsblock_t *firstblock, xfs_extlen_t total,
struct xfs_bmbt_irec *mval, int *nmap,
- struct xfs_bmap_free *flist);
+ struct xfs_defer_ops *dfops);
int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist, int *done);
+ struct xfs_defer_ops *dfops, int *done);
int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
int *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist, enum shift_direction direction,
+ struct xfs_defer_ops *dfops, enum shift_direction direction,
int num_exts);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index db0c71e470c9..cd85274e810c 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
@@ -34,6 +35,7 @@
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_rmap.h"
/*
* Determine the extent state.
@@ -406,11 +408,11 @@ xfs_bmbt_dup_cursor(
cur->bc_private.b.ip, cur->bc_private.b.whichfork);
/*
- * Copy the firstblock, flist, and flags values,
+ * Copy the firstblock, dfops, and flags values,
* since init cursor doesn't get them.
*/
new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
- new->bc_private.b.flist = cur->bc_private.b.flist;
+ new->bc_private.b.dfops = cur->bc_private.b.dfops;
new->bc_private.b.flags = cur->bc_private.b.flags;
return new;
@@ -423,7 +425,7 @@ xfs_bmbt_update_cursor(
{
ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
(dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
- ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
+ ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);
dst->bc_private.b.allocated += src->bc_private.b.allocated;
dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
@@ -446,6 +448,8 @@ xfs_bmbt_alloc_block(
args.mp = cur->bc_mp;
args.fsbno = cur->bc_private.b.firstblock;
args.firstblock = args.fsbno;
+ xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
+ cur->bc_private.b.whichfork);
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
@@ -462,7 +466,7 @@ xfs_bmbt_alloc_block(
* block allocation here and corrupt the filesystem.
*/
args.minleft = args.tp->t_blk_res;
- } else if (cur->bc_private.b.flist->xbf_low) {
+ } else if (cur->bc_private.b.dfops->dop_low) {
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -490,7 +494,7 @@ xfs_bmbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
- cur->bc_private.b.flist->xbf_low = 1;
+ cur->bc_private.b.dfops->dop_low = true;
}
if (args.fsbno == NULLFSBLOCK) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
@@ -525,8 +529,10 @@ xfs_bmbt_free_block(
struct xfs_inode *ip = cur->bc_private.b.ip;
struct xfs_trans *tp = cur->bc_tp;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
+ struct xfs_owner_info oinfo;
- xfs_bmap_add_free(mp, cur->bc_private.b.flist, fsbno, 1);
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
+ xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -600,17 +606,6 @@ xfs_bmbt_init_key_from_rec(
}
STATIC void
-xfs_bmbt_init_rec_from_key(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
-{
- ASSERT(key->bmbt.br_startoff != 0);
-
- xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
- 0, 0, XFS_EXT_NORM);
-}
-
-STATIC void
xfs_bmbt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
@@ -760,7 +755,6 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
.get_minrecs = xfs_bmbt_get_minrecs,
.get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
.init_key_from_rec = xfs_bmbt_init_key_from_rec,
- .init_rec_from_key = xfs_bmbt_init_rec_from_key,
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
.key_diff = xfs_bmbt_key_diff,
@@ -800,7 +794,7 @@ xfs_bmbt_init_cursor(
cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
cur->bc_private.b.ip = ip;
cur->bc_private.b.firstblock = NULLFSBLOCK;
- cur->bc_private.b.flist = NULL;
+ cur->bc_private.b.dfops = NULL;
cur->bc_private.b.allocated = 0;
cur->bc_private.b.flags = 0;
cur->bc_private.b.whichfork = whichfork;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 07eeb0b4ca74..b5c213a051cd 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
@@ -43,15 +44,14 @@ kmem_zone_t *xfs_btree_cur_zone;
* Btree magic numbers.
*/
static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
- { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
+ { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
XFS_FIBT_MAGIC },
- { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
+ { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
};
#define xfs_btree_magic(cur) \
xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
-
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
struct xfs_btree_cur *cur, /* btree cursor */
@@ -428,6 +428,50 @@ xfs_btree_dup_cursor(
* into a btree block (xfs_btree_*_offset) or return a pointer to the given
* record, key or pointer (xfs_btree_*_addr). Note that all addressing
* inside the btree block is done using indices starting at one, not zero!
+ *
+ * If XFS_BTREE_OVERLAPPING is set, then this btree supports keys containing
+ * overlapping intervals. In such a tree, records are still sorted lowest to
+ * highest and indexed by the smallest key value that refers to the record.
+ * However, nodes are different: each pointer has two associated keys -- one
+ * indexing the lowest key available in the block(s) below (the same behavior
+ * as the key in a regular btree) and another indexing the highest key
+ * available in the block(s) below. Because records are /not/ sorted by the
+ * highest key, all leaf block updates require us to compute the highest key
+ * that matches any record in the leaf and to recursively update the high keys
+ * in the nodes going further up in the tree, if necessary. Nodes look like
+ * this:
+ *
+ * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
+ * Non-Leaf: | header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
+ * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
+ *
+ * To perform an interval query on an overlapped tree, perform the usual
+ * depth-first search and use the low and high keys to decide if we can skip
+ * that particular node. If a leaf node is reached, return the records that
+ * intersect the interval. Note that an interval query may return numerous
+ * entries. For a non-overlapped tree, simply search for the record associated
+ * with the lowest key and iterate forward until a non-matching record is
+ * found. Section 14.3 ("Interval Trees") of _Introduction to Algorithms_ by
+ * Cormen, Leiserson, Rivest, and Stein (2nd or 3rd ed. only) discusses this in
+ * more detail.
+ *
+ * Why do we care about overlapping intervals? Let's say you have a bunch of
+ * reverse mapping records on a reflink filesystem:
+ *
+ * 1: +- file A startblock B offset C length D -----------+
+ * 2: +- file E startblock F offset G length H --------------+
+ * 3: +- file I startblock F offset J length K --+
+ * 4: +- file L... --+
+ *
+ * Now say we want to map block (B+D) into file A at offset (C+D). Ideally,
+ * we'd simply increment the length of record 1. But how do we find the record
+ * that ends at (B+D-1) (i.e. record 1)? A LE lookup of (B+D-1) would return
+ * record 3 because the keys are ordered first by startblock. An interval
+ * query would return records 1 and 2 because they both overlap (B+D-1), and
+ * from that we can pick out record 1 as the appropriate left neighbor.
+ *
+ * In the non-overlapped case you can do a LE lookup and decrement the cursor
+ * because a record's interval must end before the next record.
*/
/*
@@ -479,6 +523,18 @@ xfs_btree_key_offset(
}
/*
+ * Calculate offset of the n-th high key in a btree block.
+ */
+STATIC size_t
+xfs_btree_high_key_offset(
+ struct xfs_btree_cur *cur,
+ int n)
+{
+ return xfs_btree_block_len(cur) +
+ (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
+}
+
+/*
* Calculate offset of the n-th block pointer in a btree block.
*/
STATIC size_t
@@ -519,6 +575,19 @@ xfs_btree_key_addr(
}
/*
+ * Return a pointer to the n-th high key in the btree block.
+ */
+STATIC union xfs_btree_key *
+xfs_btree_high_key_addr(
+ struct xfs_btree_cur *cur,
+ int n,
+ struct xfs_btree_block *block)
+{
+ return (union xfs_btree_key *)
+ ((char *)block + xfs_btree_high_key_offset(cur, n));
+}
+
+/*
* Return a pointer to the n-th block pointer in the btree block.
*/
STATIC union xfs_btree_ptr *
@@ -1144,6 +1213,9 @@ xfs_btree_set_refs(
case XFS_BTNUM_BMAP:
xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
break;
+ case XFS_BTNUM_RMAP:
+ xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
+ break;
default:
ASSERT(0);
}
@@ -1879,32 +1951,214 @@ error0:
return error;
}
+/* Find the high key storage area from a regular key. */
+STATIC union xfs_btree_key *
+xfs_btree_high_key_from_key(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
+{
+ ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
+ return (union xfs_btree_key *)((char *)key +
+ (cur->bc_ops->key_len / 2));
+}
+
+/* Determine the low (and high if overlapped) keys of a leaf block */
+STATIC void
+xfs_btree_get_leaf_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *key)
+{
+ union xfs_btree_key max_hkey;
+ union xfs_btree_key hkey;
+ union xfs_btree_rec *rec;
+ union xfs_btree_key *high;
+ int n;
+
+ rec = xfs_btree_rec_addr(cur, 1, block);
+ cur->bc_ops->init_key_from_rec(key, rec);
+
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+
+ cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
+ for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
+ rec = xfs_btree_rec_addr(cur, n, block);
+ cur->bc_ops->init_high_key_from_rec(&hkey, rec);
+ if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
+ > 0)
+ max_hkey = hkey;
+ }
+
+ high = xfs_btree_high_key_from_key(cur, key);
+ memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
+ }
+}
+
+/* Determine the low (and high if overlapped) keys of a node block */
+STATIC void
+xfs_btree_get_node_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *key)
+{
+ union xfs_btree_key *hkey;
+ union xfs_btree_key *max_hkey;
+ union xfs_btree_key *high;
+ int n;
+
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ memcpy(key, xfs_btree_key_addr(cur, 1, block),
+ cur->bc_ops->key_len / 2);
+
+ max_hkey = xfs_btree_high_key_addr(cur, 1, block);
+ for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
+ hkey = xfs_btree_high_key_addr(cur, n, block);
+ if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
+ max_hkey = hkey;
+ }
+
+ high = xfs_btree_high_key_from_key(cur, key);
+ memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
+ } else {
+ memcpy(key, xfs_btree_key_addr(cur, 1, block),
+ cur->bc_ops->key_len);
+ }
+}
+
+/* Derive the keys for any btree block. */
+STATIC void
+xfs_btree_get_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *key)
+{
+ if (be16_to_cpu(block->bb_level) == 0)
+ xfs_btree_get_leaf_keys(cur, block, key);
+ else
+ xfs_btree_get_node_keys(cur, block, key);
+}
+
/*
- * Update keys at all levels from here to the root along the cursor's path.
+ * Decide if we need to update the parent keys of a btree block. For
+ * a standard btree this is only necessary if we're updating the first
+ * record/key. For an overlapping btree, we must always update the
+ * keys because the highest key can be in any of the records or keys
+ * in the block.
+ */
+static inline bool
+xfs_btree_needs_key_update(
+ struct xfs_btree_cur *cur,
+ int ptr)
+{
+ return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
+}
+
+/*
+ * Update the low and high parent keys of the given level, progressing
+ * towards the root. If force_all is false, stop if the keys for a given
+ * level do not need updating.
*/
STATIC int
-xfs_btree_updkey(
+__xfs_btree_updkeys(
+ struct xfs_btree_cur *cur,
+ int level,
+ struct xfs_btree_block *block,
+ struct xfs_buf *bp0,
+ bool force_all)
+{
+ union xfs_btree_bigkey key; /* keys from current level */
+	union xfs_btree_key	*lkey;	/* low key of the current level */
+ union xfs_btree_key *hkey;
+ union xfs_btree_key *nlkey; /* keys from the next level up */
+ union xfs_btree_key *nhkey;
+ struct xfs_buf *bp;
+ int ptr;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
+
+ /* Exit if there aren't any parent levels to update. */
+ if (level + 1 >= cur->bc_nlevels)
+ return 0;
+
+ trace_xfs_btree_updkeys(cur, level, bp0);
+
+ lkey = (union xfs_btree_key *)&key;
+ hkey = xfs_btree_high_key_from_key(cur, lkey);
+ xfs_btree_get_keys(cur, block, lkey);
+ for (level++; level < cur->bc_nlevels; level++) {
+#ifdef DEBUG
+ int error;
+#endif
+ block = xfs_btree_get_block(cur, level, &bp);
+ trace_xfs_btree_updkeys(cur, level, bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+#endif
+ ptr = cur->bc_ptrs[level];
+ nlkey = xfs_btree_key_addr(cur, ptr, block);
+ nhkey = xfs_btree_high_key_addr(cur, ptr, block);
+ if (!force_all &&
+ !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
+ cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
+ break;
+ xfs_btree_copy_keys(cur, nlkey, lkey, 1);
+ xfs_btree_log_keys(cur, bp, ptr, ptr);
+ if (level + 1 >= cur->bc_nlevels)
+ break;
+ xfs_btree_get_node_keys(cur, block, lkey);
+ }
+
+ return 0;
+}
+
+/* Update all the keys from some level in cursor back to the root. */
+STATIC int
+xfs_btree_updkeys_force(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ struct xfs_buf *bp;
+ struct xfs_btree_block *block;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ return __xfs_btree_updkeys(cur, level, block, bp, true);
+}
+
+/*
+ * Update the parent keys of the given level, progressing towards the root.
+ */
+STATIC int
+xfs_btree_update_keys(
struct xfs_btree_cur *cur,
- union xfs_btree_key *keyp,
int level)
{
struct xfs_btree_block *block;
struct xfs_buf *bp;
union xfs_btree_key *kp;
+ union xfs_btree_key key;
int ptr;
+ ASSERT(level >= 0);
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
+ return __xfs_btree_updkeys(cur, level, block, bp, false);
+
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIK(cur, level, keyp);
- ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);
-
/*
* Go up the tree from this level toward the root.
* At each level, update the key value to the value input.
* Stop when we reach a level where the cursor isn't pointing
* at the first entry in the block.
*/
- for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+ xfs_btree_get_keys(cur, block, &key);
+ for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
int error;
#endif
@@ -1918,7 +2172,7 @@ xfs_btree_updkey(
#endif
ptr = cur->bc_ptrs[level];
kp = xfs_btree_key_addr(cur, ptr, block);
- xfs_btree_copy_keys(cur, kp, keyp, 1);
+ xfs_btree_copy_keys(cur, kp, &key, 1);
xfs_btree_log_keys(cur, bp, ptr, ptr);
}
@@ -1970,12 +2224,9 @@ xfs_btree_update(
ptr, LASTREC_UPDATE);
}
- /* Updating first rec in leaf. Pass new key value up to our parent. */
- if (ptr == 1) {
- union xfs_btree_key key;
-
- cur->bc_ops->init_key_from_rec(&key, rec);
- error = xfs_btree_updkey(cur, &key, 1);
+ /* Pass new key value up to our parent. */
+ if (xfs_btree_needs_key_update(cur, ptr)) {
+ error = xfs_btree_update_keys(cur, 0);
if (error)
goto error0;
}
@@ -1998,18 +2249,19 @@ xfs_btree_lshift(
int level,
int *stat) /* success/failure */
{
- union xfs_btree_key key; /* btree key */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
int lrecs; /* left record count */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
+ struct xfs_btree_cur *tcur; /* temporary btree cursor */
int rrecs; /* right record count */
union xfs_btree_ptr lptr; /* left btree pointer */
union xfs_btree_key *rkp = NULL; /* right btree key */
union xfs_btree_ptr *rpp = NULL; /* right address pointer */
union xfs_btree_rec *rrp = NULL; /* right record pointer */
int error; /* error return value */
+ int i;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
@@ -2139,18 +2391,33 @@ xfs_btree_lshift(
xfs_btree_rec_addr(cur, 2, right),
-1, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
+ }
- /*
- * If it's the first record in the block, we'll need a key
- * structure to pass up to the next level (updkey).
- */
- cur->bc_ops->init_key_from_rec(&key,
- xfs_btree_rec_addr(cur, 1, right));
- rkp = &key;
+ /*
+ * Using a temporary cursor, update the parent key values of the
+ * block on the left.
+ */
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ error = xfs_btree_dup_cursor(cur, &tcur);
+ if (error)
+ goto error0;
+ i = xfs_btree_firstrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
+
+ error = xfs_btree_decrement(tcur, level, &i);
+ if (error)
+ goto error1;
+
+ /* Update the parent high keys of the left block, if needed. */
+ error = xfs_btree_update_keys(tcur, level);
+ if (error)
+ goto error1;
+
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
}
- /* Update the parent key values of right. */
- error = xfs_btree_updkey(cur, rkp, level + 1);
+ /* Update the parent keys of the right block. */
+ error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
@@ -2169,6 +2436,11 @@ out0:
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
+
+error1:
+ XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
+ xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+ return error;
}
/*
@@ -2181,7 +2453,6 @@ xfs_btree_rshift(
int level,
int *stat) /* success/failure */
{
- union xfs_btree_key key; /* btree key */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
struct xfs_buf *rbp; /* right buffer pointer */
@@ -2290,12 +2561,6 @@ xfs_btree_rshift(
/* Now put the new data in, and log it. */
xfs_btree_copy_recs(cur, rrp, lrp, 1);
xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
-
- cur->bc_ops->init_key_from_rec(&key, rrp);
- rkp = &key;
-
- ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
- xfs_btree_rec_addr(cur, 2, right)));
}
/*
@@ -2315,13 +2580,21 @@ xfs_btree_rshift(
if (error)
goto error0;
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error1;
- error = xfs_btree_updkey(tcur, rkp, level + 1);
+ /* Update the parent high keys of the left block, if needed. */
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ error = xfs_btree_update_keys(cur, level);
+ if (error)
+ goto error1;
+ }
+
+ /* Update the parent keys of the right block. */
+ error = xfs_btree_update_keys(tcur, level);
if (error)
goto error1;
@@ -2422,6 +2695,11 @@ __xfs_btree_split(
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+ /* Adjust numrecs for the later get_*_keys() calls. */
+ lrecs -= rrecs;
+ xfs_btree_set_numrecs(left, lrecs);
+ xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
+
/*
* Copy btree block entries from the left block over to the
* new block, the right. Update the right block and log the
@@ -2447,14 +2725,15 @@ __xfs_btree_split(
}
#endif
+ /* Copy the keys & pointers to the new block. */
xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
xfs_btree_log_keys(cur, rbp, 1, rrecs);
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
- /* Grab the keys to the entries moved to the right block */
- xfs_btree_copy_keys(cur, key, rkp, 1);
+ /* Stash the keys of the new block for later insertion. */
+ xfs_btree_get_node_keys(cur, right, key);
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
@@ -2463,27 +2742,23 @@ __xfs_btree_split(
lrp = xfs_btree_rec_addr(cur, src_index, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
+ /* Copy records to the new block. */
xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
- cur->bc_ops->init_key_from_rec(key,
- xfs_btree_rec_addr(cur, 1, right));
+ /* Stash the keys of the new block for later insertion. */
+ xfs_btree_get_leaf_keys(cur, right, key);
}
-
/*
* Find the left block number by looking in the buffer.
- * Adjust numrecs, sibling pointers.
+ * Adjust sibling pointers.
*/
xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
- lrecs -= rrecs;
- xfs_btree_set_numrecs(left, lrecs);
- xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
-
xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
@@ -2499,6 +2774,14 @@ __xfs_btree_split(
xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
}
+
+ /* Update the parent high keys of the left block, if needed. */
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ error = xfs_btree_update_keys(cur, level);
+ if (error)
+ goto error0;
+ }
+
/*
* If the cursor is really in the right block, move it there.
* If it's just pointing past the last entry in left, then we'll
@@ -2802,6 +3085,7 @@ xfs_btree_new_root(
bp = lbp;
nptr = 2;
}
+
/* Fill in the new block's btree header and log it. */
xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
@@ -2810,19 +3094,24 @@ xfs_btree_new_root(
/* Fill in the key data in the new root. */
if (xfs_btree_get_level(left) > 0) {
- xfs_btree_copy_keys(cur,
- xfs_btree_key_addr(cur, 1, new),
- xfs_btree_key_addr(cur, 1, left), 1);
- xfs_btree_copy_keys(cur,
- xfs_btree_key_addr(cur, 2, new),
- xfs_btree_key_addr(cur, 1, right), 1);
+ /*
+ * Get the keys for the left block's keys and put them directly
+ * in the parent block. Do the same for the right block.
+ */
+ xfs_btree_get_node_keys(cur, left,
+ xfs_btree_key_addr(cur, 1, new));
+ xfs_btree_get_node_keys(cur, right,
+ xfs_btree_key_addr(cur, 2, new));
} else {
- cur->bc_ops->init_key_from_rec(
- xfs_btree_key_addr(cur, 1, new),
- xfs_btree_rec_addr(cur, 1, left));
- cur->bc_ops->init_key_from_rec(
- xfs_btree_key_addr(cur, 2, new),
- xfs_btree_rec_addr(cur, 1, right));
+ /*
+ * Get the keys for the left block's records and put them
+ * directly in the parent block. Do the same for the right
+ * block.
+ */
+ xfs_btree_get_leaf_keys(cur, left,
+ xfs_btree_key_addr(cur, 1, new));
+ xfs_btree_get_leaf_keys(cur, right,
+ xfs_btree_key_addr(cur, 2, new));
}
xfs_btree_log_keys(cur, nbp, 1, 2);
@@ -2858,10 +3147,9 @@ xfs_btree_make_block_unfull(
int *index, /* new tree index */
union xfs_btree_ptr *nptr, /* new btree ptr */
struct xfs_btree_cur **ncur, /* new btree cursor */
- union xfs_btree_rec *nrec, /* new record */
+ union xfs_btree_key *key, /* key of new block */
int *stat)
{
- union xfs_btree_key key; /* new btree key value */
int error = 0;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
@@ -2871,6 +3159,7 @@ xfs_btree_make_block_unfull(
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
/* A root block that can be made bigger. */
xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
+ *stat = 1;
} else {
/* A root block that needs replacing */
int logflags = 0;
@@ -2906,13 +3195,12 @@ xfs_btree_make_block_unfull(
* If this works we have to re-set our variables because we
* could be in a different block now.
*/
- error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
+ error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
if (error || *stat == 0)
return error;
*index = cur->bc_ptrs[level];
- cur->bc_ops->init_rec_from_key(&key, nrec);
return 0;
}
@@ -2925,16 +3213,17 @@ xfs_btree_insrec(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level to insert record at */
union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
- union xfs_btree_rec *recp, /* i/o: record data inserted */
+ union xfs_btree_rec *rec, /* record to insert */
+ union xfs_btree_key *key, /* i/o: block key for ptrp */
struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
- union xfs_btree_key key; /* btree key */
union xfs_btree_ptr nptr; /* new block ptr */
struct xfs_btree_cur *ncur; /* new btree cursor */
- union xfs_btree_rec nrec; /* new record count */
+ union xfs_btree_bigkey nkey; /* new block key */
+ union xfs_btree_key *lkey;
int optr; /* old key/record index */
int ptr; /* key/record index */
int numrecs;/* number of records */
@@ -2942,11 +3231,13 @@ xfs_btree_insrec(
#ifdef DEBUG
int i;
#endif
+ xfs_daddr_t old_bn;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
- XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
+ XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);
ncur = NULL;
+ lkey = (union xfs_btree_key *)&nkey;
/*
* If we have an external root pointer, and we've made it to the
@@ -2969,15 +3260,13 @@ xfs_btree_insrec(
return 0;
}
- /* Make a key out of the record data to be inserted, and save it. */
- cur->bc_ops->init_key_from_rec(&key, recp);
-
optr = ptr;
XFS_BTREE_STATS_INC(cur, insrec);
/* Get pointers to the btree buffer and block. */
block = xfs_btree_get_block(cur, level, &bp);
+ old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
@@ -2988,10 +3277,10 @@ xfs_btree_insrec(
/* Check that the new entry is being inserted in the right place. */
if (ptr <= numrecs) {
if (level == 0) {
- ASSERT(cur->bc_ops->recs_inorder(cur, recp,
+ ASSERT(cur->bc_ops->recs_inorder(cur, rec,
xfs_btree_rec_addr(cur, ptr, block)));
} else {
- ASSERT(cur->bc_ops->keys_inorder(cur, &key,
+ ASSERT(cur->bc_ops->keys_inorder(cur, key,
xfs_btree_key_addr(cur, ptr, block)));
}
}
@@ -3004,7 +3293,7 @@ xfs_btree_insrec(
xfs_btree_set_ptr_null(cur, &nptr);
if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
error = xfs_btree_make_block_unfull(cur, level, numrecs,
- &optr, &ptr, &nptr, &ncur, &nrec, stat);
+ &optr, &ptr, &nptr, &ncur, lkey, stat);
if (error || *stat == 0)
goto error0;
}
@@ -3054,7 +3343,7 @@ xfs_btree_insrec(
#endif
/* Now put the new data in, bump numrecs and log it. */
- xfs_btree_copy_keys(cur, kp, &key, 1);
+ xfs_btree_copy_keys(cur, kp, key, 1);
xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
numrecs++;
xfs_btree_set_numrecs(block, numrecs);
@@ -3075,7 +3364,7 @@ xfs_btree_insrec(
xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
/* Now put the new data in, bump numrecs and log it. */
- xfs_btree_copy_recs(cur, rp, recp, 1);
+ xfs_btree_copy_recs(cur, rp, rec, 1);
xfs_btree_set_numrecs(block, ++numrecs);
xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
@@ -3089,9 +3378,18 @@ xfs_btree_insrec(
/* Log the new number of records in the btree header. */
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
- /* If we inserted at the start of a block, update the parents' keys. */
- if (optr == 1) {
- error = xfs_btree_updkey(cur, &key, level + 1);
+ /*
+ * If we just inserted into a new tree block, we have to
+ * recalculate nkey here because nkey is out of date.
+ *
+ * Otherwise we're just updating an existing block (having shoved
+ * some records into the new tree block), so use the regular key
+ * update mechanism.
+ */
+ if (bp && bp->b_bn != old_bn) {
+ xfs_btree_get_keys(cur, block, lkey);
+ } else if (xfs_btree_needs_key_update(cur, optr)) {
+ error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
}
@@ -3101,7 +3399,7 @@ xfs_btree_insrec(
* we are at the far right edge of the tree, update it.
*/
if (xfs_btree_is_lastrec(cur, block, level)) {
- cur->bc_ops->update_lastrec(cur, block, recp,
+ cur->bc_ops->update_lastrec(cur, block, rec,
ptr, LASTREC_INSREC);
}
@@ -3111,7 +3409,7 @@ xfs_btree_insrec(
*/
*ptrp = nptr;
if (!xfs_btree_ptr_is_null(cur, &nptr)) {
- *recp = nrec;
+ xfs_btree_copy_keys(cur, key, lkey, 1);
*curp = ncur;
}
@@ -3142,14 +3440,20 @@ xfs_btree_insert(
union xfs_btree_ptr nptr; /* new block number (split result) */
struct xfs_btree_cur *ncur; /* new cursor (split result) */
struct xfs_btree_cur *pcur; /* previous level's cursor */
+ union xfs_btree_bigkey bkey; /* key of block to insert */
+ union xfs_btree_key *key;
union xfs_btree_rec rec; /* record to insert */
level = 0;
ncur = NULL;
pcur = cur;
+ key = (union xfs_btree_key *)&bkey;
xfs_btree_set_ptr_null(cur, &nptr);
+
+ /* Make a key out of the record data to be inserted, and save it. */
cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(key, &rec);
/*
* Loop going up the tree, starting at the leaf level.
@@ -3161,7 +3465,8 @@ xfs_btree_insert(
* Insert nrec/nptr into this level of the tree.
* Note if we fail, nptr will be null.
*/
- error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
+ error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
+ &ncur, &i);
if (error) {
if (pcur != cur)
xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
@@ -3385,8 +3690,6 @@ xfs_btree_delrec(
struct xfs_buf *bp; /* buffer for block */
int error; /* error return value */
int i; /* loop counter */
- union xfs_btree_key key; /* storage for keyp */
- union xfs_btree_key *keyp = &key; /* passed to the next level */
union xfs_btree_ptr lptr; /* left sibling block ptr */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
@@ -3457,13 +3760,6 @@ xfs_btree_delrec(
xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
}
-
- /*
- * If it's the first record in the block, we'll need to pass a
- * key up to the next level (updkey).
- */
- if (ptr == 1)
- keyp = xfs_btree_key_addr(cur, 1, block);
} else {
/* It's a leaf. operate on records */
if (ptr < numrecs) {
@@ -3472,16 +3768,6 @@ xfs_btree_delrec(
-1, numrecs - ptr);
xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
}
-
- /*
- * If it's the first record in the block, we'll need a key
- * structure to pass up to the next level (updkey).
- */
- if (ptr == 1) {
- cur->bc_ops->init_key_from_rec(&key,
- xfs_btree_rec_addr(cur, 1, block));
- keyp = &key;
- }
}
/*
@@ -3548,8 +3834,8 @@ xfs_btree_delrec(
* If we deleted the leftmost entry in the block, update the
* key values above us in the tree.
*/
- if (ptr == 1) {
- error = xfs_btree_updkey(cur, keyp, level + 1);
+ if (xfs_btree_needs_key_update(cur, ptr)) {
+ error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
}
@@ -3878,6 +4164,16 @@ xfs_btree_delrec(
if (level > 0)
cur->bc_ptrs[level]--;
+ /*
+ * We combined blocks, so we have to update the parent keys if the
+ * btree supports overlapped intervals. However, bc_ptrs[level + 1]
+ * points to the old block so that the caller knows which record to
+ * delete. Therefore, the caller must be savvy enough to call updkeys
+ * for us if we return stat == 2. The other exit points from this
+ * function don't require deletions further up the tree, so they can
+ * call updkeys directly.
+ */
+
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
/* Return value means the next level up has something to do. */
*stat = 2;
@@ -3903,6 +4199,7 @@ xfs_btree_delete(
int error; /* error return value */
int level;
int i;
+ bool joined = false;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
@@ -3916,6 +4213,18 @@ xfs_btree_delete(
error = xfs_btree_delrec(cur, level, &i);
if (error)
goto error0;
+ if (i == 2)
+ joined = true;
+ }
+
+ /*
+ * If we combined blocks as part of deleting the record, delrec won't
+ * have updated the parent high keys so we have to do that here.
+ */
+ if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
+ error = xfs_btree_updkeys_force(cur, 0);
+ if (error)
+ goto error0;
}
if (i == 0) {
@@ -3978,6 +4287,81 @@ xfs_btree_get_rec(
return 0;
}
+/* Visit a block in a btree. */
+STATIC int
+xfs_btree_visit_block(
+ struct xfs_btree_cur *cur,
+ int level,
+ xfs_btree_visit_blocks_fn fn,
+ void *data)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ union xfs_btree_ptr rptr;
+ int error;
+
+ /* do right sibling readahead */
+ xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ /* process the block */
+ error = fn(cur, level, data);
+ if (error)
+ return error;
+
+ /* now read rh sibling block for next iteration */
+ xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+ if (xfs_btree_ptr_is_null(cur, &rptr))
+ return -ENOENT;
+
+ return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+}
+
+
+/* Visit every block in a btree. */
+int
+xfs_btree_visit_blocks(
+ struct xfs_btree_cur *cur,
+ xfs_btree_visit_blocks_fn fn,
+ void *data)
+{
+ union xfs_btree_ptr lptr;
+ int level;
+ struct xfs_btree_block *block = NULL;
+ int error = 0;
+
+ cur->bc_ops->init_ptr_from_cur(cur, &lptr);
+
+ /* for each level */
+ for (level = cur->bc_nlevels - 1; level >= 0; level--) {
+ /* grab the left hand block */
+ error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
+ if (error)
+ return error;
+
+ /* readahead the left most block for the next level down */
+ if (level > 0) {
+ union xfs_btree_ptr *ptr;
+
+ ptr = xfs_btree_ptr_addr(cur, 1, block);
+ xfs_btree_readahead_ptr(cur, ptr, 1);
+
+ /* save for the next iteration of the loop */
+ lptr = *ptr;
+ }
+
+ /* for each buffer in the level */
+ do {
+ error = xfs_btree_visit_block(cur, level, fn, data);
+ } while (!error);
+
+ if (error != -ENOENT)
+ return error;
+ }
+
+ return 0;
+}
+
/*
* Change the owner of a btree.
*
@@ -4002,26 +4386,27 @@ xfs_btree_get_rec(
* just queue the modified buffer as delayed write buffer so the transaction
* recovery completion writes the changes to disk.
*/
+struct xfs_btree_block_change_owner_info {
+ __uint64_t new_owner;
+ struct list_head *buffer_list;
+};
+
static int
xfs_btree_block_change_owner(
struct xfs_btree_cur *cur,
int level,
- __uint64_t new_owner,
- struct list_head *buffer_list)
+ void *data)
{
+ struct xfs_btree_block_change_owner_info *bbcoi = data;
struct xfs_btree_block *block;
struct xfs_buf *bp;
- union xfs_btree_ptr rptr;
-
- /* do right sibling readahead */
- xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
/* modify the owner */
block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- block->bb_u.l.bb_owner = cpu_to_be64(new_owner);
+ block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
else
- block->bb_u.s.bb_owner = cpu_to_be32(new_owner);
+ block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
/*
* If the block is a root block hosted in an inode, we might not have a
@@ -4035,19 +4420,14 @@ xfs_btree_block_change_owner(
xfs_trans_ordered_buf(cur->bc_tp, bp);
xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
} else {
- xfs_buf_delwri_queue(bp, buffer_list);
+ xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
}
} else {
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(level == cur->bc_nlevels - 1);
}
- /* now read rh sibling block for next iteration */
- xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
- if (xfs_btree_ptr_is_null(cur, &rptr))
- return -ENOENT;
-
- return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+ return 0;
}
int
@@ -4056,43 +4436,13 @@ xfs_btree_change_owner(
__uint64_t new_owner,
struct list_head *buffer_list)
{
- union xfs_btree_ptr lptr;
- int level;
- struct xfs_btree_block *block = NULL;
- int error = 0;
-
- cur->bc_ops->init_ptr_from_cur(cur, &lptr);
-
- /* for each level */
- for (level = cur->bc_nlevels - 1; level >= 0; level--) {
- /* grab the left hand block */
- error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
- if (error)
- return error;
-
- /* readahead the left most block for the next level down */
- if (level > 0) {
- union xfs_btree_ptr *ptr;
-
- ptr = xfs_btree_ptr_addr(cur, 1, block);
- xfs_btree_readahead_ptr(cur, ptr, 1);
-
- /* save for the next iteration of the loop */
- lptr = *ptr;
- }
-
- /* for each buffer in the level */
- do {
- error = xfs_btree_block_change_owner(cur, level,
- new_owner,
- buffer_list);
- } while (!error);
+ struct xfs_btree_block_change_owner_info bbcoi;
- if (error != -ENOENT)
- return error;
- }
+ bbcoi.new_owner = new_owner;
+ bbcoi.buffer_list = buffer_list;
- return 0;
+ return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
+ &bbcoi);
}
/**
@@ -4171,3 +4521,267 @@ xfs_btree_compute_maxlevels(
maxblocks = (maxblocks + limits[1] - 1) / limits[1];
return level;
}
+
+/*
+ * Query a regular btree for all records overlapping a given interval.
+ * Start with a LE lookup of the key of low_rec and return all records
+ * until we find a record with a key greater than the key of high_rec.
+ */
+STATIC int
+xfs_btree_simple_query_range(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *low_key,
+ union xfs_btree_key *high_key,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_rec *recp;
+ union xfs_btree_key rec_key;
+ __int64_t diff;
+ int stat;
+ bool firstrec = true;
+ int error;
+
+ ASSERT(cur->bc_ops->init_high_key_from_rec);
+ ASSERT(cur->bc_ops->diff_two_keys);
+
+ /*
+ * Find the leftmost record. The btree cursor must be set
+ * to the low record used to generate low_key.
+ */
+ stat = 0;
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
+ if (error)
+ goto out;
+
+ while (stat) {
+ /* Find the record. */
+ error = xfs_btree_get_rec(cur, &recp, &stat);
+ if (error || !stat)
+ break;
+ cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
+
+ /* Skip if high_key(rec) < low_key. */
+ if (firstrec) {
+ firstrec = false;
+ diff = cur->bc_ops->diff_two_keys(cur, low_key,
+ &rec_key);
+ if (diff > 0)
+ goto advloop;
+ }
+
+ /* Stop if high_key < low_key(rec). */
+ diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
+ if (diff > 0)
+ break;
+
+ /* Callback */
+ error = fn(cur, recp, priv);
+ if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
+ break;
+
+advloop:
+ /* Move on to the next record. */
+ error = xfs_btree_increment(cur, 0, &stat);
+ if (error)
+ break;
+ }
+
+out:
+ return error;
+}
+
+/*
+ * Query an overlapped interval btree for all records overlapping a given
+ * interval. This function roughly follows the algorithm given in
+ * "Interval Trees" of _Introduction to Algorithms_, which is section
+ * 14.3 in the 2nd and 3rd editions.
+ *
+ * First, generate keys for the low and high records passed in.
+ *
+ * For any leaf node, generate the high and low keys for the record.
+ * If the record keys overlap with the query low/high keys, pass the
+ * record to the function iterator.
+ *
+ * For any internal node, compare the low and high keys of each
+ * pointer against the query low/high keys. If there's an overlap,
+ * follow the pointer.
+ *
+ * As an optimization, we stop scanning a block when we find a low key
+ * that is greater than the query's high key.
+ */
+STATIC int
+xfs_btree_overlapped_query_range(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *low_key,
+ union xfs_btree_key *high_key,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_ptr ptr;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_key rec_key;
+ union xfs_btree_key rec_hkey;
+ union xfs_btree_key *lkp;
+ union xfs_btree_key *hkp;
+ union xfs_btree_rec *recp;
+ struct xfs_btree_block *block;
+ __int64_t ldiff;
+ __int64_t hdiff;
+ int level;
+ struct xfs_buf *bp;
+ int i;
+ int error;
+
+ /* Load the root of the btree. */
+ level = cur->bc_nlevels - 1;
+ cur->bc_ops->init_ptr_from_cur(cur, &ptr);
+ error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
+ if (error)
+ return error;
+ xfs_btree_get_block(cur, level, &bp);
+ trace_xfs_btree_overlapped_query_range(cur, level, bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto out;
+#endif
+ cur->bc_ptrs[level] = 1;
+
+ while (level < cur->bc_nlevels) {
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ /* End of node, pop back towards the root. */
+ if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
+pop_up:
+ if (level < cur->bc_nlevels - 1)
+ cur->bc_ptrs[level + 1]++;
+ level++;
+ continue;
+ }
+
+ if (level == 0) {
+ /* Handle a leaf node. */
+ recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
+
+ cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
+ ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
+ low_key);
+
+ cur->bc_ops->init_key_from_rec(&rec_key, recp);
+ hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
+ &rec_key);
+
+ /*
+ * If (record's high key >= query's low key) and
+ * (query's high key >= record's low key), then
+ * this record overlaps the query range; callback.
+ */
+ if (ldiff >= 0 && hdiff >= 0) {
+ error = fn(cur, recp, priv);
+ if (error < 0 ||
+ error == XFS_BTREE_QUERY_RANGE_ABORT)
+ break;
+ } else if (hdiff < 0) {
+ /* Record is larger than high key; pop. */
+ goto pop_up;
+ }
+ cur->bc_ptrs[level]++;
+ continue;
+ }
+
+ /* Handle an internal node. */
+ lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
+ hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
+ pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
+
+ ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
+ hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);
+
+ /*
+ * If (pointer's high key >= query's low key) and
+ * (query's high key >= pointer's low key), then
+ * this record overlaps the query range; follow pointer.
+ */
+ if (ldiff >= 0 && hdiff >= 0) {
+ level--;
+ error = xfs_btree_lookup_get_block(cur, level, pp,
+ &block);
+ if (error)
+ goto out;
+ xfs_btree_get_block(cur, level, &bp);
+ trace_xfs_btree_overlapped_query_range(cur, level, bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto out;
+#endif
+ cur->bc_ptrs[level] = 1;
+ continue;
+ } else if (hdiff < 0) {
+ /* The low key is larger than the upper range; pop. */
+ goto pop_up;
+ }
+ cur->bc_ptrs[level]++;
+ }
+
+out:
+ /*
+ * If we don't end this function with the cursor pointing at a record
+ * block, a subsequent non-error cursor deletion will not release
+ * node-level buffers, causing a buffer leak. This is quite possible
+ * with a zero-results range query, so release the buffers if we
+ * failed to return any results.
+ */
+ if (cur->bc_bufs[0] == NULL) {
+ for (i = 0; i < cur->bc_nlevels; i++) {
+ if (cur->bc_bufs[i]) {
+ xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
+ cur->bc_bufs[i] = NULL;
+ cur->bc_ptrs[i] = 0;
+ cur->bc_ra[i] = 0;
+ }
+ }
+ }
+
+ return error;
+}
+
+/*
+ * Query a btree for all records overlapping a given interval of keys. The
+ * supplied function will be called with each record found; return one of the
+ * XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
+ * code. This function returns XFS_BTREE_QUERY_RANGE_ABORT, zero, or a
+ * negative error code.
+ */
+int
+xfs_btree_query_range(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_irec *low_rec,
+ union xfs_btree_irec *high_rec,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_rec rec;
+ union xfs_btree_key low_key;
+ union xfs_btree_key high_key;
+
+ /* Find the keys of both ends of the interval. */
+ cur->bc_rec = *high_rec;
+ cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(&high_key, &rec);
+
+ cur->bc_rec = *low_rec;
+ cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(&low_key, &rec);
+
+ /* Enforce low key < high key. */
+ if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
+ return -EINVAL;
+
+ if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
+ return xfs_btree_simple_query_range(cur, &low_key,
+ &high_key, fn, priv);
+ return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
+ fn, priv);
+}
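The comment above xfs_btree_query_range() spells out the callback contract. For reference, here is a hedged sketch of what a caller might look like. It is not taken from this series -- the xfs_example_* names and the record-counting behaviour are invented -- and it relies only on the typedef, prototype, and XFS_BTREE_QUERY_RANGE_* return codes that this patch adds to xfs_btree.h:

	/*
	 * Hypothetical callback (not part of this patch): count matching
	 * records, bailing out early once a limit is reached.
	 */
	STATIC int
	xfs_example_count_recs(
		struct xfs_btree_cur	*cur,
		union xfs_btree_rec	*rec,
		void			*priv)
	{
		unsigned int		*count = priv;

		(*count)++;
		if (*count >= 1000)
			return XFS_BTREE_QUERY_RANGE_ABORT;	/* stop iterating early */
		return XFS_BTREE_QUERY_RANGE_CONTINUE;		/* keep going */
	}

	/*
	 * Hypothetical caller: low/high are incore records describing the
	 * interval endpoints; cur is a cursor for a regular or overlapping
	 * btree.
	 */
	STATIC int
	xfs_example_count_range(
		struct xfs_btree_cur	*cur,
		union xfs_btree_irec	*low,
		union xfs_btree_irec	*high,
		unsigned int		*count)
	{
		int			error;

		*count = 0;
		error = xfs_btree_query_range(cur, low, high,
				xfs_example_count_recs, count);
		/* An ABORT from the callback is an early stop, not a failure. */
		if (error == XFS_BTREE_QUERY_RANGE_ABORT)
			error = 0;
		return error;
	}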
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 785a99682159..04d0865e5e6d 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -19,7 +19,7 @@
#define __XFS_BTREE_H__
struct xfs_buf;
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
@@ -38,17 +38,37 @@ union xfs_btree_ptr {
};
union xfs_btree_key {
- xfs_bmbt_key_t bmbt;
- xfs_bmdr_key_t bmbr; /* bmbt root block */
- xfs_alloc_key_t alloc;
- xfs_inobt_key_t inobt;
+ struct xfs_bmbt_key bmbt;
+ xfs_bmdr_key_t bmbr; /* bmbt root block */
+ xfs_alloc_key_t alloc;
+ struct xfs_inobt_key inobt;
+ struct xfs_rmap_key rmap;
+};
+
+/*
+ * In-core key that holds both low and high keys for overlapped btrees.
+ * The two keys are packed next to each other on disk, so do the same
+ * in memory. Preserve the existing xfs_btree_key as a single key to
+ * avoid the mental model breakage that would happen if we passed a
+ * bigkey into a function that operates on a single key.
+ */
+union xfs_btree_bigkey {
+ struct xfs_bmbt_key bmbt;
+ xfs_bmdr_key_t bmbr; /* bmbt root block */
+ xfs_alloc_key_t alloc;
+ struct xfs_inobt_key inobt;
+ struct {
+ struct xfs_rmap_key rmap;
+ struct xfs_rmap_key rmap_hi;
+ };
};
union xfs_btree_rec {
- xfs_bmbt_rec_t bmbt;
- xfs_bmdr_rec_t bmbr; /* bmbt root block */
- xfs_alloc_rec_t alloc;
- xfs_inobt_rec_t inobt;
+ struct xfs_bmbt_rec bmbt;
+ xfs_bmdr_rec_t bmbr; /* bmbt root block */
+ struct xfs_alloc_rec alloc;
+ struct xfs_inobt_rec inobt;
+ struct xfs_rmap_rec rmap;
};
/*
@@ -63,6 +83,7 @@ union xfs_btree_rec {
#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi)
+#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
/*
* For logging record fields.
@@ -95,6 +116,7 @@ do { \
case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(__mp, bmbt, stat); break; \
case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(__mp, ibt, stat); break; \
case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(__mp, fibt, stat); break; \
+ case XFS_BTNUM_RMAP: __XFS_BTREE_STATS_INC(__mp, rmap, stat); break; \
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
@@ -115,11 +137,13 @@ do { \
__XFS_BTREE_STATS_ADD(__mp, ibt, stat, val); break; \
case XFS_BTNUM_FINO: \
__XFS_BTREE_STATS_ADD(__mp, fibt, stat, val); break; \
+ case XFS_BTNUM_RMAP: \
+ __XFS_BTREE_STATS_ADD(__mp, rmap, stat, val); break; \
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
-#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */
+#define XFS_BTREE_MAXLEVELS 9 /* max of all btrees */
struct xfs_btree_ops {
/* size of the key and record structures */
@@ -158,17 +182,25 @@ struct xfs_btree_ops {
/* init values of btree structures */
void (*init_key_from_rec)(union xfs_btree_key *key,
union xfs_btree_rec *rec);
- void (*init_rec_from_key)(union xfs_btree_key *key,
- union xfs_btree_rec *rec);
void (*init_rec_from_cur)(struct xfs_btree_cur *cur,
union xfs_btree_rec *rec);
void (*init_ptr_from_cur)(struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr);
+ void (*init_high_key_from_rec)(union xfs_btree_key *key,
+ union xfs_btree_rec *rec);
/* difference between key value and cursor value */
__int64_t (*key_diff)(struct xfs_btree_cur *cur,
union xfs_btree_key *key);
+ /*
+	 * Difference between key1 and key2 -- positive if key1 > key2,
+ * negative if key1 < key2, and zero if equal.
+ */
+ __int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
+ union xfs_btree_key *key1,
+ union xfs_btree_key *key2);
+
const struct xfs_buf_ops *buf_ops;
#if defined(DEBUG) || defined(XFS_WARN)
@@ -192,6 +224,13 @@ struct xfs_btree_ops {
#define LASTREC_DELREC 2
+union xfs_btree_irec {
+ struct xfs_alloc_rec_incore a;
+ struct xfs_bmbt_irec b;
+ struct xfs_inobt_rec_incore i;
+ struct xfs_rmap_irec r;
+};
+
/*
* Btree cursor structure.
* This collects all information needed by the btree code in one place.
@@ -202,11 +241,7 @@ typedef struct xfs_btree_cur
struct xfs_mount *bc_mp; /* file system mount struct */
const struct xfs_btree_ops *bc_ops;
uint bc_flags; /* btree features - below */
- union {
- xfs_alloc_rec_incore_t a;
- xfs_bmbt_irec_t b;
- xfs_inobt_rec_incore_t i;
- } bc_rec; /* current insert/search record value */
+ union xfs_btree_irec bc_rec; /* current insert/search record value */
struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */
int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */
__uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */
@@ -218,11 +253,12 @@ typedef struct xfs_btree_cur
union {
struct { /* needed for BNO, CNT, INO */
struct xfs_buf *agbp; /* agf/agi buffer pointer */
+ struct xfs_defer_ops *dfops; /* deferred updates */
xfs_agnumber_t agno; /* ag number */
} a;
struct { /* needed for BMAP */
struct xfs_inode *ip; /* pointer to our inode */
- struct xfs_bmap_free *flist; /* list to free after */
+ struct xfs_defer_ops *dfops; /* deferred updates */
xfs_fsblock_t firstblock; /* 1st blk allocated */
int allocated; /* count of alloced */
short forksize; /* fork's inode space */
@@ -238,6 +274,7 @@ typedef struct xfs_btree_cur
#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
+#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
#define XFS_BTREE_NOERROR 0
@@ -477,4 +514,19 @@ bool xfs_btree_sblock_verify(struct xfs_buf *bp, unsigned int max_recs);
uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
unsigned long len);
+/* return codes */
+#define XFS_BTREE_QUERY_RANGE_CONTINUE 0 /* keep iterating */
+#define XFS_BTREE_QUERY_RANGE_ABORT 1 /* stop iterating */
+typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec, void *priv);
+
+int xfs_btree_query_range(struct xfs_btree_cur *cur,
+ union xfs_btree_irec *low_rec, union xfs_btree_irec *high_rec,
+ xfs_btree_query_range_fn fn, void *priv);
+
+typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
+ void *data);
+int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
+ xfs_btree_visit_blocks_fn fn, void *data);
+
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 0f1f165f4048..f2dc1a950c85 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2029,7 +2029,7 @@ xfs_da_grow_inode_int(
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist);
+ args->dfops);
if (error)
return error;
@@ -2052,7 +2052,7 @@ xfs_da_grow_inode_int(
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist);
+ &mapp[mapi], &nmap, args->dfops);
if (error)
goto out_free_map;
if (nmap < 1)
@@ -2362,7 +2362,7 @@ xfs_da_shrink_inode(
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w), 0, args->firstblock,
- args->flist, &done);
+ args->dfops, &done);
if (error == -ENOSPC) {
if (w != XFS_DATA_FORK)
break;
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 6e153e399a77..98c75cbe6ac2 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -19,7 +19,7 @@
#ifndef __XFS_DA_BTREE_H__
#define __XFS_DA_BTREE_H__
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_inode;
struct xfs_trans;
struct zone;
@@ -70,7 +70,7 @@ typedef struct xfs_da_args {
xfs_ino_t inumber; /* input/output inode number */
struct xfs_inode *dp; /* directory inode to manipulate */
xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */
- struct xfs_bmap_free *flist; /* ptr to freelist for bmap_finish */
+	struct xfs_defer_ops	*dfops;		/* ptr to deferred op list */
struct xfs_trans *trans; /* current trans (changes over time) */
xfs_extlen_t total; /* total blocks needed, for 1st bmap */
int whichfork; /* data or attribute fork */
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 685f23b67056..9a492a9e19bd 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -629,6 +629,7 @@ typedef struct xfs_attr_shortform {
struct xfs_attr_sf_hdr { /* constant-structure header block */
__be16 totsize; /* total bytes in shortform list */
__u8 count; /* count of active entries */
+ __u8 padding;
} hdr;
struct xfs_attr_sf_entry {
__uint8_t namelen; /* actual length of name (no NULL) */
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
new file mode 100644
index 000000000000..054a2032fdb3
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trace.h"
+
+/*
+ * Deferred Operations in XFS
+ *
+ * Due to the way locking rules work in XFS, certain transactions (block
+ * mapping and unmapping, typically) have permanent reservations so that
+ * we can roll the transaction to adhere to AG locking order rules and
+ * to unlock buffers between metadata updates. Prior to rmap/reflink,
+ * the mapping code had a mechanism to perform these deferrals for
+ * extents that were going to be freed; this code makes that facility
+ * more generic.
+ *
+ * When adding the reverse mapping and reflink features, it became
+ * necessary to perform complex remapping multi-transactions to comply
+ * with AG locking order rules, and to be able to spread a single
+ * refcount update operation (an operation on an n-block extent can
+ * update as many as n records!) among multiple transactions. XFS can
+ * roll a transaction to facilitate this, but using this facility
+ * requires us to log "intent" items in case log recovery needs to
+ * redo the operation, and to log "done" items to indicate that redo
+ * is not necessary.
+ *
+ * Deferred work is tracked in xfs_defer_pending items. Each pending
+ * item tracks one type of deferred work. Incoming work items (which
+ * have not yet had an intent logged) are attached to a pending item
+ * on the dop_intake list, where they wait for the caller to finish
+ * the deferred operations.
+ *
+ * Finishing a set of deferred operations is an involved process. To
+ * start, we define "rolling a deferred-op transaction" as follows:
+ *
+ * > For each xfs_defer_pending item on the dop_intake list,
+ * - Sort the work items in AG order. XFS locking
+ * order rules require us to lock buffers in AG order.
+ * - Create a log intent item for that type.
+ * - Attach it to the pending item.
+ * - Move the pending item from the dop_intake list to the
+ * dop_pending list.
+ * > Roll the transaction.
+ *
+ * NOTE: To avoid exceeding the transaction reservation, we limit the
+ * number of items that we attach to a given xfs_defer_pending.
+ *
+ * The actual finishing process looks like this:
+ *
+ * > For each xfs_defer_pending in the dop_pending list,
+ * - Roll the deferred-op transaction as above.
+ * - Create a log done item for that type, and attach it to the
+ * log intent item.
+ * - For each work item attached to the log intent item,
+ * * Perform the described action.
+ * * Attach the work item to the log done item.
+ *
+ * The key here is that we must log an intent item for all pending
+ * work items every time we roll the transaction, and that we must log
+ * a done item as soon as the work is completed. With this mechanism
+ * we can perform complex remapping operations, chaining intent items
+ * as needed.
+ *
+ * This is an example of remapping the extent (E, E+B) into file X at
+ * offset A and dealing with the extent (C, C+B) already being mapped
+ * there:
+ * +-------------------------------------------------+
+ * | Unmap file X startblock C offset A length B | t0
+ * | Intent to reduce refcount for extent (C, B) |
+ * | Intent to remove rmap (X, C, A, B) |
+ * | Intent to free extent (D, 1) (bmbt block) |
+ * | Intent to map (X, A, B) at startblock E |
+ * +-------------------------------------------------+
+ * | Map file X startblock E offset A length B | t1
+ * | Done mapping (X, E, A, B) |
+ * | Intent to increase refcount for extent (E, B) |
+ * | Intent to add rmap (X, E, A, B) |
+ * +-------------------------------------------------+
+ * | Reduce refcount for extent (C, B) | t2
+ * | Done reducing refcount for extent (C, B) |
+ * | Increase refcount for extent (E, B) |
+ * | Done increasing refcount for extent (E, B) |
+ * | Intent to free extent (C, B) |
+ * | Intent to free extent (F, 1) (refcountbt block) |
+ * | Intent to remove rmap (F, 1, REFC) |
+ * +-------------------------------------------------+
+ * | Remove rmap (X, C, A, B) | t3
+ * | Done removing rmap (X, C, A, B) |
+ * | Add rmap (X, E, A, B) |
+ * | Done adding rmap (X, E, A, B) |
+ * | Remove rmap (F, 1, REFC) |
+ * | Done removing rmap (F, 1, REFC) |
+ * +-------------------------------------------------+
+ * | Free extent (C, B) | t4
+ * | Done freeing extent (C, B) |
+ * | Free extent (D, 1) |
+ * | Done freeing extent (D, 1) |
+ * | Free extent (F, 1) |
+ * | Done freeing extent (F, 1) |
+ * +-------------------------------------------------+
+ *
+ * If we should crash before t2 commits, log recovery replays
+ * the following intent items:
+ *
+ * - Intent to reduce refcount for extent (C, B)
+ * - Intent to remove rmap (X, C, A, B)
+ * - Intent to free extent (D, 1) (bmbt block)
+ * - Intent to increase refcount for extent (E, B)
+ * - Intent to add rmap (X, E, A, B)
+ *
+ * In the process of recovering, it should also generate and take care
+ * of these intent items:
+ *
+ * - Intent to free extent (C, B)
+ * - Intent to free extent (F, 1) (refcountbt block)
+ * - Intent to remove rmap (F, 1, REFC)
+ */
+
+static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];
+
+/*
+ * For each pending item in the intake list, log its intent item and the
+ * associated extents, then add the entire intake list to the end of
+ * the pending list.
+ */
+STATIC void
+xfs_defer_intake_work(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop)
+{
+ struct list_head *li;
+ struct xfs_defer_pending *dfp;
+
+ list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
+ trace_xfs_defer_intake_work(tp->t_mountp, dfp);
+ dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
+ dfp->dfp_count);
+ list_sort(tp->t_mountp, &dfp->dfp_work,
+ dfp->dfp_type->diff_items);
+ list_for_each(li, &dfp->dfp_work)
+ dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
+ }
+
+ list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
+}
+
+/* Abort all the intents that were committed. */
+STATIC void
+xfs_defer_trans_abort(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop,
+ int error)
+{
+ struct xfs_defer_pending *dfp;
+
+ trace_xfs_defer_trans_abort(tp->t_mountp, dop);
+ /*
+ * If the transaction was committed, drop the intent reference
+ * since we're bailing out of here. The other reference is
+ * dropped when the intent hits the AIL. If the transaction
+ * was not committed, the intent is freed by the intent item
+ * unlock handler on abort.
+ */
+ if (!dop->dop_committed)
+ return;
+
+ /* Abort intent items. */
+ list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
+ trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
+ if (dfp->dfp_committed)
+ dfp->dfp_type->abort_intent(dfp->dfp_intent);
+ }
+
+ /* Shut down FS. */
+ xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
+ SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
+}
+
+/* Roll a transaction so we can do some deferred op processing. */
+STATIC int
+xfs_defer_trans_roll(
+ struct xfs_trans **tp,
+ struct xfs_defer_ops *dop,
+ struct xfs_inode *ip)
+{
+ int i;
+ int error;
+
+ /* Log all the joined inodes except the one we passed in. */
+ for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) {
+ if (dop->dop_inodes[i] == ip)
+ continue;
+ xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
+ }
+
+ trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
+
+ /* Roll the transaction. */
+ error = xfs_trans_roll(tp, ip);
+ if (error) {
+ trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
+ xfs_defer_trans_abort(*tp, dop, error);
+ return error;
+ }
+ dop->dop_committed = true;
+
+ /* Rejoin the joined inodes except the one we passed in. */
+ for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) {
+ if (dop->dop_inodes[i] == ip)
+ continue;
+ xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
+ }
+
+ return error;
+}
+
+/* Do we have any work items to finish? */
+bool
+xfs_defer_has_unfinished_work(
+ struct xfs_defer_ops *dop)
+{
+ return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
+}
+
+/*
+ * Add this inode to the deferred op. Each joined inode is relogged
+ * each time we roll the transaction, in addition to any inode passed
+ * to xfs_defer_finish().
+ */
+int
+xfs_defer_join(
+ struct xfs_defer_ops *dop,
+ struct xfs_inode *ip)
+{
+ int i;
+
+ for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
+ if (dop->dop_inodes[i] == ip)
+ return 0;
+ else if (dop->dop_inodes[i] == NULL) {
+ dop->dop_inodes[i] = ip;
+ return 0;
+ }
+ }
+
+ return -EFSCORRUPTED;
+}
+
+/*
+ * Finish all the pending work. This involves logging intent items for
+ * any work items that wandered in since the last transaction roll (if
+ * one has even happened), rolling the transaction, and finishing the
+ * work items in the first item on the logged-and-pending list.
+ *
+ * If an inode is provided, relog it to the new transaction.
+ */
+int
+xfs_defer_finish(
+ struct xfs_trans **tp,
+ struct xfs_defer_ops *dop,
+ struct xfs_inode *ip)
+{
+ struct xfs_defer_pending *dfp;
+ struct list_head *li;
+ struct list_head *n;
+ void *done_item = NULL;
+ void *state;
+ int error = 0;
+ void (*cleanup_fn)(struct xfs_trans *, void *, int);
+
+ ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+ trace_xfs_defer_finish((*tp)->t_mountp, dop);
+
+ /* Until we run out of pending work to finish... */
+ while (xfs_defer_has_unfinished_work(dop)) {
+ /* Log intents for work items sitting in the intake. */
+ xfs_defer_intake_work(*tp, dop);
+
+ /* Roll the transaction. */
+ error = xfs_defer_trans_roll(tp, dop, ip);
+ if (error)
+ goto out;
+
+ /* Mark all pending intents as committed. */
+ list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
+ if (dfp->dfp_committed)
+ break;
+ trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
+ dfp->dfp_committed = true;
+ }
+
+ /* Log an intent-done item for the first pending item. */
+ dfp = list_first_entry(&dop->dop_pending,
+ struct xfs_defer_pending, dfp_list);
+ trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
+ done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+ dfp->dfp_count);
+ cleanup_fn = dfp->dfp_type->finish_cleanup;
+
+ /* Finish the work items. */
+ state = NULL;
+ list_for_each_safe(li, n, &dfp->dfp_work) {
+ list_del(li);
+ dfp->dfp_count--;
+ error = dfp->dfp_type->finish_item(*tp, dop, li,
+ done_item, &state);
+ if (error) {
+ /*
+ * Clean up after ourselves and jump out.
+ * xfs_defer_cancel will take care of freeing
+ * all these lists and stuff.
+ */
+ if (cleanup_fn)
+ cleanup_fn(*tp, state, error);
+ xfs_defer_trans_abort(*tp, dop, error);
+ goto out;
+ }
+ }
+ /* Done with the dfp, free it. */
+ list_del(&dfp->dfp_list);
+ kmem_free(dfp);
+
+ if (cleanup_fn)
+ cleanup_fn(*tp, state, error);
+ }
+
+out:
+ if (error)
+ trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
+ else
+ trace_xfs_defer_finish_done((*tp)->t_mountp, dop);
+ return error;
+}
+
+/*
+ * Free up any items left in the list.
+ */
+void
+xfs_defer_cancel(
+ struct xfs_defer_ops *dop)
+{
+ struct xfs_defer_pending *dfp;
+ struct xfs_defer_pending *pli;
+ struct list_head *pwi;
+ struct list_head *n;
+
+ trace_xfs_defer_cancel(NULL, dop);
+
+ /*
+ * Free the pending items. Caller should already have arranged
+ * for the intent items to be released.
+ */
+ list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
+ trace_xfs_defer_intake_cancel(NULL, dfp);
+ list_del(&dfp->dfp_list);
+ list_for_each_safe(pwi, n, &dfp->dfp_work) {
+ list_del(pwi);
+ dfp->dfp_count--;
+ dfp->dfp_type->cancel_item(pwi);
+ }
+ ASSERT(dfp->dfp_count == 0);
+ kmem_free(dfp);
+ }
+ list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
+ trace_xfs_defer_pending_cancel(NULL, dfp);
+ list_del(&dfp->dfp_list);
+ list_for_each_safe(pwi, n, &dfp->dfp_work) {
+ list_del(pwi);
+ dfp->dfp_count--;
+ dfp->dfp_type->cancel_item(pwi);
+ }
+ ASSERT(dfp->dfp_count == 0);
+ kmem_free(dfp);
+ }
+}
+
+/* Add an item for later deferred processing. */
+void
+xfs_defer_add(
+ struct xfs_defer_ops *dop,
+ enum xfs_defer_ops_type type,
+ struct list_head *li)
+{
+ struct xfs_defer_pending *dfp = NULL;
+
+ /*
+ * Add the item to a pending item at the end of the intake list.
+ * If the last pending item has the same type, reuse it. Else,
+ * create a new pending item at the end of the intake list.
+ */
+ if (!list_empty(&dop->dop_intake)) {
+ dfp = list_last_entry(&dop->dop_intake,
+ struct xfs_defer_pending, dfp_list);
+ if (dfp->dfp_type->type != type ||
+ (dfp->dfp_type->max_items &&
+ dfp->dfp_count >= dfp->dfp_type->max_items))
+ dfp = NULL;
+ }
+ if (!dfp) {
+ dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
+ KM_SLEEP | KM_NOFS);
+ dfp->dfp_type = defer_op_types[type];
+ dfp->dfp_committed = false;
+ dfp->dfp_intent = NULL;
+ dfp->dfp_count = 0;
+ INIT_LIST_HEAD(&dfp->dfp_work);
+ list_add_tail(&dfp->dfp_list, &dop->dop_intake);
+ }
+
+ list_add_tail(li, &dfp->dfp_work);
+ dfp->dfp_count++;
+}
+
+/* Register a new deferred operation type. */
+void
+xfs_defer_init_op_type(
+ const struct xfs_defer_op_type *type)
+{
+ defer_op_types[type->type] = type;
+}
+
+/* Initialize a deferred operation. */
+void
+xfs_defer_init(
+ struct xfs_defer_ops *dop,
+ xfs_fsblock_t *fbp)
+{
+ dop->dop_committed = false;
+ dop->dop_low = false;
+ memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
+ *fbp = NULLFSBLOCK;
+ INIT_LIST_HEAD(&dop->dop_intake);
+ INIT_LIST_HEAD(&dop->dop_pending);
+ trace_xfs_defer_init(NULL, dop);
+}
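
A minimal caller-side sketch of the machinery added above, assuming only the xfs_defer_* signatures in this file; the wrapper function, its arguments and the transaction setup are illustrative and not part of the patch. Here "work_item" stands for the list_head embedded in whatever type-specific structure XFS_DEFER_OPS_TYPE_FREE expects, and *tpp is assumed to already hold a permanent log reservation.

STATIC int
xfs_defer_example(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct list_head	*work_item)
{
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	int			error;

	/* Empty intake/pending lists; no first block chosen yet. */
	xfs_defer_init(&dfops, &firstfsb);

	/*
	 * Park the item on the intake list.  Its intent is logged by
	 * xfs_defer_intake_work() at the next transaction roll.
	 */
	xfs_defer_add(&dfops, XFS_DEFER_OPS_TYPE_FREE, work_item);

	/* Relog this inode each time xfs_defer_finish() rolls *tpp. */
	error = xfs_defer_join(&dfops, ip);
	if (error)
		goto out_cancel;

	/* Log intents, roll the transaction, finish every queued item. */
	error = xfs_defer_finish(tpp, &dfops, ip);
	if (error)
		goto out_cancel;
	return 0;

out_cancel:
	/* Frees anything still sitting on the intake/pending lists. */
	xfs_defer_cancel(&dfops);
	return error;
}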
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
new file mode 100644
index 000000000000..cc3981c48296
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_DEFER_H__
+#define __XFS_DEFER_H__
+
+struct xfs_defer_op_type;
+
+/*
+ * Save a log intent item and a list of extents, so that we can replay
+ * whatever action had to happen to the extent list and file the log done
+ * item.
+ */
+struct xfs_defer_pending {
+ const struct xfs_defer_op_type *dfp_type; /* function pointers */
+ struct list_head dfp_list; /* pending items */
+ bool dfp_committed; /* committed trans? */
+ void *dfp_intent; /* log intent item */
+ struct list_head dfp_work; /* work items */
+ unsigned int dfp_count; /* # extent items */
+};
+
+/*
+ * Header for deferred operation list.
+ *
+ * dop_low is used by the allocator to activate the lowspace algorithm -
+ * when free space is running low the extent allocator may choose to
+ * allocate an extent from an AG without leaving sufficient space for
+ * a btree split when inserting the new extent. In this case the allocator
+ * will enable the lowspace algorithm which is supposed to allow further
+ * allocations (such as btree splits and newroots) to allocate from
+ * sequential AGs. In order to avoid locking AGs out of order the lowspace
+ * algorithm will start searching for free space from AG 0. If the correct
+ * transaction reservations have been made then this algorithm will eventually
+ * find all the space it needs.
+ */
+enum xfs_defer_ops_type {
+ XFS_DEFER_OPS_TYPE_RMAP,
+ XFS_DEFER_OPS_TYPE_FREE,
+ XFS_DEFER_OPS_TYPE_MAX,
+};
+
+#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
+
+struct xfs_defer_ops {
+ bool dop_committed; /* did any trans commit? */
+ bool dop_low; /* alloc in low mode */
+ struct list_head dop_intake; /* unlogged pending work */
+ struct list_head dop_pending; /* logged pending work */
+
+ /* relog these inodes with each roll */
+ struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
+};
+
+void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
+ struct list_head *h);
+int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop,
+ struct xfs_inode *ip);
+void xfs_defer_cancel(struct xfs_defer_ops *dop);
+void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
+bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
+int xfs_defer_join(struct xfs_defer_ops *dop, struct xfs_inode *ip);
+
+/* Description of a deferred type. */
+struct xfs_defer_op_type {
+ enum xfs_defer_ops_type type;
+ unsigned int max_items;
+ void (*abort_intent)(void *);
+ void *(*create_done)(struct xfs_trans *, void *, unsigned int);
+ int (*finish_item)(struct xfs_trans *, struct xfs_defer_ops *,
+ struct list_head *, void *, void **);
+ void (*finish_cleanup)(struct xfs_trans *, void *, int);
+ void (*cancel_item)(struct list_head *);
+ int (*diff_items)(void *, struct list_head *, struct list_head *);
+ void *(*create_intent)(struct xfs_trans *, uint);
+ void (*log_item)(struct xfs_trans *, void *, struct list_head *);
+};
+
+void xfs_defer_init_op_type(const struct xfs_defer_op_type *type);
+
+#endif /* __XFS_DEFER_H__ */
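
The function-pointer table above is easier to follow next to a concrete registration. A minimal sketch follows, assuming only the structure layout, XFS_DEFER_OPS_TYPE_FREE and xfs_defer_init_op_type() from this header; every xfs_example_* callback is a hypothetical placeholder for real intent/done item code.

/* Hypothetical callbacks; the signatures mirror the ops table above. */
static int   xfs_example_diff_items(void *priv, struct list_head *a,
				    struct list_head *b);
static void *xfs_example_create_intent(struct xfs_trans *tp, uint count);
static void  xfs_example_abort_intent(void *intent);
static void  xfs_example_log_item(struct xfs_trans *tp, void *intent,
				  struct list_head *item);
static void *xfs_example_create_done(struct xfs_trans *tp, void *intent,
				     unsigned int count);
static int   xfs_example_finish_item(struct xfs_trans *tp,
				     struct xfs_defer_ops *dop,
				     struct list_head *item, void *done,
				     void **state);
static void  xfs_example_cancel_item(struct list_head *item);

static const struct xfs_defer_op_type xfs_example_free_defer_type = {
	.type		= XFS_DEFER_OPS_TYPE_FREE,
	.max_items	= 16,	/* cap work items per logged intent */
	.diff_items	= xfs_example_diff_items,	/* sort into AG order */
	.create_intent	= xfs_example_create_intent,
	.abort_intent	= xfs_example_abort_intent,
	.log_item	= xfs_example_log_item,
	.create_done	= xfs_example_create_done,
	.finish_item	= xfs_example_finish_item,
	.finish_cleanup	= NULL,		/* optional per-type cleanup hook */
	.cancel_item	= xfs_example_cancel_item,
};

/* Called once at init so xfs_defer_add() can look the type up. */
static void
xfs_example_defer_type_init(void)
{
	xfs_defer_init_op_type(&xfs_example_free_defer_type);
}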
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index af0f9d171f8a..20a96dd5af7e 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -21,6 +21,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -259,7 +260,7 @@ xfs_dir_createname(
struct xfs_name *name,
xfs_ino_t inum, /* new entry inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
- xfs_bmap_free_t *flist, /* bmap's freeblock list */
+ struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -286,7 +287,7 @@ xfs_dir_createname(
args->inumber = inum;
args->dp = dp;
args->firstblock = first;
- args->flist = flist;
+ args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -436,7 +437,7 @@ xfs_dir_removename(
struct xfs_name *name,
xfs_ino_t ino,
xfs_fsblock_t *first, /* bmap's firstblock */
- xfs_bmap_free_t *flist, /* bmap's freeblock list */
+ struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -458,7 +459,7 @@ xfs_dir_removename(
args->inumber = ino;
args->dp = dp;
args->firstblock = first;
- args->flist = flist;
+ args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -498,7 +499,7 @@ xfs_dir_replace(
struct xfs_name *name, /* name of entry to replace */
xfs_ino_t inum, /* new inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
- xfs_bmap_free_t *flist, /* bmap's freeblock list */
+ struct xfs_defer_ops *dfops, /* bmap's freeblock list */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -523,7 +524,7 @@ xfs_dir_replace(
args->inumber = inum;
args->dp = dp;
args->firstblock = first;
- args->flist = flist;
+ args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -680,7 +681,7 @@ xfs_dir2_shrink_inode(
/* Unmap the fsblock(s). */
error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0,
- args->firstblock, args->flist, &done);
+ args->firstblock, args->dfops, &done);
if (error) {
/*
* ENOSPC actually can happen if we're in a removename with no
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index e55353651f5b..becc926c3e3d 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,7 +18,7 @@
#ifndef __XFS_DIR2_H__
#define __XFS_DIR2_H__
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
struct xfs_mount;
@@ -129,18 +129,18 @@ extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_fsblock_t *first,
- struct xfs_bmap_free *flist, xfs_extlen_t tot);
+ struct xfs_defer_ops *dfops, xfs_extlen_t tot);
extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t *inum,
struct xfs_name *ci_name);
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_fsblock_t *first,
- struct xfs_bmap_free *flist, xfs_extlen_t tot);
+ struct xfs_defer_ops *dfops, xfs_extlen_t tot);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_fsblock_t *first,
- struct xfs_bmap_free *flist, xfs_extlen_t tot);
+ struct xfs_defer_ops *dfops, xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name);
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index adb204d40f22..f814d42c73b2 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -455,8 +455,10 @@ xfs_sb_has_compat_feature(
}
#define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */
+#define XFS_SB_FEAT_RO_COMPAT_RMAPBT (1 << 1) /* reverse map btree */
#define XFS_SB_FEAT_RO_COMPAT_ALL \
- (XFS_SB_FEAT_RO_COMPAT_FINOBT)
+ (XFS_SB_FEAT_RO_COMPAT_FINOBT | \
+ XFS_SB_FEAT_RO_COMPAT_RMAPBT)
#define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL
static inline bool
xfs_sb_has_ro_compat_feature(
@@ -538,6 +540,12 @@ static inline bool xfs_sb_version_hasmetauuid(struct xfs_sb *sbp)
(sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_META_UUID);
}
+static inline bool xfs_sb_version_hasrmapbt(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
+ (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_RMAPBT);
+}
+
/*
* end of superblock version macros
*/
@@ -598,10 +606,10 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
/*
- * Btree number 0 is bno, 1 is cnt. This value gives the size of the
+ * Btree number 0 is bno, 1 is cnt, 2 is rmap. This value gives the size of the
* arrays below.
*/
-#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1)
+#define XFS_BTNUM_AGF ((int)XFS_BTNUM_RMAPi + 1)
/*
* The second word of agf_levels in the first a.g. overlaps the EFS
@@ -618,12 +626,10 @@ typedef struct xfs_agf {
__be32 agf_seqno; /* sequence # starting from 0 */
__be32 agf_length; /* size in blocks of a.g. */
/*
- * Freespace information
+ * Freespace and rmap information
*/
__be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
- __be32 agf_spare0; /* spare field */
__be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
- __be32 agf_spare1; /* spare field */
__be32 agf_flfirst; /* first freelist block's index */
__be32 agf_fllast; /* last freelist block's index */
@@ -1308,17 +1314,118 @@ typedef __be32 xfs_inobt_ptr_t;
#define XFS_FIBT_BLOCK(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
/*
- * The first data block of an AG depends on whether the filesystem was formatted
- * with the finobt feature. If so, account for the finobt reserved root btree
- * block.
+ * Reverse mapping btree format definitions
+ *
+ * There is a btree for the reverse map per allocation group
+ */
+#define XFS_RMAP_CRC_MAGIC 0x524d4233 /* 'RMB3' */
+
+/*
+ * Ownership info for an extent. This is used to create reverse-mapping
+ * entries.
*/
-#define XFS_PREALLOC_BLOCKS(mp) \
+#define XFS_OWNER_INFO_ATTR_FORK (1 << 0)
+#define XFS_OWNER_INFO_BMBT_BLOCK (1 << 1)
+struct xfs_owner_info {
+ uint64_t oi_owner;
+ xfs_fileoff_t oi_offset;
+ unsigned int oi_flags;
+};
+
+/*
+ * Special owner types.
+ *
+ * Seeing as we only support up to 8EB, we have the upper bit of the owner field
+ * to tell us we have a special owner value. We use these for static metadata
+ * allocated at mkfs/growfs time, as well as for freespace management metadata.
+ */
+#define XFS_RMAP_OWN_NULL (-1ULL) /* No owner, for growfs */
+#define XFS_RMAP_OWN_UNKNOWN (-2ULL) /* Unknown owner, for EFI recovery */
+#define XFS_RMAP_OWN_FS (-3ULL) /* static fs metadata */
+#define XFS_RMAP_OWN_LOG (-4ULL) /* static fs metadata */
+#define XFS_RMAP_OWN_AG (-5ULL) /* AG freespace btree blocks */
+#define XFS_RMAP_OWN_INOBT (-6ULL) /* Inode btree blocks */
+#define XFS_RMAP_OWN_INODES (-7ULL) /* Inode chunk */
+#define XFS_RMAP_OWN_MIN (-8ULL) /* guard */
+
+#define XFS_RMAP_NON_INODE_OWNER(owner) (!!((owner) & (1ULL << 63)))
+
+/*
+ * Data record structure
+ */
+struct xfs_rmap_rec {
+ __be32 rm_startblock; /* extent start block */
+ __be32 rm_blockcount; /* extent length */
+ __be64 rm_owner; /* extent owner */
+ __be64 rm_offset; /* offset within the owner */
+};
+
+/*
+ * rmap btree record
+ * rm_offset:63 is the attribute fork flag
+ * rm_offset:62 is the bmbt block flag
+ * rm_offset:61 is the unwritten extent flag (same as l0:63 in bmbt)
+ * rm_offset:54-60 aren't used and should be zero
+ * rm_offset:0-53 is the block offset within the inode
+ */
+#define XFS_RMAP_OFF_ATTR_FORK ((__uint64_t)1ULL << 63)
+#define XFS_RMAP_OFF_BMBT_BLOCK ((__uint64_t)1ULL << 62)
+#define XFS_RMAP_OFF_UNWRITTEN ((__uint64_t)1ULL << 61)
+
+#define XFS_RMAP_LEN_MAX ((__uint32_t)~0U)
+#define XFS_RMAP_OFF_FLAGS (XFS_RMAP_OFF_ATTR_FORK | \
+ XFS_RMAP_OFF_BMBT_BLOCK | \
+ XFS_RMAP_OFF_UNWRITTEN)
+#define XFS_RMAP_OFF_MASK ((__uint64_t)0x3FFFFFFFFFFFFFULL)
+
+#define XFS_RMAP_OFF(off) ((off) & XFS_RMAP_OFF_MASK)
+
+#define XFS_RMAP_IS_BMBT_BLOCK(off) (!!((off) & XFS_RMAP_OFF_BMBT_BLOCK))
+#define XFS_RMAP_IS_ATTR_FORK(off) (!!((off) & XFS_RMAP_OFF_ATTR_FORK))
+#define XFS_RMAP_IS_UNWRITTEN(off)	(!!((off) & XFS_RMAP_OFF_UNWRITTEN))
+
+#define RMAPBT_STARTBLOCK_BITLEN 32
+#define RMAPBT_BLOCKCOUNT_BITLEN 32
+#define RMAPBT_OWNER_BITLEN 64
+#define RMAPBT_ATTRFLAG_BITLEN 1
+#define RMAPBT_BMBTFLAG_BITLEN 1
+#define RMAPBT_EXNTFLAG_BITLEN 1
+#define RMAPBT_UNUSED_OFFSET_BITLEN 7
+#define RMAPBT_OFFSET_BITLEN 54
+
+#define XFS_RMAP_ATTR_FORK (1 << 0)
+#define XFS_RMAP_BMBT_BLOCK (1 << 1)
+#define XFS_RMAP_UNWRITTEN (1 << 2)
+#define XFS_RMAP_KEY_FLAGS (XFS_RMAP_ATTR_FORK | \
+ XFS_RMAP_BMBT_BLOCK)
+#define XFS_RMAP_REC_FLAGS (XFS_RMAP_UNWRITTEN)
+struct xfs_rmap_irec {
+ xfs_agblock_t rm_startblock; /* extent start block */
+ xfs_extlen_t rm_blockcount; /* extent length */
+ __uint64_t rm_owner; /* extent owner */
+ __uint64_t rm_offset; /* offset within the owner */
+ unsigned int rm_flags; /* state flags */
+};
+
+/*
+ * Key structure
+ *
+ * We don't use the length for lookups
+ */
+struct xfs_rmap_key {
+ __be32 rm_startblock; /* extent start block */
+ __be64 rm_owner; /* extent owner */
+ __be64 rm_offset; /* offset within the owner */
+} __attribute__((packed));
+
+/* btree pointer type */
+typedef __be32 xfs_rmap_ptr_t;
+
+#define XFS_RMAP_BLOCK(mp) \
(xfs_sb_version_hasfinobt(&((mp)->m_sb)) ? \
XFS_FIBT_BLOCK(mp) + 1 : \
XFS_IBT_BLOCK(mp) + 1)
-
-
/*
* BMAP Btree format definitions
*
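
The rm_offset encoding above packs the three state flags into the top bits of a 64-bit field, leaving 54 bits of file offset; xfs_rmap.c later in this patch relies on xfs_rmap_irec_offset_pack()/_unpack() for exactly this. Below is a minimal sketch of that packing using only the masks defined here; the example_* names are illustrative, and the real unpack helper (which returns an int) presumably also rejects offsets with the unused bits 54-60 set.

static inline __uint64_t
example_rmap_irec_offset_pack(
	const struct xfs_rmap_irec	*irec)
{
	__uint64_t			off = XFS_RMAP_OFF(irec->rm_offset);

	/* Mirror the in-core flags into the top bits of the offset. */
	if (irec->rm_flags & XFS_RMAP_ATTR_FORK)
		off |= XFS_RMAP_OFF_ATTR_FORK;
	if (irec->rm_flags & XFS_RMAP_BMBT_BLOCK)
		off |= XFS_RMAP_OFF_BMBT_BLOCK;
	if (irec->rm_flags & XFS_RMAP_UNWRITTEN)
		off |= XFS_RMAP_OFF_UNWRITTEN;
	return off;
}

static inline void
example_rmap_irec_offset_unpack(
	__uint64_t			off,
	struct xfs_rmap_irec		*irec)
{
	/* Low 54 bits are the block offset within the owner. */
	irec->rm_offset = XFS_RMAP_OFF(off);
	irec->rm_flags = 0;
	if (off & XFS_RMAP_OFF_ATTR_FORK)
		irec->rm_flags |= XFS_RMAP_ATTR_FORK;
	if (off & XFS_RMAP_OFF_BMBT_BLOCK)
		irec->rm_flags |= XFS_RMAP_BMBT_BLOCK;
	if (off & XFS_RMAP_OFF_UNWRITTEN)
		irec->rm_flags |= XFS_RMAP_UNWRITTEN;
}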
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index f5ec9c5ccae6..79455058b752 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -206,6 +206,7 @@ typedef struct xfs_fsop_resblks {
#define XFS_FSOP_GEOM_FLAGS_FTYPE 0x10000 /* inode directory types */
#define XFS_FSOP_GEOM_FLAGS_FINOBT 0x20000 /* free inode btree */
#define XFS_FSOP_GEOM_FLAGS_SPINODES 0x40000 /* sparse inode chunks */
+#define XFS_FSOP_GEOM_FLAGS_RMAPBT 0x80000 /* Reverse mapping btree */
/*
* Minimum and maximum sizes need for growth checks.
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 4b1e408169a8..51b4e0de1fdc 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
@@ -39,6 +40,7 @@
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_log.h"
+#include "xfs_rmap.h"
/*
@@ -614,6 +616,7 @@ xfs_ialloc_ag_alloc(
args.tp = tp;
args.mp = tp->t_mountp;
args.fsbno = NULLFSBLOCK;
+ xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INODES);
#ifdef DEBUG
/* randomly do sparse inode allocations */
@@ -1817,19 +1820,21 @@ xfs_difree_inode_chunk(
struct xfs_mount *mp,
xfs_agnumber_t agno,
struct xfs_inobt_rec_incore *rec,
- struct xfs_bmap_free *flist)
+ struct xfs_defer_ops *dfops)
{
xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
int startidx, endidx;
int nextbit;
xfs_agblock_t agbno;
int contigblk;
+ struct xfs_owner_info oinfo;
DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
- xfs_bmap_add_free(mp, flist, XFS_AGB_TO_FSB(mp, agno, sagbno),
- mp->m_ialloc_blks);
+ xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, sagbno),
+ mp->m_ialloc_blks, &oinfo);
return;
}
@@ -1872,8 +1877,8 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
- xfs_bmap_add_free(mp, flist, XFS_AGB_TO_FSB(mp, agno, agbno),
- contigblk);
+ xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, agbno),
+ contigblk, &oinfo);
/* reset range to current bit and carry on... */
startidx = endidx = nextbit;
@@ -1889,7 +1894,7 @@ xfs_difree_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agino_t agino,
- struct xfs_bmap_free *flist,
+ struct xfs_defer_ops *dfops,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
@@ -1976,7 +1981,7 @@ xfs_difree_inobt(
goto error0;
}
- xfs_difree_inode_chunk(mp, agno, &rec, flist);
+ xfs_difree_inode_chunk(mp, agno, &rec, dfops);
} else {
xic->deleted = 0;
@@ -2121,7 +2126,7 @@ int
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_bmap_free *flist, /* extents to free */
+ struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *xic) /* cluster info if deleted */
{
/* REFERENCED */
@@ -2173,7 +2178,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
- error = xfs_difree_inobt(mp, tp, agbp, agino, flist, xic, &rec);
+ error = xfs_difree_inobt(mp, tp, agbp, agino, dfops, xic, &rec);
if (error)
goto error0;
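
The hunks above switch the inode-chunk free path to carry a struct xfs_owner_info, filled by xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES), so the freed blocks can later be removed from the rmap btree with the right owner. xfs_rmap_ag_owner() itself is not shown in this section; the sketch below is only a plausible shape for such a helper, based on the xfs_owner_info fields defined in xfs_format.h earlier.

/* Sketch only: describe an extent owned by AG or static metadata. */
static inline void
example_rmap_ag_owner(
	struct xfs_owner_info	*oi,
	uint64_t		owner)
{
	oi->oi_owner = owner;		/* e.g. XFS_RMAP_OWN_INODES */
	oi->oi_offset = 0;		/* no file offset for metadata */
	oi->oi_flags = 0;		/* no attr-fork/bmbt flags */
}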
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 6e450df2979b..0bb89669fc07 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -95,7 +95,7 @@ int /* error */
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_bmap_free *flist, /* extents to free */
+ struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *ifree); /* cluster info if deleted */
/*
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 89c21d771e35..31ca2208c03d 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -32,6 +32,7 @@
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
+#include "xfs_rmap.h"
STATIC int
@@ -96,6 +97,7 @@ xfs_inobt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
+ xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
args.minlen = 1;
args.maxlen = 1;
@@ -125,8 +127,12 @@ xfs_inobt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
+ struct xfs_owner_info oinfo;
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
return xfs_free_extent(cur->bc_tp,
- XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1);
+ XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
+ &oinfo);
}
STATIC int
@@ -146,14 +152,6 @@ xfs_inobt_init_key_from_rec(
}
STATIC void
-xfs_inobt_init_rec_from_key(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
-{
- rec->inobt.ir_startino = key->inobt.ir_startino;
-}
-
-STATIC void
xfs_inobt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
@@ -314,7 +312,6 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
- .init_rec_from_key = xfs_inobt_init_rec_from_key,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
@@ -336,7 +333,6 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
- .init_rec_from_key = xfs_inobt_init_rec_from_key,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 9d9559eb2835..4b9769e23c83 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -22,6 +22,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index e8f49c029ff0..a6eed43fa7cd 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -110,7 +110,9 @@ static inline uint xlog_get_cycle(char *ptr)
#define XLOG_REG_TYPE_COMMIT 18
#define XLOG_REG_TYPE_TRANSHDR 19
#define XLOG_REG_TYPE_ICREATE 20
-#define XLOG_REG_TYPE_MAX 20
+#define XLOG_REG_TYPE_RUI_FORMAT 21
+#define XLOG_REG_TYPE_RUD_FORMAT 22
+#define XLOG_REG_TYPE_MAX 22
/*
* Flags to log operation header
@@ -227,6 +229,8 @@ typedef struct xfs_trans_header {
#define XFS_LI_DQUOT 0x123d
#define XFS_LI_QUOTAOFF 0x123e
#define XFS_LI_ICREATE 0x123f
+#define XFS_LI_RUI 0x1240 /* rmap update intent */
+#define XFS_LI_RUD 0x1241
#define XFS_LI_TYPE_DESC \
{ XFS_LI_EFI, "XFS_LI_EFI" }, \
@@ -236,7 +240,9 @@ typedef struct xfs_trans_header {
{ XFS_LI_BUF, "XFS_LI_BUF" }, \
{ XFS_LI_DQUOT, "XFS_LI_DQUOT" }, \
{ XFS_LI_QUOTAOFF, "XFS_LI_QUOTAOFF" }, \
- { XFS_LI_ICREATE, "XFS_LI_ICREATE" }
+ { XFS_LI_ICREATE, "XFS_LI_ICREATE" }, \
+ { XFS_LI_RUI, "XFS_LI_RUI" }, \
+ { XFS_LI_RUD, "XFS_LI_RUD" }
/*
* Inode Log Item Format definitions.
@@ -604,6 +610,59 @@ typedef struct xfs_efd_log_format_64 {
} xfs_efd_log_format_64_t;
/*
+ * RUI/RUD (reverse mapping) log format definitions
+ */
+struct xfs_map_extent {
+ __uint64_t me_owner;
+ __uint64_t me_startblock;
+ __uint64_t me_startoff;
+ __uint32_t me_len;
+ __uint32_t me_flags;
+};
+
+/* rmap me_flags: upper bits are flags, lower byte is type code */
+#define XFS_RMAP_EXTENT_MAP 1
+#define XFS_RMAP_EXTENT_UNMAP 3
+#define XFS_RMAP_EXTENT_CONVERT 5
+#define XFS_RMAP_EXTENT_ALLOC 7
+#define XFS_RMAP_EXTENT_FREE 8
+#define XFS_RMAP_EXTENT_TYPE_MASK 0xFF
+
+#define XFS_RMAP_EXTENT_ATTR_FORK (1U << 31)
+#define XFS_RMAP_EXTENT_BMBT_BLOCK (1U << 30)
+#define XFS_RMAP_EXTENT_UNWRITTEN (1U << 29)
+
+#define XFS_RMAP_EXTENT_FLAGS (XFS_RMAP_EXTENT_TYPE_MASK | \
+ XFS_RMAP_EXTENT_ATTR_FORK | \
+ XFS_RMAP_EXTENT_BMBT_BLOCK | \
+ XFS_RMAP_EXTENT_UNWRITTEN)
+
+/*
+ * This is the structure used to lay out an rui log item in the
+ * log. The rui_extents field is a variable size array whose
+ * size is given by rui_nextents.
+ */
+struct xfs_rui_log_format {
+ __uint16_t rui_type; /* rui log item type */
+ __uint16_t rui_size; /* size of this item */
+ __uint32_t rui_nextents; /* # extents to free */
+ __uint64_t rui_id; /* rui identifier */
+ struct xfs_map_extent rui_extents[1]; /* array of extents to rmap */
+};
+
+/*
+ * This is the structure used to lay out an rud log item in the
+ * log. The rud is a fixed-size item; its rud_rui_id field identifies
+ * the rui whose deferred work it marks complete.
+ */
+struct xfs_rud_log_format {
+ __uint16_t rud_type; /* rud log item type */
+ __uint16_t rud_size; /* size of this item */
+ __uint32_t __pad;
+ __uint64_t rud_rui_id; /* id of corresponding rui */
+};
+
+/*
* Dquot Log format definitions.
*
* The first two fields must be the type and size fitting into
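
An rui log item is variable-sized, carrying rui_nextents map-extent records after the fixed header, while the rud is a small fixed-size item that refers back to its rui through rud_rui_id. A sketch of the buffer-size computation that layout implies; the helper name is hypothetical.

static inline size_t
example_rui_log_format_sizeof(
	unsigned int		nextents)
{
	/* The declaration above already includes rui_extents[1]. */
	return sizeof(struct xfs_rui_log_format) +
			(nextents - 1) * sizeof(struct xfs_map_extent);
}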
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
new file mode 100644
index 000000000000..73d05407d663
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -0,0 +1,1399 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_btree.h"
+#include "xfs_trans.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_error.h"
+#include "xfs_extent_busy.h"
+#include "xfs_bmap.h"
+#include "xfs_inode.h"
+
+/*
+ * Lookup the first record less than or equal to [bno, len, owner, offset]
+ * in the btree given by cur.
+ */
+int
+xfs_rmap_lookup_le(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags,
+ int *stat)
+{
+ cur->bc_rec.r.rm_startblock = bno;
+ cur->bc_rec.r.rm_blockcount = len;
+ cur->bc_rec.r.rm_owner = owner;
+ cur->bc_rec.r.rm_offset = offset;
+ cur->bc_rec.r.rm_flags = flags;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Lookup the record exactly matching [bno, len, owner, offset]
+ * in the btree given by cur.
+ */
+int
+xfs_rmap_lookup_eq(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags,
+ int *stat)
+{
+ cur->bc_rec.r.rm_startblock = bno;
+ cur->bc_rec.r.rm_blockcount = len;
+ cur->bc_rec.r.rm_owner = owner;
+ cur->bc_rec.r.rm_offset = offset;
+ cur->bc_rec.r.rm_flags = flags;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+/*
+ * Update the record referred to by cur to the value given
+ * by [bno, len, owner, offset].
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_rmap_update(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *irec)
+{
+ union xfs_btree_rec rec;
+ int error;
+
+ trace_xfs_rmap_update(cur->bc_mp, cur->bc_private.a.agno,
+ irec->rm_startblock, irec->rm_blockcount,
+ irec->rm_owner, irec->rm_offset, irec->rm_flags);
+
+ rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
+ rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
+ rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
+ rec.rmap.rm_offset = cpu_to_be64(
+ xfs_rmap_irec_offset_pack(irec));
+ error = xfs_btree_update(cur, &rec);
+ if (error)
+ trace_xfs_rmap_update_error(cur->bc_mp,
+ cur->bc_private.a.agno, error, _RET_IP_);
+ return error;
+}
+
+int
+xfs_rmap_insert(
+ struct xfs_btree_cur *rcur,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags)
+{
+ int i;
+ int error;
+
+ trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+ len, owner, offset, flags);
+
+ error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 0, done);
+
+ rcur->bc_rec.r.rm_startblock = agbno;
+ rcur->bc_rec.r.rm_blockcount = len;
+ rcur->bc_rec.r.rm_owner = owner;
+ rcur->bc_rec.r.rm_offset = offset;
+ rcur->bc_rec.r.rm_flags = flags;
+ error = xfs_btree_insert(rcur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+done:
+ if (error)
+ trace_xfs_rmap_insert_error(rcur->bc_mp,
+ rcur->bc_private.a.agno, error, _RET_IP_);
+ return error;
+}
+
+static int
+xfs_rmap_btrec_to_irec(
+ union xfs_btree_rec *rec,
+ struct xfs_rmap_irec *irec)
+{
+ irec->rm_flags = 0;
+ irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
+ irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
+ irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
+ return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
+ irec);
+}
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int
+xfs_rmap_get_rec(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *irec,
+ int *stat)
+{
+ union xfs_btree_rec *rec;
+ int error;
+
+ error = xfs_btree_get_rec(cur, &rec, stat);
+ if (error || !*stat)
+ return error;
+
+ return xfs_rmap_btrec_to_irec(rec, irec);
+}
+
+/*
+ * Find the extent in the rmap btree and remove it.
+ *
+ * The record we find should always be an exact match for the extent that we're
+ * looking for, since we insert them into the btree without modification.
+ *
+ * Special Case #1: when growing the filesystem, we "free" an extent when
+ * growing the last AG. This extent is new space and so it is not tracked as
+ * used space in the btree. The growfs code will pass in an owner of
+ * XFS_RMAP_OWN_NULL to indicate that it expects there to be no owner of this
+ * extent. We verify this by checking that the extent lookup results in a
+ * record that does not overlap.
+ *
+ * Special Case #2: EFIs do not record the owner of the extent, so when
+ * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
+ * btree to ignore the owner (i.e. wildcard match) so we don't trigger
+ * corruption checks during log recovery.
+ */
+STATIC int
+xfs_rmap_unmap(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ uint64_t ltoff;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ bool ignore_off;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & XFS_RMAP_BMBT_BLOCK);
+ if (unwritten)
+ flags |= XFS_RMAP_UNWRITTEN;
+ trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+
+ /*
+ * We should always have a left record because there's a static record
+ * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
+ * will not ever be removed from the tree.
+ */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+
+ error = xfs_rmap_get_rec(cur, &ltrec, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
+ ltoff = ltrec.rm_offset;
+
+ /*
+ * For growfs, the incoming extent must be beyond the left record we
+ * just found as it is new space and won't be used by anyone. This is
+ * just a corruption check as we don't actually do anything with this
+ * extent. Note that we need to use >= instead of > because it might
+ * be the case that the "left" extent goes all the way to EOFS.
+ */
+ if (owner == XFS_RMAP_OWN_NULL) {
+ XFS_WANT_CORRUPTED_GOTO(mp, bno >= ltrec.rm_startblock +
+ ltrec.rm_blockcount, out_error);
+ goto out_done;
+ }
+
+ /* Make sure the unwritten flag matches. */
+ XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+ (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+
+ /* Make sure the extent we found covers the entire freeing range. */
+ XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
+ ltrec.rm_startblock + ltrec.rm_blockcount >=
+ bno + len, out_error);
+
+ /* Make sure the owner matches what we expect to find in the tree. */
+ XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
+ XFS_RMAP_NON_INODE_OWNER(owner), out_error);
+
+ /* Check the offset, if necessary. */
+ if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
+ if (flags & XFS_RMAP_BMBT_BLOCK) {
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
+ out_error);
+ } else {
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltrec.rm_offset <= offset, out_error);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltoff + ltrec.rm_blockcount >= offset + len,
+ out_error);
+ }
+ }
+
+ if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
+ /* exact match, simply remove the record from rmap tree */
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ ltrec.rm_startblock, ltrec.rm_blockcount,
+ ltrec.rm_owner, ltrec.rm_offset,
+ ltrec.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ } else if (ltrec.rm_startblock == bno) {
+ /*
+ * overlap left hand side of extent: move the start, trim the
+ * length and update the current record.
+ *
+ * ltbno ltlen
+ * Orig: |oooooooooooooooooooo|
+ * Freeing: |fffffffff|
+ * Result: |rrrrrrrrrr|
+ * bno len
+ */
+ ltrec.rm_startblock += len;
+ ltrec.rm_blockcount -= len;
+ if (!ignore_off)
+ ltrec.rm_offset += len;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+ } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
+ /*
+ * overlap right hand side of extent: trim the length and update
+ * the current record.
+ *
+ * ltbno ltlen
+ * Orig: |oooooooooooooooooooo|
+ * Freeing: |fffffffff|
+ * Result: |rrrrrrrrrr|
+ * bno len
+ */
+ ltrec.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+ } else {
+
+ /*
+ * overlap middle of extent: trim the length of the existing
+ * record to the length of the new left-extent size, increment
+ * the insertion position so we can insert a new record
+ * containing the remaining right-extent space.
+ *
+ * ltbno ltlen
+ * Orig: |oooooooooooooooooooo|
+ * Freeing: |fffffffff|
+ * Result: |rrrrr| |rrrr|
+ * bno len
+ */
+ xfs_extlen_t orig_len = ltrec.rm_blockcount;
+
+ ltrec.rm_blockcount = bno - ltrec.rm_startblock;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto out_error;
+
+ cur->bc_rec.r.rm_startblock = bno + len;
+ cur->bc_rec.r.rm_blockcount = orig_len - len -
+ ltrec.rm_blockcount;
+ cur->bc_rec.r.rm_owner = ltrec.rm_owner;
+ if (ignore_off)
+ cur->bc_rec.r.rm_offset = 0;
+ else
+ cur->bc_rec.r.rm_offset = offset + len;
+ cur->bc_rec.r.rm_flags = flags;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+ cur->bc_rec.r.rm_startblock,
+ cur->bc_rec.r.rm_blockcount,
+ cur->bc_rec.r.rm_owner,
+ cur->bc_rec.r.rm_offset,
+ cur->bc_rec.r.rm_flags);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto out_error;
+ }
+
+out_done:
+ trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+out_error:
+ if (error)
+ trace_xfs_rmap_unmap_error(mp, cur->bc_private.a.agno,
+ error, _RET_IP_);
+ return error;
+}
+
+/*
+ * Remove a reference to an extent in the rmap btree.
+ */
+int
+xfs_rmap_free(
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return 0;
+
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+
+ error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
+ if (error)
+ goto out_error;
+
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ return 0;
+
+out_error:
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return error;
+}
+
+/*
+ * A mergeable rmap must have the same owner and the same values for
+ * the unwritten, attr_fork, and bmbt flags. The startblock and
+ * offset are checked separately.
+ */
+static bool
+xfs_rmap_is_mergeable(
+ struct xfs_rmap_irec *irec,
+ uint64_t owner,
+ unsigned int flags)
+{
+ if (irec->rm_owner == XFS_RMAP_OWN_NULL)
+ return false;
+ if (irec->rm_owner != owner)
+ return false;
+ if ((flags & XFS_RMAP_UNWRITTEN) ^
+ (irec->rm_flags & XFS_RMAP_UNWRITTEN))
+ return false;
+ if ((flags & XFS_RMAP_ATTR_FORK) ^
+ (irec->rm_flags & XFS_RMAP_ATTR_FORK))
+ return false;
+ if ((flags & XFS_RMAP_BMBT_BLOCK) ^
+ (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
+ return false;
+ return true;
+}
+
+/*
+ * When we allocate a new block, the first thing we do is add a reference to
+ * the extent in the rmap btree. This takes the form of a [agbno, length,
+ * owner, offset] record. Flags are encoded in the high bits of the offset
+ * field.
+ */
+STATIC int
+xfs_rmap_map(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ struct xfs_rmap_irec gtrec;
+ int have_gt;
+ int have_lt;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags = 0;
+ bool ignore_off;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ASSERT(owner != 0);
+ ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & XFS_RMAP_BMBT_BLOCK);
+ if (unwritten)
+ flags |= XFS_RMAP_UNWRITTEN;
+ trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+
+ /*
+ * For the initial lookup, look for an exact match or the left-adjacent
+ * record for our insertion point. This will also give us the record for
+ * start block contiguity tests.
+ */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
+ &have_lt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+
+ error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
+
+ if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
+ have_lt = 0;
+
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ have_lt == 0 ||
+ ltrec.rm_startblock + ltrec.rm_blockcount <= bno, out_error);
+
+ /*
+ * Increment the cursor to see if we have a right-adjacent record to our
+ * insertion point. This will give us the record for end block
+ * contiguity tests.
+ */
+ error = xfs_btree_increment(cur, 0, &have_gt);
+ if (error)
+ goto out_error;
+ if (have_gt) {
+ error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
+ XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= gtrec.rm_startblock,
+ out_error);
+ trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
+ cur->bc_private.a.agno, gtrec.rm_startblock,
+ gtrec.rm_blockcount, gtrec.rm_owner,
+ gtrec.rm_offset, gtrec.rm_flags);
+ if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
+ have_gt = 0;
+ }
+
+ /*
+ * Note: cursor currently points one record to the right of ltrec, even
+ * if there is no record in the tree to the right.
+ */
+ if (have_lt &&
+ ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
+ (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
+ /*
+ * left edge contiguous, merge into left record.
+ *
+ * ltbno ltlen
+ * orig: |ooooooooo|
+ * adding: |aaaaaaaaa|
+ * result: |rrrrrrrrrrrrrrrrrrr|
+ * bno len
+ */
+ ltrec.rm_blockcount += len;
+ if (have_gt &&
+ bno + len == gtrec.rm_startblock &&
+ (ignore_off || offset + len == gtrec.rm_offset) &&
+ (unsigned long)ltrec.rm_blockcount + len +
+ gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
+ /*
+ * right edge also contiguous, delete right record
+ * and merge into left record.
+ *
+ * ltbno ltlen gtbno gtlen
+ * orig: |ooooooooo| |ooooooooo|
+ * adding: |aaaaaaaaa|
+ * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
+ */
+ ltrec.rm_blockcount += gtrec.rm_blockcount;
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ gtrec.rm_startblock,
+ gtrec.rm_blockcount,
+ gtrec.rm_owner,
+ gtrec.rm_offset,
+ gtrec.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ }
+
+ /* point the cursor back to the left record and update */
+ error = xfs_btree_decrement(cur, 0, &have_gt);
+ if (error)
+ goto out_error;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+ } else if (have_gt &&
+ bno + len == gtrec.rm_startblock &&
+ (ignore_off || offset + len == gtrec.rm_offset)) {
+ /*
+ * right edge contiguous, merge into right record.
+ *
+ * gtbno gtlen
+ * Orig: |ooooooooo|
+ * adding: |aaaaaaaaa|
+ * Result: |rrrrrrrrrrrrrrrrrrr|
+ * bno len
+ */
+ gtrec.rm_startblock = bno;
+ gtrec.rm_blockcount += len;
+ if (!ignore_off)
+ gtrec.rm_offset = offset;
+ error = xfs_rmap_update(cur, &gtrec);
+ if (error)
+ goto out_error;
+ } else {
+ /*
+ * no contiguous edge with identical owner, insert
+ * new record at current cursor position.
+ */
+ cur->bc_rec.r.rm_startblock = bno;
+ cur->bc_rec.r.rm_blockcount = len;
+ cur->bc_rec.r.rm_owner = owner;
+ cur->bc_rec.r.rm_offset = offset;
+ cur->bc_rec.r.rm_flags = flags;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+ owner, offset, flags);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ }
+
+ trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+out_error:
+ if (error)
+ trace_xfs_rmap_map_error(mp, cur->bc_private.a.agno,
+ error, _RET_IP_);
+ return error;
+}
+
+/*
+ * Add a reference to an extent in the rmap btree.
+ */
+int
+xfs_rmap_alloc(
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return 0;
+
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ error = xfs_rmap_map(cur, bno, len, false, oinfo);
+ if (error)
+ goto out_error;
+
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ return 0;
+
+out_error:
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return error;
+}
+
+#define RMAP_LEFT_CONTIG (1 << 0)
+#define RMAP_RIGHT_CONTIG (1 << 1)
+#define RMAP_LEFT_FILLING (1 << 2)
+#define RMAP_RIGHT_FILLING (1 << 3)
+#define RMAP_LEFT_VALID (1 << 6)
+#define RMAP_RIGHT_VALID (1 << 7)
+
+#define LEFT r[0]
+#define RIGHT r[1]
+#define PREV r[2]
+#define NEW r[3]
+
+/*
+ * Convert an unwritten extent to a real extent or vice versa.
+ * Does not handle overlapping extents.
+ */
+STATIC int
+xfs_rmap_convert(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec r[4]; /* neighbor extent entries */
+ /* left is 0, right is 1, prev is 2 */
+ /* new is 3 */
+ uint64_t owner;
+ uint64_t offset;
+ uint64_t new_endoff;
+ unsigned int oldext;
+ unsigned int newext;
+ unsigned int flags = 0;
+ int i;
+ int state = 0;
+ int error;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
+ oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
+ new_endoff = offset + len;
+ trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+
+ /*
+ * For the initial lookup, look for an exact match or the left-adjacent
+ * record for our insertion point. This will also give us the record for
+ * start block contiguity tests.
+ */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+
+ error = xfs_rmap_get_rec(cur, &PREV, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, PREV.rm_startblock,
+ PREV.rm_blockcount, PREV.rm_owner,
+ PREV.rm_offset, PREV.rm_flags);
+
+ ASSERT(PREV.rm_offset <= offset);
+ ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
+ ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
+ newext = ~oldext & XFS_RMAP_UNWRITTEN;
+
+ /*
+ * Set flags determining what part of the previous oldext allocation
+ * extent is being replaced by a newext allocation.
+ */
+ if (PREV.rm_offset == offset)
+ state |= RMAP_LEFT_FILLING;
+ if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
+ state |= RMAP_RIGHT_FILLING;
+
+ /*
+ * Decrement the cursor to see if we have a left-adjacent record to our
+ * insertion point. This will give us the record for end block
+ * contiguity tests.
+ */
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ if (i) {
+ state |= RMAP_LEFT_VALID;
+ error = xfs_rmap_get_rec(cur, &LEFT, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
+ done);
+ trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
+ cur->bc_private.a.agno, LEFT.rm_startblock,
+ LEFT.rm_blockcount, LEFT.rm_owner,
+ LEFT.rm_offset, LEFT.rm_flags);
+ if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
+ LEFT.rm_offset + LEFT.rm_blockcount == offset &&
+ xfs_rmap_is_mergeable(&LEFT, owner, newext))
+ state |= RMAP_LEFT_CONTIG;
+ }
+
+ /*
+ * Increment the cursor to see if we have a right-adjacent record to our
+ * insertion point. This will give us the record for end block
+ * contiguity tests.
+ */
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ if (i) {
+ state |= RMAP_RIGHT_VALID;
+ error = xfs_rmap_get_rec(cur, &RIGHT, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
+ done);
+ trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
+ cur->bc_private.a.agno, RIGHT.rm_startblock,
+ RIGHT.rm_blockcount, RIGHT.rm_owner,
+ RIGHT.rm_offset, RIGHT.rm_flags);
+ if (bno + len == RIGHT.rm_startblock &&
+ offset + len == RIGHT.rm_offset &&
+ xfs_rmap_is_mergeable(&RIGHT, owner, newext))
+ state |= RMAP_RIGHT_CONTIG;
+ }
+
+ /* check that left + prev + right is not too long */
+ if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
+ (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
+ (unsigned long)LEFT.rm_blockcount + len +
+ RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
+ state &= ~RMAP_RIGHT_CONTIG;
+
+ trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+ _RET_IP_);
+
+ /* reset the cursor back to PREV */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+
+ /*
+ * Switch out based on the FILLING and CONTIG state bits.
+ */
+ switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
+ case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * The left and right neighbors are both contiguous with new.
+ */
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ RIGHT.rm_startblock, RIGHT.rm_blockcount,
+ RIGHT.rm_owner, RIGHT.rm_offset,
+ RIGHT.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ PREV.rm_startblock, PREV.rm_blockcount,
+ PREV.rm_owner, PREV.rm_offset,
+ PREV.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ NEW = LEFT;
+ NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * The left neighbor is contiguous, the right is not.
+ */
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ PREV.rm_startblock, PREV.rm_blockcount,
+ PREV.rm_owner, PREV.rm_offset,
+ PREV.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ NEW = LEFT;
+ NEW.rm_blockcount += PREV.rm_blockcount;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * The right neighbor is contiguous, the left is not.
+ */
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ RIGHT.rm_startblock, RIGHT.rm_blockcount,
+ RIGHT.rm_owner, RIGHT.rm_offset,
+ RIGHT.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ NEW = PREV;
+ NEW.rm_blockcount = len + RIGHT.rm_blockcount;
+ NEW.rm_flags = newext;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * Neither the left nor right neighbors are contiguous with
+ * the new one.
+ */
+ NEW = PREV;
+ NEW.rm_flags = newext;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
+ /*
+ * Setting the first part of a previous oldext extent to newext.
+ * The left neighbor is contiguous.
+ */
+ NEW = PREV;
+ NEW.rm_offset += len;
+ NEW.rm_startblock += len;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ NEW = LEFT;
+ NEW.rm_blockcount += len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING:
+ /*
+ * Setting the first part of a previous oldext extent to newext.
+ * The left neighbor is not contiguous.
+ */
+ NEW = PREV;
+ NEW.rm_startblock += len;
+ NEW.rm_offset += len;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ NEW.rm_startblock = bno;
+ NEW.rm_owner = owner;
+ NEW.rm_offset = offset;
+ NEW.rm_blockcount = len;
+ NEW.rm_flags = newext;
+ cur->bc_rec.r = NEW;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+ len, owner, offset, newext);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ break;
+
+ case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+ /*
+ * Setting the last part of a previous oldext extent to newext.
+ * The right neighbor is contiguous with the new allocation.
+ */
+ NEW = PREV;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ NEW = RIGHT;
+ NEW.rm_offset = offset;
+ NEW.rm_startblock = bno;
+ NEW.rm_blockcount += len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_RIGHT_FILLING:
+ /*
+ * Setting the last part of a previous oldext extent to newext.
+ * The right neighbor is not contiguous.
+ */
+ NEW = PREV;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
+ oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ NEW.rm_startblock = bno;
+ NEW.rm_owner = owner;
+ NEW.rm_offset = offset;
+ NEW.rm_blockcount = len;
+ NEW.rm_flags = newext;
+ cur->bc_rec.r = NEW;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+ len, owner, offset, newext);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ break;
+
+ case 0:
+ /*
+ * Setting the middle part of a previous oldext extent to
+ * newext. Contiguity is impossible here.
+ * One extent becomes three extents.
+ */
+ /* new right extent - oldext */
+ NEW.rm_startblock = bno + len;
+ NEW.rm_owner = owner;
+ NEW.rm_offset = new_endoff;
+ NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
+ new_endoff;
+ NEW.rm_flags = PREV.rm_flags;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ /* new left extent - oldext */
+ NEW = PREV;
+ NEW.rm_blockcount = offset - PREV.rm_offset;
+ cur->bc_rec.r = NEW;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+ NEW.rm_startblock, NEW.rm_blockcount,
+ NEW.rm_owner, NEW.rm_offset,
+ NEW.rm_flags);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ /*
+ * Reset the cursor to the position of the new extent
+ * we are about to insert as we can't trust it after
+ * the previous insert.
+ */
+ error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
+ oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ /* new middle extent - newext */
+ cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
+ cur->bc_rec.r.rm_flags |= newext;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+ owner, offset, newext);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+ case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
+ case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
+ case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+ case RMAP_LEFT_CONTIG:
+ case RMAP_RIGHT_CONTIG:
+ /*
+ * These cases are all impossible.
+ */
+ ASSERT(0);
+ }
+
+ trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+done:
+ if (error)
+ trace_xfs_rmap_convert_error(cur->bc_mp,
+ cur->bc_private.a.agno, error, _RET_IP_);
+ return error;
+}
+
+#undef NEW
+#undef LEFT
+#undef RIGHT
+#undef PREV
+
+struct xfs_rmap_query_range_info {
+ xfs_rmap_query_range_fn fn;
+ void *priv;
+};
+
+/* Format btree record and pass to our callback. */
+STATIC int
+xfs_rmap_query_range_helper(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ struct xfs_rmap_query_range_info *query = priv;
+ struct xfs_rmap_irec irec;
+ int error;
+
+ error = xfs_rmap_btrec_to_irec(rec, &irec);
+ if (error)
+ return error;
+ return query->fn(cur, &irec, query->priv);
+}
+
+/* Find all rmaps between two keys. */
+int
+xfs_rmap_query_range(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *low_rec,
+ struct xfs_rmap_irec *high_rec,
+ xfs_rmap_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_irec low_brec;
+ union xfs_btree_irec high_brec;
+ struct xfs_rmap_query_range_info query;
+
+ low_brec.r = *low_rec;
+ high_brec.r = *high_rec;
+ query.priv = priv;
+ query.fn = fn;
+ return xfs_btree_query_range(cur, &low_brec, &high_brec,
+ xfs_rmap_query_range_helper, &query);
+}
+
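Usage illustration (not part of the patch): a caller of xfs_rmap_query_range() builds low and high in-core records that bracket the range of interest and supplies a callback; the helper above decodes each on-disk record before the callback sees it. A minimal sketch, using hypothetical count_rmaps_* names; only the xfs_rmap_query_range() signature and xfs_rmap_irec fields introduced in this file are assumed:

/* Count how many rmap records overlap an AG block range (sketch only). */
struct count_rmaps {
	uint64_t			nr;
};

static int
count_rmaps_fn(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct count_rmaps		*cr = priv;

	cr->nr++;		/* returning zero keeps the range walk going */
	return 0;
}

static int
count_rmaps_in_range(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	uint64_t			*nr)
{
	struct xfs_rmap_irec		low = { .rm_startblock = bno };
	struct xfs_rmap_irec		high = {
		.rm_startblock = bno + len - 1,
		.rm_owner = (uint64_t)-1,
		.rm_offset = (uint64_t)-1,
	};
	struct count_rmaps		cr = { .nr = 0 };
	int				error;

	error = xfs_rmap_query_range(cur, &low, &high, count_rmaps_fn, &cr);
	if (error)
		return error;
	*nr = cr.nr;
	return 0;
}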
+/* Clean up after calling xfs_rmap_finish_one. */
+void
+xfs_rmap_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
+{
+ struct xfs_buf *agbp;
+
+ if (rcur == NULL)
+ return;
+ agbp = rcur->bc_private.a.agbp;
+ xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ if (error)
+ xfs_trans_brelse(tp, agbp);
+}
+
+/*
+ * Process one of the deferred rmap operations. We pass back the
+ * btree cursor to maintain our lock on the rmapbt between calls.
+ * This saves time and eliminates a buffer deadlock between the
+ * superblock and the AGF because we'll always grab them in the same
+ * order.
+ */
+int
+xfs_rmap_finish_one(
+ struct xfs_trans *tp,
+ enum xfs_rmap_intent_type type,
+ __uint64_t owner,
+ int whichfork,
+ xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock,
+ xfs_filblks_t blockcount,
+ xfs_exntst_t state,
+ struct xfs_btree_cur **pcur)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *rcur;
+ struct xfs_buf *agbp = NULL;
+ int error = 0;
+ xfs_agnumber_t agno;
+ struct xfs_owner_info oinfo;
+ xfs_agblock_t bno;
+ bool unwritten;
+
+ agno = XFS_FSB_TO_AGNO(mp, startblock);
+ ASSERT(agno != NULLAGNUMBER);
+ bno = XFS_FSB_TO_AGBNO(mp, startblock);
+
+ trace_xfs_rmap_deferred(mp, agno, type, bno, owner, whichfork,
+ startoff, blockcount, state);
+
+ if (XFS_TEST_ERROR(false, mp,
+ XFS_ERRTAG_RMAP_FINISH_ONE,
+ XFS_RANDOM_RMAP_FINISH_ONE))
+ return -EIO;
+
+ /*
+ * If we haven't gotten a cursor or the cursor AG doesn't match
+ * the startblock, get one now.
+ */
+ rcur = *pcur;
+ if (rcur != NULL && rcur->bc_private.a.agno != agno) {
+ xfs_rmap_finish_one_cleanup(tp, rcur, 0);
+ rcur = NULL;
+ *pcur = NULL;
+ }
+ if (rcur == NULL) {
+ /*
+ * Refresh the freelist before we start changing the
+ * rmapbt, because a shape change could cause us to
+ * allocate blocks.
+ */
+ error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ if (error)
+ return error;
+ if (!agbp)
+ return -EFSCORRUPTED;
+
+ rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ if (!rcur) {
+ error = -ENOMEM;
+ goto out_cur;
+ }
+ }
+ *pcur = rcur;
+
+ xfs_rmap_ino_owner(&oinfo, owner, whichfork, startoff);
+ unwritten = state == XFS_EXT_UNWRITTEN;
+ bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, startblock);
+
+ switch (type) {
+ case XFS_RMAP_ALLOC:
+ case XFS_RMAP_MAP:
+ error = xfs_rmap_map(rcur, bno, blockcount, unwritten, &oinfo);
+ break;
+ case XFS_RMAP_FREE:
+ case XFS_RMAP_UNMAP:
+ error = xfs_rmap_unmap(rcur, bno, blockcount, unwritten,
+ &oinfo);
+ break;
+ case XFS_RMAP_CONVERT:
+ error = xfs_rmap_convert(rcur, bno, blockcount, !unwritten,
+ &oinfo);
+ break;
+ default:
+ ASSERT(0);
+ error = -EFSCORRUPTED;
+ }
+ return error;
+
+out_cur:
+ xfs_trans_brelse(tp, agbp);
+
+ return error;
+}
+
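To show the calling convention (the helper below is hypothetical; the real dispatch lives in the deferred-ops machinery added elsewhere in this series): the cursor handed back through *pcur is reused while consecutive intents land in the same AG, and xfs_rmap_finish_one_cleanup() drops it exactly once with the final error code.

/* Sketch of draining a list of struct xfs_rmap_intent (illustrative only). */
static int
finish_rmap_intents(
	struct xfs_trans		*tp,
	struct list_head		*intents)
{
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_rmap_intent		*ri;
	int				error = 0;

	list_for_each_entry(ri, intents, ri_list) {
		error = xfs_rmap_finish_one(tp, ri->ri_type, ri->ri_owner,
				ri->ri_whichfork, ri->ri_bmap.br_startoff,
				ri->ri_bmap.br_startblock,
				ri->ri_bmap.br_blockcount,
				ri->ri_bmap.br_state, &rcur);
		if (error)
			break;
	}
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return error;
}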
+/*
+ * Don't defer an rmap if we aren't an rmap filesystem.
+ */
+static bool
+xfs_rmap_update_is_needed(
+ struct xfs_mount *mp)
+{
+ return xfs_sb_version_hasrmapbt(&mp->m_sb);
+}
+
+/*
+ * Record a rmap intent; the list is kept sorted first by AG and then by
+ * increasing age.
+ */
+static int
+__xfs_rmap_add(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ enum xfs_rmap_intent_type type,
+ __uint64_t owner,
+ int whichfork,
+ struct xfs_bmbt_irec *bmap)
+{
+ struct xfs_rmap_intent *ri;
+
+ trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+ type,
+ XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+ owner, whichfork,
+ bmap->br_startoff,
+ bmap->br_blockcount,
+ bmap->br_state);
+
+ ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
+ INIT_LIST_HEAD(&ri->ri_list);
+ ri->ri_type = type;
+ ri->ri_owner = owner;
+ ri->ri_whichfork = whichfork;
+ ri->ri_bmap = *bmap;
+
+ xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
+ return 0;
+}
+
+/* Map an extent into a file. */
+int
+xfs_rmap_map_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *PREV)
+{
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_MAP, ip->i_ino,
+ whichfork, PREV);
+}
+
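For context, a hedged sketch of how a bmap-side caller might queue one of these intents right after adding a mapping; the surrounding names (mp, dfops, ip, got, out_cancel) are assumptions, not part of this patch:

	/* 'got' is the xfs_bmbt_irec that was just mapped into the data fork */
	error = xfs_rmap_map_extent(mp, dfops, ip, XFS_DATA_FORK, &got);
	if (error)
		goto out_cancel;
	/* the intent is replayed later, when the deferred ops call xfs_rmap_finish_one() */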
+/* Unmap an extent out of a file. */
+int
+xfs_rmap_unmap_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *PREV)
+{
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_UNMAP, ip->i_ino,
+ whichfork, PREV);
+}
+
+/* Convert a data fork extent from unwritten to real or vice versa. */
+int
+xfs_rmap_convert_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *PREV)
+{
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_CONVERT, ip->i_ino,
+ whichfork, PREV);
+}
+
+/* Schedule the creation of an rmap for non-file data. */
+int
+xfs_rmap_alloc_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ __uint64_t owner)
+{
+ struct xfs_bmbt_irec bmap;
+
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_blockcount = len;
+ bmap.br_startoff = 0;
+ bmap.br_state = XFS_EXT_NORM;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
+ XFS_DATA_FORK, &bmap);
+}
+
+/* Schedule the deletion of an rmap for non-file data. */
+int
+xfs_rmap_free_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ __uint64_t owner)
+{
+ struct xfs_bmbt_irec bmap;
+
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_blockcount = len;
+ bmap.br_startoff = 0;
+ bmap.br_state = XFS_EXT_NORM;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
+ XFS_DATA_FORK, &bmap);
+}
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
new file mode 100644
index 000000000000..71cf99a4acba
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_RMAP_H__
+#define __XFS_RMAP_H__
+
+static inline void
+xfs_rmap_ag_owner(
+ struct xfs_owner_info *oi,
+ uint64_t owner)
+{
+ oi->oi_owner = owner;
+ oi->oi_offset = 0;
+ oi->oi_flags = 0;
+}
+
+static inline void
+xfs_rmap_ino_bmbt_owner(
+ struct xfs_owner_info *oi,
+ xfs_ino_t ino,
+ int whichfork)
+{
+ oi->oi_owner = ino;
+ oi->oi_offset = 0;
+ oi->oi_flags = XFS_OWNER_INFO_BMBT_BLOCK;
+ if (whichfork == XFS_ATTR_FORK)
+ oi->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
+}
+
+static inline void
+xfs_rmap_ino_owner(
+ struct xfs_owner_info *oi,
+ xfs_ino_t ino,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ oi->oi_owner = ino;
+ oi->oi_offset = offset;
+ oi->oi_flags = 0;
+ if (whichfork == XFS_ATTR_FORK)
+ oi->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
+}
+
+static inline void
+xfs_rmap_skip_owner_update(
+ struct xfs_owner_info *oi)
+{
+ oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+}
+
+/* Reverse mapping functions. */
+
+struct xfs_buf;
+
+static inline __u64
+xfs_rmap_irec_offset_pack(
+ const struct xfs_rmap_irec *irec)
+{
+ __u64 x;
+
+ x = XFS_RMAP_OFF(irec->rm_offset);
+ if (irec->rm_flags & XFS_RMAP_ATTR_FORK)
+ x |= XFS_RMAP_OFF_ATTR_FORK;
+ if (irec->rm_flags & XFS_RMAP_BMBT_BLOCK)
+ x |= XFS_RMAP_OFF_BMBT_BLOCK;
+ if (irec->rm_flags & XFS_RMAP_UNWRITTEN)
+ x |= XFS_RMAP_OFF_UNWRITTEN;
+ return x;
+}
+
+static inline int
+xfs_rmap_irec_offset_unpack(
+ __u64 offset,
+ struct xfs_rmap_irec *irec)
+{
+ if (offset & ~(XFS_RMAP_OFF_MASK | XFS_RMAP_OFF_FLAGS))
+ return -EFSCORRUPTED;
+ irec->rm_offset = XFS_RMAP_OFF(offset);
+ if (offset & XFS_RMAP_OFF_ATTR_FORK)
+ irec->rm_flags |= XFS_RMAP_ATTR_FORK;
+ if (offset & XFS_RMAP_OFF_BMBT_BLOCK)
+ irec->rm_flags |= XFS_RMAP_BMBT_BLOCK;
+ if (offset & XFS_RMAP_OFF_UNWRITTEN)
+ irec->rm_flags |= XFS_RMAP_UNWRITTEN;
+ return 0;
+}
+
+static inline void
+xfs_owner_info_unpack(
+ struct xfs_owner_info *oinfo,
+ uint64_t *owner,
+ uint64_t *offset,
+ unsigned int *flags)
+{
+ unsigned int r = 0;
+
+ *owner = oinfo->oi_owner;
+ *offset = oinfo->oi_offset;
+ if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
+ r |= XFS_RMAP_ATTR_FORK;
+ if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
+ r |= XFS_RMAP_BMBT_BLOCK;
+ *flags = r;
+}
+
+static inline void
+xfs_owner_info_pack(
+ struct xfs_owner_info *oinfo,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags)
+{
+ oinfo->oi_owner = owner;
+ oinfo->oi_offset = XFS_RMAP_OFF(offset);
+ oinfo->oi_flags = 0;
+ if (flags & XFS_RMAP_ATTR_FORK)
+ oinfo->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
+ if (flags & XFS_RMAP_BMBT_BLOCK)
+ oinfo->oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK;
+}
+
+int xfs_rmap_alloc(struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+int xfs_rmap_free(struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+
+int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, uint64_t owner, uint64_t offset,
+ unsigned int flags, int *stat);
+int xfs_rmap_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, uint64_t owner, uint64_t offset,
+ unsigned int flags, int *stat);
+int xfs_rmap_insert(struct xfs_btree_cur *rcur, xfs_agblock_t agbno,
+ xfs_extlen_t len, uint64_t owner, uint64_t offset,
+ unsigned int flags);
+int xfs_rmap_get_rec(struct xfs_btree_cur *cur, struct xfs_rmap_irec *irec,
+ int *stat);
+
+typedef int (*xfs_rmap_query_range_fn)(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv);
+
+int xfs_rmap_query_range(struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *low_rec, struct xfs_rmap_irec *high_rec,
+ xfs_rmap_query_range_fn fn, void *priv);
+
+enum xfs_rmap_intent_type {
+ XFS_RMAP_MAP,
+ XFS_RMAP_MAP_SHARED,
+ XFS_RMAP_UNMAP,
+ XFS_RMAP_UNMAP_SHARED,
+ XFS_RMAP_CONVERT,
+ XFS_RMAP_CONVERT_SHARED,
+ XFS_RMAP_ALLOC,
+ XFS_RMAP_FREE,
+};
+
+struct xfs_rmap_intent {
+ struct list_head ri_list;
+ enum xfs_rmap_intent_type ri_type;
+ __uint64_t ri_owner;
+ int ri_whichfork;
+ struct xfs_bmbt_irec ri_bmap;
+};
+
+/* functions for updating the rmapbt based on bmbt map/unmap operations */
+int xfs_rmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_bmbt_irec *imap);
+int xfs_rmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_bmbt_irec *imap);
+int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_bmbt_irec *imap);
+int xfs_rmap_alloc_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ __uint64_t owner);
+int xfs_rmap_free_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ __uint64_t owner);
+
+void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur, int error);
+int xfs_rmap_finish_one(struct xfs_trans *tp, enum xfs_rmap_intent_type type,
+ __uint64_t owner, int whichfork, xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock, xfs_filblks_t blockcount,
+ xfs_exntst_t state, struct xfs_btree_cur **pcur);
+
+#endif /* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
new file mode 100644
index 000000000000..bc1faebc84ec
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_error.h"
+#include "xfs_extent_busy.h"
+
+/*
+ * Reverse map btree.
+ *
+ * This is a per-ag tree used to track the owner(s) of a given extent. With
+ * reflink it is possible for there to be multiple owners, which is a departure
+ * from classic XFS. Owner records for data extents are inserted when the
+ * extent is mapped and removed when an extent is unmapped. Owner records for
+ * all other block types (i.e. metadata) are inserted when an extent is
+ * allocated and removed when an extent is freed. There can only be one owner
+ * of a metadata extent, usually an inode or some other metadata structure like
+ * an AG btree.
+ *
+ * The rmap btree is part of the free space management, so blocks for the tree
+ * are sourced from the agfl. Hence we need transaction reservation support for
+ * this tree so that the freelist is always large enough. This also impacts on
+ * the minimum space we need to leave free in the AG.
+ *
+ * The tree is ordered by [ag block, owner, offset]. This is a large key size,
+ * but it is the only way to enforce unique keys when a block can be owned by
+ * multiple files at any offset. There's no need to order/search by extent
+ * size for online updating/management of the tree. It is intended that most
+ * reverse lookups will be to find the owner(s) of a particular block, or to
+ * try to recover tree and file data from corrupt primary metadata.
+ */
+
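To make the key ordering above concrete (block and inode numbers invented): with reflink, one AG block can appear in several records, so the full [startblock, owner, offset] tuple is what keeps keys unique. Records for AG block 100 owned by inode 128 at file offset 0, by inode 131 at offset 16, and by inode 131 at offset 48 sort as (100, 128, 0) < (100, 131, 16) < (100, 131, 48), and any record starting at block 101 sorts after all three regardless of owner.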
+static struct xfs_btree_cur *
+xfs_rmapbt_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
+ cur->bc_private.a.agbp, cur->bc_private.a.agno);
+}
+
+STATIC void
+xfs_rmapbt_set_root(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int inc)
+{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ int btnum = cur->bc_btnum;
+ struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
+
+ ASSERT(ptr->s != 0);
+
+ agf->agf_roots[btnum] = ptr->s;
+ be32_add_cpu(&agf->agf_levels[btnum], inc);
+ pag->pagf_levels[btnum] += inc;
+ xfs_perag_put(pag);
+
+ xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+}
+
+STATIC int
+xfs_rmapbt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ int error;
+ xfs_agblock_t bno;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+
+ /* Allocate the new block from the freelist. If we can't, give up. */
+ error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+ &bno, 1);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+
+ trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+ bno, 1);
+ if (bno == NULLAGBLOCK) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
+ false);
+
+ xfs_trans_agbtree_delta(cur->bc_tp, 1);
+ new->s = cpu_to_be32(bno);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+}
+
+STATIC int
+xfs_rmapbt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ xfs_agblock_t bno;
+ int error;
+
+ bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
+ trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+ bno, 1);
+ error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
+ if (error)
+ return error;
+
+ xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ XFS_EXTENT_BUSY_SKIP_DISCARD);
+ xfs_trans_agbtree_delta(cur->bc_tp, -1);
+
+ return 0;
+}
+
+STATIC int
+xfs_rmapbt_get_minrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ return cur->bc_mp->m_rmap_mnr[level != 0];
+}
+
+STATIC int
+xfs_rmapbt_get_maxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ return cur->bc_mp->m_rmap_mxr[level != 0];
+}
+
+STATIC void
+xfs_rmapbt_init_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ key->rmap.rm_startblock = rec->rmap.rm_startblock;
+ key->rmap.rm_owner = rec->rmap.rm_owner;
+ key->rmap.rm_offset = rec->rmap.rm_offset;
+}
+
+/*
+ * The high key for a reverse mapping record can be computed by shifting
+ * the startblock and offset to the highest value that would still map
+ * to that record. In practice this means that we add blockcount-1 to
+ * the startblock for all records, and if the record is for a data/attr
+ * fork mapping, we add blockcount-1 to the offset too.
+ */
+STATIC void
+xfs_rmapbt_init_high_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ __uint64_t off;
+ int adj;
+
+ adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
+
+ key->rmap.rm_startblock = rec->rmap.rm_startblock;
+ be32_add_cpu(&key->rmap.rm_startblock, adj);
+ key->rmap.rm_owner = rec->rmap.rm_owner;
+ key->rmap.rm_offset = rec->rmap.rm_offset;
+ if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
+ XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
+ return;
+ off = be64_to_cpu(key->rmap.rm_offset);
+ off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
+ key->rmap.rm_offset = cpu_to_be64(off);
+}
+
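A worked example of the high-key rule above (values invented): a data-fork record with startblock 100, blockcount 8, offset 40 yields the high key (startblock 107, owner unchanged, offset 47), both fields advanced by blockcount - 1 = 7. For a non-inode owner or a bmbt-block record only the startblock is advanced, so the same extent owned by AG metadata yields (107, owner, offset unchanged).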
+STATIC void
+xfs_rmapbt_init_rec_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
+{
+ rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
+ rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
+ rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
+ rec->rmap.rm_offset = cpu_to_be64(
+ xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
+}
+
+STATIC void
+xfs_rmapbt_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+
+ ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
+
+ ptr->s = agf->agf_roots[cur->bc_btnum];
+}
+
+STATIC __int64_t
+xfs_rmapbt_key_diff(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
+{
+ struct xfs_rmap_irec *rec = &cur->bc_rec.r;
+ struct xfs_rmap_key *kp = &key->rmap;
+ __u64 x, y;
+ __int64_t d;
+
+ d = (__int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
+ if (d)
+ return d;
+
+ x = be64_to_cpu(kp->rm_owner);
+ y = rec->rm_owner;
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+
+ x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
+ y = rec->rm_offset;
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ return 0;
+}
+
+STATIC __int64_t
+xfs_rmapbt_diff_two_keys(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ struct xfs_rmap_key *kp1 = &k1->rmap;
+ struct xfs_rmap_key *kp2 = &k2->rmap;
+ __int64_t d;
+ __u64 x, y;
+
+ d = (__int64_t)be32_to_cpu(kp1->rm_startblock) -
+ be32_to_cpu(kp2->rm_startblock);
+ if (d)
+ return d;
+
+ x = be64_to_cpu(kp1->rm_owner);
+ y = be64_to_cpu(kp2->rm_owner);
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+
+ x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
+ y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ return 0;
+}
+
+static bool
+xfs_rmapbt_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ struct xfs_perag *pag = bp->b_pag;
+ unsigned int level;
+
+ /*
+ * magic number and level verification
+ *
+ * During growfs operations, we can't verify the exact level or owner as
+ * the perag is not fully initialised and hence not attached to the
+ * buffer. In this case, check against the maximum tree depth.
+ *
+ * Similarly, during log recovery we will have a perag structure
+ * attached, but the agf information will not yet have been initialised
+ * from the on disk AGF. Again, we can only check against maximum limits
+ * in this case.
+ */
+ if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
+ return false;
+
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return false;
+ if (!xfs_btree_sblock_v5hdr_verify(bp))
+ return false;
+
+ level = be16_to_cpu(block->bb_level);
+ if (pag && pag->pagf_init) {
+ if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
+ return false;
+ } else if (level >= mp->m_rmap_maxlevels)
+ return false;
+
+ return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
+}
+
+static void
+xfs_rmapbt_read_verify(
+ struct xfs_buf *bp)
+{
+ if (!xfs_btree_sblock_verify_crc(bp))
+ xfs_buf_ioerror(bp, -EFSBADCRC);
+ else if (!xfs_rmapbt_verify(bp))
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+
+ if (bp->b_error) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_verifier_error(bp);
+ }
+}
+
+static void
+xfs_rmapbt_write_verify(
+ struct xfs_buf *bp)
+{
+ if (!xfs_rmapbt_verify(bp)) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+ xfs_btree_sblock_calc_crc(bp);
+
+
+const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
+ .name = "xfs_rmapbt",
+ .verify_read = xfs_rmapbt_read_verify,
+ .verify_write = xfs_rmapbt_write_verify,
+};
+
+#if defined(DEBUG) || defined(XFS_WARN)
+STATIC int
+xfs_rmapbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ __uint32_t x;
+ __uint32_t y;
+ __uint64_t a;
+ __uint64_t b;
+
+ x = be32_to_cpu(k1->rmap.rm_startblock);
+ y = be32_to_cpu(k2->rmap.rm_startblock);
+ if (x < y)
+ return 1;
+ else if (x > y)
+ return 0;
+ a = be64_to_cpu(k1->rmap.rm_owner);
+ b = be64_to_cpu(k2->rmap.rm_owner);
+ if (a < b)
+ return 1;
+ else if (a > b)
+ return 0;
+ a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
+ b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
+ if (a <= b)
+ return 1;
+ return 0;
+}
+
+STATIC int
+xfs_rmapbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *r1,
+ union xfs_btree_rec *r2)
+{
+ __uint32_t x;
+ __uint32_t y;
+ __uint64_t a;
+ __uint64_t b;
+
+ x = be32_to_cpu(r1->rmap.rm_startblock);
+ y = be32_to_cpu(r2->rmap.rm_startblock);
+ if (x < y)
+ return 1;
+ else if (x > y)
+ return 0;
+ a = be64_to_cpu(r1->rmap.rm_owner);
+ b = be64_to_cpu(r2->rmap.rm_owner);
+ if (a < b)
+ return 1;
+ else if (a > b)
+ return 0;
+ a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
+ b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
+ if (a <= b)
+ return 1;
+ return 0;
+}
+#endif /* DEBUG || XFS_WARN */
+
+static const struct xfs_btree_ops xfs_rmapbt_ops = {
+ .rec_len = sizeof(struct xfs_rmap_rec),
+ .key_len = 2 * sizeof(struct xfs_rmap_key),
+
+ .dup_cursor = xfs_rmapbt_dup_cursor,
+ .set_root = xfs_rmapbt_set_root,
+ .alloc_block = xfs_rmapbt_alloc_block,
+ .free_block = xfs_rmapbt_free_block,
+ .get_minrecs = xfs_rmapbt_get_minrecs,
+ .get_maxrecs = xfs_rmapbt_get_maxrecs,
+ .init_key_from_rec = xfs_rmapbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec,
+ .init_rec_from_cur = xfs_rmapbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_rmapbt_init_ptr_from_cur,
+ .key_diff = xfs_rmapbt_key_diff,
+ .buf_ops = &xfs_rmapbt_buf_ops,
+ .diff_two_keys = xfs_rmapbt_diff_two_keys,
+#if defined(DEBUG) || defined(XFS_WARN)
+ .keys_inorder = xfs_rmapbt_keys_inorder,
+ .recs_inorder = xfs_rmapbt_recs_inorder,
+#endif
+};
+
+/*
+ * Allocate a new reverse mapping btree cursor.
+ */
+struct xfs_btree_cur *
+xfs_rmapbt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_btree_cur *cur;
+
+ cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+ cur->bc_tp = tp;
+ cur->bc_mp = mp;
+ /* Overlapping btree; 2 keys per pointer. */
+ cur->bc_btnum = XFS_BTNUM_RMAP;
+ cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
+ cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_ops = &xfs_rmapbt_ops;
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+
+ cur->bc_private.a.agbp = agbp;
+ cur->bc_private.a.agno = agno;
+
+ return cur;
+}
+
+/*
+ * Calculate number of records in an rmap btree block.
+ */
+int
+xfs_rmapbt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= XFS_RMAP_BLOCK_LEN;
+
+ if (leaf)
+ return blocklen / sizeof(struct xfs_rmap_rec);
+ return blocklen /
+ (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
+}
+
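A rough worked example, assuming the on-disk sizes used by this series (24-byte xfs_rmap_rec, packed 20-byte xfs_rmap_key, 4-byte xfs_rmap_ptr_t, 56-byte CRC short-block header): a 4096-byte block leaves blocklen = 4040, so a leaf holds 4040 / 24 = 168 records and a node holds 4040 / (2 * 20 + 4) = 91 key/pointer sets.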
+/* Compute the maximum height of an rmap btree. */
+void
+xfs_rmapbt_compute_maxlevels(
+ struct xfs_mount *mp)
+{
+ mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
+ mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
+}
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
new file mode 100644
index 000000000000..e73a55357dab
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_RMAP_BTREE_H__
+#define __XFS_RMAP_BTREE_H__
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_mount;
+
+/* rmaps only exist on crc enabled filesystems */
+#define XFS_RMAP_BLOCK_LEN XFS_BTREE_SBLOCK_CRC_LEN
+
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+#define XFS_RMAP_REC_ADDR(block, index) \
+ ((struct xfs_rmap_rec *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ (((index) - 1) * sizeof(struct xfs_rmap_rec))))
+
+#define XFS_RMAP_KEY_ADDR(block, index) \
+ ((struct xfs_rmap_key *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ ((index) - 1) * 2 * sizeof(struct xfs_rmap_key)))
+
+#define XFS_RMAP_HIGH_KEY_ADDR(block, index) \
+ ((struct xfs_rmap_key *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ sizeof(struct xfs_rmap_key) + \
+ ((index) - 1) * 2 * sizeof(struct xfs_rmap_key)))
+
+#define XFS_RMAP_PTR_ADDR(block, index, maxrecs) \
+ ((xfs_rmap_ptr_t *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ (maxrecs) * 2 * sizeof(struct xfs_rmap_key) + \
+ ((index) - 1) * sizeof(xfs_rmap_ptr_t)))
+
+struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
+ struct xfs_trans *tp, struct xfs_buf *bp,
+ xfs_agnumber_t agno);
+int xfs_rmapbt_maxrecs(struct xfs_mount *mp, int blocklen, int leaf);
+extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
+
+#endif /* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 12ca86778e02..0e3d4f5ec33c 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
@@ -36,6 +37,7 @@
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_log.h"
+#include "xfs_rmap_btree.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -729,6 +731,11 @@ xfs_sb_mount_common(
mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
+ mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 1);
+ mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
+ mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
+
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
sbp->sb_inopblock);
@@ -738,6 +745,8 @@ xfs_sb_mount_common(
mp->m_ialloc_min_blks = sbp->sb_spino_align;
else
mp->m_ialloc_min_blks = mp->m_ialloc_blks;
+ mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
+ mp->m_ag_max_usable = xfs_alloc_ag_max_usable(mp);
}
/*
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 16002b5ec4eb..0c5b30bd884c 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -38,6 +38,7 @@ extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agfl_buf_ops;
extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
@@ -116,6 +117,7 @@ int xfs_log_calc_minimum_size(struct xfs_mount *);
#define XFS_INO_BTREE_REF 3
#define XFS_ALLOC_BTREE_REF 2
#define XFS_BMAP_BTREE_REF 2
+#define XFS_RMAP_BTREE_REF 2
#define XFS_DIR_BTREE_REF 2
#define XFS_INO_REF 2
#define XFS_ATTR_BTREE_REF 1
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 68cb1e7bf2bb..301ef2f4dbd6 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -64,6 +64,30 @@ xfs_calc_buf_res(
}
/*
+ * Per-extent log reservation for the btree changes involved in freeing or
+ * allocating an extent. In classic XFS there are two trees that will be
+ * modified (bnobt + cntbt). With rmap enabled, a third tree (the rmapbt) is
+ * modified as well. The number of blocks reserved is based on the formula:
+ *
+ * num trees * ((2 blocks/level * max depth) - 1)
+ *
+ * Keep in mind that max depth is calculated separately for each type of tree.
+ */
+static uint
+xfs_allocfree_log_count(
+ struct xfs_mount *mp,
+ uint num_ops)
+{
+ uint blocks;
+
+ blocks = num_ops * 2 * (2 * mp->m_ag_maxlevels - 1);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
+
+ return blocks;
+}
+
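Plugging invented tree heights into the formula above: with m_ag_maxlevels = 5 and m_rmap_maxlevels = 7, one operation reserves 2 * (2 * 5 - 1) = 18 blocks for the bnobt/cntbt changes plus (2 * 7 - 1) = 13 blocks for the rmapbt, i.e. 31 blocks per op, which the callers then convert to a byte reservation via xfs_calc_buf_res().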
+/*
* Logging inodes is really tricksy. They are logged in memory format,
* which means that what we write into the log doesn't directly translate into
* the amount of space they use on disk.
@@ -126,7 +150,7 @@ xfs_calc_inode_res(
*/
STATIC uint
xfs_calc_finobt_res(
- struct xfs_mount *mp,
+ struct xfs_mount *mp,
int alloc,
int modify)
{
@@ -137,7 +161,7 @@ xfs_calc_finobt_res(
res = xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1));
if (alloc)
- res += xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ res += xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
if (modify)
res += (uint)XFS_FSB_TO_B(mp, 1);
@@ -153,9 +177,9 @@ xfs_calc_finobt_res(
* item logged to try to account for the overhead of the transaction mechanism.
*
* Note: Most of the reservations underestimate the number of allocation
- * groups into which they could free extents in the xfs_bmap_finish() call.
+ * groups into which they could free extents in the xfs_defer_finish() call.
* This is because the number in the worst case is quite high and quite
- * unusual. In order to fix this we need to change xfs_bmap_finish() to free
+ * unusual. In order to fix this we need to change xfs_defer_finish() to free
* extents in only a single AG at a time. This will require changes to the
* EFI code as well, however, so that the EFI for the extents not freed is
* logged again in each transaction. See SGI PV #261917.
@@ -188,10 +212,10 @@ xfs_calc_write_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))));
}
@@ -217,10 +241,10 @@ xfs_calc_itruncate_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(5, 0) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(2 + mp->m_ialloc_blks +
mp->m_in_maxlevels, 0)));
@@ -247,7 +271,7 @@ xfs_calc_rename_reservation(
xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 3),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 3),
XFS_FSB_TO_B(mp, 1))));
}
@@ -286,7 +310,7 @@ xfs_calc_link_reservation(
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1))));
}
@@ -324,7 +348,7 @@ xfs_calc_remove_reservation(
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))));
}
@@ -371,7 +395,7 @@ xfs_calc_create_resv_alloc(
mp->m_sb.sb_sectsize +
xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -399,7 +423,7 @@ xfs_calc_icreate_resv_alloc(
return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
mp->m_sb.sb_sectsize +
xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_finobt_res(mp, 0, 0);
}
@@ -483,7 +507,7 @@ xfs_calc_ifree_reservation(
xfs_calc_buf_res(1, 0) +
xfs_calc_buf_res(2 + mp->m_ialloc_blks +
mp->m_in_maxlevels, 0) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_finobt_res(mp, 0, 1);
}
@@ -513,7 +537,7 @@ xfs_calc_growdata_reservation(
struct xfs_mount *mp)
{
return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -535,7 +559,7 @@ xfs_calc_growrtalloc_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_inode_res(mp, 1) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -611,7 +635,7 @@ xfs_calc_addafork_reservation(
xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -634,7 +658,7 @@ xfs_calc_attrinval_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
XFS_FSB_TO_B(mp, 1))));
}
@@ -701,7 +725,7 @@ xfs_calc_attrrm_reservation(
XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))));
}
diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h
index 797815012c0e..0eb46ed6d404 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.h
+++ b/fs/xfs/libxfs/xfs_trans_resv.h
@@ -68,16 +68,6 @@ struct xfs_trans_resv {
#define M_RES(mp) (&(mp)->m_resv)
/*
- * Per-extent log reservation for the allocation btree changes
- * involved in freeing or allocating an extent.
- * 2 trees * (2 blocks/level * max depth - 1) * block size
- */
-#define XFS_ALLOCFREE_LOG_RES(mp,nx) \
- ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * (mp)->m_ag_maxlevels - 1)))
-#define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
- ((nx) * (2 * (2 * (mp)->m_ag_maxlevels - 1)))
-
-/*
* Per-directory log reservation for any directory change.
* dir blocks: (1 btree block per level + data block + free block) * dblock size
* bmap btree: (levels + 2) * max depth * block size
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index b79dc66b2ecd..3d503647f26b 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -108,8 +108,8 @@ typedef enum {
} xfs_lookup_t;
typedef enum {
- XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_BMAPi, XFS_BTNUM_INOi,
- XFS_BTNUM_FINOi, XFS_BTNUM_MAX
+ XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_RMAPi, XFS_BTNUM_BMAPi,
+ XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_MAX
} xfs_btnum_t;
struct xfs_name {
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index cd4a850564f2..4ece4f2ffc72 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -25,6 +25,7 @@
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
@@ -40,6 +41,7 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
+#include "xfs_rmap_btree.h"
/* Kernel only BMAP related definitions and functions */
@@ -79,95 +81,6 @@ xfs_zero_extent(
GFP_NOFS, true);
}
-/* Sort bmap items by AG. */
-static int
-xfs_bmap_free_list_cmp(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- struct xfs_mount *mp = priv;
- struct xfs_bmap_free_item *ra;
- struct xfs_bmap_free_item *rb;
-
- ra = container_of(a, struct xfs_bmap_free_item, xbfi_list);
- rb = container_of(b, struct xfs_bmap_free_item, xbfi_list);
- return XFS_FSB_TO_AGNO(mp, ra->xbfi_startblock) -
- XFS_FSB_TO_AGNO(mp, rb->xbfi_startblock);
-}
-
-/*
- * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
- * caller. Frees all the extents that need freeing, which must be done
- * last due to locking considerations. We never free any extents in
- * the first transaction.
- *
- * If an inode *ip is provided, rejoin it to the transaction if
- * the transaction was committed.
- */
-int /* error */
-xfs_bmap_finish(
- struct xfs_trans **tp, /* transaction pointer addr */
- struct xfs_bmap_free *flist, /* i/o: list extents to free */
- struct xfs_inode *ip)
-{
- struct xfs_efd_log_item *efd; /* extent free data */
- struct xfs_efi_log_item *efi; /* extent free intention */
- int error; /* error return value */
- int committed;/* xact committed or not */
- struct xfs_bmap_free_item *free; /* free extent item */
-
- ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
- if (flist->xbf_count == 0)
- return 0;
-
- list_sort((*tp)->t_mountp, &flist->xbf_flist, xfs_bmap_free_list_cmp);
-
- efi = xfs_trans_get_efi(*tp, flist->xbf_count);
- list_for_each_entry(free, &flist->xbf_flist, xbfi_list)
- xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
- free->xbfi_blockcount);
-
- error = __xfs_trans_roll(tp, ip, &committed);
- if (error) {
- /*
- * If the transaction was committed, drop the EFD reference
- * since we're bailing out of here. The other reference is
- * dropped when the EFI hits the AIL.
- *
- * If the transaction was not committed, the EFI is freed by the
- * EFI item unlock handler on abort. Also, we have a new
- * transaction so we should return committed=1 even though we're
- * returning an error.
- */
- if (committed) {
- xfs_efi_release(efi);
- xfs_force_shutdown((*tp)->t_mountp,
- SHUTDOWN_META_IO_ERROR);
- }
- return error;
- }
-
- /*
- * Get an EFD and free each extent in the list, logging to the EFD in
- * the process. The remaining bmap free list is cleaned up by the caller
- * on error.
- */
- efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
- while (!list_empty(&flist->xbf_flist)) {
- free = list_first_entry(&flist->xbf_flist,
- struct xfs_bmap_free_item, xbfi_list);
- error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
- free->xbfi_blockcount);
- if (error)
- return error;
-
- xfs_bmap_del_free(flist, free);
- }
-
- return 0;
-}
-
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -214,9 +127,9 @@ xfs_bmap_rtalloc(
/*
* Lock out modifications to both the RT bitmap and summary inodes
*/
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
- xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
/*
@@ -773,7 +686,7 @@ xfs_bmap_punch_delalloc_range(
xfs_bmbt_irec_t imap;
int nimaps = 1;
xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
+ struct xfs_defer_ops dfops;
/*
* Map the range first and check that it is a delalloc extent
@@ -804,18 +717,18 @@ xfs_bmap_punch_delalloc_range(
WARN_ON(imap.br_blockcount == 0);
/*
- * Note: while we initialise the firstblock/flist pair, they
+ * Note: while we initialise the firstblock/dfops pair, they
* should never be used because blocks should never be
* allocated or freed for a delalloc extent and hence we need
* don't cancel or finish them after the xfs_bunmapi() call.
*/
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
- &flist, &done);
+ &dfops, &done);
if (error)
break;
- ASSERT(!flist.xbf_count && list_empty(&flist.xbf_flist));
+ ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
start_fsb++;
remaining--;
@@ -972,7 +885,7 @@ xfs_alloc_file_space(
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
@@ -1063,17 +976,17 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, alloc_type, &firstfsb,
- resblks, imapp, &nimaps, &free_list);
+ resblks, imapp, &nimaps, &dfops);
if (error)
goto error0;
/*
* Complete the transaction
*/
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto error0;
@@ -1096,7 +1009,7 @@ xfs_alloc_file_space(
return error;
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
@@ -1114,7 +1027,7 @@ xfs_unmap_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t firstfsb;
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
int error;
@@ -1133,13 +1046,13 @@ xfs_unmap_extent(
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
- &free_list, done);
+ &dfops, done);
if (error)
goto out_bmap_cancel;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, ip);
if (error)
goto out_bmap_cancel;
@@ -1149,7 +1062,7 @@ out_unlock:
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
@@ -1338,7 +1251,7 @@ xfs_shift_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
xfs_fileoff_t stop_fsb;
xfs_fileoff_t next_fsb;
@@ -1416,19 +1329,19 @@ xfs_shift_file_space(
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* We are using the write transaction in which max 2 bmbt
* updates are allowed
*/
error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
- &done, stop_fsb, &first_block, &free_list,
+ &done, stop_fsb, &first_block, &dfops,
direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
if (error)
goto out_bmap_cancel;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -1438,7 +1351,7 @@ xfs_shift_file_space(
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -1622,6 +1535,10 @@ xfs_swap_extents(
__uint64_t tmp;
int lock_flags;
+ /* XXX: we can't do this with rmap, will fix later */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
if (!tempifp) {
error = -ENOMEM;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index f20071432ca6..68a621a8e0c0 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -21,7 +21,7 @@
/* Kernel only BMAP related definitions and functions */
struct xfs_bmbt_irec;
-struct xfs_bmap_free_item;
+struct xfs_extent_free_item;
struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
@@ -40,8 +40,6 @@ int xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
xfs_bmap_format_t formatter, void *arg);
/* functions in xfs_bmap.c that are only needed by xfs_bmap_util.c */
-void xfs_bmap_del_free(struct xfs_bmap_free *flist,
- struct xfs_bmap_free_item *free);
int xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
struct xfs_bmbt_irec *prevp, xfs_extlen_t extsz,
int rt, int eof, int delay, int convert,
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 272c3f8b6f7d..4ff499aa7338 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -179,7 +179,7 @@ xfs_ioc_trim(
* matter as trimming blocks is an advisory interface.
*/
if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
- range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
+ range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
range.len < mp->m_sb.sb_blocksize)
return -EINVAL;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index ccb0811963b2..7a30b8f11db7 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -307,7 +308,7 @@ xfs_qm_dqalloc(
xfs_buf_t **O_bpp)
{
xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
+ struct xfs_defer_ops dfops;
xfs_bmbt_irec_t map;
int nmaps, error;
xfs_buf_t *bp;
@@ -320,7 +321,7 @@ xfs_qm_dqalloc(
/*
* Initialize the bmap freelist prior to calling bmapi code.
*/
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
xfs_ilock(quotip, XFS_ILOCK_EXCL);
/*
* Return if this type of quotas is turned off while we didn't
@@ -336,7 +337,7 @@ xfs_qm_dqalloc(
error = xfs_bmapi_write(tp, quotip, offset_fsb,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &flist);
+ &map, &nmaps, &dfops);
if (error)
goto error0;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -368,7 +369,7 @@ xfs_qm_dqalloc(
dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
/*
- * xfs_bmap_finish() may commit the current transaction and
+ * xfs_defer_finish() may commit the current transaction and
* start a second transaction if the freelist is not empty.
*
* Since we still want to modify this buffer, we need to
@@ -382,7 +383,7 @@ xfs_qm_dqalloc(
xfs_trans_bhold(tp, bp);
- error = xfs_bmap_finish(tpp, &flist, NULL);
+ error = xfs_defer_finish(tpp, &dfops, NULL);
if (error)
goto error1;
@@ -398,7 +399,7 @@ xfs_qm_dqalloc(
return 0;
error1:
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
error0:
xfs_iunlock(quotip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 2e4f67f68856..3d224702fbc0 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -90,7 +90,9 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
#define XFS_ERRTAG_STRATCMPL_IOERR 19
#define XFS_ERRTAG_DIOWRITE_IOERR 20
#define XFS_ERRTAG_BMAPIFORMAT 21
-#define XFS_ERRTAG_MAX 22
+#define XFS_ERRTAG_FREE_EXTENT 22
+#define XFS_ERRTAG_RMAP_FINISH_ONE 23
+#define XFS_ERRTAG_MAX 24
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -117,6 +119,8 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
#define XFS_RANDOM_STRATCMPL_IOERR (XFS_RANDOM_DEFAULT/10)
#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10)
#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_FREE_EXTENT 1
+#define XFS_RANDOM_RMAP_FINISH_ONE 1
#ifdef DEBUG
extern int xfs_error_test_active;
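The two new tags follow the existing injection scheme above: each gets a tag number and a random factor, and a factor of 1 means the failure fires every time once the tag has been armed through XFS's error-injection interface. As a hedged illustration of how such a tag is consumed at its injection point (this call site is not part of the patch, and it assumes the four-argument form of the XFS_TEST_ERROR() macro in this header):

	/* illustrative only: fail the free-extent path while the tag is armed */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_FREE_EXTENT,
			XFS_RANDOM_FREE_EXTENT))
		return -EIO;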
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index a1b2dd828b9d..fe1bfee35898 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -246,7 +246,7 @@ const struct export_operations xfs_export_operations = {
.fh_to_parent = xfs_fs_fh_to_parent,
.get_parent = xfs_fs_get_parent,
.commit_metadata = xfs_fs_nfs_commit_metadata,
-#ifdef CONFIG_NFSD_BLOCKLAYOUT
+#ifdef CONFIG_EXPORTFS_BLOCK_OPS
.get_uuid = xfs_fs_get_uuid,
.map_blocks = xfs_fs_map_blocks,
.commit_blocks = xfs_fs_commit_blocks,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index ab779460ecbf..d7bc14906af8 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -20,12 +20,15 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_extfree_item.h"
#include "xfs_log.h"
+#include "xfs_btree.h"
+#include "xfs_rmap.h"
kmem_zone_t *xfs_efi_zone;
@@ -486,3 +489,69 @@ xfs_efd_init(
return efdp;
}
+
+/*
+ * Process an extent free intent item that was recovered from
+ * the log. We need to free the extents that it describes.
+ */
+int
+xfs_efi_recover(
+ struct xfs_mount *mp,
+ struct xfs_efi_log_item *efip)
+{
+ struct xfs_efd_log_item *efdp;
+ struct xfs_trans *tp;
+ int i;
+ int error = 0;
+ xfs_extent_t *extp;
+ xfs_fsblock_t startblock_fsb;
+ struct xfs_owner_info oinfo;
+
+ ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
+
+ /*
+ * First check the validity of the extents described by the
+ * EFI. If any are bad, then assume that all are bad and
+ * just toss the EFI.
+ */
+ for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+ extp = &efip->efi_format.efi_extents[i];
+ startblock_fsb = XFS_BB_TO_FSB(mp,
+ XFS_FSB_TO_DADDR(mp, extp->ext_start));
+ if (startblock_fsb == 0 ||
+ extp->ext_len == 0 ||
+ startblock_fsb >= mp->m_sb.sb_dblocks ||
+ extp->ext_len >= mp->m_sb.sb_agblocks) {
+ /*
+ * This will pull the EFI from the AIL and
+ * free the memory associated with it.
+ */
+ set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
+ xfs_efi_release(efip);
+ return -EIO;
+ }
+ }
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error)
+ return error;
+ efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
+
+ xfs_rmap_skip_owner_update(&oinfo);
+ for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+ extp = &efip->efi_format.efi_extents[i];
+ error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
+ extp->ext_len, &oinfo);
+ if (error)
+ goto abort_error;
+
+ }
+
+ set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
+ error = xfs_trans_commit(tp);
+ return error;
+
+abort_error:
+ xfs_trans_cancel(tp);
+ return error;
+}
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 8fa8651705e1..a32c794a86b7 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -98,4 +98,7 @@ int xfs_efi_copy_format(xfs_log_iovec_t *buf,
void xfs_efi_item_free(xfs_efi_log_item_t *);
void xfs_efi_release(struct xfs_efi_log_item *);
+int xfs_efi_recover(struct xfs_mount *mp,
+ struct xfs_efi_log_item *efip);
+
#endif /* __XFS_EXTFREE_ITEM_H__ */
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index a51353a1f87f..4a33a3304369 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -22,6 +22,7 @@
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -385,7 +386,7 @@ xfs_filestream_new_ag(
}
flags = (ap->userdata ? XFS_PICK_USERDATA : 0) |
- (ap->flist->xbf_low ? XFS_PICK_LOWSPACE : 0);
+ (ap->dfops->dop_low ? XFS_PICK_LOWSPACE : 0);
err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 7191c3878b4a..0f96847b90e1 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -32,6 +33,7 @@
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
+#include "xfs_rmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
@@ -40,6 +42,7 @@
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_filestream.h"
+#include "xfs_rmap.h"
/*
* File system operations
@@ -103,7 +106,9 @@ xfs_fs_geometry(
(xfs_sb_version_hasfinobt(&mp->m_sb) ?
XFS_FSOP_GEOM_FLAGS_FINOBT : 0) |
(xfs_sb_version_hassparseinodes(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_SPINODES : 0);
+ XFS_FSOP_GEOM_FLAGS_SPINODES : 0) |
+ (xfs_sb_version_hasrmapbt(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_RMAPBT : 0);
geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
mp->m_sb.sb_logsectsize : BBSIZE;
geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -239,10 +244,16 @@ xfs_growfs_data_private(
agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ agf->agf_roots[XFS_BTNUM_RMAPi] =
+ cpu_to_be32(XFS_RMAP_BLOCK(mp));
+ agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
+ }
+
agf->agf_flfirst = cpu_to_be32(1);
agf->agf_fllast = 0;
agf->agf_flcount = 0;
- tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
+ tmpsize = agsize - mp->m_ag_prealloc_blocks;
agf->agf_freeblks = cpu_to_be32(tmpsize);
agf->agf_longest = cpu_to_be32(tmpsize);
if (xfs_sb_version_hascrc(&mp->m_sb))
@@ -339,7 +350,7 @@ xfs_growfs_data_private(
agno, 0);
arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
- arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
+ arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
arec->ar_blockcount = cpu_to_be32(
agsize - be32_to_cpu(arec->ar_startblock));
@@ -368,7 +379,7 @@ xfs_growfs_data_private(
agno, 0);
arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
- arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
+ arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
arec->ar_blockcount = cpu_to_be32(
agsize - be32_to_cpu(arec->ar_startblock));
nfree += be32_to_cpu(arec->ar_blockcount);
@@ -378,6 +389,72 @@ xfs_growfs_data_private(
if (error)
goto error0;
+ /* RMAP btree root block */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ struct xfs_rmap_rec *rrec;
+ struct xfs_btree_block *block;
+
+ bp = xfs_growfs_get_hdr_buf(mp,
+ XFS_AGB_TO_DADDR(mp, agno, XFS_RMAP_BLOCK(mp)),
+ BTOBB(mp->m_sb.sb_blocksize), 0,
+ &xfs_rmapbt_buf_ops);
+ if (!bp) {
+ error = -ENOMEM;
+ goto error0;
+ }
+
+ xfs_btree_init_block(mp, bp, XFS_RMAP_CRC_MAGIC, 0, 0,
+ agno, XFS_BTREE_CRC_BLOCKS);
+ block = XFS_BUF_TO_BLOCK(bp);
+
+
+ /*
+ * Mark the AG header regions as static metadata. The BNO
+ * btree block is the first block after the headers, so
+ * its location defines the size of the region the static
+ * metadata consumes.
+ *
+ * Note: unlike mkfs, we never have to account for log
+ * space when growing the data regions
+ */
+ rrec = XFS_RMAP_REC_ADDR(block, 1);
+ rrec->rm_startblock = 0;
+ rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ /* account freespace btree root blocks */
+ rrec = XFS_RMAP_REC_ADDR(block, 2);
+ rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
+ rrec->rm_blockcount = cpu_to_be32(2);
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ /* account inode btree root blocks */
+ rrec = XFS_RMAP_REC_ADDR(block, 3);
+ rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
+ rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
+ XFS_IBT_BLOCK(mp));
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ /* account for rmap btree root */
+ rrec = XFS_RMAP_REC_ADDR(block, 4);
+ rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
+ rrec->rm_blockcount = cpu_to_be32(1);
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ error = xfs_bwrite(bp);
+ xfs_buf_relse(bp);
+ if (error)
+ goto error0;
+ }
+
/*
* INO btree root block
*/
@@ -435,6 +512,8 @@ xfs_growfs_data_private(
* There are new blocks in the old last a.g.
*/
if (new) {
+ struct xfs_owner_info oinfo;
+
/*
* Change the agi length.
*/
@@ -462,14 +541,20 @@ xfs_growfs_data_private(
be32_to_cpu(agi->agi_length));
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
+
/*
* Free the new space.
+ *
+ * XFS_RMAP_OWN_NULL is used here to tell the rmap btree that
+ * this space doesn't actually exist in the rmap btree.
*/
- error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
- be32_to_cpu(agf->agf_length) - new), new);
- if (error) {
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+ error = xfs_free_extent(tp,
+ XFS_AGB_TO_FSB(mp, agno,
+ be32_to_cpu(agf->agf_length) - new),
+ new, &oinfo);
+ if (error)
goto error0;
- }
}
/*
@@ -501,6 +586,7 @@ xfs_growfs_data_private(
} else
mp->m_maxicount = 0;
xfs_set_low_space_thresholds(mp);
+ mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
/* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
@@ -638,7 +724,7 @@ xfs_fs_counts(
cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
- XFS_ALLOC_SET_ASIDE(mp);
+ mp->m_alloc_set_aside;
spin_lock(&mp->m_sb_lock);
cnt->freertx = mp->m_sb.sb_frextents;
@@ -726,7 +812,7 @@ xfs_reserve_blocks(
error = -ENOSPC;
do {
free = percpu_counter_sum(&mp->m_fdblocks) -
- XFS_ALLOC_SET_ASIDE(mp);
+ mp->m_alloc_set_aside;
if (!free)
break;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 8825bcfd314c..e08eaea6327b 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -25,6 +25,7 @@
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -1122,7 +1123,7 @@ xfs_create(
struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
int error;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
prid_t prid;
@@ -1182,7 +1183,7 @@ xfs_create(
XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* Reserve disk quota and the inode.
@@ -1219,7 +1220,7 @@ xfs_create(
unlock_dp_on_error = false;
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
- &first_block, &free_list, resblks ?
+ &first_block, &dfops, resblks ?
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != -ENOSPC);
@@ -1253,7 +1254,7 @@ xfs_create(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -1269,7 +1270,7 @@ xfs_create(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -1401,7 +1402,7 @@ xfs_link(
xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
int error;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
int resblks;
@@ -1452,7 +1453,7 @@ xfs_link(
goto error_return;
}
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* Handle initial link state of O_TMPFILE inode
@@ -1464,7 +1465,7 @@ xfs_link(
}
error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
- &first_block, &free_list, resblks);
+ &first_block, &dfops, resblks);
if (error)
goto error_return;
xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -1482,9 +1483,9 @@ xfs_link(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error) {
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
goto error_return;
}
@@ -1526,7 +1527,7 @@ xfs_itruncate_extents(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
xfs_fileoff_t first_unmap_block;
xfs_fileoff_t last_block;
@@ -1562,12 +1563,12 @@ xfs_itruncate_extents(
ASSERT(first_unmap_block < last_block);
unmap_len = last_block - first_unmap_block + 1;
while (!done) {
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
error = xfs_bunmapi(tp, ip,
first_unmap_block, unmap_len,
xfs_bmapi_aflag(whichfork),
XFS_ITRUNC_MAX_EXTENTS,
- &first_block, &free_list,
+ &first_block, &dfops,
&done);
if (error)
goto out_bmap_cancel;
@@ -1576,7 +1577,7 @@ xfs_itruncate_extents(
* Duplicate the transaction that has the permanent
* reservation and commit the old transaction.
*/
- error = xfs_bmap_finish(&tp, &free_list, ip);
+ error = xfs_defer_finish(&tp, &dfops, ip);
if (error)
goto out_bmap_cancel;
@@ -1602,7 +1603,7 @@ out_bmap_cancel:
* the transaction can be properly aborted. We just need to make sure
* we're not holding any resources that we were not when we came in.
*/
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
goto out;
}
@@ -1743,7 +1744,7 @@ STATIC int
xfs_inactive_ifree(
struct xfs_inode *ip)
{
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
@@ -1780,8 +1781,8 @@ xfs_inactive_ifree(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &first_block);
- error = xfs_ifree(tp, ip, &free_list);
+ xfs_defer_init(&dfops, &first_block);
+ error = xfs_ifree(tp, ip, &dfops);
if (error) {
/*
* If we fail to free the inode, shut down. The cancel
@@ -1807,11 +1808,11 @@ xfs_inactive_ifree(
* Just ignore errors at this point. There is nothing we can do except
* to try to keep going. Make sure it's not a silent error.
*/
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error) {
- xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+ xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
__func__, error);
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
}
error = xfs_trans_commit(tp);
if (error)
@@ -2367,7 +2368,7 @@ int
xfs_ifree(
xfs_trans_t *tp,
xfs_inode_t *ip,
- xfs_bmap_free_t *flist)
+ struct xfs_defer_ops *dfops)
{
int error;
struct xfs_icluster xic = { 0 };
@@ -2386,7 +2387,7 @@ xfs_ifree(
if (error)
return error;
- error = xfs_difree(tp, ip->i_ino, flist, &xic);
+ error = xfs_difree(tp, ip->i_ino, dfops, &xic);
if (error)
return error;
@@ -2474,7 +2475,7 @@ xfs_iunpin_wait(
* directory entry.
*
* This is still safe from a transactional point of view - it is not until we
- * get to xfs_bmap_finish() that we have the possibility of multiple
+ * get to xfs_defer_finish() that we have the possibility of multiple
* transactions in this operation. Hence as long as we remove the directory
* entry and drop the link count in the first transaction of the remove
* operation, there are no transactional constraints on the ordering here.
@@ -2489,7 +2490,7 @@ xfs_remove(
xfs_trans_t *tp = NULL;
int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
int error = 0;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
uint resblks;
@@ -2571,9 +2572,9 @@ xfs_remove(
if (error)
goto out_trans_cancel;
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
error = xfs_dir_removename(tp, dp, name, ip->i_ino,
- &first_block, &free_list, resblks);
+ &first_block, &dfops, resblks);
if (error) {
ASSERT(error != -ENOENT);
goto out_bmap_cancel;
@@ -2587,7 +2588,7 @@ xfs_remove(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -2601,7 +2602,7 @@ xfs_remove(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
std_return:
@@ -2662,7 +2663,7 @@ xfs_sort_for_rename(
static int
xfs_finish_rename(
struct xfs_trans *tp,
- struct xfs_bmap_free *free_list)
+ struct xfs_defer_ops *dfops)
{
int error;
@@ -2673,9 +2674,9 @@ xfs_finish_rename(
if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_bmap_finish(&tp, free_list, NULL);
+ error = xfs_defer_finish(&tp, dfops, NULL);
if (error) {
- xfs_bmap_cancel(free_list);
+ xfs_defer_cancel(dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -2697,7 +2698,7 @@ xfs_cross_rename(
struct xfs_inode *dp2,
struct xfs_name *name2,
struct xfs_inode *ip2,
- struct xfs_bmap_free *free_list,
+ struct xfs_defer_ops *dfops,
xfs_fsblock_t *first_block,
int spaceres)
{
@@ -2709,14 +2710,14 @@ xfs_cross_rename(
/* Swap inode number for dirent in first parent */
error = xfs_dir_replace(tp, dp1, name1,
ip2->i_ino,
- first_block, free_list, spaceres);
+ first_block, dfops, spaceres);
if (error)
goto out_trans_abort;
/* Swap inode number for dirent in second parent */
error = xfs_dir_replace(tp, dp2, name2,
ip1->i_ino,
- first_block, free_list, spaceres);
+ first_block, dfops, spaceres);
if (error)
goto out_trans_abort;
@@ -2731,7 +2732,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip2)->i_mode)) {
error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
dp1->i_ino, first_block,
- free_list, spaceres);
+ dfops, spaceres);
if (error)
goto out_trans_abort;
@@ -2758,7 +2759,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip1)->i_mode)) {
error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
dp2->i_ino, first_block,
- free_list, spaceres);
+ dfops, spaceres);
if (error)
goto out_trans_abort;
@@ -2797,10 +2798,10 @@ xfs_cross_rename(
}
xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
- return xfs_finish_rename(tp, free_list);
+ return xfs_finish_rename(tp, dfops);
out_trans_abort:
- xfs_bmap_cancel(free_list);
+ xfs_defer_cancel(dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -2855,7 +2856,7 @@ xfs_rename(
{
struct xfs_mount *mp = src_dp->i_mount;
struct xfs_trans *tp;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
@@ -2944,13 +2945,13 @@ xfs_rename(
goto out_trans_cancel;
}
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/* RENAME_EXCHANGE is unique from here on. */
if (flags & RENAME_EXCHANGE)
return xfs_cross_rename(tp, src_dp, src_name, src_ip,
target_dp, target_name, target_ip,
- &free_list, &first_block, spaceres);
+ &dfops, &first_block, spaceres);
/*
* Set up the target.
@@ -2972,7 +2973,7 @@ xfs_rename(
*/
error = xfs_dir_createname(tp, target_dp, target_name,
src_ip->i_ino, &first_block,
- &free_list, spaceres);
+ &dfops, spaceres);
if (error)
goto out_bmap_cancel;
@@ -3012,7 +3013,7 @@ xfs_rename(
*/
error = xfs_dir_replace(tp, target_dp, target_name,
src_ip->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
if (error)
goto out_bmap_cancel;
@@ -3047,7 +3048,7 @@ xfs_rename(
*/
error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
target_dp->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
ASSERT(error != -EEXIST);
if (error)
goto out_bmap_cancel;
@@ -3086,10 +3087,10 @@ xfs_rename(
*/
if (wip) {
error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
} else
error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
if (error)
goto out_bmap_cancel;
@@ -3124,13 +3125,13 @@ xfs_rename(
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
- error = xfs_finish_rename(tp, &free_list);
+ error = xfs_finish_rename(tp, &dfops);
if (wip)
IRELE(wip);
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_wip:
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 8eb78ec4a6e2..e1a411e08f00 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -27,7 +27,7 @@
struct xfs_dinode;
struct xfs_inode;
struct xfs_buf;
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_bmbt_irec;
struct xfs_inode_log_item;
struct xfs_mount;
@@ -398,7 +398,7 @@ uint xfs_ilock_attr_map_shared(struct xfs_inode *);
uint xfs_ip2xflags(struct xfs_inode *);
int xfs_ifree(struct xfs_trans *, xfs_inode_t *,
- struct xfs_bmap_free *);
+ struct xfs_defer_ops *);
int xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
int, xfs_fsize_t);
void xfs_iext_realloc(xfs_inode_t *, int, int);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 9a7c87809d3b..96a70fd1f5d6 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -232,7 +232,7 @@ xfs_open_by_handle(
}
if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
- error = -EACCES;
+ error = -EPERM;
goto out_dput;
}
@@ -387,6 +387,7 @@ xfs_attrlist_by_handle(
{
int error = -ENOMEM;
attrlist_cursor_kern_t *cursor;
+ struct xfs_fsop_attrlist_handlereq __user *p = arg;
xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
@@ -419,6 +420,11 @@ xfs_attrlist_by_handle(
if (error)
goto out_kfree;
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ error = -EFAULT;
+ goto out_kfree;
+ }
+
if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
error = -EFAULT;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 620fc9120444..2114d53df433 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -23,6 +23,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
@@ -128,7 +129,7 @@ xfs_iomap_write_direct(
int quota_flag;
int rt;
xfs_trans_t *tp;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
int lockmode;
@@ -231,18 +232,18 @@ xfs_iomap_write_direct(
* From this point onwards we overwrite the imap pointer that the
* caller gave to us.
*/
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
bmapi_flags, &firstfsb, resblks, imap,
- &nimaps, &free_list);
+ &nimaps, &dfops);
if (error)
goto out_bmap_cancel;
/*
* Complete the transaction
*/
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -266,7 +267,7 @@ out_unlock:
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
xfs_trans_cancel(tp);
@@ -685,7 +686,7 @@ xfs_iomap_write_allocate(
xfs_fileoff_t offset_fsb, last_block;
xfs_fileoff_t end_fsb, map_start_fsb;
xfs_fsblock_t first_block;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_filblks_t count_fsb;
xfs_trans_t *tp;
int nimaps;
@@ -727,7 +728,7 @@ xfs_iomap_write_allocate(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* it is possible that the extents have changed since
@@ -783,11 +784,11 @@ xfs_iomap_write_allocate(
error = xfs_bmapi_write(tp, ip, map_start_fsb,
count_fsb, 0, &first_block,
nres, imap, &nimaps,
- &free_list);
+ &dfops);
if (error)
goto trans_cancel;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto trans_cancel;
@@ -821,7 +822,7 @@ xfs_iomap_write_allocate(
}
trans_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -842,7 +843,7 @@ xfs_iomap_write_unwritten(
int nimaps;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsize_t i_size;
uint resblks;
int error;
@@ -886,11 +887,11 @@ xfs_iomap_write_unwritten(
/*
* Modify the unwritten extent state of the buffer.
*/
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
XFS_BMAPI_CONVERT, &firstfsb, resblks,
- &imap, &nimaps, &free_list);
+ &imap, &nimaps, &dfops);
if (error)
goto error_on_bmapi_transaction;
@@ -909,7 +910,7 @@ xfs_iomap_write_unwritten(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto error_on_bmapi_transaction;
@@ -936,7 +937,7 @@ xfs_iomap_write_unwritten(
return 0;
error_on_bmapi_transaction:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 835997843846..e8638fd2c0c3 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -43,6 +43,7 @@
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
+#include "xfs_rmap_item.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -1911,6 +1912,8 @@ xlog_recover_reorder_trans(
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
+ case XFS_LI_RUI:
+ case XFS_LI_RUD:
trace_xfs_log_recover_item_reorder_tail(log,
trans, item, pass);
list_move_tail(&item->ri_list, &inode_list);
@@ -2228,6 +2231,7 @@ xlog_recover_get_buf_lsn(
case XFS_ABTC_CRC_MAGIC:
case XFS_ABTB_MAGIC:
case XFS_ABTC_MAGIC:
+ case XFS_RMAP_CRC_MAGIC:
case XFS_IBT_CRC_MAGIC:
case XFS_IBT_MAGIC: {
struct xfs_btree_block *btb = blk;
@@ -2396,6 +2400,9 @@ xlog_recover_validate_buf_type(
case XFS_BMAP_MAGIC:
bp->b_ops = &xfs_bmbt_buf_ops;
break;
+ case XFS_RMAP_CRC_MAGIC:
+ bp->b_ops = &xfs_rmapbt_buf_ops;
+ break;
default:
xfs_warn(mp, "Bad btree block magic!");
ASSERT(0);
@@ -3415,6 +3422,99 @@ xlog_recover_efd_pass2(
}
/*
+ * This routine is called to create an in-core extent rmap update
+ * item from the rui format structure which was logged on disk.
+ * It allocates an in-core rui, copies the extents from the format
+ * structure into it, and adds the rui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_rui_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_rui_log_item *ruip;
+ struct xfs_rui_log_format *rui_formatp;
+
+ rui_formatp = item->ri_buf[0].i_addr;
+
+ ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
+ error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
+ if (error) {
+ xfs_rui_item_free(ruip);
+ return error;
+ }
+ atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
+
+ spin_lock(&log->l_ailp->xa_lock);
+ /*
+ * The RUI has two references. One for the RUD and one for the RUI to ensure
+ * it makes it into the AIL. Insert the RUI into the AIL directly and
+ * drop the RUI reference. Note that xfs_trans_ail_update() drops the
+ * AIL lock.
+ */
+ xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
+ xfs_rui_release(ruip);
+ return 0;
+}
+
+
+/*
+ * This routine is called when an RUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding RUI if it
+ * was still in the log. To do this it searches the AIL for the RUI with an id
+ * equal to that in the RUD format structure. If we find it we drop the RUD
+ * reference, which removes the RUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_rud_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ struct xfs_rud_log_format *rud_formatp;
+ struct xfs_rui_log_item *ruip = NULL;
+ struct xfs_log_item *lip;
+ __uint64_t rui_id;
+ struct xfs_ail_cursor cur;
+ struct xfs_ail *ailp = log->l_ailp;
+
+ rud_formatp = item->ri_buf[0].i_addr;
+ ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
+ rui_id = rud_formatp->rud_rui_id;
+
+ /*
+ * Search for the RUI with the id in the RUD format structure in the
+ * AIL.
+ */
+ spin_lock(&ailp->xa_lock);
+ lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+ while (lip != NULL) {
+ if (lip->li_type == XFS_LI_RUI) {
+ ruip = (struct xfs_rui_log_item *)lip;
+ if (ruip->rui_format.rui_id == rui_id) {
+ /*
+ * Drop the RUD reference to the RUI. This
+ * removes the RUI from the AIL and frees it.
+ */
+ spin_unlock(&ailp->xa_lock);
+ xfs_rui_release(ruip);
+ spin_lock(&ailp->xa_lock);
+ break;
+ }
+ }
+ lip = xfs_trans_ail_cursor_next(ailp, &cur);
+ }
+
+ xfs_trans_ail_cursor_done(&cur);
+ spin_unlock(&ailp->xa_lock);
+
+ return 0;
+}
+
+/*
* This routine is called when an inode create format structure is found in a
 * committed transaction in the log. Its purpose is to initialise the inodes
* being allocated on disk. This requires us to get inode cluster buffers that
@@ -3639,6 +3739,8 @@ xlog_recover_ra_pass2(
case XFS_LI_EFI:
case XFS_LI_EFD:
case XFS_LI_QUOTAOFF:
+ case XFS_LI_RUI:
+ case XFS_LI_RUD:
default:
break;
}
@@ -3662,6 +3764,8 @@ xlog_recover_commit_pass1(
case XFS_LI_EFD:
case XFS_LI_DQUOT:
case XFS_LI_ICREATE:
+ case XFS_LI_RUI:
+ case XFS_LI_RUD:
/* nothing to do in pass 1 */
return 0;
default:
@@ -3692,6 +3796,10 @@ xlog_recover_commit_pass2(
return xlog_recover_efi_pass2(log, item, trans->r_lsn);
case XFS_LI_EFD:
return xlog_recover_efd_pass2(log, item);
+ case XFS_LI_RUI:
+ return xlog_recover_rui_pass2(log, item, trans->r_lsn);
+ case XFS_LI_RUD:
+ return xlog_recover_rud_pass2(log, item);
case XFS_LI_DQUOT:
return xlog_recover_dquot_pass2(log, buffer_list, item,
trans->r_lsn);
@@ -4164,126 +4272,156 @@ xlog_recover_process_data(
return 0;
}
-/*
- * Process an extent free intent item that was recovered from
- * the log. We need to free the extents that it describes.
- */
+/* Recover the EFI if necessary. */
STATIC int
xlog_recover_process_efi(
- xfs_mount_t *mp,
- xfs_efi_log_item_t *efip)
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
{
- xfs_efd_log_item_t *efdp;
- xfs_trans_t *tp;
- int i;
- int error = 0;
- xfs_extent_t *extp;
- xfs_fsblock_t startblock_fsb;
-
- ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
+ struct xfs_efi_log_item *efip;
+ int error;
/*
- * First check the validity of the extents described by the
- * EFI. If any are bad, then assume that all are bad and
- * just toss the EFI.
+ * Skip EFIs that we've already processed.
*/
- for (i = 0; i < efip->efi_format.efi_nextents; i++) {
- extp = &(efip->efi_format.efi_extents[i]);
- startblock_fsb = XFS_BB_TO_FSB(mp,
- XFS_FSB_TO_DADDR(mp, extp->ext_start));
- if ((startblock_fsb == 0) ||
- (extp->ext_len == 0) ||
- (startblock_fsb >= mp->m_sb.sb_dblocks) ||
- (extp->ext_len >= mp->m_sb.sb_agblocks)) {
- /*
- * This will pull the EFI from the AIL and
- * free the memory associated with it.
- */
- set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
- xfs_efi_release(efip);
- return -EIO;
- }
- }
+ efip = container_of(lip, struct xfs_efi_log_item, efi_item);
+ if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
+ return 0;
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
- if (error)
- return error;
- efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
+ spin_unlock(&ailp->xa_lock);
+ error = xfs_efi_recover(mp, efip);
+ spin_lock(&ailp->xa_lock);
- for (i = 0; i < efip->efi_format.efi_nextents; i++) {
- extp = &(efip->efi_format.efi_extents[i]);
- error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
- extp->ext_len);
- if (error)
- goto abort_error;
+ return error;
+}
- }
+/* Release the EFI since we're cancelling everything. */
+STATIC void
+xlog_recover_cancel_efi(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_efi_log_item *efip;
- set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
- error = xfs_trans_commit(tp);
- return error;
+ efip = container_of(lip, struct xfs_efi_log_item, efi_item);
+
+ spin_unlock(&ailp->xa_lock);
+ xfs_efi_release(efip);
+ spin_lock(&ailp->xa_lock);
+}
+
+/* Recover the RUI if necessary. */
+STATIC int
+xlog_recover_process_rui(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_rui_log_item *ruip;
+ int error;
+
+ /*
+ * Skip RUIs that we've already processed.
+ */
+ ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
+ if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
+ return 0;
+
+ spin_unlock(&ailp->xa_lock);
+ error = xfs_rui_recover(mp, ruip);
+ spin_lock(&ailp->xa_lock);
-abort_error:
- xfs_trans_cancel(tp);
return error;
}
+/* Release the RUI since we're cancelling everything. */
+STATIC void
+xlog_recover_cancel_rui(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_rui_log_item *ruip;
+
+ ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
+
+ spin_unlock(&ailp->xa_lock);
+ xfs_rui_release(ruip);
+ spin_lock(&ailp->xa_lock);
+}
+
+/* Is this log item a deferred action intent? */
+static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
+{
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ case XFS_LI_RUI:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
- * When this is called, all of the EFIs which did not have
- * corresponding EFDs should be in the AIL. What we do now
- * is free the extents associated with each one.
+ * When this is called, all of the log intent items which did not have
+ * corresponding log done items should be in the AIL. What we do now
+ * is update the data structures associated with each one.
*
- * Since we process the EFIs in normal transactions, they
- * will be removed at some point after the commit. This prevents
- * us from just walking down the list processing each one.
- * We'll use a flag in the EFI to skip those that we've already
- * processed and use the AIL iteration mechanism's generation
- * count to try to speed this up at least a bit.
+ * Since we process the log intent items in normal transactions, they
+ * will be removed at some point after the commit. This prevents us
+ * from just walking down the list processing each one. We'll use a
+ * flag in the intent item to skip those that we've already processed
+ * and use the AIL iteration mechanism's generation count to try to
+ * speed this up at least a bit.
*
- * When we start, we know that the EFIs are the only things in
- * the AIL. As we process them, however, other items are added
- * to the AIL. Since everything added to the AIL must come after
- * everything already in the AIL, we stop processing as soon as
- * we see something other than an EFI in the AIL.
+ * When we start, we know that the intents are the only things in the
+ * AIL. As we process them, however, other items are added to the
+ * AIL.
*/
STATIC int
-xlog_recover_process_efis(
+xlog_recover_process_intents(
struct xlog *log)
{
struct xfs_log_item *lip;
- struct xfs_efi_log_item *efip;
int error = 0;
struct xfs_ail_cursor cur;
struct xfs_ail *ailp;
+ xfs_lsn_t last_lsn;
ailp = log->l_ailp;
spin_lock(&ailp->xa_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+ last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
while (lip != NULL) {
/*
- * We're done when we see something other than an EFI.
- * There should be no EFIs left in the AIL now.
+ * We're done when we see something other than an intent.
+ * There should be no intents left in the AIL now.
*/
- if (lip->li_type != XFS_LI_EFI) {
+ if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
- ASSERT(lip->li_type != XFS_LI_EFI);
+ ASSERT(!xlog_item_is_intent(lip));
#endif
break;
}
/*
- * Skip EFIs that we've already processed.
+ * We should never see a redo item with a LSN higher than
+ * the last transaction we found in the log at the start
+ * of recovery.
*/
- efip = container_of(lip, struct xfs_efi_log_item, efi_item);
- if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
- continue;
- }
+ ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
- spin_unlock(&ailp->xa_lock);
- error = xlog_recover_process_efi(log->l_mp, efip);
- spin_lock(&ailp->xa_lock);
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ error = xlog_recover_process_efi(log->l_mp, ailp, lip);
+ break;
+ case XFS_LI_RUI:
+ error = xlog_recover_process_rui(log->l_mp, ailp, lip);
+ break;
+ }
if (error)
goto out;
lip = xfs_trans_ail_cursor_next(ailp, &cur);
@@ -4295,15 +4433,14 @@ out:
}
/*
- * A cancel occurs when the mount has failed and we're bailing out. Release all
- * pending EFIs so they don't pin the AIL.
+ * A cancel occurs when the mount has failed and we're bailing out.
+ * Release all pending log intent items so they don't pin the AIL.
*/
STATIC int
-xlog_recover_cancel_efis(
+xlog_recover_cancel_intents(
struct xlog *log)
{
struct xfs_log_item *lip;
- struct xfs_efi_log_item *efip;
int error = 0;
struct xfs_ail_cursor cur;
struct xfs_ail *ailp;
@@ -4313,22 +4450,25 @@ xlog_recover_cancel_efis(
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
while (lip != NULL) {
/*
- * We're done when we see something other than an EFI.
- * There should be no EFIs left in the AIL now.
+ * We're done when we see something other than an intent.
+ * There should be no intents left in the AIL now.
*/
- if (lip->li_type != XFS_LI_EFI) {
+ if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
- ASSERT(lip->li_type != XFS_LI_EFI);
+ ASSERT(!xlog_item_is_intent(lip));
#endif
break;
}
- efip = container_of(lip, struct xfs_efi_log_item, efi_item);
-
- spin_unlock(&ailp->xa_lock);
- xfs_efi_release(efip);
- spin_lock(&ailp->xa_lock);
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ xlog_recover_cancel_efi(log->l_mp, ailp, lip);
+ break;
+ case XFS_LI_RUI:
+ xlog_recover_cancel_rui(log->l_mp, ailp, lip);
+ break;
+ }
lip = xfs_trans_ail_cursor_next(ailp, &cur);
}
@@ -5023,6 +5163,7 @@ xlog_do_recover(
xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
return error;
}
+ mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
xlog_recover_check_summary(log);
@@ -5139,16 +5280,17 @@ xlog_recover_finish(
*/
if (log->l_flags & XLOG_RECOVERY_NEEDED) {
int error;
- error = xlog_recover_process_efis(log);
+ error = xlog_recover_process_intents(log);
if (error) {
- xfs_alert(log->l_mp, "Failed to recover EFIs");
+ xfs_alert(log->l_mp, "Failed to recover intents");
return error;
}
+
/*
- * Sync the log to get all the EFIs out of the AIL.
+ * Sync the log to get all the intents out of the AIL.
* This isn't absolutely necessary, but it helps in
* case the unlink transactions would have problems
- * pushing the EFIs out of the way.
+ * pushing the intents out of the way.
*/
xfs_log_force(log->l_mp, XFS_LOG_SYNC);
@@ -5173,7 +5315,7 @@ xlog_recover_cancel(
int error = 0;
if (log->l_flags & XLOG_RECOVERY_NEEDED)
- error = xlog_recover_cancel_efis(log);
+ error = xlog_recover_cancel_intents(log);
return error;
}
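With this rework, xlog_recover_process_intents() and xlog_recover_cancel_intents() are type-agnostic walkers: adding a further intent type later only needs an entry in xlog_item_is_intent() plus a process/cancel helper pair shaped like the EFI and RUI ones above. A minimal sketch of the process half for a hypothetical XFS_LI_FOO intent (every "foo" identifier is illustrative and not part of this patch):

/* Replay one not-yet-recovered intent; called with the AIL lock held. */
STATIC int
xlog_recover_process_foo(
	struct xfs_mount	*mp,
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_foo_log_item	*foop;
	int			error;

	foop = container_of(lip, struct xfs_foo_log_item, foo_item);
	if (test_bit(XFS_FOO_RECOVERED, &foop->foo_flags))
		return 0;	/* skip intents that were already replayed */

	/* drop the AIL lock around the transactional replay, as above */
	spin_unlock(&ailp->xa_lock);
	error = xfs_foo_recover(mp, foop);
	spin_lock(&ailp->xa_lock);
	return error;
}

The cancel half mirrors xlog_recover_cancel_efi()/xlog_recover_cancel_rui(), and the two switch statements in the walkers gain matching XFS_LI_FOO cases.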
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 970c19ba2f56..faeead671f9f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -41,6 +42,7 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
+#include "xfs_rmap_btree.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
@@ -230,6 +232,8 @@ xfs_initialize_perag(
if (maxagi)
*maxagi = index;
+
+ mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
return 0;
out_unwind:
@@ -679,6 +683,7 @@ xfs_mountfs(
xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
xfs_ialloc_compute_maxlevels(mp);
+ xfs_rmapbt_compute_maxlevels(mp);
xfs_set_maxicount(mp);
@@ -1216,7 +1221,7 @@ xfs_mod_fdblocks(
batch = XFS_FDBLOCKS_BATCH;
__percpu_counter_add(&mp->m_fdblocks, delta, batch);
- if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp),
+ if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
XFS_FDBLOCKS_BATCH) >= 0) {
/* we had space! */
return 0;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index c1b798c72126..b36676cde103 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -116,9 +116,15 @@ typedef struct xfs_mount {
uint m_bmap_dmnr[2]; /* min bmap btree records */
uint m_inobt_mxr[2]; /* max inobt btree records */
uint m_inobt_mnr[2]; /* min inobt btree records */
+ uint m_rmap_mxr[2]; /* max rmap btree records */
+ uint m_rmap_mnr[2]; /* min rmap btree records */
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
uint m_in_maxlevels; /* max inobt btree levels. */
+ uint m_rmap_maxlevels; /* max rmap btree levels */
+ xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
+ uint m_alloc_set_aside; /* space we can't use */
+ uint m_ag_max_usable; /* max space per AG */
struct radix_tree_root m_perag_tree; /* per-ag accounting info */
spinlock_t m_perag_lock; /* lock for m_perag_tree */
struct mutex m_growlock; /* growfs mutex */
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0cc8d8f74356..69e2986a3776 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -49,11 +49,14 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_dsymlink_hdr, 56);
XFS_CHECK_STRUCT_SIZE(struct xfs_inobt_key, 4);
XFS_CHECK_STRUCT_SIZE(struct xfs_inobt_rec, 16);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rmap_key, 20);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rmap_rec, 24);
XFS_CHECK_STRUCT_SIZE(struct xfs_timestamp, 8);
XFS_CHECK_STRUCT_SIZE(xfs_alloc_key_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_alloc_ptr_t, 4);
XFS_CHECK_STRUCT_SIZE(xfs_alloc_rec_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_inobt_ptr_t, 4);
+ XFS_CHECK_STRUCT_SIZE(xfs_rmap_ptr_t, 4);
/* dir/attr trees */
XFS_CHECK_STRUCT_SIZE(struct xfs_attr3_leaf_hdr, 80);
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
index 93f74853961b..e8339f74966b 100644
--- a/fs/xfs/xfs_pnfs.h
+++ b/fs/xfs/xfs_pnfs.h
@@ -1,7 +1,7 @@
#ifndef _XFS_PNFS_H
#define _XFS_PNFS_H 1
-#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
+#ifdef CONFIG_EXPORTFS_BLOCK_OPS
int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
struct iomap *iomap, bool write, u32 *device_generation);
@@ -15,5 +15,5 @@ xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex)
{
return 0;
}
-#endif /* CONFIG_NFSD_PNFS */
+#endif /* CONFIG_EXPORTFS_BLOCK_OPS */
#endif /* _XFS_PNFS_H */
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
new file mode 100644
index 000000000000..2500f28689d5
--- /dev/null
+++ b/fs/xfs/xfs_rmap_item.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_rmap_item.h"
+#include "xfs_log.h"
+#include "xfs_rmap.h"
+
+
+kmem_zone_t *xfs_rui_zone;
+kmem_zone_t *xfs_rud_zone;
+
+static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_rui_log_item, rui_item);
+}
+
+void
+xfs_rui_item_free(
+ struct xfs_rui_log_item *ruip)
+{
+ if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
+ kmem_free(ruip);
+ else
+ kmem_zone_free(xfs_rui_zone, ruip);
+}
+
+/*
+ * This returns the number of iovecs needed to log the given rui item.
+ * We only need 1 iovec for an rui item. It just logs the rui_log_format
+ * structure.
+ */
+static inline int
+xfs_rui_item_sizeof(
+ struct xfs_rui_log_item *ruip)
+{
+ return sizeof(struct xfs_rui_log_format) +
+ (ruip->rui_format.rui_nextents - 1) *
+ sizeof(struct xfs_map_extent);
+}
+
+STATIC void
+xfs_rui_item_size(
+ struct xfs_log_item *lip,
+ int *nvecs,
+ int *nbytes)
+{
+ *nvecs += 1;
+ *nbytes += xfs_rui_item_sizeof(RUI_ITEM(lip));
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given rui log item. We use only 1 iovec, and we point that
+ * at the rui_log_format structure embedded in the rui item.
+ * It is at this point that we assert that all of the extent
+ * slots in the rui item have been filled.
+ */
+STATIC void
+xfs_rui_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_vec *lv)
+{
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
+ struct xfs_log_iovec *vecp = NULL;
+
+ ASSERT(atomic_read(&ruip->rui_next_extent) ==
+ ruip->rui_format.rui_nextents);
+
+ ruip->rui_format.rui_type = XFS_LI_RUI;
+ ruip->rui_format.rui_size = 1;
+
+ xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
+ xfs_rui_item_sizeof(ruip));
+}
+
+/*
+ * Pinning has no meaning for an rui item, so just return.
+ */
+STATIC void
+xfs_rui_item_pin(
+ struct xfs_log_item *lip)
+{
+}
+
+/*
+ * The unpin operation is the last place an RUI is manipulated in the log. It is
+ * either inserted in the AIL or aborted in the event of a log I/O error. In
+ * either case, the RUI transaction has been successfully committed to make it
+ * this far. Therefore, we expect whoever committed the RUI to either construct
+ * and commit the RUD or drop the RUD's reference in the event of error. Simply
+ * drop the log's RUI reference now that the log is done with it.
+ */
+STATIC void
+xfs_rui_item_unpin(
+ struct xfs_log_item *lip,
+ int remove)
+{
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
+
+ xfs_rui_release(ruip);
+}
+
+/*
+ * RUI items have no locking or pushing. However, since RUIs are pulled from
+ * the AIL when their corresponding RUDs are committed to disk, their situation
+ * is very similar to being pinned. Return XFS_ITEM_PINNED so that the caller
+ * will eventually flush the log. This should help in getting the RUI out of
+ * the AIL.
+ */
+STATIC uint
+xfs_rui_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
+{
+ return XFS_ITEM_PINNED;
+}
+
+/*
+ * The RUI has been either committed or aborted if the transaction has been
+ * cancelled. If the transaction was cancelled, an RUD isn't going to be
+ * constructed and thus we free the RUI here directly.
+ */
+STATIC void
+xfs_rui_item_unlock(
+ struct xfs_log_item *lip)
+{
+ if (lip->li_flags & XFS_LI_ABORTED)
+ xfs_rui_item_free(RUI_ITEM(lip));
+}
+
+/*
+ * The RUI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.
+ */
+STATIC xfs_lsn_t
+xfs_rui_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+ return lsn;
+}
+
+/*
+ * The RUI dependency tracking op doesn't do squat. It can't because
+ * it doesn't know where the free extent is coming from. The dependency
+ * tracking has to be handled by the "enclosing" metadata object. For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+STATIC void
+xfs_rui_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all rui log items.
+ */
+static const struct xfs_item_ops xfs_rui_item_ops = {
+ .iop_size = xfs_rui_item_size,
+ .iop_format = xfs_rui_item_format,
+ .iop_pin = xfs_rui_item_pin,
+ .iop_unpin = xfs_rui_item_unpin,
+ .iop_unlock = xfs_rui_item_unlock,
+ .iop_committed = xfs_rui_item_committed,
+ .iop_push = xfs_rui_item_push,
+ .iop_committing = xfs_rui_item_committing,
+};
+
+/*
+ * Allocate and initialize an rui item with the given number of extents.
+ */
+struct xfs_rui_log_item *
+xfs_rui_init(
+ struct xfs_mount *mp,
+ uint nextents)
+
+{
+ struct xfs_rui_log_item *ruip;
+ uint size;
+
+ ASSERT(nextents > 0);
+ if (nextents > XFS_RUI_MAX_FAST_EXTENTS) {
+ size = (uint)(sizeof(struct xfs_rui_log_item) +
+ ((nextents - 1) * sizeof(struct xfs_map_extent)));
+ ruip = kmem_zalloc(size, KM_SLEEP);
+ } else {
+ ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);
+ }
+
+ xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
+ ruip->rui_format.rui_nextents = nextents;
+ ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
+ atomic_set(&ruip->rui_next_extent, 0);
+ atomic_set(&ruip->rui_refcount, 2);
+
+ return ruip;
+}
+
+/*
+ * Copy an RUI format buffer from the given buf, and into the destination
+ * RUI format structure. The RUI/RUD items were designed not to need any
+ * special alignment handling.
+ */
+int
+xfs_rui_copy_format(
+ struct xfs_log_iovec *buf,
+ struct xfs_rui_log_format *dst_rui_fmt)
+{
+ struct xfs_rui_log_format *src_rui_fmt;
+ uint len;
+
+ src_rui_fmt = buf->i_addr;
+ len = sizeof(struct xfs_rui_log_format) +
+ (src_rui_fmt->rui_nextents - 1) *
+ sizeof(struct xfs_map_extent);
+
+ if (buf->i_len != len)
+ return -EFSCORRUPTED;
+
+ memcpy((char *)dst_rui_fmt, (char *)src_rui_fmt, len);
+ return 0;
+}
+
+/*
+ * Freeing the RUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the RUI may not yet have been placed in the AIL
+ * when called by xfs_rui_release() from RUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the RUI.
+ */
+void
+xfs_rui_release(
+ struct xfs_rui_log_item *ruip)
+{
+ if (atomic_dec_and_test(&ruip->rui_refcount)) {
+ xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_rui_item_free(ruip);
+ }
+}
+
+static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_rud_log_item, rud_item);
+}
+
+STATIC void
+xfs_rud_item_size(
+ struct xfs_log_item *lip,
+ int *nvecs,
+ int *nbytes)
+{
+ *nvecs += 1;
+ *nbytes += sizeof(struct xfs_rud_log_format);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given rud log item. We use only 1 iovec, and we point that
+ * at the rud_log_format structure embedded in the rud item.
+ * It is at this point that we assert that all of the extent
+ * slots in the rud item have been filled.
+ */
+STATIC void
+xfs_rud_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_vec *lv)
+{
+ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+ struct xfs_log_iovec *vecp = NULL;
+
+ rudp->rud_format.rud_type = XFS_LI_RUD;
+ rudp->rud_format.rud_size = 1;
+
+ xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
+ sizeof(struct xfs_rud_log_format));
+}
+
+/*
+ * Pinning has no meaning for an rud item, so just return.
+ */
+STATIC void
+xfs_rud_item_pin(
+ struct xfs_log_item *lip)
+{
+}
+
+/*
+ * Since pinning has no meaning for an rud item, unpinning does
+ * not either.
+ */
+STATIC void
+xfs_rud_item_unpin(
+ struct xfs_log_item *lip,
+ int remove)
+{
+}
+
+/*
+ * There isn't much you can do to push on an rud item. It is simply stuck
+ * waiting for the log to be flushed to disk.
+ */
+STATIC uint
+xfs_rud_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
+{
+ return XFS_ITEM_PINNED;
+}
+
+/*
+ * The RUD is either committed or aborted if the transaction is cancelled. If
+ * the transaction is cancelled, drop our reference to the RUI and free the
+ * RUD.
+ */
+STATIC void
+xfs_rud_item_unlock(
+ struct xfs_log_item *lip)
+{
+ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+
+ if (lip->li_flags & XFS_LI_ABORTED) {
+ xfs_rui_release(rudp->rud_ruip);
+ kmem_zone_free(xfs_rud_zone, rudp);
+ }
+}
+
+/*
+ * When the rud item is committed to disk, all we need to do is delete our
+ * reference to our partner rui item and then free ourselves. Since we're
+ * freeing ourselves we must return -1 to keep the transaction code from
+ * further referencing this item.
+ */
+STATIC xfs_lsn_t
+xfs_rud_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+
+ /*
+ * Drop the RUI reference regardless of whether the RUD has been
+ * aborted. Once the RUD transaction is constructed, it is the sole
+ * responsibility of the RUD to release the RUI (even if the RUI is
+ * aborted due to log I/O error).
+ */
+ xfs_rui_release(rudp->rud_ruip);
+ kmem_zone_free(xfs_rud_zone, rudp);
+
+ return (xfs_lsn_t)-1;
+}
+
+/*
+ * The RUD dependency tracking op doesn't do squat. It can't because
+ * it doesn't know where the free extent is coming from. The dependency
+ * tracking has to be handled by the "enclosing" metadata object. For
+ * example, for inodes, the inode is locked throughout the extent freeing
+ * so the dependency should be recorded there.
+ */
+STATIC void
+xfs_rud_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all rud log items.
+ */
+static const struct xfs_item_ops xfs_rud_item_ops = {
+ .iop_size = xfs_rud_item_size,
+ .iop_format = xfs_rud_item_format,
+ .iop_pin = xfs_rud_item_pin,
+ .iop_unpin = xfs_rud_item_unpin,
+ .iop_unlock = xfs_rud_item_unlock,
+ .iop_committed = xfs_rud_item_committed,
+ .iop_push = xfs_rud_item_push,
+ .iop_committing = xfs_rud_item_committing,
+};
+
+/*
+ * Allocate and initialize an rud item, linking it to the given rui item.
+ */
+struct xfs_rud_log_item *
+xfs_rud_init(
+ struct xfs_mount *mp,
+ struct xfs_rui_log_item *ruip)
+{
+ struct xfs_rud_log_item *rudp;
+
+ rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
+ xfs_log_item_init(mp, &rudp->rud_item, XFS_LI_RUD, &xfs_rud_item_ops);
+ rudp->rud_ruip = ruip;
+ rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;
+
+ return rudp;
+}
+
+/*
+ * Process an rmap update intent item that was recovered from the log.
+ * We need to update the rmapbt.
+ */
+int
+xfs_rui_recover(
+ struct xfs_mount *mp,
+ struct xfs_rui_log_item *ruip)
+{
+ int i;
+ int error = 0;
+ struct xfs_map_extent *rmap;
+ xfs_fsblock_t startblock_fsb;
+ bool op_ok;
+ struct xfs_rud_log_item *rudp;
+ enum xfs_rmap_intent_type type;
+ int whichfork;
+ xfs_exntst_t state;
+ struct xfs_trans *tp;
+ struct xfs_btree_cur *rcur = NULL;
+
+ ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));
+
+ /*
+ * First check the validity of the extents described by the
+ * RUI. If any are bad, then assume that all are bad and
+ * just toss the RUI.
+ */
+ for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
+ rmap = &ruip->rui_format.rui_extents[i];
+ startblock_fsb = XFS_BB_TO_FSB(mp,
+ XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
+ switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
+ case XFS_RMAP_EXTENT_MAP:
+ case XFS_RMAP_EXTENT_UNMAP:
+ case XFS_RMAP_EXTENT_CONVERT:
+ case XFS_RMAP_EXTENT_ALLOC:
+ case XFS_RMAP_EXTENT_FREE:
+ op_ok = true;
+ break;
+ default:
+ op_ok = false;
+ break;
+ }
+ if (!op_ok || startblock_fsb == 0 ||
+ rmap->me_len == 0 ||
+ startblock_fsb >= mp->m_sb.sb_dblocks ||
+ rmap->me_len >= mp->m_sb.sb_agblocks ||
+ (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
+ /*
+ * This will pull the RUI from the AIL and
+ * free the memory associated with it.
+ */
+ set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
+ xfs_rui_release(ruip);
+ return -EIO;
+ }
+ }
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error)
+ return error;
+ rudp = xfs_trans_get_rud(tp, ruip);
+
+ for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
+ rmap = &ruip->rui_format.rui_extents[i];
+ state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
+ XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
+ whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+ switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
+ case XFS_RMAP_EXTENT_MAP:
+ type = XFS_RMAP_MAP;
+ break;
+ case XFS_RMAP_EXTENT_UNMAP:
+ type = XFS_RMAP_UNMAP;
+ break;
+ case XFS_RMAP_EXTENT_CONVERT:
+ type = XFS_RMAP_CONVERT;
+ break;
+ case XFS_RMAP_EXTENT_ALLOC:
+ type = XFS_RMAP_ALLOC;
+ break;
+ case XFS_RMAP_EXTENT_FREE:
+ type = XFS_RMAP_FREE;
+ break;
+ default:
+ error = -EFSCORRUPTED;
+ goto abort_error;
+ }
+ error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
+ rmap->me_owner, whichfork,
+ rmap->me_startoff, rmap->me_startblock,
+ rmap->me_len, state, &rcur);
+ if (error)
+ goto abort_error;
+ }
+
+ xfs_rmap_finish_one_cleanup(tp, rcur, error);
+ set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
+ error = xfs_trans_commit(tp);
+ return error;
+
+abort_error:
+ xfs_rmap_finish_one_cleanup(tp, rcur, error);
+ xfs_trans_cancel(tp);
+ return error;
+}
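
The RUD_ITEM() helper at the top of this hunk uses container_of() to climb from the embedded struct xfs_log_item back to its enclosing struct xfs_rud_log_item. As a rough, self-contained userspace sketch of that pattern (the struct names and fields below are invented stand-ins, and the kernel's real container_of() additionally type-checks the member pointer):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the kernel's container_of(): member ptr -> parent ptr. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct log_item {                       /* plays the role of xfs_log_item */
        int li_type;
};

struct rud_log_item {                   /* plays the role of xfs_rud_log_item */
        struct log_item    rud_item;    /* embedded, not a pointer */
        unsigned long long rud_rui_id;
};

static struct rud_log_item *RUD_ITEM(struct log_item *lip)
{
        return container_of(lip, struct rud_log_item, rud_item);
}

int main(void)
{
        struct rud_log_item rud = {
                .rud_item   = { .li_type = 1 },  /* toy type code */
                .rud_rui_id = 42,
        };
        struct log_item *lip = &rud.rud_item;   /* what the log layer sees */

        /* Recover the outer structure from the embedded member. */
        printf("rui id %llu\n", RUD_ITEM(lip)->rud_rui_id);
        return 0;
}
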
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
new file mode 100644
index 000000000000..aefcc3a318a5
--- /dev/null
+++ b/fs/xfs/xfs_rmap_item.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_RMAP_ITEM_H__
+#define __XFS_RMAP_ITEM_H__
+
+/*
+ * There are (currently) three pairs of rmap btree redo item types: map, unmap,
+ * and convert. The common abbreviations for these are RUI (rmap update
+ * intent) and RUD (rmap update done). The redo item type is encoded in the
+ * flags field of each xfs_map_extent.
+ *
+ * *I items should be recorded in the *first* of a series of rolled
+ * transactions, and the *D items should be recorded in the same transaction
+ * that records the associated rmapbt updates. Typically, the first
+ * transaction will record a bmbt update, followed by some number of
+ * transactions containing rmapbt updates, and finally transactions with any
+ * bnobt/cntbt updates.
+ *
+ * Should the system crash after the commit of the first transaction but
+ * before the commit of the final transaction in a series, log recovery will
+ * use the redo information recorded by the intent items to replay the
+ * (rmapbt/bnobt/cntbt) metadata updates in the non-first transaction.
+ */
+
+/* kernel only RUI/RUD definitions */
+
+struct xfs_mount;
+struct kmem_zone;
+
+/*
+ * Max number of extents in fast allocation path.
+ */
+#define XFS_RUI_MAX_FAST_EXTENTS 16
+
+/*
+ * Define RUI flag bits. Manipulated by set/clear/test_bit operators.
+ */
+#define XFS_RUI_RECOVERED 1
+
+/*
+ * This is the "rmap update intent" log item. It is used to log the fact that
+ * some reverse mappings need to change. It is used in conjunction with the
+ * "rmap update done" log item described below.
+ *
+ * These log items follow the same rules as struct xfs_efi_log_item; see the
+ * comments about that structure (in xfs_extfree_item.h) for more details.
+ */
+struct xfs_rui_log_item {
+ struct xfs_log_item rui_item;
+ atomic_t rui_refcount;
+ atomic_t rui_next_extent;
+ unsigned long rui_flags; /* misc flags */
+ struct xfs_rui_log_format rui_format;
+};
+
+/*
+ * This is the "rmap update done" log item. It is used to log the fact that
+ * some rmapbt updates mentioned in an earlier rui item have been performed.
+ */
+struct xfs_rud_log_item {
+ struct xfs_log_item rud_item;
+ struct xfs_rui_log_item *rud_ruip;
+ struct xfs_rud_log_format rud_format;
+};
+
+extern struct kmem_zone *xfs_rui_zone;
+extern struct kmem_zone *xfs_rud_zone;
+
+struct xfs_rui_log_item *xfs_rui_init(struct xfs_mount *, uint);
+struct xfs_rud_log_item *xfs_rud_init(struct xfs_mount *,
+ struct xfs_rui_log_item *);
+int xfs_rui_copy_format(struct xfs_log_iovec *buf,
+ struct xfs_rui_log_format *dst_rui_fmt);
+void xfs_rui_item_free(struct xfs_rui_log_item *);
+void xfs_rui_release(struct xfs_rui_log_item *);
+int xfs_rui_recover(struct xfs_mount *mp, struct xfs_rui_log_item *ruip);
+
+#endif /* __XFS_RMAP_ITEM_H__ */
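
The comment block above describes the intent/done protocol: an RUI is logged early in a chain of rolled transactions, the matching RUD is logged alongside the actual rmapbt updates, and recovery replays any intent whose done record never reached the log. The following toy program is only a hedged model of that idea; the record layout, ids, and replay logic are invented for illustration and have nothing to do with XFS's real log format.

#include <stdbool.h>
#include <stdio.h>

/* Toy log record: an intent or a done marker referencing an intent id. */
enum rec_kind { REC_INTENT, REC_DONE };

struct rec {
        enum rec_kind kind;
        int           id;       /* pairs an intent with its done record */
};

/* "Recovery": replay every intent that has no matching done record. */
static void recover(const struct rec *log, int nrecs)
{
        for (int i = 0; i < nrecs; i++) {
                bool done = false;

                if (log[i].kind != REC_INTENT)
                        continue;
                for (int j = i + 1; j < nrecs; j++)
                        if (log[j].kind == REC_DONE && log[j].id == log[i].id)
                                done = true;
                if (!done)
                        printf("replaying intent %d\n", log[i].id);
        }
}

int main(void)
{
        /*
         * Transaction 1 logged intent 1; a later transaction logged its
         * done record.  Intent 2 was logged, but the system "crashed"
         * before its done record made it to the log.
         */
        struct rec log[] = {
                { REC_INTENT, 1 },
                { REC_DONE,   1 },
                { REC_INTENT, 2 },
        };

        recover(log, 3);        /* prints: replaying intent 2 */
        return 0;
}
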
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 3938b37d1043..802bcc326d9f 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -769,7 +770,7 @@ xfs_growfs_rt_alloc(
xfs_daddr_t d; /* disk block address */
int error; /* error return value */
xfs_fsblock_t firstblock;/* first block allocated in xaction */
- struct xfs_bmap_free flist; /* list of freed blocks */
+ struct xfs_defer_ops dfops; /* list of freed blocks */
xfs_fsblock_t fsbno; /* filesystem block for bno */
struct xfs_bmbt_irec map; /* block map output */
int nmap; /* number of block maps */
@@ -794,14 +795,14 @@ xfs_growfs_rt_alloc(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
/*
* Allocate blocks to the bitmap file.
*/
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &flist);
+ resblks, &map, &nmap, &dfops);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
@@ -809,7 +810,7 @@ xfs_growfs_rt_alloc(
/*
* Free any blocks freed up in the transaction, then commit.
*/
- error = xfs_bmap_finish(&tp, &flist, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
error = xfs_trans_commit(tp);
@@ -862,7 +863,7 @@ xfs_growfs_rt_alloc(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index d266e835ecc3..6e812fe0fd43 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -61,6 +61,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{ "bmbt2", XFSSTAT_END_BMBT_V2 },
{ "ibt2", XFSSTAT_END_IBT_V2 },
{ "fibt2", XFSSTAT_END_FIBT_V2 },
+ { "rmapbt", XFSSTAT_END_RMAP_V2 },
/* we print both series of quota information together */
{ "qm", XFSSTAT_END_QM },
};
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 483b0eff1988..657865f51e78 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -197,7 +197,23 @@ struct xfsstats {
__uint32_t xs_fibt_2_alloc;
__uint32_t xs_fibt_2_free;
__uint32_t xs_fibt_2_moves;
-#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_FIBT_V2+6)
+#define XFSSTAT_END_RMAP_V2 (XFSSTAT_END_FIBT_V2+15)
+ __uint32_t xs_rmap_2_lookup;
+ __uint32_t xs_rmap_2_compare;
+ __uint32_t xs_rmap_2_insrec;
+ __uint32_t xs_rmap_2_delrec;
+ __uint32_t xs_rmap_2_newroot;
+ __uint32_t xs_rmap_2_killroot;
+ __uint32_t xs_rmap_2_increment;
+ __uint32_t xs_rmap_2_decrement;
+ __uint32_t xs_rmap_2_lshift;
+ __uint32_t xs_rmap_2_rshift;
+ __uint32_t xs_rmap_2_split;
+ __uint32_t xs_rmap_2_join;
+ __uint32_t xs_rmap_2_alloc;
+ __uint32_t xs_rmap_2_free;
+ __uint32_t xs_rmap_2_moves;
+#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_RMAP_V2+6)
__uint32_t xs_qm_dqreclaims;
__uint32_t xs_qm_dqreclaim_misses;
__uint32_t xs_qm_dquot_dups;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 0303f1005f88..24ef83ef04de 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -46,6 +46,7 @@
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
+#include "xfs_rmap_item.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -1075,7 +1076,7 @@ xfs_fs_statfs(
statp->f_blocks = sbp->sb_dblocks - lsize;
spin_unlock(&mp->m_sb_lock);
- statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
statp->f_bavail = statp->f_bfree;
fakeinos = statp->f_bfree << sbp->sb_inopblog;
@@ -1573,6 +1574,10 @@ xfs_fs_fill_super(
}
}
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ xfs_alert(mp,
+ "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
+
error = xfs_mountfs(mp);
if (error)
goto out_filestream_unmount;
@@ -1697,7 +1702,7 @@ xfs_init_zones(void)
goto out_free_ioend_bioset;
xfs_bmap_free_item_zone = kmem_zone_init(
- sizeof(struct xfs_bmap_free_item),
+ sizeof(struct xfs_extent_free_item),
"xfs_bmap_free_item");
if (!xfs_bmap_free_item_zone)
goto out_destroy_log_ticket_zone;
@@ -1765,8 +1770,24 @@ xfs_init_zones(void)
if (!xfs_icreate_zone)
goto out_destroy_ili_zone;
+ xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
+ "xfs_rud_item");
+ if (!xfs_rud_zone)
+ goto out_destroy_icreate_zone;
+
+ xfs_rui_zone = kmem_zone_init((sizeof(struct xfs_rui_log_item) +
+ ((XFS_RUI_MAX_FAST_EXTENTS - 1) *
+ sizeof(struct xfs_map_extent))),
+ "xfs_rui_item");
+ if (!xfs_rui_zone)
+ goto out_destroy_rud_zone;
+
return 0;
+ out_destroy_rud_zone:
+ kmem_zone_destroy(xfs_rud_zone);
+ out_destroy_icreate_zone:
+ kmem_zone_destroy(xfs_icreate_zone);
out_destroy_ili_zone:
kmem_zone_destroy(xfs_ili_zone);
out_destroy_inode_zone:
@@ -1805,6 +1826,8 @@ xfs_destroy_zones(void)
* destroy caches.
*/
rcu_barrier();
+ kmem_zone_destroy(xfs_rui_zone);
+ kmem_zone_destroy(xfs_rud_zone);
kmem_zone_destroy(xfs_icreate_zone);
kmem_zone_destroy(xfs_ili_zone);
kmem_zone_destroy(xfs_inode_zone);
@@ -1854,6 +1877,9 @@ init_xfs_fs(void)
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
+ xfs_extent_free_init_defer_op();
+ xfs_rmap_update_init_defer_op();
+
xfs_dir_startup();
error = xfs_init_zones();
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 08a46c6181fd..58142aeeeea6 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -26,6 +26,7 @@
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
+#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
@@ -172,7 +173,7 @@ xfs_symlink(
struct xfs_inode *ip = NULL;
int error = 0;
int pathlen;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
xfs_fileoff_t first_fsb;
@@ -269,7 +270,7 @@ xfs_symlink(
* Initialize the bmap freelist prior to calling either
* bmapi or the directory create code.
*/
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* Allocate an inode for the symlink.
@@ -313,7 +314,7 @@ xfs_symlink(
error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
XFS_BMAPI_METADATA, &first_block, resblks,
- mval, &nmaps, &free_list);
+ mval, &nmaps, &dfops);
if (error)
goto out_bmap_cancel;
@@ -361,7 +362,7 @@ xfs_symlink(
* Create the directory entry for the symlink.
*/
error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
- &first_block, &free_list, resblks);
+ &first_block, &dfops, resblks);
if (error)
goto out_bmap_cancel;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -376,7 +377,7 @@ xfs_symlink(
xfs_trans_set_sync(tp);
}
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -392,7 +393,7 @@ xfs_symlink(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -426,7 +427,7 @@ xfs_inactive_symlink_rmt(
int done;
int error;
xfs_fsblock_t first_block;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
int i;
xfs_mount_t *mp;
xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
@@ -465,7 +466,7 @@ xfs_inactive_symlink_rmt(
* Find the block(s) so we can inval and unmap them.
*/
done = 0;
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
nmaps = ARRAY_SIZE(mval);
error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
mval, &nmaps, 0);
@@ -485,17 +486,17 @@ xfs_inactive_symlink_rmt(
xfs_trans_binval(tp, bp);
}
/*
- * Unmap the dead block(s) to the free_list.
+ * Unmap the dead block(s) to the dfops.
*/
error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps,
- &first_block, &free_list, &done);
+ &first_block, &dfops, &done);
if (error)
goto error_bmap_cancel;
ASSERT(done);
/*
* Commit the first transaction. This logs the EFI and the inode.
*/
- error = xfs_bmap_finish(&tp, &free_list, ip);
+ error = xfs_defer_finish(&tp, &dfops, ip);
if (error)
goto error_bmap_cancel;
/*
@@ -525,7 +526,7 @@ xfs_inactive_symlink_rmt(
return 0;
error_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
error_trans_cancel:
xfs_trans_cancel(tp);
error_unlock:
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 13a029806805..7f17ae6d709a 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -22,7 +22,8 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_da_btree.h"
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 145169093fe0..551b7e26980c 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -38,6 +38,7 @@ struct xlog_recover_item;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
+struct xfs_btree_cur;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -2185,6 +2186,379 @@ DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
+/* btree cursor events */
+DECLARE_EVENT_CLASS(xfs_btree_cur_class,
+ TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
+ TP_ARGS(cur, level, bp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(int, level)
+ __field(int, nlevels)
+ __field(int, ptr)
+ __field(xfs_daddr_t, daddr)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->nlevels = cur->bc_nlevels;
+ __entry->ptr = cur->bc_ptrs[level];
+ __entry->daddr = bp ? bp->b_bn : -1;
+ ),
+ TP_printk("dev %d:%d btnum %d level %d/%d ptr %d daddr 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->btnum,
+ __entry->level,
+ __entry->nlevels,
+ __entry->ptr,
+ (unsigned long long)__entry->daddr)
+)
+
+#define DEFINE_BTREE_CUR_EVENT(name) \
+DEFINE_EVENT(xfs_btree_cur_class, name, \
+ TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), \
+ TP_ARGS(cur, level, bp))
+DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys);
+DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
+
+/* deferred ops */
+struct xfs_defer_pending;
+struct xfs_defer_intake;
+struct xfs_defer_ops;
+
+DECLARE_EVENT_CLASS(xfs_defer_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop),
+ TP_ARGS(mp, dop),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(void *, dop)
+ __field(bool, committed)
+ __field(bool, low)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp ? mp->m_super->s_dev : 0;
+ __entry->dop = dop;
+ __entry->committed = dop->dop_committed;
+ __entry->low = dop->dop_low;
+ ),
+ TP_printk("dev %d:%d ops %p committed %d low %d\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dop,
+ __entry->committed,
+ __entry->low)
+)
+#define DEFINE_DEFER_EVENT(name) \
+DEFINE_EVENT(xfs_defer_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop), \
+ TP_ARGS(mp, dop))
+
+DECLARE_EVENT_CLASS(xfs_defer_error_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error),
+ TP_ARGS(mp, dop, error),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(void *, dop)
+ __field(bool, committed)
+ __field(bool, low)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp ? mp->m_super->s_dev : 0;
+ __entry->dop = dop;
+ __entry->committed = dop->dop_committed;
+ __entry->low = dop->dop_low;
+ __entry->error = error;
+ ),
+ TP_printk("dev %d:%d ops %p committed %d low %d err %d\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dop,
+ __entry->committed,
+ __entry->low,
+ __entry->error)
+)
+#define DEFINE_DEFER_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_defer_error_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), \
+ TP_ARGS(mp, dop, error))
+
+DECLARE_EVENT_CLASS(xfs_defer_pending_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),
+ TP_ARGS(mp, dfp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(void *, intent)
+ __field(bool, committed)
+ __field(int, nr)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp ? mp->m_super->s_dev : 0;
+ __entry->type = dfp->dfp_type->type;
+ __entry->intent = dfp->dfp_intent;
+ __entry->committed = dfp->dfp_committed;
+ __entry->nr = dfp->dfp_count;
+ ),
+ TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->intent,
+ __entry->committed,
+ __entry->nr)
+)
+#define DEFINE_DEFER_PENDING_EVENT(name) \
+DEFINE_EVENT(xfs_defer_pending_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), \
+ TP_ARGS(mp, dfp))
+
+DECLARE_EVENT_CLASS(xfs_phys_extent_deferred_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int type, xfs_agblock_t agbno, xfs_extlen_t len),
+ TP_ARGS(mp, agno, type, agbno, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, type)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->type = type;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d op %d agno %u agbno %u len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->agno,
+ __entry->agbno,
+ __entry->len)
+);
+#define DEFINE_PHYS_EXTENT_DEFERRED_EVENT(name) \
+DEFINE_EVENT(xfs_phys_extent_deferred_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ int type, \
+ xfs_agblock_t bno, \
+ xfs_extlen_t len), \
+ TP_ARGS(mp, agno, type, bno, len))
+
+DECLARE_EVENT_CLASS(xfs_map_extent_deferred_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int op,
+ xfs_agblock_t agbno,
+ xfs_ino_t ino,
+ int whichfork,
+ xfs_fileoff_t offset,
+ xfs_filblks_t len,
+ xfs_exntst_t state),
+ TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_ino_t, ino)
+ __field(xfs_agblock_t, agbno)
+ __field(int, whichfork)
+ __field(xfs_fileoff_t, l_loff)
+ __field(xfs_filblks_t, l_len)
+ __field(xfs_exntst_t, l_state)
+ __field(int, op)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->ino = ino;
+ __entry->agbno = agbno;
+ __entry->whichfork = whichfork;
+ __entry->l_loff = offset;
+ __entry->l_len = len;
+ __entry->l_state = state;
+ __entry->op = op;
+ ),
+ TP_printk("dev %d:%d op %d agno %u agbno %u owner %lld %s offset %llu len %llu state %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->op,
+ __entry->agno,
+ __entry->agbno,
+ __entry->ino,
+ __entry->whichfork == XFS_ATTR_FORK ? "attr" : "data",
+ __entry->l_loff,
+ __entry->l_len,
+ __entry->l_state)
+);
+#define DEFINE_MAP_EXTENT_DEFERRED_EVENT(name) \
+DEFINE_EVENT(xfs_map_extent_deferred_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ int op, \
+ xfs_agblock_t agbno, \
+ xfs_ino_t ino, \
+ int whichfork, \
+ xfs_fileoff_t offset, \
+ xfs_filblks_t len, \
+ xfs_exntst_t state), \
+ TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
+
+DEFINE_DEFER_EVENT(xfs_defer_init);
+DEFINE_DEFER_EVENT(xfs_defer_cancel);
+DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
+DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
+DEFINE_DEFER_EVENT(xfs_defer_finish);
+DEFINE_DEFER_EVENT(xfs_defer_finish_done);
+
+DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
+DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
+DEFINE_DEFER_ERROR_EVENT(xfs_defer_op_finish_error);
+
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_work);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_cancel);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_commit);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_cancel);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
+
+#define DEFINE_BMAP_FREE_DEFERRED_EVENT DEFINE_PHYS_EXTENT_DEFERRED_EVENT
+DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_defer);
+DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_deferred);
+
+/* rmap tracepoints */
+DECLARE_EVENT_CLASS(xfs_rmap_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
+ struct xfs_owner_info *oinfo),
+ TP_ARGS(mp, agno, agbno, len, unwritten, oinfo),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(uint64_t, owner)
+ __field(uint64_t, offset)
+ __field(unsigned long, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->owner = oinfo->oi_owner;
+ __entry->offset = oinfo->oi_offset;
+ __entry->flags = oinfo->oi_flags;
+ if (unwritten)
+ __entry->flags |= XFS_RMAP_UNWRITTEN;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u owner %lld offset %llu flags 0x%lx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+);
+#define DEFINE_RMAP_EVENT(name) \
+DEFINE_EVENT(xfs_rmap_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
+ struct xfs_owner_info *oinfo), \
+ TP_ARGS(mp, agno, agbno, len, unwritten, oinfo))
+
+/* simple AG-based error/%ip tracepoint class */
+DECLARE_EVENT_CLASS(xfs_ag_error_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agno, error, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, error)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->error = error;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno %u error %d caller %ps",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->error,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_AG_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_ag_error_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agno, error, caller_ip))
+
+DEFINE_RMAP_EVENT(xfs_rmap_unmap);
+DEFINE_RMAP_EVENT(xfs_rmap_unmap_done);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_unmap_error);
+DEFINE_RMAP_EVENT(xfs_rmap_map);
+DEFINE_RMAP_EVENT(xfs_rmap_map_done);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_map_error);
+DEFINE_RMAP_EVENT(xfs_rmap_convert);
+DEFINE_RMAP_EVENT(xfs_rmap_convert_done);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_error);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_state);
+
+DECLARE_EVENT_CLASS(xfs_rmapbt_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t agbno, xfs_extlen_t len,
+ uint64_t owner, uint64_t offset, unsigned int flags),
+ TP_ARGS(mp, agno, agbno, len, owner, offset, flags),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(uint64_t, owner)
+ __field(uint64_t, offset)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->owner = owner;
+ __entry->offset = offset;
+ __entry->flags = flags;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u owner %lld offset %llu flags 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+);
+#define DEFINE_RMAPBT_EVENT(name) \
+DEFINE_EVENT(xfs_rmapbt_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ xfs_agblock_t agbno, xfs_extlen_t len, \
+ uint64_t owner, uint64_t offset, unsigned int flags), \
+ TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
+
+#define DEFINE_RMAP_DEFERRED_EVENT DEFINE_MAP_EXTENT_DEFERRED_EVENT
+DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
+DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);
+
+DEFINE_BUSY_EVENT(xfs_rmapbt_alloc_block);
+DEFINE_BUSY_EVENT(xfs_rmapbt_free_block);
+DEFINE_RMAPBT_EVENT(xfs_rmap_update);
+DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
+DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_insert_error);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_delete_error);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_update_error);
+DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result);
+DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result);
+DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
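
Each DECLARE_EVENT_CLASS above defines one record layout and format string, and every DEFINE_EVENT stamped from it becomes, roughly speaking, a callable trace_<name>() hook that shares that layout. A plain-C analogue of the class/instance macro pattern (the real ftrace macros also handle static keys, the ring buffer, and so on; everything below is an invented toy):

#include <stdio.h>

/* Shared "event class": one formatter used by every event in the class. */
static void rmap_class_print(const char *event, unsigned int agno,
                             unsigned int agbno, unsigned int len)
{
        printf("%s: agno %u agbno %u len %u\n", event, agno, agbno, len);
}

/* DEFINE_EVENT analogue: stamp out a thin per-event trace_<name>() hook. */
#define DEFINE_RMAP_EVENT(name)                                         \
static void trace_##name(unsigned int agno, unsigned int agbno,         \
                         unsigned int len)                              \
{                                                                       \
        rmap_class_print(#name, agno, agbno, len);                      \
}

DEFINE_RMAP_EVENT(xfs_rmap_map)
DEFINE_RMAP_EVENT(xfs_rmap_unmap)

int main(void)
{
        trace_xfs_rmap_map(2, 1024, 8);
        trace_xfs_rmap_unmap(2, 1024, 8);
        return 0;
}
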
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 9b2b9fa89331..e2bf86aad33d 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -33,6 +33,9 @@ struct xfs_trans;
struct xfs_trans_res;
struct xfs_dquot_acct;
struct xfs_busy_extent;
+struct xfs_rud_log_item;
+struct xfs_rui_log_item;
+struct xfs_btree_cur;
typedef struct xfs_log_item {
struct list_head li_ail; /* AIL pointers */
@@ -210,17 +213,14 @@ void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
-struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint);
-void xfs_trans_log_efi_extent(xfs_trans_t *,
- struct xfs_efi_log_item *,
- xfs_fsblock_t,
- xfs_extlen_t);
-struct xfs_efd_log_item *xfs_trans_get_efd(xfs_trans_t *,
+
+void xfs_extent_free_init_defer_op(void);
+struct xfs_efd_log_item *xfs_trans_get_efd(struct xfs_trans *,
struct xfs_efi_log_item *,
uint);
int xfs_trans_free_extent(struct xfs_trans *,
struct xfs_efd_log_item *, xfs_fsblock_t,
- xfs_extlen_t);
+ xfs_extlen_t, struct xfs_owner_info *);
int xfs_trans_commit(struct xfs_trans *);
int __xfs_trans_roll(struct xfs_trans **, struct xfs_inode *, int *);
int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
@@ -236,4 +236,16 @@ void xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
extern kmem_zone_t *xfs_trans_zone;
extern kmem_zone_t *xfs_log_item_desc_zone;
+/* rmap updates */
+enum xfs_rmap_intent_type;
+
+void xfs_rmap_update_init_defer_op(void);
+struct xfs_rud_log_item *xfs_trans_get_rud(struct xfs_trans *tp,
+ struct xfs_rui_log_item *ruip);
+int xfs_trans_log_finish_rmap_update(struct xfs_trans *tp,
+ struct xfs_rud_log_item *rudp, enum xfs_rmap_intent_type type,
+ __uint64_t owner, int whichfork, xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock, xfs_filblks_t blockcount,
+ xfs_exntst_t state, struct xfs_btree_cur **pcur);
+
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index a96ae540eb62..459ddec137a4 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -21,66 +21,15 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
-
-/*
- * This routine is called to allocate an "extent free intention"
- * log item that will hold nextents worth of extents. The
- * caller must use all nextents extents, because we are not
- * flexible about this at all.
- */
-xfs_efi_log_item_t *
-xfs_trans_get_efi(xfs_trans_t *tp,
- uint nextents)
-{
- xfs_efi_log_item_t *efip;
-
- ASSERT(tp != NULL);
- ASSERT(nextents > 0);
-
- efip = xfs_efi_init(tp->t_mountp, nextents);
- ASSERT(efip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &efip->efi_item);
- return efip;
-}
-
-/*
- * This routine is called to indicate that the described
- * extent is to be logged as needing to be freed. It should
- * be called once for each extent to be freed.
- */
-void
-xfs_trans_log_efi_extent(xfs_trans_t *tp,
- xfs_efi_log_item_t *efip,
- xfs_fsblock_t start_block,
- xfs_extlen_t ext_len)
-{
- uint next_extent;
- xfs_extent_t *extp;
-
- tp->t_flags |= XFS_TRANS_DIRTY;
- efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
-
- /*
- * atomic_inc_return gives us the value after the increment;
- * we want to use it as an array index so we need to subtract 1 from
- * it.
- */
- next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
- ASSERT(next_extent < efip->efi_format.efi_nextents);
- extp = &(efip->efi_format.efi_extents[next_extent]);
- extp->ext_start = start_block;
- extp->ext_len = ext_len;
-}
-
+#include "xfs_bmap.h"
+#include "xfs_trace.h"
/*
* This routine is called to allocate an "extent free done"
@@ -88,12 +37,12 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
* caller must use all nextents extents, because we are not
* flexible about this at all.
*/
-xfs_efd_log_item_t *
-xfs_trans_get_efd(xfs_trans_t *tp,
- xfs_efi_log_item_t *efip,
- uint nextents)
+struct xfs_efd_log_item *
+xfs_trans_get_efd(struct xfs_trans *tp,
+ struct xfs_efi_log_item *efip,
+ uint nextents)
{
- xfs_efd_log_item_t *efdp;
+ struct xfs_efd_log_item *efdp;
ASSERT(tp != NULL);
ASSERT(nextents > 0);
@@ -118,13 +67,19 @@ xfs_trans_free_extent(
struct xfs_trans *tp,
struct xfs_efd_log_item *efdp,
xfs_fsblock_t start_block,
- xfs_extlen_t ext_len)
+ xfs_extlen_t ext_len,
+ struct xfs_owner_info *oinfo)
{
+ struct xfs_mount *mp = tp->t_mountp;
uint next_extent;
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, start_block);
+ xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, start_block);
struct xfs_extent *extp;
int error;
- error = xfs_free_extent(tp, start_block, ext_len);
+ trace_xfs_bmap_free_deferred(tp->t_mountp, agno, 0, agbno, ext_len);
+
+ error = xfs_free_extent(tp, start_block, ext_len, oinfo);
/*
* Mark the transaction dirty, even on error. This ensures the
@@ -145,3 +100,139 @@ xfs_trans_free_extent(
return error;
}
+
+/* Sort extent-free items by AG. */
+static int
+xfs_extent_free_diff_items(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_mount *mp = priv;
+ struct xfs_extent_free_item *ra;
+ struct xfs_extent_free_item *rb;
+
+ ra = container_of(a, struct xfs_extent_free_item, xefi_list);
+ rb = container_of(b, struct xfs_extent_free_item, xefi_list);
+ return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) -
+ XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
+}
+
+/* Get an EFI. */
+STATIC void *
+xfs_extent_free_create_intent(
+ struct xfs_trans *tp,
+ unsigned int count)
+{
+ struct xfs_efi_log_item *efip;
+
+ ASSERT(tp != NULL);
+ ASSERT(count > 0);
+
+ efip = xfs_efi_init(tp->t_mountp, count);
+ ASSERT(efip != NULL);
+
+ /*
+ * Get a log_item_desc to point at the new item.
+ */
+ xfs_trans_add_item(tp, &efip->efi_item);
+ return efip;
+}
+
+/* Log a free extent to the intent item. */
+STATIC void
+xfs_extent_free_log_item(
+ struct xfs_trans *tp,
+ void *intent,
+ struct list_head *item)
+{
+ struct xfs_efi_log_item *efip = intent;
+ struct xfs_extent_free_item *free;
+ uint next_extent;
+ struct xfs_extent *extp;
+
+ free = container_of(item, struct xfs_extent_free_item, xefi_list);
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+ /*
+ * atomic_inc_return gives us the value after the increment;
+ * we want to use it as an array index so we need to subtract 1 from
+ * it.
+ */
+ next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
+ ASSERT(next_extent < efip->efi_format.efi_nextents);
+ extp = &efip->efi_format.efi_extents[next_extent];
+ extp->ext_start = free->xefi_startblock;
+ extp->ext_len = free->xefi_blockcount;
+}
+
+/* Get an EFD so we can process all the free extents. */
+STATIC void *
+xfs_extent_free_create_done(
+ struct xfs_trans *tp,
+ void *intent,
+ unsigned int count)
+{
+ return xfs_trans_get_efd(tp, intent, count);
+}
+
+/* Process a free extent. */
+STATIC int
+xfs_extent_free_finish_item(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop,
+ struct list_head *item,
+ void *done_item,
+ void **state)
+{
+ struct xfs_extent_free_item *free;
+ int error;
+
+ free = container_of(item, struct xfs_extent_free_item, xefi_list);
+ error = xfs_trans_free_extent(tp, done_item,
+ free->xefi_startblock,
+ free->xefi_blockcount,
+ &free->xefi_oinfo);
+ kmem_free(free);
+ return error;
+}
+
+/* Abort all pending EFIs. */
+STATIC void
+xfs_extent_free_abort_intent(
+ void *intent)
+{
+ xfs_efi_release(intent);
+}
+
+/* Cancel a free extent. */
+STATIC void
+xfs_extent_free_cancel_item(
+ struct list_head *item)
+{
+ struct xfs_extent_free_item *free;
+
+ free = container_of(item, struct xfs_extent_free_item, xefi_list);
+ kmem_free(free);
+}
+
+static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
+ .type = XFS_DEFER_OPS_TYPE_FREE,
+ .max_items = XFS_EFI_MAX_FAST_EXTENTS,
+ .diff_items = xfs_extent_free_diff_items,
+ .create_intent = xfs_extent_free_create_intent,
+ .abort_intent = xfs_extent_free_abort_intent,
+ .log_item = xfs_extent_free_log_item,
+ .create_done = xfs_extent_free_create_done,
+ .finish_item = xfs_extent_free_finish_item,
+ .cancel_item = xfs_extent_free_cancel_item,
+};
+
+/* Register the deferred op type. */
+void
+xfs_extent_free_init_defer_op(void)
+{
+ xfs_defer_init_op_type(&xfs_extent_free_defer_type);
+}
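
xfs_extent_free_init_defer_op() registers a vtable of callbacks so the generic deferred-ops code can create intents, log work items, and finish them without knowing that EFIs and EFDs sit behind it. A minimal userspace sketch of that register-and-dispatch pattern, with all structure and function names invented for illustration:

#include <stdio.h>

/* Toy deferred-op vtable, loosely modeled on struct xfs_defer_op_type. */
struct defer_op_type {
        const char *name;
        void      (*create_intent)(unsigned int count);
        void      (*finish_item)(int item);
};

/* Registry that the generic code dispatches through, keyed by op type. */
enum { OP_FREE, OP_RMAP, OP_MAX };
static const struct defer_op_type *defer_ops[OP_MAX];

static void register_defer_op(int type, const struct defer_op_type *ops)
{
        defer_ops[type] = ops;
}

/* One concrete provider, analogous to the extent-free defer type above. */
static void free_create_intent(unsigned int count)
{
        printf("log an intent covering %u extents\n", count);
}

static void free_finish_item(int item)
{
        printf("free extent %d and record it in the done item\n", item);
}

static const struct defer_op_type free_defer_type = {
        .name          = "extent_free",
        .create_intent = free_create_intent,
        .finish_item   = free_finish_item,
};

int main(void)
{
        register_defer_op(OP_FREE, &free_defer_type);

        /* Generic "finish" path: no knowledge of the provider's details. */
        const struct defer_op_type *ops = defer_ops[OP_FREE];

        printf("finishing deferred %s work\n", ops->name);
        ops->create_intent(2);
        ops->finish_item(7);
        ops->finish_item(9);
        return 0;
}
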
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
new file mode 100644
index 000000000000..5a50ef881568
--- /dev/null
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_rmap_item.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+
+/* Set the map extent flags for this reverse mapping. */
+static void
+xfs_trans_set_rmap_flags(
+ struct xfs_map_extent *rmap,
+ enum xfs_rmap_intent_type type,
+ int whichfork,
+ xfs_exntst_t state)
+{
+ rmap->me_flags = 0;
+ if (state == XFS_EXT_UNWRITTEN)
+ rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
+ if (whichfork == XFS_ATTR_FORK)
+ rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
+ switch (type) {
+ case XFS_RMAP_MAP:
+ rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
+ break;
+ case XFS_RMAP_UNMAP:
+ rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
+ break;
+ case XFS_RMAP_CONVERT:
+ rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
+ break;
+ case XFS_RMAP_ALLOC:
+ rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
+ break;
+ case XFS_RMAP_FREE:
+ rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+
+struct xfs_rud_log_item *
+xfs_trans_get_rud(
+ struct xfs_trans *tp,
+ struct xfs_rui_log_item *ruip)
+{
+ struct xfs_rud_log_item *rudp;
+
+ rudp = xfs_rud_init(tp->t_mountp, ruip);
+ xfs_trans_add_item(tp, &rudp->rud_item);
+ return rudp;
+}
+
+/*
+ * Finish an rmap update and log it to the RUD. Note that the transaction is
+ * marked dirty regardless of whether the rmap update succeeds or fails, in
+ * order to support the RUI/RUD lifecycle rules.
+ */
+int
+xfs_trans_log_finish_rmap_update(
+ struct xfs_trans *tp,
+ struct xfs_rud_log_item *rudp,
+ enum xfs_rmap_intent_type type,
+ __uint64_t owner,
+ int whichfork,
+ xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock,
+ xfs_filblks_t blockcount,
+ xfs_exntst_t state,
+ struct xfs_btree_cur **pcur)
+{
+ int error;
+
+ error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
+ startblock, blockcount, state, pcur);
+
+ /*
+ * Mark the transaction dirty, even on error. This ensures the
+ * transaction is aborted, which:
+ *
+ * 1.) releases the RUI and frees the RUD
+ * 2.) shuts down the filesystem
+ */
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ rudp->rud_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+ return error;
+}
+
+/* Sort rmap intents by AG. */
+static int
+xfs_rmap_update_diff_items(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_mount *mp = priv;
+ struct xfs_rmap_intent *ra;
+ struct xfs_rmap_intent *rb;
+
+ ra = container_of(a, struct xfs_rmap_intent, ri_list);
+ rb = container_of(b, struct xfs_rmap_intent, ri_list);
+ return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
+ XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
+}
+
+/* Get an RUI. */
+STATIC void *
+xfs_rmap_update_create_intent(
+ struct xfs_trans *tp,
+ unsigned int count)
+{
+ struct xfs_rui_log_item *ruip;
+
+ ASSERT(tp != NULL);
+ ASSERT(count > 0);
+
+ ruip = xfs_rui_init(tp->t_mountp, count);
+ ASSERT(ruip != NULL);
+
+ /*
+ * Get a log_item_desc to point at the new item.
+ */
+ xfs_trans_add_item(tp, &ruip->rui_item);
+ return ruip;
+}
+
+/* Log rmap updates in the intent item. */
+STATIC void
+xfs_rmap_update_log_item(
+ struct xfs_trans *tp,
+ void *intent,
+ struct list_head *item)
+{
+ struct xfs_rui_log_item *ruip = intent;
+ struct xfs_rmap_intent *rmap;
+ uint next_extent;
+ struct xfs_map_extent *map;
+
+ rmap = container_of(item, struct xfs_rmap_intent, ri_list);
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ ruip->rui_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+ /*
+ * atomic_inc_return gives us the value after the increment;
+ * we want to use it as an array index so we need to subtract 1 from
+ * it.
+ */
+ next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
+ ASSERT(next_extent < ruip->rui_format.rui_nextents);
+ map = &ruip->rui_format.rui_extents[next_extent];
+ map->me_owner = rmap->ri_owner;
+ map->me_startblock = rmap->ri_bmap.br_startblock;
+ map->me_startoff = rmap->ri_bmap.br_startoff;
+ map->me_len = rmap->ri_bmap.br_blockcount;
+ xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
+ rmap->ri_bmap.br_state);
+}
+
+/* Get an RUD so we can process all the deferred rmap updates. */
+STATIC void *
+xfs_rmap_update_create_done(
+ struct xfs_trans *tp,
+ void *intent,
+ unsigned int count)
+{
+ return xfs_trans_get_rud(tp, intent);
+}
+
+/* Process a deferred rmap update. */
+STATIC int
+xfs_rmap_update_finish_item(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop,
+ struct list_head *item,
+ void *done_item,
+ void **state)
+{
+ struct xfs_rmap_intent *rmap;
+ int error;
+
+ rmap = container_of(item, struct xfs_rmap_intent, ri_list);
+ error = xfs_trans_log_finish_rmap_update(tp, done_item,
+ rmap->ri_type,
+ rmap->ri_owner, rmap->ri_whichfork,
+ rmap->ri_bmap.br_startoff,
+ rmap->ri_bmap.br_startblock,
+ rmap->ri_bmap.br_blockcount,
+ rmap->ri_bmap.br_state,
+ (struct xfs_btree_cur **)state);
+ kmem_free(rmap);
+ return error;
+}
+
+/* Clean up after processing deferred rmaps. */
+STATIC void
+xfs_rmap_update_finish_cleanup(
+ struct xfs_trans *tp,
+ void *state,
+ int error)
+{
+ struct xfs_btree_cur *rcur = state;
+
+ xfs_rmap_finish_one_cleanup(tp, rcur, error);
+}
+
+/* Abort all pending RUIs. */
+STATIC void
+xfs_rmap_update_abort_intent(
+ void *intent)
+{
+ xfs_rui_release(intent);
+}
+
+/* Cancel a deferred rmap update. */
+STATIC void
+xfs_rmap_update_cancel_item(
+ struct list_head *item)
+{
+ struct xfs_rmap_intent *rmap;
+
+ rmap = container_of(item, struct xfs_rmap_intent, ri_list);
+ kmem_free(rmap);
+}
+
+static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
+ .type = XFS_DEFER_OPS_TYPE_RMAP,
+ .max_items = XFS_RUI_MAX_FAST_EXTENTS,
+ .diff_items = xfs_rmap_update_diff_items,
+ .create_intent = xfs_rmap_update_create_intent,
+ .abort_intent = xfs_rmap_update_abort_intent,
+ .log_item = xfs_rmap_update_log_item,
+ .create_done = xfs_rmap_update_create_done,
+ .finish_item = xfs_rmap_update_finish_item,
+ .finish_cleanup = xfs_rmap_update_finish_cleanup,
+ .cancel_item = xfs_rmap_update_cancel_item,
+};
+
+/* Register the deferred op type. */
+void
+xfs_rmap_update_init_defer_op(void)
+{
+ xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
+}
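
xfs_trans_set_rmap_flags() packs the intent type together with the unwritten and attr-fork modifiers into the single me_flags word, and xfs_rui_recover() decodes it again with XFS_RMAP_EXTENT_TYPE_MASK. A small round-trip sketch of that encoding, using made-up bit values rather than the real XFS_RMAP_EXTENT_* constants (which are defined elsewhere in the patch):

#include <stdio.h>

/* Illustrative bit layout only -- not the real XFS_RMAP_EXTENT_* values. */
#define RME_TYPE_MASK   0x0fu   /* low bits hold the operation type */
#define RME_MAP         0x01u
#define RME_UNMAP       0x02u
#define RME_CONVERT     0x03u
#define RME_UNWRITTEN   0x10u   /* modifier: extent is unwritten */
#define RME_ATTR_FORK   0x20u   /* modifier: extent lives in the attr fork */

static unsigned int set_rmap_flags(unsigned int type, int attr_fork,
                                   int unwritten)
{
        unsigned int flags = type;

        if (attr_fork)
                flags |= RME_ATTR_FORK;
        if (unwritten)
                flags |= RME_UNWRITTEN;
        return flags;
}

int main(void)
{
        unsigned int flags = set_rmap_flags(RME_CONVERT, 0, 1);

        /* Recovery-style decode: mask out the type, test the modifiers. */
        switch (flags & RME_TYPE_MASK) {
        case RME_MAP:     puts("map");     break;
        case RME_UNMAP:   puts("unmap");   break;
        case RME_CONVERT: puts("convert"); break;
        default:          puts("corrupt"); break;
        }
        printf("unwritten=%d attr_fork=%d\n",
               !!(flags & RME_UNWRITTEN), !!(flags & RME_ATTR_FORK));
        return 0;
}
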
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 788c6c35291a..c1a524de67c5 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -420,6 +420,13 @@ static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwn
container_of(fwnode, struct acpi_data_node, fwnode) : NULL;
}
+static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ return is_acpi_data_node(fwnode) ?
+ (!strcmp(to_acpi_data_node(fwnode)->name, name)) : false;
+}
+
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
return &adev->fwnode;
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index dd86c5fc102d..d7d0f495a34e 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -13,7 +13,7 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
}
#endif
-void __iomem *__init_refok
+void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size);
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size);
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 54643d1f5af4..24563970ff7b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -164,7 +164,7 @@
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
-#define OF_TABLE(cfg, name) __OF_TABLE(config_enabled(cfg), name)
+#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 25afb31f0389..261b86d20e77 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -16,8 +16,14 @@
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
+ RK3399_EDP,
};
+static inline bool is_rockchip(enum analogix_dp_devtype type)
+{
+ return type == RK3288_DP || type == RK3399_EDP;
+}
+
struct analogix_dp_plat_data {
enum analogix_dp_devtype dev_type;
struct drm_panel *panel;
@@ -28,7 +34,8 @@ struct analogix_dp_plat_data {
int (*power_off)(struct analogix_dp_plat_data *);
int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
struct drm_connector *);
- int (*get_modes)(struct analogix_dp_plat_data *);
+ int (*get_modes)(struct analogix_dp_plat_data *,
+ struct drm_connector *);
};
int analogix_dp_resume(struct device *dev);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 84f1a8eefbdb..d3778652e462 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -56,6 +56,7 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <linux/fence.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
@@ -66,6 +67,7 @@
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mem_util.h>
@@ -83,6 +85,8 @@ struct drm_local_map;
struct drm_device_dma;
struct drm_dma_handle;
struct drm_gem_object;
+struct drm_master;
+struct drm_vblank_crtc;
struct device_node;
struct videomode;
@@ -281,13 +285,14 @@ struct drm_ioctl_desc {
/* Event queued up for userspace to read */
struct drm_pending_event {
+ struct completion *completion;
struct drm_event *event;
+ struct fence *fence;
struct list_head link;
struct list_head pending_link;
struct drm_file *file_priv;
pid_t pid; /* pid of requester, no guarantee it's valid by the time
we deliver the event, for tracing only */
- void (*destroy)(struct drm_pending_event *event);
};
/* initial implementaton using a linked list - todo hashtab */
@@ -299,8 +304,6 @@ struct drm_prime_file_private {
/** File private data */
struct drm_file {
unsigned authenticated :1;
- /* Whether we're master for a minor. Protected by master_mutex */
- unsigned is_master :1;
/* true when the client has asked us to expose stereo 3D mode flags */
unsigned stereo_allowed :1;
/*
@@ -311,10 +314,10 @@ struct drm_file {
/* true if client understands atomic properties */
unsigned atomic:1;
/*
- * This client is allowed to gain master privileges for @master.
+ * This client is the creator of @master.
* Protected by struct drm_device::master_mutex.
*/
- unsigned allowed_master:1;
+ unsigned is_master:1;
struct pid *pid;
kuid_t uid;
@@ -332,7 +335,7 @@ struct drm_file {
void *driver_priv;
struct drm_master *master; /* master this node is currently associated with
- N.B. not always minor->master */
+ N.B. not always dev->master */
/**
* fbs - List of framebuffers associated with this file.
*
@@ -371,32 +374,6 @@ struct drm_lock_data {
int idle_has_lock;
};
-/**
- * struct drm_master - drm master structure
- *
- * @refcount: Refcount for this master object.
- * @minor: Link back to minor char device we are master for. Immutable.
- * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
- * @unique_len: Length of unique field. Protected by drm_global_mutex.
- * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
- * @lock: DRI lock information.
- * @driver_priv: Pointer to driver-private information.
- */
-struct drm_master {
- struct kref refcount;
- struct drm_minor *minor;
- char *unique;
- int unique_len;
- struct idr magic_map;
- struct drm_lock_data lock;
- void *driver_priv;
-};
-
-/* Size of ringbuffer for vblank timestamps. Just double-buffer
- * in initial implementation.
- */
-#define DRM_VBLANKTIME_RBSIZE 2
-
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -420,8 +397,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
- int (*suspend) (struct drm_device *, pm_message_t state);
- int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
@@ -434,7 +409,7 @@ struct drm_driver {
*
* Driver callback for fetching a raw hardware vblank counter for @crtc.
* If a device doesn't have a hardware counter, the driver can simply
- * return the value of drm_vblank_count. The DRM core will account for
+ * use drm_vblank_no_hw_counter() function. The DRM core will account for
* missed vblank events while interrupts where disabled based on system
* timestamps.
*
@@ -452,8 +427,8 @@ struct drm_driver {
* @pipe: which irq to enable
*
* Enable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
+ * a hardware vblank counter, the driver should use the
+ * drm_vblank_no_hw_counter() function that keeps a virtual counter.
*
* RETURNS
* Zero on success, appropriate errno if the given @crtc's vblank
@@ -467,8 +442,8 @@ struct drm_driver {
* @pipe: which irq to enable
*
* Disable vblank interrupts for @crtc. If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
+ * a hardware vblank counter, the driver should use the
+ * drm_vblank_no_hw_counter() function that keeps a virtual counter.
*/
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
@@ -573,8 +548,7 @@ struct drm_driver {
int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
bool from_open);
- void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
- bool from_release);
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
int (*debugfs_init)(struct drm_minor *minor);
void (*debugfs_cleanup)(struct drm_minor *minor);
@@ -708,38 +682,6 @@ struct drm_minor {
struct list_head debugfs_list;
struct mutex debugfs_lock; /* Protects debugfs_list. */
-
- /* currently active master for this node. Protected by master_mutex */
- struct drm_master *master;
-};
-
-
-struct drm_pending_vblank_event {
- struct drm_pending_event base;
- unsigned int pipe;
- struct drm_event_vblank event;
-};
-
-struct drm_vblank_crtc {
- struct drm_device *dev; /* pointer to the drm_device */
- wait_queue_head_t queue; /**< VBLANK wait queue */
- struct timer_list disable_timer; /* delayed disable timer */
-
- /* vblank counter, protected by dev->vblank_time_lock for writes */
- u32 count;
- /* vblank timestamps, protected by dev->vblank_time_lock for writes */
- struct timeval time[DRM_VBLANKTIME_RBSIZE];
-
- atomic_t refcount; /* number of users of vblank interruptsper crtc */
- u32 last; /* protected by dev->vbl_lock, used */
- /* for wraparound handling */
- u32 last_wait; /* Last vblank seqno waited per CRTC */
- unsigned int inmodeset; /* Display driver is setting mode */
- unsigned int pipe; /* crtc index */
- int framedur_ns; /* frame/field duration in ns */
- int linedur_ns; /* line duration in ns */
- bool enabled; /* so we don't call enable more than
- once per disable */
};
/**
@@ -759,6 +701,10 @@ struct drm_device {
struct drm_minor *control; /**< Control node */
struct drm_minor *primary; /**< Primary node */
struct drm_minor *render; /**< Render node */
+
+ /* currently active master for this device. Protected by master_mutex */
+ struct drm_master *master;
+
atomic_t unplugged; /**< Flag whether dev is dead */
struct inode *anon_inode; /**< inode for private address-space */
char *unique; /**< unique name of the device */
@@ -872,6 +818,8 @@ struct drm_device {
int switch_power_state;
};
+#include <drm/drm_irq.h>
+
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
@@ -928,7 +876,6 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
-int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
@@ -959,75 +906,14 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
- /* IRQ support (drm_irq.h) */
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
-
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
- struct drm_pending_vblank_event *e);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
- struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- struct timeval *vblank_time,
- unsigned flags,
- const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
- const struct drm_display_mode *mode);
-
-/**
- * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
- * @crtc: which CRTC's vblank waitqueue to retrieve
- *
- * This function returns a pointer to the vblank waitqueue for the CRTC.
- * Drivers can use this to implement vblank waits using wait_event() & co.
- */
-static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
-{
- return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
-}
-
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
- /* Stub support (drm_stub.h) */
-extern struct drm_master *drm_master_get(struct drm_master *master);
-extern void drm_master_put(struct drm_master **master);
-
-extern void drm_put_dev(struct drm_device *dev);
-extern void drm_unplug_dev(struct drm_device *dev);
+/* drm_drv.c */
+void drm_put_dev(struct drm_device *dev);
+void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
-extern bool drm_atomic;
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
@@ -1078,11 +964,13 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev);
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent);
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent);
void drm_dev_ref(struct drm_device *dev);
void drm_dev_unref(struct drm_device *dev);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
-int drm_dev_set_unique(struct drm_device *dev, const char *name);
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
@@ -1135,7 +1023,6 @@ extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
-extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 92c84e9ab09a..856a9c85a838 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,12 @@
#include <drm/drm_crtc.h>
+void drm_crtc_commit_put(struct drm_crtc_commit *commit);
+static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
+{
+ kref_get(&commit->ref);
+}
+
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -71,7 +77,7 @@ static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- return state->crtc_states[drm_crtc_index(crtc)];
+ return state->crtcs[drm_crtc_index(crtc)].state;
}
/**
@@ -86,7 +92,7 @@ static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
- return state->plane_states[drm_plane_index(plane)];
+ return state->planes[drm_plane_index(plane)].state;
}
/**
@@ -106,7 +112,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
if (index >= state->num_connector)
return NULL;
- return state->connector_states[index];
+ return state->connectors[index].state;
+}
+
+/**
+ * __drm_atomic_get_current_plane_state - get current plane state
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the plane state for the given plane, either from
+ * @state, or if the plane isn't part of the atomic state update, from @plane.
+ * This is useful in atomic check callbacks, when drivers need to peek at, but
+ * not change, the state of other planes, since it avoids threading an error code
+ * back up the call chain.
+ *
+ * WARNING:
+ *
+ * Note that this function is in general unsafe since it doesn't check for the
+ * required locking to access the state structures. Drivers must ensure that it is
+ * safe to access the returned state structure through other means. One common
+ * example is when planes are fixed to a single CRTC, and the driver knows that
+ * the CRTC lock is held already. In that case holding the CRTC lock gives a
+ * read-lock on all planes connected to that CRTC. But if planes can be
+ * reassigned things get more tricky. In that case it's better to use
+ * drm_atomic_get_plane_state and wire up full error handling.
+ *
+ * Returns:
+ *
+ * Read-only pointer to the current plane state.
+ */
+static inline const struct drm_plane_state *
+__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ if (state->planes[drm_plane_index(plane)].state)
+ return state->planes[drm_plane_index(plane)].state;
+
+ return plane->state;
}
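As a hedged aside, a driver might use __drm_atomic_get_current_plane_state() from a CRTC's ->atomic_check() roughly as below, assuming the primary plane is fixed to that CRTC so holding the CRTC lock is sufficient (see the WARNING above); the function name and the fb check are illustrative only, not core policy.

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *crtc_state)
{
        /* Peek at the primary plane without pulling it into the update. */
        const struct drm_plane_state *primary_state =
                __drm_atomic_get_current_plane_state(crtc_state->state,
                                                     crtc->primary);

        /* Illustrative driver rule: an active CRTC needs a primary fb. */
        if (crtc_state->active && !primary_state->fb)
                return -EINVAL;

        return 0;
}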
int __must_check
@@ -139,29 +181,39 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
-#define for_each_connector_in_state(state, connector, connector_state, __i) \
+#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
- (__i) < (state)->num_connector && \
- ((connector) = (state)->connectors[__i], \
- (connector_state) = (state)->connector_states[__i], 1); \
+ (__i) < (__state)->num_connector && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (connector_state) = (__state)->connectors[__i].state, 1); \
(__i)++) \
for_each_if (connector)
-#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for ((__i) = 0; \
- (__i) < (state)->dev->mode_config.num_crtc && \
- ((crtc) = (state)->crtcs[__i], \
- (crtc_state) = (state)->crtc_states[__i], 1); \
+ (__i) < (__state)->dev->mode_config.num_crtc && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (crtc_state) = (__state)->crtcs[__i].state, 1); \
(__i)++) \
for_each_if (crtc_state)
-#define for_each_plane_in_state(state, plane, plane_state, __i) \
+#define for_each_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
- (__i) < (state)->dev->mode_config.num_total_plane && \
- ((plane) = (state)->planes[__i], \
- (plane_state) = (state)->plane_states[__i], 1); \
+ (__i) < (__state)->dev->mode_config.num_total_plane && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (plane_state) = (__state)->planes[__i].state, 1); \
(__i)++) \
for_each_if (plane_state)
+
+/**
+ * drm_atomic_crtc_needs_modeset - compute combined modeset need
+ * @state: &drm_crtc_state for the CRTC
+ *
+ * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
+ * whether the CRTC's state changed enough to need a full modeset cycle:
+ * connectors_changed, mode_changed and active_changed. This helper simply
+ * combines these three to compute the overall need for a modeset for @state.
+ */
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
{
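As a hedged sketch of how the reworked iterators and drm_atomic_crtc_needs_modeset() compose, the fragment below walks every CRTC in an update from a hypothetical device-wide ->atomic_check() and rejects full modesets; the function name and the reject-modeset policy are illustrative, not helper behaviour.

static int foo_atomic_check(struct drm_device *dev,
                            struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int ret, i;

        /* Let the helpers compute the *_changed booleans first. */
        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                /* Combined connectors/mode/active check for this CRTC. */
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        return -EINVAL;
        }

        return 0;
}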
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d473dcc91f54..d86ae5dcd7b4 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -38,6 +38,7 @@ int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock);
@@ -71,8 +72,15 @@ void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_sta
void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
bool atomic);
-void drm_atomic_helper_swap_state(struct drm_device *dev,
- struct drm_atomic_state *state);
+void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+ bool stall);
+
+/* nonblocking commit helpers */
+int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
+ bool nonblock);
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
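To show how the nonblocking commit helpers are meant to chain together, here is a hedged sketch of a driver-provided atomic_commit path. The foo_* names are hypothetical, the third argument of drm_atomic_helper_commit_planes() is assumed to still be the bool active_only flag of this kernel version, and a real driver would queue foo_commit_tail() on a workqueue (e.g. via &drm_atomic_state.commit_work) for nonblocking commits instead of running it inline.

static void foo_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;

        /* Stall on earlier commits touching the same CRTCs/connectors. */
        drm_atomic_helper_wait_for_dependencies(state);

        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_planes(dev, state, false);
        drm_atomic_helper_commit_modeset_enables(dev, state);

        /* Hardware programming done: unblock the next commit. */
        drm_atomic_helper_commit_hw_done(state);

        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);

        /* Old buffers cleaned up: release throttling waiters. */
        drm_atomic_helper_commit_cleanup_done(state);
}

static int foo_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state, bool nonblock)
{
        int ret;

        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
                return ret;

        drm_atomic_helper_swap_state(state, true);

        /* Simplification: always commit synchronously in this sketch. */
        foo_commit_tail(state);

        drm_atomic_state_free(state);

        return 0;
}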
/* implementations for legacy interfaces */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
@@ -147,9 +155,9 @@ void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t start, uint32_t size);
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size);
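A hedged usage note: with the new int-returning prototype an atomic driver can plug the helper straight into its &drm_crtc_funcs, as in this illustrative initializer (the driver name and the selection of other hooks are assumptions).

static const struct drm_crtc_funcs foo_crtc_funcs = {
        /* Legacy gamma ioctl backed by the GAMMA_LUT atomic property. */
        .gamma_set              = drm_atomic_helper_legacy_gamma_set,
        .set_config             = drm_atomic_helper_set_config,
        .page_flip              = drm_atomic_helper_page_flip,
        .destroy                = drm_crtc_cleanup,
        .reset                  = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
};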
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -159,7 +167,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* This iterates over the current state, useful (for example) when applying
* atomic state after it has been checked and swapped. To iterate over the
* planes which *will* be attached (for ->atomic_check()) see
- * drm_crtc_for_each_pending_plane()
+ * drm_atomic_crtc_state_for_each_plane().
*/
#define drm_atomic_crtc_for_each_plane(plane, crtc) \
drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
@@ -171,11 +179,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during (for example)
- * ->atomic_check() operations, to validate the incoming state
+ * ->atomic_check() operations, to validate the incoming state.
*/
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+/**
+ * drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @plane_state: loop cursor for the plane's state, must be const
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied. Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state.
+ *
+ * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
+ * const plane_state. This is useful when a driver just wants to peek at other
+ * active planes on this crtc, but does not need to change them.
+ */
+#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
+ for_each_if ((plane_state = \
+ __drm_atomic_get_current_plane_state((crtc_state)->state, \
+ plane)))
+
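A short, hedged example of the new iterator: counting how many planes will have a framebuffer on a CRTC, from within ->atomic_check(); the helper name is hypothetical.

static unsigned int foo_count_planes_with_fb(struct drm_crtc_state *crtc_state)
{
        struct drm_plane *plane;
        const struct drm_plane_state *plane_state;
        unsigned int count = 0;

        drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
                                                   crtc_state) {
                if (plane_state->fb)
                        count++;
        }

        return count;
}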
/*
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @plane: plane object
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
new file mode 100644
index 000000000000..610223b0481b
--- /dev/null
+++ b/include/drm/drm_auth.h
@@ -0,0 +1,59 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 2016 Intel Corporation
+ *
+ * Author: Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_AUTH_H_
+#define _DRM_AUTH_H_
+
+/**
+ * struct drm_master - drm master structure
+ *
+ * @refcount: Refcount for this master object.
+ * @dev: Link back to the DRM device
+ * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
+ * @unique_len: Length of unique field. Protected by drm_global_mutex.
+ * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
+ * @lock: DRI lock information.
+ * @driver_priv: Pointer to driver-private information.
+ *
+ * Note that master structures are only relevant for the legacy/primary device
+ * nodes, hence there can only be one per device, not one per drm_minor.
+ */
+struct drm_master {
+ struct kref refcount;
+ struct drm_device *dev;
+ char *unique;
+ int unique_len;
+ struct idr magic_map;
+ struct drm_lock_data lock;
+ void *driver_priv;
+};
+
+struct drm_master *drm_master_get(struct drm_master *master);
+void drm_master_put(struct drm_master **master);
+bool drm_is_current_master(struct drm_file *fpriv);
+
+#endif
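A hedged sketch of how a driver-private ioctl might use the new drm_is_current_master() helper to restrict an operation to the current master; the ioctl and its policy are purely illustrative (table-driven ioctls would normally use the DRM_MASTER flag instead).

static int foo_set_scanout_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        /* Only the current master may reconfigure scanout. */
        if (!drm_is_current_master(file_priv))
                return -EACCES;

        /* ... illustrative privileged work would go here ... */
        return 0;
}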
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index d1559cd04e3d..44e070800b6d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -44,6 +44,7 @@ struct drm_file;
struct drm_clip_rect;
struct device_node;
struct fence;
+struct edid;
struct drm_mode_object {
uint32_t id;
@@ -253,6 +254,8 @@ struct drm_framebuffer {
int bits_per_pixel;
int flags;
uint32_t pixel_format; /* fourcc format */
+ int hot_x;
+ int hot_y;
struct list_head filp_head;
};
@@ -305,6 +308,7 @@ struct drm_plane_helper_funcs;
* @mode_changed: crtc_state->mode or crtc_state->enable has been changed
* @active_changed: crtc_state->active has been toggled.
* @connectors_changed: connectors to this crtc have been updated
+ * @zpos_changed: zpos values of planes on this crtc have been updated
* @color_mgmt_changed: color management properties have changed (degamma or
* gamma LUT or CSC matrix)
* @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
@@ -314,6 +318,7 @@ struct drm_plane_helper_funcs;
* update to ensure framebuffer cleanup isn't done too early
* @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
* @mode: current mode timings
+ * @mode_blob: &drm_property_blob for @mode
* @degamma_lut: Lookup table for converting framebuffer pixel data
* before apply the conversion matrix
* @ctm: Transformation matrix
@@ -340,6 +345,7 @@ struct drm_crtc_state {
bool mode_changed : 1;
bool active_changed : 1;
bool connectors_changed : 1;
+ bool zpos_changed : 1;
bool color_mgmt_changed : 1;
/* attached planes bitmask:
@@ -478,8 +484,8 @@ struct drm_crtc_funcs {
* going on, which should eventually be unified to just one set of
* hooks.
*/
- void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size);
+ int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t size);
/**
* @destroy:
@@ -701,6 +707,32 @@ struct drm_crtc_funcs {
const struct drm_crtc_state *state,
struct drm_property *property,
uint64_t *val);
+
+ /**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the crtc like debugfs interfaces.
+ * It is called late in the driver load sequence from drm_dev_register().
+ * Everything added from this callback should be unregistered in
+ * the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_crtc *crtc);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the crtc from
+ * late_register(). It is called from drm_dev_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_crtc *crtc);
};
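As a hedged illustration of the new registration hooks, a driver could expose a per-CRTC debugfs directory as below; struct foo_crtc, its embedding of &drm_crtc and the directory layout are assumptions, only the hook prototypes and drm_minor.debugfs_root come from the core.

struct foo_crtc {
        struct drm_crtc base;
        struct dentry *debugfs_dir;     /* assumed driver-private storage */
};

static int foo_crtc_late_register(struct drm_crtc *crtc)
{
        struct foo_crtc *fcrtc = container_of(crtc, struct foo_crtc, base);

        /* Registered late, from drm_dev_register(). */
        fcrtc->debugfs_dir = debugfs_create_dir(crtc->name,
                                                crtc->dev->primary->debugfs_root);
        return 0;
}

static void foo_crtc_early_unregister(struct drm_crtc *crtc)
{
        struct foo_crtc *fcrtc = container_of(crtc, struct foo_crtc, base);

        /* Torn down early, from drm_dev_unregister(). */
        debugfs_remove_recursive(fcrtc->debugfs_dir);
}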
/**
@@ -708,6 +740,7 @@ struct drm_crtc_funcs {
* @dev: parent DRM device
* @port: OF node used by drm_of_find_possible_crtcs()
* @head: list management
+ * @name: human readable name, can be overwritten by the driver
* @mutex: per-CRTC locking
* @base: base KMS object for ID tracking etc.
* @primary: primary plane for this CRTC
@@ -724,9 +757,6 @@ struct drm_crtc_funcs {
* @gamma_store: gamma ramp values
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
- * @state: current atomic state for this CRTC
- * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
- * legacy IOCTLs
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -738,12 +768,13 @@ struct drm_crtc {
char *name;
- /*
- * crtc mutex
+ /**
+ * @mutex:
*
* This provides a read lock for the overall crtc state (mode, dpms
* state, ...) and a write lock for everything which can be update
- * without a full modeset (fb, cursor data, ...)
+ * without a full modeset (fb, cursor data, crtc properties ...). Full
+ * modesets also need to grab dev->mode_config.connection_mutex.
*/
struct drm_modeset_lock mutex;
@@ -753,6 +784,12 @@ struct drm_crtc {
struct drm_plane *primary;
struct drm_plane *cursor;
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the CRTC.
+ */
+ unsigned index;
+
/* position of cursor plane on crtc */
int cursor_x;
int cursor_y;
@@ -779,11 +816,37 @@ struct drm_crtc {
struct drm_object_properties properties;
+ /**
+ * @state:
+ *
+ * Current atomic state for this CRTC.
+ */
struct drm_crtc_state *state;
- /*
- * For legacy crtc IOCTLs so that atomic drivers can get at the locking
- * acquire context.
+ /**
+ * @commit_list:
+ *
+ * List of &drm_crtc_commit structures tracking pending commits.
+ * Protected by @commit_lock. This list doesn't hold its own full
+ * reference, but borrows it from the ongoing commit. Commit entries
+ * must be removed from this list once the commit is fully completed,
+ * but before its corresponding &drm_atomic_state gets destroyed.
+ */
+ struct list_head commit_list;
+
+ /**
+ * @commit_lock:
+ *
+ * Spinlock to protect @commit_list.
+ */
+ spinlock_t commit_lock;
+
+ /**
+ * @acquire_ctx:
+ *
+ * Per-CRTC implicit acquire context used by atomic drivers for legacy
+ * IOCTLs, so that atomic drivers can get at the locking acquire
+ * context.
*/
struct drm_modeset_acquire_ctx *acquire_ctx;
};
@@ -926,6 +989,33 @@ struct drm_connector_funcs {
uint64_t val);
/**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the connector, light backlight control, i2c,
+ * DP aux or similar interfaces. It is called late in the driver load
+ * sequence from drm_connector_register() when registering all the
+ * core drm connector interfaces. Everything added from this callback
+ * should be unregistered in the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_connector *connector);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the connector from
+ * late_register(). It is called from drm_connector_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_connector *connector);
+
+ /**
* @destroy:
*
* Clean up connector resources. This is called at driver unload time
@@ -1069,6 +1159,32 @@ struct drm_encoder_funcs {
* hotplugged in DRM.
*/
void (*destroy)(struct drm_encoder *encoder);
+
+ /**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the encoder like debugfs interfaces.
+ * It is called late in the driver load sequence from drm_dev_register().
+ * Everything added from this callback should be unregistered in
+ * the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_encoder *encoder);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the encoder from
+ * late_register(). It is called from drm_dev_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_encoder *encoder);
};
#define DRM_CONNECTOR_MAX_ENCODER 3
@@ -1078,7 +1194,7 @@ struct drm_encoder_funcs {
* @dev: parent DRM device
* @head: list management
* @base: base KMS object
- * @name: encoder name
+ * @name: human readable name, can be overwritten by the driver
* @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
* @possible_crtcs: bitmask of potential CRTC bindings
* @possible_clones: bitmask of potential sibling encoders for cloning
@@ -1097,6 +1213,13 @@ struct drm_encoder {
struct drm_mode_object base;
char *name;
int encoder_type;
+
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the encoder.
+ */
+ unsigned index;
+
uint32_t possible_crtcs;
uint32_t possible_clones;
@@ -1124,12 +1247,13 @@ struct drm_encoder {
* @attr: sysfs attributes
* @head: list management
* @base: base KMS object
- * @name: connector name
+ * @name: human readable name, can be overwritten by the driver
* @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @stereo_allowed: can this connector handle stereo modes?
+ * @registered: is this connector exposed (registered) with userspace?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
@@ -1137,7 +1261,6 @@ struct drm_encoder {
* @funcs: connector control functions
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
- * @path_blob_ptr: DRM blob property data for the DP MST path property
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
@@ -1181,12 +1304,21 @@ struct drm_connector {
struct drm_mode_object base;
char *name;
- int connector_id;
+
+ /**
+ * @index: Compacted connector index, which matches the position inside
+ * the mode_config.list for drivers not supporting hot-add/removing. Can
+ * be used as an array index. It is invariant over the lifetime of the
+ * connector.
+ */
+ unsigned index;
+
int connector_type;
int connector_type_id;
bool interlace_allowed;
bool doublescan_allowed;
bool stereo_allowed;
+ bool registered;
struct list_head modes; /* list of modes on this connector */
enum drm_connector_status status;
@@ -1200,8 +1332,23 @@ struct drm_connector {
struct drm_property_blob *edid_blob_ptr;
struct drm_object_properties properties;
+ /**
+ * @path_blob_ptr:
+ *
+ * DRM blob property data for the DP MST path property.
+ */
struct drm_property_blob *path_blob_ptr;
+ /**
+ * @tile_blob_ptr:
+ *
+ * DRM blob property data for the tile property (used mostly by DP MST).
+ * This is meant for screens which are driven through separate display
+ * pipelines represented by &drm_crtc, which might not be running with
+ * genlocked clocks. For tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose
+ * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ */
struct drm_property_blob *tile_blob_ptr;
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -1263,6 +1410,10 @@ struct drm_connector {
* plane (in 16.16)
* @src_w: width of visible portion of plane (in 16.16)
* @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
+ * @zpos: priority of the given plane on crtc (optional)
+ * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
+ * where N is the number of active planes for given crtc
* @state: backpointer to global drm_atomic_state
*/
struct drm_plane_state {
@@ -1283,6 +1434,10 @@ struct drm_plane_state {
/* Plane rotation */
unsigned int rotation;
+ /* Plane zpos */
+ unsigned int zpos;
+ unsigned int normalized_zpos;
+
struct drm_atomic_state *state;
};
@@ -1490,6 +1645,31 @@ struct drm_plane_funcs {
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val);
+ /**
+ * @late_register:
+ *
+ * This optional hook can be used to register additional userspace
+ * interfaces attached to the plane like debugfs interfaces.
+ * It is called late in the driver load sequence from drm_dev_register().
+ * Everything added from this callback should be unregistered in
+ * the early_unregister callback.
+ *
+ * Returns:
+ *
+ * 0 on success, or a negative error code on failure.
+ */
+ int (*late_register)(struct drm_plane *plane);
+
+ /**
+ * @early_unregister:
+ *
+ * This optional hook should be used to unregister the additional
+ * userspace interfaces attached to the plane from
+ * late_register(). It is called from drm_dev_unregister(),
+ * early in the driver unload sequence to disable userspace access
+ * before data structures are torn down.
+ */
+ void (*early_unregister)(struct drm_plane *plane);
};
enum drm_plane_type {
@@ -1503,6 +1683,7 @@ enum drm_plane_type {
* struct drm_plane - central DRM plane control structure
* @dev: DRM device this plane belongs to
* @head: for list management
+ * @name: human readable name, can be overwritten by the driver
* @base: base mode object
* @possible_crtcs: pipes this plane can be bound to
* @format_types: array of formats supported by this plane
@@ -1516,6 +1697,8 @@ enum drm_plane_type {
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
* @state: current atomic state for this plane
+ * @zpos_property: zpos property for this plane
+ * @helper_private: mid-layer private data
*/
struct drm_plane {
struct drm_device *dev;
@@ -1523,6 +1706,13 @@ struct drm_plane {
char *name;
+ /**
+ * @mutex:
+ *
+ * Protects modeset plane state, together with the mutex of &drm_crtc
+ * this plane is linked to (when active, getting activated or getting
+ * disabled).
+ */
struct drm_modeset_lock mutex;
struct drm_mode_object base;
@@ -1543,9 +1733,17 @@ struct drm_plane {
enum drm_plane_type type;
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the plane.
+ */
+ unsigned index;
+
const struct drm_plane_helper_funcs *helper_private;
struct drm_plane_state *state;
+
+ struct drm_property *zpos_property;
};
/**
@@ -1694,18 +1892,136 @@ struct drm_bridge {
};
/**
+ * struct drm_crtc_commit - track modeset commits on a CRTC
+ *
+ * This structure is used to track pending modeset changes and atomic commit on
+ * a per-CRTC basis. Since updating the list should never block, this structure
+ * is reference counted to allow waiters to safely wait on an event to complete,
+ * without holding any locks.
+ *
+ * It has 3 different events in total to allow a fine-grained synchronization
+ * between outstanding updates::
+ *
+ * atomic commit thread hardware
+ *
+ * write new state into hardware ----> ...
+ * signal hw_done
+ * switch to new state on next
+ * ... v/hblank
+ *
+ * wait for buffers to show up ...
+ *
+ * ... send completion irq
+ * irq handler signals flip_done
+ * cleanup old buffers
+ *
+ * signal cleanup_done
+ *
+ * wait for flip_done <----
+ * clean up atomic state
+ *
+ * The important bit to know is that cleanup_done is the terminal event, but the
+ * ordering between flip_done and hw_done is entirely up to the specific driver
+ * and modeset state change.
+ *
+ * For an implementation of how to use this look at
+ * drm_atomic_helper_setup_commit() from the atomic helper library.
+ */
+struct drm_crtc_commit {
+ /**
+ * @crtc:
+ *
+ * DRM CRTC for this commit.
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @ref:
+ *
+ * Reference count for this structure. Needed to allow blocking on
+ * completions without the risk of the completion disappearing
+ * meanwhile.
+ */
+ struct kref ref;
+
+ /**
+ * @flip_done:
+ *
+ * Will be signaled when the hardware has flipped to the new set of
+ * buffers. Signals at the same time as when the drm event for this
+ * commit is sent to userspace, or when an out-fence is signalled. Note
+ * that for most hardware, in most cases this happens after @hw_done is
+ * signalled.
+ */
+ struct completion flip_done;
+
+ /**
+ * @hw_done:
+ *
+ * Will be signalled when all hw register changes for this commit have
+ * been written out. Especially when disabling a pipe this can be much
+ * later than @flip_done, since that can already signal when the
+ * screen goes black, whereas to fully shut down a pipe more register
+ * I/O is required.
+ *
+ * Note that this does not need to include separately reference-counted
+ * resources like backing storage buffer pinning, or runtime pm
+ * management.
+ */
+ struct completion hw_done;
+
+ /**
+ * @cleanup_done:
+ *
+ * Will be signalled after old buffers have been cleaned up by calling
+ * drm_atomic_helper_cleanup_planes(). Since this can only happen after
+ * a vblank wait has completed, it might be a bit later. This completion is
+ * useful to throttle updates and avoid hardware updates getting ahead
+ * of the buffer cleanup too much.
+ */
+ struct completion cleanup_done;
+
+ /**
+ * @commit_entry:
+ *
+ * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+ */
+ struct list_head commit_entry;
+
+ /**
+ * @event:
+ *
+ * &drm_pending_vblank_event pointer to clean up private events.
+ */
+ struct drm_pending_vblank_event *event;
+};
+
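To make the completion and refcount handling above concrete, here is a hedged sketch that stalls on the most recent commit still pending on a CRTC until its buffers have flipped; it loosely mirrors the throttling drm_atomic_helper_setup_commit() does internally, but the function name and the 10 second timeout are illustrative.

static int foo_stall_on_pending_flip(struct drm_crtc *crtc)
{
        struct drm_crtc_commit *commit;
        long ret;

        spin_lock(&crtc->commit_lock);
        commit = list_first_entry_or_null(&crtc->commit_list,
                                          struct drm_crtc_commit, commit_entry);
        if (commit)
                drm_crtc_commit_get(commit);    /* keep it alive while waiting */
        spin_unlock(&crtc->commit_lock);

        if (!commit)
                return 0;

        ret = wait_for_completion_interruptible_timeout(&commit->flip_done,
                                                        10 * HZ);
        drm_crtc_commit_put(commit);

        if (ret == 0)
                return -EBUSY;

        return ret < 0 ? ret : 0;
}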
+struct __drm_planes_state {
+ struct drm_plane *ptr;
+ struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+ struct drm_crtc *ptr;
+ struct drm_crtc_state *state;
+ struct drm_crtc_commit *commit;
+};
+
+struct __drm_connnectors_state {
+ struct drm_connector *ptr;
+ struct drm_connector_state *state;
+};
+
+/**
* struct drm_atomic_state - the global state object for atomic updates
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of plane pointers
- * @plane_states: pointer to array of plane states pointers
+ * @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
- * @crtc_states: pointer to array of CRTC states pointers
* @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of connector pointers
- * @connector_states: pointer to array of connector states pointers
+ * @connectors: pointer to array of structures with per-connector data
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
@@ -1713,15 +2029,20 @@ struct drm_atomic_state {
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool legacy_set_config : 1;
- struct drm_plane **planes;
- struct drm_plane_state **plane_states;
- struct drm_crtc **crtcs;
- struct drm_crtc_state **crtc_states;
+ struct __drm_planes_state *planes;
+ struct __drm_crtcs_state *crtcs;
int num_connector;
- struct drm_connector **connectors;
- struct drm_connector_state **connector_states;
+ struct __drm_connnectors_state *connectors;
struct drm_modeset_acquire_ctx *acquire_ctx;
+
+ /**
+ * @commit_work:
+ *
+ * Work item which can be used by the driver or helpers to execute the
+ * commit without blocking.
+ */
+ struct work_struct commit_work;
};
@@ -2022,13 +2343,9 @@ struct drm_mode_config_funcs {
* @connection_mutex: ww mutex protecting connector state and routing
* @acquire_ctx: global implicit acquire context used by atomic drivers for
* legacy IOCTLs
- * @idr_mutex: mutex for KMS ID allocation and management
- * @crtc_idr: main KMS ID tracking object
* @fb_lock: mutex to protect fb state and lists
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
- * @num_connector: number of connectors on this device
- * @connector_list: list of connector objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_overlay_plane: number of overlay planes on this device
@@ -2045,24 +2362,16 @@ struct drm_mode_config_funcs {
* @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
* @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent deliver for this device
* @output_poll_work: delayed work for polling in process context
* @property_blob_list: list of all the blob property objects
* @blob_lock: mutex for blob property allocation and management
* @*_property: core property tracking
- * @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
- * gamma
- * @degamma_lut_size_property: size of the degamma LUT as supported by the
- * driver (read-only)
- * @ctm_property: Matrix used to convert colors after the lookup in the
- * degamma LUT
- * @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
- * the gamma space of the connected screen (read-only)
- * @gamma_lut_size_property: size of the gamma LUT as supported by the driver
* @preferred_depth: preferred RBG pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @async_page_flip: does this device support async flips on the primary plane?
* @cursor_width: hint to userspace for max cursor width
* @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
*
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
@@ -2072,17 +2381,46 @@ struct drm_mode_config {
struct mutex mutex; /* protects configuration (mode lists etc.) */
struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
- struct mutex idr_mutex; /* for IDR management */
- struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
- struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
- /* this is limited to one for now */
+
+ /**
+ * @idr_mutex:
+ *
+ * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+ * and @tile_idr.
+ */
+ struct mutex idr_mutex;
+
+ /**
+ * @crtc_idr:
+ *
+ * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+ * connector, modes - just makes life easier to have only one.
+ */
+ struct idr crtc_idr;
+
+ /**
+ * @tile_idr:
+ *
+ * Use this idr for allocating new IDs for tiled sinks, as used in some
+ * high-res DP MST screens.
+ */
+ struct idr tile_idr;
struct mutex fb_lock; /* proctects global and per-file fb lists */
int num_fb;
struct list_head fb_list;
+ /**
+ * @num_connector: Number of connectors on this device.
+ */
int num_connector;
+ /**
+ * @connector_ida: ID allocator for connector indices.
+ */
struct ida connector_ida;
+ /**
+ * @connector_list: List of connector objects.
+ */
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
@@ -2117,71 +2455,251 @@ struct drm_mode_config {
/* pointers to standard properties */
struct list_head property_blob_list;
+ /**
+ * @edid_property: Default connector property to hold the EDID of the
+ * currently connected sink, if any.
+ */
struct drm_property *edid_property;
+ /**
+ * @dpms_property: Default connector property to control the
+ * connector's DPMS state.
+ */
struct drm_property *dpms_property;
+ /**
+ * @path_property: Default connector property to hold the DP MST path
+ * for the port.
+ */
struct drm_property *path_property;
+ /**
+ * @tile_property: Default connector property to store the tile
+ * position of a tiled screen, for sinks which need to be driven with
+ * multiple CRTCs.
+ */
struct drm_property *tile_property;
+ /**
+ * @plane_type_property: Default plane property to differentiate
+ * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+ */
struct drm_property *plane_type_property;
+ /**
+ * @rotation_property: Optional property for planes or CRTCs to specify
+ * rotation.
+ */
struct drm_property *rotation_property;
+ /**
+ * @prop_src_x: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_x;
+ /**
+ * @prop_src_y: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_y;
+ /**
+ * @prop_src_w: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_w;
+ /**
+ * @prop_src_h: Default atomic plane property for the plane source
+ * position in the connected &drm_framebuffer.
+ */
struct drm_property *prop_src_h;
+ /**
+ * @prop_crtc_x: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
struct drm_property *prop_crtc_x;
+ /**
+ * @prop_crtc_y: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
struct drm_property *prop_crtc_y;
+ /**
+ * @prop_crtc_w: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
struct drm_property *prop_crtc_w;
+ /**
+ * @prop_crtc_h: Default atomic plane property for the plane destination
+ * position in the &drm_crtc it is being shown on.
+ */
struct drm_property *prop_crtc_h;
+ /**
+ * @prop_fb_id: Default atomic plane property to specify the
+ * &drm_framebuffer.
+ */
struct drm_property *prop_fb_id;
+ /**
+ * @prop_crtc_id: Default atomic plane property to specify the
+ * &drm_crtc.
+ */
struct drm_property *prop_crtc_id;
+ /**
+ * @prop_active: Default atomic CRTC property to control the active
+ * state, which is the simplified implementation for DPMS in atomic
+ * drivers.
+ */
struct drm_property *prop_active;
+ /**
+ * @prop_mode_id: Default atomic CRTC property to set the mode for a
+ * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+ * connectors must be off and active must be set to disabled, too.
+ */
struct drm_property *prop_mode_id;
- /* DVI-I properties */
+ /**
+ * @dvi_i_subconnector_property: Optional DVI-I property to
+ * differentiate between analog or digital mode.
+ */
struct drm_property *dvi_i_subconnector_property;
+ /**
+ * @dvi_i_select_subconnector_property: Optional DVI-I property to
+ * select between analog or digital mode.
+ */
struct drm_property *dvi_i_select_subconnector_property;
- /* TV properties */
+ /**
+ * @tv_subconnector_property: Optional TV property to differentiate
+ * between different TV connector types.
+ */
struct drm_property *tv_subconnector_property;
+ /**
+ * @tv_select_subconnector_property: Optional TV property to select
+ * between different TV connector types.
+ */
struct drm_property *tv_select_subconnector_property;
+ /**
+ * @tv_mode_property: Optional TV property to select
+ * the output TV mode.
+ */
struct drm_property *tv_mode_property;
+ /**
+ * @tv_left_margin_property: Optional TV property to set the left
+ * margin.
+ */
struct drm_property *tv_left_margin_property;
+ /**
+ * @tv_right_margin_property: Optional TV property to set the right
+ * margin.
+ */
struct drm_property *tv_right_margin_property;
+ /**
+ * @tv_top_margin_property: Optional TV property to set the top
+ * margin.
+ */
struct drm_property *tv_top_margin_property;
+ /**
+ * @tv_bottom_margin_property: Optional TV property to set the bottom
+ * margin.
+ */
struct drm_property *tv_bottom_margin_property;
+ /**
+ * @tv_brightness_property: Optional TV property to set the
+ * brightness.
+ */
struct drm_property *tv_brightness_property;
+ /**
+ * @tv_contrast_property: Optional TV property to set the
+ * contrast.
+ */
struct drm_property *tv_contrast_property;
+ /**
+ * @tv_flicker_reduction_property: Optional TV property to control the
+ * flicker reduction mode.
+ */
struct drm_property *tv_flicker_reduction_property;
+ /**
+ * @tv_overscan_property: Optional TV property to control the overscan
+ * setting.
+ */
struct drm_property *tv_overscan_property;
+ /**
+ * @tv_saturation_property: Optional TV property to set the
+ * saturation.
+ */
struct drm_property *tv_saturation_property;
+ /**
+ * @tv_hue_property: Optional TV property to set the hue.
+ */
struct drm_property *tv_hue_property;
- /* Optional properties */
+ /**
+ * @scaling_mode_property: Optional connector property to control the
+ * upscaling, mostly used for built-in panels.
+ */
struct drm_property *scaling_mode_property;
+ /**
+ * @aspect_ratio_property: Optional connector property to control the
+ * HDMI infoframe aspect ratio setting.
+ */
struct drm_property *aspect_ratio_property;
+ /**
+ * @dirty_info_property: Optional connector property to give userspace a
+ * hint that the DIRTY_FB ioctl should be used.
+ */
struct drm_property *dirty_info_property;
- /* Optional color correction properties */
+ /**
+ * @degamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the framebuffer's colors to linear gamma.
+ */
struct drm_property *degamma_lut_property;
+ /**
+ * @degamma_lut_size_property: Optional CRTC property for the size of
+ * the degamma LUT as supported by the driver (read-only).
+ */
struct drm_property *degamma_lut_size_property;
+ /**
+ * @ctm_property: Optional CRTC property to set the
+ * matrix used to convert colors after the lookup in the
+ * degamma LUT.
+ */
struct drm_property *ctm_property;
+ /**
+ * @gamma_lut_property: Optional CRTC property to set the LUT used to
+ * convert the colors, after the CTM matrix, to the gamma space of the
+ * connected screen.
+ */
struct drm_property *gamma_lut_property;
+ /**
+ * @gamma_lut_size_property: Optional CRTC property for the size of the
+ * gamma LUT as supported by the driver (read-only).
+ */
struct drm_property *gamma_lut_size_property;
- /* properties for virtual machine layout */
+ /**
+ * @suggested_x_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
struct drm_property *suggested_x_property;
+ /**
+ * @suggested_y_property: Optional connector property with a hint for
+ * the position of the output on the host's screen.
+ */
struct drm_property *suggested_y_property;
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
- /* whether async page flip is supported or not */
+ /**
+ * @async_page_flip: Does this device support async flips on the primary
+ * plane?
+ */
bool async_page_flip;
- /* whether the driver supports fb modifiers */
+ /**
+ * @allow_fb_modifiers:
+ *
+ * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ */
bool allow_fb_modifiers;
/* cursor size */
uint32_t cursor_width, cursor_height;
+
+ struct drm_mode_config_helper_funcs *helper_private;
};
/**
@@ -2230,7 +2748,18 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
-extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+/**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+ return crtc->index;
+}
/**
* drm_crtc_mask - find the mask of a registered CRTC
@@ -2244,47 +2773,36 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
return 1 << drm_crtc_index(crtc);
}
-extern void drm_connector_ida_init(void);
-extern void drm_connector_ida_destroy(void);
-extern int drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type);
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
extern void drm_connector_cleanup(struct drm_connector *connector);
static inline unsigned drm_connector_index(struct drm_connector *connector)
{
- return connector->connector_id;
+ return connector->index;
}
-/* helpers to {un}register all connectors from sysfs for device */
-extern int drm_connector_register_all(struct drm_device *dev);
-extern void drm_connector_unregister_all(struct drm_device *dev);
-
-extern int drm_bridge_add(struct drm_bridge *bridge);
-extern void drm_bridge_remove(struct drm_bridge *bridge);
-extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
-
extern __printf(5, 6)
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
-extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+ return encoder->index;
+}
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2315,17 +2833,24 @@ extern int drm_plane_init(struct drm_device *dev,
const uint32_t *formats, unsigned int format_count,
bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
-extern unsigned int drm_plane_index(struct drm_plane *plane);
+
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+ return plane->index;
+}
extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
extern void drm_plane_force_disable(struct drm_plane *plane);
-extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
- u32 format);
extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay);
-extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
- int x, int y,
- const struct drm_display_mode *mode,
- const struct drm_framebuffer *fb);
+extern int drm_crtc_force_disable(struct drm_crtc *crtc);
+extern int drm_crtc_force_disable_all(struct drm_device *dev);
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
@@ -2336,16 +2861,6 @@ extern const char *drm_get_dvi_i_subconnector_name(int val);
extern const char *drm_get_dvi_i_select_name(int val);
extern const char *drm_get_tv_subconnector_name(int val);
extern const char *drm_get_tv_select_name(int val);
-extern void drm_fb_release(struct drm_file *file_priv);
-extern void drm_property_destroy_user_blobs(struct drm_device *dev,
- struct drm_file *file_priv);
-extern bool drm_probe_ddc(struct i2c_adapter *adapter);
-extern struct edid *drm_get_edid(struct drm_connector *connector,
- struct i2c_adapter *adapter);
-extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
- struct i2c_adapter *adapter);
-extern struct edid *drm_edid_duplicate(const struct edid *edid);
-extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
@@ -2369,13 +2884,6 @@ static inline bool drm_property_type_is(struct drm_property *property,
return property->flags & type;
}
-static inline bool drm_property_type_valid(struct drm_property *property)
-{
- if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
- return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
- return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-}
-
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
@@ -2433,86 +2941,15 @@ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-extern bool drm_property_change_valid_get(struct drm_property *property,
- uint64_t value, struct drm_mode_object **ref);
-extern void drm_property_change_valid_put(struct drm_property *property,
- struct drm_mode_object *ref);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
-extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
- uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
-/* IOCTLs */
-extern int drm_mode_getresources(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_mode_getcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_getconnector(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
extern int drm_mode_set_config_internal(struct drm_mode_set *set);
-extern int drm_mode_setcrtc(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_getplane(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_setplane(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_cursor_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
+
extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-extern int drm_mode_rmfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_getfb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
-extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_getblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_createblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_getencoder(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
-extern bool drm_detect_hdmi_monitor(struct edid *edid);
-extern bool drm_detect_monitor_audio(struct edid *edid);
-extern bool drm_rgb_quant_range_selectable(struct edid *edid);
-extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
-extern void drm_set_preferred_mode(struct drm_connector *connector,
- int hpref, int vpref);
-
-extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
-extern bool drm_edid_is_valid(struct edid *edid);
-extern void drm_edid_get_monitor_name(struct edid *edid, char *name,
- int buflen);
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
char topology[8]);
@@ -2520,41 +2957,32 @@ extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
char topology[8]);
extern void drm_mode_put_tile_group(struct drm_device *dev,
struct drm_tile_group *tg);
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
- int hsize, int vsize, int fresh,
- bool rb);
-extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
uint64_t value);
-extern int drm_mode_atomic_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-
-extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp);
-extern int drm_format_num_planes(uint32_t format);
-extern int drm_format_plane_cpp(uint32_t format, int plane);
-extern int drm_format_horz_chroma_subsampling(uint32_t format);
-extern int drm_format_vert_chroma_subsampling(uint32_t format);
-extern int drm_format_plane_width(int width, uint32_t format, int plane);
-extern int drm_format_plane_height(int height, uint32_t format, int plane);
-extern const char *drm_get_format_name(uint32_t format);
+
extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations);
extern unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations);
+extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ uint degamma_lut_size,
+ bool has_ctm,
+ uint gamma_lut_size);
+
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+ unsigned int zpos,
+ unsigned int min, unsigned int max);
+
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+ unsigned int zpos);
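A hedged example of hooking up the new zpos properties at plane-init time; the 0..3 range and the fixed cursor position are illustrative hardware assumptions, not requirements of the API.

static int foo_plane_add_zpos(struct drm_plane *plane, enum drm_plane_type type)
{
        /* Cursor pinned on top (illustrative); overlays movable in 0..3. */
        if (type == DRM_PLANE_TYPE_CURSOR)
                return drm_plane_create_zpos_immutable_property(plane, 3);

        return drm_plane_create_zpos_property(plane, 0, 0, 3);
}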
/* Helpers */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
uint32_t id)
@@ -2720,4 +3148,50 @@ assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
+/* drm_edid.c */
+bool drm_probe_ddc(struct i2c_adapter *adapter);
+struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+struct edid *drm_edid_duplicate(const struct edid *edid);
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+bool drm_detect_hdmi_monitor(struct edid *edid);
+bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_rgb_quant_range_selectable(struct edid *edid);
+int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref);
+
+int drm_edid_header_is_valid(const u8 *raw_edid);
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+ bool *edid_corrupt);
+bool drm_edid_is_valid(struct edid *edid);
+void drm_edid_get_monitor_name(struct edid *edid, char *name,
+ int buflen);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
+
+/* drm_bridge.c */
+extern int drm_bridge_add(struct drm_bridge *bridge);
+extern void drm_bridge_remove(struct drm_bridge *bridge);
+extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
+
#endif /* __DRM_CRTC_H__ */
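
Usage sketch (illustrative only, not part of this diff): the EDID helpers listed above are typically consumed from a connector's ->get_modes() hook. The foo_* names and the ddc adapter pointer are hypothetical.

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_device *foo = connector->dev->dev_private;	/* hypothetical */
	struct edid *edid;
	int count = 0;

	/* fetch the EDID over DDC and expose it via the EDID property */
	edid = drm_get_edid(connector, foo->ddc);
	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	} else {
		/* no EDID: fall back to the standard no-EDID mode list */
		count = drm_add_modes_noedid(connector, 1024, 768);
	}
	return count;
}
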
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 97fa894d4ee2..4b37afa2b73b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -48,9 +48,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
-extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
- int degamma_lut_size,
- int gamma_lut_size);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9d03f167007b..63b8bd502444 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
+#define EDP_DISPLAY_CTL_CAP_SIZE 3
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -656,6 +657,8 @@ struct edp_vsc_psr {
#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
+int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
+
static inline int
drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
@@ -746,7 +749,14 @@ struct drm_dp_aux {
struct mutex hw_mutex;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
- unsigned i2c_nack_count, i2c_defer_count;
+ /**
+ * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+ */
+ unsigned i2c_nack_count;
+ /**
+ * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+ */
+ unsigned i2c_defer_count;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -804,6 +814,7 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
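
Usage sketch (illustrative only, not part of this diff): reading the sink's receiver capabilities through the DPCD helpers, assuming aux points to an already initialised struct drm_dp_aux.

	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	int max_rate = 0;

	/* read the receiver capability block starting at DPCD address 0x000 */
	if (drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd,
			     sizeof(dpcd)) == sizeof(dpcd))
		max_rate = drm_dp_max_link_rate(dpcd);
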
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index fdb47051d549..003207670597 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -87,7 +87,15 @@ struct drm_dp_mst_port {
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
- struct edid *cached_edid; /* for DP logical ports - make tiling work */
+ /**
+ * @cached_edid: for DP logical ports - make tiling work by ensuring
+ * that the EDID for all connectors is read immediately.
+ */
+ struct edid *cached_edid;
+ /**
+ * @has_audio: Tracks whether the sink connected to this port is
+ * audio-capable.
+ */
bool has_audio;
};
@@ -397,71 +405,154 @@ struct drm_dp_payload {
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
- * @dev: device pointer for adding i2c devices etc.
- * @cbs: callbacks for connector addition and destruction.
- * @max_dpcd_transaction_bytes - maximum number of bytes to read/write in one go.
- * @aux: aux channel for the DP connector.
- * @max_payloads: maximum number of payloads the GPU can generate.
- * @conn_base_id: DRM connector ID this mgr is connected to.
- * @down_rep_recv: msg receiver state for down replies.
- * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, dpcd.
- * @mst_state: if this manager is enabled for an MST capable port.
- * @mst_primary: pointer to the primary branch device.
- * @dpcd: cache of DPCD for primary port.
- * @pbn_div: PBN to slots divisor.
*
* This struct represents the toplevel displayport MST topology manager.
* There should be one instance of this for every MST capable DP connector
* on the GPU.
*/
struct drm_dp_mst_topology_mgr {
-
+ /**
+ * @dev: device pointer for adding i2c devices etc.
+ */
struct device *dev;
+ /**
+ * @cbs: callbacks for connector addition and destruction.
+ */
const struct drm_dp_mst_topology_cbs *cbs;
+ /**
+ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
+ * in one go.
+ */
int max_dpcd_transaction_bytes;
- struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+ /**
+ * @aux: AUX channel for the DP MST connector this topology mgr is
+ * controlling.
+ */
+ struct drm_dp_aux *aux;
+ /**
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ */
int max_payloads;
+ /**
+ * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+ * to build the MST connector path value.
+ */
int conn_base_id;
- /* only ever accessed from the workqueue - which should be serialised */
+ /**
+ * @down_rep_recv: Message receiver state for down replies. This and
+ * @up_req_recv are only ever accessed from the work item, which is
+ * serialised.
+ */
struct drm_dp_sideband_msg_rx down_rep_recv;
+ /**
+ * @up_req_recv: Message receiver state for up requests. This and
+ * @down_rep_recv are only ever accessed from the work item, which is
+ * serialised.
+ */
struct drm_dp_sideband_msg_rx up_req_recv;
- /* pointer to info about the initial MST device */
- struct mutex lock; /* protects mst_state + primary + dpcd */
+ /**
+ * @lock: protects mst state, primary, dpcd.
+ */
+ struct mutex lock;
+ /**
+ * @mst_state: If this manager is enabled for an MST capable port. False
+ * if no MST sink/branch device is connected.
+ */
bool mst_state;
+ /**
+ * @mst_primary: Pointer to the primary/first branch device.
+ */
struct drm_dp_mst_branch *mst_primary;
+ /**
+ * @dpcd: Cache of DPCD for primary port.
+ */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ /**
+ * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+ */
u8 sink_count;
+ /**
+ * @pbn_div: PBN to slots divisor.
+ */
int pbn_div;
+ /**
+ * @total_slots: Total slots that can be allocated.
+ */
int total_slots;
+ /**
+ * @avail_slots: Still available slots that can be allocated.
+ */
int avail_slots;
+ /**
+ * @total_pbn: Total PBN count.
+ */
int total_pbn;
- /* messages to be transmitted */
- /* qlock protects the upq/downq and in_progress,
- the mstb tx_slots and txmsg->state once they are queued */
+ /**
+ * @qlock: protects @tx_msg_downq, the tx_slots in struct
+ * &drm_dp_mst_branch and txmsg->state once they are queued
+ */
struct mutex qlock;
+ /**
+ * @tx_msg_downq: List of pending down replies.
+ */
struct list_head tx_msg_downq;
- bool tx_down_in_progress;
- /* payload info + lock for it */
+ /**
+ * @payload_lock: Protect payload information.
+ */
struct mutex payload_lock;
+ /**
+ * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
+ * VCPI structure itself is embedded into the corresponding
+ * &drm_dp_mst_port structure.
+ */
struct drm_dp_vcpi **proposed_vcpis;
+ /**
+ * @payloads: Array of payloads.
+ */
struct drm_dp_payload *payloads;
+ /**
+ * @payload_mask: Elements of @payloads actually in use. Since
+ * reallocation of active outputs isn't possible, gaps can be created by
+ * disabling outputs out of order compared to how they've been enabled.
+ */
unsigned long payload_mask;
+ /**
+ * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
+ */
unsigned long vcpi_mask;
+ /**
+ * @tx_waitq: Wait queue to stall for the tx worker.
+ */
wait_queue_head_t tx_waitq;
+ /**
+ * @work: Probe work.
+ */
struct work_struct work;
-
+ /**
+ * @tx_work: Sideband transmit worker. This can nest within the main
+ * @work worker for each transaction @work launches.
+ */
struct work_struct tx_work;
+ /**
+ * @destroy_connector_list: List of connectors to be destroyed.
+ */
struct list_head destroy_connector_list;
+ /**
+ * @destroy_connector_lock: Protects @destroy_connector_list.
+ */
struct mutex destroy_connector_lock;
+ /**
+ * @destroy_connector_work: Work item to destroy connectors. Needed to
+ * avoid locking inversion.
+ */
struct work_struct destroy_connector_work;
};
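
Usage sketch (illustrative only, not part of this diff): as the comments note, one topology manager is embedded per MST-capable connector and initialised once at driver load. The foo_connector container and its fields are hypothetical; dev is the struct drm_device.

	/* 16-byte DPCD transactions, up to 4 payload streams */
	ret = drm_dp_mst_topology_mgr_init(&foo_connector->mst_mgr, dev->dev,
					   &foo_connector->aux, 16, 4,
					   foo_connector->base.base.id);
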
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index fd0dde9f0a6d..f313211f8ed5 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -23,6 +23,7 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
const struct drm_framebuffer_funcs *funcs);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5b4aa35026a3..db8d4780eaa2 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -212,17 +212,6 @@ struct drm_fb_helper {
* needs to be reprobe when fbdev is in control again.
*/
bool delayed_hotplug;
-
- /**
- * @atomic:
- *
- * Use atomic updates for restore_fbdev_mode(), etc. This defaults to
- * true if driver has DRIVER_ATOMIC feature flag, but drivers can
- * override it to true after drm_fb_helper_init() if they support atomic
- * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
- * does not require ASYNC commits).
- */
- bool atomic;
};
#ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644
index 000000000000..7f90a396cf2b
--- /dev/null
+++ b/include/drm/drm_fourcc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#ifndef __DRM_FOURCC_H__
+#define __DRM_FOURCC_H__
+
+#include <linux/types.h>
+#include <uapi/drm/drm_fourcc.h>
+
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
+int drm_format_num_planes(uint32_t format);
+int drm_format_plane_cpp(uint32_t format, int plane);
+int drm_format_horz_chroma_subsampling(uint32_t format);
+int drm_format_vert_chroma_subsampling(uint32_t format);
+int drm_format_plane_width(int width, uint32_t format, int plane);
+int drm_format_plane_height(int height, uint32_t format, int plane);
+const char *drm_get_format_name(uint32_t format);
+
+#endif /* __DRM_FOURCC_H__ */
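
Usage sketch (illustrative only, not part of this diff): the format helpers gathered in this new header answer layout questions about a FourCC code; the values in the comments are for DRM_FORMAT_NV12.

	uint32_t format = DRM_FORMAT_NV12;	/* 2-plane Y + interleaved CbCr */
	int planes = drm_format_num_planes(format);		/* 2 */
	int hsub = drm_format_horz_chroma_subsampling(format);	/* 2 */
	int vsub = drm_format_vert_chroma_subsampling(format);	/* 2 */
	int cpp = drm_format_plane_cpp(format, 0);		/* 1 byte per pixel */

	DRM_DEBUG_KMS("%s: %d planes, %dx%d subsampling, %d cpp\n",
		      drm_get_format_name(format), planes, hsub, vsub, cpp);
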
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
new file mode 100644
index 000000000000..2401b14d301f
--- /dev/null
+++ b/include/drm/drm_irq.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_IRQ_H_
+#define _DRM_IRQ_H_
+
+#include <linux/seqlock.h>
+
+/**
+ * struct drm_pending_vblank_event - pending vblank event tracking
+ */
+struct drm_pending_vblank_event {
+ /**
+ * @base: Base structure for tracking pending DRM events.
+ */
+ struct drm_pending_event base;
+ /**
+ * @pipe: drm_crtc_index() of the &drm_crtc this event is for.
+ */
+ unsigned int pipe;
+ /**
+ * @event: Actual event which will be sent to userspace.
+ */
+ struct drm_event_vblank event;
+};
+
+/**
+ * struct drm_vblank_crtc - vblank tracking for a CRTC
+ *
+ * This structure tracks the vblank state for one CRTC.
+ *
+ * Note that for historical reasons - the vblank handling code is still shared
+ * with legacy/non-kms drivers - this is a free-standing structure not directly
+ * connected to struct &drm_crtc. But all public interface functions take
+ * a struct &drm_crtc to hide this implementation detail.
+ */
+struct drm_vblank_crtc {
+ /**
+ * @dev: Pointer to the &drm_device.
+ */
+ struct drm_device *dev;
+ /**
+ * @queue: Wait queue for vblank waiters.
+ */
+ wait_queue_head_t queue; /**< VBLANK wait queue */
+ /**
+ * @disable_timer: Disable timer for the delayed vblank disabling
+ * hysteresis logic. Vblank disabling is controlled through the
+ * drm_vblank_offdelay module option and the setting of the
+ * max_vblank_count value in the &drm_device structure.
+ */
+ struct timer_list disable_timer;
+
+ /**
+ * @seqlock: Protect vblank count and time.
+ */
+ seqlock_t seqlock; /* protects vblank count and time */
+
+ /**
+ * @count: Current software vblank counter.
+ */
+ u32 count;
+ /**
+ * @time: Vblank timestamp corresponding to @count.
+ */
+ struct timeval time;
+
+ /**
+ * @refcount: Number of users/waiters of the vblank interrupt. Only when
+ * this refcount reaches 0 can the hardware interrupt be disabled using
+ * @disable_timer.
+ */
+ atomic_t refcount; /* number of users of vblank interrupts per crtc */
+ /**
+ * @last: Protected by dev->vbl_lock, used for wraparound handling.
+ */
+ u32 last;
+ /**
+ * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
+ * For legacy drivers, bit 2 additionally tracks whether an additional
+ * temporary vblank reference has been acquired to paper over the
+ * hardware counter resetting/jumping. KMS drivers should instead just
+ * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
+ * save and restore the vblank count.
+ */
+ unsigned int inmodeset; /* Display driver is setting mode */
+ /**
+ * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
+ * structure.
+ */
+ unsigned int pipe;
+ /**
+ * @framedur_ns: Frame/Field duration in ns, used by
+ * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_calc_timestamping_constants().
+ */
+ int framedur_ns;
+ /**
+ * @linedur_ns: Line duration in ns, used by
+ * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_calc_timestamping_constants().
+ */
+ int linedur_ns;
+ /**
+ * @enabled: Tracks the enabling state of the corresponding &drm_crtc to
+ * avoid double-disabling and hence corrupting saved state. Needed by
+ * drivers not using atomic KMS, since those might go through their CRTC
+ * disabling functions multiple times.
+ */
+ bool enabled;
+};
+
+extern int drm_irq_install(struct drm_device *dev, int irq);
+extern int drm_irq_uninstall(struct drm_device *dev);
+
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ struct timeval *vblanktime);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
+extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() and related
+ * functions.
+ */
+static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
+
+#endif
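
Usage sketch (illustrative only, not part of this diff): the wait_event() pattern mentioned in the drm_crtc_vblank_waitqueue() comment; foo_flip_done() is a hypothetical driver predicate.

	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(crtc);

	/* keep the vblank interrupt enabled while sleeping on its waitqueue */
	if (drm_crtc_vblank_get(crtc) == 0) {
		wait_event_timeout(*wq, foo_flip_done(crtc),
				   msecs_to_jiffies(50));
		drm_crtc_vblank_put(crtc);
	}
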
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index a5ef2c7e40f8..cf0e7d89bcdf 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -1,6 +1,8 @@
#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
+#include <drm/drm_auth.h>
+
/*
* Legacy driver interfaces for the Direct Rendering Manager
*
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 7a9840f8b38e..47ac92584d76 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -180,6 +180,8 @@ struct mipi_dsi_device {
unsigned long mode_flags;
};
+#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
+
static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
{
return container_of(dev, struct mipi_dsi_device, dev);
@@ -263,6 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 625966a906f2..ff481770d76b 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -169,6 +169,8 @@ enum drm_mode_status {
*
* The horizontal and vertical timings are defined per the following diagram.
*
+ * ::
+ *
*
* Active Front Sync Back
* Region Porch Porch
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index d4619dc2eecb..b55f21857a98 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -736,6 +736,11 @@ struct drm_connector_helper_funcs {
* inspect dynamic configuration state should instead use
* @atomic_best_encoder.
*
+ * You can leave this hook set to NULL if the connector is only
+ * attached to a single encoder and you are using the atomic helpers.
+ * In this case, the core will call drm_atomic_helper_best_encoder()
+ * for you.
+ *
* RETURNS:
*
* Encoder that should be used for the given connector and connector
@@ -752,8 +757,9 @@ struct drm_connector_helper_funcs {
* need to select the best encoder depending upon the desired
* configuration and can't select it statically.
*
- * This function is used by drm_atomic_helper_check_modeset() and either
- * this or @best_encoder is required.
+ * This function is used by drm_atomic_helper_check_modeset().
+ * If it is not implemented, the core will fall back to @best_encoder
+ * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
*
* NOTE:
*
@@ -925,4 +931,43 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
plane->helper_private = funcs;
}
+/**
+ * struct drm_mode_config_helper_funcs - global modeset helper operations
+ *
+ * These helper functions are used by the atomic helpers.
+ */
+struct drm_mode_config_helper_funcs {
+ /**
+ * @atomic_commit_tail:
+ *
+ * This hook is used by the default atomic_commit() hook implemented in
+ * drm_atomic_helper_commit() together with the nonblocking commit
+ * helpers (see drm_atomic_helper_setup_commit() for a starting point)
+ * to implement blocking and nonblocking commits easily. It is not used
+ * by the atomic helpers otherwise.
+ *
+ * This hook should first commit the given atomic state to the hardware.
+ * But drivers can add more waiting calls at the start of their
+ * implementation, e.g. to wait for driver-internal requests for implicit
+ * syncing, before starting to commit the update to the hardware.
+ *
+ * After the atomic update is committed to the hardware this hook needs
+ * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
+ * to be executed by the hardware, for example using
+ * drm_atomic_helper_wait_for_vblanks(), and then clean up the old
+ * framebuffers using drm_atomic_helper_cleanup_planes().
+ *
+ * When disabling a CRTC, this hook _must_ stall for the commit to
+ * complete. Vblank waits don't work on a disabled CRTC, hence the core
+ * can't take care of this. And it also can't rely on the vblank event,
+ * since that can be signalled already when the screen shows black,
+ * which can happen much earlier than the last hardware access needed to
+ * shut off the display pipeline completely.
+ *
+ * This hook is optional, the default implementation is
+ * drm_atomic_helper_commit_tail().
+ */
+ void (*atomic_commit_tail)(struct drm_atomic_state *state);
+};
+
#endif
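
Usage sketch (illustrative only, not part of this diff): wiring up @atomic_commit_tail. The foo_* names are hypothetical; wrapping drm_atomic_helper_commit_tail() keeps the documented commit_hw_done()/vblank-wait/cleanup sequence while letting the driver wait for its own fences first.

static void foo_atomic_commit_tail(struct drm_atomic_state *state)
{
	/* driver-specific waits, e.g. for implicit fencing (hypothetical) */
	foo_wait_for_fences(state);

	/* default tail: commit to hw, drm_atomic_helper_commit_hw_done(),
	 * wait for vblanks, clean up the old framebuffers */
	drm_atomic_helper_commit_tail(state);
}

static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
	.atomic_commit_tail = foo_atomic_commit_tail,
};
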
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 4421f3f4ca8d..0e0c3573cce0 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -46,6 +46,7 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
struct drm_rect *src,
struct drm_rect *dest,
const struct drm_rect *clip,
+ unsigned int rotation,
int min_scale,
int max_scale,
bool can_position,
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
new file mode 100644
index 000000000000..269039722f91
--- /dev/null
+++ b/include/drm/drm_simple_kms_helper.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
+#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
+
+struct drm_simple_display_pipe;
+
+/**
+ * struct drm_simple_display_pipe_funcs - helper operations for a simple
+ * display pipeline
+ */
+struct drm_simple_display_pipe_funcs {
+ /**
+ * @enable:
+ *
+ * This function should be used to enable the pipeline.
+ * It is called when the underlying crtc is enabled.
+ * This hook is optional.
+ */
+ void (*enable)(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state);
+ /**
+ * @disable:
+ *
+ * This function should be used to disable the pipeline.
+ * It is called when the underlying crtc is disabled.
+ * This hook is optional.
+ */
+ void (*disable)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @check:
+ *
+ * This function is called in the check phase of an atomic update,
+ * specifically when the underlying plane is checked.
+ * The simple display pipeline helpers already check that the plane is
+ * not scaled, that it fills the entire visible area and that it is
+ * always enabled when the crtc is also enabled.
+ * This hook is optional.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the state or the transition can't be
+ * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+ * attempt to obtain another state object ran into a &drm_modeset_lock
+ * deadlock.
+ */
+ int (*check)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state);
+ /**
+ * @update:
+ *
+ * This function is called when the underlying plane state is updated.
+ * This hook is optional.
+ */
+ void (*update)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+};
+
+/**
+ * struct drm_simple_display_pipe - simple display pipeline
+ * @crtc: CRTC control structure
+ * @plane: Plane control structure
+ * @encoder: Encoder control structure
+ * @connector: Connector control structure
+ * @funcs: Pipeline control functions (optional)
+ *
+ * Simple display pipeline with plane, crtc and encoder collapsed into one
+ * entity. It should be initialized by calling drm_simple_display_pipe_init().
+ */
+struct drm_simple_display_pipe {
+ struct drm_crtc crtc;
+ struct drm_plane plane;
+ struct drm_encoder encoder;
+ struct drm_connector *connector;
+
+ const struct drm_simple_display_pipe_funcs *funcs;
+};
+
+int drm_simple_display_pipe_init(struct drm_device *dev,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ struct drm_connector *connector);
+
+#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
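
Usage sketch (illustrative only, not part of this diff): initialising a simple display pipe. The foo_* callbacks, format list and connector are placeholders.

static const uint32_t foo_formats[] = {
	DRM_FORMAT_XRGB8888,
};

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.enable	 = foo_pipe_enable,	/* hypothetical callbacks */
	.disable = foo_pipe_disable,
	.update	 = foo_pipe_update,
};

static struct drm_simple_display_pipe foo_pipe;

static int foo_pipe_setup(struct drm_device *drm, struct drm_connector *connector)
{
	return drm_simple_display_pipe_init(drm, &foo_pipe, &foo_pipe_funcs,
					    foo_formats, ARRAY_SIZE(foo_formats),
					    connector);
}
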
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 595f85c392ac..b1755f8db36b 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
+#define INTEL_BSM 0x5c
+#define INTEL_BSM_MASK (0xFFFF << 20)
+
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 9e9bddaa58a5..f49edecd66a3 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -13,6 +13,9 @@ void intel_gmch_remove(void);
bool intel_enable_gtt(void);
void intel_gtt_chipset_flush(void);
+void intel_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags);
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 4cecb0b75b9c..6f2c59887ba6 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -173,7 +173,7 @@ struct ttm_tt;
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
- * @priv_flags: Flags describing buffer object internal state.
+ * @moving: Fence set when BO is moving
* @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
@@ -239,7 +239,7 @@ struct ttm_buffer_object {
* Members protected by a bo reservation.
*/
- unsigned long priv_flags;
+ struct fence *moving;
struct drm_vma_offset_node vma_node;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 513f7f96b80a..99c6d01d24f2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -258,8 +258,10 @@ struct ttm_mem_type_manager_func {
* reserved by the TTM vm system.
* @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
+ * @move_lock: lock for move fence
* @lru: The lru list for this memory type.
+ * @move: The fence of the last pipelined move operation.
*
* This structure is used to identify and manage memory types for a device.
* It's set up by the ttm_bo_driver::init_mem_type method.
@@ -286,6 +288,7 @@ struct ttm_mem_type_manager {
struct mutex io_reserve_mutex;
bool use_io_reserve_lru;
bool io_reserve_fastpath;
+ spinlock_t move_lock;
/*
* Protected by @io_reserve_mutex:
@@ -298,6 +301,11 @@ struct ttm_mem_type_manager {
*/
struct list_head lru;
+
+ /*
+ * Protected by @move_lock.
+ */
+ struct fence *move;
};
/**
@@ -503,9 +511,6 @@ struct ttm_bo_global {
#define TTM_NUM_MEM_TYPES 8
-#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
- idling before CPU mapping */
-#define TTM_BO_PRIV_FLAG_MAX 1
/**
* struct ttm_bo_device - Buffer object driver device-specific data.
*
@@ -957,6 +962,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -971,7 +977,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
@@ -979,6 +985,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
@@ -993,7 +1000,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
@@ -1011,7 +1019,6 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* @bo: A pointer to a struct ttm_buffer_object.
* @fence: A fence object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Accelerated move function to be called when an accelerated move
@@ -1023,9 +1030,24 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
- bool evict, bool no_wait_gpu,
+ struct fence *fence, bool evict,
struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_pipeline_move.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Function for pipelining accelerated moves. Either free the memory
+ * immediately or hang it on a temporary buffer object.
+ */
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+ struct fence *fence, bool evict,
+ struct ttm_mem_reg *new_mem);
+
/**
* ttm_io_prot
*
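
Usage sketch (illustrative only, not part of this diff): how a driver's move callback might finish an accelerated copy with the new pipelined path. The can_pipeline flag is hypothetical, and the final fence_put() assumes the helpers install their own reference on the fence.

	if (foo->can_pipeline)
		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	else
		r = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
	fence_put(fence);
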
diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h
new file mode 100644
index 000000000000..9a8b392ceb00
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7792-clock.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7792_H__
+#define __DT_BINDINGS_CLOCK_R8A7792_H__
+
+/* CPG */
+#define R8A7792_CLK_MAIN 0
+#define R8A7792_CLK_PLL0 1
+#define R8A7792_CLK_PLL1 2
+#define R8A7792_CLK_PLL3 3
+#define R8A7792_CLK_LB 4
+#define R8A7792_CLK_QSPI 5
+#define R8A7792_CLK_Z 6
+
+/* MSTP0 */
+#define R8A7792_CLK_MSIOF0 0
+
+/* MSTP1 */
+#define R8A7792_CLK_JPU 6
+#define R8A7792_CLK_TMU1 11
+#define R8A7792_CLK_TMU3 21
+#define R8A7792_CLK_TMU2 22
+#define R8A7792_CLK_CMT0 24
+#define R8A7792_CLK_TMU0 25
+#define R8A7792_CLK_VSP1DU1 27
+#define R8A7792_CLK_VSP1DU0 28
+#define R8A7792_CLK_VSP1_SY 31
+
+/* MSTP2 */
+#define R8A7792_CLK_MSIOF1 8
+#define R8A7792_CLK_SYS_DMAC1 18
+#define R8A7792_CLK_SYS_DMAC0 19
+
+/* MSTP3 */
+#define R8A7792_CLK_TPU0 4
+#define R8A7792_CLK_SDHI0 14
+#define R8A7792_CLK_CMT1 29
+
+/* MSTP4 */
+#define R8A7792_CLK_IRQC 7
+
+/* MSTP5 */
+#define R8A7792_CLK_AUDIO_DMAC0 2
+#define R8A7792_CLK_THERMAL 22
+#define R8A7792_CLK_PWM 23
+
+/* MSTP7 */
+#define R8A7792_CLK_HSCIF1 16
+#define R8A7792_CLK_HSCIF0 17
+#define R8A7792_CLK_SCIF3 18
+#define R8A7792_CLK_SCIF2 19
+#define R8A7792_CLK_SCIF1 20
+#define R8A7792_CLK_SCIF0 21
+#define R8A7792_CLK_DU1 23
+#define R8A7792_CLK_DU0 24
+
+/* MSTP8 */
+#define R8A7792_CLK_VIN5 4
+#define R8A7792_CLK_VIN4 5
+#define R8A7792_CLK_VIN3 8
+#define R8A7792_CLK_VIN2 9
+#define R8A7792_CLK_VIN1 10
+#define R8A7792_CLK_VIN0 11
+#define R8A7792_CLK_ETHERAVB 12
+
+/* MSTP9 */
+#define R8A7792_CLK_GPIO7 4
+#define R8A7792_CLK_GPIO6 5
+#define R8A7792_CLK_GPIO5 7
+#define R8A7792_CLK_GPIO4 8
+#define R8A7792_CLK_GPIO3 9
+#define R8A7792_CLK_GPIO2 10
+#define R8A7792_CLK_GPIO1 11
+#define R8A7792_CLK_GPIO0 12
+#define R8A7792_CLK_GPIO11 13
+#define R8A7792_CLK_GPIO10 14
+#define R8A7792_CLK_CAN1 15
+#define R8A7792_CLK_CAN0 16
+#define R8A7792_CLK_QSPI_MOD 17
+#define R8A7792_CLK_GPIO9 19
+#define R8A7792_CLK_GPIO8 21
+#define R8A7792_CLK_I2C5 25
+#define R8A7792_CLK_IICDVFS 26
+#define R8A7792_CLK_I2C4 27
+#define R8A7792_CLK_I2C3 28
+#define R8A7792_CLK_I2C2 29
+#define R8A7792_CLK_I2C1 30
+#define R8A7792_CLK_I2C0 31
+
+/* MSTP10 */
+#define R8A7792_CLK_SSI_ALL 5
+#define R8A7792_CLK_SSI4 11
+#define R8A7792_CLK_SSI3 12
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7792_H__ */
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 4d3ecd626c1f..a3491ba2f6ec 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -67,7 +67,6 @@
#define R8A7794_CLK_IRQC 7
/* MSTP5 */
-#define R8A7794_CLK_THERMAL 22
#define R8A7794_CLK_PWM 23
/* MSTP7 */
diff --git a/include/dt-bindings/clock/stih407-clks.h b/include/dt-bindings/clock/stih407-clks.h
index 7af2b717b3b2..082edd9badfa 100644
--- a/include/dt-bindings/clock/stih407-clks.h
+++ b/include/dt-bindings/clock/stih407-clks.h
@@ -5,6 +5,10 @@
#ifndef _DT_BINDINGS_CLK_STIH407
#define _DT_BINDINGS_CLK_STIH407
+/* CLOCKGEN A0 */
+#define CLK_IC_LMI0 0
+#define CLK_IC_LMI1 1
+
/* CLOCKGEN C0 */
#define CLK_ICN_GPU 0
#define CLK_FDMA 1
diff --git a/include/dt-bindings/memory/mt2701-larb-port.h b/include/dt-bindings/memory/mt2701-larb-port.h
new file mode 100644
index 000000000000..78f66786da91
--- /dev/null
+++ b/include/dt-bindings/memory/mt2701-larb-port.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT2701_LARB_PORT_H_
+#define _MT2701_LARB_PORT_H_
+
+/*
+ * Mediatek m4u generation 1, such as mt2701, has flat m4u port numbers:
+ * the first port id of larb[N] is the last port id of larb[N - 1] plus one,
+ * while larb[0]'s first port number is 0. The MT2701_M4U_ID_LARBx
+ * definitions follow the HW register spec.
+ * m4u generation 2, such as mt8173, uses a different port numbering: each
+ * larb has a fixed offset, so the first port id of larb[N] is (N * 32).
+ */
+#define LARB0_PORT_OFFSET 0
+#define LARB1_PORT_OFFSET 11
+#define LARB2_PORT_OFFSET 21
+#define LARB3_PORT_OFFSET 43
+
+#define MT2701_M4U_ID_LARB0(port) ((port) + LARB0_PORT_OFFSET)
+#define MT2701_M4U_ID_LARB1(port) ((port) + LARB1_PORT_OFFSET)
+#define MT2701_M4U_ID_LARB2(port) ((port) + LARB2_PORT_OFFSET)
+
+/* Port define for larb0 */
+#define MT2701_M4U_PORT_DISP_OVL_0 MT2701_M4U_ID_LARB0(0)
+#define MT2701_M4U_PORT_DISP_RDMA1 MT2701_M4U_ID_LARB0(1)
+#define MT2701_M4U_PORT_DISP_RDMA MT2701_M4U_ID_LARB0(2)
+#define MT2701_M4U_PORT_DISP_WDMA MT2701_M4U_ID_LARB0(3)
+#define MT2701_M4U_PORT_MM_CMDQ MT2701_M4U_ID_LARB0(4)
+#define MT2701_M4U_PORT_MDP_RDMA MT2701_M4U_ID_LARB0(5)
+#define MT2701_M4U_PORT_MDP_WDMA MT2701_M4U_ID_LARB0(6)
+#define MT2701_M4U_PORT_MDP_ROTO MT2701_M4U_ID_LARB0(7)
+#define MT2701_M4U_PORT_MDP_ROTCO MT2701_M4U_ID_LARB0(8)
+#define MT2701_M4U_PORT_MDP_ROTVO MT2701_M4U_ID_LARB0(9)
+#define MT2701_M4U_PORT_MDP_RDMA1 MT2701_M4U_ID_LARB0(10)
+
+/* Port define for larb1 */
+#define MT2701_M4U_PORT_VDEC_MC_EXT MT2701_M4U_ID_LARB1(0)
+#define MT2701_M4U_PORT_VDEC_PP_EXT MT2701_M4U_ID_LARB1(1)
+#define MT2701_M4U_PORT_VDEC_PPWRAP_EXT MT2701_M4U_ID_LARB1(2)
+#define MT2701_M4U_PORT_VDEC_AVC_MV_EXT MT2701_M4U_ID_LARB1(3)
+#define MT2701_M4U_PORT_VDEC_PRED_RD_EXT MT2701_M4U_ID_LARB1(4)
+#define MT2701_M4U_PORT_VDEC_PRED_WR_EXT MT2701_M4U_ID_LARB1(5)
+#define MT2701_M4U_PORT_VDEC_VLD_EXT MT2701_M4U_ID_LARB1(6)
+#define MT2701_M4U_PORT_VDEC_VLD2_EXT MT2701_M4U_ID_LARB1(7)
+#define MT2701_M4U_PORT_VDEC_TILE_EXT MT2701_M4U_ID_LARB1(8)
+#define MT2701_M4U_PORT_VDEC_IMG_RESZ_EXT MT2701_M4U_ID_LARB1(9)
+
+/* Port define for larb2 */
+#define MT2701_M4U_PORT_VENC_RCPU MT2701_M4U_ID_LARB2(0)
+#define MT2701_M4U_PORT_VENC_REC_FRM MT2701_M4U_ID_LARB2(1)
+#define MT2701_M4U_PORT_VENC_BSDMA MT2701_M4U_ID_LARB2(2)
+#define MT2701_M4U_PORT_JPGENC_RDMA MT2701_M4U_ID_LARB2(3)
+#define MT2701_M4U_PORT_VENC_LT_RCPU MT2701_M4U_ID_LARB2(4)
+#define MT2701_M4U_PORT_VENC_LT_REC_FRM MT2701_M4U_ID_LARB2(5)
+#define MT2701_M4U_PORT_VENC_LT_BSDMA MT2701_M4U_ID_LARB2(6)
+#define MT2701_M4U_PORT_JPGDEC_BSDMA MT2701_M4U_ID_LARB2(7)
+#define MT2701_M4U_PORT_VENC_SV_COMV MT2701_M4U_ID_LARB2(8)
+#define MT2701_M4U_PORT_VENC_RD_COMV MT2701_M4U_ID_LARB2(9)
+#define MT2701_M4U_PORT_JPGENC_BSDMA MT2701_M4U_ID_LARB2(10)
+#define MT2701_M4U_PORT_VENC_CUR_LUMA MT2701_M4U_ID_LARB2(11)
+#define MT2701_M4U_PORT_VENC_CUR_CHROMA MT2701_M4U_ID_LARB2(12)
+#define MT2701_M4U_PORT_VENC_REF_LUMA MT2701_M4U_ID_LARB2(13)
+#define MT2701_M4U_PORT_VENC_REF_CHROMA MT2701_M4U_ID_LARB2(14)
+#define MT2701_M4U_PORT_IMG_RESZ MT2701_M4U_ID_LARB2(15)
+#define MT2701_M4U_PORT_VENC_LT_SV_COMV MT2701_M4U_ID_LARB2(16)
+#define MT2701_M4U_PORT_VENC_LT_RD_COMV MT2701_M4U_ID_LARB2(17)
+#define MT2701_M4U_PORT_VENC_LT_CUR_LUMA MT2701_M4U_ID_LARB2(18)
+#define MT2701_M4U_PORT_VENC_LT_CUR_CHROMA MT2701_M4U_ID_LARB2(19)
+#define MT2701_M4U_PORT_VENC_LT_REF_LUMA MT2701_M4U_ID_LARB2(20)
+#define MT2701_M4U_PORT_VENC_LT_REF_CHROMA MT2701_M4U_ID_LARB2(21)
+#define MT2701_M4U_PORT_JPGDEC_WDMA MT2701_M4U_ID_LARB2(22)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h
new file mode 100644
index 000000000000..7f97d776a8ff
--- /dev/null
+++ b/include/dt-bindings/pinctrl/keystone.h
@@ -0,0 +1,39 @@
+/*
+ * This header provides constants for Keystone pinctrl bindings.
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H
+#define _DT_BINDINGS_PINCTRL_KEYSTONE_H
+
+#define MUX_MODE0 0
+#define MUX_MODE1 1
+#define MUX_MODE2 2
+#define MUX_MODE3 3
+#define MUX_MODE4 4
+#define MUX_MODE5 5
+
+#define BUFFER_CLASS_B (0 << 19)
+#define BUFFER_CLASS_C (1 << 19)
+#define BUFFER_CLASS_D (2 << 19)
+#define BUFFER_CLASS_E (3 << 19)
+
+#define PULL_DISABLE (1 << 16)
+#define PIN_PULLUP (1 << 17)
+#define PIN_PULLDOWN (0 << 17)
+
+#define KEYSTONE_IOPAD_OFFSET(pa, offset) (((pa) & 0xffff) - (offset))
+
+#define K2G_CORE_IOPAD(pa) KEYSTONE_IOPAD_OFFSET((pa), 0x1000)
+
+#endif
diff --git a/include/dt-bindings/power/r8a7792-sysc.h b/include/dt-bindings/power/r8a7792-sysc.h
new file mode 100644
index 000000000000..74f4a78e29aa
--- /dev/null
+++ b/include/dt-bindings/power/r8a7792-sysc.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7792_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7792_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7792_PD_CA15_CPU0 0
+#define R8A7792_PD_CA15_CPU1 1
+#define R8A7792_PD_CA15_SCU 12
+#define R8A7792_PD_SGX 20
+#define R8A7792_PD_IMP 24
+
+/* Always-on power area */
+#define R8A7792_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7792_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7796-sysc.h b/include/dt-bindings/power/r8a7796-sysc.h
new file mode 100644
index 000000000000..5b4daab44daa
--- /dev/null
+++ b/include/dt-bindings/power/r8a7796-sysc.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7796_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7796_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7796_PD_CA57_CPU0 0
+#define R8A7796_PD_CA57_CPU1 1
+#define R8A7796_PD_CA53_CPU0 5
+#define R8A7796_PD_CA53_CPU1 6
+#define R8A7796_PD_CA53_CPU2 7
+#define R8A7796_PD_CA53_CPU3 8
+#define R8A7796_PD_CA57_SCU 12
+#define R8A7796_PD_CR7 13
+#define R8A7796_PD_A3VC 14
+#define R8A7796_PD_3DG_A 17
+#define R8A7796_PD_3DG_B 18
+#define R8A7796_PD_CA53_SCU 21
+#define R8A7796_PD_A3IR 24
+#define R8A7796_PD_A2VC0 25
+#define R8A7796_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A7796_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7796_SYSC_H__ */
diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
new file mode 100644
index 000000000000..524d6077ac1b
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
@@ -0,0 +1,210 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_GXBB_RESET_H
+
+/* RESET0 */
+#define RESET_HIU 0
+/* 1 */
+#define RESET_DOS_RESET 2
+#define RESET_DDR_TOP 3
+#define RESET_DCU_RESET 4
+#define RESET_VIU 5
+#define RESET_AIU 6
+#define RESET_VID_PLL_DIV 7
+/* 8 */
+#define RESET_PMUX 9
+#define RESET_VENC 10
+#define RESET_ASSIST 11
+#define RESET_AFIFO2 12
+#define RESET_VCBUS 13
+/* 14 */
+/* 15 */
+#define RESET_GIC 16
+#define RESET_CAPB3_DECODE 17
+#define RESET_NAND_CAPB3 18
+#define RESET_HDMITX_CAPB3 19
+#define RESET_MALI_CAPB3 20
+#define RESET_DOS_CAPB3 21
+#define RESET_SYS_CPU_CAPB3 22
+#define RESET_CBUS_CAPB3 23
+#define RESET_AHB_CNTL 24
+#define RESET_AHB_DATA 25
+#define RESET_VCBUS_CLK81 26
+#define RESET_MMC 27
+#define RESET_MIPI_0 28
+#define RESET_MIPI_1 29
+#define RESET_MIPI_2 30
+#define RESET_MIPI_3 31
+/* RESET1 */
+#define RESET_CPPM 32
+#define RESET_DEMUX 33
+#define RESET_USB_OTG 34
+#define RESET_DDR 35
+#define RESET_AO_RESET 36
+#define RESET_BT656 37
+#define RESET_AHB_SRAM 38
+/* 39 */
+#define RESET_PARSER 40
+#define RESET_BLKMV 41
+#define RESET_ISA 42
+#define RESET_ETHERNET 43
+#define RESET_SD_EMMC_A 44
+#define RESET_SD_EMMC_B 45
+#define RESET_SD_EMMC_C 46
+#define RESET_ROM_BOOT 47
+#define RESET_SYS_CPU_0 48
+#define RESET_SYS_CPU_1 49
+#define RESET_SYS_CPU_2 50
+#define RESET_SYS_CPU_3 51
+#define RESET_SYS_CPU_CORE_0 52
+#define RESET_SYS_CPU_CORE_1 53
+#define RESET_SYS_CPU_CORE_2 54
+#define RESET_SYS_CPU_CORE_3 55
+#define RESET_SYS_PLL_DIV 56
+#define RESET_SYS_CPU_AXI 57
+#define RESET_SYS_CPU_L2 58
+#define RESET_SYS_CPU_P 59
+#define RESET_SYS_CPU_MBIST 60
+/* 61 */
+/* 62 */
+/* 63 */
+/* RESET2 */
+#define RESET_VD_RMEM 64
+#define RESET_AUDIN 65
+#define RESET_HDMI_TX 66
+/* 67 */
+/* 68 */
+/* 69 */
+#define RESET_GE2D 70
+#define RESET_PARSER_REG 71
+#define RESET_PARSER_FETCH 72
+#define RESET_PARSER_CTL 73
+#define RESET_PARSER_TOP 74
+/* 75 */
+/* 76 */
+#define RESET_AO_CPU_RESET 77
+#define RESET_MALI 78
+#define RESET_HDMI_SYSTEM_RESET 79
+/* 80-95 */
+/* RESET3 */
+#define RESET_RING_OSCILLATOR 96
+#define RESET_SYS_CPU 97
+#define RESET_EFUSE 98
+#define RESET_SYS_CPU_BVCI 99
+#define RESET_AIFIFO 100
+#define RESET_TVFE 101
+#define RESET_AHB_BRIDGE_CNTL 102
+/* 103 */
+#define RESET_AUDIO_DAC 104
+#define RESET_DEMUX_TOP 105
+#define RESET_DEMUX_DES 106
+#define RESET_DEMUX_S2P_0 107
+#define RESET_DEMUX_S2P_1 108
+#define RESET_DEMUX_RESET_0 109
+#define RESET_DEMUX_RESET_1 110
+#define RESET_DEMUX_RESET_2 111
+/* 112-127 */
+/* RESET4 */
+/* 128 */
+/* 129 */
+/* 130 */
+/* 131 */
+#define RESET_DVIN_RESET 132
+#define RESET_RDMA 133
+#define RESET_VENCI 134
+#define RESET_VENCP 135
+/* 136 */
+#define RESET_VDAC 137
+#define RESET_RTC 138
+/* 139 */
+#define RESET_VDI6 140
+#define RESET_VENCL 141
+#define RESET_I2C_MASTER_2 142
+#define RESET_I2C_MASTER_1 143
+/* 144-159 */
+/* RESET5 */
+/* 160-191 */
+/* RESET6 */
+#define RESET_PERIPHS_GENERAL 192
+#define RESET_PERIPHS_SPICC 193
+#define RESET_PERIPHS_SMART_CARD 194
+#define RESET_PERIPHS_SAR_ADC 195
+#define RESET_PERIPHS_I2C_MASTER_0 196
+#define RESET_SANA 197
+/* 198 */
+#define RESET_PERIPHS_STREAM_INTERFACE 199
+#define RESET_PERIPHS_SDIO 200
+#define RESET_PERIPHS_UART_0 201
+#define RESET_PERIPHS_UART_1_2 202
+#define RESET_PERIPHS_ASYNC_0 203
+#define RESET_PERIPHS_ASYNC_1 204
+#define RESET_PERIPHS_SPI_0 205
+#define RESET_PERIPHS_SDHC 206
+#define RESET_UART_SLIP 207
+/* 208-223 */
+/* RESET7 */
+#define RESET_USB_DDR_0 224
+#define RESET_USB_DDR_1 225
+#define RESET_USB_DDR_2 226
+#define RESET_USB_DDR_3 227
+/* 228 */
+#define RESET_DEVICE_MMC_ARB 229
+/* 230 */
+#define RESET_VID_LOCK 231
+#define RESET_A9_DMC_PIPEL 232
+/* 233-255 */
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h
new file mode 100644
index 000000000000..614aff2c7aff
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h
@@ -0,0 +1,175 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON8B_RESET_H
+
+/* RESET0 */
+#define RESET_HIU 0
+#define RESET_VLD 1
+#define RESET_IQIDCT 2
+#define RESET_MC 3
+/* 4 */
+#define RESET_VIU 5
+#define RESET_AIU 6
+#define RESET_MCPU 7
+#define RESET_CCPU 8
+#define RESET_PMUX 9
+#define RESET_VENC 10
+#define RESET_ASSIST 11
+#define RESET_AFIFO2 12
+#define RESET_MDEC 13
+#define RESET_VLD_PART 14
+#define RESET_VIFIFO 15
+/* 16-31 */
+/* RESET1 */
+/* 32 */
+#define RESET_DEMUX 33
+#define RESET_USB_OTG 34
+#define RESET_DDR 35
+#define RESET_VDAC_1 36
+#define RESET_BT656 37
+#define RESET_AHB_SRAM 38
+#define RESET_AHB_BRIDGE 39
+#define RESET_PARSER 40
+#define RESET_BLKMV 41
+#define RESET_ISA 42
+#define RESET_ETHERNET 43
+#define RESET_ABUF 44
+#define RESET_AHB_DATA 45
+#define RESET_AHB_CNTL 46
+#define RESET_ROM_BOOT 47
+/* 48-63 */
+/* RESET2 */
+#define RESET_VD_RMEM 64
+#define RESET_AUDIN 65
+#define RESET_DBLK 66
+#define RESET_PIC_DC 67
+#define RESET_PSC 68
+#define RESET_NAND 69
+#define RESET_GE2D 70
+#define RESET_PARSER_REG 71
+#define RESET_PARSER_FETCH 72
+#define RESET_PARSER_CTL 73
+#define RESET_PARSER_TOP 74
+#define RESET_HDMI_APB 75
+#define RESET_AUDIO_APB 76
+#define RESET_MEDIA_CPU 77
+#define RESET_MALI 78
+#define RESET_HDMI_SYSTEM_RESET 79
+/* 80-95 */
+/* RESET3 */
+#define RESET_RING_OSCILLATOR 96
+#define RESET_SYS_CPU_0 97
+#define RESET_EFUSE 98
+#define RESET_SYS_CPU_BVCI 99
+#define RESET_AIFIFO 100
+#define RESET_AUDIO_PLL_MODULATOR 101
+#define RESET_AHB_BRIDGE_CNTL 102
+#define RESET_SYS_CPU_1 103
+#define RESET_AUDIO_DAC 104
+#define RESET_DEMUX_TOP 105
+#define RESET_DEMUX_DES 106
+#define RESET_DEMUX_S2P_0 107
+#define RESET_DEMUX_S2P_1 108
+#define RESET_DEMUX_RESET_0 109
+#define RESET_DEMUX_RESET_1 110
+#define RESET_DEMUX_RESET_2 111
+/* 112-127 */
+/* RESET4 */
+#define RESET_PL310 128
+#define RESET_A5_APB 129
+#define RESET_A5_AXI 130
+#define RESET_A5 131
+#define RESET_DVIN 132
+#define RESET_RDMA 133
+#define RESET_VENCI 134
+#define RESET_VENCP 135
+#define RESET_VENCT 136
+#define RESET_VDAC_4 137
+#define RESET_RTC 138
+#define RESET_A5_DEBUG 139
+#define RESET_VDI6 140
+#define RESET_VENCL 141
+/* 142-159 */
+/* RESET5 */
+#define RESET_DDR_PLL 160
+#define RESET_MISC_PLL 161
+#define RESET_SYS_PLL 162
+#define RESET_HPLL_PLL 163
+#define RESET_AUDIO_PLL 164
+#define RESET_VID2_PLL 165
+/* 166-191 */
+/* RESET6 */
+#define RESET_PERIPHS_GENERAL 192
+#define RESET_PERIPHS_IR_REMOTE 193
+#define RESET_PERIPHS_SMART_CARD 194
+#define RESET_PERIPHS_SAR_ADC 195
+#define RESET_PERIPHS_I2C_MASTER_0 196
+#define RESET_PERIPHS_I2C_MASTER_1 197
+#define RESET_PERIPHS_I2C_SLAVE 198
+#define RESET_PERIPHS_STREAM_INTERFACE 199
+#define RESET_PERIPHS_SDIO 200
+#define RESET_PERIPHS_UART_0 201
+#define RESET_PERIPHS_UART_1 202
+#define RESET_PERIPHS_ASYNC_0 203
+#define RESET_PERIPHS_ASYNC_1 204
+#define RESET_PERIPHS_SPI_0 205
+#define RESET_PERIPHS_SPI_1 206
+#define RESET_PERIPHS_LED_PWM 207
+/* 208-223 */
+/* RESET7 */
+/* 224-255 */
+
+#endif
diff --git a/include/dt-bindings/reset/hisi,hi6220-resets.h b/include/dt-bindings/reset/hisi,hi6220-resets.h
index ca08a7e5248e..322ec5335b65 100644
--- a/include/dt-bindings/reset/hisi,hi6220-resets.h
+++ b/include/dt-bindings/reset/hisi,hi6220-resets.h
@@ -64,4 +64,12 @@
#define PERIPH_RSDIST9_CARM_SOCDBG 0x507
#define PERIPH_RSDIST9_CARM_ETM 0x508
+#define MEDIA_G3D 0
+#define MEDIA_CODEC_VPU 2
+#define MEDIA_CODEC_JPEG 3
+#define MEDIA_ISP 4
+#define MEDIA_ADE 5
+#define MEDIA_MMU 6
+#define MEDIA_XG2RAM1 7
+
#endif /*_DT_BINDINGS_RESET_CONTROLLER_HI6220*/
diff --git a/include/dt-bindings/reset/ti-syscon.h b/include/dt-bindings/reset/ti-syscon.h
new file mode 100644
index 000000000000..884fd91df8e9
--- /dev/null
+++ b/include/dt-bindings/reset/ti-syscon.h
@@ -0,0 +1,38 @@
+/*
+ * TI Syscon Reset definitions
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_RESET_TI_SYSCON_H__
+#define __DT_BINDINGS_RESET_TI_SYSCON_H__
+
+/*
+ * The reset does not support the feature and corresponding
+ * values are not valid
+ */
+#define ASSERT_NONE (1 << 0)
+#define DEASSERT_NONE (1 << 1)
+#define STATUS_NONE (1 << 2)
+
+/* When set, this function is activated by setting (vs. clearing) this bit */
+#define ASSERT_SET (1 << 3)
+#define DEASSERT_SET (1 << 4)
+#define STATUS_SET (1 << 5)
+
+/* The following are the inverse of the above and are added for consistency */
+#define ASSERT_CLEAR (0 << 3)
+#define DEASSERT_CLEAR (0 << 4)
+#define STATUS_CLEAR (0 << 5)
+
+#endif
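
A hedged illustration, not part of the patch: one plausible way a reset driver consuming these cells could interpret the flag bits above. The function name, "flags", "mask" and "val" are placeholders.

/*
 * Hedged sketch: translate the per-reset flags into the register value
 * that asserts the reset.  Illustration only.
 */
static int my_reset_assert_value(unsigned long flags, u32 mask, u32 *val)
{
	if (flags & ASSERT_NONE)
		return -ENOTSUPP;	/* asserting is not supported for this reset */

	/* ASSERT_SET: assert by setting the bit; ASSERT_CLEAR: by clearing it */
	*val = (flags & ASSERT_SET) ? mask : 0;
	return 0;
}
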
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index da0a524802cb..19b698ef3336 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -1,6 +1,5 @@
/*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
+ * Copyright (C) 2015, 2016 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,16 +11,10 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
-#ifndef __ASM_ARM_KVM_VGIC_H
-#define __ASM_ARM_KVM_VGIC_H
-
-#ifdef CONFIG_KVM_NEW_VGIC
-#include <kvm/vgic/vgic.h>
-#else
+#ifndef __KVM_ARM_VGIC_H
+#define __KVM_ARM_VGIC_H
#include <linux/kernel.h>
#include <linux/kvm.h>
@@ -29,248 +22,188 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <kvm/iodev.h>
-#include <linux/irqchip/arm-gic-common.h>
+#include <linux/list.h>
-#define VGIC_NR_IRQS_LEGACY 256
+#define VGIC_V3_MAX_CPUS 255
+#define VGIC_V2_MAX_CPUS 8
+#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
+#define VGIC_MAX_SPI 1019
+#define VGIC_MAX_RESERVED 1023
+#define VGIC_MIN_LPI 8192
+#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
-#define VGIC_V2_MAX_LRS (1 << 6)
-#define VGIC_V3_MAX_LRS 16
-#define VGIC_MAX_IRQS 1024
-#define VGIC_V2_MAX_CPUS 8
-#define VGIC_V3_MAX_CPUS 255
+enum vgic_type {
+ VGIC_V2, /* Good ol' GICv2 */
+ VGIC_V3, /* New fancy GICv3 */
+};
-#if (VGIC_NR_IRQS_LEGACY & 31)
-#error "VGIC_NR_IRQS must be a multiple of 32"
-#endif
+/* same for all guests, as depending only on the _host's_ GIC model */
+struct vgic_global {
+ /* type of the host GIC */
+ enum vgic_type type;
-#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
-#error "VGIC_NR_IRQS must be <= 1024"
-#endif
+ /* Physical address of vgic virtual cpu interface */
+ phys_addr_t vcpu_base;
-/*
- * The GIC distributor registers describing interrupts have two parts:
- * - 32 per-CPU interrupts (SGI + PPI)
- * - a bunch of shared interrupts (SPI)
- */
-struct vgic_bitmap {
- /*
- * - One UL per VCPU for private interrupts (assumes UL is at
- * least 32 bits)
- * - As many UL as necessary for shared interrupts.
- *
- * The private interrupts are accessed via the "private"
- * field, one UL per vcpu (the state for vcpu n is in
- * private[n]). The shared interrupts are accessed via the
- * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
- */
- unsigned long *private;
- unsigned long *shared;
-};
+ /* virtual control interface mapping */
+ void __iomem *vctrl_base;
-struct vgic_bytemap {
- /*
- * - 8 u32 per VCPU for private interrupts
- * - As many u32 as necessary for shared interrupts.
- *
- * The private interrupts are accessed via the "private"
- * field, (the state for vcpu n is in private[n*8] to
- * private[n*8 + 7]). The shared interrupts are accessed via
- * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
- * shared[(n-32)/4] word).
- */
- u32 *private;
- u32 *shared;
-};
+ /* Number of implemented list registers */
+ int nr_lr;
-struct kvm_vcpu;
+ /* Maintenance IRQ number */
+ unsigned int maint_irq;
-enum vgic_type {
- VGIC_V2, /* Good ol' GICv2 */
- VGIC_V3, /* New fancy GICv3 */
+ /* maximum number of VCPUs allowed (GICv2 limits us to 8) */
+ int max_gic_vcpus;
+
+ /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+ bool can_emulate_gicv2;
};
-#define LR_STATE_PENDING (1 << 0)
-#define LR_STATE_ACTIVE (1 << 1)
-#define LR_STATE_MASK (3 << 0)
-#define LR_EOI_INT (1 << 2)
-#define LR_HW (1 << 3)
+extern struct vgic_global kvm_vgic_global_state;
-struct vgic_lr {
- unsigned irq:10;
- union {
- unsigned hwirq:10;
- unsigned source:3;
- };
- unsigned state:4;
-};
+#define VGIC_V2_MAX_LRS (1 << 6)
+#define VGIC_V3_MAX_LRS 16
+#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
-struct vgic_vmcr {
- u32 ctlr;
- u32 abpr;
- u32 bpr;
- u32 pmr;
+enum vgic_irq_config {
+ VGIC_CONFIG_EDGE = 0,
+ VGIC_CONFIG_LEVEL
};
-struct vgic_ops {
- struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
- void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
- u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
- u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
- void (*clear_eisr)(struct kvm_vcpu *vcpu);
- u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
- void (*enable_underflow)(struct kvm_vcpu *vcpu);
- void (*disable_underflow)(struct kvm_vcpu *vcpu);
- void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
- void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
- void (*enable)(struct kvm_vcpu *vcpu);
+struct vgic_irq {
+ spinlock_t irq_lock; /* Protects the content of the struct */
+ struct list_head lpi_list; /* Used to link all LPIs together */
+ struct list_head ap_list;
+
+ struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
+ * SPIs and LPIs: The VCPU whose ap_list
+ * this is queued on.
+ */
+
+ struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should
+ * be sent to, as a result of the
+ * targets reg (v2) or the
+ * affinity reg (v3).
+ */
+
+ u32 intid; /* Guest visible INTID */
+ bool pending;
+ bool line_level; /* Level only */
+ bool soft_pending; /* Level only */
+ bool active; /* not used for LPIs */
+ bool enabled;
+ bool hw; /* Tied to HW IRQ */
+ struct kref refcount; /* Used for LPIs */
+ u32 hwintid; /* HW INTID number */
+ union {
+ u8 targets; /* GICv2 target VCPUs mask */
+ u32 mpidr; /* GICv3 target VCPU */
+ };
+ u8 source; /* GICv2 SGIs only */
+ u8 priority;
+ enum vgic_irq_config config; /* Level or edge */
};
-struct vgic_params {
- /* vgic type */
- enum vgic_type type;
- /* Physical address of vgic virtual cpu interface */
- phys_addr_t vcpu_base;
- /* Number of list registers */
- u32 nr_lr;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface base address */
- void __iomem *vctrl_base;
- int max_gic_vcpus;
- /* Only needed for the legacy KVM_CREATE_IRQCHIP */
- bool can_emulate_gicv2;
-};
+struct vgic_register_region;
+struct vgic_its;
-struct vgic_vm_ops {
- bool (*queue_sgi)(struct kvm_vcpu *, int irq);
- void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
- int (*init_model)(struct kvm *);
- int (*map_resources)(struct kvm *, const struct vgic_params *);
+enum iodev_type {
+ IODEV_CPUIF,
+ IODEV_DIST,
+ IODEV_REDIST,
+ IODEV_ITS
};
struct vgic_io_device {
- gpa_t addr;
- int len;
- const struct vgic_io_range *reg_ranges;
- struct kvm_vcpu *redist_vcpu;
+ gpa_t base_addr;
+ union {
+ struct kvm_vcpu *redist_vcpu;
+ struct vgic_its *its;
+ };
+ const struct vgic_register_region *regions;
+ enum iodev_type iodev_type;
+ int nr_regions;
struct kvm_io_device dev;
};
-struct irq_phys_map {
- u32 virt_irq;
- u32 phys_irq;
-};
-
-struct irq_phys_map_entry {
- struct list_head entry;
- struct rcu_head rcu;
- struct irq_phys_map map;
+struct vgic_its {
+ /* The base address of the ITS control register frame */
+ gpa_t vgic_its_base;
+
+ bool enabled;
+ bool initialized;
+ struct vgic_io_device iodev;
+ struct kvm_device *dev;
+
+ /* These registers correspond to GITS_BASER{0,1} */
+ u64 baser_device_table;
+ u64 baser_coll_table;
+
+ /* Protects the command queue */
+ struct mutex cmd_lock;
+ u64 cbaser;
+ u32 creadr;
+ u32 cwriter;
+
+ /* Protects the device and collection lists */
+ struct mutex its_lock;
+ struct list_head device_list;
+ struct list_head collection_list;
};
struct vgic_dist {
- spinlock_t lock;
bool in_kernel;
bool ready;
+ bool initialized;
/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
u32 vgic_model;
- int nr_cpus;
- int nr_irqs;
+ /* Do injected MSIs require an additional device ID? */
+ bool msis_require_devid;
+ int nr_spis;
+
+ /* TODO: Consider moving to global state */
/* Virtual control interface mapping */
void __iomem *vctrl_base;
- /* Distributor and vcpu interface mapping in the guest */
- phys_addr_t vgic_dist_base;
- /* GICv2 and GICv3 use different mapped register blocks */
+ /* base addresses in guest physical address space: */
+ gpa_t vgic_dist_base; /* distributor */
union {
- phys_addr_t vgic_cpu_base;
- phys_addr_t vgic_redist_base;
+ /* either a GICv2 CPU interface */
+ gpa_t vgic_cpu_base;
+ /* or a number of GICv3 redistributor regions */
+ gpa_t vgic_redist_base;
};
- /* Distributor enabled */
- u32 enabled;
-
- /* Interrupt enabled (one bit per IRQ) */
- struct vgic_bitmap irq_enabled;
-
- /* Level-triggered interrupt external input is asserted */
- struct vgic_bitmap irq_level;
-
- /*
- * Interrupt state is pending on the distributor
- */
- struct vgic_bitmap irq_pending;
-
- /*
- * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
- * interrupts. Essentially holds the state of the flip-flop in
- * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
- * Once set, it is only cleared for level-triggered interrupts on
- * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
- */
- struct vgic_bitmap irq_soft_pend;
-
- /* Level-triggered interrupt queued on VCPU interface */
- struct vgic_bitmap irq_queued;
+ /* distributor enabled */
+ bool enabled;
- /* Interrupt was active when unqueue from VCPU interface */
- struct vgic_bitmap irq_active;
+ struct vgic_irq *spis;
- /* Interrupt priority. Not used yet. */
- struct vgic_bytemap irq_priority;
-
- /* Level/edge triggered */
- struct vgic_bitmap irq_cfg;
-
- /*
- * Source CPU per SGI and target CPU:
- *
- * Each byte represent a SGI observable on a VCPU, each bit of
- * this byte indicating if the corresponding VCPU has
- * generated this interrupt. This is a GICv2 feature only.
- *
- * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
- * the SGIs observable on VCPUn.
- */
- u8 *irq_sgi_sources;
+ struct vgic_io_device dist_iodev;
- /*
- * Target CPU for each SPI:
- *
- * Array of available SPI, each byte indicating the target
- * VCPU for SPI. IRQn (n >=32) is at irq_spi_cpu[n-32].
- */
- u8 *irq_spi_cpu;
+ bool has_its;
/*
- * Reverse lookup of irq_spi_cpu for faster compute pending:
- *
- * Array of bitmaps, one per VCPU, describing if IRQn is
- * routed to a particular VCPU.
+ * Contains the attributes and gpa of the LPI configuration table.
+ * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
+ * one address across all redistributors.
+ * GICv3 spec: 6.1.2 "LPI Configuration tables"
*/
- struct vgic_bitmap *irq_spi_target;
-
- /* Target MPIDR for each IRQ (needed for GICv3 IROUTERn) only */
- u32 *irq_spi_mpidr;
-
- /* Bitmap indicating which CPU has something pending */
- unsigned long *irq_pending_on_cpu;
+ u64 propbaser;
- /* Bitmap indicating which CPU has active IRQs */
- unsigned long *irq_active_on_cpu;
-
- struct vgic_vm_ops vm_ops;
- struct vgic_io_device dist_iodev;
- struct vgic_io_device *redist_iodevs;
-
- /* Virtual irq to hwirq mapping */
- spinlock_t irq_phys_map_lock;
- struct list_head irq_phys_map_list;
+ /* Protects the lpi_list and the count value below. */
+ spinlock_t lpi_list_lock;
+ struct list_head lpi_list_head;
+ int lpi_list_count;
};
struct vgic_v2_cpu_if {
@@ -298,78 +231,94 @@ struct vgic_v3_cpu_if {
};
struct vgic_cpu {
- /* Pending/active/both interrupts on this VCPU */
- DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
- DECLARE_BITMAP(active_percpu, VGIC_NR_PRIVATE_IRQS);
- DECLARE_BITMAP(pend_act_percpu, VGIC_NR_PRIVATE_IRQS);
-
- /* Pending/active/both shared interrupts, dynamically sized */
- unsigned long *pending_shared;
- unsigned long *active_shared;
- unsigned long *pend_act_shared;
-
/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
- /* Protected by the distributor's irq_phys_map_lock */
- struct list_head irq_phys_map_list;
+ unsigned int used_lrs;
+ struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
- u64 live_lrs;
-};
+ spinlock_t ap_list_lock; /* Protects the ap_list */
-#define LR_EMPTY 0xff
+ /*
+ * List of IRQs that this VCPU should consider because they are either
+ * Active or Pending (hence the name; AP list), or because they recently
+ * were one of the two and need to be migrated off this list to another
+ * VCPU.
+ */
+ struct list_head ap_list_head;
+
+ u64 live_lrs;
+
+ /*
+ * Members below are used with GICv3 emulation only and represent
+ * parts of the redistributor.
+ */
+ struct vgic_io_device rd_iodev;
+ struct vgic_io_device sgi_iodev;
-#define INT_STATUS_EOI (1 << 0)
-#define INT_STATUS_UNDERFLOW (1 << 1)
+ /* Contains the attributes and gpa of the LPI pending tables. */
+ u64 pendbaser;
-struct kvm;
-struct kvm_vcpu;
+ bool lpis_enabled;
+};
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
-int kvm_vgic_hyp_init(void);
-int kvm_vgic_map_resources(struct kvm *kvm);
-int kvm_vgic_get_max_vcpus(void);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
-void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+int kvm_vgic_map_resources(struct kvm *kvm);
+int kvm_vgic_hyp_init(void);
+
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level);
-int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
- unsigned int virt_irq, bool level);
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
-int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
-#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
+#define vgic_initialized(k) ((k)->arch.vgic.initialized)
#define vgic_ready(k) ((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
- ((i) < (k)->arch.vgic.nr_irqs))
+ ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
+
+bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
-int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
- const struct vgic_ops **ops,
- const struct vgic_params **params);
#ifdef CONFIG_KVM_ARM_VGIC_V3
-int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
- const struct vgic_ops **ops,
- const struct vgic_params **params);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
#else
-static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
+static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
- return -ENODEV;
}
#endif
-#endif /* old VGIC include */
-#endif
+/**
+ * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
+ *
+ * The host's GIC naturally limits the maximum number of VCPUs a guest
+ * can use.
+ */
+static inline int kvm_vgic_get_max_vcpus(void)
+{
+ return kvm_vgic_global_state.max_gic_vcpus;
+}
+
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
+/**
+ * kvm_vgic_setup_default_irq_routing:
+ * Setup a default flat gsi routing table mapping all SPIs
+ */
+int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
+
+#endif /* __KVM_ARM_VGIC_H */
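
As a hedged sketch (not taken from the patch), this is roughly how irqchip code could raise a level-triggered SPI through the interfaces declared above; the wrapper name and error handling are illustrative assumptions.

/*
 * Hedged sketch: assert a level-triggered SPI for a guest using the
 * declarations above.  "my_assert_spi" is an illustrative name only.
 */
static int my_assert_spi(struct kvm *kvm, unsigned int intid)
{
	if (!vgic_initialized(kvm) || !vgic_valid_spi(kvm, intid))
		return -EINVAL;

	/* cpuid only matters for private IRQs; SPIs use the distributor's target */
	return kvm_vgic_inject_irq(kvm, 0, intid, true);
}
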
diff --git a/include/kvm/vgic/vgic.h b/include/kvm/vgic/vgic.h
deleted file mode 100644
index 3fbd175265ae..000000000000
--- a/include/kvm/vgic/vgic.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (C) 2015, 2016 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_ARM_KVM_VGIC_VGIC_H
-#define __ASM_ARM_KVM_VGIC_VGIC_H
-
-#include <linux/kernel.h>
-#include <linux/kvm.h>
-#include <linux/irqreturn.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <kvm/iodev.h>
-
-#define VGIC_V3_MAX_CPUS 255
-#define VGIC_V2_MAX_CPUS 8
-#define VGIC_NR_IRQS_LEGACY 256
-#define VGIC_NR_SGIS 16
-#define VGIC_NR_PPIS 16
-#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
-#define VGIC_MAX_SPI 1019
-#define VGIC_MAX_RESERVED 1023
-#define VGIC_MIN_LPI 8192
-
-enum vgic_type {
- VGIC_V2, /* Good ol' GICv2 */
- VGIC_V3, /* New fancy GICv3 */
-};
-
-/* same for all guests, as depending only on the _host's_ GIC model */
-struct vgic_global {
- /* type of the host GIC */
- enum vgic_type type;
-
- /* Physical address of vgic virtual cpu interface */
- phys_addr_t vcpu_base;
-
- /* virtual control interface mapping */
- void __iomem *vctrl_base;
-
- /* Number of implemented list registers */
- int nr_lr;
-
- /* Maintenance IRQ number */
- unsigned int maint_irq;
-
- /* maximum number of VCPUs allowed (GICv2 limits us to 8) */
- int max_gic_vcpus;
-
- /* Only needed for the legacy KVM_CREATE_IRQCHIP */
- bool can_emulate_gicv2;
-};
-
-extern struct vgic_global kvm_vgic_global_state;
-
-#define VGIC_V2_MAX_LRS (1 << 6)
-#define VGIC_V3_MAX_LRS 16
-#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
-
-enum vgic_irq_config {
- VGIC_CONFIG_EDGE = 0,
- VGIC_CONFIG_LEVEL
-};
-
-struct vgic_irq {
- spinlock_t irq_lock; /* Protects the content of the struct */
- struct list_head ap_list;
-
- struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
- * SPIs and LPIs: The VCPU whose ap_list
- * this is queued on.
- */
-
- struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should
- * be sent to, as a result of the
- * targets reg (v2) or the
- * affinity reg (v3).
- */
-
- u32 intid; /* Guest visible INTID */
- bool pending;
- bool line_level; /* Level only */
- bool soft_pending; /* Level only */
- bool active; /* not used for LPIs */
- bool enabled;
- bool hw; /* Tied to HW IRQ */
- u32 hwintid; /* HW INTID number */
- union {
- u8 targets; /* GICv2 target VCPUs mask */
- u32 mpidr; /* GICv3 target VCPU */
- };
- u8 source; /* GICv2 SGIs only */
- u8 priority;
- enum vgic_irq_config config; /* Level or edge */
-};
-
-struct vgic_register_region;
-
-struct vgic_io_device {
- gpa_t base_addr;
- struct kvm_vcpu *redist_vcpu;
- const struct vgic_register_region *regions;
- int nr_regions;
- struct kvm_io_device dev;
-};
-
-struct vgic_dist {
- bool in_kernel;
- bool ready;
- bool initialized;
-
- /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
- u32 vgic_model;
-
- int nr_spis;
-
- /* TODO: Consider moving to global state */
- /* Virtual control interface mapping */
- void __iomem *vctrl_base;
-
- /* base addresses in guest physical address space: */
- gpa_t vgic_dist_base; /* distributor */
- union {
- /* either a GICv2 CPU interface */
- gpa_t vgic_cpu_base;
- /* or a number of GICv3 redistributor regions */
- gpa_t vgic_redist_base;
- };
-
- /* distributor enabled */
- bool enabled;
-
- struct vgic_irq *spis;
-
- struct vgic_io_device dist_iodev;
- struct vgic_io_device *redist_iodevs;
-};
-
-struct vgic_v2_cpu_if {
- u32 vgic_hcr;
- u32 vgic_vmcr;
- u32 vgic_misr; /* Saved only */
- u64 vgic_eisr; /* Saved only */
- u64 vgic_elrsr; /* Saved only */
- u32 vgic_apr;
- u32 vgic_lr[VGIC_V2_MAX_LRS];
-};
-
-struct vgic_v3_cpu_if {
-#ifdef CONFIG_KVM_ARM_VGIC_V3
- u32 vgic_hcr;
- u32 vgic_vmcr;
- u32 vgic_sre; /* Restored only, change ignored */
- u32 vgic_misr; /* Saved only */
- u32 vgic_eisr; /* Saved only */
- u32 vgic_elrsr; /* Saved only */
- u32 vgic_ap0r[4];
- u32 vgic_ap1r[4];
- u64 vgic_lr[VGIC_V3_MAX_LRS];
-#endif
-};
-
-struct vgic_cpu {
- /* CPU vif control registers for world switch */
- union {
- struct vgic_v2_cpu_if vgic_v2;
- struct vgic_v3_cpu_if vgic_v3;
- };
-
- unsigned int used_lrs;
- struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
-
- spinlock_t ap_list_lock; /* Protects the ap_list */
-
- /*
- * List of IRQs that this VCPU should consider because they are either
- * Active or Pending (hence the name; AP list), or because they recently
- * were one of the two and need to be migrated off this list to another
- * VCPU.
- */
- struct list_head ap_list_head;
-
- u64 live_lrs;
-};
-
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
-void kvm_vgic_early_init(struct kvm *kvm);
-int kvm_vgic_create(struct kvm *kvm, u32 type);
-void kvm_vgic_destroy(struct kvm *kvm);
-void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
-int kvm_vgic_map_resources(struct kvm *kvm);
-int kvm_vgic_hyp_init(void);
-
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level);
-int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-
-int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-
-#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
-#define vgic_initialized(k) ((k)->arch.vgic.initialized)
-#define vgic_ready(k) ((k)->arch.vgic.ready)
-#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
- ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
-
-bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
-void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
-void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
-
-#ifdef CONFIG_KVM_ARM_VGIC_V3
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
-#else
-static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
-{
-}
-#endif
-
-/**
- * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
- *
- * The host's GIC naturally limits the maximum amount of VCPUs a guest
- * can use.
- */
-static inline int kvm_vgic_get_max_vcpus(void)
-{
- return kvm_vgic_global_state.max_gic_vcpus;
-}
-
-#endif /* __ASM_ARM_KVM_VGIC_VGIC_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index db7c8bd39a3c..4d8452c2384b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -608,6 +608,12 @@ static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwn
return NULL;
}
+static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+ const char *name)
+{
+ return false;
+}
+
static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
{
return NULL;
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 3f103076d0bf..c357f27d5483 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -163,6 +163,7 @@ struct backing_dev_info {
wait_queue_head_t wb_waitq;
struct device *dev;
+ struct device *owner;
struct timer_list laptop_mode_wb_timer;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 491a91717788..43b93a947e61 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,6 +24,7 @@ __printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 314b3caa701c..1303b570b18c 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -113,6 +113,8 @@ extern int suid_dumpable;
extern int setup_arg_pages(struct linux_binprm * bprm,
unsigned long stack_top,
int executable_stack);
+extern int transfer_args_to_stack(struct linux_binprm *bprm,
+ unsigned long *sp_location);
extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 583c10810e32..59ffaa68b11b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -95,7 +95,7 @@ static inline bool bio_is_rw(struct bio *bio)
static inline bool bio_mergeable(struct bio *bio)
{
- if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+ if (bio->bi_opf & REQ_NOMERGE_FLAGS)
return false;
return true;
@@ -318,7 +318,7 @@ struct bio_integrity_payload {
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
- if (bio->bi_rw & REQ_INTEGRITY)
+ if (bio->bi_opf & REQ_INTEGRITY)
return bio->bi_integrity;
return NULL;
@@ -470,11 +470,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
struct cgroup_subsys_state *blkcg_css) { return 0; }
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
+static inline void bio_clone_blkcg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 27bfc0b631a9..598bc999f4c2 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -266,13 +266,12 @@ static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
- return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
#ifdef CONFIG_S390
- else if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
+ if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
return !memcmp(src1, src2, nbits / 8);
#endif
- else
- return __bitmap_equal(src1, src2, nbits);
+ return __bitmap_equal(src1, src2, nbits);
}
static inline int bitmap_intersects(const unsigned long *src1,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index f77150a4a96a..10648e300c93 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -714,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
+ blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
+ blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
}
rcu_read_unlock();
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f254eb264924..436f43f87da9 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -27,8 +27,9 @@ struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
int bi_error;
- unsigned int bi_rw; /* bottom bits req flags,
- * top bits REQ_OP
+ unsigned int bi_opf; /* bottom bits req flags,
+ * top bits REQ_OP. Use
+ * accessors.
*/
unsigned short bi_flags; /* status, command, etc */
unsigned short bi_ioprio;
@@ -89,13 +90,13 @@ struct bio {
};
#define BIO_OP_SHIFT (8 * sizeof(unsigned int) - REQ_OP_BITS)
-#define bio_op(bio) ((bio)->bi_rw >> BIO_OP_SHIFT)
+#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
#define bio_set_op_attrs(bio, op, op_flags) do { \
WARN_ON(op >= (1 << REQ_OP_BITS)); \
- (bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1); \
- (bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT); \
- (bio)->bi_rw |= op_flags; \
+ (bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \
+ (bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
+ (bio)->bi_opf |= op_flags; \
} while (0)
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
@@ -138,7 +139,7 @@ struct bio {
/*
* Request flags. For use in the cmd_flags field of struct request, and in
- * bi_rw of struct bio. Note that some flags are only valid in either one.
+ * bi_opf of struct bio. Note that some flags are only valid in either one.
*/
enum rq_flag_bits {
/* common flags */
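
A hedged illustration of the rename: callers are expected to go through the accessors rather than poke bi_opf directly. REQ_OP_WRITE and REQ_SYNC are assumed to be the usual op and flag constants of this kernel; the function is a placeholder.

/*
 * Hedged sketch: setting and reading a bio's op and flags through the
 * accessors after the bi_rw -> bi_opf rename.  Illustration only.
 */
static void my_prepare_bio(struct bio *bio)
{
	/* the op lands in the top bits of bi_opf, the flags in the lower bits */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);

	if (bio_op(bio) == REQ_OP_WRITE && (bio->bi_opf & REQ_SYNC)) {
		/* ... treat as a synchronous write ... */
	}
}
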
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index adf33079771e..2c210b6a7bcf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -47,7 +47,6 @@ struct pr_ops;
*/
#define BLKCG_MAX_POLS 2
-struct request;
typedef void (rq_end_io_fn)(struct request *, int);
#define BLK_RL_SYNCFULL (1U << 0)
@@ -1673,7 +1672,7 @@ struct blk_dax_ctl {
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 5f3c63dde2d5..dbc21c719ce6 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -38,6 +38,7 @@ struct cpu_vfs_cap_data {
struct file;
struct inode;
struct dentry;
+struct task_struct;
struct user_namespace;
extern const kernel_cap_t __cap_empty_set;
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index dfce616002ad..7868d602c0a0 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -34,9 +34,9 @@
#define CEPH_MAX_MON 31
/*
- * ceph_file_layout - describe data layout for a file/inode
+ * legacy ceph_file_layout
*/
-struct ceph_file_layout {
+struct ceph_file_layout_legacy {
/* file -> object mapping */
__le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
of page size. */
@@ -53,33 +53,27 @@ struct ceph_file_layout {
__le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
} __attribute__ ((packed));
-#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
-#define ceph_file_layout_stripe_count(l) \
- ((__s32)le32_to_cpu((l).fl_stripe_count))
-#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
-#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
-#define ceph_file_layout_object_su(l) \
- ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
-#define ceph_file_layout_pg_pool(l) \
- ((__s32)le32_to_cpu((l).fl_pg_pool))
-
-static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
-{
- return le32_to_cpu(l->fl_stripe_unit) *
- le32_to_cpu(l->fl_stripe_count);
-}
-
-/* "period" == bytes before i start on a new set of objects */
-static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
-{
- return le32_to_cpu(l->fl_object_size) *
- le32_to_cpu(l->fl_stripe_count);
-}
+struct ceph_string;
+/*
+ * ceph_file_layout - describe data layout for a file/inode
+ */
+struct ceph_file_layout {
+ /* file -> object mapping */
+ u32 stripe_unit; /* stripe unit, in bytes */
+ u32 stripe_count; /* over this many objects */
+ u32 object_size; /* until objects are this big */
+ s64 pool_id; /* rados pool id */
+ struct ceph_string __rcu *pool_ns; /* rados pool namespace */
+};
+
+extern int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
+extern void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy);
+extern void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy);
#define CEPH_MIN_STRIPE_UNIT 65536
-int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
-
struct ceph_dir_layout {
__u8 dl_dir_hash; /* see ceph_hash.h for ids */
__u8 dl_unused1;
@@ -127,6 +121,7 @@ struct ceph_dir_layout {
/* client <-> mds */
#define CEPH_MSG_MDS_MAP 21
+#define CEPH_MSG_FS_MAP_USER 103
#define CEPH_MSG_CLIENT_SESSION 22
#define CEPH_MSG_CLIENT_RECONNECT 23
@@ -399,7 +394,7 @@ union ceph_mds_request_args {
__le32 flags;
} __attribute__ ((packed)) setxattr;
struct {
- struct ceph_file_layout layout;
+ struct ceph_file_layout_legacy layout;
} __attribute__ ((packed)) setlayout;
struct {
__u8 rule; /* currently fcntl or flock */
@@ -478,7 +473,7 @@ struct ceph_mds_reply_inode {
__le64 version; /* inode version */
__le64 xattr_version; /* version for xattr blob */
struct ceph_mds_reply_cap cap; /* caps issued for this inode */
- struct ceph_file_layout layout;
+ struct ceph_file_layout_legacy layout;
struct ceph_timespec ctime, mtime, atime;
__le32 time_warp_seq;
__le64 size, max_size, truncate_size;
@@ -531,7 +526,7 @@ struct ceph_filelock {
#define CEPH_FILE_MODE_WR 2
#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
-#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */
+#define CEPH_FILE_MODE_BITS 4
int ceph_flags_to_mode(int flags);
@@ -673,7 +668,7 @@ struct ceph_mds_caps {
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
- struct ceph_file_layout layout;
+ struct ceph_file_layout_legacy layout;
__le32 time_warp_seq;
} __attribute__ ((packed));
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 19e9932f3e77..f990f2cc907a 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -3,6 +3,7 @@
#include <linux/err.h>
#include <linux/bug.h>
+#include <linux/slab.h>
#include <linux/time.h>
#include <asm/unaligned.h>
@@ -217,6 +218,60 @@ static inline void ceph_encode_string(void **p, void *end,
*p += len;
}
+/*
+ * version and length starting block encoders/decoders
+ */
+
+/* current code version (u8) + compat code version (u8) + len of struct (u32) */
+#define CEPH_ENCODING_START_BLK_LEN 6
+
+/**
+ * ceph_start_encoding - start encoding block
+ * @struct_v: current (code) version of the encoding
+ * @struct_compat: oldest code version that can decode it
+ * @struct_len: length of struct encoding
+ */
+static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat,
+ u32 struct_len)
+{
+ ceph_encode_8(p, struct_v);
+ ceph_encode_8(p, struct_compat);
+ ceph_encode_32(p, struct_len);
+}
+
+/**
+ * ceph_start_decoding - start decoding block
+ * @v: current version of the encoding that the code supports
+ * @name: name of the struct (free-form)
+ * @struct_v: out param for the encoding version
+ * @struct_len: out param for the length of struct encoding
+ *
+ * Validates the length of struct encoding, so unsafe ceph_decode_*
+ * variants can be used for decoding.
+ */
+static inline int ceph_start_decoding(void **p, void *end, u8 v,
+ const char *name, u8 *struct_v,
+ u32 *struct_len)
+{
+ u8 struct_compat;
+
+ ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad);
+ *struct_v = ceph_decode_8(p);
+ struct_compat = ceph_decode_8(p);
+ if (v < struct_compat) {
+ pr_warn("got struct_v %d struct_compat %d > %d of %s\n",
+ *struct_v, struct_compat, v, name);
+ return -EINVAL;
+ }
+
+ *struct_len = ceph_decode_32(p);
+ ceph_decode_need(p, end, *struct_len, bad);
+ return 0;
+
+bad:
+ return -ERANGE;
+}
+
#define ceph_encode_need(p, end, n, bad) \
do { \
if (!likely(ceph_has_room(p, end, n))) \
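
A hedged sketch of how the new start-decoding helper is meant to be used; "my_struct", the supported version and the surrounding buffer handling are assumptions for illustration, not part of the patch.

/*
 * Hedged sketch: decoding a versioned block with ceph_start_decoding().
 */
static int my_decode_block(void **p, void *end)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	/* this code understands encodings up to version 2 of "my_struct" */
	ret = ceph_start_decoding(p, end, 2, "my_struct", &struct_v, &struct_len);
	if (ret)
		return ret;	/* -EINVAL (too new) or -ERANGE (truncated) */

	/* struct_len bytes are known to be present; unsafe decoders may follow */
	return 0;
}
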
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 690985daad1c..83fc1fff7061 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -21,6 +21,7 @@
#include <linux/ceph/mon_client.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/ceph_fs.h>
+#include <linux/ceph/string_table.h>
/*
* mount options
@@ -214,8 +215,9 @@ static void erase_##name(struct rb_root *root, type *t) \
}
#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \
+extern type __lookup_##name##_key; \
static type *lookup_##name(struct rb_root *root, \
- typeof(((type *)0)->keyfld) key) \
+ typeof(__lookup_##name##_key.keyfld) key) \
{ \
struct rb_node *n = root->rb_node; \
\
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index e2a92df08b47..24d704d1ea5c 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -95,7 +95,7 @@ struct ceph_mon_client {
struct ceph_mon_subscribe_item item;
bool want;
u32 have; /* epoch */
- } subs[3];
+ } subs[4];
int fs_cluster_id; /* "mdsmap.<id>" sub */
#ifdef CONFIG_DEBUG_FS
@@ -111,9 +111,10 @@ extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern void ceph_monc_stop(struct ceph_mon_client *monc);
enum {
- CEPH_SUB_MDSMAP = 0,
- CEPH_SUB_MONMAP,
+ CEPH_SUB_MONMAP = 0,
CEPH_SUB_OSDMAP,
+ CEPH_SUB_FSMAP,
+ CEPH_SUB_MDSMAP,
};
extern const char *ceph_sub_str[];
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index 4b0d38960726..ddd0d48d0384 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -2,7 +2,6 @@
#define _FS_CEPH_MSGPOOL
#include <linux/mempool.h>
-#include <linux/ceph/messenger.h>
/*
* we use memory pools for preallocating messages we may receive, to
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 1b3b6e155392..858932304260 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -9,6 +9,7 @@
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/messenger.h>
+#include <linux/ceph/msgpool.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 9ccf4dbe55f8..9a9041784dcf 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -63,11 +63,13 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
struct ceph_object_locator {
s64 pool;
+ struct ceph_string *pool_ns;
};
static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
{
oloc->pool = -1;
+ oloc->pool_ns = NULL;
}
static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
@@ -75,11 +77,9 @@ static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
return oloc->pool == -1;
}
-static inline void ceph_oloc_copy(struct ceph_object_locator *dest,
- const struct ceph_object_locator *src)
-{
- dest->pool = src->pool;
-}
+void ceph_oloc_copy(struct ceph_object_locator *dest,
+ const struct ceph_object_locator *src);
+void ceph_oloc_destroy(struct ceph_object_locator *oloc);
/*
* Maximum supported by kernel client object name length
@@ -115,6 +115,11 @@ static inline void ceph_oid_init(struct ceph_object_id *oid)
oid->name_len = 0;
}
+#define CEPH_OID_INIT_ONSTACK(oid) \
+ ({ ceph_oid_init(&oid); oid; })
+#define CEPH_DEFINE_OID_ONSTACK(oid) \
+ struct ceph_object_id oid = CEPH_OID_INIT_ONSTACK(oid)
+
static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
{
return oid->name == oid->inline_name && !oid->name_len;
diff --git a/include/linux/ceph/string_table.h b/include/linux/ceph/string_table.h
new file mode 100644
index 000000000000..1b02c96daf75
--- /dev/null
+++ b/include/linux/ceph/string_table.h
@@ -0,0 +1,62 @@
+#ifndef _FS_CEPH_STRING_TABLE_H
+#define _FS_CEPH_STRING_TABLE_H
+
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+struct ceph_string {
+ struct kref kref;
+ union {
+ struct rb_node node;
+ struct rcu_head rcu;
+ };
+ size_t len;
+ char str[];
+};
+
+extern void ceph_release_string(struct kref *ref);
+extern struct ceph_string *ceph_find_or_create_string(const char *str,
+ size_t len);
+extern bool ceph_strings_empty(void);
+
+static inline struct ceph_string *ceph_get_string(struct ceph_string *str)
+{
+ kref_get(&str->kref);
+ return str;
+}
+
+static inline void ceph_put_string(struct ceph_string *str)
+{
+ if (!str)
+ return;
+ kref_put(&str->kref, ceph_release_string);
+}
+
+static inline int ceph_compare_string(struct ceph_string *cs,
+ const char *str, size_t len)
+{
+ size_t cs_len = cs ? cs->len : 0;
+ if (cs_len != len)
+ return cs_len - len;
+ if (len == 0)
+ return 0;
+ return strncmp(cs->str, str, len);
+}
+
+#define ceph_try_get_string(x) \
+({ \
+ struct ceph_string *___str; \
+ rcu_read_lock(); \
+ for (;;) { \
+ ___str = rcu_dereference(x); \
+ if (!___str || \
+ kref_get_unless_zero(&___str->kref)) \
+ break; \
+ } \
+ rcu_read_unlock(); \
+ (___str); \
+})
+
+#endif
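
A hedged sketch of typical reference handling with the string table; "struct my_object" and its "pool_ns" member are assumed containers for illustration, not defined by this patch.

/*
 * Hedged sketch: publish and read a refcounted, RCU-protected string.
 * Assumes: struct my_object { struct ceph_string __rcu *pool_ns; };
 */
static int my_set_pool_ns(struct my_object *obj, const char *name)
{
	struct ceph_string *ns;

	ns = ceph_find_or_create_string(name, strlen(name));
	if (!ns)
		return -ENOMEM;

	rcu_assign_pointer(obj->pool_ns, ns);	/* publish; keep our reference */
	return 0;
}

static void my_use_pool_ns(struct my_object *obj)
{
	struct ceph_string *ns = ceph_try_get_string(obj->pool_ns);

	if (ns) {
		/* ... use ns->str and ns->len ... */
		ceph_put_string(ns);
	}
}
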
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d9aef2a0ec8e..c78fc27418f2 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -99,7 +99,8 @@ static inline void context_tracking_init(void) { }
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-static inline void guest_enter(void)
+/* must be called with irqs disabled */
+static inline void guest_enter_irqoff(void)
{
if (vtime_accounting_cpu_enabled())
vtime_guest_enter(current);
@@ -108,9 +109,19 @@ static inline void guest_enter(void)
if (context_tracking_is_enabled())
__context_tracking_enter(CONTEXT_GUEST);
+
+ /* KVM does not hold any references to rcu protected data when it
+ * switches CPU into a guest mode. In fact switching to a guest mode
+ * is very similar to exiting to userspace from rcu point of view. In
+ * addition CPU may stay in a guest mode for quite a long time (up to
+ * one time slice). Lets treat guest mode as quiescent state, just like
+ * we do with user-mode execution.
+ */
+ if (!context_tracking_cpu_is_enabled())
+ rcu_virt_note_context_switch(smp_processor_id());
}
-static inline void guest_exit(void)
+static inline void guest_exit_irqoff(void)
{
if (context_tracking_is_enabled())
__context_tracking_exit(CONTEXT_GUEST);
@@ -122,7 +133,7 @@ static inline void guest_exit(void)
}
#else
-static inline void guest_enter(void)
+static inline void guest_enter_irqoff(void)
{
/*
* This is running in ioctl context so it's safe
@@ -131,9 +142,10 @@ static inline void guest_enter(void)
*/
vtime_account_system(current);
current->flags |= PF_VCPU;
+ rcu_virt_note_context_switch(smp_processor_id());
}
-static inline void guest_exit(void)
+static inline void guest_exit_irqoff(void)
{
/* Flush the guest cputime we spent on the guest */
vtime_account_system(current);
@@ -141,4 +153,22 @@ static inline void guest_exit(void)
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void guest_enter(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_enter_irqoff();
+ local_irq_restore(flags);
+}
+
+static inline void guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit_irqoff();
+ local_irq_restore(flags);
+}
+
#endif
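
As a hedged sketch (not from the patch), the new *_irqoff variants are meant to be called with interrupts already disabled, roughly in this shape; "my_arch_enter_guest" and the wrapper are placeholders.

/*
 * Hedged sketch: shape of an arch vcpu-run path using the irqoff variants.
 */
static void my_vcpu_run_once(struct kvm_vcpu *vcpu)
{
	local_irq_disable();
	guest_enter_irqoff();		/* account vtime; may note an RCU quiescent state */

	my_arch_enter_guest(vcpu);	/* world switch, still with IRQs disabled */

	guest_exit_irqoff();		/* flush guest cputime back to the host */
	local_irq_enable();
}
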
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index e828cf65d7df..da7fbf1cdd56 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -579,7 +579,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
}
/**
- * cpumask_parse - extract a cpumask from from a string
+ * cpumask_parse - extract a cpumask from a string
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 98044a8d1487..5ff3e9a4fe5f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -130,7 +130,7 @@ struct dentry_operations {
int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
- int (*d_compare)(const struct dentry *, const struct dentry *,
+ int (*d_compare)(const struct dentry *,
unsigned int, const char *, const struct qstr *);
int (*d_delete)(const struct dentry *);
int (*d_init)(struct dentry *);
@@ -263,7 +263,7 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, struct qstr *);
+extern void dentry_update_name_case(struct dentry *, const struct qstr *);
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
deleted file mode 100644
index 5246239a4953..000000000000
--- a/include/linux/dma-attrs.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef _DMA_ATTR_H
-#define _DMA_ATTR_H
-
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-
-/**
- * an enum dma_attr represents an attribute associated with a DMA
- * mapping. The semantics of each attribute should be defined in
- * Documentation/DMA-attributes.txt.
- */
-enum dma_attr {
- DMA_ATTR_WRITE_BARRIER,
- DMA_ATTR_WEAK_ORDERING,
- DMA_ATTR_WRITE_COMBINE,
- DMA_ATTR_NON_CONSISTENT,
- DMA_ATTR_NO_KERNEL_MAPPING,
- DMA_ATTR_SKIP_CPU_SYNC,
- DMA_ATTR_FORCE_CONTIGUOUS,
- DMA_ATTR_ALLOC_SINGLE_PAGES,
- DMA_ATTR_MAX,
-};
-
-#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
-
-/**
- * struct dma_attrs - an opaque container for DMA attributes
- * @flags - bitmask representing a collection of enum dma_attr
- */
-struct dma_attrs {
- unsigned long flags[__DMA_ATTRS_LONGS];
-};
-
-#define DEFINE_DMA_ATTRS(x) \
- struct dma_attrs x = { \
- .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \
- }
-
-static inline void init_dma_attrs(struct dma_attrs *attrs)
-{
- bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
-}
-
-/**
- * dma_set_attr - set a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
- if (attrs == NULL)
- return;
- BUG_ON(attr >= DMA_ATTR_MAX);
- __set_bit(attr, attrs->flags);
-}
-
-/**
- * dma_get_attr - check for a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
- if (attrs == NULL)
- return 0;
- BUG_ON(attr >= DMA_ATTR_MAX);
- return test_bit(attr, attrs->flags);
-}
-
-#endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 4551c6f2a6c4..e0b0741ae671 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -242,6 +242,4 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-int dma_buf_debugfs_create_file(const char *name,
- int (*write)(struct seq_file *));
#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 8443bbb5c071..81c5c8d167ad 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -39,7 +39,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* the arch code to take care of attributes and cache maintenance
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+ unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
@@ -56,9 +56,9 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* directly as DMA mapping callbacks for simplicity
*/
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 71c1b215ef66..66533e18276c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -5,13 +5,58 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
+/**
+ * List of possible attributes associated with a DMA mapping. The semantics
+ * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ *
+ * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
+ * forces all pending DMA writes to complete.
+ */
+#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+/*
+ * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
+ * may be weakly ordered, that is that reads and writes may pass each other.
+ */
+#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
+/*
+ * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
+ * buffered to improve performance.
+ */
+#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
+/*
+ * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
+ * consistent or non-consistent memory as it sees fit.
+ */
+#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
+/*
+ * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
+ * virtual mapping for the allocated buffer.
+ */
+#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
+/*
+ * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
+ * the CPU cache for the given buffer assuming that it has been already
+ * transferred to 'device' domain.
+ */
+#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
+/*
+ * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
+ * in physical memory.
+ */
+#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
+/*
+ * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
+ * that it's probably not worth the time to try to allocate memory in a way
+ * that gives better TLB efficiency.
+ */
+#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
+
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot
@@ -21,34 +66,35 @@
struct dma_map_ops {
void* (*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*free)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+ void *, dma_addr_t, size_t,
+ unsigned long attrs);
int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, struct dma_attrs *attrs);
+ dma_addr_t, size_t, unsigned long attrs);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/*
* map_sg returns 0 on error and a value > 0 on success.
* It should never return a value < 0.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*unmap_sg)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
@@ -123,7 +169,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
@@ -142,7 +188,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -158,7 +204,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
*/
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
@@ -176,7 +222,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -195,7 +241,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ addr = ops->map_page(dev, page, offset, size, dir, 0);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
@@ -208,7 +254,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, NULL);
+ ops->unmap_page(dev, addr, size, dir, 0);
debug_dma_unmap_page(dev, addr, size, dir, false);
}
@@ -289,10 +335,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
@@ -321,7 +367,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
*/
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
@@ -330,7 +376,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -338,7 +384,8 @@ dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
@@ -348,7 +395,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
@@ -356,7 +403,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
@@ -378,7 +425,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -398,31 +445,27 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ DMA_ATTR_NON_CONSISTENT);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle,
+ DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -646,9 +689,8 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
@@ -657,9 +699,8 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size,
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
@@ -670,9 +711,8 @@ static inline int dma_mmap_wc(struct device *dev,
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
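
For illustration only (not part of the patch): with attributes now carried in a plain unsigned long, a caller simply ORs DMA_ATTR_* bits into the *_attrs helpers. A minimal sketch, assuming dev and size come from the caller:

	dma_addr_t handle;
	void *vaddr;

	/* Request a write-combined buffer; attrs is just a bit mask now. */
	vaddr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
				DMA_ATTR_WRITE_COMBINE);
	if (!vaddr)
		return -ENOMEM;

	/* ... use the buffer ... */

	dma_free_attrs(dev, size, vaddr, handle, DMA_ATTR_WRITE_COMBINE);
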
diff --git a/include/linux/ds17287rtc.h b/include/linux/ds17287rtc.h
deleted file mode 100644
index d85d3f497b96..000000000000
--- a/include/linux/ds17287rtc.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * ds17287rtc.h - register definitions for the ds1728[57] RTC / CMOS RAM
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) 2003 Guido Guenther <agx@sigxcpu.org>
- */
-#ifndef __LINUX_DS17287RTC_H
-#define __LINUX_DS17287RTC_H
-
-#include <linux/rtc.h> /* get the user-level API */
-#include <linux/mc146818rtc.h>
-
-/* Register A */
-#define DS_REGA_DV2 0x40 /* countdown chain */
-#define DS_REGA_DV1 0x20 /* oscillator enable */
-#define DS_REGA_DV0 0x10 /* bank select */
-
-/* bank 1 registers */
-#define DS_B1_MODEL 0x40 /* model number byte */
-#define DS_B1_SN1 0x41 /* serial number byte 1 */
-#define DS_B1_SN2 0x42 /* serial number byte 2 */
-#define DS_B1_SN3 0x43 /* serial number byte 3 */
-#define DS_B1_SN4 0x44 /* serial number byte 4 */
-#define DS_B1_SN5 0x45 /* serial number byte 5 */
-#define DS_B1_SN6 0x46 /* serial number byte 6 */
-#define DS_B1_CRC 0x47 /* CRC byte */
-#define DS_B1_CENTURY 0x48 /* Century byte */
-#define DS_B1_DALARM 0x49 /* date alarm */
-#define DS_B1_XCTRL4A 0x4a /* extendec control register 4a */
-#define DS_B1_XCTRL4B 0x4b /* extendec control register 4b */
-#define DS_B1_RTCADDR2 0x4e /* rtc address 2 */
-#define DS_B1_RTCADDR3 0x4f /* rtc address 3 */
-#define DS_B1_RAMLSB 0x50 /* extended ram LSB */
-#define DS_B1_RAMMSB 0x51 /* extended ram MSB */
-#define DS_B1_RAMDPORT 0x53 /* extended ram data port */
-
-/* register details */
-/* extended control register 4a */
-#define DS_XCTRL4A_VRT2 0x80 /* valid ram and time */
-#define DS_XCTRL4A_INCR 0x40 /* increment progress status */
-#define DS_XCTRL4A_BME 0x20 /* burst mode enable */
-#define DS_XCTRL4A_PAB 0x08 /* power active bar ctrl */
-#define DS_XCTRL4A_RF 0x04 /* ram clear flag */
-#define DS_XCTRL4A_WF 0x02 /* wake up alarm flag */
-#define DS_XCTRL4A_KF 0x01 /* kickstart flag */
-
-/* interrupt causes */
-#define DS_XCTRL4A_IFS (DS_XCTRL4A_RF|DS_XCTRL4A_WF|DS_XCTRL4A_KF)
-
-/* extended control register 4b */
-#define DS_XCTRL4B_ABE 0x80 /* auxiliary battery enable */
-#define DS_XCTRL4B_E32K 0x40 /* enable 32.768 kHz Output */
-#define DS_XCTRL4B_CS 0x20 /* crystal select */
-#define DS_XCTRL4B_RCE 0x10 /* ram clear enable */
-#define DS_XCTRL4B_PRS 0x08 /* PAB resec select */
-#define DS_XCTRL4B_RIE 0x04 /* ram clear interrupt enable */
-#define DS_XCTRL4B_WFE 0x02 /* wake up alarm interrupt enable */
-#define DS_XCTRL4B_KFE 0x01 /* kickstart interrupt enable */
-
-/* interrupt enable bits */
-#define DS_XCTRL4B_IFES (DS_XCTRL4B_RIE|DS_XCTRL4B_WFE|DS_XCTRL4B_KFE)
-
-#endif /* __LINUX_DS17287RTC_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4f1bbc68cd1b..546d68057e3b 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,6 +1,10 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+#endif
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -33,6 +37,12 @@ struct _ddebug {
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
+#ifdef HAVE_JUMP_LABEL
+ union {
+ struct static_key_true dd_key_true;
+ struct static_key_false dd_key_false;
+ } key;
+#endif
} __attribute__((aligned(8)));
@@ -60,7 +70,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
const struct net_device *dev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \
static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
.modname = KBUILD_MODNAME, \
@@ -68,13 +78,51 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
- .flags = _DPRINTK_FLAGS_DEFAULT, \
+ .flags = _DPRINTK_FLAGS_DEFAULT, \
+ dd_key_init(key, init) \
}
+#ifdef HAVE_JUMP_LABEL
+
+#define dd_key_init(key, init) key = (init)
+
+#ifdef DEBUG
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \
+ (STATIC_KEY_TRUE_INIT))
+
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ static_branch_likely(&descriptor.key.dd_key_true)
+#else
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \
+ (STATIC_KEY_FALSE_INIT))
+
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ static_branch_unlikely(&descriptor.key.dd_key_false)
+#endif
+
+#else
+
+#define dd_key_init(key, init)
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0)
+
+#ifdef DEBUG
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ likely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
+#else
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
+#endif
+
+#endif
+
#define dynamic_pr_debug(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
@@ -82,7 +130,7 @@ do { \
#define dynamic_dev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -90,7 +138,7 @@ do { \
#define dynamic_netdev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_netdev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -100,7 +148,7 @@ do { \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
__builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
print_hex_dump(KERN_DEBUG, prefix_str, \
prefix_type, rowsize, groupsize, \
buf, len, ascii); \
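
For illustration only (not part of the patch): call sites do not change with the jump-label support above; when CONFIG_DYNAMIC_DEBUG is enabled, pr_debug() still expands to dynamic_pr_debug(), only the run-time flag test becomes a patched static branch. A sketch, where struct my_dev is a hypothetical driver type:

	static void my_poll(struct my_dev *md)
	{
		/*
		 * Expands to DEFINE_DYNAMIC_DEBUG_METADATA() plus
		 * DYNAMIC_DEBUG_BRANCH(); the printk path is skipped via a
		 * nop until the site is enabled through dynamic_debug/control.
		 */
		pr_debug("poll: status=%#x\n", md->status);
	}
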
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7f80a75ee9e3..7f5a58225385 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -879,7 +879,7 @@ extern void efi_init (void);
extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec *ts);
+extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
extern void efi_late_init(void);
diff --git a/include/linux/export.h b/include/linux/export.h
index 2f9ccbe6a639..c565f87f005e 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -82,7 +82,7 @@ extern struct module __this_module;
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, sec) \
- __cond_export_sym(sym, sec, config_enabled(__KSYM_##sym))
+ __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, sec, conf) \
___cond_export_sym(sym, sec, conf)
#define ___cond_export_sym(sym, sec, enabled) \
diff --git a/include/linux/extable.h b/include/linux/extable.h
new file mode 100644
index 000000000000..7effea4b257d
--- /dev/null
+++ b/include/linux/extable.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_EXTABLE_H
+#define _LINUX_EXTABLE_H
+
+#include <linux/stddef.h> /* for NULL */
+
+struct module;
+struct exception_table_entry;
+
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value);
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+void sort_main_extable(void);
+void trim_init_extable(struct module *m);
+
+/* Given an address, look for it in the exception tables */
+const struct exception_table_entry *search_exception_tables(unsigned long add);
+
+#ifdef CONFIG_MODULES
+/* For extable.c to search modules' exception tables. */
+const struct exception_table_entry *search_module_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_module_extables(unsigned long addr)
+{
+ return NULL;
+}
+#endif /*CONFIG_MODULES*/
+
+#endif /* _LINUX_EXTABLE_H */
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
new file mode 100644
index 000000000000..86baaa45567c
--- /dev/null
+++ b/include/linux/fence-array.h
@@ -0,0 +1,73 @@
+/*
+ * fence-array: aggregates fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_FENCE_ARRAY_H
+#define __LINUX_FENCE_ARRAY_H
+
+#include <linux/fence.h>
+
+/**
+ * struct fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct fence_array_cb {
+ struct fence_cb cb;
+ struct fence_array *array;
+};
+
+/**
+ * struct fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct fence_array {
+ struct fence base;
+
+ spinlock_t lock;
+ unsigned num_fences;
+ atomic_t num_pending;
+ struct fence **fences;
+};
+
+extern const struct fence_ops fence_array_ops;
+
+/**
+ * to_fence_array - cast a fence to a fence_array
+ * @fence: fence to cast to a fence_array
+ *
+ * Returns NULL if the fence is not a fence_array,
+ * or the fence_array otherwise.
+ */
+static inline struct fence_array *to_fence_array(struct fence *fence)
+{
+ if (fence->ops != &fence_array_ops)
+ return NULL;
+
+ return container_of(fence, struct fence_array, base);
+}
+
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
+#endif /* __LINUX_FENCE_ARRAY_H */
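
For illustration only (not part of the patch): a rough sketch of aggregating two fences the caller already holds references to, using only the declarations above (fence_a and fence_b are placeholders):

	struct fence **fences;
	struct fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;
	fences[0] = fence_a;
	fences[1] = fence_b;

	/* Signal only once *all* fences have signalled. */
	array = fence_array_create(2, fences, fence_context_alloc(1),
				   1, false);
	if (!array) {
		kfree(fences);
		return -ENOMEM;
	}
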
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 1de1b3f6fb76..8cc719a63728 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -77,7 +77,8 @@ struct fence {
struct rcu_head rcu;
struct list_head cb_list;
spinlock_t *lock;
- unsigned context, seqno;
+ u64 context;
+ unsigned seqno;
unsigned long flags;
ktime_t timestamp;
int status;
@@ -178,7 +179,7 @@ struct fence_ops {
};
void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno);
+ spinlock_t *lock, u64 context, unsigned seqno);
void fence_release(struct kref *kref);
void fence_free(struct fence *fence);
@@ -352,27 +353,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
#define FENCE_TRACE(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- if (config_enabled(CONFIG_FENCE_TRACE)) \
- pr_info("f %u#%u: " fmt, \
+ if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
+ pr_info("f %llu#%u: " fmt, \
__ff->context, __ff->seqno, ##args); \
} while (0)
#define FENCE_WARN(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
#define FENCE_ERR(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 5c41c5e75b5c..b1f9f0ccb8ac 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -47,6 +47,8 @@ int request_firmware_nowait(
void (*cont)(const struct firmware *fw, void *context));
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
+int request_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device, void *buf, size_t size);
void release_firmware(const struct firmware *fw);
#else
@@ -75,5 +77,11 @@ static inline int request_firmware_direct(const struct firmware **fw,
return -EINVAL;
}
+static inline int request_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device, void *buf, size_t size)
+{
+ return -EINVAL;
+}
+
#endif
#endif
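
For illustration only (not part of the patch): request_firmware_into_buf() lets the caller supply the storage instead of having the firmware core allocate it. A hedged sketch; the image name, device and pre-allocated buffer are placeholders:

	const struct firmware *fw;
	int err;

	err = request_firmware_into_buf(&fw, "vendor/image.bin", dev,
					prealloc_buf, prealloc_len);
	if (err)
		return err;

	/* fw->data is expected to reference the supplied buffer here. */
	load_image(fw->data, fw->size);		/* hypothetical consumer */

	release_firmware(fw);
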
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 577365a77b47..3523bf62f328 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2652,6 +2652,7 @@ extern int do_pipe_flags(int *, int);
#define __kernel_read_file_id(id) \
id(UNKNOWN, unknown) \
id(FIRMWARE, firmware) \
+ id(FIRMWARE_PREALLOC_BUFFER, firmware) \
id(MODULE, kernel-module) \
id(KEXEC_IMAGE, kexec-image) \
id(KEXEC_INITRAMFS, kexec-initramfs) \
@@ -2746,11 +2747,6 @@ extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
extern int file_remove_privs(struct file *);
-extern int dentry_needs_remove_privs(struct dentry *dentry);
-static inline int file_needs_remove_privs(struct file *file)
-{
- return dentry_needs_remove_privs(file->f_path.dentry);
-}
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0f9bafa17a02..d98780ca9604 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -62,7 +62,6 @@ struct serio;
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-bool i8042_check_port_owner(const struct serio *);
int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
struct serio *serio));
int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
@@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline bool i8042_check_port_owner(const struct serio *serio)
-{
- return false;
-}
-
static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
struct serio *serio))
{
diff --git a/include/linux/init.h b/include/linux/init.h
index aedb254abc37..6935d02474aa 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -77,12 +77,6 @@
#define __refdata __section(.ref.data)
#define __refconst __constsection(.ref.rodata)
-/* compatibility defines */
-#define __init_refok __ref
-#define __initdata_refok __refdata
-#define __exit_refok __ref
-
-
#ifdef MODULE
#define __exitused
#else
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e399029b68c5..645ad06b5d52 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
}
static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
{
resource_size_t phys_addr;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- return ioremap_wc(phys_addr, PAGE_SIZE);
+ return ioremap_wc(phys_addr, size);
}
static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
/* Non-atomic map/unmap */
static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
{
return ((char __force __iomem *) mapping) + offset;
}
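
For illustration only (not part of the patch): with the added size argument, a caller can map more than one page of the aperture at once. A sketch, assuming mapping was set up earlier with io_mapping_create_wc() and offset/len lie within its bounds:

	void __iomem *vaddr;

	vaddr = io_mapping_map_wc(mapping, offset, len);
	if (!vaddr)
		return -ENOMEM;

	/* ... write-combined MMIO accesses ... */

	io_mapping_unmap(vaddr);
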
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 664683aedcce..a35fb8b42e1a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -152,6 +152,7 @@ struct iommu_dm_region {
* @domain_set_attr: Change domain attributes
* @get_dm_regions: Request list of direct mapping requirements for a device
* @put_dm_regions: Free list of direct mapping requirements for a device
+ * @apply_dm_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
@@ -186,6 +187,8 @@ struct iommu_ops {
/* Request/Free a list of direct mapping requirements for a device */
void (*get_dm_regions)(struct device *dev, struct list_head *list);
void (*put_dm_regions)(struct device *dev, struct list_head *list);
+ void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
+ struct iommu_dm_region *region);
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 1eee6bcfcf76..d10e54f03c09 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -63,8 +63,6 @@ struct ipc_namespace {
};
extern struct ipc_namespace init_ipc_ns;
-extern atomic_t nr_ipc_ns;
-
extern spinlock_t mq_lock;
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 107eed475b94..56b0b7ec66aa 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -112,34 +112,76 @@
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
-#define GICR_PROPBASER_NonShareable (0U << 10)
-#define GICR_PROPBASER_InnerShareable (1U << 10)
-#define GICR_PROPBASER_OuterShareable (2U << 10)
-#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
-#define GICR_PROPBASER_nCnB (0U << 7)
-#define GICR_PROPBASER_nC (1U << 7)
-#define GICR_PROPBASER_RaWt (2U << 7)
-#define GICR_PROPBASER_RaWb (3U << 7)
-#define GICR_PROPBASER_WaWt (4U << 7)
-#define GICR_PROPBASER_WaWb (5U << 7)
-#define GICR_PROPBASER_RaWaWt (6U << 7)
-#define GICR_PROPBASER_RaWaWb (7U << 7)
-#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
-#define GICR_PROPBASER_IDBITS_MASK (0x1f)
-
-#define GICR_PENDBASER_NonShareable (0U << 10)
-#define GICR_PENDBASER_InnerShareable (1U << 10)
-#define GICR_PENDBASER_OuterShareable (2U << 10)
-#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
-#define GICR_PENDBASER_nCnB (0U << 7)
-#define GICR_PENDBASER_nC (1U << 7)
-#define GICR_PENDBASER_RaWt (2U << 7)
-#define GICR_PENDBASER_RaWb (3U << 7)
-#define GICR_PENDBASER_WaWt (4U << 7)
-#define GICR_PENDBASER_WaWb (5U << 7)
-#define GICR_PENDBASER_RaWaWt (6U << 7)
-#define GICR_PENDBASER_RaWaWb (7U << 7)
-#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+#define GIC_BASER_CACHE_nCnB 0ULL
+#define GIC_BASER_CACHE_SameAsInner 0ULL
+#define GIC_BASER_CACHE_nC 1ULL
+#define GIC_BASER_CACHE_RaWt 2ULL
+#define GIC_BASER_CACHE_RaWb 3ULL
+#define GIC_BASER_CACHE_WaWt 4ULL
+#define GIC_BASER_CACHE_WaWb 5ULL
+#define GIC_BASER_CACHE_RaWaWt 6ULL
+#define GIC_BASER_CACHE_RaWaWb 7ULL
+#define GIC_BASER_CACHE_MASK 7ULL
+#define GIC_BASER_NonShareable 0ULL
+#define GIC_BASER_InnerShareable 1ULL
+#define GIC_BASER_OuterShareable 2ULL
+#define GIC_BASER_SHAREABILITY_MASK 3ULL
+
+#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \
+ (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT)
+
+#define GIC_BASER_SHAREABILITY(reg, type) \
+ (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT)
+
+#define GICR_PROPBASER_SHAREABILITY_SHIFT (10)
+#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_PROPBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK)
+#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK)
+#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK)
+#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_PROPBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable)
+
+#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB)
+#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC)
+#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
+#define GICR_PROPBASER_RaWb	GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
+#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt)
+#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
+#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt)
+#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb)
+
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
+#define GICR_PENDBASER_SHAREABILITY_SHIFT (10)
+#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_PENDBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK)
+#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK)
+#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK)
+#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_PENDBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable)
+
+#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB)
+#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC)
+#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
+#define GICR_PENDBASER_RaWb	GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)
+#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt)
+#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb)
+#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt)
+#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb)
+
+#define GICR_PENDBASER_PTZ BIT_ULL(62)
/*
* Re-Distributor registers, offsets from SGI_base
@@ -175,54 +217,83 @@
#define GITS_CWRITER 0x0088
#define GITS_CREADR 0x0090
#define GITS_BASER 0x0100
+#define GITS_IDREGS_BASE 0xffd0
+#define GITS_PIDR0 0xffe0
+#define GITS_PIDR1 0xffe4
#define GITS_PIDR2 GICR_PIDR2
+#define GITS_PIDR4 0xffd0
+#define GITS_CIDR0 0xfff0
+#define GITS_CIDR1 0xfff4
+#define GITS_CIDR2 0xfff8
+#define GITS_CIDR3 0xfffc
#define GITS_TRANSLATER 0x10040
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_QUIESCENT (1U << 31)
+#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
-
-#define GITS_CBASER_VALID (1UL << 63)
-#define GITS_CBASER_nCnB (0UL << 59)
-#define GITS_CBASER_nC (1UL << 59)
-#define GITS_CBASER_RaWt (2UL << 59)
-#define GITS_CBASER_RaWb (3UL << 59)
-#define GITS_CBASER_WaWt (4UL << 59)
-#define GITS_CBASER_WaWb (5UL << 59)
-#define GITS_CBASER_RaWaWt (6UL << 59)
-#define GITS_CBASER_RaWaWb (7UL << 59)
-#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
-#define GITS_CBASER_NonShareable (0UL << 10)
-#define GITS_CBASER_InnerShareable (1UL << 10)
-#define GITS_CBASER_OuterShareable (2UL << 10)
-#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_SHAREABILITY_SHIFT (10)
+#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
+#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
+#define GITS_CBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK)
+#define GITS_CBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK)
+#define GITS_CBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK)
+#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK
+
+#define GITS_CBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable)
+
+#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB)
+#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC)
+#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
+#define GITS_CBASER_RaWb	GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
+#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt)
+#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb)
+#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
+#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
#define GITS_BASER_NR_REGS 8
-#define GITS_BASER_VALID (1UL << 63)
-#define GITS_BASER_INDIRECT (1UL << 62)
-#define GITS_BASER_nCnB (0UL << 59)
-#define GITS_BASER_nC (1UL << 59)
-#define GITS_BASER_RaWt (2UL << 59)
-#define GITS_BASER_RaWb (3UL << 59)
-#define GITS_BASER_WaWt (4UL << 59)
-#define GITS_BASER_WaWb (5UL << 59)
-#define GITS_BASER_RaWaWt (6UL << 59)
-#define GITS_BASER_RaWaWb (7UL << 59)
-#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
-#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_INDIRECT (1ULL << 62)
+
+#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59)
+#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53)
+#define GITS_BASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK)
+#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK
+#define GITS_BASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK)
+#define GITS_BASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK)
+
+#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
+#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
+#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+#define GITS_BASER_RaWb		GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
+#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
+#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
+#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
+#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb)
+
+#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
-#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
-#define GITS_BASER_NonShareable (0UL << 10)
-#define GITS_BASER_InnerShareable (1UL << 10)
-#define GITS_BASER_OuterShareable (2UL << 10)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
-#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
@@ -230,6 +301,7 @@
#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
+#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
#define GITS_BASER_TYPE_NONE 0
#define GITS_BASER_TYPE_DEVICE 1
@@ -247,7 +319,10 @@
*/
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPC 0x09
-#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MAPTI 0x0a
+/* older GIC documentation used MAPVI for this command */
+#define GITS_CMD_MAPVI GITS_CMD_MAPTI
+#define GITS_CMD_MAPI 0x0b
#define GITS_CMD_MOVI 0x01
#define GITS_CMD_DISCARD 0x0f
#define GITS_CMD_INV 0x0c
@@ -258,6 +333,22 @@
#define GITS_CMD_SYNC 0x05
/*
+ * ITS error numbers
+ */
+#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
+#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
+#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
+#define E_ITS_MAPD_DEVICE_OOR 0x010801
+#define E_ITS_MAPC_PROCNUM_OOR 0x010902
+#define E_ITS_MAPC_COLLECTION_OOR 0x010903
+#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04
+#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06
+#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07
+#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09
+#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01
+#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07
+
+/*
* CPU interface registers
*/
#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
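
Worked expansion of the new helpers above, for reference (the values follow directly from the defines):

/*
 * GICR_PROPBASER_WaWb
 *	= GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
 *	= GIC_BASER_CACHE_WaWb << GICR_PROPBASER_INNER_CACHEABILITY_SHIFT
 *	= 5ULL << 7
 *
 * GICR_PROPBASER_InnerShareable
 *	= GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable)
 *	= GIC_BASER_InnerShareable << GICR_PROPBASER_SHAREABILITY_SHIFT
 *	= 1ULL << 10
 *
 * i.e. the same bit patterns as the removed per-register defines, now derived
 * from one shared table.
 */
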
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 68904469fba1..661af564fae8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -76,7 +76,6 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/bug.h>
extern bool static_key_initialized;
@@ -115,20 +114,8 @@ enum jump_label_type {
struct module;
-#include <linux/atomic.h>
-
#ifdef HAVE_JUMP_LABEL
-static inline int static_key_count(struct static_key *key)
-{
- /*
- * -1 means the first static_key_slow_inc() is in progress.
- * static_key_enabled() must return true, so return 1 here.
- */
- int n = atomic_read(&key->enabled);
- return n >= 0 ? n : 1;
-}
-
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
@@ -157,16 +144,29 @@ extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern int static_key_count(struct static_key *key);
+extern void static_key_enable(struct static_key *key);
+extern void static_key_disable(struct static_key *key);
+/*
+ * We should be using ATOMIC_INIT() for initializing .enabled, but
+ * the inclusion of atomic.h is problematic for inclusion of jump_label.h
+ * in 'low-level' headers. Thus, we are initializing .enabled with a
+ * raw value, but have added a BUILD_BUG_ON() to catch any issues in
+ * jump_label_init() see: kernel/jump_label.c.
+ */
#define STATIC_KEY_INIT_TRUE \
- { .enabled = ATOMIC_INIT(1), \
+ { .enabled = { 1 }, \
.entries = (void *)JUMP_TYPE_TRUE }
#define STATIC_KEY_INIT_FALSE \
- { .enabled = ATOMIC_INIT(0), \
+ { .enabled = { 0 }, \
.entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
static inline int static_key_count(struct static_key *key)
{
return atomic_read(&key->enabled);
@@ -216,14 +216,6 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
-#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
-
-#endif /* HAVE_JUMP_LABEL */
-
-#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#define jump_label_enabled static_key_enabled
-
static inline void static_key_enable(struct static_key *key)
{
int count = static_key_count(key);
@@ -244,6 +236,14 @@ static inline void static_key_disable(struct static_key *key)
static_key_slow_dec(key);
}
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
+
+#endif /* HAVE_JUMP_LABEL */
+
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
/* -------------------------------------------------------------------------- */
/*
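
For illustration only (not part of the patch): static_key_count(), static_key_enable() and static_key_disable() move out of line for the HAVE_JUMP_LABEL case, but call sites keep the same shape. A sketch with a hypothetical key:

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	void my_feature_set(bool on)
	{
		if (on)
			static_key_enable(&my_key);
		else
			static_key_disable(&my_key);
	}

	bool my_feature_active(void)
	{
		return static_key_false(&my_key);	/* patched branch once enabled */
	}
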
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index c9cf374445d8..d600303306eb 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -56,6 +56,7 @@ void kasan_cache_destroy(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
@@ -102,6 +103,8 @@ static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
+static inline void kasan_init_slab_obj(struct kmem_cache *cache,
+ const void *object) {}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index b33c7797eb57..15ec117ec537 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -3,6 +3,21 @@
#include <generated/autoconf.h>
+#define __ARG_PLACEHOLDER_1 0,
+#define __take_second_arg(__ignored, val, ...) val
+
+/*
+ * The use of "&&" / "||" is limited in certain expressions.
+ * The following compute "and" / "or" with macro expansion only.
+ */
+#define __and(x, y) ___and(x, y)
+#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
+#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
+
+#define __or(x, y) ___or(x, y)
+#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
+#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
+
/*
* Helper macros to use CONFIG_ options in C/CPP expressions. Note that
* these only work with boolean and tristate options.
@@ -16,11 +31,10 @@
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define __ARG_PLACEHOLDER_1 0,
-#define config_enabled(cfg) _config_enabled(cfg)
-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
-#define ___config_enabled(__ignored, val, ...) val
+#define config_enabled(cfg) ___is_defined(cfg)
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
/*
* IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
@@ -41,14 +55,13 @@
* This is similar to IS_ENABLED(), but returns false when invoked from
* built-in code when CONFIG_FOO is set to 'm'.
*/
-#define IS_REACHABLE(option) (config_enabled(option) || \
- (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
+ __and(IS_MODULE(option), __is_defined(MODULE)))
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
* 0 otherwise.
*/
-#define IS_ENABLED(option) \
- (IS_BUILTIN(option) || IS_MODULE(option))
+#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
#endif /* __LINUX_KCONFIG_H */
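
For illustration only (not part of the patch): the rework keeps IS_ENABLED()/IS_REACHABLE() usable both in #if context and as ordinary C expressions, since they now expand through argument counting rather than "&&"/"||". CONFIG_FOO, foo_init() and foo_fast_path() are stand-ins:

#if IS_ENABLED(CONFIG_FOO)		/* 'y' or 'm' */
	/* preprocessor-level selection */
#endif

	if (IS_ENABLED(CONFIG_FOO))	/* also fine as a C expression */
		foo_init();

	if (IS_REACHABLE(CONFIG_FOO))	/* false in built-in code when FOO=m */
		foo_fast_path();
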
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c42082112ec8..d96a6118d26a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -11,7 +11,6 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
-#include <linux/dynamic_debug.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e8acb2b43dd9..d7437777baaa 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,6 +14,8 @@
#if !defined(__ASSEMBLY__)
+#include <asm/io.h>
+
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC_CORE
@@ -41,7 +43,7 @@
#endif
#ifndef KEXEC_CONTROL_MEMORY_GFP
-#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#define KEXEC_CONTROL_MEMORY_GFP (GFP_KERNEL | __GFP_NORETRY)
#endif
#ifndef KEXEC_CONTROL_PAGE_SIZE
@@ -228,12 +230,13 @@ extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
+int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
void crash_save_vmcoreinfo(void);
void arch_crash_save_vmcoreinfo(void);
__printf(1, 2)
void vmcoreinfo_append_str(const char *fmt, ...);
-unsigned long paddr_vmcoreinfo_note(void);
+phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_OSRELEASE(value) \
vmcoreinfo_append_str("OSRELEASE=%s\n", value)
@@ -318,12 +321,51 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);
+#ifndef page_to_boot_pfn
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+ return page_to_pfn(page);
+}
+#endif
+
+#ifndef boot_pfn_to_page
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+ return pfn_to_page(boot_pfn);
+}
+#endif
+
+#ifndef phys_to_boot_phys
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+ return phys;
+}
+#endif
+
+#ifndef boot_phys_to_phys
+static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
+{
+ return boot_phys;
+}
+#endif
+
+static inline unsigned long virt_to_boot_phys(void *addr)
+{
+ return phys_to_boot_phys(__pa((unsigned long)addr));
+}
+
+static inline void *boot_phys_to_virt(unsigned long entry)
+{
+ return phys_to_virt(boot_phys_to_phys(entry));
+}
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+static inline int kexec_crash_loaded(void) { return 0; }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
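
For illustration only (not part of the patch): the *_boot_* helpers default to identity conversions; an architecture whose firmware numbers pages differently would define the macro before the #ifndef blocks above are parsed, roughly as below (the arch function and offset are made up):

	/* in an arch header pulled in ahead of the generic definitions */
	static inline unsigned long arch_page_to_boot_pfn(struct page *page)
	{
		return page_to_pfn(page) + ARCH_BOOT_PFN_OFFSET;
	}
	#define page_to_boot_pfn arch_page_to_boot_pfn
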
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c9c973a7dd9..01e908ac4a39 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -164,6 +164,8 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
+struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ gpa_t addr);
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
@@ -315,7 +317,13 @@ struct kvm_kernel_irq_routing_entry {
unsigned irqchip;
unsigned pin;
} irqchip;
- struct msi_msg msi;
+ struct {
+ u32 address_lo;
+ u32 address_hi;
+ u32 data;
+ u32 flags;
+ u32 devid;
+ } msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
};
@@ -371,7 +379,15 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+
+ /*
+ * created_vcpus is protected by kvm->lock, and is incremented
+ * at the beginning of KVM_CREATE_VCPU. online_vcpus is only
+ * incremented after storing the kvm_vcpu pointer in vcpus,
+ * and is accessed atomically.
+ */
atomic_t online_vcpus;
+ int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
@@ -867,45 +883,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
}
#endif
-/* must be called with irqs disabled */
-static inline void __kvm_guest_enter(void)
-{
- guest_enter();
- /* KVM does not hold any references to rcu protected data when it
- * switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspace from rcu point of view. In
- * addition CPU may stay in a guest mode for quite a long time (up to
- * one time slice). Lets treat guest mode as quiescent state, just like
- * we do with user-mode execution.
- */
- if (!context_tracking_cpu_is_enabled())
- rcu_virt_note_context_switch(smp_processor_id());
-}
-
-/* must be called with irqs disabled */
-static inline void __kvm_guest_exit(void)
-{
- guest_exit();
-}
-
-static inline void kvm_guest_enter(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __kvm_guest_enter();
- local_irq_restore(flags);
-}
-
-static inline void kvm_guest_exit(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __kvm_guest_exit();
- local_irq_restore(flags);
-}
-
/*
* search_memslots() and __gfn_to_memslot() are here because they are
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
@@ -1032,17 +1009,18 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
+#elif defined(CONFIG_ARM64)
+#define KVM_MAX_IRQ_ROUTES 4096
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif
-int kvm_setup_default_irq_routing(struct kvm *kvm);
-int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);
@@ -1097,12 +1075,6 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-#else
-static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-#endif
-
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 7ae397669d8b..101bf19c0f41 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1356,7 +1356,7 @@ union security_list_options {
struct super_block *newsb);
int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts);
int (*dentry_init_security)(struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
+ const struct qstr *name, void **ctx,
u32 *ctxlen);
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
new file mode 100644
index 000000000000..6b55c938b401
--- /dev/null
+++ b/include/linux/mailbox/brcm-message.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common header for Broadcom mailbox messages which is shared across
+ * Broadcom SoCs and Broadcom mailbox client drivers.
+ */
+
+#ifndef _LINUX_BRCM_MESSAGE_H_
+#define _LINUX_BRCM_MESSAGE_H_
+
+#include <linux/scatterlist.h>
+
+enum brcm_message_type {
+ BRCM_MESSAGE_UNKNOWN = 0,
+ BRCM_MESSAGE_SPU,
+ BRCM_MESSAGE_SBA,
+ BRCM_MESSAGE_MAX,
+};
+
+struct brcm_sba_command {
+ u64 cmd;
+#define BRCM_SBA_CMD_TYPE_A BIT(0)
+#define BRCM_SBA_CMD_TYPE_B BIT(1)
+#define BRCM_SBA_CMD_TYPE_C BIT(2)
+#define BRCM_SBA_CMD_HAS_RESP BIT(3)
+#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4)
+ u64 flags;
+ dma_addr_t input;
+ size_t input_len;
+ dma_addr_t resp;
+ size_t resp_len;
+ dma_addr_t output;
+ size_t output_len;
+};
+
+struct brcm_message {
+ enum brcm_message_type type;
+ union {
+ struct {
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ } spu;
+ struct {
+ struct brcm_sba_command *cmds;
+ unsigned int cmds_count;
+ } sba;
+ };
+ void *ctx;
+ int error;
+};
+
+#endif /* _LINUX_BRCM_MESSAGE_H_ */
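
For illustration only (not part of the patch): a mailbox client would fill one of these messages and submit it through the generic mailbox API. A sketch for the SPU path; chan, src_sgl, dst_sgl and my_req are assumed to exist:

	struct brcm_message msg = { };

	msg.type = BRCM_MESSAGE_SPU;
	msg.spu.src = src_sgl;
	msg.spu.dst = dst_sgl;
	msg.ctx = my_req;		/* handed back to the client on completion */

	if (mbox_send_message(chan, &msg) < 0)
		return -EIO;
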
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index ea34a867caa0..d610232762e3 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -66,7 +66,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo
}
#endif
-int mvebu_mbus_save_cpu_target(u32 *store_addr);
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr);
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 433e0c74d643..a585b4b5fa0e 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -14,6 +14,8 @@
#include <asm/io.h>
#include <linux/rtc.h> /* get the user-level API */
#include <asm/mc146818rtc.h> /* register access macros */
+#include <linux/bcd.h>
+#include <linux/delay.h>
#ifdef __KERNEL__
#include <linux/spinlock.h> /* spinlock_t */
@@ -120,4 +122,7 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
+unsigned int mc146818_get_time(struct rtc_time *time);
+int mc146818_set_time(struct rtc_time *time);
+
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index 689312745b2f..01024d1aed0e 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -37,12 +37,6 @@ static inline int ab8500_sysctrl_clear(u16 reg, u8 bits)
return ab8500_sysctrl_write(reg, bits, 0);
}
-/* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */
-struct ab8500_sysctrl_platform_data {
- u8 initial_req_buf_config[8];
- u16 (*reboot_reason_code)(const char *cmd);
-};
-
/* Registers */
#define AB8500_TURNONSTATUS 0x100
#define AB8500_RESETSTATUS 0x101
diff --git a/include/linux/mfd/altera-a10sr.h b/include/linux/mfd/altera-a10sr.h
new file mode 100644
index 000000000000..45a5e6e7db54
--- /dev/null
+++ b/include/linux/mfd/altera-a10sr.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Declarations for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from DA9052
+ */
+
+#ifndef __MFD_ALTERA_A10SR_H
+#define __MFD_ALTERA_A10SR_H
+
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Write registers are always on even addresses */
+#define WRITE_REG_MASK 0xFE
+/* Odd registers are always on odd addresses */
+#define READ_REG_MASK 0x01
+
+#define ALTR_A10SR_BITS_PER_REGISTER 8
+/*
+ * To find the correct register, we divide the input GPIO by
+ * the number of GPIO in each register. We then need to multiply
+ * by 2 because the reads are at odd addresses.
+ */
+#define ALTR_A10SR_REG_OFFSET(X) (((X) / ALTR_A10SR_BITS_PER_REGISTER) << 1)
+#define ALTR_A10SR_REG_BIT(X) ((X) % ALTR_A10SR_BITS_PER_REGISTER)
+#define ALTR_A10SR_REG_BIT_CHG(X, Y) ((X) << ALTR_A10SR_REG_BIT(Y))
+#define ALTR_A10SR_REG_BIT_MASK(X) (1 << ALTR_A10SR_REG_BIT(X))
+
+/* Arria10 System Controller Register Defines */
+#define ALTR_A10SR_NOP 0x00 /* No Change */
+#define ALTR_A10SR_VERSION_READ 0x00 /* MAX5 Version Read */
+
+#define ALTR_A10SR_LED_REG 0x02 /* LED - Upper 4 bits */
+/* LED register Bit Definitions */
+#define ALTR_A10SR_LED_VALID_SHIFT 4 /* LED - Upper 4 bits valid */
+#define ALTR_A10SR_OUT_VALID_RANGE_LO ALTR_A10SR_LED_VALID_SHIFT
+#define ALTR_A10SR_OUT_VALID_RANGE_HI 7
+
+#define ALTR_A10SR_PBDSW_REG 0x04 /* PB & DIP SW - Input only */
+#define ALTR_A10SR_PBDSW_IRQ_REG 0x06 /* PB & DIP SW Flag Clear */
+/* Pushbutton & DIP Switch Bit Definitions */
+#define ALTR_A10SR_IN_VALID_RANGE_LO 8
+#define ALTR_A10SR_IN_VALID_RANGE_HI 15
+
+#define ALTR_A10SR_PWR_GOOD1_REG 0x08 /* Power Good1 Read */
+#define ALTR_A10SR_PWR_GOOD2_REG 0x0A /* Power Good2 Read */
+#define ALTR_A10SR_PWR_GOOD3_REG 0x0C /* Power Good3 Read */
+#define ALTR_A10SR_FMCAB_REG 0x0E /* FMCA/B & PCIe Pwr Enable */
+#define ALTR_A10SR_HPS_RST_REG 0x10 /* HPS Reset */
+#define ALTR_A10SR_USB_QSPI_REG 0x12 /* USB, BQSPI, FILE Reset */
+#define ALTR_A10SR_SFPA_REG 0x14 /* SFPA Control Reg */
+#define ALTR_A10SR_SFPB_REG 0x16 /* SFPB Control Reg */
+#define ALTR_A10SR_I2C_M_REG 0x18 /* I2C Master Select */
+#define ALTR_A10SR_WARM_RST_REG 0x1A /* HPS Warm Reset */
+#define ALTR_A10SR_WR_KEY_REG 0x1C /* HPS Warm Reset Key */
+#define ALTR_A10SR_PMBUS_REG 0x1E /* HPS PM Bus */
+
+/**
+ * struct altr_a10sr - Altera Max5 MFD device private data structure
+ * @dev:    this device
+ * @regmap: the regmap assigned to the parent device.
+ */
+struct altr_a10sr {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#endif /* __MFD_ALTERA_A10SR_H */
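
Worked example for the offset/bit helpers above (8 bits per register, GPIO 10):

/*
 * ALTR_A10SR_REG_OFFSET(10)   = (10 / 8) << 1 = 2	-> second register pair
 * ALTR_A10SR_REG_BIT(10)      = 10 % 8        = 2	-> bit 2 within it
 * ALTR_A10SR_REG_BIT_MASK(10) = 1 << 2        = 0x04
 */
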
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index d55a42297d49..58ab4c0fe761 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -14,6 +14,7 @@
#define _WM_ARIZONA_CORE_H
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/arizona/pdata.h>
@@ -148,8 +149,17 @@ struct arizona {
uint16_t dac_comp_coeff;
uint8_t dac_comp_enabled;
struct mutex dac_comp_lock;
+
+ struct blocking_notifier_head notifier;
};
+static inline int arizona_call_notifiers(struct arizona *arizona,
+ unsigned long event,
+ void *data)
+{
+ return blocking_notifier_call_chain(&arizona->notifier, event, data);
+}
+
int arizona_clk32k_enable(struct arizona *arizona);
int arizona_clk32k_disable(struct arizona *arizona);
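/*
 * Editorial sketch, not part of the patch: how a child driver might hook the
 * new Arizona notifier chain. The handler and event handling are illustrative;
 * event codes are defined by the users of the chain, not by this header.
 */
static int example_arizona_event(struct notifier_block *nb,
                                 unsigned long event, void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block example_arizona_nb = {
        .notifier_call = example_arizona_event,
};

static void example_arizona_listen(struct arizona *arizona)
{
        blocking_notifier_chain_register(&arizona->notifier,
                                         &example_arizona_nb);
}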
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index cd7e78eae006..0d06c5d0af93 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -856,12 +856,6 @@
#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
-#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
-#define ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
-#define ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
-#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
-#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
-#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
#define ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
#define ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 64184d27e3cd..d641a18abacb 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -226,6 +226,21 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
+ * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
+ *
+ * This function is identical to cros_ec_cmd_xfer, except it returns success
+ * status only if both the command was transmitted successfully and the EC
+ * replied with success status. It's not necessary to check msg->result when
+ * using this function.
+ *
+ * @ec_dev: EC device
+ * @msg: Message to write
+ * @return: Num. of bytes transferred on success, <0 on failure
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
* cros_ec_remove - Remove a ChromeOS EC
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 13b630c10d4c..7e7a8d4b4551 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -949,6 +949,37 @@ struct ec_params_pwm_set_fan_duty {
uint32_t percent;
} __packed;
+#define EC_CMD_PWM_SET_DUTY 0x25
+/* 16 bit duty cycle, 0xffff = 100% */
+#define EC_PWM_MAX_DUTY 0xffff
+
+enum ec_pwm_type {
+ /* All types, indexed by board-specific enum pwm_channel */
+ EC_PWM_TYPE_GENERIC = 0,
+ /* Keyboard backlight */
+ EC_PWM_TYPE_KB_LIGHT,
+ /* Display backlight */
+ EC_PWM_TYPE_DISPLAY_LIGHT,
+ EC_PWM_TYPE_COUNT,
+};
+
+struct ec_params_pwm_set_duty {
+ uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
+ uint8_t pwm_type; /* ec_pwm_type */
+ uint8_t index; /* Type-specific index, or 0 if unique */
+} __packed;
+
+#define EC_CMD_PWM_GET_DUTY 0x26
+
+struct ec_params_pwm_get_duty {
+ uint8_t pwm_type; /* ec_pwm_type */
+ uint8_t index; /* Type-specific index, or 0 if unique */
+} __packed;
+
+struct ec_response_pwm_get_duty {
+ uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
+} __packed;
+
/*****************************************************************************/
/*
* Lightbar commands. This looks worse than it is. Since we only use one HOST
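/*
 * Editorial sketch, not part of the patch: reading the keyboard-backlight
 * duty cycle with cros_ec_cmd_xfer_status() and the new PWM structures.
 * The function name and buffer layout are assumptions for illustration.
 */
static int example_get_kb_duty(struct cros_ec_device *ec_dev, u16 *duty)
{
        struct {
                struct cros_ec_command msg;
                union {
                        struct ec_params_pwm_get_duty params;
                        struct ec_response_pwm_get_duty resp;
                };
        } __packed buf = { };
        int ret;

        buf.msg.command = EC_CMD_PWM_GET_DUTY;
        buf.msg.outsize = sizeof(buf.params);
        buf.msg.insize = sizeof(buf.resp);
        buf.params.pwm_type = EC_PWM_TYPE_KB_LIGHT;

        /* Fails if either the transfer or the EC-reported result is bad. */
        ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
        if (ret < 0)
                return ret;

        *duty = buf.resp.duty;
        return 0;
}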
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index bf5109d38a26..5d374601404c 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -178,16 +178,6 @@ enum ddr_pwrst {
#define DB8500_PRCMU_LEGACY_OFFSET 0xDD4
-struct prcmu_pdata
-{
- bool enable_set_ddr_opp;
- bool enable_ape_opp_100_voltage;
- struct ab8500_platform_data *ab_platdata;
- u32 version_offset;
- u32 legacy_offset;
- u32 adt_offset;
-};
-
#define PRCMU_FW_PROJECT_U8500 2
#define PRCMU_FW_PROJECT_U8400 3
#define PRCMU_FW_PROJECT_U9500 4 /* Customer specific */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index dbbe9a644622..62f03c2b1bb0 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -34,14 +34,23 @@
#define PMU_VER_START 0x10
#define PMU_VER_END 0x38
-#define RESERVE_INT BIT(7)
-#define PWRON_D20R_INT BIT(6)
-#define PWRON_D20F_INT BIT(5)
-#define PWRON_D4SR_INT BIT(4)
-#define VSYS_6P0_D200UR_INT BIT(3)
-#define VSYS_UV_D3R_INT BIT(2)
-#define VSYS_2P5_R_INT BIT(1)
-#define OTMP_D1R_INT BIT(0)
+#define RESERVE_INT 7
+#define PWRON_D20R_INT 6
+#define PWRON_D20F_INT 5
+#define PWRON_D4SR_INT 4
+#define VSYS_6P0_D200UR_INT 3
+#define VSYS_UV_D3R_INT 2
+#define VSYS_2P5_R_INT 1
+#define OTMP_D1R_INT 0
+
+#define RESERVE_INT_MASK BIT(RESERVE_INT)
+#define PWRON_D20R_INT_MASK BIT(PWRON_D20R_INT)
+#define PWRON_D20F_INT_MASK BIT(PWRON_D20F_INT)
+#define PWRON_D4SR_INT_MASK BIT(PWRON_D4SR_INT)
+#define VSYS_6P0_D200UR_INT_MASK BIT(VSYS_6P0_D200UR_INT)
+#define VSYS_UV_D3R_INT_MASK BIT(VSYS_UV_D3R_INT)
+#define VSYS_2P5_R_INT_MASK BIT(VSYS_2P5_R_INT)
+#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
struct resource *res;
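/*
 * Editorial sketch, not part of the patch: the *_INT defines now carry bit
 * numbers while *_INT_MASK carries register masks, so IRQ bookkeeping and
 * register writes can each use the matching form. The status register name
 * below (HI655X_IRQ_STAT_BASE) is an assumption for illustration.
 */
static int example_clear_otmp(struct hi655x_pmic *pmic)
{
        /* Clear the over-temperature interrupt by writing its mask bit. */
        return regmap_write(pmic->regmap, HI655X_IRQ_STAT_BASE,
                            OTMP_D1R_INT_MASK);
}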
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index cb83883918a7..de748bc7525e 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -62,6 +62,7 @@ enum {
struct stmpe_variant_info;
struct stmpe_client_info;
+struct stmpe_platform_data;
/**
* struct stmpe - STMPE MFD structure
@@ -117,25 +118,4 @@ extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
-/**
- * struct stmpe_platform_data - STMPE platform data
- * @id: device id to distinguish between multiple STMPEs on the same board
- * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
- * @irq_trigger: IRQ trigger to use for the interrupt to the host
- * @autosleep: bool to enable/disable stmpe autosleep
- * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
- * @irq_over_gpio: true if gpio is used to get irq
- * @irq_gpio: gpio number over which irq will be requested (significant only if
- * irq_over_gpio is true)
- */
-struct stmpe_platform_data {
- int id;
- unsigned int blocks;
- unsigned int irq_trigger;
- bool autosleep;
- bool irq_over_gpio;
- int irq_gpio;
- int autosleep_timeout;
-};
-
#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 1fd50dcfe47c..2567a87872b0 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -153,7 +153,7 @@
struct ti_tscadc_dev {
struct device *dev;
- struct regmap *regmap_tscadc;
+ struct regmap *regmap;
void __iomem *tscadc_base;
int irq;
int used_cells; /* 1-2 */
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8e95cd87cd74..36795a1be479 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -226,6 +226,7 @@ struct twl6040 {
struct regmap_irq_chip_data *irq_data;
struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
struct clk *clk32k;
+ struct clk *mclk;
struct mutex mutex;
struct mutex irq_mutex;
struct mfd_cell cells[TWL6040_CELLS];
@@ -237,8 +238,8 @@ struct twl6040 {
/* PLL configuration */
int pll;
- unsigned int sysclk;
- unsigned int mclk;
+ unsigned int sysclk_rate;
+ unsigned int mclk_rate;
unsigned int irq;
unsigned int irq_ready;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index e6f6910278f3..42da3552f7cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -220,6 +220,7 @@ enum {
MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
+ MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
};
enum {
@@ -1342,6 +1343,9 @@ enum {
VXLAN_STEER_BY_INNER_VLAN = 1 << 4,
};
+enum {
+ MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2,
+};
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode);
@@ -1382,6 +1386,9 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+ const u32 offset[], u32 value[],
+ size_t array_len, u8 port);
u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2be976dd4966..2566f6d6444f 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -58,6 +58,8 @@ struct mlx5_core_cq {
void (*comp)(struct mlx5_core_cq *);
void *priv;
} tasklet_ctx;
+ int reset_notify_added;
+ struct list_head reset_notify;
};
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a041b99fceac..ccea6fb16482 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -46,6 +46,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
+#include <linux/mlx5/srq.h>
enum {
MLX5_RQ_BITMASK_VSD = 1 << 1,
@@ -798,11 +799,10 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen,
- int is_xrc);
+ struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out);
+ struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index ab310819ac36..7879bf411891 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -556,9 +556,9 @@ struct mlx5_destroy_qp_mbox_out {
struct mlx5_modify_qp_mbox_in {
struct mlx5_inbox_hdr hdr;
__be32 qpn;
- u8 rsvd1[4];
- __be32 optparam;
u8 rsvd0[4];
+ __be32 optparam;
+ u8 rsvd1[4];
struct mlx5_qp_context ctx;
u8 rsvd2[16];
};
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index f43ed054a3e0..33c97dc900f8 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -35,6 +35,31 @@
#include <linux/mlx5/driver.h>
+enum {
+ MLX5_SRQ_FLAG_ERR = (1 << 0),
+ MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+};
+
+struct mlx5_srq_attr {
+ u32 type;
+ u32 flags;
+ u32 log_size;
+ u32 wqe_shift;
+ u32 log_page_size;
+ u32 wqe_cnt;
+ u32 srqn;
+ u32 xrcd;
+ u32 page_offset;
+ u32 cqn;
+ u32 pd;
+ u32 lwm;
+ u32 user_index;
+ u64 db_record;
+ u64 *pas;
+};
+
+struct mlx5_core_dev;
+
void mlx5_init_srq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 33e17f6a327a..634c4c51fe3a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -49,7 +49,7 @@ static inline void vm_unacct_memory(long pages)
*
* Returns true if the prot flags are valid
*/
-static inline int arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot)
{
return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index eb0151bac50c..d8673ca968ba 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -95,6 +95,7 @@ struct mmc_ext_csd {
u8 raw_partition_support; /* 160 */
u8 raw_rpmb_size_mult; /* 168 */
u8 raw_erased_mem_count; /* 181 */
+ u8 strobe_support; /* 184 */
u8 raw_ext_csd_structure; /* 194 */
u8 raw_card_type; /* 196 */
u8 raw_driver_strength; /* 197 */
@@ -279,6 +280,7 @@ struct mmc_card {
#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
unsigned int erase_size; /* erase size in sectors */
@@ -353,6 +355,9 @@ struct mmc_fixup {
/* SDIO-specfic fields. You can use SDIO_ANY_ID here of course */
u16 cis_vendor, cis_device;
+ /* for MMC cards */
+ unsigned int ext_csd_rev;
+
void (*vendor_fixup)(struct mmc_card *card, int data);
int data;
};
@@ -361,11 +366,20 @@ struct mmc_fixup {
#define CID_OEMID_ANY ((unsigned short) -1)
#define CID_NAME_ANY (NULL)
+#define EXT_CSD_REV_ANY (-1u)
+
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_KINGSTON 0x70
+#define CID_MANFID_HYNIX 0x90
+
#define END_FIXUP { NULL }
#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
_cis_vendor, _cis_device, \
- _fixup, _data) \
+ _fixup, _data, _ext_csd_rev) \
{ \
.name = (_name), \
.manfid = (_manfid), \
@@ -376,23 +390,30 @@ struct mmc_fixup {
.cis_device = (_cis_device), \
.vendor_fixup = (_fixup), \
.data = (_data), \
+ .ext_csd_rev = (_ext_csd_rev), \
}
#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
- _fixup, _data) \
+ _fixup, _data, _ext_csd_rev) \
_FIXUP_EXT(_name, _manfid, \
_oemid, _rev_start, _rev_end, \
SDIO_ANY_ID, SDIO_ANY_ID, \
- _fixup, _data) \
+ _fixup, _data, _ext_csd_rev) \
#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
- MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ EXT_CSD_REV_ANY)
+
+#define MMC_FIXUP_EXT_CSD_REV(_name, _manfid, _oemid, _fixup, _data, \
+ _ext_csd_rev) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data, \
+ _ext_csd_rev)
#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
_FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
CID_OEMID_ANY, 0, -1ull, \
_vendor, _device, \
- _fixup, _data) \
+ _fixup, _data, EXT_CSD_REV_ANY) \
#define cid_rev(hwrev, fwrev, year, month) \
(((u64) hwrev) << 40 | \
@@ -511,6 +532,11 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
}
+static inline int mmc_card_broken_hpi(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_HPI;
+}
+
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index f7ed271a1d54..83b0edfce471 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -112,7 +112,6 @@ struct dw_mci_dma_slave {
* @part_buf: Simple buffer for partial fifo reads/writes.
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
- * @quirks: Set of quirks that apply to specific versions of the IP.
* @vqmmc_enabled: Status of vqmmc, should be true or false.
* @irq_flags: The flags to be passed to request_irq.
* @irq: The irq value to be passed to request_irq.
@@ -218,9 +217,6 @@ struct dw_mci {
void (*push_data)(struct dw_mci *host, void *buf, int cnt);
void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
- /* Workaround flags */
- u32 quirks;
-
bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
@@ -242,17 +238,12 @@ struct dw_mci_dma_ops {
void (*exit)(struct dw_mci *host);
};
-/* IP Quirks/flags. */
-/* Timer for broken data transfer over scheme */
-#define DW_MCI_QUIRK_BROKEN_DTO BIT(0)
-
struct dma_pdata;
/* Board platform data */
struct dw_mci_board {
u32 num_slots;
- u32 quirks; /* Workaround / Quirk flags */
unsigned int bus_hz; /* Clock speed at the cclk_in pad */
u32 caps; /* Capabilities */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 45cde8cd39f2..aa4bfbf129e4 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -19,6 +19,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
struct mmc_ios {
@@ -77,6 +78,8 @@ struct mmc_ios {
#define MMC_SET_DRIVER_TYPE_A 1
#define MMC_SET_DRIVER_TYPE_C 2
#define MMC_SET_DRIVER_TYPE_D 3
+
+ bool enhanced_strobe; /* hs400es selection */
};
struct mmc_host_ops {
@@ -143,6 +146,9 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Prepare enhanced strobe depending host driver */
+ void (*hs400_enhanced_strobe)(struct mmc_host *host,
+ struct mmc_ios *ios);
int (*select_drive_strength)(struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
@@ -302,6 +308,9 @@ struct mmc_host {
#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
#define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */
#define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */
+#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */
+#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */
+#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -513,6 +522,11 @@ static inline bool mmc_card_hs400(struct mmc_card *card)
return card->host->ios.timing == MMC_TIMING_MMC_HS400;
}
+static inline bool mmc_card_hs400es(struct mmc_card *card)
+{
+ return card->host->ios.enhanced_strobe;
+}
+
void mmc_retune_timer_stop(struct mmc_host *host);
static inline void mmc_retune_needed(struct mmc_host *host)
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 15f2c4a0a62c..c376209c70ef 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -297,6 +297,7 @@ struct _mmc_csd {
#define EXT_CSD_PART_CONFIG 179 /* R/W */
#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
#define EXT_CSD_BUS_WIDTH 183 /* R/W */
+#define EXT_CSD_STROBE_SUPPORT 184 /* RO */
#define EXT_CSD_HS_TIMING 185 /* R/W */
#define EXT_CSD_POWER_CLASS 187 /* R/W */
#define EXT_CSD_REV 192 /* RO */
@@ -380,12 +381,14 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */
#define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \
EXT_CSD_CARD_TYPE_HS400_1_2V)
+#define EXT_CSD_CARD_TYPE_HS400ES (1<<8) /* Card can run at HS400ES */
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */
#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */
+#define EXT_CSD_BUS_WIDTH_STROBE BIT(7) /* Enhanced strobe mode */
#define EXT_CSD_TIMING_BC 0 /* Backwards compatibility */
#define EXT_CSD_TIMING_HS 1 /* High speed */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f2e4e90621ec..d572b78b65e1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
+# define is_migrate_cma_page(_page) false
#endif
#define for_each_migratetype_order(order, type) \
diff --git a/include/linux/module.h b/include/linux/module.h
index 3daf2b3a09d2..0c3207d26ac0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -18,6 +18,7 @@
#include <linux/moduleparam.h>
#include <linux/jump_label.h>
#include <linux/export.h>
+#include <linux/extable.h> /* only as arch move module.h -> extable.h */
#include <linux/rbtree_latch.h>
#include <linux/percpu.h>
@@ -37,6 +38,7 @@ struct modversion_info {
};
struct module;
+struct exception_table_entry;
struct module_kobject {
struct kobject kobj;
@@ -155,18 +157,6 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
-/* Archs provide a method of finding the correct exception table. */
-struct exception_table_entry;
-
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
- const struct exception_table_entry *last,
- unsigned long value);
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish);
-void sort_main_extable(void);
-void trim_init_extable(struct module *m);
-
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
@@ -268,9 +258,6 @@ extern const typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-/* Given an address, look for it in the exception tables */
-const struct exception_table_entry *search_exception_tables(unsigned long add);
-
struct notifier_block;
#ifdef CONFIG_MODULES
@@ -311,6 +298,8 @@ struct module_layout {
unsigned int text_size;
/* Size of RO section of the module (text+rodata) */
unsigned int ro_size;
+ /* Size of RO after init section */
+ unsigned int ro_after_init_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
@@ -575,8 +564,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data);
-extern void __module_put_and_exit(struct module *mod, long code)
- __attribute__((noreturn));
+extern void __noreturn __module_put_and_exit(struct module *mod,
+ long code);
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
@@ -630,9 +619,6 @@ const char *module_address_lookup(unsigned long addr,
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-/* For extable.c to search modules' exception tables. */
-const struct exception_table_entry *search_module_extables(unsigned long addr);
-
int register_module_notifier(struct notifier_block *nb);
int unregister_module_notifier(struct notifier_block *nb);
@@ -657,13 +643,6 @@ static inline bool is_livepatch_module(struct module *mod)
#else /* !CONFIG_MODULES... */
-/* Given an address, look for it in the exception tables. */
-static inline const struct exception_table_entry *
-search_module_extables(unsigned long addr)
-{
- return NULL;
-}
-
static inline struct module *__module_address(unsigned long addr)
{
return NULL;
@@ -788,12 +767,12 @@ extern int module_sysfs_initialized;
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
extern void set_all_modules_text_rw(void);
extern void set_all_modules_text_ro(void);
-extern void module_enable_ro(const struct module *mod);
+extern void module_enable_ro(const struct module *mod, bool after_init);
extern void module_disable_ro(const struct module *mod);
#else
static inline void set_all_modules_text_rw(void) { }
static inline void set_all_modules_text_ro(void) { }
-static inline void module_enable_ro(const struct module *mod) { }
+static inline void module_enable_ro(const struct module *mod, bool after_init) { }
static inline void module_disable_ro(const struct module *mod) { }
#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index fbe8e164a4ee..8dd6e01f45c0 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -783,6 +783,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
* NAND Flash Manufacturer ID Codes
*/
#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_ESMT 0xc8
#define NAND_MFR_SAMSUNG 0xec
#define NAND_MFR_FUJITSU 0x04
#define NAND_MFR_NATIONAL 0x8f
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 7f041bd88b82..c425c7b4c2a0 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -173,10 +173,10 @@ struct spi_nor {
int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*read)(struct spi_nor *nor, loff_t from,
- size_t len, size_t *retlen, u_char *read_buf);
- void (*write)(struct spi_nor *nor, loff_t to,
- size_t len, size_t *retlen, const u_char *write_buf);
+ ssize_t (*read)(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *read_buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf);
int (*erase)(struct spi_nor *nor, loff_t offs);
int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bfed6b367350..c6564ada9beb 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -643,4 +643,15 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
};
+#define NFS4_OP_MAP_NUM_LONGS \
+ DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long))
+#define NFS4_OP_MAP_NUM_WORDS \
+ (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32))
+struct nfs4_op_map {
+ union {
+ unsigned long longs[NFS4_OP_MAP_NUM_LONGS];
+ u32 words[NFS4_OP_MAP_NUM_WORDS];
+ } u;
+};
+
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index d71278c3c5bd..810124b33327 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -205,12 +205,12 @@ struct nfs_inode {
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
-#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
+#define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
@@ -351,7 +351,6 @@ extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *ino
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
extern int nfs_setattr(struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index c304a11b5b1a..7cc0deee5bde 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1185,17 +1185,6 @@ struct pnfs_ds_commit_info {
struct pnfs_commit_bucket *buckets;
};
-#define NFS4_OP_MAP_NUM_LONGS \
- DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long))
-#define NFS4_OP_MAP_NUM_WORDS \
- (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32))
-struct nfs4_op_map {
- union {
- unsigned long longs[NFS4_OP_MAP_NUM_LONGS];
- u32 words[NFS4_OP_MAP_NUM_WORDS];
- } u;
-};
-
struct nfs41_state_protection {
u32 how;
struct nfs4_op_map enforce;
@@ -1543,7 +1532,7 @@ struct nfs_rpc_ops {
struct nfs_fattr *, struct nfs4_label *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, struct qstr *,
+ int (*lookup) (struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *,
struct nfs4_label *);
int (*access) (struct inode *, struct nfs_access_entry *);
@@ -1551,18 +1540,18 @@ struct nfs_rpc_ops {
unsigned int);
int (*create) (struct inode *, struct dentry *,
struct iattr *, int);
- int (*remove) (struct inode *, struct qstr *);
+ int (*remove) (struct inode *, const struct qstr *);
void (*unlink_setup) (struct rpc_message *, struct inode *dir);
void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
int (*unlink_done) (struct rpc_task *, struct inode *);
void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
- int (*link) (struct inode *, struct inode *, struct qstr *);
+ int (*link) (struct inode *, struct inode *, const struct qstr *);
int (*symlink) (struct inode *, struct dentry *, struct page *,
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
- int (*rmdir) (struct inode *, struct qstr *);
+ int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct dentry *, struct rpc_cred *,
u64, struct page **, unsigned int, int);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
@@ -1596,9 +1585,8 @@ struct nfs_rpc_ops {
int (*have_delegation)(struct inode *, fmode_t);
int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
- struct nfs_client *
- (*init_client) (struct nfs_client *, const struct rpc_timeout *,
- const char *);
+ struct nfs_client *(*init_client) (struct nfs_client *,
+ const struct nfs_client_initdata *);
void (*free_client) (struct nfs_client *);
struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
diff --git a/include/linux/of.h b/include/linux/of.h
index 15c43f076b23..3d9ff8e9d803 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -238,13 +238,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
#endif
-/* Default string compare functions, Allow arch asm/prom.h to override */
-#if !defined(of_compat_cmp)
-#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
-#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
-#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
-#endif
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
@@ -324,6 +317,8 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
+extern int of_device_compatible_match(struct device_node *device,
+ const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
@@ -726,6 +721,13 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
+/* Default string compare functions, Allow arch asm/prom.h to override */
+#if !defined(of_compat_cmp)
+#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
+#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
+#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
+#endif
+
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
#else
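/*
 * Editorial sketch, not part of the patch: matching a node against several
 * compatible strings with the new of_device_compatible_match(). The strings
 * and wrapper are illustrative; the list must be NULL-terminated.
 */
static const char * const example_compatibles[] = {
        "vendor,controller-v1",
        "vendor,controller-v2",
        NULL
};

static bool example_node_supported(struct device_node *np)
{
        return of_device_compatible_match(np, example_compatibles) != 0;
}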
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 8b5e0a9f2431..610e13271918 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -124,6 +124,15 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
return ret;
}
+static inline int page_ref_inc_return(struct page *page)
+{
+ int ret = atomic_inc_return(&page->_refcount);
+
+ if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+ __page_ref_mod_and_return(page, 1, ret);
+ return ret;
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 81363b834900..66a1260b33de 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
-void page_endio(struct page *page, int rw, int err);
+void page_endio(struct page *page, bool is_write, int err);
/*
* Add an arbitrary waiter to a page's wait queue
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 89ab0572dbc6..7d63a66e8ed4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -24,6 +24,8 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
}
extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
+extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res);
+
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
struct pci_bus *pbus = pdev->bus;
diff --git a/drivers/pci/ecam.h b/include/linux/pci-ecam.h
index 9878bebd45bb..7adad206b1f4 100644
--- a/drivers/pci/ecam.h
+++ b/include/linux/pci-ecam.h
@@ -27,8 +27,7 @@ struct pci_config_window;
struct pci_ecam_ops {
unsigned int bus_shift;
struct pci_ops pci_ops;
- int (*init)(struct device *,
- struct pci_config_window *);
+ int (*init)(struct pci_config_window *);
};
/*
@@ -45,6 +44,7 @@ struct pci_config_window {
void __iomem *win; /* 64-bit single mapping */
void __iomem **winp; /* 32-bit per-bus mapping */
};
+ struct device *parent;/* ECAM res was from this dev */
};
/* create and free pci_config_window */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b67e4df20801..2599a980340f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -101,6 +101,10 @@ enum {
DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
+/*
+ * pci_power_t values must match the bits in the Capabilities PME_Support
+ * and Control/Status PowerState fields in the Power Management capability.
+ */
typedef int __bitwise pci_power_t;
#define PCI_D0 ((pci_power_t __force) 0)
@@ -116,7 +120,7 @@ extern const char *pci_power_names[];
static inline const char *pci_power_name(pci_power_t state)
{
- return pci_power_names[1 + (int) state];
+ return pci_power_names[1 + (__force int) state];
}
#define PCI_PM_D2_DELAY 200
@@ -294,6 +298,7 @@ struct pci_dev {
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
unsigned int no_d3cold:1; /* D3cold is forbidden */
+ unsigned int bridge_d3:1; /* Allow D3 for bridge */
unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
unsigned int mmio_always_on:1; /* disallow turning off io/mem
decoding during bar sizing */
@@ -320,6 +325,7 @@ struct pci_dev {
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
+ struct cpumask *irq_affinity;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
bool match_driver; /* Skip attaching driver */
@@ -854,6 +860,7 @@ void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
+void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
@@ -1083,6 +1090,8 @@ int pci_back_from_sleep(struct pci_dev *dev);
bool pci_dev_run_wake(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
+void pci_d3cold_enable(struct pci_dev *dev);
+void pci_d3cold_disable(struct pci_dev *dev);
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
bool enable)
@@ -1114,6 +1123,7 @@ int pci_set_vpd_size(struct pci_dev *dev, size_t len);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
void pci_bus_assign_resources(const struct pci_bus *bus);
+void pci_bus_claim_resources(struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
@@ -1143,9 +1153,12 @@ void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset);
void pci_free_resource_list(struct list_head *resources);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+ unsigned int flags);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
+int devm_request_pci_bus_resources(struct device *dev,
+ struct list_head *resources);
#define pci_bus_for_each_resource(bus, res, i) \
for (i = 0; \
@@ -1167,6 +1180,7 @@ int pci_register_io_range(phys_addr_t addr, resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+void pci_unmap_iospace(struct resource *res);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
@@ -1237,6 +1251,11 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
+#define PCI_IRQ_NOLEGACY (1 << 0) /* don't use legacy interrupts */
+#define PCI_IRQ_NOMSI (1 << 1) /* don't use MSI interrupts */
+#define PCI_IRQ_NOMSIX (1 << 2) /* don't use MSI-X interrupts */
+#define PCI_IRQ_NOAFFINITY (1 << 3) /* don't auto-assign affinity */
+
/* kmem_cache style wrapper around pci_alloc_consistent() */
#include <linux/pci-dma.h>
@@ -1284,6 +1303,11 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
+void pci_free_irq_vectors(struct pci_dev *dev);
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
+
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_msi_shutdown(struct pci_dev *dev) { }
@@ -1307,6 +1331,24 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
static inline int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
+static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
+ unsigned int min_vecs, unsigned int max_vecs,
+ unsigned int flags)
+{
+ if (min_vecs > 1)
+ return -EINVAL;
+ return 1;
+}
+static inline void pci_free_irq_vectors(struct pci_dev *dev)
+{
+}
+
+static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+ if (WARN_ON_ONCE(nr > 0))
+ return -EINVAL;
+ return dev->irq;
+}
#endif
#ifdef CONFIG_PCIEPORTBUS
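/*
 * Editorial sketch, not part of the patch: a driver requesting a small range
 * of vectors with the new pci_alloc_irq_vectors() helpers. The handler, name
 * and vector count are illustrative; error handling is kept minimal.
 */
static irqreturn_t example_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_setup_irqs(struct pci_dev *pdev)
{
        int nvec, i, ret;

        nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_NOLEGACY);
        if (nvec < 0)
                return nvec;

        for (i = 0; i < nvec; i++) {
                ret = request_irq(pci_irq_vector(pdev, i), example_handler, 0,
                                  "example", pdev);
                if (ret)
                        goto err_free;
        }
        return 0;

err_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), pdev);
        pci_free_irq_vectors(pdev);
        return ret;
}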
@@ -1389,12 +1431,13 @@ static inline int pci_domain_nr(struct pci_bus *bus)
{
return bus->domain_nr;
}
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#ifdef CONFIG_ACPI
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
#else
-static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
- struct device *parent)
-{
-}
+static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{ return 0; }
+#endif
+int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
#endif
/* some architectures require additional setup to direct VGA traffic */
@@ -1402,6 +1445,34 @@ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
void pci_register_set_vga_state(arch_set_vga_state_t func);
+static inline int
+pci_request_io_regions(struct pci_dev *pdev, const char *name)
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_IO), name);
+}
+
+static inline void
+pci_release_io_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_IO));
+}
+
+static inline int
+pci_request_mem_regions(struct pci_dev *pdev, const char *name)
+{
+ return pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM), name);
+}
+
+static inline void
+pci_release_mem_regions(struct pci_dev *pdev)
+{
+ return pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+}
+
#else /* CONFIG_PCI is not enabled */
static inline void pci_set_flags(int flags) { }
@@ -1554,7 +1625,11 @@ static inline const char *pci_name(const struct pci_dev *pdev)
/* Some archs don't want to expose struct resource to userland as-is
* in sysfs and /proc
*/
-#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
+#ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc,
+ resource_size_t *start, resource_size_t *end);
+#else
static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc, resource_size_t *start,
resource_size_t *end)
@@ -1706,6 +1781,7 @@ extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_bus_size;
/* Architecture-specific versions may override these (weak) */
void pcibios_disable_device(struct pci_dev *dev);
@@ -1722,7 +1798,7 @@ void pcibios_free_irq(struct pci_dev *dev);
extern struct dev_pm_ops pcibios_pm_ops;
#endif
-#ifdef CONFIG_PCI_MMCONFIG
+#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
#else
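/*
 * Editorial sketch, not part of the patch: claiming all memory BARs at probe
 * time with the new pci_request_mem_regions() wrapper. The probe skeleton
 * and region name are illustrative.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        ret = pci_request_mem_regions(pdev, "example");
        if (ret) {
                pci_disable_device(pdev);
                return ret;
        }

        /* ... ioremap BARs and continue device setup ... */
        return 0;
}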
diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h
index 3c73c045f8da..e684543254f3 100644
--- a/include/linux/platform_data/asoc-ti-mcbsp.h
+++ b/include/linux/platform_data/asoc-ti-mcbsp.h
@@ -44,7 +44,7 @@ struct omap_mcbsp_platform_data {
/* McBSP platform and instance specific features */
bool has_wakeup; /* Wakeup capability */
bool has_ccr; /* Transceiver has configuration control registers */
- int (*enable_st_clock)(unsigned int, bool);
+ int (*force_ick_on)(struct clk *clk, bool force_on);
};
/**
@@ -55,4 +55,6 @@ struct omap_mcbsp_dev_attr {
const char *sidetone;
};
+void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata);
+
#endif
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
deleted file mode 100644
index 3af0da1f3be5..000000000000
--- a/include/linux/platform_data/clk-ux500.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Clock definitions for ux500 platforms
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#ifndef __CLK_UX500_H
-#define __CLK_UX500_H
-
-void u8500_clk_init(void);
-void u9540_clk_init(void);
-void u8540_clk_init(void);
-
-#endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h
index 3038120ca46e..812d87307877 100644
--- a/include/linux/platform_data/media/ir-rx51.h
+++ b/include/linux/platform_data/media/ir-rx51.h
@@ -2,10 +2,7 @@
#define _LIRC_RX51_H
struct lirc_rx51_platform_data {
- int pwm_timer;
-
int(*set_max_mpu_wakeup_lat)(struct device *dev, long t);
- struct pwm_omap_dmtimer_pdata *dmtimer;
};
#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 95ccab3f454a..7daa78a2f342 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -46,5 +46,6 @@ struct esdhc_platform_data {
bool support_vsel;
unsigned int delay_line;
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
+ unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h
new file mode 100644
index 000000000000..679177929045
--- /dev/null
+++ b/include/linux/platform_data/omapdss.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAPDSS_PDATA_H
+#define __OMAPDSS_PDATA_H
+
+enum omapdss_version {
+ OMAPDSS_VER_UNKNOWN = 0,
+ OMAPDSS_VER_OMAP24xx,
+ OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */
+ OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */
+ OMAPDSS_VER_OMAP3630,
+ OMAPDSS_VER_AM35xx,
+ OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */
+ OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */
+ OMAPDSS_VER_OMAP4, /* All other OMAP4s */
+ OMAPDSS_VER_OMAP5,
+ OMAPDSS_VER_AM43xx,
+ OMAPDSS_VER_DRA7xx,
+};
+
+/* Board specific data */
+struct omap_dss_board_info {
+ const char *default_display_name;
+ int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask);
+ void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask);
+ int (*set_min_bus_tput)(struct device *dev, unsigned long r);
+ enum omapdss_version version;
+};
+
+#endif /* __OMAPDSS_PDATA_H */
diff --git a/include/linux/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
index 22c53825528f..22c53825528f 100644
--- a/include/linux/rtc-ds2404.h
+++ b/include/linux/platform_data/rtc-ds2404.h
diff --git a/include/linux/m48t86.h b/include/linux/platform_data/rtc-m48t86.h
index 915d6b4f0f89..915d6b4f0f89 100644
--- a/include/linux/m48t86.h
+++ b/include/linux/platform_data/rtc-m48t86.h
diff --git a/include/linux/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h
index e55d82cebf80..e55d82cebf80 100644
--- a/include/linux/rtc-v3020.h
+++ b/include/linux/platform_data/rtc-v3020.h
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f136b22c7772..696a56be7d3e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -61,6 +61,11 @@ static inline void console_verbose(void)
console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
}
+/* strlen("ratelimit") + 1 */
+#define DEVKMSG_STR_MAX_SIZE 10
+extern char devkmsg_log_str[];
+struct ctl_table;
+
struct va_format {
const char *fmt;
va_list *va;
@@ -175,6 +180,10 @@ extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
+extern int
+devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
+ size_t *lenp, loff_t *ppos);
+
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
@@ -289,10 +298,11 @@ extern asmlinkage void dump_stack(void) __cold;
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
-#include <linux/dynamic_debug.h>
/* If you are writing a driver, please use dev_dbg instead */
#if defined(CONFIG_DYNAMIC_DEBUG)
+#include <linux/dynamic_debug.h>
+
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
#define pr_debug(fmt, ...) \
dynamic_pr_debug(fmt, ##__VA_ARGS__)
diff --git a/include/linux/property.h b/include/linux/property.h
index ecab11e40794..3a2f9ae25c86 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -77,6 +77,9 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname);
+
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index c038ae36b10e..f1bbae014889 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -5,7 +5,9 @@
#include <linux/mutex.h>
#include <linux/of.h>
+struct pwm_capture;
struct seq_file;
+
struct pwm_chip;
/**
@@ -148,11 +150,100 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
}
/**
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * @pwm: PWM device
+ * @state: state to fill with the prepared PWM state
+ *
+ * This function prepares a state that can later be tweaked and applied
+ * to the PWM device with pwm_apply_state(). This is a convenience function
+ * that first retrieves the current PWM state and then replaces the period
+ * and polarity fields with the reference values defined in pwm->args.
+ * Once the function returns, you can adjust the ->enabled and ->duty_cycle
+ * fields according to your needs before calling pwm_apply_state().
+ *
+ * ->duty_cycle is initially set to zero to avoid cases where the current
+ * ->duty_cycle value exceeds the pwm_args->period one, which would trigger
+ * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * first.
+ */
+static inline void pwm_init_state(const struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_args args;
+
+ /* First get the current state. */
+ pwm_get_state(pwm, state);
+
+ /* Then fill it with the reference config */
+ pwm_get_args(pwm, &args);
+
+ state->period = args.period;
+ state->polarity = args.polarity;
+ state->duty_cycle = 0;
+}
+
+/**
+ * pwm_get_relative_duty_cycle() - Get a relative duty cycle value
+ * @state: PWM state to extract the duty cycle from
+ * @scale: target scale of the relative duty cycle
+ *
+ * This function converts the absolute duty cycle stored in @state (expressed
+ * in nanoseconds) into a value relative to the period.
+ *
+ * For example if you want to get the duty_cycle expressed in percent, call:
+ *
+ * pwm_get_state(pwm, &state);
+ * duty = pwm_get_relative_duty_cycle(&state, 100);
+ */
+static inline unsigned int
+pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
+{
+ if (!state->period)
+ return 0;
+
+ return DIV_ROUND_CLOSEST_ULL((u64)state->duty_cycle * scale,
+ state->period);
+}
+
+/**
+ * pwm_set_relative_duty_cycle() - Set a relative duty cycle value
+ * @state: PWM state to fill
+ * @duty_cycle: relative duty cycle value
+ * @scale: scale in which @duty_cycle is expressed
+ *
+ * This function converts a relative duty cycle into an absolute one (expressed
+ * in nanoseconds) and puts the result in state->duty_cycle.
+ *
+ * For example if you want to configure a 50% duty cycle, call:
+ *
+ * pwm_init_state(pwm, &state);
+ * pwm_set_relative_duty_cycle(&state, 50, 100);
+ * pwm_apply_state(pwm, &state);
+ *
+ * This function returns -EINVAL if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale).
+ */
+static inline int
+pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
+ unsigned int scale)
+{
+ if (!scale || duty_cycle > scale)
+ return -EINVAL;
+
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL((u64)duty_cycle *
+ state->period,
+ scale);
+
+ return 0;
+}
+
+/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @config: configure duty cycles and period length for this PWM
* @set_polarity: configure the polarity of this PWM
+ * @capture: capture and report PWM signal
* @enable: enable PWM output toggling
* @disable: disable PWM output toggling
* @apply: atomically apply a new PWM config. The state argument
@@ -172,6 +263,8 @@ struct pwm_ops {
int duty_ns, int period_ns);
int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity);
+ int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_capture *result, unsigned long timeout);
int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -212,6 +305,16 @@ struct pwm_chip {
bool can_sleep;
};
+/**
+ * struct pwm_capture - PWM capture data
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ */
+struct pwm_capture {
+ unsigned int period;
+ unsigned int duty_cycle;
+};
+
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
struct pwm_device *pwm_request(int pwm_id, const char *label);
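/*
 * Editorial sketch, not part of the patch: a consumer programming a 50% duty
 * cycle with the helpers above. The function name is illustrative; the flow
 * mirrors the kernel-doc examples.
 */
static int example_pwm_half_duty(struct pwm_device *pwm)
{
        struct pwm_state state;
        int ret;

        /* period/polarity come from pwm->args, duty_cycle starts at zero */
        pwm_init_state(pwm, &state);

        ret = pwm_set_relative_duty_cycle(&state, 50, 100);
        if (ret)
                return ret;

        state.enabled = true;
        return pwm_apply_state(pwm, &state);
}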
@@ -323,8 +426,9 @@ static inline void pwm_disable(struct pwm_device *pwm)
pwm_apply_state(pwm, &state);
}
-
/* PWM provider APIs */
+int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout);
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
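/*
 * Editorial sketch, not part of the patch: measuring an input signal with the
 * new capture API. The timeout value is illustrative; consult the provider
 * driver for its expected units.
 */
static int example_measure_signal(struct pwm_device *pwm,
                                  unsigned int *period_ns,
                                  unsigned int *duty_ns)
{
        struct pwm_capture cap;
        int ret;

        ret = pwm_capture(pwm, &cap, 100 /* illustrative timeout */);
        if (ret)
                return ret;

        *period_ns = cap.period;
        *duty_ns = cap.duty_cycle;
        return 0;
}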
@@ -376,6 +480,13 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
return -EINVAL;
}
+static inline int pwm_capture(struct pwm_device *pwm,
+ struct pwm_capture *result,
+ unsigned long timeout)
+{
+ return -EINVAL;
+}
+
static inline int pwm_set_polarity(struct pwm_device *pwm,
enum pwm_polarity polarity)
{
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 9e12000914b3..cc32ab852fbc 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -29,6 +29,14 @@ extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
+ size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
+ phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index cbfee507c839..4c45105dece3 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -35,7 +35,7 @@
* 00 - data pointer
* 01 - internal entry
* 10 - exceptional entry
- * 11 - locked exceptional entry
+ * 11 - this bit combination is currently unused/reserved
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 18102529254e..57c9e0622a38 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -2,11 +2,15 @@
#define _LINUX_RATELIMIT_H
#include <linux/param.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define DEFAULT_RATELIMIT_BURST 10
+/* report the number of suppressed messages on exit */
+#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
@@ -15,6 +19,7 @@ struct ratelimit_state {
int printed;
int missed;
unsigned long begin;
+ unsigned long flags;
};
#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
@@ -34,12 +39,35 @@ struct ratelimit_state {
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
+ memset(rs, 0, sizeof(*rs));
+
raw_spin_lock_init(&rs->lock);
- rs->interval = interval;
- rs->burst = burst;
- rs->printed = 0;
- rs->missed = 0;
- rs->begin = 0;
+ rs->interval = interval;
+ rs->burst = burst;
+}
+
+static inline void ratelimit_default_init(struct ratelimit_state *rs)
+{
+ return ratelimit_state_init(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+}
+
+static inline void ratelimit_state_exit(struct ratelimit_state *rs)
+{
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
+ return;
+
+ if (rs->missed) {
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
+ current->comm, rs->missed);
+ rs->missed = 0;
+ }
+}
+
+static inline void
+ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags)
+{
+ rs->flags = flags;
}
extern struct ratelimit_state printk_ratelimit_state;
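/*
 * Editorial sketch, not part of the patch: using the new flag so suppressed
 * messages are reported when the state is released. Names are illustrative.
 */
static struct ratelimit_state example_rs;

static void example_init(void)
{
        ratelimit_default_init(&example_rs);
        ratelimit_set_flags(&example_rs, RATELIMIT_MSG_ON_RELEASE);
}

static void example_event(void)
{
        if (__ratelimit(&example_rs))
                pr_info("example: event logged\n");
}

static void example_exit(void)
{
        /* Prints how many lines were suppressed, then resets the count. */
        ratelimit_state_exit(&example_rs);
}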
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index b91ba932bbd4..db1fe6772ad5 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -53,4 +53,8 @@ struct reset_controller_dev {
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
+struct device;
+int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev);
+
#endif
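/*
 * Editorial sketch, not part of the patch: registering a reset controller
 * with device-managed lifetime via devm_reset_controller_register(). The ops
 * table and reset count are placeholders for illustration.
 */
static const struct reset_control_ops example_reset_ops;

static int example_reset_probe(struct platform_device *pdev)
{
        struct reset_controller_dev *rcdev;

        rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
        if (!rcdev)
                return -ENOMEM;

        rcdev->owner = THIS_MODULE;
        rcdev->ops = &example_reset_ops;
        rcdev->of_node = pdev->dev.of_node;
        rcdev->nr_resets = 1;

        /* Unregistered automatically when the device goes away. */
        return devm_reset_controller_register(&pdev->dev, rcdev);
}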
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 45a4abeb6acb..5daff15722d3 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -71,14 +71,14 @@ static inline struct reset_control *__of_reset_control_get(
struct device_node *node,
const char *id, int index, int shared)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__devm_reset_control_get(
struct device *dev,
const char *id, int index, int shared)
{
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_RESET_CONTROLLER */
diff --git a/include/linux/rio.h b/include/linux/rio.h
index aa2323893e8d..37b95c4af99d 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -163,6 +163,7 @@ enum rio_device_state {
* @dst_ops: Destination operation capabilities
* @comp_tag: RIO component tag
* @phys_efptr: RIO device extended features pointer
+ * @phys_rmap: LP-Serial Register Map Type (1 or 2)
* @em_efptr: RIO Error Management features pointer
* @dma_mask: Mask of bits of RIO address this device implements
* @driver: Driver claiming this device
@@ -193,6 +194,7 @@ struct rio_dev {
u32 dst_ops;
u32 comp_tag;
u32 phys_efptr;
+ u32 phys_rmap;
u32 em_efptr;
u64 dma_mask;
struct rio_driver *driver; /* RIO driver claiming this device */
@@ -237,11 +239,6 @@ struct rio_dbell {
void *dev_id;
};
-enum rio_phy_type {
- RIO_PHY_PARALLEL,
- RIO_PHY_SERIAL,
-};
-
/**
* struct rio_mport - RIO master port info
* @dbells: List of doorbell events
@@ -259,8 +256,8 @@ enum rio_phy_type {
* @id: Port ID, unique among all ports
* @index: Port index, unique among all port interfaces of the same type
* @sys_size: RapidIO common transport system size
- * @phy_type: RapidIO phy type
* @phys_efptr: RIO port extended features pointer
+ * @phys_rmap: LP-Serial EFB Register Mapping type (1 or 2).
* @name: Port name string
* @dev: device structure associated with an mport
* @priv: Master port private data
@@ -289,8 +286,8 @@ struct rio_mport {
* 0 - Small size. 256 devices.
* 1 - Large size, 65536 devices.
*/
- enum rio_phy_type phy_type; /* RapidIO phy type */
u32 phys_efptr;
+ u32 phys_rmap;
unsigned char name[RIO_MAX_MPORT_NAME];
struct device dev;
void *priv; /* Master port private data */
@@ -425,7 +422,7 @@ struct rio_ops {
int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
void *(*get_inb_message)(struct rio_mport *mport, int mbox);
int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
- u64 rstart, u32 size, u32 flags);
+ u64 rstart, u64 size, u32 flags);
void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
int (*query_mport)(struct rio_mport *mport,
struct rio_mport_attr *attr);
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 2543bc163d54..334c576c151c 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -38,5 +38,7 @@
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
#define RIO_DID_TSI721 0x80ab
+#define RIO_DID_IDTRXS1632 0x80e5
+#define RIO_DID_IDTRXS2448 0x80e6
#endif /* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 1063ae382bc2..40c04efe7409 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -42,9 +42,11 @@
#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */
#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */
#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */
+#define RIO_PEF_DEV32 0x00001000 /* [III] PE supports Common Transport Dev32 */
#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
-#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
+#define RIO_PEF_CTLS 0x00000010 /* [III] Common Transport Large System (< rev.3) */
+#define RIO_PEF_DEV16 0x00000010 /* [III] PE Supports Common Transport Dev16 (rev.3) */
#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */
#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */
#define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */
@@ -194,70 +196,101 @@
#define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK)
/* Extended Feature Block IDs */
-#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */
-#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */
-#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */
-#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */
-#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */
-#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */
-#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */
-#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */
-#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */
-#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */
+#define RIO_EFB_SER_EP_M1_ID 0x0001 /* [VI] LP-Serial EP Devices, Map I */
+#define RIO_EFB_SER_EP_SW_M1_ID 0x0002 /* [VI] LP-Serial EP w SW Recovery Devices, Map I */
+#define RIO_EFB_SER_EPF_M1_ID 0x0003 /* [VI] LP-Serial EP Free Devices, Map I */
+#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP-Serial EP Devices, RIO 1.2 */
+#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP-Serial EP w SW Recovery Devices, RIO 1.2 */
+#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP-Serial EP Free Devices, RIO 1.2 */
#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */
+#define RIO_EFB_SER_EPF_SW_M1_ID 0x0009 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map I */
+#define RIO_EFB_SW_ROUTING_TBL 0x000E /* [III] Switch Routing Table Block */
+#define RIO_EFB_SER_EP_M2_ID 0x0011 /* [VI] LP-Serial EP Devices, Map II */
+#define RIO_EFB_SER_EP_SW_M2_ID 0x0012 /* [VI] LP-Serial EP w SW Recovery Devices, Map II */
+#define RIO_EFB_SER_EPF_M2_ID 0x0013 /* [VI] LP-Serial EP Free Devices, Map II */
+#define RIO_EFB_ERR_MGMNT_HS 0x0017 /* [VIII] Error Management Extensions, Hot-Swap only */
+#define RIO_EFB_SER_EPF_SW_M2_ID 0x0019 /* [VI] LP-Serial EP Free w SW Recovery Devices, Map II */
/*
- * Physical 8/16 LP-LVDS
- * ID=0x0001, Generic End Point Devices
- * ID=0x0002, Generic End Point Devices, software assisted recovery option
- * ID=0x0003, Generic End Point Free Devices
- *
- * Physical LP-Serial
- * ID=0x0004, Generic End Point Devices
- * ID=0x0005, Generic End Point Devices, software assisted recovery option
- * ID=0x0006, Generic End Point Free Devices
+ * Physical LP-Serial Registers Definitions
+ * Parameters in register macros:
+ * n - port number, m - Register Map Type (1 or 2)
*/
#define RIO_PORT_MNT_HEADER 0x0000
#define RIO_PORT_REQ_CTL_CSR 0x0020
-#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */
-#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */
-#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */
+#define RIO_PORT_RSP_CTL_CSR 0x0024
+#define RIO_PORT_LINKTO_CTL_CSR 0x0020
+#define RIO_PORT_RSPTO_CTL_CSR 0x0024
#define RIO_PORT_GEN_CTL_CSR 0x003c
#define RIO_PORT_GEN_HOST 0x80000000
#define RIO_PORT_GEN_MASTER 0x40000000
#define RIO_PORT_GEN_DISCOVERED 0x20000000
-#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_MNT_REQ_CSR(n, m) (0x40 + (n) * (0x20 * (m)))
#define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */
#define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */
-#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_MNT_RSP_CSR(n, m) (0x44 + (n) * (0x20 * (m)))
#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */
#define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */
#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */
-#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_ACK_STS_CSR(n) (0x48 + (n) * 0x20) /* Only in RM-I */
#define RIO_PORT_N_ACK_CLEAR 0x80000000
#define RIO_PORT_N_ACK_INBOUND 0x3f000000
#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
-#define RIO_PORT_N_CTL2_CSR(x) (0x0054 + x*0x20)
+#define RIO_PORT_N_CTL2_CSR(n, m) (0x54 + (n) * (0x20 * (m)))
#define RIO_PORT_N_CTL2_SEL_BAUD 0xf0000000
-#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
-#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
-#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
+#define RIO_PORT_N_ERR_STS_CSR(n, m) (0x58 + (n) * (0x20 * (m)))
+#define RIO_PORT_N_ERR_STS_OUT_ES 0x00010000 /* Output Error-stopped */
+#define RIO_PORT_N_ERR_STS_INP_ES 0x00000100 /* Input Error-stopped */
#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */
+#define RIO_PORT_N_ERR_STS_PORT_UA 0x00000008 /* Port Unavailable */
#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004
#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002
#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001
-#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20)
+#define RIO_PORT_N_CTL_CSR(n, m) (0x5c + (n) * (0x20 * (m)))
#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
#define RIO_PORT_N_CTL_IPW 0x38000000 /* Initialized Port Width */
#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
-#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
-#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000
-#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000
-#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000
+#define RIO_PORT_N_CTL_EN_RX 0x00200000
+#define RIO_PORT_N_CTL_EN_TX 0x00400000
+#define RIO_PORT_N_OB_ACK_CSR(n) (0x60 + (n) * 0x40) /* Only in RM-II */
+#define RIO_PORT_N_OB_ACK_CLEAR 0x80000000
+#define RIO_PORT_N_OB_ACK_OUTSTD 0x00fff000
+#define RIO_PORT_N_OB_ACK_OUTBND 0x00000fff
+#define RIO_PORT_N_IB_ACK_CSR(n) (0x64 + (n) * 0x40) /* Only in RM-II */
+#define RIO_PORT_N_IB_ACK_INBND 0x00000fff
+
+/*
+ * Device-based helper macros for serial port register access.
+ * d - pointer to rapidio device object, n - port number
+ */
+
+#define RIO_DEV_PORT_N_MNT_REQ_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_MNT_RSP_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_ACK_STS_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_ACK_STS_CSR(n))
+
+#define RIO_DEV_PORT_N_CTL2_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_CTL2_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_ERR_STS_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_CTL_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_CTL_CSR(n, d->phys_rmap))
+
+#define RIO_DEV_PORT_N_OB_ACK_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_OB_ACK_CSR(n))
+
+#define RIO_DEV_PORT_N_IB_ACK_CSR(d, n) \
+ (d->phys_efptr + RIO_PORT_N_IB_ACK_CSR(n))
/*
* Error Management Extensions (RapidIO 1.3+, Part 8)
@@ -268,6 +301,7 @@
/* General EM Registers (Common for all Ports) */
#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */
+#define RIO_EM_EMHS_CAR 0x004 /* EM Functionality CAR */
#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */
#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */
#define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */
@@ -278,15 +312,33 @@
#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */
#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */
#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */
+#define RIO_EM_LTL_DID32_CAP 0x020 /* Logical/Transport Layer Dev32 DestID Capture CSR */
+#define RIO_EM_LTL_SID32_CAP 0x024 /* Logical/Transport Layer Dev32 source ID Capture CSR */
#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */
+#define RIO_EM_PW_TGT_DEVID_D16M 0xff000000 /* Port-write Target DID16 MSB */
+#define RIO_EM_PW_TGT_DEVID_D8 0x00ff0000 /* Port-write Target DID16 LSB or DID8 */
+#define RIO_EM_PW_TGT_DEVID_DEV16 0x00008000 /* Port-write Target DID size - 16 bit */
+#define RIO_EM_PW_TGT_DEVID_DEV32 0x00004000 /* Port-write Target DID size - 32 bit */
#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */
+#define RIO_EM_PKT_TTL_VAL 0xffff0000 /* Packet Time-to-live value */
+#define RIO_EM_PW_TGT32_DEVID 0x030 /* Port-write Dev32 Target deviceID CSR */
+#define RIO_EM_PW_TX_CTRL 0x034 /* Port-write Transmission Control CSR */
+#define RIO_EM_PW_TX_CTRL_PW_DIS 0x00000001 /* Port-write Transmission Disable bit */
/* Per-Port EM Registers */
#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */
#define REM_PED_IMPL_SPEC 0x80000000
+#define REM_PED_LINK_OK2U 0x40000000 /* Link OK to Uninit transition */
+#define REM_PED_LINK_UPDA 0x20000000 /* Link Uninit Packet Discard Active */
+#define REM_PED_LINK_U2OK 0x10000000 /* Link Uninit to OK transition */
#define REM_PED_LINK_TO 0x00000001
+
#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */
+#define RIO_EM_PN_ERRRATE_EN_OK2U 0x40000000 /* Enable notification for OK2U */
+#define RIO_EM_PN_ERRRATE_EN_UPDA 0x20000000 /* Enable notification for UPDA */
+#define RIO_EM_PN_ERRRATE_EN_U2OK 0x10000000 /* Enable notification for U2OK */
+
#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */
#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */
#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */
@@ -294,5 +346,50 @@
#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */
#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */
#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */
+#define RIO_EM_PN_LINK_UDT(x) (0x070 + x*0x40) /* Port N Link Uninit Discard Timer CSR */
+#define RIO_EM_PN_LINK_UDT_TO 0xffffff00 /* Link Uninit Timeout value */
+
+/*
+ * Switch Routing Table Register Block ID=0x000E (RapidIO 3.0+, part 3)
+ * Register offsets are defined from beginning of the block.
+ */
+
+/* Broadcast Routing Table Control CSR */
+#define RIO_BC_RT_CTL_CSR 0x020
+#define RIO_RT_CTL_THREE_LVL 0x80000000
+#define RIO_RT_CTL_DEV32_RT_CTRL 0x40000000
+#define RIO_RT_CTL_MC_MASK_SZ 0x03000000 /* 3.0+ Part 11: Multicast */
+
+/* Broadcast Level 0 Info CSR */
+#define RIO_BC_RT_LVL0_INFO_CSR 0x030
+#define RIO_RT_L0I_NUM_GR 0xff000000
+#define RIO_RT_L0I_GR_PTR 0x00fffc00
+
+/* Broadcast Level 1 Info CSR */
+#define RIO_BC_RT_LVL1_INFO_CSR 0x034
+#define RIO_RT_L1I_NUM_GR 0xff000000
+#define RIO_RT_L1I_GR_PTR 0x00fffc00
+
+/* Broadcast Level 2 Info CSR */
+#define RIO_BC_RT_LVL2_INFO_CSR 0x038
+#define RIO_RT_L2I_NUM_GR 0xff000000
+#define RIO_RT_L2I_GR_PTR 0x00fffc00
+
+/* Per-Port Routing Table registers.
+ * Register fields defined in the broadcast section above are
+ * applicable to the corresponding registers below.
+ */
+#define RIO_SPx_RT_CTL_CSR(x) (0x040 + (0x20 * x))
+#define RIO_SPx_RT_LVL0_INFO_CSR(x) (0x50 + (0x20 * x))
+#define RIO_SPx_RT_LVL1_INFO_CSR(x) (0x54 + (0x20 * x))
+#define RIO_SPx_RT_LVL2_INFO_CSR(x) (0x58 + (0x20 * x))
+
+/* Register Formats for Routing Table Group entry.
+ * Register offsets are calculated using GR_PTR field in the corresponding
+ * table Level_N and group/entry numbers (see RapidIO 3.0+ Part 3).
+ */
+#define RIO_RT_Ln_ENTRY_IMPL_DEF 0xf0000000
+#define RIO_RT_Ln_ENTRY_RTE_VAL 0x000003ff
+#define RIO_RT_ENTRY_DROP_PKT 0x300
#endif /* LINUX_RIO_REGS_H */
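
As a usage illustration for the device-based helper macros above, here is a hypothetical link-status check; rio_read_config_32() is the usual maintenance-space accessor from rio_drv.h and is assumed available to the caller.

#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_regs.h>

/* Returns true if port 'port' of rdev reports PORT_OK */
static bool my_port_is_ok(struct rio_dev *rdev, int port)
{
        u32 err_sts = 0;

        /* The offset resolves differently for Register Map I vs. II devices */
        rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, port),
                           &err_sts);

        return !!(err_sts & RIO_PORT_N_ERR_STS_PORT_OK);
}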
diff --git a/include/linux/ds1286.h b/include/linux/rtc/ds1286.h
index 45ea0aa0aeb9..45ea0aa0aeb9 100644
--- a/include/linux/ds1286.h
+++ b/include/linux/rtc/ds1286.h
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 553af2923824..62c68e513e39 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1547,6 +1547,9 @@ struct task_struct {
/* unserialized, strictly 'current' */
unsigned in_execve:1; /* bit to tell LSMs we're in execve */
unsigned in_iowait:1;
+#if !defined(TIF_RESTORE_SIGMASK)
+ unsigned restore_sigmask:1;
+#endif
#ifdef CONFIG_MEMCG
unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
@@ -2680,6 +2683,66 @@ extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+#ifdef TIF_RESTORE_SIGMASK
+/*
+ * Legacy restore_sigmask accessors. These are inefficient on
+ * SMP architectures because they require atomic operations.
+ */
+
+/**
+ * set_restore_sigmask() - make sure saved_sigmask processing gets done
+ *
+ * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
+ * will run before returning to user mode, to process the flag. For
+ * all callers, TIF_SIGPENDING is already set or it's no harm to set
+ * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
+ * arch code will notice on return to user mode, in case those bits
+ * are scarce. We set TIF_SIGPENDING here to ensure that the arch
+ * signal code always gets run when TIF_RESTORE_SIGMASK is set.
+ */
+static inline void set_restore_sigmask(void)
+{
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+ return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+
+#else /* TIF_RESTORE_SIGMASK */
+
+/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
+static inline void set_restore_sigmask(void)
+{
+ current->restore_sigmask = true;
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current->restore_sigmask = false;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current->restore_sigmask;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ if (!current->restore_sigmask)
+ return false;
+ current->restore_sigmask = false;
+ return true;
+}
+#endif
+
static inline void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 35de50a65665..dc5f989be226 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -70,6 +70,8 @@ struct scpi_ops {
int (*sensor_get_capability)(u16 *sensors);
int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *);
int (*sensor_get_value)(u16, u64 *);
+ int (*device_get_power_state)(u16);
+ int (*device_set_power_state)(u16, u8);
};
#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
diff --git a/include/linux/security.h b/include/linux/security.h
index 14df373ff2ca..7831cd57bcf7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -240,7 +240,7 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb);
int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_dentry_init_security(struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
+ const struct qstr *name, void **ctx,
u32 *ctxlen);
int security_inode_alloc(struct inode *inode);
@@ -591,7 +591,7 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
- struct qstr *name,
+ const struct qstr *name,
void **ctx,
u32 *ctxlen)
{
diff --git a/include/linux/serio.h b/include/linux/serio.h
index df4ab5de1586..c733cff44e18 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -31,7 +31,8 @@ struct serio {
struct serio_device_id id;
- spinlock_t lock; /* protects critical sections from port's interrupt handler */
+ /* Protects critical sections from port's interrupt handler */
+ spinlock_t lock;
int (*write)(struct serio *, unsigned char);
int (*open)(struct serio *);
@@ -40,16 +41,29 @@ struct serio {
void (*stop)(struct serio *);
struct serio *parent;
- struct list_head child_node; /* Entry in parent->children list */
+ /* Entry in parent->children list */
+ struct list_head child_node;
struct list_head children;
- unsigned int depth; /* level of nesting in serio hierarchy */
+ /* Level of nesting in serio hierarchy */
+ unsigned int depth;
- struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */
- struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
+ /*
+ * serio->drv is accessed from interrupt handlers; when modifying
+ * caller should acquire serio->drv_mutex and serio->lock.
+ */
+ struct serio_driver *drv;
+ /* Protects serio->drv so attributes can pin current driver */
+ struct mutex drv_mutex;
struct device dev;
struct list_head node;
+
+ /*
+ * For use by PS/2 layer when several ports share hardware and
+ * may get indigestion when exposed to concurrent access (i8042).
+ */
+ struct mutex *ps2_cmd_mutex;
};
#define to_serio_port(d) container_of(d, struct serio, dev)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1a4ea551aae5..4293808d8cfb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
new file mode 100644
index 000000000000..a37bc5538f19
--- /dev/null
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -0,0 +1,8 @@
+#ifndef __WCNSS_CTRL_H__
+#define __WCNSS_CTRL_H__
+
+#include <linux/soc/qcom/smd.h>
+
+struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+
+#endif
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
index 92fc613ab23d..7b8b280c181b 100644
--- a/include/linux/soc/renesas/rcar-sysc.h
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -11,6 +11,6 @@ struct rcar_sysc_ch {
int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
-void __iomem *rcar_sysc_init(phys_addr_t base);
+void rcar_sysc_init(phys_addr_t base, u32 syscier);
#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 899791573a40..4ccf184e971f 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -37,7 +37,6 @@ struct rpcsec_gss_info;
/* auth_cred ac_flags bits */
enum {
- RPC_CRED_NO_CRKEY_TIMEOUT = 0, /* underlying cred has no key timeout */
RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */
RPC_CRED_NOTIFY_TIMEOUT = 2, /* notify generic cred when underlying
key will expire soon */
@@ -82,6 +81,9 @@ struct rpc_cred {
#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0
+/* rpc_auth au_flags */
+#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */
+
/*
* Client authentication handle
*/
@@ -107,6 +109,9 @@ struct rpc_auth {
/* per-flavor data */
};
+/* rpc_auth au_flags */
+#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+
struct rpc_auth_create_args {
rpc_authflavor_t pseudoflavor;
const char *target_name;
@@ -196,7 +201,7 @@ void rpcauth_destroy_credcache(struct rpc_auth *);
void rpcauth_clear_credcache(struct rpc_cred_cache *);
int rpcauth_key_timeout_notify(struct rpc_auth *,
struct rpc_cred *);
-bool rpcauth_cred_key_to_expire(struct rpc_cred *);
+bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index ed03c9f7f908..62a60eeacb0a 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -78,8 +78,6 @@ struct cache_detail {
struct hlist_head * hash_table;
rwlock_t hash_lock;
- atomic_t inuse; /* active user-space update or lookup */
-
char *name;
void (*cache_put)(struct kref *);
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 1f911ccb2a75..68ec78c1aa48 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -73,6 +73,7 @@ u32 gss_delete_sec_context(
rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop,
u32 service);
u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
+bool gss_pseudoflavor_to_datatouch(struct gss_api_mech *, u32 pseudoflavor);
char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
struct pf_desc {
@@ -81,6 +82,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ bool datatouch;
};
/* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 05a1809c44d9..817af0b4385e 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -230,6 +230,10 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
+struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *,
+ bool (*)(struct rpc_task *, void *),
+ void *);
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
bool (*)(struct rpc_task *, void *),
void *);
@@ -247,6 +251,7 @@ void rpc_show_tasks(struct net *);
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
+extern struct workqueue_struct *xprtiod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
static inline int rpc_wait_for_completion_task(struct rpc_task *task)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 7ca44fb5b675..7321ae933867 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -268,6 +268,7 @@ struct svc_rqst {
* cache pages */
#define RQ_VICTIM (5) /* about to be shut down */
#define RQ_BUSY (6) /* request is busy */
+#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
void * rq_argp; /* decoded arguments */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 79ba50856707..ab02a457da1f 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,7 +25,6 @@ struct svc_xprt_ops {
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
int (*xpo_secure_port)(struct svc_rqst *);
- void (*xpo_adjust_wspace)(struct svc_xprt *);
};
struct svc_xprt_class {
@@ -69,6 +68,7 @@ struct svc_xprt {
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
+ atomic_t xpt_nr_rqsts; /* Number of requests */
struct mutex xpt_mutex; /* to serialize sending data */
spinlock_t xpt_lock; /* protects sk_deferred
* and xpt_auth_cache */
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 0ece4ba06f06..bef3fb0abb8f 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -80,6 +80,7 @@ struct sock_xprt {
#define TCP_RPC_REPLY (1UL << 6)
#define XPRT_SOCK_CONNECTING 1U
+#define XPRT_SOCK_DATA_READY (2)
#endif /* __KERNEL__ */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 017fced60242..5f81f8a187f2 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
struct device;
-struct dma_attrs;
struct page;
struct scatterlist;
@@ -68,10 +67,10 @@ swiotlb_free_coherent(struct device *hwdev, size_t size,
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
@@ -83,12 +82,13 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir,
+ unsigned long attrs);
extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index fa7bc29925c9..697e160c78d0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -28,6 +28,7 @@
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
+struct completion;
struct ctl_table;
struct nsproxy;
struct ctl_table_root;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index b4c2a485b28a..cbd8990e2e77 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -105,46 +105,29 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
-/*
- * An arch can define its own version of set_restore_sigmask() to get the
- * job done however works, with or without TIF_RESTORE_SIGMASK.
- */
-#define HAVE_SET_RESTORE_SIGMASK 1
-
-/**
- * set_restore_sigmask() - make sure saved_sigmask processing gets done
- *
- * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
- * will run before returning to user mode, to process the flag. For
- * all callers, TIF_SIGPENDING is already set or it's no harm to set
- * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
- * arch code will notice on return to user mode, in case those bits
- * are scarce. We set TIF_SIGPENDING here to ensure that the arch
- * signal code always gets run when TIF_RESTORE_SIGMASK is set.
- */
-static inline void set_restore_sigmask(void)
-{
- set_thread_flag(TIF_RESTORE_SIGMASK);
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
-}
-static inline void clear_restore_sigmask(void)
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
{
- clear_thread_flag(TIF_RESTORE_SIGMASK);
+ return 0;
}
-static inline bool test_restore_sigmask(void)
-{
- return test_thread_flag(TIF_RESTORE_SIGMASK);
-}
-static inline bool test_and_clear_restore_sigmask(void)
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
{
- return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+ __check_object_size(ptr, n, to_user);
}
-#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
-
-#ifndef HAVE_SET_RESTORE_SIGMASK
-#error "no set_restore_sigmask() provided and default one won't work"
-#endif
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
#endif /* __KERNEL__ */
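
A minimal, hypothetical sketch of where check_object_size() is meant to sit: in an architecture's user-copy wrapper, in front of the raw copy primitive. The __arch_copy_from_user() name is an assumption for illustration, not part of this patch.

#include <linux/thread_info.h>
#include <linux/uaccess.h>

/* Assumed arch-level primitive that performs the actual copy */
extern unsigned long __arch_copy_from_user(void *to,
                                           const void __user *from,
                                           unsigned long n);

static inline unsigned long
my_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        /* to_user == false: data flows from user space into 'to' */
        check_object_size(to, n, false);
        return __arch_copy_from_user(to, from, n);
}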
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 349557825428..f30c187ed785 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
#endif /* __LINUX_UACCESS_H__ */
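
The new calling convention above takes an error label instead of returning a value, so several accesses can share one failure path. A hypothetical sketch (the access_ok() check and surrounding context are omitted):

#include <linux/uaccess.h>

/* Reads two consecutive u32 values from user space */
static int my_read_pair(const u32 __user *up, u32 *a, u32 *b)
{
        user_access_begin();
        unsafe_get_user(*a, &up[0], efault);
        unsafe_get_user(*b, &up[1], efault);
        user_access_end();
        return 0;

efault:
        user_access_end();
        return -EFAULT;
}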
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b39a5f3153bd..960bedbdec87 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
int vga_switcheroo_process_delayed_switch(void);
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
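
A hypothetical use of the new probe-defer helper added above: a GPU driver backing out of probe until the switcheroo handler is ready, rather than binding too early. The driver name and probe body are invented.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static int my_gpu_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
{
        /* Wait for the platform's switcheroo handler before binding */
        if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;

        /* ... normal probe path ... */
        return 0;
}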
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 6e6cb0c9d7cb..26c155bb639b 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -149,6 +149,19 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return __virtio_test_bit(vdev, fbit);
}
+/**
+ * virtio_has_iommu_quirk - determine whether this device has the iommu quirk
+ * @vdev: the device
+ */
+static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev)
+{
+ /*
+ * Note the reverse polarity of the quirk feature (compared to most
+ * other features), this is for compatibility with legacy systems.
+ */
+ return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
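
A small, hypothetical consumer of virtio_has_iommu_quirk(): deciding whether a virtqueue implementation should go through the DMA API or hand the device guest-physical addresses, as legacy devices that bypass the IOMMU expect.

#include <linux/virtio_config.h>

static bool my_use_dma_api(const struct virtio_device *vdev)
{
        /*
         * Devices with the quirk bypass the platform IOMMU, so their
         * descriptors must carry guest-physical addresses rather than
         * DMA addresses obtained from the DMA API.
         */
        return !virtio_has_iommu_quirk(vdev);
}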
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
new file mode 100644
index 000000000000..9638bfeb0d1f
--- /dev/null
+++ b/include/linux/virtio_vsock.h
@@ -0,0 +1,154 @@
+#ifndef _LINUX_VIRTIO_VSOCK_H
+#define _LINUX_VIRTIO_VSOCK_H
+
+#include <uapi/linux/virtio_vsock.h>
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <net/af_vsock.h>
+
+#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128
+#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256)
+#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256)
+#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
+#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
+#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
+
+enum {
+ VSOCK_VQ_RX = 0, /* for host to guest data */
+ VSOCK_VQ_TX = 1, /* for guest to host data */
+ VSOCK_VQ_EVENT = 2,
+ VSOCK_VQ_MAX = 3,
+};
+
+/* Per-socket state (accessed via vsk->trans) */
+struct virtio_vsock_sock {
+ struct vsock_sock *vsk;
+
+ /* Protected by lock_sock(sk_vsock(trans->vsk)) */
+ u32 buf_size;
+ u32 buf_size_min;
+ u32 buf_size_max;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ /* Protected by tx_lock */
+ u32 tx_cnt;
+ u32 buf_alloc;
+ u32 peer_fwd_cnt;
+ u32 peer_buf_alloc;
+
+ /* Protected by rx_lock */
+ u32 fwd_cnt;
+ u32 rx_bytes;
+ struct list_head rx_queue;
+};
+
+struct virtio_vsock_pkt {
+ struct virtio_vsock_hdr hdr;
+ struct work_struct work;
+ struct list_head list;
+ void *buf;
+ u32 len;
+ u32 off;
+ bool reply;
+};
+
+struct virtio_vsock_pkt_info {
+ u32 remote_cid, remote_port;
+ struct msghdr *msg;
+ u32 pkt_len;
+ u16 type;
+ u16 op;
+ u32 flags;
+ bool reply;
+};
+
+struct virtio_transport {
+ /* This must be the first field */
+ struct vsock_transport transport;
+
+ /* Takes ownership of the packet */
+ int (*send_pkt)(struct virtio_vsock_pkt *pkt);
+};
+
+ssize_t
+virtio_transport_stream_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len,
+ int type);
+int
+virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags);
+
+s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
+s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
+
+int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+ struct vsock_sock *psk);
+u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk);
+u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk);
+u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk);
+void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val);
+void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val);
+void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val);
+int
+virtio_transport_notify_poll_in(struct vsock_sock *vsk,
+ size_t target,
+ bool *data_ready_now);
+int
+virtio_transport_notify_poll_out(struct vsock_sock *vsk,
+ size_t target,
+ bool *space_available_now);
+
+int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
+ size_t target, ssize_t copied, bool data_read,
+ struct vsock_transport_recv_notify_data *data);
+int virtio_transport_notify_send_init(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data);
+int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
+ ssize_t written, struct vsock_transport_send_notify_data *data);
+
+u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
+bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
+bool virtio_transport_stream_allow(u32 cid, u32 port);
+int virtio_transport_dgram_bind(struct vsock_sock *vsk,
+ struct sockaddr_vm *addr);
+bool virtio_transport_dgram_allow(u32 cid, u32 port);
+
+int virtio_transport_connect(struct vsock_sock *vsk);
+
+int virtio_transport_shutdown(struct vsock_sock *vsk, int mode);
+
+void virtio_transport_release(struct vsock_sock *vsk);
+
+ssize_t
+virtio_transport_stream_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len);
+int
+virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ struct sockaddr_vm *remote_addr,
+ struct msghdr *msg,
+ size_t len);
+
+void virtio_transport_destruct(struct vsock_sock *vsk);
+
+void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
+u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
+void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+
+#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 51732d6c9555..7047bc7f8106 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -66,7 +66,8 @@ struct watchdog_ops {
* as configurable from user space. Only relevant if
* max_hw_heartbeat_ms is not provided.
* @min_hw_heartbeat_ms:
- * Minimum time between heartbeats, in milli-seconds.
+ * Hardware limit for minimum time between heartbeats,
+ * in milli-seconds.
* @max_hw_heartbeat_ms:
* Hardware limit for maximum timeout, in milli-seconds.
* Replaces max_timeout if specified.
@@ -180,4 +181,7 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd,
extern int watchdog_register_device(struct watchdog_device *);
extern void watchdog_unregister_device(struct watchdog_device *);
+/* devres register variant */
+int devm_watchdog_register_device(struct device *dev, struct watchdog_device *);
+
#endif /* ifndef _LINUX_WATCHDOG_H */
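
A hypothetical probe sketch using the new devres registration variant declared above, so the watchdog is unregistered automatically on driver unbind; my_wdt_info, my_wdt_ops and the timeout values are assumed to be provided by the driver elsewhere.

#include <linux/platform_device.h>
#include <linux/watchdog.h>

static int my_wdt_probe(struct platform_device *pdev)
{
        struct watchdog_device *wdd;

        wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
        if (!wdd)
                return -ENOMEM;

        wdd->info = &my_wdt_info;       /* assumed driver-provided info */
        wdd->ops = &my_wdt_ops;         /* assumed driver-provided ops */
        wdd->min_timeout = 1;
        wdd->max_timeout = 60;
        watchdog_init_timeout(wdd, 0, &pdev->dev);

        /* Unregistered automatically when the driver unbinds */
        return devm_watchdog_register_device(&pdev->dev, wdd);
}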
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 760399a470bd..2bb5deb0012e 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -173,14 +173,14 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
mutex_release(&ctx->dep_map, 0, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
- if (!config_enabled(CONFIG_PROVE_LOCKING))
+ if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
* lockdep will normally handle this,
* but fail without anyway
*/
ctx->done_acquire = 1;
- if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+ if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC))
/* ensure ww_acquire_fini will still fail if called twice */
ctx->acquired = ~0U;
#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 946340ce7701..a4a9a55a0c42 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -98,7 +98,7 @@ struct vb2_threadio_data;
* #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(struct device *dev, const struct dma_attrs *attrs,
+ void *(*alloc)(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags);
void (*put)(void *buf_priv);
@@ -408,7 +408,7 @@ struct vb2_buf_ops {
* @io_modes: supported io methods (see vb2_io_modes enum)
* @dev: device to use for the default allocation context if the driver
* doesn't fill in the @alloc_devs array.
- * @dma_attrs: DMA attributes to use for the DMA. May be NULL.
+ * @dma_attrs: DMA attributes to use for the DMA.
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
@@ -476,7 +476,7 @@ struct vb2_queue {
unsigned int type;
unsigned int io_modes;
struct device *dev;
- const struct dma_attrs *dma_attrs;
+ unsigned long dma_attrs;
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
index df2aabee3401..5604818d137e 100644
--- a/include/media/videobuf2-dma-contig.h
+++ b/include/media/videobuf2-dma-contig.h
@@ -16,8 +16,6 @@
#include <media/videobuf2-v4l2.h>
#include <linux/dma-mapping.h>
-struct dma_attrs;
-
static inline dma_addr_t
vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
{
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
index 5ae962512fb8..b2ebc91fe09a 100644
--- a/include/misc/cxl-base.h
+++ b/include/misc/cxl-base.h
@@ -10,6 +10,8 @@
#ifndef _MISC_CXL_BASE_H
#define _MISC_CXL_BASE_H
+#include <misc/cxl.h>
+
#ifdef CONFIG_CXL_BASE
#define CXL_IRQ_RANGES 4
@@ -36,12 +38,24 @@ static inline void cxl_ctx_put(void)
atomic_dec(&cxl_use_count);
}
+struct cxl_afu *cxl_afu_get(struct cxl_afu *afu);
+void cxl_afu_put(struct cxl_afu *afu);
void cxl_slbia(struct mm_struct *mm);
+bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
+void cxl_pci_disable_device(struct pci_dev *dev);
+int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
+void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
#else /* CONFIG_CXL_BASE */
static inline bool cxl_ctx_in_use(void) { return false; }
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) { return NULL; }
+static inline void cxl_afu_put(struct cxl_afu *afu) {}
static inline void cxl_slbia(struct mm_struct *mm) {}
+static inline bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) { return false; }
+static inline void cxl_pci_disable_device(struct pci_dev *dev) {}
+static inline int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { return -ENODEV; }
+static inline void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) {}
#endif /* CONFIG_CXL_BASE */
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 56560c5781b4..480d50a0b8ba 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -24,6 +24,46 @@
* generic PCI API. This API is agnostic to the actual AFU.
*/
+#define CXL_SLOT_FLAG_DMA 0x1
+
+/*
+ * Checks if the given card is in a cxl capable slot. Pass CXL_SLOT_FLAG_DMA if
+ * the card requires CAPP DMA mode to also check if the system supports it.
+ * This is intended to be used by bi-modal devices to determine if they can use
+ * cxl mode or if they should continue running in PCI mode.
+ *
+ * Note that this only checks if the slot is cxl capable - it does not
+ * currently check if the CAPP is currently available for chips where it can be
+ * assigned to different PHBs on a first come first serve basis (i.e. P8)
+ */
+bool cxl_slot_is_supported(struct pci_dev *dev, int flags);
+
+
+#define CXL_BIMODE_CXL 1
+#define CXL_BIMODE_PCI 2
+
+/*
+ * Check the mode that the given bi-modal CXL adapter is currently in and
+ * change it if necessary. This does not apply to AFU drivers.
+ *
+ * If the mode matches the requested mode this function will return 0 - if the
+ * driver was expecting the generic CXL driver to have bound to the adapter and
+ * it gets this return value it should fail the probe function to give the CXL
+ * driver a chance to probe it.
+ *
+ * If the mode does not match it will start a background task to unplug the
+ * device from Linux and switch its mode, and will return -EBUSY. At this
+ * point the calling driver should make sure it has released the device and
+ * fail its probe function.
+ *
+ * The offset of the CXL VSEC can be provided to this function. If 0 is passed,
+ * this function will search for a CXL VSEC with ID 0x1280 and return -ENODEV
+ * if it is not found.
+ */
+#ifdef CONFIG_CXL_BIMODAL
+int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec);
+#endif
+
/* Get the AFU associated with a pci_dev */
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
@@ -86,6 +126,13 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
int cxl_release_context(struct cxl_context *ctx);
/*
+ * Set and get private data associated with a context. Allows drivers to have a
+ * back pointer to some useful structure.
+ */
+int cxl_set_priv(struct cxl_context *ctx, void *priv);
+void *cxl_get_priv(struct cxl_context *ctx);
+
+/*
* Allocate AFU interrupts for this context. num=0 will allocate the default
* for this AFU as given in the AFU descriptor. This number doesn't include the
* interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
@@ -144,6 +191,25 @@ void cxl_psa_unmap(void __iomem *addr);
/* Get the process element for this context */
int cxl_process_element(struct cxl_context *ctx);
+/*
+ * Limit the number of interrupts that a single context can allocate via
+ * cxl_start_work. If using the api with a real phb, this may be used to
+ * request that additional default contexts be created when allocating
+ * interrupts via pci_enable_msix_range. These will be set to the same running
+ * state as the default context, and if that is running it will reuse the
+ * parameters previously passed to cxl_start_context for the default context.
+ */
+int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs);
+int cxl_get_max_irqs_per_process(struct pci_dev *dev);
+
+/*
+ * Used to simultaneously iterate over the hardware interrupt numbers, contexts
+ * and AFU interrupt numbers allocated for the device via pci_enable_msix_range;
+ * a useful convenience when working with hardware that has limitations on the
+ * number of interrupts per process. *ctx and *afu_irq should be NULL and 0 to
+ * start the iteration.
+ */
+int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
/*
* These calls allow drivers to create their own file descriptors and make them
@@ -220,4 +286,52 @@ void cxl_perst_reloads_same_image(struct cxl_afu *afu,
*/
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
+/*
+ * AFU driver ops allow an AFU driver to create their own events to pass to
+ * userspace through the file descriptor as a simpler alternative to overriding
+ * the read() and poll() calls that works with the generic cxl events. These
+ * events are given priority over the generic cxl events, so they will be
+ * delivered first if multiple types of events are pending.
+ *
+ * The AFU driver must call cxl_context_events_pending() to notify the cxl
+ * driver that new events are ready to be delivered for a specific context.
+ * cxl_context_events_pending() will adjust the current count of AFU driver
+ * events for this context, and wake up anyone waiting on the context wait
+ * queue.
+ *
+ * The cxl driver will then call fetch_event() to get a structure defining
+ * the size and address of the driver specific event data. The cxl driver
+ * will build a cxl header with type and process_element fields filled in,
+ * and header.size set to sizeof(struct cxl_event_header) + data_size.
+ * The total size of the event is limited to CXL_READ_MIN_SIZE (4K).
+ *
+ * fetch_event() is called with a spin lock held, so it must not sleep.
+ *
+ * The cxl driver will then deliver the event to userspace, and finally
+ * call event_delivered() to return the status of the operation, identified
+ * by cxl context and AFU driver event data pointers.
+ * 0 Success
+ * -EFAULT copy_to_user() has failed
+ * -EINVAL Event data pointer is NULL, or event size is greater than
+ * CXL_READ_MIN_SIZE.
+ */
+struct cxl_afu_driver_ops {
+ struct cxl_event_afu_driver_reserved *(*fetch_event) (
+ struct cxl_context *ctx);
+ void (*event_delivered) (struct cxl_context *ctx,
+ struct cxl_event_afu_driver_reserved *event,
+ int rc);
+};
+
+/*
+ * Associate the above driver ops with a specific context.
+ * Reset the current count of AFU driver events.
+ */
+void cxl_set_driver_ops(struct cxl_context *ctx,
+ struct cxl_afu_driver_ops *ops);
+
+/* Notify cxl driver that new events are ready to be delivered for context */
+void cxl_context_events_pending(struct cxl_context *ctx,
+ unsigned int new_events);
+
#endif /* _MISC_CXL_H */
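
To make the event-delivery flow described in the AFU driver ops comment above concrete, here is a hypothetical AFU driver fragment wiring up the new hooks; struct my_ctx and the event handling are invented for illustration.

#include <linux/printk.h>
#include <misc/cxl.h>

struct my_ctx {
        struct cxl_context *ctx;
        struct cxl_event_afu_driver_reserved *event;    /* pre-built event */
};

/* Called by the cxl driver with a spin lock held - must not sleep */
static struct cxl_event_afu_driver_reserved *
my_fetch_event(struct cxl_context *ctx)
{
        struct my_ctx *p = cxl_get_priv(ctx);   /* back pointer set at setup */

        return p->event;
}

static void my_event_delivered(struct cxl_context *ctx,
                               struct cxl_event_afu_driver_reserved *event,
                               int rc)
{
        if (rc)
                pr_warn("my_afu: event delivery failed: %d\n", rc);
}

static struct cxl_afu_driver_ops my_afu_ops = {
        .fetch_event     = my_fetch_event,
        .event_delivered = my_event_delivered,
};

static void my_ctx_setup(struct my_ctx *p)
{
        cxl_set_priv(p->ctx, p);
        cxl_set_driver_ops(p->ctx, &my_afu_ops);
}

/* Later, when the hardware raises an event: */
static void my_hw_event(struct my_ctx *p)
{
        /* ... fill in *p->event ... */
        cxl_context_events_pending(p->ctx, 1);
}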
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index e9eb2d6791b3..f2758964ce6f 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -63,6 +63,8 @@ struct vsock_sock {
struct list_head accept_queue;
bool rejected;
struct delayed_work dwork;
+ struct delayed_work close_work;
+ bool close_work_scheduled;
u32 peer_shutdown;
bool sent_request;
bool ignore_connecting_rst;
@@ -165,6 +167,9 @@ static inline int vsock_core_init(const struct vsock_transport *t)
}
void vsock_core_exit(void);
+/* The transport may downcast this to access transport-specific functions */
+const struct vsock_transport *vsock_core_get_transport(void);
+
/**** UTILS ****/
void vsock_release_pending(struct sock *pending);
@@ -177,6 +182,7 @@ void vsock_remove_connected(struct vsock_sock *vsk);
struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr);
struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
+void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
#endif /* __AF_VSOCK_H__ */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 4089abc6e9c0..0933c7455a30 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -275,7 +275,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#define __net_initconst
#else
#define __net_init __init
-#define __net_exit __exit_refok
+#define __net_exit __ref
#define __net_initdata __initdata
#define __net_initconst __initconst
#endif
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 8c337cd0e1e4..5b847e49f7e9 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -214,7 +214,7 @@ typedef enum {
SCTP_SS_LISTENING = TCP_LISTEN,
SCTP_SS_ESTABLISHING = TCP_SYN_SENT,
SCTP_SS_ESTABLISHED = TCP_ESTABLISHED,
- SCTP_SS_CLOSING = TCP_CLOSING,
+ SCTP_SS_CLOSING = TCP_CLOSE_WAIT,
} sctp_sock_state_t;
/* These functions map various type to printable names. */
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 384041669489..5ee7aab95eb8 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -94,6 +94,19 @@ enum ib_sa_selector {
IB_SA_BEST = 3
};
+/*
+ * There are 4 types of join states:
+ * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
+ * The order corresponds to JoinState bits in MCMemberRecord.
+ */
+enum ib_sa_mc_join_states {
+ FULLMEMBER_JOIN,
+ NONMEMBER_JOIN,
+ SENDONLY_NONMEBER_JOIN,
+ SENDONLY_FULLMEMBER_JOIN,
+ NUM_JOIN_MEMBERSHIP_TYPES,
+};
+
#define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT BIT(12)
/*
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 7e440d41487a..8e90dd28bb75 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -562,6 +562,7 @@ enum ib_event_type {
IB_EVENT_QP_LAST_WQE_REACHED,
IB_EVENT_CLIENT_REREGISTER,
IB_EVENT_GID_CHANGE,
+ IB_EVENT_WQ_FATAL,
};
const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
@@ -572,6 +573,7 @@ struct ib_event {
struct ib_cq *cq;
struct ib_qp *qp;
struct ib_srq *srq;
+ struct ib_wq *wq;
u8 port_num;
} element;
enum ib_event_type event;
@@ -1015,6 +1017,7 @@ struct ib_qp_init_attr {
* Only needed for special QP types, or when using the RW API.
*/
u8 port_num;
+ struct ib_rwq_ind_table *rwq_ind_tbl;
};
struct ib_qp_open_attr {
@@ -1323,6 +1326,8 @@ struct ib_ucontext {
struct list_head ah_list;
struct list_head xrcd_list;
struct list_head rule_list;
+ struct list_head wq_list;
+ struct list_head rwq_ind_tbl_list;
int closing;
struct pid *tgid;
@@ -1428,6 +1433,67 @@ struct ib_srq {
} ext;
};
+enum ib_wq_type {
+ IB_WQT_RQ
+};
+
+enum ib_wq_state {
+ IB_WQS_RESET,
+ IB_WQS_RDY,
+ IB_WQS_ERR
+};
+
+struct ib_wq {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ void *wq_context;
+ void (*event_handler)(struct ib_event *, void *);
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ u32 wq_num;
+ enum ib_wq_state state;
+ enum ib_wq_type wq_type;
+ atomic_t usecnt;
+};
+
+struct ib_wq_init_attr {
+ void *wq_context;
+ enum ib_wq_type wq_type;
+ u32 max_wr;
+ u32 max_sge;
+ struct ib_cq *cq;
+ void (*event_handler)(struct ib_event *, void *);
+};
+
+enum ib_wq_attr_mask {
+ IB_WQ_STATE = 1 << 0,
+ IB_WQ_CUR_STATE = 1 << 1,
+};
+
+struct ib_wq_attr {
+ enum ib_wq_state wq_state;
+ enum ib_wq_state curr_wq_state;
+};
+
+struct ib_rwq_ind_table {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ atomic_t usecnt;
+ u32 ind_tbl_num;
+ u32 log_ind_tbl_size;
+ struct ib_wq **ind_tbl;
+};
+
+struct ib_rwq_ind_table_init_attr {
+ u32 log_ind_tbl_size;
+ /* Each entry is a pointer to Receive Work Queue */
+ struct ib_wq **ind_tbl;
+};
+
+/*
+ * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
+ * @max_read_sge: Maximum SGE elements per RDMA READ request.
+ */
struct ib_qp {
struct ib_device *device;
struct ib_pd *pd;
@@ -1449,7 +1515,10 @@ struct ib_qp {
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
u32 qp_num;
+ u32 max_write_sge;
+ u32 max_read_sge;
enum ib_qp_type qp_type;
+ struct ib_rwq_ind_table *rwq_ind_tbl;
};
struct ib_mr {
@@ -1506,6 +1575,7 @@ enum ib_flow_spec_type {
IB_FLOW_SPEC_IB = 0x22,
/* L3 header*/
IB_FLOW_SPEC_IPV4 = 0x30,
+ IB_FLOW_SPEC_IPV6 = 0x31,
/* L4 headers*/
IB_FLOW_SPEC_TCP = 0x40,
IB_FLOW_SPEC_UDP = 0x41
@@ -1567,6 +1637,18 @@ struct ib_flow_spec_ipv4 {
struct ib_flow_ipv4_filter mask;
};
+struct ib_flow_ipv6_filter {
+ u8 src_ip[16];
+ u8 dst_ip[16];
+};
+
+struct ib_flow_spec_ipv6 {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_ipv6_filter val;
+ struct ib_flow_ipv6_filter mask;
+};
+
struct ib_flow_tcp_udp_filter {
__be16 dst_port;
__be16 src_port;
@@ -1588,6 +1670,7 @@ union ib_flow_spec {
struct ib_flow_spec_ib ib;
struct ib_flow_spec_ipv4 ipv4;
struct ib_flow_spec_tcp_udp tcp_udp;
+ struct ib_flow_spec_ipv6 ipv6;
};
struct ib_flow_attr {
@@ -1921,7 +2004,18 @@ struct ib_device {
struct ifla_vf_stats *stats);
int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
-
+ struct ib_wq * (*create_wq)(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+ int (*destroy_wq)(struct ib_wq *wq);
+ int (*modify_wq)(struct ib_wq *wq,
+ struct ib_wq_attr *attr,
+ u32 wq_attr_mask,
+ struct ib_udata *udata);
+ struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+ int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
struct ib_dma_mapping_ops *dma_ops;
struct module *owner;
@@ -1956,6 +2050,7 @@ struct ib_device {
* in fast paths.
*/
int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
+ void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
};
struct ib_client {
@@ -1991,6 +2086,8 @@ struct ib_client {
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);
+void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
+
int ib_register_device(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *));
@@ -2819,19 +2916,19 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
- direction, attrs);
+ direction, dma_attrs);
}
static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
return dma_unmap_single_attrs(dev->dma_device, addr, size,
- direction, attrs);
+ direction, dma_attrs);
}
/**
@@ -2906,17 +3003,18 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
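
For callers of the ib_dma_*_attrs() helpers, the hunk above replaces the struct dma_attrs pointer with a plain DMA_ATTR_* bitmask. A minimal sketch of an updated call site follows; the map_buf() name and the DMA_ATTR_WEAK_ORDERING attribute are illustrative, not taken from this patch.

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Sketch: attributes are now OR'ed DMA_ATTR_* flags, not a struct. */
static u64 map_buf(struct ib_device *dev, void *buf, size_t len)
{
        return ib_dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
                                       DMA_ATTR_WEAK_ORDERING);
}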
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3167,6 +3265,15 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
u16 pkey, const union ib_gid *gid,
const struct sockaddr *addr);
+struct ib_wq *ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr);
+int ib_destroy_wq(struct ib_wq *wq);
+int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
+ u32 wq_attr_mask);
+struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr*
+ wq_ind_table_init_attr);
+int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset, unsigned int page_size);
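
The new WQ, indirection-table and QP pieces above combine roughly as sketched below: create one receive WQ per CQ, group the WQs into an indirection table, and hand that table to the QP through the new rwq_ind_tbl field. This is a hedged illustration only; build_rss_qp(), the fixed-size array and the capacity values are assumptions, and error handling plus WQ state transitions via ib_modify_wq() are elided.

#include <rdma/ib_verbs.h>

static struct ib_qp *build_rss_qp(struct ib_pd *pd, struct ib_cq **cqs,
                                  unsigned int log_nr_wqs)
{
        struct ib_wq *wqs[8];           /* sketch: assumes log_nr_wqs <= 3 */
        struct ib_wq_init_attr wq_attr = {};
        struct ib_rwq_ind_table_init_attr ind_attr = {};
        struct ib_qp_init_attr qp_attr = {};
        struct ib_rwq_ind_table *ind_tbl;
        unsigned int i;

        for (i = 0; i < (1U << log_nr_wqs); i++) {
                wq_attr.wq_type = IB_WQT_RQ;
                wq_attr.max_wr  = 128;
                wq_attr.max_sge = 1;
                wq_attr.cq      = cqs[i];
                wqs[i] = ib_create_wq(pd, &wq_attr);    /* error handling elided */
        }

        ind_attr.log_ind_tbl_size = log_nr_wqs;
        ind_attr.ind_tbl = wqs;
        ind_tbl = ib_create_rwq_ind_table(pd->device, &ind_attr);

        qp_attr.qp_type = IB_QPT_RAW_PACKET;
        qp_attr.rwq_ind_tbl = ind_tbl;  /* new field: scatter receives via the table */
        return ib_create_qp(pd, &qp_attr);
}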
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 2b95c2c336eb..9303e0e4f508 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -33,11 +33,6 @@
#if !defined(OPA_PORT_INFO_H)
#define OPA_PORT_INFO_H
-/* Temporary until HFI driver is updated */
-#ifndef USE_PI_LED_ENABLE
-#define USE_PI_LED_ENABLE 0
-#endif
-
#define OPA_PORT_LINK_MODE_NOP 0 /* No change */
#define OPA_PORT_LINK_MODE_OPA 4 /* Port mode is OPA */
@@ -274,23 +269,12 @@ enum port_info_field_masks {
OPA_PI_MASK_MTU_CAP = 0x0F,
};
-#if USE_PI_LED_ENABLE
struct opa_port_states {
u8 reserved;
u8 ledenable_offlinereason; /* 1 res, 1 bit, 6 bits */
u8 reserved2;
u8 portphysstate_portstate; /* 4 bits, 4 bits */
};
-#define PI_LED_ENABLE_SUP 1
-#else
-struct opa_port_states {
- u8 reserved;
- u8 offline_reason; /* 2 res, 6 bits */
- u8 reserved2;
- u8 portphysstate_portstate; /* 4 bits, 4 bits */
-};
-#define PI_LED_ENABLE_SUP 0
-#endif
struct opa_port_state_info {
struct opa_port_states port_states;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index afe44fde72a5..81fb1d15e8bb 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -333,11 +333,13 @@ int rdma_disconnect(struct rdma_cm_id *id);
* address.
* @id: Communication identifier associated with the request.
* @addr: Multicast address identifying the group to join.
+ * @join_state: Multicast JoinState bitmap requested by port.
+ * Bitmap is based on IB_SA_MCMEMBER_REC_JOIN_STATE bits.
* @context: User-defined context associated with the join request, returned
* to the user through the private_data pointer in multicast events.
*/
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
- void *context);
+ u8 join_state, void *context);
/**
* rdma_leave_multicast - Leave the multicast group specified by the given
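
A minimal sketch of a caller adapted to the new rdma_join_multicast() signature; passing a join_state of 1 (the full-member bit of the IB JoinState field) is assumed here to preserve the previous behaviour, and join_full_member() is an illustrative name.

#include <rdma/rdma_cm.h>

static int join_full_member(struct rdma_cm_id *id, struct sockaddr *addr,
                            void *mcast_ctx)
{
        /* join_state = 1: full member; other JoinState bits allow
         * send-only style joins per IB_SA_MCMEMBER_REC_JOIN_STATE. */
        return rdma_join_multicast(id, addr, 1, mcast_ctx);
}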
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 9c9a27d42aaa..e31502107a58 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -158,6 +158,7 @@ struct rvt_driver_params {
u32 max_mad_size;
u8 qos_shift;
u8 max_rdma_atomic;
+ u8 reserved_operations;
};
/* Protection domain */
@@ -351,6 +352,9 @@ struct rvt_dev_info {
/* Driver specific properties */
struct rvt_driver_params dparms;
+ /* post send table */
+ const struct rvt_operation_params *post_parms;
+
struct rvt_mregion __rcu *dma_mr;
struct rvt_lkey_table lkey_table;
@@ -484,6 +488,9 @@ void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
int port_index, u16 *pkey_table);
+int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+ int access);
+int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 5edffdca8c53..6b3c6c8b6b77 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -81,6 +81,7 @@ struct rvt_mregion {
u32 mapsz; /* size of the map array */
u8 page_shift; /* 0 - non unform/non powerof2 sizes */
u8 lkey_published; /* in global table */
+ atomic_t lkey_invalid; /* true if current lkey is invalid */
struct completion comp; /* complete when refcount goes to zero */
atomic_t refcount;
struct rvt_segarray *map[0]; /* the segments */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 6d23b879416a..bd34d0b56bf7 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -145,6 +145,12 @@
(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
/*
+ * Internal send flags
+ */
+#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
+#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
+
+/*
* Send work request queue entry.
* The size of the sg_list is determined when the QP is created and stored
* in qp->s_max_sge.
@@ -216,23 +222,43 @@ struct rvt_mmap_info {
* to send a RDMA read response or atomic operation.
*/
struct rvt_ack_entry {
- u8 opcode;
- u8 sent;
+ struct rvt_sge rdma_sge;
+ u64 atomic_data;
u32 psn;
u32 lpsn;
- union {
- struct rvt_sge rdma_sge;
- u64 atomic_data;
- };
+ u8 opcode;
+ u8 sent;
};
#define RC_QP_SCALING_INTERVAL 5
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
+#define RVT_OPERATION_PRIV 0x00000001
+#define RVT_OPERATION_ATOMIC 0x00000002
+#define RVT_OPERATION_ATOMIC_SGE 0x00000004
+#define RVT_OPERATION_LOCAL 0x00000008
+#define RVT_OPERATION_USE_RESERVE 0x00000010
+
+#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
+
+/**
+ * rvt_operation_params - op table entry
+ * @length - the length to copy into the swqe entry
+ * @qpt_support - a bit mask indicating QP type support
+ * @flags - RVT_OPERATION flags (see above)
*
+ * This supports table driven post send so that
+ * each driver can provide its own, potentially
+ * different, set of operations.
+ *
+ **/
+
+struct rvt_operation_params {
+ size_t length;
+ u32 qpt_support;
+ u32 flags;
+};
+
+/*
* Common variables are protected by both r_rq.lock and s_lock in that order
* which only happens in modify_qp() or changing the QP 'state'.
*/
@@ -307,6 +333,7 @@ struct rvt_qp {
u32 s_next_psn; /* PSN for next request */
u32 s_avail; /* number of entries avail */
u32 s_ssn; /* SSN of tail entry */
+ atomic_t s_reserved_used; /* reserved entries in use */
spinlock_t s_lock ____cacheline_aligned_in_smp;
u32 s_flags;
@@ -343,6 +370,8 @@ struct rvt_qp {
struct rvt_sge_state s_ack_rdma_sge;
struct timer_list s_timer;
+ atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */
+
/*
* This sge list MUST be last. Do not add anything below here.
*/
@@ -436,6 +465,49 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
rq->max_sge * sizeof(struct ib_sge)) * n);
}
+/**
+ * rvt_qp_wqe_reserve - reserve operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This routine is used in post send to record
+ * that a wqe uses a reserved operation.
+ */
+static inline void rvt_qp_wqe_reserve(
+ struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
+ atomic_inc(&qp->s_reserved_used);
+}
+
+/**
+ * rvt_qp_wqe_unreserve - clean reserved operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This decrements the reserve use count.
+ *
+ * This call MUST precede the change to
+ * s_last to ensure that post send sees a stable
+ * s_avail.
+ *
+ * An smp_mb__after_atomic() is used to ensure
+ * the compiler does not reorder the update of the
+ * s_last ring index and the decrement of s_reserved_used.
+ */
+static inline void rvt_qp_wqe_unreserve(
+ struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+ wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
+ atomic_dec(&qp->s_reserved_used);
+ /* ensure no compiler reordering up to the s_last change */
+ smp_mb__after_atomic();
+ }
+}
+
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
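
A hedged sketch of the table-driven post-send support added above: a driver fills an rvt_operation_params array indexed by ib_wr_opcode and points its rvt_dev_info at it. The two entries, their lengths and QP-type masks below are illustrative, not a real driver's table.

#include <linux/bitops.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
        [IB_WR_SEND] = {
                .length      = sizeof(struct ib_send_wr),
                .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
        },
        [IB_WR_REG_MR] = {
                .length      = sizeof(struct ib_reg_wr),
                .qpt_support = BIT(IB_QPT_RC),
                .flags       = RVT_OPERATION_LOCAL,
        },
};

/* Hook-up sketch: rdi->post_parms = example_post_parms; WQEs that consume
 * reserved slots are then accounted with rvt_qp_wqe_reserve()/unreserve(). */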
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/include/scsi/viosrp.h
index c1ab8a4c3161..974e07bd8e59 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/include/scsi/viosrp.h
@@ -15,11 +15,6 @@
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
-/* You should have received a copy of the GNU General Public License */
-/* along with this program; if not, write to the Free Software */
-/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* */
-/* */
/* This file contains structures and definitions for IBM RPA (RS/6000 */
/* platform architecture) implementation of the SRP (SCSI RDMA Protocol) */
/* standard. SRP is used on IBM iSeries and pSeries platforms to send SCSI */
@@ -93,7 +88,7 @@ struct viosrp_crq {
};
/* MADs are Management requests above and beyond the IUs defined in the SRP
- * standard.
+ * standard.
*/
enum viosrp_mad_types {
VIOSRP_EMPTY_IU_TYPE = 0x01,
@@ -131,7 +126,7 @@ enum viosrp_capability_flag {
CAP_LIST_DATA = 0x08,
};
-/*
+/*
* Common MAD header
*/
struct mad_common {
@@ -146,7 +141,7 @@ struct mad_common {
* client to the server. There is no way for the server to send
* an asynchronous message back to the client. The Empty IU is used
* to hang out a meaningless request to the server so that it can respond
- * asynchrouously with something like a SCSI AER
+ * asynchronously with something like a SCSI AER
*/
struct viosrp_empty_iu {
struct mad_common common;
@@ -189,7 +184,7 @@ struct mad_migration_cap {
__be32 ecl;
};
-struct capabilities{
+struct capabilities {
__be32 flags;
char name[SRP_MAX_LOC_LEN];
char loc[SRP_MAX_LOC_LEN];
diff --git a/include/soc/imx/cpuidle.h b/include/soc/imx/cpuidle.h
new file mode 100644
index 000000000000..8e7743d3b34d
--- /dev/null
+++ b/include/soc/imx/cpuidle.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2016 Pengutronix, <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SOC_IMX_CPUIDLE_H__
+#define __SOC_IMX_CPUIDLE_H__
+
+#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SOC_IMX6Q)
+void imx6q_cpuidle_fec_irqs_used(void);
+void imx6q_cpuidle_fec_irqs_unused(void);
+#else
+static inline void imx6q_cpuidle_fec_irqs_used(void) { }
+static inline void imx6q_cpuidle_fec_irqs_unused(void) { }
+#endif
+
+#endif /* __SOC_IMX_CPUIDLE_H__ */
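
The intended pairing for the two hooks is sketched below: the FEC driver marks its interrupts as in use while the interface is up so cpuidle avoids states that would lose them. The fec_open_example()/fec_close_example() names are illustrative.

#include <soc/imx/cpuidle.h>

static int fec_open_example(void)
{
        imx6q_cpuidle_fec_irqs_used();          /* block deeper idle states */
        /* ... request IRQs, start the MAC ... */
        return 0;
}

static void fec_close_example(void)
{
        /* ... stop the MAC, free IRQs ... */
        imx6q_cpuidle_fec_irqs_unused();        /* allow deeper idle again */
}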
diff --git a/include/soc/tegra/cpuidle.h b/include/soc/tegra/cpuidle.h
index ea04f4225638..1fae9c7800d1 100644
--- a/include/soc/tegra/cpuidle.h
+++ b/include/soc/tegra/cpuidle.h
@@ -14,7 +14,7 @@
#ifndef __SOC_TEGRA_CPUIDLE_H__
#define __SOC_TEGRA_CPUIDLE_H__
-#ifdef CONFIG_CPU_IDLE
+#if defined(CONFIG_ARM) && defined(CONFIG_CPU_IDLE)
void tegra_cpuidle_pcie_irqs_in_use(void);
#else
static inline void tegra_cpuidle_pcie_irqs_in_use(void)
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index c0abcdc11470..cee8c00f3d3e 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -68,6 +68,7 @@ struct snd_compr_runtime {
* @ops: pointer to DSP callbacks
* @runtime: pointer to runtime structure
* @device: device pointer
+ * @error_work: delayed work used when closing the stream due to an error
* @direction: stream direction, playback/recording
* @metadata_set: metadata set flag, true when set
* @next_track: has userspace signal next track transition, true when set
@@ -78,6 +79,7 @@ struct snd_compr_stream {
struct snd_compr_ops *ops;
struct snd_compr_runtime *runtime;
struct snd_compr *device;
+ struct delayed_work error_work;
enum snd_compr_direction direction;
bool metadata_set;
bool next_track;
@@ -187,4 +189,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
wake_up(&stream->runtime->sleep);
}
+int snd_compr_stop_error(struct snd_compr_stream *stream,
+ snd_pcm_state_t state);
+
#endif
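
A hedged sketch of how a DSP driver might use the new helper: on a fatal stream error it asks the core to stop the stream and report the terminal state, and the core's new error_work finishes the teardown outside the caller's context. my_dsp_error_irq() and the XRUN state choice are illustrative.

#include <sound/pcm.h>
#include <sound/compress_driver.h>

static void my_dsp_error_irq(struct snd_compr_stream *cstream)
{
        /* Stop the stream and let the delayed error_work complete the close. */
        snd_compr_stop_error(cstream, SNDRV_PCM_STATE_XRUN);
}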
diff --git a/include/sound/cs35l33.h b/include/sound/cs35l33.h
new file mode 100644
index 000000000000..b6eadce76fc8
--- /dev/null
+++ b/include/sound/cs35l33.h
@@ -0,0 +1,48 @@
+/*
+ * linux/sound/cs35l33.h -- Platform data for CS35l33
+ *
+ * Copyright (c) 2016 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS35L33_H
+#define __CS35L33_H
+
+struct cs35l33_hg {
+ bool enable_hg_algo;
+ unsigned int mem_depth;
+ unsigned int release_rate;
+ unsigned int hd_rm;
+ unsigned int ldo_thld;
+ unsigned int ldo_path_disable;
+ unsigned int ldo_entry_delay;
+ bool vp_hg_auto;
+ unsigned int vp_hg;
+ unsigned int vp_hg_rate;
+ unsigned int vp_hg_va;
+};
+
+struct cs35l33_pdata {
+ /* Boost Controller Voltage Setting */
+ unsigned int boost_ctl;
+
+ /* Boost Controller Peak Current */
+ unsigned int boost_ipk;
+
+ /* Amplifier Drive Select */
+ unsigned int amp_drv_sel;
+
+ /* soft volume ramp */
+ unsigned int ramp_rate;
+
+ /* IMON adc scale */
+ unsigned int imon_adc_scale;
+
+ /* H/G algo configuration */
+ struct cs35l33_hg hg_config;
+};
+
+#endif /* __CS35L33_H */
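
An illustrative board-file instantiation of the new platform data; all values below are placeholders, not recommended settings.

#include <linux/types.h>
#include <sound/cs35l33.h>

static struct cs35l33_pdata example_cs35l33_pdata = {
        .boost_ctl      = 0,    /* placeholder boost voltage code */
        .boost_ipk      = 0,    /* placeholder peak-current code */
        .ramp_rate      = 0,
        .imon_adc_scale = 0,
        .hg_config = {
                .enable_hg_algo = true,
                .mem_depth      = 1,
                .release_rate   = 1,
        },
};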
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index fc3a481ad91e..530c57bdefa0 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -53,18 +53,19 @@ struct hdmi_codec_params {
int channels;
};
+struct hdmi_codec_pdata;
struct hdmi_codec_ops {
/*
* Called when ASoC starts an audio stream setup.
* Optional
*/
- int (*audio_startup)(struct device *dev);
+ int (*audio_startup)(struct device *dev, void *data);
/*
* Configures HDMI-encoder for audio stream.
* Mandatory
*/
- int (*hw_params)(struct device *dev,
+ int (*hw_params)(struct device *dev, void *data,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
@@ -72,19 +73,20 @@ struct hdmi_codec_ops {
* Shuts down the audio stream.
* Mandatory
*/
- void (*audio_shutdown)(struct device *dev);
+ void (*audio_shutdown)(struct device *dev, void *data);
/*
* Mute/unmute HDMI audio stream.
* Optional
*/
- int (*digital_mute)(struct device *dev, bool enable);
+ int (*digital_mute)(struct device *dev, void *data, bool enable);
/*
* Provides EDID-Like-Data from connected HDMI device.
* Optional
*/
- int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+ int (*get_eld)(struct device *dev, void *data,
+ uint8_t *buf, size_t len);
};
/* HDMI codec initalization data */
@@ -93,6 +95,7 @@ struct hdmi_codec_pdata {
uint i2s:1;
uint spdif:1;
int max_i2s_channels;
+ void *data;
};
#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
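
Sketch of the updated callback shape: every op now receives the private pointer registered in hdmi_codec_pdata.data alongside the device. The my_hdmi_* names and struct my_hdmi are illustrative.

#include <sound/hdmi-codec.h>

struct my_hdmi { int dummy; };

static int my_hdmi_hw_params(struct device *dev, void *data,
                             struct hdmi_codec_daifmt *fmt,
                             struct hdmi_codec_params *hparms)
{
        struct my_hdmi *hdmi = data;    /* previously fetched via drvdata */

        (void)hdmi;
        return 0;
}

static void my_hdmi_shutdown(struct device *dev, void *data)
{
}

static const struct hdmi_codec_ops my_hdmi_ops = {
        .hw_params      = my_hdmi_hw_params,
        .audio_shutdown = my_hdmi_shutdown,
};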
diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h
index afdb416898e0..1df2ff61a4dd 100644
--- a/include/sound/omap-hdmi-audio.h
+++ b/include/sound/omap-hdmi-audio.h
@@ -16,11 +16,16 @@
*
*/
-#include <video/omapdss.h>
-
#ifndef __OMAP_HDMI_AUDIO_H__
#define __OMAP_HDMI_AUDIO_H__
+#include <linux/platform_data/omapdss.h>
+
+struct omap_dss_audio {
+ struct snd_aes_iec958 *iec;
+ struct snd_cea_861_aud_if *cea;
+};
+
struct omap_hdmi_audio_ops {
int (*audio_startup)(struct device *dev,
void (*abort_cb)(struct device *dev));
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index 0399352f3a62..a6a2e1547092 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -13,16 +13,7 @@
#define __SIMPLE_CARD_H
#include <sound/soc.h>
-
-struct asoc_simple_dai {
- const char *name;
- unsigned int sysclk;
- int slots;
- int slot_width;
- unsigned int tx_slot_mask;
- unsigned int rx_slot_mask;
- struct clk *clk;
-};
+#include <sound/simple_card_utils.h>
struct asoc_simple_card_info {
const char *name;
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
new file mode 100644
index 000000000000..86088aed9002
--- /dev/null
+++ b/include/sound/simple_card_utils.h
@@ -0,0 +1,36 @@
+/*
+ * simple_card_core.h
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SIMPLE_CARD_CORE_H
+#define __SIMPLE_CARD_CORE_H
+
+#include <sound/soc.h>
+
+struct asoc_simple_dai {
+ const char *name;
+ unsigned int sysclk;
+ int slots;
+ int slot_width;
+ unsigned int tx_slot_mask;
+ unsigned int rx_slot_mask;
+ struct clk *clk;
+};
+
+int asoc_simple_card_parse_daifmt(struct device *dev,
+ struct device_node *node,
+ struct device_node *codec,
+ char *prefix,
+ unsigned int *retfmt);
+int asoc_simple_card_set_dailink_name(struct device *dev,
+ struct snd_soc_dai_link *dai_link,
+ const char *fmt, ...);
+int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
+ char *prefix);
+
+#endif /* __SIMPLE_CARD_CORE_H */
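
A minimal sketch of a card driver using the new helpers; the probe fragment, the "simple-audio-card," prefix and the dai-link name format are illustrative.

#include <linux/of.h>
#include <sound/soc.h>
#include <sound/simple_card_utils.h>

static int example_card_probe(struct device *dev, struct device_node *np,
                              struct device_node *codec_np,
                              struct snd_soc_card *card,
                              struct snd_soc_dai_link *link)
{
        unsigned int fmt;
        int ret;

        ret = asoc_simple_card_parse_daifmt(dev, np, codec_np,
                                            "simple-audio-card,", &fmt);
        if (ret < 0)
                return ret;
        link->dai_fmt = fmt;

        ret = asoc_simple_card_set_dailink_name(dev, link, "fe.%s",
                                                link->cpu_dai_name);
        if (ret < 0)
                return ret;

        return asoc_simple_card_parse_card_name(card, "simple-audio-card,");
}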
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 3101d53468aa..f60d755f7ac6 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -358,6 +358,7 @@ struct snd_soc_dapm_context;
struct regulator;
struct snd_soc_dapm_widget_list;
struct snd_soc_dapm_update;
+enum snd_soc_dapm_direction;
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
@@ -382,6 +383,9 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget,
int num);
+struct snd_soc_dapm_widget *snd_soc_dapm_new_control(
+ struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget);
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
@@ -451,7 +455,9 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
/* dapm path query */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
- struct snd_soc_dapm_widget_list **list);
+ struct snd_soc_dapm_widget_list **list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction));
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
struct snd_kcontrol *kcontrol);
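
Sketch of the new optional stop condition for the DAPM graph walk: the walk stops descending past any widget for which the callback returns true. Stopping at DAI widgets is just an example policy; stop_at_dai() and count_connected_widgets() are illustrative names.

#include <linux/slab.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static bool stop_at_dai(struct snd_soc_dapm_widget *w,
                        enum snd_soc_dapm_direction dir)
{
        return w->id == snd_soc_dapm_dai_in || w->id == snd_soc_dapm_dai_out;
}

static int count_connected_widgets(struct snd_soc_dai *dai, int stream)
{
        struct snd_soc_dapm_widget_list *list;
        int n;

        n = snd_soc_dapm_dai_get_connected_widgets(dai, stream, &list,
                                                   stop_at_dai);
        kfree(list);    /* caller owns the returned list */
        return n;
}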
diff --git a/include/sound/soc.h b/include/sound/soc.h
index fd7b58a58d6f..6144882cc96a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -179,6 +179,17 @@
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_S_VALUE(reg_left, reg_right, xshift, \
xmin, xmax, xsign_bit, xinvert) }
+#define SOC_SINGLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
+ .put = snd_soc_put_volsw, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .rreg = xreg, \
+ .min = xmin, .max = xmax, .platform_max = xmax, \
+ .sign_bit = 7,} }
#define SOC_DOUBLE_S8_TLV(xname, xreg, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index d8ab5101fad5..f6f3bc52c1ac 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -95,6 +95,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q, int block_size);
+ struct request_queue *q);
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index b316b44d03f3..fb8e3b6febdf 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -142,6 +142,7 @@ enum se_cmd_flags_table {
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
SCF_ACK_KREF = 0x00400000,
SCF_USE_CPUID = 0x00800000,
+ SCF_TASK_ATTR_SET = 0x01000000,
};
/*
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index de44462a7680..5cd6faa6e0d1 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -163,7 +163,6 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
-void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u64);
void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 65673d8b81ac..d336b890e31f 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request,
__entry->sector = bio->bi_iter.bi_sector;
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
),
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
),
@@ -138,7 +138,7 @@ TRACE_EVENT(bcache_read,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
__entry->cache_hit = hit;
__entry->bypass = bypass;
@@ -170,7 +170,7 @@ TRACE_EVENT(bcache_write,
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
__entry->writeback = writeback;
__entry->bypass = bypass;
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 5a2a7592068f..8f3a163b8166 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -274,7 +274,7 @@ TRACE_EVENT(block_bio_bounce,
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -313,7 +313,7 @@ TRACE_EVENT(block_bio_complete,
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
__entry->error = error;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
),
@@ -341,7 +341,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -409,7 +409,7 @@ TRACE_EVENT(block_bio_queue,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -439,7 +439,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
__entry->nr_sector = bio ? bio_sectors(bio) : 0;
blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
- bio ? bio->bi_rw : 0, __entry->nr_sector);
+ bio ? bio->bi_opf : 0, __entry->nr_sector);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -573,7 +573,7 @@ TRACE_EVENT(block_split,
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -617,7 +617,7 @@ TRACE_EVENT(block_bio_remap,
__entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev;
__entry->old_sector = from;
- blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+ blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
bio->bi_iter.bi_size);
),
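
The bi_rw to bi_opf rename above reflects that the field now packs the request operation (read via bio_op()) together with the REQ_* flags, which is why the tracepoints pass bio->bi_opf. A reader-side sketch, with bio_is_sync_write() as an illustrative helper:

#include <linux/bio.h>

static bool bio_is_sync_write(struct bio *bio)
{
        return bio_op(bio) == REQ_OP_WRITE && (bio->bi_opf & REQ_SYNC);
}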
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index e90e82ad6875..e030d6f6c19a 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -66,6 +66,21 @@ struct btrfs_qgroup_extent_record;
{ BTRFS_BLOCK_GROUP_RAID6, "RAID6"}
#define BTRFS_UUID_SIZE 16
+#define TP_STRUCT__entry_fsid __array(u8, fsid, BTRFS_UUID_SIZE)
+
+#define TP_fast_assign_fsid(fs_info) \
+ memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE)
+
+#define TP_STRUCT__entry_btrfs(args...) \
+ TP_STRUCT__entry( \
+ TP_STRUCT__entry_fsid \
+ args)
+#define TP_fast_assign_btrfs(fs_info, args...) \
+ TP_fast_assign( \
+ TP_fast_assign_fsid(fs_info); \
+ args)
+#define TP_printk_btrfs(fmt, args...) \
+ TP_printk("%pU: " fmt, __entry->fsid, args)
TRACE_EVENT(btrfs_transaction_commit,
@@ -73,17 +88,17 @@ TRACE_EVENT(btrfs_transaction_commit,
TP_ARGS(root),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, generation )
__field( u64, root_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->generation = root->fs_info->generation;
__entry->root_objectid = root->root_key.objectid;
),
- TP_printk("root = %llu(%s), gen = %llu",
+ TP_printk_btrfs("root = %llu(%s), gen = %llu",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->generation)
);
@@ -94,7 +109,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
TP_ARGS(inode),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( ino_t, ino )
__field( blkcnt_t, blocks )
__field( u64, disk_i_size )
@@ -104,7 +119,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
__field( u64, root_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = inode->i_ino;
__entry->blocks = inode->i_blocks;
__entry->disk_i_size = BTRFS_I(inode)->disk_i_size;
@@ -115,7 +130,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
+ TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
"disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->generation,
@@ -175,7 +190,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
TP_CONDITION(map),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, start )
__field( u64, len )
@@ -187,7 +202,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( unsigned int, compress_type )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->start = map->start;
__entry->len = map->len;
@@ -199,7 +214,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->compress_type = map->compress_type;
),
- TP_printk("root = %llu(%s), start = %llu, len = %llu, "
+ TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
"orig_start = %llu, block_start = %llu(%s), "
"block_len = %llu, flags = %s, refs = %u, "
"compress_type = %u",
@@ -233,7 +248,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
TP_ARGS(inode, ordered),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( ino_t, ino )
__field( u64, file_offset )
__field( u64, start )
@@ -246,7 +261,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__field( u64, root_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = inode->i_ino;
__entry->file_offset = ordered->file_offset;
__entry->start = ordered->start;
@@ -260,7 +275,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk("root = %llu(%s), ino = %llu, file_offset = %llu, "
+ TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
"start = %llu, len = %llu, disk_len = %llu, "
"bytes_left = %llu, flags = %s, compress_type = %d, "
"refs = %d",
@@ -310,7 +325,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_ARGS(page, inode, wbc),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( ino_t, ino )
__field( pgoff_t, index )
__field( long, nr_to_write )
@@ -324,7 +339,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( u64, root_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = inode->i_ino;
__entry->index = page->index;
__entry->nr_to_write = wbc->nr_to_write;
@@ -339,7 +354,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
+ TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
"nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
"range_end = %llu, for_kupdate = %d, "
"for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
@@ -366,7 +381,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
TP_ARGS(page, start, end, uptodate),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( ino_t, ino )
__field( pgoff_t, index )
__field( u64, start )
@@ -375,7 +390,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
__field( u64, root_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb),
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
__entry->start = start;
@@ -385,7 +400,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
BTRFS_I(page->mapping->host)->root->root_key.objectid;
),
- TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
+ TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
"end = %llu, uptodate = %d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->index,
@@ -399,7 +414,7 @@ TRACE_EVENT(btrfs_sync_file,
TP_ARGS(file, datasync),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( ino_t, ino )
__field( ino_t, parent )
__field( int, datasync )
@@ -410,6 +425,7 @@ TRACE_EVENT(btrfs_sync_file,
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = d_inode(dentry);
+ TP_fast_assign_fsid(btrfs_sb(file->f_path.dentry->d_sb));
__entry->ino = inode->i_ino;
__entry->parent = d_inode(dentry->d_parent)->i_ino;
__entry->datasync = datasync;
@@ -417,7 +433,7 @@ TRACE_EVENT(btrfs_sync_file,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+ TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->parent,
__entry->datasync)
@@ -425,19 +441,59 @@ TRACE_EVENT(btrfs_sync_file,
TRACE_EVENT(btrfs_sync_fs,
- TP_PROTO(int wait),
+ TP_PROTO(struct btrfs_fs_info *fs_info, int wait),
- TP_ARGS(wait),
+ TP_ARGS(fs_info, wait),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( int, wait )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->wait = wait;
),
- TP_printk("wait = %d", __entry->wait)
+ TP_printk_btrfs("wait = %d", __entry->wait)
+);
+
+TRACE_EVENT(btrfs_add_block_group,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group_cache *block_group, int create),
+
+ TP_ARGS(fs_info, block_group, create),
+
+ TP_STRUCT__entry(
+ __array( u8, fsid, BTRFS_UUID_SIZE )
+ __field( u64, offset )
+ __field( u64, size )
+ __field( u64, flags )
+ __field( u64, bytes_used )
+ __field( u64, bytes_super )
+ __field( int, create )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+ __entry->offset = block_group->key.objectid;
+ __entry->size = block_group->key.offset;
+ __entry->flags = block_group->flags;
+ __entry->bytes_used =
+ btrfs_block_group_used(&block_group->item);
+ __entry->bytes_super = block_group->bytes_super;
+ __entry->create = create;
+ ),
+
+ TP_printk("%pU: block_group offset = %llu, size = %llu, "
+ "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
+ "create = %d", __entry->fsid,
+ (unsigned long long)__entry->offset,
+ (unsigned long long)__entry->size,
+ (unsigned long long)__entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|",
+ BTRFS_GROUP_FLAGS),
+ (unsigned long long)__entry->bytes_used,
+ (unsigned long long)__entry->bytes_super, __entry->create)
);
#define show_ref_action(action) \
@@ -450,13 +506,14 @@ TRACE_EVENT(btrfs_sync_fs,
DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_tree_ref *full_ref,
int action),
- TP_ARGS(ref, full_ref, action),
+ TP_ARGS(fs_info, ref, full_ref, action),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
__field( u64, num_bytes )
__field( int, action )
@@ -467,7 +524,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
__field( u64, seq )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->bytenr = ref->bytenr;
__entry->num_bytes = ref->num_bytes;
__entry->action = action;
@@ -478,7 +535,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
__entry->seq = ref->seq;
),
- TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+ TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
"parent = %llu(%s), ref_root = %llu(%s), level = %d, "
"type = %s, seq = %llu",
(unsigned long long)__entry->bytenr,
@@ -492,31 +549,34 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
DEFINE_EVENT(btrfs_delayed_tree_ref, add_delayed_tree_ref,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_tree_ref *full_ref,
int action),
- TP_ARGS(ref, full_ref, action)
+ TP_ARGS(fs_info, ref, full_ref, action)
);
DEFINE_EVENT(btrfs_delayed_tree_ref, run_delayed_tree_ref,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_tree_ref *full_ref,
int action),
- TP_ARGS(ref, full_ref, action)
+ TP_ARGS(fs_info, ref, full_ref, action)
);
DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_data_ref *full_ref,
int action),
- TP_ARGS(ref, full_ref, action),
+ TP_ARGS(fs_info, ref, full_ref, action),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
__field( u64, num_bytes )
__field( int, action )
@@ -528,7 +588,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
__field( u64, seq )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->bytenr = ref->bytenr;
__entry->num_bytes = ref->num_bytes;
__entry->action = action;
@@ -540,7 +600,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
__entry->seq = ref->seq;
),
- TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, "
+ TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
"parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
"offset = %llu, type = %s, seq = %llu",
(unsigned long long)__entry->bytenr,
@@ -556,45 +616,48 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
DEFINE_EVENT(btrfs_delayed_data_ref, add_delayed_data_ref,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_data_ref *full_ref,
int action),
- TP_ARGS(ref, full_ref, action)
+ TP_ARGS(fs_info, ref, full_ref, action)
);
DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_data_ref *full_ref,
int action),
- TP_ARGS(ref, full_ref, action)
+ TP_ARGS(fs_info, ref, full_ref, action)
);
DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(ref, head_ref, action),
+ TP_ARGS(fs_info, ref, head_ref, action),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
__field( u64, num_bytes )
__field( int, action )
__field( int, is_data )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->bytenr = ref->bytenr;
__entry->num_bytes = ref->num_bytes;
__entry->action = action;
__entry->is_data = head_ref->is_data;
),
- TP_printk("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+ TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -603,20 +666,22 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(ref, head_ref, action)
+ TP_ARGS(fs_info, ref, head_ref, action)
);
DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
- TP_PROTO(struct btrfs_delayed_ref_node *ref,
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(ref, head_ref, action)
+ TP_ARGS(fs_info, ref, head_ref, action)
);
#define show_chunk_type(type) \
@@ -638,7 +703,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
TP_ARGS(root, map, offset, size),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( int, num_stripes )
__field( u64, type )
__field( int, sub_stripes )
@@ -647,7 +712,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__field( u64, root_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->num_stripes = map->num_stripes;
__entry->type = map->type;
__entry->sub_stripes = map->sub_stripes;
@@ -656,7 +721,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->root_objectid = root->root_key.objectid;
),
- TP_printk("root = %llu(%s), offset = %llu, size = %llu, "
+ TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
"num_stripes = %d, sub_stripes = %d, type = %s",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->offset,
@@ -688,7 +753,7 @@ TRACE_EVENT(btrfs_cow_block,
TP_ARGS(root, buf, cow),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, buf_start )
__field( int, refs )
@@ -697,7 +762,7 @@ TRACE_EVENT(btrfs_cow_block,
__field( int, cow_level )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->buf_start = buf->start;
__entry->refs = atomic_read(&buf->refs);
@@ -706,7 +771,7 @@ TRACE_EVENT(btrfs_cow_block,
__entry->cow_level = btrfs_header_level(cow);
),
- TP_printk("root = %llu(%s), refs = %d, orig_buf = %llu "
+ TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
"(orig_level = %d), cow_buf = %llu (cow_level = %d)",
show_root_type(__entry->root_objectid),
__entry->refs,
@@ -723,25 +788,105 @@ TRACE_EVENT(btrfs_space_reservation,
TP_ARGS(fs_info, type, val, bytes, reserve),
- TP_STRUCT__entry(
- __array( u8, fsid, BTRFS_UUID_SIZE )
+ TP_STRUCT__entry_btrfs(
__string( type, type )
__field( u64, val )
__field( u64, bytes )
__field( int, reserve )
),
- TP_fast_assign(
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+ TP_fast_assign_btrfs(fs_info,
__assign_str(type, type);
__entry->val = val;
__entry->bytes = bytes;
__entry->reserve = reserve;
),
- TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
- __entry->val, __entry->reserve ? "reserve" : "release",
- __entry->bytes)
+ TP_printk_btrfs("%s: %Lu %s %Lu", __get_str(type), __entry->val,
+ __entry->reserve ? "reserve" : "release",
+ __entry->bytes)
+);
+
+#define show_flush_action(action) \
+ __print_symbolic(action, \
+ { BTRFS_RESERVE_NO_FLUSH, "BTRFS_RESERVE_NO_FLUSH"}, \
+ { BTRFS_RESERVE_FLUSH_LIMIT, "BTRFS_RESERVE_FLUSH_LIMIT"}, \
+ { BTRFS_RESERVE_FLUSH_ALL, "BTRFS_RESERVE_FLUSH_ALL"})
+
+TRACE_EVENT(btrfs_trigger_flush,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
+ int flush, char *reason),
+
+ TP_ARGS(fs_info, flags, bytes, flush, reason),
+
+ TP_STRUCT__entry(
+ __array( u8, fsid, BTRFS_UUID_SIZE )
+ __field( u64, flags )
+ __field( u64, bytes )
+ __field( int, flush )
+ __string( reason, reason )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+ __entry->flags = flags;
+ __entry->bytes = bytes;
+ __entry->flush = flush;
+ __assign_str(reason, reason)
+ ),
+
+ TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
+ __entry->fsid, __get_str(reason), __entry->flush,
+ show_flush_action(__entry->flush),
+ (unsigned long long)__entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|",
+ BTRFS_GROUP_FLAGS),
+ (unsigned long long)__entry->bytes)
+);
+
+#define show_flush_state(state) \
+ __print_symbolic(state, \
+ { FLUSH_DELAYED_ITEMS_NR, "FLUSH_DELAYED_ITEMS_NR"}, \
+ { FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \
+ { FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \
+ { FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \
+ { ALLOC_CHUNK, "ALLOC_CHUNK"}, \
+ { COMMIT_TRANS, "COMMIT_TRANS"})
+
+TRACE_EVENT(btrfs_flush_space,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 flags, u64 num_bytes,
+ u64 orig_bytes, int state, int ret),
+
+ TP_ARGS(fs_info, flags, num_bytes, orig_bytes, state, ret),
+
+ TP_STRUCT__entry(
+ __array( u8, fsid, BTRFS_UUID_SIZE )
+ __field( u64, flags )
+ __field( u64, num_bytes )
+ __field( u64, orig_bytes )
+ __field( int, state )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+ __entry->flags = flags;
+ __entry->num_bytes = num_bytes;
+ __entry->orig_bytes = orig_bytes;
+ __entry->state = state;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
+ "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
+ show_flush_state(__entry->state),
+ (unsigned long long)__entry->flags,
+ __print_flags((unsigned long)__entry->flags, "|",
+ BTRFS_GROUP_FLAGS),
+ (unsigned long long)__entry->num_bytes,
+ (unsigned long long)__entry->orig_bytes, __entry->ret)
);
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
@@ -750,19 +895,19 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
TP_ARGS(root, start, len),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, start )
__field( u64, len )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->start = start;
__entry->len = len;
),
- TP_printk("root = %llu(%s), start = %llu, len = %llu",
+ TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
@@ -789,21 +934,21 @@ TRACE_EVENT(find_free_extent,
TP_ARGS(root, num_bytes, empty_size, data),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
__field( u64, data )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->num_bytes = num_bytes;
__entry->empty_size = empty_size;
__entry->data = data;
),
- TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
+ TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
"flags = %Lu(%s)", show_root_type(__entry->root_objectid),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
@@ -818,7 +963,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
TP_ARGS(root, block_group, start, len),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, bg_objectid )
__field( u64, flags )
@@ -826,7 +971,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
__field( u64, len )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
@@ -834,7 +979,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
__entry->len = len;
),
- TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
+ TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
"start = %Lu, len = %Lu",
show_root_type(__entry->root_objectid), __entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -867,7 +1012,7 @@ TRACE_EVENT(btrfs_find_cluster,
TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bg_objectid )
__field( u64, flags )
__field( u64, start )
@@ -876,7 +1021,7 @@ TRACE_EVENT(btrfs_find_cluster,
__field( u64, min_bytes )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(block_group->fs_info,
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
__entry->start = start;
@@ -885,7 +1030,7 @@ TRACE_EVENT(btrfs_find_cluster,
__entry->min_bytes = min_bytes;
),
- TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
+ TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
" empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
@@ -899,15 +1044,15 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
TP_ARGS(block_group),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bg_objectid )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(block_group->fs_info,
__entry->bg_objectid = block_group->key.objectid;
),
- TP_printk("block_group = %Lu", __entry->bg_objectid)
+ TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
);
TRACE_EVENT(btrfs_setup_cluster,
@@ -917,7 +1062,7 @@ TRACE_EVENT(btrfs_setup_cluster,
TP_ARGS(block_group, cluster, size, bitmap),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bg_objectid )
__field( u64, flags )
__field( u64, start )
@@ -926,7 +1071,7 @@ TRACE_EVENT(btrfs_setup_cluster,
__field( int, bitmap )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(block_group->fs_info,
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
__entry->start = cluster->window_start;
@@ -935,7 +1080,7 @@ TRACE_EVENT(btrfs_setup_cluster,
__entry->bitmap = bitmap;
),
- TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
+ TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
"size = %Lu, max_size = %Lu, bitmap = %d",
__entry->bg_objectid,
__entry->flags,
@@ -993,7 +1138,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
TP_ARGS(work),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( void *, work )
__field( void *, wq )
__field( void *, func )
@@ -1002,7 +1147,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( void *, normal_work )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_work_owner(work),
__entry->work = work;
__entry->wq = work->wq;
__entry->func = work->func;
@@ -1011,7 +1156,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->normal_work = &work->normal_work;
),
- TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
+ TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
" ordered_free=%p",
__entry->work, __entry->normal_work, __entry->wq,
__entry->func, __entry->ordered_func, __entry->ordered_free)
@@ -1024,15 +1169,15 @@ DECLARE_EVENT_CLASS(btrfs__work__done,
TP_ARGS(work),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( void *, work )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_work_owner(work),
__entry->work = work;
),
- TP_printk("work->%p", __entry->work)
+ TP_printk_btrfs("work->%p", __entry->work)
);
DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1069,19 +1214,19 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
TP_ARGS(wq, name, high),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( void *, wq )
__string( name, name )
__field( int , high )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
__entry->wq = wq;
__assign_str(name, name);
__entry->high = high;
),
- TP_printk("name=%s%s, wq=%p", __get_str(name),
+ TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
__print_flags(__entry->high, "",
{(WQ_HIGHPRI), "-high"}),
__entry->wq)
@@ -1100,15 +1245,15 @@ DECLARE_EVENT_CLASS(btrfs__workqueue_done,
TP_ARGS(wq),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( void *, wq )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
__entry->wq = wq;
),
- TP_printk("wq=%p", __entry->wq)
+ TP_printk_btrfs("wq=%p", __entry->wq)
);
DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
@@ -1124,19 +1269,19 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
TP_ARGS(inode, free_reserved),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, rootid )
__field( unsigned long, ino )
__field( u64, free_reserved )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->rootid = BTRFS_I(inode)->root->objectid;
__entry->ino = inode->i_ino;
__entry->free_reserved = free_reserved;
),
- TP_printk("rootid=%llu, ino=%lu, free_reserved=%llu",
+ TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
__entry->rootid, __entry->ino, __entry->free_reserved)
);
@@ -1165,7 +1310,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
TP_ARGS(inode, start, len, reserved, op),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, rootid )
__field( unsigned long, ino )
__field( u64, start )
@@ -1174,7 +1319,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
__field( int, op )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->rootid = BTRFS_I(inode)->root->objectid;
__entry->ino = inode->i_ino;
__entry->start = start;
@@ -1183,7 +1328,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
__entry->op = op;
),
- TP_printk("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+ TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
__entry->rootid, __entry->ino, __entry->start, __entry->len,
__entry->reserved,
__print_flags((unsigned long)__entry->op, "",
@@ -1207,86 +1352,90 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
- TP_PROTO(u64 ref_root, u64 reserved),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 ref_root, u64 reserved),
- TP_ARGS(ref_root, reserved),
+ TP_ARGS(fs_info, ref_root, reserved),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, ref_root )
__field( u64, reserved )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->ref_root = ref_root;
__entry->reserved = reserved;
),
- TP_printk("root=%llu, reserved=%llu, op=free",
+ TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
__entry->ref_root, __entry->reserved)
);
DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref,
- TP_PROTO(u64 ref_root, u64 reserved),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 ref_root, u64 reserved),
- TP_ARGS(ref_root, reserved)
+ TP_ARGS(fs_info, ref_root, reserved)
);
DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
- TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_extent_record *rec),
- TP_ARGS(rec),
+ TP_ARGS(fs_info, rec),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
__field( u64, num_bytes )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->bytenr = rec->bytenr,
__entry->num_bytes = rec->num_bytes;
),
- TP_printk("bytenr = %llu, num_bytes = %llu",
+ TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
- TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_extent_record *rec),
- TP_ARGS(rec)
+ TP_ARGS(fs_info, rec)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
- TP_PROTO(struct btrfs_qgroup_extent_record *rec),
+ TP_PROTO(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_extent_record *rec),
- TP_ARGS(rec)
+ TP_ARGS(fs_info, rec)
);
TRACE_EVENT(btrfs_qgroup_account_extent,
- TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 bytenr,
+ u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots),
- TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots),
+ TP_ARGS(fs_info, bytenr, num_bytes, nr_old_roots, nr_new_roots),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
__field( u64, num_bytes )
__field( u64, nr_old_roots )
__field( u64, nr_new_roots )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->bytenr = bytenr;
__entry->num_bytes = num_bytes;
__entry->nr_old_roots = nr_old_roots;
__entry->nr_new_roots = nr_new_roots;
),
- TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
+ TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
"nr_new_roots = %llu",
__entry->bytenr,
__entry->num_bytes,
@@ -1296,23 +1445,24 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
TRACE_EVENT(qgroup_update_counters,
- TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count),
+ TP_PROTO(struct btrfs_fs_info *fs_info, u64 qgid,
+ u64 cur_old_count, u64 cur_new_count),
- TP_ARGS(qgid, cur_old_count, cur_new_count),
+ TP_ARGS(fs_info, qgid, cur_old_count, cur_new_count),
- TP_STRUCT__entry(
+ TP_STRUCT__entry_btrfs(
__field( u64, qgid )
__field( u64, cur_old_count )
__field( u64, cur_new_count )
),
- TP_fast_assign(
+ TP_fast_assign_btrfs(fs_info,
__entry->qgid = qgid;
__entry->cur_old_count = cur_old_count;
__entry->cur_new_count = cur_new_count;
),
- TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+ TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
__entry->qgid,
__entry->cur_old_count,
__entry->cur_new_count)
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index f28292d73ddb..8ade3eb6c640 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -151,8 +151,9 @@ TRACE_EVENT(kvm_msi_set_irq,
__entry->data = data;
),
- TP_printk("dst %u vec %u (%s|%s|%s%s)",
- (u8)(__entry->address >> 12), (u8)__entry->data,
+ TP_printk("dst %llx vec %u (%s|%s|%s%s)",
+ (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
+ (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
(__entry->data & (1<<15)) ? "level" : "edge",
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 003dca933803..8a707f8a41c3 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -473,6 +473,39 @@ TRACE_EVENT(svc_recv,
show_rqstp_flags(__entry->flags))
);
+DECLARE_EVENT_CLASS(svc_rqst_event,
+
+ TP_PROTO(struct svc_rqst *rqst),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(__be32, xid)
+ __field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = rqst->rq_xid;
+ __entry->flags = rqst->rq_flags;
+ memcpy(__get_dynamic_array(addr),
+ &rqst->rq_addr, rqst->rq_addrlen);
+ ),
+
+ TP_printk("addr=%pIScp rq_xid=0x%x flags=%s",
+ (struct sockaddr *)__get_dynamic_array(addr),
+ be32_to_cpu(__entry->xid),
+ show_rqstp_flags(__entry->flags))
+);
+
+DEFINE_EVENT(svc_rqst_event, svc_defer,
+ TP_PROTO(struct svc_rqst *rqst),
+ TP_ARGS(rqst));
+
+DEFINE_EVENT(svc_rqst_event, svc_drop,
+ TP_PROTO(struct svc_rqst *rqst),
+ TP_ARGS(rqst));
+
DECLARE_EVENT_CLASS(svc_rqst_status,
TP_PROTO(struct svc_rqst *rqst, int status),
@@ -529,45 +562,67 @@ TRACE_EVENT(svc_xprt_do_enqueue,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
- __field_struct(struct sockaddr_storage, ss)
__field(int, pid)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, xprt != NULL ?
+ xprt->xpt_remotelen : 0)
),
TP_fast_assign(
__entry->xprt = xprt;
- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
__entry->pid = rqst? rqst->rq_task->pid : 0;
- __entry->flags = xprt ? xprt->xpt_flags : 0;
+ if (xprt) {
+ memcpy(__get_dynamic_array(addr),
+ &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = xprt->xpt_flags;
+ } else
+ __entry->flags = 0;
),
TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->ss,
+ __get_dynamic_array_len(addr) != 0 ?
+ (struct sockaddr *)__get_dynamic_array(addr) : NULL,
__entry->pid, show_svc_xprt_flags(__entry->flags))
);
-TRACE_EVENT(svc_xprt_dequeue,
+DECLARE_EVENT_CLASS(svc_xprt_event,
TP_PROTO(struct svc_xprt *xprt),
TP_ARGS(xprt),
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
- __field_struct(struct sockaddr_storage, ss)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, xprt != NULL ?
+ xprt->xpt_remotelen : 0)
),
TP_fast_assign(
- __entry->xprt = xprt,
- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
- __entry->flags = xprt ? xprt->xpt_flags : 0;
+ __entry->xprt = xprt;
+ if (xprt) {
+ memcpy(__get_dynamic_array(addr),
+ &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = xprt->xpt_flags;
+ } else
+ __entry->flags = 0;
),
TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->ss,
+ __get_dynamic_array_len(addr) != 0 ?
+ (struct sockaddr *)__get_dynamic_array(addr) : NULL,
show_svc_xprt_flags(__entry->flags))
);
+DEFINE_EVENT(svc_xprt_event, svc_xprt_dequeue,
+ TP_PROTO(struct svc_xprt *xprt),
+ TP_ARGS(xprt));
+
+DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
+ TP_PROTO(struct svc_xprt *xprt),
+ TP_ARGS(xprt));
+
TRACE_EVENT(svc_wake_up,
TP_PROTO(int pid),
@@ -592,21 +647,56 @@ TRACE_EVENT(svc_handle_xprt,
TP_STRUCT__entry(
__field(struct svc_xprt *, xprt)
__field(int, len)
- __field_struct(struct sockaddr_storage, ss)
__field(unsigned long, flags)
+ __dynamic_array(unsigned char, addr, xprt != NULL ?
+ xprt->xpt_remotelen : 0)
),
TP_fast_assign(
__entry->xprt = xprt;
- xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
__entry->len = len;
- __entry->flags = xprt ? xprt->xpt_flags : 0;
+ if (xprt) {
+ memcpy(__get_dynamic_array(addr),
+ &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = xprt->xpt_flags;
+ } else
+ __entry->flags = 0;
),
TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
- (struct sockaddr *)&__entry->ss,
+ __get_dynamic_array_len(addr) != 0 ?
+ (struct sockaddr *)__get_dynamic_array(addr) : NULL,
__entry->len, show_svc_xprt_flags(__entry->flags))
);
+
+
+DECLARE_EVENT_CLASS(svc_deferred_event,
+ TP_PROTO(struct svc_deferred_req *dr),
+
+ TP_ARGS(dr),
+
+ TP_STRUCT__entry(
+ __field(__be32, xid)
+ __dynamic_array(unsigned char, addr, dr->addrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = *(__be32 *)(dr->args + (dr->xprt_hlen>>2));
+ memcpy(__get_dynamic_array(addr), &dr->addr, dr->addrlen);
+ ),
+
+ TP_printk("addr=%pIScp xid=0x%x",
+ (struct sockaddr *)__get_dynamic_array(addr),
+ be32_to_cpu(__entry->xid))
+);
+
+DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,
+ TP_PROTO(struct svc_deferred_req *dr),
+ TP_ARGS(dr));
+DEFINE_EVENT(svc_deferred_event, svc_revisit_deferred,
+ TP_PROTO(struct svc_deferred_req *dr),
+ TP_ARGS(dr));
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 51440131d337..28c5da6fdfac 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire,
#ifdef CONFIG_NO_HZ_COMMON
#define TICK_DEP_NAMES \
- tick_dep_name(NONE) \
+ tick_dep_mask_name(NONE) \
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
tick_dep_name_end(CLOCK_UNSTABLE)
#undef tick_dep_name
+#undef tick_dep_mask_name
#undef tick_dep_name_end
-#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
-#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* The MASKs are defined in terms of their BITs, so the BIT enums need to be processed too */
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* NONE only has a mask defined for it */
+#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
TICK_DEP_NAMES
#undef tick_dep_name
+#undef tick_dep_mask_name
#undef tick_dep_name_end
#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
#define show_tick_dep_name(val) \
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
new file mode 100644
index 000000000000..b7f1d6278280
--- /dev/null
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -0,0 +1,144 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vsock
+
+#if !defined(_TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H) || \
+ defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H
+
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM);
+
+#define show_type(val) \
+ __print_symbolic(val, { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" })
+
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RESPONSE);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RST);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_SHUTDOWN);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RW);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_UPDATE);
+TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_REQUEST);
+
+#define show_op(val) \
+ __print_symbolic(val, \
+ { VIRTIO_VSOCK_OP_INVALID, "INVALID" }, \
+ { VIRTIO_VSOCK_OP_REQUEST, "REQUEST" }, \
+ { VIRTIO_VSOCK_OP_RESPONSE, "RESPONSE" }, \
+ { VIRTIO_VSOCK_OP_RST, "RST" }, \
+ { VIRTIO_VSOCK_OP_SHUTDOWN, "SHUTDOWN" }, \
+ { VIRTIO_VSOCK_OP_RW, "RW" }, \
+ { VIRTIO_VSOCK_OP_CREDIT_UPDATE, "CREDIT_UPDATE" }, \
+ { VIRTIO_VSOCK_OP_CREDIT_REQUEST, "CREDIT_REQUEST" })
+
+TRACE_EVENT(virtio_transport_alloc_pkt,
+ TP_PROTO(
+ __u32 src_cid, __u32 src_port,
+ __u32 dst_cid, __u32 dst_port,
+ __u32 len,
+ __u16 type,
+ __u16 op,
+ __u32 flags
+ ),
+ TP_ARGS(
+ src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+ type,
+ op,
+ flags
+ ),
+ TP_STRUCT__entry(
+ __field(__u32, src_cid)
+ __field(__u32, src_port)
+ __field(__u32, dst_cid)
+ __field(__u32, dst_port)
+ __field(__u32, len)
+ __field(__u16, type)
+ __field(__u16, op)
+ __field(__u32, flags)
+ ),
+ TP_fast_assign(
+ __entry->src_cid = src_cid;
+ __entry->src_port = src_port;
+ __entry->dst_cid = dst_cid;
+ __entry->dst_port = dst_port;
+ __entry->len = len;
+ __entry->type = type;
+ __entry->op = op;
+ __entry->flags = flags;
+ ),
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
+ __entry->src_cid, __entry->src_port,
+ __entry->dst_cid, __entry->dst_port,
+ __entry->len,
+ show_type(__entry->type),
+ show_op(__entry->op),
+ __entry->flags)
+);
+
+TRACE_EVENT(virtio_transport_recv_pkt,
+ TP_PROTO(
+ __u32 src_cid, __u32 src_port,
+ __u32 dst_cid, __u32 dst_port,
+ __u32 len,
+ __u16 type,
+ __u16 op,
+ __u32 flags,
+ __u32 buf_alloc,
+ __u32 fwd_cnt
+ ),
+ TP_ARGS(
+ src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+ type,
+ op,
+ flags,
+ buf_alloc,
+ fwd_cnt
+ ),
+ TP_STRUCT__entry(
+ __field(__u32, src_cid)
+ __field(__u32, src_port)
+ __field(__u32, dst_cid)
+ __field(__u32, dst_port)
+ __field(__u32, len)
+ __field(__u16, type)
+ __field(__u16, op)
+ __field(__u32, flags)
+ __field(__u32, buf_alloc)
+ __field(__u32, fwd_cnt)
+ ),
+ TP_fast_assign(
+ __entry->src_cid = src_cid;
+ __entry->src_port = src_port;
+ __entry->dst_cid = dst_cid;
+ __entry->dst_port = dst_port;
+ __entry->len = len;
+ __entry->type = type;
+ __entry->op = op;
+ __entry->flags = flags;
+ __entry->buf_alloc = buf_alloc;
+ __entry->fwd_cnt = fwd_cnt;
+ ),
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x "
+ "buf_alloc=%u fwd_cnt=%u",
+ __entry->src_cid, __entry->src_port,
+ __entry->dst_cid, __entry->dst_port,
+ __entry->len,
+ show_type(__entry->type),
+ show_op(__entry->op),
+ __entry->flags,
+ __entry->buf_alloc,
+ __entry->fwd_cnt)
+);
+
+#endif /* _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H */
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vsock_virtio_transport_common
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index cdecf87576e8..462246aa200e 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -487,6 +487,22 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK 0xff
+struct drm_amdgpu_query_fw {
+ /** AMDGPU_INFO_FW_* */
+ __u32 fw_type;
+ /**
+ * Index of the IP if there are more IPs of
+ * the same type.
+ */
+ __u32 ip_instance;
+ /**
+ * Index of the engine. Whether this is used depends
+ * on the firmware type. (e.g. MEC, SDMA)
+ */
+ __u32 index;
+ __u32 _pad;
+};
+
/* Input structure for the INFO ioctl */
struct drm_amdgpu_info {
/* Where the return value will be stored */
@@ -522,21 +538,7 @@ struct drm_amdgpu_info {
__u32 flags;
} read_mmr_reg;
- struct {
- /** AMDGPU_INFO_FW_* */
- __u32 fw_type;
- /**
- * Index of the IP if there are more IPs of
- * the same type.
- */
- __u32 ip_instance;
- /**
- * Index of the engine. Whether this is used depends
- * on the firmware type. (e.g. MEC, SDMA)
- */
- __u32 index;
- __u32 _pad;
- } query_fw;
+ struct drm_amdgpu_query_fw query_fw;
};
};
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index c17d63d8b543..d7e81a3886fd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -361,6 +361,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_GPU_RESET 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN 37
+#define I915_PARAM_HAS_POOLED_EU 38
+#define I915_PARAM_MIN_EU_IN_POOL 39
typedef struct drm_i915_getparam {
__s32 param;
@@ -1171,6 +1173,7 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
__u64 value;
};
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index bf19d2cd9078..49f778de8e06 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -201,6 +201,27 @@ struct drm_msm_wait_fence {
struct drm_msm_timespec timeout; /* in */
};
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a userspace
+ * BO cache where we want to optimistically hold on to the buffer allocation
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when a buffer enters the
+ * BO cache, and madvise(WILLNEED) when trying to recycle a buffer from the
+ * BO cache.  In the WILLNEED case, 'retained' indicates to userspace
+ * whether the backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
+#define __MSM_MADV_PURGED 2 /* internal state */
+
+struct drm_msm_gem_madvise {
+ __u32 handle; /* in, GEM handle */
+ __u32 madv; /* in, MSM_MADV_x */
+ __u32 retained; /* out, whether backing store still exists */
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -211,7 +232,8 @@ struct drm_msm_wait_fence {
#define DRM_MSM_GEM_CPU_FINI 0x05
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
-#define DRM_MSM_NUM_IOCTLS 0x08
+#define DRM_MSM_GEM_MADVISE 0x08
+#define DRM_MSM_NUM_IOCTLS 0x09
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -220,6 +242,7 @@ struct drm_msm_wait_fence {
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#if defined(__cplusplus)
}
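
For illustration, a minimal userspace sketch of the DONTNEED/WILLNEED flow described in the madvise comment above. It assumes an already-open DRM device fd and a GEM handle previously returned by DRM_IOCTL_MSM_GEM_NEW; drmIoctl() is libdrm's restart-on-EINTR wrapper, and the include path of the header may differ on a given system.

#include <xf86drm.h>             /* libdrm's drmIoctl() wrapper */
#include "msm_drm.h"             /* the uapi header patched above; path is illustrative */

/* Mark a BO as discardable when it enters the userspace BO cache. */
static void bo_cache_put(int fd, __u32 handle)
{
	struct drm_msm_gem_madvise req = {
		.handle = handle,
		.madv   = MSM_MADV_DONTNEED,
	};

	drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);
}

/* Try to recycle a cached BO; returns 1 only if its pages survived. */
static int bo_cache_get(int fd, __u32 handle)
{
	struct drm_msm_gem_madvise req = {
		.handle = handle,
		.madv   = MSM_MADV_WILLNEED,
	};

	if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req))
		return 0;            /* ioctl failed, treat as purged */
	return req.retained;         /* 0 => backing pages were purged */
}

The 'retained' out-field is what lets a BO cache detect purged buffers on reuse without needing a separate query ioctl.
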
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index af12e8a184c8..ad7edc3edf7c 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -37,6 +37,7 @@ extern "C" {
#define DRM_VC4_MMAP_BO 0x04
#define DRM_VC4_CREATE_SHADER_BO 0x05
#define DRM_VC4_GET_HANG_STATE 0x06
+#define DRM_VC4_GET_PARAM 0x07
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -45,6 +46,7 @@ extern "C" {
#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
+#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -280,6 +282,17 @@ struct drm_vc4_get_hang_state {
__u32 pad[16];
};
+#define DRM_VC4_PARAM_V3D_IDENT0 0
+#define DRM_VC4_PARAM_V3D_IDENT1 1
+#define DRM_VC4_PARAM_V3D_IDENT2 2
+#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
+
+struct drm_vc4_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
new file mode 100644
index 000000000000..bf66f5db6da8
--- /dev/null
+++ b/include/uapi/drm/vgem_drm.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH 0x1
+#define DRM_VGEM_FENCE_SIGNAL 0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+ __u32 handle;
+ __u32 flags;
+#define VGEM_FENCE_WRITE 0x1
+ __u32 out_fence;
+ __u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+ __u32 fence;
+ __u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6d4e92ccdc91..185f8ea2702f 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -357,6 +357,7 @@ header-y += reiserfs_fs.h
header-y += reiserfs_xattr.h
header-y += resource.h
header-y += rfkill.h
+header-y += rio_cm_cdev.h
header-y += rio_mport_cdev.h
header-y += romfs_fs.h
header-y += rose.h
@@ -453,6 +454,7 @@ header-y += virtio_ring.h
header-y += virtio_rng.h
header-y += virtio_scsi.h
header-y += virtio_types.h
+header-y += virtio_vsock.h
header-y += vm_sockets.h
header-y += vt.h
header-y += vtpm_proxy.h
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 2bdd1e3e7007..ac5eacd3055b 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -798,7 +798,7 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code)
#define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
struct btrfs_ioctl_ino_path_args)
#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
- struct btrfs_ioctl_ino_path_args)
+ struct btrfs_ioctl_logical_ino_args)
#define BTRFS_IOC_SET_RECEIVED_SUBVOL _IOWR(BTRFS_IOCTL_MAGIC, 37, \
struct btrfs_ioctl_received_subvol_args)
#define BTRFS_IOC_SEND _IOW(BTRFS_IOCTL_MAGIC, 38, struct btrfs_ioctl_send_args)
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 12c37a197d24..49bc06295398 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -15,8 +15,6 @@
#include <linux/types.h>
-struct task_struct;
-
/* User-level do most of the mapping between kernel and user
capabilities based on the version tag given by the kernel. The
kernel might be somewhat backwards compatible, but don't bet on
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index cb4a72f888d5..b59ee077a596 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -286,6 +286,7 @@ typedef struct elf64_phdr {
#define SHF_ALLOC 0x2
#define SHF_EXECINSTR 0x4
#define SHF_RELA_LIVEPATCH 0x00100000
+#define SHF_RO_AFTER_INIT 0x00200000
#define SHF_MASKPROC 0xf0000000
/* special section indexes */
@@ -381,6 +382,19 @@ typedef struct elf64_shdr {
#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
+#define NT_PPC_TAR 0x103 /* Target Address Register */
+#define NT_PPC_PPR 0x104 /* Program Priority Register */
+#define NT_PPC_DSCR 0x105 /* Data Stream Control Register */
+#define NT_PPC_EBB 0x106 /* Event Based Branch Registers */
+#define NT_PPC_PMU 0x107 /* Performance Monitor Registers */
+#define NT_PPC_TM_CGPR 0x108 /* TM checkpointed GPR Registers */
+#define NT_PPC_TM_CFPR 0x109 /* TM checkpointed FPR Registers */
+#define NT_PPC_TM_CVMX 0x10a /* TM checkpointed VMX Registers */
+#define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */
+#define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */
+#define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */
+#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
+#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 05ebf475104c..300ef255d1e0 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -866,6 +866,10 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_ARM_PMU_V3 126
#define KVM_CAP_VCPU_ATTRIBUTES 127
#define KVM_CAP_MAX_VCPU_ID 128
+#define KVM_CAP_X2APIC_API 129
+#define KVM_CAP_S390_USER_INSTR0 130
+#define KVM_CAP_MSI_DEVID 131
+#define KVM_CAP_PPC_HTM 132
#ifdef KVM_CAP_IRQ_ROUTING
@@ -878,7 +882,10 @@ struct kvm_irq_routing_msi {
__u32 address_lo;
__u32 address_hi;
__u32 data;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 devid;
+ };
};
struct kvm_irq_routing_s390_adapter {
@@ -1024,12 +1031,14 @@ struct kvm_one_reg {
__u64 addr;
};
+#define KVM_MSI_VALID_DEVID (1U << 0)
struct kvm_msi {
__u32 address_lo;
__u32 address_hi;
__u32 data;
__u32 flags;
- __u8 pad[16];
+ __u32 devid;
+ __u8 pad[12];
};
struct kvm_arm_device_addr {
@@ -1074,6 +1083,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_FLIC KVM_DEV_TYPE_FLIC
KVM_DEV_TYPE_ARM_VGIC_V3,
#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
+ KVM_DEV_TYPE_ARM_VGIC_ITS,
+#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
KVM_DEV_TYPE_MAX,
};
@@ -1313,4 +1324,7 @@ struct kvm_assigned_msix_entry {
__u16 padding[3];
};
+#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/nilfs2_api.h b/include/uapi/linux/nilfs2_api.h
new file mode 100644
index 000000000000..ef4c1de89b11
--- /dev/null
+++ b/include/uapi/linux/nilfs2_api.h
@@ -0,0 +1,292 @@
+/*
+ * nilfs2_api.h - NILFS2 user space API
+ *
+ * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_NILFS2_API_H
+#define _LINUX_NILFS2_API_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * struct nilfs_cpinfo - checkpoint information
+ * @ci_flags: flags
+ * @ci_pad: padding
+ * @ci_cno: checkpoint number
+ * @ci_create: creation timestamp
+ * @ci_nblk_inc: number of blocks incremented by this checkpoint
+ * @ci_inodes_count: inodes count
+ * @ci_blocks_count: blocks count
+ * @ci_next: next checkpoint number in snapshot list
+ */
+struct nilfs_cpinfo {
+ __u32 ci_flags;
+ __u32 ci_pad;
+ __u64 ci_cno;
+ __u64 ci_create;
+ __u64 ci_nblk_inc;
+ __u64 ci_inodes_count;
+ __u64 ci_blocks_count;
+ __u64 ci_next;
+};
+
+/* checkpoint flags */
+enum {
+ NILFS_CPINFO_SNAPSHOT,
+ NILFS_CPINFO_INVALID,
+ NILFS_CPINFO_SKETCH,
+ NILFS_CPINFO_MINOR,
+};
+
+#define NILFS_CPINFO_FNS(flag, name) \
+static inline int \
+nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \
+{ \
+ return !!(cpinfo->ci_flags & (1UL << NILFS_CPINFO_##flag)); \
+}
+
+NILFS_CPINFO_FNS(SNAPSHOT, snapshot)
+NILFS_CPINFO_FNS(INVALID, invalid)
+NILFS_CPINFO_FNS(MINOR, minor)
+
+/**
+ * nilfs_suinfo - segment usage information
+ * @sui_lastmod: timestamp of last modification
+ * @sui_nblocks: number of written blocks in segment
+ * @sui_flags: segment usage flags
+ */
+struct nilfs_suinfo {
+ __u64 sui_lastmod;
+ __u32 sui_nblocks;
+ __u32 sui_flags;
+};
+
+/* segment usage flags */
+enum {
+ NILFS_SUINFO_ACTIVE,
+ NILFS_SUINFO_DIRTY,
+ NILFS_SUINFO_ERROR,
+};
+
+#define NILFS_SUINFO_FNS(flag, name) \
+static inline int \
+nilfs_suinfo_##name(const struct nilfs_suinfo *si) \
+{ \
+ return si->sui_flags & (1UL << NILFS_SUINFO_##flag); \
+}
+
+NILFS_SUINFO_FNS(ACTIVE, active)
+NILFS_SUINFO_FNS(DIRTY, dirty)
+NILFS_SUINFO_FNS(ERROR, error)
+
+static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
+{
+ return !si->sui_flags;
+}
+
+/**
+ * nilfs_suinfo_update - segment usage information update
+ * @sup_segnum: segment number
+ * @sup_flags: flags for which fields are active in sup_sui
+ * @sup_reserved: reserved necessary for alignment
+ * @sup_sui: segment usage information
+ */
+struct nilfs_suinfo_update {
+ __u64 sup_segnum;
+ __u32 sup_flags;
+ __u32 sup_reserved;
+ struct nilfs_suinfo sup_sui;
+};
+
+enum {
+ NILFS_SUINFO_UPDATE_LASTMOD,
+ NILFS_SUINFO_UPDATE_NBLOCKS,
+ NILFS_SUINFO_UPDATE_FLAGS,
+ __NR_NILFS_SUINFO_UPDATE_FIELDS,
+};
+
+#define NILFS_SUINFO_UPDATE_FNS(flag, name) \
+static inline void \
+nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \
+{ \
+ sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \
+} \
+static inline void \
+nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \
+{ \
+ sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \
+} \
+static inline int \
+nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \
+{ \
+ return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\
+}
+
+NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod)
+NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks)
+NILFS_SUINFO_UPDATE_FNS(FLAGS, flags)
+
+enum {
+ NILFS_CHECKPOINT,
+ NILFS_SNAPSHOT,
+};
+
+/**
+ * struct nilfs_cpmode - change checkpoint mode structure
+ * @cm_cno: checkpoint number
+ * @cm_mode: mode of checkpoint
+ * @cm_pad: padding
+ */
+struct nilfs_cpmode {
+ __u64 cm_cno;
+ __u32 cm_mode;
+ __u32 cm_pad;
+};
+
+/**
+ * struct nilfs_argv - argument vector
+ * @v_base: pointer on data array from userspace
+ * @v_nmembs: number of members in data array
+ * @v_size: size of data array in bytes
+ * @v_flags: flags
+ * @v_index: start number of target data items
+ */
+struct nilfs_argv {
+ __u64 v_base;
+ __u32 v_nmembs; /* number of members */
+ __u16 v_size; /* size of members */
+ __u16 v_flags;
+ __u64 v_index;
+};
+
+/**
+ * struct nilfs_period - period of checkpoint numbers
+ * @p_start: start checkpoint number (inclusive)
+ * @p_end: end checkpoint number (exclusive)
+ */
+struct nilfs_period {
+ __u64 p_start;
+ __u64 p_end;
+};
+
+/**
+ * struct nilfs_cpstat - checkpoint statistics
+ * @cs_cno: checkpoint number
+ * @cs_ncps: number of checkpoints
+ * @cs_nsss: number of snapshots
+ */
+struct nilfs_cpstat {
+ __u64 cs_cno;
+ __u64 cs_ncps;
+ __u64 cs_nsss;
+};
+
+/**
+ * struct nilfs_sustat - segment usage statistics
+ * @ss_nsegs: number of segments
+ * @ss_ncleansegs: number of clean segments
+ * @ss_ndirtysegs: number of dirty segments
+ * @ss_ctime: creation time of the last segment
+ * @ss_nongc_ctime: creation time of the last segment not for GC
+ * @ss_prot_seq: least sequence number of segments which must not be reclaimed
+ */
+struct nilfs_sustat {
+ __u64 ss_nsegs;
+ __u64 ss_ncleansegs;
+ __u64 ss_ndirtysegs;
+ __u64 ss_ctime;
+ __u64 ss_nongc_ctime;
+ __u64 ss_prot_seq;
+};
+
+/**
+ * struct nilfs_vinfo - virtual block number information
+ * @vi_vblocknr: virtual block number
+ * @vi_start: start checkpoint number (inclusive)
+ * @vi_end: end checkpoint number (exclusive)
+ * @vi_blocknr: disk block number
+ */
+struct nilfs_vinfo {
+ __u64 vi_vblocknr;
+ __u64 vi_start;
+ __u64 vi_end;
+ __u64 vi_blocknr;
+};
+
+/**
+ * struct nilfs_vdesc - descriptor of virtual block number
+ * @vd_ino: inode number
+ * @vd_cno: checkpoint number
+ * @vd_vblocknr: virtual block number
+ * @vd_period: period of checkpoint numbers
+ * @vd_blocknr: disk block number
+ * @vd_offset: logical block offset inside a file
+ * @vd_flags: flags (data or node block)
+ * @vd_pad: padding
+ */
+struct nilfs_vdesc {
+ __u64 vd_ino;
+ __u64 vd_cno;
+ __u64 vd_vblocknr;
+ struct nilfs_period vd_period;
+ __u64 vd_blocknr;
+ __u64 vd_offset;
+ __u32 vd_flags;
+ __u32 vd_pad;
+};
+
+/**
+ * struct nilfs_bdesc - descriptor of disk block number
+ * @bd_ino: inode number
+ * @bd_oblocknr: disk block address (for skipping dead blocks)
+ * @bd_blocknr: disk block address
+ * @bd_offset: logical block offset inside a file
+ * @bd_level: level in the b-tree organization
+ * @bd_pad: padding
+ */
+struct nilfs_bdesc {
+ __u64 bd_ino;
+ __u64 bd_oblocknr;
+ __u64 bd_blocknr;
+ __u64 bd_offset;
+ __u32 bd_level;
+ __u32 bd_pad;
+};
+
+#define NILFS_IOCTL_IDENT 'n'
+
+#define NILFS_IOCTL_CHANGE_CPMODE \
+ _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
+#define NILFS_IOCTL_DELETE_CHECKPOINT \
+ _IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
+#define NILFS_IOCTL_GET_CPINFO \
+ _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
+#define NILFS_IOCTL_GET_CPSTAT \
+ _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
+#define NILFS_IOCTL_GET_SUINFO \
+ _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
+#define NILFS_IOCTL_GET_SUSTAT \
+ _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
+#define NILFS_IOCTL_GET_VINFO \
+ _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
+#define NILFS_IOCTL_GET_BDESCS \
+ _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
+#define NILFS_IOCTL_CLEAN_SEGMENTS \
+ _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
+#define NILFS_IOCTL_SYNC \
+ _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
+#define NILFS_IOCTL_RESIZE \
+ _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
+#define NILFS_IOCTL_SET_ALLOC_RANGE \
+ _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
+#define NILFS_IOCTL_SET_SUINFO \
+ _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv)
+
+#endif /* _LINUX_NILFS2_API_H */
diff --git a/include/linux/nilfs2_fs.h b/include/uapi/linux/nilfs2_ondisk.h
index 5988dd57ba66..2a8a3addb675 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -1,5 +1,5 @@
/*
- * nilfs2_fs.h - NILFS2 on-disk structures and common declarations.
+ * nilfs2_ondisk.h - NILFS2 on-disk structures
*
* Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
*
@@ -7,13 +7,6 @@
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * Written by Koji Sato and Ryusuke Konishi.
*/
/*
* linux/include/linux/ext2_fs.h
@@ -30,16 +23,15 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*/
-#ifndef _LINUX_NILFS_FS_H
-#define _LINUX_NILFS_FS_H
+#ifndef _LINUX_NILFS2_ONDISK_H
+#define _LINUX_NILFS2_ONDISK_H
#include <linux/types.h>
-#include <linux/ioctl.h>
#include <linux/magic.h>
-#include <linux/bug.h>
#define NILFS_INODE_BMAP_SIZE 7
+
/**
* struct nilfs_inode - structure of an inode on disk
* @i_blocks: blocks count
@@ -56,7 +48,7 @@
* @i_bmap: block mapping
* @i_xattr: extended attributes
* @i_generation: file generation (for NFS)
- * @i_pad: padding
+ * @i_pad: padding
*/
struct nilfs_inode {
__le64 i_blocks;
@@ -338,29 +330,7 @@ enum {
#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1)
#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \
~NILFS_DIR_ROUND)
-#define NILFS_MAX_REC_LEN ((1<<16)-1)
-
-static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
-{
- unsigned int len = le16_to_cpu(dlen);
-
-#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
- if (len == NILFS_MAX_REC_LEN)
- return 1 << 16;
-#endif
- return len;
-}
-
-static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
-{
-#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
- if (len == (1 << 16))
- return cpu_to_le16(NILFS_MAX_REC_LEN);
- else if (len > (1 << 16))
- BUG();
-#endif
- return cpu_to_le16(len);
-}
+#define NILFS_MAX_REC_LEN ((1 << 16) - 1)
/**
* struct nilfs_finfo - file information
@@ -374,11 +344,10 @@ struct nilfs_finfo {
__le64 fi_cno;
__le32 fi_nblocks;
__le32 fi_ndatablk;
- /* array of virtual block numbers */
};
/**
- * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned
+ * struct nilfs_binfo_v - information on a data block (except DAT)
* @bi_vblocknr: virtual block number
* @bi_blkoff: block offset
*/
@@ -388,7 +357,7 @@ struct nilfs_binfo_v {
};
/**
- * struct nilfs_binfo_dat - information for the block which belongs to the DAT file
+ * struct nilfs_binfo_dat - information on a DAT node block
* @bi_blkoff: block offset
* @bi_level: level
* @bi_pad: padding
@@ -454,7 +423,7 @@ struct nilfs_segment_summary {
#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
/**
- * struct nilfs_btree_node - B-tree node
+ * struct nilfs_btree_node - header of B-tree node block
* @bn_flags: flags
* @bn_level: level
* @bn_nchildren: number of children
@@ -476,6 +445,16 @@ struct nilfs_btree_node {
#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
/**
+ * struct nilfs_direct_node - header of built-in bmap array
+ * @dn_flags: flags
+ * @pad: padding
+ */
+struct nilfs_direct_node {
+ __u8 dn_flags;
+ __u8 pad[7];
+};
+
+/**
* struct nilfs_palloc_group_desc - block group descriptor
* @pg_nfrees: number of free entries in block group
*/
@@ -574,40 +553,6 @@ NILFS_CHECKPOINT_FNS(INVALID, invalid)
NILFS_CHECKPOINT_FNS(MINOR, minor)
/**
- * struct nilfs_cpinfo - checkpoint information
- * @ci_flags: flags
- * @ci_pad: padding
- * @ci_cno: checkpoint number
- * @ci_create: creation timestamp
- * @ci_nblk_inc: number of blocks incremented by this checkpoint
- * @ci_inodes_count: inodes count
- * @ci_blocks_count: blocks count
- * @ci_next: next checkpoint number in snapshot list
- */
-struct nilfs_cpinfo {
- __u32 ci_flags;
- __u32 ci_pad;
- __u64 ci_cno;
- __u64 ci_create;
- __u64 ci_nblk_inc;
- __u64 ci_inodes_count;
- __u64 ci_blocks_count;
- __u64 ci_next;
-};
-
-#define NILFS_CPINFO_FNS(flag, name) \
-static inline int \
-nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \
-{ \
- return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \
-}
-
-NILFS_CPINFO_FNS(SNAPSHOT, snapshot)
-NILFS_CPINFO_FNS(INVALID, invalid)
-NILFS_CPINFO_FNS(MINOR, minor)
-
-
-/**
* struct nilfs_cpfile_header - checkpoint file header
* @ch_ncheckpoints: number of checkpoints
* @ch_nsnapshots: number of snapshots
@@ -619,7 +564,7 @@ struct nilfs_cpfile_header {
struct nilfs_snapshot_list ch_snapshot_list;
};
-#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \
+#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \
((sizeof(struct nilfs_cpfile_header) + \
sizeof(struct nilfs_checkpoint) - 1) / \
sizeof(struct nilfs_checkpoint))
@@ -643,8 +588,6 @@ enum {
NILFS_SEGMENT_USAGE_ACTIVE,
NILFS_SEGMENT_USAGE_DIRTY,
NILFS_SEGMENT_USAGE_ERROR,
-
- /* ... */
};
#define NILFS_SEGMENT_USAGE_FNS(flag, name) \
@@ -699,236 +642,9 @@ struct nilfs_sufile_header {
/* ... */
};
-#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \
+#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \
((sizeof(struct nilfs_sufile_header) + \
sizeof(struct nilfs_segment_usage) - 1) / \
sizeof(struct nilfs_segment_usage))
-/**
- * nilfs_suinfo - segment usage information
- * @sui_lastmod: timestamp of last modification
- * @sui_nblocks: number of written blocks in segment
- * @sui_flags: segment usage flags
- */
-struct nilfs_suinfo {
- __u64 sui_lastmod;
- __u32 sui_nblocks;
- __u32 sui_flags;
-};
-
-#define NILFS_SUINFO_FNS(flag, name) \
-static inline int \
-nilfs_suinfo_##name(const struct nilfs_suinfo *si) \
-{ \
- return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \
-}
-
-NILFS_SUINFO_FNS(ACTIVE, active)
-NILFS_SUINFO_FNS(DIRTY, dirty)
-NILFS_SUINFO_FNS(ERROR, error)
-
-static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
-{
- return !si->sui_flags;
-}
-
-/* ioctl */
-/**
- * nilfs_suinfo_update - segment usage information update
- * @sup_segnum: segment number
- * @sup_flags: flags for which fields are active in sup_sui
- * @sup_reserved: reserved necessary for alignment
- * @sup_sui: segment usage information
- */
-struct nilfs_suinfo_update {
- __u64 sup_segnum;
- __u32 sup_flags;
- __u32 sup_reserved;
- struct nilfs_suinfo sup_sui;
-};
-
-enum {
- NILFS_SUINFO_UPDATE_LASTMOD,
- NILFS_SUINFO_UPDATE_NBLOCKS,
- NILFS_SUINFO_UPDATE_FLAGS,
- __NR_NILFS_SUINFO_UPDATE_FIELDS,
-};
-
-#define NILFS_SUINFO_UPDATE_FNS(flag, name) \
-static inline void \
-nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \
-{ \
- sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \
-} \
-static inline void \
-nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \
-{ \
- sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \
-} \
-static inline int \
-nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \
-{ \
- return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\
-}
-
-NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod)
-NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks)
-NILFS_SUINFO_UPDATE_FNS(FLAGS, flags)
-
-enum {
- NILFS_CHECKPOINT,
- NILFS_SNAPSHOT,
-};
-
-/**
- * struct nilfs_cpmode - change checkpoint mode structure
- * @cm_cno: checkpoint number
- * @cm_mode: mode of checkpoint
- * @cm_pad: padding
- */
-struct nilfs_cpmode {
- __u64 cm_cno;
- __u32 cm_mode;
- __u32 cm_pad;
-};
-
-/**
- * struct nilfs_argv - argument vector
- * @v_base: pointer on data array from userspace
- * @v_nmembs: number of members in data array
- * @v_size: size of data array in bytes
- * @v_flags: flags
- * @v_index: start number of target data items
- */
-struct nilfs_argv {
- __u64 v_base;
- __u32 v_nmembs; /* number of members */
- __u16 v_size; /* size of members */
- __u16 v_flags;
- __u64 v_index;
-};
-
-/**
- * struct nilfs_period - period of checkpoint numbers
- * @p_start: start checkpoint number (inclusive)
- * @p_end: end checkpoint number (exclusive)
- */
-struct nilfs_period {
- __u64 p_start;
- __u64 p_end;
-};
-
-/**
- * struct nilfs_cpstat - checkpoint statistics
- * @cs_cno: checkpoint number
- * @cs_ncps: number of checkpoints
- * @cs_nsss: number of snapshots
- */
-struct nilfs_cpstat {
- __u64 cs_cno;
- __u64 cs_ncps;
- __u64 cs_nsss;
-};
-
-/**
- * struct nilfs_sustat - segment usage statistics
- * @ss_nsegs: number of segments
- * @ss_ncleansegs: number of clean segments
- * @ss_ndirtysegs: number of dirty segments
- * @ss_ctime: creation time of the last segment
- * @ss_nongc_ctime: creation time of the last segment not for GC
- * @ss_prot_seq: least sequence number of segments which must not be reclaimed
- */
-struct nilfs_sustat {
- __u64 ss_nsegs;
- __u64 ss_ncleansegs;
- __u64 ss_ndirtysegs;
- __u64 ss_ctime;
- __u64 ss_nongc_ctime;
- __u64 ss_prot_seq;
-};
-
-/**
- * struct nilfs_vinfo - virtual block number information
- * @vi_vblocknr: virtual block number
- * @vi_start: start checkpoint number (inclusive)
- * @vi_end: end checkpoint number (exclusive)
- * @vi_blocknr: disk block number
- */
-struct nilfs_vinfo {
- __u64 vi_vblocknr;
- __u64 vi_start;
- __u64 vi_end;
- __u64 vi_blocknr;
-};
-
-/**
- * struct nilfs_vdesc - descriptor of virtual block number
- * @vd_ino: inode number
- * @vd_cno: checkpoint number
- * @vd_vblocknr: virtual block number
- * @vd_period: period of checkpoint numbers
- * @vd_blocknr: disk block number
- * @vd_offset: logical block offset inside a file
- * @vd_flags: flags (data or node block)
- * @vd_pad: padding
- */
-struct nilfs_vdesc {
- __u64 vd_ino;
- __u64 vd_cno;
- __u64 vd_vblocknr;
- struct nilfs_period vd_period;
- __u64 vd_blocknr;
- __u64 vd_offset;
- __u32 vd_flags;
- __u32 vd_pad;
-};
-
-/**
- * struct nilfs_bdesc - descriptor of disk block number
- * @bd_ino: inode number
- * @bd_oblocknr: disk block address (for skipping dead blocks)
- * @bd_blocknr: disk block address
- * @bd_offset: logical block offset inside a file
- * @bd_level: level in the b-tree organization
- * @bd_pad: padding
- */
-struct nilfs_bdesc {
- __u64 bd_ino;
- __u64 bd_oblocknr;
- __u64 bd_blocknr;
- __u64 bd_offset;
- __u32 bd_level;
- __u32 bd_pad;
-};
-
-#define NILFS_IOCTL_IDENT 'n'
-
-#define NILFS_IOCTL_CHANGE_CPMODE \
- _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
-#define NILFS_IOCTL_DELETE_CHECKPOINT \
- _IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
-#define NILFS_IOCTL_GET_CPINFO \
- _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
-#define NILFS_IOCTL_GET_CPSTAT \
- _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
-#define NILFS_IOCTL_GET_SUINFO \
- _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
-#define NILFS_IOCTL_GET_SUSTAT \
- _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
-#define NILFS_IOCTL_GET_VINFO \
- _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
-#define NILFS_IOCTL_GET_BDESCS \
- _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
-#define NILFS_IOCTL_CLEAN_SEGMENTS \
- _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
-#define NILFS_IOCTL_SYNC \
- _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
-#define NILFS_IOCTL_RESIZE \
- _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
-#define NILFS_IOCTL_SET_ALLOC_RANGE \
- _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
-#define NILFS_IOCTL_SET_SUINFO \
- _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv)
-
-#endif /* _LINUX_NILFS_FS_H */
+#endif /* _LINUX_NILFS2_ONDISK_H */
diff --git a/include/uapi/linux/rio_cm_cdev.h b/include/uapi/linux/rio_cm_cdev.h
new file mode 100644
index 000000000000..6edb900d318d
--- /dev/null
+++ b/include/uapi/linux/rio_cm_cdev.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2015, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License(GPL) Version 2, or the BSD-3 Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_CM_CDEV_H_
+#define _RIO_CM_CDEV_H_
+
+#include <linux/types.h>
+
+struct rio_cm_channel {
+ __u16 id;
+ __u16 remote_channel;
+ __u16 remote_destid;
+ __u8 mport_id;
+};
+
+struct rio_cm_msg {
+ __u16 ch_num;
+ __u16 size;
+ __u32 rxto; /* receive timeout in mSec. 0 = blocking */
+ __u64 msg;
+};
+
+struct rio_cm_accept {
+ __u16 ch_num;
+ __u16 pad0;
+ __u32 wait_to; /* accept timeout in mSec. 0 = blocking */
+};
+
+/* RapidIO Channelized Messaging Driver IOCTLs */
+#define RIO_CM_IOC_MAGIC 'c'
+
+#define RIO_CM_EP_GET_LIST_SIZE _IOWR(RIO_CM_IOC_MAGIC, 1, __u32)
+#define RIO_CM_EP_GET_LIST _IOWR(RIO_CM_IOC_MAGIC, 2, __u32)
+#define RIO_CM_CHAN_CREATE _IOWR(RIO_CM_IOC_MAGIC, 3, __u16)
+#define RIO_CM_CHAN_CLOSE _IOW(RIO_CM_IOC_MAGIC, 4, __u16)
+#define RIO_CM_CHAN_BIND _IOW(RIO_CM_IOC_MAGIC, 5, struct rio_cm_channel)
+#define RIO_CM_CHAN_LISTEN _IOW(RIO_CM_IOC_MAGIC, 6, __u16)
+#define RIO_CM_CHAN_ACCEPT _IOWR(RIO_CM_IOC_MAGIC, 7, struct rio_cm_accept)
+#define RIO_CM_CHAN_CONNECT _IOW(RIO_CM_IOC_MAGIC, 8, struct rio_cm_channel)
+#define RIO_CM_CHAN_SEND _IOW(RIO_CM_IOC_MAGIC, 9, struct rio_cm_msg)
+#define RIO_CM_CHAN_RECEIVE _IOWR(RIO_CM_IOC_MAGIC, 10, struct rio_cm_msg)
+#define RIO_CM_MPORT_GET_LIST _IOWR(RIO_CM_IOC_MAGIC, 11, __u32)
+
+#endif /* _RIO_CM_CDEV_H_ */
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 0956373b56db..d2b12152e358 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -26,8 +26,6 @@
#include <linux/types.h>
#include <linux/compiler.h>
-struct completion;
-
#define CTL_MAXNAME 10 /* how many path components do we allow in a
call to sysctl? In other words, what is
the largest acceptable value for the nlen
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 61a8777178c6..56b7ab584cc0 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -47,6 +47,32 @@ struct vhost_vring_addr {
__u64 log_guest_addr;
};
+/* no alignment requirement */
+struct vhost_iotlb_msg {
+ __u64 iova;
+ __u64 size;
+ __u64 uaddr;
+#define VHOST_ACCESS_RO 0x1
+#define VHOST_ACCESS_WO 0x2
+#define VHOST_ACCESS_RW 0x3
+ __u8 perm;
+#define VHOST_IOTLB_MISS 1
+#define VHOST_IOTLB_UPDATE 2
+#define VHOST_IOTLB_INVALIDATE 3
+#define VHOST_IOTLB_ACCESS_FAIL 4
+ __u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+
+struct vhost_msg {
+ int type;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
@@ -146,6 +172,8 @@ struct vhost_memory {
#define VHOST_F_LOG_ALL 26
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
+/* Vhost has a device IOTLB */
+#define VHOST_F_DEVICE_IOTLB 63
/* VHOST_SCSI specific definitions */
@@ -175,4 +203,9 @@ struct vhost_scsi_target {
#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
+/* VHOST_VSOCK specific defines */
+
+#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
+#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
+
#endif
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 4cb65bbfa654..308e2096291f 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -49,7 +49,7 @@
* transport being used (eg. virtio_ring), the rest are per-device feature
* bits. */
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 33
+#define VIRTIO_TRANSPORT_F_END 34
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -63,4 +63,12 @@
/* v1.0 compliant. */
#define VIRTIO_F_VERSION_1 32
+/*
+ * If clear - device has the IOMMU bypass quirk feature.
+ * If set - use platform tools to detect the IOMMU.
+ *
+ * Note the reverse polarity (compared to most other features),
+ * this is for compatibility with legacy systems.
+ */
+#define VIRTIO_F_IOMMU_PLATFORM 33
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
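
A tiny sketch of how a driver might act on the new bit, assuming "features" holds the negotiated 64-bit feature word (the variable and function names are illustrative only); note the reverse polarity called out in the comment above.

#include <stdbool.h>
#include <stdint.h>
#include <linux/virtio_config.h>   /* provides VIRTIO_F_IOMMU_PLATFORM */

/* True if DMA must go through the platform IOMMU / DMA API. */
static bool virtio_needs_platform_dma(uint64_t features)
{
	/* Bit clear: legacy IOMMU-bypass quirk; bit set: honour the platform. */
	return features & (1ULL << VIRTIO_F_IOMMU_PLATFORM);
}
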
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 77925f587b15..3228d582234a 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -41,5 +41,6 @@
#define VIRTIO_ID_CAIF 12 /* Virtio caif */
#define VIRTIO_ID_GPU 16 /* virtio GPU */
#define VIRTIO_ID_INPUT 18 /* virtio input */
+#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_vsock.h b/include/uapi/linux/virtio_vsock.h
new file mode 100644
index 000000000000..6b011c19b50f
--- /dev/null
+++ b/include/uapi/linux/virtio_vsock.h
@@ -0,0 +1,94 @@
+/*
+ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers:
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) Red Hat, Inc., 2013-2015
+ * Copyright (C) Asias He <asias@redhat.com>, 2013
+ * Copyright (C) Stefan Hajnoczi <stefanha@redhat.com>, 2015
+ */
+
+#ifndef _UAPI_LINUX_VIRTIO_VSOCK_H
+#define _UAPI_LINUX_VIRTIO_VSOCK_H
+
+#include <linux/types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+struct virtio_vsock_config {
+ __le64 guest_cid;
+} __attribute__((packed));
+
+enum virtio_vsock_event_id {
+ VIRTIO_VSOCK_EVENT_TRANSPORT_RESET = 0,
+};
+
+struct virtio_vsock_event {
+ __le32 id;
+} __attribute__((packed));
+
+struct virtio_vsock_hdr {
+ __le64 src_cid;
+ __le64 dst_cid;
+ __le32 src_port;
+ __le32 dst_port;
+ __le32 len;
+ __le16 type; /* enum virtio_vsock_type */
+ __le16 op; /* enum virtio_vsock_op */
+ __le32 flags;
+ __le32 buf_alloc;
+ __le32 fwd_cnt;
+} __attribute__((packed));
+
+enum virtio_vsock_type {
+ VIRTIO_VSOCK_TYPE_STREAM = 1,
+};
+
+enum virtio_vsock_op {
+ VIRTIO_VSOCK_OP_INVALID = 0,
+
+ /* Connect operations */
+ VIRTIO_VSOCK_OP_REQUEST = 1,
+ VIRTIO_VSOCK_OP_RESPONSE = 2,
+ VIRTIO_VSOCK_OP_RST = 3,
+ VIRTIO_VSOCK_OP_SHUTDOWN = 4,
+
+ /* To send payload */
+ VIRTIO_VSOCK_OP_RW = 5,
+
+ /* Tell the peer our credit info */
+ VIRTIO_VSOCK_OP_CREDIT_UPDATE = 6,
+ /* Request the peer to send the credit info to us */
+ VIRTIO_VSOCK_OP_CREDIT_REQUEST = 7,
+};
+
+/* VIRTIO_VSOCK_OP_SHUTDOWN flags values */
+enum virtio_vsock_shutdown {
+ VIRTIO_VSOCK_SHUTDOWN_RCV = 1,
+ VIRTIO_VSOCK_SHUTDOWN_SEND = 2,
+};
+
+#endif /* _UAPI_LINUX_VIRTIO_VSOCK_H */
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 8cd334f99ddc..cbae529b7ce0 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -93,6 +93,7 @@ enum cxl_event_type {
CXL_EVENT_AFU_INTERRUPT = 1,
CXL_EVENT_DATA_STORAGE = 2,
CXL_EVENT_AFU_ERROR = 3,
+ CXL_EVENT_AFU_DRIVER = 4,
};
struct cxl_event_header {
@@ -124,12 +125,28 @@ struct cxl_event_afu_error {
__u64 error;
};
+struct cxl_event_afu_driver_reserved {
+ /*
+ * Defines the buffer passed to the cxl driver by the AFU driver.
+ *
+ * This is not ABI since the event header.size passed to the user for
+ * existing events is set in the read call to sizeof(cxl_event_header)
+ * + sizeof(whatever event is being dispatched) and the user is already
+ * required to use a 4K buffer on the read call.
+ *
+	 * Of course the contents will be ABI, but that's up to the AFU driver.
+ */
+ size_t data_size;
+ u8 data[];
+};
+
struct cxl_event {
struct cxl_event_header header;
union {
struct cxl_event_afu_interrupt irq;
struct cxl_event_data_storage fault;
struct cxl_event_afu_error afu_error;
+ struct cxl_event_afu_driver_reserved afu_driver_event;
};
};
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 231901b08f6c..4edb0f2b4f9f 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -6,3 +6,4 @@ header-y += ib_user_verbs.h
header-y += rdma_netlink.h
header-y += rdma_user_cm.h
header-y += hfi/
+header-y += rdma_user_rxe.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 98bebf8bef55..d15e7289d835 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -75,7 +75,7 @@
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define HFI1_USER_SWMINOR 1
+#define HFI1_USER_SWMINOR 2
/*
* We will encode the major/minor inside a single 32bit version number.
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index b6543d73d20a..7f035f4b53b0 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -95,6 +95,11 @@ enum {
IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
+ IB_USER_VERBS_EX_CMD_CREATE_WQ,
+ IB_USER_VERBS_EX_CMD_MODIFY_WQ,
+ IB_USER_VERBS_EX_CMD_DESTROY_WQ,
+ IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
+ IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL
};
/*
@@ -518,6 +523,14 @@ struct ib_uverbs_create_qp {
__u64 driver_data[0];
};
+enum ib_uverbs_create_qp_mask {
+ IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1UL << 0,
+};
+
+enum {
+ IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
+};
+
struct ib_uverbs_ex_create_qp {
__u64 user_handle;
__u32 pd_handle;
@@ -535,6 +548,8 @@ struct ib_uverbs_ex_create_qp {
__u8 reserved;
__u32 comp_mask;
__u32 create_flags;
+ __u32 rwq_ind_tbl_handle;
+ __u32 reserved1;
};
struct ib_uverbs_open_qp {
@@ -852,6 +867,24 @@ struct ib_uverbs_flow_spec_tcp_udp {
struct ib_uverbs_flow_tcp_udp_filter mask;
};
+struct ib_uverbs_flow_ipv6_filter {
+ __u8 src_ip[16];
+ __u8 dst_ip[16];
+};
+
+struct ib_uverbs_flow_spec_ipv6 {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_ipv6_filter val;
+ struct ib_uverbs_flow_ipv6_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
@@ -946,4 +979,66 @@ struct ib_uverbs_destroy_srq_resp {
__u32 events_reported;
};
+struct ib_uverbs_ex_create_wq {
+ __u32 comp_mask;
+ __u32 wq_type;
+ __u64 user_handle;
+ __u32 pd_handle;
+ __u32 cq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+};
+
+struct ib_uverbs_ex_create_wq_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u32 wq_handle;
+ __u32 max_wr;
+ __u32 max_sge;
+ __u32 wqn;
+};
+
+struct ib_uverbs_ex_destroy_wq {
+ __u32 comp_mask;
+ __u32 wq_handle;
+};
+
+struct ib_uverbs_ex_destroy_wq_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u32 events_reported;
+ __u32 reserved;
+};
+
+struct ib_uverbs_ex_modify_wq {
+ __u32 attr_mask;
+ __u32 wq_handle;
+ __u32 wq_state;
+ __u32 curr_wq_state;
+};
+
+/* Limit chosen to prevent excessive memory allocation, not the maximum expected table size */
+#define IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE 0x0d
+struct ib_uverbs_ex_create_rwq_ind_table {
+ __u32 comp_mask;
+ __u32 log_ind_tbl_size;
+	/* Followed by the wq handles; there are 1 << log_ind_tbl_size of them:
+	 * wq_handle1
+	 * wq_handle2
+	 * ...
+	 */
+ __u32 wq_handles[0];
+};
+
+struct ib_uverbs_ex_create_rwq_ind_table_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __u32 ind_tbl_handle;
+ __u32 ind_tbl_num;
+};
+
+struct ib_uverbs_ex_destroy_rwq_ind_table {
+ __u32 comp_mask;
+ __u32 ind_tbl_handle;
+};
+
#endif /* IB_USER_VERBS_H */
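As a rough illustration (not from this patch), a user-space library would size the extended create-RWQ-indirection-table command from log_ind_tbl_size: the table carries 1 << log_ind_tbl_size handles, capped by IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE; real libraries may additionally pad the handle array for alignment.

/* Hypothetical sizing helper, illustration only. */
#include <stddef.h>
#include <rdma/ib_user_verbs.h>

static size_t rwq_ind_tbl_cmd_size(__u32 log_ind_tbl_size)
{
	size_t nhandles;

	if (log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return 0;	/* the kernel is expected to reject larger tables */

	nhandles = (size_t)1 << log_ind_tbl_size;
	return sizeof(struct ib_uverbs_ex_create_rwq_ind_table) +
	       nhandles * sizeof(__u32);
}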
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 3066718eb120..01923d463673 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -244,12 +244,19 @@ struct rdma_ucm_join_ip_mcast {
__u32 id;
};
+/* Multicast join flags */
+enum {
+ RDMA_MC_JOIN_FLAG_FULLMEMBER,
+ RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER,
+ RDMA_MC_JOIN_FLAG_RESERVED,
+};
+
struct rdma_ucm_join_mcast {
__u64 response; /* rdma_ucma_create_id_resp */
__u64 uid;
__u32 id;
__u16 addr_size;
- __u16 reserved;
+ __u16 join_flags;
struct sockaddr_storage addr;
};
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
new file mode 100644
index 000000000000..1de99cfdaf7d
--- /dev/null
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_RXE_H
+#define RDMA_USER_RXE_H
+
+#include <linux/types.h>
+
+union rxe_gid {
+ __u8 raw[16];
+ struct {
+ __be64 subnet_prefix;
+ __be64 interface_id;
+ } global;
+};
+
+struct rxe_global_route {
+ union rxe_gid dgid;
+ __u32 flow_label;
+ __u8 sgid_index;
+ __u8 hop_limit;
+ __u8 traffic_class;
+};
+
+struct rxe_av {
+ __u8 port_num;
+ __u8 network_type;
+ struct rxe_global_route grh;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+};
+
+struct rxe_send_wr {
+ __u64 wr_id;
+ __u32 num_sge;
+ __u32 opcode;
+ __u32 send_flags;
+ union {
+ __be32 imm_data;
+ __u32 invalidate_rkey;
+ } ex;
+ union {
+ struct {
+ __u64 remote_addr;
+ __u32 rkey;
+ } rdma;
+ struct {
+ __u64 remote_addr;
+ __u64 compare_add;
+ __u64 swap;
+ __u32 rkey;
+ } atomic;
+ struct {
+ __u32 remote_qpn;
+ __u32 remote_qkey;
+ __u16 pkey_index;
+ } ud;
+ struct {
+ struct ib_mr *mr;
+ __u32 key;
+ int access;
+ } reg;
+ } wr;
+};
+
+struct rxe_sge {
+ __u64 addr;
+ __u32 length;
+ __u32 lkey;
+};
+
+struct mminfo {
+ __u64 offset;
+ __u32 size;
+ __u32 pad;
+};
+
+struct rxe_dma_info {
+ __u32 length;
+ __u32 resid;
+ __u32 cur_sge;
+ __u32 num_sge;
+ __u32 sge_offset;
+ union {
+ __u8 inline_data[0];
+ struct rxe_sge sge[0];
+ };
+};
+
+struct rxe_send_wqe {
+ struct rxe_send_wr wr;
+ struct rxe_av av;
+ __u32 status;
+ __u32 state;
+ __u64 iova;
+ __u32 mask;
+ __u32 first_psn;
+ __u32 last_psn;
+ __u32 ack_length;
+ __u32 ssn;
+ __u32 has_rd_atomic;
+ struct rxe_dma_info dma;
+};
+
+struct rxe_recv_wqe {
+ __u64 wr_id;
+ __u32 num_sge;
+ __u32 padding;
+ struct rxe_dma_info dma;
+};
+
+#endif /* RDMA_USER_RXE_H */
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 3a2a79401789..7adeaae06961 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -235,9 +235,6 @@ int ipu_di_init_sync_panel(struct ipu_di *, struct ipu_di_signal_cfg *sig);
struct dmfc_channel;
int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc);
void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
- unsigned long bandwidth_mbs, int burstsize);
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
void ipu_dmfc_put(struct dmfc_channel *dmfc);
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index 56830d1dc762..e7003ee6e063 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -27,59 +27,18 @@
#ifndef __OMAP_PANEL_DATA_H
#define __OMAP_PANEL_DATA_H
-#include <video/omapdss.h>
#include <video/display_timing.h>
-struct omap_dss_device;
-
-/**
- * encoder_tfp410 platform data
- * @name: name for this display entity
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- * @data_lines: number of DPI datalines
- */
-struct encoder_tfp410_platform_data {
- const char *name;
- const char *source;
- int power_down_gpio;
- int data_lines;
-};
-
-
-/**
- * connector_dvi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @i2c_bus_num: i2c bus number to be used for reading EDID
- */
-struct connector_dvi_platform_data {
- const char *name;
- const char *source;
- int i2c_bus_num;
-};
-
-/**
- * connector_hdmi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- */
-struct connector_hdmi_platform_data {
- const char *name;
- const char *source;
-};
-
/**
* connector_atv platform data
* @name: name for this display entity
* @source: name of the display entity used as a video source
- * @connector_type: composite/svideo
* @invert_polarity: invert signal polarity
*/
struct connector_atv_platform_data {
const char *name;
const char *source;
- enum omap_dss_venc_type connector_type;
bool invert_polarity;
};
@@ -105,33 +64,6 @@ struct panel_dpi_platform_data {
};
/**
- * panel_dsicm platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @reset_gpio: gpio to reset the panel (or -1)
- * @use_ext_te: use external TE GPIO
- * @ext_te_gpio: external TE GPIO
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-struct panel_dsicm_platform_data {
- const char *name;
- const char *source;
-
- int reset_gpio;
-
- bool use_ext_te;
- int ext_te_gpio;
-
- unsigned ulps_timeout;
-
- bool use_dsi_backlight;
-
- struct omap_dsi_pin_config pin_config;
-};
-
-/**
* panel_acx565akm platform data
* @name: name for this display entity
* @source: name of the display entity used as a video source
@@ -147,93 +79,4 @@ struct panel_acx565akm_platform_data {
int datapairs;
};
-/**
- * panel_lb035q02 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @backlight_gpio: gpio to enable/disable the backlight (or -1)
- * @enable_gpio: gpio to enable/disable the panel (or -1)
- */
-struct panel_lb035q02_platform_data {
- const char *name;
- const char *source;
-
- int data_lines;
-
- int backlight_gpio;
- int enable_gpio;
-};
-
-/**
- * panel_sharp_ls037v7dw01 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @resb_gpio: reset signal GPIO
- * @ini_gpio: power on control GPIO
- * @mo_gpio: selection for resolution(VGA/QVGA) GPIO
- * @lr_gpio: selection for horizontal scanning direction GPIO
- * @ud_gpio: selection for vertical scanning direction GPIO
- */
-struct panel_sharp_ls037v7dw01_platform_data {
- const char *name;
- const char *source;
-
- int data_lines;
-
- int resb_gpio;
- int ini_gpio;
- int mo_gpio;
- int lr_gpio;
- int ud_gpio;
-};
-
-/**
- * panel-tpo-td043mtea1 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @nreset_gpio: reset signal
- */
-struct panel_tpo_td043mtea1_platform_data {
- const char *name;
- const char *source;
-
- int data_lines;
-
- int nreset_gpio;
-};
-
-/**
- * panel-nec-nl8048hl11 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @res_gpio: reset signal
- * @qvga_gpio: selection for resolution(QVGA/WVGA)
- */
-struct panel_nec_nl8048hl11_platform_data {
- const char *name;
- const char *source;
-
- int data_lines;
-
- int res_gpio;
- int qvga_gpio;
-};
-
-/**
- * panel-tpo-td028ttec1 platform data
- * @name: name for display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- */
-struct panel_tpo_td028ttec1_platform_data {
- const char *name;
- const char *source;
-
- int data_lines;
-};
-
#endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/video/omapdss.h b/include/video/omapfb_dss.h
index 8e14ad7327c9..1d38901d599d 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapfb_dss.h
@@ -1,27 +1,20 @@
/*
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Copyright (C) 2016 Texas Instruments, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*/
-#ifndef __OMAP_OMAPDSS_H
-#define __OMAP_OMAPDSS_H
+#ifndef __OMAPFB_DSS_H
+#define __OMAPFB_DSS_H
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/platform_data/omapdss.h>
#include <video/videomode.h>
@@ -167,11 +160,6 @@ enum omap_dss_display_state {
OMAP_DSS_DISPLAY_ACTIVE,
};
-struct omap_dss_audio {
- struct snd_aes_iec958 *iec;
- struct snd_cea_861_aud_if *cea;
-};
-
enum omap_dss_rotation_type {
OMAP_DSS_ROT_DMA = 1 << 0,
OMAP_DSS_ROT_VRFB = 1 << 1,
@@ -195,25 +183,6 @@ enum omap_overlay_caps {
OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
};
-enum omap_overlay_manager_caps {
- OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
-};
-
-enum omap_dss_clk_source {
- OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
- * OMAP4: DSS_FCLK */
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
- * OMAP4: PLL1_CLK1 */
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
- * OMAP4: PLL1_CLK2 */
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
-};
-
-enum omap_hdmi_flags {
- OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
-};
-
enum omap_dss_output_id {
OMAP_DSS_OUTPUT_DPI = 1 << 0,
OMAP_DSS_OUTPUT_DBI = 1 << 1,
@@ -303,36 +272,6 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-enum omapdss_version {
- OMAPDSS_VER_UNKNOWN = 0,
- OMAPDSS_VER_OMAP24xx,
- OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */
- OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */
- OMAPDSS_VER_OMAP3630,
- OMAPDSS_VER_AM35xx,
- OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */
- OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */
- OMAPDSS_VER_OMAP4, /* All other OMAP4s */
- OMAPDSS_VER_OMAP5,
- OMAPDSS_VER_AM43xx,
- OMAPDSS_VER_DRA7xx,
-};
-
-/* Board specific data */
-struct omap_dss_board_info {
- int num_devices;
- struct omap_dss_device **devices;
- struct omap_dss_device *default_device;
- const char *default_display_name;
- int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
- void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
- int (*set_min_bus_tput)(struct device *dev, unsigned long r);
- enum omapdss_version version;
-};
-
-/* Init with the board info */
-extern int omap_display_init(struct omap_dss_board_info *board_data);
-
struct omap_video_timings {
/* Unit: pixels */
u16 x_res;
@@ -463,7 +402,6 @@ struct omap_overlay_manager {
/* static fields */
const char *name;
enum omap_channel id;
- enum omap_overlay_manager_caps caps;
struct list_head overlays;
enum omap_display_type supported_displays;
enum omap_dss_output_id supported_outputs;
@@ -919,4 +857,4 @@ omapdss_of_get_first_endpoint(const struct device_node *parent);
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
-#endif
+#endif /* __OMAPFB_DSS_H */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 8b2eb93ae8ba..7c35e279d1e3 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -9,30 +9,30 @@ extern int xen_swiotlb_init(int verbose, bool early);
extern void
*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
diff --git a/init/Kconfig b/init/Kconfig
index 46f817abff0e..cac3f096050d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -55,6 +55,7 @@ config CROSS_COMPILE
config COMPILE_TEST
bool "Compile also drivers which will not load"
+ depends on !UML
default n
help
Some drivers can be compiled on a different platform than they are
@@ -80,6 +81,7 @@ config LOCALVERSION
config LOCALVERSION_AUTO
bool "Automatically append version information to the version string"
default y
+ depends on !COMPILE_TEST
help
This will try to automatically determine if the current tree is a
release tree by looking for git tags that belong to the current
@@ -952,7 +954,7 @@ menuconfig CGROUPS
controls or device isolation.
See
- Documentation/scheduler/sched-design-CFS.txt (CFS)
- - Documentation/cgroups/ (features for grouping, isolation
+ - Documentation/cgroup-v1/ (features for grouping, isolation
and resource control)
Say N if unsure.
@@ -1009,7 +1011,7 @@ config BLK_CGROUP
CONFIG_CFQ_GROUP_IOSCHED=y; for enabling throttling policy, set
CONFIG_BLK_DEV_THROTTLING=y.
- See Documentation/cgroups/blkio-controller.txt for more information.
+ See Documentation/cgroup-v1/blkio-controller.txt for more information.
config DEBUG_BLK_CGROUP
bool "IO controller debugging"
@@ -1759,6 +1761,7 @@ choice
config SLAB
bool "SLAB"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
@@ -1766,6 +1769,7 @@ config SLAB
config SLUB
bool "SLUB (Unqueued Allocator)"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (SLAB approach).
@@ -2078,7 +2082,7 @@ config TRIM_UNUSED_KSYMS
(especially when using LTO) for optimizing the code and reducing
binary size. This might have some security advantages as well.
- If unsure say N.
+ If unsure, or if you need to build out-of-tree modules, say N.
endif # MODULES
diff --git a/init/main.c b/init/main.c
index eae02aa03c9e..a8a58e2794a5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -380,7 +380,7 @@ static void __init setup_command_line(char *command_line)
static __initdata DECLARE_COMPLETION(kthreadd_done);
-static noinline void __init_refok rest_init(void)
+static noinline void __ref rest_init(void)
{
int pid;
@@ -716,6 +716,12 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
addr = (unsigned long) dereference_function_descriptor(fn);
sprint_symbol_no_offset(fn_name, addr);
+ /*
+	 * fn_name will be "function_name [module_name]" where [module_name] is not
+ * displayed for built-in init functions. Strip off the [module_name].
+ */
+ strreplace(fn_name, ' ', '\0');
+
list_for_each_entry(entry, &blacklisted_initcalls, next) {
if (!strcmp(fn_name, entry->buf)) {
pr_debug("initcall %s blacklisted\n", fn_name);
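To illustrate the strreplace() step above (a user-space rendering with a made-up symbol name, not kernel code): truncating at the first space lets a blacklist entry given as a bare function name match symbols rendered as "function_name [module_name]".

#include <stdio.h>
#include <string.h>

int main(void)
{
	char fn_name[] = "example_driver_init [example_mod]";	/* hypothetical */

	/* same effective result as strreplace(fn_name, ' ', '\0') */
	char *sp = strchr(fn_name, ' ');
	if (sp)
		*sp = '\0';

	printf("%s\n", fn_name);	/* prints "example_driver_init" */
	return 0;
}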
diff --git a/ipc/msg.c b/ipc/msg.c
index 1471db9a7e61..c6521c205cb4 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -680,7 +680,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
rcu_read_lock();
ipc_lock_object(&msq->q_perm);
- ipc_rcu_putref(msq, ipc_rcu_free);
+ ipc_rcu_putref(msq, msg_rcu_free);
/* raced with RMID? */
if (!ipc_valid_object(&msq->q_perm)) {
err = -EIDRM;
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index ed81aafd2392..a521999de4f1 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -37,8 +37,6 @@ struct ipc_namespace init_ipc_ns = {
#endif
};
-atomic_t nr_ipc_ns = ATOMIC_INIT(1);
-
struct msg_msgseg {
struct msg_msgseg *next;
/* the next part of the message follows immediately */
diff --git a/ipc/namespace.c b/ipc/namespace.c
index 04cb07eb81f1..d87e6baa1323 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -43,7 +43,6 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
kfree(ns);
return ERR_PTR(err);
}
- atomic_inc(&nr_ipc_ns);
sem_init_ns(ns);
msg_init_ns(ns);
@@ -96,7 +95,6 @@ static void free_ipc_ns(struct ipc_namespace *ns)
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
- atomic_dec(&nr_ipc_ns);
put_user_ns(ns->user_ns);
ns_free_inum(&ns->ns);
diff --git a/ipc/sem.c b/ipc/sem.c
index ae72b3cddc8d..7c9d4f7683c0 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -438,7 +438,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
sem_lock(sma, NULL, -1);
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -1381,7 +1381,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
rcu_read_unlock();
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
@@ -1415,20 +1415,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if (nsems > SEMMSL_FAST) {
sem_io = ipc_alloc(sizeof(ushort)*nsems);
if (sem_io == NULL) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return -ENOMEM;
}
}
if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -EFAULT;
goto out_free;
}
for (i = 0; i < nsems; i++) {
if (sem_io[i] > SEMVMX) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
err = -ERANGE;
goto out_free;
}
@@ -1720,7 +1720,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 2: allocate new undo structure */
new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
if (!new) {
- ipc_rcu_putref(sma, ipc_rcu_free);
+ ipc_rcu_putref(sma, sem_rcu_free);
return ERR_PTR(-ENOMEM);
}
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
new file mode 100644
index 000000000000..9f748ed7bea8
--- /dev/null
+++ b/kernel/configs/android-base.config
@@ -0,0 +1,152 @@
+# KEEP ALPHABETICALLY SORTED
+# CONFIG_DEVKMEM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_MODULES is not set
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_SYSVIPC is not set
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_ASHMEM=y
+CONFIG_AUDIT=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_EMBEDDED=y
+CONFIG_FB=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_INET=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_INET_ESP=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IPV6=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_NET=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_NAT=y
+CONFIG_NO_HZ=y
+CONFIG_PACKET=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PREEMPT=y
+CONFIG_QUOTA=y
+CONFIG_RTC_CLASS=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_STAGING=y
+CONFIG_SWP_EMULATION=y
+CONFIG_SYNC=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_XFRM_USER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
new file mode 100644
index 000000000000..e3b953e966d2
--- /dev/null
+++ b/kernel/configs/android-recommended.config
@@ -0,0 +1,121 @@
+# KEEP ALPHABETICALLY SORTED
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_NF_CONNTRACK_SIP is not set
+# CONFIG_PM_WAKELOCKS_GC is not set
+# CONFIG_VT is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_COMPACTION=y
+CONFIG_DEBUG_RODATA=y
+CONFIG_DM_UEVENT=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_HOLTEK=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_LCPOWER=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PRIMAX=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HID_ZYDACRON=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_GPIO=y
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_ION=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KSM=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGITECH_FF=y
+CONFIG_MD=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MSDOS_FS=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANTHERLORD_FF=y
+CONFIG_PERF_EVENTS=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+CONFIG_POWER_SUPPLY=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TIMER_STATS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UHID=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_USBNET=y
+CONFIG_VFAT_FS=y
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 356a6c7cb52a..a19550d80ab1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -448,7 +448,7 @@ static u64 __report_allowed;
static void perf_duration_warn(struct irq_work *w)
{
- printk_ratelimited(KERN_WARNING
+ printk_ratelimited(KERN_INFO
"perf: interrupt took too long (%lld > %lld), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
__report_avg, __report_allowed,
diff --git a/kernel/exit.c b/kernel/exit.c
index 84ae830234f8..2f974ae042a6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -715,7 +715,7 @@ static void check_stack_usage(void)
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
- pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
+ pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
current->comm, task_pid_nr(current), free);
lowest_to_date = free;
}
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 0dbea887d625..93ad6c1fb9b6 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
+#include <linux/bug.h>
#ifdef HAVE_JUMP_LABEL
@@ -56,6 +57,49 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
static void jump_label_update(struct static_key *key);
+/*
+ * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
+ * The use of 'atomic_read()' requires atomic.h, and that is problematic for
+ * some kernel headers such as kernel.h and others. Since static_key_count()
+ * is not used in the branch statements as it is for the !HAVE_JUMP_LABEL
+ * case, it is OK to have it be a function here. The same goes for
+ * 'static_key_enable()' and 'static_key_disable()', which require bug.h.
+ * This should allow jump_label.h
+ * to be included from most/all places for HAVE_JUMP_LABEL.
+ */
+int static_key_count(struct static_key *key)
+{
+ /*
+ * -1 means the first static_key_slow_inc() is in progress.
+ * static_key_enabled() must return true, so return 1 here.
+ */
+ int n = atomic_read(&key->enabled);
+
+ return n >= 0 ? n : 1;
+}
+EXPORT_SYMBOL_GPL(static_key_count);
+
+void static_key_enable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_enable);
+
+void static_key_disable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec(key);
+}
+EXPORT_SYMBOL_GPL(static_key_disable);
+
void static_key_slow_inc(struct static_key *key)
{
int v, v1;
@@ -235,6 +279,18 @@ void __init jump_label_init(void)
struct static_key *key = NULL;
struct jump_entry *iter;
+ /*
+	 * Since we are initializing the static_key.enabled field with
+	 * the 'raw' int values (to avoid pulling in atomic.h) in
+ * jump_label.h, let's make sure that is safe. There are only two
+ * cases to check since we initialize to 0 or 1.
+ */
+ BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
+ BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
+
+ if (static_key_initialized)
+ return;
+
jump_label_lock();
jump_label_sort_entries(iter_start, iter_stop);
@@ -284,11 +340,14 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
{
struct module *mod;
+ preempt_disable();
mod = __module_text_address((unsigned long)start);
+ WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+ preempt_enable();
+
if (!mod)
return 0;
- WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
return __jump_label_text_reserved(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries,
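A hedged kernel-side sketch of how the newly exported helpers above are intended to be used (the key name and wrappers are invented for illustration):

#include <linux/jump_label.h>
#include <linux/types.h>

static struct static_key demo_key = STATIC_KEY_INIT_FALSE;

/* Idempotent on/off, as opposed to the counting inc/dec API. */
void demo_key_set(bool on)
{
	if (on)
		static_key_enable(&demo_key);
	else
		static_key_disable(&demo_key);
}

bool demo_key_active(void)
{
	return static_key_false(&demo_key);	/* patched branch, false by default */
}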
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 4384672d3245..980936a90ee6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -48,7 +48,8 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry,
if (kexec_on_panic) {
/* Verify we have a valid entry point */
- if ((entry < crashk_res.start) || (entry > crashk_res.end))
+ if ((entry < phys_to_boot_phys(crashk_res.start)) ||
+ (entry > phys_to_boot_phys(crashk_res.end)))
return -EADDRNOTAVAIL;
}
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 56b3ed0927b0..561675589511 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -95,6 +95,12 @@ int kexec_should_crash(struct task_struct *p)
return 0;
}
+int kexec_crash_loaded(void)
+{
+ return !!kexec_crash_image;
+}
+EXPORT_SYMBOL_GPL(kexec_crash_loaded);
+
/*
* When kexec transitions to the new kernel there is a one-to-one
* mapping between physical and virtual addresses. On processors
@@ -140,6 +146,7 @@ int kexec_should_crash(struct task_struct *p)
* allocating pages whose destination address we do not care about.
*/
#define KIMAGE_NO_DEST (-1UL)
+#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
static struct page *kimage_alloc_page(struct kimage *image,
gfp_t gfp_mask,
@@ -147,8 +154,9 @@ static struct page *kimage_alloc_page(struct kimage *image,
int sanity_check_segment_list(struct kimage *image)
{
- int result, i;
+ int i;
unsigned long nr_segments = image->nr_segments;
+ unsigned long total_pages = 0;
/*
* Verify we have good destination addresses. The caller is
@@ -163,16 +171,17 @@ int sanity_check_segment_list(struct kimage *image)
* simply because addresses are changed to page size
* granularity.
*/
- result = -EADDRNOTAVAIL;
for (i = 0; i < nr_segments; i++) {
unsigned long mstart, mend;
mstart = image->segment[i].mem;
mend = mstart + image->segment[i].memsz;
+ if (mstart > mend)
+ return -EADDRNOTAVAIL;
if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
- return result;
+ return -EADDRNOTAVAIL;
if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
- return result;
+ return -EADDRNOTAVAIL;
}
/* Verify our destination addresses do not overlap.
@@ -180,7 +189,6 @@ int sanity_check_segment_list(struct kimage *image)
* through very weird things can happen with no
* easy explanation as one segment stops on another.
*/
- result = -EINVAL;
for (i = 0; i < nr_segments; i++) {
unsigned long mstart, mend;
unsigned long j;
@@ -194,7 +202,7 @@ int sanity_check_segment_list(struct kimage *image)
pend = pstart + image->segment[j].memsz;
/* Do the segments overlap ? */
if ((mend > pstart) && (mstart < pend))
- return result;
+ return -EINVAL;
}
}
@@ -203,12 +211,26 @@ int sanity_check_segment_list(struct kimage *image)
* and it is easier to check up front than to be surprised
* later on.
*/
- result = -EINVAL;
for (i = 0; i < nr_segments; i++) {
if (image->segment[i].bufsz > image->segment[i].memsz)
- return result;
+ return -EINVAL;
+ }
+
+ /*
+ * Verify that no more than half of memory will be consumed. If the
+ * request from userspace is too large, a large amount of time will be
+ * wasted allocating pages, which can cause a soft lockup.
+ */
+ for (i = 0; i < nr_segments; i++) {
+ if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
+ return -EINVAL;
+
+ total_pages += PAGE_COUNT(image->segment[i].memsz);
}
+ if (total_pages > totalram_pages / 2)
+ return -EINVAL;
+
/*
* Verify we have good destination addresses. Normally
* the caller is responsible for making certain we don't
@@ -220,16 +242,15 @@ int sanity_check_segment_list(struct kimage *image)
*/
if (image->type == KEXEC_TYPE_CRASH) {
- result = -EADDRNOTAVAIL;
for (i = 0; i < nr_segments; i++) {
unsigned long mstart, mend;
mstart = image->segment[i].mem;
mend = mstart + image->segment[i].memsz - 1;
/* Ensure we are within the crash kernel limits */
- if ((mstart < crashk_res.start) ||
- (mend > crashk_res.end))
- return result;
+ if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
+ (mend > phys_to_boot_phys(crashk_res.end)))
+ return -EADDRNOTAVAIL;
}
}
@@ -352,7 +373,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
if (!pages)
break;
- pfn = page_to_pfn(pages);
+ pfn = page_to_boot_pfn(pages);
epfn = pfn + count;
addr = pfn << PAGE_SHIFT;
eaddr = epfn << PAGE_SHIFT;
@@ -478,7 +499,7 @@ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
return -ENOMEM;
ind_page = page_address(page);
- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+ *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
image->entry = ind_page;
image->last_entry = ind_page +
((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -533,13 +554,13 @@ void kimage_terminate(struct kimage *image)
#define for_each_kimage_entry(image, ptr, entry) \
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
ptr = (entry & IND_INDIRECTION) ? \
- phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
+ boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
static void kimage_free_entry(kimage_entry_t entry)
{
struct page *page;
- page = pfn_to_page(entry >> PAGE_SHIFT);
+ page = boot_pfn_to_page(entry >> PAGE_SHIFT);
kimage_free_pages(page);
}
@@ -633,7 +654,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
* have a match.
*/
list_for_each_entry(page, &image->dest_pages, lru) {
- addr = page_to_pfn(page) << PAGE_SHIFT;
+ addr = page_to_boot_pfn(page) << PAGE_SHIFT;
if (addr == destination) {
list_del(&page->lru);
return page;
@@ -648,12 +669,12 @@ static struct page *kimage_alloc_page(struct kimage *image,
if (!page)
return NULL;
/* If the page cannot be used file it away */
- if (page_to_pfn(page) >
+ if (page_to_boot_pfn(page) >
(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
list_add(&page->lru, &image->unusable_pages);
continue;
}
- addr = page_to_pfn(page) << PAGE_SHIFT;
+ addr = page_to_boot_pfn(page) << PAGE_SHIFT;
/* If it is the destination page we want use it */
if (addr == destination)
@@ -676,7 +697,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
struct page *old_page;
old_addr = *old & PAGE_MASK;
- old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+ old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
copy_highpage(page, old_page);
*old = addr | (*old & ~PAGE_MASK);
@@ -732,7 +753,7 @@ static int kimage_load_normal_segment(struct kimage *image,
result = -ENOMEM;
goto out;
}
- result = kimage_add_page(image, page_to_pfn(page)
+ result = kimage_add_page(image, page_to_boot_pfn(page)
<< PAGE_SHIFT);
if (result < 0)
goto out;
@@ -793,7 +814,7 @@ static int kimage_load_crash_segment(struct kimage *image,
char *ptr;
size_t uchunk, mchunk;
- page = pfn_to_page(maddr >> PAGE_SHIFT);
+ page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
if (!page) {
result = -ENOMEM;
goto out;
@@ -921,7 +942,7 @@ void __weak crash_free_reserved_phys_range(unsigned long begin,
unsigned long addr;
for (addr = begin; addr < end; addr += PAGE_SIZE)
- free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
+ free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
@@ -1374,7 +1395,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
void __weak arch_crash_save_vmcoreinfo(void)
{}
-unsigned long __weak paddr_vmcoreinfo_note(void)
+phys_addr_t __weak paddr_vmcoreinfo_note(void)
{
return __pa((unsigned long)(char *)&vmcoreinfo_note);
}
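A small worked example of the new bound in sanity_check_segment_list() above, with made-up numbers (user-space arithmetic, not kernel code): each segment, and the sum of all segments, must stay below totalram_pages / 2.

#include <stdio.h>

#define PAGE_SIZE_EX	4096UL
#define PAGE_COUNT_EX(x)	(((x) + PAGE_SIZE_EX - 1) / PAGE_SIZE_EX)

int main(void)
{
	unsigned long totalram_pages = 2UL << 20;	/* pretend 8 GiB of 4K pages */
	unsigned long segsz = 1UL << 30;		/* one 1 GiB segment */

	printf("segment pages: %lu, limit: %lu -> %s\n",
	       PAGE_COUNT_EX(segsz), totalram_pages / 2,
	       PAGE_COUNT_EX(segsz) > totalram_pages / 2 ? "rejected" : "ok");
	return 0;
}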
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 152da4a48867..ee1bc1bb8feb 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -101,7 +101,7 @@ KERNEL_ATTR_RO(kexec_loaded);
static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", !!kexec_crash_image);
+ return sprintf(buf, "%d\n", kexec_crash_loaded());
}
KERNEL_ATTR_RO(kexec_crash_loaded);
@@ -128,8 +128,8 @@ KERNEL_ATTR_RW(kexec_crash_size);
static ssize_t vmcoreinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%lx %x\n",
- paddr_vmcoreinfo_note(),
+ phys_addr_t vmcore_base = paddr_vmcoreinfo_note();
+ return sprintf(buf, "%pa %x\n", &vmcore_base,
(unsigned int)sizeof(vmcoreinfo_note));
}
KERNEL_ATTR_RO(vmcoreinfo);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 5c2bc1052691..8bbe50704621 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -309,7 +309,7 @@ static int klp_write_object_relocations(struct module *pmod,
break;
}
- module_enable_ro(pmod);
+ module_enable_ro(pmod, true);
return ret;
}
diff --git a/kernel/module.c b/kernel/module.c
index 5f71aa63ed2a..529efae9f481 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -60,6 +60,7 @@
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
+#include <linux/dynamic_debug.h>
#include <uapi/linux/module.h>
#include "module-internal.h"
@@ -264,7 +265,7 @@ static void module_assert_mutex_or_preempt(void)
if (unlikely(!debug_locks))
return;
- WARN_ON(!rcu_read_lock_sched_held() &&
+ WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
!lockdep_is_held(&module_mutex));
#endif
}
@@ -336,7 +337,7 @@ static inline void add_taint_module(struct module *mod, unsigned flag,
* A thread that wants to hold a reference to a module only while it
* is running can call this to safely exit. nfsd and lockd use this.
*/
-void __module_put_and_exit(struct module *mod, long code)
+void __noreturn __module_put_and_exit(struct module *mod, long code)
{
module_put(mod);
do_exit(code);
@@ -1693,8 +1694,7 @@ static int module_add_modinfo_attrs(struct module *mod)
temp_attr = mod->modinfo_attrs;
for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
- if (!attr->test ||
- (attr->test && attr->test(mod))) {
+ if (!attr->test || attr->test(mod)) {
memcpy(temp_attr, attr, sizeof(*temp_attr));
sysfs_attr_init(&temp_attr->attr);
error = sysfs_create_file(&mod->mkobj.kobj,
@@ -1858,10 +1858,11 @@ static void mod_sysfs_teardown(struct module *mod)
* from modification and any data from execution.
*
* General layout of module is:
- * [text] [read-only-data] [writable data]
- * text_size -----^ ^ ^
- * ro_size ------------------------| |
- * size -------------------------------------------|
+ * [text] [read-only-data] [ro-after-init] [writable data]
+ * text_size -----^ ^ ^ ^
+ * ro_size ------------------------| | |
+ * ro_after_init_size -----------------------------| |
+ * size -----------------------------------------------------------|
*
* These values are always page-aligned (as is base)
*/
@@ -1884,14 +1885,24 @@ static void frob_rodata(const struct module_layout *layout,
(layout->ro_size - layout->text_size) >> PAGE_SHIFT);
}
+static void frob_ro_after_init(const struct module_layout *layout,
+ int (*set_memory)(unsigned long start, int num_pages))
+{
+ BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
+ BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
+ BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
+ set_memory((unsigned long)layout->base + layout->ro_size,
+ (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
+}
+
static void frob_writable_data(const struct module_layout *layout,
int (*set_memory)(unsigned long start, int num_pages))
{
BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
- BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
+ BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
- set_memory((unsigned long)layout->base + layout->ro_size,
- (layout->size - layout->ro_size) >> PAGE_SHIFT);
+ set_memory((unsigned long)layout->base + layout->ro_after_init_size,
+ (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}
/* livepatching wants to disable read-only so it can frob module. */
@@ -1899,21 +1910,26 @@ void module_disable_ro(const struct module *mod)
{
frob_text(&mod->core_layout, set_memory_rw);
frob_rodata(&mod->core_layout, set_memory_rw);
+ frob_ro_after_init(&mod->core_layout, set_memory_rw);
frob_text(&mod->init_layout, set_memory_rw);
frob_rodata(&mod->init_layout, set_memory_rw);
}
-void module_enable_ro(const struct module *mod)
+void module_enable_ro(const struct module *mod, bool after_init)
{
frob_text(&mod->core_layout, set_memory_ro);
frob_rodata(&mod->core_layout, set_memory_ro);
frob_text(&mod->init_layout, set_memory_ro);
frob_rodata(&mod->init_layout, set_memory_ro);
+
+ if (after_init)
+ frob_ro_after_init(&mod->core_layout, set_memory_ro);
}
static void module_enable_nx(const struct module *mod)
{
frob_rodata(&mod->core_layout, set_memory_nx);
+ frob_ro_after_init(&mod->core_layout, set_memory_nx);
frob_writable_data(&mod->core_layout, set_memory_nx);
frob_rodata(&mod->init_layout, set_memory_nx);
frob_writable_data(&mod->init_layout, set_memory_nx);
@@ -1922,6 +1938,7 @@ static void module_enable_nx(const struct module *mod)
static void module_disable_nx(const struct module *mod)
{
frob_rodata(&mod->core_layout, set_memory_x);
+ frob_ro_after_init(&mod->core_layout, set_memory_x);
frob_writable_data(&mod->core_layout, set_memory_x);
frob_rodata(&mod->init_layout, set_memory_x);
frob_writable_data(&mod->init_layout, set_memory_x);
@@ -1964,6 +1981,8 @@ static void disable_ro_nx(const struct module_layout *layout)
frob_text(layout, set_memory_rw);
frob_rodata(layout, set_memory_rw);
frob_rodata(layout, set_memory_x);
+ frob_ro_after_init(layout, set_memory_rw);
+ frob_ro_after_init(layout, set_memory_x);
frob_writable_data(layout, set_memory_x);
}
@@ -2306,6 +2325,7 @@ static void layout_sections(struct module *mod, struct load_info *info)
* finder in the two loops below */
{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
+ { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
};
@@ -2337,7 +2357,11 @@ static void layout_sections(struct module *mod, struct load_info *info)
mod->core_layout.size = debug_align(mod->core_layout.size);
mod->core_layout.ro_size = mod->core_layout.size;
break;
- case 3: /* whole core */
+ case 2: /* RO after init */
+ mod->core_layout.size = debug_align(mod->core_layout.size);
+ mod->core_layout.ro_after_init_size = mod->core_layout.size;
+ break;
+ case 4: /* whole core */
mod->core_layout.size = debug_align(mod->core_layout.size);
break;
}
@@ -2367,7 +2391,14 @@ static void layout_sections(struct module *mod, struct load_info *info)
mod->init_layout.size = debug_align(mod->init_layout.size);
mod->init_layout.ro_size = mod->init_layout.size;
break;
- case 3: /* whole init */
+ case 2:
+ /*
+ * RO after init doesn't apply to init_layout (only
+ * core_layout), so it just takes the value of ro_size.
+ */
+ mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
+ break;
+ case 4: /* whole init */
mod->init_layout.size = debug_align(mod->init_layout.size);
break;
}
@@ -2687,13 +2718,18 @@ static inline void kmemleak_load_module(const struct module *mod,
#endif
#ifdef CONFIG_MODULE_SIG
-static int module_sig_check(struct load_info *info)
+static int module_sig_check(struct load_info *info, int flags)
{
int err = -ENOKEY;
const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
const void *mod = info->hdr;
- if (info->len > markerlen &&
+ /*
+ * Require flags == 0, as a module with version information
+ * removed is no longer the module that was signed
+ */
+ if (flags == 0 &&
+ info->len > markerlen &&
memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
/* We truncate the module to discard the signature */
info->len -= markerlen;
@@ -2712,7 +2748,7 @@ static int module_sig_check(struct load_info *info)
return err;
}
#else /* !CONFIG_MODULE_SIG */
-static int module_sig_check(struct load_info *info)
+static int module_sig_check(struct load_info *info, int flags)
{
return 0;
}
@@ -2920,8 +2956,12 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
return -ENOEXEC;
}
- if (!get_modinfo(info, "intree"))
+ if (!get_modinfo(info, "intree")) {
+ if (!test_taint(TAINT_OOT_MODULE))
+ pr_warn("%s: loading out-of-tree module taints kernel.\n",
+ mod->name);
add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
+ }
if (get_modinfo(info, "staging")) {
add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -3090,6 +3130,8 @@ static int move_module(struct module *mod, struct load_info *info)
static int check_module_license_and_versions(struct module *mod)
{
+ int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
+
/*
* ndiswrapper is under GPL by itself, but loads proprietary modules.
* Don't use add_taint_module(), as it would prevent ndiswrapper from
@@ -3108,6 +3150,9 @@ static int check_module_license_and_versions(struct module *mod)
add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
LOCKDEP_NOW_UNRELIABLE);
+ if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
+ pr_warn("%s: module license taints kernel.\n", mod->name);
+
#ifdef CONFIG_MODVERSIONS
if ((mod->num_syms && !mod->crcs)
|| (mod->num_gpl_syms && !mod->gpl_crcs)
@@ -3155,16 +3200,41 @@ int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
return 0;
}
+/* module_blacklist is a comma-separated list of module names */
+static char *module_blacklist;
+static bool blacklisted(char *module_name)
+{
+ const char *p;
+ size_t len;
+
+ if (!module_blacklist)
+ return false;
+
+ for (p = module_blacklist; *p; p += len) {
+ len = strcspn(p, ",");
+ if (strlen(module_name) == len && !memcmp(module_name, p, len))
+ return true;
+ if (p[len] == ',')
+ len++;
+ }
+ return false;
+}
+core_param(module_blacklist, module_blacklist, charp, 0400);
+
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
/* Module within temporary copy. */
struct module *mod;
+ unsigned int ndx;
int err;
mod = setup_load_info(info, flags);
if (IS_ERR(mod))
return mod;
+ if (blacklisted(mod->name))
+ return ERR_PTR(-EPERM);
+
err = check_modinfo(mod, info, flags);
if (err)
return ERR_PTR(err);
@@ -3178,6 +3248,15 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
/* We will do a special allocation for per-cpu sections later. */
info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
+ /*
+ * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
+ * layout_sections() can put it in the right place.
+ * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
+ */
+ ndx = find_sec(info, ".data..ro_after_init");
+ if (ndx)
+ info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
+
/* Determine total sizes, and put offsets in sh_entsize. For now
this is done generically; there doesn't appear to be any
special cases for the architectures. */
@@ -3344,12 +3423,14 @@ static noinline int do_init_module(struct module *mod)
/* Switch to core kallsyms now init is done: kallsyms may be walking! */
rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
+ module_enable_ro(mod, true);
mod_tree_remove_init(mod);
disable_ro_nx(&mod->init_layout);
module_arch_freeing_init(mod);
mod->init_layout.base = NULL;
mod->init_layout.size = 0;
mod->init_layout.ro_size = 0;
+ mod->init_layout.ro_after_init_size = 0;
mod->init_layout.text_size = 0;
/*
* We want to free module_init, but be aware that kallsyms may be
@@ -3441,8 +3522,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
/* This relies on module_mutex for list integrity. */
module_bug_finalize(info->hdr, info->sechdrs, mod);
- /* Set RO and NX regions */
- module_enable_ro(mod);
+ module_enable_ro(mod, false);
module_enable_nx(mod);
/* Mark state as coming so strong_try_module_get() ignores us,
@@ -3498,7 +3578,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
long err;
char *after_dashes;
- err = module_sig_check(info);
+ err = module_sig_check(info, flags);
if (err)
goto free_copy;
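A hedged sketch of what the ro_after_init handling above protects on the module side; it assumes the __ro_after_init annotation from <linux/cache.h>, which places data in the .data..ro_after_init section marked in this patch. Such data may be written while the module initializes and becomes read-only once do_init_module() calls module_enable_ro(mod, true).

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>

static unsigned long demo_table_size __ro_after_init;	/* hypothetical */

static int __init demo_init(void)
{
	demo_table_size = 128;	/* writes are only legal until init completes */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");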
diff --git a/kernel/panic.c b/kernel/panic.c
index 8aa74497cc5a..ca8cea1ef673 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -108,6 +108,7 @@ void panic(const char *fmt, ...)
long i, i_next = 0;
int state = 0;
int old_cpu, this_cpu;
+ bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
@@ -160,7 +161,7 @@ void panic(const char *fmt, ...)
*
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
- if (!crash_kexec_post_notifiers) {
+ if (!_crash_kexec_post_notifiers) {
printk_nmi_flush_on_panic();
__crash_kexec(NULL);
}
@@ -191,7 +192,7 @@ void panic(const char *fmt, ...)
*
* Bypass the panic_cpu check and call __crash_kexec directly.
*/
- if (crash_kexec_post_notifiers)
+ if (_crash_kexec_post_notifiers)
__crash_kexec(NULL);
bust_spinlocks(0);
@@ -571,13 +572,7 @@ EXPORT_SYMBOL(__stack_chk_fail);
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
-
-static int __init setup_crash_kexec_post_notifiers(char *s)
-{
- crash_kexec_post_notifiers = true;
- return 0;
-}
-early_param("crash_kexec_post_notifiers", setup_crash_kexec_post_notifiers);
+core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
static int __init oops_setup(char *s)
{
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index d4de33934dac..eea6dbc2d8cf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -26,7 +26,6 @@
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/interrupt.h> /* For in_interrupt() */
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
@@ -48,7 +47,7 @@
#include <linux/uio.h>
#include <asm/uaccess.h>
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
@@ -86,6 +85,111 @@ static struct lockdep_map console_lock_dep_map = {
};
#endif
+enum devkmsg_log_bits {
+ __DEVKMSG_LOG_BIT_ON = 0,
+ __DEVKMSG_LOG_BIT_OFF,
+ __DEVKMSG_LOG_BIT_LOCK,
+};
+
+enum devkmsg_log_masks {
+ DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON),
+ DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF),
+ DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK),
+};
+
+/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
+#define DEVKMSG_LOG_MASK_DEFAULT 0
+
+static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
+
+static int __control_devkmsg(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strncmp(str, "on", 2)) {
+ devkmsg_log = DEVKMSG_LOG_MASK_ON;
+ return 2;
+ } else if (!strncmp(str, "off", 3)) {
+ devkmsg_log = DEVKMSG_LOG_MASK_OFF;
+ return 3;
+ } else if (!strncmp(str, "ratelimit", 9)) {
+ devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
+ return 9;
+ }
+ return -EINVAL;
+}
+
+static int __init control_devkmsg(char *str)
+{
+ if (__control_devkmsg(str) < 0)
+ return 1;
+
+ /*
+ * Set sysctl string accordingly:
+ */
+ if (devkmsg_log == DEVKMSG_LOG_MASK_ON) {
+ memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE);
+ strncpy(devkmsg_log_str, "on", 2);
+ } else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF) {
+ memset(devkmsg_log_str, 0, DEVKMSG_STR_MAX_SIZE);
+ strncpy(devkmsg_log_str, "off", 3);
+ }
+ /* else "ratelimit" which is set by default. */
+
+ /*
+	 * Sysctl cannot change it anymore. The kernel command line setting of
+	 * this parameter forces the setting to be permanent throughout the
+	 * runtime of the system. This is a precaution against userspace
+	 * trying to change the setting out from under us at runtime.
+ */
+ devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
+
+ return 0;
+}
+__setup("printk.devkmsg=", control_devkmsg);
+
+char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
+
+int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ char old_str[DEVKMSG_STR_MAX_SIZE];
+ unsigned int old;
+ int err;
+
+ if (write) {
+ if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
+ return -EINVAL;
+
+ old = devkmsg_log;
+ strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
+ }
+
+ err = proc_dostring(table, write, buffer, lenp, ppos);
+ if (err)
+ return err;
+
+ if (write) {
+ err = __control_devkmsg(devkmsg_log_str);
+
+ /*
+ * Do not accept an unknown string OR a known string with
+ * trailing crap...
+ */
+ if (err < 0 || (err + 1 != *lenp)) {
+
+ /* ... and restore old setting. */
+ devkmsg_log = old;
+ strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);
+
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/*
* Number of registered extended console drivers.
*
@@ -614,6 +718,7 @@ struct devkmsg_user {
u64 seq;
u32 idx;
enum log_flags prev;
+ struct ratelimit_state rs;
struct mutex lock;
char buf[CONSOLE_EXT_LOG_MAX];
};
@@ -623,11 +728,24 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
char *buf, *line;
int level = default_message_loglevel;
int facility = 1; /* LOG_USER */
+ struct file *file = iocb->ki_filp;
+ struct devkmsg_user *user = file->private_data;
size_t len = iov_iter_count(from);
ssize_t ret = len;
- if (len > LOG_LINE_MAX)
+ if (!user || len > LOG_LINE_MAX)
return -EINVAL;
+
+ /* Ignore when user logging is disabled. */
+ if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
+ return len;
+
+ /* Ratelimit when not explicitly enabled. */
+ if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
+ if (!___ratelimit(&user->rs, current->comm))
+ return ret;
+ }
+
buf = kmalloc(len+1, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
@@ -800,19 +918,24 @@ static int devkmsg_open(struct inode *inode, struct file *file)
struct devkmsg_user *user;
int err;
- /* write-only does not need any file context */
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- return 0;
+ if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
+ return -EPERM;
- err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
- SYSLOG_FROM_READER);
- if (err)
- return err;
+ /* write-only does not need any file context */
+ if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+ err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
+ SYSLOG_FROM_READER);
+ if (err)
+ return err;
+ }
user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
if (!user)
return -ENOMEM;
+ ratelimit_default_init(&user->rs);
+ ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
+
mutex_init(&user->lock);
raw_spin_lock_irq(&logbuf_lock);
@@ -831,6 +954,8 @@ static int devkmsg_release(struct inode *inode, struct file *file)
if (!user)
return 0;
+ ratelimit_state_exit(&user->rs);
+
mutex_destroy(&user->lock);
kfree(user);
return 0;
@@ -986,6 +1111,11 @@ module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
"ignore loglevel setting (prints all kernel messages to the console)");
+static bool suppress_message_printing(int level)
+{
+ return (level >= console_loglevel && !ignore_loglevel);
+}
+
#ifdef CONFIG_BOOT_PRINTK_DELAY
static int boot_delay; /* msecs delay after each printk during bootup */
@@ -1015,7 +1145,7 @@ static void boot_delay_msec(int level)
unsigned long timeout;
if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
- || (level >= console_loglevel && !ignore_loglevel)) {
+ || suppress_message_printing(level)) {
return;
}
@@ -1439,8 +1569,6 @@ static void call_console_drivers(int level,
trace_console(text, len);
- if (level >= console_loglevel && !ignore_loglevel)
- return;
if (!console_drivers)
return;
@@ -1888,6 +2016,7 @@ static void call_console_drivers(int level,
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
+static bool suppress_message_printing(int level) { return false; }
/* Still needs to be defined for users */
DEFINE_PER_CPU(printk_func_t, printk_func);
@@ -2167,6 +2296,13 @@ static void console_cont_flush(char *text, size_t size)
if (!cont.len)
goto out;
+ if (suppress_message_printing(cont.level)) {
+ cont.cons = cont.len;
+ if (cont.flushed)
+ cont.len = 0;
+ goto out;
+ }
+
/*
* We still queue earlier records, likely because the console was
* busy. The earlier ones need to be printed before this one, we
@@ -2270,10 +2406,13 @@ skip:
break;
msg = log_from_idx(console_idx);
- if (msg->flags & LOG_NOCONS) {
+ level = msg->level;
+ if ((msg->flags & LOG_NOCONS) ||
+ suppress_message_printing(level)) {
/*
* Skip record we have buffered and already printed
- * directly to the console when we received it.
+ * directly to the console when we received it, and
+ * records whose level is above the console loglevel.
*/
console_idx = log_next(console_idx);
console_seq++;
@@ -2287,7 +2426,6 @@ skip:
goto skip;
}
- level = msg->level;
len += msg_print_text(msg, console_prev, false,
text + len, sizeof(text) - len);
if (nr_ext_console_drivers) {
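
The printk hunks above introduce a new control for userspace writes to /dev/kmsg: a printk.devkmsg= boot parameter (values "on", "off" or "ratelimit", locked for the rest of the boot) and a kernel.printk_devkmsg sysctl handled by devkmsg_sysctl_set_loglvl() and wired up in the kernel/sysctl.c hunk further down. A minimal userspace sketch of flipping the sysctl at runtime; this is not part of the patch and assumes the usual /proc/sys mount:

	/* Toggle /dev/kmsg writes off at runtime; fails with EINVAL if the
	 * setting was locked by the printk.devkmsg= boot parameter. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/sys/kernel/printk_devkmsg", O_WRONLY);

		/* The handler's length check expects the trailing newline. */
		if (fd < 0 || write(fd, "off\n", 4) != 4)
			perror("printk_devkmsg");
		if (fd >= 0)
			close(fd);
		return 0;
	}
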
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index d49bfa1e53e6..1d3b7665d0be 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -585,8 +585,8 @@ static int ptrace_setoptions(struct task_struct *child, unsigned long data)
return -EINVAL;
if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
- if (!config_enabled(CONFIG_CHECKPOINT_RESTORE) ||
- !config_enabled(CONFIG_SECCOMP))
+ if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
+ !IS_ENABLED(CONFIG_SECCOMP))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
diff --git a/kernel/relay.c b/kernel/relay.c
index 04d7cf3ef8cf..d797502140b9 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -451,6 +451,13 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
if (!dentry)
goto free_buf;
relay_set_buf_dentry(buf, dentry);
+ } else {
+ /* Only retrieve global info, nothing more, nothing less */
+ dentry = chan->cb->create_buf_file(NULL, NULL,
+ S_IRUSR, buf,
+ &chan->is_global);
+ if (WARN_ON(dentry))
+ goto free_buf;
}
buf->cpu = cpu;
@@ -562,6 +569,10 @@ static int relay_hotcpu_callback(struct notifier_block *nb,
* attributes specified. The created channel buffer files
* will be named base_filename0...base_filenameN-1. File
* permissions will be %S_IRUSR.
+ *
+ * If opening a buffer (@parent = NULL) that you later wish to register
+ * in a filesystem, call relay_late_setup_files() once the @parent dentry
+ * is available.
*/
struct rchan *relay_open(const char *base_filename,
struct dentry *parent,
@@ -640,8 +651,12 @@ static void __relay_set_buf_dentry(void *info)
*
* Returns 0 if successful, non-zero otherwise.
*
- * Use to setup files for a previously buffer-only channel.
- * Useful to do early tracing in kernel, before VFS is up, for example.
+ * Use to setup files for a previously buffer-only channel created
+ * by relay_open() with a NULL parent dentry.
+ *
+ * For example, this is useful for performing early tracing in the kernel,
+ * before VFS is up and then exposing the early results once the dentry
+ * is available.
*/
int relay_late_setup_files(struct rchan *chan,
const char *base_filename,
@@ -666,6 +681,20 @@ int relay_late_setup_files(struct rchan *chan,
}
chan->has_base_filename = 1;
chan->parent = parent;
+
+ if (chan->is_global) {
+ err = -EINVAL;
+ if (!WARN_ON_ONCE(!chan->buf[0])) {
+ dentry = relay_create_buf_file(chan, chan->buf[0], 0);
+ if (dentry && !WARN_ON_ONCE(!chan->is_global)) {
+ relay_set_buf_dentry(chan->buf[0], dentry);
+ err = 0;
+ }
+ }
+ mutex_unlock(&relay_channels_mutex);
+ return err;
+ }
+
curr_cpu = get_cpu();
/*
* The CPU hotplug notifier ran before us and created buffers with
@@ -706,6 +735,7 @@ int relay_late_setup_files(struct rchan *chan,
return err;
}
+EXPORT_SYMBOL_GPL(relay_late_setup_files);
/**
* relay_switch_subbuf - switch to a new sub-buffer
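
The relay changes above teach a parent-less relay_open() to handle global buffers and export relay_late_setup_files() for modules. A hedged sketch of the intended two-step usage, with illustrative names and debugfs as the backing filesystem; it is not part of the patch:

	#include <linux/debugfs.h>
	#include <linux/relay.h>

	static struct dentry *demo_create_buf_file(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
	{
		/* Called with a NULL filename only to query globalness (the
		 * new buffer-only path above); create no file in that case. */
		if (!filename)
			return NULL;
		return debugfs_create_file(filename, mode, parent, buf,
					   &relay_file_operations);
	}

	static int demo_remove_buf_file(struct dentry *dentry)
	{
		debugfs_remove(dentry);
		return 0;
	}

	static struct rchan_callbacks demo_relay_cb = {
		.create_buf_file = demo_create_buf_file,
		.remove_buf_file = demo_remove_buf_file,
	};

	/*
	 * Early boot:   chan = relay_open(NULL, NULL, SUBBUF_SIZE, N_SUBBUFS,
	 *                                 &demo_relay_cb, NULL);
	 * Debugfs up:   relay_late_setup_files(chan, "demo", debugfs_dir);
	 */
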
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 54d15eb2b701..ef6c6c3f9d8a 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -347,7 +347,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
struct seccomp_filter *sfilter;
int ret;
- const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE);
+ const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
@@ -542,7 +542,7 @@ void secure_computing_strict(int this_syscall)
{
int mode = current->seccomp.mode;
- if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
+ if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return;
@@ -655,7 +655,7 @@ int __secure_computing(const struct seccomp_data *sd)
int mode = current->seccomp.mode;
int this_syscall;
- if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
+ if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return 0;
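
config_enabled() was an internal kconfig.h helper; IS_ENABLED() is the documented test, is true for both built-in (=y) and modular (=m) options, and still folds to a compile-time constant so the dead branch is discarded. A trivial illustration, not taken from the patch:

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE))
		pr_debug("checkpoint/restore support is available\n");
	else
		pr_debug("checkpoint/restore support is compiled out\n");
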
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 53954631a4e1..b43d0b27c1fe 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -814,6 +814,13 @@ static struct ctl_table kern_table[] = {
.extra2 = &ten_thousand,
},
{
+ .procname = "printk_devkmsg",
+ .data = devkmsg_log_str,
+ .maxlen = DEVKMSG_STR_MAX_SIZE,
+ .mode = 0644,
+ .proc_handler = devkmsg_sysctl_set_loglvl,
+ },
+ {
.procname = "dmesg_restrict",
.data = &dmesg_restrict,
.maxlen = sizeof(int),
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 6ab4842b00e8..d513051fcca2 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -29,7 +29,7 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
struct callback_head *head;
do {
- head = ACCESS_ONCE(task->task_works);
+ head = READ_ONCE(task->task_works);
if (unlikely(head == &work_exited))
return -ESRCH;
work->next = head;
@@ -57,6 +57,9 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
struct callback_head **pprev = &task->task_works;
struct callback_head *work;
unsigned long flags;
+
+ if (likely(!task->task_works))
+ return NULL;
/*
* If cmpxchg() fails we continue without updating pprev.
* Either we raced with task_work_add() which added the
@@ -64,8 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
* we raced with task_work_run(), *pprev == NULL/exited.
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
- while ((work = ACCESS_ONCE(*pprev))) {
- smp_read_barrier_depends();
+ while ((work = lockless_dereference(*pprev))) {
if (work->func != func)
pprev = &work->next;
else if (cmpxchg(pprev, work, work->next) == work)
@@ -95,7 +97,7 @@ void task_work_run(void)
* work_exited unless the list is empty.
*/
do {
- work = ACCESS_ONCE(task->task_works);
+ work = READ_ONCE(task->task_works);
head = !work && (task->flags & PF_EXITING) ?
&work_exited : NULL;
} while (cmpxchg(&task->task_works, work, head) != work);
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 979e7bfbde7a..d0a1617b52b4 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,4 +1,8 @@
+# We are fully aware of the dangers of __builtin_return_address()
+FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
+KBUILD_CFLAGS += $(FRAME_CFLAGS)
+
# Do not instrument the tracer itself:
ifdef CONFIG_FUNCTION_TRACER
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index fb345cd11883..7598e6ca817a 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -776,7 +776,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
return;
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_rw, what, error, 0, NULL);
+ bio_op(bio), bio->bi_opf, what, error, 0, NULL);
}
static void blk_add_trace_bio_bounce(void *ignore,
@@ -881,7 +881,7 @@ static void blk_add_trace_split(void *ignore,
__be64 rpdu = cpu_to_be64(pdu);
__blk_add_trace(bt, bio->bi_iter.bi_sector,
- bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw,
+ bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
&rpdu);
}
@@ -915,7 +915,7 @@ static void blk_add_trace_bio_remap(void *ignore,
r.sector_from = cpu_to_be64(from);
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
+ bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
sizeof(r), &r);
}
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0c05b8a99806..f3a960ed75a1 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1441,6 +1441,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
goto out;
}
+ if (hist_data->attrs->pause)
+ data->paused = true;
+
if (named_data) {
destroy_hist_data(data->private_data);
data->private_data = named_data->private_data;
@@ -1448,9 +1451,6 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
data->ops = &event_hist_trigger_named_ops;
}
- if (hist_data->attrs->pause)
- data->paused = true;
-
if (data->ops->init) {
ret = data->ops->init(data->ops, data);
if (ret < 0)
@@ -1500,9 +1500,9 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
static void hist_unreg_all(struct trace_event_file *file)
{
- struct event_trigger_data *test;
+ struct event_trigger_data *test, *n;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry_safe(test, n, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
list_del_rcu(&test->list);
trace_event_trigger_enable_disable(file, 0);
@@ -1699,9 +1699,9 @@ hist_enable_get_trigger_ops(char *cmd, char *param)
static void hist_enable_unreg_all(struct trace_event_file *file)
{
- struct event_trigger_data *test;
+ struct event_trigger_data *test, *n;
- list_for_each_entry_rcu(test, &file->triggers, list) {
+ list_for_each_entry_safe(test, n, &file->triggers, list) {
if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
list_del_rcu(&test->list);
update_cond_flag(file);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f07842e2d69f..2307d7c89dac 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -709,6 +709,8 @@ config KCOV
bool "Code coverage for fuzzing"
depends on ARCH_HAS_KCOV
select DEBUG_FS
+ select GCC_PLUGINS if !COMPILE_TEST
+ select GCC_PLUGIN_SANCOV if !COMPILE_TEST
help
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
@@ -719,6 +721,17 @@ config KCOV
For more details, see Documentation/kcov.txt.
+config KCOV_INSTRUMENT_ALL
+ bool "Instrument all code by default"
+ depends on KCOV
+ default y if KCOV
+ help
+	  If you are doing generic system call fuzzing (e.g. with syzkaller),
+	  then you will want to instrument the whole kernel and you should
+	  say y here. If you are doing more targeted fuzzing (e.g.
+	  filesystem fuzzing with AFL), then you will want to enable coverage
+	  for more specific subsets of files, and should say n here.
+
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
depends on DEBUG_KERNEL
diff --git a/lib/crc32.c b/lib/crc32.c
index 9a907d489d95..7fbd1a112b9d 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -979,7 +979,6 @@ static int __init crc32c_test(void)
int i;
int errors = 0;
int bytes = 0;
- struct timespec start, stop;
u64 nsec;
unsigned long flags;
@@ -999,20 +998,17 @@ static int __init crc32c_test(void)
local_irq_save(flags);
local_irq_disable();
- getnstimeofday(&start);
+ nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
test[i].start, test[i].length))
errors++;
}
- getnstimeofday(&stop);
+ nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
local_irq_enable();
- nsec = stop.tv_nsec - start.tv_nsec +
- 1000000000 * (stop.tv_sec - start.tv_sec);
-
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
if (errors)
@@ -1065,7 +1061,6 @@ static int __init crc32_test(void)
int i;
int errors = 0;
int bytes = 0;
- struct timespec start, stop;
u64 nsec;
unsigned long flags;
@@ -1088,7 +1083,7 @@ static int __init crc32_test(void)
local_irq_save(flags);
local_irq_disable();
- getnstimeofday(&start);
+ nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
test[i].start, test[i].length))
@@ -1098,14 +1093,11 @@ static int __init crc32_test(void)
test[i].start, test[i].length))
errors++;
}
- getnstimeofday(&stop);
+ nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
local_irq_enable();
- nsec = stop.tv_nsec - start.tv_nsec +
- 1000000000 * (stop.tv_sec - start.tv_sec);
-
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS);
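
The crc32 self-test timing now uses ktime_get_ns(), which returns monotonic nanoseconds directly and removes the struct timespec arithmetic. The resulting pattern, as used in both hunks above, is simply:

	u64 nsec;

	nsec = ktime_get_ns();
	/* ... code being timed ... */
	nsec = ktime_get_ns() - nsec;	/* elapsed nanoseconds */
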
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index 72145646857e..3d766e78fbe2 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -10,7 +10,7 @@
static void *dma_noop_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
@@ -22,7 +22,7 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
static void dma_noop_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -30,13 +30,14 @@ static void dma_noop_free(struct device *dev, size_t size,
static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return page_to_phys(page) + offset;
}
static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index fe42b6ec3f0c..da796e2dc4f5 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -188,6 +188,13 @@ static int ddebug_change(const struct ddebug_query *query,
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
+#ifdef HAVE_JUMP_LABEL
+ if (dp->flags & _DPRINTK_FLAGS_PRINT) {
+ if (!(flags & _DPRINTK_FLAGS_PRINT))
+ static_branch_disable(&dp->key.dd_key_true);
+ } else if (flags & _DPRINTK_FLAGS_PRINT)
+ static_branch_enable(&dp->key.dd_key_true);
+#endif
dp->flags = newflags;
vpr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index c27e269210c4..a816f3a80625 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -29,8 +29,7 @@ again:
index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
- /* we could do more effectively */
- start = index + 1;
+ start = ALIGN(shift + index, boundary_size) - shift;
goto again;
}
bitmap_set(map, index, nr);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index c6272ae2015e..5a0f75a3bf01 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -363,6 +363,9 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
lzeros = 0;
}
+ miter.consumed = lzeros;
+ sg_miter_stop(&miter);
+
nbytes -= lzeros;
nbits = nbytes * 8;
if (nbits > MAX_EXTERN_MPI_BITS) {
@@ -390,7 +393,10 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
z %= BYTES_PER_MPI_LIMB;
- for (;;) {
+ while (sg_miter_next(&miter)) {
+ buff = miter.addr;
+ len = miter.length;
+
for (x = 0; x < len; x++) {
a <<= 8;
a |= *buff++;
@@ -400,12 +406,6 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
}
}
z += x;
-
- if (!sg_miter_next(&miter))
- break;
-
- buff = miter.addr;
- len = miter.length;
}
return val;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 61b8fb529cef..1b7bf7314141 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root)
/*
* Even if the caller has preloaded, try to allocate from the
- * cache first for the new node to get accounted.
+ * cache first for the new node to get accounted to the memory
+ * cgroup.
*/
ret = kmem_cache_alloc(radix_tree_node_cachep,
- gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
+ gfp_mask | __GFP_NOWARN);
if (ret)
goto out;
@@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
kmemleak_update_trace(ret);
goto out;
}
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- gfp_mask | __GFP_ACCOUNT);
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
BUG_ON(radix_tree_is_internal_node(ret));
return ret;
@@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr)
struct radix_tree_node *node;
int ret = -ENOMEM;
+ /*
+ * Nodes preloaded by one cgroup can be used by another cgroup, so
+ * they should never be accounted to any particular memory cgroup.
+ */
+ gfp_mask &= ~__GFP_ACCOUNT;
+
preempt_disable();
rtp = this_cpu_ptr(&radix_tree_preloads);
while (rtp->nr < nr) {
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 2c5de86460c5..08f8043cac61 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -46,12 +46,14 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
rs->begin = jiffies;
if (time_is_before_jiffies(rs->begin + rs->interval)) {
- if (rs->missed)
- printk(KERN_WARNING "%s: %d callbacks suppressed\n",
- func, rs->missed);
+ if (rs->missed) {
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
+ rs->missed = 0;
+ }
+ }
rs->begin = jiffies;
rs->printed = 0;
- rs->missed = 0;
}
if (rs->burst && rs->burst > rs->printed) {
rs->printed++;
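
With RATELIMIT_MSG_ON_RELEASE set, ___ratelimit() no longer prints or resets the suppressed-callbacks count itself; the owner of the state is expected to report it at teardown, which is what the devkmsg_open()/devkmsg_release() hunks above do via ratelimit_state_exit(). A condensed sketch of that pairing, with do_throttled_work() as an illustrative placeholder:

	struct ratelimit_state rs;

	ratelimit_default_init(&rs);
	ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);

	if (___ratelimit(&rs, current->comm))
		do_throttled_work();		/* illustrative */

	/* Teardown reports how many calls were suppressed, if any. */
	ratelimit_state_exit(&rs);
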
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 33f655ef48cd..9c5fe8110413 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -40,8 +40,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
unsigned long c, data;
/* Fall back to byte-at-a-time if we get a page fault */
- if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
- break;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
*(unsigned long *)(dst+res) = c;
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
@@ -56,8 +56,7 @@ byte_at_a_time:
while (max) {
char c;
- if (unlikely(unsafe_get_user(c,src+res)))
- return -EFAULT;
+ unsafe_get_user(c,src+res, efault);
dst[res] = c;
if (!c)
return res;
@@ -76,6 +75,7 @@ byte_at_a_time:
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's an EFAULT.
*/
+efault:
return -EFAULT;
}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 2625943625d7..8e105ed4df12 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
src -= align;
max += align;
- if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)src, efault);
c |= aligned_byte_mask(align);
for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
if (unlikely(max <= sizeof(unsigned long)))
break;
max -= sizeof(unsigned long);
- if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
}
res -= align;
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's 0.
*/
+efault:
return 0;
}
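
Both string helpers now use the label-based form of unsafe_get_user(), which jumps to the supplied label on a fault instead of returning an error to test, so the fixup code is written once per function. The shape of such a caller, sketched rather than quoted from the patch:

	/* Sketch of the label-based convention; the exported wrappers place
	 * user_access_begin()/user_access_end() around this sequence. */
	unsigned long word;

	unsafe_get_user(word, (unsigned long __user *)src, efault);
	/* ... use word ... */
	return 0;

efault:
	return -EFAULT;
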
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 76f29ecba8f4..22e13a0e19d7 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -738,7 +738,7 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
@@ -807,7 +807,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unmap_single(hwdev, dev_addr, size, dir);
}
@@ -877,7 +877,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_device);
*/
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -914,7 +914,7 @@ int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
- return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+ return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
}
EXPORT_SYMBOL(swiotlb_map_sg);
@@ -924,7 +924,8 @@ EXPORT_SYMBOL(swiotlb_map_sg);
*/
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -941,7 +942,7 @@ void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
- return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+ return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 8799ae5e2e42..fb0409df1bcf 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -308,7 +308,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data,
return;
ubsan_prologue(&data->location, &flags);
- pr_err("%s address %pk with insufficient space\n",
+ pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
pr_err("for an object of type %s\n", data->type->type_name);
diff --git a/mm/Kconfig b/mm/Kconfig
index c0837845c17c..78a23c5c302d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,6 +187,7 @@ config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
depends on ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on !KASAN
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/Makefile b/mm/Makefile
index fc059666c760..2ca1faf3fa09 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index efe237742074..8fde443f36d7 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -825,6 +825,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
}
EXPORT_SYMBOL(bdi_register_dev);
+int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
+{
+ int rc;
+
+ rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
+ MINOR(owner->devt));
+ if (rc)
+ return rc;
+ bdi->owner = owner;
+ get_device(owner);
+ return 0;
+}
+EXPORT_SYMBOL(bdi_register_owner);
+
/*
* Remove bdi from bdi_list, and ensure that it is no longer visible
*/
@@ -849,6 +863,11 @@ void bdi_unregister(struct backing_dev_info *bdi)
device_unregister(bdi->dev);
bdi->dev = NULL;
}
+
+ if (bdi->owner) {
+ put_device(bdi->owner);
+ bdi->owner = NULL;
+ }
}
void bdi_exit(struct backing_dev_info *bdi)
diff --git a/mm/filemap.c b/mm/filemap.c
index 3083ded98b15..8a287dfc5372 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
* After completing I/O on a page, call this routine to update the page
* flags appropriately
*/
-void page_endio(struct page *page, int rw, int err)
+void page_endio(struct page *page, bool is_write, int err)
{
- if (rw == READ) {
+ if (!is_write) {
if (!err) {
SetPageUptodate(page);
} else {
@@ -897,7 +897,7 @@ void page_endio(struct page *page, int rw, int err)
SetPageError(page);
}
unlock_page(page);
- } else { /* rw == WRITE */
+ } else {
if (err) {
SetPageError(page);
if (page->mapping)
diff --git a/mm/gup.c b/mm/gup.c
index 547741f5f7a7..96b2b2fd0fbd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -723,6 +723,7 @@ retry:
}
return 0;
}
+EXPORT_SYMBOL_GPL(fixup_user_fault);
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f904246a8fd5..b9aa1b0b38b0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2216,6 +2216,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
+
+ /* yield cpu to avoid soft lockup */
+ cond_resched();
+
if (hstate_is_gigantic(h))
ret = alloc_fresh_gigantic_page(h, nodes_allowed);
else
@@ -3938,6 +3942,14 @@ same_page:
return i ? i : -EFAULT;
}
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
@@ -3998,7 +4010,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk.
*/
- flush_tlb_range(vma, start, end);
+ flush_hugetlb_tlb_range(vma, start, end);
mmu_notifier_invalidate_range(mm, start, end);
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
@@ -4306,7 +4318,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
- BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+ BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
return pte;
}
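
flush_hugetlb_tlb_range() defaults to flush_tlb_range(); an architecture with stricter huge-page TLB invalidation requirements overrides it from its own headers. A hedged sketch of such an override, with an illustrative function name:

	/* e.g. in arch/<arch>/include/asm/hugetlb.h (sketch only) */
	#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
	#define flush_hugetlb_tlb_range(vma, start, end) \
		my_arch_flush_huge_tlb_range(vma, start, end)
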
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index b6f99e81bfeb..88af13c00d3c 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -442,11 +442,6 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
kasan_poison_shadow(object,
round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
KASAN_KMALLOC_REDZONE);
- if (cache->flags & SLAB_KASAN) {
- struct kasan_alloc_meta *alloc_info =
- get_alloc_info(cache, object);
- alloc_info->state = KASAN_STATE_INIT;
- }
}
static inline int in_irqentry_text(unsigned long ptr)
@@ -510,6 +505,17 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
return (void *)object + cache->kasan_info.free_meta_offset;
}
+void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
+{
+ struct kasan_alloc_meta *alloc_info;
+
+ if (!(cache->flags & SLAB_KASAN))
+ return;
+
+ alloc_info = get_alloc_info(cache, object);
+ __memset(alloc_info, 0, sizeof(*alloc_info));
+}
+
void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
kasan_kmalloc(cache, object, cache->object_size, flags);
@@ -529,34 +535,26 @@ static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
+ s8 shadow_byte;
+
/* RCU slabs could be legally used after free within the RCU period */
if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
return false;
- if (likely(cache->flags & SLAB_KASAN)) {
- struct kasan_alloc_meta *alloc_info;
- struct kasan_free_meta *free_info;
+ shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
+ if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
+ kasan_report_double_free(cache, object, shadow_byte);
+ return true;
+ }
- alloc_info = get_alloc_info(cache, object);
- free_info = get_free_info(cache, object);
+ kasan_poison_slab_free(cache, object);
- switch (alloc_info->state) {
- case KASAN_STATE_ALLOC:
- alloc_info->state = KASAN_STATE_QUARANTINE;
- quarantine_put(free_info, cache);
- set_track(&free_info->track, GFP_NOWAIT);
- kasan_poison_slab_free(cache, object);
- return true;
- case KASAN_STATE_QUARANTINE:
- case KASAN_STATE_FREE:
- pr_err("Double free");
- dump_stack();
- break;
- default:
- break;
- }
- }
- return false;
+ if (unlikely(!(cache->flags & SLAB_KASAN)))
+ return false;
+
+ set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
+ quarantine_put(get_free_info(cache, object), cache);
+ return true;
}
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
@@ -565,7 +563,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
unsigned long redzone_start;
unsigned long redzone_end;
- if (flags & __GFP_RECLAIM)
+ if (gfpflags_allow_blocking(flags))
quarantine_reduce();
if (unlikely(object == NULL))
@@ -579,14 +577,9 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
kasan_unpoison_shadow(object, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
- if (cache->flags & SLAB_KASAN) {
- struct kasan_alloc_meta *alloc_info =
- get_alloc_info(cache, object);
- alloc_info->state = KASAN_STATE_ALLOC;
- alloc_info->alloc_size = size;
- set_track(&alloc_info->track, flags);
- }
+ if (cache->flags & SLAB_KASAN)
+ set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
@@ -596,7 +589,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
unsigned long redzone_start;
unsigned long redzone_end;
- if (flags & __GFP_RECLAIM)
+ if (gfpflags_allow_blocking(flags))
quarantine_reduce();
if (unlikely(ptr == NULL))
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 31972cdba433..e5c2181fee6f 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -59,13 +59,6 @@ struct kasan_global {
* Structures to keep alloc and free tracks *
*/
-enum kasan_state {
- KASAN_STATE_INIT,
- KASAN_STATE_ALLOC,
- KASAN_STATE_QUARANTINE,
- KASAN_STATE_FREE
-};
-
#define KASAN_STACK_DEPTH 64
struct kasan_track {
@@ -74,9 +67,8 @@ struct kasan_track {
};
struct kasan_alloc_meta {
- struct kasan_track track;
- u32 state : 2; /* enum kasan_state */
- u32 alloc_size : 30;
+ struct kasan_track alloc_track;
+ struct kasan_track free_track;
};
struct qlist_node {
@@ -87,7 +79,6 @@ struct kasan_free_meta {
* Otherwise it might be used for the allocator freelist.
*/
struct qlist_node quarantine_link;
- struct kasan_track track;
};
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
@@ -108,6 +99,8 @@ static inline bool kasan_report_enabled(void)
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
+void kasan_report_double_free(struct kmem_cache *cache, void *object,
+ s8 shadow);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 65793f150d1f..b6728a33a4ac 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -144,13 +144,15 @@ static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
void *object = qlink_to_object(qlink, cache);
- struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
unsigned long flags;
- local_irq_save(flags);
- alloc_info->state = KASAN_STATE_FREE;
+ if (IS_ENABLED(CONFIG_SLAB))
+ local_irq_save(flags);
+
___cache_free(cache, object, _THIS_IP_);
- local_irq_restore(flags);
+
+ if (IS_ENABLED(CONFIG_SLAB))
+ local_irq_restore(flags);
}
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
@@ -196,7 +198,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
void quarantine_reduce(void)
{
- size_t new_quarantine_size;
+ size_t new_quarantine_size, percpu_quarantines;
unsigned long flags;
struct qlist_head to_free = QLIST_INIT;
size_t size_to_free = 0;
@@ -214,7 +216,12 @@ void quarantine_reduce(void)
*/
new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
QUARANTINE_FRACTION;
- new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus();
+ percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
+ if (WARN_ONCE(new_quarantine_size < percpu_quarantines,
+ "Too little memory, disabling global KASAN quarantine.\n"))
+ new_quarantine_size = 0;
+ else
+ new_quarantine_size -= percpu_quarantines;
WRITE_ONCE(quarantine_size, new_quarantine_size);
last = global_quarantine.head;
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 861b9776841a..24c1211fe9d5 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -116,6 +116,26 @@ static inline bool init_task_stack_addr(const void *addr)
sizeof(init_thread_union.stack));
}
+static DEFINE_SPINLOCK(report_lock);
+
+static void kasan_start_report(unsigned long *flags)
+{
+ /*
+ * Make sure we don't end up in loop.
+ */
+ kasan_disable_current();
+ spin_lock_irqsave(&report_lock, *flags);
+ pr_err("==================================================================\n");
+}
+
+static void kasan_end_report(unsigned long *flags)
+{
+ pr_err("==================================================================\n");
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irqrestore(&report_lock, *flags);
+ kasan_enable_current();
+}
+
static void print_track(struct kasan_track *track)
{
pr_err("PID = %u\n", track->pid);
@@ -129,37 +149,33 @@ static void print_track(struct kasan_track *track)
}
}
-static void kasan_object_err(struct kmem_cache *cache, struct page *page,
- void *object, char *unused_reason)
+static void kasan_object_err(struct kmem_cache *cache, void *object)
{
struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
- struct kasan_free_meta *free_info;
dump_stack();
- pr_err("Object at %p, in cache %s\n", object, cache->name);
+ pr_err("Object at %p, in cache %s size: %d\n", object, cache->name,
+ cache->object_size);
+
if (!(cache->flags & SLAB_KASAN))
return;
- switch (alloc_info->state) {
- case KASAN_STATE_INIT:
- pr_err("Object not allocated yet\n");
- break;
- case KASAN_STATE_ALLOC:
- pr_err("Object allocated with size %u bytes.\n",
- alloc_info->alloc_size);
- pr_err("Allocation:\n");
- print_track(&alloc_info->track);
- break;
- case KASAN_STATE_FREE:
- case KASAN_STATE_QUARANTINE:
- pr_err("Object freed, allocated with size %u bytes\n",
- alloc_info->alloc_size);
- free_info = get_free_info(cache, object);
- pr_err("Allocation:\n");
- print_track(&alloc_info->track);
- pr_err("Deallocation:\n");
- print_track(&free_info->track);
- break;
- }
+
+ pr_err("Allocated:\n");
+ print_track(&alloc_info->alloc_track);
+ pr_err("Freed:\n");
+ print_track(&alloc_info->free_track);
+}
+
+void kasan_report_double_free(struct kmem_cache *cache, void *object,
+ s8 shadow)
+{
+ unsigned long flags;
+
+ kasan_start_report(&flags);
+ pr_err("BUG: Double free or freeing an invalid pointer\n");
+ pr_err("Unexpected shadow byte: 0x%hhX\n", shadow);
+ kasan_object_err(cache, object);
+ kasan_end_report(&flags);
}
static void print_address_description(struct kasan_access_info *info)
@@ -175,8 +191,7 @@ static void print_address_description(struct kasan_access_info *info)
struct kmem_cache *cache = page->slab_cache;
object = nearest_obj(cache, page,
(void *)info->access_addr);
- kasan_object_err(cache, page, object,
- "kasan: bad access detected");
+ kasan_object_err(cache, object);
return;
}
dump_page(page, "kasan: bad access detected");
@@ -241,19 +256,13 @@ static void print_shadow_for_address(const void *addr)
}
}
-static DEFINE_SPINLOCK(report_lock);
-
static void kasan_report_error(struct kasan_access_info *info)
{
unsigned long flags;
const char *bug_type;
- /*
- * Make sure we don't end up in loop.
- */
- kasan_disable_current();
- spin_lock_irqsave(&report_lock, flags);
- pr_err("==================================================================\n");
+ kasan_start_report(&flags);
+
if (info->access_addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
if ((unsigned long)info->access_addr < PAGE_SIZE)
@@ -274,10 +283,8 @@ static void kasan_report_error(struct kasan_access_info *info)
print_address_description(info);
print_shadow_for_address(info->first_bad_addr);
}
- pr_err("==================================================================\n");
- add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
- spin_unlock_irqrestore(&report_lock, flags);
- kasan_enable_current();
+
+ kasan_end_report(&flags);
}
void kasan_report(unsigned long addr, size_t size,
diff --git a/mm/memblock.c b/mm/memblock.c
index ff5ff3b5f1ea..483197ef613f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -482,7 +482,7 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
* @flags: flags of the new region
*
* Insert new memblock region [@base,@base+@size) into @type at @idx.
- * @type must already have extra room to accomodate the new region.
+ * @type must already have extra room to accommodate the new region.
*/
static void __init_memblock memblock_insert_region(struct memblock_type *type,
int idx, phys_addr_t base,
@@ -544,7 +544,7 @@ repeat:
/*
* The following is executed twice. Once with %false @insert and
* then with %true. The first counts the number of regions needed
- * to accomodate the new area. The second actually inserts them.
+ * to accommodate the new area. The second actually inserts them.
*/
base = obase;
nr_new = 0;
@@ -994,7 +994,10 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
if (*idx == (u64)ULLONG_MAX) {
idx_a = type_a->cnt - 1;
- idx_b = type_b->cnt;
+ if (type_b != NULL)
+ idx_b = type_b->cnt;
+ else
+ idx_b = 0;
}
for (; idx_a >= 0; idx_a--) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c265212bec8c..e74d7080ec9e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2337,8 +2337,11 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
return 0;
memcg = get_mem_cgroup_from_mm(current->mm);
- if (!mem_cgroup_is_root(memcg))
+ if (!mem_cgroup_is_root(memcg)) {
ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
+ if (!ret)
+ __SetPageKmemcg(page);
+ }
css_put(&memcg->css);
return ret;
}
@@ -2365,6 +2368,11 @@ void memcg_kmem_uncharge(struct page *page, int order)
page_counter_uncharge(&memcg->memsw, nr_pages);
page->mem_cgroup = NULL;
+
+ /* slab pages do not have PageKmemcg flag set */
+ if (PageKmemcg(page))
+ __ClearPageKmemcg(page);
+
css_put_many(&memcg->css, nr_pages);
}
#endif /* !CONFIG_SLOB */
@@ -2559,6 +2567,15 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
return 0;
mctz = soft_limit_tree_node(pgdat->node_id);
+
+ /*
+ * Do not even bother to check the largest node if the root
+ * is empty. Do it lockless to prevent lock bouncing. Races
+ * are acceptable as soft limit is best effort anyway.
+ */
+ if (RB_EMPTY_ROOT(&mctz->rb_root))
+ return 0;
+
/*
* This loop can run a while, specially if mem_cgroup's continuously
* keep exceeding their soft limit and putting the system under
@@ -5528,8 +5545,10 @@ static void uncharge_list(struct list_head *page_list)
else
nr_file += nr_pages;
pgpgout++;
- } else
+ } else {
nr_kmem += 1 << compound_order(page);
+ __ClearPageKmemcg(page);
+ }
page->mem_cgroup = NULL;
} while (next != page_list);
diff --git a/mm/memory.c b/mm/memory.c
index 4425b6059339..83be99d9d8a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2642,6 +2642,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
if (page == swapcache) {
do_page_add_anon_rmap(page, vma, fe->address, exclusive);
mem_cgroup_commit_charge(page, memcg, true, false);
+ activate_page(page);
} else { /* ksm created a completely new copy */
page_add_new_anon_rmap(page, vma, fe->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
@@ -3133,6 +3134,8 @@ static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff)
if (pmd_none(*fe->pmd)) {
fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address);
+ if (!fe->prealloc_pte)
+ goto out;
smp_wmb(); /* See comment in __pte_alloc() */
}
diff --git a/mm/mmap.c b/mm/mmap.c
index d44bee96a5fe..ca9d91bca0d6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2653,16 +2653,18 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-static int do_brk(unsigned long addr, unsigned long len)
+static int do_brk(unsigned long addr, unsigned long request)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- unsigned long flags;
+ unsigned long flags, len;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
- len = PAGE_ALIGN(len);
+ len = PAGE_ALIGN(request);
+ if (len < request)
+ return -ENOMEM;
if (!len)
return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea759b935360..ee744fa3b93d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1008,10 +1008,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
}
if (PageMappingFlags(page))
page->mapping = NULL;
- if (memcg_kmem_enabled() && PageKmemcg(page)) {
+ if (memcg_kmem_enabled() && PageKmemcg(page))
memcg_kmem_uncharge(page, order);
- __ClearPageKmemcg(page);
- }
if (check_free)
bad += free_pages_check(page);
if (bad)
@@ -3756,12 +3754,10 @@ no_zone:
}
out:
- if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) {
- if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) {
- __free_pages(page, order);
- page = NULL;
- } else
- __SetPageKmemcg(page);
+ if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
+ unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
+ __free_pages(page, order);
+ page = NULL;
}
if (kmemcheck_enabled && page)
@@ -5257,11 +5253,6 @@ static void __meminit setup_zone_pageset(struct zone *zone)
zone->pageset = alloc_percpu(struct per_cpu_pageset);
for_each_possible_cpu(cpu)
zone_pageset_init(zone, cpu);
-
- if (!zone->zone_pgdat->per_cpu_nodestats) {
- zone->zone_pgdat->per_cpu_nodestats =
- alloc_percpu(struct per_cpu_nodestat);
- }
}
/*
@@ -5270,13 +5261,18 @@ static void __meminit setup_zone_pageset(struct zone *zone)
*/
void __init setup_per_cpu_pageset(void)
{
+ struct pglist_data *pgdat;
struct zone *zone;
for_each_populated_zone(zone)
setup_zone_pageset(zone);
+
+ for_each_online_pgdat(pgdat)
+ pgdat->per_cpu_nodestats =
+ alloc_percpu(struct per_cpu_nodestat);
}
-static noinline __init_refok
+static noinline __ref
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
@@ -5903,7 +5899,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
}
}
-static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
+static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
{
unsigned long __maybe_unused start = 0;
unsigned long __maybe_unused offset = 0;
diff --git a/mm/page_io.c b/mm/page_io.c
index fb1fa269d3a0..16bd82fad38c 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -319,9 +319,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
ret = -ENOMEM;
goto out;
}
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
if (wbc->sync_mode == WB_SYNC_ALL)
- bio->bi_rw |= REQ_SYNC;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
+ else
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
count_vm_event(PSWPOUT);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 2ac19a61d565..7f7748a0f9e1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1362,13 +1362,14 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
struct vm_area_struct pvma;
struct inode *inode = &info->vfs_inode;
struct address_space *mapping = inode->i_mapping;
- pgoff_t idx, hindex = round_down(index, HPAGE_PMD_NR);
+ pgoff_t idx, hindex;
void __rcu **results;
struct page *page;
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
return NULL;
+ hindex = round_down(index, HPAGE_PMD_NR);
rcu_read_lock();
if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx,
hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
diff --git a/mm/slab.c b/mm/slab.c
index 09771ed3e693..b67271024135 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1877,7 +1877,7 @@ static struct array_cache __percpu *alloc_kmem_cache_cpus(
return cpu_cache;
}
-static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
+static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (slab_state >= FULL)
return enable_cpucache(cachep, gfp);
@@ -2604,9 +2604,11 @@ static void cache_init_objs(struct kmem_cache *cachep,
}
for (i = 0; i < cachep->num; i++) {
+ objp = index_to_obj(cachep, page, i);
+ kasan_init_slab_obj(cachep, objp);
+
/* constructor could break poison info */
if (DEBUG == 0 && cachep->ctor) {
- objp = index_to_obj(cachep, page, i);
kasan_unpoison_object_data(cachep, objp);
cachep->ctor(objp);
kasan_poison_object_data(cachep, objp);
@@ -4439,6 +4441,36 @@ static int __init slab_proc_init(void)
module_init(slab_proc_init);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+ unsigned long offset;
+
+ /* Find and validate object. */
+ cachep = page->slab_cache;
+ objnr = obj_to_index(cachep, page, (void *)ptr);
+ BUG_ON(objnr >= cachep->num);
+
+ /* Find offset within object. */
+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+ return NULL;
+
+ return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slub.c b/mm/slub.c
index 74e7c8c30db8..cead06394e9e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,7 +124,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
-inline void *fixup_red_left(struct kmem_cache *s, void *p)
+void *fixup_red_left(struct kmem_cache *s, void *p)
{
if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
p += s->red_left_pad;
@@ -1384,6 +1384,7 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
+ kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
kasan_unpoison_object_data(s, object);
s->ctor(object);
@@ -3763,6 +3764,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *s;
+ unsigned long offset;
+ size_t object_size;
+
+ /* Find object and usable object size. */
+ s = page->slab_cache;
+ object_size = slab_ksize(s);
+
+ /* Reject impossible pointers. */
+ if (ptr < page_address(page))
+ return s->name;
+
+ /* Find offset within object. */
+ offset = (ptr - page_address(page)) % s->size;
+
+ /* Adjust for redzone and reject if within the redzone. */
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+ if (offset < s->red_left_pad)
+ return s->name;
+ offset -= s->red_left_pad;
+ }
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= object_size && n <= object_size - offset)
+ return NULL;
+
+ return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
static size_t __ksize(const void *object)
{
struct page *page;
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 68885dcbaf40..574c67b663fe 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -36,7 +36,7 @@
* Uses the main allocators if they are available, else bootmem.
*/
-static void * __init_refok __earlyonly_bootmem_alloc(int node,
+static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long size,
unsigned long align,
unsigned long goal)
diff --git a/mm/sparse.c b/mm/sparse.c
index 36d7bbb80e49..1e168bf2779a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -59,7 +59,7 @@ static inline void set_section_nid(unsigned long section_nr, int nid)
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
+static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
struct mem_section *section = NULL;
unsigned long array_size = SECTIONS_PER_ROOT *
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644
index 000000000000..8ebae91a6b55
--- /dev/null
+++ b/mm/usercopy.c
@@ -0,0 +1,268 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ * NOT_STACK: not at all on the stack
+ * GOOD_FRAME: fully within a valid stack frame
+ * GOOD_STACK: fully on the stack (when can't do frame-checking)
+ * BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
+ int ret;
+
+ /* Object is not on the stack at all. */
+ if (obj + len <= stack || stackend <= obj)
+ return NOT_STACK;
+
+ /*
+ * Reject: object partially overlaps the stack (passing the
+ * check above means at least one end is within the stack,
+ * so if this check fails, the other end is outside the stack).
+ */
+ if (obj < stack || stackend < obj + len)
+ return BAD_STACK;
+
+ /* Check if object is safely within a valid frame. */
+ ret = arch_within_stack_frames(stack, stackend, obj, len);
+ if (ret)
+ return ret;
+
+ return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+ bool to_user, const char *type)
+{
+ pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+ to_user ? "exposure" : "overwrite",
+ to_user ? "from" : "to", ptr, type ? : "unknown", len);
+ /*
+ * For greater effect, it would be nice to do do_group_exit(),
+ * but BUG() actually hooks all the lock-breaking and per-arch
+ * Oops code, so that is used here instead.
+ */
+ BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+ unsigned long high)
+{
+ unsigned long check_low = (uintptr_t)ptr;
+ unsigned long check_high = check_low + n;
+
+ /* Does not overlap if entirely above or entirely below. */
+ if (check_low >= high || check_high < low)
+ return false;
+
+ return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+ unsigned long n)
+{
+ unsigned long textlow = (unsigned long)_stext;
+ unsigned long texthigh = (unsigned long)_etext;
+ unsigned long textlow_linear, texthigh_linear;
+
+ if (overlaps(ptr, n, textlow, texthigh))
+ return "<kernel text>";
+
+ /*
+ * Some architectures have virtual memory mappings with a secondary
+ * mapping of the kernel text, i.e. there is more than one virtual
+ * kernel address that points to the kernel image. It is usually
+ * when there is a separate linear physical memory mapping, in which
+ * case __pa() is not just the reverse of __va(). This can be detected
+ * and checked:
+ */
+ textlow_linear = (unsigned long)__va(__pa(textlow));
+ /* No different mapping: we're done. */
+ if (textlow_linear == textlow)
+ return NULL;
+
+ /* Check the secondary mapping... */
+ texthigh_linear = (unsigned long)__va(__pa(texthigh));
+ if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+ return "<linear kernel text>";
+
+ return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+ /* Reject if object wraps past end of memory. */
+ if (ptr + n < ptr)
+ return "<wrapped address>";
+
+ /* Reject if NULL or ZERO-allocation. */
+ if (ZERO_OR_NULL_PTR(ptr))
+ return "<null>";
+
+ return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ struct page *page, *endpage;
+ const void *end = ptr + n - 1;
+ bool is_reserved, is_cma;
+
+ /*
+ * Some architectures (arm64) return true for virt_addr_valid() on
+ * vmalloced addresses. Work around this by checking for vmalloc
+ * first.
+ */
+ if (is_vmalloc_addr(ptr))
+ return NULL;
+
+ if (!virt_addr_valid(ptr))
+ return NULL;
+
+ page = virt_to_head_page(ptr);
+
+ /* Check slab allocator for flags and size. */
+ if (PageSlab(page))
+ return __check_heap_object(ptr, n, page);
+
+ /*
+ * Sometimes the kernel data regions are not marked Reserved (see
+ * check below). And sometimes [_sdata,_edata) does not cover
+ * rodata and/or bss, so check each range explicitly.
+ */
+
+ /* Allow reads of kernel rodata region (if not marked as Reserved). */
+ if (ptr >= (const void *)__start_rodata &&
+ end <= (const void *)__end_rodata) {
+ if (!to_user)
+ return "<rodata>";
+ return NULL;
+ }
+
+ /* Allow kernel data region (if not marked as Reserved). */
+ if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+ return NULL;
+
+ /* Allow kernel bss region (if not marked as Reserved). */
+ if (ptr >= (const void *)__bss_start &&
+ end <= (const void *)__bss_stop)
+ return NULL;
+
+ /* Is the object wholly within one base page? */
+ if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+ ((unsigned long)end & (unsigned long)PAGE_MASK)))
+ return NULL;
+
+ /* Allow if start and end are inside the same compound page. */
+ endpage = virt_to_head_page(end);
+ if (likely(endpage == page))
+ return NULL;
+
+ /*
+ * Accept only if the range lies entirely within either Reserved
+ * (i.e. special or device memory) pages or CMA pages; otherwise
+ * reject, since the object spans several independently allocated
+ * pages.
+ */
+ is_reserved = PageReserved(page);
+ is_cma = is_migrate_cma_page(page);
+ if (!is_reserved && !is_cma)
+ goto reject;
+
+ for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+ page = virt_to_head_page(ptr);
+ if (is_reserved && !PageReserved(page))
+ goto reject;
+ if (is_cma && !is_migrate_cma_page(page))
+ goto reject;
+ }
+
+ return NULL;
+
+reject:
+ return "<spans multiple pages>";
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+ const char *err;
+
+ /* Skip all tests if size is zero. */
+ if (!n)
+ return;
+
+ /* Check for invalid addresses. */
+ err = check_bogus_address(ptr, n);
+ if (err)
+ goto report;
+
+ /* Check for bad heap object. */
+ err = check_heap_object(ptr, n, to_user);
+ if (err)
+ goto report;
+
+ /* Check for bad stack object. */
+ switch (check_stack_object(ptr, n)) {
+ case NOT_STACK:
+ /* Object is not touching the current process stack. */
+ break;
+ case GOOD_FRAME:
+ case GOOD_STACK:
+ /*
+ * Object is either in the correct frame (when it
+ * is possible to check) or just generally on the
+ * process stack (when frame checking not available).
+ */
+ return;
+ default:
+ err = "<process stack>";
+ goto report;
+ }
+
+ /* Check for object in kernel to avoid text exposure. */
+ err = check_kernel_text_object(ptr, n);
+ if (!err)
+ return;
+
+report:
+ report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
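For orientation, a minimal sketch (not part of the patch) of how a hardened uaccess path would reach __check_object_size(); check_object_size(), example_copy_to_user() and raw_copy_to_user() below are illustrative stand-ins, not definitions taken from this series:

/*
 * Sketch: skip the runtime check for compile-time-constant sizes and
 * defer everything else to __check_object_size(), which BUG()s on a
 * bad object before the actual copy is performed.
 */
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}

static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);	/* validate the kernel object */
	return raw_copy_to_user(to, from, n);	/* assumed arch copy helper */
}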
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 650d26832569..374d95d04178 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2561,7 +2561,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
node_lru_pages += lru_pages;
- if (!global_reclaim(sc))
+ if (memcg)
shrink_slab(sc->gfp_mask, pgdat->node_id,
memcg, sc->nr_scanned - scanned,
lru_pages);
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index 958d9856912c..84cbed630c4b 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -11,5 +11,5 @@ libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
crypto.o armor.o \
auth_x.o \
ceph_fs.o ceph_strings.o ceph_hash.o \
- pagevec.o snapshot.o
+ pagevec.o snapshot.o string_table.o
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 55d2bfee16d7..bddfcf6f09c2 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -747,6 +747,8 @@ out:
static void __exit exit_ceph_lib(void)
{
dout("exit_ceph_lib\n");
+ WARN_ON(!ceph_strings_empty());
+
ceph_osdc_cleanup();
ceph_msgr_exit();
ceph_crypto_shutdown();
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index 41466ccb972a..7d54e944de5e 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -9,9 +9,9 @@
*/
int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
{
- __u32 su = le32_to_cpu(layout->fl_stripe_unit);
- __u32 sc = le32_to_cpu(layout->fl_stripe_count);
- __u32 os = le32_to_cpu(layout->fl_object_size);
+ __u32 su = layout->stripe_unit;
+ __u32 sc = layout->stripe_count;
+ __u32 os = layout->object_size;
/* stripe unit, object size must be non-zero, 64k increment */
if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
@@ -27,6 +27,30 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
return 1;
}
+void ceph_file_layout_from_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy)
+{
+ fl->stripe_unit = le32_to_cpu(legacy->fl_stripe_unit);
+ fl->stripe_count = le32_to_cpu(legacy->fl_stripe_count);
+ fl->object_size = le32_to_cpu(legacy->fl_object_size);
+ fl->pool_id = le32_to_cpu(legacy->fl_pg_pool);
+ if (fl->pool_id == 0)
+ fl->pool_id = -1;
+}
+EXPORT_SYMBOL(ceph_file_layout_from_legacy);
+
+void ceph_file_layout_to_legacy(struct ceph_file_layout *fl,
+ struct ceph_file_layout_legacy *legacy)
+{
+ legacy->fl_stripe_unit = cpu_to_le32(fl->stripe_unit);
+ legacy->fl_stripe_count = cpu_to_le32(fl->stripe_count);
+ legacy->fl_object_size = cpu_to_le32(fl->object_size);
+ if (fl->pool_id >= 0)
+ legacy->fl_pg_pool = cpu_to_le32(fl->pool_id);
+ else
+ legacy->fl_pg_pool = 0;
+}
+EXPORT_SYMBOL(ceph_file_layout_to_legacy);
int ceph_flags_to_mode(int flags)
{
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index e77b04ca7802..c62b2b029a6e 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -156,8 +156,16 @@ static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
seq_printf(s, "]/%d\t[", t->up.primary);
for (i = 0; i < t->acting.size; i++)
seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
- seq_printf(s, "]/%d\t%*pE\t0x%x", t->acting.primary,
- t->target_oid.name_len, t->target_oid.name, t->flags);
+ seq_printf(s, "]/%d\t", t->acting.primary);
+ if (t->target_oloc.pool_ns) {
+ seq_printf(s, "%*pE/%*pE\t0x%x",
+ (int)t->target_oloc.pool_ns->len,
+ t->target_oloc.pool_ns->str,
+ t->target_oid.name_len, t->target_oid.name, t->flags);
+ } else {
+ seq_printf(s, "%*pE\t0x%x", t->target_oid.name_len,
+ t->target_oid.name, t->flags);
+ }
if (t->paused)
seq_puts(s, "\tP");
}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 37c38a7fb5c5..c83326c5ba58 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -227,9 +227,10 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
}
const char *ceph_sub_str[] = {
- [CEPH_SUB_MDSMAP] = "mdsmap",
[CEPH_SUB_MONMAP] = "monmap",
[CEPH_SUB_OSDMAP] = "osdmap",
+ [CEPH_SUB_FSMAP] = "fsmap.user",
+ [CEPH_SUB_MDSMAP] = "mdsmap",
};
/*
@@ -1193,6 +1194,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
+ case CEPH_MSG_FS_MAP_USER:
m = ceph_msg_new(type, front_len, GFP_NOFS, false);
if (!m)
return NULL; /* ENOMEM--return skip == 0 */
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index ddec1c10ac80..aaed59a47b1d 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
+#include <linux/ceph/messenger.h>
#include <linux/ceph/msgpool.h>
static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 89469592076c..b5ec09612ff7 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -387,7 +387,9 @@ static void target_copy(struct ceph_osd_request_target *dest,
static void target_destroy(struct ceph_osd_request_target *t)
{
ceph_oid_destroy(&t->base_oid);
+ ceph_oloc_destroy(&t->base_oloc);
ceph_oid_destroy(&t->target_oid);
+ ceph_oloc_destroy(&t->target_oloc);
}
/*
@@ -533,6 +535,11 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
+static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
+{
+ return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
+}
+
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
struct ceph_osd_client *osdc = req->r_osdc;
@@ -540,11 +547,13 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
int msg_size;
WARN_ON(ceph_oid_empty(&req->r_base_oid));
+ WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
/* create request message */
msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
- msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
+ msg_size += CEPH_ENCODING_START_BLK_LEN +
+ ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
msg_size += 1 + 8 + 4 + 4; /* pgid */
msg_size += 4 + req->r_base_oid.name_len; /* oid */
msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
@@ -932,7 +941,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
osd_req_op_init(req, which, opcode, 0);
} else {
- u32 object_size = le32_to_cpu(layout->fl_object_size);
+ u32 object_size = layout->object_size;
u32 object_base = off - objoff;
if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
if (truncate_size <= object_base) {
@@ -948,7 +957,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
}
req->r_flags = flags;
- req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
+ req->r_base_oloc.pool = layout->pool_id;
+ req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
req->r_snapid = vino.snap;
@@ -1489,12 +1499,16 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
p += sizeof(req->r_replay_version);
/* oloc */
- ceph_encode_8(&p, 4);
- ceph_encode_8(&p, 4);
- ceph_encode_32(&p, 8 + 4 + 4);
+ ceph_start_encoding(&p, 5, 4,
+ ceph_oloc_encoding_size(&req->r_t.target_oloc));
ceph_encode_64(&p, req->r_t.target_oloc.pool);
ceph_encode_32(&p, -1); /* preferred */
ceph_encode_32(&p, 0); /* key len */
+ if (req->r_t.target_oloc.pool_ns)
+ ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
+ req->r_t.target_oloc.pool_ns->len);
+ else
+ ceph_encode_32(&p, 0);
/* pgid */
ceph_encode_8(&p, 1);
@@ -2594,9 +2608,22 @@ static int ceph_oloc_decode(void **p, void *end,
}
if (struct_v >= 5) {
+ bool changed = false;
+
len = ceph_decode_32(p);
if (len > 0) {
- pr_warn("ceph_object_locator::nspace is set\n");
+ ceph_decode_need(p, end, len, e_inval);
+ if (!oloc->pool_ns ||
+ ceph_compare_string(oloc->pool_ns, *p, len))
+ changed = true;
+ *p += len;
+ } else {
+ if (oloc->pool_ns)
+ changed = true;
+ }
+ if (changed) {
+ /* redirect changes namespace */
+ pr_warn("ceph_object_locator::nspace is changed\n");
goto e_inval;
}
}
@@ -2806,7 +2833,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
goto out_unlock_session;
}
+ m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
ret = decode_MOSDOpReply(msg, &m);
+ m.redirect.oloc.pool_ns = NULL;
if (ret) {
pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
req->r_tid, ret);
@@ -2835,7 +2864,11 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
unlink_request(osd, req);
mutex_unlock(&osd->lock);
- ceph_oloc_copy(&req->r_t.target_oloc, &m.redirect.oloc);
+ /*
+ * Not ceph_oloc_copy() - changing pool_ns is not
+ * supported.
+ */
+ req->r_t.target_oloc.pool = m.redirect.oloc.pool;
req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
req->r_tid = 0;
__submit_request(req, false);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 7e480bf75bcf..d2436880b305 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1510,6 +1510,24 @@ bad:
return ERR_PTR(err);
}
+void ceph_oloc_copy(struct ceph_object_locator *dest,
+ const struct ceph_object_locator *src)
+{
+ WARN_ON(!ceph_oloc_empty(dest));
+ WARN_ON(dest->pool_ns); /* empty() only covers ->pool */
+
+ dest->pool = src->pool;
+ if (src->pool_ns)
+ dest->pool_ns = ceph_get_string(src->pool_ns);
+}
+EXPORT_SYMBOL(ceph_oloc_copy);
+
+void ceph_oloc_destroy(struct ceph_object_locator *oloc)
+{
+ ceph_put_string(oloc->pool_ns);
+}
+EXPORT_SYMBOL(ceph_oloc_destroy);
+
void ceph_oid_copy(struct ceph_object_id *dest,
const struct ceph_object_id *src)
{
@@ -1770,9 +1788,9 @@ int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u64 *ono,
u64 *oxoff, u64 *oxlen)
{
- u32 osize = le32_to_cpu(layout->fl_object_size);
- u32 su = le32_to_cpu(layout->fl_stripe_unit);
- u32 sc = le32_to_cpu(layout->fl_stripe_count);
+ u32 osize = layout->object_size;
+ u32 su = layout->stripe_unit;
+ u32 sc = layout->stripe_count;
u32 bl, stripeno, stripepos, objsetno;
u32 su_per_object;
u64 t, su_offset;
@@ -1844,12 +1862,34 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
if (!pi)
return -ENOENT;
- raw_pgid->pool = oloc->pool;
- raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
- oid->name_len);
-
- dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
- raw_pgid->pool, raw_pgid->seed);
+ if (!oloc->pool_ns) {
+ raw_pgid->pool = oloc->pool;
+ raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
+ oid->name_len);
+ dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+ raw_pgid->pool, raw_pgid->seed);
+ } else {
+ char stack_buf[256];
+ char *buf = stack_buf;
+ int nsl = oloc->pool_ns->len;
+ size_t total = nsl + 1 + oid->name_len;
+
+ if (total > sizeof(stack_buf)) {
+ buf = kmalloc(total, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+ }
+ memcpy(buf, oloc->pool_ns->str, nsl);
+ buf[nsl] = '\037';
+ memcpy(buf + nsl + 1, oid->name, oid->name_len);
+ raw_pgid->pool = oloc->pool;
+ raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
+ if (buf != stack_buf)
+ kfree(buf);
+ dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
+ oid->name, nsl, oloc->pool_ns->str,
+ raw_pgid->pool, raw_pgid->seed);
+ }
return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);
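A minimal sketch of the intended lifetime of a namespaced object locator using the two helpers added above (the surrounding request and its r_base_oloc field are assumed context, not shown in this hunk):

	struct ceph_object_locator oloc = { .pool = -1 };

	/* copies the pool id and takes a ref on pool_ns, if one is set */
	ceph_oloc_copy(&oloc, &req->r_base_oloc);

	/* ... use oloc for hashing/encoding ... */

	ceph_oloc_destroy(&oloc);	/* drops the pool_ns reference */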
diff --git a/net/ceph/string_table.c b/net/ceph/string_table.c
new file mode 100644
index 000000000000..ca53c8319209
--- /dev/null
+++ b/net/ceph/string_table.c
@@ -0,0 +1,111 @@
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/ceph/string_table.h>
+
+static DEFINE_SPINLOCK(string_tree_lock);
+static struct rb_root string_tree = RB_ROOT;
+
+struct ceph_string *ceph_find_or_create_string(const char* str, size_t len)
+{
+ struct ceph_string *cs, *exist;
+ struct rb_node **p, *parent;
+ int ret;
+
+ exist = NULL;
+ spin_lock(&string_tree_lock);
+ p = &string_tree.rb_node;
+ while (*p) {
+ exist = rb_entry(*p, struct ceph_string, node);
+ ret = ceph_compare_string(exist, str, len);
+ if (ret > 0)
+ p = &(*p)->rb_left;
+ else if (ret < 0)
+ p = &(*p)->rb_right;
+ else
+ break;
+ exist = NULL;
+ }
+ if (exist && !kref_get_unless_zero(&exist->kref)) {
+ rb_erase(&exist->node, &string_tree);
+ RB_CLEAR_NODE(&exist->node);
+ exist = NULL;
+ }
+ spin_unlock(&string_tree_lock);
+ if (exist)
+ return exist;
+
+ cs = kmalloc(sizeof(*cs) + len + 1, GFP_NOFS);
+ if (!cs)
+ return NULL;
+
+ kref_init(&cs->kref);
+ cs->len = len;
+ memcpy(cs->str, str, len);
+ cs->str[len] = 0;
+
+retry:
+ exist = NULL;
+ parent = NULL;
+ p = &string_tree.rb_node;
+ spin_lock(&string_tree_lock);
+ while (*p) {
+ parent = *p;
+ exist = rb_entry(*p, struct ceph_string, node);
+ ret = ceph_compare_string(exist, str, len);
+ if (ret > 0)
+ p = &(*p)->rb_left;
+ else if (ret < 0)
+ p = &(*p)->rb_right;
+ else
+ break;
+ exist = NULL;
+ }
+ ret = 0;
+ if (!exist) {
+ rb_link_node(&cs->node, parent, p);
+ rb_insert_color(&cs->node, &string_tree);
+ } else if (!kref_get_unless_zero(&exist->kref)) {
+ rb_erase(&exist->node, &string_tree);
+ RB_CLEAR_NODE(&exist->node);
+ ret = -EAGAIN;
+ }
+ spin_unlock(&string_tree_lock);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ if (exist) {
+ kfree(cs);
+ cs = exist;
+ }
+
+ return cs;
+}
+EXPORT_SYMBOL(ceph_find_or_create_string);
+
+static void ceph_free_string(struct rcu_head *head)
+{
+ struct ceph_string *cs = container_of(head, struct ceph_string, rcu);
+ kfree(cs);
+}
+
+void ceph_release_string(struct kref *ref)
+{
+ struct ceph_string *cs = container_of(ref, struct ceph_string, kref);
+
+ spin_lock(&string_tree_lock);
+ if (!RB_EMPTY_NODE(&cs->node)) {
+ rb_erase(&cs->node, &string_tree);
+ RB_CLEAR_NODE(&cs->node);
+ }
+ spin_unlock(&string_tree_lock);
+
+ call_rcu(&cs->rcu, ceph_free_string);
+}
+EXPORT_SYMBOL(ceph_release_string);
+
+bool ceph_strings_empty(void)
+{
+ return RB_EMPTY_ROOT(&string_tree);
+}
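A sketch of how the interned-string API above is meant to be used; ceph_get_string()/ceph_put_string() are assumed to be thin kref wrappers declared next to ceph_compare_string() in the new <linux/ceph/string_table.h> header:

	struct ceph_string *ns;

	ns = ceph_find_or_create_string("ns1", strlen("ns1"));
	if (!ns)
		return -ENOMEM;

	/*
	 * Store the pointer (e.g. in ceph_object_locator::pool_ns),
	 * compare with ceph_compare_string(), hash ->str/->len, etc.
	 */

	ceph_put_string(ns);	/* final put unlinks it and frees via RCU */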
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b26aa870adc0..bdaef7fd6e47 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -236,7 +236,8 @@ void tcp_select_initial_window(int __space, __u32 mss,
/* Set window scaling on max possible window
* See RFC1323 for an explanation of the limit to 14
*/
- space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
+ space = max_t(u32, space, sysctl_tcp_rmem[2]);
+ space = max_t(u32, space, sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
while (space > 65535 && (*rcv_wscale) < 14) {
space >>= 1;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 6287a8b9f428..ab3e796596b1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3624,8 +3624,7 @@ restart:
state = ifa->state;
ifa->state = INET6_IFADDR_STATE_DEAD;
- list_del(&ifa->if_list);
- list_add(&ifa->if_list, &del_list);
+ list_move(&ifa->if_list, &del_list);
}
spin_unlock_bh(&ifa->lock);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7425f6c23888..1f1682b9a6a8 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -610,7 +610,8 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
/* We will generate more packets, so re-queue
* auth chunk.
*/
- list_add(&chunk->list, &packet->chunk_list);
+ list_add(&packet->auth->list,
+ &packet->chunk_list);
} else {
sctp_chunk_free(packet->auth);
packet->auth = NULL;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 8812e1bf6c1c..9fc417a8b476 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2079,7 +2079,7 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
lock_sock(sk);
if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
- !sctp_sstate(sk, CLOSING)) {
+ !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
err = -ENOTCONN;
goto out;
}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ec166d2bd2d9..877e55066f89 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -204,7 +204,9 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
/* If the socket is just going to throw this away, do not
* even try to deliver it.
*/
- if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (sk->sk_shutdown & RCV_SHUTDOWN &&
+ (sk->sk_shutdown & SEND_SHUTDOWN ||
+ !sctp_ulpevent_is_notification(event)))
goto out_free;
if (!sctp_ulpevent_is_notification(event)) {
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 040ff627c18a..a7e42f9a405c 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -51,9 +51,7 @@ static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
ret = kstrtoul(val, 0, &num);
if (ret == -EINVAL)
goto out_inval;
- nbits = fls(num);
- if (num > (1U << nbits))
- nbits++;
+ nbits = fls(num - 1);
if (nbits > MAX_HASHTABLE_BITS || nbits < 2)
goto out_inval;
*(unsigned int *)kp->arg = nbits;
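As a worked example of the rounding change (illustrative arithmetic, not from the patch): for a requested table size of 128, the old code computed fls(128) = 8 and, since 128 is not greater than 1 << 8, allocated 2^8 = 256 buckets; the new fls(128 - 1) = 7 gives the intended 128-bucket table. Non-power-of-two sizes such as 100 round up to 128 either way.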
@@ -359,8 +357,10 @@ rpcauth_key_timeout_notify(struct rpc_auth *auth, struct rpc_cred *cred)
EXPORT_SYMBOL_GPL(rpcauth_key_timeout_notify);
bool
-rpcauth_cred_key_to_expire(struct rpc_cred *cred)
+rpcauth_cred_key_to_expire(struct rpc_auth *auth, struct rpc_cred *cred)
{
+ if (auth->au_flags & RPCAUTH_AUTH_NO_CRKEY_TIMEOUT)
+ return false;
if (!cred->cr_ops->crkey_to_expire)
return false;
return cred->cr_ops->crkey_to_expire(cred);
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 54dd3fdead54..168219535a34 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -224,7 +224,7 @@ generic_key_timeout(struct rpc_auth *auth, struct rpc_cred *cred)
/* Fast track for non crkey_timeout (no key) underlying credentials */
- if (test_bit(RPC_CRED_NO_CRKEY_TIMEOUT, &acred->ac_flags))
+ if (auth->au_flags & RPCAUTH_AUTH_NO_CRKEY_TIMEOUT)
return 0;
/* Fast track for the normal case */
@@ -236,12 +236,6 @@ generic_key_timeout(struct rpc_auth *auth, struct rpc_cred *cred)
if (IS_ERR(tcred))
return -EACCES;
- if (!tcred->cr_ops->crkey_timeout) {
- set_bit(RPC_CRED_NO_CRKEY_TIMEOUT, &acred->ac_flags);
- ret = 0;
- goto out_put;
- }
-
/* Test for the almost error case */
ret = tcred->cr_ops->crkey_timeout(tcred);
if (ret != 0) {
@@ -257,7 +251,6 @@ generic_key_timeout(struct rpc_auth *auth, struct rpc_cred *cred)
set_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
}
-out_put:
put_rpccred(tcred);
return ret;
}
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e64ae93d5b4f..23c8e7c39656 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1015,8 +1015,11 @@ gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
auth->au_rslack = GSS_VERF_SLACK >> 2;
+ auth->au_flags = 0;
auth->au_ops = &authgss_ops;
auth->au_flavor = flavor;
+ if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
+ auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
atomic_set(&auth->au_count, 1);
kref_init(&gss_auth->kref);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 65427492b1c9..60595835317a 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -745,12 +745,14 @@ static struct pf_desc gss_kerberos_pfs[] = {
.qop = GSS_C_QOP_DEFAULT,
.service = RPC_GSS_SVC_INTEGRITY,
.name = "krb5i",
+ .datatouch = true,
},
[2] = {
.pseudoflavor = RPC_AUTH_GSS_KRB5P,
.qop = GSS_C_QOP_DEFAULT,
.service = RPC_GSS_SVC_PRIVACY,
.name = "krb5p",
+ .datatouch = true,
},
};
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 7063d856a598..5fec3abbe19b 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -361,6 +361,18 @@ gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
}
EXPORT_SYMBOL(gss_pseudoflavor_to_service);
+bool
+gss_pseudoflavor_to_datatouch(struct gss_api_mech *gm, u32 pseudoflavor)
+{
+ int i;
+
+ for (i = 0; i < gm->gm_pf_num; i++) {
+ if (gm->gm_pfs[i].pseudoflavor == pseudoflavor)
+ return gm->gm_pfs[i].datatouch;
+ }
+ return false;
+}
+
char *
gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
{
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e085f5ae1548..1d281816f2bf 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1230,8 +1230,9 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
if (status)
goto out;
- dprintk("RPC: svcauth_gss: gss major status = %d\n",
- ud.major_status);
+ dprintk("RPC: svcauth_gss: gss major status = %d "
+ "minor status = %d\n",
+ ud.major_status, ud.minor_status);
switch (ud.major_status) {
case GSS_S_CONTINUE_NEEDED:
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 8d9eb4d5ddd8..4d17376b2acb 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -115,6 +115,7 @@ static
struct rpc_auth null_auth = {
.au_cslack = NUL_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
+ .au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
.au_ops = &authnull_ops,
.au_flavor = RPC_AUTH_NULL,
.au_count = ATOMIC_INIT(0),
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 9f65452b7cbc..a99278c984e8 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -228,6 +228,7 @@ static
struct rpc_auth unix_auth = {
.au_cslack = UNX_CALLSLACK,
.au_rslack = NUL_REPLYSLACK,
+ .au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
.au_ops = &authunix_ops,
.au_flavor = RPC_AUTH_UNIX,
.au_count = ATOMIC_INIT(0),
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 553bf95f7003..4d8e11f94a35 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -362,7 +362,7 @@ void sunrpc_destroy_cache_detail(struct cache_detail *cd)
cache_purge(cd);
spin_lock(&cache_list_lock);
write_lock(&cd->hash_lock);
- if (cd->entries || atomic_read(&cd->inuse)) {
+ if (cd->entries) {
write_unlock(&cd->hash_lock);
spin_unlock(&cache_list_lock);
goto out;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2808d550d273..cb49898a5a58 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2577,7 +2577,7 @@ static void rpc_cb_add_xprt_release(void *calldata)
kfree(data);
}
-const static struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
+static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
.rpc_call_done = rpc_cb_add_xprt_done,
.rpc_release = rpc_cb_add_xprt_release,
};
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fcfd48d263f6..9ae588511aaf 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -54,7 +54,8 @@ static struct rpc_wait_queue delay_queue;
/*
* rpciod-related stuff
*/
-struct workqueue_struct *rpciod_workqueue;
+struct workqueue_struct *rpciod_workqueue __read_mostly;
+struct workqueue_struct *xprtiod_workqueue __read_mostly;
/*
* Disable the timer for a given RPC task. Should be called with
@@ -329,7 +330,8 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
* lockless RPC_IS_QUEUED() test) before we've had a chance to test
* the RPC_TASK_RUNNING flag.
*/
-static void rpc_make_runnable(struct rpc_task *task)
+static void rpc_make_runnable(struct workqueue_struct *wq,
+ struct rpc_task *task)
{
bool need_wakeup = !rpc_test_and_set_running(task);
@@ -338,7 +340,7 @@ static void rpc_make_runnable(struct rpc_task *task)
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
- queue_work(rpciod_workqueue, &task->u.tk_work);
+ queue_work(wq, &task->u.tk_work);
} else
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
@@ -407,13 +409,16 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
/**
- * __rpc_do_wake_up_task - wake up a single rpc_task
+ * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
+ * @wq: workqueue on which to run task
* @queue: wait queue
* @task: task to be woken up
*
* Caller must hold queue->lock, and have cleared the task queued flag.
*/
-static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue,
+ struct rpc_task *task)
{
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
@@ -428,7 +433,7 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
__rpc_remove_wait_queue(queue, task);
- rpc_make_runnable(task);
+ rpc_make_runnable(wq, task);
dprintk("RPC: __rpc_wake_up_task done\n");
}
@@ -436,16 +441,25 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
/*
* Wake up a queued task while the queue lock is being held
*/
-static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (RPC_IS_QUEUED(task)) {
smp_rmb();
if (task->tk_waitqueue == queue)
- __rpc_do_wake_up_task(queue, task);
+ __rpc_do_wake_up_task_on_wq(wq, queue, task);
}
}
/*
+ * Wake up a queued task while the queue lock is being held
+ */
+static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+{
+ rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
+}
+
+/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
@@ -518,7 +532,8 @@ static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
/*
* Wake up the first task on the wait queue.
*/
-struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
+struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue,
bool (*func)(struct rpc_task *, void *), void *data)
{
struct rpc_task *task = NULL;
@@ -529,7 +544,7 @@ struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
task = __rpc_find_next_queued(queue);
if (task != NULL) {
if (func(task, data))
- rpc_wake_up_task_queue_locked(queue, task);
+ rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
else
task = NULL;
}
@@ -537,6 +552,15 @@ struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
return task;
}
+
+/*
+ * Wake up the first task on the wait queue.
+ */
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
+ bool (*func)(struct rpc_task *, void *), void *data)
+{
+ return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
+}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);
static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
@@ -814,7 +838,7 @@ void rpc_execute(struct rpc_task *task)
bool is_async = RPC_IS_ASYNC(task);
rpc_set_active(task);
- rpc_make_runnable(task);
+ rpc_make_runnable(rpciod_workqueue, task);
if (!is_async)
__rpc_execute(task);
}
@@ -1071,10 +1095,22 @@ static int rpciod_start(void)
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- /* Note: highpri because network receive is latency sensitive */
- wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
+ if (!wq)
+ goto out_failed;
rpciod_workqueue = wq;
- return rpciod_workqueue != NULL;
+ /* Note: highpri because network receive is latency sensitive */
+ wq = alloc_workqueue("xprtiod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!wq)
+ goto free_rpciod;
+ xprtiod_workqueue = wq;
+ return 1;
+free_rpciod:
+ wq = rpciod_workqueue;
+ rpciod_workqueue = NULL;
+ destroy_workqueue(wq);
+out_failed:
+ return 0;
}
static void rpciod_stop(void)
@@ -1088,6 +1124,9 @@ static void rpciod_stop(void)
wq = rpciod_workqueue;
rpciod_workqueue = NULL;
destroy_workqueue(wq);
+ wq = xprtiod_workqueue;
+ xprtiod_workqueue = NULL;
+ destroy_workqueue(wq);
}
void
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index cc9852897395..c5b0cb4f4056 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1188,11 +1188,17 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
/* Encode reply */
- if (test_bit(RQ_DROPME, &rqstp->rq_flags)) {
+ if (*statp == rpc_drop_reply ||
+ test_bit(RQ_DROPME, &rqstp->rq_flags)) {
if (procp->pc_release)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
}
+ if (*statp == rpc_autherr_badcred) {
+ if (procp->pc_release)
+ procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+ goto err_bad_auth;
+ }
if (*statp == rpc_success &&
(xdr = procp->pc_encode) &&
!xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4f01f63102ee..c3f652395a80 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -21,6 +21,10 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
+static unsigned int svc_rpc_per_connection_limit __read_mostly;
+module_param(svc_rpc_per_connection_limit, uint, 0644);
+
+
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -329,12 +333,45 @@ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
}
EXPORT_SYMBOL_GPL(svc_print_addr);
+static bool svc_xprt_slots_in_range(struct svc_xprt *xprt)
+{
+ unsigned int limit = svc_rpc_per_connection_limit;
+ int nrqsts = atomic_read(&xprt->xpt_nr_rqsts);
+
+ return limit == 0 || (nrqsts >= 0 && nrqsts < limit);
+}
+
+static bool svc_xprt_reserve_slot(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+{
+ if (!test_bit(RQ_DATA, &rqstp->rq_flags)) {
+ if (!svc_xprt_slots_in_range(xprt))
+ return false;
+ atomic_inc(&xprt->xpt_nr_rqsts);
+ set_bit(RQ_DATA, &rqstp->rq_flags);
+ }
+ return true;
+}
+
+static void svc_xprt_release_slot(struct svc_rqst *rqstp)
+{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) {
+ atomic_dec(&xprt->xpt_nr_rqsts);
+ svc_xprt_enqueue(xprt);
+ }
+}
+
static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
return true;
- if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
- return xprt->xpt_ops->xpo_has_wspace(xprt);
+ if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) {
+ if (xprt->xpt_ops->xpo_has_wspace(xprt) &&
+ svc_xprt_slots_in_range(xprt))
+ return true;
+ trace_svc_xprt_no_write_space(xprt);
+ return false;
+ }
return false;
}
@@ -480,8 +517,6 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
rqstp->rq_reserved = space;
- if (xprt->xpt_ops->xpo_adjust_wspace)
- xprt->xpt_ops->xpo_adjust_wspace(xprt);
svc_xprt_enqueue(xprt);
}
}
@@ -512,8 +547,8 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
rqstp->rq_res.head[0].iov_len = 0;
svc_reserve(rqstp, 0);
+ svc_xprt_release_slot(rqstp);
rqstp->rq_xprt = NULL;
-
svc_xprt_put(xprt);
}
@@ -781,7 +816,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
svc_add_new_temp_xprt(serv, newxpt);
else
module_put(xprt->xpt_class->xcl_owner);
- } else {
+ } else if (svc_xprt_reserve_slot(rqstp, xprt)) {
/* XPT_DATA|XPT_DEFERRED case: */
dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
rqstp, rqstp->rq_pool->sp_id, xprt,
@@ -871,6 +906,7 @@ EXPORT_SYMBOL_GPL(svc_recv);
*/
void svc_drop(struct svc_rqst *rqstp)
{
+ trace_svc_drop(rqstp);
dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
svc_xprt_release(rqstp);
}
@@ -1148,6 +1184,7 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
spin_unlock(&xprt->xpt_lock);
dprintk("revisit canceled\n");
svc_xprt_put(xprt);
+ trace_svc_drop_deferred(dr);
kfree(dr);
return;
}
@@ -1205,6 +1242,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
set_bit(RQ_DROPME, &rqstp->rq_flags);
dr->handle.revisit = svc_revisit;
+ trace_svc_defer(rqstp);
return &dr->handle;
}
@@ -1245,6 +1283,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
struct svc_deferred_req,
handle.recent);
list_del_init(&dr->handle.recent);
+ trace_svc_revisit_deferred(dr);
} else
clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
spin_unlock(&xprt->xpt_lock);
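Usage note (the sysfs path is an assumption based on svc_xprt.c being built into the sunrpc module): because svc_rpc_per_connection_limit is declared as a 0644 module parameter, the per-connection request limit can be read or adjusted at runtime, e.g. under /sys/module/sunrpc/parameters/; the default of 0 leaves the limit disabled.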
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index dadfec66dbd8..57625f64efd5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -60,7 +60,6 @@
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
int flags);
-static void svc_udp_data_ready(struct sock *);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);
static void svc_sock_detach(struct svc_xprt *);
@@ -398,48 +397,21 @@ static int svc_sock_secure_port(struct svc_rqst *rqstp)
return svc_port_is_privileged(svc_addr(rqstp));
}
-static bool sunrpc_waitqueue_active(wait_queue_head_t *wq)
-{
- if (!wq)
- return false;
- /*
- * There should normally be a memory * barrier here--see
- * wq_has_sleeper().
- *
- * It appears that isn't currently necessary, though, basically
- * because callers all appear to have sufficient memory barriers
- * between the time the relevant change is made and the
- * time they call these callbacks.
- *
- * The nfsd code itself doesn't actually explicitly wait on
- * these waitqueues, but it may wait on them for example in
- * sendpage() or sendmsg() calls. (And those may be the only
- * places, since it it uses nonblocking reads.)
- *
- * Maybe we should add the memory barriers anyway, but these are
- * hot paths so we'd need to be convinced there's no sigificant
- * penalty.
- */
- return waitqueue_active(wq);
-}
-
/*
* INET callback when data has been received on the socket.
*/
-static void svc_udp_data_ready(struct sock *sk)
+static void svc_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
- wait_queue_head_t *wq = sk_sleep(sk);
if (svsk) {
dprintk("svc: socket %p(inet %p), busy=%d\n",
svsk, sk,
test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
- set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- svc_xprt_enqueue(&svsk->sk_xprt);
+ svsk->sk_odata(sk);
+ if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
+ svc_xprt_enqueue(&svsk->sk_xprt);
}
- if (sunrpc_waitqueue_active(wq))
- wake_up_interruptible(wq);
}
/*
@@ -448,56 +420,22 @@ static void svc_udp_data_ready(struct sock *sk)
static void svc_write_space(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
- wait_queue_head_t *wq = sk_sleep(sk);
if (svsk) {
dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+ svsk->sk_owspace(sk);
svc_xprt_enqueue(&svsk->sk_xprt);
}
-
- if (sunrpc_waitqueue_active(wq)) {
- dprintk("RPC svc_write_space: someone sleeping on %p\n",
- svsk);
- wake_up_interruptible(wq);
- }
}
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
- struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- struct svc_serv *serv = svsk->sk_xprt.xpt_server;
- int required;
+ struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
return 1;
- required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
- if (sk_stream_wspace(svsk->sk_sk) >= required ||
- (sk_stream_min_wspace(svsk->sk_sk) == 0 &&
- atomic_read(&xprt->xpt_reserved) == 0))
- return 1;
- set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- return 0;
-}
-
-static void svc_tcp_write_space(struct sock *sk)
-{
- struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
- struct socket *sock = sk->sk_socket;
-
- if (!sk_stream_is_writeable(sk) || !sock)
- return;
- if (!svsk || svc_tcp_has_wspace(&svsk->sk_xprt))
- clear_bit(SOCK_NOSPACE, &sock->flags);
- svc_write_space(sk);
-}
-
-static void svc_tcp_adjust_wspace(struct svc_xprt *xprt)
-{
- struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
-
- if (svc_tcp_has_wspace(xprt))
- clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
+ return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
}
/*
@@ -746,7 +684,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
&svsk->sk_xprt, serv);
clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
- svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
+ svsk->sk_sk->sk_data_ready = svc_data_ready;
svsk->sk_sk->sk_write_space = svc_write_space;
/* initialise setting must have enough space to
@@ -786,11 +724,12 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
static void svc_tcp_listen_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
- wait_queue_head_t *wq;
dprintk("svc: socket %p TCP (listen) state change %d\n",
sk, sk->sk_state);
+ if (svsk)
+ svsk->sk_odata(sk);
/*
* This callback may called twice when a new connection
* is established as a child socket inherits everything
@@ -808,10 +747,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
} else
printk("svc: socket %p: no user data\n", sk);
}
-
- wq = sk_sleep(sk);
- if (sunrpc_waitqueue_active(wq))
- wake_up_interruptible_all(wq);
}
/*
@@ -820,7 +755,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
static void svc_tcp_state_change(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
- wait_queue_head_t *wq = sk_sleep(sk);
dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
sk, sk->sk_state, sk->sk_user_data);
@@ -828,26 +762,12 @@ static void svc_tcp_state_change(struct sock *sk)
if (!svsk)
printk("svc: socket %p: no user data\n", sk);
else {
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- svc_xprt_enqueue(&svsk->sk_xprt);
- }
- if (sunrpc_waitqueue_active(wq))
- wake_up_interruptible_all(wq);
-}
-
-static void svc_tcp_data_ready(struct sock *sk)
-{
- struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
- wait_queue_head_t *wq = sk_sleep(sk);
-
- dprintk("svc: socket %p TCP data ready (svsk %p)\n",
- sk, sk->sk_user_data);
- if (svsk) {
- set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- svc_xprt_enqueue(&svsk->sk_xprt);
+ svsk->sk_ostate(sk);
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+ svc_xprt_enqueue(&svsk->sk_xprt);
+ }
}
- if (sunrpc_waitqueue_active(wq))
- wake_up_interruptible(wq);
}
/*
@@ -901,6 +821,11 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
dprintk("%s: connect from %s\n", serv->sv_name,
__svc_print_addr(sin, buf, sizeof(buf)));
+ /* Reset the inherited callbacks before calling svc_setup_socket */
+ newsock->sk->sk_state_change = svsk->sk_ostate;
+ newsock->sk->sk_data_ready = svsk->sk_odata;
+ newsock->sk->sk_write_space = svsk->sk_owspace;
+
/* make sure that a write doesn't block forever when
* low on memory
*/
@@ -1317,7 +1242,6 @@ static struct svc_xprt_ops svc_tcp_ops = {
.xpo_has_wspace = svc_tcp_has_wspace,
.xpo_accept = svc_tcp_accept,
.xpo_secure_port = svc_sock_secure_port,
- .xpo_adjust_wspace = svc_tcp_adjust_wspace,
};
static struct svc_xprt_class svc_tcp_class = {
@@ -1357,8 +1281,8 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
} else {
dprintk("setting up TCP socket for reading\n");
sk->sk_state_change = svc_tcp_state_change;
- sk->sk_data_ready = svc_tcp_data_ready;
- sk->sk_write_space = svc_tcp_write_space;
+ sk->sk_data_ready = svc_data_ready;
+ sk->sk_write_space = svc_write_space;
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -1368,8 +1292,13 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- if (sk->sk_state != TCP_ESTABLISHED)
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+ case TCP_ESTABLISHED:
+ break;
+ default:
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+ }
}
}
@@ -1428,17 +1357,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
/* Initialize the socket */
if (sock->type == SOCK_DGRAM)
svc_udp_init(svsk, serv);
- else {
- /* initialise setting must have enough space to
- * receive and respond to one request.
- */
- svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
- 4 * serv->sv_max_mesg);
+ else
svc_tcp_init(svsk, serv);
- }
- dprintk("svc: svc_setup_socket created %p (inet %p)\n",
- svsk, svsk->sk_sk);
+ dprintk("svc: svc_setup_socket created %p (inet %p), "
+ "listen %d close %d\n",
+ svsk, svsk->sk_sk,
+ test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags),
+ test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
return svsk;
}
@@ -1606,18 +1532,16 @@ static void svc_sock_detach(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
struct sock *sk = svsk->sk_sk;
- wait_queue_head_t *wq;
dprintk("svc: svc_sock_detach(%p)\n", svsk);
/* put back the old socket callbacks */
+ lock_sock(sk);
sk->sk_state_change = svsk->sk_ostate;
sk->sk_data_ready = svsk->sk_odata;
sk->sk_write_space = svsk->sk_owspace;
-
- wq = sk_sleep(sk);
- if (sunrpc_waitqueue_active(wq))
- wake_up_interruptible(wq);
+ sk->sk_user_data = NULL;
+ release_sock(sk);
}
/*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 216a1385718a..8313960cac52 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -220,7 +220,7 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
clear_bit(XPRT_LOCKED, &xprt->state);
smp_mb__after_atomic();
} else
- queue_work(rpciod_workqueue, &xprt->task_cleanup);
+ queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}
/*
@@ -295,7 +295,8 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
return;
- if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
+ if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
+ __xprt_lock_write_func, xprt))
return;
xprt_clear_locked(xprt);
}
@@ -324,7 +325,8 @@ static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
return;
if (RPCXPRT_CONGESTED(xprt))
goto out_unlock;
- if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
+ if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
+ __xprt_lock_write_cong_func, xprt))
return;
out_unlock:
xprt_clear_locked(xprt);
@@ -645,7 +647,7 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
set_bit(XPRT_CLOSE_WAIT, &xprt->state);
/* Try to schedule an autoclose RPC call */
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
- queue_work(rpciod_workqueue, &xprt->task_cleanup);
+ queue_work(xprtiod_workqueue, &xprt->task_cleanup);
xprt_wake_pending_tasks(xprt, -EAGAIN);
spin_unlock_bh(&xprt->transport_lock);
}
@@ -672,7 +674,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
set_bit(XPRT_CLOSE_WAIT, &xprt->state);
/* Try to schedule an autoclose RPC call */
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
- queue_work(rpciod_workqueue, &xprt->task_cleanup);
+ queue_work(xprtiod_workqueue, &xprt->task_cleanup);
xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
spin_unlock_bh(&xprt->transport_lock);
@@ -689,7 +691,7 @@ xprt_init_autodisconnect(unsigned long data)
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
goto out_abort;
spin_unlock(&xprt->transport_lock);
- queue_work(rpciod_workqueue, &xprt->task_cleanup);
+ queue_work(xprtiod_workqueue, &xprt->task_cleanup);
return;
out_abort:
spin_unlock(&xprt->transport_lock);
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index e7fd76975d86..66c9d63f4797 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -271,14 +271,12 @@ struct rpc_xprt *xprt_iter_next_entry_multiple(struct rpc_xprt_iter *xpi,
xprt_switch_find_xprt_t find_next)
{
struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
- struct list_head *head;
if (xps == NULL)
return NULL;
- head = &xps->xps_xprt_list;
- if (xps->xps_nxprts < 2)
- return xprt_switch_find_first_entry(head);
- return xprt_switch_set_next_cursor(head, &xpi->xpi_cursor, find_next);
+ return xprt_switch_set_next_cursor(&xps->xps_xprt_list,
+ &xpi->xpi_cursor,
+ find_next);
}
static
diff --git a/net/sunrpc/xprtrdma/Makefile b/net/sunrpc/xprtrdma/Makefile
index dc9f3b513a05..ef19fa42c50f 100644
--- a/net/sunrpc/xprtrdma/Makefile
+++ b/net/sunrpc/xprtrdma/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_SUNRPC_XPRT_RDMA) += rpcrdma.o
rpcrdma-y := transport.o rpc_rdma.o verbs.o \
- fmr_ops.o frwr_ops.o physical_ops.o \
+ fmr_ops.o frwr_ops.o \
svc_rdma.o svc_rdma_backchannel.o svc_rdma_transport.o \
svc_rdma_marshal.o svc_rdma_sendto.o svc_rdma_recvfrom.o \
module.o
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 6326ebe8b595..21cb3b150b37 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -19,13 +19,6 @@
* verb (fmr_op_unmap).
*/
-/* Transport recovery
- *
- * After a transport reconnect, fmr_op_map re-uses the MR already
- * allocated for the RPC, but generates a fresh rkey then maps the
- * MR again. This process is synchronous.
- */
-
#include "xprt_rdma.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -35,62 +28,132 @@
/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES (64)
-static struct workqueue_struct *fmr_recovery_wq;
-
-#define FMR_RECOVERY_WQ_FLAGS (WQ_UNBOUND)
+/* Access mode of externally registered pages */
+enum {
+ RPCRDMA_FMR_ACCESS_FLAGS = IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ,
+};
-int
-fmr_alloc_recovery_wq(void)
+bool
+fmr_is_supported(struct rpcrdma_ia *ia)
{
- fmr_recovery_wq = alloc_workqueue("fmr_recovery", WQ_UNBOUND, 0);
- return !fmr_recovery_wq ? -ENOMEM : 0;
+ if (!ia->ri_device->alloc_fmr) {
+ pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
+ ia->ri_device->name);
+ return false;
+ }
+ return true;
}
-void
-fmr_destroy_recovery_wq(void)
+static int
+fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
- struct workqueue_struct *wq;
+ static struct ib_fmr_attr fmr_attr = {
+ .max_pages = RPCRDMA_MAX_FMR_SGES,
+ .max_maps = 1,
+ .page_shift = PAGE_SHIFT
+ };
- if (!fmr_recovery_wq)
- return;
+ mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ sizeof(u64), GFP_KERNEL);
+ if (!mw->fmr.fm_physaddrs)
+ goto out_free;
- wq = fmr_recovery_wq;
- fmr_recovery_wq = NULL;
- destroy_workqueue(wq);
+ mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ sizeof(*mw->mw_sg), GFP_KERNEL);
+ if (!mw->mw_sg)
+ goto out_free;
+
+ sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);
+
+ mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
+ &fmr_attr);
+ if (IS_ERR(mw->fmr.fm_mr))
+ goto out_fmr_err;
+
+ return 0;
+
+out_fmr_err:
+ dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
+ PTR_ERR(mw->fmr.fm_mr));
+
+out_free:
+ kfree(mw->mw_sg);
+ kfree(mw->fmr.fm_physaddrs);
+ return -ENOMEM;
}
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
LIST_HEAD(l);
+ int rc;
- list_add(&mw->fmr.fmr->list, &l);
- return ib_unmap_fmr(&l);
+ list_add(&mw->fmr.fm_mr->list, &l);
+ rc = ib_unmap_fmr(&l);
+ list_del_init(&mw->fmr.fm_mr->list);
+ return rc;
}
-/* Deferred reset of a single FMR. Generate a fresh rkey by
- * replacing the MR. There's no recovery if this fails.
- */
static void
-__fmr_recovery_worker(struct work_struct *work)
+fmr_op_release_mr(struct rpcrdma_mw *r)
{
- struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
- mw_work);
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ LIST_HEAD(unmap_list);
+ int rc;
- __fmr_unmap(mw);
- rpcrdma_put_mw(r_xprt, mw);
- return;
+ /* Ensure MW is not on any rl_registered list */
+ if (!list_empty(&r->mw_list))
+ list_del(&r->mw_list);
+
+ kfree(r->fmr.fm_physaddrs);
+ kfree(r->mw_sg);
+
+ /* In case this one was left mapped, try to unmap it
+ * to prevent dealloc_fmr from failing with EBUSY
+ */
+ rc = __fmr_unmap(r);
+ if (rc)
+ pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
+ r, rc);
+
+ rc = ib_dealloc_fmr(r->fmr.fm_mr);
+ if (rc)
+ pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
+ r, rc);
+
+ kfree(r);
}
-/* A broken MR was discovered in a context that can't sleep.
- * Defer recovery to the recovery worker.
+/* Reset of a single FMR.
*/
static void
-__fmr_queue_recovery(struct rpcrdma_mw *mw)
+fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
- INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
- queue_work(fmr_recovery_wq, &mw->mw_work);
+ struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ int rc;
+
+ /* ORDER: invalidate first */
+ rc = __fmr_unmap(mw);
+
+ /* ORDER: then DMA unmap */
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (rc)
+ goto out_release;
+
+ rpcrdma_put_mw(r_xprt, mw);
+ r_xprt->rx_stats.mrs_recovered++;
+ return;
+
+out_release:
+ pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
+ r_xprt->rx_stats.mrs_orphaned++;
+
+ spin_lock(&r_xprt->rx_buf.rb_mwlock);
+ list_del(&mw->mw_all);
+ spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+
+ fmr_op_release_mr(mw);
}
static int
@@ -112,86 +175,21 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
-static int
-fmr_op_init(struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
- struct ib_fmr_attr fmr_attr = {
- .max_pages = RPCRDMA_MAX_FMR_SGES,
- .max_maps = 1,
- .page_shift = PAGE_SHIFT
- };
- struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
- struct rpcrdma_mw *r;
- int i, rc;
-
- spin_lock_init(&buf->rb_mwlock);
- INIT_LIST_HEAD(&buf->rb_mws);
- INIT_LIST_HEAD(&buf->rb_all);
-
- i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
- i += 2; /* head + tail */
- i *= buf->rb_max_requests; /* one set for each RPC slot */
- dprintk("RPC: %s: initalizing %d FMRs\n", __func__, i);
-
- rc = -ENOMEM;
- while (i--) {
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (!r)
- goto out;
-
- r->fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
- sizeof(u64), GFP_KERNEL);
- if (!r->fmr.physaddrs)
- goto out_free;
-
- r->fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
- if (IS_ERR(r->fmr.fmr))
- goto out_fmr_err;
-
- r->mw_xprt = r_xprt;
- list_add(&r->mw_list, &buf->rb_mws);
- list_add(&r->mw_all, &buf->rb_all);
- }
- return 0;
-
-out_fmr_err:
- rc = PTR_ERR(r->fmr.fmr);
- dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc);
- kfree(r->fmr.physaddrs);
-out_free:
- kfree(r);
-out:
- return rc;
-}
-
/* Use the ib_map_phys_fmr() verb to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*/
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing)
+ int nsegs, bool writing, struct rpcrdma_mw **out)
{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct ib_device *device = ia->ri_device;
- enum dma_data_direction direction = rpcrdma_data_dir(writing);
struct rpcrdma_mr_seg *seg1 = seg;
int len, pageoff, i, rc;
struct rpcrdma_mw *mw;
+ u64 *dma_pages;
- mw = seg1->rl_mw;
- seg1->rl_mw = NULL;
- if (!mw) {
- mw = rpcrdma_get_mw(r_xprt);
- if (!mw)
- return -ENOMEM;
- } else {
- /* this is a retransmit; generate a fresh rkey */
- rc = __fmr_unmap(mw);
- if (rc)
- return rc;
- }
+ mw = rpcrdma_get_mw(r_xprt);
+ if (!mw)
+ return -ENOBUFS;
pageoff = offset_in_page(seg1->mr_offset);
seg1->mr_offset -= pageoff; /* start of page */
@@ -200,8 +198,14 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (nsegs > RPCRDMA_MAX_FMR_SGES)
nsegs = RPCRDMA_MAX_FMR_SGES;
for (i = 0; i < nsegs;) {
- rpcrdma_map_one(device, seg, direction);
- mw->fmr.physaddrs[i] = seg->mr_dma;
+ if (seg->mr_page)
+ sg_set_page(&mw->mw_sg[i],
+ seg->mr_page,
+ seg->mr_len,
+ offset_in_page(seg->mr_offset));
+ else
+ sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+ seg->mr_len);
len += seg->mr_len;
++seg;
++i;
@@ -210,49 +214,54 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
-
- rc = ib_map_phys_fmr(mw->fmr.fmr, mw->fmr.physaddrs,
- i, seg1->mr_dma);
+ mw->mw_nents = i;
+ mw->mw_dir = rpcrdma_data_dir(writing);
+ if (i == 0)
+ goto out_dmamap_err;
+
+ if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir))
+ goto out_dmamap_err;
+
+ for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
+ dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
+ rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
+ dma_pages[0]);
if (rc)
goto out_maperr;
- seg1->rl_mw = mw;
- seg1->mr_rkey = mw->fmr.fmr->rkey;
- seg1->mr_base = seg1->mr_dma + pageoff;
- seg1->mr_nsegs = i;
- seg1->mr_len = len;
- return i;
+ mw->mw_handle = mw->fmr.fm_mr->rkey;
+ mw->mw_length = len;
+ mw->mw_offset = dma_pages[0] + pageoff;
-out_maperr:
- dprintk("RPC: %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
- __func__, len, (unsigned long long)seg1->mr_dma,
- pageoff, i, rc);
- while (i--)
- rpcrdma_unmap_one(device, --seg);
- return rc;
-}
+ *out = mw;
+ return mw->mw_nents;
-static void
-__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
-{
- struct ib_device *device = r_xprt->rx_ia.ri_device;
- int nsegs = seg->mr_nsegs;
+out_dmamap_err:
+ pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
+ mw->mw_sg, mw->mw_nents);
+ rpcrdma_defer_mr_recovery(mw);
+ return -EIO;
- while (nsegs--)
- rpcrdma_unmap_one(device, seg++);
+out_maperr:
+ pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
+ len, (unsigned long long)dma_pages[0],
+ pageoff, mw->mw_nents, rc);
+ rpcrdma_defer_mr_recovery(mw);
+ return -EIO;
}
/* Invalidate all memory regions that were registered for "req".
*
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
+ *
+ * Caller ensures that req->rl_registered is not empty.
*/
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
- struct rpcrdma_mr_seg *seg;
- unsigned int i, nchunks;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mw *mw, *tmp;
LIST_HEAD(unmap_list);
int rc;
@@ -261,90 +270,54 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
/* ORDER: Invalidate all of the req's MRs first
*
* ib_unmap_fmr() is slow, so use a single call instead
- * of one call per mapped MR.
+ * of one call per mapped FMR.
*/
- for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
- seg = &req->rl_segments[i];
- mw = seg->rl_mw;
-
- list_add(&mw->fmr.fmr->list, &unmap_list);
-
- i += seg->mr_nsegs;
- }
+ list_for_each_entry(mw, &req->rl_registered, mw_list)
+ list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
rc = ib_unmap_fmr(&unmap_list);
if (rc)
- pr_warn("%s: ib_unmap_fmr failed (%i)\n", __func__, rc);
+ goto out_reset;
/* ORDER: Now DMA unmap all of the req's MRs, and return
* them to the free MW list.
*/
- for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
- seg = &req->rl_segments[i];
+ list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+ list_del_init(&mw->mw_list);
+ list_del_init(&mw->fmr.fm_mr->list);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ rpcrdma_put_mw(r_xprt, mw);
+ }
- __fmr_dma_unmap(r_xprt, seg);
- rpcrdma_put_mw(r_xprt, seg->rl_mw);
+ return;
- i += seg->mr_nsegs;
- seg->mr_nsegs = 0;
- seg->rl_mw = NULL;
- }
+out_reset:
+ pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
- req->rl_nchunks = 0;
+ list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+ list_del_init(&mw->fmr.fm_mr->list);
+ fmr_op_recover_mr(mw);
+ }
}
/* Use a slow, safe mechanism to invalidate all memory regions
* that were registered for "req".
- *
- * In the asynchronous case, DMA unmapping occurs first here
- * because the rpcrdma_mr_seg is released immediately after this
- * call. It's contents won't be available in __fmr_dma_unmap later.
- * FIXME.
*/
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
bool sync)
{
- struct rpcrdma_mr_seg *seg;
struct rpcrdma_mw *mw;
- unsigned int i;
-
- for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
- seg = &req->rl_segments[i];
- mw = seg->rl_mw;
-
- if (sync) {
- /* ORDER */
- __fmr_unmap(mw);
- __fmr_dma_unmap(r_xprt, seg);
- rpcrdma_put_mw(r_xprt, mw);
- } else {
- __fmr_dma_unmap(r_xprt, seg);
- __fmr_queue_recovery(mw);
- }
-
- i += seg->mr_nsegs;
- seg->mr_nsegs = 0;
- seg->rl_mw = NULL;
- }
-}
-
-static void
-fmr_op_destroy(struct rpcrdma_buffer *buf)
-{
- struct rpcrdma_mw *r;
- int rc;
-
- while (!list_empty(&buf->rb_all)) {
- r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
- list_del(&r->mw_all);
- kfree(r->fmr.physaddrs);
- rc = ib_dealloc_fmr(r->fmr.fmr);
- if (rc)
- dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
- __func__, rc);
+ while (!list_empty(&req->rl_registered)) {
+ mw = list_first_entry(&req->rl_registered,
+ struct rpcrdma_mw, mw_list);
+ list_del_init(&mw->mw_list);
- kfree(r);
+ if (sync)
+ fmr_op_recover_mr(mw);
+ else
+ rpcrdma_defer_mr_recovery(mw);
}
}
@@ -352,9 +325,10 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
.ro_map = fmr_op_map,
.ro_unmap_sync = fmr_op_unmap_sync,
.ro_unmap_safe = fmr_op_unmap_safe,
+ .ro_recover_mr = fmr_op_recover_mr,
.ro_open = fmr_op_open,
.ro_maxpages = fmr_op_maxpages,
- .ro_init = fmr_op_init,
- .ro_destroy = fmr_op_destroy,
+ .ro_init_mr = fmr_op_init_mr,
+ .ro_release_mr = fmr_op_release_mr,
.ro_displayname = "fmr",
};
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index c0947544babe..892b5e1d9b09 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -73,29 +73,71 @@
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
-static struct workqueue_struct *frwr_recovery_wq;
-
-#define FRWR_RECOVERY_WQ_FLAGS (WQ_UNBOUND | WQ_MEM_RECLAIM)
+bool
+frwr_is_supported(struct rpcrdma_ia *ia)
+{
+ struct ib_device_attr *attrs = &ia->ri_device->attrs;
+
+ if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+ goto out_not_supported;
+ if (attrs->max_fast_reg_page_list_len == 0)
+ goto out_not_supported;
+ return true;
+
+out_not_supported:
+ pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
+ ia->ri_device->name);
+ return false;
+}
-int
-frwr_alloc_recovery_wq(void)
+static int
+frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
- frwr_recovery_wq = alloc_workqueue("frwr_recovery",
- FRWR_RECOVERY_WQ_FLAGS, 0);
- return !frwr_recovery_wq ? -ENOMEM : 0;
+ unsigned int depth = ia->ri_max_frmr_depth;
+ struct rpcrdma_frmr *f = &r->frmr;
+ int rc;
+
+ f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
+ if (IS_ERR(f->fr_mr))
+ goto out_mr_err;
+
+ r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
+ if (!r->mw_sg)
+ goto out_list_err;
+
+ sg_init_table(r->mw_sg, depth);
+ init_completion(&f->fr_linv_done);
+ return 0;
+
+out_mr_err:
+ rc = PTR_ERR(f->fr_mr);
+ dprintk("RPC: %s: ib_alloc_mr status %i\n",
+ __func__, rc);
+ return rc;
+
+out_list_err:
+ rc = -ENOMEM;
+ dprintk("RPC: %s: sg allocation failure\n",
+ __func__);
+ ib_dereg_mr(f->fr_mr);
+ return rc;
}
-void
-frwr_destroy_recovery_wq(void)
+static void
+frwr_op_release_mr(struct rpcrdma_mw *r)
{
- struct workqueue_struct *wq;
+ int rc;
- if (!frwr_recovery_wq)
- return;
+ /* Ensure MW is not on any rl_registered list */
+ if (!list_empty(&r->mw_list))
+ list_del(&r->mw_list);
- wq = frwr_recovery_wq;
- frwr_recovery_wq = NULL;
- destroy_workqueue(wq);
+ rc = ib_dereg_mr(r->frmr.fr_mr);
+ if (rc)
+ pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
+ r, rc);
+ kfree(r->mw_sg);
+ kfree(r);
}
static int
@@ -124,93 +166,37 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
return 0;
}
-static void
-__frwr_reset_and_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_frmr *f = &mw->frmr;
- int rc;
-
- rc = __frwr_reset_mr(ia, mw);
- ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents, f->fr_dir);
- if (rc)
- return;
-
- rpcrdma_put_mw(r_xprt, mw);
-}
-
-/* Deferred reset of a single FRMR. Generate a fresh rkey by
- * replacing the MR.
+/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
*
* There's no recovery if this fails. The FRMR is abandoned, but
* remains in rb_all. It will be cleaned up when the transport is
* destroyed.
*/
static void
-__frwr_recovery_worker(struct work_struct *work)
-{
- struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
- mw_work);
-
- __frwr_reset_and_unmap(r->mw_xprt, r);
- return;
-}
-
-/* A broken MR was discovered in a context that can't sleep.
- * Defer recovery to the recovery worker.
- */
-static void
-__frwr_queue_recovery(struct rpcrdma_mw *r)
-{
- INIT_WORK(&r->mw_work, __frwr_recovery_worker);
- queue_work(frwr_recovery_wq, &r->mw_work);
-}
-
-static int
-__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
- unsigned int depth)
+frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
- struct rpcrdma_frmr *f = &r->frmr;
+ struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc;
- f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
- if (IS_ERR(f->fr_mr))
- goto out_mr_err;
-
- f->fr_sg = kcalloc(depth, sizeof(*f->fr_sg), GFP_KERNEL);
- if (!f->fr_sg)
- goto out_list_err;
-
- sg_init_table(f->fr_sg, depth);
-
- init_completion(&f->fr_linv_done);
-
- return 0;
+ rc = __frwr_reset_mr(ia, mw);
+ ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (rc)
+ goto out_release;
-out_mr_err:
- rc = PTR_ERR(f->fr_mr);
- dprintk("RPC: %s: ib_alloc_mr status %i\n",
- __func__, rc);
- return rc;
+ rpcrdma_put_mw(r_xprt, mw);
+ r_xprt->rx_stats.mrs_recovered++;
+ return;
-out_list_err:
- rc = -ENOMEM;
- dprintk("RPC: %s: sg allocation failure\n",
- __func__);
- ib_dereg_mr(f->fr_mr);
- return rc;
-}
+out_release:
+ pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
+ r_xprt->rx_stats.mrs_orphaned++;
-static void
-__frwr_release(struct rpcrdma_mw *r)
-{
- int rc;
+ spin_lock(&r_xprt->rx_buf.rb_mwlock);
+ list_del(&mw->mw_all);
+ spin_unlock(&r_xprt->rx_buf.rb_mwlock);
- rc = ib_dereg_mr(r->frmr.fr_mr);
- if (rc)
- dprintk("RPC: %s: ib_dereg_mr status %i\n",
- __func__, rc);
- kfree(r->frmr.fr_sg);
+ frwr_op_release_mr(mw);
}
static int
@@ -346,57 +332,14 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
complete_all(&frmr->fr_linv_done);
}
-static int
-frwr_op_init(struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct ib_device *device = r_xprt->rx_ia.ri_device;
- unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
- struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
- int i;
-
- spin_lock_init(&buf->rb_mwlock);
- INIT_LIST_HEAD(&buf->rb_mws);
- INIT_LIST_HEAD(&buf->rb_all);
-
- i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
- i += 2; /* head + tail */
- i *= buf->rb_max_requests; /* one set for each RPC slot */
- dprintk("RPC: %s: initalizing %d FRMRs\n", __func__, i);
-
- while (i--) {
- struct rpcrdma_mw *r;
- int rc;
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (!r)
- return -ENOMEM;
-
- rc = __frwr_init(r, pd, device, depth);
- if (rc) {
- kfree(r);
- return rc;
- }
-
- r->mw_xprt = r_xprt;
- list_add(&r->mw_list, &buf->rb_mws);
- list_add(&r->mw_all, &buf->rb_all);
- }
-
- return 0;
-}
-
-/* Post a FAST_REG Work Request to register a memory region
+/* Post a REG_MR Work Request to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*/
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing)
+ int nsegs, bool writing, struct rpcrdma_mw **out)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct ib_device *device = ia->ri_device;
- enum dma_data_direction direction = rpcrdma_data_dir(writing);
- struct rpcrdma_mr_seg *seg1 = seg;
struct rpcrdma_mw *mw;
struct rpcrdma_frmr *frmr;
struct ib_mr *mr;
@@ -405,14 +348,13 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
int rc, i, n, dma_nents;
u8 key;
- mw = seg1->rl_mw;
- seg1->rl_mw = NULL;
+ mw = NULL;
do {
if (mw)
- __frwr_queue_recovery(mw);
+ rpcrdma_defer_mr_recovery(mw);
mw = rpcrdma_get_mw(r_xprt);
if (!mw)
- return -ENOMEM;
+ return -ENOBUFS;
} while (mw->frmr.fr_state != FRMR_IS_INVALID);
frmr = &mw->frmr;
frmr->fr_state = FRMR_IS_VALID;
@@ -421,15 +363,14 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (nsegs > ia->ri_max_frmr_depth)
nsegs = ia->ri_max_frmr_depth;
-
for (i = 0; i < nsegs;) {
if (seg->mr_page)
- sg_set_page(&frmr->fr_sg[i],
+ sg_set_page(&mw->mw_sg[i],
seg->mr_page,
seg->mr_len,
offset_in_page(seg->mr_offset));
else
- sg_set_buf(&frmr->fr_sg[i], seg->mr_offset,
+ sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
seg->mr_len);
++seg;
@@ -440,26 +381,22 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- frmr->fr_nents = i;
- frmr->fr_dir = direction;
-
- dma_nents = ib_dma_map_sg(device, frmr->fr_sg, frmr->fr_nents, direction);
- if (!dma_nents) {
- pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
- __func__, frmr->fr_sg, frmr->fr_nents);
- return -ENOMEM;
- }
+ mw->mw_nents = i;
+ mw->mw_dir = rpcrdma_data_dir(writing);
+ if (i == 0)
+ goto out_dmamap_err;
- n = ib_map_mr_sg(mr, frmr->fr_sg, frmr->fr_nents, NULL, PAGE_SIZE);
- if (unlikely(n != frmr->fr_nents)) {
- pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
- __func__, frmr->fr_mr, n, frmr->fr_nents);
- rc = n < 0 ? n : -EINVAL;
- goto out_senderr;
- }
+ dma_nents = ib_dma_map_sg(ia->ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ if (!dma_nents)
+ goto out_dmamap_err;
+
+ n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
+ if (unlikely(n != mw->mw_nents))
+ goto out_mapmr_err;
dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
- __func__, mw, frmr->fr_nents, mr->length);
+ __func__, mw, mw->mw_nents, mr->length);
key = (u8)(mr->rkey & 0x000000FF);
ib_update_fast_reg_key(mr, ++key);
@@ -481,24 +418,34 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (rc)
goto out_senderr;
- seg1->rl_mw = mw;
- seg1->mr_rkey = mr->rkey;
- seg1->mr_base = mr->iova;
- seg1->mr_nsegs = frmr->fr_nents;
- seg1->mr_len = mr->length;
+ mw->mw_handle = mr->rkey;
+ mw->mw_length = mr->length;
+ mw->mw_offset = mr->iova;
+
+ *out = mw;
+ return mw->mw_nents;
- return frmr->fr_nents;
+out_dmamap_err:
+ pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
+ mw->mw_sg, mw->mw_nents);
+ rpcrdma_defer_mr_recovery(mw);
+ return -EIO;
+
+out_mapmr_err:
+ pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
+ frmr->fr_mr, n, mw->mw_nents);
+ rpcrdma_defer_mr_recovery(mw);
+ return -EIO;
out_senderr:
- dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
- __frwr_queue_recovery(mw);
- return rc;
+ pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
+ rpcrdma_defer_mr_recovery(mw);
+ return -ENOTCONN;
}
static struct ib_send_wr *
-__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
+__frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
{
- struct rpcrdma_mw *mw = seg->rl_mw;
struct rpcrdma_frmr *f = &mw->frmr;
struct ib_send_wr *invalidate_wr;
@@ -518,16 +465,16 @@ __frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
*
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
+ *
+ * Caller ensures that req->rl_registered is not empty.
*/
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_mr_seg *seg;
- unsigned int i, nchunks;
+ struct rpcrdma_mw *mw, *tmp;
struct rpcrdma_frmr *f;
- struct rpcrdma_mw *mw;
int rc;
dprintk("RPC: %s: req %p\n", __func__, req);
@@ -537,22 +484,18 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* Chain the LOCAL_INV Work Requests and post them with
* a single ib_post_send() call.
*/
+ f = NULL;
invalidate_wrs = pos = prev = NULL;
- seg = NULL;
- for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
- seg = &req->rl_segments[i];
-
- pos = __frwr_prepare_linv_wr(seg);
+ list_for_each_entry(mw, &req->rl_registered, mw_list) {
+ pos = __frwr_prepare_linv_wr(mw);
if (!invalidate_wrs)
invalidate_wrs = pos;
else
prev->next = pos;
prev = pos;
-
- i += seg->mr_nsegs;
+ f = &mw->frmr;
}
- f = &seg->rl_mw->frmr;
/* Strong send queue ordering guarantees that when the
* last WR in the chain completes, all WRs in the chain
@@ -577,39 +520,27 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* them to the free MW list.
*/
unmap:
- for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
- seg = &req->rl_segments[i];
- mw = seg->rl_mw;
- seg->rl_mw = NULL;
-
- ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents,
- f->fr_dir);
+ list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
+ list_del_init(&mw->mw_list);
+ ib_dma_unmap_sg(ia->ri_device,
+ mw->mw_sg, mw->mw_nents, mw->mw_dir);
rpcrdma_put_mw(r_xprt, mw);
-
- i += seg->mr_nsegs;
- seg->mr_nsegs = 0;
}
-
- req->rl_nchunks = 0;
return;
reset_mrs:
- pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
+ pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
+ rdma_disconnect(ia->ri_id);
/* Find and reset the MRs in the LOCAL_INV WRs that did not
* get posted. This is synchronous, and slow.
*/
- for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
- seg = &req->rl_segments[i];
- mw = seg->rl_mw;
+ list_for_each_entry(mw, &req->rl_registered, mw_list) {
f = &mw->frmr;
-
if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
__frwr_reset_mr(ia, mw);
bad_wr = bad_wr->next;
}
-
- i += seg->mr_nsegs;
}
goto unmap;
}
@@ -621,38 +552,17 @@ static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
bool sync)
{
- struct rpcrdma_mr_seg *seg;
struct rpcrdma_mw *mw;
- unsigned int i;
- for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
- seg = &req->rl_segments[i];
- mw = seg->rl_mw;
+ while (!list_empty(&req->rl_registered)) {
+ mw = list_first_entry(&req->rl_registered,
+ struct rpcrdma_mw, mw_list);
+ list_del_init(&mw->mw_list);
if (sync)
- __frwr_reset_and_unmap(r_xprt, mw);
+ frwr_op_recover_mr(mw);
else
- __frwr_queue_recovery(mw);
-
- i += seg->mr_nsegs;
- seg->mr_nsegs = 0;
- seg->rl_mw = NULL;
- }
-}
-
-static void
-frwr_op_destroy(struct rpcrdma_buffer *buf)
-{
- struct rpcrdma_mw *r;
-
- /* Ensure stale MWs for "buf" are no longer in flight */
- flush_workqueue(frwr_recovery_wq);
-
- while (!list_empty(&buf->rb_all)) {
- r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
- list_del(&r->mw_all);
- __frwr_release(r);
- kfree(r);
+ rpcrdma_defer_mr_recovery(mw);
}
}
@@ -660,9 +570,10 @@ const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
.ro_unmap_sync = frwr_op_unmap_sync,
.ro_unmap_safe = frwr_op_unmap_safe,
+ .ro_recover_mr = frwr_op_recover_mr,
.ro_open = frwr_op_open,
.ro_maxpages = frwr_op_maxpages,
- .ro_init = frwr_op_init,
- .ro_destroy = frwr_op_destroy,
+ .ro_init_mr = frwr_op_init_mr,
+ .ro_release_mr = frwr_op_release_mr,
.ro_displayname = "frwr",
};
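(Illustrative aside, not part of the patch: with the reworked registration API above, callers no longer read rkey/base/length back out of the rpcrdma_mr_seg array. Instead ->ro_map() hands the rpcrdma_mw back through an out parameter, and the caller chains it onto req->rl_registered so ->ro_unmap_sync() / ->ro_unmap_safe() can find every registered MW later. A minimal sketch of that calling pattern, mirroring the rpc_rdma.c call sites updated later in this diff; the helper name is hypothetical and it assumes "xprt_rdma.h" for the structures shown here.)

/* Hypothetical helper -- sketch only, mirrors rpcrdma_encode_read_list() */
static int example_register_chunk(struct rpcrdma_xprt *r_xprt,
                                  struct rpcrdma_req *req,
                                  struct rpcrdma_mr_seg *seg,
                                  int nsegs, bool writing)
{
        struct rpcrdma_mw *mw;
        int n;

        /* Register up to "nsegs" segments; the method returns how many
         * segments it consumed, or a negative errno (-ENOBUFS when no MW
         * is available, -EIO on a DMA mapping or registration failure,
         * -ENOTCONN if posting the FRWR registration WR failed).
         */
        n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
        if (n < 0)
                return n;

        /* Track the MW on the request so the unmap paths can invalidate
         * and DMA-unmap it when the RPC completes or is retransmitted.
         */
        list_add(&mw->mw_list, &req->rl_registered);
        return n;
}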
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
deleted file mode 100644
index 3750596cc432..000000000000
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2015 Oracle. All rights reserved.
- * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
- */
-
-/* No-op chunk preparation. All client memory is pre-registered.
- * Sometimes referred to as ALLPHYSICAL mode.
- *
- * Physical registration is simple because all client memory is
- * pre-registered and never deregistered. This mode is good for
- * adapter bring up, but is considered not safe: the server is
- * trusted not to abuse its access to client memory not involved
- * in RDMA I/O.
- */
-
-#include "xprt_rdma.h"
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_TRANS
-#endif
-
-static int
-physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
- struct rpcrdma_create_data_internal *cdata)
-{
- struct ib_mr *mr;
-
- /* Obtain an rkey to use for RPC data payloads.
- */
- mr = ib_get_dma_mr(ia->ri_pd,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
- if (IS_ERR(mr)) {
- pr_err("%s: ib_get_dma_mr for failed with %lX\n",
- __func__, PTR_ERR(mr));
- return -ENOMEM;
- }
- ia->ri_dma_mr = mr;
-
- rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int,
- RPCRDMA_MAX_DATA_SEGS,
- RPCRDMA_MAX_HDR_SEGS));
- return 0;
-}
-
-/* PHYSICAL memory registration conveys one page per chunk segment.
- */
-static size_t
-physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
-{
- return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- RPCRDMA_MAX_HDR_SEGS);
-}
-
-static int
-physical_op_init(struct rpcrdma_xprt *r_xprt)
-{
- return 0;
-}
-
-/* The client's physical memory is already exposed for
- * remote access via RDMA READ or RDMA WRITE.
- */
-static int
-physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
- rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
- seg->mr_rkey = ia->ri_dma_mr->rkey;
- seg->mr_base = seg->mr_dma;
- return 1;
-}
-
-/* DMA unmap all memory regions that were mapped for "req".
- */
-static void
-physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
-{
- struct ib_device *device = r_xprt->rx_ia.ri_device;
- unsigned int i;
-
- for (i = 0; req->rl_nchunks; --req->rl_nchunks)
- rpcrdma_unmap_one(device, &req->rl_segments[i++]);
-}
-
-/* Use a slow, safe mechanism to invalidate all memory regions
- * that were registered for "req".
- *
- * For physical memory registration, there is no good way to
- * fence a single MR that has been advertised to the server. The
- * client has already handed the server an R_key that cannot be
- * invalidated and is shared by all MRs on this connection.
- * Tearing down the PD might be the only safe choice, but it's
- * not clear that a freshly acquired DMA R_key would be different
- * than the one used by the PD that was just destroyed.
- * FIXME.
- */
-static void
-physical_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
- bool sync)
-{
- physical_op_unmap_sync(r_xprt, req);
-}
-
-static void
-physical_op_destroy(struct rpcrdma_buffer *buf)
-{
-}
-
-const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
- .ro_map = physical_op_map,
- .ro_unmap_sync = physical_op_unmap_sync,
- .ro_unmap_safe = physical_op_unmap_safe,
- .ro_open = physical_op_open,
- .ro_maxpages = physical_op_maxpages,
- .ro_init = physical_op_init,
- .ro_destroy = physical_op_destroy,
- .ro_displayname = "physical",
-};
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 35a81096e83d..a47f170b20ef 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -196,8 +196,7 @@ rpcrdma_tail_pullup(struct xdr_buf *buf)
* MR when they can.
*/
static int
-rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
- int n, int nsegs)
+rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
{
size_t page_offset;
u32 remaining;
@@ -206,7 +205,7 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
base = vec->iov_base;
page_offset = offset_in_page(base);
remaining = vec->iov_len;
- while (remaining && n < nsegs) {
+ while (remaining && n < RPCRDMA_MAX_SEGS) {
seg[n].mr_page = NULL;
seg[n].mr_offset = base;
seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
@@ -230,34 +229,34 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
- enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
+ enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg)
{
- int len, n = 0, p;
- int page_base;
+ int len, n, p, page_base;
struct page **ppages;
+ n = 0;
if (pos == 0) {
- n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
- if (n == nsegs)
- return -EIO;
+ n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
+ if (n == RPCRDMA_MAX_SEGS)
+ goto out_overflow;
}
len = xdrbuf->page_len;
ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
page_base = xdrbuf->page_base & ~PAGE_MASK;
p = 0;
- while (len && n < nsegs) {
+ while (len && n < RPCRDMA_MAX_SEGS) {
if (!ppages[p]) {
/* alloc the pagelist for receiving buffer */
ppages[p] = alloc_page(GFP_ATOMIC);
if (!ppages[p])
- return -ENOMEM;
+ return -EAGAIN;
}
seg[n].mr_page = ppages[p];
seg[n].mr_offset = (void *)(unsigned long) page_base;
seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
if (seg[n].mr_len > PAGE_SIZE)
- return -EIO;
+ goto out_overflow;
len -= seg[n].mr_len;
++n;
++p;
@@ -265,8 +264,8 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
}
/* Message overflows the seg array */
- if (len && n == nsegs)
- return -EIO;
+ if (len && n == RPCRDMA_MAX_SEGS)
+ goto out_overflow;
/* When encoding the read list, the tail is always sent inline */
if (type == rpcrdma_readch)
@@ -277,20 +276,24 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
* xdr pad bytes, saving the server an RDMA operation. */
if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
return n;
- n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
- if (n == nsegs)
- return -EIO;
+ n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
+ if (n == RPCRDMA_MAX_SEGS)
+ goto out_overflow;
}
return n;
+
+out_overflow:
+ pr_err("rpcrdma: segment array overflow\n");
+ return -EIO;
}
static inline __be32 *
-xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr_seg *seg)
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
- *iptr++ = cpu_to_be32(seg->mr_rkey);
- *iptr++ = cpu_to_be32(seg->mr_len);
- return xdr_encode_hyper(iptr, seg->mr_base);
+ *iptr++ = cpu_to_be32(mw->mw_handle);
+ *iptr++ = cpu_to_be32(mw->mw_length);
+ return xdr_encode_hyper(iptr, mw->mw_offset);
}
/* XDR-encode the Read list. Supports encoding a list of read
@@ -310,7 +313,8 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req, struct rpc_rqst *rqst,
__be32 *iptr, enum rpcrdma_chunktype rtype)
{
- struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+ struct rpcrdma_mr_seg *seg;
+ struct rpcrdma_mw *mw;
unsigned int pos;
int n, nsegs;
@@ -322,15 +326,17 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
pos = rqst->rq_snd_buf.head[0].iov_len;
if (rtype == rpcrdma_areadch)
pos = 0;
- nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg,
- RPCRDMA_MAX_SEGS - req->rl_nchunks);
+ seg = req->rl_segments;
+ nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg);
if (nsegs < 0)
return ERR_PTR(nsegs);
do {
- n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, false);
- if (n <= 0)
+ n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+ false, &mw);
+ if (n < 0)
return ERR_PTR(n);
+ list_add(&mw->mw_list, &req->rl_registered);
*iptr++ = xdr_one; /* item present */
@@ -338,20 +344,17 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
* have the same "position".
*/
*iptr++ = cpu_to_be32(pos);
- iptr = xdr_encode_rdma_segment(iptr, seg);
+ iptr = xdr_encode_rdma_segment(iptr, mw);
- dprintk("RPC: %5u %s: read segment pos %u "
- "%d@0x%016llx:0x%08x (%s)\n",
+ dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
rqst->rq_task->tk_pid, __func__, pos,
- seg->mr_len, (unsigned long long)seg->mr_base,
- seg->mr_rkey, n < nsegs ? "more" : "last");
+ mw->mw_length, (unsigned long long)mw->mw_offset,
+ mw->mw_handle, n < nsegs ? "more" : "last");
r_xprt->rx_stats.read_chunk_count++;
- req->rl_nchunks++;
seg += n;
nsegs -= n;
} while (nsegs);
- req->rl_nextseg = seg;
/* Finish Read list */
*iptr++ = xdr_zero; /* Next item not present */
@@ -375,7 +378,8 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
struct rpc_rqst *rqst, __be32 *iptr,
enum rpcrdma_chunktype wtype)
{
- struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+ struct rpcrdma_mr_seg *seg;
+ struct rpcrdma_mw *mw;
int n, nsegs, nchunks;
__be32 *segcount;
@@ -384,10 +388,10 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
return iptr;
}
+ seg = req->rl_segments;
nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
rqst->rq_rcv_buf.head[0].iov_len,
- wtype, seg,
- RPCRDMA_MAX_SEGS - req->rl_nchunks);
+ wtype, seg);
if (nsegs < 0)
return ERR_PTR(nsegs);
@@ -396,26 +400,25 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
- n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
- if (n <= 0)
+ n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+ true, &mw);
+ if (n < 0)
return ERR_PTR(n);
+ list_add(&mw->mw_list, &req->rl_registered);
- iptr = xdr_encode_rdma_segment(iptr, seg);
+ iptr = xdr_encode_rdma_segment(iptr, mw);
- dprintk("RPC: %5u %s: write segment "
- "%d@0x016%llx:0x%08x (%s)\n",
+ dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
rqst->rq_task->tk_pid, __func__,
- seg->mr_len, (unsigned long long)seg->mr_base,
- seg->mr_rkey, n < nsegs ? "more" : "last");
+ mw->mw_length, (unsigned long long)mw->mw_offset,
+ mw->mw_handle, n < nsegs ? "more" : "last");
r_xprt->rx_stats.write_chunk_count++;
r_xprt->rx_stats.total_rdma_request += seg->mr_len;
- req->rl_nchunks++;
nchunks++;
seg += n;
nsegs -= n;
} while (nsegs);
- req->rl_nextseg = seg;
/* Update count of segments in this Write chunk */
*segcount = cpu_to_be32(nchunks);
@@ -442,7 +445,8 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req, struct rpc_rqst *rqst,
__be32 *iptr, enum rpcrdma_chunktype wtype)
{
- struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+ struct rpcrdma_mr_seg *seg;
+ struct rpcrdma_mw *mw;
int n, nsegs, nchunks;
__be32 *segcount;
@@ -451,8 +455,8 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
return iptr;
}
- nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
- RPCRDMA_MAX_SEGS - req->rl_nchunks);
+ seg = req->rl_segments;
+ nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg);
if (nsegs < 0)
return ERR_PTR(nsegs);
@@ -461,26 +465,25 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
nchunks = 0;
do {
- n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
- if (n <= 0)
+ n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
+ true, &mw);
+ if (n < 0)
return ERR_PTR(n);
+ list_add(&mw->mw_list, &req->rl_registered);
- iptr = xdr_encode_rdma_segment(iptr, seg);
+ iptr = xdr_encode_rdma_segment(iptr, mw);
- dprintk("RPC: %5u %s: reply segment "
- "%d@0x%016llx:0x%08x (%s)\n",
+ dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
rqst->rq_task->tk_pid, __func__,
- seg->mr_len, (unsigned long long)seg->mr_base,
- seg->mr_rkey, n < nsegs ? "more" : "last");
+ mw->mw_length, (unsigned long long)mw->mw_offset,
+ mw->mw_handle, n < nsegs ? "more" : "last");
r_xprt->rx_stats.reply_chunk_count++;
r_xprt->rx_stats.total_rdma_request += seg->mr_len;
- req->rl_nchunks++;
nchunks++;
seg += n;
nsegs -= n;
} while (nsegs);
- req->rl_nextseg = seg;
/* Update count of segments in the Reply chunk */
*segcount = cpu_to_be32(nchunks);
@@ -567,6 +570,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
enum rpcrdma_chunktype rtype, wtype;
struct rpcrdma_msg *headerp;
+ bool ddp_allowed;
ssize_t hdrlen;
size_t rpclen;
__be32 *iptr;
@@ -583,6 +587,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
headerp->rm_type = rdma_msg;
+ /* When the ULP employs a GSS flavor that guarantees integrity
+ * or privacy, direct data placement of individual data items
+ * is not allowed.
+ */
+ ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
+ RPCAUTH_AUTH_DATATOUCH);
+
/*
* Chunks needed for results?
*
@@ -594,7 +605,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
*/
if (rpcrdma_results_inline(r_xprt, rqst))
wtype = rpcrdma_noch;
- else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
+ else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
wtype = rpcrdma_writech;
else
wtype = rpcrdma_replych;
@@ -617,7 +628,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
rtype = rpcrdma_noch;
rpcrdma_inline_pullup(rqst);
rpclen = rqst->rq_svec[0].iov_len;
- } else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
+ } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
rtype = rpcrdma_readch;
rpclen = rqst->rq_svec[0].iov_len;
rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
@@ -650,8 +661,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* send a Call message with a Position Zero Read chunk and a
* regular Read chunk at the same time.
*/
- req->rl_nchunks = 0;
- req->rl_nextseg = req->rl_segments;
iptr = headerp->rm_body.rm_chunks;
iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
if (IS_ERR(iptr))
@@ -690,10 +699,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
out_overflow:
pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n",
hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]);
- /* Terminate this RPC. Chunks registered above will be
- * released by xprt_release -> xprt_rmda_free .
- */
- return -EIO;
+ iptr = ERR_PTR(-EIO);
out_unmap:
r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
@@ -705,15 +711,13 @@ out_unmap:
* RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
*/
static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
+rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
{
unsigned int i, total_len;
struct rpcrdma_write_chunk *cur_wchunk;
char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
i = be32_to_cpu(**iptrp);
- if (i > max)
- return -1;
cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
total_len = 0;
while (i--) {
@@ -744,45 +748,66 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __b
return total_len;
}
-/*
- * Scatter inline received data back into provided iov's.
+/**
+ * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
+ * @rqst: controlling RPC request
+ * @srcp: points to RPC message payload in receive buffer
+ * @copy_len: remaining length of receive buffer content
+ * @pad: Write chunk pad bytes needed (zero for pure inline)
+ *
+ * The upper layer has set the maximum number of bytes it can
+ * receive in each component of rq_rcv_buf. These values are set in
+ * the head.iov_len, page_len, tail.iov_len, and buflen fields.
+ *
+ * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
+ * many cases this function simply updates iov_base pointers in
+ * rq_rcv_buf to point directly to the received reply data, to
+ * avoid copying reply data.
+ *
+ * Returns the count of bytes which had to be memcopied.
*/
-static void
+static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
- int i, npages, curlen, olen;
+ unsigned long fixup_copy_count;
+ int i, npages, curlen;
char *destp;
struct page **ppages;
int page_base;
+ /* The head iovec is redirected to the RPC reply message
+ * in the receive buffer, to avoid a memcopy.
+ */
+ rqst->rq_rcv_buf.head[0].iov_base = srcp;
+ rqst->rq_private_buf.head[0].iov_base = srcp;
+
+ /* The contents of the receive buffer that follow
+ * head.iov_len bytes are copied into the page list.
+ */
curlen = rqst->rq_rcv_buf.head[0].iov_len;
- if (curlen > copy_len) { /* write chunk header fixup */
+ if (curlen > copy_len)
curlen = copy_len;
- rqst->rq_rcv_buf.head[0].iov_len = curlen;
- }
-
dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
__func__, srcp, copy_len, curlen);
-
- /* Shift pointer for first receive segment only */
- rqst->rq_rcv_buf.head[0].iov_base = srcp;
srcp += curlen;
copy_len -= curlen;
- olen = copy_len;
- i = 0;
- rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
page_base = rqst->rq_rcv_buf.page_base;
ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
page_base &= ~PAGE_MASK;
-
+ fixup_copy_count = 0;
if (copy_len && rqst->rq_rcv_buf.page_len) {
- npages = PAGE_ALIGN(page_base +
- rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
- for (; i < npages; i++) {
+ int pagelist_len;
+
+ pagelist_len = rqst->rq_rcv_buf.page_len;
+ if (pagelist_len > copy_len)
+ pagelist_len = copy_len;
+ npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
curlen = PAGE_SIZE - page_base;
- if (curlen > copy_len)
- curlen = copy_len;
+ if (curlen > pagelist_len)
+ curlen = pagelist_len;
+
dprintk("RPC: %s: page %d"
" srcp 0x%p len %d curlen %d\n",
__func__, i, srcp, copy_len, curlen);
@@ -792,39 +817,32 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
kunmap_atomic(destp);
srcp += curlen;
copy_len -= curlen;
- if (copy_len == 0)
+ fixup_copy_count += curlen;
+ pagelist_len -= curlen;
+ if (!pagelist_len)
break;
page_base = 0;
}
- }
- if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
- curlen = copy_len;
- if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
- curlen = rqst->rq_rcv_buf.tail[0].iov_len;
- if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
- memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
- dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
- __func__, srcp, copy_len, curlen);
- rqst->rq_rcv_buf.tail[0].iov_len = curlen;
- copy_len -= curlen; ++i;
- } else
- rqst->rq_rcv_buf.tail[0].iov_len = 0;
-
- if (pad) {
- /* implicit padding on terminal chunk */
- unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
- while (pad--)
- p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
+ /* Implicit padding for the last segment in a Write
+ * chunk is inserted inline at the front of the tail
+ * iovec. The upper layer ignores the content of
+ * the pad. Simply ensure inline content in the tail
+ * that follows the Write chunk is properly aligned.
+ */
+ if (pad)
+ srcp -= pad;
}
- if (copy_len)
- dprintk("RPC: %s: %d bytes in"
- " %d extra segments (%d lost)\n",
- __func__, olen, i, copy_len);
+ /* The tail iovec is redirected to the remaining data
+ * in the receive buffer, to avoid a memcopy.
+ */
+ if (copy_len || pad) {
+ rqst->rq_rcv_buf.tail[0].iov_base = srcp;
+ rqst->rq_private_buf.tail[0].iov_base = srcp;
+ }
- /* TBD avoid a warning from call_decode() */
- rqst->rq_private_buf = rqst->rq_rcv_buf;
+ return fixup_copy_count;
}
void
@@ -960,14 +978,13 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
(headerp->rm_body.rm_chunks[1] == xdr_zero &&
headerp->rm_body.rm_chunks[2] != xdr_zero) ||
(headerp->rm_body.rm_chunks[1] != xdr_zero &&
- req->rl_nchunks == 0))
+ list_empty(&req->rl_registered)))
goto badheader;
if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
/* count any expected write chunks in read reply */
/* start at write chunk array count */
iptr = &headerp->rm_body.rm_chunks[2];
- rdmalen = rpcrdma_count_chunks(rep,
- req->rl_nchunks, 1, &iptr);
+ rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
/* check for validity, and no reply chunk after */
if (rdmalen < 0 || *iptr++ != xdr_zero)
goto badheader;
@@ -988,8 +1005,10 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
rep->rr_len -= RPCRDMA_HDRLEN_MIN;
status = rep->rr_len;
}
- /* Fix up the rpc results for upper layer */
- rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
+
+ r_xprt->rx_stats.fixup_copy_count +=
+ rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
+ rdmalen);
break;
case rdma_nomsg:
@@ -997,11 +1016,11 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
headerp->rm_body.rm_chunks[1] != xdr_zero ||
headerp->rm_body.rm_chunks[2] != xdr_one ||
- req->rl_nchunks == 0)
+ list_empty(&req->rl_registered))
goto badheader;
iptr = (__be32 *)((unsigned char *)headerp +
RPCRDMA_HDRLEN_MIN);
- rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
+ rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
if (rdmalen < 0)
goto badheader;
r_xprt->rx_stats.total_rdma_reply += rdmalen;
@@ -1014,14 +1033,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
badheader:
default:
- dprintk("%s: invalid rpcrdma reply header (type %d):"
- " chunks[012] == %d %d %d"
- " expected chunks <= %d\n",
- __func__, be32_to_cpu(headerp->rm_type),
- headerp->rm_body.rm_chunks[0],
- headerp->rm_body.rm_chunks[1],
- headerp->rm_body.rm_chunks[2],
- req->rl_nchunks);
+ dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
+ rqst->rq_task->tk_pid, __func__,
+ be32_to_cpu(headerp->rm_type));
status = -EIO;
r_xprt->rx_stats.bad_reply_count++;
break;
@@ -1035,7 +1049,7 @@ out:
* control: waking the next RPC waits until this RPC has
* relinquished all its Send Queue entries.
*/
- if (req->rl_nchunks)
+ if (!list_empty(&req->rl_registered))
r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
spin_lock_bh(&xprt->transport_lock);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 99d2e5b72726..81f0e879f019 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -558,7 +558,6 @@ out_sendbuf:
out_fail:
rpcrdma_buffer_put(req);
- r_xprt->rx_stats.failed_marshal_count++;
return NULL;
}
@@ -590,8 +589,19 @@ xprt_rdma_free(void *buffer)
rpcrdma_buffer_put(req);
}
-/*
+/**
+ * xprt_rdma_send_request - marshal and send an RPC request
+ * @task: RPC task with an RPC message in rq_snd_buf
+ *
+ * Return values:
+ * 0: The request has been sent
+ * ENOTCONN: Caller needs to invoke connect logic then call again
+ * ENOBUFS: Call again later to send the request
+ * EIO: A permanent error occurred. The request was not sent,
+ * and it must not be retried
+ *
* send_request invokes the meat of RPC RDMA. It must do the following:
+ *
* 1. Marshal the RPC request into an RPC RDMA request, which means
* putting a header in front of data, and creating IOVs for RDMA
* from those in the request.
@@ -600,7 +610,6 @@ xprt_rdma_free(void *buffer)
* the request (rpcrdma_ep_post).
* 4. No partial sends are possible in the RPC-RDMA protocol (as in UDP).
*/
-
static int
xprt_rdma_send_request(struct rpc_task *task)
{
@@ -610,6 +619,9 @@ xprt_rdma_send_request(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
int rc = 0;
+ /* On retransmit, remove any previously registered chunks */
+ r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
+
rc = rpcrdma_marshal_req(rqst);
if (rc < 0)
goto failed_marshal;
@@ -630,11 +642,12 @@ xprt_rdma_send_request(struct rpc_task *task)
return 0;
failed_marshal:
- r_xprt->rx_stats.failed_marshal_count++;
dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n",
__func__, rc);
if (rc == -EIO)
- return -EIO;
+ r_xprt->rx_stats.failed_marshal_count++;
+ if (rc != -ENOTCONN)
+ return rc;
drop_connection:
xprt_disconnect_done(xprt);
return -ENOTCONN; /* implies disconnect */
@@ -660,7 +673,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
xprt->stat.bad_xids,
xprt->stat.req_u,
xprt->stat.bklog_u);
- seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n",
+ seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu ",
r_xprt->rx_stats.read_chunk_count,
r_xprt->rx_stats.write_chunk_count,
r_xprt->rx_stats.reply_chunk_count,
@@ -672,6 +685,10 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
r_xprt->rx_stats.failed_marshal_count,
r_xprt->rx_stats.bad_reply_count,
r_xprt->rx_stats.nomsg_call_count);
+ seq_printf(seq, "%lu %lu %lu\n",
+ r_xprt->rx_stats.mrs_recovered,
+ r_xprt->rx_stats.mrs_orphaned,
+ r_xprt->rx_stats.mrs_allocated);
}
static int
@@ -741,7 +758,6 @@ void xprt_rdma_cleanup(void)
__func__, rc);
rpcrdma_destroy_wq();
- frwr_destroy_recovery_wq();
rc = xprt_unregister_transport(&xprt_rdma_bc);
if (rc)
@@ -753,20 +769,13 @@ int xprt_rdma_init(void)
{
int rc;
- rc = frwr_alloc_recovery_wq();
- if (rc)
- return rc;
-
rc = rpcrdma_alloc_wq();
- if (rc) {
- frwr_destroy_recovery_wq();
+ if (rc)
return rc;
- }
rc = xprt_register_transport(&xprt_rdma);
if (rc) {
rpcrdma_destroy_wq();
- frwr_destroy_recovery_wq();
return rc;
}
@@ -774,7 +783,6 @@ int xprt_rdma_init(void)
if (rc) {
xprt_unregister_transport(&xprt_rdma);
rpcrdma_destroy_wq();
- frwr_destroy_recovery_wq();
return rc;
}
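(Illustrative aside, not part of the patch: the kernel-doc added to xprt_rdma_send_request() above defines a small return-code contract. The sketch below only spells that contract out for a hypothetical caller; in the kernel the function is reached through the transport's rpc_xprt_ops, and the real retry/reconnect logic lives in the RPC core, which is not shown in this diff.)

/* Hypothetical caller -- names and control flow are illustrative only */
static void example_transmit(struct rpc_task *task)
{
        switch (xprt_rdma_send_request(task)) {
        case 0:
                /* Request posted; the reply handler completes the RPC */
                break;
        case -ENOTCONN:
                /* Run connect logic, then call send_request again */
                break;
        case -ENOBUFS:
                /* Resources (MWs, request buffers) exhausted; retry later */
                break;
        case -EIO:
        default:
                /* Permanent marshaling failure; do not retry this request */
                break;
        }
}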
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index b044d98a1370..536d0be3f61b 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -379,8 +379,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
struct rpcrdma_ia *ia = &xprt->rx_ia;
int rc;
- ia->ri_dma_mr = NULL;
-
ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
if (IS_ERR(ia->ri_id)) {
rc = PTR_ERR(ia->ri_id);
@@ -391,47 +389,29 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
ia->ri_pd = ib_alloc_pd(ia->ri_device);
if (IS_ERR(ia->ri_pd)) {
rc = PTR_ERR(ia->ri_pd);
- dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
- __func__, rc);
+ pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
goto out2;
}
- if (memreg == RPCRDMA_FRMR) {
- if (!(ia->ri_device->attrs.device_cap_flags &
- IB_DEVICE_MEM_MGT_EXTENSIONS) ||
- (ia->ri_device->attrs.max_fast_reg_page_list_len == 0)) {
- dprintk("RPC: %s: FRMR registration "
- "not supported by HCA\n", __func__);
- memreg = RPCRDMA_MTHCAFMR;
- }
- }
- if (memreg == RPCRDMA_MTHCAFMR) {
- if (!ia->ri_device->alloc_fmr) {
- dprintk("RPC: %s: MTHCAFMR registration "
- "not supported by HCA\n", __func__);
- rc = -EINVAL;
- goto out3;
- }
- }
-
switch (memreg) {
case RPCRDMA_FRMR:
- ia->ri_ops = &rpcrdma_frwr_memreg_ops;
- break;
- case RPCRDMA_ALLPHYSICAL:
- ia->ri_ops = &rpcrdma_physical_memreg_ops;
- break;
+ if (frwr_is_supported(ia)) {
+ ia->ri_ops = &rpcrdma_frwr_memreg_ops;
+ break;
+ }
+ /*FALLTHROUGH*/
case RPCRDMA_MTHCAFMR:
- ia->ri_ops = &rpcrdma_fmr_memreg_ops;
- break;
+ if (fmr_is_supported(ia)) {
+ ia->ri_ops = &rpcrdma_fmr_memreg_ops;
+ break;
+ }
+ /*FALLTHROUGH*/
default:
- printk(KERN_ERR "RPC: Unsupported memory "
- "registration mode: %d\n", memreg);
- rc = -ENOMEM;
+ pr_err("rpcrdma: Unsupported memory registration mode: %d\n",
+ memreg);
+ rc = -EINVAL;
goto out3;
}
- dprintk("RPC: %s: memory registration strategy is '%s'\n",
- __func__, ia->ri_ops->ro_displayname);
return 0;
@@ -585,8 +565,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
out2:
ib_free_cq(sendcq);
out1:
- if (ia->ri_dma_mr)
- ib_dereg_mr(ia->ri_dma_mr);
return rc;
}
@@ -600,8 +578,6 @@ out1:
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- int rc;
-
dprintk("RPC: %s: entering, connected is %d\n",
__func__, ep->rep_connected);
@@ -615,12 +591,6 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
ib_free_cq(ep->rep_attr.recv_cq);
ib_free_cq(ep->rep_attr.send_cq);
-
- if (ia->ri_dma_mr) {
- rc = ib_dereg_mr(ia->ri_dma_mr);
- dprintk("RPC: %s: ib_dereg_mr returned %i\n",
- __func__, rc);
- }
}
/*
@@ -777,6 +747,90 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
ib_drain_qp(ia->ri_id->qp);
}
+static void
+rpcrdma_mr_recovery_worker(struct work_struct *work)
+{
+ struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
+ rb_recovery_worker.work);
+ struct rpcrdma_mw *mw;
+
+ spin_lock(&buf->rb_recovery_lock);
+ while (!list_empty(&buf->rb_stale_mrs)) {
+ mw = list_first_entry(&buf->rb_stale_mrs,
+ struct rpcrdma_mw, mw_list);
+ list_del_init(&mw->mw_list);
+ spin_unlock(&buf->rb_recovery_lock);
+
+ dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
+ mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);
+
+ spin_lock(&buf->rb_recovery_lock);
+ }
+ spin_unlock(&buf->rb_recovery_lock);
+}
+
+void
+rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
+{
+ struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+
+ spin_lock(&buf->rb_recovery_lock);
+ list_add(&mw->mw_list, &buf->rb_stale_mrs);
+ spin_unlock(&buf->rb_recovery_lock);
+
+ schedule_delayed_work(&buf->rb_recovery_worker, 0);
+}
+
+static void
+rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ unsigned int count;
+ LIST_HEAD(free);
+ LIST_HEAD(all);
+
+ for (count = 0; count < 32; count++) {
+ struct rpcrdma_mw *mw;
+ int rc;
+
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ break;
+
+ rc = ia->ri_ops->ro_init_mr(ia, mw);
+ if (rc) {
+ kfree(mw);
+ break;
+ }
+
+ mw->mw_xprt = r_xprt;
+
+ list_add(&mw->mw_list, &free);
+ list_add(&mw->mw_all, &all);
+ }
+
+ spin_lock(&buf->rb_mwlock);
+ list_splice(&free, &buf->rb_mws);
+ list_splice(&all, &buf->rb_all);
+ r_xprt->rx_stats.mrs_allocated += count;
+ spin_unlock(&buf->rb_mwlock);
+
+ dprintk("RPC: %s: created %u MRs\n", __func__, count);
+}
+
+static void
+rpcrdma_mr_refresh_worker(struct work_struct *work)
+{
+ struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
+ rb_refresh_worker.work);
+ struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
+ rx_buf);
+
+ rpcrdma_create_mrs(r_xprt);
+}
+
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
@@ -793,6 +847,7 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
spin_unlock(&buffer->rb_reqslock);
req->rl_cqe.done = rpcrdma_wc_send;
req->rl_buffer = &r_xprt->rx_buf;
+ INIT_LIST_HEAD(&req->rl_registered);
return req;
}
@@ -832,17 +887,23 @@ int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int i, rc;
buf->rb_max_requests = r_xprt->rx_data.max_requests;
buf->rb_bc_srv_max_requests = 0;
- spin_lock_init(&buf->rb_lock);
atomic_set(&buf->rb_credits, 1);
+ spin_lock_init(&buf->rb_mwlock);
+ spin_lock_init(&buf->rb_lock);
+ spin_lock_init(&buf->rb_recovery_lock);
+ INIT_LIST_HEAD(&buf->rb_mws);
+ INIT_LIST_HEAD(&buf->rb_all);
+ INIT_LIST_HEAD(&buf->rb_stale_mrs);
+ INIT_DELAYED_WORK(&buf->rb_refresh_worker,
+ rpcrdma_mr_refresh_worker);
+ INIT_DELAYED_WORK(&buf->rb_recovery_worker,
+ rpcrdma_mr_recovery_worker);
- rc = ia->ri_ops->ro_init(r_xprt);
- if (rc)
- goto out;
+ rpcrdma_create_mrs(r_xprt);
INIT_LIST_HEAD(&buf->rb_send_bufs);
INIT_LIST_HEAD(&buf->rb_allreqs);
@@ -862,7 +923,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
INIT_LIST_HEAD(&buf->rb_recv_bufs);
- for (i = 0; i < buf->rb_max_requests + 2; i++) {
+ for (i = 0; i < buf->rb_max_requests; i++) {
struct rpcrdma_rep *rep;
rep = rpcrdma_create_rep(r_xprt);
@@ -918,11 +979,39 @@ rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
kfree(req);
}
+static void
+rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
+{
+ struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
+ rx_buf);
+ struct rpcrdma_ia *ia = rdmab_to_ia(buf);
+ struct rpcrdma_mw *mw;
+ unsigned int count;
+
+ count = 0;
+ spin_lock(&buf->rb_mwlock);
+ while (!list_empty(&buf->rb_all)) {
+ mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
+ list_del(&mw->mw_all);
+
+ spin_unlock(&buf->rb_mwlock);
+ ia->ri_ops->ro_release_mr(mw);
+ count++;
+ spin_lock(&buf->rb_mwlock);
+ }
+ spin_unlock(&buf->rb_mwlock);
+ r_xprt->rx_stats.mrs_allocated = 0;
+
+ dprintk("RPC: %s: released %u MRs\n", __func__, count);
+}
+
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
struct rpcrdma_ia *ia = rdmab_to_ia(buf);
+ cancel_delayed_work_sync(&buf->rb_recovery_worker);
+
while (!list_empty(&buf->rb_recv_bufs)) {
struct rpcrdma_rep *rep;
@@ -944,7 +1033,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
}
spin_unlock(&buf->rb_reqslock);
- ia->ri_ops->ro_destroy(buf);
+ rpcrdma_destroy_mrs(buf);
}
struct rpcrdma_mw *
@@ -962,8 +1051,17 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
spin_unlock(&buf->rb_mwlock);
if (!mw)
- pr_err("RPC: %s: no MWs available\n", __func__);
+ goto out_nomws;
return mw;
+
+out_nomws:
+ dprintk("RPC: %s: no MWs available\n", __func__);
+ schedule_delayed_work(&buf->rb_refresh_worker, 0);
+
+ /* Allow the reply handler and refresh worker to run */
+ cond_resched();
+
+ return NULL;
}
void
@@ -978,8 +1076,6 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
/*
* Get a set of request/reply buffers.
- *
- * Reply buffer (if available) is attached to send buffer upon return.
*/
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
@@ -998,13 +1094,13 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
out_reqbuf:
spin_unlock(&buffers->rb_lock);
- pr_warn("RPC: %s: out of request buffers\n", __func__);
+ pr_warn("rpcrdma: out of request buffers (%p)\n", buffers);
return NULL;
out_repbuf:
+ list_add(&req->rl_free, &buffers->rb_send_bufs);
spin_unlock(&buffers->rb_lock);
- pr_warn("RPC: %s: out of reply buffers\n", __func__);
- req->rl_reply = NULL;
- return req;
+ pr_warn("rpcrdma: out of reply buffers (%p)\n", buffers);
+ return NULL;
}
/*
@@ -1060,14 +1156,6 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
* Wrappers for internal-use kmalloc memory registration, used by buffer code.
*/
-void
-rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
-{
- dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
- seg->mr_offset,
- (unsigned long long)seg->mr_dma, seg->mr_dmalen);
-}
-
/**
* rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
* @ia: controlling rpcrdma_ia
@@ -1150,7 +1238,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
if (rep) {
rc = rpcrdma_ep_post_recv(ia, ep, rep);
if (rc)
- goto out;
+ return rc;
req->rl_reply = NULL;
}
@@ -1175,10 +1263,12 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
if (rc)
- dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
- rc);
-out:
- return rc;
+ goto out_postsend_err;
+ return 0;
+
+out_postsend_err:
+ pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
+ return -ENOTCONN;
}
/*
@@ -1203,11 +1293,13 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
DMA_BIDIRECTIONAL);
rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
-
if (rc)
- dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
- rc);
- return rc;
+ goto out_postrecv;
+ return 0;
+
+out_postrecv:
+ pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
+ return -ENOTCONN;
}
/**
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 95cdc66225ee..670fad57153a 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -68,7 +68,6 @@ struct rpcrdma_ia {
struct ib_device *ri_device;
struct rdma_cm_id *ri_id;
struct ib_pd *ri_pd;
- struct ib_mr *ri_dma_mr;
struct completion ri_done;
int ri_async_rc;
unsigned int ri_max_frmr_depth;
@@ -172,23 +171,14 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
* o recv buffer (posted to provider)
* o ib_sge (also donated to provider)
* o status of reply (length, success or not)
- * o bookkeeping state to get run by tasklet (list, etc)
+ * o bookkeeping state to get run by reply handler (list, etc)
*
- * These are allocated during initialization, per-transport instance;
- * however, the tasklet execution list itself is global, as it should
- * always be pretty short.
+ * These are allocated during initialization, per-transport instance.
*
* N of these are associated with a transport instance, and stored in
* struct rpcrdma_buffer. N is the max number of outstanding requests.
*/
-#define RPCRDMA_MAX_DATA_SEGS ((1 * 1024 * 1024) / PAGE_SIZE)
-
-/* data segments + head/tail for Call + head/tail for Reply */
-#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 4)
-
-struct rpcrdma_buffer;
-
struct rpcrdma_rep {
struct ib_cqe rr_cqe;
unsigned int rr_len;
@@ -221,9 +211,6 @@ enum rpcrdma_frmr_state {
};
struct rpcrdma_frmr {
- struct scatterlist *fr_sg;
- int fr_nents;
- enum dma_data_direction fr_dir;
struct ib_mr *fr_mr;
struct ib_cqe fr_cqe;
enum rpcrdma_frmr_state fr_state;
@@ -235,18 +222,23 @@ struct rpcrdma_frmr {
};
struct rpcrdma_fmr {
- struct ib_fmr *fmr;
- u64 *physaddrs;
+ struct ib_fmr *fm_mr;
+ u64 *fm_physaddrs;
};
struct rpcrdma_mw {
+ struct list_head mw_list;
+ struct scatterlist *mw_sg;
+ int mw_nents;
+ enum dma_data_direction mw_dir;
union {
struct rpcrdma_fmr fmr;
struct rpcrdma_frmr frmr;
};
- struct work_struct mw_work;
struct rpcrdma_xprt *mw_xprt;
- struct list_head mw_list;
+ u32 mw_handle;
+ u32 mw_length;
+ u64 mw_offset;
struct list_head mw_all;
};
@@ -266,33 +258,30 @@ struct rpcrdma_mw {
* of iovs for send operations. The reason is that the iovs passed to
* ib_post_{send,recv} must not be modified until the work request
* completes.
- *
- * NOTES:
- * o RPCRDMA_MAX_SEGS is the max number of addressible chunk elements we
- * marshal. The number needed varies depending on the iov lists that
- * are passed to us, the memory registration mode we are in, and if
- * physical addressing is used, the layout.
*/
+/* Maximum number of page-sized "segments" per chunk list to be
+ * registered or invalidated. Must handle a Reply chunk:
+ */
+enum {
+ RPCRDMA_MAX_IOV_SEGS = 3,
+ RPCRDMA_MAX_DATA_SEGS = ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
+ RPCRDMA_MAX_SEGS = RPCRDMA_MAX_DATA_SEGS +
+ RPCRDMA_MAX_IOV_SEGS,
+};
+
struct rpcrdma_mr_seg { /* chunk descriptors */
- struct rpcrdma_mw *rl_mw; /* registered MR */
- u64 mr_base; /* registration result */
- u32 mr_rkey; /* registration result */
u32 mr_len; /* length of chunk or segment */
- int mr_nsegs; /* number of segments in chunk or 0 */
- enum dma_data_direction mr_dir; /* segment mapping direction */
- dma_addr_t mr_dma; /* segment mapping address */
- size_t mr_dmalen; /* segment mapping length */
struct page *mr_page; /* owning page, if any */
char *mr_offset; /* kva if no page, else offset */
};
#define RPCRDMA_MAX_IOVS (2)
+struct rpcrdma_buffer;
struct rpcrdma_req {
struct list_head rl_free;
unsigned int rl_niovs;
- unsigned int rl_nchunks;
unsigned int rl_connect_cookie;
struct rpc_task *rl_task;
struct rpcrdma_buffer *rl_buffer;
@@ -300,12 +289,13 @@ struct rpcrdma_req {
struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS];
struct rpcrdma_regbuf *rl_rdmabuf;
struct rpcrdma_regbuf *rl_sendbuf;
- struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
- struct rpcrdma_mr_seg *rl_nextseg;
struct ib_cqe rl_cqe;
struct list_head rl_all;
bool rl_backchannel;
+
+ struct list_head rl_registered; /* registered segments */
+ struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
static inline struct rpcrdma_req *
@@ -341,6 +331,11 @@ struct rpcrdma_buffer {
struct list_head rb_allreqs;
u32 rb_bc_max_requests;
+
+ spinlock_t rb_recovery_lock; /* protect rb_stale_mrs */
+ struct list_head rb_stale_mrs;
+ struct delayed_work rb_recovery_worker;
+ struct delayed_work rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
@@ -387,6 +382,9 @@ struct rpcrdma_stats {
unsigned long bad_reply_count;
unsigned long nomsg_call_count;
unsigned long bcall_count;
+ unsigned long mrs_recovered;
+ unsigned long mrs_orphaned;
+ unsigned long mrs_allocated;
};
/*
@@ -395,23 +393,25 @@ struct rpcrdma_stats {
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
int (*ro_map)(struct rpcrdma_xprt *,
- struct rpcrdma_mr_seg *, int, bool);
+ struct rpcrdma_mr_seg *, int, bool,
+ struct rpcrdma_mw **);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
struct rpcrdma_req *);
void (*ro_unmap_safe)(struct rpcrdma_xprt *,
struct rpcrdma_req *, bool);
+ void (*ro_recover_mr)(struct rpcrdma_mw *);
int (*ro_open)(struct rpcrdma_ia *,
struct rpcrdma_ep *,
struct rpcrdma_create_data_internal *);
size_t (*ro_maxpages)(struct rpcrdma_xprt *);
- int (*ro_init)(struct rpcrdma_xprt *);
- void (*ro_destroy)(struct rpcrdma_buffer *);
+ int (*ro_init_mr)(struct rpcrdma_ia *,
+ struct rpcrdma_mw *);
+ void (*ro_release_mr)(struct rpcrdma_mw *);
const char *ro_displayname;
};
extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
-extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops;
/*
* RPCRDMA transport -- encapsulates the structures above for
@@ -446,6 +446,8 @@ extern int xprt_rdma_pad_optimize;
*/
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);
+bool frwr_is_supported(struct rpcrdma_ia *);
+bool fmr_is_supported(struct rpcrdma_ia *);
/*
* Endpoint calls - xprtrdma/verbs.c
@@ -477,6 +479,8 @@ void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
+void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);
+
struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
@@ -484,9 +488,6 @@ void rpcrdma_free_regbuf(struct rpcrdma_ia *,
int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
-int frwr_alloc_recovery_wq(void);
-void frwr_destroy_recovery_wq(void);
-
int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);
@@ -494,45 +495,12 @@ void rpcrdma_destroy_wq(void);
* Wrappers for chunk registration, shared by read/write chunk code.
*/
-void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);
-
static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
-static inline void
-rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
- enum dma_data_direction direction)
-{
- seg->mr_dir = direction;
- seg->mr_dmalen = seg->mr_len;
-
- if (seg->mr_page)
- seg->mr_dma = ib_dma_map_page(device,
- seg->mr_page, offset_in_page(seg->mr_offset),
- seg->mr_dmalen, seg->mr_dir);
- else
- seg->mr_dma = ib_dma_map_single(device,
- seg->mr_offset,
- seg->mr_dmalen, seg->mr_dir);
-
- if (ib_dma_mapping_error(device, seg->mr_dma))
- rpcrdma_mapping_error(seg);
-}
-
-static inline void
-rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
-{
- if (seg->mr_page)
- ib_dma_unmap_page(device,
- seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
- else
- ib_dma_unmap_single(device,
- seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
-}
-
/*
* RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
*/
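
For reference, the Reply-chunk sizing added to this header works out as follows; the sketch below assumes a 4 KiB PAGE_SIZE (PAGE_SIZE is architecture-dependent) and uses illustrative SKETCH_* names:

    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096 /* assumption: 4 KiB pages */

    enum {
        SKETCH_MAX_IOV_SEGS  = 3,
        SKETCH_MAX_DATA_SEGS = ((1 * 1024 * 1024) / SKETCH_PAGE_SIZE) + 1, /* 257 */
        SKETCH_MAX_SEGS      = SKETCH_MAX_DATA_SEGS + SKETCH_MAX_IOV_SEGS, /* 260 */
    };

    int main(void)
    {
        /* the extra data segment covers a misaligned 1 MB Reply chunk */
        printf("data segs: %d, segs per chunk list: %d\n",
               SKETCH_MAX_DATA_SEGS, SKETCH_MAX_SEGS);
        return 0;
    }
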
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7e2b2fa189c3..111767ab124a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -124,7 +124,7 @@ static struct ctl_table xs_tunables_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xprt_min_resvport_limit,
- .extra2 = &xprt_max_resvport_limit
+ .extra2 = &xprt_max_resvport
},
{
.procname = "max_resvport",
@@ -132,7 +132,7 @@ static struct ctl_table xs_tunables_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &xprt_min_resvport_limit,
+ .extra1 = &xprt_min_resvport,
.extra2 = &xprt_max_resvport_limit
},
{
@@ -642,6 +642,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *xdr = &req->rq_snd_buf;
bool zerocopy = true;
+ bool vm_wait = false;
int status;
int sent;
@@ -677,15 +678,33 @@ static int xs_tcp_send_request(struct rpc_task *task)
return 0;
}
+ WARN_ON_ONCE(sent == 0 && status == 0);
+
+ if (status == -EAGAIN) {
+ /*
+ * Return EAGAIN if we're sure we're hitting the
+ * socket send buffer limits.
+ */
+ if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
+ break;
+ /*
+ * Did we hit a memory allocation failure?
+ */
+ if (sent == 0) {
+ status = -ENOBUFS;
+ if (vm_wait)
+ break;
+ /* Retry, knowing now that we're below the
+ * socket send buffer limit
+ */
+ vm_wait = true;
+ }
+ continue;
+ }
if (status < 0)
break;
- if (sent == 0) {
- status = -EAGAIN;
- break;
- }
+ vm_wait = false;
}
- if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
- status = -ENOBUFS;
switch (status) {
case -ENOTSOCK:
@@ -755,11 +774,19 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
sk->sk_error_report = transport->old_error_report;
}
+static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+}
+
static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
smp_mb__before_atomic();
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
+ xs_sock_reset_state_flags(xprt);
smp_mb__after_atomic();
}
@@ -962,10 +989,13 @@ static void xs_local_data_receive(struct sock_xprt *transport)
goto out;
for (;;) {
skb = skb_recv_datagram(sk, 0, 1, &err);
- if (skb == NULL)
+ if (skb != NULL) {
+ xs_local_data_read_skb(&transport->xprt, sk, skb);
+ skb_free_datagram(sk, skb);
+ continue;
+ }
+ if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
- xs_local_data_read_skb(&transport->xprt, sk, skb);
- skb_free_datagram(sk, skb);
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1043,10 +1073,13 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
goto out;
for (;;) {
skb = skb_recv_datagram(sk, 0, 1, &err);
- if (skb == NULL)
+ if (skb != NULL) {
+ xs_udp_data_read_skb(&transport->xprt, sk, skb);
+ skb_free_datagram(sk, skb);
+ continue;
+ }
+ if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
- xs_udp_data_read_skb(&transport->xprt, sk, skb);
- skb_free_datagram(sk, skb);
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1074,7 +1107,14 @@ static void xs_data_ready(struct sock *sk)
if (xprt != NULL) {
struct sock_xprt *transport = container_of(xprt,
struct sock_xprt, xprt);
- queue_work(rpciod_workqueue, &transport->recv_worker);
+ transport->old_data_ready(sk);
+ /* Any data means we had a useful conversation, so
+ * we don't need to delay the next reconnect
+ */
+ if (xprt->reestablish_timeout)
+ xprt->reestablish_timeout = 0;
+ if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
+ queue_work(xprtiod_workqueue, &transport->recv_worker);
}
read_unlock_bh(&sk->sk_callback_lock);
}
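
The replacement xs_data_ready() above coalesces wakeups through XPRT_SOCK_DATA_READY: the callback queues the receive worker only when the bit was previously clear, and the datagram receive loops clear-and-test the bit once the queue drains so data arriving mid-drain re-arms the worker. A hedged C11 sketch of the same pattern (names are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool data_ready;

    /* callback side: true means the worker actually needs to be queued */
    static bool sketch_data_ready(void)
    {
        return !atomic_exchange(&data_ready, true);
    }

    /* worker side: drain until empty *and* no new readiness was signalled */
    static void sketch_receive_worker(int (*recv_one)(void))
    {
        for (;;) {
            if (recv_one() > 0)
                continue;                          /* consumed one message */
            if (!atomic_exchange(&data_ready, false))
                break;                             /* flag was clear: done */
        }
    }

    static int fake_recv(void)
    {
        static int budget = 3;                     /* pretend 3 datagrams queued */
        return budget-- > 0;
    }

    int main(void)
    {
        if (sketch_data_ready())
            sketch_receive_worker(fake_recv);
        printf("drained, flag=%d\n", atomic_load(&data_ready) ? 1 : 0);
        return 0;
    }
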
@@ -1474,10 +1514,15 @@ static void xs_tcp_data_receive(struct sock_xprt *transport)
for (;;) {
lock_sock(sk);
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
- release_sock(sk);
- if (read <= 0)
- break;
- total += read;
+ if (read <= 0) {
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+ release_sock(sk);
+ if (!test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
+ break;
+ } else {
+ release_sock(sk);
+ total += read;
+ }
rd_desc.count = 65536;
}
out:
@@ -1493,34 +1538,6 @@ static void xs_tcp_data_receive_workfn(struct work_struct *work)
}
/**
- * xs_tcp_data_ready - "data ready" callback for TCP sockets
- * @sk: socket with data to read
- *
- */
-static void xs_tcp_data_ready(struct sock *sk)
-{
- struct sock_xprt *transport;
- struct rpc_xprt *xprt;
-
- dprintk("RPC: xs_tcp_data_ready...\n");
-
- read_lock_bh(&sk->sk_callback_lock);
- if (!(xprt = xprt_from_sock(sk)))
- goto out;
- transport = container_of(xprt, struct sock_xprt, xprt);
-
- /* Any data means we had a useful conversation, so
- * the we don't need to delay the next reconnect
- */
- if (xprt->reestablish_timeout)
- xprt->reestablish_timeout = 0;
- queue_work(rpciod_workqueue, &transport->recv_worker);
-
-out:
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
-/**
* xs_tcp_state_change - callback to handle TCP socket state changes
* @sk: socket whose state has changed
*
@@ -1714,7 +1731,7 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
static unsigned short xs_get_random_port(void)
{
- unsigned short range = xprt_max_resvport - xprt_min_resvport;
+ unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
unsigned short rand = (unsigned short) prandom_u32() % range;
return rand + xprt_min_resvport;
}
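
A worked example of the off-by-one fixed in xs_get_random_port() above, assuming the usual reserved-port defaults of 665 and 1023 (both are tunable): the old range of max - min = 358 could never produce port 1023, while max - min + 1 = 359 covers the whole inclusive window.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int min = 665, max = 1023;            /* assumed resvport defaults */
        int range = max - min + 1;            /* 359 candidate ports */
        int port = min + rand() % range;

        printf("picked reserved port %d from [%d, %d]\n", port, min, max);
        return 0;
    }
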
@@ -2241,7 +2258,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_save_old_callbacks(transport, sk);
sk->sk_user_data = xprt;
- sk->sk_data_ready = xs_tcp_data_ready;
+ sk->sk_data_ready = xs_data_ready;
sk->sk_state_change = xs_tcp_state_change;
sk->sk_write_space = xs_tcp_write_space;
sock_set_flag(sk, SOCK_FASYNC);
@@ -2380,7 +2397,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
/* Start by resetting any existing state */
xs_reset_transport(transport);
- queue_delayed_work(rpciod_workqueue,
+ queue_delayed_work(xprtiod_workqueue,
&transport->connect_worker,
xprt->reestablish_timeout);
xprt->reestablish_timeout <<= 1;
@@ -2390,7 +2407,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
} else {
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
- queue_delayed_work(rpciod_workqueue,
+ queue_delayed_work(xprtiod_workqueue,
&transport->connect_worker, 0);
}
}
@@ -3153,8 +3170,12 @@ static int param_set_uint_minmax(const char *val,
static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
- return param_set_uint_minmax(val, kp,
+ if (kp->arg == &xprt_min_resvport)
+ return param_set_uint_minmax(val, kp,
RPC_MIN_RESVPORT,
+ xprt_max_resvport);
+ return param_set_uint_minmax(val, kp,
+ xprt_min_resvport,
RPC_MAX_RESVPORT);
}
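
param_set_portnr() above now clamps each bound against the other: min_resvport may not be raised above the current max_resvport, and max_resvport may not be lowered below the current min_resvport, so the window can never invert. A hedged sketch of that clamping (values and helper name are illustrative):

    #include <stdio.h>

    static unsigned int min_resvport = 665, max_resvport = 1023;

    /* returns 0 on success, -1 if val falls outside [lo, hi] */
    static int sketch_set_port(unsigned int *port, unsigned int val,
                               unsigned int lo, unsigned int hi)
    {
        if (val < lo || val > hi)
            return -1;
        *port = val;
        return 0;
    }

    int main(void)
    {
        /* raising the minimum above the current maximum is rejected */
        int rc = sketch_set_port(&min_resvport, 2000, 1, max_resvport);

        printf("set min=2000: %s (min=%u, max=%u)\n",
               rc ? "rejected" : "accepted", min_resvport, max_resvport);
        return 0;
    }
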
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index be70a57c1ff9..b62caa1c770c 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -794,10 +794,10 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
return 0;
attr_msg_full:
+ read_unlock_bh(&mon->lock);
nla_nest_cancel(msg->skb, attrs);
msg_full:
genlmsg_cancel(msg->skb, hdr);
- read_unlock_bh(&mon->lock);
return -EMSGSIZE;
}
diff --git a/net/vmw_vsock/Kconfig b/net/vmw_vsock/Kconfig
index 14810abedc2e..8831e7c42167 100644
--- a/net/vmw_vsock/Kconfig
+++ b/net/vmw_vsock/Kconfig
@@ -26,3 +26,23 @@ config VMWARE_VMCI_VSOCKETS
To compile this driver as a module, choose M here: the module
will be called vmw_vsock_vmci_transport. If unsure, say N.
+
+config VIRTIO_VSOCKETS
+ tristate "virtio transport for Virtual Sockets"
+ depends on VSOCKETS && VIRTIO
+ select VIRTIO_VSOCKETS_COMMON
+ help
+ This module implements a virtio transport for Virtual Sockets.
+
+ Enable this transport if your Virtual Machine host supports Virtual
+ Sockets over virtio.
+
+ To compile this driver as a module, choose M here: the module will be
+ called vmw_vsock_virtio_transport. If unsure, say N.
+
+config VIRTIO_VSOCKETS_COMMON
+ tristate
+ help
+ This option is selected by any driver which needs to access
+ the virtio_vsock common code. The module will be called
+ vmw_vsock_virtio_transport_common.
diff --git a/net/vmw_vsock/Makefile b/net/vmw_vsock/Makefile
index 2ce52d70f224..bc27c70e0e59 100644
--- a/net/vmw_vsock/Makefile
+++ b/net/vmw_vsock/Makefile
@@ -1,7 +1,13 @@
obj-$(CONFIG_VSOCKETS) += vsock.o
obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
+obj-$(CONFIG_VIRTIO_VSOCKETS) += vmw_vsock_virtio_transport.o
+obj-$(CONFIG_VIRTIO_VSOCKETS_COMMON) += vmw_vsock_virtio_transport_common.o
vsock-y += af_vsock.o vsock_addr.o
vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \
vmci_transport_notify_qstate.o
+
+vmw_vsock_virtio_transport-y += virtio_transport.o
+
+vmw_vsock_virtio_transport_common-y += virtio_transport_common.o
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index b96ac918e0ba..17dbbe64cd73 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -344,6 +344,16 @@ static bool vsock_in_connected_table(struct vsock_sock *vsk)
return ret;
}
+void vsock_remove_sock(struct vsock_sock *vsk)
+{
+ if (vsock_in_bound_table(vsk))
+ vsock_remove_bound(vsk);
+
+ if (vsock_in_connected_table(vsk))
+ vsock_remove_connected(vsk);
+}
+EXPORT_SYMBOL_GPL(vsock_remove_sock);
+
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
int i;
@@ -660,12 +670,6 @@ static void __vsock_release(struct sock *sk)
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
- if (vsock_in_bound_table(vsk))
- vsock_remove_bound(vsk);
-
- if (vsock_in_connected_table(vsk))
- vsock_remove_connected(vsk);
-
transport->release(vsk);
lock_sock(sk);
@@ -1995,6 +1999,15 @@ void vsock_core_exit(void)
}
EXPORT_SYMBOL_GPL(vsock_core_exit);
+const struct vsock_transport *vsock_core_get_transport(void)
+{
+ /* vsock_register_mutex not taken since only the transport uses this
+ * function and only while registered.
+ */
+ return transport;
+}
+EXPORT_SYMBOL_GPL(vsock_core_get_transport);
+
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.1.0-k");
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
new file mode 100644
index 000000000000..699dfabdbccd
--- /dev/null
+++ b/net/vmw_vsock/virtio_transport.c
@@ -0,0 +1,624 @@
+/*
+ * virtio transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
+ * early virtio-vsock proof-of-concept bits.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_vsock.h>
+#include <net/sock.h>
+#include <linux/mutex.h>
+#include <net/af_vsock.h>
+
+static struct workqueue_struct *virtio_vsock_workqueue;
+static struct virtio_vsock *the_virtio_vsock;
+static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
+
+struct virtio_vsock {
+ struct virtio_device *vdev;
+ struct virtqueue *vqs[VSOCK_VQ_MAX];
+
+ /* Virtqueue processing is deferred to a workqueue */
+ struct work_struct tx_work;
+ struct work_struct rx_work;
+ struct work_struct event_work;
+
+ /* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
+ * must be accessed with tx_lock held.
+ */
+ struct mutex tx_lock;
+
+ struct work_struct send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list;
+
+ atomic_t queued_replies;
+
+ /* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
+ * must be accessed with rx_lock held.
+ */
+ struct mutex rx_lock;
+ int rx_buf_nr;
+ int rx_buf_max_nr;
+
+ /* The following fields are protected by event_lock.
+ * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
+ */
+ struct mutex event_lock;
+ struct virtio_vsock_event event_list[8];
+
+ u32 guest_cid;
+};
+
+static struct virtio_vsock *virtio_vsock_get(void)
+{
+ return the_virtio_vsock;
+}
+
+static u32 virtio_transport_get_local_cid(void)
+{
+ struct virtio_vsock *vsock = virtio_vsock_get();
+
+ return vsock->guest_cid;
+}
+
+static void
+virtio_transport_send_pkt_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, send_pkt_work);
+ struct virtqueue *vq;
+ bool added = false;
+ bool restart_rx = false;
+
+ mutex_lock(&vsock->tx_lock);
+
+ vq = vsock->vqs[VSOCK_VQ_TX];
+
+ /* Avoid unnecessary interrupts while we're processing the ring */
+ virtqueue_disable_cb(vq);
+
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ struct scatterlist hdr, buf, *sgs[2];
+ int ret, in_sg = 0, out_sg = 0;
+ bool reply;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ if (list_empty(&vsock->send_pkt_list)) {
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtqueue_enable_cb(vq);
+ break;
+ }
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ reply = pkt->reply;
+
+ sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+ sgs[out_sg++] = &hdr;
+ if (pkt->buf) {
+ sg_init_one(&buf, pkt->buf, pkt->len);
+ sgs[out_sg++] = &buf;
+ }
+
+ ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
+ if (ret < 0) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ if (!virtqueue_enable_cb(vq) && ret == -ENOSPC)
+ continue; /* retry now that we have more space */
+ break;
+ }
+
+ if (reply) {
+ struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we now have resources to resume rx processing? */
+ if (val + 1 == virtqueue_get_vring_size(rx_vq))
+ restart_rx = true;
+ }
+
+ added = true;
+ }
+
+ if (added)
+ virtqueue_kick(vq);
+
+ mutex_unlock(&vsock->tx_lock);
+
+ if (restart_rx)
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+}
+
+static int
+virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock *vsock;
+ int len = pkt->len;
+
+ vsock = virtio_vsock_get();
+ if (!vsock) {
+ virtio_transport_free_pkt(pkt);
+ return -ENODEV;
+ }
+
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add_tail(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+ return len;
+}
+
+static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
+{
+ int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+ struct virtio_vsock_pkt *pkt;
+ struct scatterlist hdr, buf, *sgs[2];
+ struct virtqueue *vq;
+ int ret;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ do {
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ break;
+
+ pkt->buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!pkt->buf) {
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
+
+ pkt->len = buf_len;
+
+ sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
+ sgs[0] = &hdr;
+
+ sg_init_one(&buf, pkt->buf, buf_len);
+ sgs[1] = &buf;
+ ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
+ if (ret) {
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
+ vsock->rx_buf_nr++;
+ } while (vq->num_free);
+ if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
+ vsock->rx_buf_max_nr = vsock->rx_buf_nr;
+ virtqueue_kick(vq);
+}
+
+static void virtio_transport_tx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, tx_work);
+ struct virtqueue *vq;
+ bool added = false;
+
+ vq = vsock->vqs[VSOCK_VQ_TX];
+ mutex_lock(&vsock->tx_lock);
+ do {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ virtqueue_disable_cb(vq);
+ while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
+ virtio_transport_free_pkt(pkt);
+ added = true;
+ }
+ } while (!virtqueue_enable_cb(vq));
+ mutex_unlock(&vsock->tx_lock);
+
+ if (added)
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
+}
+
+/* Is there space left for replies to rx packets? */
+static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
+{
+ struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < virtqueue_get_vring_size(vq);
+}
+
+static void virtio_transport_rx_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, rx_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_RX];
+
+ mutex_lock(&vsock->rx_lock);
+
+ do {
+ virtqueue_disable_cb(vq);
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ unsigned int len;
+
+ if (!virtio_transport_more_replies(vsock)) {
+ /* Stop rx until the device processes already
+ * pending replies. Leave rx virtqueue
+ * callbacks disabled.
+ */
+ goto out;
+ }
+
+ pkt = virtqueue_get_buf(vq, &len);
+ if (!pkt) {
+ break;
+ }
+
+ vsock->rx_buf_nr--;
+
+ /* Drop short/long packets */
+ if (unlikely(len < sizeof(pkt->hdr) ||
+ len > sizeof(pkt->hdr) + pkt->len)) {
+ virtio_transport_free_pkt(pkt);
+ continue;
+ }
+
+ pkt->len = len - sizeof(pkt->hdr);
+ virtio_transport_recv_pkt(pkt);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+out:
+ if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+}
+
+/* event_lock must be held */
+static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
+ struct virtio_vsock_event *event)
+{
+ struct scatterlist sg;
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_EVENT];
+
+ sg_init_one(&sg, event, sizeof(*event));
+
+ return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
+}
+
+/* event_lock must be held */
+static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
+ struct virtio_vsock_event *event = &vsock->event_list[i];
+
+ virtio_vsock_event_fill_one(vsock, event);
+ }
+
+ virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
+}
+
+static void virtio_vsock_reset_sock(struct sock *sk)
+{
+ lock_sock(sk);
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
+ release_sock(sk);
+}
+
+static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
+{
+ struct virtio_device *vdev = vsock->vdev;
+ u64 guest_cid;
+
+ vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
+ &guest_cid, sizeof(guest_cid));
+ vsock->guest_cid = le64_to_cpu(guest_cid);
+}
+
+/* event_lock must be held */
+static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
+ struct virtio_vsock_event *event)
+{
+ switch (le32_to_cpu(event->id)) {
+ case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
+ virtio_vsock_update_guest_cid(vsock);
+ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+ break;
+ }
+}
+
+static void virtio_transport_event_work(struct work_struct *work)
+{
+ struct virtio_vsock *vsock =
+ container_of(work, struct virtio_vsock, event_work);
+ struct virtqueue *vq;
+
+ vq = vsock->vqs[VSOCK_VQ_EVENT];
+
+ mutex_lock(&vsock->event_lock);
+
+ do {
+ struct virtio_vsock_event *event;
+ unsigned int len;
+
+ virtqueue_disable_cb(vq);
+ while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
+ if (len == sizeof(*event))
+ virtio_vsock_event_handle(vsock, event);
+
+ virtio_vsock_event_fill_one(vsock, event);
+ }
+ } while (!virtqueue_enable_cb(vq));
+
+ virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
+
+ mutex_unlock(&vsock->event_lock);
+}
+
+static void virtio_vsock_event_done(struct virtqueue *vq)
+{
+ struct virtio_vsock *vsock = vq->vdev->priv;
+
+ if (!vsock)
+ return;
+ queue_work(virtio_vsock_workqueue, &vsock->event_work);
+}
+
+static void virtio_vsock_tx_done(struct virtqueue *vq)
+{
+ struct virtio_vsock *vsock = vq->vdev->priv;
+
+ if (!vsock)
+ return;
+ queue_work(virtio_vsock_workqueue, &vsock->tx_work);
+}
+
+static void virtio_vsock_rx_done(struct virtqueue *vq)
+{
+ struct virtio_vsock *vsock = vq->vdev->priv;
+
+ if (!vsock)
+ return;
+ queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+}
+
+static struct virtio_transport virtio_transport = {
+ .transport = {
+ .get_local_cid = virtio_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
+
+ .send_pkt = virtio_transport_send_pkt,
+};
+
+static int virtio_vsock_probe(struct virtio_device *vdev)
+{
+ vq_callback_t *callbacks[] = {
+ virtio_vsock_rx_done,
+ virtio_vsock_tx_done,
+ virtio_vsock_event_done,
+ };
+ static const char * const names[] = {
+ "rx",
+ "tx",
+ "event",
+ };
+ struct virtio_vsock *vsock = NULL;
+ int ret;
+
+ ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
+ if (ret)
+ return ret;
+
+ /* Only one virtio-vsock device per guest is supported */
+ if (the_virtio_vsock) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
+ if (!vsock) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsock->vdev = vdev;
+
+ ret = vsock->vdev->config->find_vqs(vsock->vdev, VSOCK_VQ_MAX,
+ vsock->vqs, callbacks, names);
+ if (ret < 0)
+ goto out;
+
+ virtio_vsock_update_guest_cid(vsock);
+
+ ret = vsock_core_init(&virtio_transport.transport);
+ if (ret < 0)
+ goto out_vqs;
+
+ vsock->rx_buf_nr = 0;
+ vsock->rx_buf_max_nr = 0;
+ atomic_set(&vsock->queued_replies, 0);
+
+ vdev->priv = vsock;
+ the_virtio_vsock = vsock;
+ mutex_init(&vsock->tx_lock);
+ mutex_init(&vsock->rx_lock);
+ mutex_init(&vsock->event_lock);
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
+ INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
+ INIT_WORK(&vsock->event_work, virtio_transport_event_work);
+ INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
+
+ mutex_lock(&vsock->rx_lock);
+ virtio_vsock_rx_fill(vsock);
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->event_lock);
+ virtio_vsock_event_fill(vsock);
+ mutex_unlock(&vsock->event_lock);
+
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return 0;
+
+out_vqs:
+ vsock->vdev->config->del_vqs(vsock->vdev);
+out:
+ kfree(vsock);
+ mutex_unlock(&the_virtio_vsock_mutex);
+ return ret;
+}
+
+static void virtio_vsock_remove(struct virtio_device *vdev)
+{
+ struct virtio_vsock *vsock = vdev->priv;
+ struct virtio_vsock_pkt *pkt;
+
+ flush_work(&vsock->rx_work);
+ flush_work(&vsock->tx_work);
+ flush_work(&vsock->event_work);
+ flush_work(&vsock->send_pkt_work);
+
+ vdev->config->reset(vdev);
+
+ mutex_lock(&vsock->rx_lock);
+ while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
+ virtio_transport_free_pkt(pkt);
+ mutex_unlock(&vsock->rx_lock);
+
+ mutex_lock(&vsock->tx_lock);
+ while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
+ virtio_transport_free_pkt(pkt);
+ mutex_unlock(&vsock->tx_lock);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ while (!list_empty(&vsock->send_pkt_list)) {
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ mutex_lock(&the_virtio_vsock_mutex);
+ the_virtio_vsock = NULL;
+ vsock_core_exit();
+ mutex_unlock(&the_virtio_vsock_mutex);
+
+ vdev->config->del_vqs(vdev);
+
+ kfree(vsock);
+}
+
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static unsigned int features[] = {
+};
+
+static struct virtio_driver virtio_vsock_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = virtio_vsock_probe,
+ .remove = virtio_vsock_remove,
+};
+
+static int __init virtio_vsock_init(void)
+{
+ int ret;
+
+ virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
+ if (!virtio_vsock_workqueue)
+ return -ENOMEM;
+ ret = register_virtio_driver(&virtio_vsock_driver);
+ if (ret)
+ destroy_workqueue(virtio_vsock_workqueue);
+ return ret;
+}
+
+static void __exit virtio_vsock_exit(void)
+{
+ unregister_virtio_driver(&virtio_vsock_driver);
+ destroy_workqueue(virtio_vsock_workqueue);
+}
+
+module_init(virtio_vsock_init);
+module_exit(virtio_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("virtio transport for vsock");
+MODULE_DEVICE_TABLE(virtio, id_table);
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
new file mode 100644
index 000000000000..a53b3a16b4f1
--- /dev/null
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -0,0 +1,992 @@
+/*
+ * common code for virtio vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/virtio.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_vsock.h>
+
+#include <net/sock.h>
+#include <net/af_vsock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/vsock_virtio_transport_common.h>
+
+/* How long to wait for graceful shutdown of a connection */
+#define VSOCK_CLOSE_TIMEOUT (8 * HZ)
+
+static const struct virtio_transport *virtio_transport_get_ops(void)
+{
+ const struct vsock_transport *t = vsock_core_get_transport();
+
+ return container_of(t, struct virtio_transport, transport);
+}
+
+struct virtio_vsock_pkt *
+virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
+ size_t len,
+ u32 src_cid,
+ u32 src_port,
+ u32 dst_cid,
+ u32 dst_port)
+{
+ struct virtio_vsock_pkt *pkt;
+ int err;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return NULL;
+
+ pkt->hdr.type = cpu_to_le16(info->type);
+ pkt->hdr.op = cpu_to_le16(info->op);
+ pkt->hdr.src_cid = cpu_to_le64(src_cid);
+ pkt->hdr.dst_cid = cpu_to_le64(dst_cid);
+ pkt->hdr.src_port = cpu_to_le32(src_port);
+ pkt->hdr.dst_port = cpu_to_le32(dst_port);
+ pkt->hdr.flags = cpu_to_le32(info->flags);
+ pkt->len = len;
+ pkt->hdr.len = cpu_to_le32(len);
+ pkt->reply = info->reply;
+
+ if (info->msg && len > 0) {
+ pkt->buf = kmalloc(len, GFP_KERNEL);
+ if (!pkt->buf)
+ goto out_pkt;
+ err = memcpy_from_msg(pkt->buf, info->msg, len);
+ if (err)
+ goto out;
+ }
+
+ trace_virtio_transport_alloc_pkt(src_cid, src_port,
+ dst_cid, dst_port,
+ len,
+ info->type,
+ info->op,
+ info->flags);
+
+ return pkt;
+
+out:
+ kfree(pkt->buf);
+out_pkt:
+ kfree(pkt);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_alloc_pkt);
+
+static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt_info *info)
+{
+ u32 src_cid, src_port, dst_cid, dst_port;
+ struct virtio_vsock_sock *vvs;
+ struct virtio_vsock_pkt *pkt;
+ u32 pkt_len = info->pkt_len;
+
+ src_cid = vm_sockets_get_local_cid();
+ src_port = vsk->local_addr.svm_port;
+ if (!info->remote_cid) {
+ dst_cid = vsk->remote_addr.svm_cid;
+ dst_port = vsk->remote_addr.svm_port;
+ } else {
+ dst_cid = info->remote_cid;
+ dst_port = info->remote_port;
+ }
+
+ vvs = vsk->trans;
+
+ /* we can send less than pkt_len bytes */
+ if (pkt_len > VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE)
+ pkt_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
+
+ /* virtio_transport_get_credit might return less than pkt_len credit */
+ pkt_len = virtio_transport_get_credit(vvs, pkt_len);
+
+ /* Do not send zero length OP_RW pkt */
+ if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
+ return pkt_len;
+
+ pkt = virtio_transport_alloc_pkt(info, pkt_len,
+ src_cid, src_port,
+ dst_cid, dst_port);
+ if (!pkt) {
+ virtio_transport_put_credit(vvs, pkt_len);
+ return -ENOMEM;
+ }
+
+ virtio_transport_inc_tx_pkt(vvs, pkt);
+
+ return virtio_transport_get_ops()->send_pkt(pkt);
+}
+
+static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+ struct virtio_vsock_pkt *pkt)
+{
+ vvs->rx_bytes += pkt->len;
+}
+
+static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
+ struct virtio_vsock_pkt *pkt)
+{
+ vvs->rx_bytes -= pkt->len;
+ vvs->fwd_cnt += pkt->len;
+}
+
+void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
+{
+ spin_lock_bh(&vvs->tx_lock);
+ pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
+ pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
+ spin_unlock_bh(&vvs->tx_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
+
+u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
+{
+ u32 ret;
+
+ spin_lock_bh(&vvs->tx_lock);
+ ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ if (ret > credit)
+ ret = credit;
+ vvs->tx_cnt += ret;
+ spin_unlock_bh(&vvs->tx_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
+
+void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
+{
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->tx_cnt -= credit;
+ spin_unlock_bh(&vvs->tx_lock);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
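
A small numeric sketch (plain userspace C, illustrative values) of the credit accounting used by virtio_transport_get_credit() and virtio_transport_has_space(): available credit is the peer's advertised buffer minus the bytes already sent but not yet consumed by the peer.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t peer_buf_alloc = 256 * 1024; /* peer's advertised rx buffer */
        uint32_t tx_cnt         = 300 * 1024; /* bytes sent so far */
        uint32_t peer_fwd_cnt   = 100 * 1024; /* bytes the peer has consumed */

        uint32_t in_flight = tx_cnt - peer_fwd_cnt;      /* 200 KiB */
        uint32_t credit    = peer_buf_alloc - in_flight; /*  56 KiB */

        printf("in flight: %" PRIu32 " bytes, credit left: %" PRIu32 " bytes\n",
               in_flight, credit);
        return 0;
    }
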
+
+static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
+ int type,
+ struct virtio_vsock_hdr *hdr)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
+ .type = type,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+
+static ssize_t
+virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct virtio_vsock_pkt *pkt;
+ size_t bytes, total = 0;
+ int err = -EFAULT;
+
+ spin_lock_bh(&vvs->rx_lock);
+ while (total < len && !list_empty(&vvs->rx_queue)) {
+ pkt = list_first_entry(&vvs->rx_queue,
+ struct virtio_vsock_pkt, list);
+
+ bytes = len - total;
+ if (bytes > pkt->len - pkt->off)
+ bytes = pkt->len - pkt->off;
+
+ /* sk_lock is held by caller so no one else can dequeue.
+ * Unlock rx_lock since memcpy_to_msg() may sleep.
+ */
+ spin_unlock_bh(&vvs->rx_lock);
+
+ err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
+ if (err)
+ goto out;
+
+ spin_lock_bh(&vvs->rx_lock);
+
+ total += bytes;
+ pkt->off += bytes;
+ if (pkt->off == pkt->len) {
+ virtio_transport_dec_rx_pkt(vvs, pkt);
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ }
+ spin_unlock_bh(&vvs->rx_lock);
+
+ /* Send a credit pkt to peer */
+ virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
+ NULL);
+
+ return total;
+
+out:
+ if (total)
+ err = total;
+ return err;
+}
+
+ssize_t
+virtio_transport_stream_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags)
+{
+ if (flags & MSG_PEEK)
+ return -EOPNOTSUPP;
+
+ return virtio_transport_stream_do_dequeue(vsk, msg, len);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
+
+int
+virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len, int flags)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
+
+s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+ spin_lock_bh(&vvs->rx_lock);
+ bytes = vvs->rx_bytes;
+ spin_unlock_bh(&vvs->rx_lock);
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
+
+static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+ bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+ if (bytes < 0)
+ bytes = 0;
+
+ return bytes;
+}
+
+s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ s64 bytes;
+
+ spin_lock_bh(&vvs->tx_lock);
+ bytes = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
+
+int virtio_transport_do_socket_init(struct vsock_sock *vsk,
+ struct vsock_sock *psk)
+{
+ struct virtio_vsock_sock *vvs;
+
+ vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
+ if (!vvs)
+ return -ENOMEM;
+
+ vsk->trans = vvs;
+ vvs->vsk = vsk;
+ if (psk) {
+ struct virtio_vsock_sock *ptrans = psk->trans;
+
+ vvs->buf_size = ptrans->buf_size;
+ vvs->buf_size_min = ptrans->buf_size_min;
+ vvs->buf_size_max = ptrans->buf_size_max;
+ vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
+ } else {
+ vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
+ vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
+ vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
+ }
+
+ vvs->buf_alloc = vvs->buf_size;
+
+ spin_lock_init(&vvs->rx_lock);
+ spin_lock_init(&vvs->tx_lock);
+ INIT_LIST_HEAD(&vvs->rx_queue);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
+
+u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
+
+u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size_min;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
+
+u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size_max;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
+
+void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ val = VIRTIO_VSOCK_MAX_BUF_SIZE;
+ if (val < vvs->buf_size_min)
+ vvs->buf_size_min = val;
+ if (val > vvs->buf_size_max)
+ vvs->buf_size_max = val;
+ vvs->buf_size = val;
+ vvs->buf_alloc = val;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
+
+void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ val = VIRTIO_VSOCK_MAX_BUF_SIZE;
+ if (val > vvs->buf_size)
+ vvs->buf_size = val;
+ vvs->buf_size_min = val;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
+
+void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
+ val = VIRTIO_VSOCK_MAX_BUF_SIZE;
+ if (val < vvs->buf_size)
+ vvs->buf_size = val;
+ vvs->buf_size_max = val;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
+
+int
+virtio_transport_notify_poll_in(struct vsock_sock *vsk,
+ size_t target,
+ bool *data_ready_now)
+{
+ if (vsock_stream_has_data(vsk))
+ *data_ready_now = true;
+ else
+ *data_ready_now = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
+
+int
+virtio_transport_notify_poll_out(struct vsock_sock *vsk,
+ size_t target,
+ bool *space_avail_now)
+{
+ s64 free_space;
+
+ free_space = vsock_stream_has_space(vsk);
+ if (free_space > 0)
+ *space_avail_now = true;
+ else if (free_space == 0)
+ *space_avail_now = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
+
+int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
+
+int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
+
+int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
+ size_t target, struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
+
+int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
+ size_t target, ssize_t copied, bool data_read,
+ struct vsock_transport_recv_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
+
+int virtio_transport_notify_send_init(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
+
+int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
+
+int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
+ struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
+
+int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
+ ssize_t written, struct vsock_transport_send_notify_data *data)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
+
+u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ return vvs->buf_size;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
+
+bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
+
+bool virtio_transport_stream_allow(u32 cid, u32 port)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
+
+int virtio_transport_dgram_bind(struct vsock_sock *vsk,
+ struct sockaddr_vm *addr)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
+
+bool virtio_transport_dgram_allow(u32 cid, u32 port)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
+
+int virtio_transport_connect(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_REQUEST,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_connect);
+
+int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_SHUTDOWN,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .flags = (mode & RCV_SHUTDOWN ?
+ VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
+ (mode & SEND_SHUTDOWN ?
+ VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
+
+int
+virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
+ struct sockaddr_vm *remote_addr,
+ struct msghdr *msg,
+ size_t dgram_len)
+{
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
+
+ssize_t
+virtio_transport_stream_enqueue(struct vsock_sock *vsk,
+ struct msghdr *msg,
+ size_t len)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RW,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .msg = msg,
+ .pkt_len = len,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
+
+void virtio_transport_destruct(struct vsock_sock *vsk)
+{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+
+ kfree(vvs);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_destruct);
+
+static int virtio_transport_reset(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .reply = !!pkt,
+ };
+
+ /* Send RST only if the original pkt is not a RST pkt */
+ if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+
+/* Normally packets are associated with a socket. There may be no socket if an
+ * attempt was made to connect to a socket that does not exist.
+ */
+static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RST,
+ .type = le16_to_cpu(pkt->hdr.type),
+ .reply = true,
+ };
+
+ /* Send RST only if the original pkt is not a RST pkt */
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ return 0;
+
+ pkt = virtio_transport_alloc_pkt(&info, 0,
+ le32_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port),
+ le32_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ if (!pkt)
+ return -ENOMEM;
+
+ return virtio_transport_get_ops()->send_pkt(pkt);
+}
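
virtio_transport_reset_no_sock() above addresses the RST by swapping the offending packet's source and destination, so the reset travels back to whoever sent to a nonexistent socket. A hedged illustration with made-up addresses:

    #include <stdio.h>

    struct sketch_addr { unsigned int cid, port; };

    int main(void)
    {
        struct sketch_addr src = { 2, 1234 };  /* peer that sent the packet */
        struct sketch_addr dst = { 3, 5000 };  /* local port nobody listens on */

        struct sketch_addr rst_src = dst;      /* the reply comes from "us" */
        struct sketch_addr rst_dst = src;      /* and goes back to the sender */

        printf("RST %u:%u -> %u:%u\n",
               rst_src.cid, rst_src.port, rst_dst.cid, rst_dst.port);
        return 0;
    }
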
+
+static void virtio_transport_wait_close(struct sock *sk, long timeout)
+{
+ if (timeout) {
+ DEFINE_WAIT(wait);
+
+ do {
+ prepare_to_wait(sk_sleep(sk), &wait,
+ TASK_INTERRUPTIBLE);
+ if (sk_wait_event(sk, &timeout,
+ sock_flag(sk, SOCK_DONE)))
+ break;
+ } while (!signal_pending(current) && timeout);
+
+ finish_wait(sk_sleep(sk), &wait);
+ }
+}
+
+static void virtio_transport_do_close(struct vsock_sock *vsk,
+ bool cancel_timeout)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ if (vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = SS_DISCONNECTING;
+ sk->sk_state_change(sk);
+
+ if (vsk->close_work_scheduled &&
+ (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+ vsk->close_work_scheduled = false;
+
+ vsock_remove_sock(vsk);
+
+ /* Release refcnt obtained when we scheduled the timeout */
+ sock_put(sk);
+ }
+}
+
+static void virtio_transport_close_timeout(struct work_struct *work)
+{
+ struct vsock_sock *vsk =
+ container_of(work, struct vsock_sock, close_work.work);
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_hold(sk);
+ lock_sock(sk);
+
+ if (!sock_flag(sk, SOCK_DONE)) {
+ (void)virtio_transport_reset(vsk, NULL);
+
+ virtio_transport_do_close(vsk, false);
+ }
+
+ vsk->close_work_scheduled = false;
+
+ release_sock(sk);
+ sock_put(sk);
+}
+
+/* User context, vsk->sk is locked */
+static bool virtio_transport_close(struct vsock_sock *vsk)
+{
+ struct sock *sk = &vsk->sk;
+
+ if (!(sk->sk_state == SS_CONNECTED ||
+ sk->sk_state == SS_DISCONNECTING))
+ return true;
+
+ /* Already received SHUTDOWN from peer, reply with RST */
+ if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
+ (void)virtio_transport_reset(vsk, NULL);
+ return true;
+ }
+
+ if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+ (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
+
+ if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
+ virtio_transport_wait_close(sk, sk->sk_lingertime);
+
+ if (sock_flag(sk, SOCK_DONE)) {
+ return true;
+ }
+
+ sock_hold(sk);
+ INIT_DELAYED_WORK(&vsk->close_work,
+ virtio_transport_close_timeout);
+ vsk->close_work_scheduled = true;
+ schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
+ return false;
+}
+
+void virtio_transport_release(struct vsock_sock *vsk)
+{
+ struct sock *sk = &vsk->sk;
+ bool remove_sock = true;
+
+ lock_sock(sk);
+ if (sk->sk_type == SOCK_STREAM)
+ remove_sock = virtio_transport_close(vsk);
+ release_sock(sk);
+
+ if (remove_sock)
+ vsock_remove_sock(vsk);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_release);
+
+static int
+virtio_transport_recv_connecting(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ int err;
+ int skerr;
+
+ switch (le16_to_cpu(pkt->hdr.op)) {
+ case VIRTIO_VSOCK_OP_RESPONSE:
+ sk->sk_state = SS_CONNECTED;
+ sk->sk_socket->state = SS_CONNECTED;
+ vsock_insert_connected(vsk);
+ sk->sk_state_change(sk);
+ break;
+ case VIRTIO_VSOCK_OP_INVALID:
+ break;
+ case VIRTIO_VSOCK_OP_RST:
+ skerr = ECONNRESET;
+ err = 0;
+ goto destroy;
+ default:
+ skerr = EPROTO;
+ err = -EINVAL;
+ goto destroy;
+ }
+ return 0;
+
+destroy:
+ virtio_transport_reset(vsk, pkt);
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = skerr;
+ sk->sk_error_report(sk);
+ return err;
+}
+
+static int
+virtio_transport_recv_connected(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ int err = 0;
+
+ switch (le16_to_cpu(pkt->hdr.op)) {
+ case VIRTIO_VSOCK_OP_RW:
+ pkt->len = le32_to_cpu(pkt->hdr.len);
+ pkt->off = 0;
+
+ spin_lock_bh(&vvs->rx_lock);
+ virtio_transport_inc_rx_pkt(vvs, pkt);
+ list_add_tail(&pkt->list, &vvs->rx_queue);
+ spin_unlock_bh(&vvs->rx_lock);
+
+ sk->sk_data_ready(sk);
+ return err;
+ case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
+ sk->sk_write_space(sk);
+ break;
+ case VIRTIO_VSOCK_OP_SHUTDOWN:
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
+ vsk->peer_shutdown |= RCV_SHUTDOWN;
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+ if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+ vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = SS_DISCONNECTING;
+ if (le32_to_cpu(pkt->hdr.flags))
+ sk->sk_state_change(sk);
+ break;
+ case VIRTIO_VSOCK_OP_RST:
+ virtio_transport_do_close(vsk, true);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ virtio_transport_free_pkt(pkt);
+ return err;
+}
+
+static void
+virtio_transport_recv_disconnecting(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
+ virtio_transport_do_close(vsk, true);
+}
+
+static int
+virtio_transport_send_response(struct vsock_sock *vsk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct virtio_vsock_pkt_info info = {
+ .op = VIRTIO_VSOCK_OP_RESPONSE,
+ .type = VIRTIO_VSOCK_TYPE_STREAM,
+ .remote_cid = le32_to_cpu(pkt->hdr.src_cid),
+ .remote_port = le32_to_cpu(pkt->hdr.src_port),
+ .reply = true,
+ };
+
+ return virtio_transport_send_pkt_info(vsk, &info);
+}
+
+/* Handle server socket */
+static int
+virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct vsock_sock *vchild;
+ struct sock *child;
+
+ if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
+ virtio_transport_reset(vsk, pkt);
+ return -EINVAL;
+ }
+
+ if (sk_acceptq_is_full(sk)) {
+ virtio_transport_reset(vsk, pkt);
+ return -ENOMEM;
+ }
+
+ child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
+ sk->sk_type, 0);
+ if (!child) {
+ virtio_transport_reset(vsk, pkt);
+ return -ENOMEM;
+ }
+
+ sk->sk_ack_backlog++;
+
+ lock_sock_nested(child, SINGLE_DEPTH_NESTING);
+
+ child->sk_state = SS_CONNECTED;
+
+ vchild = vsock_sk(child);
+ vsock_addr_init(&vchild->local_addr, le32_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port));
+ vsock_addr_init(&vchild->remote_addr, le32_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+
+ vsock_insert_connected(vchild);
+ vsock_enqueue_accept(sk, child);
+ virtio_transport_send_response(vchild, pkt);
+
+ release_sock(child);
+
+ sk->sk_data_ready(sk);
+ return 0;
+}
+
+static bool virtio_transport_space_update(struct sock *sk,
+ struct virtio_vsock_pkt *pkt)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ bool space_available;
+
+ /* buf_alloc and fwd_cnt are always included in the hdr */
+ spin_lock_bh(&vvs->tx_lock);
+ vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
+ vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
+ space_available = virtio_transport_has_space(vsk);
+ spin_unlock_bh(&vvs->tx_lock);
+ return space_available;
+}
+
+/* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
+ * lock.
+ */
+void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct sockaddr_vm src, dst;
+ struct vsock_sock *vsk;
+ struct sock *sk;
+ bool space_available;
+
+ vsock_addr_init(&src, le32_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ vsock_addr_init(&dst, le32_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port));
+
+ trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
+ dst.svm_cid, dst.svm_port,
+ le32_to_cpu(pkt->hdr.len),
+ le16_to_cpu(pkt->hdr.type),
+ le16_to_cpu(pkt->hdr.op),
+ le32_to_cpu(pkt->hdr.flags),
+ le32_to_cpu(pkt->hdr.buf_alloc),
+ le32_to_cpu(pkt->hdr.fwd_cnt));
+
+ if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
+ (void)virtio_transport_reset_no_sock(pkt);
+ goto free_pkt;
+ }
+
+ /* The socket must be in the connected or bound table,
+ * otherwise send a reset back
+ */
+ sk = vsock_find_connected_socket(&src, &dst);
+ if (!sk) {
+ sk = vsock_find_bound_socket(&dst);
+ if (!sk) {
+ (void)virtio_transport_reset_no_sock(pkt);
+ goto free_pkt;
+ }
+ }
+
+ vsk = vsock_sk(sk);
+
+ space_available = virtio_transport_space_update(sk, pkt);
+
+ lock_sock(sk);
+
+ /* Update CID in case it has changed after a transport reset event */
+ vsk->local_addr.svm_cid = dst.svm_cid;
+
+ if (space_available)
+ sk->sk_write_space(sk);
+
+ switch (sk->sk_state) {
+ case VSOCK_SS_LISTEN:
+ virtio_transport_recv_listen(sk, pkt);
+ virtio_transport_free_pkt(pkt);
+ break;
+ case SS_CONNECTING:
+ virtio_transport_recv_connecting(sk, pkt);
+ virtio_transport_free_pkt(pkt);
+ break;
+ case SS_CONNECTED:
+ virtio_transport_recv_connected(sk, pkt);
+ break;
+ case SS_DISCONNECTING:
+ virtio_transport_recv_disconnecting(sk, pkt);
+ virtio_transport_free_pkt(pkt);
+ break;
+ default:
+ virtio_transport_free_pkt(pkt);
+ break;
+ }
+ release_sock(sk);
+
+ /* Release refcnt obtained when we fetched this socket out of the
+ * bound or connected list.
+ */
+ sock_put(sk);
+ return;
+
+free_pkt:
+ virtio_transport_free_pkt(pkt);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
+
+void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
+{
+ kfree(pkt->buf);
+ kfree(pkt);
+}
+EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("common code for virtio vsock");
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 4120b7a538be..4be4fbbc0b50 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1644,6 +1644,8 @@ static void vmci_transport_destruct(struct vsock_sock *vsk)
static void vmci_transport_release(struct vsock_sock *vsk)
{
+ vsock_remove_sock(vsk);
+
if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index da49c0b1fd32..b0e11b6dc994 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -715,7 +715,7 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
ASSERT_RTNL();
- if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
+ if (!IS_ENABLED(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
!(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
return false;
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
index c3108bb15789..e3c0a40909f7 100644
--- a/samples/kprobes/jprobe_example.c
+++ b/samples/kprobes/jprobe_example.c
@@ -48,10 +48,10 @@ static int __init jprobe_init(void)
ret = register_jprobe(&my_jprobe);
if (ret < 0) {
- printk(KERN_INFO "register_jprobe failed, returned %d\n", ret);
+ pr_err("register_jprobe failed, returned %d\n", ret);
return -1;
}
- printk(KERN_INFO "Planted jprobe at %p, handler addr %p\n",
+ pr_info("Planted jprobe at %p, handler addr %p\n",
my_jprobe.kp.addr, my_jprobe.entry);
return 0;
}
@@ -59,7 +59,7 @@ static int __init jprobe_init(void)
static void __exit jprobe_exit(void)
{
unregister_jprobe(&my_jprobe);
- printk(KERN_INFO "jprobe at %p unregistered\n", my_jprobe.kp.addr);
+ pr_info("jprobe at %p unregistered\n", my_jprobe.kp.addr);
}
module_init(jprobe_init)
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index f3b61b4ee09c..88b3e2d227ae 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -27,23 +27,19 @@ static struct kprobe kp = {
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_X86
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, ip = %lx,"
- " flags = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, ip = %lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->ip, regs->flags);
#endif
#ifdef CONFIG_PPC
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx,"
- " msr = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->nip, regs->msr);
#endif
#ifdef CONFIG_MIPS
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx,"
- " status = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
#endif
#ifdef CONFIG_TILEGX
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx,"
- " ex1 = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, ex1 = 0x%lx\n",
p->symbol_name, p->addr, regs->pc, regs->ex1);
#endif
#ifdef CONFIG_ARM64
@@ -61,19 +57,19 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
#ifdef CONFIG_X86
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
#ifdef CONFIG_PPC
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->msr);
#endif
#ifdef CONFIG_MIPS
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_status);
#endif
#ifdef CONFIG_TILEGX
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
p->symbol_name, p->addr, regs->ex1);
#endif
#ifdef CONFIG_ARM64
@@ -89,8 +85,7 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
*/
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
- printk(KERN_INFO "fault_handler: p->addr = 0x%p, trap #%dn",
- p->addr, trapnr);
+	pr_info("fault_handler: p->addr = 0x%p, trap #%d\n", p->addr, trapnr);
/* Return 0 because we don't handle the fault. */
return 0;
}
@@ -104,17 +99,17 @@ static int __init kprobe_init(void)
ret = register_kprobe(&kp);
if (ret < 0) {
- printk(KERN_INFO "register_kprobe failed, returned %d\n", ret);
+ pr_err("register_kprobe failed, returned %d\n", ret);
return ret;
}
- printk(KERN_INFO "Planted kprobe at %p\n", kp.addr);
+ pr_info("Planted kprobe at %p\n", kp.addr);
return 0;
}
static void __exit kprobe_exit(void)
{
unregister_kprobe(&kp);
- printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr);
+ pr_info("kprobe at %p unregistered\n", kp.addr);
}
module_init(kprobe_init)
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
index ebb1d1aed547..7f9060f435cd 100644
--- a/samples/kprobes/kretprobe_example.c
+++ b/samples/kprobes/kretprobe_example.c
@@ -55,14 +55,14 @@ static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
*/
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- int retval = regs_return_value(regs);
+ unsigned long retval = regs_return_value(regs);
struct my_data *data = (struct my_data *)ri->data;
s64 delta;
ktime_t now;
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
- printk(KERN_INFO "%s returned %d and took %lld ns to execute\n",
+ pr_info("%s returned %lu and took %lld ns to execute\n",
func_name, retval, (long long)delta);
return 0;
}
@@ -82,11 +82,10 @@ static int __init kretprobe_init(void)
my_kretprobe.kp.symbol_name = func_name;
ret = register_kretprobe(&my_kretprobe);
if (ret < 0) {
- printk(KERN_INFO "register_kretprobe failed, returned %d\n",
- ret);
+ pr_err("register_kretprobe failed, returned %d\n", ret);
return -1;
}
- printk(KERN_INFO "Planted return probe at %s: %p\n",
+ pr_info("Planted return probe at %s: %p\n",
my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr);
return 0;
}
@@ -94,11 +93,10 @@ static int __init kretprobe_init(void)
static void __exit kretprobe_exit(void)
{
unregister_kretprobe(&my_kretprobe);
- printk(KERN_INFO "kretprobe at %p unregistered\n",
- my_kretprobe.kp.addr);
+ pr_info("kretprobe at %p unregistered\n", my_kretprobe.kp.addr);
/* nmissed > 0 suggests that maxactive was set too low. */
- printk(KERN_INFO "Missed probing %d instances of %s\n",
+ pr_info("Missed probing %d instances of %s\n",
my_kretprobe.nmissed, my_kretprobe.kp.symbol_name);
}
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 0f82314621f2..179219845dfc 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -108,16 +108,20 @@ as-option = $(call try-run,\
as-instr = $(call try-run,\
printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+# Do not attempt to build with gcc plugins during cc-option tests.
+# (And this uses delayed resolution so the flags will be up to date.)
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
cc-option = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
cc-option-yn = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+ $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
# cc-option-align
# Prefix align with either -falign or -malign
@@ -127,7 +131,7 @@ cc-option-align = $(subst -functions=0,,\
# cc-disable-warning
# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
cc-disable-warning = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+ $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
# cc-name
# Expands to either gcc or clang
@@ -202,7 +206,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
# Prefix -I with $(srctree) if it is not an absolute path.
# skip if -I has no parameter
addtree = $(if $(patsubst -I%,%,$(1)), \
-$(if $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1))
+$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)))
# Find all -I options and call addtree
flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
diff --git a/scripts/Makefile b/scripts/Makefile
index 822ab4a6a4aa..1d80897a9644 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -47,4 +47,4 @@ subdir-$(CONFIG_DTC) += dtc
subdir-$(CONFIG_GDB_SCRIPTS) += gdb
# Let clean descend into subdirs
-subdir- += basic kconfig package
+subdir- += basic kconfig package gcc-plugins
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 0d1ca5bf42fb..11602e5efb3b 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -60,7 +60,7 @@ endif
endif
# Do not include host rules unless needed
-ifneq ($(hostprogs-y)$(hostprogs-m),)
+ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
include scripts/Makefile.host
endif
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 55c96cb8070f..50616ea25131 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -38,7 +38,9 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
__clean-files := $(extra-y) $(extra-m) $(extra-) \
$(always) $(targets) $(clean-files) \
$(host-progs) \
- $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
+ $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
+ $(hostlibs-y) $(hostlibs-m) $(hostlibs-) \
+ $(hostcxxlibs-y) $(hostcxxlibs-m)
__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
new file mode 100644
index 000000000000..61f0e6db909b
--- /dev/null
+++ b/scripts/Makefile.gcc-plugins
@@ -0,0 +1,60 @@
+ifdef CONFIG_GCC_PLUGINS
+ __PLUGINCC := $(call cc-ifversion, -ge, 0408, $(HOSTCXX), $(HOSTCC))
+ PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)")
+
+ SANCOV_PLUGIN := -fplugin=$(objtree)/scripts/gcc-plugins/sancov_plugin.so
+
+ gcc-plugin-$(CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) += cyc_complexity_plugin.so
+
+ ifdef CONFIG_GCC_PLUGIN_SANCOV
+ ifeq ($(CFLAGS_KCOV),)
+ # It is needed because of the gcc-plugin.sh and gcc version checks.
+ gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV) += sancov_plugin.so
+
+ ifneq ($(PLUGINCC),)
+ CFLAGS_KCOV := $(SANCOV_PLUGIN)
+ else
+ $(warning warning: cannot use CONFIG_KCOV: -fsanitize-coverage=trace-pc is not supported by compiler)
+ endif
+ endif
+ endif
+
+ GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
+
+ export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
+
+ ifneq ($(PLUGINCC),)
+	# SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
+ GCC_PLUGINS_CFLAGS := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGINS_CFLAGS))
+ endif
+
+ KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+ GCC_PLUGIN := $(gcc-plugin-y)
+ GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
+endif
+
+# If plugins aren't supported, abort the build before hard-to-read compiler
+# errors start getting spewed by the main build.
+PHONY += gcc-plugins-check
+gcc-plugins-check: FORCE
+ifdef CONFIG_GCC_PLUGINS
+ ifeq ($(PLUGINCC),)
+ ifneq ($(GCC_PLUGINS_CFLAGS),)
+ ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(Q)$(srctree)/scripts/gcc-plugin.sh --show-error "$(__PLUGINCC)" "$(HOSTCXX)" "$(CC)" || true
+ @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc installation does not support plugins, perhaps the necessary headers are missing?" >&2 && exit 1
+ else
+ @echo "Cannot use CONFIG_GCC_PLUGINS: your gcc version does not support plugins, you should upgrade it to at least gcc 4.5" >&2 && exit 1
+ endif
+ endif
+ endif
+endif
+ @:
+
+# Actually do the build, if requested.
+PHONY += gcc-plugins
+gcc-plugins: scripts_basic gcc-plugins-check
+ifdef CONFIG_GCC_PLUGINS
+ $(Q)$(MAKE) $(build)=scripts/gcc-plugins
+endif
+ @:
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 133edfae5b8a..45b5b1aaedbd 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -20,7 +20,15 @@
# Will compile qconf as a C++ program, and menu as a C program.
# They are linked as C++ code to the executable qconf
+# hostcc-option
+# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
+
+hostcc-option = $(call try-run,\
+ $(HOSTCC) $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+
__hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
+host-cshlib := $(sort $(hostlibs-y) $(hostlibs-m))
+host-cxxshlib := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
# C code
# Executables compiled from a single .c file
@@ -42,6 +50,10 @@ host-cxxmulti := $(foreach m,$(__hostprogs),$(if $($(m)-cxxobjs),$(m)))
# C++ Object (.o) files compiled from .cc files
host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
+# Object (.o) files used by the shared libraries
+host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
+host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
+
# output directory for programs/.o files
# hostprogs-y := tools/build may have been specified.
# Retrieve also directory of .o files from prog-objs or prog-cxxobjs notation
@@ -56,6 +68,10 @@ host-cmulti := $(addprefix $(obj)/,$(host-cmulti))
host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
+host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
+host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
+host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
+host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
obj-dirs += $(host-objdirs)
@@ -124,5 +140,42 @@ quiet_cmd_host-cxxobjs = HOSTCXX $@
$(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
$(call if_changed_dep,host-cxxobjs)
+# Compile .c file, create position independent .o file
+# host-cshobjs -> .o
+quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
+ cmd_host-cshobjs = $(HOSTCC) $(hostc_flags) -fPIC -c -o $@ $<
+$(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
+ $(call if_changed_dep,host-cshobjs)
+
+# Compile .c file, create position independent .o file
+# Note that plugin-capable gcc versions can be either C or C++ based;
+# therefore plugin source files have to be compilable in both C and C++ mode.
+# This is why a C++ compiler is invoked on a .c file.
+# host-cxxshobjs -> .o
+quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
+ cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
+$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
+ $(call if_changed_dep,host-cxxshobjs)
+
+# Link a shared library, based on position independent .o files
+# *.o -> .so shared library (host-cshlib)
+quiet_cmd_host-cshlib = HOSTLLD -shared $@
+ cmd_host-cshlib = $(HOSTCC) $(HOSTLDFLAGS) -shared -o $@ \
+ $(addprefix $(obj)/,$($(@F:.so=-objs))) \
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cshlib): FORCE
+ $(call if_changed,host-cshlib)
+$(call multi_depend, $(host-cshlib), .so, -objs)
+
+# Link a shared library, based on position independent .o files
+# *.o -> .so shared library (host-cxxshlib)
+quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
+ cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
+ $(addprefix $(obj)/,$($(@F:.so=-objs))) \
+ $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
+$(host-cxxshlib): FORCE
+ $(call if_changed,host-cxxshlib)
+$(call multi_depend, $(host-cxxshlib), .so, -objs)
+
targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
- $(host-cxxmulti) $(host-cxxobjs)
+ $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index e7df0f5db7ec..0a07f9014944 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -138,7 +138,7 @@ endif
ifeq ($(CONFIG_KCOV),y)
_c_flags += $(if $(patsubst n%,, \
- $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)y), \
+ $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)$(CONFIG_KCOV_INSTRUMENT_ALL)), \
$(CFLAGS_KCOV))
endif
@@ -155,9 +155,10 @@ else
# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
# and locates generated .h files
# FIXME: Replace both with specific CFLAGS* statements in the makefiles
-__c_flags = $(call addtree,-I$(obj)) $(call flags,_c_flags)
-__a_flags = $(call flags,_a_flags)
-__cpp_flags = $(call flags,_cpp_flags)
+__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \
+ $(call flags,_c_flags)
+__a_flags = $(call flags,_a_flags)
+__cpp_flags = $(call flags,_cpp_flags)
endif
c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
diff --git a/scripts/basic/bin2c.c b/scripts/basic/bin2c.c
index af187e695345..c3d7eef3ad06 100644
--- a/scripts/basic/bin2c.c
+++ b/scripts/basic/bin2c.c
@@ -29,7 +29,8 @@ int main(int argc, char *argv[])
} while (ch != EOF);
if (argc > 1)
- printf("\t;\n\nconst int %s_size = %d;\n", argv[1], total);
+ printf("\t;\n\n#include <linux/types.h>\n\nconst size_t %s_size = %d;\n",
+ argv[1], total);
return 0;
}
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 24a08363995a..4de3cc42fc50 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -55,6 +55,7 @@ my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
my $color = 1;
+my $allow_c99_comments = 1;
sub help {
my ($exitcode) = @_;
@@ -227,9 +228,9 @@ if ($^V && $^V lt $minimum_perl_version) {
}
}
+#if no filenames are given, push '-' to read patch from stdin
if ($#ARGV < 0) {
- print "$P: no input files\n";
- exit(1);
+ push(@ARGV, '-');
}
sub hash_save_array_words {
@@ -1144,6 +1145,11 @@ sub sanitise_line {
$res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@;
}
+ if ($allow_c99_comments && $res =~ m@(//.*$)@) {
+ my $match = $1;
+ $res =~ s/\Q$match\E/"$;" x length($match)/e;
+ }
+
return $res;
}
@@ -2063,6 +2069,7 @@ sub process {
my $is_patch = 0;
my $in_header_lines = $file ? 0 : 1;
my $in_commit_log = 0; #Scanning lines before patch
+ my $has_commit_log = 0; #Encountered lines before patch
my $commit_log_possible_stack_dump = 0;
my $commit_log_long_line = 0;
my $commit_log_has_diff = 0;
@@ -2453,9 +2460,9 @@ sub process {
# Check for git id commit length and improperly formed commit descriptions
if ($in_commit_log && !$commit_log_possible_stack_dump &&
- $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
+ $line !~ /^\s*(?:Link|Patchwork|http|https|BugLink):/i &&
($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
- ($line =~ /\b[0-9a-f]{12,40}\b/i &&
+ ($line =~ /(?:\s|^)[0-9a-f]{12,40}(?:[\s"'\(\[]|$)/i &&
$line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
$line !~ /\bfixes:\s*[0-9a-f]{12,40}/i))) {
my $init_char = "c";
@@ -2560,6 +2567,7 @@ sub process {
$rawline =~ /^(commit\b|from\b|[\w-]+:).*$/i)) {
$in_header_lines = 0;
$in_commit_log = 1;
+ $has_commit_log = 1;
}
# Check if there is UTF-8 in a commit log when a mail header has explicitly
@@ -2763,6 +2771,10 @@ sub process {
$line =~ /^\+\s*#\s*define\s+\w+\s+$String$/) {
$msg_type = "";
+ # EFI_GUID is another special case
+ } elsif ($line =~ /^\+.*\bEFI_GUID\s*\(/) {
+ $msg_type = "";
+
# Otherwise set the alternate message types
# a comment starts before $max_line_length
@@ -3337,7 +3349,7 @@ sub process {
next if ($line =~ /^[^\+]/);
# check for declarations of signed or unsigned without int
- while ($line =~ m{($Declare)\s*(?!char\b|short\b|int\b|long\b)\s*($Ident)?\s*[=,;\[\)\(]}g) {
+ while ($line =~ m{\b($Declare)\s*(?!char\b|short\b|int\b|long\b)\s*($Ident)?\s*[=,;\[\)\(]}g) {
my $type = $1;
my $var = $2;
$var = "" if (!defined $var);
@@ -5722,8 +5734,9 @@ sub process {
}
}
-# check for #defines like: 1 << <digit> that could be BIT(digit)
- if ($line =~ /#\s*define\s+\w+\s+\(?\s*1\s*([ulUL]*)\s*\<\<\s*(?:\d+|$Ident)\s*\)?/) {
+# check for #defines like: 1 << <digit> that could be BIT(digit); skip uapi, where BIT() is not exported
+ if ($realfile !~ m@^include/uapi/@ &&
+ $line =~ /#\s*define\s+\w+\s+\(?\s*1\s*([ulUL]*)\s*\<\<\s*(?:\d+|$Ident)\s*\)?/) {
my $ull = "";
$ull = "_ULL" if (defined($1) && $1 =~ /ll/i);
if (CHK("BIT_MACRO",
@@ -6044,7 +6057,7 @@ sub process {
ERROR("NOT_UNIFIED_DIFF",
"Does not appear to be a unified-diff format patch\n");
}
- if ($is_patch && $filename ne '-' && $chk_signoff && $signoff == 0) {
+ if ($is_patch && $has_commit_log && $chk_signoff && $signoff == 0) {
ERROR("MISSING_SIGN_OFF",
"Missing Signed-off-by: line(s)\n");
}
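As a small illustration (not from this patch) of the kind of define the BIT_MACRO check targets outside include/uapi/:

#define EXAMPLE_FLAG_SHIFTED	(1UL << 5)	/* flagged: could be BIT(5) */
#define EXAMPLE_FLAG_BIT	BIT(5)		/* preferred, uses the kernel's BIT() helper */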
diff --git a/scripts/coccicheck b/scripts/coccicheck
index dd85a455b2ba..c92c1528a54d 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -1,14 +1,24 @@
#!/bin/bash
-
+# Linux kernel coccicheck
+#
+# Read Documentation/coccinelle.txt
#
# This script requires at least spatch
# version 1.0.0-rc11.
-#
+DIR="$(dirname $(readlink -f $0))/.."
SPATCH="`which ${SPATCH:=spatch}`"
-trap kill_running SIGTERM SIGINT
-declare -a SPATCH_PID
+if [ ! -x "$SPATCH" ]; then
+ echo 'spatch is part of the Coccinelle project and is available at http://coccinelle.lip6.fr/'
+ exit 1
+fi
+
+SPATCH_VERSION=$($SPATCH --version | head -1 | awk '{print $3}')
+SPATCH_VERSION_NUM=$(echo $SPATCH_VERSION | ${DIR}/scripts/ld-version.sh)
+
+USE_JOBS="no"
+$SPATCH --help | grep "\-\-jobs" > /dev/null && USE_JOBS="yes"
# The verbosity may be set by the environmental parameter V=
# as for example with 'make V=1 coccicheck'
@@ -25,7 +35,28 @@ else
NPROC="$J"
fi
-FLAGS="$SPFLAGS --very-quiet"
+FLAGS="--very-quiet"
+
+# You can use SPFLAGS to append extra arguments to coccicheck or override any
+# heuristics done in this file as Coccinelle accepts the last options when
+# options conflict.
+#
+# A good example for use of SPFLAGS is if you want to debug your cocci script,
+# you can for instance use the following:
+#
+# $ export COCCI=scripts/coccinelle/misc/irqf_oneshot.cocci
+# $ make coccicheck MODE=report DEBUG_FILE="all.err" SPFLAGS="--profile --show-trying" M=./drivers/mfd/arizona-irq.c
+#
+# "--show-trying" should show you what rule is being processed as it goes to
+# stdout; you do not need a debug file for that. The profile output will be
+# sent to stdout. If you provide a DEBUG_FILE, the profiling data can be
+# inspected there.
+#
+# --profile will not output if --very-quiet is used, so avoid it.
+echo $SPFLAGS | egrep -e "--profile|--show-trying" 2>&1 > /dev/null
+if [ $? -eq 0 ]; then
+ FLAGS="--quiet"
+fi
# spatch only allows include directories with the syntax "-I include"
# while gcc also allows "-Iinclude" and "-include include"
@@ -51,9 +82,14 @@ if [ "$KBUILD_EXTMOD" != "" ] ; then
OPTIONS="--patch $srctree $OPTIONS"
fi
-if [ ! -x "$SPATCH" ]; then
- echo 'spatch is part of the Coccinelle project and is available at http://coccinelle.lip6.fr/'
- exit 1
+# You can override by using SPFLAGS
+if [ "$USE_JOBS" = "no" ]; then
+ trap kill_running SIGTERM SIGINT
+ declare -a SPATCH_PID
+elif [ "$NPROC" != "1" ]; then
+ # Using 0 should work as well, refer to _SC_NPROCESSORS_ONLN use on
+ # https://github.com/rdicosmo/parmap/blob/master/setcore_stubs.c
+ OPTIONS="$OPTIONS --jobs $NPROC --chunksize 1"
fi
if [ "$MODE" = "" ] ; then
@@ -72,7 +108,7 @@ if [ "$MODE" = "chain" ] ; then
echo 'All available modes will be tried (in that order): patch, report, context, org'
fi
elif [ "$MODE" = "report" -o "$MODE" = "org" ] ; then
- FLAGS="$FLAGS --no-show-diff"
+ FLAGS="--no-show-diff $FLAGS"
fi
if [ "$ONLINE" = "0" ] ; then
@@ -82,7 +118,26 @@ if [ "$ONLINE" = "0" ] ; then
echo ''
fi
-run_cmd() {
+run_cmd_parmap() {
+ if [ $VERBOSE -ne 0 ] ; then
+ echo "Running ($NPROC in parallel): $@"
+ fi
+ if [ "$DEBUG_FILE" != "/dev/null" -a "$DEBUG_FILE" != "" ]; then
+ if [ -f $DEBUG_FILE ]; then
+ echo "Debug file $DEBUG_FILE exists, bailing"
+ exit
+ fi
+ else
+ DEBUG_FILE="/dev/null"
+ fi
+ $@ 2>$DEBUG_FILE
+ if [[ $? -ne 0 ]]; then
+ echo "coccicheck failed"
+ exit $?
+ fi
+}
+
+run_cmd_old() {
local i
if [ $VERBOSE -ne 0 ] ; then
echo "Running ($NPROC in parallel): $@"
@@ -97,6 +152,14 @@ run_cmd() {
wait
}
+run_cmd() {
+ if [ "$USE_JOBS" = "yes" ]; then
+ run_cmd_parmap $@
+ else
+ run_cmd_old $@
+ fi
+}
+
kill_running() {
for i in $(seq 0 $(( NPROC - 1 )) ); do
if [ $VERBOSE -eq 2 ] ; then
@@ -106,10 +169,23 @@ kill_running() {
done
}
+# You can override heuristics with SPFLAGS; these must always go last
+OPTIONS="$OPTIONS $SPFLAGS"
+
coccinelle () {
COCCI="$1"
OPT=`grep "Option" $COCCI | cut -d':' -f2`
+ REQ=`grep "Requires" $COCCI | cut -d':' -f2 | sed "s| ||"`
+ REQ_NUM=$(echo $REQ | ${DIR}/scripts/ld-version.sh)
+ if [ "$REQ_NUM" != "0" ] ; then
+ if [ "$SPATCH_VERSION_NUM" -lt "$REQ_NUM" ] ; then
+		echo "Skipping coccinelle SmPL patch: $COCCI"
+ echo "You have coccinelle: $SPATCH_VERSION"
+ echo "This SmPL patch requires: $REQ"
+ return
+ fi
+ fi
# The option '--parse-cocci' can be used to syntactically check the SmPL files.
#
diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci
index 3d9349012bb3..c990d2c7ee16 100644
--- a/scripts/coccinelle/free/devm_free.cocci
+++ b/scripts/coccinelle/free/devm_free.cocci
@@ -29,8 +29,24 @@ expression x;
@@
(
+ x = devm_kmalloc(...)
+|
+ x = devm_kvasprintf(...)
+|
+ x = devm_kasprintf(...)
+|
x = devm_kzalloc(...)
|
+ x = devm_kmalloc_array(...)
+|
+ x = devm_kcalloc(...)
+|
+ x = devm_kstrdup(...)
+|
+ x = devm_kmemdup(...)
+|
+ x = devm_get_free_pages(...)
+|
x = devm_request_irq(...)
|
x = devm_ioremap(...)
@@ -48,6 +64,16 @@ position p;
(
* kfree@p(x)
|
+* kzfree@p(x)
+|
+* __krealloc@p(x, ...)
+|
+* krealloc@p(x, ...)
+|
+* free_pages@p(x, ...)
+|
+* free_page@p(x)
+|
* free_irq@p(x)
|
* iounmap@p(x)
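To make the new matches concrete, a hypothetical driver fragment of the kind devm_free.cocci now reports; devm_*-allocated memory is released automatically on driver detach, so the explicit kfree() below is a bug:

static int example_probe(struct platform_device *pdev)
{
	char *name;

	name = devm_kstrdup(&pdev->dev, "example", GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	kfree(name);	/* flagged: freeing devm_kstrdup() memory */
	return 0;
}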
diff --git a/scripts/coccinelle/free/ifnullfree.cocci b/scripts/coccinelle/free/ifnullfree.cocci
index 52bd235286fa..14a4cd98e83b 100644
--- a/scripts/coccinelle/free/ifnullfree.cocci
+++ b/scripts/coccinelle/free/ifnullfree.cocci
@@ -20,6 +20,8 @@ expression E;
(
kfree(E);
|
+ kzfree(E);
+|
debugfs_remove(E);
|
debugfs_remove_recursive(E);
@@ -39,7 +41,7 @@ position p;
@@
* if (E != NULL)
-* \(kfree@p\|debugfs_remove@p\|debugfs_remove_recursive@p\|
+* \(kfree@p\|kzfree@p\|debugfs_remove@p\|debugfs_remove_recursive@p\|
* usb_free_urb@p\|kmem_cache_destroy@p\|mempool_destroy@p\|
* dma_pool_destroy@p\)(E);
diff --git a/scripts/coccinelle/free/kfree.cocci b/scripts/coccinelle/free/kfree.cocci
index 577b78056990..ac438da4fd7b 100644
--- a/scripts/coccinelle/free/kfree.cocci
+++ b/scripts/coccinelle/free/kfree.cocci
@@ -20,7 +20,11 @@ expression E;
position p1;
@@
-kfree@p1(E)
+(
+* kfree@p1(E)
+|
+* kzfree@p1(E)
+)
@print expression@
constant char [] c;
@@ -60,7 +64,11 @@ position ok;
@@
while (1) { ...
- kfree@ok(E)
+(
+* kfree@ok(E)
+|
+* kzfree@ok(E)
+)
... when != break;
when != goto l;
when forall
@@ -74,7 +82,11 @@ statement S;
position free.p1!=loop.ok,p2!={print.p,sz.p};
@@
-kfree@p1(E,...)
+(
+* kfree@p1(E,...)
+|
+* kzfree@p1(E,...)
+)
...
(
iter(...,subE,...) S // no use
diff --git a/scripts/coccinelle/free/kfreeaddr.cocci b/scripts/coccinelle/free/kfreeaddr.cocci
index ce8aacc314cb..d46063b1db8b 100644
--- a/scripts/coccinelle/free/kfreeaddr.cocci
+++ b/scripts/coccinelle/free/kfreeaddr.cocci
@@ -16,7 +16,11 @@ identifier f;
position p;
@@
+(
* kfree@p(&e->f)
+|
+* kzfree@p(&e->f)
+)
@script:python depends on org@
p << r.p;
@@ -28,5 +32,5 @@ cocci.print_main("kfree",p)
p << r.p;
@@
-msg = "ERROR: kfree of structure field"
+msg = "ERROR: invalid free of structure field"
coccilib.report.print_report(p[0],msg)
diff --git a/scripts/coccinelle/iterators/device_node_continue.cocci b/scripts/coccinelle/iterators/device_node_continue.cocci
index 38ab744a4037..a36c16db171b 100644
--- a/scripts/coccinelle/iterators/device_node_continue.cocci
+++ b/scripts/coccinelle/iterators/device_node_continue.cocci
@@ -5,8 +5,11 @@
// Copyright: (C) 2015 Julia Lawall, Inria. GPLv2.
// URL: http://coccinelle.lip6.fr/
// Options: --no-includes --include-headers
+// Requires: 1.0.4
// Keywords: for_each_child_of_node, etc.
+// This uses a conjunction, which requires coccinelle >= 1.0.4
+
virtual patch
virtual context
virtual org
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
index 80a831c91161..007f0de0c715 100644
--- a/scripts/coccinelle/misc/noderef.cocci
+++ b/scripts/coccinelle/misc/noderef.cocci
@@ -16,6 +16,7 @@ virtual patch
@depends on patch@
expression *x;
expression f;
+expression i;
type T;
@@
@@ -30,15 +31,26 @@ f(...,(T)(x),...,sizeof(
+ *x
),...)
|
-f(...,sizeof(x),...,(T)(
+f(...,sizeof(
+- x
++ *x
+ ),...,(T)(x),...)
+|
+f(...,(T)(x),...,i*sizeof(
- x
+ *x
),...)
+|
+f(...,i*sizeof(
+- x
++ *x
+ ),...,(T)(x),...)
)
@r depends on !patch@
expression *x;
expression f;
+expression i;
position p;
type T;
@@
@@ -49,6 +61,10 @@ type T;
*f(...,(T)(x),...,sizeof@p(x),...)
|
*f(...,sizeof@p(x),...,(T)(x),...)
+|
+*f(...,(T)(x),...,i*sizeof@p(x),...)
+|
+*f(...,i*sizeof@p(x),...,(T)(x),...)
)
@script:python depends on org@
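The newly handled pattern, a cast of a pointer next to a count-scaled sizeof of the same pointer, looks like this in practice (illustrative fragment, not from this patch):

struct entry {
	unsigned long key;
	unsigned long val;
};

static void example_clear(struct entry *tbl, size_t n)
{
	/* Bug the extended rule catches: clears only n pointer-sized chunks */
	memset((void *)tbl, 0, n * sizeof(tbl));

	/* Intended: clear n struct entry elements */
	memset((void *)tbl, 0, n * sizeof(*tbl));
}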
diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
new file mode 100755
index 000000000000..b65224bfb847
--- /dev/null
+++ b/scripts/gcc-plugin.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+srctree=$(dirname "$0")
+
+SHOW_ERROR=
+if [ "$1" = "--show-error" ] ; then
+ SHOW_ERROR=1
+ shift || true
+fi
+
+gccplugins_dir=$($3 -print-file-name=plugin)
+plugincc=$($1 -E -x c++ - -o /dev/null -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
+#include "gcc-common.h"
+#if BUILDING_GCC_VERSION >= 4008 || defined(ENABLE_BUILD_WITH_CXX)
+#warning $2 CXX
+#else
+#warning $1 CC
+#endif
+EOF
+)
+
+if [ $? -ne 0 ]
+then
+ if [ -n "$SHOW_ERROR" ] ; then
+ echo "${plugincc}" >&2
+ fi
+ exit 1
+fi
+
+case "$plugincc" in
+ *"$1 CC"*)
+ echo "$1"
+ exit 0
+ ;;
+
+ *"$2 CXX"*)
+ # the c++ compiler needs another test, see below
+ ;;
+
+ *)
+ exit 1
+ ;;
+esac
+
+# we need a c++ compiler that supports the designated initializer GNU extension
+plugincc=$($2 -c -x c++ -std=gnu++98 - -fsyntax-only -I"${srctree}"/gcc-plugins -I"${gccplugins_dir}"/include 2>&1 <<EOF
+#include "gcc-common.h"
+class test {
+public:
+ int test;
+} test = {
+ .test = 1
+};
+EOF
+)
+
+if [ $? -eq 0 ]
+then
+ echo "$2"
+ exit 0
+fi
+
+if [ -n "$SHOW_ERROR" ] ; then
+ echo "${plugincc}" >&2
+fi
+exit 1
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
new file mode 100644
index 000000000000..8b29dc17c73c
--- /dev/null
+++ b/scripts/gcc-plugins/Makefile
@@ -0,0 +1,29 @@
+GCC_PLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
+
+ifeq ($(PLUGINCC),$(HOSTCC))
+ HOSTLIBS := hostlibs
+ HOST_EXTRACFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu99 -ggdb
+ export HOST_EXTRACFLAGS
+else
+ HOSTLIBS := hostcxxlibs
+ HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
+ HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
+ HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable
+ export HOST_EXTRACXXFLAGS
+endif
+
+ifneq ($(CFLAGS_KCOV), $(SANCOV_PLUGIN))
+ GCC_PLUGIN := $(filter-out $(SANCOV_PLUGIN), $(GCC_PLUGIN))
+endif
+
+export HOSTLIBS
+
+$(HOSTLIBS)-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
+always := $($(HOSTLIBS)-y)
+
+$(foreach p,$($(HOSTLIBS)-y:%.so=%),$(eval $(p)-objs := $(p).o))
+
+subdir-y := $(GCC_PLUGIN_SUBDIR)
+subdir- += $(GCC_PLUGIN_SUBDIR)
+
+clean-files += *.so
diff --git a/scripts/gcc-plugins/cyc_complexity_plugin.c b/scripts/gcc-plugins/cyc_complexity_plugin.c
new file mode 100644
index 000000000000..34df974c6ba3
--- /dev/null
+++ b/scripts/gcc-plugins/cyc_complexity_plugin.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2011-2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * https://github.com/ephox-gcc-plugins/cyclomatic_complexity
+ *
+ * http://en.wikipedia.org/wiki/Cyclomatic_complexity
+ * The complexity M is then defined as:
+ * M = E - N + 2P
+ * where
+ *
+ * E = the number of edges of the graph
+ * N = the number of nodes of the graph
+ * P = the number of connected components (exit nodes).
+ *
+ * Usage (4.5 - 5):
+ * $ make clean; make run
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static struct plugin_info cyc_complexity_plugin_info = {
+ .version = "20160225",
+ .help = "Cyclomatic Complexity\n",
+};
+
+static unsigned int cyc_complexity_execute(void)
+{
+ int complexity;
+ expanded_location xloc;
+
+ /* M = E - N + 2P */
+ complexity = n_edges_for_fn(cfun) - n_basic_blocks_for_fn(cfun) + 2;
+
+ xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
+ fprintf(stderr, "Cyclomatic Complexity %d %s:%s\n", complexity,
+ xloc.file, DECL_NAME_POINTER(current_function_decl));
+
+ return 0;
+}
+
+#define PASS_NAME cyc_complexity
+
+#define NO_GATE
+#define TODO_FLAGS_FINISH TODO_dump_func
+
+#include "gcc-generate-gimple-pass.h"
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+ const char * const plugin_name = plugin_info->base_name;
+ struct register_pass_info cyc_complexity_pass_info;
+
+ cyc_complexity_pass_info.pass = make_cyc_complexity_pass();
+ cyc_complexity_pass_info.reference_pass_name = "ssa";
+ cyc_complexity_pass_info.ref_pass_instance_number = 1;
+ cyc_complexity_pass_info.pos_op = PASS_POS_INSERT_AFTER;
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL,
+ &cyc_complexity_plugin_info);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &cyc_complexity_pass_info);
+
+ return 0;
+}
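A rough worked example of the M = E - N + 2P formula (the CFG is idealized, so the exact numbers gcc reports can differ slightly):

/* One if/else gives an idealized GIMPLE CFG with
 *   N = 6 basic blocks (ENTRY, condition, then, else, return, EXIT),
 *   E = 6 edges, P = 1 connected component,
 * so M = 6 - 6 + 2 = 2, and the pass would print something like
 *   Cyclomatic Complexity 2 example.c:example_max
 */
static int example_max(int a, int b)
{
	int m;

	if (a > b)
		m = a;
	else
		m = b;
	return m;
}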
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
new file mode 100644
index 000000000000..172850bcd0d9
--- /dev/null
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -0,0 +1,830 @@
+#ifndef GCC_COMMON_H_INCLUDED
+#define GCC_COMMON_H_INCLUDED
+
+#include "bversion.h"
+#if BUILDING_GCC_VERSION >= 6000
+#include "gcc-plugin.h"
+#else
+#include "plugin.h"
+#endif
+#include "plugin-version.h"
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "line-map.h"
+#include "input.h"
+#include "tree.h"
+
+#include "tree-inline.h"
+#include "version.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "output.h"
+#include "except.h"
+#include "function.h"
+#include "toplev.h"
+#include "basic-block.h"
+#include "intl.h"
+#include "ggc.h"
+#include "timevar.h"
+
+#include "params.h"
+
+#if BUILDING_GCC_VERSION <= 4009
+#include "pointer-set.h"
+#else
+#include "hash-map.h"
+#endif
+
+#include "emit-rtl.h"
+#include "debug.h"
+#include "target.h"
+#include "langhooks.h"
+#include "cfgloop.h"
+#include "cgraph.h"
+#include "opts.h"
+
+#if BUILDING_GCC_VERSION == 4005
+#include <sys/mman.h>
+#endif
+
+#if BUILDING_GCC_VERSION >= 4007
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
+#endif
+
+#if BUILDING_GCC_VERSION >= 4006
+#include "c-family/c-common.h"
+#else
+#include "c-common.h"
+#endif
+
+#if BUILDING_GCC_VERSION <= 4008
+#include "tree-flow.h"
+#else
+#include "tree-cfgcleanup.h"
+#include "tree-ssa-operands.h"
+#include "tree-into-ssa.h"
+#endif
+
+#if BUILDING_GCC_VERSION >= 4008
+#include "is-a.h"
+#endif
+
+#include "diagnostic.h"
+#include "tree-dump.h"
+#include "tree-pass.h"
+#include "predict.h"
+#include "ipa-utils.h"
+
+#if BUILDING_GCC_VERSION >= 4009
+#include "attribs.h"
+#include "varasm.h"
+#include "stor-layout.h"
+#include "internal-fn.h"
+#include "gimple-expr.h"
+#include "gimple-fold.h"
+#include "context.h"
+#include "tree-ssa-alias.h"
+#include "tree-ssa.h"
+#include "stringpool.h"
+#include "tree-ssanames.h"
+#include "print-tree.h"
+#include "tree-eh.h"
+#include "stmt.h"
+#include "gimplify.h"
+#endif
+
+#include "gimple.h"
+
+#if BUILDING_GCC_VERSION >= 4009
+#include "tree-ssa-operands.h"
+#include "tree-phinodes.h"
+#include "tree-cfg.h"
+#include "gimple-iterator.h"
+#include "gimple-ssa.h"
+#include "ssa-iterators.h"
+#endif
+
+#if BUILDING_GCC_VERSION >= 5000
+#include "builtins.h"
+#endif
+
+/* #include "expr.h" where are you... */
+extern rtx emit_move_insn(rtx x, rtx y);
+
+/* missing from basic_block.h... */
+extern void debug_dominance_info(enum cdi_direction dir);
+extern void debug_dominance_tree(enum cdi_direction dir, basic_block root);
+
+#if BUILDING_GCC_VERSION == 4006
+extern void debug_gimple_stmt(gimple);
+extern void debug_gimple_seq(gimple_seq);
+extern void print_gimple_seq(FILE *, gimple_seq, int, int);
+extern void print_gimple_stmt(FILE *, gimple, int, int);
+extern void print_gimple_expr(FILE *, gimple, int, int);
+extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
+#endif
+
+#define __unused __attribute__((__unused__))
+
+#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
+#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
+#define TYPE_NAME_POINTER(node) IDENTIFIER_POINTER(TYPE_NAME(node))
+#define TYPE_NAME_LENGTH(node) IDENTIFIER_LENGTH(TYPE_NAME(node))
+
+/* should come from c-tree.h if only it were installed for gcc 4.5... */
+#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
+
+#if BUILDING_GCC_VERSION == 4005
+#define FOR_EACH_LOCAL_DECL(FUN, I, D) \
+ for (tree vars = (FUN)->local_decls, (I) = 0; \
+ vars && ((D) = TREE_VALUE(vars)); \
+ vars = TREE_CHAIN(vars), (I)++)
+#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
+#define FOR_EACH_VEC_ELT(T, V, I, P) \
+ for (I = 0; VEC_iterate(T, (V), (I), (P)); ++(I))
+#define TODO_rebuild_cgraph_edges 0
+#define SCOPE_FILE_SCOPE_P(EXP) (!(EXP))
+
+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+
+typedef struct varpool_node *varpool_node_ptr;
+
+static inline bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
+{
+ tree fndecl;
+
+ if (!is_gimple_call(stmt))
+ return false;
+ fndecl = gimple_call_fndecl(stmt);
+ if (!fndecl || DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
+ return false;
+ return DECL_FUNCTION_CODE(fndecl) == code;
+}
+
+static inline bool is_simple_builtin(tree decl)
+{
+ if (decl && DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
+ return false;
+
+ switch (DECL_FUNCTION_CODE(decl)) {
+ /* Builtins that expand to constants. */
+ case BUILT_IN_CONSTANT_P:
+ case BUILT_IN_EXPECT:
+ case BUILT_IN_OBJECT_SIZE:
+ case BUILT_IN_UNREACHABLE:
+ /* Simple register moves or loads from stack. */
+ case BUILT_IN_RETURN_ADDRESS:
+ case BUILT_IN_EXTRACT_RETURN_ADDR:
+ case BUILT_IN_FROB_RETURN_ADDR:
+ case BUILT_IN_RETURN:
+ case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
+ case BUILT_IN_FRAME_ADDRESS:
+ case BUILT_IN_VA_END:
+ case BUILT_IN_STACK_SAVE:
+ case BUILT_IN_STACK_RESTORE:
+ /* Exception state returns or moves registers around. */
+ case BUILT_IN_EH_FILTER:
+ case BUILT_IN_EH_POINTER:
+ case BUILT_IN_EH_COPY_VALUES:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static inline void add_local_decl(struct function *fun, tree d)
+{
+ gcc_assert(TREE_CODE(d) == VAR_DECL);
+ fun->local_decls = tree_cons(NULL_TREE, d, fun->local_decls);
+}
+#endif
+
+#if BUILDING_GCC_VERSION <= 4006
+#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
+#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP)
+#define EDGE_PRESERVE 0ULL
+#define HOST_WIDE_INT_PRINT_HEX_PURE "%" HOST_WIDE_INT_PRINT "x"
+#define flag_fat_lto_objects true
+
+#define get_random_seed(noinit) ({ \
+ unsigned HOST_WIDE_INT seed; \
+ sscanf(get_random_seed(noinit), "%" HOST_WIDE_INT_PRINT "x", &seed); \
+ seed * seed; })
+
+#define int_const_binop(code, arg1, arg2) \
+ int_const_binop((code), (arg1), (arg2), 0)
+
+static inline bool gimple_clobber_p(gimple s __unused)
+{
+ return false;
+}
+
+static inline bool gimple_asm_clobbers_memory_p(const_gimple stmt)
+{
+ unsigned i;
+
+ for (i = 0; i < gimple_asm_nclobbers(stmt); i++) {
+ tree op = gimple_asm_clobber_op(stmt, i);
+
+ if (!strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "memory"))
+ return true;
+ }
+
+ return false;
+}
+
+static inline tree builtin_decl_implicit(enum built_in_function fncode)
+{
+ return implicit_built_in_decls[fncode];
+}
+
+static inline int ipa_reverse_postorder(struct cgraph_node **order)
+{
+ return cgraph_postorder(order);
+}
+
+static inline struct cgraph_node *cgraph_create_node(tree decl)
+{
+ return cgraph_node(decl);
+}
+
+static inline struct cgraph_node *cgraph_get_create_node(tree decl)
+{
+ struct cgraph_node *node = cgraph_get_node(decl);
+
+ return node ? node : cgraph_node(decl);
+}
+
+static inline bool cgraph_function_with_gimple_body_p(struct cgraph_node *node)
+{
+ return node->analyzed && !node->thunk.thunk_p && !node->alias;
+}
+
+static inline struct cgraph_node *cgraph_first_function_with_gimple_body(void)
+{
+ struct cgraph_node *node;
+
+ for (node = cgraph_nodes; node; node = node->next)
+ if (cgraph_function_with_gimple_body_p(node))
+ return node;
+ return NULL;
+}
+
+static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node)
+{
+ for (node = node->next; node; node = node->next)
+ if (cgraph_function_with_gimple_body_p(node))
+ return node;
+ return NULL;
+}
+
+#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
+ for ((node) = cgraph_first_function_with_gimple_body(); (node); \
+ (node) = cgraph_next_function_with_gimple_body(node))
+
+static inline void varpool_add_new_variable(tree decl)
+{
+ varpool_finalize_decl(decl);
+}
+#endif
+
+#if BUILDING_GCC_VERSION <= 4007
+#define FOR_EACH_FUNCTION(node) \
+ for (node = cgraph_nodes; node; node = node->next)
+#define FOR_EACH_VARIABLE(node) \
+ for (node = varpool_nodes; node; node = node->next)
+#define PROP_loops 0
+#define NODE_SYMBOL(node) (node)
+#define NODE_DECL(node) (node)->decl
+#define INSN_LOCATION(INSN) RTL_LOCATION(INSN)
+#define vNULL NULL
+
+static inline int bb_loop_depth(const_basic_block bb)
+{
+ return bb->loop_father ? loop_depth(bb->loop_father) : 0;
+}
+
+static inline bool gimple_store_p(gimple gs)
+{
+ tree lhs = gimple_get_lhs(gs);
+
+ return lhs && !is_gimple_reg(lhs);
+}
+
+static inline void gimple_init_singleton(gimple g __unused)
+{
+}
+#endif
+
+#if BUILDING_GCC_VERSION == 4007 || BUILDING_GCC_VERSION == 4008
+static inline struct cgraph_node *cgraph_alias_target(struct cgraph_node *n)
+{
+ return cgraph_alias_aliased_node(n);
+}
+#endif
+
+#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION <= 4009
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
+ cgraph_create_edge((caller), (callee), (call_stmt), (count), (freq))
+#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
+ cgraph_create_edge_including_clones((caller), (callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
+#endif
+
+#if BUILDING_GCC_VERSION <= 4008
+#define ENTRY_BLOCK_PTR_FOR_FN(FN) ENTRY_BLOCK_PTR_FOR_FUNCTION(FN)
+#define EXIT_BLOCK_PTR_FOR_FN(FN) EXIT_BLOCK_PTR_FOR_FUNCTION(FN)
+#define basic_block_info_for_fn(FN) ((FN)->cfg->x_basic_block_info)
+#define n_basic_blocks_for_fn(FN) ((FN)->cfg->x_n_basic_blocks)
+#define n_edges_for_fn(FN) ((FN)->cfg->x_n_edges)
+#define last_basic_block_for_fn(FN) ((FN)->cfg->x_last_basic_block)
+#define label_to_block_map_for_fn(FN) ((FN)->cfg->x_label_to_block_map)
+#define profile_status_for_fn(FN) ((FN)->cfg->x_profile_status)
+#define BASIC_BLOCK_FOR_FN(FN, N) BASIC_BLOCK_FOR_FUNCTION((FN), (N))
+#define NODE_IMPLICIT_ALIAS(node) (node)->same_body_alias
+#define VAR_P(NODE) (TREE_CODE(NODE) == VAR_DECL)
+
+static inline bool tree_fits_shwi_p(const_tree t)
+{
+ if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST)
+ return false;
+
+ if (TREE_INT_CST_HIGH(t) == 0 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) >= 0)
+ return true;
+
+ if (TREE_INT_CST_HIGH(t) == -1 && (HOST_WIDE_INT)TREE_INT_CST_LOW(t) < 0 && !TYPE_UNSIGNED(TREE_TYPE(t)))
+ return true;
+
+ return false;
+}
+
+static inline bool tree_fits_uhwi_p(const_tree t)
+{
+ if (t == NULL_TREE || TREE_CODE(t) != INTEGER_CST)
+ return false;
+
+ return TREE_INT_CST_HIGH(t) == 0;
+}
+
+static inline HOST_WIDE_INT tree_to_shwi(const_tree t)
+{
+ gcc_assert(tree_fits_shwi_p(t));
+ return TREE_INT_CST_LOW(t);
+}
+
+static inline unsigned HOST_WIDE_INT tree_to_uhwi(const_tree t)
+{
+ gcc_assert(tree_fits_uhwi_p(t));
+ return TREE_INT_CST_LOW(t);
+}
+
+static inline const char *get_tree_code_name(enum tree_code code)
+{
+ gcc_assert(code < MAX_TREE_CODES);
+ return tree_code_name[code];
+}
+
+#define ipa_remove_stmt_references(cnode, stmt)
+
+typedef union gimple_statement_d gasm;
+typedef union gimple_statement_d gassign;
+typedef union gimple_statement_d gcall;
+typedef union gimple_statement_d gcond;
+typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d gphi;
+typedef union gimple_statement_d greturn;
+
+static inline gasm *as_a_gasm(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gasm *as_a_const_gasm(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gassign *as_a_gassign(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gassign *as_a_const_gassign(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gcall *as_a_gcall(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gcall *as_a_const_gcall(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gcond *as_a_gcond(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gcond *as_a_const_gcond(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gdebug *as_a_gdebug(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gphi *as_a_gphi(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gphi *as_a_const_gphi(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline greturn *as_a_greturn(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const greturn *as_a_const_greturn(const_gimple stmt)
+{
+ return stmt;
+}
+#endif
+
+#if BUILDING_GCC_VERSION == 4008
+#define NODE_SYMBOL(node) (&(node)->symbol)
+#define NODE_DECL(node) (node)->symbol.decl
+#endif
+
+#if BUILDING_GCC_VERSION >= 4008
+#define add_referenced_var(var)
+#define mark_sym_for_renaming(var)
+#define varpool_mark_needed_node(node)
+#define create_var_ann(var)
+#define TODO_dump_func 0
+#define TODO_dump_cgraph 0
+#endif
+
+#if BUILDING_GCC_VERSION <= 4009
+#define TODO_verify_il 0
+#define AVAIL_INTERPOSABLE AVAIL_OVERWRITABLE
+
+#define section_name_prefix LTO_SECTION_NAME_PREFIX
+#define fatal_error(loc, gmsgid, ...) fatal_error((gmsgid), __VA_ARGS__)
+
+typedef struct rtx_def rtx_insn;
+
+static inline void set_decl_section_name(tree node, const char *value)
+{
+ if (value)
+ DECL_SECTION_NAME(node) = build_string(strlen(value) + 1, value);
+ else
+ DECL_SECTION_NAME(node) = NULL;
+}
+#endif
+
+#if BUILDING_GCC_VERSION == 4009
+typedef struct gimple_statement_asm gasm;
+typedef struct gimple_statement_base gassign;
+typedef struct gimple_statement_call gcall;
+typedef struct gimple_statement_base gcond;
+typedef struct gimple_statement_base gdebug;
+typedef struct gimple_statement_phi gphi;
+typedef struct gimple_statement_base greturn;
+
+static inline gasm *as_a_gasm(gimple stmt)
+{
+ return as_a<gasm>(stmt);
+}
+
+static inline const gasm *as_a_const_gasm(const_gimple stmt)
+{
+ return as_a<const gasm>(stmt);
+}
+
+static inline gassign *as_a_gassign(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gassign *as_a_const_gassign(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gcall *as_a_gcall(gimple stmt)
+{
+ return as_a<gcall>(stmt);
+}
+
+static inline const gcall *as_a_const_gcall(const_gimple stmt)
+{
+ return as_a<const gcall>(stmt);
+}
+
+static inline gcond *as_a_gcond(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gcond *as_a_const_gcond(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gdebug *as_a_gdebug(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
+{
+ return stmt;
+}
+
+static inline gphi *as_a_gphi(gimple stmt)
+{
+ return as_a<gphi>(stmt);
+}
+
+static inline const gphi *as_a_const_gphi(const_gimple stmt)
+{
+ return as_a<const gphi>(stmt);
+}
+
+static inline greturn *as_a_greturn(gimple stmt)
+{
+ return stmt;
+}
+
+static inline const greturn *as_a_const_greturn(const_gimple stmt)
+{
+ return stmt;
+}
+#endif
+
+#if BUILDING_GCC_VERSION >= 4009
+#define TODO_ggc_collect 0
+#define NODE_SYMBOL(node) (node)
+#define NODE_DECL(node) (node)->decl
+#define cgraph_node_name(node) (node)->name()
+#define NODE_IMPLICIT_ALIAS(node) (node)->cpp_implicit_alias
+#endif
+
+#if BUILDING_GCC_VERSION >= 5000 && BUILDING_GCC_VERSION < 6000
+/* gimple related */
+template <>
+template <>
+inline bool is_a_helper<const gassign *>::test(const_gimple gs)
+{
+ return gs->code == GIMPLE_ASSIGN;
+}
+#endif
+
+#if BUILDING_GCC_VERSION >= 5000
+#define TODO_verify_ssa TODO_verify_il
+#define TODO_verify_flow TODO_verify_il
+#define TODO_verify_stmts TODO_verify_il
+#define TODO_verify_rtl_sharing TODO_verify_il
+
+#define INSN_DELETED_P(insn) (insn)->deleted()
+
+/* symtab/cgraph related */
+#define debug_cgraph_node(node) (node)->debug()
+#define cgraph_get_node(decl) cgraph_node::get(decl)
+#define cgraph_get_create_node(decl) cgraph_node::get_create(decl)
+#define cgraph_create_node(decl) cgraph_node::create(decl)
+#define cgraph_n_nodes symtab->cgraph_count
+#define cgraph_max_uid symtab->cgraph_max_uid
+#define varpool_get_node(decl) varpool_node::get(decl)
+
+#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
+ (caller)->create_edge((callee), (call_stmt), (count), (freq))
+#define cgraph_create_edge_including_clones(caller, callee, old_call_stmt, call_stmt, count, freq, nest, reason) \
+ (caller)->create_edge_including_clones((callee), (old_call_stmt), (call_stmt), (count), (freq), (reason))
+
+typedef struct cgraph_node *cgraph_node_ptr;
+typedef struct cgraph_edge *cgraph_edge_p;
+typedef struct varpool_node *varpool_node_ptr;
+
+static inline void change_decl_assembler_name(tree decl, tree name)
+{
+ symtab->change_decl_assembler_name(decl, name);
+}
+
+static inline void varpool_finalize_decl(tree decl)
+{
+ varpool_node::finalize_decl(decl);
+}
+
+static inline void varpool_add_new_variable(tree decl)
+{
+ varpool_node::add(decl);
+}
+
+static inline unsigned int rebuild_cgraph_edges(void)
+{
+ return cgraph_edge::rebuild_edges();
+}
+
+static inline cgraph_node_ptr cgraph_function_node(cgraph_node_ptr node, enum availability *availability)
+{
+ return node->function_symbol(availability);
+}
+
+static inline cgraph_node_ptr cgraph_function_or_thunk_node(cgraph_node_ptr node, enum availability *availability = NULL)
+{
+ return node->ultimate_alias_target(availability);
+}
+
+static inline bool cgraph_only_called_directly_p(cgraph_node_ptr node)
+{
+ return node->only_called_directly_p();
+}
+
+static inline enum availability cgraph_function_body_availability(cgraph_node_ptr node)
+{
+ return node->get_availability();
+}
+
+static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
+{
+ return node->get_alias_target();
+}
+
+static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
+{
+ return symtab->add_cgraph_insertion_hook(hook, data);
+}
+
+static inline void cgraph_remove_function_insertion_hook(struct cgraph_node_hook_list *entry)
+{
+ symtab->remove_cgraph_insertion_hook(entry);
+}
+
+static inline struct cgraph_node_hook_list *cgraph_add_node_removal_hook(cgraph_node_hook hook, void *data)
+{
+ return symtab->add_cgraph_removal_hook(hook, data);
+}
+
+static inline void cgraph_remove_node_removal_hook(struct cgraph_node_hook_list *entry)
+{
+ symtab->remove_cgraph_removal_hook(entry);
+}
+
+static inline struct cgraph_2node_hook_list *cgraph_add_node_duplication_hook(cgraph_2node_hook hook, void *data)
+{
+ return symtab->add_cgraph_duplication_hook(hook, data);
+}
+
+static inline void cgraph_remove_node_duplication_hook(struct cgraph_2node_hook_list *entry)
+{
+ symtab->remove_cgraph_duplication_hook(entry);
+}
+
+static inline void cgraph_call_node_duplication_hooks(cgraph_node_ptr node, cgraph_node_ptr node2)
+{
+ symtab->call_cgraph_duplication_hooks(node, node2);
+}
+
+static inline void cgraph_call_edge_duplication_hooks(cgraph_edge *cs1, cgraph_edge *cs2)
+{
+ symtab->call_edge_duplication_hooks(cs1, cs2);
+}
+
+#if BUILDING_GCC_VERSION >= 6000
+typedef gimple *gimple_ptr;
+typedef const gimple *const_gimple_ptr;
+#define gimple gimple_ptr
+#define const_gimple const_gimple_ptr
+#undef CONST_CAST_GIMPLE
+#define CONST_CAST_GIMPLE(X) CONST_CAST(gimple, (X))
+#endif
+
+/* gimple related */
+static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree lhs, tree op1, tree op2 MEM_STAT_DECL)
+{
+ return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
+}
+
+template <>
+template <>
+inline bool is_a_helper<const greturn *>::test(const_gimple gs)
+{
+ return gs->code == GIMPLE_RETURN;
+}
+
+static inline gasm *as_a_gasm(gimple stmt)
+{
+ return as_a<gasm *>(stmt);
+}
+
+static inline const gasm *as_a_const_gasm(const_gimple stmt)
+{
+ return as_a<const gasm *>(stmt);
+}
+
+static inline gassign *as_a_gassign(gimple stmt)
+{
+ return as_a<gassign *>(stmt);
+}
+
+static inline const gassign *as_a_const_gassign(const_gimple stmt)
+{
+ return as_a<const gassign *>(stmt);
+}
+
+static inline gcall *as_a_gcall(gimple stmt)
+{
+ return as_a<gcall *>(stmt);
+}
+
+static inline const gcall *as_a_const_gcall(const_gimple stmt)
+{
+ return as_a<const gcall *>(stmt);
+}
+
+static inline gphi *as_a_gphi(gimple stmt)
+{
+ return as_a<gphi *>(stmt);
+}
+
+static inline const gphi *as_a_const_gphi(const_gimple stmt)
+{
+ return as_a<const gphi *>(stmt);
+}
+
+static inline greturn *as_a_greturn(gimple stmt)
+{
+ return as_a<greturn *>(stmt);
+}
+
+static inline const greturn *as_a_const_greturn(const_gimple stmt)
+{
+ return as_a<const greturn *>(stmt);
+}
+
+/* IPA/LTO related */
+#define ipa_ref_list_referring_iterate(L, I, P) \
+ (L)->referring.iterate((I), &(P))
+#define ipa_ref_list_reference_iterate(L, I, P) \
+ (L)->reference.iterate((I), &(P))
+
+static inline cgraph_node_ptr ipa_ref_referring_node(struct ipa_ref *ref)
+{
+ return dyn_cast<cgraph_node_ptr>(ref->referring);
+}
+
+static inline void ipa_remove_stmt_references(symtab_node *referring_node, gimple stmt)
+{
+ referring_node->remove_stmt_references(stmt);
+}
+#endif
+
+#if BUILDING_GCC_VERSION < 6000
+#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning) \
+ get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, pvolatilep, keep_aligning)
+#define gen_rtx_set(ARG0, ARG1) gen_rtx_SET(VOIDmode, (ARG0), (ARG1))
+#endif
+
+#if BUILDING_GCC_VERSION >= 6000
+#define gen_rtx_set(ARG0, ARG1) gen_rtx_SET((ARG0), (ARG1))
+#endif
+
+#ifdef __cplusplus
+static inline void debug_tree(const_tree t)
+{
+ debug_tree(CONST_CAST_TREE(t));
+}
+
+static inline void debug_gimple_stmt(const_gimple s)
+{
+ debug_gimple_stmt(CONST_CAST_GIMPLE(s));
+}
+#else
+#define debug_tree(t) debug_tree(CONST_CAST_TREE(t))
+#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
+#endif
+
+#endif
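A small hypothetical helper to show what these wrappers buy a plugin: the same code compiles against gcc 4.5 through 6 because as_a_gcall() and DECL_NAME_POINTER() above paper over the per-version API differences.

/* Return the name of a directly called function, or NULL. */
static const char *example_called_fn_name(gimple stmt)
{
	tree fndecl;

	if (!is_gimple_call(stmt))
		return NULL;

	fndecl = gimple_call_fndecl(as_a_gcall(stmt));
	if (!fndecl || !DECL_NAME(fndecl))
		return NULL;

	return DECL_NAME_POINTER(fndecl);
}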
diff --git a/scripts/gcc-plugins/gcc-generate-gimple-pass.h b/scripts/gcc-plugins/gcc-generate-gimple-pass.h
new file mode 100644
index 000000000000..526c3c79b68e
--- /dev/null
+++ b/scripts/gcc-plugins/gcc-generate-gimple-pass.h
@@ -0,0 +1,175 @@
+/*
+ * Generator for GIMPLE pass related boilerplate code/data
+ *
+ * Supports gcc 4.5-6
+ *
+ * Usage:
+ *
+ * 1. before inclusion define PASS_NAME
+ * 2. before inclusion define NO_* for unimplemented callbacks
+ * NO_GATE
+ * NO_EXECUTE
+ * 3. before inclusion define PROPERTIES_* and TODO_FLAGS_* to override
+ * the default 0 values
+ * 4. for convenience, all the above will be undefined after inclusion!
+ * 5. the only exported name is make_PASS_NAME_pass() to register with gcc
+ */
+
+#ifndef PASS_NAME
+#error at least PASS_NAME must be defined
+#else
+#define __GCC_PLUGIN_STRINGIFY(n) #n
+#define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n)
+#define _GCC_PLUGIN_CONCAT2(x, y) x ## y
+#define _GCC_PLUGIN_CONCAT3(x, y, z) x ## y ## z
+
+#define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data)
+#define _PASS_NAME_PASS_DATA __PASS_NAME_PASS_DATA(PASS_NAME)
+
+#define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass)
+#define _PASS_NAME_PASS __PASS_NAME_PASS(PASS_NAME)
+
+#define _PASS_NAME_NAME _GCC_PLUGIN_STRINGIFY(PASS_NAME)
+
+#define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass)
+#define _MAKE_PASS_NAME_PASS __MAKE_PASS_NAME_PASS(PASS_NAME)
+
+#ifdef NO_GATE
+#define _GATE NULL
+#define _HAS_GATE false
+#else
+#define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate)
+#define _GATE __GATE(PASS_NAME)
+#define _HAS_GATE true
+#endif
+
+#ifdef NO_EXECUTE
+#define _EXECUTE NULL
+#define _HAS_EXECUTE false
+#else
+#define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute)
+#define _EXECUTE __EXECUTE(PASS_NAME)
+#define _HAS_EXECUTE true
+#endif
+
+#ifndef PROPERTIES_REQUIRED
+#define PROPERTIES_REQUIRED 0
+#endif
+
+#ifndef PROPERTIES_PROVIDED
+#define PROPERTIES_PROVIDED 0
+#endif
+
+#ifndef PROPERTIES_DESTROYED
+#define PROPERTIES_DESTROYED 0
+#endif
+
+#ifndef TODO_FLAGS_START
+#define TODO_FLAGS_START 0
+#endif
+
+#ifndef TODO_FLAGS_FINISH
+#define TODO_FLAGS_FINISH 0
+#endif
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+static const pass_data _PASS_NAME_PASS_DATA = {
+#else
+static struct gimple_opt_pass _PASS_NAME_PASS = {
+ .pass = {
+#endif
+ .type = GIMPLE_PASS,
+ .name = _PASS_NAME_NAME,
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 5000
+#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = _HAS_GATE,
+ .has_execute = _HAS_EXECUTE,
+#else
+ .gate = _GATE,
+ .execute = _EXECUTE,
+ .sub = NULL,
+ .next = NULL,
+ .static_pass_number = 0,
+#endif
+ .tv_id = TV_NONE,
+ .properties_required = PROPERTIES_REQUIRED,
+ .properties_provided = PROPERTIES_PROVIDED,
+ .properties_destroyed = PROPERTIES_DESTROYED,
+ .todo_flags_start = TODO_FLAGS_START,
+ .todo_flags_finish = TODO_FLAGS_FINISH,
+#if BUILDING_GCC_VERSION < 4009
+ }
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+class _PASS_NAME_PASS : public gimple_opt_pass {
+public:
+ _PASS_NAME_PASS() : gimple_opt_pass(_PASS_NAME_PASS_DATA, g) {}
+
+#ifndef NO_GATE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual bool gate(function *) { return _GATE(); }
+#else
+ virtual bool gate(void) { return _GATE(); }
+#endif
+#endif
+
+ virtual opt_pass * clone () { return new _PASS_NAME_PASS(); }
+
+#ifndef NO_EXECUTE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual unsigned int execute(function *) { return _EXECUTE(); }
+#else
+ virtual unsigned int execute(void) { return _EXECUTE(); }
+#endif
+#endif
+};
+}
+
+opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return new _PASS_NAME_PASS();
+}
+#else
+struct opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return &_PASS_NAME_PASS.pass;
+}
+#endif
+
+/* clean up user provided defines */
+#undef PASS_NAME
+#undef NO_GATE
+#undef NO_EXECUTE
+
+#undef PROPERTIES_DESTROYED
+#undef PROPERTIES_PROVIDED
+#undef PROPERTIES_REQUIRED
+#undef TODO_FLAGS_FINISH
+#undef TODO_FLAGS_START
+
+/* clean up generated defines */
+#undef _EXECUTE
+#undef __EXECUTE
+#undef _GATE
+#undef __GATE
+#undef _GCC_PLUGIN_CONCAT2
+#undef _GCC_PLUGIN_CONCAT3
+#undef _GCC_PLUGIN_STRINGIFY
+#undef __GCC_PLUGIN_STRINGIFY
+#undef _HAS_EXECUTE
+#undef _HAS_GATE
+#undef _MAKE_PASS_NAME_PASS
+#undef __MAKE_PASS_NAME_PASS
+#undef _PASS_NAME_NAME
+#undef _PASS_NAME_PASS
+#undef __PASS_NAME_PASS
+#undef _PASS_NAME_PASS_DATA
+#undef __PASS_NAME_PASS_DATA
+
+#endif /* PASS_NAME */
diff --git a/scripts/gcc-plugins/gcc-generate-ipa-pass.h b/scripts/gcc-plugins/gcc-generate-ipa-pass.h
new file mode 100644
index 000000000000..9bd926e072f0
--- /dev/null
+++ b/scripts/gcc-plugins/gcc-generate-ipa-pass.h
@@ -0,0 +1,289 @@
+/*
+ * Generator for IPA pass related boilerplate code/data
+ *
+ * Supports gcc 4.5-6
+ *
+ * Usage:
+ *
+ * 1. before inclusion define PASS_NAME
+ * 2. before inclusion define NO_* for unimplemented callbacks
+ * NO_GENERATE_SUMMARY
+ * NO_READ_SUMMARY
+ * NO_WRITE_SUMMARY
+ * NO_READ_OPTIMIZATION_SUMMARY
+ * NO_WRITE_OPTIMIZATION_SUMMARY
+ * NO_STMT_FIXUP
+ * NO_FUNCTION_TRANSFORM
+ * NO_VARIABLE_TRANSFORM
+ * NO_GATE
+ * NO_EXECUTE
+ * 3. before inclusion define PROPERTIES_* and *TODO_FLAGS_* to override
+ * the default 0 values
+ * 4. for convenience, all the above will be undefined after inclusion!
+ * 5. the only exported name is make_PASS_NAME_pass() to register with gcc
+ */
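+
+/*
+ * Example (hypothetical pass name, for illustration only): an IPA pass
+ * that implements nothing but an execute callback stubs out the rest
+ * with NO_* defines:
+ *
+ *	static unsigned int example_ipa_execute(void)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	#define PASS_NAME example_ipa
+ *	#define NO_GATE
+ *	#define NO_GENERATE_SUMMARY
+ *	#define NO_READ_SUMMARY
+ *	#define NO_WRITE_SUMMARY
+ *	#define NO_READ_OPTIMIZATION_SUMMARY
+ *	#define NO_WRITE_OPTIMIZATION_SUMMARY
+ *	#define NO_STMT_FIXUP
+ *	#define NO_FUNCTION_TRANSFORM
+ *	#define NO_VARIABLE_TRANSFORM
+ *	#include "gcc-generate-ipa-pass.h"
+ *
+ * This exports make_example_ipa_pass() for registration with gcc.
+ */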
+
+#ifndef PASS_NAME
+#error at least PASS_NAME must be defined
+#else
+#define __GCC_PLUGIN_STRINGIFY(n) #n
+#define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n)
+#define _GCC_PLUGIN_CONCAT2(x, y) x ## y
+#define _GCC_PLUGIN_CONCAT3(x, y, z) x ## y ## z
+
+#define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data)
+#define _PASS_NAME_PASS_DATA __PASS_NAME_PASS_DATA(PASS_NAME)
+
+#define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass)
+#define _PASS_NAME_PASS __PASS_NAME_PASS(PASS_NAME)
+
+#define _PASS_NAME_NAME _GCC_PLUGIN_STRINGIFY(PASS_NAME)
+
+#define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass)
+#define _MAKE_PASS_NAME_PASS __MAKE_PASS_NAME_PASS(PASS_NAME)
+
+#ifdef NO_GENERATE_SUMMARY
+#define _GENERATE_SUMMARY NULL
+#else
+#define __GENERATE_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _generate_summary)
+#define _GENERATE_SUMMARY __GENERATE_SUMMARY(PASS_NAME)
+#endif
+
+#ifdef NO_READ_SUMMARY
+#define _READ_SUMMARY NULL
+#else
+#define __READ_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _read_summary)
+#define _READ_SUMMARY __READ_SUMMARY(PASS_NAME)
+#endif
+
+#ifdef NO_WRITE_SUMMARY
+#define _WRITE_SUMMARY NULL
+#else
+#define __WRITE_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _write_summary)
+#define _WRITE_SUMMARY __WRITE_SUMMARY(PASS_NAME)
+#endif
+
+#ifdef NO_READ_OPTIMIZATION_SUMMARY
+#define _READ_OPTIMIZATION_SUMMARY NULL
+#else
+#define __READ_OPTIMIZATION_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _read_optimization_summary)
+#define _READ_OPTIMIZATION_SUMMARY __READ_OPTIMIZATION_SUMMARY(PASS_NAME)
+#endif
+
+#ifdef NO_WRITE_OPTIMIZATION_SUMMARY
+#define _WRITE_OPTIMIZATION_SUMMARY NULL
+#else
+#define __WRITE_OPTIMIZATION_SUMMARY(n) _GCC_PLUGIN_CONCAT2(n, _write_optimization_summary)
+#define _WRITE_OPTIMIZATION_SUMMARY __WRITE_OPTIMIZATION_SUMMARY(PASS_NAME)
+#endif
+
+#ifdef NO_STMT_FIXUP
+#define _STMT_FIXUP NULL
+#else
+#define __STMT_FIXUP(n) _GCC_PLUGIN_CONCAT2(n, _stmt_fixup)
+#define _STMT_FIXUP __STMT_FIXUP(PASS_NAME)
+#endif
+
+#ifdef NO_FUNCTION_TRANSFORM
+#define _FUNCTION_TRANSFORM NULL
+#else
+#define __FUNCTION_TRANSFORM(n) _GCC_PLUGIN_CONCAT2(n, _function_transform)
+#define _FUNCTION_TRANSFORM __FUNCTION_TRANSFORM(PASS_NAME)
+#endif
+
+#ifdef NO_VARIABLE_TRANSFORM
+#define _VARIABLE_TRANSFORM NULL
+#else
+#define __VARIABLE_TRANSFORM(n) _GCC_PLUGIN_CONCAT2(n, _variable_transform)
+#define _VARIABLE_TRANSFORM __VARIABLE_TRANSFORM(PASS_NAME)
+#endif
+
+#ifdef NO_GATE
+#define _GATE NULL
+#define _HAS_GATE false
+#else
+#define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate)
+#define _GATE __GATE(PASS_NAME)
+#define _HAS_GATE true
+#endif
+
+#ifdef NO_EXECUTE
+#define _EXECUTE NULL
+#define _HAS_EXECUTE false
+#else
+#define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute)
+#define _EXECUTE __EXECUTE(PASS_NAME)
+#define _HAS_EXECUTE true
+#endif
+
+#ifndef PROPERTIES_REQUIRED
+#define PROPERTIES_REQUIRED 0
+#endif
+
+#ifndef PROPERTIES_PROVIDED
+#define PROPERTIES_PROVIDED 0
+#endif
+
+#ifndef PROPERTIES_DESTROYED
+#define PROPERTIES_DESTROYED 0
+#endif
+
+#ifndef TODO_FLAGS_START
+#define TODO_FLAGS_START 0
+#endif
+
+#ifndef TODO_FLAGS_FINISH
+#define TODO_FLAGS_FINISH 0
+#endif
+
+#ifndef FUNCTION_TRANSFORM_TODO_FLAGS_START
+#define FUNCTION_TRANSFORM_TODO_FLAGS_START 0
+#endif
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+static const pass_data _PASS_NAME_PASS_DATA = {
+#else
+static struct ipa_opt_pass_d _PASS_NAME_PASS = {
+ .pass = {
+#endif
+ .type = IPA_PASS,
+ .name = _PASS_NAME_NAME,
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 5000
+#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = _HAS_GATE,
+ .has_execute = _HAS_EXECUTE,
+#else
+ .gate = _GATE,
+ .execute = _EXECUTE,
+ .sub = NULL,
+ .next = NULL,
+ .static_pass_number = 0,
+#endif
+ .tv_id = TV_NONE,
+ .properties_required = PROPERTIES_REQUIRED,
+ .properties_provided = PROPERTIES_PROVIDED,
+ .properties_destroyed = PROPERTIES_DESTROYED,
+ .todo_flags_start = TODO_FLAGS_START,
+ .todo_flags_finish = TODO_FLAGS_FINISH,
+#if BUILDING_GCC_VERSION < 4009
+ },
+ .generate_summary = _GENERATE_SUMMARY,
+ .write_summary = _WRITE_SUMMARY,
+ .read_summary = _READ_SUMMARY,
+#if BUILDING_GCC_VERSION >= 4006
+ .write_optimization_summary = _WRITE_OPTIMIZATION_SUMMARY,
+ .read_optimization_summary = _READ_OPTIMIZATION_SUMMARY,
+#endif
+ .stmt_fixup = _STMT_FIXUP,
+ .function_transform_todo_flags_start = FUNCTION_TRANSFORM_TODO_FLAGS_START,
+ .function_transform = _FUNCTION_TRANSFORM,
+ .variable_transform = _VARIABLE_TRANSFORM,
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+class _PASS_NAME_PASS : public ipa_opt_pass_d {
+public:
+ _PASS_NAME_PASS() : ipa_opt_pass_d(_PASS_NAME_PASS_DATA,
+ g,
+ _GENERATE_SUMMARY,
+ _WRITE_SUMMARY,
+ _READ_SUMMARY,
+ _WRITE_OPTIMIZATION_SUMMARY,
+ _READ_OPTIMIZATION_SUMMARY,
+ _STMT_FIXUP,
+ FUNCTION_TRANSFORM_TODO_FLAGS_START,
+ _FUNCTION_TRANSFORM,
+ _VARIABLE_TRANSFORM) {}
+
+#ifndef NO_GATE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual bool gate(function *) { return _GATE(); }
+#else
+ virtual bool gate(void) { return _GATE(); }
+#endif
+#endif
+
+ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); }
+
+#ifndef NO_EXECUTE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual unsigned int execute(function *) { return _EXECUTE(); }
+#else
+ virtual unsigned int execute(void) { return _EXECUTE(); }
+#endif
+#endif
+};
+}
+
+opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return new _PASS_NAME_PASS();
+}
+#else
+struct opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return &_PASS_NAME_PASS.pass;
+}
+#endif
+
+/* clean up user provided defines */
+#undef PASS_NAME
+#undef NO_GENERATE_SUMMARY
+#undef NO_WRITE_SUMMARY
+#undef NO_READ_SUMMARY
+#undef NO_WRITE_OPTIMIZATION_SUMMARY
+#undef NO_READ_OPTIMIZATION_SUMMARY
+#undef NO_STMT_FIXUP
+#undef NO_FUNCTION_TRANSFORM
+#undef NO_VARIABLE_TRANSFORM
+#undef NO_GATE
+#undef NO_EXECUTE
+
+#undef FUNCTION_TRANSFORM_TODO_FLAGS_START
+#undef PROPERTIES_DESTROYED
+#undef PROPERTIES_PROVIDED
+#undef PROPERTIES_REQUIRED
+#undef TODO_FLAGS_FINISH
+#undef TODO_FLAGS_START
+
+/* clean up generated defines */
+#undef _EXECUTE
+#undef __EXECUTE
+#undef _FUNCTION_TRANSFORM
+#undef __FUNCTION_TRANSFORM
+#undef _GATE
+#undef __GATE
+#undef _GCC_PLUGIN_CONCAT2
+#undef _GCC_PLUGIN_CONCAT3
+#undef _GCC_PLUGIN_STRINGIFY
+#undef __GCC_PLUGIN_STRINGIFY
+#undef _GENERATE_SUMMARY
+#undef __GENERATE_SUMMARY
+#undef _HAS_EXECUTE
+#undef _HAS_GATE
+#undef _MAKE_PASS_NAME_PASS
+#undef __MAKE_PASS_NAME_PASS
+#undef _PASS_NAME_NAME
+#undef _PASS_NAME_PASS
+#undef __PASS_NAME_PASS
+#undef _PASS_NAME_PASS_DATA
+#undef __PASS_NAME_PASS_DATA
+#undef _READ_OPTIMIZATION_SUMMARY
+#undef __READ_OPTIMIZATION_SUMMARY
+#undef _READ_SUMMARY
+#undef __READ_SUMMARY
+#undef _STMT_FIXUP
+#undef __STMT_FIXUP
+#undef _VARIABLE_TRANSFORM
+#undef __VARIABLE_TRANSFORM
+#undef _WRITE_OPTIMIZATION_SUMMARY
+#undef __WRITE_OPTIMIZATION_SUMMARY
+#undef _WRITE_SUMMARY
+#undef __WRITE_SUMMARY
+
+#endif /* PASS_NAME */
diff --git a/scripts/gcc-plugins/gcc-generate-rtl-pass.h b/scripts/gcc-plugins/gcc-generate-rtl-pass.h
new file mode 100644
index 000000000000..1dc67a5aeadf
--- /dev/null
+++ b/scripts/gcc-plugins/gcc-generate-rtl-pass.h
@@ -0,0 +1,175 @@
+/*
+ * Generator for RTL pass related boilerplate code/data
+ *
+ * Supports gcc 4.5-6
+ *
+ * Usage:
+ *
+ * 1. before inclusion define PASS_NAME
+ * 2. before inclusion define NO_* for unimplemented callbacks
+ * NO_GATE
+ * NO_EXECUTE
+ * 3. before inclusion define PROPERTIES_* and TODO_FLAGS_* to override
+ * the default 0 values
+ * 4. for convenience, all the above will be undefined after inclusion!
+ * 5. the only exported name is make_PASS_NAME_pass() to register with gcc
+ */
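+
+/*
+ * Example (hypothetical names, for illustration only; usage mirrors the
+ * GIMPLE generator):
+ *
+ *	#define PASS_NAME example_rtl
+ *	#define NO_GATE
+ *	#include "gcc-generate-rtl-pass.h"
+ *
+ * This expects example_rtl_execute() to be defined beforehand and
+ * exports make_example_rtl_pass().
+ */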
+
+#ifndef PASS_NAME
+#error at least PASS_NAME must be defined
+#else
+#define __GCC_PLUGIN_STRINGIFY(n) #n
+#define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n)
+#define _GCC_PLUGIN_CONCAT2(x, y) x ## y
+#define _GCC_PLUGIN_CONCAT3(x, y, z) x ## y ## z
+
+#define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data)
+#define _PASS_NAME_PASS_DATA __PASS_NAME_PASS_DATA(PASS_NAME)
+
+#define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass)
+#define _PASS_NAME_PASS __PASS_NAME_PASS(PASS_NAME)
+
+#define _PASS_NAME_NAME _GCC_PLUGIN_STRINGIFY(PASS_NAME)
+
+#define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass)
+#define _MAKE_PASS_NAME_PASS __MAKE_PASS_NAME_PASS(PASS_NAME)
+
+#ifdef NO_GATE
+#define _GATE NULL
+#define _HAS_GATE false
+#else
+#define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate)
+#define _GATE __GATE(PASS_NAME)
+#define _HAS_GATE true
+#endif
+
+#ifdef NO_EXECUTE
+#define _EXECUTE NULL
+#define _HAS_EXECUTE false
+#else
+#define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute)
+#define _EXECUTE __EXECUTE(PASS_NAME)
+#define _HAS_EXECUTE true
+#endif
+
+#ifndef PROPERTIES_REQUIRED
+#define PROPERTIES_REQUIRED 0
+#endif
+
+#ifndef PROPERTIES_PROVIDED
+#define PROPERTIES_PROVIDED 0
+#endif
+
+#ifndef PROPERTIES_DESTROYED
+#define PROPERTIES_DESTROYED 0
+#endif
+
+#ifndef TODO_FLAGS_START
+#define TODO_FLAGS_START 0
+#endif
+
+#ifndef TODO_FLAGS_FINISH
+#define TODO_FLAGS_FINISH 0
+#endif
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+static const pass_data _PASS_NAME_PASS_DATA = {
+#else
+static struct rtl_opt_pass _PASS_NAME_PASS = {
+ .pass = {
+#endif
+ .type = RTL_PASS,
+ .name = _PASS_NAME_NAME,
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 5000
+#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = _HAS_GATE,
+ .has_execute = _HAS_EXECUTE,
+#else
+ .gate = _GATE,
+ .execute = _EXECUTE,
+ .sub = NULL,
+ .next = NULL,
+ .static_pass_number = 0,
+#endif
+ .tv_id = TV_NONE,
+ .properties_required = PROPERTIES_REQUIRED,
+ .properties_provided = PROPERTIES_PROVIDED,
+ .properties_destroyed = PROPERTIES_DESTROYED,
+ .todo_flags_start = TODO_FLAGS_START,
+ .todo_flags_finish = TODO_FLAGS_FINISH,
+#if BUILDING_GCC_VERSION < 4009
+ }
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+class _PASS_NAME_PASS : public rtl_opt_pass {
+public:
+ _PASS_NAME_PASS() : rtl_opt_pass(_PASS_NAME_PASS_DATA, g) {}
+
+#ifndef NO_GATE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual bool gate(function *) { return _GATE(); }
+#else
+ virtual bool gate(void) { return _GATE(); }
+#endif
+#endif
+
+ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); }
+
+#ifndef NO_EXECUTE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual unsigned int execute(function *) { return _EXECUTE(); }
+#else
+ virtual unsigned int execute(void) { return _EXECUTE(); }
+#endif
+#endif
+};
+}
+
+opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return new _PASS_NAME_PASS();
+}
+#else
+struct opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return &_PASS_NAME_PASS.pass;
+}
+#endif
+
+/* clean up user provided defines */
+#undef PASS_NAME
+#undef NO_GATE
+#undef NO_EXECUTE
+
+#undef PROPERTIES_DESTROYED
+#undef PROPERTIES_PROVIDED
+#undef PROPERTIES_REQUIRED
+#undef TODO_FLAGS_FINISH
+#undef TODO_FLAGS_START
+
+/* clean up generated defines */
+#undef _EXECUTE
+#undef __EXECUTE
+#undef _GATE
+#undef __GATE
+#undef _GCC_PLUGIN_CONCAT2
+#undef _GCC_PLUGIN_CONCAT3
+#undef _GCC_PLUGIN_STRINGIFY
+#undef __GCC_PLUGIN_STRINGIFY
+#undef _HAS_EXECUTE
+#undef _HAS_GATE
+#undef _MAKE_PASS_NAME_PASS
+#undef __MAKE_PASS_NAME_PASS
+#undef _PASS_NAME_NAME
+#undef _PASS_NAME_PASS
+#undef __PASS_NAME_PASS
+#undef _PASS_NAME_PASS_DATA
+#undef __PASS_NAME_PASS_DATA
+
+#endif /* PASS_NAME */
diff --git a/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h b/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h
new file mode 100644
index 000000000000..a27e2b36afaa
--- /dev/null
+++ b/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h
@@ -0,0 +1,175 @@
+/*
+ * Generator for SIMPLE_IPA pass related boilerplate code/data
+ *
+ * Supports gcc 4.5-6
+ *
+ * Usage:
+ *
+ * 1. before inclusion define PASS_NAME
+ * 2. before inclusion define NO_* for unimplemented callbacks
+ * NO_GATE
+ * NO_EXECUTE
+ * 3. before inclusion define PROPERTIES_* and TODO_FLAGS_* to override
+ * the default 0 values
+ * 4. for convenience, all the above will be undefined after inclusion!
+ * 5. the only exported name is make_PASS_NAME_pass() to register with gcc
+ */
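+
+/*
+ * Example (hypothetical names, for illustration only; usage mirrors the
+ * GIMPLE generator):
+ *
+ *	#define PASS_NAME example_simple_ipa
+ *	#define NO_GATE
+ *	#include "gcc-generate-simple_ipa-pass.h"
+ *
+ * This expects example_simple_ipa_execute() to be defined beforehand and
+ * exports make_example_simple_ipa_pass().
+ */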
+
+#ifndef PASS_NAME
+#error at least PASS_NAME must be defined
+#else
+#define __GCC_PLUGIN_STRINGIFY(n) #n
+#define _GCC_PLUGIN_STRINGIFY(n) __GCC_PLUGIN_STRINGIFY(n)
+#define _GCC_PLUGIN_CONCAT2(x, y) x ## y
+#define _GCC_PLUGIN_CONCAT3(x, y, z) x ## y ## z
+
+#define __PASS_NAME_PASS_DATA(n) _GCC_PLUGIN_CONCAT2(n, _pass_data)
+#define _PASS_NAME_PASS_DATA __PASS_NAME_PASS_DATA(PASS_NAME)
+
+#define __PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT2(n, _pass)
+#define _PASS_NAME_PASS __PASS_NAME_PASS(PASS_NAME)
+
+#define _PASS_NAME_NAME _GCC_PLUGIN_STRINGIFY(PASS_NAME)
+
+#define __MAKE_PASS_NAME_PASS(n) _GCC_PLUGIN_CONCAT3(make_, n, _pass)
+#define _MAKE_PASS_NAME_PASS __MAKE_PASS_NAME_PASS(PASS_NAME)
+
+#ifdef NO_GATE
+#define _GATE NULL
+#define _HAS_GATE false
+#else
+#define __GATE(n) _GCC_PLUGIN_CONCAT2(n, _gate)
+#define _GATE __GATE(PASS_NAME)
+#define _HAS_GATE true
+#endif
+
+#ifdef NO_EXECUTE
+#define _EXECUTE NULL
+#define _HAS_EXECUTE false
+#else
+#define __EXECUTE(n) _GCC_PLUGIN_CONCAT2(n, _execute)
+#define _EXECUTE __EXECUTE(PASS_NAME)
+#define _HAS_EXECUTE true
+#endif
+
+#ifndef PROPERTIES_REQUIRED
+#define PROPERTIES_REQUIRED 0
+#endif
+
+#ifndef PROPERTIES_PROVIDED
+#define PROPERTIES_PROVIDED 0
+#endif
+
+#ifndef PROPERTIES_DESTROYED
+#define PROPERTIES_DESTROYED 0
+#endif
+
+#ifndef TODO_FLAGS_START
+#define TODO_FLAGS_START 0
+#endif
+
+#ifndef TODO_FLAGS_FINISH
+#define TODO_FLAGS_FINISH 0
+#endif
+
+#if BUILDING_GCC_VERSION >= 4009
+namespace {
+static const pass_data _PASS_NAME_PASS_DATA = {
+#else
+static struct simple_ipa_opt_pass _PASS_NAME_PASS = {
+ .pass = {
+#endif
+ .type = SIMPLE_IPA_PASS,
+ .name = _PASS_NAME_NAME,
+#if BUILDING_GCC_VERSION >= 4008
+ .optinfo_flags = OPTGROUP_NONE,
+#endif
+#if BUILDING_GCC_VERSION >= 5000
+#elif BUILDING_GCC_VERSION == 4009
+ .has_gate = _HAS_GATE,
+ .has_execute = _HAS_EXECUTE,
+#else
+ .gate = _GATE,
+ .execute = _EXECUTE,
+ .sub = NULL,
+ .next = NULL,
+ .static_pass_number = 0,
+#endif
+ .tv_id = TV_NONE,
+ .properties_required = PROPERTIES_REQUIRED,
+ .properties_provided = PROPERTIES_PROVIDED,
+ .properties_destroyed = PROPERTIES_DESTROYED,
+ .todo_flags_start = TODO_FLAGS_START,
+ .todo_flags_finish = TODO_FLAGS_FINISH,
+#if BUILDING_GCC_VERSION < 4009
+ }
+#endif
+};
+
+#if BUILDING_GCC_VERSION >= 4009
+class _PASS_NAME_PASS : public simple_ipa_opt_pass {
+public:
+ _PASS_NAME_PASS() : simple_ipa_opt_pass(_PASS_NAME_PASS_DATA, g) {}
+
+#ifndef NO_GATE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual bool gate(function *) { return _GATE(); }
+#else
+ virtual bool gate(void) { return _GATE(); }
+#endif
+#endif
+
+ virtual opt_pass *clone() { return new _PASS_NAME_PASS(); }
+
+#ifndef NO_EXECUTE
+#if BUILDING_GCC_VERSION >= 5000
+ virtual unsigned int execute(function *) { return _EXECUTE(); }
+#else
+ virtual unsigned int execute(void) { return _EXECUTE(); }
+#endif
+#endif
+};
+}
+
+opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return new _PASS_NAME_PASS();
+}
+#else
+struct opt_pass *_MAKE_PASS_NAME_PASS(void)
+{
+ return &_PASS_NAME_PASS.pass;
+}
+#endif
+
+/* clean up user provided defines */
+#undef PASS_NAME
+#undef NO_GATE
+#undef NO_EXECUTE
+
+#undef PROPERTIES_DESTROYED
+#undef PROPERTIES_PROVIDED
+#undef PROPERTIES_REQUIRED
+#undef TODO_FLAGS_FINISH
+#undef TODO_FLAGS_START
+
+/* clean up generated defines */
+#undef _EXECUTE
+#undef __EXECUTE
+#undef _GATE
+#undef __GATE
+#undef _GCC_PLUGIN_CONCAT2
+#undef _GCC_PLUGIN_CONCAT3
+#undef _GCC_PLUGIN_STRINGIFY
+#undef __GCC_PLUGIN_STRINGIFY
+#undef _HAS_EXECUTE
+#undef _HAS_GATE
+#undef _MAKE_PASS_NAME_PASS
+#undef __MAKE_PASS_NAME_PASS
+#undef _PASS_NAME_NAME
+#undef _PASS_NAME_PASS
+#undef __PASS_NAME_PASS
+#undef _PASS_NAME_PASS_DATA
+#undef __PASS_NAME_PASS_DATA
+
+#endif /* PASS_NAME */
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
new file mode 100644
index 000000000000..aedd6113cb73
--- /dev/null
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2011-2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2, or (at your option) v3
+ *
+ * Homepage:
+ * https://github.com/ephox-gcc-plugins/sancov
+ *
+ * This plugin inserts a __sanitizer_cov_trace_pc() call at the start of basic blocks.
+ * It supports all gcc versions with plugin support (from gcc-4.5 on).
+ * It is based on the commit "Add fuzzing coverage support" by Dmitry Vyukov <dvyukov@google.com>.
+ *
+ * You can read more about it here:
+ * https://gcc.gnu.org/viewcvs/gcc?limit_changes=0&view=revision&revision=231296
+ * http://lwn.net/Articles/674854/
+ * https://github.com/google/syzkaller
+ * https://lwn.net/Articles/677764/
+ *
+ * Usage:
+ * make run
+ */
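+
+/*
+ * Conceptually (a sketch only, not generated source), the instrumentation
+ * makes every basic block start with a coverage call:
+ *
+ *	void foo(int x)
+ *	{
+ *		__sanitizer_cov_trace_pc();
+ *		if (x) {
+ *			__sanitizer_cov_trace_pc();
+ *			bar();
+ *		}
+ *	}
+ *
+ * kcov records the return address of each call to report which blocks
+ * actually ran.
+ */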
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+tree sancov_fndecl;
+
+static struct plugin_info sancov_plugin_info = {
+ .version = "20160402",
+ .help = "sancov plugin\n",
+};
+
+static unsigned int sancov_execute(void)
+{
+ basic_block bb;
+
+	/* Remove this check when this plugin and kcov are in the kernel.
+ if (!strcmp(DECL_NAME_POINTER(current_function_decl), DECL_NAME_POINTER(sancov_fndecl)))
+ return 0;
+ */
+
+ FOR_EACH_BB_FN(bb, cfun) {
+ const_gimple stmt;
+ gcall *gcall;
+ gimple_stmt_iterator gsi = gsi_after_labels(bb);
+
+ if (gsi_end_p(gsi))
+ continue;
+
+ stmt = gsi_stmt(gsi);
+ gcall = as_a_gcall(gimple_build_call(sancov_fndecl, 0));
+ gimple_set_location(gcall, gimple_location(stmt));
+ gsi_insert_before(&gsi, gcall, GSI_SAME_STMT);
+ }
+ return 0;
+}
+
+#define PASS_NAME sancov
+
+#define NO_GATE
+#define TODO_FLAGS_FINISH TODO_dump_func | TODO_verify_stmts | TODO_update_ssa_no_phi | TODO_verify_flow
+
+#include "gcc-generate-gimple-pass.h"
+
+static void sancov_start_unit(void __unused *gcc_data, void __unused *user_data)
+{
+ tree leaf_attr, nothrow_attr;
+ tree BT_FN_VOID = build_function_type_list(void_type_node, NULL_TREE);
+
+ sancov_fndecl = build_fn_decl("__sanitizer_cov_trace_pc", BT_FN_VOID);
+
+ DECL_ASSEMBLER_NAME(sancov_fndecl);
+ TREE_PUBLIC(sancov_fndecl) = 1;
+ DECL_EXTERNAL(sancov_fndecl) = 1;
+ DECL_ARTIFICIAL(sancov_fndecl) = 1;
+ DECL_PRESERVE_P(sancov_fndecl) = 1;
+ DECL_UNINLINABLE(sancov_fndecl) = 1;
+ TREE_USED(sancov_fndecl) = 1;
+
+ nothrow_attr = tree_cons(get_identifier("nothrow"), NULL, NULL);
+ decl_attributes(&sancov_fndecl, nothrow_attr, 0);
+ gcc_assert(TREE_NOTHROW(sancov_fndecl));
+#if BUILDING_GCC_VERSION > 4005
+ leaf_attr = tree_cons(get_identifier("leaf"), NULL, NULL);
+ decl_attributes(&sancov_fndecl, leaf_attr, 0);
+#endif
+}
+
+int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+ int i;
+ struct register_pass_info sancov_plugin_pass_info;
+ const char * const plugin_name = plugin_info->base_name;
+ const int argc = plugin_info->argc;
+ const struct plugin_argument * const argv = plugin_info->argv;
+ bool enable = true;
+
+ static const struct ggc_root_tab gt_ggc_r_gt_sancov[] = {
+ {
+ .base = &sancov_fndecl,
+ .nelt = 1,
+ .stride = sizeof(sancov_fndecl),
+ .cb = &gt_ggc_mx_tree_node,
+ .pchw = &gt_pch_nx_tree_node
+ },
+ LAST_GGC_ROOT_TAB
+ };
+
+ /* BBs can be split afterwards?? */
+ sancov_plugin_pass_info.pass = make_sancov_pass();
+#if BUILDING_GCC_VERSION >= 4009
+ sancov_plugin_pass_info.reference_pass_name = "asan";
+#else
+ sancov_plugin_pass_info.reference_pass_name = "nrv";
+#endif
+ sancov_plugin_pass_info.ref_pass_instance_number = 0;
+ sancov_plugin_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ for (i = 0; i < argc; ++i) {
+ if (!strcmp(argv[i].key, "no-sancov")) {
+ enable = false;
+ continue;
+ }
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ }
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL, &sancov_plugin_info);
+
+ if (!enable)
+ return 0;
+
+#if BUILDING_GCC_VERSION < 6000
+ register_callback(plugin_name, PLUGIN_START_UNIT, &sancov_start_unit, NULL);
+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL, (void *)&gt_ggc_r_gt_sancov);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &sancov_plugin_pass_info);
+#endif
+
+ return 0;
+}
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 1873421f2305..122fcdaf42c8 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -133,6 +133,7 @@ my %VCS_cmds_git = (
"author_pattern" => "^GitAuthor: (.*)",
"subject_pattern" => "^GitSubject: (.*)",
"stat_pattern" => "^(\\d+)\\t(\\d+)\\t\$file\$",
+ "file_exists_cmd" => "git ls-files \$file",
);
my %VCS_cmds_hg = (
@@ -161,6 +162,7 @@ my %VCS_cmds_hg = (
"author_pattern" => "^HgAuthor: (.*)",
"subject_pattern" => "^HgSubject: (.*)",
"stat_pattern" => "^(\\d+)\t(\\d+)\t\$file\$",
+ "file_exists_cmd" => "hg files \$file",
);
my $conf = which_conf(".get_maintainer.conf");
@@ -430,7 +432,7 @@ foreach my $file (@ARGV) {
die "$P: file '${file}' not found\n";
}
}
- if ($from_filename) {
+ if ($from_filename || vcs_file_exists($file)) {
$file =~ s/^\Q${cur_path}\E//; #strip any absolute path
$file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree
push(@files, $file);
@@ -2124,6 +2126,22 @@ sub vcs_file_blame {
}
}
+sub vcs_file_exists {
+ my ($file) = @_;
+
+ my $exists;
+
+ my $vcs_used = vcs_exists();
+ return 0 if (!$vcs_used);
+
+ my $cmd = $VCS_cmds{"file_exists_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
+
+ $exists = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ return $exists;
+}
+
sub uniq {
my (@parms) = @_;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index f0f6d9d75435..4f727eb5ec43 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -180,7 +180,7 @@ else
fi;
# final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
kallsymso=""
kallsyms_vmlinux=""
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 86e56fef7473..e1c09e2f9be7 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -26,6 +26,8 @@ create_package() {
# Fix ownership and permissions
chown -R root:root "$pdir"
chmod -R go-w "$pdir"
+ # in case we are in a restrictive umask environment like 0077
+ chmod -R a+rX "$pdir"
# Create the package
dpkg-gencontrol $forcearch -Vkernel:debarch="${debarch}" -p$pname -P"$pdir"
@@ -238,7 +240,8 @@ maintainer="$name <$email>"
# Try to determine distribution
if [ -n "$KDEB_CHANGELOG_DIST" ]; then
distribution=$KDEB_CHANGELOG_DIST
-elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ]; then
+# In some cases lsb_release returns the codename as n/a, which breaks dpkg-parsechangelog
+elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ] && [ "$distribution" != "n/a" ]; then
: # nothing to do in this case
else
distribution="unstable"
@@ -322,13 +325,14 @@ fi
# Build kernel header package
(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
-if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
- (cd $srctree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrsrcfiles"
-fi
(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
+if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
+ (cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
+fi
(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
+(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index e167592793a7..42396a74405d 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -33,10 +33,17 @@
#include <string.h>
#include <unistd.h>
+/*
+ * glibc synced up and added the metag number but didn't add the relocations.
+ * Work around this in a crude manner for now.
+ */
#ifndef EM_METAG
-/* Remove this when these make it to the standard system elf.h. */
#define EM_METAG 174
+#endif
+#ifndef R_METAG_ADDR32
#define R_METAG_ADDR32 2
+#endif
+#ifndef R_METAG_NONE
#define R_METAG_NONE 3
#endif
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 63d91e22ed7c..966dd3924ea9 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -143,7 +143,7 @@ fi
if test -e include/config/auto.conf; then
. include/config/auto.conf
else
- echo "Error: kernelrelease not valid - run 'make prepare' to update it"
+ echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
exit 1
fi
diff --git a/security/Kconfig b/security/Kconfig
index 176758cdfa57..df28f2b6f3e1 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR
this low address space will need the permission specific to the
systems running LSM.
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+ bool
+ help
+ The heap allocator implements __check_heap_object() for
+ validating memory ranges against heap object sizes in
+ support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+ bool
+ help
+ The architecture supports CONFIG_HARDENED_USERCOPY by
+ calling check_object_size() just before performing the
+ userspace copies in the low level implementation of
+ copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ depends on HAVE_ARCH_HARDENED_USERCOPY
+ select BUG
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocates pages, are not on the process stack,
+ or are part of the kernel text. This kills entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
diff --git a/security/security.c b/security/security.c
index c4bb47db30ee..4838e7fefa1f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -356,7 +356,7 @@ void security_inode_free(struct inode *inode)
}
int security_dentry_init_security(struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
+ const struct qstr *name, void **ctx,
u32 *ctxlen)
{
return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index ec30880c4b98..13185a6c266a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2832,7 +2832,7 @@ static void selinux_inode_free_security(struct inode *inode)
}
static int selinux_dentry_init_security(struct dentry *dentry, int mode,
- struct qstr *name, void **ctx,
+ const struct qstr *name, void **ctx,
u32 *ctxlen)
{
u32 newsid;
diff --git a/sound/Makefile b/sound/Makefile
index 77320709fd26..c41bdf5fdf24 100644
--- a/sound/Makefile
+++ b/sound/Makefile
@@ -2,7 +2,6 @@
#
obj-$(CONFIG_SOUND) += soundcore.o
-obj-$(CONFIG_SOUND_PRIME) += sound_firmware.o
obj-$(CONFIG_SOUND_PRIME) += oss/
obj-$(CONFIG_DMASOUND) += oss/
obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ sh/ synth/ usb/ \
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index e0406211716b..65171f6657a2 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,14 +9,6 @@ menuconfig SND_ARM
Drivers that are implemented on ASoC can be found in
"ALSA for SoC audio support" section.
-config SND_PXA2XX_LIB
- tristate
- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
- select SND_DMAENGINE_PCM
-
-config SND_PXA2XX_LIB_AC97
- bool
-
if SND_ARM
config SND_ARMAACI
@@ -42,3 +34,10 @@ config SND_PXA2XX_AC97
endif # SND_ARM
+config SND_PXA2XX_LIB
+ tristate
+ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+ select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+ bool
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 9b3334be9df2..2c498488af6c 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -67,6 +67,8 @@ struct snd_compr_file {
struct snd_compr_stream stream;
};
+static void error_delayed_work(struct work_struct *work);
+
/*
* a note on stream states used:
* we use following states in the compressed core
@@ -123,6 +125,9 @@ static int snd_compr_open(struct inode *inode, struct file *f)
snd_card_unref(compr->card);
return -ENOMEM;
}
+
+ INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
+
data->stream.ops = compr->ops;
data->stream.direction = dirn;
data->stream.private_data = compr->private_data;
@@ -153,6 +158,8 @@ static int snd_compr_free(struct inode *inode, struct file *f)
struct snd_compr_file *data = f->private_data;
struct snd_compr_runtime *runtime = data->stream.runtime;
+ cancel_delayed_work_sync(&data->stream.error_work);
+
switch (runtime->state) {
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_DRAINING:
@@ -237,6 +244,15 @@ snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
avail = snd_compr_calc_avail(stream, &ioctl_avail);
ioctl_avail.avail = avail;
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ return -EBADFD;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ default:
+ break;
+ }
+
if (copy_to_user((__u64 __user *)arg,
&ioctl_avail, sizeof(ioctl_avail)))
return -EFAULT;
@@ -346,11 +362,13 @@ static ssize_t snd_compr_read(struct file *f, char __user *buf,
switch (stream->runtime->state) {
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_XRUN:
case SNDRV_PCM_STATE_SUSPENDED:
case SNDRV_PCM_STATE_DISCONNECTED:
retval = -EBADFD;
goto out;
+ case SNDRV_PCM_STATE_XRUN:
+ retval = -EPIPE;
+ goto out;
}
avail = snd_compr_get_avail(stream);
@@ -399,10 +417,16 @@ static unsigned int snd_compr_poll(struct file *f, poll_table *wait)
stream = &data->stream;
mutex_lock(&stream->device->lock);
- if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
+
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_XRUN:
retval = snd_compr_get_poll(stream) | POLLERR;
goto out;
+ default:
+ break;
}
+
poll_wait(f, &stream->runtime->sleep, wait);
avail = snd_compr_get_avail(stream);
@@ -697,6 +721,45 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
return retval;
}
+static void error_delayed_work(struct work_struct *work)
+{
+ struct snd_compr_stream *stream;
+
+ stream = container_of(work, struct snd_compr_stream, error_work.work);
+
+ mutex_lock(&stream->device->lock);
+
+ stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+ wake_up(&stream->runtime->sleep);
+
+ mutex_unlock(&stream->device->lock);
+}
+
+/**
+ * snd_compr_stop_error - Report a fatal error on a stream
+ * @stream: pointer to stream
+ * @state: state to transition the stream to
+ *
+ * Stop the stream and set its state.
+ *
+ * Should be called with the compressed device lock held.
+ */
+int snd_compr_stop_error(struct snd_compr_stream *stream,
+ snd_pcm_state_t state)
+{
+ if (stream->runtime->state == state)
+ return 0;
+
+ stream->runtime->state = state;
+
+ pr_debug("Changing state to: %d\n", state);
+
+ queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_compr_stop_error);
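+
+/*
+ * Hypothetical usage sketch: a driver's error handler, already holding the
+ * compressed device lock, could report a fatal overrun/underrun like this:
+ *
+ *	snd_compr_stop_error(stream, SNDRV_PCM_STATE_XRUN);
+ */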
+
static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
{
int ret;
diff --git a/sound/core/control.c b/sound/core/control.c
index b4fe9b002512..fb096cb20a80 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -807,6 +807,36 @@ static int snd_ctl_elem_list(struct snd_card *card,
return 0;
}
+static bool validate_element_member_dimension(struct snd_ctl_elem_info *info)
+{
+ unsigned int members;
+ unsigned int i;
+
+ if (info->dimen.d[0] == 0)
+ return true;
+
+ members = 1;
+ for (i = 0; i < ARRAY_SIZE(info->dimen.d); ++i) {
+ if (info->dimen.d[i] == 0)
+ break;
+ members *= info->dimen.d[i];
+
+ /*
+ * info->count should be validated in advance, to guarantee
+ * calculation soundness.
+ */
+ if (members > info->count)
+ return false;
+ }
+
+ for (++i; i < ARRAY_SIZE(info->dimen.d); ++i) {
+ if (info->dimen.d[i] > 0)
+ return false;
+ }
+
+ return members == info->count;
+}
+
static int snd_ctl_elem_info(struct snd_ctl_file *ctl,
struct snd_ctl_elem_info *info)
{
@@ -1274,6 +1304,8 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
if (info->count < 1 ||
info->count > max_value_counts[info->type])
return -EINVAL;
+ if (!validate_element_member_dimension(info))
+ return -EINVAL;
private_size = value_sizes[info->type] * info->count;
/*
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index b16dbef04174..cd0e0ebbfdb1 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -70,11 +70,11 @@ struct seq_oss_synth {
static int max_synth_devs;
static struct seq_oss_synth *synth_devs[SNDRV_SEQ_OSS_MAX_SYNTH_DEVS];
static struct seq_oss_synth midi_synth_dev = {
- -1, /* seq_device */
- SYNTH_TYPE_MIDI, /* synth_type */
- 0, /* synth_subtype */
- 16, /* nr_voices */
- "MIDI", /* name */
+ .seq_device = -1,
+ .synth_type = SYNTH_TYPE_MIDI,
+ .synth_subtype = 0,
+ .nr_voices = 16,
+ .name = "MIDI",
};
static DEFINE_SPINLOCK(register_lock);
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 293104926098..dcc102813aef 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -165,7 +165,7 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
snd_seq_timer_update_tick(&tmr->tick, resolution);
/* register actual time of this timer update */
- do_gettimeofday(&tmr->last_update);
+ ktime_get_ts64(&tmr->last_update);
spin_unlock_irqrestore(&tmr->lock, flags);
@@ -392,7 +392,7 @@ static int seq_timer_start(struct snd_seq_timer *tmr)
return -EINVAL;
snd_timer_start(tmr->timeri, tmr->ticks);
tmr->running = 1;
- do_gettimeofday(&tmr->last_update);
+ ktime_get_ts64(&tmr->last_update);
return 0;
}
@@ -420,7 +420,7 @@ static int seq_timer_continue(struct snd_seq_timer *tmr)
}
snd_timer_start(tmr->timeri, tmr->ticks);
tmr->running = 1;
- do_gettimeofday(&tmr->last_update);
+ ktime_get_ts64(&tmr->last_update);
return 0;
}
@@ -444,17 +444,12 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
spin_lock_irqsave(&tmr->lock, flags);
cur_time = tmr->cur_time;
if (tmr->running) {
- struct timeval tm;
- int usec;
- do_gettimeofday(&tm);
- usec = (int)(tm.tv_usec - tmr->last_update.tv_usec);
- if (usec < 0) {
- cur_time.tv_nsec += (1000000 + usec) * 1000;
- cur_time.tv_sec += tm.tv_sec - tmr->last_update.tv_sec - 1;
- } else {
- cur_time.tv_nsec += usec * 1000;
- cur_time.tv_sec += tm.tv_sec - tmr->last_update.tv_sec;
- }
+ struct timespec64 tm;
+
+ ktime_get_ts64(&tm);
+ tm = timespec64_sub(tm, tmr->last_update);
+ cur_time.tv_nsec = tm.tv_nsec;
+ cur_time.tv_sec = tm.tv_sec;
snd_seq_sanity_real_time(&cur_time);
}
spin_unlock_irqrestore(&tmr->lock, flags);
diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
index 88dfb71805ae..9506b661fe5b 100644
--- a/sound/core/seq/seq_timer.h
+++ b/sound/core/seq/seq_timer.h
@@ -52,7 +52,7 @@ struct snd_seq_timer {
unsigned int skew;
unsigned int skew_base;
- struct timeval last_update; /* time of last clock update, used for interpolation */
+ struct timespec64 last_update; /* time of last clock update, used for interpolation */
spinlock_t lock;
};
diff --git a/sound/hda/array.c b/sound/hda/array.c
index 516795baa7db..5dfa610e4471 100644
--- a/sound/hda/array.c
+++ b/sound/hda/array.c
@@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array)
return NULL;
if (array->used >= array->alloced) {
int num = array->alloced + array->alloc_align;
+ int oldsize = array->alloced * array->elem_size;
int size = (num + 1) * array->elem_size;
void *nlist;
if (snd_BUG_ON(num >= 4096))
return NULL;
- nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
+ nlist = krealloc(array->list, size, GFP_KERNEL);
if (!nlist)
return NULL;
+ memset(nlist + oldsize, 0, size - oldsize);
array->list = nlist;
array->alloced = num;
}
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index c6c75e7e0981..81acc20c2535 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -353,7 +353,8 @@ static void hdmi_std_setup_channel_mapping(struct hdac_chmap *chmap,
int hdmi_slot = 0;
/* fill actual channel mappings in ALSA channel (i) order */
for (i = 0; i < ch_alloc->channels; i++) {
- while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
+ while (!WARN_ON(hdmi_slot >= 8) &&
+ !ch_alloc->speakers[7 - hdmi_slot])
hdmi_slot++; /* skip zero slots */
hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
@@ -430,6 +431,12 @@ static int to_cea_slot(int ordered_ca, unsigned char pos)
int mask = snd_hdac_chmap_to_spk_mask(pos);
int i;
+	/* Add sanity check to pass Klocwork check.
+ * This should never happen.
+ */
+ if (ordered_ca >= ARRAY_SIZE(channel_allocations))
+ return -1;
+
if (mask) {
for (i = 0; i < 8; i++) {
if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
@@ -456,7 +463,15 @@ EXPORT_SYMBOL_GPL(snd_hdac_spk_to_chmap);
/* from CEA slot to ALSA API channel position */
static int from_cea_slot(int ordered_ca, unsigned char slot)
{
- int mask = channel_allocations[ordered_ca].speakers[7 - slot];
+ int mask;
+
+	/* Add sanity check to pass Klocwork check.
+ * This should never happen.
+ */
+ if (slot >= 8)
+ return 0;
+
+ mask = channel_allocations[ordered_ca].speakers[7 - slot];
return snd_hdac_spk_to_chmap(mask);
}
@@ -523,7 +538,8 @@ static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
int ordered_ca = get_channel_allocation_order(ca);
for (i = 0; i < 8; i++) {
- if (i < channel_allocations[ordered_ca].channels)
+ if (ordered_ca < ARRAY_SIZE(channel_allocations) &&
+ i < channel_allocations[ordered_ca].channels)
map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
else
map[i] = 0;
@@ -551,6 +567,12 @@ int snd_hdac_get_active_channels(int ca)
{
int ordered_ca = get_channel_allocation_order(ca);
+	/* Add sanity check to pass Klocwork check.
+ * This should never happen.
+ */
+ if (ordered_ca >= ARRAY_SIZE(channel_allocations))
+ ordered_ca = 0;
+
return channel_allocations[ordered_ca].channels;
}
EXPORT_SYMBOL_GPL(snd_hdac_get_active_channels);
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 5a4cf3fab4ae..d53c9bb36281 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -121,7 +121,7 @@ int snd_ak4114_create(struct snd_card *card,
__fail:
snd_ak4114_free(chip);
- return err < 0 ? err : -EIO;
+ return err;
}
EXPORT_SYMBOL(snd_ak4114_create);
diff --git a/sound/i2c/other/ak4117.c b/sound/i2c/other/ak4117.c
index 48848909a5a9..0702f0552d19 100644
--- a/sound/i2c/other/ak4117.c
+++ b/sound/i2c/other/ak4117.c
@@ -110,7 +110,7 @@ int snd_ak4117_create(struct snd_card *card, ak4117_read_t *read, ak4117_write_t
__fail:
snd_ak4117_free(chip);
- return err < 0 ? err : -EIO;
+ return err;
}
void snd_ak4117_reg_write(struct ak4117 *chip, unsigned char reg, unsigned char mask, unsigned char val)
diff --git a/sound/isa/ad1848/ad1848.c b/sound/isa/ad1848/ad1848.c
index f159da4ec890..a302d1f8d14f 100644
--- a/sound/isa/ad1848/ad1848.c
+++ b/sound/isa/ad1848/ad1848.c
@@ -170,15 +170,4 @@ static struct isa_driver snd_ad1848_driver = {
}
};
-static int __init alsa_card_ad1848_init(void)
-{
- return isa_register_driver(&snd_ad1848_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_ad1848_exit(void)
-{
- isa_unregister_driver(&snd_ad1848_driver);
-}
-
-module_init(alsa_card_ad1848_init);
-module_exit(alsa_card_ad1848_exit);
+module_isa_driver(snd_ad1848_driver, SNDRV_CARDS);
diff --git a/sound/isa/adlib.c b/sound/isa/adlib.c
index 120c524bb2a0..8d3060fd7ad7 100644
--- a/sound/isa/adlib.c
+++ b/sound/isa/adlib.c
@@ -112,15 +112,4 @@ static struct isa_driver snd_adlib_driver = {
}
};
-static int __init alsa_card_adlib_init(void)
-{
- return isa_register_driver(&snd_adlib_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_adlib_exit(void)
-{
- isa_unregister_driver(&snd_adlib_driver);
-}
-
-module_init(alsa_card_adlib_init);
-module_exit(alsa_card_adlib_exit);
+module_isa_driver(snd_adlib_driver, SNDRV_CARDS);
diff --git a/sound/isa/cmi8328.c b/sound/isa/cmi8328.c
index 2c89d95da674..787475084f46 100644
--- a/sound/isa/cmi8328.c
+++ b/sound/isa/cmi8328.c
@@ -469,15 +469,4 @@ static struct isa_driver snd_cmi8328_driver = {
},
};
-static int __init alsa_card_cmi8328_init(void)
-{
- return isa_register_driver(&snd_cmi8328_driver, CMI8328_MAX);
-}
-
-static void __exit alsa_card_cmi8328_exit(void)
-{
- isa_unregister_driver(&snd_cmi8328_driver);
-}
-
-module_init(alsa_card_cmi8328_init)
-module_exit(alsa_card_cmi8328_exit)
+module_isa_driver(snd_cmi8328_driver, CMI8328_MAX);
diff --git a/sound/isa/cs423x/cs4231.c b/sound/isa/cs423x/cs4231.c
index 282cd75d2235..ef7448e9f813 100644
--- a/sound/isa/cs423x/cs4231.c
+++ b/sound/isa/cs423x/cs4231.c
@@ -186,15 +186,4 @@ static struct isa_driver snd_cs4231_driver = {
}
};
-static int __init alsa_card_cs4231_init(void)
-{
- return isa_register_driver(&snd_cs4231_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_cs4231_exit(void)
-{
- isa_unregister_driver(&snd_cs4231_driver);
-}
-
-module_init(alsa_card_cs4231_init);
-module_exit(alsa_card_cs4231_exit);
+module_isa_driver(snd_cs4231_driver, SNDRV_CARDS);
diff --git a/sound/isa/galaxy/galaxy.c b/sound/isa/galaxy/galaxy.c
index 32278847884f..379abe2cbeb2 100644
--- a/sound/isa/galaxy/galaxy.c
+++ b/sound/isa/galaxy/galaxy.c
@@ -634,15 +634,4 @@ static struct isa_driver snd_galaxy_driver = {
}
};
-static int __init alsa_card_galaxy_init(void)
-{
- return isa_register_driver(&snd_galaxy_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_galaxy_exit(void)
-{
- isa_unregister_driver(&snd_galaxy_driver);
-}
-
-module_init(alsa_card_galaxy_init);
-module_exit(alsa_card_galaxy_exit);
+module_isa_driver(snd_galaxy_driver, SNDRV_CARDS);
diff --git a/sound/isa/gus/gusclassic.c b/sound/isa/gus/gusclassic.c
index f0019715d82e..c169be49ed71 100644
--- a/sound/isa/gus/gusclassic.c
+++ b/sound/isa/gus/gusclassic.c
@@ -229,15 +229,4 @@ static struct isa_driver snd_gusclassic_driver = {
}
};
-static int __init alsa_card_gusclassic_init(void)
-{
- return isa_register_driver(&snd_gusclassic_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_gusclassic_exit(void)
-{
- isa_unregister_driver(&snd_gusclassic_driver);
-}
-
-module_init(alsa_card_gusclassic_init);
-module_exit(alsa_card_gusclassic_exit);
+module_isa_driver(snd_gusclassic_driver, SNDRV_CARDS);
diff --git a/sound/isa/gus/gusextreme.c b/sound/isa/gus/gusextreme.c
index 693d95f46804..77ac2fd723b4 100644
--- a/sound/isa/gus/gusextreme.c
+++ b/sound/isa/gus/gusextreme.c
@@ -358,15 +358,4 @@ static struct isa_driver snd_gusextreme_driver = {
}
};
-static int __init alsa_card_gusextreme_init(void)
-{
- return isa_register_driver(&snd_gusextreme_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_gusextreme_exit(void)
-{
- isa_unregister_driver(&snd_gusextreme_driver);
-}
-
-module_init(alsa_card_gusextreme_init);
-module_exit(alsa_card_gusextreme_exit);
+module_isa_driver(snd_gusextreme_driver, SNDRV_CARDS);
diff --git a/sound/isa/gus/gusmax.c b/sound/isa/gus/gusmax.c
index 8216e8d8f017..dd88c9d33492 100644
--- a/sound/isa/gus/gusmax.c
+++ b/sound/isa/gus/gusmax.c
@@ -370,15 +370,4 @@ static struct isa_driver snd_gusmax_driver = {
},
};
-static int __init alsa_card_gusmax_init(void)
-{
- return isa_register_driver(&snd_gusmax_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_gusmax_exit(void)
-{
- isa_unregister_driver(&snd_gusmax_driver);
-}
-
-module_init(alsa_card_gusmax_init)
-module_exit(alsa_card_gusmax_exit)
+module_isa_driver(snd_gusmax_driver, SNDRV_CARDS);
diff --git a/sound/isa/sb/jazz16.c b/sound/isa/sb/jazz16.c
index 6b4884d052a5..4d909971eedb 100644
--- a/sound/isa/sb/jazz16.c
+++ b/sound/isa/sb/jazz16.c
@@ -387,15 +387,4 @@ static struct isa_driver snd_jazz16_driver = {
},
};
-static int __init alsa_card_jazz16_init(void)
-{
- return isa_register_driver(&snd_jazz16_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_jazz16_exit(void)
-{
- isa_unregister_driver(&snd_jazz16_driver);
-}
-
-module_init(alsa_card_jazz16_init)
-module_exit(alsa_card_jazz16_exit)
+module_isa_driver(snd_jazz16_driver, SNDRV_CARDS);
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index b8e2391c33ff..ad42d2364199 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -251,15 +251,4 @@ static struct isa_driver snd_sb8_driver = {
},
};
-static int __init alsa_card_sb8_init(void)
-{
- return isa_register_driver(&snd_sb8_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_sb8_exit(void)
-{
- isa_unregister_driver(&snd_sb8_driver);
-}
-
-module_init(alsa_card_sb8_init)
-module_exit(alsa_card_sb8_exit)
+module_isa_driver(snd_sb8_driver, SNDRV_CARDS);
diff --git a/sound/isa/sc6000.c b/sound/isa/sc6000.c
index 51cfa7615f72..b61a6633d8f2 100644
--- a/sound/isa/sc6000.c
+++ b/sound/isa/sc6000.c
@@ -711,15 +711,4 @@ static struct isa_driver snd_sc6000_driver = {
};
-static int __init alsa_card_sc6000_init(void)
-{
- return isa_register_driver(&snd_sc6000_driver, SNDRV_CARDS);
-}
-
-static void __exit alsa_card_sc6000_exit(void)
-{
- isa_unregister_driver(&snd_sc6000_driver);
-}
-
-module_init(alsa_card_sc6000_init)
-module_exit(alsa_card_sc6000_exit)
+module_isa_driver(snd_sc6000_driver, SNDRV_CARDS);
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 10c8de1f8d29..6368e5c7d0ba 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -254,7 +254,7 @@ static void ad_write(ad1848_info * devc, int reg, int data)
static void wait_for_calibration(ad1848_info * devc)
{
- int timeout = 0;
+ int timeout;
/*
* Wait until the auto calibration process has finished.
diff --git a/sound/oss/aedsp16.c b/sound/oss/aedsp16.c
index 35b5912cf3f8..bb477d5c8528 100644
--- a/sound/oss/aedsp16.c
+++ b/sound/oss/aedsp16.c
@@ -482,13 +482,13 @@ static struct orVals orDMA[] __initdata = {
};
static struct aedsp16_info ae_config = {
- DEF_AEDSP16_IOB,
- DEF_AEDSP16_IRQ,
- DEF_AEDSP16_MRQ,
- DEF_AEDSP16_DMA,
- -1,
- -1,
- INIT_NONE
+ .base_io = DEF_AEDSP16_IOB,
+ .irq = DEF_AEDSP16_IRQ,
+ .mpu_irq = DEF_AEDSP16_MRQ,
+ .dma = DEF_AEDSP16_DMA,
+ .mss_base = -1,
+ .mpu_base = -1,
+ .init = INIT_NONE
};
/*
diff --git a/sound/oss/sound_firmware.h b/sound/oss/sound_firmware.h
index 0a0cbfdfb855..da4c67e005ed 100644
--- a/sound/oss/sound_firmware.h
+++ b/sound/oss/sound_firmware.h
@@ -1,2 +1,29 @@
-extern int mod_firmware_load(const char *fn, char **fp);
+#include <linux/fs.h>
+/**
+ * mod_firmware_load - load sound driver firmware
+ * @fn: filename
+ * @fp: where the allocated firmware buffer is returned.
+ *
+ * Load the firmware for a sound module (up to 128K) into a buffer.
+ * The buffer is returned in *fp. It is allocated with vmalloc so is
+ * virtually linear and not DMAable. The caller should free it with
+ * vfree when finished.
+ *
+ * The length of the buffer is returned on a successful load, the
+ * value zero on a failure.
+ *
+ * Caution: This API is not recommended. Firmware should be loaded via
+ * request_firmware.
+ */
+static inline int mod_firmware_load(const char *fn, char **fp)
+{
+ loff_t size;
+ int err;
+
+ err = kernel_read_file_from_path((char *)fn, (void **)fp, &size,
+ 131072, READING_FIRMWARE);
+ if (err < 0)
+ return 0;
+ return size;
+}
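+
+/*
+ * Hypothetical usage sketch (the path and use_dsp_image() below are made-up
+ * examples, not real kernel symbols):
+ *
+ *	char *fw;
+ *	int len = mod_firmware_load("/etc/sound/example_dsp.bin", &fw);
+ *
+ *	if (len) {
+ *		use_dsp_image(fw, len);
+ *		vfree(fw);
+ *	}
+ */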
diff --git a/sound/oss/sound_timer.c b/sound/oss/sound_timer.c
index 8021c85f076d..3a444a6f10eb 100644
--- a/sound/oss/sound_timer.c
+++ b/sound/oss/sound_timer.c
@@ -17,7 +17,7 @@
#include "sound_config.h"
static volatile int initialized, opened, tmr_running;
-static volatile time_t tmr_offs, tmr_ctr;
+static volatile unsigned int tmr_offs, tmr_ctr;
static volatile unsigned long ticks_offs;
static volatile int curr_tempo, curr_timebase;
static volatile unsigned long curr_ticks;
diff --git a/sound/oss/sys_timer.c b/sound/oss/sys_timer.c
index 2226dda0eff0..d17019d25b99 100644
--- a/sound/oss/sys_timer.c
+++ b/sound/oss/sys_timer.c
@@ -19,7 +19,7 @@
#include "sound_config.h"
static volatile int opened, tmr_running;
-static volatile time_t tmr_offs, tmr_ctr;
+static volatile unsigned int tmr_offs, tmr_ctr;
static volatile unsigned long ticks_offs;
static volatile int curr_tempo, curr_timebase;
static volatile unsigned long curr_ticks;
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index 9dc2950e1ab7..6414ecf93efa 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -1615,23 +1615,23 @@ static int hw_dac_init(struct hw *hw, const struct dac_conf *info)
int i;
struct regs_cs4382 cs_read = {0};
struct regs_cs4382 cs_def = {
- 0x00000001, /* Mode Control 1 */
- 0x00000000, /* Mode Control 2 */
- 0x00000084, /* Mode Control 3 */
- 0x00000000, /* Filter Control */
- 0x00000000, /* Invert Control */
- 0x00000024, /* Mixing Control Pair 1 */
- 0x00000000, /* Vol Control A1 */
- 0x00000000, /* Vol Control B1 */
- 0x00000024, /* Mixing Control Pair 2 */
- 0x00000000, /* Vol Control A2 */
- 0x00000000, /* Vol Control B2 */
- 0x00000024, /* Mixing Control Pair 3 */
- 0x00000000, /* Vol Control A3 */
- 0x00000000, /* Vol Control B3 */
- 0x00000024, /* Mixing Control Pair 4 */
- 0x00000000, /* Vol Control A4 */
- 0x00000000 /* Vol Control B4 */
+ .mode_control_1 = 0x00000001, /* Mode Control 1 */
+ .mode_control_2 = 0x00000000, /* Mode Control 2 */
+ .mode_control_3 = 0x00000084, /* Mode Control 3 */
+ .filter_control = 0x00000000, /* Filter Control */
+ .invert_control = 0x00000000, /* Invert Control */
+ .mix_control_P1 = 0x00000024, /* Mixing Control Pair 1 */
+ .vol_control_A1 = 0x00000000, /* Vol Control A1 */
+ .vol_control_B1 = 0x00000000, /* Vol Control B1 */
+ .mix_control_P2 = 0x00000024, /* Mixing Control Pair 2 */
+ .vol_control_A2 = 0x00000000, /* Vol Control A2 */
+ .vol_control_B2 = 0x00000000, /* Vol Control B2 */
+ .mix_control_P3 = 0x00000024, /* Mixing Control Pair 3 */
+ .vol_control_A3 = 0x00000000, /* Vol Control A3 */
+ .vol_control_B3 = 0x00000000, /* Vol Control B3 */
+ .mix_control_P4 = 0x00000024, /* Mixing Control Pair 4 */
+ .vol_control_A4 = 0x00000000, /* Vol Control A4 */
+ .vol_control_B4 = 0x00000000 /* Vol Control B4 */
};
if (hw->model == CTSB1270) {
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 286f5e3686a3..937071760bc4 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
chip = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = ECHOGAIN_MAXOUT;
uinfo->dimen.d[0] = num_busses_out(chip);
uinfo->dimen.d[1] = num_busses_in(chip);
+ uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
return 0;
}
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
chip = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = ECHOGAIN_MAXOUT;
uinfo->dimen.d[0] = num_busses_out(chip);
uinfo->dimen.d[1] = num_pipes_out(chip);
+ uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
return 0;
}
@@ -1728,7 +1728,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 96;
uinfo->value.integer.min = ECHOGAIN_MINOUT;
uinfo->value.integer.max = 0;
#ifdef ECHOCARD_HAS_VMIXER
@@ -1738,6 +1737,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
#endif
uinfo->dimen.d[1] = 16; /* 16 channels */
uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */
+ uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
return 0;
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 83741887faa1..9913be8532ab 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3584,6 +3584,12 @@ static void setup_dig_out_stream(struct hda_codec *codec, hda_nid_t nid,
bool reset;
spdif = snd_hda_spdif_out_of_nid(codec, nid);
+	/* Add sanity check to pass Klocwork check.
+ * This should never happen.
+ */
+ if (WARN_ON(spdif == NULL))
+ return;
+
curr_fmt = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_STREAM_FORMAT, 0);
reset = codec->spdif_status_reset &&
@@ -3768,7 +3774,7 @@ int snd_hda_multi_out_analog_prepare(struct hda_codec *codec,
spdif = snd_hda_spdif_out_of_nid(codec, mout->dig_out_nid);
if (mout->dig_out_nid && mout->share_spdif &&
mout->dig_out_used != HDA_DIG_EXCLUSIVE) {
- if (chs == 2 &&
+ if (chs == 2 && spdif != NULL &&
snd_hda_is_supported_format(codec, mout->dig_out_nid,
format) &&
!(spdif->status & IEC958_AES0_NONAUDIO)) {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 79c7b340acc2..e7c8f4f076d5 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -2492,10 +2492,6 @@ static int create_loopback_mixing_ctl(struct hda_codec *codec)
if (!snd_hda_gen_add_kctl(spec, NULL, &loopback_mixing_enum))
return -ENOMEM;
spec->have_aamix_ctl = 1;
- /* if no explicit aamix path is present (e.g. for Realtek codecs),
- * enable aamix as default -- just for compatibility
- */
- spec->aamix_mode = !has_aamix_out_paths(spec);
return 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 6f8ea13323c1..89dacf9b4e6c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2265,6 +2265,8 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
+ { PCI_DEVICE(0x1002, 0x0002),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x1308),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
{ PCI_DEVICE(0x1002, 0x157a),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index d0d5ad8beac5..56e5204ac9c1 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1680,6 +1680,11 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
mutex_lock(&codec->spdif_mutex);
spdif = snd_hda_spdif_out_of_nid(codec, cvt_nid);
+ /* Add a sanity check to pass the Klocwork check.
+ * This should never happen.
+ */
+ if (WARN_ON(spdif == NULL))
+ return true;
non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO);
mutex_unlock(&codec->spdif_mutex);
return non_pcm;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index abcb5a6a1cd9..574b1b48996f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3718,6 +3718,9 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0867:
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+ break;
}
codec_dbg(codec, "Headset jack set to unplugged mode.\n");
}
@@ -3805,6 +3808,9 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0293);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0867:
+ alc_update_coefex_idx(codec, 0x57, 0x5, 0, 1<<14);
+ /* fallthru */
case 0x10ec0662:
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
@@ -3899,6 +3905,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0867:
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+ break;
}
codec_dbg(codec, "Headset jack set to headphone (default) mode.\n");
}
@@ -3989,6 +3998,9 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0867:
+ alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
+ break;
}
codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
}
@@ -4166,6 +4178,9 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x00f0) == 0x00f0;
break;
+ case 0x10ec0867:
+ is_ctia = true;
+ break;
}
codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
@@ -4674,6 +4689,22 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
}
}
+static void alc298_fixup_speaker_volume(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ /* The speaker is mistakenly routed to Node 0x06; as a result
+ we can't adjust the speaker's volume, since that node has no
+ Amp-out capability. Change the speaker's route to:
+ Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17
+ (Pin Complex). Node 0x02 has Amp-out caps, so the speaker's
+ volume can now be adjusted. */
+
+ hda_nid_t conn1[1] = { 0x0c };
+ snd_hda_override_conn_list(codec, 0x17, 1, conn1);
+ }
+}
+
/* Hook to update amp GPIO4 for automute */
static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
struct hda_jack_callback *jack)
@@ -4823,6 +4854,7 @@ enum {
ALC280_FIXUP_HP_HEADSET_MIC,
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
+ ALC298_FIXUP_SPK_VOLUME,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5478,6 +5510,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
},
+ [ALC298_FIXUP_SPK_VOLUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_speaker_volume,
+ .chained = true,
+ .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5524,6 +5562,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5799,6 +5838,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1b, 0x01014020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
+ {0x1b, 0x02011020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170150},
{0x1b, 0x02011020},
{0x21, 0x0221105f}),
@@ -6532,6 +6575,9 @@ enum {
ALC668_FIXUP_DELL_XPS13,
ALC662_FIXUP_ASUS_Nx50,
ALC668_FIXUP_ASUS_Nx51,
+ ALC891_FIXUP_HEADSET_MODE,
+ ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
+ ALC662_FIXUP_ACER_VERITON,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6787,6 +6833,27 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC662_FIXUP_BASS_CHMAP,
},
+ [ALC891_FIXUP_HEADSET_MODE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mode,
+ },
+ [ALC891_FIXUP_DELL_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */
+ { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC891_FIXUP_HEADSET_MODE
+ },
+ [ALC662_FIXUP_ACER_VERITON] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x50170120 }, /* no internal speaker */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6825,6 +6892,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
+ SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
#if 0
@@ -6903,6 +6971,11 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
};
static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
+ {0x17, 0x02211010},
+ {0x18, 0x01a19030},
+ {0x1a, 0x01813040},
+ {0x21, 0x01014020}),
SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
{0x14, 0x01014010},
{0x18, 0x01a19020},
@@ -7091,7 +7164,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
- HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
+ HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
HDA_CODEC_ENTRY(0x10ec0883, "ALC883", patch_alc882),
diff --git a/sound/pci/mixart/mixart_mixer.c b/sound/pci/mixart/mixart_mixer.c
index 58fd79ebac20..51e53497f0ad 100644
--- a/sound/pci/mixart/mixart_mixer.c
+++ b/sound/pci/mixart/mixart_mixer.c
@@ -965,7 +965,7 @@ static int mixart_update_monitoring(struct snd_mixart* chip, int channel)
int err;
struct mixart_msg request;
struct mixart_set_out_audio_level audio_level;
- u32 resp;
+ u32 resp = 0;
if(chip->pipe_out_ana.status == PIPE_UNDEFINED)
return -EINVAL; /* no pipe defined */
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 94639d6b5fb5..067a91207d8e 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1496,7 +1496,7 @@ static int snd_riptide_prepare(struct snd_pcm_substream *substream)
f = PAGE_SIZE;
while ((size + (f >> 1) - 1) <= (f << 7) && (f << 1) > period)
f = f >> 1;
- pages = (size + f - 1) / f;
+ pages = DIV_ROUND_UP(size, f);
data->size = size;
data->pages = pages;
snd_printdd
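The riptide hunk above swaps the open-coded round-up division for DIV_ROUND_UP(), which expands to the same expression. A quick stand-alone check; the macro is copied in for the example rather than taken from kernel headers:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = 70000, f = 4096;

	/* Identical result to the removed (size + f - 1) / f expression. */
	assert(DIV_ROUND_UP(size, f) == (size + f - 1) / f);
	printf("pages = %lu\n", DIV_ROUND_UP(size, f));	/* prints 18 */
	return 0;
}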
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index 09da7b52bc2e..1468e4b7bf93 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -991,6 +991,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
if (err < 0)
return err;
}
+ master_vol = NULL;
if (pm7500)
err = build_mixers(chip,
ARRAY_SIZE(snd_pmac_awacs_mixers_pmac7500),
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index ad3d9ae38034..fbbc25279559 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -63,9 +63,6 @@ MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard.");
module_param(enable, bool, 0644);
MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
-/* Use workqueue */
-static struct workqueue_struct *aica_queue;
-
/* Simple platform device */
static struct platform_device *pd;
static struct resource aica_memory_space[2] = {
@@ -327,7 +324,7 @@ static void aica_period_elapsed(unsigned long timer_var)
dreamcastcard->current_period = play_period;
if (unlikely(dreamcastcard->dma_check == 0))
dreamcastcard->dma_check = 1;
- queue_work(aica_queue, &(dreamcastcard->spu_dma_work));
+ schedule_work(&(dreamcastcard->spu_dma_work));
}
static void spu_begin_dma(struct snd_pcm_substream *substream)
@@ -337,7 +334,7 @@ static void spu_begin_dma(struct snd_pcm_substream *substream)
runtime = substream->runtime;
dreamcastcard = substream->pcm->private_data;
/*get the queue to do the work */
- queue_work(aica_queue, &(dreamcastcard->spu_dma_work));
+ schedule_work(&(dreamcastcard->spu_dma_work));
/* Timer may already be running */
if (unlikely(dreamcastcard->timer.data)) {
mod_timer(&dreamcastcard->timer, jiffies + 4);
@@ -381,7 +378,7 @@ static int snd_aicapcm_pcm_close(struct snd_pcm_substream
*substream)
{
struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
- flush_workqueue(aica_queue);
+ flush_work(&(dreamcastcard->spu_dma_work));
if (dreamcastcard->timer.data)
del_timer(&dreamcastcard->timer);
kfree(dreamcastcard->channel);
@@ -633,9 +630,6 @@ static int snd_aica_probe(struct platform_device *devptr)
if (unlikely(err < 0))
goto freedreamcast;
platform_set_drvdata(devptr, dreamcastcard);
- aica_queue = create_workqueue(CARD_NAME);
- if (unlikely(!aica_queue))
- goto freedreamcast;
snd_printk
("ALSA Driver for Yamaha AICA Super Intelligent Sound Processor\n");
return 0;
@@ -671,10 +665,6 @@ static int __init aica_init(void)
static void __exit aica_exit(void)
{
- /* Destroy the aica kernel thread *
- * being extra cautious to check if it exists*/
- if (likely(aica_queue))
- destroy_workqueue(aica_queue);
platform_device_unregister(pd);
platform_driver_unregister(&snd_aica_driver);
/* Kill any sound still playing and reset ARM7 to safe state */
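The aica hunks above retire the driver-private workqueue in favour of the shared system workqueue: queue_work()/flush_workqueue() become schedule_work()/flush_work() on the specific work item, and the create_workqueue()/destroy_workqueue() calls disappear. A minimal illustrative module using the same two calls; the demo_* names are made up for the sketch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	schedule_work(&demo_work);	/* system workqueue, no private queue */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&demo_work);		/* wait for this work item only */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");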
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 06e099e802df..22aec9a1e9a4 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -10,6 +10,7 @@ if SND_ATMEL_SOC
config SND_ATMEL_SOC_PDC
tristate
+ depends on HAS_DMA
default m if SND_ATMEL_SOC_SSC_PDC=m && SND_ATMEL_SOC_SSC=m
default y if SND_ATMEL_SOC_SSC_PDC=y || (SND_ATMEL_SOC_SSC_PDC=m && SND_ATMEL_SOC_SSC=y)
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 6107de9c538b..6d9b8b44e2da 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -593,11 +593,6 @@ static int atmel_classd_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no memory resource\n");
- return -ENXIO;
- }
-
io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(io_base)) {
ret = PTR_ERR(io_base);
diff --git a/sound/soc/atmel/atmel-pdmic.c b/sound/soc/atmel/atmel-pdmic.c
index aee4787a0b89..5f56da60c92f 100644
--- a/sound/soc/atmel/atmel-pdmic.c
+++ b/sound/soc/atmel/atmel-pdmic.c
@@ -624,11 +624,6 @@ static int atmel_pdmic_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no memory resource\n");
- return -ENXIO;
- }
-
io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(io_base)) {
ret = PTR_ERR(io_base);
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 1267e1af0fae..54c09acd3fed 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -321,7 +321,7 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
return ret;
}
- dma_params = &ssc_dma_params[dai->id][dir];
+ dma_params = &ssc_dma_params[pdev->id][dir];
dma_params->ssc = ssc_p->ssc;
dma_params->substream = substream;
diff --git a/sound/soc/bcm/Kconfig b/sound/soc/bcm/Kconfig
index 6a834e109f1d..d528aaceaad9 100644
--- a/sound/soc/bcm/Kconfig
+++ b/sound/soc/bcm/Kconfig
@@ -7,3 +7,12 @@ config SND_BCM2835_SOC_I2S
Say Y or M if you want to add support for codecs attached to
the BCM2835 I2S interface. You will also need
to select the audio interfaces to support below.
+
+config SND_SOC_CYGNUS
+ tristate "SoC platform audio for Broadcom Cygnus chips"
+ depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ help
+ Say Y if you want to add support for ASoC audio on Broadcom
+ Cygnus chips (bcm958300, bcm958305, bcm911360)
+
+ If you don't know what to do here, say N.
\ No newline at end of file
diff --git a/sound/soc/bcm/Makefile b/sound/soc/bcm/Makefile
index bc816b71e5a4..fc739d007884 100644
--- a/sound/soc/bcm/Makefile
+++ b/sound/soc/bcm/Makefile
@@ -3,3 +3,8 @@ snd-soc-bcm2835-i2s-objs := bcm2835-i2s.o
obj-$(CONFIG_SND_BCM2835_SOC_I2S) += snd-soc-bcm2835-i2s.o
+# CYGNUS Platform Support
+snd-soc-cygnus-objs := cygnus-pcm.o cygnus-ssp.o
+
+obj-$(CONFIG_SND_SOC_CYGNUS) += snd-soc-cygnus.o
+
diff --git a/sound/soc/bcm/cygnus-pcm.c b/sound/soc/bcm/cygnus-pcm.c
new file mode 100644
index 000000000000..d616e096462e
--- /dev/null
+++ b/sound/soc/bcm/cygnus-pcm.c
@@ -0,0 +1,861 @@
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "cygnus-ssp.h"
+
+/* Register offset needed for ASoC PCM module */
+
+#define INTH_R5F_STATUS_OFFSET 0x040
+#define INTH_R5F_CLEAR_OFFSET 0x048
+#define INTH_R5F_MASK_SET_OFFSET 0x050
+#define INTH_R5F_MASK_CLEAR_OFFSET 0x054
+
+#define BF_REARM_FREE_MARK_OFFSET 0x344
+#define BF_REARM_FULL_MARK_OFFSET 0x348
+
+/* Ring Buffer Ctrl Regs --- Start */
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_RDADDR_REG_BASE */
+#define SRC_RBUF_0_RDADDR_OFFSET 0x500
+#define SRC_RBUF_1_RDADDR_OFFSET 0x518
+#define SRC_RBUF_2_RDADDR_OFFSET 0x530
+#define SRC_RBUF_3_RDADDR_OFFSET 0x548
+#define SRC_RBUF_4_RDADDR_OFFSET 0x560
+#define SRC_RBUF_5_RDADDR_OFFSET 0x578
+#define SRC_RBUF_6_RDADDR_OFFSET 0x590
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_WRADDR_REG_BASE */
+#define SRC_RBUF_0_WRADDR_OFFSET 0x504
+#define SRC_RBUF_1_WRADDR_OFFSET 0x51c
+#define SRC_RBUF_2_WRADDR_OFFSET 0x534
+#define SRC_RBUF_3_WRADDR_OFFSET 0x54c
+#define SRC_RBUF_4_WRADDR_OFFSET 0x564
+#define SRC_RBUF_5_WRADDR_OFFSET 0x57c
+#define SRC_RBUF_6_WRADDR_OFFSET 0x594
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_BASEADDR_REG_BASE */
+#define SRC_RBUF_0_BASEADDR_OFFSET 0x508
+#define SRC_RBUF_1_BASEADDR_OFFSET 0x520
+#define SRC_RBUF_2_BASEADDR_OFFSET 0x538
+#define SRC_RBUF_3_BASEADDR_OFFSET 0x550
+#define SRC_RBUF_4_BASEADDR_OFFSET 0x568
+#define SRC_RBUF_5_BASEADDR_OFFSET 0x580
+#define SRC_RBUF_6_BASEADDR_OFFSET 0x598
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_ENDADDR_REG_BASE */
+#define SRC_RBUF_0_ENDADDR_OFFSET 0x50c
+#define SRC_RBUF_1_ENDADDR_OFFSET 0x524
+#define SRC_RBUF_2_ENDADDR_OFFSET 0x53c
+#define SRC_RBUF_3_ENDADDR_OFFSET 0x554
+#define SRC_RBUF_4_ENDADDR_OFFSET 0x56c
+#define SRC_RBUF_5_ENDADDR_OFFSET 0x584
+#define SRC_RBUF_6_ENDADDR_OFFSET 0x59c
+
+/* AUD_FMM_BF_CTRL_SOURCECH_RINGBUF_X_FREE_MARK_REG_BASE */
+#define SRC_RBUF_0_FREE_MARK_OFFSET 0x510
+#define SRC_RBUF_1_FREE_MARK_OFFSET 0x528
+#define SRC_RBUF_2_FREE_MARK_OFFSET 0x540
+#define SRC_RBUF_3_FREE_MARK_OFFSET 0x558
+#define SRC_RBUF_4_FREE_MARK_OFFSET 0x570
+#define SRC_RBUF_5_FREE_MARK_OFFSET 0x588
+#define SRC_RBUF_6_FREE_MARK_OFFSET 0x5a0
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_RDADDR_REG_BASE */
+#define DST_RBUF_0_RDADDR_OFFSET 0x5c0
+#define DST_RBUF_1_RDADDR_OFFSET 0x5d8
+#define DST_RBUF_2_RDADDR_OFFSET 0x5f0
+#define DST_RBUF_3_RDADDR_OFFSET 0x608
+#define DST_RBUF_4_RDADDR_OFFSET 0x620
+#define DST_RBUF_5_RDADDR_OFFSET 0x638
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_WRADDR_REG_BASE */
+#define DST_RBUF_0_WRADDR_OFFSET 0x5c4
+#define DST_RBUF_1_WRADDR_OFFSET 0x5dc
+#define DST_RBUF_2_WRADDR_OFFSET 0x5f4
+#define DST_RBUF_3_WRADDR_OFFSET 0x60c
+#define DST_RBUF_4_WRADDR_OFFSET 0x624
+#define DST_RBUF_5_WRADDR_OFFSET 0x63c
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_BASEADDR_REG_BASE */
+#define DST_RBUF_0_BASEADDR_OFFSET 0x5c8
+#define DST_RBUF_1_BASEADDR_OFFSET 0x5e0
+#define DST_RBUF_2_BASEADDR_OFFSET 0x5f8
+#define DST_RBUF_3_BASEADDR_OFFSET 0x610
+#define DST_RBUF_4_BASEADDR_OFFSET 0x628
+#define DST_RBUF_5_BASEADDR_OFFSET 0x640
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_ENDADDR_REG_BASE */
+#define DST_RBUF_0_ENDADDR_OFFSET 0x5cc
+#define DST_RBUF_1_ENDADDR_OFFSET 0x5e4
+#define DST_RBUF_2_ENDADDR_OFFSET 0x5fc
+#define DST_RBUF_3_ENDADDR_OFFSET 0x614
+#define DST_RBUF_4_ENDADDR_OFFSET 0x62c
+#define DST_RBUF_5_ENDADDR_OFFSET 0x644
+
+/* AUD_FMM_BF_CTRL_DESTCH_RINGBUF_X_FULL_MARK_REG_BASE */
+#define DST_RBUF_0_FULL_MARK_OFFSET 0x5d0
+#define DST_RBUF_1_FULL_MARK_OFFSET 0x5e8
+#define DST_RBUF_2_FULL_MARK_OFFSET 0x600
+#define DST_RBUF_3_FULL_MARK_OFFSET 0x618
+#define DST_RBUF_4_FULL_MARK_OFFSET 0x630
+#define DST_RBUF_5_FULL_MARK_OFFSET 0x648
+/* Ring Buffer Ctrl Regs --- End */
+
+/* Error Status Regs --- Start */
+/* AUD_FMM_BF_ESR_ESRX_STATUS_REG_BASE */
+#define ESR0_STATUS_OFFSET 0x900
+#define ESR1_STATUS_OFFSET 0x918
+#define ESR2_STATUS_OFFSET 0x930
+#define ESR3_STATUS_OFFSET 0x948
+#define ESR4_STATUS_OFFSET 0x960
+
+/* AUD_FMM_BF_ESR_ESRX_STATUS_CLEAR_REG_BASE */
+#define ESR0_STATUS_CLR_OFFSET 0x908
+#define ESR1_STATUS_CLR_OFFSET 0x920
+#define ESR2_STATUS_CLR_OFFSET 0x938
+#define ESR3_STATUS_CLR_OFFSET 0x950
+#define ESR4_STATUS_CLR_OFFSET 0x968
+
+/* AUD_FMM_BF_ESR_ESRX_MASK_REG_BASE */
+#define ESR0_MASK_STATUS_OFFSET 0x90c
+#define ESR1_MASK_STATUS_OFFSET 0x924
+#define ESR2_MASK_STATUS_OFFSET 0x93c
+#define ESR3_MASK_STATUS_OFFSET 0x954
+#define ESR4_MASK_STATUS_OFFSET 0x96c
+
+/* AUD_FMM_BF_ESR_ESRX_MASK_SET_REG_BASE */
+#define ESR0_MASK_SET_OFFSET 0x910
+#define ESR1_MASK_SET_OFFSET 0x928
+#define ESR2_MASK_SET_OFFSET 0x940
+#define ESR3_MASK_SET_OFFSET 0x958
+#define ESR4_MASK_SET_OFFSET 0x970
+
+/* AUD_FMM_BF_ESR_ESRX_MASK_CLEAR_REG_BASE */
+#define ESR0_MASK_CLR_OFFSET 0x914
+#define ESR1_MASK_CLR_OFFSET 0x92c
+#define ESR2_MASK_CLR_OFFSET 0x944
+#define ESR3_MASK_CLR_OFFSET 0x95c
+#define ESR4_MASK_CLR_OFFSET 0x974
+/* Error Status Regs --- End */
+
+#define R5F_ESR0_SHIFT 0 /* esr0 = fifo underflow */
+#define R5F_ESR1_SHIFT 1 /* esr1 = ringbuf underflow */
+#define R5F_ESR2_SHIFT 2 /* esr2 = ringbuf overflow */
+#define R5F_ESR3_SHIFT 3 /* esr3 = freemark */
+#define R5F_ESR4_SHIFT 4 /* esr4 = fullmark */
+
+
+/* Mask for R5F register. Set all relevant interrupt for playback handler */
+#define ANY_PLAYBACK_IRQ (BIT(R5F_ESR0_SHIFT) | \
+ BIT(R5F_ESR1_SHIFT) | \
+ BIT(R5F_ESR3_SHIFT))
+
+/* Mask for R5F register. Set all relevant interrupt for capture handler */
+#define ANY_CAPTURE_IRQ (BIT(R5F_ESR2_SHIFT) | BIT(R5F_ESR4_SHIFT))
+
+/*
+ * PERIOD_BYTES_MIN is the number of bytes at which the interrupt will tick.
+ * This number should be a multiple of 256. Minimum value is 256
+ */
+#define PERIOD_BYTES_MIN 0x100
+
+static const struct snd_pcm_hardware cygnus_pcm_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+
+ /* A period is basically an interrupt */
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = 0x10000,
+
+ /* periods_min/max give the range of approx. interrupts per buffer */
+ .periods_min = 2,
+ .periods_max = 8,
+
+ /*
+ * maximum buffer size in bytes = period_bytes_max * periods_max
+ * We allocate this amount of data for each enabled channel
+ */
+ .buffer_bytes_max = 4 * 0x8000,
+};
+
+static u64 cygnus_dma_dmamask = DMA_BIT_MASK(32);
+
+static struct cygnus_aio_port *cygnus_dai_get_dma_data(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
+
+ return snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
+}
+
+static void ringbuf_set_initial(void __iomem *audio_io,
+ struct ringbuf_regs *p_rbuf,
+ bool is_playback,
+ u32 start,
+ u32 periodsize,
+ u32 bufsize)
+{
+ u32 initial_rd;
+ u32 initial_wr;
+ u32 end;
+ u32 fmark_val; /* free or full mark */
+
+ p_rbuf->period_bytes = periodsize;
+ p_rbuf->buf_size = bufsize;
+
+ if (is_playback) {
+ /* Set the pointers to indicate full (flip uppermost bit) */
+ initial_rd = start;
+ initial_wr = initial_rd ^ BIT(31);
+ } else {
+ /* Set the pointers to indicate empty */
+ initial_wr = start;
+ initial_rd = initial_wr;
+ }
+
+ end = start + bufsize - 1;
+
+ /*
+ * The interrupt will fire when free/full mark is *exceeded*
+ * The fmark value must be multiple of PERIOD_BYTES_MIN so set fmark
+ * to be PERIOD_BYTES_MIN less than the period size.
+ */
+ fmark_val = periodsize - PERIOD_BYTES_MIN;
+
+ writel(start, audio_io + p_rbuf->baseaddr);
+ writel(end, audio_io + p_rbuf->endaddr);
+ writel(fmark_val, audio_io + p_rbuf->fmark);
+ writel(initial_rd, audio_io + p_rbuf->rdaddr);
+ writel(initial_wr, audio_io + p_rbuf->wraddr);
+}
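Worked through with a hypothetical 4 KiB period, ringbuf_set_initial() above programs:

	fmark_val  = periodsize - PERIOD_BYTES_MIN
	           = 0x1000 - 0x100 = 0xf00
	end        = start + bufsize - 1

	playback:  initial_rd = start
	           initial_wr = start ^ BIT(31)    /* MSB flipped => reported full */
	capture:   initial_rd = initial_wr = start /* equal pointers => empty */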
+
+static int configure_ringbuf_regs(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ struct ringbuf_regs *p_rbuf;
+ int status = 0;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ /* Map the ssp portnum to a set of ring buffers. */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ p_rbuf = &aio->play_rb_regs;
+
+ switch (aio->portnum) {
+ case 0:
+ *p_rbuf = RINGBUF_REG_PLAYBACK(0);
+ break;
+ case 1:
+ *p_rbuf = RINGBUF_REG_PLAYBACK(2);
+ break;
+ case 2:
+ *p_rbuf = RINGBUF_REG_PLAYBACK(4);
+ break;
+ case 3: /* SPDIF */
+ *p_rbuf = RINGBUF_REG_PLAYBACK(6);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ } else {
+ p_rbuf = &aio->capture_rb_regs;
+
+ switch (aio->portnum) {
+ case 0:
+ *p_rbuf = RINGBUF_REG_CAPTURE(0);
+ break;
+ case 1:
+ *p_rbuf = RINGBUF_REG_CAPTURE(2);
+ break;
+ case 2:
+ *p_rbuf = RINGBUF_REG_CAPTURE(4);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ }
+
+ return status;
+}
+
+static struct ringbuf_regs *get_ringbuf(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ struct ringbuf_regs *p_rbuf = NULL;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ p_rbuf = &aio->play_rb_regs;
+ else
+ p_rbuf = &aio->capture_rb_regs;
+
+ return p_rbuf;
+}
+
+static void enable_intr(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ u32 clear_mask;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ /* The port number maps to the bit position to be cleared */
+ clear_mask = BIT(aio->portnum);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Clear interrupt status before enabling them */
+ writel(clear_mask, aio->cygaud->audio + ESR0_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR1_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR3_STATUS_CLR_OFFSET);
+ /* Unmask the interrupts of the given port*/
+ writel(clear_mask, aio->cygaud->audio + ESR0_MASK_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR1_MASK_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR3_MASK_CLR_OFFSET);
+
+ writel(ANY_PLAYBACK_IRQ,
+ aio->cygaud->audio + INTH_R5F_MASK_CLEAR_OFFSET);
+ } else {
+ writel(clear_mask, aio->cygaud->audio + ESR2_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR4_STATUS_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR2_MASK_CLR_OFFSET);
+ writel(clear_mask, aio->cygaud->audio + ESR4_MASK_CLR_OFFSET);
+
+ writel(ANY_CAPTURE_IRQ,
+ aio->cygaud->audio + INTH_R5F_MASK_CLEAR_OFFSET);
+ }
+
+}
+
+static void disable_intr(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct cygnus_aio_port *aio;
+ u32 set_mask;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s on port %d\n", __func__, aio->portnum);
+
+ /* The port number maps to the bit position to be set */
+ set_mask = BIT(aio->portnum);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Mask the interrupts of the given port*/
+ writel(set_mask, aio->cygaud->audio + ESR0_MASK_SET_OFFSET);
+ writel(set_mask, aio->cygaud->audio + ESR1_MASK_SET_OFFSET);
+ writel(set_mask, aio->cygaud->audio + ESR3_MASK_SET_OFFSET);
+ } else {
+ writel(set_mask, aio->cygaud->audio + ESR2_MASK_SET_OFFSET);
+ writel(set_mask, aio->cygaud->audio + ESR4_MASK_SET_OFFSET);
+ }
+
+}
+
+static int cygnus_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ enable_intr(substream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ disable_intr(substream);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void cygnus_pcm_period_elapsed(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ struct ringbuf_regs *p_rbuf = NULL;
+ u32 regval;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ p_rbuf = get_ringbuf(substream);
+
+ /*
+ * If a free/full mark interrupt occurs, provide a timestamp
+ * to ALSA and update the appropriate index by period_bytes
+ */
+ snd_pcm_period_elapsed(substream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Set the ring buffer to full */
+ regval = readl(aio->cygaud->audio + p_rbuf->rdaddr);
+ regval = regval ^ BIT(31);
+ writel(regval, aio->cygaud->audio + p_rbuf->wraddr);
+ } else {
+ /* Set the ring buffer to empty */
+ regval = readl(aio->cygaud->audio + p_rbuf->wraddr);
+ writel(regval, aio->cygaud->audio + p_rbuf->rdaddr);
+ }
+}
+
+/*
+ * ESR0/1/3 status Description
+ * 0x1 I2S0_out port caused interrupt
+ * 0x2 I2S1_out port caused interrupt
+ * 0x4 I2S2_out port caused interrupt
+ * 0x8 SPDIF_out port caused interrupt
+ */
+static void handle_playback_irq(struct cygnus_audio *cygaud)
+{
+ void __iomem *audio_io;
+ u32 port;
+ u32 esr_status0, esr_status1, esr_status3;
+
+ audio_io = cygaud->audio;
+
+ /*
+ * ESR status gets updated whether or not interrupts are enabled.
+ * So, check the ESR mask, which provides interrupt enable/
+ * disable status and use it to determine which ESR status
+ * should be serviced.
+ */
+ esr_status0 = readl(audio_io + ESR0_STATUS_OFFSET);
+ esr_status0 &= ~readl(audio_io + ESR0_MASK_STATUS_OFFSET);
+ esr_status1 = readl(audio_io + ESR1_STATUS_OFFSET);
+ esr_status1 &= ~readl(audio_io + ESR1_MASK_STATUS_OFFSET);
+ esr_status3 = readl(audio_io + ESR3_STATUS_OFFSET);
+ esr_status3 &= ~readl(audio_io + ESR3_MASK_STATUS_OFFSET);
+
+ for (port = 0; port < CYGNUS_MAX_PLAYBACK_PORTS; port++) {
+ u32 esrmask = BIT(port);
+
+ /*
+ * Ringbuffer or FIFO underflow
+ * If we get this interrupt, then we have also not yet responded
+ * to the freemark interrupt.
+ * Log a debug message. The freemark handler below will
+ * handle getting everything going again.
+ */
+ if ((esrmask & esr_status1) || (esrmask & esr_status0)) {
+ dev_dbg(cygaud->dev,
+ "Underrun: esr0=0x%x, esr1=0x%x esr3=0x%x\n",
+ esr_status0, esr_status1, esr_status3);
+ }
+
+ /*
+ * Freemark is hit. This is the normal interrupt.
+ * In typical operation the read and write regs will be equal
+ */
+ if (esrmask & esr_status3) {
+ struct snd_pcm_substream *playstr;
+
+ playstr = cygaud->portinfo[port].play_stream;
+ cygnus_pcm_period_elapsed(playstr);
+ }
+ }
+
+ /* Clear ESR interrupt */
+ writel(esr_status0, audio_io + ESR0_STATUS_CLR_OFFSET);
+ writel(esr_status1, audio_io + ESR1_STATUS_CLR_OFFSET);
+ writel(esr_status3, audio_io + ESR3_STATUS_CLR_OFFSET);
+ /* Rearm freemark logic by writing 1 to the correct bit */
+ writel(esr_status3, audio_io + BF_REARM_FREE_MARK_OFFSET);
+}
+
+/*
+ * ESR2/4 status Description
+ * 0x1 I2S0_in port caused interrupt
+ * 0x2 I2S1_in port caused interrupt
+ * 0x4 I2S2_in port caused interrupt
+ */
+static void handle_capture_irq(struct cygnus_audio *cygaud)
+{
+ void __iomem *audio_io;
+ u32 port;
+ u32 esr_status2, esr_status4;
+
+ audio_io = cygaud->audio;
+
+ /*
+ * ESR status gets updated whether or not interrupts are enabled.
+ * So, check the ESR mask, which provides interrupt enable/
+ * disable status and use it to determine which ESR status
+ * should be serviced.
+ */
+ esr_status2 = readl(audio_io + ESR2_STATUS_OFFSET);
+ esr_status2 &= ~readl(audio_io + ESR2_MASK_STATUS_OFFSET);
+ esr_status4 = readl(audio_io + ESR4_STATUS_OFFSET);
+ esr_status4 &= ~readl(audio_io + ESR4_MASK_STATUS_OFFSET);
+
+ for (port = 0; port < CYGNUS_MAX_CAPTURE_PORTS; port++) {
+ u32 esrmask = BIT(port);
+
+ /*
+ * Ringbuffer or FIFO overflow
+ * If we get this interrupt, then we have also not yet responded
+ * to the fullmark interrupt.
+ * Log a debug message. The fullmark handler below will
+ * handle getting everything going again.
+ */
+ if (esrmask & esr_status2)
+ dev_dbg(cygaud->dev,
+ "Overflow: esr2=0x%x\n", esr_status2);
+
+ if (esrmask & esr_status4) {
+ struct snd_pcm_substream *capstr;
+
+ capstr = cygaud->portinfo[port].capture_stream;
+ cygnus_pcm_period_elapsed(capstr);
+ }
+ }
+
+ writel(esr_status2, audio_io + ESR2_STATUS_CLR_OFFSET);
+ writel(esr_status4, audio_io + ESR4_STATUS_CLR_OFFSET);
+ /* Rearm fullmark logic by writing 1 to the correct bit */
+ writel(esr_status4, audio_io + BF_REARM_FULL_MARK_OFFSET);
+}
+
+static irqreturn_t cygnus_dma_irq(int irq, void *data)
+{
+ u32 r5_status;
+ struct cygnus_audio *cygaud = data;
+
+ /*
+ * R5 status bits Description
+ * 0 ESR0 (playback FIFO interrupt)
+ * 1 ESR1 (playback rbuf interrupt)
+ * 2 ESR2 (capture rbuf interrupt)
+ * 3 ESR3 (Freemark play. interrupt)
+ * 4 ESR4 (Fullmark capt. interrupt)
+ */
+ r5_status = readl(cygaud->audio + INTH_R5F_STATUS_OFFSET);
+
+ if (!(r5_status & (ANY_PLAYBACK_IRQ | ANY_CAPTURE_IRQ)))
+ return IRQ_NONE;
+
+ /* If playback interrupt happened */
+ if (ANY_PLAYBACK_IRQ & r5_status) {
+ handle_playback_irq(cygaud);
+ writel(ANY_PLAYBACK_IRQ & r5_status,
+ cygaud->audio + INTH_R5F_CLEAR_OFFSET);
+ }
+
+ /* If capture interrupt happened */
+ if (ANY_CAPTURE_IRQ & r5_status) {
+ handle_capture_irq(cygaud);
+ writel(ANY_CAPTURE_IRQ & r5_status,
+ cygaud->audio + INTH_R5F_CLEAR_OFFSET);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cygnus_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cygnus_aio_port *aio;
+ int ret;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ if (!aio)
+ return -ENODEV;
+
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ snd_soc_set_runtime_hwparams(substream, &cygnus_pcm_hw);
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, PERIOD_BYTES_MIN);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, PERIOD_BYTES_MIN);
+ if (ret < 0)
+ return ret;
+ /*
+ * Keep track of which substream belongs to which port.
+ * This info is needed by snd_pcm_period_elapsed() in irq_handler
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->play_stream = substream;
+ else
+ aio->capture_stream = substream;
+
+ return 0;
+}
+
+static int cygnus_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct cygnus_aio_port *aio;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->play_stream = NULL;
+ else
+ aio->capture_stream = NULL;
+
+ if (!aio->play_stream && !aio->capture_stream)
+ dev_dbg(rtd->cpu_dai->dev, "freed port %d\n", aio->portnum);
+
+ return 0;
+}
+
+static int cygnus_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cygnus_aio_port *aio;
+ int ret = 0;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+ runtime->dma_bytes = params_buffer_bytes(params);
+
+ return ret;
+}
+
+static int cygnus_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct cygnus_aio_port *aio;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ snd_pcm_set_runtime_buffer(substream, NULL);
+ return 0;
+}
+
+static int cygnus_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cygnus_aio_port *aio;
+ unsigned long bufsize, periodsize;
+ int ret = 0;
+ bool is_play;
+ u32 start;
+ struct ringbuf_regs *p_rbuf = NULL;
+
+ aio = cygnus_dai_get_dma_data(substream);
+ dev_dbg(rtd->cpu_dai->dev, "%s port %d\n", __func__, aio->portnum);
+
+ bufsize = snd_pcm_lib_buffer_bytes(substream);
+ periodsize = snd_pcm_lib_period_bytes(substream);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s (buf_size %lu) (period_size %lu)\n",
+ __func__, bufsize, periodsize);
+
+ configure_ringbuf_regs(substream);
+
+ p_rbuf = get_ringbuf(substream);
+
+ start = runtime->dma_addr;
+
+ is_play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 1 : 0;
+
+ ringbuf_set_initial(aio->cygaud->audio, p_rbuf, is_play, start,
+ periodsize, bufsize);
+
+ return ret;
+}
+
+static snd_pcm_uframes_t cygnus_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct cygnus_aio_port *aio;
+ unsigned int res = 0, cur = 0, base = 0;
+ struct ringbuf_regs *p_rbuf = NULL;
+
+ aio = cygnus_dai_get_dma_data(substream);
+
+ /*
+ * Get the offset of the current read index (for playback) or write
+ * index (for capture). Report this value back to the ASoC framework.
+ */
+ p_rbuf = get_ringbuf(substream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ cur = readl(aio->cygaud->audio + p_rbuf->rdaddr);
+ else
+ cur = readl(aio->cygaud->audio + p_rbuf->wraddr);
+
+ base = readl(aio->cygaud->audio + p_rbuf->baseaddr);
+
+ /*
+ * Mask off the MSB of rdaddr, wraddr and baseaddr,
+ * since the MSB is not part of the address
+ */
+ res = (cur & 0x7fffffff) - (base & 0x7fffffff);
+
+ return bytes_to_frames(substream->runtime, res);
+}
+
+static int cygnus_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size;
+
+ size = cygnus_pcm_hw.buffer_bytes_max;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+ buf->area = dma_alloc_coherent(pcm->card->dev, size,
+ &buf->addr, GFP_KERNEL);
+
+ dev_dbg(rtd->cpu_dai->dev, "%s: size 0x%zx @ %pK\n",
+ __func__, size, buf->area);
+
+ if (!buf->area) {
+ dev_err(rtd->cpu_dai->dev, "%s: dma_alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ buf->bytes = size;
+
+ return 0;
+}
+
+
+static const struct snd_pcm_ops cygnus_pcm_ops = {
+ .open = cygnus_pcm_open,
+ .close = cygnus_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = cygnus_pcm_hw_params,
+ .hw_free = cygnus_pcm_hw_free,
+ .prepare = cygnus_pcm_prepare,
+ .trigger = cygnus_pcm_trigger,
+ .pointer = cygnus_pcm_pointer,
+};
+
+static void cygnus_dma_free_dma_buffers(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (substream) {
+ buf = &substream->dma_buffer;
+ if (buf->area) {
+ dma_free_coherent(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+ }
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (substream) {
+ buf = &substream->dma_buffer;
+ if (buf->area) {
+ dma_free_coherent(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+ }
+}
+
+static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret;
+
+ if (!card->dev->dma_mask)
+ card->dev->dma_mask = &cygnus_dma_dmamask;
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
+ ret = cygnus_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret)
+ return ret;
+ }
+
+ if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
+ ret = cygnus_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE);
+ if (ret) {
+ cygnus_dma_free_dma_buffers(pcm);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver cygnus_soc_platform = {
+ .ops = &cygnus_pcm_ops,
+ .pcm_new = cygnus_dma_new,
+ .pcm_free = cygnus_dma_free_dma_buffers,
+};
+
+int cygnus_soc_platform_register(struct device *dev,
+ struct cygnus_audio *cygaud)
+{
+ int rc = 0;
+
+ dev_dbg(dev, "%s Enter\n", __func__);
+
+ rc = devm_request_irq(dev, cygaud->irq_num, cygnus_dma_irq,
+ IRQF_SHARED, "cygnus-audio", cygaud);
+ if (rc) {
+ dev_err(dev, "%s request_irq error %d\n", __func__, rc);
+ return rc;
+ }
+
+ rc = snd_soc_register_platform(dev, &cygnus_soc_platform);
+ if (rc) {
+ dev_err(dev, "%s failed\n", __func__);
+ return rc;
+ }
+
+ return 0;
+}
+
+int cygnus_soc_platform_unregister(struct device *dev)
+{
+ snd_soc_unregister_platform(dev);
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Cygnus ASoC PCM module");
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
new file mode 100644
index 000000000000..e710bb0c5637
--- /dev/null
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -0,0 +1,1529 @@
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "cygnus-ssp.h"
+
+#define DEFAULT_VCO 1354750204
+
+#define CYGNUS_TDM_RATE \
+ (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000)
+
+#define CAPTURE_FCI_ID_BASE 0x180
+#define CYGNUS_SSP_TRISTATE_MASK 0x001fff
+#define CYGNUS_PLLCLKSEL_MASK 0xf
+
+/* Used with the streams_on field to indicate which streams are active */
+#define PLAYBACK_STREAM_MASK BIT(0)
+#define CAPTURE_STREAM_MASK BIT(1)
+
+#define I2S_STREAM_CFG_MASK 0xff003ff
+#define I2S_CAP_STREAM_CFG_MASK 0xf0
+#define SPDIF_STREAM_CFG_MASK 0x3ff
+#define CH_GRP_STEREO 0x1
+
+/* Begin register offset defines */
+#define AUD_MISC_SEROUT_OE_REG_BASE 0x01c
+#define AUD_MISC_SEROUT_SPDIF_OE 12
+#define AUD_MISC_SEROUT_MCLK_OE 3
+#define AUD_MISC_SEROUT_LRCK_OE 2
+#define AUD_MISC_SEROUT_SCLK_OE 1
+#define AUD_MISC_SEROUT_SDAT_OE 0
+
+/* AUD_FMM_BF_CTRL_xxx regs */
+#define BF_DST_CFG0_OFFSET 0x100
+#define BF_DST_CFG1_OFFSET 0x104
+#define BF_DST_CFG2_OFFSET 0x108
+
+#define BF_DST_CTRL0_OFFSET 0x130
+#define BF_DST_CTRL1_OFFSET 0x134
+#define BF_DST_CTRL2_OFFSET 0x138
+
+#define BF_SRC_CFG0_OFFSET 0x148
+#define BF_SRC_CFG1_OFFSET 0x14c
+#define BF_SRC_CFG2_OFFSET 0x150
+#define BF_SRC_CFG3_OFFSET 0x154
+
+#define BF_SRC_CTRL0_OFFSET 0x1c0
+#define BF_SRC_CTRL1_OFFSET 0x1c4
+#define BF_SRC_CTRL2_OFFSET 0x1c8
+#define BF_SRC_CTRL3_OFFSET 0x1cc
+
+#define BF_SRC_GRP0_OFFSET 0x1fc
+#define BF_SRC_GRP1_OFFSET 0x200
+#define BF_SRC_GRP2_OFFSET 0x204
+#define BF_SRC_GRP3_OFFSET 0x208
+
+#define BF_SRC_GRP_EN_OFFSET 0x320
+#define BF_SRC_GRP_FLOWON_OFFSET 0x324
+#define BF_SRC_GRP_SYNC_DIS_OFFSET 0x328
+
+/* AUD_FMM_IOP_OUT_I2S_xxx regs */
+#define OUT_I2S_0_STREAM_CFG_OFFSET 0xa00
+#define OUT_I2S_0_CFG_OFFSET 0xa04
+#define OUT_I2S_0_MCLK_CFG_OFFSET 0xa0c
+
+#define OUT_I2S_1_STREAM_CFG_OFFSET 0xa40
+#define OUT_I2S_1_CFG_OFFSET 0xa44
+#define OUT_I2S_1_MCLK_CFG_OFFSET 0xa4c
+
+#define OUT_I2S_2_STREAM_CFG_OFFSET 0xa80
+#define OUT_I2S_2_CFG_OFFSET 0xa84
+#define OUT_I2S_2_MCLK_CFG_OFFSET 0xa8c
+
+/* AUD_FMM_IOP_OUT_SPDIF_xxx regs */
+#define SPDIF_STREAM_CFG_OFFSET 0xac0
+#define SPDIF_CTRL_OFFSET 0xac4
+#define SPDIF_FORMAT_CFG_OFFSET 0xad8
+#define SPDIF_MCLK_CFG_OFFSET 0xadc
+
+/* AUD_FMM_IOP_PLL_0_xxx regs */
+#define IOP_PLL_0_MACRO_OFFSET 0xb00
+#define IOP_PLL_0_MDIV_Ch0_OFFSET 0xb14
+#define IOP_PLL_0_MDIV_Ch1_OFFSET 0xb18
+#define IOP_PLL_0_MDIV_Ch2_OFFSET 0xb1c
+
+#define IOP_PLL_0_ACTIVE_MDIV_Ch0_OFFSET 0xb30
+#define IOP_PLL_0_ACTIVE_MDIV_Ch1_OFFSET 0xb34
+#define IOP_PLL_0_ACTIVE_MDIV_Ch2_OFFSET 0xb38
+
+/* AUD_FMM_IOP_xxx regs */
+#define IOP_PLL_0_CONTROL_OFFSET 0xb04
+#define IOP_PLL_0_USER_NDIV_OFFSET 0xb08
+#define IOP_PLL_0_ACTIVE_NDIV_OFFSET 0xb20
+#define IOP_PLL_0_RESET_OFFSET 0xb5c
+
+/* AUD_FMM_IOP_IN_I2S_xxx regs */
+#define IN_I2S_0_STREAM_CFG_OFFSET 0x00
+#define IN_I2S_0_CFG_OFFSET 0x04
+#define IN_I2S_1_STREAM_CFG_OFFSET 0x40
+#define IN_I2S_1_CFG_OFFSET 0x44
+#define IN_I2S_2_STREAM_CFG_OFFSET 0x80
+#define IN_I2S_2_CFG_OFFSET 0x84
+
+/* AUD_FMM_IOP_MISC_xxx regs */
+#define IOP_SW_INIT_LOGIC 0x1c0
+
+/* End register offset defines */
+
+
+/* AUD_FMM_IOP_OUT_I2S_x_MCLK_CFG_0_REG */
+#define I2S_OUT_MCLKRATE_SHIFT 16
+
+/* AUD_FMM_IOP_OUT_I2S_x_MCLK_CFG_REG */
+#define I2S_OUT_PLLCLKSEL_SHIFT 0
+
+/* AUD_FMM_IOP_OUT_I2S_x_STREAM_CFG */
+#define I2S_OUT_STREAM_ENA 31
+#define I2S_OUT_STREAM_CFG_GROUP_ID 20
+#define I2S_OUT_STREAM_CFG_CHANNEL_GROUPING 24
+
+/* AUD_FMM_IOP_IN_I2S_x_CAP */
+#define I2S_IN_STREAM_CFG_CAP_ENA 31
+#define I2S_IN_STREAM_CFG_0_GROUP_ID 4
+
+/* AUD_FMM_IOP_OUT_I2S_x_I2S_CFG_REG */
+#define I2S_OUT_CFGX_CLK_ENA 0
+#define I2S_OUT_CFGX_DATA_ENABLE 1
+#define I2S_OUT_CFGX_DATA_ALIGNMENT 6
+#define I2S_OUT_CFGX_BITS_PER_SLOT 13
+#define I2S_OUT_CFGX_VALID_SLOT 14
+#define I2S_OUT_CFGX_FSYNC_WIDTH 18
+#define I2S_OUT_CFGX_SCLKS_PER_1FS_DIV32 26
+#define I2S_OUT_CFGX_SLAVE_MODE 30
+#define I2S_OUT_CFGX_TDM_MODE 31
+
+/* AUD_FMM_BF_CTRL_SOURCECH_CFGx_REG */
+#define BF_SRC_CFGX_SFIFO_ENA 0
+#define BF_SRC_CFGX_BUFFER_PAIR_ENABLE 1
+#define BF_SRC_CFGX_SAMPLE_CH_MODE 2
+#define BF_SRC_CFGX_SFIFO_SZ_DOUBLE 5
+#define BF_SRC_CFGX_NOT_PAUSE_WHEN_EMPTY 10
+#define BF_SRC_CFGX_BIT_RES 20
+#define BF_SRC_CFGX_PROCESS_SEQ_ID_VALID 31
+
+/* AUD_FMM_BF_CTRL_DESTCH_CFGx_REG */
+#define BF_DST_CFGX_CAP_ENA 0
+#define BF_DST_CFGX_BUFFER_PAIR_ENABLE 1
+#define BF_DST_CFGX_DFIFO_SZ_DOUBLE 2
+#define BF_DST_CFGX_NOT_PAUSE_WHEN_FULL 11
+#define BF_DST_CFGX_FCI_ID 12
+#define BF_DST_CFGX_CAP_MODE 24
+#define BF_DST_CFGX_PROC_SEQ_ID_VALID 31
+
+/* AUD_FMM_IOP_OUT_SPDIF_xxx */
+#define SPDIF_0_OUT_DITHER_ENA 3
+#define SPDIF_0_OUT_STREAM_ENA 31
+
+/* AUD_FMM_IOP_PLL_0_USER */
+#define IOP_PLL_0_USER_NDIV_FRAC 10
+
+/* AUD_FMM_IOP_PLL_0_ACTIVE */
+#define IOP_PLL_0_ACTIVE_NDIV_FRAC 10
+
+
+#define INIT_SSP_REGS(num) (struct cygnus_ssp_regs){ \
+ .i2s_stream_cfg = OUT_I2S_ ##num## _STREAM_CFG_OFFSET, \
+ .i2s_cap_stream_cfg = IN_I2S_ ##num## _STREAM_CFG_OFFSET, \
+ .i2s_cfg = OUT_I2S_ ##num## _CFG_OFFSET, \
+ .i2s_cap_cfg = IN_I2S_ ##num## _CFG_OFFSET, \
+ .i2s_mclk_cfg = OUT_I2S_ ##num## _MCLK_CFG_OFFSET, \
+ .bf_destch_ctrl = BF_DST_CTRL ##num## _OFFSET, \
+ .bf_destch_cfg = BF_DST_CFG ##num## _OFFSET, \
+ .bf_sourcech_ctrl = BF_SRC_CTRL ##num## _OFFSET, \
+ .bf_sourcech_cfg = BF_SRC_CFG ##num## _OFFSET, \
+ .bf_sourcech_grp = BF_SRC_GRP ##num## _OFFSET \
+}
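The ##num## token pasting in INIT_SSP_REGS() stitches each port's register map together from the numbered offset defines above; for example (illustrative usage, not quoted from the driver):

	aio->regs = INIT_SSP_REGS(1);
	/* .i2s_stream_cfg becomes OUT_I2S_1_STREAM_CFG_OFFSET (0xa40),
	 * .bf_sourcech_cfg becomes BF_SRC_CFG1_OFFSET (0x14c), and so on.
	 */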
+
+struct pll_macro_entry {
+ u32 mclk;
+ u32 pll_ch_num;
+};
+
+/*
+ * The PLL has 3 output channels (1x, 2x, and 4x). Below are
+ * the common MCLK frequencies used by the audio driver.
+ */
+static const struct pll_macro_entry pll_predef_mclk[] = {
+ { 4096000, 0},
+ { 8192000, 1},
+ {16384000, 2},
+
+ { 5644800, 0},
+ {11289600, 1},
+ {22579200, 2},
+
+ { 6144000, 0},
+ {12288000, 1},
+ {24576000, 2},
+
+ {12288000, 0},
+ {24576000, 1},
+ {49152000, 2},
+
+ {22579200, 0},
+ {45158400, 1},
+ {90316800, 2},
+
+ {24576000, 0},
+ {49152000, 1},
+ {98304000, 2},
+};
+
+/* List of valid frame sizes for tdm mode */
+static const int ssp_valid_tdm_framesize[] = {32, 64, 128, 256, 512};
+
+/*
+ * Use this relationship to derive the sampling rate (lrclk)
+ * lrclk = mclk / ((2 * mclk_to_sclk_ratio) * (32 * SCLK)).
+ *
+ * Use mclk and pll_ch from the table above
+ *
+ * Valid SCLK = 0/1/2/4/8/12
+ *
+ * mclk_to_sclk_ratio = number of MCLK per SCLK. Division is twice the
+ * value programmed in this field.
+ * Valid mclk_to_sclk_ratio = 1 through to 15
+ *
+ * eg: To set lrclk = 48khz, set mclk = 12288000, mclk_to_sclk_ratio = 2,
+ * SCLK = 64
+ */
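Plugging the example values into the relationship above, and reading SCLK as the 64 serial clocks per frame given in the example (equivalently, 32 times a SCLKS_PER_1FS_DIV32 field value of 2):

	lrclk = 12288000 / (2 * 2 * 64)
	      = 12288000 / 256
	      = 48000 Hz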
+struct _ssp_clk_coeff {
+ u32 mclk;
+ u32 sclk_rate;
+ u32 rate;
+ u32 mclk_rate;
+};
+
+static const struct _ssp_clk_coeff ssp_clk_coeff[] = {
+ { 4096000, 32, 16000, 4},
+ { 4096000, 32, 32000, 2},
+ { 4096000, 64, 8000, 4},
+ { 4096000, 64, 16000, 2},
+ { 4096000, 64, 32000, 1},
+ { 4096000, 128, 8000, 2},
+ { 4096000, 128, 16000, 1},
+ { 4096000, 256, 8000, 1},
+
+ { 6144000, 32, 16000, 6},
+ { 6144000, 32, 32000, 3},
+ { 6144000, 32, 48000, 2},
+ { 6144000, 32, 96000, 1},
+ { 6144000, 64, 8000, 6},
+ { 6144000, 64, 16000, 3},
+ { 6144000, 64, 48000, 1},
+ { 6144000, 128, 8000, 3},
+
+ { 8192000, 32, 32000, 4},
+ { 8192000, 64, 16000, 4},
+ { 8192000, 64, 32000, 2},
+ { 8192000, 128, 8000, 4},
+ { 8192000, 128, 16000, 2},
+ { 8192000, 128, 32000, 1},
+ { 8192000, 256, 8000, 2},
+ { 8192000, 256, 16000, 1},
+ { 8192000, 512, 8000, 1},
+
+ {12288000, 32, 32000, 6},
+ {12288000, 32, 48000, 4},
+ {12288000, 32, 96000, 2},
+ {12288000, 32, 192000, 1},
+ {12288000, 64, 16000, 6},
+ {12288000, 64, 32000, 3},
+ {12288000, 64, 48000, 2},
+ {12288000, 64, 96000, 1},
+ {12288000, 128, 8000, 6},
+ {12288000, 128, 16000, 3},
+ {12288000, 128, 48000, 1},
+ {12288000, 256, 8000, 3},
+
+ {16384000, 64, 32000, 4},
+ {16384000, 128, 16000, 4},
+ {16384000, 128, 32000, 2},
+ {16384000, 256, 8000, 4},
+ {16384000, 256, 16000, 2},
+ {16384000, 256, 32000, 1},
+ {16384000, 512, 8000, 2},
+ {16384000, 512, 16000, 1},
+
+ {24576000, 32, 96000, 4},
+ {24576000, 32, 192000, 2},
+ {24576000, 64, 32000, 6},
+ {24576000, 64, 48000, 4},
+ {24576000, 64, 96000, 2},
+ {24576000, 64, 192000, 1},
+ {24576000, 128, 16000, 6},
+ {24576000, 128, 32000, 3},
+ {24576000, 128, 48000, 2},
+ {24576000, 256, 8000, 6},
+ {24576000, 256, 16000, 3},
+ {24576000, 256, 48000, 1},
+ {24576000, 512, 8000, 3},
+
+ {49152000, 32, 192000, 4},
+ {49152000, 64, 96000, 4},
+ {49152000, 64, 192000, 2},
+ {49152000, 128, 32000, 6},
+ {49152000, 128, 48000, 4},
+ {49152000, 128, 96000, 2},
+ {49152000, 128, 192000, 1},
+ {49152000, 256, 16000, 6},
+ {49152000, 256, 32000, 3},
+ {49152000, 256, 48000, 2},
+ {49152000, 256, 96000, 1},
+ {49152000, 512, 8000, 6},
+ {49152000, 512, 16000, 3},
+ {49152000, 512, 48000, 1},
+
+ { 5644800, 32, 22050, 4},
+ { 5644800, 32, 44100, 2},
+ { 5644800, 32, 88200, 1},
+ { 5644800, 64, 11025, 4},
+ { 5644800, 64, 22050, 2},
+ { 5644800, 64, 44100, 1},
+
+ {11289600, 32, 44100, 4},
+ {11289600, 32, 88200, 2},
+ {11289600, 32, 176400, 1},
+ {11289600, 64, 22050, 4},
+ {11289600, 64, 44100, 2},
+ {11289600, 64, 88200, 1},
+ {11289600, 128, 11025, 4},
+ {11289600, 128, 22050, 2},
+ {11289600, 128, 44100, 1},
+
+ {22579200, 32, 88200, 4},
+ {22579200, 32, 176400, 2},
+ {22579200, 64, 44100, 4},
+ {22579200, 64, 88200, 2},
+ {22579200, 64, 176400, 1},
+ {22579200, 128, 22050, 4},
+ {22579200, 128, 44100, 2},
+ {22579200, 128, 88200, 1},
+ {22579200, 256, 11025, 4},
+ {22579200, 256, 22050, 2},
+ {22579200, 256, 44100, 1},
+
+ {45158400, 32, 176400, 4},
+ {45158400, 64, 88200, 4},
+ {45158400, 64, 176400, 2},
+ {45158400, 128, 44100, 4},
+ {45158400, 128, 88200, 2},
+ {45158400, 128, 176400, 1},
+ {45158400, 256, 22050, 4},
+ {45158400, 256, 44100, 2},
+ {45158400, 256, 88200, 1},
+ {45158400, 512, 11025, 4},
+ {45158400, 512, 22050, 2},
+ {45158400, 512, 44100, 1},
+};
+
+static struct cygnus_aio_port *cygnus_dai_get_portinfo(struct snd_soc_dai *dai)
+{
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+
+ return &cygaud->portinfo[dai->id];
+}
+
+static int audio_ssp_init_portregs(struct cygnus_aio_port *aio)
+{
+ u32 value, fci_id;
+ int status = 0;
+
+ switch (aio->port_type) {
+ case PORT_TDM:
+ value = readl(aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+ value &= ~I2S_STREAM_CFG_MASK;
+
+ /* Set Group ID */
+ writel(aio->portnum,
+ aio->cygaud->audio + aio->regs.bf_sourcech_grp);
+
+ /* Configure the AUD_FMM_IOP_OUT_I2S_x_STREAM_CFG reg */
+ value |= aio->portnum << I2S_OUT_STREAM_CFG_GROUP_ID;
+ value |= aio->portnum; /* FCI ID is the port num */
+ value |= CH_GRP_STEREO << I2S_OUT_STREAM_CFG_CHANNEL_GROUPING;
+ writel(value, aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+
+ /* Configure the AUD_FMM_BF_CTRL_SOURCECH_CFGX reg */
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_NOT_PAUSE_WHEN_EMPTY);
+ value |= BIT(BF_SRC_CFGX_SFIFO_SZ_DOUBLE);
+ value |= BIT(BF_SRC_CFGX_PROCESS_SEQ_ID_VALID);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ /* Configure the AUD_FMM_IOP_IN_I2S_x_CAP_STREAM_CFG_0 reg */
+ value = readl(aio->cygaud->i2s_in +
+ aio->regs.i2s_cap_stream_cfg);
+ value &= ~I2S_CAP_STREAM_CFG_MASK;
+ value |= aio->portnum << I2S_IN_STREAM_CFG_0_GROUP_ID;
+ writel(value, aio->cygaud->i2s_in +
+ aio->regs.i2s_cap_stream_cfg);
+
+ /* Configure the AUD_FMM_BF_CTRL_DESTCH_CFGX_REG_BASE reg */
+ fci_id = CAPTURE_FCI_ID_BASE + aio->portnum;
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_destch_cfg);
+ value |= BIT(BF_DST_CFGX_DFIFO_SZ_DOUBLE);
+ value &= ~BIT(BF_DST_CFGX_NOT_PAUSE_WHEN_FULL);
+ value |= (fci_id << BF_DST_CFGX_FCI_ID);
+ value |= BIT(BF_DST_CFGX_PROC_SEQ_ID_VALID);
+ writel(value, aio->cygaud->audio + aio->regs.bf_destch_cfg);
+
+ /* Enable the transmit pin for this port */
+ value = readl(aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ value &= ~BIT((aio->portnum * 4) + AUD_MISC_SEROUT_SDAT_OE);
+ writel(value, aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ break;
+ case PORT_SPDIF:
+ writel(aio->portnum, aio->cygaud->audio + BF_SRC_GRP3_OFFSET);
+
+ value = readl(aio->cygaud->audio + SPDIF_CTRL_OFFSET);
+ value |= BIT(SPDIF_0_OUT_DITHER_ENA);
+ writel(value, aio->cygaud->audio + SPDIF_CTRL_OFFSET);
+
+ /* Enable and set the FCI ID for the SPDIF channel */
+ value = readl(aio->cygaud->audio + SPDIF_STREAM_CFG_OFFSET);
+ value &= ~SPDIF_STREAM_CFG_MASK;
+ value |= aio->portnum; /* FCI ID is the port num */
+ value |= BIT(SPDIF_0_OUT_STREAM_ENA);
+ writel(value, aio->cygaud->audio + SPDIF_STREAM_CFG_OFFSET);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_NOT_PAUSE_WHEN_EMPTY);
+ value |= BIT(BF_SRC_CFGX_SFIFO_SZ_DOUBLE);
+ value |= BIT(BF_SRC_CFGX_PROCESS_SEQ_ID_VALID);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ /* Enable the spdif output pin */
+ value = readl(aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ value &= ~BIT(AUD_MISC_SEROUT_SPDIF_OE);
+ writel(value, aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+ break;
+ default:
+ dev_err(aio->cygaud->dev, "Port not supported\n");
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static void audio_ssp_in_enable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_destch_cfg);
+ value |= BIT(BF_DST_CFGX_CAP_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_destch_cfg);
+
+ writel(0x1, aio->cygaud->audio + aio->regs.bf_destch_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value |= BIT(I2S_OUT_CFGX_CLK_ENA);
+ value |= BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ value = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+ value |= BIT(I2S_IN_STREAM_CFG_CAP_ENA);
+ writel(value, aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+
+ aio->streams_on |= CAPTURE_STREAM_MASK;
+}
+
+static void audio_ssp_in_disable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+
+ value = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+ value &= ~BIT(I2S_IN_STREAM_CFG_CAP_ENA);
+ writel(value, aio->cygaud->i2s_in + aio->regs.i2s_cap_stream_cfg);
+
+ aio->streams_on &= ~CAPTURE_STREAM_MASK;
+
+ /* If both playback and capture are off */
+ if (!aio->streams_on) {
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~BIT(I2S_OUT_CFGX_CLK_ENA);
+ value &= ~BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+ }
+
+ writel(0x0, aio->cygaud->audio + aio->regs.bf_destch_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_destch_cfg);
+ value &= ~BIT(BF_DST_CFGX_CAP_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_destch_cfg);
+}
+
+static int audio_ssp_out_enable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+ int status = 0;
+
+ switch (aio->port_type) {
+ case PORT_TDM:
+ value = readl(aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+ value |= BIT(I2S_OUT_STREAM_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+
+ writel(1, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value |= BIT(I2S_OUT_CFGX_CLK_ENA);
+ value |= BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value |= BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ aio->streams_on |= PLAYBACK_STREAM_MASK;
+ break;
+ case PORT_SPDIF:
+ value = readl(aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+ value |= 0x3;
+ writel(value, aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+
+ writel(1, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value |= BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ break;
+ default:
+ dev_err(aio->cygaud->dev,
+ "Port not supported %d\n", aio->portnum);
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static int audio_ssp_out_disable(struct cygnus_aio_port *aio)
+{
+ u32 value;
+ int status = 0;
+
+ switch (aio->port_type) {
+ case PORT_TDM:
+ aio->streams_on &= ~PLAYBACK_STREAM_MASK;
+
+ /* If both playback and capture are off */
+ if (!aio->streams_on) {
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~BIT(I2S_OUT_CFGX_CLK_ENA);
+ value &= ~BIT(I2S_OUT_CFGX_DATA_ENABLE);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+ }
+
+ /* set group_sync_dis = 1 */
+ value = readl(aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+ value |= BIT(aio->portnum);
+ writel(value, aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+
+ writel(0, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ /* set group_sync_dis = 0 */
+ value = readl(aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+ value &= ~BIT(aio->portnum);
+ writel(value, aio->cygaud->audio + BF_SRC_GRP_SYNC_DIS_OFFSET);
+
+ value = readl(aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+ value &= ~BIT(I2S_OUT_STREAM_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_stream_cfg);
+
+ /* IOP SW INIT on OUT_I2S_x */
+ value = readl(aio->cygaud->i2s_in + IOP_SW_INIT_LOGIC);
+ value |= BIT(aio->portnum);
+ writel(value, aio->cygaud->i2s_in + IOP_SW_INIT_LOGIC);
+ value &= ~BIT(aio->portnum);
+ writel(value, aio->cygaud->i2s_in + IOP_SW_INIT_LOGIC);
+ break;
+ case PORT_SPDIF:
+ value = readl(aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+ value &= ~0x3;
+ writel(value, aio->cygaud->audio + SPDIF_FORMAT_CFG_OFFSET);
+ writel(0, aio->cygaud->audio + aio->regs.bf_sourcech_ctrl);
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_SFIFO_ENA);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ break;
+ default:
+ dev_err(aio->cygaud->dev,
+ "Port not supported %d\n", aio->portnum);
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static int pll_configure_mclk(struct cygnus_audio *cygaud, u32 mclk,
+ struct cygnus_aio_port *aio)
+{
+ int i = 0, error;
+ bool found = false;
+ const struct pll_macro_entry *p_entry;
+ struct clk *ch_clk;
+
+ for (i = 0; i < ARRAY_SIZE(pll_predef_mclk); i++) {
+ p_entry = &pll_predef_mclk[i];
+ if (p_entry->mclk == mclk) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_err(cygaud->dev,
+ "%s No valid mclk freq (%u) found!\n", __func__, mclk);
+ return -EINVAL;
+ }
+
+ ch_clk = cygaud->audio_clk[p_entry->pll_ch_num];
+
+ if ((aio->clk_trace.cap_en) && (!aio->clk_trace.cap_clk_en)) {
+ error = clk_prepare_enable(ch_clk);
+ if (error) {
+ dev_err(cygaud->dev, "%s clk_prepare_enable failed %d\n",
+ __func__, error);
+ return error;
+ }
+ aio->clk_trace.cap_clk_en = true;
+ }
+
+ if ((aio->clk_trace.play_en) && (!aio->clk_trace.play_clk_en)) {
+ error = clk_prepare_enable(ch_clk);
+ if (error) {
+ dev_err(cygaud->dev, "%s clk_prepare_enable failed %d\n",
+ __func__, error);
+ return error;
+ }
+ aio->clk_trace.play_clk_en = true;
+ }
+
+ error = clk_set_rate(ch_clk, mclk);
+ if (error) {
+ dev_err(cygaud->dev, "%s Set MCLK rate failed: %d\n",
+ __func__, error);
+ return error;
+ }
+
+ return p_entry->pll_ch_num;
+}
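
pll_configure_mclk() resolves the requested rate through the pll_predef_mclk table defined earlier in this file; only the ->mclk and ->pll_ch_num fields are dereferenced here, and the returned channel number is what cygnus_ssp_set_sysclk() later writes into the MCLKSEL field. A sketch of such a table, with illustrative (not authoritative) frequencies and channel assignments:

/* Hypothetical entries; the real pll_predef_mclk table lives earlier in this file */
static const struct pll_macro_entry example_mclk_map[] = {
	{ .mclk = 12288000, .pll_ch_num = 0 },	/* 48 kHz family (256 * 48000) */
	{ .mclk = 11289600, .pll_ch_num = 1 },	/* 44.1 kHz family (256 * 44100) */
};
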
+
+static int cygnus_ssp_set_clocks(struct cygnus_aio_port *aio,
+ struct cygnus_audio *cygaud)
+{
+ u32 value, i = 0;
+ u32 mask = 0xf;
+ u32 sclk;
+ bool found = false;
+ const struct _ssp_clk_coeff *p_entry = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(ssp_clk_coeff); i++) {
+ p_entry = &ssp_clk_coeff[i];
+ if ((p_entry->rate == aio->lrclk) &&
+ (p_entry->sclk_rate == aio->bit_per_frame) &&
+ (p_entry->mclk == aio->mclk)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_err(aio->cygaud->dev,
+ "No valid match found in ssp_clk_coeff array\n");
+ dev_err(aio->cygaud->dev, "lrclk = %u, bits/frame = %u, mclk = %u\n",
+ aio->lrclk, aio->bit_per_frame, aio->mclk);
+ return -EINVAL;
+ }
+
+ sclk = aio->bit_per_frame;
+ if (sclk == 512)
+ sclk = 0;
+ /* sclks_per_1fs_div = sclk cycles/32 */
+ sclk /= 32;
+ /* Set sclk rate */
+ switch (aio->port_type) {
+ case PORT_TDM:
+ /* Set number of bitclks per frame */
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~(mask << I2S_OUT_CFGX_SCLKS_PER_1FS_DIV32);
+ value |= sclk << I2S_OUT_CFGX_SCLKS_PER_1FS_DIV32;
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+ dev_dbg(aio->cygaud->dev,
+ "SCLKS_PER_1FS_DIV32 = 0x%x\n", value);
+ break;
+ case PORT_SPDIF:
+ break;
+ default:
+ dev_err(aio->cygaud->dev, "Unknown port type\n");
+ return -EINVAL;
+ }
+
+ /* Set MCLK_RATE ssp port (spdif and ssp are the same) */
+ value = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ value &= ~(0xf << I2S_OUT_MCLKRATE_SHIFT);
+ value |= (p_entry->mclk_rate << I2S_OUT_MCLKRATE_SHIFT);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+
+ dev_dbg(aio->cygaud->dev, "mclk cfg reg = 0x%x\n", value);
+ dev_dbg(aio->cygaud->dev, "bits per frame = %u, mclk = %u Hz, lrclk = %u Hz\n",
+ aio->bit_per_frame, aio->mclk, aio->lrclk);
+ return 0;
+}
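
The SCLKS_PER_1FS_DIV32 handling above encodes the number of bit clocks per frame divided by 32, with 512 mapped to 0 because 16 does not fit the 4-bit field. A minimal standalone helper (hypothetical name, not part of the driver) that mirrors the encoding:

static inline u32 example_sclks_per_fs_field(u32 bits_per_frame)
{
	if (bits_per_frame == 512)	/* 512/32 == 16 overflows the 4-bit field */
		return 0;
	return bits_per_frame / 32;	/* e.g. 64 -> 2, 128 -> 4, 256 -> 8 */
}
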
+
+static int cygnus_ssp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+ int rate, bitres;
+ u32 value;
+ u32 mask = 0x1f;
+ int ret = 0;
+
+ dev_dbg(aio->cygaud->dev, "%s port = %d\n", __func__, aio->portnum);
+ dev_dbg(aio->cygaud->dev, "params_channels %d\n",
+ params_channels(params));
+ dev_dbg(aio->cygaud->dev, "rate %d\n", params_rate(params));
+ dev_dbg(aio->cygaud->dev, "format %d\n", params_format(params));
+
+ rate = params_rate(params);
+
+ switch (aio->mode) {
+ case CYGNUS_SSPMODE_TDM:
+ if ((rate == 192000) && (params_channels(params) > 4)) {
+ dev_err(aio->cygaud->dev, "Cannot run %d channels at %dHz\n",
+ params_channels(params), rate);
+ return -EINVAL;
+ }
+ break;
+ case CYGNUS_SSPMODE_I2S:
+ aio->bit_per_frame = 64; /* I2S must be 64 bit per frame */
+ break;
+ default:
+ dev_err(aio->cygaud->dev,
+ "%s port running in unknown mode\n", __func__);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~BIT(BF_SRC_CFGX_BUFFER_PAIR_ENABLE);
+ /* Configure channels as mono or stereo/TDM */
+ if (params_channels(params) == 1)
+ value |= BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
+ else
+ value &= ~BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ if (aio->port_type == PORT_SPDIF) {
+ dev_err(aio->cygaud->dev,
+ "SPDIF does not support 8bit format\n");
+ return -EINVAL;
+ }
+ bitres = 8;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bitres = 16;
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ /* 32 bit mode is coded as 0 */
+ bitres = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+ value &= ~(mask << BF_SRC_CFGX_BIT_RES);
+ value |= (bitres << BF_SRC_CFGX_BIT_RES);
+ writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
+
+ } else {
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = readl(aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ value |= BIT(BF_DST_CFGX_CAP_MODE);
+ writel(value, aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = readl(aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ value &= ~BIT(BF_DST_CFGX_CAP_MODE);
+ writel(value, aio->cygaud->audio +
+ aio->regs.bf_destch_cfg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ aio->lrclk = rate;
+
+ if (!aio->is_slave)
+ ret = cygnus_ssp_set_clocks(aio, cygaud);
+
+ return ret;
+}
+
+/*
+ * This function sets the MCLK frequency for the PLL clock
+ */
+static int cygnus_ssp_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ int sel;
+ u32 value;
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(aio->cygaud->dev,
+ "%s Enter port = %d\n", __func__, aio->portnum);
+ sel = pll_configure_mclk(cygaud, freq, aio);
+ if (sel < 0) {
+ dev_err(aio->cygaud->dev,
+ "%s Setting mclk failed.\n", __func__);
+ return -EINVAL;
+ }
+
+ aio->mclk = freq;
+
+ dev_dbg(aio->cygaud->dev, "%s Setting MCLKSEL to %d\n", __func__, sel);
+ value = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ value &= ~(0xf << I2S_OUT_PLLCLKSEL_SHIFT);
+ value |= (sel << I2S_OUT_PLLCLKSEL_SHIFT);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+
+ return 0;
+}
+
+static int cygnus_ssp_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+
+ snd_soc_dai_set_dma_data(dai, substream, aio);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->clk_trace.play_en = true;
+ else
+ aio->clk_trace.cap_en = true;
+
+ return 0;
+}
+
+static void cygnus_ssp_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ aio->clk_trace.play_en = false;
+ else
+ aio->clk_trace.cap_en = false;
+
+ if (!aio->is_slave) {
+ u32 val;
+
+ val = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ val &= CYGNUS_PLLCLKSEL_MASK;
+ if (val >= ARRAY_SIZE(aio->cygaud->audio_clk)) {
+ dev_err(aio->cygaud->dev, "Clk index %u is out of bounds\n",
+ val);
+ return;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (aio->clk_trace.play_clk_en) {
+ clk_disable_unprepare(aio->cygaud->
+ audio_clk[val]);
+ aio->clk_trace.play_clk_en = false;
+ }
+ } else {
+ if (aio->clk_trace.cap_clk_en) {
+ clk_disable_unprepare(aio->cygaud->
+ audio_clk[val]);
+ aio->clk_trace.cap_clk_en = false;
+ }
+ }
+ }
+}
+
+/*
+ * Bit Update Notes
+ * 31 Yes TDM Mode (1 = TDM, 0 = i2s)
+ * 30 Yes Slave Mode (1 = Slave, 0 = Master)
+ * 29:26 No Sclks per frame
+ * 25:18 Yes FS Width
+ * 17:14 No Valid Slots
+ * 13 No Bits (1 = 16 bits, 0 = 32 bits)
+ * 12:08 No Bits per samp
+ * 07 Yes Justification (1 = LSB, 0 = MSB)
+ * 06 Yes Alignment (1 = Delay 1 clk, 0 = no delay)
+ * 05 Yes SCLK polarity (1 = Rising, 0 = Falling)
+ * 04 Yes LRCLK Polarity (1 = High for left, 0 = Low for left)
+ * 03:02 Yes Reserved - write as zero
+ * 01 No Data Enable
+ * 00 No CLK Enable
+ */
+#define I2S_OUT_CFG_REG_UPDATE_MASK 0x3C03FF03
+
+/* Input cfg is same as output, but the FS width is not a valid field */
+#define I2S_IN_CFG_REG_UPDATE_MASK (I2S_OUT_CFG_REG_UPDATE_MASK | 0x03FC0000)
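
Assuming the kernel's BIT()/GENMASK() helpers from <linux/bitops.h>, the two masks can be cross-checked against the "No" rows of the table above; this is only a verification sketch, not an alternative definition:

#include <linux/bitops.h>

/* Fields marked "No" above, i.e. fields cygnus_ssp_set_fmt() must preserve */
#define EXAMPLE_OUT_KEEP_MASK	(GENMASK(29, 26) | GENMASK(17, 14) | BIT(13) | \
				 GENMASK(12, 8) | GENMASK(1, 0))
/* EXAMPLE_OUT_KEEP_MASK == 0x3C03FF03 == I2S_OUT_CFG_REG_UPDATE_MASK */

/* The capture side additionally preserves the FS width field (bits 25:18) */
#define EXAMPLE_IN_KEEP_MASK	(EXAMPLE_OUT_KEEP_MASK | GENMASK(25, 18))
/* EXAMPLE_IN_KEEP_MASK == 0x3FFFFF03 == I2S_IN_CFG_REG_UPDATE_MASK */
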
+
+int cygnus_ssp_set_custom_fsync_width(struct snd_soc_dai *cpu_dai, int len)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+
+ if ((len > 0) && (len < 256)) {
+ aio->fsync_width = len;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int cygnus_ssp_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+ u32 ssp_curcfg;
+ u32 ssp_newcfg;
+ u32 ssp_outcfg;
+ u32 ssp_incfg;
+ u32 val;
+ u32 mask;
+
+ dev_dbg(aio->cygaud->dev, "%s Enter fmt: %x\n", __func__, fmt);
+
+ if (aio->port_type == PORT_SPDIF)
+ return -EINVAL;
+
+ ssp_newcfg = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_SLAVE_MODE);
+ aio->is_slave = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ ssp_newcfg &= ~BIT(I2S_OUT_CFGX_SLAVE_MODE);
+ aio->is_slave = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_DATA_ALIGNMENT);
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_FSYNC_WIDTH);
+ aio->mode = CYGNUS_SSPMODE_I2S;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_TDM_MODE);
+
+ /* DSP_A = data after FS, DSP_B = data during FS */
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_DSP_A)
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_DATA_ALIGNMENT);
+
+ if ((aio->fsync_width > 0) && (aio->fsync_width < 256))
+ ssp_newcfg |=
+ (aio->fsync_width << I2S_OUT_CFGX_FSYNC_WIDTH);
+ else
+ ssp_newcfg |= BIT(I2S_OUT_CFGX_FSYNC_WIDTH);
+
+ aio->mode = CYGNUS_SSPMODE_TDM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * SSP out cfg.
+ * Retain bits we do not want to update, then OR in new bits
+ */
+ ssp_curcfg = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ ssp_outcfg = (ssp_curcfg & I2S_OUT_CFG_REG_UPDATE_MASK) | ssp_newcfg;
+ writel(ssp_outcfg, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ /*
+ * SSP in cfg.
+ * Retain bits we do not want to update, then OR in new bits
+ */
+ ssp_curcfg = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+ ssp_incfg = (ssp_curcfg & I2S_IN_CFG_REG_UPDATE_MASK) | ssp_newcfg;
+ writel(ssp_incfg, aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+
+ val = readl(aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+
+ /*
+ * Configure the word clk and bit clk as output or tristate
+ * Each port has 4 bits for controlling its pins.
+ * Shift the mask based upon port number.
+ */
+ mask = BIT(AUD_MISC_SEROUT_LRCK_OE)
+ | BIT(AUD_MISC_SEROUT_SCLK_OE)
+ | BIT(AUD_MISC_SEROUT_MCLK_OE);
+ mask = mask << (aio->portnum * 4);
+ if (aio->is_slave)
+ /* Set bit for tri-state */
+ val |= mask;
+ else
+ /* Clear bit for drive */
+ val &= ~mask;
+
+ dev_dbg(aio->cygaud->dev, "%s Set OE bits 0x%x\n", __func__, val);
+ writel(val, aio->cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+
+ return 0;
+}
+
+static int cygnus_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(dai);
+ struct cygnus_audio *cygaud = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(aio->cygaud->dev,
+ "%s cmd %d at port = %d\n", __func__, cmd, aio->portnum);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ audio_ssp_out_enable(aio);
+ else
+ audio_ssp_in_enable(aio);
+ cygaud->active_ports++;
+
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ audio_ssp_out_disable(aio);
+ else
+ audio_ssp_in_disable(aio);
+ cygaud->active_ports--;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cygnus_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+ u32 value;
+ int bits_per_slot = 0; /* default to 32-bits per slot */
+ int frame_bits;
+ unsigned int active_slots;
+ bool found = false;
+ int i;
+
+ if (tx_mask != rx_mask) {
+ dev_err(aio->cygaud->dev,
+ "%s tx_mask must equal rx_mask\n", __func__);
+ return -EINVAL;
+ }
+
+ active_slots = hweight32(tx_mask);
+
+ if (active_slots > 16)
+ return -EINVAL;
+
+ /* Slot value must be even */
+ if (active_slots % 2)
+ return -EINVAL;
+
+ /* We encode 16 slots as 0 in the reg */
+ if (active_slots == 16)
+ active_slots = 0;
+
+ /* Slot Width is either 16 or 32 */
+ switch (slot_width) {
+ case 16:
+ bits_per_slot = 1;
+ break;
+ case 32:
+ bits_per_slot = 0;
+ break;
+ default:
+ bits_per_slot = 0;
+ dev_warn(aio->cygaud->dev,
+ "%s Defaulting Slot Width to 32\n", __func__);
+ }
+
+ frame_bits = slots * slot_width;
+
+ for (i = 0; i < ARRAY_SIZE(ssp_valid_tdm_framesize); i++) {
+ if (ssp_valid_tdm_framesize[i] == frame_bits) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(aio->cygaud->dev,
+ "%s In TDM mode, frame bits INVALID (%d)\n",
+ __func__, frame_bits);
+ return -EINVAL;
+ }
+
+ aio->bit_per_frame = frame_bits;
+
+ dev_dbg(aio->cygaud->dev, "%s active_slots %u, bits per frame %d\n",
+ __func__, active_slots, frame_bits);
+
+ /* Set capture side of ssp port */
+ value = readl(aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+ value &= ~(0xf << I2S_OUT_CFGX_VALID_SLOT);
+ value |= (active_slots << I2S_OUT_CFGX_VALID_SLOT);
+ value &= ~BIT(I2S_OUT_CFGX_BITS_PER_SLOT);
+ value |= (bits_per_slot << I2S_OUT_CFGX_BITS_PER_SLOT);
+ writel(value, aio->cygaud->i2s_in + aio->regs.i2s_cap_cfg);
+
+ /* Set playback side of ssp port */
+ value = readl(aio->cygaud->audio + aio->regs.i2s_cfg);
+ value &= ~(0xf << I2S_OUT_CFGX_VALID_SLOT);
+ value |= (active_slots << I2S_OUT_CFGX_VALID_SLOT);
+ value &= ~BIT(I2S_OUT_CFGX_BITS_PER_SLOT);
+ value |= (bits_per_slot << I2S_OUT_CFGX_BITS_PER_SLOT);
+ writel(value, aio->cygaud->audio + aio->regs.i2s_cfg);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cygnus_ssp_suspend(struct snd_soc_dai *cpu_dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+
+ if (!aio->is_slave) {
+ u32 val;
+
+ val = readl(aio->cygaud->audio + aio->regs.i2s_mclk_cfg);
+ val &= CYGNUS_PLLCLKSEL_MASK;
+ if (val >= ARRAY_SIZE(aio->cygaud->audio_clk)) {
+ dev_err(aio->cygaud->dev, "Clk index %u is out of bounds\n",
+ val);
+ return -EINVAL;
+ }
+
+ if (aio->clk_trace.cap_clk_en)
+ clk_disable_unprepare(aio->cygaud->audio_clk[val]);
+ if (aio->clk_trace.play_clk_en)
+ clk_disable_unprepare(aio->cygaud->audio_clk[val]);
+
+ aio->pll_clk_num = val;
+ }
+
+ return 0;
+}
+
+static int cygnus_ssp_resume(struct snd_soc_dai *cpu_dai)
+{
+ struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
+ int error;
+
+ if (!aio->is_slave) {
+ if (aio->clk_trace.cap_clk_en) {
+ error = clk_prepare_enable(aio->cygaud->
+ audio_clk[aio->pll_clk_num]);
+ if (error) {
+ dev_err(aio->cygaud->dev, "%s clk_prepare_enable failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ if (aio->clk_trace.play_clk_en) {
+ error = clk_prepare_enable(aio->cygaud->
+ audio_clk[aio->pll_clk_num]);
+ if (error) {
+ if (aio->clk_trace.cap_clk_en)
+ clk_disable_unprepare(aio->cygaud->
+ audio_clk[aio->pll_clk_num]);
+ dev_err(aio->cygaud->dev, "%s clk_prepare_enable failed\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+#else
+#define cygnus_ssp_suspend NULL
+#define cygnus_ssp_resume NULL
+#endif
+
+static const struct snd_soc_dai_ops cygnus_ssp_dai_ops = {
+ .startup = cygnus_ssp_startup,
+ .shutdown = cygnus_ssp_shutdown,
+ .trigger = cygnus_ssp_trigger,
+ .hw_params = cygnus_ssp_hw_params,
+ .set_fmt = cygnus_ssp_set_fmt,
+ .set_sysclk = cygnus_ssp_set_sysclk,
+ .set_tdm_slot = cygnus_set_dai_tdm_slot,
+};
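
A machine driver reaches these ops through the generic DAI helpers. The sketch below is hypothetical (placeholder function name; it assumes 12.288 MHz appears in pll_predef_mclk and that 256 bits per frame is in ssp_valid_tdm_framesize); it configures a TDM master link with eight 32-bit slots:

#include <sound/soc.h>

static int example_tdm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret;

	/* TDM (DSP_A); bit clock and frame sync driven by the SSP */
	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBS_CFS);
	if (ret)
		return ret;

	/* clk_id and dir are ignored by cygnus_ssp_set_sysclk() */
	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, 12288000, SND_SOC_CLOCK_OUT);
	if (ret)
		return ret;

	/* Eight active 32-bit slots -> 256 bit clocks per frame */
	return snd_soc_dai_set_tdm_slot(cpu_dai, 0xff, 0xff, 8, 32);
}
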
+
+
+#define INIT_CPU_DAI(num) { \
+ .name = "cygnus-ssp" #num, \
+ .playback = { \
+ .channels_min = 1, \
+ .channels_max = 16, \
+ .rates = CYGNUS_TDM_RATE | SNDRV_PCM_RATE_88200 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
+ SNDRV_PCM_RATE_192000, \
+ .formats = SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ }, \
+ .capture = { \
+ .channels_min = 2, \
+ .channels_max = 16, \
+ .rates = CYGNUS_TDM_RATE | SNDRV_PCM_RATE_88200 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 | \
+ SNDRV_PCM_RATE_192000, \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE, \
+ }, \
+ .ops = &cygnus_ssp_dai_ops, \
+ .suspend = cygnus_ssp_suspend, \
+ .resume = cygnus_ssp_resume, \
+}
+
+static const struct snd_soc_dai_driver cygnus_ssp_dai_info[] = {
+ INIT_CPU_DAI(0),
+ INIT_CPU_DAI(1),
+ INIT_CPU_DAI(2),
+};
+
+static struct snd_soc_dai_driver cygnus_spdif_dai_info = {
+ .name = "cygnus-spdif",
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CYGNUS_TDM_RATE | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &cygnus_ssp_dai_ops,
+ .suspend = cygnus_ssp_suspend,
+ .resume = cygnus_ssp_resume,
+};
+
+static struct snd_soc_dai_driver cygnus_ssp_dai[CYGNUS_MAX_PORTS];
+
+static const struct snd_soc_component_driver cygnus_ssp_component = {
+ .name = "cygnus-audio",
+};
+
+/*
+ * Return < 0 on error
+ * Return 0 if the node is parsed successfully and the port is active
+ * (disabled nodes are skipped by for_each_available_child_of_node())
+ */
+static int parse_ssp_child_node(struct platform_device *pdev,
+ struct device_node *dn,
+ struct cygnus_audio *cygaud,
+ struct snd_soc_dai_driver *p_dai)
+{
+ struct cygnus_aio_port *aio;
+ struct cygnus_ssp_regs ssp_regs[3];
+ u32 rawval;
+ int portnum = -1;
+ enum cygnus_audio_port_type port_type;
+
+ if (of_property_read_u32(dn, "reg", &rawval)) {
+ dev_err(&pdev->dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ portnum = rawval;
+ switch (rawval) {
+ case 0:
+ ssp_regs[0] = INIT_SSP_REGS(0);
+ port_type = PORT_TDM;
+ break;
+ case 1:
+ ssp_regs[1] = INIT_SSP_REGS(1);
+ port_type = PORT_TDM;
+ break;
+ case 2:
+ ssp_regs[2] = INIT_SSP_REGS(2);
+ port_type = PORT_TDM;
+ break;
+ case 3:
+ port_type = PORT_SPDIF;
+ break;
+ default:
+ dev_err(&pdev->dev, "Bad value for reg %u\n", rawval);
+ return -EINVAL;
+ }
+
+ aio = &cygaud->portinfo[portnum];
+ aio->cygaud = cygaud;
+ aio->portnum = portnum;
+ aio->port_type = port_type;
+ aio->fsync_width = -1;
+
+ switch (port_type) {
+ case PORT_TDM:
+ aio->regs = ssp_regs[portnum];
+ *p_dai = cygnus_ssp_dai_info[portnum];
+ aio->mode = CYGNUS_SSPMODE_UNKNOWN;
+ break;
+
+ case PORT_SPDIF:
+ aio->regs.bf_sourcech_cfg = BF_SRC_CFG3_OFFSET;
+ aio->regs.bf_sourcech_ctrl = BF_SRC_CTRL3_OFFSET;
+ aio->regs.i2s_mclk_cfg = SPDIF_MCLK_CFG_OFFSET;
+ aio->regs.i2s_stream_cfg = SPDIF_STREAM_CFG_OFFSET;
+ *p_dai = cygnus_spdif_dai_info;
+
+ /* For the purposes of this driver, SPDIF is treated as I2S mode */
+ aio->mode = CYGNUS_SSPMODE_I2S;
+ break;
+ default:
+ dev_err(&pdev->dev, "Bad value for port_type %d\n", port_type);
+ return -EINVAL;
+ }
+
+ dev_dbg(&pdev->dev, "%s portnum = %d\n", __func__, aio->portnum);
+ aio->streams_on = 0;
+ aio->cygaud->dev = &pdev->dev;
+ aio->clk_trace.play_en = false;
+ aio->clk_trace.cap_en = false;
+
+ audio_ssp_init_portregs(aio);
+ return 0;
+}
+
+static int audio_clk_init(struct platform_device *pdev,
+ struct cygnus_audio *cygaud)
+{
+ int i;
+ char clk_name[PROP_LEN_MAX];
+
+ for (i = 0; i < ARRAY_SIZE(cygaud->audio_clk); i++) {
+ snprintf(clk_name, PROP_LEN_MAX, "ch%d_audio", i);
+
+ cygaud->audio_clk[i] = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(cygaud->audio_clk[i]))
+ return PTR_ERR(cygaud->audio_clk[i]);
+ }
+
+ return 0;
+}
+
+static int cygnus_ssp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child_node;
+ struct resource *res = pdev->resource;
+ struct cygnus_audio *cygaud;
+ int err = -EINVAL;
+ int node_count;
+ int active_port_count;
+
+ cygaud = devm_kzalloc(dev, sizeof(struct cygnus_audio), GFP_KERNEL);
+ if (!cygaud)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, cygaud);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aud");
+ cygaud->audio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cygaud->audio))
+ return PTR_ERR(cygaud->audio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "i2s_in");
+ cygaud->i2s_in = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cygaud->i2s_in))
+ return PTR_ERR(cygaud->i2s_in);
+
+ /* Tri-state all controllable pins until we know that we need them */
+ writel(CYGNUS_SSP_TRISTATE_MASK,
+ cygaud->audio + AUD_MISC_SEROUT_OE_REG_BASE);
+
+ node_count = of_get_child_count(pdev->dev.of_node);
+ if ((node_count < 1) || (node_count > CYGNUS_MAX_PORTS)) {
+ dev_err(dev, "child nodes is %d. Must be between 1 and %d\n",
+ node_count, CYGNUS_MAX_PORTS);
+ return -EINVAL;
+ }
+
+ active_port_count = 0;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child_node) {
+ err = parse_ssp_child_node(pdev, child_node, cygaud,
+ &cygnus_ssp_dai[active_port_count]);
+
+ /* negative is an error; 0 means the port is active and parsed */
+ if (err < 0)
+ return err;
+ else if (!err) {
+ dev_dbg(dev, "Activating DAI: %s\n",
+ cygnus_ssp_dai[active_port_count].name);
+ active_port_count++;
+ }
+ }
+
+ cygaud->dev = dev;
+ cygaud->active_ports = 0;
+
+ dev_dbg(dev, "Registering %d DAIs\n", active_port_count);
+ err = snd_soc_register_component(dev, &cygnus_ssp_component,
+ cygnus_ssp_dai, active_port_count);
+ if (err) {
+ dev_err(dev, "snd_soc_register_dai failed\n");
+ return err;
+ }
+
+ cygaud->irq_num = platform_get_irq(pdev, 0);
+ if (cygaud->irq_num <= 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ err = cygaud->irq_num ? cygaud->irq_num : -ENODEV;
+ goto err_irq;
+ }
+
+ err = audio_clk_init(pdev, cygaud);
+ if (err) {
+ dev_err(dev, "audio clock initialization failed\n");
+ goto err_irq;
+ }
+
+ err = cygnus_soc_platform_register(dev, cygaud);
+ if (err) {
+ dev_err(dev, "platform reg error %d\n", err);
+ goto err_irq;
+ }
+
+ return 0;
+
+err_irq:
+ snd_soc_unregister_component(dev);
+ return err;
+}
+
+static int cygnus_ssp_remove(struct platform_device *pdev)
+{
+ cygnus_soc_platform_unregister(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id cygnus_ssp_of_match[] = {
+ { .compatible = "brcm,cygnus-audio" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cygnus_ssp_of_match);
+
+static struct platform_driver cygnus_ssp_driver = {
+ .probe = cygnus_ssp_probe,
+ .remove = cygnus_ssp_remove,
+ .driver = {
+ .name = "cygnus-ssp",
+ .of_match_table = cygnus_ssp_of_match,
+ },
+};
+
+module_platform_driver(cygnus_ssp_driver);
+
+MODULE_ALIAS("platform:cygnus-ssp");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Cygnus ASoC SSP Interface");
diff --git a/sound/soc/bcm/cygnus-ssp.h b/sound/soc/bcm/cygnus-ssp.h
new file mode 100644
index 000000000000..33dd34305928
--- /dev/null
+++ b/sound/soc/bcm/cygnus-ssp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __CYGNUS_SSP_H__
+#define __CYGNUS_SSP_H__
+
+#define CYGNUS_TDM_DAI_MAX_SLOTS 16
+
+#define CYGNUS_MAX_PLAYBACK_PORTS 4
+#define CYGNUS_MAX_CAPTURE_PORTS 3
+#define CYGNUS_MAX_I2S_PORTS 3
+#define CYGNUS_MAX_PORTS CYGNUS_MAX_PLAYBACK_PORTS
+#define CYGNUS_AUIDO_MAX_NUM_CLKS 3
+
+#define CYGNUS_SSP_FRAMEBITS_DIV 1
+
+#define CYGNUS_SSPMODE_I2S 0
+#define CYGNUS_SSPMODE_TDM 1
+#define CYGNUS_SSPMODE_UNKNOWN -1
+
+#define CYGNUS_SSP_CLKSRC_PLL 0
+
+/* Max string length of our dt property names */
+#define PROP_LEN_MAX 40
+
+struct ringbuf_regs {
+ unsigned rdaddr;
+ unsigned wraddr;
+ unsigned baseaddr;
+ unsigned endaddr;
+ unsigned fmark; /* freemark for play, fullmark for capture */
+ unsigned period_bytes;
+ unsigned buf_size;
+};
+
+#define RINGBUF_REG_PLAYBACK(num) ((struct ringbuf_regs) { \
+ .rdaddr = SRC_RBUF_ ##num## _RDADDR_OFFSET, \
+ .wraddr = SRC_RBUF_ ##num## _WRADDR_OFFSET, \
+ .baseaddr = SRC_RBUF_ ##num## _BASEADDR_OFFSET, \
+ .endaddr = SRC_RBUF_ ##num## _ENDADDR_OFFSET, \
+ .fmark = SRC_RBUF_ ##num## _FREE_MARK_OFFSET, \
+ .period_bytes = 0, \
+ .buf_size = 0, \
+})
+
+#define RINGBUF_REG_CAPTURE(num) ((struct ringbuf_regs) { \
+ .rdaddr = DST_RBUF_ ##num## _RDADDR_OFFSET, \
+ .wraddr = DST_RBUF_ ##num## _WRADDR_OFFSET, \
+ .baseaddr = DST_RBUF_ ##num## _BASEADDR_OFFSET, \
+ .endaddr = DST_RBUF_ ##num## _ENDADDR_OFFSET, \
+ .fmark = DST_RBUF_ ##num## _FULL_MARK_OFFSET, \
+ .period_bytes = 0, \
+ .buf_size = 0, \
+})
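
The ##num## paste means the macro argument must be a literal digit, so each port's register maps are filled in with explicit constants. An illustrative use against the play_rb_regs/capture_rb_regs members declared below (the real initialisation lives in the Cygnus PCM driver, and the *_OFFSET names come from the register header):

static inline void example_init_port0_ringbuf(struct cygnus_aio_port *aio)
{
	aio->play_rb_regs = RINGBUF_REG_PLAYBACK(0);
	aio->capture_rb_regs = RINGBUF_REG_CAPTURE(0);
}
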
+
+enum cygnus_audio_port_type {
+ PORT_TDM,
+ PORT_SPDIF,
+};
+
+struct cygnus_ssp_regs {
+ u32 i2s_stream_cfg;
+ u32 i2s_cfg;
+ u32 i2s_cap_stream_cfg;
+ u32 i2s_cap_cfg;
+ u32 i2s_mclk_cfg;
+
+ u32 bf_destch_ctrl;
+ u32 bf_destch_cfg;
+ u32 bf_sourcech_ctrl;
+ u32 bf_sourcech_cfg;
+ u32 bf_sourcech_grp;
+};
+
+struct cygnus_track_clk {
+ bool cap_en;
+ bool play_en;
+ bool cap_clk_en;
+ bool play_clk_en;
+};
+
+struct cygnus_aio_port {
+ int portnum;
+ int mode;
+ bool is_slave;
+ int streams_on; /* will be 0 if both capture and play are off */
+ int fsync_width;
+ int port_type;
+
+ u32 mclk;
+ u32 lrclk;
+ u32 bit_per_frame;
+ u32 pll_clk_num;
+
+ struct cygnus_audio *cygaud;
+ struct cygnus_ssp_regs regs;
+
+ struct ringbuf_regs play_rb_regs;
+ struct ringbuf_regs capture_rb_regs;
+
+ struct snd_pcm_substream *play_stream;
+ struct snd_pcm_substream *capture_stream;
+
+ struct cygnus_track_clk clk_trace;
+};
+
+
+struct cygnus_audio {
+ struct cygnus_aio_port portinfo[CYGNUS_MAX_PORTS];
+
+ int irq_num;
+ void __iomem *audio;
+ struct device *dev;
+ void __iomem *i2s_in;
+
+ struct clk *audio_clk[CYGNUS_AUIDO_MAX_NUM_CLKS];
+ int active_ports;
+ unsigned long vco_rate;
+};
+
+extern int cygnus_ssp_get_mode(struct snd_soc_dai *cpu_dai);
+extern int cygnus_ssp_add_pll_tweak_controls(struct snd_soc_pcm_runtime *rtd);
+extern int cygnus_ssp_set_custom_fsync_width(struct snd_soc_dai *cpu_dai,
+ int len);
+extern int cygnus_soc_platform_register(struct device *dev,
+ struct cygnus_audio *cygaud);
+extern int cygnus_soc_platform_unregister(struct device *dev);
+#endif
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index f3fb98f0a995..1cd6ab344d67 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -32,6 +32,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ADAU1977_SPI if SPI_MASTER
select SND_SOC_ADAU1977_I2C if I2C
select SND_SOC_ADAU1701 if I2C
+ select SND_SOC_ADAU7002
select SND_SOC_ADS117X
select SND_SOC_AK4104 if SPI_MASTER
select SND_SOC_AK4535 if I2C
@@ -46,6 +47,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_BT_SCO
select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
select SND_SOC_CS35L32 if I2C
+ select SND_SOC_CS35L33 if I2C
select SND_SOC_CS42L51_I2C if I2C
select SND_SOC_CS42L52 if I2C && INPUT
select SND_SOC_CS42L56 if I2C && INPUT
@@ -57,6 +59,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CS42XX8_I2C if I2C
select SND_SOC_CS4349 if I2C
select SND_SOC_CS47L24 if MFD_CS47L24
+ select SND_SOC_CS53L30 if I2C
select SND_SOC_CX20442 if TTY
select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
select SND_SOC_DA7213 if I2C
@@ -84,6 +87,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
select SND_SOC_MAX9850 if I2C
+ select SND_SOC_MAX9860 if I2C
select SND_SOC_MAX9768 if I2C
select SND_SOC_MAX9877 if I2C
select SND_SOC_MC13783 if MFD_MC13XXX
@@ -269,8 +273,12 @@ config SND_SOC_AD1980
config SND_SOC_AD73311
tristate
+config SND_SOC_ADAU_UTILS
+ tristate
+
config SND_SOC_ADAU1373
tristate
+ select SND_SOC_ADAU_UTILS
config SND_SOC_ADAU1701
tristate "Analog Devices ADAU1701 CODEC"
@@ -280,6 +288,7 @@ config SND_SOC_ADAU1701
config SND_SOC_ADAU17X1
tristate
select SND_SOC_SIGMADSP_REGMAP
+ select SND_SOC_ADAU_UTILS
config SND_SOC_ADAU1761
tristate
@@ -322,6 +331,9 @@ config SND_SOC_ADAU1977_I2C
select SND_SOC_ADAU1977
select REGMAP_I2C
+config SND_SOC_ADAU7002
+ tristate "Analog Devices ADAU7002 Stereo PDM-to-I2S/TDM Converter"
+
config SND_SOC_ADAV80X
tristate
@@ -371,7 +383,7 @@ config SND_SOC_ALC5632
tristate
config SND_SOC_BT_SCO
- tristate
+ tristate "Dummy BT SCO codec driver"
config SND_SOC_CQ0093VC
tristate
@@ -380,6 +392,10 @@ config SND_SOC_CS35L32
tristate "Cirrus Logic CS35L32 CODEC"
depends on I2C
+config SND_SOC_CS35L33
+ tristate "Cirrus Logic CS35L33 CODEC"
+ depends on I2C
+
config SND_SOC_CS42L51
tristate
@@ -450,6 +466,11 @@ config SND_SOC_CS4349
config SND_SOC_CS47L24
tristate
+# Cirrus Logic Quad-Channel ADC
+config SND_SOC_CS53L30
+ tristate "Cirrus Logic CS53L30 CODEC"
+ depends on I2C
+
config SND_SOC_CX20442
tristate
depends on TTY
@@ -536,6 +557,10 @@ config SND_SOC_MAX98357A
config SND_SOC_MAX98371
tristate
+config SND_SOC_MAX98504
+ tristate "Maxim MAX98504 speaker amplifier"
+ depends on I2C
+
config SND_SOC_MAX9867
tristate
@@ -548,6 +573,11 @@ config SND_SOC_MAX98926
config SND_SOC_MAX9850
tristate
+config SND_SOC_MAX9860
+ tristate "Maxim MAX9860 Mono Audio Voice Codec"
+ depends on I2C
+ select REGMAP_I2C
+
config SND_SOC_PCM1681
tristate "Texas Instruments PCM1681 CODEC"
depends on I2C
@@ -644,6 +674,9 @@ config SND_SOC_RT298
config SND_SOC_RT5514
tristate
+config SND_SOC_RT5514_SPI
+ tristate
+
config SND_SOC_RT5616
tristate "Realtek RT5616 CODEC"
depends on I2C
@@ -969,7 +1002,8 @@ config SND_SOC_WM8983
tristate
config SND_SOC_WM8985
- tristate
+ tristate "Wolfson Microelectronics WM8985 and WM8758 codec driver"
+ depends on SND_SOC_I2C_AND_SPI
config SND_SOC_WM8988
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 0f548fd34ca3..58036af2c7d9 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -7,6 +7,7 @@ snd-soc-ad193x-spi-objs := ad193x-spi.o
snd-soc-ad193x-i2c-objs := ad193x-i2c.o
snd-soc-ad1980-objs := ad1980.o
snd-soc-ad73311-objs := ad73311.o
+snd-soc-adau-utils-objs := adau-utils.o
snd-soc-adau1373-objs := adau1373.o
snd-soc-adau1701-objs := adau1701.o
snd-soc-adau17x1-objs := adau17x1.o
@@ -19,6 +20,7 @@ snd-soc-adau1781-spi-objs := adau1781-spi.o
snd-soc-adau1977-objs := adau1977.o
snd-soc-adau1977-spi-objs := adau1977-spi.o
snd-soc-adau1977-i2c-objs := adau1977-i2c.o
+snd-soc-adau7002-objs := adau7002.o
snd-soc-adav80x-objs := adav80x.o
snd-soc-adav801-objs := adav801.o
snd-soc-adav803-objs := adav803.o
@@ -35,6 +37,7 @@ snd-soc-arizona-objs := arizona.o
snd-soc-bt-sco-objs := bt-sco.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs35l32-objs := cs35l32.o
+snd-soc-cs35l33-objs := cs35l33.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
snd-soc-cs42l52-objs := cs42l52.o
@@ -49,6 +52,7 @@ snd-soc-cs42xx8-objs := cs42xx8.o
snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
snd-soc-cs4349-objs := cs4349.o
snd-soc-cs47l24-objs := cs47l24.o
+snd-soc-cs53l30-objs := cs53l30.o
snd-soc-cx20442-objs := cx20442.o
snd-soc-da7210-objs := da7210.o
snd-soc-da7213-objs := da7213.o
@@ -79,6 +83,7 @@ snd-soc-max9867-objs := max9867.o
snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
snd-soc-max9850-objs := max9850.o
+snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
snd-soc-nau8825-objs := nau8825.o
@@ -100,6 +105,7 @@ snd-soc-rl6347a-objs := rl6347a.o
snd-soc-rt286-objs := rt286.o
snd-soc-rt298-objs := rt298.o
snd-soc-rt5514-objs := rt5514.o
+snd-soc-rt5514-spi-objs := rt5514-spi.o
snd-soc-rt5616-objs := rt5616.o
snd-soc-rt5631-objs := rt5631.o
snd-soc-rt5640-objs := rt5640.o
@@ -208,6 +214,7 @@ snd-soc-wm-hubs-objs := wm_hubs.o
# Amp
snd-soc-max9877-objs := max9877.o
+snd-soc-max98504-objs := max98504.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-tas2552-objs := tas2552.o
@@ -220,6 +227,7 @@ obj-$(CONFIG_SND_SOC_AD193X_SPI) += snd-soc-ad193x-spi.o
obj-$(CONFIG_SND_SOC_AD193X_I2C) += snd-soc-ad193x-i2c.o
obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o
obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o
+obj-$(CONFIG_SND_SOC_ADAU_UTILS) += snd-soc-adau-utils.o
obj-$(CONFIG_SND_SOC_ADAU1373) += snd-soc-adau1373.o
obj-$(CONFIG_SND_SOC_ADAU1701) += snd-soc-adau1701.o
obj-$(CONFIG_SND_SOC_ADAU17X1) += snd-soc-adau17x1.o
@@ -232,6 +240,7 @@ obj-$(CONFIG_SND_SOC_ADAU1781_SPI) += snd-soc-adau1781-spi.o
obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o
obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o
obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o
+obj-$(CONFIG_SND_SOC_ADAU7002) += snd-soc-adau7002.o
obj-$(CONFIG_SND_SOC_ADAV80X) += snd-soc-adav80x.o
obj-$(CONFIG_SND_SOC_ADAV801) += snd-soc-adav801.o
obj-$(CONFIG_SND_SOC_ADAV803) += snd-soc-adav803.o
@@ -250,6 +259,7 @@ obj-$(CONFIG_SND_SOC_ARIZONA) += snd-soc-arizona.o
obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
+obj-$(CONFIG_SND_SOC_CS35L33) += snd-soc-cs35l33.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L51_I2C) += snd-soc-cs42l51-i2c.o
obj-$(CONFIG_SND_SOC_CS42L52) += snd-soc-cs42l52.o
@@ -264,6 +274,7 @@ obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o
obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
obj-$(CONFIG_SND_SOC_CS4349) += snd-soc-cs4349.o
obj-$(CONFIG_SND_SOC_CS47L24) += snd-soc-cs47l24.o
+obj-$(CONFIG_SND_SOC_CS53L30) += snd-soc-cs53l30.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
obj-$(CONFIG_SND_SOC_DA7213) += snd-soc-da7213.o
@@ -293,6 +304,7 @@ obj-$(CONFIG_SND_SOC_MAX9867) += snd-soc-max9867.o
obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
+obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
@@ -314,6 +326,7 @@ obj-$(CONFIG_SND_SOC_RL6347A) += snd-soc-rl6347a.o
obj-$(CONFIG_SND_SOC_RT286) += snd-soc-rt286.o
obj-$(CONFIG_SND_SOC_RT298) += snd-soc-rt298.o
obj-$(CONFIG_SND_SOC_RT5514) += snd-soc-rt5514.o
+obj-$(CONFIG_SND_SOC_RT5514_SPI) += snd-soc-rt5514-spi.o
obj-$(CONFIG_SND_SOC_RT5616) += snd-soc-rt5616.o
obj-$(CONFIG_SND_SOC_RT5631) += snd-soc-rt5631.o
obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
@@ -419,4 +432,5 @@ obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
# Amp
obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
+obj-$(CONFIG_SND_SOC_MAX98504) += snd-soc-max98504.o
obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 8b1d0c1a7839..2fc89155f14a 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2464,45 +2464,20 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
struct device *dev = codec->dev;
struct device_node *np = dev->of_node;
struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(dev);
- struct ab8500_platform_data *pdata;
+ struct ab8500_codec_platform_data codec_pdata;
struct filter_control *fc;
int status;
dev_dbg(dev, "%s: Enter.\n", __func__);
- /* Setup AB8500 according to board-settings */
- pdata = dev_get_platdata(dev->parent);
+ ab8500_codec_of_probe(dev, np, &codec_pdata);
- if (np) {
- if (!pdata)
- pdata = devm_kzalloc(dev,
- sizeof(struct ab8500_platform_data),
- GFP_KERNEL);
-
- if (pdata && !pdata->codec)
- pdata->codec
- = devm_kzalloc(dev,
- sizeof(struct ab8500_codec_platform_data),
- GFP_KERNEL);
-
- if (!(pdata && pdata->codec))
- return -ENOMEM;
-
- ab8500_codec_of_probe(dev, np, pdata->codec);
-
- } else {
- if (!(pdata && pdata->codec)) {
- dev_err(dev, "No codec platform data or DT found\n");
- return -EINVAL;
- }
- }
-
- status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
+ status = ab8500_audio_setup_mics(codec, &codec_pdata.amics);
if (status < 0) {
pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
return status;
}
- status = ab8500_audio_set_ear_cmv(codec, pdata->codec->ear_cmv);
+ status = ab8500_audio_set_ear_cmv(codec, codec_pdata.ear_cmv);
if (status < 0) {
pr_err("%s: Failed to set earpiece CM-voltage (%d)!\n",
__func__, status);
diff --git a/sound/soc/codecs/adau-utils.c b/sound/soc/codecs/adau-utils.c
new file mode 100644
index 000000000000..19d6a6f41b12
--- /dev/null
+++ b/sound/soc/codecs/adau-utils.c
@@ -0,0 +1,61 @@
+/*
+ * Shared helper functions for devices from the ADAU family
+ *
+ * Copyright 2011-2016 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/gcd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "adau-utils.h"
+
+int adau_calc_pll_cfg(unsigned int freq_in, unsigned int freq_out,
+ uint8_t regs[5])
+{
+ unsigned int r, n, m, i, j;
+ unsigned int div;
+
+ if (!freq_out) {
+ r = 0;
+ n = 0;
+ m = 0;
+ div = 0;
+ } else {
+ if (freq_out % freq_in != 0) {
+ div = DIV_ROUND_UP(freq_in, 13500000);
+ freq_in /= div;
+ r = freq_out / freq_in;
+ i = freq_out % freq_in;
+ j = gcd(i, freq_in);
+ n = i / j;
+ m = freq_in / j;
+ div--;
+ } else {
+ r = freq_out / freq_in;
+ n = 0;
+ m = 0;
+ div = 0;
+ }
+ if (n > 0xffff || m > 0xffff || div > 3 || r > 8 || r < 2)
+ return -EINVAL;
+ }
+
+ regs[0] = m >> 8;
+ regs[1] = m & 0xff;
+ regs[2] = n >> 8;
+ regs[3] = n & 0xff;
+ regs[4] = (r << 3) | (div << 1);
+ if (m != 0)
+ regs[4] |= 1; /* Fractional mode */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adau_calc_pll_cfg);
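
A hand-traced call, useful for sanity-checking the fractional branch (12 MHz MCLK, 1024*fs target for the 48 kHz family); the wrapper function is only a sketch:

static void example_check_pll_cfg(void)
{
	uint8_t regs[5];
	int ret = adau_calc_pll_cfg(12000000, 49152000, regs);

	/*
	 * div = DIV_ROUND_UP(12000000, 13500000) = 1, so freq_in stays 12 MHz
	 * r = 4; i = 1152000; j = gcd(1152000, 12000000) = 96000 -> n = 12, m = 125
	 * fout = 12 MHz * (4 + 12/125) = 49.152 MHz
	 * so ret == 0 and regs == { 0x00, 0x7d, 0x00, 0x0c, 0x21 } (fractional bit set)
	 */
	(void)ret;
}
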
+
+MODULE_DESCRIPTION("ASoC ADAU audio CODECs shared helper functions");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/adau-utils.h b/sound/soc/codecs/adau-utils.h
new file mode 100644
index 000000000000..939b5f37762f
--- /dev/null
+++ b/sound/soc/codecs/adau-utils.h
@@ -0,0 +1,7 @@
+#ifndef SOUND_SOC_CODECS_ADAU_PLL_H
+#define SOUND_SOC_CODECS_ADAU_PLL_H
+
+int adau_calc_pll_cfg(unsigned int freq_in, unsigned int freq_out,
+ uint8_t regs[5]);
+
+#endif
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index fe1353a797b9..1556b360fa15 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -23,6 +23,7 @@
#include <sound/adau1373.h>
#include "adau1373.h"
+#include "adau-utils.h"
struct adau1373_dai {
unsigned int clk_src;
@@ -1254,7 +1255,8 @@ static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
{
struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
unsigned int dpll_div = 0;
- unsigned int x, r, n, m, i, j, mode;
+ uint8_t pll_regs[5];
+ int ret;
switch (pll_id) {
case ADAU1373_PLL1:
@@ -1295,27 +1297,8 @@ static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
dpll_div++;
}
- if (freq_out % freq_in != 0) {
- /* fout = fin * (r + (n/m)) / x */
- x = DIV_ROUND_UP(freq_in, 13500000);
- freq_in /= x;
- r = freq_out / freq_in;
- i = freq_out % freq_in;
- j = gcd(i, freq_in);
- n = i / j;
- m = freq_in / j;
- x--;
- mode = 1;
- } else {
- /* fout = fin / r */
- r = freq_out / freq_in;
- n = 0;
- m = 0;
- x = 0;
- mode = 0;
- }
-
- if (r < 2 || r > 8 || x > 3 || m > 0xffff || n > 0xffff)
+ ret = adau_calc_pll_cfg(freq_in, freq_out, pll_regs);
+ if (ret)
return -EINVAL;
if (dpll_div) {
@@ -1330,12 +1313,11 @@ static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
regmap_write(adau1373->regmap, ADAU1373_DPLL_CTRL(pll_id),
(source << 4) | dpll_div);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL2(pll_id), m & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL4(pll_id), n & 0xff);
- regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL5(pll_id),
- (r << 3) | (x << 1) | mode);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL1(pll_id), pll_regs[0]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL2(pll_id), pll_regs[1]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL3(pll_id), pll_regs[2]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL4(pll_id), pll_regs[3]);
+ regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL5(pll_id), pll_regs[4]);
/* Set sysclk to pll_rate / 4 */
regmap_update_bits(adau1373->regmap, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09);
diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c
index 8de010f758cd..9e7f257f17f8 100644
--- a/sound/soc/codecs/adau1761-i2c.c
+++ b/sound/soc/codecs/adau1761-i2c.c
@@ -31,7 +31,7 @@ static int adau1761_i2c_probe(struct i2c_client *client,
static int adau1761_i2c_remove(struct i2c_client *client)
{
- snd_soc_unregister_codec(&client->dev);
+ adau17x1_remove(&client->dev);
return 0;
}
diff --git a/sound/soc/codecs/adau1761-spi.c b/sound/soc/codecs/adau1761-spi.c
index d9171245bd9f..a0b214be759a 100644
--- a/sound/soc/codecs/adau1761-spi.c
+++ b/sound/soc/codecs/adau1761-spi.c
@@ -48,7 +48,7 @@ static int adau1761_spi_probe(struct spi_device *spi)
static int adau1761_spi_remove(struct spi_device *spi)
{
- snd_soc_unregister_codec(&spi->dev);
+ adau17x1_remove(&spi->dev);
return 0;
}
diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c
index 06cbca84cf02..7b9d1802d159 100644
--- a/sound/soc/codecs/adau1781-i2c.c
+++ b/sound/soc/codecs/adau1781-i2c.c
@@ -31,7 +31,7 @@ static int adau1781_i2c_probe(struct i2c_client *client,
static int adau1781_i2c_remove(struct i2c_client *client)
{
- snd_soc_unregister_codec(&client->dev);
+ adau17x1_remove(&client->dev);
return 0;
}
diff --git a/sound/soc/codecs/adau1781-spi.c b/sound/soc/codecs/adau1781-spi.c
index 3d965a01b99c..9b233544d2e8 100644
--- a/sound/soc/codecs/adau1781-spi.c
+++ b/sound/soc/codecs/adau1781-spi.c
@@ -48,7 +48,7 @@ static int adau1781_spi_probe(struct spi_device *spi)
static int adau1781_spi_remove(struct spi_device *spi)
{
- snd_soc_unregister_codec(&spi->dev);
+ adau17x1_remove(&spi->dev);
return 0;
}
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index fcf05b254ecd..439aa3ff1f99 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -23,6 +24,7 @@
#include "sigmadsp.h"
#include "adau17x1.h"
+#include "adau-utils.h"
static const char * const adau17x1_capture_mixer_boost_text[] = {
"Normal operation", "Boost Level 1", "Boost Level 2", "Boost Level 3",
@@ -302,6 +304,116 @@ bool adau17x1_has_dsp(struct adau *adau)
}
EXPORT_SYMBOL_GPL(adau17x1_has_dsp);
+static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id,
+ int source, unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct adau *adau = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ if (freq_in < 8000000 || freq_in > 27000000)
+ return -EINVAL;
+
+ ret = adau_calc_pll_cfg(freq_in, freq_out, adau->pll_regs);
+ if (ret < 0)
+ return ret;
+
+ /* The PLL register is 6 bytes long and can only be written at once. */
+ ret = regmap_raw_write(adau->regmap, ADAU17X1_PLL_CONTROL,
+ adau->pll_regs, ARRAY_SIZE(adau->pll_regs));
+ if (ret)
+ return ret;
+
+ adau->pll_freq = freq_out;
+
+ return 0;
+}
+
+static int adau17x1_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(dai->codec);
+ struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);
+ bool is_pll;
+ bool was_pll;
+
+ switch (clk_id) {
+ case ADAU17X1_CLK_SRC_MCLK:
+ is_pll = false;
+ break;
+ case ADAU17X1_CLK_SRC_PLL_AUTO:
+ if (!adau->mclk)
+ return -EINVAL;
+ /* Fall-through */
+ case ADAU17X1_CLK_SRC_PLL:
+ is_pll = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (adau->clk_src) {
+ case ADAU17X1_CLK_SRC_MCLK:
+ was_pll = false;
+ break;
+ case ADAU17X1_CLK_SRC_PLL:
+ case ADAU17X1_CLK_SRC_PLL_AUTO:
+ was_pll = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adau->sysclk = freq;
+
+ if (is_pll != was_pll) {
+ if (is_pll) {
+ snd_soc_dapm_add_routes(dapm,
+ &adau17x1_dapm_pll_route, 1);
+ } else {
+ snd_soc_dapm_del_routes(dapm,
+ &adau17x1_dapm_pll_route, 1);
+ }
+ }
+
+ adau->clk_src = clk_id;
+
+ return 0;
+}
+
+static int adau17x1_auto_pll(struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *params)
+{
+ struct adau *adau = snd_soc_dai_get_drvdata(dai);
+ unsigned int pll_rate;
+
+ switch (params_rate(params)) {
+ case 48000:
+ case 8000:
+ case 12000:
+ case 16000:
+ case 24000:
+ case 32000:
+ case 96000:
+ pll_rate = 48000 * 1024;
+ break;
+ case 44100:
+ case 7350:
+ case 11025:
+ case 14700:
+ case 22050:
+ case 29400:
+ case 88200:
+ pll_rate = 44100 * 1024;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return adau17x1_set_dai_pll(dai, ADAU17X1_PLL, ADAU17X1_PLL_SRC_MCLK,
+ clk_get_rate(adau->mclk), pll_rate);
+}
+
static int adau17x1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
@@ -311,10 +423,19 @@ static int adau17x1_hw_params(struct snd_pcm_substream *substream,
unsigned int freq;
int ret;
- if (adau->clk_src == ADAU17X1_CLK_SRC_PLL)
+ switch (adau->clk_src) {
+ case ADAU17X1_CLK_SRC_PLL_AUTO:
+ ret = adau17x1_auto_pll(dai, params);
+ if (ret)
+ return ret;
+ /* Fall-through */
+ case ADAU17X1_CLK_SRC_PLL:
freq = adau->pll_freq;
- else
+ break;
+ default:
freq = adau->sysclk;
+ break;
+ }
if (freq % params_rate(params) != 0)
return -EINVAL;
@@ -386,93 +507,6 @@ static int adau17x1_hw_params(struct snd_pcm_substream *substream,
ADAU17X1_SERIAL_PORT1_DELAY_MASK, val);
}
-static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id,
- int source, unsigned int freq_in, unsigned int freq_out)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct adau *adau = snd_soc_codec_get_drvdata(codec);
- unsigned int r, n, m, i, j;
- unsigned int div;
- int ret;
-
- if (freq_in < 8000000 || freq_in > 27000000)
- return -EINVAL;
-
- if (!freq_out) {
- r = 0;
- n = 0;
- m = 0;
- div = 0;
- } else {
- if (freq_out % freq_in != 0) {
- div = DIV_ROUND_UP(freq_in, 13500000);
- freq_in /= div;
- r = freq_out / freq_in;
- i = freq_out % freq_in;
- j = gcd(i, freq_in);
- n = i / j;
- m = freq_in / j;
- div--;
- } else {
- r = freq_out / freq_in;
- n = 0;
- m = 0;
- div = 0;
- }
- if (n > 0xffff || m > 0xffff || div > 3 || r > 8 || r < 2)
- return -EINVAL;
- }
-
- adau->pll_regs[0] = m >> 8;
- adau->pll_regs[1] = m & 0xff;
- adau->pll_regs[2] = n >> 8;
- adau->pll_regs[3] = n & 0xff;
- adau->pll_regs[4] = (r << 3) | (div << 1);
- if (m != 0)
- adau->pll_regs[4] |= 1; /* Fractional mode */
-
- /* The PLL register is 6 bytes long and can only be written at once. */
- ret = regmap_raw_write(adau->regmap, ADAU17X1_PLL_CONTROL,
- adau->pll_regs, ARRAY_SIZE(adau->pll_regs));
- if (ret)
- return ret;
-
- adau->pll_freq = freq_out;
-
- return 0;
-}
-
-static int adau17x1_set_dai_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int freq, int dir)
-{
- struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(dai->codec);
- struct adau *adau = snd_soc_codec_get_drvdata(dai->codec);
-
- switch (clk_id) {
- case ADAU17X1_CLK_SRC_MCLK:
- case ADAU17X1_CLK_SRC_PLL:
- break;
- default:
- return -EINVAL;
- }
-
- adau->sysclk = freq;
-
- if (adau->clk_src != clk_id) {
- if (clk_id == ADAU17X1_CLK_SRC_PLL) {
- snd_soc_dapm_add_routes(dapm,
- &adau17x1_dapm_pll_route, 1);
- } else {
- snd_soc_dapm_del_routes(dapm,
- &adau17x1_dapm_pll_route, 1);
- }
- }
-
- adau->clk_src = clk_id;
-
- return 0;
-}
-
static int adau17x1_set_dai_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
@@ -857,6 +891,10 @@ int adau17x1_add_routes(struct snd_soc_codec *codec)
ret = snd_soc_dapm_add_routes(dapm, adau17x1_no_dsp_dapm_routes,
ARRAY_SIZE(adau17x1_no_dsp_dapm_routes));
}
+
+ if (adau->clk_src != ADAU17X1_CLK_SRC_MCLK)
+ snd_soc_dapm_add_routes(dapm, &adau17x1_dapm_pll_route, 1);
+
return ret;
}
EXPORT_SYMBOL_GPL(adau17x1_add_routes);
@@ -879,6 +917,7 @@ int adau17x1_probe(struct device *dev, struct regmap *regmap,
const char *firmware_name)
{
struct adau *adau;
+ int ret;
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -887,6 +926,30 @@ int adau17x1_probe(struct device *dev, struct regmap *regmap,
if (!adau)
return -ENOMEM;
+ adau->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(adau->mclk)) {
+ if (PTR_ERR(adau->mclk) != -ENOENT)
+ return PTR_ERR(adau->mclk);
+ /* Clock is optional (for the driver) */
+ adau->mclk = NULL;
+ } else if (adau->mclk) {
+ adau->clk_src = ADAU17X1_CLK_SRC_PLL_AUTO;
+
+ /*
+ * Any valid PLL output rate will work at this point, use one
+ * that is likely to be chosen later as well. The register will
+ * be written when the PLL is powered up for the first time.
+ */
+ ret = adau_calc_pll_cfg(clk_get_rate(adau->mclk), 48000 * 1024,
+ adau->pll_regs);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(adau->mclk);
+ if (ret)
+ return ret;
+ }
+
adau->regmap = regmap;
adau->switch_mode = switch_mode;
adau->type = type;
@@ -910,6 +973,16 @@ int adau17x1_probe(struct device *dev, struct regmap *regmap,
}
EXPORT_SYMBOL_GPL(adau17x1_probe);
+void adau17x1_remove(struct device *dev)
+{
+ struct adau *adau = dev_get_drvdata(dev);
+
+ snd_soc_unregister_codec(dev);
+ if (adau->mclk)
+ clk_disable_unprepare(adau->mclk);
+}
+EXPORT_SYMBOL_GPL(adau17x1_remove);
+
MODULE_DESCRIPTION("ASoC ADAU1X61/ADAU1X81 common code");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index 5ae87a084d97..bf04b7efee40 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -22,13 +22,18 @@ enum adau17x1_pll_src {
};
enum adau17x1_clk_src {
+ /* Automatically configure PLL based on the sample rate */
+ ADAU17X1_CLK_SRC_PLL_AUTO,
ADAU17X1_CLK_SRC_MCLK,
ADAU17X1_CLK_SRC_PLL,
};
+struct clk;
+
struct adau {
unsigned int sysclk;
unsigned int pll_freq;
+ struct clk *mclk;
enum adau17x1_clk_src clk_src;
enum adau17x1_type type;
@@ -52,6 +57,7 @@ int adau17x1_add_routes(struct snd_soc_codec *codec);
int adau17x1_probe(struct device *dev, struct regmap *regmap,
enum adau17x1_type type, void (*switch_mode)(struct device *dev),
const char *firmware_name);
+void adau17x1_remove(struct device *dev);
int adau17x1_set_micbias_voltage(struct snd_soc_codec *codec,
enum adau17x1_micbias_voltage micbias);
bool adau17x1_readable_register(struct device *dev, unsigned int reg);
diff --git a/sound/soc/codecs/adau7002.c b/sound/soc/codecs/adau7002.c
new file mode 100644
index 000000000000..9df72c6adcca
--- /dev/null
+++ b/sound/soc/codecs/adau7002.c
@@ -0,0 +1,80 @@
+/*
+ * ADAU7002 Stereo PDM-to-I2S/TDM converter driver
+ *
+ * Copyright 2014-2016 Analog Devices
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <sound/soc.h>
+
+static const struct snd_soc_dapm_widget adau7002_widgets[] = {
+ SND_SOC_DAPM_INPUT("PDM_DAT"),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("IOVDD", 0, 0),
+};
+
+static const struct snd_soc_dapm_route adau7002_routes[] = {
+ { "Capture", NULL, "PDM_DAT" },
+ { "Capture", NULL, "IOVDD" },
+};
+
+static struct snd_soc_dai_driver adau7002_dai = {
+ .name = "adau7002-hifi",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE,
+ .sig_bits = 20,
+ },
+};
+
+static const struct snd_soc_codec_driver adau7002_codec_driver = {
+ .dapm_widgets = adau7002_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau7002_widgets),
+ .dapm_routes = adau7002_routes,
+ .num_dapm_routes = ARRAY_SIZE(adau7002_routes),
+};
+
+static int adau7002_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_codec(&pdev->dev, &adau7002_codec_driver,
+ &adau7002_dai, 1);
+}
+
+static int adau7002_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id adau7002_dt_ids[] = {
+ { .compatible = "adi,adau7002", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adau7002_dt_ids);
+#endif
+
+static struct platform_driver adau7002_driver = {
+ .driver = {
+ .name = "adau7002",
+ .of_match_table = of_match_ptr(adau7002_dt_ids),
+ },
+ .probe = adau7002_probe,
+ .remove = adau7002_remove,
+};
+module_platform_driver(adau7002_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADAU7002 Stereo PDM-to-I2S/TDM Converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 5013d2ba0c10..97798d250f08 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -437,15 +437,25 @@ static struct snd_soc_dai_driver ak4613_dai = {
.symmetric_rates = 1,
};
-static int ak4613_resume(struct snd_soc_codec *codec)
+static int ak4613_suspend(struct snd_soc_codec *codec)
{
struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+ regcache_cache_only(regmap, true);
regcache_mark_dirty(regmap);
+ return 0;
+}
+
+static int ak4613_resume(struct snd_soc_codec *codec)
+{
+ struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+
+ regcache_cache_only(regmap, false);
return regcache_sync(regmap);
}
static struct snd_soc_codec_driver soc_codec_dev_ak4613 = {
+ .suspend = ak4613_suspend,
.resume = ak4613_resume,
.set_bias_level = ak4613_set_bias_level,
.controls = ak4613_snd_controls,
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 4d8b9e49e8d6..cc941d66ec3d 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -523,15 +523,23 @@ static struct snd_soc_dai_driver ak4642_dai = {
.symmetric_rates = 1,
};
-static int ak4642_resume(struct snd_soc_codec *codec)
+static int ak4642_suspend(struct snd_soc_codec *codec)
{
struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+ regcache_cache_only(regmap, true);
regcache_mark_dirty(regmap);
- regcache_sync(regmap);
return 0;
}
+static int ak4642_resume(struct snd_soc_codec *codec)
+{
+ struct regmap *regmap = dev_get_regmap(codec->dev, NULL);
+
+ regcache_cache_only(regmap, false);
+ regcache_sync(regmap);
+ return 0;
+}
static int ak4642_probe(struct snd_soc_codec *codec)
{
struct ak4642_priv *priv = snd_soc_codec_get_drvdata(codec);
@@ -544,6 +552,7 @@ static int ak4642_probe(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
.probe = ak4642_probe,
+ .suspend = ak4642_suspend,
.resume = ak4642_resume,
.set_bias_level = ak4642_set_bias_level,
.controls = ak4642_snd_controls,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 664a8c044ffb..ecfdbfcae366 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -85,30 +85,9 @@ static int arizona_spk_ev(struct snd_soc_dapm_widget *w,
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
- bool manual_ena = false;
int val;
- switch (arizona->type) {
- case WM5102:
- switch (arizona->rev) {
- case 0:
- break;
- default:
- manual_ena = true;
- break;
- }
- default:
- break;
- }
-
switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- if (!priv->spk_ena && manual_ena) {
- regmap_write_async(arizona->regmap, 0x4f5, 0x25a);
- priv->spk_ena_pending = true;
- }
- break;
case SND_SOC_DAPM_POST_PMU:
val = snd_soc_read(codec, ARIZONA_INTERRUPT_RAW_STATUS_3);
if (val & ARIZONA_SPK_OVERHEAT_STS) {
@@ -120,33 +99,12 @@ static int arizona_spk_ev(struct snd_soc_dapm_widget *w,
regmap_update_bits_async(arizona->regmap,
ARIZONA_OUTPUT_ENABLES_1,
1 << w->shift, 1 << w->shift);
-
- if (priv->spk_ena_pending) {
- msleep(75);
- regmap_write_async(arizona->regmap, 0x4f5, 0xda);
- priv->spk_ena_pending = false;
- priv->spk_ena++;
- }
break;
case SND_SOC_DAPM_PRE_PMD:
- if (manual_ena) {
- priv->spk_ena--;
- if (!priv->spk_ena)
- regmap_write_async(arizona->regmap,
- 0x4f5, 0x25a);
- }
-
regmap_update_bits_async(arizona->regmap,
ARIZONA_OUTPUT_ENABLES_1,
1 << w->shift, 0);
break;
- case SND_SOC_DAPM_POST_PMD:
- if (manual_ena) {
- if (!priv->spk_ena)
- regmap_write_async(arizona->regmap,
- 0x4f5, 0x0da);
- }
- break;
default:
break;
}
@@ -324,6 +282,17 @@ int arizona_init_gpio(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(arizona_init_gpio);
+int arizona_init_notifiers(struct snd_soc_codec *codec)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&arizona->notifier);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_notifiers);
+
const char * const arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS] = {
"None",
"Tone Generator 1",
@@ -619,7 +588,7 @@ const struct soc_enum arizona_asrc_rate1 =
arizona_rate_text, arizona_rate_val);
EXPORT_SYMBOL_GPL(arizona_asrc_rate1);
-static const char *arizona_vol_ramp_text[] = {
+static const char * const arizona_vol_ramp_text[] = {
"0ms/6dB", "0.5ms/6dB", "1ms/6dB", "2ms/6dB", "4ms/6dB", "8ms/6dB",
"15ms/6dB", "30ms/6dB",
};
@@ -648,7 +617,7 @@ SOC_ENUM_SINGLE_DECL(arizona_out_vi_ramp,
arizona_vol_ramp_text);
EXPORT_SYMBOL_GPL(arizona_out_vi_ramp);
-static const char *arizona_lhpf_mode_text[] = {
+static const char * const arizona_lhpf_mode_text[] = {
"Low-pass", "High-pass"
};
@@ -676,7 +645,7 @@ SOC_ENUM_SINGLE_DECL(arizona_lhpf4_mode,
arizona_lhpf_mode_text);
EXPORT_SYMBOL_GPL(arizona_lhpf4_mode);
-static const char *arizona_ng_hold_text[] = {
+static const char * const arizona_ng_hold_text[] = {
"30ms", "120ms", "250ms", "500ms",
};
@@ -810,6 +779,14 @@ const struct soc_enum arizona_output_anc_src[] = {
};
EXPORT_SYMBOL_GPL(arizona_output_anc_src);
+const struct snd_kcontrol_new arizona_voice_trigger_switch[] = {
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0),
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 1, 1, 0),
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 2, 1, 0),
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 3, 1, 0),
+};
+EXPORT_SYMBOL_GPL(arizona_voice_trigger_switch);
+
static void arizona_in_set_vu(struct snd_soc_codec *codec, int ena)
{
struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
@@ -2573,6 +2550,30 @@ int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
+int arizona_register_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb,
+ int (*notify)(struct notifier_block *nb,
+ unsigned long action, void *data))
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ nb->notifier_call = notify;
+
+ return blocking_notifier_chain_register(&arizona->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(arizona_register_notifier);
+
+int arizona_unregister_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ return blocking_notifier_chain_unregister(&arizona->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(arizona_unregister_notifier);
+
MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index ce0531b8c632..69da1ef3a17c 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -63,6 +63,9 @@
#define ARIZONA_DVFS_SR1_RQ 0x001
#define ARIZONA_DVFS_ADSP1_RQ 0x100
+/* Notifier events */
+#define ARIZONA_NOTIFY_VOICE_TRIGGER 0x1
+
struct arizona;
struct wm_adsp;
@@ -87,14 +90,15 @@ struct arizona_priv {
unsigned int out_down_pending;
unsigned int out_down_delay;
- unsigned int spk_ena:2;
- unsigned int spk_ena_pending:1;
-
unsigned int dvfs_reqs;
struct mutex dvfs_lock;
bool dvfs_cached;
};
+struct arizona_voice_trigger_info {
+ int core;
+};
+
#define ARIZONA_NUM_MIXER_INPUTS 104
extern const unsigned int arizona_mixer_tlv[];
@@ -248,6 +252,8 @@ extern const struct soc_enum arizona_anc_input_src[];
extern const struct soc_enum arizona_anc_ng_enum;
extern const struct soc_enum arizona_output_anc_src[];
+extern const struct snd_kcontrol_new arizona_voice_trigger_switch[];
+
extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event);
@@ -306,6 +312,7 @@ extern int arizona_set_fll(struct arizona_fll *fll, int source,
extern int arizona_init_spk(struct snd_soc_codec *codec);
extern int arizona_init_gpio(struct snd_soc_codec *codec);
extern int arizona_init_mono(struct snd_soc_codec *codec);
+extern int arizona_init_notifiers(struct snd_soc_codec *codec);
extern int arizona_free_spk(struct snd_soc_codec *codec);
@@ -317,4 +324,13 @@ int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
extern bool arizona_input_analog(struct snd_soc_codec *codec, int shift);
extern const char *arizona_sample_rate_val_to_name(unsigned int rate_val);
+
+extern int arizona_register_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb,
+ int (*notify)(struct notifier_block *nb,
+ unsigned long action,
+ void *data));
+extern int arizona_unregister_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nb);
+
#endif
diff --git a/sound/soc/codecs/bt-sco.c b/sound/soc/codecs/bt-sco.c
index b084ad113e96..2a8d0ee141d4 100644
--- a/sound/soc/codecs/bt-sco.c
+++ b/sound/soc/codecs/bt-sco.c
@@ -25,22 +25,41 @@ static const struct snd_soc_dapm_route bt_sco_routes[] = {
{ "TX", NULL, "Playback" },
};
-static struct snd_soc_dai_driver bt_sco_dai = {
- .name = "bt-sco-pcm",
- .playback = {
- .stream_name = "Playback",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- },
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SNDRV_PCM_RATE_8000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+static struct snd_soc_dai_driver bt_sco_dai[] = {
+ {
+ .name = "bt-sco-pcm",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
},
+ {
+ .name = "bt-sco-pcm-wb",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ }
};
static struct snd_soc_codec_driver soc_codec_dev_bt_sco = {
@@ -53,7 +72,7 @@ static struct snd_soc_codec_driver soc_codec_dev_bt_sco = {
static int bt_sco_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_bt_sco,
- &bt_sco_dai, 1);
+ bt_sco_dai, ARRAY_SIZE(bt_sco_dai));
}
static int bt_sco_remove(struct platform_device *pdev)
@@ -77,6 +96,7 @@ MODULE_DEVICE_TABLE(platform, bt_sco_driver_ids);
#if defined(CONFIG_OF)
static const struct of_device_id bt_sco_codec_of_match[] = {
{ .compatible = "delta,dfbmcs320", },
+ { .compatible = "linux,bt-sco", },
{},
};
MODULE_DEVICE_TABLE(of, bt_sco_codec_of_match);
diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c
new file mode 100644
index 000000000000..6f9c1addcd7f
--- /dev/null
+++ b/sound/soc/codecs/cs35l33.c
@@ -0,0 +1,1303 @@
+/*
+ * cs35l33.c -- CS35L33 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <paul.handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <sound/cs35l33.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#include "cs35l33.h"
+
+#define CS35L33_BOOT_DELAY 50
+
+struct cs35l33_private {
+ struct snd_soc_codec *codec;
+ struct cs35l33_pdata pdata;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ bool amp_cal;
+ int mclk_int;
+ struct regulator_bulk_data core_supplies[2];
+ int num_core_supplies;
+ bool is_tdm_mode;
+ bool enable_soft_ramp;
+};
+
+static const struct reg_default cs35l33_reg[] = {
+ {CS35L33_PWRCTL1, 0x85},
+ {CS35L33_PWRCTL2, 0xFE},
+ {CS35L33_CLK_CTL, 0x0C},
+ {CS35L33_BST_PEAK_CTL, 0x90},
+ {CS35L33_PROTECT_CTL, 0x55},
+ {CS35L33_BST_CTL1, 0x00},
+ {CS35L33_BST_CTL2, 0x01},
+ {CS35L33_ADSP_CTL, 0x00},
+ {CS35L33_ADC_CTL, 0xC8},
+ {CS35L33_DAC_CTL, 0x14},
+ {CS35L33_DIG_VOL_CTL, 0x00},
+ {CS35L33_CLASSD_CTL, 0x04},
+ {CS35L33_AMP_CTL, 0x90},
+ {CS35L33_INT_MASK_1, 0xFF},
+ {CS35L33_INT_MASK_2, 0xFF},
+ {CS35L33_DIAG_LOCK, 0x00},
+ {CS35L33_DIAG_CTRL_1, 0x40},
+ {CS35L33_DIAG_CTRL_2, 0x00},
+ {CS35L33_HG_MEMLDO_CTL, 0x62},
+ {CS35L33_HG_REL_RATE, 0x03},
+ {CS35L33_LDO_DEL, 0x12},
+ {CS35L33_HG_HEAD, 0x0A},
+ {CS35L33_HG_EN, 0x05},
+ {CS35L33_TX_VMON, 0x00},
+ {CS35L33_TX_IMON, 0x03},
+ {CS35L33_TX_VPMON, 0x02},
+ {CS35L33_TX_VBSTMON, 0x05},
+ {CS35L33_TX_FLAG, 0x06},
+ {CS35L33_TX_EN1, 0x00},
+ {CS35L33_TX_EN2, 0x00},
+ {CS35L33_TX_EN3, 0x00},
+ {CS35L33_TX_EN4, 0x00},
+ {CS35L33_RX_AUD, 0x40},
+ {CS35L33_RX_SPLY, 0x03},
+ {CS35L33_RX_ALIVE, 0x04},
+ {CS35L33_BST_CTL4, 0x63},
+};
+
+static const struct reg_sequence cs35l33_patch[] = {
+ { 0x00, 0x99, 0 },
+ { 0x59, 0x02, 0 },
+ { 0x52, 0x30, 0 },
+ { 0x39, 0x45, 0 },
+ { 0x57, 0x30, 0 },
+ { 0x2C, 0x68, 0 },
+ { 0x00, 0x00, 0 },
+};
+
+static bool cs35l33_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L33_DEVID_AB:
+ case CS35L33_DEVID_CD:
+ case CS35L33_DEVID_E:
+ case CS35L33_REV_ID:
+ case CS35L33_INT_STATUS_1:
+ case CS35L33_INT_STATUS_2:
+ case CS35L33_HG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs35l33_writeable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ /* these are read-only registers */
+ case CS35L33_DEVID_AB:
+ case CS35L33_DEVID_CD:
+ case CS35L33_DEVID_E:
+ case CS35L33_REV_ID:
+ case CS35L33_INT_STATUS_1:
+ case CS35L33_INT_STATUS_2:
+ case CS35L33_HG_STATUS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cs35l33_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS35L33_DEVID_AB:
+ case CS35L33_DEVID_CD:
+ case CS35L33_DEVID_E:
+ case CS35L33_REV_ID:
+ case CS35L33_PWRCTL1:
+ case CS35L33_PWRCTL2:
+ case CS35L33_CLK_CTL:
+ case CS35L33_BST_PEAK_CTL:
+ case CS35L33_PROTECT_CTL:
+ case CS35L33_BST_CTL1:
+ case CS35L33_BST_CTL2:
+ case CS35L33_ADSP_CTL:
+ case CS35L33_ADC_CTL:
+ case CS35L33_DAC_CTL:
+ case CS35L33_DIG_VOL_CTL:
+ case CS35L33_CLASSD_CTL:
+ case CS35L33_AMP_CTL:
+ case CS35L33_INT_MASK_1:
+ case CS35L33_INT_MASK_2:
+ case CS35L33_INT_STATUS_1:
+ case CS35L33_INT_STATUS_2:
+ case CS35L33_DIAG_LOCK:
+ case CS35L33_DIAG_CTRL_1:
+ case CS35L33_DIAG_CTRL_2:
+ case CS35L33_HG_MEMLDO_CTL:
+ case CS35L33_HG_REL_RATE:
+ case CS35L33_LDO_DEL:
+ case CS35L33_HG_HEAD:
+ case CS35L33_HG_EN:
+ case CS35L33_TX_VMON:
+ case CS35L33_TX_IMON:
+ case CS35L33_TX_VPMON:
+ case CS35L33_TX_VBSTMON:
+ case CS35L33_TX_FLAG:
+ case CS35L33_TX_EN1:
+ case CS35L33_TX_EN2:
+ case CS35L33_TX_EN3:
+ case CS35L33_TX_EN4:
+ case CS35L33_RX_AUD:
+ case CS35L33_RX_SPLY:
+ case CS35L33_RX_ALIVE:
+ case CS35L33_BST_CTL4:
+ return true;
+ default:
+ return false;
+ }
+}
+
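+/*
+ * TLV arguments are in 0.01 dB units: the Class D amp gain starts at
+ * +9 dB in 1 dB steps, the DAC volume at -102 dB in 0.5 dB steps.
+ */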
+static DECLARE_TLV_DB_SCALE(classd_ctl_tlv, 900, 100, 0);
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10200, 50, 0);
+
+static const struct snd_kcontrol_new cs35l33_snd_controls[] = {
+
+ SOC_SINGLE_TLV("SPK Amp Volume", CS35L33_AMP_CTL,
+ 4, 0x09, 0, classd_ctl_tlv),
+ SOC_SINGLE_SX_TLV("DAC Volume", CS35L33_DIG_VOL_CTL,
+ 0, 0x34, 0xE4, dac_tlv),
+};
+
+static int cs35l33_spkrdrv_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (!priv->amp_cal) {
+ usleep_range(8000, 9000);
+ priv->amp_cal = true;
+ regmap_update_bits(priv->regmap, CS35L33_CLASSD_CTL,
+ CS35L33_AMP_CAL, 0);
+ dev_dbg(codec->dev, "Amp calibration done\n");
+ }
+ dev_dbg(codec->dev, "Amp turned on\n");
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ dev_dbg(codec->dev, "Amp turned off\n");
+ break;
+ default:
+ dev_err(codec->dev, "Invalid event = 0x%x\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static int cs35l33_sdin_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int val;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_BST, 0);
+ val = priv->is_tdm_mode ? 0 : CS35L33_PDN_TDM;
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_PDN_TDM, val);
+ dev_dbg(codec->dev, "BST turned on\n");
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ dev_dbg(codec->dev, "SDIN turned on\n");
+ if (!priv->amp_cal) {
+ regmap_update_bits(priv->regmap, CS35L33_CLASSD_CTL,
+ CS35L33_AMP_CAL, CS35L33_AMP_CAL);
+ dev_dbg(codec->dev, "Amp calibration started\n");
+ usleep_range(10000, 11000);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_PDN_TDM, CS35L33_PDN_TDM);
+ usleep_range(4000, 4100);
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_BST, CS35L33_PDN_BST);
+ dev_dbg(codec->dev, "BST and SDIN turned off\n");
+ break;
+ default:
+ dev_err(codec->dev, "Invalid event = 0x%x\n", event);
+ }
+
+ return 0;
+}
+
+static int cs35l33_sdout_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int mask = CS35L33_SDOUT_3ST_I2S | CS35L33_PDN_TDM;
+ unsigned int mask2 = CS35L33_SDOUT_3ST_TDM;
+ unsigned int val, val2;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (priv->is_tdm_mode) {
+ /* set sdout_3st_i2s and reset pdn_tdm */
+ val = CS35L33_SDOUT_3ST_I2S;
+ /* reset sdout_3st_tdm */
+ val2 = 0;
+ } else {
+ /* reset sdout_3st_i2s and set pdn_tdm */
+ val = CS35L33_PDN_TDM;
+ /* set sdout_3st_tdm */
+ val2 = CS35L33_SDOUT_3ST_TDM;
+ }
+ dev_dbg(codec->dev, "SDOUT turned on\n");
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ val = CS35L33_SDOUT_3ST_I2S | CS35L33_PDN_TDM;
+ val2 = CS35L33_SDOUT_3ST_TDM;
+ dev_dbg(codec->dev, "SDOUT turned off\n");
+ break;
+ default:
+ dev_err(codec->dev, "Invalid event = 0x%x\n", event);
+ return 0;
+ }
+
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ mask, val);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ mask2, val2);
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget cs35l33_dapm_widgets[] = {
+
+ SND_SOC_DAPM_OUTPUT("SPK"),
+ SND_SOC_DAPM_OUT_DRV_E("SPKDRV", CS35L33_PWRCTL1, 7, 1, NULL, 0,
+ cs35l33_spkrdrv_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("SDIN", NULL, 0, CS35L33_PWRCTL2,
+ 2, 1, cs35l33_sdin_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_INPUT("MON"),
+
+ SND_SOC_DAPM_ADC("VMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_VMON_SHIFT, 1),
+ SND_SOC_DAPM_ADC("IMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_IMON_SHIFT, 1),
+ SND_SOC_DAPM_ADC("VPMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_VPMON_SHIFT, 1),
+ SND_SOC_DAPM_ADC("VBSTMON", NULL,
+ CS35L33_PWRCTL2, CS35L33_PDN_VBSTMON_SHIFT, 1),
+
+ SND_SOC_DAPM_AIF_OUT_E("SDOUT", NULL, 0, SND_SOC_NOPM, 0, 0,
+ cs35l33_sdout_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_PRE_PMD),
+};
+
+static const struct snd_soc_dapm_route cs35l33_audio_map[] = {
+ {"SDIN", NULL, "CS35L33 Playback"},
+ {"SPKDRV", NULL, "SDIN"},
+ {"SPK", NULL, "SPKDRV"},
+
+ {"VMON", NULL, "MON"},
+ {"IMON", NULL, "MON"},
+
+ {"SDOUT", NULL, "VMON"},
+ {"SDOUT", NULL, "IMON"},
+ {"CS35L33 Capture", NULL, "SDOUT"},
+};
+
+static const struct snd_soc_dapm_route cs35l33_vphg_auto_route[] = {
+ {"SPKDRV", NULL, "VPMON"},
+ {"VPMON", NULL, "CS35L33 Playback"},
+};
+
+static const struct snd_soc_dapm_route cs35l33_vp_vbst_mon_route[] = {
+ {"SDOUT", NULL, "VPMON"},
+ {"VPMON", NULL, "MON"},
+ {"SDOUT", NULL, "VBSTMON"},
+ {"VBSTMON", NULL, "MON"},
+};
+
+static int cs35l33_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ unsigned int val;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_ALL, 0);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIS, 0);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL1,
+ CS35L33_PDN_ALL, CS35L33_PDN_ALL);
+ regmap_read(priv->regmap, CS35L33_INT_STATUS_2, &val);
+ usleep_range(1000, 1100);
+ if (val & CS35L33_PDN_DONE)
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIS, CS35L33_MCLKDIS);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct cs35l33_mclk_div {
+ int mclk;
+ int srate;
+ u8 adsp_rate;
+ u8 int_fs_ratio;
+};
+
+static const struct cs35l33_mclk_div cs35l33_mclk_coeffs[] = {
+ /* MCLK, Sample Rate, adsp_rate, int_fs_ratio */
+ {5644800, 11025, 0x4, CS35L33_INT_FS_RATE},
+ {5644800, 22050, 0x8, CS35L33_INT_FS_RATE},
+ {5644800, 44100, 0xC, CS35L33_INT_FS_RATE},
+
+ {6000000, 8000, 0x1, 0},
+ {6000000, 11025, 0x2, 0},
+ {6000000, 11029, 0x3, 0},
+ {6000000, 12000, 0x4, 0},
+ {6000000, 16000, 0x5, 0},
+ {6000000, 22050, 0x6, 0},
+ {6000000, 22059, 0x7, 0},
+ {6000000, 24000, 0x8, 0},
+ {6000000, 32000, 0x9, 0},
+ {6000000, 44100, 0xA, 0},
+ {6000000, 44118, 0xB, 0},
+ {6000000, 48000, 0xC, 0},
+
+ {6144000, 8000, 0x1, CS35L33_INT_FS_RATE},
+ {6144000, 12000, 0x4, CS35L33_INT_FS_RATE},
+ {6144000, 16000, 0x5, CS35L33_INT_FS_RATE},
+ {6144000, 24000, 0x8, CS35L33_INT_FS_RATE},
+ {6144000, 32000, 0x9, CS35L33_INT_FS_RATE},
+ {6144000, 48000, 0xC, CS35L33_INT_FS_RATE},
+};
+
+static int cs35l33_get_mclk_coeff(int mclk, int srate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l33_mclk_coeffs); i++) {
+ if (cs35l33_mclk_coeffs[i].mclk == mclk &&
+ cs35l33_mclk_coeffs[i].srate == srate)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int cs35l33_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ regmap_update_bits(priv->regmap, CS35L33_ADSP_CTL,
+ CS35L33_MS_MASK, CS35L33_MS_MASK);
+ dev_dbg(codec->dev, "Audio port in master mode\n");
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ regmap_update_bits(priv->regmap, CS35L33_ADSP_CTL,
+ CS35L33_MS_MASK, 0);
+ dev_dbg(codec->dev, "Audio port in slave mode\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ /*
+ * TDM mode on the CS35L33 closely resembles DSP-A mode: it is
+ * DSP-A with fsync shifted left by half a bclk period
+ */
+ priv->is_tdm_mode = true;
+ dev_dbg(codec->dev, "Audio port in TDM mode\n");
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ priv->is_tdm_mode = false;
+ dev_dbg(codec->dev, "Audio port in I2S mode\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs35l33_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ int sample_size = params_width(params);
+ int coeff = cs35l33_get_mclk_coeff(priv->mclk_int, params_rate(params));
+
+ if (coeff < 0)
+ return coeff;
+
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_ADSP_FS | CS35L33_INT_FS_RATE,
+ cs35l33_mclk_coeffs[coeff].int_fs_ratio
+ | cs35l33_mclk_coeffs[coeff].adsp_rate);
+
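+ /*
+ * In TDM mode, program AUDIN_RX_DEPTH from the sample width:
+ * width/8 - 1, capped at 2.
+ */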
+ if (priv->is_tdm_mode) {
+ sample_size = (sample_size / 8) - 1;
+ if (sample_size > 2)
+ sample_size = 2;
+ regmap_update_bits(priv->regmap, CS35L33_RX_AUD,
+ CS35L33_AUDIN_RX_DEPTH,
+ sample_size << CS35L33_AUDIN_RX_DEPTH_SHIFT);
+ }
+
+ dev_dbg(codec->dev, "sample rate=%d, bits per sample=%d\n",
+ params_rate(params), params_width(params));
+
+ return 0;
+}
+
+static const unsigned int cs35l33_src_rates[] = {
+ 8000, 11025, 11029, 12000, 16000, 22050,
+ 22059, 24000, 32000, 44100, 44118, 48000
+};
+
+static const struct snd_pcm_hw_constraint_list cs35l33_constraints = {
+ .count = ARRAY_SIZE(cs35l33_src_rates),
+ .list = cs35l33_src_rates,
+};
+
+static int cs35l33_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &cs35l33_constraints);
+ return 0;
+}
+
+static int cs35l33_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ if (tristate) {
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_SDOUT_3ST_I2S, CS35L33_SDOUT_3ST_I2S);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_SDOUT_3ST_TDM, CS35L33_SDOUT_3ST_TDM);
+ } else {
+ regmap_update_bits(priv->regmap, CS35L33_PWRCTL2,
+ CS35L33_SDOUT_3ST_I2S, 0);
+ regmap_update_bits(priv->regmap, CS35L33_CLK_CTL,
+ CS35L33_SDOUT_3ST_TDM, 0);
+ }
+
+ return 0;
+}
+
+static int cs35l33_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg, bit_pos, i;
+ int slot, slot_num;
+
+ if (slot_width != 8)
+ return -EINVAL;
+
+ /* scan rx_mask for aud slot */
+ slot = ffs(rx_mask) - 1;
+ if (slot >= 0) {
+ regmap_update_bits(priv->regmap, CS35L33_RX_AUD,
+ CS35L33_X_LOC, slot);
+ dev_dbg(codec->dev, "Audio starts from slots %d", slot);
+ }
+
+ /*
+ * scan tx_mask: vmon (2 slots); imon (2 slots);
+ * vpmon (1 slot); vbstmon (1 slot)
+ */
+ slot = ffs(tx_mask) - 1;
+ slot_num = 0;
+
+ for (i = 0; i < 2 ; i++) {
+ /* disable vpmon/vbstmon: enable later if set in tx_mask */
+ regmap_update_bits(priv->regmap, CS35L33_TX_VPMON + i,
+ CS35L33_X_STATE | CS35L33_X_LOC, CS35L33_X_STATE
+ | CS35L33_X_LOC);
+ }
+
+ /* disconnect {vp,vbst}_mon routes: enable later if set in tx_mask */
+ snd_soc_dapm_del_routes(dapm, cs35l33_vp_vbst_mon_route,
+ ARRAY_SIZE(cs35l33_vp_vbst_mon_route));
+
+ while (slot >= 0) {
+ /* configure VMON_TX_LOC */
+ if (slot_num == 0) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_VMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ dev_dbg(codec->dev, "VMON enabled in slots %d-%d",
+ slot, slot + 1);
+ }
+
+ /* configure IMON_TX_LOC */
+ if (slot_num == 3) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_IMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ dev_dbg(codec->dev, "IMON enabled in slots %d-%d",
+ slot, slot + 1);
+ }
+
+ /* configure VPMON_TX_LOC */
+ if (slot_num == 4) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_VPMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ snd_soc_dapm_add_routes(dapm,
+ &cs35l33_vp_vbst_mon_route[0], 2);
+ dev_dbg(codec->dev, "VPMON enabled in slots %d", slot);
+ }
+
+ /* configure VBSTMON_TX_LOC */
+ if (slot_num == 5) {
+ regmap_update_bits(priv->regmap, CS35L33_TX_VBSTMON,
+ CS35L33_X_STATE | CS35L33_X_LOC, slot);
+ snd_soc_dapm_add_routes(dapm,
+ &cs35l33_vp_vbst_mon_route[2], 2);
+ dev_dbg(codec->dev,
+ "VBSTMON enabled in slots %d", slot);
+ }
+
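+ /*
+ * The per-slot enable bits are spread across TX_EN4..TX_EN1:
+ * slots 0-7 map to TX_EN4, 8-15 to TX_EN3, and so on.
+ */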
+ /* Enable the relevant tx slot */
+ reg = CS35L33_TX_EN4 - (slot/8);
+ bit_pos = slot % 8;
+ regmap_update_bits(priv->regmap, reg,
+ 1 << bit_pos, 1 << bit_pos);
+
+ tx_mask &= ~(1 << slot);
+ slot = ffs(tx_mask) - 1;
+ slot_num++;
+ }
+
+ return 0;
+}
+
+static int cs35l33_codec_set_sysclk(struct snd_soc_codec *codec,
+ int clk_id, int source, unsigned int freq, int dir)
+{
+ struct cs35l33_private *cs35l33 = snd_soc_codec_get_drvdata(codec);
+
+ switch (freq) {
+ case CS35L33_MCLK_5644:
+ case CS35L33_MCLK_6:
+ case CS35L33_MCLK_6144:
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIV2, 0);
+ cs35l33->mclk_int = freq;
+ break;
+ case CS35L33_MCLK_11289:
+ case CS35L33_MCLK_12:
+ case CS35L33_MCLK_12288:
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIV2, CS35L33_MCLKDIV2);
+ cs35l33->mclk_int = freq/2;
+ break;
+ default:
+ cs35l33->mclk_int = 0;
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "external mclk freq=%d, internal mclk freq=%d\n",
+ freq, cs35l33->mclk_int);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops cs35l33_ops = {
+ .startup = cs35l33_pcm_startup,
+ .set_tristate = cs35l33_set_tristate,
+ .set_fmt = cs35l33_set_dai_fmt,
+ .hw_params = cs35l33_pcm_hw_params,
+ .set_tdm_slot = cs35l33_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver cs35l33_dai = {
+ .name = "cs35l33-dai",
+ .id = 0,
+ .playback = {
+ .stream_name = "CS35L33 Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = CS35L33_RATES,
+ .formats = CS35L33_FORMATS,
+ },
+ .capture = {
+ .stream_name = "CS35L33 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = CS35L33_RATES,
+ .formats = CS35L33_FORMATS,
+ },
+ .ops = &cs35l33_ops,
+ .symmetric_rates = 1,
+};
+
+static int cs35l33_set_hg_data(struct snd_soc_codec *codec,
+ struct cs35l33_pdata *pdata)
+{
+ struct cs35l33_hg *hg_config = &pdata->hg_config;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct cs35l33_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ if (hg_config->enable_hg_algo) {
+ regmap_update_bits(priv->regmap, CS35L33_HG_MEMLDO_CTL,
+ CS35L33_MEM_DEPTH_MASK,
+ hg_config->mem_depth << CS35L33_MEM_DEPTH_SHIFT);
+ regmap_write(priv->regmap, CS35L33_HG_REL_RATE,
+ hg_config->release_rate);
+ regmap_update_bits(priv->regmap, CS35L33_HG_HEAD,
+ CS35L33_HD_RM_MASK,
+ hg_config->hd_rm << CS35L33_HD_RM_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_HG_MEMLDO_CTL,
+ CS35L33_LDO_THLD_MASK,
+ hg_config->ldo_thld << CS35L33_LDO_THLD_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_HG_MEMLDO_CTL,
+ CS35L33_LDO_DISABLE_MASK,
+ hg_config->ldo_path_disable <<
+ CS35L33_LDO_DISABLE_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_LDO_DEL,
+ CS35L33_LDO_ENTRY_DELAY_MASK,
+ hg_config->ldo_entry_delay <<
+ CS35L33_LDO_ENTRY_DELAY_SHIFT);
+ if (hg_config->vp_hg_auto) {
+ regmap_update_bits(priv->regmap, CS35L33_HG_EN,
+ CS35L33_VP_HG_AUTO_MASK,
+ CS35L33_VP_HG_AUTO_MASK);
+ snd_soc_dapm_add_routes(dapm, cs35l33_vphg_auto_route,
+ ARRAY_SIZE(cs35l33_vphg_auto_route));
+ }
+ regmap_update_bits(priv->regmap, CS35L33_HG_EN,
+ CS35L33_VP_HG_MASK,
+ hg_config->vp_hg << CS35L33_VP_HG_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_LDO_DEL,
+ CS35L33_VP_HG_RATE_MASK,
+ hg_config->vp_hg_rate << CS35L33_VP_HG_RATE_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_LDO_DEL,
+ CS35L33_VP_HG_VA_MASK,
+ hg_config->vp_hg_va << CS35L33_VP_HG_VA_SHIFT);
+ regmap_update_bits(priv->regmap, CS35L33_HG_EN,
+ CS35L33_CLASS_HG_EN_MASK, CS35L33_CLASS_HG_EN_MASK);
+ }
+ return 0;
+}
+
+static int cs35l33_set_bst_ipk(struct snd_soc_codec *codec, unsigned int bst)
+{
+ struct cs35l33_private *cs35l33 = snd_soc_codec_get_drvdata(codec);
+ int ret = 0, steps = 0;
+
+ /* Boost current in uA */
+ if (bst > 3600000 || bst < 1850000) {
+ dev_err(codec->dev, "Invalid boost current %d\n", bst);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (bst % 15625) {
+ dev_err(codec->dev, "Current not a multiple of 15625uA (%d)\n",
+ bst);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ while (bst > 1850000) {
+ bst -= 15625;
+ steps++;
+ }
+
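+ /* 1.85 A programs as 0x70; each additional 15.625 mA adds one code */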
+ regmap_write(cs35l33->regmap, CS35L33_BST_PEAK_CTL,
+ steps + 0x70);
+
+err:
+ return ret;
+}
+
+static int cs35l33_probe(struct snd_soc_codec *codec)
+{
+ struct cs35l33_private *cs35l33 = snd_soc_codec_get_drvdata(codec);
+
+ cs35l33->codec = codec;
+ pm_runtime_get_sync(codec->dev);
+
+ regmap_update_bits(cs35l33->regmap, CS35L33_PROTECT_CTL,
+ CS35L33_ALIVE_WD_DIS, 0x8);
+ regmap_update_bits(cs35l33->regmap, CS35L33_BST_CTL2,
+ CS35L33_ALIVE_WD_DIS2,
+ CS35L33_ALIVE_WD_DIS2);
+
+ /* Set Platform Data */
+ regmap_update_bits(cs35l33->regmap, CS35L33_BST_CTL1,
+ CS35L33_BST_CTL_MASK, cs35l33->pdata.boost_ctl);
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLASSD_CTL,
+ CS35L33_AMP_DRV_SEL_MASK,
+ cs35l33->pdata.amp_drv_sel << CS35L33_AMP_DRV_SEL_SHIFT);
+
+ if (cs35l33->pdata.boost_ipk)
+ cs35l33_set_bst_ipk(codec, cs35l33->pdata.boost_ipk);
+
+ if (cs35l33->enable_soft_ramp) {
+ snd_soc_update_bits(codec, CS35L33_DAC_CTL,
+ CS35L33_DIGSFT, CS35L33_DIGSFT);
+ snd_soc_update_bits(codec, CS35L33_DAC_CTL,
+ CS35L33_DSR_RATE, cs35l33->pdata.ramp_rate);
+ } else {
+ snd_soc_update_bits(codec, CS35L33_DAC_CTL,
+ CS35L33_DIGSFT, 0);
+ }
+
+ /* update IMON scaling rate if different from default of 0x8 */
+ if (cs35l33->pdata.imon_adc_scale != 0x8)
+ snd_soc_update_bits(codec, CS35L33_ADC_CTL,
+ CS35L33_IMON_SCALE, cs35l33->pdata.imon_adc_scale);
+
+ cs35l33_set_hg_data(codec, &(cs35l33->pdata));
+
+ /*
+ * unmask the important interrupts that cause the chip to enter
+ * speaker safe mode and hence deserve user attention
+ */
+ regmap_update_bits(cs35l33->regmap, CS35L33_INT_MASK_1,
+ CS35L33_M_OTE | CS35L33_M_OTW | CS35L33_M_AMP_SHORT |
+ CS35L33_M_CAL_ERR, 0);
+
+ pm_runtime_put_sync(codec->dev);
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_cs35l33 = {
+ .probe = cs35l33_probe,
+
+ .set_bias_level = cs35l33_set_bias_level,
+ .set_sysclk = cs35l33_codec_set_sysclk,
+
+ .dapm_widgets = cs35l33_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs35l33_dapm_widgets),
+ .dapm_routes = cs35l33_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(cs35l33_audio_map),
+ .controls = cs35l33_snd_controls,
+ .num_controls = ARRAY_SIZE(cs35l33_snd_controls),
+
+ .idle_bias_off = true,
+};
+
+static const struct regmap_config cs35l33_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CS35L33_MAX_REGISTER,
+ .reg_defaults = cs35l33_reg,
+ .num_reg_defaults = ARRAY_SIZE(cs35l33_reg),
+ .volatile_reg = cs35l33_volatile_register,
+ .readable_reg = cs35l33_readable_register,
+ .writeable_reg = cs35l33_writeable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static int __maybe_unused cs35l33_runtime_resume(struct device *dev)
+{
+ struct cs35l33_private *cs35l33 = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 0);
+
+ ret = regulator_bulk_enable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable core supplies: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(cs35l33->regmap, false);
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 1);
+
+ msleep(CS35L33_BOOT_DELAY);
+
+ ret = regcache_sync(cs35l33->regmap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to restore register cache\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_cache_only(cs35l33->regmap, true);
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return ret;
+}
+
+static int __maybe_unused cs35l33_runtime_suspend(struct device *dev)
+{
+ struct cs35l33_private *cs35l33 = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* redo the calibration in next power up */
+ cs35l33->amp_cal = false;
+
+ regcache_cache_only(cs35l33->regmap, true);
+ regcache_mark_dirty(cs35l33->regmap);
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cs35l33_pm_ops = {
+ SET_RUNTIME_PM_OPS(cs35l33_runtime_suspend,
+ cs35l33_runtime_resume,
+ NULL)
+};
+
+static int cs35l33_get_hg_data(const struct device_node *np,
+ struct cs35l33_pdata *pdata)
+{
+ struct device_node *hg;
+ struct cs35l33_hg *hg_config = &pdata->hg_config;
+ u32 val32;
+
+ hg = of_get_child_by_name(np, "cirrus,hg-algo");
+ hg_config->enable_hg_algo = hg ? true : false;
+
+ if (hg_config->enable_hg_algo) {
+ if (of_property_read_u32(hg, "cirrus,mem-depth", &val32) >= 0)
+ hg_config->mem_depth = val32;
+ if (of_property_read_u32(hg, "cirrus,release-rate",
+ &val32) >= 0)
+ hg_config->release_rate = val32;
+ if (of_property_read_u32(hg, "cirrus,ldo-thld", &val32) >= 0)
+ hg_config->ldo_thld = val32;
+ if (of_property_read_u32(hg, "cirrus,ldo-path-disable",
+ &val32) >= 0)
+ hg_config->ldo_path_disable = val32;
+ if (of_property_read_u32(hg, "cirrus,ldo-entry-delay",
+ &val32) >= 0)
+ hg_config->ldo_entry_delay = val32;
+
+ hg_config->vp_hg_auto = of_property_read_bool(hg,
+ "cirrus,vp-hg-auto");
+
+ if (of_property_read_u32(hg, "cirrus,vp-hg", &val32) >= 0)
+ hg_config->vp_hg = val32;
+ if (of_property_read_u32(hg, "cirrus,vp-hg-rate", &val32) >= 0)
+ hg_config->vp_hg_rate = val32;
+ if (of_property_read_u32(hg, "cirrus,vp-hg-va", &val32) >= 0)
+ hg_config->vp_hg_va = val32;
+ }
+
+ of_node_put(hg);
+
+ return 0;
+}
+
+static irqreturn_t cs35l33_irq_thread(int irq, void *data)
+{
+ struct cs35l33_private *cs35l33 = data;
+ struct snd_soc_codec *codec = cs35l33->codec;
+ unsigned int sticky_val1, sticky_val2, current_val, mask1, mask2;
+
+ regmap_read(cs35l33->regmap, CS35L33_INT_STATUS_2,
+ &sticky_val2);
+ regmap_read(cs35l33->regmap, CS35L33_INT_STATUS_1,
+ &sticky_val1);
+ regmap_read(cs35l33->regmap, CS35L33_INT_MASK_2, &mask2);
+ regmap_read(cs35l33->regmap, CS35L33_INT_MASK_1, &mask1);
+
+ /* Check whether any unmasked interrupt bits are active;
+ * if not, exit.
+ */
+ if (!(sticky_val1 & ~mask1) && !(sticky_val2 & ~mask2))
+ return IRQ_NONE;
+
+ regmap_read(cs35l33->regmap, CS35L33_INT_STATUS_1,
+ &current_val);
+
+ /* handle the interrupts */
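+ /*
+ * For each fault, once the live status shows the condition has
+ * cleared, toggle the matching *_RLS bit 0 -> 1 -> 0 to release it.
+ */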
+
+ if (sticky_val1 & CS35L33_AMP_SHORT) {
+ dev_crit(codec->dev, "Amp short error\n");
+ if (!(current_val & CS35L33_AMP_SHORT)) {
+ dev_dbg(codec->dev,
+ "Amp short error release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL,
+ CS35L33_AMP_SHORT_RLS, 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL,
+ CS35L33_AMP_SHORT_RLS,
+ CS35L33_AMP_SHORT_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_AMP_SHORT_RLS,
+ 0);
+ }
+ }
+
+ if (sticky_val1 & CS35L33_CAL_ERR) {
+ dev_err(codec->dev, "Cal error\n");
+
+ /* redo the calibration in next power up */
+ cs35l33->amp_cal = false;
+
+ if (!(current_val & CS35L33_CAL_ERR)) {
+ dev_dbg(codec->dev, "Cal error release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_CAL_ERR_RLS,
+ 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_CAL_ERR_RLS,
+ CS35L33_CAL_ERR_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_CAL_ERR_RLS,
+ 0);
+ }
+ }
+
+ if (sticky_val1 & CS35L33_OTE) {
+ dev_crit(codec->dev, "Over temperature error\n");
+ if (!(current_val & CS35L33_OTE)) {
+ dev_dbg(codec->dev,
+ "Over temperature error release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTE_RLS, 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTE_RLS,
+ CS35L33_OTE_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTE_RLS, 0);
+ }
+ }
+
+ if (sticky_val1 & CS35L33_OTW) {
+ dev_err(codec->dev, "Over temperature warning\n");
+ if (!(current_val & CS35L33_OTW)) {
+ dev_dbg(codec->dev,
+ "Over temperature warning release\n");
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTW_RLS, 0);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTW_RLS,
+ CS35L33_OTW_RLS);
+ regmap_update_bits(cs35l33->regmap,
+ CS35L33_AMP_CTL, CS35L33_OTW_RLS, 0);
+ }
+ }
+ if (CS35L33_ALIVE_ERR & sticky_val1)
+ dev_err(codec->dev, "ERROR: ADSPCLK Interrupt\n");
+
+ if (CS35L33_MCLK_ERR & sticky_val1)
+ dev_err(codec->dev, "ERROR: MCLK Interrupt\n");
+
+ if (CS35L33_VMON_OVFL & sticky_val2)
+ dev_err(codec->dev,
+ "ERROR: VMON Overflow Interrupt\n");
+
+ if (CS35L33_IMON_OVFL & sticky_val2)
+ dev_err(codec->dev,
+ "ERROR: IMON Overflow Interrupt\n");
+
+ if (CS35L33_VPMON_OVFL & sticky_val2)
+ dev_err(codec->dev,
+ "ERROR: VPMON Overflow Interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static const char * const cs35l33_core_supplies[] = {
+ "VA",
+ "VP",
+};
+
+static int cs35l33_of_get_pdata(struct device *dev,
+ struct cs35l33_private *cs35l33)
+{
+ struct device_node *np = dev->of_node;
+ struct cs35l33_pdata *pdata = &cs35l33->pdata;
+ u32 val32;
+
+ if (!np)
+ return 0;
+
+ if (of_property_read_u32(np, "cirrus,boost-ctl", &val32) >= 0) {
+ pdata->boost_ctl = val32;
+ pdata->amp_drv_sel = 1;
+ }
+
+ if (of_property_read_u32(np, "cirrus,ramp-rate", &val32) >= 0) {
+ pdata->ramp_rate = val32;
+ cs35l33->enable_soft_ramp = true;
+ }
+
+ if (of_property_read_u32(np, "cirrus,boost-ipk", &val32) >= 0)
+ pdata->boost_ipk = val32;
+
+ if (of_property_read_u32(np, "cirrus,imon-adc-scale", &val32) >= 0) {
+ if ((val32 == 0x0) || (val32 == 0x7) || (val32 == 0x6))
+ pdata->imon_adc_scale = val32;
+ else
+ /* use default value */
+ pdata->imon_adc_scale = 0x8;
+ } else {
+ /* use default value */
+ pdata->imon_adc_scale = 0x8;
+ }
+
+ cs35l33_get_hg_data(np, pdata);
+
+ return 0;
+}
+
+static int cs35l33_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct cs35l33_private *cs35l33;
+ struct cs35l33_pdata *pdata = dev_get_platdata(&i2c_client->dev);
+ int ret, devid, i;
+ unsigned int reg;
+
+ cs35l33 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l33_private),
+ GFP_KERNEL);
+ if (!cs35l33)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c_client, cs35l33);
+ cs35l33->regmap = devm_regmap_init_i2c(i2c_client, &cs35l33_regmap);
+ if (IS_ERR(cs35l33->regmap)) {
+ ret = PTR_ERR(cs35l33->regmap);
+ dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(cs35l33->regmap, true);
+
+ for (i = 0; i < ARRAY_SIZE(cs35l33_core_supplies); i++)
+ cs35l33->core_supplies[i].supply
+ = cs35l33_core_supplies[i];
+ cs35l33->num_core_supplies = ARRAY_SIZE(cs35l33_core_supplies);
+
+ ret = devm_regulator_bulk_get(&i2c_client->dev,
+ cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to request core supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (pdata) {
+ cs35l33->pdata = *pdata;
+ } else {
+ cs35l33_of_get_pdata(&i2c_client->dev, cs35l33);
+ pdata = &cs35l33->pdata;
+ }
+
+ ret = devm_request_threaded_irq(&i2c_client->dev, i2c_client->irq, NULL,
+ cs35l33_irq_thread, IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "cs35l33", cs35l33);
+ if (ret != 0)
+ dev_warn(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+ /* We could issue !RST or skip it based on AMP topology */
+ cs35l33->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+ "reset-gpios", GPIOD_OUT_HIGH);
+ if (IS_ERR(cs35l33->reset_gpio)) {
+ dev_err(&i2c_client->dev, "%s ERROR: Can't get reset GPIO\n",
+ __func__);
+ return PTR_ERR(cs35l33->reset_gpio);
+ }
+
+ ret = regulator_bulk_enable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+ if (ret != 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to enable core supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 1);
+
+ msleep(CS35L33_BOOT_DELAY);
+ regcache_cache_only(cs35l33->regmap, false);
+
+ /* initialize codec */
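+ /* Assemble the 20-bit device ID from the DEVID_AB, _CD and _E registers */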
+ ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_AB, &reg);
+ devid = (reg & 0xFF) << 12;
+ ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_CD, &reg);
+ devid |= (reg & 0xFF) << 4;
+ ret = regmap_read(cs35l33->regmap, CS35L33_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS35L33_CHIP_ID) {
+ dev_err(&i2c_client->dev,
+ "CS35L33 Device ID (%X). Expected ID %X\n",
+ devid, CS35L33_CHIP_ID);
+ ret = -ENODEV;
+ goto err_enable;
+ }
+
+ ret = regmap_read(cs35l33->regmap, CS35L33_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+ goto err_enable;
+ }
+
+ dev_info(&i2c_client->dev,
+ "Cirrus Logic CS35L33, Revision: %02X\n", reg & 0xFF);
+
+ ret = regmap_register_patch(cs35l33->regmap,
+ cs35l33_patch, ARRAY_SIZE(cs35l33_patch));
+ if (ret < 0) {
+ dev_err(&i2c_client->dev,
+ "Error in applying regmap patch: %d\n", ret);
+ goto err_enable;
+ }
+
+ /* disable mclk and tdm */
+ regmap_update_bits(cs35l33->regmap, CS35L33_CLK_CTL,
+ CS35L33_MCLKDIS | CS35L33_SDOUT_3ST_TDM,
+ CS35L33_MCLKDIS | CS35L33_SDOUT_3ST_TDM);
+
+ pm_runtime_set_autosuspend_delay(&i2c_client->dev, 100);
+ pm_runtime_use_autosuspend(&i2c_client->dev);
+ pm_runtime_set_active(&i2c_client->dev);
+ pm_runtime_enable(&i2c_client->dev);
+
+ ret = snd_soc_register_codec(&i2c_client->dev,
+ &soc_codec_dev_cs35l33, &cs35l33_dai, 1);
+ if (ret < 0) {
+ dev_err(&i2c_client->dev, "%s: Register codec failed\n",
+ __func__);
+ goto err_enable;
+ }
+
+ return 0;
+
+err_enable:
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return ret;
+}
+
+static int cs35l33_i2c_remove(struct i2c_client *client)
+{
+ struct cs35l33_private *cs35l33 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&client->dev);
+
+ if (cs35l33->reset_gpio)
+ gpiod_set_value_cansleep(cs35l33->reset_gpio, 0);
+
+ pm_runtime_disable(&client->dev);
+ regulator_bulk_disable(cs35l33->num_core_supplies,
+ cs35l33->core_supplies);
+
+ return 0;
+}
+
+static const struct of_device_id cs35l33_of_match[] = {
+ { .compatible = "cirrus,cs35l33", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cs35l33_of_match);
+
+static const struct i2c_device_id cs35l33_id[] = {
+ {"cs35l33", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs35l33_id);
+
+static struct i2c_driver cs35l33_i2c_driver = {
+ .driver = {
+ .name = "cs35l33",
+ .pm = &cs35l33_pm_ops,
+ .of_match_table = cs35l33_of_match,
+ },
+ .id_table = cs35l33_id,
+ .probe = cs35l33_i2c_probe,
+ .remove = cs35l33_i2c_remove,
+};
+module_i2c_driver(cs35l33_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS35L33 driver");
+MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <paul.handrigan@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs35l33.h b/sound/soc/codecs/cs35l33.h
new file mode 100644
index 000000000000..c045737d1a5f
--- /dev/null
+++ b/sound/soc/codecs/cs35l33.h
@@ -0,0 +1,221 @@
+/*
+ * cs35l33.h -- CS35L33 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <paul.handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS35L33_H__
+#define __CS35L33_H__
+
+#define CS35L33_CHIP_ID 0x00035A33
+#define CS35L33_DEVID_AB 0x01 /* Device ID A & B [RO] */
+#define CS35L33_DEVID_CD 0x02 /* Device ID C & D [RO] */
+#define CS35L33_DEVID_E 0x03 /* Device ID E [RO] */
+#define CS35L33_FAB_ID 0x04 /* Fab ID [RO] */
+#define CS35L33_REV_ID 0x05 /* Revision ID [RO] */
+#define CS35L33_PWRCTL1 0x06 /* Power Ctl 1 */
+#define CS35L33_PWRCTL2 0x07 /* Power Ctl 2 */
+#define CS35L33_CLK_CTL 0x08 /* Clock Ctl */
+#define CS35L33_BST_PEAK_CTL 0x09 /* Max Current for Boost */
+#define CS35L33_PROTECT_CTL 0x0A /* Amp Protection Parameters */
+#define CS35L33_BST_CTL1 0x0B /* Boost Converter CTL1 */
+#define CS35L33_BST_CTL2 0x0C /* Boost Converter CTL2 */
+#define CS35L33_ADSP_CTL 0x0D /* Serial Port Control */
+#define CS35L33_ADC_CTL 0x0E /* ADC Control */
+#define CS35L33_DAC_CTL 0x0F /* DAC Control */
+#define CS35L33_DIG_VOL_CTL 0x10 /* Digital Volume CTL */
+#define CS35L33_CLASSD_CTL 0x11 /* Class D Amp CTL */
+#define CS35L33_AMP_CTL 0x12 /* Amp Gain/Protection Release CTL */
+#define CS35L33_INT_MASK_1 0x13 /* Interrupt Mask 1 */
+#define CS35L33_INT_MASK_2 0x14 /* Interrupt Mask 2 */
+#define CS35L33_INT_STATUS_1 0x15 /* Interrupt Status 1 [RO] */
+#define CS35L33_INT_STATUS_2 0x16 /* Interrupt Status 2 [RO] */
+#define CS35L33_DIAG_LOCK 0x17 /* Diagnostic Mode Register Lock */
+#define CS35L33_DIAG_CTRL_1 0x18 /* Diagnostic Mode Register Control */
+#define CS35L33_DIAG_CTRL_2 0x19 /* Diagnostic Mode Register Control 2 */
+#define CS35L33_HG_MEMLDO_CTL 0x23 /* H/G Memory/LDO CTL */
+#define CS35L33_HG_REL_RATE 0x24 /* H/G Release Rate */
+#define CS35L33_LDO_DEL 0x25 /* LDO Entry Delay/VPhg Control 1 */
+#define CS35L33_HG_HEAD 0x29 /* H/G Headroom */
+#define CS35L33_HG_EN 0x2A /* H/G Enable/VPhg CNT2 */
+#define CS35L33_TX_VMON 0x2D /* TDM TX Control 1 (VMON) */
+#define CS35L33_TX_IMON 0x2E /* TDM TX Control 2 (IMON) */
+#define CS35L33_TX_VPMON 0x2F /* TDM TX Control 3 (VPMON) */
+#define CS35L33_TX_VBSTMON 0x30 /* TDM TX Control 4 (VBSTMON) */
+#define CS35L33_TX_FLAG 0x31 /* TDM TX Control 5 (FLAG) */
+#define CS35L33_TX_EN1 0x32 /* TDM TX Enable 1 */
+#define CS35L33_TX_EN2 0x33 /* TDM TX Enable 2 */
+#define CS35L33_TX_EN3 0x34 /* TDM TX Enable 3 */
+#define CS35L33_TX_EN4 0x35 /* TDM TX Enable 4 */
+#define CS35L33_RX_AUD 0x36 /* TDM RX Control 1 */
+#define CS35L33_RX_SPLY 0x37 /* TDM RX Control 2 */
+#define CS35L33_RX_ALIVE 0x38 /* TDM RX Control 3 */
+#define CS35L33_BST_CTL4 0x39 /* Boost Converter Control 4 */
+#define CS35L33_HG_STATUS 0x3F /* H/G Status */
+#define CS35L33_MAX_REGISTER 0x59
+
+#define CS35L33_MCLK_5644 5644800
+#define CS35L33_MCLK_6144 6144000
+#define CS35L33_MCLK_6 6000000
+#define CS35L33_MCLK_11289 11289600
+#define CS35L33_MCLK_12 12000000
+#define CS35L33_MCLK_12288 12288000
+
+/* CS35L33_PWRCTL1 */
+#define CS35L33_PDN_AMP (1 << 7)
+#define CS35L33_PDN_BST (1 << 2)
+#define CS35L33_PDN_ALL 1
+
+/* CS35L33_PWRCTL2 */
+#define CS35L33_PDN_VMON_SHIFT 7
+#define CS35L33_PDN_VMON (1 << CS35L33_PDN_VMON_SHIFT)
+#define CS35L33_PDN_IMON_SHIFT 6
+#define CS35L33_PDN_IMON (1 << CS35L33_PDN_IMON_SHIFT)
+#define CS35L33_PDN_VPMON_SHIFT 5
+#define CS35L33_PDN_VPMON (1 << CS35L33_PDN_VPMON_SHIFT)
+#define CS35L33_PDN_VBSTMON_SHIFT 4
+#define CS35L33_PDN_VBSTMON (1 << CS35L33_PDN_VBSTMON_SHIFT)
+#define CS35L33_SDOUT_3ST_I2S_SHIFT 3
+#define CS35L33_SDOUT_3ST_I2S (1 << CS35L33_SDOUT_3ST_I2S_SHIFT)
+#define CS35L33_PDN_SDIN_SHIFT 2
+#define CS35L33_PDN_SDIN (1 << CS35L33_PDN_SDIN_SHIFT)
+#define CS35L33_PDN_TDM_SHIFT 1
+#define CS35L33_PDN_TDM (1 << CS35L33_PDN_TDM_SHIFT)
+
+/* CS35L33_CLK_CTL */
+#define CS35L33_MCLKDIS (1 << 7)
+#define CS35L33_MCLKDIV2 (1 << 6)
+#define CS35L33_SDOUT_3ST_TDM (1 << 5)
+#define CS35L33_INT_FS_RATE (1 << 4)
+#define CS35L33_ADSP_FS 0xF
+
+/* CS35L33_PROTECT_CTL */
+#define CS35L33_ALIVE_WD_DIS (3 << 2)
+
+/* CS35L33_BST_CTL1 */
+#define CS35L33_BST_CTL_SRC (1 << 6)
+#define CS35L33_BST_CTL_SHIFT (1 << 5)
+#define CS35L33_BST_CTL_MASK 0x3F
+
+/* CS35L33_BST_CTL2 */
+#define CS35L33_TDM_WD_SEL (1 << 4)
+#define CS35L33_ALIVE_WD_DIS2 (1 << 3)
+#define CS35L33_VBST_SR_STEP 0x3
+
+/* CS35L33_ADSP_CTL */
+#define CS35L33_ADSP_DRIVE (1 << 7)
+#define CS35L33_MS_MASK (1 << 6)
+#define CS35L33_SDIN_LOC (3 << 4)
+#define CS35L33_ALIVE_RATE 0x3
+
+/* CS35L33_ADC_CTL */
+#define CS35L33_INV_VMON (1 << 7)
+#define CS35L33_INV_IMON (1 << 6)
+#define CS35L33_ADC_NOTCH_DIS (1 << 5)
+#define CS35L33_IMON_SCALE 0xF
+
+/* CS35L33_DAC_CTL */
+#define CS35L33_INV_DAC (1 << 7)
+#define CS35L33_DAC_NOTCH_DIS (1 << 5)
+#define CS35L33_DIGSFT (1 << 4)
+#define CS35L33_DSR_RATE 0xF
+
+/* CS35L33_CLASSD_CTL */
+#define CS35L33_AMP_SD (1 << 6)
+#define CS35L33_AMP_DRV_SEL_SRC (1 << 5)
+#define CS35L33_AMP_DRV_SEL_MASK 0x10
+#define CS35L33_AMP_DRV_SEL_SHIFT 4
+#define CS35L33_AMP_CAL (1 << 3)
+#define CS35L33_GAIN_CHG_ZC_MASK 0x04
+#define CS35L33_GAIN_CHG_ZC_SHIFT 2
+#define CS35L33_CLASS_D_CTL_MASK 0x3F
+
+/* CS35L33_AMP_CTL */
+#define CS35L33_AMP_GAIN 0xF0
+#define CS35L33_CAL_ERR_RLS (1 << 3)
+#define CS35L33_AMP_SHORT_RLS (1 << 2)
+#define CS35L33_OTW_RLS (1 << 1)
+#define CS35L33_OTE_RLS 1
+
+/* CS35L33_INT_MASK_1 */
+#define CS35L33_M_CAL_ERR_SHIFT 6
+#define CS35L33_M_CAL_ERR (1 << CS35L33_M_CAL_ERR_SHIFT)
+#define CS35L33_M_ALIVE_ERR_SHIFT 5
+#define CS35L33_M_ALIVE_ERR (1 << CS35L33_M_ALIVE_ERR_SHIFT)
+#define CS35L33_M_AMP_SHORT_SHIFT 2
+#define CS35L33_M_AMP_SHORT (1 << CS35L33_M_AMP_SHORT_SHIFT)
+#define CS35L33_M_OTW_SHIFT 1
+#define CS35L33_M_OTW (1 << CS35L33_M_OTW_SHIFT)
+#define CS35L33_M_OTE_SHIFT 0
+#define CS35L33_M_OTE (1 << CS35L33_M_OTE_SHIFT)
+
+/* CS35L33_INT_STATUS_1 */
+#define CS35L33_CAL_ERR (1 << 6)
+#define CS35L33_ALIVE_ERR (1 << 5)
+#define CS35L33_ADSPCLK_ERR (1 << 4)
+#define CS35L33_MCLK_ERR (1 << 3)
+#define CS35L33_AMP_SHORT (1 << 2)
+#define CS35L33_OTW (1 << 1)
+#define CS35L33_OTE (1 << 0)
+
+/* CS35L33_INT_STATUS_2 */
+#define CS35L33_VMON_OVFL (1 << 7)
+#define CS35L33_IMON_OVFL (1 << 6)
+#define CS35L33_VPMON_OVFL (1 << 5)
+#define CS35L33_VBSTMON_OVFL (1 << 4)
+#define CS35L33_PDN_DONE 1
+
+/* CS35L33_BST_CTL4 */
+#define CS35L33_BST_RGS 0x70
+#define CS35L33_BST_COEFF3 0xF
+
+/* CS35L33_HG_MEMLDO_CTL */
+#define CS35L33_MEM_DEPTH_SHIFT 5
+#define CS35L33_MEM_DEPTH_MASK (0x3 << CS35L33_MEM_DEPTH_SHIFT)
+#define CS35L33_LDO_THLD_SHIFT 1
+#define CS35L33_LDO_THLD_MASK (0xF << CS35L33_LDO_THLD_SHIFT)
+#define CS35L33_LDO_DISABLE_SHIFT 0
+#define CS35L33_LDO_DISABLE_MASK (0x1 << CS35L33_LDO_DISABLE_SHIFT)
+
+/* CS35L33_LDO_DEL */
+#define CS35L33_VP_HG_VA_SHIFT 5
+#define CS35L33_VP_HG_VA_MASK (0x7 << CS35L33_VP_HG_VA_SHIFT)
+#define CS35L33_LDO_ENTRY_DELAY_SHIFT 2
+#define CS35L33_LDO_ENTRY_DELAY_MASK (0x7 << CS35L33_LDO_ENTRY_DELAY_SHIFT)
+#define CS35L33_VP_HG_RATE_SHIFT 0
+#define CS35L33_VP_HG_RATE_MASK (0x3 << CS35L33_VP_HG_RATE_SHIFT)
+
+/* CS35L33_HG_HEAD */
+#define CS35L33_HD_RM_SHIFT 0
+#define CS35L33_HD_RM_MASK (0x7F << CS35L33_HD_RM_SHIFT)
+
+/* CS35L33_HG_EN */
+#define CS35L33_CLASS_HG_ENA_SHIFT 7
+#define CS35L33_CLASS_HG_EN_MASK (0x1 << CS35L33_CLASS_HG_ENA_SHIFT)
+#define CS35L33_VP_HG_AUTO_SHIFT 6
+#define CS35L33_VP_HG_AUTO_MASK (0x1 << CS35L33_VP_HG_AUTO_SHIFT)
+#define CS35L33_VP_HG_SHIFT 0
+#define CS35L33_VP_HG_MASK (0x1F << CS35L33_VP_HG_SHIFT)
+
+#define CS35L33_RATES (SNDRV_PCM_RATE_8000_48000)
+#define CS35L33_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+/* CS35L33_{RX,TX}_X */
+#define CS35L33_X_STATE_SHIFT 7
+#define CS35L33_X_STATE (1 << CS35L33_X_STATE_SHIFT)
+#define CS35L33_X_LOC_SHIFT 0
+#define CS35L33_X_LOC (0x1F << CS35L33_X_LOC_SHIFT)
+
+/* CS35L33_RX_AUD */
+#define CS35L33_AUDIN_RX_DEPTH_SHIFT 5
+#define CS35L33_AUDIN_RX_DEPTH (0x7 << CS35L33_AUDIN_RX_DEPTH_SHIFT)
+
+#endif
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 5ec5a682d186..954a4f5d3338 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -359,6 +359,11 @@ SND_SOC_DAPM_INPUT("IN2R"),
SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"),
+SND_SOC_DAPM_OUTPUT("DSP Voice Trigger"),
+
+SND_SOC_DAPM_SWITCH("DSP3 Voice Trigger", SND_SOC_NOPM, 2, 0,
+ &arizona_voice_trigger_switch[2]),
+
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
0, NULL, 0, arizona_in_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
@@ -899,10 +904,16 @@ static const struct snd_soc_dapm_route cs47l24_dapm_routes[] = {
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
+ { "DRC2 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
{ "DRC2 Signal Activity", NULL, "DRC2L" },
{ "DRC2 Signal Activity", NULL, "DRC2R" },
+
+ { "DSP Voice Trigger", NULL, "SYSCLK" },
+ { "DSP Voice Trigger", NULL, "DSP3 Voice Trigger" },
+ { "DSP3 Voice Trigger", "Switch", "DSP3" },
};
static int cs47l24_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
@@ -1067,6 +1078,7 @@ static irqreturn_t cs47l24_adsp2_irq(int irq, void *data)
{
struct cs47l24_priv *priv = data;
struct arizona *arizona = priv->core.arizona;
+ struct arizona_voice_trigger_info info;
int serviced = 0;
int i, ret;
@@ -1074,6 +1086,12 @@ static irqreturn_t cs47l24_adsp2_irq(int irq, void *data)
ret = wm_adsp_compr_handle_irq(&priv->core.adsp[i]);
if (ret != -ENODEV)
serviced++;
+ if (ret == WM_ADSP_COMPR_VOICE_TRIGGER) {
+ info.core = i;
+ arizona_call_notifiers(arizona,
+ ARIZONA_NOTIFY_VOICE_TRIGGER,
+ &info);
+ }
}
if (!serviced) {
@@ -1096,6 +1114,7 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
arizona_init_spk(codec);
arizona_init_gpio(codec);
arizona_init_mono(codec);
+ arizona_init_notifiers(codec);
ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
"ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
new file mode 100644
index 000000000000..2c0d9c430a8c
--- /dev/null
+++ b/sound/soc/codecs/cs53l30.c
@@ -0,0 +1,1143 @@
+/*
+ * cs53l30.c -- CS53L30 ALSA SoC Audio driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Authors: Paul Handrigan <Paul.Handrigan@cirrus.com>,
+ * Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "cs53l30.h"
+
+#define CS53L30_NUM_SUPPLIES 2
+static const char *const cs53l30_supply_names[CS53L30_NUM_SUPPLIES] = {
+ "VA",
+ "VP",
+};
+
+struct cs53l30_private {
+ struct regulator_bulk_data supplies[CS53L30_NUM_SUPPLIES];
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *mute_gpio;
+ struct clk *mclk;
+ bool use_sdout2;
+ u32 mclk_rate;
+};
+
+static const struct reg_default cs53l30_reg_defaults[] = {
+ { CS53L30_PWRCTL, CS53L30_PWRCTL_DEFAULT },
+ { CS53L30_MCLKCTL, CS53L30_MCLKCTL_DEFAULT },
+ { CS53L30_INT_SR_CTL, CS53L30_INT_SR_CTL_DEFAULT },
+ { CS53L30_MICBIAS_CTL, CS53L30_MICBIAS_CTL_DEFAULT },
+ { CS53L30_ASPCFG_CTL, CS53L30_ASPCFG_CTL_DEFAULT },
+ { CS53L30_ASP_CTL1, CS53L30_ASP_CTL1_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL1, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL2, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL3, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_CTL4, CS53L30_ASP_TDMTX_CTLx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN1, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN2, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN3, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN4, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN5, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_TDMTX_EN6, CS53L30_ASP_TDMTX_ENx_DEFAULT },
+ { CS53L30_ASP_CTL2, CS53L30_ASP_CTL2_DEFAULT },
+ { CS53L30_SFT_RAMP, CS53L30_SFT_RMP_DEFAULT },
+ { CS53L30_LRCK_CTL1, CS53L30_LRCK_CTLx_DEFAULT },
+ { CS53L30_LRCK_CTL2, CS53L30_LRCK_CTLx_DEFAULT },
+ { CS53L30_MUTEP_CTL1, CS53L30_MUTEP_CTL1_DEFAULT },
+ { CS53L30_MUTEP_CTL2, CS53L30_MUTEP_CTL2_DEFAULT },
+ { CS53L30_INBIAS_CTL1, CS53L30_INBIAS_CTL1_DEFAULT },
+ { CS53L30_INBIAS_CTL2, CS53L30_INBIAS_CTL2_DEFAULT },
+ { CS53L30_DMIC1_STR_CTL, CS53L30_DMIC1_STR_CTL_DEFAULT },
+ { CS53L30_DMIC2_STR_CTL, CS53L30_DMIC2_STR_CTL_DEFAULT },
+ { CS53L30_ADCDMIC1_CTL1, CS53L30_ADCDMICx_CTL1_DEFAULT },
+ { CS53L30_ADCDMIC1_CTL2, CS53L30_ADCDMIC1_CTL2_DEFAULT },
+ { CS53L30_ADC1_CTL3, CS53L30_ADCx_CTL3_DEFAULT },
+ { CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_CTL_DEFAULT },
+ { CS53L30_ADC1A_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC1B_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC1A_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_ADC1B_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_ADCDMIC2_CTL1, CS53L30_ADCDMICx_CTL1_DEFAULT },
+ { CS53L30_ADCDMIC2_CTL2, CS53L30_ADCDMIC1_CTL2_DEFAULT },
+ { CS53L30_ADC2_CTL3, CS53L30_ADCx_CTL3_DEFAULT },
+ { CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_CTL_DEFAULT },
+ { CS53L30_ADC2A_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC2B_AFE_CTL, CS53L30_ADCxy_AFE_CTL_DEFAULT },
+ { CS53L30_ADC2A_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_ADC2B_DIG_VOL, CS53L30_ADCxy_DIG_VOL_DEFAULT },
+ { CS53L30_INT_MASK, CS53L30_DEVICE_INT_MASK },
+};
+
+static bool cs53l30_volatile_register(struct device *dev, unsigned int reg)
+{
+ if (reg == CS53L30_IS)
+ return true;
+ else
+ return false;
+}
+
+static bool cs53l30_writeable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS53L30_DEVID_AB:
+ case CS53L30_DEVID_CD:
+ case CS53L30_DEVID_E:
+ case CS53L30_REVID:
+ case CS53L30_IS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool cs53l30_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS53L30_DEVID_AB:
+ case CS53L30_DEVID_CD:
+ case CS53L30_DEVID_E:
+ case CS53L30_REVID:
+ case CS53L30_PWRCTL:
+ case CS53L30_MCLKCTL:
+ case CS53L30_INT_SR_CTL:
+ case CS53L30_MICBIAS_CTL:
+ case CS53L30_ASPCFG_CTL:
+ case CS53L30_ASP_CTL1:
+ case CS53L30_ASP_TDMTX_CTL1:
+ case CS53L30_ASP_TDMTX_CTL2:
+ case CS53L30_ASP_TDMTX_CTL3:
+ case CS53L30_ASP_TDMTX_CTL4:
+ case CS53L30_ASP_TDMTX_EN1:
+ case CS53L30_ASP_TDMTX_EN2:
+ case CS53L30_ASP_TDMTX_EN3:
+ case CS53L30_ASP_TDMTX_EN4:
+ case CS53L30_ASP_TDMTX_EN5:
+ case CS53L30_ASP_TDMTX_EN6:
+ case CS53L30_ASP_CTL2:
+ case CS53L30_SFT_RAMP:
+ case CS53L30_LRCK_CTL1:
+ case CS53L30_LRCK_CTL2:
+ case CS53L30_MUTEP_CTL1:
+ case CS53L30_MUTEP_CTL2:
+ case CS53L30_INBIAS_CTL1:
+ case CS53L30_INBIAS_CTL2:
+ case CS53L30_DMIC1_STR_CTL:
+ case CS53L30_DMIC2_STR_CTL:
+ case CS53L30_ADCDMIC1_CTL1:
+ case CS53L30_ADCDMIC1_CTL2:
+ case CS53L30_ADC1_CTL3:
+ case CS53L30_ADC1_NG_CTL:
+ case CS53L30_ADC1A_AFE_CTL:
+ case CS53L30_ADC1B_AFE_CTL:
+ case CS53L30_ADC1A_DIG_VOL:
+ case CS53L30_ADC1B_DIG_VOL:
+ case CS53L30_ADCDMIC2_CTL1:
+ case CS53L30_ADCDMIC2_CTL2:
+ case CS53L30_ADC2_CTL3:
+ case CS53L30_ADC2_NG_CTL:
+ case CS53L30_ADC2A_AFE_CTL:
+ case CS53L30_ADC2B_AFE_CTL:
+ case CS53L30_ADC2A_DIG_VOL:
+ case CS53L30_ADC2B_DIG_VOL:
+ case CS53L30_INT_MASK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static DECLARE_TLV_DB_SCALE(adc_boost_tlv, 0, 2000, 0);
+static DECLARE_TLV_DB_SCALE(adc_ng_boost_tlv, 0, 3000, 0);
+static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
+static DECLARE_TLV_DB_SCALE(dig_tlv, -9600, 100, 1);
+static DECLARE_TLV_DB_SCALE(pga_preamp_tlv, 0, 10000, 0);
+
+static const char * const input1_sel_text[] = {
+ "DMIC1 On AB In",
+ "DMIC1 On A In",
+ "DMIC1 On B In",
+ "ADC1 On AB In",
+ "ADC1 On A In",
+ "ADC1 On B In",
+ "DMIC1 Off ADC1 Off",
+};
+
+static unsigned int const input1_sel_values[] = {
+ CS53L30_CH_TYPE,
+ CS53L30_ADCxB_PDN | CS53L30_CH_TYPE,
+ CS53L30_ADCxA_PDN | CS53L30_CH_TYPE,
+ CS53L30_DMICx_PDN,
+ CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+};
+
+static const char * const input2_sel_text[] = {
+ "DMIC2 On AB In",
+ "DMIC2 On A In",
+ "DMIC2 On B In",
+ "ADC2 On AB In",
+ "ADC2 On A In",
+ "ADC2 On B In",
+ "DMIC2 Off ADC2 Off",
+};
+
+static unsigned int const input2_sel_values[] = {
+ 0x0,
+ CS53L30_ADCxB_PDN,
+ CS53L30_ADCxA_PDN,
+ CS53L30_DMICx_PDN,
+ CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_DMICx_PDN,
+ CS53L30_ADCxA_PDN | CS53L30_ADCxB_PDN | CS53L30_DMICx_PDN,
+};
+
+static const char * const input1_route_sel_text[] = {
+ "ADC1_SEL", "DMIC1_SEL",
+};
+
+static const struct soc_enum input1_route_sel_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADCDMIC1_CTL1, CS53L30_CH_TYPE_SHIFT,
+ ARRAY_SIZE(input1_route_sel_text),
+ input1_route_sel_text);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(input1_sel_enum, CS53L30_ADCDMIC1_CTL1, 0,
+ CS53L30_ADCDMICx_PDN_MASK, input1_sel_text,
+ input1_sel_values);
+
+static const struct snd_kcontrol_new input1_route_sel_mux =
+ SOC_DAPM_ENUM("Input 1 Route", input1_route_sel_enum);
+
+static const char * const input2_route_sel_text[] = {
+ "ADC2_SEL", "DMIC2_SEL",
+};
+
+/* Note: CS53L30_ADCDMIC1_CTL1 CH_TYPE controls inputs 1 and 2 */
+static const struct soc_enum input2_route_sel_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADCDMIC1_CTL1, 0,
+ ARRAY_SIZE(input2_route_sel_text),
+ input2_route_sel_text);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(input2_sel_enum, CS53L30_ADCDMIC2_CTL1, 0,
+ CS53L30_ADCDMICx_PDN_MASK, input2_sel_text,
+ input2_sel_values);
+
+static const struct snd_kcontrol_new input2_route_sel_mux =
+ SOC_DAPM_ENUM("Input 2 Route", input2_route_sel_enum);
+
+/*
+ * TB = 6144*(MCLK(int) scaling factor)/MCLK(internal)
+ * TB - Time base
+ * NOTE: If MCLK_INT_SCALE = 0, then TB=1
+ */
+static const char * const cs53l30_ng_delay_text[] = {
+ "TB*50ms", "TB*100ms", "TB*150ms", "TB*200ms",
+};
+
+static const struct soc_enum adc1_ng_delay_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_DELAY_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_delay_text),
+ cs53l30_ng_delay_text);
+
+static const struct soc_enum adc2_ng_delay_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_DELAY_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_delay_text),
+ cs53l30_ng_delay_text);
+
+/* The noise gate threshold selected depends on the NG Boost setting */
+static const char * const cs53l30_ng_thres_text[] = {
+ "-64dB/-34dB", "-66dB/-36dB", "-70dB/-40dB", "-73dB/-43dB",
+ "-76dB/-46dB", "-82dB/-52dB", "-58dB", "-64dB",
+};
+
+static const struct soc_enum adc1_ng_thres_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC1_NG_CTL, CS53L30_ADCx_NG_THRESH_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_thres_text),
+ cs53l30_ng_thres_text);
+
+static const struct soc_enum adc2_ng_thres_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC2_NG_CTL, CS53L30_ADCx_NG_THRESH_SHIFT,
+ ARRAY_SIZE(cs53l30_ng_thres_text),
+ cs53l30_ng_thres_text);
+
+/* Corner frequencies are given for an Fs of 48 kHz. */
+static const char * const hpf_corner_freq_text[] = {
+ "1.86Hz", "120Hz", "235Hz", "466Hz",
+};
+
+static const struct soc_enum adc1_hpf_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC1_CTL3, CS53L30_ADCx_HPF_CF_SHIFT,
+ ARRAY_SIZE(hpf_corner_freq_text), hpf_corner_freq_text);
+
+static const struct soc_enum adc2_hpf_enum =
+ SOC_ENUM_SINGLE(CS53L30_ADC2_CTL3, CS53L30_ADCx_HPF_CF_SHIFT,
+ ARRAY_SIZE(hpf_corner_freq_text), hpf_corner_freq_text);
+
+static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
+ SOC_SINGLE("Digital Soft-Ramp Switch", CS53L30_SFT_RAMP,
+ CS53L30_DIGSFT_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1 Noise Gate Ganging Switch", CS53L30_ADC1_CTL3,
+ CS53L30_ADCx_NG_ALL_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2 Noise Gate Ganging Switch", CS53L30_ADC2_CTL3,
+ CS53L30_ADCx_NG_ALL_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1A Noise Gate Enable Switch", CS53L30_ADC1_NG_CTL,
+ CS53L30_ADCxA_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1B Noise Gate Enable Switch", CS53L30_ADC1_NG_CTL,
+ CS53L30_ADCxB_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2A Noise Gate Enable Switch", CS53L30_ADC2_NG_CTL,
+ CS53L30_ADCxA_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2B Noise Gate Enable Switch", CS53L30_ADC2_NG_CTL,
+ CS53L30_ADCxB_NG_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1 Notch Filter Switch", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCx_NOTCH_DIS_SHIFT, 1, 1),
+ SOC_SINGLE("ADC2 Notch Filter Switch", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCx_NOTCH_DIS_SHIFT, 1, 1),
+ SOC_SINGLE("ADC1A Invert Switch", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxA_INV_SHIFT, 1, 0),
+ SOC_SINGLE("ADC1B Invert Switch", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxB_INV_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2A Invert Switch", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxA_INV_SHIFT, 1, 0),
+ SOC_SINGLE("ADC2B Invert Switch", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxB_INV_SHIFT, 1, 0),
+
+ SOC_SINGLE_TLV("ADC1A Digital Boost Volume", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxA_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC1B Digital Boost Volume", CS53L30_ADCDMIC1_CTL2,
+ CS53L30_ADCxB_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC2A Digital Boost Volume", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxA_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC2B Digital Boost Volume", CS53L30_ADCDMIC2_CTL2,
+ CS53L30_ADCxB_DIG_BOOST_SHIFT, 1, 0, adc_boost_tlv),
+ SOC_SINGLE_TLV("ADC1 NG Boost Volume", CS53L30_ADC1_NG_CTL,
+ CS53L30_ADCx_NG_BOOST_SHIFT, 1, 0, adc_ng_boost_tlv),
+ SOC_SINGLE_TLV("ADC2 NG Boost Volume", CS53L30_ADC2_NG_CTL,
+ CS53L30_ADCx_NG_BOOST_SHIFT, 1, 0, adc_ng_boost_tlv),
+
+ SOC_DOUBLE_R_TLV("ADC1 Preamplifier Volume", CS53L30_ADC1A_AFE_CTL,
+ CS53L30_ADC1B_AFE_CTL, CS53L30_ADCxy_PREAMP_SHIFT,
+ 2, 0, pga_preamp_tlv),
+ SOC_DOUBLE_R_TLV("ADC2 Preamplifier Volume", CS53L30_ADC2A_AFE_CTL,
+ CS53L30_ADC2B_AFE_CTL, CS53L30_ADCxy_PREAMP_SHIFT,
+ 2, 0, pga_preamp_tlv),
+
+ SOC_ENUM("Input 1 Channel Select", input1_sel_enum),
+ SOC_ENUM("Input 2 Channel Select", input2_sel_enum),
+
+ SOC_ENUM("ADC1 HPF Select", adc1_hpf_enum),
+ SOC_ENUM("ADC2 HPF Select", adc2_hpf_enum),
+ SOC_ENUM("ADC1 NG Threshold", adc1_ng_thres_enum),
+ SOC_ENUM("ADC2 NG Threshold", adc2_ng_thres_enum),
+ SOC_ENUM("ADC1 NG Delay", adc1_ng_delay_enum),
+ SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),
+
+ SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
+ CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+ SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
+ CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+ SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
+ CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+ SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
+ CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+
+ SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
+ CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+ SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
+ CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+ SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
+ CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+ SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
+ CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+};
+
+static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("IN1_DMIC1"),
+ SND_SOC_DAPM_INPUT("IN2"),
+ SND_SOC_DAPM_INPUT("IN3_DMIC2"),
+ SND_SOC_DAPM_INPUT("IN4"),
+ SND_SOC_DAPM_SUPPLY("MIC1 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC1_BIAS_PDN_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC2 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC2_BIAS_PDN_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC3 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC3_BIAS_PDN_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MIC4 Bias", CS53L30_MICBIAS_CTL,
+ CS53L30_MIC4_BIAS_PDN_SHIFT, 1, NULL, 0),
+
+ SND_SOC_DAPM_AIF_OUT("ASP_SDOUT1", NULL, 0, CS53L30_ASP_CTL1,
+ CS53L30_ASP_SDOUTx_PDN_SHIFT, 1),
+ SND_SOC_DAPM_AIF_OUT("ASP_SDOUT2", NULL, 0, CS53L30_ASP_CTL2,
+ CS53L30_ASP_SDOUTx_PDN_SHIFT, 1),
+
+ SND_SOC_DAPM_MUX("Input Mux 1", SND_SOC_NOPM, 0, 0,
+ &input1_route_sel_mux),
+ SND_SOC_DAPM_MUX("Input Mux 2", SND_SOC_NOPM, 0, 0,
+ &input2_route_sel_mux),
+
+ SND_SOC_DAPM_ADC("ADC1A", NULL, CS53L30_ADCDMIC1_CTL1,
+ CS53L30_ADCxA_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("ADC1B", NULL, CS53L30_ADCDMIC1_CTL1,
+ CS53L30_ADCxB_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("ADC2A", NULL, CS53L30_ADCDMIC2_CTL1,
+ CS53L30_ADCxA_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("ADC2B", NULL, CS53L30_ADCDMIC2_CTL1,
+ CS53L30_ADCxB_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("DMIC1", NULL, CS53L30_ADCDMIC1_CTL1,
+ CS53L30_DMICx_PDN_SHIFT, 1),
+ SND_SOC_DAPM_ADC("DMIC2", NULL, CS53L30_ADCDMIC2_CTL1,
+ CS53L30_DMICx_PDN_SHIFT, 1),
+};
+
+static const struct snd_soc_dapm_route cs53l30_dapm_routes[] = {
+ /* ADC Input Paths */
+ {"ADC1A", NULL, "IN1_DMIC1"},
+ {"Input Mux 1", "ADC1_SEL", "ADC1A"},
+ {"ADC1B", NULL, "IN2"},
+
+ {"ADC2A", NULL, "IN3_DMIC2"},
+ {"Input Mux 2", "ADC2_SEL", "ADC2A"},
+ {"ADC2B", NULL, "IN4"},
+
+ /* MIC Bias Paths */
+ {"ADC1A", NULL, "MIC1 Bias"},
+ {"ADC1B", NULL, "MIC2 Bias"},
+ {"ADC2A", NULL, "MIC3 Bias"},
+ {"ADC2B", NULL, "MIC4 Bias"},
+
+ /* DMIC Paths */
+ {"DMIC1", NULL, "IN1_DMIC1"},
+ {"Input Mux 1", "DMIC1_SEL", "DMIC1"},
+
+ {"DMIC2", NULL, "IN3_DMIC2"},
+ {"Input Mux 2", "DMIC2_SEL", "DMIC2"},
+};
+
+static const struct snd_soc_dapm_route cs53l30_dapm_routes_sdout1[] = {
+ /* Output Paths when using SDOUT1 only */
+ {"ASP_SDOUT1", NULL, "ADC1A" },
+ {"ASP_SDOUT1", NULL, "Input Mux 1"},
+ {"ASP_SDOUT1", NULL, "ADC1B"},
+
+ {"ASP_SDOUT1", NULL, "ADC2A"},
+ {"ASP_SDOUT1", NULL, "Input Mux 2"},
+ {"ASP_SDOUT1", NULL, "ADC2B"},
+
+ {"Capture", NULL, "ASP_SDOUT1"},
+};
+
+static const struct snd_soc_dapm_route cs53l30_dapm_routes_sdout2[] = {
+ /* Output Paths when using both SDOUT1 and SDOUT2 */
+ {"ASP_SDOUT1", NULL, "ADC1A" },
+ {"ASP_SDOUT1", NULL, "Input Mux 1"},
+ {"ASP_SDOUT1", NULL, "ADC1B"},
+
+ {"ASP_SDOUT2", NULL, "ADC2A"},
+ {"ASP_SDOUT2", NULL, "Input Mux 2"},
+ {"ASP_SDOUT2", NULL, "ADC2B"},
+
+ {"Capture", NULL, "ASP_SDOUT1"},
+ {"Capture", NULL, "ASP_SDOUT2"},
+};
+
+struct cs53l30_mclk_div {
+ u32 mclk_rate;
+ u32 srate;
+ u8 asp_rate;
+ u8 internal_fs_ratio;
+ u8 mclk_int_scale;
+};
+
+static struct cs53l30_mclk_div cs53l30_mclk_coeffs[] = {
+ /* NOTE: Enable MCLK_INT_SCALE to save power. */
+
+ /* MCLK, Sample Rate, asp_rate, internal_fs_ratio, mclk_int_scale */
+ {5644800, 11025, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {5644800, 22050, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {5644800, 44100, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+
+ {6000000, 8000, 0x1, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 11025, 0x2, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 12000, 0x4, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 16000, 0x5, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 22050, 0x6, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 24000, 0x8, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 32000, 0x9, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 44100, 0xA, 0, CS53L30_MCLK_INT_SCALE},
+ {6000000, 48000, 0xC, 0, CS53L30_MCLK_INT_SCALE},
+
+ {6144000, 8000, 0x1, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 11025, 0x2, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 12000, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 16000, 0x5, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 22050, 0x6, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 24000, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 32000, 0x9, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 44100, 0xA, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6144000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+
+ {6400000, 8000, 0x1, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 11025, 0x2, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 12000, 0x4, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 16000, 0x5, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 22050, 0x6, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 24000, 0x8, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 32000, 0x9, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 44100, 0xA, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+ {6400000, 48000, 0xC, CS53L30_INTRNL_FS_RATIO, CS53L30_MCLK_INT_SCALE},
+};
+
+struct cs53l30_mclkx_div {
+ u32 mclkx;
+ u8 ratio;
+ u8 mclkdiv;
+};
+
+static struct cs53l30_mclkx_div cs53l30_mclkx_coeffs[] = {
+ {5644800, 1, CS53L30_MCLK_DIV_BY_1},
+ {6000000, 1, CS53L30_MCLK_DIV_BY_1},
+ {6144000, 1, CS53L30_MCLK_DIV_BY_1},
+ {11289600, 2, CS53L30_MCLK_DIV_BY_2},
+ {12288000, 2, CS53L30_MCLK_DIV_BY_2},
+ {12000000, 2, CS53L30_MCLK_DIV_BY_2},
+ {19200000, 3, CS53L30_MCLK_DIV_BY_3},
+};
+
+static int cs53l30_get_mclkx_coeff(int mclkx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs53l30_mclkx_coeffs); i++) {
+ if (cs53l30_mclkx_coeffs[i].mclkx == mclkx)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cs53l30_get_mclk_coeff(int mclk_rate, int srate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs53l30_mclk_coeffs); i++) {
+ if (cs53l30_mclk_coeffs[i].mclk_rate == mclk_rate &&
+ cs53l30_mclk_coeffs[i].srate == srate)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cs53l30_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ int mclkx_coeff;
+ u32 mclk_rate;
+
+ /* MCLKX -> MCLK; see the standalone divider sketch after this function */
+ mclkx_coeff = cs53l30_get_mclkx_coeff(freq);
+ if (mclkx_coeff < 0)
+ return mclkx_coeff;
+
+ mclk_rate = cs53l30_mclkx_coeffs[mclkx_coeff].mclkx /
+ cs53l30_mclkx_coeffs[mclkx_coeff].ratio;
+
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_DIV_MASK,
+ cs53l30_mclkx_coeffs[mclkx_coeff].mclkdiv);
+
+ priv->mclk_rate = mclk_rate;
+
+ return 0;
+}
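
A minimal standalone sketch, separate from the patch, of the MCLKX-to-MCLK step that cs53l30_set_sysclk performs above: look the external clock up in the divider table, then divide it down to the internal MCLK rate that keys cs53l30_mclk_coeffs. The table values are copied from cs53l30_mclkx_coeffs; the demo main() and the 12.288 MHz example are illustrative only.

	#include <stdio.h>

	struct mclkx_div { unsigned int mclkx; unsigned char ratio; };

	/* Values mirror cs53l30_mclkx_coeffs in the driver above */
	static const struct mclkx_div coeffs[] = {
		{  5644800, 1 }, {  6000000, 1 }, {  6144000, 1 },
		{ 11289600, 2 }, { 12288000, 2 }, { 12000000, 2 },
		{ 19200000, 3 },
	};

	int main(void)
	{
		unsigned int mclkx = 12288000;	/* example external clock */
		unsigned int i;

		for (i = 0; i < sizeof(coeffs) / sizeof(coeffs[0]); i++) {
			if (coeffs[i].mclkx == mclkx) {
				/* 12288000 / 2 = 6144000, a key in cs53l30_mclk_coeffs */
				printf("MCLKX %u -> MCLK %u\n",
				       mclkx, mclkx / coeffs[i].ratio);
				return 0;
			}
		}
		printf("unsupported MCLKX %u\n", mclkx);
		return 1;
	}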
+
+static int cs53l30_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ u8 aspcfg = 0, aspctl1 = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aspcfg |= CS53L30_ASP_MS;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* DAI mode; a register-value sketch follows this function */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ /* Set TDM_PDN to turn off TDM mode -- Reset default */
+ aspctl1 |= CS53L30_ASP_TDM_PDN;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ /*
+ * Clear TDM_PDN to turn on TDM mode; Use ASP_SCLK_INV = 0
+ * with SHIFT_LEFT = 1 combination as Figure 4-13 shows in
+ * the CS53L30 datasheet
+ */
+ aspctl1 |= CS53L30_SHIFT_LEFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check to see if the SCLK is inverted */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_NF:
+ case SND_SOC_DAIFMT_IB_IF:
+ aspcfg ^= CS53L30_ASP_SCLK_INV;
+ break;
+ default:
+ break;
+ }
+
+ regmap_update_bits(priv->regmap, CS53L30_ASPCFG_CTL,
+ CS53L30_ASP_MS | CS53L30_ASP_SCLK_INV, aspcfg);
+
+ regmap_update_bits(priv->regmap, CS53L30_ASP_CTL1,
+ CS53L30_ASP_TDM_PDN | CS53L30_SHIFT_LEFT, aspctl1);
+
+ return 0;
+}
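
A minimal standalone sketch, separate from the patch, of the field values cs53l30_set_dai_fmt above would program for one example format: DSP_A with the codec as clock master and an inverted bit clock. The bit positions are copied from cs53l30.h further down in this patch; the format choice itself is only an illustration.

	#include <stdio.h>

	#define ASP_MS		(1 << 7)	/* mirrors CS53L30_ASP_MS */
	#define ASP_SCLK_INV	(1 << 4)	/* mirrors CS53L30_ASP_SCLK_INV */
	#define ASP_TDM_PDN	(1 << 7)	/* mirrors CS53L30_ASP_TDM_PDN */
	#define SHIFT_LEFT	(1 << 4)	/* mirrors CS53L30_SHIFT_LEFT */

	int main(void)
	{
		/* DSP_A: TDM_PDN stays cleared, SHIFT_LEFT is set */
		unsigned int aspctl1 = SHIFT_LEFT;
		/* codec master + inverted SCLK */
		unsigned int aspcfg = ASP_MS | ASP_SCLK_INV;

		printf("ASPCFG_CTL <- 0x%02X (mask 0x%02X)\n",
		       aspcfg, ASP_MS | ASP_SCLK_INV);
		printf("ASP_CTL1   <- 0x%02X (mask 0x%02X)\n",
		       aspctl1, ASP_TDM_PDN | SHIFT_LEFT);
		return 0;
	}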
+
+static int cs53l30_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ int srate = params_rate(params);
+ int mclk_coeff;
+
+ /* MCLK -> srate */
+ mclk_coeff = cs53l30_get_mclk_coeff(priv->mclk_rate, srate);
+ if (mclk_coeff < 0)
+ return -EINVAL;
+
+ regmap_update_bits(priv->regmap, CS53L30_INT_SR_CTL,
+ CS53L30_INTRNL_FS_RATIO_MASK,
+ cs53l30_mclk_coeffs[mclk_coeff].internal_fs_ratio);
+
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_INT_SCALE_MASK,
+ cs53l30_mclk_coeffs[mclk_coeff].mclk_int_scale);
+
+ regmap_update_bits(priv->regmap, CS53L30_ASPCFG_CTL,
+ CS53L30_ASP_RATE_MASK,
+ cs53l30_mclk_coeffs[mclk_coeff].asp_rate);
+
+ return 0;
+}
+
+static int cs53l30_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int reg;
+ int i, inter_max_check, ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY)
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_LP_MASK, 0);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (dapm->bias_level == SND_SOC_BIAS_OFF) {
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret) {
+ dev_err(codec->dev,
+ "failed to enable MCLK: %d\n", ret);
+ return ret;
+ }
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_DIS_MASK, 0);
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_ULP_MASK, 0);
+ msleep(50);
+ } else {
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_ULP_MASK,
+ CS53L30_PDN_ULP);
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+ regmap_update_bits(priv->regmap, CS53L30_INT_MASK,
+ CS53L30_PDN_DONE, 0);
+ /*
+ * If digital softramp is set, the amount of time required
+ * for power down increases and depends on the digital
+ * volume setting.
+ */
+
+ /* Set the max possible time if digsft is set */
+ regmap_read(priv->regmap, CS53L30_SFT_RAMP, &reg);
+ if (reg & CS53L30_DIGSFT_MASK)
+ inter_max_check = CS53L30_PDN_POLL_MAX;
+ else
+ inter_max_check = 10;
+
+ regmap_update_bits(priv->regmap, CS53L30_PWRCTL,
+ CS53L30_PDN_ULP_MASK,
+ CS53L30_PDN_ULP);
+ /* PDN_DONE will take a minimum of 20 ms to be set. */
+ msleep(20);
+ /* Clear status */
+ regmap_read(priv->regmap, CS53L30_IS, &reg);
+ for (i = 0; i < inter_max_check; i++) {
+ if (inter_max_check < 10) {
+ usleep_range(1000, 1100);
+ regmap_read(priv->regmap, CS53L30_IS, &reg);
+ if (reg & CS53L30_PDN_DONE)
+ break;
+ } else {
+ usleep_range(10000, 10100);
+ regmap_read(priv->regmap, CS53L30_IS, &reg);
+ if (reg & CS53L30_PDN_DONE)
+ break;
+ }
+ }
+ /* PDN_DONE is set. We can now disable the MCLK. */
+ regmap_update_bits(priv->regmap, CS53L30_INT_MASK,
+ CS53L30_PDN_DONE, CS53L30_PDN_DONE);
+ regmap_update_bits(priv->regmap, CS53L30_MCLKCTL,
+ CS53L30_MCLK_DIS_MASK,
+ CS53L30_MCLK_DIS);
+ clk_disable_unprepare(priv->mclk);
+ break;
+ }
+
+ return 0;
+}
+
+static int cs53l30_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ u8 val = tristate ? CS53L30_ASP_3ST : 0;
+
+ return regmap_update_bits(priv->regmap, CS53L30_ASP_CTL1,
+ CS53L30_ASP_3ST_MASK, val);
+}
+
+static unsigned int const cs53l30_src_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static struct snd_pcm_hw_constraint_list src_constraints = {
+ .count = ARRAY_SIZE(cs53l30_src_rates),
+ .list = cs53l30_src_rates,
+};
+
+static int cs53l30_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &src_constraints);
+
+ return 0;
+}
+
+/*
+ * Note: the CS53L30 numbers TDM slots per byte, while ASoC numbers them per
+ * slot_width, so the ASoC slot mask must be converted to CS53L30 byte
+ * locations. A standalone sketch of the conversion follows this function.
+ */
+static int cs53l30_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+ unsigned int loc[CS53L30_TDM_SLOT_MAX] = {48, 48, 48, 48};
+ unsigned int slot_next, slot_step;
+ u64 tx_enable = 0;
+ int i;
+
+ if (!rx_mask) {
+ dev_err(dai->dev, "rx masks must not be 0\n");
+ return -EINVAL;
+ }
+
+ /* Assume slot_width is no greater than 64 bits */
+ if (slots <= 0 || slot_width <= 0 || slot_width > 64) {
+ dev_err(dai->dev, "invalid slot number or slot width\n");
+ return -EINVAL;
+ }
+
+ if (slot_width & 0x7) {
+ dev_err(dai->dev, "slot width must count in byte\n");
+ return -EINVAL;
+ }
+
+ /* How many bytes in each ASoC slot */
+ slot_step = slot_width >> 3;
+
+ for (i = 0; rx_mask && i < CS53L30_TDM_SLOT_MAX; i++) {
+ /* Find the first slot from LSB */
+ slot_next = __ffs(rx_mask);
+ /* Save the slot location by converting to CS53L30 slot */
+ loc[i] = slot_next * slot_step;
+ /* Create the mask of CS53L30 slot */
+ tx_enable |= (u64)((u64)(1 << slot_step) - 1) << (u64)loc[i];
+ /* Clear this slot from rx_mask */
+ rx_mask &= ~(1 << slot_next);
+ }
+
+ /* Error out to avoid slot shift */
+ if (rx_mask && i == CS53L30_TDM_SLOT_MAX) {
+ dev_err(dai->dev, "rx_mask exceeds max slot number: %d\n",
+ CS53L30_TDM_SLOT_MAX);
+ return -EINVAL;
+ }
+
+ /* Validate the last active CS53L30 slot */
+ slot_next = loc[i - 1] + slot_step - 1;
+ if (slot_next > 47) {
+ dev_err(dai->dev, "slot selection out of bounds: %u\n",
+ slot_next);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CS53L30_TDM_SLOT_MAX && loc[i] != 48; i++) {
+ regmap_update_bits(priv->regmap, CS53L30_ASP_TDMTX_CTL(i),
+ CS53L30_ASP_CHx_TX_LOC_MASK, loc[i]);
+ dev_dbg(dai->dev, "loc[%d]=%x\n", i, loc[i]);
+ }
+
+ for (i = 0; i < CS53L30_ASP_TDMTX_ENx_MAX && tx_enable; i++) {
+ regmap_write(priv->regmap, CS53L30_ASP_TDMTX_ENx(i),
+ tx_enable & 0xff);
+ tx_enable >>= 8;
+ dev_dbg(dai->dev, "en_reg=%x, tx_enable=%llx\n",
+ CS53L30_ASP_TDMTX_ENx(i), tx_enable & 0xff);
+ }
+
+ return 0;
+}
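
A minimal standalone sketch, separate from the patch, of the slot conversion done in cs53l30_set_dai_tdm_slot above, for an assumed slot_width of 16 bits and rx_mask of 0x5 (ASoC slots 0 and 2). It should print loc values of 0 and 4 and a tx_enable of 0x33, matching the byte-per-slot numbering described in the comment before the function; the sample mask and width are illustrative only.

	#include <stdio.h>
	#include <stdint.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int rx_mask = 0x5, slot_width = 16;
		unsigned int slot_step = slot_width >> 3;	/* bytes per ASoC slot */
		uint64_t tx_enable = 0;
		int i = 0;

		while (rx_mask) {
			unsigned int slot = ffs(rx_mask) - 1;	/* first set ASoC slot */
			unsigned int loc = slot * slot_step;	/* CS53L30 byte slot */

			/* one enable bit per byte occupied by this ASoC slot */
			tx_enable |= (uint64_t)((1u << slot_step) - 1) << loc;
			rx_mask &= ~(1u << slot);
			printf("loc[%d] = %u\n", i++, loc);
		}
		printf("tx_enable = 0x%llx\n", (unsigned long long)tx_enable);
		return 0;
	}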
+
+static int cs53l30_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(dai->codec);
+
+ if (priv->mute_gpio)
+ gpiod_set_value_cansleep(priv->mute_gpio, mute);
+
+ return 0;
+}
+
+/* SNDRV_PCM_RATE_KNOT -> 12000, 24000 Hz, limit with constraint list */
+#define CS53L30_RATES (SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT)
+
+#define CS53L30_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static const struct snd_soc_dai_ops cs53l30_ops = {
+ .startup = cs53l30_pcm_startup,
+ .hw_params = cs53l30_pcm_hw_params,
+ .set_fmt = cs53l30_set_dai_fmt,
+ .set_sysclk = cs53l30_set_sysclk,
+ .set_tristate = cs53l30_set_tristate,
+ .set_tdm_slot = cs53l30_set_dai_tdm_slot,
+ .mute_stream = cs53l30_mute_stream,
+};
+
+static struct snd_soc_dai_driver cs53l30_dai = {
+ .name = "cs53l30",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = CS53L30_RATES,
+ .formats = CS53L30_FORMATS,
+ },
+ .ops = &cs53l30_ops,
+ .symmetric_rates = 1,
+};
+
+static int cs53l30_codec_probe(struct snd_soc_codec *codec)
+{
+ struct cs53l30_private *priv = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ if (priv->use_sdout2)
+ snd_soc_dapm_add_routes(dapm, cs53l30_dapm_routes_sdout2,
+ ARRAY_SIZE(cs53l30_dapm_routes_sdout2));
+ else
+ snd_soc_dapm_add_routes(dapm, cs53l30_dapm_routes_sdout1,
+ ARRAY_SIZE(cs53l30_dapm_routes_sdout1));
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver cs53l30_driver = {
+ .probe = cs53l30_codec_probe,
+ .set_bias_level = cs53l30_set_bias_level,
+ .idle_bias_off = true,
+
+ .dapm_widgets = cs53l30_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs53l30_dapm_widgets),
+ .dapm_routes = cs53l30_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs53l30_dapm_routes),
+
+ .controls = cs53l30_snd_controls,
+ .num_controls = ARRAY_SIZE(cs53l30_snd_controls),
+};
+
+static struct regmap_config cs53l30_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CS53L30_MAX_REGISTER,
+ .reg_defaults = cs53l30_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(cs53l30_reg_defaults),
+ .volatile_reg = cs53l30_volatile_register,
+ .writeable_reg = cs53l30_writeable_register,
+ .readable_reg = cs53l30_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int cs53l30_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct device_node *np = client->dev.of_node;
+ struct device *dev = &client->dev;
+ struct cs53l30_private *cs53l30;
+ unsigned int devid = 0;
+ unsigned int reg;
+ int ret = 0, i;
+ u8 val;
+
+ cs53l30 = devm_kzalloc(dev, sizeof(*cs53l30), GFP_KERNEL);
+ if (!cs53l30)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(cs53l30->supplies); i++)
+ cs53l30->supplies[i].supply = cs53l30_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ if (ret) {
+ dev_err(dev, "failed to get supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset the Device */
+ cs53l30->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cs53l30->reset_gpio)) {
+ ret = PTR_ERR(cs53l30->reset_gpio);
+ goto error;
+ }
+
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
+
+ i2c_set_clientdata(client, cs53l30);
+
+ cs53l30->mclk_rate = 0;
+
+ cs53l30->regmap = devm_regmap_init_i2c(client, &cs53l30_regmap);
+ if (IS_ERR(cs53l30->regmap)) {
+ ret = PTR_ERR(cs53l30->regmap);
+ dev_err(dev, "regmap_init() failed: %d\n", ret);
+ goto error;
+ }
+
+ /* Verify the device ID (its assembly is sketched after this function) */
+ ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_AB, &reg);
+ devid = reg << 12;
+
+ ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_CD, &reg);
+ devid |= reg << 4;
+
+ ret = regmap_read(cs53l30->regmap, CS53L30_DEVID_E, &reg);
+ devid |= (reg & 0xF0) >> 4;
+
+ if (devid != CS53L30_DEVID) {
+ ret = -ENODEV;
+ dev_err(dev, "Device ID (%X). Expected %X\n",
+ devid, CS53L30_DEVID);
+ goto error;
+ }
+
+ ret = regmap_read(cs53l30->regmap, CS53L30_REVID, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to get Revision ID: %d\n", ret);
+ goto error;
+ }
+
+ /* Check if MCLK provided */
+ cs53l30->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(cs53l30->mclk)) {
+ if (PTR_ERR(cs53l30->mclk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+ /* Otherwise mark the mclk pointer as NULL */
+ cs53l30->mclk = NULL;
+ }
+
+ /* Fetch the MUTE control */
+ cs53l30->mute_gpio = devm_gpiod_get_optional(dev, "mute",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(cs53l30->mute_gpio)) {
+ ret = PTR_ERR(cs53l30->mute_gpio);
+ goto error;
+ }
+
+ if (cs53l30->mute_gpio) {
+ /* Enable MUTE controls via MUTE pin */
+ regmap_write(cs53l30->regmap, CS53L30_MUTEP_CTL1,
+ CS53L30_MUTEP_CTL1_MUTEALL);
+ /* Flip the polarity of MUTE pin */
+ if (gpiod_is_active_low(cs53l30->mute_gpio))
+ regmap_update_bits(cs53l30->regmap, CS53L30_MUTEP_CTL2,
+ CS53L30_MUTE_PIN_POLARITY, 0);
+ }
+
+ if (!of_property_read_u8(np, "cirrus,micbias-lvl", &val))
+ regmap_update_bits(cs53l30->regmap, CS53L30_MICBIAS_CTL,
+ CS53L30_MIC_BIAS_CTRL_MASK, val);
+
+ if (of_property_read_bool(np, "cirrus,use-sdout2"))
+ cs53l30->use_sdout2 = true;
+
+ dev_info(dev, "Cirrus Logic CS53L30, Revision: %02X\n", reg & 0xFF);
+
+ ret = snd_soc_register_codec(dev, &cs53l30_driver, &cs53l30_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register codec: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ return ret;
+}
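
A minimal standalone sketch, separate from the patch, of how the 20-bit device ID is assembled from the three ID registers read in cs53l30_i2c_probe above. The register values used here (0x53, 0xA3, 0x00) are simply the ones that yield CS53L30_DEVID and are shown for illustration only.

	#include <stdio.h>

	int main(void)
	{
		unsigned int devid_ab = 0x53, devid_cd = 0xA3, devid_e = 0x00;
		unsigned int devid;

		devid  = devid_ab << 12;		/* 0x53000 */
		devid |= devid_cd << 4;			/* 0x53A30 */
		devid |= (devid_e & 0xF0) >> 4;		/* upper nibble of DEVID_E */

		printf("devid = 0x%05X\n", devid);	/* prints 0x53A30 */
		return 0;
	}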
+
+static int cs53l30_i2c_remove(struct i2c_client *client)
+{
+ struct cs53l30_private *cs53l30 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_codec(&client->dev);
+
+ /* Hold down reset */
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int cs53l30_runtime_suspend(struct device *dev)
+{
+ struct cs53l30_private *cs53l30 = dev_get_drvdata(dev);
+
+ regcache_cache_only(cs53l30->regmap, true);
+
+ /* Hold down reset */
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+
+ return 0;
+}
+
+static int cs53l30_runtime_resume(struct device *dev)
+{
+ struct cs53l30_private *cs53l30 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(cs53l30->supplies),
+ cs53l30->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (cs53l30->reset_gpio)
+ gpiod_set_value_cansleep(cs53l30->reset_gpio, 1);
+
+ regcache_cache_only(cs53l30->regmap, false);
+ ret = regcache_sync(cs53l30->regmap);
+ if (ret) {
+ dev_err(dev, "failed to synchronize regcache: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs53l30_runtime_pm = {
+ SET_RUNTIME_PM_OPS(cs53l30_runtime_suspend, cs53l30_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id cs53l30_of_match[] = {
+ { .compatible = "cirrus,cs53l30", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, cs53l30_of_match);
+
+static const struct i2c_device_id cs53l30_id[] = {
+ { "cs53l30", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs53l30_id);
+
+static struct i2c_driver cs53l30_i2c_driver = {
+ .driver = {
+ .name = "cs53l30",
+ .pm = &cs53l30_runtime_pm,
+ },
+ .id_table = cs53l30_id,
+ .probe = cs53l30_i2c_probe,
+ .remove = cs53l30_i2c_remove,
+};
+
+module_i2c_driver(cs53l30_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS53L30 driver");
+MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <Paul.Handrigan@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs53l30.h b/sound/soc/codecs/cs53l30.h
new file mode 100644
index 000000000000..5e39da568749
--- /dev/null
+++ b/sound/soc/codecs/cs53l30.h
@@ -0,0 +1,459 @@
+/*
+ * ALSA SoC CS53L30 codec driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <Paul.Handrigan@cirrus.com>,
+ * Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS53L30_H__
+#define __CS53L30_H__
+
+/* I2C Registers */
+#define CS53L30_DEVID_AB 0x01 /* Device ID A & B [RO]. */
+#define CS53L30_DEVID_CD 0x02 /* Device ID C & D [RO]. */
+#define CS53L30_DEVID_E 0x03 /* Device ID E [RO]. */
+#define CS53L30_REVID 0x05 /* Revision ID [RO]. */
+#define CS53L30_PWRCTL 0x06 /* Power Control. */
+#define CS53L30_MCLKCTL 0x07 /* MCLK Control. */
+#define CS53L30_INT_SR_CTL 0x08 /* Internal Sample Rate Control. */
+#define CS53L30_MICBIAS_CTL 0x0A /* Mic Bias Control. */
+#define CS53L30_ASPCFG_CTL 0x0C /* ASP Config Control. */
+#define CS53L30_ASP_CTL1 0x0D /* ASP1 Control. */
+#define CS53L30_ASP_TDMTX_CTL1 0x0E /* ASP1 TDM TX Control 1 */
+#define CS53L30_ASP_TDMTX_CTL2 0x0F /* ASP1 TDM TX Control 2 */
+#define CS53L30_ASP_TDMTX_CTL3 0x10 /* ASP1 TDM TX Control 3 */
+#define CS53L30_ASP_TDMTX_CTL4 0x11 /* ASP1 TDM TX Control 4 */
+#define CS53L30_ASP_TDMTX_EN1 0x12 /* ASP1 TDM TX Enable 1 */
+#define CS53L30_ASP_TDMTX_EN2 0x13 /* ASP1 TDM TX Enable 2 */
+#define CS53L30_ASP_TDMTX_EN3 0x14 /* ASP1 TDM TX Enable 3 */
+#define CS53L30_ASP_TDMTX_EN4 0x15 /* ASP1 TDM TX Enable 4 */
+#define CS53L30_ASP_TDMTX_EN5 0x16 /* ASP1 TDM TX Enable 5 */
+#define CS53L30_ASP_TDMTX_EN6 0x17 /* ASP1 TDM TX Enable 6 */
+#define CS53L30_ASP_CTL2 0x18 /* ASP2 Control. */
+#define CS53L30_SFT_RAMP 0x1A /* Soft Ramp Control. */
+#define CS53L30_LRCK_CTL1 0x1B /* LRCK Control 1. */
+#define CS53L30_LRCK_CTL2 0x1C /* LRCK Control 2. */
+#define CS53L30_MUTEP_CTL1 0x1F /* Mute Pin Control 1. */
+#define CS53L30_MUTEP_CTL2 0x20 /* Mute Pin Control 2. */
+#define CS53L30_INBIAS_CTL1 0x21 /* Input Bias Control 1. */
+#define CS53L30_INBIAS_CTL2 0x22 /* Input Bias Control 2. */
+#define CS53L30_DMIC1_STR_CTL 0x23 /* DMIC1 Stereo Control. */
+#define CS53L30_DMIC2_STR_CTL 0x24 /* DMIC2 Stereo Control. */
+#define CS53L30_ADCDMIC1_CTL1 0x25 /* ADC1/DMIC1 Control 1. */
+#define CS53L30_ADCDMIC1_CTL2 0x26 /* ADC1/DMIC1 Control 2. */
+#define CS53L30_ADC1_CTL3 0x27 /* ADC1 Control 3. */
+#define CS53L30_ADC1_NG_CTL 0x28 /* ADC1 Noise Gate Control. */
+#define CS53L30_ADC1A_AFE_CTL 0x29 /* ADC1A AFE Control. */
+#define CS53L30_ADC1B_AFE_CTL 0x2A /* ADC1B AFE Control. */
+#define CS53L30_ADC1A_DIG_VOL 0x2B /* ADC1A Digital Volume. */
+#define CS53L30_ADC1B_DIG_VOL 0x2C /* ADC1B Digital Volume. */
+#define CS53L30_ADCDMIC2_CTL1 0x2D /* ADC2/DMIC2 Control 1. */
+#define CS53L30_ADCDMIC2_CTL2 0x2E /* ADC2/DMIC2 Control 2. */
+#define CS53L30_ADC2_CTL3 0x2F /* ADC2 Control 3. */
+#define CS53L30_ADC2_NG_CTL 0x30 /* ADC2 Noise Gate Control. */
+#define CS53L30_ADC2A_AFE_CTL 0x31 /* ADC2A AFE Control. */
+#define CS53L30_ADC2B_AFE_CTL 0x32 /* ADC2B AFE Control. */
+#define CS53L30_ADC2A_DIG_VOL 0x33 /* ADC2A Digital Volume. */
+#define CS53L30_ADC2B_DIG_VOL 0x34 /* ADC2B Digital Volume. */
+#define CS53L30_INT_MASK 0x35 /* Interrupt Mask. */
+#define CS53L30_IS 0x36 /* Interrupt Status. */
+#define CS53L30_MAX_REGISTER 0x36
+
+#define CS53L30_TDM_SLOT_MAX 4
+#define CS53L30_ASP_TDMTX_CTL(x) (CS53L30_ASP_TDMTX_CTL1 + (x))
+/* x: register index; n: slot index; 8 slots per register */
+#define CS53L30_ASP_TDMTX_ENx(x) (CS53L30_ASP_TDMTX_EN6 - (x))
+#define CS53L30_ASP_TDMTX_ENn(n) CS53L30_ASP_TDMTX_ENx((n) >> 3)
+#define CS53L30_ASP_TDMTX_ENx_MAX 6
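
A minimal standalone sketch, separate from the patch, of the slot-to-register mapping encoded by the ENx()/ENn() macros above: the enable registers run from EN6 (0x17) down to EN1 (0x12), eight slots per register, and the bit within the register follows from the LSB-first byte writes in cs53l30_set_dai_tdm_slot earlier in this patch. The sample slot numbers are illustrative only.

	#include <stdio.h>

	#define TDMTX_EN6	0x17			/* mirrors CS53L30_ASP_TDMTX_EN6 */
	#define TDMTX_ENx(x)	(TDMTX_EN6 - (x))	/* register index -> address */
	#define TDMTX_ENn(n)	TDMTX_ENx((n) >> 3)	/* slot index -> address */

	int main(void)
	{
		const int slots[] = { 0, 7, 8, 40, 47 };
		unsigned int i;

		for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
			printf("slot %2d -> register 0x%02X, bit %d\n",
			       slots[i], (unsigned int)TDMTX_ENn(slots[i]),
			       slots[i] & 7);
		return 0;
	}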
+
+/* Device ID */
+#define CS53L30_DEVID 0x53A30
+
+/*
+ * PDN_DONE poll maximum.
+ * If soft ramp is enabled, powering down the system takes much longer.
+ */
+#define CS53L30_PDN_POLL_MAX 90
+
+/* Bitfield Definitions */
+
+/* R6 (0x06) CS53L30_PWRCTL - Power Control */
+#define CS53L30_PDN_ULP_SHIFT 7
+#define CS53L30_PDN_ULP_MASK (1 << CS53L30_PDN_ULP_SHIFT)
+#define CS53L30_PDN_ULP (1 << CS53L30_PDN_ULP_SHIFT)
+#define CS53L30_PDN_LP_SHIFT 6
+#define CS53L30_PDN_LP_MASK (1 << CS53L30_PDN_LP_SHIFT)
+#define CS53L30_PDN_LP (1 << CS53L30_PDN_LP_SHIFT)
+#define CS53L30_DISCHARGE_FILT_SHIFT 5
+#define CS53L30_DISCHARGE_FILT_MASK (1 << CS53L30_DISCHARGE_FILT_SHIFT)
+#define CS53L30_DISCHARGE_FILT (1 << CS53L30_DISCHARGE_FILT_SHIFT)
+#define CS53L30_THMS_PDN_SHIFT 4
+#define CS53L30_THMS_PDN_MASK (1 << CS53L30_THMS_PDN_SHIFT)
+#define CS53L30_THMS_PDN (1 << CS53L30_THMS_PDN_SHIFT)
+
+#define CS53L30_PWRCTL_DEFAULT (CS53L30_THMS_PDN)
+
+/* R7 (0x07) CS53L30_MCLKCTL - MCLK Control */
+#define CS53L30_MCLK_DIS_SHIFT 7
+#define CS53L30_MCLK_DIS_MASK (1 << CS53L30_MCLK_DIS_SHIFT)
+#define CS53L30_MCLK_DIS (1 << CS53L30_MCLK_DIS_SHIFT)
+#define CS53L30_MCLK_INT_SCALE_SHIFT 6
+#define CS53L30_MCLK_INT_SCALE_MASK (1 << CS53L30_MCLK_INT_SCALE_SHIFT)
+#define CS53L30_MCLK_INT_SCALE (1 << CS53L30_MCLK_INT_SCALE_SHIFT)
+#define CS53L30_DMIC_DRIVE_SHIFT 5
+#define CS53L30_DMIC_DRIVE_MASK (1 << CS53L30_DMIC_DRIVE_SHIFT)
+#define CS53L30_DMIC_DRIVE (1 << CS53L30_DMIC_DRIVE_SHIFT)
+#define CS53L30_MCLK_DIV_SHIFT 2
+#define CS53L30_MCLK_DIV_WIDTH 2
+#define CS53L30_MCLK_DIV_MASK (((1 << CS53L30_MCLK_DIV_WIDTH) - 1) << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_MCLK_DIV_BY_1 (0x0 << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_MCLK_DIV_BY_2 (0x1 << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_MCLK_DIV_BY_3 (0x2 << CS53L30_MCLK_DIV_SHIFT)
+#define CS53L30_SYNC_EN_SHIFT 1
+#define CS53L30_SYNC_EN_MASK (1 << CS53L30_SYNC_EN_SHIFT)
+#define CS53L30_SYNC_EN (1 << CS53L30_SYNC_EN_SHIFT)
+
+#define CS53L30_MCLKCTL_DEFAULT (CS53L30_MCLK_DIV_BY_2)
+
+/* R8 (0x08) CS53L30_INT_SR_CTL - Internal Sample Rate Control */
+#define CS53L30_INTRNL_FS_RATIO_SHIFT 4
+#define CS53L30_INTRNL_FS_RATIO_MASK (1 << CS53L30_INTRNL_FS_RATIO_SHIFT)
+#define CS53L30_INTRNL_FS_RATIO (1 << CS53L30_INTRNL_FS_RATIO_SHIFT)
+#define CS53L30_MCLK_19MHZ_EN_SHIFT 0
+#define CS53L30_MCLK_19MHZ_EN_MASK (1 << CS53L30_MCLK_19MHZ_EN_SHIFT)
+#define CS53L30_MCLK_19MHZ_EN (1 << CS53L30_MCLK_19MHZ_EN_SHIFT)
+
+/* Bits 0x6 << 1 are reserved */
+#define CS53L30_INT_SR_CTL_DEFAULT (CS53L30_INTRNL_FS_RATIO | 0x6 << 1)
+
+/* R10 (0x0A) CS53L30_MICBIAS_CTL - Mic Bias Control */
+#define CS53L30_MIC4_BIAS_PDN_SHIFT 7
+#define CS53L30_MIC4_BIAS_PDN_MASK (1 << CS53L30_MIC4_BIAS_PDN_SHIFT)
+#define CS53L30_MIC4_BIAS_PDN (1 << CS53L30_MIC4_BIAS_PDN_SHIFT)
+#define CS53L30_MIC3_BIAS_PDN_SHIFT 6
+#define CS53L30_MIC3_BIAS_PDN_MASK (1 << CS53L30_MIC3_BIAS_PDN_SHIFT)
+#define CS53L30_MIC3_BIAS_PDN (1 << CS53L30_MIC3_BIAS_PDN_SHIFT)
+#define CS53L30_MIC2_BIAS_PDN_SHIFT 5
+#define CS53L30_MIC2_BIAS_PDN_MASK (1 << CS53L30_MIC2_BIAS_PDN_SHIFT)
+#define CS53L30_MIC2_BIAS_PDN (1 << CS53L30_MIC2_BIAS_PDN_SHIFT)
+#define CS53L30_MIC1_BIAS_PDN_SHIFT 4
+#define CS53L30_MIC1_BIAS_PDN_MASK (1 << CS53L30_MIC1_BIAS_PDN_SHIFT)
+#define CS53L30_MIC1_BIAS_PDN (1 << CS53L30_MIC1_BIAS_PDN_SHIFT)
+#define CS53L30_MICx_BIAS_PDN (0xf << CS53L30_MIC1_BIAS_PDN_SHIFT)
+#define CS53L30_VP_MIN_SHIFT 2
+#define CS53L30_VP_MIN_MASK (1 << CS53L30_VP_MIN_SHIFT)
+#define CS53L30_VP_MIN (1 << CS53L30_VP_MIN_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_SHIFT 0
+#define CS53L30_MIC_BIAS_CTRL_WIDTH 2
+#define CS53L30_MIC_BIAS_CTRL_MASK (((1 << CS53L30_MIC_BIAS_CTRL_WIDTH) - 1) << CS53L30_MIC_BIAS_CTRL_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_HIZ (0 << CS53L30_MIC_BIAS_CTRL_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_1V8 (1 << CS53L30_MIC_BIAS_CTRL_SHIFT)
+#define CS53L30_MIC_BIAS_CTRL_2V75 (2 << CS53L30_MIC_BIAS_CTRL_SHIFT)
+
+#define CS53L30_MICBIAS_CTL_DEFAULT (CS53L30_MICx_BIAS_PDN | CS53L30_VP_MIN)
+
+/* R12 (0x0C) CS53L30_ASPCFG_CTL - ASP Configuration Control */
+#define CS53L30_ASP_MS_SHIFT 7
+#define CS53L30_ASP_MS_MASK (1 << CS53L30_ASP_MS_SHIFT)
+#define CS53L30_ASP_MS (1 << CS53L30_ASP_MS_SHIFT)
+#define CS53L30_ASP_SCLK_INV_SHIFT 4
+#define CS53L30_ASP_SCLK_INV_MASK (1 << CS53L30_ASP_SCLK_INV_SHIFT)
+#define CS53L30_ASP_SCLK_INV (1 << CS53L30_ASP_SCLK_INV_SHIFT)
+#define CS53L30_ASP_RATE_SHIFT 0
+#define CS53L30_ASP_RATE_WIDTH 4
+#define CS53L30_ASP_RATE_MASK (((1 << CS53L30_ASP_RATE_WIDTH) - 1) << CS53L30_ASP_RATE_SHIFT)
+#define CS53L30_ASP_RATE_48K (0xc << CS53L30_ASP_RATE_SHIFT)
+
+#define CS53L30_ASPCFG_CTL_DEFAULT (CS53L30_ASP_RATE_48K)
+
+/* R13/R24 (0x0D/0x18) CS53L30_ASP_CTL1 & CS53L30_ASP_CTL2 - ASP Control 1~2 */
+#define CS53L30_ASP_TDM_PDN_SHIFT 7
+#define CS53L30_ASP_TDM_PDN_MASK (1 << CS53L30_ASP_TDM_PDN_SHIFT)
+#define CS53L30_ASP_TDM_PDN (1 << CS53L30_ASP_TDM_PDN_SHIFT)
+#define CS53L30_ASP_SDOUTx_PDN_SHIFT 6
+#define CS53L30_ASP_SDOUTx_PDN_MASK (1 << CS53L30_ASP_SDOUTx_PDN_SHIFT)
+#define CS53L30_ASP_SDOUTx_PDN (1 << CS53L30_ASP_SDOUTx_PDN_SHIFT)
+#define CS53L30_ASP_3ST_SHIFT 5
+#define CS53L30_ASP_3ST_MASK (1 << CS53L30_ASP_3ST_SHIFT)
+#define CS53L30_ASP_3ST (1 << CS53L30_ASP_3ST_SHIFT)
+#define CS53L30_SHIFT_LEFT_SHIFT 4
+#define CS53L30_SHIFT_LEFT_MASK (1 << CS53L30_SHIFT_LEFT_SHIFT)
+#define CS53L30_SHIFT_LEFT (1 << CS53L30_SHIFT_LEFT_SHIFT)
+#define CS53L30_ASP_SDOUTx_DRIVE_SHIFT 0
+#define CS53L30_ASP_SDOUTx_DRIVE_MASK (1 << CS53L30_ASP_SDOUTx_DRIVE_SHIFT)
+#define CS53L30_ASP_SDOUTx_DRIVE (1 << CS53L30_ASP_SDOUTx_DRIVE_SHIFT)
+
+#define CS53L30_ASP_CTL1_DEFAULT (CS53L30_ASP_TDM_PDN)
+#define CS53L30_ASP_CTL2_DEFAULT (0)
+
+/* R14 (0x0E) ~ R17 (0x11) CS53L30_ASP_TDMTX_CTLx - ASP TDM TX Control 1~4 */
+#define CS53L30_ASP_CHx_TX_STATE_SHIFT 7
+#define CS53L30_ASP_CHx_TX_STATE_MASK (1 << CS53L30_ASP_CHx_TX_STATE_SHIFT)
+#define CS53L30_ASP_CHx_TX_STATE (1 << CS53L30_ASP_CHx_TX_STATE_SHIFT)
+#define CS53L30_ASP_CHx_TX_LOC_SHIFT 0
+#define CS53L30_ASP_CHx_TX_LOC_WIDTH 6
+#define CS53L30_ASP_CHx_TX_LOC_MASK (((1 << CS53L30_ASP_CHx_TX_LOC_WIDTH) - 1) << CS53L30_ASP_CHx_TX_LOC_SHIFT)
+#define CS53L30_ASP_CHx_TX_LOC_MAX (47 << CS53L30_ASP_CHx_TX_LOC_SHIFT)
+#define CS53L30_ASP_CHx_TX_LOC(x) ((x) << CS53L30_ASP_CHx_TX_LOC_SHIFT)
+
+#define CS53L30_ASP_TDMTX_CTLx_DEFAULT (CS53L30_ASP_CHx_TX_LOC_MAX)
+
+/* R18 (0x12) ~ R23 (0x17) CS53L30_ASP_TDMTX_ENx - ASP TDM TX Enable 1~6 */
+#define CS53L30_ASP_TDMTX_ENx_DEFAULT (0)
+
+/* R26 (0x1A) CS53L30_SFT_RAMP - Soft Ramp Control */
+#define CS53L30_DIGSFT_SHIFT 5
+#define CS53L30_DIGSFT_MASK (1 << CS53L30_DIGSFT_SHIFT)
+#define CS53L30_DIGSFT (1 << CS53L30_DIGSFT_SHIFT)
+
+#define CS53L30_SFT_RMP_DEFAULT (0)
+
+/* R28 (0x1C) CS53L30_LRCK_CTL2 - LRCK Control 2 */
+#define CS53L30_LRCK_50_NPW_SHIFT 3
+#define CS53L30_LRCK_50_NPW_MASK (1 << CS53L30_LRCK_50_NPW_SHIFT)
+#define CS53L30_LRCK_50_NPW (1 << CS53L30_LRCK_50_NPW_SHIFT)
+#define CS53L30_LRCK_TPWH_SHIFT 0
+#define CS53L30_LRCK_TPWH_WIDTH 3
+#define CS53L30_LRCK_TPWH_MASK (((1 << CS53L30_LRCK_TPWH_WIDTH) - 1) << CS53L30_LRCK_TPWH_SHIFT)
+#define CS53L30_LRCK_TPWH(x) (((x) << CS53L30_LRCK_TPWH_SHIFT) & CS53L30_LRCK_TPWH_MASK)
+
+#define CS53L30_LRCK_CTLx_DEFAULT (0)
+
+/* R31 (0x1F) CS53L30_MUTEP_CTL1 - MUTE Pin Control 1 */
+#define CS53L30_MUTE_PDN_ULP_SHIFT 7
+#define CS53L30_MUTE_PDN_ULP_MASK (1 << CS53L30_MUTE_PDN_ULP_SHIFT)
+#define CS53L30_MUTE_PDN_ULP (1 << CS53L30_MUTE_PDN_ULP_SHIFT)
+#define CS53L30_MUTE_PDN_LP_SHIFT 6
+#define CS53L30_MUTE_PDN_LP_MASK (1 << CS53L30_MUTE_PDN_LP_SHIFT)
+#define CS53L30_MUTE_PDN_LP (1 << CS53L30_MUTE_PDN_LP_SHIFT)
+#define CS53L30_MUTE_M4B_PDN_SHIFT 4
+#define CS53L30_MUTE_M4B_PDN_MASK (1 << CS53L30_MUTE_M4B_PDN_SHIFT)
+#define CS53L30_MUTE_M4B_PDN (1 << CS53L30_MUTE_M4B_PDN_SHIFT)
+#define CS53L30_MUTE_M3B_PDN_SHIFT 3
+#define CS53L30_MUTE_M3B_PDN_MASK (1 << CS53L30_MUTE_M3B_PDN_SHIFT)
+#define CS53L30_MUTE_M3B_PDN (1 << CS53L30_MUTE_M3B_PDN_SHIFT)
+#define CS53L30_MUTE_M2B_PDN_SHIFT 2
+#define CS53L30_MUTE_M2B_PDN_MASK (1 << CS53L30_MUTE_M2B_PDN_SHIFT)
+#define CS53L30_MUTE_M2B_PDN (1 << CS53L30_MUTE_M2B_PDN_SHIFT)
+#define CS53L30_MUTE_M1B_PDN_SHIFT 1
+#define CS53L30_MUTE_M1B_PDN_MASK (1 << CS53L30_MUTE_M1B_PDN_SHIFT)
+#define CS53L30_MUTE_M1B_PDN (1 << CS53L30_MUTE_M1B_PDN_SHIFT)
+/* Note: be careful - x starts from 0 */
+#define CS53L30_MUTE_MxB_PDN_SHIFT(x) (CS53L30_MUTE_M1B_PDN_SHIFT + (x))
+#define CS53L30_MUTE_MxB_PDN_MASK(x) (1 << CS53L30_MUTE_MxB_PDN_SHIFT(x))
+#define CS53L30_MUTE_MxB_PDN(x) (1 << CS53L30_MUTE_MxB_PDN_SHIFT(x))
+#define CS53L30_MUTE_MB_ALL_PDN_SHIFT 0
+#define CS53L30_MUTE_MB_ALL_PDN_MASK (1 << CS53L30_MUTE_MB_ALL_PDN_SHIFT)
+#define CS53L30_MUTE_MB_ALL_PDN (1 << CS53L30_MUTE_MB_ALL_PDN_SHIFT)
+
+#define CS53L30_MUTEP_CTL1_MUTEALL (0xdf)
+#define CS53L30_MUTEP_CTL1_DEFAULT (0)
+
+/* R32 (0x20) CS53L30_MUTEP_CTL2 - MUTE Pin Control 2 */
+#define CS53L30_MUTE_PIN_POLARITY_SHIFT 7
+#define CS53L30_MUTE_PIN_POLARITY_MASK (1 << CS53L30_MUTE_PIN_POLARITY_SHIFT)
+#define CS53L30_MUTE_PIN_POLARITY (1 << CS53L30_MUTE_PIN_POLARITY_SHIFT)
+#define CS53L30_MUTE_ASP_TDM_PDN_SHIFT 6
+#define CS53L30_MUTE_ASP_TDM_PDN_MASK (1 << CS53L30_MUTE_ASP_TDM_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_TDM_PDN (1 << CS53L30_MUTE_ASP_TDM_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT2_PDN_SHIFT 5
+#define CS53L30_MUTE_ASP_SDOUT2_PDN_MASK (1 << CS53L30_MUTE_ASP_SDOUT2_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT2_PDN (1 << CS53L30_MUTE_ASP_SDOUT2_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT 4
+#define CS53L30_MUTE_ASP_SDOUT1_PDN_MASK (1 << CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUT1_PDN (1 << CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT)
+/* Note: be careful - x starts from 0 */
+#define CS53L30_MUTE_ASP_SDOUTx_PDN_SHIFT(x) ((x) + CS53L30_MUTE_ASP_SDOUT1_PDN_SHIFT)
+#define CS53L30_MUTE_ASP_SDOUTx_PDN_MASK(x) (1 << CS53L30_MUTE_ASP_SDOUTx_PDN_SHIFT(x))
+#define CS53L30_MUTE_ASP_SDOUTx_PDN(x) (1 << CS53L30_MUTE_ASP_SDOUTx_PDN_SHIFT(x))
+#define CS53L30_MUTE_ADC2B_PDN_SHIFT 3
+#define CS53L30_MUTE_ADC2B_PDN_MASK (1 << CS53L30_MUTE_ADC2B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC2B_PDN (1 << CS53L30_MUTE_ADC2B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC2A_PDN_SHIFT 2
+#define CS53L30_MUTE_ADC2A_PDN_MASK (1 << CS53L30_MUTE_ADC2A_PDN_SHIFT)
+#define CS53L30_MUTE_ADC2A_PDN (1 << CS53L30_MUTE_ADC2A_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1B_PDN_SHIFT 1
+#define CS53L30_MUTE_ADC1B_PDN_MASK (1 << CS53L30_MUTE_ADC1B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1B_PDN (1 << CS53L30_MUTE_ADC1B_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1A_PDN_SHIFT 0
+#define CS53L30_MUTE_ADC1A_PDN_MASK (1 << CS53L30_MUTE_ADC1A_PDN_SHIFT)
+#define CS53L30_MUTE_ADC1A_PDN (1 << CS53L30_MUTE_ADC1A_PDN_SHIFT)
+
+#define CS53L30_MUTEP_CTL2_DEFAULT (CS53L30_MUTE_PIN_POLARITY)
+
+/* R33 (0x21) CS53L30_INBIAS_CTL1 - Input Bias Control 1 */
+#define CS53L30_IN4M_BIAS_SHIFT 6
+#define CS53L30_IN4M_BIAS_WIDTH 2
+#define CS53L30_IN4M_BIAS_MASK (((1 << CS53L30_IN4M_BIAS_WIDTH) - 1) << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4M_BIAS_OPEN (0 << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4M_BIAS_PULL_DOWN (1 << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4M_BIAS_VCM (2 << CS53L30_IN4M_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_SHIFT 4
+#define CS53L30_IN4P_BIAS_WIDTH 2
+#define CS53L30_IN4P_BIAS_MASK (((1 << CS53L30_IN4P_BIAS_WIDTH) - 1) << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_OPEN (0 << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_PULL_DOWN (1 << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN4P_BIAS_VCM (2 << CS53L30_IN4P_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_SHIFT 2
+#define CS53L30_IN3M_BIAS_WIDTH 2
+#define CS53L30_IN3M_BIAS_MASK (((1 << CS53L30_IN3M_BIAS_WIDTH) - 1) << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_OPEN (0 << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_PULL_DOWN (1 << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3M_BIAS_VCM (2 << CS53L30_IN3M_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_SHIFT 0
+#define CS53L30_IN3P_BIAS_WIDTH 2
+#define CS53L30_IN3P_BIAS_MASK (((1 << CS53L30_IN3P_BIAS_WIDTH) - 1) << CS53L30_IN3P_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_OPEN (0 << CS53L30_IN3P_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_PULL_DOWN (1 << CS53L30_IN3P_BIAS_SHIFT)
+#define CS53L30_IN3P_BIAS_VCM (2 << CS53L30_IN3P_BIAS_SHIFT)
+
+#define CS53L30_INBIAS_CTL1_DEFAULT (CS53L30_IN4M_BIAS_VCM | CS53L30_IN4P_BIAS_VCM |\
+ CS53L30_IN3M_BIAS_VCM | CS53L30_IN3P_BIAS_VCM)
+
+/* R34 (0x22) CS53L30_INBIAS_CTL2 - Input Bias Control 2 */
+#define CS53L30_IN2M_BIAS_SHIFT 6
+#define CS53L30_IN2M_BIAS_WIDTH 2
+#define CS53L30_IN2M_BIAS_MASK (((1 << CS53L30_IN2M_BIAS_WIDTH) - 1) << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2M_BIAS_OPEN (0 << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2M_BIAS_PULL_DOWN (1 << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2M_BIAS_VCM (2 << CS53L30_IN2M_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_SHIFT 4
+#define CS53L30_IN2P_BIAS_WIDTH 2
+#define CS53L30_IN2P_BIAS_MASK (((1 << CS53L30_IN2P_BIAS_WIDTH) - 1) << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_OPEN (0 << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_PULL_DOWN (1 << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN2P_BIAS_VCM (2 << CS53L30_IN2P_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_SHIFT 2
+#define CS53L30_IN1M_BIAS_WIDTH 2
+#define CS53L30_IN1M_BIAS_MASK (((1 << CS53L30_IN1M_BIAS_WIDTH) - 1) << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_OPEN (0 << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_PULL_DOWN (1 << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1M_BIAS_VCM (2 << CS53L30_IN1M_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_SHIFT 0
+#define CS53L30_IN1P_BIAS_WIDTH 2
+#define CS53L30_IN1P_BIAS_MASK (((1 << CS53L30_IN1P_BIAS_WIDTH) - 1) << CS53L30_IN1P_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_OPEN (0 << CS53L30_IN1P_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_PULL_DOWN (1 << CS53L30_IN1P_BIAS_SHIFT)
+#define CS53L30_IN1P_BIAS_VCM (2 << CS53L30_IN1P_BIAS_SHIFT)
+
+#define CS53L30_INBIAS_CTL2_DEFAULT (CS53L30_IN2M_BIAS_VCM | CS53L30_IN2P_BIAS_VCM |\
+ CS53L30_IN1M_BIAS_VCM | CS53L30_IN1P_BIAS_VCM)
+
+/* R35 (0x23) & R36 (0x24) CS53L30_DMICx_STR_CTL - DMIC1 & DMIC2 Stereo Control */
+#define CS53L30_DMICx_STEREO_ENB_SHIFT 5
+#define CS53L30_DMICx_STEREO_ENB_MASK (1 << CS53L30_DMICx_STEREO_ENB_SHIFT)
+#define CS53L30_DMICx_STEREO_ENB (1 << CS53L30_DMICx_STEREO_ENB_SHIFT)
+
+/* 0x88 and 0xCC are reserved bits */
+#define CS53L30_DMIC1_STR_CTL_DEFAULT (CS53L30_DMICx_STEREO_ENB | 0x88)
+#define CS53L30_DMIC2_STR_CTL_DEFAULT (CS53L30_DMICx_STEREO_ENB | 0xCC)
+
+/* R37/R45 (0x25/0x2D) CS53L30_ADCDMICx_CTL1 - ADC1/DMIC1 & ADC2/DMIC2 Control 1 */
+#define CS53L30_ADCxB_PDN_SHIFT 7
+#define CS53L30_ADCxB_PDN_MASK (1 << CS53L30_ADCxB_PDN_SHIFT)
+#define CS53L30_ADCxB_PDN (1 << CS53L30_ADCxB_PDN_SHIFT)
+#define CS53L30_ADCxA_PDN_SHIFT 6
+#define CS53L30_ADCxA_PDN_MASK (1 << CS53L30_ADCxA_PDN_SHIFT)
+#define CS53L30_ADCxA_PDN (1 << CS53L30_ADCxA_PDN_SHIFT)
+#define CS53L30_DMICx_PDN_SHIFT 2
+#define CS53L30_DMICx_PDN_MASK (1 << CS53L30_DMICx_PDN_SHIFT)
+#define CS53L30_DMICx_PDN (1 << CS53L30_DMICx_PDN_SHIFT)
+#define CS53L30_DMICx_SCLK_DIV_SHIFT 1
+#define CS53L30_DMICx_SCLK_DIV_MASK (1 << CS53L30_DMICx_SCLK_DIV_SHIFT)
+#define CS53L30_DMICx_SCLK_DIV (1 << CS53L30_DMICx_SCLK_DIV_SHIFT)
+#define CS53L30_CH_TYPE_SHIFT 0
+#define CS53L30_CH_TYPE_MASK (1 << CS53L30_CH_TYPE_SHIFT)
+#define CS53L30_CH_TYPE (1 << CS53L30_CH_TYPE_SHIFT)
+
+#define CS53L30_ADCDMICx_PDN_MASK 0xFF
+#define CS53L30_ADCDMICx_CTL1_DEFAULT (CS53L30_DMICx_PDN)
+
+/* R38/R46 (0x26/0x2E) CS53L30_ADCDMICx_CTL2 - ADC1/DMIC1 & ADC2/DMIC2 Control 2 */
+#define CS53L30_ADCx_NOTCH_DIS_SHIFT 7
+#define CS53L30_ADCx_NOTCH_DIS_MASK (1 << CS53L30_ADCx_NOTCH_DIS_SHIFT)
+#define CS53L30_ADCx_NOTCH_DIS (1 << CS53L30_ADCx_NOTCH_DIS_SHIFT)
+#define CS53L30_ADCxB_INV_SHIFT 5
+#define CS53L30_ADCxB_INV_MASK (1 << CS53L30_ADCxB_INV_SHIFT)
+#define CS53L30_ADCxB_INV (1 << CS53L30_ADCxB_INV_SHIFT)
+#define CS53L30_ADCxA_INV_SHIFT 4
+#define CS53L30_ADCxA_INV_MASK (1 << CS53L30_ADCxA_INV_SHIFT)
+#define CS53L30_ADCxA_INV (1 << CS53L30_ADCxA_INV_SHIFT)
+#define CS53L30_ADCxB_DIG_BOOST_SHIFT 1
+#define CS53L30_ADCxB_DIG_BOOST_MASK (1 << CS53L30_ADCxB_DIG_BOOST_SHIFT)
+#define CS53L30_ADCxB_DIG_BOOST (1 << CS53L30_ADCxB_DIG_BOOST_SHIFT)
+#define CS53L30_ADCxA_DIG_BOOST_SHIFT 0
+#define CS53L30_ADCxA_DIG_BOOST_MASK (1 << CS53L30_ADCxA_DIG_BOOST_SHIFT)
+#define CS53L30_ADCxA_DIG_BOOST (1 << CS53L30_ADCxA_DIG_BOOST_SHIFT)
+
+#define CS53L30_ADCDMIC1_CTL2_DEFAULT (0)
+
+/* R39/R47 (0x27/0x2F) CS53L30_ADCx_CTL3 - ADC1/ADC2 Control 3 */
+#define CS53L30_ADCx_HPF_EN_SHIFT 3
+#define CS53L30_ADCx_HPF_EN_MASK (1 << CS53L30_ADCx_HPF_EN_SHIFT)
+#define CS53L30_ADCx_HPF_EN (1 << CS53L30_ADCx_HPF_EN_SHIFT)
+#define CS53L30_ADCx_HPF_CF_SHIFT 1
+#define CS53L30_ADCx_HPF_CF_WIDTH 2
+#define CS53L30_ADCx_HPF_CF_MASK (((1 << CS53L30_ADCx_HPF_CF_WIDTH) - 1) << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_1HZ86 (0 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_120HZ (1 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_235HZ (2 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_HPF_CF_466HZ (3 << CS53L30_ADCx_HPF_CF_SHIFT)
+#define CS53L30_ADCx_NG_ALL_SHIFT 0
+#define CS53L30_ADCx_NG_ALL_MASK (1 << CS53L30_ADCx_NG_ALL_SHIFT)
+#define CS53L30_ADCx_NG_ALL (1 << CS53L30_ADCx_NG_ALL_SHIFT)
+
+#define CS53L30_ADCx_CTL3_DEFAULT (CS53L30_ADCx_HPF_EN)
+
+/* R40/R48 (0x28/0x30) CS53L30_ADCx_NG_CTL - ADC1/ADC2 Noise Gate Control */
+#define CS53L30_ADCxB_NG_SHIFT 7
+#define CS53L30_ADCxB_NG_MASK (1 << CS53L30_ADCxB_NG_SHIFT)
+#define CS53L30_ADCxB_NG (1 << CS53L30_ADCxB_NG_SHIFT)
+#define CS53L30_ADCxA_NG_SHIFT 6
+#define CS53L30_ADCxA_NG_MASK (1 << CS53L30_ADCxA_NG_SHIFT)
+#define CS53L30_ADCxA_NG (1 << CS53L30_ADCxA_NG_SHIFT)
+#define CS53L30_ADCx_NG_BOOST_SHIFT 5
+#define CS53L30_ADCx_NG_BOOST_MASK (1 << CS53L30_ADCx_NG_BOOST_SHIFT)
+#define CS53L30_ADCx_NG_BOOST (1 << CS53L30_ADCx_NG_BOOST_SHIFT)
+#define CS53L30_ADCx_NG_THRESH_SHIFT 2
+#define CS53L30_ADCx_NG_THRESH_WIDTH 3
+#define CS53L30_ADCx_NG_THRESH_MASK (((1 << CS53L30_ADCx_NG_THRESH_WIDTH) - 1) << CS53L30_ADCx_NG_THRESH_SHIFT)
+#define CS53L30_ADCx_NG_DELAY_SHIFT 0
+#define CS53L30_ADCx_NG_DELAY_WIDTH 2
+#define CS53L30_ADCx_NG_DELAY_MASK (((1 << CS53L30_ADCx_NG_DELAY_WIDTH) - 1) << CS53L30_ADCx_NG_DELAY_SHIFT)
+
+#define CS53L30_ADCx_NG_CTL_DEFAULT (0)
+
+/* R41/R42/R49/R50 (0x29/0x2A/0x31/0x32) CS53L30_ADCxy_AFE_CTL - ADC1A/1B/2A/2B AFE Control */
+#define CS53L30_ADCxy_PREAMP_SHIFT 6
+#define CS53L30_ADCxy_PREAMP_WIDTH 2
+#define CS53L30_ADCxy_PREAMP_MASK (((1 << CS53L30_ADCxy_PREAMP_WIDTH) - 1) << CS53L30_ADCxy_PREAMP_SHIFT)
+#define CS53L30_ADCxy_PGA_VOL_SHIFT 0
+#define CS53L30_ADCxy_PGA_VOL_WIDTH 6
+#define CS53L30_ADCxy_PGA_VOL_MASK (((1 << CS53L30_ADCxy_PGA_VOL_WIDTH) - 1) << CS53L30_ADCxy_PGA_VOL_SHIFT)
+
+#define CS53L30_ADCxy_AFE_CTL_DEFAULT (0)
+
+/* R43/R44/R51/R52 (0x2B/0x2C/0x33/0x34) CS53L30_ADCxy_DIG_VOL - ADC1A/1B/2A/2B Digital Volume */
+#define CS53L30_ADCxy_VOL_MUTE (0x80)
+
+#define CS53L30_ADCxy_DIG_VOL_DEFAULT (0x0)
+
+/* CS53L30_INT */
+#define CS53L30_PDN_DONE (1 << 7)
+#define CS53L30_THMS_TRIP (1 << 6)
+#define CS53L30_SYNC_DONE (1 << 5)
+#define CS53L30_ADC2B_OVFL (1 << 4)
+#define CS53L30_ADC2A_OVFL (1 << 3)
+#define CS53L30_ADC1B_OVFL (1 << 2)
+#define CS53L30_ADC1A_OVFL (1 << 1)
+#define CS53L30_MUTE_PIN (1 << 0)
+#define CS53L30_DEVICE_INT_MASK 0xFF
+
+#endif /* __CS53L30_H__ */
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 9459593eef13..f0057cd223a4 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -13,8 +13,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/i2c.h>
+#include <linux/property.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -382,11 +382,11 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
}
/*
- * DT to pdata conversion
+ * DT/ACPI to pdata conversion
*/
static enum da7219_aad_micbias_pulse_lvl
- da7219_aad_of_micbias_pulse_lvl(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_micbias_pulse_lvl(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 2800:
@@ -400,7 +400,7 @@ static enum da7219_aad_micbias_pulse_lvl
}
static enum da7219_aad_btn_cfg
- da7219_aad_of_btn_cfg(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_btn_cfg(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 2:
@@ -424,7 +424,7 @@ static enum da7219_aad_btn_cfg
}
static enum da7219_aad_mic_det_thr
- da7219_aad_of_mic_det_thr(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_mic_det_thr(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 200:
@@ -442,7 +442,7 @@ static enum da7219_aad_mic_det_thr
}
static enum da7219_aad_jack_ins_deb
- da7219_aad_of_jack_ins_deb(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_jack_ins_deb(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 5:
@@ -468,7 +468,7 @@ static enum da7219_aad_jack_ins_deb
}
static enum da7219_aad_jack_det_rate
- da7219_aad_of_jack_det_rate(struct snd_soc_codec *codec, const char *str)
+ da7219_aad_fw_jack_det_rate(struct snd_soc_codec *codec, const char *str)
{
if (!strcmp(str, "32ms_64ms")) {
return DA7219_AAD_JACK_DET_RATE_32_64MS;
@@ -485,7 +485,7 @@ static enum da7219_aad_jack_det_rate
}
static enum da7219_aad_jack_rem_deb
- da7219_aad_of_jack_rem_deb(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_jack_rem_deb(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
@@ -503,7 +503,7 @@ static enum da7219_aad_jack_rem_deb
}
static enum da7219_aad_btn_avg
- da7219_aad_of_btn_avg(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_btn_avg(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
@@ -521,7 +521,7 @@ static enum da7219_aad_btn_avg
}
static enum da7219_aad_adc_1bit_rpt
- da7219_aad_of_adc_1bit_rpt(struct snd_soc_codec *codec, u32 val)
+ da7219_aad_fw_adc_1bit_rpt(struct snd_soc_codec *codec, u32 val)
{
switch (val) {
case 1:
@@ -538,97 +538,96 @@ static enum da7219_aad_adc_1bit_rpt
}
}
-static struct da7219_aad_pdata *da7219_aad_of_to_pdata(struct snd_soc_codec *codec)
+static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct snd_soc_codec *codec)
{
- struct device_node *np = codec->dev->of_node;
- struct device_node *aad_np = of_find_node_by_name(np, "da7219_aad");
+ struct device *dev = codec->dev;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct fwnode_handle *aad_np;
struct da7219_aad_pdata *aad_pdata;
- const char *of_str;
- u32 of_val32;
+ const char *fw_str;
+ u32 fw_val32;
+ aad_np = device_get_named_child_node(dev, "da7219_aad");
if (!aad_np)
return NULL;
aad_pdata = devm_kzalloc(codec->dev, sizeof(*aad_pdata), GFP_KERNEL);
if (!aad_pdata)
- goto out;
+ return NULL;
- aad_pdata->irq = irq_of_parse_and_map(np, 0);
+ aad_pdata->irq = i2c->irq;
- if (of_property_read_u32(aad_np, "dlg,micbias-pulse-lvl",
- &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,micbias-pulse-lvl",
+ &fw_val32) >= 0)
aad_pdata->micbias_pulse_lvl =
- da7219_aad_of_micbias_pulse_lvl(codec, of_val32);
+ da7219_aad_fw_micbias_pulse_lvl(codec, fw_val32);
else
aad_pdata->micbias_pulse_lvl = DA7219_AAD_MICBIAS_PULSE_LVL_OFF;
- if (of_property_read_u32(aad_np, "dlg,micbias-pulse-time",
- &of_val32) >= 0)
- aad_pdata->micbias_pulse_time = of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,micbias-pulse-time",
+ &fw_val32) >= 0)
+ aad_pdata->micbias_pulse_time = fw_val32;
- if (of_property_read_u32(aad_np, "dlg,btn-cfg", &of_val32) >= 0)
- aad_pdata->btn_cfg = da7219_aad_of_btn_cfg(codec, of_val32);
+ if (fwnode_property_read_u32(aad_np, "dlg,btn-cfg", &fw_val32) >= 0)
+ aad_pdata->btn_cfg = da7219_aad_fw_btn_cfg(codec, fw_val32);
else
aad_pdata->btn_cfg = DA7219_AAD_BTN_CFG_10MS;
- if (of_property_read_u32(aad_np, "dlg,mic-det-thr", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,mic-det-thr", &fw_val32) >= 0)
aad_pdata->mic_det_thr =
- da7219_aad_of_mic_det_thr(codec, of_val32);
+ da7219_aad_fw_mic_det_thr(codec, fw_val32);
else
aad_pdata->mic_det_thr = DA7219_AAD_MIC_DET_THR_500_OHMS;
- if (of_property_read_u32(aad_np, "dlg,jack-ins-deb", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,jack-ins-deb", &fw_val32) >= 0)
aad_pdata->jack_ins_deb =
- da7219_aad_of_jack_ins_deb(codec, of_val32);
+ da7219_aad_fw_jack_ins_deb(codec, fw_val32);
else
aad_pdata->jack_ins_deb = DA7219_AAD_JACK_INS_DEB_20MS;
- if (!of_property_read_string(aad_np, "dlg,jack-det-rate", &of_str))
+ if (!fwnode_property_read_string(aad_np, "dlg,jack-det-rate", &fw_str))
aad_pdata->jack_det_rate =
- da7219_aad_of_jack_det_rate(codec, of_str);
+ da7219_aad_fw_jack_det_rate(codec, fw_str);
else
aad_pdata->jack_det_rate = DA7219_AAD_JACK_DET_RATE_256_512MS;
- if (of_property_read_u32(aad_np, "dlg,jack-rem-deb", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,jack-rem-deb", &fw_val32) >= 0)
aad_pdata->jack_rem_deb =
- da7219_aad_of_jack_rem_deb(codec, of_val32);
+ da7219_aad_fw_jack_rem_deb(codec, fw_val32);
else
aad_pdata->jack_rem_deb = DA7219_AAD_JACK_REM_DEB_1MS;
- if (of_property_read_u32(aad_np, "dlg,a-d-btn-thr", &of_val32) >= 0)
- aad_pdata->a_d_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,a-d-btn-thr", &fw_val32) >= 0)
+ aad_pdata->a_d_btn_thr = (u8) fw_val32;
else
aad_pdata->a_d_btn_thr = 0xA;
- if (of_property_read_u32(aad_np, "dlg,d-b-btn-thr", &of_val32) >= 0)
- aad_pdata->d_b_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,d-b-btn-thr", &fw_val32) >= 0)
+ aad_pdata->d_b_btn_thr = (u8) fw_val32;
else
aad_pdata->d_b_btn_thr = 0x16;
- if (of_property_read_u32(aad_np, "dlg,b-c-btn-thr", &of_val32) >= 0)
- aad_pdata->b_c_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,b-c-btn-thr", &fw_val32) >= 0)
+ aad_pdata->b_c_btn_thr = (u8) fw_val32;
else
aad_pdata->b_c_btn_thr = 0x21;
- if (of_property_read_u32(aad_np, "dlg,c-mic-btn-thr", &of_val32) >= 0)
- aad_pdata->c_mic_btn_thr = (u8) of_val32;
+ if (fwnode_property_read_u32(aad_np, "dlg,c-mic-btn-thr", &fw_val32) >= 0)
+ aad_pdata->c_mic_btn_thr = (u8) fw_val32;
else
aad_pdata->c_mic_btn_thr = 0x3E;
- if (of_property_read_u32(aad_np, "dlg,btn-avg", &of_val32) >= 0)
- aad_pdata->btn_avg = da7219_aad_of_btn_avg(codec, of_val32);
+ if (fwnode_property_read_u32(aad_np, "dlg,btn-avg", &fw_val32) >= 0)
+ aad_pdata->btn_avg = da7219_aad_fw_btn_avg(codec, fw_val32);
else
aad_pdata->btn_avg = DA7219_AAD_BTN_AVG_2;
- if (of_property_read_u32(aad_np, "dlg,adc-1bit-rpt", &of_val32) >= 0)
+ if (fwnode_property_read_u32(aad_np, "dlg,adc-1bit-rpt", &fw_val32) >= 0)
aad_pdata->adc_1bit_rpt =
- da7219_aad_of_adc_1bit_rpt(codec, of_val32);
+ da7219_aad_fw_adc_1bit_rpt(codec, fw_val32);
else
aad_pdata->adc_1bit_rpt = DA7219_AAD_ADC_1BIT_RPT_1;
-out:
- of_node_put(aad_np);
-
return aad_pdata;
}
@@ -769,9 +768,9 @@ int da7219_aad_init(struct snd_soc_codec *codec)
da7219->aad = da7219_aad;
da7219_aad->codec = codec;
- /* Handle any DT/platform data */
- if ((codec->dev->of_node) && (da7219->pdata))
- da7219->pdata->aad_pdata = da7219_aad_of_to_pdata(codec);
+ /* Handle any DT/ACPI/platform data */
+ if (da7219->pdata && !da7219->pdata->aad_pdata)
+ da7219->pdata->aad_pdata = da7219_aad_fw_to_pdata(codec);
da7219_aad_handle_pdata(codec);
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 5c93899f1f0e..50ea94317cb3 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/pm.h>
@@ -1418,7 +1419,7 @@ static struct snd_soc_dai_driver da7219_dai = {
/*
- * DT
+ * DT/ACPI
*/
static const struct of_device_id da7219_of_match[] = {
@@ -1434,7 +1435,7 @@ static const struct acpi_device_id da7219_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, da7219_acpi_match);
static enum da7219_micbias_voltage
- da7219_of_micbias_lvl(struct snd_soc_codec *codec, u32 val)
+ da7219_fw_micbias_lvl(struct device *dev, u32 val)
{
switch (val) {
case 1600:
@@ -1450,13 +1451,13 @@ static enum da7219_micbias_voltage
case 2600:
return DA7219_MICBIAS_2_6V;
default:
- dev_warn(codec->dev, "Invalid micbias level");
+ dev_warn(dev, "Invalid micbias level");
return DA7219_MICBIAS_2_2V;
}
}
static enum da7219_mic_amp_in_sel
- da7219_of_mic_amp_in_sel(struct snd_soc_codec *codec, const char *str)
+ da7219_fw_mic_amp_in_sel(struct device *dev, const char *str)
{
if (!strcmp(str, "diff")) {
return DA7219_MIC_AMP_IN_SEL_DIFF;
@@ -1465,29 +1466,29 @@ static enum da7219_mic_amp_in_sel
} else if (!strcmp(str, "se_n")) {
return DA7219_MIC_AMP_IN_SEL_SE_N;
} else {
- dev_warn(codec->dev, "Invalid mic input type selection");
+ dev_warn(dev, "Invalid mic input type selection");
return DA7219_MIC_AMP_IN_SEL_DIFF;
}
}
-static struct da7219_pdata *da7219_of_to_pdata(struct snd_soc_codec *codec)
+static struct da7219_pdata *da7219_fw_to_pdata(struct snd_soc_codec *codec)
{
- struct device_node *np = codec->dev->of_node;
+ struct device *dev = codec->dev;
struct da7219_pdata *pdata;
const char *of_str;
u32 of_val32;
- pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- if (of_property_read_u32(np, "dlg,micbias-lvl", &of_val32) >= 0)
- pdata->micbias_lvl = da7219_of_micbias_lvl(codec, of_val32);
+ if (device_property_read_u32(dev, "dlg,micbias-lvl", &of_val32) >= 0)
+ pdata->micbias_lvl = da7219_fw_micbias_lvl(dev, of_val32);
else
pdata->micbias_lvl = DA7219_MICBIAS_2_2V;
- if (!of_property_read_string(np, "dlg,mic-amp-in-sel", &of_str))
- pdata->mic_amp_in_sel = da7219_of_mic_amp_in_sel(codec, of_str);
+ if (!device_property_read_string(dev, "dlg,mic-amp-in-sel", &of_str))
+ pdata->mic_amp_in_sel = da7219_fw_mic_amp_in_sel(dev, of_str);
else
pdata->mic_amp_in_sel = DA7219_MIC_AMP_IN_SEL_DIFF;
@@ -1662,11 +1663,10 @@ static int da7219_probe(struct snd_soc_codec *codec)
break;
}
- /* Handle DT/Platform data */
- if (codec->dev->of_node)
- da7219->pdata = da7219_of_to_pdata(codec);
- else
- da7219->pdata = dev_get_platdata(codec->dev);
+ /* Handle DT/ACPI/Platform data */
+ da7219->pdata = dev_get_platdata(codec->dev);
+ if (!da7219->pdata)
+ da7219->pdata = da7219_fw_to_pdata(codec);
da7219_handle_pdata(codec);
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 2abb742fc47b..4e181b270d95 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1124,8 +1124,10 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin, int repoll)
}
hdac_hdmi_parse_eld(edev, pin);
- print_hex_dump_bytes("ELD: ", DUMP_PREFIX_OFFSET,
- pin->eld.eld_buffer, pin->eld.eld_size);
+ print_hex_dump_debug("ELD: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ pin->eld.eld_buffer, pin->eld.eld_size,
+ true);
} else {
pin->eld.monitor_present = false;
pin->eld.eld_valid = false;
@@ -1816,6 +1818,7 @@ static const struct dev_pm_ops hdac_hdmi_pm = {
static const struct hda_device_id hdmi_list[] = {
HDA_CODEC_EXT_ENTRY(0x80862809, 0x100000, "Skylake HDMI", 0),
HDA_CODEC_EXT_ENTRY(0x8086280a, 0x100000, "Broxton HDMI", 0),
+ HDA_CODEC_EXT_ENTRY(0x8086280b, 0x100000, "Kabylake HDMI", 0),
{}
};
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 8e36e883e453..f27d115626db 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -112,7 +112,7 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
return ret;
if (hcp->hcd.ops->audio_startup) {
- ret = hcp->hcd.ops->audio_startup(dai->dev->parent);
+ ret = hcp->hcd.ops->audio_startup(dai->dev->parent, hcp->hcd.data);
if (ret) {
mutex_lock(&hcp->current_stream_lock);
hcp->current_stream = NULL;
@@ -122,8 +122,8 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
}
if (hcp->hcd.ops->get_eld) {
- ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
- sizeof(hcp->eld));
+ ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->hcd.data,
+ hcp->eld, sizeof(hcp->eld));
if (!ret) {
ret = snd_pcm_hw_constraint_eld(substream->runtime,
@@ -144,7 +144,7 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
WARN_ON(hcp->current_stream != substream);
- hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+ hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
mutex_lock(&hcp->current_stream_lock);
hcp->current_stream = NULL;
@@ -195,8 +195,8 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
hp.sample_rate = params_rate(params);
hp.channels = params_channels(params);
- return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
- &hp);
+ return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
+ &hcp->daifmt[dai->id], &hp);
}
static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
@@ -280,7 +280,8 @@ static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
dev_dbg(dai->dev, "%s()\n", __func__);
if (hcp->hcd.ops->digital_mute)
- return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+ return hcp->hcd.ops->digital_mute(dai->dev->parent,
+ hcp->hcd.data, mute);
return 0;
}
diff --git a/sound/soc/codecs/max98504.c b/sound/soc/codecs/max98504.c
new file mode 100644
index 000000000000..a7320e709890
--- /dev/null
+++ b/sound/soc/codecs/max98504.c
@@ -0,0 +1,383 @@
+/*
+ * MAX98504 ALSA SoC Audio driver
+ *
+ * Copyright 2013 - 2014 Maxim Integrated Products
+ * Copyright 2016 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <sound/soc.h>
+
+#include "max98504.h"
+
+static const char * const max98504_supply_names[] = {
+ "DVDD",
+ "DIOVDD",
+ "PVDD",
+};
+#define MAX98504_NUM_SUPPLIES ARRAY_SIZE(max98504_supply_names)
+
+struct max98504_priv {
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[MAX98504_NUM_SUPPLIES];
+ unsigned int pcm_rx_channels;
+ bool brownout_enable;
+ unsigned int brownout_threshold;
+ unsigned int brownout_attenuation;
+ unsigned int brownout_attack_hold;
+ unsigned int brownout_timed_hold;
+ unsigned int brownout_release_rate;
+};
+
+static struct reg_default max98504_reg_defaults[] = {
+ { 0x01, 0},
+ { 0x02, 0},
+ { 0x03, 0},
+ { 0x04, 0},
+ { 0x10, 0},
+ { 0x11, 0},
+ { 0x12, 0},
+ { 0x13, 0},
+ { 0x14, 0},
+ { 0x15, 0},
+ { 0x16, 0},
+ { 0x17, 0},
+ { 0x18, 0},
+ { 0x19, 0},
+ { 0x1A, 0},
+ { 0x20, 0},
+ { 0x21, 0},
+ { 0x22, 0},
+ { 0x23, 0},
+ { 0x24, 0},
+ { 0x25, 0},
+ { 0x26, 0},
+ { 0x27, 0},
+ { 0x28, 0},
+ { 0x30, 0},
+ { 0x31, 0},
+ { 0x32, 0},
+ { 0x33, 0},
+ { 0x34, 0},
+ { 0x35, 0},
+ { 0x36, 0},
+ { 0x37, 0},
+ { 0x38, 0},
+ { 0x39, 0},
+ { 0x40, 0},
+ { 0x41, 0},
+};
+
+static bool max98504_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98504_INTERRUPT_STATUS:
+ case MAX98504_INTERRUPT_FLAGS:
+ case MAX98504_INTERRUPT_FLAG_CLEARS:
+ case MAX98504_WATCHDOG_CLEAR:
+ case MAX98504_GLOBAL_ENABLE:
+ case MAX98504_SOFTWARE_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98504_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98504_SOFTWARE_RESET:
+ case MAX98504_WATCHDOG_CLEAR:
+ case MAX98504_INTERRUPT_FLAG_CLEARS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int max98504_pcm_rx_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c);
+
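+ /*
+ * Enable the configured PCM RX channels just before the DAC powers up
+ * and clear the enables again after it powers down.
+ */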
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ regmap_write(max98504->regmap, MAX98504_PCM_RX_ENABLE,
+ max98504->pcm_rx_channels);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_write(max98504->regmap, MAX98504_PCM_RX_ENABLE, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int max98504_component_probe(struct snd_soc_component *c)
+{
+ struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c);
+ struct regmap *map = max98504->regmap;
+ int ret;
+
+ ret = regulator_bulk_enable(MAX98504_NUM_SUPPLIES, max98504->supplies);
+ if (ret < 0)
+ return ret;
+
+ regmap_write(map, MAX98504_SOFTWARE_RESET, 0x1);
+ msleep(20);
+
+ if (!max98504->brownout_enable)
+ return 0;
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_ENABLE, 0x1);
+
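+ /*
+ * CONFIG_1 packs the brownout threshold into bits [7:3] and the
+ * attenuation code into bits [1:0], as encoded below.
+ */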
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_1,
+ (max98504->brownout_threshold & 0x1f) << 3 |
+ (max98504->brownout_attenuation & 0x3));
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_2,
+ max98504->brownout_attack_hold & 0xff);
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_3,
+ max98504->brownout_timed_hold & 0xff);
+
+ regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_4,
+ max98504->brownout_release_rate & 0xff);
+
+ return 0;
+}
+
+static void max98504_component_remove(struct snd_soc_component *c)
+{
+ struct max98504_priv *max98504 = snd_soc_component_get_drvdata(c);
+
+ regulator_bulk_disable(MAX98504_NUM_SUPPLIES, max98504->supplies);
+}
+
+static const char *spk_source_mux_text[] = {
+ "PCM Monomix", "Analog In", "PDM Left", "PDM Right"
+};
+
+static const struct soc_enum spk_source_mux_enum =
+ SOC_ENUM_SINGLE(MAX98504_SPEAKER_SOURCE_SELECT,
+ 0, ARRAY_SIZE(spk_source_mux_text),
+ spk_source_mux_text);
+
+static const struct snd_kcontrol_new spk_source_mux =
+ SOC_DAPM_ENUM("SPK Source", spk_source_mux_enum);
+
+static const struct snd_soc_dapm_route max98504_dapm_routes[] = {
+ { "SPKOUT", NULL, "Global Enable" },
+ { "SPK Source", "PCM Monomix", "DAC PCM" },
+ { "SPK Source", "Analog In", "AIN" },
+ { "SPK Source", "PDM Left", "DAC PDM" },
+ { "SPK Source", "PDM Right", "DAC PDM" },
+};
+
+static const struct snd_soc_dapm_widget max98504_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("Global Enable", MAX98504_GLOBAL_ENABLE,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_INPUT("AIN"),
+ SND_SOC_DAPM_AIF_OUT("AIF2OUTL", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2OUTR", "AIF2 Capture", 1, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC PCM", NULL, SND_SOC_NOPM, 0, 0,
+ max98504_pcm_rx_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_DAC("DAC PDM", NULL, MAX98504_PDM_RX_ENABLE, 0, 0),
+ SND_SOC_DAPM_MUX("SPK Source", SND_SOC_NOPM, 0, 0, &spk_source_mux),
+ SND_SOC_DAPM_REG(snd_soc_dapm_spk, "SPKOUT",
+ MAX98504_SPEAKER_ENABLE, 0, 1, 1, 0),
+};
+
+static int max98504_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct max98504_priv *max98504 = snd_soc_dai_get_drvdata(dai);
+ struct regmap *map = max98504->regmap;
+
+ switch (dai->id) {
+ case MAX98504_DAI_ID_PCM:
+ regmap_write(map, MAX98504_PCM_TX_ENABLE, tx_mask);
+ max98504->pcm_rx_channels = rx_mask;
+ break;
+
+ case MAX98504_DAI_ID_PDM:
+ regmap_write(map, MAX98504_PDM_TX_ENABLE, tx_mask);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
+static int max98504_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+ struct max98504_priv *max98504 = snd_soc_dai_get_drvdata(dai);
+ struct regmap *map = max98504->regmap;
+ unsigned int i, sources = 0;
+
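+ /* set a source bit for each TX channel that has a slot assigned */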
+ for (i = 0; i < tx_num; i++)
+ if (tx_slot[i])
+ sources |= (1 << i);
+
+ switch (dai->id) {
+ case MAX98504_DAI_ID_PCM:
+ regmap_write(map, MAX98504_PCM_TX_CHANNEL_SOURCES,
+ sources);
+ break;
+
+ case MAX98504_DAI_ID_PDM:
+ regmap_write(map, MAX98504_PDM_TX_CONTROL, sources);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ regmap_write(map, MAX98504_MEASUREMENT_ENABLE, sources ? 0x3 : 0x01);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops max98504_dai_ops = {
+ .set_tdm_slot = max98504_set_tdm_slot,
+ .set_channel_map = max98504_set_channel_map,
+};
+
+#define MAX98504_FORMATS (SNDRV_PCM_FMTBIT_S8|SNDRV_PCM_FMTBIT_S16_LE|\
+ SNDRV_PCM_FMTBIT_S24_LE|SNDRV_PCM_FMTBIT_S32_LE)
+#define MAX98504_PDM_RATES (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|\
+ SNDRV_PCM_RATE_32000|SNDRV_PCM_RATE_44100|\
+ SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_88200|\
+ SNDRV_PCM_RATE_96000)
+
+static struct snd_soc_dai_driver max98504_dai[] = {
+ /* TODO: Add the PCM interface definitions */
+ {
+ .name = "max98504-aif2",
+ .id = MAX98504_DAI_ID_PDM,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98504_PDM_RATES,
+ .formats = MAX98504_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98504_PDM_RATES,
+ .formats = MAX98504_FORMATS,
+ },
+ .ops = &max98504_dai_ops,
+ },
+};
+
+static const struct snd_soc_component_driver max98504_component_driver = {
+ .probe = max98504_component_probe,
+ .remove = max98504_component_remove,
+ .dapm_widgets = max98504_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98504_dapm_widgets),
+ .dapm_routes = max98504_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max98504_dapm_routes),
+};
+
+static const struct regmap_config max98504_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MAX98504_MAX_REGISTER,
+ .reg_defaults = max98504_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max98504_reg_defaults),
+ .volatile_reg = max98504_volatile_register,
+ .readable_reg = max98504_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int max98504_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *node = dev->of_node;
+ struct max98504_priv *max98504;
+ int i, ret;
+
+ max98504 = devm_kzalloc(dev, sizeof(*max98504), GFP_KERNEL);
+ if (!max98504)
+ return -ENOMEM;
+
+ if (node) {
+ if (!of_property_read_u32(node, "maxim,brownout-threshold",
+ &max98504->brownout_threshold))
+ max98504->brownout_enable = true;
+
+ of_property_read_u32(node, "maxim,brownout-attenuation",
+ &max98504->brownout_attenuation);
+ of_property_read_u32(node, "maxim,brownout-attack-hold-ms",
+ &max98504->brownout_attack_hold);
+ of_property_read_u32(node, "maxim,brownout-timed-hold-ms",
+ &max98504->brownout_timed_hold);
+ of_property_read_u32(node, "maxim,brownout-release-rate-ms",
+ &max98504->brownout_release_rate);
+ }
+
+ max98504->regmap = devm_regmap_init_i2c(client, &max98504_regmap);
+ if (IS_ERR(max98504->regmap)) {
+ ret = PTR_ERR(max98504->regmap);
+ dev_err(&client->dev, "regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < MAX98504_NUM_SUPPLIES; i++)
+ max98504->supplies[i].supply = max98504_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, MAX98504_NUM_SUPPLIES,
+ max98504->supplies);
+ if (ret < 0)
+ return ret;
+
+ i2c_set_clientdata(client, max98504);
+
+ return devm_snd_soc_register_component(dev, &max98504_component_driver,
+ max98504_dai, ARRAY_SIZE(max98504_dai));
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max98504_of_match[] = {
+ { .compatible = "maxim,max98504" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, max98504_of_match);
+#endif
+
+static const struct i2c_device_id max98504_i2c_id[] = {
+ { "max98504" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max98504_i2c_id);
+
+static struct i2c_driver max98504_i2c_driver = {
+ .driver = {
+ .name = "max98504",
+ .of_match_table = of_match_ptr(max98504_of_match),
+ },
+ .probe = max98504_i2c_probe,
+ .id_table = max98504_i2c_id,
+};
+module_i2c_driver(max98504_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC MAX98504 driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98504.h b/sound/soc/codecs/max98504.h
new file mode 100644
index 000000000000..afbefad2d5ce
--- /dev/null
+++ b/sound/soc/codecs/max98504.h
@@ -0,0 +1,59 @@
+/*
+ * MAX98504 ALSA SoC Audio driver
+ *
+ * Copyright 2011 - 2012 Maxim Integrated Products
+ * Copyright 2016 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef MAX98504_H_
+#define MAX98504_H_
+
+/*
+ * MAX98504 Register Definitions
+ */
+#define MAX98504_INTERRUPT_STATUS 0x01
+#define MAX98504_INTERRUPT_FLAGS 0x02
+#define MAX98504_INTERRUPT_ENABLE 0x03
+#define MAX98504_INTERRUPT_FLAG_CLEARS 0x04
+#define MAX98504_GPIO_ENABLE 0x10
+#define MAX98504_GPIO_CONFIG 0x11
+#define MAX98504_WATCHDOG_ENABLE 0x12
+#define MAX98504_WATCHDOG_CONFIG 0x13
+#define MAX98504_WATCHDOG_CLEAR 0x14
+#define MAX98504_CLOCK_MONITOR_ENABLE 0x15
+#define MAX98504_PVDD_BROWNOUT_ENABLE 0x16
+#define MAX98504_PVDD_BROWNOUT_CONFIG_1 0x17
+#define MAX98504_PVDD_BROWNOUT_CONFIG_2 0x18
+#define MAX98504_PVDD_BROWNOUT_CONFIG_3 0x19
+#define MAX98504_PVDD_BROWNOUT_CONFIG_4 0x1a
+#define MAX98504_PCM_RX_ENABLE 0x20
+#define MAX98504_PCM_TX_ENABLE 0x21
+#define MAX98504_PCM_TX_HIZ_CONTROL 0x22
+#define MAX98504_PCM_TX_CHANNEL_SOURCES 0x23
+#define MAX98504_PCM_MODE_CONFIG 0x24
+#define MAX98504_PCM_DSP_CONFIG 0x25
+#define MAX98504_PCM_CLOCK_SETUP 0x26
+#define MAX98504_PCM_SAMPLE_RATE_SETUP 0x27
+#define MAX98504_PCM_TO_SPEAKER_MONOMIX 0x28
+#define MAX98504_PDM_TX_ENABLE 0x30
+#define MAX98504_PDM_TX_HIZ_CONTROL 0x31
+#define MAX98504_PDM_TX_CONTROL 0x32
+#define MAX98504_PDM_RX_ENABLE 0x33
+#define MAX98504_SPEAKER_ENABLE 0x34
+#define MAX98504_SPEAKER_SOURCE_SELECT 0x35
+#define MAX98504_MEASUREMENT_ENABLE 0x36
+#define MAX98504_ANALOGUE_INPUT_GAIN 0x37
+#define MAX98504_TEMPERATURE_LIMIT_CONFIG 0x38
+#define MAX98504_GLOBAL_ENABLE 0x40
+#define MAX98504_SOFTWARE_RESET 0x41
+#define MAX98504_REV_ID 0x7fff
+
+#define MAX98504_MAX_REGISTER 0x7fff
+
+#define MAX98504_DAI_ID_PCM 1
+#define MAX98504_DAI_ID_PDM 2
+
+#endif /* MAX98504_H_ */
diff --git a/sound/soc/codecs/max9860.c b/sound/soc/codecs/max9860.c
new file mode 100644
index 000000000000..68074c92a7c0
--- /dev/null
+++ b/sound/soc/codecs/max9860.c
@@ -0,0 +1,753 @@
+/*
+ * Driver for the MAX9860 Mono Audio Voice Codec
+ *
+ * https://datasheets.maximintegrated.com/en/ds/MAX9860.pdf
+ *
+ * The driver does not support sidetone since the DVST register field is
+ * backwards with the mute near the maximum level instead of the minimum.
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ * Copyright 2016 Axentia Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#include "max9860.h"
+
+struct max9860_priv {
+ struct regmap *regmap;
+ struct regulator *dvddio;
+ struct notifier_block dvddio_nb;
+ u8 psclk;
+ unsigned long pclk_rate;
+ int fmt;
+};
+
+static int max9860_dvddio_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct max9860_priv *max9860 = container_of(nb, struct max9860_priv,
+ dvddio_nb);
+ if (event & REGULATOR_EVENT_DISABLE) {
+ regcache_mark_dirty(max9860->regmap);
+ regcache_cache_only(max9860->regmap, true);
+ }
+
+ return 0;
+}
+
+static const struct reg_default max9860_reg_defaults[] = {
+ { MAX9860_PWRMAN, 0x00 },
+ { MAX9860_INTEN, 0x00 },
+ { MAX9860_SYSCLK, 0x00 },
+ { MAX9860_AUDIOCLKHIGH, 0x00 },
+ { MAX9860_AUDIOCLKLOW, 0x00 },
+ { MAX9860_IFC1A, 0x00 },
+ { MAX9860_IFC1B, 0x00 },
+ { MAX9860_VOICEFLTR, 0x00 },
+ { MAX9860_DACATTN, 0x00 },
+ { MAX9860_ADCLEVEL, 0x00 },
+ { MAX9860_DACGAIN, 0x00 },
+ { MAX9860_MICGAIN, 0x00 },
+ { MAX9860_MICADC, 0x00 },
+ { MAX9860_NOISEGATE, 0x00 },
+};
+
+static bool max9860_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTRSTATUS ... MAX9860_MICGAIN:
+ case MAX9860_MICADC ... MAX9860_PWRMAN:
+ case MAX9860_REVISION:
+ return true;
+ }
+
+ return false;
+}
+
+static bool max9860_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTEN ... MAX9860_MICGAIN:
+ case MAX9860_MICADC ... MAX9860_PWRMAN:
+ return true;
+ }
+
+ return false;
+}
+
+static bool max9860_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTRSTATUS:
+ case MAX9860_MICREADBACK:
+ return true;
+ }
+
+ return false;
+}
+
+static bool max9860_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX9860_INTRSTATUS:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config max9860_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .readable_reg = max9860_readable,
+ .writeable_reg = max9860_writeable,
+ .volatile_reg = max9860_volatile,
+ .precious_reg = max9860_precious,
+
+ .max_register = MAX9860_MAX_REGISTER,
+ .reg_defaults = max9860_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max9860_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const DECLARE_TLV_DB_SCALE(dva_tlv, -9100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(dvg_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_RANGE(pam_tlv,
+ 0, MAX9860_PAM_MAX - 1, TLV_DB_SCALE_ITEM(-2000, 2000, 1),
+ MAX9860_PAM_MAX, MAX9860_PAM_MAX, TLV_DB_SCALE_ITEM(3000, 0, 0));
+static const DECLARE_TLV_DB_SCALE(pgam_tlv, 0, 100, 0);
+static const DECLARE_TLV_DB_SCALE(anth_tlv, -7600, 400, 1);
+static const DECLARE_TLV_DB_SCALE(agcth_tlv, -1800, 100, 0);
+
+static const char * const agchld_text[] = {
+ "AGC Disabled", "50ms", "100ms", "400ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(agchld_enum, MAX9860_MICADC,
+ MAX9860_AGCHLD_SHIFT, agchld_text);
+
+static const char * const agcsrc_text[] = {
+ "Left ADC", "Left/Right ADC"
+};
+
+static SOC_ENUM_SINGLE_DECL(agcsrc_enum, MAX9860_MICADC,
+ MAX9860_AGCSRC_SHIFT, agcsrc_text);
+
+static const char * const agcatk_text[] = {
+ "3ms", "12ms", "50ms", "200ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(agcatk_enum, MAX9860_MICADC,
+ MAX9860_AGCATK_SHIFT, agcatk_text);
+
+static const char * const agcrls_text[] = {
+ "78ms", "156ms", "312ms", "625ms",
+ "1.25s", "2.5s", "5s", "10s"
+};
+
+static SOC_ENUM_SINGLE_DECL(agcrls_enum, MAX9860_MICADC,
+ MAX9860_AGCRLS_SHIFT, agcrls_text);
+
+static const char * const filter_text[] = {
+ "Disabled",
+ "Elliptical HP 217Hz notch (16kHz)",
+ "Butterworth HP 500Hz (16kHz)",
+ "Elliptical HP 217Hz notch (8kHz)",
+ "Butterworth HP 500Hz (8kHz)",
+ "Butterworth HP 200Hz (48kHz)"
+};
+
+static SOC_ENUM_SINGLE_DECL(avflt_enum, MAX9860_VOICEFLTR,
+ MAX9860_AVFLT_SHIFT, filter_text);
+
+static SOC_ENUM_SINGLE_DECL(dvflt_enum, MAX9860_VOICEFLTR,
+ MAX9860_DVFLT_SHIFT, filter_text);
+
+static const struct snd_kcontrol_new max9860_controls[] = {
+SOC_SINGLE_TLV("Master Playback Volume", MAX9860_DACATTN,
+ MAX9860_DVA_SHIFT, MAX9860_DVA_MUTE, 1, dva_tlv),
+SOC_SINGLE_TLV("DAC Gain Volume", MAX9860_DACGAIN,
+ MAX9860_DVG_SHIFT, MAX9860_DVG_MAX, 0, dvg_tlv),
+SOC_DOUBLE_TLV("Line Capture Volume", MAX9860_ADCLEVEL,
+ MAX9860_ADCLL_SHIFT, MAX9860_ADCRL_SHIFT, MAX9860_ADCxL_MIN, 1,
+ adc_tlv),
+
+SOC_ENUM("AGC Hold Time", agchld_enum),
+SOC_ENUM("AGC/Noise Gate Source", agcsrc_enum),
+SOC_ENUM("AGC Attack Time", agcatk_enum),
+SOC_ENUM("AGC Release Time", agcrls_enum),
+
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", MAX9860_NOISEGATE,
+ MAX9860_ANTH_SHIFT, MAX9860_ANTH_MAX, 0, anth_tlv),
+SOC_SINGLE_TLV("AGC Signal Threshold Volume", MAX9860_NOISEGATE,
+ MAX9860_AGCTH_SHIFT, MAX9860_AGCTH_MIN, 1, agcth_tlv),
+
+SOC_SINGLE_TLV("Mic PGA Volume", MAX9860_MICGAIN,
+ MAX9860_PGAM_SHIFT, MAX9860_PGAM_MIN, 1, pgam_tlv),
+SOC_SINGLE_TLV("Mic Preamp Volume", MAX9860_MICGAIN,
+ MAX9860_PAM_SHIFT, MAX9860_PAM_MAX, 0, pam_tlv),
+
+SOC_ENUM("ADC Filter", avflt_enum),
+SOC_ENUM("DAC Filter", dvflt_enum),
+};
+
+static const struct snd_soc_dapm_widget max9860_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MICL"),
+SND_SOC_DAPM_INPUT("MICR"),
+
+SND_SOC_DAPM_ADC("ADCL", NULL, MAX9860_PWRMAN, MAX9860_ADCLEN_SHIFT, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, MAX9860_PWRMAN, MAX9860_ADCREN_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIFOUTL", "Capture", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIFOUTR", "Capture", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_DAC("DAC", NULL, MAX9860_PWRMAN, MAX9860_DACEN_SHIFT, 0),
+
+SND_SOC_DAPM_OUTPUT("OUT"),
+
+SND_SOC_DAPM_SUPPLY("Supply", SND_SOC_NOPM, 0, 0,
+ NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_REGULATOR_SUPPLY("AVDD", 0, 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("DVDD", 0, 0),
+SND_SOC_DAPM_CLOCK_SUPPLY("mclk"),
+};
+
+static const struct snd_soc_dapm_route max9860_dapm_routes[] = {
+ { "ADCL", NULL, "MICL" },
+ { "ADCR", NULL, "MICR" },
+ { "AIFOUTL", NULL, "ADCL" },
+ { "AIFOUTR", NULL, "ADCR" },
+
+ { "DAC", NULL, "AIFINL" },
+ { "DAC", NULL, "AIFINR" },
+ { "OUT", NULL, "DAC" },
+
+ { "Supply", NULL, "AVDD" },
+ { "Supply", NULL, "DVDD" },
+ { "Supply", NULL, "mclk" },
+
+ { "DAC", NULL, "Supply" },
+ { "ADCL", NULL, "Supply" },
+ { "ADCR", NULL, "Supply" },
+};
+
+static int max9860_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max9860_priv *max9860 = snd_soc_codec_get_drvdata(codec);
+ u8 master;
+ u8 ifc1a = 0;
+ u8 ifc1b = 0;
+ u8 sysclk = 0;
+ unsigned long n;
+ int ret;
+
+ dev_dbg(codec->dev, "hw_params %u Hz, %u channels\n",
+ params_rate(params),
+ params_channels(params));
+
+ if (params_channels(params) == 2)
+ ifc1b |= MAX9860_ST;
+
+ switch (max9860->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ master = 0;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ master = MAX9860_MASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ifc1a |= master;
+
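+ /*
+ * In master mode pick the 64x bit-clock setting (BSEL_64X) when the
+ * frame needs more than 48 bit clocks, otherwise 48x (BSEL_48X).
+ */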
+ if (master) {
+ if (params_width(params) * params_channels(params) > 48)
+ ifc1b |= MAX9860_BSEL_64X;
+ else
+ ifc1b |= MAX9860_BSEL_48X;
+ }
+
+ switch (max9860->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ifc1a |= MAX9860_DDLY;
+ ifc1b |= MAX9860_ADLY;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ifc1a |= MAX9860_WCI;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ if (params_width(params) != 16) {
+ dev_err(codec->dev,
+ "DSP_A works for 16 bits per sample only.\n");
+ return -EINVAL;
+ }
+ ifc1a |= MAX9860_DDLY | MAX9860_WCI | MAX9860_HIZ | MAX9860_TDM;
+ ifc1b |= MAX9860_ADLY;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ if (params_width(params) != 16) {
+ dev_err(codec->dev,
+ "DSP_B works for 16 bits per sample only.\n");
+ return -EINVAL;
+ }
+ ifc1a |= MAX9860_WCI | MAX9860_HIZ | MAX9860_TDM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
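+ /*
+ * Clock inversion: IB toggles the bit clock invert bits (DBCI for the
+ * DAC path, ABCI for the ADC path) and IF toggles the frame clock
+ * invert bit (WCI); frame inversion is rejected for the DSP modes.
+ */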
+ switch (max9860->fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ switch (max9860->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ return -EINVAL;
+ }
+ ifc1a ^= MAX9860_WCI;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ switch (max9860->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ return -EINVAL;
+ }
+ ifc1a ^= MAX9860_WCI;
+ /* fall through */
+ case SND_SOC_DAIFMT_IB_NF:
+ ifc1a ^= MAX9860_DBCI;
+ ifc1b ^= MAX9860_ABCI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "IFC1A %02x\n", ifc1a);
+ ret = regmap_write(max9860->regmap, MAX9860_IFC1A, ifc1a);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set IFC1A: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(codec->dev, "IFC1B %02x\n", ifc1b);
+ ret = regmap_write(max9860->regmap, MAX9860_IFC1B, ifc1b);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set IFC1B: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check if Integer Clock Mode is possible, but avoid it in slave mode
+ * since we then do not know if lrclk is derived from pclk and the
+ * datasheet mentions that the frequencies have to match exactly in
+ * order for this to work.
+ */
+ if (params_rate(params) == 8000 || params_rate(params) == 16000) {
+ if (master) {
+ switch (max9860->pclk_rate) {
+ case 12000000:
+ sysclk = MAX9860_FREQ_12MHZ;
+ break;
+ case 13000000:
+ sysclk = MAX9860_FREQ_13MHZ;
+ break;
+ case 19200000:
+ sysclk = MAX9860_FREQ_19_2MHZ;
+ break;
+ default:
+ /*
+ * Integer Clock Mode not possible. Leave
+ * sysclk at zero and fall through to the
+ * code below for PLL mode.
+ */
+ break;
+ }
+
+ if (sysclk && params_rate(params) == 16000)
+ sysclk |= MAX9860_16KHZ;
+ }
+ }
+
+ /*
+ * Largest possible n:
+ * 65536 * 96 * 48kHz / 10MHz -> 30199
+ * Smallest possible n:
+ * 65536 * 96 * 8kHz / 20MHz -> 2517
+ * Both fit nicely in the available 15 bits, no need to apply any mask.
+ */
+ n = DIV_ROUND_CLOSEST_ULL(65536ULL * 96 * params_rate(params),
+ max9860->pclk_rate);
+
+ if (!sysclk) {
+ /* PLL mode */
+ if (params_rate(params) > 24000)
+ sysclk |= MAX9860_16KHZ;
+
+ if (!master)
+ n |= 1; /* trigger rapid pll lock mode */
+ }
+
+ sysclk |= max9860->psclk;
+ dev_dbg(codec->dev, "SYSCLK %02x\n", sysclk);
+ ret = regmap_write(max9860->regmap,
+ MAX9860_SYSCLK, sysclk);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(codec->dev, "N %lu\n", n);
+ ret = regmap_write(max9860->regmap,
+ MAX9860_AUDIOCLKHIGH, n >> 8);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set NHI: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_write(max9860->regmap,
+ MAX9860_AUDIOCLKLOW, n & 0xff);
+ if (ret) {
+ dev_err(codec->dev, "Failed to set NLO: %d\n", ret);
+ return ret;
+ }
+
+ if (!master) {
+ dev_dbg(codec->dev, "Enable PLL\n");
+ ret = regmap_update_bits(max9860->regmap, MAX9860_AUDIOCLKHIGH,
+ MAX9860_PLL, MAX9860_PLL);
+ if (ret) {
+ dev_err(codec->dev, "Failed to enable PLL: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int max9860_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max9860_priv *max9860 = snd_soc_codec_get_drvdata(codec);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ case SND_SOC_DAIFMT_CBS_CFS:
+ max9860->fmt = fmt;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct snd_soc_dai_ops max9860_dai_ops = {
+ .hw_params = max9860_hw_params,
+ .set_fmt = max9860_set_fmt,
+};
+
+static struct snd_soc_dai_driver max9860_dai = {
+ .name = "max9860-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .ops = &max9860_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static int max9860_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct max9860_priv *max9860 = dev_get_drvdata(codec->dev);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ ret = regmap_update_bits(max9860->regmap, MAX9860_PWRMAN,
+ MAX9860_SHDN, MAX9860_SHDN);
+ if (ret) {
+ dev_err(codec->dev, "Failed to remove SHDN: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ ret = regmap_update_bits(max9860->regmap, MAX9860_PWRMAN,
+ MAX9860_SHDN, 0);
+ if (ret) {
+ dev_err(codec->dev, "Failed to request SHDN: %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver max9860_codec_driver = {
+ .set_bias_level = max9860_set_bias_level,
+ .idle_bias_off = true,
+
+ .controls = max9860_controls,
+ .num_controls = ARRAY_SIZE(max9860_controls),
+ .dapm_widgets = max9860_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9860_dapm_widgets),
+ .dapm_routes = max9860_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max9860_dapm_routes),
+};
+
+#ifdef CONFIG_PM
+static int max9860_suspend(struct device *dev)
+{
+ struct max9860_priv *max9860 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(max9860->regmap, MAX9860_SYSCLK,
+ MAX9860_PSCLK, MAX9860_PSCLK_OFF);
+ if (ret) {
+ dev_err(dev, "Failed to disable clock: %d\n", ret);
+ return ret;
+ }
+
+ regulator_disable(max9860->dvddio);
+
+ return 0;
+}
+
+static int max9860_resume(struct device *dev)
+{
+ struct max9860_priv *max9860 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(max9860->dvddio);
+ if (ret) {
+ dev_err(dev, "Failed to enable DVDDIO: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(max9860->regmap, false);
+ ret = regcache_sync(max9860->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to sync cache: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(max9860->regmap, MAX9860_SYSCLK,
+ MAX9860_PSCLK, max9860->psclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max9860_pm_ops = {
+ SET_RUNTIME_PM_OPS(max9860_suspend, max9860_resume, NULL)
+};
+
+static int max9860_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct max9860_priv *max9860;
+ int ret;
+ struct clk *mclk;
+ unsigned long mclk_rate;
+ int i;
+ unsigned int intr;
+
+ max9860 = devm_kzalloc(dev, sizeof(struct max9860_priv), GFP_KERNEL);
+ if (!max9860)
+ return -ENOMEM;
+
+ max9860->dvddio = devm_regulator_get(dev, "DVDDIO");
+ if (IS_ERR(max9860->dvddio)) {
+ ret = PTR_ERR(max9860->dvddio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get DVDDIO supply: %d\n", ret);
+ return ret;
+ }
+
+ max9860->dvddio_nb.notifier_call = max9860_dvddio_event;
+
+ ret = regulator_register_notifier(max9860->dvddio, &max9860->dvddio_nb);
+ if (ret)
+ dev_err(dev, "Failed to register DVDDIO notifier: %d\n", ret);
+
+ ret = regulator_enable(max9860->dvddio);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable DVDDIO: %d\n", ret);
+ return ret;
+ }
+
+ max9860->regmap = devm_regmap_init_i2c(i2c, &max9860_regmap);
+ if (IS_ERR(max9860->regmap)) {
+ ret = PTR_ERR(max9860->regmap);
+ goto err_regulator;
+ }
+
+ dev_set_drvdata(dev, max9860);
+
+ /*
+ * mclk has to be in the 10MHz to 60MHz range.
+ * psclk is used to scale mclk into pclk so that
+ * pclk is in the 10MHz to 20MHz range.
+ */
+ mclk = clk_get(dev, "mclk");
+
+ if (IS_ERR(mclk)) {
+ ret = PTR_ERR(mclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get MCLK: %d\n", ret);
+ goto err_regulator;
+ }
+
+ mclk_rate = clk_get_rate(mclk);
+ clk_put(mclk);
+
+ if (mclk_rate > 60000000 || mclk_rate < 10000000) {
+ dev_err(dev, "Bad mclk %luHz (needs 10MHz - 60MHz)\n",
+ mclk_rate);
+ ret = -EINVAL;
+ goto err_regulator;
+ }
+ if (mclk_rate >= 40000000)
+ max9860->psclk = 3;
+ else if (mclk_rate >= 20000000)
+ max9860->psclk = 2;
+ else
+ max9860->psclk = 1;
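+ /*
+ * psclk values 1/2/3 select a pclk divider of 1/2/4, hence the
+ * mclk >> (psclk - 1) below; the value is then shifted into place
+ * for the PSCLK field of the SYSCLK register.
+ */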
+ max9860->pclk_rate = mclk_rate >> (max9860->psclk - 1);
+ max9860->psclk <<= MAX9860_PSCLK_SHIFT;
+ dev_dbg(dev, "mclk %lu pclk %lu\n", mclk_rate, max9860->pclk_rate);
+
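+ /*
+ * Write the register defaults straight to the chip with the cache
+ * bypassed, so that the hardware matches the regcache defaults.
+ */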
+ regcache_cache_bypass(max9860->regmap, true);
+ for (i = 0; i < max9860_regmap.num_reg_defaults; ++i) {
+ ret = regmap_write(max9860->regmap,
+ max9860_regmap.reg_defaults[i].reg,
+ max9860_regmap.reg_defaults[i].def);
+ if (ret) {
+ dev_err(dev, "Failed to initialize register %u: %d\n",
+ max9860_regmap.reg_defaults[i].reg, ret);
+ goto err_regulator;
+ }
+ }
+ regcache_cache_bypass(max9860->regmap, false);
+
+ ret = regmap_read(max9860->regmap, MAX9860_INTRSTATUS, &intr);
+ if (ret) {
+ dev_err(dev, "Failed to clear INTRSTATUS: %d\n", ret);
+ goto err_regulator;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ ret = snd_soc_register_codec(dev, &max9860_codec_driver,
+ &max9860_dai, 1);
+ if (ret) {
+ dev_err(dev, "Failed to register CODEC: %d\n", ret);
+ goto err_pm;
+ }
+
+ return 0;
+
+err_pm:
+ pm_runtime_disable(dev);
+err_regulator:
+ regulator_disable(max9860->dvddio);
+ return ret;
+}
+
+static int max9860_remove(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct max9860_priv *max9860 = dev_get_drvdata(dev);
+
+ snd_soc_unregister_codec(dev);
+ pm_runtime_disable(dev);
+ regulator_disable(max9860->dvddio);
+ return 0;
+}
+
+static const struct i2c_device_id max9860_i2c_id[] = {
+ { "max9860", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max9860_i2c_id);
+
+static const struct of_device_id max9860_of_match[] = {
+ { .compatible = "maxim,max9860", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max9860_of_match);
+
+static struct i2c_driver max9860_i2c_driver = {
+ .probe = max9860_probe,
+ .remove = max9860_remove,
+ .id_table = max9860_i2c_id,
+ .driver = {
+ .name = "max9860",
+ .of_match_table = max9860_of_match,
+ .pm = &max9860_pm_ops,
+ },
+};
+
+module_i2c_driver(max9860_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC MAX9860 Mono Audio Voice Codec driver");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/max9860.h b/sound/soc/codecs/max9860.h
new file mode 100644
index 000000000000..22041bd67a7d
--- /dev/null
+++ b/sound/soc/codecs/max9860.h
@@ -0,0 +1,162 @@
+/*
+ * Driver for the MAX9860 Mono Audio Voice Codec
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ * Copyright 2016 Axentia Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _SND_SOC_MAX9860
+#define _SND_SOC_MAX9860
+
+#define MAX9860_INTRSTATUS 0x00
+#define MAX9860_MICREADBACK 0x01
+#define MAX9860_INTEN 0x02
+#define MAX9860_SYSCLK 0x03
+#define MAX9860_AUDIOCLKHIGH 0x04
+#define MAX9860_AUDIOCLKLOW 0x05
+#define MAX9860_IFC1A 0x06
+#define MAX9860_IFC1B 0x07
+#define MAX9860_VOICEFLTR 0x08
+#define MAX9860_DACATTN 0x09
+#define MAX9860_ADCLEVEL 0x0a
+#define MAX9860_DACGAIN 0x0b
+#define MAX9860_MICGAIN 0x0c
+#define MAX9860_RESERVED 0x0d
+#define MAX9860_MICADC 0x0e
+#define MAX9860_NOISEGATE 0x0f
+#define MAX9860_PWRMAN 0x10
+#define MAX9860_REVISION 0xff
+
+#define MAX9860_MAX_REGISTER 0xff
+
+/* INTRSTATUS */
+#define MAX9860_CLD 0x80
+#define MAX9860_SLD 0x40
+#define MAX9860_ULK 0x20
+
+/* MICREADBACK */
+#define MAX9860_NG 0xe0
+#define MAX9860_AGC 0x1f
+
+/* INTEN */
+#define MAX9860_ICLD 0x80
+#define MAX9860_ISLD 0x40
+#define MAX9860_IULK 0x20
+
+/* SYSCLK */
+#define MAX9860_PSCLK 0x30
+#define MAX9860_PSCLK_OFF 0x00
+#define MAX9860_PSCLK_SHIFT 4
+#define MAX9860_FREQ 0x06
+#define MAX9860_FREQ_NORMAL 0x00
+#define MAX9860_FREQ_12MHZ 0x02
+#define MAX9860_FREQ_13MHZ 0x04
+#define MAX9860_FREQ_19_2MHZ 0x06
+#define MAX9860_16KHZ 0x01
+
+/* AUDIOCLKHIGH */
+#define MAX9860_PLL 0x80
+#define MAX9860_NHI 0x7f
+
+/* AUDIOCLKLOW */
+#define MAX9860_NLO 0xff
+
+/* IFC1A */
+#define MAX9860_MASTER 0x80
+#define MAX9860_WCI 0x40
+#define MAX9860_DBCI 0x20
+#define MAX9860_DDLY 0x10
+#define MAX9860_HIZ 0x08
+#define MAX9860_TDM 0x04
+
+/* IFC1B */
+#define MAX9860_ABCI 0x20
+#define MAX9860_ADLY 0x10
+#define MAX9860_ST 0x08
+#define MAX9860_BSEL 0x07
+#define MAX9860_BSEL_OFF 0x00
+#define MAX9860_BSEL_64X 0x01
+#define MAX9860_BSEL_48X 0x02
+#define MAX9860_BSEL_PCLK_2 0x04
+#define MAX9860_BSEL_PCLK_4 0x05
+#define MAX9860_BSEL_PCLK_8 0x06
+#define MAX9860_BSEL_PCLK_16 0x07
+
+/* VOICEFLTR */
+#define MAX9860_AVFLT 0xf0
+#define MAX9860_AVFLT_SHIFT 4
+#define MAX9860_AVFLT_COUNT 6
+#define MAX9860_DVFLT 0x0f
+#define MAX9860_DVFLT_SHIFT 0
+#define MAX9860_DVFLT_COUNT 6
+
+/* DACATTN */
+#define MAX9860_DVA 0xfe
+#define MAX9860_DVA_SHIFT 1
+#define MAX9860_DVA_MUTE 0x5e
+
+/* ADCLEVEL */
+#define MAX9860_ADCRL 0xf0
+#define MAX9860_ADCRL_SHIFT 4
+#define MAX9860_ADCLL 0x0f
+#define MAX9860_ADCLL_SHIFT 0
+#define MAX9860_ADCxL_MIN 15
+
+/* DACGAIN */
+#define MAX9860_DVG 0x60
+#define MAX9860_DVG_SHIFT 5
+#define MAX9860_DVG_MAX 3
+#define MAX9860_DVST 0x1f
+#define MAX9860_DVST_SHIFT 0
+#define MAX9860_DVST_MIN 31
+
+/* MICGAIN */
+#define MAX9860_PAM 0x60
+#define MAX9860_PAM_SHIFT 5
+#define MAX9860_PAM_MAX 3
+#define MAX9860_PGAM 0x1f
+#define MAX9860_PGAM_SHIFT 0
+#define MAX9860_PGAM_MIN 20
+
+/* MICADC */
+#define MAX9860_AGCSRC 0x80
+#define MAX9860_AGCSRC_SHIFT 7
+#define MAX9860_AGCSRC_COUNT 2
+#define MAX9860_AGCRLS 0x70
+#define MAX9860_AGCRLS_SHIFT 4
+#define MAX9860_AGCRLS_COUNT 8
+#define MAX9860_AGCATK 0x0c
+#define MAX9860_AGCATK_SHIFT 2
+#define MAX9860_AGCATK_COUNT 4
+#define MAX9860_AGCHLD 0x03
+#define MAX9860_AGCHLD_OFF 0x00
+#define MAX9860_AGCHLD_SHIFT 0
+#define MAX9860_AGCHLD_COUNT 4
+
+/* NOISEGATE */
+#define MAX9860_ANTH 0xf0
+#define MAX9860_ANTH_SHIFT 4
+#define MAX9860_ANTH_MAX 15
+#define MAX9860_AGCTH 0x0f
+#define MAX9860_AGCTH_SHIFT 0
+#define MAX9860_AGCTH_MIN 15
+
+/* PWRMAN */
+#define MAX9860_SHDN 0x80
+#define MAX9860_DACEN 0x08
+#define MAX9860_DACEN_SHIFT 3
+#define MAX9860_ADCLEN 0x02
+#define MAX9860_ADCLEN_SHIFT 1
+#define MAX9860_ADCREN 0x01
+#define MAX9860_ADCREN_SHIFT 0
+
+#endif /* _SND_SOC_MAX9860 */
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index 2a22fddeb6af..2a22fddeb6af 100755..100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
diff --git a/sound/soc/codecs/max9867.h b/sound/soc/codecs/max9867.h
index 65590b4ad62a..65590b4ad62a 100755..100644
--- a/sound/soc/codecs/max9867.h
+++ b/sound/soc/codecs/max9867.h
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 683769f0f246..5c9707ac4bbf 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/acpi.h>
#include <linux/math64.h>
+#include <linux/semaphore.h>
#include <sound/initval.h>
#include <sound/tlv.h>
@@ -30,10 +31,22 @@
#include "nau8825.h"
+
+#define NUVOTON_CODEC_DAI "nau8825-hifi"
+
#define NAU_FREF_MAX 13500000
-#define NAU_FVCO_MAX 100000000
+#define NAU_FVCO_MAX 124000000
#define NAU_FVCO_MIN 90000000
+/* cross talk suppression detection */
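+/*
+ * LOG10_MAGIC is log10(2) * 2^31 (cf. dvb_math); it converts fixed-point
+ * log2 values into log10.
+ */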
+#define LOG10_MAGIC 646456993
+#define GAIN_AUGMENT 22500
+#define SIDETONE_BASE 207000
+
+static int nau8825_configure_sysclk(struct nau8825 *nau8825,
+ int clk_id, unsigned int freq);
+
struct nau8825_fll {
int mclk_src;
int ratio;
@@ -156,6 +169,661 @@ static const struct reg_default nau8825_reg_defaults[] = {
{ NAU8825_REG_CHARGE_PUMP, 0x0 },
};
+/* registers backed up while cross talk detection runs */
+static struct reg_default nau8825_xtalk_baktab[] = {
+ { NAU8825_REG_ADC_DGAIN_CTRL, 0 },
+ { NAU8825_REG_HSVOL_CTRL, 0 },
+ { NAU8825_REG_DACL_CTRL, 0 },
+ { NAU8825_REG_DACR_CTRL, 0 },
+};
+
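+/*
+ * logtable[i] holds log2(1 + i/256) scaled by 2^16 (rounded), following
+ * the dvb-math intlog helpers; it backs the table lookup in
+ * nau8825_intlog10_dec3() below.
+ */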
+static const unsigned short logtable[256] = {
+ 0x0000, 0x0171, 0x02e0, 0x044e, 0x05ba, 0x0725, 0x088e, 0x09f7,
+ 0x0b5d, 0x0cc3, 0x0e27, 0x0f8a, 0x10eb, 0x124b, 0x13aa, 0x1508,
+ 0x1664, 0x17bf, 0x1919, 0x1a71, 0x1bc8, 0x1d1e, 0x1e73, 0x1fc6,
+ 0x2119, 0x226a, 0x23ba, 0x2508, 0x2656, 0x27a2, 0x28ed, 0x2a37,
+ 0x2b80, 0x2cc8, 0x2e0f, 0x2f54, 0x3098, 0x31dc, 0x331e, 0x345f,
+ 0x359f, 0x36de, 0x381b, 0x3958, 0x3a94, 0x3bce, 0x3d08, 0x3e41,
+ 0x3f78, 0x40af, 0x41e4, 0x4319, 0x444c, 0x457f, 0x46b0, 0x47e1,
+ 0x4910, 0x4a3f, 0x4b6c, 0x4c99, 0x4dc5, 0x4eef, 0x5019, 0x5142,
+ 0x526a, 0x5391, 0x54b7, 0x55dc, 0x5700, 0x5824, 0x5946, 0x5a68,
+ 0x5b89, 0x5ca8, 0x5dc7, 0x5ee5, 0x6003, 0x611f, 0x623a, 0x6355,
+ 0x646f, 0x6588, 0x66a0, 0x67b7, 0x68ce, 0x69e4, 0x6af8, 0x6c0c,
+ 0x6d20, 0x6e32, 0x6f44, 0x7055, 0x7165, 0x7274, 0x7383, 0x7490,
+ 0x759d, 0x76aa, 0x77b5, 0x78c0, 0x79ca, 0x7ad3, 0x7bdb, 0x7ce3,
+ 0x7dea, 0x7ef0, 0x7ff6, 0x80fb, 0x81ff, 0x8302, 0x8405, 0x8507,
+ 0x8608, 0x8709, 0x8809, 0x8908, 0x8a06, 0x8b04, 0x8c01, 0x8cfe,
+ 0x8dfa, 0x8ef5, 0x8fef, 0x90e9, 0x91e2, 0x92db, 0x93d2, 0x94ca,
+ 0x95c0, 0x96b6, 0x97ab, 0x98a0, 0x9994, 0x9a87, 0x9b7a, 0x9c6c,
+ 0x9d5e, 0x9e4f, 0x9f3f, 0xa02e, 0xa11e, 0xa20c, 0xa2fa, 0xa3e7,
+ 0xa4d4, 0xa5c0, 0xa6ab, 0xa796, 0xa881, 0xa96a, 0xaa53, 0xab3c,
+ 0xac24, 0xad0c, 0xadf2, 0xaed9, 0xafbe, 0xb0a4, 0xb188, 0xb26c,
+ 0xb350, 0xb433, 0xb515, 0xb5f7, 0xb6d9, 0xb7ba, 0xb89a, 0xb97a,
+ 0xba59, 0xbb38, 0xbc16, 0xbcf4, 0xbdd1, 0xbead, 0xbf8a, 0xc065,
+ 0xc140, 0xc21b, 0xc2f5, 0xc3cf, 0xc4a8, 0xc580, 0xc658, 0xc730,
+ 0xc807, 0xc8de, 0xc9b4, 0xca8a, 0xcb5f, 0xcc34, 0xcd08, 0xcddc,
+ 0xceaf, 0xcf82, 0xd054, 0xd126, 0xd1f7, 0xd2c8, 0xd399, 0xd469,
+ 0xd538, 0xd607, 0xd6d6, 0xd7a4, 0xd872, 0xd93f, 0xda0c, 0xdad9,
+ 0xdba5, 0xdc70, 0xdd3b, 0xde06, 0xded0, 0xdf9a, 0xe063, 0xe12c,
+ 0xe1f5, 0xe2bd, 0xe385, 0xe44c, 0xe513, 0xe5d9, 0xe69f, 0xe765,
+ 0xe82a, 0xe8ef, 0xe9b3, 0xea77, 0xeb3b, 0xebfe, 0xecc1, 0xed83,
+ 0xee45, 0xef06, 0xefc8, 0xf088, 0xf149, 0xf209, 0xf2c8, 0xf387,
+ 0xf446, 0xf505, 0xf5c3, 0xf680, 0xf73e, 0xf7fb, 0xf8b7, 0xf973,
+ 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47
+};
+
+static struct snd_soc_dai *nau8825_get_codec_dai(struct nau8825 *nau8825)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(nau8825->dapm);
+ struct snd_soc_component *component = &codec->component;
+ struct snd_soc_dai *codec_dai, *_dai;
+
+ list_for_each_entry_safe(codec_dai, _dai, &component->dai_list, list) {
+ if (!strncmp(codec_dai->name, NUVOTON_CODEC_DAI,
+ strlen(NUVOTON_CODEC_DAI)))
+ return codec_dai;
+ }
+ return NULL;
+}
+
+static bool nau8825_dai_is_active(struct nau8825 *nau8825)
+{
+ struct snd_soc_dai *codec_dai = nau8825_get_codec_dai(nau8825);
+
+ if (codec_dai) {
+ if (codec_dai->playback_active || codec_dai->capture_active)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * nau8825_sema_acquire - acquire the semaphore of nau88l25
+ * @nau8825: component to register the codec private data with
+ * @timeout: how long in jiffies to wait before giving up, or zero to wait
+ * until the semaphore is released
+ *
+ * Attempts to acquire the semaphore. If the semaphore is already held,
+ * the calling task is put to sleep. With a non-zero @timeout the function
+ * gives up and returns once that many jiffies have elapsed without the
+ * semaphore being released; with a zero @timeout it sleeps until the
+ * semaphore is released.
+ */
+static void nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
+{
+ int ret;
+
+ if (timeout)
+ ret = down_timeout(&nau8825->xtalk_sem, timeout);
+ else
+ ret = down_interruptible(&nau8825->xtalk_sem);
+
+ if (ret < 0)
+ dev_warn(nau8825->dev, "Acquire semaphore failed\n");
+}
+
+/**
+ * nau8825_sema_release - release the semaphore of nau88l25
+ * @nau8825: component to register the codec private data with
+ *
+ * Release the semaphore which may be called from any context and
+ * even by tasks which have never called down().
+ */
+static inline void nau8825_sema_release(struct nau8825 *nau8825)
+{
+ up(&nau8825->xtalk_sem);
+}
+
+/**
+ * nau8825_sema_reset - reset the semaphore for nau88l25
+ * @nau8825: component to register the codec private data with
+ *
+ * Reset the counter of the semaphore. Call this function to restart
+ * a new round of task management.
+ */
+static inline void nau8825_sema_reset(struct nau8825 *nau8825)
+{
+ nau8825->xtalk_sem.count = 1;
+}
+
+/**
+ * nau8825_hpvol_ramp - gradually ramp the headphone volume to the target level
+ *
+ * @nau8825: component to register the codec private data with
+ * @vol_from: the volume to start from
+ * @vol_to: the target volume
+ * @step: the volume step size
+ *
+ * The headphone volume goes from 0dB down to a minimum of -54dB in 1dB steps.
+ * A sharp volume change produces an audible pop in the headphone, so this
+ * function ramps the volume up or down with a 10ms delay per step.
+ */
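+/*
+ * Worked example (illustrative only, not part of the driver flow): ramping
+ * from the -54dB floor (register code 0x36) to 0dB with step = 3 walks
+ * roughly 18 iterations of the loop below, i.e. about 180ms of
+ * usleep_range() delay before the final target value is written.
+ */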
+static void nau8825_hpvol_ramp(struct nau8825 *nau8825,
+ unsigned int vol_from, unsigned int vol_to, unsigned int step)
+{
+ unsigned int value, volume, ramp_up, from, to;
+
+ if (vol_from == vol_to || step == 0) {
+ return;
+ } else if (vol_from < vol_to) {
+ ramp_up = true;
+ from = vol_from;
+ to = vol_to;
+ } else {
+ ramp_up = false;
+ from = vol_to;
+ to = vol_from;
+ }
+ /* only handle volume from 0dB to minimum -54dB */
+ if (to > NAU8825_HP_VOL_MIN)
+ to = NAU8825_HP_VOL_MIN;
+
+ for (volume = from; volume < to; volume += step) {
+ if (ramp_up)
+ value = volume;
+ else
+ value = to - volume + from;
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSVOL_CTRL,
+ NAU8825_HPL_VOL_MASK | NAU8825_HPR_VOL_MASK,
+ (value << NAU8825_HPL_VOL_SFT) | value);
+ usleep_range(10000, 10500);
+ }
+ if (ramp_up)
+ value = to;
+ else
+ value = from;
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSVOL_CTRL,
+ NAU8825_HPL_VOL_MASK | NAU8825_HPR_VOL_MASK,
+ (value << NAU8825_HPL_VOL_SFT) | value);
+}
+
+/**
+ * nau8825_intlog10_dec3 - compute log10 of a value, rounded to 3 decimal places
+ *
+ * This function is adapted from the dvb-math helpers in
+ * drivers/media/dvb-core/dvb_math.c.
+ *
+ * return log10(value) * 1000
+ */
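+/*
+ * Illustrative sanity check (assuming LOG10_MAGIC encodes log10(2) in Q31,
+ * as in dvb_math): nau8825_intlog10_dec3(2) is roughly 301 and
+ * nau8825_intlog10_dec3(1000) is roughly 3000, i.e. log10() scaled by 1000.
+ */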
+static u32 nau8825_intlog10_dec3(u32 value)
+{
+ u32 msb, logentry, significand, interpolation, log10val;
+ u64 log2val;
+
+ /* first detect the msb (count begins at 0) */
+ msb = fls(value) - 1;
+ /**
+ * now we use a logtable according to the following method:
+ *
+ * log2(2^x * y) * 2^24 = x * 2^24 + log2(y) * 2^24
+ * where x = msb and therefore 1 <= y < 2
+ * first y is determined by shifting the value left
+ * so that msb is bit 31
+ * 0x00231f56 -> 0x8C7D5800
+ * the result is y * 2^31 -> "significand"
+ * then the highest 9 bits are used for a table lookup
+ * the highest bit is discarded because it's always set
+ * the highest nine bits in our example are 100011000
+ * so we would use the entry 0x18
+ */
+ significand = value << (31 - msb);
+ logentry = (significand >> 23) & 0xff;
+ /**
+ * The last step is interpolation. Because of the limitations of
+ * the log table, the error is the part of the significand which
+ * isn't used for the lookup. We compute the ratio between that
+ * error and the next table entry, and interpolate between the
+ * log table entry used and the next one. The biggest possible
+ * error is 0x7fffff (in our example it's 0x7D5800) and the value
+ * needed for the next table entry is 0x800000, so the
+ * interpolation is
+ * (error / 0x800000) * (logtable_next - logtable_current).
+ * In the implementation the division is moved to the end for
+ * better accuracy. There is also an overflow correction if
+ * logtable_next is 256.
+ */
+ interpolation = ((significand & 0x7fffff) *
+ ((logtable[(logentry + 1) & 0xff] -
+ logtable[logentry]) & 0xffff)) >> 15;
+
+ log2val = ((msb << 24) + (logtable[logentry] << 8) + interpolation);
+ /**
+ * log10(x) = log2(x) * log10(2)
+ */
+ log10val = (log2val * LOG10_MAGIC) >> 31;
+ /**
+ * the result is rounded to 3 decimal places
+ */
+ return log10val / ((1 << 24) / 1000);
+}
+
+/**
+ * nau8825_xtalk_sidetone - compute the cross talk suppression sidetone gain
+ *
+ * @sig_org: original signal level
+ * @sig_cros: cross talk signal level
+ *
+ * The original and cross talk signal values need to be characterized.
+ * Once these values have been characterized, the sidetone value
+ * can be converted to decibels with the equation below.
+ * sidetone = 20 * log (original signal level / crosstalk signal level)
+ *
+ * return cross talk sidetone gain
+ */
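+/*
+ * Illustrative example of the equation above: if the original signal level
+ * is ten times the cross talk level, 20 * log10(10) = 20dB of suppression
+ * gain is derived before the driver maps it onto the sidetone register
+ * range in the function below.
+ */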
+static u32 nau8825_xtalk_sidetone(u32 sig_org, u32 sig_cros)
+{
+ u32 gain, sidetone;
+
+ if (unlikely(sig_org == 0) || unlikely(sig_cros == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ sig_org = nau8825_intlog10_dec3(sig_org);
+ sig_cros = nau8825_intlog10_dec3(sig_cros);
+ if (sig_org >= sig_cros)
+ gain = (sig_org - sig_cros) * 20 + GAIN_AUGMENT;
+ else
+ gain = (sig_cros - sig_org) * 20 + GAIN_AUGMENT;
+ sidetone = SIDETONE_BASE - gain * 2;
+ sidetone /= 1000;
+
+ return sidetone;
+}
+
+static int nau8825_xtalk_baktab_index_by_reg(unsigned int reg)
+{
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(nau8825_xtalk_baktab); index++)
+ if (nau8825_xtalk_baktab[index].reg == reg)
+ return index;
+ return -EINVAL;
+}
+
+static void nau8825_xtalk_backup(struct nau8825 *nau8825)
+{
+ int i;
+
+ /* Backup some register values to backup table */
+ for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++)
+ regmap_read(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
+ &nau8825_xtalk_baktab[i].def);
+}
+
+static void nau8825_xtalk_restore(struct nau8825 *nau8825)
+{
+ int i, volume;
+
+ /* Restore register values from the backup table. When the driver
+ * restores the headphone volume, it ramps back to the original
+ * level gradually, 3dB per step, to reduce pop noise.
+ */
+ for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++) {
+ if (nau8825_xtalk_baktab[i].reg == NAU8825_REG_HSVOL_CTRL) {
+ /* Ramping up the volume change to reduce pop noise */
+ volume = nau8825_xtalk_baktab[i].def &
+ NAU8825_HPR_VOL_MASK;
+ nau8825_hpvol_ramp(nau8825, 0, volume, 3);
+ continue;
+ }
+ regmap_write(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
+ nau8825_xtalk_baktab[i].def);
+ }
+}
+
+static void nau8825_xtalk_prepare_dac(struct nau8825 *nau8825)
+{
+ /* Enable power of DAC path */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_DACR | NAU8825_ENABLE_DACL |
+ NAU8825_ENABLE_ADC | NAU8825_ENABLE_ADC_CLK |
+ NAU8825_ENABLE_DAC_CLK, NAU8825_ENABLE_DACR |
+ NAU8825_ENABLE_DACL | NAU8825_ENABLE_ADC |
+ NAU8825_ENABLE_ADC_CLK | NAU8825_ENABLE_DAC_CLK);
+ /* Prevent startup click by letting the charge pump ramp up and
+ * enabling the charge bump
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_JAMNODCLOW | NAU8825_CHANRGE_PUMP_EN,
+ NAU8825_JAMNODCLOW | NAU8825_CHANRGE_PUMP_EN);
+ /* Enable clock sync of DAC and DAC clock */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_RDAC,
+ NAU8825_RDAC_EN | NAU8825_RDAC_CLK_EN |
+ NAU8825_RDAC_FS_BCLK_ENB,
+ NAU8825_RDAC_EN | NAU8825_RDAC_CLK_EN);
+ /* Power up the output driver in two stages */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_INTEGR_R | NAU8825_POWERUP_INTEGR_L |
+ NAU8825_POWERUP_DRV_IN_R | NAU8825_POWERUP_DRV_IN_L,
+ NAU8825_POWERUP_INTEGR_R | NAU8825_POWERUP_INTEGR_L |
+ NAU8825_POWERUP_DRV_IN_R | NAU8825_POWERUP_DRV_IN_L);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_HP_DRV_R | NAU8825_POWERUP_HP_DRV_L,
+ NAU8825_POWERUP_HP_DRV_R | NAU8825_POWERUP_HP_DRV_L);
+ /* HP outputs not shorted to ground */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSD_CTRL,
+ NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L, 0);
+ /* Enable HP boost driver */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BOOST,
+ NAU8825_HP_BOOST_DIS, NAU8825_HP_BOOST_DIS);
+ /* Enable class G compare path to supply 1.8V or 0.9V. */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLASSG_CTRL,
+ NAU8825_CLASSG_LDAC_EN | NAU8825_CLASSG_RDAC_EN,
+ NAU8825_CLASSG_LDAC_EN | NAU8825_CLASSG_RDAC_EN);
+}
+
+static void nau8825_xtalk_prepare_adc(struct nau8825 *nau8825)
+{
+ /* Power up the left ADC and raise Vref 0.5dB above Vmid */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ANALOG_ADC_2,
+ NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK,
+ NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_VMID_PLUS_0_5DB);
+}
+
+static void nau8825_xtalk_clock(struct nau8825 *nau8825)
+{
+ /* Recover FLL default value */
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL1, 0x0);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL2, 0x3126);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL3, 0x0008);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL4, 0x0010);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL5, 0x0);
+ regmap_write(nau8825->regmap, NAU8825_REG_FLL6, 0x6000);
+ /* Enable the internal VCO clock to generate the detection signal */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6, NAU8825_DCO_EN,
+ NAU8825_DCO_EN);
+ /* Set a specific internal clock frequency to generate the
+ * detection signal.
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_MCLK_SRC_MASK, 0xf);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL1,
+ NAU8825_FLL_RATIO_MASK, 0x10);
+}
+
+static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
+{
+ int volume, index;
+
+ /* Back up the registers changed by cross talk detection */
+ nau8825_xtalk_backup(nau8825);
+ /* Configure I2S as master so the codec outputs the signal */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
+ (0x2 << NAU8825_I2S_DRV_SFT) | 0x1);
+ /* Ramp up headphone volume to 0dB to get better performance and
+ * avoid pop noise in headphone.
+ */
+ index = nau8825_xtalk_baktab_index_by_reg(NAU8825_REG_HSVOL_CTRL);
+ if (index != -EINVAL) {
+ volume = nau8825_xtalk_baktab[index].def &
+ NAU8825_HPR_VOL_MASK;
+ nau8825_hpvol_ramp(nau8825, volume, 0, 3);
+ }
+ nau8825_xtalk_clock(nau8825);
+ nau8825_xtalk_prepare_dac(nau8825);
+ nau8825_xtalk_prepare_adc(nau8825);
+ /* Config channel path and digital gain */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACL_CTRL,
+ NAU8825_DACL_CH_SEL_MASK | NAU8825_DACL_CH_VOL_MASK,
+ NAU8825_DACL_CH_SEL_L | 0xab);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACR_CTRL,
+ NAU8825_DACR_CH_SEL_MASK | NAU8825_DACR_CH_VOL_MASK,
+ NAU8825_DACR_CH_SEL_R | 0xab);
+ /* Configure cross talk parameters and generate a 23Hz sine wave at
+ * 1/16 of full-scale signal level for impedance measurement.
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_IMM_MODE_CTRL,
+ NAU8825_IMM_THD_MASK | NAU8825_IMM_GEN_VOL_MASK |
+ NAU8825_IMM_CYC_MASK | NAU8825_IMM_DAC_SRC_MASK,
+ (0x9 << NAU8825_IMM_THD_SFT) | NAU8825_IMM_GEN_VOL_1_16th |
+ NAU8825_IMM_CYC_8192 | NAU8825_IMM_DAC_SRC_SIN);
+ /* Enable the RMS interrupt */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_INTERRUPT_MASK, NAU8825_IRQ_RMS_EN, 0);
+ /* Power up left and right DAC */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL, 0);
+}
+
+static void nau8825_xtalk_clean_dac(struct nau8825 *nau8825)
+{
+ /* Disable HP boost driver */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BOOST,
+ NAU8825_HP_BOOST_DIS, 0);
+ /* HP outputs shorted to ground */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_HSD_CTRL,
+ NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L,
+ NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L);
+ /* Power down left and right DAC */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL,
+ NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL);
+ /* Enable the TESTDAC and disable L/R HP impedance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_HPR_IMP | NAU8825_BIAS_HPL_IMP |
+ NAU8825_BIAS_TESTDAC_EN, NAU8825_BIAS_TESTDAC_EN);
+ /* Power down the output driver in two stages */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_HP_DRV_R | NAU8825_POWERUP_HP_DRV_L, 0);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_POWER_UP_CONTROL,
+ NAU8825_POWERUP_INTEGR_R | NAU8825_POWERUP_INTEGR_L |
+ NAU8825_POWERUP_DRV_IN_R | NAU8825_POWERUP_DRV_IN_L, 0);
+ /* Disable clock sync of DAC and DAC clock */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_RDAC,
+ NAU8825_RDAC_EN | NAU8825_RDAC_CLK_EN, 0);
+ /* Disable the charge pump ramp-up function and charge bump */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_CHARGE_PUMP,
+ NAU8825_JAMNODCLOW | NAU8825_CHANRGE_PUMP_EN, 0);
+ /* Disable power of DAC path */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_DACR | NAU8825_ENABLE_DACL |
+ NAU8825_ENABLE_ADC_CLK | NAU8825_ENABLE_DAC_CLK, 0);
+ if (!nau8825->irq)
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_ENA_CTRL, NAU8825_ENABLE_ADC, 0);
+}
+
+static void nau8825_xtalk_clean_adc(struct nau8825 *nau8825)
+{
+ /* Power down left ADC and restore voltage to Vmid */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ANALOG_ADC_2,
+ NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK, 0);
+}
+
+static void nau8825_xtalk_clean(struct nau8825 *nau8825)
+{
+ /* Enable the internal VCO needed for interrupts */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
+ nau8825_xtalk_clean_dac(nau8825);
+ nau8825_xtalk_clean_adc(nau8825);
+ /* Clear cross talk parameters and disable */
+ regmap_write(nau8825->regmap, NAU8825_REG_IMM_MODE_CTRL, 0);
+ /* Disable the RMS interrupt */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
+ /* Restore I2S default values */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
+ /* Restore the register values changed for cross talk */
+ nau8825_xtalk_restore(nau8825);
+}
+
+static void nau8825_xtalk_imm_start(struct nau8825 *nau8825, int vol)
+{
+ /* Apply ADC volume for better cross talk performance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ADC_DGAIN_CTRL,
+ NAU8825_ADC_DIG_VOL_MASK, vol);
+ /* Disable the JKTIP(HPL) DAC channel for the right-to-left measurement.
+ * Do it before sending the signal in order to avoid pop noise.
+ */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_TESTDACR_EN | NAU8825_BIAS_TESTDACL_EN,
+ NAU8825_BIAS_TESTDACL_EN);
+ switch (nau8825->xtalk_state) {
+ case NAU8825_XTALK_HPR_R2L:
+ /* Enable right headphone impedance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_HPR_IMP | NAU8825_BIAS_HPL_IMP,
+ NAU8825_BIAS_HPR_IMP);
+ break;
+ case NAU8825_XTALK_HPL_R2L:
+ /* Enable left headphone impedance */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_BIAS_ADJ,
+ NAU8825_BIAS_HPR_IMP | NAU8825_BIAS_HPL_IMP,
+ NAU8825_BIAS_HPL_IMP);
+ break;
+ default:
+ break;
+ }
+ msleep(100);
+ /* Impedance measurement mode enable */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_IMM_MODE_CTRL,
+ NAU8825_IMM_EN, NAU8825_IMM_EN);
+}
+
+static void nau8825_xtalk_imm_stop(struct nau8825 *nau8825)
+{
+ /* Impedance measurement mode disable */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_IMM_MODE_CTRL, NAU8825_IMM_EN, 0);
+}
+
+/* The cross talk measurement function reduces cross talk across the
+ * JKTIP(HPL) and JKR1(HPR) outputs by measuring the cross talk signal level
+ * to determine the cross talk reduction gain. The system works by sending a
+ * 23Hz -24dBV sine wave into the headset output DAC and through the PGA.
+ * The output of the PGA is then connected to an internal current sense which
+ * measures the attenuated 23Hz signal and passes the output to an ADC which
+ * converts the measurement to a binary code. Two separate measurements are
+ * made, one for JKR1(HPR) and the other for JKTIP(HPL), and the data can be
+ * read from IMM_RMS_L for HSR and HSL after each measurement. Thus, the
+ * measurement function has four states to complete the whole sequence.
+ * 1. Prepare state : Prepare the resources for detection and transfer to the
+ * HPR IMM state to measure the JKR1(HPR) impedance.
+ * 2. HPR IMM state : Read out the original signal level of JKR1(HPR) and
+ * transfer to the HPL IMM state to measure the JKTIP(HPL) impedance.
+ * 3. HPL IMM state : Read out the cross talk signal level of JKTIP(HPL) and
+ * transfer to the IMM state to determine the suppression sidetone gain.
+ * 4. IMM state : Compute the cross talk suppression sidetone gain from the
+ * original and cross talk signal levels. Apply this gain and then restore
+ * the codec configuration. Then transfer to the Done state to finish.
+ */
+static void nau8825_xtalk_measure(struct nau8825 *nau8825)
+{
+ u32 sidetone;
+
+ switch (nau8825->xtalk_state) {
+ case NAU8825_XTALK_PREPARE:
+ /* In the prepare state, set up the clock, interrupt, DAC path, ADC
+ * path and cross talk detection parameters.
+ */
+ nau8825_xtalk_prepare(nau8825);
+ msleep(280);
+ /* Trigger right headphone impedance detection */
+ nau8825->xtalk_state = NAU8825_XTALK_HPR_R2L;
+ nau8825_xtalk_imm_start(nau8825, 0x00d2);
+ break;
+ case NAU8825_XTALK_HPR_R2L:
+ /* In the right headphone IMM state, read out the right headphone
+ * impedance measurement result, and then start the left side.
+ */
+ regmap_read(nau8825->regmap, NAU8825_REG_IMM_RMS_L,
+ &nau8825->imp_rms[NAU8825_XTALK_HPR_R2L]);
+ dev_dbg(nau8825->dev, "HPR_R2L imm: %x\n",
+ nau8825->imp_rms[NAU8825_XTALK_HPR_R2L]);
+ /* Disable then re-enable IMM mode to update */
+ nau8825_xtalk_imm_stop(nau8825);
+ /* Trigger left headphone impedance detection */
+ nau8825->xtalk_state = NAU8825_XTALK_HPL_R2L;
+ nau8825_xtalk_imm_start(nau8825, 0x00ff);
+ break;
+ case NAU8825_XTALK_HPL_R2L:
+ /* In the left headphone IMM state, read out the left headphone
+ * impedance measurement result, and delay for some time until the
+ * detection sine wave output finishes. Then we can calculate the
+ * cross talk suppression sidetone according to the L/R headphone
+ * impedance.
+ */
+ regmap_read(nau8825->regmap, NAU8825_REG_IMM_RMS_L,
+ &nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]);
+ dev_dbg(nau8825->dev, "HPL_R2L imm: %x\n",
+ nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]);
+ nau8825_xtalk_imm_stop(nau8825);
+ msleep(150);
+ nau8825->xtalk_state = NAU8825_XTALK_IMM;
+ break;
+ case NAU8825_XTALK_IMM:
+ /* In the impedance measurement state, the original and cross talk
+ * signal level values are ready. The sidetone gain is determined
+ * from these signal levels. Finally, restore the codec
+ * configuration.
+ */
+ sidetone = nau8825_xtalk_sidetone(
+ nau8825->imp_rms[NAU8825_XTALK_HPR_R2L],
+ nau8825->imp_rms[NAU8825_XTALK_HPL_R2L]);
+ dev_dbg(nau8825->dev, "cross talk sidetone: %x\n", sidetone);
+ regmap_write(nau8825->regmap, NAU8825_REG_DAC_DGAIN_CTRL,
+ (sidetone << 8) | sidetone);
+ nau8825_xtalk_clean(nau8825);
+ nau8825->xtalk_state = NAU8825_XTALK_DONE;
+ break;
+ default:
+ break;
+ }
+}
+
+static void nau8825_xtalk_work(struct work_struct *work)
+{
+ struct nau8825 *nau8825 = container_of(
+ work, struct nau8825, xtalk_work);
+
+ nau8825_xtalk_measure(nau8825);
+ /* Determine the cross talk sidetone gain once the impedance
+ * measurement state is reached.
+ */
+ if (nau8825->xtalk_state == NAU8825_XTALK_IMM)
+ nau8825_xtalk_measure(nau8825);
+
+ /* Delay the jack report until the cross talk detection process has
+ * completed. This prevents the application from preparing playback
+ * while cross talk detection is still working. Meanwhile, the
+ * protection for cross talk detection is released.
+ */
+ if (nau8825->xtalk_state == NAU8825_XTALK_DONE) {
+ snd_soc_jack_report(nau8825->jack, nau8825->xtalk_event,
+ nau8825->xtalk_event_mask);
+ nau8825_sema_release(nau8825);
+ nau8825->xtalk_protect = false;
+ }
+}
+
+static void nau8825_xtalk_cancel(struct nau8825 *nau8825)
+{
+ /* If xtalk_protect is true, the process is still ongoing. The driver
+ * forcibly cancels the cross talk task and restores the configuration
+ * to its original state.
+ */
+ if (nau8825->xtalk_protect) {
+ cancel_work_sync(&nau8825->xtalk_work);
+ nau8825_xtalk_clean(nau8825);
+ }
+ /* Reset parameters for cross talk suppression function */
+ nau8825_sema_reset(nau8825);
+ nau8825->xtalk_state = NAU8825_XTALK_DONE;
+ nau8825->xtalk_protect = false;
+}
+
static bool nau8825_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -217,12 +885,36 @@ static bool nau8825_volatile_reg(struct device *dev, unsigned int reg)
case NAU8825_REG_SARDOUT_RAM_STATUS:
case NAU8825_REG_CHARGE_PUMP_INPUT_READ:
case NAU8825_REG_GENERAL_STATUS:
+ case NAU8825_REG_BIQ_CTRL ... NAU8825_REG_BIQ_COF10:
return true;
default:
return false;
}
}
+static int nau8825_adc_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (!nau8825->irq)
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_ENA_CTRL, NAU8825_ENABLE_ADC, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nau8825_pump_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -270,6 +962,54 @@ static int nau8825_output_dac_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int nau8825_biq_coeff_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+
+ if (!component->regmap)
+ return -EINVAL;
+
+ regmap_raw_read(component->regmap, NAU8825_REG_BIQ_COF1,
+ ucontrol->value.bytes.data, params->max);
+ return 0;
+}
+
+static int nau8825_biq_coeff_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ void *data;
+
+ if (!component->regmap)
+ return -EINVAL;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+
+ regmap_update_bits(component->regmap, NAU8825_REG_BIQ_CTRL,
+ NAU8825_BIQ_WRT_EN, 0);
+ regmap_raw_write(component->regmap, NAU8825_REG_BIQ_COF1,
+ data, params->max);
+ regmap_update_bits(component->regmap, NAU8825_REG_BIQ_CTRL,
+ NAU8825_BIQ_WRT_EN, NAU8825_BIQ_WRT_EN);
+
+ kfree(data);
+ return 0;
+}
+
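+/*
+ * Note on sizing (derived from the register map in this patch): the
+ * "BIQ Coefficients" byte control below is 20 bytes, i.e. the ten 16-bit
+ * coefficient registers NAU8825_REG_BIQ_COF1..NAU8825_REG_BIQ_COF10
+ * written as one raw block through the get/put handlers above.
+ */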
+static const char * const nau8825_biq_path[] = {
+ "ADC", "DAC"
+};
+
+static const struct soc_enum nau8825_biq_path_enum =
+ SOC_ENUM_SINGLE(NAU8825_REG_BIQ_CTRL, NAU8825_BIQ_PATH_SFT,
+ ARRAY_SIZE(nau8825_biq_path), nau8825_biq_path);
+
static const char * const nau8825_adc_decimation[] = {
"32", "64", "128", "256"
};
@@ -306,6 +1046,10 @@ static const struct snd_kcontrol_new nau8825_controls[] = {
SOC_ENUM("ADC Decimation Rate", nau8825_adc_decimation_enum),
SOC_ENUM("DAC Oversampling Rate", nau8825_dac_oversampl_enum),
+ /* programmable biquad filter */
+ SOC_ENUM("BIQ Path Select", nau8825_biq_path_enum),
+ SND_SOC_BYTES_EXT("BIQ Coefficients", 20,
+ nau8825_biq_coeff_get, nau8825_biq_coeff_put),
};
/* DAC Mux 0x33[9] and 0x34[9] */
@@ -338,7 +1082,9 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
SND_SOC_DAPM_PGA("Frontend PGA", NAU8825_REG_POWER_UP_CONTROL, 14, 0,
NULL, 0),
- SND_SOC_DAPM_ADC("ADC", NULL, NAU8825_REG_ENA_CTRL, 8, 0),
+ SND_SOC_DAPM_ADC_E("ADC", NULL, SND_SOC_NOPM, 0, 0,
+ nau8825_adc_event, SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("ADC Clock", NAU8825_REG_ENA_CTRL, 7, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
0),
@@ -592,9 +1338,6 @@ int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
NAU8825_HSD_AUTO_MODE | NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L,
NAU8825_HSD_AUTO_MODE | NAU8825_SPKR_DWN1R | NAU8825_SPKR_DWN1L);
- regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
- NAU8825_IRQ_HEADSET_COMPLETE_EN | NAU8825_IRQ_EJECT_EN, 0);
-
return 0;
}
EXPORT_SYMBOL_GPL(nau8825_enable_jack_detect);
@@ -602,24 +1345,21 @@ EXPORT_SYMBOL_GPL(nau8825_enable_jack_detect);
static bool nau8825_is_jack_inserted(struct regmap *regmap)
{
- int status;
+ bool active_high, is_high;
+ int status, jkdet;
+ regmap_read(regmap, NAU8825_REG_JACK_DET_CTRL, &jkdet);
+ active_high = jkdet & NAU8825_JACK_POLARITY;
regmap_read(regmap, NAU8825_REG_I2C_DEVICE_ID, &status);
- return !(status & NAU8825_GPIO2JD1);
+ is_high = status & NAU8825_GPIO2JD1;
+ /* Return the jack connection status according to the jack insertion
+ * logic, active high or active low.
+ */
+ return active_high == is_high;
}
static void nau8825_restart_jack_detection(struct regmap *regmap)
{
- /* Chip needs one FSCLK cycle in order to generate interrupts,
- * as we cannot guarantee one will be provided by the system. Turning
- * master mode on then off enables us to generate that FSCLK cycle
- * with a minimum of contention on the clock bus.
- */
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
/* this will restart the entire jack detection process including MIC/GND
* switching and create interrupts. We have to go from 0 to 1 and back
* to 0 to restart.
@@ -630,11 +1370,30 @@ static void nau8825_restart_jack_detection(struct regmap *regmap)
NAU8825_JACK_DET_RESTART, 0);
}
+static void nau8825_int_status_clear_all(struct regmap *regmap)
+{
+ int active_irq, clear_irq, i;
+
+ /* Clear the interrupt status from the rightmost bit if the
+ * corresponding IRQ event occurs.
+ */
+ regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+ for (i = 0; i < NAU8825_REG_DATA_LEN; i++) {
+ clear_irq = (0x1 << i);
+ if (active_irq & clear_irq)
+ regmap_write(regmap,
+ NAU8825_REG_INT_CLR_KEY_STATUS, clear_irq);
+ }
+}
+
static void nau8825_eject_jack(struct nau8825 *nau8825)
{
struct snd_soc_dapm_context *dapm = nau8825->dapm;
struct regmap *regmap = nau8825->regmap;
+ /* Forcibly cancel the cross talk detection process */
+ nau8825_xtalk_cancel(nau8825);
+
snd_soc_dapm_disable_pin(dapm, "SAR");
snd_soc_dapm_disable_pin(dapm, "MICBIAS");
/* Detach 2kOhm Resistors from MICBIAS to MICGND1/2 */
@@ -644,6 +1403,69 @@ static void nau8825_eject_jack(struct nau8825 *nau8825)
regmap_update_bits(regmap, NAU8825_REG_HSD_CTRL, 0xf, 0xf);
snd_soc_dapm_sync(dapm);
+
+ /* Clear all interrupt status */
+ nau8825_int_status_clear_all(regmap);
+
+ /* Enable the insertion interrupt, disable the ejection interrupt,
+ * and then bypass the de-bounce circuit.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_DIS_CTRL,
+ NAU8825_IRQ_EJECT_DIS | NAU8825_IRQ_INSERT_DIS,
+ NAU8825_IRQ_EJECT_DIS);
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_EJECT_EN |
+ NAU8825_IRQ_HEADSET_COMPLETE_EN | NAU8825_IRQ_INSERT_EN,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_EJECT_EN |
+ NAU8825_IRQ_HEADSET_COMPLETE_EN);
+ regmap_update_bits(regmap, NAU8825_REG_JACK_DET_CTRL,
+ NAU8825_JACK_DET_DB_BYPASS, NAU8825_JACK_DET_DB_BYPASS);
+
+ /* Disable the ADC needed for interrupts in auto mode */
+ regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, 0);
+
+ /* Disable the clock used for jack type detection in manual mode */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_DIS, 0);
+}
+
+/* Enable auto mode interrupts with the internal clock. */
+static void nau8825_setup_auto_irq(struct nau8825 *nau8825)
+{
+ struct regmap *regmap = nau8825->regmap;
+
+ /* Enable the headset jack type detection complete interrupt and
+ * the jack ejection interrupt.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_HEADSET_COMPLETE_EN | NAU8825_IRQ_EJECT_EN, 0);
+
+ /* Enable the internal VCO needed for interrupts */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
+
+ /* Enable the ADC needed for interrupts */
+ regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC);
+
+ /* Chip needs one FSCLK cycle in order to generate interrupts,
+ * as we cannot guarantee one will be provided by the system. Turning
+ * master mode on then off enables us to generate that FSCLK cycle
+ * with a minimum of contention on the clock bus.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
+ /* Do not bypass the de-bounce circuit */
+ regmap_update_bits(regmap, NAU8825_REG_JACK_DET_CTRL,
+ NAU8825_JACK_DET_DB_BYPASS, 0);
+
+ /* Unmask all interrupts */
+ regmap_write(regmap, NAU8825_REG_INTERRUPT_DIS_CTRL, 0);
+
+ /* Restart the jack detection process in auto mode */
+ nau8825_restart_jack_detection(regmap);
}
static int nau8825_button_decode(int value)
@@ -676,6 +1498,11 @@ static int nau8825_jack_insert(struct nau8825 *nau8825)
regmap_read(regmap, NAU8825_REG_GENERAL_STATUS, &jack_status_reg);
mic_detected = (jack_status_reg >> 10) & 3;
+ /* JKSLV and JKR2 both detected indicates a high impedance headset */
+ if (mic_detected == 0x3)
+ nau8825->high_imped = true;
+ else
+ nau8825->high_imped = false;
switch (mic_detected) {
case 0:
@@ -773,6 +1600,33 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
} else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
if (nau8825_is_jack_inserted(regmap)) {
event |= nau8825_jack_insert(nau8825);
+ if (!nau8825->high_imped) {
+ /* Apply cross talk suppression for headsets
+ * without high impedance.
+ */
+ if (!nau8825->xtalk_protect) {
+ /* Raise protection for cross talk
+ * detection if there was none before.
+ * The driver has to cancel the process
+ * and restore changes if the process
+ * is ongoing when ejection happens.
+ */
+ nau8825->xtalk_protect = true;
+ nau8825_sema_acquire(nau8825, 0);
+ }
+ /* Start the cross talk detection process */
+ nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
+ schedule_work(&nau8825->xtalk_work);
+ } else {
+ /* Cross talk suppression shouldn't be applied
+ * to a high impedance headset. Thus, release
+ * the protection raised before.
+ */
+ if (nau8825->xtalk_protect) {
+ nau8825_sema_release(nau8825);
+ nau8825->xtalk_protect = false;
+ }
+ }
} else {
dev_warn(nau8825->dev, "Headset completion IRQ fired but no headset connected\n");
nau8825_eject_jack(nau8825);
@@ -780,6 +1634,37 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
event_mask |= SND_JACK_HEADSET;
clear_irq = NAU8825_HEADSET_COMPLETION_IRQ;
+ /* Record the interrupt report event so the driver can report
+ * it later. The jack report is delayed until the cross talk
+ * detection process is done.
+ */
+ if (nau8825->xtalk_state == NAU8825_XTALK_PREPARE) {
+ nau8825->xtalk_event = event;
+ nau8825->xtalk_event_mask = event_mask;
+ }
+ } else if (active_irq & NAU8825_IMPEDANCE_MEAS_IRQ) {
+ schedule_work(&nau8825->xtalk_work);
+ clear_irq = NAU8825_IMPEDANCE_MEAS_IRQ;
+ } else if ((active_irq & NAU8825_JACK_INSERTION_IRQ_MASK) ==
+ NAU8825_JACK_INSERTION_DETECTED) {
+ /* Check the GPIO status directly as an extra step. This lets
+ * the driver confirm a real insertion interrupt, because the
+ * interrupt in manual mode has bypassed the de-bounce circuit
+ * which filters out unstable status.
+ */
+ if (nau8825_is_jack_inserted(regmap)) {
+ /* Turn off the insertion interrupt in manual mode */
+ regmap_update_bits(regmap,
+ NAU8825_REG_INTERRUPT_DIS_CTRL,
+ NAU8825_IRQ_INSERT_DIS,
+ NAU8825_IRQ_INSERT_DIS);
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_INSERT_EN, NAU8825_IRQ_INSERT_EN);
+ /* Enable the interrupt for jack type detection in auto
+ * mode, which can detect microphone and jack type.
+ */
+ nau8825_setup_auto_irq(nau8825);
+ }
}
if (!clear_irq)
@@ -787,7 +1672,12 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
/* clears the rightmost interruption */
regmap_write(regmap, NAU8825_REG_INT_CLR_KEY_STATUS, clear_irq);
- if (event_mask)
+ /* Delay the jack report until cross talk detection is done. This
+ * prevents the application from preparing playback while the cross
+ * talk detection process is still working. Otherwise, resources like
+ * the clock and power would be used by both at the same time and a
+ * conflict would happen.
+ */
+ if (event_mask && nau8825->xtalk_state == NAU8825_XTALK_DONE)
snd_soc_jack_report(nau8825->jack, event, event_mask);
return IRQ_HANDLED;
@@ -921,11 +1811,16 @@ static void nau8825_init_regs(struct nau8825 *nau8825)
NAU8825_RDAC_CLK_DELAY_MASK | NAU8825_RDAC_VREF_MASK,
(0x2 << NAU8825_RDAC_CLK_DELAY_SFT) |
(0x3 << NAU8825_RDAC_VREF_SFT));
+ /* Config L/R channel */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACL_CTRL,
+ NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_L);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_DACR_CTRL,
+ NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_R);
}
static const struct regmap_config nau8825_regmap_config = {
- .val_bits = 16,
- .reg_bits = 16,
+ .val_bits = NAU8825_REG_DATA_LEN,
+ .reg_bits = NAU8825_REG_ADDR_LEN,
.max_register = NAU8825_REG_MAX,
.readable_reg = nau8825_readable_reg,
@@ -944,18 +1839,15 @@ static int nau8825_codec_probe(struct snd_soc_codec *codec)
nau8825->dapm = dapm;
- /* The interrupt clock is gated by x1[10:8],
- * one of them needs to be enabled all the time for
- * interrupts to happen.
- */
- snd_soc_dapm_force_enable_pin(dapm, "DDACR");
- snd_soc_dapm_sync(dapm);
+ return 0;
+}
- /* Unmask interruptions. Handler uses dapm object so we can enable
- * interruptions only after dapm is fully initialized.
- */
- regmap_write(nau8825->regmap, NAU8825_REG_INTERRUPT_DIS_CTRL, 0);
- nau8825_restart_jack_detection(nau8825->regmap);
+static int nau8825_codec_remove(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ /* Cancel and reset the cross talk suppression detection function */
+ nau8825_xtalk_cancel(nau8825);
return 0;
}
@@ -973,8 +1865,8 @@ static int nau8825_codec_probe(struct snd_soc_codec *codec)
static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
struct nau8825_fll *fll_param)
{
- u64 fvco;
- unsigned int fref, i;
+ u64 fvco, fvco_max;
+ unsigned int fref, i, fvco_sel;
/* Ensure the reference clock frequency (FREF) is <= 13.5MHz by dividing
* freq_in by 1, 2, 4, or 8 using FLL pre-scalar.
@@ -999,18 +1891,23 @@ static int nau8825_calc_fll_param(unsigned int fll_in, unsigned int fs,
fll_param->ratio = fll_ratio[i].val;
/* Calculate the frequency of DCO (FDCO) given freq_out = 256 * Fs.
- * FDCO must be within the 90MHz - 100MHz or the FFL cannot be
+ * FDCO must be within the 90MHz - 124MHz range or the FLL cannot be
* guaranteed across the full range of operation.
* FDCO = freq_out * 2 * mclk_src_scaling
*/
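+ /* Illustrative numbers (scaling factors assumed to be available in
+ * mclk_src_scaling): for fs = 48kHz, freq_out = 256 * 48000 = 12.288MHz,
+ * so a scaling factor of 4 gives FDCO = 12.288MHz * 2 * 4 = 98.304MHz,
+ * inside the 90MHz - 124MHz window; the loop below picks the largest
+ * such FDCO.
+ */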
+ fvco_max = 0;
+ fvco_sel = ARRAY_SIZE(mclk_src_scaling);
for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
- if (NAU_FVCO_MIN < fvco && fvco < NAU_FVCO_MAX)
- break;
+ if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
+ fvco_max < fvco) {
+ fvco_max = fvco;
+ fvco_sel = i;
+ }
}
- if (i == ARRAY_SIZE(mclk_src_scaling))
+ if (ARRAY_SIZE(mclk_src_scaling) == fvco_sel)
return -EINVAL;
- fll_param->mclk_src = mclk_src_scaling[i].val;
+ fll_param->mclk_src = mclk_src_scaling[fvco_sel].val;
/* Calculate the FLL 10-bit integer input and the FLL 16-bit fractional
* input based on FDCO, FREF and FLL ratio.
@@ -1025,7 +1922,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
struct nau8825_fll *fll_param)
{
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
- NAU8825_CLK_MCLK_SRC_MASK, fll_param->mclk_src);
+ NAU8825_CLK_SRC_MASK | NAU8825_CLK_MCLK_SRC_MASK,
+ NAU8825_CLK_SRC_MCLK | fll_param->mclk_src);
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL1,
NAU8825_FLL_RATIO_MASK, fll_param->ratio);
/* FLL 16-bit fractional input */
@@ -1038,10 +1936,25 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
/* select divided VCO input */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
- NAU8825_FLL_FILTER_SW_MASK, 0x0000);
- /* FLL sigma delta modulator enable */
- regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
- NAU8825_SDM_EN_MASK, NAU8825_SDM_EN);
+ NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
+ /* Disable free-running mode */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_FLL6, NAU8825_DCO_EN, 0);
+ if (fll_param->fll_frac) {
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
+ NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
+ NAU8825_FLL_FTR_SW_MASK,
+ NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
+ NAU8825_FLL_FTR_SW_FILTER);
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
+ NAU8825_SDM_EN, NAU8825_SDM_EN);
+ } else {
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
+ NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
+ NAU8825_FLL_FTR_SW_MASK, NAU8825_FLL_FTR_SW_ACCU);
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_FLL6, NAU8825_SDM_EN, 0);
+ }
}
/* freq_out must be 256*Fs in order to achieve the best performance */
@@ -1069,6 +1982,45 @@ static int nau8825_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
return 0;
}
+static int nau8825_mclk_prepare(struct nau8825 *nau8825, unsigned int freq)
+{
+ int ret = 0;
+
+ nau8825->mclk = devm_clk_get(nau8825->dev, "mclk");
+ if (IS_ERR(nau8825->mclk)) {
+ dev_info(nau8825->dev, "No 'mclk' clock found, assume MCLK is managed externally");
+ return 0;
+ }
+
+ if (!nau8825->mclk_freq) {
+ ret = clk_prepare_enable(nau8825->mclk);
+ if (ret) {
+ dev_err(nau8825->dev, "Unable to prepare codec mclk\n");
+ return ret;
+ }
+ }
+
+ if (nau8825->mclk_freq != freq) {
+ freq = clk_round_rate(nau8825->mclk, freq);
+ ret = clk_set_rate(nau8825->mclk, freq);
+ if (ret) {
+ dev_err(nau8825->dev, "Unable to set mclk rate\n");
+ return ret;
+ }
+ nau8825->mclk_freq = freq;
+ }
+
+ return 0;
+}
+
+static void nau8825_configure_mclk_as_sysclk(struct regmap *regmap)
+{
+ regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_MCLK);
+ regmap_update_bits(regmap, NAU8825_REG_FLL6,
+ NAU8825_DCO_EN, 0);
+}
+
static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
unsigned int freq)
{
@@ -1076,40 +2028,106 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
int ret;
switch (clk_id) {
+ case NAU8825_CLK_DIS:
+ /* Clock is provided externally; disable the internal VCO clock */
+ nau8825_configure_mclk_as_sysclk(regmap);
+ if (nau8825->mclk_freq) {
+ clk_disable_unprepare(nau8825->mclk);
+ nau8825->mclk_freq = 0;
+ }
+
+ break;
case NAU8825_CLK_MCLK:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To keep playback from being interfered
+ * with by the cross talk process, the driver halts the
+ * playback preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ nau8825_configure_mclk_as_sysclk(regmap);
+ /* MCLK not changed by clock tree */
regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
- NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_MCLK);
- regmap_update_bits(regmap, NAU8825_REG_FLL6, NAU8825_DCO_EN, 0);
+ NAU8825_CLK_MCLK_SRC_MASK, 0);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
- /* We selected MCLK source but the clock itself managed externally */
- if (!nau8825->mclk)
- break;
+ ret = nau8825_mclk_prepare(nau8825, freq);
+ if (ret)
+ return ret;
- if (!nau8825->mclk_freq) {
- ret = clk_prepare_enable(nau8825->mclk);
- if (ret) {
- dev_err(nau8825->dev, "Unable to prepare codec mclk\n");
- return ret;
- }
+ break;
+ case NAU8825_CLK_INTERNAL:
+ if (nau8825_is_jack_inserted(nau8825->regmap)) {
+ regmap_update_bits(regmap, NAU8825_REG_FLL6,
+ NAU8825_DCO_EN, NAU8825_DCO_EN);
+ regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
+ /* Decrease the VCO frequency for power saving */
+ regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
+ NAU8825_CLK_MCLK_SRC_MASK, 0xf);
+ regmap_update_bits(regmap, NAU8825_REG_FLL1,
+ NAU8825_FLL_RATIO_MASK, 0x10);
+ regmap_update_bits(regmap, NAU8825_REG_FLL6,
+ NAU8825_SDM_EN, NAU8825_SDM_EN);
+ } else {
+ /* The clock is turned off intentionally for power saving
+ * when no headset is connected.
+ */
+ nau8825_configure_mclk_as_sysclk(regmap);
+ dev_warn(nau8825->dev, "Disable clock for power saving when no headset connected\n");
+ }
+ if (nau8825->mclk_freq) {
+ clk_disable_unprepare(nau8825->mclk);
+ nau8825->mclk_freq = 0;
}
- if (nau8825->mclk_freq != freq) {
- nau8825->mclk_freq = freq;
+ break;
+ case NAU8825_CLK_FLL_MCLK:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To keep playback from being interfered
+ * with by the cross talk process, the driver halts the
+ * playback preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ regmap_update_bits(regmap, NAU8825_REG_FLL3,
+ NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_MCLK);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
- freq = clk_round_rate(nau8825->mclk, freq);
- ret = clk_set_rate(nau8825->mclk, freq);
- if (ret) {
- dev_err(nau8825->dev, "Unable to set mclk rate\n");
- return ret;
- }
+ ret = nau8825_mclk_prepare(nau8825, freq);
+ if (ret)
+ return ret;
+
+ break;
+ case NAU8825_CLK_FLL_BLK:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To keep playback from being interfered
+ * with by the cross talk process, the driver halts the
+ * playback preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ regmap_update_bits(regmap, NAU8825_REG_FLL3,
+ NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_BLK);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
+
+ if (nau8825->mclk_freq) {
+ clk_disable_unprepare(nau8825->mclk);
+ nau8825->mclk_freq = 0;
}
break;
- case NAU8825_CLK_INTERNAL:
- regmap_update_bits(regmap, NAU8825_REG_FLL6, NAU8825_DCO_EN,
- NAU8825_DCO_EN);
- regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
- NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
+ case NAU8825_CLK_FLL_FS:
+ /* Acquire the semaphore to synchronize the playback and the
+ * interrupt handler. To keep playback from being interfered
+ * with by the cross talk process, the driver halts the
+ * playback preparation until the cross talk process finishes.
+ */
+ nau8825_sema_acquire(nau8825, 2 * HZ);
+ regmap_update_bits(regmap, NAU8825_REG_FLL3,
+ NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_FS);
+ /* Release the semaphore. */
+ nau8825_sema_release(nau8825);
if (nau8825->mclk_freq) {
clk_disable_unprepare(nau8825->mclk);
@@ -1135,6 +2153,31 @@ static int nau8825_set_sysclk(struct snd_soc_codec *codec, int clk_id,
return nau8825_configure_sysclk(nau8825, clk_id, freq);
}
+static int nau8825_resume_setup(struct nau8825 *nau8825)
+{
+ struct regmap *regmap = nau8825->regmap;
+
+ /* Disable the clock used for jack type detection in manual mode */
+ nau8825_configure_sysclk(nau8825, NAU8825_CLK_DIS, 0);
+
+ /* Clear all interrupt status */
+ nau8825_int_status_clear_all(regmap);
+
+ /* Enable both insertion and ejection interrupts, and then
+ * bypass the de-bounce circuit.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_HEADSET_COMPLETE_EN |
+ NAU8825_IRQ_EJECT_EN | NAU8825_IRQ_INSERT_EN,
+ NAU8825_IRQ_OUTPUT_EN | NAU8825_IRQ_HEADSET_COMPLETE_EN);
+ regmap_update_bits(regmap, NAU8825_REG_JACK_DET_CTRL,
+ NAU8825_JACK_DET_DB_BYPASS, NAU8825_JACK_DET_DB_BYPASS);
+ regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_DIS_CTRL,
+ NAU8825_IRQ_INSERT_DIS | NAU8825_IRQ_EJECT_DIS, 0);
+
+ return 0;
+}
+
static int nau8825_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -1157,10 +2200,22 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
+ /* Setup codec configuration after resume */
+ nau8825_resume_setup(nau8825);
}
break;
case SND_SOC_BIAS_OFF:
+ /* Cancel and reset the cross talk detection function */
+ nau8825_xtalk_cancel(nau8825);
+ /* Turn off all interrupts before system shutdown. Keep the
+ * interrupts quiet until resume setup completes.
+ */
+ regmap_write(nau8825->regmap,
+ NAU8825_REG_INTERRUPT_DIS_CTRL, 0xffff);
+ /* Disable the ADC needed for interrupts in auto mode */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL,
+ NAU8825_ENABLE_ADC, 0);
if (nau8825->mclk_freq)
clk_disable_unprepare(nau8825->mclk);
break;
@@ -1168,57 +2223,46 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#ifdef CONFIG_PM
-static int nau8825_suspend(struct snd_soc_codec *codec)
+static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec)
{
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
disable_irq(nau8825->irq);
+ snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
regcache_cache_only(nau8825->regmap, true);
regcache_mark_dirty(nau8825->regmap);
return 0;
}
-static int nau8825_resume(struct snd_soc_codec *codec)
+static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
{
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
- /* The chip may lose power and reset in S3. regcache_sync restores
- * register values including configurations for sysclk, irq, and
- * jack/button detection.
- */
regcache_cache_only(nau8825->regmap, false);
regcache_sync(nau8825->regmap);
-
- /* Check the jack plug status directly. If the headset is unplugged
- * during S3 when the chip has no power, there will be no jack
- * detection irq even after the nau8825_restart_jack_detection below,
- * because the chip just thinks no headset has ever been plugged in.
- */
- if (!nau8825_is_jack_inserted(nau8825->regmap)) {
- nau8825_eject_jack(nau8825);
- snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+ if (nau8825_is_jack_inserted(nau8825->regmap)) {
+ /* If the jack is inserted, we need to check whether playback
+ * was active before suspend. If it was, the driver has to
+ * raise the protection for the cross talk function so that
+ * playback does not resume before the cross talk process
+ * finishes; otherwise, playback would be interfered with by
+ * the cross talk function. It is better to apply hardware
+ * related parameters before starting playback or recording.
+ */
+ if (nau8825_dai_is_active(nau8825)) {
+ nau8825->xtalk_protect = true;
+ nau8825_sema_acquire(nau8825, 0);
+ }
}
-
enable_irq(nau8825->irq);
- /* Run jack detection to check the type (OMTP or CTIA) of the headset
- * if there is one. This handles the case where a different type of
- * headset is plugged in during S3. This triggers an IRQ iff a headset
- * is already plugged in.
- */
- nau8825_restart_jack_detection(nau8825->regmap);
-
return 0;
}
-#else
-#define nau8825_suspend NULL
-#define nau8825_resume NULL
-#endif
static struct snd_soc_codec_driver nau8825_codec_driver = {
.probe = nau8825_codec_probe,
+ .remove = nau8825_codec_remove,
.set_sysclk = nau8825_set_sysclk,
.set_pll = nau8825_set_pll,
.set_bias_level = nau8825_set_bias_level,
@@ -1318,22 +2362,8 @@ static int nau8825_read_device_properties(struct device *dev,
static int nau8825_setup_irq(struct nau8825 *nau8825)
{
- struct regmap *regmap = nau8825->regmap;
int ret;
- /* IRQ Output Enable */
- regmap_update_bits(regmap, NAU8825_REG_INTERRUPT_MASK,
- NAU8825_IRQ_OUTPUT_EN, NAU8825_IRQ_OUTPUT_EN);
-
- /* Enable internal VCO needed for interruptions */
- nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
-
- /* Enable DDACR needed for interrupts
- * It is the same as force_enable_pin("DDACR") we do later
- */
- regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
- NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
-
ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"nau8825", nau8825);
@@ -1370,6 +2400,13 @@ static int nau8825_i2c_probe(struct i2c_client *i2c,
return PTR_ERR(nau8825->regmap);
nau8825->dev = dev;
nau8825->irq = i2c->irq;
+ /* Initialize the parameters, semaphore and work queue which are
+ * needed by the cross talk suppression measurement function.
+ */
+ nau8825->xtalk_state = NAU8825_XTALK_DONE;
+ nau8825->xtalk_protect = false;
+ sema_init(&nau8825->xtalk_sem, 1);
+ INIT_WORK(&nau8825->xtalk_work, nau8825_xtalk_work);
nau8825_print_device_properties(nau8825);
@@ -1405,6 +2442,7 @@ static const struct i2c_device_id nau8825_i2c_ids[] = {
{ "nau8825", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, nau8825_i2c_ids);
#ifdef CONFIG_OF
static const struct of_device_id nau8825_of_ids[] = {
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 8ceb5f385478..1c63e2abafa9 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -93,12 +93,21 @@
#define NAU8825_REG_CHARGE_PUMP_INPUT_READ 0x81
#define NAU8825_REG_GENERAL_STATUS 0x82
#define NAU8825_REG_MAX NAU8825_REG_GENERAL_STATUS
+/* 16-bit control register address and 16-bit control register data */
+#define NAU8825_REG_ADDR_LEN 16
+#define NAU8825_REG_DATA_LEN 16
/* ENA_CTRL (0x1) */
#define NAU8825_ENABLE_DACR_SFT 10
#define NAU8825_ENABLE_DACR (1 << NAU8825_ENABLE_DACR_SFT)
#define NAU8825_ENABLE_DACL_SFT 9
+#define NAU8825_ENABLE_DACL (1 << NAU8825_ENABLE_DACL_SFT)
#define NAU8825_ENABLE_ADC_SFT 8
+#define NAU8825_ENABLE_ADC (1 << NAU8825_ENABLE_ADC_SFT)
+#define NAU8825_ENABLE_ADC_CLK_SFT 7
+#define NAU8825_ENABLE_ADC_CLK (1 << NAU8825_ENABLE_ADC_CLK_SFT)
+#define NAU8825_ENABLE_DAC_CLK_SFT 6
+#define NAU8825_ENABLE_DAC_CLK (1 << NAU8825_ENABLE_DAC_CLK_SFT)
#define NAU8825_ENABLE_SAR_SFT 1
/* CLK_DIVIDER (0x3) */
@@ -113,20 +122,28 @@
/* FLL3 (0x06) */
#define NAU8825_FLL_INTEGER_MASK (0x3ff << 0)
+#define NAU8825_FLL_CLK_SRC_SFT 10
+#define NAU8825_FLL_CLK_SRC_MASK (0x3 << NAU8825_FLL_CLK_SRC_SFT)
+#define NAU8825_FLL_CLK_SRC_MCLK (0 << NAU8825_FLL_CLK_SRC_SFT)
+#define NAU8825_FLL_CLK_SRC_BLK (0x2 << NAU8825_FLL_CLK_SRC_SFT)
+#define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT)
/* FLL4 (0x07) */
#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
/* FLL5 (0x08) */
-#define NAU8825_FLL_FILTER_SW_MASK (0x1 << 14)
+#define NAU8825_FLL_PDB_DAC_EN (0x1 << 15)
+#define NAU8825_FLL_LOOP_FTR_EN (0x1 << 14)
+#define NAU8825_FLL_CLK_SW_MASK (0x1 << 13)
+#define NAU8825_FLL_CLK_SW_N2 (0x1 << 13)
+#define NAU8825_FLL_CLK_SW_REF (0x0 << 13)
+#define NAU8825_FLL_FTR_SW_MASK (0x1 << 12)
+#define NAU8825_FLL_FTR_SW_ACCU (0x1 << 12)
+#define NAU8825_FLL_FTR_SW_FILTER (0x0 << 12)
/* FLL6 (0x9) */
-#define NAU8825_DCO_EN_MASK (0x1 << 15)
#define NAU8825_DCO_EN (0x1 << 15)
-#define NAU8825_DCO_DIS (0x0 << 15)
-#define NAU8825_SDM_EN_MASK (0x1 << 14)
#define NAU8825_SDM_EN (0x1 << 14)
-#define NAU8825_SDM_DIS (0x0 << 14)
/* HSD_CTRL (0xc) */
#define NAU8825_HSD_AUTO_MODE (1 << 6)
@@ -136,6 +153,7 @@
/* JACK_DET_CTRL (0xd) */
#define NAU8825_JACK_DET_RESTART (1 << 9)
+#define NAU8825_JACK_DET_DB_BYPASS (1 << 8)
#define NAU8825_JACK_INSERT_DEBOUNCE_SFT 5
#define NAU8825_JACK_INSERT_DEBOUNCE_MASK (0x7 << NAU8825_JACK_INSERT_DEBOUNCE_SFT)
#define NAU8825_JACK_EJECT_DEBOUNCE_SFT 2
@@ -145,9 +163,11 @@
/* INTERRUPT_MASK (0xf) */
#define NAU8825_IRQ_OUTPUT_EN (1 << 11)
#define NAU8825_IRQ_HEADSET_COMPLETE_EN (1 << 10)
+#define NAU8825_IRQ_RMS_EN (1 << 8)
#define NAU8825_IRQ_KEY_RELEASE_EN (1 << 7)
#define NAU8825_IRQ_KEY_SHORT_PRESS_EN (1 << 5)
#define NAU8825_IRQ_EJECT_EN (1 << 2)
+#define NAU8825_IRQ_INSERT_EN (1 << 0)
/* IRQ_STATUS (0x10) */
#define NAU8825_HEADSET_COMPLETION_IRQ (1 << 10)
@@ -168,6 +188,7 @@
#define NAU8825_IRQ_KEY_RELEASE_DIS (1 << 7)
#define NAU8825_IRQ_KEY_SHORT_PRESS_DIS (1 << 5)
#define NAU8825_IRQ_EJECT_DIS (1 << 2)
+#define NAU8825_IRQ_INSERT_DIS (1 << 0)
/* SAR_CTRL (0x13) */
#define NAU8825_SAR_ADC_EN_SFT 12
@@ -217,10 +238,21 @@
/* I2S_PCM_CTRL2 (0x1d) */
#define NAU8825_I2S_TRISTATE (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
+#define NAU8825_I2S_DRV_SFT 12
+#define NAU8825_I2S_DRV_MASK (0x3 << NAU8825_I2S_DRV_SFT)
#define NAU8825_I2S_MS_SFT 3
#define NAU8825_I2S_MS_MASK (1 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_MS_MASTER (1 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_MS_SLAVE (0 << NAU8825_I2S_MS_SFT)
+#define NAU8825_I2S_BLK_DIV_MASK 0x7
+
+/* BIQ_CTRL (0x20) */
+#define NAU8825_BIQ_WRT_SFT 4
+#define NAU8825_BIQ_WRT_EN (1 << NAU8825_BIQ_WRT_SFT)
+#define NAU8825_BIQ_PATH_SFT 0
+#define NAU8825_BIQ_PATH_MASK (1 << NAU8825_BIQ_PATH_SFT)
+#define NAU8825_BIQ_PATH_ADC (0 << NAU8825_BIQ_PATH_SFT)
+#define NAU8825_BIQ_PATH_DAC (1 << NAU8825_BIQ_PATH_SFT)
/* ADC_RATE (0x2b) */
#define NAU8825_ADC_SYNC_DOWN_SFT 0
@@ -239,22 +271,72 @@
#define NAU8825_DAC_OVERSAMPLE_128 2
#define NAU8825_DAC_OVERSAMPLE_32 4
+/* ADC_DGAIN_CTRL (0x30) */
+#define NAU8825_ADC_DIG_VOL_MASK 0xff
+
/* MUTE_CTRL (0x31) */
#define NAU8825_DAC_ZERO_CROSSING_EN (1 << 9)
#define NAU8825_DAC_SOFT_MUTE (1 << 9)
/* HSVOL_CTRL (0x32) */
#define NAU8825_HP_MUTE (1 << 15)
+#define NAU8825_HP_MUTE_AUTO (1 << 14)
+#define NAU8825_HPL_MUTE (1 << 13)
+#define NAU8825_HPR_MUTE (1 << 12)
+#define NAU8825_HPL_VOL_SFT 6
+#define NAU8825_HPL_VOL_MASK (0x3f << NAU8825_HPL_VOL_SFT)
+#define NAU8825_HPR_VOL_SFT 0
+#define NAU8825_HPR_VOL_MASK (0x3f << NAU8825_HPR_VOL_SFT)
+#define NAU8825_HP_VOL_MIN 0x36
/* DACL_CTRL (0x33) */
#define NAU8825_DACL_CH_SEL_SFT 9
+#define NAU8825_DACL_CH_SEL_MASK (0x1 << NAU8825_DACL_CH_SEL_SFT)
+#define NAU8825_DACL_CH_SEL_L (0x0 << NAU8825_DACL_CH_SEL_SFT)
+#define NAU8825_DACL_CH_SEL_R (0x1 << NAU8825_DACL_CH_SEL_SFT)
+#define NAU8825_DACL_CH_VOL_MASK 0xff
/* DACR_CTRL (0x34) */
#define NAU8825_DACR_CH_SEL_SFT 9
+#define NAU8825_DACR_CH_SEL_MASK (0x1 << NAU8825_DACR_CH_SEL_SFT)
+#define NAU8825_DACR_CH_SEL_L (0x0 << NAU8825_DACR_CH_SEL_SFT)
+#define NAU8825_DACR_CH_SEL_R (0x1 << NAU8825_DACR_CH_SEL_SFT)
+#define NAU8825_DACR_CH_VOL_MASK 0xff
+
+/* IMM_MODE_CTRL (0x4C) */
+#define NAU8825_IMM_THD_SFT 8
+#define NAU8825_IMM_THD_MASK (0x3f << NAU8825_IMM_THD_SFT)
+#define NAU8825_IMM_GEN_VOL_SFT 6
+#define NAU8825_IMM_GEN_VOL_MASK (0x3 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_2nd (0x0 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_4th (0x1 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_8th (0x2 << NAU8825_IMM_GEN_VOL_SFT)
+#define NAU8825_IMM_GEN_VOL_1_16th (0x3 << NAU8825_IMM_GEN_VOL_SFT)
+
+#define NAU8825_IMM_CYC_SFT 4
+#define NAU8825_IMM_CYC_MASK (0x3 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_1024 (0x0 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_2048 (0x1 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_4096 (0x2 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_CYC_8192 (0x3 << NAU8825_IMM_CYC_SFT)
+#define NAU8825_IMM_EN (1 << 3)
+#define NAU8825_IMM_DAC_SRC_MASK 0x7
+#define NAU8825_IMM_DAC_SRC_BIQ 0x0
+#define NAU8825_IMM_DAC_SRC_DRC 0x1
+#define NAU8825_IMM_DAC_SRC_MIX 0x2
+#define NAU8825_IMM_DAC_SRC_SIN 0x3
/* CLASSG_CTRL (0x50) */
#define NAU8825_CLASSG_TIMER_SFT 8
#define NAU8825_CLASSG_TIMER_MASK (0x3f << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_1ms (0x1 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_2ms (0x2 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_8ms (0x4 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_16ms (0x8 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_32ms (0x10 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_TIMER_64ms (0x20 << NAU8825_CLASSG_TIMER_SFT)
+#define NAU8825_CLASSG_LDAC_EN (0x1 << 2)
+#define NAU8825_CLASSG_RDAC_EN (0x1 << 1)
#define NAU8825_CLASSG_EN (1 << 0)
/* I2C_DEVICE_ID (0x58) */
@@ -263,7 +345,12 @@
#define NAU8825_SOFTWARE_ID_NAU8825 0x0
/* BIAS_ADJ (0x66) */
-#define NAU8825_BIAS_TESTDAC_EN (0x3 << 8)
+#define NAU8825_BIAS_HPR_IMP (1 << 15)
+#define NAU8825_BIAS_HPL_IMP (1 << 14)
+#define NAU8825_BIAS_TESTDAC_SFT 8
+#define NAU8825_BIAS_TESTDAC_EN (0x3 << NAU8825_BIAS_TESTDAC_SFT)
+#define NAU8825_BIAS_TESTDACR_EN (0x2 << NAU8825_BIAS_TESTDAC_SFT)
+#define NAU8825_BIAS_TESTDACL_EN (0x1 << NAU8825_BIAS_TESTDAC_SFT)
#define NAU8825_BIAS_VMID (1 << 6)
#define NAU8825_BIAS_VMID_SEL_SFT 4
#define NAU8825_BIAS_VMID_SEL_MASK (3 << NAU8825_BIAS_VMID_SEL_SFT)
@@ -282,6 +369,11 @@
#define NAU8825_POWERUP_ADCL (1 << 6)
/* RDAC (0x73) */
+#define NAU8825_RDAC_FS_BCLK_ENB (1 << 15)
+#define NAU8825_RDAC_EN_SFT 12
+#define NAU8825_RDAC_EN (0x3 << NAU8825_RDAC_EN_SFT)
+#define NAU8825_RDAC_CLK_EN_SFT 8
+#define NAU8825_RDAC_CLK_EN (0x3 << NAU8825_RDAC_CLK_EN_SFT)
#define NAU8825_RDAC_CLK_DELAY_SFT 4
#define NAU8825_RDAC_CLK_DELAY_MASK (0x7 << NAU8825_RDAC_CLK_DELAY_SFT)
#define NAU8825_RDAC_VREF_SFT 2
@@ -318,8 +410,21 @@
/* System Clock Source */
enum {
- NAU8825_CLK_MCLK = 0,
+ NAU8825_CLK_DIS = 0,
+ NAU8825_CLK_MCLK,
NAU8825_CLK_INTERNAL,
+ NAU8825_CLK_FLL_MCLK,
+ NAU8825_CLK_FLL_BLK,
+ NAU8825_CLK_FLL_FS,
+};
+
+/* Cross talk detection state */
+enum {
+ NAU8825_XTALK_PREPARE = 0,
+ NAU8825_XTALK_HPR_R2L,
+ NAU8825_XTALK_HPL_R2L,
+ NAU8825_XTALK_IMM,
+ NAU8825_XTALK_DONE,
};
struct nau8825 {
@@ -328,6 +433,8 @@ struct nau8825 {
struct snd_soc_dapm_context *dapm;
struct snd_soc_jack *jack;
struct clk *mclk;
+ struct work_struct xtalk_work;
+ struct semaphore xtalk_sem;
int irq;
int mclk_freq; /* 0 - mclk is disabled */
int button_pressed;
@@ -346,6 +453,12 @@ struct nau8825 {
int key_debounce;
int jack_insert_debounce;
int jack_eject_debounce;
+ int high_imped;
+ int xtalk_state;
+ int xtalk_event;
+ int xtalk_event_mask;
+ bool xtalk_protect;
+ int imp_rms[NAU8825_XTALK_IMM];
};
int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 58325234285c..33e1fc2d1598 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -73,7 +73,7 @@ static bool pcm1681_accessible_reg(struct device *dev, unsigned int reg)
return !((reg == 0x00) || (reg == 0x0f));
}
-static bool pcm1681_writeable_reg(struct device *dev, unsigned register reg)
+static bool pcm1681_writeable_reg(struct device *dev, unsigned int reg)
{
return pcm1681_accessible_reg(dev, reg) &&
(reg != PCM1681_ZERO_DETECT_STATUS);
diff --git a/sound/soc/codecs/pcm179x.c b/sound/soc/codecs/pcm179x.c
index 06a66579ca6d..88fbdd184aa0 100644
--- a/sound/soc/codecs/pcm179x.c
+++ b/sound/soc/codecs/pcm179x.c
@@ -59,7 +59,7 @@ static bool pcm179x_accessible_reg(struct device *dev, unsigned int reg)
return reg >= 0x10 && reg <= 0x17;
}
-static bool pcm179x_writeable_reg(struct device *dev, unsigned register reg)
+static bool pcm179x_writeable_reg(struct device *dev, unsigned int reg)
{
bool accessible;
diff --git a/sound/soc/codecs/pcm5102a.c b/sound/soc/codecs/pcm5102a.c
index ed515677409b..8ba322a00363 100644
--- a/sound/soc/codecs/pcm5102a.c
+++ b/sound/soc/codecs/pcm5102a.c
@@ -57,7 +57,6 @@ static struct platform_driver pcm5102a_codec_driver = {
.remove = pcm5102a_remove,
.driver = {
.name = "pcm5102a-codec",
- .owner = THIS_MODULE,
.of_match_table = pcm5102a_of_match,
},
};
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 1bd31644a782..74c0e4eb3788 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1100,6 +1100,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Skylake Client platform")
}
},
+ {
+ .ident = "Intel Kabylake RVP",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
+ }
+ },
+
{ }
};
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
new file mode 100644
index 000000000000..77ff8ebe6dfb
--- /dev/null
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -0,0 +1,447 @@
+/*
+ * rt5514-spi.c -- RT5514 SPI driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_qos.h>
+#include <linux/sysfs.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rt5514-spi.h"
+
+static struct spi_device *rt5514_spi;
+
+struct rt5514_dsp {
+ struct device *dev;
+ struct delayed_work copy_work;
+ struct mutex dma_lock;
+ struct snd_pcm_substream *substream;
+ unsigned int buf_base, buf_limit, buf_rp;
+ size_t buf_size;
+ size_t dma_offset;
+ size_t dsp_offset;
+};
+
+static const struct snd_pcm_hardware rt5514_spi_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = 0x20000 / 8,
+ .periods_min = 8,
+ .periods_max = 8,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = 0x20000,
+};
+
+static struct snd_soc_dai_driver rt5514_spi_dai = {
+ .name = "rt5514-dsp-cpu-dai",
+ .id = 0,
+ .capture = {
+ .stream_name = "DSP Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static void rt5514_spi_copy_work(struct work_struct *work)
+{
+ struct rt5514_dsp *rt5514_dsp =
+ container_of(work, struct rt5514_dsp, copy_work.work);
+ struct snd_pcm_runtime *runtime;
+ size_t period_bytes, truncated_bytes = 0;
+
+ mutex_lock(&rt5514_dsp->dma_lock);
+ if (!rt5514_dsp->substream) {
+ dev_err(rt5514_dsp->dev, "No pcm substream\n");
+ goto done;
+ }
+
+ runtime = rt5514_dsp->substream->runtime;
+ period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
+
+ if (rt5514_dsp->buf_size - rt5514_dsp->dsp_offset < period_bytes)
+ period_bytes = rt5514_dsp->buf_size - rt5514_dsp->dsp_offset;
+
+ if (rt5514_dsp->buf_rp + period_bytes <= rt5514_dsp->buf_limit) {
+ rt5514_spi_burst_read(rt5514_dsp->buf_rp,
+ runtime->dma_area + rt5514_dsp->dma_offset,
+ period_bytes);
+
+ if (rt5514_dsp->buf_rp + period_bytes == rt5514_dsp->buf_limit)
+ rt5514_dsp->buf_rp = rt5514_dsp->buf_base;
+ else
+ rt5514_dsp->buf_rp += period_bytes;
+ } else {
+ truncated_bytes = rt5514_dsp->buf_limit - rt5514_dsp->buf_rp;
+ rt5514_spi_burst_read(rt5514_dsp->buf_rp,
+ runtime->dma_area + rt5514_dsp->dma_offset,
+ truncated_bytes);
+
+ rt5514_spi_burst_read(rt5514_dsp->buf_base,
+ runtime->dma_area + rt5514_dsp->dma_offset +
+ truncated_bytes, period_bytes - truncated_bytes);
+
+ rt5514_dsp->buf_rp = rt5514_dsp->buf_base +
+ period_bytes - truncated_bytes;
+ }
+
+ rt5514_dsp->dma_offset += period_bytes;
+ if (rt5514_dsp->dma_offset >= runtime->dma_bytes)
+ rt5514_dsp->dma_offset = 0;
+
+ rt5514_dsp->dsp_offset += period_bytes;
+
+ snd_pcm_period_elapsed(rt5514_dsp->substream);
+
+ if (rt5514_dsp->dsp_offset < rt5514_dsp->buf_size)
+ schedule_delayed_work(&rt5514_dsp->copy_work, 5);
+done:
+ mutex_unlock(&rt5514_dsp->dma_lock);
+}
+
+/* PCM for streaming audio from the DSP buffer */
+static int rt5514_spi_pcm_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &rt5514_spi_pcm_hardware);
+
+ return 0;
+}
+
+static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+ int ret;
+
+ mutex_lock(&rt5514_dsp->dma_lock);
+ ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+ rt5514_dsp->substream = substream;
+ mutex_unlock(&rt5514_dsp->dma_lock);
+
+ return ret;
+}
+
+static int rt5514_spi_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+
+ mutex_lock(&rt5514_dsp->dma_lock);
+ rt5514_dsp->substream = NULL;
+ mutex_unlock(&rt5514_dsp->dma_lock);
+
+ cancel_delayed_work_sync(&rt5514_dsp->copy_work);
+
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int rt5514_spi_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+ u8 buf[8];
+
+ rt5514_dsp->dma_offset = 0;
+ rt5514_dsp->dsp_offset = 0;
+
+ /**
+	 * The address area 0x1800XXXX is the register space, and it cannot
+	 * support SPI burst reads reliably. So we use an individual SPI burst
+	 * read per register to make sure the data is read correctly.
+ */
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_BASE, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_base = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_LIMIT, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_limit = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_RP, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_rp = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ rt5514_spi_burst_read(RT5514_BUFFER_VOICE_SIZE, (u8 *)&buf,
+ sizeof(buf));
+ rt5514_dsp->buf_size = buf[0] | buf[1] << 8 | buf[2] << 16 |
+ buf[3] << 24;
+
+ return 0;
+}
+
+static int rt5514_spi_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+
+ if (cmd == SNDRV_PCM_TRIGGER_START) {
+ if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
+ rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
+ schedule_delayed_work(&rt5514_dsp->copy_work, 0);
+ }
+
+ return 0;
+}
+
+static snd_pcm_uframes_t rt5514_spi_pcm_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(rtd->platform);
+
+ return bytes_to_frames(runtime, rt5514_dsp->dma_offset);
+}
+
+static struct snd_pcm_ops rt5514_spi_pcm_ops = {
+ .open = rt5514_spi_pcm_open,
+ .hw_params = rt5514_spi_hw_params,
+ .hw_free = rt5514_spi_hw_free,
+ .trigger = rt5514_spi_trigger,
+ .prepare = rt5514_spi_prepare,
+ .pointer = rt5514_spi_pcm_pointer,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ .page = snd_pcm_lib_get_vmalloc_page,
+};
+
+static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
+{
+ struct rt5514_dsp *rt5514_dsp;
+
+ rt5514_dsp = devm_kzalloc(platform->dev, sizeof(*rt5514_dsp),
+ GFP_KERNEL);
+
+ rt5514_dsp->dev = &rt5514_spi->dev;
+ mutex_init(&rt5514_dsp->dma_lock);
+ INIT_DELAYED_WORK(&rt5514_dsp->copy_work, rt5514_spi_copy_work);
+ snd_soc_platform_set_drvdata(platform, rt5514_dsp);
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver rt5514_spi_platform = {
+ .probe = rt5514_spi_pcm_probe,
+ .ops = &rt5514_spi_pcm_ops,
+};
+
+static const struct snd_soc_component_driver rt5514_spi_dai_component = {
+ .name = "rt5514-spi-dai",
+};
+
+/**
+ * rt5514_spi_burst_read - Read data over SPI from an rt5514 address.
+ * @addr: Start address.
+ * @rxbuf: Data buffer for reading.
+ * @len: Data length; it must be a multiple of 8.
+ *
+ * Returns true for success.
+ */
+int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len)
+{
+ u8 spi_cmd = RT5514_SPI_CMD_BURST_READ;
+ int status;
+ u8 write_buf[8];
+ unsigned int i, end, offset = 0;
+
+ struct spi_message message;
+ struct spi_transfer x[3];
+
+ while (offset < len) {
+ if (offset + RT5514_SPI_BUF_LEN <= len)
+ end = RT5514_SPI_BUF_LEN;
+ else
+ end = len % RT5514_SPI_BUF_LEN;
+
+ write_buf[0] = spi_cmd;
+ write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
+ write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+ write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+ write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof(x));
+
+ x[0].len = 5;
+ x[0].tx_buf = write_buf;
+ spi_message_add_tail(&x[0], &message);
+
+ x[1].len = 4;
+ x[1].tx_buf = write_buf;
+ spi_message_add_tail(&x[1], &message);
+
+ x[2].len = end;
+ x[2].rx_buf = rxbuf + offset;
+ spi_message_add_tail(&x[2], &message);
+
+ status = spi_sync(rt5514_spi, &message);
+
+ if (status)
+ return false;
+
+ offset += RT5514_SPI_BUF_LEN;
+ }
+
+ for (i = 0; i < len; i += 8) {
+ write_buf[0] = rxbuf[i + 0];
+ write_buf[1] = rxbuf[i + 1];
+ write_buf[2] = rxbuf[i + 2];
+ write_buf[3] = rxbuf[i + 3];
+ write_buf[4] = rxbuf[i + 4];
+ write_buf[5] = rxbuf[i + 5];
+ write_buf[6] = rxbuf[i + 6];
+ write_buf[7] = rxbuf[i + 7];
+
+ rxbuf[i + 0] = write_buf[7];
+ rxbuf[i + 1] = write_buf[6];
+ rxbuf[i + 2] = write_buf[5];
+ rxbuf[i + 3] = write_buf[4];
+ rxbuf[i + 4] = write_buf[3];
+ rxbuf[i + 5] = write_buf[2];
+ rxbuf[i + 6] = write_buf[1];
+ rxbuf[i + 7] = write_buf[0];
+ }
+
+ return true;
+}
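/*
 * Illustrative caller sketch, not part of the patch above: reading one of
 * the DSP buffer pointer registers with rt5514_spi_burst_read(). The length
 * must be a multiple of 8 because the helper byte-swaps the received data in
 * 8-byte groups; the function name here is hypothetical.
 */
static int example_read_voice_rp(u32 *rp)
{
	u8 buf[8];

	/* One individual burst read per register, as rt5514_spi_prepare() does */
	if (!rt5514_spi_burst_read(RT5514_BUFFER_VOICE_RP, buf, sizeof(buf)))
		return -EIO;

	/* Assemble the 32-bit pointer from the first four bytes, as the driver does */
	*rp = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;

	return 0;
}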
+
+/**
+ * rt5514_spi_burst_write - Write data over SPI to an rt5514 address.
+ * @addr: Start address.
+ * @txbuf: Data buffer for writing.
+ * @len: Data length; it must be a multiple of 8.
+ *
+ * Returns 0 for success.
+ */
+int rt5514_spi_burst_write(u32 addr, const u8 *txbuf, size_t len)
+{
+ u8 spi_cmd = RT5514_SPI_CMD_BURST_WRITE;
+ u8 *write_buf;
+ unsigned int i, end, offset = 0;
+
+ write_buf = kmalloc(RT5514_SPI_BUF_LEN + 6, GFP_KERNEL);
+
+ if (write_buf == NULL)
+ return -ENOMEM;
+
+ while (offset < len) {
+ if (offset + RT5514_SPI_BUF_LEN <= len)
+ end = RT5514_SPI_BUF_LEN;
+ else
+ end = len % RT5514_SPI_BUF_LEN;
+
+ write_buf[0] = spi_cmd;
+ write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
+ write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+ write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+ write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+ for (i = 0; i < end; i += 8) {
+ write_buf[i + 12] = txbuf[offset + i + 0];
+ write_buf[i + 11] = txbuf[offset + i + 1];
+ write_buf[i + 10] = txbuf[offset + i + 2];
+ write_buf[i + 9] = txbuf[offset + i + 3];
+ write_buf[i + 8] = txbuf[offset + i + 4];
+ write_buf[i + 7] = txbuf[offset + i + 5];
+ write_buf[i + 6] = txbuf[offset + i + 6];
+ write_buf[i + 5] = txbuf[offset + i + 7];
+ }
+
+ write_buf[end + 5] = spi_cmd;
+
+ spi_write(rt5514_spi, write_buf, end + 6);
+
+ offset += RT5514_SPI_BUF_LEN;
+ }
+
+ kfree(write_buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt5514_spi_burst_write);
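/*
 * Illustrative caller sketch, not part of the patch above: writing a
 * firmware image into DSP memory with rt5514_spi_burst_write(). The length
 * must be a multiple of 8, so the codec driver pads it as
 * ((size / 8) + 1) * 8; the address 0x4ff60000 is the one used by the
 * rt5514.c hunk further below, and the helper name is hypothetical.
 */
static int example_load_dsp_fw(const u8 *data, size_t size)
{
	/* Pad the transfer length to a multiple of 8 bytes */
	return rt5514_spi_burst_write(0x4ff60000, data, ((size / 8) + 1) * 8);
}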
+
+static int rt5514_spi_probe(struct spi_device *spi)
+{
+ int ret;
+
+ rt5514_spi = spi;
+
+ ret = devm_snd_soc_register_platform(&spi->dev, &rt5514_spi_platform);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to register platform.\n");
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(&spi->dev,
+ &rt5514_spi_dai_component,
+ &rt5514_spi_dai, 1);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to register component.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rt5514_of_match[] = {
+ { .compatible = "realtek,rt5514", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5514_of_match);
+
+static struct spi_driver rt5514_spi_driver = {
+ .driver = {
+ .name = "rt5514",
+ .of_match_table = of_match_ptr(rt5514_of_match),
+ },
+ .probe = rt5514_spi_probe,
+};
+module_spi_driver(rt5514_spi_driver);
+
+MODULE_DESCRIPTION("RT5514 SPI driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5514-spi.h b/sound/soc/codecs/rt5514-spi.h
new file mode 100644
index 000000000000..f69b1cdf2f9b
--- /dev/null
+++ b/sound/soc/codecs/rt5514-spi.h
@@ -0,0 +1,38 @@
+/*
+ * rt5514-spi.h -- RT5514 driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5514_SPI_H__
+#define __RT5514_SPI_H__
+
+/*
+ * RT5514_SPI_BUF_LEN is the buffer size of SPI master controller.
+ */
+#define RT5514_SPI_BUF_LEN 240
+
+#define RT5514_BUFFER_VOICE_BASE 0x18001034
+#define RT5514_BUFFER_VOICE_LIMIT 0x18001038
+#define RT5514_BUFFER_VOICE_RP 0x1800103c
+#define RT5514_BUFFER_VOICE_SIZE 0x18001040
+
+/* SPI Command */
+enum {
+ RT5514_SPI_CMD_16_READ = 0,
+ RT5514_SPI_CMD_16_WRITE,
+ RT5514_SPI_CMD_32_READ,
+ RT5514_SPI_CMD_32_WRITE,
+ RT5514_SPI_CMD_BURST_READ,
+ RT5514_SPI_CMD_BURST_WRITE,
+};
+
+int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len);
+int rt5514_spi_burst_write(u32 addr, const u8 *txbuf, size_t len);
+
+#endif /* __RT5514_SPI_H__ */
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 879bf60f4965..7162f05101d9 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -30,6 +30,9 @@
#include "rl6231.h"
#include "rt5514.h"
+#if defined(CONFIG_SND_SOC_RT5514_SPI)
+#include "rt5514-spi.h"
+#endif
static const struct reg_sequence rt5514_i2c_patch[] = {
{0x1800101c, 0x00000000},
@@ -110,6 +113,35 @@ static const struct reg_default rt5514_reg[] = {
{RT5514_VENDOR_ID2, 0x10ec5514},
};
+static void rt5514_enable_dsp_prepare(struct rt5514_priv *rt5514)
+{
+ /* Reset */
+ regmap_write(rt5514->i2c_regmap, 0x18002000, 0x000010ec);
+ /* LDO_I_limit */
+ regmap_write(rt5514->i2c_regmap, 0x18002200, 0x00028604);
+ /* I2C bypass enable */
+ regmap_write(rt5514->i2c_regmap, 0xfafafafa, 0x00000001);
+ /* mini-core reset */
+ regmap_write(rt5514->i2c_regmap, 0x18002f00, 0x0005514b);
+ regmap_write(rt5514->i2c_regmap, 0x18002f00, 0x00055149);
+ /* I2C bypass disable */
+ regmap_write(rt5514->i2c_regmap, 0xfafafafa, 0x00000000);
+ /* PIN config */
+ regmap_write(rt5514->i2c_regmap, 0x18002070, 0x00000040);
+ /* PLL3(QN)=RCOSC*(10+2) */
+ regmap_write(rt5514->i2c_regmap, 0x18002240, 0x0000000a);
+ /* PLL3 source=RCOSC, fsi=rt_clk */
+ regmap_write(rt5514->i2c_regmap, 0x18002100, 0x0000000b);
+ /* Power on RCOSC, pll3 */
+ regmap_write(rt5514->i2c_regmap, 0x18002004, 0x00808b81);
+ /* DSP clk source = pll3, ENABLE DSP clk */
+ regmap_write(rt5514->i2c_regmap, 0x18002f08, 0x00000005);
+ /* Enable DSP clk auto switch */
+ regmap_write(rt5514->i2c_regmap, 0x18001114, 0x00000001);
+ /* Reduce DSP power */
+ regmap_write(rt5514->i2c_regmap, 0x18001118, 0x00000001);
+}
+
static bool rt5514_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -248,6 +280,74 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = rt5514->dsp_enabled;
+
+ return 0;
+}
+
+static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_codec *codec = rt5514->codec;
+ const struct firmware *fw = NULL;
+
+ if (ucontrol->value.integer.value[0] == rt5514->dsp_enabled)
+ return 0;
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
+ rt5514->dsp_enabled = ucontrol->value.integer.value[0];
+
+ if (rt5514->dsp_enabled) {
+ rt5514_enable_dsp_prepare(rt5514);
+
+ request_firmware(&fw, RT5514_FIRMWARE1, codec->dev);
+ if (fw) {
+#if defined(CONFIG_SND_SOC_RT5514_SPI)
+ rt5514_spi_burst_write(0x4ff60000, fw->data,
+ ((fw->size/8)+1)*8);
+#else
+ dev_err(codec->dev, "There is no SPI driver for"
+ " loading the firmware\n");
+#endif
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ request_firmware(&fw, RT5514_FIRMWARE2, codec->dev);
+ if (fw) {
+#if defined(CONFIG_SND_SOC_RT5514_SPI)
+ rt5514_spi_burst_write(0x4ffc0000, fw->data,
+ ((fw->size/8)+1)*8);
+#else
+ dev_err(codec->dev, "There is no SPI driver for"
+ " loading the firmware\n");
+#endif
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ /* DSP run */
+ regmap_write(rt5514->i2c_regmap, 0x18002f00,
+ 0x00055148);
+ } else {
+ regmap_multi_reg_write(rt5514->i2c_regmap,
+ rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
+ regcache_mark_dirty(rt5514->regmap);
+ regcache_sync(rt5514->regmap);
+ }
+ }
+
+ return 0;
+}
+
static const struct snd_kcontrol_new rt5514_snd_controls[] = {
SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST,
RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv),
@@ -257,6 +357,8 @@ static const struct snd_kcontrol_new rt5514_snd_controls[] = {
SOC_DOUBLE_R_TLV("ADC2 Capture Volume", RT5514_DOWNFILTER1_CTRL1,
RT5514_DOWNFILTER1_CTRL2, RT5514_AD_GAIN_SFT, 127, 0,
adc_vol_tlv),
+ SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0,
+ rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put),
};
/* ADC Mixer*/
@@ -365,6 +467,35 @@ static int rt5514_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
return 0;
}
+static int rt5514_pre_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /**
+	 * If the DSP is enabled at the start of recording, it should be
+	 * disabled, and the normal recording settings synced back to make
+	 * sure recording works properly.
+ */
+ if (rt5514->dsp_enabled) {
+ rt5514->dsp_enabled = 0;
+ regmap_multi_reg_write(rt5514->i2c_regmap,
+ rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
+ regcache_mark_dirty(rt5514->regmap);
+ regcache_sync(rt5514->regmap);
+ }
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
/* Input Lines */
SND_SOC_DAPM_INPUT("DMIC1L"),
@@ -472,6 +603,8 @@ static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
/* Audio Interface */
SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_PRE("DAPM Pre", rt5514_pre_event),
};
static const struct snd_soc_dapm_route rt5514_dapm_routes[] = {
@@ -799,10 +932,41 @@ static int rt5514_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
return 0;
}
+static int rt5514_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (IS_ERR(rt5514->mclk))
+ break;
+
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
+ clk_disable_unprepare(rt5514->mclk);
+ } else {
+ ret = clk_prepare_enable(rt5514->mclk);
+ if (ret)
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int rt5514_probe(struct snd_soc_codec *codec)
{
struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+ rt5514->mclk = devm_clk_get(codec->dev, "mclk");
+ if (PTR_ERR(rt5514->mclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
rt5514->codec = codec;
return 0;
@@ -858,6 +1022,7 @@ struct snd_soc_dai_driver rt5514_dai[] = {
static struct snd_soc_codec_driver soc_codec_dev_rt5514 = {
.probe = rt5514_probe,
.idle_bias_off = true,
+ .set_bias_level = rt5514_set_bias_level,
.controls = rt5514_snd_controls,
.num_controls = ARRAY_SIZE(rt5514_snd_controls),
.dapm_widgets = rt5514_dapm_widgets,
@@ -871,7 +1036,6 @@ static const struct regmap_config rt5514_i2c_regmap = {
.reg_bits = 32,
.val_bits = 32,
- .max_register = RT5514_DSP_MAPPING | RT5514_VENDOR_ID2,
.readable_reg = rt5514_i2c_readable_register,
.cache_type = REGCACHE_NONE,
@@ -944,7 +1108,7 @@ static int rt5514_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
}
- ret = regmap_register_patch(rt5514->i2c_regmap, rt5514_i2c_patch,
+ ret = regmap_multi_reg_write(rt5514->i2c_regmap, rt5514_i2c_patch,
ARRAY_SIZE(rt5514_i2c_patch));
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply i2c_regmap patch: %d\n",
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 6ad8a612f659..68883c68e999 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -12,6 +12,8 @@
#ifndef __RT5514_H__
#define __RT5514_H__
+#include <linux/clk.h>
+
#define RT5514_DEVICE_ID 0x10ec5514
#define RT5514_RESET 0x2000
@@ -225,6 +227,9 @@
#define RT5514_PLL_INP_MAX 40000000
#define RT5514_PLL_INP_MIN 256000
+#define RT5514_FIRMWARE1 "rt5514_dsp_fw1.bin"
+#define RT5514_FIRMWARE2 "rt5514_dsp_fw2.bin"
+
/* System Clock Source */
enum {
RT5514_SCLK_S_MCLK,
@@ -240,6 +245,7 @@ enum {
struct rt5514_priv {
struct snd_soc_codec *codec;
struct regmap *i2c_regmap, *regmap;
+ struct clk *mclk;
int sysclk;
int sysclk_src;
int lrck;
@@ -247,6 +253,7 @@ struct rt5514_priv {
int pll_src;
int pll_in;
int pll_out;
+ int dsp_enabled;
};
#endif /* __RT5514_H__ */
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index d70847c9eeb0..490bfe661346 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -63,6 +63,7 @@ static const struct reg_sequence init_list[] = {
{RT5645_PR_BASE + 0x20, 0x611f},
{RT5645_PR_BASE + 0x21, 0x4040},
{RT5645_PR_BASE + 0x23, 0x0004},
+ {RT5645_ASRC_4, 0x0120},
};
static const struct reg_sequence rt5650_init_list[] = {
@@ -157,7 +158,7 @@ static const struct reg_default rt5645_reg[] = {
{ 0x83, 0x0000 },
{ 0x84, 0x0000 },
{ 0x85, 0x0000 },
- { 0x8a, 0x0000 },
+ { 0x8a, 0x0120 },
{ 0x8e, 0x0004 },
{ 0x8f, 0x1100 },
{ 0x90, 0x0646 },
@@ -314,7 +315,7 @@ static const struct reg_default rt5650_reg[] = {
{ 0x83, 0x0000 },
{ 0x84, 0x0000 },
{ 0x85, 0x0000 },
- { 0x8a, 0x0000 },
+ { 0x8a, 0x0120 },
{ 0x8e, 0x0004 },
{ 0x8f, 0x1100 },
{ 0x90, 0x0646 },
@@ -440,6 +441,7 @@ static bool rt5645_volatile_register(struct device *dev, unsigned int reg)
switch (reg) {
case RT5645_RESET:
+ case RT5645_PRIV_INDEX:
case RT5645_PRIV_DATA:
case RT5645_IN1_CTRL1:
case RT5645_IN1_CTRL2:
@@ -740,6 +742,14 @@ static int rt5645_spk_put_volsw(struct snd_kcontrol *kcontrol,
return ret;
}
+static const char * const rt5645_dac1_vol_ctrl_mode_text[] = {
+ "immediately", "zero crossing", "soft ramp"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ rt5645_dac1_vol_ctrl_mode, RT5645_PR_BASE,
+ RT5645_DA1_ZDET_SFT, rt5645_dac1_vol_ctrl_mode_text);
+
static const struct snd_kcontrol_new rt5645_snd_controls[] = {
/* Speaker Output Volume */
SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL,
@@ -806,6 +816,9 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
SOC_SINGLE("I2S2 Func Switch", RT5645_GPIO_CTRL1, RT5645_I2S2_SEL_SFT,
1, 1),
RT5645_HWEQ("Speaker HWEQ"),
+
+ /* Digital Soft Volume Control */
+ SOC_ENUM("DAC1 Digital Volume Control Func", rt5645_dac1_vol_ctrl_mode),
};
/**
@@ -3531,6 +3544,7 @@ MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id);
static const struct acpi_device_id rt5645_acpi_match[] = {
{ "10EC5645", 0 },
{ "10EC5650", 0 },
+ { "10EC5640", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
@@ -3561,6 +3575,12 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
},
},
+ {
+ .ident = "Microsoft Surface 3",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
{ }
};
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 205e0715c99a..cfc5f97549eb 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2018,6 +2018,9 @@
/* Codec Private Register definition */
+/* DAC ADC Digital Volume (0x00) */
+#define RT5645_DA1_ZDET_SFT 6
+
/* 3D Speaker Control (0x63) */
#define RT5645_3D_SPK_MASK (0x1 << 15)
#define RT5645_3D_SPK_SFT 15
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 0af5ddbef1da..8ef467f64f03 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -55,6 +55,7 @@ static const struct reg_sequence init_list[] = {
{ RT5670_PR_BASE + 0x14, 0x9a8a },
{ RT5670_PR_BASE + 0x38, 0x3ba1 },
{ RT5670_PR_BASE + 0x3d, 0x3640 },
+ { 0x8a, 0x0123 },
};
static const struct reg_default rt5670_reg[] = {
@@ -131,7 +132,7 @@ static const struct reg_default rt5670_reg[] = {
{ 0x87, 0x0000 },
{ 0x88, 0x0000 },
{ 0x89, 0x0000 },
- { 0x8a, 0x0000 },
+ { 0x8a, 0x0123 },
{ 0x8b, 0x0000 },
{ 0x8c, 0x0003 },
{ 0x8d, 0x0000 },
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 08b40460663c..527b759c1562 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -38,7 +38,6 @@
/* default value of sgtl5000 registers */
static const struct reg_default sgtl5000_reg_defaults[] = {
{ SGTL5000_CHIP_DIG_POWER, 0x0000 },
- { SGTL5000_CHIP_CLK_CTRL, 0x0008 },
{ SGTL5000_CHIP_I2S_CTRL, 0x0010 },
{ SGTL5000_CHIP_SSS_CTRL, 0x0010 },
{ SGTL5000_CHIP_ADCDAC_CTRL, 0x020c },
@@ -47,12 +46,10 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
{ SGTL5000_CHIP_ANA_ADC_CTRL, 0x0000 },
{ SGTL5000_CHIP_ANA_HP_CTRL, 0x1818 },
{ SGTL5000_CHIP_ANA_CTRL, 0x0111 },
- { SGTL5000_CHIP_LINREG_CTRL, 0x0000 },
{ SGTL5000_CHIP_REF_CTRL, 0x0000 },
{ SGTL5000_CHIP_MIC_CTRL, 0x0000 },
{ SGTL5000_CHIP_LINE_OUT_CTRL, 0x0000 },
{ SGTL5000_CHIP_LINE_OUT_VOL, 0x0404 },
- { SGTL5000_CHIP_ANA_POWER, 0x7060 },
{ SGTL5000_CHIP_PLL_CTRL, 0x5000 },
{ SGTL5000_CHIP_CLK_TOP_CTRL, 0x0000 },
{ SGTL5000_CHIP_ANA_STATUS, 0x0000 },
@@ -92,35 +89,8 @@ static const char *supply_names[SGTL5000_SUPPLY_NUM] = {
"VDDD"
};
-#define LDO_CONSUMER_NAME "VDDD_LDO"
#define LDO_VOLTAGE 1200000
-
-static struct regulator_consumer_supply ldo_consumer[] = {
- REGULATOR_SUPPLY(LDO_CONSUMER_NAME, NULL),
-};
-
-static struct regulator_init_data ldo_init_data = {
- .constraints = {
- .min_uV = 1200000,
- .max_uV = 1200000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = 1,
- .consumer_supplies = &ldo_consumer[0],
-};
-
-/*
- * sgtl5000 internal ldo regulator,
- * enabled when VDDD not provided
- */
-struct ldo_regulator {
- struct regulator_desc desc;
- struct regulator_dev *dev;
- int voltage;
- void *codec_data;
- bool enabled;
-};
+#define LINREG_VDDD ((1600 - LDO_VOLTAGE / 1000) / 50)
enum sgtl5000_micbias_resistor {
SGTL5000_MICBIAS_OFF = 0,
@@ -135,7 +105,7 @@ struct sgtl5000_priv {
int master; /* i2s master or not */
int fmt; /* i2s data format */
struct regulator_bulk_data supplies[SGTL5000_SUPPLY_NUM];
- struct ldo_regulator *ldo;
+ int num_supplies;
struct regmap *regmap;
struct clk *mclk;
int revision;
@@ -415,6 +385,9 @@ static const DECLARE_TLV_DB_RANGE(mic_gain_tlv,
/* tlv for hp volume, -51.5db to 12.0db, step .5db */
static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
+/* tlv for lineout volume, 31 steps of .5db each */
+static const DECLARE_TLV_DB_SCALE(lineout_volume, -1550, 50, 0);
+
static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
/* SOC_DOUBLE_S8_TLV with invert */
{
@@ -443,6 +416,13 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
SOC_SINGLE_TLV("Mic Volume", SGTL5000_CHIP_MIC_CTRL,
0, 3, 0, mic_gain_tlv),
+
+ SOC_DOUBLE_TLV("Lineout Playback Volume",
+ SGTL5000_CHIP_LINE_OUT_VOL,
+ SGTL5000_LINE_OUT_VOL_LEFT_SHIFT,
+ SGTL5000_LINE_OUT_VOL_RIGHT_SHIFT,
+ 0x1f, 1,
+ lineout_volume),
};
/* mute the codec used by alsa core */
@@ -778,155 +758,6 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-#ifdef CONFIG_REGULATOR
-static int ldo_regulator_is_enabled(struct regulator_dev *dev)
-{
- struct ldo_regulator *ldo = rdev_get_drvdata(dev);
-
- return ldo->enabled;
-}
-
-static int ldo_regulator_enable(struct regulator_dev *dev)
-{
- struct ldo_regulator *ldo = rdev_get_drvdata(dev);
- struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
- int reg;
-
- if (ldo_regulator_is_enabled(dev))
- return 0;
-
- /* set regulator value firstly */
- reg = (1600 - ldo->voltage / 1000) / 50;
- reg = clamp(reg, 0x0, 0xf);
-
- /* amend the voltage value, unit: uV */
- ldo->voltage = (1600 - reg * 50) * 1000;
-
- /* set voltage to register */
- snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
- SGTL5000_LINREG_VDDD_MASK, reg);
-
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_LINEREG_D_POWERUP,
- SGTL5000_LINEREG_D_POWERUP);
-
- /* when internal ldo is enabled, simple digital power can be disabled */
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_LINREG_SIMPLE_POWERUP,
- 0);
-
- ldo->enabled = 1;
- return 0;
-}
-
-static int ldo_regulator_disable(struct regulator_dev *dev)
-{
- struct ldo_regulator *ldo = rdev_get_drvdata(dev);
- struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
-
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_LINEREG_D_POWERUP,
- 0);
-
- /* clear voltage info */
- snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
- SGTL5000_LINREG_VDDD_MASK, 0);
-
- ldo->enabled = 0;
-
- return 0;
-}
-
-static int ldo_regulator_get_voltage(struct regulator_dev *dev)
-{
- struct ldo_regulator *ldo = rdev_get_drvdata(dev);
-
- return ldo->voltage;
-}
-
-static struct regulator_ops ldo_regulator_ops = {
- .is_enabled = ldo_regulator_is_enabled,
- .enable = ldo_regulator_enable,
- .disable = ldo_regulator_disable,
- .get_voltage = ldo_regulator_get_voltage,
-};
-
-static int ldo_regulator_register(struct snd_soc_codec *codec,
- struct regulator_init_data *init_data,
- int voltage)
-{
- struct ldo_regulator *ldo;
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
- struct regulator_config config = { };
-
- ldo = kzalloc(sizeof(struct ldo_regulator), GFP_KERNEL);
-
- if (!ldo)
- return -ENOMEM;
-
- ldo->desc.name = kstrdup(dev_name(codec->dev), GFP_KERNEL);
- if (!ldo->desc.name) {
- kfree(ldo);
- dev_err(codec->dev, "failed to allocate decs name memory\n");
- return -ENOMEM;
- }
-
- ldo->desc.type = REGULATOR_VOLTAGE;
- ldo->desc.owner = THIS_MODULE;
- ldo->desc.ops = &ldo_regulator_ops;
- ldo->desc.n_voltages = 1;
-
- ldo->codec_data = codec;
- ldo->voltage = voltage;
-
- config.dev = codec->dev;
- config.driver_data = ldo;
- config.init_data = init_data;
-
- ldo->dev = regulator_register(&ldo->desc, &config);
- if (IS_ERR(ldo->dev)) {
- int ret = PTR_ERR(ldo->dev);
-
- dev_err(codec->dev, "failed to register regulator\n");
- kfree(ldo->desc.name);
- kfree(ldo);
-
- return ret;
- }
- sgtl5000->ldo = ldo;
-
- return 0;
-}
-
-static int ldo_regulator_remove(struct snd_soc_codec *codec)
-{
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
- struct ldo_regulator *ldo = sgtl5000->ldo;
-
- if (!ldo)
- return 0;
-
- regulator_unregister(ldo->dev);
- kfree(ldo->desc.name);
- kfree(ldo);
-
- return 0;
-}
-#else
-static int ldo_regulator_register(struct snd_soc_codec *codec,
- struct regulator_init_data *init_data,
- int voltage)
-{
- dev_err(codec->dev, "this setup needs regulator support in the kernel\n");
- return -EINVAL;
-}
-
-static int ldo_regulator_remove(struct snd_soc_codec *codec)
-{
- return 0;
-}
-#endif
-
/*
* set dac bias
* common state changes:
@@ -940,42 +771,17 @@ static int ldo_regulator_remove(struct snd_soc_codec *codec)
static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- int ret;
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
-
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
- break;
case SND_SOC_BIAS_STANDBY:
- if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
- ret = regulator_bulk_enable(
- ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
- if (ret)
- return ret;
- udelay(10);
-
- regcache_cache_only(sgtl5000->regmap, false);
-
- ret = regcache_sync(sgtl5000->regmap);
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to restore cache: %d\n", ret);
-
- regcache_cache_only(sgtl5000->regmap, true);
- regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
-
- return ret;
- }
- }
-
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_REFTOP_POWERUP,
+ SGTL5000_REFTOP_POWERUP);
break;
case SND_SOC_BIAS_OFF:
- regcache_cache_only(sgtl5000->regmap, true);
- regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_REFTOP_POWERUP, 0);
break;
}
@@ -1113,7 +919,6 @@ static const u8 vol_quot_table[] = {
* and should be set according to:
* 1. vddd provided by external or not
* 2. vdda and vddio voltage value. > 3.1v or not
- * 3. chip revision >=0x11 or not. If >=0x11, not use external vddd.
*/
static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
{
@@ -1131,7 +936,9 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
vdda = regulator_get_voltage(sgtl5000->supplies[VDDA].consumer);
vddio = regulator_get_voltage(sgtl5000->supplies[VDDIO].consumer);
- vddd = regulator_get_voltage(sgtl5000->supplies[VDDD].consumer);
+ vddd = (sgtl5000->num_supplies > VDDD)
+ ? regulator_get_voltage(sgtl5000->supplies[VDDD].consumer)
+ : LDO_VOLTAGE;
vdda = vdda / 1000;
vddio = vddio / 1000;
@@ -1178,25 +985,6 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER, ana_pwr);
- /* set voltage to register */
- snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
- SGTL5000_LINREG_VDDD_MASK, 0x8);
-
- /*
- * if vddd linear reg has been enabled,
- * simple digital supply should be clear to get
- * proper VDDD voltage.
- */
- if (ana_pwr & SGTL5000_LINEREG_D_POWERUP)
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_LINREG_SIMPLE_POWERUP,
- 0);
- else
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_LINREG_SIMPLE_POWERUP |
- SGTL5000_STARTUP_POWERUP,
- 0);
-
/*
* set ADC/DAC VAG to vdda / 2,
* should stay in range (0.8v, 1.575v)
@@ -1256,78 +1044,43 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
return 0;
}
-static int sgtl5000_replace_vddd_with_ldo(struct snd_soc_codec *codec)
-{
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
- int ret;
-
- /* set internal ldo to 1.2v */
- ret = ldo_regulator_register(codec, &ldo_init_data, LDO_VOLTAGE);
- if (ret) {
- dev_err(codec->dev,
- "Failed to register vddd internal supplies: %d\n", ret);
- return ret;
- }
-
- sgtl5000->supplies[VDDD].supply = LDO_CONSUMER_NAME;
-
- dev_info(codec->dev, "Using internal LDO instead of VDDD\n");
- return 0;
-}
-
-static int sgtl5000_enable_regulators(struct snd_soc_codec *codec)
+static int sgtl5000_enable_regulators(struct i2c_client *client)
{
int ret;
int i;
int external_vddd = 0;
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
struct regulator *vddd;
+ struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
for (i = 0; i < ARRAY_SIZE(sgtl5000->supplies); i++)
sgtl5000->supplies[i].supply = supply_names[i];
- /* External VDDD only works before revision 0x11 */
- if (sgtl5000->revision < 0x11) {
- vddd = regulator_get_optional(codec->dev, "VDDD");
- if (IS_ERR(vddd)) {
- /* See if it's just not registered yet */
- if (PTR_ERR(vddd) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- } else {
- external_vddd = 1;
- regulator_put(vddd);
- }
- }
-
- if (!external_vddd) {
- ret = sgtl5000_replace_vddd_with_ldo(codec);
- if (ret)
- return ret;
+ vddd = regulator_get_optional(&client->dev, "VDDD");
+ if (IS_ERR(vddd)) {
+ /* See if it's just not registered yet */
+ if (PTR_ERR(vddd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ external_vddd = 1;
+ regulator_put(vddd);
}
- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->num_supplies = ARRAY_SIZE(sgtl5000->supplies)
+ - 1 + external_vddd;
+ ret = regulator_bulk_get(&client->dev, sgtl5000->num_supplies,
sgtl5000->supplies);
if (ret)
- goto err_ldo_remove;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
- if (ret)
- goto err_regulator_free;
-
- /* wait for all power rails bring up */
- udelay(10);
+ return ret;
- return 0;
+ ret = regulator_bulk_enable(sgtl5000->num_supplies,
+ sgtl5000->supplies);
+ if (!ret)
+ usleep_range(10, 20);
+ else
+ regulator_bulk_free(sgtl5000->num_supplies,
+ sgtl5000->supplies);
-err_regulator_free:
- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
-err_ldo_remove:
- if (!external_vddd)
- ldo_regulator_remove(codec);
return ret;
-
}
static int sgtl5000_probe(struct snd_soc_codec *codec)
@@ -1335,10 +1088,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
int ret;
struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
- ret = sgtl5000_enable_regulators(codec);
- if (ret)
- return ret;
-
/* power up sgtl5000 */
ret = sgtl5000_set_power_regs(codec);
if (ret)
@@ -1389,25 +1138,11 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
return 0;
err:
- regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
- ldo_regulator_remove(codec);
-
return ret;
}
static int sgtl5000_remove(struct snd_soc_codec *codec)
{
- struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
-
- regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
- sgtl5000->supplies);
- ldo_regulator_remove(codec);
-
return 0;
}
@@ -1448,8 +1183,9 @@ static const struct regmap_config sgtl5000_regmap = {
* and avoid problems like, not being able to probe after an audio playback
* followed by a system reset or a 'reboot' command in Linux
*/
-static int sgtl5000_fill_defaults(struct sgtl5000_priv *sgtl5000)
+static void sgtl5000_fill_defaults(struct i2c_client *client)
{
+ struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client);
int i, ret, val, index;
for (i = 0; i < ARRAY_SIZE(sgtl5000_reg_defaults); i++) {
@@ -1457,10 +1193,10 @@ static int sgtl5000_fill_defaults(struct sgtl5000_priv *sgtl5000)
index = sgtl5000_reg_defaults[i].reg;
ret = regmap_write(sgtl5000->regmap, index, val);
if (ret)
- return ret;
+ dev_err(&client->dev,
+ "%s: error %d setting reg 0x%02x to 0x%04x\n",
+ __func__, ret, index, val);
}
-
- return 0;
}
static int sgtl5000_i2c_probe(struct i2c_client *client,
@@ -1470,16 +1206,23 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
int ret, reg, rev;
struct device_node *np = client->dev.of_node;
u32 value;
+ u16 ana_pwr;
sgtl5000 = devm_kzalloc(&client->dev, sizeof(*sgtl5000), GFP_KERNEL);
if (!sgtl5000)
return -ENOMEM;
+ i2c_set_clientdata(client, sgtl5000);
+
+ ret = sgtl5000_enable_regulators(client);
+ if (ret)
+ return ret;
+
sgtl5000->regmap = devm_regmap_init_i2c(client, &sgtl5000_regmap);
if (IS_ERR(sgtl5000->regmap)) {
ret = PTR_ERR(sgtl5000->regmap);
dev_err(&client->dev, "Failed to allocate regmap: %d\n", ret);
- return ret;
+ goto disable_regs;
}
sgtl5000->mclk = devm_clk_get(&client->dev, NULL);
@@ -1488,21 +1231,25 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
/* Defer the probe to see if the clk will be provided later */
if (ret == -ENOENT)
- return -EPROBE_DEFER;
- return ret;
+ ret = -EPROBE_DEFER;
+ goto disable_regs;
}
ret = clk_prepare_enable(sgtl5000->mclk);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&client->dev, "Error enabling clock %d\n", ret);
+ goto disable_regs;
+ }
/* Need 8 clocks before I2C accesses */
udelay(1);
/* read chip information */
ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
- if (ret)
+ if (ret) {
+ dev_err(&client->dev, "Error reading chip id %d\n", ret);
goto disable_clk;
+ }
if (((reg & SGTL5000_PARTID_MASK) >> SGTL5000_PARTID_SHIFT) !=
SGTL5000_PARTID_PART_ID) {
@@ -1516,6 +1263,44 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
dev_info(&client->dev, "sgtl5000 revision 0x%x\n", rev);
sgtl5000->revision = rev;
+ /* reconfigure the clocks in case we're using the PLL */
+ ret = regmap_write(sgtl5000->regmap,
+ SGTL5000_CHIP_CLK_CTRL,
+ SGTL5000_CHIP_CLK_CTRL_DEFAULT);
+ if (ret)
+ dev_err(&client->dev,
+ "Error %d initializing CHIP_CLK_CTRL\n", ret);
+
+ /* Follow section 2.2.1.1 of AN3663 */
+ ana_pwr = SGTL5000_ANA_POWER_DEFAULT;
+ if (sgtl5000->num_supplies <= VDDD) {
+ /* internal VDDD at 1.2V */
+ ret = regmap_update_bits(sgtl5000->regmap,
+ SGTL5000_CHIP_LINREG_CTRL,
+ SGTL5000_LINREG_VDDD_MASK,
+ LINREG_VDDD);
+ if (ret)
+ dev_err(&client->dev,
+ "Error %d setting LINREG_VDDD\n", ret);
+
+ ana_pwr |= SGTL5000_LINEREG_D_POWERUP;
+ dev_info(&client->dev,
+ "Using internal LDO instead of VDDD: check ER1\n");
+ } else {
+ /* using external LDO for VDDD
+ * Clear startup powerup and simple powerup
+ * bits to save power
+ */
+ ana_pwr &= ~(SGTL5000_STARTUP_POWERUP
+ | SGTL5000_LINREG_SIMPLE_POWERUP);
+ dev_dbg(&client->dev, "Using external VDDD\n");
+ }
+ ret = regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, ana_pwr);
+ if (ret)
+ dev_err(&client->dev,
+ "Error %d setting CHIP_ANA_POWER to %04x\n",
+ ret, ana_pwr);
+
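/*
 * Editorial worked example, not part of the patch: with LDO_VOLTAGE set to
 * 1200000, LINREG_VDDD = (1600 - 1200) / 50 = 8, i.e. the same 0x8 value
 * the removed ldo_regulator code programmed into CHIP_LINREG_CTRL
 * (1.6 V minus 8 steps of 50 mV yields the 1.2 V internal VDDD).
 */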
if (np) {
if (!of_property_read_u32(np,
"micbias-resistor-k-ohms", &value)) {
@@ -1557,12 +1342,8 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
}
}
- i2c_set_clientdata(client, sgtl5000);
-
/* Ensure sgtl5000 will start with sane register values */
- ret = sgtl5000_fill_defaults(sgtl5000);
- if (ret)
- goto disable_clk;
+ sgtl5000_fill_defaults(client);
ret = snd_soc_register_codec(&client->dev,
&sgtl5000_driver, &sgtl5000_dai, 1);
@@ -1573,6 +1354,11 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
disable_clk:
clk_disable_unprepare(sgtl5000->mclk);
+
+disable_regs:
+ regulator_bulk_disable(sgtl5000->num_supplies, sgtl5000->supplies);
+ regulator_bulk_free(sgtl5000->num_supplies, sgtl5000->supplies);
+
return ret;
}
@@ -1582,6 +1368,9 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)
snd_soc_unregister_codec(&client->dev);
clk_disable_unprepare(sgtl5000->mclk);
+ regulator_bulk_disable(sgtl5000->num_supplies, sgtl5000->supplies);
+ regulator_bulk_free(sgtl5000->num_supplies, sgtl5000->supplies);
+
return 0;
}
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index 1c317de26176..22f3442af982 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -92,6 +92,7 @@
/*
* SGTL5000_CHIP_CLK_CTRL
*/
+#define SGTL5000_CHIP_CLK_CTRL_DEFAULT 0x0008
#define SGTL5000_RATE_MODE_MASK 0x0030
#define SGTL5000_RATE_MODE_SHIFT 4
#define SGTL5000_RATE_MODE_WIDTH 2
@@ -325,6 +326,7 @@
/*
* SGTL5000_CHIP_ANA_POWER
*/
+#define SGTL5000_ANA_POWER_DEFAULT 0x7060
#define SGTL5000_DAC_STEREO 0x4000
#define SGTL5000_LINREG_SIMPLE_POWERUP 0x2000
#define SGTL5000_STARTUP_POWERUP 0x1000
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index b8d19b77bde9..d8baca3f8413 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -28,6 +28,7 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/tlv.h>
+#include <asm/unaligned.h>
#include "tas571x.h"
@@ -63,6 +64,10 @@ static int tas571x_register_size(struct tas571x_private *priv, unsigned int reg)
case TAS571X_INPUT_MUX_REG:
case TAS571X_CH4_SRC_SELECT_REG:
case TAS571X_PWM_MUX_REG:
+ case TAS5717_CH1_RIGHT_CH_MIX_REG:
+ case TAS5717_CH1_LEFT_CH_MIX_REG:
+ case TAS5717_CH2_LEFT_CH_MIX_REG:
+ case TAS5717_CH2_RIGHT_CH_MIX_REG:
return 4;
default:
return 1;
@@ -135,6 +140,129 @@ static int tas571x_reg_read(void *context, unsigned int reg,
return 0;
}
+/*
+ * register write for 8- and 20-byte registers
+ */
+static int tas571x_reg_write_multiword(struct i2c_client *client,
+ unsigned int reg, const long values[], size_t len)
+{
+ size_t i;
+ uint8_t *buf, *p;
+ int ret;
+ size_t send_size = 1 + len * sizeof(uint32_t);
+
+ buf = kzalloc(send_size, GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+ buf[0] = reg;
+
+ for (i = 0, p = buf + 1; i < len; i++, p += sizeof(uint32_t))
+ put_unaligned_be32(values[i], p);
+
+ ret = i2c_master_send(client, buf, send_size);
+
+ kfree(buf);
+
+ if (ret == send_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
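/*
 * Editorial worked example, not part of the patch: for a 20-byte biquad
 * register, len is 5 coefficients, so send_size = 1 + 5 * sizeof(uint32_t)
 * = 21 bytes (one register-address byte plus 20 data bytes); an 8-byte
 * register would correspondingly use len = 2 for a 9-byte transfer.
 */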
+
+/*
+ * register read for 8- and 20-byte registers
+ */
+static int tas571x_reg_read_multiword(struct i2c_client *client,
+ unsigned int reg, long values[], size_t len)
+{
+ unsigned int i;
+ uint8_t send_buf;
+ uint8_t *recv_buf, *p;
+ struct i2c_msg msgs[2];
+ unsigned int recv_size = len * sizeof(uint32_t);
+ int ret;
+
+ recv_buf = kzalloc(recv_size, GFP_KERNEL | GFP_DMA);
+ if (!recv_buf)
+ return -ENOMEM;
+
+ send_buf = reg;
+
+ msgs[0].addr = client->addr;
+ msgs[0].len = sizeof(send_buf);
+ msgs[0].buf = &send_buf;
+ msgs[0].flags = 0;
+
+ msgs[1].addr = client->addr;
+ msgs[1].len = recv_size;
+ msgs[1].buf = recv_buf;
+ msgs[1].flags = I2C_M_RD;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ goto err_ret;
+ else if (ret != ARRAY_SIZE(msgs)) {
+ ret = -EIO;
+ goto err_ret;
+ }
+
+ for (i = 0, p = recv_buf; i < len; i++, p += sizeof(uint32_t))
+ values[i] = get_unaligned_be32(p);
+
+err_ret:
+ kfree(recv_buf);
+ return ret;
+}
+
+/*
+ * Integer array controls for setting biquad, mixer, DRC coefficients.
+ * According to the datasheet each coefficient is effectively 26 bits,
+ * i.e. stored as 32 bits, where bits [31:26] are ignored.
+ * TI's TAS57xx Graphical Development Environment tool however produces
+ * coefficients with more than 26 bits. For this reason we allow values
+ * in the full 32-bit range.
+ * The coefficients are ordered as given in the TAS571x data sheet:
+ * b0, b1, b2, a1, a2
+ */
+
+static int tas571x_coefficient_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ int numcoef = kcontrol->private_value >> 16;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = numcoef;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 0xffffffff;
+ return 0;
+}
+
+static int tas571x_coefficient_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct i2c_client *i2c = to_i2c_client(codec->dev);
+ int numcoef = kcontrol->private_value >> 16;
+ int index = kcontrol->private_value & 0xffff;
+
+ return tas571x_reg_read_multiword(i2c, index,
+ ucontrol->value.integer.value, numcoef);
+}
+
+static int tas571x_coefficient_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct i2c_client *i2c = to_i2c_client(codec->dev);
+ int numcoef = kcontrol->private_value >> 16;
+ int index = kcontrol->private_value & 0xffff;
+
+ return tas571x_reg_write_multiword(i2c, index,
+ ucontrol->value.integer.value, numcoef);
+}
+
static int tas571x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int format)
{
struct tas571x_private *priv = snd_soc_codec_get_drvdata(dai->codec);
@@ -241,6 +369,15 @@ static const struct snd_soc_dai_ops tas571x_dai_ops = {
.digital_mute = tas571x_mute,
};
+
+#define BIQUAD_COEFS(xname, reg) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = tas571x_coefficient_info, \
+ .get = tas571x_coefficient_get,\
+ .put = tas571x_coefficient_put, \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .private_value = reg | (5 << 16) }
+
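/*
 * Editorial worked example, not part of the patch: for "CH1 - Biquad 0"
 * the register is TAS5717_CH1_BQ0_REG (0x26), so BIQUAD_COEFS() packs
 * private_value = 0x26 | (5 << 16) = 0x00050026, from which the callbacks
 * recover numcoef = 5 and index = 0x26.
 */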
static const char *const tas5711_supply_names[] = {
"AVDD",
"DVDD",
@@ -264,6 +401,16 @@ static const struct snd_kcontrol_new tas5711_controls[] = {
TAS571X_SOFT_MUTE_REG,
TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
1, 1),
+
+ SOC_DOUBLE_R_RANGE("CH1 Mixer Volume",
+ TAS5717_CH1_LEFT_CH_MIX_REG,
+ TAS5717_CH1_RIGHT_CH_MIX_REG,
+ 16, 0, 0x80, 0),
+
+ SOC_DOUBLE_R_RANGE("CH2 Mixer Volume",
+ TAS5717_CH2_LEFT_CH_MIX_REG,
+ TAS5717_CH2_RIGHT_CH_MIX_REG,
+ 16, 0, 0x80, 0),
};
static const struct regmap_range tas571x_readonly_regs_range[] = {
@@ -340,6 +487,43 @@ static const struct snd_kcontrol_new tas5717_controls[] = {
TAS571X_SOFT_MUTE_REG,
TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
1, 1),
+
+ /*
+ * The biquads are named according to the register names.
+ * Please note that TI's TAS57xx Graphical Development Environment
+	 * tool names them differently.
+ */
+ BIQUAD_COEFS("CH1 - Biquad 0", TAS5717_CH1_BQ0_REG),
+ BIQUAD_COEFS("CH1 - Biquad 1", TAS5717_CH1_BQ1_REG),
+ BIQUAD_COEFS("CH1 - Biquad 2", TAS5717_CH1_BQ2_REG),
+ BIQUAD_COEFS("CH1 - Biquad 3", TAS5717_CH1_BQ3_REG),
+ BIQUAD_COEFS("CH1 - Biquad 4", TAS5717_CH1_BQ4_REG),
+ BIQUAD_COEFS("CH1 - Biquad 5", TAS5717_CH1_BQ5_REG),
+ BIQUAD_COEFS("CH1 - Biquad 6", TAS5717_CH1_BQ6_REG),
+ BIQUAD_COEFS("CH1 - Biquad 7", TAS5717_CH1_BQ7_REG),
+ BIQUAD_COEFS("CH1 - Biquad 8", TAS5717_CH1_BQ8_REG),
+ BIQUAD_COEFS("CH1 - Biquad 9", TAS5717_CH1_BQ9_REG),
+ BIQUAD_COEFS("CH1 - Biquad 10", TAS5717_CH1_BQ10_REG),
+ BIQUAD_COEFS("CH1 - Biquad 11", TAS5717_CH1_BQ11_REG),
+
+ BIQUAD_COEFS("CH2 - Biquad 0", TAS5717_CH2_BQ0_REG),
+ BIQUAD_COEFS("CH2 - Biquad 1", TAS5717_CH2_BQ1_REG),
+ BIQUAD_COEFS("CH2 - Biquad 2", TAS5717_CH2_BQ2_REG),
+ BIQUAD_COEFS("CH2 - Biquad 3", TAS5717_CH2_BQ3_REG),
+ BIQUAD_COEFS("CH2 - Biquad 4", TAS5717_CH2_BQ4_REG),
+ BIQUAD_COEFS("CH2 - Biquad 5", TAS5717_CH2_BQ5_REG),
+ BIQUAD_COEFS("CH2 - Biquad 6", TAS5717_CH2_BQ6_REG),
+ BIQUAD_COEFS("CH2 - Biquad 7", TAS5717_CH2_BQ7_REG),
+ BIQUAD_COEFS("CH2 - Biquad 8", TAS5717_CH2_BQ8_REG),
+ BIQUAD_COEFS("CH2 - Biquad 9", TAS5717_CH2_BQ9_REG),
+ BIQUAD_COEFS("CH2 - Biquad 10", TAS5717_CH2_BQ10_REG),
+ BIQUAD_COEFS("CH2 - Biquad 11", TAS5717_CH2_BQ11_REG),
+
+ BIQUAD_COEFS("CH3 - Biquad 0", TAS5717_CH3_BQ0_REG),
+ BIQUAD_COEFS("CH3 - Biquad 1", TAS5717_CH3_BQ1_REG),
+
+ BIQUAD_COEFS("CH4 - Biquad 0", TAS5717_CH4_BQ0_REG),
+ BIQUAD_COEFS("CH4 - Biquad 1", TAS5717_CH4_BQ1_REG),
};
static const struct reg_default tas5717_reg_defaults[] = {
@@ -350,6 +534,10 @@ static const struct reg_default tas5717_reg_defaults[] = {
{ 0x08, 0x00c0 },
{ 0x09, 0x00c0 },
{ 0x1b, 0x82 },
+ { TAS5717_CH1_RIGHT_CH_MIX_REG, 0x0 },
+ { TAS5717_CH1_LEFT_CH_MIX_REG, 0x800000},
+ { TAS5717_CH2_LEFT_CH_MIX_REG, 0x0 },
+ { TAS5717_CH2_RIGHT_CH_MIX_REG, 0x800000},
};
static const struct regmap_config tas5717_regmap_config = {
diff --git a/sound/soc/codecs/tas571x.h b/sound/soc/codecs/tas571x.h
index cf800c364f0f..c45677bc26ad 100644
--- a/sound/soc/codecs/tas571x.h
+++ b/sound/soc/codecs/tas571x.h
@@ -52,4 +52,44 @@
#define TAS571X_CH4_SRC_SELECT_REG 0x21
#define TAS571X_PWM_MUX_REG 0x25
+/* 20-byte biquad registers */
+#define TAS5717_CH1_BQ0_REG 0x26
+#define TAS5717_CH1_BQ1_REG 0x27
+#define TAS5717_CH1_BQ2_REG 0x28
+#define TAS5717_CH1_BQ3_REG 0x29
+#define TAS5717_CH1_BQ4_REG 0x2a
+#define TAS5717_CH1_BQ5_REG 0x2b
+#define TAS5717_CH1_BQ6_REG 0x2c
+#define TAS5717_CH1_BQ7_REG 0x2d
+#define TAS5717_CH1_BQ8_REG 0x2e
+#define TAS5717_CH1_BQ9_REG 0x2f
+
+#define TAS5717_CH2_BQ0_REG 0x30
+#define TAS5717_CH2_BQ1_REG 0x31
+#define TAS5717_CH2_BQ2_REG 0x32
+#define TAS5717_CH2_BQ3_REG 0x33
+#define TAS5717_CH2_BQ4_REG 0x34
+#define TAS5717_CH2_BQ5_REG 0x35
+#define TAS5717_CH2_BQ6_REG 0x36
+#define TAS5717_CH2_BQ7_REG 0x37
+#define TAS5717_CH2_BQ8_REG 0x38
+#define TAS5717_CH2_BQ9_REG 0x39
+
+#define TAS5717_CH1_BQ10_REG 0x58
+#define TAS5717_CH1_BQ11_REG 0x59
+
+#define TAS5717_CH4_BQ0_REG 0x5a
+#define TAS5717_CH4_BQ1_REG 0x5b
+
+#define TAS5717_CH2_BQ10_REG 0x5c
+#define TAS5717_CH2_BQ11_REG 0x5d
+
+#define TAS5717_CH3_BQ0_REG 0x5e
+#define TAS5717_CH3_BQ1_REG 0x5f
+
+#define TAS5717_CH1_RIGHT_CH_MIX_REG 0x72
+#define TAS5717_CH1_LEFT_CH_MIX_REG 0x73
+#define TAS5717_CH2_LEFT_CH_MIX_REG 0x76
+#define TAS5717_CH2_RIGHT_CH_MIX_REG 0x77
+
#endif /* _TAS571X_H */
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index fe16c34607bb..ac9b146526eb 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -38,141 +38,143 @@ struct aic31xx_pdata {
int micbias_vg;
};
+#define AIC31XX_REG(page, reg) ((page * 128) + reg)
+
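/*
 * Editorial note, not part of the patch: AIC31XX_REG() flattens the paged
 * register map into one index, e.g. AIC31XX_REG(0, 11) = 11 (the old flat
 * 0x0B for AIC31XX_NDAC), so page-0 addresses keep their numeric values
 * while a page-1 register becomes 128 + reg.
 */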
/* Page Control Register */
-#define AIC31XX_PAGECTL 0x00
+#define AIC31XX_PAGECTL AIC31XX_REG(0, 0)
/* Page 0 Registers */
/* Software reset register */
-#define AIC31XX_RESET 0x01
+#define AIC31XX_RESET AIC31XX_REG(0, 1)
/* OT FLAG register */
-#define AIC31XX_OT_FLAG 0x03
+#define AIC31XX_OT_FLAG AIC31XX_REG(0, 3)
/* Clock clock Gen muxing, Multiplexers*/
-#define AIC31XX_CLKMUX 0x04
+#define AIC31XX_CLKMUX AIC31XX_REG(0, 4)
/* PLL P and R-VAL register */
-#define AIC31XX_PLLPR 0x05
+#define AIC31XX_PLLPR AIC31XX_REG(0, 5)
/* PLL J-VAL register */
-#define AIC31XX_PLLJ 0x06
+#define AIC31XX_PLLJ AIC31XX_REG(0, 6)
/* PLL D-VAL MSB register */
-#define AIC31XX_PLLDMSB 0x07
+#define AIC31XX_PLLDMSB AIC31XX_REG(0, 7)
/* PLL D-VAL LSB register */
-#define AIC31XX_PLLDLSB 0x08
+#define AIC31XX_PLLDLSB AIC31XX_REG(0, 8)
/* DAC NDAC_VAL register*/
-#define AIC31XX_NDAC 0x0B
+#define AIC31XX_NDAC AIC31XX_REG(0, 11)
/* DAC MDAC_VAL register */
-#define AIC31XX_MDAC 0x0C
+#define AIC31XX_MDAC AIC31XX_REG(0, 12)
/* DAC OSR setting register 1, MSB value */
-#define AIC31XX_DOSRMSB 0x0D
+#define AIC31XX_DOSRMSB AIC31XX_REG(0, 13)
/* DAC OSR setting register 2, LSB value */
-#define AIC31XX_DOSRLSB 0x0E
-#define AIC31XX_MINI_DSP_INPOL 0x10
+#define AIC31XX_DOSRLSB AIC31XX_REG(0, 14)
+#define AIC31XX_MINI_DSP_INPOL AIC31XX_REG(0, 16)
/* Clock setting register 8, PLL */
-#define AIC31XX_NADC 0x12
+#define AIC31XX_NADC AIC31XX_REG(0, 18)
/* Clock setting register 9, PLL */
-#define AIC31XX_MADC 0x13
+#define AIC31XX_MADC AIC31XX_REG(0, 19)
/* ADC Oversampling (AOSR) Register */
-#define AIC31XX_AOSR 0x14
+#define AIC31XX_AOSR AIC31XX_REG(0, 20)
/* Clock setting register 9, Multiplexers */
-#define AIC31XX_CLKOUTMUX 0x19
+#define AIC31XX_CLKOUTMUX AIC31XX_REG(0, 25)
/* Clock setting register 10, CLOCKOUT M divider value */
-#define AIC31XX_CLKOUTMVAL 0x1A
+#define AIC31XX_CLKOUTMVAL AIC31XX_REG(0, 26)
/* Audio Interface Setting Register 1 */
-#define AIC31XX_IFACE1 0x1B
+#define AIC31XX_IFACE1 AIC31XX_REG(0, 27)
/* Audio Data Slot Offset Programming */
-#define AIC31XX_DATA_OFFSET 0x1C
+#define AIC31XX_DATA_OFFSET AIC31XX_REG(0, 28)
/* Audio Interface Setting Register 2 */
-#define AIC31XX_IFACE2 0x1D
+#define AIC31XX_IFACE2 AIC31XX_REG(0, 29)
/* Clock setting register 11, BCLK N Divider */
-#define AIC31XX_BCLKN 0x1E
+#define AIC31XX_BCLKN AIC31XX_REG(0, 30)
/* Audio Interface Setting Register 3, Secondary Audio Interface */
-#define AIC31XX_IFACESEC1 0x1F
+#define AIC31XX_IFACESEC1 AIC31XX_REG(0, 31)
/* Audio Interface Setting Register 4 */
-#define AIC31XX_IFACESEC2 0x20
+#define AIC31XX_IFACESEC2 AIC31XX_REG(0, 32)
/* Audio Interface Setting Register 5 */
-#define AIC31XX_IFACESEC3 0x21
+#define AIC31XX_IFACESEC3 AIC31XX_REG(0, 33)
/* I2C Bus Condition */
-#define AIC31XX_I2C 0x22
+#define AIC31XX_I2C AIC31XX_REG(0, 34)
/* ADC FLAG */
-#define AIC31XX_ADCFLAG 0x24
+#define AIC31XX_ADCFLAG AIC31XX_REG(0, 36)
/* DAC Flag Registers */
-#define AIC31XX_DACFLAG1 0x25
-#define AIC31XX_DACFLAG2 0x26
+#define AIC31XX_DACFLAG1 AIC31XX_REG(0, 37)
+#define AIC31XX_DACFLAG2 AIC31XX_REG(0, 38)
/* Sticky Interrupt flag (overflow) */
-#define AIC31XX_OFFLAG 0x27
+#define AIC31XX_OFFLAG AIC31XX_REG(0, 39)
/* Sticky DAC Interrupt flags */
-#define AIC31XX_INTRDACFLAG 0x2C
+#define AIC31XX_INTRDACFLAG AIC31XX_REG(0, 44)
/* Sticky ADC Interrupt flags */
-#define AIC31XX_INTRADCFLAG 0x2D
+#define AIC31XX_INTRADCFLAG AIC31XX_REG(0, 45)
/* DAC Interrupt flags 2 */
-#define AIC31XX_INTRDACFLAG2 0x2E
+#define AIC31XX_INTRDACFLAG2 AIC31XX_REG(0, 46)
/* ADC Interrupt flags 2 */
-#define AIC31XX_INTRADCFLAG2 0x2F
+#define AIC31XX_INTRADCFLAG2 AIC31XX_REG(0, 47)
/* INT1 interrupt control */
-#define AIC31XX_INT1CTRL 0x30
+#define AIC31XX_INT1CTRL AIC31XX_REG(0, 48)
/* INT2 interrupt control */
-#define AIC31XX_INT2CTRL 0x31
+#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49)
/* GPIO1 control */
-#define AIC31XX_GPIO1 0x33
+#define AIC31XX_GPIO1 AIC31XX_REG(0, 50)
-#define AIC31XX_DACPRB 0x3C
+#define AIC31XX_DACPRB AIC31XX_REG(0, 60)
/* ADC Instruction Set Register */
-#define AIC31XX_ADCPRB 0x3D
+#define AIC31XX_ADCPRB AIC31XX_REG(0, 61)
/* DAC channel setup register */
-#define AIC31XX_DACSETUP 0x3F
+#define AIC31XX_DACSETUP AIC31XX_REG(0, 63)
/* DAC Mute and volume control register */
-#define AIC31XX_DACMUTE 0x40
+#define AIC31XX_DACMUTE AIC31XX_REG(0, 64)
/* Left DAC channel digital volume control */
-#define AIC31XX_LDACVOL 0x41
+#define AIC31XX_LDACVOL AIC31XX_REG(0, 65)
/* Right DAC channel digital volume control */
-#define AIC31XX_RDACVOL 0x42
+#define AIC31XX_RDACVOL AIC31XX_REG(0, 66)
/* Headset detection */
-#define AIC31XX_HSDETECT 0x43
+#define AIC31XX_HSDETECT AIC31XX_REG(0, 67)
/* ADC Digital Mic */
-#define AIC31XX_ADCSETUP 0x51
+#define AIC31XX_ADCSETUP AIC31XX_REG(0, 81)
/* ADC Digital Volume Control Fine Adjust */
-#define AIC31XX_ADCFGA 0x52
+#define AIC31XX_ADCFGA AIC31XX_REG(0, 82)
/* ADC Digital Volume Control Coarse Adjust */
-#define AIC31XX_ADCVOL 0x53
+#define AIC31XX_ADCVOL AIC31XX_REG(0, 83)
/* Page 1 Registers */
/* Headphone drivers */
-#define AIC31XX_HPDRIVER 0x9F
+#define AIC31XX_HPDRIVER AIC31XX_REG(1, 31)
/* Class-D Speaker Amplifier */
-#define AIC31XX_SPKAMP 0xA0
+#define AIC31XX_SPKAMP AIC31XX_REG(1, 32)
/* HP Output Drivers POP Removal Settings */
-#define AIC31XX_HPPOP 0xA1
+#define AIC31XX_HPPOP AIC31XX_REG(1, 33)
/* Output Driver PGA Ramp-Down Period Control */
-#define AIC31XX_SPPGARAMP 0xA2
+#define AIC31XX_SPPGARAMP AIC31XX_REG(1, 34)
/* DAC_L and DAC_R Output Mixer Routing */
-#define AIC31XX_DACMIXERROUTE 0xA3
+#define AIC31XX_DACMIXERROUTE AIC31XX_REG(1, 35)
/* Left Analog Vol to HPL */
-#define AIC31XX_LANALOGHPL 0xA4
+#define AIC31XX_LANALOGHPL AIC31XX_REG(1, 36)
/* Right Analog Vol to HPR */
-#define AIC31XX_RANALOGHPR 0xA5
+#define AIC31XX_RANALOGHPR AIC31XX_REG(1, 37)
/* Left Analog Vol to SPL */
-#define AIC31XX_LANALOGSPL 0xA6
+#define AIC31XX_LANALOGSPL AIC31XX_REG(1, 38)
/* Right Analog Vol to SPR */
-#define AIC31XX_RANALOGSPR 0xA7
+#define AIC31XX_RANALOGSPR AIC31XX_REG(1, 39)
/* HPL Driver */
-#define AIC31XX_HPLGAIN 0xA8
+#define AIC31XX_HPLGAIN AIC31XX_REG(1, 40)
/* HPR Driver */
-#define AIC31XX_HPRGAIN 0xA9
+#define AIC31XX_HPRGAIN AIC31XX_REG(1, 41)
/* SPL Driver */
-#define AIC31XX_SPLGAIN 0xAA
+#define AIC31XX_SPLGAIN AIC31XX_REG(1, 42)
/* SPR Driver */
-#define AIC31XX_SPRGAIN 0xAB
+#define AIC31XX_SPRGAIN AIC31XX_REG(1, 43)
/* HP Driver Control */
-#define AIC31XX_HPCONTROL 0xAC
+#define AIC31XX_HPCONTROL AIC31XX_REG(1, 44)
/* MIC Bias Control */
-#define AIC31XX_MICBIAS 0xAE
+#define AIC31XX_MICBIAS AIC31XX_REG(1, 46)
/* MIC PGA*/
-#define AIC31XX_MICPGA 0xAF
+#define AIC31XX_MICPGA AIC31XX_REG(1, 47)
/* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
-#define AIC31XX_MICPGAPI 0xB0
+#define AIC31XX_MICPGAPI AIC31XX_REG(1, 48)
/* ADC Input Selection for M-Terminal */
-#define AIC31XX_MICPGAMI 0xB1
+#define AIC31XX_MICPGAMI AIC31XX_REG(1, 49)
/* Input CM Settings */
-#define AIC31XX_MICPGACM 0xB2
+#define AIC31XX_MICPGACM AIC31XX_REG(1, 50)
/* Bits, masks and shifts */
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 11d85c5c787a..f1ea052a822e 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -32,6 +32,7 @@
#include <sound/tlv.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/regmap.h>
#include "tpa6130a2.h"
@@ -40,219 +41,72 @@ enum tpa_model {
TPA6140A2,
};
-static struct i2c_client *tpa6130a2_client;
-
/* This struct is used to save the context */
struct tpa6130a2_data {
- struct mutex mutex;
- unsigned char regs[TPA6130A2_CACHEREGNUM];
+ struct device *dev;
+ struct regmap *regmap;
struct regulator *supply;
int power_gpio;
- u8 power_state:1;
enum tpa_model id;
};
-static int tpa6130a2_i2c_read(int reg)
-{
- struct tpa6130a2_data *data;
- int val;
-
- if (WARN_ON(!tpa6130a2_client))
- return -EINVAL;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- /* If powered off, return the cached value */
- if (data->power_state) {
- val = i2c_smbus_read_byte_data(tpa6130a2_client, reg);
- if (val < 0)
- dev_err(&tpa6130a2_client->dev, "Read failed\n");
- else
- data->regs[reg] = val;
- } else {
- val = data->regs[reg];
- }
-
- return val;
-}
-
-static int tpa6130a2_i2c_write(int reg, u8 value)
-{
- struct tpa6130a2_data *data;
- int val = 0;
-
- if (WARN_ON(!tpa6130a2_client))
- return -EINVAL;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- if (data->power_state) {
- val = i2c_smbus_write_byte_data(tpa6130a2_client, reg, value);
- if (val < 0) {
- dev_err(&tpa6130a2_client->dev, "Write failed\n");
- return val;
- }
- }
-
- /* Either powered on or off, we save the context */
- data->regs[reg] = value;
-
- return val;
-}
-
-static u8 tpa6130a2_read(int reg)
-{
- struct tpa6130a2_data *data;
-
- if (WARN_ON(!tpa6130a2_client))
- return 0;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- return data->regs[reg];
-}
-
-static int tpa6130a2_initialize(void)
-{
- struct tpa6130a2_data *data;
- int i, ret = 0;
-
- if (WARN_ON(!tpa6130a2_client))
- return -EINVAL;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- for (i = 1; i < TPA6130A2_REG_VERSION; i++) {
- ret = tpa6130a2_i2c_write(i, data->regs[i]);
- if (ret < 0)
- break;
- }
-
- return ret;
-}
-
-static int tpa6130a2_power(u8 power)
+static int tpa6130a2_power(struct tpa6130a2_data *data, bool enable)
{
- struct tpa6130a2_data *data;
- u8 val;
- int ret = 0;
-
- if (WARN_ON(!tpa6130a2_client))
- return -EINVAL;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- mutex_lock(&data->mutex);
- if (power == data->power_state)
- goto exit;
+ int ret;
- if (power) {
+ if (enable) {
ret = regulator_enable(data->supply);
if (ret != 0) {
- dev_err(&tpa6130a2_client->dev,
+ dev_err(data->dev,
"Failed to enable supply: %d\n", ret);
- goto exit;
+ return ret;
}
/* Power on */
if (data->power_gpio >= 0)
gpio_set_value(data->power_gpio, 1);
-
- data->power_state = 1;
- ret = tpa6130a2_initialize();
- if (ret < 0) {
- dev_err(&tpa6130a2_client->dev,
- "Failed to initialize chip\n");
- if (data->power_gpio >= 0)
- gpio_set_value(data->power_gpio, 0);
- regulator_disable(data->supply);
- data->power_state = 0;
- goto exit;
- }
} else {
- /* set SWS */
- val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
- val |= TPA6130A2_SWS;
- tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-
/* Power off */
if (data->power_gpio >= 0)
gpio_set_value(data->power_gpio, 0);
ret = regulator_disable(data->supply);
if (ret != 0) {
- dev_err(&tpa6130a2_client->dev,
+ dev_err(data->dev,
"Failed to disable supply: %d\n", ret);
- goto exit;
+ return ret;
}
- data->power_state = 0;
+ /* device registers no longer match the cache state */
+ regcache_mark_dirty(data->regmap);
}
-exit:
- mutex_unlock(&data->mutex);
return ret;
}
-static int tpa6130a2_get_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tpa6130a2_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kctrl, int event)
{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- struct tpa6130a2_data *data;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- int max = mc->max;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
-
- if (WARN_ON(!tpa6130a2_client))
- return -EINVAL;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- mutex_lock(&data->mutex);
-
- ucontrol->value.integer.value[0] =
- (tpa6130a2_read(reg) >> shift) & mask;
-
- if (invert)
- ucontrol->value.integer.value[0] =
- max - ucontrol->value.integer.value[0];
-
- mutex_unlock(&data->mutex);
- return 0;
-}
+ struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);
+ struct tpa6130a2_data *data = snd_soc_component_get_drvdata(c);
+ int ret;
-static int tpa6130a2_put_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- struct tpa6130a2_data *data;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- int max = mc->max;
- unsigned int mask = (1 << fls(max)) - 1;
- unsigned int invert = mc->invert;
- unsigned int val = (ucontrol->value.integer.value[0] & mask);
- unsigned int val_reg;
-
- if (WARN_ON(!tpa6130a2_client))
- return -EINVAL;
- data = i2c_get_clientdata(tpa6130a2_client);
-
- if (invert)
- val = max - val;
-
- mutex_lock(&data->mutex);
-
- val_reg = tpa6130a2_read(reg);
- if (((val_reg >> shift) & mask) == val) {
- mutex_unlock(&data->mutex);
- return 0;
+ /* before widget power up */
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ /* Turn on the chip */
+ tpa6130a2_power(data, true);
+ /* Sync the registers */
+ ret = regcache_sync(data->regmap);
+ if (ret < 0) {
+ dev_err(c->dev, "Failed to initialize chip\n");
+ tpa6130a2_power(data, false);
+ return ret;
+ }
+ /* after widget power down */
+ } else {
+ tpa6130a2_power(data, false);
}
- val_reg &= ~(mask << shift);
- val_reg |= val << shift;
- tpa6130a2_i2c_write(reg, val_reg);
-
- mutex_unlock(&data->mutex);
-
- return 1;
+ return 0;
}
/*
@@ -273,9 +127,8 @@ static const DECLARE_TLV_DB_RANGE(tpa6130_tlv,
);
static const struct snd_kcontrol_new tpa6130a2_controls[] = {
- SOC_SINGLE_EXT_TLV("TPA6130A2 Headphone Playback Volume",
+ SOC_SINGLE_TLV("Headphone Playback Volume",
TPA6130A2_REG_VOL_MUTE, 0, 0x3f, 0,
- tpa6130a2_get_volsw, tpa6130a2_put_volsw,
tpa6130_tlv),
};
@@ -286,85 +139,79 @@ static const DECLARE_TLV_DB_RANGE(tpa6140_tlv,
);
static const struct snd_kcontrol_new tpa6140a2_controls[] = {
- SOC_SINGLE_EXT_TLV("TPA6140A2 Headphone Playback Volume",
+ SOC_SINGLE_TLV("Headphone Playback Volume",
TPA6130A2_REG_VOL_MUTE, 1, 0x1f, 0,
- tpa6130a2_get_volsw, tpa6130a2_put_volsw,
tpa6140_tlv),
};
-/*
- * Enable or disable channel (left or right)
- * The bit number for mute and amplifier are the same per channel:
- * bit 6: Right channel
- * bit 7: Left channel
- * in both registers.
- */
-static void tpa6130a2_channel_enable(u8 channel, int enable)
+static int tpa6130a2_component_probe(struct snd_soc_component *component)
{
- u8 val;
+ struct tpa6130a2_data *data = snd_soc_component_get_drvdata(component);
- if (enable) {
- /* Enable channel */
- /* Enable amplifier */
- val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
- val |= channel;
- val &= ~TPA6130A2_SWS;
- tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-
- /* Unmute channel */
- val = tpa6130a2_read(TPA6130A2_REG_VOL_MUTE);
- val &= ~channel;
- tpa6130a2_i2c_write(TPA6130A2_REG_VOL_MUTE, val);
- } else {
- /* Disable channel */
- /* Mute channel */
- val = tpa6130a2_read(TPA6130A2_REG_VOL_MUTE);
- val |= channel;
- tpa6130a2_i2c_write(TPA6130A2_REG_VOL_MUTE, val);
-
- /* Disable amplifier */
- val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
- val &= ~channel;
- tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
- }
+ if (data->id == TPA6140A2)
+ return snd_soc_add_component_controls(component,
+ tpa6140a2_controls, ARRAY_SIZE(tpa6140a2_controls));
+ else
+ return snd_soc_add_component_controls(component,
+ tpa6130a2_controls, ARRAY_SIZE(tpa6130a2_controls));
}
-int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable)
-{
- int ret = 0;
- if (enable) {
- ret = tpa6130a2_power(1);
- if (ret < 0)
- return ret;
- tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
- 1);
- } else {
- tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
- 0);
- ret = tpa6130a2_power(0);
- }
+static const struct snd_soc_dapm_widget tpa6130a2_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("LEFTIN"),
+ SND_SOC_DAPM_INPUT("RIGHTIN"),
+ SND_SOC_DAPM_OUTPUT("HPLEFT"),
+ SND_SOC_DAPM_OUTPUT("HPRIGHT"),
+
+ SND_SOC_DAPM_PGA("Left Mute", TPA6130A2_REG_VOL_MUTE,
+ TPA6130A2_HP_EN_L_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Mute", TPA6130A2_REG_VOL_MUTE,
+ TPA6130A2_HP_EN_R_SHIFT, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Left PGA", TPA6130A2_REG_CONTROL,
+ TPA6130A2_HP_EN_L_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right PGA", TPA6130A2_REG_CONTROL,
+ TPA6130A2_HP_EN_R_SHIFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Power", TPA6130A2_REG_CONTROL,
+ TPA6130A2_SWS_SHIFT, 1, tpa6130a2_power_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+};
- return ret;
-}
-EXPORT_SYMBOL_GPL(tpa6130a2_stereo_enable);
+static const struct snd_soc_dapm_route tpa6130a2_dapm_routes[] = {
+ { "Left PGA", NULL, "LEFTIN" },
+ { "Right PGA", NULL, "RIGHTIN" },
-int tpa6130a2_add_controls(struct snd_soc_codec *codec)
-{
- struct tpa6130a2_data *data;
+ { "Left Mute", NULL, "Left PGA" },
+ { "Right Mute", NULL, "Right PGA" },
- if (tpa6130a2_client == NULL)
- return -ENODEV;
+ { "HPLEFT", NULL, "Left Mute" },
+ { "HPRIGHT", NULL, "Right Mute" },
- data = i2c_get_clientdata(tpa6130a2_client);
+ { "Left PGA", NULL, "Power" },
+ { "Right PGA", NULL, "Power" },
+};
- if (data->id == TPA6140A2)
- return snd_soc_add_codec_controls(codec, tpa6140a2_controls,
- ARRAY_SIZE(tpa6140a2_controls));
- else
- return snd_soc_add_codec_controls(codec, tpa6130a2_controls,
- ARRAY_SIZE(tpa6130a2_controls));
-}
-EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
+struct snd_soc_component_driver tpa6130a2_component_driver = {
+ .name = "tpa6130a2",
+ .probe = tpa6130a2_component_probe,
+ .dapm_widgets = tpa6130a2_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tpa6130a2_dapm_widgets),
+ .dapm_routes = tpa6130a2_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tpa6130a2_dapm_routes),
+};
+
+static const struct reg_default tpa6130a2_reg_defaults[] = {
+ { TPA6130A2_REG_CONTROL, TPA6130A2_SWS },
+ { TPA6130A2_REG_VOL_MUTE, TPA6130A2_MUTE_R | TPA6130A2_MUTE_L },
+};
+
+static const struct regmap_config tpa6130a2_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPA6130A2_REG_VERSION,
+ .reg_defaults = tpa6130a2_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tpa6130a2_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
static int tpa6130a2_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -374,6 +221,7 @@ static int tpa6130a2_probe(struct i2c_client *client,
struct tpa6130a2_platform_data *pdata = client->dev.platform_data;
struct device_node *np = client->dev.of_node;
const char *regulator;
+ unsigned int version;
int ret;
dev = &client->dev;
@@ -382,6 +230,12 @@ static int tpa6130a2_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
+ data->dev = dev;
+
+ data->regmap = devm_regmap_init_i2c(client, &tpa6130a2_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
if (pdata) {
data->power_gpio = pdata->power_gpio;
} else if (np) {
@@ -392,26 +246,17 @@ static int tpa6130a2_probe(struct i2c_client *client,
return -ENODEV;
}
- tpa6130a2_client = client;
-
- i2c_set_clientdata(tpa6130a2_client, data);
+ i2c_set_clientdata(client, data);
data->id = id->driver_data;
- mutex_init(&data->mutex);
-
- /* Set default register values */
- data->regs[TPA6130A2_REG_CONTROL] = TPA6130A2_SWS;
- data->regs[TPA6130A2_REG_VOL_MUTE] = TPA6130A2_MUTE_R |
- TPA6130A2_MUTE_L;
-
if (data->power_gpio >= 0) {
ret = devm_gpio_request(dev, data->power_gpio,
"tpa6130a2 enable");
if (ret < 0) {
dev_err(dev, "Failed to request power GPIO (%d)\n",
data->power_gpio);
- goto err_gpio;
+ return ret;
}
gpio_direction_output(data->power_gpio, 0);
}
@@ -432,39 +277,27 @@ static int tpa6130a2_probe(struct i2c_client *client,
if (IS_ERR(data->supply)) {
ret = PTR_ERR(data->supply);
dev_err(dev, "Failed to request supply: %d\n", ret);
- goto err_gpio;
+ return ret;
}
- ret = tpa6130a2_power(1);
+ ret = tpa6130a2_power(data, true);
if (ret != 0)
- goto err_gpio;
+ return ret;
/* Read version */
- ret = tpa6130a2_i2c_read(TPA6130A2_REG_VERSION) &
- TPA6130A2_VERSION_MASK;
- if ((ret != 1) && (ret != 2))
- dev_warn(dev, "UNTESTED version detected (%d)\n", ret);
+ regmap_read(data->regmap, TPA6130A2_REG_VERSION, &version);
+ version &= TPA6130A2_VERSION_MASK;
+ if ((version != 1) && (version != 2))
+ dev_warn(dev, "UNTESTED version detected (%d)\n", version);
/* Disable the chip */
- ret = tpa6130a2_power(0);
+ ret = tpa6130a2_power(data, false);
if (ret != 0)
- goto err_gpio;
-
- return 0;
-
-err_gpio:
- tpa6130a2_client = NULL;
+ return ret;
- return ret;
-}
-
-static int tpa6130a2_remove(struct i2c_client *client)
-{
- tpa6130a2_power(0);
- tpa6130a2_client = NULL;
-
- return 0;
+ return devm_snd_soc_register_component(&client->dev,
+ &tpa6130a2_component_driver, NULL, 0);
}
static const struct i2c_device_id tpa6130a2_id[] = {
@@ -489,7 +322,6 @@ static struct i2c_driver tpa6130a2_i2c_driver = {
.of_match_table = of_match_ptr(tpa6130a2_of_match),
},
.probe = tpa6130a2_probe,
- .remove = tpa6130a2_remove,
.id_table = tpa6130a2_id,
};
diff --git a/sound/soc/codecs/tpa6130a2.h b/sound/soc/codecs/tpa6130a2.h
index 417444020ba6..f19cad5d4172 100644
--- a/sound/soc/codecs/tpa6130a2.h
+++ b/sound/soc/codecs/tpa6130a2.h
@@ -30,19 +30,20 @@
#define TPA6130A2_REG_OUT_IMPEDANCE 0x03
#define TPA6130A2_REG_VERSION 0x04
-#define TPA6130A2_CACHEREGNUM (TPA6130A2_REG_VERSION + 1)
-
/* Register bits */
/* TPA6130A2_REG_CONTROL (0x01) */
-#define TPA6130A2_SWS (0x01 << 0)
+#define TPA6130A2_SWS_SHIFT 0
+#define TPA6130A2_SWS (0x01 << TPA6130A2_SWS_SHIFT)
#define TPA6130A2_TERMAL (0x01 << 1)
#define TPA6130A2_MODE(x) (x << 4)
#define TPA6130A2_MODE_STEREO (0x00)
#define TPA6130A2_MODE_DUAL_MONO (0x01)
#define TPA6130A2_MODE_BRIDGE (0x02)
#define TPA6130A2_MODE_MASK (0x03)
-#define TPA6130A2_HP_EN_R (0x01 << 6)
-#define TPA6130A2_HP_EN_L (0x01 << 7)
+#define TPA6130A2_HP_EN_R_SHIFT 6
+#define TPA6130A2_HP_EN_R (0x01 << TPA6130A2_HP_EN_R_SHIFT)
+#define TPA6130A2_HP_EN_L_SHIFT 7
+#define TPA6130A2_HP_EN_L (0x01 << TPA6130A2_HP_EN_L_SHIFT)
/* TPA6130A2_REG_VOL_MUTE (0x02) */
#define TPA6130A2_VOLUME(x) ((x & 0x3f) << 0)
@@ -56,7 +57,4 @@
/* TPA6130A2_REG_VERSION (0x04) */
#define TPA6130A2_VERSION_MASK (0x0f)
-extern int tpa6130a2_add_controls(struct snd_soc_codec *codec);
-extern int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable);
-
#endif /* __TPA6130A2_H__ */
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index e7fe6b7b95b7..846deed6af41 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1713,6 +1713,7 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
};
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index d54f1b46c9ec..156547026a40 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1104,6 +1104,11 @@ SND_SOC_DAPM_INPUT("IN4R"),
SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"),
+SND_SOC_DAPM_OUTPUT("DSP Voice Trigger"),
+
+SND_SOC_DAPM_SWITCH("DSP3 Voice Trigger", SND_SOC_NOPM, 2, 0,
+ &arizona_voice_trigger_switch[2]),
+
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
0, NULL, 0, wm5110_in_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
@@ -1998,10 +2003,16 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
+ { "DRC2 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
{ "DRC2 Signal Activity", NULL, "DRC2L" },
{ "DRC2 Signal Activity", NULL, "DRC2R" },
+
+ { "DSP Voice Trigger", NULL, "SYSCLK" },
+ { "DSP Voice Trigger", NULL, "DSP3 Voice Trigger" },
+ { "DSP3 Voice Trigger", "Switch", "DSP3" },
};
static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
@@ -2223,6 +2234,7 @@ static irqreturn_t wm5110_adsp2_irq(int irq, void *data)
{
struct wm5110_priv *priv = data;
struct arizona *arizona = priv->core.arizona;
+ struct arizona_voice_trigger_info info;
int serviced = 0;
int i, ret;
@@ -2230,6 +2242,12 @@ static irqreturn_t wm5110_adsp2_irq(int irq, void *data)
ret = wm_adsp_compr_handle_irq(&priv->core.adsp[i]);
if (ret != -ENODEV)
serviced++;
+ if (ret == WM_ADSP_COMPR_VOICE_TRIGGER) {
+ info.core = i;
+ arizona_call_notifiers(arizona,
+ ARIZONA_NOTIFY_VOICE_TRIGGER,
+ &info);
+ }
}
if (!serviced) {
@@ -2252,6 +2270,7 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
arizona_init_spk(codec);
arizona_init_gpio(codec);
arizona_init_mono(codec);
+ arizona_init_notifiers(codec);
ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
"ADSP2 Compressed IRQ", wm5110_adsp2_irq,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 4bcf5f8ece50..d18261a44256 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -358,6 +358,9 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
case 24:
iface |= 0x0008;
break;
+ case 32:
+ iface |= 0x000c;
+ break;
}
wm8731_set_deemph(codec);
@@ -541,7 +544,7 @@ static int wm8731_startup(struct snd_pcm_substream *substream,
#define WM8731_RATES SNDRV_PCM_RATE_8000_96000
#define WM8731_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
- SNDRV_PCM_FMTBIT_S24_LE)
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops wm8731_dai_ops = {
.startup = wm8731_startup,
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 6f1024f48b19..cdcc91282e8a 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -32,7 +32,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -486,7 +485,7 @@ SND_SOC_DAPM_DAC("Voice DAC", "Voice Playback", WM8753_PWR1, 4, 0),
SND_SOC_DAPM_OUTPUT("MONO1"),
SND_SOC_DAPM_MUX("Mono 2 Mux", SND_SOC_NOPM, 0, 0, &wm8753_mono2_controls),
SND_SOC_DAPM_OUTPUT("MONO2"),
-SND_SOC_DAPM_MIXER("Out3 Left + Right", -1, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Out3 Left + Right", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MUX("Out3 Mux", SND_SOC_NOPM, 0, 0, &wm8753_out3_controls),
SND_SOC_DAPM_PGA("Out 3", WM8753_PWR3, 4, 0, NULL, 0),
SND_SOC_DAPM_OUTPUT("OUT3"),
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 6ac76fe116b0..7347abff4b2c 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1,10 +1,13 @@
/*
- * wm8985.c -- WM8985 ALSA SoC Audio driver
+ * wm8985.c -- WM8985 / WM8758 ALSA SoC Audio driver
*
* Copyright 2010 Wolfson Microelectronics plc
- *
* Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
*
+ * WM8758 support:
+ * Copyright: 2016 Barix AG
+ * Author: Petr Kulhavy <petr@barix.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -40,6 +43,11 @@ static const char *wm8985_supply_names[WM8985_NUM_SUPPLIES] = {
"AVDD2"
};
+enum wm8985_type {
+ WM8985,
+ WM8758,
+};
+
static const struct reg_default wm8985_reg_defaults[] = {
{ 1, 0x0000 }, /* R1 - Power management 1 */
{ 2, 0x0000 }, /* R2 - Power management 2 */
@@ -181,6 +189,7 @@ static const int volume_update_regs[] = {
struct wm8985_priv {
struct regmap *regmap;
struct regulator_bulk_data supplies[WM8985_NUM_SUPPLIES];
+ enum wm8985_type dev_type;
unsigned int sysclk;
unsigned int bclk;
};
@@ -289,7 +298,7 @@ static const char *depth_3d_text[] = {
};
static SOC_ENUM_SINGLE_DECL(depth_3d, WM8985_3D_CONTROL, 0, depth_3d_text);
-static const struct snd_kcontrol_new wm8985_snd_controls[] = {
+static const struct snd_kcontrol_new wm8985_common_snd_controls[] = {
SOC_SINGLE("Digital Loopback Switch", WM8985_COMPANDING_CONTROL,
0, 1, 0),
@@ -355,10 +364,6 @@ static const struct snd_kcontrol_new wm8985_snd_controls[] = {
SOC_ENUM("High Pass Filter Mode", filter_mode),
SOC_SINGLE("High Pass Filter Cutoff", WM8985_ADC_CONTROL, 4, 7, 0),
- SOC_DOUBLE_R_TLV("Aux Bypass Volume",
- WM8985_LEFT_MIXER_CTRL, WM8985_RIGHT_MIXER_CTRL, 6, 7, 0,
- aux_tlv),
-
SOC_DOUBLE_R_TLV("Input PGA Bypass Volume",
WM8985_LEFT_MIXER_CTRL, WM8985_RIGHT_MIXER_CTRL, 2, 7, 0,
bypass_tlv),
@@ -379,20 +384,30 @@ static const struct snd_kcontrol_new wm8985_snd_controls[] = {
SOC_SINGLE_TLV("EQ5 Volume", WM8985_EQ5_HIGH_SHELF, 0, 24, 1, eq_tlv),
SOC_ENUM("3D Depth", depth_3d),
+};
+
+static const struct snd_kcontrol_new wm8985_specific_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("Aux Bypass Volume",
+ WM8985_LEFT_MIXER_CTRL, WM8985_RIGHT_MIXER_CTRL, 6, 7, 0,
+ aux_tlv),
SOC_ENUM("Speaker Mode", speaker_mode)
};
static const struct snd_kcontrol_new left_out_mixer[] = {
SOC_DAPM_SINGLE("Line Switch", WM8985_LEFT_MIXER_CTRL, 1, 1, 0),
- SOC_DAPM_SINGLE("Aux Switch", WM8985_LEFT_MIXER_CTRL, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Switch", WM8985_LEFT_MIXER_CTRL, 0, 1, 0),
+
+ /* --- WM8985 only --- */
+ SOC_DAPM_SINGLE("Aux Switch", WM8985_LEFT_MIXER_CTRL, 5, 1, 0),
};
static const struct snd_kcontrol_new right_out_mixer[] = {
SOC_DAPM_SINGLE("Line Switch", WM8985_RIGHT_MIXER_CTRL, 1, 1, 0),
- SOC_DAPM_SINGLE("Aux Switch", WM8985_RIGHT_MIXER_CTRL, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Switch", WM8985_RIGHT_MIXER_CTRL, 0, 1, 0),
+
+ /* --- WM8985 only --- */
+ SOC_DAPM_SINGLE("Aux Switch", WM8985_RIGHT_MIXER_CTRL, 5, 1, 0),
};
static const struct snd_kcontrol_new left_input_mixer[] = {
@@ -410,6 +425,8 @@ static const struct snd_kcontrol_new right_input_mixer[] = {
static const struct snd_kcontrol_new left_boost_mixer[] = {
SOC_DAPM_SINGLE_TLV("L2 Volume", WM8985_LEFT_ADC_BOOST_CTRL,
4, 7, 0, boost_tlv),
+
+ /* --- WM8985 only --- */
SOC_DAPM_SINGLE_TLV("AUXL Volume", WM8985_LEFT_ADC_BOOST_CTRL,
0, 7, 0, boost_tlv)
};
@@ -417,11 +434,13 @@ static const struct snd_kcontrol_new left_boost_mixer[] = {
static const struct snd_kcontrol_new right_boost_mixer[] = {
SOC_DAPM_SINGLE_TLV("R2 Volume", WM8985_RIGHT_ADC_BOOST_CTRL,
4, 7, 0, boost_tlv),
+
+ /* --- WM8985 only --- */
SOC_DAPM_SINGLE_TLV("AUXR Volume", WM8985_RIGHT_ADC_BOOST_CTRL,
0, 7, 0, boost_tlv)
};
-static const struct snd_soc_dapm_widget wm8985_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget wm8985_common_dapm_widgets[] = {
SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8985_POWER_MANAGEMENT_3,
0, 0),
SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8985_POWER_MANAGEMENT_3,
@@ -431,21 +450,11 @@ static const struct snd_soc_dapm_widget wm8985_dapm_widgets[] = {
SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8985_POWER_MANAGEMENT_2,
1, 0),
- SND_SOC_DAPM_MIXER("Left Output Mixer", WM8985_POWER_MANAGEMENT_3,
- 2, 0, left_out_mixer, ARRAY_SIZE(left_out_mixer)),
- SND_SOC_DAPM_MIXER("Right Output Mixer", WM8985_POWER_MANAGEMENT_3,
- 3, 0, right_out_mixer, ARRAY_SIZE(right_out_mixer)),
-
SND_SOC_DAPM_MIXER("Left Input Mixer", WM8985_POWER_MANAGEMENT_2,
2, 0, left_input_mixer, ARRAY_SIZE(left_input_mixer)),
SND_SOC_DAPM_MIXER("Right Input Mixer", WM8985_POWER_MANAGEMENT_2,
3, 0, right_input_mixer, ARRAY_SIZE(right_input_mixer)),
- SND_SOC_DAPM_MIXER("Left Boost Mixer", WM8985_POWER_MANAGEMENT_2,
- 4, 0, left_boost_mixer, ARRAY_SIZE(left_boost_mixer)),
- SND_SOC_DAPM_MIXER("Right Boost Mixer", WM8985_POWER_MANAGEMENT_2,
- 5, 0, right_boost_mixer, ARRAY_SIZE(right_boost_mixer)),
-
SND_SOC_DAPM_PGA("Left Capture PGA", WM8985_LEFT_INP_PGA_GAIN_CTRL,
6, 1, NULL, 0),
SND_SOC_DAPM_PGA("Right Capture PGA", WM8985_RIGHT_INP_PGA_GAIN_CTRL,
@@ -468,8 +477,6 @@ static const struct snd_soc_dapm_widget wm8985_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("LIP"),
SND_SOC_DAPM_INPUT("RIN"),
SND_SOC_DAPM_INPUT("RIP"),
- SND_SOC_DAPM_INPUT("AUXL"),
- SND_SOC_DAPM_INPUT("AUXR"),
SND_SOC_DAPM_INPUT("L2"),
SND_SOC_DAPM_INPUT("R2"),
SND_SOC_DAPM_OUTPUT("HPL"),
@@ -478,13 +485,42 @@ static const struct snd_soc_dapm_widget wm8985_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("SPKR")
};
-static const struct snd_soc_dapm_route wm8985_dapm_routes[] = {
+static const struct snd_soc_dapm_widget wm8985_dapm_widgets[] = {
+ SND_SOC_DAPM_MIXER("Left Output Mixer", WM8985_POWER_MANAGEMENT_3,
+ 2, 0, left_out_mixer, ARRAY_SIZE(left_out_mixer)),
+ SND_SOC_DAPM_MIXER("Right Output Mixer", WM8985_POWER_MANAGEMENT_3,
+ 3, 0, right_out_mixer, ARRAY_SIZE(right_out_mixer)),
+
+ SND_SOC_DAPM_MIXER("Left Boost Mixer", WM8985_POWER_MANAGEMENT_2,
+ 4, 0, left_boost_mixer, ARRAY_SIZE(left_boost_mixer)),
+ SND_SOC_DAPM_MIXER("Right Boost Mixer", WM8985_POWER_MANAGEMENT_2,
+ 5, 0, right_boost_mixer, ARRAY_SIZE(right_boost_mixer)),
+
+ SND_SOC_DAPM_INPUT("AUXL"),
+ SND_SOC_DAPM_INPUT("AUXR"),
+};
+
+static const struct snd_soc_dapm_widget wm8758_dapm_widgets[] = {
+ SND_SOC_DAPM_MIXER("Left Output Mixer", WM8985_POWER_MANAGEMENT_3,
+ 2, 0, left_out_mixer,
+ ARRAY_SIZE(left_out_mixer) - 1),
+ SND_SOC_DAPM_MIXER("Right Output Mixer", WM8985_POWER_MANAGEMENT_3,
+ 3, 0, right_out_mixer,
+ ARRAY_SIZE(right_out_mixer) - 1),
+
+ SND_SOC_DAPM_MIXER("Left Boost Mixer", WM8985_POWER_MANAGEMENT_2,
+ 4, 0, left_boost_mixer,
+ ARRAY_SIZE(left_boost_mixer) - 1),
+ SND_SOC_DAPM_MIXER("Right Boost Mixer", WM8985_POWER_MANAGEMENT_2,
+ 5, 0, right_boost_mixer,
+ ARRAY_SIZE(right_boost_mixer) - 1),
+};
+
+static const struct snd_soc_dapm_route wm8985_common_dapm_routes[] = {
{ "Right Output Mixer", "PCM Switch", "Right DAC" },
- { "Right Output Mixer", "Aux Switch", "AUXR" },
{ "Right Output Mixer", "Line Switch", "Right Boost Mixer" },
{ "Left Output Mixer", "PCM Switch", "Left DAC" },
- { "Left Output Mixer", "Aux Switch", "AUXL" },
{ "Left Output Mixer", "Line Switch", "Left Boost Mixer" },
{ "Right Headphone Out", NULL, "Right Output Mixer" },
@@ -501,13 +537,11 @@ static const struct snd_soc_dapm_route wm8985_dapm_routes[] = {
{ "Right ADC", NULL, "Right Boost Mixer" },
- { "Right Boost Mixer", "AUXR Volume", "AUXR" },
{ "Right Boost Mixer", NULL, "Right Capture PGA" },
{ "Right Boost Mixer", "R2 Volume", "R2" },
{ "Left ADC", NULL, "Left Boost Mixer" },
- { "Left Boost Mixer", "AUXL Volume", "AUXL" },
{ "Left Boost Mixer", NULL, "Left Capture PGA" },
{ "Left Boost Mixer", "L2 Volume", "L2" },
@@ -522,6 +556,38 @@ static const struct snd_soc_dapm_route wm8985_dapm_routes[] = {
{ "Left Input Mixer", "MicN Switch", "LIN" },
{ "Left Input Mixer", "MicP Switch", "LIP" },
};
+static const struct snd_soc_dapm_route wm8985_aux_dapm_routes[] = {
+ { "Right Output Mixer", "Aux Switch", "AUXR" },
+ { "Left Output Mixer", "Aux Switch", "AUXL" },
+
+ { "Right Boost Mixer", "AUXR Volume", "AUXR" },
+ { "Left Boost Mixer", "AUXL Volume", "AUXL" },
+};
+
+static int wm8985_add_widgets(struct snd_soc_codec *codec)
+{
+ struct wm8985_priv *wm8985 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ switch (wm8985->dev_type) {
+ case WM8758:
+ snd_soc_dapm_new_controls(dapm, wm8758_dapm_widgets,
+ ARRAY_SIZE(wm8758_dapm_widgets));
+ break;
+
+ case WM8985:
+ snd_soc_add_codec_controls(codec, wm8985_specific_snd_controls,
+ ARRAY_SIZE(wm8985_specific_snd_controls));
+
+ snd_soc_dapm_new_controls(dapm, wm8985_dapm_widgets,
+ ARRAY_SIZE(wm8985_dapm_widgets));
+ snd_soc_dapm_add_routes(dapm, wm8985_aux_dapm_routes,
+ ARRAY_SIZE(wm8985_aux_dapm_routes));
+ break;
+ }
+
+ return 0;
+}
static int eqmode_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -999,6 +1065,8 @@ static int wm8985_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8985_BIAS_CTRL, WM8985_BIASCUT,
WM8985_BIASCUT);
+ wm8985_add_widgets(codec);
+
return 0;
err_reg_enable:
@@ -1042,12 +1110,12 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8985 = {
.set_bias_level = wm8985_set_bias_level,
.suspend_bias_off = true,
- .controls = wm8985_snd_controls,
- .num_controls = ARRAY_SIZE(wm8985_snd_controls),
- .dapm_widgets = wm8985_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(wm8985_dapm_widgets),
- .dapm_routes = wm8985_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(wm8985_dapm_routes),
+ .controls = wm8985_common_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8985_common_snd_controls),
+ .dapm_widgets = wm8985_common_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8985_common_dapm_widgets),
+ .dapm_routes = wm8985_common_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8985_common_dapm_routes),
};
static const struct regmap_config wm8985_regmap = {
@@ -1074,6 +1142,8 @@ static int wm8985_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, wm8985);
+ wm8985->dev_type = WM8985;
+
wm8985->regmap = devm_regmap_init_spi(spi, &wm8985_regmap);
if (IS_ERR(wm8985->regmap)) {
ret = PTR_ERR(wm8985->regmap);
@@ -1115,6 +1185,8 @@ static int wm8985_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8985);
+ wm8985->dev_type = id->driver_data;
+
wm8985->regmap = devm_regmap_init_i2c(i2c, &wm8985_regmap);
if (IS_ERR(wm8985->regmap)) {
ret = PTR_ERR(wm8985->regmap);
@@ -1135,7 +1207,8 @@ static int wm8985_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id wm8985_i2c_id[] = {
- { "wm8985", 0 },
+ { "wm8985", WM8985 },
+ { "wm8758", WM8758 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8985_i2c_id);
@@ -1183,6 +1256,6 @@ static void __exit wm8985_exit(void)
}
module_exit(wm8985_exit);
-MODULE_DESCRIPTION("ASoC WM8985 driver");
+MODULE_DESCRIPTION("ASoC WM8985 / WM8758 driver");
MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8985.h b/sound/soc/codecs/wm8985.h
index 2e71ff507638..41b1048e3c97 100644
--- a/sound/soc/codecs/wm8985.h
+++ b/sound/soc/codecs/wm8985.h
@@ -290,6 +290,9 @@
#define WM8985_GPIO1GPD_MASK 0x0040 /* GPIO1GPD */
#define WM8985_GPIO1GPD_SHIFT 6 /* GPIO1GPD */
#define WM8985_GPIO1GPD_WIDTH 1 /* GPIO1GPD */
+#define WM8758_OPCLKDIV_MASK 0x0030 /* OPCLKDIV - [1:0] */
+#define WM8758_OPCLKDIV_SHIFT 4 /* OPCLKDIV - [1:0] */
+#define WM8758_OPCLKDIV_WIDTH 2 /* OPCLKDIV - [1:0] */
#define WM8985_GPIO1POL 0x0008 /* GPIO1POL */
#define WM8985_GPIO1POL_MASK 0x0008 /* GPIO1POL */
#define WM8985_GPIO1POL_SHIFT 3 /* GPIO1POL */
@@ -301,6 +304,12 @@
/*
* R9 (0x09) - Jack Detect Control 1
*/
+#define WM8758_JD_VMID1_MASK 0x0100 /* JD_VMID1 */
+#define WM8758_JD_VMID1_SHIFT 8 /* JD_VMID1 */
+#define WM8758_JD_VMID1_WIDTH 1 /* JD_VMID1 */
+#define WM8758_JD_VMID0_MASK 0x0080 /* JD_VMID0 */
+#define WM8758_JD_VMID0_SHIFT 7 /* JD_VMID0 */
+#define WM8758_JD_VMID0_WIDTH 1 /* JD_VMID0 */
#define WM8985_JD_EN 0x0040 /* JD_EN */
#define WM8985_JD_EN_MASK 0x0040 /* JD_EN */
#define WM8985_JD_EN_SHIFT 6 /* JD_EN */
@@ -649,6 +658,12 @@
#define WM8985_OUT4_2LNR_MASK 0x0020 /* OUT4_2LNR */
#define WM8985_OUT4_2LNR_SHIFT 5 /* OUT4_2LNR */
#define WM8985_OUT4_2LNR_WIDTH 1 /* OUT4_2LNR */
+#define WM8758_VMIDTOG_MASK 0x0010 /* VMIDTOG */
+#define WM8758_VMIDTOG_SHIFT 4 /* VMIDTOG */
+#define WM8758_VMIDTOG_WIDTH 1 /* VMIDTOG */
+#define WM8758_OUT2DEL_MASK 0x0008 /* OUT2DEL */
+#define WM8758_OUT2DEL_SHIFT 3 /* OUT2DEL */
+#define WM8758_OUT2DEL_WIDTH 1 /* OUT2DEL */
#define WM8985_POBCTRL 0x0004 /* POBCTRL */
#define WM8985_POBCTRL_MASK 0x0004 /* POBCTRL */
#define WM8985_POBCTRL_SHIFT 2 /* POBCTRL */
@@ -684,6 +699,9 @@
#define WM8985_BEEPVOL_MASK 0x000E /* BEEPVOL - [3:1] */
#define WM8985_BEEPVOL_SHIFT 1 /* BEEPVOL - [3:1] */
#define WM8985_BEEPVOL_WIDTH 3 /* BEEPVOL - [3:1] */
+#define WM8758_DELEN2_MASK 0x0004 /* DELEN2 */
+#define WM8758_DELEN2_SHIFT 2 /* DELEN2 */
+#define WM8758_DELEN2_WIDTH 1 /* DELEN2 */
#define WM8985_BEEPEN 0x0001 /* BEEPEN */
#define WM8985_BEEPEN_MASK 0x0001 /* BEEPEN */
#define WM8985_BEEPEN_SHIFT 0 /* BEEPEN */
@@ -790,6 +808,14 @@
/*
* R49 (0x31) - Output ctrl
*/
+#define WM8758_HP_COM 0x0100 /* HP_COM */
+#define WM8758_HP_COM_MASK 0x0100 /* HP_COM */
+#define WM8758_HP_COM_SHIFT 8 /* HP_COM */
+#define WM8758_HP_COM_WIDTH 1 /* HP_COM */
+#define WM8758_LINE_COM 0x0080 /* LINE_COM */
+#define WM8758_LINE_COM_MASK 0x0080 /* LINE_COM */
+#define WM8758_LINE_COM_SHIFT 7 /* LINE_COM */
+#define WM8758_LINE_COM_WIDTH 1 /* LINE_COM */
#define WM8985_DACL2RMIX 0x0040 /* DACL2RMIX */
#define WM8985_DACL2RMIX_MASK 0x0040 /* DACL2RMIX */
#define WM8985_DACL2RMIX_SHIFT 6 /* DACL2RMIX */
@@ -806,6 +832,14 @@
#define WM8985_OUT3BOOST_MASK 0x0008 /* OUT3BOOST */
#define WM8985_OUT3BOOST_SHIFT 3 /* OUT3BOOST */
#define WM8985_OUT3BOOST_WIDTH 1 /* OUT3BOOST */
+#define WM8758_OUT4ENDEL 0x0010 /* OUT4ENDEL */
+#define WM8758_OUT4ENDEL_MASK 0x0010 /* OUT4ENDEL */
+#define WM8758_OUT4ENDEL_SHIFT 4 /* OUT4ENDEL */
+#define WM8758_OUT4ENDEL_WIDTH 1 /* OUT4ENDEL */
+#define WM8758_OUT3ENDEL 0x0008 /* OUT3ENDEL */
+#define WM8758_OUT3ENDEL_MASK 0x0008 /* OUT3ENDEL */
+#define WM8758_OUT3ENDEL_SHIFT 3 /* OUT3ENDEL */
+#define WM8758_OUT3ENDEL_WIDTH 1 /* OUT3ENDEL */
#define WM8985_TSOPCTRL 0x0004 /* TSOPCTRL */
#define WM8985_TSOPCTRL_MASK 0x0004 /* TSOPCTRL */
#define WM8985_TSOPCTRL_SHIFT 2 /* TSOPCTRL */
@@ -1021,6 +1055,10 @@
#define WM8985_HALFIPBIAS_MASK 0x0080 /* HALFIPBIAS */
#define WM8985_HALFIPBIAS_SHIFT 7 /* HALFIPBIAS */
#define WM8985_HALFIPBIAS_WIDTH 1 /* HALFIPBIAS */
+#define WM8758_HALFIPBIAS 0x0040 /* HALFI_IPGA */
+#define WM8758_HALFI_IPGA_MASK 0x0040 /* HALFI_IPGA */
+#define WM8758_HALFI_IPGA_SHIFT 6 /* HALFI_IPGA */
+#define WM8758_HALFI_IPGA_WIDTH 1 /* HALFI_IPGA */
#define WM8985_VBBIASTST_MASK 0x0060 /* VBBIASTST - [6:5] */
#define WM8985_VBBIASTST_SHIFT 5 /* VBBIASTST - [6:5] */
#define WM8985_VBBIASTST_WIDTH 2 /* VBBIASTST - [6:5] */
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 449f66636205..3a5c896a2d13 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1166,6 +1166,7 @@ static const struct snd_soc_dapm_route wm8998_dapm_routes[] = {
{ "MICSUPP", NULL, "SYSCLK" },
+ { "DRC1 Signal Activity", NULL, "SYSCLK" },
{ "DRC1 Signal Activity", NULL, "DRC1L" },
{ "DRC1 Signal Activity", NULL, "DRC1R" },
};
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index a07bd7c2c587..21fbe7d07063 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -394,6 +394,7 @@ static const struct {
int compr_direction;
int num_caps;
const struct wm_adsp_fw_caps *caps;
+ bool voice_trigger;
} wm_adsp_fw[WM_ADSP_NUM_FW] = {
[WM_ADSP_FW_MBC_VSS] = { .file = "mbc-vss" },
[WM_ADSP_FW_HIFI] = { .file = "hifi" },
@@ -406,6 +407,7 @@ static const struct {
.compr_direction = SND_COMPRESS_CAPTURE,
.num_caps = ARRAY_SIZE(ctrl_caps),
.caps = ctrl_caps,
+ .voice_trigger = true,
},
[WM_ADSP_FW_ASR] = { .file = "asr" },
[WM_ADSP_FW_TRACE] = {
@@ -2366,13 +2368,15 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
dsp->running = false;
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA | ADSP2_CORE_ENA |
- ADSP2_START, 0);
+ ADSP2_CORE_ENA | ADSP2_START, 0);
/* Make sure DMAs are quiesced */
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0);
- regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, 0);
list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
@@ -2998,6 +3002,9 @@ int wm_adsp_compr_handle_irq(struct wm_adsp *dsp)
goto out;
}
+ if (wm_adsp_fw[dsp->fw].voice_trigger && buf->irq_count == 2)
+ ret = WM_ADSP_COMPR_VOICE_TRIGGER;
+
out_notify:
if (compr && compr->stream)
snd_compr_fragment_elapsed(compr->stream);
@@ -3037,12 +3044,8 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
buf = compr->buf;
- if (!compr->buf) {
- ret = -ENXIO;
- goto out;
- }
-
- if (compr->buf->error) {
+ if (!compr->buf || compr->buf->error) {
+ snd_compr_stop_error(stream, SNDRV_PCM_STATE_XRUN);
ret = -EIO;
goto out;
}
@@ -3060,8 +3063,12 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
*/
if (buf->avail < wm_adsp_compr_frag_words(compr)) {
ret = wm_adsp_buffer_get_error(buf);
- if (ret < 0)
+ if (ret < 0) {
+ if (compr->buf->error)
+ snd_compr_stop_error(stream,
+ SNDRV_PCM_STATE_XRUN);
goto out;
+ }
ret = wm_adsp_buffer_reenable_irq(buf);
if (ret < 0) {
@@ -3156,11 +3163,10 @@ static int wm_adsp_compr_read(struct wm_adsp_compr *compr,
adsp_dbg(dsp, "Requested read of %zu bytes\n", count);
- if (!compr->buf)
- return -ENXIO;
-
- if (compr->buf->error)
+ if (!compr->buf || compr->buf->error) {
+ snd_compr_stop_error(compr->stream, SNDRV_PCM_STATE_XRUN);
return -EIO;
+ }
count /= WM_ADSP_DATA_WORD_SIZE;
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index feb61e2c4bb4..be3b5bcb7f17 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -19,6 +19,10 @@
#include "wmfw.h"
+/* Return values for wm_adsp_compr_handle_irq */
+#define WM_ADSP_COMPR_OK 0
+#define WM_ADSP_COMPR_VOICE_TRIGGER 1
+
struct wm_adsp_region {
int type;
unsigned int base;
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 237dc67002ef..05c2d33aa74d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1599,7 +1599,14 @@ static struct davinci_mcasp_pdata *davinci_mcasp_set_pdata_from_of(
pdata = pdev->dev.platform_data;
return pdata;
} else if (match) {
- pdata = (struct davinci_mcasp_pdata*) match->data;
+ pdata = devm_kmemdup(&pdev->dev, match->data, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata) {
+ dev_err(&pdev->dev,
+ "Failed to allocate memory for pdata\n");
+ ret = -ENOMEM;
+ return pdata;
+ }
} else {
/* control shouldn't reach here. something is wrong */
ret = -EINVAL;
diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
index d50e08517dce..c297efe43861 100644
--- a/sound/soc/dwc/Kconfig
+++ b/sound/soc/dwc/Kconfig
@@ -7,4 +7,13 @@ config SND_DESIGNWARE_I2S
Synopsys DesignWare I2S device. The device supports up to a
maximum of 8 channels each for playback and record.
+config SND_DESIGNWARE_PCM
+ tristate "PCM PIO extension for I2S driver"
+ depends on SND_DESIGNWARE_I2S
+ help
+ Say Y, M or N if you want to add a custom ALSA extension that registers
+ a PCM and uses PIO to transfer data.
+
+ This functionality is especially suited for I2S devices that don't have
+ DMA support.
diff --git a/sound/soc/dwc/Makefile b/sound/soc/dwc/Makefile
index 319371f690f4..38f1ca31c5fa 100644
--- a/sound/soc/dwc/Makefile
+++ b/sound/soc/dwc/Makefile
@@ -1,3 +1,5 @@
# SYNOPSYS Platform Support
obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_i2s.o
-
+ifdef CONFIG_SND_DESIGNWARE_PCM
+obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_pcm.o
+endif
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 0db69b7e9617..dc97f4349e66 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -24,90 +24,7 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
-
-/* common register for all channel */
-#define IER 0x000
-#define IRER 0x004
-#define ITER 0x008
-#define CER 0x00C
-#define CCR 0x010
-#define RXFFR 0x014
-#define TXFFR 0x018
-
-/* I2STxRxRegisters for all channels */
-#define LRBR_LTHR(x) (0x40 * x + 0x020)
-#define RRBR_RTHR(x) (0x40 * x + 0x024)
-#define RER(x) (0x40 * x + 0x028)
-#define TER(x) (0x40 * x + 0x02C)
-#define RCR(x) (0x40 * x + 0x030)
-#define TCR(x) (0x40 * x + 0x034)
-#define ISR(x) (0x40 * x + 0x038)
-#define IMR(x) (0x40 * x + 0x03C)
-#define ROR(x) (0x40 * x + 0x040)
-#define TOR(x) (0x40 * x + 0x044)
-#define RFCR(x) (0x40 * x + 0x048)
-#define TFCR(x) (0x40 * x + 0x04C)
-#define RFF(x) (0x40 * x + 0x050)
-#define TFF(x) (0x40 * x + 0x054)
-
-/* I2SCOMPRegisters */
-#define I2S_COMP_PARAM_2 0x01F0
-#define I2S_COMP_PARAM_1 0x01F4
-#define I2S_COMP_VERSION 0x01F8
-#define I2S_COMP_TYPE 0x01FC
-
-/*
- * Component parameter register fields - define the I2S block's
- * configuration.
- */
-#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25)
-#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22)
-#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19)
-#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16)
-#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9)
-#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7)
-#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6)
-#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5)
-#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4)
-#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2)
-#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0)
-
-#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10)
-#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7)
-#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3)
-#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0)
-
-/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */
-#define COMP_MAX_WORDSIZE (1 << 3)
-#define COMP_MAX_DATA_WIDTH (1 << 2)
-
-#define MAX_CHANNEL_NUM 8
-#define MIN_CHANNEL_NUM 2
-
-union dw_i2s_snd_dma_data {
- struct i2s_dma_data pd;
- struct snd_dmaengine_dai_dma_data dt;
-};
-
-struct dw_i2s_dev {
- void __iomem *i2s_base;
- struct clk *clk;
- int active;
- unsigned int capability;
- unsigned int quirks;
- unsigned int i2s_reg_comp1;
- unsigned int i2s_reg_comp2;
- struct device *dev;
- u32 ccr;
- u32 xfer_resolution;
- u32 fifo_th;
-
- /* data related to DMA transfers b/w i2s and DMAC */
- union dw_i2s_snd_dma_data play_dma_data;
- union dw_i2s_snd_dma_data capture_dma_data;
- struct i2s_clk_config_data config;
- int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
-};
+#include "local.h"
static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val)
{
@@ -145,51 +62,115 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
}
}
-static void i2s_start(struct dw_i2s_dev *dev,
- struct snd_pcm_substream *substream)
+static inline void i2s_disable_irqs(struct dw_i2s_dev *dev, u32 stream,
+ int chan_nr)
+{
+ u32 i, irq;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < (chan_nr / 2); i++) {
+ irq = i2s_read_reg(dev->i2s_base, IMR(i));
+ i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x30);
+ }
+ } else {
+ for (i = 0; i < (chan_nr / 2); i++) {
+ irq = i2s_read_reg(dev->i2s_base, IMR(i));
+ i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x03);
+ }
+ }
+}
+
+static inline void i2s_enable_irqs(struct dw_i2s_dev *dev, u32 stream,
+ int chan_nr)
{
- struct i2s_clk_config_data *config = &dev->config;
u32 i, irq;
- i2s_write_reg(dev->i2s_base, IER, 1);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- for (i = 0; i < (config->chan_nr / 2); i++) {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < (chan_nr / 2); i++) {
irq = i2s_read_reg(dev->i2s_base, IMR(i));
i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
}
- i2s_write_reg(dev->i2s_base, ITER, 1);
} else {
- for (i = 0; i < (config->chan_nr / 2); i++) {
+ for (i = 0; i < (chan_nr / 2); i++) {
irq = i2s_read_reg(dev->i2s_base, IMR(i));
i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
}
- i2s_write_reg(dev->i2s_base, IRER, 1);
+ }
+}
+
+static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
+{
+ struct dw_i2s_dev *dev = dev_id;
+ bool irq_valid = false;
+ u32 isr[4];
+ int i;
+
+ for (i = 0; i < 4; i++)
+ isr[i] = i2s_read_reg(dev->i2s_base, ISR(i));
+
+ i2s_clear_irqs(dev, SNDRV_PCM_STREAM_PLAYBACK);
+ i2s_clear_irqs(dev, SNDRV_PCM_STREAM_CAPTURE);
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * Check if TX fifo is empty. If empty fill FIFO with samples
+ * NOTE: Only two channels supported
+ */
+ if ((isr[i] & ISR_TXFE) && (i == 0) && dev->use_pio) {
+ dw_pcm_push_tx(dev);
+ irq_valid = true;
+ }
+
+ /* Data available. Record mode not supported in PIO mode */
+ if (isr[i] & ISR_RXDA)
+ irq_valid = true;
+
+ /* Error Handling: TX */
+ if (isr[i] & ISR_TXFO) {
+ dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
+ irq_valid = true;
+ }
+
+ /* Error Handling: RX */
+ if (isr[i] & ISR_RXFO) {
+ dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
+ irq_valid = true;
+ }
}
+ if (irq_valid)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static void i2s_start(struct dw_i2s_dev *dev,
+ struct snd_pcm_substream *substream)
+{
+ struct i2s_clk_config_data *config = &dev->config;
+
+ i2s_write_reg(dev->i2s_base, IER, 1);
+ i2s_enable_irqs(dev, substream->stream, config->chan_nr);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_write_reg(dev->i2s_base, ITER, 1);
+ else
+ i2s_write_reg(dev->i2s_base, IRER, 1);
+
i2s_write_reg(dev->i2s_base, CER, 1);
}
static void i2s_stop(struct dw_i2s_dev *dev,
struct snd_pcm_substream *substream)
{
- u32 i = 0, irq;
i2s_clear_irqs(dev, substream->stream);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
i2s_write_reg(dev->i2s_base, ITER, 0);
-
- for (i = 0; i < 4; i++) {
- irq = i2s_read_reg(dev->i2s_base, IMR(i));
- i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x30);
- }
- } else {
+ else
i2s_write_reg(dev->i2s_base, IRER, 0);
- for (i = 0; i < 4; i++) {
- irq = i2s_read_reg(dev->i2s_base, IMR(i));
- i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x03);
- }
- }
+ i2s_disable_irqs(dev, substream->stream, 8);
if (!dev->active) {
i2s_write_reg(dev->i2s_base, CER, 0);
@@ -223,7 +204,7 @@ static int dw_i2s_startup(struct snd_pcm_substream *substream,
static void dw_i2s_config(struct dw_i2s_dev *dev, int stream)
{
- u32 ch_reg, irq;
+ u32 ch_reg;
struct i2s_clk_config_data *config = &dev->config;
@@ -235,16 +216,12 @@ static void dw_i2s_config(struct dw_i2s_dev *dev, int stream)
dev->xfer_resolution);
i2s_write_reg(dev->i2s_base, TFCR(ch_reg),
dev->fifo_th - 1);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
} else {
i2s_write_reg(dev->i2s_base, RCR(ch_reg),
dev->xfer_resolution);
i2s_write_reg(dev->i2s_base, RFCR(ch_reg),
dev->fifo_th - 1);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
}
@@ -278,7 +255,7 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
break;
default:
- dev_err(dev->dev, "designware-i2s: unsuppted PCM fmt");
+ dev_err(dev->dev, "designware-i2s: unsupported PCM fmt");
return -EINVAL;
}
@@ -626,7 +603,7 @@ static int dw_i2s_probe(struct platform_device *pdev)
const struct i2s_platform_data *pdata = pdev->dev.platform_data;
struct dw_i2s_dev *dev;
struct resource *res;
- int ret;
+ int ret, irq;
struct snd_soc_dai_driver *dw_i2s_dai;
const char *clk_id;
@@ -651,6 +628,16 @@ static int dw_i2s_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq, i2s_irq_handler, 0,
+ pdev->name, dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+ }
+
dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
if (pdata) {
@@ -697,12 +684,24 @@ static int dw_i2s_probe(struct platform_device *pdev)
if (!pdata) {
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ dev_err(&pdev->dev,
+ "failed to register PCM, deferring probe\n");
+ return ret;
+ } else if (ret) {
dev_err(&pdev->dev,
- "Could not register PCM: %d\n", ret);
- goto err_clk_disable;
+ "Could not register DMA PCM: %d\n"
+ "falling back to PIO mode\n", ret);
+ ret = dw_pcm_register(pdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not register PIO PCM: %d\n",
+ ret);
+ goto err_clk_disable;
+ }
}
}
+
pm_runtime_enable(&pdev->dev);
return 0;
diff --git a/sound/soc/dwc/designware_pcm.c b/sound/soc/dwc/designware_pcm.c
new file mode 100644
index 000000000000..4a83a22fa3cb
--- /dev/null
+++ b/sound/soc/dwc/designware_pcm.c
@@ -0,0 +1,225 @@
+/*
+ * ALSA SoC Synopsys PIO PCM for I2S driver
+ *
+ * sound/soc/dwc/designware_pcm.c
+ *
+ * Copyright (C) 2016 Synopsys
+ * Jose Abreu <joabreu@synopsys.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/rcupdate.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "local.h"
+
+#define BUFFER_BYTES_MAX (3 * 2 * 8 * PERIOD_BYTES_MIN)
+#define PERIOD_BYTES_MIN 4096
+#define PERIODS_MIN 2
+
+#define dw_pcm_tx_fn(sample_bits) \
+static unsigned int dw_pcm_tx_##sample_bits(struct dw_i2s_dev *dev, \
+ struct snd_pcm_runtime *runtime, unsigned int tx_ptr, \
+ bool *period_elapsed) \
+{ \
+ const u##sample_bits (*p)[2] = (void *)runtime->dma_area; \
+ unsigned int period_pos = tx_ptr % runtime->period_size; \
+ int i; \
+\
+ for (i = 0; i < dev->fifo_th; i++) { \
+ iowrite32(p[tx_ptr][0], dev->i2s_base + LRBR_LTHR(0)); \
+ iowrite32(p[tx_ptr][1], dev->i2s_base + RRBR_RTHR(0)); \
+ period_pos++; \
+ if (++tx_ptr >= runtime->buffer_size) \
+ tx_ptr = 0; \
+ } \
+ *period_elapsed = period_pos >= runtime->period_size; \
+ return tx_ptr; \
+}
+
+dw_pcm_tx_fn(16);
+dw_pcm_tx_fn(32);
+
+#undef dw_pcm_tx_fn
+
+static const struct snd_pcm_hardware dw_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .rates = SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 32000,
+ .rate_max = 48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = BUFFER_BYTES_MAX,
+ .period_bytes_min = PERIOD_BYTES_MIN,
+ .period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN,
+ .periods_min = PERIODS_MIN,
+ .periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
+ .fifo_size = 16,
+};
+
+void dw_pcm_push_tx(struct dw_i2s_dev *dev)
+{
+ struct snd_pcm_substream *tx_substream;
+ bool tx_active, period_elapsed;
+
+ rcu_read_lock();
+ tx_substream = rcu_dereference(dev->tx_substream);
+ tx_active = tx_substream && snd_pcm_running(tx_substream);
+ if (tx_active) {
+ unsigned int tx_ptr = READ_ONCE(dev->tx_ptr);
+ unsigned int new_tx_ptr = dev->tx_fn(dev, tx_substream->runtime,
+ tx_ptr, &period_elapsed);
+ cmpxchg(&dev->tx_ptr, tx_ptr, new_tx_ptr);
+
+ if (period_elapsed)
+ snd_pcm_period_elapsed(tx_substream);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(dw_pcm_push_tx);
+
+static int dw_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ snd_soc_set_runtime_hwparams(substream, &dw_pcm_hardware);
+ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ runtime->private_data = dev;
+
+ return 0;
+}
+
+static int dw_pcm_close(struct snd_pcm_substream *substream)
+{
+ synchronize_rcu();
+ return 0;
+}
+
+static int dw_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dw_i2s_dev *dev = runtime->private_data;
+ int ret;
+
+ switch (params_channels(hw_params)) {
+ case 2:
+ break;
+ default:
+ dev_err(dev->dev, "invalid channels number\n");
+ return -EINVAL;
+ }
+
+ switch (params_format(hw_params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ dev->tx_fn = dw_pcm_tx_16;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ dev->tx_fn = dw_pcm_tx_32;
+ break;
+ default:
+ dev_err(dev->dev, "invalid format\n");
+ return -EINVAL;
+ }
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK) {
+ dev_err(dev->dev, "only playback is available\n");
+ return -EINVAL;
+ }
+
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
+ else
+ return 0;
+}
+
+static int dw_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dw_i2s_dev *dev = runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ WRITE_ONCE(dev->tx_ptr, 0);
+ rcu_assign_pointer(dev->tx_substream, substream);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ rcu_assign_pointer(dev->tx_substream, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static snd_pcm_uframes_t dw_pcm_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct dw_i2s_dev *dev = runtime->private_data;
+ snd_pcm_uframes_t pos = READ_ONCE(dev->tx_ptr);
+
+ return pos < runtime->buffer_size ? pos : 0;
+}
+
+static int dw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ size_t size = dw_pcm_hardware.buffer_bytes_max;
+
+ return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_CONTINUOUS,
+ snd_dma_continuous_data(GFP_KERNEL), size, size);
+}
+
+static void dw_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static const struct snd_pcm_ops dw_pcm_ops = {
+ .open = dw_pcm_open,
+ .close = dw_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = dw_pcm_hw_params,
+ .hw_free = dw_pcm_hw_free,
+ .trigger = dw_pcm_trigger,
+ .pointer = dw_pcm_pointer,
+};
+
+static const struct snd_soc_platform_driver dw_pcm_platform = {
+ .pcm_new = dw_pcm_new,
+ .pcm_free = dw_pcm_free,
+ .ops = &dw_pcm_ops,
+};
+
+int dw_pcm_register(struct platform_device *pdev)
+{
+ return devm_snd_soc_register_platform(&pdev->dev, &dw_pcm_platform);
+}
+EXPORT_SYMBOL_GPL(dw_pcm_register);
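The probe hunk above requests i2s_irq_handler for the PIO path, but the handler body itself is outside this section. A minimal sketch of how such a handler could hand TX FIFO service off to dw_pcm_push_tx(), assuming the ISR/TOR/ROR register layout from local.h below; the function name, loop bound and acknowledgement strategy are illustrative assumptions, not code from this patch:

#include <linux/interrupt.h>
#include <linux/io.h>

#include "local.h"

/* Illustrative only: not the i2s_irq_handler added by this series. */
static irqreturn_t example_i2s_irq_handler(int irq, void *dev_id)
{
        struct dw_i2s_dev *dev = dev_id;
        bool handled = false;
        int i;

        for (i = 0; i < MAX_CHANNEL_NUM / 2; i++) {
                u32 isr = ioread32(dev->i2s_base + ISR(i));

                /* Reading TOR/ROR clears the FIFO overrun status. */
                if (isr & ISR_TXFO) {
                        ioread32(dev->i2s_base + TOR(i));
                        handled = true;
                }
                if (isr & ISR_RXFO) {
                        ioread32(dev->i2s_base + ROR(i));
                        handled = true;
                }

                /* TX FIFO empty on channel 0: refill from the PIO ring. */
                if ((isr & ISR_TXFE) && i == 0 && dev->use_pio) {
                        dw_pcm_push_tx(dev);
                        handled = true;
                }
        }

        return handled ? IRQ_HANDLED : IRQ_NONE;
}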
diff --git a/sound/soc/dwc/local.h b/sound/soc/dwc/local.h
new file mode 100644
index 000000000000..68afd7577343
--- /dev/null
+++ b/sound/soc/dwc/local.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com)
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __DESIGNWARE_LOCAL_H
+#define __DESIGNWARE_LOCAL_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/designware_i2s.h>
+
+/* common registers for all channels */
+#define IER 0x000
+#define IRER 0x004
+#define ITER 0x008
+#define CER 0x00C
+#define CCR 0x010
+#define RXFFR 0x014
+#define TXFFR 0x018
+
+/* Interrupt status register fields */
+#define ISR_TXFO BIT(5)
+#define ISR_TXFE BIT(4)
+#define ISR_RXFO BIT(1)
+#define ISR_RXDA BIT(0)
+
+/* I2STxRxRegisters for all channels */
+#define LRBR_LTHR(x) (0x40 * x + 0x020)
+#define RRBR_RTHR(x) (0x40 * x + 0x024)
+#define RER(x) (0x40 * x + 0x028)
+#define TER(x) (0x40 * x + 0x02C)
+#define RCR(x) (0x40 * x + 0x030)
+#define TCR(x) (0x40 * x + 0x034)
+#define ISR(x) (0x40 * x + 0x038)
+#define IMR(x) (0x40 * x + 0x03C)
+#define ROR(x) (0x40 * x + 0x040)
+#define TOR(x) (0x40 * x + 0x044)
+#define RFCR(x) (0x40 * x + 0x048)
+#define TFCR(x) (0x40 * x + 0x04C)
+#define RFF(x) (0x40 * x + 0x050)
+#define TFF(x) (0x40 * x + 0x054)
+
+/* I2SCOMPRegisters */
+#define I2S_COMP_PARAM_2 0x01F0
+#define I2S_COMP_PARAM_1 0x01F4
+#define I2S_COMP_VERSION 0x01F8
+#define I2S_COMP_TYPE 0x01FC
+
+/*
+ * Component parameter register fields - define the I2S block's
+ * configuration.
+ */
+#define COMP1_TX_WORDSIZE_3(r) (((r) & GENMASK(27, 25)) >> 25)
+#define COMP1_TX_WORDSIZE_2(r) (((r) & GENMASK(24, 22)) >> 22)
+#define COMP1_TX_WORDSIZE_1(r) (((r) & GENMASK(21, 19)) >> 19)
+#define COMP1_TX_WORDSIZE_0(r) (((r) & GENMASK(18, 16)) >> 16)
+#define COMP1_TX_CHANNELS(r) (((r) & GENMASK(10, 9)) >> 9)
+#define COMP1_RX_CHANNELS(r) (((r) & GENMASK(8, 7)) >> 7)
+#define COMP1_RX_ENABLED(r) (((r) & BIT(6)) >> 6)
+#define COMP1_TX_ENABLED(r) (((r) & BIT(5)) >> 5)
+#define COMP1_MODE_EN(r) (((r) & BIT(4)) >> 4)
+#define COMP1_FIFO_DEPTH_GLOBAL(r) (((r) & GENMASK(3, 2)) >> 2)
+#define COMP1_APB_DATA_WIDTH(r) (((r) & GENMASK(1, 0)) >> 0)
+
+#define COMP2_RX_WORDSIZE_3(r) (((r) & GENMASK(12, 10)) >> 10)
+#define COMP2_RX_WORDSIZE_2(r) (((r) & GENMASK(9, 7)) >> 7)
+#define COMP2_RX_WORDSIZE_1(r) (((r) & GENMASK(5, 3)) >> 3)
+#define COMP2_RX_WORDSIZE_0(r) (((r) & GENMASK(2, 0)) >> 0)
+
+/* Number of entries in WORDSIZE and DATA_WIDTH parameter registers */
+#define COMP_MAX_WORDSIZE (1 << 3)
+#define COMP_MAX_DATA_WIDTH (1 << 2)
+
+#define MAX_CHANNEL_NUM 8
+#define MIN_CHANNEL_NUM 2
+
+union dw_i2s_snd_dma_data {
+ struct i2s_dma_data pd;
+ struct snd_dmaengine_dai_dma_data dt;
+};
+
+struct dw_i2s_dev {
+ void __iomem *i2s_base;
+ struct clk *clk;
+ int active;
+ unsigned int capability;
+ unsigned int quirks;
+ unsigned int i2s_reg_comp1;
+ unsigned int i2s_reg_comp2;
+ struct device *dev;
+ u32 ccr;
+ u32 xfer_resolution;
+ u32 fifo_th;
+
+ /* data related to DMA transfers b/w i2s and DMAC */
+ union dw_i2s_snd_dma_data play_dma_data;
+ union dw_i2s_snd_dma_data capture_dma_data;
+ struct i2s_clk_config_data config;
+ int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+
+ /* data related to PIO transfers (TX) */
+ bool use_pio;
+ struct snd_pcm_substream __rcu *tx_substream;
+ unsigned int (*tx_fn)(struct dw_i2s_dev *dev,
+ struct snd_pcm_runtime *runtime, unsigned int tx_ptr,
+ bool *period_elapsed);
+ unsigned int tx_ptr;
+};
+
+#if IS_ENABLED(CONFIG_SND_DESIGNWARE_PCM)
+void dw_pcm_push_tx(struct dw_i2s_dev *dev);
+int dw_pcm_register(struct platform_device *pdev);
+#else
+static inline void dw_pcm_push_tx(struct dw_i2s_dev *dev) { }
+static inline int dw_pcm_register(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
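The COMP1_*/COMP2_* accessors above only shift and mask the component parameter registers; the wordsize and data-width fields remain encoded values that the driver has to translate. A short sketch of how a probe path might read and report them, using only the definitions in this header (the helper name and the dev_dbg reporting are illustrative):

#include <linux/device.h>
#include <linux/io.h>

#include "local.h"

/* Illustrative helper: dump the raw COMP1 fields during probe. */
static void example_report_comp1(struct dw_i2s_dev *dev)
{
        u32 comp1 = ioread32(dev->i2s_base + dev->i2s_reg_comp1);

        dev_dbg(dev->dev, "TX enabled: %lu, RX enabled: %lu\n",
                COMP1_TX_ENABLED(comp1), COMP1_RX_ENABLED(comp1));
        dev_dbg(dev->dev, "TX channels (encoded): %lu\n",
                COMP1_TX_CHANNELS(comp1));
        dev_dbg(dev->dev, "APB data width (encoded): %lu\n",
                COMP1_APB_DATA_WIDTH(comp1));
        dev_dbg(dev->dev, "global FIFO depth (encoded): %lu\n",
                COMP1_FIFO_DEPTH_GLOBAL(comp1));
}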
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 35aabf9dc503..19bdcac71775 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -4,6 +4,7 @@ comment "Common SoC Audio options for Freescale CPUs:"
config SND_SOC_FSL_ASRC
tristate "Asynchronous Sample Rate Converter (ASRC) module support"
+ depends on HAS_DMA
select REGMAP_MMIO
select SND_SOC_GENERIC_DMAENGINE_PCM
help
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 151849f79863..beec7934a265 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -172,7 +172,7 @@ static void spdif_irq_uqrx_full(struct fsl_spdif_priv *spdif_priv, char name)
if (*pos >= size * 2) {
*pos = 0;
} else if (unlikely((*pos % size) + 3 > size)) {
- dev_err(&pdev->dev, "User bit receivce buffer overflow\n");
+ dev_err(&pdev->dev, "User bit receive buffer overflow\n");
return;
}
diff --git a/sound/soc/generic/Kconfig b/sound/soc/generic/Kconfig
index 610f61251640..c01c5dd68601 100644
--- a/sound/soc/generic/Kconfig
+++ b/sound/soc/generic/Kconfig
@@ -1,4 +1,8 @@
+config SND_SIMPLE_CARD_UTILS
+ tristate
+
config SND_SIMPLE_CARD
tristate "ASoC Simple sound card support"
+ select SND_SIMPLE_CARD_UTILS
help
This option enables generic simple sound card support
diff --git a/sound/soc/generic/Makefile b/sound/soc/generic/Makefile
index 9c3b246792bf..45602ca8536e 100644
--- a/sound/soc/generic/Makefile
+++ b/sound/soc/generic/Makefile
@@ -1,3 +1,5 @@
+obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += simple-card-utils.o
+
snd-soc-simple-card-objs := simple-card.o
obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
new file mode 100644
index 000000000000..d89a9a1b2471
--- /dev/null
+++ b/sound/soc/generic/simple-card-utils.c
@@ -0,0 +1,97 @@
+/*
+ * simple-card-utils.c
+ *
+ * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/of.h>
+#include <sound/simple_card_utils.h>
+
+int asoc_simple_card_parse_daifmt(struct device *dev,
+ struct device_node *node,
+ struct device_node *codec,
+ char *prefix,
+ unsigned int *retfmt)
+{
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ int prefix_len = prefix ? strlen(prefix) : 0;
+ unsigned int daifmt;
+
+ daifmt = snd_soc_of_parse_daifmt(node, prefix,
+ &bitclkmaster, &framemaster);
+ daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+
+ if (prefix_len && !bitclkmaster && !framemaster) {
+ /*
+ * No dai-link level node and no master setting found at the
+ * sound node level; revert to legacy DT parsing and take the
+ * settings from the codec node.
+ */
+ dev_dbg(dev, "Revert to legacy daifmt parsing\n");
+
+ daifmt = snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
+ (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
+ } else {
+ if (codec == bitclkmaster)
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
+ else
+ daifmt |= (codec == framemaster) ?
+ SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
+ }
+
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+
+ *retfmt = daifmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_daifmt);
+
+int asoc_simple_card_set_dailink_name(struct device *dev,
+ struct snd_soc_dai_link *dai_link,
+ const char *fmt, ...)
+{
+ va_list ap;
+ char *name = NULL;
+ int ret = -ENOMEM;
+
+ va_start(ap, fmt);
+ name = devm_kvasprintf(dev, GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ if (name) {
+ ret = 0;
+
+ dai_link->name = name;
+ dai_link->stream_name = name;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_set_dailink_name);
+
+int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
+ char *prefix)
+{
+ char prop[128];
+ int ret;
+
+ snprintf(prop, sizeof(prop), "%sname", prefix);
+
+ /* Parse the card name from DT */
+ ret = snd_soc_of_parse_card_name(card, prop);
+ if (ret < 0)
+ return ret;
+
+ if (!card->name && card->dai_link)
+ card->name = card->dai_link->name;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
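These three helpers are what simple-card.c switches over to in the next diff. A condensed sketch of how a card driver is expected to chain them, assuming the declarations in <sound/simple_card_utils.h>; the wrapper function and its arguments are illustrative, and the prefix string matches the PREFIX used below:

#include <linux/device.h>
#include <linux/of.h>
#include <sound/soc.h>
#include <sound/simple_card_utils.h>

/* Illustrative wrapper: parse one DAI link with the new helpers. */
static int example_parse_link(struct device *dev, struct snd_soc_card *card,
                              struct snd_soc_dai_link *dai_link,
                              struct device_node *node,
                              struct device_node *codec)
{
        int ret;

        /* Link-level format first, falling back to the codec node. */
        ret = asoc_simple_card_parse_daifmt(dev, node, codec,
                                            "simple-audio-card,",
                                            &dai_link->dai_fmt);
        if (ret < 0)
                return ret;

        /* "<cpu>-<codec>" becomes both the link and stream name. */
        ret = asoc_simple_card_set_dailink_name(dev, dai_link, "%s-%s",
                                                dai_link->cpu_dai_name,
                                                dai_link->codec_dai_name);
        if (ret < 0)
                return ret;

        /* "simple-audio-card,name" from DT, else the first link name. */
        return asoc_simple_card_parse_card_name(card, "simple-audio-card,");
}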
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 466492b7d4f5..43295f024982 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -21,6 +21,12 @@
#include <sound/soc-dai.h>
#include <sound/soc.h>
+struct asoc_simple_jack {
+ struct snd_soc_jack jack;
+ struct snd_soc_jack_pin pin;
+ struct snd_soc_jack_gpio gpio;
+};
+
struct simple_card_data {
struct snd_soc_card snd_card;
struct simple_dai_props {
@@ -29,10 +35,8 @@ struct simple_card_data {
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
- int gpio_hp_det;
- int gpio_hp_det_invert;
- int gpio_mic_det;
- int gpio_mic_det_invert;
+ struct asoc_simple_jack hp_jack;
+ struct asoc_simple_jack mic_jack;
struct snd_soc_dai_link dai_link[]; /* dynamically allocated */
};
@@ -40,6 +44,69 @@ struct simple_card_data {
#define simple_priv_to_link(priv, i) ((priv)->snd_card.dai_link + i)
#define simple_priv_to_props(priv, i) ((priv)->dai_props + i)
+#define PREFIX "simple-audio-card,"
+
+#define asoc_simple_card_init_hp(card, sjack, prefix)\
+ asoc_simple_card_init_jack(card, sjack, 1, prefix)
+#define asoc_simple_card_init_mic(card, sjack, prefix)\
+ asoc_simple_card_init_jack(card, sjack, 0, prefix)
+static int asoc_simple_card_init_jack(struct snd_soc_card *card,
+ struct asoc_simple_jack *sjack,
+ int is_hp, char *prefix)
+{
+ struct device *dev = card->dev;
+ enum of_gpio_flags flags;
+ char prop[128];
+ char *pin_name;
+ char *gpio_name;
+ int mask;
+ int det;
+
+ sjack->gpio.gpio = -ENOENT;
+
+ if (is_hp) {
+ snprintf(prop, sizeof(prop), "%shp-det-gpio", prefix);
+ pin_name = "Headphones";
+ gpio_name = "Headphone detection";
+ mask = SND_JACK_HEADPHONE;
+ } else {
+ snprintf(prop, sizeof(prop), "%smic-det-gpio", prefix);
+ pin_name = "Mic Jack";
+ gpio_name = "Mic detection";
+ mask = SND_JACK_MICROPHONE;
+ }
+
+ det = of_get_named_gpio_flags(dev->of_node, prop, 0, &flags);
+ if (det == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (gpio_is_valid(det)) {
+ sjack->pin.pin = pin_name;
+ sjack->pin.mask = mask;
+
+ sjack->gpio.name = gpio_name;
+ sjack->gpio.report = mask;
+ sjack->gpio.gpio = det;
+ sjack->gpio.invert = !!(flags & OF_GPIO_ACTIVE_LOW);
+ sjack->gpio.debounce_time = 150;
+
+ snd_soc_card_jack_new(card, pin_name, mask,
+ &sjack->jack,
+ &sjack->pin, 1);
+
+ snd_soc_jack_add_gpios(&sjack->jack, 1,
+ &sjack->gpio);
+ }
+
+ return 0;
+}
+
+static void asoc_simple_card_remove_jack(struct asoc_simple_jack *sjack)
+{
+ if (gpio_is_valid(sjack->gpio.gpio))
+ snd_soc_jack_free_gpios(&sjack->jack, 1, &sjack->gpio);
+}
+
static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -110,32 +177,6 @@ static struct snd_soc_ops asoc_simple_card_ops = {
.hw_params = asoc_simple_card_hw_params,
};
-static struct snd_soc_jack simple_card_hp_jack;
-static struct snd_soc_jack_pin simple_card_hp_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
-};
-static struct snd_soc_jack_gpio simple_card_hp_jack_gpio = {
- .name = "Headphone detection",
- .report = SND_JACK_HEADPHONE,
- .debounce_time = 150,
-};
-
-static struct snd_soc_jack simple_card_mic_jack;
-static struct snd_soc_jack_pin simple_card_mic_jack_pins[] = {
- {
- .pin = "Mic Jack",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-static struct snd_soc_jack_gpio simple_card_mic_jack_gpio = {
- .name = "Mic detection",
- .report = SND_JACK_MICROPHONE,
- .debounce_time = 150,
-};
-
static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
struct asoc_simple_dai *set)
{
@@ -184,30 +225,14 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
if (ret < 0)
return ret;
- if (gpio_is_valid(priv->gpio_hp_det)) {
- snd_soc_card_jack_new(rtd->card, "Headphones",
- SND_JACK_HEADPHONE,
- &simple_card_hp_jack,
- simple_card_hp_jack_pins,
- ARRAY_SIZE(simple_card_hp_jack_pins));
-
- simple_card_hp_jack_gpio.gpio = priv->gpio_hp_det;
- simple_card_hp_jack_gpio.invert = priv->gpio_hp_det_invert;
- snd_soc_jack_add_gpios(&simple_card_hp_jack, 1,
- &simple_card_hp_jack_gpio);
- }
+ ret = asoc_simple_card_init_hp(rtd->card, &priv->hp_jack, PREFIX);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
+ if (ret < 0)
+ return ret;
- if (gpio_is_valid(priv->gpio_mic_det)) {
- snd_soc_card_jack_new(rtd->card, "Mic Jack",
- SND_JACK_MICROPHONE,
- &simple_card_mic_jack,
- simple_card_mic_jack_pins,
- ARRAY_SIZE(simple_card_mic_jack_pins));
- simple_card_mic_jack_gpio.gpio = priv->gpio_mic_det;
- simple_card_mic_jack_gpio.invert = priv->gpio_mic_det_invert;
- snd_soc_jack_add_gpios(&simple_card_mic_jack, 1,
- &simple_card_mic_jack_gpio);
- }
return 0;
}
@@ -223,6 +248,9 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
u32 val;
int ret;
+ if (!np)
+ return 0;
+
/*
* Get node via "sound-dai = <&phandle port>"
* it will be used as xxx_of_node on soc_bind_dai_link()
@@ -238,9 +266,14 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
*args_count = args.args_count;
/* Get dai->name */
- ret = snd_soc_of_get_dai_name(np, name);
- if (ret < 0)
- return ret;
+ if (name) {
+ ret = snd_soc_of_get_dai_name(np, name);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!dai)
+ return 0;
/* Parse TDM slot */
ret = snd_soc_of_parse_tdm_slot(np, &dai->tx_slot_mask,
@@ -275,48 +308,6 @@ asoc_simple_card_sub_parse_of(struct device_node *np,
return 0;
}
-static int asoc_simple_card_parse_daifmt(struct device_node *node,
- struct simple_card_data *priv,
- struct device_node *codec,
- char *prefix, int idx)
-{
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
- struct device *dev = simple_priv_to_dev(priv);
- struct device_node *bitclkmaster = NULL;
- struct device_node *framemaster = NULL;
- unsigned int daifmt;
-
- daifmt = snd_soc_of_parse_daifmt(node, prefix,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
-
- if (strlen(prefix) && !bitclkmaster && !framemaster) {
- /*
- * No dai-link level and master setting was not found from
- * sound node level, revert back to legacy DT parsing and
- * take the settings from codec node.
- */
- dev_dbg(dev, "Revert to legacy daifmt parsing\n");
-
- daifmt = snd_soc_of_parse_daifmt(codec, NULL, NULL, NULL) |
- (daifmt & ~SND_SOC_DAIFMT_CLOCK_MASK);
- } else {
- if (codec == bitclkmaster)
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
- else
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
- }
-
- dai_link->dai_fmt = daifmt;
-
- of_node_put(bitclkmaster);
- of_node_put(framemaster);
-
- return 0;
-}
-
static int asoc_simple_card_dai_link_of(struct device_node *node,
struct simple_card_data *priv,
int idx,
@@ -328,7 +319,6 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
struct device_node *cpu = NULL;
struct device_node *plat = NULL;
struct device_node *codec = NULL;
- char *name;
char prop[128];
char *prefix = "";
int ret, cpu_args;
@@ -336,7 +326,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
/* For single DAI link & old style of DT node */
if (is_top_level_node)
- prefix = "simple-audio-card,";
+ prefix = PREFIX;
snprintf(prop, sizeof(prop), "%scpu", prefix);
cpu = of_get_child_by_name(node, prop);
@@ -353,8 +343,8 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
goto dai_link_of_err;
}
- ret = asoc_simple_card_parse_daifmt(node, priv,
- codec, prefix, idx);
+ ret = asoc_simple_card_parse_daifmt(dev, node, codec,
+ prefix, &dai_link->dai_fmt);
if (ret < 0)
goto dai_link_of_err;
@@ -374,35 +364,28 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
if (ret < 0)
goto dai_link_of_err;
+ ret = asoc_simple_card_sub_parse_of(plat, NULL,
+ &dai_link->platform_of_node,
+ NULL, NULL);
+ if (ret < 0)
+ goto dai_link_of_err;
+
if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name) {
ret = -EINVAL;
goto dai_link_of_err;
}
- if (plat) {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(plat, "sound-dai",
- "#sound-dai-cells", 0, &args);
- dai_link->platform_of_node = args.np;
- } else {
- /* Assumes platform == cpu */
+ /* Assumes platform == cpu */
+ if (!dai_link->platform_of_node)
dai_link->platform_of_node = dai_link->cpu_of_node;
- }
- /* DAI link name is created from CPU/CODEC dai name */
- name = devm_kzalloc(dev,
- strlen(dai_link->cpu_dai_name) +
- strlen(dai_link->codec_dai_name) + 2,
- GFP_KERNEL);
- if (!name) {
- ret = -ENOMEM;
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "%s-%s",
+ dai_link->cpu_dai_name,
+ dai_link->codec_dai_name);
+ if (ret < 0)
goto dai_link_of_err;
- }
- sprintf(name, "%s-%s", dai_link->cpu_dai_name,
- dai_link->codec_dai_name);
- dai_link->name = dai_link->stream_name = name;
dai_link->ops = &asoc_simple_card_ops;
dai_link->init = asoc_simple_card_dai_init;
@@ -438,42 +421,35 @@ static int asoc_simple_card_parse_of(struct device_node *node,
struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
- enum of_gpio_flags flags;
u32 val;
int ret;
if (!node)
return -EINVAL;
- /* Parse the card name from DT */
- snd_soc_of_parse_card_name(&priv->snd_card, "simple-audio-card,name");
-
/* The off-codec widgets */
- if (of_property_read_bool(node, "simple-audio-card,widgets")) {
+ if (of_property_read_bool(node, PREFIX "widgets")) {
ret = snd_soc_of_parse_audio_simple_widgets(&priv->snd_card,
- "simple-audio-card,widgets");
+ PREFIX "widgets");
if (ret)
return ret;
}
/* DAPM routes */
- if (of_property_read_bool(node, "simple-audio-card,routing")) {
+ if (of_property_read_bool(node, PREFIX "routing")) {
ret = snd_soc_of_parse_audio_routing(&priv->snd_card,
- "simple-audio-card,routing");
+ PREFIX "routing");
if (ret)
return ret;
}
/* Factor to mclk, used in hw_params() */
- ret = of_property_read_u32(node, "simple-audio-card,mclk-fs", &val);
+ ret = of_property_read_u32(node, PREFIX "mclk-fs", &val);
if (ret == 0)
priv->mclk_fs = val;
- dev_dbg(dev, "New simple-card: %s\n", priv->snd_card.name ?
- priv->snd_card.name : "");
-
/* Single/Muti DAI link(s) & New style of DT node */
- if (of_get_child_by_name(node, "simple-audio-card,dai-link")) {
+ if (of_get_child_by_name(node, PREFIX "dai-link")) {
struct device_node *np = NULL;
int i = 0;
@@ -494,20 +470,9 @@ static int asoc_simple_card_parse_of(struct device_node *node,
return ret;
}
- priv->gpio_hp_det = of_get_named_gpio_flags(node,
- "simple-audio-card,hp-det-gpio", 0, &flags);
- priv->gpio_hp_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
- if (priv->gpio_hp_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- priv->gpio_mic_det = of_get_named_gpio_flags(node,
- "simple-audio-card,mic-det-gpio", 0, &flags);
- priv->gpio_mic_det_invert = !!(flags & OF_GPIO_ACTIVE_LOW);
- if (priv->gpio_mic_det == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (!priv->snd_card.name)
- priv->snd_card.name = priv->snd_card.dai_link->name;
+ ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX);
+ if (ret)
+ return ret;
return 0;
}
@@ -536,7 +501,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
int num_links, ret;
/* Get the number of DAI links */
- if (np && of_get_child_by_name(np, "simple-audio-card,dai-link"))
+ if (np && of_get_child_by_name(np, PREFIX "dai-link"))
num_links = of_get_child_count(np);
else
num_links = 1;
@@ -555,9 +520,6 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
priv->snd_card.dai_link = dai_link;
priv->snd_card.num_links = num_links;
- priv->gpio_hp_det = -ENOENT;
- priv->gpio_mic_det = -ENOENT;
-
/* Get room for the other properties */
priv->dai_props = devm_kzalloc(dev,
sizeof(*priv->dai_props) * num_links,
@@ -624,12 +586,8 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct simple_card_data *priv = snd_soc_card_get_drvdata(card);
- if (gpio_is_valid(priv->gpio_hp_det))
- snd_soc_jack_free_gpios(&simple_card_hp_jack, 1,
- &simple_card_hp_jack_gpio);
- if (gpio_is_valid(priv->gpio_mic_det))
- snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
- &simple_card_mic_jack_gpio);
+ asoc_simple_card_remove_jack(&priv->hp_jack);
+ asoc_simple_card_remove_jack(&priv->mic_jack);
return asoc_simple_card_unref(card);
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 91c15abb625e..a20c3dfbcb5d 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -7,7 +7,7 @@ config SND_MFLD_MACHINE
help
This adds support for ASoC machine driver for Intel(R) MID Medfield platform
used as alsa device in audio substem in Intel(R) MID devices
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SST_MFLD_PLATFORM
@@ -25,7 +25,6 @@ config SND_SST_IPC_ACPI
tristate
select SND_SST_IPC
select SND_SOC_INTEL_SST
- depends on ACPI
config SND_SOC_INTEL_SST
tristate
@@ -33,6 +32,12 @@ config SND_SOC_INTEL_SST
select SND_SOC_INTEL_SST_MATCH if ACPI
depends on (X86 || COMPILE_TEST)
+# The firmware code depends on DW_DMAC_CORE; since "select" creates no
+# reverse dependency, each machine driver needs to select
+# SND_SOC_INTEL_SST_FIRMWARE carefully, depending on DW_DMAC_CORE
+config SND_SOC_INTEL_SST_FIRMWARE
+ tristate
+
config SND_SOC_INTEL_SST_ACPI
tristate
@@ -48,16 +53,33 @@ config SND_SOC_INTEL_BAYTRAIL
config SND_SOC_INTEL_HASWELL_MACH
tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on DW_DMAC_CORE=y
+ depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
Ultrabook platforms.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
+config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
+ tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
+ depends on X86 && ACPI && I2C
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_HDA_DSP_LOADER
+ help
+ This adds support for ASoC machine driver for Broxton-P platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
config SND_SOC_INTEL_BXT_RT298_MACH
tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
depends on X86 && ACPI && I2C
@@ -70,26 +92,28 @@ config SND_SOC_INTEL_BXT_RT298_MACH
help
This adds support for ASoC machine driver for Broxton platforms
with RT286 I2S audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BYT_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
+ depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
help
This adds audio driver for Intel Baytrail platform based boards
with the RT5640 audio codec. This driver is deprecated, use
- SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality
+ SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
config SND_SOC_INTEL_BYT_MAX98090_MACH
tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
+ depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
help
@@ -100,19 +124,20 @@ config SND_SOC_INTEL_BROADWELL_MACH
tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
I2C_DESIGNWARE_PLATFORM
- depends on DW_DMAC_CORE=y
+ depends on DW_DMAC_CORE
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
select SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
This adds support for the Wilcatpoint Audio DSP on Intel(R) Broadwell
Ultrabook platforms.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BYTCR_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
- depends on X86 && I2C
+ depends on X86 && I2C && ACPI
select SND_SOC_RT5640
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -120,12 +145,12 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
help
This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
platforms with RT5640 audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BYTCR_RT5651_MACH
tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
- depends on X86 && I2C
+ depends on X86 && I2C && ACPI
select SND_SOC_RT5651
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -133,12 +158,12 @@ config SND_SOC_INTEL_BYTCR_RT5651_MACH
help
This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
platforms with RT5651 audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
- depends on X86_INTEL_LPSS && I2C
+ depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT5670
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -146,12 +171,12 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5672 audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
- depends on X86_INTEL_LPSS && I2C
+ depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT5645
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -163,16 +188,16 @@ config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
- depends on X86_INTEL_LPSS && I2C
+ depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_MAX98090
select SND_SOC_TS3A227E
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
select SND_SOC_INTEL_SST_MATCH if ACPI
help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
- If unsure select "N".
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
+ If unsure select "N".
config SND_SOC_INTEL_SKYLAKE
tristate
@@ -192,7 +217,7 @@ config SND_SOC_INTEL_SKL_RT286_MACH
help
This adds support for ASoC machine driver for Skylake platforms
with RT286 I2S audio codec.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
@@ -207,7 +232,7 @@ config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + SSM4567.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
@@ -222,5 +247,5 @@ config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + MAX98357A.
- Say Y if you have such a device
+ Say Y if you have such a device.
If unsure select "N".
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 3bc4b63b2f9d..4d3184971227 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -28,6 +28,7 @@
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
+#include <linux/dmi.h>
#include <linux/acpi.h>
#include <asm/platform_sst_audio.h>
#include <sound/core.h>
@@ -237,6 +238,9 @@ static int sst_acpi_probe(struct platform_device *pdev)
dev_err(dev, "No matching machine driver found\n");
return -ENODEV;
}
+ if (mach->machine_quirk)
+ mach = mach->machine_quirk(mach);
+
pdata = mach->pdata;
ret = kstrtouint(id->id, 16, &dev_id);
@@ -320,6 +324,44 @@ static int sst_acpi_remove(struct platform_device *pdev)
return 0;
}
+static unsigned long cht_machine_id;
+
+#define CHT_SURFACE_MACH 1
+
+static int cht_surface_quirk_cb(const struct dmi_system_id *id)
+{
+ cht_machine_id = CHT_SURFACE_MACH;
+ return 1;
+}
+
+
+static const struct dmi_system_id cht_table[] = {
+ {
+ .callback = cht_surface_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
+};
+
+
+static struct sst_acpi_mach cht_surface_mach = {
+ "10EC5640", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
+ &chv_platform_data };
+
+static struct sst_acpi_mach *cht_quirk(void *arg)
+{
+ struct sst_acpi_mach *mach = arg;
+
+ dmi_check_system(cht_table);
+
+ if (cht_machine_id == CHT_SURFACE_MACH)
+ return &cht_surface_mach;
+ else
+ return mach;
+}
+
static struct sst_acpi_mach sst_acpi_bytcr[] = {
{"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
&byt_rvp_platform_data },
@@ -343,7 +385,7 @@ static struct sst_acpi_mach sst_acpi_chv[] = {
{"193C9890", "cht-bsw-max98090", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
&chv_platform_data },
/* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
- {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", NULL,
+ {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", cht_quirk,
&chv_platform_data },
{},
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index a8506774f510..dac03a06bfd8 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -2,6 +2,7 @@ snd-soc-sst-haswell-objs := haswell.o
snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
snd-soc-sst-broadwell-objs := broadwell.o
+snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
@@ -15,6 +16,7 @@ snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
+obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
obj-$(CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH) += snd-soc-sst-bytcr-rt5640.o
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
new file mode 100644
index 000000000000..3774b117d365
--- /dev/null
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -0,0 +1,460 @@
+/*
+ * Intel Broxton-P I2S Machine Driver
+ *
+ * Copyright (C) 2016, Intel Corporation. All rights reserved.
+ *
+ * Modified from:
+ * Intel Skylake I2S Machine driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../../codecs/da7219.h"
+#include "../../codecs/da7219-aad.h"
+
+#define BXT_DIALOG_CODEC_DAI "da7219-hifi"
+#define BXT_MAXIM_CODEC_DAI "HiFi"
+#define DUAL_CHANNEL 2
+
+static struct snd_soc_jack broxton_headset;
+
+enum {
+ BXT_DPCM_AUDIO_PB = 0,
+ BXT_DPCM_AUDIO_CP,
+ BXT_DPCM_AUDIO_REF_CP,
+ BXT_DPCM_AUDIO_HDMI1_PB,
+ BXT_DPCM_AUDIO_HDMI2_PB,
+ BXT_DPCM_AUDIO_HDMI3_PB,
+};
+
+static const struct snd_kcontrol_new broxton_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Spk"),
+};
+
+static const struct snd_soc_dapm_widget broxton_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("HDMI1", NULL),
+ SND_SOC_DAPM_SPK("HDMI2", NULL),
+ SND_SOC_DAPM_SPK("HDMI3", NULL),
+};
+
+static const struct snd_soc_dapm_route broxton_map[] = {
+ /* HP jack connectors - unknown if we have jack detection */
+ {"Headphone Jack", NULL, "HPL"},
+ {"Headphone Jack", NULL, "HPR"},
+
+ /* speaker */
+ {"Spk", NULL, "Speaker"},
+
+ /* other jacks */
+ {"MIC", NULL, "Headset Mic"},
+
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
+
+ /* CODEC BE connections */
+ {"HiFi Playback", NULL, "ssp5 Tx"},
+ {"ssp5 Tx", NULL, "codec0_out"},
+
+ {"Playback", NULL, "ssp1 Tx"},
+ {"ssp1 Tx", NULL, "codec1_out"},
+
+ {"codec0_in", NULL, "ssp1 Rx"},
+ {"ssp1 Rx", NULL, "Capture"},
+
+ {"HDMI1", NULL, "hif5 Output"},
+ {"HDMI2", NULL, "hif6 Output"},
+ {"HDMI3", NULL, "hif7 Output"},
+
+ {"hifi3", NULL, "iDisp3 Tx"},
+ {"iDisp3 Tx", NULL, "iDisp3_out"},
+ {"hifi2", NULL, "iDisp2 Tx"},
+ {"iDisp2 Tx", NULL, "iDisp2_out"},
+ {"hifi1", NULL, "iDisp1 Tx"},
+ {"iDisp1 Tx", NULL, "iDisp1_out"},
+
+ /* DMIC */
+ {"dmic01_hifi", NULL, "DMIC01 Rx"},
+ {"DMIC01 Rx", NULL, "DMIC AIF"},
+};
+
+static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = DUAL_CHANNEL;
+
+ /* set SSP to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_codec *codec = rtd->codec;
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3, &broxton_headset,
+ NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ da7219_aad_jack_det(codec, &broxton_headset);
+
+ snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+
+ return ret;
+}
+
+static int broxton_hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *dai = rtd->codec_dai;
+
+ return hdac_hdmi_jack_init(dai, BXT_DPCM_AUDIO_HDMI1_PB + dai->id);
+}
+
+static int broxton_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
+static unsigned int rates[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int bxt_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform, the PCM front-end device supports:
+ * 48 kHz
+ * stereo
+ * 16-bit audio
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops broxton_da7219_fe_ops = {
+ .startup = bxt_fe_startup,
+};
+
+static int broxton_da7219_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ DA7219_CLKSRC_MCLK, 19200000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(codec_dai->dev, "can't set codec sysclk configuration\n");
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_PLL_SRM, 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "failed to start PLL: %d\n", ret);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int broxton_da7219_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret < 0) {
+ dev_err(codec_dai->dev, "failed to stop PLL: %d\n", ret);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static struct snd_soc_ops broxton_da7219_ops = {
+ .hw_params = broxton_da7219_hw_params,
+ .hw_free = broxton_da7219_hw_free,
+};
+
+/* broxton digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link broxton_dais[] = {
+ /* Front End DAI links */
+ [BXT_DPCM_AUDIO_PB]
+ {
+ .name = "Bxt Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = broxton_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &broxton_da7219_fe_ops,
+ },
+ [BXT_DPCM_AUDIO_CP]
+ {
+ .name = "Bxt Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:0e.0",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &broxton_da7219_fe_ops,
+ },
+ [BXT_DPCM_AUDIO_REF_CP]
+ {
+ .name = "Bxt Audio Reference cap",
+ .stream_name = "Refcap",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .ignore_suspend = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [BXT_DPCM_AUDIO_HDMI1_PB]
+ {
+ .name = "Bxt HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [BXT_DPCM_AUDIO_HDMI2_PB]
+ {
+ .name = "Bxt HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [BXT_DPCM_AUDIO_HDMI3_PB]
+ {
+ .name = "Bxt HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ /* Back End DAI links */
+ {
+ /* SSP5 - Codec */
+ .name = "SSP5-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP5 Pin",
+ .platform_name = "0000:00:0e.0",
+ .no_pcm = 1,
+ .codec_name = "MX98357A:00",
+ .codec_dai_name = BXT_MAXIM_CODEC_DAI,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = broxton_ssp_fixup,
+ .dpcm_playback = 1,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:0e.0",
+ .no_pcm = 1,
+ .codec_name = "i2c-DLGS7219:00",
+ .codec_dai_name = BXT_DIALOG_CODEC_DAI,
+ .init = broxton_da7219_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = broxton_ssp_fixup,
+ .ops = &broxton_da7219_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:0e.0",
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:0e.0",
+ .init = broxton_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:0e.0",
+ .init = broxton_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:0e.0",
+ .init = broxton_hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+/* broxton audio machine driver for DA7219 + MAX98357A */
+static struct snd_soc_card broxton_audio_card = {
+ .name = "bxtda7219max",
+ .owner = THIS_MODULE,
+ .dai_link = broxton_dais,
+ .num_links = ARRAY_SIZE(broxton_dais),
+ .controls = broxton_controls,
+ .num_controls = ARRAY_SIZE(broxton_controls),
+ .dapm_widgets = broxton_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(broxton_widgets),
+ .dapm_routes = broxton_map,
+ .num_dapm_routes = ARRAY_SIZE(broxton_map),
+ .fully_routed = true,
+};
+
+static int broxton_audio_probe(struct platform_device *pdev)
+{
+ broxton_audio_card.dev = &pdev->dev;
+ return devm_snd_soc_register_card(&pdev->dev, &broxton_audio_card);
+}
+
+static struct platform_driver broxton_audio = {
+ .probe = broxton_audio_probe,
+ .driver = {
+ .name = "bxt_da7219_max98357a_i2s",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(broxton_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio Machine driver-DA7219 & MAX98357A in I2S mode");
+MODULE_AUTHOR("Sathyanarayana Nujella <sathyanarayana.nujella@intel.com>");
+MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com>");
+MODULE_AUTHOR("Harsha Priya <harshapriya.n@intel.com>");
+MODULE_AUTHOR("Conrad Cooke <conrad.cooke@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bxt_da7219_max98357a_i2s");
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index f4787515c0ed..253d7bfbf511 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -33,6 +33,7 @@ enum {
BXT_DPCM_AUDIO_PB = 0,
BXT_DPCM_AUDIO_CP,
BXT_DPCM_AUDIO_REF_CP,
+ BXT_DPCM_AUDIO_DMIC_CP,
BXT_DPCM_AUDIO_HDMI1_PB,
BXT_DPCM_AUDIO_HDMI2_PB,
BXT_DPCM_AUDIO_HDMI3_PB,
@@ -88,6 +89,7 @@ static const struct snd_soc_dapm_route broxton_rt298_map[] = {
/* CODEC BE connections */
{ "AIF1 Playback", NULL, "ssp5 Tx"},
{ "ssp5 Tx", NULL, "codec0_out"},
+ { "ssp5 Tx", NULL, "codec1_out"},
{ "codec0_in", NULL, "ssp5 Rx" },
{ "ssp5 Rx", NULL, "AIF1 Capture" },
@@ -104,6 +106,17 @@ static const struct snd_soc_dapm_route broxton_rt298_map[] = {
};
+static int broxton_rt298_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
@@ -118,6 +131,9 @@ static int broxton_rt298_codec_init(struct snd_soc_pcm_runtime *rtd)
return ret;
rt298_mic_detect(codec, &broxton_headset);
+
+ snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+
return 0;
}
@@ -169,6 +185,89 @@ static struct snd_soc_ops broxton_rt298_ops = {
.hw_params = broxton_rt298_hw_params,
};
+static unsigned int rates[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
+static unsigned int channels_dmic[] = {
+ 2, 4,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+ .count = ARRAY_SIZE(channels_dmic),
+ .list = channels_dmic,
+ .mask = 0,
+};
+
+static int broxton_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_max = 4;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_dmic_channels);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops broxton_dmic_ops = {
+ .startup = broxton_dmic_startup,
+};
+
+static unsigned int channels[] = {
+ 2,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int bxt_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform, the PCM front-end device supports:
+ * 48 kHz
+ * stereo
+ */
+
+ runtime->hw.channels_max = 2;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops broxton_rt286_fe_ops = {
+ .startup = bxt_fe_startup,
+};
+
/* broxton digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link broxton_rt298_dais[] = {
/* Front End DAI links */
@@ -182,8 +281,10 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
+ .init = broxton_rt298_fe_init,
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_playback = 1,
+ .ops = &broxton_rt286_fe_ops,
},
[BXT_DPCM_AUDIO_CP]
{
@@ -197,6 +298,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_capture = 1,
+ .ops = &broxton_rt286_fe_ops,
},
[BXT_DPCM_AUDIO_REF_CP]
{
@@ -211,6 +313,20 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
+ [BXT_DPCM_AUDIO_DMIC_CP]
+ {
+ .name = "Bxt Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:0e.0",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &broxton_dmic_ops,
+ },
[BXT_DPCM_AUDIO_HDMI1_PB]
{
.name = "Bxt HDMI Port1",
@@ -276,6 +392,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.codec_name = "dmic-codec",
.codec_dai_name = "dmic-hifi",
.platform_name = "0000:00:0e.0",
+ .be_hw_params_fixup = broxton_dmic_fixup,
.ignore_suspend = 1,
.dpcm_capture = 1,
.no_pcm = 1,
@@ -341,6 +458,7 @@ static struct platform_driver broxton_audio = {
.probe = broxton_audio_probe,
.driver = {
.name = "bxt_alc298s_i2s",
+ .pm = &snd_soc_pm_ops,
},
};
module_platform_driver(broxton_audio)
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index d7ef292c402d..56056ed7fcfd 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -30,6 +30,7 @@
#include <sound/jack.h>
#include "../../codecs/rt5645.h"
#include "../atom/sst-atom-controls.h"
+#include "../common/sst-acpi.h"
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5645-aif1"
@@ -340,10 +341,13 @@ static struct snd_soc_card snd_soc_card_chtrt5650 = {
};
static struct cht_acpi_card snd_soc_cards[] = {
+ {"10EC5640", CODEC_TYPE_RT5645, &snd_soc_card_chtrt5645},
{"10EC5645", CODEC_TYPE_RT5645, &snd_soc_card_chtrt5645},
{"10EC5650", CODEC_TYPE_RT5650, &snd_soc_card_chtrt5650},
};
+static char cht_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
@@ -351,6 +355,9 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
struct cht_mc_private *drv;
struct snd_soc_card *card = snd_soc_cards[0].soc_card;
char codec_name[16];
+ struct sst_acpi_mach *mach;
+ const char *i2c_name = NULL;
+ int dai_index = 0;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
if (!drv)
@@ -366,12 +373,23 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
}
card->dev = &pdev->dev;
+ mach = card->dev->platform_data;
sprintf(codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
/* set correct codec name */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
- if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00"))
+ if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00")) {
card->dai_link[i].codec_name = kstrdup(codec_name, GFP_KERNEL);
+ dai_index = i;
+ }
+
+ /* fixup codec name based on HID */
+ i2c_name = sst_acpi_find_name_from_hid(mach->id);
+ if (i2c_name != NULL) {
+ snprintf(cht_rt5640_codec_name, sizeof(cht_rt5640_codec_name),
+ "%s%s", "i2c-", i2c_name);
+ cht_dailink[dai_index].codec_name = cht_rt5640_codec_name;
+ }
snd_soc_card_set_drvdata(card, drv);
ret_val = devm_snd_soc_register_card(&pdev->dev, card);
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index d2808652b974..25db5be7fdfa 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -23,12 +23,15 @@
#include <sound/soc.h>
#include "../../codecs/nau8825.h"
#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_MAXIM_CODEC_DAI "HiFi"
+#define DMIC_CH(p) p->list[p->count-1]
static struct snd_soc_jack skylake_headset;
static struct snd_soc_card skylake_audio_card;
+static const struct snd_pcm_hw_constraint_list *dmic_constraints;
struct skl_hdmi_pcm {
struct list_head head;
@@ -339,7 +342,7 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- if (params_channels(params) == 2)
+ if (params_channels(params) == 2 || DMIC_CH(dmic_constraints) == 2)
channels->min = channels->max = 2;
else
channels->min = channels->max = 4;
@@ -357,13 +360,23 @@ static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
.mask = 0,
};
+static const unsigned int dmic_2ch[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
+ .count = ARRAY_SIZE(dmic_2ch),
+ .list = dmic_2ch,
+ .mask = 0,
+};
+
static int skylake_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- runtime->hw.channels_max = 4;
+ runtime->hw.channels_max = DMIC_CH(dmic_constraints);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- &constraints_dmic_channels);
+ dmic_constraints);
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
@@ -382,8 +395,22 @@ static struct snd_pcm_hw_constraint_list constraints_16000 = {
.list = rates_16000,
};
+static const unsigned int ch_mono[] = {
+ 1,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
static int skylake_refcap_startup(struct snd_pcm_substream *substream)
{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_16000);
@@ -610,6 +637,7 @@ static struct snd_soc_card skylake_audio_card = {
static int skylake_audio_probe(struct platform_device *pdev)
{
struct skl_nau8825_private *ctx;
+ struct skl_machine_pdata *pdata;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
@@ -620,15 +648,27 @@ static int skylake_audio_probe(struct platform_device *pdev)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (pdata)
+ dmic_constraints = pdata->dmic_num == 2 ?
+ &constraints_dmic_2ch : &constraints_dmic_channels;
+
return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
}
+static const struct platform_device_id skl_board_ids[] = {
+ { .name = "skl_n88l25_m98357a" },
+ { .name = "kbl_n88l25_m98357a" },
+ { }
+};
+
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
.driver = {
- .name = "skl_nau88l25_max98357a_i2s",
+ .name = "skl_n88l25_m98357a",
.pm = &snd_soc_pm_ops,
},
+ .id_table = skl_board_ids,
};
module_platform_driver(skylake_audio)
@@ -637,4 +677,5 @@ module_platform_driver(skylake_audio)
MODULE_DESCRIPTION("Audio Machine driver-NAU88L25 & MAX98357A in I2S mode");
MODULE_AUTHOR("Rohit Ainapure <rohit.m.ainapure@intel.com");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_nau88l25_max98357a_i2s");
+MODULE_ALIAS("platform:skl_n88l25_m98357a");
+MODULE_ALIAS("platform:kbl_n88l25_m98357a");
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index e19aa99c4f72..69c5d5da4e86 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -27,12 +27,15 @@
#include <sound/pcm_params.h>
#include "../../codecs/nau8825.h"
#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_SSM_CODEC_DAI "ssm4567-hifi"
+#define DMIC_CH(p) p->list[p->count-1]
static struct snd_soc_jack skylake_headset;
static struct snd_soc_card skylake_audio_card;
+static const struct snd_pcm_hw_constraint_list *dmic_constraints;
struct skl_hdmi_pcm {
struct list_head head;
@@ -367,7 +370,7 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
{
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- if (params_channels(params) == 2)
+ if (params_channels(params) == 2 || DMIC_CH(dmic_constraints) == 2)
channels->min = channels->max = 2;
else
channels->min = channels->max = 4;
@@ -405,13 +408,23 @@ static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
.mask = 0,
};
+static const unsigned int dmic_2ch[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
+ .count = ARRAY_SIZE(dmic_2ch),
+ .list = dmic_2ch,
+ .mask = 0,
+};
+
static int skylake_dmic_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- runtime->hw.channels_max = 4;
+ runtime->hw.channels_max = DMIC_CH(dmic_constraints);
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- &constraints_dmic_channels);
+ dmic_constraints);
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
@@ -430,8 +443,22 @@ static struct snd_pcm_hw_constraint_list constraints_16000 = {
.list = rates_16000,
};
+static const unsigned int ch_mono[] = {
+ 1,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
static int skylake_refcap_startup(struct snd_pcm_substream *substream)
{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
&constraints_16000);
@@ -662,6 +689,7 @@ static struct snd_soc_card skylake_audio_card = {
static int skylake_audio_probe(struct platform_device *pdev)
{
struct skl_nau88125_private *ctx;
+ struct skl_machine_pdata *pdata;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
@@ -672,15 +700,27 @@ static int skylake_audio_probe(struct platform_device *pdev)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (pdata)
+ dmic_constraints = pdata->dmic_num == 2 ?
+ &constraints_dmic_2ch : &constraints_dmic_channels;
+
return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
}
+static const struct platform_device_id skl_board_ids[] = {
+ { .name = "skl_n88l25_s4567" },
+ { .name = "kbl_n88l25_s4567" },
+ { }
+};
+
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
.driver = {
- .name = "skl_nau88l25_ssm4567_i2s",
+ .name = "skl_n88l25_s4567",
.pm = &snd_soc_pm_ops,
},
+ .id_table = skl_board_ids,
};
module_platform_driver(skylake_audio)
@@ -693,4 +733,5 @@ MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_DESCRIPTION("Intel Audio Machine driver for SKL with NAU88L25 and SSM4567 in I2S Mode");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:skl_nau88l25_ssm4567_i2s");
+MODULE_ALIAS("platform:skl_n88l25_s4567");
+MODULE_ALIAS("platform:kbl_n88l25_s4567");
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 426b48233fdb..88c61e8cb87f 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -505,12 +505,20 @@ static int skylake_audio_probe(struct platform_device *pdev)
return devm_snd_soc_register_card(&pdev->dev, &skylake_rt286);
}
+static const struct platform_device_id skl_board_ids[] = {
+ { .name = "skl_alc286s_i2s" },
+ { .name = "kbl_alc286s_i2s" },
+ { }
+};
+
static struct platform_driver skylake_audio = {
.probe = skylake_audio_probe,
.driver = {
.name = "skl_alc286s_i2s",
.pm = &snd_soc_pm_ops,
},
+ .id_table = skl_board_ids,
+
};
module_platform_driver(skylake_audio)
@@ -520,3 +528,4 @@ MODULE_AUTHOR("Omair Mohammed Abdullah <omair.m.abdullah@intel.com>");
MODULE_DESCRIPTION("Intel SST Audio for Skylake");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:skl_alc286s_i2s");
+MODULE_ALIAS("platform:kbl_alc286s_i2s");
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index fbbb25c2ceed..1a35149bcad7 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -2,9 +2,9 @@ snd-soc-sst-dsp-objs := sst-dsp.o
snd-soc-sst-acpi-objs := sst-acpi.o
snd-soc-sst-match-objs := sst-match-acpi.o
snd-soc-sst-ipc-objs := sst-ipc.o
-
-snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o
+snd-soc-sst-firmware-objs := sst-firmware.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_FIRMWARE) += snd-soc-sst-firmware.o
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
index 8398cb227ba9..5d2949324d0e 100644
--- a/sound/soc/intel/common/sst-acpi.h
+++ b/sound/soc/intel/common/sst-acpi.h
@@ -20,7 +20,7 @@
#if IS_ENABLED(CONFIG_ACPI)
const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
#else
-inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
+static inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
{
return NULL;
}
@@ -40,6 +40,6 @@ struct sst_acpi_mach {
/* board name */
const char *board;
- void (*machine_quirk)(void);
+ struct sst_acpi_mach * (*machine_quirk)(void *arg);
void *pdata;
};
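/*
 * Illustrative sketch, not part of the patch: machine_quirk now returns a
 * (possibly substituted) machine table entry instead of void. A minimal
 * quirk that keeps the matched entry unchanged could look like this; the
 * function name is hypothetical.
 */
static struct sst_acpi_mach *example_noop_quirk(void *arg)
{
	return (struct sst_acpi_mach *)arg;
}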
diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index 97dc1ae05e69..d13c84364c3c 100644
--- a/sound/soc/intel/common/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -383,10 +383,6 @@ struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
u32 index, void *private);
void sst_mem_block_unregister_all(struct sst_dsp *dsp);
-/* Create/Free DMA resources */
-int sst_dma_new(struct sst_dsp *sst);
-void sst_dma_free(struct sst_dma *dma);
-
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
enum sst_mem_type type);
#endif
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index b5bbdf4fe93a..c00ede4ea4d7 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -285,7 +285,7 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
}
reg = sst_dsp_shim_read_unlocked(ctx, offset);
- dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
+ dev_dbg(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
(time < timeout) ? "successful" : "timedout");
ret = time < timeout ? 0 : -ETIME;
@@ -420,73 +420,6 @@ void sst_dsp_inbox_read(struct sst_dsp *sst, void *message, size_t bytes)
}
EXPORT_SYMBOL_GPL(sst_dsp_inbox_read);
-#ifdef CONFIG_DW_DMAC_CORE
-struct sst_dsp *sst_dsp_new(struct device *dev,
- struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
-{
- struct sst_dsp *sst;
- int err;
-
- dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);
-
- sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
- if (sst == NULL)
- return NULL;
-
- spin_lock_init(&sst->spinlock);
- mutex_init(&sst->mutex);
- sst->dev = dev;
- sst->dma_dev = pdata->dma_dev;
- sst->thread_context = sst_dev->thread_context;
- sst->sst_dev = sst_dev;
- sst->id = pdata->id;
- sst->irq = pdata->irq;
- sst->ops = sst_dev->ops;
- sst->pdata = pdata;
- INIT_LIST_HEAD(&sst->used_block_list);
- INIT_LIST_HEAD(&sst->free_block_list);
- INIT_LIST_HEAD(&sst->module_list);
- INIT_LIST_HEAD(&sst->fw_list);
- INIT_LIST_HEAD(&sst->scratch_block_list);
-
- /* Initialise SST Audio DSP */
- if (sst->ops->init) {
- err = sst->ops->init(sst, pdata);
- if (err < 0)
- return NULL;
- }
-
- /* Register the ISR */
- err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
- sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
- if (err)
- goto irq_err;
-
- err = sst_dma_new(sst);
- if (err)
- dev_warn(dev, "sst_dma_new failed %d\n", err);
-
- return sst;
-
-irq_err:
- if (sst->ops->free)
- sst->ops->free(sst);
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(sst_dsp_new);
-
-void sst_dsp_free(struct sst_dsp *sst)
-{
- free_irq(sst->irq, sst);
- if (sst->ops->free)
- sst->ops->free(sst);
-
- sst_dma_free(sst->dma);
-}
-EXPORT_SYMBOL_GPL(sst_dsp_free);
-#endif
-
/* Module information */
MODULE_AUTHOR("Liam Girdwood");
MODULE_DESCRIPTION("Intel SST Core");
diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h
index 0b84c719ec48..859f0de00339 100644
--- a/sound/soc/intel/common/sst-dsp.h
+++ b/sound/soc/intel/common/sst-dsp.h
@@ -216,7 +216,7 @@ struct sst_pdata {
void *dsp;
};
-#ifdef CONFIG_DW_DMAC_CORE
+#if IS_ENABLED(CONFIG_DW_DMAC_CORE)
/* Initialization */
struct sst_dsp *sst_dsp_new(struct device *dev,
struct sst_dsp_device *sst_dev, struct sst_pdata *pdata);
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 25993527370b..a086c35f91bb 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -1211,3 +1211,71 @@ u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
+
+struct sst_dsp *sst_dsp_new(struct device *dev,
+ struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
+{
+ struct sst_dsp *sst;
+ int err;
+
+ dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);
+
+ sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
+ if (sst == NULL)
+ return NULL;
+
+ spin_lock_init(&sst->spinlock);
+ mutex_init(&sst->mutex);
+ sst->dev = dev;
+ sst->dma_dev = pdata->dma_dev;
+ sst->thread_context = sst_dev->thread_context;
+ sst->sst_dev = sst_dev;
+ sst->id = pdata->id;
+ sst->irq = pdata->irq;
+ sst->ops = sst_dev->ops;
+ sst->pdata = pdata;
+ INIT_LIST_HEAD(&sst->used_block_list);
+ INIT_LIST_HEAD(&sst->free_block_list);
+ INIT_LIST_HEAD(&sst->module_list);
+ INIT_LIST_HEAD(&sst->fw_list);
+ INIT_LIST_HEAD(&sst->scratch_block_list);
+
+ /* Initialise SST Audio DSP */
+ if (sst->ops->init) {
+ err = sst->ops->init(sst, pdata);
+ if (err < 0)
+ return NULL;
+ }
+
+ /* Register the ISR */
+ err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
+ sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
+ if (err)
+ goto irq_err;
+
+ err = sst_dma_new(sst);
+ if (err)
+ dev_warn(dev, "sst_dma_new failed %d\n", err);
+
+ return sst;
+
+irq_err:
+ if (sst->ops->free)
+ sst->ops->free(sst);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_new);
+
+void sst_dsp_free(struct sst_dsp *sst)
+{
+ free_irq(sst->irq, sst);
+ if (sst->ops->free)
+ sst->ops->free(sst);
+
+ sst_dma_free(sst->dma);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_free);
+
+MODULE_DESCRIPTION("Intel SST Firmware Loader");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 994256b39b9c..3154525c2b83 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -819,7 +819,6 @@ static int hsw_pcm_open(struct snd_pcm_substream *substream)
mutex_lock(&pcm_data->mutex);
pm_runtime_get_sync(pdata->dev);
- snd_soc_pcm_set_drvdata(rtd, pcm_data);
pcm_data->substream = substream;
snd_soc_set_runtime_hwparams(substream, &hsw_pcm_hardware);
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
index c28f5d0e1d99..60fbc9bbe473 100644
--- a/sound/soc/intel/skylake/Makefile
+++ b/sound/soc/intel/skylake/Makefile
@@ -5,6 +5,6 @@ obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
# Skylake IPC Support
snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
- skl-sst.o bxt-sst.o
+ skl-sst.o bxt-sst.o skl-sst-utils.o
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 8b95e09e23e8..2663781278aa 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -37,11 +37,19 @@
#define BXT_ADSP_SRAM1_BASE 0xA0000
+#define BXT_INSTANCE_ID 0
+#define BXT_BASE_FW_MODULE_ID 0
+
static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
{
return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
}
+/*
+ * The first boot sequence has some extra steps: core 0 waits for the power
+ * status of core 1, so power up core 1 momentarily as well, keep it in
+ * reset/stall, and then turn it off.
+ */
static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
const void *fwdata, u32 fwsize)
{
@@ -49,7 +57,7 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
u32 reg;
stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
- if (stream_tag < 0) {
+ if (stream_tag <= 0) {
dev_err(ctx->dev, "Failed to prepare DMA FW loading err: %x\n",
stream_tag);
return stream_tag;
@@ -58,17 +66,27 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
ctx->dsp_ops.stream_tag = stream_tag;
memcpy(ctx->dmab.area, fwdata, fwsize);
- /* Purge FW request */
+ /* Step 1: Power up core 0 and core1 */
+ ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK |
+ SKL_DSP_CORE_MASK(1));
+ if (ret < 0) {
+ dev_err(ctx->dev, "dsp core0/1 power up failed\n");
+ goto base_fw_load_failed;
+ }
+
+ /* Step 2: Purge FW request */
sst_dsp_shim_write(ctx, SKL_ADSP_REG_HIPCI, SKL_ADSP_REG_HIPCI_BUSY |
- BXT_IPC_PURGE_FW | (stream_tag - 1));
+ (BXT_IPC_PURGE_FW | ((stream_tag - 1) << 9)));
- ret = skl_dsp_enable_core(ctx);
+ /* Step 3: Unset core0 reset state & unstall/run core0 */
+ ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
+ dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
ret = -EIO;
goto base_fw_load_failed;
}
+ /* Step 4: Wait for DONE Bit */
for (i = BXT_INIT_TIMEOUT; i > 0; --i) {
reg = sst_dsp_shim_read(ctx, SKL_ADSP_REG_HIPCIE);
@@ -88,10 +106,18 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
SKL_ADSP_REG_HIPCIE_DONE);
}
- /* enable Interrupt */
+ /* Step 5: power down core1 */
+ ret = skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
+ if (ret < 0) {
+ dev_err(ctx->dev, "dsp core1 power down failed\n");
+ goto base_fw_load_failed;
+ }
+
+ /* Step 6: Enable Interrupt */
skl_ipc_int_enable(ctx);
skl_ipc_op_int_enable(ctx);
+ /* Step 7: Wait for ROM init */
for (i = BXT_INIT_TIMEOUT; i > 0; --i) {
if (SKL_FW_INIT ==
(sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS) &
@@ -112,7 +138,8 @@ static int sst_bxt_prepare_fw(struct sst_dsp *ctx,
base_fw_load_failed:
ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
- skl_dsp_disable_core(ctx);
+ skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
return ret;
}
@@ -130,23 +157,41 @@ static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
return ret;
}
+#define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
+
static int bxt_load_base_firmware(struct sst_dsp *ctx)
{
- const struct firmware *fw = NULL;
+ struct firmware stripped_fw;
struct skl_sst *skl = ctx->thread_context;
int ret;
- ret = request_firmware(&fw, ctx->fw_name, ctx->dev);
+ ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
goto sst_load_base_firmware_failed;
}
- ret = sst_bxt_prepare_fw(ctx, fw->data, fw->size);
+ /* check for extended manifest */
+ if (ctx->fw == NULL)
+ goto sst_load_base_firmware_failed;
+
+ ret = snd_skl_parse_uuids(ctx, BXT_ADSP_FW_BIN_HDR_OFFSET);
+ if (ret < 0)
+ goto sst_load_base_firmware_failed;
+
+ stripped_fw.data = ctx->fw->data;
+ stripped_fw.size = ctx->fw->size;
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
+ ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
/* Retry Enabling core and ROM load. Retry seemed to help */
if (ret < 0) {
- ret = sst_bxt_prepare_fw(ctx, fw->data, fw->size);
+ ret = sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
+ dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
+ sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
+ sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
+
dev_err(ctx->dev, "Core En/ROM load fail:%d\n", ret);
goto sst_load_base_firmware_failed;
}
@@ -159,83 +204,135 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
- skl_dsp_disable_core(ctx);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
} else {
dev_dbg(ctx->dev, "Firmware download successful\n");
ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
if (ret == 0) {
dev_err(ctx->dev, "DSP boot fail, FW Ready timeout\n");
- skl_dsp_disable_core(ctx);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
ret = -EIO;
} else {
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
ret = 0;
+ skl->fw_loaded = true;
}
}
sst_load_base_firmware_failed:
- release_firmware(fw);
+ release_firmware(ctx->fw);
return ret;
}
-static int bxt_set_dsp_D0(struct sst_dsp *ctx)
+static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
struct skl_sst *skl = ctx->thread_context;
int ret;
+ struct skl_ipc_dxstate_info dx;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- skl->boot_complete = false;
-
- ret = skl_dsp_enable_core(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "enable dsp core failed ret: %d\n", ret);
+ if (skl->fw_loaded == false) {
+ skl->boot_complete = false;
+ ret = bxt_load_base_firmware(ctx);
+ if (ret < 0)
+ dev_err(ctx->dev, "reload fw failed: %d\n", ret);
return ret;
}
- /* enable interrupt */
- skl_ipc_int_enable(ctx);
- skl_ipc_op_int_enable(ctx);
+ /* If core 0 is being turned on, turn on core 1 as well */
+ if (core_id == SKL_DSP_CORE0_ID)
+ ret = skl_dsp_core_power_up(ctx, core_mask |
+ SKL_DSP_CORE_MASK(1));
+ else
+ ret = skl_dsp_core_power_up(ctx, core_mask);
+
+ if (ret < 0)
+ goto err;
+
+ if (core_id == SKL_DSP_CORE0_ID) {
+
+ /*
+ * Enable interrupt after SPA is set and before
+ * DSP is unstalled
+ */
+ skl_ipc_int_enable(ctx);
+ skl_ipc_op_int_enable(ctx);
+ skl->boot_complete = false;
+ }
- ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
- msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
- if (ret == 0) {
- dev_err(ctx->dev, "ipc: error DSP boot timeout\n");
- dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
- sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
- sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
- return -EIO;
+ ret = skl_dsp_start_core(ctx, core_mask);
+ if (ret < 0)
+ goto err;
+
+ if (core_id == SKL_DSP_CORE0_ID) {
+ ret = wait_event_timeout(skl->boot_wait,
+ skl->boot_complete,
+ msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+
+ /* If core 1 was turned on for booting core 0, turn it off */
+ skl_dsp_core_power_down(ctx, SKL_DSP_CORE_MASK(1));
+ if (ret == 0) {
+ dev_err(ctx->dev, "%s: DSP boot timeout\n", __func__);
+ dev_err(ctx->dev, "Error code=0x%x: FW status=0x%x\n",
+ sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE),
+ sst_dsp_shim_read(ctx, BXT_ADSP_FW_STATUS));
+ dev_err(ctx->dev, "Failed to set core0 to D0 state\n");
+ ret = -EIO;
+ goto err;
+ }
+ }
+
+ /* Tell FW if an additional core is now on */
+
+ if (core_id != SKL_DSP_CORE0_ID) {
+ dx.core_mask = core_mask;
+ dx.dx_mask = core_mask;
+
+ ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
+ BXT_BASE_FW_MODULE_ID, &dx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "IPC set_dx for core %d fail: %d\n",
+ core_id, ret);
+ goto err;
+ }
}
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+ skl->cores.state[core_id] = SKL_DSP_RUNNING;
return 0;
+err:
+ if (core_id == SKL_DSP_CORE0_ID)
+ core_mask |= SKL_DSP_CORE_MASK(1);
+ skl_dsp_disable_core(ctx, core_mask);
+
+ return ret;
}
-static int bxt_set_dsp_D3(struct sst_dsp *ctx)
+static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
+ int ret;
struct skl_ipc_dxstate_info dx;
struct skl_sst *skl = ctx->thread_context;
- int ret = 0;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- if (!is_skl_dsp_running(ctx))
- return ret;
-
- dx.core_mask = SKL_DSP_CORE0_MASK;
+ dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
- ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
- SKL_BASE_FW_MODULE_ID, &dx);
- if (ret < 0) {
- dev_err(ctx->dev, "Failed to set DSP to D3 state: %d\n", ret);
- return ret;
- }
+ dev_dbg(ctx->dev, "core mask=%x dx_mask=%x\n",
+ dx.core_mask, dx.dx_mask);
+
+ ret = skl_ipc_set_dx(&skl->ipc, BXT_INSTANCE_ID,
+ BXT_BASE_FW_MODULE_ID, &dx);
+ if (ret < 0)
+ dev_err(ctx->dev,
+ "Failed to set DSP to D3:core id = %d;Continue reset\n",
+ core_id);
- ret = skl_dsp_disable_core(ctx);
+ ret = skl_dsp_disable_core(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "disbale dsp core failed: %d\n", ret);
- ret = -EIO;
+ dev_err(ctx->dev, "Failed to disable core %d", ret);
+ return ret;
}
-
- skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+ skl->cores.state[core_id] = SKL_DSP_RESET;
return 0;
}
@@ -274,6 +371,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
skl->dev = dev;
skl_dev.thread_context = skl;
+ INIT_LIST_HEAD(&skl->uuid_list);
skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
if (!skl->dsp) {
@@ -296,6 +394,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
if (ret)
return ret;
+ skl->cores.count = 2;
skl->boot_complete = false;
init_waitqueue_head(&skl->boot_wait);
@@ -305,6 +404,8 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
return ret;
}
+ skl_dsp_init_core_state(sst);
+
if (dsp)
*dsp = skl;
@@ -315,6 +416,7 @@ EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
+ skl_freeup_uuid_list(ctx);
skl_ipc_free(&ctx->ipc);
ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
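/*
 * Illustrative summary, not part of the patch: the reworked Broxton base
 * firmware load path introduced above, condensed from
 * bxt_load_base_firmware() and sst_bxt_prepare_fw():
 *
 *	request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
 *	snd_skl_parse_uuids(ctx, BXT_ADSP_FW_BIN_HDR_OFFSET);
 *	skl_dsp_strip_extended_manifest(&stripped_fw);
 *	sst_bxt_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
 *		steps 1-2: power up core 0 and core 1, send the purge-FW IPC
 *			   with ((stream_tag - 1) << 9)
 *		steps 3-5: start core 0, wait for DONE, power down core 1
 *		steps 6-7: enable IPC interrupts, wait for ROM init
 *	(prepare is retried once on failure, then the image is transferred
 *	 over host DMA)
 */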
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 226db84ba20f..44ab595ce21a 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -206,6 +206,12 @@ static const struct skl_dsp_ops dsp_ops[] = {
.cleanup = skl_sst_dsp_cleanup
},
{
+ .id = 0x9d71,
+ .loader_ops = skl_get_loader_ops,
+ .init = skl_sst_dsp_init,
+ .cleanup = skl_sst_dsp_cleanup
+ },
+ {
.id = 0x5a98,
.loader_ops = bxt_get_loader_ops,
.init = bxt_sst_dsp_init,
@@ -730,7 +736,7 @@ static int skl_set_module_format(struct skl_sst *ctx,
dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
module_config->id.module_id, param_size);
- print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
+ print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
*param_data, param_size, false);
return 0;
}
@@ -1046,7 +1052,7 @@ int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
- /* If pipe is not started, do not try to stop the pipe in FW. */
+ /* If the pipe is started, stop it in FW first. */
if (pipe->state > SKL_PIPE_STARTED) {
ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
if (ret < 0) {
@@ -1055,18 +1061,20 @@ int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
}
pipe->state = SKL_PIPE_PAUSED;
- } else {
- /* If pipe was not created in FW, do not try to delete it */
- if (pipe->state < SKL_PIPE_CREATED)
- return 0;
+ }
- ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
- if (ret < 0)
- dev_err(ctx->dev, "Failed to delete pipeline\n");
+ /* If pipe was not created in FW, do not try to delete it */
+ if (pipe->state < SKL_PIPE_CREATED)
+ return 0;
- pipe->state = SKL_PIPE_INVALID;
+ ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to delete pipeline\n");
+ return ret;
}
+ pipe->state = SKL_PIPE_INVALID;
+
return ret;
}
@@ -1125,7 +1133,30 @@ int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
return ret;
}
- pipe->state = SKL_PIPE_CREATED;
+ pipe->state = SKL_PIPE_PAUSED;
+
+ return 0;
+}
+
+/*
+ * Reset the pipeline by sending set pipe state IPC this will reset the DMA
+ * from the DSP side
+ */
+int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+ int ret;
+
+ /* If pipe was not created in FW, do not try to pause or delete */
+ if (pipe->state < SKL_PIPE_PAUSED)
+ return 0;
+
+ ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
+ if (ret < 0) {
+ dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
+ return ret;
+ }
+
+ pipe->state = SKL_PIPE_RESET;
return 0;
}
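/*
 * Illustrative sketch, not part of the patch: a teardown order a caller
 * could use with the pipe helpers reworked above; skl_reset_pipe() also
 * resets the DMA on the DSP side.
 */
static int example_teardown_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	ret = skl_stop_pipe(ctx, pipe);		/* running -> PAUSED */
	if (ret < 0)
		return ret;

	ret = skl_reset_pipe(ctx, pipe);	/* PAUSED -> RESET */
	if (ret < 0)
		return ret;

	return skl_delete_pipe(ctx, pipe);	/* created -> INVALID */
}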
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 7d73648e5f9a..3f8e6f0b7eb5 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -17,6 +17,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
+#include <linux/pci.h>
#include "skl.h"
/* Unique identification for getting NHLT blobs */
@@ -149,6 +150,45 @@ struct nhlt_specific_cfg
return NULL;
}
+int skl_get_dmic_geo(struct skl *skl)
+{
+ struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+ struct nhlt_endpoint *epnt;
+ struct nhlt_dmic_array_config *cfg;
+ struct device *dev = &skl->pci->dev;
+ unsigned int dmic_geo = 0;
+ u8 j;
+
+ epnt = (struct nhlt_endpoint *)nhlt->desc;
+
+ for (j = 0; j < nhlt->endpoint_count; j++) {
+ if (epnt->linktype == NHLT_LINK_DMIC) {
+ cfg = (struct nhlt_dmic_array_config *)
+ (epnt->config.caps);
+ switch (cfg->array_type) {
+ case NHLT_MIC_ARRAY_2CH_SMALL:
+ case NHLT_MIC_ARRAY_2CH_BIG:
+ dmic_geo |= MIC_ARRAY_2CH;
+ break;
+
+ case NHLT_MIC_ARRAY_4CH_1ST_GEOM:
+ case NHLT_MIC_ARRAY_4CH_L_SHAPED:
+ case NHLT_MIC_ARRAY_4CH_2ND_GEOM:
+ dmic_geo |= MIC_ARRAY_4CH;
+ break;
+
+ default:
+ dev_warn(dev, "undefined DMIC array_type 0x%0x\n",
+ cfg->array_type);
+
+ }
+ }
+ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ }
+
+ return dmic_geo;
+}
+
static void skl_nhlt_trim_space(struct skl *skl)
{
char *s = skl->tplg_name;
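/*
 * Illustrative sketch, not part of the patch: skl_get_dmic_geo() returns a
 * bitmask of MIC_ARRAY_2CH/MIC_ARRAY_4CH found in the NHLT endpoints. The
 * mapping to the dmic_num consumed by the machine drivers earlier in this
 * series is an assumption, shown only to connect the two ends.
 */
static unsigned int example_dmic_num(struct skl *skl)
{
	unsigned int geo = skl_get_dmic_geo(skl);

	return (geo & MIC_ARRAY_4CH) ? 4 : 2;
}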
diff --git a/sound/soc/intel/skylake/skl-nhlt.h b/sound/soc/intel/skylake/skl-nhlt.h
index 3769f9fefe2b..116534e7b3c5 100644
--- a/sound/soc/intel/skylake/skl-nhlt.h
+++ b/sound/soc/intel/skylake/skl-nhlt.h
@@ -103,4 +103,26 @@ struct nhlt_resource_desc {
u64 length;
} __packed;
+#define MIC_ARRAY_2CH 2
+#define MIC_ARRAY_4CH 4
+
+struct nhlt_tdm_config {
+ u8 virtual_slot;
+ u8 config_type;
+} __packed;
+
+struct nhlt_dmic_array_config {
+ struct nhlt_tdm_config tdm_config;
+ u8 array_type;
+} __packed;
+
+enum {
+ NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
+ NHLT_MIC_ARRAY_2CH_BIG = 0xb,
+ NHLT_MIC_ARRAY_4CH_1ST_GEOM = 0xc,
+ NHLT_MIC_ARRAY_4CH_L_SHAPED = 0xd,
+ NHLT_MIC_ARRAY_4CH_2ND_GEOM = 0xe,
+ NHLT_MIC_ARRAY_VENDOR_DEFINED = 0xf,
+};
+
#endif
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 7c81b31748ff..6e05bf8622f7 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -227,16 +227,25 @@ static int skl_pcm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+ struct skl *skl = get_skl_ctx(dai->dev);
unsigned int format_val;
int err;
+ struct skl_module_cfg *mconfig;
dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+ mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+
format_val = skl_get_format(substream, dai);
dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
hdac_stream(stream)->stream_tag, format_val);
snd_hdac_stream_reset(hdac_stream(stream));
+ /* In case of XRUN recovery, reset the FW pipe to clean state */
+ if (mconfig && (substream->runtime->status->state ==
+ SNDRV_PCM_STATE_XRUN))
+ skl_reset_pipe(skl->skl_sst, mconfig->pipe);
+
err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
if (err < 0)
return err;
@@ -521,6 +530,8 @@ static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
struct skl_dma_params *dma_params;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct hdac_ext_link *link;
+ struct skl *skl = get_skl_ctx(dai->dev);
+ struct skl_module_cfg *mconfig = NULL;
dma_params = (struct skl_dma_params *)
snd_soc_dai_get_dma_data(codec_dai, substream);
@@ -535,6 +546,12 @@ static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
snd_hdac_ext_link_stream_reset(link_dev);
+ /* In case of XRUN recovery, reset the FW pipe to clean state */
+ mconfig = skl_tplg_be_get_cpr_module(dai, substream->stream);
+ if (mconfig && (substream->runtime->status->state ==
+ SNDRV_PCM_STATE_XRUN))
+ skl_reset_pipe(skl->skl_sst, mconfig->pipe);
+
snd_hdac_ext_link_stream_setup(link_dev, format_val);
snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
@@ -1009,51 +1026,11 @@ static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-/* calculate runtime delay from LPIB */
-static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
- struct hdac_ext_stream *sstream,
- unsigned int pos)
-{
- struct hdac_bus *bus = ebus_to_hbus(ebus);
- struct hdac_stream *hstream = hdac_stream(sstream);
- struct snd_pcm_substream *substream = hstream->substream;
- int stream = substream->stream;
- unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
- int delay;
-
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- delay = pos - lpib_pos;
- else
- delay = lpib_pos - pos;
-
- if (delay < 0) {
- if (delay >= hstream->delay_negative_threshold)
- delay = 0;
- else
- delay += hstream->bufsize;
- }
-
- if (hstream->bufsize == delay)
- delay = 0;
-
- if (delay >= hstream->period_bytes) {
- dev_info(bus->dev,
- "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
- delay, hstream->period_bytes);
- delay = 0;
- }
-
- return bytes_to_frames(substream->runtime, delay);
-}
-
-static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
- int codec_delay)
+static snd_pcm_uframes_t skl_platform_pcm_pointer
+ (struct snd_pcm_substream *substream)
{
- struct hdac_stream *hstr = hdac_stream(hstream);
- struct snd_pcm_substream *substream = hstr->substream;
- struct hdac_ext_bus *ebus;
+ struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
unsigned int pos;
- int delay;
/* use the position buffer as default */
pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
@@ -1061,23 +1038,7 @@ static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
if (pos >= hdac_stream(hstream)->bufsize)
pos = 0;
- if (substream->runtime) {
- ebus = get_bus_ctx(substream);
- delay = skl_get_delay_from_lpib(ebus, hstream, pos)
- + codec_delay;
- substream->runtime->delay += delay;
- }
-
- return pos;
-}
-
-static snd_pcm_uframes_t skl_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
-
- return bytes_to_frames(substream->runtime,
- skl_get_position(hstream, 0));
+ return bytes_to_frames(substream->runtime, pos);
}
static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
@@ -1180,9 +1141,17 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
static int skl_platform_soc_probe(struct snd_soc_platform *platform)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(platform->dev);
+ struct skl *skl = ebus_to_skl(ebus);
+ int ret;
- if (ebus->ppcap)
- return skl_tplg_init(platform, ebus);
+ if (ebus->ppcap) {
+ ret = skl_tplg_init(platform, ebus);
+ if (ret < 0) {
+ dev_err(platform->dev, "Failed to init topology!\n");
+ return ret;
+ }
+ skl->platform = platform;
+ }
return 0;
}
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index 13c19855ee1a..c3deefab65d6 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -34,33 +34,84 @@ void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
mutex_unlock(&ctx->mutex);
}
-static int skl_dsp_core_set_reset_state(struct sst_dsp *ctx)
+/*
+ * Initialize core power state and usage count. To be called after
+ * successful first boot. Hence core 0 will be running and other cores
+ * will be reset
+ */
+void skl_dsp_init_core_state(struct sst_dsp *ctx)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ int i;
+
+ skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
+ skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;
+
+ for (i = SKL_DSP_CORE0_ID + 1; i < SKL_DSP_CORES_MAX; i++) {
+ skl->cores.state[i] = SKL_DSP_RESET;
+ skl->cores.usage_count[i] = 0;
+ }
+}
+
+/* Get the mask for all enabled cores */
+unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ unsigned int core_mask, en_cores_mask;
+ u32 val;
+
+ core_mask = SKL_DSP_CORES_MASK(skl->cores.count);
+
+ val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
+
+ /* Cores having CPA bit set */
+ en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
+ SKL_ADSPCS_CPA_SHIFT;
+
+ /* And cores having CRST bit cleared */
+ en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
+ SKL_ADSPCS_CRST_SHIFT;
+
+ /* And cores having CSTALL bit cleared */
+ en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
+ SKL_ADSPCS_CSTALL_SHIFT;
+ en_cores_mask &= core_mask;
+
+ dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);
+
+ return en_cores_mask;
+}
+
+static int
+skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx,
- SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
+ SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
+ SKL_ADSPCS_CRST_MASK(core_mask));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CRST_MASK,
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
+ SKL_ADSPCS_CRST_MASK(core_mask),
+ SKL_ADSPCS_CRST_MASK(core_mask),
SKL_DSP_RESET_TO,
"Set reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
- dev_err(ctx->dev, "Set reset state failed\n");
+ SKL_ADSPCS_CRST_MASK(core_mask)) !=
+ SKL_ADSPCS_CRST_MASK(core_mask)) {
+ dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx)
+int skl_dsp_core_unset_reset_state(
+ struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
@@ -68,152 +119,160 @@ static int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx)
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CRST_MASK, 0);
+ SKL_ADSPCS_CRST_MASK(core_mask), 0);
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CRST_MASK,
+ SKL_ADSPCS_CRST_MASK(core_mask),
0,
SKL_DSP_RESET_TO,
"Unset reset");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
- dev_err(ctx->dev, "Unset reset state failed\n");
+ SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
+ dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static bool is_skl_dsp_core_enable(struct sst_dsp *ctx)
+static bool
+is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
int val;
bool is_enable;
val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
- is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
- (val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
- !(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
- !(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
+ is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
+ (val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
+ !(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));
+
+ dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
+ is_enable, core_mask);
- dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
return is_enable;
}
-static int skl_dsp_reset_core(struct sst_dsp *ctx)
+static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
/* stall core */
- sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+ sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+ SKL_ADSPCS_CSTALL_MASK(core_mask),
+ SKL_ADSPCS_CSTALL_MASK(core_mask));
/* set reset state */
- return skl_dsp_core_set_reset_state(ctx);
+ return skl_dsp_core_set_reset_state(ctx, core_mask);
}
-static int skl_dsp_start_core(struct sst_dsp *ctx)
+int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* unset reset state */
- ret = skl_dsp_core_unset_reset_state(ctx);
- if (ret < 0) {
- dev_dbg(ctx->dev, "dsp unset reset fails\n");
+ ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
+ if (ret < 0)
return ret;
- }
/* run core */
- dev_dbg(ctx->dev, "run core...\n");
- sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- ~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
-
- if (!is_skl_dsp_core_enable(ctx)) {
- skl_dsp_reset_core(ctx);
- dev_err(ctx->dev, "DSP core enable failed\n");
+ dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
+ sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+ SKL_ADSPCS_CSTALL_MASK(core_mask), 0);
+
+ if (!is_skl_dsp_core_enable(ctx, core_mask)) {
+ skl_dsp_reset_core(ctx, core_mask);
+ dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static int skl_dsp_core_power_up(struct sst_dsp *ctx)
+int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
+ SKL_ADSPCS_SPA_MASK(core_mask),
+ SKL_ADSPCS_SPA_MASK(core_mask));
/* poll with timeout to check if operation successful */
ret = sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CPA_MASK,
- SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
+ SKL_ADSPCS_CPA_MASK(core_mask),
+ SKL_ADSPCS_CPA_MASK(core_mask),
SKL_DSP_PU_TO,
"Power up");
if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
- SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
- SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
- dev_err(ctx->dev, "DSP core power up failed\n");
+ SKL_ADSPCS_CPA_MASK(core_mask)) !=
+ SKL_ADSPCS_CPA_MASK(core_mask)) {
+ dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
+ core_mask);
ret = -EIO;
}
return ret;
}
-static int skl_dsp_core_power_down(struct sst_dsp *ctx)
+int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
/* update bits */
sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_SPA_MASK, 0);
+ SKL_ADSPCS_SPA_MASK(core_mask), 0);
/* poll with timeout to check if operation successful */
return sst_dsp_register_poll(ctx,
SKL_ADSP_REG_ADSPCS,
- SKL_ADSPCS_CPA_MASK,
+ SKL_ADSPCS_CPA_MASK(core_mask),
0,
SKL_DSP_PD_TO,
"Power down");
}
-int skl_dsp_enable_core(struct sst_dsp *ctx)
+int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
/* power up */
- ret = skl_dsp_core_power_up(ctx);
+ ret = skl_dsp_core_power_up(ctx, core_mask);
if (ret < 0) {
- dev_dbg(ctx->dev, "dsp core power up failed\n");
+ dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
+ core_mask);
return ret;
}
- return skl_dsp_start_core(ctx);
+ return skl_dsp_start_core(ctx, core_mask);
}
-int skl_dsp_disable_core(struct sst_dsp *ctx)
+int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
int ret;
- ret = skl_dsp_reset_core(ctx);
+ ret = skl_dsp_reset_core(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "dsp core reset failed\n");
+ dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
+ core_mask);
return ret;
}
/* power down core*/
- ret = skl_dsp_core_power_down(ctx);
+ ret = skl_dsp_core_power_down(ctx, core_mask);
if (ret < 0) {
- dev_err(ctx->dev, "dsp core power down failed\n");
+ dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
+ core_mask, ret);
return ret;
}
- if (is_skl_dsp_core_enable(ctx)) {
- dev_err(ctx->dev, "DSP core disable failed\n");
+ if (is_skl_dsp_core_enable(ctx, core_mask)) {
+ dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
+ core_mask, ret);
ret = -EIO;
}
@@ -224,28 +283,25 @@ int skl_dsp_boot(struct sst_dsp *ctx)
{
int ret;
- if (is_skl_dsp_core_enable(ctx)) {
- dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dap core\n");
- ret = skl_dsp_reset_core(ctx);
+ if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
+ ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp reset failed\n");
+ dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
return ret;
}
- ret = skl_dsp_start_core(ctx);
+ ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp start failed\n");
+ dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
return ret;
}
} else {
- dev_dbg(ctx->dev, "disable and enable to make sure DSP is invalid state\n");
- ret = skl_dsp_disable_core(ctx);
-
+ ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
if (ret < 0) {
- dev_err(ctx->dev, "dsp disable core failes\n");
+ dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
return ret;
}
- ret = skl_dsp_enable_core(ctx);
+ ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
}
return ret;
@@ -281,16 +337,74 @@ irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
return result;
}
+/*
+ * skl_dsp_get_core/skl_dsp_put_core will be called inside DAPM context
+ * within the dapm mutex. Hence no separate lock is used.
+ */
+int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ int ret = 0;
+
+ if (core_id >= skl->cores.count) {
+ dev_err(ctx->dev, "invalid core id: %d\n", core_id);
+ return -EINVAL;
+ }
+
+ if (skl->cores.state[core_id] == SKL_DSP_RESET) {
+ ret = ctx->fw_ops.set_state_D0(ctx, core_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "unable to get core%d\n", core_id);
+ return ret;
+ }
+ }
+
+ skl->cores.usage_count[core_id]++;
+
+ dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
+ core_id, skl->cores.state[core_id],
+ skl->cores.usage_count[core_id]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_dsp_get_core);
+
+int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
+{
+ struct skl_sst *skl = ctx->thread_context;
+ int ret = 0;
+
+ if (core_id >= skl->cores.count) {
+ dev_err(ctx->dev, "invalid core id: %d\n", core_id);
+ return -EINVAL;
+ }
+
+ if (--skl->cores.usage_count[core_id] == 0) {
+ ret = ctx->fw_ops.set_state_D3(ctx, core_id);
+ if (ret < 0) {
+ dev_err(ctx->dev, "unable to put core %d: %d\n",
+ core_id, ret);
+ skl->cores.usage_count[core_id]++;
+ }
+ }
+
+ dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
+ core_id, skl->cores.state[core_id],
+ skl->cores.usage_count[core_id]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(skl_dsp_put_core);
int skl_dsp_wake(struct sst_dsp *ctx)
{
- return ctx->fw_ops.set_state_D0(ctx);
+ return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);
int skl_dsp_sleep(struct sst_dsp *ctx)
{
- return ctx->fw_ops.set_state_D3(ctx);
+ return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);
@@ -337,9 +451,7 @@ void skl_dsp_free(struct sst_dsp *dsp)
free_irq(dsp->irq, dsp);
skl_ipc_op_int_disable(dsp);
- skl_ipc_int_disable(dsp);
-
- skl_dsp_disable_core(dsp);
+ skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
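/*
 * Illustrative sketch, not part of the patch: expected use of the new
 * refcounted core helpers around work that needs core 1 powered up.
 */
static int example_run_on_core1(struct sst_dsp *ctx)
{
	int ret;

	ret = skl_dsp_get_core(ctx, 1);		/* D0 on first user */
	if (ret < 0)
		return ret;

	/* ... submit work that requires core 1 here ... */

	return skl_dsp_put_core(ctx, 1);	/* D3 when the count drops to 0 */
}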
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index deabe7308d3b..0f8629ef79ac 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <sound/memalloc.h>
#include "skl-sst-cldma.h"
+#include "skl-tplg-interface.h"
struct sst_dsp;
struct skl_sst;
@@ -76,35 +77,53 @@ struct sst_dsp_device;
#define SKL_ADSPIC_IPC 1
#define SKL_ADSPIS_IPC 1
+/* Core ID of core0 */
+#define SKL_DSP_CORE0_ID 0
+
+/* Mask for a given core index, c = 0.. number of supported cores - 1 */
+#define SKL_DSP_CORE_MASK(c) BIT(c)
+
+/*
+ * Core 0 mask = SKL_DSP_CORE_MASK(0); Defined separately
+ * since Core0 is primary core and it is used often
+ */
+#define SKL_DSP_CORE0_MASK BIT(0)
+
+/*
+ * Mask for a given number of cores
+ * nc = number of supported cores
+ */
+#define SKL_DSP_CORES_MASK(nc) GENMASK((nc - 1), 0)
+
/* ADSPCS - Audio DSP Control & Status */
-#define SKL_DSP_CORES 1
-#define SKL_DSP_CORE0_MASK 1
-#define SKL_DSP_CORES_MASK ((1 << SKL_DSP_CORES) - 1)
-
-/* Core Reset - asserted high */
-#define SKL_ADSPCS_CRST_SHIFT 0
-#define SKL_ADSPCS_CRST_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
-#define SKL_ADSPCS_CRST(x) ((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
-
-/* Core run/stall - when set to '1' core is stalled */
-#define SKL_ADSPCS_CSTALL_SHIFT 8
-#define SKL_ADSPCS_CSTALL_MASK (SKL_DSP_CORES_MASK << \
- SKL_ADSPCS_CSTALL_SHIFT)
-#define SKL_ADSPCS_CSTALL(x) ((x << SKL_ADSPCS_CSTALL_SHIFT) & \
- SKL_ADSPCS_CSTALL_MASK)
-
-/* Set Power Active - when set to '1' turn cores on */
-#define SKL_ADSPCS_SPA_SHIFT 16
-#define SKL_ADSPCS_SPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
-#define SKL_ADSPCS_SPA(x) ((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
-
-/* Current Power Active - power status of cores, set by hardware */
-#define SKL_ADSPCS_CPA_SHIFT 24
-#define SKL_ADSPCS_CPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
-#define SKL_ADSPCS_CPA(x) ((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
-
-#define SST_DSP_POWER_D0 0x0 /* full On */
-#define SST_DSP_POWER_D3 0x3 /* Off */
+
+/*
+ * Core Reset - asserted high
+ * CRST Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_CRST_SHIFT 0
+#define SKL_ADSPCS_CRST_MASK(cm) ((cm) << SKL_ADSPCS_CRST_SHIFT)
+
+/*
+ * Core run/stall - when set to '1' core is stalled
+ * CSTALL Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_CSTALL_SHIFT 8
+#define SKL_ADSPCS_CSTALL_MASK(cm) ((cm) << SKL_ADSPCS_CSTALL_SHIFT)
+
+/*
+ * Set Power Active - when set to '1' turn cores on
+ * SPA Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_SPA_SHIFT 16
+#define SKL_ADSPCS_SPA_MASK(cm) ((cm) << SKL_ADSPCS_SPA_SHIFT)
+
+/*
+ * Current Power Active - power status of cores, set by hardware
+ * CPA Mask for a given core mask pattern, cm
+ */
+#define SKL_ADSPCS_CPA_SHIFT 24
+#define SKL_ADSPCS_CPA_MASK(cm) ((cm) << SKL_ADSPCS_CPA_SHIFT)
enum skl_dsp_states {
SKL_DSP_RUNNING = 1,
@@ -115,8 +134,8 @@ struct skl_dsp_fw_ops {
int (*load_fw)(struct sst_dsp *ctx);
/* FW module parser/loader */
int (*parse_fw)(struct sst_dsp *ctx);
- int (*set_state_D0)(struct sst_dsp *ctx);
- int (*set_state_D3)(struct sst_dsp *ctx);
+ int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
+ int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);
@@ -157,14 +176,26 @@ int skl_cldma_prepare(struct sst_dsp *ctx);
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq);
-int skl_dsp_enable_core(struct sst_dsp *ctx);
-int skl_dsp_disable_core(struct sst_dsp *ctx);
bool is_skl_dsp_running(struct sst_dsp *ctx);
+
+unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
+void skl_dsp_init_core_state(struct sst_dsp *ctx);
+int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask);
+int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx,
+ unsigned int core_mask);
+int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask);
+
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
int skl_dsp_wake(struct sst_dsp *ctx);
int skl_dsp_sleep(struct sst_dsp *ctx);
void skl_dsp_free(struct sst_dsp *dsp);
+int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id);
+int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id);
+
int skl_dsp_boot(struct sst_dsp *ctx);
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
@@ -175,4 +206,11 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
void bxt_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
+int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
+ struct skl_dfw_module *dfw_config);
+int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset);
+void skl_freeup_uuid_list(struct skl_sst *ctx);
+
+int skl_dsp_strip_extended_manifest(struct firmware *fw);
+
#endif /*__SKL_SST_DSP_H__*/
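/*
 * Worked example, illustration only, of the parameterised ADSPCS masks
 * above for core_mask = 0x3 (cores 0 and 1):
 *
 *	SKL_ADSPCS_CRST_MASK(0x3)   == 0x00000003
 *	SKL_ADSPCS_CSTALL_MASK(0x3) == 0x00000300
 *	SKL_ADSPCS_SPA_MASK(0x3)    == 0x00030000
 *	SKL_ADSPCS_CPA_MASK(0x3)    == 0x03000000
 *
 * and SKL_DSP_CORES_MASK(2) == 0x3, matching the skl->cores.count = 2 set
 * in bxt_sst_dsp_init().
 */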
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 543460293b00..96f2f6889b18 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -363,7 +363,7 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
/* first process the header */
switch (reply) {
case IPC_GLB_REPLY_SUCCESS:
- dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
+ dev_dbg(ipc->dev, "ipc FW reply %x: success\n", header.primary);
/* copy the rx data from the mailbox */
sst_dsp_inbox_read(ipc->dsp, msg->rx_data, msg->rx_size);
break;
@@ -692,7 +692,7 @@ int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
/* param_block_size must be in dwords */
u16 param_block_size = msg->param_data_size / sizeof(u32);
- print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
+ print_hex_dump_debug("Param data:", DUMP_PREFIX_NONE,
16, 4, buffer, param_block_size, false);
header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
index d59d1ba62a43..2e3d4e80ef97 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.h
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -45,6 +45,14 @@ struct skl_ipc_header {
u32 extension;
};
+#define SKL_DSP_CORES_MAX 2
+
+struct skl_dsp_cores {
+ unsigned int count;
+ enum skl_dsp_states state[SKL_DSP_CORES_MAX];
+ int usage_count[SKL_DSP_CORES_MAX];
+};
+
struct skl_sst {
struct device *dev;
struct sst_dsp *dsp;
@@ -60,6 +68,15 @@ struct skl_sst {
void (*enable_miscbdcge)(struct device *dev, bool enable);
/*Is CGCTL.MISCBDCGE disabled*/
bool miscbdcg_disabled;
+
+ /* Populate module information */
+ struct list_head uuid_list;
+
+ /* Is firmware loaded */
+ bool fw_loaded;
+
+ /* multi-core */
+ struct skl_dsp_cores cores;
};
struct skl_ipc_init_instance_msg {
@@ -136,5 +153,6 @@ void skl_ipc_int_disable(struct sst_dsp *dsp);
bool skl_ipc_int_status(struct sst_dsp *dsp);
void skl_ipc_free(struct sst_generic_ipc *ipc);
int skl_ipc_init(struct device *dev, struct skl_sst *skl);
+void skl_clear_module_cnt(struct sst_dsp *ctx);
#endif /* __SKL_IPC_H */
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
new file mode 100644
index 000000000000..25fcb796bd86
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -0,0 +1,256 @@
+/*
+ * skl-sst-utils.c - SKL sst utils functions
+ *
+ * Copyright (C) 2016 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include "skl-sst-dsp.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-ipc.h"
+
+
+#define UUID_STR_SIZE 37
+#define DEFAULT_HASH_SHA256_LEN 32
+
+/* FW Extended Manifest Header id = $AE1 */
+#define SKL_EXT_MANIFEST_HEADER_MAGIC 0x31454124
+
+struct skl_dfw_module_mod {
+ char name[100];
+ struct skl_dfw_module skl_dfw_mod;
+};
+
+struct UUID {
+ u8 id[16];
+};
+
+union seg_flags {
+ u32 ul;
+ struct {
+ u32 contents : 1;
+ u32 alloc : 1;
+ u32 load : 1;
+ u32 read_only : 1;
+ u32 code : 1;
+ u32 data : 1;
+ u32 _rsvd0 : 2;
+ u32 type : 4;
+ u32 _rsvd1 : 4;
+ u32 length : 16;
+ } r;
+} __packed;
+
+struct segment_desc {
+ union seg_flags flags;
+ u32 v_base_addr;
+ u32 file_offset;
+};
+
+struct module_type {
+ u32 load_type : 4;
+ u32 auto_start : 1;
+ u32 domain_ll : 1;
+ u32 domain_dp : 1;
+ u32 rsvd : 25;
+} __packed;
+
+struct adsp_module_entry {
+ u32 struct_id;
+ u8 name[8];
+ struct UUID uuid;
+ struct module_type type;
+ u8 hash1[DEFAULT_HASH_SHA256_LEN];
+ u32 entry_point;
+ u16 cfg_offset;
+ u16 cfg_count;
+ u32 affinity_mask;
+ u16 instance_max_count;
+ u16 instance_bss_size;
+ struct segment_desc segments[3];
+} __packed;
+
+struct adsp_fw_hdr {
+ u32 id;
+ u32 len;
+ u8 name[8];
+ u32 preload_page_count;
+ u32 fw_image_flags;
+ u32 feature_mask;
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+ u32 num_modules;
+ u32 hw_buf_base;
+ u32 hw_buf_length;
+ u32 load_offset;
+} __packed;
+
+struct uuid_module {
+ uuid_le uuid;
+ int id;
+ int is_loadable;
+
+ struct list_head list;
+};
+
+struct skl_ext_manifest_hdr {
+ u32 id;
+ u32 len;
+ u16 version_major;
+ u16 version_minor;
+ u32 entries;
+};
+
+int snd_skl_get_module_info(struct skl_sst *ctx, u8 *uuid,
+ struct skl_dfw_module *dfw_config)
+{
+ struct uuid_module *module;
+ uuid_le *uuid_mod;
+
+ uuid_mod = (uuid_le *)uuid;
+
+ list_for_each_entry(module, &ctx->uuid_list, list) {
+ if (uuid_le_cmp(*uuid_mod, module->uuid) == 0) {
+ dfw_config->module_id = module->id;
+ dfw_config->is_loadable = module->is_loadable;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_skl_get_module_info);
+
+/*
+ * Parse the firmware binary to get the UUID, module id
+ * and loadable flags
+ */
+int snd_skl_parse_uuids(struct sst_dsp *ctx, unsigned int offset)
+{
+ struct adsp_fw_hdr *adsp_hdr;
+ struct adsp_module_entry *mod_entry;
+ int i, num_entry;
+ uuid_le *uuid_bin;
+ const char *buf;
+ struct skl_sst *skl = ctx->thread_context;
+ struct uuid_module *module;
+ struct firmware stripped_fw;
+ unsigned int safe_file;
+
+ /* Get the FW pointer to derive ADSP header */
+ stripped_fw.data = ctx->fw->data;
+ stripped_fw.size = ctx->fw->size;
+
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
+ buf = stripped_fw.data;
+
+ /* check if we have enough space in file to move to header */
+ safe_file = sizeof(*adsp_hdr) + offset;
+ if (stripped_fw.size <= safe_file) {
+ dev_err(ctx->dev, "Small fw file size, No space for hdr\n");
+ return -EINVAL;
+ }
+
+ adsp_hdr = (struct adsp_fw_hdr *)(buf + offset);
+
+ /* check 1st module entry is in file */
+ safe_file += adsp_hdr->len + sizeof(*mod_entry);
+ if (stripped_fw.size <= safe_file) {
+ dev_err(ctx->dev, "Small fw file size, No module entry\n");
+ return -EINVAL;
+ }
+
+ mod_entry = (struct adsp_module_entry *)
+ (buf + offset + adsp_hdr->len);
+
+ num_entry = adsp_hdr->num_modules;
+
+ /* check all entries are in file */
+ safe_file += num_entry * sizeof(*mod_entry);
+ if (stripped_fw.size <= safe_file) {
+ dev_err(ctx->dev, "Small fw file size, No modules\n");
+ return -EINVAL;
+ }
+
+
+ /*
+ * Read the UUID(GUID) from FW Manifest.
+ *
+ * The 16 byte UUID format is: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+ * Populate the UUID table to store module_id and loadable flags
+ * for the module.
+ */
+
+ for (i = 0; i < num_entry; i++, mod_entry++) {
+ module = kzalloc(sizeof(*module), GFP_KERNEL);
+ if (!module)
+ return -ENOMEM;
+
+ uuid_bin = (uuid_le *)mod_entry->uuid.id;
+ memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
+
+ module->id = i;
+ module->is_loadable = mod_entry->type.load_type;
+
+ list_add_tail(&module->list, &skl->uuid_list);
+
+ dev_dbg(ctx->dev,
+ "Adding uuid :%pUL mod id: %d Loadable: %d\n",
+ &module->uuid, module->id, module->is_loadable);
+ }
+
+ return 0;
+}
+
+void skl_freeup_uuid_list(struct skl_sst *ctx)
+{
+ struct uuid_module *uuid, *_uuid;
+
+ list_for_each_entry_safe(uuid, _uuid, &ctx->uuid_list, list) {
+ list_del(&uuid->list);
+ kfree(uuid);
+ }
+}
+
+/*
+ * Some firmware binaries contain an extended manifest. If present, it
+ * needs to be stripped before we load and use the image.
+ */
+int skl_dsp_strip_extended_manifest(struct firmware *fw)
+{
+ struct skl_ext_manifest_hdr *hdr;
+
+	/* check that the fw file is at least as large as the header */
+	if (fw->size < sizeof(*hdr)) {
+ pr_err("%s: Firmware file small, no hdr\n", __func__);
+ return -EINVAL;
+ }
+
+ hdr = (struct skl_ext_manifest_hdr *)fw->data;
+
+ if (hdr->id == SKL_EXT_MANIFEST_HEADER_MAGIC) {
+ fw->size -= hdr->len;
+ fw->data += hdr->len;
+ }
+
+ return 0;
+}
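The three helpers added above are wired up elsewhere in this patch: snd_skl_parse_uuids() runs once per firmware load from skl_load_base_firmware(), snd_skl_get_module_info() resolves a widget's GUID at topology load time, and skl_freeup_uuid_list() tears the list down in skl_sst_dsp_cleanup(). A minimal sketch of that sequence, for illustration only (example_uuid_flow is invented; the header offset macro is defined in skl-sst.c):

/* Illustrative sketch only -- not part of the patch. */
static int example_uuid_flow(struct skl_sst *skl, u8 *guid,
			     struct skl_dfw_module *dfw)
{
	int ret;

	/* after request_firmware(): build uuid_list from the FW manifest */
	ret = snd_skl_parse_uuids(skl->dsp, SKL_ADSP_FW_BIN_HDR_OFFSET);
	if (ret < 0)
		return ret;

	/* at topology load: resolve module id/loadable flag for a GUID */
	ret = snd_skl_get_module_info(skl, guid, dfw);
	if (ret < 0)
		return ret;

	/* at DSP cleanup: release the per-module entries */
	skl_freeup_uuid_list(skl);

	return 0;
}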
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 13ec8d53b526..588f899ceb65 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -68,10 +68,13 @@ static int skl_transfer_firmware(struct sst_dsp *ctx,
return ret;
}
+#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284
+
static int skl_load_base_firmware(struct sst_dsp *ctx)
{
int ret = 0, i;
struct skl_sst *skl = ctx->thread_context;
+ struct firmware stripped_fw;
u32 reg;
skl->boot_complete = false;
@@ -81,11 +84,25 @@ static int skl_load_base_firmware(struct sst_dsp *ctx)
ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
if (ret < 0) {
dev_err(ctx->dev, "Request firmware failed %d\n", ret);
- skl_dsp_disable_core(ctx);
return -EIO;
}
}
+ ret = snd_skl_parse_uuids(ctx, SKL_ADSP_FW_BIN_HDR_OFFSET);
+ if (ret < 0) {
+ dev_err(ctx->dev,
+ "UUID parsing err: %d\n", ret);
+ release_firmware(ctx->fw);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
+ return ret;
+ }
+
+ /* check for extended manifest */
+ stripped_fw.data = ctx->fw->data;
+ stripped_fw.size = ctx->fw->size;
+
+ skl_dsp_strip_extended_manifest(&stripped_fw);
+
ret = skl_dsp_boot(ctx);
if (ret < 0) {
dev_err(ctx->dev, "Boot dsp core failed ret: %d", ret);
@@ -119,7 +136,7 @@ static int skl_load_base_firmware(struct sst_dsp *ctx)
goto transfer_firmware_failed;
}
- ret = skl_transfer_firmware(ctx, ctx->fw->data, ctx->fw->size);
+ ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
if (ret < 0) {
dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
goto transfer_firmware_failed;
@@ -133,67 +150,87 @@ static int skl_load_base_firmware(struct sst_dsp *ctx)
}
dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+ skl->fw_loaded = true;
}
return 0;
transfer_firmware_failed:
ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
- skl_dsp_disable_core(ctx);
+ skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
release_firmware(ctx->fw);
ctx->fw = NULL;
return ret;
}
-static int skl_set_dsp_D0(struct sst_dsp *ctx)
+static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
+ struct skl_ipc_dxstate_info dx;
+ struct skl_sst *skl = ctx->thread_context;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- ret = skl_load_base_firmware(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "unable to load firmware\n");
- return ret;
+ /* If core0 is being turned on, we need to load the FW */
+ if (core_id == SKL_DSP_CORE0_ID) {
+ ret = skl_load_base_firmware(ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "unable to load firmware\n");
+ return ret;
+ }
}
- skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+ /*
+ * If any core other than core 0 is being moved to D0, enable the
+ * core and send the set dx IPC for the core.
+ */
+ if (core_id != SKL_DSP_CORE0_ID) {
+ ret = skl_dsp_enable_core(ctx, core_mask);
+ if (ret < 0)
+ return ret;
+
+ dx.core_mask = core_mask;
+ dx.dx_mask = core_mask;
+
+ ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
+ SKL_BASE_FW_MODULE_ID, &dx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to set dsp to D0:core id= %d\n",
+ core_id);
+ skl_dsp_disable_core(ctx, core_mask);
+ }
+ }
+
+ skl->cores.state[core_id] = SKL_DSP_RUNNING;
return ret;
}
-static int skl_set_dsp_D3(struct sst_dsp *ctx)
+static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
int ret;
struct skl_ipc_dxstate_info dx;
struct skl_sst *skl = ctx->thread_context;
+ unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
- dev_dbg(ctx->dev, "In %s:\n", __func__);
- mutex_lock(&ctx->mutex);
- if (!is_skl_dsp_running(ctx)) {
- mutex_unlock(&ctx->mutex);
- return 0;
- }
- mutex_unlock(&ctx->mutex);
-
- dx.core_mask = SKL_DSP_CORE0_MASK;
+ dx.core_mask = core_mask;
dx.dx_mask = SKL_IPC_D3_MASK;
+
ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
if (ret < 0)
- dev_err(ctx->dev,
- "D3 request to FW failed, continuing reset: %d", ret);
-
- /* disable Interrupt */
- ctx->cl_dev.ops.cl_cleanup_controller(ctx);
- skl_cldma_int_disable(ctx);
- skl_ipc_op_int_disable(ctx);
- skl_ipc_int_disable(ctx);
-
- ret = skl_dsp_disable_core(ctx);
- if (ret < 0) {
- dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
- ret = -EIO;
+ dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);
+
+ if (core_id == SKL_DSP_CORE0_ID) {
+ /* disable Interrupt */
+ ctx->cl_dev.ops.cl_cleanup_controller(ctx);
+ skl_cldma_int_disable(ctx);
+ skl_ipc_op_int_disable(ctx);
+ skl_ipc_int_disable(ctx);
}
- skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+ ret = skl_dsp_disable_core(ctx, core_mask);
+ if (ret < 0)
+ return ret;
+
+ skl->cores.state[core_id] = SKL_DSP_RESET;
return ret;
}
@@ -360,6 +397,19 @@ static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
return ret;
}
+void skl_clear_module_cnt(struct sst_dsp *ctx)
+{
+ struct skl_module_table *module;
+
+ if (list_empty(&ctx->module_list))
+ return;
+
+ list_for_each_entry(module, &ctx->module_list, list) {
+ module->usage_cnt = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(skl_clear_module_cnt);
+
static void skl_clear_module_table(struct sst_dsp *ctx)
{
struct skl_module_table *module, *tmp;
@@ -409,6 +459,7 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
skl->dev = dev;
skl_dev.thread_context = skl;
+ INIT_LIST_HEAD(&skl->uuid_list);
skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
if (!skl->dsp) {
@@ -432,12 +483,16 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
if (ret)
return ret;
+ skl->cores.count = 2;
+
ret = sst->fw_ops.load_fw(sst);
if (ret < 0) {
dev_err(dev, "Load base fw failed : %d", ret);
goto cleanup;
}
+ skl_dsp_init_core_state(sst);
+
if (dsp)
*dsp = skl;
@@ -452,6 +507,7 @@ EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
skl_clear_module_table(ctx->dsp);
+ skl_freeup_uuid_list(ctx);
skl_ipc_free(&ctx->ipc);
ctx->dsp->ops->free(ctx->dsp);
if (ctx->boot_complete) {
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 3e036b0349b9..cc0150fc2601 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -379,43 +379,6 @@ static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
}
/*
- * A pipe can have multiple modules, each of them will be a DAPM widget as
- * well. While managing a pipeline we need to get the list of all the
- * widgets in a pipelines, so this helper - skl_tplg_get_pipe_widget() helps
- * to get the SKL type widgets in that pipeline
- */
-static int skl_tplg_alloc_pipe_widget(struct device *dev,
- struct snd_soc_dapm_widget *w, struct skl_pipe *pipe)
-{
- struct skl_module_cfg *src_module = NULL;
- struct snd_soc_dapm_path *p = NULL;
- struct skl_pipe_module *p_module = NULL;
-
- p_module = devm_kzalloc(dev, sizeof(*p_module), GFP_KERNEL);
- if (!p_module)
- return -ENOMEM;
-
- p_module->w = w;
- list_add_tail(&p_module->node, &pipe->w_list);
-
- snd_soc_dapm_widget_for_each_sink_path(w, p) {
- if ((p->sink->priv == NULL)
- && (!is_skl_dsp_widget_type(w)))
- continue;
-
- if ((p->sink->priv != NULL) && p->connect
- && is_skl_dsp_widget_type(p->sink)) {
-
- src_module = p->sink->priv;
- if (pipe->ppl_id == src_module->pipe->ppl_id)
- skl_tplg_alloc_pipe_widget(dev,
- p->sink, pipe);
- }
- }
- return 0;
-}
-
-/*
* some modules can have multiple params set from user control and
* need to be set after module is initialized. If set_param flag is
* set module params will be done after module is initialised.
@@ -448,7 +411,7 @@ static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
if (bc->set_params == SKL_PARAM_SET) {
ret = skl_set_module_params(ctx,
- (u32 *)bc->params, bc->max,
+ (u32 *)bc->params, bc->size,
bc->param_id, mconfig);
if (ret < 0)
return ret;
@@ -483,7 +446,7 @@ static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
continue;
mconfig->formats_config.caps = (u32 *)&bc->params;
- mconfig->formats_config.caps_size = bc->max;
+ mconfig->formats_config.caps_size = bc->size;
break;
}
@@ -514,8 +477,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
mconfig->id.module_id, mconfig->guid);
@@ -539,6 +500,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
if (ret < 0)
return ret;
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
return ret;
@@ -591,9 +553,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
if (!skl_is_pipe_mem_avail(skl, mconfig))
return -ENOMEM;
- skl_tplg_alloc_pipe_mem(skl, mconfig);
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
/*
* Create a list of modules for pipe.
* This list contains modules from source to sink
@@ -602,19 +561,8 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
if (ret < 0)
return ret;
- /*
- * we create a w_list of all widgets in that pipe. This list is not
- * freed on PMD event as widgets within a pipe are static. This
- * saves us cycles to get widgets in pipe every time.
- *
- * So if we have already initialized all the widgets of a pipeline
- * we skip, so check for list_empty and create the list if empty
- */
- if (list_empty(&s_pipe->w_list)) {
- ret = skl_tplg_alloc_pipe_widget(ctx->dev, w, s_pipe);
- if (ret < 0)
- return ret;
- }
+ skl_tplg_alloc_pipe_mem(skl, mconfig);
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
/* Init all pipe modules from source to sink */
ret = skl_tplg_init_pipe_modules(skl, s_pipe);
@@ -949,13 +897,17 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
struct skl_pipe *s_pipe = mconfig->pipe;
int ret = 0;
+ if (s_pipe->state == SKL_PIPE_INVALID)
+ return -EINVAL;
+
skl_tplg_free_pipe_mcps(skl, mconfig);
skl_tplg_free_pipe_mem(skl, mconfig);
list_for_each_entry(w_module, &s_pipe->w_list, node) {
dst_module = w_module->w->priv;
- skl_tplg_free_pipe_mcps(skl, dst_module);
+ if (mconfig->m_state >= SKL_MODULE_INIT_DONE)
+ skl_tplg_free_pipe_mcps(skl, dst_module);
if (src_module == NULL) {
src_module = dst_module;
continue;
@@ -1102,7 +1054,7 @@ static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
if (w->power)
skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
- bc->max, bc->param_id, mconfig);
+ bc->size, bc->param_id, mconfig);
/* decrement size for TLV header */
size -= 2 * sizeof(u32);
@@ -1136,6 +1088,10 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
struct skl *skl = get_skl_ctx(w->dapm->dev);
if (ac->params) {
+ if (size > ac->max)
+ return -EINVAL;
+
+ ac->size = size;
/*
* if the param_is is of type Vendor, firmware expects actual
* parameter id and size from the control.
@@ -1151,7 +1107,7 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
if (w->power)
return skl_set_module_params(skl->skl_sst,
- (u32 *)ac->params, ac->max,
+ (u32 *)ac->params, ac->size,
ac->param_id, mconfig);
}
@@ -1159,6 +1115,39 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
}
/*
+ * Fill the dma id for host and link. In case of a passthrough
+ * pipeline, both the host and the link copier are in the same
+ * pipeline, so copy the link or host dma id based on dev_type.
+ */
+static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
+ struct skl_pipe_params *params)
+{
+ struct skl_pipe *pipe = mcfg->pipe;
+
+ if (pipe->passthru) {
+ switch (mcfg->dev_type) {
+ case SKL_DEVICE_HDALINK:
+ pipe->p_params->link_dma_id = params->link_dma_id;
+ break;
+
+ case SKL_DEVICE_HDAHOST:
+ pipe->p_params->host_dma_id = params->host_dma_id;
+ break;
+
+ default:
+ break;
+ }
+ pipe->p_params->s_fmt = params->s_fmt;
+ pipe->p_params->ch = params->ch;
+ pipe->p_params->s_freq = params->s_freq;
+ pipe->p_params->stream = params->stream;
+
+ } else {
+ memcpy(pipe->p_params, params, sizeof(*params));
+ }
+}
+
+/*
* The FE params are passed by hw_params of the DAI.
* On hw_params, the params are stored in Gateway module of the FE and we
* need to calculate the format in DSP module configuration, that
@@ -1168,10 +1157,9 @@ int skl_tplg_update_pipe_params(struct device *dev,
struct skl_module_cfg *mconfig,
struct skl_pipe_params *params)
{
- struct skl_pipe *pipe = mconfig->pipe;
struct skl_module_fmt *format = NULL;
- memcpy(pipe->p_params, params, sizeof(*params));
+ skl_tplg_fill_dma_id(mconfig, params);
if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
format = &mconfig->in_fmt[0];
@@ -1358,12 +1346,11 @@ static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
struct skl_module_cfg *mconfig,
struct skl_pipe_params *params)
{
- struct skl_pipe *pipe = mconfig->pipe;
struct nhlt_specific_cfg *cfg;
struct skl *skl = get_skl_ctx(dai->dev);
int link_type = skl_tplg_be_link_type(mconfig->dev_type);
- memcpy(pipe->p_params, params, sizeof(*params));
+ skl_tplg_fill_dma_id(mconfig, params);
if (link_type == NHLT_LINK_HDA)
return 0;
@@ -1554,6 +1541,55 @@ static void skl_tplg_fill_fmt(struct skl_module_fmt *dst_fmt,
}
}
+static void skl_clear_pin_config(struct snd_soc_platform *platform,
+ struct snd_soc_dapm_widget *w)
+{
+ int i;
+ struct skl_module_cfg *mconfig;
+ struct skl_pipe *pipe;
+
+ if (!strncmp(w->dapm->component->name, platform->component.name,
+ strlen(platform->component.name))) {
+ mconfig = w->priv;
+ pipe = mconfig->pipe;
+ for (i = 0; i < mconfig->max_in_queue; i++) {
+ mconfig->m_in_pin[i].in_use = false;
+ mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND;
+ }
+ for (i = 0; i < mconfig->max_out_queue; i++) {
+ mconfig->m_out_pin[i].in_use = false;
+ mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND;
+ }
+ pipe->state = SKL_PIPE_INVALID;
+ mconfig->m_state = SKL_MODULE_UNINIT;
+ }
+}
+
+void skl_cleanup_resources(struct skl *skl)
+{
+ struct skl_sst *ctx = skl->skl_sst;
+ struct snd_soc_platform *soc_platform = skl->platform;
+ struct snd_soc_dapm_widget *w;
+ struct snd_soc_card *card;
+
+ if (soc_platform == NULL)
+ return;
+
+ card = soc_platform->component.card;
+ if (!card || !card->instantiated)
+ return;
+
+ skl->resource.mem = 0;
+ skl->resource.mcps = 0;
+
+ list_for_each_entry(w, &card->widgets, list) {
+ if (is_skl_dsp_widget_type(w) && (w->priv != NULL))
+ skl_clear_pin_config(soc_platform, w);
+ }
+
+ skl_clear_module_cnt(ctx->dsp);
+}
+
/*
* Topology core widget load callback
*
@@ -1585,6 +1621,10 @@ static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
w->priv = mconfig;
memcpy(&mconfig->guid, &dfw_config->uuid, 16);
+ ret = snd_skl_get_module_info(skl->skl_sst, mconfig->guid, dfw_config);
+ if (ret < 0)
+ return ret;
+
mconfig->id.module_id = dfw_config->module_id;
mconfig->id.instance_id = dfw_config->instance_id;
mconfig->mcps = dfw_config->max_mcps;
@@ -1683,6 +1723,7 @@ static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
ac->max = dfw_ac->max;
ac->param_id = dfw_ac->param_id;
ac->set_params = dfw_ac->set_params;
+ ac->size = dfw_ac->max;
if (ac->max) {
ac->params = (char *) devm_kzalloc(dev, ac->max, GFP_KERNEL);
@@ -1733,6 +1774,60 @@ static struct snd_soc_tplg_ops skl_tplg_ops = {
.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
};
+/*
+ * A pipe can have multiple modules, each of them will be a DAPM widget as
+ * A pipe can have multiple modules, each of which is also a DAPM widget.
+ * While managing a pipeline we need the list of all the widgets in that
+ * pipeline, so this helper - skl_tplg_create_pipe_widget_list() -
+ * collects the SKL type widgets of each pipeline.
+static int skl_tplg_create_pipe_widget_list(struct snd_soc_platform *platform)
+{
+ struct snd_soc_dapm_widget *w;
+ struct skl_module_cfg *mcfg = NULL;
+ struct skl_pipe_module *p_module = NULL;
+ struct skl_pipe *pipe;
+
+ list_for_each_entry(w, &platform->component.card->widgets, list) {
+ if (is_skl_dsp_widget_type(w) && w->priv != NULL) {
+ mcfg = w->priv;
+ pipe = mcfg->pipe;
+
+ p_module = devm_kzalloc(platform->dev,
+ sizeof(*p_module), GFP_KERNEL);
+ if (!p_module)
+ return -ENOMEM;
+
+ p_module->w = w;
+ list_add_tail(&p_module->node, &pipe->w_list);
+ }
+ }
+
+ return 0;
+}
+
+static void skl_tplg_set_pipe_type(struct skl *skl, struct skl_pipe *pipe)
+{
+ struct skl_pipe_module *w_module;
+ struct snd_soc_dapm_widget *w;
+ struct skl_module_cfg *mconfig;
+ bool host_found = false, link_found = false;
+
+ list_for_each_entry(w_module, &pipe->w_list, node) {
+ w = w_module->w;
+ mconfig = w->priv;
+
+ if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
+ host_found = true;
+ else if (mconfig->dev_type != SKL_DEVICE_NONE)
+ link_found = true;
+ }
+
+ if (host_found && link_found)
+ pipe->passthru = true;
+ else
+ pipe->passthru = false;
+}
+
/* This will be read from topology manifest, currently defined here */
#define SKL_MAX_MCPS 30000000
#define SKL_FW_MAX_MEM 1000000
@@ -1746,6 +1841,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
const struct firmware *fw;
struct hdac_bus *bus = ebus_to_hbus(ebus);
struct skl *skl = ebus_to_skl(ebus);
+ struct skl_pipeline *ppl;
ret = request_firmware(&fw, skl->tplg_name, bus->dev);
if (ret < 0) {
@@ -1775,6 +1871,12 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
skl->resource.max_mem = SKL_FW_MAX_MEM;
skl->tplg = fw;
+ ret = skl_tplg_create_pipe_widget_list(platform);
+ if (ret < 0)
+ return ret;
+
+ list_for_each_entry(ppl, &skl->ppl_list, node)
+ skl_tplg_set_pipe_type(skl, ppl->pipe);
return 0;
}
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index e4b399cd7868..22d3ef83817d 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -244,7 +244,8 @@ enum skl_pipe_state {
SKL_PIPE_INVALID = 0,
SKL_PIPE_CREATED = 1,
SKL_PIPE_PAUSED = 2,
- SKL_PIPE_STARTED = 3
+ SKL_PIPE_STARTED = 3,
+ SKL_PIPE_RESET = 4
};
struct skl_pipe_module {
@@ -270,6 +271,7 @@ struct skl_pipe {
struct skl_pipe_params *p_params;
enum skl_pipe_state state;
struct list_head w_list;
+ bool passthru;
};
enum skl_module_state {
@@ -319,6 +321,7 @@ struct skl_algo_data {
u32 param_id;
u32 set_params;
u32 max;
+ u32 size;
char *params;
};
@@ -357,6 +360,8 @@ int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config);
int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 06d8c263c68f..cd59536a761d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -35,6 +35,8 @@
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
+static struct skl_machine_pdata skl_dmic_data;
+
/*
* initialize the PCI registers
*/
@@ -184,6 +186,7 @@ static int _skl_suspend(struct hdac_ext_bus *ebus)
{
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct pci_dev *pci = to_pci_dev(bus->dev);
int ret;
snd_hdac_ext_bus_link_power_down_all(ebus);
@@ -193,9 +196,12 @@ static int _skl_suspend(struct hdac_ext_bus *ebus)
return ret;
snd_hdac_bus_stop_chip(bus);
+ update_pci_dword(pci, AZX_PCIREG_PGCTL,
+ AZX_PGCTL_LSRMD_MASK, AZX_PGCTL_LSRMD_MASK);
skl_enable_miscbdcge(bus->dev, false);
snd_hdac_bus_enter_link_reset(bus);
skl_enable_miscbdcge(bus->dev, true);
+ skl_cleanup_resources(skl);
return 0;
}
@@ -242,6 +248,7 @@ static int skl_suspend(struct device *dev)
ret = _skl_suspend(ebus);
if (ret < 0)
return ret;
+ skl->skl_sst->fw_loaded = false;
}
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
@@ -397,6 +404,10 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
platform_device_put(pdev);
return -EIO;
}
+
+ if (mach->pdata)
+ dev_set_drvdata(&pdev->dev, mach->pdata);
+
skl->i2s_dev = pdev;
return 0;
@@ -657,6 +668,8 @@ static int skl_probe(struct pci_dev *pci,
skl->pci_id = pci->device;
+ device_disable_async_suspend(bus->dev);
+
skl->nhlt = skl_nhlt_init(bus->dev);
if (skl->nhlt == NULL)
@@ -666,6 +679,8 @@ static int skl_probe(struct pci_dev *pci,
pci_set_drvdata(skl->pci, ebus);
+ skl_dmic_data.dmic_num = skl_get_dmic_geo(skl);
+
/* check if dsp is there */
if (ebus->ppcap) {
err = skl_machine_device_register(skl,
@@ -713,7 +728,7 @@ static int skl_probe(struct pci_dev *pci,
list_for_each_entry(hlink, &ebus->hlink_list, list)
snd_hdac_ext_bus_link_put(ebus, hlink);
- /*configure PM */
+ /* configure PM */
pm_runtime_put_noidle(bus->dev);
pm_runtime_allow(bus->dev);
@@ -766,8 +781,7 @@ static void skl_remove(struct pci_dev *pci)
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
- if (skl->tplg)
- release_firmware(skl->tplg);
+ release_firmware(skl->tplg);
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
@@ -786,15 +800,23 @@ static void skl_remove(struct pci_dev *pci)
static struct sst_acpi_mach sst_skl_devdata[] = {
{ "INT343A", "skl_alc286s_i2s", "intel/dsp_fw_release.bin", NULL, NULL, NULL },
- { "INT343B", "skl_nau88l25_ssm4567_i2s", "intel/dsp_fw_release.bin",
- NULL, NULL, NULL },
- { "MX98357A", "skl_nau88l25_max98357a_i2s", "intel/dsp_fw_release.bin",
- NULL, NULL, NULL },
+ { "INT343B", "skl_n88l25_s4567", "intel/dsp_fw_release.bin",
+ NULL, NULL, &skl_dmic_data },
+ { "MX98357A", "skl_n88l25_m98357a", "intel/dsp_fw_release.bin",
+ NULL, NULL, &skl_dmic_data },
{}
};
static struct sst_acpi_mach sst_bxtp_devdata[] = {
{ "INT343A", "bxt_alc298s_i2s", "intel/dsp_fw_bxtn.bin", NULL, NULL, NULL },
+	{ "DLGS7219", "bxt_da7219_max98357a_i2s", "intel/dsp_fw_bxtn.bin", NULL, NULL, NULL },
+	{}
+};
+
+static struct sst_acpi_mach sst_kbl_devdata[] = {
+ { "INT343A", "kbl_alc286s_i2s", "intel/dsp_fw_kbl.bin", NULL, NULL, NULL },
+ { "INT343B", "kbl_n88l25_s4567", "intel/dsp_fw_kbl.bin", NULL, NULL, &skl_dmic_data },
+ { "MX98357A", "kbl_n88l25_m98357a", "intel/dsp_fw_kbl.bin", NULL, NULL, &skl_dmic_data },
+ {}
};
/* PCI IDs */
@@ -805,6 +827,9 @@ static const struct pci_device_id skl_ids[] = {
/* BXT-P */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = (unsigned long)&sst_bxtp_devdata},
+ /* KBL */
+ { PCI_DEVICE(0x8086, 0x9D71),
+ .driver_data = (unsigned long)&sst_kbl_devdata},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 4b4b3876aea9..9064e5b0d676 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -48,6 +48,8 @@
#define AZX_REG_VS_SDXEFIFOS_XBASE 0x1094
#define AZX_REG_VS_SDXEFIFOS_XINTERVAL 0x20
+#define AZX_PCIREG_PGCTL 0x44
+#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PCIREG_CGCTL 0x48
#define AZX_CGCTL_MISCBDCGE_MASK (1 << 6)
@@ -65,6 +67,7 @@ struct skl {
unsigned int init_failed:1; /* delayed init failed */
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
+ struct snd_soc_platform *platform;
struct nhlt_acpi_table *nhlt; /* nhlt ptr */
struct skl_sst *skl_sst; /* sst skl ctx */
@@ -90,6 +93,11 @@ struct skl_dma_params {
u8 stream_tag;
};
+/* to pass dmic data */
+struct skl_machine_pdata {
+ u32 dmic_num;
+};
+
struct skl_dsp_ops {
int id;
struct skl_dsp_loader_ops (*loader_ops)(void);
@@ -108,9 +116,11 @@ void skl_nhlt_free(struct nhlt_acpi_table *addr);
struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
+int skl_get_dmic_geo(struct skl *skl);
int skl_nhlt_update_topology_bin(struct skl *skl);
int skl_init_dsp(struct skl *skl);
int skl_free_dsp(struct skl *skl);
int skl_suspend_dsp(struct skl *skl);
int skl_resume_dsp(struct skl *skl);
+void skl_cleanup_resources(struct skl *skl);
#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 3abf51c07851..05cf809cf9e1 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -1,15 +1,40 @@
config SND_SOC_MEDIATEK
- tristate "ASoC support for Mediatek chip"
+ tristate
+
+config SND_SOC_MT2701
+ tristate "ASoC support for Mediatek MT2701 chip"
+ depends on ARCH_MEDIATEK
+ select SND_SOC_MEDIATEK
+ help
+ This adds ASoC driver for Mediatek MT2701 boards
+ that can be used with other codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MT2701_CS42448
+	tristate "ASoC Audio driver for MT2701 with CS42448 codec"
+ depends on SND_SOC_MT2701
+ select SND_SOC_CS42XX8_I2C
+ select SND_SOC_BT_SCO
+ help
+ This adds ASoC driver for Mediatek MT2701 boards
+ with the CS42448 codecs.
+ Select Y if you have such device.
+ If unsure select "N".
+
+config SND_SOC_MT8173
+ tristate "ASoC support for Mediatek MT8173 chip"
depends on ARCH_MEDIATEK
+ select SND_SOC_MEDIATEK
help
- This adds ASoC platform driver support for Mediatek chip
+ This adds ASoC platform driver support for Mediatek MT8173 chip
that can be used with other codecs.
Select Y if you have such device.
Ex: MT8173
config SND_SOC_MT8173_MAX98090
tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_MAX98090
help
This adds ASoC driver for Mediatek MT8173 boards
@@ -19,8 +44,9 @@ config SND_SOC_MT8173_MAX98090
config SND_SOC_MT8173_RT5650
tristate "ASoC Audio driver for MT8173 with RT5650 codec"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_RT5645
+ select SND_SOC_HDMI_CODEC
help
This adds ASoC driver for Mediatek MT8173 boards
with the RT5650 audio codec.
@@ -29,7 +55,7 @@ config SND_SOC_MT8173_RT5650
config SND_SOC_MT8173_RT5650_RT5514
tristate "ASoC Audio driver for MT8173 with RT5650 RT5514 codecs"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_RT5645
select SND_SOC_RT5514
help
@@ -40,7 +66,7 @@ config SND_SOC_MT8173_RT5650_RT5514
config SND_SOC_MT8173_RT5650_RT5676
tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
- depends on SND_SOC_MEDIATEK && I2C
+ depends on SND_SOC_MT8173 && I2C
select SND_SOC_RT5645
select SND_SOC_RT5677
select SND_SOC_HDMI_CODEC
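With the Kconfig split above, MT2701 and MT8173 become separate user-visible platform options that both select the now-hidden SND_SOC_MEDIATEK common code (so it appears in .config automatically). An illustrative .config fragment using only the symbols introduced or renamed here:

CONFIG_SND_SOC_MT2701=m
CONFIG_SND_SOC_MT2701_CS42448=m
CONFIG_SND_SOC_MT8173=m
CONFIG_SND_SOC_MT8173_RT5650=m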
diff --git a/sound/soc/mediatek/Makefile b/sound/soc/mediatek/Makefile
index d486860c0a88..6bcab35dc828 100644
--- a/sound/soc/mediatek/Makefile
+++ b/sound/soc/mediatek/Makefile
@@ -1,7 +1,3 @@
-# MTK Platform Support
-obj-$(CONFIG_SND_SOC_MEDIATEK) += mtk-afe-pcm.o
-# Machine support
-obj-$(CONFIG_SND_SOC_MT8173_MAX98090) += mt8173-max98090.o
-obj-$(CONFIG_SND_SOC_MT8173_RT5650) += mt8173-rt5650.o
-obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5514) += mt8173-rt5650-rt5514.o
-obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5676) += mt8173-rt5650-rt5676.o
+obj-$(CONFIG_SND_SOC_MEDIATEK) += common/
+obj-$(CONFIG_SND_SOC_MT2701) += mt2701/
+obj-$(CONFIG_SND_SOC_MT8173) += mt8173/
diff --git a/sound/soc/mediatek/common/Makefile b/sound/soc/mediatek/common/Makefile
new file mode 100644
index 000000000000..a55d33bc7b01
--- /dev/null
+++ b/sound/soc/mediatek/common/Makefile
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# platform driver
+snd-soc-mtk-common-objs := mtk-afe-platform-driver.o mtk-afe-fe-dai.o
+obj-$(CONFIG_SND_SOC_MEDIATEK) += snd-soc-mtk-common.o
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
new file mode 100644
index 000000000000..b788791b0a35
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -0,0 +1,379 @@
+/*
+ * mtk-afe-fe-dais.c -- Mediatek afe fe dai operator
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "mtk-afe-fe-dai.h"
+#include "mtk-base-afe.h"
+
+#define AFE_BASE_END_OFFSET 8
+
+int mtk_regmap_update_bits(struct regmap *map, int reg, unsigned int mask,
+ unsigned int val)
+{
+ if (reg < 0)
+ return 0;
+ return regmap_update_bits(map, reg, mask, val);
+}
+
+int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
+{
+ if (reg < 0)
+ return 0;
+ return regmap_write(map, reg, val);
+}
+
+int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int memif_num = rtd->cpu_dai->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
+ const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
+ int ret;
+
+ memif->substream = substream;
+
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
+ /* enable agent */
+ mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
+ 1 << memif->data->agent_disable_shift,
+ 0 << memif->data->agent_disable_shift);
+
+ snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
+
+ /*
+ * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
+ * smaller than period_size due to AFE's internal buffer.
+ * This easily leads to overrun when avail_min is period_size.
+ * One more period can hold the possible unread buffer.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ int periods_max = mtk_afe_hardware->periods_max;
+
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS,
+ 3, periods_max);
+ if (ret < 0) {
+ dev_err(afe->dev, "hw_constraint_minmax failed\n");
+ return ret;
+ }
+ }
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
+
+ /* dynamic allocate irq to memif */
+ if (memif->irq_usage < 0) {
+ int irq_id = mtk_dynamic_irq_acquire(afe);
+
+ if (irq_id != afe->irqs_size) {
+ /* link */
+ memif->irq_usage = irq_id;
+ } else {
+ dev_err(afe->dev, "%s() error: no more asys irq\n",
+ __func__);
+ ret = -EBUSY;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);
+
+void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int irq_id;
+
+ irq_id = memif->irq_usage;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
+ 1 << memif->data->agent_disable_shift,
+ 1 << memif->data->agent_disable_shift);
+
+ if (!memif->const_irq) {
+ mtk_dynamic_irq_release(afe, irq_id);
+ memif->irq_usage = -1;
+ memif->substream = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);
+
+int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int msb_at_bit33 = 0;
+ int ret, fs = 0;
+
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+
+ msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
+ memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
+ memif->buffer_size = substream->runtime->dma_bytes;
+
+ /* start */
+ mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
+ memif->phys_buf_addr);
+ /* end */
+ mtk_regmap_write(afe->regmap,
+ memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
+ memif->phys_buf_addr + memif->buffer_size - 1);
+
+ /* set MSB to 33-bit */
+ mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
+ 1 << memif->data->msb_shift,
+ msb_at_bit33 << memif->data->msb_shift);
+
+ /* set channel */
+ if (memif->data->mono_shift >= 0) {
+ unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
+ 1 << memif->data->mono_shift,
+ mono << memif->data->mono_shift);
+ }
+
+ /* set rate */
+ if (memif->data->fs_shift < 0)
+ return 0;
+
+ fs = afe->memif_fs(substream, params_rate(params));
+
+ if (fs < 0)
+ return -EINVAL;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
+ memif->data->fs_maskbit << memif->data->fs_shift,
+ fs << memif->data->fs_shift);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);
+
+int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
+
+int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+ unsigned int counter = runtime->period_size;
+ int fs;
+
+ dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (memif->data->enable_shift >= 0)
+ mtk_regmap_update_bits(afe->regmap,
+ memif->data->enable_reg,
+ 1 << memif->data->enable_shift,
+ 1 << memif->data->enable_shift);
+
+ /* set irq counter */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit
+ << irq_data->irq_cnt_shift,
+ counter << irq_data->irq_cnt_shift);
+
+ /* set irq fs */
+ fs = afe->irq_fs(substream, runtime->rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
+ irq_data->irq_fs_maskbit
+ << irq_data->irq_fs_shift,
+ fs << irq_data->irq_fs_shift);
+
+ /* enable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1 << irq_data->irq_en_shift,
+ 1 << irq_data->irq_en_shift);
+
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
+ 1 << memif->data->enable_shift, 0);
+ /* disable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1 << irq_data->irq_en_shift,
+ 0 << irq_data->irq_en_shift);
+ /* and clear pending IRQ */
+ mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
+ 1 << irq_data->irq_clr_shift);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);
+
+int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int hd_audio = 0;
+
+ /* set hd mode */
+ switch (substream->runtime->format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ hd_audio = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ hd_audio = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ hd_audio = 1;
+ break;
+ default:
+ dev_err(afe->dev, "%s() error: unsupported format %d\n",
+ __func__, substream->runtime->format);
+ break;
+ }
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
+ 1 << memif->data->hd_shift,
+ hd_audio << memif->data->hd_shift);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
+
+const struct snd_soc_dai_ops mtk_afe_fe_ops = {
+ .startup = mtk_afe_fe_startup,
+ .shutdown = mtk_afe_fe_shutdown,
+ .hw_params = mtk_afe_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mtk_afe_fe_trigger,
+};
+EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);
+
+static DEFINE_MUTEX(irqs_lock);
+int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
+{
+ int i;
+
+ mutex_lock(&afe->irq_alloc_lock);
+ for (i = 0; i < afe->irqs_size; ++i) {
+ if (afe->irqs[i].irq_occupyed == 0) {
+ afe->irqs[i].irq_occupyed = 1;
+ mutex_unlock(&afe->irq_alloc_lock);
+ return i;
+ }
+ }
+ mutex_unlock(&afe->irq_alloc_lock);
+ return afe->irqs_size;
+}
+EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);
+
+int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
+{
+ mutex_lock(&afe->irq_alloc_lock);
+ if (irq_id >= 0 && irq_id < afe->irqs_size) {
+ afe->irqs[irq_id].irq_occupyed = 0;
+ mutex_unlock(&afe->irq_alloc_lock);
+ return 0;
+ }
+ mutex_unlock(&afe->irq_alloc_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);
+
+int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct device *dev = afe->dev;
+ struct regmap *regmap = afe->regmap;
+ int i;
+
+ if (pm_runtime_status_suspended(dev) || afe->suspended)
+ return 0;
+
+ if (!afe->reg_back_up)
+ afe->reg_back_up =
+ devm_kcalloc(dev, afe->reg_back_up_list_num,
+ sizeof(unsigned int), GFP_KERNEL);
+
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ regmap_read(regmap, afe->reg_back_up_list[i],
+ &afe->reg_back_up[i]);
+
+ afe->suspended = true;
+ afe->runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);
+
+int mtk_afe_dai_resume(struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct device *dev = afe->dev;
+ struct regmap *regmap = afe->regmap;
+ int i = 0;
+
+ if (pm_runtime_status_suspended(dev) || !afe->suspended)
+ return 0;
+
+ afe->runtime_resume(dev);
+
+ if (!afe->reg_back_up)
+ dev_dbg(dev, "%s no reg_backup\n", __func__);
+
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+ afe->reg_back_up[i]);
+
+ afe->suspended = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);
+
+MODULE_DESCRIPTION("Mediatek simple fe dai operator");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+
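As a quick check of the register programming in mtk_afe_fe_hw_params() above: the base register takes the lower 32 bits of the DMA address, the end register at reg_ofs_base + AFE_BASE_END_OFFSET takes the address of the last byte, and msb_at_bit33 records whether the buffer sits above the 32-bit boundary. A numeric sketch with an invented address and size:

/* Sketch only -- the address and size below are made up. */
static void example_memif_window(void)
{
	u64 dma_addr  = 0x123456000ULL;		/* runtime->dma_addr */
	u32 dma_bytes = 0x8000;			/* runtime->dma_bytes, 32 KiB */

	u32 base = lower_32_bits(dma_addr);	/* 0x23456000 -> reg_ofs_base */
	u32 end  = base + dma_bytes - 1;	/* 0x2345dfff -> reg_ofs_base + 8 */
	int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;	/* 1 here */
}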
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.h b/sound/soc/mediatek/common/mtk-afe-fe-dai.h
new file mode 100644
index 000000000000..28cb17854da1
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.h
@@ -0,0 +1,45 @@
+/*
+ * mtk-afe-fe-dais.h -- Mediatek afe fe dai operator definition
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_AFE_FE_DAI_H_
+#define _MTK_AFE_FE_DAI_H_
+
+struct snd_soc_dai_ops;
+struct mtk_base_afe;
+struct mtk_base_afe_memif;
+
+int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai);
+
+extern const struct snd_soc_dai_ops mtk_afe_fe_ops;
+
+int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe);
+int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id);
+int mtk_afe_dai_suspend(struct snd_soc_dai *dai);
+int mtk_afe_dai_resume(struct snd_soc_dai *dai);
+
+#endif
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
new file mode 100644
index 000000000000..82d439c15f4e
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -0,0 +1,90 @@
+/*
+ * mtk-afe-platform-driver.c -- Mediatek afe platform driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <sound/soc.h>
+
+#include "mtk-afe-platform-driver.h"
+#include "mtk-base-afe.h"
+
+static snd_pcm_uframes_t mtk_afe_pcm_pointer
+ (struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ const struct mtk_base_memif_data *memif_data = memif->data;
+ struct regmap *regmap = afe->regmap;
+ struct device *dev = afe->dev;
+ int reg_ofs_base = memif_data->reg_ofs_base;
+ int reg_ofs_cur = memif_data->reg_ofs_cur;
+ unsigned int hw_ptr = 0, hw_base = 0;
+ int ret, pcm_ptr_bytes;
+
+ ret = regmap_read(regmap, reg_ofs_cur, &hw_ptr);
+ if (ret || hw_ptr == 0) {
+ dev_err(dev, "%s hw_ptr err\n", __func__);
+ pcm_ptr_bytes = 0;
+ goto POINTER_RETURN_FRAMES;
+ }
+
+ ret = regmap_read(regmap, reg_ofs_base, &hw_base);
+ if (ret || hw_base == 0) {
+		dev_err(dev, "%s hw_base err\n", __func__);
+ pcm_ptr_bytes = 0;
+ goto POINTER_RETURN_FRAMES;
+ }
+
+ pcm_ptr_bytes = hw_ptr - hw_base;
+
+POINTER_RETURN_FRAMES:
+ return bytes_to_frames(substream->runtime, pcm_ptr_bytes);
+}
+
+static const struct snd_pcm_ops mtk_afe_pcm_ops = {
+ .ioctl = snd_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+};
+
+static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ size_t size;
+ struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+
+ size = afe->mtk_afe_hardware->buffer_bytes_max;
+ return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ card->dev, size, size);
+}
+
+static void mtk_afe_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+const struct snd_soc_platform_driver mtk_afe_pcm_platform = {
+ .ops = &mtk_afe_pcm_ops,
+ .pcm_new = mtk_afe_pcm_new,
+ .pcm_free = mtk_afe_pcm_free,
+};
+EXPORT_SYMBOL_GPL(mtk_afe_pcm_platform);
+
+MODULE_DESCRIPTION("Mediatek simple platform driver");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+
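mtk_afe_pcm_platform is exported so that each SoC backend can reuse it instead of carrying its own pcm ops. A hedged sketch of the expected registration from a SoC AFE probe (example_afe_probe is invented for illustration):

#include <linux/platform_device.h>
#include <sound/soc.h>
#include "mtk-afe-platform-driver.h"

/* Sketch only: register the shared platform driver from a SoC probe. */
static int example_afe_probe(struct platform_device *pdev)
{
	/* ... allocate struct mtk_base_afe, map registers, set drvdata ... */

	return devm_snd_soc_register_platform(&pdev->dev,
					      &mtk_afe_pcm_platform);
}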
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.h b/sound/soc/mediatek/common/mtk-afe-platform-driver.h
new file mode 100644
index 000000000000..a973fc9253b4
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.h
@@ -0,0 +1,23 @@
+/*
+ * mtk-afe-platform-driver.h -- Mediatek afe platform driver definition
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_AFE_PLATFORM_DRIVER_H_
+#define _MTK_AFE_PLATFORM_DRIVER_H_
+
+extern const struct snd_soc_platform_driver mtk_afe_pcm_platform;
+
+#endif
+
diff --git a/sound/soc/mediatek/common/mtk-base-afe.h b/sound/soc/mediatek/common/mtk-base-afe.h
new file mode 100644
index 000000000000..3a78f6f17195
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-base-afe.h
@@ -0,0 +1,104 @@
+/*
+ * mtk-base-afe.h -- Mediatek base afe structure
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_BASE_AFE_H_
+#define _MTK_BASE_AFE_H_
+
+struct mtk_base_memif_data {
+ int id;
+ const char *name;
+ int reg_ofs_base;
+ int reg_ofs_cur;
+ int fs_reg;
+ int fs_shift;
+ int fs_maskbit;
+ int mono_reg;
+ int mono_shift;
+ int enable_reg;
+ int enable_shift;
+ int hd_reg;
+ int hd_shift;
+ int msb_reg;
+ int msb_shift;
+ int agent_disable_reg;
+ int agent_disable_shift;
+};
+
+struct mtk_base_irq_data {
+ int id;
+ int irq_cnt_reg;
+ int irq_cnt_shift;
+ int irq_cnt_maskbit;
+ int irq_fs_reg;
+ int irq_fs_shift;
+ int irq_fs_maskbit;
+ int irq_en_reg;
+ int irq_en_shift;
+ int irq_clr_reg;
+ int irq_clr_shift;
+};
+
+struct device;
+struct mtk_base_afe_memif;
+struct mtk_base_afe_irq;
+struct regmap;
+struct snd_pcm_substream;
+struct snd_soc_dai;
+
+struct mtk_base_afe {
+ void __iomem *base_addr;
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex irq_alloc_lock; /* dynamic alloc irq lock */
+
+ unsigned int const *reg_back_up_list;
+ unsigned int *reg_back_up;
+ unsigned int reg_back_up_list_num;
+
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ bool suspended;
+
+ struct mtk_base_afe_memif *memif;
+ int memif_size;
+ struct mtk_base_afe_irq *irqs;
+ int irqs_size;
+
+ const struct snd_pcm_hardware *mtk_afe_hardware;
+ int (*memif_fs)(struct snd_pcm_substream *substream,
+ unsigned int rate);
+ int (*irq_fs)(struct snd_pcm_substream *substream,
+ unsigned int rate);
+
+ void *platform_priv;
+};
+
+struct mtk_base_afe_memif {
+ unsigned int phys_buf_addr;
+ int buffer_size;
+ struct snd_pcm_substream *substream;
+ const struct mtk_base_memif_data *data;
+ int irq_usage;
+ int const_irq;
+};
+
+struct mtk_base_afe_irq {
+ const struct mtk_base_irq_data *irq_data;
+ int irq_occupyed;
+};
+
+#endif
+
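The two structures above are the contract between the common code and a SoC driver: the driver fills the memif and irq tables plus the memif_fs/irq_fs callbacks, and points its FE DAIs at mtk_afe_fe_ops. A hypothetical wiring sketch follows; the names and register offsets are placeholders, real values come from the SoC datasheet.

#include <sound/soc.h>
#include "mtk-base-afe.h"
#include "mtk-afe-fe-dai.h"

/* Hypothetical memif entry; negative reg/shift fields are skipped by the
 * mtk_regmap_* helpers and the fs/mono checks in mtk-afe-fe-dai.c.
 */
static const struct mtk_base_memif_data example_dl1_data = {
	.id		= 0,
	.name		= "DL1",
	.reg_ofs_base	= 0x40,
	.reg_ofs_cur	= 0x44,
	.fs_reg		= -1,
	.fs_shift	= -1,
	.mono_reg	= -1,
	.mono_shift	= -1,
	.enable_reg	= 0x10,
	.enable_shift	= 1,
	.hd_reg		= -1,
	.msb_reg	= -1,
	.agent_disable_reg = -1,
};

/* FE DAIs simply reuse the shared ops. */
static struct snd_soc_dai_driver example_fe_dai = {
	.name	= "DL1",
	.ops	= &mtk_afe_fe_ops,
	/* playback/capture stream descriptions omitted */
};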
diff --git a/sound/soc/mediatek/mt2701/Makefile b/sound/soc/mediatek/mt2701/Makefile
new file mode 100644
index 000000000000..31c3d04d4942
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/Makefile
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2015 MediaTek Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+
+# platform driver
+snd-soc-mt2701-afe-objs := mt2701-afe-pcm.o mt2701-afe-clock-ctrl.o
+obj-$(CONFIG_SND_SOC_MT2701) += snd-soc-mt2701-afe.o
+
+# machine driver
+obj-$(CONFIG_SND_SOC_MT2701_CS42448) += mt2701-cs42448.o
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
new file mode 100644
index 000000000000..b815ecc6bbf6
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
@@ -0,0 +1,464 @@
+/*
+ * mt2701-afe-clock-ctrl.c -- Mediatek 2701 afe clock ctrl
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <sound/soc.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "mt2701-afe-common.h"
+#include "mt2701-afe-clock-ctrl.h"
+
+static const char *aud_clks[MT2701_CLOCK_NUM] = {
+ [MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
+ [MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
+ [MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
+ [MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
+ [MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
+ [MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
+ [MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
+ [MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
+ [MT2701_AUD_APLL_SEL] = "top_apll_sel",
+ [MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
+ [MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
+ [MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
+ [MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
+ [MT2701_AUD_AUDPLL] = "top_audpll",
+ [MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
+ [MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
+ [MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
+ [MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
+ [MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
+ [MT2701_AUD_CLK_26M] = "clk_26m",
+ [MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
+ [MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
+ [MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
+ [MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
+ [MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
+ [MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
+ [MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
+ [MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
+ [MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
+ [MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
+ [MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
+ [MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
+ [MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
+ [MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
+ [MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
+ [MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
+ [MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
+ [MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
+ [MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
+ [MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
+ [MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
+ [MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
+ [MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
+ [MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
+};
+
+int mt2701_init_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i = 0;
+
+ for (i = 0; i < MT2701_CLOCK_NUM; i++) {
+ afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
+		if (IS_ERR(afe_priv->clocks[i])) {
+ dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
+ __func__, aud_clks[i]);
+			return PTR_ERR(afe_priv->clocks[i]);
+ }
+ }
+
+ return 0;
+}
+
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
+{
+ int ret = 0;
+
+ ret = mt2701_turn_on_a1sys_clock(afe);
+ if (ret) {
+ dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = mt2701_turn_on_a2sys_clock(afe);
+ if (ret) {
+ dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
+ __func__, ret);
+ mt2701_turn_off_a1sys_clock(afe);
+ return ret;
+ }
+
+ ret = mt2701_turn_on_afe_clock(afe);
+ if (ret) {
+ dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
+ __func__, ret);
+ mt2701_turn_off_a1sys_clock(afe);
+ mt2701_turn_off_a2sys_clock(afe);
+ return ret;
+ }
+
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
+ AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON,
+ AFE_DAC_CON0_AFE_ON);
+ regmap_write(afe->regmap, PWR2_TOP_CON,
+ PWR2_TOP_CON_INIT_VAL);
+ regmap_write(afe->regmap, PWR1_ASM_CON1,
+ PWR1_ASM_CON1_INIT_VAL);
+ regmap_write(afe->regmap, PWR2_ASM_CON1,
+ PWR2_ASM_CON1_INIT_VAL);
+
+ return 0;
+}
+
+void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
+{
+ mt2701_turn_off_afe_clock(afe);
+ mt2701_turn_off_a1sys_clock(afe);
+ mt2701_turn_off_a2sys_clock(afe);
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON, 0);
+}
+
+int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret = 0;
+
+ /* Set Mux */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
+ goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
+ afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX1_SEL],
+ aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
+ goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
+ }
+
+ /* Set Divider */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__,
+ aud_clks[MT2701_AUD_AUD_MUX1_DIV],
+ ret);
+ goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
+ }
+
+ ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
+ MT2701_AUD_AUD_MUX1_DIV_RATE);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX1_DIV],
+ MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
+ goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
+ }
+
+ /* Enable clock gate */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
+ goto A1SYS_CLK_AUD_48K_ERR;
+ }
+
+ /* Enable infra audio */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
+ goto A1SYS_CLK_INFRA_ERR;
+ }
+
+ return 0;
+
+A1SYS_CLK_INFRA_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+A1SYS_CLK_AUD_48K_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
+A1SYS_CLK_AUD_MUX1_DIV_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
+A1SYS_CLK_AUD_MUX1_SEL_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+
+ return ret;
+}
+
+void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+}
+
+int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret = 0;
+
+ /* Set Mux */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
+ goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
+ afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX2_SEL],
+ aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
+ goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
+ }
+
+ /* Set Divider */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
+ goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
+ }
+
+ ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
+ MT2701_AUD_AUD_MUX2_DIV_RATE);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUD_MUX2_DIV],
+ MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
+ goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
+ }
+
+ /* Enable clock gate */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
+ goto A2SYS_CLK_AUD_44K_ERR;
+ }
+
+ /* Enable infra audio */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
+ goto A2SYS_CLK_INFRA_ERR;
+ }
+
+ return 0;
+
+A2SYS_CLK_INFRA_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+A2SYS_CLK_AUD_44K_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
+A2SYS_CLK_AUD_MUX2_DIV_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
+A2SYS_CLK_AUD_MUX2_SEL_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+
+ return ret;
+}
+
+void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+}
+
+int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+
+ /* enable INFRA_SYS */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
+ goto AFE_AUD_INFRA_ERR;
+ }
+
+ /* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
+ goto AFE_AUD_AUDINTBUS_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
+ afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_AUDINTBUS],
+ aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
+ goto AFE_AUD_AUDINTBUS_ERR;
+ }
+
+ /* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
+ goto AFE_AUD_ASM_H_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
+ afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_ASM_H_SEL],
+ aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
+ goto AFE_AUD_ASM_H_ERR;
+ }
+
+ /* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
+ ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
+ goto AFE_AUD_ASM_M_ERR;
+ }
+
+ ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
+ afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
+ if (ret) {
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
+ aud_clks[MT2701_AUD_ASM_M_SEL],
+ aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
+ goto AFE_AUD_ASM_M_ERR;
+ }
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_AFE, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_APLL_CK, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A1SYS, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A2SYS, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_AFE_CONN, 0);
+
+ return 0;
+
+AFE_AUD_ASM_M_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
+AFE_AUD_ASM_H_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+AFE_AUD_AUDINTBUS_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
+AFE_AUD_INFRA_ERR:
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+
+ return ret;
+}
+
+void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
+
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+ clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
+ AUDIO_TOP_CON0_PDN_APLL_CK,
+ AUDIO_TOP_CON0_PDN_APLL_CK);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A1SYS,
+ AUDIO_TOP_CON4_PDN_A1SYS);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_A2SYS,
+ AUDIO_TOP_CON4_PDN_A2SYS);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_AFE_CONN,
+ AUDIO_TOP_CON4_PDN_AFE_CONN);
+}
+
+void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
+ int mclk)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret;
+ int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
+ int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;
+
+ /* Set MCLK Kx_SRC_SEL(domain) */
+ ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[aud_src_clk_id], ret);
+
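+ /*
+ * domain 0 selects AUD_MUX1 (98.304 MHz PLL, 48 kHz rate family),
+ * domain 1 selects AUD_MUX2 (90.3168 MHz PLL, 44.1 kHz rate family)
+ */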
+ if (domain == 0) {
+ ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
+ afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[aud_src_clk_id],
+ aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
+ } else {
+ ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
+ afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
+ __func__, aud_clks[aud_src_clk_id],
+ aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
+ }
+ clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);
+
+ /* Set MCLK Kx_SRC_DIV(divider) */
+ ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
+ if (ret)
+ dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
+ __func__, aud_clks[aud_src_div_id], ret);
+
+ ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
+ if (ret)
+ dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n", __func__,
+ aud_clks[aud_src_div_id], mclk, ret);
+ clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
+}
+
+MODULE_DESCRIPTION("MT2701 afe clock control");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
new file mode 100644
index 000000000000..6497d570cf09
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
@@ -0,0 +1,38 @@
+/*
+ * mt2701-afe-clock-ctrl.h -- Mediatek 2701 afe clock ctrl definition
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT2701_AFE_CLOCK_CTRL_H_
+#define _MT2701_AFE_CLOCK_CTRL_H_
+
+struct mtk_base_afe;
+
+int mt2701_init_clock(struct mtk_base_afe *afe);
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe);
+void mt2701_afe_disable_clock(struct mtk_base_afe *afe);
+
+int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe);
+void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe);
+
+int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe);
+void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe);
+
+int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe);
+void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe);
+
+void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
+ int mclk);
+
+#endif
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-common.h b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
new file mode 100644
index 000000000000..c19430e98adf
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
@@ -0,0 +1,172 @@
+/*
+ * mt2701-afe-common.h -- Mediatek 2701 audio driver definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT_2701_AFE_COMMON_H_
+#define _MT_2701_AFE_COMMON_H_
+#include <sound/soc.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include "mt2701-reg.h"
+#include "../common/mtk-base-afe.h"
+
+#define MT2701_STREAM_DIR_NUM (SNDRV_PCM_STREAM_LAST + 1)
+#define MT2701_PLL_DOMAIN_0_RATE 98304000
+#define MT2701_PLL_DOMAIN_1_RATE 90316800
+#define MT2701_AUD_AUD_MUX1_DIV_RATE (MT2701_PLL_DOMAIN_0_RATE / 2)
+#define MT2701_AUD_AUD_MUX2_DIV_RATE (MT2701_PLL_DOMAIN_1_RATE / 2)
+
+enum {
+ MT2701_I2S_1,
+ MT2701_I2S_2,
+ MT2701_I2S_3,
+ MT2701_I2S_4,
+ MT2701_I2S_NUM,
+};
+
+enum {
+ MT2701_MEMIF_DL1,
+ MT2701_MEMIF_DL2,
+ MT2701_MEMIF_DL3,
+ MT2701_MEMIF_DL4,
+ MT2701_MEMIF_DL5,
+ MT2701_MEMIF_DL_SINGLE_NUM,
+ MT2701_MEMIF_DLM = MT2701_MEMIF_DL_SINGLE_NUM,
+ MT2701_MEMIF_UL1,
+ MT2701_MEMIF_UL2,
+ MT2701_MEMIF_UL3,
+ MT2701_MEMIF_UL4,
+ MT2701_MEMIF_UL5,
+ MT2701_MEMIF_DLBT,
+ MT2701_MEMIF_ULBT,
+ MT2701_MEMIF_NUM,
+ MT2701_IO_I2S = MT2701_MEMIF_NUM,
+ MT2701_IO_2ND_I2S,
+ MT2701_IO_3RD_I2S,
+ MT2701_IO_4TH_I2S,
+ MT2701_IO_5TH_I2S,
+ MT2701_IO_6TH_I2S,
+ MT2701_IO_MRG,
+};
+
+enum {
+ MT2701_IRQ_ASYS_START,
+ MT2701_IRQ_ASYS_IRQ1 = MT2701_IRQ_ASYS_START,
+ MT2701_IRQ_ASYS_IRQ2,
+ MT2701_IRQ_ASYS_IRQ3,
+ MT2701_IRQ_ASYS_END,
+};
+
+/* 2701 clock def */
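+/* the enum order must match the aud_clks[] name table in mt2701-afe-clock-ctrl.c */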
+enum audio_system_clock_type {
+ MT2701_AUD_INFRA_SYS_AUDIO,
+ MT2701_AUD_AUD_MUX1_SEL,
+ MT2701_AUD_AUD_MUX2_SEL,
+ MT2701_AUD_AUD_MUX1_DIV,
+ MT2701_AUD_AUD_MUX2_DIV,
+ MT2701_AUD_AUD_48K_TIMING,
+ MT2701_AUD_AUD_44K_TIMING,
+ MT2701_AUD_AUDPLL_MUX_SEL,
+ MT2701_AUD_APLL_SEL,
+ MT2701_AUD_AUD1PLL_98M,
+ MT2701_AUD_AUD2PLL_90M,
+ MT2701_AUD_HADDS2PLL_98M,
+ MT2701_AUD_HADDS2PLL_294M,
+ MT2701_AUD_AUDPLL,
+ MT2701_AUD_AUDPLL_D4,
+ MT2701_AUD_AUDPLL_D8,
+ MT2701_AUD_AUDPLL_D16,
+ MT2701_AUD_AUDPLL_D24,
+ MT2701_AUD_AUDINTBUS,
+ MT2701_AUD_CLK_26M,
+ MT2701_AUD_SYSPLL1_D4,
+ MT2701_AUD_AUD_K1_SRC_SEL,
+ MT2701_AUD_AUD_K2_SRC_SEL,
+ MT2701_AUD_AUD_K3_SRC_SEL,
+ MT2701_AUD_AUD_K4_SRC_SEL,
+ MT2701_AUD_AUD_K5_SRC_SEL,
+ MT2701_AUD_AUD_K6_SRC_SEL,
+ MT2701_AUD_AUD_K1_SRC_DIV,
+ MT2701_AUD_AUD_K2_SRC_DIV,
+ MT2701_AUD_AUD_K3_SRC_DIV,
+ MT2701_AUD_AUD_K4_SRC_DIV,
+ MT2701_AUD_AUD_K5_SRC_DIV,
+ MT2701_AUD_AUD_K6_SRC_DIV,
+ MT2701_AUD_AUD_I2S1_MCLK,
+ MT2701_AUD_AUD_I2S2_MCLK,
+ MT2701_AUD_AUD_I2S3_MCLK,
+ MT2701_AUD_AUD_I2S4_MCLK,
+ MT2701_AUD_AUD_I2S5_MCLK,
+ MT2701_AUD_AUD_I2S6_MCLK,
+ MT2701_AUD_ASM_M_SEL,
+ MT2701_AUD_ASM_H_SEL,
+ MT2701_AUD_UNIVPLL2_D4,
+ MT2701_AUD_UNIVPLL2_D2,
+ MT2701_AUD_SYSPLL_D5,
+ MT2701_CLOCK_NUM
+};
+
+static const unsigned int mt2701_afe_backup_list[] = {
+ AUDIO_TOP_CON0,
+ AUDIO_TOP_CON4,
+ AUDIO_TOP_CON5,
+ ASYS_TOP_CON,
+ AFE_CONN0,
+ AFE_CONN1,
+ AFE_CONN2,
+ AFE_CONN3,
+ AFE_CONN15,
+ AFE_CONN16,
+ AFE_CONN17,
+ AFE_CONN18,
+ AFE_CONN19,
+ AFE_CONN20,
+ AFE_CONN21,
+ AFE_CONN22,
+ AFE_DAC_CON0,
+ AFE_MEMIF_PBUF_SIZE,
+};
+
+struct snd_pcm_substream;
+struct mtk_base_irq_data;
+
+struct mt2701_i2s_data {
+ int i2s_ctrl_reg;
+ int i2s_pwn_shift;
+ int i2s_asrc_fs_shift;
+ int i2s_asrc_fs_mask;
+};
+
+enum mt2701_i2s_dir {
+ I2S_OUT,
+ I2S_IN,
+ I2S_DIR_NUM,
+};
+
+struct mt2701_i2s_path {
+ int dai_id;
+ int mclk_rate;
+ int on[I2S_DIR_NUM];
+ int occupied[I2S_DIR_NUM];
+ const struct mt2701_i2s_data *i2s_data[2];
+};
+
+struct mt2701_afe_private {
+ struct clk *clocks[MT2701_CLOCK_NUM];
+ struct mt2701_i2s_path i2s_path[MT2701_I2S_NUM];
+ bool mrg_enable[MT2701_STREAM_DIR_NUM];
+};
+
+#endif
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
new file mode 100644
index 000000000000..34a6123480d3
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -0,0 +1,1656 @@
+/*
+ * Mediatek ALSA SoC AFE platform driver for 2701
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ * Ir Lian <ir.lian@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+
+#include "mt2701-afe-common.h"
+
+#include "mt2701-afe-clock-ctrl.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
+
+#define AFE_IRQ_STATUS_BITS 0xff
+
+static const struct snd_pcm_hardware mt2701_afe_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED
+ | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE,
+ .period_bytes_min = 1024,
+ .period_bytes_max = 1024 * 256,
+ .periods_min = 4,
+ .periods_max = 1024,
+ .buffer_bytes_max = 1024 * 1024 * 16,
+ .fifo_size = 0,
+};
+
+struct mt2701_afe_rate {
+ unsigned int rate;
+ unsigned int regvalue;
+};
+
+static const struct mt2701_afe_rate mt2701_afe_i2s_rates[] = {
+ { .rate = 8000, .regvalue = 0 },
+ { .rate = 12000, .regvalue = 1 },
+ { .rate = 16000, .regvalue = 2 },
+ { .rate = 24000, .regvalue = 3 },
+ { .rate = 32000, .regvalue = 4 },
+ { .rate = 48000, .regvalue = 5 },
+ { .rate = 96000, .regvalue = 6 },
+ { .rate = 192000, .regvalue = 7 },
+ { .rate = 384000, .regvalue = 8 },
+ { .rate = 7350, .regvalue = 16 },
+ { .rate = 11025, .regvalue = 17 },
+ { .rate = 14700, .regvalue = 18 },
+ { .rate = 22050, .regvalue = 19 },
+ { .rate = 29400, .regvalue = 20 },
+ { .rate = 44100, .regvalue = 21 },
+ { .rate = 88200, .regvalue = 22 },
+ { .rate = 176400, .regvalue = 23 },
+ { .rate = 352800, .regvalue = 24 },
+};
+
+static int mt2701_dai_num_to_i2s(struct mtk_base_afe *afe, int num)
+{
+ int val = num - MT2701_IO_I2S;
+
+ if (val < 0 || val >= MT2701_I2S_NUM) {
+ dev_err(afe->dev, "%s, num not available, num %d, val %d\n",
+ __func__, num, val);
+ return -EINVAL;
+ }
+ return val;
+}
+
+static int mt2701_afe_i2s_fs(unsigned int sample_rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mt2701_afe_i2s_rates); i++)
+ if (mt2701_afe_i2s_rates[i].rate == sample_rate)
+ return mt2701_afe_i2s_rates[i].regvalue;
+
+ return -EINVAL;
+}
+
+static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
+ int ret = 0;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ /* enable mclk */
+ ret = clk_prepare_enable(afe_priv->clocks[clk_num]);
+ if (ret)
+ dev_err(afe->dev, "Failed to enable mclk for I2S: %d\n",
+ i2s_num);
+
+ return ret;
+}
+
+static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int dir_invert)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ const struct mt2701_i2s_data *i2s_data;
+ int stream_dir = substream->stream;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+
+ if (dir_invert) {
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
+ stream_dir = SNDRV_PCM_STREAM_CAPTURE;
+ else
+ stream_dir = SNDRV_PCM_STREAM_PLAYBACK;
+ }
+ i2s_data = i2s_path->i2s_data[stream_dir];
+
+ i2s_path->on[stream_dir]--;
+ if (i2s_path->on[stream_dir] < 0) {
+ dev_warn(afe->dev, "i2s_path->on: %d, dir: %d\n",
+ i2s_path->on[stream_dir], stream_dir);
+ i2s_path->on[stream_dir] = 0;
+ }
+ if (i2s_path->on[stream_dir])
+ return 0;
+
+ /* disable i2s */
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_I2S_EN, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ 1 << i2s_data->i2s_pwn_shift,
+ 1 << i2s_data->i2s_pwn_shift);
+ return 0;
+}
+
+static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
+
+ if (i2s_num < 0)
+ return;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+
+ if (i2s_path->occupied[substream->stream])
+ i2s_path->occupied[substream->stream] = 0;
+ else
+ goto I2S_UNSTART;
+
+ mt2701_afe_i2s_path_shutdown(substream, dai, 0);
+
+ /* need to disable the i2s-out path when disabling i2s-in */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ mt2701_afe_i2s_path_shutdown(substream, dai, 1);
+
+I2S_UNSTART:
+ /* disable mclk */
+ clk_disable_unprepare(afe_priv->clocks[clk_num]);
+}
+
+static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int dir_invert)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ const struct mt2701_i2s_data *i2s_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ int reg, fs, w_len = 1; /* only 64-bit bck is supported for now */
+ int stream_dir = substream->stream;
+ unsigned int mask = 0, val = 0;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+
+ if (dir_invert) {
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
+ stream_dir = SNDRV_PCM_STREAM_CAPTURE;
+ else
+ stream_dir = SNDRV_PCM_STREAM_PLAYBACK;
+ }
+ i2s_data = i2s_path->i2s_data[stream_dir];
+
+ /* no need to enable if already done */
+ i2s_path->on[stream_dir]++;
+
+ if (i2s_path->on[stream_dir] != 1)
+ return 0;
+
+ fs = mt2701_afe_i2s_fs(runtime->rate);
+
+ mask = ASYS_I2S_CON_FS |
+ ASYS_I2S_CON_I2S_COUPLE_MODE | /* 0 */
+ ASYS_I2S_CON_I2S_MODE |
+ ASYS_I2S_CON_WIDE_MODE;
+
+ val = ASYS_I2S_CON_FS_SET(fs) |
+ ASYS_I2S_CON_I2S_MODE |
+ ASYS_I2S_CON_WIDE_MODE_SET(w_len);
+
+ if (stream_dir == SNDRV_PCM_STREAM_CAPTURE) {
+ mask |= ASYS_I2S_IN_PHASE_FIX;
+ val |= ASYS_I2S_IN_PHASE_FIX;
+ }
+
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg, mask, val);
+
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
+ reg = ASMO_TIMING_CON1;
+ else
+ reg = ASMI_TIMING_CON1;
+
+ regmap_update_bits(afe->regmap, reg,
+ i2s_data->i2s_asrc_fs_mask
+ << i2s_data->i2s_asrc_fs_shift,
+ fs << i2s_data->i2s_asrc_fs_shift);
+
+ /* enable i2s */
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ 1 << i2s_data->i2s_pwn_shift,
+ 0 << i2s_data->i2s_pwn_shift);
+
+ /* reset i2s hw status before enable */
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_RESET, ASYS_I2S_CON_RESET);
+ udelay(1);
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_RESET, 0);
+ udelay(1);
+ regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
+ ASYS_I2S_CON_I2S_EN, ASYS_I2S_CON_I2S_EN);
+ return 0;
+}
+
+static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int clk_domain;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+ struct mt2701_i2s_path *i2s_path;
+ int mclk_rate;
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ i2s_path = &afe_priv->i2s_path[i2s_num];
+ mclk_rate = i2s_path->mclk_rate;
+
+ if (i2s_path->occupied[substream->stream])
+ return -EBUSY;
+ i2s_path->occupied[substream->stream] = 1;
+
+ if (MT2701_PLL_DOMAIN_0_RATE % mclk_rate == 0) {
+ clk_domain = 0;
+ } else if (MT2701_PLL_DOMAIN_1_RATE % mclk_rate == 0) {
+ clk_domain = 1;
+ } else {
+ dev_err(dai->dev, "%s() bad mclk rate %d\n",
+ __func__, mclk_rate);
+ return -EINVAL;
+ }
+ mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ } else {
+ /* need to enable the i2s-out path when enabling i2s-in */
+ /* prepare the other direction ("out") first */
+ mt2701_i2s_path_prepare_enable(substream, dai, 1);
+ /* prepare for "in" */
+ mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ }
+
+ return 0;
+}
+
+static int mt2701_afe_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dai->dev);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
+
+ if (i2s_num < 0)
+ return i2s_num;
+
+ /* mclk */
+ if (dir == SND_SOC_CLOCK_IN) {
+ dev_warn(dai->dev,
+ "%s() warning: mt2701 doesn't support mclk input\n",
+ __func__);
+ return -EINVAL;
+ }
+ afe_priv->i2s_path[i2s_num].mclk_rate = freq;
+ return 0;
+}
+
+static int mt2701_btmrg_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_MRGIF, 0);
+
+ afe_priv->mrg_enable[substream->stream] = 1;
+ return 0;
+}
+
+static int mt2701_btmrg_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int stream_fs;
+ u32 val, msk;
+
+ stream_fs = params_rate(params);
+
+ if ((stream_fs != 8000) && (stream_fs != 16000)) {
+ dev_err(afe->dev, "%s() btmgr not supprt this stream_fs %d\n",
+ __func__, stream_fs);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_I2S_MODE_MASK,
+ AFE_MRGIF_CON_I2S_MODE_32K);
+
+ val = AFE_DAIBT_CON0_BT_FUNC_EN | AFE_DAIBT_CON0_BT_FUNC_RDY
+ | AFE_DAIBT_CON0_MRG_USE;
+ msk = val;
+
+ if (stream_fs == 16000)
+ val |= AFE_DAIBT_CON0_BT_WIDE_MODE_EN;
+
+ msk |= AFE_DAIBT_CON0_BT_WIDE_MODE_EN;
+
+ regmap_update_bits(afe->regmap, AFE_DAIBT_CON0, msk, val);
+
+ regmap_update_bits(afe->regmap, AFE_DAIBT_CON0,
+ AFE_DAIBT_CON0_DAIBT_EN,
+ AFE_DAIBT_CON0_DAIBT_EN);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_I2S_EN,
+ AFE_MRGIF_CON_MRG_I2S_EN);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_EN,
+ AFE_MRGIF_CON_MRG_EN);
+ return 0;
+}
+
+static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+
+ /* if the other direction stream is not occupied */
+ if (!afe_priv->mrg_enable[!substream->stream]) {
+ regmap_update_bits(afe->regmap, AFE_DAIBT_CON0,
+ AFE_DAIBT_CON0_DAIBT_EN, 0);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_EN, 0);
+ regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
+ AFE_MRGIF_CON_MRG_I2S_EN, 0);
+ regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
+ AUDIO_TOP_CON4_PDN_MRGIF,
+ AUDIO_TOP_CON4_PDN_MRGIF);
+ }
+ afe_priv->mrg_enable[substream->stream] = 0;
+}
+
+static int mt2701_simple_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int stream_dir = substream->stream;
+ int memif_num = rtd->cpu_dai->id;
+ struct mtk_base_afe_memif *memif_tmp;
+
+ /* can't run single DL & DLM at the same time */
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ memif_tmp = &afe->memif[MT2701_MEMIF_DLM];
+ if (memif_tmp->substream) {
+ dev_warn(afe->dev, "%s memif is not available, stream_dir %d, memif_num %d\n",
+ __func__, stream_dir, memif_num);
+ return -EBUSY;
+ }
+ }
+ return mtk_afe_fe_startup(substream, dai);
+}
+
+static int mt2701_simple_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int stream_dir = substream->stream;
+
+ /* single DL use PAIR_INTERLEAVE */
+ if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_MASK,
+ AFE_MEMIF_PBUF_SIZE_PAIR_INTERLEAVE);
+ }
+ return mtk_afe_fe_hw_params(substream, params, dai);
+}
+
+static int mt2701_dlm_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif_tmp;
+ const struct mtk_base_memif_data *memif_data;
+ int i;
+
+ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) {
+ memif_tmp = &afe->memif[i];
+ if (memif_tmp->substream)
+ return -EBUSY;
+ }
+
+ /* enable agent for all single DL memifs (due to hw design) */
+ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) {
+ memif_data = afe->memif[i].data;
+ regmap_update_bits(afe->regmap,
+ memif_data->agent_disable_reg,
+ 1 << memif_data->agent_disable_shift,
+ 0 << memif_data->agent_disable_shift);
+ }
+
+ return mtk_afe_fe_startup(substream, dai);
+}
+
+static void mt2701_dlm_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ const struct mtk_base_memif_data *memif_data;
+ int i;
+
+ for (i = MT2701_MEMIF_DL1; i < MT2701_MEMIF_DL_SINGLE_NUM; ++i) {
+ memif_data = afe->memif[i].data;
+ regmap_update_bits(afe->regmap,
+ memif_data->agent_disable_reg,
+ 1 << memif_data->agent_disable_shift,
+ 1 << memif_data->agent_disable_shift);
+ }
+ return mtk_afe_fe_shutdown(substream, dai);
+}
+
+static int mt2701_dlm_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ int channels = params_channels(params);
+
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_MASK,
+ AFE_MEMIF_PBUF_SIZE_FULL_INTERLEAVE);
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_BYTE_MASK,
+ AFE_MEMIF_PBUF_SIZE_DLM_32BYTES);
+ regmap_update_bits(afe->regmap,
+ AFE_MEMIF_PBUF_SIZE,
+ AFE_MEMIF_PBUF_SIZE_DLM_CH_MASK,
+ AFE_MEMIF_PBUF_SIZE_DLM_CH(channels));
+
+ return mtk_afe_fe_hw_params(substream, params, dai);
+}
+
+static int mt2701_dlm_fe_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif_tmp = &afe->memif[MT2701_MEMIF_DL1];
+
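+ /* starting/stopping DLM also toggles the DL1 memif enable bit */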
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ regmap_update_bits(afe->regmap, memif_tmp->data->enable_reg,
+ 1 << memif_tmp->data->enable_shift,
+ 1 << memif_tmp->data->enable_shift);
+ mtk_afe_fe_trigger(substream, cmd, dai);
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ mtk_afe_fe_trigger(substream, cmd, dai);
+ regmap_update_bits(afe->regmap, memif_tmp->data->enable_reg,
+ 1 << memif_tmp->data->enable_shift, 0);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt2701_memif_fs(struct snd_pcm_substream *substream,
+ unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int fs;
+
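+ /* ULBT uses a 1-bit fs field: 0 = 8 kHz, 1 = 16 kHz */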
+ if (rtd->cpu_dai->id != MT2701_MEMIF_ULBT)
+ fs = mt2701_afe_i2s_fs(rate);
+ else
+ fs = (rate == 16000 ? 1 : 0);
+ return fs;
+}
+
+static int mt2701_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
+{
+ return mt2701_afe_i2s_fs(rate);
+}
+
+/* FE DAIs */
+static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = {
+ .startup = mt2701_simple_fe_startup,
+ .shutdown = mtk_afe_fe_shutdown,
+ .hw_params = mt2701_simple_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mtk_afe_fe_trigger,
+
+};
+
+static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = {
+ .startup = mt2701_dlm_fe_startup,
+ .shutdown = mt2701_dlm_fe_shutdown,
+ .hw_params = mt2701_dlm_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mt2701_dlm_fe_trigger,
+};
+
+/* I2S BE DAIs */
+static const struct snd_soc_dai_ops mt2701_afe_i2s_ops = {
+ .startup = mt2701_afe_i2s_startup,
+ .shutdown = mt2701_afe_i2s_shutdown,
+ .prepare = mt2701_afe_i2s_prepare,
+ .set_sysclk = mt2701_afe_i2s_set_sysclk,
+};
+
+/* MRG BE DAIs */
+static struct snd_soc_dai_ops mt2701_btmrg_ops = {
+ .startup = mt2701_btmrg_startup,
+ .shutdown = mt2701_btmrg_shutdown,
+ .hw_params = mt2701_btmrg_hw_params,
+};
+
+static struct snd_soc_dai_driver mt2701_afe_pcm_dais[] = {
+ /* FE DAIs: memory interfaces to CPU */
+ {
+ .name = "PCM_multi",
+ .id = MT2701_MEMIF_DLM,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .playback = {
+ .stream_name = "DLM",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+
+ },
+ .ops = &mt2701_dlm_memif_dai_ops,
+ },
+ {
+ .name = "PCM0",
+ .id = MT2701_MEMIF_UL1,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "UL1",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ {
+ .name = "PCM1",
+ .id = MT2701_MEMIF_UL2,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "UL2",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ {
+ .name = "PCM_BT_DL",
+ .id = MT2701_MEMIF_DLBT,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .playback = {
+ .stream_name = "DLBT",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ {
+ .name = "PCM_BT_UL",
+ .id = MT2701_MEMIF_ULBT,
+ .suspend = mtk_afe_dai_suspend,
+ .resume = mtk_afe_dai_resume,
+ .capture = {
+ .stream_name = "ULBT",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt2701_single_memif_dai_ops,
+ },
+ /* BE DAIs */
+ {
+ .name = "I2S0",
+ .id = MT2701_IO_I2S,
+ .playback = {
+ .stream_name = "I2S0 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+
+ },
+ .capture = {
+ .stream_name = "I2S0 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "I2S1",
+ .id = MT2701_IO_2ND_I2S,
+ .playback = {
+ .stream_name = "I2S1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "I2S2",
+ .id = MT2701_IO_3RD_I2S,
+ .playback = {
+ .stream_name = "I2S2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "I2S3",
+ .id = MT2701_IO_4TH_I2S,
+ .playback = {
+ .stream_name = "I2S3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .capture = {
+ .stream_name = "I2S3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE
+ | SNDRV_PCM_FMTBIT_S24_LE
+ | SNDRV_PCM_FMTBIT_S32_LE)
+ },
+ .ops = &mt2701_afe_i2s_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "MRG BT",
+ .id = MT2701_IO_MRG,
+ .playback = {
+ .stream_name = "BT Playback",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "BT Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = (SNDRV_PCM_RATE_8000
+ | SNDRV_PCM_RATE_16000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &mt2701_btmrg_ops,
+ .symmetric_rates = 1,
+ }
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o00_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I00 Switch", AFE_CONN0, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o01_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I01 Switch", AFE_CONN1, 1, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o02_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I02 Switch", AFE_CONN2, 2, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o03_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 3, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o14_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I26 Switch", AFE_CONN14, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o15_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I12 Switch", AFE_CONN15, 12, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o16_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I13 Switch", AFE_CONN16, 13, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o17_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I14 Switch", AFE_CONN17, 14, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o18_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I15 Switch", AFE_CONN18, 15, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o19_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I16 Switch", AFE_CONN19, 16, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o20_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN20, 17, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o21_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN21, 18, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o22_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I19 Switch", AFE_CONN22, 19, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o23_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I20 Switch", AFE_CONN23, 20, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o24_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I21 Switch", AFE_CONN24, 21, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_o31_mix[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("I35 Switch", AFE_CONN41, 9, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_i02_mix[] = {
+ SOC_DAPM_SINGLE("I2S0 Switch", SND_SOC_NOPM, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s0[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S0 Out Switch",
+ ASYS_I2SO1_CON, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s1[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S1 Out Switch",
+ ASYS_I2SO2_CON, 26, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s2[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S2 Out Switch",
+ PWR2_TOP_CON, 17, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s3[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S3 Out Switch",
+ PWR2_TOP_CON, 18, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Multich I2S4 Out Switch",
+ PWR2_TOP_CON, 19, 1, 0),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc0[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc0 out Switch", AUDIO_TOP_CON4, 14, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc1[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc1 out Switch", AUDIO_TOP_CON4, 15, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc2[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc2 out Switch", PWR2_TOP_CON, 6, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc3[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc3 out Switch", PWR2_TOP_CON, 7, 1,
+ 1),
+};
+
+static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc4[] = {
+ SOC_DAPM_SINGLE_AUTODISABLE("Asrc4 out Switch", PWR2_TOP_CON, 8, 1,
+ 1),
+};
+
+static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
+ /* inter-connections */
+ SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I01", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I02", SND_SOC_NOPM, 0, 0, mt2701_afe_i02_mix,
+ ARRAY_SIZE(mt2701_afe_i02_mix)),
+ SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I12", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I13", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I14", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I15", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I16", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I17", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I19", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I26", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("I35", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MIXER("O00", SND_SOC_NOPM, 0, 0, mt2701_afe_o00_mix,
+ ARRAY_SIZE(mt2701_afe_o00_mix)),
+ SND_SOC_DAPM_MIXER("O01", SND_SOC_NOPM, 0, 0, mt2701_afe_o01_mix,
+ ARRAY_SIZE(mt2701_afe_o01_mix)),
+ SND_SOC_DAPM_MIXER("O02", SND_SOC_NOPM, 0, 0, mt2701_afe_o02_mix,
+ ARRAY_SIZE(mt2701_afe_o02_mix)),
+ SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0, mt2701_afe_o03_mix,
+ ARRAY_SIZE(mt2701_afe_o03_mix)),
+ SND_SOC_DAPM_MIXER("O14", SND_SOC_NOPM, 0, 0, mt2701_afe_o14_mix,
+ ARRAY_SIZE(mt2701_afe_o14_mix)),
+ SND_SOC_DAPM_MIXER("O15", SND_SOC_NOPM, 0, 0, mt2701_afe_o15_mix,
+ ARRAY_SIZE(mt2701_afe_o15_mix)),
+ SND_SOC_DAPM_MIXER("O16", SND_SOC_NOPM, 0, 0, mt2701_afe_o16_mix,
+ ARRAY_SIZE(mt2701_afe_o16_mix)),
+ SND_SOC_DAPM_MIXER("O17", SND_SOC_NOPM, 0, 0, mt2701_afe_o17_mix,
+ ARRAY_SIZE(mt2701_afe_o17_mix)),
+ SND_SOC_DAPM_MIXER("O18", SND_SOC_NOPM, 0, 0, mt2701_afe_o18_mix,
+ ARRAY_SIZE(mt2701_afe_o18_mix)),
+ SND_SOC_DAPM_MIXER("O19", SND_SOC_NOPM, 0, 0, mt2701_afe_o19_mix,
+ ARRAY_SIZE(mt2701_afe_o19_mix)),
+ SND_SOC_DAPM_MIXER("O20", SND_SOC_NOPM, 0, 0, mt2701_afe_o20_mix,
+ ARRAY_SIZE(mt2701_afe_o20_mix)),
+ SND_SOC_DAPM_MIXER("O21", SND_SOC_NOPM, 0, 0, mt2701_afe_o21_mix,
+ ARRAY_SIZE(mt2701_afe_o21_mix)),
+ SND_SOC_DAPM_MIXER("O22", SND_SOC_NOPM, 0, 0, mt2701_afe_o22_mix,
+ ARRAY_SIZE(mt2701_afe_o22_mix)),
+ SND_SOC_DAPM_MIXER("O31", SND_SOC_NOPM, 0, 0, mt2701_afe_o31_mix,
+ ARRAY_SIZE(mt2701_afe_o31_mix)),
+
+ SND_SOC_DAPM_MIXER("I12I13", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s0,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s0)),
+ SND_SOC_DAPM_MIXER("I14I15", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s1,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s1)),
+ SND_SOC_DAPM_MIXER("I16I17", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s2,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s2)),
+ SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_i2s3,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)),
+
+ SND_SOC_DAPM_MIXER("ASRC_O0", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc0,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc0)),
+ SND_SOC_DAPM_MIXER("ASRC_O1", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc1,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc1)),
+ SND_SOC_DAPM_MIXER("ASRC_O2", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc2,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc2)),
+ SND_SOC_DAPM_MIXER("ASRC_O3", SND_SOC_NOPM, 0, 0,
+ mt2701_afe_multi_ch_out_asrc3,
+ ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc3)),
+};
+
+static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
+ {"I12", NULL, "DL1"},
+ {"I13", NULL, "DL1"},
+ {"I35", NULL, "DLBT"},
+
+ {"I2S0 Playback", NULL, "O15"},
+ {"I2S0 Playback", NULL, "O16"},
+
+ {"I2S1 Playback", NULL, "O17"},
+ {"I2S1 Playback", NULL, "O18"},
+ {"I2S2 Playback", NULL, "O19"},
+ {"I2S2 Playback", NULL, "O20"},
+ {"I2S3 Playback", NULL, "O21"},
+ {"I2S3 Playback", NULL, "O22"},
+ {"BT Playback", NULL, "O31"},
+
+ {"UL1", NULL, "O00"},
+ {"UL1", NULL, "O01"},
+ {"UL2", NULL, "O02"},
+ {"UL2", NULL, "O03"},
+ {"ULBT", NULL, "O14"},
+
+ {"I00", NULL, "I2S0 Capture"},
+ {"I01", NULL, "I2S0 Capture"},
+
+ {"I02", NULL, "I2S1 Capture"},
+ {"I03", NULL, "I2S1 Capture"},
+ /* I02,03 link to UL2, also need to open I2S0 */
+ {"I02", "I2S0 Switch", "I2S0 Capture"},
+
+ {"I26", NULL, "BT Capture"},
+
+ {"ASRC_O0", "Asrc0 out Switch", "DLM"},
+ {"ASRC_O1", "Asrc1 out Switch", "DLM"},
+ {"ASRC_O2", "Asrc2 out Switch", "DLM"},
+ {"ASRC_O3", "Asrc3 out Switch", "DLM"},
+
+ {"I12I13", "Multich I2S0 Out Switch", "ASRC_O0"},
+ {"I14I15", "Multich I2S1 Out Switch", "ASRC_O1"},
+ {"I16I17", "Multich I2S2 Out Switch", "ASRC_O2"},
+ {"I18I19", "Multich I2S3 Out Switch", "ASRC_O3"},
+
+ { "I12", NULL, "I12I13" },
+ { "I13", NULL, "I12I13" },
+ { "I14", NULL, "I14I15" },
+ { "I15", NULL, "I14I15" },
+ { "I16", NULL, "I16I17" },
+ { "I17", NULL, "I16I17" },
+ { "I18", NULL, "I18I19" },
+ { "I19", NULL, "I18I19" },
+
+ { "O00", "I00 Switch", "I00" },
+ { "O01", "I01 Switch", "I01" },
+ { "O02", "I02 Switch", "I02" },
+ { "O03", "I03 Switch", "I03" },
+ { "O14", "I26 Switch", "I26" },
+ { "O15", "I12 Switch", "I12" },
+ { "O16", "I13 Switch", "I13" },
+ { "O17", "I14 Switch", "I14" },
+ { "O18", "I15 Switch", "I15" },
+ { "O19", "I16 Switch", "I16" },
+ { "O20", "I17 Switch", "I17" },
+ { "O21", "I18 Switch", "I18" },
+ { "O22", "I19 Switch", "I19" },
+ { "O31", "I35 Switch", "I35" },
+
+};
+
+static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = {
+ .name = "mt2701-afe-pcm-dai",
+ .dapm_widgets = mt2701_afe_pcm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt2701_afe_pcm_widgets),
+ .dapm_routes = mt2701_afe_pcm_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt2701_afe_pcm_routes),
+};
+
+static const struct mtk_base_memif_data memif_data[MT2701_MEMIF_NUM] = {
+ {
+ .name = "DL1",
+ .id = MT2701_MEMIF_DL1,
+ .reg_ofs_base = AFE_DL1_BASE,
+ .reg_ofs_cur = AFE_DL1_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 0,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 16,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 1,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 0,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 6,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL2",
+ .id = MT2701_MEMIF_DL2,
+ .reg_ofs_base = AFE_DL2_BASE,
+ .reg_ofs_cur = AFE_DL2_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 5,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 17,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 2,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 2,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 7,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL3",
+ .id = MT2701_MEMIF_DL3,
+ .reg_ofs_base = AFE_DL3_BASE,
+ .reg_ofs_cur = AFE_DL3_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 10,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 18,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 3,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 4,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 8,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL4",
+ .id = MT2701_MEMIF_DL4,
+ .reg_ofs_base = AFE_DL4_BASE,
+ .reg_ofs_cur = AFE_DL4_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 15,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 19,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 4,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 6,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 9,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DL5",
+ .id = MT2701_MEMIF_DL5,
+ .reg_ofs_base = AFE_DL5_BASE,
+ .reg_ofs_cur = AFE_DL5_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 20,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 20,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 5,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 8,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 10,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DLM",
+ .id = MT2701_MEMIF_DLM,
+ .reg_ofs_base = AFE_DLMCH_BASE,
+ .reg_ofs_cur = AFE_DLMCH_CUR,
+ .fs_reg = AFE_DAC_CON1,
+ .fs_shift = 0,
+ .fs_maskbit = 0x1f,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 7,
+ .hd_reg = AFE_MEMIF_PBUF_SIZE,
+ .hd_shift = 28,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 12,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL1",
+ .id = MT2701_MEMIF_UL1,
+ .reg_ofs_base = AFE_VUL_BASE,
+ .reg_ofs_cur = AFE_VUL_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 0,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 0,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 10,
+ .hd_reg = AFE_MEMIF_HD_CON1,
+ .hd_shift = 0,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 0,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL2",
+ .id = MT2701_MEMIF_UL2,
+ .reg_ofs_base = AFE_UL2_BASE,
+ .reg_ofs_cur = AFE_UL2_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 5,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 2,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 11,
+ .hd_reg = AFE_MEMIF_HD_CON1,
+ .hd_shift = 2,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 1,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL3",
+ .id = MT2701_MEMIF_UL3,
+ .reg_ofs_base = AFE_UL3_BASE,
+ .reg_ofs_cur = AFE_UL3_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 10,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 4,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 12,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 0,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 2,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL4",
+ .id = MT2701_MEMIF_UL4,
+ .reg_ofs_base = AFE_UL4_BASE,
+ .reg_ofs_cur = AFE_UL4_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 15,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 6,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 13,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 6,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 3,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "UL5",
+ .id = MT2701_MEMIF_UL5,
+ .reg_ofs_base = AFE_UL5_BASE,
+ .reg_ofs_cur = AFE_UL5_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 20,
+ .mono_reg = AFE_DAC_CON4,
+ .mono_shift = 8,
+ .fs_maskbit = 0x1f,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 14,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 8,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 4,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "DLBT",
+ .id = MT2701_MEMIF_DLBT,
+ .reg_ofs_base = AFE_ARB1_BASE,
+ .reg_ofs_cur = AFE_ARB1_CUR,
+ .fs_reg = AFE_DAC_CON3,
+ .fs_shift = 10,
+ .fs_maskbit = 0x1f,
+ .mono_reg = AFE_DAC_CON3,
+ .mono_shift = 22,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 8,
+ .hd_reg = AFE_MEMIF_HD_CON0,
+ .hd_shift = 14,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 13,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+ {
+ .name = "ULBT",
+ .id = MT2701_MEMIF_ULBT,
+ .reg_ofs_base = AFE_DAI_BASE,
+ .reg_ofs_cur = AFE_DAI_CUR,
+ .fs_reg = AFE_DAC_CON2,
+ .fs_shift = 30,
+ .fs_maskbit = 0x1,
+ .mono_reg = -1,
+ .mono_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
+ .enable_shift = 17,
+ .hd_reg = AFE_MEMIF_HD_CON1,
+ .hd_shift = 20,
+ .agent_disable_reg = AUDIO_TOP_CON5,
+ .agent_disable_shift = 16,
+ .msb_reg = -1,
+ .msb_shift = -1,
+ },
+};
+
+static const struct mtk_base_irq_data irq_data[MT2701_IRQ_ASYS_END] = {
+ {
+ .id = MT2701_IRQ_ASYS_IRQ1,
+ .irq_cnt_reg = ASYS_IRQ1_CON,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0xffffff,
+ .irq_fs_reg = ASYS_IRQ1_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0x1f,
+ .irq_en_reg = ASYS_IRQ1_CON,
+ .irq_en_shift = 31,
+ .irq_clr_reg = ASYS_IRQ_CLR,
+ .irq_clr_shift = 0,
+ },
+ {
+ .id = MT2701_IRQ_ASYS_IRQ2,
+ .irq_cnt_reg = ASYS_IRQ2_CON,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0xffffff,
+ .irq_fs_reg = ASYS_IRQ2_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0x1f,
+ .irq_en_reg = ASYS_IRQ2_CON,
+ .irq_en_shift = 31,
+ .irq_clr_reg = ASYS_IRQ_CLR,
+ .irq_clr_shift = 1,
+ },
+ {
+ .id = MT2701_IRQ_ASYS_IRQ3,
+ .irq_cnt_reg = ASYS_IRQ3_CON,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0xffffff,
+ .irq_fs_reg = ASYS_IRQ3_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0x1f,
+ .irq_en_reg = ASYS_IRQ3_CON,
+ .irq_en_shift = 31,
+ .irq_clr_reg = ASYS_IRQ_CLR,
+ .irq_clr_shift = 2,
+ }
+};
+
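+/* per-I2S register data: index [0] is the output (I2S_OUT) path, [1] the input (I2S_IN) path */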
+static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO1_CON,
+ .i2s_pwn_shift = 6,
+ .i2s_asrc_fs_shift = 0,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN1_CON,
+ .i2s_pwn_shift = 0,
+ .i2s_asrc_fs_shift = 0,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ },
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO2_CON,
+ .i2s_pwn_shift = 7,
+ .i2s_asrc_fs_shift = 5,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN2_CON,
+ .i2s_pwn_shift = 1,
+ .i2s_asrc_fs_shift = 5,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ },
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO3_CON,
+ .i2s_pwn_shift = 8,
+ .i2s_asrc_fs_shift = 10,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN3_CON,
+ .i2s_pwn_shift = 2,
+ .i2s_asrc_fs_shift = 10,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ },
+ {
+ {
+ .i2s_ctrl_reg = ASYS_I2SO4_CON,
+ .i2s_pwn_shift = 9,
+ .i2s_asrc_fs_shift = 15,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ {
+ .i2s_ctrl_reg = ASYS_I2SIN4_CON,
+ .i2s_pwn_shift = 3,
+ .i2s_asrc_fs_shift = 15,
+ .i2s_asrc_fs_mask = 0x1f,
+
+ },
+ },
+};
+
+static const struct regmap_config mt2701_afe_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = AFE_END_ADDR,
+ .cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t mt2701_asys_isr(int irq_id, void *dev)
+{
+ int id;
+ struct mtk_base_afe *afe = dev;
+ struct mtk_base_afe_memif *memif;
+ struct mtk_base_afe_irq *irq;
+ u32 status;
+
+ regmap_read(afe->regmap, ASYS_IRQ_STATUS, &status);
+ regmap_write(afe->regmap, ASYS_IRQ_CLR, status);
+
+ for (id = 0; id < MT2701_MEMIF_NUM; ++id) {
+ memif = &afe->memif[id];
+ if (memif->irq_usage < 0)
+ continue;
+ irq = &afe->irqs[memif->irq_usage];
+ if (status & 1 << (irq->irq_data->irq_clr_shift))
+ snd_pcm_period_elapsed(memif->substream);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mt2701_afe_runtime_suspend(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+
+ mt2701_afe_disable_clock(afe);
+ return 0;
+}
+
+static int mt2701_afe_runtime_resume(struct device *dev)
+{
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+
+ return mt2701_afe_enable_clock(afe);
+}
+
+static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ int irq_id;
+ struct mtk_base_afe *afe;
+ struct mt2701_afe_private *afe_priv;
+ struct resource *res;
+ struct device *dev;
+
+ ret = 0;
+ afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+ if (!afe)
+ return -ENOMEM;
+ afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ GFP_KERNEL);
+ if (!afe->platform_priv)
+ return -ENOMEM;
+ afe_priv = afe->platform_priv;
+
+ afe->dev = &pdev->dev;
+ dev = afe->dev;
+
+ irq_id = platform_get_irq(pdev, 0);
+ if (irq_id < 0) {
+ dev_err(dev, "%s no irq found\n", dev->of_node->name);
+ return irq_id;
+ }
+ ret = devm_request_irq(dev, irq_id, mt2701_asys_isr,
+ IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
+ if (ret) {
+ dev_err(dev, "could not request_irq for asys-isr\n");
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
+
+ if (IS_ERR(afe->base_addr))
+ return PTR_ERR(afe->base_addr);
+
+ afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
+ &mt2701_afe_regmap_config);
+ if (IS_ERR(afe->regmap))
+ return PTR_ERR(afe->regmap);
+
+ mutex_init(&afe->irq_alloc_lock);
+
+ /* memif initialize */
+ afe->memif_size = MT2701_MEMIF_NUM;
+ afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
+ GFP_KERNEL);
+
+ if (!afe->memif)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->memif_size; i++) {
+ afe->memif[i].data = &memif_data[i];
+ afe->memif[i].irq_usage = -1;
+ }
+
+ /* irq initialize */
+ afe->irqs_size = MT2701_IRQ_ASYS_END;
+ afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
+ GFP_KERNEL);
+
+ if (!afe->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->irqs_size; i++)
+ afe->irqs[i].irq_data = &irq_data[i];
+
+ /* I2S initialize */
+ for (i = 0; i < MT2701_I2S_NUM; i++) {
+ afe_priv->i2s_path[i].i2s_data[I2S_OUT]
+ = &mt2701_i2s_data[i][I2S_OUT];
+ afe_priv->i2s_path[i].i2s_data[I2S_IN]
+ = &mt2701_i2s_data[i][I2S_IN];
+ }
+
+ afe->mtk_afe_hardware = &mt2701_afe_hardware;
+ afe->memif_fs = mt2701_memif_fs;
+ afe->irq_fs = mt2701_irq_fs;
+
+ afe->reg_back_up_list = mt2701_afe_backup_list;
+ afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list);
+ afe->runtime_resume = mt2701_afe_runtime_resume;
+ afe->runtime_suspend = mt2701_afe_runtime_suspend;
+
+ /* initial audio related clock */
+ ret = mt2701_init_clock(afe);
+ if (ret) {
+ dev_err(dev, "init clock error\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, afe);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev))
+ goto err_pm_disable;
+
+ ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+ if (ret) {
+ dev_warn(dev, "err_platform\n");
+ goto err_platform;
+ }
+
+ ret = snd_soc_register_component(&pdev->dev,
+ &mt2701_afe_pcm_dai_component,
+ mt2701_afe_pcm_dais,
+ ARRAY_SIZE(mt2701_afe_pcm_dais));
+ if (ret) {
+ dev_warn(dev, "err_dai_component\n");
+ goto err_dai_component;
+ }
+
+ mt2701_afe_runtime_resume(&pdev->dev);
+
+ return 0;
+
+err_dai_component:
+ snd_soc_unregister_component(&pdev->dev);
+
+err_platform:
+ snd_soc_unregister_platform(&pdev->dev);
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev)
+{
+ struct mtk_base_afe *afe = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mt2701_afe_runtime_suspend(&pdev->dev);
+
+ snd_soc_unregister_component(&pdev->dev);
+ snd_soc_unregister_platform(&pdev->dev);
+ /* disable afe clock */
+ mt2701_afe_disable_clock(afe);
+ return 0;
+}
+
+static const struct of_device_id mt2701_afe_pcm_dt_match[] = {
+ { .compatible = "mediatek,mt2701-audio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt2701_afe_pcm_dt_match);
+
+static const struct dev_pm_ops mt2701_afe_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt2701_afe_runtime_suspend,
+ mt2701_afe_runtime_resume, NULL)
+};
+
+static struct platform_driver mt2701_afe_pcm_driver = {
+ .driver = {
+ .name = "mt2701-audio",
+ .of_match_table = mt2701_afe_pcm_dt_match,
+#ifdef CONFIG_PM
+ .pm = &mt2701_afe_pm_ops,
+#endif
+ },
+ .probe = mt2701_afe_pcm_dev_probe,
+ .remove = mt2701_afe_pcm_dev_remove,
+};
+
+module_platform_driver(mt2701_afe_pcm_driver);
+
+MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 2701");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/sound/soc/mediatek/mt2701/mt2701-cs42448.c b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
new file mode 100644
index 000000000000..1e7e8d43fd8a
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
@@ -0,0 +1,412 @@
+/*
+ * mt2701-cs42448.c -- MT2701 CS42448 ALSA SoC machine driver
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ir Lian <ir.lian@mediatek.com>
+ * Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_gpio.h>
+
+#include "mt2701-afe-common.h"
+
+struct mt2701_cs42448_private {
+ int i2s1_in_mux;
+ int i2s1_in_mux_gpio_sel_1;
+ int i2s1_in_mux_gpio_sel_2;
+};
+
+static const char * const i2sin_mux_switch_text[] = {
+ "ADC_SDOUT2",
+ "ADC_SDOUT3",
+ "I2S_IN_1",
+ "I2S_IN_2",
+};
+
+static const struct soc_enum i2sin_mux_enum =
+ SOC_ENUM_SINGLE_EXT(4, i2sin_mux_switch_text);
+
+static int mt2701_cs42448_i2sin1_mux_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+ struct mt2701_cs42448_private *priv = snd_soc_card_get_drvdata(card);
+
+ ucontrol->value.integer.value[0] = priv->i2s1_in_mux;
+ return 0;
+}
+
+static int mt2701_cs42448_i2sin1_mux_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
+ struct mt2701_cs42448_private *priv = snd_soc_card_get_drvdata(card);
+
+ if (ucontrol->value.integer.value[0] == priv->i2s1_in_mux)
+ return 0;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 0);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 0);
+ break;
+ case 1:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 1);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 0);
+ break;
+ case 2:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 0);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 1);
+ break;
+ case 3:
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_1, 1);
+ gpio_set_value(priv->i2s1_in_mux_gpio_sel_2, 1);
+ break;
+ default:
+ dev_warn(card->dev, "%s invalid setting\n", __func__);
+ }
+
+ priv->i2s1_in_mux = ucontrol->value.integer.value[0];
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget
+ mt2701_cs42448_asoc_card_dapm_widgets[] = {
+ SND_SOC_DAPM_LINE("Line Out Jack", NULL),
+ SND_SOC_DAPM_MIC("AMIC", NULL),
+ SND_SOC_DAPM_LINE("Tuner In", NULL),
+ SND_SOC_DAPM_LINE("Satellite Tuner In", NULL),
+ SND_SOC_DAPM_LINE("AUX In", NULL),
+};
+
+static const struct snd_kcontrol_new mt2701_cs42448_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Line Out Jack"),
+ SOC_DAPM_PIN_SWITCH("AMIC"),
+ SOC_DAPM_PIN_SWITCH("Tuner In"),
+ SOC_DAPM_PIN_SWITCH("Satellite Tuner In"),
+ SOC_DAPM_PIN_SWITCH("AUX In"),
+ SOC_ENUM_EXT("I2SIN1_MUX_Switch", i2sin_mux_enum,
+ mt2701_cs42448_i2sin1_mux_get,
+ mt2701_cs42448_i2sin1_mux_set),
+};
+
+static const unsigned int mt2701_cs42448_sampling_rates[] = {48000};
+
+static struct snd_pcm_hw_constraint_list mt2701_cs42448_constraints_rates = {
+ .count = ARRAY_SIZE(mt2701_cs42448_sampling_rates),
+ .list = mt2701_cs42448_sampling_rates,
+ .mask = 0,
+};
+
+static int mt2701_cs42448_fe_ops_startup(struct snd_pcm_substream *substream)
+{
+ int err;
+
+ err = snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &mt2701_cs42448_constraints_rates);
+ if (err < 0) {
+ dev_err(substream->pcm->card->dev,
+ "%s snd_pcm_hw_constraint_list failed: 0x%x\n",
+ __func__, err);
+ return err;
+ }
+ return 0;
+}
+
+static struct snd_soc_ops mt2701_cs42448_48k_fe_ops = {
+ .startup = mt2701_cs42448_fe_ops_startup,
+};
+
+static int mt2701_cs42448_be_ops_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ unsigned int mclk_rate;
+ unsigned int rate = params_rate(params);
+ unsigned int div_mclk_over_bck = rate > 192000 ? 2 : 4;
+ unsigned int div_bck_over_lrck = 64;
+
+ mclk_rate = rate * div_bck_over_lrck * div_mclk_over_bck;
+
+ /* mt2701 mclk */
+ snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate, SND_SOC_CLOCK_OUT);
+
+ /* codec mclk */
+ snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate, SND_SOC_CLOCK_IN);
+
+ return 0;
+}
+
+static struct snd_soc_ops mt2701_cs42448_be_ops = {
+ .hw_params = mt2701_cs42448_be_ops_hw_params
+};
+
+enum {
+ DAI_LINK_FE_MULTI_CH_OUT,
+ DAI_LINK_FE_PCM0_IN,
+ DAI_LINK_FE_PCM1_IN,
+ DAI_LINK_FE_BT_OUT,
+ DAI_LINK_FE_BT_IN,
+ DAI_LINK_BE_I2S0,
+ DAI_LINK_BE_I2S1,
+ DAI_LINK_BE_I2S2,
+ DAI_LINK_BE_I2S3,
+ DAI_LINK_BE_MRG_BT,
+};
+
+static struct snd_soc_dai_link mt2701_cs42448_dai_links[] = {
+ /* FE */
+ [DAI_LINK_FE_MULTI_CH_OUT] = {
+ .name = "mt2701-cs42448-multi-ch-out",
+ .stream_name = "mt2701-cs42448-multi-ch-out",
+ .cpu_dai_name = "PCM_multi",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &mt2701_cs42448_48k_fe_ops,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_FE_PCM0_IN] = {
+ .name = "mt2701-cs42448-pcm0",
+ .stream_name = "mt2701-cs42448-pcm0-data-UL",
+ .cpu_dai_name = "PCM0",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &mt2701_cs42448_48k_fe_ops,
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_FE_PCM1_IN] = {
+ .name = "mt2701-cs42448-pcm1-data-UL",
+ .stream_name = "mt2701-cs42448-pcm1-data-UL",
+ .cpu_dai_name = "PCM1",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &mt2701_cs42448_48k_fe_ops,
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_FE_BT_OUT] = {
+ .name = "mt2701-cs42448-pcm-BT-out",
+ .stream_name = "mt2701-cs42448-pcm-BT",
+ .cpu_dai_name = "PCM_BT_DL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
+ [DAI_LINK_FE_BT_IN] = {
+ .name = "mt2701-cs42448-pcm-BT-in",
+ .stream_name = "mt2701-cs42448-pcm-BT",
+ .cpu_dai_name = "PCM_BT_UL",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ },
+ /* BE */
+ [DAI_LINK_BE_I2S0] = {
+ .name = "mt2701-cs42448-I2S0",
+ .cpu_dai_name = "I2S0",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_I2S1] = {
+ .name = "mt2701-cs42448-I2S1",
+ .cpu_dai_name = "I2S1",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_I2S2] = {
+ .name = "mt2701-cs42448-I2S2",
+ .cpu_dai_name = "I2S2",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_I2S3] = {
+ .name = "mt2701-cs42448-I2S3",
+ .cpu_dai_name = "I2S3",
+ .no_pcm = 1,
+ .codec_dai_name = "cs42448",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS
+ | SND_SOC_DAIFMT_GATED,
+ .ops = &mt2701_cs42448_be_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ [DAI_LINK_BE_MRG_BT] = {
+ .name = "mt2701-cs42448-MRG-BT",
+ .cpu_dai_name = "MRG BT",
+ .no_pcm = 1,
+ .codec_dai_name = "bt-sco-pcm-wb",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+};
+
+static struct snd_soc_card mt2701_cs42448_soc_card = {
+ .name = "mt2701-cs42448",
+ .owner = THIS_MODULE,
+ .dai_link = mt2701_cs42448_dai_links,
+ .num_links = ARRAY_SIZE(mt2701_cs42448_dai_links),
+ .controls = mt2701_cs42448_controls,
+ .num_controls = ARRAY_SIZE(mt2701_cs42448_controls),
+ .dapm_widgets = mt2701_cs42448_asoc_card_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt2701_cs42448_asoc_card_dapm_widgets),
+};
+
+static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &mt2701_cs42448_soc_card;
+ int ret;
+ int i;
+ struct device_node *platform_node, *codec_node, *codec_node_bt_mrg;
+ struct mt2701_cs42448_private *priv =
+ devm_kzalloc(&pdev->dev, sizeof(struct mt2701_cs42448_private),
+ GFP_KERNEL);
+ struct device *dev = &pdev->dev;
+
+ if (!priv)
+ return -ENOMEM;
+
+ platform_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,platform", 0);
+ if (!platform_node) {
+ dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < card->num_links; i++) {
+ if (mt2701_cs42448_dai_links[i].platform_name)
+ continue;
+ mt2701_cs42448_dai_links[i].platform_of_node = platform_node;
+ }
+
+ card->dev = dev;
+
+ codec_node = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,audio-codec", 0);
+ if (!codec_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < card->num_links; i++) {
+ if (mt2701_cs42448_dai_links[i].codec_name)
+ continue;
+ mt2701_cs42448_dai_links[i].codec_of_node = codec_node;
+ }
+
+ codec_node_bt_mrg = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,audio-codec-bt-mrg", 0);
+ if (!codec_node_bt_mrg) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec-bt-mrg' missing or invalid\n");
+ return -EINVAL;
+ }
+ mt2701_cs42448_dai_links[DAI_LINK_BE_MRG_BT].codec_of_node
+ = codec_node_bt_mrg;
+
+ ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
+ return ret;
+ }
+
+ priv->i2s1_in_mux_gpio_sel_1 =
+ of_get_named_gpio(dev->of_node, "i2s1-in-sel-gpio1", 0);
+ if (gpio_is_valid(priv->i2s1_in_mux_gpio_sel_1)) {
+ ret = devm_gpio_request(dev, priv->i2s1_in_mux_gpio_sel_1,
+ "i2s1_in_mux_gpio_sel_1");
+ if (ret)
+ dev_warn(&pdev->dev, "%s devm_gpio_request fail %d\n",
+ __func__, ret);
+ gpio_direction_output(priv->i2s1_in_mux_gpio_sel_1, 0);
+ }
+
+ priv->i2s1_in_mux_gpio_sel_2 =
+ of_get_named_gpio(dev->of_node, "i2s1-in-sel-gpio2", 0);
+ if (gpio_is_valid(priv->i2s1_in_mux_gpio_sel_2)) {
+ ret = devm_gpio_request(dev, priv->i2s1_in_mux_gpio_sel_2,
+ "i2s1_in_mux_gpio_sel_2");
+ if (ret)
+ dev_warn(&pdev->dev, "%s devm_gpio_request fail2 %d\n",
+ __func__, ret);
+ gpio_direction_output(priv->i2s1_in_mux_gpio_sel_2, 0);
+ }
+ snd_soc_card_set_drvdata(card, priv);
+
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+
+ if (ret)
+ dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt2701_cs42448_machine_dt_match[] = {
+ {.compatible = "mediatek,mt2701-cs42448-machine",},
+ {}
+};
+#endif
+
+static struct platform_driver mt2701_cs42448_machine = {
+ .driver = {
+ .name = "mt2701-cs42448",
+ #ifdef CONFIG_OF
+ .of_match_table = mt2701_cs42448_machine_dt_match,
+ #endif
+ },
+ .probe = mt2701_cs42448_machine_probe,
+};
+
+module_platform_driver(mt2701_cs42448_machine);
+
+/* Module information */
+MODULE_DESCRIPTION("MT2701 CS42448 ALSA SoC machine driver");
+MODULE_AUTHOR("Ir Lian <ir.lian@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mt2701 cs42448 soc card");
diff --git a/sound/soc/mediatek/mt2701/mt2701-reg.h b/sound/soc/mediatek/mt2701/mt2701-reg.h
new file mode 100644
index 000000000000..bb62b1c55957
--- /dev/null
+++ b/sound/soc/mediatek/mt2701/mt2701-reg.h
@@ -0,0 +1,186 @@
+/*
+ * mt2701-reg.h -- MediaTek MT2701 audio driver register definitions
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT2701_REG_H_
+#define _MT2701_REG_H_
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <sound/soc.h>
+#include "mt2701-afe-common.h"
+
+/*****************************************************************************
+ * R E G I S T E R D E F I N I T I O N
+ *****************************************************************************/
+#define AUDIO_TOP_CON0 0x0000
+#define AUDIO_TOP_CON4 0x0010
+#define AUDIO_TOP_CON5 0x0014
+#define AFE_DAIBT_CON0 0x001c
+#define AFE_MRGIF_CON 0x003c
+#define ASMI_TIMING_CON1 0x0100
+#define ASMO_TIMING_CON1 0x0104
+#define PWR1_ASM_CON1 0x0108
+#define ASYS_TOP_CON 0x0600
+#define ASYS_I2SIN1_CON 0x0604
+#define ASYS_I2SIN2_CON 0x0608
+#define ASYS_I2SIN3_CON 0x060c
+#define ASYS_I2SIN4_CON 0x0610
+#define ASYS_I2SIN5_CON 0x0614
+#define ASYS_I2SO1_CON 0x061C
+#define ASYS_I2SO2_CON 0x0620
+#define ASYS_I2SO3_CON 0x0624
+#define ASYS_I2SO4_CON 0x0628
+#define ASYS_I2SO5_CON 0x062c
+#define PWR2_TOP_CON 0x0634
+#define AFE_CONN0 0x06c0
+#define AFE_CONN1 0x06c4
+#define AFE_CONN2 0x06c8
+#define AFE_CONN3 0x06cc
+#define AFE_CONN14 0x06f8
+#define AFE_CONN15 0x06fc
+#define AFE_CONN16 0x0700
+#define AFE_CONN17 0x0704
+#define AFE_CONN18 0x0708
+#define AFE_CONN19 0x070c
+#define AFE_CONN20 0x0710
+#define AFE_CONN21 0x0714
+#define AFE_CONN22 0x0718
+#define AFE_CONN23 0x071c
+#define AFE_CONN24 0x0720
+#define AFE_CONN41 0x0764
+#define ASYS_IRQ1_CON 0x0780
+#define ASYS_IRQ2_CON 0x0784
+#define ASYS_IRQ3_CON 0x0788
+#define ASYS_IRQ_CLR 0x07c0
+#define ASYS_IRQ_STATUS 0x07c4
+#define PWR2_ASM_CON1 0x1070
+#define AFE_DAC_CON0 0x1200
+#define AFE_DAC_CON1 0x1204
+#define AFE_DAC_CON2 0x1208
+#define AFE_DAC_CON3 0x120c
+#define AFE_DAC_CON4 0x1210
+#define AFE_MEMIF_HD_CON1 0x121c
+#define AFE_MEMIF_PBUF_SIZE 0x1238
+#define AFE_MEMIF_HD_CON0 0x123c
+#define AFE_DL1_BASE 0x1240
+#define AFE_DL1_CUR 0x1244
+#define AFE_DL2_BASE 0x1250
+#define AFE_DL2_CUR 0x1254
+#define AFE_DL3_BASE 0x1260
+#define AFE_DL3_CUR 0x1264
+#define AFE_DL4_BASE 0x1270
+#define AFE_DL4_CUR 0x1274
+#define AFE_DL5_BASE 0x1280
+#define AFE_DL5_CUR 0x1284
+#define AFE_DLMCH_BASE 0x12a0
+#define AFE_DLMCH_CUR 0x12a4
+#define AFE_ARB1_BASE 0x12b0
+#define AFE_ARB1_CUR 0x12b4
+#define AFE_VUL_BASE 0x1300
+#define AFE_VUL_CUR 0x130c
+#define AFE_UL2_BASE 0x1310
+#define AFE_UL2_END 0x1318
+#define AFE_UL2_CUR 0x131c
+#define AFE_UL3_BASE 0x1320
+#define AFE_UL3_END 0x1328
+#define AFE_UL3_CUR 0x132c
+#define AFE_UL4_BASE 0x1330
+#define AFE_UL4_END 0x1338
+#define AFE_UL4_CUR 0x133c
+#define AFE_UL5_BASE 0x1340
+#define AFE_UL5_END 0x1348
+#define AFE_UL5_CUR 0x134c
+#define AFE_DAI_BASE 0x1370
+#define AFE_DAI_CUR 0x137c
+
+/* AUDIO_TOP_CON0 (0x0000) */
+#define AUDIO_TOP_CON0_A1SYS_A2SYS_ON (0x3 << 0)
+#define AUDIO_TOP_CON0_PDN_AFE (0x1 << 2)
+#define AUDIO_TOP_CON0_PDN_APLL_CK (0x1 << 23)
+
+/* AUDIO_TOP_CON4 (0x0010) */
+#define AUDIO_TOP_CON4_I2SO1_PWN (0x1 << 6)
+#define AUDIO_TOP_CON4_PDN_A1SYS (0x1 << 21)
+#define AUDIO_TOP_CON4_PDN_A2SYS (0x1 << 22)
+#define AUDIO_TOP_CON4_PDN_AFE_CONN (0x1 << 23)
+#define AUDIO_TOP_CON4_PDN_MRGIF (0x1 << 25)
+
+/* AFE_DAIBT_CON0 (0x001c) */
+#define AFE_DAIBT_CON0_DAIBT_EN (0x1 << 0)
+#define AFE_DAIBT_CON0_BT_FUNC_EN (0x1 << 1)
+#define AFE_DAIBT_CON0_BT_FUNC_RDY (0x1 << 3)
+#define AFE_DAIBT_CON0_BT_WIDE_MODE_EN (0x1 << 9)
+#define AFE_DAIBT_CON0_MRG_USE (0x1 << 12)
+
+/* PWR1_ASM_CON1 (0x0108) */
+#define PWR1_ASM_CON1_INIT_VAL (0x492)
+
+/* AFE_MRGIF_CON (0x003c) */
+#define AFE_MRGIF_CON_MRG_EN (0x1 << 0)
+#define AFE_MRGIF_CON_MRG_I2S_EN (0x1 << 16)
+#define AFE_MRGIF_CON_I2S_MODE_MASK (0xf << 20)
+#define AFE_MRGIF_CON_I2S_MODE_32K (0x4 << 20)
+
+/* ASYS_I2SO1_CON (0x061c) */
+#define ASYS_I2SO1_CON_FS (0x1f << 8)
+#define ASYS_I2SO1_CON_FS_SET(x) ((x) << 8)
+#define ASYS_I2SO1_CON_MULTI_CH (0x1 << 16)
+#define ASYS_I2SO1_CON_SIDEGEN (0x1 << 30)
+#define ASYS_I2SO1_CON_I2S_EN (0x1 << 0)
+/* 0:EIAJ 1:I2S */
+#define ASYS_I2SO1_CON_I2S_MODE (0x1 << 3)
+#define ASYS_I2SO1_CON_WIDE_MODE (0x1 << 1)
+#define ASYS_I2SO1_CON_WIDE_MODE_SET(x) ((x) << 1)
+
+/* PWR2_TOP_CON (0x0634) */
+#define PWR2_TOP_CON_INIT_VAL (0xffe1ffff)
+
+/* ASYS_IRQ_CLR (0x07c0) */
+#define ASYS_IRQ_CLR_ALL (0xffffffff)
+
+/* PWR2_ASM_CON1 (0x1070) */
+#define PWR2_ASM_CON1_INIT_VAL (0x492492)
+
+/* AFE_DAC_CON0 (0x1200) */
+#define AFE_DAC_CON0_AFE_ON (0x1 << 0)
+
+/* AFE_MEMIF_PBUF_SIZE (0x1238) */
+#define AFE_MEMIF_PBUF_SIZE_DLM_MASK (0x1 << 29)
+#define AFE_MEMIF_PBUF_SIZE_PAIR_INTERLEAVE (0x0 << 29)
+#define AFE_MEMIF_PBUF_SIZE_FULL_INTERLEAVE (0x1 << 29)
+#define DLMCH_BIT_WIDTH_MASK (0x1 << 28)
+#define AFE_MEMIF_PBUF_SIZE_DLM_CH_MASK (0xf << 24)
+#define AFE_MEMIF_PBUF_SIZE_DLM_CH(x) ((x) << 24)
+#define AFE_MEMIF_PBUF_SIZE_DLM_BYTE_MASK (0x3 << 12)
+#define AFE_MEMIF_PBUF_SIZE_DLM_32BYTES (0x1 << 12)
+
+/* I2S in/out register bit control */
+#define ASYS_I2S_CON_FS (0x1f << 8)
+#define ASYS_I2S_CON_FS_SET(x) ((x) << 8)
+#define ASYS_I2S_CON_RESET (0x1 << 30)
+#define ASYS_I2S_CON_I2S_EN (0x1 << 0)
+#define ASYS_I2S_CON_I2S_COUPLE_MODE (0x1 << 17)
+/* 0:EIAJ 1:I2S */
+#define ASYS_I2S_CON_I2S_MODE (0x1 << 3)
+#define ASYS_I2S_CON_WIDE_MODE (0x1 << 1)
+#define ASYS_I2S_CON_WIDE_MODE_SET(x) ((x) << 1)
+#define ASYS_I2S_IN_PHASE_FIX (0x1 << 31)
+
+#define AFE_END_ADDR 0x15e0
+#endif
diff --git a/sound/soc/mediatek/mt8173/Makefile b/sound/soc/mediatek/mt8173/Makefile
new file mode 100644
index 000000000000..0357b27d29f2
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/Makefile
@@ -0,0 +1,7 @@
+# MTK Platform Support
+obj-$(CONFIG_SND_SOC_MT8173) += mt8173-afe-pcm.o
+# Machine support
+obj-$(CONFIG_SND_SOC_MT8173_MAX98090) += mt8173-max98090.o
+obj-$(CONFIG_SND_SOC_MT8173_RT5650) += mt8173-rt5650.o
+obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5514) += mt8173-rt5650-rt5514.o
+obj-$(CONFIG_SND_SOC_MT8173_RT5650_RT5676) += mt8173-rt5650-rt5676.o
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-common.h b/sound/soc/mediatek/mt8173/mt8173-afe-common.h
new file mode 100644
index 000000000000..9a4837cc181a
--- /dev/null
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-common.h
@@ -0,0 +1,73 @@
+/*
+ * mt8173-afe-common.h -- MediaTek MT8173 audio driver common definitions
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Koro Chen <koro.chen@mediatek.com>
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ * Hidalgo Huang <hidalgo.huang@mediatek.com>
+ * Ir Lian <ir.lian@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MT8173_AFE_COMMON_H_
+#define _MT8173_AFE_COMMON_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+enum {
+ MT8173_AFE_MEMIF_DL1,
+ MT8173_AFE_MEMIF_DL2,
+ MT8173_AFE_MEMIF_VUL,
+ MT8173_AFE_MEMIF_DAI,
+ MT8173_AFE_MEMIF_AWB,
+ MT8173_AFE_MEMIF_MOD_DAI,
+ MT8173_AFE_MEMIF_HDMI,
+ MT8173_AFE_MEMIF_NUM,
+ MT8173_AFE_IO_MOD_PCM1 = MT8173_AFE_MEMIF_NUM,
+ MT8173_AFE_IO_MOD_PCM2,
+ MT8173_AFE_IO_PMIC,
+ MT8173_AFE_IO_I2S,
+ MT8173_AFE_IO_2ND_I2S,
+ MT8173_AFE_IO_HW_GAIN1,
+ MT8173_AFE_IO_HW_GAIN2,
+ MT8173_AFE_IO_MRG_O,
+ MT8173_AFE_IO_MRG_I,
+ MT8173_AFE_IO_DAIBT,
+ MT8173_AFE_IO_HDMI,
+};
+
+enum {
+ MT8173_AFE_IRQ_DL1,
+ MT8173_AFE_IRQ_DL2,
+ MT8173_AFE_IRQ_VUL,
+ MT8173_AFE_IRQ_DAI,
+ MT8173_AFE_IRQ_AWB,
+ MT8173_AFE_IRQ_MOD_DAI,
+ MT8173_AFE_IRQ_HDMI,
+ MT8173_AFE_IRQ_NUM,
+};
+
+enum {
+ MT8173_CLK_INFRASYS_AUD,
+ MT8173_CLK_TOP_PDN_AUD,
+ MT8173_CLK_TOP_PDN_AUD_BUS,
+ MT8173_CLK_I2S0_M,
+ MT8173_CLK_I2S1_M,
+ MT8173_CLK_I2S2_M,
+ MT8173_CLK_I2S3_M,
+ MT8173_CLK_I2S3_B,
+ MT8173_CLK_BCK0,
+ MT8173_CLK_BCK1,
+ MT8173_CLK_NUM
+};
+
+#endif
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 2b5df2ef51a3..8a643a35d3d4 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1,5 +1,5 @@
/*
- * Mediatek ALSA SoC AFE platform driver
+ * Mediatek 8173 ALSA SoC AFE platform driver
*
* Copyright (c) 2015 MediaTek Inc.
* Author: Koro Chen <koro.chen@mediatek.com>
@@ -24,7 +24,10 @@
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <sound/soc.h>
-#include "mtk-afe-common.h"
+#include "mt8173-afe-common.h"
+#include "../common/mtk-base-afe.h"
+#include "../common/mtk-afe-platform-driver.h"
+#include "../common/mtk-afe-fe-dai.h"
/*****************************************************************************
* R E G I S T E R D E F I N I T I O N
@@ -81,7 +84,6 @@
#define AFE_TDM_CON1 0x0548
#define AFE_TDM_CON2 0x054c
-#define AFE_BASE_END_OFFSET 8
#define AFE_IRQ_STATUS_BITS 0xff
/* AUDIO_TOP_CON0 (0x0000) */
@@ -135,7 +137,7 @@ enum afe_tdm_ch_start {
AFE_TDM_CH_ZERO,
};
-static const unsigned int mtk_afe_backup_list[] = {
+static const unsigned int mt8173_afe_backup_list[] = {
AUDIO_TOP_CON0,
AFE_CONN1,
AFE_CONN2,
@@ -152,18 +154,11 @@ static const unsigned int mtk_afe_backup_list[] = {
AFE_DAC_CON0,
};
-struct mtk_afe {
- /* address for ioremap audio hardware register */
- void __iomem *base_addr;
- struct device *dev;
- struct regmap *regmap;
- struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
- struct clk *clocks[MTK_CLK_NUM];
- unsigned int backup_regs[ARRAY_SIZE(mtk_afe_backup_list)];
- bool suspended;
+struct mt8173_afe_private {
+ struct clk *clocks[MT8173_CLK_NUM];
};
-static const struct snd_pcm_hardware mtk_afe_hardware = {
+static const struct snd_pcm_hardware mt8173_afe_hardware = {
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP_VALID),
.buffer_bytes_max = 256 * 1024,
@@ -174,59 +169,12 @@ static const struct snd_pcm_hardware mtk_afe_hardware = {
.fifo_size = 0,
};
-static snd_pcm_uframes_t mtk_afe_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- unsigned int hw_ptr;
- int ret;
-
- ret = regmap_read(afe->regmap, memif->data->reg_ofs_cur, &hw_ptr);
- if (ret || hw_ptr == 0) {
- dev_err(afe->dev, "%s hw_ptr err\n", __func__);
- hw_ptr = memif->phys_buf_addr;
- }
-
- return bytes_to_frames(substream->runtime,
- hw_ptr - memif->phys_buf_addr);
-}
-
-static const struct snd_pcm_ops mtk_afe_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .pointer = mtk_afe_pcm_pointer,
-};
-
-static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- size_t size;
- struct snd_card *card = rtd->card->snd_card;
- struct snd_pcm *pcm = rtd->pcm;
-
- size = mtk_afe_hardware.buffer_bytes_max;
-
- return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- card->dev, size, size);
-}
-
-static void mtk_afe_pcm_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
-static const struct snd_soc_platform_driver mtk_afe_pcm_platform = {
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
-};
-
-struct mtk_afe_rate {
+struct mt8173_afe_rate {
unsigned int rate;
unsigned int regvalue;
};
-static const struct mtk_afe_rate mtk_afe_i2s_rates[] = {
+static const struct mt8173_afe_rate mt8173_afe_i2s_rates[] = {
{ .rate = 8000, .regvalue = 0 },
{ .rate = 11025, .regvalue = 1 },
{ .rate = 12000, .regvalue = 2 },
@@ -242,21 +190,21 @@ static const struct mtk_afe_rate mtk_afe_i2s_rates[] = {
{ .rate = 192000, .regvalue = 14 },
};
-static int mtk_afe_i2s_fs(unsigned int sample_rate)
+static int mt8173_afe_i2s_fs(unsigned int sample_rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mtk_afe_i2s_rates); i++)
- if (mtk_afe_i2s_rates[i].rate == sample_rate)
- return mtk_afe_i2s_rates[i].regvalue;
+ for (i = 0; i < ARRAY_SIZE(mt8173_afe_i2s_rates); i++)
+ if (mt8173_afe_i2s_rates[i].rate == sample_rate)
+ return mt8173_afe_i2s_rates[i].regvalue;
return -EINVAL;
}
-static int mtk_afe_set_i2s(struct mtk_afe *afe, unsigned int rate)
+static int mt8173_afe_set_i2s(struct mtk_base_afe *afe, unsigned int rate)
{
unsigned int val;
- int fs = mtk_afe_i2s_fs(rate);
+ int fs = mt8173_afe_i2s_fs(rate);
if (fs < 0)
return -EINVAL;
@@ -281,7 +229,7 @@ static int mtk_afe_set_i2s(struct mtk_afe *afe, unsigned int rate)
return 0;
}
-static void mtk_afe_set_i2s_enable(struct mtk_afe *afe, bool enable)
+static void mt8173_afe_set_i2s_enable(struct mtk_base_afe *afe, bool enable)
{
unsigned int val;
@@ -296,8 +244,8 @@ static void mtk_afe_set_i2s_enable(struct mtk_afe *afe, bool enable)
regmap_update_bits(afe->regmap, AFE_I2S_CON1, 0x1, enable);
}
-static int mtk_afe_dais_enable_clks(struct mtk_afe *afe,
- struct clk *m_ck, struct clk *b_ck)
+static int mt8173_afe_dais_enable_clks(struct mtk_base_afe *afe,
+ struct clk *m_ck, struct clk *b_ck)
{
int ret;
@@ -319,9 +267,9 @@ static int mtk_afe_dais_enable_clks(struct mtk_afe *afe,
return 0;
}
-static int mtk_afe_dais_set_clks(struct mtk_afe *afe,
- struct clk *m_ck, unsigned int mck_rate,
- struct clk *b_ck, unsigned int bck_rate)
+static int mt8173_afe_dais_set_clks(struct mtk_base_afe *afe,
+ struct clk *m_ck, unsigned int mck_rate,
+ struct clk *b_ck, unsigned int bck_rate)
{
int ret;
@@ -343,8 +291,8 @@ static int mtk_afe_dais_set_clks(struct mtk_afe *afe,
return 0;
}
-static void mtk_afe_dais_disable_clks(struct mtk_afe *afe,
- struct clk *m_ck, struct clk *b_ck)
+static void mt8173_afe_dais_disable_clks(struct mtk_base_afe *afe,
+ struct clk *m_ck, struct clk *b_ck)
{
if (m_ck)
clk_disable_unprepare(m_ck);
@@ -352,102 +300,101 @@ static void mtk_afe_dais_disable_clks(struct mtk_afe *afe,
clk_disable_unprepare(b_ck);
}
-static int mtk_afe_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int mt8173_afe_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
if (dai->active)
return 0;
- mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S1_M], NULL);
- mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S2_M], NULL);
regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M, 0);
return 0;
}
-static void mtk_afe_i2s_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static void mt8173_afe_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
if (dai->active)
return;
- mtk_afe_set_i2s_enable(afe, false);
+ mt8173_afe_set_i2s_enable(afe, false);
regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M,
AUD_TCON0_PDN_22M | AUD_TCON0_PDN_24M);
- mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S1_M], NULL);
- mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S2_M], NULL);
}
-static int mtk_afe_i2s_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int mt8173_afe_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime * const runtime = substream->runtime;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
int ret;
- mtk_afe_dais_set_clks(afe,
- afe->clocks[MTK_CLK_I2S1_M], runtime->rate * 256,
- NULL, 0);
- mtk_afe_dais_set_clks(afe,
- afe->clocks[MTK_CLK_I2S2_M], runtime->rate * 256,
- NULL, 0);
+ mt8173_afe_dais_set_clks(afe, afe_priv->clocks[MT8173_CLK_I2S1_M],
+ runtime->rate * 256, NULL, 0);
+ mt8173_afe_dais_set_clks(afe, afe_priv->clocks[MT8173_CLK_I2S2_M],
+ runtime->rate * 256, NULL, 0);
/* config I2S */
- ret = mtk_afe_set_i2s(afe, substream->runtime->rate);
+ ret = mt8173_afe_set_i2s(afe, substream->runtime->rate);
if (ret)
return ret;
- mtk_afe_set_i2s_enable(afe, true);
+ mt8173_afe_set_i2s_enable(afe, true);
return 0;
}
-static int mtk_afe_hdmi_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int mt8173_afe_hdmi_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
if (dai->active)
return 0;
- mtk_afe_dais_enable_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
- afe->clocks[MTK_CLK_I2S3_B]);
+ mt8173_afe_dais_enable_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
+ afe_priv->clocks[MT8173_CLK_I2S3_B]);
return 0;
}
-static void mtk_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static void mt8173_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
if (dai->active)
return;
- mtk_afe_dais_disable_clks(afe, afe->clocks[MTK_CLK_I2S3_M],
- afe->clocks[MTK_CLK_I2S3_B]);
+ mt8173_afe_dais_disable_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
+ afe_priv->clocks[MT8173_CLK_I2S3_B]);
}
-static int mtk_afe_hdmi_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int mt8173_afe_hdmi_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime * const runtime = substream->runtime;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
+
unsigned int val;
- mtk_afe_dais_set_clks(afe,
- afe->clocks[MTK_CLK_I2S3_M], runtime->rate * 128,
- afe->clocks[MTK_CLK_I2S3_B],
- runtime->rate * runtime->channels * 32);
+ mt8173_afe_dais_set_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
+ runtime->rate * 128,
+ afe_priv->clocks[MT8173_CLK_I2S3_B],
+ runtime->rate * runtime->channels * 32);
val = AFE_TDM_CON1_BCK_INV |
AFE_TDM_CON1_LRCK_INV |
@@ -498,11 +445,11 @@ static int mtk_afe_hdmi_prepare(struct snd_pcm_substream *substream,
return 0;
}
-static int mtk_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
+static int mt8173_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
dev_info(afe->dev, "%s cmd=%d %s\n", __func__, cmd, dai->name);
@@ -514,10 +461,14 @@ static int mtk_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
/* set connections: O30~O37: L/R/LS/RS/C/LFE/CH7/CH8 */
regmap_write(afe->regmap, AFE_HDMI_CONN0,
- AFE_HDMI_CONN0_O30_I30 | AFE_HDMI_CONN0_O31_I31 |
- AFE_HDMI_CONN0_O32_I34 | AFE_HDMI_CONN0_O33_I35 |
- AFE_HDMI_CONN0_O34_I32 | AFE_HDMI_CONN0_O35_I33 |
- AFE_HDMI_CONN0_O36_I36 | AFE_HDMI_CONN0_O37_I37);
+ AFE_HDMI_CONN0_O30_I30 |
+ AFE_HDMI_CONN0_O31_I31 |
+ AFE_HDMI_CONN0_O32_I34 |
+ AFE_HDMI_CONN0_O33_I35 |
+ AFE_HDMI_CONN0_O34_I32 |
+ AFE_HDMI_CONN0_O35_I33 |
+ AFE_HDMI_CONN0_O36_I36 |
+ AFE_HDMI_CONN0_O37_I37);
/* enable Out control */
regmap_update_bits(afe->regmap, AFE_HDMI_OUT_CON0, 0x1, 0x1);
@@ -537,284 +488,65 @@ static int mtk_afe_hdmi_trigger(struct snd_pcm_substream *substream, int cmd,
regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF,
AUD_TCON0_PDN_HDMI | AUD_TCON0_PDN_SPDF);
-
return 0;
default:
return -EINVAL;
}
}
-static int mtk_afe_dais_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- int ret;
-
- memif->substream = substream;
-
- snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware);
-
- /*
- * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
- * smaller than period_size due to AFE's internal buffer.
- * This easily leads to overrun when avail_min is period_size.
- * One more period can hold the possible unread buffer.
- */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- ret = snd_pcm_hw_constraint_minmax(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS,
- 3,
- mtk_afe_hardware.periods_max);
- if (ret < 0) {
- dev_err(afe->dev, "hw_constraint_minmax failed\n");
- return ret;
- }
- }
- ret = snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
- if (ret < 0)
- dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
- return ret;
-}
-
-static void mtk_afe_dais_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int mt8173_memif_fs(struct snd_pcm_substream *substream,
+ unsigned int rate)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
-
- memif->substream = NULL;
-}
-
-static int mtk_afe_dais_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- int msb_at_bit33 = 0;
- int ret;
-
- dev_dbg(afe->dev,
- "%s period = %u, rate= %u, channels=%u\n",
- __func__, params_period_size(params), params_rate(params),
- params_channels(params));
+ struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
+ struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+ int fs;
- ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
- if (ret < 0)
- return ret;
-
- msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
- memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
- memif->buffer_size = substream->runtime->dma_bytes;
-
- /* start */
- regmap_write(afe->regmap,
- memif->data->reg_ofs_base, memif->phys_buf_addr);
- /* end */
- regmap_write(afe->regmap,
- memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
- memif->phys_buf_addr + memif->buffer_size - 1);
-
- /* set MSB to 33-bit */
- regmap_update_bits(afe->regmap, AFE_MEMIF_MSB,
- 1 << memif->data->msb_shift,
- msb_at_bit33 << memif->data->msb_shift);
-
- /* set channel */
- if (memif->data->mono_shift >= 0) {
- unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
-
- regmap_update_bits(afe->regmap, AFE_DAC_CON1,
- 1 << memif->data->mono_shift,
- mono << memif->data->mono_shift);
- }
-
- /* set rate */
- if (memif->data->fs_shift < 0)
- return 0;
- if (memif->data->id == MTK_AFE_MEMIF_DAI ||
- memif->data->id == MTK_AFE_MEMIF_MOD_DAI) {
- unsigned int val;
-
- switch (params_rate(params)) {
+ if (memif->data->id == MT8173_AFE_MEMIF_DAI ||
+ memif->data->id == MT8173_AFE_MEMIF_MOD_DAI) {
+ switch (rate) {
case 8000:
- val = 0;
+ fs = 0;
break;
case 16000:
- val = 1;
+ fs = 1;
break;
case 32000:
- val = 2;
+ fs = 2;
break;
default:
return -EINVAL;
}
-
- if (memif->data->id == MTK_AFE_MEMIF_DAI)
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- 0x3 << memif->data->fs_shift,
- val << memif->data->fs_shift);
- else
- regmap_update_bits(afe->regmap, AFE_DAC_CON1,
- 0x3 << memif->data->fs_shift,
- val << memif->data->fs_shift);
-
} else {
- int fs = mtk_afe_i2s_fs(params_rate(params));
-
- if (fs < 0)
- return -EINVAL;
-
- regmap_update_bits(afe->regmap, AFE_DAC_CON1,
- 0xf << memif->data->fs_shift,
- fs << memif->data->fs_shift);
+ fs = mt8173_afe_i2s_fs(rate);
}
-
- return 0;
+ return fs;
}
-static int mtk_afe_dais_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int mt8173_irq_fs(struct snd_pcm_substream *substream, unsigned int rate)
{
- return snd_pcm_lib_free_pages(substream);
+ return mt8173_afe_i2s_fs(rate);
}
-static int mtk_afe_dais_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_pcm_runtime * const runtime = substream->runtime;
- struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
- unsigned int counter = runtime->period_size;
-
- dev_info(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- if (memif->data->enable_shift >= 0)
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- 1 << memif->data->enable_shift,
- 1 << memif->data->enable_shift);
-
- /* set irq counter */
- regmap_update_bits(afe->regmap,
- memif->data->irq_reg_cnt,
- 0x3ffff << memif->data->irq_cnt_shift,
- counter << memif->data->irq_cnt_shift);
-
- /* set irq fs */
- if (memif->data->irq_fs_shift >= 0) {
- int fs = mtk_afe_i2s_fs(runtime->rate);
-
- if (fs < 0)
- return -EINVAL;
-
- regmap_update_bits(afe->regmap,
- AFE_IRQ_MCU_CON,
- 0xf << memif->data->irq_fs_shift,
- fs << memif->data->irq_fs_shift);
- }
- /* enable interrupt */
- regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON,
- 1 << memif->data->irq_en_shift,
- 1 << memif->data->irq_en_shift);
-
- return 0;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- if (memif->data->enable_shift >= 0)
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- 1 << memif->data->enable_shift, 0);
- /* disable interrupt */
- regmap_update_bits(afe->regmap, AFE_IRQ_MCU_CON,
- 1 << memif->data->irq_en_shift,
- 0 << memif->data->irq_en_shift);
- /* and clear pending IRQ */
- regmap_write(afe->regmap, AFE_IRQ_CLR,
- 1 << memif->data->irq_clr_shift);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/* FE DAIs */
-static const struct snd_soc_dai_ops mtk_afe_dai_ops = {
- .startup = mtk_afe_dais_startup,
- .shutdown = mtk_afe_dais_shutdown,
- .hw_params = mtk_afe_dais_hw_params,
- .hw_free = mtk_afe_dais_hw_free,
- .trigger = mtk_afe_dais_trigger,
-};
-
/* BE DAIs */
-static const struct snd_soc_dai_ops mtk_afe_i2s_ops = {
- .startup = mtk_afe_i2s_startup,
- .shutdown = mtk_afe_i2s_shutdown,
- .prepare = mtk_afe_i2s_prepare,
+static const struct snd_soc_dai_ops mt8173_afe_i2s_ops = {
+ .startup = mt8173_afe_i2s_startup,
+ .shutdown = mt8173_afe_i2s_shutdown,
+ .prepare = mt8173_afe_i2s_prepare,
};
-static const struct snd_soc_dai_ops mtk_afe_hdmi_ops = {
- .startup = mtk_afe_hdmi_startup,
- .shutdown = mtk_afe_hdmi_shutdown,
- .prepare = mtk_afe_hdmi_prepare,
- .trigger = mtk_afe_hdmi_trigger,
-
+static const struct snd_soc_dai_ops mt8173_afe_hdmi_ops = {
+ .startup = mt8173_afe_hdmi_startup,
+ .shutdown = mt8173_afe_hdmi_shutdown,
+ .prepare = mt8173_afe_hdmi_prepare,
+ .trigger = mt8173_afe_hdmi_trigger,
};
-static int mtk_afe_runtime_suspend(struct device *dev);
-static int mtk_afe_runtime_resume(struct device *dev);
-
-static int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
-{
- struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
- int i;
-
- dev_dbg(afe->dev, "%s\n", __func__);
- if (pm_runtime_status_suspended(afe->dev) || afe->suspended)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
- regmap_read(afe->regmap, mtk_afe_backup_list[i],
- &afe->backup_regs[i]);
-
- afe->suspended = true;
- mtk_afe_runtime_suspend(afe->dev);
- return 0;
-}
-
-static int mtk_afe_dai_resume(struct snd_soc_dai *dai)
-{
- struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
- int i = 0;
-
- dev_dbg(afe->dev, "%s\n", __func__);
- if (pm_runtime_status_suspended(afe->dev) || !afe->suspended)
- return 0;
-
- mtk_afe_runtime_resume(afe->dev);
-
- for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
- regmap_write(afe->regmap, mtk_afe_backup_list[i],
- afe->backup_regs[i]);
-
- afe->suspended = false;
- return 0;
-}
-
-static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
+static struct snd_soc_dai_driver mt8173_afe_pcm_dais[] = {
/* FE DAIs: memory intefaces to CPU */
{
.name = "DL1", /* downlink 1 */
- .id = MTK_AFE_MEMIF_DL1,
+ .id = MT8173_AFE_MEMIF_DL1,
.suspend = mtk_afe_dai_suspend,
.resume = mtk_afe_dai_resume,
.playback = {
@@ -824,10 +556,10 @@ static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
- .ops = &mtk_afe_dai_ops,
+ .ops = &mtk_afe_fe_ops,
}, {
.name = "VUL", /* voice uplink */
- .id = MTK_AFE_MEMIF_VUL,
+ .id = MT8173_AFE_MEMIF_VUL,
.suspend = mtk_afe_dai_suspend,
.resume = mtk_afe_dai_resume,
.capture = {
@@ -837,11 +569,11 @@ static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
- .ops = &mtk_afe_dai_ops,
+ .ops = &mtk_afe_fe_ops,
}, {
/* BE DAIs */
.name = "I2S",
- .id = MTK_AFE_IO_I2S,
+ .id = MT8173_AFE_IO_I2S,
.playback = {
.stream_name = "I2S Playback",
.channels_min = 1,
@@ -856,16 +588,16 @@ static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
- .ops = &mtk_afe_i2s_ops,
+ .ops = &mt8173_afe_i2s_ops,
.symmetric_rates = 1,
},
};
-static struct snd_soc_dai_driver mtk_afe_hdmi_dais[] = {
+static struct snd_soc_dai_driver mt8173_afe_hdmi_dais[] = {
/* FE DAIs */
{
.name = "HDMI",
- .id = MTK_AFE_MEMIF_HDMI,
+ .id = MT8173_AFE_MEMIF_HDMI,
.suspend = mtk_afe_dai_suspend,
.resume = mtk_afe_dai_resume,
.playback = {
@@ -878,11 +610,11 @@ static struct snd_soc_dai_driver mtk_afe_hdmi_dais[] = {
SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
- .ops = &mtk_afe_dai_ops,
+ .ops = &mtk_afe_fe_ops,
}, {
/* BE DAIs */
.name = "HDMIO",
- .id = MTK_AFE_IO_HDMI,
+ .id = MT8173_AFE_IO_HDMI,
.playback = {
.stream_name = "HDMIO Playback",
.channels_min = 2,
@@ -893,29 +625,29 @@ static struct snd_soc_dai_driver mtk_afe_hdmi_dais[] = {
SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
- .ops = &mtk_afe_hdmi_ops,
+ .ops = &mt8173_afe_hdmi_ops,
},
};
-static const struct snd_kcontrol_new mtk_afe_o03_mix[] = {
+static const struct snd_kcontrol_new mt8173_afe_o03_mix[] = {
SOC_DAPM_SINGLE_AUTODISABLE("I05 Switch", AFE_CONN1, 21, 1, 0),
};
-static const struct snd_kcontrol_new mtk_afe_o04_mix[] = {
+static const struct snd_kcontrol_new mt8173_afe_o04_mix[] = {
SOC_DAPM_SINGLE_AUTODISABLE("I06 Switch", AFE_CONN2, 6, 1, 0),
};
-static const struct snd_kcontrol_new mtk_afe_o09_mix[] = {
+static const struct snd_kcontrol_new mt8173_afe_o09_mix[] = {
SOC_DAPM_SINGLE_AUTODISABLE("I03 Switch", AFE_CONN3, 0, 1, 0),
SOC_DAPM_SINGLE_AUTODISABLE("I17 Switch", AFE_CONN7, 30, 1, 0),
};
-static const struct snd_kcontrol_new mtk_afe_o10_mix[] = {
+static const struct snd_kcontrol_new mt8173_afe_o10_mix[] = {
SOC_DAPM_SINGLE_AUTODISABLE("I04 Switch", AFE_CONN3, 3, 1, 0),
SOC_DAPM_SINGLE_AUTODISABLE("I18 Switch", AFE_CONN8, 0, 1, 0),
};
-static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
+static const struct snd_soc_dapm_widget mt8173_afe_pcm_widgets[] = {
/* inter-connections */
SND_SOC_DAPM_MIXER("I03", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("I04", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -925,16 +657,16 @@ static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
SND_SOC_DAPM_MIXER("I18", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("O03", SND_SOC_NOPM, 0, 0,
- mtk_afe_o03_mix, ARRAY_SIZE(mtk_afe_o03_mix)),
+ mt8173_afe_o03_mix, ARRAY_SIZE(mt8173_afe_o03_mix)),
SND_SOC_DAPM_MIXER("O04", SND_SOC_NOPM, 0, 0,
- mtk_afe_o04_mix, ARRAY_SIZE(mtk_afe_o04_mix)),
+ mt8173_afe_o04_mix, ARRAY_SIZE(mt8173_afe_o04_mix)),
SND_SOC_DAPM_MIXER("O09", SND_SOC_NOPM, 0, 0,
- mtk_afe_o09_mix, ARRAY_SIZE(mtk_afe_o09_mix)),
+ mt8173_afe_o09_mix, ARRAY_SIZE(mt8173_afe_o09_mix)),
SND_SOC_DAPM_MIXER("O10", SND_SOC_NOPM, 0, 0,
- mtk_afe_o10_mix, ARRAY_SIZE(mtk_afe_o10_mix)),
+ mt8173_afe_o10_mix, ARRAY_SIZE(mt8173_afe_o10_mix)),
};
-static const struct snd_soc_dapm_route mtk_afe_pcm_routes[] = {
+static const struct snd_soc_dapm_route mt8173_afe_pcm_routes[] = {
{"I05", NULL, "DL1"},
{"I06", NULL, "DL1"},
{"I2S Playback", NULL, "O03"},
@@ -953,140 +685,257 @@ static const struct snd_soc_dapm_route mtk_afe_pcm_routes[] = {
{ "O10", "I04 Switch", "I04" },
};
-static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = {
+static const struct snd_soc_dapm_route mt8173_afe_hdmi_routes[] = {
{"HDMIO Playback", NULL, "HDMI"},
};
-static const struct snd_soc_component_driver mtk_afe_pcm_dai_component = {
- .name = "mtk-afe-pcm-dai",
- .dapm_widgets = mtk_afe_pcm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mtk_afe_pcm_widgets),
- .dapm_routes = mtk_afe_pcm_routes,
- .num_dapm_routes = ARRAY_SIZE(mtk_afe_pcm_routes),
+static const struct snd_soc_component_driver mt8173_afe_pcm_dai_component = {
+ .name = "mt8173-afe-pcm-dai",
+ .dapm_widgets = mt8173_afe_pcm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(mt8173_afe_pcm_widgets),
+ .dapm_routes = mt8173_afe_pcm_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_afe_pcm_routes),
};
-static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = {
- .name = "mtk-afe-hdmi-dai",
- .dapm_routes = mtk_afe_hdmi_routes,
- .num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes),
+static const struct snd_soc_component_driver mt8173_afe_hdmi_dai_component = {
+ .name = "mt8173-afe-hdmi-dai",
+ .dapm_routes = mt8173_afe_hdmi_routes,
+ .num_dapm_routes = ARRAY_SIZE(mt8173_afe_hdmi_routes),
};
-static const char *aud_clks[MTK_CLK_NUM] = {
- [MTK_CLK_INFRASYS_AUD] = "infra_sys_audio_clk",
- [MTK_CLK_TOP_PDN_AUD] = "top_pdn_audio",
- [MTK_CLK_TOP_PDN_AUD_BUS] = "top_pdn_aud_intbus",
- [MTK_CLK_I2S0_M] = "i2s0_m",
- [MTK_CLK_I2S1_M] = "i2s1_m",
- [MTK_CLK_I2S2_M] = "i2s2_m",
- [MTK_CLK_I2S3_M] = "i2s3_m",
- [MTK_CLK_I2S3_B] = "i2s3_b",
- [MTK_CLK_BCK0] = "bck0",
- [MTK_CLK_BCK1] = "bck1",
+static const char *aud_clks[MT8173_CLK_NUM] = {
+ [MT8173_CLK_INFRASYS_AUD] = "infra_sys_audio_clk",
+ [MT8173_CLK_TOP_PDN_AUD] = "top_pdn_audio",
+ [MT8173_CLK_TOP_PDN_AUD_BUS] = "top_pdn_aud_intbus",
+ [MT8173_CLK_I2S0_M] = "i2s0_m",
+ [MT8173_CLK_I2S1_M] = "i2s1_m",
+ [MT8173_CLK_I2S2_M] = "i2s2_m",
+ [MT8173_CLK_I2S3_M] = "i2s3_m",
+ [MT8173_CLK_I2S3_B] = "i2s3_b",
+ [MT8173_CLK_BCK0] = "bck0",
+ [MT8173_CLK_BCK1] = "bck1",
};
-static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
+static const struct mtk_base_memif_data memif_data[MT8173_AFE_MEMIF_NUM] = {
{
.name = "DL1",
- .id = MTK_AFE_MEMIF_DL1,
+ .id = MT8173_AFE_MEMIF_DL1,
.reg_ofs_base = AFE_DL1_BASE,
.reg_ofs_cur = AFE_DL1_CUR,
+ .fs_reg = AFE_DAC_CON1,
.fs_shift = 0,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
.mono_shift = 21,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
.enable_shift = 1,
- .irq_reg_cnt = AFE_IRQ_CNT1,
- .irq_cnt_shift = 0,
- .irq_en_shift = 0,
- .irq_fs_shift = 4,
- .irq_clr_shift = 0,
+ .msb_reg = AFE_MEMIF_MSB,
.msb_shift = 0,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
}, {
.name = "DL2",
- .id = MTK_AFE_MEMIF_DL2,
+ .id = MT8173_AFE_MEMIF_DL2,
.reg_ofs_base = AFE_DL2_BASE,
.reg_ofs_cur = AFE_DL2_CUR,
+ .fs_reg = AFE_DAC_CON1,
.fs_shift = 4,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
.mono_shift = 22,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
.enable_shift = 2,
- .irq_reg_cnt = AFE_IRQ_CNT1,
- .irq_cnt_shift = 20,
- .irq_en_shift = 2,
- .irq_fs_shift = 16,
- .irq_clr_shift = 2,
+ .msb_reg = AFE_MEMIF_MSB,
.msb_shift = 1,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
}, {
.name = "VUL",
- .id = MTK_AFE_MEMIF_VUL,
+ .id = MT8173_AFE_MEMIF_VUL,
.reg_ofs_base = AFE_VUL_BASE,
.reg_ofs_cur = AFE_VUL_CUR,
+ .fs_reg = AFE_DAC_CON1,
.fs_shift = 16,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
.mono_shift = 27,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
.enable_shift = 3,
- .irq_reg_cnt = AFE_IRQ_CNT2,
- .irq_cnt_shift = 0,
- .irq_en_shift = 1,
- .irq_fs_shift = 8,
- .irq_clr_shift = 1,
+ .msb_reg = AFE_MEMIF_MSB,
.msb_shift = 6,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
}, {
.name = "DAI",
- .id = MTK_AFE_MEMIF_DAI,
+ .id = MT8173_AFE_MEMIF_DAI,
.reg_ofs_base = AFE_DAI_BASE,
.reg_ofs_cur = AFE_DAI_CUR,
+ .fs_reg = AFE_DAC_CON0,
.fs_shift = 24,
+ .fs_maskbit = 0x3,
+ .mono_reg = -1,
.mono_shift = -1,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
.enable_shift = 4,
- .irq_reg_cnt = AFE_IRQ_CNT2,
- .irq_cnt_shift = 20,
- .irq_en_shift = 3,
- .irq_fs_shift = 20,
- .irq_clr_shift = 3,
+ .msb_reg = AFE_MEMIF_MSB,
.msb_shift = 5,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
}, {
.name = "AWB",
- .id = MTK_AFE_MEMIF_AWB,
+ .id = MT8173_AFE_MEMIF_AWB,
.reg_ofs_base = AFE_AWB_BASE,
.reg_ofs_cur = AFE_AWB_CUR,
+ .fs_reg = AFE_DAC_CON1,
.fs_shift = 12,
+ .fs_maskbit = 0xf,
+ .mono_reg = AFE_DAC_CON1,
.mono_shift = 24,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
.enable_shift = 6,
- .irq_reg_cnt = AFE_IRQ_CNT7,
- .irq_cnt_shift = 0,
- .irq_en_shift = 14,
- .irq_fs_shift = 24,
- .irq_clr_shift = 6,
+ .msb_reg = AFE_MEMIF_MSB,
.msb_shift = 3,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
}, {
.name = "MOD_DAI",
- .id = MTK_AFE_MEMIF_MOD_DAI,
+ .id = MT8173_AFE_MEMIF_MOD_DAI,
.reg_ofs_base = AFE_MOD_PCM_BASE,
.reg_ofs_cur = AFE_MOD_PCM_CUR,
+ .fs_reg = AFE_DAC_CON1,
.fs_shift = 30,
+ .fs_maskbit = 0x3,
+ .mono_reg = AFE_DAC_CON1,
.mono_shift = 30,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = AFE_DAC_CON0,
.enable_shift = 7,
- .irq_reg_cnt = AFE_IRQ_CNT2,
- .irq_cnt_shift = 20,
- .irq_en_shift = 3,
- .irq_fs_shift = 20,
- .irq_clr_shift = 3,
+ .msb_reg = AFE_MEMIF_MSB,
.msb_shift = 4,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
}, {
.name = "HDMI",
- .id = MTK_AFE_MEMIF_HDMI,
+ .id = MT8173_AFE_MEMIF_HDMI,
.reg_ofs_base = AFE_HDMI_OUT_BASE,
.reg_ofs_cur = AFE_HDMI_OUT_CUR,
+ .fs_reg = -1,
.fs_shift = -1,
+ .fs_maskbit = -1,
+ .mono_reg = -1,
.mono_shift = -1,
+ .hd_reg = -1,
+ .hd_shift = -1,
+ .enable_reg = -1,
.enable_shift = -1,
- .irq_reg_cnt = AFE_IRQ_CNT5,
+ .msb_reg = AFE_MEMIF_MSB,
+ .msb_shift = 8,
+ .agent_disable_reg = -1,
+ .agent_disable_shift = -1,
+ },
+};
+
+static const struct mtk_base_irq_data irq_data[MT8173_AFE_IRQ_NUM] = {
+ {
+ .id = MT8173_AFE_IRQ_DL1,
+ .irq_cnt_reg = AFE_IRQ_CNT1,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 0,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 4,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 0,
+ }, {
+ .id = MT8173_AFE_IRQ_DL2,
+ .irq_cnt_reg = AFE_IRQ_CNT1,
+ .irq_cnt_shift = 20,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 2,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 16,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 2,
+ }, {
+ .id = MT8173_AFE_IRQ_VUL,
+ .irq_cnt_reg = AFE_IRQ_CNT2,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 1,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 8,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 1,
+ }, {
+ .id = MT8173_AFE_IRQ_DAI,
+ .irq_cnt_reg = AFE_IRQ_CNT2,
+ .irq_cnt_shift = 20,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 3,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 20,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 3,
+ }, {
+ .id = MT8173_AFE_IRQ_AWB,
+ .irq_cnt_reg = AFE_IRQ_CNT7,
.irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 14,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 24,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 6,
+ }, {
+ .id = MT8173_AFE_IRQ_DAI,
+ .irq_cnt_reg = AFE_IRQ_CNT2,
+ .irq_cnt_shift = 20,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
+ .irq_en_shift = 3,
+ .irq_fs_reg = AFE_IRQ_MCU_CON,
+ .irq_fs_shift = 20,
+ .irq_fs_maskbit = 0xf,
+ .irq_clr_reg = AFE_IRQ_CLR,
+ .irq_clr_shift = 3,
+ }, {
+ .id = MT8173_AFE_IRQ_HDMI,
+ .irq_cnt_reg = AFE_IRQ_CNT5,
+ .irq_cnt_shift = 0,
+ .irq_cnt_maskbit = 0x3ffff,
+ .irq_en_reg = AFE_IRQ_MCU_CON,
.irq_en_shift = 12,
+ .irq_fs_reg = -1,
.irq_fs_shift = -1,
+ .irq_fs_maskbit = -1,
+ .irq_clr_reg = AFE_IRQ_CLR,
.irq_clr_shift = 4,
- .msb_shift = 8,
},
};
-static const struct regmap_config mtk_afe_regmap_config = {
+static const struct regmap_config mt8173_afe_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -1094,9 +943,9 @@ static const struct regmap_config mtk_afe_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static irqreturn_t mtk_afe_irq_handler(int irq, void *dev_id)
+static irqreturn_t mt8173_afe_irq_handler(int irq, void *dev_id)
{
- struct mtk_afe *afe = dev_id;
+ struct mtk_base_afe *afe = dev_id;
unsigned int reg_value;
int i, ret;
@@ -1107,10 +956,16 @@ static irqreturn_t mtk_afe_irq_handler(int irq, void *dev_id)
goto err_irq;
}
- for (i = 0; i < MTK_AFE_MEMIF_NUM; i++) {
- struct mtk_afe_memif *memif = &afe->memif[i];
+ for (i = 0; i < MT8173_AFE_MEMIF_NUM; i++) {
+ struct mtk_base_afe_memif *memif = &afe->memif[i];
+ struct mtk_base_afe_irq *irq;
- if (!(reg_value & (1 << memif->data->irq_clr_shift)))
+ if (memif->irq_usage < 0)
+ continue;
+
+ irq = &afe->irqs[memif->irq_usage];
+
+ if (!(reg_value & (1 << irq->irq_data->irq_clr_shift)))
continue;
snd_pcm_period_elapsed(memif->substream);
@@ -1118,14 +973,16 @@ static irqreturn_t mtk_afe_irq_handler(int irq, void *dev_id)
err_irq:
/* clear irq */
- regmap_write(afe->regmap, AFE_IRQ_CLR, reg_value & AFE_IRQ_STATUS_BITS);
+ regmap_write(afe->regmap, AFE_IRQ_CLR,
+ reg_value & AFE_IRQ_STATUS_BITS);
return IRQ_HANDLED;
}
-static int mtk_afe_runtime_suspend(struct device *dev)
+static int mt8173_afe_runtime_suspend(struct device *dev)
{
- struct mtk_afe *afe = dev_get_drvdata(dev);
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
/* disable AFE */
regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0);
@@ -1134,38 +991,47 @@ static int mtk_afe_runtime_suspend(struct device *dev)
regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
AUD_TCON0_PDN_AFE, AUD_TCON0_PDN_AFE);
- clk_disable_unprepare(afe->clocks[MTK_CLK_BCK0]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_BCK1]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]);
- clk_disable_unprepare(afe->clocks[MTK_CLK_INFRASYS_AUD]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S1_M]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S2_M]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK0]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK1]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD_BUS]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_INFRASYS_AUD]);
return 0;
}
-static int mtk_afe_runtime_resume(struct device *dev)
+static int mt8173_afe_runtime_resume(struct device *dev)
{
- struct mtk_afe *afe = dev_get_drvdata(dev);
+ struct mtk_base_afe *afe = dev_get_drvdata(dev);
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
int ret;
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_INFRASYS_AUD]);
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_INFRASYS_AUD]);
if (ret)
return ret;
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]);
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD_BUS]);
if (ret)
goto err_infra;
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_TOP_PDN_AUD]);
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD]);
if (ret)
goto err_top_aud_bus;
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_BCK0]);
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_BCK0]);
if (ret)
goto err_top_aud;
- ret = clk_prepare_enable(afe->clocks[MTK_CLK_BCK1]);
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_BCK1]);
if (ret)
goto err_bck0;
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_I2S1_M]);
+ if (ret)
+ goto err_i2s1_m;
+ ret = clk_prepare_enable(afe_priv->clocks[MT8173_CLK_I2S2_M]);
+ if (ret)
+ goto err_i2s2_m;
/* enable AFE clk */
regmap_update_bits(afe->regmap, AUDIO_TOP_CON0, AUD_TCON0_PDN_AFE, 0);
@@ -1181,39 +1047,45 @@ static int mtk_afe_runtime_resume(struct device *dev)
regmap_update_bits(afe->regmap, AFE_DAC_CON0, 0x1, 0x1);
return 0;
+err_i2s1_m:
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S1_M]);
+err_i2s2_m:
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_I2S2_M]);
err_bck0:
- clk_disable_unprepare(afe->clocks[MTK_CLK_BCK0]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_BCK0]);
err_top_aud:
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD]);
err_top_aud_bus:
- clk_disable_unprepare(afe->clocks[MTK_CLK_TOP_PDN_AUD_BUS]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_TOP_PDN_AUD_BUS]);
err_infra:
- clk_disable_unprepare(afe->clocks[MTK_CLK_INFRASYS_AUD]);
+ clk_disable_unprepare(afe_priv->clocks[MT8173_CLK_INFRASYS_AUD]);
return ret;
}
-static int mtk_afe_init_audio_clk(struct mtk_afe *afe)
+static int mt8173_afe_init_audio_clk(struct mtk_base_afe *afe)
{
size_t i;
+ struct mt8173_afe_private *afe_priv = afe->platform_priv;
for (i = 0; i < ARRAY_SIZE(aud_clks); i++) {
- afe->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
- if (IS_ERR(afe->clocks[i])) {
+ afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
+ if (IS_ERR(afe_priv->clocks[i])) {
dev_err(afe->dev, "%s devm_clk_get %s fail\n",
__func__, aud_clks[i]);
- return PTR_ERR(afe->clocks[i]);
+ return PTR_ERR(afe_priv->clocks[i]);
}
}
- clk_set_rate(afe->clocks[MTK_CLK_BCK0], 22579200); /* 22M */
- clk_set_rate(afe->clocks[MTK_CLK_BCK1], 24576000); /* 24M */
+ clk_set_rate(afe_priv->clocks[MT8173_CLK_BCK0], 22579200); /* 22M */
+ clk_set_rate(afe_priv->clocks[MT8173_CLK_BCK1], 24576000); /* 24M */
return 0;
}
-static int mtk_afe_pcm_dev_probe(struct platform_device *pdev)
+static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
{
int ret, i;
unsigned int irq_id;
- struct mtk_afe *afe;
+ struct mtk_base_afe *afe;
+ struct mt8173_afe_private *afe_priv;
struct resource *res;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
@@ -1224,6 +1096,12 @@ static int mtk_afe_pcm_dev_probe(struct platform_device *pdev)
if (!afe)
return -ENOMEM;
+ afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ GFP_KERNEL);
+ afe_priv = afe->platform_priv;
+ if (!afe_priv)
+ return -ENOMEM;
+
afe->dev = &pdev->dev;
irq_id = platform_get_irq(pdev, 0);
@@ -1231,7 +1109,7 @@ static int mtk_afe_pcm_dev_probe(struct platform_device *pdev)
dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
return -ENXIO;
}
- ret = devm_request_irq(afe->dev, irq_id, mtk_afe_irq_handler,
+ ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
0, "Afe_ISR_Handle", (void *)afe);
if (ret) {
dev_err(afe->dev, "could not request_irq\n");
@@ -1244,48 +1122,75 @@ static int mtk_afe_pcm_dev_probe(struct platform_device *pdev)
return PTR_ERR(afe->base_addr);
afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
- &mtk_afe_regmap_config);
+ &mt8173_afe_regmap_config);
if (IS_ERR(afe->regmap))
return PTR_ERR(afe->regmap);
/* initial audio related clock */
- ret = mtk_afe_init_audio_clk(afe);
+ ret = mt8173_afe_init_audio_clk(afe);
if (ret) {
- dev_err(afe->dev, "mtk_afe_init_audio_clk fail\n");
+ dev_err(afe->dev, "mt8173_afe_init_audio_clk fail\n");
return ret;
}
- for (i = 0; i < MTK_AFE_MEMIF_NUM; i++)
+ /* memif & irq initialization */
+ afe->memif_size = MT8173_AFE_MEMIF_NUM;
+ afe->memif = devm_kcalloc(afe->dev, afe->memif_size,
+ sizeof(*afe->memif), GFP_KERNEL);
+ if (!afe->memif)
+ return -ENOMEM;
+
+ afe->irqs_size = MT8173_AFE_IRQ_NUM;
+ afe->irqs = devm_kcalloc(afe->dev, afe->irqs_size,
+ sizeof(*afe->irqs), GFP_KERNEL);
+ if (!afe->irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < afe->irqs_size; i++) {
afe->memif[i].data = &memif_data[i];
+ afe->irqs[i].irq_data = &irq_data[i];
+ afe->irqs[i].irq_occupyed = true;
+ afe->memif[i].irq_usage = i;
+ afe->memif[i].const_irq = 1;
+ }
+
+ afe->mtk_afe_hardware = &mt8173_afe_hardware;
+ afe->memif_fs = mt8173_memif_fs;
+ afe->irq_fs = mt8173_irq_fs;
platform_set_drvdata(pdev, afe);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
- ret = mtk_afe_runtime_resume(&pdev->dev);
+ ret = mt8173_afe_runtime_resume(&pdev->dev);
if (ret)
goto err_pm_disable;
}
+ afe->reg_back_up_list = mt8173_afe_backup_list;
+ afe->reg_back_up_list_num = ARRAY_SIZE(mt8173_afe_backup_list);
+ afe->runtime_resume = mt8173_afe_runtime_resume;
+ afe->runtime_suspend = mt8173_afe_runtime_suspend;
+
ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
if (ret)
goto err_pm_disable;
ret = snd_soc_register_component(&pdev->dev,
- &mtk_afe_pcm_dai_component,
- mtk_afe_pcm_dais,
- ARRAY_SIZE(mtk_afe_pcm_dais));
+ &mt8173_afe_pcm_dai_component,
+ mt8173_afe_pcm_dais,
+ ARRAY_SIZE(mt8173_afe_pcm_dais));
if (ret)
goto err_platform;
ret = snd_soc_register_component(&pdev->dev,
- &mtk_afe_hdmi_dai_component,
- mtk_afe_hdmi_dais,
- ARRAY_SIZE(mtk_afe_hdmi_dais));
+ &mt8173_afe_hdmi_dai_component,
+ mt8173_afe_hdmi_dais,
+ ARRAY_SIZE(mt8173_afe_hdmi_dais));
if (ret)
goto err_comp;
- dev_info(&pdev->dev, "MTK AFE driver initialized.\n");
+ dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
return 0;
err_comp:
@@ -1297,38 +1202,38 @@ err_pm_disable:
return ret;
}
-static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
+static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
- mtk_afe_runtime_suspend(&pdev->dev);
+ mt8173_afe_runtime_suspend(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
return 0;
}
-static const struct of_device_id mtk_afe_pcm_dt_match[] = {
+static const struct of_device_id mt8173_afe_pcm_dt_match[] = {
{ .compatible = "mediatek,mt8173-afe-pcm", },
{ }
};
-MODULE_DEVICE_TABLE(of, mtk_afe_pcm_dt_match);
+MODULE_DEVICE_TABLE(of, mt8173_afe_pcm_dt_match);
-static const struct dev_pm_ops mtk_afe_pm_ops = {
- SET_RUNTIME_PM_OPS(mtk_afe_runtime_suspend, mtk_afe_runtime_resume,
- NULL)
+static const struct dev_pm_ops mt8173_afe_pm_ops = {
+ SET_RUNTIME_PM_OPS(mt8173_afe_runtime_suspend,
+ mt8173_afe_runtime_resume, NULL)
};
-static struct platform_driver mtk_afe_pcm_driver = {
+static struct platform_driver mt8173_afe_pcm_driver = {
.driver = {
- .name = "mtk-afe-pcm",
- .of_match_table = mtk_afe_pcm_dt_match,
- .pm = &mtk_afe_pm_ops,
+ .name = "mt8173-afe-pcm",
+ .of_match_table = mt8173_afe_pcm_dt_match,
+ .pm = &mt8173_afe_pm_ops,
},
- .probe = mtk_afe_pcm_dev_probe,
- .remove = mtk_afe_pcm_dev_remove,
+ .probe = mt8173_afe_pcm_dev_probe,
+ .remove = mt8173_afe_pcm_dev_remove,
};
-module_platform_driver(mtk_afe_pcm_driver);
+module_platform_driver(mt8173_afe_pcm_driver);
MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver");
MODULE_AUTHOR("Koro Chen <koro.chen@mediatek.com>");
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c
index 71a1a35047ba..5524a2c727ec 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
@@ -18,7 +18,7 @@
#include <sound/soc.h>
#include <sound/jack.h>
#include <linux/gpio.h>
-#include "../codecs/max98090.h"
+#include "../../codecs/max98090.h"
static struct snd_soc_jack mt8173_max98090_jack;
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 58e083642dea..467f7049a288 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -19,7 +19,7 @@
#include <linux/of_gpio.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "../codecs/rt5645.h"
+#include "../../codecs/rt5645.h"
#define MCLK_FOR_CODECS 12288000
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index bb593926c62d..1b8b2a778845 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -19,8 +19,8 @@
#include <linux/of_gpio.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "../codecs/rt5645.h"
-#include "../codecs/rt5677.h"
+#include "../../codecs/rt5645.h"
+#include "../../codecs/rt5677.h"
#define MCLK_FOR_CODECS 12288000
diff --git a/sound/soc/mediatek/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index a27a6673dbe3..ba65f4157a7e 100644
--- a/sound/soc/mediatek/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -19,10 +19,24 @@
#include <linux/of_gpio.h>
#include <sound/soc.h>
#include <sound/jack.h>
-#include "../codecs/rt5645.h"
+#include "../../codecs/rt5645.h"
#define MCLK_FOR_CODECS 12288000
+enum mt8173_rt5650_mclk {
+ MT8173_RT5650_MCLK_EXTERNAL = 0,
+ MT8173_RT5650_MCLK_INTERNAL,
+};
+
+struct mt8173_rt5650_platform_data {
+ enum mt8173_rt5650_mclk pll_from;
+ /* 0 = external oscillator; 1 = internal source from mt8173 */
+};
+
+static struct mt8173_rt5650_platform_data mt8173_rt5650_priv = {
+ .pll_from = MT8173_RT5650_MCLK_EXTERNAL,
+};
+
static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = {
SND_SOC_DAPM_SPK("Speaker", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
@@ -54,13 +68,29 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ unsigned int mclk_clock;
int i, ret;
+ switch (mt8173_rt5650_priv.pll_from) {
+ case MT8173_RT5650_MCLK_EXTERNAL:
+ /* mclk = 12.288M */
+ mclk_clock = MCLK_FOR_CODECS;
+ break;
+ case MT8173_RT5650_MCLK_INTERNAL:
+ /* mclk = sampling rate*256 */
+ mclk_clock = params_rate(params) * 256;
+ break;
+ default:
+ /* mclk = 12.288M */
+ mclk_clock = MCLK_FOR_CODECS;
+ break;
+ }
+
for (i = 0; i < rtd->num_codecs; i++) {
struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
- /* pll from mclk 12.288M */
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
+ /* pll from mclk */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, mclk_clock,
params_rate(params) * 512);
if (ret)
return ret;
@@ -139,7 +169,9 @@ static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = {
enum {
DAI_LINK_PLAYBACK,
DAI_LINK_CAPTURE,
+ DAI_LINK_HDMI,
DAI_LINK_CODEC_I2S,
+ DAI_LINK_HDMI_I2S,
};
/* Digital audio interface glue - connects codec <---> CPU */
@@ -165,6 +197,16 @@ static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
.dynamic = 1,
.dpcm_capture = 1,
},
+ [DAI_LINK_HDMI] = {
+ .name = "HDMI",
+ .stream_name = "HDMI PCM",
+ .cpu_dai_name = "HDMI",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
/* Back End DAI links */
[DAI_LINK_CODEC_I2S] = {
.name = "Codec",
@@ -180,6 +222,13 @@ static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
},
+ [DAI_LINK_HDMI_I2S] = {
+ .name = "HDMI BE",
+ .cpu_dai_name = "HDMIO",
+ .no_pcm = 1,
+ .codec_dai_name = "i2s-hifi",
+ .dpcm_playback = 1,
+ },
};
static struct snd_soc_card mt8173_rt5650_card = {
@@ -243,6 +292,24 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
mt8173_rt5650_codecs[1].dai_name = codec_capture_dai;
}
+ if (device_property_present(&pdev->dev, "mediatek,mclk")) {
+ ret = device_property_read_u32(&pdev->dev,
+ "mediatek,mclk",
+ &mt8173_rt5650_priv.pll_from);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s snd_soc_register_card fail %d\n",
+ __func__, ret);
+ }
+ }
+
+ mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codec_of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 1);
+ if (!mt8173_rt5650_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
card->dev = &pdev->dev;
platform_set_drvdata(pdev, card);
diff --git a/sound/soc/mediatek/mtk-afe-common.h b/sound/soc/mediatek/mtk-afe-common.h
deleted file mode 100644
index f341f623e887..000000000000
--- a/sound/soc/mediatek/mtk-afe-common.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * mtk_afe_common.h -- Mediatek audio driver common definitions
- *
- * Copyright (c) 2015 MediaTek Inc.
- * Author: Koro Chen <koro.chen@mediatek.com>
- * Sascha Hauer <s.hauer@pengutronix.de>
- * Hidalgo Huang <hidalgo.huang@mediatek.com>
- * Ir Lian <ir.lian@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MTK_AFE_COMMON_H_
-#define _MTK_AFE_COMMON_H_
-
-#include <linux/clk.h>
-#include <linux/regmap.h>
-
-enum {
- MTK_AFE_MEMIF_DL1,
- MTK_AFE_MEMIF_DL2,
- MTK_AFE_MEMIF_VUL,
- MTK_AFE_MEMIF_DAI,
- MTK_AFE_MEMIF_AWB,
- MTK_AFE_MEMIF_MOD_DAI,
- MTK_AFE_MEMIF_HDMI,
- MTK_AFE_MEMIF_NUM,
- MTK_AFE_IO_MOD_PCM1 = MTK_AFE_MEMIF_NUM,
- MTK_AFE_IO_MOD_PCM2,
- MTK_AFE_IO_PMIC,
- MTK_AFE_IO_I2S,
- MTK_AFE_IO_2ND_I2S,
- MTK_AFE_IO_HW_GAIN1,
- MTK_AFE_IO_HW_GAIN2,
- MTK_AFE_IO_MRG_O,
- MTK_AFE_IO_MRG_I,
- MTK_AFE_IO_DAIBT,
- MTK_AFE_IO_HDMI,
-};
-
-enum {
- MTK_AFE_IRQ_1,
- MTK_AFE_IRQ_2,
- MTK_AFE_IRQ_3,
- MTK_AFE_IRQ_4,
- MTK_AFE_IRQ_5,
- MTK_AFE_IRQ_6,
- MTK_AFE_IRQ_7,
- MTK_AFE_IRQ_8,
- MTK_AFE_IRQ_NUM,
-};
-
-enum {
- MTK_CLK_INFRASYS_AUD,
- MTK_CLK_TOP_PDN_AUD,
- MTK_CLK_TOP_PDN_AUD_BUS,
- MTK_CLK_I2S0_M,
- MTK_CLK_I2S1_M,
- MTK_CLK_I2S2_M,
- MTK_CLK_I2S3_M,
- MTK_CLK_I2S3_B,
- MTK_CLK_BCK0,
- MTK_CLK_BCK1,
- MTK_CLK_NUM
-};
-
-struct mtk_afe;
-struct snd_pcm_substream;
-
-struct mtk_afe_memif_data {
- int id;
- const char *name;
- int reg_ofs_base;
- int reg_ofs_cur;
- int fs_shift;
- int mono_shift;
- int enable_shift;
- int irq_reg_cnt;
- int irq_cnt_shift;
- int irq_en_shift;
- int irq_fs_shift;
- int irq_clr_shift;
- int msb_shift;
-};
-
-struct mtk_afe_memif {
- unsigned int phys_buf_addr;
- int buffer_size;
- struct snd_pcm_substream *substream;
- const struct mtk_afe_memif_data *data;
- const struct mtk_afe_irq_data *irqdata;
-};
-
-#endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 5185a3844da9..f5451c78ede5 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -100,13 +100,14 @@ config SND_OMAP_SOC_OMAP_TWL4030
config SND_OMAP_SOC_OMAP_ABE_TWL6040
tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
- depends on TWL6040_CORE && SND_OMAP_SOC
+ depends on TWL6040_CORE && SND_OMAP_SOC && COMMON_CLK
depends on ARCH_OMAP4 || (SOC_OMAP5 && MFD_PALMAS) || COMPILE_TEST
select SND_OMAP_SOC_DMIC
select SND_OMAP_SOC_MCPDM
select SND_SOC_TWL6040
select SND_SOC_DMIC
select COMMON_CLK_PALMAS if (SOC_OMAP5 && MFD_PALMAS)
+ select CLK_TWL6040
help
Say Y if you want to add support for SoC audio on OMAP boards using
ABE and twl6040 codec. This driver currently supports:
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 4a16e778966b..76ce33199bf9 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -257,8 +257,8 @@ static void omap_st_on(struct omap_mcbsp *mcbsp)
{
unsigned int w;
- if (mcbsp->pdata->enable_st_clock)
- mcbsp->pdata->enable_st_clock(mcbsp->id, 1);
+ if (mcbsp->pdata->force_ick_on)
+ mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, true);
/* Disable Sidetone clock auto-gating for normal operation */
w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
@@ -287,8 +287,8 @@ static void omap_st_off(struct omap_mcbsp *mcbsp)
w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
- if (mcbsp->pdata->enable_st_clock)
- mcbsp->pdata->enable_st_clock(mcbsp->id, 0);
+ if (mcbsp->pdata->force_ick_on)
+ mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, false);
}
static void omap_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir)
@@ -946,6 +946,13 @@ static int omap_st_add(struct omap_mcbsp *mcbsp, struct resource *res)
if (!st_data)
return -ENOMEM;
+ st_data->mcbsp_iclk = clk_get(mcbsp->dev, "ick");
+ if (IS_ERR(st_data->mcbsp_iclk)) {
+ dev_warn(mcbsp->dev,
+ "Failed to get ick, sidetone might be broken\n");
+ st_data->mcbsp_iclk = NULL;
+ }
+
st_data->io_base_st = devm_ioremap(mcbsp->dev, res->start,
resource_size(res));
if (!st_data->io_base_st)
@@ -1088,11 +1095,13 @@ err_thres:
return ret;
}
-void omap_mcbsp_sysfs_remove(struct omap_mcbsp *mcbsp)
+void omap_mcbsp_cleanup(struct omap_mcbsp *mcbsp)
{
if (mcbsp->pdata->buffer_size)
sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
- if (mcbsp->st_data)
+ if (mcbsp->st_data) {
sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group);
+ clk_put(mcbsp->st_data->mcbsp_iclk);
+ }
}
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/omap/mcbsp.h
index 96d1b086bcf8..61e93b1c185d 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/omap/mcbsp.h
@@ -280,6 +280,7 @@ struct omap_mcbsp_reg_cfg {
struct omap_mcbsp_st_data {
void __iomem *io_base_st;
+ struct clk *mcbsp_iclk;
bool running;
bool enabled;
s16 taps[128]; /* Sidetone filter coefficients */
@@ -349,6 +350,6 @@ int omap_st_disable(struct omap_mcbsp *mcbsp);
int omap_st_is_enabled(struct omap_mcbsp *mcbsp);
int omap_mcbsp_init(struct platform_device *pdev);
-void omap_mcbsp_sysfs_remove(struct omap_mcbsp *mcbsp);
+void omap_mcbsp_cleanup(struct omap_mcbsp *mcbsp);
#endif /* __ASOC_MCBSP_H */
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 64425d352962..888133f9e65d 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -28,7 +28,6 @@
#include <sound/asoundef.h>
#include <sound/omap-pcm.h>
#include <sound/omap-hdmi-audio.h>
-#include <video/omapdss.h>
#define DRV_NAME "omap-hdmi-audio"
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index fd99d89de6a8..d018e966e533 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -788,6 +788,7 @@ static int asoc_mcbsp_probe(struct platform_device *pdev)
match = of_match_device(omap_mcbsp_of_match, &pdev->dev);
if (match) {
struct device_node *node = pdev->dev.of_node;
+ struct omap_mcbsp_platform_data *pdata_quirk = pdata;
int buffer_size;
pdata = devm_kzalloc(&pdev->dev,
@@ -799,6 +800,8 @@ static int asoc_mcbsp_probe(struct platform_device *pdev)
memcpy(pdata, match->data, sizeof(*pdata));
if (!of_property_read_u32(node, "ti,buffer-size", &buffer_size))
pdata->buffer_size = buffer_size;
+ if (pdata_quirk)
+ pdata->force_ick_on = pdata_quirk->force_ick_on;
} else if (!pdata) {
dev_err(&pdev->dev, "missing platform data.\n");
return -EINVAL;
@@ -832,7 +835,7 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)
if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(mcbsp->id);
- omap_mcbsp_sysfs_remove(mcbsp);
+ omap_mcbsp_cleanup(mcbsp);
clk_put(mcbsp->fclk);
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index b837265ac3e9..e7cdc51fd806 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -31,6 +31,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
@@ -54,6 +55,7 @@ struct omap_mcpdm {
unsigned long phys_base;
void __iomem *io_base;
int irq;
+ struct clk *pdmclk;
struct mutex mutex;
@@ -66,6 +68,9 @@ struct omap_mcpdm {
/* McPDM needs to be restarted due to runtime reconfiguration */
bool restart;
+ /* pm state for suspend/resume handling */
+ int pm_active_count;
+
struct snd_dmaengine_dai_dma_data dma_data[2];
};
@@ -173,6 +178,10 @@ static inline int omap_mcpdm_active(struct omap_mcpdm *mcpdm)
*/
static void omap_mcpdm_open_streams(struct omap_mcpdm *mcpdm)
{
+ u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
+
+ omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl | MCPDM_WD_EN);
+
omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_SET,
MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL |
MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL);
@@ -258,12 +267,9 @@ static int omap_mcpdm_dai_startup(struct snd_pcm_substream *substream,
mutex_lock(&mcpdm->mutex);
- if (!dai->active) {
- u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
-
- omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl | MCPDM_WD_EN);
+ if (!dai->active)
omap_mcpdm_open_streams(mcpdm);
- }
+
mutex_unlock(&mcpdm->mutex);
return 0;
@@ -384,6 +390,7 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai)
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int ret;
+ clk_prepare_enable(mcpdm->pdmclk);
pm_runtime_enable(mcpdm->dev);
/* Disable lines while request is ongoing */
@@ -418,8 +425,54 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
pm_runtime_disable(mcpdm->dev);
+ clk_disable_unprepare(mcpdm->pdmclk);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ if (dai->active) {
+ omap_mcpdm_stop(mcpdm);
+ omap_mcpdm_close_streams(mcpdm);
+ }
+
+ mcpdm->pm_active_count = 0;
+ while (pm_runtime_active(mcpdm->dev)) {
+ pm_runtime_put_sync(mcpdm->dev);
+ mcpdm->pm_active_count++;
+ }
+
+ clk_disable_unprepare(mcpdm->pdmclk);
+
+ return 0;
+}
+
+static int omap_mcpdm_resume(struct snd_soc_dai *dai)
+{
+ struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+
+ clk_prepare_enable(mcpdm->pdmclk);
+
+ if (mcpdm->pm_active_count) {
+ while (mcpdm->pm_active_count--)
+ pm_runtime_get_sync(mcpdm->dev);
+
+ if (dai->active) {
+ omap_mcpdm_open_streams(mcpdm);
+ omap_mcpdm_start(mcpdm);
+ }
+ }
+
return 0;
}
+#else
+#define omap_mcpdm_suspend NULL
+#define omap_mcpdm_resume NULL
+#endif
#define OMAP_MCPDM_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
#define OMAP_MCPDM_FORMATS SNDRV_PCM_FMTBIT_S32_LE
@@ -427,6 +480,8 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
static struct snd_soc_dai_driver omap_mcpdm_dai = {
.probe = omap_mcpdm_probe,
.remove = omap_mcpdm_remove,
+ .suspend = omap_mcpdm_suspend,
+ .resume = omap_mcpdm_resume,
.probe_order = SND_SOC_COMP_ORDER_LATE,
.remove_order = SND_SOC_COMP_ORDER_EARLY,
.playback = {
@@ -494,6 +549,15 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
mcpdm->dev = &pdev->dev;
+ mcpdm->pdmclk = devm_clk_get(&pdev->dev, "pdmclk");
+ if (IS_ERR(mcpdm->pdmclk)) {
+ if (PTR_ERR(mcpdm->pdmclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_warn(&pdev->dev, "Error getting pdmclk (%ld)!\n",
+ PTR_ERR(mcpdm->pdmclk));
+ mcpdm->pdmclk = NULL;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_mcpdm_component,
&omap_mcpdm_dai, 1);
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 54949242bc70..a76845748a10 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -33,7 +33,6 @@
#include <sound/pcm.h>
#include <sound/soc.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
-#include "../codecs/tpa6130a2.h"
#include <asm/mach-types.h>
@@ -164,19 +163,6 @@ static int rx51_spk_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static int rx51_hp_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- if (SND_SOC_DAPM_EVENT_ON(event))
- tpa6130a2_stereo_enable(codec, 1);
- else
- tpa6130a2_stereo_enable(codec, 0);
-
- return 0;
-}
-
static int rx51_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -235,7 +221,7 @@ static struct snd_soc_jack_gpio rx51_av_jack_gpios[] = {
static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event),
SND_SOC_DAPM_MIC("DMic", NULL),
- SND_SOC_DAPM_HP("Headphone Jack", rx51_hp_event),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("HS Mic", NULL),
SND_SOC_DAPM_LINE("FM Transmitter", NULL),
SND_SOC_DAPM_SPK("Earphone", NULL),
@@ -246,11 +232,14 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"Ext Spk", NULL, "HPROUT"},
{"Ext Spk", NULL, "HPLCOM"},
{"Ext Spk", NULL, "HPRCOM"},
- {"Headphone Jack", NULL, "LLOUT"},
- {"Headphone Jack", NULL, "RLOUT"},
{"FM Transmitter", NULL, "LLOUT"},
{"FM Transmitter", NULL, "RLOUT"},
+ {"Headphone Jack", NULL, "TPA6130A2 HPLEFT"},
+ {"Headphone Jack", NULL, "TPA6130A2 HPRIGHT"},
+ {"TPA6130A2 LEFTIN", NULL, "LLOUT"},
+ {"TPA6130A2 RIGHTIN", NULL, "RLOUT"},
+
{"DMic Rate 64", NULL, "DMic"},
{"DMic", NULL, "Mic Bias"},
@@ -286,16 +275,10 @@ static const struct snd_kcontrol_new aic34_rx51_controls[] = {
static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_card *card = rtd->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
int err;
- err = tpa6130a2_add_controls(codec);
- if (err < 0) {
- dev_err(card->dev, "Failed to add TPA6130A2 controls\n");
- return err;
- }
snd_soc_limit_volume(card, "TPA6130A2 Headphone Playback Volume", 42);
err = omap_mcbsp_st_add_controls(rtd, 2);
@@ -357,6 +340,10 @@ static struct snd_soc_aux_dev rx51_aux_dev[] = {
.name = "TLV320AIC34b",
.codec_name = "tlv320aic3x-codec.2-0019",
},
+ {
+ .name = "TPA61320A2",
+ .codec_name = "tpa6130a2.2-0060",
+ },
};
static struct snd_soc_codec_conf rx51_codec_conf[] = {
@@ -364,6 +351,10 @@ static struct snd_soc_codec_conf rx51_codec_conf[] = {
.dev_name = "tlv320aic3x-codec.2-0019",
.name_prefix = "b",
},
+ {
+ .dev_name = "tpa6130a2.2-0060",
+ .name_prefix = "TPA6130A2",
+ },
};
/* Audio card */
@@ -435,11 +426,10 @@ static int rx51_soc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Headphone amplifier node is not provided\n");
return -EINVAL;
}
-
- /* TODO: tpa6130a2a driver supports only a single instance, so
- * this driver ignores the headphone-amplifier node for now.
- * It's already mandatory in the DT binding to be future proof.
- */
+ rx51_aux_dev[1].codec_name = NULL;
+ rx51_aux_dev[1].codec_of_node = dai_node;
+ rx51_codec_conf[1].dev_name = NULL;
+ rx51_codec_conf[1].of_node = dai_node;
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 574c6af28c06..652e8c5ea166 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -11,8 +11,10 @@
*/
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -23,6 +25,11 @@
#define DRV_NAME "rockchip-i2s"
+struct rk_i2s_pins {
+ u32 reg_offset;
+ u32 shift;
+};
+
struct rk_i2s_dev {
struct device *dev;
@@ -33,6 +40,7 @@ struct rk_i2s_dev {
struct snd_dmaengine_dai_dma_data playback_dma_data;
struct regmap *regmap;
+ struct regmap *grf;
/*
* Used to indicate the tx/rx status.
@@ -42,6 +50,7 @@ struct rk_i2s_dev {
bool tx_start;
bool rx_start;
bool is_master_mode;
+ const struct rk_i2s_pins *pins;
};
static int i2s_runtime_suspend(struct device *dev)
@@ -300,14 +309,38 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
I2S_TXCR_VDW_MASK | I2S_TXCR_CSR_MASK,
val);
+ if (!IS_ERR(i2s->grf) && i2s->pins) {
+ regmap_read(i2s->regmap, I2S_TXCR, &val);
+ val &= I2S_TXCR_CSR_MASK;
+
+ switch (val) {
+ case I2S_CHN_4:
+ val = I2S_IO_4CH_OUT_6CH_IN;
+ break;
+ case I2S_CHN_6:
+ val = I2S_IO_6CH_OUT_4CH_IN;
+ break;
+ case I2S_CHN_8:
+ val = I2S_IO_8CH_OUT_2CH_IN;
+ break;
+ default:
+ val = I2S_IO_2CH_OUT_8CH_IN;
+ break;
+ }
+
+ val <<= i2s->pins->shift;
+ val |= (I2S_IO_DIRECTION_MASK << i2s->pins->shift) << 16;
+ regmap_write(i2s->grf, i2s->pins->reg_offset, val);
+ }
+
regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
I2S_DMACR_TDL(16));
regmap_update_bits(i2s->regmap, I2S_DMACR, I2S_DMACR_RDL_MASK,
I2S_DMACR_RDL(16));
val = I2S_CKR_TRCM_TXRX;
- if (dai->driver->symmetric_rates || rtd->dai_link->symmetric_rates)
- val = I2S_CKR_TRCM_TXSHARE;
+ if (dai->driver->symmetric_rates && rtd->dai_link->symmetric_rates)
+ val = I2S_CKR_TRCM_TXONLY;
regmap_update_bits(i2s->regmap, I2S_CKR,
I2S_CKR_TRCM_MASK,
@@ -485,9 +518,23 @@ static const struct regmap_config rockchip_i2s_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static const struct rk_i2s_pins rk3399_i2s_pins = {
+ .reg_offset = 0xe220,
+ .shift = 11,
+};
+
+static const struct of_device_id rockchip_i2s_match[] = {
+ { .compatible = "rockchip,rk3066-i2s", },
+ { .compatible = "rockchip,rk3188-i2s", },
+ { .compatible = "rockchip,rk3288-i2s", },
+ { .compatible = "rockchip,rk3399-i2s", .data = &rk3399_i2s_pins },
+ {},
+};
+
static int rockchip_i2s_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *of_id;
struct rk_i2s_dev *i2s;
struct snd_soc_dai_driver *soc_dai;
struct resource *res;
@@ -501,6 +548,17 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ i2s->dev = &pdev->dev;
+
+ i2s->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
+ if (!IS_ERR(i2s->grf)) {
+ of_id = of_match_device(rockchip_i2s_match, &pdev->dev);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ i2s->pins = of_id->data;
+ }
+
/* try to prepare related clocks */
i2s->hclk = devm_clk_get(&pdev->dev, "i2s_hclk");
if (IS_ERR(i2s->hclk)) {
@@ -540,7 +598,6 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
i2s->capture_dma_data.maxburst = 4;
- i2s->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, i2s);
pm_runtime_enable(&pdev->dev);
@@ -606,14 +663,6 @@ static int rockchip_i2s_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id rockchip_i2s_match[] = {
- { .compatible = "rockchip,rk3066-i2s", },
- { .compatible = "rockchip,rk3188-i2s", },
- { .compatible = "rockchip,rk3288-i2s", },
- { .compatible = "rockchip,rk3399-i2s", },
- {},
-};
-
static const struct dev_pm_ops rockchip_i2s_pm_ops = {
SET_RUNTIME_PM_OPS(i2s_runtime_suspend, i2s_runtime_resume,
NULL)
diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
index dc6e2c74d088..31f11fd25393 100644
--- a/sound/soc/rockchip/rockchip_i2s.h
+++ b/sound/soc/rockchip/rockchip_i2s.h
@@ -81,8 +81,8 @@
#define I2S_CKR_TRCM_SHIFT 28
#define I2S_CKR_TRCM(x) (x << I2S_CKR_TRCM_SHIFT)
#define I2S_CKR_TRCM_TXRX (0 << I2S_CKR_TRCM_SHIFT)
-#define I2S_CKR_TRCM_TXSHARE (1 << I2S_CKR_TRCM_SHIFT)
-#define I2S_CKR_TRCM_RXSHARE (2 << I2S_CKR_TRCM_SHIFT)
+#define I2S_CKR_TRCM_TXONLY (1 << I2S_CKR_TRCM_SHIFT)
+#define I2S_CKR_TRCM_RXONLY (2 << I2S_CKR_TRCM_SHIFT)
#define I2S_CKR_TRCM_MASK (3 << I2S_CKR_TRCM_SHIFT)
#define I2S_CKR_MSS_SHIFT 27
#define I2S_CKR_MSS_MASTER (0 << I2S_CKR_MSS_SHIFT)
@@ -236,4 +236,11 @@ enum {
#define I2S_TXDR (0x0024)
#define I2S_RXDR (0x0028)
+/* io direction cfg register */
+#define I2S_IO_DIRECTION_MASK (7)
+#define I2S_IO_8CH_OUT_2CH_IN (0)
+#define I2S_IO_6CH_OUT_4CH_IN (4)
+#define I2S_IO_4CH_OUT_6CH_IN (6)
+#define I2S_IO_2CH_OUT_8CH_IN (7)
+
#endif /* _ROCKCHIP_IIS_H */
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index 543610282cdb..e70ffad07184 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -34,13 +34,18 @@
#define DRV_NAME "rockchip-snd-max98090"
static struct snd_soc_jack headset_jack;
+
+/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin headset_jack_pins[] = {
{
- .pin = "Headset Jack",
- .mask = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
},
};
static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
@@ -53,7 +58,7 @@ static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
static const struct snd_soc_dapm_route rk_audio_map[] = {
{"IN34", NULL, "Headset Mic"},
{"IN34", NULL, "MICBIAS"},
- {"MICBIAS", NULL, "Headset Mic"},
+ {"Headset Mic", NULL, "MICBIAS"},
{"DMICL", NULL, "Int Mic"},
{"Headphone", NULL, "HPL"},
{"Headphone", NULL, "HPR"},
@@ -114,43 +119,27 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static int rk_init(struct snd_soc_pcm_runtime *runtime)
-{
- /* Enable Headset and 4 Buttons Jack detection */
- return snd_soc_card_jack_new(runtime->card, "Headset Jack",
- SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3,
- &headset_jack,
- headset_jack_pins,
- ARRAY_SIZE(headset_jack_pins));
-}
-
-static int rk_98090_headset_init(struct snd_soc_component *component)
-{
- return ts3a227e_enable_jack_detect(component, &headset_jack);
-}
-
static struct snd_soc_ops rk_aif1_ops = {
.hw_params = rk_aif1_hw_params,
};
-static struct snd_soc_aux_dev rk_98090_headset_dev = {
- .name = "Headset Chip",
- .init = rk_98090_headset_init,
-};
-
static struct snd_soc_dai_link rk_dailink = {
.name = "max98090",
.stream_name = "Audio",
.codec_dai_name = "HiFi",
- .init = rk_init,
.ops = &rk_aif1_ops,
/* set max98090 as slave */
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
};
+static int rk_98090_headset_init(struct snd_soc_component *component);
+
+static struct snd_soc_aux_dev rk_98090_headset_dev = {
+ .name = "Headset Chip",
+ .init = rk_98090_headset_init,
+};
+
static struct snd_soc_card snd_soc_card_rk = {
.name = "ROCKCHIP-I2S",
.owner = THIS_MODULE,
@@ -166,6 +155,26 @@ static struct snd_soc_card snd_soc_card_rk = {
.num_controls = ARRAY_SIZE(rk_mc_controls),
};
+static int rk_98090_headset_init(struct snd_soc_component *component)
+{
+ int ret;
+
+ /* Enable Headset and 4 Buttons Jack detection */
+ ret = snd_soc_card_jack_new(&snd_soc_card_rk, "Headset Jack",
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &headset_jack,
+ headset_jack_pins,
+ ARRAY_SIZE(headset_jack_pins));
+ if (ret)
+ return ret;
+
+ ret = ts3a227e_enable_jack_detect(component, &headset_jack);
+
+ return ret;
+}
+
static int snd_rk_mc_probe(struct platform_device *pdev)
{
int ret = 0;
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index 100781e37848..4ca265737eda 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -101,21 +101,7 @@ static int rk_spdif_hw_params(struct snd_pcm_substream *substream,
int ret;
srate = params_rate(params);
- switch (srate) {
- case 32000:
- case 48000:
- case 96000:
- mclk = 96000 * 128; /* 12288000 hz */
- break;
- case 44100:
- mclk = 44100 * 256; /* 11289600 hz */
- break;
- case 192000:
- mclk = 192000 * 128; /* 24576000 hz */
- break;
- default:
- return -EINVAL;
- }
+ mclk = srate * 128;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
@@ -139,7 +125,6 @@ static int rk_spdif_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- val |= SPDIF_CFGR_CLK_DIV(mclk/(srate * 256));
ret = regmap_update_bits(spdif->regmap, SPDIF_CFGR,
SPDIF_CFGR_CLK_DIV_MASK | SPDIF_CFGR_HALFWORD_ENABLE |
SDPIF_CFGR_VDW_MASK,
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 78baa26e938b..7b722b0094d9 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -224,14 +224,6 @@ config SND_SOC_SNOW
Say Y if you want to add audio support for various Snow
boards based on Exynos5 series of SoCs.
-config SND_SOC_ODROIDX2
- tristate "Audio support for Odroid-X2 and Odroid-U3"
- depends on SND_SOC_SAMSUNG && I2C
- select SND_SOC_MAX98090
- select SND_SAMSUNG_I2S
- help
- Say Y here to enable audio support for the Odroid-X2/U3.
-
config SND_SOC_ARNDALE_RT5631_ALC5631
tristate "Audio support for RT5631(ALC5631) on Arndale Board"
depends on SND_SOC_SAMSUNG && I2C
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index 052fe71be518..5d03f5ce6916 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -43,7 +43,6 @@ snd-soc-tobermory-objs := tobermory.o
snd-soc-lowland-objs := lowland.o
snd-soc-littlemill-objs := littlemill.o
snd-soc-bells-objs := bells.o
-snd-soc-odroidx2-max98090-objs := odroidx2_max98090.o
snd-soc-arndale-rt5631-objs := arndale_rt5631.o
obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
@@ -69,5 +68,4 @@ obj-$(CONFIG_SND_SOC_TOBERMORY) += snd-soc-tobermory.o
obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
-obj-$(CONFIG_SND_SOC_ODROIDX2) += snd-soc-odroidx2-max98090.o
obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 4a7a503fe13c..547d31032088 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -389,7 +389,8 @@ static int s3c_ac97_probe(struct platform_device *pdev)
goto err5;
ret = samsung_asoc_dma_platform_register(&pdev->dev,
- ac97_pdata->dma_filter);
+ ac97_pdata->dma_filter,
+ NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err5;
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index a7616cc9b39e..3830f297e0b6 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -26,7 +26,10 @@ struct s3c_dma_params {
void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
struct s3c_dma_params *playback,
struct s3c_dma_params *capture);
-int samsung_asoc_dma_platform_register(struct device *dev,
- dma_filter_fn fn);
-
+/*
+ * @tx, @rx arguments can be NULL if the DMA channel names are "tx", "rx",
+ * otherwise actual DMA channel names must be passed to this function.
+ */
+int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
+ const char *tx, const char *rx);
#endif
diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
index 063125937311..2c87f380bfc4 100644
--- a/sound/soc/samsung/dmaengine.c
+++ b/sound/soc/samsung/dmaengine.c
@@ -28,10 +28,6 @@
#include "dma.h"
-static struct snd_dmaengine_pcm_config samsung_dmaengine_pcm_config = {
- .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
-};
-
void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
struct s3c_dma_params *playback,
struct s3c_dma_params *capture)
@@ -58,15 +54,28 @@ void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
}
EXPORT_SYMBOL_GPL(samsung_asoc_init_dma_data);
-int samsung_asoc_dma_platform_register(struct device *dev,
- dma_filter_fn filter)
+int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
+ const char *tx, const char *rx)
{
- samsung_dmaengine_pcm_config.compat_filter_fn = filter;
+ unsigned int flags = SND_DMAENGINE_PCM_FLAG_COMPAT;
+
+ struct snd_dmaengine_pcm_config *pcm_conf;
+
+ pcm_conf = devm_kzalloc(dev, sizeof(*pcm_conf), GFP_KERNEL);
+ if (!pcm_conf)
+ return -ENOMEM;
+
+ pcm_conf->prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
+ pcm_conf->compat_filter_fn = filter;
+
+ if (dev->of_node) {
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
+ pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
+ } else {
+ flags |= SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME;
+ }
- return devm_snd_dmaengine_pcm_register(dev,
- &samsung_dmaengine_pcm_config,
- SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME |
- SND_DMAENGINE_PCM_FLAG_COMPAT);
+ return devm_snd_dmaengine_pcm_register(dev, pcm_conf, flags);
}
EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 70a2559b63f9..50635ee8ff20 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
@@ -1106,19 +1107,9 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
return i2s;
}
-static const struct of_device_id exynos_i2s_match[];
-
-static inline const struct samsung_i2s_dai_data *samsung_i2s_get_driver_data(
- struct platform_device *pdev)
+static void i2s_free_sec_dai(struct i2s_dai *i2s)
{
- if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(exynos_i2s_match, pdev->dev.of_node);
- return match ? match->data : NULL;
- } else {
- return (struct samsung_i2s_dai_data *)
- platform_get_device_id(pdev)->driver_data;
- }
+ platform_device_del(i2s->pdev);
}
#ifdef CONFIG_PM
@@ -1233,9 +1224,13 @@ static int samsung_i2s_probe(struct platform_device *pdev)
const struct samsung_i2s_dai_data *i2s_dai_data;
int ret;
- /* Call during Seconday interface registration */
- i2s_dai_data = samsung_i2s_get_driver_data(pdev);
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
+ i2s_dai_data = of_device_get_match_data(&pdev->dev);
+ else
+ i2s_dai_data = (struct samsung_i2s_dai_data *)
+ platform_get_device_id(pdev)->driver_data;
+ /* Call during the secondary interface registration */
if (i2s_dai_data->dai_type == TYPE_SEC) {
sec_dai = dev_get_drvdata(&pdev->dev);
if (!sec_dai) {
@@ -1249,7 +1244,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
return ret;
return samsung_asoc_dma_platform_register(&pdev->dev,
- sec_dai->filter);
+ sec_dai->filter, "tx-sec", NULL);
}
pri_dai = i2s_alloc_dai(pdev, false);
@@ -1350,17 +1345,28 @@ static int samsung_i2s_probe(struct platform_device *pdev)
return -EINVAL;
}
- devm_snd_soc_register_component(&pri_dai->pdev->dev,
+ ret = devm_snd_soc_register_component(&pri_dai->pdev->dev,
&samsung_i2s_component,
&pri_dai->i2s_dai_drv, 1);
+ if (ret < 0)
+ goto err_free_dai;
+
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter,
+ NULL, NULL);
+ if (ret < 0)
+ goto err_free_dai;
pm_runtime_enable(&pdev->dev);
- ret = samsung_asoc_dma_platform_register(&pdev->dev, pri_dai->filter);
- if (ret != 0)
- return ret;
+ ret = i2s_register_clock_provider(pdev);
+ if (!ret)
+ return 0;
- return i2s_register_clock_provider(pdev);
+ pm_runtime_disable(&pdev->dev);
+err_free_dai:
+ if (sec_dai)
+ i2s_free_sec_dai(sec_dai);
+ return ret;
}
static int samsung_i2s_remove(struct platform_device *pdev)
@@ -1477,10 +1483,6 @@ static const struct samsung_i2s_dai_data i2sv5_dai_type_i2s1 = {
.i2s_variant_regs = &i2sv5_i2s1_regs,
};
-static const struct samsung_i2s_dai_data samsung_dai_type_pri = {
- .dai_type = TYPE_PRI,
-};
-
static const struct samsung_i2s_dai_data samsung_dai_type_sec = {
.dai_type = TYPE_SEC,
};
@@ -1492,9 +1494,6 @@ static const struct platform_device_id samsung_i2s_driver_ids[] = {
}, {
.name = "samsung-i2s-sec",
.driver_data = (kernel_ulong_t)&samsung_dai_type_sec,
- }, {
- .name = "samsung-i2sv4",
- .driver_data = (kernel_ulong_t)&i2sv5_dai_type,
},
{},
};
diff --git a/sound/soc/samsung/odroidx2_max98090.c b/sound/soc/samsung/odroidx2_max98090.c
deleted file mode 100644
index 04217279fe25..000000000000
--- a/sound/soc/samsung/odroidx2_max98090.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (C) 2014 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/of.h>
-#include <linux/module.h>
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "i2s.h"
-
-struct odroidx2_drv_data {
- const struct snd_soc_dapm_widget *dapm_widgets;
- unsigned int num_dapm_widgets;
-};
-
-/* The I2S CDCLK output clock frequency for the MAX98090 codec */
-#define MAX98090_MCLK 19200000
-
-static struct snd_soc_dai_link odroidx2_dai[];
-
-static int odroidx2_late_probe(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_dai *codec_dai;
- struct snd_soc_dai *cpu_dai;
- int ret;
-
- rtd = snd_soc_get_pcm_runtime(card, card->dai_link[0].name);
- codec_dai = rtd->codec_dai;
- cpu_dai = rtd->cpu_dai;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, MAX98090_MCLK,
- SND_SOC_CLOCK_IN);
-
- if (ret < 0 || of_find_property(odroidx2_dai[0].codec_of_node,
- "clocks", NULL))
- return ret;
-
- /* Set the cpu DAI configuration in order to use CDCLK */
- return snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
- 0, SND_SOC_CLOCK_OUT);
-}
-
-static const struct snd_soc_dapm_widget odroidx2_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_MIC("Mic Jack", NULL),
- SND_SOC_DAPM_MIC("DMIC", NULL),
-};
-
-static const struct snd_soc_dapm_widget odroidu3_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone Jack", NULL),
- SND_SOC_DAPM_SPK("Speakers", NULL),
-};
-
-static struct snd_soc_dai_link odroidx2_dai[] = {
- {
- .name = "MAX98090",
- .stream_name = "MAX98090 PCM",
- .codec_dai_name = "HiFi",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM,
- }
-};
-
-static struct snd_soc_card odroidx2 = {
- .owner = THIS_MODULE,
- .dai_link = odroidx2_dai,
- .num_links = ARRAY_SIZE(odroidx2_dai),
- .fully_routed = true,
- .late_probe = odroidx2_late_probe,
-};
-
-static const struct odroidx2_drv_data odroidx2_drvdata = {
- .dapm_widgets = odroidx2_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(odroidx2_dapm_widgets),
-};
-
-static const struct odroidx2_drv_data odroidu3_drvdata = {
- .dapm_widgets = odroidu3_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(odroidu3_dapm_widgets),
-};
-
-static const struct of_device_id odroidx2_audio_of_match[] = {
- {
- .compatible = "samsung,odroidx2-audio",
- .data = &odroidx2_drvdata,
- }, {
- .compatible = "samsung,odroidu3-audio",
- .data = &odroidu3_drvdata,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, odroidx2_audio_of_match);
-
-static int odroidx2_audio_probe(struct platform_device *pdev)
-{
- struct device_node *snd_node = pdev->dev.of_node;
- struct snd_soc_card *card = &odroidx2;
- struct device_node *i2s_node, *codec_node;
- struct odroidx2_drv_data *dd;
- const struct of_device_id *of_id;
- int ret;
-
- of_id = of_match_node(odroidx2_audio_of_match, snd_node);
- dd = (struct odroidx2_drv_data *)of_id->data;
-
- card->num_dapm_widgets = dd->num_dapm_widgets;
- card->dapm_widgets = dd->dapm_widgets;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_of_parse_card_name(card, "samsung,model");
- if (ret < 0)
- return ret;
-
- ret = snd_soc_of_parse_audio_routing(card, "samsung,audio-routing");
- if (ret < 0)
- return ret;
-
- codec_node = of_parse_phandle(snd_node, "samsung,audio-codec", 0);
- if (!codec_node) {
- dev_err(&pdev->dev,
- "Failed parsing samsung,i2s-codec property\n");
- return -EINVAL;
- }
-
- i2s_node = of_parse_phandle(snd_node, "samsung,i2s-controller", 0);
- if (!i2s_node) {
- dev_err(&pdev->dev,
- "Failed parsing samsung,i2s-controller property\n");
- ret = -EINVAL;
- goto err_put_codec_n;
- }
-
- odroidx2_dai[0].codec_of_node = codec_node;
- odroidx2_dai[0].cpu_of_node = i2s_node;
- odroidx2_dai[0].platform_of_node = i2s_node;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
- ret);
- goto err_put_i2s_n;
- }
- return 0;
-
-err_put_i2s_n:
- of_node_put(i2s_node);
-err_put_codec_n:
- of_node_put(codec_node);
- return ret;
-}
-
-static int odroidx2_audio_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- of_node_put(odroidx2_dai[0].cpu_of_node);
- of_node_put(odroidx2_dai[0].codec_of_node);
-
- return 0;
-}
-
-static struct platform_driver odroidx2_audio_driver = {
- .driver = {
- .name = "odroidx2-audio",
- .of_match_table = odroidx2_audio_of_match,
- .pm = &snd_soc_pm_ops,
- },
- .probe = odroidx2_audio_probe,
- .remove = odroidx2_audio_remove,
-};
-module_platform_driver(odroidx2_audio_driver);
-
-MODULE_AUTHOR("Chen Zhen <zhen1.chen@samsung.com>");
-MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
-MODULE_DESCRIPTION("ALSA SoC Odroid X2/U3 Audio Support");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 498f563a4c9c..490c1a87fd66 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -576,7 +576,8 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
goto err5;
}
- ret = samsung_asoc_dma_platform_register(&pdev->dev, filter);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
+ NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err5;
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
index b6ab3fc5789e..bf8ae79b0fd2 100644
--- a/sound/soc/samsung/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -268,7 +268,7 @@ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
iismod &= ~S3C2412_IISMOD_SLAVE;
break;
default:
- pr_err("unknwon master/slave format\n");
+ pr_err("unknown master/slave format\n");
return -EINVAL;
}
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 204029d12f5b..d45dffb297d8 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -177,7 +177,8 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
}
ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter);
+ pdata->dma_filter,
+ NULL, NULL);
if (ret)
pr_err("failed to register the DMA: %d\n", ret);
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index b3a475d73ba7..3e76f2a75a24 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -482,7 +482,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
}
ret = samsung_asoc_dma_platform_register(&pdev->dev,
- pdata->dma_filter);
+ pdata->dma_filter,
+ NULL, NULL);
if (ret)
pr_err("failed to register the dma: %d\n", ret);
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 4687f521197c..0cb9c8567546 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -435,7 +435,8 @@ static int spdif_probe(struct platform_device *pdev)
spdif->dma_playback = &spdif_stereo_out;
- ret = samsung_asoc_dma_platform_register(&pdev->dev, filter);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev, filter,
+ NULL, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
goto err4;
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index c9902a6d6fa0..9311f119feb5 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -44,6 +44,7 @@ config SND_SOC_RCAR
config SND_SOC_RSRC_CARD
tristate "Renesas Sampling Rate Convert Sound Card"
+ select SND_SIMPLE_CARD_UTILS
help
This option enables simple sound if you need sampling rate convert
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index c4c51a4d3c8f..2145957d0229 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -33,11 +33,15 @@ struct rsnd_adg {
struct clk *clkout[CLKOUTMAX];
struct clk_onecell_data onecell;
struct rsnd_mod mod;
+ u32 flags;
int rbga_rate_for_441khz; /* RBGA */
int rbgb_rate_for_48khz; /* RBGB */
};
+#define LRCLK_ASYNC (1 << 0)
+#define adg_mode_flags(adg) (adg->flags)
+
#define for_each_rsnd_clk(pos, adg, i) \
for (i = 0; \
(i < CLKMAX) && \
@@ -355,6 +359,16 @@ found_clock:
rsnd_adg_set_ssi_clk(ssi_mod, data);
+ if (!(adg_mode_flags(adg) & LRCLK_ASYNC)) {
+ struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
+ u32 ckr = 0;
+
+ if (0 == (rate % 8000))
+ ckr = 0x80000000;
+
+ rsnd_mod_bset(adg_mod, SSICKR, 0x80000000, ckr);
+ }
+
dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n",
rsnd_mod_name(ssi_mod), rsnd_mod_id(ssi_mod),
data, rate);
@@ -532,6 +546,7 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
{
struct rsnd_adg *adg;
struct device *dev = rsnd_priv_to_dev(priv);
+ struct device_node *np = dev->of_node;
adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL);
if (!adg) {
@@ -545,6 +560,9 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
rsnd_adg_get_clkin(priv, adg);
rsnd_adg_get_clkout(priv, adg);
+ if (of_get_property(np, "clkout-lr-asynchronous", NULL))
+ adg->flags = LRCLK_ASYNC;
+
priv->adg = adg;
return 0;
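
For reference, a short illustration of the SSICKR update added above; it only runs when the "clkout-lr-asynchronous" property is absent (LRCLK_ASYNC not set), and the meaning of bit 31 is inferred from the 0x80000000 mask the patch writes, so treat it as an assumption:

/*
 * rate = 8000, 32000, 48000, 96000, 192000  ->  rate % 8000 == 0  ->  ckr = 0x80000000
 * rate = 11025, 22050, 44100, 88200         ->  rate % 8000 != 0  ->  ckr = 0
 *
 * i.e. SSICKR bit 31 tracks whether the stream being set up belongs to the
 * 48 kHz or the 44.1 kHz clock family.
 */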
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 46c0ba7b6414..7d2fdf8dd188 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -206,7 +206,7 @@ static int _rsnd_gen_regmap_init(struct rsnd_priv *priv,
*/
static int rsnd_gen2_probe(struct rsnd_priv *priv)
{
- const static struct rsnd_regmap_field_conf conf_ssiu[] = {
+ static const struct rsnd_regmap_field_conf conf_ssiu[] = {
RSND_GEN_S_REG(SSI_MODE0, 0x800),
RSND_GEN_S_REG(SSI_MODE1, 0x804),
RSND_GEN_S_REG(SSI_MODE2, 0x808),
@@ -221,7 +221,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
};
- const static struct rsnd_regmap_field_conf conf_scu[] = {
+ static const struct rsnd_regmap_field_conf conf_scu[] = {
RSND_GEN_M_REG(SRC_I_BUSIF_MODE,0x0, 0x20),
RSND_GEN_M_REG(SRC_O_BUSIF_MODE,0x4, 0x20),
RSND_GEN_M_REG(SRC_BUSIF_DALIGN,0x8, 0x20),
@@ -308,7 +308,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_M_REG(DVC_VOL7R, 0xe44, 0x100),
RSND_GEN_M_REG(DVC_DVUER, 0xe48, 0x100),
};
- const static struct rsnd_regmap_field_conf conf_adg[] = {
+ static const struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
RSND_GEN_S_REG(BRRB, 0x04),
RSND_GEN_S_REG(SSICKR, 0x08),
@@ -328,7 +328,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_S_REG(SRCOUT_TIMSEL4, 0x58),
RSND_GEN_S_REG(CMDOUT_TIMSEL, 0x5c),
};
- const static struct rsnd_regmap_field_conf conf_ssi[] = {
+ static const struct rsnd_regmap_field_conf conf_ssi[] = {
RSND_GEN_M_REG(SSICR, 0x00, 0x40),
RSND_GEN_M_REG(SSISR, 0x04, 0x40),
RSND_GEN_M_REG(SSITDR, 0x08, 0x40),
@@ -359,14 +359,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
static int rsnd_gen1_probe(struct rsnd_priv *priv)
{
- const static struct rsnd_regmap_field_conf conf_adg[] = {
+ static const struct rsnd_regmap_field_conf conf_adg[] = {
RSND_GEN_S_REG(BRRA, 0x00),
RSND_GEN_S_REG(BRRB, 0x04),
RSND_GEN_S_REG(SSICKR, 0x08),
RSND_GEN_S_REG(AUDIO_CLK_SEL0, 0x0c),
RSND_GEN_S_REG(AUDIO_CLK_SEL1, 0x10),
};
- const static struct rsnd_regmap_field_conf conf_ssi[] = {
+ static const struct rsnd_regmap_field_conf conf_ssi[] = {
RSND_GEN_M_REG(SSICR, 0x00, 0x40),
RSND_GEN_M_REG(SSISR, 0x04, 0x40),
RSND_GEN_M_REG(SSITDR, 0x08, 0x40),
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
index 1bc7ecfc42a9..fa37f842b62f 100644
--- a/sound/soc/sh/rcar/rsrc-card.c
+++ b/sound/soc/sh/rcar/rsrc-card.c
@@ -20,6 +20,7 @@
#include <sound/jack.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
+#include <sound/simple_card_utils.h>
struct rsrc_card_of_data {
const char *prefix;
@@ -46,25 +47,13 @@ static const struct of_device_id rsrc_card_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
-#define DAI_NAME_NUM 32
-struct rsrc_card_dai {
- unsigned int sysclk;
- unsigned int tx_slot_mask;
- unsigned int rx_slot_mask;
- int slots;
- int slot_width;
- struct clk *clk;
- char dai_name[DAI_NAME_NUM];
-};
-
#define IDX_CPU 0
#define IDX_CODEC 1
struct rsrc_card_priv {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
- struct rsrc_card_dai *dai_props;
+ struct asoc_simple_dai *dai_props;
struct snd_soc_dai_link *dai_link;
- int dai_num;
u32 convert_rate;
u32 convert_channels;
};
@@ -77,7 +66,7 @@ static int rsrc_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct rsrc_card_dai *dai_props =
+ struct asoc_simple_dai *dai_props =
rsrc_priv_to_props(priv, rtd->num);
return clk_prepare_enable(dai_props->clk);
@@ -87,7 +76,7 @@ static void rsrc_card_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
- struct rsrc_card_dai *dai_props =
+ struct asoc_simple_dai *dai_props =
rsrc_priv_to_props(priv, rtd->num);
clk_disable_unprepare(dai_props->clk);
@@ -103,7 +92,7 @@ static int rsrc_card_dai_init(struct snd_soc_pcm_runtime *rtd)
struct rsrc_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
- struct rsrc_card_dai *dai_props;
+ struct asoc_simple_dai *dai_props;
int num = rtd->num;
int ret;
@@ -159,44 +148,13 @@ static int rsrc_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static int rsrc_card_parse_daifmt(struct device_node *node,
- struct device_node *codec,
- struct rsrc_card_priv *priv,
- struct snd_soc_dai_link *dai_link,
- unsigned int *retfmt)
-{
- struct device_node *bitclkmaster = NULL;
- struct device_node *framemaster = NULL;
- unsigned int daifmt;
-
- daifmt = snd_soc_of_parse_daifmt(node, NULL,
- &bitclkmaster, &framemaster);
- daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
-
- if (!bitclkmaster && !framemaster)
- return -EINVAL;
-
- if (codec == bitclkmaster)
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBM_CFM : SND_SOC_DAIFMT_CBM_CFS;
- else
- daifmt |= (codec == framemaster) ?
- SND_SOC_DAIFMT_CBS_CFM : SND_SOC_DAIFMT_CBS_CFS;
-
- of_node_put(bitclkmaster);
- of_node_put(framemaster);
-
- *retfmt = daifmt;
-
- return 0;
-}
-
static int rsrc_card_parse_links(struct device_node *np,
struct rsrc_card_priv *priv,
int idx, bool is_fe)
{
+ struct device *dev = rsrc_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct rsrc_card_dai *dai_props = rsrc_priv_to_props(priv, idx);
+ struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
struct of_phandle_args args;
int ret;
@@ -232,9 +190,11 @@ static int rsrc_card_parse_links(struct device_node *np,
if (ret < 0)
return ret;
- /* set dai_name */
- snprintf(dai_props->dai_name, DAI_NAME_NUM, "fe.%s",
- dai_link->cpu_dai_name);
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+ return ret;
/*
* In soc_bind_dai_link() will check cpu name after
@@ -248,7 +208,6 @@ static int rsrc_card_parse_links(struct device_node *np,
if (!args.args_count)
dai_link->cpu_dai_name = NULL;
} else {
- struct device *dev = rsrc_priv_to_dev(priv);
const struct rsrc_card_of_data *of_data;
of_data = of_device_get_match_data(dev);
@@ -266,6 +225,12 @@ static int rsrc_card_parse_links(struct device_node *np,
if (ret < 0)
return ret;
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "be.%s",
+ dai_link->codec_dai_name);
+ if (ret < 0)
+ return ret;
+
/* additional name prefix */
if (of_data) {
priv->codec_conf.of_node = dai_link->codec_of_node;
@@ -276,18 +241,12 @@ static int rsrc_card_parse_links(struct device_node *np,
dai_link->codec_of_node,
"audio-prefix");
}
-
- /* set dai_name */
- snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s",
- dai_link->codec_dai_name);
}
/* Simple Card assumes platform == cpu */
dai_link->platform_of_node = dai_link->cpu_of_node;
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
- dai_link->name = dai_props->dai_name;
- dai_link->stream_name = dai_props->dai_name;
dai_link->ops = &rsrc_card_ops;
dai_link->init = rsrc_card_dai_init;
@@ -299,7 +258,7 @@ static int rsrc_card_parse_clk(struct device_node *np,
int idx, bool is_fe)
{
struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct rsrc_card_dai *dai_props = rsrc_priv_to_props(priv, idx);
+ struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
struct clk *clk;
struct device_node *of_np = is_fe ? dai_link->cpu_of_node :
dai_link->codec_of_node;
@@ -336,7 +295,7 @@ static int rsrc_card_dai_sub_link_of(struct device_node *node,
{
struct device *dev = rsrc_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = rsrc_priv_to_link(priv, idx);
- struct rsrc_card_dai *dai_props = rsrc_priv_to_props(priv, idx);
+ struct asoc_simple_dai *dai_props = rsrc_priv_to_props(priv, idx);
int ret;
ret = rsrc_card_parse_links(np, priv, idx, is_fe);
@@ -348,7 +307,7 @@ static int rsrc_card_dai_sub_link_of(struct device_node *node,
return ret;
dev_dbg(dev, "\t%s / %04x / %d\n",
- dai_props->dai_name,
+ dai_link->name,
dai_link->dai_fmt,
dai_props->sysclk);
@@ -358,6 +317,7 @@ static int rsrc_card_dai_sub_link_of(struct device_node *node,
static int rsrc_card_dai_link_of(struct device_node *node,
struct rsrc_card_priv *priv)
{
+ struct device *dev = rsrc_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link;
struct device_node *np;
unsigned int daifmt = 0;
@@ -370,8 +330,8 @@ static int rsrc_card_dai_link_of(struct device_node *node,
dai_link = rsrc_priv_to_link(priv, i);
if (strcmp(np->name, "codec") == 0) {
- ret = rsrc_card_parse_daifmt(node, np, priv,
- dai_link, &daifmt);
+ ret = asoc_simple_card_parse_daifmt(dev, node, np,
+ NULL, &daifmt);
if (ret < 0)
return ret;
break;
@@ -402,7 +362,7 @@ static int rsrc_card_parse_of(struct device_node *node,
struct device *dev)
{
const struct rsrc_card_of_data *of_data = of_device_get_match_data(dev);
- struct rsrc_card_dai *props;
+ struct asoc_simple_dai *props;
struct snd_soc_dai_link *links;
int ret;
int num;
@@ -418,7 +378,6 @@ static int rsrc_card_parse_of(struct device_node *node,
priv->dai_props = props;
priv->dai_link = links;
- priv->dai_num = num;
/* Init snd_soc_card */
priv->snd_card.owner = THIS_MODULE;
@@ -436,9 +395,6 @@ static int rsrc_card_parse_of(struct device_node *node,
"audio-routing");
}
- /* Parse the card name from DT */
- snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
-
/* sampling rate convert */
of_property_read_u32(node, "convert-rate", &priv->convert_rate);
@@ -454,8 +410,9 @@ static int rsrc_card_parse_of(struct device_node *node,
if (ret < 0)
return ret;
- if (!priv->snd_card.name)
- priv->snd_card.name = priv->snd_card.dai_link->name;
+ ret = asoc_simple_card_parse_card_name(&priv->snd_card, "card-");
+ if (ret < 0)
+ return ret;
return 0;
}
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 875733c52953..d2df46c14c68 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -530,14 +530,15 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ int ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
if (platform->driver->compr_ops && platform->driver->compr_ops->pointer)
- platform->driver->compr_ops->pointer(cstream, tstamp);
+ ret = platform->driver->compr_ops->pointer(cstream, tstamp);
mutex_unlock(&rtd->pcm_mutex);
- return 0;
+ return ret;
}
static int soc_compr_copy(struct snd_compr_stream *cstream,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c4464858bf01..8698c26773b3 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1073,7 +1073,11 @@ static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
*/
static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
struct list_head *list, enum snd_soc_dapm_direction dir,
- int (*fn)(struct snd_soc_dapm_widget *, struct list_head *))
+ int (*fn)(struct snd_soc_dapm_widget *, struct list_head *,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction)),
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction))
{
enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
struct snd_soc_dapm_path *path;
@@ -1088,6 +1092,11 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
if (list)
list_add_tail(&widget->work_list, list);
+ if (custom_stop_condition && custom_stop_condition(widget, dir)) {
+ widget->endpoints[dir] = 1;
+ return widget->endpoints[dir];
+ }
+
if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget);
return widget->endpoints[dir];
@@ -1106,7 +1115,7 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
if (path->connect) {
path->walking = 1;
- con += fn(path->node[dir], list);
+ con += fn(path->node[dir], list, custom_stop_condition);
path->walking = 0;
}
}
@@ -1119,23 +1128,37 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
/*
* Recursively check for a completed path to an active or physically connected
* output widget. Returns number of complete paths.
+ *
+ * Optionally, a function acting as a stopping condition can be supplied.
+ * This function takes the dapm widget currently being examined and the walk
+ * direction as arguments; it should return true if the walk should be
+ * stopped and false otherwise.
*/
static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
- struct list_head *list)
+ struct list_head *list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *i,
+ enum snd_soc_dapm_direction))
{
return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT,
- is_connected_output_ep);
+ is_connected_output_ep, custom_stop_condition);
}
/*
* Recursively check for a completed path to an active or physically connected
* input widget. Returns number of complete paths.
+ *
+ * Optionally, a function acting as a stopping condition can be supplied.
+ * This function takes the dapm widget currently being examined and the walk
+ * direction as arguments; it should return true if the walk should be
+ * stopped and false otherwise.
*/
static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
- struct list_head *list)
+ struct list_head *list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *i,
+ enum snd_soc_dapm_direction))
{
return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN,
- is_connected_input_ep);
+ is_connected_input_ep, custom_stop_condition);
}
/**
@@ -1143,15 +1166,24 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
* @dai: the soc DAI.
* @stream: stream direction.
* @list: list of active widgets for this stream.
+ * @custom_stop_condition: (optional) a function meant to stop the widget graph
+ * walk based on custom logic.
*
 * Queries DAPM graph as to whether a valid audio stream path exists for
* the initial stream specified by name. This takes into account
* current mixer and mux kcontrol settings. Creates list of valid widgets.
*
+ * Optionally, a function acting as a stopping condition can be supplied.
+ * This function takes the dapm widget currently being examined and the walk
+ * direction as arguments; it should return true if the walk should be
+ * stopped and false otherwise.
+ *
* Returns the number of valid paths or negative error.
*/
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
- struct snd_soc_dapm_widget_list **list)
+ struct snd_soc_dapm_widget_list **list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
+ enum snd_soc_dapm_direction))
{
struct snd_soc_card *card = dai->component->card;
struct snd_soc_dapm_widget *w;
@@ -1171,9 +1203,11 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
}
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- paths = is_connected_output_ep(dai->playback_widget, &widgets);
+ paths = is_connected_output_ep(dai->playback_widget, &widgets,
+ custom_stop_condition);
else
- paths = is_connected_input_ep(dai->capture_widget, &widgets);
+ paths = is_connected_input_ep(dai->capture_widget, &widgets,
+ custom_stop_condition);
/* Drop starting point */
list_del(widgets.next);
@@ -1268,8 +1302,8 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
DAPM_UPDATE_STAT(w, power_checks);
- in = is_connected_input_ep(w, NULL);
- out = is_connected_output_ep(w, NULL);
+ in = is_connected_input_ep(w, NULL, NULL);
+ out = is_connected_output_ep(w, NULL, NULL);
return out != 0 && in != 0;
}
@@ -1928,8 +1962,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
in = 0;
out = 0;
} else {
- in = is_connected_input_ep(w, NULL);
- out = is_connected_output_ep(w, NULL);
+ in = is_connected_input_ep(w, NULL, NULL);
+ out = is_connected_output_ep(w, NULL, NULL);
}
ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
@@ -3282,6 +3316,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
mutex_unlock(&dapm->card->dapm_mutex);
return w;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control);
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
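
A minimal sketch of how a caller can use the new custom_stop_condition argument of snd_soc_dapm_dai_get_connected_widgets(); the predicate below is purely illustrative (real users, such as the DPCM code in soc-pcm.c, stop at back-end DAI widgets instead):

/* Illustrative only: stop the graph walk at widgets whose name starts with "BE" */
static bool stop_at_be_prefix(struct snd_soc_dapm_widget *w,
			      enum snd_soc_dapm_direction dir)
{
	return strncmp(w->name, "BE", 2) == 0;
}

/* ... then, with a DAI and a widget list pointer in hand: */
paths = snd_soc_dapm_dai_get_connected_widgets(dai, SNDRV_PCM_STREAM_PLAYBACK,
						&list, stop_at_be_prefix);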
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index aa99dac31b3b..60d702f8b9f0 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1287,6 +1287,46 @@ static int widget_in_list(struct snd_soc_dapm_widget_list *list,
return 0;
}
+static bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget,
+ enum snd_soc_dapm_direction dir)
+{
+ struct snd_soc_card *card = widget->dapm->card;
+ struct snd_soc_pcm_runtime *rtd;
+ int i;
+
+ if (dir == SND_SOC_DAPM_DIR_OUT) {
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!rtd->dai_link->no_pcm)
+ continue;
+
+ if (rtd->cpu_dai->playback_widget == widget)
+ return true;
+
+ for (i = 0; i < rtd->num_codecs; ++i) {
+ struct snd_soc_dai *dai = rtd->codec_dais[i];
+ if (dai->playback_widget == widget)
+ return true;
+ }
+ }
+ } else { /* SND_SOC_DAPM_DIR_IN */
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!rtd->dai_link->no_pcm)
+ continue;
+
+ if (rtd->cpu_dai->capture_widget == widget)
+ return true;
+
+ for (i = 0; i < rtd->num_codecs; ++i) {
+ struct snd_soc_dai *dai = rtd->codec_dais[i];
+ if (dai->capture_widget == widget)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list)
{
@@ -1294,7 +1334,8 @@ int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int paths;
/* get number of valid DAI paths and their widgets */
- paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list);
+ paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list,
+ dpcm_end_walk_at_be);
dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
stream ? "capture" : "playback");
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index ee1c7c245bc7..1ac2db205a0d 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -1029,9 +1029,9 @@ static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
- if (!regmap) {
+ if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
- return -EINVAL;
+ return PTR_ERR(regmap);
}
player->clk_sel = regmap_field_alloc(regmap, regfield[0]);
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index ae42294ef688..2a954bd01fd8 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -8,6 +8,15 @@ config SND_SUN4I_CODEC
Select Y or M to add support for the Codec embedded in the Allwinner
A10 and affiliated SoCs.
+config SND_SUN4I_I2S
+ tristate "Allwinner A10 I2S Support"
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select REGMAP_MMIO
+ help
+ Say Y or M if you want to add support for codecs attached to
+ the Allwinner A10 I2S. You will also need to select the
+	  individual machine drivers below.
+
config SND_SUN4I_SPDIF
tristate "Allwinner A10 SPDIF Support"
depends on OF
diff --git a/sound/soc/sunxi/Makefile b/sound/soc/sunxi/Makefile
index 8f5e889667f1..604c7b842837 100644
--- a/sound/soc/sunxi/Makefile
+++ b/sound/soc/sunxi/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o
-
+obj-$(CONFIG_SND_SUN4I_I2S) += sun4i-i2s.o
obj-$(CONFIG_SND_SUN4I_SPDIF) += sun4i-spdif.o
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
new file mode 100644
index 000000000000..687a8f83dbe5
--- /dev/null
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 2015 Andrea Venturi
+ * Andrea Venturi <be17068@iperbole.bo.it>
+ *
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#define SUN4I_I2S_CTRL_REG 0x00
+#define SUN4I_I2S_CTRL_SDO_EN_MASK GENMASK(11, 8)
+#define SUN4I_I2S_CTRL_SDO_EN(sdo) BIT(8 + (sdo))
+#define SUN4I_I2S_CTRL_MODE_MASK BIT(5)
+#define SUN4I_I2S_CTRL_MODE_SLAVE (1 << 5)
+#define SUN4I_I2S_CTRL_MODE_MASTER (0 << 5)
+#define SUN4I_I2S_CTRL_TX_EN BIT(2)
+#define SUN4I_I2S_CTRL_RX_EN BIT(1)
+#define SUN4I_I2S_CTRL_GL_EN BIT(0)
+
+#define SUN4I_I2S_FMT0_REG 0x04
+#define SUN4I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(7)
+#define SUN4I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 7)
+#define SUN4I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 7)
+#define SUN4I_I2S_FMT0_BCLK_POLARITY_MASK BIT(6)
+#define SUN4I_I2S_FMT0_BCLK_POLARITY_INVERTED (1 << 6)
+#define SUN4I_I2S_FMT0_BCLK_POLARITY_NORMAL (0 << 6)
+#define SUN4I_I2S_FMT0_SR_MASK GENMASK(5, 4)
+#define SUN4I_I2S_FMT0_SR(sr) ((sr) << 4)
+#define SUN4I_I2S_FMT0_WSS_MASK GENMASK(3, 2)
+#define SUN4I_I2S_FMT0_WSS(wss) ((wss) << 2)
+#define SUN4I_I2S_FMT0_FMT_MASK GENMASK(1, 0)
+#define SUN4I_I2S_FMT0_FMT_RIGHT_J (2 << 0)
+#define SUN4I_I2S_FMT0_FMT_LEFT_J (1 << 0)
+#define SUN4I_I2S_FMT0_FMT_I2S (0 << 0)
+
+#define SUN4I_I2S_FMT1_REG 0x08
+#define SUN4I_I2S_FIFO_TX_REG 0x0c
+#define SUN4I_I2S_FIFO_RX_REG 0x10
+
+#define SUN4I_I2S_FIFO_CTRL_REG 0x14
+#define SUN4I_I2S_FIFO_CTRL_FLUSH_TX BIT(25)
+#define SUN4I_I2S_FIFO_CTRL_FLUSH_RX BIT(24)
+#define SUN4I_I2S_FIFO_CTRL_TX_MODE_MASK BIT(2)
+#define SUN4I_I2S_FIFO_CTRL_TX_MODE(mode) ((mode) << 2)
+#define SUN4I_I2S_FIFO_CTRL_RX_MODE_MASK GENMASK(1, 0)
+#define SUN4I_I2S_FIFO_CTRL_RX_MODE(mode) (mode)
+
+#define SUN4I_I2S_FIFO_STA_REG 0x18
+
+#define SUN4I_I2S_DMA_INT_CTRL_REG 0x1c
+#define SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN BIT(7)
+#define SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN BIT(3)
+
+#define SUN4I_I2S_INT_STA_REG 0x20
+
+#define SUN4I_I2S_CLK_DIV_REG 0x24
+#define SUN4I_I2S_CLK_DIV_MCLK_EN BIT(7)
+#define SUN4I_I2S_CLK_DIV_BCLK_MASK GENMASK(6, 4)
+#define SUN4I_I2S_CLK_DIV_BCLK(bclk) ((bclk) << 4)
+#define SUN4I_I2S_CLK_DIV_MCLK_MASK GENMASK(3, 0)
+#define SUN4I_I2S_CLK_DIV_MCLK(mclk) ((mclk) << 0)
+
+#define SUN4I_I2S_RX_CNT_REG 0x28
+#define SUN4I_I2S_TX_CNT_REG 0x2c
+
+#define SUN4I_I2S_TX_CHAN_SEL_REG 0x30
+#define SUN4I_I2S_TX_CHAN_SEL(num_chan) (((num_chan) - 1) << 0)
+
+#define SUN4I_I2S_TX_CHAN_MAP_REG 0x34
+#define SUN4I_I2S_TX_CHAN_MAP(chan, sample) ((sample) << (chan << 2))
+
+#define SUN4I_I2S_RX_CHAN_SEL_REG 0x38
+#define SUN4I_I2S_RX_CHAN_MAP_REG 0x3c
+
+struct sun4i_i2s {
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct regmap *regmap;
+
+ struct snd_dmaengine_dai_dma_data playback_dma_data;
+};
+
+struct sun4i_i2s_clk_div {
+ u8 div;
+ u8 val;
+};
+
+static const struct sun4i_i2s_clk_div sun4i_i2s_bclk_div[] = {
+ { .div = 2, .val = 0 },
+ { .div = 4, .val = 1 },
+ { .div = 6, .val = 2 },
+ { .div = 8, .val = 3 },
+ { .div = 12, .val = 4 },
+ { .div = 16, .val = 5 },
+};
+
+static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
+ { .div = 1, .val = 0 },
+ { .div = 2, .val = 1 },
+ { .div = 4, .val = 2 },
+ { .div = 6, .val = 3 },
+ { .div = 8, .val = 4 },
+ { .div = 12, .val = 5 },
+ { .div = 16, .val = 6 },
+ { .div = 24, .val = 7 },
+};
+
+static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
+ unsigned int oversample_rate,
+ unsigned int word_size)
+{
+ int div = oversample_rate / word_size / 2;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
+ const struct sun4i_i2s_clk_div *bdiv = &sun4i_i2s_bclk_div[i];
+
+ if (bdiv->div == div)
+ return bdiv->val;
+ }
+
+ return -EINVAL;
+}
+
+static int sun4i_i2s_get_mclk_div(struct sun4i_i2s *i2s,
+ unsigned int oversample_rate,
+ unsigned int module_rate,
+ unsigned int sampling_rate)
+{
+ int div = module_rate / sampling_rate / oversample_rate;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_mclk_div); i++) {
+ const struct sun4i_i2s_clk_div *mdiv = &sun4i_i2s_mclk_div[i];
+
+ if (mdiv->div == div)
+ return mdiv->val;
+ }
+
+ return -EINVAL;
+}
+
+static int sun4i_i2s_oversample_rates[] = { 128, 192, 256, 384, 512, 768 };
+
+static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
+ unsigned int rate,
+ unsigned int word_size)
+{
+ unsigned int clk_rate;
+ int bclk_div, mclk_div;
+ int ret, i;
+
+ switch (rate) {
+ case 176400:
+ case 88200:
+ case 44100:
+ case 22050:
+ case 11025:
+ clk_rate = 22579200;
+ break;
+
+ case 192000:
+ case 128000:
+ case 96000:
+ case 64000:
+ case 48000:
+ case 32000:
+ case 24000:
+ case 16000:
+ case 12000:
+ case 8000:
+ clk_rate = 24576000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(i2s->mod_clk, clk_rate);
+ if (ret)
+ return ret;
+
+ /* Always favor the highest oversampling rate */
+ for (i = (ARRAY_SIZE(sun4i_i2s_oversample_rates) - 1); i >= 0; i--) {
+ unsigned int oversample_rate = sun4i_i2s_oversample_rates[i];
+
+ bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
+ word_size);
+ mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
+ clk_rate,
+ rate);
+
+ if ((bclk_div >= 0) && (mclk_div >= 0))
+ break;
+ }
+
+ if ((bclk_div < 0) || (mclk_div < 0))
+ return -EINVAL;
+
+ regmap_write(i2s->regmap, SUN4I_I2S_CLK_DIV_REG,
+ SUN4I_I2S_CLK_DIV_BCLK(bclk_div) |
+ SUN4I_I2S_CLK_DIV_MCLK(mclk_div) |
+ SUN4I_I2S_CLK_DIV_MCLK_EN);
+
+ return 0;
+}
+
+static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ int sr, wss;
+ u32 width;
+
+ if (params_channels(params) != 2)
+ return -EINVAL;
+
+ switch (params_physical_width(params)) {
+ case 16:
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ i2s->playback_dma_data.addr_width = width;
+
+ switch (params_width(params)) {
+ case 16:
+ sr = 0;
+ wss = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN4I_I2S_FMT0_WSS_MASK | SUN4I_I2S_FMT0_SR_MASK,
+ SUN4I_I2S_FMT0_WSS(wss) | SUN4I_I2S_FMT0_SR(sr));
+
+ return sun4i_i2s_set_clk_rate(i2s, params_rate(params),
+ params_width(params));
+}
+
+static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ u32 val;
+
+ /* DAI Mode */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ val = SUN4I_I2S_FMT0_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = SUN4I_I2S_FMT0_FMT_LEFT_J;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = SUN4I_I2S_FMT0_FMT_RIGHT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN4I_I2S_FMT0_FMT_MASK,
+ val);
+
+ /* DAI clock polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Invert both clocks */
+ val = SUN4I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Invert bit clock */
+ val = SUN4I_I2S_FMT0_BCLK_POLARITY_INVERTED |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_NORMAL;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Invert frame clock */
+ val = SUN4I_I2S_FMT0_LRCLK_POLARITY_INVERTED |
+ SUN4I_I2S_FMT0_BCLK_POLARITY_NORMAL;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ /* Nothing to do for both normal cases */
+ val = SUN4I_I2S_FMT0_BCLK_POLARITY_NORMAL |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN4I_I2S_FMT0_BCLK_POLARITY_MASK |
+ SUN4I_I2S_FMT0_LRCLK_POLARITY_MASK,
+ val);
+
+ /* DAI clock master masks */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ /* BCLK and LRCLK master */
+ val = SUN4I_I2S_CTRL_MODE_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* BCLK and LRCLK slave */
+ val = SUN4I_I2S_CTRL_MODE_SLAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_MODE_MASK,
+ val);
+
+ /* Set significant bits in our FIFOs */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FIFO_CTRL_REG,
+ SUN4I_I2S_FIFO_CTRL_TX_MODE_MASK |
+ SUN4I_I2S_FIFO_CTRL_RX_MODE_MASK,
+ SUN4I_I2S_FIFO_CTRL_TX_MODE(1) |
+ SUN4I_I2S_FIFO_CTRL_RX_MODE(1));
+ return 0;
+}
+
+static void sun4i_i2s_start_playback(struct sun4i_i2s *i2s)
+{
+ /* Flush TX FIFO */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FIFO_CTRL_REG,
+ SUN4I_I2S_FIFO_CTRL_FLUSH_TX,
+ SUN4I_I2S_FIFO_CTRL_FLUSH_TX);
+
+ /* Clear TX counter */
+ regmap_write(i2s->regmap, SUN4I_I2S_TX_CNT_REG, 0);
+
+ /* Enable TX Block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_TX_EN,
+ SUN4I_I2S_CTRL_TX_EN);
+
+ /* Enable TX DRQ */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+ SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN,
+ SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN);
+}
+
+
+static void sun4i_i2s_stop_playback(struct sun4i_i2s *i2s)
+{
+ /* Disable TX Block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_TX_EN,
+ 0);
+
+ /* Disable TX DRQ */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+ SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN,
+ 0);
+}
+
+static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sun4i_i2s_start_playback(i2s);
+ else
+ return -EINVAL;
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sun4i_i2s_stop_playback(i2s);
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sun4i_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ /* Enable the whole hardware block */
+ regmap_write(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_GL_EN);
+
+ /* Enable the first output line */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK,
+ SUN4I_I2S_CTRL_SDO_EN(0));
+
+ /* Enable the first two channels */
+ regmap_write(i2s->regmap, SUN4I_I2S_TX_CHAN_SEL_REG,
+ SUN4I_I2S_TX_CHAN_SEL(2));
+
+ /* Map them to the two first samples coming in */
+ regmap_write(i2s->regmap, SUN4I_I2S_TX_CHAN_MAP_REG,
+ SUN4I_I2S_TX_CHAN_MAP(0, 0) | SUN4I_I2S_TX_CHAN_MAP(1, 1));
+
+ return clk_prepare_enable(i2s->mod_clk);
+}
+
+static void sun4i_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(i2s->mod_clk);
+
+ /* Disable our output lines */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK, 0);
+
+ /* Disable the whole hardware block */
+ regmap_write(i2s->regmap, SUN4I_I2S_CTRL_REG, 0);
+}
+
+static const struct snd_soc_dai_ops sun4i_i2s_dai_ops = {
+ .hw_params = sun4i_i2s_hw_params,
+ .set_fmt = sun4i_i2s_set_fmt,
+ .shutdown = sun4i_i2s_shutdown,
+ .startup = sun4i_i2s_startup,
+ .trigger = sun4i_i2s_trigger,
+};
+
+static int sun4i_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data, NULL);
+
+ snd_soc_dai_set_drvdata(dai, i2s);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver sun4i_i2s_dai = {
+ .probe = sun4i_i2s_dai_probe,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &sun4i_i2s_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static const struct snd_soc_component_driver sun4i_i2s_component = {
+ .name = "sun4i-dai",
+};
+
+static bool sun4i_i2s_rd_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SUN4I_I2S_FIFO_TX_REG:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+static bool sun4i_i2s_wr_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SUN4I_I2S_FIFO_RX_REG:
+ case SUN4I_I2S_FIFO_STA_REG:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+static bool sun4i_i2s_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case SUN4I_I2S_FIFO_RX_REG:
+ case SUN4I_I2S_INT_STA_REG:
+ case SUN4I_I2S_RX_CNT_REG:
+ case SUN4I_I2S_TX_CNT_REG:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static const struct reg_default sun4i_i2s_reg_defaults[] = {
+ { SUN4I_I2S_CTRL_REG, 0x00000000 },
+ { SUN4I_I2S_FMT0_REG, 0x0000000c },
+ { SUN4I_I2S_FMT1_REG, 0x00004020 },
+ { SUN4I_I2S_FIFO_CTRL_REG, 0x000400f0 },
+ { SUN4I_I2S_DMA_INT_CTRL_REG, 0x00000000 },
+ { SUN4I_I2S_CLK_DIV_REG, 0x00000000 },
+ { SUN4I_I2S_TX_CHAN_SEL_REG, 0x00000001 },
+ { SUN4I_I2S_TX_CHAN_MAP_REG, 0x76543210 },
+ { SUN4I_I2S_RX_CHAN_SEL_REG, 0x00000001 },
+ { SUN4I_I2S_RX_CHAN_MAP_REG, 0x00003210 },
+};
+
+static const struct regmap_config sun4i_i2s_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN4I_I2S_RX_CHAN_MAP_REG,
+
+ .cache_type = REGCACHE_FLAT,
+ .reg_defaults = sun4i_i2s_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sun4i_i2s_reg_defaults),
+ .writeable_reg = sun4i_i2s_wr_reg,
+ .readable_reg = sun4i_i2s_rd_reg,
+ .volatile_reg = sun4i_i2s_volatile_reg,
+};
+
+static int sun4i_i2s_runtime_resume(struct device *dev)
+{
+ struct sun4i_i2s *i2s = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2s->bus_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable bus clock\n");
+ return ret;
+ }
+
+ regcache_cache_only(i2s->regmap, false);
+ regcache_mark_dirty(i2s->regmap);
+
+ ret = regcache_sync(i2s->regmap);
+ if (ret) {
+ dev_err(dev, "Failed to sync regmap cache\n");
+ goto err_disable_clk;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2s->bus_clk);
+ return ret;
+}
+
+static int sun4i_i2s_runtime_suspend(struct device *dev)
+{
+ struct sun4i_i2s *i2s = dev_get_drvdata(dev);
+
+ regcache_cache_only(i2s->regmap, true);
+
+ clk_disable_unprepare(i2s->bus_clk);
+
+ return 0;
+}
+
+static int sun4i_i2s_probe(struct platform_device *pdev)
+{
+ struct sun4i_i2s *i2s;
+ struct resource *res;
+ void __iomem *regs;
+ int irq, ret;
+
+ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ if (!i2s)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, i2s);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Can't retrieve our interrupt\n");
+ return irq;
+ }
+
+ i2s->bus_clk = devm_clk_get(&pdev->dev, "apb");
+ if (IS_ERR(i2s->bus_clk)) {
+ dev_err(&pdev->dev, "Can't get our bus clock\n");
+ return PTR_ERR(i2s->bus_clk);
+ }
+
+ i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &sun4i_i2s_regmap_config);
+ if (IS_ERR(i2s->regmap)) {
+ dev_err(&pdev->dev, "Regmap initialisation failed\n");
+ return PTR_ERR(i2s->regmap);
+ }
+
+ i2s->mod_clk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(i2s->mod_clk)) {
+ dev_err(&pdev->dev, "Can't get our mod clock\n");
+ return PTR_ERR(i2s->mod_clk);
+ }
+
+ i2s->playback_dma_data.addr = res->start + SUN4I_I2S_FIFO_TX_REG;
+ i2s->playback_dma_data.maxburst = 4;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = sun4i_i2s_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &sun4i_i2s_component,
+ &sun4i_i2s_dai, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register DAI\n");
+ goto err_suspend;
+ }
+
+ ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register PCM\n");
+ goto err_suspend;
+ }
+
+ return 0;
+
+err_suspend:
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ sun4i_i2s_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int sun4i_i2s_remove(struct platform_device *pdev)
+{
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ sun4i_i2s_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_i2s_match[] = {
+ { .compatible = "allwinner,sun4i-a10-i2s", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_i2s_match);
+
+static const struct dev_pm_ops sun4i_i2s_pm_ops = {
+ .runtime_resume = sun4i_i2s_runtime_resume,
+ .runtime_suspend = sun4i_i2s_runtime_suspend,
+};
+
+static struct platform_driver sun4i_i2s_driver = {
+ .probe = sun4i_i2s_probe,
+ .remove = sun4i_i2s_remove,
+ .driver = {
+ .name = "sun4i-i2s",
+ .of_match_table = sun4i_i2s_match,
+ .pm = &sun4i_i2s_pm_ops,
+ },
+};
+module_platform_driver(sun4i_i2s_driver);
+
+MODULE_AUTHOR("Andrea Venturi <be17068@iperbole.bo.it>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 I2S driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/sound_firmware.c b/sound/sound_firmware.c
deleted file mode 100644
index 026347643c81..000000000000
--- a/sound/sound_firmware.c
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <asm/uaccess.h>
-#include "oss/sound_firmware.h"
-
-static int do_mod_firmware_load(const char *fn, char **fp)
-{
- struct file* filp;
- long l;
- char *dp;
-
- filp = filp_open(fn, 0, 0);
- if (IS_ERR(filp))
- {
- printk(KERN_INFO "Unable to load '%s'.\n", fn);
- return 0;
- }
- l = i_size_read(file_inode(filp));
- if (l <= 0 || l > 131072)
- {
- printk(KERN_INFO "Invalid firmware '%s'\n", fn);
- fput(filp);
- return 0;
- }
- dp = vmalloc(l);
- if (dp == NULL)
- {
- printk(KERN_INFO "Out of memory loading '%s'.\n", fn);
- fput(filp);
- return 0;
- }
- if (kernel_read(filp, 0, dp, l) != l)
- {
- printk(KERN_INFO "Failed to read '%s'.\n", fn);
- vfree(dp);
- fput(filp);
- return 0;
- }
- fput(filp);
- *fp = dp;
- return (int) l;
-}
-
-/**
- * mod_firmware_load - load sound driver firmware
- * @fn: filename
- * @fp: return for the buffer.
- *
- * Load the firmware for a sound module (up to 128K) into a buffer.
- * The buffer is returned in *fp. It is allocated with vmalloc so is
- * virtually linear and not DMAable. The caller should free it with
- * vfree when finished.
- *
- * The length of the buffer is returned on a successful load, the
- * value zero on a failure.
- *
- * Caution: This API is not recommended. Firmware should be loaded via
- * request_firmware.
- */
-
-int mod_firmware_load(const char *fn, char **fp)
-{
- int r;
- mm_segment_t fs = get_fs();
-
- set_fs(get_ds());
- r = do_mod_firmware_load(fn, fp);
- set_fs(fs);
- return r;
-}
-EXPORT_SYMBOL(mod_firmware_load);
-
-MODULE_LICENSE("GPL");
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 1f8fb0d904e0..9038b2e7df73 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -107,8 +107,10 @@ static struct usbmix_name_map extigy_map[] = {
* e.g. no Master and fake PCM volume
* Pavel Mihaylov <bin@bash.info>
*/
-static struct usbmix_dB_map mp3plus_dB_1 = {-4781, 0}; /* just guess */
-static struct usbmix_dB_map mp3plus_dB_2 = {-1781, 618}; /* just guess */
+static struct usbmix_dB_map mp3plus_dB_1 = {.min = -4781, .max = 0};
+ /* just guess */
+static struct usbmix_dB_map mp3plus_dB_2 = {.min = -1781, .max = 618};
+ /* just guess */
static struct usbmix_name_map mp3plus_map[] = {
/* 1: IT pcm */
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 28f5493da491..43c1c5021e4b 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -3,6 +3,7 @@
#include <string.h>
#include <linux/bitops.h>
+#include <stdlib.h>
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
@@ -10,6 +11,8 @@
int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
@@ -65,4 +68,38 @@ static inline int test_and_set_bit(int nr, unsigned long *addr)
return (old & mask) != 0;
}
+/**
+ * bitmap_alloc - Allocate bitmap
+ * @nbits: number of bits in the bitmap
+ */
+static inline unsigned long *bitmap_alloc(int nbits)
+{
+ return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
+}
+
+/*
+ * bitmap_scnprintf - print bitmap list into buffer
+ * @bitmap: bitmap
+ * @nbits: size of bitmap
+ * @buf: buffer to store output
+ * @size: size of @buf
+ */
+size_t bitmap_scnprintf(unsigned long *bitmap, int nbits,
+ char *buf, size_t size);
+
+/**
+ * bitmap_and - Do logical and on bitmaps
+ * @dst: resulting bitmap
+ * @src1: operand 1
+ * @src2: operand 2
+ * @nbits: size of bitmap
+ */
+static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ return __bitmap_and(dst, src1, src2, nbits);
+}
+
#endif /* _PERF_BITOPS_H */
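
A minimal user-space sketch of the helpers added to this header, together with bitmap_scnprintf() from tools/lib/bitmap.c; bitmap_zero() and set_bit() already exist in the tools/ headers, and the bit values are arbitrary:

#include <linux/bitmap.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long *a = bitmap_alloc(64), *b = bitmap_alloc(64);
	char buf[64];

	bitmap_zero(a, 64);
	bitmap_zero(b, 64);
	set_bit(1, a); set_bit(3, a); set_bit(4, a);
	set_bit(3, b); set_bit(4, b); set_bit(9, b);

	bitmap_and(a, a, b, 64);		/* a now holds bits 3 and 4 */
	bitmap_scnprintf(a, 64, buf, sizeof(buf));
	printf("%s\n", buf);			/* prints "3-4" */

	free(a);
	free(b);
	return 0;
}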
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 0a1adc1111fd..38748b0e342f 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -29,3 +29,47 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
+
+size_t bitmap_scnprintf(unsigned long *bitmap, int nbits,
+ char *buf, size_t size)
+{
+ /* current bit is 'cur', most recently seen range is [rbot, rtop] */
+ int cur, rbot, rtop;
+ bool first = true;
+ size_t ret = 0;
+
+ rbot = cur = find_first_bit(bitmap, nbits);
+ while (cur < nbits) {
+ rtop = cur;
+ cur = find_next_bit(bitmap, nbits, cur + 1);
+ if (cur < nbits && cur <= rtop + 1)
+ continue;
+
+ if (!first)
+ ret += scnprintf(buf + ret, size - ret, ",");
+
+ first = false;
+
+ ret += scnprintf(buf + ret, size - ret, "%d", rbot);
+ if (rbot < rtop)
+ ret += scnprintf(buf + ret, size - ret, "-%d", rtop);
+
+ rbot = cur;
+ }
+ return ret;
+}
+
+int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
+ unsigned long result = 0;
+
+ for (k = 0; k < lim; k++)
+ result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
+ return result != 0;
+}
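
To make the range folding in bitmap_scnprintf() above concrete, here is the walk for a bitmap with bits {1, 3, 4, 5, 9} set (cur/rbot/rtop as in the code):

/*
 * rbot = cur = 1;  next set bit is 3 (> 1 + 1)      -> emit "1",   rbot = 3
 * rtop advances to 5;  next set bit is 9 (> 5 + 1)  -> emit ",3-5", rbot = 9
 * rtop = 9;  cur reaches nbits                      -> emit ",9"
 *
 * resulting buffer: "1,3-5,9"
 */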
diff --git a/tools/lib/traceevent/.gitignore b/tools/lib/traceevent/.gitignore
index 3c60335fe7be..9e9f25fb1922 100644
--- a/tools/lib/traceevent/.gitignore
+++ b/tools/lib/traceevent/.gitignore
@@ -1,2 +1,3 @@
TRACEEVENT-CFLAGS
libtraceevent-dynamic-list
+libtraceevent.so.*
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 69966abf65d1..379a2bed07c0 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -192,6 +192,9 @@ OPTIONS
--period::
Record the sample period.
+--sample-cpu::
+ Record the sample cpu.
+
-n::
--no-samples::
Don't sample.
diff --git a/tools/perf/config/Makefile b/tools/perf/Makefile.config
index 24803c58049a..24803c58049a 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/Makefile.config
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 6641abb97f0a..2d9087501633 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -161,7 +161,7 @@ TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
BPF_DIR = $(srctree)/tools/lib/bpf/
SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
-# include config/Makefile by default and rule out
+# include Makefile.config by default and rule out
# non-config cases
config := 1
@@ -183,7 +183,7 @@ ifeq ($(filter feature-dump,$(MAKECMDGOALS)),feature-dump)
FEATURE_TESTS := all
endif
endif
-include config/Makefile
+include Makefile.config
endif
ifeq ($(config),0)
@@ -706,7 +706,7 @@ $(INSTALL_DOC_TARGETS):
### Cleaning rules
#
-# This is here, not in config/Makefile, because config/Makefile does
+# This is here, not in Makefile.config, because Makefile.config does
# not get included for the clean target:
#
config-clean:
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 8f2c16d9275f..6355902fbfc8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1434,6 +1434,7 @@ struct option __record_options[] = {
OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
"per thread counts"),
OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
+ OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
&record.opts.sample_time_set,
"Record the sample timestamps"),
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index bd108683fcb8..418ed94756d3 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -128,10 +128,14 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(sym, map, 0);
+ err = symbol__disassemble(sym, map, 0);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
+ } else {
+ char msg[BUFSIZ];
+ symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+ pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
}
pthread_mutex_unlock(&notes->lock);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index a7e0f1497244..cb0f1356ff81 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,6 +52,7 @@ struct record_opts {
bool sample_weight;
bool sample_time;
bool sample_time_set;
+ bool sample_cpu;
bool period;
bool running_time;
bool full_auxtrace;
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 928e110179cb..34faecf774ae 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,3 +1,5 @@
libperf-y += Context.o
-CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default
+CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
+CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
+CFLAGS_Context.o += -Wno-switch-default -Wno-shadow
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index cb20ae1c0d35..dc51bc570e51 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -41,6 +41,7 @@ perf-y += event-times.o
perf-y += backward-ring-buffer.o
perf-y += sdt.o
perf-y += is_printable_array.o
+perf-y += bitmap.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)
diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
new file mode 100644
index 000000000000..9abe6c13090f
--- /dev/null
+++ b/tools/perf/tests/bitmap.c
@@ -0,0 +1,53 @@
+#include <linux/compiler.h>
+#include <linux/bitmap.h>
+#include "tests.h"
+#include "cpumap.h"
+#include "debug.h"
+
+#define NBITS 100
+
+static unsigned long *get_bitmap(const char *str, int nbits)
+{
+ struct cpu_map *map = cpu_map__new(str);
+ unsigned long *bm = NULL;
+ int i;
+
+ bm = bitmap_alloc(nbits);
+
+ if (map && bm) {
+ bitmap_zero(bm, nbits);
+
+ for (i = 0; i < map->nr; i++)
+ set_bit(map->map[i], bm);
+ }
+
+ if (map)
+ cpu_map__put(map);
+ return bm;
+}
+
+static int test_bitmap(const char *str)
+{
+ unsigned long *bm = get_bitmap(str, NBITS);
+ char buf[100];
+ int ret;
+
+ bitmap_scnprintf(bm, NBITS, buf, sizeof(buf));
+ pr_debug("bitmap: %s\n", buf);
+
+ ret = !strcmp(buf, str);
+ free(bm);
+ return ret;
+}
+
+int test__bitmap_print(int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,5"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3,5,7,9,11,13,15,17,19,21-40"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("2-5"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3-6,8-10,24,35-37"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3-6,8-10,24,35-37"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1-10,12-20,22-30,32-40"));
+ return 0;
+}
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
index e53bc91fa260..268e5f8e4aa2 100644
--- a/tools/perf/tests/bpf-script-example.c
+++ b/tools/perf/tests/bpf-script-example.c
@@ -31,8 +31,8 @@ struct bpf_map_def SEC("maps") flip_table = {
.max_entries = 1,
};
-SEC("func=sys_epoll_wait")
-int bpf_func__sys_epoll_wait(void *ctx)
+SEC("func=SyS_epoll_wait")
+int bpf_func__SyS_epoll_wait(void *ctx)
{
int ind =0;
int *flag = bpf_map_lookup_elem(&flip_table, &ind);
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 10eb30686c9c..778668a2a966 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -226,6 +226,10 @@ static struct test generic_tests[] = {
.func = test__is_printable_array,
},
{
+ .desc = "Test bitmap print",
+ .func = test__bitmap_print,
+ },
+ {
.func = NULL,
},
};
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 68a69a195545..2af156a8d4e5 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -33,44 +33,86 @@ static unsigned int hex(char c)
return c - 'A' + 10;
}
-static size_t read_objdump_line(const char *line, size_t line_len, void *buf,
- size_t len)
+static size_t read_objdump_chunk(const char **line, unsigned char **buf,
+ size_t *buf_len)
{
- const char *p;
- size_t i, j = 0;
-
- /* Skip to a colon */
- p = strchr(line, ':');
- if (!p)
- return 0;
- i = p + 1 - line;
+ size_t bytes_read = 0;
+ unsigned char *chunk_start = *buf;
/* Read bytes */
- while (j < len) {
+ while (*buf_len > 0) {
char c1, c2;
- /* Skip spaces */
- for (; i < line_len; i++) {
- if (!isspace(line[i]))
- break;
- }
/* Get 2 hex digits */
- if (i >= line_len || !isxdigit(line[i]))
+ c1 = *(*line)++;
+ if (!isxdigit(c1))
break;
- c1 = line[i++];
- if (i >= line_len || !isxdigit(line[i]))
+ c2 = *(*line)++;
+ if (!isxdigit(c2))
break;
- c2 = line[i++];
- /* Followed by a space */
- if (i < line_len && line[i] && !isspace(line[i]))
+
+ /* Store byte and advance buf */
+ **buf = (hex(c1) << 4) | hex(c2);
+ (*buf)++;
+ (*buf_len)--;
+ bytes_read++;
+
+ /* End of chunk? */
+ if (isspace(**line))
break;
- /* Store byte */
- *(unsigned char *)buf = (hex(c1) << 4) | hex(c2);
- buf += 1;
- j++;
}
+
+ /*
+ * objdump will display raw insn as LE if code endian
+ * is LE and bytes_per_chunk > 1. In that case reverse
+ * the chunk we just read.
+ *
+	 * see disassemble_bytes() at binutils/objdump.c for details on
+	 * how objdump chooses the display endianness.
+ */
+ if (bytes_read > 1 && !bigendian()) {
+ unsigned char *chunk_end = chunk_start + bytes_read - 1;
+ unsigned char tmp;
+
+ while (chunk_start < chunk_end) {
+ tmp = *chunk_start;
+ *chunk_start = *chunk_end;
+ *chunk_end = tmp;
+ chunk_start++;
+ chunk_end--;
+ }
+ }
+
+ return bytes_read;
+}
+
+static size_t read_objdump_line(const char *line, unsigned char *buf,
+ size_t buf_len)
+{
+ const char *p;
+ size_t ret, bytes_read = 0;
+
+ /* Skip to a colon */
+ p = strchr(line, ':');
+ if (!p)
+ return 0;
+ p++;
+
+ /* Skip initial spaces */
+ while (*p) {
+ if (!isspace(*p))
+ break;
+ p++;
+ }
+
+ do {
+ ret = read_objdump_chunk(&p, &buf, &buf_len);
+ bytes_read += ret;
+ p++;
+ } while (ret > 0);
+
/* return number of successfully read bytes */
- return j;
+ return bytes_read;
}
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
@@ -95,7 +137,7 @@ static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
}
/* read objdump data into temporary buffer */
- read_bytes = read_objdump_line(line, ret, tmp, sizeof(tmp));
+ read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
if (!read_bytes)
continue;
@@ -152,7 +194,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf,
ret = read_objdump_output(f, buf, &len, addr);
if (len) {
- pr_debug("objdump read too few bytes\n");
+ pr_debug("objdump read too few bytes: %zd\n", len);
if (!ret)
ret = len;
}
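
To make the endianness note in read_objdump_chunk() concrete: when objdump prints multi-byte chunks for a little-endian target, each chunk appears most-significant byte first, so the helper reverses it to recover memory order (the values below are illustrative only):

/*
 * objdump column:  "11223344"          (one 4-byte chunk, bytes_read = 4)
 * bytes parsed:     11 22 33 44
 * after reversal:   44 33 22 11        <- the order the bytes occupy in memory,
 *                                         matching what is read from the dso
 *
 * With single-byte chunks ("44 33 22 11") bytes_read is 1 per chunk and the
 * reversal is skipped.
 */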
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 9bfc0e06c61a..7c196c585472 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -90,6 +90,7 @@ int test__backward_ring_buffer(int subtest);
int test__cpu_map_print(int subtest);
int test__sdt_event(int subtest);
int test__is_printable_array(int subtest);
+int test__bitmap_print(int subtest);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 29dc6d20364e..2e2d10022355 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -1026,7 +1026,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
.use_navkeypressed = true,
},
};
- int ret = -1;
+ int ret = -1, err;
int nr_pcnt = 1;
size_t sizeof_bdl = sizeof(struct browser_disasm_line);
@@ -1050,8 +1050,11 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
(nr_pcnt - 1);
}
- if (symbol__annotate(sym, map, sizeof_bdl) < 0) {
- ui__error("%s", ui_helpline__last_msg);
+ err = symbol__disassemble(sym, map, sizeof_bdl);
+ if (err) {
+ char msg[BUFSIZ];
+ symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+ ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
goto out_free_offsets;
}
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 9c7ff8d31b27..42d319927762 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -162,12 +162,16 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
GtkWidget *notebook;
GtkWidget *scrolled_window;
GtkWidget *tab_label;
+ int err;
if (map->dso->annotate_warned)
return -1;
- if (symbol__annotate(sym, map, 0) < 0) {
- ui__error("%s", ui_helpline__current);
+ err = symbol__disassemble(sym, map, 0);
+ if (err) {
+ char msg[BUFSIZ];
+ symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
+ ui__error("Couldn't annotate %s: %s\n", sym->name, msg);
return -1;
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e9825fe825fd..4024d309bb00 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1123,7 +1123,46 @@ static void delete_last_nop(struct symbol *sym)
}
}
-int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
+int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
+ int errnum, char *buf, size_t buflen)
+{
+ struct dso *dso = map->dso;
+
+ BUG_ON(buflen == 0);
+
+ if (errnum >= 0) {
+ str_error_r(errnum, buf, buflen);
+ return 0;
+ }
+
+ switch (errnum) {
+ case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
+ char bf[SBUILD_ID_SIZE + 15] = " with build id ";
+ char *build_id_msg = NULL;
+
+ if (dso->has_build_id) {
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id), bf + 15);
+ build_id_msg = bf;
+ }
+ scnprintf(buf, buflen,
+ "No vmlinux file%s\nwas found in the path.\n\n"
+ "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
+ "Please use:\n\n"
+ " perf buildid-cache -vu vmlinux\n\n"
+ "or:\n\n"
+ " --vmlinux vmlinux\n", build_id_msg ?: "");
+ }
+ break;
+ default:
+ scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
+ break;
+ }
+
+ return 0;
+}
+
+int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize)
{
struct dso *dso = map->dso;
char *filename = dso__build_id_filename(dso, NULL, 0);
@@ -1134,22 +1173,20 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
bool delete_extract = false;
+ int stdout_fd[2];
int lineno = 0;
int nline;
+ pid_t pid;
if (filename)
symbol__join_symfs(symfs_filename, filename);
if (filename == NULL) {
- if (dso->has_build_id) {
- pr_err("Can't annotate %s: not enough memory\n",
- sym->name);
- return -ENOMEM;
- }
+ if (dso->has_build_id)
+ return ENOMEM;
goto fallback;
- } else if (dso__is_kcore(dso)) {
- goto fallback;
- } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
+ } else if (dso__is_kcore(dso) ||
+ readlink(symfs_filename, command, sizeof(command)) < 0 ||
strstr(command, DSO__NAME_KALLSYMS) ||
access(symfs_filename, R_OK)) {
free(filename);
@@ -1166,27 +1203,7 @@ fallback:
if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
!dso__is_kcore(dso)) {
- char bf[SBUILD_ID_SIZE + 15] = " with build id ";
- char *build_id_msg = NULL;
-
- if (dso->annotate_warned)
- goto out_free_filename;
-
- if (dso->has_build_id) {
- build_id__sprintf(dso->build_id,
- sizeof(dso->build_id), bf + 15);
- build_id_msg = bf;
- }
- err = -ENOENT;
- dso->annotate_warned = 1;
- pr_err("Can't annotate %s:\n\n"
- "No vmlinux file%s\nwas found in the path.\n\n"
- "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
- "Please use:\n\n"
- " perf buildid-cache -vu vmlinux\n\n"
- "or:\n\n"
- " --vmlinux vmlinux\n",
- sym->name, build_id_msg ?: "");
+ err = SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
goto out_free_filename;
}
@@ -1258,9 +1275,32 @@ fallback:
pr_debug("Executing: %s\n", command);
- file = popen(command, "r");
+ err = -1;
+ if (pipe(stdout_fd) < 0) {
+ pr_err("Failure creating the pipe to run %s\n", command);
+ goto out_remove_tmp;
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ pr_err("Failure forking to run %s\n", command);
+ goto out_close_stdout;
+ }
+
+ if (pid == 0) {
+ close(stdout_fd[0]);
+ dup2(stdout_fd[1], 1);
+ close(stdout_fd[1]);
+ execl("/bin/sh", "sh", "-c", command, NULL);
+ perror(command);
+ exit(-1);
+ }
+
+ close(stdout_fd[1]);
+
+ file = fdopen(stdout_fd[0], "r");
if (!file) {
- pr_err("Failure running %s\n", command);
+ pr_err("Failure creating FILE stream for %s\n", command);
/*
* If we were using debug info should retry with
* original binary.
@@ -1286,9 +1326,11 @@ fallback:
if (dso__is_kcore(dso))
delete_last_nop(sym);
- pclose(file);
-
+ fclose(file);
+ err = 0;
out_remove_tmp:
+ close(stdout_fd[0]);
+
if (dso__needs_decompress(dso))
unlink(symfs_filename);
out_free_filename:
@@ -1297,6 +1339,10 @@ out_free_filename:
if (free_filename)
free(filename);
return err;
+
+out_close_stdout:
+ close(stdout_fd[1]);
+ goto out_remove_tmp;
}
static void insert_source_line(struct rb_root *root, struct source_line *src_line)
@@ -1663,7 +1709,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct rb_root source_line = RB_ROOT;
u64 len;
- if (symbol__annotate(sym, map, 0) < 0)
+ if (symbol__disassemble(sym, map, 0) < 0)
return -1;
len = symbol__size(sym);
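A self-contained sketch of the popen() replacement used above (illustrative only, error handling abbreviated): create a pipe, fork, run the command via /bin/sh with its stdout redirected into the write end, then wrap the read end in a FILE * with fdopen(). Unlike popen(), this leaves the parent with a plain file descriptor and the child's pid, which is what lets the annotate code clean up with fclose()/close() instead of pclose().

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *command = "echo hello from the child";
	int stdout_fd[2];
	char line[256];
	FILE *file;
	pid_t pid;

	if (pipe(stdout_fd) < 0)
		return 1;

	pid = fork();
	if (pid < 0)
		return 1;

	if (pid == 0) {			/* child: stdout -> write end of the pipe */
		close(stdout_fd[0]);
		dup2(stdout_fd[1], 1);
		close(stdout_fd[1]);
		execl("/bin/sh", "sh", "-c", command, NULL);
		perror(command);
		exit(1);
	}

	close(stdout_fd[1]);		/* parent keeps only the read end */

	file = fdopen(stdout_fd[0], "r");
	if (!file)
		return 1;

	while (fgets(line, sizeof(line), file))
		fputs(line, stdout);

	fclose(file);			/* also closes the underlying read fd */
	return 0;
}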
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index a23084f54128..f67ccb027561 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -155,7 +155,27 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
-int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
+int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize);
+
+enum symbol_disassemble_errno {
+ SYMBOL_ANNOTATE_ERRNO__SUCCESS = 0,
+
+ /*
+ * Choose an arbitrarily large negative number so as not to clash with
+ * standard errno values, since SUS requires errno to have distinct
+ * positive values. See 'Issue 6' in the link below.
+ *
+ * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
+ */
+ __SYMBOL_ANNOTATE_ERRNO__START = -10000,
+
+ SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
+
+ __SYMBOL_ANNOTATE_ERRNO__END,
+};
+
+int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
+ int errnum, char *buf, size_t buflen);
int symbol__annotate_init(struct map *map, struct symbol *sym);
int symbol__annotate_printf(struct symbol *sym, struct map *map,
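The symbol_disassemble_errno block above follows the private errno-range pattern perf already uses elsewhere (e.g. target__strerror() later in this diff): subsystem-specific codes start at a large negative value so they can never collide with the positive errno values a syscall returns, and the *_strerror() helper dispatches on the sign. A generic, self-contained illustration with made-up names:

#include <stdio.h>
#include <string.h>

enum foo_errno {
	FOO_ERRNO__SUCCESS    = 0,
	/* private codes live far below zero, clear of real errno values */
	__FOO_ERRNO__START    = -10000,
	FOO_ERRNO__NO_WIDGET  = __FOO_ERRNO__START,
	__FOO_ERRNO__END,
};

static int foo__strerror(int errnum, char *buf, size_t buflen)
{
	if (errnum >= 0) {		/* plain errno from a syscall */
		snprintf(buf, buflen, "%s", strerror(errnum));
		return 0;
	}

	switch (errnum) {		/* subsystem-specific codes */
	case FOO_ERRNO__NO_WIDGET:
		snprintf(buf, buflen, "No widget file was found in the path");
		break;
	default:
		snprintf(buf, buflen, "Internal error: invalid error code %d", errnum);
		break;
	}
	return 0;
}

int main(void)
{
	char buf[128];

	foo__strerror(FOO_ERRNO__NO_WIDGET, buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}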
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 2a40b8e1def7..097b3ed77fdd 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -239,31 +239,13 @@ void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
int perf_evlist__add_default(struct perf_evlist *evlist)
{
- struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- };
- struct perf_evsel *evsel;
-
- event_attr_init(&attr);
+ struct perf_evsel *evsel = perf_evsel__new_cycles();
- perf_event_attr__set_max_precise_ip(&attr);
-
- evsel = perf_evsel__new(&attr);
if (evsel == NULL)
- goto error;
-
- /* use asprintf() because free(evsel) assumes name is allocated */
- if (asprintf(&evsel->name, "cycles%.*s",
- attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
- goto error_free;
+ return -ENOMEM;
perf_evlist__add(evlist, evsel);
return 0;
-error_free:
- perf_evsel__delete(evsel);
-error:
- return -ENOMEM;
}
int perf_evlist__add_dummy(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 8c54df61fe64..d9b80ef881cd 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -253,6 +253,34 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
return evsel;
}
+struct perf_evsel *perf_evsel__new_cycles(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ struct perf_evsel *evsel;
+
+ event_attr_init(&attr);
+
+ perf_event_attr__set_max_precise_ip(&attr);
+
+ evsel = perf_evsel__new(&attr);
+ if (evsel == NULL)
+ goto out;
+
+ /* use asprintf() because free(evsel) assumes name is allocated */
+ if (asprintf(&evsel->name, "cycles%.*s",
+ attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
+ goto error_free;
+out:
+ return evsel;
+error_free:
+ perf_evsel__delete(evsel);
+ evsel = NULL;
+ goto out;
+}
+
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
@@ -854,7 +882,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
perf_evsel__set_sample_bit(evsel, REGS_INTR);
}
- if (target__has_cpu(&opts->target))
+ if (target__has_cpu(&opts->target) || opts->sample_cpu)
perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 8a4a6c9f1480..4d44129e050b 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -175,6 +175,8 @@ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *
return perf_evsel__newtp_idx(sys, name, 0);
}
+struct perf_evsel *perf_evsel__new_cycles(void);
+
struct event_format *event_format__new(const char *sys, const char *name);
void perf_evsel__init(struct perf_evsel *evsel,
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index a18d142cdca3..de15dbcdcecf 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1672,7 +1672,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
- bool use_callchain)
+ bool use_callchain, hists__resort_cb_t cb)
{
struct rb_root *root;
struct rb_node *next;
@@ -1711,6 +1711,9 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
+ if (cb && cb(n))
+ continue;
+
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
hists__inc_stats(hists, n);
@@ -1731,12 +1734,18 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
else
use_callchain = symbol_conf.use_callchain;
- output_resort(evsel__hists(evsel), prog, use_callchain);
+ output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
- output_resort(hists, prog, symbol_conf.use_callchain);
+ output_resort(hists, prog, symbol_conf.use_callchain, NULL);
+}
+
+void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
+ hists__resort_cb_t cb)
+{
+ output_resort(hists, prog, symbol_conf.use_callchain, cb);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 49aa4fac148f..0a1edf1ab450 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -153,8 +153,12 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed);
void hist_entry__delete(struct hist_entry *he);
+typedef int (*hists__resort_cb_t)(struct hist_entry *he);
+
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
+void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
+ hists__resort_cb_t cb);
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
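The hists__resort_cb_t hook added above lets a caller drop entries while the output tree is being rebuilt ('if (cb && cb(n)) continue;'). A minimal self-contained illustration of that optional-callback pattern, using a made-up entry type and predicate rather than perf's struct hist_entry:

#include <stdio.h>

struct entry {
	int period;
};

typedef int (*resort_cb_t)(struct entry *e);

/* mirrors output_resort(): a NULL callback keeps every entry */
static void output_entries(struct entry *entries, int nr, resort_cb_t cb)
{
	for (int i = 0; i < nr; i++) {
		if (cb && cb(&entries[i]))
			continue;	/* filtered out by the callback */
		printf("period=%d\n", entries[i].period);
	}
}

static int skip_small(struct entry *e)
{
	return e->period < 10;		/* non-zero => skip this entry */
}

int main(void)
{
	struct entry entries[] = { { 5 }, { 50 }, { 7 }, { 70 } };

	output_entries(entries, 4, NULL);	/* prints all four entries */
	output_entries(entries, 4, skip_small);	/* prints only 50 and 70 */
	return 0;
}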
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 8cdcf4641c51..21c4d9b23c24 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -122,11 +122,7 @@ int target__strerror(struct target *target, int errnum,
BUG_ON(buflen == 0);
if (errnum >= 0) {
- const char *err = str_error_r(errnum, buf, buflen);
-
- if (err != buf)
- scnprintf(buf, buflen, "%s", err);
-
+ str_error_r(errnum, buf, buflen);
return 0;
}
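The simplification above relies on the tools' str_error_r() helper always writing the message into the buffer it is given and returning that same buffer, which makes the extra scnprintf() copy dead code. A stand-in sketch of that contract (the real helper lives elsewhere under tools/; this prototype is only for illustration):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in with the same contract: always fill buf, always return buf */
static char *str_error_r(int errnum, char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%s", strerror(errnum));
	return buf;
}

int main(void)
{
	char buf[64];

	str_error_r(ENOENT, buf, sizeof(buf));
	printf("%s\n", buf);		/* "No such file or directory" */
	return 0;
}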
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
index 0e37f7a760eb..5201b915f631 100644
--- a/tools/testing/radix-tree/linux/gfp.h
+++ b/tools/testing/radix-tree/linux/gfp.h
@@ -1,7 +1,7 @@
#ifndef _GFP_H
#define _GFP_H
-#define __GFP_BITS_SHIFT 22
+#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
#define __GFP_WAIT 1
#define __GFP_ACCOUNT 0
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 4e400eb83657..d4300602bf37 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -18,7 +18,8 @@ execveat.denatured: execveat
$(CC) $(CFLAGS) -o $@ $^
TEST_PROGS := execveat
-TEST_FILES := $(DEPS)
+# Makefile is a run-time dependency, since it's accessed by the execveat test
+TEST_FILES := $(DEPS) Makefile
include ../lib.mk
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
index 4fdc70fe6980..4fdc70fe6980 100644..100755
--- a/tools/testing/selftests/lib/printf.sh
+++ b/tools/testing/selftests/lib/printf.sh
diff --git a/tools/testing/selftests/media_tests/.gitignore b/tools/testing/selftests/media_tests/.gitignore
index 1c0711708b98..8745eba39012 100644
--- a/tools/testing/selftests/media_tests/.gitignore
+++ b/tools/testing/selftests/media_tests/.gitignore
@@ -1 +1,3 @@
media_device_test
+media_device_open
+video_device_test
diff --git a/tools/testing/selftests/media_tests/Makefile b/tools/testing/selftests/media_tests/Makefile
index 7071bcc1d066..6b34a0199468 100644
--- a/tools/testing/selftests/media_tests/Makefile
+++ b/tools/testing/selftests/media_tests/Makefile
@@ -1,7 +1,7 @@
-TEST_PROGS := media_device_test
+TEST_PROGS := media_device_test media_device_open video_device_test
all: $(TEST_PROGS)
include ../lib.mk
clean:
- rm -fr media_device_test
+ rm -fr media_device_test media_device_open video_device_test
diff --git a/tools/testing/selftests/media_tests/bind_unbind_sample.sh b/tools/testing/selftests/media_tests/bind_unbind_sample.sh
new file mode 100755
index 000000000000..9f362f10631a
--- /dev/null
+++ b/tools/testing/selftests/media_tests/bind_unbind_sample.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Find device number in /sys/bus/usb/drivers/drivername
+# Edit this file to update the driver number and name
+# Example test for uvcvideo driver
+#i=0
+# while :; do
+# i=$((i+1))
+# echo 1-5:1.0 > /sys/bus/usb/drivers/uvcvideo/unbind;
+# echo 1-5:1.0 > /sys/bus/usb/drivers/uvcvideo/bind;
+# clear
+# echo $i
+#done
diff --git a/tools/testing/selftests/media_tests/media_device_open.c b/tools/testing/selftests/media_tests/media_device_open.c
new file mode 100644
index 000000000000..44343c091a20
--- /dev/null
+++ b/tools/testing/selftests/media_tests/media_device_open.c
@@ -0,0 +1,81 @@
+/*
+ * media_device_open.c - Media Controller Device Open Test
+ *
+ * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *
+ * This file is released under the GPLv2.
+ */
+
+/*
+ * This file adds a test for Media Controller API.
+ * This test should be run as root and should not be
+ * included in the Kselftest run. This test should be
+ * run when hardware and a driver that make use of the Media
+ * Controller API are present in the system.
+ *
+ * This test opens user specified Media Device and calls
+ * MEDIA_IOC_DEVICE_INFO ioctl, closes the file, and exits.
+ *
+ * Usage:
+ * sudo ./media_device_open -d /dev/mediaX
+ *
+ * Run this test in a loop and run bind/unbind on the driver.
+*/
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <linux/media.h>
+
+int main(int argc, char **argv)
+{
+ int opt;
+ char media_device[256];
+ int count = 0;
+ struct media_device_info mdi;
+ int ret;
+ int fd;
+
+ if (argc < 2) {
+ printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
+ exit(-1);
+ }
+
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "d:")) != -1) {
+ switch (opt) {
+ case 'd':
+ strncpy(media_device, optarg, sizeof(media_device) - 1);
+ media_device[sizeof(media_device)-1] = '\0';
+ break;
+ default:
+ printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]);
+ exit(-1);
+ }
+ }
+
+ if (getuid() != 0) {
+ printf("Please run the test as root - Exiting.\n");
+ exit(-1);
+ }
+
+ /* Open Media device and keep it open */
+ fd = open(media_device, O_RDWR);
+ if (fd == -1) {
+ printf("Media Device open errno %s\n", strerror(errno));
+ exit(-1);
+ }
+
+ ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &mdi);
+ if (ret < 0)
+ printf("Media Device Info errno %s\n", strerror(errno));
+ else
+ printf("Media device model %s driver %s\n",
+ mdi.model, mdi.driver);
+}
diff --git a/tools/testing/selftests/media_tests/media_device_test.c b/tools/testing/selftests/media_tests/media_device_test.c
index cbf53a032ab5..5d49943e77d0 100644
--- a/tools/testing/selftests/media_tests/media_device_test.c
+++ b/tools/testing/selftests/media_tests/media_device_test.c
@@ -1,5 +1,5 @@
/*
- * media_devkref_test.c - Media Controller Device Kref API Test
+ * media_device_test.c - Media Controller Device ioctl loop Test
*
* Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
@@ -35,13 +35,14 @@
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
+#include <time.h>
#include <linux/media.h>
int main(int argc, char **argv)
{
int opt;
char media_device[256];
- int count = 0;
+ int count;
struct media_device_info mdi;
int ret;
int fd;
@@ -69,6 +70,10 @@ int main(int argc, char **argv)
exit(-1);
}
+ /* Generate a random number of iterations */
+ srand((unsigned int) time(NULL));
+ count = rand();
+
/* Open Media device and keep it open */
fd = open(media_device, O_RDWR);
if (fd == -1) {
@@ -82,14 +87,16 @@ int main(int argc, char **argv)
"other Oops in the dmesg. Enable KaSan kernel\n"
"config option for use-after-free error detection.\n\n");
- while (count < 100) {
+ printf("Running test for %d iternations\n", count);
+
+ while (count > 0) {
ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &mdi);
if (ret < 0)
printf("Media Device Info errno %s\n", strerror(errno));
else
- printf("Media device model %s driver %s\n",
- mdi.model, mdi.driver);
+ printf("Media device model %s driver %s - count %d\n",
+ mdi.model, mdi.driver, count);
sleep(10);
- count++;
+ count--;
}
}
diff --git a/tools/testing/selftests/media_tests/open_loop_test.sh b/tools/testing/selftests/media_tests/open_loop_test.sh
new file mode 100755
index 000000000000..dcd3c17efc17
--- /dev/null
+++ b/tools/testing/selftests/media_tests/open_loop_test.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+ i=0
+file=/dev/media$1
+ while :; do
+ echo $file
+ i=$((i+1))
+ R=$(./media_device_open -d $file);
+ # clear
+ echo -e "Loop $i\n$R"
+ done
diff --git a/tools/testing/selftests/media_tests/regression_test.txt b/tools/testing/selftests/media_tests/regression_test.txt
new file mode 100644
index 000000000000..2627367681f7
--- /dev/null
+++ b/tools/testing/selftests/media_tests/regression_test.txt
@@ -0,0 +1,43 @@
+Testing for regressions in Media Controller API register, ioctl, syscall,
+and unregister paths. There have been a few problems that result in
+use-after-free on media_device, media_devnode, and cdev pointers when the
+driver is unbound while an ioctl is in progress.
+
+Test Procedure:
+
+Run a bind/unbind loop while ioctls are in progress.
+Run rmmod and modprobe.
+Disconnect the device.
+
+Setup:
+
+Build media_device_test
+cd tools/testing/selftests/media_tests
+make
+
+Regression test for the cdev use-after-free error on /dev/mediaX when the
+driver is unbound:
+
+Start media_device_test to regression-test the media devnode dynamic
+allocation and cdev use-after-free fixes. This opens the media device file
+and sits in a loop running the MEDIA_IOC_DEVICE_INFO ioctl once every 10
+seconds. The idea is that when the device file goes away, the media devnode
+and cdev should stick around until this test exits.
+
+The test runs for a random number of iterations, or until the user kills it,
+with a 10 second sleep in between the ioctl calls.
+
+sudo ./media_device_test -d /dev/mediaX
+
+Regression test for the media_devnode unregister race with the ioctl syscall:
+
+Start 6 open_loop_test.sh tests with different /dev/mediaX files. When the
+device file goes away after unbind, the device file name changes. Start the
+tests with the possible device names. If we start with /dev/media0, for
+example, then after unbind /dev/media1 or /dev/media2 could get created. The
+idea is to keep ioctls going while bind/unbind runs.
+
+Copy bind_unbind_sample.sh and edit it to specify the driver name and device
+number used to run bind and unbind, then start your modified script.
+
+Run dmesg looking for any use-after-free errors or mutex lock errors.
diff --git a/tools/testing/selftests/media_tests/video_device_test.c b/tools/testing/selftests/media_tests/video_device_test.c
new file mode 100644
index 000000000000..66d419c28653
--- /dev/null
+++ b/tools/testing/selftests/media_tests/video_device_test.c
@@ -0,0 +1,100 @@
+/*
+ * video_device_test - Video Device Test
+ *
+ * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *
+ * This file is released under the GPLv2.
+ */
+
+/*
+ * This file adds a test for Video Device. This test should not be included
+ * in the Kselftest run. This test should be run when hardware and driver
+ * that makes use of V4L2 API is present.
+ *
+ * This test opens user specified Video Device and calls video ioctls in a
+ * loop once every 10 seconds.
+ *
+ * Usage:
+ * sudo ./video_device_test -d /dev/videoX
+ *
+ * While the test is running, remove the device or unbind the driver and
+ * ensure there are no use-after-free errors or other Oopses in dmesg.
+ * When possible, enable the KASAN kernel config option for use-after-free
+ * error detection.
+*/
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <linux/videodev2.h>
+
+int main(int argc, char **argv)
+{
+ int opt;
+ char video_dev[256];
+ int count;
+ struct v4l2_tuner vtuner;
+ struct v4l2_capability vcap;
+ int ret;
+ int fd;
+
+ if (argc < 2) {
+ printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
+ exit(-1);
+ }
+
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "d:")) != -1) {
+ switch (opt) {
+ case 'd':
+ strncpy(video_dev, optarg, sizeof(video_dev) - 1);
+ video_dev[sizeof(video_dev)-1] = '\0';
+ break;
+ default:
+ printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
+ exit(-1);
+ }
+ }
+
+ /* Generate a random number of iterations */
+ srand((unsigned int) time(NULL));
+ count = rand();
+
+ /* Open Video device and keep it open */
+ fd = open(video_dev, O_RDWR);
+ if (fd == -1) {
+ printf("Video Device open errno %s\n", strerror(errno));
+ exit(-1);
+ }
+
+ printf("\nNote:\n"
+ "While test is running, remove the device or unbind\n"
+ "driver and ensure there are no use after free errors\n"
+ "and other Oops in the dmesg. When possible, enable KaSan\n"
+ "kernel config option for use-after-free error detection.\n\n");
+
+ while (count > 0) {
+ ret = ioctl(fd, VIDIOC_QUERYCAP, &vcap);
+ if (ret < 0)
+ printf("VIDIOC_QUERYCAP errno %s\n", strerror(errno));
+ else
+ printf("Video device driver %s\n", vcap.driver);
+
+ ret = ioctl(fd, VIDIOC_G_TUNER, &vtuner);
+ if (ret < 0)
+ printf("VIDIOC_G_TUNER, errno %s\n", strerror(errno));
+ else
+ printf("type %d rangelow %d rangehigh %d\n",
+ vtuner.type, vtuner.rangelow, vtuner.rangehigh);
+ sleep(10);
+ count--;
+ }
+}
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
new file mode 100755
index 000000000000..a676d3eefefb
--- /dev/null
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -0,0 +1,422 @@
+#!/bin/bash
+# Copyright (c) 2016 Microsemi. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it would be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# Author: Logan Gunthorpe <logang@deltatee.com>
+
+REMOTE_HOST=
+LIST_DEVS=FALSE
+
+DEBUGFS=${DEBUGFS-/sys/kernel/debug}
+
+PERF_RUN_ORDER=32
+MAX_MW_SIZE=0
+RUN_DMA_TESTS=
+DONT_CLEANUP=
+MW_SIZE=65536
+
+function show_help()
+{
+ echo "Usage: $0 [OPTIONS] LOCAL_DEV REMOTE_DEV"
+ echo "Run tests on a pair of NTB endpoints."
+ echo
+ echo "If the NTB device loops back to the same host then,"
+ echo "just specifying the two PCI ids on the command line is"
+ echo "sufficient. Otherwise, if the NTB link spans two hosts"
+ echo "use the -r option to specify the hostname for the remote"
+ echo "device. SSH will then be used to test the remote side."
+ echo "An SSH key between the root users of the host would then"
+ echo "be highly recommended."
+ echo
+ echo "Options:"
+ echo " -C don't cleanup ntb modules on exit"
+ echo " -d run dma tests"
+ echo " -h show this help message"
+ echo " -l list available local and remote PCI ids"
+ echo " -r REMOTE_HOST specify the remote's hostname to connect"
+ echo " to for the test (using ssh)"
+ echo " -p NUM ntb_perf run order (default: $PERF_RUN_ORDER)"
+ echo " -w max_mw_size maxmium memory window size"
+ echo
+}
+
+function parse_args()
+{
+ OPTIND=0
+ while getopts "Cdhlm:r:p:w:" opt; do
+ case "$opt" in
+ C) DONT_CLEANUP=1 ;;
+ d) RUN_DMA_TESTS=1 ;;
+ h) show_help; exit 0 ;;
+ l) LIST_DEVS=TRUE ;;
+ m) MW_SIZE=${OPTARG} ;;
+ r) REMOTE_HOST=${OPTARG} ;;
+ p) PERF_RUN_ORDER=${OPTARG} ;;
+ w) MAX_MW_SIZE=${OPTARG} ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ exit 1
+ ;;
+ esac
+ done
+}
+
+parse_args "$@"
+shift $((OPTIND-1))
+LOCAL_DEV=$1
+shift
+parse_args "$@"
+shift $((OPTIND-1))
+REMOTE_DEV=$1
+shift
+parse_args "$@"
+
+set -e
+
+function _modprobe()
+{
+ modprobe "$@"
+}
+
+function split_remote()
+{
+ VPATH=$1
+ REMOTE=
+
+ if [[ "$VPATH" == *":/"* ]]; then
+ REMOTE=${VPATH%%:*}
+ VPATH=${VPATH#*:}
+ fi
+}
+
+function read_file()
+{
+ split_remote $1
+ if [[ "$REMOTE" != "" ]]; then
+ ssh "$REMOTE" cat "$VPATH"
+ else
+ cat "$VPATH"
+ fi
+}
+
+function write_file()
+{
+ split_remote $2
+ VALUE=$1
+
+ if [[ "$REMOTE" != "" ]]; then
+ ssh "$REMOTE" "echo \"$VALUE\" > \"$VPATH\""
+ else
+ echo "$VALUE" > "$VPATH"
+ fi
+}
+
+function link_test()
+{
+ LOC=$1
+ REM=$2
+ EXP=0
+
+ echo "Running link tests on: $(basename $LOC) / $(basename $REM)"
+
+ if ! write_file "N" "$LOC/link" 2> /dev/null; then
+ echo " Unsupported"
+ return
+ fi
+
+ write_file "N" "$LOC/link_event"
+
+ if [[ $(read_file "$REM/link") != "N" ]]; then
+ echo "Expected remote link to be down in $REM/link" >&2
+ exit -1
+ fi
+
+ write_file "Y" "$LOC/link"
+ write_file "Y" "$LOC/link_event"
+
+ echo " Passed"
+}
+
+function doorbell_test()
+{
+ LOC=$1
+ REM=$2
+ EXP=0
+
+ echo "Running db tests on: $(basename $LOC) / $(basename $REM)"
+
+ write_file "c 0xFFFFFFFF" "$REM/db"
+
+ for ((i=1; i <= 8; i++)); do
+ let DB=$(read_file "$REM/db") || true
+ if [[ "$DB" != "$EXP" ]]; then
+ echo "Doorbell doesn't match expected value $EXP " \
+ "in $REM/db" >&2
+ exit -1
+ fi
+
+ let "MASK=1 << ($i-1)" || true
+ let "EXP=$EXP | $MASK" || true
+ write_file "s $MASK" "$LOC/peer_db"
+ done
+
+ echo " Passed"
+}
+
+function read_spad()
+{
+ VPATH=$1
+ IDX=$2
+
+ ROW=($(read_file "$VPATH" | grep -e "^$IDX"))
+ let VAL=${ROW[1]} || true
+ echo $VAL
+}
+
+function scratchpad_test()
+{
+ LOC=$1
+ REM=$2
+ CNT=$(read_file "$LOC/spad" | wc -l)
+
+ echo "Running spad tests on: $(basename $LOC) / $(basename $REM)"
+
+ for ((i = 0; i < $CNT; i++)); do
+ VAL=$RANDOM
+ write_file "$i $VAL" "$LOC/peer_spad"
+ RVAL=$(read_spad "$REM/spad" $i)
+
+ if [[ "$VAL" != "$RVAL" ]]; then
+ echo "Scratchpad doesn't match expected value $VAL " \
+ "in $REM/spad, got $RVAL" >&2
+ exit -1
+ fi
+
+ done
+
+ echo " Passed"
+}
+
+function write_mw()
+{
+ split_remote $2
+
+ if [[ "$REMOTE" != "" ]]; then
+ ssh "$REMOTE" \
+ dd if=/dev/urandom "of=$VPATH" 2> /dev/null || true
+ else
+ dd if=/dev/urandom "of=$VPATH" 2> /dev/null || true
+ fi
+}
+
+function mw_test()
+{
+ IDX=$1
+ LOC=$2
+ REM=$3
+
+ echo "Running $IDX tests on: $(basename $LOC) / $(basename $REM)"
+
+ write_mw "$LOC/$IDX"
+
+ split_remote "$LOC/$IDX"
+ if [[ "$REMOTE" == "" ]]; then
+ A=$VPATH
+ else
+ A=/tmp/ntb_test.$$.A
+ ssh "$REMOTE" cat "$VPATH" > "$A"
+ fi
+
+ split_remote "$REM/peer_$IDX"
+ if [[ "$REMOTE" == "" ]]; then
+ B=$VPATH
+ else
+ B=/tmp/ntb_test.$$.B
+ ssh "$REMOTE" cat "$VPATH" > "$B"
+ fi
+
+ cmp -n $MW_SIZE "$A" "$B"
+ if [[ $? != 0 ]]; then
+ echo "Memory window $MW did not match!" >&2
+ fi
+
+ if [[ "$A" == "/tmp/*" ]]; then
+ rm "$A"
+ fi
+
+ if [[ "$B" == "/tmp/*" ]]; then
+ rm "$B"
+ fi
+
+ echo " Passed"
+}
+
+function pingpong_test()
+{
+ LOC=$1
+ REM=$2
+
+ echo "Running ping pong tests on: $(basename $LOC) / $(basename $REM)"
+
+ LOC_START=$(read_file $LOC/count)
+ REM_START=$(read_file $REM/count)
+
+ sleep 7
+
+ LOC_END=$(read_file $LOC/count)
+ REM_END=$(read_file $REM/count)
+
+ if [[ $LOC_START == $LOC_END ]] || [[ $REM_START == $REM_END ]]; then
+ echo "Ping pong counter not incrementing!" >&2
+ exit 1
+ fi
+
+ echo " Passed"
+}
+
+function perf_test()
+{
+ USE_DMA=$1
+
+ if [[ $USE_DMA == "1" ]]; then
+ WITH="with"
+ else
+ WITH="without"
+ fi
+
+ _modprobe ntb_perf run_order=$PERF_RUN_ORDER \
+ max_mw_size=$MAX_MW_SIZE use_dma=$USE_DMA
+
+ echo "Running local perf test $WITH DMA"
+ write_file "" $LOCAL_PERF/run
+ echo -n " "
+ read_file $LOCAL_PERF/run
+ echo " Passed"
+
+ echo "Running remote perf test $WITH DMA"
+ write_file "" $REMOTE_PERF/run
+ echo -n " "
+ read_file $LOCAL_PERF/run
+ echo " Passed"
+
+ _modprobe -r ntb_perf
+}
+
+function ntb_tool_tests()
+{
+ LOCAL_TOOL=$DEBUGFS/ntb_tool/$LOCAL_DEV
+ REMOTE_TOOL=$REMOTE_HOST:$DEBUGFS/ntb_tool/$REMOTE_DEV
+
+ echo "Starting ntb_tool tests..."
+
+ _modprobe ntb_tool
+
+ write_file Y $LOCAL_TOOL/link_event
+ write_file Y $REMOTE_TOOL/link_event
+
+ link_test $LOCAL_TOOL $REMOTE_TOOL
+ link_test $REMOTE_TOOL $LOCAL_TOOL
+
+ for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
+ PT=$(basename $PEER_TRANS)
+ write_file $MW_SIZE $LOCAL_TOOL/$PT
+ write_file $MW_SIZE $REMOTE_TOOL/$PT
+ done
+
+ doorbell_test $LOCAL_TOOL $REMOTE_TOOL
+ doorbell_test $REMOTE_TOOL $LOCAL_TOOL
+ scratchpad_test $LOCAL_TOOL $REMOTE_TOOL
+ scratchpad_test $REMOTE_TOOL $LOCAL_TOOL
+
+ for MW in $(ls $LOCAL_TOOL/mw*); do
+ MW=$(basename $MW)
+
+ mw_test $MW $LOCAL_TOOL $REMOTE_TOOL
+ mw_test $MW $REMOTE_TOOL $LOCAL_TOOL
+ done
+
+ _modprobe -r ntb_tool
+}
+
+function ntb_pingpong_tests()
+{
+ LOCAL_PP=$DEBUGFS/ntb_pingpong/$LOCAL_DEV
+ REMOTE_PP=$REMOTE_HOST:$DEBUGFS/ntb_pingpong/$REMOTE_DEV
+
+ echo "Starting ntb_pingpong tests..."
+
+ _modprobe ntb_pingpong
+
+ pingpong_test $LOCAL_PP $REMOTE_PP
+
+ _modprobe -r ntb_pingpong
+}
+
+function ntb_perf_tests()
+{
+ LOCAL_PERF=$DEBUGFS/ntb_perf/$LOCAL_DEV
+ REMOTE_PERF=$REMOTE_HOST:$DEBUGFS/ntb_perf/$REMOTE_DEV
+
+ echo "Starting ntb_perf tests..."
+
+ perf_test 0
+
+ if [[ $RUN_DMA_TESTS ]]; then
+ perf_test 1
+ fi
+}
+
+function cleanup()
+{
+ set +e
+ _modprobe -r ntb_tool 2> /dev/null
+ _modprobe -r ntb_perf 2> /dev/null
+ _modprobe -r ntb_pingpong 2> /dev/null
+ _modprobe -r ntb_transport 2> /dev/null
+ set -e
+}
+
+cleanup
+
+if ! [[ $DONT_CLEANUP ]]; then
+ trap cleanup EXIT
+fi
+
+if [ "$(id -u)" != "0" ]; then
+ echo "This script must be run as root" 1>&2
+ exit 1
+fi
+
+if [[ "$LIST_DEVS" == TRUE ]]; then
+ echo "Local Devices:"
+ ls -1 /sys/bus/ntb/devices
+ echo
+
+ if [[ "$REMOTE_HOST" != "" ]]; then
+ echo "Remote Devices:"
+ ssh $REMOTE_HOST ls -1 /sys/bus/ntb/devices
+ fi
+
+ exit 0
+fi
+
+if [[ "$LOCAL_DEV" == $"" ]] || [[ "$REMOTE_DEV" == $"" ]]; then
+ show_help
+ exit 1
+fi
+
+ntb_tool_tests
+echo
+ntb_pingpong_tests
+echo
+ntb_perf_tests
+echo
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 4ca83fe80654..3c40c9d0e6c7 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -12,7 +12,8 @@ CFLAGS := -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $
export CFLAGS
-SUB_DIRS = benchmarks \
+SUB_DIRS = alignment \
+ benchmarks \
copyloops \
context_switch \
dscr \
diff --git a/tools/testing/selftests/powerpc/alignment/.gitignore b/tools/testing/selftests/powerpc/alignment/.gitignore
new file mode 100644
index 000000000000..1d980e3d7039
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/.gitignore
@@ -0,0 +1,5 @@
+copy_unaligned
+copy_first_unaligned
+paste_unaligned
+paste_last_unaligned
+copy_paste_unaligned_common
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile
new file mode 100644
index 000000000000..ad6a4e49da91
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/Makefile
@@ -0,0 +1,10 @@
+TEST_PROGS := copy_unaligned copy_first_unaligned paste_unaligned paste_last_unaligned
+
+all: $(TEST_PROGS)
+
+$(TEST_PROGS): ../harness.c ../utils.c copy_paste_unaligned_common.c
+
+include ../../lib.mk
+
+clean:
+ rm -f $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c b/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c
new file mode 100644
index 000000000000..47b73b3a08bd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016, Chris Smart, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Calls to copy_first which are not 128-byte aligned should be
+ * caught and sent a SIGBUS.
+ *
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include "utils.h"
+#include "instructions.h"
+#include "copy_paste_unaligned_common.h"
+
+unsigned int expected_instruction = PPC_INST_COPY_FIRST;
+unsigned int instruction_mask = 0xfc2007fe;
+
+int test_copy_first_unaligned(void)
+{
+ /* Only run this test on a P9 or later */
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
+
+ /* Register our signal handler with SIGBUS */
+ setup_signal_handler();
+
+ /* +1 makes buf unaligned */
+ copy_first(cacheline_buf+1);
+
+ /* We should not get here */
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test_copy_first_unaligned, "test_copy_first_unaligned");
+}
diff --git a/tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.c b/tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.c
new file mode 100644
index 000000000000..d35fa5f5d2d3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016, Chris Smart, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Common code for copy, copy_first, paste and paste_last unaligned
+ * tests.
+ *
+ */
+
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include "utils.h"
+#include "instructions.h"
+#include "copy_paste_unaligned_common.h"
+
+unsigned int expected_instruction;
+unsigned int instruction_mask;
+
+char cacheline_buf[128] __cacheline_aligned;
+
+void signal_action_handler(int signal_num, siginfo_t *info, void *ptr)
+{
+ ucontext_t *ctx = ptr;
+#if defined(__powerpc64__)
+ unsigned int *pc = (unsigned int *)ctx->uc_mcontext.gp_regs[PT_NIP];
+#else
+ unsigned int *pc = (unsigned int *)ctx->uc_mcontext.uc_regs->gregs[PT_NIP];
+#endif
+
+ /*
+ * Check that the signal was on the correct instruction, using a
+ * mask because the compiler chooses which register is used for RB.
+ */
+ if ((*pc & instruction_mask) == expected_instruction)
+ _exit(0); /* We hit the right instruction */
+
+ _exit(1);
+}
+
+void setup_signal_handler(void)
+{
+ struct sigaction signal_action;
+
+ memset(&signal_action, 0, sizeof(signal_action));
+ signal_action.sa_sigaction = signal_action_handler;
+ signal_action.sa_flags = SA_SIGINFO;
+ sigaction(SIGBUS, &signal_action, NULL);
+}
diff --git a/tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.h b/tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.h
new file mode 100644
index 000000000000..053899fe506e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/copy_paste_unaligned_common.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2016, Chris Smart, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Declarations for common code for copy, copy_first, paste and
+ * paste_last unaligned tests.
+ *
+ */
+
+#ifndef _SELFTESTS_POWERPC_COPY_PASTE_H
+#define _SELFTESTS_POWERPC_COPY_PASTE_H
+
+#include <signal.h>
+
+int main(int argc, char *argv[]);
+void signal_action_handler(int signal_num, siginfo_t *info, void *ptr);
+void setup_signal_handler(void);
+extern char cacheline_buf[128] __cacheline_aligned;
+extern unsigned int expected_instruction;
+extern unsigned int instruction_mask;
+
+#endif /* _SELFTESTS_POWERPC_COPY_PASTE_H */
diff --git a/tools/testing/selftests/powerpc/alignment/copy_unaligned.c b/tools/testing/selftests/powerpc/alignment/copy_unaligned.c
new file mode 100644
index 000000000000..3a4e26461554
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/copy_unaligned.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016, Chris Smart, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Calls to copy which are not 128-byte aligned should be caught
+ * and sent a SIGBUS.
+ *
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include "utils.h"
+#include "instructions.h"
+#include "copy_paste_unaligned_common.h"
+
+unsigned int expected_instruction = PPC_INST_COPY;
+unsigned int instruction_mask = 0xfc0007fe;
+
+int test_copy_unaligned(void)
+{
+ /* Only run this test on a P9 or later */
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
+
+ /* Register our signal handler with SIGBUS */
+ setup_signal_handler();
+
+ /* +1 makes buf unaligned */
+ copy(cacheline_buf+1);
+
+ /* We should not get here */
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test_copy_unaligned, "test_copy_unaligned");
+}
diff --git a/tools/testing/selftests/powerpc/alignment/paste_last_unaligned.c b/tools/testing/selftests/powerpc/alignment/paste_last_unaligned.c
new file mode 100644
index 000000000000..6e0ad045fcc3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/paste_last_unaligned.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016, Chris Smart, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Calls to paste_last which are not 128-byte aligned should be
+ * caught and sent a SIGBUS.
+ *
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include "utils.h"
+#include "instructions.h"
+#include "copy_paste_unaligned_common.h"
+
+unsigned int expected_instruction = PPC_INST_PASTE_LAST;
+unsigned int instruction_mask = 0xfc2007ff;
+
+int test_paste_last_unaligned(void)
+{
+ /* Only run this test on a P9 or later */
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
+
+ /* Register our signal handler with SIGBUS */
+ setup_signal_handler();
+
+ copy(cacheline_buf);
+
+ /* +1 makes buf unaligned */
+ paste_last(cacheline_buf+1);
+
+ /* We should not get here */
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test_paste_last_unaligned, "test_paste_last_unaligned");
+}
diff --git a/tools/testing/selftests/powerpc/alignment/paste_unaligned.c b/tools/testing/selftests/powerpc/alignment/paste_unaligned.c
new file mode 100644
index 000000000000..6f982b45e4bd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/alignment/paste_unaligned.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016, Chris Smart, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Calls to paste which are not 128-byte aligned should be caught
+ * and sent a SIGBUS.
+ *
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include "utils.h"
+#include "instructions.h"
+#include "copy_paste_unaligned_common.h"
+
+unsigned int expected_instruction = PPC_INST_PASTE;
+unsigned int instruction_mask = 0xfc0007fe;
+
+int test_paste_unaligned(void)
+{
+ /* Only run this test on a P9 or later */
+ SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));
+
+ /* Register our signal handler with SIGBUS */
+ setup_signal_handler();
+
+ copy(cacheline_buf);
+
+ /* +1 makes buf unaligned */
+ paste(cacheline_buf+1);
+
+ /* We should not get here */
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(test_paste_unaligned, "test_paste_unaligned");
+}
diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore
index 6fa673316ac2..bce49ebd869e 100644
--- a/tools/testing/selftests/powerpc/benchmarks/.gitignore
+++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore
@@ -1,2 +1,4 @@
gettimeofday
context_switch
+mmap_bench
+futex_bench
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index 912445ff7ce7..a9adfb7de78f 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := gettimeofday context_switch
+TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench
CFLAGS += -O2
@@ -7,6 +7,7 @@ all: $(TEST_PROGS)
$(TEST_PROGS): ../harness.c
context_switch: ../utils.c
+context_switch: CFLAGS += -maltivec -mvsx -mabi=altivec
context_switch: LDLIBS += -lpthread
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index 7b785941adec..a36883ad48a4 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -25,7 +25,9 @@
#include <sys/types.h>
#include <sys/shm.h>
#include <linux/futex.h>
-
+#ifdef __powerpc__
+#include <altivec.h>
+#endif
#include "../utils.h"
static unsigned int timeout = 30;
@@ -37,12 +39,15 @@ static int touch_fp = 1;
double fp;
static int touch_vector = 1;
-typedef int v4si __attribute__ ((vector_size (16)));
-v4si a, b, c;
+vector int a, b, c;
#ifdef __powerpc__
static int touch_altivec = 1;
+/*
+ * Note: LTO (Link Time Optimisation) doesn't play well with this function
+ * attribute. Be very careful enabling LTO for this test.
+ */
static void __attribute__((__target__("no-vsx"))) altivec_touch_fn(void)
{
c = a + b;
@@ -369,11 +374,11 @@ static void usage(void)
fprintf(stderr, "\t\t--process\tUse processes (default threads)\n");
fprintf(stderr, "\t\t--timeout=X\tDuration in seconds to run (default 30)\n");
fprintf(stderr, "\t\t--vdso\t\ttouch VDSO\n");
- fprintf(stderr, "\t\t--fp\t\ttouch FP\n");
+ fprintf(stderr, "\t\t--no-fp\t\tDon't touch FP\n");
#ifdef __powerpc__
- fprintf(stderr, "\t\t--altivec\ttouch altivec\n");
+ fprintf(stderr, "\t\t--no-altivec\tDon't touch altivec\n");
#endif
- fprintf(stderr, "\t\t--vector\ttouch vector\n");
+ fprintf(stderr, "\t\t--no-vector\tDon't touch vector\n");
}
int main(int argc, char *argv[])
diff --git a/tools/testing/selftests/powerpc/benchmarks/futex_bench.c b/tools/testing/selftests/powerpc/benchmarks/futex_bench.c
new file mode 100644
index 000000000000..2fc711d9150d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/futex_bench.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016, Anton Blanchard, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+#include <linux/futex.h>
+
+#include "utils.h"
+
+#define ITERATIONS 100000000
+
+#define futex(A, B, C, D, E, F) syscall(__NR_futex, A, B, C, D, E, F)
+
+int test_futex(void)
+{
+ struct timespec ts_start, ts_end;
+ unsigned long i = ITERATIONS;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+ while (i--) {
+ unsigned int addr = 0;
+ futex(&addr, FUTEX_WAKE, 1, NULL, NULL, 0);
+ }
+
+ clock_gettime(CLOCK_MONOTONIC, &ts_end);
+
+ printf("time = %.6f\n", ts_end.tv_sec - ts_start.tv_sec + (ts_end.tv_nsec - ts_start.tv_nsec) / 1e9);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_futex, "futex_bench");
+}
diff --git a/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c b/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
new file mode 100644
index 000000000000..8d084a2d6e74
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2016, Anton Blanchard, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <time.h>
+
+#include "utils.h"
+
+#define ITERATIONS 5000000
+
+#define MEMSIZE (128 * 1024 * 1024)
+
+int test_mmap(void)
+{
+ struct timespec ts_start, ts_end;
+ unsigned long i = ITERATIONS;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts_start);
+
+ while (i--) {
+ char *c = mmap(NULL, MEMSIZE, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ FAIL_IF(c == MAP_FAILED);
+ munmap(c, MEMSIZE);
+ }
+
+ clock_gettime(CLOCK_MONOTONIC, &ts_end);
+
+ printf("time = %.6f\n", ts_end.tv_sec - ts_start.tv_sec + (ts_end.tv_nsec - ts_start.tv_nsec) / 1e9);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_mmap, "mmap_bench");
+}
diff --git a/tools/testing/selftests/powerpc/instructions.h b/tools/testing/selftests/powerpc/instructions.h
new file mode 100644
index 000000000000..0fb0bd3b28c9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/instructions.h
@@ -0,0 +1,68 @@
+#ifndef _SELFTESTS_POWERPC_INSTRUCTIONS_H
+#define _SELFTESTS_POWERPC_INSTRUCTIONS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/* This defines the "copy" instruction from Power ISA 3.0 Book II, section 4.4. */
+#define __COPY(RA, RB, L) \
+ (0x7c00060c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10))
+#define COPY(RA, RB, L) \
+ .long __COPY((RA), (RB), (L))
+
+static inline void copy(void *i)
+{
+ asm volatile(str(COPY(0, %0, 0))";"
+ :
+ : "b" (i)
+ : "memory"
+ );
+}
+
+static inline void copy_first(void *i)
+{
+ asm volatile(str(COPY(0, %0, 1))";"
+ :
+ : "b" (i)
+ : "memory"
+ );
+}
+
+/* This defines the "paste" instruction from Power ISA 3.0 Book II, section 4.4. */
+#define __PASTE(RA, RB, L, RC) \
+ (0x7c00070c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10) | (RC) << (31-31))
+#define PASTE(RA, RB, L, RC) \
+ .long __PASTE((RA), (RB), (L), (RC))
+
+static inline int paste(void *i)
+{
+ int cr;
+
+ asm volatile(str(PASTE(0, %1, 0, 0))";"
+ "mfcr %0;"
+ : "=r" (cr)
+ : "b" (i)
+ : "memory"
+ );
+ return cr;
+}
+
+static inline int paste_last(void *i)
+{
+ int cr;
+
+ asm volatile(str(PASTE(0, %1, 1, 1))";"
+ "mfcr %0;"
+ : "=r" (cr)
+ : "b" (i)
+ : "memory"
+ );
+ return cr;
+}
+
+#define PPC_INST_COPY __COPY(0, 0, 0)
+#define PPC_INST_COPY_FIRST __COPY(0, 0, 1)
+#define PPC_INST_PASTE __PASTE(0, 0, 0, 0)
+#define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1)
+
+#endif /* _SELFTESTS_POWERPC_INSTRUCTIONS_H */
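The encodings above are plain OR-compositions of a base opcode with the RA/RB/L (and Rc for paste) fields, so they can be sanity-checked with a tiny host program (not part of the patch). With RA = RB = 0 the copy/paste encodings are just the base opcodes and the L/Rc bits select the copy_first/paste_last forms; the instruction_mask values used by the alignment tests clear the register fields so the SIGBUS handler matches whichever GPR the compiler picked for RB.

#include <stdio.h>

#define __COPY(RA, RB, L) \
	(0x7c00060c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10))
#define __PASTE(RA, RB, L, RC) \
	(0x7c00070c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10) | (RC) << (31-31))

int main(void)
{
	printf("copy        0x%08x\n", (unsigned int)__COPY(0, 0, 0));     /* 0x7c00060c */
	printf("copy_first  0x%08x\n", (unsigned int)__COPY(0, 0, 1));     /* 0x7c20060c */
	printf("paste       0x%08x\n", (unsigned int)__PASTE(0, 0, 0, 0)); /* 0x7c00070c */
	printf("paste_last  0x%08x\n", (unsigned int)__PASTE(0, 0, 1, 1)); /* 0x7c20070d */
	return 0;
}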
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index b43ade0ec861..e715a3f2fbf4 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -1,3 +1,4 @@
hugetlb_vs_thp_test
subpage_prot
tempfile
+prot_sao
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index ee179e22308c..3bdb96eae558 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -1,13 +1,15 @@
noarg:
$(MAKE) -C ../
-TEST_PROGS := hugetlb_vs_thp_test subpage_prot
+TEST_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao
TEST_FILES := tempfile
all: $(TEST_PROGS) $(TEST_FILES)
$(TEST_PROGS): ../harness.c
+prot_sao: ../utils.c
+
include ../../lib.mk
tempfile:
diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c
new file mode 100644
index 000000000000..611530d43fa9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/prot_sao.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+
+#include <asm/cputable.h>
+
+#include "utils.h"
+
+#define SIZE (64 * 1024)
+
+int test_prot_sao(void)
+{
+ char *p;
+
+ /* 2.06 or later should support SAO */
+ SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06));
+
+ /*
+ * Ensure we can ask for PROT_SAO.
+ * We can't really verify that it does the right thing, but at least we
+ * confirm the kernel will accept it.
+ */
+ p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ FAIL_IF(p == MAP_FAILED);
+
+ /* Write to the mapping, to at least cause a fault */
+ memset(p, 0xaa, SIZE);
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_prot_sao, "prot-sao");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 42bddbed8b64..44b7df14a936 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -20,3 +20,5 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
+ebb_lmr
+ebb_lmr_regs
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 8d2279c4bb4b..6b0453e60d53 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -14,7 +14,7 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test
+ cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
new file mode 100644
index 000000000000..c47ebd55ba4d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define SIZE (32 * 1024 * 1024) /* 32M */
+#define LM_SIZE 0 /* Smallest encoding, 32M */
+
+#define SECTIONS 64 /* 1 per bit in LMSER */
+#define SECTION_SIZE (SIZE / SECTIONS)
+#define SECTION_LONGS (SECTION_SIZE / sizeof(long))
+
+static unsigned long *test_mem;
+
+static int lmr_count = 0;
+
+void ebb_lmr_handler(void)
+{
+ lmr_count++;
+}
+
+void ldmx_full_section(unsigned long *mem, int section)
+{
+ unsigned long *ptr;
+ int i;
+
+ for (i = 0; i < SECTION_LONGS; i++) {
+ ptr = &mem[(SECTION_LONGS * section) + i];
+ ldmx((unsigned long) &ptr);
+ ebb_lmr_reset();
+ }
+}
+
+unsigned long section_masks[] = {
+ 0x8000000000000000,
+ 0xFF00000000000000,
+ 0x0000000F70000000,
+ 0x8000000000000001,
+ 0xF0F0F0F0F0F0F0F0,
+ 0x0F0F0F0F0F0F0F0F,
+ 0x0
+};
+
+int ebb_lmr_section_test(unsigned long *mem)
+{
+ unsigned long *mask = section_masks;
+ int i;
+
+ for (; *mask; mask++) {
+ mtspr(SPRN_LMSER, *mask);
+ printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER));
+
+ for (i = 0; i < 64; i++) {
+ lmr_count = 0;
+ ldmx_full_section(mem, i);
+ if (*mask & (1UL << (63 - i)))
+ FAIL_IF(lmr_count != SECTION_LONGS);
+ else
+ FAIL_IF(lmr_count);
+ }
+ }
+
+ return 0;
+}
+
+int ebb_lmr(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ setup_ebb_handler(ebb_lmr_handler);
+
+ ebb_global_enable();
+
+ FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0);
+
+ mtspr(SPRN_LMSER, 0);
+
+ FAIL_IF(mfspr(SPRN_LMSER) != 0);
+
+ mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE));
+
+ FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE));
+
+ /* Read every single byte to ensure we get no false positives */
+ for (i = 0; i < SECTIONS; i++)
+ ldmx_full_section(test_mem, i);
+
+ FAIL_IF(lmr_count != 0);
+
+ /* Turn on the first section */
+
+ mtspr(SPRN_LMSER, (1UL << 63));
+ FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63));
+
+ /* Enable LM (BESCR) */
+
+ mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME);
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME));
+
+ ldmx((unsigned long)&test_mem);
+
+ FAIL_IF(lmr_count != 1); // exactly one exception
+ FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME); // LM now disabled
+ FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO)); // occurred bit set
+
+ printf("Simple LMR EBB OK\n");
+
+ /* This shouldn't cause an EBB since it's been disabled */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 1);
+
+ printf("LMR disable on EBB OK\n");
+
+ ebb_lmr_reset();
+
+ /* This should cause an EBB or reset is broken */
+ ldmx((unsigned long)&test_mem);
+ FAIL_IF(lmr_count != 2);
+
+ printf("LMR reset EBB OK\n");
+
+ ebb_lmr_reset();
+
+ return ebb_lmr_section_test(test_mem);
+}
+
+int main(void)
+{
+ int ret = test_harness(ebb_lmr, "ebb_lmr");
+
+ if (test_mem)
+ free(test_mem);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
new file mode 100644
index 000000000000..ef50abd557cd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
@@ -0,0 +1,39 @@
+#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H
+
+#include "reg.h"
+
+#ifndef PPC_FEATURE2_ARCH_3_00
+#define PPC_FEATURE2_ARCH_3_00 0x00800000
+#endif
+
+#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00)
+
+static inline void ebb_lmr_reset(void)
+{
+ unsigned long bescr = mfspr(SPRN_BESCR);
+ bescr &= ~(BESCR_LMEO);
+ bescr |= BESCR_LME;
+ mtspr(SPRN_BESCR, bescr);
+}
+
+#define LDMX(t, a, b)\
+ (0x7c00026a | \
+ (((t) & 0x1f) << 21) | \
+ (((a) & 0x1f) << 16) | \
+ (((b) & 0x1f) << 11))
+
+static inline unsigned long ldmx(unsigned long address)
+{
+ unsigned long ret;
+
+ asm volatile ("mr 9, %1\r\n"
+ ".long " __stringify(LDMX(9, 0, 9)) "\r\n"
+ "mr %0, 9\r\n":"=r"(ret)
+ :"r"(address)
+ :"r9");
+
+ return ret;
+}
+
+#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
new file mode 100644
index 000000000000..aff4241fd88a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016, Jack Miller, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "ebb.h"
+#include "ebb_lmr.h"
+
+#define CHECKS 10000
+
+int ebb_lmr_regs(void)
+{
+ int i;
+
+ SKIP_IF(!lmr_is_supported());
+
+ ebb_global_enable();
+
+ for (i = 0; i < CHECKS; i++) {
+ mtspr(SPRN_LMRR, i << 25); // skip size and rsvd bits
+ mtspr(SPRN_LMSER, i);
+
+ FAIL_IF(mfspr(SPRN_LMRR) != (i << 25));
+ FAIL_IF(mfspr(SPRN_LMSER) != i);
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(ebb_lmr_regs, "ebb_lmr_regs");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c
index 5da355135df2..ae9a79086111 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c
@@ -51,7 +51,7 @@ static int do_count_loop(struct event *event, uint64_t instructions,
printf("Looped for %lu instructions, overhead %lu\n", instructions, overhead);
printf("Expected %lu\n", expected);
printf("Actual %llu\n", event->result.value);
- printf("Error %ld, %f%%\n", difference, percentage);
+ printf("Delta %ld, %f%%\n", difference, percentage);
printf("Took %d EBBs\n", ebb_state.stats.ebb_count);
}
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c
index a361ad3334ce..8b992fa5b478 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.c
+++ b/tools/testing/selftests/powerpc/pmu/lib.c
@@ -190,7 +190,7 @@ int parse_proc_maps(void)
bool require_paranoia_below(int level)
{
- unsigned long current;
+ long current;
char *end, buf[16];
FILE *f;
int rc;
@@ -208,7 +208,7 @@ bool require_paranoia_below(int level)
goto out_close;
}
- current = strtoul(buf, &end, 10);
+ current = strtol(buf, &end, 10);
if (end == buf) {
printf("Couldn't parse " PARANOID_PATH "?\n");
@@ -216,7 +216,7 @@ bool require_paranoia_below(int level)
}
if (current >= level)
- goto out;
+ goto out_close;
rc = 0;
out_close:
diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h
index 65bfdeeebdee..fddf368ed82f 100644
--- a/tools/testing/selftests/powerpc/reg.h
+++ b/tools/testing/selftests/powerpc/reg.h
@@ -34,6 +34,11 @@
#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
+#define BESCR_LME (0x1ul << 34) /* Load Monitor Enable */
+#define BESCR_LMEO (0x1ul << 2) /* Load Monitor Exception Occurred */
+
+#define SPRN_LMRR 813 /* Load Monitor Region Register */
+#define SPRN_LMSER 814 /* Load Monitor Section Enable Register */
#define SPRN_PMC1 771
#define SPRN_PMC2 772
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index bb942db845bf..82c0a9ce6e74 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -6,3 +6,4 @@ tm-vmxcopy
tm-fork
tm-tar
tm-tmspr
+tm-exec
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index d0505dbd22d5..9d301d785d9e 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,11 +1,14 @@
-TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack tm-vmxcopy tm-fork tm-tar tm-tmspr
+TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
+ tm-vmxcopy tm-fork tm-tar tm-tmspr tm-exec tm-execed
all: $(TEST_PROGS)
$(TEST_PROGS): ../harness.c ../utils.c
+CFLAGS += -mhtm
+
tm-syscall: tm-syscall-asm.S
-tm-syscall: CFLAGS += -mhtm -I../../../../../usr/include
+tm-syscall: CFLAGS += -I../../../../../usr/include
tm-tmspr: CFLAGS += -pthread
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/tm/tm-exec.c b/tools/testing/selftests/powerpc/tm/tm-exec.c
new file mode 100644
index 000000000000..3d27fa0ece04
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-exec.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Syscalls can be performed provided the transaction is suspended.
+ * The exec() class of syscall is unique as a new process is loaded.
+ *
+ * It makes little sense for a previously suspended transaction to
+ * still exist after an exec() call.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <inttypes.h>
+#include <libgen.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "utils.h"
+#include "tm.h"
+
+static char *path;
+
+static int test_exec(void)
+{
+ SKIP_IF(!have_htm());
+
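+ /* Start a transaction and suspend it, so it is still suspended when exec() is called */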
+ asm __volatile__(
+ "tbegin.;"
+ "blt 1f; "
+ "tsuspend.;"
+ "1: ;"
+ : : : "memory");
+
+ execl(path, "tm-exec", "--child", NULL);
+
+ /* Shouldn't get here */
+ perror("execl() failed");
+ return 1;
+}
+
+static int after_exec(void)
+{
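+ /* Begin another transaction; if the pre-exec suspended transaction somehow survived, this is expected to record a nesting failure */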
+ asm __volatile__(
+ "tbegin.;"
+ "blt 1f;"
+ "tsuspend.;"
+ "1: ;"
+ : : : "memory");
+
+ FAIL_IF(failure_is_nesting());
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ path = argv[0];
+
+ if (argc > 1 && strcmp(argv[1], "--child") == 0)
+ return after_exec();
+
+ return test_harness(test_exec, "tm_exec");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
index 60560cb20e38..454b965a2db3 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall.c
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -27,21 +27,6 @@ unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
#define TM_RETRIES 100
-long failure_code(void)
-{
- return __builtin_get_texasru() >> 24;
-}
-
-bool failure_is_persistent(void)
-{
- return (failure_code() & TM_CAUSE_PERSISTENT) == TM_CAUSE_PERSISTENT;
-}
-
-bool failure_is_syscall(void)
-{
- return (failure_code() & TM_CAUSE_SYSCALL) == TM_CAUSE_SYSCALL;
-}
-
pid_t getppid_tm(bool suspend)
{
int i;
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index 24144b25772c..60318bad7d7a 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -6,8 +6,9 @@
#ifndef _SELFTESTS_POWERPC_TM_TM_H
#define _SELFTESTS_POWERPC_TM_TM_H
-#include <stdbool.h>
+#include <asm/tm.h>
#include <asm/cputable.h>
+#include <stdbool.h>
#include "../utils.h"
@@ -31,4 +32,24 @@ static inline bool have_htm_nosc(void)
#endif
}
+static inline long failure_code(void)
+{
+ return __builtin_get_texasru() >> 24;
+}
+
+static inline bool failure_is_persistent(void)
+{
+ return (failure_code() & TM_CAUSE_PERSISTENT) == TM_CAUSE_PERSISTENT;
+}
+
+static inline bool failure_is_syscall(void)
+{
+ return (failure_code() & TM_CAUSE_SYSCALL) == TM_CAUSE_SYSCALL;
+}
+
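+/* 0x400000 is the Nesting Overflow bit in the upper word of TEXASR */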
+static inline bool failure_is_nesting(void)
+{
+ return (__builtin_get_texasru() & 0x400000);
+}
+
#endif /* _SELFTESTS_POWERPC_TM_TM_H */
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/utils.h
index a985cfaa535e..fbd33e52ef8f 100644
--- a/tools/testing/selftests/powerpc/utils.h
+++ b/tools/testing/selftests/powerpc/utils.h
@@ -27,6 +27,11 @@ int test_harness(int (test_function)(void), char *name);
extern void *get_auxv_entry(int type);
int pick_online_cpu(void);
+static inline bool have_hwcap(unsigned long ftr)
+{
+ return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr;
+}
+
static inline bool have_hwcap2(unsigned long ftr2)
{
return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2;
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 4a1be1b75a7f..1d5556869137 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -10,7 +10,7 @@ TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \
TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \
skew_consistency clocksource-switch leap-a-day \
- leapcrash set-tai set-2038
+ leapcrash set-tai set-2038 set-tz
bins = $(TEST_PROGS) $(TEST_PROGS_EXTENDED)
@@ -30,6 +30,7 @@ run_destructive_tests: run_tests
./clocksource-switch
./leap-a-day -s -i 10
./leapcrash
+ ./set-tz
./set-tai
./set-2038
diff --git a/tools/testing/selftests/timers/rtctest.c b/tools/testing/selftests/timers/rtctest.c
index 624bce51b27d..4230d3052e5d 100644
--- a/tools/testing/selftests/timers/rtctest.c
+++ b/tools/testing/selftests/timers/rtctest.c
@@ -144,11 +144,12 @@ test_READ:
retval = ioctl(fd, RTC_ALM_SET, &rtc_tm);
if (retval == -1) {
- if (errno == ENOTTY) {
+ if (errno == EINVAL) {
fprintf(stderr,
"\n...Alarm IRQs not supported.\n");
goto test_PIE;
}
+
perror("RTC_ALM_SET ioctl");
exit(errno);
}
@@ -166,6 +167,12 @@ test_READ:
/* Enable alarm interrupts */
retval = ioctl(fd, RTC_AIE_ON, 0);
if (retval == -1) {
+ if (errno == EINVAL) {
+ fprintf(stderr,
+ "\n...Alarm IRQs not supported.\n");
+ goto test_PIE;
+ }
+
perror("RTC_AIE_ON ioctl");
exit(errno);
}
@@ -193,7 +200,7 @@ test_PIE:
retval = ioctl(fd, RTC_IRQP_READ, &tmp);
if (retval == -1) {
/* not all RTCs support periodic IRQs */
- if (errno == ENOTTY) {
+ if (errno == EINVAL) {
fprintf(stderr, "\nNo periodic IRQ support\n");
goto done;
}
@@ -211,7 +218,7 @@ test_PIE:
retval = ioctl(fd, RTC_IRQP_SET, tmp);
if (retval == -1) {
/* not all RTCs can change their periodic IRQ rate */
- if (errno == ENOTTY) {
+ if (errno == EINVAL) {
fprintf(stderr,
"\n...Periodic IRQ rate is fixed\n");
goto done;
diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c
new file mode 100644
index 000000000000..f4184928b16b
--- /dev/null
+++ b/tools/testing/selftests/timers/set-tz.c
@@ -0,0 +1,119 @@
+/* Set tz value
+ * by: John Stultz <john.stultz@linaro.org>
+ * (C) Copyright Linaro 2016
+ * Licensed under the GPLv2
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/timex.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#ifdef KTEST
+#include "../kselftest.h"
+#else
+static inline int ksft_exit_pass(void)
+{
+ exit(0);
+}
+static inline int ksft_exit_fail(void)
+{
+ exit(1);
+}
+#endif
+
+int set_tz(int min, int dst)
+{
+ struct timezone tz;
+
+ tz.tz_minuteswest = min;
+ tz.tz_dsttime = dst;
+
+ return settimeofday(0, &tz);
+}
+
+int get_tz_min(void)
+{
+ struct timezone tz;
+ struct timeval tv;
+
+ memset(&tz, 0, sizeof(tz));
+ gettimeofday(&tv, &tz);
+ return tz.tz_minuteswest;
+}
+
+int get_tz_dst(void)
+{
+ struct timezone tz;
+ struct timeval tv;
+
+ memset(&tz, 0, sizeof(tz));
+ gettimeofday(&tv, &tz);
+ return tz.tz_dsttime;
+}
+
+int main(int argc, char **argv)
+{
+ int i, ret;
+ int min, dst;
+
+ min = get_tz_min();
+ dst = get_tz_dst();
+ printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
+
+ printf("Checking tz_minuteswest can be properly set: ");
+ for (i = -15*60; i < 15*60; i += 30) {
+ ret = set_tz(i, dst);
+ ret = get_tz_min();
+ if (ret != i) {
+ printf("[FAILED] expected: %i got %i\n", i, ret);
+ goto err;
+ }
+ }
+ printf("[OK]\n");
+
+ printf("Checking invalid tz_minuteswest values are caught: ");
+
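+ /* Offsets just beyond +/-15 hours (and +/-24 hours) are expected to be rejected by settimeofday() */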
+ if (!set_tz(-15*60-1, dst)) {
+ printf("[FAILED] %i didn't return failure!\n", -15*60-1);
+ goto err;
+ }
+
+ if (!set_tz(15*60+1, dst)) {
+ printf("[FAILED] %i didn't return failure!\n", 15*60+1);
+ goto err;
+ }
+
+ if (!set_tz(-24*60, dst)) {
+ printf("[FAILED] %i didn't return failure!\n", -24*60);
+ goto err;
+ }
+
+ if (!set_tz(24*60, dst)) {
+ printf("[FAILED] %i didn't return failure!\n", 24*60);
+ goto err;
+ }
+
+ printf("[OK]\n");
+
+ set_tz(min, dst);
+ return ksft_exit_pass();
+
+err:
+ set_tz(min, dst);
+ return ksft_exit_fail();
+}
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
index 00c4f65d12da..6d1437f895b8 100644
--- a/tools/testing/selftests/vm/compaction_test.c
+++ b/tools/testing/selftests/vm/compaction_test.c
@@ -101,7 +101,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
/* Start with the initial condition of 0 huge pages*/
if (write(fd, "0", sizeof(char)) != sizeof(char)) {
- perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
+ perror("Failed to write 0 to /proc/sys/vm/nr_hugepages\n");
goto close_fd;
}
@@ -110,14 +110,14 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
/* Request a large number of huge pages. The Kernel will allocate
as much as it can */
if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
- perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
+ perror("Failed to write 100000 to /proc/sys/vm/nr_hugepages\n");
goto close_fd;
}
lseek(fd, 0, SEEK_SET);
if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
- perror("Failed to read from /proc/sys/vm/nr_hugepages\n");
+ perror("Failed to re-read from /proc/sys/vm/nr_hugepages\n");
goto close_fd;
}
@@ -138,7 +138,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
!= strlen(initial_nr_hugepages)) {
- perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
+ perror("Failed to write value to /proc/sys/vm/nr_hugepages\n");
goto close_fd;
}
diff --git a/tools/testing/selftests/vm/on-fault-limit.c b/tools/testing/selftests/vm/on-fault-limit.c
index 245acccce42d..0ae458f32fdb 100644
--- a/tools/testing/selftests/vm/on-fault-limit.c
+++ b/tools/testing/selftests/vm/on-fault-limit.c
@@ -20,7 +20,7 @@ static int test_limit(void)
return ret;
}
- if (mlockall(MCL_CURRENT | MCL_ONFAULT | MCL_FUTURE)) {
+ if (mlockall(MCL_ONFAULT | MCL_FUTURE)) {
perror("mlockall");
return ret;
}
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index e5d6108f5e85..b0cc1a34db27 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -16,9 +16,6 @@ config HAVE_KVM_EVENTFD
bool
select EVENTFD
-config KVM_APIC_ARCHITECTURE
- bool
-
config KVM_MMIO
bool
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index 3a3a699b7489..7cffd9338c49 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -21,18 +21,11 @@
#include <asm/kvm_hyp.h>
-#ifdef CONFIG_KVM_NEW_VGIC
-extern struct vgic_global kvm_vgic_global_state;
-#define vgic_v2_params kvm_vgic_global_state
-#else
-extern struct vgic_params vgic_v2_params;
-#endif
-
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+ int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
u32 eisr0, eisr1;
int i;
bool expect_mi;
@@ -74,7 +67,7 @@ static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+ int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
u32 elrsr0, elrsr1;
elrsr0 = readl_relaxed(base + GICH_ELRSR0);
@@ -93,7 +86,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+ int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
int i;
for (i = 0; i < nr_lr; i++) {
@@ -147,7 +140,7 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_dist *vgic = &kvm->arch.vgic;
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
- int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+ int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
int i;
u64 live_lrs = 0;
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
deleted file mode 100644
index 1b0bee095427..000000000000
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ /dev/null
@@ -1,856 +0,0 @@
-/*
- * Contains GICv2 specific emulation code, was in vgic.c before.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/cpu.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
-#include "vgic.h"
-
-#define GICC_ARCH_VERSION_V2 0x2
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
-static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
-{
- return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
-}
-
-static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
- u32 word_offset = offset & 3;
-
- switch (offset & ~3) {
- case 0: /* GICD_CTLR */
- reg = vcpu->kvm->arch.vgic.enabled;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vcpu->kvm->arch.vgic.enabled = reg & 1;
- vgic_update_state(vcpu->kvm);
- return true;
- }
- break;
-
- case 4: /* GICD_TYPER */
- reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
- reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1;
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
-
- case 8: /* GICD_IIDR */
- reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
- vgic_reg_access(mmio, &reg, word_offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- break;
- }
-
- return false;
-}
-
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
-}
-
-static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
-}
-
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-}
-
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-}
-
-static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-}
-
-static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-}
-
-static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-#define GICD_ITARGETSR_SIZE 32
-#define GICD_CPUTARGETS_BITS 8
-#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
-static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int i;
- u32 val = 0;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
- val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
-
- return val;
-}
-
-static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i, c;
- unsigned long *bmap;
- u32 target;
-
- irq -= VGIC_NR_PRIVATE_IRQS;
-
- /*
- * Pick the LSB in each byte. This ensures we target exactly
- * one vcpu per IRQ. If the byte is null, assume we target
- * CPU0.
- */
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
- int shift = i * GICD_CPUTARGETS_BITS;
-
- target = ffs((val >> shift) & 0xffU);
- target = target ? (target - 1) : 0;
- dist->irq_spi_cpu[irq + i] = target;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
- if (c == target)
- set_bit(irq + i, bmap);
- else
- clear_bit(irq + i, bmap);
- }
- }
-}
-
-static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- /* We treat the banked interrupts targets as read-only */
- if (offset < 32) {
- u32 roreg;
-
- roreg = 1 << vcpu->vcpu_id;
- roreg |= roreg << 8;
- roreg |= roreg << 16;
-
- vgic_reg_access(mmio, &roreg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 *reg;
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
-
- return vgic_handle_cfg_reg(reg, mmio, offset);
-}
-
-static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vgic_dispatch_sgi(vcpu, reg);
- vgic_update_state(vcpu->kvm);
- return true;
- }
-
- return false;
-}
-
-/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
-static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg = 0;
-
- /* Copy source SGIs from distributor side */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- u8 sources = *vgic_get_sgi_sources(dist, vcpu_id, sgi);
-
- reg |= ((u32)sources) << (8 * (sgi - min_sgi));
- }
-
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, bool set)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int sgi;
- int min_sgi = (offset & ~0x3);
- int max_sgi = min_sgi + 3;
- int vcpu_id = vcpu->vcpu_id;
- u32 reg;
- bool updated = false;
-
- reg = mmio_data_read(mmio, ~0);
-
- /* Clear pending SGIs on the distributor */
- for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
- u8 mask = reg >> (8 * (sgi - min_sgi));
- u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
-
- if (set) {
- if ((*src & mask) != mask)
- updated = true;
- *src |= mask;
- } else {
- if (*src & mask)
- updated = true;
- *src &= ~mask;
- }
- }
-
- if (updated)
- vgic_update_state(vcpu->kvm);
-
- return updated;
-}
-
-static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
-}
-
-static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (!mmio->is_write)
- return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
- else
- return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
-}
-
-static const struct vgic_io_range vgic_dist_ranges[] = {
- {
- .base = GIC_DIST_SOFTINT,
- .len = 4,
- .handle_mmio = handle_mmio_sgi_reg,
- },
- {
- .base = GIC_DIST_CTRL,
- .len = 12,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_misc,
- },
- {
- .base = GIC_DIST_IGROUP,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_DIST_ENABLE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg,
- },
- {
- .base = GIC_DIST_ENABLE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg,
- },
- {
- .base = GIC_DIST_PENDING_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg,
- },
- {
- .base = GIC_DIST_PENDING_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg,
- },
- {
- .base = GIC_DIST_ACTIVE_SET,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_active_reg,
- },
- {
- .base = GIC_DIST_ACTIVE_CLEAR,
- .len = VGIC_MAX_IRQS / 8,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_active_reg,
- },
- {
- .base = GIC_DIST_PRI,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg,
- },
- {
- .base = GIC_DIST_TARGET,
- .len = VGIC_MAX_IRQS,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_target_reg,
- },
- {
- .base = GIC_DIST_CONFIG,
- .len = VGIC_MAX_IRQS / 4,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg,
- },
- {
- .base = GIC_DIST_SGI_PENDING_CLEAR,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_clear,
- },
- {
- .base = GIC_DIST_SGI_PENDING_SET,
- .len = VGIC_NR_SGIS,
- .handle_mmio = handle_mmio_sgi_set,
- },
- {}
-};
-
-static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
-{
- struct kvm *kvm = vcpu->kvm;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- u8 target_cpus;
- int sgi, mode, c, vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
-
- sgi = reg & 0xf;
- target_cpus = (reg >> 16) & 0xff;
- mode = (reg >> 24) & 3;
-
- switch (mode) {
- case 0:
- if (!target_cpus)
- return;
- break;
-
- case 1:
- target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
- break;
-
- case 2:
- target_cpus = 1 << vcpu_id;
- break;
- }
-
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (target_cpus & 1) {
- /* Flag the SGI as pending */
- vgic_dist_irq_set_pending(vcpu, sgi);
- *vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
- kvm_debug("SGI%d from CPU%d to CPU%d\n",
- sgi, vcpu_id, c);
- }
-
- target_cpus >>= 1;
- }
-}
-
-static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long sources;
- int vcpu_id = vcpu->vcpu_id;
- int c;
-
- sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
-
- for_each_set_bit(c, &sources, dist->nr_cpus) {
- if (vgic_queue_irq(vcpu, c, irq))
- clear_bit(c, &sources);
- }
-
- *vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
-
- /*
- * If the sources bitmap has been cleared it means that we
- * could queue all the SGIs onto link registers (see the
- * clear_bit above), and therefore we are done with them in
- * our emulated gic and can get rid of them.
- */
- if (!sources) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- return true;
- }
-
- return false;
-}
-
-/**
- * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
- * @kvm: pointer to the kvm struct
- *
- * Map the virtual CPU interface into the VM before running any VCPUs. We
- * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space.
- */
-static int vgic_v2_map_resources(struct kvm *kvm,
- const struct vgic_params *params)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- int ret = 0;
-
- if (!irqchip_in_kernel(kvm))
- return 0;
-
- mutex_lock(&kvm->lock);
-
- if (vgic_ready(kvm))
- goto out;
-
- if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
- kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
- }
-
- vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
- KVM_VGIC_V2_DIST_SIZE,
- vgic_dist_ranges, -1, &dist->dist_iodev);
-
- /*
- * Initialize the vgic if this hasn't already been done on demand by
- * accessing the vgic state from userspace.
- */
- ret = vgic_init(kvm);
- if (ret) {
- kvm_err("Unable to allocate maps\n");
- goto out_unregister;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
- params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
- true);
- if (ret) {
- kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out_unregister;
- }
-
- dist->ready = true;
- goto out;
-
-out_unregister:
- kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
-
-out:
- if (ret)
- kvm_vgic_destroy(kvm);
- mutex_unlock(&kvm->lock);
- return ret;
-}
-
-static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- *vgic_get_sgi_sources(dist, vcpu->vcpu_id, irq) |= 1 << source;
-}
-
-static int vgic_v2_init_model(struct kvm *kvm)
-{
- int i;
-
- for (i = VGIC_NR_PRIVATE_IRQS; i < kvm->arch.vgic.nr_irqs; i += 4)
- vgic_set_target_reg(kvm, 0, i);
-
- return 0;
-}
-
-void vgic_v2_init_emulation(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
- dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
- dist->vm_ops.init_model = vgic_v2_init_model;
- dist->vm_ops.map_resources = vgic_v2_map_resources;
-
- kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
-}
-
-static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- bool updated = false;
- struct vgic_vmcr vmcr;
- u32 *vmcr_field;
- u32 reg;
-
- vgic_get_vmcr(vcpu, &vmcr);
-
- switch (offset & ~0x3) {
- case GIC_CPU_CTRL:
- vmcr_field = &vmcr.ctlr;
- break;
- case GIC_CPU_PRIMASK:
- vmcr_field = &vmcr.pmr;
- break;
- case GIC_CPU_BINPOINT:
- vmcr_field = &vmcr.bpr;
- break;
- case GIC_CPU_ALIAS_BINPOINT:
- vmcr_field = &vmcr.abpr;
- break;
- default:
- BUG();
- }
-
- if (!mmio->is_write) {
- reg = *vmcr_field;
- mmio_data_write(mmio, ~0, reg);
- } else {
- reg = mmio_data_read(mmio, ~0);
- if (reg != *vmcr_field) {
- *vmcr_field = reg;
- vgic_set_vmcr(vcpu, &vmcr);
- updated = true;
- }
- }
- return updated;
-}
-
-static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
-}
-
-static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
-
- if (mmio->is_write)
- return false;
-
- /* GICC_IIDR */
- reg = (PRODUCT_ID_KVM << 20) |
- (GICC_ARCH_VERSION_V2 << 16) |
- (IMPLEMENTER_ARM << 0);
- mmio_data_write(mmio, ~0, reg);
- return false;
-}
-
-/*
- * CPU Interface Register accesses - these are not accessed by the VM, but by
- * user space for saving and restoring VGIC state.
- */
-static const struct vgic_io_range vgic_cpu_ranges[] = {
- {
- .base = GIC_CPU_CTRL,
- .len = 12,
- .handle_mmio = handle_cpu_mmio_misc,
- },
- {
- .base = GIC_CPU_ALIAS_BINPOINT,
- .len = 4,
- .handle_mmio = handle_mmio_abpr,
- },
- {
- .base = GIC_CPU_ACTIVEPRIO,
- .len = 16,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GIC_CPU_IDENT,
- .len = 4,
- .handle_mmio = handle_cpu_mmio_ident,
- },
-};
-
-static int vgic_attr_regs_access(struct kvm_device *dev,
- struct kvm_device_attr *attr,
- u32 *reg, bool is_write)
-{
- const struct vgic_io_range *r = NULL, *ranges;
- phys_addr_t offset;
- int ret, cpuid, c;
- struct kvm_vcpu *vcpu, *tmp_vcpu;
- struct vgic_dist *vgic;
- struct kvm_exit_mmio mmio;
- u32 data;
-
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
- KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-
- mutex_lock(&dev->kvm->lock);
-
- ret = vgic_init(dev->kvm);
- if (ret)
- goto out;
-
- if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
- ret = -EINVAL;
- goto out;
- }
-
- vcpu = kvm_get_vcpu(dev->kvm, cpuid);
- vgic = &dev->kvm->arch.vgic;
-
- mmio.len = 4;
- mmio.is_write = is_write;
- mmio.data = &data;
- if (is_write)
- mmio_data_write(&mmio, ~0, *reg);
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- mmio.phys_addr = vgic->vgic_dist_base + offset;
- ranges = vgic_dist_ranges;
- break;
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- mmio.phys_addr = vgic->vgic_cpu_base + offset;
- ranges = vgic_cpu_ranges;
- break;
- default:
- BUG();
- }
- r = vgic_find_range(ranges, 4, offset);
-
- if (unlikely(!r || !r->handle_mmio)) {
- ret = -ENXIO;
- goto out;
- }
-
-
- spin_lock(&vgic->lock);
-
- /*
- * Ensure that no other VCPU is running by checking the vcpu->cpu
- * field. If no other VPCUs are running we can safely access the VGIC
- * state, because even if another VPU is run after this point, that
- * VCPU will not touch the vgic state, because it will block on
- * getting the vgic->lock in kvm_vgic_sync_hwstate().
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
- if (unlikely(tmp_vcpu->cpu != -1)) {
- ret = -EBUSY;
- goto out_vgic_unlock;
- }
- }
-
- /*
- * Move all pending IRQs from the LRs on all VCPUs so the pending
- * state can be properly represented in the register state accessible
- * through this API.
- */
- kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
- vgic_unqueue_irqs(tmp_vcpu);
-
- offset -= r->base;
- r->handle_mmio(vcpu, &mmio, offset);
-
- if (!is_write)
- *reg = mmio_data_read(&mmio, ~0);
-
- ret = 0;
-out_vgic_unlock:
- spin_unlock(&vgic->lock);
-out:
- mutex_unlock(&dev->kvm->lock);
- return ret;
-}
-
-static int vgic_v2_create(struct kvm_device *dev, u32 type)
-{
- return kvm_vgic_create(dev->kvm, type);
-}
-
-static void vgic_v2_destroy(struct kvm_device *dev)
-{
- kfree(dev);
-}
-
-static int vgic_v2_set_attr(struct kvm_device *dev,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- ret = vgic_set_common_attr(dev, attr);
- if (ret != -ENXIO)
- return ret;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg;
-
- if (get_user(reg, uaddr))
- return -EFAULT;
-
- return vgic_attr_regs_access(dev, attr, &reg, true);
- }
-
- }
-
- return -ENXIO;
-}
-
-static int vgic_v2_get_attr(struct kvm_device *dev,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- ret = vgic_get_common_attr(dev, attr);
- if (ret != -ENXIO)
- return ret;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 reg = 0;
-
- ret = vgic_attr_regs_access(dev, attr, &reg, false);
- if (ret)
- return ret;
- return put_user(reg, uaddr);
- }
-
- }
-
- return -ENXIO;
-}
-
-static int vgic_v2_has_attr(struct kvm_device *dev,
- struct kvm_device_attr *attr)
-{
- phys_addr_t offset;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR:
- switch (attr->attr) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- return 0;
- }
- break;
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_dist_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
- return vgic_has_attr_regs(vgic_cpu_ranges, offset);
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
- return 0;
- case KVM_DEV_ARM_VGIC_GRP_CTRL:
- switch (attr->attr) {
- case KVM_DEV_ARM_VGIC_CTRL_INIT:
- return 0;
- }
- }
- return -ENXIO;
-}
-
-struct kvm_device_ops kvm_arm_vgic_v2_ops = {
- .name = "kvm-arm-vgic-v2",
- .create = vgic_v2_create,
- .destroy = vgic_v2_destroy,
- .set_attr = vgic_v2_set_attr,
- .get_attr = vgic_v2_get_attr,
- .has_attr = vgic_v2_has_attr,
-};
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
deleted file mode 100644
index 334cd7a89106..000000000000
--- a/virt/kvm/arm/vgic-v2.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/cpu.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
-static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
-{
- struct vgic_lr lr_desc;
- u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];
-
- lr_desc.irq = val & GICH_LR_VIRTUALID;
- if (lr_desc.irq <= 15)
- lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
- else
- lr_desc.source = 0;
- lr_desc.state = 0;
-
- if (val & GICH_LR_PENDING_BIT)
- lr_desc.state |= LR_STATE_PENDING;
- if (val & GICH_LR_ACTIVE_BIT)
- lr_desc.state |= LR_STATE_ACTIVE;
- if (val & GICH_LR_EOI)
- lr_desc.state |= LR_EOI_INT;
- if (val & GICH_LR_HW) {
- lr_desc.state |= LR_HW;
- lr_desc.hwirq = (val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;
- }
-
- return lr_desc;
-}
-
-static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
- u32 lr_val;
-
- lr_val = lr_desc.irq;
-
- if (lr_desc.state & LR_STATE_PENDING)
- lr_val |= GICH_LR_PENDING_BIT;
- if (lr_desc.state & LR_STATE_ACTIVE)
- lr_val |= GICH_LR_ACTIVE_BIT;
- if (lr_desc.state & LR_EOI_INT)
- lr_val |= GICH_LR_EOI;
-
- if (lr_desc.state & LR_HW) {
- lr_val |= GICH_LR_HW;
- lr_val |= (u32)lr_desc.hwirq << GICH_LR_PHYSID_CPUID_SHIFT;
- }
-
- if (lr_desc.irq < VGIC_NR_SGIS)
- lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
-
- vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-
- if (!(lr_desc.state & LR_STATE_MASK))
- vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
- else
- vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
-}
-
-static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-}
-
-static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-}
-
-static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
-}
-
-static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
-{
- u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
- u32 ret = 0;
-
- if (misr & GICH_MISR_EOI)
- ret |= INT_STATUS_EOI;
- if (misr & GICH_MISR_U)
- ret |= INT_STATUS_UNDERFLOW;
-
- return ret;
-}
-
-static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
-}
-
-static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
-}
-
-static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
-{
- u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
-
- vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
- vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
- vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
- vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
-}
-
-static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
-{
- u32 vmcr;
-
- vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
- vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
- vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
- vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
-
- vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
-}
-
-static void vgic_v2_enable(struct kvm_vcpu *vcpu)
-{
- /*
- * By forcing VMCR to zero, the GIC will restore the binary
- * points to their reset values. Anything else resets to zero
- * anyway.
- */
- vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
- vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
-
- /* Get the show on the road... */
- vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
-}
-
-static const struct vgic_ops vgic_v2_ops = {
- .get_lr = vgic_v2_get_lr,
- .set_lr = vgic_v2_set_lr,
- .get_elrsr = vgic_v2_get_elrsr,
- .get_eisr = vgic_v2_get_eisr,
- .clear_eisr = vgic_v2_clear_eisr,
- .get_interrupt_status = vgic_v2_get_interrupt_status,
- .enable_underflow = vgic_v2_enable_underflow,
- .disable_underflow = vgic_v2_disable_underflow,
- .get_vmcr = vgic_v2_get_vmcr,
- .set_vmcr = vgic_v2_set_vmcr,
- .enable = vgic_v2_enable,
-};
-
-struct vgic_params __section(.hyp.text) vgic_v2_params;
-
-static void vgic_cpu_init_lrs(void *params)
-{
- struct vgic_params *vgic = params;
- int i;
-
- for (i = 0; i < vgic->nr_lr; i++)
- writel_relaxed(0, vgic->vctrl_base + GICH_LR0 + (i * 4));
-}
-
-/**
- * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
- * @gic_kvm_info: pointer to the GIC description
- * @ops: address of a pointer to the GICv2 operations
- * @params: address of a pointer to HW-specific parameters
- *
- * Returns 0 if a GICv2 has been found, with the low level operations
- * in *ops and the HW parameters in *params. Returns an error code
- * otherwise.
- */
-int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
-{
- int ret;
- struct vgic_params *vgic = &vgic_v2_params;
- const struct resource *vctrl_res = &gic_kvm_info->vctrl;
- const struct resource *vcpu_res = &gic_kvm_info->vcpu;
-
- memset(vgic, 0, sizeof(*vgic));
-
- if (!gic_kvm_info->maint_irq) {
- kvm_err("error getting vgic maintenance irq\n");
- ret = -ENXIO;
- goto out;
- }
- vgic->maint_irq = gic_kvm_info->maint_irq;
-
- if (!gic_kvm_info->vctrl.start) {
- kvm_err("GICH not present in the firmware table\n");
- ret = -ENXIO;
- goto out;
- }
-
- vgic->vctrl_base = ioremap(gic_kvm_info->vctrl.start,
- resource_size(&gic_kvm_info->vctrl));
- if (!vgic->vctrl_base) {
- kvm_err("Cannot ioremap GICH\n");
- ret = -ENOMEM;
- goto out;
- }
-
- vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR);
- vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1;
-
- ret = create_hyp_io_mappings(vgic->vctrl_base,
- vgic->vctrl_base + resource_size(vctrl_res),
- vctrl_res->start);
- if (ret) {
- kvm_err("Cannot map VCTRL into hyp\n");
- goto out_unmap;
- }
-
- if (!PAGE_ALIGNED(vcpu_res->start)) {
- kvm_err("GICV physical address 0x%llx not page aligned\n",
- (unsigned long long)vcpu_res->start);
- ret = -ENXIO;
- goto out_unmap;
- }
-
- if (!PAGE_ALIGNED(resource_size(vcpu_res))) {
- kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
- (unsigned long long)resource_size(vcpu_res),
- PAGE_SIZE);
- ret = -ENXIO;
- goto out_unmap;
- }
-
- vgic->can_emulate_gicv2 = true;
- kvm_register_device_ops(&kvm_arm_vgic_v2_ops, KVM_DEV_TYPE_ARM_VGIC_V2);
-
- vgic->vcpu_base = vcpu_res->start;
-
- kvm_info("GICH base=0x%llx, GICV base=0x%llx, IRQ=%d\n",
- gic_kvm_info->vctrl.start, vgic->vcpu_base, vgic->maint_irq);
-
- vgic->type = VGIC_V2;
- vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
-
- on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
-
- *ops = &vgic_v2_ops;
- *params = vgic;
- goto out;
-
-out_unmap:
- iounmap(vgic->vctrl_base);
-out:
- return ret;
-}
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
deleted file mode 100644
index e661e7fb9d91..000000000000
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ /dev/null
@@ -1,1074 +0,0 @@
-/*
- * GICv3 distributor and redistributor emulation
- *
- * GICv3 emulation is currently only supported on a GICv3 host (because
- * we rely on the hardware's CPU interface virtualization support), but
- * supports both hardware with or without the optional GICv2 backwards
- * compatibility features.
- *
- * Limitations of the emulation:
- * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
- * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
- * - We do not support the message based interrupts (MBIs) triggered by
- * writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
- * - We do not support the (optional) backwards compatibility feature.
- * GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
- * the compatiblity feature, you can use a GICv2 in the guest, though.
- * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
- * - Priorities are not emulated (same as the GICv2 emulation). Linux
- * as a guest is fine with this, because it does not use priorities.
- * - We only support Group1 interrupts. Again Linux uses only those.
- *
- * Copyright (C) 2014 ARM Ltd.
- * Author: Andre Przywara <andre.przywara@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/cpu.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-
-#include <linux/irqchip/arm-gic-v3.h>
-#include <kvm/arm_vgic.h>
-
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
-#include "vgic.h"
-
-static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg = 0xffffffff;
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-
- return false;
-}
-
-static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg = 0;
-
- /*
- * Force ARE and DS to 1, the guest cannot change this.
- * For the time being we only support Group1 interrupts.
- */
- if (vcpu->kvm->arch.vgic.enabled)
- reg = GICD_CTLR_ENABLE_SS_G1;
- reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
- vgic_update_state(vcpu->kvm);
- return true;
- }
- return false;
-}
-
-/*
- * As this implementation does not provide compatibility
- * with GICv2 (ARE==1), we report zero CPUs in bits [5..7].
- * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
- * Also we report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
- */
-#define INTERRUPT_ID_BITS 10
-static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
-
- reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;
-
- reg |= (INTERRUPT_ID_BITS - 1) << 19;
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-
- return false;
-}
-
-static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio, phys_addr_t offset)
-{
- u32 reg;
-
- reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-
- return false;
-}
-
-static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id,
- ACCESS_WRITE_SETBIT);
-
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id,
- ACCESS_WRITE_CLEARBIT);
-
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
- return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
- return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_set_active_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
- return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_clear_active_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
- return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
- vcpu->vcpu_id);
-
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg;
-
- if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 *reg;
-
- if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
-
- return vgic_handle_cfg_reg(reg, mmio, offset);
-}
-
-/*
- * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
- * when we store the target MPIDR written by the guest.
- */
-static u32 compress_mpidr(unsigned long mpidr)
-{
- u32 ret;
-
- ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
- ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
- ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;
-
- return ret;
-}
-
-static unsigned long uncompress_mpidr(u32 value)
-{
- unsigned long mpidr;
-
- mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
- mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
- mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
- mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);
-
- return mpidr;
-}
-
-/*
- * Lookup the given MPIDR value to get the vcpu_id (if there is one)
- * and store that in the irq_spi_cpu[] array.
- * This limits the number of VCPUs to 255 for now, extending the data
- * type (or storing kvm_vcpu pointers) should lift the limit.
- * Store the original MPIDR value in an extra array to support read-as-written.
- * Unallocated MPIDRs are translated to a special value and caught
- * before any array accesses.
- */
-static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm *kvm = vcpu->kvm;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int spi;
- u32 reg;
- int vcpu_id;
- unsigned long *bmap, mpidr;
-
- /*
- * The upper 32 bits of each 64 bit register are zero,
- * as we don't support Aff3.
- */
- if ((offset & 4)) {
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- /* This region only covers SPIs, so no handling of private IRQs here. */
- spi = offset / 8;
-
- /* get the stored MPIDR for this IRQ */
- mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
- reg = mpidr;
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
-
- if (!mmio->is_write)
- return false;
-
- /*
- * Now clear the currently assigned vCPU from the map, making room
- * for the new one to be written below
- */
- vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
- if (likely(vcpu)) {
- vcpu_id = vcpu->vcpu_id;
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
- __clear_bit(spi, bmap);
- }
-
- dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
- vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);
-
- /*
- * The spec says that non-existent MPIDR values should not be
- * forwarded to any existent (v)CPU, but should be able to become
- * pending anyway. We simply keep the irq_spi_target[] array empty, so
- * the interrupt will never be injected.
- * irq_spi_cpu[irq] gets a magic value in this case.
- */
- if (likely(vcpu)) {
- vcpu_id = vcpu->vcpu_id;
- dist->irq_spi_cpu[spi] = vcpu_id;
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
- __set_bit(spi, bmap);
- } else {
- dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
- }
-
- vgic_update_state(kvm);
-
- return true;
-}
-
-/*
- * We should be careful about promising too much when a guest reads
- * this register. Don't claim to be like any hardware implementation,
- * but just report the GIC as version 3 - which is what a Linux guest
- * would check.
- */
-static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg = 0;
-
- switch (offset + GICD_IDREGS) {
- case GICD_PIDR2:
- reg = 0x3b;
- break;
- }
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
-
- return false;
-}
-
-static const struct vgic_io_range vgic_v3_dist_ranges[] = {
- {
- .base = GICD_CTLR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_ctlr,
- },
- {
- .base = GICD_TYPER,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_typer,
- },
- {
- .base = GICD_IIDR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_iidr,
- },
- {
- /* this register is optional, it is RAZ/WI if not implemented */
- .base = GICD_STATUSR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this write only register is WI when TYPER.MBIS=0 */
- .base = GICD_SETSPI_NSR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this write only register is WI when TYPER.MBIS=0 */
- .base = GICD_CLRSPI_NSR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this is RAZ/WI when DS=1 */
- .base = GICD_SETSPI_SR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this is RAZ/WI when DS=1 */
- .base = GICD_CLRSPI_SR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GICD_IGROUPR,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_rao_wi,
- },
- {
- .base = GICD_ISENABLER,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg_dist,
- },
- {
- .base = GICD_ICENABLER,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg_dist,
- },
- {
- .base = GICD_ISPENDR,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg_dist,
- },
- {
- .base = GICD_ICPENDR,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg_dist,
- },
- {
- .base = GICD_ISACTIVER,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_active_reg_dist,
- },
- {
- .base = GICD_ICACTIVER,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_active_reg_dist,
- },
- {
- .base = GICD_IPRIORITYR,
- .len = 0x400,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg_dist,
- },
- {
- /* TARGETSRn is RES0 when ARE=1 */
- .base = GICD_ITARGETSR,
- .len = 0x400,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GICD_ICFGR,
- .len = 0x100,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg_dist,
- },
- {
- /* this is RAZ/WI when DS=1 */
- .base = GICD_IGRPMODR,
- .len = 0x80,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this is RAZ/WI when DS=1 */
- .base = GICD_NSACR,
- .len = 0x100,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this is RAZ/WI when ARE=1 */
- .base = GICD_SGIR,
- .len = 0x04,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this is RAZ/WI when ARE=1 */
- .base = GICD_CPENDSGIR,
- .len = 0x10,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- /* this is RAZ/WI when ARE=1 */
- .base = GICD_SPENDSGIR,
- .len = 0x10,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GICD_IROUTER + 0x100,
- .len = 0x1ee0,
- .bits_per_irq = 64,
- .handle_mmio = handle_mmio_route_reg,
- },
- {
- .base = GICD_IDREGS,
- .len = 0x30,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_idregs,
- },
- {},
-};
-
-static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- /* since we don't support LPIs, this register is zero for now */
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 reg;
- u64 mpidr;
- struct kvm_vcpu *redist_vcpu = mmio->private;
- int target_vcpu_id = redist_vcpu->vcpu_id;
-
- /* the upper 32 bits contain the affinity value */
- if ((offset & ~3) == 4) {
- mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
- reg = compress_mpidr(mpidr);
-
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
- }
-
- reg = redist_vcpu->vcpu_id << 8;
- if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
- reg |= GICR_TYPER_LAST;
- vgic_reg_access(mmio, &reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
- return false;
-}
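
The TYPER value the handler above emulates can be sketched as a small standalone program. Processor_Number in bits [23:8] and the affinity value in the upper 32 bits follow directly from the code; the Last bit position (bit 4) is taken from the GICv3 spec rather than from this patch, so treat it as an assumption, and note that the real handler returns the value in two 32-bit MMIO halves rather than as one 64-bit word.

#include <stdint.h>
#include <stdio.h>

#define TYPER_LAST              (1u << 4)   /* GICR_TYPER.Last (assumed bit 4) */
#define TYPER_PROC_NUM_SHIFT    8           /* Processor_Number, bits [23:8]   */

static uint64_t redist_typer(unsigned int vcpu_id, unsigned int nr_vcpus,
                             uint32_t compressed_aff)
{
    uint64_t typer = (uint64_t)compressed_aff << 32;   /* affinity value */

    typer |= (uint64_t)vcpu_id << TYPER_PROC_NUM_SHIFT;
    if (vcpu_id == nr_vcpus - 1)
        typer |= TYPER_LAST;                /* last redistributor in the map */
    return typer;
}

int main(void)
{
    printf("vcpu 1 of 4: %#018llx\n",
           (unsigned long long)redist_typer(1, 4, 0x00000100));
    printf("vcpu 3 of 4: %#018llx\n",
           (unsigned long long)redist_typer(3, 4, 0x00000300));
    return 0;
}
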
-
-static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- redist_vcpu->vcpu_id,
- ACCESS_WRITE_SETBIT);
-}
-
-static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
- redist_vcpu->vcpu_id,
- ACCESS_WRITE_CLEARBIT);
-}
-
-static bool handle_mmio_set_active_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
- redist_vcpu->vcpu_id);
-}
-
-static bool handle_mmio_clear_active_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
- redist_vcpu->vcpu_id);
-}
-
-static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
- redist_vcpu->vcpu_id);
-}
-
-static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
- redist_vcpu->vcpu_id);
-}
-
-static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
- u32 *reg;
-
- reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
- redist_vcpu->vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- return false;
-}
-
-static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- struct kvm_vcpu *redist_vcpu = mmio->private;
-
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- redist_vcpu->vcpu_id, offset >> 1);
-
- return vgic_handle_cfg_reg(reg, mmio, offset);
-}
-
-#define SGI_base(x) ((x) + SZ_64K)
-
-static const struct vgic_io_range vgic_redist_ranges[] = {
- {
- .base = GICR_CTLR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_ctlr_redist,
- },
- {
- .base = GICR_TYPER,
- .len = 0x08,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_typer_redist,
- },
- {
- .base = GICR_IIDR,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_iidr,
- },
- {
- .base = GICR_WAKER,
- .len = 0x04,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = GICR_IDREGS,
- .len = 0x30,
- .bits_per_irq = 0,
- .handle_mmio = handle_mmio_idregs,
- },
- {
- .base = SGI_base(GICR_IGROUPR0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_rao_wi,
- },
- {
- .base = SGI_base(GICR_ISENABLER0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_enable_reg_redist,
- },
- {
- .base = SGI_base(GICR_ICENABLER0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_enable_reg_redist,
- },
- {
- .base = SGI_base(GICR_ISPENDR0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_pending_reg_redist,
- },
- {
- .base = SGI_base(GICR_ICPENDR0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_pending_reg_redist,
- },
- {
- .base = SGI_base(GICR_ISACTIVER0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_set_active_reg_redist,
- },
- {
- .base = SGI_base(GICR_ICACTIVER0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_clear_active_reg_redist,
- },
- {
- .base = SGI_base(GICR_IPRIORITYR0),
- .len = 0x20,
- .bits_per_irq = 8,
- .handle_mmio = handle_mmio_priority_reg_redist,
- },
- {
- .base = SGI_base(GICR_ICFGR0),
- .len = 0x08,
- .bits_per_irq = 2,
- .handle_mmio = handle_mmio_cfg_reg_redist,
- },
- {
- .base = SGI_base(GICR_IGRPMODR0),
- .len = 0x04,
- .bits_per_irq = 1,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {
- .base = SGI_base(GICR_NSACR),
- .len = 0x04,
- .handle_mmio = handle_mmio_raz_wi,
- },
- {},
-};
-
-static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- if (vgic_queue_irq(vcpu, 0, irq)) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- return true;
- }
-
- return false;
-}
-
-static int vgic_v3_map_resources(struct kvm *kvm,
- const struct vgic_params *params)
-{
- int ret = 0;
- struct vgic_dist *dist = &kvm->arch.vgic;
- gpa_t rdbase = dist->vgic_redist_base;
- struct vgic_io_device *iodevs = NULL;
- int i;
-
- if (!irqchip_in_kernel(kvm))
- return 0;
-
- mutex_lock(&kvm->lock);
-
- if (vgic_ready(kvm))
- goto out;
-
- if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
- IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
- kvm_err("Need to set vgic distributor addresses first\n");
- ret = -ENXIO;
- goto out;
- }
-
- /*
- * For a VGICv3 we require the userland to explicitly initialize
- * the VGIC before we need to use it.
- */
- if (!vgic_initialized(kvm)) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
- GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
- -1, &dist->dist_iodev);
- if (ret)
- goto out;
-
- iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
- if (!iodevs) {
- ret = -ENOMEM;
- goto out_unregister;
- }
-
- for (i = 0; i < dist->nr_cpus; i++) {
- ret = vgic_register_kvm_io_dev(kvm, rdbase,
- SZ_128K, vgic_redist_ranges,
- i, &iodevs[i]);
- if (ret)
- goto out_unregister;
- rdbase += GIC_V3_REDIST_SIZE;
- }
-
- dist->redist_iodevs = iodevs;
- dist->ready = true;
- goto out;
-
-out_unregister:
- kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
- if (iodevs) {
- for (i = 0; i < dist->nr_cpus; i++) {
- if (iodevs[i].dev.ops)
- kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
- &iodevs[i].dev);
- }
- }
-
-out:
- if (ret)
- kvm_vgic_destroy(kvm);
- mutex_unlock(&kvm->lock);
- return ret;
-}
-
-static int vgic_v3_init_model(struct kvm *kvm)
-{
- int i;
- u32 mpidr;
- struct vgic_dist *dist = &kvm->arch.vgic;
- int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
-
- dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
- GFP_KERNEL);
-
- if (!dist->irq_spi_mpidr)
- return -ENOMEM;
-
- /* Initialize the target VCPUs for each IRQ to VCPU 0 */
- mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
- for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
- dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
- dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
- vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
- }
-
- return 0;
-}
-
-/* GICv3 does not keep track of SGI sources anymore. */
-static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
-{
-}
-
-void vgic_v3_init_emulation(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
- dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
- dist->vm_ops.init_model = vgic_v3_init_model;
- dist->vm_ops.map_resources = vgic_v3_map_resources;
-
- kvm->arch.max_vcpus = KVM_MAX_VCPUS;
-}
-
-/*
- * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
- * generation register ICC_SGI1R_EL1) with a given VCPU.
- * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
- * return -1.
- */
-static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
-{
- unsigned long affinity;
- int level0;
-
- /*
- * Split the current VCPU's MPIDR into affinity level 0 and the
- * rest as this is what we have to compare against.
- */
- affinity = kvm_vcpu_get_mpidr_aff(vcpu);
- level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
- affinity &= ~MPIDR_LEVEL_MASK;
-
- /* bail out if the upper three levels don't match */
- if (sgi_aff != affinity)
- return -1;
-
- /* Is this VCPU's bit set in the mask ? */
- if (!(sgi_cpu_mask & BIT(level0)))
- return -1;
-
- return level0;
-}
-
-#define SGI_AFFINITY_LEVEL(reg, level) \
- ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
- >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
-
-/**
- * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
- * @vcpu: The VCPU requesting a SGI
- * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
- *
- * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
- * This will trap in sys_regs.c and call this function.
- * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
- * target processors as well as a bitmask of 16 Aff0 CPUs.
- * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
- * check for matching ones. If this bit is set, we signal all VCPUs except the
- * calling one.
- */
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
-{
- struct kvm *kvm = vcpu->kvm;
- struct kvm_vcpu *c_vcpu;
- struct vgic_dist *dist = &kvm->arch.vgic;
- u16 target_cpus;
- u64 mpidr;
- int sgi, c;
- int vcpu_id = vcpu->vcpu_id;
- bool broadcast;
- int updated = 0;
-
- sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
- broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
- target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
- mpidr = SGI_AFFINITY_LEVEL(reg, 3);
- mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
- mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
-
- /*
- * We take the dist lock here, because we come from the sysregs
- * code path and not from the MMIO one (which already takes the lock).
- */
- spin_lock(&dist->lock);
-
- /*
- * We iterate over all VCPUs to find the MPIDRs matching the request.
-	 * If we have handled one CPU, we clear its bit to detect early
-	 * if we are already finished. This avoids iterating through all
-	 * VCPUs when most of the time we just signal a single VCPU.
- */
- kvm_for_each_vcpu(c, c_vcpu, kvm) {
-
- /* Exit early if we have dealt with all requested CPUs */
- if (!broadcast && target_cpus == 0)
- break;
-
- /* Don't signal the calling VCPU */
- if (broadcast && c == vcpu_id)
- continue;
-
- if (!broadcast) {
- int level0;
-
- level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
- if (level0 == -1)
- continue;
-
- /* remove this matching VCPU from the mask */
- target_cpus &= ~BIT(level0);
- }
-
- /* Flag the SGI as pending */
- vgic_dist_irq_set_pending(c_vcpu, sgi);
- updated = 1;
- kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
- }
- if (updated)
- vgic_update_state(vcpu->kvm);
- spin_unlock(&dist->lock);
- if (updated)
- vgic_kick_vcpus(vcpu->kvm);
-}
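
For reference, the field extraction above can be sketched as a standalone program. The shift and mask values below are reproduced from the GICv3 register layout rather than taken from this patch, so treat them as assumptions and check <linux/irqchip/arm-gic-v3.h> before reuse.

#include <stdint.h>
#include <stdio.h>

/* ICC_SGI1R_EL1 field positions (illustrative copies of the spec layout) */
#define SGI_TARGET_LIST_MASK   0xffffULL        /* bits [15:0]  */
#define SGI_AFF1_SHIFT         16               /* bits [23:16] */
#define SGI_ID_SHIFT           24               /* bits [27:24] */
#define SGI_AFF2_SHIFT         32               /* bits [39:32] */
#define SGI_IRM_BIT            40               /* bit  40      */
#define SGI_AFF3_SHIFT         48               /* bits [55:48] */

int main(void)
{
    /* Example write: SGI 5, no broadcast, Aff3.Aff2.Aff1 = 0.0.1,
     * Aff0 target list 0b101 (CPUs 0 and 2 at that affinity). */
    uint64_t reg = (5ULL << SGI_ID_SHIFT) | (1ULL << SGI_AFF1_SHIFT) | 0x5ULL;

    unsigned int sgi    = (reg >> SGI_ID_SHIFT) & 0xf;
    unsigned int target = reg & SGI_TARGET_LIST_MASK;
    int broadcast       = !!(reg & (1ULL << SGI_IRM_BIT));
    unsigned int aff1   = (reg >> SGI_AFF1_SHIFT) & 0xff;
    unsigned int aff2   = (reg >> SGI_AFF2_SHIFT) & 0xff;
    unsigned int aff3   = (reg >> SGI_AFF3_SHIFT) & 0xff;

    printf("SGI%u broadcast=%d Aff3.2.1=%u.%u.%u Aff0 mask=%#x\n",
           sgi, broadcast, aff3, aff2, aff1, target);

    /* As in match_mpidr(): a VCPU whose upper affinity levels equal
     * Aff3.2.1 is signalled iff its Aff0 bit is set in the target list. */
    return 0;
}
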
-
-static int vgic_v3_create(struct kvm_device *dev, u32 type)
-{
- return kvm_vgic_create(dev->kvm, type);
-}
-
-static void vgic_v3_destroy(struct kvm_device *dev)
-{
- kfree(dev);
-}
-
-static int vgic_v3_set_attr(struct kvm_device *dev,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- ret = vgic_set_common_attr(dev, attr);
- if (ret != -ENXIO)
- return ret;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- return -ENXIO;
- }
-
- return -ENXIO;
-}
-
-static int vgic_v3_get_attr(struct kvm_device *dev,
- struct kvm_device_attr *attr)
-{
- int ret;
-
- ret = vgic_get_common_attr(dev, attr);
- if (ret != -ENXIO)
- return ret;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- return -ENXIO;
- }
-
- return -ENXIO;
-}
-
-static int vgic_v3_has_attr(struct kvm_device *dev,
- struct kvm_device_attr *attr)
-{
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR:
- switch (attr->attr) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- return -ENXIO;
- case KVM_VGIC_V3_ADDR_TYPE_DIST:
- case KVM_VGIC_V3_ADDR_TYPE_REDIST:
- return 0;
- }
- break;
- case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
- case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
- return -ENXIO;
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
- return 0;
- case KVM_DEV_ARM_VGIC_GRP_CTRL:
- switch (attr->attr) {
- case KVM_DEV_ARM_VGIC_CTRL_INIT:
- return 0;
- }
- }
- return -ENXIO;
-}
-
-struct kvm_device_ops kvm_arm_vgic_v3_ops = {
- .name = "kvm-arm-vgic-v3",
- .create = vgic_v3_create,
- .destroy = vgic_v3_destroy,
- .set_attr = vgic_v3_set_attr,
- .get_attr = vgic_v3_get_attr,
- .has_attr = vgic_v3_has_attr,
-};
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
deleted file mode 100644
index 75b02fa86436..000000000000
--- a/virt/kvm/arm/vgic-v3.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright (C) 2013 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/cpu.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <linux/irqchip/arm-gic-v3.h>
-#include <linux/irqchip/arm-gic-common.h>
-
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-
-static u32 ich_vtr_el2;
-
-static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
-{
- struct vgic_lr lr_desc;
- u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
-
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
- lr_desc.irq = val & ICH_LR_VIRTUAL_ID_MASK;
- else
- lr_desc.irq = val & GICH_LR_VIRTUALID;
-
- lr_desc.source = 0;
- if (lr_desc.irq <= 15 &&
- vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
- lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
-
- lr_desc.state = 0;
-
- if (val & ICH_LR_PENDING_BIT)
- lr_desc.state |= LR_STATE_PENDING;
- if (val & ICH_LR_ACTIVE_BIT)
- lr_desc.state |= LR_STATE_ACTIVE;
- if (val & ICH_LR_EOI)
- lr_desc.state |= LR_EOI_INT;
- if (val & ICH_LR_HW) {
- lr_desc.state |= LR_HW;
- lr_desc.hwirq = (val >> ICH_LR_PHYS_ID_SHIFT) & GENMASK(9, 0);
- }
-
- return lr_desc;
-}
-
-static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr lr_desc)
-{
- u64 lr_val;
-
- lr_val = lr_desc.irq;
-
- /*
- * Currently all guest IRQs are Group1, as Group0 would result
- * in a FIQ in the guest, which it wouldn't expect.
- * Eventually we want to make this configurable, so we may revisit
- * this in the future.
- */
- switch (vcpu->kvm->arch.vgic.vgic_model) {
- case KVM_DEV_TYPE_ARM_VGIC_V3:
- lr_val |= ICH_LR_GROUP;
- break;
- case KVM_DEV_TYPE_ARM_VGIC_V2:
- if (lr_desc.irq < VGIC_NR_SGIS)
- lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
- break;
- default:
- BUG();
- }
-
- if (lr_desc.state & LR_STATE_PENDING)
- lr_val |= ICH_LR_PENDING_BIT;
- if (lr_desc.state & LR_STATE_ACTIVE)
- lr_val |= ICH_LR_ACTIVE_BIT;
- if (lr_desc.state & LR_EOI_INT)
- lr_val |= ICH_LR_EOI;
- if (lr_desc.state & LR_HW) {
- lr_val |= ICH_LR_HW;
- lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
- }
-
- vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;
-
- if (!(lr_desc.state & LR_STATE_MASK))
- vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
- else
- vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
-}
-
-static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr;
-}
-
-static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
-}
-
-static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
-}
-
-static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
-{
- u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
- u32 ret = 0;
-
- if (misr & ICH_MISR_EOI)
- ret |= INT_STATUS_EOI;
- if (misr & ICH_MISR_U)
- ret |= INT_STATUS_UNDERFLOW;
-
- return ret;
-}
-
-static void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
-{
- u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
-
- vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
- vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
- vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
- vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
-}
-
-static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE;
-}
-
-static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE;
-}
-
-static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
-{
- u32 vmcr;
-
- vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
- vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
- vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
- vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
-
- vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
-}
-
-static void vgic_v3_enable(struct kvm_vcpu *vcpu)
-{
- struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
-
- /*
- * By forcing VMCR to zero, the GIC will restore the binary
- * points to their reset values. Anything else resets to zero
- * anyway.
- */
- vgic_v3->vgic_vmcr = 0;
- vgic_v3->vgic_elrsr = ~0;
-
- /*
-	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
- * way, so we force SRE to 1 to demonstrate this to the guest.
- * This goes with the spec allowing the value to be RAO/WI.
- */
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
- vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
- else
- vgic_v3->vgic_sre = 0;
-
- /* Get the show on the road... */
- vgic_v3->vgic_hcr = ICH_HCR_EN;
-}
-
-static const struct vgic_ops vgic_v3_ops = {
- .get_lr = vgic_v3_get_lr,
- .set_lr = vgic_v3_set_lr,
- .get_elrsr = vgic_v3_get_elrsr,
- .get_eisr = vgic_v3_get_eisr,
- .clear_eisr = vgic_v3_clear_eisr,
- .get_interrupt_status = vgic_v3_get_interrupt_status,
- .enable_underflow = vgic_v3_enable_underflow,
- .disable_underflow = vgic_v3_disable_underflow,
- .get_vmcr = vgic_v3_get_vmcr,
- .set_vmcr = vgic_v3_set_vmcr,
- .enable = vgic_v3_enable,
-};
-
-static struct vgic_params vgic_v3_params;
-
-static void vgic_cpu_init_lrs(void *params)
-{
- kvm_call_hyp(__vgic_v3_init_lrs);
-}
-
-/**
- * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
- * @gic_kvm_info: pointer to the GIC description
- * @ops: address of a pointer to the GICv3 operations
- * @params: address of a pointer to HW-specific parameters
- *
- * Returns 0 if a GICv3 has been found, with the low level operations
- * in *ops and the HW parameters in *params. Returns an error code
- * otherwise.
- */
-int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
- const struct vgic_ops **ops,
- const struct vgic_params **params)
-{
- int ret = 0;
- struct vgic_params *vgic = &vgic_v3_params;
- const struct resource *vcpu_res = &gic_kvm_info->vcpu;
-
- vgic->maint_irq = gic_kvm_info->maint_irq;
-
- ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
-
- /*
-	 * The ListRegs field is 5 bits, but there is an architectural
- * maximum of 16 list registers. Just ignore bit 4...
- */
- vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1;
- vgic->can_emulate_gicv2 = false;
-
- if (!vcpu_res->start) {
- kvm_info("GICv3: no GICV resource entry\n");
- vgic->vcpu_base = 0;
- } else if (!PAGE_ALIGNED(vcpu_res->start)) {
- pr_warn("GICV physical address 0x%llx not page aligned\n",
- (unsigned long long)vcpu_res->start);
- vgic->vcpu_base = 0;
- } else if (!PAGE_ALIGNED(resource_size(vcpu_res))) {
- pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
- (unsigned long long)resource_size(vcpu_res),
- PAGE_SIZE);
- } else {
- vgic->vcpu_base = vcpu_res->start;
- vgic->can_emulate_gicv2 = true;
- kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
- KVM_DEV_TYPE_ARM_VGIC_V2);
- }
- if (vgic->vcpu_base == 0)
- kvm_info("disabling GICv2 emulation\n");
- kvm_register_device_ops(&kvm_arm_vgic_v3_ops, KVM_DEV_TYPE_ARM_VGIC_V3);
-
- vgic->vctrl_base = NULL;
- vgic->type = VGIC_V3;
- vgic->max_gic_vcpus = VGIC_V3_MAX_CPUS;
-
- kvm_info("GICV base=0x%llx, IRQ=%d\n",
- vgic->vcpu_base, vgic->maint_irq);
-
- on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
-
- *ops = &vgic_v3_ops;
- *params = vgic;
-
- return ret;
-}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
deleted file mode 100644
index 67cb5e948be2..000000000000
--- a/virt/kvm/arm/vgic.c
+++ /dev/null
@@ -1,2417 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/cpu.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/rculist.h>
-#include <linux/uaccess.h>
-
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-#include <trace/events/kvm.h>
-#include <asm/kvm.h>
-#include <kvm/iodev.h>
-#include <linux/irqchip/arm-gic-common.h>
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-/*
- * How the whole thing works (courtesy of Christoffer Dall):
- *
- * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
- * something is pending on the CPU interface.
- * - Interrupts that are pending on the distributor are stored on the
- * vgic.irq_pending vgic bitmap (this bitmap is updated by both user land
- * ioctls and guest mmio ops, and other in-kernel peripherals such as the
- * arch. timers).
- * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
- * recalculated
- * - To calculate the oracle, we need info for each cpu from
- * compute_pending_for_cpu, which considers:
- * - PPI: dist->irq_pending & dist->irq_enable
- * - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
- * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
- * registers, stored on each vcpu. We only keep one bit of
- * information per interrupt, making sure that only one vcpu can
- * accept the interrupt.
- * - If any of the above state changes, we must recalculate the oracle.
- * - The same is true when injecting an interrupt, except that we only
- * consider a single interrupt at a time. The irq_spi_cpu array
- * contains the target CPU for each SPI.
- *
- * The handling of level interrupts adds some extra complexity. We
- * need to track when the interrupt has been EOIed, so we can sample
- * the 'line' again. This is achieved as such:
- *
- * - When a level interrupt is moved onto a vcpu, the corresponding
- * bit in irq_queued is set. As long as this bit is set, the line
- * will be ignored for further interrupts. The interrupt is injected
- * into the vcpu with the GICH_LR_EOI bit set (generate a
- * maintenance interrupt on EOI).
- * - When the interrupt is EOIed, the maintenance interrupt fires,
- * and clears the corresponding bit in irq_queued. This allows the
- * interrupt line to be sampled again.
- * - Note that level-triggered interrupts can also be set to pending from
- * writes to GICD_ISPENDRn and lowering the external input line does not
- * cause the interrupt to become inactive in such a situation.
- * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
- * inactive as long as the external input line is held high.
- *
- *
- * Initialization rules: there are multiple stages to the vgic
- * initialization, both for the distributor and the CPU interfaces.
- *
- * Distributor:
- *
- * - kvm_vgic_early_init(): initialization of static data that doesn't
- * depend on any sizing information or emulation type. No allocation
- * is allowed there.
- *
- * - vgic_init(): allocation and initialization of the generic data
- * structures that depend on sizing information (number of CPUs,
- * number of interrupts). Also initializes the vcpu specific data
- * structures. Can be executed lazily for GICv2.
- * [to be renamed to kvm_vgic_init??]
- *
- * CPU Interface:
- *
- * - kvm_vgic_cpu_early_init(): initialization of static data that
- * doesn't depend on any sizing information or emulation type. No
- * allocation is allowed there.
- */
-
-#include "vgic.h"
-
-static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
-static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
-static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
-static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
-static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
-static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
- int virt_irq);
-static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
-
-static const struct vgic_ops *vgic_ops;
-static const struct vgic_params *vgic;
-
-static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
-{
- vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
-}
-
-static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
-{
- return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
-}
-
-int kvm_vgic_map_resources(struct kvm *kvm)
-{
- return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
-}
-
-/*
- * struct vgic_bitmap contains a bitmap made of unsigned longs, but
- * extracts u32s out of them.
- *
- * This does not work on 64-bit BE systems, because the bitmap access
- * will store two consecutive 32-bit words with the higher-addressed
- * register's bits at the lower index and the lower-addressed register's
- * bits at the higher index.
- *
- * Therefore, swizzle the register index when accessing the 32-bit word
- * registers to access the right register's value.
- */
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
-#define REG_OFFSET_SWIZZLE 1
-#else
-#define REG_OFFSET_SWIZZLE 0
-#endif
-
-static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
-{
- int nr_longs;
-
- nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
-
- b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
- if (!b->private)
- return -ENOMEM;
-
- b->shared = b->private + nr_cpus;
-
- return 0;
-}
-
-static void vgic_free_bitmap(struct vgic_bitmap *b)
-{
- kfree(b->private);
- b->private = NULL;
- b->shared = NULL;
-}
-
-/*
- * Call this function to convert a u64 value to an unsigned long * bitmask
- * in a way that works on both 32-bit and 64-bit LE and BE platforms.
- *
- * Warning: Calling this function may modify *val.
- */
-static unsigned long *u64_to_bitmask(u64 *val)
-{
-#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
- *val = (*val >> 32) | (*val << 32);
-#endif
- return (unsigned long *)val;
-}
-
-u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
-{
- offset >>= 2;
- if (!offset)
- return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
- else
- return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
-}
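
The swizzle only changes which u32 slot of the shared bitmap a register word maps to. A minimal sketch of just that index arithmetic (the per-cpu/shared split is reduced to a -1 marker, everything else is left out):

#include <stdio.h>

/* Which u32 slot of the shared bitmap a 32-bit register access lands on:
 * slot (word - 1) on little-endian, slot (word - 1) ^ 1 on 64-bit BE,
 * so the two halves of each unsigned long swap places. */
static int shared_word_index(unsigned int offset_bytes, int swizzle)
{
    unsigned int word = offset_bytes >> 2;

    return word ? (int)((word - 1) ^ (unsigned int)swizzle) : -1;
}

int main(void)
{
    unsigned int off;

    for (off = 4; off <= 16; off += 4)
        printf("offset %#x -> shared u32 #%d (LE) / #%d (BE64)\n",
               off, shared_word_index(off, 0), shared_word_index(off, 1));
    return 0;
}
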
-
-static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
- int cpuid, int irq)
-{
- if (irq < VGIC_NR_PRIVATE_IRQS)
- return test_bit(irq, x->private + cpuid);
-
- return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
-}
-
-void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val)
-{
- unsigned long *reg;
-
- if (irq < VGIC_NR_PRIVATE_IRQS) {
- reg = x->private + cpuid;
- } else {
- reg = x->shared;
- irq -= VGIC_NR_PRIVATE_IRQS;
- }
-
- if (val)
- set_bit(irq, reg);
- else
- clear_bit(irq, reg);
-}
-
-static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
-{
- return x->private + cpuid;
-}
-
-unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
-{
- return x->shared;
-}
-
-static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
-{
- int size;
-
- size = nr_cpus * VGIC_NR_PRIVATE_IRQS;
- size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
-
- x->private = kzalloc(size, GFP_KERNEL);
- if (!x->private)
- return -ENOMEM;
-
- x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
- return 0;
-}
-
-static void vgic_free_bytemap(struct vgic_bytemap *b)
-{
- kfree(b->private);
- b->private = NULL;
- b->shared = NULL;
-}
-
-u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
-{
- u32 *reg;
-
- if (offset < VGIC_NR_PRIVATE_IRQS) {
- reg = x->private;
- offset += cpuid * VGIC_NR_PRIVATE_IRQS;
- } else {
- reg = x->shared;
- offset -= VGIC_NR_PRIVATE_IRQS;
- }
-
- return reg + (offset / sizeof(u32));
-}
-
-#define VGIC_CFG_LEVEL 0
-#define VGIC_CFG_EDGE 1
-
-static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- int irq_val;
-
- irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
- return irq_val == VGIC_CFG_EDGE;
-}
-
-static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
-}
-
-static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
-}
-
-static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
-}
-
-static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
-}
-
-static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
-}
-
-static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
-}
-
-static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
-}
-
-static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
-}
-
-static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
-}
-
-static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
-}
-
-static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
-}
-
-static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
- if (!vgic_dist_irq_get_level(vcpu, irq)) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- if (!compute_pending_for_cpu(vcpu))
- clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
- }
-}
-
-static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
-}
-
-void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
-}
-
-void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
-}
-
-static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
-{
- if (irq < VGIC_NR_PRIVATE_IRQS)
- set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
- else
- set_bit(irq - VGIC_NR_PRIVATE_IRQS,
- vcpu->arch.vgic_cpu.pending_shared);
-}
-
-void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
-{
- if (irq < VGIC_NR_PRIVATE_IRQS)
- clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
- else
- clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
- vcpu->arch.vgic_cpu.pending_shared);
-}
-
-static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
-{
- return !vgic_irq_is_queued(vcpu, irq);
-}
-
-/**
- * vgic_reg_access - access vgic register
- * @mmio: pointer to the data describing the mmio access
- * @reg: pointer to the virtual backing of vgic distributor data
- * @offset: least significant 2 bits used for word offset
- * @mode: ACCESS_ mode (see defines above)
- *
- * Helper to make vgic register access easier using one of the access
- * modes defined for vgic register access
- * (read, raz, write-ignored, setbit, clearbit, write)
- */
-void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
- phys_addr_t offset, int mode)
-{
- int word_offset = (offset & 3) * 8;
- u32 mask = (1UL << (mmio->len * 8)) - 1;
- u32 regval;
-
- /*
- * Any alignment fault should have been delivered to the guest
- * directly (ARM ARM B3.12.7 "Prioritization of aborts").
- */
-
- if (reg) {
- regval = *reg;
- } else {
- BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
- regval = 0;
- }
-
- if (mmio->is_write) {
- u32 data = mmio_data_read(mmio, mask) << word_offset;
- switch (ACCESS_WRITE_MASK(mode)) {
- case ACCESS_WRITE_IGNORED:
- return;
-
- case ACCESS_WRITE_SETBIT:
- regval |= data;
- break;
-
- case ACCESS_WRITE_CLEARBIT:
- regval &= ~data;
- break;
-
- case ACCESS_WRITE_VALUE:
- regval = (regval & ~(mask << word_offset)) | data;
- break;
- }
- *reg = regval;
- } else {
- switch (ACCESS_READ_MASK(mode)) {
- case ACCESS_READ_RAZ:
- regval = 0;
- /* fall through */
-
- case ACCESS_READ_VALUE:
- mmio_data_write(mmio, mask, regval >> word_offset);
- }
- }
-}
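
The write half of these access modes can be modelled on a plain u32. This simplified stand-in assumes a full-word write (the real function above also handles sub-word offsets and the read modes), and the mode names only mirror the ACCESS_WRITE_* defines:

#include <stdint.h>
#include <stdio.h>

enum { WR_IGNORED, WR_SETBIT, WR_CLEARBIT, WR_VALUE };

/* How a full-word guest write combines with the backing register
 * under each write mode. */
static uint32_t apply_write(uint32_t reg, uint32_t data, int mode)
{
    switch (mode) {
    case WR_SETBIT:
        return reg | data;      /* ISENABLER/ISPENDR style */
    case WR_CLEARBIT:
        return reg & ~data;     /* ICENABLER/ICPENDR style */
    case WR_VALUE:
        return data;            /* plain value register    */
    default:
        return reg;             /* write ignored           */
    }
}

int main(void)
{
    uint32_t reg = 0x0000f0f0, data = 0x0000ff00;

    printf("setbit:   %#010x\n", apply_write(reg, data, WR_SETBIT));
    printf("clearbit: %#010x\n", apply_write(reg, data, WR_CLEARBIT));
    printf("value:    %#010x\n", apply_write(reg, data, WR_VALUE));
    return 0;
}
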
-
-bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- vgic_reg_access(mmio, NULL, offset,
- ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
- return false;
-}
-
-bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access)
-{
- u32 *reg;
- int mode = ACCESS_READ_VALUE | access;
- struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
-
- reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset, mode);
- if (mmio->is_write) {
- if (access & ACCESS_WRITE_CLEARBIT) {
- if (offset < 4) /* Force SGI enabled */
- *reg |= 0xffff;
- vgic_retire_disabled_irqs(target_vcpu);
- }
- vgic_update_state(kvm);
- return true;
- }
-
- return false;
-}
-
-bool vgic_handle_set_pending_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
-{
- u32 *reg, orig;
- u32 level_mask;
- int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
- level_mask = (~(*reg));
-
- /* Mark both level and edge triggered irqs as pending */
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
- orig = *reg;
- vgic_reg_access(mmio, reg, offset, mode);
-
- if (mmio->is_write) {
- /* Set the soft-pending flag only for level-triggered irqs */
- reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset, mode);
- *reg &= level_mask;
-
- /* Ignore writes to SGIs */
- if (offset < 2) {
- *reg &= ~0xffff;
- *reg |= orig & 0xffff;
- }
-
- vgic_update_state(kvm);
- return true;
- }
-
- return false;
-}
-
-bool vgic_handle_clear_pending_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
-{
- u32 *level_active;
- u32 *reg, orig;
- int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
- orig = *reg;
- vgic_reg_access(mmio, reg, offset, mode);
- if (mmio->is_write) {
- /* Re-set level triggered level-active interrupts */
- level_active = vgic_bitmap_get_reg(&dist->irq_level,
- vcpu_id, offset);
- reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
- *reg |= *level_active;
-
- /* Ignore writes to SGIs */
- if (offset < 2) {
- *reg &= ~0xffff;
- *reg |= orig & 0xffff;
- }
-
- /* Clear soft-pending flags */
- reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
- vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset, mode);
-
- vgic_update_state(kvm);
- return true;
- }
- return false;
-}
-
-bool vgic_handle_set_active_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
-{
- u32 *reg;
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
-
- if (mmio->is_write) {
- vgic_update_state(kvm);
- return true;
- }
-
- return false;
-}
-
-bool vgic_handle_clear_active_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id)
-{
- u32 *reg;
- struct vgic_dist *dist = &kvm->arch.vgic;
-
- reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
- vgic_reg_access(mmio, reg, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
-
- if (mmio->is_write) {
- vgic_update_state(kvm);
- return true;
- }
-
- return false;
-}
-
-static u32 vgic_cfg_expand(u16 val)
-{
- u32 res = 0;
- int i;
-
- /*
- * Turn a 16bit value like abcd...mnop into a 32bit word
- * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
- */
- for (i = 0; i < 16; i++)
- res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
-
- return res;
-}
-
-static u16 vgic_cfg_compress(u32 val)
-{
- u16 res = 0;
- int i;
-
- /*
- * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
- * abcd...mnop which is what we really care about.
- */
- for (i = 0; i < 16; i++)
- res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
-
- return res;
-}
-
-/*
- * The distributor uses 2 bits per IRQ for the CFG register, but the
- * LSB is always 0. As such, we only keep the upper bit, and use the
- * two above functions to compress/expand the bits
- */
-bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
- phys_addr_t offset)
-{
- u32 val;
-
- if (offset & 4)
- val = *reg >> 16;
- else
- val = *reg & 0xffff;
-
- val = vgic_cfg_expand(val);
- vgic_reg_access(mmio, &val, offset,
- ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
- if (mmio->is_write) {
- /* Ignore writes to read-only SGI and PPI bits */
- if (offset < 8)
- return false;
-
- val = vgic_cfg_compress(val);
- if (offset & 4) {
- *reg &= 0xffff;
- *reg |= val << 16;
- } else {
- *reg &= 0xffff << 16;
- *reg |= val;
- }
- }
-
- return false;
-}
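
Since only the "edge" bit of each 2-bit field survives the packing, compress(expand(x)) must be the identity on 16-bit values. A standalone round-trip check of the two helpers above, re-typed with stdint for a userspace build:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cfg_expand(uint16_t val)
{
    uint32_t res = 0;
    int i;

    /* bit i of the packed value becomes bit (2*i + 1) of the register */
    for (i = 0; i < 16; i++)
        res |= (uint32_t)((val >> i) & 1) << (2 * i + 1);
    return res;
}

static uint16_t cfg_compress(uint32_t val)
{
    uint16_t res = 0;
    int i;

    for (i = 0; i < 16; i++)
        res |= (uint16_t)(((val >> (2 * i + 1)) & 1) << i);
    return res;
}

int main(void)
{
    unsigned int v;

    for (v = 0; v <= 0xffff; v++)
        assert(cfg_compress(cfg_expand((uint16_t)v)) == (uint16_t)v);
    printf("round-trip OK, e.g. %#06x -> %#010x\n", 0xa5a5, cfg_expand(0xa5a5));
    return 0;
}
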
-
-/**
- * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
- * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
- *
- * Move any IRQs that have already been assigned to LRs back to the
- * emulated distributor state so that the complete emulated state can be read
- * from the main emulation structures without investigating the LRs.
- */
-void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
-{
- u64 elrsr = vgic_get_elrsr(vcpu);
- unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
- int i;
-
- for_each_clear_bit(i, elrsr_ptr, vgic->nr_lr) {
- struct vgic_lr lr = vgic_get_lr(vcpu, i);
-
- /*
- * There are three options for the state bits:
- *
- * 01: pending
- * 10: active
- * 11: pending and active
- */
- BUG_ON(!(lr.state & LR_STATE_MASK));
-
- /* Reestablish SGI source for pending and active IRQs */
- if (lr.irq < VGIC_NR_SGIS)
- add_sgi_source(vcpu, lr.irq, lr.source);
-
- /*
- * If the LR holds an active (10) or a pending and active (11)
- * interrupt then move the active state to the
- * distributor tracking bit.
- */
- if (lr.state & LR_STATE_ACTIVE)
- vgic_irq_set_active(vcpu, lr.irq);
-
- /*
- * Reestablish the pending state on the distributor and the
- * CPU interface and mark the LR as free for other use.
- */
- vgic_retire_lr(i, vcpu);
-
- /* Finally update the VGIC state. */
- vgic_update_state(vcpu->kvm);
- }
-}
-
-const
-struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
- int len, gpa_t offset)
-{
- while (ranges->len) {
- if (offset >= ranges->base &&
- (offset + len) <= (ranges->base + ranges->len))
- return ranges;
- ranges++;
- }
-
- return NULL;
-}
-
-static bool vgic_validate_access(const struct vgic_dist *dist,
- const struct vgic_io_range *range,
- unsigned long offset)
-{
- int irq;
-
- if (!range->bits_per_irq)
- return true; /* Not an irq-based access */
-
- irq = offset * 8 / range->bits_per_irq;
- if (irq >= dist->nr_irqs)
- return false;
-
- return true;
-}
-
-/*
- * Call the respective handler function for the given range.
- * We split up any 64 bit accesses into two consecutive 32 bit
- * handler calls and merge the result afterwards.
- * We do this in a little endian fashion regardless of the host's
- * or guest's endianness, because the GIC is always LE and the rest of
- * the code (vgic_reg_access) also puts it in a LE fashion already.
- * At this point we have already identified the handler function, so
- * range points to that one entry and offset is relative to this.
- */
-static bool call_range_handler(struct kvm_vcpu *vcpu,
- struct kvm_exit_mmio *mmio,
- unsigned long offset,
- const struct vgic_io_range *range)
-{
- struct kvm_exit_mmio mmio32;
- bool ret;
-
- if (likely(mmio->len <= 4))
- return range->handle_mmio(vcpu, mmio, offset);
-
- /*
- * Any access bigger than 4 bytes (that we currently handle in KVM)
- * is actually 8 bytes long, caused by a 64-bit access
- */
-
- mmio32.len = 4;
- mmio32.is_write = mmio->is_write;
- mmio32.private = mmio->private;
-
- mmio32.phys_addr = mmio->phys_addr + 4;
- mmio32.data = &((u32 *)mmio->data)[1];
- ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
-
- mmio32.phys_addr = mmio->phys_addr;
- mmio32.data = &((u32 *)mmio->data)[0];
- ret |= range->handle_mmio(vcpu, &mmio32, offset);
-
- return ret;
-}
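
A minimal sketch of the split described above, using a hypothetical handle32() in place of the range handler: one 8-byte access becomes two 4-byte calls, the lower-addressed half at offset and the higher-addressed half at offset + 4, mirroring the [1]/[0] indexing in call_range_handler().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void handle32(unsigned long offset, uint32_t data)
{
    printf("  32-bit handler: offset %#lx, data %#010x\n", offset, data);
}

static void handle_access(unsigned long offset, const void *buf, int len)
{
    uint32_t lo = 0, hi = 0;

    if (len <= 4) {
        memcpy(&lo, buf, (size_t)len);
        handle32(offset, lo);
        return;
    }

    /* 8-byte access: lower-addressed 4 bytes go to 'offset', the
     * higher-addressed 4 bytes to 'offset + 4'. */
    memcpy(&lo, buf, 4);
    memcpy(&hi, (const unsigned char *)buf + 4, 4);
    handle32(offset + 4, hi);
    handle32(offset, lo);
}

int main(void)
{
    uint64_t val = 0x1122334455667788ULL;

    handle_access(0x6100, &val, (int)sizeof(val));
    return 0;
}
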
-
-/**
- * vgic_handle_mmio_access - handle an in-kernel MMIO access
- * This is called by the read/write KVM IO device wrappers below.
- * @vcpu: pointer to the vcpu performing the access
- * @this: pointer to the KVM IO device in charge
- * @addr: guest physical address of the access
- * @len: size of the access
- * @val: pointer to the data region
- * @is_write: read or write access
- *
- * returns true if the MMIO access could be performed
- */
-static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
- struct kvm_io_device *this, gpa_t addr,
- int len, void *val, bool is_write)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct vgic_io_device *iodev = container_of(this,
- struct vgic_io_device, dev);
- const struct vgic_io_range *range;
- struct kvm_exit_mmio mmio;
- bool updated_state;
- gpa_t offset;
-
- offset = addr - iodev->addr;
- range = vgic_find_range(iodev->reg_ranges, len, offset);
- if (unlikely(!range || !range->handle_mmio)) {
- pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
- return -ENXIO;
- }
-
- mmio.phys_addr = addr;
- mmio.len = len;
- mmio.is_write = is_write;
- mmio.data = val;
- mmio.private = iodev->redist_vcpu;
-
- spin_lock(&dist->lock);
- offset -= range->base;
- if (vgic_validate_access(dist, range, offset)) {
- updated_state = call_range_handler(vcpu, &mmio, offset, range);
- } else {
- if (!is_write)
- memset(val, 0, len);
- updated_state = false;
- }
- spin_unlock(&dist->lock);
-
- if (updated_state)
- vgic_kick_vcpus(vcpu->kvm);
-
- return 0;
-}
-
-static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
- struct kvm_io_device *this,
- gpa_t addr, int len, void *val)
-{
- return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
-}
-
-static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
- struct kvm_io_device *this,
- gpa_t addr, int len, const void *val)
-{
- return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
- true);
-}
-
-static struct kvm_io_device_ops vgic_io_ops = {
- .read = vgic_handle_mmio_read,
- .write = vgic_handle_mmio_write,
-};
-
-/**
- * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
- * @kvm: The VM structure pointer
- * @base: The (guest) base address for the register frame
- * @len: Length of the register frame window
- * @ranges: Describing the handler functions for each register
- * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
- * @iodev: Points to memory to be passed on to the handler
- *
- * @iodev stores the parameters of this function so that the handler and the
- * dispatcher function can use them (since the KVM I/O bus framework lacks
- * an opaque parameter). Initialization is done in this function, but the
- * reference should remain valid and unique for the whole VGIC lifetime.
- * If the register frame is not mapped for a specific VCPU, pass -1 to
- * @redist_vcpu_id.
- */
-int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
- const struct vgic_io_range *ranges,
- int redist_vcpu_id,
- struct vgic_io_device *iodev)
-{
- struct kvm_vcpu *vcpu = NULL;
- int ret;
-
- if (redist_vcpu_id >= 0)
- vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);
-
- iodev->addr = base;
- iodev->len = len;
- iodev->reg_ranges = ranges;
- iodev->redist_vcpu = vcpu;
-
- kvm_iodevice_init(&iodev->dev, &vgic_io_ops);
-
- mutex_lock(&kvm->slots_lock);
-
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
- &iodev->dev);
- mutex_unlock(&kvm->slots_lock);
-
- /* Mark the iodev as invalid if registration fails. */
- if (ret)
- iodev->dev.ops = NULL;
-
- return ret;
-}
-
-static int vgic_nr_shared_irqs(struct vgic_dist *dist)
-{
- return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
-}
-
-static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long *active, *enabled, *act_percpu, *act_shared;
- unsigned long active_private, active_shared;
- int nr_shared = vgic_nr_shared_irqs(dist);
- int vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
- act_percpu = vcpu->arch.vgic_cpu.active_percpu;
- act_shared = vcpu->arch.vgic_cpu.active_shared;
-
- active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
- enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
- bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
-
- active = vgic_bitmap_get_shared_map(&dist->irq_active);
- enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
- bitmap_and(act_shared, active, enabled, nr_shared);
- bitmap_and(act_shared, act_shared,
- vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
- nr_shared);
-
- active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
- active_shared = find_first_bit(act_shared, nr_shared);
-
- return (active_private < VGIC_NR_PRIVATE_IRQS ||
- active_shared < nr_shared);
-}
-
-static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
- unsigned long pending_private, pending_shared;
- int nr_shared = vgic_nr_shared_irqs(dist);
- int vcpu_id;
-
- vcpu_id = vcpu->vcpu_id;
- pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
- pend_shared = vcpu->arch.vgic_cpu.pending_shared;
-
- if (!dist->enabled) {
- bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
- bitmap_zero(pend_shared, nr_shared);
- return 0;
- }
-
- pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
- enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
- bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
-
- pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
- enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
- bitmap_and(pend_shared, pending, enabled, nr_shared);
- bitmap_and(pend_shared, pend_shared,
- vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
- nr_shared);
-
- pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
- pending_shared = find_first_bit(pend_shared, nr_shared);
- return (pending_private < VGIC_NR_PRIVATE_IRQS ||
- pending_shared < vgic_nr_shared_irqs(dist));
-}
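
A toy model of the computation above (and of the "oracle" described in the header comment of this file): a CPU has a pending interrupt iff some private IRQ is pending and enabled, or some shared IRQ is pending, enabled and targeted at it. The integer widths here are illustrative, not the kernel's bitmap types.

#include <stdint.h>
#include <stdio.h>

/* 1 iff this CPU has anything deliverable, mirroring the AND chain in
 * compute_pending_for_cpu() with the bitmaps shrunk to plain integers. */
static int cpu_has_pending(uint32_t pend_priv, uint32_t en_priv,
                           uint64_t pend_shared, uint64_t en_shared,
                           uint64_t target_mask)
{
    if (pend_priv & en_priv)
        return 1;
    return (pend_shared & en_shared & target_mask) != 0;
}

int main(void)
{
    /* SPI bit 3 pending and enabled; only the second case targets this CPU. */
    printf("not targeted at this cpu: %d\n",
           cpu_has_pending(0, ~0u, 1u << 3, 1u << 3, 0));
    printf("targeted at this cpu:     %d\n",
           cpu_has_pending(0, ~0u, 1u << 3, 1u << 3, 1u << 3));
    return 0;
}
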
-
-/*
- * Update the interrupt state and determine which CPUs have pending
- * or active interrupts. Must be called with distributor lock held.
- */
-void vgic_update_state(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int c;
-
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (compute_pending_for_cpu(vcpu))
- set_bit(c, dist->irq_pending_on_cpu);
-
- if (compute_active_for_cpu(vcpu))
- set_bit(c, dist->irq_active_on_cpu);
- else
- clear_bit(c, dist->irq_active_on_cpu);
- }
-}
-
-static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
-{
- return vgic_ops->get_lr(vcpu, lr);
-}
-
-static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
- struct vgic_lr vlr)
-{
- vgic_ops->set_lr(vcpu, lr, vlr);
-}
-
-static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
-{
- return vgic_ops->get_elrsr(vcpu);
-}
-
-static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
-{
- return vgic_ops->get_eisr(vcpu);
-}
-
-static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
-{
- vgic_ops->clear_eisr(vcpu);
-}
-
-static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
-{
- return vgic_ops->get_interrupt_status(vcpu);
-}
-
-static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
-{
- vgic_ops->enable_underflow(vcpu);
-}
-
-static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
-{
- vgic_ops->disable_underflow(vcpu);
-}
-
-void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
-{
- vgic_ops->get_vmcr(vcpu, vmcr);
-}
-
-void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
-{
- vgic_ops->set_vmcr(vcpu, vmcr);
-}
-
-static inline void vgic_enable(struct kvm_vcpu *vcpu)
-{
- vgic_ops->enable(vcpu);
-}
-
-static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
-{
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
-
- vgic_irq_clear_queued(vcpu, vlr.irq);
-
- /*
- * We must transfer the pending state back to the distributor before
-	 * retiring the LR, otherwise we may lose edge-triggered interrupts.
- */
- if (vlr.state & LR_STATE_PENDING) {
- vgic_dist_irq_set_pending(vcpu, vlr.irq);
- vlr.hwirq = 0;
- }
-
- vlr.state = 0;
- vgic_set_lr(vcpu, lr_nr, vlr);
-}
-
-static bool dist_active_irq(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
-{
- int i;
-
- for (i = 0; i < vgic->nr_lr; i++) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, i);
-
- if (vlr.irq == virt_irq && vlr.state & LR_STATE_ACTIVE)
- return true;
- }
-
- return vgic_irq_is_active(vcpu, virt_irq);
-}
-
-/*
- * An interrupt may have been disabled after being made pending on the
- * CPU interface (the classic case is a timer running while we're
- * rebooting the guest - the interrupt would kick as soon as the CPU
- * interface gets enabled, with deadly consequences).
- *
- * The solution is to examine already active LRs, and check the
- * interrupt is still enabled. If not, just retire it.
- */
-static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
-{
- u64 elrsr = vgic_get_elrsr(vcpu);
- unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
- int lr;
-
- for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
-
- if (!vgic_irq_is_enabled(vcpu, vlr.irq))
- vgic_retire_lr(lr, vcpu);
- }
-}
-
-static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
- int lr_nr, struct vgic_lr vlr)
-{
- if (vgic_irq_is_active(vcpu, irq)) {
- vlr.state |= LR_STATE_ACTIVE;
- kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
- vgic_irq_clear_active(vcpu, irq);
- vgic_update_state(vcpu->kvm);
- } else {
- WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
- vlr.state |= LR_STATE_PENDING;
- kvm_debug("Set pending: 0x%x\n", vlr.state);
- }
-
- if (!vgic_irq_is_edge(vcpu, irq))
- vlr.state |= LR_EOI_INT;
-
- if (vlr.irq >= VGIC_NR_SGIS) {
- struct irq_phys_map *map;
- map = vgic_irq_map_search(vcpu, irq);
-
- if (map) {
- vlr.hwirq = map->phys_irq;
- vlr.state |= LR_HW;
- vlr.state &= ~LR_EOI_INT;
-
- /*
- * Make sure we're not going to sample this
- * again, as a HW-backed interrupt cannot be
- * in the PENDING_ACTIVE stage.
- */
- vgic_irq_set_queued(vcpu, irq);
- }
- }
-
- vgic_set_lr(vcpu, lr_nr, vlr);
-}
-
-/*
- * Queue an interrupt to a CPU virtual interface. Return true on success,
- * or false if it wasn't possible to queue it.
- * sgi_source must be zero for any non-SGI interrupts.
- */
-bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- u64 elrsr = vgic_get_elrsr(vcpu);
- unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
- struct vgic_lr vlr;
- int lr;
-
- /* Sanitize the input... */
- BUG_ON(sgi_source_id & ~7);
- BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
- BUG_ON(irq >= dist->nr_irqs);
-
- kvm_debug("Queue IRQ%d\n", irq);
-
- /* Do we have an active interrupt for the same CPUID? */
- for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
- vlr = vgic_get_lr(vcpu, lr);
- if (vlr.irq == irq && vlr.source == sgi_source_id) {
- kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
- vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
- return true;
- }
- }
-
- /* Try to use another LR for this interrupt */
- lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
- if (lr >= vgic->nr_lr)
- return false;
-
- kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
-
- vlr.irq = irq;
- vlr.source = sgi_source_id;
- vlr.state = 0;
- vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
-
- return true;
-}
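
The free-LR search above boils down to a find-first-set over the ELRSR bits. A minimal standalone model, with the list-register count passed in explicitly:

#include <stdint.h>
#include <stdio.h>

/* A set ELRSR bit means "this list register is empty". */
static int find_free_lr(uint64_t elrsr, int nr_lr)
{
    int lr;

    for (lr = 0; lr < nr_lr; lr++)
        if (elrsr & (1ULL << lr))
            return lr;
    return -1;                  /* no free LR: caller signals underflow */
}

int main(void)
{
    printf("free LR: %d\n", find_free_lr(0xc, 4));  /* LR0/1 busy -> 2  */
    printf("free LR: %d\n", find_free_lr(0x0, 4));  /* all busy  -> -1 */
    return 0;
}
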
-
-static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
-{
- if (!vgic_can_sample_irq(vcpu, irq))
- return true; /* level interrupt, already queued */
-
- if (vgic_queue_irq(vcpu, 0, irq)) {
- if (vgic_irq_is_edge(vcpu, irq)) {
- vgic_dist_irq_clear_pending(vcpu, irq);
- vgic_cpu_irq_clear(vcpu, irq);
- } else {
- vgic_irq_set_queued(vcpu, irq);
- }
-
- return true;
- }
-
- return false;
-}
-
-/*
- * Fill the list registers with pending interrupts before running the
- * guest.
- */
-static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- unsigned long *pa_percpu, *pa_shared;
- int i, vcpu_id;
- int overflow = 0;
- int nr_shared = vgic_nr_shared_irqs(dist);
-
- vcpu_id = vcpu->vcpu_id;
-
- pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
- pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
-
- bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
- VGIC_NR_PRIVATE_IRQS);
- bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
- nr_shared);
- /*
- * We may not have any pending interrupt, or the interrupts
- * may have been serviced from another vcpu. In all cases,
- * move along.
- */
- if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
- goto epilog;
-
- /* SGIs */
- for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
- if (!queue_sgi(vcpu, i))
- overflow = 1;
- }
-
- /* PPIs */
- for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
- if (!vgic_queue_hwirq(vcpu, i))
- overflow = 1;
- }
-
- /* SPIs */
- for_each_set_bit(i, pa_shared, nr_shared) {
- if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
- overflow = 1;
- }
-
-
-
-
-epilog:
- if (overflow) {
- vgic_enable_underflow(vcpu);
- } else {
- vgic_disable_underflow(vcpu);
- /*
- * We're about to run this VCPU, and we've consumed
- * everything the distributor had in store for
- * us. Claim we don't have anything pending. We'll
- * adjust that if needed while exiting.
- */
- clear_bit(vcpu_id, dist->irq_pending_on_cpu);
- }
-}
-
-static int process_queued_irq(struct kvm_vcpu *vcpu,
- int lr, struct vgic_lr vlr)
-{
- int pending = 0;
-
- /*
- * If the IRQ was EOIed (called from vgic_process_maintenance) or it
- * went from active to non-active (called from vgic_sync_hwirq) it was
- * also ACKed and we therefore assume we can clear the soft pending
- * state (should it have been set) for this interrupt.
- *
- * Note: if the IRQ soft pending state was set after the IRQ was
- * acked, it actually shouldn't be cleared, but we have no way of
- * knowing that unless we start trapping ACKs when the soft-pending
- * state is set.
- */
- vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
-
- /*
- * Tell the gic to start sampling this interrupt again.
- */
- vgic_irq_clear_queued(vcpu, vlr.irq);
-
- /* Any additional pending interrupt? */
- if (vgic_irq_is_edge(vcpu, vlr.irq)) {
- BUG_ON(!(vlr.state & LR_HW));
- pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
- } else {
- if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
- vgic_cpu_irq_set(vcpu, vlr.irq);
- pending = 1;
- } else {
- vgic_dist_irq_clear_pending(vcpu, vlr.irq);
- vgic_cpu_irq_clear(vcpu, vlr.irq);
- }
- }
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- vlr.state = 0;
- vlr.hwirq = 0;
- vgic_set_lr(vcpu, lr, vlr);
-
- return pending;
-}
-
-static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
-{
- u32 status = vgic_get_interrupt_status(vcpu);
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct kvm *kvm = vcpu->kvm;
- int level_pending = 0;
-
- kvm_debug("STATUS = %08x\n", status);
-
- if (status & INT_STATUS_EOI) {
- /*
- * Some level interrupts have been EOIed. Clear their
- * active bit.
- */
- u64 eisr = vgic_get_eisr(vcpu);
- unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
- int lr;
-
- for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
-
- WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
- WARN_ON(vlr.state & LR_STATE_MASK);
-
-
- /*
- * kvm_notify_acked_irq calls kvm_set_irq()
- * to reset the IRQ level, which grabs the dist->lock
- * so we call this before taking the dist->lock.
- */
- kvm_notify_acked_irq(kvm, 0,
- vlr.irq - VGIC_NR_PRIVATE_IRQS);
-
- spin_lock(&dist->lock);
- level_pending |= process_queued_irq(vcpu, lr, vlr);
- spin_unlock(&dist->lock);
- }
- }
-
- if (status & INT_STATUS_UNDERFLOW)
- vgic_disable_underflow(vcpu);
-
- /*
- * In the next iterations of the vcpu loop, if we sync the vgic state
- * after flushing it, but before entering the guest (this happens for
- * pending signals and vmid rollovers), then make sure we don't pick
- * up any old maintenance interrupts here.
- */
- vgic_clear_eisr(vcpu);
-
- return level_pending;
-}
-
-/*
- * Save the physical active state, and reset it to inactive.
- *
- * Return true if there's a pending forwarded interrupt to queue.
- */
-static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- bool level_pending;
-
- if (!(vlr.state & LR_HW))
- return false;
-
- if (vlr.state & LR_STATE_ACTIVE)
- return false;
-
- spin_lock(&dist->lock);
- level_pending = process_queued_irq(vcpu, lr, vlr);
- spin_unlock(&dist->lock);
- return level_pending;
-}
-
-/* Sync back the VGIC state after a guest run */
-static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- u64 elrsr;
- unsigned long *elrsr_ptr;
- int lr, pending;
- bool level_pending;
-
- level_pending = vgic_process_maintenance(vcpu);
-
- /* Deal with HW interrupts, and clear mappings for empty LRs */
- for (lr = 0; lr < vgic->nr_lr; lr++) {
- struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
-
- level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
- BUG_ON(vlr.irq >= dist->nr_irqs);
- }
-
- /* Check if we still have something up our sleeve... */
- elrsr = vgic_get_elrsr(vcpu);
- elrsr_ptr = u64_to_bitmask(&elrsr);
- pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
- if (level_pending || pending < vgic->nr_lr)
- set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
-}
-
-void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- if (!irqchip_in_kernel(vcpu->kvm))
- return;
-
- spin_lock(&dist->lock);
- __kvm_vgic_flush_hwstate(vcpu);
- spin_unlock(&dist->lock);
-}
-
-void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
-{
- if (!irqchip_in_kernel(vcpu->kvm))
- return;
-
- __kvm_vgic_sync_hwstate(vcpu);
-}
-
-int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
- if (!irqchip_in_kernel(vcpu->kvm))
- return 0;
-
- return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
-}
-
-void vgic_kick_vcpus(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- int c;
-
- /*
- * We've injected an interrupt, time to find out who deserves
- * a good kick...
- */
- kvm_for_each_vcpu(c, vcpu, kvm) {
- if (kvm_vgic_vcpu_pending_irq(vcpu))
- kvm_vcpu_kick(vcpu);
- }
-}
-
-static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
-{
- int edge_triggered = vgic_irq_is_edge(vcpu, irq);
-
- /*
- * Only inject an interrupt if:
- * - edge triggered and we have a rising edge
- * - level triggered and we change level
- */
- if (edge_triggered) {
- int state = vgic_dist_irq_is_pending(vcpu, irq);
- return level > state;
- } else {
- int state = vgic_dist_irq_get_level(vcpu, irq);
- return level != state;
- }
-}
-
-static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
- unsigned int irq_num, bool level)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int edge_triggered, level_triggered;
- int enabled;
- bool ret = true, can_inject = true;
-
- trace_vgic_update_irq_pending(cpuid, irq_num, level);
-
- if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
- return -EINVAL;
-
- spin_lock(&dist->lock);
-
- vcpu = kvm_get_vcpu(kvm, cpuid);
- edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
- level_triggered = !edge_triggered;
-
- if (!vgic_validate_injection(vcpu, irq_num, level)) {
- ret = false;
- goto out;
- }
-
- if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
- cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
- if (cpuid == VCPU_NOT_ALLOCATED) {
- /* Pretend we use CPU0, and prevent injection */
- cpuid = 0;
- can_inject = false;
- }
- vcpu = kvm_get_vcpu(kvm, cpuid);
- }
-
- kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
-
- if (level) {
- if (level_triggered)
- vgic_dist_irq_set_level(vcpu, irq_num);
- vgic_dist_irq_set_pending(vcpu, irq_num);
- } else {
- if (level_triggered) {
- vgic_dist_irq_clear_level(vcpu, irq_num);
- if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
- vgic_dist_irq_clear_pending(vcpu, irq_num);
- vgic_cpu_irq_clear(vcpu, irq_num);
- if (!compute_pending_for_cpu(vcpu))
- clear_bit(cpuid, dist->irq_pending_on_cpu);
- }
- }
-
- ret = false;
- goto out;
- }
-
- enabled = vgic_irq_is_enabled(vcpu, irq_num);
-
- if (!enabled || !can_inject) {
- ret = false;
- goto out;
- }
-
- if (!vgic_can_sample_irq(vcpu, irq_num)) {
- /*
- * Level interrupt in progress, will be picked up
- * when EOId.
- */
- ret = false;
- goto out;
- }
-
- if (level) {
- vgic_cpu_irq_set(vcpu, irq_num);
- set_bit(cpuid, dist->irq_pending_on_cpu);
- }
-
-out:
- spin_unlock(&dist->lock);
-
- if (ret) {
- /* kick the specified vcpu */
- kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
- }
-
- return 0;
-}
-
-static int vgic_lazy_init(struct kvm *kvm)
-{
- int ret = 0;
-
- if (unlikely(!vgic_initialized(kvm))) {
- /*
- * We only provide the automatic initialization of the VGIC
- * for the legacy case of a GICv2. Any other type must
- * be explicitly initialized once set up with the respective
- * KVM device call.
- */
- if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
- return -EBUSY;
-
- mutex_lock(&kvm->lock);
- ret = vgic_init(kvm);
- mutex_unlock(&kvm->lock);
- }
-
- return ret;
-}
-
-/**
- * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
- * @kvm: The VM structure pointer
- * @cpuid: The CPU for PPIs
- * @irq_num: The IRQ number that is assigned to the device. This IRQ
- * must not be mapped to a HW interrupt.
- * @level: Edge-triggered: true: to trigger the interrupt
- * false: to ignore the call
- * Level-sensitive true: raise the input signal
- * false: lower the input signal
- *
- * The GIC is not concerned with devices being active-LOW or active-HIGH for
- * level-sensitive interrupts. You can think of the level parameter as 1
- * being HIGH and 0 being LOW and all devices being active-HIGH.
- */
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
- bool level)
-{
- struct irq_phys_map *map;
- int ret;
-
- ret = vgic_lazy_init(kvm);
- if (ret)
- return ret;
-
- map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
- if (map)
- return -EINVAL;
-
- return vgic_update_irq_pending(kvm, cpuid, irq_num, level);
-}
-
-/**
- * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
- * @kvm: The VM structure pointer
- * @cpuid: The CPU for PPIs
- * @virt_irq: The virtual IRQ to be injected
- * @level: Edge-triggered: true: to trigger the interrupt
- * false: to ignore the call
- * Level-sensitive true: raise the input signal
- * false: lower the input signal
- *
- * The GIC is not concerned with devices being active-LOW or active-HIGH for
- * level-sensitive interrupts. You can think of the level parameter as 1
- * being HIGH and 0 being LOW and all devices being active-HIGH.
- */
-int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
- unsigned int virt_irq, bool level)
-{
- int ret;
-
- ret = vgic_lazy_init(kvm);
- if (ret)
- return ret;
-
- return vgic_update_irq_pending(kvm, cpuid, virt_irq, level);
-}
-
-static irqreturn_t vgic_maintenance_handler(int irq, void *data)
-{
- /*
- * We cannot rely on the vgic maintenance interrupt to be
- * delivered synchronously. This means we can only use it to
- * exit the VM, and we perform the handling of EOIed
- * interrupts on the exit path (see vgic_process_maintenance).
- */
- return IRQ_HANDLED;
-}
-
-static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
- int virt_irq)
-{
- if (virt_irq < VGIC_NR_PRIVATE_IRQS)
- return &vcpu->arch.vgic_cpu.irq_phys_map_list;
- else
- return &vcpu->kvm->arch.vgic.irq_phys_map_list;
-}
-
-/**
- * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
- * @vcpu: The VCPU pointer
- * @virt_irq: The virtual IRQ number for the guest
- * @phys_irq: The hardware IRQ number of the host
- *
- * Establish a mapping between a guest visible irq (@virt_irq) and a
- * hardware irq (@phys_irq). On injection, @virt_irq will be associated with
- * the physical interrupt represented by @phys_irq. This mapping can be
- * established multiple times as long as the parameters are the same.
- *
- * Returns 0 on success or an error value otherwise.
- */
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
- struct irq_phys_map *map;
- struct irq_phys_map_entry *entry;
- int ret = 0;
-
- /* Create a new mapping */
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- spin_lock(&dist->irq_phys_map_lock);
-
- /* Try to match an existing mapping */
- map = vgic_irq_map_search(vcpu, virt_irq);
- if (map) {
- /* Make sure this mapping matches */
- if (map->phys_irq != phys_irq)
- ret = -EINVAL;
-
- /* Found an existing, valid mapping */
- goto out;
- }
-
- map = &entry->map;
- map->virt_irq = virt_irq;
- map->phys_irq = phys_irq;
-
- list_add_tail_rcu(&entry->entry, root);
-
-out:
- spin_unlock(&dist->irq_phys_map_lock);
- /* If we've found a hit in the existing list, free the useless
- * entry */
- if (ret || map != &entry->map)
- kfree(entry);
- return ret;
-}
-
-static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
- int virt_irq)
-{
- struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
- struct irq_phys_map_entry *entry;
- struct irq_phys_map *map;
-
- rcu_read_lock();
-
- list_for_each_entry_rcu(entry, root, entry) {
- map = &entry->map;
- if (map->virt_irq == virt_irq) {
- rcu_read_unlock();
- return map;
- }
- }
-
- rcu_read_unlock();
-
- return NULL;
-}
-
-static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
-{
- struct irq_phys_map_entry *entry;
-
- entry = container_of(rcu, struct irq_phys_map_entry, rcu);
- kfree(entry);
-}
-
-/**
- * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
- * @vcpu: The VCPU pointer
- * @virt_irq: The virtual IRQ number to be unmapped
- *
- * Remove an existing mapping between virtual and physical interrupts.
- */
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
-{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct irq_phys_map_entry *entry;
- struct list_head *root;
-
- root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
-
- spin_lock(&dist->irq_phys_map_lock);
-
- list_for_each_entry(entry, root, entry) {
- if (entry->map.virt_irq == virt_irq) {
- list_del_rcu(&entry->entry);
- call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
- break;
- }
- }
-
- spin_unlock(&dist->irq_phys_map_lock);
-
- return 0;
-}
-
-static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct irq_phys_map_entry *entry;
-
- spin_lock(&dist->irq_phys_map_lock);
-
- list_for_each_entry(entry, root, entry) {
- list_del_rcu(&entry->entry);
- call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
- }
-
- spin_unlock(&dist->irq_phys_map_lock);
-}
-
-void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
- kfree(vgic_cpu->pending_shared);
- kfree(vgic_cpu->active_shared);
- kfree(vgic_cpu->pend_act_shared);
- vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
- vgic_cpu->pending_shared = NULL;
- vgic_cpu->active_shared = NULL;
- vgic_cpu->pend_act_shared = NULL;
-}
-
-static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
- int sz = nr_longs * sizeof(unsigned long);
- vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
- vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
-
- if (!vgic_cpu->pending_shared
- || !vgic_cpu->active_shared
- || !vgic_cpu->pend_act_shared) {
- kvm_vgic_vcpu_destroy(vcpu);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
- *
- * No memory allocation should be performed here, only static init.
- */
-void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
-{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
-}
-
-/**
- * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
- *
- * The host's GIC naturally limits the maximum amount of VCPUs a guest
- * can use.
- */
-int kvm_vgic_get_max_vcpus(void)
-{
- return vgic->max_gic_vcpus;
-}
-
-void kvm_vgic_destroy(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vgic_vcpu_destroy(vcpu);
-
- vgic_free_bitmap(&dist->irq_enabled);
- vgic_free_bitmap(&dist->irq_level);
- vgic_free_bitmap(&dist->irq_pending);
- vgic_free_bitmap(&dist->irq_soft_pend);
- vgic_free_bitmap(&dist->irq_queued);
- vgic_free_bitmap(&dist->irq_cfg);
- vgic_free_bytemap(&dist->irq_priority);
- if (dist->irq_spi_target) {
- for (i = 0; i < dist->nr_cpus; i++)
- vgic_free_bitmap(&dist->irq_spi_target[i]);
- }
- kfree(dist->irq_sgi_sources);
- kfree(dist->irq_spi_cpu);
- kfree(dist->irq_spi_mpidr);
- kfree(dist->irq_spi_target);
- kfree(dist->irq_pending_on_cpu);
- kfree(dist->irq_active_on_cpu);
- vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
- dist->irq_sgi_sources = NULL;
- dist->irq_spi_cpu = NULL;
- dist->irq_spi_target = NULL;
- dist->irq_pending_on_cpu = NULL;
- dist->irq_active_on_cpu = NULL;
- dist->nr_cpus = 0;
-}
-
-/*
- * Allocate and initialize the various data structures. Must be called
- * with kvm->lock held!
- */
-int vgic_init(struct kvm *kvm)
-{
- struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int nr_cpus, nr_irqs;
- int ret, i, vcpu_id;
-
- if (vgic_initialized(kvm))
- return 0;
-
- nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
- if (!nr_cpus) /* No vcpus? Can't be good... */
- return -ENODEV;
-
- /*
- * If nobody configured the number of interrupts, use the
- * legacy one.
- */
- if (!dist->nr_irqs)
- dist->nr_irqs = VGIC_NR_IRQS_LEGACY;
-
- nr_irqs = dist->nr_irqs;
-
- ret = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
- ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
- ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
-
- if (ret)
- goto out;
-
- dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
- dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
- dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
- GFP_KERNEL);
- dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
- GFP_KERNEL);
- dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
- GFP_KERNEL);
- if (!dist->irq_sgi_sources ||
- !dist->irq_spi_cpu ||
- !dist->irq_spi_target ||
- !dist->irq_pending_on_cpu ||
- !dist->irq_active_on_cpu) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < nr_cpus; i++)
- ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
- nr_cpus, nr_irqs);
-
- if (ret)
- goto out;
-
- ret = kvm->arch.vgic.vm_ops.init_model(kvm);
- if (ret)
- goto out;
-
- kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
- ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
- if (ret) {
- kvm_err("VGIC: Failed to allocate vcpu memory\n");
- break;
- }
-
- /*
- * Enable and configure all SGIs to be edge-triggered and
- * configure all PPIs as level-triggered.
- */
- for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
- if (i < VGIC_NR_SGIS) {
- /* SGIs */
- vgic_bitmap_set_irq_val(&dist->irq_enabled,
- vcpu->vcpu_id, i, 1);
- vgic_bitmap_set_irq_val(&dist->irq_cfg,
- vcpu->vcpu_id, i,
- VGIC_CFG_EDGE);
- } else if (i < VGIC_NR_PRIVATE_IRQS) {
- /* PPIs */
- vgic_bitmap_set_irq_val(&dist->irq_cfg,
- vcpu->vcpu_id, i,
- VGIC_CFG_LEVEL);
- }
- }
-
- vgic_enable(vcpu);
- }
-
-out:
- if (ret)
- kvm_vgic_destroy(kvm);
-
- return ret;
-}
-
-static int init_vgic_model(struct kvm *kvm, int type)
-{
- switch (type) {
- case KVM_DEV_TYPE_ARM_VGIC_V2:
- vgic_v2_init_emulation(kvm);
- break;
-#ifdef CONFIG_KVM_ARM_VGIC_V3
- case KVM_DEV_TYPE_ARM_VGIC_V3:
- vgic_v3_init_emulation(kvm);
- break;
-#endif
- default:
- return -ENODEV;
- }
-
- if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
- return -E2BIG;
-
- return 0;
-}
-
-/**
- * kvm_vgic_early_init - Earliest possible vgic initialization stage
- *
- * No memory allocation should be performed here, only static init.
- */
-void kvm_vgic_early_init(struct kvm *kvm)
-{
- spin_lock_init(&kvm->arch.vgic.lock);
- spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
- INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
-}
-
-int kvm_vgic_create(struct kvm *kvm, u32 type)
-{
- int i, vcpu_lock_idx = -1, ret;
- struct kvm_vcpu *vcpu;
-
- mutex_lock(&kvm->lock);
-
- if (irqchip_in_kernel(kvm)) {
- ret = -EEXIST;
- goto out;
- }
-
- /*
- * This function is also called by the KVM_CREATE_IRQCHIP handler,
- * which had no chance yet to check the availability of the GICv2
- * emulation. So check this here again. KVM_CREATE_DEVICE does
- * the proper checks already.
- */
- if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * Any time a vcpu is run, vcpu_load is called which tries to grab the
- * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
- * that no other VCPUs are run while we create the vgic.
- */
- ret = -EBUSY;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!mutex_trylock(&vcpu->mutex))
- goto out_unlock;
- vcpu_lock_idx = i;
- }
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (vcpu->arch.has_run_once)
- goto out_unlock;
- }
- ret = 0;
-
- ret = init_vgic_model(kvm, type);
- if (ret)
- goto out_unlock;
-
- kvm->arch.vgic.in_kernel = true;
- kvm->arch.vgic.vgic_model = type;
- kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
- kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
- kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
- kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
-
-out_unlock:
- for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
- vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
- mutex_unlock(&vcpu->mutex);
- }
-
-out:
- mutex_unlock(&kvm->lock);
- return ret;
-}
-
-static int vgic_ioaddr_overlap(struct kvm *kvm)
-{
- phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
- phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
-
- if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
- return 0;
- if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
- (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
- return -EBUSY;
- return 0;
-}
-
-static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
- phys_addr_t addr, phys_addr_t size)
-{
- int ret;
-
- if (addr & ~KVM_PHYS_MASK)
- return -E2BIG;
-
- if (addr & (SZ_4K - 1))
- return -EINVAL;
-
- if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
- return -EEXIST;
- if (addr + size < addr)
- return -EINVAL;
-
- *ioaddr = addr;
- ret = vgic_ioaddr_overlap(kvm);
- if (ret)
- *ioaddr = VGIC_ADDR_UNDEF;
-
- return ret;
-}
-
-/**
- * kvm_vgic_addr - set or get vgic VM base addresses
- * @kvm: pointer to the vm struct
- * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
- * @addr: pointer to address value
- * @write: if true set the address in the VM address space, if false read the
- * address
- *
- * Set or get the vgic base addresses for the distributor and the virtual CPU
- * interface in the VM physical address space. These addresses are properties
- * of the emulated core/SoC and therefore user space initially knows this
- * information.
- */
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
-{
- int r = 0;
- struct vgic_dist *vgic = &kvm->arch.vgic;
- int type_needed;
- phys_addr_t *addr_ptr, block_size;
- phys_addr_t alignment;
-
- mutex_lock(&kvm->lock);
- switch (type) {
- case KVM_VGIC_V2_ADDR_TYPE_DIST:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
- addr_ptr = &vgic->vgic_dist_base;
- block_size = KVM_VGIC_V2_DIST_SIZE;
- alignment = SZ_4K;
- break;
- case KVM_VGIC_V2_ADDR_TYPE_CPU:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
- addr_ptr = &vgic->vgic_cpu_base;
- block_size = KVM_VGIC_V2_CPU_SIZE;
- alignment = SZ_4K;
- break;
-#ifdef CONFIG_KVM_ARM_VGIC_V3
- case KVM_VGIC_V3_ADDR_TYPE_DIST:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
- addr_ptr = &vgic->vgic_dist_base;
- block_size = KVM_VGIC_V3_DIST_SIZE;
- alignment = SZ_64K;
- break;
- case KVM_VGIC_V3_ADDR_TYPE_REDIST:
- type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
- addr_ptr = &vgic->vgic_redist_base;
- block_size = KVM_VGIC_V3_REDIST_SIZE;
- alignment = SZ_64K;
- break;
-#endif
- default:
- r = -ENODEV;
- goto out;
- }
-
- if (vgic->vgic_model != type_needed) {
- r = -ENODEV;
- goto out;
- }
-
- if (write) {
- if (!IS_ALIGNED(*addr, alignment))
- r = -EINVAL;
- else
- r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
- block_size);
- } else {
- *addr = *addr_ptr;
- }
-
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-{
- int r;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR: {
- u64 __user *uaddr = (u64 __user *)(long)attr->addr;
- u64 addr;
- unsigned long type = (unsigned long)attr->attr;
-
- if (copy_from_user(&addr, uaddr, sizeof(addr)))
- return -EFAULT;
-
- r = kvm_vgic_addr(dev->kvm, type, &addr, true);
- return (r == -ENODEV) ? -ENXIO : r;
- }
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
- u32 val;
- int ret = 0;
-
- if (get_user(val, uaddr))
- return -EFAULT;
-
- /*
- * We require:
- * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
- * - at most 1024 interrupts
- * - a multiple of 32 interrupts
- */
- if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
- val > VGIC_MAX_IRQS ||
- (val & 31))
- return -EINVAL;
-
- mutex_lock(&dev->kvm->lock);
-
- if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
- ret = -EBUSY;
- else
- dev->kvm->arch.vgic.nr_irqs = val;
-
- mutex_unlock(&dev->kvm->lock);
-
- return ret;
- }
- case KVM_DEV_ARM_VGIC_GRP_CTRL: {
- switch (attr->attr) {
- case KVM_DEV_ARM_VGIC_CTRL_INIT:
- r = vgic_init(dev->kvm);
- return r;
- }
- break;
- }
- }
-
- return -ENXIO;
-}
-
-int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
-{
- int r = -ENXIO;
-
- switch (attr->group) {
- case KVM_DEV_ARM_VGIC_GRP_ADDR: {
- u64 __user *uaddr = (u64 __user *)(long)attr->addr;
- u64 addr;
- unsigned long type = (unsigned long)attr->attr;
-
- r = kvm_vgic_addr(dev->kvm, type, &addr, false);
- if (r)
- return (r == -ENODEV) ? -ENXIO : r;
-
- if (copy_to_user(uaddr, &addr, sizeof(addr)))
- return -EFAULT;
- break;
- }
- case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
- u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-
- r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
- break;
- }
-
- }
-
- return r;
-}
-
-int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
-{
- if (vgic_find_range(ranges, 4, offset))
- return 0;
- else
- return -ENXIO;
-}
-
-static int vgic_starting_cpu(unsigned int cpu)
-{
- enable_percpu_irq(vgic->maint_irq, 0);
- return 0;
-}
-
-static int vgic_dying_cpu(unsigned int cpu)
-{
- disable_percpu_irq(vgic->maint_irq);
- return 0;
-}
-
-static int kvm_vgic_probe(void)
-{
- const struct gic_kvm_info *gic_kvm_info;
- int ret;
-
- gic_kvm_info = gic_get_kvm_info();
- if (!gic_kvm_info)
- return -ENODEV;
-
- switch (gic_kvm_info->type) {
- case GIC_V2:
- ret = vgic_v2_probe(gic_kvm_info, &vgic_ops, &vgic);
- break;
- case GIC_V3:
- ret = vgic_v3_probe(gic_kvm_info, &vgic_ops, &vgic);
- break;
- default:
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-int kvm_vgic_hyp_init(void)
-{
- int ret;
-
- ret = kvm_vgic_probe();
- if (ret) {
- kvm_err("error: KVM vGIC probing failed\n");
- return ret;
- }
-
- ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
- "vgic", kvm_get_running_vcpus());
- if (ret) {
- kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
- return ret;
- }
-
- cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_STARTING,
- "AP_KVM_ARM_VGIC_STARTING", vgic_starting_cpu,
- vgic_dying_cpu);
- return 0;
-}
-
-int kvm_irq_map_gsi(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *entries,
- int gsi)
-{
- return 0;
-}
-
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
- return pin;
-}
-
-int kvm_set_irq(struct kvm *kvm, int irq_source_id,
- u32 irq, int level, bool line_status)
-{
- unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
-
- trace_kvm_set_irq(irq, level, irq_source_id);
-
- BUG_ON(!vgic_initialized(kvm));
-
- return kvm_vgic_inject_irq(kvm, 0, spi, level);
-}
-
-/* MSI not implemented yet */
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id,
- int level, bool line_status)
-{
- return 0;
-}
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
deleted file mode 100644
index 0df74cbb6200..000000000000
--- a/virt/kvm/arm/vgic.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2012-2014 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * Derived from virt/kvm/arm/vgic.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __KVM_VGIC_H__
-#define __KVM_VGIC_H__
-
-#include <kvm/iodev.h>
-
-#define VGIC_ADDR_UNDEF (-1)
-#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
-
-#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
-#define IMPLEMENTER_ARM 0x43b
-
-#define ACCESS_READ_VALUE (1 << 0)
-#define ACCESS_READ_RAZ (0 << 0)
-#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
-#define ACCESS_WRITE_IGNORED (0 << 1)
-#define ACCESS_WRITE_SETBIT (1 << 1)
-#define ACCESS_WRITE_CLEARBIT (2 << 1)
-#define ACCESS_WRITE_VALUE (3 << 1)
-#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
-
-#define VCPU_NOT_ALLOCATED ((u8)-1)
-
-unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x);
-
-void vgic_update_state(struct kvm *kvm);
-int vgic_init_common_maps(struct kvm *kvm);
-
-u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset);
-u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset);
-
-void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq);
-void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq);
-void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
-void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
- int irq, int val);
-
-void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
-
-bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
-void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
-
-struct kvm_exit_mmio {
- phys_addr_t phys_addr;
- void *data;
- u32 len;
- bool is_write;
- void *private;
-};
-
-void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
- phys_addr_t offset, int mode);
-bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-
-static inline
-u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
-{
- return le32_to_cpu(*((u32 *)mmio->data)) & mask;
-}
-
-static inline
-void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
-{
- *((u32 *)mmio->data) = cpu_to_le32(value) & mask;
-}
-
-struct vgic_io_range {
- phys_addr_t base;
- unsigned long len;
- int bits_per_irq;
- bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-};
-
-int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
- const struct vgic_io_range *ranges,
- int redist_id,
- struct vgic_io_device *iodev);
-
-static inline bool is_in_range(phys_addr_t addr, unsigned long len,
- phys_addr_t baseaddr, unsigned long size)
-{
- return (addr >= baseaddr) && (addr + len <= baseaddr + size);
-}
-
-const
-struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
- int len, gpa_t offset);
-
-bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id, int access);
-
-bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id);
-
-bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id);
-
-bool vgic_handle_set_active_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id);
-
-bool vgic_handle_clear_active_reg(struct kvm *kvm,
- struct kvm_exit_mmio *mmio,
- phys_addr_t offset, int vcpu_id);
-
-bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
- phys_addr_t offset);
-
-void vgic_kick_vcpus(struct kvm *kvm);
-
-int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset);
-int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
-int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
-
-int vgic_init(struct kvm *kvm);
-void vgic_v2_init_emulation(struct kvm *kvm);
-void vgic_v3_init_emulation(struct kvm *kvm);
-
-#endif
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 2c7f0d5a62ea..fb4b0a79a950 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -157,6 +157,9 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
int i;
+ INIT_LIST_HEAD(&dist->lpi_list_head);
+ spin_lock_init(&dist->lpi_list_lock);
+
dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
if (!dist->spis)
return -ENOMEM;
@@ -177,6 +180,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
spin_lock_init(&irq->irq_lock);
irq->vcpu = NULL;
irq->target_vcpu = vcpu0;
+ kref_init(&irq->refcount);
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
irq->targets = 0;
else
@@ -211,6 +215,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
irq->vcpu = NULL;
irq->target_vcpu = vcpu;
irq->targets = 1U << vcpu->vcpu_id;
+ kref_init(&irq->refcount);
if (vgic_irq_is_sgi(i)) {
/* SGIs */
irq->enabled = 1;
@@ -253,9 +258,16 @@ int vgic_init(struct kvm *kvm)
if (ret)
goto out;
+ if (vgic_has_its(kvm))
+ dist->msis_require_devid = true;
+
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_init(vcpu);
+ ret = kvm_vgic_setup_default_irq_routing(kvm);
+ if (ret)
+ goto out;
+
dist->initialized = true;
out:
return ret;
@@ -271,7 +283,6 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
dist->initialized = false;
kfree(dist->spis);
- kfree(dist->redist_iodevs);
dist->nr_spis = 0;
mutex_unlock(&kvm->lock);
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index c675513270bb..b31a51a14efb 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -17,36 +17,116 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <trace/events/kvm.h>
+#include <kvm/arm_vgic.h>
+#include "vgic.h"
-int kvm_irq_map_gsi(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *entries,
- int gsi)
+/**
+ * vgic_irqfd_set_irq: inject the IRQ corresponding to the
+ * irqchip routing entry
+ *
+ * This is the entry point for irqfd IRQ injection
+ */
+static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
{
- return 0;
+ unsigned int spi_id = e->irqchip.pin + VGIC_NR_PRIVATE_IRQS;
+
+ if (!vgic_valid_spi(kvm, spi_id))
+ return -EINVAL;
+ return kvm_vgic_inject_irq(kvm, 0, spi_id, level);
}
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned int irqchip,
- unsigned int pin)
+/**
+ * kvm_set_routing_entry: populate a kvm routing entry
+ * from a user routing entry
+ *
+ * @kvm: the VM this entry is applied to
+ * @e: kvm kernel routing entry handle
+ * @ue: user api routing entry handle
+ * return 0 on success, -EINVAL on errors.
+ */
+#ifdef KVM_CAP_X2APIC_API
+int kvm_set_routing_entry(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+#else
+/* Remove this version and the ifdefery once merged into 4.8 */
+int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+#endif
{
- return pin;
+ int r = -EINVAL;
+
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ e->set = vgic_irqfd_set_irq;
+ e->irqchip.irqchip = ue->u.irqchip.irqchip;
+ e->irqchip.pin = ue->u.irqchip.pin;
+ if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
+ (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
+ goto out;
+ break;
+ case KVM_IRQ_ROUTING_MSI:
+ e->set = kvm_set_msi;
+ e->msi.address_lo = ue->u.msi.address_lo;
+ e->msi.address_hi = ue->u.msi.address_hi;
+ e->msi.data = ue->u.msi.data;
+ e->msi.flags = ue->flags;
+ e->msi.devid = ue->u.msi.devid;
+ break;
+ default:
+ goto out;
+ }
+ r = 0;
+out:
+ return r;
}
-int kvm_set_irq(struct kvm *kvm, int irq_source_id,
- u32 irq, int level, bool line_status)
+/**
+ * kvm_set_msi: inject the MSI corresponding to the
+ * MSI routing entry
+ *
+ * This is the entry point for irqfd MSI injection
+ * and userspace MSI injection.
+ */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
{
- unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
+ struct kvm_msi msi;
- trace_kvm_set_irq(irq, level, irq_source_id);
+ msi.address_lo = e->msi.address_lo;
+ msi.address_hi = e->msi.address_hi;
+ msi.data = e->msi.data;
+ msi.flags = e->msi.flags;
+ msi.devid = e->msi.devid;
- BUG_ON(!vgic_initialized(kvm));
+ if (!vgic_has_its(kvm))
+ return -ENODEV;
- return kvm_vgic_inject_irq(kvm, 0, spi, level);
+ return vgic_its_inject_msi(kvm, &msi);
}
-/* MSI not implemented yet */
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id,
- int level, bool line_status)
+int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
{
- return 0;
+ struct kvm_irq_routing_entry *entries;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u32 nr = dist->nr_spis;
+ int i, ret;
+
+ entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < nr; i++) {
+ entries[i].gsi = i;
+ entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
+ entries[i].u.irqchip.irqchip = 0;
+ entries[i].u.irqchip.pin = i;
+ }
+ ret = kvm_set_irq_routing(kvm, entries, nr, 0);
+ kfree(entries);
+ return ret;
}
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
new file mode 100644
index 000000000000..07411cf967b9
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -0,0 +1,1500 @@
+/*
+ * GICv3 ITS emulation
+ *
+ * Copyright (C) 2015,2016 ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+/*
+ * Creates a new (reference to a) struct vgic_irq for a given LPI.
+ * If this LPI is already mapped on another ITS, we increase its refcount
+ * and return a pointer to the existing structure.
+ * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
+ * This function returns a pointer to the _unlocked_ structure.
+ */
+static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+
+ /* In this case there is no put, since we keep the reference. */
+ if (irq)
+ return irq;
+
+ irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
+ if (!irq)
+ return NULL;
+
+ INIT_LIST_HEAD(&irq->lpi_list);
+ INIT_LIST_HEAD(&irq->ap_list);
+ spin_lock_init(&irq->irq_lock);
+
+ irq->config = VGIC_CONFIG_EDGE;
+ kref_init(&irq->refcount);
+ irq->intid = intid;
+
+ spin_lock(&dist->lpi_list_lock);
+
+ /*
+ * There could be a race with another vgic_add_lpi(), so we need to
+ * check that we don't add a second list entry with the same LPI.
+ */
+ list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
+ if (oldirq->intid != intid)
+ continue;
+
+ /* Someone was faster with adding this LPI, let's use that. */
+ kfree(irq);
+ irq = oldirq;
+
+ /*
+ * This increases the refcount; the caller is expected to
+ * call vgic_put_irq() on the returned pointer once it's
+ * finished with the IRQ.
+ */
+ vgic_get_irq_kref(irq);
+
+ goto out_unlock;
+ }
+
+ list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
+ dist->lpi_list_count++;
+
+out_unlock:
+ spin_unlock(&dist->lpi_list_lock);
+
+ return irq;
+}
+
+struct its_device {
+ struct list_head dev_list;
+
+ /* the head for the list of ITTEs */
+ struct list_head itt_head;
+ u32 device_id;
+};
+
+#define COLLECTION_NOT_MAPPED ((u32)~0)
+
+struct its_collection {
+ struct list_head coll_list;
+
+ u32 collection_id;
+ u32 target_addr;
+};
+
+#define its_is_collection_mapped(coll) ((coll) && \
+ ((coll)->target_addr != COLLECTION_NOT_MAPPED))
+
+struct its_itte {
+ struct list_head itte_list;
+
+ struct vgic_irq *irq;
+ struct its_collection *collection;
+ u32 lpi;
+ u32 event_id;
+};
+
+/*
+ * Finds and returns a device in the device table for an ITS.
+ * Must be called with the its_lock mutex held.
+ */
+static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
+{
+ struct its_device *device;
+
+ list_for_each_entry(device, &its->device_list, dev_list)
+ if (device_id == device->device_id)
+ return device;
+
+ return NULL;
+}
+
+/*
+ * Finds and returns an interrupt translation table entry (ITTE) for a given
+ * Device ID/Event ID pair on an ITS.
+ * Must be called with the its_lock mutex held.
+ */
+static struct its_itte *find_itte(struct vgic_its *its, u32 device_id,
+ u32 event_id)
+{
+ struct its_device *device;
+ struct its_itte *itte;
+
+ device = find_its_device(its, device_id);
+ if (device == NULL)
+ return NULL;
+
+ list_for_each_entry(itte, &device->itt_head, itte_list)
+ if (itte->event_id == event_id)
+ return itte;
+
+ return NULL;
+}
+
+/* To be used as an iterator this macro omits the enclosing parentheses */
+#define for_each_lpi_its(dev, itte, its) \
+ list_for_each_entry(dev, &(its)->device_list, dev_list) \
+ list_for_each_entry(itte, &(dev)->itt_head, itte_list)
+
+/*
+ * We only implement 48 bits of PA at the moment, although the ITS
+ * supports more. Let's be restrictive here.
+ */
+#define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16))
+#define CBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12))
+#define PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16))
+#define PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12))
+
+#define GIC_LPI_OFFSET 8192
+
+/*
+ * Finds and returns a collection in the ITS collection table.
+ * Must be called with the its_lock mutex held.
+ */
+static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
+{
+ struct its_collection *collection;
+
+ list_for_each_entry(collection, &its->collection_list, coll_list) {
+ if (coll_id == collection->collection_id)
+ return collection;
+ }
+
+ return NULL;
+}
+
+#define LPI_PROP_ENABLE_BIT(p) ((p) & LPI_PROP_ENABLED)
+#define LPI_PROP_PRIORITY(p) ((p) & 0xfc)
+
+/*
+ * Reads the configuration data for a given LPI from guest memory and
+ * updates the fields in struct vgic_irq.
+ * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
+ * VCPU. Unconditionally applies if filter_vcpu is NULL.
+ */
+static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
+ struct kvm_vcpu *filter_vcpu)
+{
+ u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
+ u8 prop;
+ int ret;
+
+ ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+ &prop, 1);
+
+ if (ret)
+ return ret;
+
+ spin_lock(&irq->irq_lock);
+
+ if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
+ irq->priority = LPI_PROP_PRIORITY(prop);
+ irq->enabled = LPI_PROP_ENABLE_BIT(prop);
+
+ vgic_queue_irq_unlock(kvm, irq);
+ } else {
+ spin_unlock(&irq->irq_lock);
+ }
+
+ return 0;
+}
+
+/*
+ * Create a snapshot of the current LPI list, so that we can enumerate all
+ * LPIs without holding any lock.
+ * Returns the array length and puts the kmalloc'ed array into intid_ptr.
+ */
+static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct vgic_irq *irq;
+ u32 *intids;
+ int irq_count = dist->lpi_list_count, i = 0;
+
+ /*
+ * We use the current value of the list length, which may change
+ * after the kmalloc. We don't care, because the guest shouldn't
+ * change anything while the command handling is still running,
+ * and in the worst case we would miss a new IRQ, which one wouldn't
+ * expect to be covered by this command anyway.
+ */
+ intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
+ if (!intids)
+ return -ENOMEM;
+
+ spin_lock(&dist->lpi_list_lock);
+ list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ /* We don't need to "get" the IRQ, as we hold the list lock. */
+ intids[i] = irq->intid;
+ if (++i == irq_count)
+ break;
+ }
+ spin_unlock(&dist->lpi_list_lock);
+
+ *intid_ptr = intids;
+ return irq_count;
+}
+
+/*
+ * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
+ * is targeting) to the VGIC's view, which deals with target VCPUs.
+ * Needs to be called whenever either the collection for an LPI has
+ * changed or the collection itself got retargeted.
+ */
+static void update_affinity_itte(struct kvm *kvm, struct its_itte *itte)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!its_is_collection_mapped(itte->collection))
+ return;
+
+ vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+
+ spin_lock(&itte->irq->irq_lock);
+ itte->irq->target_vcpu = vcpu;
+ spin_unlock(&itte->irq->irq_lock);
+}
+
+/*
+ * Updates the target VCPU for every LPI targeting this collection.
+ * Must be called with the its_lock mutex held.
+ */
+static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
+ struct its_collection *coll)
+{
+ struct its_device *device;
+ struct its_itte *itte;
+
+ for_each_lpi_its(device, itte, its) {
+ if (!itte->collection || coll != itte->collection)
+ continue;
+
+ update_affinity_itte(kvm, itte);
+ }
+}
+
+static u32 max_lpis_propbaser(u64 propbaser)
+{
+ int nr_idbits = (propbaser & 0x1f) + 1;
+
+ return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
+}
+
+/*
+ * Scan the whole LPI pending table and sync the pending bit in there
+ * with our own data structures. This relies on the LPI being
+ * mapped before.
+ */
+static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
+{
+ gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
+ struct vgic_irq *irq;
+ int last_byte_offset = -1;
+ int ret = 0;
+ u32 *intids;
+ int nr_irqs, i;
+
+ nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
+ if (nr_irqs < 0)
+ return nr_irqs;
+
+ for (i = 0; i < nr_irqs; i++) {
+ int byte_offset, bit_nr;
+ u8 pendmask;
+
+ byte_offset = intids[i] / BITS_PER_BYTE;
+ bit_nr = intids[i] % BITS_PER_BYTE;
+
+ /*
+ * For contiguously allocated LPIs chances are we just read
+ * this very same byte in the last iteration. Reuse that.
+ */
+ if (byte_offset != last_byte_offset) {
+ ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
+ &pendmask, 1);
+ if (ret) {
+ kfree(intids);
+ return ret;
+ }
+ last_byte_offset = byte_offset;
+ }
+
+ irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
+ spin_lock(&irq->irq_lock);
+ irq->pending = pendmask & (1U << bit_nr);
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+
+ kfree(intids);
+
+ return ret;
+}
+
+static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ u32 reg = 0;
+
+ mutex_lock(&its->cmd_lock);
+ if (its->creadr == its->cwriter)
+ reg |= GITS_CTLR_QUIESCENT;
+ if (its->enabled)
+ reg |= GITS_CTLR_ENABLE;
+ mutex_unlock(&its->cmd_lock);
+
+ return reg;
+}
+
+static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ its->enabled = !!(val & GITS_CTLR_ENABLE);
+}
+
+static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ u64 reg = GITS_TYPER_PLPIS;
+
+ /*
+ * We use linear CPU numbers for redistributor addressing,
+ * so GITS_TYPER.PTA is 0.
+ * Also we force all PROPBASER registers to be the same, so
+ * CommonLPIAff is 0 as well.
+ * To avoid memory waste in the guest, we keep the number of IDBits and
+ * DevBits low - at least for the time being.
+ */
+ reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
+ reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
+
+ return extract_bytes(reg, addr & 7, len);
+}
+
+static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+}
+
+static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ switch (addr & 0xffff) {
+ case GITS_PIDR0:
+ return 0x92; /* part number, bits[7:0] */
+ case GITS_PIDR1:
+ return 0xb4; /* part number, bits[11:8] */
+ case GITS_PIDR2:
+ return GIC_PIDR2_ARCH_GICv3 | 0x0b;
+ case GITS_PIDR4:
+ return 0x40; /* This is a 64K software visible page */
+ /* The following are the ID registers for (any) GIC. */
+ case GITS_CIDR0:
+ return 0x0d;
+ case GITS_CIDR1:
+ return 0xf0;
+ case GITS_CIDR2:
+ return 0x05;
+ case GITS_CIDR3:
+ return 0xb1;
+ }
+
+ return 0;
+}
+
+/*
+ * Find the target VCPU and the LPI number for a given devid/eventid pair
+ * and make this IRQ pending, possibly injecting it.
+ * Must be called with the its_lock mutex held.
+ */
+static void vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid)
+{
+ struct its_itte *itte;
+
+ if (!its->enabled)
+ return;
+
+ itte = find_itte(its, devid, eventid);
+ /* Triggering an unmapped IRQ gets silently dropped. */
+ if (itte && its_is_collection_mapped(itte->collection)) {
+ struct kvm_vcpu *vcpu;
+
+ vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
+ if (vcpu && vcpu->arch.vgic_cpu.lpis_enabled) {
+ spin_lock(&itte->irq->irq_lock);
+ itte->irq->pending = true;
+ vgic_queue_irq_unlock(kvm, itte->irq);
+ }
+ }
+}
+
+/*
+ * Queries the KVM IO bus framework to get the ITS pointer from the given
+ * doorbell address.
+ * We then call vgic_its_trigger_msi() with the decoded data.
+ */
+int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+ u64 address;
+ struct kvm_io_device *kvm_io_dev;
+ struct vgic_io_device *iodev;
+
+ if (!vgic_has_its(kvm))
+ return -ENODEV;
+
+ if (!(msi->flags & KVM_MSI_VALID_DEVID))
+ return -EINVAL;
+
+ address = (u64)msi->address_hi << 32 | msi->address_lo;
+
+ kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
+ if (!kvm_io_dev)
+ return -ENODEV;
+
+ iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
+
+ mutex_lock(&iodev->its->its_lock);
+ vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
+ mutex_unlock(&iodev->its->its_lock);
+
+ return 0;
+}
+
+/* Requires the its_lock to be held. */
+static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
+{
+ list_del(&itte->itte_list);
+
+ /* This put matches the get in vgic_add_lpi. */
+ vgic_put_irq(kvm, itte->irq);
+
+ kfree(itte);
+}
+
+static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
+{
+ return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
+}
+
+#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8)
+#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32)
+#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32)
+#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32)
+#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16)
+#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32)
+#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1)
+
+/*
+ * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ struct its_itte *itte;
+
+
+ itte = find_itte(its, device_id, event_id);
+ if (itte && itte->collection) {
+ /*
+ * Though the spec talks about removing the pending state, we
+ * don't bother here since we clear the ITTE anyway and the
+ * pending state is a property of the ITTE struct.
+ */
+ its_free_itte(kvm, itte);
+ return 0;
+ }
+
+ return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
+}
+
+/*
+ * The MOVI command moves an ITTE to a different collection.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ u32 coll_id = its_cmd_get_collection(its_cmd);
+ struct kvm_vcpu *vcpu;
+ struct its_itte *itte;
+ struct its_collection *collection;
+
+ itte = find_itte(its, device_id, event_id);
+ if (!itte)
+ return E_ITS_MOVI_UNMAPPED_INTERRUPT;
+
+ if (!its_is_collection_mapped(itte->collection))
+ return E_ITS_MOVI_UNMAPPED_COLLECTION;
+
+ collection = find_collection(its, coll_id);
+ if (!its_is_collection_mapped(collection))
+ return E_ITS_MOVI_UNMAPPED_COLLECTION;
+
+ itte->collection = collection;
+ vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+
+ spin_lock(&itte->irq->irq_lock);
+ itte->irq->target_vcpu = vcpu;
+ spin_unlock(&itte->irq->irq_lock);
+
+ return 0;
+}
+
+/*
+ * Check whether an ID can be stored into the corresponding guest table.
+ * For a direct table this is pretty easy, but gets a bit nasty for
+ * indirect tables. We check whether the resulting guest physical address
+ * is actually valid (covered by a memslot and guest accessible).
+ * For this we have to read the respective first level entry.
+ */
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+{
+ int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+ int index;
+ u64 indirect_ptr;
+ gfn_t gfn;
+
+ if (!(baser & GITS_BASER_INDIRECT)) {
+ phys_addr_t addr;
+
+ if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
+ return false;
+
+ addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
+ gfn = addr >> PAGE_SHIFT;
+
+ return kvm_is_visible_gfn(its->dev->kvm, gfn);
+ }
+
+ /* calculate and check the index into the 1st level */
+ index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+ if (index >= (l1_tbl_size / sizeof(u64)))
+ return false;
+
+ /* Each 1st level entry is represented by a 64-bit value. */
+ if (kvm_read_guest(its->dev->kvm,
+ BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
+ &indirect_ptr, sizeof(indirect_ptr)))
+ return false;
+
+ indirect_ptr = le64_to_cpu(indirect_ptr);
+
+ /* check the valid bit of the first level entry */
+ if (!(indirect_ptr & BIT_ULL(63)))
+ return false;
+
+ /*
+ * Mask the guest physical address and calculate the frame number.
+ * Any address beyond our supported 48 bits of PA will be caught
+ * by the actual check in the final step.
+ */
+ indirect_ptr &= GENMASK_ULL(51, 16);
+
+ /* Find the address of the actual entry */
+ index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+ indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
+ gfn = indirect_ptr >> PAGE_SHIFT;
+
+ return kvm_is_visible_gfn(its->dev->kvm, gfn);
+}
+
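+/*
+ * Allocate a collection for the given collection ID and add it to the
+ * ITS' collection list. The new collection starts out unmapped, i.e. it
+ * is not yet associated with a target redistributor.
+ * Must be called with the its_lock mutex held.
+ */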
+static int vgic_its_alloc_collection(struct vgic_its *its,
+ struct its_collection **colp,
+ u32 coll_id)
+{
+ struct its_collection *collection;
+
+ if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
+ return E_ITS_MAPC_COLLECTION_OOR;
+
+	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
+
+ collection->collection_id = coll_id;
+ collection->target_addr = COLLECTION_NOT_MAPPED;
+
+ list_add_tail(&collection->coll_list, &its->collection_list);
+ *colp = collection;
+
+ return 0;
+}
+
+static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
+{
+ struct its_collection *collection;
+ struct its_device *device;
+ struct its_itte *itte;
+
+ /*
+ * Clearing the mapping for that collection ID removes the
+ * entry from the list. If there wasn't any before, we can
+ * go home early.
+ */
+ collection = find_collection(its, coll_id);
+ if (!collection)
+ return;
+
+ for_each_lpi_its(device, itte, its)
+ if (itte->collection &&
+ itte->collection->collection_id == coll_id)
+ itte->collection = NULL;
+
+ list_del(&collection->coll_list);
+ kfree(collection);
+}
+
+/*
+ * The MAPTI and MAPI commands map LPIs to ITTEs.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ u32 coll_id = its_cmd_get_collection(its_cmd);
+ struct its_itte *itte;
+ struct its_device *device;
+ struct its_collection *collection, *new_coll = NULL;
+ int lpi_nr;
+
+ device = find_its_device(its, device_id);
+ if (!device)
+ return E_ITS_MAPTI_UNMAPPED_DEVICE;
+
+ if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
+ lpi_nr = its_cmd_get_physical_id(its_cmd);
+ else
+ lpi_nr = event_id;
+ if (lpi_nr < GIC_LPI_OFFSET ||
+ lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
+ return E_ITS_MAPTI_PHYSICALID_OOR;
+
+ collection = find_collection(its, coll_id);
+ if (!collection) {
+ int ret = vgic_its_alloc_collection(its, &collection, coll_id);
+ if (ret)
+ return ret;
+ new_coll = collection;
+ }
+
+ itte = find_itte(its, device_id, event_id);
+ if (!itte) {
+ itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
+ if (!itte) {
+ if (new_coll)
+ vgic_its_free_collection(its, coll_id);
+ return -ENOMEM;
+ }
+
+ itte->event_id = event_id;
+ list_add_tail(&itte->itte_list, &device->itt_head);
+ }
+
+ itte->collection = collection;
+ itte->lpi = lpi_nr;
+ itte->irq = vgic_add_lpi(kvm, lpi_nr);
+ update_affinity_itte(kvm, itte);
+
+ /*
+	 * We "cache" the configuration table entries in our struct vgic_irq's.
+ * However we only have those structs for mapped IRQs, so we read in
+ * the respective config data from memory here upon mapping the LPI.
+ */
+ update_lpi_config(kvm, itte->irq, NULL);
+
+ return 0;
+}
+
+/* Requires the its_lock to be held. */
+static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
+{
+ struct its_itte *itte, *temp;
+
+ /*
+	 * The spec says that unmapping a device that still has valid
+	 * ITTEs mapped is UNPREDICTABLE. We remove all ITTEs here,
+	 * since otherwise we would leak the memory backing them.
+ */
+ list_for_each_entry_safe(itte, temp, &device->itt_head, itte_list)
+ its_free_itte(kvm, itte);
+
+ list_del(&device->dev_list);
+ kfree(device);
+}
+
+/*
+ * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ bool valid = its_cmd_get_validbit(its_cmd);
+ struct its_device *device;
+
+ if (!vgic_its_check_id(its, its->baser_device_table, device_id))
+ return E_ITS_MAPD_DEVICE_OOR;
+
+ device = find_its_device(its, device_id);
+
+ /*
+ * The spec says that calling MAPD on an already mapped device
+ * invalidates all cached data for this device. We implement this
+ * by removing the mapping and re-establishing it.
+ */
+ if (device)
+ vgic_its_unmap_device(kvm, device);
+
+ /*
+	 * The spec does not say whether unmapping an unmapped device
+ * is an error, so we are done in any case.
+ */
+ if (!valid)
+ return 0;
+
+ device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ device->device_id = device_id;
+ INIT_LIST_HEAD(&device->itt_head);
+
+ list_add_tail(&device->dev_list, &its->device_list);
+
+ return 0;
+}
+
+/*
+ * The MAPC command maps collection IDs to redistributors.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u16 coll_id;
+ u32 target_addr;
+ struct its_collection *collection;
+ bool valid;
+
+ valid = its_cmd_get_validbit(its_cmd);
+ coll_id = its_cmd_get_collection(its_cmd);
+ target_addr = its_cmd_get_target_addr(its_cmd);
+
+ if (target_addr >= atomic_read(&kvm->online_vcpus))
+ return E_ITS_MAPC_PROCNUM_OOR;
+
+ if (!valid) {
+ vgic_its_free_collection(its, coll_id);
+ } else {
+ collection = find_collection(its, coll_id);
+
+ if (!collection) {
+ int ret;
+
+ ret = vgic_its_alloc_collection(its, &collection,
+ coll_id);
+ if (ret)
+ return ret;
+ collection->target_addr = target_addr;
+ } else {
+ collection->target_addr = target_addr;
+ update_affinity_collection(kvm, its, collection);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The CLEAR command removes the pending state for a particular LPI.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ struct its_itte *itte;
+
+ itte = find_itte(its, device_id, event_id);
+ if (!itte)
+ return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
+
+ itte->irq->pending = false;
+
+ return 0;
+}
+
+/*
+ * The INV command syncs the configuration bits from the memory table.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 device_id = its_cmd_get_deviceid(its_cmd);
+ u32 event_id = its_cmd_get_id(its_cmd);
+ struct its_itte *itte;
+
+ itte = find_itte(its, device_id, event_id);
+ if (!itte)
+ return E_ITS_INV_UNMAPPED_INTERRUPT;
+
+ return update_lpi_config(kvm, itte->irq, NULL);
+}
+
+/*
+ * The INVALL command requests flushing of all IRQ data in this collection.
+ * Find the VCPU mapped to that collection, then iterate over the VM's list
+ * of mapped LPIs and update the configuration for each IRQ which targets
+ * the specified vcpu. The configuration will be read from the in-memory
+ * configuration table.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 coll_id = its_cmd_get_collection(its_cmd);
+ struct its_collection *collection;
+ struct kvm_vcpu *vcpu;
+ struct vgic_irq *irq;
+ u32 *intids;
+ int irq_count, i;
+
+ collection = find_collection(its, coll_id);
+ if (!its_is_collection_mapped(collection))
+ return E_ITS_INVALL_UNMAPPED_COLLECTION;
+
+ vcpu = kvm_get_vcpu(kvm, collection->target_addr);
+
+ irq_count = vgic_copy_lpi_list(kvm, &intids);
+ if (irq_count < 0)
+ return irq_count;
+
+ for (i = 0; i < irq_count; i++) {
+ irq = vgic_get_irq(kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
+ update_lpi_config(kvm, irq, vcpu);
+ vgic_put_irq(kvm, irq);
+ }
+
+ kfree(intids);
+
+ return 0;
+}
+
+/*
+ * The MOVALL command moves the pending state of all IRQs targeting one
+ * redistributor to another. We don't hold the pending state in the VCPUs,
+ * but in the IRQs instead, so there is really not much to do for us here.
+ * However, the spec says that no IRQ may target the old redistributor
+ * afterwards, so we make sure that no LPI is using the associated target_vcpu.
+ * This command affects all LPIs in the system that target that redistributor.
+ */
+static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ u32 target1_addr = its_cmd_get_target_addr(its_cmd);
+ u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
+ struct kvm_vcpu *vcpu1, *vcpu2;
+ struct vgic_irq *irq;
+
+ if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
+ target2_addr >= atomic_read(&kvm->online_vcpus))
+ return E_ITS_MOVALL_PROCNUM_OOR;
+
+ if (target1_addr == target2_addr)
+ return 0;
+
+ vcpu1 = kvm_get_vcpu(kvm, target1_addr);
+ vcpu2 = kvm_get_vcpu(kvm, target2_addr);
+
+ spin_lock(&dist->lpi_list_lock);
+
+ list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ spin_lock(&irq->irq_lock);
+
+ if (irq->target_vcpu == vcpu1)
+ irq->target_vcpu = vcpu2;
+
+ spin_unlock(&irq->irq_lock);
+ }
+
+ spin_unlock(&dist->lpi_list_lock);
+
+ return 0;
+}
+
+/*
+ * The INT command injects the LPI associated with that DevID/EvID pair.
+ * Must be called with the its_lock mutex held.
+ */
+static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ u32 msi_data = its_cmd_get_id(its_cmd);
+ u64 msi_devid = its_cmd_get_deviceid(its_cmd);
+
+ vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
+
+ return 0;
+}
+
+/*
+ * This function is called with the its_cmd lock held, but the ITS data
+ * structure lock dropped.
+ */
+static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
+ u64 *its_cmd)
+{
+ int ret = -ENODEV;
+
+ mutex_lock(&its->its_lock);
+ switch (its_cmd_get_command(its_cmd)) {
+ case GITS_CMD_MAPD:
+ ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_MAPC:
+ ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_MAPI:
+ ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_MAPTI:
+ ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_MOVI:
+ ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_DISCARD:
+ ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_CLEAR:
+ ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_MOVALL:
+ ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_INT:
+ ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_INV:
+ ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_INVALL:
+ ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
+ break;
+ case GITS_CMD_SYNC:
+ /* we ignore this command: we are in sync all of the time */
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&its->its_lock);
+
+ return ret;
+}
+
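+/*
+ * Sanitise a guest-provided GITS_BASER value: restrict shareability and
+ * cacheability, clear the physical address bits above 48 bits and enforce
+ * the only page size we support (64K).
+ */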
+static u64 vgic_sanitise_its_baser(u64 reg)
+{
+ reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
+ GITS_BASER_SHAREABILITY_SHIFT,
+ vgic_sanitise_shareability);
+ reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
+ GITS_BASER_INNER_CACHEABILITY_SHIFT,
+ vgic_sanitise_inner_cacheability);
+ reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
+ GITS_BASER_OUTER_CACHEABILITY_SHIFT,
+ vgic_sanitise_outer_cacheability);
+
+ /* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
+ reg &= ~GENMASK_ULL(15, 12);
+
+ /* We support only one (ITS) page size: 64K */
+ reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
+
+ return reg;
+}
+
+static u64 vgic_sanitise_its_cbaser(u64 reg)
+{
+ reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
+ GITS_CBASER_SHAREABILITY_SHIFT,
+ vgic_sanitise_shareability);
+ reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
+ GITS_CBASER_INNER_CACHEABILITY_SHIFT,
+ vgic_sanitise_inner_cacheability);
+ reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
+ GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
+ vgic_sanitise_outer_cacheability);
+
+ /*
+ * Sanitise the physical address to be 64k aligned.
+ * Also limit the physical addresses to 48 bits.
+ */
+ reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));
+
+ return reg;
+}
+
+static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ return extract_bytes(its->cbaser, addr & 7, len);
+}
+
+static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ /* When GITS_CTLR.Enable is 1, this register is RO. */
+ if (its->enabled)
+ return;
+
+ mutex_lock(&its->cmd_lock);
+ its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
+ its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
+ its->creadr = 0;
+ /*
+ * CWRITER is architecturally UNKNOWN on reset, but we need to reset
+ * it to CREADR to make sure we start with an empty command buffer.
+ */
+ its->cwriter = its->creadr;
+ mutex_unlock(&its->cmd_lock);
+}
+
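+/*
+ * The size of the command queue is encoded in GITS_CBASER[7:0] as the
+ * number of 4K pages minus one. Each ITS command is 32 bytes long, so
+ * offsets into the queue (CREADR/CWRITER) are multiples of 32, held in
+ * bits [19:5].
+ */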
+#define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12)
+#define ITS_CMD_SIZE 32
+#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
+
+/*
+ * By writing to CWRITER the guest announces new commands to be processed.
+ * To avoid any races in the first place, we take the its_cmd lock, which
+ * protects our ring buffer variables, so that there is only one user
+ * per ITS handling commands at a given time.
+ */
+static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ gpa_t cbaser;
+ u64 cmd_buf[4];
+ u32 reg;
+
+ if (!its)
+ return;
+
+ mutex_lock(&its->cmd_lock);
+
+ reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
+ reg = ITS_CMD_OFFSET(reg);
+ if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
+ mutex_unlock(&its->cmd_lock);
+ return;
+ }
+
+ its->cwriter = reg;
+ cbaser = CBASER_ADDRESS(its->cbaser);
+
+ while (its->cwriter != its->creadr) {
+ int ret = kvm_read_guest(kvm, cbaser + its->creadr,
+ cmd_buf, ITS_CMD_SIZE);
+ /*
+ * If kvm_read_guest() fails, this could be due to the guest
+ * programming a bogus value in CBASER or something else going
+ * wrong from which we cannot easily recover.
+ * According to section 6.3.2 in the GICv3 spec we can just
+ * ignore that command then.
+ */
+ if (!ret)
+ vgic_its_handle_command(kvm, its, cmd_buf);
+
+ its->creadr += ITS_CMD_SIZE;
+ if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
+ its->creadr = 0;
+ }
+
+ mutex_unlock(&its->cmd_lock);
+}
+
+static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ return extract_bytes(its->cwriter, addr & 0x7, len);
+}
+
+static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ return extract_bytes(its->creadr, addr & 0x7, len);
+}
+
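+/*
+ * We only implement the first two 64-bit GITS_BASER<n> registers:
+ * index 0 describes the device table, index 1 the collection table.
+ * Accesses to any other BASER register read as zero and ignore writes.
+ */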
+#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
+static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len)
+{
+ u64 reg;
+
+ switch (BASER_INDEX(addr)) {
+ case 0:
+ reg = its->baser_device_table;
+ break;
+ case 1:
+ reg = its->baser_coll_table;
+ break;
+ default:
+ reg = 0;
+ break;
+ }
+
+ return extract_bytes(reg, addr & 7, len);
+}
+
+#define GITS_BASER_RO_MASK (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
+static void vgic_mmio_write_its_baser(struct kvm *kvm,
+ struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u64 entry_size, device_type;
+ u64 reg, *regptr, clearbits = 0;
+
+ /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
+ if (its->enabled)
+ return;
+
+ switch (BASER_INDEX(addr)) {
+ case 0:
+ regptr = &its->baser_device_table;
+ entry_size = 8;
+ device_type = GITS_BASER_TYPE_DEVICE;
+ break;
+ case 1:
+ regptr = &its->baser_coll_table;
+ entry_size = 8;
+ device_type = GITS_BASER_TYPE_COLLECTION;
+ clearbits = GITS_BASER_INDIRECT;
+ break;
+ default:
+ return;
+ }
+
+ reg = update_64bit_reg(*regptr, addr & 7, len, val);
+ reg &= ~GITS_BASER_RO_MASK;
+ reg &= ~clearbits;
+
+ reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
+ reg |= device_type << GITS_BASER_TYPE_SHIFT;
+ reg = vgic_sanitise_its_baser(reg);
+
+ *regptr = reg;
+}
+
+#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
+{ \
+ .reg_offset = off, \
+ .len = length, \
+ .access_flags = acc, \
+ .its_read = rd, \
+ .its_write = wr, \
+}
+
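+/* Write handler for the read-only ITS registers: writes are ignored. */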
+static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len, unsigned long val)
+{
+ /* Ignore */
+}
+
+static struct vgic_register_region its_registers[] = {
+ REGISTER_ITS_DESC(GITS_CTLR,
+ vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_IIDR,
+ vgic_mmio_read_its_iidr, its_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_TYPER,
+ vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_CBASER,
+ vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_CWRITER,
+ vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_CREADR,
+ vgic_mmio_read_its_creadr, its_mmio_write_wi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_BASER,
+ vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_ITS_DESC(GITS_IDREGS_BASE,
+ vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
+ VGIC_ACCESS_32bit),
+};
+
+/* This is called on setting the LPI enable bit in the redistributor. */
+void vgic_enable_lpis(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
+ its_sync_lpi_pending_table(vcpu);
+}
+
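+/*
+ * Register the ITS frame as an MMIO device on the KVM I/O bus. This is
+ * triggered by the KVM_DEV_ARM_VGIC_CTRL_INIT attribute and requires the
+ * ITS base address to have been set by userspace beforehand.
+ */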
+static int vgic_its_init_its(struct kvm *kvm, struct vgic_its *its)
+{
+ struct vgic_io_device *iodev = &its->iodev;
+ int ret;
+
+ if (its->initialized)
+ return 0;
+
+ if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
+ return -ENXIO;
+
+ iodev->regions = its_registers;
+ iodev->nr_regions = ARRAY_SIZE(its_registers);
+ kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
+
+ iodev->base_addr = its->vgic_its_base;
+ iodev->iodev_type = IODEV_ITS;
+ iodev->its = its;
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
+ KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
+ mutex_unlock(&kvm->slots_lock);
+
+ if (!ret)
+ its->initialized = true;
+
+ return ret;
+}
+
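+/*
+ * Reset values for the table base registers: inner-shareable, write-back
+ * cacheable memory; the BASER value additionally advertises 8-byte table
+ * entries and a 64K page size.
+ */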
+#define INITIAL_BASER_VALUE \
+ (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
+ GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
+ GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \
+ ((8ULL - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | \
+ GITS_BASER_PAGE_SIZE_64K)
+
+#define INITIAL_PROPBASER_VALUE \
+ (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) | \
+ GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner) | \
+ GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
+
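+/*
+ * Called when userspace creates a KVM device of type
+ * KVM_DEV_TYPE_ARM_VGIC_ITS: allocate the ITS state, initialise its locks
+ * and lists and set up the reset values of the table base registers.
+ */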
+static int vgic_its_create(struct kvm_device *dev, u32 type)
+{
+ struct vgic_its *its;
+
+ if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
+ return -ENODEV;
+
+ its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
+ if (!its)
+ return -ENOMEM;
+
+ mutex_init(&its->its_lock);
+ mutex_init(&its->cmd_lock);
+
+ its->vgic_its_base = VGIC_ADDR_UNDEF;
+
+ INIT_LIST_HEAD(&its->device_list);
+ INIT_LIST_HEAD(&its->collection_list);
+
+ dev->kvm->arch.vgic.has_its = true;
+ its->initialized = false;
+ its->enabled = false;
+ its->dev = dev;
+
+ its->baser_device_table = INITIAL_BASER_VALUE |
+ ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
+ its->baser_coll_table = INITIAL_BASER_VALUE |
+ ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
+ dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
+
+ dev->private = its;
+
+ return 0;
+}
+
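+/*
+ * Free all ITTEs, devices and collections that the guest mapped on this
+ * ITS, as well as the ITS state itself.
+ */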
+static void vgic_its_destroy(struct kvm_device *kvm_dev)
+{
+ struct kvm *kvm = kvm_dev->kvm;
+ struct vgic_its *its = kvm_dev->private;
+ struct its_device *dev;
+ struct its_itte *itte;
+ struct list_head *dev_cur, *dev_temp;
+ struct list_head *cur, *temp;
+
+ /*
+ * We may end up here without the lists ever having been initialized.
+ * Check this and bail out early to avoid dereferencing a NULL pointer.
+ */
+ if (!its->device_list.next)
+ return;
+
+ mutex_lock(&its->its_lock);
+ list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
+ dev = container_of(dev_cur, struct its_device, dev_list);
+ list_for_each_safe(cur, temp, &dev->itt_head) {
+ itte = (container_of(cur, struct its_itte, itte_list));
+ its_free_itte(kvm, itte);
+ }
+ list_del(dev_cur);
+ kfree(dev);
+ }
+
+ list_for_each_safe(cur, temp, &its->collection_list) {
+ list_del(cur);
+ kfree(container_of(cur, struct its_collection, coll_list));
+ }
+ mutex_unlock(&its->its_lock);
+
+ kfree(its);
+}
+
+static int vgic_its_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_ITS_ADDR_TYPE:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ break;
+ }
+ return -ENXIO;
+}
+
+static int vgic_its_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ struct vgic_its *its = dev->private;
+ int ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+ unsigned long type = (unsigned long)attr->attr;
+ u64 addr;
+
+ if (type != KVM_VGIC_ITS_ADDR_TYPE)
+ return -ENODEV;
+
+ if (its->initialized)
+ return -EBUSY;
+
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
+ ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
+ addr, SZ_64K);
+ if (ret)
+ return ret;
+
+ its->vgic_its_base = addr;
+
+ return 0;
+ }
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return vgic_its_init_its(dev->kvm, its);
+ }
+ break;
+ }
+ return -ENXIO;
+}
+
+static int vgic_its_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+ struct vgic_its *its = dev->private;
+ u64 addr = its->vgic_its_base;
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+ unsigned long type = (unsigned long)attr->attr;
+
+ if (type != KVM_VGIC_ITS_ADDR_TYPE)
+ return -ENODEV;
+
+ if (copy_to_user(uaddr, &addr, sizeof(addr)))
+ return -EFAULT;
+ break;
+ default:
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static struct kvm_device_ops kvm_arm_vgic_its_ops = {
+ .name = "kvm-arm-vgic-its",
+ .create = vgic_its_create,
+ .destroy = vgic_its_destroy,
+ .set_attr = vgic_its_set_attr,
+ .get_attr = vgic_its_get_attr,
+ .has_attr = vgic_its_has_attr,
+};
+
+int kvm_vgic_register_its_device(void)
+{
+ return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
+ KVM_DEV_TYPE_ARM_VGIC_ITS);
+}
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 0130c4b147b7..1813f93b5cde 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -21,8 +21,8 @@
/* common helpers */
-static int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
- phys_addr_t addr, phys_addr_t alignment)
+int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
+ phys_addr_t addr, phys_addr_t alignment)
{
if (addr & ~KVM_PHYS_MASK)
return -E2BIG;
@@ -210,20 +210,27 @@ static void vgic_destroy(struct kvm_device *dev)
kfree(dev);
}
-void kvm_register_vgic_device(unsigned long type)
+int kvm_register_vgic_device(unsigned long type)
{
+ int ret = -ENODEV;
+
switch (type) {
case KVM_DEV_TYPE_ARM_VGIC_V2:
- kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
- KVM_DEV_TYPE_ARM_VGIC_V2);
+ ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V2);
break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
case KVM_DEV_TYPE_ARM_VGIC_V3:
- kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
- KVM_DEV_TYPE_ARM_VGIC_V3);
+ ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V3);
+ if (ret)
+ break;
+ ret = kvm_vgic_register_its_device();
break;
#endif
}
+
+ return ret;
}
/** vgic_attr_regs_access: allows user space to read/write VGIC registers
@@ -428,4 +435,3 @@ struct kvm_device_ops kvm_arm_vgic_v3_ops = {
};
#endif /* CONFIG_KVM_ARM_VGIC_V3 */
-
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index a21393637e4b..b44b359cbbad 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -102,6 +102,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
irq->source |= 1U << source_vcpu->vcpu_id;
vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+ vgic_put_irq(source_vcpu->kvm, irq);
}
}
@@ -116,6 +117,8 @@ static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
val |= (u64)irq->targets << (i * 8);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return val;
@@ -143,6 +146,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -157,6 +161,8 @@ static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
val |= (u64)irq->source << (i * 8);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return val;
}
@@ -178,6 +184,7 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
irq->pending = false;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -201,6 +208,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
} else {
spin_unlock(&irq->irq_lock);
}
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -429,6 +437,7 @@ int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
struct vgic_io_device dev = {
.regions = vgic_v2_cpu_registers,
.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
+ .iodev_type = IODEV_CPUIF,
};
return vgic_uaccess(vcpu, &dev, is_write, offset, val);
@@ -440,6 +449,7 @@ int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
struct vgic_io_device dev = {
.regions = vgic_v2_dist_registers,
.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
+ .iodev_type = IODEV_DIST,
};
return vgic_uaccess(vcpu, &dev, is_write, offset, val);
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index a0c515a412a7..ff668e0dd586 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -23,12 +23,35 @@
#include "vgic-mmio.h"
/* extract @num bytes at @offset bytes offset in data */
-static unsigned long extract_bytes(unsigned long data, unsigned int offset,
- unsigned int num)
+unsigned long extract_bytes(unsigned long data, unsigned int offset,
+ unsigned int num)
{
return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
+/* allows updates of any half of a 64-bit register (or the whole thing) */
+u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
+ unsigned long val)
+{
+ int lower = (offset & 4) * 8;
+ int upper = lower + 8 * len - 1;
+
+ reg &= ~GENMASK_ULL(upper, lower);
+ val &= GENMASK_ULL(len * 8 - 1, 0);
+
+ return reg | ((u64)val << lower);
+}
+
+bool vgic_has_its(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+ return false;
+
+ return dist->has_its;
+}
+
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
@@ -43,7 +66,12 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
case GICD_TYPER:
value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
value = (value >> 5) - 1;
- value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
+ if (vgic_has_its(vcpu->kvm)) {
+ value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
+ value |= GICD_TYPER_LPIS;
+ } else {
+ value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
+ }
break;
case GICD_IIDR:
value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
@@ -80,15 +108,17 @@ static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+ unsigned long ret = 0;
if (!irq)
return 0;
/* The upper word is RAZ for us. */
- if (addr & 4)
- return 0;
+ if (!(addr & 4))
+ ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
- return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+ vgic_put_irq(vcpu->kvm, irq);
+ return ret;
}
static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
@@ -96,15 +126,17 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
unsigned long val)
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
-
- if (!irq)
- return;
+ struct vgic_irq *irq;
/* The upper word is WI for us since we don't implement Aff3. */
if (addr & 4)
return;
+ irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+
+ if (!irq)
+ return;
+
spin_lock(&irq->irq_lock);
/* We only care about and preserve Aff0, Aff1 and Aff2. */
@@ -112,6 +144,32 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
+}
+
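+/* We only implement the LPI enable bit (EnableLPIs) of GICR_CTLR. */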
+static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
+}
+
+static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ bool was_enabled = vgic_cpu->lpis_enabled;
+
+ if (!vgic_has_its(vcpu->kvm))
+ return;
+
+ vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
+
+ if (!was_enabled && vgic_cpu->lpis_enabled)
+ vgic_enable_lpis(vcpu);
}
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
@@ -125,6 +183,8 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
value |= ((target_vcpu_id & 0xffff) << 8);
if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
value |= GICR_TYPER_LAST;
+ if (vgic_has_its(vcpu->kvm))
+ value |= GICR_TYPER_PLPIS;
return extract_bytes(value, addr & 7, len);
}
@@ -147,6 +207,142 @@ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
return 0;
}
+/* We want to avoid outer shareable. */
+u64 vgic_sanitise_shareability(u64 field)
+{
+ switch (field) {
+ case GIC_BASER_OuterShareable:
+ return GIC_BASER_InnerShareable;
+ default:
+ return field;
+ }
+}
+
+/* Avoid any inner non-cacheable mapping. */
+u64 vgic_sanitise_inner_cacheability(u64 field)
+{
+ switch (field) {
+ case GIC_BASER_CACHE_nCnB:
+ case GIC_BASER_CACHE_nC:
+ return GIC_BASER_CACHE_RaWb;
+ default:
+ return field;
+ }
+}
+
+/* Non-cacheable or same-as-inner are OK. */
+u64 vgic_sanitise_outer_cacheability(u64 field)
+{
+ switch (field) {
+ case GIC_BASER_CACHE_SameAsInner:
+ case GIC_BASER_CACHE_nC:
+ return field;
+ default:
+ return GIC_BASER_CACHE_nC;
+ }
+}
+
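+/*
+ * Extract the field described by @field_mask and @field_shift, run it
+ * through the given sanitise function and merge the result back into the
+ * register value.
+ */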
+u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
+ u64 (*sanitise_fn)(u64))
+{
+ u64 field = (reg & field_mask) >> field_shift;
+
+ field = sanitise_fn(field) << field_shift;
+ return (reg & ~field_mask) | field;
+}
+
+#define PROPBASER_RES0_MASK \
+ (GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
+#define PENDBASER_RES0_MASK \
+ (BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) | \
+ GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
+
+static u64 vgic_sanitise_pendbaser(u64 reg)
+{
+ reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
+ GICR_PENDBASER_SHAREABILITY_SHIFT,
+ vgic_sanitise_shareability);
+ reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
+ GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
+ vgic_sanitise_inner_cacheability);
+ reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
+ GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
+ vgic_sanitise_outer_cacheability);
+
+ reg &= ~PENDBASER_RES0_MASK;
+ reg &= ~GENMASK_ULL(51, 48);
+
+ return reg;
+}
+
+static u64 vgic_sanitise_propbaser(u64 reg)
+{
+ reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
+ GICR_PROPBASER_SHAREABILITY_SHIFT,
+ vgic_sanitise_shareability);
+ reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
+ GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
+ vgic_sanitise_inner_cacheability);
+ reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
+ GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
+ vgic_sanitise_outer_cacheability);
+
+ reg &= ~PROPBASER_RES0_MASK;
+ reg &= ~GENMASK_ULL(51, 48);
+ return reg;
+}
+
+static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+ return extract_bytes(dist->propbaser, addr & 7, len);
+}
+
+static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 propbaser = dist->propbaser;
+
+ /* Storing a value with LPIs already enabled is undefined */
+ if (vgic_cpu->lpis_enabled)
+ return;
+
+ propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+ propbaser = vgic_sanitise_propbaser(propbaser);
+
+ dist->propbaser = propbaser;
+}
+
+static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
+}
+
+static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ u64 pendbaser = vgic_cpu->pendbaser;
+
+ /* Storing a value with LPIs already enabled is undefined */
+ if (vgic_cpu->lpis_enabled)
+ return;
+
+ pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+ pendbaser = vgic_sanitise_pendbaser(pendbaser);
+
+ vgic_cpu->pendbaser = pendbaser;
+}
+
/*
* The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
* redistributors, while SPIs are covered by registers in the distributor
@@ -218,7 +414,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+ vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
@@ -227,10 +423,10 @@ static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
@@ -285,24 +481,18 @@ unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
{
- int nr_vcpus = atomic_read(&kvm->online_vcpus);
struct kvm_vcpu *vcpu;
- struct vgic_io_device *devices;
int c, ret = 0;
- devices = kmalloc(sizeof(struct vgic_io_device) * nr_vcpus * 2,
- GFP_KERNEL);
- if (!devices)
- return -ENOMEM;
-
kvm_for_each_vcpu(c, vcpu, kvm) {
gpa_t rd_base = redist_base_address + c * SZ_64K * 2;
gpa_t sgi_base = rd_base + SZ_64K;
- struct vgic_io_device *rd_dev = &devices[c * 2];
- struct vgic_io_device *sgi_dev = &devices[c * 2 + 1];
+ struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+ struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
rd_dev->base_addr = rd_base;
+ rd_dev->iodev_type = IODEV_REDIST;
rd_dev->regions = vgic_v3_rdbase_registers;
rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
rd_dev->redist_vcpu = vcpu;
@@ -317,6 +507,7 @@ int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
sgi_dev->base_addr = sgi_base;
+ sgi_dev->iodev_type = IODEV_REDIST;
sgi_dev->regions = vgic_v3_sgibase_registers;
sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
sgi_dev->redist_vcpu = vcpu;
@@ -335,14 +526,15 @@ int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
if (ret) {
/* The current c failed, so we start with the previous one. */
for (c--; c >= 0; c--) {
+ struct vgic_cpu *vgic_cpu;
+
+ vcpu = kvm_get_vcpu(kvm, c);
+ vgic_cpu = &vcpu->arch.vgic_cpu;
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
- &devices[c * 2].dev);
+ &vgic_cpu->rd_iodev.dev);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
- &devices[c * 2 + 1].dev);
+ &vgic_cpu->sgi_iodev.dev);
}
- kfree(devices);
- } else {
- kvm->arch.vgic.redist_iodevs = devices;
}
return ret;
@@ -451,5 +643,6 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
irq->pending = true;
vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 9f6fab74dce7..3bad3c5ed431 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -56,6 +56,8 @@ unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
if (irq->enabled)
value |= (1U << i);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
@@ -74,6 +76,8 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
spin_lock(&irq->irq_lock);
irq->enabled = true;
vgic_queue_irq_unlock(vcpu->kvm, irq);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -92,6 +96,7 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
irq->enabled = false;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -108,6 +113,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
if (irq->pending)
value |= (1U << i);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
@@ -129,6 +136,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
irq->soft_pending = true;
vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -152,6 +160,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
}
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -168,6 +177,8 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
if (irq->active)
value |= (1U << i);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
@@ -242,6 +253,7 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, false);
+ vgic_put_irq(vcpu->kvm, irq);
}
vgic_change_active_finish(vcpu, intid);
}
@@ -257,6 +269,7 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, true);
+ vgic_put_irq(vcpu->kvm, irq);
}
vgic_change_active_finish(vcpu, intid);
}
@@ -272,6 +285,8 @@ unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
val |= (u64)irq->priority << (i * 8);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return val;
@@ -298,6 +313,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
/* Narrow the priority range to what we actually support */
irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
spin_unlock(&irq->irq_lock);
+
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -313,6 +330,8 @@ unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
if (irq->config == VGIC_CONFIG_EDGE)
value |= (2U << (i * 2));
+
+ vgic_put_irq(vcpu->kvm, irq);
}
return value;
@@ -326,7 +345,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
int i;
for (i = 0; i < len * 4; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq;
/*
* The configuration cannot be changed for SGIs in general,
@@ -337,14 +356,18 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
if (intid + i < VGIC_NR_PRIVATE_IRQS)
continue;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
spin_lock(&irq->irq_lock);
+
if (test_bit(i * 2 + 1, &val)) {
irq->config = VGIC_CONFIG_EDGE;
} else {
irq->config = VGIC_CONFIG_LEVEL;
irq->pending = irq->line_level | irq->soft_pending;
}
+
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -450,8 +473,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
{
struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
const struct vgic_register_region *region;
- struct kvm_vcpu *r_vcpu;
- unsigned long data;
+ unsigned long data = 0;
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
addr - iodev->base_addr);
@@ -460,8 +482,21 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
return 0;
}
- r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
- data = region->read(r_vcpu, addr, len);
+ switch (iodev->iodev_type) {
+ case IODEV_CPUIF:
+ data = region->read(vcpu, addr, len);
+ break;
+ case IODEV_DIST:
+ data = region->read(vcpu, addr, len);
+ break;
+ case IODEV_REDIST:
+ data = region->read(iodev->redist_vcpu, addr, len);
+ break;
+ case IODEV_ITS:
+ data = region->its_read(vcpu->kvm, iodev->its, addr, len);
+ break;
+ }
+
vgic_data_host_to_mmio_bus(val, len, data);
return 0;
}
@@ -471,7 +506,6 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
{
struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
const struct vgic_register_region *region;
- struct kvm_vcpu *r_vcpu;
unsigned long data = vgic_data_mmio_bus_to_host(val, len);
region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
@@ -482,8 +516,21 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
if (!check_region(region, addr, len))
return 0;
- r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
- region->write(r_vcpu, addr, len, data);
+ switch (iodev->iodev_type) {
+ case IODEV_CPUIF:
+ region->write(vcpu, addr, len, data);
+ break;
+ case IODEV_DIST:
+ region->write(vcpu, addr, len, data);
+ break;
+ case IODEV_REDIST:
+ region->write(iodev->redist_vcpu, addr, len, data);
+ break;
+ case IODEV_ITS:
+ region->its_write(vcpu->kvm, iodev->its, addr, len, data);
+ break;
+ }
+
return 0;
}
@@ -513,6 +560,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
}
io_device->base_addr = dist_base_address;
+ io_device->iodev_type = IODEV_DIST;
io_device->redist_vcpu = NULL;
mutex_lock(&kvm->slots_lock);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 850901482aec..0b3ecf9d100e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -21,10 +21,19 @@ struct vgic_register_region {
unsigned int len;
unsigned int bits_per_irq;
unsigned int access_flags;
- unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
- unsigned int len);
- void (*write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
- unsigned long val);
+ union {
+ unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
+ unsigned int len);
+ unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len);
+ };
+ union {
+ void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
+ unsigned int len, unsigned long val);
+ void (*its_write)(struct kvm *kvm, struct vgic_its *its,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+ };
};
extern struct kvm_io_device_ops kvm_io_gic_ops;
@@ -87,6 +96,12 @@ unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
unsigned long data);
+unsigned long extract_bytes(unsigned long data, unsigned int offset,
+ unsigned int num);
+
+u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
+ unsigned long val);
+
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
@@ -147,4 +162,12 @@ unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+u64 vgic_sanitise_outer_cacheability(u64 reg);
+u64 vgic_sanitise_inner_cacheability(u64 reg);
+u64 vgic_sanitise_shareability(u64 reg);
+u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
+ u64 (*sanitise_fn)(u64));
+#endif
+
#endif
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index e31405ee5515..0bf6709d1006 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -124,6 +124,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
}
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -332,20 +333,25 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;
+ ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+ if (ret) {
+ kvm_err("Cannot register GICv2 KVM device\n");
+ iounmap(kvm_vgic_global_state.vctrl_base);
+ return ret;
+ }
+
ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
kvm_vgic_global_state.vctrl_base +
resource_size(&info->vctrl),
info->vctrl.start);
-
if (ret) {
kvm_err("Cannot map VCTRL into hyp\n");
+ kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
iounmap(kvm_vgic_global_state.vctrl_base);
return ret;
}
kvm_vgic_global_state.can_emulate_gicv2 = true;
- kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
-
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.type = VGIC_V2;
kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 346b4ad12b49..0506543df38a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -81,6 +81,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
else
intid = val & GICH_LR_VIRTUALID;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+ if (!irq) /* An LPI could have been unmapped. */
+ continue;
spin_lock(&irq->irq_lock);
@@ -113,6 +115,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
}
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -190,6 +193,11 @@ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}
+#define INITIAL_PENDBASER_VALUE \
+ (GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) | \
+ GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
+ GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
+
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -207,10 +215,12 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
* way, so we force SRE to 1 to demonstrate this to the guest.
* This goes with the spec allowing the value to be RAO/WI.
*/
- if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
- else
+ vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
+ } else {
vgic_v3->vgic_sre = 0;
+ }
/* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN;
@@ -296,6 +306,7 @@ out:
int vgic_v3_probe(const struct gic_kvm_info *info)
{
u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+ int ret;
/*
* The ListRegs field is 5 bits, but there is a architectural
@@ -319,12 +330,22 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
- kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+ ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+ if (ret) {
+ kvm_err("Cannot register GICv2 KVM device.\n");
+ return ret;
+ }
kvm_info("vgic-v2@%llx\n", info->vcpu.start);
}
+ ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
+ if (ret) {
+ kvm_err("Cannot register GICv3 KVM device.\n");
+ kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
+ return ret;
+ }
+
if (kvm_vgic_global_state.vcpu_base == 0)
kvm_info("disabling GICv2 emulation\n");
- kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
kvm_vgic_global_state.vctrl_base = NULL;
kvm_vgic_global_state.type = VGIC_V3;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 69b61abefa19..e7aeac719e09 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -33,10 +33,17 @@ struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
/*
* Locking order is always:
- * vgic_cpu->ap_list_lock
- * vgic_irq->irq_lock
+ * its->cmd_lock (mutex)
+ * its->its_lock (mutex)
+ * vgic_cpu->ap_list_lock
+ * kvm->lpi_list_lock
+ * vgic_irq->irq_lock
*
- * (that is, always take the ap_list_lock before the struct vgic_irq lock).
+ * If you need to take multiple locks, always take the upper lock first,
+ * then the lower ones, e.g. first take the its_lock, then the irq_lock.
+ * If you are already holding a lock and need to take a higher one, you
+ * have to drop the lower-ranking lock first and re-acquire it after having
+ * taken the upper one.
*
* When taking more than one ap_list_lock at the same time, always take the
* lowest numbered VCPU's ap_list_lock first, so:
@@ -45,6 +52,41 @@ struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
* spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
*/
+/*
+ * Iterate over the VM's list of mapped LPIs to find the one with a
+ * matching interrupt ID and return a reference to the IRQ structure.
+ */
+static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct vgic_irq *irq = NULL;
+
+ spin_lock(&dist->lpi_list_lock);
+
+ list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ if (irq->intid != intid)
+ continue;
+
+ /*
+		 * This increases the refcount; the caller is expected to
+ * call vgic_put_irq() later once it's finished with the IRQ.
+ */
+ vgic_get_irq_kref(irq);
+ goto out_unlock;
+ }
+ irq = NULL;
+
+out_unlock:
+ spin_unlock(&dist->lpi_list_lock);
+
+ return irq;
+}
+
+/*
+ * This looks up the virtual interrupt ID to get the corresponding
+ * struct vgic_irq. It also increases the refcount, so any caller is expected
+ * to call vgic_put_irq() once it's finished with this IRQ.
+ */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid)
{
@@ -56,14 +98,43 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
if (intid <= VGIC_MAX_SPI)
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
- /* LPIs are not yet covered */
+ /* LPIs */
if (intid >= VGIC_MIN_LPI)
- return NULL;
+ return vgic_get_lpi(kvm, intid);
WARN(1, "Looking up struct vgic_irq for reserved INTID");
return NULL;
}
+/*
+ * We can't do anything in here, because we lack the kvm pointer to
+ * lock and remove the item from the lpi_list. So we keep this function
+ * empty and use the return value of kref_put() to trigger the freeing.
+ */
+static void vgic_irq_release(struct kref *ref)
+{
+}
+
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+ struct vgic_dist *dist;
+
+ if (irq->intid < VGIC_MIN_LPI)
+ return;
+
+ if (!kref_put(&irq->refcount, vgic_irq_release))
+ return;
+
+ dist = &kvm->arch.vgic;
+
+ spin_lock(&dist->lpi_list_lock);
+ list_del(&irq->lpi_list);
+ dist->lpi_list_count--;
+ spin_unlock(&dist->lpi_list_lock);
+
+ kfree(irq);
+}
+
/**
* kvm_vgic_target_oracle - compute the target vcpu for an irq
*
@@ -236,6 +307,11 @@ retry:
goto retry;
}
+ /*
+ * Grab a reference to the irq to reflect the fact that it is
+ * now in the ap_list.
+ */
+ vgic_get_irq_kref(irq);
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
irq->vcpu = vcpu;
@@ -269,14 +345,17 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
if (!irq)
return -EINVAL;
- if (irq->hw != mapped_irq)
+ if (irq->hw != mapped_irq) {
+ vgic_put_irq(kvm, irq);
return -EINVAL;
+ }
spin_lock(&irq->irq_lock);
if (!vgic_validate_injection(irq, level)) {
/* Nothing to see here, move along... */
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(kvm, irq);
return 0;
}
@@ -288,6 +367,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
}
vgic_queue_irq_unlock(kvm, irq);
+ vgic_put_irq(kvm, irq);
return 0;
}
@@ -330,25 +410,28 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
irq->hwintid = phys_irq;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
return 0;
}
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
-
- BUG_ON(!irq);
+ struct vgic_irq *irq;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ BUG_ON(!irq);
+
spin_lock(&irq->irq_lock);
irq->hw = false;
irq->hwintid = 0;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
return 0;
}
@@ -386,6 +469,15 @@ retry:
list_del(&irq->ap_list);
irq->vcpu = NULL;
spin_unlock(&irq->irq_lock);
+
+ /*
+ * This vgic_put_irq call matches the
+ * vgic_get_irq_kref in vgic_queue_irq_unlock,
+ * where we added the LPI to the ap_list. As
+ * we remove the irq from the list, we drop
+			 * we remove the irq from the list, we also
+			 * drop the refcount.
+ vgic_put_irq(vcpu->kvm, irq);
continue;
}
@@ -614,6 +706,8 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
spin_lock(&irq->irq_lock);
map_is_active = irq->hw && irq->active;
spin_unlock(&irq->irq_lock);
+ vgic_put_irq(vcpu->kvm, irq);
return map_is_active;
}
+
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 7b300ca370b7..1d8e21d5c13f 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -25,6 +25,7 @@
#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
#define INTERRUPT_ID_BITS_SPIS 10
+#define INTERRUPT_ID_BITS_ITS 16
#define VGIC_PRI_BITS 5
#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
@@ -38,9 +39,13 @@ struct vgic_vmcr {
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid);
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
void vgic_kick_vcpus(struct kvm *kvm);
+int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
+ phys_addr_t addr, phys_addr_t alignment);
+
void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
@@ -59,6 +64,14 @@ int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type);
+static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+{
+ if (irq->intid < VGIC_MIN_LPI)
+ return;
+
+ kref_get(&irq->refcount);
+}
+
#ifdef CONFIG_KVM_ARM_VGIC_V3
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
@@ -71,6 +84,10 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
+bool vgic_has_its(struct kvm *kvm);
+int kvm_vgic_register_its_device(void);
+void vgic_enable_lpis(struct kvm_vcpu *vcpu);
+int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
#else
static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
{
@@ -122,9 +139,28 @@ static inline int vgic_register_redist_iodevs(struct kvm *kvm,
{
return -ENODEV;
}
+
+static inline bool vgic_has_its(struct kvm *kvm)
+{
+ return false;
+}
+
+static inline int kvm_vgic_register_its_device(void)
+{
+ return -ENODEV;
+}
+
+static inline void vgic_enable_lpis(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+ return -ENODEV;
+}
#endif
-void kvm_register_vgic_device(unsigned long type);
+int kvm_register_vgic_device(unsigned long type);
int vgic_lazy_init(struct kvm *kvm);
int vgic_init(struct kvm *kvm);
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 8db197bb6c7a..3bcc9990adf7 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -62,12 +62,14 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
struct kvm_kernel_irq_routing_entry route;
- if (!irqchip_in_kernel(kvm) || msi->flags != 0)
+ if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
return -EINVAL;
route.msi.address_lo = msi->address_lo;
route.msi.address_hi = msi->address_hi;
route.msi.data = msi->data;
+ route.msi.flags = msi->flags;
+ route.msi.devid = msi->devid;
return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}
@@ -135,7 +137,8 @@ void kvm_free_irq_routing(struct kvm *kvm)
free_irq_routing_table(rt);
}
-static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+static int setup_routing_entry(struct kvm *kvm,
+ struct kvm_irq_routing_table *rt,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
@@ -154,7 +157,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
e->gsi = ue->gsi;
e->type = ue->type;
- r = kvm_set_routing_entry(e, ue);
+ r = kvm_set_routing_entry(kvm, e, ue);
if (r)
goto out;
if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
@@ -176,6 +179,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
unsigned flags)
{
struct kvm_irq_routing_table *new, *old;
+ struct kvm_kernel_irq_routing_entry *e;
u32 i, j, nr_rt_entries = 0;
int r;
@@ -199,23 +203,25 @@ int kvm_set_irq_routing(struct kvm *kvm,
new->chip[i][j] = -1;
for (i = 0; i < nr; ++i) {
- struct kvm_kernel_irq_routing_entry *e;
-
r = -ENOMEM;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (!e)
goto out;
r = -EINVAL;
- if (ue->flags) {
- kfree(e);
- goto out;
- }
- r = setup_routing_entry(new, e, ue);
- if (r) {
- kfree(e);
- goto out;
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_MSI:
+ if (ue->flags & ~KVM_MSI_VALID_DEVID)
+ goto free_entry;
+ break;
+ default:
+ if (ue->flags)
+ goto free_entry;
+ break;
}
+ r = setup_routing_entry(kvm, new, e, ue);
+ if (r)
+ goto free_entry;
++ue;
}
@@ -232,7 +238,10 @@ int kvm_set_irq_routing(struct kvm *kvm,
new = old;
r = 0;
+ goto out;
+free_entry:
+ kfree(e);
out:
free_irq_routing_table(new);
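
With the irqchip.c change, kvm_send_userspace_msi() and kvm_set_irq_routing() no longer reject every non-zero flags value: KVM_MSI_VALID_DEVID is accepted for MSI entries, and the flags and devid fields are copied into the kernel routing entry so that an emulated GICv3 ITS can tell which device generated the MSI. A minimal user-space sketch of injecting such an MSI follows; the vm_fd parameter and the address, data and devid values are placeholders, not values taken from the patch.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* vm_fd is a VM file descriptor obtained via KVM_CREATE_VM. */
static int inject_msi_with_devid(int vm_fd)
{
	struct kvm_msi msi;

	memset(&msi, 0, sizeof(msi));
	msi.address_lo = 0x08020040;		/* placeholder doorbell address */
	msi.address_hi = 0x0;
	msi.data       = 42;			/* placeholder event ID */
	msi.flags      = KVM_MSI_VALID_DEVID;	/* devid field below is valid */
	msi.devid      = 0x0010;		/* placeholder requester ID */

	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}

Setting any other flag bit still fails with EINVAL, as the mask check in kvm_send_userspace_msi() and the new switch statement in kvm_set_irq_routing() require.
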
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2e791367c576..cc081ccfcaa3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1444,6 +1444,52 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
return true;
}
+static int hva_to_pfn_remapped(struct vm_area_struct *vma,
+ unsigned long addr, bool *async,
+ bool write_fault, kvm_pfn_t *p_pfn)
+{
+ unsigned long pfn;
+ int r;
+
+ r = follow_pfn(vma, addr, &pfn);
+ if (r) {
+ /*
+ * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
+ * not call the fault handler, so do it here.
+ */
+ bool unlocked = false;
+ r = fixup_user_fault(current, current->mm, addr,
+ (write_fault ? FAULT_FLAG_WRITE : 0),
+ &unlocked);
+ if (unlocked)
+ return -EAGAIN;
+ if (r)
+ return r;
+
+ r = follow_pfn(vma, addr, &pfn);
+ if (r)
+ return r;
+
+ }
+
+ /*
+ * Get a reference here because callers of *hva_to_pfn* and
+ * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
+ * returned pfn. This is only needed if the VMA has VM_MIXEDMAP
+ * set, but the kvm_get_pfn/kvm_release_pfn_clean pair will
+ * simply do nothing for reserved pfns.
+ *
+ * Whoever called remap_pfn_range is also going to call e.g.
+ * unmap_mapping_range before the underlying pages are freed,
+ * causing a call to our MMU notifier.
+ */
+ kvm_get_pfn(pfn);
+
+ *p_pfn = pfn;
+ return 0;
+}
+
/*
* Pin guest page in memory and return its pfn.
* @addr: host virtual address which maps memory to the guest
@@ -1463,7 +1509,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
{
struct vm_area_struct *vma;
kvm_pfn_t pfn = 0;
- int npages;
+ int npages, r;
/* we can do it either atomically or asynchronously, not both */
BUG_ON(atomic && async);
@@ -1485,14 +1531,17 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
goto exit;
}
+retry:
vma = find_vma_intersection(current->mm, addr, addr + 1);
if (vma == NULL)
pfn = KVM_PFN_ERR_FAULT;
- else if ((vma->vm_flags & VM_PFNMAP)) {
- pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff;
- BUG_ON(!kvm_is_reserved_pfn(pfn));
+ else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
+ r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
+ if (r == -EAGAIN)
+ goto retry;
+ if (r < 0)
+ pfn = KVM_PFN_ERR_FAULT;
} else {
if (async && vma_is_valid(vma, write_fault))
*async = true;
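
The two hunks above route VM_IO and VM_PFNMAP VMAs through hva_to_pfn_remapped() instead of computing the pfn by hand: follow_pfn() fails when no PTE is present (as the comment notes, get_user_pages() cannot fault such mappings in), so fixup_user_fault() is used to run the VMA's fault handler first. If that call had to drop mmap_sem (unlocked becomes true), the cached vma pointer can no longer be trusted, and -EAGAIN sends hva_to_pfn() back to the retry label to redo find_vma_intersection(). Reduced to a stand-alone sketch, with a hypothetical lookup() helper standing in for the VMA walk plus hva_to_pfn_remapped(), the retry contract is simply:

#include <errno.h>

/* lookup() is assumed to return -EAGAIN whenever the lock protecting
 * the walk had to be dropped, so the whole walk must be redone. */
extern int lookup(unsigned long addr, unsigned long *pfn);

static int resolve_pfn(unsigned long addr, unsigned long *pfn)
{
	int r;

	do {
		r = lookup(addr, pfn);	/* restart from scratch on -EAGAIN */
	} while (r == -EAGAIN);

	return r;
}
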
@@ -2348,9 +2397,20 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (id >= KVM_MAX_VCPU_ID)
return -EINVAL;
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus == KVM_MAX_VCPUS) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
+
+ kvm->created_vcpus++;
+ mutex_unlock(&kvm->lock);
+
vcpu = kvm_arch_vcpu_create(kvm, id);
- if (IS_ERR(vcpu))
- return PTR_ERR(vcpu);
+ if (IS_ERR(vcpu)) {
+ r = PTR_ERR(vcpu);
+ goto vcpu_decrement;
+ }
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
@@ -2359,14 +2419,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
- if (!kvm_vcpu_compatible(vcpu)) {
- r = -EINVAL;
- goto unlock_vcpu_destroy;
- }
- if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
- r = -EINVAL;
- goto unlock_vcpu_destroy;
- }
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
goto unlock_vcpu_destroy;
@@ -2399,6 +2451,10 @@ unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
+vcpu_decrement:
+ mutex_lock(&kvm->lock);
+ kvm->created_vcpus--;
+ mutex_unlock(&kvm->lock);
return r;
}
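
The three hunks above move the vCPU limit check to the front of kvm_vm_ioctl_create_vcpu(): a slot is reserved in the new created_vcpus counter under kvm->lock before the potentially expensive kvm_arch_vcpu_create() runs, instead of checking online_vcpus (which is only incremented once a vCPU is fully set up) afterwards, and the vcpu_decrement label releases the reserved slot on any failure path. From user space the observable behaviour is unchanged: creating more vCPUs than the kernel allows still fails with EINVAL. A sketch with error handling trimmed and the usual /dev/kvm boilerplate inlined (file descriptors are deliberately not closed here):

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns how many vCPUs could be created before the in-kernel
 * limit made KVM_CREATE_VCPU fail (the failing call sets EINVAL). */
static int count_creatable_vcpus(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	int id = 0;

	while (ioctl(vm_fd, KVM_CREATE_VCPU, (unsigned long)id) >= 0)
		id++;

	return id;
}
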
@@ -3487,6 +3543,30 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
return r;
}
+struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ gpa_t addr)
+{
+ struct kvm_io_bus *bus;
+ int dev_idx, srcu_idx;
+ struct kvm_io_device *iodev = NULL;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+
+ dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
+ if (dev_idx < 0)
+ goto out_unlock;
+
+ iodev = bus->range[dev_idx].dev;
+
+out_unlock:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ return iodev;
+}
+EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
+
static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt)
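
kvm_io_bus_get_dev() lets a caller recover the kvm_io_device that was registered at a given address on a bus; the bus array is protected by SRCU, hence the srcu_read_lock()/srcu_dereference() pair around the range search. A hedged, kernel-style sketch of how such a lookup is typically consumed follows; struct my_mmio_dev and my_dev_from_addr() are hypothetical names, and a real caller would also check iodev->ops before trusting the container_of() result.

/* Illustrative only: recover the owning structure of an iodev that was
 * earlier registered on KVM_MMIO_BUS at 'addr'. */
struct my_mmio_dev {
	struct kvm_io_device iodev;
	/* ... device state ... */
};

static struct my_mmio_dev *my_dev_from_addr(struct kvm *kvm, gpa_t addr)
{
	struct kvm_io_device *iodev;

	iodev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, addr);
	if (!iodev)
		return NULL;	/* nothing registered at this address */

	return container_of(iodev, struct my_mmio_dev, iodev);
}
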